author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2020-08-08 01:41:01 +0200
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2020-08-08 01:41:01 +0200
commit    9e8238020c5beba64e7ffafbb7ea0fb02fe68270 (patch)
tree      37c7fd953cfa7ebd3d6c476bc4c7d4de2302cdc3 /drivers
parent    Input: elan_i2c - add more hardware ID for Lenovo laptops (diff)
parent    Input: exc3000 - add support to query model and fw_version (diff)
download  linux-9e8238020c5beba64e7ffafbb7ea0fb02fe68270.tar.xz
          linux-9e8238020c5beba64e7ffafbb7ea0fb02fe68270.zip
Merge branch 'next' into for-linus
Prepare input updates for 5.9 merge window.
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig7
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/accessibility/braille/braille_console.c4
-rw-r--r--drivers/acpi/Kconfig24
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/ac.c25
-rw-r--r--drivers/acpi/acpi_lpss.c6
-rw-r--r--drivers/acpi/acpi_video.c2
-rw-r--r--drivers/acpi/acpica/acconvert.h4
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acmacros.h4
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/dbinput.c16
-rw-r--r--drivers/acpi/acpica/dbxface.c1
-rw-r--r--drivers/acpi/acpica/dswexec.c33
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c35
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c17
-rw-r--r--drivers/acpi/acpica/hwgpe.c47
-rw-r--r--drivers/acpi/acpica/hwsleep.c12
-rw-r--r--drivers/acpi/acpica/nsnames.c12
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c12
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c9
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utprint.c7
-rw-r--r--drivers/acpi/arm64/iort.c6
-rw-r--r--drivers/acpi/battery.c10
-rw-r--r--drivers/acpi/button.c15
-rw-r--r--drivers/acpi/cppc_acpi.c33
-rw-r--r--drivers/acpi/device_pm.c6
-rw-r--r--drivers/acpi/dptf/dptf_power.c2
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c8
-rw-r--r--drivers/acpi/ec.c331
-rw-r--r--drivers/acpi/fan.c20
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/nfit/core.c14
-rw-r--r--drivers/acpi/nfit/nfit.h13
-rw-r--r--drivers/acpi/numa/srat.c41
-rw-r--r--drivers/acpi/osl.c2
-rw-r--r--drivers/acpi/pci_link.c4
-rw-r--r--drivers/acpi/pci_root.c17
-rw-r--r--drivers/acpi/proc.c14
-rw-r--r--drivers/acpi/processor_throttling.c7
-rw-r--r--drivers/acpi/sleep.c35
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/tables.c2
-rw-r--r--drivers/acpi/tiny-power-button.c46
-rw-r--r--drivers/acpi/video_detect.c9
-rw-r--r--drivers/acpi/wakeup.c105
-rw-r--r--drivers/acpi/x86/utils.c20
-rw-r--r--drivers/amba/bus.c1
-rw-r--r--drivers/android/binderfs.c200
-rw-r--r--drivers/ata/Kconfig77
-rw-r--r--drivers/ata/Makefile2
-rw-r--r--drivers/ata/ahci.c40
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ahci_imx.c2
-rw-r--r--drivers/ata/libata-core.c1126
-rw-r--r--drivers/ata/libata-eh.c224
-rw-r--r--drivers/ata/libata-pata-timings.c192
-rw-r--r--drivers/ata/libata-pmp.c1
-rw-r--r--drivers/ata/libata-sata.c1483
-rw-r--r--drivers/ata/libata-scsi.c583
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/ata/libata-transport.c10
-rw-r--r--drivers/ata/libata.h25
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_promise.c8
-rw-r--r--drivers/atm/.gitignore2
-rw-r--r--drivers/auxdisplay/charlcd.c2
-rw-r--r--drivers/auxdisplay/panel.c2
-rw-r--r--drivers/base/arch_topology.c56
-rw-r--r--drivers/base/component.c11
-rw-r--r--drivers/base/core.c227
-rw-r--r--drivers/base/cpu.c23
-rw-r--r--drivers/base/dd.c87
-rw-r--r--drivers/base/firmware_loader/Makefile1
-rw-r--r--drivers/base/firmware_loader/fallback.c2
-rw-r--r--drivers/base/firmware_loader/fallback.h10
-rw-r--r--drivers/base/firmware_loader/fallback_platform.c36
-rw-r--r--drivers/base/firmware_loader/fallback_table.c1
-rw-r--r--drivers/base/firmware_loader/firmware.h4
-rw-r--r--drivers/base/firmware_loader/main.c33
-rw-r--r--drivers/base/memory.c130
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/base/platform.c29
-rw-r--r--drivers/base/power/domain.c2
-rw-r--r--drivers/base/power/main.c21
-rw-r--r--drivers/base/power/power.h3
-rw-r--r--drivers/base/power/runtime.c36
-rw-r--r--drivers/base/power/sysfs.c55
-rw-r--r--drivers/base/power/wakeup.c17
-rw-r--r--drivers/base/property.c1
-rw-r--r--drivers/block/Makefile6
-rw-r--r--drivers/block/aoe/aoeblk.c4
-rw-r--r--drivers/block/brd.c4
-rw-r--r--drivers/block/drbd/drbd_main.c14
-rw-r--r--drivers/block/drbd/drbd_receiver.c1
-rw-r--r--drivers/block/drbd/drbd_worker.c1
-rw-r--r--drivers/block/floppy.c1093
-rw-r--r--drivers/block/loop.c67
-rw-r--r--drivers/block/nbd.c27
-rw-r--r--drivers/block/null_blk.h29
-rw-r--r--drivers/block/null_blk_main.c184
-rw-r--r--drivers/block/null_blk_trace.c21
-rw-r--r--drivers/block/null_blk_trace.h79
-rw-r--r--drivers/block/null_blk_zoned.c57
-rw-r--r--drivers/block/pktcdvd.c15
-rw-r--r--drivers/block/ps3vram.c3
-rw-r--r--drivers/block/rbd.c248
-rw-r--r--drivers/block/rsxx/dev.c3
-rw-r--r--drivers/block/rsxx/dma.c2
-rw-r--r--drivers/block/umem.c4
-rw-r--r--drivers/block/virtio_blk.c92
-rw-r--r--drivers/block/xen-blkfront.c23
-rw-r--r--drivers/block/zram/zram_drv.c5
-rw-r--r--drivers/bluetooth/Kconfig4
-rw-r--r--drivers/bluetooth/bfusb.c33
-rw-r--r--drivers/bluetooth/btintel.c4
-rw-r--r--drivers/bluetooth/btqca.c10
-rw-r--r--drivers/bluetooth/btqca.h6
-rw-r--r--drivers/bluetooth/btrtl.c12
-rw-r--r--drivers/bluetooth/btrtl.h4
-rw-r--r--drivers/bluetooth/btusb.c32
-rw-r--r--drivers/bluetooth/hci_ag6xx.c2
-rw-r--r--drivers/bluetooth/hci_h4.c4
-rw-r--r--drivers/bluetooth/hci_h5.c49
-rw-r--r--drivers/bluetooth/hci_intel.c2
-rw-r--r--drivers/bluetooth/hci_qca.c174
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/bus/Makefile3
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c33
-rw-r--r--drivers/bus/hisi_lpc.c27
-rw-r--r--drivers/bus/mhi/Kconfig14
-rw-r--r--drivers/bus/mhi/Makefile2
-rw-r--r--drivers/bus/mhi/core/Makefile3
-rw-r--r--drivers/bus/mhi/core/boot.c507
-rw-r--r--drivers/bus/mhi/core/init.c1294
-rw-r--r--drivers/bus/mhi/core/internal.h684
-rw-r--r--drivers/bus/mhi/core/main.c1521
-rw-r--r--drivers/bus/mhi/core/pm.c973
-rw-r--r--drivers/bus/ti-sysc.c604
-rw-r--r--drivers/char/Kconfig167
-rw-r--r--drivers/char/Makefile5
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/applicom.c1
-rw-r--r--drivers/char/efirtc.c366
-rw-r--r--drivers/char/hw_random/Kconfig17
-rw-r--r--drivers/char/hw_random/imx-rngc.c85
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c5
-rw-r--r--drivers/char/hw_random/via-rng.c7
-rw-r--r--drivers/char/hw_random/virtio-rng.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c18
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c6
-rw-r--r--drivers/char/ipmi/kcs_bmc_aspeed.c151
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/nwbutton.h1
-rw-r--r--drivers/char/nwflash.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c4
-rw-r--r--drivers/char/ppdev.c20
-rw-r--r--drivers/char/random.c84
-rw-r--r--drivers/char/rtc.c1311
-rw-r--r--drivers/char/toshiba.c2
-rw-r--r--drivers/char/tpm/eventlog/common.c12
-rw-r--r--drivers/char/tpm/eventlog/of.c3
-rw-r--r--drivers/char/tpm/eventlog/tpm1.c2
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c12
-rw-r--r--drivers/char/tpm/tpm-interface.c2
-rw-r--r--drivers/char/tpm/tpm.h3
-rw-r--r--drivers/char/tpm/tpm2-cmd.c3
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c153
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.h1
-rw-r--r--drivers/char/tpm/tpm_tis_core.c8
-rw-r--r--drivers/char/tpm/tpm_tis_spi_cr50.c7
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c3
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/clk/at91/Makefile4
-rw-r--r--drivers/clk/at91/at91rm9200.c199
-rw-r--r--drivers/clk/at91/at91sam9g45.c220
-rw-r--r--drivers/clk/at91/at91sam9n12.c238
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c91
-rw-r--r--drivers/clk/at91/clk-usb.c9
-rw-r--r--drivers/clk/at91/sam9x60.c14
-rw-r--r--drivers/clk/at91/sama5d3.c240
-rw-r--r--drivers/clk/clk-asm9260.c2
-rw-r--r--drivers/clk/clk-si5341.c212
-rw-r--r--drivers/clk/clk.c130
-rw-r--r--drivers/clk/imx/clk-composite-8m.c20
-rw-r--r--drivers/clk/imx/clk-fixup-div.c2
-rw-r--r--drivers/clk/imx/clk-fixup-mux.c2
-rw-r--r--drivers/clk/imx/clk-gate2.c8
-rw-r--r--drivers/clk/imx/clk-imx6sl.c1
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c2
-rw-r--r--drivers/clk/imx/clk-imx8mm.c63
-rw-r--r--drivers/clk/imx/clk-imx8mn.c41
-rw-r--r--drivers/clk/imx/clk-imx8mp.c24
-rw-r--r--drivers/clk/imx/clk-imx8mq.c53
-rw-r--r--drivers/clk/imx/clk-pfdv2.c61
-rw-r--r--drivers/clk/imx/clk-pll14xx.c4
-rw-r--r--drivers/clk/imx/clk-pllv4.c12
-rw-r--r--drivers/clk/imx/clk-sscg-pll.c14
-rw-r--r--drivers/clk/imx/clk.h13
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c4
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c55
-rw-r--r--drivers/clk/ingenic/tcu.c10
-rw-r--r--drivers/clk/keystone/Kconfig8
-rw-r--r--drivers/clk/keystone/Makefile1
-rw-r--r--drivers/clk/keystone/syscon-clk.c172
-rw-r--r--drivers/clk/meson/g12a.c129
-rw-r--r--drivers/clk/meson/g12a.h6
-rw-r--r--drivers/clk/meson/gxbb.c21
-rw-r--r--drivers/clk/meson/gxbb.h2
-rw-r--r--drivers/clk/meson/meson8b.c21
-rw-r--r--drivers/clk/mmp/Makefile2
-rw-r--r--drivers/clk/mmp/clk-mix.c2
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c146
-rw-r--r--drivers/clk/mmp/clk-pll.c170
-rw-r--r--drivers/clk/mmp/clk.h24
-rw-r--r--drivers/clk/qcom/Kconfig17
-rw-r--r--drivers/clk/qcom/Makefile2
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c277
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h12
-rw-r--r--drivers/clk/qcom/clk-rpm.c35
-rw-r--r--drivers/clk/qcom/clk-rpmh.c79
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c50
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c2
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c72
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c55
-rw-r--r--drivers/clk/qcom/gcc-sm8250.c3690
-rw-r--r--drivers/clk/qcom/gpucc-sc7180.c37
-rw-r--r--drivers/clk/qcom/mss-sc7180.c143
-rw-r--r--drivers/clk/renesas/Kconfig3
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c10
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c40
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c4
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c17
-rw-r--r--drivers/clk/samsung/clk.c4
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c40
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c42
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c17
-rw-r--r--drivers/clk/socfpga/clk-s10.c29
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h25
-rw-r--r--drivers/clk/sprd/Kconfig8
-rw-r--r--drivers/clk/sprd/Makefile1
-rw-r--r--drivers/clk/sprd/common.c10
-rw-r--r--drivers/clk/sprd/composite.h39
-rw-r--r--drivers/clk/sprd/div.h20
-rw-r--r--drivers/clk/sprd/gate.c17
-rw-r--r--drivers/clk/sprd/gate.h120
-rw-r--r--drivers/clk/sprd/mux.h28
-rw-r--r--drivers/clk/sprd/pll.c7
-rw-r--r--drivers/clk/sprd/pll.h55
-rw-r--r--drivers/clk/sprd/sc9863a-clk.c1773
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c115
-rw-r--r--drivers/clk/tegra/Makefile1
-rw-r--r--drivers/clk/tegra/clk-id.h12
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c37
-rw-r--r--drivers/clk/tegra/clk-tegra-pmc.c122
-rw-r--r--drivers/clk/tegra/clk-tegra114.c43
-rw-r--r--drivers/clk/tegra/clk-tegra124.c48
-rw-r--r--drivers/clk/tegra/clk-tegra20.c9
-rw-r--r--drivers/clk/tegra/clk-tegra210.c34
-rw-r--r--drivers/clk/tegra/clk-tegra30.c33
-rw-r--r--drivers/clk/tegra/clk.h1
-rw-r--r--drivers/clk/ti/clk-33xx.c2
-rw-r--r--drivers/clk/ti/clk-814x.c7
-rw-r--r--drivers/clk/ti/clkctrl.c99
-rw-r--r--drivers/clk/versatile/clk-icst.c25
-rw-r--r--drivers/clk/versatile/clk-icst.h22
-rw-r--r--drivers/clk/versatile/clk-impd1.c80
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c30
-rw-r--r--drivers/clocksource/bcm2835_timer.c8
-rw-r--r--drivers/clocksource/bcm_kona_timer.c10
-rw-r--r--drivers/clocksource/dw_apb_timer.c11
-rw-r--r--drivers/clocksource/exynos_mct.c12
-rw-r--r--drivers/clocksource/hyperv_timer.c7
-rw-r--r--drivers/clocksource/ingenic-ost.c189
-rw-r--r--drivers/clocksource/ingenic-timer.c3
-rw-r--r--drivers/clocksource/mips-gic-timer.c8
-rw-r--r--drivers/clocksource/mxs_timer.c10
-rw-r--r--drivers/clocksource/nomadik-mtu.c11
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c12
-rw-r--r--drivers/clocksource/timer-atlas7.c50
-rw-r--r--drivers/clocksource/timer-cs5535.c9
-rw-r--r--drivers/clocksource/timer-efm32.c10
-rw-r--r--drivers/clocksource/timer-fsl-ftm.c10
-rw-r--r--drivers/clocksource/timer-fttmr010.c68
-rw-r--r--drivers/clocksource/timer-imx-gpt.c10
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c2
-rw-r--r--drivers/clocksource/timer-imx-tpm.c2
-rw-r--r--drivers/clocksource/timer-integrator-ap.c11
-rw-r--r--drivers/clocksource/timer-meson6.c11
-rw-r--r--drivers/clocksource/timer-microchip-pit64b.c1
-rw-r--r--drivers/clocksource/timer-orion.c9
-rw-r--r--drivers/clocksource/timer-owl.c15
-rw-r--r--drivers/clocksource/timer-prima2.c14
-rw-r--r--drivers/clocksource/timer-pxa.c10
-rw-r--r--drivers/clocksource/timer-sp804.c11
-rw-r--r--drivers/clocksource/timer-ti-dm.c217
-rw-r--r--drivers/clocksource/timer-u300.c9
-rw-r--r--drivers/clocksource/timer-vf-pit.c10
-rw-r--r--drivers/clocksource/timer-vt8500.c11
-rw-r--r--drivers/clocksource/timer-zevio.c13
-rw-r--r--drivers/counter/104-quad-8.c330
-rw-r--r--drivers/counter/stm32-timer-cnt.c66
-rw-r--r--drivers/cpufreq/Kconfig4
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/cpufreq/Kconfig.x864
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c5
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c3
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c5
-rw-r--r--drivers/cpufreq/cpufreq-dt.c4
-rw-r--r--drivers/cpufreq/cpufreq-dt.h4
-rw-r--r--drivers/cpufreq/cpufreq.c20
-rw-r--r--drivers/cpufreq/cpufreq_stats.c14
-rw-r--r--drivers/cpufreq/e_powersaver.c2
-rw-r--r--drivers/cpufreq/elanfreq.c2
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c13
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c71
-rw-r--r--drivers/cpufreq/intel_pstate.c100
-rw-r--r--drivers/cpufreq/longhaul.c2
-rw-r--r--drivers/cpufreq/longrun.c3
-rw-r--r--drivers/cpufreq/p4-clockmod.c2
-rw-r--r--drivers/cpufreq/powernow-k6.c4
-rw-r--r--drivers/cpufreq/powernow-k7.c2
-rw-r--r--drivers/cpufreq/powernow-k8.c2
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c30
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c191
-rw-r--r--drivers/cpufreq/sc520_freq.c2
-rw-r--r--drivers/cpufreq/speedstep-centrino.c14
-rw-r--r--drivers/cpufreq/speedstep-ich.c10
-rw-r--r--drivers/cpufreq/speedstep-smi.c10
-rw-r--r--drivers/cpufreq/ti-cpufreq.c7
-rw-r--r--drivers/cpuidle/Kconfig.arm8
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c12
-rw-r--r--drivers/cpuidle/cpuidle-psci.c46
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c392
-rw-r--r--drivers/cpuidle/cpuidle.c40
-rw-r--r--drivers/cpuidle/governor.c2
-rw-r--r--drivers/crypto/Kconfig50
-rw-r--r--drivers/crypto/Makefile3
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c4
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h2
-rw-r--r--drivers/crypto/atmel-i2c.c3
-rw-r--r--drivers/crypto/bcm/util.c40
-rw-r--r--drivers/crypto/caam/Kconfig2
-rw-r--r--drivers/crypto/caam/caamalg.c421
-rw-r--r--drivers/crypto/caam/caamalg_desc.c30
-rw-r--r--drivers/crypto/caam/caamalg_qi.c4
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h6
-rw-r--r--drivers/crypto/caam/caamhash.c344
-rw-r--r--drivers/crypto/caam/caampkc.c189
-rw-r--r--drivers/crypto/caam/caampkc.h10
-rw-r--r--drivers/crypto/caam/caamrng.c405
-rw-r--r--drivers/crypto/caam/ctrl.c88
-rw-r--r--drivers/crypto/caam/desc.h2
-rw-r--r--drivers/crypto/caam/intern.h9
-rw-r--r--drivers/crypto/caam/jr.c49
-rw-r--r--drivers/crypto/caam/key_gen.c2
-rw-r--r--drivers/crypto/caam/qi.c60
-rw-r--r--drivers/crypto/caam/qi.h4
-rw-r--r--drivers/crypto/caam/regs.h7
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c2
-rw-r--r--drivers/crypto/ccp/psp-dev.c3
-rw-r--r--drivers/crypto/ccp/sev-dev.c39
-rw-r--r--drivers/crypto/ccp/sp-dev.h1
-rw-r--r--drivers/crypto/ccp/sp-pci.c9
-rw-r--r--drivers/crypto/ccree/cc_aead.c176
-rw-r--r--drivers/crypto/ccree/cc_aead.h3
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c229
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.h5
-rw-r--r--drivers/crypto/ccree/cc_cipher.c78
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c29
-rw-r--r--drivers/crypto/ccree/cc_driver.c127
-rw-r--r--drivers/crypto/ccree/cc_driver.h18
-rw-r--r--drivers/crypto/ccree/cc_hash.c228
-rw-r--r--drivers/crypto/ccree/cc_hash.h31
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h332
-rw-r--r--drivers/crypto/ccree/cc_pm.c60
-rw-r--r--drivers/crypto/ccree/cc_pm.h21
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c48
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.h19
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.c78
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.h45
-rw-r--r--drivers/crypto/chelsio/Kconfig11
-rw-r--r--drivers/crypto/chelsio/Makefile3
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c358
-rw-r--r--drivers/crypto/chelsio/chcr_common.h135
-rw-r--r--drivers/crypto/chelsio/chcr_core.c88
-rw-r--r--drivers/crypto/chelsio/chcr_core.h13
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h16
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c2
-rw-r--r--drivers/crypto/chelsio/chcr_ktls.c2028
-rw-r--r--drivers/crypto/chelsio/chcr_ktls.h98
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c29
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c82
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c10
-rw-r--r--drivers/crypto/hisilicon/Kconfig4
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h3
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c20
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c160
-rw-r--r--drivers/crypto/hisilicon/qm.c619
-rw-r--r--drivers/crypto/hisilicon/qm.h72
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h12
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c260
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c294
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h2
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c54
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c324
-rw-r--r--drivers/crypto/img-hash.c2
-rw-r--r--drivers/crypto/marvell/Kconfig37
-rw-r--r--drivers/crypto/marvell/Makefile7
-rw-r--r--drivers/crypto/marvell/cesa/Makefile3
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c (renamed from drivers/crypto/marvell/cesa.c)0
-rw-r--r--drivers/crypto/marvell/cesa/cesa.h (renamed from drivers/crypto/marvell/cesa.h)5
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c (renamed from drivers/crypto/marvell/cipher.c)15
-rw-r--r--drivers/crypto/marvell/cesa/hash.c (renamed from drivers/crypto/marvell/hash.c)38
-rw-r--r--drivers/crypto/marvell/cesa/tdma.c (renamed from drivers/crypto/marvell/tdma.c)10
-rw-r--r--drivers/crypto/marvell/octeontx/Makefile6
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cpt_common.h51
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h824
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf.h34
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_main.c307
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c253
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c1686
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h180
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf.h104
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c1746
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.h188
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_main.c985
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c247
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c612
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h227
-rw-r--r--drivers/crypto/mediatek/mtk-sha.c2
-rw-r--r--drivers/crypto/mxs-dcp.c58
-rw-r--r--drivers/crypto/nx/nx.h2
-rw-r--r--drivers/crypto/omap-sham.c4
-rw-r--r--drivers/crypto/padlock-aes.c2
-rw-r--r--drivers/crypto/padlock-sha.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c3
-rw-r--r--drivers/crypto/qce/common.c2
-rw-r--r--drivers/crypto/qce/common.h3
-rw-r--r--drivers/crypto/qce/dma.c11
-rw-r--r--drivers/crypto/qce/dma.h2
-rw-r--r--drivers/crypto/qce/skcipher.c30
-rw-r--r--drivers/crypto/s5p-sss.c2
-rw-r--r--drivers/crypto/vmx/.gitignore1
-rw-r--r--drivers/crypto/xilinx/Makefile2
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c457
-rw-r--r--drivers/dax/bus.c4
-rw-r--r--drivers/dax/kmem.c14
-rw-r--r--drivers/dax/super.c28
-rw-r--r--drivers/devfreq/devfreq.c14
-rw-r--r--drivers/devfreq/governor.h21
-rw-r--r--drivers/devfreq/governor_simpleondemand.c4
-rw-r--r--drivers/devfreq/governor_userspace.c2
-rw-r--r--drivers/devfreq/tegra30-devfreq.c4
-rw-r--r--drivers/dio/dio-driver.c9
-rw-r--r--drivers/dma-buf/Kconfig13
-rw-r--r--drivers/dma-buf/dma-buf.c113
-rw-r--r--drivers/dma/Kconfig18
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/at_hdmac.c121
-rw-r--r--drivers/dma/at_hdmac_regs.h2
-rw-r--r--drivers/dma/at_xdmac.c44
-rw-r--r--drivers/dma/bcm-sba-raid.c2
-rw-r--r--drivers/dma/dmaengine.c162
-rw-r--r--drivers/dma/dmaengine.h16
-rw-r--r--drivers/dma/dmatest.c15
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c15
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c21
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.h2
-rw-r--r--drivers/dma/idxd/cdev.c4
-rw-r--r--drivers/dma/idxd/device.c11
-rw-r--r--drivers/dma/idxd/irq.c26
-rw-r--r--drivers/dma/idxd/sysfs.c19
-rw-r--r--drivers/dma/ioat/dca.c2
-rw-r--r--drivers/dma/mmp_tdma.c5
-rw-r--r--drivers/dma/owl-dma.c8
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sh/rcar-dmac.c2
-rw-r--r--drivers/dma/sh/shdma-base.c2
-rw-r--r--drivers/dma/sprd-dma.c26
-rw-r--r--drivers/dma/stm32-dma.c96
-rw-r--r--drivers/dma/stm32-dmamux.c93
-rw-r--r--drivers/dma/stm32-mdma.c78
-rw-r--r--drivers/dma/sun4i-dma.c4
-rw-r--r--drivers/dma/tegra20-apb-dma.c556
-rw-r--r--drivers/dma/tegra210-adma.c4
-rw-r--r--drivers/dma/ti/dma-crossbar.c8
-rw-r--r--drivers/dma/ti/edma.c79
-rw-r--r--drivers/dma/ti/k3-psil.c1
-rw-r--r--drivers/dma/ti/k3-udma-glue.c18
-rw-r--r--drivers/dma/ti/k3-udma.c119
-rw-r--r--drivers/dma/ti/omap-dma.c2
-rw-r--r--drivers/dma/uniphier-mdmac.c2
-rw-r--r--drivers/dma/uniphier-xdmac.c609
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c85
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c3
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/amd64_edac.c14
-rw-r--r--drivers/edac/armada_xp_edac.c26
-rw-r--r--drivers/edac/dmc520_edac.c656
-rw-r--r--drivers/edac/edac_mc.c511
-rw-r--r--drivers/edac/edac_mc.h6
-rw-r--r--drivers/edac/edac_mc_sysfs.c110
-rw-r--r--drivers/edac/edac_module.h1
-rw-r--r--drivers/edac/ghes_edac.c16
-rw-r--r--drivers/edac/i10nm_base.c8
-rw-r--r--drivers/edac/mce_amd.c2
-rw-r--r--drivers/edac/pnd2_edac.c4
-rw-r--r--drivers/edac/sb_edac.c14
-rw-r--r--drivers/edac/skx_base.c2
-rw-r--r--drivers/edac/synopsys_edac.c22
-rw-r--r--drivers/eisa/.gitignore1
-rw-r--r--drivers/extcon/extcon-axp288.c34
-rw-r--r--drivers/extcon/extcon-palmas.c8
-rw-r--r--drivers/extcon/extcon.c1
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/arm_scmi/Makefile3
-rw-r--r--drivers/firmware/arm_scmi/common.h115
-rw-r--r--drivers/firmware/arm_scmi/driver.c293
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c184
-rw-r--r--drivers/firmware/arm_scmi/perf.c2
-rw-r--r--drivers/firmware/arm_scmi/shmem.c83
-rw-r--r--drivers/firmware/arm_scpi.c4
-rw-r--r--drivers/firmware/arm_sdei.c71
-rw-r--r--drivers/firmware/dmi_scan.c6
-rw-r--r--drivers/firmware/edd.c6
-rw-r--r--drivers/firmware/efi/Kconfig5
-rw-r--r--drivers/firmware/efi/Makefile4
-rw-r--r--drivers/firmware/efi/apple-properties.c12
-rw-r--r--drivers/firmware/efi/arm-init.c85
-rw-r--r--drivers/firmware/efi/arm-runtime.c18
-rw-r--r--drivers/firmware/efi/capsule-loader.c2
-rw-r--r--drivers/firmware/efi/cper.c64
-rw-r--r--drivers/firmware/efi/dev-path-parser.c38
-rw-r--r--drivers/firmware/efi/earlycon.c14
-rw-r--r--drivers/firmware/efi/efi-bgrt.c7
-rw-r--r--drivers/firmware/efi/efi-pstore.c4
-rw-r--r--drivers/firmware/efi/efi.c473
-rw-r--r--drivers/firmware/efi/efivars.c2
-rw-r--r--drivers/firmware/efi/embedded-firmware.c150
-rw-r--r--drivers/firmware/efi/esrt.c6
-rw-r--r--drivers/firmware/efi/fdtparams.c126
-rw-r--r--drivers/firmware/efi/libstub/Makefile9
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c199
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c1
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c24
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c822
-rw-r--r--drivers/firmware/efi/libstub/efistub.h626
-rw-r--r--drivers/firmware/efi/libstub/fdt.c7
-rw-r--r--drivers/firmware/efi/libstub/file.c259
-rw-r--r--drivers/firmware/efi/libstub/hidden.h6
-rw-r--r--drivers/firmware/efi/libstub/mem.c307
-rw-r--r--drivers/firmware/efi/libstub/random.c136
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c124
-rw-r--r--drivers/firmware/efi/libstub/skip_spaces.c11
-rw-r--r--drivers/firmware/efi/libstub/string.c56
-rw-r--r--drivers/firmware/efi/libstub/tpm.c5
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c835
-rw-r--r--drivers/firmware/efi/memattr.c13
-rw-r--r--drivers/firmware/efi/reboot.c4
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c4
-rw-r--r--drivers/firmware/efi/tpm.c7
-rw-r--r--drivers/firmware/efi/vars.c2
-rw-r--r--drivers/firmware/imx/scu-pd.c13
-rw-r--r--drivers/firmware/meson/meson_sm.c2
-rw-r--r--drivers/firmware/pcdp.c8
-rw-r--r--drivers/firmware/psci/psci_checker.c4
-rw-r--r--drivers/firmware/stratix10-svc.c1
-rw-r--r--drivers/firmware/tegra/Kconfig2
-rw-r--r--drivers/firmware/xilinx/Kconfig2
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c2
-rw-r--r--drivers/firmware/xilinx/zynqmp.c27
-rw-r--r--drivers/fpga/dfl-pci.c6
-rw-r--r--drivers/fpga/zynq-fpga.c3
-rw-r--r--drivers/gpio/Kconfig11
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-brcmstb.c44
-rw-r--r--drivers/gpio/gpio-davinci.c7
-rw-r--r--drivers/gpio/gpio-eic-sprd.c9
-rw-r--r--drivers/gpio/gpio-exar.c7
-rw-r--r--drivers/gpio/gpio-mlxbf2.c335
-rw-r--r--drivers/gpio/gpio-mmio.c23
-rw-r--r--drivers/gpio/gpio-mockup.c2
-rw-r--r--drivers/gpio/gpio-mt7621.c4
-rw-r--r--drivers/gpio/gpio-mvebu.c17
-rw-r--r--drivers/gpio/gpio-mxc.c7
-rw-r--r--drivers/gpio/gpio-omap.c29
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pl061.c13
-rw-r--r--drivers/gpio/gpio-pxa.c15
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-siox.c28
-rw-r--r--drivers/gpio/gpio-tegra.c1
-rw-r--r--drivers/gpio/gpio-tegra186.c64
-rw-r--r--drivers/gpio/gpio-uniphier.c2
-rw-r--r--drivers/gpio/gpio-wcd934x.c9
-rw-r--r--drivers/gpio/gpio-zx.c10
-rw-r--r--drivers/gpio/gpiolib-devres.c46
-rw-r--r--drivers/gpio/gpiolib-of.c139
-rw-r--r--drivers/gpio/gpiolib-of.h2
-rw-r--r--drivers/gpio/gpiolib.c1344
-rw-r--r--drivers/gpio/gpiolib.h4
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c177
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c220
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c130
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c250
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c230
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c496
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h338
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c257
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c36
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c152
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c150
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c554
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h37
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c91
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c266
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c30
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c64
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c208
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c352
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c201
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c2204
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c (renamed from drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h)14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c168
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c256
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c244
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c193
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dc_features.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h72
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h75
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h51
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c69
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h9
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c67
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h34
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h63
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c42
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c68
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h38
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c10
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c36
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h17
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c182
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h32
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h9
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c20
-rw-r--r--drivers/gpu/drm/amd/display/modules/vmid/vmid.c16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h (renamed from drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c)24
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h29
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c205
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c118
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c26
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_debug.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c77
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c66
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c183
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c88
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c84
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c16
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c2
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h6
-rw-r--r--drivers/gpu/drm/ast/ast_main.c24
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c27
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c24
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c34
-rw-r--r--drivers/gpu/drm/bridge/Kconfig51
-rw-r--r--drivers/gpu/drm/bridge/Makefile6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig13
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h40
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c28
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c23
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c13
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c87
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c6
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c295
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c300
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c21
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c8
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c8
-rw-r--r--drivers/gpu/drm/bridge/panel.c23
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c8
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c349
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c8
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c3
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c342
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c329
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c9
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c1046
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c267
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c235
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c211
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c51
-rw-r--r--drivers/gpu/drm/drm_atomic.c117
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c83
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c102
-rw-r--r--drivers/gpu/drm/drm_auth.c8
-rw-r--r--drivers/gpu/drm/drm_bridge.c751
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c379
-rw-r--r--drivers/gpu/drm/drm_bufs.c40
-rw-r--r--drivers/gpu/drm/drm_client.c2
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c12
-rw-r--r--drivers/gpu/drm/drm_connector.c96
-rw-r--r--drivers/gpu/drm/drm_context.c28
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c3
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c2
-rw-r--r--drivers/gpu/drm/drm_dma.c21
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c141
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c195
-rw-r--r--drivers/gpu/drm/drm_drv.c20
-rw-r--r--drivers/gpu/drm/drm_edid.c216
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c22
-rw-r--r--drivers/gpu/drm/drm_file.c231
-rw-r--r--drivers/gpu/drm/drm_format_helper.c2
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c122
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c61
-rw-r--r--drivers/gpu/drm/drm_hdcp.c166
-rw-r--r--drivers/gpu/drm/drm_internal.h5
-rw-r--r--drivers/gpu/drm/drm_ioctl.c1
-rw-r--r--drivers/gpu/drm/drm_irq.c4
-rw-r--r--drivers/gpu/drm/drm_lock.c11
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c39
-rw-r--r--drivers/gpu/drm/drm_mm.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c82
-rw-r--r--drivers/gpu/drm/drm_prime.c37
-rw-r--r--drivers/gpu/drm/drm_scatter.c3
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c46
-rw-r--r--drivers/gpu/drm/drm_syncobj.c87
-rw-r--r--drivers/gpu/drm/drm_sysfs.c4
-rw-r--r--drivers/gpu/drm/drm_vblank.c177
-rw-r--r--drivers/gpu/drm/drm_vm.c26
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c60
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c52
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c42
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c59
-rw-r--r--drivers/gpu/drm/etnaviv/state_blt.xml.h2
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h36
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c32
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c6
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h6
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c12
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h7
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c79
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c11
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c10
-rw-r--r--drivers/gpu/drm/i915/.gitignore1
-rw-r--r--drivers/gpu/drm/i915/Kconfig7
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile25
-rw-r--r--drivers/gpu/drm/i915/Makefile22
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c406
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c89
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c97
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c255
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c444
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c1106
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h73
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c602
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c163
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c128
-rw-r--r--drivers/gpu/drm/i915/display/intel_csr.c (renamed from drivers/gpu/drm/i915/intel_csr.c)46
-rw-r--r--drivers/gpu/drm/i915/display/intel_csr.h (renamed from drivers/gpu/drm/i915/intel_csr.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c1406
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h72
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c3546
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c2134
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c766
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h119
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c885
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c194
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c108
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c1521
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c58
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c168
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c50
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c267
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c66
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c223
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h87
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c111
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c527
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c434
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c203
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c108
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c223
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c69
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c407
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c363
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c114
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c482
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c165
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c445
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c14
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c494
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c96
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c504
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c803
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c136
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c21
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c114
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c178
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c74
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c8
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c402
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.h15
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c27
-rw-r--r--drivers/gpu/drm/i915/gt/hsw_clear_kernel.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_param.c63
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_param.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_sseu.c98
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h33
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h46
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c180
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h48
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c141
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c68
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c128
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c65
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c504
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c76
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c109
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c236
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c83
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c244
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/ivb_clear_kernel.c61
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c7
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c1846
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c24
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring_submission.c296
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c188
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c9
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c445
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c30
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c255
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h19
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c69
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h62
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h18
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c84
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c224
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c45
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c81
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c25
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c103
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c63
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c43
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h62
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c219
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c351
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c32
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c127
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c25
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c270
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c18
-rw-r--r--drivers/gpu/drm/i915/i915_active.c139
-rw-r--r--drivers/gpu/drm/i915/i915_active.h11
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c3
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c29
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2428
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.h8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c250
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.h14
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1215
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h237
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c43
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c43
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h5
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c7
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.h17
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c309
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h6
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c5
-rw-r--r--drivers/gpu/drm/i915/i915_params.c11
-rw-r--r--drivers/gpu/drm/i915/i915_params.h74
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c18
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c170
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h72
-rw-r--r--drivers/gpu/drm/i915/i915_request.c270
-rw-r--r--drivers/gpu/drm/i915/i915_request.h14
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c28
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h3
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h1
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c17
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h2
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c22
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h66
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c1
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h22
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c72
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h25
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c125
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h11
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c45
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c500
-rw-r--r--drivers/gpu/drm/i915/intel_dram.h14
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c21
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c66
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c767
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h5
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c16
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c54
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c78
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c203
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c1
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c489
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.h18
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c176
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c9
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c16
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h1
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c134
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h4
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c63
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c5
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h1
-rw-r--r--drivers/gpu/drm/lima/lima_regs.h1
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c35
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h6
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c46
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c9
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c64
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c4
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c184
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c93
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h7
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c10
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h4
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c86
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c115
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h6
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c98
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c620
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h71
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c82
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c97
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c16
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h3
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h12
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c28
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/cursc37a.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c27
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/timer.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/user.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c68
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/timer.c56
-rw-r--r--drivers/gpu/drm/nouveau/nvif/userc361.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c17
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig22
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c97
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c183
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c137
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c217
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c349
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c71
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c313
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c59
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c295
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c48
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c34
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c53
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c178
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c269
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c247
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c88
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c83
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/panel/Kconfig44
-rw-r--r--drivers/gpu/drm/panel/Makefile5
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c854
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c352
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c526
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c14
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c1098
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c293
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c321
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx424akp.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c123
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h26
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c30
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c18
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c6
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c73
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c12
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c49
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c26
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/.gitignore1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c22
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c36
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c5
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h27
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c56
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c88
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c11
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c2
-rw-r--r--drivers/gpu/drm/stm/drv.c2
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c4
-rw-r--r--drivers/gpu/drm/stm/ltdc.c103
-rw-r--r--drivers/gpu/drm/stm/ltdc.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c104
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h14
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c131
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h2
-rw-r--r--drivers/gpu/drm/tegra/dc.c20
-rw-r--r--drivers/gpu/drm/tegra/drm.c3
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c34
-rw-r--r--drivers/gpu/drm/tidss/Kconfig14
-rw-r--r--drivers/gpu/drm/tidss/Makefile12
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c442
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.h48
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c2753
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h137
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h243
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c285
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h39
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c96
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.h17
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c146
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.h77
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c299
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.h15
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c229
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h25
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.c202
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.h22
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c2
-rw-r--r--drivers/gpu/drm/tiny/Kconfig22
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c286
-rw-r--r--drivers/gpu/drm/tiny/repaper.c21
-rw-r--r--drivers/gpu/drm/tiny/st7586.c9
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c76
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c271
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c110
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c1
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h41
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c4
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c13
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h49
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c20
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h37
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c90
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c57
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c121
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c369
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h161
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h787
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h466
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h36
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h58
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h347
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h382
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_types.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c213
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c86
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h186
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c429
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c130
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c78
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c66
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c387
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c612
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_thp.c166
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c5
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c19
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c4
-rw-r--r--drivers/gpu/host1x/dev.c59
-rw-r--r--drivers/gpu/trace/Kconfig4
-rw-r--r--drivers/gpu/trace/Makefile3
-rw-r--r--drivers/gpu/trace/trace_gpu_mem.c13
-rw-r--r--drivers/hid/Kconfig20
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-alps.c1
-rw-r--r--drivers/hid/hid-appleir.c12
-rw-r--r--drivers/hid/hid-glorious.c86
-rw-r--r--drivers/hid/hid-ids.h13
-rw-r--r--drivers/hid/hid-lg-g15.c10
-rw-r--r--drivers/hid/hid-logitech-dj.c11
-rw-r--r--drivers/hid/hid-mcp2221.c742
-rw-r--r--drivers/hid/hid-multitouch.c3
-rw-r--r--drivers/hid/hid-quirks.c4
-rw-r--r--drivers/hid/hid-rmi.c1
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.h2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h2
-rw-r--r--drivers/hid/usbhid/hid-core.c37
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hid/wacom_sys.c4
-rw-r--r--drivers/hid/wacom_wac.c88
-rw-r--r--drivers/hsi/clients/cmt_speech.c9
-rw-r--r--drivers/hv/channel_mgmt.c3
-rw-r--r--drivers/hv/hv.c6
-rw-r--r--drivers/hv/hv_balloon.c25
-rw-r--r--drivers/hv/hv_debugfs.c2
-rw-r--r--drivers/hv/hv_trace.h4
-rw-r--r--drivers/hv/hyperv_vmbus.h2
-rw-r--r--drivers/hv/vmbus_drv.c105
-rw-r--r--drivers/hwmon/Kconfig11
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adt7475.c95
-rw-r--r--drivers/hwmon/axi-fan-control.c469
-rw-r--r--drivers/hwmon/coretemp.c2
-rw-r--r--drivers/hwmon/da9052-hwmon.c4
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c4
-rw-r--r--drivers/hwmon/drivetemp.c8
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ibmpowernv.c8
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/k10temp.c66
-rw-r--r--drivers/hwmon/lm73.c10
-rw-r--r--drivers/hwmon/nct7904.c33
-rw-r--r--drivers/hwmon/pmbus/Kconfig21
-rw-r--r--drivers/hwmon/pmbus/adm1275.c37
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c29
-rw-r--r--drivers/hwmon/pmbus/ir35221.c23
-rw-r--r--drivers/hwmon/pmbus/isl68137.c192
-rw-r--r--drivers/hwmon/pmbus/lm25066.c39
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c130
-rw-r--r--drivers/hwmon/pmbus/ltc3815.c20
-rw-r--r--drivers/hwmon/pmbus/max16064.c7
-rw-r--r--drivers/hwmon/pmbus/max20730.c3
-rw-r--r--drivers/hwmon/pmbus/max31785.c6
-rw-r--r--drivers/hwmon/pmbus/max34440.c25
-rw-r--r--drivers/hwmon/pmbus/max8688.c17
-rw-r--r--drivers/hwmon/pmbus/pmbus.c4
-rw-r--r--drivers/hwmon/pmbus/pmbus.h20
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c119
-rw-r--r--drivers/hwmon/pmbus/tps53679.c172
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c2
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c5
-rw-r--r--drivers/hwmon/pmbus/zl6100.c5
-rw-r--r--drivers/hwmon/via-cputemp.c8
-rw-r--r--drivers/hwspinlock/Kconfig12
-rw-r--r--drivers/hwspinlock/hwspinlock_internal.h2
-rw-r--r--drivers/hwtracing/coresight/Kconfig21
-rw-r--r--drivers/hwtracing/coresight/Makefile3
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c485
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c1206
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.c745
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h235
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c20
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h15
-rw-r--r--drivers/hwtracing/coresight/coresight.c86
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h2
-rw-r--r--drivers/hwtracing/intel_th/msu.c49
-rw-r--r--drivers/hwtracing/intel_th/pci.c8
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c8
-rw-r--r--drivers/i2c/busses/i2c-altera.c25
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c2
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c27
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c7
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c92
-rw-r--r--drivers/i2c/busses/i2c-at91.h4
-rw-r--r--drivers/i2c/busses/i2c-axxia.c4
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c17
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c8
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c2
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c33
-rw-r--r--drivers/i2c/busses/i2c-cadence.c7
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c36
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c75
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c4
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c3
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c12
-rw-r--r--drivers/i2c/busses/i2c-efm32.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c18
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c10
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c4
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c16
-rw-r--r--drivers/i2c/busses/i2c-imx.c155
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c6
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c21
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c6
-rw-r--r--drivers/i2c/busses/i2c-mxs.c10
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c8
-rw-r--r--drivers/i2c/busses/i2c-omap.c6
-rw-r--r--drivers/i2c/busses/i2c-owl.c9
-rw-r--r--drivers/i2c/busses/i2c-parport.c12
-rw-r--r--drivers/i2c/busses/i2c-powermac.c15
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c58
-rw-r--r--drivers/i2c/busses/i2c-qup.c11
-rw-r--r--drivers/i2c/busses/i2c-rcar.c24
-rw-r--r--drivers/i2c/busses/i2c-riic.c6
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c12
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c9
-rw-r--r--drivers/i2c/busses/i2c-sirf.c3
-rw-r--r--drivers/i2c/busses/i2c-sprd.c9
-rw-r--r--drivers/i2c/busses/i2c-st.c6
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c10
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c245
-rw-r--r--drivers/i2c/busses/i2c-stu300.c6
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c2
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra.c18
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c13
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c6
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c7
-rw-r--r--drivers/i2c/busses/i2c-wmt.c2
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c18
-rw-r--r--drivers/i2c/busses/i2c-xlr.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c2
-rw-r--r--drivers/i2c/i2c-core-base.c75
-rw-r--r--drivers/i2c/i2c-core-of.c2
-rw-r--r--drivers/i2c/i2c-core-smbus.c26
-rw-r--r--drivers/i2c/i2c-dev.c50
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c4
-rw-r--r--drivers/i2c/i2c-smbus.c2
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c1
-rw-r--r--drivers/i3c/device.c50
-rw-r--r--drivers/i3c/master.c28
-rw-r--r--drivers/i3c/master/dw-i3c-master.c2
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c2
-rw-r--r--drivers/ide/Kconfig20
-rw-r--r--drivers/ide/Makefile2
-rw-r--r--drivers/ide/au1xxx-ide.c597
-rw-r--r--drivers/ide/ide-scan-pci.c8
-rw-r--r--drivers/idle/intel_idle.c381
-rw-r--r--drivers/iio/Kconfig1
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/TODO19
-rw-r--r--drivers/iio/accel/adis16201.c1
-rw-r--r--drivers/iio/accel/adis16209.c1
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c8
-rw-r--r--drivers/iio/accel/sca3000.c2
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/adc/Kconfig22
-rw-r--r--drivers/iio/adc/Makefile2
-rw-r--r--drivers/iio/adc/ad7124.c99
-rw-r--r--drivers/iio/adc/ad7192.c (renamed from drivers/staging/iio/adc/ad7192.c)209
-rw-r--r--drivers/iio/adc/ad7292.c5
-rw-r--r--drivers/iio/adc/ad7793.c2
-rw-r--r--drivers/iio/adc/exynos_adc.c6
-rw-r--r--drivers/iio/adc/max1118.c10
-rw-r--r--drivers/iio/adc/mcp320x.c3
-rw-r--r--drivers/iio/adc/npcm_adc.c30
-rw-r--r--drivers/iio/adc/rn5t618-adc.c256
-rw-r--r--drivers/iio/adc/stm32-adc.c39
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c21
-rw-r--r--drivers/iio/adc/ti-ads8344.c14
-rw-r--r--drivers/iio/adc/ti-tlc4541.c3
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c95
-rw-r--r--drivers/iio/amplifiers/Kconfig10
-rw-r--r--drivers/iio/amplifiers/Makefile1
-rw-r--r--drivers/iio/amplifiers/ad8366.c30
-rw-r--r--drivers/iio/amplifiers/hmc425a.c248
-rw-r--r--drivers/iio/chemical/atlas-sensor.c109
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c3
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c13
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c375
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c6
-rw-r--r--drivers/iio/dac/Kconfig71
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5755.c22
-rw-r--r--drivers/iio/dac/ad5770r.c695
-rw-r--r--drivers/iio/dac/ltc2632.c102
-rw-r--r--drivers/iio/dac/vf610_dac.c1
-rw-r--r--drivers/iio/gyro/adis16136.c62
-rw-r--r--drivers/iio/gyro/adis16260.c1
-rw-r--r--drivers/iio/imu/adis.c68
-rw-r--r--drivers/iio/imu/adis16400.c140
-rw-r--r--drivers/iio/imu/adis16460.c40
-rw-r--r--drivers/iio/imu/adis16480.c197
-rw-r--r--drivers/iio/imu/adis_buffer.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig12
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c660
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c111
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h58
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c49
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h5
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c57
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c74
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c160
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h7
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c23
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c24
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c37
-rw-r--r--drivers/iio/industrialio-core.c61
-rw-r--r--drivers/iio/light/Kconfig31
-rw-r--r--drivers/iio/light/Makefile3
-rw-r--r--drivers/iio/light/al3010.c242
-rw-r--r--drivers/iio/light/al3320a.c72
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c15
-rw-r--r--drivers/iio/light/gp2ap002.c720
-rw-r--r--drivers/iio/light/gp2ap020a00f.c23
-rw-r--r--drivers/iio/light/iqs621-als.c617
-rw-r--r--drivers/iio/light/si1133.c37
-rw-r--r--drivers/iio/light/vcnl4000.c144
-rw-r--r--drivers/iio/position/Kconfig19
-rw-r--r--drivers/iio/position/Makefile7
-rw-r--r--drivers/iio/position/iqs624-pos.c284
-rw-r--r--drivers/iio/potentiostat/lmp91000.c18
-rw-r--r--drivers/iio/pressure/Kconfig11
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c14
-rw-r--r--drivers/iio/pressure/icp10100.c658
-rw-r--r--drivers/iio/proximity/srf04.c96
-rw-r--r--drivers/iio/temperature/Kconfig10
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/iqs620at-temp.c97
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c161
-rw-r--r--drivers/infiniband/core/cache.c26
-rw-r--r--drivers/infiniband/core/cm.c764
-rw-r--r--drivers/infiniband/core/cma.c114
-rw-r--r--drivers/infiniband/core/cma_configfs.c6
-rw-r--r--drivers/infiniband/core/cma_priv.h6
-rw-r--r--drivers/infiniband/core/mad_priv.h4
-rw-r--r--drivers/infiniband/core/multicast.c2
-rw-r--r--drivers/infiniband/core/nldev.c3
-rw-r--r--drivers/infiniband/core/rdma_core.c32
-rw-r--r--drivers/infiniband/core/rw.c12
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/ucma.c61
-rw-r--r--drivers/infiniband/core/umem.c11
-rw-r--r--drivers/infiniband/core/uverbs.h4
-rw-r--r--drivers/infiniband/core/uverbs_main.c16
-rw-r--r--drivers/infiniband/core/uverbs_std_types_async_fd.c30
-rw-r--r--drivers/infiniband/core/verbs.c24
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h26
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c926
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c492
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c489
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h95
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c463
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h85
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c470
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h145
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c48
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h8
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h7
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h4
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c158
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c29
-rw-r--r--drivers/infiniband/hw/efa/efa_common_defs.h13
-rw-r--r--drivers/infiniband/hw/efa/efa_regs_defs.h25
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c51
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.c2
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c4
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c26
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h2
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h4
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c26
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h2
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h54
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c474
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c46
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c1851
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c977
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c96
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h26
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c24
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_status.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h12
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c12
-rw-r--r--drivers/infiniband/hw/mlx4/main.c12
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c16
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c20
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c21
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c267
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h86
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c660
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qos.c136
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c39
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c6
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h2
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c137
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c15
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c14
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c21
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h6
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c5
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h2
-rw-r--r--drivers/input/input-mt.c2
-rw-r--r--drivers/input/joystick/db9.c10
-rw-r--r--drivers/input/joystick/gamecon.c10
-rw-r--r--drivers/input/joystick/sidewinder.c15
-rw-r--r--drivers/input/joystick/spaceball.c8
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c2
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c1
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c335
-rw-r--r--drivers/input/misc/ati_remote2.c4
-rw-r--r--drivers/input/misc/cm109.c8
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/iqs269a.c21
-rw-r--r--drivers/input/misc/pwm-vibra.c2
-rw-r--r--drivers/input/misc/xen-kbdfront.c4
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/mouse/appletouch.c2
-rw-r--r--drivers/input/mouse/cyapa_gen3.c4
-rw-r--r--drivers/input/mouse/cyapa_gen5.c2
-rw-r--r--drivers/input/mouse/cyapa_gen6.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h20
-rw-r--r--drivers/input/mouse/elan_i2c_core.c195
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c165
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c35
-rw-r--r--drivers/input/mouse/elantech.c12
-rw-r--r--drivers/input/mouse/hgpk.c4
-rw-r--r--drivers/input/mouse/navpoint.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/mouse/sentelic.c4
-rw-r--r--drivers/input/mouse/sermouse.c4
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/serio/libps2.c2
-rw-r--r--drivers/input/sparse-keymap.c2
-rw-r--r--drivers/input/tablet/gtco.c6
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c56
-rw-r--r--drivers/input/touchscreen/chipone_icn8505.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c3
-rw-r--r--drivers/input/touchscreen/elants_i2c.c2
-rw-r--r--drivers/input/touchscreen/elo.c2
-rw-r--r--drivers/input/touchscreen/exc3000.c248
-rw-r--r--drivers/input/touchscreen/iqs5xx.c2
-rw-r--r--drivers/input/touchscreen/max11801_ts.c1
-rw-r--r--drivers/input/touchscreen/silead.c2
-rw-r--r--drivers/input/touchscreen/stmfts.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig25
-rw-r--r--drivers/interconnect/qcom/Makefile8
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c367
-rw-r--r--drivers/interconnect/qcom/bcm-voter.h27
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c150
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h149
-rw-r--r--drivers/interconnect/qcom/osm-l3.c276
-rw-r--r--drivers/interconnect/qcom/sc7180.c641
-rw-r--r--drivers/interconnect/qcom/sc7180.h151
-rw-r--r--drivers/interconnect/qcom/sdm845.c1055
-rw-r--r--drivers/interconnect/qcom/sdm845.h142
-rw-r--r--drivers/iommu/Kconfig19
-rw-r--r--drivers/iommu/amd_iommu.c201
-rw-r--r--drivers/iommu/amd_iommu_init.c11
-rw-r--r--drivers/iommu/amd_iommu_types.h11
-rw-r--r--drivers/iommu/arm-smmu-v3.c214
-rw-r--r--drivers/iommu/arm-smmu.c55
-rw-r--r--drivers/iommu/intel-iommu.c7
-rw-r--r--drivers/iommu/intel-svm.c9
-rw-r--r--drivers/iommu/iommu.c68
-rw-r--r--drivers/iommu/ipmmu-vmsa.c7
-rw-r--r--drivers/iommu/mtk_iommu.c13
-rw-r--r--drivers/iommu/mtk_iommu_v1.c14
-rw-r--r--drivers/iommu/omap-iommu.c10
-rw-r--r--drivers/iommu/omap-iopgtable.h3
-rw-r--r--drivers/iommu/qcom_iommu.c68
-rw-r--r--drivers/iommu/tegra-gart.c2
-rw-r--r--drivers/iommu/virtio-iommu.c44
-rw-r--r--drivers/ipack/carriers/tpci200.c1
-rw-r--r--drivers/irqchip/Kconfig21
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-atmel-aic.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c2
-rw-r--r--drivers/irqchip/irq-bcm2835.c15
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c552
-rw-r--r--drivers/irqchip/irq-gic-v3.c27
-rw-r--r--drivers/irqchip/irq-gic-v4.c134
-rw-r--r--drivers/irqchip/irq-i8259.c16
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c1
-rw-r--r--drivers/irqchip/irq-ingenic.c9
-rw-r--r--drivers/irqchip/irq-loongson-htpic.c149
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c271
-rw-r--r--drivers/irqchip/irq-mbigen.c8
-rw-r--r--drivers/irqchip/irq-meson-gpio.c18
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c2
-rw-r--r--drivers/irqchip/irq-sifive-plic.c119
-rw-r--r--drivers/irqchip/irq-stm32-exti.c14
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c3
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c18
-rw-r--r--drivers/irqchip/irq-vic.c9
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c116
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c4
-rw-r--r--drivers/leds/Kconfig11
-rw-r--r--drivers/leds/Makefile99
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/leds-bd2802.c2
-rw-r--r--drivers/leds/leds-ip30.c86
-rw-r--r--drivers/leds/leds-is31fl32xx.c2
-rw-r--r--drivers/leds/leds-lm3532.c2
-rw-r--r--drivers/leds/leds-lm3697.c2
-rw-r--r--drivers/leds/leds-ns2.c99
-rw-r--r--drivers/leds/leds-pwm.c55
-rw-r--r--drivers/lightnvm/core.c3
-rw-r--r--drivers/lightnvm/pblk-sysfs.c42
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/macintosh/ans-lcd.h2
-rw-r--r--drivers/macintosh/therm_windtunnel.c4
-rw-r--r--drivers/macintosh/via-pmu.c3
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/armada-37xx-rwtm-mailbox.c8
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c20
-rw-r--r--drivers/mailbox/imx-mailbox.c288
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c128
-rw-r--r--drivers/mailbox/sun6i-msgbox.c326
-rw-r--r--drivers/md/bcache/btree.c242
-rw-r--r--drivers/md/bcache/btree.h84
-rw-r--r--drivers/md/bcache/request.c7
-rw-r--r--drivers/md/bcache/request.h3
-rw-r--r--drivers/md/bcache/super.c11
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/writeback.c164
-rw-r--r--drivers/md/bcache/writeback.h19
-rw-r--r--drivers/md/dm-clone-metadata.c15
-rw-r--r--drivers/md/dm-clone-metadata.h2
-rw-r--r--drivers/md/dm-clone-target.c66
-rw-r--r--drivers/md/dm-crypt.c6
-rw-r--r--drivers/md/dm-integrity.c302
-rw-r--r--drivers/md/dm-linear.c18
-rw-r--r--drivers/md/dm-log-writes.c17
-rw-r--r--drivers/md/dm-mpath.c6
-rw-r--r--drivers/md/dm-stripe.c23
-rw-r--r--drivers/md/dm-verity-fec.c3
-rw-r--r--drivers/md/dm-writecache.c190
-rw-r--r--drivers/md/dm-zoned-metadata.c1
-rw-r--r--drivers/md/dm.c47
-rw-r--r--drivers/md/md.c11
-rw-r--r--drivers/media/Kconfig6
-rw-r--r--drivers/media/cec/cec-notifier.c41
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c12
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c214
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c4
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c4
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c466
-rw-r--r--drivers/media/dvb-frontends/m88ds3103_priv.h14
-rw-r--r--drivers/media/dvb-frontends/tda10071.c9
-rw-r--r--drivers/media/i2c/Kconfig16
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7180.c12
-rw-r--r--drivers/media/i2c/imx214.c1
-rw-r--r--drivers/media/i2c/imx219.c1481
-rw-r--r--drivers/media/i2c/ov5675.c82
-rw-r--r--drivers/media/i2c/ov5695.c49
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c259
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h4
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c71
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h44
-rw-r--r--drivers/media/i2c/tvp5150.c802
-rw-r--r--drivers/media/i2c/video-i2c.c4
-rw-r--r--drivers/media/mc/mc-entity.c11
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c4
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c12
-rw-r--r--drivers/media/pci/meye/meye.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c5
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c2
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c2
-rw-r--r--drivers/media/pci/saa7146/mxb.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/pci/ttpci/budget-av.c2
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c2
-rw-r--r--drivers/media/platform/Kconfig86
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c13
-rw-r--r--drivers/media/platform/aspeed-video.c86
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c224
-rw-r--r--drivers/media/platform/atmel/atmel-isc.h23
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c2
-rw-r--r--drivers/media/platform/coda/coda-common.c2
-rw-r--r--drivers/media/platform/davinci/isif.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c4
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/imx-pxp.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c2
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c2
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c2
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c6
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c2
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c9
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c29
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.c12
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c61
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.h2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c2
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c4
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c8
-rw-r--r--drivers/media/platform/pxa_camera.c22
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c6
-rw-r--r--drivers/media/platform/qcom/venus/Makefile2
-rw-r--r--drivers/media/platform/qcom/venus/core.c167
-rw-r--r--drivers/media/platform/qcom/venus/core.h33
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c13
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c448
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c1
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.h5
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c959
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.h65
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c86
-rw-r--r--drivers/media/platform/qcom/venus/venc.c81
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c8
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c91
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c34
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h28
-rw-r--r--drivers/media/platform/rcar_drif.c12
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/rcar_jpu.c4
-rw-r--r--drivers/media/platform/renesas-ceu.c2
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c4
-rw-r--r--drivers/media/platform/sh_veu.c2
-rw-r--r--drivers/media/platform/sh_vou.c2
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c2
-rw-r--r--drivers/media/platform/sti/hva/hva-v4l2.c2
-rw-r--r--drivers/media/platform/stm32/stm32-cec.c10
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c13
-rw-r--r--drivers/media/platform/sunxi/Makefile1
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c8
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c8
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c7
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/Makefile5
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i-formats.h25
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i-rotate.h135
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_formats.c273
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c924
-rw-r--r--drivers/media/platform/ti-vpe/cal.c31
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/via-camera.c6
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c164
-rw-r--r--drivers/media/platform/vim2m.c2
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c20
-rw-r--r--drivers/media/platform/vimc/vimc-common.c2
-rw-r--r--drivers/media/platform/vimc/vimc-common.h27
-rw-r--r--drivers/media/platform/vimc/vimc-core.c93
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c21
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c21
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c20
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c17
-rw-r--r--drivers/media/platform/vivid/vivid-core.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_histo.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c15
-rw-r--r--drivers/media/radio/si470x/Kconfig4
-rw-r--r--drivers/media/rc/bpf-lirc.c5
-rw-r--r--drivers/media/rc/iguanair.c2
-rw-r--r--drivers/media/rc/ir-xmp-decoder.c2
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-videostrong-kii-pro.c83
-rw-r--r--drivers/media/rc/lirc_dev.c7
-rw-r--r--drivers/media/rc/nuvoton-cir.c4
-rw-r--r--drivers/media/rc/rc-main.c80
-rw-r--r--drivers/media/spi/gs1662.c20
-rw-r--r--drivers/media/usb/Kconfig1
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/au0828/au0828-video.c4
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c6
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c3
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c12
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c4
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c45
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c18
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c60
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c4
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/ov519.c10
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c19
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c4
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c18
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c4
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c7
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c192
-rw-r--r--drivers/media/v4l2-core/v4l2-i2c.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c18
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c221
-rw-r--r--drivers/memory/.gitignore1
-rw-r--r--drivers/memory/tegra/tegra124-emc.c5
-rw-r--r--drivers/memory/tegra/tegra20-emc.c5
-rw-r--r--drivers/memory/tegra/tegra30-emc.c5
-rw-r--r--drivers/message/fusion/mptlan.h5
-rw-r--r--drivers/message/fusion/mptsas.h2
-rw-r--r--drivers/mfd/Kconfig23
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/aat2870-core.c2
-rw-r--r--drivers/mfd/cros_ec_dev.c2
-rw-r--r--drivers/mfd/da9062-core.c44
-rw-r--r--drivers/mfd/dln2.c30
-rw-r--r--drivers/mfd/intel-lpss-pci.c31
-rw-r--r--drivers/mfd/iqs62x.c1063
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/omap-usb-tll.c4
-rw-r--r--drivers/mfd/qcom-pm8xxx.c2
-rw-r--r--drivers/mfd/rk808.c139
-rw-r--r--drivers/mfd/rn5t618.c109
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c52
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/rts5227.c1
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c3
-rw-r--r--drivers/misc/eeprom/at24.c1
-rw-r--r--drivers/misc/habanalabs/command_submission.c51
-rw-r--r--drivers/misc/habanalabs/debugfs.c92
-rw-r--r--drivers/misc/habanalabs/device.c2
-rw-r--r--drivers/misc/habanalabs/goya/goya.c204
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c4
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c2
-rw-r--r--drivers/misc/habanalabs/habanalabs.h62
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c11
-rw-r--r--drivers/misc/habanalabs/hwmon.c106
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h20
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_async_events.h4
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_reg_map.h39
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h39
-rw-r--r--drivers/misc/habanalabs/memory.c222
-rw-r--r--drivers/misc/habanalabs/mmu.c110
-rw-r--r--drivers/misc/lkdtm/bugs.c111
-rw-r--r--drivers/misc/lkdtm/core.c4
-rw-r--r--drivers/misc/lkdtm/lkdtm.h4
-rw-r--r--drivers/misc/lkdtm/stackleak.c25
-rw-r--r--drivers/misc/mei/bus-fixup.c4
-rw-r--r--drivers/misc/mei/client.c6
-rw-r--r--drivers/misc/mei/hw-me-regs.h6
-rw-r--r--drivers/misc/mei/hw-me.c8
-rw-r--r--drivers/misc/mei/hw-me.h4
-rw-r--r--drivers/misc/mei/hw.h5
-rw-r--r--drivers/misc/mei/mei_dev.h2
-rw-r--r--drivers/misc/mei/pci-me.c22
-rw-r--r--drivers/misc/mei/pci-txe.c5
-rw-r--r--drivers/misc/mic/Kconfig6
-rw-r--r--drivers/misc/mic/host/mic_boot.c2
-rw-r--r--drivers/misc/mic/host/mic_x100.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c213
-rw-r--r--drivers/misc/sgi-gru/grulib.h2
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
-rw-r--r--drivers/misc/uacce/Kconfig13
-rw-r--r--drivers/misc/uacce/Makefile2
-rw-r--r--drivers/misc/uacce/uacce.c633
-rw-r--r--drivers/misc/vexpress-syscfg.c2
-rw-r--r--drivers/mmc/core/block.c130
-rw-r--r--drivers/mmc/core/core.c54
-rw-r--r--drivers/mmc/core/mmc.c56
-rw-r--r--drivers/mmc/core/mmc_ops.c196
-rw-r--r--drivers/mmc/core/mmc_ops.h15
-rw-r--r--drivers/mmc/core/mmc_test.c52
-rw-r--r--drivers/mmc/core/queue.c36
-rw-r--r--drivers/mmc/core/sd.c10
-rw-r--r--drivers/mmc/core/sdio_irq.c15
-rw-r--r--drivers/mmc/host/Kconfig12
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/alcor.c6
-rw-r--r--drivers/mmc/host/cavium-octeon.c4
-rw-r--r--drivers/mmc/host/cqhci.c37
-rw-r--r--drivers/mmc/host/cqhci.h6
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c11
-rw-r--r--drivers/mmc/host/mmc_hsq.c348
-rw-r--r--drivers/mmc/host/mmc_hsq.h30
-rw-r--r--drivers/mmc/host/mmci.c43
-rw-r--r--drivers/mmc/host/mmci.h8
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c208
-rw-r--r--drivers/mmc/host/mtk-sd.c41
-rw-r--r--drivers/mmc/host/renesas_sdhi.h6
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c183
-rw-r--r--drivers/mmc/host/sdhci-acpi.c16
-rw-r--r--drivers/mmc/host/sdhci-cadence.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c181
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h1
-rw-r--r--drivers/mmc/host/sdhci-iproc.c17
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c13
-rw-r--r--drivers/mmc/host/sdhci-msm.c29
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c74
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c22
-rw-r--r--drivers/mmc/host/sdhci-omap.c57
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c3
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c23
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-sprd.c30
-rw-r--r--drivers/mmc/host/sdhci-tegra.c35
-rw-r--r--drivers/mmc/host/sdhci-xenon.c10
-rw-r--r--drivers/mmc/host/sdhci.c54
-rw-r--r--drivers/mmc/host/sdhci.h18
-rw-r--r--drivers/mmc/host/sdhci_am654.c264
-rw-r--r--drivers/mmc/host/tmio_mmc.h11
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c77
-rw-r--r--drivers/mmc/host/vub300.c4
-rw-r--r--drivers/most/Kconfig15
-rw-r--r--drivers/most/Makefile4
-rw-r--r--drivers/most/configfs.c (renamed from drivers/staging/most/configfs.c)3
-rw-r--r--drivers/most/core.c (renamed from drivers/staging/most/core.c)11
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c5
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c17
-rw-r--r--drivers/mtd/chips/cfi_util.c12
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/phram.c19
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c12
-rw-r--r--drivers/mtd/hyperbus/hyperbus-core.c15
-rw-r--r--drivers/mtd/inftlmount.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c5
-rw-r--r--drivers/mtd/mtdblock.c5
-rw-r--r--drivers/mtd/mtdchar.c12
-rw-r--r--drivers/mtd/mtdcore.c252
-rw-r--r--drivers/mtd/mtdpart.c695
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c2
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c237
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c292
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c34
-rw-r--r--drivers/mtd/nand/raw/denali.c1
-rw-r--r--drivers/mtd/nand/raw/denali.h2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c21
-rw-r--r--drivers/mtd/nand/raw/ingenic/Kconfig1
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c4
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4725b_bch.c4
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4780_bch.c4
-rw-r--r--drivers/mtd/nand/raw/internals.h1
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c40
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c71
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c2
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c6
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c227
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c58
-rw-r--r--drivers/mtd/nand/raw/nandsim.c4
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c8
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c105
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c44
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c17
-rw-r--r--drivers/mtd/nand/spi/core.c108
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c45
-rw-r--r--drivers/mtd/nand/spi/macronix.c30
-rw-r--r--drivers/mtd/nand/spi/micron.c172
-rw-r--r--drivers/mtd/nand/spi/paragon.c28
-rw-r--r--drivers/mtd/nand/spi/toshiba.c208
-rw-r--r--drivers/mtd/nand/spi/winbond.c34
-rw-r--r--drivers/mtd/spi-nor/Kconfig83
-rw-r--r--drivers/mtd/spi-nor/Makefile28
-rw-r--r--drivers/mtd/spi-nor/atmel.c46
-rw-r--r--drivers/mtd/spi-nor/catalyst.c29
-rw-r--r--drivers/mtd/spi-nor/controllers/Kconfig75
-rw-r--r--drivers/mtd/spi-nor/controllers/Makefile8
-rw-r--r--drivers/mtd/spi-nor/controllers/aspeed-smc.c (renamed from drivers/mtd/spi-nor/aspeed-smc.c)4
-rw-r--r--drivers/mtd/spi-nor/controllers/cadence-quadspi.c (renamed from drivers/mtd/spi-nor/cadence-quadspi.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c (renamed from drivers/mtd/spi-nor/hisi-sfc.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi-pci.c (renamed from drivers/mtd/spi-nor/intel-spi-pci.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi-platform.c (renamed from drivers/mtd/spi-nor/intel-spi-platform.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi.c (renamed from drivers/mtd/spi-nor/intel-spi.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi.h (renamed from drivers/mtd/spi-nor/intel-spi.h)0
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c (renamed from drivers/mtd/spi-nor/nxp-spifi.c)0
-rw-r--r--drivers/mtd/spi-nor/core.c3466
-rw-r--r--drivers/mtd/spi-nor/core.h441
-rw-r--r--drivers/mtd/spi-nor/eon.c34
-rw-r--r--drivers/mtd/spi-nor/esmt.c25
-rw-r--r--drivers/mtd/spi-nor/everspin.c27
-rw-r--r--drivers/mtd/spi-nor/fujitsu.c20
-rw-r--r--drivers/mtd/spi-nor/gigadevice.c59
-rw-r--r--drivers/mtd/spi-nor/intel.c32
-rw-r--r--drivers/mtd/spi-nor/issi.c83
-rw-r--r--drivers/mtd/spi-nor/macronix.c98
-rw-r--r--drivers/mtd/spi-nor/micron-st.c157
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c565
-rw-r--r--drivers/mtd/spi-nor/sfdp.c1204
-rw-r--r--drivers/mtd/spi-nor/sfdp.h98
-rw-r--r--drivers/mtd/spi-nor/spansion.c95
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c5434
-rw-r--r--drivers/mtd/spi-nor/sst.c151
-rw-r--r--drivers/mtd/spi-nor/winbond.c112
-rw-r--r--drivers/mtd/spi-nor/xilinx.c94
-rw-r--r--drivers/mtd/spi-nor/xmc.c23
-rw-r--r--drivers/mtd/ubi/attach.c2
-rw-r--r--drivers/mtd/ubi/build.c4
-rw-r--r--drivers/mtd/ubi/debug.c12
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c15
-rw-r--r--drivers/mtd/ubi/ubi-media.h2
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/bareudp.c807
-rw-r--r--drivers/net/bonding/bond_main.c8
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/bonding/bonding_priv.h5
-rw-r--r--drivers/net/caif/Kconfig6
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c5
-rw-r--r--drivers/net/can/slcan.c11
-rw-r--r--drivers/net/can/sun4i_can.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c96
-rw-r--r--drivers/net/dsa/b53/b53_priv.h4
-rw-r--r--drivers/net/dsa/b53/b53_regs.h8
-rw-r--r--drivers/net/dsa/b53/b53_srab.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c23
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c139
-rw-r--r--drivers/net/dsa/dsa_loop.c1
-rw-r--r--drivers/net/dsa/lantiq_gswip.c4
-rw-r--r--drivers/net/dsa/microchip/Kconfig1
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c26
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h3
-rw-r--r--drivers/net/dsa/mt7530.c236
-rw-r--r--drivers/net/dsa/mt7530.h29
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c497
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h40
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c285
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h29
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c452
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h39
-rw-r--r--drivers/net/dsa/ocelot/felix.c134
-rw-r--r--drivers/net/dsa/ocelot/felix.h10
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c157
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/sja1105/Kconfig1
-rw-r--r--drivers/net/dsa/sja1105/Makefile1
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h49
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c4
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c24
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c133
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c340
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c400
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c295
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h31
-rw-r--r--drivers/net/dsa/sja1105/sja1105_sgmii.h53
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c7
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c5
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h1
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c27
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c30
-rw-r--r--drivers/net/dummy.c3
-rw-r--r--drivers/net/ethernet/3com/3c509.c8
-rw-r--r--drivers/net/ethernet/3com/3c515.c16
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c283
-rw-r--r--drivers/net/ethernet/3com/typhoon.h4
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c19
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c2
-rw-r--r--drivers/net/ethernet/agere/et131x.c1
-rw-r--r--drivers/net/ethernet/agere/et131x.h1
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c3
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c2
-rw-r--r--drivers/net/ethernet/alteon/acenic.c5
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c9
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c30
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h18
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c5
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c5
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c9
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c7
-rw-r--r--drivers/net/ethernet/amd/sunlance.c10
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c27
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h1
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/ethtool.c2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c1
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.h1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h1
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c176
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c1777
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.h133
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h53
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h73
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h77
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c2473
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h323
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h914
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h12
-rw-r--r--drivers/net/ethernet/arc/emac.h1
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c1
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c2
-rw-r--r--drivers/net/ethernet/atheros/Kconfig2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c188
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c5
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c11
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c11
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c5
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c11
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c11
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c9
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c12
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c235
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c76
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c81
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c16
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c11
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig2
-rw-r--r--drivers/net/ethernet/cadence/macb.h1
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c84
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c10
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c12
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/version.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c303
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c132
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c79
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h25
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c45
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/smt.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c56
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h62
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c11
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c2
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h4
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c24
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h8
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.h2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c4
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c15
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c14
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c28
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c13
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c12
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c9
-rw-r--r--drivers/net/ethernet/dlink/sundance.c20
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/dnet.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c5
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c3
-rw-r--r--drivers/net/ethernet/fealnx.c20
-rw-r--r--drivers/net/ethernet/freescale/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c20
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c17
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c36
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c13
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c61
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h1
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig16
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c70
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h35
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c22
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h11
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c19
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c13
-rw-r--r--drivers/net/ethernet/freescale/fec.h9
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c153
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c50
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c10
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c10
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c13
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c35
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c14
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h54
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c159
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c42
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c387
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c370
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c50
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c16
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c59
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c11
-rw-r--r--drivers/net/ethernet/intel/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c24
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c10
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c27
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c54
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c95
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c84
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c58
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c416
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c180
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c325
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c282
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c508
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c711
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c22
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c6
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h12
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_dump.c323
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c83
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c144
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c1
-rw-r--r--drivers/net/ethernet/jme.c10
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c367
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c11
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c192
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h38
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c169
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c79
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c287
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c163
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c182
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c861
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c652
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c1
-rw-r--r--drivers/net/ethernet/marvell/skge.h8
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.h8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c31
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c286
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c218
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c268
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c1366
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c297
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1193
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c)220
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h)21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c374
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c981
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c130
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h106
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c296
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c205
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h115
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c567
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c235
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h96
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c194
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c268
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c267
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c543
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c263
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c596
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c5
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c226
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h12
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.c619
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.h30
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c163
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c272
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c27
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h11
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.h403
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c1
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c37
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c48
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c66
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h2
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c40
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h2
-rw-r--r--drivers/net/ethernet/ni/nixge.c22
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c27
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c11
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c46
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c61
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h38
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c504
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h20
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c47
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c53
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c20
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c25
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c13
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c15
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c23
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c7
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c67
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c13
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c488
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c18
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c104
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c66
-rw-r--r--drivers/net/ethernet/sfc/efx.c1
-rw-r--r--drivers/net/ethernet/sfc/efx.h18
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c25
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c9
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c42
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon_boards.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h8
-rw-r--r--drivers/net/ethernet/sfc/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c3
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c37
-rw-r--r--drivers/net/ethernet/sgi/meth.h16
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c9
-rw-r--r--drivers/net/ethernet/socionext/netsec.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c781
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c195
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c110
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c211
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c332
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c9
-rw-r--r--drivers/net/ethernet/sun/cassini.c31
-rw-r--r--drivers/net/ethernet/sun/sungem.c30
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c17
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h2
-rw-r--r--drivers/net/ethernet/ti/Kconfig35
-rw-r--r--drivers/net/ethernet/ti/Makefile5
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c747
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c1967
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h142
-rw-r--r--drivers/net/ethernet/ti/cpsw.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c40
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h4
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c1
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c126
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.h30
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h8
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c181
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h19
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c444
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/fddi/skfp/drvfbi.c4
-rw-r--r--drivers/net/fddi/skfp/h/skfbi.h5
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/gtp.c9
-rw-r--r--drivers/net/hamradio/bpqether.c3
-rw-r--r--drivers/net/hyperv/netvsc.c9
-rw-r--r--drivers/net/hyperv/netvsc_drv.c49
-rw-r--r--drivers/net/ieee802154/ca8210.c3
-rw-r--r--drivers/net/ipa/Kconfig19
-rw-r--r--drivers/net/ipa/Makefile12
-rw-r--r--drivers/net/ipa/gsi.c2063
-rw-r--r--drivers/net/ipa/gsi.h257
-rw-r--r--drivers/net/ipa/gsi_private.h118
-rw-r--r--drivers/net/ipa/gsi_reg.h419
-rw-r--r--drivers/net/ipa/gsi_trans.c787
-rw-r--r--drivers/net/ipa/gsi_trans.h226
-rw-r--r--drivers/net/ipa/ipa.h148
-rw-r--r--drivers/net/ipa/ipa_clock.c313
-rw-r--r--drivers/net/ipa/ipa_clock.h53
-rw-r--r--drivers/net/ipa/ipa_cmd.c672
-rw-r--r--drivers/net/ipa/ipa_cmd.h195
-rw-r--r--drivers/net/ipa/ipa_data-sc7180.c307
-rw-r--r--drivers/net/ipa/ipa_data-sdm845.c329
-rw-r--r--drivers/net/ipa/ipa_data.h280
-rw-r--r--drivers/net/ipa/ipa_endpoint.c1703
-rw-r--r--drivers/net/ipa/ipa_endpoint.h110
-rw-r--r--drivers/net/ipa/ipa_gsi.c54
-rw-r--r--drivers/net/ipa/ipa_gsi.h60
-rw-r--r--drivers/net/ipa/ipa_interrupt.c253
-rw-r--r--drivers/net/ipa/ipa_interrupt.h117
-rw-r--r--drivers/net/ipa/ipa_main.c953
-rw-r--r--drivers/net/ipa/ipa_mem.c314
-rw-r--r--drivers/net/ipa/ipa_mem.h90
-rw-r--r--drivers/net/ipa/ipa_modem.c382
-rw-r--r--drivers/net/ipa/ipa_modem.h31
-rw-r--r--drivers/net/ipa/ipa_qmi.c538
-rw-r--r--drivers/net/ipa/ipa_qmi.h41
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c663
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h252
-rw-r--r--drivers/net/ipa/ipa_reg.c38
-rw-r--r--drivers/net/ipa/ipa_reg.h476
-rw-r--r--drivers/net/ipa/ipa_smp2p.c335
-rw-r--r--drivers/net/ipa/ipa_smp2p.h48
-rw-r--r--drivers/net/ipa/ipa_table.c700
-rw-r--r--drivers/net/ipa/ipa_table.h103
-rw-r--r--drivers/net/ipa/ipa_uc.c211
-rw-r--r--drivers/net/ipa/ipa_uc.h32
-rw-r--r--drivers/net/ipa/ipa_version.h23
-rw-r--r--drivers/net/macsec.c775
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/netdevsim/dev.c287
-rw-r--r--drivers/net/netdevsim/health.c4
-rw-r--r--drivers/net/netdevsim/netdevsim.h5
-rw-r--r--drivers/net/phy/Kconfig25
-rw-r--r--drivers/net/phy/Makefile8
-rw-r--r--drivers/net/phy/aquantia_main.c38
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/bcm-phy-lib.c22
-rw-r--r--drivers/net/phy/bcm-phy-lib.h1
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/phy/bcm84881.c33
-rw-r--r--drivers/net/phy/broadcom.c32
-rw-r--r--drivers/net/phy/dp83640.c4
-rw-r--r--drivers/net/phy/dp83822.c30
-rw-r--r--drivers/net/phy/dp83867.c150
-rw-r--r--drivers/net/phy/dp83tc811.c21
-rw-r--r--drivers/net/phy/linkmode.c95
-rw-r--r--drivers/net/phy/marvell.c70
-rw-r--r--drivers/net/phy/marvell10g.c373
-rw-r--r--drivers/net/phy/mdio-ipq8064.c166
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c14
-rw-r--r--drivers/net/phy/mdio-mvusb.c120
-rw-r--r--drivers/net/phy/mdio-xpcs.c716
-rw-r--r--drivers/net/phy/mdio_bus.c85
-rw-r--r--drivers/net/phy/micrel.c59
-rw-r--r--drivers/net/phy/microchip_t1.c171
-rw-r--r--drivers/net/phy/mscc/Makefile10
-rw-r--r--drivers/net/phy/mscc/mscc.h402
-rw-r--r--drivers/net/phy/mscc/mscc_fc_buffer.h (renamed from drivers/net/phy/mscc_fc_buffer.h)8
-rw-r--r--drivers/net/phy/mscc/mscc_mac.h (renamed from drivers/net/phy/mscc_mac.h)14
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c1055
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.h (renamed from drivers/net/phy/mscc_macsec.h)67
-rw-r--r--drivers/net/phy/mscc/mscc_main.c (renamed from drivers/net/phy/mscc.c)1593
-rw-r--r--drivers/net/phy/nxp-tja11xx.c16
-rw-r--r--drivers/net/phy/phy-c45.c5
-rw-r--r--drivers/net/phy/phy-core.c71
-rw-r--r--drivers/net/phy/phy.c38
-rw-r--r--drivers/net/phy/phy_device.c101
-rw-r--r--drivers/net/phy/phylink.c522
-rw-r--r--drivers/net/phy/realtek.c60
-rw-r--r--drivers/net/phy/smsc.c16
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/slip/slip.c7
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/tun.c121
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/cdc_ether.c11
-rw-r--r--drivers/net/usb/cdc_ncm.c411
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c8
-rw-r--r--drivers/net/usb/pegasus.c38
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c34
-rw-r--r--drivers/net/veth.c257
-rw-r--r--drivers/net/virtio_net.c115
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c24
-rw-r--r--drivers/net/vrf.c14
-rw-r--r--drivers/net/vxlan.c6
-rw-r--r--drivers/net/wan/.gitignore1
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/Makefile35
-rw-r--r--drivers/net/wan/farsync.h2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c7
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c1
-rw-r--r--drivers/net/wireguard/messages.h2
-rw-r--r--drivers/net/wireguard/noise.c22
-rw-r--r--drivers/net/wireguard/noise.h14
-rw-r--r--drivers/net/wireguard/queueing.c4
-rw-r--r--drivers/net/wireguard/queueing.h10
-rw-r--r--drivers/net/wireguard/receive.c65
-rw-r--r--drivers/net/wireguard/selftest/counter.c17
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c4
-rw-r--r--drivers/net/wireguard/send.c37
-rw-r--r--drivers/net/wireguard/socket.c12
-rw-r--r--drivers/net/wireless/admtek/adm8211.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c86
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h15
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.c56
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.h28
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c76
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c1492
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c66
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c42
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h30
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c123
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c224
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.h54
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c260
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h96
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c174
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c7
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c1
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c127
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c26
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c48
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c347
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c189
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/soc.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c951
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_common.h2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_download.c10
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_wlan.h2
-rw-r--r--drivers/net/wireless/intersil/orinoco/fw.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.h2
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes_dld.c6
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c23
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.h8
-rw-r--r--drivers/net/wireless/intersil/p54/lmac.h6
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h2
-rw-r--r--drivers/net/wireless/intersil/prism54/oid_mgt.c34
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c351
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h21
-rw-r--r--drivers/net/wireless/marvell/libertas/host.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.h2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ethtool.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h48
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h8
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/airtime.c326
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c49
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c392
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h168
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c220
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c92
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c407
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c1015
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c404
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c2528
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h262
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h253
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c104
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h248
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c77
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/trace.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h54
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c471
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h14
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c117
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c689
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c65
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h31
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c121
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h360
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h45
-rw-r--r--drivers/net/wireless/rayctl.h2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h23
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c85
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c495
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c62
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c276
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h27
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h46
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c25
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c34
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h57
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c219
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c60
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c80
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c137
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h16
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c39
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c9
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h4
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c32
-rw-r--r--drivers/net/wireless/ti/wl1251/wl12xx_80211.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_80211.h2
-rw-r--r--drivers/net/wireless/virt_wifi.c12
-rw-r--r--drivers/net/wireless/wl3501.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.h8
-rw-r--r--drivers/nfc/fdp/fdp.c2
-rw-r--r--drivers/nfc/st21nfca/dep.c8
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c290
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h8
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c4
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c2
-rw-r--r--drivers/ntb/ntb_transport.c58
-rw-r--r--drivers/ntb/test/ntb_perf.c57
-rw-r--r--drivers/ntb/test/ntb_tool.c14
-rw-r--r--drivers/nvdimm/blk.c3
-rw-r--r--drivers/nvdimm/btt.c3
-rw-r--r--drivers/nvdimm/bus.c6
-rw-r--r--drivers/nvdimm/dimm.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c95
-rw-r--r--drivers/nvdimm/e820.c18
-rw-r--r--drivers/nvdimm/label.h2
-rw-r--r--drivers/nvdimm/namespace_devs.c28
-rw-r--r--drivers/nvdimm/nd.h7
-rw-r--r--drivers/nvdimm/of_pmem.c4
-rw-r--r--drivers/nvdimm/pfn.h12
-rw-r--r--drivers/nvdimm/pfn_devs.c40
-rw-r--r--drivers/nvdimm/pmem.c104
-rw-r--r--drivers/nvdimm/region_devs.c130
-rw-r--r--drivers/nvme/host/Kconfig2
-rw-r--r--drivers/nvme/host/core.c293
-rw-r--r--drivers/nvme/host/fabrics.c8
-rw-r--r--drivers/nvme/host/fc.c17
-rw-r--r--drivers/nvme/host/multipath.c28
-rw-r--r--drivers/nvme/host/nvme.h6
-rw-r--r--drivers/nvme/host/pci.c103
-rw-r--r--drivers/nvme/host/rdma.c19
-rw-r--r--drivers/nvme/host/tcp.c136
-rw-r--r--drivers/nvme/target/admin-cmd.c35
-rw-r--r--drivers/nvme/target/configfs.c156
-rw-r--r--drivers/nvme/target/core.c9
-rw-r--r--drivers/nvme/target/fc.c2
-rw-r--r--drivers/nvme/target/fcloop.c77
-rw-r--r--drivers/nvme/target/loop.c3
-rw-r--r--drivers/nvme/target/nvmet.h11
-rw-r--r--drivers/nvme/target/rdma.c226
-rw-r--r--drivers/nvme/target/tcp.c35
-rw-r--r--drivers/nvmem/Kconfig12
-rw-r--r--drivers/nvmem/Makefile5
-rw-r--r--drivers/nvmem/core.c365
-rw-r--r--drivers/nvmem/imx-ocotp.c29
-rw-r--r--drivers/nvmem/jz4780-efuse.c239
-rw-r--r--drivers/nvmem/mxs-ocotp.c30
-rw-r--r--drivers/nvmem/nvmem-sysfs.c263
-rw-r--r--drivers/nvmem/nvmem.h64
-rw-r--r--drivers/nvmem/sprd-efuse.c27
-rw-r--r--drivers/of/address.c273
-rw-r--r--drivers/of/of_private.h2
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/overlay.c2
-rw-r--r--drivers/of/property.c12
-rw-r--r--drivers/of/resolver.c5
-rw-r--r--drivers/of/unittest-data/Makefile8
-rw-r--r--drivers/of/unittest-data/overlay_bad_add_dup_prop.dts23
-rw-r--r--drivers/of/unittest-data/overlay_gpio_01.dts23
-rw-r--r--drivers/of/unittest-data/overlay_gpio_02a.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_02b.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_03.dts23
-rw-r--r--drivers/of/unittest-data/overlay_gpio_04a.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_04b.dts16
-rw-r--r--drivers/of/unittest.c685
-rw-r--r--drivers/opp/core.c14
-rw-r--r--drivers/parisc/eisa.c8
-rw-r--r--drivers/pci/ats.c4
-rw-r--r--drivers/pci/controller/Kconfig11
-rw-r--r--drivers/pci/controller/Makefile2
-rw-r--r--drivers/pci/controller/dwc/Kconfig29
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c231
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c5
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c116
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c144
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h12
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c712
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig34
-rw-r--r--drivers/pci/controller/mobiveil/Makefile5
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c267
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c (renamed from drivers/pci/controller/pcie-mobiveil.c)564
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c61
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.c231
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h226
-rw-r--r--drivers/pci/controller/pci-hyperv.c260
-rw-r--r--drivers/pci/controller/pci-tegra.c187
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c4
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c402
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c28
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c137
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c10
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c35
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c93
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c5
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c4
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c99
-rw-r--r--drivers/pci/p2pdma.c3
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-bridge-emul.c14
-rw-r--r--drivers/pci/pci-mid.c6
-rw-r--r--drivers/pci/pci-sysfs.c33
-rw-r--r--drivers/pci/pci.c82
-rw-r--r--drivers/pci/pci.h32
-rw-r--r--drivers/pci/pcie/Kconfig11
-rw-r--r--drivers/pci/pcie/Makefile1
-rw-r--r--drivers/pci/pcie/aer.c40
-rw-r--r--drivers/pci/pcie/aer_inject.c6
-rw-r--r--drivers/pci/pcie/aspm.c6
-rw-r--r--drivers/pci/pcie/dpc.c137
-rw-r--r--drivers/pci/pcie/edr.c239
-rw-r--r--drivers/pci/pcie/err.c66
-rw-r--r--drivers/pci/pcie/portdrv.h5
-rw-r--r--drivers/pci/pcie/portdrv_core.c21
-rw-r--r--drivers/pci/probe.c42
-rw-r--r--drivers/pci/quirks.c127
-rw-r--r--drivers/pci/rom.c17
-rw-r--r--drivers/pci/setup-bus.c34
-rw-r--r--drivers/pci/slot.c38
-rw-r--r--drivers/pci/switch/switchtec.c22
-rw-r--r--drivers/pcmcia/cs_internal.h2
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pcmcia/sa1100_simpad.c6
-rw-r--r--drivers/pcmcia/soc_common.h2
-rw-r--r--drivers/pcmcia/yenta_socket.c10
-rw-r--r--drivers/perf/arm-ccn.c20
-rw-r--r--drivers/perf/arm_spe_pmu.c2
-rw-r--r--drivers/phy/amlogic/Kconfig22
-rw-r--r--drivers/phy/amlogic/Makefile12
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c188
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-pcie.c192
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c87
-rw-r--r--drivers/phy/cadence/Kconfig6
-rw-r--r--drivers/phy/cadence/Makefile2
-rw-r--r--drivers/phy/cadence/phy-cadence-dp.c541
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c1944
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c64
-rw-r--r--drivers/phy/qualcomm/Kconfig20
-rw-r--r--drivers/phy/qualcomm/Makefile2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c425
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h114
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c149
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c425
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-ss.c246
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c2
-rw-r--r--drivers/phy/socionext/phy-uniphier-pcie.c102
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3hs.c92
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3ss.c8
-rw-r--r--drivers/phy/tegra/Kconfig5
-rw-r--r--drivers/phy/tegra/Makefile1
-rw-r--r--drivers/phy/tegra/xusb-tegra124.c6
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c265
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c136
-rw-r--r--drivers/phy/tegra/xusb.c263
-rw-r--r--drivers/phy/tegra/xusb.h25
-rw-r--r--drivers/phy/ti/Kconfig3
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c19
-rw-r--r--drivers/pinctrl/Kconfig12
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/pinctrl-s700.c508
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c111
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c5
-rw-r--r--drivers/pinctrl/core.c1
-rw-r--r--drivers/pinctrl/devicetree.c10
-rw-r--r--drivers/pinctrl/freescale/Kconfig8
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c15
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6765.c11
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8183.c7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c264
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h16
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c5
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c365
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.h3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c35
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c27
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c17
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c7
-rw-r--r--drivers/pinctrl/pinconf-generic.c1
-rw-r--r--drivers/pinctrl/pinctrl-amd.c5
-rw-r--r--drivers/pinctrl/pinctrl-at91.c5
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c7
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c300
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c62
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c5
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c5
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c5
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c5
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c7
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c5
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c5
-rw-r--r--drivers/pinctrl/pinctrl-rza2.c6
-rw-r--r--drivers/pinctrl/pinctrl-st.c14
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c17
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c9
-rw-r--r--drivers/pinctrl/qcom/Kconfig10
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c1107
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c73
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c307
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c5
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c2
-rw-r--r--drivers/pinctrl/sprd/Kconfig10
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c25
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c61
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c16
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c52
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h5
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra194.c47
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c6
-rw-r--r--drivers/platform/chrome/Kconfig27
-rw-r--r--drivers/platform/chrome/Makefile5
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c2
-rw-r--r--drivers/platform/chrome/cros_ec.c32
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c4
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c50
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c9
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c16
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c157
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_ring.c1065
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c36
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c357
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c4
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c306
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/properties.c3
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c4
-rw-r--r--drivers/platform/x86/Kconfig1284
-rw-r--r--drivers/platform/x86/Makefile198
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c24
-rw-r--r--drivers/platform/x86/asus-wmi.c7
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/platform/x86/dell-rbtn.c4
-rw-r--r--drivers/platform/x86/dell-rbtn.h2
-rw-r--r--drivers/platform/x86/dell-smbios-base.c4
-rw-r--r--drivers/platform/x86/dell-smbios-smm.c2
-rw-r--r--drivers/platform/x86/dell-smbios.h2
-rw-r--r--drivers/platform/x86/dell-smo8800.c3
-rw-r--r--drivers/platform/x86/dell-wmi.c4
-rw-r--r--drivers/platform/x86/dell_rbu.c173
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c2
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c2
-rw-r--r--drivers/platform/x86/intel-hid.c4
-rw-r--r--drivers/platform/x86/intel-uncore-frequency.c65
-rw-r--r--drivers/platform/x86/intel-vbtn.c2
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c14
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c4
-rw-r--r--drivers/platform/x86/intel_pmc_core.c383
-rw-r--r--drivers/platform/x86/intel_pmc_core.h31
-rw-r--r--drivers/platform/x86/intel_pmc_core_pltdrv.c16
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c4
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c2
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c5
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c7
-rw-r--r--drivers/platform/x86/intel_turbo_max_3.c6
-rw-r--r--drivers/platform/x86/pmc_atom.c8
-rw-r--r--drivers/platform/x86/sony-laptop.c8
-rw-r--r--drivers/platform/x86/surface3_power.c589
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c65
-rw-r--r--drivers/platform/x86/wmi.c1
-rw-r--r--drivers/platform/x86/xiaomi-wmi.c4
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/at91-reset.c190
-rw-r--r--drivers/power/reset/sc27xx-poweroff.c21
-rw-r--r--drivers/power/supply/Kconfig4
-rw-r--r--drivers/power/supply/ab8500_charger.c35
-rw-r--r--drivers/power/supply/axp288_charger.c57
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c4
-rw-r--r--drivers/power/supply/bq2415x_charger.c4
-rw-r--r--drivers/power/supply/bq27xxx_battery.c7
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c50
-rw-r--r--drivers/power/supply/ingenic-battery.c3
-rw-r--r--drivers/power/supply/isp1704_charger.c2
-rw-r--r--drivers/power/supply/rx51_battery.c4
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c12
-rw-r--r--drivers/power/supply/twl4030_charger.c4
-rw-r--r--drivers/powercap/idle_inject.c2
-rw-r--r--drivers/powercap/intel_rapl_common.c87
-rw-r--r--drivers/ps3/sys-manager-core.c2
-rw-r--r--drivers/ptp/Kconfig24
-rw-r--r--drivers/ptp/Makefile2
-rw-r--r--drivers/ptp/ptp_chardev.c9
-rw-r--r--drivers/ptp/ptp_clock.c17
-rw-r--r--drivers/ptp/ptp_idt82p33.c1008
-rw-r--r--drivers/ptp/ptp_idt82p33.h171
-rw-r--r--drivers/ptp/ptp_ines.c4
-rw-r--r--drivers/ptp/ptp_qoriq.c29
-rw-r--r--drivers/ptp/ptp_vmw.c144
-rw-r--r--drivers/pwm/Kconfig58
-rw-r--r--drivers/pwm/core.c135
-rw-r--r--drivers/pwm/pwm-bcm2835.c1
-rw-r--r--drivers/pwm/pwm-imx-tpm.c2
-rw-r--r--drivers/pwm/pwm-imx27.c32
-rw-r--r--drivers/pwm/pwm-jz4740.c162
-rw-r--r--drivers/pwm/pwm-meson.c4
-rw-r--r--drivers/pwm/pwm-mxs.c1
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c219
-rw-r--r--drivers/pwm/pwm-pca9685.c97
-rw-r--r--drivers/pwm/pwm-rcar.c10
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c11
-rw-r--r--drivers/pwm/pwm-sun4i.c13
-rw-r--r--drivers/pwm/pwm-tegra.c6
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c5
-rw-r--r--drivers/regulator/Kconfig18
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/anatop-regulator.c10
-rw-r--r--drivers/regulator/axp20x-regulator.c6
-rw-r--r--drivers/regulator/core.c5
-rw-r--r--drivers/regulator/da9062-regulator.c2
-rw-r--r--drivers/regulator/da9063-regulator.c114
-rw-r--r--drivers/regulator/mp5416.c245
-rw-r--r--drivers/regulator/mp8859.c1
-rw-r--r--drivers/regulator/mp886x.c290
-rw-r--r--drivers/regulator/pwm-regulator.c6
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c9
-rw-r--r--drivers/regulator/qcom_smd-regulator.c47
-rw-r--r--drivers/remoteproc/Kconfig18
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/imx_rproc.c11
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c4
-rw-r--r--drivers/remoteproc/mtk_common.h2
-rw-r--r--drivers/remoteproc/mtk_scp.c8
-rw-r--r--drivers/remoteproc/omap_remoteproc.c1200
-rw-r--r--drivers/remoteproc/omap_remoteproc.h50
-rw-r--r--drivers/remoteproc/qcom_q6v5.c20
-rw-r--r--drivers/remoteproc/qcom_q6v5.h1
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c10
-rw-r--r--drivers/remoteproc/qcom_q6v5_ipa_notify.c85
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c200
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c10
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c161
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c16
-rw-r--r--drivers/remoteproc/remoteproc_elf_helpers.h96
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c189
-rw-r--r--drivers/remoteproc/remoteproc_internal.h16
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c1
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c8
-rw-r--r--drivers/remoteproc/st_remoteproc.c4
-rw-r--r--drivers/remoteproc/st_slim_rproc.c6
-rw-r--r--drivers/remoteproc/stm32_rproc.c5
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c4
-rw-r--r--drivers/rpmsg/mtk_rpmsg.c1
-rw-r--r--drivers/rtc/Kconfig27
-rw-r--r--drivers/rtc/Makefile7
-rw-r--r--drivers/rtc/class.c49
-rw-r--r--drivers/rtc/hctosys.c69
-rw-r--r--drivers/rtc/rtc-88pm860x.c104
-rw-r--r--drivers/rtc/rtc-ab8500.c10
-rw-r--r--drivers/rtc/rtc-au1xxx.c29
-rw-r--r--drivers/rtc/rtc-bd70528.c4
-rw-r--r--drivers/rtc/rtc-cmos.c7
-rw-r--r--drivers/rtc/rtc-cpcap.c13
-rw-r--r--drivers/rtc/rtc-da9052.c18
-rw-r--r--drivers/rtc/rtc-davinci.c58
-rw-r--r--drivers/rtc/rtc-ds1305.c10
-rw-r--r--drivers/rtc/rtc-ds1307.c126
-rw-r--r--drivers/rtc/rtc-ds1374.c27
-rw-r--r--drivers/rtc/rtc-efi-platform.c35
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c23
-rw-r--r--drivers/rtc/rtc-imx-sc.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c9
-rw-r--r--drivers/rtc/rtc-m48t35.c7
-rw-r--r--drivers/rtc/rtc-mpc5121.c61
-rw-r--r--drivers/rtc/rtc-mt2712.c423
-rw-r--r--drivers/rtc/rtc-mxc.c46
-rw-r--r--drivers/rtc/rtc-omap.c1
-rw-r--r--drivers/rtc/rtc-pcf85063.c157
-rw-r--r--drivers/rtc/rtc-pl030.c27
-rw-r--r--drivers/rtc/rtc-pl031.c53
-rw-r--r--drivers/rtc/rtc-pm8xxx.c40
-rw-r--r--drivers/rtc/rtc-puv3.c14
-rw-r--r--drivers/rtc/rtc-rc5t619.c444
-rw-r--r--drivers/rtc/rtc-sa1100.c40
-rw-r--r--drivers/rtc/rtc-sh.c3
-rw-r--r--drivers/rtc/rtc-sirfsoc.c44
-rw-r--r--drivers/rtc/rtc-snvs.c28
-rw-r--r--drivers/rtc/rtc-starfire.c10
-rw-r--r--drivers/rtc/rtc-sun6i.c47
-rw-r--r--drivers/rtc/rtc-zynqmp.c27
-rw-r--r--drivers/rtc/sysfs.c2
-rw-r--r--drivers/s390/block/Kconfig1
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/block/dcssblk.c24
-rw-r--r--drivers/s390/block/xpram.c4
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c2
-rw-r--r--drivers/s390/char/raw3270.h2
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/sclp_pci.c2
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/s390/char/tape_core.c6
-rw-r--r--drivers/s390/cio/airq.c8
-rw-r--r--drivers/s390/cio/ccwgroup.c69
-rw-r--r--drivers/s390/cio/chsc.c5
-rw-r--r--drivers/s390/cio/chsc.h3
-rw-r--r--drivers/s390/cio/cio.c8
-rw-r--r--drivers/s390/cio/device.c17
-rw-r--r--drivers/s390/cio/idset.c2
-rw-r--r--drivers/s390/cio/qdio.h21
-rw-r--r--drivers/s390/cio/qdio_debug.c79
-rw-r--r--drivers/s390/cio/qdio_debug.h6
-rw-r--r--drivers/s390/cio/qdio_main.c132
-rw-r--r--drivers/s390/cio/qdio_setup.c48
-rw-r--r--drivers/s390/cio/qdio_thinint.c62
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c5
-rw-r--r--drivers/s390/crypto/ap_bus.c212
-rw-r--r--drivers/s390/crypto/ap_bus.h5
-rw-r--r--drivers/s390/crypto/ap_card.c17
-rw-r--r--drivers/s390/crypto/ap_queue.c75
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c2
-rw-r--r--drivers/s390/crypto/zcrypt_card.c6
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c33
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c76
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c10
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c10
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c4
-rw-r--r--drivers/s390/net/Kconfig17
-rw-r--r--drivers/s390/net/ism_drv.c24
-rw-r--r--drivers/s390/net/qeth_core.h51
-rw-r--r--drivers/s390/net/qeth_core_main.c465
-rw-r--r--drivers/s390/net/qeth_core_mpc.h21
-rw-r--r--drivers/s390/net/qeth_core_sys.c10
-rw-r--r--drivers/s390/net/qeth_ethtool.c150
-rw-r--r--drivers/s390/net/qeth_l2_main.c89
-rw-r--r--drivers/s390/net/qeth_l3_main.c74
-rw-r--r--drivers/s390/net/qeth_l3_sys.c35
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c44
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h32
-rw-r--r--drivers/s390/scsi/zfcp_def.h6
-rw-r--r--drivers/s390/scsi/zfcp_erp.c12
-rw-r--r--drivers/s390/scsi/zfcp_ext.h12
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c313
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h23
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c51
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c5
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c70
-rw-r--r--drivers/sbus/char/envctrl.c2
-rw-r--r--drivers/sbus/char/flash.c4
-rw-r--r--drivers/sbus/char/uctrl.c2
-rw-r--r--drivers/scsi/.gitignore1
-rw-r--r--drivers/scsi/BusLogic.c10
-rw-r--r--drivers/scsi/Kconfig54
-rw-r--r--drivers/scsi/aacraid/aachba.c83
-rw-r--r--drivers/scsi/aacraid/comminit.c35
-rw-r--r--drivers/scsi/aacraid/commsup.c51
-rw-r--r--drivers/scsi/aacraid/linit.c178
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1740.c1
-rw-r--r--drivers/scsi/aic7xxx/.gitignore1
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic79xx2
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c22
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c13
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c23
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c13
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c15
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c20
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h13
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c103
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c34
-rw-r--r--drivers/scsi/ch.c40
-rw-r--r--drivers/scsi/constants.c2
-rw-r--r--drivers/scsi/dc395x.c34
-rw-r--r--drivers/scsi/dpt/dpti_ioctl.h2
-rw-r--r--drivers/scsi/dpt_i2o.c27
-rw-r--r--drivers/scsi/dpti.h5
-rw-r--r--drivers/scsi/fnic/fnic_trace.c58
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h2
-rw-r--r--drivers/scsi/g_NCR5380.c2
-rw-r--r--drivers/scsi/gdth_proc.c2
-rw-r--r--drivers/scsi/hisi_sas/Kconfig1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c3
-rw-r--r--drivers/scsi/hosts.c65
-rw-r--r--drivers/scsi/hpsa.c80
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c212
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c4
-rw-r--r--drivers/scsi/ipr.c6
-rw-r--r--drivers/scsi/ipr.h6
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/isci/sas.h2
-rw-r--r--drivers/scsi/libfc/fc_rport.c10
-rw-r--r--drivers/scsi/libiscsi.c9
-rw-r--r--drivers/scsi/libsas/Kconfig1
-rw-r--r--drivers/scsi/lpfc/lpfc.h58
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c82
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c141
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c333
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c519
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h82
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c146
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c149
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c62
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c96
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c63
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/megaraid.c13
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c10
-rw-r--r--drivers/scsi/mvsas/mv_sas.h2
-rw-r--r--drivers/scsi/mvumi.h4
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/pcmcia/Kconfig2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c51
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h5
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c22
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c80
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h7
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c155
-rw-r--r--drivers/scsi/pmcraid.h2
-rw-r--r--drivers/scsi/qedf/qedf_main.c18
-rw-r--r--drivers/scsi/qedi/qedi.h3
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h1
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c18
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c104
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c138
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c24
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h387
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h173
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h23
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c1699
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c204
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c294
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c388
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c757
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c38
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c25
-rw-r--r--drivers/scsi/scsi.c18
-rw-r--r--drivers/scsi/scsi_debug.c5
-rw-r--r--drivers/scsi/scsi_error.c1
-rw-r--r--drivers/scsi/scsi_lib.c99
-rw-r--r--drivers/scsi/scsi_pm.c10
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c18
-rw-r--r--drivers/scsi/scsi_trace.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c139
-rw-r--r--drivers/scsi/scsicam.c186
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/sg.c8
-rw-r--r--drivers/scsi/smartpqi/Kconfig2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c22
-rw-r--r--drivers/scsi/snic/vnic_devcmd.h2
-rw-r--r--drivers/scsi/sr.c22
-rw-r--r--drivers/scsi/sr.h2
-rw-r--r--drivers/scsi/sr_vendor.c8
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/stex.c2
-rw-r--r--drivers/scsi/ufs/Kconfig2
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c2
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c2
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c154
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h15
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c146
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c28
-rw-r--r--drivers/scsi/ufs/ufs.h3
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h1
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c449
-rw-r--r--drivers/scsi/ufs/ufshcd.h220
-rw-r--r--drivers/scsi/ufs/unipro.h7
-rw-r--r--drivers/scsi/virtio_scsi.c1
-rw-r--r--drivers/scsi/zorro_esp.c5
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile3
-rw-r--r--drivers/soc/amlogic/Kconfig13
-rw-r--r--drivers/soc/amlogic/Makefile1
-rw-r--r--drivers/soc/amlogic/meson-secure-pwrc.c204
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c78
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c765
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h158
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soc/fsl/qe/qe_common.c2
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c2
-rw-r--r--drivers/soc/fsl/qe/ucc.c2
-rw-r--r--drivers/soc/fsl/qe/ucc_slow.c33
-rw-r--r--drivers/soc/imx/Kconfig12
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/gpc.c24
-rw-r--r--drivers/soc/imx/gpcv2.c1
-rw-r--r--drivers/soc/imx/soc-imx8m.c (renamed from drivers/soc/imx/soc-imx8.c)0
-rw-r--r--drivers/soc/kendryte/Kconfig14
-rw-r--r--drivers/soc/kendryte/Makefile3
-rw-r--r--drivers/soc/kendryte/k210-sysctl.c248
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c5
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c128
-rw-r--r--drivers/soc/qcom/Kconfig7
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/apr.c123
-rw-r--r--drivers/soc/qcom/pdr_interface.c757
-rw-r--r--drivers/soc/qcom/pdr_internal.h379
-rw-r--r--drivers/soc/qcom/qcom_aoss.c6
-rw-r--r--drivers/soc/qcom/rpmh-internal.h1
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c2
-rw-r--r--drivers/soc/qcom/rpmh.c22
-rw-r--r--drivers/soc/qcom/socinfo.c2
-rw-r--r--drivers/soc/renesas/Kconfig18
-rw-r--r--drivers/soc/renesas/rcar-sysc.h4
-rw-r--r--drivers/soc/renesas/renesas-soc.c2
-rw-r--r--drivers/soc/tegra/pmc.c688
-rw-r--r--drivers/soc/ti/pm33xx.c21
-rw-r--r--drivers/soc/xilinx/Kconfig4
-rw-r--r--drivers/soundwire/bus.c537
-rw-r--r--drivers/soundwire/bus.h9
-rw-r--r--drivers/soundwire/bus_type.c5
-rw-r--r--drivers/soundwire/cadence_master.c282
-rw-r--r--drivers/soundwire/cadence_master.h17
-rw-r--r--drivers/soundwire/intel.c200
-rw-r--r--drivers/soundwire/qcom.c15
-rw-r--r--drivers/soundwire/slave.c4
-rw-r--r--drivers/soundwire/stream.c115
-rw-r--r--drivers/spi/Kconfig39
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/atmel-quadspi.c119
-rw-r--r--drivers/spi/spi-ar934x.c235
-rw-r--r--drivers/spi/spi-efm32.c44
-rw-r--r--drivers/spi/spi-fsi.c558
-rw-r--r--drivers/spi/spi-fsl-dspi.c732
-rw-r--r--drivers/spi/spi-fsl-lpspi.c9
-rw-r--r--drivers/spi/spi-fsl-qspi.c4
-rw-r--r--drivers/spi/spi-geni-qcom.c26
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c99
-rw-r--r--drivers/spi/spi-mem.c7
-rw-r--r--drivers/spi/spi-meson-spicc.c496
-rw-r--r--drivers/spi/spi-mtk-nor.c689
-rw-r--r--drivers/spi/spi-mux.c187
-rw-r--r--drivers/spi/spi-mxs.c3
-rw-r--r--drivers/spi/spi-nxp-fspi.c63
-rw-r--r--drivers/spi/spi-pxa2xx.c33
-rw-r--r--drivers/spi/spi-rockchip.c5
-rw-r--r--drivers/spi/spi-rspi.c44
-rw-r--r--drivers/spi/spi-s3c24xx.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c31
-rw-r--r--drivers/spi/spi-stm32.c62
-rw-r--r--drivers/spi/spi.c34
-rw-r--r--drivers/spi/spidev.c23
-rw-r--r--drivers/ssb/sprom.c4
-rw-r--r--drivers/staging/Kconfig11
-rw-r--r--drivers/staging/Makefile6
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c3
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c30
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_common.c13
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c55
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_routes.c63
-rw-r--r--drivers/staging/comedi/drivers/ni_routes.h1
-rw-r--r--drivers/staging/comedi/drivers/ni_routing/tools/.gitignore1
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h1
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c8
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c5
-rw-r--r--drivers/staging/comedi/drivers/s626.c3
-rw-r--r--drivers/staging/exfat/Kconfig41
-rw-r--r--drivers/staging/exfat/Makefile10
-rw-r--r--drivers/staging/exfat/TODO69
-rw-r--r--drivers/staging/exfat/exfat.h824
-rw-r--r--drivers/staging/exfat/exfat_blkdev.c136
-rw-r--r--drivers/staging/exfat/exfat_cache.c555
-rw-r--r--drivers/staging/exfat/exfat_core.c2582
-rw-r--r--drivers/staging/exfat/exfat_nls.c212
-rw-r--r--drivers/staging/exfat/exfat_super.c3883
-rw-r--r--drivers/staging/exfat/exfat_upcase.c740
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/fbtft/fbtft-sysfs.c6
-rw-r--r--drivers/staging/fbtft/fbtft.h18
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c3
-rw-r--r--drivers/staging/gasket/apex_driver.c7
-rw-r--r--drivers/staging/gasket/gasket_core.c15
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c3
-rw-r--r--drivers/staging/gasket/gasket_sysfs.h4
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c2
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h2
-rw-r--r--drivers/staging/gdm724x/hci_packet.h6
-rw-r--r--drivers/staging/gdm724x/netlink_k.c7
-rw-r--r--drivers/staging/gdm724x/netlink_k.h3
-rw-r--r--drivers/staging/greybus/audio_apbridgea.h2
-rw-r--r--drivers/staging/greybus/gpio.c15
-rw-r--r--drivers/staging/greybus/i2c.c16
-rw-r--r--drivers/staging/greybus/raw.c2
-rw-r--r--drivers/staging/greybus/tools/.gitignore1
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c3
-rw-r--r--drivers/staging/greybus/uart.c4
-rw-r--r--drivers/staging/hp/Kconfig30
-rw-r--r--drivers/staging/hp/Makefile6
-rw-r--r--drivers/staging/hp/hp100.c3034
-rw-r--r--drivers/staging/hp/hp100.h611
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-ad719220
-rw-r--r--drivers/staging/iio/TODO8
-rw-r--r--drivers/staging/iio/accel/adis16203.c1
-rw-r--r--drivers/staging/iio/accel/adis16240.c1
-rw-r--r--drivers/staging/iio/adc/Kconfig12
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7280a.c4
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c17
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c13
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c4
-rw-r--r--drivers/staging/kpc2000/kpc_dma/dma.c9
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c49
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c9
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h4
-rw-r--r--drivers/staging/ks7010/TODO1
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c3
-rw-r--r--drivers/staging/ks7010/ks_hostif.h4
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/allegro-dvt/Makefile2
-rw-r--r--drivers/staging/media/allegro-dvt/allegro-core.c942
-rw-r--r--drivers/staging/media/allegro-dvt/allegro-mail.c37
-rw-r--r--drivers/staging/media/allegro-dvt/allegro-mail.h267
-rw-r--r--drivers/staging/media/hantro/Kconfig16
-rw-r--r--drivers/staging/media/hantro/Makefile3
-rw-r--r--drivers/staging/media/hantro/hantro.h2
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c15
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c19
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h1
-rw-r--r--drivers/staging/media/hantro/hantro_jpeg.c76
-rw-r--r--drivers/staging/media/hantro/hantro_jpeg.h2
-rw-r--r--drivers/staging/media/hantro/hantro_postproc.c12
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c2
-rw-r--r--drivers/staging/media/hantro/imx8m_vpu_hw.c220
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c24
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c8
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c2
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c5
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c19
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c9
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c24
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c18
-rw-r--r--drivers/staging/media/ipu3/TODO2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c6
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.h3
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.c4
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c2
-rw-r--r--drivers/staging/media/ipu3/ipu3.c16
-rw-r--r--drivers/staging/media/meson/vdec/Makefile4
-rw-r--r--drivers/staging/media/meson/vdec/codec_h264.c485
-rw-r--r--drivers/staging/media/meson/vdec/codec_h264.h14
-rw-r--r--drivers/staging/media/meson/vdec/codec_hevc_common.c297
-rw-r--r--drivers/staging/media/meson/vdec/codec_hevc_common.h71
-rw-r--r--drivers/staging/media/meson/vdec/codec_vp9.c2141
-rw-r--r--drivers/staging/media/meson/vdec/codec_vp9.h13
-rw-r--r--drivers/staging/media/meson/vdec/esparser.c200
-rw-r--r--drivers/staging/media/meson/vdec/hevc_regs.h218
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c107
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h14
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.c123
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.h10
-rw-r--r--drivers/staging/media/meson/vdec/vdec_hevc.c231
-rw-r--r--drivers/staging/media/meson/vdec/vdec_hevc.h13
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.c109
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c8
-rw-r--r--drivers/staging/media/rkisp1/TODO1
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-capture.c13
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-common.h3
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-dev.c20
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-isp.c61
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-params.c2
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-resizer.c27
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-stats.c5
-rw-r--r--drivers/staging/media/soc_camera/soc_camera.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c6
-rw-r--r--drivers/staging/media/tegra-vde/vde.c55
-rw-r--r--drivers/staging/media/usbvision/Kconfig (renamed from drivers/media/usb/usbvision/Kconfig)9
-rw-r--r--drivers/staging/media/usbvision/Makefile (renamed from drivers/media/usb/usbvision/Makefile)0
-rw-r--r--drivers/staging/media/usbvision/TODO11
-rw-r--r--drivers/staging/media/usbvision/usbvision-cards.c (renamed from drivers/media/usb/usbvision/usbvision-cards.c)0
-rw-r--r--drivers/staging/media/usbvision/usbvision-cards.h (renamed from drivers/media/usb/usbvision/usbvision-cards.h)0
-rw-r--r--drivers/staging/media/usbvision/usbvision-core.c (renamed from drivers/media/usb/usbvision/usbvision-core.c)0
-rw-r--r--drivers/staging/media/usbvision/usbvision-i2c.c (renamed from drivers/media/usb/usbvision/usbvision-i2c.c)0
-rw-r--r--drivers/staging/media/usbvision/usbvision-video.c (renamed from drivers/media/usb/usbvision/usbvision-video.c)2
-rw-r--r--drivers/staging/media/usbvision/usbvision.h (renamed from drivers/media/usb/usbvision/usbvision.h)0
-rw-r--r--drivers/staging/most/Documentation/ABI/configfs-most.txt204
-rw-r--r--drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt313
-rw-r--r--drivers/staging/most/Kconfig6
-rw-r--r--drivers/staging/most/Makefile3
-rw-r--r--drivers/staging/most/cdev/cdev.c3
-rw-r--r--drivers/staging/most/dim2/dim2.c3
-rw-r--r--drivers/staging/most/i2c/i2c.c3
-rw-r--r--drivers/staging/most/most.h337
-rw-r--r--drivers/staging/most/net/net.c3
-rw-r--r--drivers/staging/most/sound/sound.c3
-rw-r--r--drivers/staging/most/usb/usb.c3
-rw-r--r--drivers/staging/most/video/video.c7
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c3
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts4
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi15
-rw-r--r--drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c304
-rw-r--r--drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt7
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c261
-rw-r--r--drivers/staging/netlogic/platform_net.h4
-rw-r--r--drivers/staging/netlogic/xlr_net.h4
-rw-r--r--drivers/staging/octeon-usb/Kconfig11
-rw-r--r--drivers/staging/octeon-usb/Makefile2
-rw-r--r--drivers/staging/octeon-usb/TODO8
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c3737
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h1847
-rw-r--r--drivers/staging/octeon/Kconfig15
-rw-r--r--drivers/staging/octeon/Makefile19
-rw-r--r--drivers/staging/octeon/TODO9
-rw-r--r--drivers/staging/octeon/ethernet-defines.h40
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c178
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h28
-rw-r--r--drivers/staging/octeon/ethernet-mem.c154
-rw-r--r--drivers/staging/octeon/ethernet-mem.h9
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c158
-rw-r--r--drivers/staging/octeon/ethernet-rx.c538
-rw-r--r--drivers/staging/octeon/ethernet-rx.h31
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c30
-rw-r--r--drivers/staging/octeon/ethernet-spi.c226
-rw-r--r--drivers/staging/octeon/ethernet-tx.c717
-rw-r--r--drivers/staging/octeon/ethernet-tx.h14
-rw-r--r--drivers/staging/octeon/ethernet-util.h47
-rw-r--r--drivers/staging/octeon/ethernet.c992
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h107
-rw-r--r--drivers/staging/octeon/octeon-stubs.h1434
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts73
-rw-r--r--drivers/staging/pi433/pi433_if.h4
-rw-r--r--drivers/staging/pi433/rf69.h4
-rw-r--r--drivers/staging/pi433/rf69_enum.h4
-rw-r--r--drivers/staging/pi433/rf69_registers.h4
-rw-r--r--drivers/staging/qlge/qlge.h69
-rw-r--r--drivers/staging/qlge/qlge_dbg.c64
-rw-r--r--drivers/staging/qlge/qlge_ethtool.c24
-rw-r--r--drivers/staging/qlge/qlge_main.c34
-rw-r--r--drivers/staging/qlge/qlge_mpi.c9
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c42
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_com.c22
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c50
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_hwconfig.c54
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c138
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c60
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c42
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c44
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c32
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c20
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c10
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c36
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c26
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c14
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib.h30
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h28
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c19
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c30
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c26
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c64
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c2
-rw-r--r--drivers/staging/rtl8712/Kconfig7
-rw-r--r--drivers/staging/rtl8712/ieee80211.h4
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c19
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c19
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c15
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c8
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c16
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/Hal8723BReg.h14
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h6
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c9
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_debug.h4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_types.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c62
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c14
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c16
-rw-r--r--drivers/staging/rtl8723bs/include/HalVerDef.h32
-rw-r--r--drivers/staging/rtl8723bs/include/cmd_osdep.h4
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h98
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_h2c.h8
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h2
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h14
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy.h2
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_cfg.h4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pwr_seq.h4
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h22
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h10
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h2
-rw-r--r--drivers/staging/rtl8723bs/include/recv_osdep.h4
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_cmd.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_rf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_xmit.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_byteorder.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h112
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_debug.h28
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_eeprom.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_efuse.h8
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_event.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ht.h12
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h66
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h40
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mp.h16
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h18
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h30
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h26
-rw-r--r--drivers/staging/rtl8723bs/include/sta_info.h8
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h8
-rw-r--r--drivers/staging/rtl8723bs/include/xmit_osdep.h4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c124
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c288
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c16
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c40
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c12
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c6
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c3
-rw-r--r--drivers/staging/sm750fb/Makefile6
-rw-r--r--drivers/staging/speakup/devsynth.c10
-rw-r--r--drivers/staging/speakup/keyhelp.c2
-rw-r--r--drivers/staging/speakup/main.c3
-rw-r--r--drivers/staging/speakup/speakup_soft.c16
-rw-r--r--drivers/staging/speakup/spk_priv.h6
-rw-r--r--drivers/staging/speakup/spk_ttyio.c2
-rw-r--r--drivers/staging/speakup/spk_types.h2
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt12
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c2
-rw-r--r--drivers/staging/uwb/Kconfig72
-rw-r--r--drivers/staging/uwb/Makefile32
-rw-r--r--drivers/staging/uwb/TODO8
-rw-r--r--drivers/staging/uwb/address.c352
-rw-r--r--drivers/staging/uwb/allocator.c374
-rw-r--r--drivers/staging/uwb/beacon.c595
-rw-r--r--drivers/staging/uwb/driver.c143
-rw-r--r--drivers/staging/uwb/drp-avail.c278
-rw-r--r--drivers/staging/uwb/drp-ie.c305
-rw-r--r--drivers/staging/uwb/drp.c842
-rw-r--r--drivers/staging/uwb/est.c450
-rw-r--r--drivers/staging/uwb/hwa-rc.c929
-rw-r--r--drivers/staging/uwb/i1480/Makefile2
-rw-r--r--drivers/staging/uwb/i1480/dfu/Makefile10
-rw-r--r--drivers/staging/uwb/i1480/dfu/dfu.c198
-rw-r--r--drivers/staging/uwb/i1480/dfu/i1480-dfu.h246
-rw-r--r--drivers/staging/uwb/i1480/dfu/mac.c496
-rw-r--r--drivers/staging/uwb/i1480/dfu/phy.c190
-rw-r--r--drivers/staging/uwb/i1480/dfu/usb.c448
-rw-r--r--drivers/staging/uwb/i1480/i1480-est.c85
-rw-r--r--drivers/staging/uwb/ie-rcv.c42
-rw-r--r--drivers/staging/uwb/ie.c366
-rw-r--r--drivers/staging/uwb/include/debug-cmd.h57
-rw-r--r--drivers/staging/uwb/include/spec.h767
-rw-r--r--drivers/staging/uwb/include/umc.h192
-rw-r--r--drivers/staging/uwb/include/whci.h102
-rw-r--r--drivers/staging/uwb/lc-dev.c457
-rw-r--r--drivers/staging/uwb/lc-rc.c569
-rw-r--r--drivers/staging/uwb/neh.c606
-rw-r--r--drivers/staging/uwb/pal.c128
-rw-r--r--drivers/staging/uwb/radio.c196
-rw-r--r--drivers/staging/uwb/reset.c379
-rw-r--r--drivers/staging/uwb/rsv.c1000
-rw-r--r--drivers/staging/uwb/scan.c120
-rw-r--r--drivers/staging/uwb/umc-bus.c211
-rw-r--r--drivers/staging/uwb/umc-dev.c94
-rw-r--r--drivers/staging/uwb/umc-drv.c31
-rw-r--r--drivers/staging/uwb/uwb-debug.c354
-rw-r--r--drivers/staging/uwb/uwb-internal.h366
-rw-r--r--drivers/staging/uwb/uwb.h817
-rw-r--r--drivers/staging/uwb/uwbd.c356
-rw-r--r--drivers/staging/uwb/whc-rc.c467
-rw-r--r--drivers/staging/uwb/whci.c257
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c458
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h36
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c43
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c542
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h76
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c306
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h35
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h2
-rw-r--r--drivers/staging/vt6655/card.h2
-rw-r--r--drivers/staging/vt6655/device_main.c8
-rw-r--r--drivers/staging/vt6655/power.c10
-rw-r--r--drivers/staging/vt6656/Makefile4
-rw-r--r--drivers/staging/vt6656/baseband.c46
-rw-r--r--drivers/staging/vt6656/card.c4
-rw-r--r--drivers/staging/vt6656/desc.h35
-rw-r--r--drivers/staging/vt6656/device.h21
-rw-r--r--drivers/staging/vt6656/dpc.c124
-rw-r--r--drivers/staging/vt6656/dpc.h24
-rw-r--r--drivers/staging/vt6656/int.c164
-rw-r--r--drivers/staging/vt6656/int.h47
-rw-r--r--drivers/staging/vt6656/key.c19
-rw-r--r--drivers/staging/vt6656/mac.h263
-rw-r--r--drivers/staging/vt6656/main_usb.c69
-rw-r--r--drivers/staging/vt6656/rxtx.c296
-rw-r--r--drivers/staging/vt6656/rxtx.h61
-rw-r--r--drivers/staging/vt6656/usbpipe.c234
-rw-r--r--drivers/staging/vt6656/usbpipe.h23
-rw-r--r--drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt11
-rw-r--r--drivers/staging/wfx/bh.c8
-rw-r--r--drivers/staging/wfx/bus_sdio.c16
-rw-r--r--drivers/staging/wfx/bus_spi.c45
-rw-r--r--drivers/staging/wfx/data_rx.c3
-rw-r--r--drivers/staging/wfx/data_tx.c12
-rw-r--r--drivers/staging/wfx/data_tx.h2
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h4
-rw-r--r--drivers/staging/wfx/hwio.c2
-rw-r--r--drivers/staging/wfx/main.c23
-rw-r--r--drivers/staging/wfx/main.h1
-rw-r--r--drivers/staging/wfx/queue.c20
-rw-r--r--drivers/staging/wfx/scan.c4
-rw-r--r--drivers/staging/wfx/sta.c5
-rw-r--r--drivers/staging/wilc1000/Kconfig5
-rw-r--r--drivers/staging/wilc1000/cfg80211.c387
-rw-r--r--drivers/staging/wilc1000/hif.c5
-rw-r--r--drivers/staging/wilc1000/microchip,wilc1000,sdio.txt38
-rw-r--r--drivers/staging/wilc1000/microchip,wilc1000,spi.txt34
-rw-r--r--drivers/staging/wilc1000/microchip,wilc1000.yaml71
-rw-r--r--drivers/staging/wilc1000/mon.c2
-rw-r--r--drivers/staging/wilc1000/netdev.c32
-rw-r--r--drivers/staging/wilc1000/netdev.h10
-rw-r--r--drivers/staging/wilc1000/sdio.c316
-rw-r--r--drivers/staging/wilc1000/spi.c861
-rw-r--r--drivers/staging/wilc1000/wlan.c135
-rw-r--r--drivers/staging/wilc1000/wlan.h97
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c12
-rw-r--r--drivers/staging/wlan-ng/p80211types.h4
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c1
-rw-r--r--drivers/staging/wusbcore/Documentation/wusb-cbaf130
-rw-r--r--drivers/staging/wusbcore/Documentation/wusb-design-overview.rst457
-rw-r--r--drivers/staging/wusbcore/Kconfig39
-rw-r--r--drivers/staging/wusbcore/Makefile28
-rw-r--r--drivers/staging/wusbcore/TODO8
-rw-r--r--drivers/staging/wusbcore/cbaf.c645
-rw-r--r--drivers/staging/wusbcore/crypto.c441
-rw-r--r--drivers/staging/wusbcore/dev-sysfs.c124
-rw-r--r--drivers/staging/wusbcore/devconnect.c1085
-rw-r--r--drivers/staging/wusbcore/host/Kconfig28
-rw-r--r--drivers/staging/wusbcore/host/Makefile3
-rw-r--r--drivers/staging/wusbcore/host/hwa-hc.c875
-rw-r--r--drivers/staging/wusbcore/host/whci/Makefile14
-rw-r--r--drivers/staging/wusbcore/host/whci/asl.c376
-rw-r--r--drivers/staging/wusbcore/host/whci/debug.c153
-rw-r--r--drivers/staging/wusbcore/host/whci/hcd.c356
-rw-r--r--drivers/staging/wusbcore/host/whci/hw.c93
-rw-r--r--drivers/staging/wusbcore/host/whci/init.c177
-rw-r--r--drivers/staging/wusbcore/host/whci/int.c82
-rw-r--r--drivers/staging/wusbcore/host/whci/pzl.c404
-rw-r--r--drivers/staging/wusbcore/host/whci/qset.c831
-rw-r--r--drivers/staging/wusbcore/host/whci/whcd.h202
-rw-r--r--drivers/staging/wusbcore/host/whci/whci-hc.h401
-rw-r--r--drivers/staging/wusbcore/host/whci/wusb.c210
-rw-r--r--drivers/staging/wusbcore/include/association.h151
-rw-r--r--drivers/staging/wusbcore/include/wusb-wa.h304
-rw-r--r--drivers/staging/wusbcore/include/wusb.h362
-rw-r--r--drivers/staging/wusbcore/mmc.c303
-rw-r--r--drivers/staging/wusbcore/pal.c45
-rw-r--r--drivers/staging/wusbcore/reservation.c110
-rw-r--r--drivers/staging/wusbcore/rh.c426
-rw-r--r--drivers/staging/wusbcore/security.c599
-rw-r--r--drivers/staging/wusbcore/wa-hc.c88
-rw-r--r--drivers/staging/wusbcore/wa-hc.h467
-rw-r--r--drivers/staging/wusbcore/wa-nep.c289
-rw-r--r--drivers/staging/wusbcore/wa-rpipe.c539
-rw-r--r--drivers/staging/wusbcore/wa-xfer.c2927
-rw-r--r--drivers/staging/wusbcore/wusbhc.c490
-rw-r--r--drivers/staging/wusbcore/wusbhc.h487
-rw-r--r--drivers/target/iscsi/iscsi_target.c82
-rw-r--r--drivers/target/iscsi/iscsi_target.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c5
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c4
-rw-r--r--drivers/target/target_core_fabric_lib.c5
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_spc.c13
-rw-r--r--drivers/target/target_core_tmr.c6
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/target/target_core_ua.c8
-rw-r--r--drivers/target/target_core_user.c1
-rw-r--r--drivers/target/target_core_xcopy.c187
-rw-r--r--drivers/target/target_core_xcopy.h9
-rw-r--r--drivers/tee/tee_core.c1
-rw-r--r--drivers/tee/tee_private.h3
-rw-r--r--drivers/tee/tee_shm.c85
-rw-r--r--drivers/thermal/Kconfig42
-rw-r--r--drivers/thermal/Makefile3
-rw-r--r--drivers/thermal/cpufreq_cooling.c24
-rw-r--r--drivers/thermal/imx8mm_thermal.c236
-rw-r--r--drivers/thermal/imx_sc_thermal.c148
-rw-r--r--drivers/thermal/imx_thermal.c16
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c5
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c2
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c5
-rw-r--r--drivers/thermal/intel/intel_soc_dts_thermal.c3
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c26
-rw-r--r--drivers/thermal/of-thermal.c62
-rw-r--r--drivers/thermal/qcom/tsens-8960.c4
-rw-r--r--drivers/thermal/qcom/tsens-common.c194
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c6
-rw-r--r--drivers/thermal/qcom/tsens-v1.c6
-rw-r--r--drivers/thermal/qcom/tsens-v2.c24
-rw-r--r--drivers/thermal/qcom/tsens.c65
-rw-r--r--drivers/thermal/qcom/tsens.h105
-rw-r--r--drivers/thermal/qoriq_thermal.c40
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c31
-rw-r--r--drivers/thermal/rcar_thermal.c53
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c4
-rw-r--r--drivers/thermal/sprd_thermal.c552
-rw-r--r--drivers/thermal/st/stm_thermal.c3
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c44
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h4
-rw-r--r--drivers/thunderbolt/domain.c4
-rw-r--r--drivers/thunderbolt/eeprom.c2
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/thunderbolt/usb4.c7
-rw-r--r--drivers/tty/Kconfig173
-rw-r--r--drivers/tty/ehv_bytechan.c21
-rw-r--r--drivers/tty/hvc/Kconfig5
-rw-r--r--drivers/tty/hvc/hvc_console.c23
-rw-r--r--drivers/tty/hvc/hvc_console.h2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c1
-rw-r--r--drivers/tty/n_gsm.c140
-rw-r--r--drivers/tty/n_hdlc.c549
-rw-r--r--drivers/tty/n_tracesink.h2
-rw-r--r--drivers/tty/n_tty.c6
-rw-r--r--drivers/tty/nozomi.c69
-rw-r--r--drivers/tty/rocket.c25
-rw-r--r--drivers/tty/serial/8250/8250.h6
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c80
-rw-r--r--drivers/tty/serial/8250/8250_core.c15
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h2
-rw-r--r--drivers/tty/serial/8250/8250_exar.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c6
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c50
-rw-r--r--drivers/tty/serial/8250/8250_of.c67
-rw-r--r--drivers/tty/serial/8250/8250_omap.c276
-rw-r--r--drivers/tty/serial/8250/8250_pci.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c238
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c2
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c198
-rw-r--r--drivers/tty/serial/8250/Kconfig9
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig20
-rw-r--r--drivers/tty/serial/ar933x_uart.c113
-rw-r--r--drivers/tty/serial/atmel_serial.c23
-rw-r--r--drivers/tty/serial/atmel_serial.h2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h6
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c48
-rw-r--r--drivers/tty/serial/earlycon.c11
-rw-r--r--drivers/tty/serial/efm32-uart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c228
-rw-r--r--drivers/tty/serial/icom.h2
-rw-r--r--drivers/tty/serial/ifx6x60.c195
-rw-r--r--drivers/tty/serial/ifx6x60.h15
-rw-r--r--drivers/tty/serial/imx.c44
-rw-r--r--drivers/tty/serial/jsm/jsm.h2
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/lantiq.c1
-rw-r--r--drivers/tty/serial/omap-serial.c17
-rw-r--r--drivers/tty/serial/owl-uart.c7
-rw-r--r--drivers/tty/serial/pch_uart.c22
-rw-r--r--drivers/tty/serial/pic32_uart.c8
-rw-r--r--drivers/tty/serial/pic32_uart.h2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c59
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/serial/serial_core.c266
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h2
-rw-r--r--drivers/tty/serial/sh-sci.c13
-rw-r--r--drivers/tty/serial/sifive.c56
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h2
-rw-r--r--drivers/tty/serial/sprd_serial.c48
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/serial/sunhv.c3
-rw-r--r--drivers/tty/serial/timbuart.h2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c232
-rw-r--r--drivers/tty/sysrq.c14
-rw-r--r--drivers/tty/vt/selection.c199
-rw-r--r--drivers/tty/vt/vt.c166
-rw-r--r--drivers/tty/vt/vt_ioctl.c75
-rw-r--r--drivers/uio/uio.c38
-rw-r--r--drivers/uio/uio_pdrv_genirq.c34
-rw-r--r--drivers/usb/atm/ueagle-atm.c2
-rw-r--r--drivers/usb/atm/usbatm.h4
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h2
-rw-r--r--drivers/usb/c67x00/c67x00.h2
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c2
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c4
-rw-r--r--drivers/usb/cdns3/core.c22
-rw-r--r--drivers/usb/cdns3/gadget.c24
-rw-r--r--drivers/usb/cdns3/gadget.h6
-rw-r--r--drivers/usb/chipidea/bits.h2
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c12
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c2
-rw-r--r--drivers/usb/chipidea/core.c10
-rw-r--r--drivers/usb/chipidea/otg.c7
-rw-r--r--drivers/usb/chipidea/otg.h2
-rw-r--r--drivers/usb/chipidea/otg_fsm.h2
-rw-r--r--drivers/usb/chipidea/udc.c13
-rw-r--r--drivers/usb/chipidea/udc.h2
-rw-r--r--drivers/usb/class/cdc-acm.c54
-rw-r--r--drivers/usb/class/cdc-acm.h5
-rw-r--r--drivers/usb/core/devio.c19
-rw-r--r--drivers/usb/core/driver.c58
-rw-r--r--drivers/usb/core/generic.c48
-rw-r--r--drivers/usb/core/hub.c24
-rw-r--r--drivers/usb/core/message.c15
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/core/sysfs.c6
-rw-r--r--drivers/usb/core/usb-acpi.c11
-rw-r--r--drivers/usb/core/usb.h8
-rw-r--r--drivers/usb/dwc2/core.h8
-rw-r--r--drivers/usb/dwc2/gadget.c24
-rw-r--r--drivers/usb/dwc2/hcd.h2
-rw-r--r--drivers/usb/dwc2/hw.h8
-rw-r--r--drivers/usb/dwc2/params.c33
-rw-r--r--drivers/usb/dwc2/platform.c101
-rw-r--r--drivers/usb/dwc3/Kconfig1
-rw-r--r--drivers/usb/dwc3/core.c29
-rw-r--r--drivers/usb/dwc3/core.h18
-rw-r--r--drivers/usb/dwc3/drd.c98
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c9
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c182
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c1
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c2
-rw-r--r--drivers/usb/dwc3/gadget.c85
-rw-r--r--drivers/usb/dwc3/host.c4
-rw-r--r--drivers/usb/dwc3/trace.h9
-rw-r--r--drivers/usb/early/xhci-dbc.c8
-rw-r--r--drivers/usb/early/xhci-dbc.h18
-rw-r--r--drivers/usb/gadget/composite.c9
-rw-r--r--drivers/usb/gadget/configfs.c3
-rw-r--r--drivers/usb/gadget/function/f_fs.c7
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c1
-rw-r--r--drivers/usb/gadget/function/f_phonet.c2
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c2
-rw-r--r--drivers/usb/gadget/function/f_uvc.c2
-rw-r--r--drivers/usb/gadget/function/storage_common.h5
-rw-r--r--drivers/usb/gadget/legacy/Kconfig59
-rw-r--r--drivers/usb/gadget/legacy/Makefile1
-rw-r--r--drivers/usb/gadget/legacy/audio.c4
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c4
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c9
-rw-r--r--drivers/usb/gadget/legacy/ncm.c4
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c1280
-rw-r--r--drivers/usb/gadget/udc/Kconfig11
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.h2
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/Kconfig4
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c71
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/dev.c30
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c4
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c58
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h43
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c4
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c1
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c2
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c1331
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.c7
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c2
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c28
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c3
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c286
-rw-r--r--drivers/usb/host/ehci-mv.c11
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/ehci-platform.c127
-rw-r--r--drivers/usb/host/ehci-tegra.c2
-rw-r--r--drivers/usb/host/ehci.h4
-rw-r--r--drivers/usb/host/fhci-hcd.c1
-rw-r--r--drivers/usb/host/fotg210.h2
-rw-r--r--drivers/usb/host/ohci-pci.c2
-rw-r--r--drivers/usb/host/ohci.h4
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-histb.c3
-rw-r--r--drivers/usb/host/xhci-hub.c72
-rw-r--r--drivers/usb/host/xhci-mem.c1
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/host/xhci-pci.c9
-rw-r--r--drivers/usb/host/xhci-plat.c7
-rw-r--r--drivers/usb/host/xhci-ring.c59
-rw-r--r--drivers/usb/host/xhci-tegra.c235
-rw-r--r--drivers/usb/host/xhci.c18
-rw-r--r--drivers/usb/host/xhci.h39
-rw-r--r--drivers/usb/misc/Kconfig10
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/apple-mfi-fastcharge.c237
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c20
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h14
-rw-r--r--drivers/usb/mon/mon_text.c36
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c4
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c9
-rw-r--r--drivers/usb/musb/Kconfig4
-rw-r--r--drivers/usb/musb/jz4740.c110
-rw-r--r--drivers/usb/musb/mediatek.c18
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_host.c19
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/phy/Kconfig8
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/phy-jz4770.c243
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c3
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c12
-rw-r--r--drivers/usb/roles/class.c31
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c26
-rw-r--r--drivers/usb/serial/digi_acceleport.c2
-rw-r--r--drivers/usb/serial/f81232.c354
-rw-r--r--drivers/usb/serial/garmin_gps.c4
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/io_edgeport.c2
-rw-r--r--drivers/usb/serial/io_usbvend.h4
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c4
-rw-r--r--drivers/usb/serial/usb-serial.c2
-rw-r--r--drivers/usb/storage/uas.c46
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c10
-rw-r--r--drivers/usb/storage/usb.h5
-rw-r--r--drivers/usb/storage/usual-tables.c6
-rw-r--r--drivers/usb/typec/bus.c17
-rw-r--r--drivers/usb/typec/bus.h2
-rw-r--r--drivers/usb/typec/class.c175
-rw-r--r--drivers/usb/typec/mux.c72
-rw-r--r--drivers/usb/typec/mux/Kconfig9
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c438
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c88
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c14
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h9
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c57
-rw-r--r--drivers/vdpa/Kconfig31
-rw-r--r--drivers/vdpa/Makefile4
-rw-r--r--drivers/vdpa/ifcvf/Makefile3
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c387
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h118
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c433
-rw-r--r--drivers/vdpa/vdpa.c180
-rw-r--r--drivers/vdpa/vdpa_sim/Makefile2
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c628
-rw-r--r--drivers/vfio/pci/vfio_pci.c390
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c10
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h10
-rw-r--r--drivers/vfio/platform/vfio_platform.c2
-rw-r--r--drivers/vfio/vfio.c198
-rw-r--r--drivers/vfio/vfio_iommu_type1.c84
-rw-r--r--drivers/vhost/Kconfig62
-rw-r--r--drivers/vhost/Kconfig.vringh6
-rw-r--r--drivers/vhost/Makefile6
-rw-r--r--drivers/vhost/iotlb.c177
-rw-r--r--drivers/vhost/net.c33
-rw-r--r--drivers/vhost/scsi.c16
-rw-r--r--drivers/vhost/test.c14
-rw-r--r--drivers/vhost/vdpa.c878
-rw-r--r--drivers/vhost/vhost.c235
-rw-r--r--drivers/vhost/vhost.h72
-rw-r--r--drivers/vhost/vringh.c426
-rw-r--r--drivers/vhost/vsock.c37
-rw-r--r--drivers/video/backlight/Kconfig8
-rw-r--r--drivers/video/backlight/corgi_lcd.c68
-rw-r--r--drivers/video/backlight/pwm_bl.c19
-rw-r--r--drivers/video/console/Kconfig76
-rw-r--r--drivers/video/fbdev/Kconfig9
-rw-r--r--drivers/video/fbdev/aty/mach64_gx.c3
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c26
-rw-r--r--drivers/video/fbdev/c2p_core.h12
-rw-r--r--drivers/video/fbdev/cg14.c3
-rw-r--r--drivers/video/fbdev/core/Makefile1
-rw-r--r--drivers/video/fbdev/core/fbcon.c30
-rw-r--r--drivers/video/fbdev/core/fbmem.c38
-rw-r--r--drivers/video/fbdev/g364fb.c29
-rw-r--r--drivers/video/fbdev/hyperv_fb.c4
-rw-r--r--drivers/video/fbdev/kyro/STG4000OverlayDevice.c3
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c15
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.h2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c41
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c4
-rw-r--r--drivers/video/fbdev/pxa168fb.c5
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c7
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c16
-rw-r--r--drivers/video/fbdev/sa1100fb.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/w100fb.c18
-rw-r--r--drivers/video/fbdev/wm8505fb.c2
-rw-r--r--drivers/video/hdmi.c11
-rw-r--r--drivers/video/logo/.gitignore4
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c2
-rw-r--r--drivers/virtio/Kconfig14
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio_balloon.c182
-rw-r--r--drivers/virtio/virtio_input.c1
-rw-r--r--drivers/virtio/virtio_vdpa.c396
-rw-r--r--drivers/watchdog/Kconfig8
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/imx2_wdt.c37
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c1
-rw-r--r--drivers/watchdog/imx_sc_wdt.c2
-rw-r--r--drivers/watchdog/npcm_wdt.c19
-rw-r--r--drivers/watchdog/orion_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c25
-rw-r--r--drivers/watchdog/qcom-wdt.c34
-rw-r--r--drivers/watchdog/rti_wdt.c255
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/watchdog_core.c12
-rw-r--r--drivers/watchdog/watchdog_dev.c1
-rw-r--r--drivers/watchdog/wm831x_wdt.c27
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--drivers/xen/cpu_hotplug.c2
-rw-r--r--drivers/xen/events/events_2l.c16
-rw-r--r--drivers/xen/events/events_base.c93
-rw-r--r--drivers/xen/events/events_fifo.c22
-rw-r--r--drivers/xen/events/events_internal.h30
-rw-r--r--drivers/xen/evtchn.c13
-rw-r--r--drivers/xen/gntdev-common.h3
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/pvcalls-back.c5
-rw-r--r--drivers/xen/pvcalls-front.c15
-rw-r--r--drivers/xen/xen-pciback/conf_space.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.h8
-rw-r--r--drivers/xen/xen-pciback/xenbus.c7
-rw-r--r--drivers/xen/xen-scsiback.c3
-rw-r--r--drivers/xen/xenbus/xenbus_client.c141
-rw-r--r--drivers/zorro/.gitignore1
-rw-r--r--drivers/zorro/zorro-driver.c16
-rw-r--r--drivers/zorro/zorro.c2
-rw-r--r--drivers/zorro/zorro.h7
5094 files changed, 284992 insertions, 142957 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8befa53f43be..dcecc9f6e33f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -138,6 +138,10 @@ source "drivers/virt/Kconfig"
source "drivers/virtio/Kconfig"
+source "drivers/vdpa/Kconfig"
+
+source "drivers/vhost/Kconfig"
+
source "drivers/hv/Kconfig"
source "drivers/xen/Kconfig"
@@ -200,6 +204,8 @@ source "drivers/thunderbolt/Kconfig"
source "drivers/android/Kconfig"
+source "drivers/gpu/trace/Kconfig"
+
source "drivers/nvdimm/Kconfig"
source "drivers/dax/Kconfig"
@@ -228,4 +234,5 @@ source "drivers/interconnect/Kconfig"
source "drivers/counter/Kconfig"
+source "drivers/most/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 31cf17dee252..c0cd1b9075e3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
obj-y += soc/
obj-$(CONFIG_VIRTIO) += virtio/
+obj-$(CONFIG_VDPA) += vdpa/
obj-$(CONFIG_XEN) += xen/
# regulators early, since some subsystems rely on them to initialize
@@ -186,3 +187,4 @@ obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_INTERCONNECT) += interconnect/
obj-$(CONFIG_COUNTER) += counter/
+obj-$(CONFIG_MOST) += most/
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index 1339c586bf64..a8f7c278b691 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -347,8 +347,6 @@ int braille_register_console(struct console *console, int index,
{
int ret;
- if (!(console->flags & CON_BRL))
- return 0;
if (!console_options)
/* Only support VisioBraille for now */
console_options = "57600o8";
@@ -371,8 +369,6 @@ int braille_unregister_console(struct console *console)
{
if (braille_co != console)
return -EINVAL;
- if (!(console->flags & CON_BRL))
- return 0;
unregister_keyboard_notifier(&keyboard_notifier_block);
unregister_vt_notifier(&vt_notifier_block);
braille_co = NULL;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index cc57bab146b5..ce2730d61a8f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -190,6 +190,30 @@ config ACPI_BUTTON
To compile this driver as a module, choose M here:
the module will be called button.
+config ACPI_TINY_POWER_BUTTON
+ tristate "Tiny Power Button Driver"
+ depends on !ACPI_BUTTON
+ help
+ This driver provides a tiny alternative to the ACPI Button driver.
+ The tiny power button driver only handles the power button. Rather
+ than notifying userspace via the input layer or a netlink event, this
+ driver directly signals the init process to shut down.
+
+ This driver is particularly suitable for cloud and VM environments,
+ which use a simulated power button to initiate a controlled poweroff,
+ but which may not want to run a separate userspace daemon to process
+ input events.
+
+config ACPI_TINY_POWER_BUTTON_SIGNAL
+ int "Tiny Power Button Signal"
+ depends on ACPI_TINY_POWER_BUTTON
+ default 38
+ help
+ Default signal to send to init in response to the power button.
+
+ Likely values here include 38 (SIGRTMIN+4) to power off, or 2
+ (SIGINT) to simulate Ctrl+Alt+Del.
+
config ACPI_VIDEO
tristate "Video"
depends on X86 && BACKLIGHT_CLASS_DEVICE
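A reading aid for the help text above, not part of the patch: glibc reserves the first two real-time signals, so userspace SIGRTMIN is 34 and the default of 38 is SIGRTMIN+4, which systemd maps to an orderly poweroff. Below is a minimal sketch of the signalling path such a driver can take; the handler and parameter names are hypothetical, while kill_cad_pid() is the kernel's existing helper for signalling the init process:

	#include <linux/module.h>
	#include <linux/sched/signal.h>
	#include <linux/acpi.h>

	/* Illustrative only; 38 matches the Kconfig default above. */
	static int power_signal = 38;
	module_param(power_signal, int, 0644);

	static void tiny_power_button_notify(acpi_handle handle, u32 event, void *data)
	{
		/* Signal init directly instead of emitting an input event. */
		kill_cad_pid(power_signal, 1);
	}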
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 33fdaf67454e..e81e1ebbfb32 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o
obj-$(CONFIG_ACPI_AC) += ac.o
obj-$(CONFIG_ACPI_BUTTON) += button.o
+obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o
obj-$(CONFIG_ACPI_FAN) += fan.o
obj-$(CONFIG_ACPI_VIDEO) += video.o
obj-$(CONFIG_ACPI_TAD) += acpi_tad.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 829f37d36b9f..69d2db13886b 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -293,29 +293,30 @@ static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d)
return 0;
}
+/* Please keep this list alphabetically sorted */
static const struct dmi_system_id ac_dmi_table[] __initconst = {
{
- /* Thinkpad e530 */
- .callback = thinkpad_e530_quirk,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
+ /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
+ .callback = ac_do_not_check_pmic_quirk,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
},
},
{
- /* ECS EF20EA */
+ /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
.callback = ac_do_not_check_pmic_quirk,
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
},
{
- /* Lenovo Ideapad Miix 320 */
- .callback = ac_do_not_check_pmic_quirk,
+ /* Lenovo Thinkpad e530, see comment in acpi_ac_notify() */
+ .callback = thinkpad_e530_quirk,
.matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
},
},
{},
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index db18df6cb330..dee999938213 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -306,11 +306,9 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
.setup = lpss_deassert_reset,
};
-#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
-
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */
- ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 15c5b272e698..bc96457c9e25 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -943,7 +943,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
int i, max_level = 0;
unsigned long long level, level_old;
struct acpi_video_device_brightness *br = NULL;
- int result = -EINVAL;
+ int result;
result = acpi_video_get_levels(device->dev, &br, &max_level);
if (result)
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index ede4b9cc9e85..cf85d66da6e7 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -65,9 +65,7 @@ void cg_write_aml_comment(union acpi_parse_object *op);
/*
* cvparser
*/
-void
-cv_init_file_tree(struct acpi_table_header *table,
- u8 *aml_start, u32 aml_length);
+void cv_init_file_tree(struct acpi_table_header *table, FILE * root_file);
void cv_clear_op_comments(union acpi_parse_object *op);
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6ad0517553d5..ebf6453d0e21 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -101,7 +101,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-u8 acpi_hw_check_all_gpes(void);
+u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 2269e10bc21b..168904ba3086 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -477,7 +477,7 @@
#define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) cv_print_one_comment_type (a,b,c,d);
#define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) cv_print_one_comment_list (a,b);
#define ASL_CV_FILE_HAS_SWITCHED(a) cv_file_has_switched(a)
-#define ASL_CV_INIT_FILETREE(a,b,c) cv_init_file_tree(a,b,c);
+#define ASL_CV_INIT_FILETREE(a,b) cv_init_file_tree(a,b);
#else
@@ -492,7 +492,7 @@
#define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d)
#define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b)
#define ASL_CV_FILE_HAS_SWITCHED(a) 0
-#define ASL_CV_INIT_FILETREE(a,b,c)
+#define ASL_CV_INIT_FILETREE(a,b)
#endif
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index e618ddfab2fd..40f6a3c33a15 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -256,6 +256,8 @@ u32
acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing);
+void acpi_ns_normalize_pathname(char *original_path);
+
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
u8 no_trailing);
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index aa71f65395d2..ee6a1b77af3f 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -468,16 +468,14 @@ char *acpi_db_get_next_token(char *string,
return (NULL);
}
- /* Remove any spaces at the beginning */
+ /* Remove any spaces at the beginning, ignore blank lines */
- if (*string == ' ') {
- while (*string && (*string == ' ')) {
- string++;
- }
+ while (*string && isspace(*string)) {
+ string++;
+ }
- if (!(*string)) {
- return (NULL);
- }
+ if (!(*string)) {
+ return (NULL);
}
switch (*string) {
@@ -570,7 +568,7 @@ char *acpi_db_get_next_token(char *string,
/* Find end of token */
- while (*string && (*string != ' ')) {
+ while (*string && !isspace(*string)) {
string++;
}
break;
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index 3eb45ea93e5e..9dfd693cda3e 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -409,6 +409,7 @@ acpi_status acpi_initialize_debugger(void)
acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
acpi_gbl_db_opt_no_ini_methods = FALSE;
+ acpi_gbl_db_opt_no_region_support = FALSE;
acpi_gbl_db_buffer = acpi_os_allocate(ACPI_DEBUG_BUFFER_SIZE);
if (!acpi_gbl_db_buffer) {
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 5e81a1ae44cf..1d4f8c81028c 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -16,6 +16,9 @@
#include "acinterp.h"
#include "acnamesp.h"
#include "acdebug.h"
+#ifdef ACPI_EXEC_APP
+#include "aecommon.h"
+#endif
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswexec")
@@ -329,6 +332,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
u32 op_class;
union acpi_parse_object *next_op;
union acpi_parse_object *first_arg;
+#ifdef ACPI_EXEC_APP
+ char *namepath;
+ union acpi_operand_object *obj_desc;
+#endif
ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
@@ -537,6 +544,32 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ds_eval_buffer_field_operands(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+#ifdef ACPI_EXEC_APP
+ /*
+ * acpi_exec support for namespace initialization file (initialize
+ * buffer_fields in this code.)
+ */
+ namepath =
+ acpi_ns_get_external_pathname(op->common.node);
+ status = ae_lookup_init_file_entry(namepath, &obj_desc);
+ if (ACPI_SUCCESS(status)) {
+ status =
+ acpi_ex_write_data_to_field(obj_desc,
+ op->common.
+ node->object,
+ NULL);
+ if ACPI_FAILURE
+ (status) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While writing to buffer field"));
+ }
+ }
+ ACPI_FREE(namepath);
+ status = AE_OK;
+#endif
break;
case AML_TYPE_CREATE_OBJECT:
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 697974e37edf..27069325b6de 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -14,7 +14,6 @@
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
-
#ifdef ACPI_ASL_COMPILER
#include "acdisasm.h"
#endif
@@ -399,7 +398,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
union acpi_parse_object *op;
acpi_object_type object_type;
acpi_status status = AE_OK;
-
#ifdef ACPI_ASL_COMPILER
u8 param_count;
#endif
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index b31457ca926c..edadbe146506 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -15,6 +15,9 @@
#include "acinterp.h"
#include "acnamesp.h"
#include "acevents.h"
+#ifdef ACPI_EXEC_APP
+#include "aecommon.h"
+#endif
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswload2")
@@ -373,6 +376,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
struct acpi_namespace_node *new_node;
u32 i;
u8 region_space;
+#ifdef ACPI_EXEC_APP
+ union acpi_operand_object *obj_desc;
+ char *namepath;
+#endif
ACPI_FUNCTION_TRACE(ds_load2_end_op);
@@ -466,6 +473,11 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
* be evaluated later during the execution phase
*/
status = acpi_ds_create_buffer_field(op, walk_state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "CreateBufferField failure"));
+ goto cleanup;
+ }
break;
case AML_TYPE_NAMED_FIELD:
@@ -604,6 +616,29 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
case AML_NAME_OP:
status = acpi_ds_create_node(walk_state, node, op);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+#ifdef ACPI_EXEC_APP
+ /*
+ * acpi_exec support for namespace initialization file (initialize
+ * Name opcodes in this code.)
+ */
+ namepath = acpi_ns_get_external_pathname(node);
+ status = ae_lookup_init_file_entry(namepath, &obj_desc);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Detach any existing object, attach new object */
+
+ if (node->object) {
+ acpi_ns_detach_object(node);
+ }
+ acpi_ns_attach_object(node, obj_desc,
+ obj_desc->common.type);
+ }
+ ACPI_FREE(namepath);
+ status = AE_OK;
+#endif
break;
case AML_METHOD_OP:
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 789d5e920aaf..9efca54c51ac 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -130,7 +130,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
/*
* Initialize the structure that keeps track of fixed event handlers and
- * enable the fixed events.
+ * disable all of the fixed events.
*/
for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
acpi_gbl_fixed_event_handlers[i].handler = NULL;
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index f2de66bfd8a7..3be60673e461 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -799,17 +799,19 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
*
* FUNCTION: acpi_any_gpe_status_set
*
- * PARAMETERS: None
+ * PARAMETERS: gpe_skip_number - Number of the GPE to skip
*
* RETURN: Whether or not the status bit is set for any GPE
*
- * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
- * of them is set or FALSE otherwise.
+ * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one
+ * represented by the "skip" argument, and return TRUE if any of
+ * them is set or FALSE otherwise.
*
******************************************************************************/
-u32 acpi_any_gpe_status_set(void)
+u32 acpi_any_gpe_status_set(u32 gpe_skip_number)
{
acpi_status status;
+ acpi_handle gpe_device;
u8 ret;
ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
@@ -819,7 +821,12 @@ u32 acpi_any_gpe_status_set(void)
return (FALSE);
}
- ret = acpi_hw_check_all_gpes();
+ status = acpi_get_gpe_device(gpe_skip_number, &gpe_device);
+ if (ACPI_FAILURE(status)) {
+ gpe_device = NULL;
+ }
+
+ ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number);
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return (ret);
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index f4c285c2f595..49c46d4dd070 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -444,12 +444,19 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
return (AE_OK);
}
+struct acpi_gpe_block_status_context {
+ struct acpi_gpe_register_info *gpe_skip_register_info;
+ u8 gpe_skip_mask;
+ u8 retval;
+};
+
/******************************************************************************
*
* FUNCTION: acpi_hw_get_gpe_block_status
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
+ * context - GPE list walk context data
*
* RETURN: Success
*
@@ -460,12 +467,13 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
static acpi_status
acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
- void *ret_ptr)
+ void *context)
{
+ struct acpi_gpe_block_status_context *c = context;
struct acpi_gpe_register_info *gpe_register_info;
u64 in_enable, in_status;
acpi_status status;
- u8 *ret = ret_ptr;
+ u8 ret_mask;
u32 i;
/* Examine each GPE Register within the block */
@@ -485,7 +493,11 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
continue;
}
- *ret |= in_enable & in_status;
+ ret_mask = in_enable & in_status;
+ if (ret_mask && c->gpe_skip_register_info == gpe_register_info) {
+ ret_mask &= ~c->gpe_skip_mask;
+ }
+ c->retval |= ret_mask;
}
return (AE_OK);
@@ -561,24 +573,41 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
*
* FUNCTION: acpi_hw_check_all_gpes
*
- * PARAMETERS: None
+ * PARAMETERS: gpe_skip_device - GPE device of the GPE to skip
+ * gpe_skip_number - Number of the GPE to skip
*
* RETURN: Combined status of all GPEs
*
- * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
+ * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one
+ * represented by the "skip" arguments, and return TRUE if the
* status bit is set for at least one of them or FALSE otherwise.
*
******************************************************************************/
-u8 acpi_hw_check_all_gpes(void)
+u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number)
{
- u8 ret = 0;
+ struct acpi_gpe_block_status_context context = {
+ .gpe_skip_register_info = NULL,
+ .retval = 0,
+ };
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
- (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device,
+ gpe_skip_number);
+ if (gpe_event_info) {
+ context.gpe_skip_register_info = gpe_event_info->register_info;
+ context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info);
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return (ret != 0);
+ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context);
+ return (context.retval != 0);
}
#endif /* !ACPI_REDUCED_HARDWARE */
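A hedged worked example of the skip masking introduced above, mirroring acpi_hw_get_gpe_block_status() with hypothetical values for the register that holds the GPE being skipped:

	u8 in_enable = 0x3C, in_status = 0x14;	/* hypothetical register snapshot */
	u8 gpe_skip_mask = 0x04;		/* bit of the GPE being skipped */
	u8 ret_mask;

	ret_mask = in_enable & in_status;	/* 0x14: enabled and raised */
	ret_mask &= ~gpe_skip_mask;		/* 0x10: skipped GPE masked off */

Because 0x10 is still nonzero, a wakeup is reported; had only the skipped GPE been raised (status 0x04), the result would be zero, which is exactly why a pending but skipped GPE alone no longer makes acpi_any_gpe_status_set() return TRUE.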
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 243a25add28f..317ae870336b 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -300,6 +300,18 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state)
[ACPI_EVENT_POWER_BUTTON].
status_register_id, ACPI_CLEAR_STATUS);
+ /* Enable sleep button */
+
+ (void)
+ acpi_write_bit_register(acpi_gbl_fixed_event_info
+ [ACPI_EVENT_SLEEP_BUTTON].
+ enable_register_id, ACPI_ENABLE_EVENT);
+
+ (void)
+ acpi_write_bit_register(acpi_gbl_fixed_event_info
+ [ACPI_EVENT_SLEEP_BUTTON].
+ status_register_id, ACPI_CLEAR_STATUS);
+
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 370bbc867745..d91153f65700 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -13,9 +13,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
-/* Local Prototypes */
-static void acpi_ns_normalize_pathname(char *original_path);
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_external_pathname
@@ -30,7 +27,6 @@ static void acpi_ns_normalize_pathname(char *original_path);
* for error and debug statements.
*
******************************************************************************/
-
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
char *name_buffer;
@@ -164,7 +160,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
/* Build the path in the caller buffer */
(void)acpi_ns_build_normalized_path(node, buffer->pointer,
- required_size, no_trailing);
+ (u32)required_size, no_trailing);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
(char *)buffer->pointer, (u32) required_size));
@@ -315,7 +311,7 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
/* Build the path in the allocated buffer */
- (void)acpi_ns_build_normalized_path(node, name_buffer, size,
+ (void)acpi_ns_build_normalized_path(node, name_buffer, (u32)size,
no_trailing);
ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES, "%s: Path \"%s\"\n",
@@ -346,7 +342,7 @@ char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
char *full_path = NULL;
char *external_path = NULL;
char *prefix_path = NULL;
- u32 prefix_path_length = 0;
+ acpi_size prefix_path_length = 0;
/* If there is a prefix, get the pathname to it */
@@ -411,7 +407,7 @@ cleanup:
*
******************************************************************************/
-static void acpi_ns_normalize_pathname(char *original_path)
+void acpi_ns_normalize_pathname(char *original_path)
{
char *input_path = original_path;
char *new_path_buffer;
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 984129dcaa0c..0e6aba81605b 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -516,7 +516,7 @@ acpi_status acpi_install_method(u8 *buffer)
method_flags = *parser_state.aml++;
aml_start = parser_state.aml;
- aml_length = ACPI_PTR_DIFF(parser_state.pkg_end, aml_start);
+ aml_length = (u32)ACPI_PTR_DIFF(parser_state.pkg_end, aml_start);
/*
* Allocate resources up-front. We don't want to have to delete a new
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index f8403d480318..7490429ddbf6 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -202,14 +202,14 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_reallocate_root_table)
*
* PARAMETERS: signature - ACPI signature of needed table
* instance - Which instance (for SSDTs)
- * out_table_header - The pointer to the table header to fill
+ * out_table_header - The pointer to where the table header
+ * is returned
*
- * RETURN: Status and pointer to mapped table header
+ * RETURN: Status and a copy of the table header
*
- * DESCRIPTION: Finds an ACPI table header.
- *
- * NOTE: Caller is responsible in unmapping the header with
- * acpi_os_unmap_memory
+ * DESCRIPTION: Finds and returns an ACPI table header. Caller provides the
+ * memory where a copy of the header is to be returned
+ * (fixed length).
*
******************************************************************************/
acpi_status
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index befdd13b403b..177ab88d95de 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -78,7 +78,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"IPMI", /* 0x07 */
"GeneralPurposeIo", /* 0x08 */
"GenericSerialBus", /* 0x09 */
- "PlatformCommChannel" /* 0x0A */
+ "PCC" /* 0x0A */
};
const char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index eee263cb7beb..c365faf4e6cd 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -452,13 +452,13 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*
* FUNCTION: acpi_ut_update_object_reference
*
- * PARAMETERS: object - Increment ref count for this object
- * and all sub-objects
+ * PARAMETERS: object - Increment or decrement the ref count for
+ * this object and all sub-objects
* action - Either REF_INCREMENT or REF_DECREMENT
*
* RETURN: Status
*
- * DESCRIPTION: Increment the object reference count
+ * DESCRIPTION: Increment or decrement the object reference count
*
* Object references are incremented when:
* 1) An object is attached to a Node (namespace object)
@@ -492,7 +492,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
}
/*
- * All sub-objects must have their reference count incremented
+ * All sub-objects must have their reference count updated
* also. Different object types have different subobjects.
*/
switch (object->common.type) {
@@ -559,6 +559,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
break;
}
}
+
next_object = NULL;
break;
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 3e60bdac2200..bbec04c291d2 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -44,7 +44,7 @@ acpi_ut_get_element_length(u8 object_type,
*
* NOTE: We always allocate the worst-case object descriptor because
* these objects are cached, and we want them to be
- * one-size-satisifies-any-request. This in itself may not be
+ * one-size-satisfies-any-request. This in itself may not be
* the most memory efficient, but the efficiency of the object
* cache should more than make up for this!
*
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index a874dac7db5c..681c11f4af4e 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -332,7 +332,12 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
int i;
pos = string;
- end = string + size;
+
+ if (size != ACPI_UINT32_MAX) {
+ end = string + size;
+ } else {
+ end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
+ }
for (; *format; ++format) {
if (*format != '%') {
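An editorial note on the vsnprintf() hunk above: callers that want an effectively unbounded write, such as ACPICA's own sprintf(), pass ACPI_UINT32_MAX as the size, and the old unconditional "end = string + size" could then wrap the pointer. A hedged illustration of the failure mode on a 32-bit build:

	/*
	 * Hypothetical: with string at 0x10 and size 0xFFFFFFFF, the sum wraps
	 * to 0x0F, so every "pos < end" bounds check fails and nothing is
	 * written; pinning end to ACPI_UINT32_MAX keeps the bound usable.
	 */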
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index ed3d2d1a7ae9..7d04424189df 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1015,6 +1015,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
return ops;
if (dev_is_pci(dev)) {
+ struct iommu_fwspec *fwspec;
struct pci_bus *bus = to_pci_dev(dev)->bus;
struct iort_pci_alias_info info = { .dev = dev };
@@ -1027,8 +1028,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
err = pci_for_each_dma_alias(to_pci_dev(dev),
iort_pci_iommu_init, &info);
- if (!err && iort_pci_rc_supports_ats(node))
- dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && iort_pci_rc_supports_ats(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
int i = 0;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 111a407dcc77..366c389175d8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1365,19 +1365,19 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
},
},
{
- /* ECS EF20EA */
+ /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
.callback = battery_do_not_check_pmic_quirk,
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
},
},
{
- /* Lenovo Ideapad Miix 320 */
+ /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
.callback = battery_do_not_check_pmic_quirk,
.matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
},
{},
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index f6925f16c4a2..78cfc70cb320 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -30,17 +30,14 @@
#define ACPI_BUTTON_NOTIFY_STATUS 0x80
#define ACPI_BUTTON_SUBCLASS_POWER "power"
-#define ACPI_BUTTON_HID_POWER "PNP0C0C"
#define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button"
#define ACPI_BUTTON_TYPE_POWER 0x01
#define ACPI_BUTTON_SUBCLASS_SLEEP "sleep"
-#define ACPI_BUTTON_HID_SLEEP "PNP0C0E"
#define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button"
#define ACPI_BUTTON_TYPE_SLEEP 0x03
#define ACPI_BUTTON_SUBCLASS_LID "lid"
-#define ACPI_BUTTON_HID_LID "PNP0C0D"
#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch"
#define ACPI_BUTTON_TYPE_LID 0x05
@@ -91,18 +88,6 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
},
{
- /*
- * Asus T200TA, _LID keeps reporting closed after every second
- * openening of the lid. Causing immediate re-suspend after
- * opening every other open. Using LID_INIT_OPEN fixes this.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
- },
- .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
- },
- {
/* GP-electronic T701, _LID method points to a floating GPIO */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a1a858ad4d18..8b2e89c20c11 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -438,13 +438,10 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
* domain info.
*/
for_each_possible_cpu(i) {
- pr = all_cpu_data[i];
- if (!pr)
- continue;
-
if (cpumask_test_cpu(i, covered_cpus))
continue;
+ pr = all_cpu_data[i];
cpc_ptr = per_cpu(cpc_desc_ptr, i);
if (!cpc_ptr) {
retval = -EFAULT;
@@ -495,44 +492,28 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
cpumask_set_cpu(j, pr->shared_cpu_map);
}
- for_each_possible_cpu(j) {
+ for_each_cpu(j, pr->shared_cpu_map) {
if (i == j)
continue;
match_pr = all_cpu_data[j];
- if (!match_pr)
- continue;
-
- match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr) {
- retval = -EFAULT;
- goto err_ret;
- }
-
- match_pdomain = &(match_cpc_ptr->domain_info);
- if (match_pdomain->domain != pdomain->domain)
- continue;
-
match_pr->shared_type = pr->shared_type;
cpumask_copy(match_pr->shared_cpu_map,
pr->shared_cpu_map);
}
}
+ goto out;
err_ret:
for_each_possible_cpu(i) {
pr = all_cpu_data[i];
- if (!pr)
- continue;
/* Assume no coordination on any error parsing domain info */
- if (retval) {
- cpumask_clear(pr->shared_cpu_map);
- cpumask_set_cpu(i, pr->shared_cpu_map);
- pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- }
+ cpumask_clear(pr->shared_cpu_map);
+ cpumask_set_cpu(i, pr->shared_cpu_map);
+ pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
}
-
+out:
free_cpumask_var(covered_cpus);
return retval;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index b64c62bfcea5..5832bc10aca8 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -273,13 +273,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
end:
if (result) {
dev_warn(&device->dev, "Failed to change power state to %s\n",
- acpi_power_state_string(state));
+ acpi_power_state_string(target_state));
} else {
device->power.state = target_state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device [%s] transitioned to %s\n",
device->pnp.bus_id,
- acpi_power_state_string(state)));
+ acpi_power_state_string(target_state)));
}
return result;
@@ -1321,8 +1321,8 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
*/
static const struct acpi_device_id special_pm_ids[] = {
{"PNP0C0B", }, /* Generic ACPI fan */
- {"INT1044", }, /* Fan for Tiger Lake generation */
{"INT3404", }, /* Fan */
+ {"INTC1044", }, /* Fan for Tiger Lake generation */
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 387f27ef3368..e4e8b75d39f0 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -97,8 +97,8 @@ static int dptf_power_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3407_device_ids[] = {
- {"INT1047", 0},
{"INT3407", 0},
+ {"INTC1047", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 1ec7b6900662..bc71a6a60334 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -13,10 +13,6 @@
#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT1040"},
- {"INT1043"},
- {"INT1044"},
- {"INT1047"},
{"INT3400"},
{"INT3401", INT3401_DEVICE},
{"INT3402"},
@@ -28,6 +24,10 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INT3409"},
{"INT340A"},
{"INT340B"},
+ {"INTC1040"},
+ {"INTC1043"},
+ {"INTC1044"},
+ {"INTC1047"},
{""},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d1f1cf5d4bf0..1af2125e17d5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -182,7 +182,6 @@ static bool boot_ec_is_ecdt = false;
static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;
-static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
@@ -690,21 +689,9 @@ static void advance_transaction(struct acpi_ec *ec)
wakeup = true;
}
goto out;
- } else {
- if (EC_FLAGS_QUERY_HANDSHAKE &&
- !(status & ACPI_EC_FLAG_SCI) &&
- (t->command == ACPI_EC_COMMAND_QUERY)) {
- ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
- t->rdata[t->ri++] = 0x00;
- ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
- ec_dbg_evt("Command(%s) completed by software",
- acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- wakeup = true;
- } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
- acpi_ec_write_cmd(ec, t->command);
- ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
- } else
- goto err;
+ } else if (!(status & ACPI_EC_FLAG_IBF)) {
+ acpi_ec_write_cmd(ec, t->command);
+ ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
goto out;
}
err:
@@ -1427,57 +1414,45 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
return AE_CTRL_TERMINATE;
}
-static void install_gpe_event_handler(struct acpi_ec *ec)
-{
- acpi_status status =
- acpi_install_gpe_raw_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler,
- ec);
- if (ACPI_SUCCESS(status)) {
- /* This is not fatal as we can poll EC events */
- set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
- acpi_ec_leave_noirq(ec);
- if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
- acpi_ec_enable_gpe(ec, true);
- }
-}
-
-/* ACPI reduced hardware platforms use a GpioInt specified in _CRS. */
-static int install_gpio_irq_event_handler(struct acpi_ec *ec,
- struct acpi_device *device)
+static bool install_gpe_event_handler(struct acpi_ec *ec)
{
- int irq = acpi_dev_gpio_irq_get(device, 0);
- int ret;
-
- if (irq < 0)
- return irq;
+ acpi_status status;
- ret = request_irq(irq, acpi_ec_irq_handler, IRQF_SHARED,
- "ACPI EC", ec);
+ status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler, ec);
+ if (ACPI_FAILURE(status))
+ return false;
- /*
- * Unlike the GPE case, we treat errors here as fatal, we'll only
- * implement GPIO polling if we find a case that needs it.
- */
- if (ret < 0)
- return ret;
+ if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1)
+ acpi_ec_enable_gpe(ec, true);
- ec->irq = irq;
- set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
- acpi_ec_leave_noirq(ec);
+ return true;
+}
- return 0;
+static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
+{
+ return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
+ "ACPI EC", ec) >= 0;
}
-/*
- * Note: This function returns an error code only when the address space
- * handler is not installed, which means "not able to handle
- * transactions".
+/**
+ * ec_install_handlers - Install service callbacks and register query methods.
+ * @ec: Target EC.
+ * @device: ACPI device object corresponding to @ec.
+ *
+ * Install a handler for the EC address space type unless it has been installed
+ * already. If @device is not NULL, also look for EC query methods in the
+ * namespace and register them, and install an event (either GPE or GPIO IRQ)
+ * handler for the EC, if possible.
+ *
+ * Return:
+ * -ENODEV if the address space handler cannot be installed, which means
+ * "unable to handle transactions",
+ * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
+ * or 0 (success) otherwise.
*/
-static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
- bool handle_events)
+static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device)
{
acpi_status status;
@@ -1490,26 +1465,28 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
&acpi_ec_space_handler,
NULL, ec);
if (ACPI_FAILURE(status)) {
- if (status == AE_NOT_FOUND) {
- /*
- * Maybe OS fails in evaluating the _REG
- * object. The AE_NOT_FOUND error will be
- * ignored and OS * continue to initialize
- * EC.
- */
- pr_err("Fail in evaluating the _REG object"
- " of EC device. Broken bios is suspected.\n");
- } else {
- acpi_ec_stop(ec, false);
- return -ENODEV;
- }
+ acpi_ec_stop(ec, false);
+ return -ENODEV;
}
set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
}
- if (!handle_events)
+ if (!device)
return 0;
+ if (ec->gpe < 0) {
+ /* ACPI reduced hardware platforms use a GpioInt from _CRS. */
+ int irq = acpi_dev_gpio_irq_get(device, 0);
+ /*
+ * Bail out right away for deferred probing or complete the
+ * initialization regardless of any other errors.
+ */
+ if (irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (irq >= 0)
+ ec->irq = irq;
+ }
+
if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
@@ -1518,16 +1495,21 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
- if (ec->gpe >= 0) {
- install_gpe_event_handler(ec);
- } else if (device) {
- int ret = install_gpio_irq_event_handler(ec, device);
-
- if (ret)
- return ret;
- } else { /* No GPE and no GpioInt? */
- return -ENODEV;
+ bool ready = false;
+
+ if (ec->gpe >= 0)
+ ready = install_gpe_event_handler(ec);
+ else if (ec->irq >= 0)
+ ready = install_gpio_irq_event_handler(ec);
+
+ if (ready) {
+ set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
}
+ /*
+ * Failures to install an event handler are not fatal, because
+ * the EC can be polled for events.
+ */
}
/* EC is fully operational, allow queries */
acpi_ec_enable_event(ec);
@@ -1574,61 +1556,46 @@ static void ec_remove_handlers(struct acpi_ec *ec)
}
}
-static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device,
- bool handle_events)
+static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device)
{
int ret;
- ret = ec_install_handlers(ec, device, handle_events);
+ ret = ec_install_handlers(ec, device);
if (ret)
return ret;
/* First EC capable of handling transactions */
- if (!first_ec) {
+ if (!first_ec)
first_ec = ec;
- acpi_handle_info(first_ec->handle, "Used as first EC\n");
- }
- acpi_handle_info(ec->handle,
- "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
- ec->gpe, ec->irq, ec->command_addr, ec->data_addr);
- return ret;
-}
+ pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
+ ec->data_addr);
-static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
-{
- struct acpi_table_ecdt *ecdt_ptr;
- acpi_status status;
- acpi_handle handle;
-
- status = acpi_get_table(ACPI_SIG_ECDT, 1,
- (struct acpi_table_header **)&ecdt_ptr);
- if (ACPI_FAILURE(status))
- return false;
-
- status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
- if (ACPI_FAILURE(status))
- return false;
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
+ if (ec->gpe >= 0)
+ pr_info("GPE=0x%x\n", ec->gpe);
+ else
+ pr_info("IRQ=%d\n", ec->irq);
+ }
- *phandle = handle;
- return true;
+ return ret;
}
static int acpi_ec_add(struct acpi_device *device)
{
- struct acpi_ec *ec = NULL;
- bool dep_update = true;
- acpi_status status;
+ struct acpi_ec *ec;
int ret;
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
- if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
- boot_ec_is_ecdt = true;
+ if (boot_ec && (boot_ec->handle == device->handle ||
+ !strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
+ /* Fast path: this device corresponds to the boot EC. */
ec = boot_ec;
- dep_update = false;
} else {
+ acpi_status status;
+
ec = acpi_ec_alloc();
if (!ec)
return -ENOMEM;
@@ -1636,12 +1603,11 @@ static int acpi_ec_add(struct acpi_device *device)
status = ec_parse_device(device->handle, 0, ec, NULL);
if (status != AE_CTRL_TERMINATE) {
ret = -EINVAL;
- goto err_alloc;
+ goto err;
}
if (boot_ec && ec->command_addr == boot_ec->command_addr &&
ec->data_addr == boot_ec->data_addr) {
- boot_ec_is_ecdt = false;
/*
* Trust PNP0C09 namespace location rather than
* ECDT ID. But trust ECDT GPE rather than _GPE
@@ -1655,15 +1621,18 @@ static int acpi_ec_add(struct acpi_device *device)
}
}
- ret = acpi_ec_setup(ec, device, true);
+ ret = acpi_ec_setup(ec, device);
if (ret)
- goto err_query;
+ goto err;
if (ec == boot_ec)
acpi_handle_info(boot_ec->handle,
- "Boot %s EC used to handle transactions and events\n",
+ "Boot %s EC initialization complete\n",
boot_ec_is_ecdt ? "ECDT" : "DSDT");
+ acpi_handle_info(ec->handle,
+ "EC: Used to handle transactions and events\n");
+
device->driver_data = ec;
ret = !!request_region(ec->data_addr, 1, "EC data");
@@ -1671,19 +1640,16 @@ static int acpi_ec_add(struct acpi_device *device)
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
- if (dep_update) {
- /* Reprobe devices depending on the EC */
- acpi_walk_dep_device_list(ec->handle);
- }
+ /* Reprobe devices depending on the EC */
+ acpi_walk_dep_device_list(ec->handle);
+
acpi_handle_debug(ec->handle, "enumerated.\n");
return 0;
-err_query:
- if (ec != boot_ec)
- acpi_ec_remove_query_handlers(ec, true, 0);
-err_alloc:
+err:
if (ec != boot_ec)
acpi_ec_free(ec);
+
return ret;
}
@@ -1775,7 +1741,7 @@ void __init acpi_ec_dsdt_probe(void)
 * At this point, the GPE is not fully initialized, so do not
* handle the events.
*/
- ret = acpi_ec_setup(ec, NULL, false);
+ ret = acpi_ec_setup(ec, NULL);
if (ret) {
acpi_ec_free(ec);
return;
@@ -1788,52 +1754,43 @@ void __init acpi_ec_dsdt_probe(void)
}
/*
- * If the DSDT EC is not functioning, we still need to prepare a fully
- * functioning ECDT EC first in order to handle the events.
- * https://bugzilla.kernel.org/show_bug.cgi?id=115021
+ * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
+ *
+ * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not
+ * found a matching object in the namespace.
+ *
+ * Next, in case the DSDT EC is not functioning, it is still necessary to
+ * provide a functional ECDT EC to handle events, so add an extra device object
+ * to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021).
+ *
+ * This is useful on platforms with valid ECDT and invalid DSDT EC settings,
+ * like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847).
*/
-static int __init acpi_ec_ecdt_start(void)
+static void __init acpi_ec_ecdt_start(void)
{
+ struct acpi_table_ecdt *ecdt_ptr;
acpi_handle handle;
+ acpi_status status;
- if (!boot_ec)
- return -ENODEV;
- /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
- if (!boot_ec_is_ecdt)
- return -ENODEV;
+ /* Bail out if a matching EC has been found in the namespace. */
+ if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT)
+ return;
- /*
- * At this point, the namespace and the GPE is initialized, so
- * start to find the namespace objects and handle the events.
- *
- * Note: ec->handle can be valid if this function is called after
- * acpi_ec_add(), hence the fast path.
- */
- if (boot_ec->handle == ACPI_ROOT_OBJECT) {
- if (!acpi_ec_ecdt_get_handle(&handle))
- return -ENODEV;
- boot_ec->handle = handle;
- }
+ /* Look up the object pointed to from the ECDT in the namespace. */
+ status = acpi_get_table(ACPI_SIG_ECDT, 1,
+ (struct acpi_table_header **)&ecdt_ptr);
+ if (ACPI_FAILURE(status))
+ return;
- /* Register to ACPI bus with PM ops attached */
- return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
-}
+ status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
+ if (ACPI_FAILURE(status))
+ return;
-#if 0
-/*
- * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
- * set, for which case, we complete the QR_EC without issuing it to the
- * firmware.
- * https://bugzilla.kernel.org/show_bug.cgi?id=82611
- * https://bugzilla.kernel.org/show_bug.cgi?id=97381
- */
-static int ec_flag_query_handshake(const struct dmi_system_id *id)
-{
- pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
- EC_FLAGS_QUERY_HANDSHAKE = 1;
- return 0;
+ boot_ec->handle = handle;
+
+ /* Add a special ACPI device object to represent the boot EC. */
+ acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}
-#endif
/*
* On some hardware it is necessary to clear events accumulated by the EC during
@@ -1962,7 +1919,7 @@ void __init acpi_ec_ecdt_probe(void)
* At this point, the namespace is not initialized, so do not find
* the namespace objects, or handle the events.
*/
- ret = acpi_ec_setup(ec, NULL, false);
+ ret = acpi_ec_setup(ec, NULL);
if (ret) {
acpi_ec_free(ec);
return;
@@ -2042,13 +1999,30 @@ bool acpi_ec_dispatch_gpe(void)
u32 ret;
if (!first_ec)
+ return acpi_any_gpe_status_set(U32_MAX);
+
+ /*
+ * Report wakeup if the status bit is set for any enabled GPE other
+ * than the EC one.
+ */
+ if (acpi_any_gpe_status_set(first_ec->gpe))
+ return true;
+
+ if (ec_no_wakeup)
return false;
+ /*
+ * Dispatch the EC GPE in-band, but do not report wakeup in any case
+ * to allow the caller to process events properly after that.
+ */
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
if (ret == ACPI_INTERRUPT_HANDLED) {
pm_pr_dbg("EC GPE dispatched\n");
- return true;
+
+ /* Flush the event and query workqueues. */
+ acpi_ec_flush_work();
}
+
return false;
}
#endif /* CONFIG_PM_SLEEP */
@@ -2160,14 +2134,13 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
{ },
};
-int __init acpi_ec_init(void)
+void __init acpi_ec_init(void)
{
int result;
- int ecdt_fail, dsdt_fail;
result = acpi_ec_init_workqueues();
if (result)
- return result;
+ return;
/*
* Disable EC wakeup on following systems to prevent periodic
@@ -2178,16 +2151,10 @@ int __init acpi_ec_init(void)
pr_debug("Disabling EC wakeup on suspend-to-idle\n");
}
- /* Drivers must be started after acpi_ec_query_init() */
- dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
- /*
- * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
- * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
- * settings but invalid DSDT settings.
- * https://bugzilla.kernel.org/show_bug.cgi?id=196847
- */
- ecdt_fail = acpi_ec_ecdt_start();
- return ecdt_fail && dsdt_fail ? -ENODEV : 0;
+ /* Driver must be registered after acpi_ec_init_workqueues(). */
+ acpi_bus_register_driver(&acpi_ec_driver);
+
+ acpi_ec_ecdt_start();
}
/* EC driver currently not unloadable */
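
The rewritten acpi_ec_dispatch_gpe() above changes the suspend-to-idle wakeup policy: a pending status bit on any enabled GPE other than the EC one counts as a genuine wakeup, while the EC GPE itself is dispatched in-band and never reported. A condensed, illustration-only sketch of that flow (it folds the real functions from the hunks above into one, and assumes acpi_any_gpe_status_set() takes the number of a GPE to skip, with U32_MAX meaning "skip none"):

static bool s2idle_wakeup_sketch(void)
{
	/* Any enabled GPE other than the EC one set? Genuine wakeup. */
	if (acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX))
		return true;

	if (!first_ec || ec_no_wakeup)
		return false;

	/* Handle the EC GPE here; flush the event/query workqueues so
	 * the outcome is visible before the caller re-checks wakeups. */
	if (acpi_dispatch_gpe(NULL, first_ec->gpe) == ACPI_INTERRUPT_HANDLED)
		acpi_ec_flush_work();

	return false;	/* the EC GPE alone never reports a wakeup */
}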
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index aaf4e8f348cf..873e039ad4b7 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -276,29 +276,29 @@ static ssize_t show_state(struct device *dev, struct device_attribute *attr, cha
int count;
if (fps->control == 0xFFFFFFFF || fps->control > 100)
- count = snprintf(buf, PAGE_SIZE, "not-defined:");
+ count = scnprintf(buf, PAGE_SIZE, "not-defined:");
else
- count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+ count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
- count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
else
- count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->trip_point);
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point);
if (fps->speed == 0xFFFFFFFF)
- count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
else
- count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->speed);
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed);
if (fps->noise_level == 0xFFFFFFFF)
- count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
else
- count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->noise_level * 100);
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100);
if (fps->power == 0xFFFFFFFF)
- count += snprintf(&buf[count], PAGE_SIZE, "not-defined\n");
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined\n");
else
- count += snprintf(&buf[count], PAGE_SIZE, "%lld\n", fps->power);
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power);
return count;
}
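
The snprintf()-to-scnprintf() conversion above is not cosmetic: snprintf() returns the length the output would have had without truncation, so accumulating its return value can push the write offset past the end of the buffer, whereas scnprintf() returns the number of characters actually stored. A minimal sketch of the safe accumulation pattern (function and values hypothetical):

static size_t fill_state(char *buf, size_t size)
{
	size_t len = 0;

	/* scnprintf() never returns more than the space left, so
	 * buf + len and size - len stay valid even on truncation. */
	len += scnprintf(buf + len, size - len, "%s:", "not-defined");
	len += scnprintf(buf + len, size - len, "%d\n", 42);

	return len;
}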
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3616daec650b..43411a7457cd 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -190,7 +190,7 @@ extern struct acpi_ec *first_ec;
/* External interfaces use first EC only, so remember */
typedef int (*acpi_ec_query_func) (void *data);
-int acpi_ec_init(void);
+void acpi_ec_init(void);
void acpi_ec_ecdt_probe(void);
void acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index a3320f93616d..fa4500f9cfd1 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -360,7 +360,7 @@ static union acpi_object *acpi_label_info(acpi_handle handle)
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
- static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
+ static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
[NVDIMM_FAMILY_INTEL] = {
[NVDIMM_INTEL_GET_MODES] = 2,
[NVDIMM_INTEL_GET_FWINFO] = 2,
@@ -386,7 +386,7 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
if (family > NVDIMM_FAMILY_MAX)
return 0;
- if (func > 31)
+ if (func > NVDIMM_CMD_MAX)
return 0;
id = revid_table[family][func];
if (id == 0)
@@ -492,7 +492,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
* Check for a valid command. For ND_CMD_CALL, we also have to
* make sure that the DSM function is supported.
*/
- if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+ if (cmd == ND_CMD_CALL &&
+ (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
return -ENOTTY;
else if (!test_bit(cmd, &cmd_mask))
return -ENOTTY;
@@ -2026,8 +2027,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
continue;
}
- if (nfit_mem->bdw && nfit_mem->memdev_pmem)
+ if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
set_bit(NDD_ALIASING, &flags);
+ set_bit(NDD_LABELING, &flags);
+ }
/* collate flags across all memdevs for this dimm */
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -3492,7 +3495,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
if (nvdimm && cmd == ND_CMD_CALL &&
call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
func = call_pkg->nd_command;
- if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
+ if (func > NVDIMM_CMD_MAX ||
+ (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
return -EOPNOTSUPP;
}
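
Both NVDIMM_CMD_MAX checks above close the same hole: func arrives from userspace in an ND_CMD_CALL package, and test_bit() does no bounds checking of its own, so an oversized value would index past the 32 defined function bits of the mask. The check order, reduced to a hypothetical helper:

static int validate_dsm_func(unsigned int func, const unsigned long *dsm_mask)
{
	if (func > NVDIMM_CMD_MAX)	/* range-check the index first */
		return -ENOTTY;

	if (!test_bit(func, dsm_mask))	/* only then consult the mask */
		return -ENOTTY;

	return 0;
}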
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 24241941181c..f5525f8bb770 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -34,6 +34,7 @@
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
+#define NVDIMM_CMD_MAX 31
#define NVDIMM_STANDARD_CMDMASK \
(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
@@ -144,32 +145,32 @@ struct nfit_spa {
unsigned long ars_state;
u32 clear_err_unit;
u32 max_ars;
- struct acpi_nfit_system_address spa[0];
+ struct acpi_nfit_system_address spa[];
};
struct nfit_dcr {
struct list_head list;
- struct acpi_nfit_control_region dcr[0];
+ struct acpi_nfit_control_region dcr[];
};
struct nfit_bdw {
struct list_head list;
- struct acpi_nfit_data_region bdw[0];
+ struct acpi_nfit_data_region bdw[];
};
struct nfit_idt {
struct list_head list;
- struct acpi_nfit_interleave idt[0];
+ struct acpi_nfit_interleave idt[];
};
struct nfit_flush {
struct list_head list;
- struct acpi_nfit_flush_address flush[0];
+ struct acpi_nfit_flush_address flush[];
};
struct nfit_memdev {
struct list_head list;
- struct acpi_nfit_memory_map memdev[0];
+ struct acpi_nfit_memory_map memdev[];
};
enum nfit_mem_flags {
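
The [0]-to-[] conversions replace the old GCC zero-length-array extension with C99 flexible array members. The layout is unchanged, but [] lets the compiler and the kernel's bounds/fortify checkers know the trailing array is genuinely variable-sized. A sketch of how such a structure is typically allocated, assuming the struct_size() helper from <linux/overflow.h> (struct name hypothetical):

struct nfit_example {
	struct list_head list;
	struct acpi_nfit_system_address spa[];	/* flexible array member */
};

static struct nfit_example *nfit_example_alloc(gfp_t gfp)
{
	struct nfit_example *e;

	/* struct_size() adds room for one trailing spa[] element,
	 * with integer-overflow checking built in. */
	e = kzalloc(struct_size(e, spa, 1), gfp);

	return e;
}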
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index eadbf90e65d1..47b4969d9b93 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -72,47 +72,6 @@ int acpi_map_pxm_to_node(int pxm)
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);
-/**
- * acpi_map_pxm_to_online_node - Map proximity ID to online node
- * @pxm: ACPI proximity ID
- *
- * This is similar to acpi_map_pxm_to_node(), but always returns an online
- * node. When the mapped node from a given proximity ID is offline, it
- * looks up the node distance table and returns the nearest online node.
- *
- * ACPI device drivers, which are called after the NUMA initialization has
- * completed in the kernel, can call this interface to obtain their device
- * NUMA topology from ACPI tables. Such drivers do not have to deal with
- * offline nodes. A node may be offline when a device proximity ID is
- * unique, SRAT memory entry does not exist, or NUMA is disabled, ex.
- * "numa=off" on x86.
- */
-int acpi_map_pxm_to_online_node(int pxm)
-{
- int node, min_node;
-
- node = acpi_map_pxm_to_node(pxm);
-
- if (node == NUMA_NO_NODE)
- node = 0;
-
- min_node = node;
- if (!node_online(node)) {
- int min_dist = INT_MAX, dist, n;
-
- for_each_online_node(n) {
- dist = node_distance(node, n);
- if (dist < min_dist) {
- min_dist = dist;
- min_node = n;
- }
- }
- }
-
- return min_node;
-}
-EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
-
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 41168c027a5a..762c5d50b8fe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1598,6 +1598,7 @@ void acpi_os_delete_lock(acpi_spinlock handle)
*/
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
+ __acquires(lockp)
{
acpi_cpu_flags flags;
spin_lock_irqsave(lockp, flags);
@@ -1609,6 +1610,7 @@ acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
*/
void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
+ __releases(lockp)
{
spin_unlock_irqrestore(lockp, flags);
}
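
__acquires() and __releases() expand to nothing in a normal build; they exist for sparse, telling it that these helpers intentionally change the lock context so it stops reporting a "context imbalance" for the asymmetric acquire/release pair. The same pattern on a minimal hypothetical wrapper:

static void example_lock(spinlock_t *lock)
	__acquires(lock)
{
	spin_lock(lock);	/* sparse: lock context +1 */
}

static void example_unlock(spinlock_t *lock)
	__releases(lock)
{
	spin_unlock(lock);	/* sparse: lock context -1 */
}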
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 00a6da2121be..ed3d2182cf2c 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -322,10 +322,10 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
resource->res.data.extended_irq.polarity =
link->irq.polarity;
if (link->irq.triggering == ACPI_EDGE_SENSITIVE)
- resource->res.data.irq.shareable =
+ resource->res.data.extended_irq.shareable =
ACPI_EXCLUSIVE;
else
- resource->res.data.irq.shareable = ACPI_SHARED;
+ resource->res.data.extended_irq.shareable = ACPI_SHARED;
resource->res.data.extended_irq.interrupt_count = 1;
resource->res.data.extended_irq.interrupts[0] = irq;
/* ignore resource_source, it's optional */
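
The pci_link.c fix corrects a union mix-up: struct acpi_resource.data overlays all resource layouts, and this path builds an extended IRQ descriptor, so writing data.irq.shareable stored the value through the wrong member. A hypothetical helper that makes the type/member pairing explicit:

static void set_irq_shareable(struct acpi_resource *res, u8 share)
{
	/* data is a union: the member used must match res->type. */
	if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ)
		res->data.extended_irq.shareable = share;
	else if (res->type == ACPI_RESOURCE_TYPE_IRQ)
		res->data.irq.shareable = share;
}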
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d1e666ef3fcc..ac8ad6cb82aa 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -131,6 +131,7 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = {
{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
{ OSC_PCI_MSI_SUPPORT, "MSI" },
+ { OSC_PCI_EDR_SUPPORT, "EDR" },
{ OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" },
};
@@ -141,6 +142,7 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = {
{ OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
{ OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
{ OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" },
+ { OSC_PCI_EXPRESS_DPC_CONTROL, "DPC" },
};
static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
@@ -153,7 +155,7 @@ static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
buf[0] = '\0';
for (i = 0, entry = table; i < size; i++, entry++)
if (word & entry->bit)
- len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
len ? " " : "", entry->desc);
dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf);
@@ -440,6 +442,8 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
if (pci_msi_enabled())
support |= OSC_PCI_MSI_SUPPORT;
+ if (IS_ENABLED(CONFIG_PCIE_EDR))
+ support |= OSC_PCI_EDR_SUPPORT;
decode_osc_support(root, "OS supports", support);
status = acpi_pci_osc_support(root, support);
@@ -487,6 +491,15 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
control |= OSC_PCI_EXPRESS_AER_CONTROL;
}
+ /*
+ * Per the Downstream Port Containment Related Enhancements ECN to
+ * the PCI Firmware Spec, r3.2, sec 4.5.1, table 4-5,
+ * OSC_PCI_EXPRESS_DPC_CONTROL indicates the OS supports both DPC
+ * and EDR.
+ */
+ if (IS_ENABLED(CONFIG_PCIE_DPC) && IS_ENABLED(CONFIG_PCIE_EDR))
+ control |= OSC_PCI_EXPRESS_DPC_CONTROL;
+
requested = control;
status = acpi_pci_osc_control_set(handle, &control,
OSC_PCI_EXPRESS_CAPABILITY_CONTROL);
@@ -916,6 +929,8 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
host_bridge->native_pme = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL))
host_bridge->native_ltr = 0;
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_DPC_CONTROL))
+ host_bridge->native_dpc = 0;
/*
* Evaluate the "PCI Boot Configuration" _DSM Function. If it
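
The negotiation above requests DPC control only when the kernel supports both DPC and EDR, and records whether the OS obtained it in the host bridge. A hedged sketch of how a driver might consume that outcome (the helper and call site are hypothetical; native_dpc is the field cleared above):

static bool os_owns_dpc(struct pci_dev *pdev)
{
	struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);

	/* Cleared during _OSC negotiation when firmware keeps DPC. */
	return host->native_dpc;
}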
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 0e62ef265ce4..7892980b3ce4 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -22,14 +22,13 @@ ACPI_MODULE_NAME("sleep")
static int
acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
{
- struct list_head *node, *next;
+ struct acpi_device *dev, *tmp;
seq_printf(seq, "Device\tS-state\t Status Sysfs node\n");
mutex_lock(&acpi_device_lock);
- list_for_each_safe(node, next, &acpi_wakeup_device_list) {
- struct acpi_device *dev =
- container_of(node, struct acpi_device, wakeup_list);
+ list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
+ wakeup_list) {
struct acpi_device_physical_node *entry;
if (!dev->wakeup.flags.valid)
@@ -96,7 +95,7 @@ acpi_system_write_wakeup_device(struct file *file,
const char __user * buffer,
size_t count, loff_t * ppos)
{
- struct list_head *node, *next;
+ struct acpi_device *dev, *tmp;
char strbuf[5];
char str[5] = "";
@@ -109,9 +108,8 @@ acpi_system_write_wakeup_device(struct file *file,
sscanf(strbuf, "%s", str);
mutex_lock(&acpi_device_lock);
- list_for_each_safe(node, next, &acpi_wakeup_device_list) {
- struct acpi_device *dev =
- container_of(node, struct acpi_device, wakeup_list);
+ list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
+ wakeup_list) {
if (!dev->wakeup.flags.valid)
continue;
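
Both proc.c hunks apply the same modernization: list_for_each_entry_safe() folds the container_of() step into the iterator while remaining safe against removal of the current entry. The idiom in isolation:

struct acpi_device *dev, *tmp;

list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list, wakeup_list) {
	if (!dev->wakeup.flags.valid)
		continue;
	/* dev may be unlinked here with list_del(); tmp keeps the
	 * iteration valid. */
}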
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 532a1ae3595a..a0bd56ece3ff 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -897,13 +897,6 @@ static long __acpi_processor_get_throttling(void *data)
return pr->throttling.acpi_processor_get_throttling(pr);
}
-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
-{
- if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
- return fn(arg);
- return work_on_cpu(cpu, fn, arg);
-}
-
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
if (!pr)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e5f95922bc21..fd9d4e8318e9 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -980,16 +980,6 @@ static int acpi_s2idle_prepare_late(void)
return 0;
}
-static void acpi_s2idle_sync(void)
-{
- /*
- * The EC driver uses the system workqueue and an additional special
- * one, so those need to be flushed too.
- */
- acpi_ec_flush_work();
- acpi_os_wait_events_complete(); /* synchronize Notify handling */
-}
-
static bool acpi_s2idle_wake(void)
{
if (!acpi_sci_irq_valid())
@@ -1012,21 +1002,16 @@ static bool acpi_s2idle_wake(void)
if (acpi_any_fixed_event_status_set())
return true;
- /*
- * If there are no EC events to process and at least one of the
- * other enabled GPEs is active, the wakeup is regarded as a
- * genuine one.
- *
- * Note that the checks below must be carried out in this order
- * to avoid returning prematurely due to a change of the EC GPE
- * status bit from unset to set between the checks with the
- * status bits of all the other GPEs unset.
- */
- if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
+ /* Check wakeups from drivers sharing the SCI. */
+ if (acpi_check_wakeup_handlers())
+ return true;
+
+ /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+ if (acpi_ec_dispatch_gpe())
return true;
/*
- * Cancel the wakeup and process all pending events in case
+ * Cancel the SCI wakeup and process all pending events in case
* there are any wakeup ones in there.
*
* Note that if any non-EC GPEs are active at this point, the
@@ -1034,8 +1019,7 @@ static bool acpi_s2idle_wake(void)
* should be missed by canceling the wakeup here.
*/
pm_system_cancel_wakeup();
-
- acpi_s2idle_sync();
+ acpi_os_wait_events_complete();
/*
* The SCI is in the "suspended" state now and it cannot produce
@@ -1068,7 +1052,8 @@ static void acpi_s2idle_restore(void)
* of GPEs.
*/
acpi_os_wait_events_complete(); /* synchronize GPE processing */
- acpi_s2idle_sync();
+ acpi_ec_flush_work(); /* flush the EC driver's workqueues */
+ acpi_os_wait_events_complete(); /* synchronize Notify handling */
s2idle_wakeup = false;
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 41675d24a9bc..3d90480ce1b1 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -2,6 +2,7 @@
extern void acpi_enable_wakeup_devices(u8 sleep_state);
extern void acpi_disable_wakeup_devices(u8 sleep_state);
+extern bool acpi_check_wakeup_handlers(void);
extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 180ac4329763..0e905c3d1645 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -501,7 +501,7 @@ static const char * const table_sigs[] = {
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
- NULL };
+ ACPI_SIG_NHLT, NULL };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/tiny-power-button.c b/drivers/acpi/tiny-power-button.c
new file mode 100644
index 000000000000..6273d73c0b59
--- /dev/null
+++ b/drivers/acpi/tiny-power-button.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/acpi.h>
+#include <acpi/button.h>
+
+ACPI_MODULE_NAME("tiny-power-button");
+MODULE_AUTHOR("Josh Triplett");
+MODULE_DESCRIPTION("ACPI Tiny Power Button Driver");
+MODULE_LICENSE("GPL");
+
+static int power_signal __read_mostly = CONFIG_ACPI_TINY_POWER_BUTTON_SIGNAL;
+module_param(power_signal, int, 0644);
+MODULE_PARM_DESC(power_signal, "Power button sends this signal to init");
+
+static const struct acpi_device_id tiny_power_button_device_ids[] = {
+ { ACPI_BUTTON_HID_POWER, 0 },
+ { ACPI_BUTTON_HID_POWERF, 0 },
+ { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, tiny_power_button_device_ids);
+
+static int acpi_noop_add_remove(struct acpi_device *device)
+{
+ return 0;
+}
+
+static void acpi_tiny_power_button_notify(struct acpi_device *device, u32 event)
+{
+ kill_cad_pid(power_signal, 1);
+}
+
+static struct acpi_driver acpi_tiny_power_button_driver = {
+ .name = "tiny-power-button",
+ .class = "tiny-power-button",
+ .ids = tiny_power_button_device_ids,
+ .ops = {
+ .add = acpi_noop_add_remove,
+ .remove = acpi_noop_add_remove,
+ .notify = acpi_tiny_power_button_notify,
+ },
+};
+
+module_driver(acpi_tiny_power_button_driver,
+ acpi_bus_register_driver,
+ acpi_bus_unregister_driver);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 419f814d596a..b4994e50608d 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -352,6 +352,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+ {
+ .callback = video_detect_force_native,
+ .ident = "Acer Aspire 5738z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ DMI_MATCH(DMI_BOARD_NAME, "JV50"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
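
Entries such as the Acer one above are consumed by dmi_check_system(), which walks the table, fires .callback on every entry whose DMI_MATCH strings all match the firmware-reported fields, and stops once a callback returns non-zero. A minimal sketch with a hypothetical entry:

static int force_native(const struct dmi_system_id *d)
{
	pr_info("%s: forcing native backlight\n", d->ident);
	return 1;	/* non-zero ends the table walk */
}

static const struct dmi_system_id example_table[] = {
	{
		.callback = force_native,
		.ident = "Example laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model"),
		},
	},
	{ }	/* zero-filled terminator */
};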
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 9614126bf56e..0b2e42530adf 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -12,6 +12,15 @@
#include "internal.h"
#include "sleep.h"
+struct acpi_wakeup_handler {
+ struct list_head list_node;
+ bool (*wakeup)(void *context);
+ void *context;
+};
+
+static LIST_HEAD(acpi_wakeup_handler_head);
+static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
+
/*
 * We didn't take acpi_device_lock in this file because doing so triggers an oops
 * in suspend/resume and isn't really required, as this is called in S-state. At
@@ -30,12 +39,10 @@ ACPI_MODULE_NAME("wakeup_devices")
*/
void acpi_enable_wakeup_devices(u8 sleep_state)
{
- struct list_head *node, *next;
-
- list_for_each_safe(node, next, &acpi_wakeup_device_list) {
- struct acpi_device *dev =
- container_of(node, struct acpi_device, wakeup_list);
+ struct acpi_device *dev, *tmp;
+ list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
+ wakeup_list) {
if (!dev->wakeup.flags.valid
|| sleep_state > (u32) dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
@@ -57,12 +64,10 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
*/
void acpi_disable_wakeup_devices(u8 sleep_state)
{
- struct list_head *node, *next;
-
- list_for_each_safe(node, next, &acpi_wakeup_device_list) {
- struct acpi_device *dev =
- container_of(node, struct acpi_device, wakeup_list);
+ struct acpi_device *dev, *tmp;
+ list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
+ wakeup_list) {
if (!dev->wakeup.flags.valid
|| sleep_state > (u32) dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
@@ -79,13 +84,11 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
int __init acpi_wakeup_device_init(void)
{
- struct list_head *node, *next;
+ struct acpi_device *dev, *tmp;
mutex_lock(&acpi_device_lock);
- list_for_each_safe(node, next, &acpi_wakeup_device_list) {
- struct acpi_device *dev = container_of(node,
- struct acpi_device,
- wakeup_list);
+ list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
+ wakeup_list) {
if (device_can_wakeup(&dev->dev)) {
/* Button GPEs are supposed to be always enabled. */
acpi_enable_gpe(dev->wakeup.gpe_device,
@@ -96,3 +99,75 @@ int __init acpi_wakeup_device_init(void)
mutex_unlock(&acpi_device_lock);
return 0;
}
+
+/**
+ * acpi_register_wakeup_handler - Register wakeup handler
+ * @wake_irq: The IRQ through which the device may receive wakeups
+ * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup
+ * @context: Context to pass to the handler when calling it
+ *
+ * Drivers which may share an IRQ with the SCI can use this to register
+ * a handler which returns true when the device they are managing wants
+ * to trigger a wakeup.
+ */
+int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context),
+ void *context)
+{
+ struct acpi_wakeup_handler *handler;
+
+ /*
+ * If the device is not sharing its IRQ with the SCI, there is no
+ * need to register the handler.
+ */
+ if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq)
+ return 0;
+
+ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler)
+ return -ENOMEM;
+
+ handler->wakeup = wakeup;
+ handler->context = context;
+
+ mutex_lock(&acpi_wakeup_handler_mutex);
+ list_add(&handler->list_node, &acpi_wakeup_handler_head);
+ mutex_unlock(&acpi_wakeup_handler_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler);
+
+/**
+ * acpi_unregister_wakeup_handler - Unregister wakeup handler
+ * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler()
+ * @context: Context passed to acpi_register_wakeup_handler()
+ */
+void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context),
+ void *context)
+{
+ struct acpi_wakeup_handler *handler;
+
+ mutex_lock(&acpi_wakeup_handler_mutex);
+ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
+ if (handler->wakeup == wakeup && handler->context == context) {
+ list_del(&handler->list_node);
+ kfree(handler);
+ break;
+ }
+ }
+ mutex_unlock(&acpi_wakeup_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler);
+
+bool acpi_check_wakeup_handlers(void)
+{
+ struct acpi_wakeup_handler *handler;
+
+ /* No need to lock, nothing else is running when we're called. */
+ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
+ if (handler->wakeup(handler->context))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 697a6b12d6b9..bdc1ba00aee9 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -37,7 +37,7 @@ struct always_present_id {
const char *uid;
};
-#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+#define X86_MATCH(model) X86_MATCH_INTEL_FAM6_MODEL(model, NULL)
#define ENTRY(hid, uid, cpu_models, dmi...) { \
{ { hid, }, {} }, \
@@ -51,29 +51,29 @@ static const struct always_present_id always_present_ids[] = {
* Bay / Cherry Trail PWM directly poked by GPU driver in win10,
* but Linux uses a separate PWM driver, harmless if not used.
*/
- ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}),
- ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
+ ENTRY("80860F09", "1", X86_MATCH(ATOM_SILVERMONT), {}),
+ ENTRY("80862288", "1", X86_MATCH(ATOM_AIRMONT), {}),
/* Lenovo Yoga Book uses PWM2 for keyboard backlight control */
- ENTRY("80862289", "2", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ ENTRY("80862289", "2", X86_MATCH(ATOM_AIRMONT), {
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
}),
/*
* The INT0002 device is necessary to clear wakeup interrupt sources
 * on Cherry Trail devices; without it we get "nobody cared" IRQ messages.
*/
- ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
+ ENTRY("INT0002", "1", X86_MATCH(ATOM_AIRMONT), {}),
/*
* On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
* the touchscreen ACPI device until a certain time
* after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed
* *and* _STA has been called at least 3 times since.
*/
- ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), {
+ ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
- ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), {
+ ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
}),
@@ -89,19 +89,19 @@ static const struct always_present_id always_present_ids[] = {
* was copy-pasted from the GPD win, so it has a disabled KIOX000A
* node which we should not enable, thus we also check the BIOS date.
*/
- ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "02/21/2017")
}),
- ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
}),
- ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
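
The new X86_MATCH() wrapper is built on the generic X86_MATCH_INTEL_FAM6_MODEL() macro, which fills in vendor Intel and family 6 automatically. Outside this file's ENTRY() table, the same matcher is used like this (sketch; the model name is one from the table above):

static const struct x86_cpu_id cherry_trail_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
	{}
};

static bool is_cherry_trail(void)
{
	/* x86_match_cpu() returns the matching entry or NULL. */
	return x86_match_cpu(cherry_trail_ids);
}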
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index fe1523664816..8558b629880b 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -645,6 +645,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
dev->dev.release = amba_device_release;
dev->dev.bus = &amba_bustype;
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+ dev->dev.dma_parms = &dev->dma_parms;
dev->res.name = dev_name(&dev->dev);
}
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index f303106b3362..9ecad74183a3 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mount.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
#include <linux/radix-tree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -48,26 +48,30 @@ static dev_t binderfs_dev;
static DEFINE_MUTEX(binderfs_minors_mutex);
static DEFINE_IDA(binderfs_minors);
-enum {
+enum binderfs_param {
Opt_max,
Opt_stats_mode,
- Opt_err
};
enum binderfs_stats_mode {
- STATS_NONE,
- STATS_GLOBAL,
+ binderfs_stats_mode_unset,
+ binderfs_stats_mode_global,
};
-static const match_table_t tokens = {
- { Opt_max, "max=%d" },
- { Opt_stats_mode, "stats=%s" },
- { Opt_err, NULL }
+static const struct constant_table binderfs_param_stats[] = {
+ { "global", binderfs_stats_mode_global },
+ {}
};
-static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
+const struct fs_parameter_spec binderfs_fs_parameters[] = {
+ fsparam_u32("max", Opt_max),
+ fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
+ {}
+};
+
+static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
{
- return inode->i_sb->s_fs_info;
+ return sb->s_fs_info;
}
bool is_binderfs_device(const struct inode *inode)
@@ -246,7 +250,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
static void binderfs_evict_inode(struct inode *inode)
{
struct binder_device *device = inode->i_private;
- struct binderfs_info *info = BINDERFS_I(inode);
+ struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
clear_inode(inode);
@@ -264,97 +268,84 @@ static void binderfs_evict_inode(struct inode *inode)
}
}
-/**
- * binderfs_parse_mount_opts - parse binderfs mount options
- * @data: options to set (can be NULL in which case defaults are used)
- */
-static int binderfs_parse_mount_opts(char *data,
- struct binderfs_mount_opts *opts)
+static int binderfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
{
- char *p, *stats;
- opts->max = BINDERFS_MAX_MINOR;
- opts->stats_mode = STATS_NONE;
-
- while ((p = strsep(&data, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- int token;
- int max_devices;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_max:
- if (match_int(&args[0], &max_devices) ||
- (max_devices < 0 ||
- (max_devices > BINDERFS_MAX_MINOR)))
- return -EINVAL;
-
- opts->max = max_devices;
- break;
- case Opt_stats_mode:
- if (!capable(CAP_SYS_ADMIN))
- return -EINVAL;
+ int opt;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct fs_parse_result result;
- stats = match_strdup(&args[0]);
- if (!stats)
- return -ENOMEM;
+ opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
- if (strcmp(stats, "global") != 0) {
- kfree(stats);
- return -EINVAL;
- }
+ switch (opt) {
+ case Opt_max:
+ if (result.uint_32 > BINDERFS_MAX_MINOR)
+ return invalfc(fc, "Bad value for '%s'", param->key);
- opts->stats_mode = STATS_GLOBAL;
- kfree(stats);
- break;
- default:
- pr_err("Invalid mount options\n");
- return -EINVAL;
- }
+ ctx->max = result.uint_32;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ctx->stats_mode = result.uint_32;
+ break;
+ default:
+ return invalfc(fc, "Unsupported parameter '%s'", param->key);
}
return 0;
}
-static int binderfs_remount(struct super_block *sb, int *flags, char *data)
+static int binderfs_fs_context_reconfigure(struct fs_context *fc)
{
- int prev_stats_mode, ret;
- struct binderfs_info *info = sb->s_fs_info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
- prev_stats_mode = info->mount_opts.stats_mode;
- ret = binderfs_parse_mount_opts(data, &info->mount_opts);
- if (ret)
- return ret;
-
- if (prev_stats_mode != info->mount_opts.stats_mode) {
- pr_err("Binderfs stats mode cannot be changed during a remount\n");
- info->mount_opts.stats_mode = prev_stats_mode;
- return -EINVAL;
- }
+ if (info->mount_opts.stats_mode != ctx->stats_mode)
+ return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
+ info->mount_opts.stats_mode = ctx->stats_mode;
+ info->mount_opts.max = ctx->max;
return 0;
}
-static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
+static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
{
- struct binderfs_info *info;
+ struct binderfs_info *info = BINDERFS_SB(root->d_sb);
- info = root->d_sb->s_fs_info;
if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
seq_printf(seq, ",max=%d", info->mount_opts.max);
- if (info->mount_opts.stats_mode == STATS_GLOBAL)
+
+ switch (info->mount_opts.stats_mode) {
+ case binderfs_stats_mode_unset:
+ break;
+ case binderfs_stats_mode_global:
seq_printf(seq, ",stats=global");
+ break;
+ }
return 0;
}
+static void binderfs_put_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+ sb->s_fs_info = NULL;
+}
+
static const struct super_operations binderfs_super_ops = {
.evict_inode = binderfs_evict_inode,
- .remount_fs = binderfs_remount,
- .show_options = binderfs_show_mount_opts,
+ .show_options = binderfs_show_options,
.statfs = simple_statfs,
+ .put_super = binderfs_put_super,
};
static inline bool is_binderfs_control_device(const struct dentry *dentry)
@@ -653,10 +644,11 @@ out:
return ret;
}
-static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
+static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
int ret;
struct binderfs_info *info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
struct inode *inode = NULL;
struct binderfs_device device_info = { 0 };
const char *name;
@@ -689,16 +681,14 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
- ret = binderfs_parse_mount_opts(data, &info->mount_opts);
- if (ret)
- return ret;
-
info->root_gid = make_kgid(sb->s_user_ns, 0);
if (!gid_valid(info->root_gid))
info->root_gid = GLOBAL_ROOT_GID;
info->root_uid = make_kuid(sb->s_user_ns, 0);
if (!uid_valid(info->root_uid))
info->root_uid = GLOBAL_ROOT_UID;
+ info->mount_opts.max = ctx->max;
+ info->mount_opts.stats_mode = ctx->stats_mode;
inode = new_inode(sb);
if (!inode)
@@ -730,36 +720,54 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
name++;
}
- if (info->mount_opts.stats_mode == STATS_GLOBAL)
+ if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
return init_binder_logs(sb);
return 0;
}
-static struct dentry *binderfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int binderfs_fs_context_get_tree(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, binderfs_fill_super);
+ return get_tree_nodev(fc, binderfs_fill_super);
}
-static void binderfs_kill_super(struct super_block *sb)
+static void binderfs_fs_context_free(struct fs_context *fc)
{
- struct binderfs_info *info = sb->s_fs_info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
- kill_litter_super(sb);
+ kfree(ctx);
+}
- if (info && info->ipc_ns)
- put_ipc_ns(info->ipc_ns);
+static const struct fs_context_operations binderfs_fs_context_ops = {
+ .free = binderfs_fs_context_free,
+ .get_tree = binderfs_fs_context_get_tree,
+ .parse_param = binderfs_fs_context_parse_param,
+ .reconfigure = binderfs_fs_context_reconfigure,
+};
- kfree(info);
+static int binderfs_init_fs_context(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+
+ ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max = BINDERFS_MAX_MINOR;
+ ctx->stats_mode = binderfs_stats_mode_unset;
+
+ fc->fs_private = ctx;
+ fc->ops = &binderfs_fs_context_ops;
+
+ return 0;
}
static struct file_system_type binder_fs_type = {
- .name = "binder",
- .mount = binderfs_mount,
- .kill_sb = binderfs_kill_super,
- .fs_flags = FS_USERNS_MOUNT,
+ .name = "binder",
+ .init_fs_context = binderfs_init_fs_context,
+ .parameters = binderfs_fs_parameters,
+ .kill_sb = kill_litter_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init init_binderfs(void)
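
The binderfs conversion above follows the standard shape of the new mount API: a fs_parameter_spec table drives fs_parse(), which returns the Opt_* index and deposits the typed value (u32 and enum alike) in result.uint_32. Reduced to a hypothetical filesystem:

struct my_fs_ctx {
	unsigned int mode;
};

enum { Opt_mode };

static const struct constant_table my_modes[] = {
	{ "fast", 1 },
	{ "safe", 2 },
	{}
};

static const struct fs_parameter_spec my_params[] = {
	fsparam_enum("mode", Opt_mode, my_modes),
	{}
};

static int my_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct my_fs_ctx *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, my_params, param, &result);
	if (opt < 0)
		return opt;	/* unknown key or bad value */

	if (opt == Opt_mode)
		ctx->mode = result.uint_32;

	return 0;
}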
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a6beb2c5a692..05ecdce1b702 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -34,6 +34,12 @@ if ATA
config ATA_NONSTANDARD
bool
+config SATA_HOST
+ bool
+
+config PATA_TIMINGS
+ bool
+
config ATA_VERBOSE_ERROR
bool "Verbose ATA error reporting"
default y
@@ -45,9 +51,26 @@ config ATA_VERBOSE_ERROR
If unsure, say Y.
+config ATA_FORCE
+ bool "\"libata.force=\" kernel parameter support" if EXPERT
+ default y
+ help
+ This option adds support for "libata.force=" kernel parameter for
+ forcing configuration settings.
+
+ For further information, please read
+ <file:Documentation/admin-guide/kernel-parameters.txt>.
+
+ This option will enlarge the kernel by approx. 3KB. Disable it if
+ kernel size is more important than ability to override the default
+ configuration settings.
+
+ If unsure, say Y.
+
config ATA_ACPI
bool "ATA ACPI Support"
depends on ACPI
+ select PATA_TIMINGS
default y
help
This option adds support for ATA-related ACPI objects.
@@ -73,6 +96,7 @@ config SATA_ZPODD
config SATA_PMP
bool "SATA Port Multiplier support"
+ depends on SATA_HOST
default y
help
This option adds support for SATA Port Multipliers
@@ -85,6 +109,7 @@ comment "Controllers with non-SFF native interface"
config SATA_AHCI
tristate "AHCI SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for AHCI Serial ATA.
@@ -111,6 +136,7 @@ config SATA_MOBILE_LPM_POLICY
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
+ select SATA_HOST
help
This option enables support for Platform AHCI Serial ATA
controllers.
@@ -121,6 +147,7 @@ config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
ARCH_BCM_63XX
+ select SATA_HOST
help
This option enables support for the AHCI SATA3 controller found on
Broadcom SoC's.
@@ -130,6 +157,7 @@ config AHCI_BRCM
config AHCI_DA850
tristate "DaVinci DA850 AHCI SATA support"
depends on ARCH_DAVINCI_DA850
+ select SATA_HOST
help
This option enables support for the DaVinci DA850 SoC's
onboard AHCI SATA.
@@ -139,6 +167,7 @@ config AHCI_DA850
config AHCI_DM816
tristate "DaVinci DM816 AHCI SATA support"
depends on ARCH_OMAP2PLUS
+ select SATA_HOST
help
This option enables support for the DaVinci DM816 SoC's
onboard AHCI SATA controller.
@@ -148,6 +177,7 @@ config AHCI_DM816
config AHCI_ST
tristate "ST AHCI SATA support"
depends on ARCH_STI
+ select SATA_HOST
help
This option enables support for ST AHCI SATA controller.
@@ -157,6 +187,7 @@ config AHCI_IMX
tristate "Freescale i.MX AHCI SATA support"
depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
depends on (HWMON && (THERMAL || !THERMAL_OF)) || !HWMON
+ select SATA_HOST
help
This option enables support for the Freescale i.MX SoC's
onboard AHCI SATA.
@@ -166,6 +197,7 @@ config AHCI_IMX
config AHCI_CEVA
tristate "CEVA AHCI SATA support"
depends on OF
+ select SATA_HOST
help
This option enables support for the CEVA AHCI SATA.
It can be found on the Xilinx Zynq UltraScale+ MPSoC.
@@ -176,6 +208,7 @@ config AHCI_MTK
tristate "MediaTek AHCI SATA support"
depends on ARCH_MEDIATEK
select MFD_SYSCON
+ select SATA_HOST
help
This option enables support for the MediaTek SoC's
onboard AHCI SATA controller.
@@ -185,6 +218,7 @@ config AHCI_MTK
config AHCI_MVEBU
tristate "Marvell EBU AHCI SATA support"
depends on ARCH_MVEBU
+ select SATA_HOST
help
This option enables support for the Marvell EBU SoC's
onboard AHCI SATA.
@@ -203,6 +237,7 @@ config AHCI_OCTEON
config AHCI_SUNXI
tristate "Allwinner sunxi AHCI SATA support"
depends on ARCH_SUNXI
+ select SATA_HOST
help
This option enables support for the Allwinner sunxi SoC's
onboard AHCI SATA.
@@ -212,6 +247,7 @@ config AHCI_SUNXI
config AHCI_TEGRA
tristate "NVIDIA Tegra AHCI SATA support"
depends on ARCH_TEGRA
+ select SATA_HOST
help
This option enables support for the NVIDIA Tegra SoC's
onboard AHCI SATA.
@@ -221,12 +257,14 @@ config AHCI_TEGRA
config AHCI_XGENE
tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
depends on PHY_XGENE
+ select SATA_HOST
help
This option enables support for APM X-Gene SoC SATA host controller.
config AHCI_QORIQ
tristate "Freescale QorIQ AHCI SATA support"
depends on OF
+ select SATA_HOST
help
This option enables support for the Freescale QorIQ AHCI SoC's
onboard AHCI SATA.
@@ -236,6 +274,7 @@ config AHCI_QORIQ
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
depends on FSL_SOC
+ select SATA_HOST
help
This option enables support for Freescale 3.0Gbps SATA controller.
It can be found on MPC837x and MPC8315.
@@ -245,6 +284,7 @@ config SATA_FSL
config SATA_GEMINI
tristate "Gemini SATA bridge support"
depends on ARCH_GEMINI || COMPILE_TEST
+ select SATA_HOST
default ARCH_GEMINI
help
This enables support for the FTIDE010 to SATA bridge
@@ -255,6 +295,7 @@ config SATA_GEMINI
config SATA_AHCI_SEATTLE
tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
depends on ARCH_SEATTLE
+ select SATA_HOST
help
This option enables support for AMD Seattle SATA host controller.
@@ -263,12 +304,14 @@ config SATA_AHCI_SEATTLE
config SATA_INIC162X
tristate "Initio 162x SATA support (Very Experimental)"
depends on PCI
+ select SATA_HOST
help
This option enables support for Initio 162x Serial ATA.
config SATA_ACARD_AHCI
tristate "ACard AHCI variant (ATP 8620)"
depends on PCI
+ select SATA_HOST
help
This option enables support for Acard.
@@ -277,6 +320,7 @@ config SATA_ACARD_AHCI
config SATA_SIL24
tristate "Silicon Image 3124/3132 SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for Silicon Image 3124/3132 Serial ATA.
@@ -317,6 +361,7 @@ config PDC_ADMA
config PATA_OCTEON_CF
tristate "OCTEON Boot Bus Compact Flash support"
depends on CAVIUM_OCTEON_SOC
+ select PATA_TIMINGS
help
This option enables a polled compact flash driver for use with
compact flash cards attached to the OCTEON boot bus.
@@ -326,6 +371,7 @@ config PATA_OCTEON_CF
config SATA_QSTOR
tristate "Pacific Digital SATA QStor support"
depends on PCI
+ select SATA_HOST
help
This option enables support for Pacific Digital Serial ATA QStor.
@@ -334,6 +380,7 @@ config SATA_QSTOR
config SATA_SX4
tristate "Promise SATA SX4 support (Experimental)"
depends on PCI
+ select SATA_HOST
help
This option enables support for Promise Serial ATA SX4.
@@ -357,6 +404,7 @@ comment "SATA SFF controllers with BMDMA"
config ATA_PIIX
tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for ICH5/6/7/8 Serial ATA
and support for PATA on the Intel ESB/ICH/PIIX3/PIIX4 series
@@ -368,6 +416,7 @@ config SATA_DWC
tristate "DesignWare Cores SATA support"
depends on DMADEVICES
select GENERIC_PHY
+ select SATA_HOST
help
This option enables support for the on-chip SATA controller of the
AppliedMicro processor 460EX.
@@ -398,6 +447,7 @@ config SATA_DWC_VDEBUG
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
depends on ARCH_HIGHBANK || COMPILE_TEST
+ select SATA_HOST
help
This option enables support for the Calxeda Highbank SoC's
onboard SATA.
@@ -409,6 +459,7 @@ config SATA_MV
depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
select GENERIC_PHY
+ select SATA_HOST
help
This option enables support for the Marvell Serial ATA family.
Currently supports 88SX[56]0[48][01] PCI(-X) chips,
@@ -419,6 +470,7 @@ config SATA_MV
config SATA_NV
tristate "NVIDIA SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for NVIDIA Serial ATA.
@@ -427,6 +479,7 @@ config SATA_NV
config SATA_PROMISE
tristate "Promise SATA TX2/TX4 support"
depends on PCI
+ select SATA_HOST
help
This option enables support for Promise Serial ATA TX2/TX4.
@@ -435,6 +488,7 @@ config SATA_PROMISE
config SATA_RCAR
tristate "Renesas R-Car SATA support"
depends on ARCH_RENESAS || COMPILE_TEST
+ select SATA_HOST
help
This option enables support for Renesas R-Car Serial ATA.
@@ -443,6 +497,7 @@ config SATA_RCAR
config SATA_SIL
tristate "Silicon Image SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for Silicon Image Serial ATA.
@@ -452,6 +507,7 @@ config SATA_SIS
tristate "SiS 964/965/966/180 SATA support"
depends on PCI
select PATA_SIS
+ select SATA_HOST
help
This option enables support for SiS Serial ATA on
SiS 964/965/966/180 and Parallel ATA on SiS 180.
@@ -462,6 +518,7 @@ config SATA_SIS
config SATA_SVW
tristate "ServerWorks Frodo / Apple K2 SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for Broadcom/Serverworks/Apple K2
SATA support.
@@ -471,6 +528,7 @@ config SATA_SVW
config SATA_ULI
tristate "ULi Electronics SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for ULi Electronics SATA.
@@ -479,6 +537,7 @@ config SATA_ULI
config SATA_VIA
tristate "VIA SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for VIA Serial ATA.
@@ -487,6 +546,7 @@ config SATA_VIA
config SATA_VITESSE
tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
depends on PCI
+ select SATA_HOST
help
This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
@@ -497,6 +557,7 @@ comment "PATA SFF controllers with BMDMA"
config PATA_ALI
tristate "ALi PATA support"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for the ALi ATA interfaces
found on the many ALi chipsets.
@@ -506,6 +567,7 @@ config PATA_ALI
config PATA_AMD
tristate "AMD/NVidia PATA support"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for the AMD and NVidia PATA
interfaces found on the chipsets for Athlon/Athlon64.
@@ -540,6 +602,7 @@ config PATA_ATIIXP
config PATA_ATP867X
tristate "ARTOP/Acard ATP867X PATA support"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for ARTOP/Acard ATP867X PATA
controllers.
@@ -549,6 +612,7 @@ config PATA_ATP867X
config PATA_BK3710
tristate "Palmchip BK3710 PATA support"
depends on ARCH_DAVINCI
+ select PATA_TIMINGS
help
This option enables support for the integrated IDE controller on
the TI DaVinci SoC.
@@ -558,6 +622,7 @@ config PATA_BK3710
config PATA_CMD64X
tristate "CMD64x PATA support"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for the CMD64x series chips
except for the CMD640.
@@ -603,6 +668,7 @@ config PATA_CS5536
config PATA_CYPRESS
tristate "Cypress CY82C693 PATA support (Very Experimental)"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for the Cypress/Contaq CY82C693
chipset found in some Alpha systems.
@@ -621,6 +687,7 @@ config PATA_EFAR
config PATA_EP93XX
tristate "Cirrus Logic EP93xx PATA support"
depends on ARCH_EP93XX
+ select PATA_TIMINGS
help
This option enables support for the PATA controller in
the Cirrus Logic EP9312 and EP9315 ARM CPUs.
@@ -685,6 +752,7 @@ config PATA_HPT3X3_DMA
config PATA_ICSIDE
tristate "Acorn ICS PATA support"
depends on ARM && ARCH_ACORN
+ select PATA_TIMINGS
help
On Acorn systems, say Y here if you wish to use the ICS PATA
interface card. This is not required for ICS partition support.
@@ -693,6 +761,7 @@ config PATA_ICSIDE
config PATA_IMX
tristate "PATA support for Freescale iMX"
depends on ARCH_MXC
+ select PATA_TIMINGS
help
This option enables support for the PATA host available on Freescale
iMX SoCs.
@@ -778,6 +847,7 @@ config PATA_NINJA32
config PATA_NS87415
tristate "Nat Semi NS87415 PATA support"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for the National Semiconductor
NS87415 PCI-IDE controller.
@@ -902,6 +972,7 @@ config PATA_TRIFLEX
config PATA_VIA
tristate "VIA PATA support"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for the VIA PATA interfaces
found on the many VIA chipsets.
@@ -935,6 +1006,7 @@ comment "PIO-only SFF controllers"
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Experimental)"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for the CMD640 PCI IDE
interface chip. Only the primary channel is currently
@@ -1005,6 +1077,7 @@ config PATA_MPIIX
config PATA_NS87410
tristate "Nat Semi NS87410 PATA support"
depends on PCI
+ select PATA_TIMINGS
help
This option enables support for the National Semiconductor
NS87410 PCI-IDE controller.
@@ -1085,6 +1158,7 @@ config PATA_RZ1000
config PATA_SAMSUNG_CF
tristate "Samsung SoC PATA support"
depends on SAMSUNG_DEV_IDE
+ select PATA_TIMINGS
help
This option enables basic support for Samsung's S3C/S5P board
PATA controllers via the new ATA layer.
@@ -1104,6 +1178,7 @@ comment "Generic fallback / legacy drivers"
config PATA_ACPI
tristate "ACPI firmware driver for PATA"
depends on ATA_ACPI && ATA_BMDMA && PCI
+ select PATA_TIMINGS
help
This option enables an ACPI method driver which drives
motherboard PATA controller interfaces through the ACPI
@@ -1113,6 +1188,7 @@ config PATA_ACPI
config ATA_GENERIC
tristate "Generic ATA support"
depends on PCI && ATA_BMDMA
+ select SATA_HOST
help
This option enables support for generic BIOS configured
ATA controllers via the new ATA layer.
@@ -1122,6 +1198,7 @@ config ATA_GENERIC
config PATA_LEGACY
tristate "Legacy ISA PATA support (Experimental)"
depends on (ISA || PCI)
+ select PATA_TIMINGS
help
This option enables support for ISA/VLB/PCI bus legacy PATA
ports and allows them to be accessed via the new ATA layer.
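
The Kconfig hunks above all feed two new helper symbols, SATA_HOST and
PATA_TIMINGS, which individual host drivers select; the Makefile hunk below
then builds libata-sata.o and libata-pata-timings.o only when the
corresponding symbol is set, so PATA-only or SATA-only kernels shed the code
they cannot use. In C the same split shows up as compile-time gates. A
minimal sketch of the pattern (illustrative, not lifted from the patch):

    /* IS_ENABLED() folds to a compile-time constant, so everything past
     * the early return is discarded entirely when CONFIG_SATA_HOST=n. */
    static int example_config_ncq(struct ata_device *dev)
    {
            if (!IS_ENABLED(CONFIG_SATA_HOST))
                    return 0;       /* NCQ is SATA-only */
            /* ... NCQ setup would go here ... */
            return 0;
    }

The real instance of this gate appears in the ata_dev_config_ncq() hunk
further down.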
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d8cc2e04a6c7..b8aebfb14e82 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -123,7 +123,9 @@ obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o
libata-y := libata-core.o libata-scsi.o libata-eh.o \
libata-transport.o libata-trace.o
+libata-$(CONFIG_SATA_HOST) += libata-sata.o
libata-$(CONFIG_ATA_SFF) += libata-sff.o
libata-$(CONFIG_SATA_PMP) += libata-pmp.o
libata-$(CONFIG_ATA_ACPI) += libata-acpi.o
libata-$(CONFIG_SATA_ZPODD) += libata-zpodd.o
+libata-$(CONFIG_PATA_TIMINGS) += libata-pata-timings.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 11ea1aff40db..0c0a736eb861 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -40,6 +40,7 @@
enum {
AHCI_PCI_BAR_STA2X11 = 0,
AHCI_PCI_BAR_CAVIUM = 0,
+ AHCI_PCI_BAR_LOONGSON = 0,
AHCI_PCI_BAR_ENMOTUS = 2,
AHCI_PCI_BAR_CAVIUM_GEN5 = 4,
AHCI_PCI_BAR_STANDARD = 5,
@@ -245,6 +246,7 @@ static const struct ata_port_info ahci_port_info[] = {
static const struct pci_device_id ahci_pci_tbl[] = {
/* Intel */
+ { PCI_VDEVICE(INTEL, 0x06d6), board_ahci }, /* Comet Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
@@ -401,11 +403,15 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
{ PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_mobile }, /* Comet Lake PCH-U AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_mobile }, /* Comet Lake PCH RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -589,6 +595,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+ /* Loongson */
+ { PCI_VDEVICE(LOONGSON, 0x7a08), board_ahci },
+
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
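
The new IDs use the two matching styles already present in this table: exact
vendor/device pairs via PCI_VDEVICE(), plus the class-code catch-all at the
end that binds any AHCI-class controller not listed explicitly. The table
shape in isolation (entries are illustrative):

    static const struct pci_device_id example_tbl[] = {
            { PCI_VDEVICE(INTEL, 0x06d6), board_ahci },     /* exact id */
            { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
              PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
            { }     /* sentinel terminates the table */
    };

The BAR quirk for the new Loongson part is handled separately in
ahci_init_one(), shown in a later hunk.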
@@ -1488,7 +1497,7 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
static void ahci_remap_check(struct pci_dev *pdev, int bar,
struct ahci_host_priv *hpriv)
{
- int i, count = 0;
+ int i;
u32 cap;
/*
@@ -1509,13 +1518,14 @@ static void ahci_remap_check(struct pci_dev *pdev, int bar,
continue;
/* We've found a remapped device */
- count++;
+ hpriv->remapped_nvme++;
}
- if (!count)
+ if (!hpriv->remapped_nvme)
return;
- dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+ dev_warn(&pdev->dev, "Found %u remapped NVMe devices.\n",
+ hpriv->remapped_nvme);
dev_warn(&pdev->dev,
"Switch your BIOS from RAID to AHCI mode to use them.\n");
@@ -1635,6 +1645,18 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
}
}
+static ssize_t remapped_nvme_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ return sprintf(buf, "%u\n", hpriv->remapped_nvme);
+}
+
+static DEVICE_ATTR_RO(remapped_nvme);
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
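
remapped_nvme_show() above is the stock read-only sysfs attribute pattern:
DEVICE_ATTR_RO(name) expects a name_show() callback and emits a struct
device_attribute named dev_attr_name with 0444 permissions. The pattern in
its minimal form (names are placeholders):

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%u\n", 42);        /* value to report */
    }
    static DEVICE_ATTR_RO(foo);     /* defines dev_attr_foo, mode 0444 */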
@@ -1680,6 +1702,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
if (pdev->device == 0xa084)
ahci_pci_bar = AHCI_PCI_BAR_CAVIUM_GEN5;
+ } else if (pdev->vendor == PCI_VENDOR_ID_LOONGSON) {
+ if (pdev->device == 0x7a08)
+ ahci_pci_bar = AHCI_PCI_BAR_LOONGSON;
}
/* acquire resources */
@@ -1735,6 +1760,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* detect remapped nvme devices */
ahci_remap_check(pdev, ahci_pci_bar, hpriv);
+ sysfs_add_file_to_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr,
+ NULL);
+
/* must set flag prior to save config in order to take effect */
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
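
Passing NULL as the group name attaches the attribute directly to the
device's sysfs directory, so the counter surfaces as (for example)
/sys/bus/pci/devices/0000:00:17.0/remapped_nvme. The return value is ignored
here; a stricter variant (not in the patch) would at least log the failure,
since the file is then silently absent:

    if (sysfs_add_file_to_group(&pdev->dev.kobj,
                                &dev_attr_remapped_nvme.attr, NULL))
            dev_warn(&pdev->dev,
                     "failed to expose remapped_nvme attribute\n");

The matching sysfs_remove_file_from_group() call added to ahci_remove_one()
below keeps creation and teardown symmetric.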
@@ -1886,6 +1915,9 @@ static void ahci_shutdown_one(struct pci_dev *pdev)
static void ahci_remove_one(struct pci_dev *pdev)
{
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr,
+ NULL);
pm_runtime_get_noresume(&pdev->dev);
ata_pci_remove_one(pdev);
}
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 3dbf398c92ea..d991dd46e89c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -336,6 +336,7 @@ struct ahci_host_priv {
u32 em_loc; /* enclosure management location */
u32 em_buf_sz; /* EM buffer size in byte */
u32 em_msg_type; /* EM message type */
+ u32 remapped_nvme; /* NVMe remapped device count */
bool got_runtime_pm; /* Did we do pm_runtime_get? */
struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
struct reset_control *rsts; /* Optional */
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 948d2c6557f3..388baf528fa8 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -782,7 +782,7 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
struct ata_host *host = dev_get_drvdata(ap->dev);
struct ahci_host_priv *hpriv = host->private_data;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
- int ret = -EIO;
+ int ret;
if (imxpriv->type == AHCI_IMX53)
ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
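
This is a dead-store cleanup: the branches that follow the type check all
assign ret before it is read, so the -EIO initializer could never be
observed, and dropping it lets the compiler warn if a branch is ever missed.
The shape of the fix, reduced (condition names are illustrative):

    int ret;                        /* no initializer needed */

    if (is_imx53)
            ret = softreset_a(link);
    else if (is_imx6q)
            ret = softreset_b(link);
    else
            ret = -EINVAL;          /* every path assigns ret */
    return ret;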
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 42c8728f6117..beca5f91bb4c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2,10 +2,6 @@
/*
* libata-core.c - helper library for ATA
*
- * Maintained by: Tejun Heo <tj@kernel.org>
- * Please ALWAYS copy linux-ide@vger.kernel.org
- * on emails.
- *
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*
@@ -22,6 +18,11 @@
* http://www.compactflash.org (CF)
* http://www.qic.org (QIC157 - Tape and DSC)
* http://www.ce-ata.org (CE-ATA: not supported)
+ *
+ * libata is essentially a library of internal helper functions for
+ * low-level ATA host controller drivers. As such, the API/ABI is
+ * likely to change as new drivers are added and updated.
+ * Do not depend on ABI/API stability.
*/
#include <linux/kernel.h>
@@ -56,6 +57,7 @@
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
+#include <asm/setup.h>
#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>
@@ -63,11 +65,6 @@
#include "libata.h"
#include "libata-transport.h"
-/* debounce timing parameters in msecs { interval, duration, timeout } */
-const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
-const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
-const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
-
const struct ata_port_operations ata_base_port_ops = {
.prereset = ata_std_prereset,
.postreset = ata_std_postreset,
@@ -82,6 +79,7 @@ const struct ata_port_operations sata_port_ops = {
.qc_defer = ata_std_qc_defer,
.hardreset = sata_std_hardreset,
};
+EXPORT_SYMBOL_GPL(sata_port_ops);
static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors);
@@ -91,14 +89,15 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
atomic_t ata_print_id = ATOMIC_INIT(0);
+#ifdef CONFIG_ATA_FORCE
struct ata_force_param {
const char *name;
- unsigned int cbl;
- int spd_limit;
+ u8 cbl;
+ u8 spd_limit;
unsigned long xfer_mask;
unsigned int horkage_on;
unsigned int horkage_off;
- unsigned int lflags;
+ u16 lflags;
};
struct ata_force_ent {
@@ -110,10 +109,11 @@ struct ata_force_ent {
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;
-static char ata_force_param_buf[PAGE_SIZE] __initdata;
+static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
+#endif
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
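
Shrinking the force-parameter buffer from PAGE_SIZE to COMMAND_LINE_SIZE
(hence the new <asm/setup.h> include above) is safe because a value passed on
the kernel command line can never exceed the command line itself. The sizing
idiom in isolation (the name is illustrative):

    static char example_buf[COMMAND_LINE_SIZE] __initdata;
    /* perm 0: the buffer is discarded after init, so reading the
     * parameter back through sysfs is deliberately disallowed */
    module_param_string(example, example_buf, sizeof(example_buf), 0);

The whole block is now also bracketed by CONFIG_ATA_FORCE, so kernels built
without the libata.force facility drop the parsing code and its tables
altogether.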
@@ -224,6 +224,7 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
return NULL;
}
+EXPORT_SYMBOL_GPL(ata_link_next);
/**
* ata_dev_next - device iteration helper
@@ -277,6 +278,7 @@ struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
goto next;
return dev;
}
+EXPORT_SYMBOL_GPL(ata_dev_next);
/**
* ata_dev_phys_link - find physical link for a device
@@ -303,6 +305,7 @@ struct ata_link *ata_dev_phys_link(struct ata_device *dev)
return ap->slave_link;
}
+#ifdef CONFIG_ATA_FORCE
/**
* ata_force_cbl - force cable type according to libata.force
* @ap: ATA port of interest
@@ -483,6 +486,11 @@ static void ata_force_horkage(struct ata_device *dev)
fe->param.name);
}
}
+#else
+static inline void ata_force_link_limits(struct ata_link *link) { }
+static inline void ata_force_xfermask(struct ata_device *dev) { }
+static inline void ata_force_horkage(struct ata_device *dev) { }
+#endif
/**
* atapi_cmd_type - Determine ATAPI command type from SCSI opcode
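
The #else branch above supplies empty static inline stubs — the usual kernel
idiom for a configured-out feature: call sites stay unconditional and the
compiler discards the no-op calls. The idiom in isolation (names are
placeholders):

    #ifdef CONFIG_FEATURE_X
    void feature_x_apply(struct ata_device *dev);
    #else
    static inline void feature_x_apply(struct ata_device *dev) { }
    #endif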
@@ -521,79 +529,7 @@ int atapi_cmd_type(u8 opcode)
return ATAPI_MISC;
}
}
-
-/**
- * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
- * @tf: Taskfile to convert
- * @pmp: Port multiplier port
- * @is_cmd: This FIS is for command
- * @fis: Buffer into which data will output
- *
- * Converts a standard ATA taskfile to a Serial ATA
- * FIS structure (Register - Host to Device).
- *
- * LOCKING:
- * Inherited from caller.
- */
-void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
-{
- fis[0] = 0x27; /* Register - Host to Device FIS */
- fis[1] = pmp & 0xf; /* Port multiplier number*/
- if (is_cmd)
- fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
-
- fis[2] = tf->command;
- fis[3] = tf->feature;
-
- fis[4] = tf->lbal;
- fis[5] = tf->lbam;
- fis[6] = tf->lbah;
- fis[7] = tf->device;
-
- fis[8] = tf->hob_lbal;
- fis[9] = tf->hob_lbam;
- fis[10] = tf->hob_lbah;
- fis[11] = tf->hob_feature;
-
- fis[12] = tf->nsect;
- fis[13] = tf->hob_nsect;
- fis[14] = 0;
- fis[15] = tf->ctl;
-
- fis[16] = tf->auxiliary & 0xff;
- fis[17] = (tf->auxiliary >> 8) & 0xff;
- fis[18] = (tf->auxiliary >> 16) & 0xff;
- fis[19] = (tf->auxiliary >> 24) & 0xff;
-}
-
-/**
- * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
- * @fis: Buffer from which data will be input
- * @tf: Taskfile to output
- *
- * Converts a serial ATA FIS structure to a standard ATA taskfile.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
-{
- tf->command = fis[2]; /* status */
- tf->feature = fis[3]; /* error */
-
- tf->lbal = fis[4];
- tf->lbam = fis[5];
- tf->lbah = fis[6];
- tf->device = fis[7];
-
- tf->hob_lbal = fis[8];
- tf->hob_lbam = fis[9];
- tf->hob_lbah = fis[10];
-
- tf->nsect = fis[12];
- tf->hob_nsect = fis[13];
-}
+EXPORT_SYMBOL_GPL(atapi_cmd_type);
static const u8 ata_rw_cmds[] = {
/* pio multi */
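
ata_tf_to_fis() and ata_tf_from_fis() are not deleted here — they move to the
new libata-sata.c, so they are built only when CONFIG_SATA_HOST is set. For
reference, what a caller sees from the H2D conversion, based on the removed
body above (a sketch, not kernel code):

    u8 fis[20];
    struct ata_taskfile tf = { .command = ATA_CMD_ID_ATA };

    ata_tf_to_fis(&tf, 0 /* pmp */, 1 /* is_cmd */, fis);
    /* fis[0] == 0x27 (Register - Host to Device),
     * fis[1] == 0x80 (Command bit set, PMP port 0),
     * fis[2] == ATA_CMD_ID_ATA */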
@@ -868,6 +804,7 @@ unsigned long ata_pack_xfermask(unsigned long pio_mask,
((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
+EXPORT_SYMBOL_GPL(ata_pack_xfermask);
/**
* ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
@@ -923,6 +860,7 @@ u8 ata_xfer_mask2mode(unsigned long xfer_mask)
return ent->base + highbit - ent->shift;
return 0xff;
}
+EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
/**
* ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
@@ -946,6 +884,7 @@ unsigned long ata_xfer_mode2mask(u8 xfer_mode)
& ~((1 << ent->shift) - 1);
return 0;
}
+EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
/**
* ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
@@ -968,6 +907,7 @@ int ata_xfer_mode2shift(unsigned long xfer_mode)
return ent->shift;
return -1;
}
+EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
/**
* ata_mode_string - convert xfer_mask to string
@@ -1014,6 +954,7 @@ const char *ata_mode_string(unsigned long xfer_mask)
return xfer_mode_str[highbit];
return "<n/a>";
}
+EXPORT_SYMBOL_GPL(ata_mode_string);
const char *sata_spd_string(unsigned int spd)
{
@@ -1094,6 +1035,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
DPRINTK("unknown device\n");
return ATA_DEV_UNKNOWN;
}
+EXPORT_SYMBOL_GPL(ata_dev_classify);
/**
* ata_id_string - Convert IDENTIFY DEVICE page into string
@@ -1130,6 +1072,7 @@ void ata_id_string(const u16 *id, unsigned char *s,
len -= 2;
}
}
+EXPORT_SYMBOL_GPL(ata_id_string);
/**
* ata_id_c_string - Convert IDENTIFY DEVICE page into C string
@@ -1157,6 +1100,7 @@ void ata_id_c_string(const u16 *id, unsigned char *s,
p--;
*p = '\0';
}
+EXPORT_SYMBOL_GPL(ata_id_c_string);
static u64 ata_id_n_sectors(const u16 *id)
{
@@ -1514,6 +1458,7 @@ unsigned long ata_id_xfermask(const u16 *id)
return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
+EXPORT_SYMBOL_GPL(ata_id_xfermask);
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
@@ -1771,6 +1716,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
return 1;
return 0;
}
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
/**
* ata_pio_mask_no_iordy - Return the non IORDY mask
@@ -1811,6 +1757,7 @@ unsigned int ata_do_dev_read_id(struct ata_device *dev,
return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
+EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
/**
* ata_dev_read_id - Read ID data from the specified device
@@ -2265,6 +2212,8 @@ static int ata_dev_config_ncq(struct ata_device *dev,
desc[0] = '\0';
return 0;
}
+ if (!IS_ENABLED(CONFIG_SATA_HOST))
+ return 0;
if (dev->horkage & ATA_HORKAGE_NONCQ) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
@@ -2783,6 +2732,7 @@ int ata_cable_40wire(struct ata_port *ap)
{
return ATA_CBL_PATA40;
}
+EXPORT_SYMBOL_GPL(ata_cable_40wire);
/**
* ata_cable_80wire - return 80 wire cable type
@@ -2796,6 +2746,7 @@ int ata_cable_80wire(struct ata_port *ap)
{
return ATA_CBL_PATA80;
}
+EXPORT_SYMBOL_GPL(ata_cable_80wire);
/**
* ata_cable_unknown - return unknown PATA cable.
@@ -2808,6 +2759,7 @@ int ata_cable_unknown(struct ata_port *ap)
{
return ATA_CBL_PATA_UNK;
}
+EXPORT_SYMBOL_GPL(ata_cable_unknown);
/**
* ata_cable_ignore - return ignored PATA cable.
@@ -2820,6 +2772,7 @@ int ata_cable_ignore(struct ata_port *ap)
{
return ATA_CBL_PATA_IGN;
}
+EXPORT_SYMBOL_GPL(ata_cable_ignore);
/**
* ata_cable_sata - return SATA cable type
@@ -2832,6 +2785,7 @@ int ata_cable_sata(struct ata_port *ap)
{
return ATA_CBL_SATA;
}
+EXPORT_SYMBOL_GPL(ata_cable_sata);
/**
* ata_bus_probe - Reset and probe ATA bus
@@ -3014,6 +2968,7 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
return NULL;
return pair;
}
+EXPORT_SYMBOL_GPL(ata_dev_pair);
/**
* sata_down_spd_limit - adjust SATA spd limit downward
@@ -3095,252 +3050,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
return 0;
}
-static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
-{
- struct ata_link *host_link = &link->ap->link;
- u32 limit, target, spd;
-
- limit = link->sata_spd_limit;
-
- /* Don't configure downstream link faster than upstream link.
- * It doesn't speed up anything and some PMPs choke on such
- * configuration.
- */
- if (!ata_is_host_link(link) && host_link->sata_spd)
- limit &= (1 << host_link->sata_spd) - 1;
-
- if (limit == UINT_MAX)
- target = 0;
- else
- target = fls(limit);
-
- spd = (*scontrol >> 4) & 0xf;
- *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
-
- return spd != target;
-}
-
-/**
- * sata_set_spd_needed - is SATA spd configuration needed
- * @link: Link in question
- *
- * Test whether the spd limit in SControl matches
- * @link->sata_spd_limit. This function is used to determine
- * whether hardreset is necessary to apply SATA spd
- * configuration.
- *
- * LOCKING:
- * Inherited from caller.
- *
- * RETURNS:
- * 1 if SATA spd configuration is needed, 0 otherwise.
- */
-static int sata_set_spd_needed(struct ata_link *link)
-{
- u32 scontrol;
-
- if (sata_scr_read(link, SCR_CONTROL, &scontrol))
- return 1;
-
- return __sata_set_spd_needed(link, &scontrol);
-}
-
-/**
- * sata_set_spd - set SATA spd according to spd limit
- * @link: Link to set SATA spd for
- *
- * Set SATA spd of @link according to sata_spd_limit.
- *
- * LOCKING:
- * Inherited from caller.
- *
- * RETURNS:
- * 0 if spd doesn't need to be changed, 1 if spd has been
- * changed. Negative errno if SCR registers are inaccessible.
- */
-int sata_set_spd(struct ata_link *link)
-{
- u32 scontrol;
- int rc;
-
- if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
- return rc;
-
- if (!__sata_set_spd_needed(link, &scontrol))
- return 0;
-
- if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
- return rc;
-
- return 1;
-}
-
-/*
- * This mode timing computation functionality is ported over from
- * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
- */
-/*
- * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
- * These were taken from ATA/ATAPI-6 standard, rev 0a, except
- * for UDMA6, which is currently supported only by Maxtor drives.
- *
- * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
- */
-
-static const struct ata_timing ata_timing[] = {
-/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
- { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
- { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
- { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
- { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
- { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
- { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
- { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
-
- { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
- { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
- { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
-
- { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
- { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
- { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
- { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
- { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
-
-/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
- { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
- { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
- { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
- { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
- { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
- { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
- { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
-
- { 0xFF }
-};
-
-#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
-#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0)
-
-static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
-{
- q->setup = EZ(t->setup, T);
- q->act8b = EZ(t->act8b, T);
- q->rec8b = EZ(t->rec8b, T);
- q->cyc8b = EZ(t->cyc8b, T);
- q->active = EZ(t->active, T);
- q->recover = EZ(t->recover, T);
- q->dmack_hold = EZ(t->dmack_hold, T);
- q->cycle = EZ(t->cycle, T);
- q->udma = EZ(t->udma, UT);
-}
-
-void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
- struct ata_timing *m, unsigned int what)
-{
- if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
- if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
- if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
- if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
- if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
- if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
- if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
- if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
- if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
-}
-
-const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
-{
- const struct ata_timing *t = ata_timing;
-
- while (xfer_mode > t->mode)
- t++;
-
- if (xfer_mode == t->mode)
- return t;
-
- WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
- __func__, xfer_mode);
-
- return NULL;
-}
-
-int ata_timing_compute(struct ata_device *adev, unsigned short speed,
- struct ata_timing *t, int T, int UT)
-{
- const u16 *id = adev->id;
- const struct ata_timing *s;
- struct ata_timing p;
-
- /*
- * Find the mode.
- */
-
- if (!(s = ata_timing_find_mode(speed)))
- return -EINVAL;
-
- memcpy(t, s, sizeof(*s));
-
- /*
- * If the drive is an EIDE drive, it can tell us it needs extended
- * PIO/MW_DMA cycle timing.
- */
-
- if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
- memset(&p, 0, sizeof(p));
-
- if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
- if (speed <= XFER_PIO_2)
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
- else if ((speed <= XFER_PIO_4) ||
- (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
- } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
- p.cycle = id[ATA_ID_EIDE_DMA_MIN];
-
- ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
- }
-
- /*
- * Convert the timing to bus clock counts.
- */
-
- ata_timing_quantize(t, t, T, UT);
-
- /*
- * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
- * S.M.A.R.T * and some other commands. We have to ensure that the
- * DMA cycle timing is slower/equal than the fastest PIO timing.
- */
-
- if (speed > XFER_PIO_6) {
- ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
- ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
- }
-
- /*
- * Lengthen active & recovery time so that cycle time is correct.
- */
-
- if (t->act8b + t->rec8b < t->cyc8b) {
- t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
- t->rec8b = t->cyc8b - t->act8b;
- }
-
- if (t->active + t->recover < t->cycle) {
- t->active += (t->cycle - (t->active + t->recover)) / 2;
- t->recover = t->cycle - t->active;
- }
-
- /* In a few cases quantisation may produce enough errors to
- leave t->cycle too low for the sum of active and recovery
- if so we must correct this */
- if (t->active + t->recover > t->cycle)
- t->cycle = t->active + t->recover;
-
- return 0;
-}
-
+#ifdef CONFIG_ATA_ACPI
/**
* ata_timing_cycle2mode - find xfer mode for the specified cycle duration
* @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
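
The whole PATA timing engine removed above — the ata_timing table and the
quantize/merge/compute helpers — now lives in
drivers/ata/libata-pata-timings.c, built only for drivers that select
PATA_TIMINGS. Its core arithmetic is round-up division from a duration to
whole bus-clock ticks; for example, a 70 ns active time on a 30 ns clock
needs three ticks:

    #define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
    /* ENOUGH(70, 30) == (70 - 1) / 30 + 1 == 3 ticks, i.e. 90 ns,
     * the first clock multiple that covers the required 70 ns */

Only ata_timing_cycle2mode() stays behind, wrapped in CONFIG_ATA_ACPI in the
next hunk because the ACPI transfer-mode code is its sole remaining user in
this file.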
@@ -3391,6 +3101,7 @@ u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
return last_mode;
}
+#endif
/**
* ata_down_xfermask_limit - adjust dev xfer masks downward
@@ -3662,6 +3373,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
*r_failed_dev = dev;
return rc;
}
+EXPORT_SYMBOL_GPL(ata_do_set_mode);
/**
* ata_wait_ready - wait for link to become ready
@@ -3771,216 +3483,7 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
return ata_wait_ready(link, deadline, check_ready);
}
-
-/**
- * sata_link_debounce - debounce SATA phy status
- * @link: ATA link to debounce SATA phy status for
- * @params: timing parameters { interval, duration, timeout } in msec
- * @deadline: deadline jiffies for the operation
- *
- * Make sure SStatus of @link reaches stable state, determined by
- * holding the same value where DET is not 1 for @duration polled
- * every @interval, before @timeout. Timeout constraints the
- * beginning of the stable state. Because DET gets stuck at 1 on
- * some controllers after hot unplugging, this functions waits
- * until timeout then returns 0 if DET is stable at 1.
- *
- * @timeout is further limited by @deadline. The sooner of the
- * two is used.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int sata_link_debounce(struct ata_link *link, const unsigned long *params,
- unsigned long deadline)
-{
- unsigned long interval = params[0];
- unsigned long duration = params[1];
- unsigned long last_jiffies, t;
- u32 last, cur;
- int rc;
-
- t = ata_deadline(jiffies, params[2]);
- if (time_before(t, deadline))
- deadline = t;
-
- if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
- return rc;
- cur &= 0xf;
-
- last = cur;
- last_jiffies = jiffies;
-
- while (1) {
- ata_msleep(link->ap, interval);
- if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
- return rc;
- cur &= 0xf;
-
- /* DET stable? */
- if (cur == last) {
- if (cur == 1 && time_before(jiffies, deadline))
- continue;
- if (time_after(jiffies,
- ata_deadline(last_jiffies, duration)))
- return 0;
- continue;
- }
-
- /* unstable, start over */
- last = cur;
- last_jiffies = jiffies;
-
- /* Check deadline. If debouncing failed, return
- * -EPIPE to tell upper layer to lower link speed.
- */
- if (time_after(jiffies, deadline))
- return -EPIPE;
- }
-}
-
-/**
- * sata_link_resume - resume SATA link
- * @link: ATA link to resume SATA
- * @params: timing parameters { interval, duration, timeout } in msec
- * @deadline: deadline jiffies for the operation
- *
- * Resume SATA phy @link and debounce it.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int sata_link_resume(struct ata_link *link, const unsigned long *params,
- unsigned long deadline)
-{
- int tries = ATA_LINK_RESUME_TRIES;
- u32 scontrol, serror;
- int rc;
-
- if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
- return rc;
-
- /*
- * Writes to SControl sometimes get ignored under certain
- * controllers (ata_piix SIDPR). Make sure DET actually is
- * cleared.
- */
- do {
- scontrol = (scontrol & 0x0f0) | 0x300;
- if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
- return rc;
- /*
- * Some PHYs react badly if SStatus is pounded
- * immediately after resuming. Delay 200ms before
- * debouncing.
- */
- if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
- ata_msleep(link->ap, 200);
-
- /* is SControl restored correctly? */
- if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
- return rc;
- } while ((scontrol & 0xf0f) != 0x300 && --tries);
-
- if ((scontrol & 0xf0f) != 0x300) {
- ata_link_warn(link, "failed to resume link (SControl %X)\n",
- scontrol);
- return 0;
- }
-
- if (tries < ATA_LINK_RESUME_TRIES)
- ata_link_warn(link, "link resume succeeded after %d retries\n",
- ATA_LINK_RESUME_TRIES - tries);
-
- if ((rc = sata_link_debounce(link, params, deadline)))
- return rc;
-
- /* clear SError, some PHYs require this even for SRST to work */
- if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
- rc = sata_scr_write(link, SCR_ERROR, serror);
-
- return rc != -EINVAL ? rc : 0;
-}
-
-/**
- * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
- * @link: ATA link to manipulate SControl for
- * @policy: LPM policy to configure
- * @spm_wakeup: initiate LPM transition to active state
- *
- * Manipulate the IPM field of the SControl register of @link
- * according to @policy. If @policy is ATA_LPM_MAX_POWER and
- * @spm_wakeup is %true, the SPM field is manipulated to wake up
- * the link. This function also clears PHYRDY_CHG before
- * returning.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- bool spm_wakeup)
-{
- struct ata_eh_context *ehc = &link->eh_context;
- bool woken_up = false;
- u32 scontrol;
- int rc;
-
- rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
- if (rc)
- return rc;
-
- switch (policy) {
- case ATA_LPM_MAX_POWER:
- /* disable all LPM transitions */
- scontrol |= (0x7 << 8);
- /* initiate transition to active state */
- if (spm_wakeup) {
- scontrol |= (0x4 << 12);
- woken_up = true;
- }
- break;
- case ATA_LPM_MED_POWER:
- /* allow LPM to PARTIAL */
- scontrol &= ~(0x1 << 8);
- scontrol |= (0x6 << 8);
- break;
- case ATA_LPM_MED_POWER_WITH_DIPM:
- case ATA_LPM_MIN_POWER_WITH_PARTIAL:
- case ATA_LPM_MIN_POWER:
- if (ata_link_nr_enabled(link) > 0)
- /* no restrictions on LPM transitions */
- scontrol &= ~(0x7 << 8);
- else {
- /* empty port, power off */
- scontrol &= ~0xf;
- scontrol |= (0x1 << 2);
- }
- break;
- default:
- WARN_ON(1);
- }
-
- rc = sata_scr_write(link, SCR_CONTROL, scontrol);
- if (rc)
- return rc;
-
- /* give the link time to transit out of LPM state */
- if (woken_up)
- msleep(10);
-
- /* clear PHYRDY_CHG from SError */
- ehc->i.serror &= ~SERR_PHYRDY_CHG;
- return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
-}
+EXPORT_SYMBOL_GPL(ata_wait_after_reset);
/**
* ata_std_prereset - prepare for reset
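
sata_link_debounce(), sata_link_resume() and sata_link_scr_lpm() migrate to
libata-sata.c unchanged. The debounce contract is easy to restate: sample DET
every params[0] ms, succeed once the reading has held steady for params[1]
ms, give up after params[2] ms. Its control flow reduced to a shape (a sketch
with illustrative names, not the kernel function):

    unsigned long waited = 0, stable = 0;
    u32 cur, last = read_det();

    while (waited < timeout) {
            msleep(interval);
            waited += interval;
            cur = read_det();
            if (cur == last) {
                    stable += interval;
                    if (stable >= duration)
                            return 0;       /* settled */
            } else {
                    last = cur;             /* changed: start over */
                    stable = 0;
            }
    }
    return -EPIPE;          /* tell EH to lower the link speed */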
@@ -4026,118 +3529,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
return 0;
}
-
-/**
- * sata_link_hardreset - reset link via SATA phy reset
- * @link: link to reset
- * @timing: timing parameters { interval, duration, timeout } in msec
- * @deadline: deadline jiffies for the operation
- * @online: optional out parameter indicating link onlineness
- * @check_ready: optional callback to check link readiness
- *
- * SATA phy-reset @link using DET bits of SControl register.
- * After hardreset, link readiness is waited upon using
- * ata_wait_ready() if @check_ready is specified. LLDs are
- * allowed to not specify @check_ready and wait itself after this
- * function returns. Device classification is LLD's
- * responsibility.
- *
- * *@online is set to one iff reset succeeded and @link is online
- * after reset.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
- unsigned long deadline,
- bool *online, int (*check_ready)(struct ata_link *))
-{
- u32 scontrol;
- int rc;
-
- DPRINTK("ENTER\n");
-
- if (online)
- *online = false;
-
- if (sata_set_spd_needed(link)) {
- /* SATA spec says nothing about how to reconfigure
- * spd. To be on the safe side, turn off phy during
- * reconfiguration. This works for at least ICH7 AHCI
- * and Sil3124.
- */
- if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
- goto out;
-
- scontrol = (scontrol & 0x0f0) | 0x304;
-
- if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
- goto out;
-
- sata_set_spd(link);
- }
-
- /* issue phy wake/reset */
- if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
- goto out;
-
- scontrol = (scontrol & 0x0f0) | 0x301;
-
- if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
- goto out;
-
- /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
- * 10.4.2 says at least 1 ms.
- */
- ata_msleep(link->ap, 1);
-
- /* bring link back */
- rc = sata_link_resume(link, timing, deadline);
- if (rc)
- goto out;
- /* if link is offline nothing more to do */
- if (ata_phys_link_offline(link))
- goto out;
-
- /* Link is online. From this point, -ENODEV too is an error. */
- if (online)
- *online = true;
-
- if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
- /* If PMP is supported, we have to do follow-up SRST.
- * Some PMPs don't send D2H Reg FIS after hardreset if
- * the first port is empty. Wait only for
- * ATA_TMOUT_PMP_SRST_WAIT.
- */
- if (check_ready) {
- unsigned long pmp_deadline;
-
- pmp_deadline = ata_deadline(jiffies,
- ATA_TMOUT_PMP_SRST_WAIT);
- if (time_after(pmp_deadline, deadline))
- pmp_deadline = deadline;
- ata_wait_ready(link, pmp_deadline, check_ready);
- }
- rc = -EAGAIN;
- goto out;
- }
-
- rc = 0;
- if (check_ready)
- rc = ata_wait_ready(link, deadline, check_ready);
- out:
- if (rc && rc != -EAGAIN) {
- /* online is set iff link is online && reset succeeded */
- if (online)
- *online = false;
- ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
- }
- DPRINTK("EXIT, rc=%d\n", rc);
- return rc;
-}
+EXPORT_SYMBOL_GPL(ata_std_prereset);
/**
* sata_std_hardreset - COMRESET w/o waiting or classification
@@ -4164,6 +3556,7 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
return online ? -EAGAIN : rc;
}
+EXPORT_SYMBOL_GPL(sata_std_hardreset);
/**
* ata_std_postreset - standard postreset callback
@@ -4192,6 +3585,7 @@ void ata_std_postreset(struct ata_link *link, unsigned int *classes)
DPRINTK("EXIT\n");
}
+EXPORT_SYMBOL_GPL(ata_std_postreset);
/**
* ata_dev_same_device - Determine whether new ID matches configured device
@@ -4979,11 +4373,13 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
return ATA_DEFER_LINK;
}
+EXPORT_SYMBOL_GPL(ata_std_qc_defer);
enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
return AC_ERR_OK;
}
+EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/**
* ata_sg_init - Associate command with scatter-gather table.
@@ -5327,6 +4723,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
__ata_qc_complete(qc);
}
}
+EXPORT_SYMBOL_GPL(ata_qc_complete);
/**
* ata_qc_get_active - get bitmask of active qcs
@@ -5353,64 +4750,6 @@ u64 ata_qc_get_active(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_qc_get_active);
/**
- * ata_qc_complete_multiple - Complete multiple qcs successfully
- * @ap: port in question
- * @qc_active: new qc_active mask
- *
- * Complete in-flight commands. This functions is meant to be
- * called from low-level driver's interrupt routine to complete
- * requests normally. ap->qc_active and @qc_active is compared
- * and commands are completed accordingly.
- *
- * Always use this function when completing multiple NCQ commands
- * from IRQ handlers instead of calling ata_qc_complete()
- * multiple times to keep IRQ expect status properly in sync.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * Number of completed commands on success, -errno otherwise.
- */
-int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
-{
- u64 done_mask, ap_qc_active = ap->qc_active;
- int nr_done = 0;
-
- /*
- * If the internal tag is set on ap->qc_active, then we care about
- * bit0 on the passed in qc_active mask. Move that bit up to match
- * the internal tag.
- */
- if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
- qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
- qc_active ^= qc_active & 0x01;
- }
-
- done_mask = ap_qc_active ^ qc_active;
-
- if (unlikely(done_mask & qc_active)) {
- ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
- ap->qc_active, qc_active);
- return -EINVAL;
- }
-
- while (done_mask) {
- struct ata_queued_cmd *qc;
- unsigned int tag = __ffs64(done_mask);
-
- qc = ata_qc_from_tag(ap, tag);
- if (qc) {
- ata_qc_complete(qc);
- nr_done++;
- }
- done_mask &= ~(1ULL << tag);
- }
-
- return nr_done;
-}
-
-/**
* ata_qc_issue - issue taskfile to device
* @qc: command to issue to device
*
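
ata_qc_complete_multiple() moves out with the rest of the NCQ machinery. Its
central trick, per the removed body above: XOR the previous and current
active-tag masks to get exactly the tags that completed, then walk the set
bits. In isolation (complete_tag() is an illustrative stand-in):

    u64 done_mask = old_qc_active ^ new_qc_active;

    while (done_mask) {
            unsigned int tag = __ffs64(done_mask);

            complete_tag(tag);
            done_mask &= ~(1ULL << tag);
    }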
@@ -5486,111 +4825,6 @@ err:
}
/**
- * sata_scr_valid - test whether SCRs are accessible
- * @link: ATA link to test SCR accessibility for
- *
- * Test whether SCRs are accessible for @link.
- *
- * LOCKING:
- * None.
- *
- * RETURNS:
- * 1 if SCRs are accessible, 0 otherwise.
- */
-int sata_scr_valid(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
-
- return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
-}
-
-/**
- * sata_scr_read - read SCR register of the specified port
- * @link: ATA link to read SCR for
- * @reg: SCR to read
- * @val: Place to store read value
- *
- * Read SCR register @reg of @link into *@val. This function is
- * guaranteed to succeed if @link is ap->link, the cable type of
- * the port is SATA and the port implements ->scr_read.
- *
- * LOCKING:
- * None if @link is ap->link. Kernel thread context otherwise.
- *
- * RETURNS:
- * 0 on success, negative errno on failure.
- */
-int sata_scr_read(struct ata_link *link, int reg, u32 *val)
-{
- if (ata_is_host_link(link)) {
- if (sata_scr_valid(link))
- return link->ap->ops->scr_read(link, reg, val);
- return -EOPNOTSUPP;
- }
-
- return sata_pmp_scr_read(link, reg, val);
-}
-
-/**
- * sata_scr_write - write SCR register of the specified port
- * @link: ATA link to write SCR for
- * @reg: SCR to write
- * @val: value to write
- *
- * Write @val to SCR register @reg of @link. This function is
- * guaranteed to succeed if @link is ap->link, the cable type of
- * the port is SATA and the port implements ->scr_read.
- *
- * LOCKING:
- * None if @link is ap->link. Kernel thread context otherwise.
- *
- * RETURNS:
- * 0 on success, negative errno on failure.
- */
-int sata_scr_write(struct ata_link *link, int reg, u32 val)
-{
- if (ata_is_host_link(link)) {
- if (sata_scr_valid(link))
- return link->ap->ops->scr_write(link, reg, val);
- return -EOPNOTSUPP;
- }
-
- return sata_pmp_scr_write(link, reg, val);
-}
-
-/**
- * sata_scr_write_flush - write SCR register of the specified port and flush
- * @link: ATA link to write SCR for
- * @reg: SCR to write
- * @val: value to write
- *
- * This function is identical to sata_scr_write() except that this
- * function performs flush after writing to the register.
- *
- * LOCKING:
- * None if @link is ap->link. Kernel thread context otherwise.
- *
- * RETURNS:
- * 0 on success, negative errno on failure.
- */
-int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
-{
- if (ata_is_host_link(link)) {
- int rc;
-
- if (sata_scr_valid(link)) {
- rc = link->ap->ops->scr_write(link, reg, val);
- if (rc == 0)
- rc = link->ap->ops->scr_read(link, reg, &val);
- return rc;
- }
- return -EOPNOTSUPP;
- }
-
- return sata_pmp_scr_write(link, reg, val);
-}
-
-/**
* ata_phys_link_online - test whether the given link is online
* @link: ATA link to test
*
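
The SCR accessors follow the same path to libata-sata.c. Their dispatch is
worth keeping in mind: a host link goes straight to the port's
->scr_read()/->scr_write() ops, while a fan-out link behind a port multiplier
is routed through the PMP accessors. Condensed, with SCR_STATUS as the
example:

    if (ata_is_host_link(link))
            rc = sata_scr_valid(link) ?
                    link->ap->ops->scr_read(link, SCR_STATUS, &val) :
                    -EOPNOTSUPP;
    else
            rc = sata_pmp_scr_read(link, SCR_STATUS, &val);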
@@ -5663,6 +4897,7 @@ bool ata_link_online(struct ata_link *link)
return ata_phys_link_online(link) ||
(slave && ata_phys_link_online(slave));
}
+EXPORT_SYMBOL_GPL(ata_link_online);
/**
* ata_link_offline - test whether the given link is offline
@@ -5689,6 +4924,7 @@ bool ata_link_offline(struct ata_link *link)
return ata_phys_link_offline(link) &&
(!slave || ata_phys_link_offline(slave));
}
+EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
@@ -5875,6 +5111,7 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
host->dev->power.power_state = mesg;
return 0;
}
+EXPORT_SYMBOL_GPL(ata_host_suspend);
/**
* ata_host_resume - resume host
@@ -5886,6 +5123,7 @@ void ata_host_resume(struct ata_host *host)
{
host->dev->power.power_state = PMSG_ON;
}
+EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
const struct device_type ata_port_type = {
@@ -6105,6 +5343,7 @@ void ata_host_put(struct ata_host *host)
{
kref_put(&host->kref, ata_host_release);
}
+EXPORT_SYMBOL_GPL(ata_host_put);
/**
* ata_host_alloc - allocate and init basic ATA host resources
@@ -6178,6 +5417,7 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
kfree(host);
return NULL;
}
+EXPORT_SYMBOL_GPL(ata_host_alloc);
/**
* ata_host_alloc_pinfo - alloc host and init with port_info array
@@ -6226,68 +5466,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
return host;
}
-
-/**
- * ata_slave_link_init - initialize slave link
- * @ap: port to initialize slave link for
- *
- * Create and initialize slave link for @ap. This enables slave
- * link handling on the port.
- *
- * In libata, a port contains links and a link contains devices.
- * There is single host link but if a PMP is attached to it,
- * there can be multiple fan-out links. On SATA, there's usually
- * a single device connected to a link but PATA and SATA
- * controllers emulating TF based interface can have two - master
- * and slave.
- *
- * However, there are a few controllers which don't fit into this
- * abstraction too well - SATA controllers which emulate TF
- * interface with both master and slave devices but also have
- * separate SCR register sets for each device. These controllers
- * need separate links for physical link handling
- * (e.g. onlineness, link speed) but should be treated like a
- * traditional M/S controller for everything else (e.g. command
- * issue, softreset).
- *
- * slave_link is libata's way of handling this class of
- * controllers without impacting core layer too much. For
- * anything other than physical link handling, the default host
- * link is used for both master and slave. For physical link
- * handling, separate @ap->slave_link is used. All dirty details
- * are implemented inside libata core layer. From LLD's POV, the
- * only difference is that prereset, hardreset and postreset are
- * called once more for the slave link, so the reset sequence
- * looks like the following.
- *
- * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
- * softreset(M) -> postreset(M) -> postreset(S)
- *
- * Note that softreset is called only for the master. Softreset
- * resets both M/S by definition, so SRST on master should handle
- * both (the standard method will work just fine).
- *
- * LOCKING:
- * Should be called before host is registered.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int ata_slave_link_init(struct ata_port *ap)
-{
- struct ata_link *link;
-
- WARN_ON(ap->slave_link);
- WARN_ON(ap->flags & ATA_FLAG_PMP);
-
- link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link)
- return -ENOMEM;
-
- ata_link_init(ap, link, 1);
- ap->slave_link = link;
- return 0;
-}
+EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
static void ata_host_stop(struct device *gendev, void *res)
{
@@ -6436,6 +5615,7 @@ int ata_host_start(struct ata_host *host)
devres_free(start_dr);
return rc;
}
+EXPORT_SYMBOL_GPL(ata_host_start);
/**
* ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
@@ -6454,6 +5634,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
host->ops = ops;
kref_init(&host->kref);
}
+EXPORT_SYMBOL_GPL(ata_host_init);
void __ata_port_probe(struct ata_port *ap)
{
@@ -6609,6 +5790,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
return rc;
}
+EXPORT_SYMBOL_GPL(ata_host_register);
/**
* ata_host_activate - start host, request IRQ and register it
@@ -6671,6 +5853,7 @@ int ata_host_activate(struct ata_host *host, int irq,
return rc;
}
+EXPORT_SYMBOL_GPL(ata_host_activate);
/**
* ata_port_detach - Detach ATA port in preparation of device removal
@@ -6746,6 +5929,7 @@ void ata_host_detach(struct ata_host *host)
/* the host is dead now, dissociate ACPI */
ata_acpi_dissociate(host);
}
+EXPORT_SYMBOL_GPL(ata_host_detach);
#ifdef CONFIG_PCI
@@ -6766,6 +5950,7 @@ void ata_pci_remove_one(struct pci_dev *pdev)
ata_host_detach(host);
}
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
@@ -6786,6 +5971,7 @@ void ata_pci_shutdown_one(struct pci_dev *pdev)
ap->ops->port_stop(ap);
}
}
+EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
@@ -6820,6 +6006,7 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
return (tmp == bits->val) ? 1 : 0;
}
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
@@ -6830,6 +6017,7 @@ void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
if (mesg.event & PM_EVENT_SLEEP)
pci_set_power_state(pdev, PCI_D3hot);
}
+EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
@@ -6848,6 +6036,7 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)
pci_set_master(pdev);
return 0;
}
+EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
@@ -6862,6 +6051,7 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
return 0;
}
+EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
int ata_pci_device_resume(struct pci_dev *pdev)
{
@@ -6873,8 +6063,8 @@ int ata_pci_device_resume(struct pci_dev *pdev)
ata_host_resume(host);
return rc;
}
+EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
-
#endif /* CONFIG_PCI */
/**
@@ -6896,7 +6086,9 @@ int ata_platform_remove_one(struct platform_device *pdev)
return 0;
}
+EXPORT_SYMBOL_GPL(ata_platform_remove_one);
+#ifdef CONFIG_ATA_FORCE
static int __init ata_parse_force_one(char **cur,
struct ata_force_ent *force_ent,
const char **reason)
@@ -7076,6 +6268,15 @@ static void __init ata_parse_force_param(void)
ata_force_tbl_size = idx;
}
+static void ata_free_force_param(void)
+{
+ kfree(ata_force_tbl);
+}
+#else
+static inline void ata_parse_force_param(void) { }
+static inline void ata_free_force_param(void) { }
+#endif
+
static int __init ata_init(void)
{
int rc;
@@ -7084,7 +6285,7 @@ static int __init ata_init(void)
rc = ata_sff_init();
if (rc) {
- kfree(ata_force_tbl);
+ ata_free_force_param();
return rc;
}
@@ -7108,7 +6309,7 @@ static void __exit ata_exit(void)
ata_release_transport(ata_scsi_transport_template);
libata_transport_exit();
ata_sff_exit();
- kfree(ata_force_tbl);
+ ata_free_force_param();
}
subsys_initcall(ata_init);
@@ -7120,6 +6321,7 @@ int ata_ratelimit(void)
{
return __ratelimit(&ratelimit);
}
+EXPORT_SYMBOL_GPL(ata_ratelimit);
/**
* ata_msleep - ATA EH owner aware msleep
@@ -7152,6 +6354,7 @@ void ata_msleep(struct ata_port *ap, unsigned int msecs)
if (owns_eh)
ata_eh_acquire(ap);
}
+EXPORT_SYMBOL_GPL(ata_msleep);
/**
* ata_wait_register - wait until register value changes
@@ -7198,38 +6401,7 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
return tmp;
}
-
-/**
- * sata_lpm_ignore_phy_events - test if PHY event should be ignored
- * @link: Link receiving the event
- *
- * Test whether the received PHY event has to be ignored or not.
- *
- * LOCKING:
- * None:
- *
- * RETURNS:
- * True if the event has to be ignored.
- */
-bool sata_lpm_ignore_phy_events(struct ata_link *link)
-{
- unsigned long lpm_timeout = link->last_lpm_change +
- msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
-
- /* if LPM is enabled, PHYRDY doesn't mean anything */
- if (link->lpm_policy > ATA_LPM_MAX_POWER)
- return true;
-
- /* ignore the first PHY event after the LPM policy changed
- * as it is might be spurious
- */
- if ((link->flags & ATA_LFLAG_CHANGED) &&
- time_before(jiffies, lpm_timeout))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
+EXPORT_SYMBOL_GPL(ata_wait_register);
/*
* Dummy port_ops
@@ -7251,10 +6423,12 @@ struct ata_port_operations ata_dummy_port_ops = {
.sched_eh = ata_std_sched_eh,
.end_eh = ata_std_end_eh,
};
+EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
const struct ata_port_info ata_dummy_port_info = {
.port_ops = &ata_dummy_port_ops,
};
+EXPORT_SYMBOL_GPL(ata_dummy_port_info);
/*
* Utility print functions
@@ -7322,127 +6496,3 @@ void ata_print_version(const struct device *dev, const char *version)
dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
-
-/*
- * libata is essentially a library of internal helper functions for
- * low-level ATA host controller drivers. As such, the API/ABI is
- * likely to change as new drivers are added and updated.
- * Do not depend on ABI/API stability.
- */
-EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
-EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
-EXPORT_SYMBOL_GPL(sata_deb_timing_long);
-EXPORT_SYMBOL_GPL(ata_base_port_ops);
-EXPORT_SYMBOL_GPL(sata_port_ops);
-EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
-EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-EXPORT_SYMBOL_GPL(ata_link_next);
-EXPORT_SYMBOL_GPL(ata_dev_next);
-EXPORT_SYMBOL_GPL(ata_std_bios_param);
-EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
-EXPORT_SYMBOL_GPL(ata_host_init);
-EXPORT_SYMBOL_GPL(ata_host_alloc);
-EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
-EXPORT_SYMBOL_GPL(ata_slave_link_init);
-EXPORT_SYMBOL_GPL(ata_host_start);
-EXPORT_SYMBOL_GPL(ata_host_register);
-EXPORT_SYMBOL_GPL(ata_host_activate);
-EXPORT_SYMBOL_GPL(ata_host_detach);
-EXPORT_SYMBOL_GPL(ata_sg_init);
-EXPORT_SYMBOL_GPL(ata_qc_complete);
-EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
-EXPORT_SYMBOL_GPL(atapi_cmd_type);
-EXPORT_SYMBOL_GPL(ata_tf_to_fis);
-EXPORT_SYMBOL_GPL(ata_tf_from_fis);
-EXPORT_SYMBOL_GPL(ata_pack_xfermask);
-EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
-EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
-EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
-EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
-EXPORT_SYMBOL_GPL(ata_mode_string);
-EXPORT_SYMBOL_GPL(ata_id_xfermask);
-EXPORT_SYMBOL_GPL(ata_do_set_mode);
-EXPORT_SYMBOL_GPL(ata_std_qc_defer);
-EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-EXPORT_SYMBOL_GPL(ata_dev_disable);
-EXPORT_SYMBOL_GPL(sata_set_spd);
-EXPORT_SYMBOL_GPL(ata_wait_after_reset);
-EXPORT_SYMBOL_GPL(sata_link_debounce);
-EXPORT_SYMBOL_GPL(sata_link_resume);
-EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
-EXPORT_SYMBOL_GPL(ata_std_prereset);
-EXPORT_SYMBOL_GPL(sata_link_hardreset);
-EXPORT_SYMBOL_GPL(sata_std_hardreset);
-EXPORT_SYMBOL_GPL(ata_std_postreset);
-EXPORT_SYMBOL_GPL(ata_dev_classify);
-EXPORT_SYMBOL_GPL(ata_dev_pair);
-EXPORT_SYMBOL_GPL(ata_ratelimit);
-EXPORT_SYMBOL_GPL(ata_msleep);
-EXPORT_SYMBOL_GPL(ata_wait_register);
-EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
-EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
-EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
-EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
-EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
-EXPORT_SYMBOL_GPL(sata_scr_valid);
-EXPORT_SYMBOL_GPL(sata_scr_read);
-EXPORT_SYMBOL_GPL(sata_scr_write);
-EXPORT_SYMBOL_GPL(sata_scr_write_flush);
-EXPORT_SYMBOL_GPL(ata_link_online);
-EXPORT_SYMBOL_GPL(ata_link_offline);
-#ifdef CONFIG_PM
-EXPORT_SYMBOL_GPL(ata_host_suspend);
-EXPORT_SYMBOL_GPL(ata_host_resume);
-#endif /* CONFIG_PM */
-EXPORT_SYMBOL_GPL(ata_id_string);
-EXPORT_SYMBOL_GPL(ata_id_c_string);
-EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
-EXPORT_SYMBOL_GPL(ata_scsi_simulate);
-
-EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
-EXPORT_SYMBOL_GPL(ata_timing_find_mode);
-EXPORT_SYMBOL_GPL(ata_timing_compute);
-EXPORT_SYMBOL_GPL(ata_timing_merge);
-EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
-EXPORT_SYMBOL_GPL(ata_pci_remove_one);
-#ifdef CONFIG_PM
-EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
-EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
-EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
-EXPORT_SYMBOL_GPL(ata_pci_device_resume);
-#endif /* CONFIG_PM */
-#endif /* CONFIG_PCI */
-
-EXPORT_SYMBOL_GPL(ata_platform_remove_one);
-
-EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
-EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
-EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
-EXPORT_SYMBOL_GPL(ata_port_desc);
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
-#endif /* CONFIG_PCI */
-EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
-EXPORT_SYMBOL_GPL(ata_link_abort);
-EXPORT_SYMBOL_GPL(ata_port_abort);
-EXPORT_SYMBOL_GPL(ata_port_freeze);
-EXPORT_SYMBOL_GPL(sata_async_notification);
-EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
-EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
-EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
-EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
-EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
-EXPORT_SYMBOL_GPL(ata_do_eh);
-EXPORT_SYMBOL_GPL(ata_std_error_handler);
-
-EXPORT_SYMBOL_GPL(ata_cable_40wire);
-EXPORT_SYMBOL_GPL(ata_cable_80wire);
-EXPORT_SYMBOL_GPL(ata_cable_unknown);
-EXPORT_SYMBOL_GPL(ata_cable_ignore);
-EXPORT_SYMBOL_GPL(ata_cable_sata);
-EXPORT_SYMBOL_GPL(ata_host_get);
-EXPORT_SYMBOL_GPL(ata_host_put);
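
This removed block is the flip side of all the EXPORT_SYMBOL_GPL() additions
earlier in the file: the single export list at the end of libata-core.c is
retired in favor of tagging each function at its definition, which keeps the
export visible next to the code and lets the SATA-only symbols move to
libata-sata.c together with their functions. The resulting convention, using
one export from this very patch:

    int ata_ratelimit(void)
    {
            return __ratelimit(&ratelimit);
    }
    EXPORT_SYMBOL_GPL(ata_ratelimit);       /* export sits with the code */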
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3bfd9da58473..474c6c34fe02 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2,10 +2,6 @@
/*
* libata-eh.c - libata error handling
*
- * Maintained by: Tejun Heo <tj@kernel.org>
- * Please ALWAYS copy linux-ide@vger.kernel.org
- * on emails.
- *
* Copyright 2006 Tejun Heo <htejun@gmail.com>
*
* libata documentation is available via 'make {ps|pdf}docs',
@@ -184,6 +180,7 @@ void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
__ata_ehi_pushv_desc(ehi, fmt, args);
va_end(args);
}
+EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
/**
* ata_ehi_push_desc - push error description with separator
@@ -207,6 +204,7 @@ void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
__ata_ehi_pushv_desc(ehi, fmt, args);
va_end(args);
}
+EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
/**
* ata_ehi_clear_desc - clean error description
@@ -222,6 +220,7 @@ void ata_ehi_clear_desc(struct ata_eh_info *ehi)
ehi->desc[0] = '\0';
ehi->desc_len = 0;
}
+EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
/**
* ata_port_desc - append port description
@@ -249,9 +248,9 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
va_end(args);
}
+EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
-
/**
* ata_port_pbar_desc - append PCI BAR description
* @ap: target ATA port
@@ -288,7 +287,7 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
ata_port_desc(ap, "%s 0x%llx", name,
start + (unsigned long long)offset);
}
-
+EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
static int ata_lookup_timeout_table(u8 cmd)
@@ -973,6 +972,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
/* see: ata_std_sched_eh, unless you know better */
ap->ops->sched_eh(ap);
}
+EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
@@ -1015,6 +1015,7 @@ int ata_link_abort(struct ata_link *link)
{
return ata_do_link_abort(link->ap, link);
}
+EXPORT_SYMBOL_GPL(ata_link_abort);
/**
* ata_port_abort - abort all qc's on the port
@@ -1032,6 +1033,7 @@ int ata_port_abort(struct ata_port *ap)
{
return ata_do_link_abort(ap, NULL);
}
+EXPORT_SYMBOL_GPL(ata_port_abort);
/**
* __ata_port_freeze - freeze port
@@ -1088,79 +1090,7 @@ int ata_port_freeze(struct ata_port *ap)
return nr_aborted;
}
-
-/**
- * sata_async_notification - SATA async notification handler
- * @ap: ATA port where async notification is received
- *
- * Handler to be called when async notification via SDB FIS is
- * received. This function schedules EH if necessary.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * 1 if EH is scheduled, 0 otherwise.
- */
-int sata_async_notification(struct ata_port *ap)
-{
- u32 sntf;
- int rc;
-
- if (!(ap->flags & ATA_FLAG_AN))
- return 0;
-
- rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
- if (rc == 0)
- sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
-
- if (!sata_pmp_attached(ap) || rc) {
- /* PMP is not attached or SNTF is not available */
- if (!sata_pmp_attached(ap)) {
- /* PMP is not attached. Check whether ATAPI
- * AN is configured. If so, notify media
- * change.
- */
- struct ata_device *dev = ap->link.device;
-
- if ((dev->class == ATA_DEV_ATAPI) &&
- (dev->flags & ATA_DFLAG_AN))
- ata_scsi_media_change_notify(dev);
- return 0;
- } else {
- /* PMP is attached but SNTF is not available.
- * ATAPI async media change notification is
- * not used. The PMP must be reporting PHY
- * status change, schedule EH.
- */
- ata_port_schedule_eh(ap);
- return 1;
- }
- } else {
- /* PMP is attached and SNTF is available */
- struct ata_link *link;
-
- /* check and notify ATAPI AN */
- ata_for_each_link(link, ap, EDGE) {
- if (!(sntf & (1 << link->pmp)))
- continue;
-
- if ((link->device->class == ATA_DEV_ATAPI) &&
- (link->device->flags & ATA_DFLAG_AN))
- ata_scsi_media_change_notify(link->device);
- }
-
- /* If PMP is reporting that PHY status of some
- * downstream ports has changed, schedule EH.
- */
- if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
- ata_port_schedule_eh(ap);
- return 1;
- }
-
- return 0;
- }
-}
+EXPORT_SYMBOL_GPL(ata_port_freeze);
/**
* ata_eh_freeze_port - EH helper to freeze port
@@ -1182,6 +1112,7 @@ void ata_eh_freeze_port(struct ata_port *ap)
__ata_port_freeze(ap);
spin_unlock_irqrestore(ap->lock, flags);
}
+EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
/**
* ata_port_thaw_port - EH helper to thaw port
@@ -1289,6 +1220,7 @@ void ata_dev_disable(struct ata_device *dev)
*/
ata_ering_clear(&dev->ering);
}
+EXPORT_SYMBOL_GPL(ata_dev_disable);
/**
* ata_eh_detach_dev - detach ATA device
@@ -1420,62 +1352,6 @@ static const char *ata_err_string(unsigned int err_mask)
}
/**
- * ata_eh_read_log_10h - Read log page 10h for NCQ error details
- * @dev: Device to read log page 10h from
- * @tag: Resulting tag of the failed command
- * @tf: Resulting taskfile registers of the failed command
- *
- * Read log page 10h to obtain NCQ error details and clear error
- * condition.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-static int ata_eh_read_log_10h(struct ata_device *dev,
- int *tag, struct ata_taskfile *tf)
-{
- u8 *buf = dev->link->ap->sector_buf;
- unsigned int err_mask;
- u8 csum;
- int i;
-
- err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
- if (err_mask)
- return -EIO;
-
- csum = 0;
- for (i = 0; i < ATA_SECT_SIZE; i++)
- csum += buf[i];
- if (csum)
- ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
- csum);
-
- if (buf[0] & 0x80)
- return -ENOENT;
-
- *tag = buf[0] & 0x1f;
-
- tf->command = buf[2];
- tf->feature = buf[3];
- tf->lbal = buf[4];
- tf->lbam = buf[5];
- tf->lbah = buf[6];
- tf->device = buf[7];
- tf->hob_lbal = buf[8];
- tf->hob_lbam = buf[9];
- tf->hob_lbah = buf[10];
- tf->nsect = buf[12];
- tf->hob_nsect = buf[13];
- if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
- tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
-
- return 0;
-}
-
-/**
* atapi_eh_tur - perform ATAPI TEST_UNIT_READY
* @dev: target ATAPI device
* @r_sense_key: out parameter for sense_key
@@ -1659,80 +1535,6 @@ static void ata_eh_analyze_serror(struct ata_link *link)
}
/**
- * ata_eh_analyze_ncq_error - analyze NCQ error
- * @link: ATA link to analyze NCQ error for
- *
- * Read log page 10h, determine the offending qc and acquire
- * error status TF. For NCQ device errors, all LLDDs have to do
- * is setting AC_ERR_DEV in ehi->err_mask. This function takes
- * care of the rest.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- */
-void ata_eh_analyze_ncq_error(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ata_eh_context *ehc = &link->eh_context;
- struct ata_device *dev = link->device;
- struct ata_queued_cmd *qc;
- struct ata_taskfile tf;
- int tag, rc;
-
- /* if frozen, we can't do much */
- if (ap->pflags & ATA_PFLAG_FROZEN)
- return;
-
- /* is it NCQ device error? */
- if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
- return;
-
- /* has LLDD analyzed already? */
- ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED))
- continue;
-
- if (qc->err_mask)
- return;
- }
-
- /* okay, this error is ours */
- memset(&tf, 0, sizeof(tf));
- rc = ata_eh_read_log_10h(dev, &tag, &tf);
- if (rc) {
- ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
- rc);
- return;
- }
-
- if (!(link->sactive & (1 << tag))) {
- ata_link_err(link, "log page 10h reported inactive tag %d\n",
- tag);
- return;
- }
-
- /* we've got the perpetrator, condemn it */
- qc = __ata_qc_from_tag(ap, tag);
- memcpy(&qc->result_tf, &tf, sizeof(tf));
- qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
- qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if (dev->class == ATA_DEV_ZAC &&
- ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
- char sense_key, asc, ascq;
-
- sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
- asc = (qc->result_tf.auxiliary >> 8) & 0xff;
- ascq = qc->result_tf.auxiliary & 0xff;
- ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
- ata_scsi_set_sense_information(dev, qc->scsicmd,
- &qc->result_tf);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- }
-
- ehc->i.err_mask &= ~AC_ERR_DEV;
-}
-
-/**
* ata_eh_analyze_tf - analyze taskfile of a failed qc
* @qc: qc to analyze
* @tf: Taskfile registers to analyze
@@ -3436,7 +3238,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
int rc;
/* if the link or host doesn't do LPM, noop */
- if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
+ if (!IS_ENABLED(CONFIG_SATA_HOST) ||
+ (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
return 0;
/*
@@ -4052,6 +3855,7 @@ void ata_std_error_handler(struct ata_port *ap)
ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
+EXPORT_SYMBOL_GPL(ata_std_error_handler);
#ifdef CONFIG_PM
/**
diff --git a/drivers/ata/libata-pata-timings.c b/drivers/ata/libata-pata-timings.c
new file mode 100644
index 000000000000..af341226cc64
--- /dev/null
+++ b/drivers/ata/libata-pata-timings.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Helper library for PATA timings
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+
+/*
+ * This mode timing computation functionality is ported over from
+ * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
+ */
+/*
+ * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * These were taken from ATA/ATAPI-6 standard, rev 0a, except
+ * for UDMA6, which is currently supported only by Maxtor drives.
+ *
+ * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
+ */
+
+static const struct ata_timing ata_timing[] = {
+/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
+ { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
+ { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
+ { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
+ { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
+ { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
+ { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
+ { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
+
+ { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
+ { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
+ { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
+
+ { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
+ { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
+ { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
+ { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
+ { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
+
+/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
+ { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
+ { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
+ { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
+ { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
+ { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
+ { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
+ { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
+
+ { 0xFF }
+};
+
+#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
+#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0)
+
+static void ata_timing_quantize(const struct ata_timing *t,
+ struct ata_timing *q, int T, int UT)
+{
+ q->setup = EZ(t->setup, T);
+ q->act8b = EZ(t->act8b, T);
+ q->rec8b = EZ(t->rec8b, T);
+ q->cyc8b = EZ(t->cyc8b, T);
+ q->active = EZ(t->active, T);
+ q->recover = EZ(t->recover, T);
+ q->dmack_hold = EZ(t->dmack_hold, T);
+ q->cycle = EZ(t->cycle, T);
+ q->udma = EZ(t->udma, UT);
+}
+
+void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
+ struct ata_timing *m, unsigned int what)
+{
+ if (what & ATA_TIMING_SETUP)
+ m->setup = max(a->setup, b->setup);
+ if (what & ATA_TIMING_ACT8B)
+ m->act8b = max(a->act8b, b->act8b);
+ if (what & ATA_TIMING_REC8B)
+ m->rec8b = max(a->rec8b, b->rec8b);
+ if (what & ATA_TIMING_CYC8B)
+ m->cyc8b = max(a->cyc8b, b->cyc8b);
+ if (what & ATA_TIMING_ACTIVE)
+ m->active = max(a->active, b->active);
+ if (what & ATA_TIMING_RECOVER)
+ m->recover = max(a->recover, b->recover);
+ if (what & ATA_TIMING_DMACK_HOLD)
+ m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
+ if (what & ATA_TIMING_CYCLE)
+ m->cycle = max(a->cycle, b->cycle);
+ if (what & ATA_TIMING_UDMA)
+ m->udma = max(a->udma, b->udma);
+}
+EXPORT_SYMBOL_GPL(ata_timing_merge);
+
+const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
+{
+ const struct ata_timing *t = ata_timing;
+
+ while (xfer_mode > t->mode)
+ t++;
+
+ if (xfer_mode == t->mode)
+ return t;
+
+ WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
+ __func__, xfer_mode);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ata_timing_find_mode);
+
+int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+ struct ata_timing *t, int T, int UT)
+{
+ const u16 *id = adev->id;
+ const struct ata_timing *s;
+ struct ata_timing p;
+
+ /*
+ * Find the mode.
+ */
+ s = ata_timing_find_mode(speed);
+ if (!s)
+ return -EINVAL;
+
+ memcpy(t, s, sizeof(*s));
+
+ /*
+ * If the drive is an EIDE drive, it can tell us it needs extended
+ * PIO/MW_DMA cycle timing.
+ */
+
+ if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
+ memset(&p, 0, sizeof(p));
+
+ if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
+ if (speed <= XFER_PIO_2)
+ p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
+ else if ((speed <= XFER_PIO_4) ||
+ (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
+ p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
+ } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
+ p.cycle = id[ATA_ID_EIDE_DMA_MIN];
+
+ ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
+ }
+
+ /*
+ * Convert the timing to bus clock counts.
+ */
+
+ ata_timing_quantize(t, t, T, UT);
+
+ /*
+ * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
+	 * S.M.A.R.T. and some other commands. We have to ensure that the
+	 * DMA cycle timing is no faster than the fastest PIO timing.
+ */
+
+ if (speed > XFER_PIO_6) {
+ ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
+ ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
+ }
+
+ /*
+ * Lengthen active & recovery time so that cycle time is correct.
+ */
+
+ if (t->act8b + t->rec8b < t->cyc8b) {
+ t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
+ t->rec8b = t->cyc8b - t->act8b;
+ }
+
+ if (t->active + t->recover < t->cycle) {
+ t->active += (t->cycle - (t->active + t->recover)) / 2;
+ t->recover = t->cycle - t->active;
+ }
+
+ /*
+	 * In a few cases quantisation may produce enough error to
+	 * leave t->cycle too low for the sum of active and recovery;
+	 * if so, we must correct this.
+ */
+ if (t->active + t->recover > t->cycle)
+ t->cycle = t->active + t->recover;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_timing_compute);
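
To make the quantization above concrete: EZ() scales a nanosecond figure to picoseconds and divides by the clock period, rounding up. A standalone sketch, assuming a 33 MHz bus where callers pass T = 30000 (period in picoseconds, as existing PATA drivers appear to derive it):

#include <stdio.h>

/* Local copies of the helpers above; EZ() turns a nanosecond value
 * into clock counts for a period given in picoseconds, rounding up. */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit)     ((v)?ENOUGH(((v) * 1000), unit):0)

int main(void)
{
	int T = 30000;		/* assumed 33 MHz bus: 30 ns period, in ps */
	int cycle_ns = 600;	/* PIO-0 total cycle from the table above */

	/* ceil(600 ns / 30 ns) = 20 bus clocks */
	printf("PIO-0 cycle = %d clocks\n", EZ(cycle_ns, T));
	return 0;
}
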
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 3ff14071617c..79f2aeeb482a 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -763,6 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
if (dev->flags & ATA_DFLAG_DETACH) {
detach = 1;
+ rc = -ENODEV;
goto fail;
}
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
new file mode 100644
index 000000000000..c16423e44525
--- /dev/null
+++ b/drivers/ata/libata-sata.c
@@ -0,0 +1,1483 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SATA specific part of ATA helper library
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ * Copyright 2006 Tejun Heo <htejun@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/libata.h>
+
+#include "libata.h"
+#include "libata-transport.h"
+
+/* debounce timing parameters in msecs { interval, duration, timeout } */
+const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
+EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
+const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
+EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
+const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
+EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+
+/**
+ * sata_scr_valid - test whether SCRs are accessible
+ * @link: ATA link to test SCR accessibility for
+ *
+ * Test whether SCRs are accessible for @link.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * 1 if SCRs are accessible, 0 otherwise.
+ */
+int sata_scr_valid(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+
+ return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
+}
+EXPORT_SYMBOL_GPL(sata_scr_valid);
+
+/**
+ * sata_scr_read - read SCR register of the specified port
+ * @link: ATA link to read SCR for
+ * @reg: SCR to read
+ * @val: Place to store read value
+ *
+ * Read SCR register @reg of @link into *@val. This function is
+ * guaranteed to succeed if @link is ap->link, the cable type of
+ * the port is SATA and the port implements ->scr_read.
+ *
+ * LOCKING:
+ * None if @link is ap->link. Kernel thread context otherwise.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure.
+ */
+int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+ if (ata_is_host_link(link)) {
+ if (sata_scr_valid(link))
+ return link->ap->ops->scr_read(link, reg, val);
+ return -EOPNOTSUPP;
+ }
+
+ return sata_pmp_scr_read(link, reg, val);
+}
+EXPORT_SYMBOL_GPL(sata_scr_read);
+
+/**
+ * sata_scr_write - write SCR register of the specified port
+ * @link: ATA link to write SCR for
+ * @reg: SCR to write
+ * @val: value to write
+ *
+ * Write @val to SCR register @reg of @link. This function is
+ * guaranteed to succeed if @link is ap->link, the cable type of
+ * the port is SATA and the port implements ->scr_write.
+ *
+ * LOCKING:
+ * None if @link is ap->link. Kernel thread context otherwise.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure.
+ */
+int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+ if (ata_is_host_link(link)) {
+ if (sata_scr_valid(link))
+ return link->ap->ops->scr_write(link, reg, val);
+ return -EOPNOTSUPP;
+ }
+
+ return sata_pmp_scr_write(link, reg, val);
+}
+EXPORT_SYMBOL_GPL(sata_scr_write);
+
+/**
+ * sata_scr_write_flush - write SCR register of the specified port and flush
+ * @link: ATA link to write SCR for
+ * @reg: SCR to write
+ * @val: value to write
+ *
+ * This function is identical to sata_scr_write() except that this
+ * function performs flush after writing to the register.
+ *
+ * LOCKING:
+ * None if @link is ap->link. Kernel thread context otherwise.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure.
+ */
+int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+ if (ata_is_host_link(link)) {
+ int rc;
+
+ if (sata_scr_valid(link)) {
+ rc = link->ap->ops->scr_write(link, reg, val);
+ if (rc == 0)
+ rc = link->ap->ops->scr_read(link, reg, &val);
+ return rc;
+ }
+ return -EOPNOTSUPP;
+ }
+
+ return sata_pmp_scr_write(link, reg, val);
+}
+EXPORT_SYMBOL_GPL(sata_scr_write_flush);
+
+/**
+ * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
+ * @tf: Taskfile to convert
+ * @pmp: Port multiplier port
+ * @is_cmd: This FIS is for command
+ * @fis: Buffer into which data will be output
+ *
+ * Converts a standard ATA taskfile to a Serial ATA
+ * FIS structure (Register - Host to Device).
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
+{
+ fis[0] = 0x27; /* Register - Host to Device FIS */
+ fis[1] = pmp & 0xf; /* Port multiplier number*/
+ if (is_cmd)
+ fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
+
+ fis[2] = tf->command;
+ fis[3] = tf->feature;
+
+ fis[4] = tf->lbal;
+ fis[5] = tf->lbam;
+ fis[6] = tf->lbah;
+ fis[7] = tf->device;
+
+ fis[8] = tf->hob_lbal;
+ fis[9] = tf->hob_lbam;
+ fis[10] = tf->hob_lbah;
+ fis[11] = tf->hob_feature;
+
+ fis[12] = tf->nsect;
+ fis[13] = tf->hob_nsect;
+ fis[14] = 0;
+ fis[15] = tf->ctl;
+
+ fis[16] = tf->auxiliary & 0xff;
+ fis[17] = (tf->auxiliary >> 8) & 0xff;
+ fis[18] = (tf->auxiliary >> 16) & 0xff;
+ fis[19] = (tf->auxiliary >> 24) & 0xff;
+}
+EXPORT_SYMBOL_GPL(ata_tf_to_fis);
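
For reference, the 20-byte Register - Host to Device FIS built above starts with type 0x27 and sets bit 7 of byte 1 for command FISes. A pared-down userspace sketch (mini_tf is a stand-in for struct ata_taskfile, not kernel API):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct mini_tf {			/* stand-in for struct ata_taskfile */
	uint8_t command, feature, lbal, lbam, lbah, device;
};

int main(void)
{
	struct mini_tf tf = { .command = 0xEC /* IDENTIFY DEVICE */ };
	uint8_t fis[20];

	memset(fis, 0, sizeof(fis));
	fis[0] = 0x27;			/* FIS type: Register - Host to Device */
	fis[1] = 0 | (1 << 7);		/* PMP port 0, C bit set: command FIS */
	fis[2] = tf.command;
	fis[3] = tf.feature;
	fis[4] = tf.lbal;
	fis[5] = tf.lbam;
	fis[6] = tf.lbah;
	fis[7] = tf.device;

	printf("FIS type %#x, command %#x\n", fis[0], fis[2]);
	return 0;
}
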
+
+/**
+ * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
+ * @fis: Buffer from which data will be input
+ * @tf: Taskfile to output
+ *
+ * Converts a serial ATA FIS structure to a standard ATA taskfile.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
+{
+ tf->command = fis[2]; /* status */
+ tf->feature = fis[3]; /* error */
+
+ tf->lbal = fis[4];
+ tf->lbam = fis[5];
+ tf->lbah = fis[6];
+ tf->device = fis[7];
+
+ tf->hob_lbal = fis[8];
+ tf->hob_lbam = fis[9];
+ tf->hob_lbah = fis[10];
+
+ tf->nsect = fis[12];
+ tf->hob_nsect = fis[13];
+}
+EXPORT_SYMBOL_GPL(ata_tf_from_fis);
+
+/**
+ * sata_link_debounce - debounce SATA phy status
+ * @link: ATA link to debounce SATA phy status for
+ * @params: timing parameters { interval, duration, timeout } in msec
+ * @deadline: deadline jiffies for the operation
+ *
+ * Make sure SStatus of @link reaches stable state, determined by
+ * holding the same value where DET is not 1 for @duration polled
+ * every @interval, before @timeout. @timeout constrains the
+ * beginning of the stable state. Because DET gets stuck at 1 on
+ * some controllers after hot unplugging, this function waits
+ * until timeout and then returns 0 if DET is stable at 1.
+ *
+ * @timeout is further limited by @deadline. The sooner of the
+ * two is used.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_link_debounce(struct ata_link *link, const unsigned long *params,
+ unsigned long deadline)
+{
+ unsigned long interval = params[0];
+ unsigned long duration = params[1];
+ unsigned long last_jiffies, t;
+ u32 last, cur;
+ int rc;
+
+ t = ata_deadline(jiffies, params[2]);
+ if (time_before(t, deadline))
+ deadline = t;
+
+ if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
+ return rc;
+ cur &= 0xf;
+
+ last = cur;
+ last_jiffies = jiffies;
+
+ while (1) {
+ ata_msleep(link->ap, interval);
+ if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
+ return rc;
+ cur &= 0xf;
+
+ /* DET stable? */
+ if (cur == last) {
+ if (cur == 1 && time_before(jiffies, deadline))
+ continue;
+ if (time_after(jiffies,
+ ata_deadline(last_jiffies, duration)))
+ return 0;
+ continue;
+ }
+
+ /* unstable, start over */
+ last = cur;
+ last_jiffies = jiffies;
+
+ /* Check deadline. If debouncing failed, return
+ * -EPIPE to tell upper layer to lower link speed.
+ */
+ if (time_after(jiffies, deadline))
+ return -EPIPE;
+ }
+}
+EXPORT_SYMBOL_GPL(sata_link_debounce);
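
With sata_deb_timing_normal = { 5, 100, 2000 }, the loop above polls every 5 ms and declares the link stable once DET has held the same value for 100 ms, giving up at 2 s. A simplified userspace sketch of that debounce shape (read_det() is a made-up signal source, and the real function additionally special-cases DET stuck at 1):

#include <stdio.h>

/* Hypothetical signal source standing in for sata_scr_read():
 * noisy for a few polls, then stable at 3. */
static int read_det(int poll)
{
	return poll < 4 ? poll & 1 : 3;
}

int main(void)
{
	int interval_ms = 5, duration_ms = 100, timeout_ms = 2000;
	int last = -1, stable_ms = 0, t;

	for (t = 0; t < timeout_ms; t += interval_ms) {
		int cur = read_det(t / interval_ms);

		if (cur == last) {
			stable_ms += interval_ms;
			if (stable_ms >= duration_ms) {
				printf("stable at DET=%d after %d ms\n", cur, t);
				return 0;
			}
		} else {
			last = cur;	/* unstable, restart the window */
			stable_ms = 0;
		}
	}
	printf("debounce timed out\n");
	return 1;
}
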
+
+/**
+ * sata_link_resume - resume SATA link
+ * @link: ATA link to resume SATA
+ * @params: timing parameters { interval, duration, timeout } in msec
+ * @deadline: deadline jiffies for the operation
+ *
+ * Resume SATA phy @link and debounce it.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_link_resume(struct ata_link *link, const unsigned long *params,
+ unsigned long deadline)
+{
+ int tries = ATA_LINK_RESUME_TRIES;
+ u32 scontrol, serror;
+ int rc;
+
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ return rc;
+
+ /*
+	 * Writes to SControl sometimes get ignored on certain
+ * controllers (ata_piix SIDPR). Make sure DET actually is
+ * cleared.
+ */
+ do {
+ scontrol = (scontrol & 0x0f0) | 0x300;
+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+ return rc;
+ /*
+ * Some PHYs react badly if SStatus is pounded
+ * immediately after resuming. Delay 200ms before
+ * debouncing.
+ */
+ if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
+ ata_msleep(link->ap, 200);
+
+ /* is SControl restored correctly? */
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ return rc;
+ } while ((scontrol & 0xf0f) != 0x300 && --tries);
+
+ if ((scontrol & 0xf0f) != 0x300) {
+ ata_link_warn(link, "failed to resume link (SControl %X)\n",
+ scontrol);
+ return 0;
+ }
+
+ if (tries < ATA_LINK_RESUME_TRIES)
+ ata_link_warn(link, "link resume succeeded after %d retries\n",
+ ATA_LINK_RESUME_TRIES - tries);
+
+ if ((rc = sata_link_debounce(link, params, deadline)))
+ return rc;
+
+ /* clear SError, some PHYs require this even for SRST to work */
+ if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
+ rc = sata_scr_write(link, SCR_ERROR, serror);
+
+ return rc != -EINVAL ? rc : 0;
+}
+EXPORT_SYMBOL_GPL(sata_link_resume);
+
+/**
+ * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
+ * @link: ATA link to manipulate SControl for
+ * @policy: LPM policy to configure
+ * @spm_wakeup: initiate LPM transition to active state
+ *
+ * Manipulate the IPM field of the SControl register of @link
+ * according to @policy. If @policy is ATA_LPM_MAX_POWER and
+ * @spm_wakeup is %true, the SPM field is manipulated to wake up
+ * the link. This function also clears PHYRDY_CHG before
+ * returning.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ bool spm_wakeup)
+{
+ struct ata_eh_context *ehc = &link->eh_context;
+ bool woken_up = false;
+ u32 scontrol;
+ int rc;
+
+ rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+ if (rc)
+ return rc;
+
+ switch (policy) {
+ case ATA_LPM_MAX_POWER:
+ /* disable all LPM transitions */
+ scontrol |= (0x7 << 8);
+ /* initiate transition to active state */
+ if (spm_wakeup) {
+ scontrol |= (0x4 << 12);
+ woken_up = true;
+ }
+ break;
+ case ATA_LPM_MED_POWER:
+ /* allow LPM to PARTIAL */
+ scontrol &= ~(0x1 << 8);
+ scontrol |= (0x6 << 8);
+ break;
+ case ATA_LPM_MED_POWER_WITH_DIPM:
+ case ATA_LPM_MIN_POWER_WITH_PARTIAL:
+ case ATA_LPM_MIN_POWER:
+ if (ata_link_nr_enabled(link) > 0)
+ /* no restrictions on LPM transitions */
+ scontrol &= ~(0x7 << 8);
+ else {
+ /* empty port, power off */
+ scontrol &= ~0xf;
+ scontrol |= (0x1 << 2);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+ if (rc)
+ return rc;
+
+	/* give the link time to transition out of LPM state */
+ if (woken_up)
+ msleep(10);
+
+ /* clear PHYRDY_CHG from SError */
+ ehc->i.serror &= ~SERR_PHYRDY_CHG;
+ return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
+}
+EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
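
The bit arithmetic above follows the SControl register layout: DET in bits 3:0, SPD in 7:4, IPM in 11:8, with the 0x4 << 12 write hitting the SPM field used for wakeup. A decode sketch with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t scontrol = 0x300;	/* IPM = 3: PARTIAL/SLUMBER disabled */

	printf("DET=%u SPD=%u IPM=%u\n",
	       scontrol & 0xf,		/* detection/initialization control */
	       (scontrol >> 4) & 0xf,	/* speed limit, cf. __sata_set_spd_needed() */
	       (scontrol >> 8) & 0xf);	/* allowed LPM transitions */

	/* ATA_LPM_MAX_POWER above: forbid all LPM transitions */
	scontrol |= (0x7 << 8);
	printf("after max-power: IPM=%u\n", (scontrol >> 8) & 0xf);
	return 0;
}
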
+
+static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
+{
+ struct ata_link *host_link = &link->ap->link;
+ u32 limit, target, spd;
+
+ limit = link->sata_spd_limit;
+
+ /* Don't configure downstream link faster than upstream link.
+ * It doesn't speed up anything and some PMPs choke on such
+ * configuration.
+ */
+ if (!ata_is_host_link(link) && host_link->sata_spd)
+ limit &= (1 << host_link->sata_spd) - 1;
+
+ if (limit == UINT_MAX)
+ target = 0;
+ else
+ target = fls(limit);
+
+ spd = (*scontrol >> 4) & 0xf;
+ *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
+
+ return spd != target;
+}
+
+/**
+ * sata_set_spd_needed - is SATA spd configuration needed
+ * @link: Link in question
+ *
+ * Test whether the spd limit in SControl matches
+ * @link->sata_spd_limit. This function is used to determine
+ * whether hardreset is necessary to apply SATA spd
+ * configuration.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 1 if SATA spd configuration is needed, 0 otherwise.
+ */
+static int sata_set_spd_needed(struct ata_link *link)
+{
+ u32 scontrol;
+
+ if (sata_scr_read(link, SCR_CONTROL, &scontrol))
+ return 1;
+
+ return __sata_set_spd_needed(link, &scontrol);
+}
+
+/**
+ * sata_set_spd - set SATA spd according to spd limit
+ * @link: Link to set SATA spd for
+ *
+ * Set SATA spd of @link according to sata_spd_limit.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 if spd doesn't need to be changed, 1 if spd has been
+ * changed. Negative errno if SCR registers are inaccessible.
+ */
+int sata_set_spd(struct ata_link *link)
+{
+ u32 scontrol;
+ int rc;
+
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ return rc;
+
+ if (!__sata_set_spd_needed(link, &scontrol))
+ return 0;
+
+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+ return rc;
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(sata_set_spd);
+
+/**
+ * sata_link_hardreset - reset link via SATA phy reset
+ * @link: link to reset
+ * @timing: timing parameters { interval, duration, timeout } in msec
+ * @deadline: deadline jiffies for the operation
+ * @online: optional out parameter indicating link onlineness
+ * @check_ready: optional callback to check link readiness
+ *
+ * SATA phy-reset @link using DET bits of SControl register.
+ * After hardreset, link readiness is waited upon using
+ * ata_wait_ready() if @check_ready is specified. LLDs may
+ * omit @check_ready and do the waiting themselves after this
+ * function returns. Device classification is the LLD's
+ * responsibility.
+ *
+ * *@online is set to one iff reset succeeded and @link is online
+ * after reset.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
+ unsigned long deadline,
+ bool *online, int (*check_ready)(struct ata_link *))
+{
+ u32 scontrol;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ if (online)
+ *online = false;
+
+ if (sata_set_spd_needed(link)) {
+ /* SATA spec says nothing about how to reconfigure
+ * spd. To be on the safe side, turn off phy during
+ * reconfiguration. This works for at least ICH7 AHCI
+ * and Sil3124.
+ */
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ goto out;
+
+ scontrol = (scontrol & 0x0f0) | 0x304;
+
+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+ goto out;
+
+ sata_set_spd(link);
+ }
+
+ /* issue phy wake/reset */
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ goto out;
+
+ scontrol = (scontrol & 0x0f0) | 0x301;
+
+ if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
+ goto out;
+
+ /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
+ * 10.4.2 says at least 1 ms.
+ */
+ ata_msleep(link->ap, 1);
+
+ /* bring link back */
+ rc = sata_link_resume(link, timing, deadline);
+ if (rc)
+ goto out;
+ /* if link is offline nothing more to do */
+ if (ata_phys_link_offline(link))
+ goto out;
+
+ /* Link is online. From this point, -ENODEV too is an error. */
+ if (online)
+ *online = true;
+
+ if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
+ /* If PMP is supported, we have to do follow-up SRST.
+ * Some PMPs don't send D2H Reg FIS after hardreset if
+ * the first port is empty. Wait only for
+ * ATA_TMOUT_PMP_SRST_WAIT.
+ */
+ if (check_ready) {
+ unsigned long pmp_deadline;
+
+ pmp_deadline = ata_deadline(jiffies,
+ ATA_TMOUT_PMP_SRST_WAIT);
+ if (time_after(pmp_deadline, deadline))
+ pmp_deadline = deadline;
+ ata_wait_ready(link, pmp_deadline, check_ready);
+ }
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ if (check_ready)
+ rc = ata_wait_ready(link, deadline, check_ready);
+ out:
+ if (rc && rc != -EAGAIN) {
+ /* online is set iff link is online && reset succeeded */
+ if (online)
+ *online = false;
+ ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
+ }
+ DPRINTK("EXIT, rc=%d\n", rc);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sata_link_hardreset);
+
+/**
+ * ata_qc_complete_multiple - Complete multiple qcs successfully
+ * @ap: port in question
+ * @qc_active: new qc_active mask
+ *
+ * Complete in-flight commands. This function is meant to be
+ * called from the low-level driver's interrupt routine to complete
+ * requests normally. ap->qc_active and @qc_active are compared
+ * and commands are completed accordingly.
+ *
+ * Always use this function when completing multiple NCQ commands
+ * from IRQ handlers instead of calling ata_qc_complete()
+ * multiple times, so that IRQ expect status stays properly in sync.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Number of completed commands on success, -errno otherwise.
+ */
+int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
+{
+ u64 done_mask, ap_qc_active = ap->qc_active;
+ int nr_done = 0;
+
+ /*
+ * If the internal tag is set on ap->qc_active, then we care about
+ * bit0 on the passed in qc_active mask. Move that bit up to match
+ * the internal tag.
+ */
+ if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+ qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
+ qc_active ^= qc_active & 0x01;
+ }
+
+ done_mask = ap_qc_active ^ qc_active;
+
+ if (unlikely(done_mask & qc_active)) {
+ ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
+ ap->qc_active, qc_active);
+ return -EINVAL;
+ }
+
+ while (done_mask) {
+ struct ata_queued_cmd *qc;
+ unsigned int tag = __ffs64(done_mask);
+
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc) {
+ ata_qc_complete(qc);
+ nr_done++;
+ }
+ done_mask &= ~(1ULL << tag);
+ }
+
+ return nr_done;
+}
+EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
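
The done_mask arithmetic above is plain set algebra: XOR of the previous and current active masks yields the tags that completed. A runnable userspace sketch (__builtin_ctzll is a GCC/Clang builtin standing in for __ffs64()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Tags 0, 2 and 5 were in flight; the controller now reports
	 * only tag 5 outstanding, so tags 0 and 2 have completed. */
	uint64_t was_active = (1ULL << 0) | (1ULL << 2) | (1ULL << 5);
	uint64_t now_active = (1ULL << 5);
	uint64_t done = was_active ^ now_active;

	while (done) {
		unsigned tag = __builtin_ctzll(done);	/* like __ffs64() */

		printf("tag %u completed\n", tag);
		done &= ~(1ULL << tag);
	}
	return 0;
}
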
+
+/**
+ * ata_slave_link_init - initialize slave link
+ * @ap: port to initialize slave link for
+ *
+ * Create and initialize slave link for @ap. This enables slave
+ * link handling on the port.
+ *
+ * In libata, a port contains links and a link contains devices.
+ * There is a single host link but if a PMP is attached to it,
+ * there can be multiple fan-out links. On SATA, there's usually
+ * a single device connected to a link but PATA and SATA
+ * controllers emulating TF based interface can have two - master
+ * and slave.
+ *
+ * However, a few controllers don't fit this abstraction
+ * well - SATA controllers which emulate the TF
+ * interface with both master and slave devices but also have
+ * separate SCR register sets for each device. These controllers
+ * need separate links for physical link handling
+ * (e.g. onlineness, link speed) but should be treated like a
+ * traditional M/S controller for everything else (e.g. command
+ * issue, softreset).
+ *
+ * slave_link is libata's way of handling this class of
+ * controllers without impacting core layer too much. For
+ * anything other than physical link handling, the default host
+ * link is used for both master and slave. For physical link
+ * handling, separate @ap->slave_link is used. All dirty details
+ * are implemented inside libata core layer. From LLD's POV, the
+ * only difference is that prereset, hardreset and postreset are
+ * called once more for the slave link, so the reset sequence
+ * looks like the following.
+ *
+ * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
+ * softreset(M) -> postreset(M) -> postreset(S)
+ *
+ * Note that softreset is called only for the master. Softreset
+ * resets both M/S by definition, so SRST on master should handle
+ * both (the standard method will work just fine).
+ *
+ * LOCKING:
+ * Should be called before host is registered.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_slave_link_init(struct ata_port *ap)
+{
+ struct ata_link *link;
+
+ WARN_ON(ap->slave_link);
+ WARN_ON(ap->flags & ATA_FLAG_PMP);
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ ata_link_init(ap, link, 1);
+ ap->slave_link = link;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_slave_link_init);
+
+/**
+ * sata_lpm_ignore_phy_events - test if PHY event should be ignored
+ * @link: Link receiving the event
+ *
+ * Test whether the received PHY event should be ignored.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the event has to be ignored.
+ */
+bool sata_lpm_ignore_phy_events(struct ata_link *link)
+{
+ unsigned long lpm_timeout = link->last_lpm_change +
+ msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
+
+ /* if LPM is enabled, PHYRDY doesn't mean anything */
+ if (link->lpm_policy > ATA_LPM_MAX_POWER)
+ return true;
+
+ /* ignore the first PHY event after the LPM policy changed
+	 * as it might be spurious
+ */
+ if ((link->flags & ATA_LFLAG_CHANGED) &&
+ time_before(jiffies, lpm_timeout))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
+
+static const char *ata_lpm_policy_names[] = {
+ [ATA_LPM_UNKNOWN] = "max_performance",
+ [ATA_LPM_MAX_POWER] = "max_performance",
+ [ATA_LPM_MED_POWER] = "medium_power",
+ [ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
+ [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
+ [ATA_LPM_MIN_POWER] = "min_power",
+};
+
+static ssize_t ata_scsi_lpm_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(device);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ata_link *link;
+ struct ata_device *dev;
+ enum ata_lpm_policy policy;
+ unsigned long flags;
+
+ /* UNKNOWN is internal state, iterate from MAX_POWER */
+ for (policy = ATA_LPM_MAX_POWER;
+ policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
+ const char *name = ata_lpm_policy_names[policy];
+
+ if (strncmp(name, buf, strlen(name)) == 0)
+ break;
+ }
+ if (policy == ARRAY_SIZE(ata_lpm_policy_names))
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ count = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ }
+ }
+
+ ap->target_lpm_policy = policy;
+ ata_port_schedule_eh(ap);
+out_unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+ return count;
+}
+
+static ssize_t ata_scsi_lpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+
+ if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ ata_lpm_policy_names[ap->target_lpm_policy]);
+}
+DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
+ ata_scsi_lpm_show, ata_scsi_lpm_store);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
+
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ bool ncq_prio_enable;
+ int rc = 0;
+
+ ap = ata_shost_to_port(sdev->host);
+
+ spin_lock_irq(ap->lock);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+
+ ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+unlock:
+ spin_unlock_irq(ap->lock);
+
+ return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+}
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ long int input;
+ int rc;
+
+ rc = kstrtol(buf, 10, &input);
+ if (rc)
+ return rc;
+ if ((input < 0) || (input > 1))
+ return -EINVAL;
+
+ ap = ata_shost_to_port(sdev->host);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (unlikely(!dev))
+ return -ENODEV;
+
+ spin_lock_irq(ap->lock);
+ if (input)
+ dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
+ else
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+ dev->link->eh_info.action |= ATA_EH_REVALIDATE;
+ dev->link->eh_info.flags |= ATA_EHI_QUIET;
+ ata_port_schedule_eh(ap);
+ spin_unlock_irq(ap->lock);
+
+ ata_port_wait_eh(ap);
+
+ if (input) {
+ spin_lock_irq(ap->lock);
+ if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+ rc = -EIO;
+ }
+ spin_unlock_irq(ap->lock);
+ }
+
+ return rc ? rc : len;
+}
+
+DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
+ ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
+EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
+
+struct device_attribute *ata_ncq_sdev_attrs[] = {
+ &dev_attr_unload_heads,
+ &dev_attr_ncq_prio_enable,
+ NULL
+};
+EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs);
+
+static ssize_t
+ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
+ return ap->ops->em_store(ap, buf, count);
+ return -EINVAL;
+}
+
+static ssize_t
+ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+
+ if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
+ return ap->ops->em_show(ap, buf);
+ return -EINVAL;
+}
+DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
+ ata_scsi_em_message_show, ata_scsi_em_message_store);
+EXPORT_SYMBOL_GPL(dev_attr_em_message);
+
+static ssize_t
+ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+
+ return snprintf(buf, 23, "%d\n", ap->em_message_type);
+}
+DEVICE_ATTR(em_message_type, S_IRUGO,
+ ata_scsi_em_message_type_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
+
+static ssize_t
+ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+
+ if (atadev && ap->ops->sw_activity_show &&
+ (ap->flags & ATA_FLAG_SW_ACTIVITY))
+ return ap->ops->sw_activity_show(atadev, buf);
+ return -EINVAL;
+}
+
+static ssize_t
+ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+ enum sw_activity val;
+ int rc;
+
+ if (atadev && ap->ops->sw_activity_store &&
+ (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+ val = simple_strtoul(buf, NULL, 0);
+ switch (val) {
+ case OFF: case BLINK_ON: case BLINK_OFF:
+ rc = ap->ops->sw_activity_store(atadev, val);
+ if (!rc)
+ return count;
+ else
+ return rc;
+ }
+ }
+ return -EINVAL;
+}
+DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
+ ata_scsi_activity_store);
+EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
+
+/**
+ * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
+ * @ap: ATA port the device is attached to
+ * @sdev: SCSI device to configure queue depth for
+ * @queue_depth: new queue depth
+ *
+ * libsas and libata have different approaches for associating an sdev with
+ * its ata_port.
+ *
+ */
+int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+ int queue_depth)
+{
+ struct ata_device *dev;
+ unsigned long flags;
+
+ if (queue_depth < 1 || queue_depth == sdev->queue_depth)
+ return sdev->queue_depth;
+
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev || !ata_dev_enabled(dev))
+ return sdev->queue_depth;
+
+ /* NCQ enabled? */
+ spin_lock_irqsave(ap->lock, flags);
+ dev->flags &= ~ATA_DFLAG_NCQ_OFF;
+ if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
+ dev->flags |= ATA_DFLAG_NCQ_OFF;
+ queue_depth = 1;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* limit and apply queue depth */
+ queue_depth = min(queue_depth, sdev->host->can_queue);
+ queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
+ queue_depth = min(queue_depth, ATA_MAX_QUEUE);
+
+ if (sdev->queue_depth == queue_depth)
+ return -EINVAL;
+
+ return scsi_change_queue_depth(sdev, queue_depth);
+}
+EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
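
The clamping above is a chain of min() operations over three independent limits. With illustrative numbers, a user request of 48 on a host that can queue 64 against a device reporting depth 32 lands on 32:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	/* Made-up values: host limit, IDENTIFY-reported device depth,
	 * and ATA_MAX_QUEUE (32 in current libata). */
	int requested = 48, host_can_queue = 64, id_depth = 32, ata_max = 32;
	int depth = requested;

	depth = min_int(depth, host_can_queue);
	depth = min_int(depth, id_depth);
	depth = min_int(depth, ata_max);
	printf("effective queue depth = %d\n", depth);
	return 0;
}
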
+
+/**
+ * ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ * @sdev: SCSI device to configure queue depth for
+ * @queue_depth: new queue depth
+ *
+ * This is libata standard hostt->change_queue_depth callback.
+ * SCSI will call into this callback when user tries to set queue
+ * depth via sysfs.
+ *
+ * LOCKING:
+ * SCSI layer (we don't care)
+ *
+ * RETURNS:
+ * Newly configured queue depth.
+ */
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+
+ return __ata_change_queue_depth(ap, sdev, queue_depth);
+}
+EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+
+/**
+ * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
+ * @host: ATA host container for all SAS ports
+ * @port_info: Information from low-level host driver
+ * @shost: SCSI host that the scsi device is attached to
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * ata_port pointer on success / NULL on failure.
+ */
+
+struct ata_port *ata_sas_port_alloc(struct ata_host *host,
+ struct ata_port_info *port_info,
+ struct Scsi_Host *shost)
+{
+ struct ata_port *ap;
+
+ ap = ata_port_alloc(host);
+ if (!ap)
+ return NULL;
+
+ ap->port_no = 0;
+ ap->lock = &host->lock;
+ ap->pio_mask = port_info->pio_mask;
+ ap->mwdma_mask = port_info->mwdma_mask;
+ ap->udma_mask = port_info->udma_mask;
+ ap->flags |= port_info->flags;
+ ap->ops = port_info->port_ops;
+ ap->cbl = ATA_CBL_SATA;
+
+ return ap;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
+
+/**
+ * ata_sas_port_start - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized.
+ *
+ * May be used as the port_start() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+int ata_sas_port_start(struct ata_port *ap)
+{
+ /*
+ * the port is marked as frozen at allocation time, but if we don't
+ * have new eh, we won't thaw it
+ */
+ if (!ap->ops->error_handler)
+ ap->pflags &= ~ATA_PFLAG_FROZEN;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_start);
+
+/**
+ * ata_sas_port_stop - Undo ata_sas_port_start()
+ * @ap: Port to shut down
+ *
+ * May be used as the port_stop() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_sas_port_stop(struct ata_port *ap)
+{
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_stop);
+
+/**
+ * ata_sas_async_probe - simply schedule probing and return
+ * @ap: Port to probe
+ *
+ * Used for batch scheduling of probes for SAS-attached ATA devices;
+ * assumes the port has already been through ata_sas_port_init().
+ */
+void ata_sas_async_probe(struct ata_port *ap)
+{
+ __ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_async_probe);
+
+int ata_sas_sync_probe(struct ata_port *ap)
+{
+ return ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
+
+
+/**
+ * ata_sas_port_init - Initialize a SATA device
+ * @ap: SATA port to initialize
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * Zero on success, non-zero on error.
+ */
+
+int ata_sas_port_init(struct ata_port *ap)
+{
+ int rc = ap->ops->port_start(ap);
+
+ if (rc)
+ return rc;
+ ap->print_id = atomic_inc_return(&ata_print_id);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_init);
+
+int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
+{
+ return ata_tport_add(parent, ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_add);
+
+void ata_sas_tport_delete(struct ata_port *ap)
+{
+ ata_tport_delete(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
+
+/**
+ * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
+ * @ap: SATA port to destroy
+ *
+ */
+
+void ata_sas_port_destroy(struct ata_port *ap)
+{
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
+
+/**
+ * ata_sas_slave_configure - Default slave_config routine for libata devices
+ * @sdev: SCSI device to configure
+ * @ap: ATA port to which SCSI device is attached
+ *
+ * RETURNS:
+ * Zero.
+ */
+
+int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
+{
+ ata_scsi_sdev_config(sdev);
+ ata_scsi_dev_config(sdev, ap->link.device);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
+
+/**
+ * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
+ * @cmd: SCSI command to be sent
+ * @ap: ATA port to which the command is being sent
+ *
+ * RETURNS:
+ * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+ * 0 otherwise.
+ */
+
+int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
+{
+ int rc = 0;
+
+ ata_scsi_dump_cdb(ap, cmd);
+
+ if (likely(ata_dev_enabled(ap->link.device)))
+ rc = __ata_scsi_queuecmd(cmd, ap->link.device);
+ else {
+ cmd->result = (DID_BAD_TARGET << 16);
+ cmd->scsi_done(cmd);
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
+
+int ata_sas_allocate_tag(struct ata_port *ap)
+{
+ unsigned int max_queue = ap->host->n_tags;
+ unsigned int i, tag;
+
+ for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
+ tag = tag < max_queue ? tag : 0;
+
+ /* the last tag is reserved for internal command. */
+ if (ata_tag_internal(tag))
+ continue;
+
+ if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
+ ap->sas_last_tag = tag;
+ return tag;
+ }
+ }
+ return -1;
+}
+
+void ata_sas_free_tag(unsigned int tag, struct ata_port *ap)
+{
+ clear_bit(tag, &ap->sas_tag_allocated);
+}
+
+/**
+ * sata_async_notification - SATA async notification handler
+ * @ap: ATA port where async notification is received
+ *
+ * Handler to be called when async notification via SDB FIS is
+ * received. This function schedules EH if necessary.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * 1 if EH is scheduled, 0 otherwise.
+ */
+int sata_async_notification(struct ata_port *ap)
+{
+ u32 sntf;
+ int rc;
+
+ if (!(ap->flags & ATA_FLAG_AN))
+ return 0;
+
+ rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+ if (rc == 0)
+ sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
+
+ if (!sata_pmp_attached(ap) || rc) {
+ /* PMP is not attached or SNTF is not available */
+ if (!sata_pmp_attached(ap)) {
+ /* PMP is not attached. Check whether ATAPI
+ * AN is configured. If so, notify media
+ * change.
+ */
+ struct ata_device *dev = ap->link.device;
+
+ if ((dev->class == ATA_DEV_ATAPI) &&
+ (dev->flags & ATA_DFLAG_AN))
+ ata_scsi_media_change_notify(dev);
+ return 0;
+ } else {
+ /* PMP is attached but SNTF is not available.
+ * ATAPI async media change notification is
+ * not used. The PMP must be reporting PHY
+ * status change, schedule EH.
+ */
+ ata_port_schedule_eh(ap);
+ return 1;
+ }
+ } else {
+ /* PMP is attached and SNTF is available */
+ struct ata_link *link;
+
+ /* check and notify ATAPI AN */
+ ata_for_each_link(link, ap, EDGE) {
+ if (!(sntf & (1 << link->pmp)))
+ continue;
+
+ if ((link->device->class == ATA_DEV_ATAPI) &&
+ (link->device->flags & ATA_DFLAG_AN))
+ ata_scsi_media_change_notify(link->device);
+ }
+
+ /* If PMP is reporting that PHY status of some
+ * downstream ports has changed, schedule EH.
+ */
+ if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
+ ata_port_schedule_eh(ap);
+ return 1;
+ }
+
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(sata_async_notification);
+
+/**
+ * ata_eh_read_log_10h - Read log page 10h for NCQ error details
+ * @dev: Device to read log page 10h from
+ * @tag: Resulting tag of the failed command
+ * @tf: Resulting taskfile registers of the failed command
+ *
+ * Read log page 10h to obtain NCQ error details and clear error
+ * condition.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int ata_eh_read_log_10h(struct ata_device *dev,
+ int *tag, struct ata_taskfile *tf)
+{
+ u8 *buf = dev->link->ap->sector_buf;
+ unsigned int err_mask;
+ u8 csum;
+ int i;
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
+ if (err_mask)
+ return -EIO;
+
+ csum = 0;
+ for (i = 0; i < ATA_SECT_SIZE; i++)
+ csum += buf[i];
+ if (csum)
+ ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
+ csum);
+
+ if (buf[0] & 0x80)
+ return -ENOENT;
+
+ *tag = buf[0] & 0x1f;
+
+ tf->command = buf[2];
+ tf->feature = buf[3];
+ tf->lbal = buf[4];
+ tf->lbam = buf[5];
+ tf->lbah = buf[6];
+ tf->device = buf[7];
+ tf->hob_lbal = buf[8];
+ tf->hob_lbam = buf[9];
+ tf->hob_lbah = buf[10];
+ tf->nsect = buf[12];
+ tf->hob_nsect = buf[13];
+ if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
+ tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
+
+ return 0;
+}
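
The checksum rule used above: the byte sum of the whole 512-byte log page, including the final checksum byte, must wrap to zero. A self-contained sketch with made-up page contents:

#include <stdio.h>
#include <stdint.h>

#define SECT_SIZE 512

int main(void)
{
	uint8_t page[SECT_SIZE] = { [0] = 0x03, [2] = 0x61 /* illustrative */ };
	uint8_t sum = 0;
	int i;

	/* The device stores, in the last byte, the value that makes the
	 * running byte sum wrap to zero (mod 256). */
	for (i = 0; i < SECT_SIZE - 1; i++)
		sum += page[i];
	page[SECT_SIZE - 1] = (uint8_t)(0 - sum);

	for (sum = 0, i = 0; i < SECT_SIZE; i++)
		sum += page[i];
	printf("checksum %s\n", sum == 0 ? "ok" : "bad");
	return 0;
}
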
+
+/**
+ * ata_eh_analyze_ncq_error - analyze NCQ error
+ * @link: ATA link to analyze NCQ error for
+ *
+ * Read log page 10h, determine the offending qc and acquire
+ * error status TF. For NCQ device errors, all LLDDs have to do
+ * is setting AC_ERR_DEV in ehi->err_mask. This function takes
+ * care of the rest.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_eh_analyze_ncq_error(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev = link->device;
+ struct ata_queued_cmd *qc;
+ struct ata_taskfile tf;
+ int tag, rc;
+
+ /* if frozen, we can't do much */
+ if (ap->pflags & ATA_PFLAG_FROZEN)
+ return;
+
+ /* is it NCQ device error? */
+ if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
+ return;
+
+ /* has LLDD analyzed already? */
+ ata_qc_for_each_raw(ap, qc, tag) {
+ if (!(qc->flags & ATA_QCFLAG_FAILED))
+ continue;
+
+ if (qc->err_mask)
+ return;
+ }
+
+ /* okay, this error is ours */
+ memset(&tf, 0, sizeof(tf));
+ rc = ata_eh_read_log_10h(dev, &tag, &tf);
+ if (rc) {
+ ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
+ rc);
+ return;
+ }
+
+ if (!(link->sactive & (1 << tag))) {
+ ata_link_err(link, "log page 10h reported inactive tag %d\n",
+ tag);
+ return;
+ }
+
+ /* we've got the perpetrator, condemn it */
+ qc = __ata_qc_from_tag(ap, tag);
+ memcpy(&qc->result_tf, &tf, sizeof(tf));
+ qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+ qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+ if (dev->class == ATA_DEV_ZAC &&
+ ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
+ char sense_key, asc, ascq;
+
+ sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+ asc = (qc->result_tf.auxiliary >> 8) & 0xff;
+ ascq = qc->result_tf.auxiliary & 0xff;
+ ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
+ ata_scsi_set_sense_information(dev, qc->scsicmd,
+ &qc->result_tf);
+ qc->flags |= ATA_QCFLAG_SENSE_VALID;
+ }
+
+ ehc->i.err_mask &= ~AC_ERR_DEV;
+}
+EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index eb2eb599e602..36e588d88b95 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2,10 +2,6 @@
/*
* libata-scsi.c - helper library for ATA
*
- * Maintained by: Tejun Heo <tj@kernel.org>
- * Please ALWAYS copy linux-ide@vger.kernel.org
- * on emails.
- *
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*
@@ -36,11 +32,12 @@
#include <linux/suspend.h>
#include <asm/unaligned.h>
#include <linux/ioprio.h>
+#include <linux/of.h>
#include "libata.h"
#include "libata-transport.h"
-#define ATA_SCSI_RBUF_SIZE 4096
+#define ATA_SCSI_RBUF_SIZE 576
static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
@@ -49,8 +46,6 @@ typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
const struct scsi_device *scsidev);
-static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
- const struct scsi_device *scsidev);
#define RW_RECOVERY_MPAGE 0x1
#define RW_RECOVERY_MPAGE_LEN 12
@@ -90,71 +85,6 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
0, 30 /* extended self test time, see 05-359r1 */
};
-static const char *ata_lpm_policy_names[] = {
- [ATA_LPM_UNKNOWN] = "max_performance",
- [ATA_LPM_MAX_POWER] = "max_performance",
- [ATA_LPM_MED_POWER] = "medium_power",
- [ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
- [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
- [ATA_LPM_MIN_POWER] = "min_power",
-};
-
-static ssize_t ata_scsi_lpm_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(device);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ata_link *link;
- struct ata_device *dev;
- enum ata_lpm_policy policy;
- unsigned long flags;
-
- /* UNKNOWN is internal state, iterate from MAX_POWER */
- for (policy = ATA_LPM_MAX_POWER;
- policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
- const char *name = ata_lpm_policy_names[policy];
-
- if (strncmp(name, buf, strlen(name)) == 0)
- break;
- }
- if (policy == ARRAY_SIZE(ata_lpm_policy_names))
- return -EINVAL;
-
- spin_lock_irqsave(ap->lock, flags);
-
- ata_for_each_link(link, ap, EDGE) {
- ata_for_each_dev(dev, &ap->link, ENABLED) {
- if (dev->horkage & ATA_HORKAGE_NOLPM) {
- count = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
- }
-
- ap->target_lpm_policy = policy;
- ata_port_schedule_eh(ap);
-out_unlock:
- spin_unlock_irqrestore(ap->lock, flags);
- return count;
-}
-
-static ssize_t ata_scsi_lpm_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
-
- if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
- return -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- ata_lpm_policy_names[ap->target_lpm_policy]);
-}
-DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
- ata_scsi_lpm_show, ata_scsi_lpm_store);
-EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
-
static ssize_t ata_scsi_park_show(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -258,83 +188,6 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
-static ssize_t ata_ncq_prio_enable_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap;
- struct ata_device *dev;
- bool ncq_prio_enable;
- int rc = 0;
-
- ap = ata_shost_to_port(sdev->host);
-
- spin_lock_irq(ap->lock);
- dev = ata_scsi_find_dev(ap, sdev);
- if (!dev) {
- rc = -ENODEV;
- goto unlock;
- }
-
- ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
-
-unlock:
- spin_unlock_irq(ap->lock);
-
- return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
-}
-
-static ssize_t ata_ncq_prio_enable_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap;
- struct ata_device *dev;
- long int input;
- int rc;
-
- rc = kstrtol(buf, 10, &input);
- if (rc)
- return rc;
- if ((input < 0) || (input > 1))
- return -EINVAL;
-
- ap = ata_shost_to_port(sdev->host);
- dev = ata_scsi_find_dev(ap, sdev);
- if (unlikely(!dev))
- return -ENODEV;
-
- spin_lock_irq(ap->lock);
- if (input)
- dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
- else
- dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
-
- dev->link->eh_info.action |= ATA_EH_REVALIDATE;
- dev->link->eh_info.flags |= ATA_EHI_QUIET;
- ata_port_schedule_eh(ap);
- spin_unlock_irq(ap->lock);
-
- ata_port_wait_eh(ap);
-
- if (input) {
- spin_lock_irq(ap->lock);
- if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
- dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
- rc = -EIO;
- }
- spin_unlock_irq(ap->lock);
- }
-
- return rc ? rc : len;
-}
-
-DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
- ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
-EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
-
void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
u8 sk, u8 asc, u8 ascq)
{
@@ -383,90 +236,8 @@ static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
field, 0xff, 0);
}
-static ssize_t
-ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
- return ap->ops->em_store(ap, buf, count);
- return -EINVAL;
-}
-
-static ssize_t
-ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
-
- if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
- return ap->ops->em_show(ap, buf);
- return -EINVAL;
-}
-DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
- ata_scsi_em_message_show, ata_scsi_em_message_store);
-EXPORT_SYMBOL_GPL(dev_attr_em_message);
-
-static ssize_t
-ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
-
- return snprintf(buf, 23, "%d\n", ap->em_message_type);
-}
-DEVICE_ATTR(em_message_type, S_IRUGO,
- ata_scsi_em_message_type_show, NULL);
-EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
-
-static ssize_t
-ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
- struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
-
- if (atadev && ap->ops->sw_activity_show &&
- (ap->flags & ATA_FLAG_SW_ACTIVITY))
- return ap->ops->sw_activity_show(atadev, buf);
- return -EINVAL;
-}
-
-static ssize_t
-ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
- struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
- enum sw_activity val;
- int rc;
-
- if (atadev && ap->ops->sw_activity_store &&
- (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
- val = simple_strtoul(buf, NULL, 0);
- switch (val) {
- case OFF: case BLINK_ON: case BLINK_OFF:
- rc = ap->ops->sw_activity_store(atadev, val);
- if (!rc)
- return count;
- else
- return rc;
- }
- }
- return -EINVAL;
-}
-DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
- ata_scsi_activity_store);
-EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
-
struct device_attribute *ata_common_sdev_attrs[] = {
&dev_attr_unload_heads,
- &dev_attr_ncq_prio_enable,
NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
@@ -499,6 +270,7 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
+EXPORT_SYMBOL_GPL(ata_std_bios_param);
/**
* ata_scsi_unlock_native_capacity - unlock native capacity
@@ -528,6 +300,7 @@ void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
}
+EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
/**
* ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
@@ -1215,7 +988,7 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}
-static void ata_scsi_sdev_config(struct scsi_device *sdev)
+void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
@@ -1255,8 +1028,7 @@ static int atapi_drain_needed(struct request *rq)
return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
}
-static int ata_scsi_dev_config(struct scsi_device *sdev,
- struct ata_device *dev)
+int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
@@ -1344,6 +1116,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
return rc;
}
+EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
/**
* ata_scsi_slave_destroy - SCSI device is about to be destroyed
@@ -1383,71 +1156,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
q->dma_drain_buffer = NULL;
q->dma_drain_size = 0;
}
-
-/**
- * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
- * @ap: ATA port of the device whose queue depth is being changed
- * @sdev: SCSI device to configure queue depth for
- * @queue_depth: new queue depth
- *
- * libsas and libata have different approaches for associating an sdev to
- * its ata_port.
- *
- */
-int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth)
-{
- struct ata_device *dev;
- unsigned long flags;
-
- if (queue_depth < 1 || queue_depth == sdev->queue_depth)
- return sdev->queue_depth;
-
- dev = ata_scsi_find_dev(ap, sdev);
- if (!dev || !ata_dev_enabled(dev))
- return sdev->queue_depth;
-
- /* NCQ enabled? */
- spin_lock_irqsave(ap->lock, flags);
- dev->flags &= ~ATA_DFLAG_NCQ_OFF;
- if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
- dev->flags |= ATA_DFLAG_NCQ_OFF;
- queue_depth = 1;
- }
- spin_unlock_irqrestore(ap->lock, flags);
-
- /* limit and apply queue depth */
- queue_depth = min(queue_depth, sdev->host->can_queue);
- queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
- queue_depth = min(queue_depth, ATA_MAX_QUEUE);
-
- if (sdev->queue_depth == queue_depth)
- return -EINVAL;
-
- return scsi_change_queue_depth(sdev, queue_depth);
-}
-
-/**
- * ata_scsi_change_queue_depth - SCSI callback for queue depth config
- * @sdev: SCSI device to configure queue depth for
- * @queue_depth: new queue depth
- *
- * This is libata standard hostt->change_queue_depth callback.
- * SCSI will call into this callback when user tries to set queue
- * depth via sysfs.
- *
- * LOCKING:
- * SCSI layer (we don't care)
- *
- * RETURNS:
- * Newly configured queue depth.
- */
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
-{
- struct ata_port *ap = ata_shost_to_port(sdev->host);
-
- return __ata_change_queue_depth(ap, sdev, queue_depth);
-}
+EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
@@ -2354,10 +2063,6 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
{
- struct ata_taskfile tf;
-
- memset(&tf, 0, sizeof(tf));
-
rbuf[1] = 0x89; /* our page code */
rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
rbuf[3] = (0x238 & 0xff);
@@ -2366,14 +2071,14 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
memcpy(&rbuf[16], "libata ", 16);
memcpy(&rbuf[32], DRV_VERSION, 4);
- /* we don't store the ATA device signature, so we fake it */
-
- tf.command = ATA_DRDY; /* really, this is Status reg */
- tf.lbal = 0x1;
- tf.nsect = 0x1;
-
- ata_tf_to_fis(&tf, 0, 1, &rbuf[36]); /* TODO: PMP? */
rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */
+ rbuf[37] = (1 << 7); /* bit 7 indicates Command FIS */
+ /* TODO: PMP? */
+
+ /* we don't store the ATA device signature, so we fake it */
+ rbuf[38] = ATA_DRDY; /* really, this is Status reg */
+ rbuf[40] = 0x1;
+ rbuf[48] = 0x1;
rbuf[56] = ATA_CMD_ID_ATA;
@@ -3089,7 +2794,7 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
* RETURNS:
* Associated ATA device, or %NULL if not found.
*/
-static struct ata_device *
+struct ata_device *
ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
{
struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
@@ -4299,8 +4004,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
* Prints the contents of a SCSI command via printk().
*/
-static inline void ata_scsi_dump_cdb(struct ata_port *ap,
- struct scsi_cmnd *cmd)
+void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd)
{
#ifdef ATA_VERBOSE_DEBUG
struct scsi_device *scsidev = cmd->device;
@@ -4312,8 +4016,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
#endif
}
-static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
- struct ata_device *dev)
+int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
{
u8 scsi_op = scmd->cmnd[0];
ata_xlat_func_t xlat_func;
@@ -4407,6 +4110,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
return rc;
}
+EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
/**
* ata_scsi_simulate - simulate SCSI command on ATA device
@@ -4562,26 +4266,51 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
- rc = scsi_add_host_with_dma(ap->scsi_host,
- &ap->tdev, ap->host->dev);
+ rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
if (rc)
- goto err_add;
+ goto err_alloc;
}
return 0;
- err_add:
- scsi_host_put(host->ports[i]->scsi_host);
err_alloc:
while (--i >= 0) {
struct Scsi_Host *shost = host->ports[i]->scsi_host;
+ /* scsi_host_put() is in ata_devres_release() */
scsi_remove_host(shost);
- scsi_host_put(shost);
}
return rc;
}
+#ifdef CONFIG_OF
+static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
+{
+ struct scsi_device *sdev = dev->sdev;
+ struct device *d = ap->host->dev;
+ struct device_node *np = d->of_node;
+ struct device_node *child;
+
+ for_each_available_child_of_node(np, child) {
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(child, "reg", &val);
+ if (ret)
+ continue;
+ if (val == dev->devno) {
+ dev_dbg(d, "found matching device node\n");
+ sdev->sdev_gendev.of_node = child;
+ return;
+ }
+ }
+}
+#else
+static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
+{
+}
+#endif
+
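ata_scsi_assign_ofnode() matches an available child node's "reg" property against the ATA device number so the resulting SCSI device is linked to its firmware description. A hypothetical device-tree fragment the lookup above would match for devno 0 (node names are illustrative, not taken from this patch):

    /*
     * sata: sata@1000000 {
     *         drive@0 {
     *                 reg = <0>;      // compared against dev->devno
     *         };
     * };
     */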
void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
int tries = 5;
@@ -4607,6 +4336,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
NULL);
if (!IS_ERR(sdev)) {
dev->sdev = sdev;
+ ata_scsi_assign_ofnode(dev, ap);
scsi_device_put(sdev);
} else {
dev->sdev = NULL;
@@ -4929,214 +4659,3 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
}
-
-/**
- * ata_sas_port_alloc - Allocate port for a SAS-attached SATA device
- * @host: ATA host container for all SAS ports
- * @port_info: Information from low-level host driver
- * @shost: SCSI host that the scsi device is attached to
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * ata_port pointer on success / NULL on failure.
- */
-
-struct ata_port *ata_sas_port_alloc(struct ata_host *host,
- struct ata_port_info *port_info,
- struct Scsi_Host *shost)
-{
- struct ata_port *ap;
-
- ap = ata_port_alloc(host);
- if (!ap)
- return NULL;
-
- ap->port_no = 0;
- ap->lock = &host->lock;
- ap->pio_mask = port_info->pio_mask;
- ap->mwdma_mask = port_info->mwdma_mask;
- ap->udma_mask = port_info->udma_mask;
- ap->flags |= port_info->flags;
- ap->ops = port_info->port_ops;
- ap->cbl = ATA_CBL_SATA;
-
- return ap;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
-
-/**
- * ata_sas_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sas_port_start(struct ata_port *ap)
-{
- /*
- * The port is marked frozen at allocation time; if the driver doesn't
- * use new-style error handling, nothing will ever thaw it, so do it here.
- */
- if (!ap->ops->error_handler)
- ap->pflags &= ~ATA_PFLAG_FROZEN;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_start);
-
-/**
- * ata_port_stop - Undo ata_sas_port_start()
- * @ap: Port to shut down
- *
- * May be used as the port_stop() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-void ata_sas_port_stop(struct ata_port *ap)
-{
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_stop);
-
-/**
- * ata_sas_async_probe - simply schedule probing and return
- * @ap: Port to probe
- *
- * Schedules probing of SAS-attached ATA devices in batches; assumes
- * the port has already been through ata_sas_port_init().
- */
-void ata_sas_async_probe(struct ata_port *ap)
-{
- __ata_port_probe(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_async_probe);
-
-int ata_sas_sync_probe(struct ata_port *ap)
-{
- return ata_port_probe(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
-
-
-/**
- * ata_sas_port_init - Initialize a SATA device
- * @ap: SATA port to initialize
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * Zero on success, non-zero on error.
- */
-
-int ata_sas_port_init(struct ata_port *ap)
-{
- int rc = ap->ops->port_start(ap);
-
- if (rc)
- return rc;
- ap->print_id = atomic_inc_return(&ata_print_id);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_init);
-
-int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
-{
- return ata_tport_add(parent, ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_tport_add);
-
-void ata_sas_tport_delete(struct ata_port *ap)
-{
- ata_tport_delete(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
-
-/**
- * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
- * @ap: SATA port to destroy
- *
- */
-
-void ata_sas_port_destroy(struct ata_port *ap)
-{
- if (ap->ops->port_stop)
- ap->ops->port_stop(ap);
- kfree(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
-
-/**
- * ata_sas_slave_configure - Default slave_config routine for libata devices
- * @sdev: SCSI device to configure
- * @ap: ATA port to which SCSI device is attached
- *
- * RETURNS:
- * Zero.
- */
-
-int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
-{
- ata_scsi_sdev_config(sdev);
- ata_scsi_dev_config(sdev, ap->link.device);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
-
-/**
- * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
- * @cmd: SCSI command to be sent
- * @ap: ATA port to which the command is being sent
- *
- * RETURNS:
- * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
- * 0 otherwise.
- */
-
-int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
-{
- int rc = 0;
-
- ata_scsi_dump_cdb(ap, cmd);
-
- if (likely(ata_dev_enabled(ap->link.device)))
- rc = __ata_scsi_queuecmd(cmd, ap->link.device);
- else {
- cmd->result = (DID_BAD_TARGET << 16);
- cmd->scsi_done(cmd);
- }
- return rc;
-}
-EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
-
-int ata_sas_allocate_tag(struct ata_port *ap)
-{
- unsigned int max_queue = ap->host->n_tags;
- unsigned int i, tag;
-
- for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
- tag = tag < max_queue ? tag : 0;
-
- /* the last tag is reserved for the internal command. */
- if (ata_tag_internal(tag))
- continue;
-
- if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
- ap->sas_last_tag = tag;
- return tag;
- }
- }
- return -1;
-}
-
-void ata_sas_free_tag(unsigned int tag, struct ata_port *ap)
-{
- clear_bit(tag, &ap->sas_tag_allocated);
-}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 038db94216a9..ae7189d1a568 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2,10 +2,6 @@
/*
* libata-sff.c - helper library for PCI IDE BMDMA
*
- * Maintained by: Tejun Heo <tj@kernel.org>
- * Please ALWAYS copy linux-ide@vger.kernel.org
- * on emails.
- *
* Copyright 2003-2006 Red Hat, Inc. All rights reserved.
* Copyright 2003-2006 Jeff Garzik
*
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 12a505bb9c5b..6a40e3c6cf49 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -208,7 +208,7 @@ show_ata_port_##name(struct device *dev, \
{ \
struct ata_port *ap = transport_class_to_port(dev); \
\
- return snprintf(buf, 20, format_string, cast ap->field); \
+ return scnprintf(buf, 20, format_string, cast ap->field); \
}
#define ata_port_simple_attr(field, name, format_string, type) \
@@ -479,7 +479,7 @@ show_ata_dev_##field(struct device *dev, \
{ \
struct ata_device *ata_dev = transport_class_to_dev(dev); \
\
- return snprintf(buf, 20, format_string, cast ata_dev->field); \
+ return scnprintf(buf, 20, format_string, cast ata_dev->field); \
}
#define ata_dev_simple_attr(field, format_string, type) \
@@ -533,7 +533,7 @@ show_ata_dev_id(struct device *dev,
if (ata_dev->class == ATA_DEV_PMP)
return 0;
for(i=0;i<ATA_ID_WORDS;i++) {
- written += snprintf(buf+written, 20, "%04x%c",
+ written += scnprintf(buf+written, 20, "%04x%c",
ata_dev->id[i],
((i+1) & 7) ? ' ' : '\n');
}
@@ -552,7 +552,7 @@ show_ata_dev_gscr(struct device *dev,
if (ata_dev->class != ATA_DEV_PMP)
return 0;
for(i=0;i<SATA_PMP_GSCR_DWORDS;i++) {
- written += snprintf(buf+written, 20, "%08x%c",
+ written += scnprintf(buf+written, 20, "%08x%c",
ata_dev->gscr[i],
((i+1) & 3) ? ' ' : '\n');
}
@@ -581,7 +581,7 @@ show_ata_dev_trim(struct device *dev,
else
mode = "unqueued";
- return snprintf(buf, 20, "%s\n", mode);
+ return scnprintf(buf, 20, "%s\n", mode);
}
static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL);
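The snprintf()-to-scnprintf() conversions matter for the accumulating show routines above: snprintf() returns the length that would have been written, so "written += snprintf(...)" can index past the buffer after truncation, while scnprintf() returns the bytes actually stored. A minimal sketch of the difference, using a hypothetical 8-byte buffer:

    char buf[8];
    int n = snprintf(buf, sizeof(buf), "0123456789");  /* n == 10, truncated */
    int m = scnprintf(buf, sizeof(buf), "0123456789"); /* m == 7, bytes stored */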
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index cd8090ad43e5..68cdd81d747c 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -37,7 +37,11 @@ extern int libata_noacpi;
extern int libata_allow_tpm;
extern const struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
+#ifdef CONFIG_ATA_FORCE
extern void ata_force_cbl(struct ata_port *ap);
+#else
+static inline void ata_force_cbl(struct ata_port *ap) { }
+#endif
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
@@ -87,6 +91,18 @@ extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+/* libata-sata.c */
+#ifdef CONFIG_SATA_HOST
+int ata_sas_allocate_tag(struct ata_port *ap);
+void ata_sas_free_tag(unsigned int tag, struct ata_port *ap);
+#else
+static inline int ata_sas_allocate_tag(struct ata_port *ap)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) { }
+#endif
+
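The new CONFIG_SATA_HOST stubs follow the standard kernel pattern for keeping callers free of #ifdefs: declare the real function when the option is enabled, and provide a static inline no-op (or error return) otherwise. The same shape, reduced to a sketch with a hypothetical CONFIG_FOO:

    #ifdef CONFIG_FOO
    int foo_grab(struct foo *f);
    #else
    static inline int foo_grab(struct foo *f) { return -EOPNOTSUPP; }
    #endif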
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
extern unsigned int ata_acpi_gtf_filter;
@@ -112,6 +128,8 @@ static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
#endif
/* libata-scsi.c */
+extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
+ const struct scsi_device *scsidev);
extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
@@ -128,9 +146,10 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_bus_probe(struct ata_port *ap);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
-int ata_sas_allocate_tag(struct ata_port *ap);
-void ata_sas_free_tag(unsigned int tag, struct ata_port *ap);
-
+void ata_scsi_sdev_config(struct scsi_device *sdev);
+int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
+void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd);
+int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
/* libata-eh.c */
extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index a6b76cc12a66..e517bd8822a5 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -145,7 +145,7 @@ enum {
/* PORT_IDMA_CTL bits */
IDMA_CTL_RST_ATA = (1 << 2), /* hardreset ATA bus */
- IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinary */
+ IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinery */
IDMA_CTL_GO = (1 << 7), /* IDMA mode go */
IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index c451d7d1c817..8729f78cef5f 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -157,7 +157,6 @@ static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
static void pdc_error_handler(struct ata_port *ap);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_pata_cable_detect(struct ata_port *ap);
-static int pdc_sata_cable_detect(struct ata_port *ap);
static struct scsi_host_template pdc_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
@@ -183,7 +182,7 @@ static const struct ata_port_operations pdc_common_ops = {
static struct ata_port_operations pdc_sata_ops = {
.inherits = &pdc_common_ops,
- .cable_detect = pdc_sata_cable_detect,
+ .cable_detect = ata_cable_sata,
.freeze = pdc_sata_freeze,
.thaw = pdc_sata_thaw,
.scr_read = pdc_sata_scr_read,
@@ -459,11 +458,6 @@ static int pdc_pata_cable_detect(struct ata_port *ap)
return ATA_CBL_PATA80;
}
-static int pdc_sata_cable_detect(struct ata_port *ap)
-{
- return ATA_CBL_SATA;
-}
-
static int pdc_sata_scr_read(struct ata_link *link,
unsigned int sc_reg, u32 *val)
{
diff --git a/drivers/atm/.gitignore b/drivers/atm/.gitignore
index fc0ae5eb05d8..ddd374e91965 100644
--- a/drivers/atm/.gitignore
+++ b/drivers/atm/.gitignore
@@ -1,4 +1,4 @@
-# Ignore generated files
+# SPDX-License-Identifier: GPL-2.0-only
fore200e_mkfirm
fore200e_pca_fw.c
pca200e.bin
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index c0da3820454b..d58278ae9e4a 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -22,8 +22,6 @@
#include "charlcd.h"
-#define LCD_MINOR 156
-
#define DEFAULT_LCD_BWIDTH 40
#define DEFAULT_LCD_HWIDTH 64
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 85965953683e..99980aa3644b 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -57,8 +57,6 @@
#include "charlcd.h"
-#define KEYPAD_MINOR 185
-
#define LCD_MAXBYTES 256 /* max burst write */
#define KEYPAD_BUFFER 64
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 6119e11a9f95..4d0a0038b476 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -21,6 +21,10 @@
#include <linux/sched.h>
#include <linux/smp.h>
+__weak bool arch_freq_counters_available(struct cpumask *cpus)
+{
+ return false;
+}
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
@@ -29,6 +33,14 @@ void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
unsigned long scale;
int i;
+ /*
+ * If the use of counters for FIE is enabled, just return, as we don't
+ * want to update the scale factor with information from CPUFREQ.
+ * Instead the scale factor will be updated from arch_scale_freq_tick().
+ */
+ if (arch_freq_counters_available(cpus))
+ return;
+
scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
for_each_cpu(i, cpus)
@@ -94,7 +106,7 @@ static void update_topology_flags_workfn(struct work_struct *work)
update_topology = 0;
}
-static u32 capacity_scale;
+static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;
static int free_raw_capacity(void)
@@ -108,17 +120,23 @@ static int free_raw_capacity(void)
void topology_normalize_cpu_scale(void)
{
u64 capacity;
+ u64 capacity_scale;
int cpu;
if (!raw_capacity)
return;
- pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+ capacity_scale = 1;
for_each_possible_cpu(cpu) {
- pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
- cpu, raw_capacity[cpu]);
- capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
- / capacity_scale;
+ capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity_scale = max(capacity, capacity_scale);
+ }
+
+ pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
+ for_each_possible_cpu(cpu) {
+ capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+ capacity_scale);
topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
cpu, topology_get_cpu_scale(cpu));
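With the per-CPU freq_factor, normalization first finds the largest raw_capacity * freq_factor product, then scales every CPU against it. A worked example with made-up numbers (not from this patch):

    /*
     * raw_capacity = {1024, 512}, freq_factor = {2000, 1500}:
     *   products       = {2048000, 768000}, capacity_scale = 2048000
     *   cpu0: (2048000 << 10) / 2048000 = 1024  (== SCHED_CAPACITY_SCALE)
     *   cpu1: ( 768000 << 10) / 2048000 = 384
     */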
@@ -127,6 +145,7 @@ void topology_normalize_cpu_scale(void)
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
+ struct clk *cpu_clk;
static bool cap_parsing_failed;
int ret;
u32 cpu_capacity;
@@ -146,10 +165,22 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
return false;
}
}
- capacity_scale = max(cpu_capacity, capacity_scale);
raw_capacity[cpu] = cpu_capacity;
pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
cpu_node, raw_capacity[cpu]);
+
+ /*
+ * Update freq_factor for calculating early boot CPU capacities.
+ * For CPUs whose DVFS mechanism is not clk-based there is no way to
+ * get the frequency value now, so assume they all run at the same
+ * frequency (by keeping the initial freq_factor value).
+ */
+ cpu_clk = of_clk_get(cpu_node, 0);
+ if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ per_cpu(freq_factor, cpu) =
+ clk_get_rate(cpu_clk) / 1000;
+ clk_put(cpu_clk);
+ }
} else {
if (raw_capacity) {
pr_err("cpu_capacity: missing %pOF raw capacity\n",
@@ -188,11 +219,8 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
- policy->cpuinfo.max_freq / 1000UL;
- capacity_scale = max(raw_capacity[cpu], capacity_scale);
- }
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
if (cpumask_empty(cpus_to_visit)) {
topology_normalize_cpu_scale();
@@ -281,7 +309,7 @@ static int __init get_cpu_for_node(struct device_node *node)
static int __init parse_core(struct device_node *core, int package_id,
int core_id)
{
- char name[10];
+ char name[20];
bool leaf = true;
int i = 0;
int cpu;
@@ -327,7 +355,7 @@ static int __init parse_core(struct device_node *core, int package_id,
static int __init parse_cluster(struct device_node *cluster, int depth)
{
- char name[10];
+ char name[20];
bool leaf = true;
bool has_cores = false;
struct device_node *c;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c7879f5ae2fb..dcfbe7251dc4 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -256,7 +256,8 @@ static int try_to_bring_up_master(struct master *master,
ret = master->ops->bind(master->dev);
if (ret < 0) {
devres_release_group(master->dev, NULL);
- dev_info(master->dev, "master bind failed: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_info(master->dev, "master bind failed: %d\n", ret);
return ret;
}
@@ -528,7 +529,8 @@ static void component_unbind(struct component *component,
{
WARN_ON(!component->bound);
- component->ops->unbind(component->dev, master->dev, data);
+ if (component->ops && component->ops->unbind)
+ component->ops->unbind(component->dev, master->dev, data);
component->bound = false;
/* Release all resources claimed in the binding of this component */
@@ -610,8 +612,9 @@ static int component_bind(struct component *component, struct master *master,
devres_release_group(component->dev, NULL);
devres_release_group(master->dev, NULL);
- dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
- dev_name(component->dev), component->ops, ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+ dev_name(component->dev), component->ops, ret);
}
return ret;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index dbb0f9130f42..0cad34f1eede 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -64,12 +64,12 @@ static inline void device_links_write_unlock(void)
mutex_unlock(&device_links_lock);
}
-int device_links_read_lock(void)
+int device_links_read_lock(void) __acquires(&device_links_srcu)
{
return srcu_read_lock(&device_links_srcu);
}
-void device_links_read_unlock(int idx)
+void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
srcu_read_unlock(&device_links_srcu, idx);
}
@@ -365,6 +365,7 @@ struct device_link *device_link_add(struct device *consumer,
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
+ link->flags |= DL_FLAG_STATELESS;
goto out;
}
}
@@ -433,12 +434,16 @@ struct device_link *device_link_add(struct device *consumer,
flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
+ list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+ list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
if (flags & DL_FLAG_SYNC_STATE_ONLY) {
dev_dbg(consumer,
"Linked as a sync state only consumer to %s\n",
dev_name(supplier));
goto out;
}
+
reorder:
/*
* Move the consumer and all of the devices depending on it to the end
@@ -449,12 +454,9 @@ reorder:
*/
device_reorder_to_tail(consumer, NULL);
- list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
- list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
-
dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
- out:
+out:
device_pm_unlock();
device_links_write_unlock();
@@ -523,9 +525,13 @@ static void device_link_add_missing_supplier_links(void)
mutex_lock(&wfs_lock);
list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
- links.needs_suppliers)
- if (!fwnode_call_int_op(dev->fwnode, add_links, dev))
+ links.needs_suppliers) {
+ int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
+ if (!ret)
list_del_init(&dev->links.needs_suppliers);
+ else if (ret != -ENODEV)
+ dev->links.need_for_probe = false;
+ }
mutex_unlock(&wfs_lock);
}
@@ -825,6 +831,13 @@ static void __device_links_supplier_defer_sync(struct device *sup)
list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
+static void device_link_drop_managed(struct device_link *link)
+{
+ link->flags &= ~DL_FLAG_MANAGED;
+ WRITE_ONCE(link->status, DL_STATE_NONE);
+ kref_put(&link->kref, __device_link_del);
+}
+
/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
@@ -838,7 +851,7 @@ static void __device_links_supplier_defer_sync(struct device *sup)
*/
void device_links_driver_bound(struct device *dev)
{
- struct device_link *link;
+ struct device_link *link, *ln;
LIST_HEAD(sync_list);
/*
@@ -878,18 +891,35 @@ void device_links_driver_bound(struct device *dev)
else
__device_links_queue_sync_state(dev, &sync_list);
- list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
+ struct device *supplier;
+
if (!(link->flags & DL_FLAG_MANAGED))
continue;
- WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
- WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ supplier = link->supplier;
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+ /*
+ * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
+ * other DL_MANAGED_LINK_FLAGS have been set. So, it's
+ * safe to drop the managed link completely.
+ */
+ device_link_drop_managed(link);
+ } else {
+ WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+ WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ }
+ /*
+ * This needs to be done even for the deleted
+ * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
+ * device link that was preventing the supplier from getting a
+ * sync_state() call.
+ */
if (defer_sync_state_count)
- __device_links_supplier_defer_sync(link->supplier);
+ __device_links_supplier_defer_sync(supplier);
else
- __device_links_queue_sync_state(link->supplier,
- &sync_list);
+ __device_links_queue_sync_state(supplier, &sync_list);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
@@ -899,13 +929,6 @@ void device_links_driver_bound(struct device *dev)
device_links_flush_sync_list(&sync_list, dev);
}
-static void device_link_drop_managed(struct device_link *link)
-{
- link->flags &= ~DL_FLAG_MANAGED;
- WRITE_ONCE(link->status, DL_STATE_NONE);
- kref_put(&link->kref, __device_link_del);
-}
-
/**
* __device_links_no_driver - Update links of a device without a driver.
* @dev: Device without a driver.
@@ -2341,6 +2364,36 @@ static int device_private_init(struct device *dev)
return 0;
}
+static u32 fw_devlink_flags;
+static int __init fw_devlink_setup(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "off") == 0) {
+ fw_devlink_flags = 0;
+ } else if (strcmp(arg, "permissive") == 0) {
+ fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+ } else if (strcmp(arg, "on") == 0) {
+ fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ } else if (strcmp(arg, "rpm") == 0) {
+ fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
+ DL_FLAG_PM_RUNTIME;
+ }
+ return 0;
+}
+early_param("fw_devlink", fw_devlink_setup);
+
+u32 fw_devlink_get_flags(void)
+{
+ return fw_devlink_flags;
+}
+
+static bool fw_devlink_is_permissive(void)
+{
+ return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+}
+
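For reference, the parser above maps the fw_devlink= kernel command-line values to device-link flags as follows (a summary of the code, not new behaviour):

    /*
     * fw_devlink=off        -> 0
     * fw_devlink=permissive -> DL_FLAG_SYNC_STATE_ONLY
     * fw_devlink=on         -> DL_FLAG_AUTOPROBE_CONSUMER
     * fw_devlink=rpm        -> DL_FLAG_AUTOPROBE_CONSUMER | DL_FLAG_PM_RUNTIME
     */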
/**
* device_add - add device to device hierarchy.
* @dev: device.
@@ -2375,6 +2428,7 @@ int device_add(struct device *dev)
struct class_interface *class_intf;
int error = -EINVAL, fw_ret;
struct kobject *glue_dir = NULL;
+ bool is_fwnode_dev = false;
dev = get_device(dev);
if (!dev)
@@ -2472,8 +2526,10 @@ int device_add(struct device *dev)
kobject_uevent(&dev->kobj, KOBJ_ADD);
- if (dev->fwnode && !dev->fwnode->dev)
+ if (dev->fwnode && !dev->fwnode->dev) {
dev->fwnode->dev = dev;
+ is_fwnode_dev = true;
+ }
/*
* Check if any of the other devices (consumers) have been waiting for
@@ -2489,9 +2545,10 @@ int device_add(struct device *dev)
*/
device_link_add_missing_supplier_links();
- if (fwnode_has_op(dev->fwnode, add_links)) {
+ if (fw_devlink_flags && is_fwnode_dev &&
+ fwnode_has_op(dev->fwnode, add_links)) {
fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
- if (fw_ret == -ENODEV)
+ if (fw_ret == -ENODEV && !fw_devlink_is_permissive())
device_link_wait_for_mandatory_supplier(dev);
else if (fw_ret)
device_link_wait_for_optional_supplier(dev);
@@ -3471,6 +3528,126 @@ out:
}
EXPORT_SYMBOL_GPL(device_move);
+static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ struct kobject *kobj = &dev->kobj;
+ struct class *class = dev->class;
+ const struct device_type *type = dev->type;
+ int error;
+
+ if (class) {
+ /*
+ * Change the device groups of the device class for @dev to
+ * @kuid/@kgid.
+ */
+ error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
+ kgid);
+ if (error)
+ return error;
+ }
+
+ if (type) {
+ /*
+ * Change the device groups of the device type for @dev to
+ * @kuid/@kgid.
+ */
+ error = sysfs_groups_change_owner(kobj, type->groups, kuid,
+ kgid);
+ if (error)
+ return error;
+ }
+
+ /* Change the device groups of @dev to @kuid/@kgid. */
+ error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
+ if (error)
+ return error;
+
+ if (device_supports_offline(dev) && !dev->offline_disabled) {
+ /* Change online device attributes of @dev to @kuid/@kgid. */
+ error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
+ kuid, kgid);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * device_change_owner - change the owner of an existing device.
+ * @dev: device.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * This changes the owner of @dev and its corresponding sysfs entries to
+ * @kuid/@kgid. This function closely mirrors how @dev was added via driver
+ * core.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+ int error;
+ struct kobject *kobj = &dev->kobj;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ /*
+ * Change the kobject and the default attributes and groups of the
+ * ktype associated with it to @kuid/@kgid.
+ */
+ error = sysfs_change_owner(kobj, kuid, kgid);
+ if (error)
+ goto out;
+
+ /*
+ * Change the uevent file for @dev to the new owner. The uevent file
+ * was created in a separate step when @dev got added and we mirror
+ * that step here.
+ */
+ error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
+ kgid);
+ if (error)
+ goto out;
+
+ /*
+ * Change the device groups, the device groups associated with the
+ * device class, and the groups associated with the device type of @dev
+ * to @kuid/@kgid.
+ */
+ error = device_attrs_change_owner(dev, kuid, kgid);
+ if (error)
+ goto out;
+
+ error = dpm_sysfs_change_owner(dev, kuid, kgid);
+ if (error)
+ goto out;
+
+#ifdef CONFIG_BLOCK
+ if (sysfs_deprecated && dev->class == &block_class)
+ goto out;
+#endif
+
+ /*
+ * Change the owner of the symlink located in the class directory of
+ * the device class associated with @dev which points to the actual
+ * directory entry for @dev to @kuid/@kgid. This ensures that the
+ * symlink shows the same permissions as its target.
+ */
+ error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
+ dev_name(dev), kuid, kgid);
+ if (error)
+ goto out;
+
+out:
+ put_device(dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_change_owner);
+
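A minimal sketch of calling the new API, assuming the caller already holds a device reference; the user namespace and IDs here are hypothetical:

    /* Hypothetical: hand a device's sysfs entries to uid/gid 1000. */
    kuid_t kuid = make_kuid(current_user_ns(), 1000);
    kgid_t kgid = make_kgid(current_user_ns(), 1000);

    if (uid_valid(kuid) && gid_valid(kgid))
            err = device_change_owner(dev, kuid, kgid);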
/**
* device_shutdown - call ->shutdown() on each device to shutdown.
*/
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 6265871a4af2..9a1c00fbbaef 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -55,7 +55,7 @@ static int cpu_subsys_online(struct device *dev)
if (from_nid == NUMA_NO_NODE)
return -ENODEV;
- ret = cpu_up(cpuid);
+ ret = cpu_device_up(dev);
/*
* When hot-adding memory to a memoryless node and enabling a cpu
* on the node, the node number of the cpu may internally change.
@@ -69,7 +69,7 @@ static int cpu_subsys_online(struct device *dev)
static int cpu_subsys_offline(struct device *dev)
{
- return cpu_down(dev->id);
+ return cpu_device_down(dev);
}
void unregister_cpu(struct cpu *cpu)
@@ -231,8 +231,7 @@ static struct cpu_attr cpu_attrs[] = {
static ssize_t print_cpus_kernel_max(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
- return n;
+ return sprintf(buf, "%d\n", NR_CPUS - 1);
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
@@ -258,13 +257,13 @@ static ssize_t print_cpus_offline(struct device *dev,
buf[n++] = ',';
if (nr_cpu_ids == total_cpus-1)
- n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
+ n += scnprintf(&buf[n], len - n, "%u", nr_cpu_ids);
else
- n += snprintf(&buf[n], len - n, "%u-%d",
+ n += scnprintf(&buf[n], len - n, "%u-%d",
nr_cpu_ids, total_cpus-1);
}
- n += snprintf(&buf[n], len - n, "\n");
+ n += scnprintf(&buf[n], len - n, "\n");
return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
@@ -272,7 +271,7 @@ static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = 0, len = PAGE_SIZE-2;
+ int n;
cpumask_var_t isolated;
if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
@@ -280,7 +279,7 @@ static ssize_t print_cpus_isolated(struct device *dev,
cpumask_andnot(isolated, cpu_possible_mask,
housekeeping_cpumask(HK_FLAG_DOMAIN));
- n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));
+ n = sprintf(buf, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
@@ -292,11 +291,7 @@ static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
static ssize_t print_cpus_nohz_full(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = 0, len = PAGE_SIZE-2;
-
- n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
-
- return n;
+ return sprintf(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b25bcab2a26b..94037be7f5d7 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -224,76 +224,44 @@ static int deferred_devs_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
-static int deferred_probe_timeout = -1;
+int driver_deferred_probe_timeout;
+EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
+static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
+
static int __init deferred_probe_timeout_setup(char *str)
{
int timeout;
if (!kstrtoint(str, 10, &timeout))
- deferred_probe_timeout = timeout;
+ driver_deferred_probe_timeout = timeout;
return 1;
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
-static int __driver_deferred_probe_check_state(struct device *dev)
-{
- if (!initcalls_done)
- return -EPROBE_DEFER;
-
- if (!deferred_probe_timeout) {
- dev_WARN(dev, "deferred probe timeout, ignoring dependency");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
/**
* driver_deferred_probe_check_state() - Check deferred probe state
* @dev: device to check
*
- * Returns -ENODEV if init is done and all built-in drivers have had a chance
- * to probe (i.e. initcalls are done), -ETIMEDOUT if deferred probe debug
- * timeout has expired, or -EPROBE_DEFER if none of those conditions are met.
+ * Return:
+ * -ENODEV if initcalls have completed and modules are disabled.
+ * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * -EPROBE_DEFER in other cases.
*
* Drivers or subsystems can opt in to calling this function instead of directly
* returning -EPROBE_DEFER.
*/
int driver_deferred_probe_check_state(struct device *dev)
{
- int ret;
-
- ret = __driver_deferred_probe_check_state(dev);
- if (ret < 0)
- return ret;
-
- dev_warn(dev, "ignoring dependency for device, assuming no driver");
-
- return -ENODEV;
-}
-
-/**
- * driver_deferred_probe_check_state_continue() - check deferred probe state
- * @dev: device to check
- *
- * Returns -ETIMEDOUT if deferred probe debug timeout has expired, or
- * -EPROBE_DEFER otherwise.
- *
- * Drivers or subsystems can opt in to calling this function instead of
- * directly returning -EPROBE_DEFER.
- *
- * This is similar to driver_deferred_probe_check_state(), but it allows the
- * subsystem to keep deferring probe after built-in drivers have had a chance
- * to probe. One scenario where that is useful is if built-in drivers rely on
- * resources that are provided by modular drivers.
- */
-int driver_deferred_probe_check_state_continue(struct device *dev)
-{
- int ret;
+ if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
+ dev_warn(dev, "ignoring dependency for device, assuming no driver");
+ return -ENODEV;
+ }
- ret = __driver_deferred_probe_check_state(dev);
- if (ret < 0)
- return ret;
+ if (!driver_deferred_probe_timeout && initcalls_done) {
+ dev_warn(dev, "deferred probe timeout, ignoring dependency");
+ return -ETIMEDOUT;
+ }
return -EPROBE_DEFER;
}
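A sketch of how a subsystem opts in, deferring on a missing dependency until the (now exported) timeout machinery gives up; lookup_vdd_supply() is a hypothetical helper, not a real API:

    supply = lookup_vdd_supply(dev);        /* hypothetical helper */
    if (IS_ERR(supply) && PTR_ERR(supply) == -ENODEV)
            return driver_deferred_probe_check_state(dev);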
@@ -302,12 +270,13 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *private, *p;
- deferred_probe_timeout = 0;
+ driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
dev_info(private->device, "deferred probe pending");
+ wake_up(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -336,9 +305,9 @@ static int deferred_probe_initcall(void)
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
- if (deferred_probe_timeout > 0) {
+ if (driver_deferred_probe_timeout > 0) {
schedule_delayed_work(&deferred_probe_timeout_work,
- deferred_probe_timeout * HZ);
+ driver_deferred_probe_timeout * HZ);
}
return 0;
}
@@ -668,9 +637,10 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
*/
int driver_probe_done(void)
{
- pr_debug("%s: probe_count = %d\n", __func__,
- atomic_read(&probe_count));
- if (atomic_read(&probe_count))
+ int local_probe_count = atomic_read(&probe_count);
+
+ pr_debug("%s: probe_count = %d\n", __func__, local_probe_count);
+ if (local_probe_count)
return -EBUSY;
return 0;
}
@@ -681,6 +651,9 @@ int driver_probe_done(void)
*/
void wait_for_device_probe(void)
{
+ /* wait for probe timeout */
+ wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
+
/* wait for the deferred probe workqueue to finish */
flush_work(&deferred_probe_work);
@@ -1222,7 +1195,7 @@ void driver_detach(struct device_driver *drv)
spin_unlock(&drv->p->klist_devices.k_lock);
break;
}
- dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
+ dev_prv = list_last_entry(&drv->p->klist_devices.k_list,
struct device_private,
knode_driver.n_node);
dev = dev_prv->device;
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
index 0b2dfa6259c9..e87843408fe6 100644
--- a/drivers/base/firmware_loader/Makefile
+++ b/drivers/base/firmware_loader/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
firmware_class-objs := main.o
firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
+firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o
obj-y += builtin/
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 8704e1bae175..1e9c96e3ed63 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -525,7 +525,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
}
retval = fw_sysfs_wait_timeout(fw_priv, timeout);
- if (retval < 0) {
+ if (retval < 0 && retval != -ENOENT) {
mutex_lock(&fw_lock);
fw_load_abort(fw_sysfs);
mutex_unlock(&fw_lock);
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 21063503e4ea..06f4577733a8 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -66,4 +66,14 @@ static inline void unregister_sysfs_loader(void)
}
#endif /* CONFIG_FW_LOADER_USER_HELPER */
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags);
+#else
+static inline int firmware_fallback_platform(struct fw_priv *fw_priv,
+ enum fw_opt opt_flags)
+{
+ return -ENOENT;
+}
+#endif
+
#endif /* __FIRMWARE_FALLBACK_H */
diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c
new file mode 100644
index 000000000000..c88c745590fe
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback_platform.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi_embedded_fw.h>
+#include <linux/property.h>
+#include <linux/security.h>
+#include <linux/vmalloc.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags)
+{
+ const u8 *data;
+ size_t size;
+ int rc;
+
+ if (!(opt_flags & FW_OPT_FALLBACK_PLATFORM))
+ return -ENOENT;
+
+ rc = security_kernel_load_data(LOADING_FIRMWARE_EFI_EMBEDDED);
+ if (rc)
+ return rc;
+
+ rc = efi_get_embedded_fw(fw_priv->fw_name, &data, &size);
+ if (rc)
+ return rc; /* rc == -ENOENT when the fw was not found */
+
+ fw_priv->data = vmalloc(size);
+ if (!fw_priv->data)
+ return -ENOMEM;
+
+ memcpy(fw_priv->data, data, size);
+ fw_priv->size = size;
+ fw_state_done(fw_priv);
+ return 0;
+}
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index ba9d30b28edc..a182e318bd09 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -45,5 +45,4 @@ struct ctl_table firmware_config_table[] = {
},
{ }
};
-EXPORT_SYMBOL_GPL(firmware_config_table);
#endif
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 8656e5239a80..25836a6afc9f 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -29,6 +29,9 @@
* firmware caching mechanism.
* @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes
* precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
+ * @FW_OPT_FALLBACK_PLATFORM: Enable fallback to a device fw copy embedded in
+ * the platform's main firmware. If both this fallback and the sysfs
+ * fallback are enabled, then this fallback will be tried first.
*/
enum fw_opt {
FW_OPT_UEVENT = BIT(0),
@@ -37,6 +40,7 @@ enum fw_opt {
FW_OPT_NO_WARN = BIT(3),
FW_OPT_NOCACHE = BIT(4),
FW_OPT_NOFALLBACK_SYSFS = BIT(5),
+ FW_OPT_FALLBACK_PLATFORM = BIT(6),
};
enum fw_status {
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 57133a9dad09..76f79913916d 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -493,8 +493,10 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
}
fw_priv->size = 0;
- rc = kernel_read_file_from_path(path, &buffer, &size,
- msize, id);
+
+ /* load firmware files from the mount namespace of init */
+ rc = kernel_read_file_from_path_initns(path, &buffer,
+ &size, msize, id);
if (rc) {
if (rc != -ENOENT)
dev_warn(device, "loading %s failed with error %d\n",
@@ -776,6 +778,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
fw_decompress_xz);
#endif
+ if (ret == -ENOENT)
+ ret = firmware_fallback_platform(fw->priv, opt_flags);
+
if (ret) {
if (!(opt_flags & FW_OPT_NO_WARN))
dev_warn(device,
@@ -884,6 +889,30 @@ int request_firmware_direct(const struct firmware **firmware_p,
EXPORT_SYMBOL_GPL(request_firmware_direct);
/**
+ * firmware_request_platform() - request firmware with platform-fw fallback
+ * @firmware: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function is similar in behaviour to request_firmware(), except that if
+ * direct filesystem lookup fails, it will fall back to looking for a copy of the
+ * requested firmware embedded in the platform's main (e.g. UEFI) firmware.
+ **/
+int firmware_request_platform(const struct firmware **firmware,
+ const char *name, struct device *device)
+{
+ int ret;
+
+ /* Need to pin this module until return */
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware, name, device, NULL, 0,
+ FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(firmware_request_platform);
+
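A minimal caller sketch for the new entry point; the firmware name and device pointer are placeholders, not from this patch:

    const struct firmware *fw;
    int ret;

    ret = firmware_request_platform(&fw, "example/fw.bin", dev);
    if (ret)
            return ret;
    /* ... consume fw->data / fw->size ... */
    release_firmware(fw);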
+/**
* firmware_request_cache() - cache firmware for suspend so resume can use it
* @name: name of firmware file
* @device: device for which firmware should be cached
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 4086718f6876..dbec3a05590a 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -27,6 +27,24 @@
#define MEMORY_CLASS_NAME "memory"
+static const char *const online_type_to_str[] = {
+ [MMOP_OFFLINE] = "offline",
+ [MMOP_ONLINE] = "online",
+ [MMOP_ONLINE_KERNEL] = "online_kernel",
+ [MMOP_ONLINE_MOVABLE] = "online_movable",
+};
+
+int memhp_online_type_from_str(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
+ if (sysfs_streq(str, online_type_to_str[i]))
+ return i;
+ }
+ return -EINVAL;
+}
+
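An illustrative round-trip through the new table and parser (values follow from the definitions above):

    int type = memhp_online_type_from_str("online_movable");
    /* type == MMOP_ONLINE_MOVABLE; online_type_to_str[type] == "online_movable" */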
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
static int sections_per_block;
@@ -145,45 +163,6 @@ int memory_notify(unsigned long val, void *v)
}
/*
- * The probe routines leave the pages uninitialized, just as the bootmem code
- * does. Make sure we do not access them, but instead use only information from
- * within sections.
- */
-static bool pages_correctly_probed(unsigned long start_pfn)
-{
- unsigned long section_nr = pfn_to_section_nr(start_pfn);
- unsigned long section_nr_end = section_nr + sections_per_block;
- unsigned long pfn = start_pfn;
-
- /*
- * memmap between sections is not contiguous except with
- * SPARSEMEM_VMEMMAP. We look up the page once per section
- * and assume memmap is contiguous within each section
- */
- for (; section_nr < section_nr_end; section_nr++) {
- if (WARN_ON_ONCE(!pfn_valid(pfn)))
- return false;
-
- if (!present_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) not present\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- } else if (!valid_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- } else if (online_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) is already online\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- }
- pfn += PAGES_PER_SECTION;
- }
-
- return true;
-}
-
-/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
*/
@@ -199,9 +178,6 @@ memory_block_action(unsigned long start_section_nr, unsigned long action,
switch (action) {
case MEM_ONLINE:
- if (!pages_correctly_probed(start_pfn))
- return -EBUSY;
-
ret = online_pages(start_pfn, nr_pages, online_type, nid);
break;
case MEM_OFFLINE:
@@ -245,17 +221,14 @@ static int memory_subsys_online(struct device *dev)
return 0;
/*
- * If we are called from state_store(), online_type will be
- * set >= 0. Otherwise we were called from the device online
- * attribute and need to set the online_type.
+ * When called via device_online() without configuring the online_type,
+ * we want to default to MMOP_ONLINE.
*/
- if (mem->online_type < 0)
- mem->online_type = MMOP_ONLINE_KEEP;
+ if (mem->online_type == MMOP_OFFLINE)
+ mem->online_type = MMOP_ONLINE;
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
-
- /* clear online_type */
- mem->online_type = -1;
+ mem->online_type = MMOP_OFFLINE;
return ret;
}
@@ -267,40 +240,27 @@ static int memory_subsys_offline(struct device *dev)
if (mem->state == MEM_OFFLINE)
return 0;
- /* Can't offline block with non-present sections */
- if (mem->section_count != sections_per_block)
- return -EINVAL;
-
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ const int online_type = memhp_online_type_from_str(buf);
struct memory_block *mem = to_memory_block(dev);
- int ret, online_type;
+ int ret;
+
+ if (online_type < 0)
+ return -EINVAL;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
- if (sysfs_streq(buf, "online_kernel"))
- online_type = MMOP_ONLINE_KERNEL;
- else if (sysfs_streq(buf, "online_movable"))
- online_type = MMOP_ONLINE_MOVABLE;
- else if (sysfs_streq(buf, "online"))
- online_type = MMOP_ONLINE_KEEP;
- else if (sysfs_streq(buf, "offline"))
- online_type = MMOP_OFFLINE;
- else {
- ret = -EINVAL;
- goto err;
- }
-
switch (online_type) {
case MMOP_ONLINE_KERNEL:
case MMOP_ONLINE_MOVABLE:
- case MMOP_ONLINE_KEEP:
+ case MMOP_ONLINE:
/* mem->online_type is protected by device_hotplug_lock */
mem->online_type = online_type;
ret = device_online(&mem->dev);
@@ -312,7 +272,6 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
ret = -EINVAL; /* should never happen */
}
-err:
unlock_device_hotplug();
if (ret < 0)
@@ -380,7 +339,8 @@ static ssize_t valid_zones_show(struct device *dev,
}
nid = mem->nid;
- default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
+ default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
+ nr_pages);
strcat(buf, default_zone->name);
print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
@@ -418,23 +378,20 @@ static DEVICE_ATTR_RO(block_size_bytes);
static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (memhp_auto_online)
- return sprintf(buf, "online\n");
- else
- return sprintf(buf, "offline\n");
+ return sprintf(buf, "%s\n",
+ online_type_to_str[memhp_default_online_type]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- if (sysfs_streq(buf, "online"))
- memhp_auto_online = true;
- else if (sysfs_streq(buf, "offline"))
- memhp_auto_online = false;
- else
+ const int online_type = memhp_online_type_from_str(buf);
+
+ if (online_type < 0)
return -EINVAL;
+ memhp_default_online_type = online_type;
return count;
}
@@ -627,7 +584,7 @@ static int init_memory_block(struct memory_block **memory,
static int add_memory_block(unsigned long base_section_nr)
{
- int ret, section_count = 0;
+ int section_count = 0;
struct memory_block *mem;
unsigned long nr;
@@ -638,12 +595,8 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
- ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
- MEM_ONLINE);
- if (ret)
- return ret;
- mem->section_count = section_count;
- return 0;
+ return init_memory_block(&mem, base_memory_block_id(base_section_nr),
+ MEM_ONLINE);
}
static void unregister_memory(struct memory_block *memory)
@@ -679,7 +632,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
if (ret)
break;
- mem->section_count = sections_per_block;
}
if (ret) {
end_block_id = block_id;
@@ -688,7 +640,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- mem->section_count = 0;
unregister_memory(mem);
}
}
@@ -717,7 +668,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- mem->section_count = 0;
unregister_memory_block_under_nodes(mem);
unregister_memory(mem);
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 98a31bafc8a2..10d7e818e118 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -772,7 +772,7 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
* memory block could have several absent sections from start.
* skip pfn range from absent section
*/
- if (!pfn_present(pfn)) {
+ if (!pfn_in_present_section(pfn)) {
pfn = round_down(pfn + PAGES_PER_SECTION,
PAGES_PER_SECTION) - 1;
continue;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b5ce7b085795..b27d0f6c18c9 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -63,6 +63,28 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
#ifdef CONFIG_HAS_IOMEM
/**
+ * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
+ * platform device and get resource
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @index: resource index
+ * @res: optional output parameter to store a pointer to the obtained resource.
+ */
+void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res)
+{
+ struct resource *r;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (res)
+ *res = r;
+ return devm_ioremap_resource(&pdev->dev, r);
+}
+EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
+
+/**
* devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
* device
*
@@ -73,10 +95,7 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, index);
- return devm_ioremap_resource(&pdev->dev, res);
+ return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
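
/*
 * A minimal caller sketch for the new helper, assuming a hypothetical
 * "foo" platform driver (names are illustrative, not from this diff):
 */
static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* Map MEM resource 0 and also get the resource back, e.g. so
	 * resource_size(res) can be logged or used as a bound. */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_info(&pdev->dev, "mapped %pR\n", res);
	return 0;
}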
@@ -361,6 +380,8 @@ struct platform_object {
*/
static void setup_pdev_dma_masks(struct platform_device *pdev)
{
+ pdev->dev.dma_parms = &pdev->dma_parms;
+
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (!pdev->dev.dma_mask) {
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 959d6d5eb000..0a01df608849 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2653,7 +2653,7 @@ static int genpd_iterate_idle_states(struct device_node *dn,
ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
if (ret <= 0)
- return ret;
+ return ret == -ENOENT ? 0 : ret;
/* Loop over the phandles until all the requested entries are found */
of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0e99a760aebd..0e07e17c2def 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,6 +40,10 @@
typedef int (*pm_callback_t)(struct device *);
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ device_links_read_lock_held())
+
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -266,7 +270,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
@@ -323,7 +327,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
@@ -726,7 +730,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
if (is_async(dev)) {
get_device(dev);
- async_schedule(func, dev);
+ async_schedule_dev(func, dev);
return true;
}
@@ -1235,7 +1239,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1695,7 +1699,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -1918,10 +1922,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
- WARN_ON(!pm_runtime_enabled(dev) &&
- dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED));
-
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1969,8 +1969,7 @@ unlock:
*/
spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
- ((pm_runtime_suspended(dev) && ret > 0) ||
- dev->power.no_pm_callbacks) &&
+ (ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
spin_unlock_irq(&dev->power.lock);
return 0;
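
/*
 * For reference, a sketch of what the list_for_each_entry_rcu_locked()
 * wrapper expands to at a call site such as dpm_wait_for_suppliers():
 * the optional fourth argument of list_for_each_entry_rcu() is a lockdep
 * expression asserting that the walk is already safe -- here because the
 * SRCU-based device-links read lock is held, not rcu_read_lock().
 */
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);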
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 444f5c169a0b..54292cdd7808 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -74,6 +74,7 @@ extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
+extern int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
#else /* CONFIG_PM */
@@ -88,6 +89,8 @@ static inline void pm_runtime_remove(struct device *dev) {}
static inline int dpm_sysfs_add(struct device *dev) { return 0; }
static inline void dpm_sysfs_remove(struct device *dev) {}
+static inline int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid) { return 0; }
#endif
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 16134a69bf6f..99c7da112c95 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1087,29 +1087,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up the device's usage counter.
* @dev: Device to handle.
*
* Return -EINVAL if runtime PM is disabled for the device.
*
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the device's runtime PM status is RPM_ACTIVE and either
+ * ign_usage_count is true or the device's usage_count is non-zero, increment
+ * the counter and return 1. Otherwise return 0 without changing the counter.
+ *
+ * If ign_usage_count is true, the function can be used to prevent suspending
+ * the device when its runtime PM status is RPM_ACTIVE.
+ *
+ * If ign_usage_count is false, the function can be used to prevent suspending
+ * the device when both its runtime PM status is RPM_ACTIVE and its usage_count
+ * is non-zero.
+ *
+ * The caller is responsible for putting the device's usage count when the
+ * return value is greater than zero.
*/
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
- retval = dev->power.disable_depth > 0 ? -EINVAL :
- dev->power.runtime_status == RPM_ACTIVE
- && atomic_inc_not_zero(&dev->power.usage_count);
+ if (dev->power.disable_depth > 0) {
+ retval = -EINVAL;
+ } else if (dev->power.runtime_status != RPM_ACTIVE) {
+ retval = 0;
+ } else if (ign_usage_count) {
+ retval = 1;
+ atomic_inc(&dev->power.usage_count);
+ } else {
+ retval = atomic_inc_not_zero(&dev->power.usage_count);
+ }
trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
+
return retval;
}
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
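
/*
 * A caller-side sketch honoring the contract in the kernel-doc above
 * (the foo_* names are hypothetical):
 */
static void foo_poll_hw(struct device *dev)
{
	/* true: take a reference whenever the device is RPM_ACTIVE,
	 * regardless of the current usage count. */
	if (pm_runtime_get_if_active(dev, true) <= 0)
		return;	/* 0: not active; -EINVAL: runtime PM disabled */

	foo_read_registers(dev);	/* hypothetical MMIO access */

	pm_runtime_put(dev);		/* balance the reference we took */
}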
/**
* __pm_runtime_set_status - Set runtime PM status of a device.
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d7d82db2e4bc..2b99fe1eb207 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -480,6 +480,14 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
@@ -501,7 +509,13 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
-#endif /* CONFIG_PM_SLEEP */
+#else /* CONFIG_PM_SLEEP */
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+#endif
#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
@@ -684,6 +698,45 @@ int dpm_sysfs_add(struct device *dev)
return rc;
}
+int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+ int rc;
+
+ if (device_pm_not_required(dev))
+ return 0;
+
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+
+ if (pm_runtime_callbacks_present(dev)) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_runtime_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
+ kuid, kgid);
+ if (rc)
+ return rc;
+
+ rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
+ kgid);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
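/*
 * The natural consumer of this hook is device_change_owner() (already
 * used by the wakeup helper above). An abbreviated, hedged sketch of how
 * the two plausibly fit together -- the real caller also handles symlinks
 * and other attribute groups:
 */
static int foo_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
	int error;

	/* chown the device's own sysfs directory first ... */
	error = sysfs_change_owner(&dev->kobj, kuid, kgid);
	if (error)
		return error;

	/* ... then the power/, runtime-PM and wakeup attribute groups */
	return dpm_sysfs_change_owner(dev, kuid, kgid);
}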
int wakeup_sysfs_add(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 27f3e60608e5..92073ac68473 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@ suspend_state_t pm_suspend_target_state;
#define pm_suspend_target_state (PM_SUSPEND_ON)
#endif
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ srcu_read_lock_held(&wakeup_srcu))
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -241,7 +244,9 @@ void wakeup_source_unregister(struct wakeup_source *ws)
{
if (ws) {
wakeup_source_remove(ws);
- wakeup_source_sysfs_remove(ws);
+ if (ws->dev)
+ wakeup_source_sysfs_remove(ws);
+
wakeup_source_destroy(ws);
}
}
@@ -405,7 +410,7 @@ void device_wakeup_arm_wake_irqs(void)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_arm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -421,7 +426,7 @@ void device_wakeup_disarm_wake_irqs(void)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_disarm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -874,7 +879,7 @@ void pm_print_active_wakeup_sources(void)
struct wakeup_source *last_activity_ws = NULL;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (ws->active) {
pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
@@ -1025,7 +1030,7 @@ void pm_wakep_autosleep_enabled(bool set)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
ws->autosleep_enabled = set;
@@ -1104,7 +1109,7 @@ static void *wakeup_sources_stats_seq_start(struct seq_file *m,
}
*srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (n-- <= 0)
return ws;
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 511f6d7acdfe..5f35c0ccf5e0 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -566,6 +566,7 @@ const char *fwnode_get_name(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_name);
}
+EXPORT_SYMBOL_GPL(fwnode_get_name);
/**
* fwnode_get_name_prefix - Return the prefix of node for printing purposes
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a53cc1e3a2d3..795facd8cf19 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -6,6 +6,9 @@
# Rewritten to use lists instead of if-statements.
#
+# needed for trace events
+ccflags-y += -I$(src)
+
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
@@ -39,6 +42,9 @@ obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
null_blk-objs := null_blk_main.o
+ifeq ($(CONFIG_BLK_DEV_ZONED), y)
+null_blk-$(CONFIG_TRACING) += null_blk_trace.o
+endif
null_blk-$(CONFIG_BLK_DEV_ZONED) += null_blk_zoned.o
skd-y := skd_main.o
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 7b32fb673375..a27804d71e12 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -87,9 +87,9 @@ static ssize_t aoedisk_show_netif(struct device *dev,
if (*nd == NULL)
return snprintf(page, PAGE_SIZE, "none\n");
for (p = page; nd < ne; nd++)
- p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
+ p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
p == page ? "" : ",", (*nd)->name);
- p += snprintf(p, PAGE_SIZE - (p-page), "\n");
+ p += scnprintf(p, PAGE_SIZE - (p-page), "\n");
return p-page;
}
/* firmware version */
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 220c5e18aba0..2fb25c348d53 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -381,12 +381,10 @@ static struct brd_device *brd_alloc(int i)
spin_lock_init(&brd->brd_lock);
INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
- brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
+ brd->brd_queue = blk_alloc_queue(brd_make_request, NUMA_NO_NODE);
if (!brd->brd_queue)
goto out_free_dev;
- blk_queue_make_request(brd->brd_queue, brd_make_request);
-
/* This is so fdisk will align partitions on 4k, because of
* direct_access API needing 4k alignment, returning a PFN
* (This is only a problem on very small devices <= 4M,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a18155cdce41..c094c3c2c5d4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2801,7 +2801,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_init_set_defaults(device);
- q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+ q = blk_alloc_queue(drbd_make_request, NUMA_NO_NODE);
if (!q)
goto out_no_q;
device->rq_queue = q;
@@ -2828,7 +2828,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
q->backing_dev_info->congested_fn = drbd_congested;
q->backing_dev_info->congested_data = device;
- blk_queue_make_request(q, drbd_make_request);
blk_queue_write_cache(q, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
@@ -3414,22 +3413,11 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
* the meta-data super block. This function sets MD_DIRTY, and starts a
* timer that ensures that within five seconds you have to call drbd_md_sync().
*/
-#ifdef DEBUG
-void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
-{
- if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
- mod_timer(&device->md_sync_timer, jiffies + HZ);
- device->last_md_mark_dirty.line = line;
- device->last_md_mark_dirty.func = func;
- }
-}
-#else
void drbd_md_mark_dirty(struct drbd_device *device)
{
if (!test_and_set_bit(MD_DIRTY, &device->flags))
mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
-#endif
void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 79e216446030..c15e7083b13a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -33,6 +33,7 @@
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
+#include <linux/part_stat.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b7f605c6e231..0dc019da1f8d 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -22,6 +22,7 @@
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
+#include <linux/part_stat.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8ef65c085640..c3daa64cb52c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -171,7 +171,6 @@ static int print_unex = 1;
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#define FDPATCHES
#include <linux/fdreg.h>
#include <linux/fd.h>
#include <linux/hdreg.h>
@@ -306,36 +305,26 @@ static bool initialized;
/* reverse mapping from unit and fdc to drive */
#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
-#define DP (&drive_params[current_drive])
-#define DRS (&drive_state[current_drive])
-#define DRWE (&write_errors[current_drive])
-#define FDCS (&fdc_state[fdc])
-
-#define UDP (&drive_params[drive])
-#define UDRS (&drive_state[drive])
-#define UDRWE (&write_errors[drive])
-#define UFDCS (&fdc_state[FDC(drive)])
-
#define PH_HEAD(floppy, head) (((((floppy)->stretch & 2) >> 1) ^ head) << 2)
#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
-/* read/write */
-#define COMMAND (raw_cmd->cmd[0])
-#define DR_SELECT (raw_cmd->cmd[1])
-#define TRACK (raw_cmd->cmd[2])
-#define HEAD (raw_cmd->cmd[3])
-#define SECTOR (raw_cmd->cmd[4])
-#define SIZECODE (raw_cmd->cmd[5])
-#define SECT_PER_TRACK (raw_cmd->cmd[6])
-#define GAP (raw_cmd->cmd[7])
-#define SIZECODE2 (raw_cmd->cmd[8])
+/* read/write commands */
+#define COMMAND 0
+#define DR_SELECT 1
+#define TRACK 2
+#define HEAD 3
+#define SECTOR 4
+#define SIZECODE 5
+#define SECT_PER_TRACK 6
+#define GAP 7
+#define SIZECODE2 8
#define NR_RW 9
-/* format */
-#define F_SIZECODE (raw_cmd->cmd[2])
-#define F_SECT_PER_TRACK (raw_cmd->cmd[3])
-#define F_GAP (raw_cmd->cmd[4])
-#define F_FILL (raw_cmd->cmd[5])
+/* format commands */
+#define F_SIZECODE 2
+#define F_SECT_PER_TRACK 3
+#define F_GAP 4
+#define F_FILL 5
#define NR_F 6
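
/*
 * The conversion above turns macros that silently dereferenced the global
 * raw_cmd into plain indices, so every access now names its backing
 * array. Illustration (not a hunk from this diff), old versus new for one
 * field:
 */
	/* before: hidden raw_cmd access behind an lvalue macro */
	F_SIZECODE = FD_SIZECODE(_floppy);

	/* after: the same store, with the array spelled out */
	raw_cmd->cmd[F_SIZECODE] = FD_SIZECODE(_floppy);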
/*
@@ -351,14 +340,14 @@ static bool initialized;
#define MAX_REPLIES 16
static unsigned char reply_buffer[MAX_REPLIES];
static int inr; /* size of reply buffer, when called from interrupt */
-#define ST0 (reply_buffer[0])
-#define ST1 (reply_buffer[1])
-#define ST2 (reply_buffer[2])
-#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
-#define R_TRACK (reply_buffer[3])
-#define R_HEAD (reply_buffer[4])
-#define R_SECTOR (reply_buffer[5])
-#define R_SIZECODE (reply_buffer[6])
+#define ST0 0
+#define ST1 1
+#define ST2 2
+#define ST3 0 /* result of GETSTATUS */
+#define R_TRACK 3
+#define R_HEAD 4
+#define R_SECTOR 5
+#define R_SIZECODE 6
#define SEL_DLY (2 * HZ / 100)
@@ -593,7 +582,7 @@ static int buffer_max = -1;
/* fdc related variables, should end up in a struct */
static struct floppy_fdc_state fdc_state[N_FDC];
-static int fdc; /* current fdc */
+static int current_fdc; /* current fdc */
static struct workqueue_struct *floppy_wq;
@@ -604,9 +593,19 @@ static unsigned char fsector_t; /* sector in track */
static unsigned char in_sector_offset; /* offset within physical sector,
* expressed in units of 512 bytes */
+static inline unsigned char fdc_inb(int fdc, int reg)
+{
+ return fd_inb(fdc_state[fdc].address + reg);
+}
+
+static inline void fdc_outb(unsigned char value, int fdc, int reg)
+{
+ fd_outb(value, fdc_state[fdc].address + reg);
+}
+
static inline bool drive_no_geom(int drive)
{
- return !current_type[drive] && !ITYPE(UDRS->fd_device);
+ return !current_type[drive] && !ITYPE(drive_state[drive].fd_device);
}
#ifndef fd_eject
@@ -630,7 +629,7 @@ static inline void set_debugt(void)
static inline void debugt(const char *func, const char *msg)
{
- if (DP->flags & DEBUGT)
+ if (drive_params[current_drive].flags & DEBUGT)
pr_info("%s:%s dtime=%lu\n", func, msg, jiffies - debugtimer);
}
#else
@@ -683,10 +682,10 @@ static void __reschedule_timeout(int drive, const char *message)
delay = 20UL * HZ;
drive = 0;
} else
- delay = UDP->timeout;
+ delay = drive_params[drive].timeout;
mod_delayed_work(floppy_wq, &fd_timeout, delay);
- if (UDP->flags & FD_DEBUG)
+ if (drive_params[drive].flags & FD_DEBUG)
DPRINT("reschedule timeout %s\n", message);
timeout_message = message;
}
@@ -740,33 +739,37 @@ static int disk_change(int drive)
{
int fdc = FDC(drive);
- if (time_before(jiffies, UDRS->select_date + UDP->select_delay))
+ if (time_before(jiffies, drive_state[drive].select_date + drive_params[drive].select_delay))
DPRINT("WARNING disk change called early\n");
- if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
- (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
+ if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive))) ||
+ (fdc_state[fdc].dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
DPRINT("probing disk change on unselected drive\n");
DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive),
- (unsigned int)FDCS->dor);
+ (unsigned int)fdc_state[fdc].dor);
}
- debug_dcl(UDP->flags,
+ debug_dcl(drive_params[drive].flags,
"checking disk change line for drive %d\n", drive);
- debug_dcl(UDP->flags, "jiffies=%lu\n", jiffies);
- debug_dcl(UDP->flags, "disk change line=%x\n", fd_inb(FD_DIR) & 0x80);
- debug_dcl(UDP->flags, "flags=%lx\n", UDRS->flags);
-
- if (UDP->flags & FD_BROKEN_DCL)
- return test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
- if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) {
- set_bit(FD_VERIFY_BIT, &UDRS->flags);
+ debug_dcl(drive_params[drive].flags, "jiffies=%lu\n", jiffies);
+ debug_dcl(drive_params[drive].flags, "disk change line=%x\n",
+ fdc_inb(fdc, FD_DIR) & 0x80);
+ debug_dcl(drive_params[drive].flags, "flags=%lx\n",
+ drive_state[drive].flags);
+
+ if (drive_params[drive].flags & FD_BROKEN_DCL)
+ return test_bit(FD_DISK_CHANGED_BIT,
+ &drive_state[drive].flags);
+ if ((fdc_inb(fdc, FD_DIR) ^ drive_params[drive].flags) & 0x80) {
+ set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
/* verify write protection */
- if (UDRS->maxblock) /* mark it changed */
- set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
+ if (drive_state[drive].maxblock) /* mark it changed */
+ set_bit(FD_DISK_CHANGED_BIT,
+ &drive_state[drive].flags);
/* invalidate its geometry */
- if (UDRS->keep_data >= 0) {
- if ((UDP->flags & FTD_MSG) &&
+ if (drive_state[drive].keep_data >= 0) {
+ if ((drive_params[drive].flags & FTD_MSG) &&
current_type[drive] != NULL)
DPRINT("Disk type is undefined after disk change\n");
current_type[drive] = NULL;
@@ -775,8 +778,8 @@ static int disk_change(int drive)
return 1;
} else {
- UDRS->last_checked = jiffies;
- clear_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags);
+ drive_state[drive].last_checked = jiffies;
+ clear_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[drive].flags);
}
return 0;
}
@@ -799,26 +802,26 @@ static int set_dor(int fdc, char mask, char data)
unsigned char newdor;
unsigned char olddor;
- if (FDCS->address == -1)
+ if (fdc_state[fdc].address == -1)
return -1;
- olddor = FDCS->dor;
+ olddor = fdc_state[fdc].dor;
newdor = (olddor & mask) | data;
if (newdor != olddor) {
unit = olddor & 0x3;
if (is_selected(olddor, unit) && !is_selected(newdor, unit)) {
drive = REVDRIVE(fdc, unit);
- debug_dcl(UDP->flags,
+ debug_dcl(drive_params[drive].flags,
"calling disk change from set_dor\n");
disk_change(drive);
}
- FDCS->dor = newdor;
- fd_outb(newdor, FD_DOR);
+ fdc_state[fdc].dor = newdor;
+ fdc_outb(newdor, fdc, FD_DOR);
unit = newdor & 0x3;
if (!is_selected(olddor, unit) && is_selected(newdor, unit)) {
drive = REVDRIVE(fdc, unit);
- UDRS->select_date = jiffies;
+ drive_state[drive].select_date = jiffies;
}
}
return olddor;
@@ -826,11 +829,12 @@ static int set_dor(int fdc, char mask, char data)
static void twaddle(void)
{
- if (DP->select_delay)
+ if (drive_params[current_drive].select_delay)
return;
- fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR);
- fd_outb(FDCS->dor, FD_DOR);
- DRS->select_date = jiffies;
+ fdc_outb(fdc_state[current_fdc].dor & ~(0x10 << UNIT(current_drive)),
+ current_fdc, FD_DOR);
+ fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
+ drive_state[current_drive].select_date = jiffies;
}
/*
@@ -841,19 +845,20 @@ static void reset_fdc_info(int mode)
{
int drive;
- FDCS->spec1 = FDCS->spec2 = -1;
- FDCS->need_configure = 1;
- FDCS->perp_mode = 1;
- FDCS->rawcmd = 0;
+ fdc_state[current_fdc].spec1 = fdc_state[current_fdc].spec2 = -1;
+ fdc_state[current_fdc].need_configure = 1;
+ fdc_state[current_fdc].perp_mode = 1;
+ fdc_state[current_fdc].rawcmd = 0;
for (drive = 0; drive < N_DRIVE; drive++)
- if (FDC(drive) == fdc && (mode || UDRS->track != NEED_1_RECAL))
- UDRS->track = NEED_2_RECAL;
+ if (FDC(drive) == current_fdc &&
+ (mode || drive_state[drive].track != NEED_1_RECAL))
+ drive_state[drive].track = NEED_2_RECAL;
}
/* selects the fdc and drive, and enables the fdc's input/dma. */
static void set_fdc(int drive)
{
- unsigned int new_fdc = fdc;
+ unsigned int new_fdc = current_fdc;
if (drive >= 0 && drive < N_DRIVE) {
new_fdc = FDC(drive);
@@ -863,15 +868,15 @@ static void set_fdc(int drive)
pr_info("bad fdc value\n");
return;
}
- fdc = new_fdc;
- set_dor(fdc, ~0, 8);
+ current_fdc = new_fdc;
+ set_dor(current_fdc, ~0, 8);
#if N_FDC > 1
- set_dor(1 - fdc, ~8, 0);
+ set_dor(1 - current_fdc, ~8, 0);
#endif
- if (FDCS->rawcmd == 2)
+ if (fdc_state[current_fdc].rawcmd == 2)
reset_fdc_info(1);
- if (fd_inb(FD_STATUS) != STATUS_READY)
- FDCS->reset = 1;
+ if (fdc_inb(current_fdc, FD_STATUS) != STATUS_READY)
+ fdc_state[current_fdc].reset = 1;
}
/* locks the driver */
@@ -924,19 +929,19 @@ static void floppy_off(unsigned int drive)
unsigned long volatile delta;
int fdc = FDC(drive);
- if (!(FDCS->dor & (0x10 << UNIT(drive))))
+ if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive))))
return;
del_timer(motor_off_timer + drive);
/* make spindle stop in a position which minimizes spinup time
* next time */
- if (UDP->rps) {
- delta = jiffies - UDRS->first_read_date + HZ -
- UDP->spindown_offset;
- delta = ((delta * UDP->rps) % HZ) / UDP->rps;
+ if (drive_params[drive].rps) {
+ delta = jiffies - drive_state[drive].first_read_date + HZ -
+ drive_params[drive].spindown_offset;
+ delta = ((delta * drive_params[drive].rps) % HZ) / drive_params[drive].rps;
motor_off_timer[drive].expires =
- jiffies + UDP->spindown - delta;
+ jiffies + drive_params[drive].spindown - delta;
}
add_timer(motor_off_timer + drive);
}
@@ -952,20 +957,20 @@ static void scandrives(void)
int drive;
int saved_drive;
- if (DP->select_delay)
+ if (drive_params[current_drive].select_delay)
return;
saved_drive = current_drive;
for (i = 0; i < N_DRIVE; i++) {
drive = (saved_drive + i + 1) % N_DRIVE;
- if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
+ if (drive_state[drive].fd_ref == 0 || drive_params[drive].select_delay != 0)
continue; /* skip closed drives */
set_fdc(drive);
- if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
+ if (!(set_dor(current_fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
(0x10 << UNIT(drive))))
/* switch the motor off again, if it was off to
* begin with */
- set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
+ set_dor(current_fdc, ~(0x10 << UNIT(drive)), 0);
}
set_fdc(saved_drive);
}
@@ -1011,7 +1016,8 @@ static void cancel_activity(void)
* transfer */
static void fd_watchdog(void)
{
- debug_dcl(DP->flags, "calling disk change from watchdog\n");
+ debug_dcl(drive_params[current_drive].flags,
+ "calling disk change from watchdog\n");
if (disk_change(current_drive)) {
DPRINT("disk removed during i/o\n");
@@ -1035,7 +1041,7 @@ static void main_command_interrupt(void)
static int fd_wait_for_completion(unsigned long expires,
void (*function)(void))
{
- if (FDCS->reset) {
+ if (fdc_state[current_fdc].reset) {
reset_fdc(); /* do the reset during sleep to win time
* if we don't need to sleep, it's a good
* occasion anyways */
@@ -1063,13 +1069,13 @@ static void setup_DMA(void)
pr_cont("%x,", raw_cmd->cmd[i]);
pr_cont("\n");
cont->done(0);
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
return;
}
if (((unsigned long)raw_cmd->kernel_data) % 512) {
pr_info("non aligned address: %p\n", raw_cmd->kernel_data);
cont->done(0);
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
return;
}
f = claim_dma_lock();
@@ -1077,10 +1083,11 @@ static void setup_DMA(void)
#ifdef fd_dma_setup
if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length,
(raw_cmd->flags & FD_RAW_READ) ?
- DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) {
+ DMA_MODE_READ : DMA_MODE_WRITE,
+ fdc_state[current_fdc].address) < 0) {
release_dma_lock(f);
cont->done(0);
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
return;
}
release_dma_lock(f);
@@ -1091,7 +1098,7 @@ static void setup_DMA(void)
DMA_MODE_READ : DMA_MODE_WRITE);
fd_set_dma_addr(raw_cmd->kernel_data);
fd_set_dma_count(raw_cmd->length);
- virtual_dma_port = FDCS->address;
+ virtual_dma_port = fdc_state[current_fdc].address;
fd_enable_dma();
release_dma_lock(f);
#endif
@@ -1105,18 +1112,18 @@ static int wait_til_ready(void)
int status;
int counter;
- if (FDCS->reset)
+ if (fdc_state[current_fdc].reset)
return -1;
for (counter = 0; counter < 10000; counter++) {
- status = fd_inb(FD_STATUS);
+ status = fdc_inb(current_fdc, FD_STATUS);
if (status & STATUS_READY)
return status;
}
if (initialized) {
- DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
+ DPRINT("Getstatus times out (%x) on fdc %d\n", status, current_fdc);
show_floppy();
}
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
return -1;
}
@@ -1129,17 +1136,17 @@ static int output_byte(char byte)
return -1;
if (is_ready_state(status)) {
- fd_outb(byte, FD_DATA);
+ fdc_outb(byte, current_fdc, FD_DATA);
output_log[output_log_pos].data = byte;
output_log[output_log_pos].status = status;
output_log[output_log_pos].jiffies = jiffies;
output_log_pos = (output_log_pos + 1) % OLOGSIZE;
return 0;
}
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
if (initialized) {
DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
- byte, fdc, status);
+ byte, current_fdc, status);
show_floppy();
}
return -1;
@@ -1162,16 +1169,16 @@ static int result(void)
return i;
}
if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
- reply_buffer[i] = fd_inb(FD_DATA);
+ reply_buffer[i] = fdc_inb(current_fdc, FD_DATA);
else
break;
}
if (initialized) {
DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
- fdc, status, i);
+ current_fdc, status, i);
show_floppy();
}
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
return -1;
}
@@ -1208,7 +1215,7 @@ static void perpendicular_mode(void)
default:
DPRINT("Invalid data rate for perpendicular mode!\n");
cont->done(0);
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
/*
* convenient way to return to
* redo without too much hassle
@@ -1219,12 +1226,12 @@ static void perpendicular_mode(void)
} else
perp_mode = 0;
- if (FDCS->perp_mode == perp_mode)
+ if (fdc_state[current_fdc].perp_mode == perp_mode)
return;
- if (FDCS->version >= FDC_82077_ORIG) {
+ if (fdc_state[current_fdc].version >= FDC_82077_ORIG) {
output_byte(FD_PERPENDICULAR);
output_byte(perp_mode);
- FDCS->perp_mode = perp_mode;
+ fdc_state[current_fdc].perp_mode = perp_mode;
} else if (perp_mode) {
DPRINT("perpendicular mode not supported by this FDC.\n");
}
@@ -1279,9 +1286,10 @@ static void fdc_specify(void)
int hlt_max_code = 0x7f;
int hut_max_code = 0xf;
- if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
+ if (fdc_state[current_fdc].need_configure &&
+ fdc_state[current_fdc].version >= FDC_82072A) {
fdc_configure();
- FDCS->need_configure = 0;
+ fdc_state[current_fdc].need_configure = 0;
}
switch (raw_cmd->rate & 0x03) {
@@ -1290,7 +1298,7 @@ static void fdc_specify(void)
break;
case 1:
dtr = 300;
- if (FDCS->version >= FDC_82078) {
+ if (fdc_state[current_fdc].version >= FDC_82078) {
/* choose the default rate table, not the one
* where 1 = 2 Mbps */
output_byte(FD_DRIVESPEC);
@@ -1305,27 +1313,30 @@ static void fdc_specify(void)
break;
}
- if (FDCS->version >= FDC_82072) {
+ if (fdc_state[current_fdc].version >= FDC_82072) {
scale_dtr = dtr;
hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
}
/* Convert step rate from microseconds to milliseconds and 4 bits */
- srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR);
+ srt = 16 - DIV_ROUND_UP(drive_params[current_drive].srt * scale_dtr / 1000,
+ NOMINAL_DTR);
if (slow_floppy)
srt = srt / 4;
SUPBOUND(srt, 0xf);
INFBOUND(srt, 0);
- hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR);
+ hlt = DIV_ROUND_UP(drive_params[current_drive].hlt * scale_dtr / 2,
+ NOMINAL_DTR);
if (hlt < 0x01)
hlt = 0x01;
else if (hlt > 0x7f)
hlt = hlt_max_code;
- hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR);
+ hut = DIV_ROUND_UP(drive_params[current_drive].hut * scale_dtr / 16,
+ NOMINAL_DTR);
if (hut < 0x1)
hut = 0x1;
else if (hut > 0xf)
@@ -1335,11 +1346,12 @@ static void fdc_specify(void)
spec2 = (hlt << 1) | (use_virtual_dma & 1);
/* If these parameters did not change, just return with success */
- if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
+ if (fdc_state[current_fdc].spec1 != spec1 ||
+ fdc_state[current_fdc].spec2 != spec2) {
/* Go ahead and set spec1 and spec2 */
output_byte(FD_SPECIFY);
- output_byte(FDCS->spec1 = spec1);
- output_byte(FDCS->spec2 = spec2);
+ output_byte(fdc_state[current_fdc].spec1 = spec1);
+ output_byte(fdc_state[current_fdc].spec2 = spec2);
}
} /* fdc_specify */
@@ -1350,52 +1362,55 @@ static void fdc_specify(void)
static int fdc_dtr(void)
{
/* If data rate not already set to desired value, set it. */
- if ((raw_cmd->rate & 3) == FDCS->dtr)
+ if ((raw_cmd->rate & 3) == fdc_state[current_fdc].dtr)
return 0;
/* Set dtr */
- fd_outb(raw_cmd->rate & 3, FD_DCR);
+ fdc_outb(raw_cmd->rate & 3, current_fdc, FD_DCR);
/* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
* need a stabilization period of several milliseconds to be
* enforced after data rate changes before R/W operations.
* Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
*/
- FDCS->dtr = raw_cmd->rate & 3;
+ fdc_state[current_fdc].dtr = raw_cmd->rate & 3;
return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
{
pr_cont(": track %d, head %d, sector %d, size %d",
- R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
+ reply_buffer[R_TRACK], reply_buffer[R_HEAD],
+ reply_buffer[R_SECTOR],
+ reply_buffer[R_SIZECODE]);
} /* tell_sector */
static void print_errors(void)
{
DPRINT("");
- if (ST0 & ST0_ECE) {
+ if (reply_buffer[ST0] & ST0_ECE) {
pr_cont("Recalibrate failed!");
- } else if (ST2 & ST2_CRC) {
+ } else if (reply_buffer[ST2] & ST2_CRC) {
pr_cont("data CRC error");
tell_sector();
- } else if (ST1 & ST1_CRC) {
+ } else if (reply_buffer[ST1] & ST1_CRC) {
pr_cont("CRC error");
tell_sector();
- } else if ((ST1 & (ST1_MAM | ST1_ND)) ||
- (ST2 & ST2_MAM)) {
+ } else if ((reply_buffer[ST1] & (ST1_MAM | ST1_ND)) ||
+ (reply_buffer[ST2] & ST2_MAM)) {
if (!probing) {
pr_cont("sector not found");
tell_sector();
} else
pr_cont("probe failed...");
- } else if (ST2 & ST2_WC) { /* seek error */
+ } else if (reply_buffer[ST2] & ST2_WC) { /* seek error */
pr_cont("wrong cylinder");
- } else if (ST2 & ST2_BC) { /* cylinder marked as bad */
+ } else if (reply_buffer[ST2] & ST2_BC) { /* cylinder marked as bad */
pr_cont("bad cylinder");
} else {
pr_cont("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x",
- ST0, ST1, ST2);
+ reply_buffer[ST0], reply_buffer[ST1],
+ reply_buffer[ST2]);
tell_sector();
}
pr_cont("\n");
@@ -1414,33 +1429,35 @@ static int interpret_errors(void)
if (inr != 7) {
DPRINT("-- FDC reply error\n");
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
return 1;
}
/* check IC to find cause of interrupt */
- switch (ST0 & ST0_INTR) {
+ switch (reply_buffer[ST0] & ST0_INTR) {
case 0x40: /* error occurred during command execution */
- if (ST1 & ST1_EOC)
+ if (reply_buffer[ST1] & ST1_EOC)
return 0; /* occurs with pseudo-DMA */
bad = 1;
- if (ST1 & ST1_WP) {
+ if (reply_buffer[ST1] & ST1_WP) {
DPRINT("Drive is write protected\n");
- clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
+ clear_bit(FD_DISK_WRITABLE_BIT,
+ &drive_state[current_drive].flags);
cont->done(0);
bad = 2;
- } else if (ST1 & ST1_ND) {
- set_bit(FD_NEED_TWADDLE_BIT, &DRS->flags);
- } else if (ST1 & ST1_OR) {
- if (DP->flags & FTD_MSG)
+ } else if (reply_buffer[ST1] & ST1_ND) {
+ set_bit(FD_NEED_TWADDLE_BIT,
+ &drive_state[current_drive].flags);
+ } else if (reply_buffer[ST1] & ST1_OR) {
+ if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
- } else if (*errors >= DP->max_errors.reporting) {
+ } else if (*errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
- if (ST2 & ST2_WC || ST2 & ST2_BC)
+ if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
/* wrong cylinder => recal */
- DRS->track = NEED_2_RECAL;
+ drive_state[current_drive].track = NEED_2_RECAL;
return bad;
case 0x80: /* invalid command given */
DPRINT("Invalid FDC command given!\n");
@@ -1473,13 +1490,13 @@ static void setup_rw_floppy(void)
flags |= FD_RAW_INTR;
if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) {
- ready_date = DRS->spinup_date + DP->spinup;
+ ready_date = drive_state[current_drive].spinup_date + drive_params[current_drive].spinup;
/* If spinup will take a long time, rerun scandrives
* again just before spinup completion. Beware that
* after scandrives, we must again wait for selection.
*/
- if (time_after(ready_date, jiffies + DP->select_delay)) {
- ready_date -= DP->select_delay;
+ if (time_after(ready_date, jiffies + drive_params[current_drive].select_delay)) {
+ ready_date -= drive_params[current_drive].select_delay;
function = floppy_start;
} else
function = setup_rw_floppy;
@@ -1522,44 +1539,52 @@ static int blind_seek;
static void seek_interrupt(void)
{
debugt(__func__, "");
- if (inr != 2 || (ST0 & 0xF8) != 0x20) {
+ if (inr != 2 || (reply_buffer[ST0] & 0xF8) != 0x20) {
DPRINT("seek failed\n");
- DRS->track = NEED_2_RECAL;
+ drive_state[current_drive].track = NEED_2_RECAL;
cont->error();
cont->redo();
return;
}
- if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) {
- debug_dcl(DP->flags,
+ if (drive_state[current_drive].track >= 0 &&
+ drive_state[current_drive].track != reply_buffer[ST1] &&
+ !blind_seek) {
+ debug_dcl(drive_params[current_drive].flags,
"clearing NEWCHANGE flag because of effective seek\n");
- debug_dcl(DP->flags, "jiffies=%lu\n", jiffies);
- clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
+ debug_dcl(drive_params[current_drive].flags, "jiffies=%lu\n",
+ jiffies);
+ clear_bit(FD_DISK_NEWCHANGE_BIT,
+ &drive_state[current_drive].flags);
/* effective seek */
- DRS->select_date = jiffies;
+ drive_state[current_drive].select_date = jiffies;
}
- DRS->track = ST1;
+ drive_state[current_drive].track = reply_buffer[ST1];
floppy_ready();
}
static void check_wp(void)
{
- if (test_bit(FD_VERIFY_BIT, &DRS->flags)) {
+ if (test_bit(FD_VERIFY_BIT, &drive_state[current_drive].flags)) {
/* check write protection */
output_byte(FD_GETSTATUS);
output_byte(UNIT(current_drive));
if (result() != 1) {
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
return;
}
- clear_bit(FD_VERIFY_BIT, &DRS->flags);
- clear_bit(FD_NEED_TWADDLE_BIT, &DRS->flags);
- debug_dcl(DP->flags,
+ clear_bit(FD_VERIFY_BIT, &drive_state[current_drive].flags);
+ clear_bit(FD_NEED_TWADDLE_BIT,
+ &drive_state[current_drive].flags);
+ debug_dcl(drive_params[current_drive].flags,
"checking whether disk is write protected\n");
- debug_dcl(DP->flags, "wp=%x\n", ST3 & 0x40);
- if (!(ST3 & 0x40))
- set_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
+ debug_dcl(drive_params[current_drive].flags, "wp=%x\n",
+ reply_buffer[ST3] & 0x40);
+ if (!(reply_buffer[ST3] & 0x40))
+ set_bit(FD_DISK_WRITABLE_BIT,
+ &drive_state[current_drive].flags);
else
- clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
+ clear_bit(FD_DISK_WRITABLE_BIT,
+ &drive_state[current_drive].flags);
}
}
@@ -1569,32 +1594,34 @@ static void seek_floppy(void)
blind_seek = 0;
- debug_dcl(DP->flags, "calling disk change from %s\n", __func__);
+ debug_dcl(drive_params[current_drive].flags,
+ "calling disk change from %s\n", __func__);
- if (!test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) &&
+ if (!test_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags) &&
disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) {
/* the media changed flag should be cleared after the seek.
* If it isn't, this means that there is really no disk in
* the drive.
*/
- set_bit(FD_DISK_CHANGED_BIT, &DRS->flags);
+ set_bit(FD_DISK_CHANGED_BIT,
+ &drive_state[current_drive].flags);
cont->done(0);
cont->redo();
return;
}
- if (DRS->track <= NEED_1_RECAL) {
+ if (drive_state[current_drive].track <= NEED_1_RECAL) {
recalibrate_floppy();
return;
- } else if (test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) &&
+ } else if (test_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags) &&
(raw_cmd->flags & FD_RAW_NEED_DISK) &&
- (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
+ (drive_state[current_drive].track <= NO_TRACK || drive_state[current_drive].track == raw_cmd->track)) {
/* we seek to clear the media-changed condition. Does anybody
* know a more elegant way, which works on all drives? */
if (raw_cmd->track)
track = raw_cmd->track - 1;
else {
- if (DP->flags & FD_SILENT_DCL_CLEAR) {
- set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
+ if (drive_params[current_drive].flags & FD_SILENT_DCL_CLEAR) {
+ set_dor(current_fdc, ~(0x10 << UNIT(current_drive)), 0);
blind_seek = 1;
raw_cmd->flags |= FD_RAW_NEED_SEEK;
}
@@ -1602,7 +1629,7 @@ static void seek_floppy(void)
}
} else {
check_wp();
- if (raw_cmd->track != DRS->track &&
+ if (raw_cmd->track != drive_state[current_drive].track &&
(raw_cmd->flags & FD_RAW_NEED_SEEK))
track = raw_cmd->track;
else {
@@ -1625,9 +1652,9 @@ static void recal_interrupt(void)
{
debugt(__func__, "");
if (inr != 2)
- FDCS->reset = 1;
- else if (ST0 & ST0_ECE) {
- switch (DRS->track) {
+ fdc_state[current_fdc].reset = 1;
+ else if (reply_buffer[ST0] & ST0_ECE) {
+ switch (drive_state[current_drive].track) {
case NEED_1_RECAL:
debugt(__func__, "need 1 recal");
/* after a second recalibrate, we still haven't
@@ -1645,11 +1672,12 @@ static void recal_interrupt(void)
* not to move at recalibration is to
* be already at track 0.) Clear the
* new change flag */
- debug_dcl(DP->flags,
+ debug_dcl(drive_params[current_drive].flags,
"clearing NEWCHANGE flag because of second recalibrate\n");
- clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
- DRS->select_date = jiffies;
+ clear_bit(FD_DISK_NEWCHANGE_BIT,
+ &drive_state[current_drive].flags);
+ drive_state[current_drive].select_date = jiffies;
/* fall through */
default:
debugt(__func__, "default");
@@ -1659,11 +1687,11 @@ static void recal_interrupt(void)
* track 0, this might mean that we
* started beyond track 80. Try
* again. */
- DRS->track = NEED_1_RECAL;
+ drive_state[current_drive].track = NEED_1_RECAL;
break;
}
} else
- DRS->track = ST1;
+ drive_state[current_drive].track = reply_buffer[ST1];
floppy_ready();
}
@@ -1693,20 +1721,20 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
release_dma_lock(f);
do_floppy = NULL;
- if (fdc >= N_FDC || FDCS->address == -1) {
+ if (current_fdc >= N_FDC || fdc_state[current_fdc].address == -1) {
/* we don't even know which FDC is the culprit */
pr_info("DOR0=%x\n", fdc_state[0].dor);
- pr_info("floppy interrupt on bizarre fdc %d\n", fdc);
+ pr_info("floppy interrupt on bizarre fdc %d\n", current_fdc);
pr_info("handler=%ps\n", handler);
is_alive(__func__, "bizarre fdc");
return IRQ_NONE;
}
- FDCS->reset = 0;
+ fdc_state[current_fdc].reset = 0;
/* We have to clear the reset flag here, because apparently on boxes
* with level triggered interrupts (PS/2, Sparc, ...), it is needed to
- * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
- * emission of the SENSEI's.
+ * emit SENSEI's to clear the interrupt line. And fdc_state[fdc].reset
+ * blocks the emission of the SENSEI's.
* It is OK to emit floppy commands because we are in an interrupt
* handler here, and thus we have to fear no interference of other
* activity.
@@ -1725,11 +1753,11 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
if (do_print)
print_result("sensei", inr);
max_sensei--;
- } while ((ST0 & 0x83) != UNIT(current_drive) &&
+ } while ((reply_buffer[ST0] & 0x83) != UNIT(current_drive) &&
inr == 2 && max_sensei);
}
if (!handler) {
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
return IRQ_NONE;
}
schedule_bh(handler);
@@ -1755,7 +1783,7 @@ static void reset_interrupt(void)
{
debugt(__func__, "");
result(); /* get the status ready for set_fdc */
- if (FDCS->reset) {
+ if (fdc_state[current_fdc].reset) {
pr_info("reset set in interrupt, calling %ps\n", cont->error);
cont->error(); /* a reset just after a reset. BAD! */
}
@@ -1771,7 +1799,7 @@ static void reset_fdc(void)
unsigned long flags;
do_floppy = reset_interrupt;
- FDCS->reset = 0;
+ fdc_state[current_fdc].reset = 0;
reset_fdc_info(0);
/* Pseudo-DMA may intercept 'reset finished' interrupt. */
@@ -1781,12 +1809,13 @@ static void reset_fdc(void)
fd_disable_dma();
release_dma_lock(flags);
- if (FDCS->version >= FDC_82072A)
- fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS);
+ if (fdc_state[current_fdc].version >= FDC_82072A)
+ fdc_outb(0x80 | (fdc_state[current_fdc].dtr & 3),
+ current_fdc, FD_STATUS);
else {
- fd_outb(FDCS->dor & ~0x04, FD_DOR);
+ fdc_outb(fdc_state[current_fdc].dor & ~0x04, current_fdc, FD_DOR);
udelay(FD_RESET_DELAY);
- fd_outb(FDCS->dor, FD_DOR);
+ fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
}
}
@@ -1813,7 +1842,7 @@ static void show_floppy(void)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
reply_buffer, resultsize, true);
- pr_info("status=%x\n", fd_inb(FD_STATUS));
+ pr_info("status=%x\n", fdc_inb(current_fdc, FD_STATUS));
pr_info("fdc_busy=%lu\n", fdc_busy);
if (do_floppy)
pr_info("do_floppy=%ps\n", do_floppy);
@@ -1850,7 +1879,7 @@ static void floppy_shutdown(struct work_struct *arg)
if (initialized)
DPRINT("floppy timeout called\n");
- FDCS->reset = 1;
+ fdc_state[current_fdc].reset = 1;
if (cont) {
cont->done(0);
cont->redo(); /* this will recall reset when needed */
@@ -1870,29 +1899,29 @@ static int start_motor(void (*function)(void))
mask = 0xfc;
data = UNIT(current_drive);
if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) {
- if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) {
+ if (!(fdc_state[current_fdc].dor & (0x10 << UNIT(current_drive)))) {
set_debugt();
/* no read since this drive is running */
- DRS->first_read_date = 0;
+ drive_state[current_drive].first_read_date = 0;
/* note motor start time if motor is not yet running */
- DRS->spinup_date = jiffies;
+ drive_state[current_drive].spinup_date = jiffies;
data |= (0x10 << UNIT(current_drive));
}
- } else if (FDCS->dor & (0x10 << UNIT(current_drive)))
+ } else if (fdc_state[current_fdc].dor & (0x10 << UNIT(current_drive)))
mask &= ~(0x10 << UNIT(current_drive));
/* starts motor and selects floppy */
del_timer(motor_off_timer + current_drive);
- set_dor(fdc, mask, data);
+ set_dor(current_fdc, mask, data);
/* wait_for_completion also schedules reset if needed. */
- return fd_wait_for_completion(DRS->select_date + DP->select_delay,
+ return fd_wait_for_completion(drive_state[current_drive].select_date + drive_params[current_drive].select_delay,
function);
}
static void floppy_ready(void)
{
- if (FDCS->reset) {
+ if (fdc_state[current_fdc].reset) {
reset_fdc();
return;
}
@@ -1901,9 +1930,10 @@ static void floppy_ready(void)
if (fdc_dtr())
return;
- debug_dcl(DP->flags, "calling disk change from floppy_ready\n");
+ debug_dcl(drive_params[current_drive].flags,
+ "calling disk change from floppy_ready\n");
if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
- disk_change(current_drive) && !DP->select_delay)
+ disk_change(current_drive) && !drive_params[current_drive].select_delay)
twaddle(); /* this clears the dcl on certain
* drive/controller combinations */
@@ -1932,8 +1962,9 @@ static void floppy_start(void)
reschedule_timeout(current_reqD, "floppy start");
scandrives();
- debug_dcl(DP->flags, "setting NEWCHANGE in floppy_start\n");
- set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
+ debug_dcl(drive_params[current_drive].flags,
+ "setting NEWCHANGE in floppy_start\n");
+ set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags);
floppy_ready();
}
@@ -1991,7 +2022,7 @@ static int wait_til_done(void (*handler)(void), bool interruptible)
return -EINTR;
}
- if (FDCS->reset)
+ if (fdc_state[current_fdc].reset)
command_status = FD_COMMAND_ERROR;
if (command_status == FD_COMMAND_OKAY)
ret = 0;
@@ -2032,14 +2063,14 @@ static int next_valid_format(void)
{
int probed_format;
- probed_format = DRS->probed_format;
+ probed_format = drive_state[current_drive].probed_format;
while (1) {
- if (probed_format >= 8 || !DP->autodetect[probed_format]) {
- DRS->probed_format = 0;
+ if (probed_format >= 8 || !drive_params[current_drive].autodetect[probed_format]) {
+ drive_state[current_drive].probed_format = 0;
return 1;
}
- if (floppy_type[DP->autodetect[probed_format]].sect) {
- DRS->probed_format = probed_format;
+ if (floppy_type[drive_params[current_drive].autodetect[probed_format]].sect) {
+ drive_state[current_drive].probed_format = probed_format;
return 0;
}
probed_format++;
@@ -2051,23 +2082,23 @@ static void bad_flp_intr(void)
int err_count;
if (probing) {
- DRS->probed_format++;
+ drive_state[current_drive].probed_format++;
if (!next_valid_format())
return;
}
err_count = ++(*errors);
- INFBOUND(DRWE->badness, err_count);
- if (err_count > DP->max_errors.abort)
+ INFBOUND(write_errors[current_drive].badness, err_count);
+ if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);
- if (err_count > DP->max_errors.reset)
- FDCS->reset = 1;
- else if (err_count > DP->max_errors.recal)
- DRS->track = NEED_2_RECAL;
+ if (err_count > drive_params[current_drive].max_errors.reset)
+ fdc_state[current_fdc].reset = 1;
+ else if (err_count > drive_params[current_drive].max_errors.recal)
+ drive_state[current_drive].track = NEED_2_RECAL;
}
static void set_floppy(int drive)
{
- int type = ITYPE(UDRS->fd_device);
+ int type = ITYPE(drive_state[drive].fd_device);
if (type)
_floppy = floppy_type + type;
@@ -2113,28 +2144,28 @@ static void setup_format_params(int track)
FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK);
raw_cmd->rate = _floppy->rate & 0x43;
raw_cmd->cmd_count = NR_F;
- COMMAND = FM_MODE(_floppy, FD_FORMAT);
- DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
- F_SIZECODE = FD_SIZECODE(_floppy);
- F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
- F_GAP = _floppy->fmt_gap;
- F_FILL = FD_FILL_BYTE;
+ raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_FORMAT);
+ raw_cmd->cmd[DR_SELECT] = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
+ raw_cmd->cmd[F_SIZECODE] = FD_SIZECODE(_floppy);
+ raw_cmd->cmd[F_SECT_PER_TRACK] = _floppy->sect << 2 >> raw_cmd->cmd[F_SIZECODE];
+ raw_cmd->cmd[F_GAP] = _floppy->fmt_gap;
+ raw_cmd->cmd[F_FILL] = FD_FILL_BYTE;
raw_cmd->kernel_data = floppy_track_buffer;
- raw_cmd->length = 4 * F_SECT_PER_TRACK;
+ raw_cmd->length = 4 * raw_cmd->cmd[F_SECT_PER_TRACK];
- if (!F_SECT_PER_TRACK)
+ if (!raw_cmd->cmd[F_SECT_PER_TRACK])
return;
/* allow for about 30ms for data transport per track */
- head_shift = (F_SECT_PER_TRACK + 5) / 6;
+ head_shift = (raw_cmd->cmd[F_SECT_PER_TRACK] + 5) / 6;
/* a ``cylinder'' is two tracks plus a little stepping time */
track_shift = 2 * head_shift + 3;
/* position of logical sector 1 on this track */
n = (track_shift * format_req.track + head_shift * format_req.head)
- % F_SECT_PER_TRACK;
+ % raw_cmd->cmd[F_SECT_PER_TRACK];
/* determine interleave */
il = 1;
@@ -2142,27 +2173,27 @@ static void setup_format_params(int track)
il++;
/* initialize field */
- for (count = 0; count < F_SECT_PER_TRACK; ++count) {
+ for (count = 0; count < raw_cmd->cmd[F_SECT_PER_TRACK]; ++count) {
here[count].track = format_req.track;
here[count].head = format_req.head;
here[count].sect = 0;
- here[count].size = F_SIZECODE;
+ here[count].size = raw_cmd->cmd[F_SIZECODE];
}
/* place logical sectors */
- for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
+ for (count = 1; count <= raw_cmd->cmd[F_SECT_PER_TRACK]; ++count) {
here[n].sect = count;
- n = (n + il) % F_SECT_PER_TRACK;
+ n = (n + il) % raw_cmd->cmd[F_SECT_PER_TRACK];
if (here[n].sect) { /* sector busy, find next free sector */
++n;
- if (n >= F_SECT_PER_TRACK) {
- n -= F_SECT_PER_TRACK;
+ if (n >= raw_cmd->cmd[F_SECT_PER_TRACK]) {
+ n -= raw_cmd->cmd[F_SECT_PER_TRACK];
while (here[n].sect)
++n;
}
}
}
if (_floppy->stretch & FD_SECTBASEMASK) {
- for (count = 0; count < F_SECT_PER_TRACK; count++)
+ for (count = 0; count < raw_cmd->cmd[F_SECT_PER_TRACK]; count++)
here[count].sect += FD_SECTBASE(_floppy) - 1;
}
}
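
/*
 * A worked instance of the skew arithmetic above (values chosen for a
 * common 18-sector track; a sketch, not from this diff):
 *
 *	head_shift  = (18 + 5) / 6 = 3		slack per head switch
 *	track_shift = 2 * 3 + 3    = 9		slack per cylinder step
 *
 * Track 0, head 0: n = (9 * 0 + 3 * 0) % 18 = 0, so logical sector 1
 * sits in slot 0; track 1, head 0: n = (9 * 1) % 18 = 9, so sector 1 is
 * skewed half a revolution to absorb the seek time.
 */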
@@ -2191,7 +2222,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
set_floppy(drive);
if (!_floppy ||
- _floppy->track > DP->tracks ||
+ _floppy->track > drive_params[current_drive].tracks ||
tmp_format_req->track >= _floppy->track ||
tmp_format_req->head >= _floppy->head ||
(_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
@@ -2253,21 +2284,21 @@ static void request_done(int uptodate)
/* maintain values for invalidation on geometry
* change */
block = current_count_sectors + blk_rq_pos(req);
- INFBOUND(DRS->maxblock, block);
+ INFBOUND(drive_state[current_drive].maxblock, block);
if (block > _floppy->sect)
- DRS->maxtrack = 1;
+ drive_state[current_drive].maxtrack = 1;
floppy_end_request(req, 0);
} else {
if (rq_data_dir(req) == WRITE) {
/* record write error information */
- DRWE->write_errors++;
- if (DRWE->write_errors == 1) {
- DRWE->first_error_sector = blk_rq_pos(req);
- DRWE->first_error_generation = DRS->generation;
+ write_errors[current_drive].write_errors++;
+ if (write_errors[current_drive].write_errors == 1) {
+ write_errors[current_drive].first_error_sector = blk_rq_pos(req);
+ write_errors[current_drive].first_error_generation = drive_state[current_drive].generation;
}
- DRWE->last_error_sector = blk_rq_pos(req);
- DRWE->last_error_generation = DRS->generation;
+ write_errors[current_drive].last_error_sector = blk_rq_pos(req);
+ write_errors[current_drive].last_error_generation = drive_state[current_drive].generation;
}
floppy_end_request(req, BLK_STS_IOERR);
}
@@ -2281,43 +2312,46 @@ static void rw_interrupt(void)
int heads;
int nr_sectors;
- if (R_HEAD >= 2) {
+ if (reply_buffer[R_HEAD] >= 2) {
/* some Toshiba floppy controllers occasionally seem to
* return bogus interrupts after read/write operations, which
* can be recognized by a bad head number (>= 2) */
return;
}
- if (!DRS->first_read_date)
- DRS->first_read_date = jiffies;
+ if (!drive_state[current_drive].first_read_date)
+ drive_state[current_drive].first_read_date = jiffies;
nr_sectors = 0;
- ssize = DIV_ROUND_UP(1 << SIZECODE, 4);
+ ssize = DIV_ROUND_UP(1 << raw_cmd->cmd[SIZECODE], 4);
- if (ST1 & ST1_EOC)
+ if (reply_buffer[ST1] & ST1_EOC)
eoc = 1;
else
eoc = 0;
- if (COMMAND & 0x80)
+ if (raw_cmd->cmd[COMMAND] & 0x80)
heads = 2;
else
heads = 1;
- nr_sectors = (((R_TRACK - TRACK) * heads +
- R_HEAD - HEAD) * SECT_PER_TRACK +
- R_SECTOR - SECTOR + eoc) << SIZECODE >> 2;
+ nr_sectors = (((reply_buffer[R_TRACK] - raw_cmd->cmd[TRACK]) * heads +
+ reply_buffer[R_HEAD] - raw_cmd->cmd[HEAD]) * raw_cmd->cmd[SECT_PER_TRACK] +
+ reply_buffer[R_SECTOR] - raw_cmd->cmd[SECTOR] + eoc) << raw_cmd->cmd[SIZECODE] >> 2;
if (nr_sectors / ssize >
DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
DPRINT("long rw: %x instead of %lx\n",
nr_sectors, current_count_sectors);
- pr_info("rs=%d s=%d\n", R_SECTOR, SECTOR);
- pr_info("rh=%d h=%d\n", R_HEAD, HEAD);
- pr_info("rt=%d t=%d\n", R_TRACK, TRACK);
+ pr_info("rs=%d s=%d\n", reply_buffer[R_SECTOR],
+ raw_cmd->cmd[SECTOR]);
+ pr_info("rh=%d h=%d\n", reply_buffer[R_HEAD],
+ raw_cmd->cmd[HEAD]);
+ pr_info("rt=%d t=%d\n", reply_buffer[R_TRACK],
+ raw_cmd->cmd[TRACK]);
pr_info("heads=%d eoc=%d\n", heads, eoc);
pr_info("spt=%d st=%d ss=%d\n",
- SECT_PER_TRACK, fsector_t, ssize);
+ raw_cmd->cmd[SECT_PER_TRACK], fsector_t, ssize);
pr_info("in_sector_offset=%d\n", in_sector_offset);
}
@@ -2347,7 +2381,7 @@ static void rw_interrupt(void)
}
if (probing) {
- if (DP->flags & FTD_MSG)
+ if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Auto-detected floppy type %s in fd%d\n",
_floppy->name, current_drive);
current_type[current_drive] = _floppy;
@@ -2355,11 +2389,11 @@ static void rw_interrupt(void)
probing = 0;
}
- if (CT(COMMAND) != FD_READ ||
+ if (CT(raw_cmd->cmd[COMMAND]) != FD_READ ||
raw_cmd->kernel_data == bio_data(current_req->bio)) {
/* transfer directly from buffer */
cont->done(1);
- } else if (CT(COMMAND) == FD_READ) {
+ } else if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) {
buffer_track = raw_cmd->track;
buffer_drive = current_drive;
INFBOUND(buffer_max, nr_sectors + fsector_t);
@@ -2418,13 +2452,13 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
min(max_sector, max_sector_2),
blk_rq_sectors(current_req));
- if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
+ if (current_count_sectors <= 0 && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE &&
buffer_max > fsector_t + blk_rq_sectors(current_req))
current_count_sectors = min_t(int, buffer_max - fsector_t,
blk_rq_sectors(current_req));
remaining = current_count_sectors << 9;
- if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
+ if (remaining > blk_rq_bytes(current_req) && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
DPRINT("in copy buffer\n");
pr_info("current_count_sectors=%ld\n", current_count_sectors);
pr_info("remaining=%d\n", remaining >> 9);
@@ -2459,16 +2493,16 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
fsector_t, buffer_min);
pr_info("current_count_sectors=%ld\n",
current_count_sectors);
- if (CT(COMMAND) == FD_READ)
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
pr_info("read\n");
- if (CT(COMMAND) == FD_WRITE)
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE)
pr_info("write\n");
break;
}
if (((unsigned long)buffer) % 512)
DPRINT("%p buffer not aligned\n", buffer);
- if (CT(COMMAND) == FD_READ)
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
memcpy(buffer, dma_buffer, size);
else
memcpy(dma_buffer, buffer, size);
@@ -2486,7 +2520,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
/* work around a bug in pseudo DMA
* (on some FDCs) pseudo DMA does not stop when the CPU stops
* sending data. Hence we need a different way to signal the
- * transfer length: We use SECT_PER_TRACK. Unfortunately, this
+ * transfer length: We use raw_cmd->cmd[SECT_PER_TRACK]. Unfortunately, this
* does not work with MT, hence we can only transfer one head at
* a time
*/
@@ -2495,18 +2529,18 @@ static void virtualdmabug_workaround(void)
int hard_sectors;
int end_sector;
- if (CT(COMMAND) == FD_WRITE) {
- COMMAND &= ~0x80; /* switch off multiple track mode */
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
+ raw_cmd->cmd[COMMAND] &= ~0x80; /* switch off multiple track mode */
- hard_sectors = raw_cmd->length >> (7 + SIZECODE);
- end_sector = SECTOR + hard_sectors - 1;
- if (end_sector > SECT_PER_TRACK) {
+ hard_sectors = raw_cmd->length >> (7 + raw_cmd->cmd[SIZECODE]);
+ end_sector = raw_cmd->cmd[SECTOR] + hard_sectors - 1;
+ if (end_sector > raw_cmd->cmd[SECT_PER_TRACK]) {
pr_info("too many sectors %d > %d\n",
- end_sector, SECT_PER_TRACK);
+ end_sector, raw_cmd->cmd[SECT_PER_TRACK]);
return;
}
- SECT_PER_TRACK = end_sector;
- /* make sure SECT_PER_TRACK
+ raw_cmd->cmd[SECT_PER_TRACK] = end_sector;
+ /* make sure raw_cmd->cmd[SECT_PER_TRACK]
* points to end of transfer */
}
}
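
A quick worked example of the clamp above, with illustrative numbers: for 512-byte sectors SIZECODE is 2, so a raw_cmd->length of 9216 bytes gives hard_sectors = 9216 >> (7 + 2) = 18; starting at SECTOR = 1, end_sector = 1 + 18 - 1 = 18, which does not exceed an 18-sector track, so SECT_PER_TRACK is rewritten to 18 and the controller stops feeding the pseudo-DMA transfer exactly at the end of the head.
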
@@ -2539,10 +2573,10 @@ static int make_raw_rw_request(void)
raw_cmd->cmd_count = NR_RW;
if (rq_data_dir(current_req) == READ) {
raw_cmd->flags |= FD_RAW_READ;
- COMMAND = FM_MODE(_floppy, FD_READ);
+ raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_READ);
} else if (rq_data_dir(current_req) == WRITE) {
raw_cmd->flags |= FD_RAW_WRITE;
- COMMAND = FM_MODE(_floppy, FD_WRITE);
+ raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_WRITE);
} else {
DPRINT("%s: unknown command\n", __func__);
return 0;
@@ -2550,24 +2584,24 @@ static int make_raw_rw_request(void)
max_sector = _floppy->sect * _floppy->head;
- TRACK = (int)blk_rq_pos(current_req) / max_sector;
+ raw_cmd->cmd[TRACK] = (int)blk_rq_pos(current_req) / max_sector;
fsector_t = (int)blk_rq_pos(current_req) % max_sector;
- if (_floppy->track && TRACK >= _floppy->track) {
+ if (_floppy->track && raw_cmd->cmd[TRACK] >= _floppy->track) {
if (blk_rq_cur_sectors(current_req) & 1) {
current_count_sectors = 1;
return 1;
} else
return 0;
}
- HEAD = fsector_t / _floppy->sect;
+ raw_cmd->cmd[HEAD] = fsector_t / _floppy->sect;
if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
- test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags)) &&
+ test_bit(FD_NEED_TWADDLE_BIT, &drive_state[current_drive].flags)) &&
fsector_t < _floppy->sect)
max_sector = _floppy->sect;
/* 2M disks have phantom sectors on the first track */
- if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) {
+ if ((_floppy->rate & FD_2M) && (!raw_cmd->cmd[TRACK]) && (!raw_cmd->cmd[HEAD])) {
max_sector = 2 * _floppy->sect / 3;
if (fsector_t >= max_sector) {
current_count_sectors =
@@ -2575,23 +2609,24 @@ static int make_raw_rw_request(void)
blk_rq_sectors(current_req));
return 1;
}
- SIZECODE = 2;
+ raw_cmd->cmd[SIZECODE] = 2;
} else
- SIZECODE = FD_SIZECODE(_floppy);
+ raw_cmd->cmd[SIZECODE] = FD_SIZECODE(_floppy);
raw_cmd->rate = _floppy->rate & 0x43;
- if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2)
+ if ((_floppy->rate & FD_2M) &&
+ (raw_cmd->cmd[TRACK] || raw_cmd->cmd[HEAD]) && raw_cmd->rate == 2)
raw_cmd->rate = 1;
- if (SIZECODE)
- SIZECODE2 = 0xff;
+ if (raw_cmd->cmd[SIZECODE])
+ raw_cmd->cmd[SIZECODE2] = 0xff;
else
- SIZECODE2 = 0x80;
- raw_cmd->track = TRACK << STRETCH(_floppy);
- DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD);
- GAP = _floppy->gap;
- ssize = DIV_ROUND_UP(1 << SIZECODE, 4);
- SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
- SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
+ raw_cmd->cmd[SIZECODE2] = 0x80;
+ raw_cmd->track = raw_cmd->cmd[TRACK] << STRETCH(_floppy);
+ raw_cmd->cmd[DR_SELECT] = UNIT(current_drive) + PH_HEAD(_floppy, raw_cmd->cmd[HEAD]);
+ raw_cmd->cmd[GAP] = _floppy->gap;
+ ssize = DIV_ROUND_UP(1 << raw_cmd->cmd[SIZECODE], 4);
+ raw_cmd->cmd[SECT_PER_TRACK] = _floppy->sect << 2 >> raw_cmd->cmd[SIZECODE];
+ raw_cmd->cmd[SECTOR] = ((fsector_t % _floppy->sect) << 2 >> raw_cmd->cmd[SIZECODE]) +
FD_SECTBASE(_floppy);
/* tracksize describes the size which can be filled up with sectors
@@ -2599,24 +2634,24 @@ static int make_raw_rw_request(void)
*/
tracksize = _floppy->sect - _floppy->sect % ssize;
if (tracksize < _floppy->sect) {
- SECT_PER_TRACK++;
+ raw_cmd->cmd[SECT_PER_TRACK]++;
if (tracksize <= fsector_t % _floppy->sect)
- SECTOR--;
+ raw_cmd->cmd[SECTOR]--;
/* if we are beyond tracksize, fill up using smaller sectors */
while (tracksize <= fsector_t % _floppy->sect) {
while (tracksize + ssize > _floppy->sect) {
- SIZECODE--;
+ raw_cmd->cmd[SIZECODE]--;
ssize >>= 1;
}
- SECTOR++;
- SECT_PER_TRACK++;
+ raw_cmd->cmd[SECTOR]++;
+ raw_cmd->cmd[SECT_PER_TRACK]++;
tracksize += ssize;
}
- max_sector = HEAD * _floppy->sect + tracksize;
- } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) {
+ max_sector = raw_cmd->cmd[HEAD] * _floppy->sect + tracksize;
+ } else if (!raw_cmd->cmd[TRACK] && !raw_cmd->cmd[HEAD] && !(_floppy->rate & FD_2M) && probing) {
max_sector = _floppy->sect;
- } else if (!HEAD && CT(COMMAND) == FD_WRITE) {
+ } else if (!raw_cmd->cmd[HEAD] && CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
/* for virtual DMA bug workaround */
max_sector = _floppy->sect;
}
@@ -2628,12 +2663,12 @@ static int make_raw_rw_request(void)
(current_drive == buffer_drive) &&
(fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
/* data already in track buffer */
- if (CT(COMMAND) == FD_READ) {
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) {
copy_buffer(1, max_sector, buffer_max);
return 1;
}
} else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
- if (CT(COMMAND) == FD_WRITE) {
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
unsigned int sectors;
sectors = fsector_t + blk_rq_sectors(current_req);
@@ -2644,7 +2679,7 @@ static int make_raw_rw_request(void)
}
raw_cmd->flags &= ~FD_RAW_WRITE;
raw_cmd->flags |= FD_RAW_READ;
- COMMAND = FM_MODE(_floppy, FD_READ);
+ raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_READ);
} else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) {
unsigned long dma_limit;
int direct, indirect;
@@ -2677,9 +2712,9 @@ static int make_raw_rw_request(void)
*/
if (!direct ||
(indirect * 2 > direct * 3 &&
- *errors < DP->max_errors.read_track &&
+ *errors < drive_params[current_drive].max_errors.read_track &&
((!probing ||
- (DP->read_track & (1 << DRS->probed_format)))))) {
+ (drive_params[current_drive].read_track & (1 << drive_state[current_drive].probed_format)))))) {
max_size = blk_rq_sectors(current_req);
} else {
raw_cmd->kernel_data = bio_data(current_req->bio);
@@ -2695,7 +2730,7 @@ static int make_raw_rw_request(void)
}
}
- if (CT(COMMAND) == FD_READ)
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
max_size = max_sector; /* unbounded */
/* claim buffer track if needed */
@@ -2703,7 +2738,7 @@ static int make_raw_rw_request(void)
buffer_drive != current_drive || /* bad drive */
fsector_t > buffer_max ||
fsector_t < buffer_min ||
- ((CT(COMMAND) == FD_READ ||
+ ((CT(raw_cmd->cmd[COMMAND]) == FD_READ ||
(!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
max_sector > 2 * max_buffer_sectors + buffer_min &&
max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)) {
@@ -2715,7 +2750,7 @@ static int make_raw_rw_request(void)
raw_cmd->kernel_data = floppy_track_buffer +
((aligned_sector_t - buffer_min) << 9);
- if (CT(COMMAND) == FD_WRITE) {
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) {
/* copy write buffer to track buffer.
* if we get here, we know that the write
* is either aligned or the data already in the buffer
@@ -2737,10 +2772,10 @@ static int make_raw_rw_request(void)
raw_cmd->length <<= 9;
if ((raw_cmd->length < current_count_sectors << 9) ||
(raw_cmd->kernel_data != bio_data(current_req->bio) &&
- CT(COMMAND) == FD_WRITE &&
+ CT(raw_cmd->cmd[COMMAND]) == FD_WRITE &&
(aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
aligned_sector_t < buffer_min)) ||
- raw_cmd->length % (128 << SIZECODE) ||
+ raw_cmd->length % (128 << raw_cmd->cmd[SIZECODE]) ||
raw_cmd->length <= 0 || current_count_sectors <= 0) {
DPRINT("fractionary current count b=%lx s=%lx\n",
raw_cmd->length, current_count_sectors);
@@ -2751,9 +2786,10 @@ static int make_raw_rw_request(void)
current_count_sectors);
pr_info("st=%d ast=%d mse=%d msi=%d\n",
fsector_t, aligned_sector_t, max_sector, max_size);
- pr_info("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
+ pr_info("ssize=%x SIZECODE=%d\n", ssize, raw_cmd->cmd[SIZECODE]);
pr_info("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
- COMMAND, SECTOR, HEAD, TRACK);
+ raw_cmd->cmd[COMMAND], raw_cmd->cmd[SECTOR],
+ raw_cmd->cmd[HEAD], raw_cmd->cmd[TRACK]);
pr_info("buffer drive=%d\n", buffer_drive);
pr_info("buffer track=%d\n", buffer_track);
pr_info("buffer_min=%d\n", buffer_min);
@@ -2772,9 +2808,9 @@ static int make_raw_rw_request(void)
fsector_t, buffer_min, raw_cmd->length >> 9);
pr_info("current_count_sectors=%ld\n",
current_count_sectors);
- if (CT(COMMAND) == FD_READ)
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
pr_info("read\n");
- if (CT(COMMAND) == FD_WRITE)
+ if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE)
pr_info("write\n");
return 0;
}
@@ -2841,14 +2877,14 @@ do_request:
disk_change(current_drive);
if (test_bit(current_drive, &fake_change) ||
- test_bit(FD_DISK_CHANGED_BIT, &DRS->flags)) {
+ test_bit(FD_DISK_CHANGED_BIT, &drive_state[current_drive].flags)) {
DPRINT("disk absent or changed during operation\n");
request_done(0);
goto do_request;
}
if (!_floppy) { /* Autodetection */
if (!probing) {
- DRS->probed_format = 0;
+ drive_state[current_drive].probed_format = 0;
if (next_valid_format()) {
DPRINT("no autodetectable formats\n");
_floppy = NULL;
@@ -2857,7 +2893,7 @@ do_request:
}
}
probing = 1;
- _floppy = floppy_type + DP->autodetect[DRS->probed_format];
+ _floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
errors = &(current_req->error_count);
@@ -2867,7 +2903,7 @@ do_request:
goto do_request;
}
- if (test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags))
+ if (test_bit(FD_NEED_TWADDLE_BIT, &drive_state[current_drive].flags))
twaddle();
schedule_bh(floppy_start);
debugt(__func__, "queue fd request");
@@ -2936,8 +2972,9 @@ static int poll_drive(bool interruptible, int flag)
raw_cmd->track = 0;
raw_cmd->cmd_count = 0;
cont = &poll_cont;
- debug_dcl(DP->flags, "setting NEWCHANGE in poll_drive\n");
- set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
+ debug_dcl(drive_params[current_drive].flags,
+ "setting NEWCHANGE in poll_drive\n");
+ set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags);
return wait_til_done(floppy_ready, interruptible);
}
@@ -2967,8 +3004,8 @@ static int user_reset_fdc(int drive, int arg, bool interruptible)
return -EINTR;
if (arg == FD_RESET_ALWAYS)
- FDCS->reset = 1;
- if (FDCS->reset) {
+ fdc_state[current_fdc].reset = 1;
+ if (fdc_state[current_fdc].reset) {
cont = &reset_cont;
ret = wait_til_done(reset_fdc, interruptible);
if (ret == -EINTR)
@@ -3001,8 +3038,8 @@ static const char *drive_name(int type, int drive)
if (type)
floppy = floppy_type + type;
else {
- if (UDP->native_format)
- floppy = floppy_type + UDP->native_format;
+ if (drive_params[drive].native_format)
+ floppy = floppy_type + drive_params[drive].native_format;
else
return "(null)";
}
@@ -3179,23 +3216,23 @@ static int raw_cmd_ioctl(int cmd, void __user *param)
int ret2;
int ret;
- if (FDCS->rawcmd <= 1)
- FDCS->rawcmd = 1;
+ if (fdc_state[current_fdc].rawcmd <= 1)
+ fdc_state[current_fdc].rawcmd = 1;
for (drive = 0; drive < N_DRIVE; drive++) {
- if (FDC(drive) != fdc)
+ if (FDC(drive) != current_fdc)
continue;
if (drive == current_drive) {
- if (UDRS->fd_ref > 1) {
- FDCS->rawcmd = 2;
+ if (drive_state[drive].fd_ref > 1) {
+ fdc_state[current_fdc].rawcmd = 2;
break;
}
- } else if (UDRS->fd_ref) {
- FDCS->rawcmd = 2;
+ } else if (drive_state[drive].fd_ref) {
+ fdc_state[current_fdc].rawcmd = 2;
break;
}
}
- if (FDCS->reset)
+ if (fdc_state[current_fdc].reset)
return -EIO;
ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
@@ -3207,12 +3244,13 @@ static int raw_cmd_ioctl(int cmd, void __user *param)
raw_cmd = my_raw_cmd;
cont = &raw_cmd_cont;
ret = wait_til_done(floppy_start, true);
- debug_dcl(DP->flags, "calling disk change from raw_cmd ioctl\n");
+ debug_dcl(drive_params[current_drive].flags,
+ "calling disk change from raw_cmd ioctl\n");
- if (ret != -EINTR && FDCS->reset)
+ if (ret != -EINTR && fdc_state[current_fdc].reset)
ret = -EIO;
- DRS->track = NO_TRACK;
+ drive_state[current_drive].track = NO_TRACK;
ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
if (!ret)
@@ -3240,9 +3278,9 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
(int)g->head <= 0 ||
/* check for overflow in max_sector */
(int)(g->sect * g->head) <= 0 ||
- /* check for zero in F_SECT_PER_TRACK */
+ /* check for zero in raw_cmd->cmd[F_SECT_PER_TRACK] */
(unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
- g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
+ g->track <= 0 || g->track > drive_params[drive].tracks >> STRETCH(g) ||
/* check if reserved bits are set */
(g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
return -EINVAL;
@@ -3285,16 +3323,16 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
current_type[drive] = &user_params[drive];
floppy_sizes[drive] = user_params[drive].size;
if (cmd == FDDEFPRM)
- DRS->keep_data = -1;
+ drive_state[current_drive].keep_data = -1;
else
- DRS->keep_data = 1;
+ drive_state[current_drive].keep_data = 1;
/* invalidation. Invalidate only when needed, i.e.
* when there are already sectors in the buffer cache
* whose number will change. This is useful, because
* mtools often changes the geometry of the disk after
* looking at the boot block */
- if (DRS->maxblock > user_params[drive].sect ||
- DRS->maxtrack ||
+ if (drive_state[current_drive].maxblock > user_params[drive].sect ||
+ drive_state[current_drive].maxtrack ||
((user_params[drive].sect ^ oldStretch) &
(FD_SWAPSIDES | FD_SECTBASEMASK)))
invalidate_drive(bdev);
@@ -3407,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
- int type = ITYPE(UDRS->fd_device);
+ int type = ITYPE(drive_state[drive].fd_device);
int i;
int ret;
int size;
@@ -3455,7 +3493,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
switch (cmd) {
case FDEJECT:
- if (UDRS->fd_ref != 1)
+ if (drive_state[drive].fd_ref != 1)
/* somebody else has this drive open */
return -EBUSY;
if (lock_fdc(drive))
@@ -3465,8 +3503,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
* non-Sparc architectures */
ret = fd_eject(UNIT(drive));
- set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
- set_bit(FD_VERIFY_BIT, &UDRS->flags);
+ set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
+ set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
process_fd_request();
return ret;
case FDCLRPRM:
@@ -3474,7 +3512,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
return -EINTR;
current_type[drive] = NULL;
floppy_sizes[drive] = MAX_DISK_SIZE << 1;
- UDRS->keep_data = 0;
+ drive_state[drive].keep_data = 0;
return invalidate_drive(bdev);
case FDSETPRM:
case FDDEFPRM:
@@ -3489,17 +3527,17 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
outparam = &inparam.g;
break;
case FDMSGON:
- UDP->flags |= FTD_MSG;
+ drive_params[drive].flags |= FTD_MSG;
return 0;
case FDMSGOFF:
- UDP->flags &= ~FTD_MSG;
+ drive_params[drive].flags &= ~FTD_MSG;
return 0;
case FDFMTBEG:
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
- ret = UDRS->flags;
+ ret = drive_state[drive].flags;
process_fd_request();
if (ret & FD_VERIFY)
return -ENODEV;
@@ -3507,7 +3545,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
return -EROFS;
return 0;
case FDFMTTRK:
- if (UDRS->fd_ref != 1)
+ if (drive_state[drive].fd_ref != 1)
return -EBUSY;
return do_format(drive, &inparam.f);
case FDFMTEND:
@@ -3516,13 +3554,13 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
return -EINTR;
return invalidate_drive(bdev);
case FDSETEMSGTRESH:
- UDP->max_errors.reporting = (unsigned short)(param & 0x0f);
+ drive_params[drive].max_errors.reporting = (unsigned short)(param & 0x0f);
return 0;
case FDGETMAXERRS:
- outparam = &UDP->max_errors;
+ outparam = &drive_params[drive].max_errors;
break;
case FDSETMAXERRS:
- UDP->max_errors = inparam.max_errors;
+ drive_params[drive].max_errors = inparam.max_errors;
break;
case FDGETDRVTYP:
outparam = drive_name(type, drive);
@@ -3532,10 +3570,10 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
if (!valid_floppy_drive_params(inparam.dp.autodetect,
inparam.dp.native_format))
return -EINVAL;
- *UDP = inparam.dp;
+ drive_params[drive] = inparam.dp;
break;
case FDGETDRVPRM:
- outparam = UDP;
+ outparam = &drive_params[drive];
break;
case FDPOLLDRVSTAT:
if (lock_fdc(drive))
@@ -3545,18 +3583,18 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
process_fd_request();
/* fall through */
case FDGETDRVSTAT:
- outparam = UDRS;
+ outparam = &drive_state[drive];
break;
case FDRESET:
return user_reset_fdc(drive, (int)param, true);
case FDGETFDCSTAT:
- outparam = UFDCS;
+ outparam = &fdc_state[FDC(drive)];
break;
case FDWERRORCLR:
- memset(UDRWE, 0, sizeof(*UDRWE));
+ memset(&write_errors[drive], 0, sizeof(write_errors[drive]));
return 0;
case FDWERRORGET:
- outparam = UDRWE;
+ outparam = &write_errors[drive];
break;
case FDRAWCMD:
if (type)
@@ -3692,7 +3730,7 @@ static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned
mutex_lock(&floppy_mutex);
drive = (long)bdev->bd_disk->private_data;
- type = ITYPE(UDRS->fd_device);
+ type = ITYPE(drive_state[drive].fd_device);
err = set_geometry(cmd == FDSETPRM32 ? FDSETPRM : FDDEFPRM,
&v, drive, type, bdev);
mutex_unlock(&floppy_mutex);
@@ -3708,7 +3746,8 @@ static int compat_get_prm(int drive,
memset(&v, 0, sizeof(v));
mutex_lock(&floppy_mutex);
- err = get_floppy_geometry(drive, ITYPE(UDRS->fd_device), &p);
+ err = get_floppy_geometry(drive, ITYPE(drive_state[drive].fd_device),
+ &p);
if (err) {
mutex_unlock(&floppy_mutex);
return err;
@@ -3732,25 +3771,26 @@ static int compat_setdrvprm(int drive,
if (!valid_floppy_drive_params(v.autodetect, v.native_format))
return -EINVAL;
mutex_lock(&floppy_mutex);
- UDP->cmos = v.cmos;
- UDP->max_dtr = v.max_dtr;
- UDP->hlt = v.hlt;
- UDP->hut = v.hut;
- UDP->srt = v.srt;
- UDP->spinup = v.spinup;
- UDP->spindown = v.spindown;
- UDP->spindown_offset = v.spindown_offset;
- UDP->select_delay = v.select_delay;
- UDP->rps = v.rps;
- UDP->tracks = v.tracks;
- UDP->timeout = v.timeout;
- UDP->interleave_sect = v.interleave_sect;
- UDP->max_errors = v.max_errors;
- UDP->flags = v.flags;
- UDP->read_track = v.read_track;
- memcpy(UDP->autodetect, v.autodetect, sizeof(v.autodetect));
- UDP->checkfreq = v.checkfreq;
- UDP->native_format = v.native_format;
+ drive_params[drive].cmos = v.cmos;
+ drive_params[drive].max_dtr = v.max_dtr;
+ drive_params[drive].hlt = v.hlt;
+ drive_params[drive].hut = v.hut;
+ drive_params[drive].srt = v.srt;
+ drive_params[drive].spinup = v.spinup;
+ drive_params[drive].spindown = v.spindown;
+ drive_params[drive].spindown_offset = v.spindown_offset;
+ drive_params[drive].select_delay = v.select_delay;
+ drive_params[drive].rps = v.rps;
+ drive_params[drive].tracks = v.tracks;
+ drive_params[drive].timeout = v.timeout;
+ drive_params[drive].interleave_sect = v.interleave_sect;
+ drive_params[drive].max_errors = v.max_errors;
+ drive_params[drive].flags = v.flags;
+ drive_params[drive].read_track = v.read_track;
+ memcpy(drive_params[drive].autodetect, v.autodetect,
+ sizeof(v.autodetect));
+ drive_params[drive].checkfreq = v.checkfreq;
+ drive_params[drive].native_format = v.native_format;
mutex_unlock(&floppy_mutex);
return 0;
}
@@ -3762,25 +3802,26 @@ static int compat_getdrvprm(int drive,
memset(&v, 0, sizeof(struct compat_floppy_drive_params));
mutex_lock(&floppy_mutex);
- v.cmos = UDP->cmos;
- v.max_dtr = UDP->max_dtr;
- v.hlt = UDP->hlt;
- v.hut = UDP->hut;
- v.srt = UDP->srt;
- v.spinup = UDP->spinup;
- v.spindown = UDP->spindown;
- v.spindown_offset = UDP->spindown_offset;
- v.select_delay = UDP->select_delay;
- v.rps = UDP->rps;
- v.tracks = UDP->tracks;
- v.timeout = UDP->timeout;
- v.interleave_sect = UDP->interleave_sect;
- v.max_errors = UDP->max_errors;
- v.flags = UDP->flags;
- v.read_track = UDP->read_track;
- memcpy(v.autodetect, UDP->autodetect, sizeof(v.autodetect));
- v.checkfreq = UDP->checkfreq;
- v.native_format = UDP->native_format;
+ v.cmos = drive_params[drive].cmos;
+ v.max_dtr = drive_params[drive].max_dtr;
+ v.hlt = drive_params[drive].hlt;
+ v.hut = drive_params[drive].hut;
+ v.srt = drive_params[drive].srt;
+ v.spinup = drive_params[drive].spinup;
+ v.spindown = drive_params[drive].spindown;
+ v.spindown_offset = drive_params[drive].spindown_offset;
+ v.select_delay = drive_params[drive].select_delay;
+ v.rps = drive_params[drive].rps;
+ v.tracks = drive_params[drive].tracks;
+ v.timeout = drive_params[drive].timeout;
+ v.interleave_sect = drive_params[drive].interleave_sect;
+ v.max_errors = drive_params[drive].max_errors;
+ v.flags = drive_params[drive].flags;
+ v.read_track = drive_params[drive].read_track;
+ memcpy(v.autodetect, drive_params[drive].autodetect,
+ sizeof(v.autodetect));
+ v.checkfreq = drive_params[drive].checkfreq;
+ v.native_format = drive_params[drive].native_format;
mutex_unlock(&floppy_mutex);
if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
@@ -3803,20 +3844,20 @@ static int compat_getdrvstat(int drive, bool poll,
goto Eintr;
process_fd_request();
}
- v.spinup_date = UDRS->spinup_date;
- v.select_date = UDRS->select_date;
- v.first_read_date = UDRS->first_read_date;
- v.probed_format = UDRS->probed_format;
- v.track = UDRS->track;
- v.maxblock = UDRS->maxblock;
- v.maxtrack = UDRS->maxtrack;
- v.generation = UDRS->generation;
- v.keep_data = UDRS->keep_data;
- v.fd_ref = UDRS->fd_ref;
- v.fd_device = UDRS->fd_device;
- v.last_checked = UDRS->last_checked;
- v.dmabuf = (uintptr_t)UDRS->dmabuf;
- v.bufblocks = UDRS->bufblocks;
+ v.spinup_date = drive_state[drive].spinup_date;
+ v.select_date = drive_state[drive].select_date;
+ v.first_read_date = drive_state[drive].first_read_date;
+ v.probed_format = drive_state[drive].probed_format;
+ v.track = drive_state[drive].track;
+ v.maxblock = drive_state[drive].maxblock;
+ v.maxtrack = drive_state[drive].maxtrack;
+ v.generation = drive_state[drive].generation;
+ v.keep_data = drive_state[drive].keep_data;
+ v.fd_ref = drive_state[drive].fd_ref;
+ v.fd_device = drive_state[drive].fd_device;
+ v.last_checked = drive_state[drive].last_checked;
+ v.dmabuf = (uintptr_t) drive_state[drive].dmabuf;
+ v.bufblocks = drive_state[drive].bufblocks;
mutex_unlock(&floppy_mutex);
if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
@@ -3834,7 +3875,7 @@ static int compat_getfdcstat(int drive,
struct floppy_fdc_state v;
mutex_lock(&floppy_mutex);
- v = *UFDCS;
+ v = fdc_state[FDC(drive)];
mutex_unlock(&floppy_mutex);
memset(&v32, 0, sizeof(struct compat_floppy_fdc_state));
@@ -3864,7 +3905,7 @@ static int compat_werrorget(int drive,
memset(&v32, 0, sizeof(struct compat_floppy_write_errors));
mutex_lock(&floppy_mutex);
- v = *UDRWE;
+ v = write_errors[drive];
mutex_unlock(&floppy_mutex);
v32.write_errors = v.write_errors;
v32.first_error_sector = v.first_error_sector;
@@ -3933,16 +3974,16 @@ static void __init config_types(void)
/* read drive info out of physical CMOS */
drive = 0;
- if (!UDP->cmos)
- UDP->cmos = FLOPPY0_TYPE;
+ if (!drive_params[drive].cmos)
+ drive_params[drive].cmos = FLOPPY0_TYPE;
drive = 1;
- if (!UDP->cmos)
- UDP->cmos = FLOPPY1_TYPE;
+ if (!drive_params[drive].cmos)
+ drive_params[drive].cmos = FLOPPY1_TYPE;
/* FIXME: additional physical CMOS drive detection should go here */
for (drive = 0; drive < N_DRIVE; drive++) {
- unsigned int type = UDP->cmos;
+ unsigned int type = drive_params[drive].cmos;
struct floppy_drive_params *params;
const char *name = NULL;
char temparea[32];
@@ -3972,7 +4013,7 @@ static void __init config_types(void)
pr_cont("%s fd%d is %s", prepend, drive, name);
}
- *UDP = *params;
+ drive_params[drive] = *params;
}
if (has_drive)
@@ -3985,11 +4026,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
- if (!UDRS->fd_ref--) {
+ if (!drive_state[drive].fd_ref--) {
DPRINT("floppy_release with fd_ref == 0");
- UDRS->fd_ref = 0;
+ drive_state[drive].fd_ref = 0;
}
- if (!UDRS->fd_ref)
+ if (!drive_state[drive].fd_ref)
opened_bdev[drive] = NULL;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
@@ -4010,16 +4051,16 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
- old_dev = UDRS->fd_device;
+ old_dev = drive_state[drive].fd_device;
if (opened_bdev[drive] && opened_bdev[drive] != bdev)
goto out2;
- if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) {
- set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
- set_bit(FD_VERIFY_BIT, &UDRS->flags);
+ if (!drive_state[drive].fd_ref && (drive_params[drive].flags & FD_BROKEN_DCL)) {
+ set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
+ set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
}
- UDRS->fd_ref++;
+ drive_state[drive].fd_ref++;
opened_bdev[drive] = bdev;
@@ -4028,7 +4069,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (!floppy_track_buffer) {
/* if opening an ED drive, reserve a big buffer,
* else reserve a small one */
- if ((UDP->cmos == 6) || (UDP->cmos == 5))
+ if ((drive_params[drive].cmos == 6) || (drive_params[drive].cmos == 5))
try = 64; /* Only 48 actually useful */
else
try = 32; /* Only 24 actually useful */
@@ -4056,38 +4097,39 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
}
new_dev = MINOR(bdev->bd_dev);
- UDRS->fd_device = new_dev;
+ drive_state[drive].fd_device = new_dev;
set_capacity(disks[drive], floppy_sizes[new_dev]);
if (old_dev != -1 && old_dev != new_dev) {
if (buffer_drive == drive)
buffer_track = -1;
}
- if (UFDCS->rawcmd == 1)
- UFDCS->rawcmd = 2;
+ if (fdc_state[FDC(drive)].rawcmd == 1)
+ fdc_state[FDC(drive)].rawcmd = 2;
if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
- UDRS->last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ drive_state[drive].last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
+ &drive_state[drive].flags);
check_disk_change(bdev);
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
- !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+ !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
goto out;
}
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
out:
- UDRS->fd_ref--;
+ drive_state[drive].fd_ref--;
- if (!UDRS->fd_ref)
+ if (!drive_state[drive].fd_ref)
opened_bdev[drive] = NULL;
out2:
mutex_unlock(&open_lock);
@@ -4103,19 +4145,19 @@ static unsigned int floppy_check_events(struct gendisk *disk,
{
int drive = (long)disk->private_data;
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
- test_bit(FD_VERIFY_BIT, &UDRS->flags))
+ if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) ||
+ test_bit(FD_VERIFY_BIT, &drive_state[drive].flags))
return DISK_EVENT_MEDIA_CHANGE;
- if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
+ if (time_after(jiffies, drive_state[drive].last_checked + drive_params[drive].checkfreq)) {
if (lock_fdc(drive))
return 0;
poll_drive(false, 0);
process_fd_request();
}
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
- test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
+ if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) ||
+ test_bit(FD_VERIFY_BIT, &drive_state[drive].flags) ||
test_bit(drive, &fake_change) ||
drive_no_geom(drive))
return DISK_EVENT_MEDIA_CHANGE;
@@ -4141,7 +4183,7 @@ static void floppy_rb0_cb(struct bio *bio)
if (bio->bi_status) {
pr_info("floppy: error %d while reading block 0\n",
bio->bi_status);
- set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ set_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
}
complete(&cbdata->complete);
}
@@ -4198,8 +4240,8 @@ static int floppy_revalidate(struct gendisk *disk)
int cf;
int res = 0;
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
- test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
+ if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) ||
+ test_bit(FD_VERIFY_BIT, &drive_state[drive].flags) ||
test_bit(drive, &fake_change) ||
drive_no_geom(drive)) {
if (WARN(atomic_read(&usage_count) == 0,
@@ -4209,20 +4251,20 @@ static int floppy_revalidate(struct gendisk *disk)
res = lock_fdc(drive);
if (res)
return res;
- cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
- test_bit(FD_VERIFY_BIT, &UDRS->flags));
+ cf = (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags) ||
+ test_bit(FD_VERIFY_BIT, &drive_state[drive].flags));
if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
process_fd_request(); /*already done by another thread */
return 0;
}
- UDRS->maxblock = 0;
- UDRS->maxtrack = 0;
+ drive_state[drive].maxblock = 0;
+ drive_state[drive].maxtrack = 0;
if (buffer_drive == drive)
buffer_track = -1;
clear_bit(drive, &fake_change);
- clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
+ clear_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
if (cf)
- UDRS->generation++;
+ drive_state[drive].generation++;
if (drive_no_geom(drive)) {
/* auto-sensing */
res = __floppy_read_block_0(opened_bdev[drive], drive);
@@ -4232,7 +4274,7 @@ static int floppy_revalidate(struct gendisk *disk)
process_fd_request();
}
}
- set_capacity(disk, floppy_sizes[UDRS->fd_device]);
+ set_capacity(disk, floppy_sizes[drive_state[drive].fd_device]);
return res;
}
@@ -4261,23 +4303,23 @@ static char __init get_fdc_version(void)
int r;
output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
- if (FDCS->reset)
+ if (fdc_state[current_fdc].reset)
return FDC_NONE;
r = result();
if (r <= 0x00)
return FDC_NONE; /* No FDC present ??? */
if ((r == 1) && (reply_buffer[0] == 0x80)) {
- pr_info("FDC %d is an 8272A\n", fdc);
+ pr_info("FDC %d is an 8272A\n", current_fdc);
return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
}
if (r != 10) {
pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
- fdc, r);
+ current_fdc, r);
return FDC_UNKNOWN;
}
if (!fdc_configure()) {
- pr_info("FDC %d is an 82072\n", fdc);
+ pr_info("FDC %d is an 82072\n", current_fdc);
return FDC_82072; /* 82072 doesn't know CONFIGURE */
}
@@ -4285,50 +4327,50 @@ static char __init get_fdc_version(void)
if (need_more_output() == MORE_OUTPUT) {
output_byte(0);
} else {
- pr_info("FDC %d is an 82072A\n", fdc);
+ pr_info("FDC %d is an 82072A\n", current_fdc);
return FDC_82072A; /* 82072A as found on Sparcs. */
}
output_byte(FD_UNLOCK);
r = result();
if ((r == 1) && (reply_buffer[0] == 0x80)) {
- pr_info("FDC %d is a pre-1991 82077\n", fdc);
+ pr_info("FDC %d is a pre-1991 82077\n", current_fdc);
return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
* LOCK/UNLOCK */
}
if ((r != 1) || (reply_buffer[0] != 0x00)) {
pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
- fdc, r);
+ current_fdc, r);
return FDC_UNKNOWN;
}
output_byte(FD_PARTID);
r = result();
if (r != 1) {
pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n",
- fdc, r);
+ current_fdc, r);
return FDC_UNKNOWN;
}
if (reply_buffer[0] == 0x80) {
- pr_info("FDC %d is a post-1991 82077\n", fdc);
+ pr_info("FDC %d is a post-1991 82077\n", current_fdc);
return FDC_82077; /* Revised 82077AA passes all the tests */
}
switch (reply_buffer[0] >> 5) {
case 0x0:
/* Either a 82078-1 or a 82078SL running at 5Volt */
- pr_info("FDC %d is an 82078.\n", fdc);
+ pr_info("FDC %d is an 82078.\n", current_fdc);
return FDC_82078;
case 0x1:
- pr_info("FDC %d is a 44pin 82078\n", fdc);
+ pr_info("FDC %d is a 44pin 82078\n", current_fdc);
return FDC_82078;
case 0x2:
- pr_info("FDC %d is a S82078B\n", fdc);
+ pr_info("FDC %d is a S82078B\n", current_fdc);
return FDC_S82078B;
case 0x3:
- pr_info("FDC %d is a National Semiconductor PC87306\n", fdc);
+ pr_info("FDC %d is a National Semiconductor PC87306\n", current_fdc);
return FDC_87306;
default:
pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n",
- fdc, reply_buffer[0] >> 5);
+ current_fdc, reply_buffer[0] >> 5);
return FDC_82078_UNKN;
}
} /* get_fdc_version */
@@ -4384,7 +4426,7 @@ static void __init set_cmos(int *ints, int dummy, int dummy2)
if (current_drive >= 4 && !FDC2)
FDC2 = 0x370;
#endif
- DP->cmos = ints[2];
+ drive_params[current_drive].cmos = ints[2];
DPRINT("setting CMOS code to %d\n", ints[2]);
}
@@ -4473,7 +4515,7 @@ static ssize_t floppy_cmos_show(struct device *dev,
int drive;
drive = p->id;
- return sprintf(buf, "%X\n", UDP->cmos);
+ return sprintf(buf, "%X\n", drive_params[drive].cmos);
}
static DEVICE_ATTR(cmos, 0444, floppy_cmos_show, NULL);
@@ -4494,7 +4536,7 @@ static int floppy_resume(struct device *dev)
int fdc;
for (fdc = 0; fdc < N_FDC; fdc++)
- if (FDCS->address != -1)
+ if (fdc_state[fdc].address != -1)
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
return 0;
@@ -4604,16 +4646,16 @@ static int __init do_floppy_init(void)
config_types();
for (i = 0; i < N_FDC; i++) {
- fdc = i;
- memset(FDCS, 0, sizeof(*FDCS));
- FDCS->dtr = -1;
- FDCS->dor = 0x4;
+ current_fdc = i;
+ memset(&fdc_state[current_fdc], 0, sizeof(*fdc_state));
+ fdc_state[current_fdc].dtr = -1;
+ fdc_state[current_fdc].dor = 0x4;
#if defined(__sparc__) || defined(__mc68000__)
/*sparcs/sun3x don't have a DOR reset which we can fall back on to */
#ifdef __mc68000__
if (MACH_IS_SUN3X)
#endif
- FDCS->version = FDC_82072A;
+ fdc_state[current_fdc].version = FDC_82072A;
#endif
}
@@ -4628,7 +4670,7 @@ static int __init do_floppy_init(void)
fdc_state[1].address = FDC2;
#endif
- fdc = 0; /* reset fdc in case of unexpected interrupt */
+ current_fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
cancel_delayed_work(&fd_timeout);
@@ -4638,12 +4680,12 @@ static int __init do_floppy_init(void)
/* initialise drive state */
for (drive = 0; drive < N_DRIVE; drive++) {
- memset(UDRS, 0, sizeof(*UDRS));
- memset(UDRWE, 0, sizeof(*UDRWE));
- set_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags);
- set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
- set_bit(FD_VERIFY_BIT, &UDRS->flags);
- UDRS->fd_device = -1;
+ memset(&drive_state[drive], 0, sizeof(drive_state[drive]));
+ memset(&write_errors[drive], 0, sizeof(write_errors[drive]));
+ set_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[drive].flags);
+ set_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags);
+ set_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
+ drive_state[drive].fd_device = -1;
floppy_track_buffer = NULL;
max_buffer_sectors = 0;
}
@@ -4655,29 +4697,30 @@ static int __init do_floppy_init(void)
msleep(10);
for (i = 0; i < N_FDC; i++) {
- fdc = i;
- FDCS->driver_version = FD_DRIVER_VERSION;
+ current_fdc = i;
+ fdc_state[current_fdc].driver_version = FD_DRIVER_VERSION;
for (unit = 0; unit < 4; unit++)
- FDCS->track[unit] = 0;
- if (FDCS->address == -1)
+ fdc_state[current_fdc].track[unit] = 0;
+ if (fdc_state[current_fdc].address == -1)
continue;
- FDCS->rawcmd = 2;
+ fdc_state[current_fdc].rawcmd = 2;
if (user_reset_fdc(-1, FD_RESET_ALWAYS, false)) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
- floppy_release_regions(fdc);
- FDCS->address = -1;
- FDCS->version = FDC_NONE;
+ floppy_release_regions(current_fdc);
+ fdc_state[current_fdc].address = -1;
+ fdc_state[current_fdc].version = FDC_NONE;
continue;
}
/* Try to determine the floppy controller type */
- FDCS->version = get_fdc_version();
- if (FDCS->version == FDC_NONE) {
+ fdc_state[current_fdc].version = get_fdc_version();
+ if (fdc_state[current_fdc].version == FDC_NONE) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
- floppy_release_regions(fdc);
- FDCS->address = -1;
+ floppy_release_regions(current_fdc);
+ fdc_state[current_fdc].address = -1;
continue;
}
- if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A)
+ if (can_use_virtual_dma == 2 &&
+ fdc_state[current_fdc].version < FDC_82072A)
can_use_virtual_dma = 0;
have_no_fdc = 0;
@@ -4687,7 +4730,7 @@ static int __init do_floppy_init(void)
*/
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
}
- fdc = 0;
+ current_fdc = 0;
cancel_delayed_work(&fd_timeout);
current_drive = 0;
initialized = true;
@@ -4783,7 +4826,7 @@ static void floppy_release_allocated_regions(int fdc, const struct io_region *p)
{
while (p != io_regions) {
p--;
- release_region(FDCS->address + p->offset, p->size);
+ release_region(fdc_state[fdc].address + p->offset, p->size);
}
}
@@ -4794,10 +4837,10 @@ static int floppy_request_regions(int fdc)
const struct io_region *p;
for (p = io_regions; p < ARRAY_END(io_regions); p++) {
- if (!request_region(FDCS->address + p->offset,
+ if (!request_region(fdc_state[fdc].address + p->offset,
p->size, "floppy")) {
DPRINT("Floppy io-port 0x%04lx in use\n",
- FDCS->address + p->offset);
+ fdc_state[fdc].address + p->offset);
floppy_release_allocated_regions(fdc, p);
return -EBUSY;
}
@@ -4839,36 +4882,36 @@ static int floppy_grab_irq_and_dma(void)
}
}
- for (fdc = 0; fdc < N_FDC; fdc++) {
- if (FDCS->address != -1) {
- if (floppy_request_regions(fdc))
+ for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) {
+ if (fdc_state[current_fdc].address != -1) {
+ if (floppy_request_regions(current_fdc))
goto cleanup;
}
}
- for (fdc = 0; fdc < N_FDC; fdc++) {
- if (FDCS->address != -1) {
+ for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) {
+ if (fdc_state[current_fdc].address != -1) {
reset_fdc_info(1);
- fd_outb(FDCS->dor, FD_DOR);
+ fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
}
}
- fdc = 0;
+ current_fdc = 0;
set_dor(0, ~0, 8); /* avoid immediate interrupt */
- for (fdc = 0; fdc < N_FDC; fdc++)
- if (FDCS->address != -1)
- fd_outb(FDCS->dor, FD_DOR);
+ for (current_fdc = 0; current_fdc < N_FDC; current_fdc++)
+ if (fdc_state[current_fdc].address != -1)
+ fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
/*
* The driver will try and free resources and relies on us
* to know if they were allocated or not.
*/
- fdc = 0;
+ current_fdc = 0;
irqdma_allocated = 1;
return 0;
cleanup:
fd_free_irq();
fd_free_dma();
- while (--fdc >= 0)
- floppy_release_regions(fdc);
+ while (--current_fdc >= 0)
+ floppy_release_regions(current_fdc);
atomic_dec(&usage_count);
return -1;
}
@@ -4916,11 +4959,11 @@ static void floppy_release_irq_and_dma(void)
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
- old_fdc = fdc;
- for (fdc = 0; fdc < N_FDC; fdc++)
- if (FDCS->address != -1)
- floppy_release_regions(fdc);
- fdc = old_fdc;
+ old_fdc = current_fdc;
+ for (current_fdc = 0; current_fdc < N_FDC; current_fdc++)
+ if (fdc_state[current_fdc].address != -1)
+ floppy_release_regions(current_fdc);
+ current_fdc = old_fdc;
}
#ifdef MODULE
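
Nearly all of the floppy.c churn above is one mechanical transformation: the one-letter macros that captured a global index (DP, DRS, DRWE, UDP, UDRS, UDRWE, FDCS, UFDCS, plus the global fdc renamed to current_fdc) are expanded into explicit accesses such as drive_params[drive] and fdc_state[current_fdc]. A minimal sketch of the before/after shape; the struct, macro body, and values here are illustrative, not copied from the driver:

    #include <stdio.h>

    /* Sketch of the macro-removal pattern applied throughout the
     * floppy.c hunks above. The old code hid a global array index
     * behind a one-letter macro; the new code spells the indexed
     * access out at every use site.
     */
    struct drive_params { int cmos; };
    static struct drive_params drive_params[2];
    static int drive;

    #define UDP (&drive_params[drive])      /* old style */

    int main(void)
    {
            drive = 0;
            UDP->cmos = 4;                  /* before: index hidden in macro */
            drive_params[drive].cmos = 4;   /* after: index explicit */
            printf("fd%d cmos=%d\n", drive, drive_params[drive].cmos);
            return 0;
    }
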
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 739b372a5112..da693e6a834e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -214,7 +214,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
* LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
* will get updated by ioctl(LOOP_GET_STATUS)
*/
- blk_mq_freeze_queue(lo->lo_queue);
+ if (lo->lo_state == Lo_bound)
+ blk_mq_freeze_queue(lo->lo_queue);
lo->use_dio = use_dio;
if (use_dio) {
blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
@@ -223,7 +224,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
}
- blk_mq_unfreeze_queue(lo->lo_queue);
+ if (lo->lo_state == Lo_bound)
+ blk_mq_unfreeze_queue(lo->lo_queue);
}
static int
@@ -427,11 +429,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
* information.
*/
struct file *file = lo->lo_backing_file;
+ struct request_queue *q = lo->lo_queue;
int ret;
mode |= FALLOC_FL_KEEP_SIZE;
- if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
+ if (!blk_queue_discard(q)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -461,7 +464,7 @@ static void lo_complete_rq(struct request *rq)
if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) {
if (cmd->ret < 0)
- ret = BLK_STS_IOERR;
+ ret = errno_to_blk_status(cmd->ret);
goto end_io;
}
@@ -866,27 +869,46 @@ static void loop_config_discard(struct loop_device *lo)
struct request_queue *q = lo->lo_queue;
/*
+ * If the backing device is a block device, mirror its zeroing
+ * capability. Set the discard sectors to the block device's zeroing
+ * capabilities because loop discards result in blkdev_issue_zeroout(),
+ * not blkdev_issue_discard(). This maintains consistent behavior with
+ * file-backed loop devices: discarded regions read back as zero.
+ */
+ if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
+ struct request_queue *backingq;
+
+ backingq = bdev_get_queue(inode->i_bdev);
+ blk_queue_max_discard_sectors(q,
+ backingq->limits.max_write_zeroes_sectors);
+
+ blk_queue_max_write_zeroes_sectors(q,
+ backingq->limits.max_write_zeroes_sectors);
+
+ /*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
- if ((!file->f_op->fallocate) ||
- lo->lo_encrypt_key_size) {
+ } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
q->limits.discard_granularity = 0;
q->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(q, 0);
blk_queue_max_write_zeroes_sectors(q, 0);
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
- return;
- }
- q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = 0;
+ } else {
+ q->limits.discard_granularity = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
+
+ blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+ blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+ }
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ if (q->limits.max_write_zeroes_sectors)
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
}
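
Concretely: if the backing block device advertises, say, 16384 write-zeroes sectors (an arbitrary example value), the loop queue now mirrors that number into both max_discard_sectors and max_write_zeroes_sectors, and QUEUE_FLAG_DISCARD ends up set by the final test; if the backing device cannot zero at all (a limit of 0), both limits become 0 and the flag is cleared, so discards are rejected up front instead of failing in lo_fallocate(). File-backed devices keep the old behaviour: granularity from s_blocksize and an effectively unbounded UINT_MAX >> 9 limit.
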
static void loop_unprepare_queue(struct loop_device *lo)
@@ -1539,16 +1561,16 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
return -EINVAL;
- if (lo->lo_queue->limits.logical_block_size != arg) {
- sync_blockdev(lo->lo_device);
- kill_bdev(lo->lo_device);
- }
+ if (lo->lo_queue->limits.logical_block_size == arg)
+ return 0;
+
+ sync_blockdev(lo->lo_device);
+ kill_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
/* kill_bdev should have truncated all the pages */
- if (lo->lo_queue->limits.logical_block_size != arg &&
- lo->lo_device->bd_inode->i_mapping->nrpages) {
+ if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
@@ -1953,7 +1975,10 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
- cmd->ret = ret ? -EIO : 0;
+ if (ret == -EOPNOTSUPP)
+ cmd->ret = ret;
+ else
+ cmd->ret = ret ? -EIO : 0;
blk_mq_complete_request(rq);
}
}
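
Paired with the lo_complete_rq() change earlier in this file, this lets an unsupported discard surface to the block layer as BLK_STS_NOTSUPP rather than a blanket I/O error: loop_handle_cmd() now preserves -EOPNOTSUPP in cmd->ret, errno_to_blk_status() translates it on completion, and every other failure still collapses to -EIO / BLK_STS_IOERR as before.
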
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 78181908f0df..43cff01a5a67 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -395,16 +395,19 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
config = nbd->config;
- if (config->num_connections > 1) {
+ if (config->num_connections > 1 ||
+ (config->num_connections == 1 && nbd->tag_set.timeout)) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
atomic_read(&config->live_connections),
config->num_connections);
/*
* Hooray we have more connections, requeue this IO, the submit
- * path will put it on a real connection.
+ * path will put it on a real connection. Or if only one
+	 * connection is configured, the submit path will wait until
+	 * a new connection is reconfigured or the dead timeout expires.
*/
- if (config->socks && config->num_connections > 1) {
+ if (config->socks) {
if (cmd->index < config->num_connections) {
struct nbd_sock *nsock =
config->socks[cmd->index];
@@ -431,12 +434,22 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
* Userspace sets timeout=0 to disable socket disconnection,
* so just warn and reset the timer.
*/
+ struct nbd_sock *nsock = config->socks[cmd->index];
cmd->retries++;
dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
(unsigned long long)blk_rq_pos(req) << 9,
blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
+ mutex_lock(&nsock->tx_lock);
+ if (cmd->cookie != nsock->cookie) {
+ nbd_requeue_cmd(cmd);
+ mutex_unlock(&nsock->tx_lock);
+ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
+ return BLK_EH_DONE;
+ }
+ mutex_unlock(&nsock->tx_lock);
mutex_unlock(&cmd->lock);
nbd_config_put(nbd);
return BLK_EH_RESET_TIMER;
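
The tx_lock/cookie dance added above is the interesting part: a socket's cookie changes when the connection is re-established, and each command records the cookie of the socket it was issued on, so a mismatch at timeout (checked under tx_lock so it cannot race with a reconfiguration) means the link was already replaced and the command should be requeued rather than left to time out again. A self-contained sketch of that shape; the struct definitions here are stand-ins, not nbd's:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative shape of the cookie check: bump the socket cookie
     * on every reconnect, stamp each command with the cookie of the
     * socket it went out on, and requeue on mismatch at timeout.
     */
    struct sock_state { unsigned long cookie; };
    struct cmd_state  { unsigned long cookie; };

    static bool should_requeue(const struct cmd_state *cmd,
                               const struct sock_state *nsock)
    {
            return cmd->cookie != nsock->cookie;
    }

    int main(void)
    {
            struct sock_state nsock = { .cookie = 2 }; /* reconnected once */
            struct cmd_state cmd = { .cookie = 1 };    /* sent before that */

            printf("%s\n", should_requeue(&cmd, &nsock) ?
                   "requeue" : "reset timer");
            return 0;
    }
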
@@ -741,14 +754,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
/*
- * If we've disconnected or we only have 1
- * connection then we need to make sure we
+ * If we've disconnected, we need to make sure we
* complete this request, otherwise error out
* and let the timeout stuff handle resubmitting
* this request onto another connection.
*/
- if (nbd_disconnected(config) ||
- config->num_connections <= 1) {
+ if (nbd_disconnected(config)) {
cmd->status = BLK_STS_IOERR;
goto out;
}
@@ -825,7 +836,7 @@ static int find_fallback(struct nbd_device *nbd, int index)
if (config->num_connections <= 1) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Attempted send on invalid socket\n");
+ "Dead connection, failed to find a fallback\n");
return new_index;
}
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index 62b660821dbc..81b311c9d781 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -85,26 +85,35 @@ struct nullb {
char disk_name[DISK_NAME_LEN];
};
+blk_status_t null_process_cmd(struct nullb_cmd *cmd,
+ enum req_opf op, sector_t sector,
+ unsigned int nr_sectors);
+
#ifdef CONFIG_BLK_DEV_ZONED
-int null_zone_init(struct nullb_device *dev);
-void null_zone_exit(struct nullb_device *dev);
+int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
+int null_register_zoned_dev(struct nullb *nullb);
+void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- sector_t nr_sectors);
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
+ enum req_opf op, sector_t sector,
+ sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len);
#else
-static inline int null_zone_init(struct nullb_device *dev)
+static inline int null_init_zoned_dev(struct nullb_device *dev,
+ struct request_queue *q)
{
pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
return -EINVAL;
}
-static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
- enum req_opf op, sector_t sector,
- sector_t nr_sectors)
+static inline int null_register_zoned_dev(struct nullb *nullb)
+{
+ return -ENODEV;
+}
+static inline void null_free_zoned_dev(struct nullb_device *dev) {}
+static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
+ enum req_opf op, sector_t sector, sector_t nr_sectors)
{
return BLK_STS_NOTSUPP;
}
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 133060431dbd..ce9e33603a4d 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -23,6 +23,7 @@
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
+static DECLARE_FAULT_ATTR(null_init_hctx_attr);
#endif
static inline u64 mb_per_tick(int mbps)
@@ -96,11 +97,21 @@ module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+/*
+ * For more details about fault injection, please refer to
+ * Documentation/fault-injection/fault-injection.rst.
+ */
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
+MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
+MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");
+
+static char g_init_hctx_str[80];
+module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
+MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
#endif
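
All three parameters take the standard fault-injection quadruple described in Documentation/fault-injection/fault-injection.rst. A possible invocation (the values are arbitrary examples):

    modprobe null_blk init_hctx=1,100,0,-1

which checks on every call (interval 1), fails with 100% probability, uses no space budget, and never stops failing (times -1), so every hardware-context initialization returns -EFAULT.
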
static int g_queue_mode = NULL_Q_MQ;
@@ -276,7 +287,7 @@ nullb_device_##NAME##_store(struct config_item *item, const char *page, \
{ \
int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
struct nullb_device *dev = to_nullb_device(item); \
- TYPE uninitialized_var(new_value); \
+ TYPE new_value = 0; \
int ret; \
\
ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
@@ -302,6 +313,12 @@ static int nullb_apply_submit_queues(struct nullb_device *dev,
if (!nullb)
return 0;
+ /*
+ * Make sure that null_init_hctx() does not access nullb->queues[] past
+ * the end of that array.
+ */
+ if (submit_queues > nr_cpu_ids)
+ return -EINVAL;
set = nullb->tag_set;
blk_mq_update_nr_hw_queues(set, submit_queues);
return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
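
This bound pairs with the setup_queues() change later in this file, which sizes nullb->queues[] with kcalloc(nr_cpu_ids, ...) instead of the configured submit_queues: any hardware-context index that blk_mq_update_nr_hw_queues() can hand to null_init_hctx() is then guaranteed to fall inside the array.
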
@@ -563,7 +580,7 @@ static void null_free_dev(struct nullb_device *dev)
if (!dev)
return;
- null_zone_exit(dev);
+ null_free_zoned_dev(dev);
badblocks_exit(&dev->badblocks);
kfree(dev);
}
@@ -605,6 +622,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
if (tag != -1U) {
cmd = &nq->cmds[tag];
cmd->tag = tag;
+ cmd->error = BLK_STS_OK;
cmd->nq = nq;
if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
@@ -1258,6 +1276,25 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
}
}
+blk_status_t null_process_cmd(struct nullb_cmd *cmd,
+ enum req_opf op, sector_t sector,
+ unsigned int nr_sectors)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ blk_status_t ret;
+
+ if (dev->badblocks.shift != -1) {
+ ret = null_handle_badblocks(cmd, sector, nr_sectors);
+ if (ret != BLK_STS_OK)
+ return ret;
+ }
+
+ if (dev->memory_backed)
+ return null_handle_memory_backed(cmd, op);
+
+ return BLK_STS_OK;
+}
+
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
sector_t nr_sectors, enum req_opf op)
{
@@ -1276,17 +1313,11 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
goto out;
}
- if (nullb->dev->badblocks.shift != -1) {
- cmd->error = null_handle_badblocks(cmd, sector, nr_sectors);
- if (cmd->error != BLK_STS_OK)
- goto out;
- }
-
- if (dev->memory_backed)
- cmd->error = null_handle_memory_backed(cmd, op);
-
- if (!cmd->error && dev->zoned)
- cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);
+ if (dev->zoned)
+ cmd->error = null_process_zoned_cmd(cmd, op,
+ sector, nr_sectors);
+ else
+ cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
out:
nullb_complete_cmd(cmd);
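
With this split, the badblocks and memory-backed handling live in null_process_cmd(), which the zoned path can call back into for the actual data transfer once its zone checks pass; the top-level dispatch above therefore reduces to routing zoned devices through null_process_zoned_cmd() and everything else through null_process_cmd() directly.
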
@@ -1385,6 +1416,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = bd->rq;
+ cmd->error = BLK_STS_OK;
cmd->nq = nq;
blk_mq_start_request(bd->rq);
@@ -1408,12 +1440,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
}
-static const struct blk_mq_ops null_mq_ops = {
- .queue_rq = null_queue_rq,
- .complete = null_complete_rq,
- .timeout = null_timeout_rq,
-};
-
static void cleanup_queue(struct nullb_queue *nq)
{
kfree(nq->tag_map);
@@ -1430,9 +1456,56 @@ static void cleanup_queues(struct nullb *nullb)
kfree(nullb->queues);
}
+static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+ struct nullb_queue *nq = hctx->driver_data;
+ struct nullb *nullb = nq->dev->nullb;
+
+ nullb->nr_queues--;
+}
+
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = nullb->queue_depth;
+ nq->dev = nullb->dev;
+}
+
+static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
+ unsigned int hctx_idx)
+{
+ struct nullb *nullb = hctx->queue->queuedata;
+ struct nullb_queue *nq;
+
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (g_init_hctx_str[0] && should_fail(&null_init_hctx_attr, 1))
+ return -EFAULT;
+#endif
+
+ nq = &nullb->queues[hctx_idx];
+ hctx->driver_data = nq;
+ null_init_queue(nullb, nq);
+ nullb->nr_queues++;
+
+ return 0;
+}
+
+static const struct blk_mq_ops null_mq_ops = {
+ .queue_rq = null_queue_rq,
+ .complete = null_complete_rq,
+ .timeout = null_timeout_rq,
+ .init_hctx = null_init_hctx,
+ .exit_hctx = null_exit_hctx,
+};
+
static void null_del_dev(struct nullb *nullb)
{
- struct nullb_device *dev = nullb->dev;
+ struct nullb_device *dev;
+
+ if (!nullb)
+ return;
+
+ dev = nullb->dev;
ida_simple_remove(&nullb_indexes, nullb->index);
@@ -1462,6 +1535,13 @@ static void null_config_discard(struct nullb *nullb)
{
if (nullb->dev->discard == false)
return;
+
+ if (nullb->dev->zoned) {
+ nullb->dev->discard = false;
+ pr_info("discard option is ignored in zoned mode\n");
+ return;
+ }
+
nullb->q->limits.discard_granularity = nullb->dev->blocksize;
nullb->q->limits.discard_alignment = nullb->dev->blocksize;
blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
@@ -1473,33 +1553,6 @@ static const struct block_device_operations null_ops = {
.report_zones = null_report_zones,
};
-static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
-{
- BUG_ON(!nullb);
- BUG_ON(!nq);
-
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = nullb->queue_depth;
- nq->dev = nullb->dev;
-}
-
-static void null_init_queues(struct nullb *nullb)
-{
- struct request_queue *q = nullb->q;
- struct blk_mq_hw_ctx *hctx;
- struct nullb_queue *nq;
- int i;
-
- queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx->nr_ctx || !hctx->tags)
- continue;
- nq = &nullb->queues[i];
- hctx->driver_data = nq;
- null_init_queue(nullb, nq);
- nullb->nr_queues++;
- }
-}
-
static int setup_commands(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
@@ -1526,8 +1579,7 @@ static int setup_commands(struct nullb_queue *nq)
static int setup_queues(struct nullb *nullb)
{
- nullb->queues = kcalloc(nullb->dev->submit_queues,
- sizeof(struct nullb_queue),
+ nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue),
GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
@@ -1573,19 +1625,12 @@ static int null_gendisk_register(struct nullb *nullb)
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
-#ifdef CONFIG_BLK_DEV_ZONED
if (nullb->dev->zoned) {
- if (queue_is_mq(nullb->q)) {
- int ret = blk_revalidate_disk_zones(disk);
- if (ret)
- return ret;
- } else {
- blk_queue_chunk_sectors(nullb->q,
- nullb->dev->zone_size_sects);
- nullb->q->nr_zones = blkdev_nr_zones(disk);
- }
+ int ret = null_register_zoned_dev(nullb);
+
+ if (ret)
+ return ret;
}
-#endif
add_disk(disk);
return 0;
@@ -1669,6 +1714,8 @@ static bool null_setup_fault(void)
return false;
if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
return false;
+ if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
+ return false;
#endif
return true;
}
@@ -1712,19 +1759,17 @@ static int null_add_dev(struct nullb_device *dev)
goto out_cleanup_queues;
nullb->tag_set->timeout = 5 * HZ;
- nullb->q = blk_mq_init_queue(nullb->tag_set);
+ nullb->q = blk_mq_init_queue_data(nullb->tag_set, nullb);
if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
goto out_cleanup_tags;
}
- null_init_queues(nullb);
} else if (dev->queue_mode == NULL_Q_BIO) {
- nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
+ nullb->q = blk_alloc_queue(null_queue_bio, dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
}
- blk_queue_make_request(nullb->q, null_queue_bio);
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1741,14 +1786,9 @@ static int null_add_dev(struct nullb_device *dev)
}
if (dev->zoned) {
- rv = null_zone_init(dev);
+ rv = null_init_zoned_dev(dev, nullb->q);
if (rv)
goto out_cleanup_blk_queue;
-
- nullb->q->limits.zoned = BLK_ZONED_HM;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
- blk_queue_required_elevator_features(nullb->q,
- ELEVATOR_F_ZBD_SEQ_WRITE);
}
nullb->q->queuedata = nullb;
@@ -1777,8 +1817,7 @@ static int null_add_dev(struct nullb_device *dev)
return 0;
out_cleanup_zone:
- if (dev->zoned)
- null_zone_exit(dev);
+ null_free_zoned_dev(dev);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
@@ -1788,6 +1827,7 @@ out_cleanup_queues:
cleanup_queues(nullb);
out_free_nullb:
kfree(nullb);
+ dev->nullb = NULL;
out:
return rv;
}
diff --git a/drivers/block/null_blk_trace.c b/drivers/block/null_blk_trace.c
new file mode 100644
index 000000000000..f246e7bff698
--- /dev/null
+++ b/drivers/block/null_blk_trace.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * null_blk trace related helpers.
+ *
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#include "null_blk_trace.h"
+
+/*
+ * Helper to use for all null_blk traces to extract disk name.
+ */
+const char *nullb_trace_disk_name(struct trace_seq *p, char *name)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (name && *name)
+ trace_seq_printf(p, "disk=%s, ", name);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
diff --git a/drivers/block/null_blk_trace.h b/drivers/block/null_blk_trace.h
new file mode 100644
index 000000000000..4f83032eb544
--- /dev/null
+++ b/drivers/block/null_blk_trace.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * null_blk device driver tracepoints.
+ *
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nullb
+
+#if !defined(_TRACE_NULLB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NULLB_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "null_blk.h"
+
+const char *nullb_trace_disk_name(struct trace_seq *p, char *name);
+
+#define __print_disk_name(name) nullb_trace_disk_name(p, name)
+
+#ifndef TRACE_HEADER_MULTI_READ
+static inline void __assign_disk_name(char *name, struct gendisk *disk)
+{
+ if (disk)
+ memcpy(name, disk->disk_name, DISK_NAME_LEN);
+ else
+ memset(name, 0, DISK_NAME_LEN);
+}
+#endif
+
+TRACE_EVENT(nullb_zone_op,
+ TP_PROTO(struct nullb_cmd *cmd, unsigned int zone_no,
+ unsigned int zone_cond),
+ TP_ARGS(cmd, zone_no, zone_cond),
+ TP_STRUCT__entry(
+ __array(char, disk, DISK_NAME_LEN)
+ __field(enum req_opf, op)
+ __field(unsigned int, zone_no)
+ __field(unsigned int, zone_cond)
+ ),
+ TP_fast_assign(
+ __entry->op = req_op(cmd->rq);
+ __entry->zone_no = zone_no;
+ __entry->zone_cond = zone_cond;
+ __assign_disk_name(__entry->disk, cmd->rq->rq_disk);
+ ),
+ TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s",
+ __print_disk_name(__entry->disk),
+ blk_op_str(__entry->op),
+ __entry->zone_no,
+ blk_zone_cond_str(__entry->zone_cond))
+);
+
+TRACE_EVENT(nullb_report_zones,
+ TP_PROTO(struct nullb *nullb, unsigned int nr_zones),
+ TP_ARGS(nullb, nr_zones),
+ TP_STRUCT__entry(
+ __array(char, disk, DISK_NAME_LEN)
+ __field(unsigned int, nr_zones)
+ ),
+ TP_fast_assign(
+ __entry->nr_zones = nr_zones;
+ __assign_disk_name(__entry->disk, nullb->disk);
+ ),
+ TP_printk("%s nr_zones=%u",
+ __print_disk_name(__entry->disk), __entry->nr_zones)
+);
+
+#endif /* _TRACE_NULLB_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE null_blk_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
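+/*
+ * Editorial sketch of the rendered output (disk name, zone number and
+ * condition are hypothetical): with the TP_printk() format above, a
+ * nullb_zone_op event would appear in the trace buffer roughly as
+ *
+ *   nullb_zone_op: disk=nullb0, req=WRITE zone_no=4 zone_cond=IMP_OPEN
+ *
+ * and the "disk=..." prefix is omitted entirely when no gendisk is
+ * attached, which is why nullb_trace_disk_name() prints only for a
+ * non-empty name.
+ */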
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index ed34785dd64b..ed5458f2d367 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -2,6 +2,9 @@
#include <linux/vmalloc.h>
#include "null_blk.h"
+#define CREATE_TRACE_POINTS
+#include "null_blk_trace.h"
+
/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT 11
@@ -10,7 +13,7 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
return sect >> ilog2(dev->zone_size_sects);
}
-int null_zone_init(struct nullb_device *dev)
+int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
sector_t sector = 0;
@@ -20,6 +23,10 @@ int null_zone_init(struct nullb_device *dev)
pr_err("zone_size must be power-of-two\n");
return -EINVAL;
}
+ if (dev->zone_size > dev->size) {
+ pr_err("Zone size larger than device capacity\n");
+ return -EINVAL;
+ }
dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
dev->nr_zones = dev_size >>
@@ -58,10 +65,27 @@ int null_zone_init(struct nullb_device *dev)
sector += dev->zone_size_sects;
}
+ q->limits.zoned = BLK_ZONED_HM;
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
+
+ return 0;
+}
+
+int null_register_zoned_dev(struct nullb *nullb)
+{
+ struct request_queue *q = nullb->q;
+
+ if (queue_is_mq(q))
+ return blk_revalidate_disk_zones(nullb->disk);
+
+ blk_queue_chunk_sectors(q, nullb->dev->zone_size_sects);
+ q->nr_zones = blkdev_nr_zones(nullb->disk);
+
return 0;
}
-void null_zone_exit(struct nullb_device *dev)
+void null_free_zoned_dev(struct nullb_device *dev)
{
kvfree(dev->zones);
}
@@ -80,6 +104,8 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
return 0;
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
+ trace_nullb_report_zones(nullb, nr_zones);
+
for (i = 0; i < nr_zones; i++) {
/*
* Stacked DM target drivers will remap the zone information by
@@ -121,11 +147,16 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
struct blk_zone *zone = &dev->zones[zno];
+ blk_status_t ret;
+
+ trace_nullb_zone_op(cmd, zno, zone->cond);
+
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
switch (zone->cond) {
case BLK_ZONE_COND_FULL:
/* Cannot write to a full zone */
- cmd->error = BLK_STS_IOERR;
return BLK_STS_IOERR;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
@@ -138,24 +169,26 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
+ ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ if (ret != BLK_STS_OK)
+ return ret;
+
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->len)
zone->cond = BLK_ZONE_COND_FULL;
- break;
- case BLK_ZONE_COND_NOT_WP:
- break;
+ return BLK_STS_OK;
default:
/* Invalid zone condition */
return BLK_STS_IOERR;
}
- return BLK_STS_OK;
}
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
+ unsigned int zone_no = null_zone_no(dev, sector);
+ struct blk_zone *zone = &dev->zones[zone_no];
size_t i;
switch (op) {
@@ -203,11 +236,13 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
default:
return BLK_STS_NOTSUPP;
}
+
+ trace_nullb_zone_op(cmd, zone_no, zone->cond);
return BLK_STS_OK;
}
-blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
- sector_t sector, sector_t nr_sectors)
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
+ sector_t sector, sector_t nr_sectors)
{
switch (op) {
case REQ_OP_WRITE:
@@ -219,6 +254,6 @@ blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
case REQ_OP_ZONE_FINISH:
return null_zone_mgmt(cmd, op, sector);
default:
- return BLK_STS_OK;
+ return null_process_cmd(cmd, op, sector, nr_sectors);
}
}
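+/*
+ * Editorial summary of the control flow after this hunk: the zoned path
+ * now handles only zone writes and zone-management ops itself; reads,
+ * writes to conventional zones, and any other op fall through to
+ * null_process_cmd(), so badblocks and memory-backed handling apply
+ * uniformly to zoned and non-zoned devices.
+ */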
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 5f970a7d32c0..0b944ac96d6b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2493,7 +2493,6 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
{
struct request_queue *q = pd->disk->queue;
- blk_queue_make_request(q, pkt_make_request);
blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
q->queuedata = pd;
@@ -2679,6 +2678,11 @@ static unsigned int pkt_check_events(struct gendisk *disk,
return attached_disk->fops->check_events(attached_disk, clearing);
}
+static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
+}
+
static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.open = pkt_open,
@@ -2686,13 +2690,9 @@ static const struct block_device_operations pktcdvd_ops = {
.ioctl = pkt_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = pkt_check_events,
+ .devnode = pkt_devnode,
};
-static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
-}
-
/*
* Set up mapping from pktcdvd device to CD-ROM device.
*/
@@ -2748,9 +2748,8 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
disk->fops = &pktcdvd_ops;
disk->flags = GENHD_FL_REMOVABLE;
strcpy(disk->disk_name, pd->name);
- disk->devnode = pktcdvd_devnode;
disk->private_data = pd;
- disk->queue = blk_alloc_queue(GFP_KERNEL);
+ disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE);
if (!disk->queue)
goto out_mem2;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 4628e1a27a2b..821d4d8b1d76 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -737,7 +737,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
ps3vram_proc_init(dev);
- queue = blk_alloc_queue(GFP_KERNEL);
+ queue = blk_alloc_queue(ps3vram_make_request, NUMA_NO_NODE);
if (!queue) {
dev_err(&dev->core, "blk_alloc_queue failed\n");
error = -ENOMEM;
@@ -746,7 +746,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
priv->queue = queue;
queue->queuedata = dev;
- blk_queue_make_request(queue, ps3vram_make_request);
blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6343402c09e6..67d65ac785e9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -337,10 +337,7 @@ struct rbd_img_request {
u64 snap_id; /* for reads */
struct ceph_snap_context *snapc; /* for writes */
};
- union {
- struct request *rq; /* block request */
- struct rbd_obj_request *obj_request; /* obj req initiator */
- };
+ struct rbd_obj_request *obj_request; /* obj req initiator */
struct list_head lock_item;
struct list_head object_extents; /* obj_req.ex structs */
@@ -349,7 +346,6 @@ struct rbd_img_request {
struct pending_result pending;
struct work_struct work;
int work_result;
- struct kref kref;
};
#define for_each_obj_request(ireq, oreq) \
@@ -1320,15 +1316,6 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
-static void rbd_img_request_destroy(struct kref *kref);
-static void rbd_img_request_put(struct rbd_img_request *img_request)
-{
- rbd_assert(img_request != NULL);
- dout("%s: img %p (was %d)\n", __func__, img_request,
- kref_read(&img_request->kref));
- kref_put(&img_request->kref, rbd_img_request_destroy);
-}
-
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
struct rbd_obj_request *obj_request)
{
@@ -1366,18 +1353,10 @@ static void rbd_osd_submit(struct ceph_osd_request *osd_req)
static void img_request_layered_set(struct rbd_img_request *img_request)
{
set_bit(IMG_REQ_LAYERED, &img_request->flags);
- smp_mb();
-}
-
-static void img_request_layered_clear(struct rbd_img_request *img_request)
-{
- clear_bit(IMG_REQ_LAYERED, &img_request->flags);
- smp_mb();
}
static bool img_request_layered_test(struct rbd_img_request *img_request)
{
- smp_mb();
return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
@@ -1619,10 +1598,8 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
if (!rbd_dev->parent_spec)
return false;
- down_read(&rbd_dev->header_rwsem);
if (rbd_dev->parent_overlap)
counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
- up_read(&rbd_dev->header_rwsem);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
@@ -1630,63 +1607,54 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
return counter > 0;
}
-/*
- * Caller is responsible for filling in the list of object requests
- * that comprises the image request, and the Linux request pointer
- * (if there is one).
- */
-static struct rbd_img_request *rbd_img_request_create(
- struct rbd_device *rbd_dev,
- enum obj_operation_type op_type,
- struct ceph_snap_context *snapc)
+static void rbd_img_request_init(struct rbd_img_request *img_request,
+ struct rbd_device *rbd_dev,
+ enum obj_operation_type op_type)
{
- struct rbd_img_request *img_request;
-
- img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
- if (!img_request)
- return NULL;
+ memset(img_request, 0, sizeof(*img_request));
img_request->rbd_dev = rbd_dev;
img_request->op_type = op_type;
- if (!rbd_img_is_write(img_request))
- img_request->snap_id = rbd_dev->spec->snap_id;
- else
- img_request->snapc = snapc;
-
- if (rbd_dev_parent_get(rbd_dev))
- img_request_layered_set(img_request);
INIT_LIST_HEAD(&img_request->lock_item);
INIT_LIST_HEAD(&img_request->object_extents);
mutex_init(&img_request->state_mutex);
- kref_init(&img_request->kref);
+}
+
+static void rbd_img_capture_header(struct rbd_img_request *img_req)
+{
+ struct rbd_device *rbd_dev = img_req->rbd_dev;
- return img_request;
+ lockdep_assert_held(&rbd_dev->header_rwsem);
+
+ if (rbd_img_is_write(img_req))
+ img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ else
+ img_req->snap_id = rbd_dev->spec->snap_id;
+
+ if (rbd_dev_parent_get(rbd_dev))
+ img_request_layered_set(img_req);
}
-static void rbd_img_request_destroy(struct kref *kref)
+static void rbd_img_request_destroy(struct rbd_img_request *img_request)
{
- struct rbd_img_request *img_request;
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
- img_request = container_of(kref, struct rbd_img_request, kref);
-
dout("%s: img %p\n", __func__, img_request);
WARN_ON(!list_empty(&img_request->lock_item));
for_each_obj_request_safe(img_request, obj_request, next_obj_request)
rbd_img_obj_request_del(img_request, obj_request);
- if (img_request_layered_test(img_request)) {
- img_request_layered_clear(img_request);
+ if (img_request_layered_test(img_request))
rbd_dev_parent_put(img_request->rbd_dev);
- }
if (rbd_img_is_write(img_request))
ceph_put_snap_context(img_request->snapc);
- kmem_cache_free(rbd_img_request_cache, img_request);
+ if (test_bit(IMG_REQ_CHILD, &img_request->flags))
+ kmem_cache_free(rbd_img_request_cache, img_request);
}
#define BITS_PER_OBJ 2
@@ -2849,17 +2817,22 @@ static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
struct rbd_img_request *img_req = obj_req->img_request;
+ struct rbd_device *parent = img_req->rbd_dev->parent;
struct rbd_img_request *child_img_req;
int ret;
- child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
- OBJ_OP_READ, NULL);
+ child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
if (!child_img_req)
return -ENOMEM;
+ rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
child_img_req->obj_request = obj_req;
+ down_read(&parent->header_rwsem);
+ rbd_img_capture_header(child_img_req);
+ up_read(&parent->header_rwsem);
+
dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
obj_req);
@@ -2888,7 +2861,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
obj_req->copyup_bvecs);
}
if (ret) {
- rbd_img_request_put(child_img_req);
+ rbd_img_request_destroy(child_img_req);
return ret;
}
@@ -3647,15 +3620,15 @@ again:
if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
struct rbd_obj_request *obj_req = img_req->obj_request;
- rbd_img_request_put(img_req);
+ rbd_img_request_destroy(img_req);
if (__rbd_obj_handle_request(obj_req, &result)) {
img_req = obj_req->img_request;
goto again;
}
} else {
- struct request *rq = img_req->rq;
+ struct request *rq = blk_mq_rq_from_pdu(img_req);
- rbd_img_request_put(img_req);
+ rbd_img_request_destroy(img_req);
blk_mq_end_request(rq, errno_to_blk_status(result));
}
}
@@ -3781,11 +3754,7 @@ static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
enum rbd_notify_op notify_op)
{
- struct page **reply_pages;
- size_t reply_len;
-
- __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
- ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
+ __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
}
static void rbd_notify_acquired_lock(struct work_struct *work)
@@ -4554,6 +4523,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
cancel_work_sync(&rbd_dev->unlock_work);
}
+/*
+ * header_rwsem must not be held to avoid a deadlock with
+ * rbd_dev_refresh() when flushing notifies.
+ */
static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
cancel_tasks_sync(rbd_dev);
@@ -4707,84 +4680,36 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
static void rbd_queue_workfn(struct work_struct *work)
{
- struct request *rq = blk_mq_rq_from_pdu(work);
- struct rbd_device *rbd_dev = rq->q->queuedata;
- struct rbd_img_request *img_request;
- struct ceph_snap_context *snapc = NULL;
+ struct rbd_img_request *img_request =
+ container_of(work, struct rbd_img_request, work);
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
+ enum obj_operation_type op_type = img_request->op_type;
+ struct request *rq = blk_mq_rq_from_pdu(img_request);
u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
u64 length = blk_rq_bytes(rq);
- enum obj_operation_type op_type;
u64 mapping_size;
int result;
- switch (req_op(rq)) {
- case REQ_OP_DISCARD:
- op_type = OBJ_OP_DISCARD;
- break;
- case REQ_OP_WRITE_ZEROES:
- op_type = OBJ_OP_ZEROOUT;
- break;
- case REQ_OP_WRITE:
- op_type = OBJ_OP_WRITE;
- break;
- case REQ_OP_READ:
- op_type = OBJ_OP_READ;
- break;
- default:
- dout("%s: non-fs request type %d\n", __func__, req_op(rq));
- result = -EIO;
- goto err;
- }
-
/* Ignore/skip any zero-length requests */
-
if (!length) {
dout("%s: zero-length request\n", __func__);
result = 0;
- goto err_rq;
- }
-
- if (op_type != OBJ_OP_READ) {
- if (rbd_is_ro(rbd_dev)) {
- rbd_warn(rbd_dev, "%s on read-only mapping",
- obj_op_name(op_type));
- result = -EIO;
- goto err;
- }
- rbd_assert(!rbd_is_snap(rbd_dev));
- }
-
- if (offset && length > U64_MAX - offset + 1) {
- rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
- length);
- result = -EINVAL;
- goto err_rq; /* Shouldn't happen */
+ goto err_img_request;
}
blk_mq_start_request(rq);
down_read(&rbd_dev->header_rwsem);
mapping_size = rbd_dev->mapping.size;
- if (op_type != OBJ_OP_READ) {
- snapc = rbd_dev->header.snapc;
- ceph_get_snap_context(snapc);
- }
+ rbd_img_capture_header(img_request);
up_read(&rbd_dev->header_rwsem);
if (offset + length > mapping_size) {
rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
length, mapping_size);
result = -EIO;
- goto err_rq;
- }
-
- img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
- if (!img_request) {
- result = -ENOMEM;
- goto err_rq;
+ goto err_img_request;
}
- img_request->rq = rq;
- snapc = NULL; /* img_request consumes a ref */
dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
img_request, obj_op_name(op_type), offset, length);
@@ -4801,23 +4726,51 @@ static void rbd_queue_workfn(struct work_struct *work)
return;
err_img_request:
- rbd_img_request_put(img_request);
-err_rq:
+ rbd_img_request_destroy(img_request);
if (result)
rbd_warn(rbd_dev, "%s %llx at %llx result %d",
obj_op_name(op_type), length, offset, result);
- ceph_put_snap_context(snapc);
-err:
blk_mq_end_request(rq, errno_to_blk_status(result));
}
static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct request *rq = bd->rq;
- struct work_struct *work = blk_mq_rq_to_pdu(rq);
+ struct rbd_device *rbd_dev = hctx->queue->queuedata;
+ struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
+ enum obj_operation_type op_type;
+
+ switch (req_op(bd->rq)) {
+ case REQ_OP_DISCARD:
+ op_type = OBJ_OP_DISCARD;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ op_type = OBJ_OP_ZEROOUT;
+ break;
+ case REQ_OP_WRITE:
+ op_type = OBJ_OP_WRITE;
+ break;
+ case REQ_OP_READ:
+ op_type = OBJ_OP_READ;
+ break;
+ default:
+ rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
+ return BLK_STS_IOERR;
+ }
+
+ rbd_img_request_init(img_req, rbd_dev, op_type);
- queue_work(rbd_wq, work);
+ if (rbd_img_is_write(img_req)) {
+ if (rbd_is_ro(rbd_dev)) {
+ rbd_warn(rbd_dev, "%s on read-only mapping",
+ obj_op_name(img_req->op_type));
+ return BLK_STS_IOERR;
+ }
+ rbd_assert(!rbd_is_snap(rbd_dev));
+ }
+
+ INIT_WORK(&img_req->work, rbd_queue_workfn);
+ queue_work(rbd_wq, &img_req->work);
return BLK_STS_OK;
}
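+/*
+ * Editorial sketch of the new allocation scheme (standard blk-mq pdu
+ * semantics assumed): with tag_set.cmd_size = sizeof(struct
+ * rbd_img_request), blk-mq allocates the img_request inline with each
+ * request, making the two conversions symmetric:
+ *
+ *   struct rbd_img_request *img_req = blk_mq_rq_to_pdu(rq);
+ *   struct request *rq = blk_mq_rq_from_pdu(img_req);
+ *
+ * Top-level image requests therefore need neither a kmem_cache
+ * allocation nor a kref; only child requests are still carved out of
+ * rbd_img_request_cache and freed in rbd_img_request_destroy().
+ */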
@@ -4984,18 +4937,8 @@ out:
return ret;
}
-static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct work_struct *work = blk_mq_rq_to_pdu(rq);
-
- INIT_WORK(work, rbd_queue_workfn);
- return 0;
-}
-
static const struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
- .init_request = rbd_init_request,
};
static int rbd_init_disk(struct rbd_device *rbd_dev)
@@ -5027,8 +4970,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- rbd_dev->tag_set.nr_hw_queues = 1;
- rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
+ rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
+ rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
if (err)
@@ -6951,9 +6894,10 @@ static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
- rbd_dev_unprobe(rbd_dev);
- if (rbd_dev->opts)
+ if (!rbd_is_ro(rbd_dev))
rbd_unregister_watch(rbd_dev);
+
+ rbd_dev_unprobe(rbd_dev);
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
@@ -6964,6 +6908,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
* device. If this image is the one being mapped (i.e., not a
* parent), initiate a watch on its header object before using that
* object to get detailed information about the rbd image.
+ *
+ * On success, returns with header_rwsem held for write if called
+ * with @depth == 0.
*/
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
@@ -6993,11 +6940,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
}
+ if (!depth)
+ down_write(&rbd_dev->header_rwsem);
+
ret = rbd_dev_header_info(rbd_dev);
if (ret) {
if (ret == -ENOENT && !need_watch)
rbd_print_dne(rbd_dev, false);
- goto err_out_watch;
+ goto err_out_probe;
}
/*
@@ -7042,10 +6992,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
return 0;
err_out_probe:
- rbd_dev_unprobe(rbd_dev);
-err_out_watch:
+ if (!depth)
+ up_write(&rbd_dev->header_rwsem);
if (need_watch)
rbd_unregister_watch(rbd_dev);
+ rbd_dev_unprobe(rbd_dev);
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
@@ -7107,12 +7058,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
goto err_out_rbd_dev;
}
- down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
- if (rc < 0) {
- up_write(&rbd_dev->header_rwsem);
+ if (rc < 0)
goto err_out_rbd_dev;
- }
if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
rbd_warn(rbd_dev, "alloc_size adjusted to %u",
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index c47d28b2ce44..8ffa8260dcaf 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -248,7 +248,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
return -ENOMEM;
}
- card->queue = blk_alloc_queue(GFP_KERNEL);
+ card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE);
if (!card->queue) {
dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
unregister_blkdev(card->major, DRIVER_NAME);
@@ -269,7 +269,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
blk_queue_logical_block_size(card->queue, blk_size);
}
- blk_queue_make_request(card->queue, rsxx_make_request);
blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 111eb659e66d..1914f5488b22 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -80,7 +80,7 @@ struct dma_tracker {
struct dma_tracker_list {
spinlock_t lock;
int head;
- struct dma_tracker list[0];
+ struct dma_tracker list[];
};
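+/*
+ * Editorial note covering this and the later data[0] -> data[] hunks
+ * (btintel, btqca, btrtl, hci_ag6xx, hci_intel): both spellings declare
+ * a trailing variable-length array, but data[0] is a GCC zero-length
+ * array extension; the C99 flexible array member form is what the
+ * compiler and bounds-checking instrumentation understand, hence the
+ * tree-wide conversion.
+ */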
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 4eaf97d7a170..d84e8a878df2 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -885,11 +885,9 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
card->biotail = &card->bio;
spin_lock_init(&card->lock);
- card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+ card->queue = blk_alloc_queue(mm_make_request, NUMA_NO_NODE);
if (!card->queue)
goto failed_alloc;
-
- blk_queue_make_request(card->queue, mm_make_request);
card->queue->queuedata = card;
tasklet_init(&card->tasklet, process_page, (unsigned long)card);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 0736248999b0..9d21bf0f155e 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -15,6 +15,7 @@
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
+#include <uapi/linux/virtio_ring.h>
#define PART_BITS 4
#define VQ_NAME_LEN 16
@@ -32,6 +33,15 @@ struct virtio_blk_vq {
} ____cacheline_aligned_in_smp;
struct virtio_blk {
+ /*
+ * This mutex must be held by anything that may run after
+ * virtblk_remove() sets vblk->vdev to NULL.
+ *
+ * blk-mq, virtqueue processing, and sysfs attribute code paths are
+ * shut down before vblk->vdev is set to NULL and therefore do not need
+ * to hold this mutex.
+ */
+ struct mutex vdev_mutex;
struct virtio_device *vdev;
/* The disk structure for the kernel. */
@@ -43,6 +53,13 @@ struct virtio_blk {
/* Process context for config space updates */
struct work_struct config_work;
+ /*
+ * Tracks references from block_device_operations open/release and
+ * virtio_driver probe/remove so this object can be freed once no
+ * longer in use.
+ */
+ refcount_t refs;
+
/* What host tells us, plus 2 for header & trailer. */
unsigned int sg_elems;
@@ -294,10 +311,55 @@ out:
return err;
}
+static void virtblk_get(struct virtio_blk *vblk)
+{
+ refcount_inc(&vblk->refs);
+}
+
+static void virtblk_put(struct virtio_blk *vblk)
+{
+ if (refcount_dec_and_test(&vblk->refs)) {
+ ida_simple_remove(&vd_index_ida, vblk->index);
+ mutex_destroy(&vblk->vdev_mutex);
+ kfree(vblk);
+ }
+}
+
+static int virtblk_open(struct block_device *bd, fmode_t mode)
+{
+ struct virtio_blk *vblk = bd->bd_disk->private_data;
+ int ret = 0;
+
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (vblk->vdev)
+ virtblk_get(vblk);
+ else
+ ret = -ENXIO;
+
+ mutex_unlock(&vblk->vdev_mutex);
+ return ret;
+}
+
+static void virtblk_release(struct gendisk *disk, fmode_t mode)
+{
+ struct virtio_blk *vblk = disk->private_data;
+
+ virtblk_put(vblk);
+}
+
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
struct virtio_blk *vblk = bd->bd_disk->private_data;
+ int ret = 0;
+
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (!vblk->vdev) {
+ ret = -ENXIO;
+ goto out;
+ }
/* see if the host passed in geometry config */
if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
@@ -313,11 +375,15 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
geo->sectors = 1 << 5;
geo->cylinders = get_capacity(bd->bd_disk) >> 11;
}
- return 0;
+out:
+ mutex_unlock(&vblk->vdev_mutex);
+ return ret;
}
static const struct block_device_operations virtblk_fops = {
.owner = THIS_MODULE,
+ .open = virtblk_open,
+ .release = virtblk_release,
.getgeo = virtblk_getgeo,
};
@@ -388,18 +454,15 @@ static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
cap_str_10,
cap_str_2);
- set_capacity(vblk->disk, capacity);
+ set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
}
static void virtblk_config_changed_work(struct work_struct *work)
{
struct virtio_blk *vblk =
container_of(work, struct virtio_blk, config_work);
- char *envp[] = { "RESIZE=1", NULL };
virtblk_update_capacity(vblk, true);
- revalidate_disk(vblk->disk);
- kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}
static void virtblk_config_changed(struct virtio_device *vdev)
@@ -657,6 +720,10 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_index;
}
+ /* This reference is dropped in virtblk_remove(). */
+ refcount_set(&vblk->refs, 1);
+ mutex_init(&vblk->vdev_mutex);
+
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
@@ -822,8 +889,6 @@ out:
static void virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
- int index = vblk->index;
- int refc;
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
@@ -833,18 +898,21 @@ static void virtblk_remove(struct virtio_device *vdev)
blk_mq_free_tag_set(&vblk->tag_set);
+ mutex_lock(&vblk->vdev_mutex);
+
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
+ /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
+ vblk->vdev = NULL;
+
put_disk(vblk->disk);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
- kfree(vblk);
- /* Only free device id if we don't have any users */
- if (refc == 1)
- ida_simple_remove(&vd_index_ida, index);
+ mutex_unlock(&vblk->vdev_mutex);
+
+ virtblk_put(vblk);
}
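+/*
+ * Editorial lifecycle sketch, as implied by the hunks above: probe sets
+ * refs = 1; virtblk_open() takes an extra reference only while
+ * vblk->vdev is non-NULL; virtblk_release() drops one; remove NULLs
+ * vblk->vdev under vdev_mutex and then drops the probe reference.  The
+ * last virtblk_put(), whether from remove or from a straggling release,
+ * frees the ida index and the vblk itself.
+ */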
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9df516a56bb2..3b889ea950c2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -47,6 +47,7 @@
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
+#include <linux/sched/mm.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -2189,10 +2190,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
- unsigned int psegs, grants;
+ unsigned int psegs, grants, memflags;
int err, i;
struct blkfront_info *info = rinfo->dev_info;
+ memflags = memalloc_noio_save();
+
if (info->max_indirect_segments == 0) {
if (!HAS_EXTRA_REQ)
grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
@@ -2224,7 +2227,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
BUG_ON(!list_empty(&rinfo->indirect_pages));
for (i = 0; i < num; i++) {
- struct page *indirect_page = alloc_page(GFP_NOIO);
+ struct page *indirect_page = alloc_page(GFP_KERNEL);
if (!indirect_page)
goto out_of_memory;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2235,15 +2238,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
rinfo->shadow[i].grants_used =
kvcalloc(grants,
sizeof(rinfo->shadow[i].grants_used[0]),
- GFP_NOIO);
+ GFP_KERNEL);
rinfo->shadow[i].sg = kvcalloc(psegs,
sizeof(rinfo->shadow[i].sg[0]),
- GFP_NOIO);
+ GFP_KERNEL);
if (info->max_indirect_segments)
rinfo->shadow[i].indirect_grants =
kvcalloc(INDIRECT_GREFS(grants),
sizeof(rinfo->shadow[i].indirect_grants[0]),
- GFP_NOIO);
+ GFP_KERNEL);
if ((rinfo->shadow[i].grants_used == NULL) ||
(rinfo->shadow[i].sg == NULL) ||
(info->max_indirect_segments &&
@@ -2252,6 +2255,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
sg_init_table(rinfo->shadow[i].sg, psegs);
}
+ memalloc_noio_restore(memflags);
return 0;
@@ -2271,6 +2275,9 @@ out_of_memory:
__free_page(indirect_page);
}
}
+
+ memalloc_noio_restore(memflags);
+
return -ENOMEM;
}
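+/*
+ * Editorial sketch of the scoped-NOIO pattern this hunk adopts (standard
+ * linux/sched/mm.h interface assumed):
+ *
+ *   unsigned int flags = memalloc_noio_save();
+ *   ptr = kvcalloc(n, size, GFP_KERNEL);    // behaves as GFP_NOIO here
+ *   ...
+ *   memalloc_noio_restore(flags);
+ *
+ * Every allocation inside the section, including ones performed by
+ * callees, implicitly drops __GFP_IO, which is why the explicit GFP_NOIO
+ * at each call site can become GFP_KERNEL.
+ */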
@@ -2338,7 +2345,6 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned long sector_size;
unsigned int physical_sector_size;
unsigned int binfo;
- char *envp[] = { "RESIZE=1", NULL };
int err, i;
struct blkfront_ring_info *rinfo;
@@ -2354,10 +2360,7 @@ static void blkfront_connect(struct blkfront_info *info)
return;
printk(KERN_INFO "Setting capacity to %Lu\n",
sectors);
- set_capacity(info->gd, sectors);
- revalidate_disk(info->gd);
- kobject_uevent_env(&disk_to_dev(info->gd)->kobj,
- KOBJ_CHANGE, envp);
+ set_capacity_revalidate_and_notify(info->gd, sectors, true);
return;
case BLKIF_STATE_SUSPENDED:
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 1bdb5793842b..ebb234f36909 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -33,6 +33,7 @@
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
+#include <linux/part_stat.h>
#include "zram_drv.h"
@@ -1894,7 +1895,7 @@ static int zram_add(void)
#ifdef CONFIG_ZRAM_WRITEBACK
spin_lock_init(&zram->wb_limit_lock);
#endif
- queue = blk_alloc_queue(GFP_KERNEL);
+ queue = blk_alloc_queue(zram_make_request, NUMA_NO_NODE);
if (!queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
@@ -1902,8 +1903,6 @@ static int zram_add(void)
goto out_free_idr;
}
- blk_queue_make_request(queue, zram_make_request);
-
/* gendisk structure */
zram->disk = alloc_disk(1);
if (!zram->disk) {
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index f7aa2dc1ff85..4e73a531b377 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -211,12 +211,12 @@ config BT_HCIUART_RTL
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on GPIOLIB
- depends on ACPI
+ depends on (ACPI || SERIAL_DEV_CTRL_TTYPORT)
select BT_HCIUART_3WIRE
select BT_RTL
help
The Realtek protocol support enables Bluetooth HCI over 3-Wire
- serial port internface for Realtek Bluetooth controllers.
+ serial port interface for Realtek Bluetooth controllers.
Say Y here to compile support for Realtek protocol.
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 0e5954cac98e..5a321b4076aa 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -133,8 +133,8 @@ static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- BT_ERR("%s bulk tx submit failed urb %p err %d",
- data->hdev->name, urb, err);
+ bt_dev_err(data->hdev, "bulk tx submit failed urb %p err %d",
+ urb, err);
skb_unlink(skb, &data->pending_q);
usb_free_urb(urb);
} else
@@ -232,8 +232,8 @@ static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- BT_ERR("%s bulk rx submit failed urb %p err %d",
- data->hdev->name, urb, err);
+ bt_dev_err(data->hdev, "bulk rx submit failed urb %p err %d",
+ urb, err);
skb_unlink(skb, &data->pending_q);
kfree_skb(skb);
usb_free_urb(urb);
@@ -247,7 +247,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len);
if (hdr & 0x10) {
- BT_ERR("%s error in block", data->hdev->name);
+ bt_dev_err(data->hdev, "error in block");
kfree_skb(data->reassembly);
data->reassembly = NULL;
return -EIO;
@@ -259,13 +259,13 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
int pkt_len = 0;
if (data->reassembly) {
- BT_ERR("%s unexpected start block", data->hdev->name);
+ bt_dev_err(data->hdev, "unexpected start block");
kfree_skb(data->reassembly);
data->reassembly = NULL;
}
if (len < 1) {
- BT_ERR("%s no packet type found", data->hdev->name);
+ bt_dev_err(data->hdev, "no packet type found");
return -EPROTO;
}
@@ -277,7 +277,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf;
pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen;
} else {
- BT_ERR("%s event block is too short", data->hdev->name);
+ bt_dev_err(data->hdev, "event block is too short");
return -EILSEQ;
}
break;
@@ -287,7 +287,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf;
pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen);
} else {
- BT_ERR("%s data block is too short", data->hdev->name);
+ bt_dev_err(data->hdev, "data block is too short");
return -EILSEQ;
}
break;
@@ -297,7 +297,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf;
pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen;
} else {
- BT_ERR("%s audio block is too short", data->hdev->name);
+ bt_dev_err(data->hdev, "audio block is too short");
return -EILSEQ;
}
break;
@@ -305,7 +305,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
skb = bt_skb_alloc(pkt_len, GFP_ATOMIC);
if (!skb) {
- BT_ERR("%s no memory for the packet", data->hdev->name);
+ bt_dev_err(data->hdev, "no memory for the packet");
return -ENOMEM;
}
@@ -314,7 +314,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
data->reassembly = skb;
} else {
if (!data->reassembly) {
- BT_ERR("%s unexpected continuation block", data->hdev->name);
+ bt_dev_err(data->hdev, "unexpected continuation block");
return -EIO;
}
}
@@ -366,8 +366,7 @@ static void bfusb_rx_complete(struct urb *urb)
}
if (count < len) {
- BT_ERR("%s block extends over URB buffer ranges",
- data->hdev->name);
+ bt_dev_err(data->hdev, "block extends over URB buffer ranges");
}
if ((hdr & 0xe1) == 0xc1)
@@ -391,8 +390,8 @@ resubmit:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- BT_ERR("%s bulk resubmit failed urb %p err %d",
- data->hdev->name, urb, err);
+ bt_dev_err(data->hdev, "bulk resubmit failed urb %p err %d",
+ urb, err);
}
unlock:
@@ -477,7 +476,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Max HCI frame size seems to be 1511 + 1 */
nskb = bt_skb_alloc(count + 32, GFP_KERNEL);
if (!nskb) {
- BT_ERR("Can't allocate memory for new packet");
+ bt_dev_err(hdev, "Can't allocate memory for new packet");
return -ENOMEM;
}
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 62e781a18bf0..6a0e2c5a8beb 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -376,13 +376,13 @@ struct ibt_cp_reg_access {
__le32 addr;
__u8 mode;
__u8 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct ibt_rp_reg_access {
__u8 status;
__le32 addr;
- __u8 data[0];
+ __u8 data[];
} __packed;
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index ec69e5dd7bd3..a16845c0751d 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -139,7 +139,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
static void qca_tlv_check_data(struct qca_fw_config *config,
- const struct firmware *fw)
+ const struct firmware *fw, enum qca_btsoc_type soc_type)
{
const u8 *data;
u32 type_len;
@@ -148,6 +148,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
struct tlv_type_hdr *tlv;
struct tlv_type_patch *tlv_patch;
struct tlv_type_nvm *tlv_nvm;
+ uint8_t nvm_baud_rate = config->user_baud_rate;
tlv = (struct tlv_type_hdr *)fw->data;
@@ -216,7 +217,10 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
tlv_nvm->data[0] |= 0x80;
/* UART Baud Rate */
- tlv_nvm->data[2] = config->user_baud_rate;
+ if (soc_type == QCA_WCN3991)
+ tlv_nvm->data[1] = nvm_baud_rate;
+ else
+ tlv_nvm->data[2] = nvm_baud_rate;
break;
@@ -354,7 +358,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
return ret;
}
- qca_tlv_check_data(config, fw);
+ qca_tlv_check_data(config, fw, soc_type);
segment = fw->data;
remain = fw->size;
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index f5795b1a3779..e16a4d650597 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -79,7 +79,7 @@ struct qca_fw_config {
struct edl_event_hdr {
__u8 cresp;
__u8 rtype;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct qca_btsoc_version {
@@ -112,12 +112,12 @@ struct tlv_type_nvm {
__le16 tag_len;
__le32 reserve1;
__le32 reserve2;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct tlv_type_hdr {
__le32 type_len;
- __u8 data[0];
+ __u8 data[];
} __packed;
enum qca_btsoc_type {
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 577cfa3329db..67f4bc21e7c5 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -136,6 +136,18 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8761a_fw.bin",
.cfg_name = "rtl_bt/rtl8761a_config" },
+ /* 8822C with UART interface */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
+ IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8822B,
+ .hci_rev = 0x000c,
+ .hci_ver = 0x0a,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8822cs_fw.bin",
+ .cfg_name = "rtl_bt/rtl8822cs_config" },
+
/* 8822C with USB interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc),
.config_needed = false,
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index 10ad40c3e42c..2a582682136d 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -38,13 +38,13 @@ struct rtl_epatch_header {
struct rtl_vendor_config_entry {
__le16 offset;
__u8 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct rtl_vendor_config {
__le32 signature;
__le16 total_len;
- struct rtl_vendor_config_entry entry[0];
+ struct rtl_vendor_config_entry entry[];
} __packed;
#if IS_ENABLED(CONFIG_BT_RTL)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f5924f3e8b8d..3bdec42c9612 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -57,6 +57,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
#define BTUSB_MEDIATEK 0x200000
+#define BTUSB_WIDEBAND_SPEECH 0x400000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -333,15 +334,21 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },
/* Intel Bluetooth devices */
- { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
- { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL },
- { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -387,6 +394,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3548), .driver_info = BTUSB_REALTEK },
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -1930,7 +1938,14 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (err)
return err;
- bt_dev_info(hdev, "Intel firmware patch completed and activated");
+ /* Need build number for downloaded fw patches in
+ * every power-on boot
+ */
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+ bt_dev_info(hdev, "Intel BT fw patch 0x%02x completed & activated",
+ ver.fw_patch_num);
goto complete;
@@ -3859,6 +3874,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BROKEN_ISOC)
data->isoc = NULL;
+ if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
index 8bafa650b5b0..1f55df93e4ce 100644
--- a/drivers/bluetooth/hci_ag6xx.c
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -27,7 +27,7 @@ struct ag6xx_data {
struct pbn_entry {
__le32 addr;
__le32 plen;
- __u8 data[0];
+ __u8 data[];
} __packed;
static int ag6xx_open(struct hci_uart *hu)
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 6dc1fbeb564b..4b3b14a34794 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -71,8 +71,6 @@ static int h4_close(struct hci_uart *hu)
{
struct h4_struct *h4 = hu->priv;
- hu->priv = NULL;
-
BT_DBG("hu %p", hu);
skb_queue_purge(&h4->txq);
@@ -85,7 +83,7 @@ static int h4_close(struct hci_uart *hu)
return 0;
}
-/* Enqueue frame for transmittion (padding, crc, etc) */
+/* Enqueue frame for transmission (padding, crc, etc) */
static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct h4_struct *h4 = hu->priv;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0b14547482a7..106c110efe56 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
@@ -177,7 +178,7 @@ static void h5_peer_reset(struct hci_uart *hu)
{
struct h5 *h5 = hu->priv;
- BT_ERR("Peer device has reset");
+ bt_dev_err(hu->hdev, "Peer device has reset");
h5->state = H5_UNINITIALIZED;
@@ -437,21 +438,21 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
H5_HDR_LEN(hdr));
if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
- BT_ERR("Invalid header checksum");
+ bt_dev_err(hu->hdev, "Invalid header checksum");
h5_reset_rx(h5);
return 0;
}
if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
- BT_ERR("Out-of-order packet arrived (%u != %u)",
- H5_HDR_SEQ(hdr), h5->tx_ack);
+ bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)",
+ H5_HDR_SEQ(hdr), h5->tx_ack);
h5_reset_rx(h5);
return 0;
}
if (h5->state != H5_ACTIVE &&
H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
- BT_ERR("Non-link packet received in non-active state");
+ bt_dev_err(hu->hdev, "Non-link packet received in non-active state");
h5_reset_rx(h5);
return 0;
}
@@ -474,7 +475,7 @@ static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
if (!h5->rx_skb) {
- BT_ERR("Can't allocate mem for new packet");
+ bt_dev_err(hu->hdev, "Can't allocate mem for new packet");
h5_reset_rx(h5);
return -ENOMEM;
}
@@ -550,7 +551,7 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
if (h5->rx_pending > 0) {
if (*ptr == SLIP_DELIMITER) {
- BT_ERR("Too short H5 packet");
+ bt_dev_err(hu->hdev, "Too short H5 packet");
h5_reset_rx(h5);
continue;
}
@@ -577,13 +578,13 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
struct h5 *h5 = hu->priv;
if (skb->len > 0xfff) {
- BT_ERR("Packet too long (%u bytes)", skb->len);
+ bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len);
kfree_skb(skb);
return 0;
}
if (h5->state != H5_ACTIVE) {
- BT_ERR("Ignoring HCI data in non-active state");
+ bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state");
kfree_skb(skb);
return 0;
}
@@ -600,7 +601,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
break;
default:
- BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
+ bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb));
kfree_skb(skb);
break;
}
@@ -656,7 +657,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
int i;
if (!valid_packet_type(pkt_type)) {
- BT_ERR("Unknown packet type %u", pkt_type);
+ bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type);
return NULL;
}
@@ -733,7 +734,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
}
skb_queue_head(&h5->unrel, skb);
- BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
}
spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
@@ -753,7 +754,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
}
skb_queue_head(&h5->rel, skb);
- BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
}
unlock:
@@ -785,7 +786,6 @@ static const struct hci_uart_proto h5p = {
static int h5_serdev_probe(struct serdev_device *serdev)
{
- const struct acpi_device_id *match;
struct device *dev = &serdev->dev;
struct h5 *h5;
@@ -800,6 +800,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
serdev_device_set_drvdata(serdev, h5);
if (has_acpi_companion(dev)) {
+ const struct acpi_device_id *match;
+
match = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!match)
return -ENODEV;
@@ -810,8 +812,17 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (h5->vnd->acpi_gpio_map)
devm_acpi_dev_add_driver_gpios(dev,
h5->vnd->acpi_gpio_map);
+ } else {
+ const void *data;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ h5->vnd = (const struct h5_vnd *)data;
}
+
h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(h5->enable_gpio))
return PTR_ERR(h5->enable_gpio);
@@ -1003,6 +1014,15 @@ static const struct dev_pm_ops h5_serdev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
};
+static const struct of_device_id rtl_bluetooth_of_match[] = {
+#ifdef CONFIG_BT_HCIUART_RTL
+ { .compatible = "realtek,rtl8822cs-bt",
+ .data = (const void *)&rtl_vnd },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match);
+
static struct serdev_device_driver h5_serdev_driver = {
.probe = h5_serdev_probe,
.remove = h5_serdev_remove,
@@ -1010,6 +1030,7 @@ static struct serdev_device_driver h5_serdev_driver = {
.name = "hci_uart_h5",
.acpi_match_table = ACPI_PTR(h5_acpi_match),
.pm = &h5_serdev_pm_ops,
+ .of_match_table = rtl_bluetooth_of_match,
},
};
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 31f25153087d..f1299da6eed8 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -49,7 +49,7 @@
struct hci_lpm_pkt {
__u8 opcode;
__u8 dlen;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct intel_device {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index d6e0c99ee5eb..439392b1c043 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
+#include <linux/mutex.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -69,7 +70,8 @@ enum qca_flags {
QCA_IBS_ENABLED,
QCA_DROP_VENDOR_EVENT,
QCA_SUSPENDING,
- QCA_MEMDUMP_COLLECTION
+ QCA_MEMDUMP_COLLECTION,
+ QCA_HW_ERROR_EVENT
};
@@ -138,18 +140,19 @@ struct qca_data {
u32 tx_idle_delay;
struct timer_list wake_retrans_timer;
u32 wake_retrans;
- struct timer_list memdump_timer;
struct workqueue_struct *workqueue;
struct work_struct ws_awake_rx;
struct work_struct ws_awake_device;
struct work_struct ws_rx_vote_off;
struct work_struct ws_tx_vote_off;
struct work_struct ctrl_memdump_evt;
+ struct delayed_work ctrl_memdump_timeout;
struct qca_memdump_data *qca_memdump;
unsigned long flags;
struct completion drop_ev_comp;
wait_queue_head_t suspend_wait_q;
enum qca_memdump_states memdump_state;
+ struct mutex hci_memdump_lock;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -522,23 +525,28 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
hci_uart_tx_wakeup(hu);
}
-static void hci_memdump_timeout(struct timer_list *t)
+
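+/*
+ * The memdump timeout handler sleeps on hci_memdump_lock (a mutex), so it
+ * runs as delayed work on the ordered workqueue instead of from atomic
+ * timer context.
+ */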
+static void qca_controller_memdump_timeout(struct work_struct *work)
{
- struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ctrl_memdump_timeout.work);
struct hci_uart *hu = qca->hu;
- struct qca_memdump_data *qca_memdump = qca->qca_memdump;
- char *memdump_buf = qca_memdump->memdump_buf_tail;
-
- bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
- /* Inject hw error event to reset the device and driver. */
- hci_reset_dev(hu->hdev);
- vfree(memdump_buf);
- kfree(qca_memdump);
- qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
- del_timer(&qca->memdump_timer);
- cancel_work_sync(&qca->ctrl_memdump_evt);
+
+ mutex_lock(&qca->hci_memdump_lock);
+ if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
+ /* Inject hw error event to reset the device
+ * and driver.
+ */
+ hci_reset_dev(hu->hdev);
+ }
+ }
+
+ mutex_unlock(&qca->hci_memdump_lock);
}
+
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
@@ -558,6 +566,7 @@ static int qca_open(struct hci_uart *hu)
skb_queue_head_init(&qca->tx_wait_q);
skb_queue_head_init(&qca->rx_memdump_q);
spin_lock_init(&qca->hci_ibs_lock);
+ mutex_init(&qca->hci_memdump_lock);
qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
if (!qca->workqueue) {
BT_ERR("QCA Workqueue not initialized properly");
@@ -570,6 +579,8 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
+ INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
+ qca_controller_memdump_timeout);
init_waitqueue_head(&qca->suspend_wait_q);
qca->hu = hu;
@@ -596,7 +607,6 @@ static int qca_open(struct hci_uart *hu)
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
- timer_setup(&qca->memdump_timer, hci_memdump_timeout, 0);
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -677,7 +687,6 @@ static int qca_close(struct hci_uart *hu)
skb_queue_purge(&qca->rx_memdump_q);
del_timer(&qca->tx_idle_timer);
del_timer(&qca->wake_retrans_timer);
- del_timer(&qca->memdump_timer);
destroy_workqueue(qca->workqueue);
qca->hu = NULL;
@@ -963,11 +972,20 @@ static void qca_controller_memdump(struct work_struct *work)
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
+ mutex_lock(&qca->hci_memdump_lock);
+ /* Skip processing the received packets if timeout detected. */
+ if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT) {
+ mutex_unlock(&qca->hci_memdump_lock);
+ return;
+ }
+
if (!qca_memdump) {
qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
GFP_ATOMIC);
- if (!qca_memdump)
+ if (!qca_memdump) {
+ mutex_unlock(&qca->hci_memdump_lock);
return;
+ }
qca->qca_memdump = qca_memdump;
}
@@ -992,13 +1010,15 @@ static void qca_controller_memdump(struct work_struct *work)
if (!(dump_size)) {
bt_dev_err(hu->hdev, "Rx invalid memdump size");
kfree_skb(skb);
+ mutex_unlock(&qca->hci_memdump_lock);
return;
}
bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
dump_size);
- mod_timer(&qca->memdump_timer, (jiffies +
- msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)));
+ queue_delayed_work(qca->workqueue,
+ &qca->ctrl_memdump_timeout,
+ msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
skb_pull(skb, sizeof(dump_size));
memdump_buf = vmalloc(dump_size);
@@ -1016,6 +1036,7 @@ static void qca_controller_memdump(struct work_struct *work)
kfree(qca_memdump);
kfree_skb(skb);
qca->qca_memdump = NULL;
+ mutex_unlock(&qca->hci_memdump_lock);
return;
}
@@ -1046,16 +1067,20 @@ static void qca_controller_memdump(struct work_struct *work)
memdump_buf = qca_memdump->memdump_buf_head;
dev_coredumpv(&hu->serdev->dev, memdump_buf,
qca_memdump->received_dump, GFP_KERNEL);
- del_timer(&qca->memdump_timer);
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
kfree(qca->qca_memdump);
qca->qca_memdump = NULL;
qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
}
+
+ mutex_unlock(&qca->hci_memdump_lock);
}
}
-int qca_controller_memdump_event(struct hci_dev *hdev, struct sk_buff *skb)
+static int qca_controller_memdump_event(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
@@ -1406,30 +1431,21 @@ static void qca_wait_for_dump_collection(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
- struct qca_memdump_data *qca_memdump = qca->qca_memdump;
- char *memdump_buf = NULL;
wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
- if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
- bt_dev_err(hu->hdev, "Clearing the buffers due to timeout");
- if (qca_memdump)
- memdump_buf = qca_memdump->memdump_buf_tail;
- vfree(memdump_buf);
- kfree(qca_memdump);
- qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
- del_timer(&qca->memdump_timer);
- cancel_work_sync(&qca->ctrl_memdump_evt);
- }
}
static void qca_hw_error(struct hci_dev *hdev, u8 code)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ char *memdump_buf = NULL;
+ set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
@@ -1449,6 +1465,23 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
bt_dev_info(hdev, "waiting for dump to complete");
qca_wait_for_dump_collection(hdev);
}
+
+ if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
+ bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
+ mutex_lock(&qca->hci_memdump_lock);
+ if (qca_memdump)
+ memdump_buf = qca_memdump->memdump_buf_head;
+ vfree(memdump_buf);
+ kfree(qca_memdump);
+ qca->qca_memdump = NULL;
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
+ skb_queue_purge(&qca->rx_memdump_q);
+ mutex_unlock(&qca->hci_memdump_lock);
+ cancel_work_sync(&qca->ctrl_memdump_evt);
+ }
+
+ clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
static void qca_cmd_timeout(struct hci_dev *hdev)
@@ -1529,9 +1562,11 @@ static int qca_power_on(struct hci_dev *hdev)
ret = qca_wcn3990_init(hu);
} else {
qcadev = serdev_device_get_drvdata(hu->serdev);
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
- /* Controller needs time to bootup. */
- msleep(150);
+ if (qcadev->bt_en) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ /* Controller needs time to boot up. */
+ msleep(150);
+ }
}
return ret;
@@ -1717,7 +1752,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hu, false);
qca_regulator_disable(qcadev);
- } else {
+ } else if (qcadev->bt_en) {
gpiod_set_value_cansleep(qcadev->bt_en, 0);
}
}
@@ -1726,9 +1761,11 @@ static int qca_power_off(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
/* Stop sending shutdown command if soc crashes. */
- if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ if (qca_is_wcn399x(soc_type)
+ && qca->memdump_state == QCA_MEMDUMP_IDLE) {
qca_send_pre_shutdown_cmd(hdev);
usleep_range(8000, 10000);
}
@@ -1755,7 +1792,11 @@ static int qca_regulator_enable(struct qca_serdev *qcadev)
power->vregs_on = true;
- return 0;
+ ret = clk_prepare_enable(qcadev->susclk);
+ if (ret)
+ qca_regulator_disable(qcadev);
+
+ return ret;
}
static void qca_regulator_disable(struct qca_serdev *qcadev)
@@ -1773,6 +1814,8 @@ static void qca_regulator_disable(struct qca_serdev *qcadev)
regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
power->vregs_on = false;
+
+ clk_disable_unprepare(qcadev->susclk);
}
static int qca_init_regulators(struct qca_power *qca,
@@ -1811,6 +1854,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
struct hci_dev *hdev;
const struct qca_vreg_data *data;
int err;
+ bool power_ctrl_enabled = true;
qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
if (!qcadev)
@@ -1839,6 +1883,12 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_power->vregs_on = false;
+ qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ if (IS_ERR(qcadev->susclk)) {
+ dev_err(&serdev->dev, "failed to acquire clk\n");
+ return PTR_ERR(qcadev->susclk);
+ }
+
device_property_read_u32(&serdev->dev, "max-speed",
&qcadev->oper_speed);
if (!qcadev->oper_speed)
@@ -1851,38 +1901,40 @@ static int qca_serdev_probe(struct serdev_device *serdev)
}
} else {
qcadev->btsoc_type = QCA_ROME;
- qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
+ qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en)) {
- dev_err(&serdev->dev, "failed to acquire enable gpio\n");
- return PTR_ERR(qcadev->bt_en);
+ if (!qcadev->bt_en) {
+ dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
+ power_ctrl_enabled = false;
}
- qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
- if (IS_ERR(qcadev->susclk)) {
- dev_err(&serdev->dev, "failed to acquire clk\n");
- return PTR_ERR(qcadev->susclk);
- }
-
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
+ qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ if (!qcadev->susclk) {
+ dev_warn(&serdev->dev, "failed to acquire clk\n");
+ } else {
+ err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
+ if (err)
+ return err;
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
+ err = clk_prepare_enable(qcadev->susclk);
+ if (err)
+ return err;
+ }
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("Rome serdev registration failed");
- clk_disable_unprepare(qcadev->susclk);
+ if (qcadev->susclk)
+ clk_disable_unprepare(qcadev->susclk);
return err;
}
}
- hdev = qcadev->serdev_hu.hdev;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
- hdev->shutdown = qca_power_off;
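+	/*
+	 * Without an enable GPIO the controller cannot be power-cycled by
+	 * the host, so skip the non-persistent-setup quirk and the
+	 * shutdown hook.
+	 */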
+ if (power_ctrl_enabled) {
+ hdev = qcadev->serdev_hu.hdev;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hdev->shutdown = qca_power_off;
+ }
return 0;
}
@@ -1893,7 +1945,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
if (qca_is_wcn399x(qcadev->btsoc_type))
qca_power_shutdown(&qcadev->serdev_hu);
- else
+ else if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
hci_uart_unregister_device(&qcadev->serdev_hu);
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6095b6df8a81..6d4e4497b59b 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -201,5 +201,6 @@ config DA8XX_MSTPRI
peripherals.
source "drivers/bus/fsl-mc/Kconfig"
+source "drivers/bus/mhi/Kconfig"
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 1320bcf9fa9d..05f32cd694a4 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -34,3 +34,6 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
+
+# MHI
+obj-$(CONFIG_MHI_BUS) += mhi/
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index c78d10ea641f..40526da5c6a6 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -26,6 +26,8 @@
*/
#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+static struct fsl_mc_version mc_version;
+
/**
* struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
* @root_mc_bus_dev: fsl-mc device representing the root DPRC
@@ -55,20 +57,6 @@ struct fsl_mc_addr_translation_range {
};
/**
- * struct mc_version
- * @major: Major version number: incremented on API compatibility changes
- * @minor: Minor version number: incremented on API additions (that are
- * backward compatible); reset when major version is incremented
- * @revision: Internal revision number: incremented on implementation changes
- * and/or bug fixes that have no impact on API
- */
-struct mc_version {
- u32 major;
- u32 minor;
- u32 revision;
-};
-
-/**
* fsl_mc_bus_match - device to driver matching callback
* @dev: the fsl-mc device to match against
* @drv: the device driver to search for matching fsl-mc object type
@@ -338,7 +326,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
*/
static int mc_get_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
- struct mc_version *mc_ver_info)
+ struct fsl_mc_version *mc_ver_info)
{
struct fsl_mc_command cmd = { 0 };
struct dpmng_rsp_get_version *rsp_params;
@@ -364,6 +352,20 @@ static int mc_get_version(struct fsl_mc_io *mc_io,
}
/**
+ * fsl_mc_get_version - function to retrieve the MC f/w version information
+ *
+ * Return: mc version when called after fsl-mc-bus probe; NULL otherwise.
+ */
+struct fsl_mc_version *fsl_mc_get_version(void)
+{
+ if (mc_version.major)
+ return &mc_version;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_version);
+
+/**
* fsl_mc_get_root_dprc - function to traverse to the root dprc
*/
static void fsl_mc_get_root_dprc(struct device *dev,
@@ -862,7 +864,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
int container_id;
phys_addr_t mc_portal_phys_addr;
u32 mc_portal_size;
- struct mc_version mc_version;
struct resource res;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 8101df901830..378f5d62a991 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
}
/*
+ * Released firmware describes the IO port max address as 0x3fff, which is
+ * the max host bus address. Fix it up to a proper range. This will probably
+ * never be fixed in firmware.
+ */
+static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
+ struct resource *r)
+{
+ if (r->end != 0x3fff)
+ return;
+
+ if (r->start == 0xe4)
+ r->end = 0xe4 + 0x04 - 1;
+ else if (r->start == 0x2f8)
+ r->end = 0x2f8 + 0x08 - 1;
+ else
+ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
+ r);
+}
+
+/*
* hisi_lpc_acpi_set_io_res - set the resources for a child
* @child: the device node to be updated the I/O resource
* @hostdev: the device node associated with host controller
@@ -418,8 +438,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
return -ENOMEM;
}
count = 0;
- list_for_each_entry(rentry, &resource_list, node)
- resources[count++] = *rentry->res;
+ list_for_each_entry(rentry, &resource_list, node) {
+ resources[count] = *rentry->res;
+ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
+ count++;
+ }
acpi_dev_free_resource_list(&resource_list);
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
new file mode 100644
index 000000000000..a8bd9bd7db7c
--- /dev/null
+++ b/drivers/bus/mhi/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+#
+
+config MHI_BUS
+ tristate "Modem Host Interface (MHI) bus"
+ help
+ Bus driver for the MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by host processors to control
+ and communicate with modem devices over a high-speed peripheral
+ bus or shared memory.
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
new file mode 100644
index 000000000000..19e6443b72df
--- /dev/null
+++ b/drivers/bus/mhi/Makefile
@@ -0,0 +1,2 @@
+# core layer
+obj-y += core/
diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
new file mode 100644
index 000000000000..66e2700c9032
--- /dev/null
+++ b/drivers/bus/mhi/core/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MHI_BUS) := mhi.o
+
+mhi-y := init.o main.o pm.o boot.o
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
new file mode 100644
index 000000000000..ebad5eb48e5a
--- /dev/null
+++ b/drivers/bus/mhi/core/boot.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/* Setup RDDM vector table for RDDM transfer and program RXVEC */
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
+{
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 sequence_id;
+ unsigned int i;
+
+ for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = mhi_buf->len;
+ }
+
+ dev_dbg(dev, "BHIe programming for RDDM\n");
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+ sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
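+	/* Zero is presumably an invalid sequence ID, so force it nonzero. */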
+ if (unlikely(!sequence_id))
+ sequence_id = 1;
+
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+ sequence_id);
+
+ dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
+ &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+}
+
+/* Collect RDDM buffer during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ u32 rx_status;
+ enum mhi_ee_type ee;
+ const u32 delayus = 2000;
+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ const u32 rddm_timeout_us = 200000;
+ int rddm_retry = rddm_timeout_us / delayus;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /*
+ * This should only execute during a kernel panic; we expect all
+ * other cores to shut down while we're collecting the RDDM buffer.
+ * After returning from this function, we expect the device to reset.
+ *
+ * Normally, we read/write pm_state only after grabbing the
+ * pm_lock; since we're in a panic, we skip it. Also, there is no
+ * guarantee that this state change will take effect, since
+ * we're setting it without holding pm_lock.
+ */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ /* update should take the effect immediately */
+ smp_wmb();
+
+ /*
+ * Make sure device is not already in RDDM. In case the device asserts
+ * and a kernel panic follows, device will already be in RDDM.
+ * Do not trigger SYS ERR again and proceed with waiting for
+ * image download completion.
+ */
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee != MHI_EE_RDDM) {
+ dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ dev_dbg(dev, "Waiting for device to enter RDDM\n");
+ while (rddm_retry--) {
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_RDDM)
+ break;
+
+ udelay(delayus);
+ }
+
+ if (rddm_retry <= 0) {
+ /* Hardware reset so force device to enter RDDM */
+ dev_dbg(dev,
+ "Did not enter RDDM, do a host req reset\n");
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+ MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
+ TO_MHI_EXEC_STR(ee));
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status);
+ if (ret)
+ return -EIO;
+
+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
+ return 0;
+
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+ dev_err(dev, "Did not complete RDDM transfer\n");
+ dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
+ dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
+
+ return -EIO;
+}
+
+/* Download RDDM image from device */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ u32 rx_status;
+
+ if (in_panic)
+ return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+ /* Wait for the image download to complete */
+ wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status) || rx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+EXPORT_SYMBOL_GPL(mhi_download_rddm_img);
+
+static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
+ const struct mhi_buf *mhi_buf)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ u32 tx_status, sequence_id;
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ return -EIO;
+ }
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+
+ sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
+ sequence_id);
+ read_unlock_bh(pm_lock);
+
+ /* Wait for the image download to complete */
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ BHIE_TXVECSTATUS_STATUS_SHFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+
+static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 tx_status, val, session_id;
+ int i, ret;
+ void __iomem *base = mhi_cntrl->bhi;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
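+	/* The NULL-name entry terminates the error register dump loop below. */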
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ dev_dbg(dev, "Starting SBL download via BHI\n");
+ mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+ upper_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+ lower_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
+ read_unlock_bh(pm_lock);
+
+ /* Wait for the image download to complete */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
+ BHI_STATUS_MASK, BHI_STATUS_SHIFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ goto invalid_pm_state;
+
+ if (tx_status == BHI_STATUS_ERROR) {
+ dev_err(dev, "Image transfer failed\n");
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base,
+ error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n",
+ error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ return (!ret) ? -ETIMEDOUT : 0;
+
+invalid_pm_state:
+
+ return -EIO;
+}
+
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ int i;
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ for (i = 0; i < image_info->entries; i++, mhi_buf++)
+ mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+ mhi_buf->dma_addr);
+
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
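+/*
+ * A sketch of the segment math: with alloc_size = 1 MiB and seg_len =
+ * 512 KiB, segments = DIV_ROUND_UP(1 MiB, 512 KiB) + 1 = 3; the first two
+ * entries carry image data and the final entry holds the vector table
+ * itself (sizeof(struct bhi_vec_entry) * 2 bytes).
+ */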
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ size_t seg_size = mhi_cntrl->seg_len;
+ int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+ int i;
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entries */
+ img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+ GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+ for (i = 0; i < segments; i++, mhi_buf++) {
+ size_t vec_size = seg_size;
+
+ /* Vector table is the last entry */
+ if (i == segments - 1)
+ vec_size = sizeof(struct bhi_vec_entry) * i;
+
+ mhi_buf->len = vec_size;
+ mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
+ &mhi_buf->dma_addr,
+ GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+ }
+
+ img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+ img_info->entries = segments;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+ mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+ mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+ const struct firmware *firmware,
+ struct image_info *img_info)
+{
+ size_t remainder = firmware->size;
+ size_t to_cpy;
+ const u8 *buf = firmware->data;
+ int i = 0;
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+ while (remainder) {
+ to_cpy = min(remainder, mhi_buf->len);
+ memcpy(mhi_buf->buf, buf, to_cpy);
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = to_cpy;
+
+ buf += to_cpy;
+ remainder -= to_cpy;
+ i++;
+ bhi_vec++;
+ mhi_buf++;
+ }
+}
+
+void mhi_fw_load_worker(struct work_struct *work)
+{
+ struct mhi_controller *mhi_cntrl;
+ const struct firmware *firmware = NULL;
+ struct image_info *image_info;
+ struct device *dev;
+ const char *fw_name;
+ void *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+ int ret;
+
+ mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
+ dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_PBL(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device MHI is not in valid state\n");
+ return;
+ }
+
+ /* If the device is in pass-through, do the reset-to-ready state transition */
+ if (mhi_cntrl->ee == MHI_EE_PTHRU)
+ goto fw_load_ee_pthru;
+
+ fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+
+ if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
+ !mhi_cntrl->seg_len))) {
+ dev_err(dev,
+ "No firmware image defined or !sbl_size || !seg_len\n");
+ return;
+ }
+
+ ret = request_firmware(&firmware, fw_name, dev);
+ if (ret) {
+ dev_err(dev, "Error loading firmware: %d\n", ret);
+ return;
+ }
+
+ size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
+
+ /* SBL size provided is maximum size, not necessarily the image size */
+ if (size > firmware->size)
+ size = firmware->size;
+
+ buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
+ if (!buf) {
+ release_firmware(firmware);
+ return;
+ }
+
+ /* Download SBL image */
+ memcpy(buf, firmware->data, size);
+ ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
+ mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
+
+ if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
+ release_firmware(firmware);
+
+ /* Error or in EDL mode, we're done */
+ if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+ return;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /*
+ * If we're doing fbc, populate the vector tables while the
+ * device transitions into the MHI READY state
+ */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
+ firmware->size);
+ if (ret)
+ goto error_alloc_fw_table;
+
+ /* Load the firmware into BHIE vec table */
+ mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
+ }
+
+fw_load_ee_pthru:
+ /* Transitioning into MHI RESET->READY state */
+ ret = mhi_ready_state_transition(mhi_cntrl);
+
+ if (!mhi_cntrl->fbc_download)
+ return;
+
+ if (ret)
+ goto error_read;
+
+ /* Wait for the SBL event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_SBL ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "MHI did not enter SBL\n");
+ goto error_read;
+ }
+
+ /* Start full firmware image download */
+ image_info = mhi_cntrl->fbc_image;
+ ret = mhi_fw_load_amss(mhi_cntrl,
+ /* Vector table is the last entry */
+ &image_info->mhi_buf[image_info->entries - 1]);
+
+ release_firmware(firmware);
+
+ return;
+
+error_read:
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+
+error_alloc_fw_table:
+ release_firmware(firmware);
+}
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
new file mode 100644
index 000000000000..1f8c82603179
--- /dev/null
+++ b/drivers/bus/mhi/core/init.c
@@ -0,0 +1,1294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+ [MHI_EE_PBL] = "PBL",
+ [MHI_EE_SBL] = "SBL",
+ [MHI_EE_AMSS] = "AMSS",
+ [MHI_EE_RDDM] = "RDDM",
+ [MHI_EE_WFW] = "WFW",
+ [MHI_EE_PTHRU] = "PASS THRU",
+ [MHI_EE_EDL] = "EDL",
+ [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+ [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+};
+
+const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
+ [DEV_ST_TRANSITION_PBL] = "PBL",
+ [DEV_ST_TRANSITION_READY] = "READY",
+ [DEV_ST_TRANSITION_SBL] = "SBL",
+ [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
+};
+
+const char * const mhi_state_str[MHI_STATE_MAX] = {
+ [MHI_STATE_RESET] = "RESET",
+ [MHI_STATE_READY] = "READY",
+ [MHI_STATE_M0] = "M0",
+ [MHI_STATE_M1] = "M1",
+ [MHI_STATE_M2] = "M2",
+ [MHI_STATE_M3] = "M3",
+ [MHI_STATE_M3_FAST] = "M3_FAST",
+ [MHI_STATE_BHI] = "BHI",
+ [MHI_STATE_SYS_ERR] = "SYS_ERR",
+};
+
+static const char * const mhi_pm_state_str[] = {
+ [MHI_PM_STATE_DISABLE] = "DISABLE",
+ [MHI_PM_STATE_POR] = "POR",
+ [MHI_PM_STATE_M0] = "M0",
+ [MHI_PM_STATE_M2] = "M2",
+ [MHI_PM_STATE_M3_ENTER] = "M?->M3",
+ [MHI_PM_STATE_M3] = "M3",
+ [MHI_PM_STATE_M3_EXIT] = "M3->M0",
+ [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
+ [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
+ [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
+ [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+ [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+};
+
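+/*
+ * Each mhi_pm_state value is a single-bit mask, so the position of the set
+ * bit doubles as the index into mhi_pm_state_str above.
+ */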
+const char *to_mhi_pm_state_str(enum mhi_pm_state state)
+{
+ int index = find_last_bit((unsigned long *)&state, 32);
+
+ if (index >= ARRAY_SIZE(mhi_pm_state_str))
+ return "Invalid State";
+
+ return mhi_pm_state_str[index];
+}
+
+/* MHI protocol requires the transfer ring to be aligned with ring length */
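+/*
+ * Worked example, assuming a power-of-two ring length: for len = 0x1000
+ * and dma_handle = 0x80001234, iommu_base = (0x80001234 + 0xfff) & ~0xfff
+ * = 0x80002000, the first aligned address inside the len + (len - 1) byte
+ * allocation, and ring->base is the CPU address at the same offset.
+ */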
+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring,
+ u64 len)
+{
+ ring->alloc_size = len + (len - 1);
+ ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
+ &ring->dma_handle, GFP_KERNEL);
+ if (!ring->pre_aligned)
+ return -ENOMEM;
+
+ ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+
+ return 0;
+}
+
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+}
+
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ /* Setup BHI_INTVEC IRQ */
+ ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
+ mhi_intvec_threaded_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "bhi", mhi_cntrl);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
+ mhi_irq_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "mhi", mhi_event);
+ if (ret) {
+ dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ mhi_cntrl->irq[mhi_event->irq], i);
+ goto error_request;
+ }
+ }
+
+ return 0;
+
+error_request:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+
+ return ret;
+}
+
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event *mhi_event;
+ struct mhi_ring *ring;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+ ring = &mhi_cmd->ring;
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ mhi_free_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring = &mhi_event->ring;
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+ kfree(mhi_ctxt);
+ mhi_cntrl->mhi_ctxt = NULL;
+}
+
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_ctxt *mhi_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ u32 tmp;
+ int ret = -ENOMEM, i;
+
+ atomic_set(&mhi_cntrl->dev_wake, 0);
+ atomic_set(&mhi_cntrl->pending_pkts, 0);
+
+ mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+ if (!mhi_ctxt)
+ return -ENOMEM;
+
+ /* Setup channel ctxt */
+ mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan,
+ &mhi_ctxt->chan_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->chan_ctxt)
+ goto error_alloc_chan_ctxt;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ /* Skip if it is an offload channel */
+ if (mhi_chan->offload_ch)
+ continue;
+
+ tmp = chan_ctxt->chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
+ tmp &= ~CHAN_CTX_BRSTMODE_MASK;
+ tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
+ tmp &= ~CHAN_CTX_POLLCFG_MASK;
+ tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
+ chan_ctxt->chcfg = tmp;
+
+ chan_ctxt->chtype = mhi_chan->type;
+ chan_ctxt->erindex = mhi_chan->er_index;
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
+ }
+
+ /* Setup event context */
+ mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings,
+ &mhi_ctxt->er_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->er_ctxt)
+ goto error_alloc_er_ctxt;
+
+ er_ctxt = mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if it is an offload event */
+ if (mhi_event->offload_ev)
+ continue;
+
+ tmp = er_ctxt->intmod;
+ tmp &= ~EV_CTX_INTMODC_MASK;
+ tmp &= ~EV_CTX_INTMODT_MASK;
+ tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
+ er_ctxt->intmod = tmp;
+
+ er_ctxt->ertype = MHI_ER_TYPE_VALID;
+ er_ctxt->msivec = mhi_event->irq;
+ mhi_event->db_cfg.db_mode = true;
+
+ ring->el_size = sizeof(struct mhi_tre);
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_er;
+
+ /*
+ * If the read pointer equals the write pointer, then the
+ * ring is empty
+ */
+ ring->rp = ring->wp = ring->base;
+ er_ctxt->rbase = ring->iommu_base;
+ er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
+ er_ctxt->rlen = ring->len;
+ ring->ctxt_wp = &er_ctxt->wp;
+ }
+
+ /* Setup cmd context */
+ ret = -ENOMEM;
+ mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) *
+ NR_OF_CMD_RINGS,
+ &mhi_ctxt->cmd_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->cmd_ctxt)
+ goto error_alloc_er;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->el_size = sizeof(struct mhi_tre);
+ ring->elements = CMD_EL_PER_RING;
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_cmd;
+
+ ring->rp = ring->wp = ring->base;
+ cmd_ctxt->rbase = ring->iommu_base;
+ cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
+ cmd_ctxt->rlen = ring->len;
+ ring->ctxt_wp = &cmd_ctxt->wp;
+ }
+
+ mhi_cntrl->mhi_ctxt = mhi_ctxt;
+
+ return 0;
+
+error_alloc_cmd:
+ for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ mhi_free_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+ i = mhi_cntrl->total_ev_rings;
+ mhi_event = mhi_cntrl->mhi_event + i;
+
+error_alloc_er:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+error_alloc_er_ctxt:
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+error_alloc_chan_ctxt:
+ kfree(mhi_ctxt);
+
+ return ret;
+}
+
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+{
+ u32 val;
+ int i, ret;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ void __iomem *base = mhi_cntrl->regs;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 val;
+ } reg_info[] = {
+ {
+ CCABAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ CCABAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ ECABAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ ECABAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ CRCBAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ CRCBAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
+ mhi_cntrl->total_ev_rings,
+ },
+ {
+ MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
+ mhi_cntrl->hw_ev_rings,
+ },
+ {
+ MHICTRLBASE_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLBASE_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLLIMIT_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHICTRLLIMIT_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ { 0, 0, 0 }
+ };
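+	/* The trailing zero-offset entry terminates the MMIO write loop below. */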
+
+ dev_dbg(dev, "Initializing MHI registers\n");
+
+ /* Read channel db offset */
+ ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
+ CHDBOFF_CHDBOFF_SHIFT, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read CHDBOFF register\n");
+ return -EIO;
+ }
+
+ /* Set up the wake db, clearing both 32-bit halves of the doorbell */
+ mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
+ mhi_cntrl->wake_set = false;
+
+ /* Setup channel db address for each channel in tre_ring */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
+ mhi_chan->tre_ring.db_addr = base + val;
+
+ /* Read event ring db offset */
+ ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
+ ERDBOFF_ERDBOFF_SHIFT, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read ERDBOFF register\n");
+ return -EIO;
+ }
+
+ /* Setup event db address for each ev_ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->ring.db_addr = base + val;
+ }
+
+ /* Setup DB register for primary CMD rings */
+ mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
+
+ /* Write to MMIO registers */
+ for (i = 0; reg_info[i].offset; i++)
+ mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
+ reg_info[i].mask, reg_info[i].shift,
+ reg_info[i].val);
+
+ return 0;
+}
+
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ vfree(buf_ring->base);
+
+ buf_ring->base = tre_ring->base = NULL;
+ chan_ctxt->rbase = 0;
+}
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ tre_ring->el_size = sizeof(struct mhi_tre);
+ tre_ring->len = tre_ring->el_size * tre_ring->elements;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
+ if (ret)
+ return -ENOMEM;
+
+ buf_ring->el_size = sizeof(struct mhi_buf_info);
+ buf_ring->len = buf_ring->el_size * buf_ring->elements;
+ buf_ring->base = vzalloc(buf_ring->len);
+
+ if (!buf_ring->base) {
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ return -ENOMEM;
+ }
+
+ tmp = chan_ctxt->chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
+ chan_ctxt->chcfg = tmp;
+
+ chan_ctxt->rbase = tre_ring->iommu_base;
+ chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
+ chan_ctxt->rlen = tre_ring->len;
+ tre_ring->ctxt_wp = &chan_ctxt->wp;
+
+ tre_ring->rp = tre_ring->wp = tre_ring->base;
+ buf_ring->rp = buf_ring->wp = buf_ring->base;
+ mhi_chan->db_cfg.db_mode = 1;
+
+ /* Update to all cores */
+ smp_wmb();
+
+ return 0;
+}
+
+static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_event_config *event_cfg;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, num;
+
+ num = config->num_events;
+ mhi_cntrl->total_ev_rings = num;
+ mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Populate event ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < num; i++) {
+ event_cfg = &config->event_cfg[i];
+
+ mhi_event->er_index = i;
+ mhi_event->ring.elements = event_cfg->num_elements;
+ mhi_event->intmod = event_cfg->irq_moderation_ms;
+ mhi_event->irq = event_cfg->irq;
+
+ if (event_cfg->channel != U32_MAX) {
+ /* This event ring has a dedicated channel */
+ mhi_event->chan = event_cfg->channel;
+ if (mhi_event->chan >= mhi_cntrl->max_chan) {
+ dev_err(dev,
+ "Event Ring channel not available\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->mhi_chan =
+ &mhi_cntrl->mhi_chan[mhi_event->chan];
+ }
+
+ /* Priority is fixed to 1 for now */
+ mhi_event->priority = 1;
+
+ mhi_event->db_cfg.brstmode = event_cfg->mode;
+ if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
+ goto error_ev_cfg;
+
+ if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_event->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_event->data_type = event_cfg->data_type;
+
+ switch (mhi_event->data_type) {
+ case MHI_ER_DATA:
+ mhi_event->process_event = mhi_process_data_event_ring;
+ break;
+ case MHI_ER_CTRL:
+ mhi_event->process_event = mhi_process_ctrl_ev_ring;
+ break;
+ default:
+ dev_err(dev, "Event Ring type not supported\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->hw_ring = event_cfg->hardware_event;
+ if (mhi_event->hw_ring)
+ mhi_cntrl->hw_ev_rings++;
+ else
+ mhi_cntrl->sw_ev_rings++;
+
+ mhi_event->cl_manage = event_cfg->client_managed;
+ mhi_event->offload_ev = event_cfg->offload_channel;
+ mhi_event++;
+ }
+
+ /* We need IRQ for each event ring + additional one for BHI */
+ mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1;
+
+ return 0;
+
+error_ev_cfg:
+
+ kfree(mhi_cntrl->mhi_event);
+ return -EINVAL;
+}
+
+static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config)
+{
+ struct mhi_channel_config *ch_cfg;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i;
+ u32 chan;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * The allocation of MHI channels can exceed 32KB in some scenarios,
+ * so vzalloc is used here to avoid possible allocation failures
+ */
+ mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
+ sizeof(*mhi_cntrl->mhi_chan));
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
+
+ /* Populate channel configurations */
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel %d not available\n", chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+
+ mhi_chan->tre_ring.elements = ch_cfg->num_elements;
+ if (!mhi_chan->tre_ring.elements)
+ goto error_chan_cfg;
+
+ /*
+ * For some channels, the local ring length should be bigger than
+ * the transfer ring length due to internal logical channels in the
+ * device, so the host can queue more buffers than the transfer
+ * ring length allows. RSC channels, for example, need a larger
+ * local ring than transfer ring length.
+ */
+ mhi_chan->buf_ring.elements = ch_cfg->local_elements;
+ if (!mhi_chan->buf_ring.elements)
+ mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
+ mhi_chan->er_index = ch_cfg->event_ring;
+ mhi_chan->dir = ch_cfg->dir;
+
+ /*
+ * For most channels, chtype is identical to the channel direction,
+ * so default chtype to the channel direction when it is not set
+ */
+ mhi_chan->type = ch_cfg->type;
+ if (!mhi_chan->type)
+ mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+ mhi_chan->ee_mask = ch_cfg->ee_mask;
+ mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
+ mhi_chan->lpm_notify = ch_cfg->lpm_notify;
+ mhi_chan->offload_ch = ch_cfg->offload_channel;
+ mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
+ mhi_chan->pre_alloc = ch_cfg->auto_queue;
+ mhi_chan->auto_start = ch_cfg->auto_start;
+
+ /*
+ * If MHI host allocates buffers, then the channel direction
+ * should be DMA_FROM_DEVICE
+ */
+ if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ /*
+ * Bi-directional and direction less channel must be an
+ * offload channel
+ */
+ if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+ mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ if (!mhi_chan->offload_ch) {
+ mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
+ if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
+ dev_err(dev, "Invalid Door bell mode\n");
+ goto error_chan_cfg;
+ }
+ }
+
+ if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_chan->configured = true;
+
+ if (mhi_chan->lpm_notify)
+ list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return -EINVAL;
+}
+
+static int parse_config(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config)
+{
+ int ret;
+
+ /* Parse MHI channel configuration */
+ ret = parse_ch_cfg(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ /* Parse MHI event configuration */
+ ret = parse_ev_cfg(mhi_cntrl, config);
+ if (ret)
+ goto error_ev_cfg;
+
+ mhi_cntrl->timeout_ms = config->timeout_ms;
+ if (!mhi_cntrl->timeout_ms)
+ mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+
+ mhi_cntrl->bounce_buf = config->use_bounce_buf;
+ mhi_cntrl->buffer_len = config->buf_len;
+ if (!mhi_cntrl->buffer_len)
+ mhi_cntrl->buffer_len = MHI_MAX_MTU;
+
+ /* By default, host is allowed to ring DB in both M0 and M2 states */
+ mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
+ if (config->m2_no_db)
+ mhi_cntrl->db_access &= ~MHI_PM_M2;
+
+ return 0;
+
+error_ev_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_chan *mhi_chan;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_device *mhi_dev;
+ u32 soc_info;
+ int ret, i;
+
+ if (!mhi_cntrl)
+ return -EINVAL;
+
+ if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+ !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
+ !mhi_cntrl->write_reg)
+ return -EINVAL;
+
+ ret = parse_config(mhi_cntrl, config);
+ if (ret)
+ return -EINVAL;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
+ sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto error_alloc_cmd;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->transition_list);
+ mutex_init(&mhi_cntrl->pm_mutex);
+ rwlock_init(&mhi_cntrl->pm_lock);
+ spin_lock_init(&mhi_cntrl->transition_lock);
+ spin_lock_init(&mhi_cntrl->wlock);
+ INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+ INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
+ INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
+ init_waitqueue_head(&mhi_cntrl->state_event);
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
+ spin_lock_init(&mhi_cmd->lock);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ /* Skip for offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->mhi_cntrl = mhi_cntrl;
+ spin_lock_init(&mhi_event->lock);
+ if (mhi_event->data_type == MHI_ER_CTRL)
+ tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
+ (ulong)mhi_event);
+ else
+ tasklet_init(&mhi_event->task, mhi_ev_task,
+ (ulong)mhi_event);
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ mutex_init(&mhi_chan->mutex);
+ init_completion(&mhi_chan->completion);
+ rwlock_init(&mhi_chan->lock);
+ }
+
+ if (mhi_cntrl->bounce_buf) {
+ mhi_cntrl->map_single = mhi_map_single_use_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
+ } else {
+ mhi_cntrl->map_single = mhi_map_single_no_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
+ }
+
+ /* Read the MHI device info */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+ SOC_HW_VERSION_OFFS, &soc_info);
+ if (ret)
+ goto error_alloc_dev;
+
+ mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
+ SOC_HW_VERSION_FAM_NUM_SHFT;
+ mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
+ SOC_HW_VERSION_DEV_NUM_SHFT;
+ mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
+ SOC_HW_VERSION_MAJOR_VER_SHFT;
+ mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
+ SOC_HW_VERSION_MINOR_VER_SHFT;
+
+ /* Register controller with MHI bus */
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto error_alloc_dev;
+ }
+
+ mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
+
+ /* Init wakeup source */
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto error_add_dev;
+
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ return 0;
+
+error_add_dev:
+ put_device(&mhi_dev->dev);
+
+error_alloc_dev:
+ kfree(mhi_cntrl->mhi_cmd);
+
+error_alloc_cmd:
+ vfree(mhi_cntrl->mhi_chan);
+ kfree(mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_register_controller);
+
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
+ unsigned int i;
+
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_event);
+
+ /* Drop the references to MHI devices created for channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ put_device(&mhi_chan->mhi_dev->dev);
+ }
+ vfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+}
+EXPORT_SYMBOL_GPL(mhi_unregister_controller);
+
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 bhie_off;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret)
+ goto error_dev_ctxt;
+
+ /*
+ * Allocate the RDDM table if specified; this table is used for debugging purposes
+ */
+ if (mhi_cntrl->rddm_size) {
+ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+ mhi_cntrl->rddm_size);
+
+ /*
+ * This controller supports RDDM, so we need to manually clear
+ * BHIE RX registers since POR values are undefined.
+ */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+ &bhie_off);
+ if (ret) {
+ dev_err(dev, "Error getting BHIE offset\n");
+ goto bhie_error;
+ }
+
+ mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
+ memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
+ 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
+ 4);
+
+ if (mhi_cntrl->rddm_image)
+ mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+ }
+
+ mhi_cntrl->pre_init = true;
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return 0;
+
+bhie_error:
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
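+
+/*
+ * Typical controller bring-up, as an illustrative sketch: error handling
+ * is elided, and mhi_sync_power_up()/mhi_power_down() live in the power
+ * management part of this driver rather than in this file.
+ *
+ * ret = mhi_register_controller(mhi_cntrl, config);
+ * ret = mhi_prepare_for_power_up(mhi_cntrl);
+ * ret = mhi_sync_power_up(mhi_cntrl);
+ * ...
+ * mhi_power_down(mhi_cntrl, true);
+ * mhi_unprepare_after_power_down(mhi_cntrl);
+ * mhi_unregister_controller(mhi_cntrl);
+ */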
+
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+ mhi_cntrl->pre_init = false;
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
+
+static void mhi_release_device(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ /*
+ * We need to set mhi_chan->mhi_dev to NULL here since the MHI
+ * devices for the channels will only get created if the mhi_dev
+ * associated with them is NULL. This scenario occurs during
+ * controller suspend and resume.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_bus_type;
+ dev->release = mhi_release_device;
+ dev->parent = mhi_cntrl->cntrl_dev;
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_wake = 0;
+
+ return mhi_dev;
+}
+
+static int mhi_driver_probe(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct device_driver *drv = dev->driver;
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ struct mhi_event *mhi_event;
+ struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+ int ret;
+
+ /* Bring device out of LPM */
+ ret = mhi_device_get_sync(mhi_dev);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ if (ul_chan) {
+ /*
+ * If channel supports LPM notifications then status_cb should
+ * be provided
+ */
+ if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ /* For non-offload channels, an xfer_cb should be provided */
+ if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+ goto exit_probe;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ if (ul_chan->auto_start) {
+ ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
+ if (ret)
+ goto exit_probe;
+ }
+ }
+
+ ret = -EINVAL;
+ if (dl_chan) {
+ /*
+ * If channel supports LPM notifications then status_cb should
+ * be provided
+ */
+ if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ /* For non-offload channels, an xfer_cb should be provided */
+ if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+ goto exit_probe;
+
+ mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+ /*
+ * If the channel event ring is managed by the client, then
+ * status_cb must be provided so that the framework can
+ * notify it of pending data
+ */
+ if (mhi_event->cl_manage && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+ }
+
+ /* Call the user provided probe function */
+ ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+ if (ret)
+ goto exit_probe;
+
+ if (dl_chan && dl_chan->auto_start)
+ mhi_prepare_channel(mhi_cntrl, dl_chan);
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+
+exit_probe:
+ mhi_unprepare_from_transfer(mhi_dev);
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+}
+
+static int mhi_driver_remove(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ enum mhi_ch_state ch_state[] = {
+ MHI_CH_STATE_DISABLED,
+ MHI_CH_STATE_DISABLED
+ };
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Reset both channels */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ /* Wake all threads waiting for completion */
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_EV_CC_INVALID;
+ complete_all(&mhi_chan->completion);
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Set the channel state to disabled */
+ mutex_lock(&mhi_chan->mutex);
+ write_lock_irq(&mhi_chan->lock);
+ ch_state[dir] = mhi_chan->ch_state;
+ mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Reset the non-offload channel */
+ if (!mhi_chan->offload_ch)
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ mhi_drv->remove(mhi_dev);
+
+ /* De-init channel if it was enabled */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->mutex);
+
+ if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
+ !mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ while (mhi_dev->dev_wake)
+ mhi_device_put(mhi_dev);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+}
+
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ driver->bus = &mhi_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_driver_probe;
+ driver->remove = mhi_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_driver_register);
+
+void mhi_driver_unregister(struct mhi_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_driver_unregister);
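+
+/*
+ * Illustrative only (hence guarded out): a minimal client driver for this
+ * bus. The channel name "SAMPLE" and all sample_* identifiers are
+ * hypothetical; they demonstrate the contract enforced by
+ * mhi_driver_probe() above - probe/remove are mandatory, and every
+ * non-offload channel needs an xfer callback. Note the empty terminator
+ * entry that mhi_match() relies on.
+ */
+#if 0
+static int sample_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ return 0;
+}
+
+static void sample_remove(struct mhi_device *mhi_dev)
+{
+}
+
+static void sample_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *result)
+{
+}
+
+static const struct mhi_device_id sample_id_table[] = {
+ { .chan = "SAMPLE" },
+ {}
+};
+
+static struct mhi_driver sample_driver = {
+ .id_table = sample_id_table,
+ .probe = sample_probe,
+ .remove = sample_remove,
+ .ul_xfer_cb = sample_xfer_cb,
+ .dl_xfer_cb = sample_xfer_cb,
+ .driver = {
+ .name = "sample_mhi_client",
+ },
+};
+
+/* Registered from module init: __mhi_driver_register(&sample_driver, THIS_MODULE); */
+#endif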
+
+static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
+ mhi_dev->chan_name);
+}
+
+static int mhi_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->chan_name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_bus_type = {
+ .name = "mhi",
+ .dev_name = "mhi",
+ .match = mhi_match,
+ .uevent = mhi_uevent,
+};
+
+static int __init mhi_init(void)
+{
+ return bus_register(&mhi_bus_type);
+}
+
+static void __exit mhi_exit(void)
+{
+ bus_unregister(&mhi_bus_type);
+}
+
+postcore_initcall(mhi_init);
+module_exit(mhi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Host Interface");
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
new file mode 100644
index 000000000000..095d95bc0e37
--- /dev/null
+++ b/drivers/bus/mhi/core/internal.h
@@ -0,0 +1,684 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef _MHI_INT_H
+#define _MHI_INT_H
+
+#include <linux/mhi.h>
+
+extern struct bus_type mhi_bus_type;
+
+#define MHIREGLEN (0x0)
+#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
+#define MHIREGLEN_MHIREGLEN_SHIFT (0)
+
+#define MHIVER (0x8)
+#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
+#define MHIVER_MHIVER_SHIFT (0)
+
+#define MHICFG (0x10)
+#define MHICFG_NHWER_MASK (0xFF000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xFF0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xFF00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xFF)
+#define MHICFG_NCH_SHIFT (0)
+
+#define CHDBOFF (0x18)
+#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
+#define CHDBOFF_CHDBOFF_SHIFT (0)
+
+#define ERDBOFF (0x20)
+#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
+#define ERDBOFF_ERDBOFF_SHIFT (0)
+
+#define BHIOFF (0x28)
+#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
+#define BHIOFF_BHIOFF_SHIFT (0)
+
+#define BHIEOFF (0x2C)
+#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
+#define BHIEOFF_BHIEOFF_SHIFT (0)
+
+#define DEBUGOFF (0x30)
+#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
+#define DEBUGOFF_DEBUGOFF_SHIFT (0)
+
+#define MHICTRL (0x38)
+#define MHICTRL_MHISTATE_MASK (0x0000FF00)
+#define MHICTRL_MHISTATE_SHIFT (8)
+#define MHICTRL_RESET_MASK (0x2)
+#define MHICTRL_RESET_SHIFT (1)
+
+#define MHISTATUS (0x48)
+#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
+#define MHISTATUS_MHISTATE_SHIFT (8)
+#define MHISTATUS_SYSERR_MASK (0x4)
+#define MHISTATUS_SYSERR_SHIFT (2)
+#define MHISTATUS_READY_MASK (0x1)
+#define MHISTATUS_READY_SHIFT (0)
+
+#define CCABAP_LOWER (0x58)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
+
+#define CCABAP_HIGHER (0x5C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
+
+#define ECABAP_LOWER (0x60)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
+
+#define ECABAP_HIGHER (0x64)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
+
+#define CRCBAP_LOWER (0x68)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
+
+#define CRCBAP_HIGHER (0x6C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
+
+#define CRDB_LOWER (0x70)
+#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
+#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
+
+#define CRDB_HIGHER (0x74)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
+
+#define MHICTRLBASE_LOWER (0x80)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
+
+#define MHICTRLBASE_HIGHER (0x84)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
+
+#define MHICTRLLIMIT_LOWER (0x88)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
+
+#define MHICTRLLIMIT_HIGHER (0x8C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
+
+#define MHIDATABASE_LOWER (0x98)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
+
+#define MHIDATABASE_HIGHER (0x9C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
+
+#define MHIDATALIMIT_LOWER (0xA0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
+
+#define MHIDATALIMIT_HIGHER (0xA4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
+#define MHI_SOC_RESET_REQ BIT(0)
+
+/* MHI BHI offsets */
+#define BHI_BHIVERSION_MINOR (0x00)
+#define BHI_BHIVERSION_MAJOR (0x04)
+#define BHI_IMGADDR_LOW (0x08)
+#define BHI_IMGADDR_HIGH (0x0C)
+#define BHI_IMGSIZE (0x10)
+#define BHI_RSVD1 (0x14)
+#define BHI_IMGTXDB (0x18)
+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHI_TXDB_SEQNUM_SHFT (0)
+#define BHI_RSVD2 (0x1C)
+#define BHI_INTVEC (0x20)
+#define BHI_RSVD3 (0x24)
+#define BHI_EXECENV (0x28)
+#define BHI_STATUS (0x2C)
+#define BHI_ERRCODE (0x30)
+#define BHI_ERRDBG1 (0x34)
+#define BHI_ERRDBG2 (0x38)
+#define BHI_ERRDBG3 (0x3C)
+#define BHI_SERIALNU (0x40)
+#define BHI_SBLANTIROLLVER (0x44)
+#define BHI_NUMSEG (0x48)
+#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
+#define BHI_RSVD5 (0xC4)
+#define BHI_STATUS_MASK (0xC0000000)
+#define BHI_STATUS_SHIFT (30)
+#define BHI_STATUS_ERROR (3)
+#define BHI_STATUS_SUCCESS (2)
+#define BHI_STATUS_RESET (0)
+
+/* MHI BHIE offsets */
+#define BHIE_MSMSOCID_OFFS (0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
+#define BHIE_TXVECSIZE_OFFS (0x0034)
+#define BHIE_TXVECDB_OFFS (0x003C)
+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECDB_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_OFFS (0x0044)
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
+#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
+#define BHIE_RXVECSIZE_OFFS (0x0068)
+#define BHIE_RXVECDB_OFFS (0x0070)
+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECDB_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_OFFS (0x0078)
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
+
+#define SOC_HW_VERSION_OFFS (0x224)
+#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
+#define SOC_HW_VERSION_FAM_NUM_SHFT (28)
+#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
+#define SOC_HW_VERSION_DEV_NUM_SHFT (16)
+#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
+#define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
+#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
+#define SOC_HW_VERSION_MINOR_VER_SHFT (0)
+
+#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
+#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
+#define EV_CTX_INTMODC_SHIFT 8
+#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
+#define EV_CTX_INTMODT_SHIFT 16
+struct mhi_event_ctxt {
+ __u32 intmod;
+ __u32 ertype;
+ __u32 msivec;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
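+
+/*
+ * Layout note: in this and the two context structs below, the 64-bit ring
+ * fields follow three u32s, i.e. they sit at 4-byte-aligned offsets in the
+ * memory shared with the device. __packed __aligned(4) keeps the compiler
+ * from padding them out to their natural 8-byte alignment.
+ */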
+
+#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
+#define CHAN_CTX_CHSTATE_SHIFT 0
+#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
+#define CHAN_CTX_BRSTMODE_SHIFT 8
+#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
+#define CHAN_CTX_POLLCFG_SHIFT 10
+#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
+struct mhi_chan_ctxt {
+ __u32 chcfg;
+ __u32 chtype;
+ __u32 erindex;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+struct mhi_cmd_ctxt {
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+struct mhi_ctxt {
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ dma_addr_t er_ctxt_addr;
+ dma_addr_t chan_ctxt_addr;
+ dma_addr_t cmd_ctxt_addr;
+};
+
+struct mhi_tre {
+ u64 ptr;
+ u32 dword[2];
+};
+
+struct bhi_vec_entry {
+ u64 dma_addr;
+ u64 size;
+};
+
+enum mhi_cmd_type {
+ MHI_CMD_NOP = 1,
+ MHI_CMD_RESET_CHAN = 16,
+ MHI_CMD_STOP_CHAN = 17,
+ MHI_CMD_START_CHAN = 18,
+};
+
+/* No operation command */
+#define MHI_TRE_CMD_NOOP_PTR (0)
+#define MHI_TRE_CMD_NOOP_DWORD0 (0)
+#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16)
+
+/* Channel reset command */
+#define MHI_TRE_CMD_RESET_PTR (0)
+#define MHI_TRE_CMD_RESET_DWORD0 (0)
+#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_RESET_CHAN << 16))
+
+/* Channel stop command */
+#define MHI_TRE_CMD_STOP_PTR (0)
+#define MHI_TRE_CMD_STOP_DWORD0 (0)
+#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_STOP_CHAN << 16))
+
+/* Channel start command */
+#define MHI_TRE_CMD_START_PTR (0)
+#define MHI_TRE_CMD_START_DWORD0 (0)
+#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_START_CHAN << 16))
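+
+/*
+ * Worked example (illustrative): MHI_TRE_CMD_START_DWORD1(5) packs channel
+ * ID 5 into bits 31:24 and MHI_CMD_START_CHAN (18 = 0x12) into bits 23:16,
+ * giving 0x05120000.
+ */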
+
+#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+
+/* Event descriptor macros */
+#define MHI_TRE_EV_PTR(ptr) (ptr)
+#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
+#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
+#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
+#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
+#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
+#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
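+
+/*
+ * Decoding example (illustrative): for an event TRE with dword[1] set to
+ * 0x02220000, MHI_TRE_GET_EV_CHID() extracts channel 0x02 from bits 31:24
+ * and MHI_TRE_GET_EV_TYPE() extracts 0x22 (MHI_PKT_TYPE_TX_EVENT) from
+ * bits 23:16.
+ */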
+
+/* Transfer descriptor macros */
+#define MHI_TRE_DATA_PTR(ptr) (ptr)
+#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
+ | (ieot << 9) | (ieob << 8) | chain)
+
+/* RSC transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
+#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
+
+enum mhi_pkt_type {
+ MHI_PKT_TYPE_INVALID = 0x0,
+ MHI_PKT_TYPE_NOOP_CMD = 0x1,
+ MHI_PKT_TYPE_TRANSFER = 0x2,
+ MHI_PKT_TYPE_COALESCING = 0x8,
+ MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
+ MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
+ MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
+ MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
+ MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
+ MHI_PKT_TYPE_TX_EVENT = 0x22,
+ MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
+ MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+ MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
+ MHI_PKT_TYPE_STALE_EVENT, /* internal event */
+};
+
+/* MHI transfer completion events */
+enum mhi_ev_ccs {
+ MHI_EV_CC_INVALID = 0x0,
+ MHI_EV_CC_SUCCESS = 0x1,
+ MHI_EV_CC_EOT = 0x2, /* End of transfer event */
+ MHI_EV_CC_OVERFLOW = 0x3,
+ MHI_EV_CC_EOB = 0x4, /* End of block event */
+ MHI_EV_CC_OOB = 0x5, /* Out of block event */
+ MHI_EV_CC_DB_MODE = 0x6,
+ MHI_EV_CC_UNDEFINED_ERR = 0x10,
+ MHI_EV_CC_BAD_TRE = 0x11,
+};
+
+enum mhi_ch_state {
+ MHI_CH_STATE_DISABLED = 0x0,
+ MHI_CH_STATE_ENABLED = 0x1,
+ MHI_CH_STATE_RUNNING = 0x2,
+ MHI_CH_STATE_SUSPENDED = 0x3,
+ MHI_CH_STATE_STOP = 0x4,
+ MHI_CH_STATE_ERROR = 0x5,
+};
+
+#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
+ mode != MHI_DB_BRST_ENABLE)
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
+ "INVALID_EE" : mhi_ee_str[ee])
+
+#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
+ ee == MHI_EE_EDL)
+
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)
+
+enum dev_st_transition {
+ DEV_ST_TRANSITION_PBL,
+ DEV_ST_TRANSITION_READY,
+ DEV_ST_TRANSITION_SBL,
+ DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_MAX,
+};
+
+extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
+#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
+ "INVALID_STATE" : dev_state_tran_str[state])
+
+extern const char * const mhi_state_str[MHI_STATE_MAX];
+#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
+ !mhi_state_str[state]) ? \
+ "INVALID_STATE" : mhi_state_str[state])
+
+/* internal power states */
+enum mhi_pm_state {
+ MHI_PM_STATE_DISABLE,
+ MHI_PM_STATE_POR,
+ MHI_PM_STATE_M0,
+ MHI_PM_STATE_M2,
+ MHI_PM_STATE_M3_ENTER,
+ MHI_PM_STATE_M3,
+ MHI_PM_STATE_M3_EXIT,
+ MHI_PM_STATE_FW_DL_ERR,
+ MHI_PM_STATE_SYS_ERR_DETECT,
+ MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SHUTDOWN_PROCESS,
+ MHI_PM_STATE_LD_ERR_FATAL_DETECT,
+ MHI_PM_STATE_MAX
+};
+
+#define MHI_PM_DISABLE BIT(0)
+#define MHI_PM_POR BIT(1)
+#define MHI_PM_M0 BIT(2)
+#define MHI_PM_M2 BIT(3)
+#define MHI_PM_M3_ENTER BIT(4)
+#define MHI_PM_M3 BIT(5)
+#define MHI_PM_M3_EXIT BIT(6)
+/* firmware download failure state */
+#define MHI_PM_FW_DL_ERR BIT(7)
+#define MHI_PM_SYS_ERR_DETECT BIT(8)
+#define MHI_PM_SYS_ERR_PROCESS BIT(9)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+/* link not accessible */
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
+ mhi_cntrl->db_access)
+#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_EXIT))
+#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
+#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+ MHI_PM_IN_ERROR_STATE(pm_state))
+#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
+ (MHI_PM_M3_ENTER | MHI_PM_M3))
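+
+/*
+ * The MHI_PM_* states above are one-hot bit masks, so "is X legal in any of
+ * these states" checks reduce to a single AND: for example,
+ * MHI_REG_ACCESS_VALID(MHI_PM_M2) is true while
+ * MHI_REG_ACCESS_VALID(MHI_PM_M3) is false.
+ */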
+
+#define NR_OF_CMD_RINGS 1
+#define CMD_EL_PER_RING 128
+#define PRIMARY_CMD_RING 0
+#define MHI_DEV_WAKE_DB 127
+#define MHI_MAX_MTU 0xffff
+
+enum mhi_er_type {
+ MHI_ER_TYPE_INVALID = 0x0,
+ MHI_ER_TYPE_VALID = 0x1,
+};
+
+struct db_cfg {
+ bool reset_req;
+ bool db_mode;
+ u32 pollcfg;
+ enum mhi_db_brst_mode brstmode;
+ dma_addr_t db_val;
+ void (*process_db)(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg, void __iomem *io_addr,
+ dma_addr_t db_val);
+};
+
+struct mhi_pm_transitions {
+ enum mhi_pm_state from_state;
+ u32 to_states;
+};
+
+struct state_transition {
+ struct list_head node;
+ enum dev_st_transition state;
+};
+
+struct mhi_ring {
+ dma_addr_t dma_handle;
+ dma_addr_t iommu_base;
+ u64 *ctxt_wp; /* points to the ctxt wp */
+ void *pre_aligned;
+ void *base;
+ void *rp;
+ void *wp;
+ size_t el_size;
+ size_t len;
+ size_t elements;
+ size_t alloc_size;
+ void __iomem *db_addr;
+};
+
+struct mhi_cmd {
+ struct mhi_ring ring;
+ spinlock_t lock;
+};
+
+struct mhi_buf_info {
+ void *v_addr;
+ void *bb_addr;
+ void *wp;
+ void *cb_buf;
+ dma_addr_t p_addr;
+ size_t len;
+ enum dma_data_direction dir;
+ bool used; /* Indicates whether the buffer is used or not */
+ bool pre_mapped; /* Already pre-mapped by client */
+};
+
+struct mhi_event {
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *mhi_chan; /* dedicated to channel */
+ u32 er_index;
+ u32 intmod;
+ u32 irq;
+ int chan; /* this event ring is dedicated to a channel (optional) */
+ u32 priority;
+ enum mhi_er_data_type data_type;
+ struct mhi_ring ring;
+ struct db_cfg db_cfg;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ int (*process_event)(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota);
+ bool hw_ring;
+ bool cl_manage;
+ bool offload_ev; /* managed by a device driver */
+};
+
+struct mhi_chan {
+ const char *name;
+ /*
+ * Important: When consuming, increment tre_ring first and when
+ * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
+ * is guaranteed to have space so we do not need to check both rings.
+ */
+ struct mhi_ring buf_ring;
+ struct mhi_ring tre_ring;
+ u32 chan;
+ u32 er_index;
+ u32 intmod;
+ enum mhi_ch_type type;
+ enum dma_data_direction dir;
+ struct db_cfg db_cfg;
+ enum mhi_ch_ee_mask ee_mask;
+ enum mhi_ch_state ch_state;
+ enum mhi_ev_ccs ccs;
+ struct mhi_device *mhi_dev;
+ void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
+ struct mutex mutex;
+ struct completion completion;
+ rwlock_t lock;
+ struct list_head node;
+ bool lpm_notify;
+ bool configured;
+ bool offload_ch;
+ bool pre_alloc;
+ bool auto_start;
+ bool wake_capable;
+};
+
+/* Default MHI timeout */
+#define MHI_TIMEOUT_MS (1000)
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
+
+int mhi_destroy_device(struct device *dev, void *data);
+void mhi_create_devices(struct mhi_controller *mhi_cntrl);
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info, size_t alloc_size);
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info);
+
+/* Power management APIs */
+enum mhi_pm_state __must_check mhi_tryset_pm_state(
+ struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state);
+const char *to_mhi_pm_state_str(enum mhi_pm_state state);
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state);
+void mhi_pm_st_worker(struct work_struct *work);
+void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_fw_load_worker(struct work_struct *work);
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd);
+
+/* Register access methods */
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
+ void __iomem *db_addr, dma_addr_t db_val);
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_mode, void __iomem *db_addr,
+ dma_addr_t db_val);
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out);
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 shift, u32 *out);
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val);
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 mask, u32 shift, u32 val);
+void mhi_ring_er_db(struct mhi_event *mhi_event);
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val);
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Initialization methods */
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info);
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Memory allocation methods */
+static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
+ size_t size,
+ dma_addr_t *dma_handle,
+ gfp_t gfp)
+{
+ void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle,
+ gfp);
+
+ return buf;
+}
+
+static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
+ size_t size,
+ void *vaddr,
+ dma_addr_t dma_handle)
+{
+ dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle);
+}
+
+/* Event processing methods */
+void mhi_ctrl_ev_task(unsigned long data);
+void mhi_ev_task(unsigned long data);
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+
+/* ISR handlers */
+irqreturn_t mhi_irq_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ void *buf, void *cb, size_t buf_len, enum mhi_flags flags);
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+
+#endif /* _MHI_INT_H */
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
new file mode 100644
index 000000000000..97e06cc586e4
--- /dev/null
+++ b/drivers/bus/mhi/core/main.c
@@ -0,0 +1,1521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out)
+{
+ return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
+}
+
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset,
+ u32 mask, u32 shift, u32 *out)
+{
+ u32 tmp;
+ int ret;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return ret;
+
+ *out = (tmp & mask) >> shift;
+
+ return 0;
+}
+
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val)
+{
+ mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
+}
+
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 mask, u32 shift, u32 val)
+{
+ int ret;
+ u32 tmp;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return;
+
+ tmp &= ~mask;
+ tmp |= (val << shift);
+ mhi_write_reg(mhi_cntrl, base, offset, tmp);
+}
+
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
+ mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
+}
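+
+/*
+ * Note on mhi_write_db(): the upper word is written before the lower word,
+ * presumably so the full 64-bit doorbell value is consistent by the time
+ * the device samples it on the low-word write.
+ */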
+
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ if (db_cfg->db_mode) {
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+ db_cfg->db_mode = 0;
+ }
+}
+
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+}
+
+void mhi_ring_er_db(struct mhi_event *mhi_event)
+{
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
+ ring->db_addr, *ring->ctxt_wp);
+}
+
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
+{
+ dma_addr_t db;
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = db;
+ mhi_write_db(mhi_cntrl, ring->db_addr, db);
+}
+
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+ dma_addr_t db;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = db;
+ mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
+ ring->db_addr, db);
+}
+
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
+{
+ u32 exec;
+ int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
+
+ return (ret) ? MHI_EE_MAX : exec;
+}
+
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
+{
+ u32 state;
+ int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, &state);
+ return ret ? MHI_STATE_MAX : state;
+}
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
+ buf_info->v_addr, buf_info->len,
+ buf_info->dir);
+ if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
+ &buf_info->p_addr, GFP_ATOMIC);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (buf_info->dir == DMA_TO_DEVICE)
+ memcpy(buf, buf_info->v_addr, buf_info->len);
+
+ buf_info->bb_addr = buf;
+
+ return 0;
+}
+
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
+ buf_info->dir);
+}
+
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ if (buf_info->dir == DMA_FROM_DEVICE)
+ memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
+
+ mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
+ buf_info->p_addr);
+}
+
+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ int nr_el;
+
+ if (ring->wp < ring->rp) {
+ nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ } else {
+ nr_el = (ring->rp - ring->base) / ring->el_size;
+ nr_el += ((ring->base + ring->len - ring->wp) /
+ ring->el_size) - 1;
+ }
+
+ return nr_el;
+}
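+
+/*
+ * Worked example (illustrative numbers): for a ring of 4 elements with
+ * rp at element 1 and wp at element 3, wp > rp, so the count is
+ * (rp - base) / el_size = 1 plus (base + len - wp) / el_size - 1 = 0,
+ * i.e. 1 free slot. One slot is always kept unused so that wp == rp
+ * unambiguously means "ring empty" rather than "ring full".
+ */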
+
+static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return (addr - ring->iommu_base) + ring->base;
+}
+
+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+ /* Ensure the WP update is visible to other cores */
+ smp_wmb();
+}
+
+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+ /* Ensure the RP update is visible to other cores */
+ smp_wmb();
+}
+
+int mhi_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_device *mhi_dev;
+ struct mhi_controller *mhi_cntrl;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* Only destroy virtual devices that are attached to the bus */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /*
+ * For the suspend and resume case, this function will get called
+ * without mhi_unregister_controller(). Hence, we need to drop the
+ * references to mhi_dev created for ul and dl channels. We can
+ * be sure that there will be no instances of mhi_dev left after
+ * this.
+ */
+ if (mhi_dev->ul_chan)
+ put_device(&mhi_dev->ul_chan->mhi_dev->dev);
+
+ if (mhi_dev->dl_chan)
+ put_device(&mhi_dev->dl_chan->mhi_dev->dev);
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+ mhi_dev->chan_name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+{
+ struct mhi_driver *mhi_drv;
+
+ if (!mhi_dev->dev.driver)
+ return;
+
+ mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
+
+ if (mhi_drv->status_cb)
+ mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+
+/* Bind MHI channels to MHI devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *mhi_chan;
+ struct mhi_device *mhi_dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+ !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+ continue;
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev))
+ return;
+
+ mhi_dev->dev_type = MHI_DEVICE_XFER;
+ switch (mhi_chan->dir) {
+ case DMA_TO_DEVICE:
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ break;
+ case DMA_FROM_DEVICE:
+ /* We use dl_chan for offload channels */
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ break;
+ default:
+ dev_err(dev, "Direction not supported\n");
+ put_device(&mhi_dev->dev);
+ return;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* If the next channel has the same name, bind it to the same device */
+ if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+ if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+ i++;
+ mhi_chan++;
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+ }
+ }
+
+ /* Channel name is the same for both UL and DL */
+ mhi_dev->chan_name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(mhi_cntrl->cntrl_dev),
+ mhi_dev->chan_name);
+
+ /* Init wakeup source if available */
+ if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+ }
+}
+
+irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+{
+ struct mhi_event *mhi_event = dev;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ /* Only proceed if event ring has pending events */
+ if (ev_ring->rp == dev_rp)
+ return IRQ_HANDLED;
+
+ /* For a client-managed event ring, notify the client of pending data */
+ if (mhi_event->cl_manage) {
+ struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+ struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_dev)
+ mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
+ } else {
+ tasklet_schedule(&mhi_event->task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+ enum mhi_state state = MHI_STATE_MAX;
+ enum mhi_pm_state pm_state = 0;
+ enum mhi_ee_type ee = 0;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_cntrl->ee;
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* If the device is in RDDM, don't bother processing SYS_ERR */
+ if (mhi_cntrl->ee == MHI_EE_RDDM) {
+ if (mhi_cntrl->ee != ee) {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ wake_up_all(&mhi_cntrl->state_event);
+ }
+ goto exit_intvec;
+ }
+
+ if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* For fatal errors, we let the controller decide the next step */
+ if (MHI_IN_PBL(ee))
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+ else
+ schedule_work(&mhi_cntrl->syserr_worker);
+ }
+
+exit_intvec:
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+
+ /* Wake up events waiting for state change */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ dma_addr_t ctxt_wp;
+
+ /* Update the WP */
+ ring->wp += ring->el_size;
+ ctxt_wp = *ring->ctxt_wp + ring->el_size;
+
+ if (ring->wp >= (ring->base + ring->len)) {
+ ring->wp = ring->base;
+ ctxt_wp = ring->iommu_base;
+ }
+
+ *ring->ctxt_wp = ctxt_wp;
+
+ /* Update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* Make the ring updates visible to all cores */
+ smp_wmb();
+}
+
+static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result;
+ unsigned long flags = 0;
+ u32 ev_code;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+
+ /*
+ * If it's a DB event, we must take the lock as a writer with
+ * interrupts disabled, because we have to update the DB register
+ * and another thread could be doing the same.
+ */
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_lock_irqsave(&mhi_chan->lock, flags);
+ else
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_tx_event;
+
+ switch (ev_code) {
+ case MHI_EV_CC_OVERFLOW:
+ case MHI_EV_CC_EOB:
+ case MHI_EV_CC_EOT:
+ {
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
+ struct mhi_tre *local_rp, *ev_tre;
+ void *dev_rp;
+ struct mhi_buf_info *buf_info;
+ u16 xfer_len;
+
+ /* Get the TRB this event points to */
+ ev_tre = mhi_to_virtual(tre_ring, ptr);
+
+ dev_rp = ev_tre + 1;
+ if (dev_rp >= (tre_ring->base + tre_ring->len))
+ dev_rp = tre_ring->base;
+
+ result.dir = mhi_chan->dir;
+
+ local_rp = tre_ring->rp;
+ while (local_rp != dev_rp) {
+ buf_info = buf_ring->rp;
+ /* If it's the last TRE, get length from the event */
+ if (local_rp == ev_tre)
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+ else
+ xfer_len = buf_info->len;
+
+ /* Unmap if it's not pre-mapped by client */
+ if (likely(!buf_info->pre_mapped))
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ result.buf_addr = buf_info->cb_buf;
+ result.bytes_xferd = xfer_len;
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_dec(&mhi_cntrl->pending_pkts);
+
+ /*
+ * Recycle the buffer if it is pre-allocated; if
+ * requeueing fails, there is not much we can do
+ * apart from dropping the packet
+ */
+ if (mhi_chan->pre_alloc) {
+ if (mhi_queue_buf(mhi_chan->mhi_dev,
+ mhi_chan->dir,
+ buf_info->cb_buf,
+ buf_info->len, MHI_EOT)) {
+ dev_err(dev,
+ "Error recycling buffer for chan:%d\n",
+ mhi_chan->chan);
+ kfree(buf_info->cb_buf);
+ }
+ }
+ }
+ break;
+ } /* CC_EOT */
+ case MHI_EV_CC_OOB:
+ case MHI_EV_CC_DB_MODE:
+ {
+ unsigned long flags;
+
+ mhi_chan->db_cfg.db_mode = 1;
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+ if (tre_ring->wp != tre_ring->rp &&
+ MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ }
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+ break;
+ }
+ case MHI_EV_CC_BAD_TRE:
+ default:
+ dev_err(dev, "Unknown event 0x%x\n", ev_code);
+ break;
+ } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
+
+end_process_tx_event:
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_unlock_irqrestore(&mhi_chan->lock, flags);
+ else
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_result result;
+ int ev_code;
+ u32 cookie; /* offset to local descriptor */
+ u16 xfer_len;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ cookie = MHI_TRE_GET_EV_COOKIE(event);
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+ /* Warn if we received an out-of-bounds cookie */
+ WARN_ON(cookie >= buf_ring->len);
+
+ buf_info = buf_ring->base + cookie;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+ result.bytes_xferd = xfer_len;
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_rsc_event;
+
+ WARN_ON(!buf_info->used);
+
+ /* notify the client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /*
+ * Note: We are arbitrarily incrementing RP even though the
+ * completion packet we processed might not be the one RP points
+ * to. We can do this because the device is guaranteed to cache
+ * descriptors in the order it receives them, so even if the
+ * completion event is for a different descriptor, all the
+ * descriptors in between can be reused.
+ * Example:
+ * The transfer ring has descriptors: A, B, C, D
+ * The last descriptor the host queued is D (WP) and the first
+ * is A (RP).
+ * The completion event we just serviced is for descriptor C.
+ * Then we can safely queue descriptors to replace A, B, and C
+ * even though the host did not receive completions for A and B.
+ */
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ buf_info->used = false;
+
+end_process_rsc_event:
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *tre)
+{
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
+ struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *mhi_ring = &cmd_ring->ring;
+ struct mhi_tre *cmd_pkt;
+ struct mhi_chan *mhi_chan;
+ u32 chan;
+
+ cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+
+ chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ write_lock_bh(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+ complete(&mhi_chan->completion);
+ write_unlock_bh(&mhi_chan->lock);
+
+ mhi_del_ring_element(mhi_cntrl, mhi_ring);
+}
+
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 chan;
+ int count = 0;
+
+ /*
+ * This is a quick check to avoid unnecessary event processing
+ * in case MHI is already in error state, but it's still possible
+ * to transition to error state while processing events
+ */
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ switch (type) {
+ case MHI_PKT_TYPE_BW_REQ_EVENT:
+ {
+ struct mhi_link_info *link_info;
+
+ link_info = &mhi_cntrl->mhi_link_info;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ link_info->target_link_speed =
+ MHI_TRE_GET_EV_LINKSPEED(local_rp);
+ link_info->target_link_width =
+ MHI_TRE_GET_EV_LINKWIDTH(local_rp);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_dbg(dev, "Received BW_REQ event\n");
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
+ break;
+ }
+ case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
+ {
+ enum mhi_state new_state;
+
+ new_state = MHI_TRE_GET_EV_STATE(local_rp);
+
+ dev_dbg(dev, "State change event to state: %s\n",
+ TO_MHI_STATE_STR(new_state));
+
+ switch (new_state) {
+ case MHI_STATE_M0:
+ mhi_pm_m0_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M1:
+ mhi_pm_m1_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M3:
+ mhi_pm_m3_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_SYS_ERR:
+ {
+ enum mhi_pm_state new_state;
+
+ dev_dbg(dev, "System error detected\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ new_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (new_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_cntrl->syserr_worker);
+ break;
+ }
+ default:
+ dev_err(dev, "Invalid state: %s\n",
+ TO_MHI_STATE_STR(new_state));
+ }
+
+ break;
+ }
+ case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
+ mhi_process_cmd_completion(mhi_cntrl, local_rp);
+ break;
+ case MHI_PKT_TYPE_EE_EVENT:
+ {
+ enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
+ enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
+
+ dev_dbg(dev, "Received EE event: %s\n",
+ TO_MHI_EXEC_STR(event));
+ switch (event) {
+ case MHI_EE_SBL:
+ st = DEV_ST_TRANSITION_SBL;
+ break;
+ case MHI_EE_WFW:
+ case MHI_EE_AMSS:
+ st = DEV_ST_TRANSITION_MISSION_MODE;
+ break;
+ case MHI_EE_RDDM:
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = event;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+ break;
+ default:
+ dev_err(dev,
+ "Unhandled EE event: 0x%x\n", type);
+ }
+ if (st != DEV_ST_TRANSITION_MAX)
+ mhi_queue_state_transition(mhi_cntrl, st);
+
+ break;
+ }
+ case MHI_PKT_TYPE_TX_EVENT:
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ break;
+ default:
+ dev_err(dev, "Unhandled event type: %d\n", type);
+ break;
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ count++;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ int count = 0;
+ u32 chan;
+ struct mhi_chan *mhi_chan;
+
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp && event_quota > 0) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ count++;
+ }
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
+void mhi_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+ /* process all pending events */
+ spin_lock_bh(&mhi_event->lock);
+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+ spin_unlock_bh(&mhi_event->lock);
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ enum mhi_pm_state pm_state = 0;
+ int ret;
+
+ /*
+ * We can check PM state w/o a lock here because there is no way
+ * PM state can change from reg access valid to no access while this
+ * thread is executing.
+ */
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ /*
+ * We may have a pending event but not allowed to
+ * process it since we are probably in a suspended state,
+ * so trigger a resume.
+ */
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+ return;
+ }
+
+ /* Process ctrl events */
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+ /*
+ * We received an IRQ but there are no events to process; the device
+ * may have entered the SYS_ERR state. Check the state to confirm.
+ */
+ if (!ret) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_cntrl->syserr_worker);
+ }
+}
+
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ void *tmp = ring->wp + ring->el_size;
+
+ if (tmp >= (ring->base + ring->len))
+ tmp = ring->base;
+
+ return (tmp == ring->rp);
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+ int ret;
+
+ /* If MHI host pre-allocates buffers then client drivers cannot queue */
+ if (mhi_chan->pre_alloc)
+ return -EINVAL;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+ /* Toggle wake to exit out of M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ /* Generate the TRE */
+ buf_info = buf_ring->wp;
+
+ buf_info->v_addr = skb->data;
+ buf_info->cb_buf = skb;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = len;
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ goto map_error;
+
+ mhi_tre = tre_ring->wp;
+
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ read_lock_bh(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_bh(&mhi_chan->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+map_error:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
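+
+/*
+ * Usage sketch (illustrative): a network client would typically hand a
+ * filled skb to the device from its transmit path, e.g.
+ *
+ * ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ * if (ret == -ENOMEM)
+ * ... the TRE ring is full: stop the queue and retry later ...
+ *
+ * and get the skb back as result->buf_addr in its ul_xfer_cb once the
+ * transfer completes.
+ */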
+
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+
+ /* If MHI host pre-allocates buffers then client drivers cannot queue */
+ if (mhi_chan->pre_alloc)
+ return -EINVAL;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ dev_err(dev, "MHI is not in activate state, PM state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+ /* Toggle wake to exit out of M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ /* Generate the TRE */
+ buf_info = buf_ring->wp;
+ WARN_ON(buf_info->used);
+ buf_info->p_addr = mhi_buf->dma_addr;
+ buf_info->pre_mapped = true;
+ buf_info->cb_buf = mhi_buf;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = len;
+
+ mhi_tre = tre_ring->wp;
+
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ read_lock_bh(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_bh(&mhi_chan->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_dma);
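
Unlike mhi_queue_skb(), the DMA variant expects a buffer the caller has already mapped (buf_info->pre_mapped is set, so the core skips map_single/unmap_single). A hedged sketch, assuming the caller mapped the region with dma_map_single() beforehand:

    struct mhi_buf mbuf = {
            .buf = vaddr,          /* CPU address, kept for the completion cb */
            .dma_addr = daddr,     /* already mapped by the caller */
            .len = xfer_len,
    };

    ret = mhi_queue_dma(mhi_dev, DMA_FROM_DEVICE, &mbuf, mbuf.len, MHI_EOT);

Note that the mhi_buf is handed back through the channel's xfer callback as result.buf_addr, so in a real driver it must outlive the transfer rather than live on the stack as in this fragment.
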
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_tre *mhi_tre;
+ struct mhi_buf_info *buf_info;
+ int eot, eob, chain, bei;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ buf_info = buf_ring->wp;
+ buf_info->v_addr = buf;
+ buf_info->cb_buf = cb;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = buf_len;
+
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ return ret;
+
+ eob = !!(flags & MHI_EOB);
+ eot = !!(flags & MHI_EOT);
+ chain = !!(flags & MHI_CHAIN);
+ bei = !!(mhi_chan->intmod);
+
+ mhi_tre = tre_ring->wp;
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ return 0;
+}
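
The flags accepted by mhi_gen_tre() map directly onto TRE descriptor bits: BEI (derived from the channel's intmod setting) suppresses the per-transfer completion interrupt, MHI_EOT/MHI_EOB request an event on end-of-transfer/end-of-block, and MHI_CHAIN links multi-buffer transfers. A two-buffer chained transfer would therefore be queued roughly as follows (sketch, locking and error handling elided):

    ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf0, buf0, len0, MHI_CHAIN);
    if (!ret)
            ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf1, buf1, len1, MHI_EOT);
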
+
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * This check is only a guard; MHI can always enter an error state
+ * while the rest of this function executes. That is not fatal, so
+ * we do not need to hold pm_lock here.
+ */
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ tre_ring = &mhi_chan->tre_ring;
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+ if (unlikely(ret))
+ return ret;
+
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+ /* Toggle wake to exit out of M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ unsigned long flags;
+
+ read_lock_irqsave(&mhi_chan->lock, flags);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irqrestore(&mhi_chan->lock, flags);
+ }
+
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_buf);
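
A hedged sketch of the typical use, pre-posting a receive buffer on the downlink channel (the buffer size is illustrative; the kfree() reclaims the buffer only on failure, since ownership otherwise passes to the core until the xfer callback fires):

    void *rbuf = kmalloc(SZ_4K, GFP_KERNEL);    /* illustrative size */

    if (rbuf && mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, rbuf, SZ_4K, MHI_EOT))
            kfree(rbuf);
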
+
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd)
+{
+ struct mhi_tre *cmd_tre = NULL;
+ struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *ring = &mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int chan = 0;
+
+ if (mhi_chan)
+ chan = mhi_chan->chan;
+
+ spin_lock_bh(&mhi_cmd->lock);
+ if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
+ spin_unlock_bh(&mhi_cmd->lock);
+ return -ENOMEM;
+ }
+
+ /* prepare the cmd tre */
+ cmd_tre = ring->wp;
+ switch (cmd) {
+ case MHI_CMD_RESET_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
+ break;
+ case MHI_CMD_START_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
+ break;
+ default:
+ dev_err(dev, "Command not supported\n");
+ break;
+ }
+
+ /* queue to hardware */
+ mhi_add_ring_element(mhi_cntrl, ring);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ spin_unlock_bh(&mhi_cmd->lock);
+
+ return 0;
+}
+
+static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
+
+ /* Stop processing events for this channel */
+ mutex_lock(&mhi_chan->mutex);
+ write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ write_unlock_irq(&mhi_chan->lock);
+ mutex_unlock(&mhi_chan->mutex);
+ return;
+ }
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ reinit_completion(&mhi_chan->completion);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ goto error_invalid_state;
+ }
+
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
+ if (ret)
+ goto error_invalid_state;
+
+ /* even if it fails we will still reset */
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
+ dev_err(dev,
+ "Failed to receive cmd completion, still resetting\n");
+
+error_invalid_state:
+ if (!mhi_chan->offload_ch) {
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+ }
+ dev_dbg(dev, "chan:%d successfully resetted\n", mhi_chan->chan);
+ mutex_unlock(&mhi_chan->mutex);
+}
+
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret = 0;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
+
+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+ dev_err(dev,
+ "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
+ mhi_chan->name);
+ return -ENOTCONN;
+ }
+
+ mutex_lock(&mhi_chan->mutex);
+
+ /* If channel is not in disabled state, do not allow it to start */
+ if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
+ ret = -EIO;
+ dev_dbg(dev, "channel: %d is not in disabled state\n",
+ mhi_chan->chan);
+ goto error_init_chan;
+ }
+
+ /* Check if the client manages the channel context for offload channels */
+ if (!mhi_chan->offload_ch) {
+ ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
+ if (ret)
+ goto error_init_chan;
+ }
+
+ reinit_completion(&mhi_chan->completion);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ ret = -EIO;
+ goto error_pm_state;
+ }
+
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+ if (ret)
+ goto error_pm_state;
+
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+ ret = -EIO;
+ goto error_pm_state;
+ }
+
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Pre-allocate buffer for xfer ring */
+ if (mhi_chan->pre_alloc) {
+ int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
+ &mhi_chan->tre_ring);
+ size_t len = mhi_cntrl->buffer_len;
+
+ while (nr_el--) {
+ void *buf;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_pre_alloc;
+ }
+
+ /* Prepare transfer descriptors */
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
+ len, MHI_EOT);
+ if (ret) {
+ kfree(buf);
+ goto error_pre_alloc;
+ }
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ read_lock_irq(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irq(&mhi_chan->lock);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ }
+
+ mutex_unlock(&mhi_chan->mutex);
+
+ dev_dbg(dev, "Chan: %d successfully moved to start state\n",
+ mhi_chan->chan);
+
+ return 0;
+
+error_pm_state:
+ if (!mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+error_init_chan:
+ mutex_unlock(&mhi_chan->mutex);
+
+ return ret;
+
+error_pre_alloc:
+ mutex_unlock(&mhi_chan->mutex);
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+
+ return ret;
+}
+
+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ struct mhi_event_ctxt *er_ctxt,
+ int chan)
+
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long flags;
+
+ dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
+
+ ev_ring = &mhi_event->ring;
+
+ /* Mark all pending events related to this channel as stale */
+ spin_lock_irqsave(&mhi_event->lock, flags);
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ local_rp = ev_ring->rp;
+ while (dev_rp != local_rp) {
+ if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
+ chan == MHI_TRE_GET_EV_CHID(local_rp))
+ local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
+ MHI_PKT_TYPE_STALE_EVENT);
+ local_rp++;
+ if (local_rp == (ev_ring->base + ev_ring->len))
+ local_rp = ev_ring->base;
+ }
+
+ dev_dbg(dev, "Finished marking events as stale events\n");
+ spin_unlock_irqrestore(&mhi_event->lock, flags);
+}
+
+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_result result;
+
+ /* Reset any pending buffers */
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ while (tre_ring->rp != tre_ring->wp) {
+ struct mhi_buf_info *buf_info = buf_ring->rp;
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_dec(&mhi_cntrl->pending_pkts);
+
+ if (!buf_info->pre_mapped)
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+
+ if (mhi_chan->pre_alloc) {
+ kfree(buf_info->cb_buf);
+ } else {
+ result.buf_addr = buf_info->cb_buf;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+ }
+}
+
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int chan = mhi_chan->chan;
+
+ /* Nothing to reset, client doesn't queue buffers */
+ if (mhi_chan->offload_ch)
+ return;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
+
+ mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
+
+ mhi_reset_data_chan(mhi_cntrl, mhi_chan);
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+
+/* Move channel to start state */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+ int ret, dir;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ if (ret)
+ goto error_open_chan;
+ }
+
+ return 0;
+
+error_open_chan:
+ for (--dir; dir >= 0; dir--) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
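
Client drivers typically call this from their probe path and tear down with mhi_unprepare_from_transfer() below; a sketch of the usual pairing (illustrative client callbacks):

    static int my_client_probe(struct mhi_device *mhi_dev,
                               const struct mhi_device_id *id)
    {
            return mhi_prepare_for_transfer(mhi_dev);  /* starts UL and DL channels */
    }

    static void my_client_remove(struct mhi_device *mhi_dev)
    {
            mhi_unprepare_from_transfer(mhi_dev);      /* resets both channels */
    }
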
+
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ int dir;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+ if (!mhi_chan)
+ continue;
+
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
+
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
+ struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ int ret;
+
+ spin_lock_bh(&mhi_event->lock);
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
+ spin_unlock_bh(&mhi_event->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_poll);
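
Clients that favor polled operation over per-event interrupts can drain their DL event ring on their own schedule; a hedged NAPI-style sketch, assuming the illustrative mhi_net_priv from the mhi_queue_skb() sketch above:

    static int mhi_net_poll(struct napi_struct *napi, int budget)
    {
            struct mhi_net_priv *priv = container_of(napi, struct mhi_net_priv,
                                                     napi);
            int done = mhi_poll(priv->mhi_dev, budget);

            if (done < budget)
                    napi_complete_done(napi, done);

            return done < budget ? done : budget;
    }
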
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
new file mode 100644
index 000000000000..dc83d65f7784
--- /dev/null
+++ b/drivers/bus/mhi/core/pm.c
@@ -0,0 +1,973 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/*
+ * Not all MHI state transitions are synchronous. Transitions like Linkdown,
+ * SYS_ERR, and shutdown can happen asynchronously at any time. This function
+ * transitions to a new state only if the transition is allowed.
+ *
+ * Priority increases as we go down. For instance, from any state in L0, the
+ * transition can be made to states in L1, L2 and L3. A notable exception to
+ * this rule is the DISABLE state, from which we can only transition to the
+ * POR state. Also, while in an L2 state, the user cannot jump back to the
+ * previous L1 or L0 states.
+ *
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ * POR <--> POR
+ * POR -> M0 -> M2 --> M0
+ * POR -> FW_DL_ERR
+ * FW_DL_ERR <--> FW_DL_ERR
+ * M0 <--> M0
+ * M0 -> FW_DL_ERR
+ * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ */
+static struct mhi_pm_transitions const dev_state_transitions[] = {
+ /* L0 States */
+ {
+ MHI_PM_DISABLE,
+ MHI_PM_POR
+ },
+ {
+ MHI_PM_POR,
+ MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M0,
+ MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M2,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_ENTER,
+ MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3,
+ MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_EXIT,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_FW_DL_ERR,
+ MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L1 States */
+ {
+ MHI_PM_SYS_ERR_DETECT,
+ MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_PROCESS,
+ MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L2 States */
+ {
+ MHI_PM_SHUTDOWN_PROCESS,
+ MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L3 States */
+ {
+ MHI_PM_LD_ERR_FATAL_DETECT,
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+ },
+};
+
+enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state)
+{
+ unsigned long cur_state = mhi_cntrl->pm_state;
+ int index = find_last_bit(&cur_state, 32);
+
+ if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
+ return cur_state;
+
+ if (unlikely(dev_state_transitions[index].from_state != cur_state))
+ return cur_state;
+
+ if (unlikely(!(dev_state_transitions[index].to_states & state)))
+ return cur_state;
+
+ mhi_cntrl->pm_state = state;
+ return mhi_cntrl->pm_state;
+}
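
Callers detect a rejected transition by comparing the return value with the state they requested; the pattern used throughout this file looks like (sketch, pm_lock held in write mode as required):

    write_lock_irq(&mhi_cntrl->pm_lock);
    cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
    write_unlock_irq(&mhi_cntrl->pm_lock);
    if (cur_state != MHI_PM_M2)
            return -EIO;    /* dev_state_transitions[] disallowed the move */
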
+
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
+{
+ if (state == MHI_STATE_RESET) {
+ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
+ } else {
+ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_MHISTATE_MASK,
+ MHICTRL_MHISTATE_SHIFT, state);
+ }
+}
+
+/* NOP for backward compatibility, host is allowed to ring DB in M2 state */
+static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+}
+
+/* Handle device ready state transition */
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
+{
+ void __iomem *base = mhi_cntrl->regs;
+ struct mhi_event *mhi_event;
+ enum mhi_pm_state cur_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 reset = 1, ready = 0;
+ int ret, i;
+
+ /* Wait for RESET to be cleared and READY bit to be set by the device */
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT, &reset) ||
+ mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, &ready) ||
+ (!reset && ready),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ /* Check if device entered error state */
+ if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device link is not accessible\n");
+ return -EIO;
+ }
+
+ /* Timeout if device did not transition to ready state */
+ if (reset || !ready) {
+ dev_err(dev, "Device Ready timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "Device in READY State\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+ mhi_cntrl->dev_state = MHI_STATE_READY;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state != MHI_PM_POR) {
+ dev_err(dev, "Error moving to state %s from %s\n",
+ to_mhi_pm_state_str(MHI_PM_POR),
+ to_mhi_pm_state_str(cur_state));
+ return -EIO;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device registers not accessible\n");
+ goto error_mmio;
+ }
+
+ /* Configure MMIO registers */
+ ret = mhi_init_mmio(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Error configuring MMIO registers\n");
+ goto error_mmio;
+ }
+
+ /* Add elements to all SW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if this is an offload or HW event */
+ if (mhi_event->offload_ev || mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+ /* Update all cores */
+ smp_wmb();
+
+ /* Ring the event ring db */
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Set MHI to M0 state */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+error_mmio:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+}
+
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state;
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M0;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_M0)) {
+ dev_err(dev, "Unable to transition to M0 state\n");
+ return -EIO;
+ }
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+
+ /* Ring all event rings and CMD ring only if we're in mission mode */
+ if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct mhi_cmd *mhi_cmd =
+ &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Only ring primary cmd ring if ring is not empty */
+ spin_lock_irq(&mhi_cmd->lock);
+ if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ spin_unlock_irq(&mhi_cmd->lock);
+ }
+
+ /* Ring channel DB registers */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->db_cfg.reset_req)
+ mhi_chan->db_cfg.db_mode = true;
+
+ /* Only ring DB if ring is not empty */
+ if (tre_ring->base && tre_ring->wp != tre_ring->rp)
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/*
+ * After receiving the MHI state change event from the device indicating the
+ * transition to M1 state, the host can transition the device to M2 state
+ * to keep it in a low power state.
+ */
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
+ if (state == MHI_PM_M2) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+ mhi_cntrl->dev_state = MHI_STATE_M2;
+
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* If there are any pending resources, exit M2 immediately */
+ if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_cntrl->dev_wake))) {
+ dev_dbg(dev,
+ "Exiting M2, pending_pkts: %d dev_wake: %d\n",
+ atomic_read(&mhi_cntrl->pending_pkts),
+ atomic_read(&mhi_cntrl->dev_wake));
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ } else {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
+ }
+ } else {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ }
+}
+
+/* MHI M3 completion handler */
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M3;
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (state != MHI_PM_M3) {
+ dev_err(dev, "Unable to transition to M3 state\n");
+ return -EIO;
+ }
+
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/* Handle device Mission Mode transition */
+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ dev_dbg(dev, "Processing Mission Mode transition\n");
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+ return -EIO;
+
+ wake_up_all(&mhi_cntrl->state_event);
+
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+
+ /* Force MHI to be in M0 state before continuing */
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ goto error_mission_mode;
+ }
+
+ /* Add elements to all HW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev || !mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+ /* Update to all cores */
+ smp_wmb();
+
+ spin_lock_irq(&mhi_event->lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl))
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ /*
+ * The MHI devices are only created when the client device switches its
+ * Execution Environment (EE) to either the SBL or AMSS state
+ */
+ mhi_create_devices(mhi_cntrl);
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+error_mission_mode:
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+
+/* Handle SYS_ERR and Shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state transition_state)
+{
+ enum mhi_pm_state cur_state, prev_state;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event_ctxt *er_ctxt;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ to_mhi_pm_state_str(transition_state));
+
+ /* We must notify the MHI controller driver so it can clean up first */
+ if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
+ /*
+ * If the controller supports RDDM, we do not process the
+ * SYS_ERR state; instead we will jump directly to the
+ * RDDM state
+ */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev,
+ "Controller supports RDDM, so skip SYS_ERR\n");
+ return;
+ }
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ prev_state = mhi_cntrl->pm_state;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+ if (cur_state == transition_state) {
+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* Wake up threads waiting for state transition */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ if (cur_state != transition_state) {
+ dev_err(dev, "Failed to transition to state: %s from: %s\n",
+ to_mhi_pm_state_str(transition_state),
+ to_mhi_pm_state_str(cur_state));
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return;
+ }
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (MHI_REG_ACCESS_VALID(prev_state)) {
+ u32 in_reset = -1;
+ unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+
+ dev_dbg(dev, "Triggering MHI Reset in device\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+ /* Wait for the reset bit to be cleared by the device */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT,
+ &in_reset) ||
+ !in_reset, timeout);
+ if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ dev_err(dev, "Device failed to exit MHI Reset state\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return;
+ }
+
+ /*
+ * The device will clear BHI_INTVEC as part of RESET processing,
+ * so re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+ dev_dbg(dev,
+ "Waiting for all pending event ring processing to complete\n");
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+ tasklet_kill(&mhi_event->task);
+ }
+
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ dev_dbg(dev, "Waiting for all pending threads to complete\n");
+ wake_up_all(&mhi_cntrl->state_event);
+ flush_work(&mhi_cntrl->st_worker);
+ flush_work(&mhi_cntrl->fw_worker);
+
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
+
+ /* Reset the ev rings and cmd rings */
+ dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ cmd_ctxt->rp = cmd_ctxt->rbase;
+ cmd_ctxt->wp = cmd_ctxt->rbase;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ er_ctxt->rp = er_ctxt->rbase;
+ er_ctxt->wp = er_ctxt->rbase;
+ }
+
+ if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ mhi_ready_state_transition(mhi_cntrl);
+ } else {
+ /* Move to disable state */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_DISABLE))
+ dev_err(dev, "Error moving from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(cur_state),
+ to_mhi_pm_state_str(MHI_PM_DISABLE));
+ }
+
+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+/* Queue a new work item and schedule work */
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state)
+{
+ struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+ unsigned long flags;
+
+ if (!item)
+ return -ENOMEM;
+
+ item->state = state;
+ spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+ list_add_tail(&item->node, &mhi_cntrl->transition_list);
+ spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+ schedule_work(&mhi_cntrl->st_worker);
+
+ return 0;
+}
+
+/* SYS_ERR worker */
+void mhi_pm_sys_err_worker(struct work_struct *work)
+{
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ syserr_worker);
+
+ mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+}
+
+/* Device State Transition worker */
+void mhi_pm_st_worker(struct work_struct *work)
+{
+ struct state_transition *itr, *tmp;
+ LIST_HEAD(head);
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ st_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ spin_lock_irq(&mhi_cntrl->transition_lock);
+ list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+ spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling state transition: %s\n",
+ TO_DEV_STATE_TRANS_STR(itr->state));
+
+ switch (itr->state) {
+ case DEV_ST_TRANSITION_PBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_IN_PBL(mhi_cntrl->ee))
+ wake_up_all(&mhi_cntrl->state_event);
+ break;
+ case DEV_ST_TRANSITION_SBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_SBL;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /*
+ * The MHI devices are only created when the client
+ * device switches its Execution Environment (EE) to
+ * either the SBL or AMSS state
+ */
+ mhi_create_devices(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_MISSION_MODE:
+ mhi_pm_mission_mode_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_READY:
+ mhi_ready_state_transition(mhi_cntrl);
+ break;
+ default:
+ break;
+ }
+ kfree(itr);
+ }
+}
+
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->pm_state == MHI_PM_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Assert device wake db */
+static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
+{
+ unsigned long flags;
+
+ /*
+ * If the force flag is set, then increment the wake count value
+ * and ring the wake db
+ */
+ if (unlikely(force)) {
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ atomic_inc(&mhi_cntrl->dev_wake);
+ if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ } else {
+ /*
+ * If resources are already requested, then just increment
+ * the wake count value and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
+ MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ }
+}
+
+/* De-assert device wake db */
+static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
+ bool override)
+{
+ unsigned long flags;
+
+ /*
+ * Only continue if there is a single resource, else just decrement
+ * and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
+ MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
+ mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
+ mhi_cntrl->wake_set = false;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+}
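
The fast paths in the two helpers above avoid taking wlock entirely: atomic_add_unless(&dev_wake, 1, 0) only increments when the count is already non-zero (the doorbell is known to be asserted), and atomic_add_unless(&dev_wake, -1, 1) only decrements while more than one vote remains. A worked trace of the doorbell writes, assuming neither force nor override is set:

    /* dev_wake 0 -> 1: slow path, write wake db = 1 (wake_set = true)  */
    /* dev_wake 1 -> 2: fast path, no lock taken, no db write           */
    /* dev_wake 2 -> 1: fast path, no lock taken, no db write           */
    /* dev_wake 1 -> 0: slow path, write wake db = 0 (wake_set = false) */
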
+
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_ee_type current_ee;
+ enum dev_st_transition next_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 val;
+ int ret;
+
+ dev_info(dev, "Requested to power ON\n");
+
+ if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
+ return -EINVAL;
+
+ /* Supply default wake routines if not provided by controller driver */
+ if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
+ !mhi_cntrl->wake_toggle) {
+ mhi_cntrl->wake_get = mhi_assert_dev_wake;
+ mhi_cntrl->wake_put = mhi_deassert_dev_wake;
+ mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
+ mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+ if (!mhi_cntrl->pre_init) {
+ /* Setup device context */
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret)
+ goto error_dev_ctxt;
+ }
+
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto error_setup_irq;
+
+ /* Setup BHI offset & INTVEC */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
+ if (ret) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto error_bhi_offset;
+ }
+
+ mhi_cntrl->bhi = mhi_cntrl->regs + val;
+
+ /* Setup BHIE offset */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
+ if (ret) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev, "Error reading BHIE offset\n");
+ goto error_bhi_offset;
+ }
+
+ mhi_cntrl->bhie = mhi_cntrl->regs + val;
+ }
+
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ mhi_cntrl->pm_state = MHI_PM_POR;
+ mhi_cntrl->ee = MHI_EE_MAX;
+ current_ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* Confirm that the device is in a valid exec env */
+ if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+ dev_err(dev, "Not a valid EE for power on\n");
+ ret = -EIO;
+ goto error_bhi_offset;
+ }
+
+ /* Transition to next state */
+ next_state = MHI_IN_PBL(current_ee) ?
+ DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
+
+ if (next_state == DEV_ST_TRANSITION_PBL)
+ schedule_work(&mhi_cntrl->fw_worker);
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ dev_info(dev, "Power on setup success\n");
+
+ return 0;
+
+error_bhi_offset:
+ mhi_deinit_free_irq(mhi_cntrl);
+
+error_setup_irq:
+ if (!mhi_cntrl->pre_init)
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_async_power_up);
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+ enum mhi_pm_state cur_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* If it's not a graceful shutdown, force MHI to linkdown state */
+ if (!graceful) {
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
+ dev_dbg(dev, "Failed to move to state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ }
+ mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+ mhi_deinit_free_irq(mhi_cntrl);
+
+ if (!mhi_cntrl->pre_init) {
+ /* Free all allocated resources */
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_power_down);
+
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
+{
+ int ret = mhi_async_power_up(mhi_cntrl);
+
+ if (ret)
+ return ret;
+
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
+ if (ret)
+ mhi_power_down(mhi_cntrl, false);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_sync_power_up);
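
From a controller driver's point of view, bring-up reduces to a single call once the controller has been registered; a hedged sketch of the usual probe tail:

    ret = mhi_sync_power_up(mhi_cntrl);
    if (ret) {
            /* power up failed, or mission mode was never reached */
            dev_err(mhi_cntrl->cntrl_dev, "MHI power up failed: %d\n", ret);
            return ret;
    }
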
+
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Check if device is already in RDDM */
+ if (mhi_cntrl->ee == MHI_EE_RDDM)
+ return 0;
+
+ dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ /* Wait for RDDM event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_RDDM,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ ret = ret ? 0 : -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
+
+void mhi_device_get(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake++;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_get);
+
+int mhi_device_get_sync(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int ret;
+
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (!ret)
+ mhi_dev->dev_wake++;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_device_get_sync);
+
+void mhi_device_put(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake--;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_put);
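
mhi_device_get()/mhi_device_put() are per-client votes against device low-power states and must balance; a typical guarded access (sketch):

    ret = mhi_device_get_sync(mhi_dev);    /* wait until the device reaches M0 */
    if (!ret) {
            /* ... interact with the device while it is voted awake ... */
            mhi_device_put(mhi_dev);
    }
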
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 440019655fbb..e5f5f48d69d2 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -15,15 +16,47 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include <linux/iopoll.h>
#include <linux/platform_data/ti-sysc.h>
#include <dt-bindings/bus/ti-sysc.h>
+#define DIS_ISP BIT(2)
+#define DIS_IVA BIT(1)
+#define DIS_SGX BIT(0)
+
+#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }
+
#define MAX_MODULE_SOFTRESET_WAIT 10000
-static const char * const reg_names[] = { "rev", "sysc", "syss", };
+enum sysc_soc {
+ SOC_UNKNOWN,
+ SOC_2420,
+ SOC_2430,
+ SOC_3430,
+ SOC_3630,
+ SOC_4430,
+ SOC_4460,
+ SOC_4470,
+ SOC_5430,
+ SOC_AM3,
+ SOC_AM4,
+ SOC_DRA7,
+};
+
+struct sysc_address {
+ unsigned long base;
+ struct list_head node;
+};
+
+struct sysc_soc_info {
+ unsigned long general_purpose:1;
+ enum sysc_soc soc;
+ struct mutex list_lock; /* disabled modules list lock */
+ struct list_head disabled_modules;
+};
enum sysc_clocks {
SYSC_FCK,
@@ -39,6 +72,8 @@ enum sysc_clocks {
SYSC_MAX_CLOCKS,
};
+static struct sysc_soc_info *sysc_soc;
+static const char * const reg_names[] = { "rev", "sysc", "syss", };
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
"opt5", "opt6", "opt7",
@@ -70,11 +105,13 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @child_needs_resume: runtime resume needed for child on resume from suspend
* @disable_on_idle: status flag used for disabling modules with resets
* @idle_work: work structure used to perform delayed idle on a module
- * @clk_enable_quirk: module specific clock enable quirk
- * @clk_disable_quirk: module specific clock disable quirk
+ * @pre_reset_quirk: module specific pre-reset quirk
+ * @post_reset_quirk: module specific post-reset quirk
* @reset_done_quirk: module specific reset done quirk
* @module_enable_quirk: module specific enable quirk
* @module_disable_quirk: module specific disable quirk
+ * @module_unlock_quirk: module specific sysconfig unlock quirk
+ * @module_lock_quirk: module specific sysconfig lock quirk
*/
struct sysc {
struct device *dev;
@@ -97,11 +134,13 @@ struct sysc {
unsigned int needs_resume:1;
unsigned int child_needs_resume:1;
struct delayed_work idle_work;
- void (*clk_enable_quirk)(struct sysc *sysc);
- void (*clk_disable_quirk)(struct sysc *sysc);
+ void (*pre_reset_quirk)(struct sysc *sysc);
+ void (*post_reset_quirk)(struct sysc *sysc);
void (*reset_done_quirk)(struct sysc *sysc);
void (*module_enable_quirk)(struct sysc *sysc);
void (*module_disable_quirk)(struct sysc *sysc);
+ void (*module_unlock_quirk)(struct sysc *sysc);
+ void (*module_lock_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -624,7 +663,7 @@ static void sysc_check_one_child(struct sysc *ddata,
const char *name;
name = of_get_property(np, "ti,hwmods", NULL);
- if (name)
+ if (name && !of_device_is_compatible(np, "ti,sysc"))
dev_warn(ddata->dev, "really a child ti,hwmods property?");
sysc_check_quirk_stdout(ddata, np);
@@ -861,6 +900,22 @@ static void sysc_show_registers(struct sysc *ddata)
buf);
}
+/**
+ * sysc_write_sysconfig - handle sysconfig quirks for register write
+ * @ddata: device driver data
+ * @value: register value
+ */
+static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
+{
+ if (ddata->module_unlock_quirk)
+ ddata->module_unlock_quirk(ddata);
+
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
+
+ if (ddata->module_lock_quirk)
+ ddata->module_lock_quirk(ddata);
+}
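
Each SYSCONFIG write is now bracketed by the optional unlock/lock hooks so that modules with write-protected configuration registers, such as the am3/am4 RTC handled via SYSC_MODULE_QUIRK_RTC_UNLOCK below, can be updated safely. A hedged sketch of what such a pair might look like (the kick offsets and magic values are the documented am335x RTC ones, but these function bodies are illustrative, not part of this patch):

    static void sysc_quirk_rtc_unlock(struct sysc *ddata)
    {
            sysc_write(ddata, 0x6c, 0x83e70b13);    /* KICK0R */
            sysc_write(ddata, 0x70, 0x95a4f1e0);    /* KICK1R */
    }

    static void sysc_quirk_rtc_lock(struct sysc *ddata)
    {
            sysc_write(ddata, 0x6c, 0);             /* re-arm write protection */
    }
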
+
#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
#define SYSC_CLOCACT_ICK 2
@@ -907,7 +962,7 @@ static int sysc_enable_module(struct device *dev)
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_midle:
/* Set MIDLE mode */
@@ -926,14 +981,14 @@ set_midle:
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_autoidle:
/* Autoidle bit must be enabled separately if available */
if (regbits->autoidle_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
reg |= 1 << regbits->autoidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
}
if (ddata->module_enable_quirk)
@@ -991,7 +1046,7 @@ static int sysc_disable_module(struct device *dev)
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_sidle:
/* Set SIDLE mode */
@@ -1014,7 +1069,7 @@ set_sidle:
if (regbits->autoidle_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
reg |= 1 << regbits->autoidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
return 0;
}
@@ -1216,16 +1271,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
0),
/* Some timers on omap4 and later */
- SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff,
0),
- SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff,
0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
@@ -1238,19 +1293,27 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
- SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
SYSC_QUIRK_SWSUP_SIDLE),
/* Quirks that need to be set based on detected module */
- SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
+ SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
SYSC_MODULE_QUIRK_AESS),
- SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
- SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
- SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_NEEDED),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
SYSC_MODULE_QUIRK_HDQ1W),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
@@ -1263,72 +1326,92 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_MODULE_QUIRK_I2C),
SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
SYSC_MODULE_QUIRK_I2C),
- SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0),
- SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff,
+ SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
SYSC_MODULE_QUIRK_SGX),
- SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff,
+ SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
+ SYSC_MODULE_QUIRK_RTC_UNLOCK),
+ SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
- SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff,
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
+ /* PRUSS on am3, am4 and am5 */
+ SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
+ SYSC_MODULE_QUIRK_PRUSS),
/* Watchdog on am3 and am4 */
SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
- SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
- SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0),
- SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0),
- SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
+ SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
+ SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
+ SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
0xffff00f0, 0),
- SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
- SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
- SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
- SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
- SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
- SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
- SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0),
+ SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
- SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
- SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0),
- SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0),
- SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0),
- SYSC_QUIRK("mcspi", 0, 0, 0x10, -1, 0x40300a0b, 0xffff00ff, 0),
+ SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
+ SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
- SYSC_QUIRK("mailbox", 0, 0, 0x10, -1, 0x00000400, 0xffffffff, 0),
- SYSC_QUIRK("m3", 0, 0, -1, -1, 0x5f580105, 0x0fff0f00, 0),
+ SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
+ SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
- SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0),
- SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0),
- SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4e8b0100, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4f000100, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x40000900, 0xffffffff, 0),
- SYSC_QUIRK("scrm", 0, 0, -1, -1, 0x00000010, 0xffffffff, 0),
- SYSC_QUIRK("sdio", 0, 0, 0x10, -1, 0x40202301, 0xffff0ff0, 0),
+ SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
- SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40000902, 0xffffffff, 0),
- SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40002903, 0xffffffff, 0),
- SYSC_QUIRK("spinlock", 0, 0, 0x10, -1, 0x50020000, 0xffffffff, 0),
- SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -1, 0x00000020, 0xffffffff, 0),
- SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0),
- SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
+ SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
+ SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
+ SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0),
- SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0),
+ SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
+ SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
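A note on the -1 to -ENODEV conversion in the quirk table above: the register offsets ti-sysc parses from the device tree already default to -ENODEV when a register is not present, so using the same sentinel in the table lets the matchers in the following hunks compare offsets directly instead of first checking for negative values. A minimal sketch of the idea (hypothetical, stripped-down struct for illustration only, not the driver's actual definitions):

	#include <linux/errno.h>

	/* Hypothetical, simplified quirk entry for illustration only. */
	struct example_quirk {
		int rev_offset;		/* -ENODEV when the register is absent */
	};

	static bool example_quirk_matches(const struct example_quirk *q,
					  int rev_offset)
	{
		/*
		 * Both sides use -ENODEV for "not present", so a single
		 * comparison covers both the "register missing on both
		 * sides" case and the "offsets equal" case that used to
		 * need an extra q->rev_offset >= 0 check.
		 */
		return q->rev_offset == rev_offset;
	}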
@@ -1350,16 +1433,13 @@ static void sysc_init_early_quirks(struct sysc *ddata)
if (q->base != ddata->module_pa)
continue;
- if (q->rev_offset >= 0 &&
- q->rev_offset != ddata->offsets[SYSC_REVISION])
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
continue;
- if (q->sysc_offset >= 0 &&
- q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
continue;
- if (q->syss_offset >= 0 &&
- q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
continue;
ddata->name = q->name;
@@ -1379,16 +1459,13 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
if (q->base && q->base != ddata->module_pa)
continue;
- if (q->rev_offset >= 0 &&
- q->rev_offset != ddata->offsets[SYSC_REVISION])
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
continue;
- if (q->sysc_offset >= 0 &&
- q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
continue;
- if (q->syss_offset >= 0 &&
- q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
continue;
if (q->revision == ddata->revision ||
@@ -1400,6 +1477,128 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/*
+ * DSS needs dispc outputs disabled to reset modules. Returns mask of
+ * enabled DSS interrupts. Eventually we may be able to do this on
+ * dispc init rather than top-level DSS init.
+ */
+static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ bool disable)
+{
+ bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
+ const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
+ int manager_count;
+ bool framedonetv_irq = true;
+ u32 val, irq_mask = 0;
+
+ switch (sysc_soc->soc) {
+ case SOC_2420 ... SOC_3630:
+ manager_count = 2;
+ framedonetv_irq = false;
+ break;
+ case SOC_4430 ... SOC_4470:
+ manager_count = 3;
+ break;
+ case SOC_5430:
+ case SOC_DRA7:
+ manager_count = 4;
+ break;
+ case SOC_AM4:
+ manager_count = 1;
+ break;
+ case SOC_UNKNOWN:
+ default:
+ return 0;
+ }
+
+ /* Remap the whole module range to be able to reset dispc outputs */
+ devm_iounmap(ddata->dev, ddata->module_va);
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ ddata->module_size);
+ if (!ddata->module_va)
+ return -EIO;
+
+ /* DISP_CONTROL */
+ val = sysc_read(ddata, dispc_offset + 0x40);
+ lcd_en = val & lcd_en_mask;
+ digit_en = val & digit_en_mask;
+ if (lcd_en)
+ irq_mask |= BIT(0); /* FRAMEDONE */
+ if (digit_en) {
+ if (framedonetv_irq)
+ irq_mask |= BIT(24); /* FRAMEDONETV */
+ else
+ irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
+ }
+ if (disable & (lcd_en | digit_en))
+ sysc_write(ddata, dispc_offset + 0x40,
+ val & ~(lcd_en_mask | digit_en_mask));
+
+ if (manager_count <= 2)
+ return irq_mask;
+
+ /* DISPC_CONTROL2 */
+ val = sysc_read(ddata, dispc_offset + 0x238);
+ lcd2_en = val & lcd_en_mask;
+ if (lcd2_en)
+ irq_mask |= BIT(22); /* FRAMEDONE2 */
+ if (disable && lcd2_en)
+ sysc_write(ddata, dispc_offset + 0x238,
+ val & ~lcd_en_mask);
+
+ if (manager_count <= 3)
+ return irq_mask;
+
+ /* DISPC_CONTROL3 */
+ val = sysc_read(ddata, dispc_offset + 0x848);
+ lcd3_en = val & lcd_en_mask;
+ if (lcd3_en)
+ irq_mask |= BIT(30); /* FRAMEDONE3 */
+ if (disable && lcd3_en)
+ sysc_write(ddata, dispc_offset + 0x848,
+ val & ~lcd_en_mask);
+
+ return irq_mask;
+}
+
+/* DSS needs child outputs disabled and SDI registers cleared for reset */
+static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
+{
+ const int dispc_offset = 0x1000;
+ int error;
+ u32 irq_mask, val;
+
+ /* Get enabled outputs */
+ irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
+ if (!irq_mask)
+ return;
+
+ /* Clear IRQSTATUS */
+ sysc_write(ddata, dispc_offset + 0x18, irq_mask);
+
+ /* Disable outputs */
+ val = sysc_quirk_dispc(ddata, dispc_offset, true);
+
+ /* Poll IRQSTATUS */
+ error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
+ val, val != irq_mask, 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "%s: timed out %08x != %08x\n",
+ __func__, val, irq_mask);
+
+ if (sysc_soc->soc == SOC_3430) {
+ /* Clear DSS_SDI_CONTROL */
+ sysc_write(ddata, 0x44, 0);
+
+ /* Clear DSS_PLL_CONTROL */
+ sysc_write(ddata, 0x48, 0);
+ }
+
+ /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not already done */
+ sysc_write(ddata, 0x40, 0);
+}
+
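The DSS reset quirk above relies on readl_poll_timeout() from <linux/iopoll.h> to wait for the FRAMEDONE interrupts. A minimal sketch of that polling pattern in isolation (register offset, mask and timings are hypothetical, not taken from the driver):

	#include <linux/io.h>
	#include <linux/iopoll.h>

	#define EXAMPLE_IRQSTATUS	0x18	/* hypothetical status register */

	static int example_wait_framedone(void __iomem *base, u32 mask)
	{
		u32 val;

		/* Poll every 10 us until all mask bits are set, up to 1 ms. */
		return readl_poll_timeout(base + EXAMPLE_IRQSTATUS, val,
					  (val & mask) == mask, 10, 1000);
	}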
/* 1-wire needs module's internal clocks enabled for reset */
static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
{
@@ -1419,7 +1618,7 @@ static void sysc_module_enable_quirk_aess(struct sysc *ddata)
sysc_write(ddata, offset, 1);
}
-/* I2C needs extra enable bit toggling for reset */
+/* I2C needs to be disabled for reset */
static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
{
int offset;
@@ -1440,14 +1639,48 @@ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
sysc_write(ddata, offset, val);
}
-static void sysc_clk_enable_quirk_i2c(struct sysc *ddata)
+static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, false);
+}
+
+static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
sysc_clk_quirk_i2c(ddata, true);
}
-static void sysc_clk_disable_quirk_i2c(struct sysc *ddata)
+/* RTC on AM3 and AM4 needs to be unlocked and locked for sysconfig */
+static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
- sysc_clk_quirk_i2c(ddata, false);
+ u32 val, kick0_val = 0, kick1_val = 0;
+ unsigned long flags;
+ int error;
+
+ if (!lock) {
+ kick0_val = 0x83e70b13;
+ kick1_val = 0x95a4f1e0;
+ }
+
+ local_irq_save(flags);
+ /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
+ error = readl_poll_timeout(ddata->module_va + 0x44, val,
+ !(val & BIT(0)), 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "rtc busy timeout\n");
+ /* Now we have ~15 microseconds to read/write various registers */
+ sysc_write(ddata, 0x6c, kick0_val);
+ sysc_write(ddata, 0x70, kick1_val);
+ local_irq_restore(flags);
+}
+
+static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, false);
+}
+
+static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, true);
}
/* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */
@@ -1483,20 +1716,30 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
+/* PRUSS needs to set MSTANDBY_INIT in order to idle properly */
+static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ reg |= SYSC_PRUSS_STANDBY_INIT;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
return;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
- ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;
return;
}
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
- ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c;
- ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c;
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
+ ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
return;
}
@@ -1504,6 +1747,16 @@ static void sysc_init_module_quirks(struct sysc *ddata)
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
ddata->module_enable_quirk = sysc_module_enable_quirk_aess;
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
+ ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
+ ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;
+
+ return;
+ }
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
@@ -1511,6 +1764,9 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
static int sysc_clockdomain_init(struct sysc *ddata)
@@ -1572,7 +1828,7 @@ static int sysc_reset(struct sysc *ddata)
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
syss_offset = ddata->offsets[SYSC_SYSSTATUS];
- if (ddata->legacy_mode || sysc_offset < 0 ||
+ if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
return 0;
@@ -1584,19 +1840,21 @@ static int sysc_reset(struct sysc *ddata)
else
syss_done = ddata->cfg.syss_mask;
- if (ddata->clk_disable_quirk)
- ddata->clk_disable_quirk(ddata);
+ if (ddata->pre_reset_quirk)
+ ddata->pre_reset_quirk(ddata);
- sysc_val = sysc_read_sysconfig(ddata);
- sysc_val |= sysc_mask;
- sysc_write(ddata, sysc_offset, sysc_val);
+ if (sysc_offset >= 0) {
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+ }
if (ddata->cfg.srst_udelay)
usleep_range(ddata->cfg.srst_udelay,
ddata->cfg.srst_udelay * 2);
- if (ddata->clk_enable_quirk)
- ddata->clk_enable_quirk(ddata);
+ if (ddata->post_reset_quirk)
+ ddata->post_reset_quirk(ddata);
/* Poll on reset status */
if (syss_offset >= 0) {
@@ -2314,6 +2572,16 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};
+/*
+ * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
+ */
+static const struct sysc_capabilities sysc_pruss = {
+ .type = TI_SYSC_PRUSS,
+ .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
+ .regbits = &sysc_regbits_omap4_simple,
+ .mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
+};
+
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
@@ -2387,6 +2655,154 @@ static void ti_sysc_idle(struct work_struct *work)
pm_runtime_put_sync(ddata->dev);
}
+/*
+ * SoC model and feature detection. Only needed for SoCs that need
+ * special handling for quirks; no need to list the others.
+ */
+static const struct soc_device_attribute sysc_soc_match[] = {
+ SOC_FLAG("OMAP242*", SOC_2420),
+ SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("OMAP3[45]*", SOC_3430),
+ SOC_FLAG("OMAP3[67]*", SOC_3630),
+ SOC_FLAG("OMAP443*", SOC_4430),
+ SOC_FLAG("OMAP446*", SOC_4460),
+ SOC_FLAG("OMAP447*", SOC_4470),
+ SOC_FLAG("OMAP54*", SOC_5430),
+ SOC_FLAG("AM433", SOC_AM3),
+ SOC_FLAG("AM43*", SOC_AM4),
+ SOC_FLAG("DRA7*", SOC_DRA7),
+
+ { /* sentinel */ },
+};
+
+/*
+ * List of SoC variants with disabled features. By default we assume all
+ * devices in the device tree are available, so there is no need to list
+ * those SoCs.
+ */
+static const struct soc_device_attribute sysc_soc_feat_match[] = {
+ /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
+ SOC_FLAG("AM3505", DIS_SGX),
+ SOC_FLAG("OMAP3525", DIS_SGX),
+ SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),
+
+ /* OMAP3630/DM3730 variants with some accelerators disabled */
+ SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
+ SOC_FLAG("DM3725", DIS_SGX),
+ SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
+ SOC_FLAG("OMAP3621", DIS_ISP),
+
+ { /* sentinel */ },
+};
+
+static int sysc_add_disabled(unsigned long base)
+{
+ struct sysc_address *disabled_module;
+
+ disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
+ if (!disabled_module)
+ return -ENOMEM;
+
+ disabled_module->base = base;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_add(&disabled_module->node, &sysc_soc->disabled_modules);
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return 0;
+}
+
+/*
+ * One-time init to detect the booted SoC and disable unavailable features.
+ * Note that we initialize static data shared across all ti-sysc instances
+ * so ddata is only used for SoC type. This can be called from module_init
+ * once we no longer need to rely on platform data.
+ */
+static int sysc_init_soc(struct sysc *ddata)
+{
+ const struct soc_device_attribute *match;
+ struct ti_sysc_platform_data *pdata;
+ unsigned long features = 0;
+
+ if (sysc_soc)
+ return 0;
+
+ sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
+ if (!sysc_soc)
+ return -ENOMEM;
+
+ mutex_init(&sysc_soc->list_lock);
+ INIT_LIST_HEAD(&sysc_soc->disabled_modules);
+ sysc_soc->general_purpose = true;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->soc_type_gp)
+ sysc_soc->general_purpose = pdata->soc_type_gp();
+
+ match = soc_device_match(sysc_soc_match);
+ if (match && match->data)
+ sysc_soc->soc = (int)match->data;
+
+ match = soc_device_match(sysc_soc_feat_match);
+ if (!match)
+ return 0;
+
+ if (match->data)
+ features = (unsigned long)match->data;
+
+ /*
+ * Add disabled devices to the list based on the module base.
+ * Note that this must be done before we attempt to access the
+ * device and have module revision checks working.
+ */
+ if (features & DIS_ISP)
+ sysc_add_disabled(0x480bd400);
+ if (features & DIS_IVA)
+ sysc_add_disabled(0x5d000000);
+ if (features & DIS_SGX)
+ sysc_add_disabled(0x50000000);
+
+ return 0;
+}
+
+static void sysc_cleanup_soc(void)
+{
+ struct sysc_address *disabled_module;
+ struct list_head *pos, *tmp;
+
+ if (!sysc_soc)
+ return;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ list_del(pos);
+ kfree(disabled_module);
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
+static int sysc_check_disabled_devices(struct sysc *ddata)
+{
+ struct sysc_address *disabled_module;
+ struct list_head *pos;
+ int error = 0;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each(pos, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ if (ddata->module_pa == disabled_module->base) {
+ dev_dbg(ddata->dev, "module disabled for this SoC\n");
+ error = -ENODEV;
+ break;
+ }
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return error;
+}
+
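The disabled-device handling above boils down to a mutex-protected list of module base addresses, populated once at init and consulted at probe time. For reference, an equivalent lookup written with list_for_each_entry() instead of the open-coded list_for_each()/list_entry() walk, as a standalone sketch (names hypothetical):

	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/mutex.h>

	struct example_addr {
		struct list_head node;
		unsigned long base;
	};

	static LIST_HEAD(example_disabled);
	static DEFINE_MUTEX(example_lock);

	static int example_check_disabled(unsigned long base)
	{
		struct example_addr *d;
		int error = 0;

		mutex_lock(&example_lock);
		list_for_each_entry(d, &example_disabled, node) {
			if (d->base == base) {
				error = -ENODEV;	/* module fused off */
				break;
			}
		}
		mutex_unlock(&example_lock);

		return error;
	}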
static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
@@ -2405,6 +2821,10 @@ static int sysc_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
+ error = sysc_init_soc(ddata);
+ if (error)
+ return error;
+
error = sysc_init_match(ddata);
if (error)
return error;
@@ -2435,6 +2855,10 @@ static int sysc_probe(struct platform_device *pdev)
sysc_init_early_quirks(ddata);
+ error = sysc_check_disabled_devices(ddata);
+ if (error)
+ return error;
+
error = sysc_get_clocks(ddata);
if (error)
return error;
@@ -2539,6 +2963,7 @@ static const struct of_device_id sysc_match[] = {
{ .compatible = "ti,sysc-usb-host-fs",
.data = &sysc_omap4_usb_host_fs, },
{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
+ { .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
@@ -2565,6 +2990,7 @@ static void __exit sysc_exit(void)
{
bus_unregister_notifier(&platform_bus_type, &sysc_nb);
platform_driver_unregister(&sysc_driver);
+ sysc_cleanup_soc();
}
module_exit(sysc_exit);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 26956c006987..d4665fe9ccd2 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -7,28 +7,6 @@ menu "Character devices"
source "drivers/tty/Kconfig"
-config DEVMEM
- bool "/dev/mem virtual device support"
- default y
- help
- Say Y here if you want to support the /dev/mem device.
- The /dev/mem device is used to access areas of physical
- memory.
- When in doubt, say "Y".
-
-config DEVKMEM
- bool "/dev/kmem virtual device support"
- # On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write
- depends on !ARM64
- help
- Say Y here if you want to support the /dev/kmem device. The
- /dev/kmem device is rarely used, but can be used for certain
- kind of kernel debugging operations.
- When in doubt, say "N".
-
-source "drivers/tty/serial/Kconfig"
-source "drivers/tty/serdev/Kconfig"
-
config TTY_PRINTK
tristate "TTY driver to output user messages via printk"
depends on EXPERT && TTY
@@ -113,8 +91,6 @@ config PPDEV
If unsure, say N.
-source "drivers/tty/hvc/Kconfig"
-
config VIRTIO_CONSOLE
tristate "Virtio console"
depends on VIRTIO && TTY
@@ -220,89 +196,6 @@ config NWFLASH
source "drivers/char/hw_random/Kconfig"
-config NVRAM
- tristate "/dev/nvram support"
- depends on X86 || HAVE_ARCH_NVRAM_OPS
- default M68K || PPC
- ---help---
- If you say Y here and create a character special file /dev/nvram
- with major number 10 and minor number 144 using mknod ("man mknod"),
- you get read and write access to the non-volatile memory.
-
- /dev/nvram may be used to view settings in NVRAM or to change them
- (with some utility). It could also be used to frequently
- save a few bits of very important data that may not be lost over
- power-off and for which writing to disk is too insecure. Note
- however that most NVRAM space in a PC belongs to the BIOS and you
- should NEVER idly tamper with it. See Ralf Brown's interrupt list
- for a guide to the use of CMOS bytes by your BIOS.
-
- This memory is conventionally called "NVRAM" on PowerPC machines,
- "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
-
- To compile this driver as a module, choose M here: the
- module will be called nvram.
-
-#
-# These legacy RTC drivers just cause too many conflicts with the generic
-# RTC framework ... let's not even try to coexist any more.
-#
-if RTC_LIB=n
-
-config RTC
- tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
- depends on ALPHA
- ---help---
- If you say Y here and create a character special file /dev/rtc with
- major number 10 and minor number 135 using mknod ("man mknod"), you
- will get access to the real time clock (or hardware clock) built
- into your computer.
-
- Every PC has such a clock built in. It can be used to generate
- signals from as low as 1Hz up to 8192Hz, and can also be used
- as a 24 hour alarm. It reports status information via the file
- /proc/driver/rtc and its behaviour is set by various ioctls on
- /dev/rtc.
-
- If you run Linux on a multiprocessor machine and said Y to
- "Symmetric Multi Processing" above, you should say Y here to read
- and set the RTC in an SMP compatible fashion.
-
- If you think you have a use for such a device (such as periodic data
- sampling), then say Y here, and read <file:Documentation/admin-guide/rtc.rst>
- for details.
-
- To compile this driver as a module, choose M here: the
- module will be called rtc.
-
-config JS_RTC
- tristate "Enhanced Real Time Clock Support"
- depends on SPARC32 && PCI
- ---help---
- If you say Y here and create a character special file /dev/rtc with
- major number 10 and minor number 135 using mknod ("man mknod"), you
- will get access to the real time clock (or hardware clock) built
- into your computer.
-
- Every PC has such a clock built in. It can be used to generate
- signals from as low as 1Hz up to 8192Hz, and can also be used
- as a 24 hour alarm. It reports status information via the file
- /proc/driver/rtc and its behaviour is set by various ioctls on
- /dev/rtc.
-
- If you think you have a use for such a device (such as periodic data
- sampling), then say Y here, and read <file:Documentation/admin-guide/rtc.rst>
- for details.
-
- To compile this driver as a module, choose M here: the
- module will be called js-rtc.
-
-config EFI_RTC
- bool "EFI Real Time Clock Services"
- depends on IA64
-
-endif # RTC_LIB
-
config DTLK
tristate "Double Talk PC internal speech card support"
depends on ISA
@@ -431,6 +324,48 @@ config NSC_GPIO
pc8736x_gpio drivers. If those drivers are built as
modules, this one will be too, named nsc_gpio
+config DEVMEM
+ bool "/dev/mem virtual device support"
+ default y
+ help
+ Say Y here if you want to support the /dev/mem device.
+ The /dev/mem device is used to access areas of physical
+ memory.
+ When in doubt, say "Y".
+
+config DEVKMEM
+ bool "/dev/kmem virtual device support"
+ # On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write
+ depends on !ARM64
+ help
+ Say Y here if you want to support the /dev/kmem device. The
+ /dev/kmem device is rarely used, but can be used for certain
+ kinds of kernel debugging operations.
+ When in doubt, say "N".
+
+config NVRAM
+ tristate "/dev/nvram support"
+ depends on X86 || HAVE_ARCH_NVRAM_OPS
+ default M68K || PPC
+ ---help---
+ If you say Y here and create a character special file /dev/nvram
+ with major number 10 and minor number 144 using mknod ("man mknod"),
+ you get read and write access to the non-volatile memory.
+
+ /dev/nvram may be used to view settings in NVRAM or to change them
+ (with some utility). It could also be used to frequently
+ save a few bits of very important data that may not be lost over
+ power-off and for which writing to disk is too insecure. Note
+ however that most NVRAM space in a PC belongs to the BIOS and you
+ should NEVER idly tamper with it. See Ralf Brown's interrupt list
+ for a guide to the use of CMOS bytes by your BIOS.
+
+ This memory is conventionally called "NVRAM" on PowerPC machines,
+ "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvram.
+
config RAW_DRIVER
tristate "RAW driver (/dev/raw/rawN)"
depends on BLOCK
@@ -452,6 +387,14 @@ config MAX_RAW_DEVS
Default is 256. Increase this number in case you need lots of
raw devices.
+config DEVPORT
+ bool "/dev/port character device"
+ depends on ISA || PCI
+ default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
+
config HPET
bool "HPET - High Precision Event Timer" if (X86 || IA64)
default n
@@ -511,14 +454,6 @@ config TELCLOCK
/sys/devices/platform/telco_clock, with a number of files for
controlling the behavior of this hardware.
-config DEVPORT
- bool "/dev/port character device"
- depends on ISA || PCI
- default y
- help
- Say Y here if you want to support the /dev/port device. The /dev/port
- device is similar to /dev/mem, but for I/O ports.
-
source "drivers/s390/char/Kconfig"
source "drivers/char/xillybus/Kconfig"
@@ -539,7 +474,7 @@ endmenu
config RANDOM_TRUST_CPU
bool "Trust the CPU manufacturer to initialize Linux's CRNG"
- depends on X86 || S390 || PPC
+ depends on ARCH_RANDOM
default n
help
Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7c5ea6f9df14..ffce287ef415 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -20,9 +20,7 @@ obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
obj-$(CONFIG_DTLK) += dtlk.o
obj-$(CONFIG_APPLICOM) += applicom.o
obj-$(CONFIG_SONYPI) += sonypi.o
-obj-$(CONFIG_RTC) += rtc.o
obj-$(CONFIG_HPET) += hpet.o
-obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
obj-$(CONFIG_NVRAM) += nvram.o
obj-$(CONFIG_TOSHIBA) += toshiba.o
@@ -46,9 +44,6 @@ obj-$(CONFIG_TCG_TPM) += tpm/
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
-obj-$(CONFIG_JS_RTC) += js-rtc.o
-js-rtc-y = rtc.o
-
obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
obj-$(CONFIG_ADI) += adi.o
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 594aee281977..b40edae32817 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -775,7 +775,7 @@ int __init agp_amd64_init(void)
}
/* First check that we have at least one AMD64 NB */
- if (!pci_dev_present(amd_nb_misc_ids)) {
+ if (!amd_nb_num()) {
pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 51121a4b82c7..14b2d8034c51 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -53,7 +53,6 @@
#define MAX_BOARD 8 /* maximum of pc board possible */
#define MAX_ISA_BOARD 4
#define LEN_RAM_IO 0x800
-#define AC_MINOR 157
#ifndef PCI_VENDOR_ID_APPLICOM
#define PCI_VENDOR_ID_APPLICOM 0x1389
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
deleted file mode 100644
index 4f73064d0c6f..000000000000
--- a/drivers/char/efirtc.c
+++ /dev/null
@@ -1,366 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * EFI Time Services Driver for Linux
- *
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
- *
- * Based on skeleton from the drivers/char/rtc.c driver by P. Gortmaker
- *
- * This code provides an architected & portable interface to the real time
- * clock by using EFI instead of direct bit fiddling. The functionalities are
- * quite different from the rtc.c driver. The only way to talk to the device
- * is by using ioctl(). There is a /proc interface which provides the raw
- * information.
- *
- * Please note that we have kept the API as close as possible to the
- * legacy RTC. The standard /sbin/hwclock program should work normally
- * when used to get/set the time.
- *
- * NOTES:
- * - Locking is required for safe execution of EFI calls with regards
- * to interrupts and SMP.
- *
- * TODO (December 1999):
- * - provide the API to set/get the WakeUp Alarm (different from the
- * rtc.c alarm).
- * - SMP testing
- * - Add module support
- */
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/init.h>
-#include <linux/rtc.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/efi.h>
-#include <linux/uaccess.h>
-
-
-#define EFI_RTC_VERSION "0.4"
-
-#define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT)
-/*
- * EFI Epoch is 1/1/1998
- */
-#define EFI_RTC_EPOCH 1998
-
-static DEFINE_SPINLOCK(efi_rtc_lock);
-
-static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-
-#define is_leap(year) \
- ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
-
-static const unsigned short int __mon_yday[2][13] =
-{
- /* Normal years. */
- { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
- /* Leap years. */
- { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
-};
-
-/*
- * returns day of the year [0-365]
- */
-static inline int
-compute_yday(efi_time_t *eft)
-{
- /* efi_time_t.month is in the [1-12] so, we need -1 */
- return __mon_yday[is_leap(eft->year)][eft->month-1]+ eft->day -1;
-}
-/*
- * returns day of the week [0-6] 0=Sunday
- *
- * Don't try to provide a year that's before 1998, please !
- */
-static int
-compute_wday(efi_time_t *eft)
-{
- int y;
- int ndays = 0;
-
- if ( eft->year < 1998 ) {
- printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n");
- return -1;
- }
-
- for(y=EFI_RTC_EPOCH; y < eft->year; y++ ) {
- ndays += 365 + (is_leap(y) ? 1 : 0);
- }
- ndays += compute_yday(eft);
-
- /*
- * 4=1/1/1998 was a Thursday
- */
- return (ndays + 4) % 7;
-}
-
-static void
-convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
-{
-
- eft->year = wtime->tm_year + 1900;
- eft->month = wtime->tm_mon + 1;
- eft->day = wtime->tm_mday;
- eft->hour = wtime->tm_hour;
- eft->minute = wtime->tm_min;
- eft->second = wtime->tm_sec;
- eft->nanosecond = 0;
- eft->daylight = wtime->tm_isdst ? EFI_ISDST: 0;
- eft->timezone = EFI_UNSPECIFIED_TIMEZONE;
-}
-
-static void
-convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
-{
- memset(wtime, 0, sizeof(*wtime));
- wtime->tm_sec = eft->second;
- wtime->tm_min = eft->minute;
- wtime->tm_hour = eft->hour;
- wtime->tm_mday = eft->day;
- wtime->tm_mon = eft->month - 1;
- wtime->tm_year = eft->year - 1900;
-
- /* day of the week [0-6], Sunday=0 */
- wtime->tm_wday = compute_wday(eft);
-
- /* day in the year [1-365]*/
- wtime->tm_yday = compute_yday(eft);
-
-
- switch (eft->daylight & EFI_ISDST) {
- case EFI_ISDST:
- wtime->tm_isdst = 1;
- break;
- case EFI_TIME_ADJUST_DAYLIGHT:
- wtime->tm_isdst = 0;
- break;
- default:
- wtime->tm_isdst = -1;
- }
-}
-
-static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
-
- efi_status_t status;
- unsigned long flags;
- efi_time_t eft;
- efi_time_cap_t cap;
- struct rtc_time wtime;
- struct rtc_wkalrm __user *ewp;
- unsigned char enabled, pending;
-
- switch (cmd) {
- case RTC_UIE_ON:
- case RTC_UIE_OFF:
- case RTC_PIE_ON:
- case RTC_PIE_OFF:
- case RTC_AIE_ON:
- case RTC_AIE_OFF:
- case RTC_ALM_SET:
- case RTC_ALM_READ:
- case RTC_IRQP_READ:
- case RTC_IRQP_SET:
- case RTC_EPOCH_READ:
- case RTC_EPOCH_SET:
- return -EINVAL;
-
- case RTC_RD_TIME:
- spin_lock_irqsave(&efi_rtc_lock, flags);
-
- status = efi.get_time(&eft, &cap);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- if (status != EFI_SUCCESS) {
- /* should never happen */
- printk(KERN_ERR "efitime: can't read time\n");
- return -EINVAL;
- }
-
- convert_from_efi_time(&eft, &wtime);
-
- return copy_to_user((void __user *)arg, &wtime,
- sizeof (struct rtc_time)) ? - EFAULT : 0;
-
- case RTC_SET_TIME:
-
- if (!capable(CAP_SYS_TIME)) return -EACCES;
-
- if (copy_from_user(&wtime, (struct rtc_time __user *)arg,
- sizeof(struct rtc_time)) )
- return -EFAULT;
-
- convert_to_efi_time(&wtime, &eft);
-
- spin_lock_irqsave(&efi_rtc_lock, flags);
-
- status = efi.set_time(&eft);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- return status == EFI_SUCCESS ? 0 : -EINVAL;
-
- case RTC_WKALM_SET:
-
- if (!capable(CAP_SYS_TIME)) return -EACCES;
-
- ewp = (struct rtc_wkalrm __user *)arg;
-
- if ( get_user(enabled, &ewp->enabled)
- || copy_from_user(&wtime, &ewp->time, sizeof(struct rtc_time)) )
- return -EFAULT;
-
- convert_to_efi_time(&wtime, &eft);
-
- spin_lock_irqsave(&efi_rtc_lock, flags);
- /*
- * XXX Fixme:
- * As of EFI 0.92 with the firmware I have on my
- * machine this call does not seem to work quite
- * right
- */
- status = efi.set_wakeup_time((efi_bool_t)enabled, &eft);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- return status == EFI_SUCCESS ? 0 : -EINVAL;
-
- case RTC_WKALM_RD:
-
- spin_lock_irqsave(&efi_rtc_lock, flags);
-
- status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- if (status != EFI_SUCCESS) return -EINVAL;
-
- ewp = (struct rtc_wkalrm __user *)arg;
-
- if ( put_user(enabled, &ewp->enabled)
- || put_user(pending, &ewp->pending)) return -EFAULT;
-
- convert_from_efi_time(&eft, &wtime);
-
- return copy_to_user(&ewp->time, &wtime,
- sizeof(struct rtc_time)) ? -EFAULT : 0;
- }
- return -ENOTTY;
-}
-
-/*
- * The various file operations we support.
- */
-
-static const struct file_operations efi_rtc_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = efi_rtc_ioctl,
- .llseek = no_llseek,
-};
-
-static struct miscdevice efi_rtc_dev= {
- EFI_RTC_MINOR,
- "efirtc",
- &efi_rtc_fops
-};
-
-/*
- * We export RAW EFI information to /proc/driver/efirtc
- */
-static int efi_rtc_proc_show(struct seq_file *m, void *v)
-{
- efi_time_t eft, alm;
- efi_time_cap_t cap;
- efi_bool_t enabled, pending;
- unsigned long flags;
-
- memset(&eft, 0, sizeof(eft));
- memset(&alm, 0, sizeof(alm));
- memset(&cap, 0, sizeof(cap));
-
- spin_lock_irqsave(&efi_rtc_lock, flags);
-
- efi.get_time(&eft, &cap);
- efi.get_wakeup_time(&enabled, &pending, &alm);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- seq_printf(m,
- "Time : %u:%u:%u.%09u\n"
- "Date : %u-%u-%u\n"
- "Daylight : %u\n",
- eft.hour, eft.minute, eft.second, eft.nanosecond,
- eft.year, eft.month, eft.day,
- eft.daylight);
-
- if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- seq_puts(m, "Timezone : unspecified\n");
- else
- /* XXX fixme: convert to string? */
- seq_printf(m, "Timezone : %u\n", eft.timezone);
-
-
- seq_printf(m,
- "Alarm Time : %u:%u:%u.%09u\n"
- "Alarm Date : %u-%u-%u\n"
- "Alarm Daylight : %u\n"
- "Enabled : %s\n"
- "Pending : %s\n",
- alm.hour, alm.minute, alm.second, alm.nanosecond,
- alm.year, alm.month, alm.day,
- alm.daylight,
- enabled == 1 ? "yes" : "no",
- pending == 1 ? "yes" : "no");
-
- if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- seq_puts(m, "Timezone : unspecified\n");
- else
- /* XXX fixme: convert to string? */
- seq_printf(m, "Timezone : %u\n", alm.timezone);
-
- /*
- * now prints the capabilities
- */
- seq_printf(m,
- "Resolution : %u\n"
- "Accuracy : %u\n"
- "SetstoZero : %u\n",
- cap.resolution, cap.accuracy, cap.sets_to_zero);
-
- return 0;
-}
-static int __init
-efi_rtc_init(void)
-{
- int ret;
- struct proc_dir_entry *dir;
-
- printk(KERN_INFO "EFI Time Services Driver v%s\n", EFI_RTC_VERSION);
-
- ret = misc_register(&efi_rtc_dev);
- if (ret) {
- printk(KERN_ERR "efirtc: can't misc_register on minor=%d\n",
- EFI_RTC_MINOR);
- return ret;
- }
-
- dir = proc_create_single("driver/efirtc", 0, NULL, efi_rtc_proc_show);
- if (dir == NULL) {
- printk(KERN_ERR "efirtc: can't create /proc/driver/efirtc.\n");
- misc_deregister(&efi_rtc_dev);
- return -1;
- }
- return 0;
-}
-device_initcall(efi_rtc_init);
-
-/*
-MODULE_LICENSE("GPL");
-*/
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 914e293ba62b..9bc46da8d77a 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -244,7 +244,8 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_IMX_RNGC
tristate "Freescale i.MX RNGC Random Number Generator"
- depends on ARCH_MXC
+ depends on HAS_IOMEM && HAVE_CLK
+ depends on SOC_IMX25 || COMPILE_TEST
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -466,6 +467,13 @@ config HW_RANDOM_NPCM
If unsure, say Y.
+config HW_RANDOM_KEYSTONE
+ depends on ARCH_KEYSTONE || COMPILE_TEST
+ default HW_RANDOM
+ tristate "TI Keystone NETCP SA Hardware random number generator"
+ help
+ This option enables Keystone's hardware random generator.
+
endif # HW_RANDOM
config UML_RANDOM
@@ -482,10 +490,3 @@ config UML_RANDOM
(check your distro, or download from
http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random.
-
-config HW_RANDOM_KEYSTONE
- depends on ARCH_KEYSTONE || COMPILE_TEST
- default HW_RANDOM
- tristate "TI Keystone NETCP SA Hardware random number generator"
- help
- This option enables Keystone's hardware random generator.
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 30cf00f8e9a0..9c47e431ce90 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -18,12 +18,22 @@
#include <linux/completion.h>
#include <linux/io.h>
+#define RNGC_VER_ID 0x0000
#define RNGC_COMMAND 0x0004
#define RNGC_CONTROL 0x0008
#define RNGC_STATUS 0x000C
#define RNGC_ERROR 0x0010
#define RNGC_FIFO 0x0014
+/* the fields in the ver id register */
+#define RNGC_TYPE_SHIFT 28
+#define RNGC_VER_MAJ_SHIFT 8
+
+/* the rng_type field */
+#define RNGC_TYPE_RNGB 0x1
+#define RNGC_TYPE_RNGC 0x2
+
+
#define RNGC_CMD_CLR_ERR 0x00000020
#define RNGC_CMD_CLR_INT 0x00000010
#define RNGC_CMD_SEED 0x00000002
@@ -31,6 +41,7 @@
#define RNGC_CTRL_MASK_ERROR 0x00000040
#define RNGC_CTRL_MASK_DONE 0x00000020
+#define RNGC_CTRL_AUTO_SEED 0x00000010
#define RNGC_STATUS_ERROR 0x00010000
#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
@@ -100,15 +111,11 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
- if (!ret) {
- imx_rngc_irq_mask_clear(rngc);
+ imx_rngc_irq_mask_clear(rngc);
+ if (!ret)
return -ETIMEDOUT;
- }
- if (rngc->err_reg != 0)
- return -EIO;
-
- return 0;
+ return rngc->err_reg ? -EIO : 0;
}
static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -165,17 +172,17 @@ static irqreturn_t imx_rngc_irq(int irq, void *priv)
static int imx_rngc_init(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
- u32 cmd;
+ u32 cmd, ctrl;
int ret;
/* clear error */
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);
+ imx_rngc_irq_unmask(rngc);
+
/* create seed, repeat while there is some statistical error */
do {
- imx_rngc_irq_unmask(rngc);
-
/* seed creation */
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
@@ -184,13 +191,42 @@ static int imx_rngc_init(struct hwrng *rng)
RNGC_TIMEOUT);
if (!ret) {
- imx_rngc_irq_mask_clear(rngc);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto err;
}
} while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
- return rngc->err_reg ? -EIO : 0;
+ if (rngc->err_reg) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /*
+ * Enable automatic seeding: the RNGC creates a new seed automatically
+ * after serving 2^20 random 160-bit words.
+ */
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl |= RNGC_CTRL_AUTO_SEED;
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+
+ /*
+ * if initialisation was successful, we keep the interrupt
+ * unmasked until imx_rngc_cleanup is called
+ * we mask the interrupt ourselves if we return an error
+ */
+ return 0;
+
+err:
+ imx_rngc_irq_mask_clear(rngc);
+ return ret;
+}
+
+static void imx_rngc_cleanup(struct hwrng *rng)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+
+ imx_rngc_irq_mask_clear(rngc);
}
static int imx_rngc_probe(struct platform_device *pdev)
@@ -198,6 +234,8 @@ static int imx_rngc_probe(struct platform_device *pdev)
struct imx_rngc *rngc;
int ret;
int irq;
+ u32 ver_id;
+ u8 rng_type;
rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
if (!rngc)
@@ -223,6 +261,17 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ver_id = readl(rngc->base + RNGC_VER_ID);
+ rng_type = ver_id >> RNGC_TYPE_SHIFT;
+ /*
+ * This driver supports only RNGC and RNGB. (There's a different
+ * driver for RNGA.)
+ */
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
+ ret = -ENODEV;
+ goto err;
+ }
+
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
if (ret) {
@@ -235,6 +284,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
rngc->rng.name = pdev->name;
rngc->rng.init = imx_rngc_init;
rngc->rng.read = imx_rngc_read;
+ rngc->rng.cleanup = imx_rngc_cleanup;
rngc->dev = &pdev->dev;
platform_set_drvdata(pdev, rngc);
@@ -244,18 +294,21 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
- dev_err(rngc->dev, "FSL RNGC self test failed.\n");
+ dev_err(rngc->dev, "self test failed\n");
goto err;
}
}
ret = hwrng_register(&rngc->rng);
if (ret) {
- dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret);
+ dev_err(&pdev->dev, "hwrng registration failed\n");
goto err;
}
- dev_info(&pdev->dev, "Freescale RNGC registered.\n");
+ dev_info(&pdev->dev,
+ "Freescale RNG%c registered (HW revision %d.%02d)\n",
+ rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
+ (ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;
err:
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index e08a8887e718..e0d77fa048fb 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -4,7 +4,7 @@
* Copyright (C) 2009 Nokia Corporation
* Author: Juha Yrjola <juha.yrjola@solidboot.com>
*
- * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2013 Pali Rohár <pali@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -177,5 +178,5 @@ module_platform_driver(omap3_rom_rng_driver);
MODULE_ALIAS("platform:omap3-rom-rng");
MODULE_AUTHOR("Juha Yrjola");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index ffe9b0c6c647..39943bc3651a 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -209,20 +209,19 @@ static int __init mod_init(void)
out:
return err;
}
+module_init(mod_init);
static void __exit mod_exit(void)
{
hwrng_unregister(&via_rng);
}
-
-module_init(mod_init);
module_exit(mod_exit);
static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
+ X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
{}
};
+MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);
MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 718d8c087650..79a6e47b5fbc 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -11,6 +11,7 @@
#include <linux/virtio.h>
#include <linux/virtio_rng.h>
#include <linux/module.h>
+#include <linux/slab.h>
static DEFINE_IDA(rng_index_ida);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index cad9563f8f48..c48d8f086382 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -618,6 +618,8 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
+#define ipmi_interfaces_mutex_held() \
+ lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;
/*
@@ -1321,7 +1323,8 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
* synchronize_srcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
- list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
if (rcvr->user == user) {
list_del_rcu(&rcvr->link);
rcvr->next = rcvrs;
@@ -1599,7 +1602,8 @@ static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
{
struct cmd_rcvr *rcvr;
- list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
&& (rcvr->chans & (1 << chan)))
return rcvr;
@@ -1614,7 +1618,8 @@ static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
{
struct cmd_rcvr *rcvr;
- list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
&& (rcvr->chans & chans))
return 0;
@@ -3188,8 +3193,8 @@ static void __get_guid(struct ipmi_smi *intf)
if (rv)
/* Send failed, no GUID available. */
bmc->dyn_guid_set = 0;
-
- wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+ else
+ wait_event(intf->waitq, bmc->dyn_guid_set != 2);
/* dyn_guid_set makes the guid data available. */
smp_rmb();
@@ -3450,7 +3455,8 @@ int ipmi_add_smi(struct module *owner,
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
- list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+ list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
+ ipmi_interfaces_mutex_held()) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
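The extra argument added to list_for_each_entry_rcu() in the hunks above is the optional lockdep expression introduced around v5.4: it tells the RCU list debugging code that the traversal is safe because the update-side lock is held, so no rcu_read_lock() is required. A minimal hedged sketch of the pattern (names hypothetical):

	#include <linux/mutex.h>
	#include <linux/rculist.h>

	struct example_item {
		struct list_head link;
		int id;
	};

	static LIST_HEAD(example_items);
	static DEFINE_MUTEX(example_items_mutex);

	/* Caller holds example_items_mutex; no rcu_read_lock() needed. */
	static struct example_item *example_find(int id)
	{
		struct example_item *it;

		list_for_each_entry_rcu(it, &example_items, link,
					lockdep_is_held(&example_items_mutex))
			if (it->id == id)
				return it;

		return NULL;
	}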
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 8ac390c2b514..2704470e021d 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -313,6 +313,7 @@ static int start_send(struct ssif_info *ssif_info,
static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
unsigned long *flags)
+ __acquires(&ssif_info->lock)
{
spin_lock_irqsave(&ssif_info->lock, *flags);
return flags;
@@ -320,6 +321,7 @@ static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
unsigned long *flags)
+ __releases(&ssif_info->lock)
{
spin_unlock_irqrestore(&ssif_info->lock, *flags);
}
@@ -1945,8 +1947,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
if (adev->type != &i2c_adapter_type)
return 0;
- addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
- &addr_info->binfo);
+ addr_info->added_client = i2c_new_client_device(to_i2c_adapter(adev),
+ &addr_info->binfo);
if (!addr_info->adapter_name)
return 1; /* Only try the first I2C adapter by default. */
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index 3c955946e647..a140203c079b 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -12,6 +12,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
@@ -233,58 +234,154 @@ static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
{ .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
};
-static int aspeed_kcs_probe(struct platform_device *pdev)
+static struct kcs_bmc *aspeed_kcs_probe_of_v1(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct aspeed_kcs_bmc *priv;
- struct kcs_bmc *kcs_bmc;
- u32 chan, addr;
+ struct device_node *np;
+ struct kcs_bmc *kcs;
+ u32 channel;
+ u32 slave;
int rc;
- rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan);
- if ((rc != 0) || (chan == 0 || chan > KCS_CHANNEL_MAX)) {
- dev_err(dev, "no valid 'kcs_chan' configured\n");
- return -ENODEV;
+ np = pdev->dev.of_node;
+
+ rc = of_property_read_u32(np, "kcs_chan", &channel);
+ if ((rc != 0) || (channel == 0 || channel > KCS_CHANNEL_MAX)) {
+ dev_err(&pdev->dev, "no valid 'kcs_chan' configured\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ kcs = kcs_bmc_alloc(&pdev->dev, sizeof(struct aspeed_kcs_bmc), channel);
+ if (!kcs)
+ return ERR_PTR(-ENOMEM);
+
+ priv = kcs_bmc_priv(kcs);
+ priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return ERR_PTR(-ENODEV);
}
- rc = of_property_read_u32(dev->of_node, "kcs_addr", &addr);
+ rc = of_property_read_u32(np, "kcs_addr", &slave);
if (rc) {
- dev_err(dev, "no valid 'kcs_addr' configured\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "no valid 'kcs_addr' configured\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ kcs->ioreg = ast_kcs_bmc_ioregs[channel - 1];
+ aspeed_kcs_set_address(kcs, slave);
+
+ return kcs;
+}
+
+static int aspeed_kcs_calculate_channel(const struct kcs_ioreg *regs)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
+ if (!memcmp(&ast_kcs_bmc_ioregs[i], regs, sizeof(*regs)))
+ return i + 1;
}
- kcs_bmc = kcs_bmc_alloc(dev, sizeof(*priv), chan);
- if (!kcs_bmc)
- return -ENOMEM;
+ return -EINVAL;
+}
+
+static struct kcs_bmc *aspeed_kcs_probe_of_v2(struct platform_device *pdev)
+{
+ struct aspeed_kcs_bmc *priv;
+ struct device_node *np;
+ struct kcs_ioreg ioreg;
+ struct kcs_bmc *kcs;
+ const __be32 *reg;
+ int channel;
+ u32 slave;
+ int rc;
+
+ np = pdev->dev.of_node;
+
+ /* Don't translate addresses; we want offsets for the regmaps */
+ reg = of_get_address(np, 0, NULL, NULL);
+ if (!reg)
+ return ERR_PTR(-EINVAL);
+ ioreg.idr = be32_to_cpup(reg);
+
+ reg = of_get_address(np, 1, NULL, NULL);
+ if (!reg)
+ return ERR_PTR(-EINVAL);
+ ioreg.odr = be32_to_cpup(reg);
+
+ reg = of_get_address(np, 2, NULL, NULL);
+ if (!reg)
+ return ERR_PTR(-EINVAL);
+ ioreg.str = be32_to_cpup(reg);
- priv = kcs_bmc_priv(kcs_bmc);
- priv->map = syscon_node_to_regmap(dev->parent->of_node);
+ channel = aspeed_kcs_calculate_channel(&ioreg);
+ if (channel < 0)
+ return ERR_PTR(channel);
+
+ kcs = kcs_bmc_alloc(&pdev->dev, sizeof(struct aspeed_kcs_bmc), channel);
+ if (!kcs)
+ return ERR_PTR(-ENOMEM);
+
+ kcs->ioreg = ioreg;
+
+ priv = kcs_bmc_priv(kcs);
+ priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
if (IS_ERR(priv->map)) {
- dev_err(dev, "Couldn't get regmap\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return ERR_PTR(-ENODEV);
}
- kcs_bmc->ioreg = ast_kcs_bmc_ioregs[chan - 1];
+ rc = of_property_read_u32(np, "aspeed,lpc-io-reg", &slave);
+ if (rc)
+ return ERR_PTR(rc);
+
+ aspeed_kcs_set_address(kcs, slave);
+
+ return kcs;
+}
+
+static int aspeed_kcs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct kcs_bmc *kcs_bmc;
+ struct device_node *np;
+ int rc;
+
+ np = pdev->dev.of_node;
+ if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc") ||
+ of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc"))
+ kcs_bmc = aspeed_kcs_probe_of_v1(pdev);
+ else if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc-v2") ||
+ of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc-v2"))
+ kcs_bmc = aspeed_kcs_probe_of_v2(pdev);
+ else
+ return -EINVAL;
+
+ if (IS_ERR(kcs_bmc))
+ return PTR_ERR(kcs_bmc);
+
kcs_bmc->io_inputb = aspeed_kcs_inb;
kcs_bmc->io_outputb = aspeed_kcs_outb;
- dev_set_drvdata(dev, kcs_bmc);
-
- aspeed_kcs_set_address(kcs_bmc, addr);
- aspeed_kcs_enable_channel(kcs_bmc, true);
rc = aspeed_kcs_config_irq(kcs_bmc, pdev);
if (rc)
return rc;
+ dev_set_drvdata(dev, kcs_bmc);
+
+ aspeed_kcs_enable_channel(kcs_bmc, true);
+
rc = misc_register(&kcs_bmc->miscdev);
if (rc) {
dev_err(dev, "Unable to register device\n");
return rc;
}
- pr_info("channel=%u addr=0x%x idr=0x%x odr=0x%x str=0x%x\n",
- chan, addr,
- kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str);
+ dev_dbg(&pdev->dev,
+ "Probed KCS device %d (IDR=0x%x, ODR=0x%x, STR=0x%x)\n",
+ kcs_bmc->channel, kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr,
+ kcs_bmc->ioreg.str);
return 0;
}
@@ -301,6 +398,8 @@ static int aspeed_kcs_remove(struct platform_device *pdev)
static const struct of_device_id ast_kcs_bmc_match[] = {
{ .compatible = "aspeed,ast2400-kcs-bmc" },
{ .compatible = "aspeed,ast2500-kcs-bmc" },
+ { .compatible = "aspeed,ast2400-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2500-kcs-bmc-v2" },
{ }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index a9d9f074fbd6..7d583222e8fa 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -75,7 +75,7 @@ struct vma_data {
enum mspec_page_type type; /* Type of pages allocated. */
unsigned long vm_start; /* Original (unsplit) base. */
unsigned long vm_end; /* Original (unsplit) end. */
- unsigned long maddr[0]; /* Array of MSPEC addresses. */
+ unsigned long maddr[]; /* Array of MSPEC addresses. */
};
/*
diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h
index 9dedfd7adc0e..f2b9fdc1f9ea 100644
--- a/drivers/char/nwbutton.h
+++ b/drivers/char/nwbutton.h
@@ -14,7 +14,6 @@
#define NUM_PRESSES_REBOOT 2 /* How many presses to activate shutdown */
#define BUTTON_DELAY 30 /* How many jiffies for sequence to end */
#define VERSION "0.3" /* Driver version number */
-#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */
/* Structure definitions: */
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index a4a0797daa19..0973c2c2b01a 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -576,7 +576,7 @@ static const struct file_operations flash_fops =
static struct miscdevice flash_miscdev =
{
- FLASH_MINOR,
+ NWFLASH_MINOR,
"nwflash",
&flash_fops
};
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 15bf585af5d3..4edb4174a1e2 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -731,8 +731,9 @@ static void monitor_card(struct timer_list *t)
}
switch (dev->mstate) {
+ case M_CARDOFF: {
unsigned char flags0;
- case M_CARDOFF:
+
DEBUGP(4, dev, "M_CARDOFF\n");
flags0 = inb(REG_FLAGS0(iobase));
if (flags0 & 0x02) {
@@ -755,6 +756,7 @@ static void monitor_card(struct timer_list *t)
dev->mdelay = T_50MSEC;
}
break;
+ }
case M_FETCH_ATR:
DEBUGP(4, dev, "M_FETCH_ATR\n");
xoutb(0x80, REG_FLAGS0(iobase));
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 2c2381a806ae..38b46c7d1737 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -355,14 +355,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct pp_struct *pp = file->private_data;
struct parport *port;
void __user *argp = (void __user *)arg;
+ struct ieee1284_info *info;
+ unsigned char reg;
+ unsigned char mask;
+ int mode;
+ s32 time32[2];
+ s64 time64[2];
+ struct timespec64 ts;
+ int ret;
/* First handle the cases that don't take arguments. */
switch (cmd) {
case PPCLAIM:
{
- struct ieee1284_info *info;
- int ret;
-
if (pp->flags & PP_CLAIMED) {
dev_dbg(&pp->pdev->dev, "you've already got it!\n");
return -EINVAL;
@@ -513,15 +518,6 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
port = pp->pdev->port;
switch (cmd) {
- struct ieee1284_info *info;
- unsigned char reg;
- unsigned char mask;
- int mode;
- s32 time32[2];
- s64 time64[2];
- struct timespec64 ts;
- int ret;
-
case PPRSTATUS:
reg = parport_read_status(port);
if (copy_to_user(argp, &reg, sizeof(reg)))
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c7f9584de2c8..0d10e31fd342 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -781,27 +781,55 @@ static int __init parse_trust_cpu(char *arg)
}
early_param("random.trust_cpu", parse_trust_cpu);
-static void crng_initialize(struct crng_state *crng)
+static bool crng_init_try_arch(struct crng_state *crng)
{
int i;
- int arch_init = 1;
+ bool arch_init = true;
unsigned long rv;
- memcpy(&crng->state[0], "expand 32-byte k", 16);
- if (crng == &primary_crng)
- _extract_entropy(&input_pool, &crng->state[4],
- sizeof(__u32) * 12, 0);
- else
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv)) {
rv = random_get_entropy();
- arch_init = 0;
+ arch_init = false;
+ }
+ crng->state[i] ^= rv;
+ }
+
+ return arch_init;
+}
+
+static bool __init crng_init_try_arch_early(struct crng_state *crng)
+{
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
+
+ for (i = 4; i < 16; i++) {
+ if (!arch_get_random_seed_long_early(&rv) &&
+ !arch_get_random_long_early(&rv)) {
+ rv = random_get_entropy();
+ arch_init = false;
}
crng->state[i] ^= rv;
}
- if (trust_cpu && arch_init && crng == &primary_crng) {
+
+ return arch_init;
+}
+
+static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+{
+ memcpy(&crng->state[0], "expand 32-byte k", 16);
+ _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ crng_init_try_arch(crng);
+ crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+static void __init crng_initialize_primary(struct crng_state *crng)
+{
+ memcpy(&crng->state[0], "expand 32-byte k", 16);
+ _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
+ if (crng_init_try_arch_early(crng) && trust_cpu) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
@@ -822,7 +850,7 @@ static void do_numa_crng_init(struct work_struct *work)
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
spin_lock_init(&crng->lock);
- crng_initialize(crng);
+ crng_initialize_secondary(crng);
pool[i] = crng;
}
mb();
@@ -1142,14 +1170,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
- delta = sample.jiffies - state->last_time;
- state->last_time = sample.jiffies;
+ delta = sample.jiffies - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, sample.jiffies);
- delta2 = delta - state->last_delta;
- state->last_delta = delta;
+ delta2 = delta - READ_ONCE(state->last_delta);
+ WRITE_ONCE(state->last_delta, delta);
- delta3 = delta2 - state->last_delta2;
- state->last_delta2 = delta2;
+ delta3 = delta2 - READ_ONCE(state->last_delta2);
+ WRITE_ONCE(state->last_delta2, delta2);
if (delta < 0)
delta = -delta;
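For context on the hunk above: the three state fields being converted to READ_ONCE()/WRITE_ONCE() hold a chain of timing deltas, and a short worked example makes the scheme concrete. This illustrates the estimator's idea, not a verbatim trace:

	/*
	 * Successive event times (jiffies): t = 1000, 1013, 1027
	 *   delta  = 1027 - 1013 = 14      first-order difference
	 *   delta2 = 14 - 13     = 1       second-order (change in delta)
	 *   delta3 = delta2 - previous delta2    third-order
	 * The driver credits entropy from the smallest absolute delta, so a
	 * perfectly periodic source (delta2 == 0) earns almost nothing.
	 */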
@@ -1771,7 +1799,7 @@ static void __init init_std_data(struct entropy_store *r)
int __init rand_initialize(void)
{
init_std_data(&input_pool);
- crng_initialize(&primary_crng);
+ crng_initialize_primary(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {
urandom_warning.interval = 0;
@@ -2149,11 +2177,11 @@ struct batched_entropy {
/*
* Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy. In order to ensure
+ * number is as good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy. In order to ensure
* that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
+ * wait_for_random_bytes() should be called and return 0 at least once at any
+ * point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
@@ -2166,15 +2194,6 @@ u64 get_random_u64(void)
struct batched_entropy *batch;
static void *previous;
-#if BITS_PER_LONG == 64
- if (arch_get_random_long((unsigned long *)&ret))
- return ret;
-#else
- if (arch_get_random_long((unsigned long *)&ret) &&
- arch_get_random_long((unsigned long *)&ret + 1))
- return ret;
-#endif
-
warn_unseeded_randomness(&previous);
batch = raw_cpu_ptr(&batched_entropy_u64);
@@ -2199,9 +2218,6 @@ u32 get_random_u32(void)
struct batched_entropy *batch;
static void *previous;
- if (arch_get_random_int(&ret))
- return ret;
-
warn_unseeded_randomness(&previous);
batch = raw_cpu_ptr(&batched_entropy_u32);
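The rewritten comment above leaves one obligation with callers of get_random_u64()/get_random_u32(): in process context, make sure the CRNG is seeded before relying on the result. A minimal sketch of that documented pattern (the wrapper name is invented):

	#include <linux/random.h>

	static int get_seeded_u64(u64 *out)
	{
		int ret = wait_for_random_bytes();	/* may return -ERESTARTSYS */

		if (ret)
			return ret;

		*out = get_random_u64();	/* batched, CRNG-backed value */
		return 0;
	}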
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
deleted file mode 100644
index 3b91184b77ae..000000000000
--- a/drivers/char/rtc.c
+++ /dev/null
@@ -1,1311 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Real Time Clock interface for Linux
- *
- * Copyright (C) 1996 Paul Gortmaker
- *
- * This driver allows use of the real time clock (built into
- * nearly all computers) from user space. It exports the /dev/rtc
- * interface supporting various ioctl()s and also the
- * /proc/driver/rtc pseudo-file for status information.
- *
- * The ioctls can be used to set the interrupt behaviour and
- * generation rate from the RTC via IRQ 8. Then the /dev/rtc
- * interface can be used to make use of these timer interrupts,
- * be they interval or alarm based.
- *
- * The /dev/rtc interface will block on reads until an interrupt
- * has been received. If an RTC interrupt has already happened,
- * it will output an unsigned long and then block. The output value
- * contains the interrupt status in the low byte and the number of
- * interrupts since the last read in the remaining high bytes. The
- * /dev/rtc interface can also be used with the select(2) call.
- *
- * Based on other minimal char device drivers, like Alan's
- * watchdog, Ted's random, etc. etc.
- *
- * 1.07 Paul Gortmaker.
- * 1.08 Miquel van Smoorenburg: disallow certain things on the
- * DEC Alpha as the CMOS clock is also used for other things.
- * 1.09 Nikita Schmidt: epoch support and some Alpha cleanup.
- * 1.09a Pete Zaitcev: Sun SPARC
- * 1.09b Jeff Garzik: Modularize, init cleanup
- * 1.09c Jeff Garzik: SMP cleanup
- * 1.10 Paul Barton-Davis: add support for async I/O
- * 1.10a Andrea Arcangeli: Alpha updates
- * 1.10b Andrew Morton: SMP lock fix
- * 1.10c Cesar Barros: SMP locking fixes and cleanup
- * 1.10d Paul Gortmaker: delete paranoia check in rtc_exit
- * 1.10e Maciej W. Rozycki: Handle DECstation's year weirdness.
- * 1.11 Takashi Iwai: Kernel access functions
- * rtc_register/rtc_unregister/rtc_control
- * 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init
- * 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer
- * CONFIG_HPET_EMULATE_RTC
- * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly.
- * 1.12ac Alan Cox: Allow read access to the day of week register
- * 1.12b David John: Remove calls to the BKL.
- */
-
-#define RTC_VERSION "1.12b"
-
-/*
- * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with
- * interrupts disabled. Due to the index-port/data-port (0x70/0x71)
- * design of the RTC, we don't want two different things trying to
- * get to it at once. (e.g. the periodic 11 min sync from
- * kernel/time/ntp.c vs. this driver.)
- */
-
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/ioport.h>
-#include <linux/fcntl.h>
-#include <linux/mc146818rtc.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/spinlock.h>
-#include <linux/sched/signal.h>
-#include <linux/sysctl.h>
-#include <linux/wait.h>
-#include <linux/bcd.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-#include <linux/ratelimit.h>
-
-#include <asm/current.h>
-
-#ifdef CONFIG_X86
-#include <asm/hpet.h>
-#endif
-
-#ifdef CONFIG_SPARC32
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <asm/io.h>
-
-static unsigned long rtc_port;
-static int rtc_irq;
-#endif
-
-#ifdef CONFIG_HPET_EMULATE_RTC
-#undef RTC_IRQ
-#endif
-
-#ifdef RTC_IRQ
-static int rtc_has_irq = 1;
-#endif
-
-#ifndef CONFIG_HPET_EMULATE_RTC
-#define is_hpet_enabled() 0
-#define hpet_set_alarm_time(hrs, min, sec) 0
-#define hpet_set_periodic_freq(arg) 0
-#define hpet_mask_rtc_irq_bit(arg) 0
-#define hpet_set_rtc_irq_bit(arg) 0
-#define hpet_rtc_timer_init() do { } while (0)
-#define hpet_rtc_dropped_irq() 0
-#define hpet_register_irq_handler(h) ({ 0; })
-#define hpet_unregister_irq_handler(h) ({ 0; })
-#ifdef RTC_IRQ
-static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
-{
- return 0;
-}
-#endif
-#endif
-
-/*
- * We sponge a minor off of the misc major. No need to slurp
- * up another valuable major dev number for this. If you add
- * an ioctl, make sure you don't conflict with SPARC's RTC
- * ioctls.
- */
-
-static struct fasync_struct *rtc_async_queue;
-
-static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
-
-#ifdef RTC_IRQ
-static void rtc_dropped_irq(struct timer_list *unused);
-
-static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
-#endif
-
-static ssize_t rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-
-static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
-
-#ifdef RTC_IRQ
-static __poll_t rtc_poll(struct file *file, poll_table *wait);
-#endif
-
-static void get_rtc_alm_time(struct rtc_time *alm_tm);
-#ifdef RTC_IRQ
-static void set_rtc_irq_bit_locked(unsigned char bit);
-static void mask_rtc_irq_bit_locked(unsigned char bit);
-
-static inline void set_rtc_irq_bit(unsigned char bit)
-{
- spin_lock_irq(&rtc_lock);
- set_rtc_irq_bit_locked(bit);
- spin_unlock_irq(&rtc_lock);
-}
-
-static void mask_rtc_irq_bit(unsigned char bit)
-{
- spin_lock_irq(&rtc_lock);
- mask_rtc_irq_bit_locked(bit);
- spin_unlock_irq(&rtc_lock);
-}
-#endif
-
-#ifdef CONFIG_PROC_FS
-static int rtc_proc_show(struct seq_file *seq, void *v);
-#endif
-
-/*
- * Bits in rtc_status. (6 bits of room for future expansion)
- */
-
-#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
-#define RTC_TIMER_ON 0x02 /* missed irq timer active */
-
-/*
- * rtc_status is never changed by rtc_interrupt, and ioctl/open/close are
- * protected by the spin lock rtc_lock. However, ioctl can still disable the
- * timer in rtc_status and then delete it with del_timer after the interrupt
- * has read rtc_status but before mod_timer is called, which would then
- * re-enable the timer (but you would need awfully unlucky timing to trip
- * on it).
- */
-static unsigned long rtc_status; /* bitmapped status byte. */
-static unsigned long rtc_freq; /* Current periodic IRQ rate */
-static unsigned long rtc_irq_data; /* our output to the world */
-static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
-
-/*
- * If this driver ever becomes modularised, it will be really nice
- * to make the epoch retain its value across module reload...
- */
-
-static unsigned long epoch = 1900; /* year corresponding to 0x00 */
-
-static const unsigned char days_in_mo[] =
-{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-/*
- * Returns true if a clock update is in progress
- */
-static inline unsigned char rtc_is_updating(void)
-{
- unsigned long flags;
- unsigned char uip;
-
- spin_lock_irqsave(&rtc_lock, flags);
- uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
- spin_unlock_irqrestore(&rtc_lock, flags);
- return uip;
-}
-
-#ifdef RTC_IRQ
-/*
- * A very tiny interrupt handler. It runs with interrupts disabled,
- * but there is possibility of conflicting with the set_rtc_mmss()
- * call (the rtc irq and the timer irq can easily run at the same
- * time in two different CPUs). So we need to serialize
- * accesses to the chip with the rtc_lock spinlock that each
- * architecture should implement in the timer code.
- * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.)
- */
-
-static irqreturn_t rtc_interrupt(int irq, void *dev_id)
-{
- /*
- * Can be an alarm interrupt, update complete interrupt,
- * or a periodic interrupt. We store the status in the
- * low byte and the number of interrupts received since
- * the last read in the remainder of rtc_irq_data.
- */
-
- spin_lock(&rtc_lock);
- rtc_irq_data += 0x100;
- rtc_irq_data &= ~0xff;
- if (is_hpet_enabled()) {
- /*
- * In this case it is the HPET RTC interrupt handler
- * calling us, with the interrupt information passed
- * in the irq argument instead of an IRQ number.
- */
- rtc_irq_data |= (unsigned long)irq & 0xF0;
- } else {
- rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0);
- }
-
- if (rtc_status & RTC_TIMER_ON)
- mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
-
- spin_unlock(&rtc_lock);
-
- wake_up_interruptible(&rtc_wait);
-
- kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
-
- return IRQ_HANDLED;
-}
-#endif
-
-/*
- * sysctl-tuning infrastructure.
- */
-static struct ctl_table rtc_table[] = {
- {
- .procname = "max-user-freq",
- .data = &rtc_max_user_freq,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-
-static struct ctl_table rtc_root[] = {
- {
- .procname = "rtc",
- .mode = 0555,
- .child = rtc_table,
- },
- { }
-};
-
-static struct ctl_table dev_root[] = {
- {
- .procname = "dev",
- .mode = 0555,
- .child = rtc_root,
- },
- { }
-};
-
-static struct ctl_table_header *sysctl_header;
-
-static int __init init_sysctl(void)
-{
- sysctl_header = register_sysctl_table(dev_root);
- return 0;
-}
-
-static void __exit cleanup_sysctl(void)
-{
- unregister_sysctl_table(sysctl_header);
-}
-
-/*
- * Now all the various file operations that we export.
- */
-
-static ssize_t rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
-#ifndef RTC_IRQ
- return -EIO;
-#else
- DECLARE_WAITQUEUE(wait, current);
- unsigned long data;
- ssize_t retval;
-
- if (rtc_has_irq == 0)
- return -EIO;
-
- /*
- * Historically this function used to assume that sizeof(unsigned long)
- * is the same in userspace and kernelspace. This led to problems
- * for configurations with multiple ABIs, such as the MIPS o32 and
- * 64-bit ABIs supported on the same kernel. So now we support reads
- * of both 4 and 8 bytes and assume that is sizeof(unsigned long) in
- * the userspace ABI.
- */
- if (count != sizeof(unsigned int) && count != sizeof(unsigned long))
- return -EINVAL;
-
- add_wait_queue(&rtc_wait, &wait);
-
- do {
- /* First make it right. Then make it fast. Putting this whole
- * block within the parentheses of a while would be too
- * confusing. And no, xchg() is not the answer. */
-
- __set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irq(&rtc_lock);
- data = rtc_irq_data;
- rtc_irq_data = 0;
- spin_unlock_irq(&rtc_lock);
-
- if (data != 0)
- break;
-
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- goto out;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- goto out;
- }
- schedule();
- } while (1);
-
- if (count == sizeof(unsigned int)) {
- retval = put_user(data,
- (unsigned int __user *)buf) ?: sizeof(int);
- } else {
- retval = put_user(data,
- (unsigned long __user *)buf) ?: sizeof(long);
- }
- if (!retval)
- retval = count;
- out:
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&rtc_wait, &wait);
-
- return retval;
-#endif
-}
-
-static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
-{
- struct rtc_time wtime;
-
-#ifdef RTC_IRQ
- if (rtc_has_irq == 0) {
- switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- case RTC_PIE_OFF:
- case RTC_PIE_ON:
- case RTC_UIE_OFF:
- case RTC_UIE_ON:
- case RTC_IRQP_READ:
- case RTC_IRQP_SET:
- return -EINVAL;
- }
- }
-#endif
-
- switch (cmd) {
-#ifdef RTC_IRQ
- case RTC_AIE_OFF: /* Mask alarm int. enab. bit */
- {
- mask_rtc_irq_bit(RTC_AIE);
- return 0;
- }
- case RTC_AIE_ON: /* Allow alarm interrupts. */
- {
- set_rtc_irq_bit(RTC_AIE);
- return 0;
- }
- case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
- {
- /* can be called from isr via rtc_control() */
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- mask_rtc_irq_bit_locked(RTC_PIE);
- if (rtc_status & RTC_TIMER_ON) {
- rtc_status &= ~RTC_TIMER_ON;
- del_timer(&rtc_irq_timer);
- }
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return 0;
- }
- case RTC_PIE_ON: /* Allow periodic ints */
- {
- /* can be called from isr via rtc_control() */
- unsigned long flags;
-
- /*
- * We don't really want Joe User enabling more
- * than 64Hz of interrupts on a multi-user machine.
- */
- if (!kernel && (rtc_freq > rtc_max_user_freq) &&
- (!capable(CAP_SYS_RESOURCE)))
- return -EACCES;
-
- spin_lock_irqsave(&rtc_lock, flags);
- if (!(rtc_status & RTC_TIMER_ON)) {
- mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
- 2*HZ/100);
- rtc_status |= RTC_TIMER_ON;
- }
- set_rtc_irq_bit_locked(RTC_PIE);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return 0;
- }
- case RTC_UIE_OFF: /* Mask ints from RTC updates. */
- {
- mask_rtc_irq_bit(RTC_UIE);
- return 0;
- }
- case RTC_UIE_ON: /* Allow ints for RTC updates. */
- {
- set_rtc_irq_bit(RTC_UIE);
- return 0;
- }
-#endif
- case RTC_ALM_READ: /* Read the present alarm time */
- {
- /*
- * This returns a struct rtc_time. Reading >= 0xc0
- * means "don't care" or "match all". Only the tm_hour,
- * tm_min, and tm_sec values are filled in.
- */
- memset(&wtime, 0, sizeof(struct rtc_time));
- get_rtc_alm_time(&wtime);
- break;
- }
- case RTC_ALM_SET: /* Store a time into the alarm */
- {
- /*
- * This expects a struct rtc_time. Writing 0xff means
- * "don't care" or "match all". Only the tm_hour,
- * tm_min and tm_sec are used.
- */
- unsigned char hrs, min, sec;
- struct rtc_time alm_tm;
-
- if (copy_from_user(&alm_tm, (struct rtc_time __user *)arg,
- sizeof(struct rtc_time)))
- return -EFAULT;
-
- hrs = alm_tm.tm_hour;
- min = alm_tm.tm_min;
- sec = alm_tm.tm_sec;
-
- spin_lock_irq(&rtc_lock);
- if (hpet_set_alarm_time(hrs, min, sec)) {
- /*
- * Fall through and set the alarm time in CMOS too,
- * so that we will get the proper value in RTC_ALM_READ.
- */
- }
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
- RTC_ALWAYS_BCD) {
- if (sec < 60)
- sec = bin2bcd(sec);
- else
- sec = 0xff;
-
- if (min < 60)
- min = bin2bcd(min);
- else
- min = 0xff;
-
- if (hrs < 24)
- hrs = bin2bcd(hrs);
- else
- hrs = 0xff;
- }
- CMOS_WRITE(hrs, RTC_HOURS_ALARM);
- CMOS_WRITE(min, RTC_MINUTES_ALARM);
- CMOS_WRITE(sec, RTC_SECONDS_ALARM);
- spin_unlock_irq(&rtc_lock);
-
- return 0;
- }
- case RTC_RD_TIME: /* Read the time/date from RTC */
- {
- memset(&wtime, 0, sizeof(struct rtc_time));
- rtc_get_rtc_time(&wtime);
- break;
- }
- case RTC_SET_TIME: /* Set the RTC */
- {
- struct rtc_time rtc_tm;
- unsigned char mon, day, hrs, min, sec, leap_yr;
- unsigned char save_control, save_freq_select;
- unsigned int yrs;
-#ifdef CONFIG_MACH_DECSTATION
- unsigned int real_yrs;
-#endif
-
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
-
- if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
- sizeof(struct rtc_time)))
- return -EFAULT;
-
- yrs = rtc_tm.tm_year + 1900;
- mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
- day = rtc_tm.tm_mday;
- hrs = rtc_tm.tm_hour;
- min = rtc_tm.tm_min;
- sec = rtc_tm.tm_sec;
-
- if (yrs < 1970)
- return -EINVAL;
-
- leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
-
- if ((mon > 12) || (day == 0))
- return -EINVAL;
-
- if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
- return -EINVAL;
-
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
- yrs -= epoch;
- if (yrs > 255) /* They are unsigned */
- return -EINVAL;
-
- spin_lock_irq(&rtc_lock);
-#ifdef CONFIG_MACH_DECSTATION
- real_yrs = yrs;
- yrs = 72;
-
- /*
- * We want to keep the year set to 73 until March
- * for non-leap years, so that Feb, 29th is handled
- * correctly.
- */
- if (!leap_yr && mon < 3) {
- real_yrs--;
- yrs = 73;
- }
-#endif
- /* These limits and adjustments are independent of
- * whether the chip is in binary mode or not.
- */
- if (yrs > 169) {
- spin_unlock_irq(&rtc_lock);
- return -EINVAL;
- }
- if (yrs >= 100)
- yrs -= 100;
-
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
- || RTC_ALWAYS_BCD) {
- sec = bin2bcd(sec);
- min = bin2bcd(min);
- hrs = bin2bcd(hrs);
- day = bin2bcd(day);
- mon = bin2bcd(mon);
- yrs = bin2bcd(yrs);
- }
-
- save_control = CMOS_READ(RTC_CONTROL);
- CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
- save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
-#ifdef CONFIG_MACH_DECSTATION
- CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
-#endif
- CMOS_WRITE(yrs, RTC_YEAR);
- CMOS_WRITE(mon, RTC_MONTH);
- CMOS_WRITE(day, RTC_DAY_OF_MONTH);
- CMOS_WRITE(hrs, RTC_HOURS);
- CMOS_WRITE(min, RTC_MINUTES);
- CMOS_WRITE(sec, RTC_SECONDS);
-
- CMOS_WRITE(save_control, RTC_CONTROL);
- CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-
- spin_unlock_irq(&rtc_lock);
- return 0;
- }
-#ifdef RTC_IRQ
- case RTC_IRQP_READ: /* Read the periodic IRQ rate. */
- {
- return put_user(rtc_freq, (unsigned long __user *)arg);
- }
- case RTC_IRQP_SET: /* Set periodic IRQ rate. */
- {
- int tmp = 0;
- unsigned char val;
- /* can be called from isr via rtc_control() */
- unsigned long flags;
-
- /*
- * The max we can do is 8192Hz.
- */
- if ((arg < 2) || (arg > 8192))
- return -EINVAL;
- /*
- * We don't really want Joe User generating more
- * than 64Hz of interrupts on a multi-user machine.
- */
- if (!kernel && (arg > rtc_max_user_freq) &&
- !capable(CAP_SYS_RESOURCE))
- return -EACCES;
-
- while (arg > (1<<tmp))
- tmp++;
-
- /*
- * Check that the input was really a power of 2.
- */
- if (arg != (1<<tmp))
- return -EINVAL;
-
- rtc_freq = arg;
-
- spin_lock_irqsave(&rtc_lock, flags);
- if (hpet_set_periodic_freq(arg)) {
- spin_unlock_irqrestore(&rtc_lock, flags);
- return 0;
- }
-
- val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0;
- val |= (16 - tmp);
- CMOS_WRITE(val, RTC_FREQ_SELECT);
- spin_unlock_irqrestore(&rtc_lock, flags);
- return 0;
- }
-#endif
- case RTC_EPOCH_READ: /* Read the epoch. */
- {
- return put_user(epoch, (unsigned long __user *)arg);
- }
- case RTC_EPOCH_SET: /* Set the epoch. */
- {
- /*
- * There were no RTC clocks before 1900.
- */
- if (arg < 1900)
- return -EINVAL;
-
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
-
- epoch = arg;
- return 0;
- }
- default:
- return -ENOTTY;
- }
- return copy_to_user((void __user *)arg,
- &wtime, sizeof wtime) ? -EFAULT : 0;
-}
-
-static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long ret;
- ret = rtc_do_ioctl(cmd, arg, 0);
- return ret;
-}
-
-/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
- */
-static int rtc_open(struct inode *inode, struct file *file)
-{
- spin_lock_irq(&rtc_lock);
-
- if (rtc_status & RTC_IS_OPEN)
- goto out_busy;
-
- rtc_status |= RTC_IS_OPEN;
-
- rtc_irq_data = 0;
- spin_unlock_irq(&rtc_lock);
- return 0;
-
-out_busy:
- spin_unlock_irq(&rtc_lock);
- return -EBUSY;
-}
-
-static int rtc_fasync(int fd, struct file *filp, int on)
-{
- return fasync_helper(fd, filp, on, &rtc_async_queue);
-}
-
-static int rtc_release(struct inode *inode, struct file *file)
-{
-#ifdef RTC_IRQ
- unsigned char tmp;
-
- if (rtc_has_irq == 0)
- goto no_irq;
-
- /*
- * Turn off all interrupts once the device is no longer
- * in use, and clear the data.
- */
-
- spin_lock_irq(&rtc_lock);
- if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
- tmp = CMOS_READ(RTC_CONTROL);
- tmp &= ~RTC_PIE;
- tmp &= ~RTC_AIE;
- tmp &= ~RTC_UIE;
- CMOS_WRITE(tmp, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
- }
- if (rtc_status & RTC_TIMER_ON) {
- rtc_status &= ~RTC_TIMER_ON;
- del_timer(&rtc_irq_timer);
- }
- spin_unlock_irq(&rtc_lock);
-
-no_irq:
-#endif
-
- spin_lock_irq(&rtc_lock);
- rtc_irq_data = 0;
- rtc_status &= ~RTC_IS_OPEN;
- spin_unlock_irq(&rtc_lock);
-
- return 0;
-}
-
-#ifdef RTC_IRQ
-static __poll_t rtc_poll(struct file *file, poll_table *wait)
-{
- unsigned long l;
-
- if (rtc_has_irq == 0)
- return 0;
-
- poll_wait(file, &rtc_wait, wait);
-
- spin_lock_irq(&rtc_lock);
- l = rtc_irq_data;
- spin_unlock_irq(&rtc_lock);
-
- if (l != 0)
- return EPOLLIN | EPOLLRDNORM;
- return 0;
-}
-#endif
-
-/*
- * The various file operations we support.
- */
-
-static const struct file_operations rtc_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = rtc_read,
-#ifdef RTC_IRQ
- .poll = rtc_poll,
-#endif
- .unlocked_ioctl = rtc_ioctl,
- .open = rtc_open,
- .release = rtc_release,
- .fasync = rtc_fasync,
-};
-
-static struct miscdevice rtc_dev = {
- .minor = RTC_MINOR,
- .name = "rtc",
- .fops = &rtc_fops,
-};
-
-static resource_size_t rtc_size;
-
-static struct resource * __init rtc_request_region(resource_size_t size)
-{
- struct resource *r;
-
- if (RTC_IOMAPPED)
- r = request_region(RTC_PORT(0), size, "rtc");
- else
- r = request_mem_region(RTC_PORT(0), size, "rtc");
-
- if (r)
- rtc_size = size;
-
- return r;
-}
-
-static void rtc_release_region(void)
-{
- if (RTC_IOMAPPED)
- release_region(RTC_PORT(0), rtc_size);
- else
- release_mem_region(RTC_PORT(0), rtc_size);
-}
-
-static int __init rtc_init(void)
-{
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *ent;
-#endif
-#if defined(__alpha__) || defined(__mips__)
- unsigned int year, ctrl;
- char *guess = NULL;
-#endif
-#ifdef CONFIG_SPARC32
- struct device_node *ebus_dp;
- struct platform_device *op;
-#else
- void *r;
-#ifdef RTC_IRQ
- irq_handler_t rtc_int_handler_ptr;
-#endif
-#endif
-
-#ifdef CONFIG_SPARC32
- for_each_node_by_name(ebus_dp, "ebus") {
- struct device_node *dp;
- for_each_child_of_node(ebus_dp, dp) {
- if (of_node_name_eq(dp, "rtc")) {
- op = of_find_device_by_node(dp);
- if (op) {
- rtc_port = op->resource[0].start;
- rtc_irq = op->irqs[0];
- goto found;
- }
- }
- }
- }
- rtc_has_irq = 0;
- printk(KERN_ERR "rtc_init: no PC rtc found\n");
- return -EIO;
-
-found:
- if (!rtc_irq) {
- rtc_has_irq = 0;
- goto no_irq;
- }
-
- /*
- * XXX Interrupt pin #7 in Espresso is shared between RTC and
- * PCI Slot 2 INTA# (and some INTx# in Slot 1).
- */
- if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
- (void *)&rtc_port)) {
- rtc_has_irq = 0;
- printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
- return -EIO;
- }
-no_irq:
-#else
- r = rtc_request_region(RTC_IO_EXTENT);
-
- /*
- * If we've already requested a smaller range (for example, because
- * PNPBIOS or ACPI told us how the device is configured), the request
- * above might fail because it's too big.
- *
- * If so, request just the range we actually use.
- */
- if (!r)
- r = rtc_request_region(RTC_IO_EXTENT_USED);
- if (!r) {
-#ifdef RTC_IRQ
- rtc_has_irq = 0;
-#endif
- printk(KERN_ERR "rtc: I/O resource %lx is not free.\n",
- (long)(RTC_PORT(0)));
- return -EIO;
- }
-
-#ifdef RTC_IRQ
- if (is_hpet_enabled()) {
- int err;
-
- rtc_int_handler_ptr = hpet_rtc_interrupt;
- err = hpet_register_irq_handler(rtc_interrupt);
- if (err != 0) {
- printk(KERN_WARNING "hpet_register_irq_handler failed "
- "in rtc_init().");
- return err;
- }
- } else {
- rtc_int_handler_ptr = rtc_interrupt;
- }
-
- if (request_irq(RTC_IRQ, rtc_int_handler_ptr, 0, "rtc", NULL)) {
- /* Yeah right, seeing as irq 8 doesn't even hit the bus. */
- rtc_has_irq = 0;
- printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
- rtc_release_region();
-
- return -EIO;
- }
- hpet_rtc_timer_init();
-
-#endif
-
-#endif /* CONFIG_SPARC32 vs. others */
-
- if (misc_register(&rtc_dev)) {
-#ifdef RTC_IRQ
- free_irq(RTC_IRQ, NULL);
- hpet_unregister_irq_handler(rtc_interrupt);
- rtc_has_irq = 0;
-#endif
- rtc_release_region();
- return -ENODEV;
- }
-
-#ifdef CONFIG_PROC_FS
- ent = proc_create_single("driver/rtc", 0, NULL, rtc_proc_show);
- if (!ent)
- printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
-#endif
-
-#if defined(__alpha__) || defined(__mips__)
- rtc_freq = HZ;
-
- /* Each operating system on an Alpha uses its own epoch.
- Let's try to guess which one we are using now. */
-
- if (rtc_is_updating() != 0)
- msleep(20);
-
- spin_lock_irq(&rtc_lock);
- year = CMOS_READ(RTC_YEAR);
- ctrl = CMOS_READ(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
-
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- year = bcd2bin(year); /* This should never happen... */
-
- if (year < 20) {
- epoch = 2000;
- guess = "SRM (post-2000)";
- } else if (year >= 20 && year < 48) {
- epoch = 1980;
- guess = "ARC console";
- } else if (year >= 48 && year < 72) {
- epoch = 1952;
- guess = "Digital UNIX";
-#if defined(__mips__)
- } else if (year >= 72 && year < 74) {
- epoch = 2000;
- guess = "Digital DECstation";
-#else
- } else if (year >= 70) {
- epoch = 1900;
- guess = "Standard PC (1900)";
-#endif
- }
- if (guess)
- printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
- guess, epoch);
-#endif
-#ifdef RTC_IRQ
- if (rtc_has_irq == 0)
- goto no_irq2;
-
- spin_lock_irq(&rtc_lock);
- rtc_freq = 1024;
- if (!hpet_set_periodic_freq(rtc_freq)) {
- /*
- * Initialize periodic frequency to CMOS reset default,
- * which is 1024Hz
- */
- CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
- RTC_FREQ_SELECT);
- }
- spin_unlock_irq(&rtc_lock);
-no_irq2:
-#endif
-
- (void) init_sysctl();
-
- printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n");
-
- return 0;
-}
-
-static void __exit rtc_exit(void)
-{
- cleanup_sysctl();
- remove_proc_entry("driver/rtc", NULL);
- misc_deregister(&rtc_dev);
-
-#ifdef CONFIG_SPARC32
- if (rtc_has_irq)
- free_irq(rtc_irq, &rtc_port);
-#else
- rtc_release_region();
-#ifdef RTC_IRQ
- if (rtc_has_irq) {
- free_irq(RTC_IRQ, NULL);
- hpet_unregister_irq_handler(hpet_rtc_interrupt);
- }
-#endif
-#endif /* CONFIG_SPARC32 */
-}
-
-module_init(rtc_init);
-module_exit(rtc_exit);
-
-#ifdef RTC_IRQ
-/*
- * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
- * (usually during an IDE disk interrupt, with IRQ unmasking off)
- * Since the interrupt handler doesn't get called, the IRQ status
- * byte doesn't get read, and the RTC stops generating interrupts.
- * A timer is set, and will call this function if/when that happens.
- * To get it out of this stalled state, we just read the status.
- * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
- * (You *really* shouldn't be trying to use a non-realtime system
- * for something that requires a steady > 1kHz signal anyway.)
- */
-
-static void rtc_dropped_irq(struct timer_list *unused)
-{
- unsigned long freq;
-
- spin_lock_irq(&rtc_lock);
-
- if (hpet_rtc_dropped_irq()) {
- spin_unlock_irq(&rtc_lock);
- return;
- }
-
- /* Just in case someone disabled the timer from behind our back... */
- if (rtc_status & RTC_TIMER_ON)
- mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
-
- rtc_irq_data += ((rtc_freq/HZ)<<8);
- rtc_irq_data &= ~0xff;
- rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */
-
- freq = rtc_freq;
-
- spin_unlock_irq(&rtc_lock);
-
- printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
- freq);
-
- /* Now we have new data */
- wake_up_interruptible(&rtc_wait);
-
- kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
-}
-#endif
-
-#ifdef CONFIG_PROC_FS
-/*
- * Info exported via "/proc/driver/rtc".
- */
-
-static int rtc_proc_show(struct seq_file *seq, void *v)
-{
-#define YN(bit) ((ctrl & bit) ? "yes" : "no")
-#define NY(bit) ((ctrl & bit) ? "no" : "yes")
- struct rtc_time tm;
- unsigned char batt, ctrl;
- unsigned long freq;
-
- spin_lock_irq(&rtc_lock);
- batt = CMOS_READ(RTC_VALID) & RTC_VRT;
- ctrl = CMOS_READ(RTC_CONTROL);
- freq = rtc_freq;
- spin_unlock_irq(&rtc_lock);
-
-
-
- /*
- * There is no way to tell if the luser has the RTC set for local
- * time or for Coordinated Universal Time (UTC). Probably local though.
- */
- seq_printf(seq,
- "rtc_time\t: %ptRt\n"
- "rtc_date\t: %ptRd\n"
- "rtc_epoch\t: %04lu\n",
- &tm, &tm, epoch);
-
- get_rtc_alm_time(&tm);
-
- /*
- * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will
- * match any value for that particular field. Values that are
- * greater than a valid time, but less than 0xc0 shouldn't appear.
- */
- seq_puts(seq, "alarm\t\t: ");
- if (tm.tm_hour <= 24)
- seq_printf(seq, "%02d:", tm.tm_hour);
- else
- seq_puts(seq, "**:");
-
- if (tm.tm_min <= 59)
- seq_printf(seq, "%02d:", tm.tm_min);
- else
- seq_puts(seq, "**:");
-
- if (tm.tm_sec <= 59)
- seq_printf(seq, "%02d\n", tm.tm_sec);
- else
- seq_puts(seq, "**\n");
-
- seq_printf(seq,
- "DST_enable\t: %s\n"
- "BCD\t\t: %s\n"
- "24hr\t\t: %s\n"
- "square_wave\t: %s\n"
- "alarm_IRQ\t: %s\n"
- "update_IRQ\t: %s\n"
- "periodic_IRQ\t: %s\n"
- "periodic_freq\t: %ld\n"
- "batt_status\t: %s\n",
- YN(RTC_DST_EN),
- NY(RTC_DM_BINARY),
- YN(RTC_24H),
- YN(RTC_SQWE),
- YN(RTC_AIE),
- YN(RTC_UIE),
- YN(RTC_PIE),
- freq,
- batt ? "okay" : "dead");
-
- return 0;
-#undef YN
-#undef NY
-}
-#endif
-
-static void rtc_get_rtc_time(struct rtc_time *rtc_tm)
-{
- unsigned long uip_watchdog = jiffies, flags;
- unsigned char ctrl;
-#ifdef CONFIG_MACH_DECSTATION
- unsigned int real_year;
-#endif
-
- /*
- * Read the RTC once any update in progress is done. The update
- * can take just over 2ms. We wait 20ms. There is no need to
- * poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
- * If you need to know *exactly* when a second has started, enable
- * periodic update complete interrupts, (via ioctl) and then
- * immediately read /dev/rtc which will block until you get the IRQ.
- * Once the read clears, read the RTC time (again via ioctl). Easy.
- */
-
- while (rtc_is_updating() != 0 &&
- time_before(jiffies, uip_watchdog + 2*HZ/100))
- cpu_relax();
-
- /*
- * Only the values that we read from the RTC are set. We leave
- * tm_wday, tm_yday and tm_isdst untouched. Note that while the
- * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is
- * only updated by the RTC when initially set to a non-zero value.
- */
- spin_lock_irqsave(&rtc_lock, flags);
- rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
- rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
- rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
- rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
- rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
- rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
- /* Only set from 2.6.16 onwards */
- rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK);
-
-#ifdef CONFIG_MACH_DECSTATION
- real_year = CMOS_READ(RTC_DEC_YEAR);
-#endif
- ctrl = CMOS_READ(RTC_CONTROL);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
- rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
- rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
- rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
- rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
- rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
- rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday);
- }
-
-#ifdef CONFIG_MACH_DECSTATION
- rtc_tm->tm_year += real_year - 72;
-#endif
-
- /*
- * Account for differences between how the RTC uses the values
- * and how they are defined in a struct rtc_time.
- */
- rtc_tm->tm_year += epoch - 1900;
- if (rtc_tm->tm_year <= 69)
- rtc_tm->tm_year += 100;
-
- rtc_tm->tm_mon--;
-}
-
-static void get_rtc_alm_time(struct rtc_time *alm_tm)
-{
- unsigned char ctrl;
-
- /*
- * Only the values that we read from the RTC are set. That
- * means only tm_hour, tm_min, and tm_sec.
- */
- spin_lock_irq(&rtc_lock);
- alm_tm->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
- alm_tm->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
- alm_tm->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
- ctrl = CMOS_READ(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
-
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
- alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
- alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
- }
-}
-
-#ifdef RTC_IRQ
-/*
- * Used to disable/enable interrupts for any one of UIE, AIE, PIE.
- * Rumour has it that if you frob the interrupt enable/disable
- * bits in RTC_CONTROL, you should read RTC_INTR_FLAGS, to
- * ensure you actually start getting interrupts. Probably for
- * compatibility with older/broken chipset RTC implementations.
- * We also clear out any old irq data after an ioctl() that
- * meddles with the interrupt enable/disable bits.
- */
-
-static void mask_rtc_irq_bit_locked(unsigned char bit)
-{
- unsigned char val;
-
- if (hpet_mask_rtc_irq_bit(bit))
- return;
- val = CMOS_READ(RTC_CONTROL);
- val &= ~bit;
- CMOS_WRITE(val, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- rtc_irq_data = 0;
-}
-
-static void set_rtc_irq_bit_locked(unsigned char bit)
-{
- unsigned char val;
-
- if (hpet_set_rtc_irq_bit(bit))
- return;
- val = CMOS_READ(RTC_CONTROL);
- val |= bit;
- CMOS_WRITE(val, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- rtc_irq_data = 0;
-}
-#endif
-
-MODULE_AUTHOR("Paul Gortmaker");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(RTC_MINOR);
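A recurring detail in the driver deleted above is the RTC chip's BCD mode: unless RTC_DM_BINARY is set in RTC_CONTROL, every time field is stored as packed binary-coded decimal, which is why bin2bcd()/bcd2bin() bracket nearly every CMOS access. A tiny standalone illustration, not from the driver:

	#include <linux/bcd.h>
	#include <linux/bug.h>
	#include <linux/types.h>

	static void bcd_demo(void)
	{
		u8 sec = 59;
		u8 packed = bin2bcd(sec);	/* 0x59: tens digit in the high nibble */

		WARN_ON(bcd2bin(packed) != sec);
	}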
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 98f3150e0048..aff0a8e44fff 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -61,8 +61,6 @@
#include <linux/mutex.h>
#include <linux/toshiba.h>
-#define TOSH_MINOR_DEV 181
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>");
MODULE_DESCRIPTION("Toshiba laptop SMM driver");
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 7a0fca659b6a..7460f230bae4 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -99,11 +99,8 @@ static int tpm_read_log(struct tpm_chip *chip)
*
* If an event log is found then the securityfs files are set up to
* export it to userspace, otherwise nothing is done.
- *
- * Returns -ENODEV if the firmware has no event log or securityfs is not
- * supported.
*/
-int tpm_bios_log_setup(struct tpm_chip *chip)
+void tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
unsigned int cnt;
@@ -112,7 +109,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
rc = tpm_read_log(chip);
if (rc < 0)
- return rc;
+ return;
log_version = rc;
cnt = 0;
@@ -158,13 +155,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
cnt++;
}
- return 0;
+ return;
err:
- rc = PTR_ERR(chip->bios_dir[cnt]);
chip->bios_dir[cnt] = NULL;
tpm_bios_log_teardown(chip);
- return rc;
+ return;
}
void tpm_bios_log_teardown(struct tpm_chip *chip)
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
index af347c190819..a9ce66d09a75 100644
--- a/drivers/char/tpm/eventlog/of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -51,7 +51,8 @@ int tpm_read_log_of(struct tpm_chip *chip)
* endian format. For this reason, vtpm doesn't need conversion
* but physical tpm needs the conversion.
*/
- if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0) {
+ if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0 &&
+ of_property_match_string(np, "compatible", "IBM,vtpm20") < 0) {
size = be32_to_cpup((__force __be32 *)sizep);
base = be64_to_cpup((__force __be64 *)basep);
} else {
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 739b1d9d16b6..2c96977ad080 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -115,6 +115,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
u32 converted_event_size;
u32 converted_event_type;
+ (*pos)++;
converted_event_size = do_endian_conversion(event->event_size);
v += sizeof(struct tcpa_event) + converted_event_size;
@@ -132,7 +133,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
((v + sizeof(struct tcpa_event) + converted_event_size) > limit))
return NULL;
- (*pos)++;
return v;
}
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index b9aeda1cbcd7..e741b1157525 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -94,6 +94,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
size_t event_size;
void *marker;
+ (*pos)++;
event_header = log->bios_event_log;
if (v == SEQ_START_TOKEN) {
@@ -118,7 +119,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
if (((v + event_size) >= limit) || (event_size == 0))
return NULL;
- (*pos)++;
return v;
}
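The tpm1.c and tpm2.c hunks above apply the same seq_file rule: a ->next() handler must advance *pos unconditionally, even when it returns NULL at the end of the data, otherwise the seq_file core may invoke it again with a stale position and replay records. The corrected shape, reduced to a skeleton with invented names:

	static void *demo_measurements_next(struct seq_file *m, void *v,
					    loff_t *pos)
	{
		(*pos)++;			/* always advance, even at EOF */

		v = demo_find_next_event(v);	/* hypothetical lookup helper */
		return v;			/* NULL terminates the walk */
	}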
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 3d6d394a8661..8c77e88012e9 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -514,15 +514,15 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
return 0;
- rc = __compat_only_sysfs_link_entry_to_kobj(
- &chip->dev.parent->kobj, &chip->dev.kobj, "ppi");
+ rc = compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, "ppi", NULL);
if (rc && rc != -ENOENT)
return rc;
/* All the names from tpm-sysfs */
for (i = chip->groups[0]->attrs; *i != NULL; ++i) {
- rc = __compat_only_sysfs_link_entry_to_kobj(
- &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name);
+ rc = compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name, NULL);
if (rc) {
tpm_del_legacy_sysfs(chip);
return rc;
@@ -596,9 +596,7 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_sysfs_add_device(chip);
- rc = tpm_bios_log_setup(chip);
- if (rc != 0 && rc != -ENODEV)
- return rc;
+ tpm_bios_log_setup(chip);
tpm_add_ppi(chip);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index a438b1206fcb..1621ce818705 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -323,7 +323,7 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
for (i = 0; i < chip->nr_allocated_banks; i++) {
if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
- rc = EINVAL;
+ rc = -EINVAL;
goto out;
}
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 5620747da0cf..0fbcede241ea 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -226,6 +226,7 @@ int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
+int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
int tpm2_init_space(struct tpm_space *space);
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
@@ -235,7 +236,7 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
size_t *bufsiz);
-int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
int tpm_dev_common_init(void);
void tpm_dev_common_exit(void);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 760329598b99..eff1f12d981a 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -615,7 +615,7 @@ out:
return rc;
}
-static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
+int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
{
struct tpm_buf buf;
u32 nr_commands;
@@ -681,6 +681,7 @@ out:
rc = -ENODEV;
return rc;
}
+EXPORT_SYMBOL_GPL(tpm2_get_cc_attrs_tbl);
/**
* tpm2_startup - turn on the TPM
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 78cc52690177..09fe45246b8c 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2012 IBM Corporation
+ * Copyright (C) 2012-2020 IBM Corporation
*
* Author: Ashley Lai <ashleydlai@gmail.com>
*
@@ -29,6 +29,7 @@ static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
{ "IBM,vtpm", "IBM,vtpm"},
+ { "IBM,vtpm", "IBM,vtpm20"},
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
@@ -134,6 +135,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
/**
+ * ibmvtpm_crq_send_init - Send a CRQ initialize message
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "%s failed rc=%d\n", __func__, rc);
+
+ return rc;
+}
+
+/**
+ * tpm_ibmvtpm_resume - Resume from suspend
+ *
+ * @dev: device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_ENABLE_CRQ,
+ ibmvtpm->vdev->unit_address);
+ } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc) {
+ dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = vio_enable_interrupts(ibmvtpm->vdev);
+ if (rc) {
+ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = ibmvtpm_crq_send_init(ibmvtpm);
+ if (rc)
+ dev_err(dev, "Error send_init rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
* tpm_ibmvtpm_send() - Send a TPM command
* @chip: tpm chip struct
* @buf: buffer containing the data to send
@@ -146,6 +205,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ bool retry = true;
int rc, sig;
if (!ibmvtpm->rtce_buf) {
@@ -179,18 +239,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
*/
ibmvtpm->tpm_processing_cmd = true;
+again:
rc = ibmvtpm_send_crq(ibmvtpm->vdev,
IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
count, ibmvtpm->rtce_dma_handle);
if (rc != H_SUCCESS) {
+ /*
+ * H_CLOSED can be returned after LPM resume. Call
+ * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
+ * ibmvtpm_send_crq() once before failing.
+ */
+ if (rc == H_CLOSED && retry) {
+ tpm_ibmvtpm_resume(ibmvtpm->dev);
+ retry = false;
+ goto again;
+ }
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
- rc = 0;
ibmvtpm->tpm_processing_cmd = false;
- } else
- rc = 0;
+ }
spin_unlock(&ibmvtpm->rtce_lock);
- return rc;
+ return 0;
}
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
@@ -269,26 +338,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
}
/**
- * ibmvtpm_crq_send_init - Send a CRQ initialize message
- * @ibmvtpm: vtpm device struct
- *
- * Return:
- * 0 on success.
- * Non-zero on failure.
- */
-static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
-{
- int rc;
-
- rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
- if (rc != H_SUCCESS)
- dev_err(ibmvtpm->dev,
- "ibmvtpm_crq_send_init failed rc=%d\n", rc);
-
- return rc;
-}
-
-/**
* tpm_ibmvtpm_remove - ibm vtpm remove entry point
* @vdev: vio device struct
*
@@ -400,44 +449,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
-/**
- * tpm_ibmvtpm_resume - Resume from suspend
- *
- * @dev: device struct
- *
- * Return: Always 0.
- */
-static int tpm_ibmvtpm_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
- int rc = 0;
-
- do {
- if (rc)
- msleep(100);
- rc = plpar_hcall_norets(H_ENABLE_CRQ,
- ibmvtpm->vdev->unit_address);
- } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
-
- if (rc) {
- dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
- return rc;
- }
-
- rc = vio_enable_interrupts(ibmvtpm->vdev);
- if (rc) {
- dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
- return rc;
- }
-
- rc = ibmvtpm_crq_send_init(ibmvtpm);
- if (rc)
- dev_err(dev, "Error send_init rc=%d\n", rc);
-
- return rc;
-}
-
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
return (status == 0);
@@ -571,6 +582,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
*/
while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
ibmvtpm_crq_process(crq, ibmvtpm);
+ wake_up_interruptible(&ibmvtpm->crq_queue.wq);
crq->valid = 0;
smp_wmb();
}
@@ -618,6 +630,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
}
crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
+ init_waitqueue_head(&crq_q->wq);
ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
CRQ_RES_BUF_SIZE,
DMA_BIDIRECTIONAL);
@@ -670,6 +683,20 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (rc)
goto init_irq_cleanup;
+ if (!strcmp(id->compat, "IBM,vtpm20")) {
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc)
+ goto init_irq_cleanup;
+ }
+
+ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ ibmvtpm->rtce_buf != NULL,
+ HZ)) {
+ dev_err(dev, "CRQ response timed out\n");
+ goto init_irq_cleanup;
+ }
+
return tpm_chip_register(chip);
init_irq_cleanup:
do {
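The probe hunk above pairs with the interrupt-handler hunk earlier in this file: the handler now wakes crq_queue.wq after processing each CRQ, and probe sleeps on that queue until the response has filled rtce_buf. The general wait/wake pattern as a standalone sketch with invented names (a plain wake_up() is shown on the producer side; wait_event_timeout() returns 0 only when the timeout expires with the condition still false):

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
	static bool demo_ready;

	static int demo_wait_for_response(void)
	{
		if (!wait_event_timeout(demo_wq, demo_ready, HZ))
			return -ETIMEDOUT;	/* one-second budget elapsed */
		return 0;
	}

	static void demo_response_arrived(void)	/* e.g. from an IRQ handler */
	{
		demo_ready = true;		/* publish the condition first */
		wake_up(&demo_wq);		/* then wake any sleeper */
	}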
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index 7983f1a33267..b92aa7d3e93e 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -26,6 +26,7 @@ struct ibmvtpm_crq_queue {
struct ibmvtpm_crq *crq_addr;
u32 index;
u32 num_entry;
+ wait_queue_head_t wq;
};
struct ibmvtpm_dev {
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 27c6ca031e23..2435216bd10a 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -433,6 +433,9 @@ static void disable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
+ if (priv->irq == 0)
+ return;
+
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
intmask = 0;
@@ -1062,9 +1065,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
- if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
dev_err(&chip->dev, FW_BUG
"TPM interrupt not working, polling instead\n");
+
+ disable_interrupts(chip);
+ }
} else {
tpm_tis_probe_irq(chip, intmask);
}
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
index 37d72e818335..ea759af25634 100644
--- a/drivers/char/tpm/tpm_tis_spi_cr50.c
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -132,7 +132,12 @@ static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy)
if (cr50_needs_waking(cr50_phy)) {
/* Assert CS, wait 1 msec, deassert CS */
- struct spi_transfer spi_cs_wake = { .delay_usecs = 1000 };
+ struct spi_transfer spi_cs_wake = {
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
+ };
spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1);
/* Wait for it to fully wake */
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index d1754fd6c573..d96755935529 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -110,7 +110,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.cs_change = 0;
spi_xfer.len = transfer_len;
- spi_xfer.delay_usecs = 5;
+ spi_xfer.delay.value = 5;
+ spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
if (in) {
spi_xfer.tx_buf = NULL;
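Both TPM SPI hunks above are instances of one SPI core migration: the legacy u16 delay_usecs field of struct spi_transfer was replaced by an embedded struct spi_delay, which carries a value plus an explicit unit. The new idiom in isolation (a sketch, not driver code):

	#include <linux/spi/spi.h>

	static void demo_fill_xfer(struct spi_transfer *xfer)
	{
		xfer->delay.value = 5;			 /* was: delay_usecs = 5 */
		xfer->delay.unit = SPI_DELAY_UNIT_USECS; /* also ..._NSECS, ..._SCK */
	}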
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 4df9b40d6342..3cbaec925606 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -112,7 +112,7 @@ struct port_buffer {
unsigned int sgpages;
/* sg is used if sgpages > 0. sg must be the last field in this struct */
- struct scatterlist sg[0];
+ struct scatterlist sg[];
};
/*
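The sg[0] to sg[] change above is part of the tree-wide conversion from GCC zero-length arrays to C99 flexible array members; the allocation side is unchanged and reads most clearly with struct_size(). A sketch under that assumption (the function name is invented):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct port_buffer *demo_alloc_buf(unsigned int nr_sg)
	{
		struct port_buffer *pb;

		/* sizeof(*pb) + nr_sg * sizeof(pb->sg[0]), overflow-checked */
		pb = kmalloc(struct_size(pb, sg, nr_sg), GFP_KERNEL);
		return pb;
	}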
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 3732241352ce..8b90357f2a93 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -15,7 +15,11 @@ obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
obj-$(CONFIG_HAVE_AT91_SAM9X60_PLL) += clk-sam9x60-pll.o
+obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o
+obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o
+obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o
obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
+obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
new file mode 100644
index 000000000000..c44a431b6c97
--- /dev/null
+++ b/drivers/clk/at91/at91rm9200.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+struct sck {
+ char *n;
+ char *p;
+ u8 id;
+};
+
+struct pck {
+ char *n;
+ u8 id;
+};
+
+static const struct clk_master_characteristics rm9200_mck_characteristics = {
+ .output = { .min = 0, .max = 80000000 },
+ .divisors = { 1, 2, 3, 4 },
+};
+
+static u8 rm9200_pll_out[] = { 0, 2 };
+
+static const struct clk_range rm9200_pll_outputs[] = {
+ { .min = 80000000, .max = 160000000 },
+ { .min = 150000000, .max = 180000000 },
+};
+
+static const struct clk_pll_characteristics rm9200_pll_characteristics = {
+ .input = { .min = 1000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(rm9200_pll_outputs),
+ .output = rm9200_pll_outputs,
+ .out = rm9200_pll_out,
+};
+
+static const struct sck at91rm9200_systemck[] = {
+ { .n = "udpck", .p = "usbck", .id = 2 },
+ { .n = "uhpck", .p = "usbck", .id = 4 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "pck3", .p = "prog3", .id = 11 },
+};
+
+static const struct pck at91rm9200_periphck[] = {
+ { .n = "pioA_clk", .id = 2 },
+ { .n = "pioB_clk", .id = 3 },
+ { .n = "pioC_clk", .id = 4 },
+ { .n = "pioD_clk", .id = 5 },
+ { .n = "usart0_clk", .id = 6 },
+ { .n = "usart1_clk", .id = 7 },
+ { .n = "usart2_clk", .id = 8 },
+ { .n = "usart3_clk", .id = 9 },
+ { .n = "mci0_clk", .id = 10 },
+ { .n = "udc_clk", .id = 11 },
+ { .n = "twi0_clk", .id = 12 },
+ { .n = "spi0_clk", .id = 13 },
+ { .n = "ssc0_clk", .id = 14 },
+ { .n = "ssc1_clk", .id = 15 },
+ { .n = "ssc2_clk", .id = 16 },
+ { .n = "tc0_clk", .id = 17 },
+ { .n = "tc1_clk", .id = 18 },
+ { .n = "tc2_clk", .id = 19 },
+ { .n = "tc3_clk", .id = 20 },
+ { .n = "tc4_clk", .id = 21 },
+ { .n = "tc5_clk", .id = 22 },
+ { .n = "ohci_clk", .id = 23 },
+ { .n = "macb0_clk", .id = 24 },
+};
+
+static void __init at91rm9200_pmc_setup(struct device_node *np)
+{
+ const char *slowxtal_name, *mainxtal_name;
+ struct pmc_data *at91rm9200_pmc;
+ u32 usb_div[] = { 1, 2, 0, 0 };
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_xtal");
+ if (i < 0)
+ return;
+
+ slowxtal_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91rm9200_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(at91rm9200_systemck),
+ nck(at91rm9200_periphck), 0);
+ if (!at91rm9200_pmc)
+ return;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91rm9200_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &at91rm9200_pll_layout,
+ &rm9200_pll_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
+ &at91rm9200_pll_layout,
+ &rm9200_pll_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slowxtal_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ parent_names[3] = "pllbck";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91rm9200_master_layout,
+ &rm9200_mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91rm9200_pmc->chws[PMC_MCK] = hw;
+
+ hw = at91rm9200_clk_register_usb(regmap, "usbck", "pllbck", usb_div);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slowxtal_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ parent_names[3] = "pllbck";
+ for (i = 0; i < 4; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 4, i,
+ &at91rm9200_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91rm9200_systemck); i++) {
+ hw = at91_clk_register_system(regmap, at91rm9200_systemck[i].n,
+ at91rm9200_systemck[i].p,
+ at91rm9200_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91rm9200_pmc->shws[at91rm9200_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91rm9200_periphck); i++) {
+ hw = at91_clk_register_peripheral(regmap,
+ at91rm9200_periphck[i].n,
+ "masterck",
+ at91rm9200_periphck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91rm9200_pmc->phws[at91rm9200_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91rm9200_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91rm9200_pmc);
+}
+/*
+ * While the TCB can be used as the clocksource, the system timer is most likely
+ * to be used instead. However, the pinctrl driver doesn't support probe
+ * deferral properly. Once this is fixed, this can be switched to a platform
+ * driver.
+ */
+CLK_OF_DECLARE_DRIVER(at91rm9200_pmc, "atmel,at91rm9200-pmc",
+ at91rm9200_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
new file mode 100644
index 000000000000..38a7d2d2df0c
--- /dev/null
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 0, .max = 133333333 },
+ .divisors = { 1, 2, 4, 3 },
+};
+
+static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
+
+static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
+
+static const struct clk_range plla_outputs[] = {
+ { .min = 745000000, .max = 800000000 },
+ { .min = 695000000, .max = 750000000 },
+ { .min = 645000000, .max = 700000000 },
+ { .min = 595000000, .max = 650000000 },
+ { .min = 545000000, .max = 600000000 },
+ { .min = 495000000, .max = 555000000 },
+ { .min = 445000000, .max = 500000000 },
+ { .min = 400000000, .max = 450000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} at91sam9g45_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+static const struct clk_pcr_layout at91sam9g45_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(5, 0),
+ .div_mask = GENMASK(17, 16),
+};
+
+struct pck {
+ char *n;
+ u8 id;
+};
+
+static const struct pck at91sam9g45_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "pioDE_clk", .id = 5, },
+ { .n = "trng_clk", .id = 6, },
+ { .n = "usart0_clk", .id = 7, },
+ { .n = "usart1_clk", .id = 8, },
+ { .n = "usart2_clk", .id = 9, },
+ { .n = "usart3_clk", .id = 10, },
+ { .n = "mci0_clk", .id = 11, },
+ { .n = "twi0_clk", .id = 12, },
+ { .n = "twi1_clk", .id = 13, },
+ { .n = "spi0_clk", .id = 14, },
+ { .n = "spi1_clk", .id = 15, },
+ { .n = "ssc0_clk", .id = 16, },
+ { .n = "ssc1_clk", .id = 17, },
+ { .n = "tcb0_clk", .id = 18, },
+ { .n = "pwm_clk", .id = 19, },
+ { .n = "adc_clk", .id = 20, },
+ { .n = "dma0_clk", .id = 21, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "lcd_clk", .id = 23, },
+ { .n = "ac97_clk", .id = 24, },
+ { .n = "macb0_clk", .id = 25, },
+ { .n = "isi_clk", .id = 26, },
+ { .n = "udphs_clk", .id = 27, },
+ { .n = "aestdessha_clk", .id = 28, },
+ { .n = "mci1_clk", .id = 29, },
+ { .n = "vdec_clk", .id = 30, },
+};
+
+static void __init at91sam9g45_pmc_setup(struct device_node *np)
+{
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *at91sam9g45_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91sam9g45_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(at91sam9g45_systemck),
+ nck(at91sam9g45_periphck), 0);
+ if (!at91sam9g45_pmc)
+ return;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &at91rm9200_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91rm9200_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "plladivck";
+ parent_names[1] = "utmick";
+ hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ parent_names[4] = "masterck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9g45_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9g45_systemck); i++) {
+ hw = at91_clk_register_system(regmap, at91sam9g45_systemck[i].n,
+ at91sam9g45_systemck[i].p,
+ at91sam9g45_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->shws[at91sam9g45_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9g45_periphck); i++) {
+ hw = at91_clk_register_peripheral(regmap,
+ at91sam9g45_periphck[i].n,
+ "masterck",
+ at91sam9g45_periphck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->phws[at91sam9g45_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9g45_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91sam9g45_pmc);
+}
+/*
+ * The TCB is used as the clocksource so its clock is needed early. This means
+ * this can't be a platform driver.
+ */
+CLK_OF_DECLARE_DRIVER(at91sam9g45_pmc, "atmel,at91sam9g45-pmc",
+ at91sam9g45_pmc_setup);
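
pmc_data_allocate() above is sized with nck() on the system and peripheral tables. Assuming the usual definition from drivers/clk/at91/pmc.h (the last table entry's id plus one, which relies on the tables being sorted by id), a runnable sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* assumed to match the nck() helper in drivers/clk/at91/pmc.h */
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)

struct sck { const char *n; const char *p; unsigned char id; };

static const struct sck systemck[] = {
	{ "ddrck", "masterck", 2 },
	{ "uhpck", "usbck",    6 },
	{ "pck0",  "prog0",    8 },
	{ "pck1",  "prog1",    9 },
};

int main(void)
{
	/* the highest id bounds the slot array, not the entry count */
	printf("allocate %zu system clock slots\n", (size_t)nck(systemck));
	return 0;
}
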
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
new file mode 100644
index 000000000000..8bb39d2ba84b
--- /dev/null
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 0, .max = 133333333 },
+ .divisors = { 1, 2, 4, 3 },
+ .have_div3_pres = 1,
+};
+
+static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
+
+static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
+
+static const struct clk_range plla_outputs[] = {
+ { .min = 745000000, .max = 800000000 },
+ { .min = 695000000, .max = 750000000 },
+ { .min = 645000000, .max = 700000000 },
+ { .min = 595000000, .max = 650000000 },
+ { .min = 545000000, .max = 600000000 },
+ { .min = 495000000, .max = 555000000 },
+ { .min = 445000000, .max = 500000000 },
+ { .min = 400000000, .max = 450000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static u8 pllb_out[] = { 0 };
+
+static const struct clk_range pllb_outputs[] = {
+ { .min = 30000000, .max = 100000000 },
+};
+
+static const struct clk_pll_characteristics pllb_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(pllb_outputs),
+ .output = pllb_outputs,
+ .out = pllb_out,
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} at91sam9n12_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "lcdck", .p = "masterck", .id = 3 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+static const struct clk_pcr_layout at91sam9n12_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(5, 0),
+ .div_mask = GENMASK(17, 16),
+};
+
+struct pck {
+ char *n;
+ u8 id;
+};
+
+static const struct pck at91sam9n12_periphck[] = {
+ { .n = "pioAB_clk", .id = 2, },
+ { .n = "pioCD_clk", .id = 3, },
+ { .n = "fuse_clk", .id = 4, },
+ { .n = "usart0_clk", .id = 5, },
+ { .n = "usart1_clk", .id = 6, },
+ { .n = "usart2_clk", .id = 7, },
+ { .n = "usart3_clk", .id = 8, },
+ { .n = "twi0_clk", .id = 9, },
+ { .n = "twi1_clk", .id = 10, },
+ { .n = "mci0_clk", .id = 12, },
+ { .n = "spi0_clk", .id = 13, },
+ { .n = "spi1_clk", .id = 14, },
+ { .n = "uart0_clk", .id = 15, },
+ { .n = "uart1_clk", .id = 16, },
+ { .n = "tcb_clk", .id = 17, },
+ { .n = "pwm_clk", .id = 18, },
+ { .n = "adc_clk", .id = 19, },
+ { .n = "dma0_clk", .id = 20, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "udphs_clk", .id = 23, },
+ { .n = "lcdc_clk", .id = 25, },
+ { .n = "sha_clk", .id = 27, },
+ { .n = "ssc0_clk", .id = 28, },
+ { .n = "aes_clk", .id = 29, },
+ { .n = "trng_clk", .id = 30, },
+};
+
+static void __init at91sam9n12_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *at91sam9n12_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91sam9n12_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(at91sam9n12_systemck), 31, 0);
+ if (!at91sam9n12_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9n12_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &at91rm9200_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
+ &at91rm9200_pll_layout, &pllb_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "pllbck";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9n12_pmc->chws[PMC_MCK] = hw;
+
+ hw = at91sam9n12_clk_register_usb(regmap, "usbck", "pllbck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "pllbck";
+ parent_names[4] = "masterck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9x5_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9n12_systemck); i++) {
+ hw = at91_clk_register_system(regmap, at91sam9n12_systemck[i].n,
+ at91sam9n12_systemck[i].p,
+ at91sam9n12_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9n12_pmc->shws[at91sam9n12_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9n12_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &at91sam9n12_pcr_layout,
+ at91sam9n12_periphck[i].n,
+ "masterck",
+ at91sam9n12_periphck[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9n12_pmc->phws[at91sam9n12_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9n12_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91sam9n12_pmc);
+}
+/*
+ * The TCB is used as the clocksource so its clock is needed early. This means
+ * this can't be a platform driver.
+ */
+CLK_OF_DECLARE_DRIVER(at91sam9n12_pmc, "atmel,at91sam9n12-pmc",
+ at91sam9n12_pmc_setup);
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index dfb354a5ff18..e699803986e5 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -14,27 +14,8 @@
#include "pmc.h"
-#define PMC_PLL_CTRL0 0xc
-#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0)
-#define PMC_PLL_CTRL0_ENPLL BIT(28)
-#define PMC_PLL_CTRL0_ENPLLCK BIT(29)
-#define PMC_PLL_CTRL0_ENLOCK BIT(31)
-
-#define PMC_PLL_CTRL1 0x10
-#define PMC_PLL_CTRL1_FRACR_MSK GENMASK(21, 0)
-#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24)
-
-#define PMC_PLL_ACR 0x18
-#define PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL
-#define PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL
-#define PMC_PLL_ACR_UTMIVR BIT(12)
-#define PMC_PLL_ACR_UTMIBG BIT(13)
-#define PMC_PLL_ACR_LOOP_FILTER_MSK GENMASK(31, 24)
-
-#define PMC_PLL_UPDT 0x1c
-#define PMC_PLL_UPDT_UPDATE BIT(8)
-
-#define PMC_PLL_ISR0 0xec
+#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0)
+#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24)
#define PLL_DIV_MAX (FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, UINT_MAX) + 1)
#define UPLL_DIV 2
@@ -59,7 +40,7 @@ static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
{
unsigned int status;
- regmap_read(regmap, PMC_PLL_ISR0, &status);
+ regmap_read(regmap, AT91_PMC_PLL_ISR0, &status);
return !!(status & BIT(id));
}
@@ -74,12 +55,12 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
u32 val;
spin_lock_irqsave(pll->lock, flags);
- regmap_write(regmap, PMC_PLL_UPDT, pll->id);
+ regmap_write(regmap, AT91_PMC_PLL_UPDT, pll->id);
- regmap_read(regmap, PMC_PLL_CTRL0, &val);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
- regmap_read(regmap, PMC_PLL_CTRL1, &val);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val);
if (sam9x60_pll_ready(regmap, pll->id) &&
@@ -88,39 +69,39 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
return 0;
}
- /* Recommended value for PMC_PLL_ACR */
+ /* Recommended value for AT91_PMC_PLL_ACR */
if (pll->characteristics->upll)
- val = PMC_PLL_ACR_DEFAULT_UPLL;
+ val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
else
- val = PMC_PLL_ACR_DEFAULT_PLLA;
- regmap_write(regmap, PMC_PLL_ACR, val);
+ val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
+ regmap_write(regmap, AT91_PMC_PLL_ACR, val);
- regmap_write(regmap, PMC_PLL_CTRL1,
+ regmap_write(regmap, AT91_PMC_PLL_CTRL1,
FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, pll->mul));
if (pll->characteristics->upll) {
/* Enable the UTMI internal bandgap */
- val |= PMC_PLL_ACR_UTMIBG;
- regmap_write(regmap, PMC_PLL_ACR, val);
+ val |= AT91_PMC_PLL_ACR_UTMIBG;
+ regmap_write(regmap, AT91_PMC_PLL_ACR, val);
udelay(10);
/* Enable the UTMI internal regulator */
- val |= PMC_PLL_ACR_UTMIVR;
- regmap_write(regmap, PMC_PLL_ACR, val);
+ val |= AT91_PMC_PLL_ACR_UTMIVR;
+ regmap_write(regmap, AT91_PMC_PLL_ACR, val);
udelay(10);
}
- regmap_update_bits(regmap, PMC_PLL_UPDT,
- PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
- regmap_write(regmap, PMC_PLL_CTRL0,
- PMC_PLL_CTRL0_ENLOCK | PMC_PLL_CTRL0_ENPLL |
- PMC_PLL_CTRL0_ENPLLCK | pll->div);
+ regmap_write(regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL |
+ AT91_PMC_PLL_CTRL0_ENPLLCK | pll->div);
- regmap_update_bits(regmap, PMC_PLL_UPDT,
- PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
while (!sam9x60_pll_ready(regmap, pll->id))
cpu_relax();
@@ -144,22 +125,24 @@ static void sam9x60_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(pll->lock, flags);
- regmap_write(pll->regmap, PMC_PLL_UPDT, pll->id);
+ regmap_write(pll->regmap, AT91_PMC_PLL_UPDT, pll->id);
- regmap_update_bits(pll->regmap, PMC_PLL_CTRL0,
- PMC_PLL_CTRL0_ENPLLCK, 0);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENPLLCK, 0);
- regmap_update_bits(pll->regmap, PMC_PLL_UPDT,
- PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
- regmap_update_bits(pll->regmap, PMC_PLL_CTRL0, PMC_PLL_CTRL0_ENPLL, 0);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENPLL, 0);
if (pll->characteristics->upll)
- regmap_update_bits(pll->regmap, PMC_PLL_ACR,
- PMC_PLL_ACR_UTMIBG | PMC_PLL_ACR_UTMIVR, 0);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_ACR,
+ AT91_PMC_PLL_ACR_UTMIBG |
+ AT91_PMC_PLL_ACR_UTMIVR, 0);
- regmap_update_bits(pll->regmap, PMC_PLL_UPDT,
- PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
spin_unlock_irqrestore(pll->lock, flags);
}
@@ -316,10 +299,10 @@ sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
pll->regmap = regmap;
pll->lock = lock;
- regmap_write(regmap, PMC_PLL_UPDT, id);
- regmap_read(regmap, PMC_PLL_CTRL0, &pllr);
+ regmap_write(regmap, AT91_PMC_PLL_UPDT, id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &pllr);
pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, pllr);
- regmap_read(regmap, PMC_PLL_CTRL1, &pllr);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL1, &pllr);
pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, pllr);
hw = &pll->hw;
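
The register accessors above lean on GENMASK()/FIELD_GET()/FIELD_PREP() to pack and unpack the DIV and MUL fields. A self-contained userspace approximation of those helpers, with semantics modeled on include/linux/bits.h and include/linux/bitfield.h (not the kernel implementations themselves):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)	(((v) & (m)) >> __builtin_ctz(m))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

#define PMC_PLL_CTRL0_DIV_MSK	GENMASK(7, 0)
#define PMC_PLL_CTRL1_MUL_MSK	GENMASK(30, 24)

int main(void)
{
	/* pack div=5 and mul=49 into their register fields */
	uint32_t ctrl0 = FIELD_PREP(PMC_PLL_CTRL0_DIV_MSK, 5);
	uint32_t ctrl1 = FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, 49);

	printf("div=%u mul=%u\n",
	       (unsigned)FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, ctrl0),
	       (unsigned)FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, ctrl1));
	return 0;
}
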
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 22aede42a336..31d5c45e30d7 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -25,6 +25,7 @@ struct at91sam9x5_clk_usb {
struct clk_hw hw;
struct regmap *regmap;
u32 usbs_mask;
+ u8 num_parents;
};
#define to_at91sam9x5_clk_usb(hw) \
@@ -75,6 +76,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
tmp_parent_rate = req->rate * div;
tmp_parent_rate = clk_hw_round_rate(parent,
tmp_parent_rate);
+ if (!tmp_parent_rate)
+ continue;
+
tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
if (tmp_rate < req->rate)
tmp_diff = req->rate - tmp_rate;
@@ -107,7 +111,7 @@ static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- if (index > 1)
+ if (index >= usb->num_parents)
return -EINVAL;
regmap_update_bits(usb->regmap, AT91_PMC_USB, usb->usbs_mask, index);
@@ -211,7 +215,8 @@ _at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
usb->hw.init = &init;
usb->regmap = regmap;
- usb->usbs_mask = SAM9X5_USBS_MASK;
+ usb->usbs_mask = usbs_mask;
+ usb->num_parents = num_parents;
hw = &usb->hw;
ret = clk_hw_register(NULL, &usb->hw);
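
The determine_rate fix above skips parents whose rounded rate comes back as zero before computing DIV_ROUND_CLOSEST(). A standalone sketch of the same best-divider search with one deliberately unusable parent; the rates and the div range 1..4 are illustrative, matching SAM9X5_USB_MAX_DIV = 3:

#include <stdio.h>

#define SAM9X5_USB_MAX_DIV	3
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* hypothetical fixed parent rates; a zero entry models an unusable parent */
static const unsigned long parent_rates[] = { 480000000, 0 };

int main(void)
{
	unsigned long req = 160000000, best_rate = 0, best_diff = ~0ul;
	size_t p;
	int div;

	for (p = 0; p < sizeof(parent_rates) / sizeof(parent_rates[0]); p++) {
		for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
			unsigned long prate = parent_rates[p];
			unsigned long rate, diff;

			if (!prate)	/* mirrors the new !tmp_parent_rate check */
				continue;

			rate = DIV_ROUND_CLOSEST(prate, div);
			diff = rate > req ? rate - req : req - rate;
			if (diff < best_diff) {
				best_diff = diff;
				best_rate = rate;
			}
		}
	}
	printf("best achievable usbck rate: %lu Hz\n", best_rate);
	return 0;
}
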
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 77398aefeb6d..cc19e8fb83be 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -124,7 +124,6 @@ static const struct {
char *n;
u8 id;
struct clk_range r;
- bool pll;
} sam9x60_gck[] = {
{ .n = "flex0_gclk", .id = 5, },
{ .n = "flex1_gclk", .id = 6, },
@@ -144,11 +143,9 @@ static const struct {
{ .n = "sdmmc1_gclk", .id = 26, .r = { .min = 0, .max = 105000000 }, },
{ .n = "flex11_gclk", .id = 32, },
{ .n = "flex12_gclk", .id = 33, },
- { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 },
- .pll = true, },
+ { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 }, },
{ .n = "pit64b_gclk", .id = 37, },
- { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 },
- .pll = true, },
+ { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 }, },
{ .n = "tcb1_gclk", .id = 45, },
{ .n = "dbgu_gclk", .id = 47, },
};
@@ -237,9 +234,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[0] = "pllack";
parent_names[1] = "upllck";
- parent_names[2] = "mainck";
- parent_names[3] = "mainck";
- hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 4);
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
if (IS_ERR(hw))
goto err_free;
@@ -290,7 +286,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
sam9x60_gck[i].n,
parent_names, 6,
sam9x60_gck[i].id,
- sam9x60_gck[i].pll,
+ false,
&sam9x60_gck[i].r);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
new file mode 100644
index 000000000000..88506f909c08
--- /dev/null
+++ b/drivers/clk/at91/sama5d3.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 0, .max = 166000000 },
+ .divisors = { 1, 2, 4, 3 },
+};
+
+static u8 plla_out[] = { 0 };
+
+static u16 plla_icpll[] = { 0 };
+
+static const struct clk_range plla_outputs[] = {
+ { .min = 400000000, .max = 1000000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 8000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static const struct clk_pcr_layout sama5d3_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(6, 0),
+ .div_mask = GENMASK(17, 16),
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} sama5d3_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "lcdck", .p = "masterck", .id = 3 },
+ { .n = "smdck", .p = "smdclk", .id = 4 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+ struct clk_range r;
+} sama5d3_periphck[] = {
+ { .n = "dbgu_clk", .id = 2, },
+ { .n = "hsmc_clk", .id = 5, },
+ { .n = "pioA_clk", .id = 6, },
+ { .n = "pioB_clk", .id = 7, },
+ { .n = "pioC_clk", .id = 8, },
+ { .n = "pioD_clk", .id = 9, },
+ { .n = "pioE_clk", .id = 10, },
+ { .n = "usart0_clk", .id = 12, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "usart1_clk", .id = 13, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "usart2_clk", .id = 14, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "usart3_clk", .id = 15, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart0_clk", .id = 16, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart1_clk", .id = 17, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "twi0_clk", .id = 18, .r = { .min = 0, .max = 41500000 }, },
+ { .n = "twi1_clk", .id = 19, .r = { .min = 0, .max = 41500000 }, },
+ { .n = "twi2_clk", .id = 20, .r = { .min = 0, .max = 41500000 }, },
+ { .n = "mci0_clk", .id = 21, },
+ { .n = "mci1_clk", .id = 22, },
+ { .n = "mci2_clk", .id = 23, },
+ { .n = "spi0_clk", .id = 24, .r = { .min = 0, .max = 166000000 }, },
+ { .n = "spi1_clk", .id = 25, .r = { .min = 0, .max = 166000000 }, },
+ { .n = "tcb0_clk", .id = 26, .r = { .min = 0, .max = 166000000 }, },
+ { .n = "tcb1_clk", .id = 27, .r = { .min = 0, .max = 166000000 }, },
+ { .n = "pwm_clk", .id = 28, },
+ { .n = "adc_clk", .id = 29, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "dma0_clk", .id = 30, },
+ { .n = "dma1_clk", .id = 31, },
+ { .n = "uhphs_clk", .id = 32, },
+ { .n = "udphs_clk", .id = 33, },
+ { .n = "macb0_clk", .id = 34, },
+ { .n = "macb1_clk", .id = 35, },
+ { .n = "lcdc_clk", .id = 36, },
+ { .n = "isi_clk", .id = 37, },
+ { .n = "ssc0_clk", .id = 38, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "ssc1_clk", .id = 39, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "can0_clk", .id = 40, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "can1_clk", .id = 41, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "sha_clk", .id = 42, },
+ { .n = "aes_clk", .id = 43, },
+ { .n = "tdes_clk", .id = 44, },
+ { .n = "trng_clk", .id = 45, },
+ { .n = "fuse_clk", .id = 48, },
+ { .n = "mpddr_clk", .id = 49, },
+};
+
+static void __init sama5d3_pmc_setup(struct device_node *np)
+{
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *sama5d3_pmc;
+ const char *parent_names[5];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama5d3_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(sama5d3_systemck),
+ nck(sama5d3_periphck), 0);
+ if (!sama5d3_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &sama5d3_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d3_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d3_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "plladivck";
+ parent_names[1] = "utmick";
+ hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91sam9x5_clk_register_smd(regmap, "smdclk", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ parent_names[4] = "masterck";
+ for (i = 0; i < 3; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9x5_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d3_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama5d3_systemck[i].n,
+ sama5d3_systemck[i].p,
+ sama5d3_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d3_pmc->shws[sama5d3_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d3_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d3_pcr_layout,
+ sama5d3_periphck[i].n,
+ "masterck",
+ sama5d3_periphck[i].id,
+ &sama5d3_periphck[i].r);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d3_pmc->phws[sama5d3_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama5d3_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(sama5d3_pmc);
+}
+/*
+ * The TCB is used as the clocksource so its clock is needed early. This means
+ * this can't be a platform driver.
+ */
+CLK_OF_DECLARE_DRIVER(sama5d3_pmc, "atmel,sama5d3-pmc", sama5d3_pmc_setup);
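
Many sama5d3 peripherals carry a .r range, for example the USARTs capped at 83 MHz under a 166 MHz MCK. A small sketch of how such a range might clamp a requested rate, assuming max == 0 means "no constraint" as the empty initializers suggest:

#include <stdio.h>

struct clk_range { unsigned long min, max; };

/* illustrative clamp, modeled on how the peripheral driver applies .r */
static unsigned long clamp_rate(unsigned long rate, const struct clk_range *r)
{
	if (r->max && rate > r->max)
		return r->max;
	if (rate < r->min)
		return r->min;
	return rate;
}

int main(void)
{
	struct clk_range usart = { .min = 0, .max = 83000000 };

	printf("usart0_clk capped at %lu Hz\n",
	       clamp_rate(166000000, &usart));
	return 0;
}
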
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index 536b59aabd2c..bacebd457e6f 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -276,7 +276,7 @@ static void __init asm9260_acc_init(struct device_node *np)
/* TODO: Convert to DT parent scheme */
ref_clk = of_clk_get_parent_name(np, 0);
- hw = __clk_hw_register_fixed_rate_with_accuracy(NULL, NULL, pll_clk,
+ hw = __clk_hw_register_fixed_rate(NULL, NULL, pll_clk,
ref_clk, NULL, NULL, 0, rate, 0,
CLK_FIXED_RATE_PARENT_ACCURACY);
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 6e780c2a9e6b..3c228b018116 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
+#define SI5341_NUM_INPUTS 4
+
#define SI5341_MAX_NUM_OUTPUTS 10
#define SI5340_MAX_NUM_OUTPUTS 4
@@ -56,8 +58,8 @@ struct clk_si5341 {
struct i2c_client *i2c_client;
struct clk_si5341_synth synth[SI5341_NUM_SYNTH];
struct clk_si5341_output clk[SI5341_MAX_NUM_OUTPUTS];
- struct clk *pxtal;
- const char *pxtal_name;
+ struct clk *input_clk[SI5341_NUM_INPUTS];
+ const char *input_clk_name[SI5341_NUM_INPUTS];
const u16 *reg_output_offset;
const u16 *reg_rdiv_offset;
u64 freq_vco; /* 13500–14256 MHz */
@@ -78,10 +80,25 @@ struct clk_si5341_output_config {
#define SI5341_DEVICE_REV 0x0005
#define SI5341_STATUS 0x000C
#define SI5341_SOFT_RST 0x001C
+#define SI5341_IN_SEL 0x0021
+#define SI5341_XAXB_CFG 0x090E
+#define SI5341_IN_EN 0x0949
+#define SI5341_INX_TO_PFD_EN 0x094A
+
+/* Input selection */
+#define SI5341_IN_SEL_MASK 0x06
+#define SI5341_IN_SEL_SHIFT 1
+#define SI5341_IN_SEL_REGCTRL 0x01
+#define SI5341_INX_TO_PFD_SHIFT 4
+
+/* XTAL config bits */
+#define SI5341_XAXB_CFG_EXTCLK_EN BIT(0)
+#define SI5341_XAXB_CFG_PDNB BIT(1)
/* Input dividers (48-bit) */
#define SI5341_IN_PDIV(x) (0x0208 + ((x) * 10))
#define SI5341_IN_PSET(x) (0x020E + ((x) * 10))
+#define SI5341_PX_UPD 0x0230
/* PLL configuration */
#define SI5341_PLL_M_NUM 0x0235
@@ -120,6 +137,10 @@ struct si5341_reg_default {
u8 value;
};
+static const char * const si5341_input_clock_names[] = {
+ "in0", "in1", "in2", "xtal"
+};
+
/* Output configuration registers 0..9 are not quite logically organized */
static const u16 si5341_reg_output_offset[] = {
0x0108,
@@ -390,7 +411,112 @@ static unsigned long si5341_clk_recalc_rate(struct clk_hw *hw,
return (unsigned long)res;
}
+static int si5341_clk_get_selected_input(struct clk_si5341 *data)
+{
+ int err;
+ u32 val;
+
+ err = regmap_read(data->regmap, SI5341_IN_SEL, &val);
+ if (err < 0)
+ return err;
+
+ return (val & SI5341_IN_SEL_MASK) >> SI5341_IN_SEL_SHIFT;
+}
+
+static u8 si5341_clk_get_parent(struct clk_hw *hw)
+{
+ struct clk_si5341 *data = to_clk_si5341(hw);
+ int res = si5341_clk_get_selected_input(data);
+
+ if (res < 0)
+ return 0; /* Apparently we cannot report errors */
+
+ return res;
+}
+
+static int si5341_clk_reparent(struct clk_si5341 *data, u8 index)
+{
+ int err;
+ u8 val;
+
+ val = (index << SI5341_IN_SEL_SHIFT) & SI5341_IN_SEL_MASK;
+ /* Enable register-based input selection */
+ val |= SI5341_IN_SEL_REGCTRL;
+
+ err = regmap_update_bits(data->regmap,
+ SI5341_IN_SEL, SI5341_IN_SEL_REGCTRL | SI5341_IN_SEL_MASK, val);
+ if (err < 0)
+ return err;
+
+ if (index < 3) {
+ /* Enable input buffer for selected input */
+ err = regmap_update_bits(data->regmap,
+ SI5341_IN_EN, 0x07, BIT(index));
+ if (err < 0)
+ return err;
+
+ /* Enable the input to the phase detector */
+ err = regmap_update_bits(data->regmap, SI5341_INX_TO_PFD_EN,
+ 0x7 << SI5341_INX_TO_PFD_SHIFT,
+ BIT(index + SI5341_INX_TO_PFD_SHIFT));
+ if (err < 0)
+ return err;
+
+ /* Power down XTAL oscillator and buffer */
+ err = regmap_update_bits(data->regmap, SI5341_XAXB_CFG,
+ SI5341_XAXB_CFG_PDNB, 0);
+ if (err < 0)
+ return err;
+
+ /*
+ * Set the P divider to "1". The datasheet does not explain
+ * these registers, but the ClockBuilder software programs a
+ * "1" when the input is in use.
+ */
+ err = regmap_write(data->regmap, SI5341_IN_PDIV(index), 1);
+ if (err < 0)
+ return err;
+
+ err = regmap_write(data->regmap, SI5341_IN_PSET(index), 1);
+ if (err < 0)
+ return err;
+
+ /* Set update PDIV bit */
+ err = regmap_write(data->regmap, SI5341_PX_UPD, BIT(index));
+ if (err < 0)
+ return err;
+ } else {
+ /* Disable all input buffers */
+ err = regmap_update_bits(data->regmap, SI5341_IN_EN, 0x07, 0);
+ if (err < 0)
+ return err;
+
+ /* Disable input to phase detector */
+ err = regmap_update_bits(data->regmap, SI5341_INX_TO_PFD_EN,
+ 0x7 << SI5341_INX_TO_PFD_SHIFT, 0);
+ if (err < 0)
+ return err;
+
+ /* Power up XTAL oscillator and buffer */
+ err = regmap_update_bits(data->regmap, SI5341_XAXB_CFG,
+ SI5341_XAXB_CFG_PDNB, SI5341_XAXB_CFG_PDNB);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int si5341_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_si5341 *data = to_clk_si5341(hw);
+
+ return si5341_clk_reparent(data, index);
+}
+
static const struct clk_ops si5341_clk_ops = {
+ .set_parent = si5341_clk_set_parent,
+ .get_parent = si5341_clk_get_parent,
.recalc_rate = si5341_clk_recalc_rate,
};
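
si5341_clk_reparent() encodes the selected input in a two-bit field at bit 1 of IN_SEL and sets bit 0 to force register-based (rather than pin-based) selection. A runnable sketch of just that encoding:

#include <stdint.h>
#include <stdio.h>

#define SI5341_IN_SEL_MASK	0x06
#define SI5341_IN_SEL_SHIFT	1
#define SI5341_IN_SEL_REGCTRL	0x01

int main(void)
{
	uint8_t index;

	/* inputs 0..2 are in0..in2, index 3 is the xtal path */
	for (index = 0; index < 4; index++) {
		uint8_t val = (uint8_t)((index << SI5341_IN_SEL_SHIFT) &
					SI5341_IN_SEL_MASK);

		val |= SI5341_IN_SEL_REGCTRL;	/* register-based selection */
		printf("input %u -> IN_SEL = 0x%02x\n",
		       (unsigned)index, (unsigned)val);
	}
	return 0;
}
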
@@ -985,7 +1111,8 @@ static const struct regmap_range si5341_regmap_volatile_range[] = {
regmap_reg_range(0x000C, 0x0012), /* Status */
regmap_reg_range(0x001C, 0x001E), /* reset, finc/fdec */
regmap_reg_range(0x00E2, 0x00FE), /* NVM, interrupts, device ready */
- /* Update bits for synth config */
+ /* Update bits for P divider and synth config */
+ regmap_reg_range(SI5341_PX_UPD, SI5341_PX_UPD),
regmap_reg_range(SI5341_SYNTH_N_UPD(0), SI5341_SYNTH_N_UPD(0)),
regmap_reg_range(SI5341_SYNTH_N_UPD(1), SI5341_SYNTH_N_UPD(1)),
regmap_reg_range(SI5341_SYNTH_N_UPD(2), SI5341_SYNTH_N_UPD(2)),
@@ -1122,6 +1249,7 @@ static int si5341_initialize_pll(struct clk_si5341 *data)
struct device_node *np = data->i2c_client->dev.of_node;
u32 m_num = 0;
u32 m_den = 0;
+ int sel;
if (of_property_read_u32(np, "silabs,pll-m-num", &m_num)) {
dev_err(&data->i2c_client->dev,
@@ -1135,7 +1263,11 @@ static int si5341_initialize_pll(struct clk_si5341 *data)
if (!m_num || !m_den) {
dev_err(&data->i2c_client->dev,
"PLL configuration invalid, assume 14GHz\n");
- m_den = clk_get_rate(data->pxtal) / 10;
+ sel = si5341_clk_get_selected_input(data);
+ if (sel < 0)
+ return sel;
+
+ m_den = clk_get_rate(data->input_clk[sel]) / 10;
m_num = 1400000000;
}
@@ -1143,11 +1275,52 @@ static int si5341_initialize_pll(struct clk_si5341 *data)
SI5341_PLL_M_NUM, m_num, m_den);
}
+static int si5341_clk_select_active_input(struct clk_si5341 *data)
+{
+ int res;
+ int err;
+ int i;
+
+ res = si5341_clk_get_selected_input(data);
+ if (res < 0)
+ return res;
+
+ /* If the current register setting is invalid, pick the first input */
+ if (!data->input_clk[res]) {
+ dev_dbg(&data->i2c_client->dev,
+ "Input %d not connected, rerouting\n", res);
+ res = -ENODEV;
+ for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
+ if (data->input_clk[i]) {
+ res = i;
+ break;
+ }
+ }
+ if (res < 0) {
+ dev_err(&data->i2c_client->dev,
+ "No clock input available\n");
+ return res;
+ }
+ }
+
+ /* Make sure the selected clock is also enabled and routed */
+ err = si5341_clk_reparent(data, res);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(data->input_clk[res]);
+ if (err < 0)
+ return err;
+
+ return res;
+}
+
static int si5341_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct clk_si5341 *data;
struct clk_init_data init;
+ struct clk *input;
const char *root_clock_name;
const char *synth_clock_names[SI5341_NUM_SYNTH];
int err;
@@ -1161,12 +1334,16 @@ static int si5341_probe(struct i2c_client *client,
data->i2c_client = client;
- data->pxtal = devm_clk_get(&client->dev, "xtal");
- if (IS_ERR(data->pxtal)) {
- if (PTR_ERR(data->pxtal) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_err(&client->dev, "Missing xtal clock input\n");
+ for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
+ input = devm_clk_get(&client->dev, si5341_input_clock_names[i]);
+ if (IS_ERR(input)) {
+ if (PTR_ERR(input) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ data->input_clk_name[i] = si5341_input_clock_names[i];
+ } else {
+ data->input_clk[i] = input;
+ data->input_clk_name[i] = __clk_get_name(input);
+ }
}
err = si5341_dt_parse_dt(client, config);
@@ -1188,9 +1365,6 @@ static int si5341_probe(struct i2c_client *client,
if (err < 0)
return err;
- /* "Activate" the xtal (usually a fixed clock) */
- clk_prepare_enable(data->pxtal);
-
if (of_property_read_bool(client->dev.of_node, "silabs,reprogram")) {
initialization_required = true;
} else {
@@ -1223,7 +1397,14 @@ static int si5341_probe(struct i2c_client *client,
ARRAY_SIZE(si5341_reg_defaults));
if (err < 0)
return err;
+ }
+
+ /* Input must be up and running at this point */
+ err = si5341_clk_select_active_input(data);
+ if (err < 0)
+ return err;
+ if (initialization_required) {
/* PLL configuration is required */
err = si5341_initialize_pll(data);
if (err < 0)
@@ -1231,9 +1412,8 @@ static int si5341_probe(struct i2c_client *client,
}
/* Register the PLL */
- data->pxtal_name = __clk_get_name(data->pxtal);
- init.parent_names = &data->pxtal_name;
- init.num_parents = 1; /* For now, only XTAL input supported */
+ init.parent_names = data->input_clk_name;
+ init.num_parents = SI5341_NUM_INPUTS;
init.ops = &si5341_clk_ops;
init.flags = 0;
data->hw.init = &init;
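
si5341_clk_select_active_input() falls back to the first wired-up input when the register points at an input with no clock attached. The selection logic in isolation, with hypothetical placeholder inputs:

#include <stdio.h>

#define NUM_INPUTS 4

int main(void)
{
	/* NULL entries model inputs with no clock wired up in the DT */
	const char *input_clk[NUM_INPUTS] = { NULL, NULL, "in2", "xtal" };
	int sel = 1;	/* pretend the chip's IN_SEL register points at in1 */

	if (!input_clk[sel]) {
		int i;

		sel = -1;
		for (i = 0; i < NUM_INPUTS; i++) {
			if (input_clk[i]) {	/* first usable input wins */
				sel = i;
				break;
			}
		}
	}
	if (sel < 0)
		printf("no clock input available\n");
	else
		printf("rerouted to input %d (%s)\n", sel, input_clk[sel]);
	return 0;
}
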
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 95adf6c6db3d..2dfb30b963c4 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -488,7 +488,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);
-static unsigned long __clk_get_accuracy(struct clk_core *core)
+static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
if (!core)
return 0;
@@ -774,7 +774,7 @@ static void clk_core_rate_restore_protect(struct clk_core *core, int count)
* clk_rate_exclusive_get - get exclusivity over the clk rate control
* @clk: the clk over which the exclusity of rate control is requested
*
- * clk_rate_exlusive_get() begins a critical section during which a clock
+ * clk_rate_exclusive_get() begins a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
@@ -1517,18 +1517,12 @@ static void __clk_recalc_accuracies(struct clk_core *core)
__clk_recalc_accuracies(child);
}
-static long clk_core_get_accuracy(struct clk_core *core)
+static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
- unsigned long accuracy;
-
- clk_prepare_lock();
if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
__clk_recalc_accuracies(core);
- accuracy = __clk_get_accuracy(core);
- clk_prepare_unlock();
-
- return accuracy;
+ return clk_core_get_accuracy_no_lock(core);
}
/**
@@ -1542,10 +1536,16 @@ static long clk_core_get_accuracy(struct clk_core *core)
*/
long clk_get_accuracy(struct clk *clk)
{
+ long accuracy;
+
if (!clk)
return 0;
- return clk_core_get_accuracy(clk->core);
+ clk_prepare_lock();
+ accuracy = clk_core_get_accuracy_recalc(clk->core);
+ clk_prepare_unlock();
+
+ return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);
@@ -1599,19 +1599,12 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
__clk_recalc_rates(child, msg);
}
-static unsigned long clk_core_get_rate(struct clk_core *core)
+static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
- unsigned long rate;
-
- clk_prepare_lock();
-
if (core && (core->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(core, 0);
- rate = clk_core_get_rate_nolock(core);
- clk_prepare_unlock();
-
- return rate;
+ return clk_core_get_rate_nolock(core);
}
/**
@@ -1624,10 +1617,16 @@ static unsigned long clk_core_get_rate(struct clk_core *core)
*/
unsigned long clk_get_rate(struct clk *clk)
{
+ unsigned long rate;
+
if (!clk)
return 0;
- return clk_core_get_rate(clk->core);
+ clk_prepare_lock();
+ rate = clk_core_get_rate_recalc(clk->core);
+ clk_prepare_unlock();
+
+ return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
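
The refactor moves locking to the public entry points: clk_get_rate() and clk_get_accuracy() take the prepare lock once and call *_recalc helpers that assume it is already held. A userspace analogue of that lock-at-the-boundary pattern, with a pthread mutex standing in for prepare_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t prepare_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long cached_rate = 48000000;

/* Helper assumes the caller holds prepare_lock; the kernel version
 * documents the same contract with lockdep_assert_held(). */
static unsigned long core_get_rate_recalc(void)
{
	/* ...optionally refresh cached_rate from hardware here... */
	return cached_rate;
}

/* Public entry point takes the lock exactly once, like clk_get_rate() */
static unsigned long get_rate(void)
{
	unsigned long rate;

	pthread_mutex_lock(&prepare_lock);
	rate = core_get_rate_recalc();
	pthread_mutex_unlock(&prepare_lock);
	return rate;
}

int main(void)
{
	printf("rate = %lu Hz\n", get_rate());
	return 0;
}
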
@@ -2660,12 +2659,14 @@ static int clk_core_get_phase(struct clk_core *core)
{
int ret;
- clk_prepare_lock();
+ lockdep_assert_held(&prepare_lock);
+ if (!core->ops->get_phase)
+ return 0;
+
/* Always try to update cached phase if possible */
- if (core->ops->get_phase)
- core->phase = core->ops->get_phase(core->hw);
- ret = core->phase;
- clk_prepare_unlock();
+ ret = core->ops->get_phase(core->hw);
+ if (ret >= 0)
+ core->phase = ret;
return ret;
}
@@ -2679,10 +2680,16 @@ static int clk_core_get_phase(struct clk_core *core)
*/
int clk_get_phase(struct clk *clk)
{
+ int ret;
+
if (!clk)
return 0;
- return clk_core_get_phase(clk->core);
+ clk_prepare_lock();
+ ret = clk_core_get_phase(clk->core);
+ clk_prepare_unlock();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(clk_get_phase);
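
clk_core_get_phase() now returns either a non-negative phase or a negative errno, caching only valid readings, and clk_summary prints dashes rather than a bogus value on failure. A compact sketch of that value-or-error convention (hw_get_phase() is a hypothetical callback):

#include <errno.h>
#include <stdio.h>

/* hypothetical hardware callback: phase in degrees, or -errno */
static int hw_get_phase(int fail)
{
	return fail ? -EIO : 90;
}

static int core_phase;	/* cached value, as in struct clk_core */

static int core_get_phase(int fail)
{
	int ret = hw_get_phase(fail);

	if (ret >= 0)		/* only cache valid readings */
		core_phase = ret;
	return ret;
}

int main(void)
{
	int phase = core_get_phase(0);

	/* clk_summary prints "-----" instead of a negative error code */
	if (phase >= 0)
		printf("phase %5d\n", phase);
	else
		printf("phase -----\n");
	return 0;
}
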
@@ -2896,13 +2903,22 @@ static struct hlist_head *orphan_list[] = {
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
+ int phase;
+
+ seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
- clk_core_get_rate(c), clk_core_get_accuracy(c),
- clk_core_get_phase(c),
- clk_core_get_scaled_duty_cycle(c, 100000));
+ clk_core_get_rate_recalc(c),
+ clk_core_get_accuracy_recalc(c));
+
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+ seq_printf(s, "%5d", phase);
+ else
+ seq_puts(s, "-----");
+
+ seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2939,6 +2955,7 @@ DEFINE_SHOW_ATTRIBUTE(clk_summary);
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
+ int phase;
unsigned long min_rate, max_rate;
clk_core_get_boundaries(c, &min_rate, &max_rate);
@@ -2948,11 +2965,13 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
- seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
+ seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
seq_printf(s, "\"min_rate\": %lu,", min_rate);
seq_printf(s, "\"max_rate\": %lu,", max_rate);
- seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
- seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
+ seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+ seq_printf(s, "\"phase\": %d,", phase);
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
@@ -3323,7 +3342,9 @@ static void clk_core_reparent_orphans_nolock(void)
static int __clk_core_init(struct clk_core *core)
{
int ret;
+ struct clk_core *parent;
unsigned long rate;
+ int phase;
if (!core)
return -EINVAL;
@@ -3394,7 +3415,7 @@ static int __clk_core_init(struct clk_core *core)
goto out;
}
- core->parent = __clk_init_parent(core);
+ parent = core->parent = __clk_init_parent(core);
/*
* Populate core->parent if parent has already been clk_core_init'd. If
@@ -3406,10 +3427,9 @@ static int __clk_core_init(struct clk_core *core)
* clocks and re-parent any that are children of the clock currently
* being clk_init'd.
*/
- if (core->parent) {
- hlist_add_head(&core->child_node,
- &core->parent->children);
- core->orphan = core->parent->orphan;
+ if (parent) {
+ hlist_add_head(&core->child_node, &parent->children);
+ core->orphan = parent->orphan;
} else if (!core->num_parents) {
hlist_add_head(&core->child_node, &clk_root_list);
core->orphan = false;
@@ -3427,21 +3447,24 @@ static int __clk_core_init(struct clk_core *core)
*/
if (core->ops->recalc_accuracy)
core->accuracy = core->ops->recalc_accuracy(core->hw,
- __clk_get_accuracy(core->parent));
- else if (core->parent)
- core->accuracy = core->parent->accuracy;
+ clk_core_get_accuracy_no_lock(parent));
+ else if (parent)
+ core->accuracy = parent->accuracy;
else
core->accuracy = 0;
/*
- * Set clk's phase.
+ * Set clk's phase, caching the value returned by clk_core_get_phase().
* Since a phase is by definition relative to its parent, just
* query the current clock phase, or just assume it's in phase.
*/
- if (core->ops->get_phase)
- core->phase = core->ops->get_phase(core->hw);
- else
- core->phase = 0;
+ phase = clk_core_get_phase(core);
+ if (phase < 0) {
+ ret = phase;
+ pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
+ core->name);
+ goto out;
+ }
/*
* Set clk's duty cycle.
@@ -3456,9 +3479,9 @@ static int __clk_core_init(struct clk_core *core)
*/
if (core->ops->recalc_rate)
rate = core->ops->recalc_rate(core->hw,
- clk_core_get_rate_nolock(core->parent));
- else if (core->parent)
- rate = core->parent->rate;
+ clk_core_get_rate_nolock(parent));
+ else if (parent)
+ rate = parent->rate;
else
rate = 0;
core->rate = core->req_rate = rate;
@@ -3496,6 +3519,9 @@ static int __clk_core_init(struct clk_core *core)
out:
clk_pm_runtime_put(core);
unlock:
+ if (ret)
+ hlist_del_init(&core->child_node);
+
clk_prepare_unlock();
if (!ret)
@@ -4865,8 +4891,8 @@ static int parent_ready(struct device_node *np)
*
* Return: error code or zero on success
*/
-int of_clk_detect_critical(struct device_node *np,
- int index, unsigned long *flags)
+int of_clk_detect_critical(struct device_node *np, int index,
+ unsigned long *flags)
{
struct property *prop;
const __be32 *cur;
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 20f7c91c03d2..99773519b5a5 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -15,6 +15,7 @@
#define PCG_PREDIV_MAX 8
#define PCG_DIV_SHIFT 0
+#define PCG_CORE_DIV_WIDTH 3
#define PCG_DIV_WIDTH 6
#define PCG_DIV_MAX 64
@@ -91,7 +92,7 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
- unsigned long flags = 0;
+ unsigned long flags;
int prediv_value;
int div_value;
int ret;
@@ -126,6 +127,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
+ u32 composite_flags,
unsigned long flags)
{
struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
@@ -133,6 +135,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
struct clk_divider *div = NULL;
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
+ const struct clk_ops *divider_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -150,8 +153,16 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
div_hw = &div->hw;
div->reg = reg;
- div->shift = PCG_PREDIV_SHIFT;
- div->width = PCG_PREDIV_WIDTH;
+ if (composite_flags & IMX_COMPOSITE_CORE) {
+ div->shift = PCG_DIV_SHIFT;
+ div->width = PCG_CORE_DIV_WIDTH;
+ divider_ops = &clk_divider_ops;
+ } else {
+ div->shift = PCG_PREDIV_SHIFT;
+ div->width = PCG_PREDIV_WIDTH;
+ divider_ops = &imx8m_clk_composite_divider_ops;
+ }
+
div->lock = &imx_ccm_lock;
div->flags = CLK_DIVIDER_ROUND_CLOSEST;
@@ -166,8 +177,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ops, div_hw,
- &imx8m_clk_composite_divider_ops,
- gate_hw, &clk_gate_ops, flags);
+ divider_ops, gate_hw, &clk_gate_ops, flags);
if (IS_ERR(hw))
goto fail;
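
imx8m_clk_hw_composite_flags() now picks the divider field from the composite type: core slices use a single 3-bit post divider at bit 0, while bus slices keep the pre-divider. A sketch of the selection, assuming PCG_PREDIV_SHIFT is 16 and IMX_COMPOSITE_CORE is bit 0 (both are defined elsewhere in the driver):

#include <stdio.h>

#define PCG_PREDIV_SHIFT	16	/* assumed value for illustration */
#define PCG_PREDIV_WIDTH	3
#define PCG_DIV_SHIFT		0
#define PCG_CORE_DIV_WIDTH	3
#define IMX_COMPOSITE_CORE	1	/* assumed flag value */

int main(void)
{
	unsigned int composite_flags = IMX_COMPOSITE_CORE;
	unsigned int shift, width;

	if (composite_flags & IMX_COMPOSITE_CORE) {
		/* core slices: one 3-bit post divider, generic divider ops */
		shift = PCG_DIV_SHIFT;
		width = PCG_CORE_DIV_WIDTH;
	} else {
		/* bus slices: 3-bit pre-divider, imx8m composite ops */
		shift = PCG_PREDIV_SHIFT;
		width = PCG_PREDIV_WIDTH;
	}
	printf("divider field: shift=%u width=%u\n", shift, width);
	return 0;
}
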
diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c
index 4b17b91504ed..100ca828b052 100644
--- a/drivers/clk/imx/clk-fixup-div.c
+++ b/drivers/clk/imx/clk-fixup-div.c
@@ -55,7 +55,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw);
struct clk_divider *div = to_clk_divider(hw);
unsigned int divider, value;
- unsigned long flags = 0;
+ unsigned long flags;
u32 val;
divider = parent_rate / rate;
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index b569d919c645..58a67630bb6a 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -42,7 +42,7 @@ static int clk_fixup_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_fixup_mux *fixup_mux = to_clk_fixup_mux(hw);
struct clk_mux *mux = to_clk_mux(hw);
- unsigned long flags = 0;
+ unsigned long flags;
u32 val;
spin_lock_irqsave(mux->lock, flags);
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index 7d44ce814806..ce0060e8873e 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -40,7 +40,7 @@ static int clk_gate2_enable(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
u32 reg;
- unsigned long flags = 0;
+ unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
@@ -62,7 +62,7 @@ static void clk_gate2_disable(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
u32 reg;
- unsigned long flags = 0;
+ unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
@@ -101,7 +101,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw)
static void clk_gate2_disable_unused(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
- unsigned long flags = 0;
+ unsigned long flags;
u32 reg;
spin_lock_irqsave(gate->lock, flags);
@@ -154,7 +154,7 @@ struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
gate->hw.init = &init;
hw = &gate->hw;
- ret = clk_hw_register(NULL, hw);
+ ret = clk_hw_register(dev, hw);
if (ret) {
kfree(gate);
return ERR_PTR(ret);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 4bd44d89eaaa..0f647d148abf 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -208,6 +208,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
anatop_base = base;
hws[IMX6SL_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
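
of_find_compatible_node() returns the node with an elevated reference count, so the added of_node_put() drops that reference once the registers are mapped. A refcount toy model of the fixed pattern:

#include <stdio.h>

struct node { int refcount; };

static struct node *find_node(struct node *n)
{
	n->refcount++;		/* lookup takes a reference */
	return n;
}

static void node_put(struct node *n)
{
	n->refcount--;		/* caller drops it when done */
}

int main(void)
{
	struct node anatop = { .refcount = 1 };
	struct node *np = find_node(&anatop);

	/* ...map registers, read properties, use np... */
	node_put(np);		/* the fix: release the reference */

	printf("refcount back to %d\n", anatop.refcount);
	return 0;
}
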
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 0c9f7adb41ae..b2057bd42e25 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -802,6 +802,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_hw_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0);
hws[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0);
hws[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0);
+ hws[IMX7D_PXP_CLK] = imx_clk_hw_gate4("pxp_clk", "main_axi_root_clk", base + 0x44c0, 0);
hws[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0);
hws[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0);
hws[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_hw_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 0620d6c8c072..3710aa0dee9b 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -8,7 +8,7 @@
*/
#include <dt-bindings/clock/imx7ulp-clock.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 2ed93fc25087..925670438f23 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -4,12 +4,10 @@
*/
#include <dt-bindings/clock/imx8mm-clock.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -41,6 +39,8 @@ static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
static const char *imx8mm_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
"sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", };
+static const char * const imx8mm_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
static const char *imx8mm_m4_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "sys_pll1_266m",
"sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
@@ -283,8 +283,10 @@ static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_8
static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
-static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
- "vpu_pll", "sys_pll1_80m", };
+static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy", "sys_pll1_200m",
+ "audio_pll2_out", "sys_pll2_500m", "vpu_pll", "sys_pll1_80m", };
+static const char *imx8mm_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out", "osc_32k", };
static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;
@@ -322,6 +324,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
if (WARN_ON(!base))
return -ENOMEM;
@@ -414,20 +417,30 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/* Core Slice */
hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
- hws[IMX8MM_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
- hws[IMX8MM_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
- hws[IMX8MM_CLK_GPU3D_SRC] = imx_clk_hw_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
- hws[IMX8MM_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels));
hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- hws[IMX8MM_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- hws[IMX8MM_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
- hws[IMX8MM_CLK_GPU3D_CG] = imx_clk_hw_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
- hws[IMX8MM_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- hws[IMX8MM_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- hws[IMX8MM_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
- hws[IMX8MM_CLK_GPU3D_DIV] = imx_clk_hw_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
- hws[IMX8MM_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
+
+ hws[IMX8MM_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mm_m4_sels, base + 0x8080);
+ hws[IMX8MM_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mm_vpu_sels, base + 0x8100);
+ hws[IMX8MM_CLK_GPU3D_CORE] = imx8m_clk_hw_composite_core("gpu3d_core", imx8mm_gpu3d_sels, base + 0x8180);
+ hws[IMX8MM_CLK_GPU2D_CORE] = imx8m_clk_hw_composite_core("gpu2d_core", imx8mm_gpu2d_sels, base + 0x8200);
+
+ /* For backwards compatibility */
+ hws[IMX8MM_CLK_M4_SRC] = hws[IMX8MM_CLK_M4_CORE];
+ hws[IMX8MM_CLK_M4_CG] = hws[IMX8MM_CLK_M4_CORE];
+ hws[IMX8MM_CLK_M4_DIV] = hws[IMX8MM_CLK_M4_CORE];
+ hws[IMX8MM_CLK_VPU_SRC] = hws[IMX8MM_CLK_VPU_CORE];
+ hws[IMX8MM_CLK_VPU_CG] = hws[IMX8MM_CLK_VPU_CORE];
+ hws[IMX8MM_CLK_VPU_DIV] = hws[IMX8MM_CLK_VPU_CORE];
+ hws[IMX8MM_CLK_GPU3D_SRC] = hws[IMX8MM_CLK_GPU3D_CORE];
+ hws[IMX8MM_CLK_GPU3D_CG] = hws[IMX8MM_CLK_GPU3D_CORE];
+ hws[IMX8MM_CLK_GPU3D_DIV] = hws[IMX8MM_CLK_GPU3D_CORE];
+ hws[IMX8MM_CLK_GPU2D_SRC] = hws[IMX8MM_CLK_GPU2D_CORE];
+ hws[IMX8MM_CLK_GPU2D_CG] = hws[IMX8MM_CLK_GPU2D_CORE];
+ hws[IMX8MM_CLK_GPU2D_DIV] = hws[IMX8MM_CLK_GPU2D_CORE];
+
+ /* CORE SEL */
+ hws[IMX8MM_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mm_a53_core_sels, ARRAY_SIZE(imx8mm_a53_core_sels));
/* BUS */
hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
@@ -504,6 +517,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
hws[IMX8MM_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
hws[IMX8MM_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
+ hws[IMX8MM_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mm_clko2_sels, base + 0xba80);
hws[IMX8MM_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
hws[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
hws[IMX8MM_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
@@ -564,7 +578,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
hws[IMX8MM_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
hws[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
- hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
+ hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", base + 0x44f0, 0);
hws[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
hws[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
hws[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
@@ -586,7 +600,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
hws[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
hws[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
- hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
+ hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", base + 0x4660, 0);
hws[IMX8MM_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
hws[IMX8MM_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
@@ -594,11 +608,14 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
hws[IMX8MM_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
- hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
- hws[IMX8MM_CLK_A53_DIV]->clk,
- hws[IMX8MM_CLK_A53_SRC]->clk,
+ hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
+ hws[IMX8MM_CLK_A53_CORE]->clk,
+ hws[IMX8MM_CLK_A53_CORE]->clk,
hws[IMX8MM_ARM_PLL_OUT]->clk,
- hws[IMX8MM_SYS_PLL1_800M]->clk);
+ hws[IMX8MM_CLK_A53_DIV]->clk);
+
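+	/*
+	 * Editorial note: with A53_SRC reparented to SYS_PLL1_800M, the
+	 * arm_a53_div path becomes the fixed low-frequency "step" source,
+	 * while the new core mux normally runs from ARM_PLL_OUT. The same
+	 * scheme is applied on i.MX8MN/8MP/8MQ below.
+	 */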
+ clk_hw_set_parent(hws[IMX8MM_CLK_A53_SRC], hws[IMX8MM_SYS_PLL1_800M]);
+ clk_hw_set_parent(hws[IMX8MM_CLK_A53_CORE], hws[IMX8MM_ARM_PLL_OUT]);
imx_check_clk_hws(hws, IMX8MM_CLK_END);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index c5e7316b4c66..0bc7070235bd 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -4,12 +4,10 @@
*/
#include <dt-bindings/clock/imx8mn-clock.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -40,6 +38,8 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl
"sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m",
"audio_pll1_out", "sys_pll3_out", };
+static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
"sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", };
@@ -317,6 +317,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
if (WARN_ON(!base)) {
ret = -ENOMEM;
goto unregister_hws;
@@ -413,15 +414,21 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
/* CORE */
hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
- hws[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
- hws[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels, ARRAY_SIZE(imx8mn_gpu_shader_sels));
hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- hws[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
- hws[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
-
hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- hws[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
- hws[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+
+ hws[IMX8MN_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mn_gpu_core_sels, base + 0x8180);
+ hws[IMX8MN_CLK_GPU_SHADER] = imx8m_clk_hw_composite_core("gpu_shader", imx8mn_gpu_shader_sels, base + 0x8200);
+
+ hws[IMX8MN_CLK_GPU_CORE_SRC] = hws[IMX8MN_CLK_GPU_CORE];
+ hws[IMX8MN_CLK_GPU_CORE_CG] = hws[IMX8MN_CLK_GPU_CORE];
+ hws[IMX8MN_CLK_GPU_CORE_DIV] = hws[IMX8MN_CLK_GPU_CORE];
+ hws[IMX8MN_CLK_GPU_SHADER_SRC] = hws[IMX8MN_CLK_GPU_SHADER];
+ hws[IMX8MN_CLK_GPU_SHADER_CG] = hws[IMX8MN_CLK_GPU_SHADER];
+ hws[IMX8MN_CLK_GPU_SHADER_DIV] = hws[IMX8MN_CLK_GPU_SHADER];
+
+ /* CORE SEL */
+ hws[IMX8MN_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mn_a53_core_sels, ARRAY_SIZE(imx8mn_a53_core_sels));
/* BUS */
hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
@@ -523,12 +530,13 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
hws[IMX8MN_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
hws[IMX8MN_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ hws[IMX8MN_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
hws[IMX8MN_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
hws[IMX8MN_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
hws[IMX8MN_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
hws[IMX8MN_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
hws[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
- hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
+ hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core", base + 0x44f0, 0);
hws[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
hws[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
hws[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
@@ -551,11 +559,14 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
- hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
- hws[IMX8MN_CLK_A53_DIV]->clk,
- hws[IMX8MN_CLK_A53_SRC]->clk,
+ hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
+ hws[IMX8MN_CLK_A53_CORE]->clk,
+ hws[IMX8MN_CLK_A53_CORE]->clk,
hws[IMX8MN_ARM_PLL_OUT]->clk,
- hws[IMX8MN_SYS_PLL1_800M]->clk);
+ hws[IMX8MN_CLK_A53_DIV]->clk);
+
+ clk_hw_set_parent(hws[IMX8MN_CLK_A53_SRC], hws[IMX8MN_SYS_PLL1_800M]);
+ clk_hw_set_parent(hws[IMX8MN_CLK_A53_CORE], hws[IMX8MN_ARM_PLL_OUT]);
imx_check_clk_hws(hws, IMX8MN_CLK_END);
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index cf192907b7dc..41469e2cc3de 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -4,13 +4,13 @@
*/
#include <dt-bindings/clock/imx8mp-clock.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "clk.h"
@@ -34,6 +34,8 @@ static const char * const imx8mp_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl
"sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m",
"audio_pll1_out", "sys_pll3_out", };
+static const char * const imx8mp_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
static const char * const imx8mp_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m",
"vpu_pll_out", "sys_pll1_800m", "audio_pll1_out",
"video_pll1_out", "sys_pll3_out", };
@@ -342,7 +344,7 @@ static const char * const imx8mp_hdmi_fdcc_tst_sels[] = {"osc_24m", "sys_pll1_26
"sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
"audio_pll2_out", "video_pll1_out", };
-static const char * const imx8mp_hdmi_27m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+static const char * const imx8mp_hdmi_24m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
"audio_pll2_out", "sys_pll1_133m", };
@@ -434,6 +436,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
anatop_base = of_iomap(np, 0);
+ of_node_put(np);
if (WARN_ON(!anatop_base))
return -ENOMEM;
@@ -553,6 +556,9 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3);
hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3);
+ /* CORE SEL */
+ hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels));
+
hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
@@ -631,7 +637,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_IPP_DO_CLKO1] = imx8m_clk_hw_composite("ipp_do_clko1", imx8mp_ipp_do_clko1_sels, ccm_base + 0xba00);
hws[IMX8MP_CLK_IPP_DO_CLKO2] = imx8m_clk_hw_composite("ipp_do_clko2", imx8mp_ipp_do_clko2_sels, ccm_base + 0xba80);
hws[IMX8MP_CLK_HDMI_FDCC_TST] = imx8m_clk_hw_composite("hdmi_fdcc_tst", imx8mp_hdmi_fdcc_tst_sels, ccm_base + 0xbb00);
- hws[IMX8MP_CLK_HDMI_27M] = imx8m_clk_hw_composite("hdmi_27m", imx8mp_hdmi_27m_sels, ccm_base + 0xbb80);
+ hws[IMX8MP_CLK_HDMI_24M] = imx8m_clk_hw_composite("hdmi_24m", imx8mp_hdmi_24m_sels, ccm_base + 0xbb80);
hws[IMX8MP_CLK_HDMI_REF_266M] = imx8m_clk_hw_composite("hdmi_ref_266m", imx8mp_hdmi_ref_266m_sels, ccm_base + 0xbc00);
hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
@@ -671,6 +677,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", ccm_base + 0x4180, 0);
hws[IMX8MP_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", ccm_base + 0x4190, 0);
hws[IMX8MP_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", ccm_base + 0x41a0, 0);
+ hws[IMX8MP_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", ccm_base + 0x4220, 0);
hws[IMX8MP_CLK_PCIE_ROOT] = imx_clk_hw_gate4("pcie_root_clk", "pcie_aux", ccm_base + 0x4250, 0);
hws[IMX8MP_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", ccm_base + 0x4280, 0);
hws[IMX8MP_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", ccm_base + 0x4290, 0);
@@ -722,11 +729,14 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_VPU_ROOT] = imx_clk_hw_gate4("vpu_root_clk", "vpu_bus", ccm_base + 0x4630, 0);
hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "ipg_root", ccm_base + 0x4650, 0);
- hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
- hws[IMX8MP_CLK_A53_DIV]->clk,
- hws[IMX8MP_CLK_A53_SRC]->clk,
+ hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
+ hws[IMX8MP_CLK_A53_CORE]->clk,
+ hws[IMX8MP_CLK_A53_CORE]->clk,
hws[IMX8MP_ARM_PLL_OUT]->clk,
- hws[IMX8MP_SYS_PLL1_800M]->clk);
+ hws[IMX8MP_CLK_A53_DIV]->clk);
+
+ clk_hw_set_parent(hws[IMX8MP_CLK_A53_SRC], hws[IMX8MP_SYS_PLL1_800M]);
+ clk_hw_set_parent(hws[IMX8MP_CLK_A53_CORE], hws[IMX8MP_ARM_PLL_OUT]);
imx_check_clk_hws(hws, IMX8MP_CLK_END);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 4c0edca1a6d0..fdc68db68de5 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -5,7 +5,7 @@
*/
#include <dt-bindings/clock/imx8mq-clock.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -41,6 +41,8 @@ static const char * const video2_pll_out_sels[] = {"video2_pll1_ref_sel", };
static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
"sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll_out", };
+static const char * const imx8mq_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
static const char * const imx8mq_arm_m4_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_250m", "sys1_pll_266m",
"sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll_out", };
@@ -305,6 +307,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
if (WARN_ON(!base))
return -ENOMEM;
@@ -403,22 +406,29 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
/* CORE */
hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
- hws[IMX8MQ_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
- hws[IMX8MQ_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
- hws[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
- hws[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels));
-
hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
- hws[IMX8MQ_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- hws[IMX8MQ_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
- hws[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
- hws[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
-
hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- hws[IMX8MQ_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- hws[IMX8MQ_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
- hws[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
- hws[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+
+ hws[IMX8MQ_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mq_arm_m4_sels, base + 0x8080);
+ hws[IMX8MQ_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mq_vpu_sels, base + 0x8100);
+ hws[IMX8MQ_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mq_gpu_core_sels, base + 0x8180);
+ hws[IMX8MQ_CLK_GPU_SHADER] = imx8m_clk_hw_composite("gpu_shader", imx8mq_gpu_shader_sels, base + 0x8200);
+ /* For backwards compatibility */
+ hws[IMX8MQ_CLK_M4_SRC] = hws[IMX8MQ_CLK_M4_CORE];
+ hws[IMX8MQ_CLK_M4_CG] = hws[IMX8MQ_CLK_M4_CORE];
+ hws[IMX8MQ_CLK_M4_DIV] = hws[IMX8MQ_CLK_M4_CORE];
+ hws[IMX8MQ_CLK_VPU_SRC] = hws[IMX8MQ_CLK_VPU_CORE];
+ hws[IMX8MQ_CLK_VPU_CG] = hws[IMX8MQ_CLK_VPU_CORE];
+ hws[IMX8MQ_CLK_VPU_DIV] = hws[IMX8MQ_CLK_VPU_CORE];
+ hws[IMX8MQ_CLK_GPU_CORE_SRC] = hws[IMX8MQ_CLK_GPU_CORE];
+ hws[IMX8MQ_CLK_GPU_CORE_CG] = hws[IMX8MQ_CLK_GPU_CORE];
+ hws[IMX8MQ_CLK_GPU_CORE_DIV] = hws[IMX8MQ_CLK_GPU_CORE];
+ hws[IMX8MQ_CLK_GPU_SHADER_SRC] = hws[IMX8MQ_CLK_GPU_SHADER];
+ hws[IMX8MQ_CLK_GPU_SHADER_CG] = hws[IMX8MQ_CLK_GPU_SHADER];
+ hws[IMX8MQ_CLK_GPU_SHADER_DIV] = hws[IMX8MQ_CLK_GPU_SHADER];
+
+ /* CORE SEL */
+ hws[IMX8MQ_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mq_a53_core_sels, ARRAY_SIZE(imx8mq_a53_core_sels));
/* BUS */
hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
@@ -567,7 +577,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
hws[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
hws[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_hw_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
- hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
+ hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core", base + 0x4570, 0);
hws[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_hw_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
hws[IMX8MQ_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
hws[IMX8MQ_CLK_DISP_AXI_ROOT] = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
@@ -583,11 +593,14 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_GPT_3M_CLK] = imx_clk_hw_fixed_factor("gpt_3m", "osc_25m", 1, 8);
hws[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
- hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
- hws[IMX8MQ_CLK_A53_DIV]->clk,
- hws[IMX8MQ_CLK_A53_SRC]->clk,
+ hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
+ hws[IMX8MQ_CLK_A53_CORE]->clk,
+ hws[IMX8MQ_CLK_A53_CORE]->clk,
hws[IMX8MQ_ARM_PLL_OUT]->clk,
- hws[IMX8MQ_SYS1_PLL_800M]->clk);
+ hws[IMX8MQ_CLK_A53_DIV]->clk);
+
+ clk_hw_set_parent(hws[IMX8MQ_CLK_A53_SRC], hws[IMX8MQ_SYS1_PLL_800M]);
+ clk_hw_set_parent(hws[IMX8MQ_CLK_A53_CORE], hws[IMX8MQ_ARM_PLL_OUT]);
imx_check_clk_hws(hws, IMX8MQ_CLK_END);
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index de93ce73101b..78e1f7641aaa 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -98,26 +98,45 @@ static unsigned long clk_pfdv2_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_pfdv2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pfdv2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 tmp = *prate;
+ unsigned long parent_rates[] = {
+ 480000000,
+ 528000000,
+ req->best_parent_rate
+ };
+ unsigned long best_rate = -1UL, rate = req->rate;
+ unsigned long best_parent_rate = req->best_parent_rate;
+ u64 tmp;
u8 frac;
+ int i;
+
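+	/*
+	 * The PFD output rate is parent_rate * 18 / frac, with frac clamped
+	 * to [12, 35]; try each candidate parent rate and keep the closest
+	 * achievable rate.
+	 */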
+ for (i = 0; i < ARRAY_SIZE(parent_rates); i++) {
+ tmp = parent_rates[i];
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 12)
+ frac = 12;
+ else if (frac > 35)
+ frac = 35;
+
+ tmp = parent_rates[i];
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ if (abs(tmp - req->rate) < abs(best_rate - req->rate)) {
+ best_rate = tmp;
+ best_parent_rate = parent_rates[i];
+ }
+ }
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
- frac = tmp;
-
- if (frac < 12)
- frac = 12;
- else if (frac > 35)
- frac = 35;
-
- tmp = *prate;
- tmp *= 18;
- do_div(tmp, frac);
+ req->best_parent_rate = best_parent_rate;
+ req->rate = best_rate;
- return tmp;
+ return 0;
}
static int clk_pfdv2_is_enabled(struct clk_hw *hw)
@@ -139,6 +158,12 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate,
u32 val;
u8 frac;
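+	/* rate is used as a divisor below; reject zero before do_div() */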
+ if (!rate)
+ return -EINVAL;
+
+ /* PFD can NOT change rate without gating */
+ WARN_ON(clk_pfdv2_is_enabled(hw));
+
tmp = tmp * 18 + rate / 2;
do_div(tmp, rate);
frac = tmp;
@@ -161,7 +186,7 @@ static const struct clk_ops clk_pfdv2_ops = {
.enable = clk_pfdv2_enable,
.disable = clk_pfdv2_disable,
.recalc_rate = clk_pfdv2_recalc_rate,
- .round_rate = clk_pfdv2_round_rate,
+ .determine_rate = clk_pfdv2_determine_rate,
.set_rate = clk_pfdv2_set_rate,
.is_enabled = clk_pfdv2_is_enabled,
};
@@ -189,7 +214,7 @@ struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
init.ops = &clk_pfdv2_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.flags = CLK_SET_RATE_GATE;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
pfd->hw.init = &init;
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 5b0519a81a7a..a83bbbee77d9 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -55,8 +55,10 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
};
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
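+	/*
+	 * Assuming the usual 1443x formula, rate = parent * (m + k / 65536) /
+	 * (p << s): e.g. 24 MHz * 173.25 / 4 = 1039.5 MHz for the first entry.
+	 */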
+ PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384),
PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+ PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};
@@ -408,6 +410,8 @@ struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
default:
pr_err("%s: Unknown pll type for pll clk %s\n",
__func__, name);
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
};
pll->base = base;
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index f51a800c268c..a49450431855 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -54,7 +54,7 @@ static inline int clk_pllv4_wait_lock(struct clk_pllv4 *pll)
csr, csr & PLL_VLD, 0, LOCK_TIMEOUT_US);
}
-static int clk_pllv4_is_enabled(struct clk_hw *hw)
+static int clk_pllv4_is_prepared(struct clk_hw *hw)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
@@ -175,7 +175,7 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static int clk_pllv4_enable(struct clk_hw *hw)
+static int clk_pllv4_prepare(struct clk_hw *hw)
{
u32 val;
struct clk_pllv4 *pll = to_clk_pllv4(hw);
@@ -187,7 +187,7 @@ static int clk_pllv4_enable(struct clk_hw *hw)
return clk_pllv4_wait_lock(pll);
}
-static void clk_pllv4_disable(struct clk_hw *hw)
+static void clk_pllv4_unprepare(struct clk_hw *hw)
{
u32 val;
struct clk_pllv4 *pll = to_clk_pllv4(hw);
@@ -201,9 +201,9 @@ static const struct clk_ops clk_pllv4_ops = {
.recalc_rate = clk_pllv4_recalc_rate,
.round_rate = clk_pllv4_round_rate,
.set_rate = clk_pllv4_set_rate,
- .enable = clk_pllv4_enable,
- .disable = clk_pllv4_disable,
- .is_enabled = clk_pllv4_is_enabled,
+ .prepare = clk_pllv4_prepare,
+ .unprepare = clk_pllv4_unprepare,
+ .is_prepared = clk_pllv4_is_prepared,
};
struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c
index acd1b9002be6..d4a2be16d132 100644
--- a/drivers/clk/imx/clk-sscg-pll.c
+++ b/drivers/clk/imx/clk-sscg-pll.c
@@ -195,10 +195,10 @@ static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup,
uint64_t ref)
{
- int ret = -EINVAL;
+ int ret;
if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
- return ret;
+ return -EINVAL;
temp_setup->vco1 = ref;
@@ -254,10 +254,10 @@ static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup,
uint64_t ref)
{
- int ret = -EINVAL;
+ int ret;
if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
- return ret;
+ return -EINVAL;
temp_setup->ref = ref;
@@ -428,7 +428,7 @@ static int __clk_sscg_pll_determine_rate(struct clk_hw *hw,
struct clk_sscg_pll_setup *setup = &pll->setup;
struct clk_hw *parent_hw = NULL;
int bypass_parent_index;
- int ret = -EINVAL;
+ int ret;
req->max_rate = max;
req->min_rate = min;
@@ -467,10 +467,10 @@ static int clk_sscg_pll_determine_rate(struct clk_hw *hw,
uint64_t rate = req->rate;
uint64_t min = req->min_rate;
uint64_t max = req->max_rate;
- int ret = -EINVAL;
+ int ret;
if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
- return ret;
+ return -EINVAL;
ret = __clk_sscg_pll_determine_rate(hw, req, req->rate, req->rate,
rate, PLL_BYPASS2);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index b05213b91dcf..f074dd8ec42e 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -477,20 +477,29 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
struct clk *div, struct clk *mux, struct clk *pll,
struct clk *step);
+#define IMX_COMPOSITE_CORE BIT(0)
+
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
int num_parents,
void __iomem *reg,
+ u32 composite_flags,
unsigned long flags);
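+
+/*
+ * Editorial note: IMX_COMPOSITE_CORE selects the core-slice divider layout
+ * (a single 3-bit divider) instead of the pre/post divider pair used by
+ * bus slices.
+ */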
+#define imx8m_clk_hw_composite_core(name, parent_names, reg) \
+ imx8m_clk_hw_composite_flags(name, parent_names, \
+ ARRAY_SIZE(parent_names), reg, \
+ IMX_COMPOSITE_CORE, \
+ CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+
#define imx8m_clk_composite_flags(name, parent_names, num_parents, reg, \
flags) \
to_clk(imx8m_clk_hw_composite_flags(name, parent_names, \
- num_parents, reg, flags))
+ num_parents, reg, 0, flags))
#define __imx8m_clk_hw_composite(name, parent_names, reg, flags) \
imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, \
+ ARRAY_SIZE(parent_names), reg, 0, \
flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
#define __imx8m_clk_composite(name, parent_names, reg, flags) \
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 956dd653a43d..c051ecba5cf8 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -432,8 +432,10 @@ static void __init jz4770_cgu_init(struct device_node *np)
cgu = ingenic_cgu_new(jz4770_cgu_clocks,
ARRAY_SIZE(jz4770_cgu_clocks), np);
- if (!cgu)
+ if (!cgu) {
pr_err("%s: failed to initialise CGU\n", __func__);
+ return;
+ }
retval = ingenic_cgu_register_clocks(cgu);
if (retval)
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index ea905ff72bf0..c758f1643067 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -9,14 +9,16 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/jz4780-cgu.h>
#include "cgu.h"
#include "pm.h"
/* CGU register offsets */
#define CGU_REG_CLOCKCONTROL 0x00
-#define CGU_REG_PLLCONTROL 0x0c
+#define CGU_REG_LCR 0x04
#define CGU_REG_APLL 0x10
#define CGU_REG_MPLL 0x14
#define CGU_REG_EPLL 0x18
@@ -46,8 +48,8 @@
#define CGU_REG_CLOCKSTATUS 0xd4
/* bits within the OPCR register */
-#define OPCR_SPENDN0 (1 << 7)
-#define OPCR_SPENDN1 (1 << 6)
+#define OPCR_SPENDN0 BIT(7)
+#define OPCR_SPENDN1 BIT(6)
/* bits within the USBPCR register */
#define USBPCR_USB_MODE BIT(31)
@@ -88,6 +90,13 @@
#define USBVBFIL_IDDIGFIL_MASK (0xffff << USBVBFIL_IDDIGFIL_SHIFT)
#define USBVBFIL_USBVBFIL_MASK (0xffff)
+/* bits within the LCR register */
+#define LCR_PD_SCPU BIT(31)
+#define LCR_SCPUS BIT(27)
+
+/* bits within the CLKGR1 register */
+#define CLKGR1_CORE1 BIT(15)
+
static struct ingenic_cgu *cgu;
static u8 jz4780_otg_phy_get_parent(struct clk_hw *hw)
@@ -205,6 +214,42 @@ static const struct clk_ops jz4780_otg_phy_ops = {
.set_rate = jz4780_otg_phy_set_rate,
};
+static int jz4780_core1_enable(struct clk_hw *hw)
+{
+ struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ struct ingenic_cgu *cgu = ingenic_clk->cgu;
+ const unsigned int timeout = 5000;
+ unsigned long flags;
+ int retval;
+ u32 lcr, clkgr1;
+
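+	/* Clear core1's power-down bit, then ungate its clock, under the CGU lock */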
+ spin_lock_irqsave(&cgu->lock, flags);
+
+ lcr = readl(cgu->base + CGU_REG_LCR);
+ lcr &= ~LCR_PD_SCPU;
+ writel(lcr, cgu->base + CGU_REG_LCR);
+
+ clkgr1 = readl(cgu->base + CGU_REG_CLKGR1);
+ clkgr1 &= ~CLKGR1_CORE1;
+ writel(clkgr1, cgu->base + CGU_REG_CLKGR1);
+
+ spin_unlock_irqrestore(&cgu->lock, flags);
+
+ /* wait for the CPU to be powered up */
+ retval = readl_poll_timeout(cgu->base + CGU_REG_LCR, lcr,
+ !(lcr & LCR_SCPUS), 10, timeout);
+ if (retval == -ETIMEDOUT) {
+		pr_err("%s: timeout waiting for core1 power-up\n", __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+static const struct clk_ops jz4780_core1_ops = {
+ .enable = jz4780_core1_enable,
+};
+
static const s8 pll_od_encoding[16] = {
0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
@@ -699,9 +744,9 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
},
[JZ4780_CLK_CORE1] = {
- "core1", CGU_CLK_GATE,
+ "core1", CGU_CLK_CUSTOM,
.parents = { JZ4780_CLK_CPU, -1, -1, -1 },
- .gate = { CGU_REG_CLKGR1, 15 },
+ .custom = { &jz4780_core1_ops },
},
};
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index ad7daa494fd4..153a954b0d2f 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -189,7 +189,7 @@ static long ingenic_tcu_round_rate(struct clk_hw *hw, unsigned long req_rate,
u8 prescale;
if (req_rate > rate)
- return -EINVAL;
+ return rate;
prescale = ingenic_tcu_get_prescale(rate, req_rate);
@@ -317,10 +317,17 @@ static const struct ingenic_soc_info jz4770_soc_info = {
.has_tcu_clk = false,
};
+static const struct ingenic_soc_info x1000_soc_info = {
+ .num_channels = 8,
+	.has_ost = false, /* X1000 has an OST, but it does not belong to the TCU */
+ .has_tcu_clk = false,
+};
+
static const struct of_device_id ingenic_tcu_of_match[] __initconst = {
{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
{ .compatible = "ingenic,jz4770-tcu", .data = &jz4770_soc_info, },
+ { .compatible = "ingenic,x1000-tcu", .data = &x1000_soc_info, },
{ /* sentinel */ }
};
@@ -471,3 +478,4 @@ static void __init ingenic_tcu_init(struct device_node *np)
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-tcu", ingenic_tcu_init);
CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-tcu", ingenic_tcu_init);
CLK_OF_DECLARE_DRIVER(jz4770_cgu, "ingenic,jz4770-tcu", ingenic_tcu_init);
+CLK_OF_DECLARE_DRIVER(x1000_cgu, "ingenic,x1000-tcu", ingenic_tcu_init);
diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig
index 38aeefb1e808..ab613f28b502 100644
--- a/drivers/clk/keystone/Kconfig
+++ b/drivers/clk/keystone/Kconfig
@@ -26,3 +26,11 @@ config TI_SCI_CLK_PROBE_FROM_FW
This is mostly only useful for debugging purposes, and will
increase the boot time of the device. If you want the clocks probed
from firmware, say Y. Otherwise, say N.
+
+config TI_SYSCON_CLK
+ tristate "Syscon based clock driver for K2/K3 SoCs"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ default ARCH_KEYSTONE || ARCH_K3
+ help
+ This adds clock driver support for syscon based gate
+ clocks on TI's K2 and K3 SoCs.
diff --git a/drivers/clk/keystone/Makefile b/drivers/clk/keystone/Makefile
index d044de6f965c..0e426e648f7c 100644
--- a/drivers/clk/keystone/Makefile
+++ b/drivers/clk/keystone/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_COMMON_CLK_KEYSTONE) += pll.o gate.o
obj-$(CONFIG_TI_SCI_CLK) += sci-clk.o
+obj-$(CONFIG_TI_SYSCON_CLK) += syscon-clk.o
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
new file mode 100644
index 000000000000..8d7dbea3bd30
--- /dev/null
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct ti_syscon_gate_clk_priv {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 reg;
+ u32 idx;
+};
+
+struct ti_syscon_gate_clk_data {
+ char *name;
+ u32 offset;
+ u32 bit_idx;
+};
+
+static struct
+ti_syscon_gate_clk_priv *to_ti_syscon_gate_clk_priv(struct clk_hw *hw)
+{
+ return container_of(hw, struct ti_syscon_gate_clk_priv, hw);
+}
+
+static int ti_syscon_gate_clk_enable(struct clk_hw *hw)
+{
+ struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw);
+
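+	/* priv->idx holds the gate's bit mask (BIT(bit_idx)); writing it as both mask and value sets the bit */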
+ return regmap_write_bits(priv->regmap, priv->reg, priv->idx,
+ priv->idx);
+}
+
+static void ti_syscon_gate_clk_disable(struct clk_hw *hw)
+{
+ struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw);
+
+ regmap_write_bits(priv->regmap, priv->reg, priv->idx, 0);
+}
+
+static int ti_syscon_gate_clk_is_enabled(struct clk_hw *hw)
+{
+ unsigned int val;
+ struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw);
+
+ regmap_read(priv->regmap, priv->reg, &val);
+
+ return !!(val & priv->idx);
+}
+
+static const struct clk_ops ti_syscon_gate_clk_ops = {
+ .enable = ti_syscon_gate_clk_enable,
+ .disable = ti_syscon_gate_clk_disable,
+ .is_enabled = ti_syscon_gate_clk_is_enabled,
+};
+
+static struct clk_hw
+*ti_syscon_gate_clk_register(struct device *dev, struct regmap *regmap,
+ const struct ti_syscon_gate_clk_data *data)
+{
+ struct ti_syscon_gate_clk_priv *priv;
+ struct clk_init_data init;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = data->name;
+ init.ops = &ti_syscon_gate_clk_ops;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ init.flags = 0;
+
+ priv->regmap = regmap;
+ priv->reg = data->offset;
+ priv->idx = BIT(data->bit_idx);
+ priv->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &priv->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &priv->hw;
+}
+
+static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
+{
+ const struct ti_syscon_gate_clk_data *data, *p;
+ struct clk_hw_onecell_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int num_clks, i;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ regmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(regmap)) {
+ if (PTR_ERR(regmap) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(dev, "failed to find parent regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ num_clks = 0;
+ for (p = data; p->name; p++)
+ num_clks++;
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ hw_data->num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ hw_data->hws[i] = ti_syscon_gate_clk_register(dev, regmap,
+ &data[i]);
+ if (IS_ERR(hw_data->hws[i]))
+ dev_warn(dev, "failed to register %s\n",
+ data[i].name);
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ hw_data);
+}
+
+#define TI_SYSCON_CLK_GATE(_name, _offset, _bit_idx) \
+ { \
+ .name = _name, \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ }
+
+static const struct ti_syscon_gate_clk_data am654_clk_data[] = {
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk0", 0x0, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk1", 0x4, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk2", 0x8, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk3", 0xc, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk4", 0x10, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk5", 0x14, 0),
+ { /* Sentinel */ },
+};
+
+static const struct of_device_id ti_syscon_gate_clk_ids[] = {
+ {
+ .compatible = "ti,am654-ehrpwm-tbclk",
+ .data = &am654_clk_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_syscon_gate_clk_ids);
+
+static struct platform_driver ti_syscon_gate_clk_driver = {
+ .probe = ti_syscon_gate_clk_probe,
+ .driver = {
+ .name = "ti-syscon-gate-clk",
+ .of_match_table = ti_syscon_gate_clk_ids,
+ },
+};
+module_platform_driver(ti_syscon_gate_clk_driver);
+
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_DESCRIPTION("Syscon backed gate-clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index d2760a021301..fad616cac01e 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -3862,6 +3862,111 @@ static struct clk_regmap g12a_ts = {
},
};
+/* SPICC SCLK source clock */
+
+static const struct clk_parent_data spicc_sclk_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_clk81.hw },
+ { .hw = &g12a_fclk_div4.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div5.hw },
+ { .hw = &g12a_fclk_div7.hw },
+};
+
+static struct clk_regmap g12a_spicc0_sclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .mask = 7,
+ .shift = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc0_sclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = spicc_sclk_parent_data,
+ .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ },
+};
+
+static struct clk_regmap g12a_spicc0_sclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .shift = 0,
+ .width = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc0_sclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_spicc0_sclk_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_spicc0_sclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .bit_idx = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc0_sclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_spicc0_sclk_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_spicc1_sclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .mask = 7,
+ .shift = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc1_sclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = spicc_sclk_parent_data,
+ .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ },
+};
+
+static struct clk_regmap g12a_spicc1_sclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .shift = 16,
+ .width = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc1_sclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_spicc1_sclk_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_spicc1_sclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .bit_idx = 22,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc1_sclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_spicc1_sclk_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
#define MESON_GATE(_name, _reg, _bit) \
MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw)
@@ -4159,6 +4264,12 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw,
[CLKID_TS_DIV] = &g12a_ts_div.hw,
[CLKID_TS] = &g12a_ts.hw,
+ [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw,
+ [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw,
+ [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw,
+ [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
+ [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
+ [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4408,6 +4519,12 @@ static struct clk_hw_onecell_data g12b_hw_onecell_data = {
[CLKID_CPUB_CLK_AXI] = &g12b_cpub_clk_axi.hw,
[CLKID_CPUB_CLK_TRACE_SEL] = &g12b_cpub_clk_trace_sel.hw,
[CLKID_CPUB_CLK_TRACE] = &g12b_cpub_clk_trace.hw,
+ [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw,
+ [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw,
+ [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw,
+ [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
+ [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
+ [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4642,6 +4759,12 @@ static struct clk_hw_onecell_data sm1_hw_onecell_data = {
[CLKID_CPU1_CLK] = &sm1_cpu1_clk.hw,
[CLKID_CPU2_CLK] = &sm1_cpu2_clk.hw,
[CLKID_CPU3_CLK] = &sm1_cpu3_clk.hw,
+ [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw,
+ [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw,
+ [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw,
+ [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
+ [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
+ [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4877,6 +5000,12 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&sm1_cpu1_clk,
&sm1_cpu2_clk,
&sm1_cpu3_clk,
+ &g12a_spicc0_sclk_sel,
+ &g12a_spicc0_sclk_div,
+ &g12a_spicc0_sclk,
+ &g12a_spicc1_sclk_sel,
+ &g12a_spicc1_sclk_div,
+ &g12a_spicc1_sclk,
};
static const struct reg_sequence g12a_init_regs[] = {
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index 9df4068aced1..a8852556836e 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -255,8 +255,12 @@
#define CLKID_DSU_CLK_DYN1 249
#define CLKID_DSU_CLK_DYN 250
#define CLKID_DSU_CLK_FINAL 251
+#define CLKID_SPICC0_SCLK_SEL 256
+#define CLKID_SPICC0_SCLK_DIV 257
+#define CLKID_SPICC1_SCLK_SEL 259
+#define CLKID_SPICC1_SCLK_DIV 260
-#define NR_CLKS 256
+#define NR_CLKS 262
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/g12a-clkc.h>
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 1f9c056e684c..5fd6a574f8c3 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -2613,19 +2613,12 @@ static MESON_GATE(gxbb_assist_misc, HHI_GCLK_MPEG0, 23);
static MESON_GATE(gxbb_emmc_a, HHI_GCLK_MPEG0, 24);
static MESON_GATE(gxbb_emmc_b, HHI_GCLK_MPEG0, 25);
static MESON_GATE(gxbb_emmc_c, HHI_GCLK_MPEG0, 26);
+static MESON_GATE(gxl_acodec, HHI_GCLK_MPEG0, 28);
static MESON_GATE(gxbb_spi, HHI_GCLK_MPEG0, 30);
static MESON_GATE(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2);
static MESON_GATE(gxbb_eth, HHI_GCLK_MPEG1, 3);
static MESON_GATE(gxbb_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6);
-static MESON_GATE(gxbb_iec958, HHI_GCLK_MPEG1, 7);
-static MESON_GATE(gxbb_i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_GATE(gxbb_amclk, HHI_GCLK_MPEG1, 9);
-static MESON_GATE(gxbb_aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_GATE(gxbb_mixer, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_GATE(gxbb_adc, HHI_GCLK_MPEG1, 13);
static MESON_GATE(gxbb_blkmv, HHI_GCLK_MPEG1, 14);
static MESON_GATE(gxbb_aiu, HHI_GCLK_MPEG1, 15);
static MESON_GATE(gxbb_uart1, HHI_GCLK_MPEG1, 16);
@@ -2680,6 +2673,16 @@ static MESON_GATE(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2);
static MESON_GATE(gxbb_ao_iface, HHI_GCLK_AO, 3);
static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4);
+/* AIU gates */
+static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu.hw);
+static MESON_PCLK(gxbb_iec958, HHI_GCLK_MPEG1, 7, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_i2s_out, HHI_GCLK_MPEG1, 8, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_amclk, HHI_GCLK_MPEG1, 9, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_aififo2, HHI_GCLK_MPEG1, 10, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_mixer, HHI_GCLK_MPEG1, 11, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_adc, HHI_GCLK_MPEG1, 13, &gxbb_aiu_glue.hw);
+
/* Array of all clocks provided by this provider */
static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
@@ -3100,6 +3103,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_HDMI_SEL] = &gxbb_hdmi_sel.hw,
[CLKID_HDMI_DIV] = &gxbb_hdmi_div.hw,
[CLKID_HDMI] = &gxbb_hdmi.hw,
+ [CLKID_ACODEC] = &gxl_acodec.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -3491,6 +3495,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = {
&gxl_hdmi_pll_od,
&gxl_hdmi_pll_od2,
&gxl_hdmi_pll_dco,
+ &gxl_acodec,
};
static const struct meson_eeclkc_data gxbb_clkc_data = {
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index b53584fe66cf..1ee8cb7e2f5a 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -188,7 +188,7 @@
#define CLKID_HDMI_SEL 203
#define CLKID_HDMI_DIV 204
-#define NR_CLKS 206
+#define NR_CLKS 207
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 9fd31f23b2a9..34a70c4b4899 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -2605,14 +2605,6 @@ static MESON_GATE(meson8b_spi, HHI_GCLK_MPEG0, 30);
static MESON_GATE(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2);
static MESON_GATE(meson8b_eth, HHI_GCLK_MPEG1, 3);
static MESON_GATE(meson8b_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6);
-static MESON_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
-static MESON_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
-static MESON_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
static MESON_GATE(meson8b_blkmv, HHI_GCLK_MPEG1, 14);
static MESON_GATE(meson8b_aiu, HHI_GCLK_MPEG1, 15);
static MESON_GATE(meson8b_uart1, HHI_GCLK_MPEG1, 16);
@@ -2659,6 +2651,19 @@ static MESON_GATE(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25);
static MESON_GATE(meson8b_vclk2_other, HHI_GCLK_OTHER, 26);
static MESON_GATE(meson8b_edp, HHI_GCLK_OTHER, 31);
+/* AIU gates */
+#define MESON_AIU_GLUE_GATE(_name, _reg, _bit) \
+ MESON_PCLK(_name, _reg, _bit, &meson8b_aiu_glue.hw)
+
+static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6, &meson8b_aiu.hw);
+static MESON_AIU_GLUE_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
+static MESON_AIU_GLUE_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
+static MESON_AIU_GLUE_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
+static MESON_AIU_GLUE_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
+static MESON_AIU_GLUE_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
+static MESON_AIU_GLUE_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
+static MESON_AIU_GLUE_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
+
/* Always On (AO) domain gates */
static MESON_GATE(meson8b_ao_media_cpu, HHI_GCLK_AO, 0);
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index acc141adf087..14dc8a8a9d08 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -8,7 +8,7 @@ obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
-obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o
+obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o
obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index d2cd36c54474..7a351ec65564 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -441,7 +441,7 @@ const struct clk_ops mmp_clk_mix_ops = {
struct clk *mmp_clk_register_mix(struct device *dev,
const char *name,
- const char **parent_names,
+ const char * const *parent_names,
u8 num_parents,
unsigned long flags,
struct mmp_clk_mix_config *config,
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 6e71591e63a0..52dc8b43acd9 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -48,17 +49,39 @@
#define APMU_SDH1 0x58
#define APMU_SDH2 0xe8
#define APMU_SDH3 0xec
+#define APMU_SDH4 0x15c
#define APMU_USB 0x5c
#define APMU_DISP0 0x4c
#define APMU_DISP1 0x110
#define APMU_CCIC0 0x50
#define APMU_CCIC1 0xf4
+#define APBC_THERMAL0 0x90
+#define APBC_THERMAL1 0x98
+#define APBC_THERMAL2 0x9c
+#define APBC_THERMAL3 0xa0
#define APMU_USBHSIC0 0xf8
#define APMU_USBHSIC1 0xfc
-#define MPMU_UART_PLL 0x14
+#define APMU_GPU 0xcc
+
+#define MPMU_FCCR 0x8
+#define MPMU_POSR 0x10
+#define MPMU_UART_PLL 0x14
+#define MPMU_PLL2_CR 0x34
+/* MMP3 specific below */
+#define MPMU_PLL3_CR 0x50
+#define MPMU_PLL3_CTRL1 0x58
+#define MPMU_PLL1_CTRL 0x5c
+#define MPMU_PLL_DIFF_CTRL 0x68
+#define MPMU_PLL2_CTRL1 0x414
+
+enum mmp2_clk_model {
+ CLK_MODEL_MMP2,
+ CLK_MODEL_MMP3,
+};
struct mmp2_clk_unit {
struct mmp_clk_unit unit;
+ enum mmp2_clk_model model;
void __iomem *mpmu_base;
void __iomem *apmu_base;
void __iomem *apbc_base;
@@ -67,11 +90,22 @@ struct mmp2_clk_unit {
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
{MMP2_CLK_CLK32, "clk32", NULL, 0, 32768},
{MMP2_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000},
- {MMP2_CLK_PLL1, "pll1", NULL, 0, 800000000},
- {MMP2_CLK_PLL2, "pll2", NULL, 0, 960000000},
{MMP2_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000},
};
+static struct mmp_param_pll_clk pll_clks[] = {
+ {MMP2_CLK_PLL1, "pll1", 797330000, MPMU_FCCR, 0x4000, MPMU_POSR, 0},
+ {MMP2_CLK_PLL2, "pll2", 0, MPMU_PLL2_CR, 0x0300, MPMU_PLL2_CR, 10},
+};
+
+static struct mmp_param_pll_clk mmp3_pll_clks[] = {
+	{MMP2_CLK_PLL1, "pll1", 797330000, MPMU_FCCR, 0x4000, MPMU_POSR, 0, 26000000, MPMU_PLL1_CTRL, 25},
+ {MMP2_CLK_PLL2, "pll2", 0, MPMU_PLL2_CR, 0x0300, MPMU_PLL2_CR, 10, 26000000, MPMU_PLL2_CTRL1, 25},
+ {MMP3_CLK_PLL1_P, "pll1_p", 0, MPMU_PLL_DIFF_CTRL, 0x0010, 0, 0, 797330000, MPMU_PLL_DIFF_CTRL, 0},
+ {MMP3_CLK_PLL2_P, "pll2_p", 0, MPMU_PLL_DIFF_CTRL, 0x0100, MPMU_PLL2_CR, 10, 26000000, MPMU_PLL_DIFF_CTRL, 5},
+ {MMP3_CLK_PLL3, "pll3", 0, MPMU_PLL3_CR, 0x0300, MPMU_PLL3_CR, 10, 26000000, MPMU_PLL3_CTRL1, 25},
+};
+
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
{MMP2_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
{MMP2_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
@@ -113,6 +147,16 @@ static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
ARRAY_SIZE(fixed_rate_clks));
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ mmp_register_pll_clks(unit, mmp3_pll_clks,
+ pxa_unit->mpmu_base,
+ ARRAY_SIZE(mmp3_pll_clks));
+ } else {
+ mmp_register_pll_clks(unit, pll_clks,
+ pxa_unit->mpmu_base,
+ ARRAY_SIZE(pll_clks));
+ }
+
mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
ARRAY_SIZE(fixed_factor_clks));
@@ -127,16 +171,16 @@ static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
static DEFINE_SPINLOCK(uart0_lock);
static DEFINE_SPINLOCK(uart1_lock);
static DEFINE_SPINLOCK(uart2_lock);
-static const char *uart_parent_names[] = {"uart_pll", "vctcxo"};
+static const char * const uart_parent_names[] = {"uart_pll", "vctcxo"};
static DEFINE_SPINLOCK(ssp0_lock);
static DEFINE_SPINLOCK(ssp1_lock);
static DEFINE_SPINLOCK(ssp2_lock);
static DEFINE_SPINLOCK(ssp3_lock);
-static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
+static const char * const ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
+static const char * const timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
static DEFINE_SPINLOCK(reset_lock);
@@ -176,6 +220,13 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = {
{MMP2_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock},
{MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock},
{MMP2_CLK_TIMER, "timer_clk", "timer_mux", CLK_SET_RATE_PARENT, APBC_TIMER, 0x7, 0x3, 0x0, 0, &timer_lock},
+ {MMP2_CLK_THERMAL0, "thermal0_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL0, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+};
+
+static struct mmp_param_gate_clk mmp3_apbc_gate_clks[] = {
+ {MMP3_CLK_THERMAL1, "thermal1_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL1, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+ {MMP3_CLK_THERMAL2, "thermal2_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL2, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+ {MMP3_CLK_THERMAL3, "thermal3_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL3, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
};
static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
@@ -187,10 +238,15 @@ static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
ARRAY_SIZE(apbc_gate_clks));
+
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ mmp_register_gate_clks(unit, mmp3_apbc_gate_clks, pxa_unit->apbc_base,
+ ARRAY_SIZE(mmp3_apbc_gate_clks));
+ }
}
static DEFINE_SPINLOCK(sdh_lock);
-static const char *sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
+static const char * const sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
static struct mmp_clk_mix_config sdh_mix_config = {
.reg_info = DEFINE_MIX_REG_INFO(4, 10, 2, 8, 32),
};
@@ -201,11 +257,20 @@ static DEFINE_SPINLOCK(usbhsic1_lock);
static DEFINE_SPINLOCK(disp0_lock);
static DEFINE_SPINLOCK(disp1_lock);
-static const char *disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"};
+static const char * const disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"};
static DEFINE_SPINLOCK(ccic0_lock);
static DEFINE_SPINLOCK(ccic1_lock);
-static const char *ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
+static const char * const ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
+
+static DEFINE_SPINLOCK(gpu_lock);
+static const char * const mmp2_gpu_gc_parent_names[] = {"pll1_2", "pll1_3", "pll2_2", "pll2_3", "pll2", "usb_pll"};
+static u32 mmp2_gpu_gc_parent_table[] = { 0x0000, 0x0040, 0x0080, 0x00c0, 0x1000, 0x1040 };
+static const char * const mmp2_gpu_bus_parent_names[] = {"pll1_4", "pll2", "pll2_2", "usb_pll"};
+static u32 mmp2_gpu_bus_parent_table[] = { 0x0000, 0x0020, 0x0030, 0x4020 };
+static const char * const mmp3_gpu_bus_parent_names[] = {"pll1_4", "pll1_6", "pll1_2", "pll2_2"};
+static const char * const mmp3_gpu_gc_parent_names[] = {"pll1", "pll2", "pll1_p", "pll2_p"};
+
static struct mmp_clk_mix_config ccic0_mix_config = {
.reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32),
};
@@ -218,6 +283,15 @@ static struct mmp_param_mux_clk apmu_mux_clks[] = {
{MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
};
+static struct mmp_param_mux_clk mmp3_apmu_mux_clks[] = {
+ {0, "gpu_bus_mux", mmp3_gpu_bus_parent_names, ARRAY_SIZE(mmp3_gpu_bus_parent_names),
+ CLK_SET_RATE_PARENT, APMU_GPU, 4, 2, 0, &gpu_lock},
+ {0, "gpu_3d_mux", mmp3_gpu_gc_parent_names, ARRAY_SIZE(mmp3_gpu_gc_parent_names),
+ CLK_SET_RATE_PARENT, APMU_GPU, 6, 2, 0, &gpu_lock},
+ {0, "gpu_2d_mux", mmp3_gpu_gc_parent_names, ARRAY_SIZE(mmp3_gpu_gc_parent_names),
+ CLK_SET_RATE_PARENT, APMU_GPU, 12, 2, 0, &gpu_lock},
+};
+
static struct mmp_param_div_clk apmu_div_clks[] = {
{0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, 0, &disp0_lock},
{0, "disp0_sphy_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 15, 5, 0, &disp0_lock},
@@ -226,6 +300,11 @@ static struct mmp_param_div_clk apmu_div_clks[] = {
{0, "ccic1_sphy_div", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 10, 5, 0, &ccic1_lock},
};
+static struct mmp_param_div_clk mmp3_apmu_div_clks[] = {
+ {0, "gpu_3d_div", "gpu_3d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 24, 4, 0, &gpu_lock},
+ {0, "gpu_2d_div", "gpu_2d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 28, 4, 0, &gpu_lock},
+};
+
static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
{MMP2_CLK_USBHSIC0, "usbhsic0_clk", "usb_pll", 0, APMU_USBHSIC0, 0x1b, 0x1b, 0x0, 0, &usbhsic0_lock},
@@ -235,8 +314,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
- {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock},
- {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x09, 0x09, 0x0, 0, &disp1_lock},
{MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
@@ -246,6 +325,17 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
+ {MMP2_CLK_GPU_BUS, "gpu_bus_clk", "gpu_bus_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0xa, 0xa, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
+};
+
+static struct mmp_param_gate_clk mmp2_apmu_gate_clks[] = {
+ {MMP2_CLK_GPU_3D, "gpu_3d_clk", "gpu_3d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0x5, 0x5, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
+};
+
+static struct mmp_param_gate_clk mmp3_apmu_gate_clks[] = {
+ {MMP3_CLK_SDH4, "sdh4_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH4, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP3_CLK_GPU_3D, "gpu_3d_clk", "gpu_3d_div", CLK_SET_RATE_PARENT, APMU_GPU, 0x5, 0x5, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
+ {MMP3_CLK_GPU_2D, "gpu_2d_clk", "gpu_2d_div", CLK_SET_RATE_PARENT, APMU_GPU, 0x1c0000, 0x1c0000, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
};
static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
@@ -281,6 +371,34 @@ static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
ARRAY_SIZE(apmu_gate_clks));
+
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ mmp_register_mux_clks(unit, mmp3_apmu_mux_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(mmp3_apmu_mux_clks));
+
+ mmp_register_div_clks(unit, mmp3_apmu_div_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(mmp3_apmu_div_clks));
+
+ mmp_register_gate_clks(unit, mmp3_apmu_gate_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(mmp3_apmu_gate_clks));
+ } else {
+ clk_register_mux_table(NULL, "gpu_3d_mux", mmp2_gpu_gc_parent_names,
+ ARRAY_SIZE(mmp2_gpu_gc_parent_names),
+ CLK_SET_RATE_PARENT,
+ pxa_unit->apmu_base + APMU_GPU,
+ 0, 0x10c0, 0,
+ mmp2_gpu_gc_parent_table, &gpu_lock);
+
+ clk_register_mux_table(NULL, "gpu_bus_mux", mmp2_gpu_bus_parent_names,
+ ARRAY_SIZE(mmp2_gpu_bus_parent_names),
+ CLK_SET_RATE_PARENT,
+ pxa_unit->apmu_base + APMU_GPU,
+ 0, 0x4030, 0,
+ mmp2_gpu_bus_parent_table, &gpu_lock);
+
+ mmp_register_gate_clks(unit, mmp2_apmu_gate_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(mmp2_apmu_gate_clks));
+ }
}
static void mmp2_clk_reset_init(struct device_node *np,
@@ -313,6 +431,11 @@ static void __init mmp2_clk_init(struct device_node *np)
if (!pxa_unit)
return;
+ if (of_device_is_compatible(np, "marvell,mmp3-clock"))
+ pxa_unit->model = CLK_MODEL_MMP3;
+ else
+ pxa_unit->model = CLK_MODEL_MMP2;
+
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
@@ -352,3 +475,4 @@ free_memory:
}
CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init);
+CLK_OF_DECLARE(mmp3_clk, "marvell,mmp3-clock", mmp2_clk_init);
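The MMP2 branch above registers its GPU muxes with clk_register_mux_table() rather than a plain shift/width mux because the select bits are not contiguous: the 3D mux mask 0x10c0 spans bits 6-7 and 12, so each table entry is a full masked register value. A minimal standalone sketch of the table decode, assuming a hypothetical mux_table_get_parent() helper (roughly what the framework's clk_mux_val_to_index() does):

/*
 * Standalone sketch only -- not kernel code.  The masked register value is
 * compared against each table entry; the matching position is the parent
 * index.
 */
#include <stdint.h>
#include <stdio.h>

static int mux_table_get_parent(uint32_t reg_val, uint32_t mask,
				const uint32_t *table, int num_parents)
{
	int i;

	for (i = 0; i < num_parents; i++)
		if ((reg_val & mask) == table[i])
			return i;	/* index into the parent names array */
	return -1;			/* selection not described by the table */
}

int main(void)
{
	/* Same values as mmp2_gpu_gc_parent_table above. */
	const uint32_t table[] = { 0x0000, 0x0040, 0x0080, 0x00c0, 0x1000, 0x1040 };

	/* A register reading 0x1040 under mask 0x10c0 selects parent 5, "usb_pll". */
	printf("parent index: %d\n",
	       mux_table_get_parent(0x1040, 0x10c0, table, 6));
	return 0;
}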
diff --git a/drivers/clk/mmp/clk-pll.c b/drivers/clk/mmp/clk-pll.c
new file mode 100644
index 000000000000..962014cfdc44
--- /dev/null
+++ b/drivers/clk/mmp/clk-pll.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MMP PLL clock rate calculation
+ *
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include "clk.h"
+
+#define to_clk_mmp_pll(hw) container_of(hw, struct mmp_clk_pll, hw)
+
+struct mmp_clk_pll {
+ struct clk_hw hw;
+ unsigned long default_rate;
+ void __iomem *enable_reg;
+ u32 enable;
+ void __iomem *reg;
+ u8 shift;
+
+ unsigned long input_rate;
+ void __iomem *postdiv_reg;
+ u8 postdiv_shift;
+};
+
+static int mmp_clk_pll_is_enabled(struct clk_hw *hw)
+{
+ struct mmp_clk_pll *pll = to_clk_mmp_pll(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->enable_reg);
+ if ((val & pll->enable) == pll->enable)
+ return 1;
+
+ /* Some PLLs, if not software controlled, output a default clock. */
+ if (pll->default_rate > 0)
+ return 1;
+
+ return 0;
+}
+
+static unsigned long mmp_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mmp_clk_pll *pll = to_clk_mmp_pll(hw);
+ u32 fbdiv, refdiv, postdiv;
+ u64 rate;
+ u32 val;
+
+ val = readl_relaxed(pll->enable_reg);
+ if ((val & pll->enable) != pll->enable)
+ return pll->default_rate;
+
+ if (pll->reg) {
+ val = readl_relaxed(pll->reg);
+ fbdiv = (val >> pll->shift) & 0x1ff;
+ refdiv = (val >> (pll->shift + 9)) & 0x1f;
+ } else {
+ fbdiv = 2;
+ refdiv = 1;
+ }
+
+ if (pll->postdiv_reg) {
+ /* MMP3 clock rate calculation */
+ static const u8 postdivs[] = {2, 3, 4, 5, 6, 8, 10, 12, 16};
+
+ val = readl_relaxed(pll->postdiv_reg);
+ postdiv = (val >> pll->postdiv_shift) & 0x7;
+
+ rate = pll->input_rate;
+ rate *= 2 * fbdiv;
+ do_div(rate, refdiv);
+ do_div(rate, postdivs[postdiv]);
+ } else {
+ /* MMP2 clock rate calculation */
+ if (refdiv == 3) {
+ rate = 19200000;
+ } else if (refdiv == 4) {
+ rate = 26000000;
+ } else {
+ pr_err("bad refdiv: %d (0x%08x)\n", refdiv, val);
+ return 0;
+ }
+
+ rate *= fbdiv + 2;
+ do_div(rate, refdiv + 2);
+ }
+
+ return (unsigned long)rate;
+}
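Plugging numbers into the two branches above makes the formulas concrete. A standalone sketch; the divider values are illustrative only, not read from real hardware:

/*
 *   MMP2: rate = base * (fbdiv + 2) / (refdiv + 2),
 *         base = 19.2 MHz (refdiv == 3) or 26 MHz (refdiv == 4)
 *   MMP3: rate = input_rate * 2 * fbdiv / refdiv / postdivs[postdiv]
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint8_t postdivs[] = {2, 3, 4, 5, 6, 8, 10, 12, 16};

	/* MMP2: fbdiv = 98, refdiv = 3 -> 19.2 MHz * 100 / 5 = 384 MHz */
	uint64_t mmp2 = 19200000ULL * (98 + 2) / (3 + 2);

	/* MMP3: 26 MHz input, fbdiv = 60, refdiv = 1, postdiv field 0 (/2)
	 * -> 26 MHz * 120 / 1 / 2 = 1560 MHz
	 */
	uint64_t mmp3 = 26000000ULL * 2 * 60 / 1 / postdivs[0];

	printf("mmp2: %llu Hz, mmp3: %llu Hz\n",
	       (unsigned long long)mmp2, (unsigned long long)mmp3);
	return 0;
}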
+
+static const struct clk_ops mmp_clk_pll_ops = {
+ .is_enabled = mmp_clk_pll_is_enabled,
+ .recalc_rate = mmp_clk_pll_recalc_rate,
+};
+
+static struct clk *mmp_clk_register_pll(char *name,
+ unsigned long default_rate,
+ void __iomem *enable_reg, u32 enable,
+ void __iomem *reg, u8 shift,
+ unsigned long input_rate,
+ void __iomem *postdiv_reg, u8 postdiv_shift)
+{
+ struct mmp_clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &mmp_clk_pll_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+
+ pll->default_rate = default_rate;
+ pll->enable_reg = enable_reg;
+ pll->enable = enable;
+ pll->reg = reg;
+ pll->shift = shift;
+
+ pll->input_rate = input_rate;
+ pll->postdiv_reg = postdiv_reg;
+ pll->postdiv_shift = postdiv_shift;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+void mmp_register_pll_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_pll_clk *clks,
+ void __iomem *base, int size)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ void __iomem *reg = NULL;
+
+ if (clks[i].offset)
+ reg = base + clks[i].offset;
+
+ clk = mmp_clk_register_pll(clks[i].name,
+ clks[i].default_rate,
+ base + clks[i].enable_offset,
+ clks[i].enable,
+ reg, clks[i].shift,
+ clks[i].input_rate,
+ base + clks[i].postdiv_offset,
+ clks[i].postdiv_shift);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ if (clks[i].id)
+ unit->clk_table[clks[i].id] = clk;
+ }
+}
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index 70bb73257647..20dc1e5dd756 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -97,7 +97,7 @@ struct mmp_clk_mix {
extern const struct clk_ops mmp_clk_mix_ops;
extern struct clk *mmp_clk_register_mix(struct device *dev,
const char *name,
- const char **parent_names,
+ const char * const *parent_names,
u8 num_parents,
unsigned long flags,
struct mmp_clk_mix_config *config,
@@ -124,9 +124,6 @@ extern struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
u32 val_disable, unsigned int gate_flags,
spinlock_t *lock);
-
-extern struct clk *mmp_clk_register_pll2(const char *name,
- const char *parent_name, unsigned long flags);
extern struct clk *mmp_clk_register_apbc(const char *name,
const char *parent_name, void __iomem *base,
unsigned int delay, unsigned int apbc_flags, spinlock_t *lock);
@@ -196,7 +193,7 @@ void mmp_register_gate_clks(struct mmp_clk_unit *unit,
struct mmp_param_mux_clk {
unsigned int id;
char *name;
- const char **parent_name;
+ const char * const *parent_name;
u8 num_parents;
unsigned long flags;
unsigned long offset;
@@ -224,6 +221,23 @@ void mmp_register_div_clks(struct mmp_clk_unit *unit,
struct mmp_param_div_clk *clks,
void __iomem *base, int size);
+struct mmp_param_pll_clk {
+ unsigned int id;
+ char *name;
+ unsigned long default_rate;
+ unsigned long enable_offset;
+ u32 enable;
+ unsigned long offset;
+ u8 shift;
+ /* MMP3 specific: */
+ unsigned long input_rate;
+ unsigned long postdiv_offset;
+ unsigned long postdiv_shift;
+};
+void mmp_register_pll_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_pll_clk *clks,
+ void __iomem *base, int size);
+
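Putting the struct and helper together, a platform file could describe one PLL as below and register the table from its init path. Everything here is a hypothetical placeholder (the offsets, masks, rates, and the example_pll_clks name); it only illustrates which field feeds which mmp_clk_register_pll() argument:

/* Hypothetical sketch only; every offset, mask and rate is a placeholder. */
static struct mmp_param_pll_clk example_pll_clks[] = {
	{
		.id = 0,			/* no clk_table slot */
		.name = "pll_example",
		.default_rate = 0,		/* fully software controlled */
		.enable_offset = 0x0034,
		.enable = 0x0100,		/* bits that must read back as set */
		.offset = 0x0414,		/* fbdiv/refdiv register */
		.shift = 0,
		/* MMP3-only postdiv description: */
		.input_rate = 26000000,
		.postdiv_offset = 0x0500,
		.postdiv_shift = 4,
	},
};

static void example_pll_init(struct mmp_clk_unit *unit, void __iomem *base)
{
	mmp_register_pll_clks(unit, example_pll_clks, base,
			      ARRAY_SIZE(example_pll_clks));
}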
#define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc) \
{ \
.width_div = (w_d), \
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 15cdcdc9b3b8..abb121f8de52 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -280,6 +280,15 @@ config SC_GPUCC_7180
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SC_MSS_7180
+ tristate "SC7180 Modem Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the Modem Subsystem clock controller on Qualcomm
+ Technologies, Inc on SC7180 devices.
+ Say Y if you want to use the Modem branch clocks of the Modem
+ subsystem clock controller to reset the MSS subsystem.
+
config SC_VIDEOCC_7180
tristate "SC7180 Video Clock Controller"
select SC_GCC_7180
@@ -366,6 +375,14 @@ config SM_GCC_8150
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8250
+ tristate "SM8250 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8250 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on SPMI || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 656a87e629d4..691efbf7e81f 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
+obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
+obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 7c2936da9b14..9b2dfa08acb2 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -52,6 +52,7 @@
#define PLL_CONFIG_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U1])
#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
+#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
@@ -116,6 +117,22 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_ALPHA_VAL] = 0x40,
[PLL_OFF_CAL_VAL] = 0x44,
},
+ [CLK_ALPHA_PLL_TYPE_LUCID] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_CAL_L_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_USER_CTL_U1] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x20,
+ [PLL_OFF_TEST_CTL] = 0x24,
+ [PLL_OFF_TEST_CTL_U] = 0x28,
+ [PLL_OFF_TEST_CTL_U1] = 0x2c,
+ [PLL_OFF_STATUS] = 0x30,
+ [PLL_OFF_OPMODE] = 0x38,
+ [PLL_OFF_ALPHA_VAL] = 0x40,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -134,15 +151,14 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define PLL_HUAYRA_N_MASK 0xff
#define PLL_HUAYRA_ALPHA_WIDTH 16
-#define FABIA_OPMODE_STANDBY 0x0
-#define FABIA_OPMODE_RUN 0x1
-
-#define FABIA_PLL_OUT_MASK 0x7
-#define FABIA_PLL_RATE_MARGIN 500
+#define PLL_STANDBY 0x0
+#define PLL_RUN 0x1
+#define PLL_OUT_MASK 0x7
+#define PLL_RATE_MARGIN 500
-#define TRION_PLL_STANDBY 0x0
-#define TRION_PLL_RUN 0x1
-#define TRION_PLL_OUT_MASK 0x7
+/* LUCID PLL specific settings and offsets */
+#define LUCID_PLL_CAL_VAL 0x44
+#define LUCID_PCAL_DONE BIT(26)
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
@@ -544,7 +560,8 @@ static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
vco = alpha_pll_find_vco(pll, rate);
if (pll->vco_table && !vco) {
- pr_err("alpha pll not in a valid vco range\n");
+ pr_err("%s: alpha pll not in a valid vco range\n",
+ clk_hw_get_name(hw));
return -EINVAL;
}
@@ -722,7 +739,7 @@ static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
*/
if (clk_alpha_pll_is_enabled(hw)) {
if (cur_alpha != a) {
- pr_err("clock needs to be gated %s\n",
+ pr_err("%s: clock needs to be gated\n",
clk_hw_get_name(hw));
return -EBUSY;
}
@@ -765,7 +782,7 @@ static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
if (ret)
return 0;
- return ((opmode_regval & TRION_PLL_RUN) && (mode_regval & PLL_OUTCTRL));
+ return ((opmode_regval & PLL_RUN) && (mode_regval & PLL_OUTCTRL));
}
static int clk_trion_pll_is_enabled(struct clk_hw *hw)
@@ -795,7 +812,7 @@ static int clk_trion_pll_enable(struct clk_hw *hw)
}
/* Set operation mode to RUN */
- regmap_write(regmap, PLL_OPMODE(pll), TRION_PLL_RUN);
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
ret = wait_for_pll_enable_lock(pll);
if (ret)
@@ -803,7 +820,7 @@ static int clk_trion_pll_enable(struct clk_hw *hw)
/* Enable the PLL outputs */
ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
- TRION_PLL_OUT_MASK, TRION_PLL_OUT_MASK);
+ PLL_OUT_MASK, PLL_OUT_MASK);
if (ret)
return ret;
@@ -836,12 +853,12 @@ static void clk_trion_pll_disable(struct clk_hw *hw)
/* Disable the PLL outputs */
ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
- TRION_PLL_OUT_MASK, 0);
+ PLL_OUT_MASK, 0);
if (ret)
return;
/* Place the PLL mode in STANDBY */
- regmap_write(regmap, PLL_OPMODE(pll), TRION_PLL_STANDBY);
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
}
@@ -849,33 +866,12 @@ static unsigned long
clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- struct regmap *regmap = pll->clkr.regmap;
- u32 l, frac;
- u64 prate = parent_rate;
-
- regmap_read(regmap, PLL_L_VAL(pll), &l);
- regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
-
- return alpha_pll_calc_rate(prate, l, frac, ALPHA_REG_16BIT_WIDTH);
-}
-
-static long clk_trion_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- unsigned long min_freq, max_freq;
- u32 l;
- u64 a;
-
- rate = alpha_pll_round_rate(rate, *prate,
- &l, &a, ALPHA_REG_16BIT_WIDTH);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ u32 l, frac, alpha_width = pll_alpha_width(pll);
- min_freq = pll->vco_table[0].min_freq;
- max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac);
- return clamp(rate, min_freq, max_freq);
+ return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
const struct clk_ops clk_alpha_pll_fixed_ops = {
@@ -921,7 +917,7 @@ const struct clk_ops clk_trion_fixed_pll_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_trion_pll_round_rate,
+ .round_rate = clk_alpha_pll_round_rate,
};
EXPORT_SYMBOL_GPL(clk_trion_fixed_pll_ops);
@@ -1088,14 +1084,14 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw)
return ret;
/* Skip If PLL is already running */
- if ((opmode_val & FABIA_OPMODE_RUN) && (val & PLL_OUTCTRL))
+ if ((opmode_val & PLL_RUN) && (val & PLL_OUTCTRL))
return 0;
ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
if (ret)
return ret;
- ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ ret = regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
if (ret)
return ret;
@@ -1104,7 +1100,7 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw)
if (ret)
return ret;
- ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_RUN);
+ ret = regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
if (ret)
return ret;
@@ -1113,7 +1109,7 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw)
return ret;
ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
- FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ PLL_OUT_MASK, PLL_OUT_MASK);
if (ret)
return ret;
@@ -1143,13 +1139,12 @@ static void alpha_pll_fabia_disable(struct clk_hw *hw)
return;
/* Disable main outputs */
- ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), FABIA_PLL_OUT_MASK,
- 0);
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, 0);
if (ret)
return;
/* Place the PLL in STANDBY */
- regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
}
static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
@@ -1170,7 +1165,7 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
- unsigned long rrate;
+ unsigned long rrate, max = rate + PLL_RATE_MARGIN;
rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
@@ -1178,8 +1173,9 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
* Due to a limited number of bits for fractional rate programming, the
* rounded up rate could be marginally higher than the requested rate.
*/
- if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
- pr_err("Call set rate on the PLL with rounded rates!\n");
+ if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("%s: Rounded rate %lu not within range [%lu, %lu)\n",
+ clk_hw_get_name(hw), rrate, rate, max);
return -EINVAL;
}
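To see why the margin exists: with a 16-bit alpha field the programmable step is parent_rate / 2^16 (about 293 Hz from a 19.2 MHz reference), so rounding the fraction up can overshoot the request by at most one step, which must stay under PLL_RATE_MARGIN (500 Hz). A standalone sketch of that quantization, using an arbitrary requested rate:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t prate = 19200000;	/* XO reference */
	const unsigned int alpha_width = 16;
	const uint64_t want = 806400123;	/* arbitrary request */

	uint64_t l = want / prate;
	uint64_t rem = want % prate;
	/* Round the fraction up to the next representable step: */
	uint64_t a = (rem * (1ULL << alpha_width) + prate - 1) / prate;
	uint64_t got = prate * l + ((prate * a) >> alpha_width);

	printf("requested %llu Hz, programmed %llu Hz (overshoot %llu Hz)\n",
	       (unsigned long long)want, (unsigned long long)got,
	       (unsigned long long)(got - want));
	return 0;
}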
@@ -1196,6 +1192,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
struct clk_hw *parent_hw;
unsigned long cal_freq, rrate;
u32 cal_l, val, alpha_width = pll_alpha_width(pll);
+ const char *name = clk_hw_get_name(hw);
u64 a;
int ret;
@@ -1210,7 +1207,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
if (!vco) {
- pr_err("alpha pll: not in a valid vco range\n");
+ pr_err("%s: alpha pll not in a valid vco range\n", name);
return -EINVAL;
}
@@ -1227,7 +1224,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
* Due to a limited number of bits for fractional rate programming, the
* rounded up rate could be marginally higher than the requested rate.
*/
- if (rrate > (cal_freq + FABIA_PLL_RATE_MARGIN) || rrate < cal_freq)
+ if (rrate > (cal_freq + PLL_RATE_MARGIN) || rrate < cal_freq)
return -EINVAL;
/* Setup PLL for calibration frequency */
@@ -1236,7 +1233,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
/* Bringup the PLL at calibration frequency */
ret = clk_alpha_pll_enable(hw);
if (ret) {
- pr_err("alpha pll calibration failed\n");
+ pr_err("%s: alpha pll calibration failed\n", name);
return ret;
}
@@ -1394,3 +1391,175 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
+
+/**
+ * clk_lucid_pll_configure - configure the lucid pll
+ *
+ * @pll: clk alpha pll
+ * @regmap: register map
+ * @config: configuration to apply for pll
+ */
+void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ if (config->l)
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+
+ regmap_write(regmap, PLL_CAL_L_VAL(pll), LUCID_PLL_CAL_VAL);
+
+ if (config->alpha)
+ regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+
+ if (config->config_ctl_hi_val)
+ regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+
+ if (config->config_ctl_hi1_val)
+ regmap_write(regmap, PLL_CONFIG_CTL_U1(pll),
+ config->config_ctl_hi1_val);
+
+ if (config->user_ctl_val)
+ regmap_write(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+
+ if (config->user_ctl_hi_val)
+ regmap_write(regmap, PLL_USER_CTL_U(pll),
+ config->user_ctl_hi_val);
+
+ if (config->user_ctl_hi1_val)
+ regmap_write(regmap, PLL_USER_CTL_U1(pll),
+ config->user_ctl_hi1_val);
+
+ if (config->test_ctl_val)
+ regmap_write(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+
+ if (config->test_ctl_hi_val)
+ regmap_write(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+
+ if (config->test_ctl_hi1_val)
+ regmap_write(regmap, PLL_TEST_CTL_U1(pll),
+ config->test_ctl_hi1_val);
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
+ PLL_UPDATE_BYPASS);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+
+ /* Set operation mode to OFF */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ /* Place the PLL in STANDBY mode */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_lucid_pll_configure);
+
+/*
+ * The Lucid PLL requires a power-on self-calibration, which happens when the
+ * PLL comes out of reset. Calibrate in case it has not completed.
+ */
+static int alpha_pll_lucid_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 regval;
+ int ret;
+
+ /* Return early if calibration is not needed. */
+ regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &regval);
+ if (regval & LUCID_PCAL_DONE)
+ return 0;
+
+ /* On/off to calibrate */
+ ret = clk_trion_pll_enable(hw);
+ if (!ret)
+ clk_trion_pll_disable(hw);
+
+ return ret;
+}
+
+static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ unsigned long rrate;
+ u32 regval, l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ int ret;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+
+ /*
+ * Due to a limited number of bits for fractional rate programming, the
+ * rounded up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("%s: Rounded rate %lu not within range [%lu, %lu)\n",
+ clk_hw_get_name(hw), rrate, rate, rate + PLL_RATE_MARGIN);
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ /* Latch the PLL input */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_UPDATE, PLL_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for 2 reference cycles before checking the ACK bit. */
+ udelay(1);
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &regval);
+ if (!(regval & ALPHA_PLL_ACK_LATCH)) {
+ pr_err("Lucid PLL latch failed. Output may be unstable!\n");
+ return -EINVAL;
+ }
+
+ /* Return the latch input to 0 */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_UPDATE, 0);
+ if (ret)
+ return ret;
+
+ if (clk_hw_is_enabled(hw)) {
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for PLL output to stabilize */
+ udelay(100);
+ return 0;
+}
+
+const struct clk_ops clk_alpha_pll_lucid_ops = {
+ .prepare = alpha_pll_lucid_prepare,
+ .enable = clk_trion_pll_enable,
+ .disable = clk_trion_pll_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_lucid_ops = {
+ .enable = clk_trion_pll_enable,
+ .disable = clk_trion_pll_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index fbc1f67c7a26..704674a153b6 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -14,6 +14,7 @@ enum {
CLK_ALPHA_PLL_TYPE_BRAMMO,
CLK_ALPHA_PLL_TYPE_FABIA,
CLK_ALPHA_PLL_TYPE_TRION,
+ CLK_ALPHA_PLL_TYPE_LUCID,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -30,6 +31,7 @@ enum {
PLL_OFF_CONFIG_CTL_U1,
PLL_OFF_TEST_CTL,
PLL_OFF_TEST_CTL_U,
+ PLL_OFF_TEST_CTL_U1,
PLL_OFF_STATUS,
PLL_OFF_OPMODE,
PLL_OFF_FRAC,
@@ -94,10 +96,13 @@ struct alpha_pll_config {
u32 alpha_hi;
u32 config_ctl_val;
u32 config_ctl_hi_val;
+ u32 config_ctl_hi1_val;
u32 user_ctl_val;
u32 user_ctl_hi_val;
+ u32 user_ctl_hi1_val;
u32 test_ctl_val;
u32 test_ctl_hi_val;
+ u32 test_ctl_hi1_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
@@ -123,10 +128,17 @@ extern const struct clk_ops clk_alpha_pll_fabia_ops;
extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_fabia_ops;
+extern const struct clk_ops clk_alpha_pll_lucid_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_lucid_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+
extern const struct clk_ops clk_trion_fixed_pll_ops;
extern const struct clk_ops clk_trion_pll_postdiv_ops;
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index 9e3110a71f12..f71d228fd6bd 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -543,10 +543,45 @@ static const struct rpm_clk_desc rpm_clk_apq8064 = {
.num_clks = ARRAY_SIZE(apq8064_clks),
};
+/* ipq806x */
+DEFINE_CLK_RPM(ipq806x, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(ipq806x, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(ipq806x, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(ipq806x, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM(ipq806x, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(ipq806x, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(ipq806x, nss_fabric_0_clk, nss_fabric_0_a_clk, QCOM_RPM_NSS_FABRIC_0_CLK);
+DEFINE_CLK_RPM(ipq806x, nss_fabric_1_clk, nss_fabric_1_a_clk, QCOM_RPM_NSS_FABRIC_1_CLK);
+
+static struct clk_rpm *ipq806x_clks[] = {
+ [RPM_APPS_FABRIC_CLK] = &ipq806x_afab_clk,
+ [RPM_APPS_FABRIC_A_CLK] = &ipq806x_afab_a_clk,
+ [RPM_CFPB_CLK] = &ipq806x_cfpb_clk,
+ [RPM_CFPB_A_CLK] = &ipq806x_cfpb_a_clk,
+ [RPM_DAYTONA_FABRIC_CLK] = &ipq806x_daytona_clk,
+ [RPM_DAYTONA_FABRIC_A_CLK] = &ipq806x_daytona_a_clk,
+ [RPM_EBI1_CLK] = &ipq806x_ebi1_clk,
+ [RPM_EBI1_A_CLK] = &ipq806x_ebi1_a_clk,
+ [RPM_SYS_FABRIC_CLK] = &ipq806x_sfab_clk,
+ [RPM_SYS_FABRIC_A_CLK] = &ipq806x_sfab_a_clk,
+ [RPM_SFPB_CLK] = &ipq806x_sfpb_clk,
+ [RPM_SFPB_A_CLK] = &ipq806x_sfpb_a_clk,
+ [RPM_NSS_FABRIC_0_CLK] = &ipq806x_nss_fabric_0_clk,
+ [RPM_NSS_FABRIC_0_A_CLK] = &ipq806x_nss_fabric_0_a_clk,
+ [RPM_NSS_FABRIC_1_CLK] = &ipq806x_nss_fabric_1_clk,
+ [RPM_NSS_FABRIC_1_A_CLK] = &ipq806x_nss_fabric_1_a_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_ipq806x = {
+ .clks = ipq806x_clks,
+ .num_clks = ARRAY_SIZE(ipq806x_clks),
+};
+
static const struct of_device_id rpm_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
{ .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
+ { .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 98a118c1e244..e2c669b08aff 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -143,12 +143,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
!= (c->aggr_state & BIT(state));
}
+static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state,
+ struct tcs_cmd *cmd, bool wait)
+{
+ if (wait)
+ return rpmh_write(c->dev, state, cmd, 1);
+
+ return rpmh_write_async(c->dev, state, cmd, 1);
+}
+
static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
{
struct tcs_cmd cmd = { 0 };
u32 cmd_state, on_val;
enum rpmh_state state = RPMH_SLEEP_STATE;
int ret;
+ bool wait;
cmd.addr = c->res_addr;
cmd_state = c->aggr_state;
@@ -159,7 +169,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
if (cmd_state & BIT(state))
cmd.data = on_val;
- ret = rpmh_write_async(c->dev, state, &cmd, 1);
+ wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
+ ret = clk_rpmh_send(c, state, &cmd, wait);
if (ret) {
dev_err(c->dev, "set %s state of %s failed: (%d)\n",
!state ? "sleep" :
@@ -216,7 +227,7 @@ static int clk_rpmh_prepare(struct clk_hw *hw)
mutex_unlock(&rpmh_clk_lock);
return ret;
-};
+}
static void clk_rpmh_unprepare(struct clk_hw *hw)
{
@@ -248,38 +259,33 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
{
struct tcs_cmd cmd = { 0 };
u32 cmd_state;
- int ret;
+ int ret = 0;
mutex_lock(&rpmh_clk_lock);
-
- cmd_state = 0;
if (enable) {
cmd_state = 1;
if (c->aggr_state)
cmd_state = c->aggr_state;
+ } else {
+ cmd_state = 0;
}
- if (c->last_sent_aggr_state == cmd_state) {
- mutex_unlock(&rpmh_clk_lock);
- return 0;
- }
+ if (c->last_sent_aggr_state != cmd_state) {
+ cmd.addr = c->res_addr;
+ cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
- cmd.addr = c->res_addr;
- cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
-
- ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
- if (ret) {
- dev_err(c->dev, "set active state of %s failed: (%d)\n",
- c->res_name, ret);
- mutex_unlock(&rpmh_clk_lock);
- return ret;
+ ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
+ if (ret) {
+ dev_err(c->dev, "set active state of %s failed: (%d)\n",
+ c->res_name, ret);
+ } else {
+ c->last_sent_aggr_state = cmd_state;
+ }
}
- c->last_sent_aggr_state = cmd_state;
-
mutex_unlock(&rpmh_clk_lock);
- return 0;
+ return ret;
}
static int clk_rpmh_bcm_prepare(struct clk_hw *hw)
@@ -287,14 +293,14 @@ static int clk_rpmh_bcm_prepare(struct clk_hw *hw)
struct clk_rpmh *c = to_clk_rpmh(hw);
return clk_rpmh_bcm_send_cmd(c, true);
-};
+}
static void clk_rpmh_bcm_unprepare(struct clk_hw *hw)
{
struct clk_rpmh *c = to_clk_rpmh(hw);
clk_rpmh_bcm_send_cmd(c, false);
-};
+}
static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
@@ -310,7 +316,7 @@ static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
clk_rpmh_bcm_send_cmd(c, true);
return 0;
-};
+}
static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
@@ -404,6 +410,28 @@ static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
.num_clks = ARRAY_SIZE(sc7180_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sm8250, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 2);
+
+static struct clk_hw *sm8250_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK1] = &sm8250_ln_bb_clk1.hw,
+ [RPMH_LN_BB_CLK1_A] = &sm8250_ln_bb_clk1_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8250 = {
+ .clks = sm8250_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8250_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -490,6 +518,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
+ { .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 0bbfef9fa6de..52f63ad787ba 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -525,6 +525,55 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
.num_clks = ARRAY_SIZE(msm8974_clks),
};
+
+/* msm8976 */
+DEFINE_CLK_SMD_RPM(msm8976, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8976, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8976, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk,
+ QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8976, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8976, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM_QDSS(msm8976, qdss_clk, qdss_a_clk,
+ QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, div_clk2, div_clk2_a, 12);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8976, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8976, bb_clk2_pin, bb_clk2_a_pin, 2);
+
+static struct clk_smd_rpm *msm8976_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8976_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8976_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8976_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8976_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8976_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8976_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8976_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8976_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8976_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8976_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8976_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8976_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &msm8976_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8976_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8976_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8976_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8976_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8976_bb_clk2_a_pin,
+ [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8976_mmssnoc_ahb_clk,
+ [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8976_mmssnoc_ahb_a_clk,
+ [RPM_SMD_DIV_CLK2] = &msm8976_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &msm8976_div_clk2_a,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
+ .clks = msm8976_clks,
+ .num_clks = ARRAY_SIZE(msm8976_clks),
+};
+
/* msm8996 */
DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
@@ -720,6 +769,7 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
+ { .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 },
{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
{ .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b0eee0903807..a8456e09c44d 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -1224,6 +1224,8 @@ static struct clk_rcg prng_src = {
.parent_map = gcc_pxo_pll8_map,
},
.clkr = {
+ .enable_reg = 0x2e80,
+ .enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "prng_src",
.parent_names = gcc_pxo_pll8,
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 7f59fb8da033..6a51b5b5fc19 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -2165,6 +2165,71 @@ static struct clk_branch gcc_video_xo_clk = {
},
};
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mfab_axis_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mfab_axis_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_nav_axi_clk = {
+ .halt_reg = 0x8a00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_nav_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc ufs_phy_gdsc = {
.gdscr = 0x77004,
.pd = {
@@ -2336,6 +2401,11 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GPLL7] = &gpll7.clkr,
[GPLL4] = &gpll4.clkr,
[GPLL1] = &gpll1.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
+ [GCC_MSS_NAV_AXI_CLK] = &gcc_mss_nav_axi_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
};
static const struct qcom_reset_map gcc_sc7180_resets[] = {
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index 20877214acff..732bc7c937e6 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -21,6 +21,7 @@
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_BI_TCXO,
@@ -75,8 +76,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- .name = "bi_tcxo",
+ .hw = &gpll0.clkr.hw,
},
.num_parents = 1,
.ops = &clk_trion_pll_postdiv_ops,
@@ -3171,6 +3171,18 @@ static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
},
};
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_sec_clkref_clk = {
.halt_reg = 0x8c028,
.halt_check = BRANCH_HALT,
@@ -3218,6 +3230,18 @@ static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
},
};
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
/*
* Clock ON depends on external parent 'config noc', so we can't poll
* the halt bit and delay instead; also mark it critical for video boot
@@ -3292,6 +3316,24 @@ static struct clk_branch gcc_video_xo_clk = {
},
};
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
@@ -3480,10 +3522,12 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
[GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
[GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
[GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
[GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
[GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
[GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
[GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
[GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
@@ -3527,6 +3571,11 @@ static const struct qcom_reset_map gcc_sm8150_resets[] = {
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
};
+static struct gdsc *gcc_sm8150_gdscs[] = {
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+};
+
static const struct regmap_config gcc_sm8150_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3541,6 +3590,8 @@ static const struct qcom_cc_desc gcc_sm8150_desc = {
.num_clks = ARRAY_SIZE(gcc_sm8150_clocks),
.resets = gcc_sm8150_resets,
.num_resets = ARRAY_SIZE(gcc_sm8150_resets),
+ .gdscs = gcc_sm8150_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8150_gdscs),
};
static const struct of_device_id gcc_sm8150_match_table[] = {
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
new file mode 100644
index 000000000000..6cb6617b8d88
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -0,0 +1,3690 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sm8250.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
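The parent_map tables pair with the clk_parent_data tables by position: entry i of gcc_parent_data_0 is the clock framework's parent index i, while the second field of gcc_parent_map_0[i] is the value programmed into the RCG's CFG src field (so gpll0_out_even is parent index 2 but hardware select 6). A self-contained sketch of the reverse lookup, roughly what qcom_find_cfg_index() does in common.c, with the struct layout reproduced for illustration:

#include <stdio.h>

/* Mirrors the kernel's struct parent_map: src = P_* id, cfg = mux value. */
struct parent_map {
	unsigned char src;
	unsigned char cfg;
};

/* Given the src field read back from an RCG CFG register, recover the
 * CCF parent index. */
static int cfg_to_parent_index(const struct parent_map *map, int num,
			       unsigned int cfg)
{
	int i;

	for (i = 0; i < num; i++)
		if (map[i].cfg == cfg)
			return i;
	return -1;
}

int main(void)
{
	/* Shaped like gcc_parent_map_0 above; src ids follow the enum order. */
	const struct parent_map map0[] = { { 0, 0 }, { 4, 1 }, { 3, 6 } };

	/* A CFG src field of 6 is parent index 2, i.e. gpll0_out_even. */
	printf("parent index: %d\n", cfg_to_parent_index(map0, 3, 6));
	return 0;
}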
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL9_OUT_MAIN, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll9.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "aud_ref_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
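Each F() row packs (rate, parent, pre-divider, m, n); in the qcom clock code the macro stores the pre-divider field as 2*h-1, and the m/n pair drives the MND counter when nonzero, so the effective math is rate = parent / h * m / n. Assuming GPLL0 runs at its customary 600 MHz (and gpll0_out_even therefore at 300 MHz), the rows above and the UART-style MND rows in the QUP tables further down work out as in this standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t rcg_rate(uint64_t parent, unsigned int h,
			 unsigned int m, unsigned int n)
{
	uint64_t rate = parent / h;

	if (m && n)
		rate = rate * m / n;	/* MND fractional scaling */
	return rate;
}

int main(void)
{
	/* F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0): 600 MHz / 6 */
	printf("%llu\n", (unsigned long long)rcg_rate(600000000, 6, 0, 0));
	/* F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0): 300 MHz / 6 */
	printf("%llu\n", (unsigned long long)rcg_rate(300000000, 6, 0, 0));
	/* QUP-style MND row, F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625): */
	printf("%llu\n", (unsigned long long)rcg_rate(300000000, 1, 384, 15625));
	return 0;
}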
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b038,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d038,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17730,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17860,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e4d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
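+
+/*
+ * The single TSIF reference rate follows directly from the M/N values:
+ * 19.2 MHz / 2 * (1 / 91) = 105494.5 Hz, rounded up to 105495.
+ */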
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x75024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7506c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x750a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7706c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
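+
+/*
+ * The 4.5 and 2.5 pre-dividers are legal because F() stores
+ * (2 * div - 1): 2 * 4.5 - 1 = 8 and 2 * 2.5 - 1 = 4 both fit the
+ * integer divider field.  The resulting rates are consistent with a
+ * 600 MHz GPLL0 and its 300 MHz even-divided output.
+ */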
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x10064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
+ .reg = 0x48028,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0xf050,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0x10050,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
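+
+/*
+ * These post-dividers use clk_regmap_div_ro_ops, i.e. the divider
+ * field is treated as read-only; combined with CLK_SET_RATE_PARENT,
+ * rate requests are forwarded to the parent clock instead of touching
+ * the divider itself.
+ */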
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x9000c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
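+
+/*
+ * Branch clocks are simple gates: enable_reg/enable_mask toggle the
+ * branch and halt_reg is polled according to the halt_check policy.
+ * BRANCH_HALT_VOTED marks voteable branches, where the halt status is
+ * not trusted on disable because other voters may keep the clock on.
+ */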
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x750cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x750cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
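+
+/*
+ * hwcg_reg/hwcg_bit point at the hardware clock-gating control for the
+ * branch, allowing the block to autogate itself when idle.
+ */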
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x10080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x10080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
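+
+/*
+ * Here enable_reg (0x52000, a shared vote register) differs from
+ * halt_reg: voted branches are switched through per-master vote bits
+ * while the halt status is still read back at the branch itself.
+ */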
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1007c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
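+
+/*
+ * CLK_IS_CRITICAL makes the framework enable this clock at
+ * registration and never gate it: the CPU subsystem AHB bus clock must
+ * keep running regardless of consumers.
+ */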
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = {
+ .halt_reg = 0x8d058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8d058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_pcie_sf_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
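+
+/*
+ * The two GPLL0 feed-through branches above have no halt_reg at all;
+ * BRANCH_HALT_DELAY means there is no status bit to poll, so the code
+ * simply waits a fixed delay after toggling the vote bit.
+ */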
+
+static struct clk_branch gcc_gpu_iref_en = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_axi_clk = {
+ .halt_reg = 0x73008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x73008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_cfg_ahb_clk = {
+ .halt_reg = 0x73004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x73004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_cfg_ahb_clk = {
+ .halt_reg = 0x4d004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_dma_clk = {
+ .halt_reg = 0x4d00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_dma_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_phy_refgen_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_refgen_clk = {
+ .halt_reg = 0x6f030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie1_phy_refgen_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_phy_refgen_clk = {
+ .halt_reg = 0x6f034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie2_phy_refgen_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
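+
+/*
+ * BRANCH_HALT_SKIP: don't check the halt bit.  The PIPE clock is
+ * sourced from the PCIe PHY, which may be powered down, so the status
+ * bit cannot be trusted.
+ */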
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0x6024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0x601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0x602c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0x6014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0x6010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_mdm_clkref_en = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_mdm_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_wifi_clkref_en = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_wifi_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_wigig_clkref_en = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_wigig_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1713c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1726c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1739c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x175fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1772c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x1785c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
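+
+/*
+ * The per-SE QUP branches all follow the same pattern: a vote bit in
+ * 0x52008 gates the clock, and CLK_SET_RATE_PARENT hands rate requests
+ * straight to the matching RCG, so serial-engine drivers can simply
+ * call clk_set_rate() on the branch.
+ */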
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23138,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1826c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1839c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x185fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x23278,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x23270,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e26c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e39c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e5fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
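+
+/*
+ * Consumer-side sketch (hypothetical SDHCI host code, not part of this
+ * patch): because the branch sets CLK_SET_RATE_PARENT, a rate request
+ * propagates into gcc_sdcc2_apps_clk_src, which then selects the
+ * nearest row of ftbl_gcc_sdcc2_apps_clk_src:
+ *
+ *	clk = devm_clk_get(dev, "core");
+ *	clk_prepare_enable(clk);
+ *	clk_set_rate(clk, 202000000);	// picks the 202 MHz GPLL9 row
+ */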
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_tsif_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_1x_clkref_en = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_1x_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7509c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7509c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_reg = 0x75020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x75020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_reg = 0x750b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x750b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_reg = 0x7501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
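+
+/*
+ * The UFS RX/TX symbol clocks are generated by the UFS PHY rather than
+ * GCC, hence BRANCH_HALT_DELAY and the absence of any parent or rate
+ * control here.
+ */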
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xf05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_en = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x1005c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_2_gdsc = {
+ .gdscr = 0x6004,
+ .pd = {
+ .name = "pcie_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc = {
+ .gdscr = 0x7d06c,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_sm8250_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+ [GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
+ [GCC_NPU_BWMON_CFG_AHB_CLK] = &gcc_npu_bwmon_cfg_ahb_clk.clkr,
+ [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+ [GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
+ [GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr,
+ [GCC_PCIE2_PHY_REFGEN_CLK] = &gcc_pcie2_phy_refgen_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_MDM_CLKREF_EN] = &gcc_pcie_mdm_clkref_en.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PCIE_WIFI_CLKREF_EN] = &gcc_pcie_wifi_clkref_en.clkr,
+ [GCC_PCIE_WIGIG_CLKREF_EN] = &gcc_pcie_wigig_clkref_en.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_1X_CLKREF_EN] = &gcc_ufs_1x_clkref_en.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_EN] = &gcc_usb3_sec_clkref_en.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL9] = &gpll9.clkr,
+};
+
+static struct gdsc *gcc_sm8250_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_2_GDSC] = &pcie_2_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sm8250_resets[] = {
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_NPU_BWMON_BCR] = { 0x73000 },
+ [GCC_NPU_BCR] = { 0x4d000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e000 },
+ [GCC_PCIE_2_BCR] = { 0x6000 },
+ [GCC_PCIE_2_LINK_DOWN_BCR] = { 0x1f014 },
+ [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0x1f020 },
+ [GCC_PCIE_2_PHY_BCR] = { 0x1f01c },
+ [GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR] = { 0x1f028 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0xb024, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0xb028, 2 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+};
+
+static const struct regmap_config gcc_sm8250_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9c100,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8250_desc = {
+ .config = &gcc_sm8250_regmap_config,
+ .clks = gcc_sm8250_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8250_clocks),
+ .resets = gcc_sm8250_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8250_resets),
+ .gdscs = gcc_sm8250_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8250_gdscs),
+};
+
+static const struct of_device_id gcc_sm8250_match_table[] = {
+ { .compatible = "qcom,gcc-sm8250" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8250_match_table);
+
+static int gcc_sm8250_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8250_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Disable the GPLL0 active input to NPU and GPU
+ * via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /*
+ * Keep the clocks always-ON
+ * GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK, GCC_DISP_AHB_CLK,
+ * GCC_CPUSS_DVM_BUS_CLK, GCC_GPU_CFG_AHB_CLK,
+ * GCC_SYS_NOC_CPUSS_AHB_CLK
+ */
+ regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0)); /* GCC_VIDEO_AHB_CLK */
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0)); /* GCC_CAMERA_AHB_CLK */
+ regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0)); /* GCC_DISP_AHB_CLK */
+ regmap_update_bits(regmap, 0x4818c, BIT(0), BIT(0)); /* GCC_CPUSS_DVM_BUS_CLK */
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); /* GCC_GPU_CFG_AHB_CLK */
+ regmap_update_bits(regmap, 0x52000, BIT(0), BIT(0)); /* GCC_SYS_NOC_CPUSS_AHB_CLK */
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_sm8250_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8250_driver = {
+ .probe = gcc_sm8250_probe,
+ .driver = {
+ .name = "gcc-sm8250",
+ .of_match_table = gcc_sm8250_match_table,
+ },
+};
+
+static int __init gcc_sm8250_init(void)
+{
+ return platform_driver_register(&gcc_sm8250_driver);
+}
+subsys_initcall(gcc_sm8250_init);
+
+static void __exit gcc_sm8250_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8250_driver);
+}
+module_exit(gcc_sm8250_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8250 Driver");
+MODULE_LICENSE("GPL v2");
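/*
 * [Editor's sketch - not part of the patch above] With gcc-sm8250
 * registered, a peripheral driver consumes these clocks through the
 * common clk API. The "core" con-id and example_enable_core_clk() are
 * illustrative assumptions, not names from this driver.
 */
#include <linux/clk.h>

static int example_enable_core_clk(struct device *dev)
{
	struct clk *core;

	/* Look up the clock by the con-id wired up in the consumer's DT node */
	core = devm_clk_get(dev, "core");
	if (IS_ERR(core))
		return PTR_ERR(core);

	/* Ungate the branch; clk_branch2_ops polls the halt bit for us */
	return clk_prepare_enable(core);
}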
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index a96c0b945de2..7b656b6aeced 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -170,8 +170,45 @@ static struct gdsc cx_gdsc = {
.flags = VOTABLE,
};
+/*
+ * On SC7180 the GPU GX domain is *almost* entirely controlled by the GMU
+ * running in the CX domain so the CPU doesn't need to know anything about the
+ * GX domain EXCEPT....
+ *
+ * Hardware constraints dictate that the GX be powered down before the CX. If
+ * the GMU crashes it could leave the GX on. In order to successfully bring back
+ * the device the CPU needs to disable the GX headswitch. There being no sane
+ * way to reach in and touch that register from deep inside the GPU driver we
+ * need to set up the infrastructure so that the CPU can ensure the GX is
+ * off during this super special case. We do this by
+ * defining a GX gdsc with a dummy enable function and a "default" disable
+ * function.
+ *
+ * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
+ * driver. During power up, nothing will happen from the CPU (the GMU will
+ * power up normally), but during power down this will ensure that the GX
+ * domain is *really* off - this gives us a semi-standard way of doing what
+ * we need.
+ */
+static int gx_gdsc_enable(struct generic_pm_domain *domain)
+{
+ /* Do nothing but give genpd the impression that we were successful */
+ return 0;
+}
+
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gx_gdsc",
+ .power_on = gx_gdsc_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
static struct gdsc *gpu_cc_sc7180_gdscs[] = {
[CX_GDSC] = &cx_gdsc,
+ [GX_GDSC] = &gx_gdsc,
};
static struct clk_regmap *gpu_cc_sc7180_clocks[] = {
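/*
 * [Editor's sketch - not part of the patch above] The comment in this
 * hunk says the GPU driver attaches with genpd_dev_pm_attach_by_name().
 * Roughly like this; the "gx" domain name and the gpu_attach_gx_domain()
 * helper are illustrative assumptions:
 */
#include <linux/pm_domain.h>

static struct device *gpu_attach_gx_domain(struct device *dev)
{
	/*
	 * Powering the returned virtual device on is a no-op thanks to the
	 * dummy gx_gdsc_enable() above; powering it off really drops the GX
	 * headswitch, which is the whole point of this special case.
	 */
	return genpd_dev_pm_attach_by_name(dev, "gx");
}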
diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c
new file mode 100644
index 000000000000..673fa1a4f734
--- /dev/null
+++ b/drivers/clk/qcom/mss-sc7180.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,mss-sc7180.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+
+static struct clk_branch mss_axi_nav_clk = {
+ .halt_reg = 0x20bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mss_axi_nav_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "gcc_mss_nav_axi",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mss_axi_crypto_clk = {
+ .halt_reg = 0x20cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mss_axi_crypto_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "gcc_mss_mfab_axis",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct regmap_config mss_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = 0x41aa0cc,
+};
+
+static struct clk_regmap *mss_sc7180_clocks[] = {
+ [MSS_AXI_CRYPTO_CLK] = &mss_axi_crypto_clk.clkr,
+ [MSS_AXI_NAV_CLK] = &mss_axi_nav_clk.clkr,
+};
+
+static const struct qcom_cc_desc mss_sc7180_desc = {
+ .config = &mss_regmap_config,
+ .clks = mss_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(mss_sc7180_clocks),
+};
+
+static int mss_sc7180_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, "cfg_ahb");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
+ if (ret < 0)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int mss_sc7180_remove(struct platform_device *pdev)
+{
+ pm_clk_destroy(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mss_sc7180_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id mss_sc7180_match_table[] = {
+ { .compatible = "qcom,sc7180-mss" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mss_sc7180_match_table);
+
+static struct platform_driver mss_sc7180_driver = {
+ .probe = mss_sc7180_probe,
+ .remove = mss_sc7180_remove,
+ .driver = {
+ .name = "sc7180-mss",
+ .of_match_table = mss_sc7180_match_table,
+ .pm = &mss_sc7180_pm_ops,
+ },
+};
+
+static int __init mss_sc7180_init(void)
+{
+ return platform_driver_register(&mss_sc7180_driver);
+}
+subsys_initcall(mss_sc7180_init);
+
+static void __exit mss_sc7180_exit(void)
+{
+ platform_driver_unregister(&mss_sc7180_driver);
+}
+module_exit(mss_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI MSS SC7180 Driver");
+MODULE_LICENSE("GPL v2");
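/*
 * [Editor's sketch - not part of the patch above] With pm_clk wired into
 * the runtime-PM ops, runtime-resuming this device enables the "cfg_ahb"
 * clock automatically; mss_touch_registers() is an illustrative
 * assumption.
 */
static int mss_touch_registers(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* pm_clk_resume() ungates cfg_ahb */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ...safe to touch the MSS clock registers here... */

	pm_runtime_put(dev);		/* pm_clk_suspend() gates it again */
	return 0;
}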
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 250d8165167a..ac2dd92ce2ef 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -20,7 +20,7 @@ config CLK_RENESAS
select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
select CLK_R8A7792 if ARCH_R8A7792
select CLK_R8A7794 if ARCH_R8A7794
- select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951 || ARCH_R8A7795
+ select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951
select CLK_R8A77960 if ARCH_R8A77960
select CLK_R8A77961 if ARCH_R8A77961
select CLK_R8A77965 if ARCH_R8A77965
@@ -161,6 +161,7 @@ config CLK_RCAR_GEN3_CPG
config CLK_RCAR_USB2_CLOCK_SEL
bool "Renesas R-Car USB2 clock selector support"
depends on ARCH_RENESAS || COMPILE_TEST
+ select RESET_CONTROLLER
help
This is a driver for R-Car USB2 clock selector
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index fbc8c75f4314..ff5b3020cb03 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -44,6 +44,7 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -70,6 +71,12 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A7795_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A7795_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A7795_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -242,6 +249,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("can-fd", 914, R8A7795_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A7795_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A7795_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A7795_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A7795_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A7795_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A7795_CLK_CP),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index e8420d3ada94..e8d466dbc7f9 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -46,6 +46,7 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -72,6 +73,12 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A7796_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A7796_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A7796_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -105,6 +112,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, CLK_SDSRC, 0x26c),
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cr", R8A7796_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A7796_CLK_CPEX, CLK_EXTAL, 2, 1),
@@ -132,6 +140,7 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
+ DEF_MOD("sceg-pub", 229, R8A7796_CLK_CR),
DEF_MOD("cmt3", 300, R8A7796_CLK_R),
DEF_MOD("cmt2", 301, R8A7796_CLK_R),
DEF_MOD("cmt1", 302, R8A7796_CLK_R),
@@ -215,6 +224,7 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("can-fd", 914, R8A7796_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A7796_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A7796_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A7796_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A7796_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A7796_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A7796_CLK_CP),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index b3af4da2ca74..7a05a2fc1cc6 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -43,6 +43,7 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -68,6 +69,12 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A77965_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A77965_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A77965_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -99,7 +106,8 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, CLK_SDSRC, 0x268),
DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, CLK_SDSRC, 0x26c),
- DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cr", R8A77965_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A77965_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A77965_CLK_CPEX, CLK_EXTAL, 2, 1),
@@ -127,6 +135,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
+ DEF_MOD("sceg-pub", 229, R8A77965_CLK_CR),
DEF_MOD("cmt3", 300, R8A77965_CLK_R),
DEF_MOD("cmt2", 301, R8A77965_CLK_R),
@@ -215,6 +224,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A77965_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A77965_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A77965_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A77965_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A77965_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A77965_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A77965_CLK_CP),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index ceabf55c21c2..8eda2e3e2480 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -105,6 +105,7 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, CLK_SDSRC, 0x026c),
DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cr", R8A77990_CLK_CR, CLK_PLL1D2, 2, 1),
DEF_FIXED("cp", R8A77990_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A77990_CLK_CPEX, CLK_EXTAL, 4, 1),
@@ -135,6 +136,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("sys-dmac2", 217, R8A77990_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A77990_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A77990_CLK_S3D1),
+ DEF_MOD("sceg-pub", 229, R8A77990_CLK_CR),
DEF_MOD("cmt3", 300, R8A77990_CLK_R),
DEF_MOD("cmt2", 301, R8A77990_CLK_R),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 962bb337f2e7..056ebf3e70e2 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -91,6 +91,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_FIXED("s3d4", R8A77995_CLK_S3D4, CLK_S3, 4, 1),
DEF_FIXED("cl", R8A77995_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cr", R8A77995_CLK_CR, CLK_PLL1D2, 2, 1),
DEF_FIXED("cp", R8A77995_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A77995_CLK_CPEX, CLK_EXTAL, 4, 1),
@@ -122,6 +123,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("sys-dmac2", 217, R8A77995_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A77995_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A77995_CLK_S3D1),
+ DEF_MOD("sceg-pub", 229, R8A77995_CLK_CR),
DEF_MOD("cmt3", 300, R8A77995_CLK_R),
DEF_MOD("cmt2", 301, R8A77995_CLK_R),
DEF_MOD("cmt1", 302, R8A77995_CLK_R),
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index b97f5f9326cf..d4c02986c34e 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#define USB20_CLKSET0 0x00
@@ -26,9 +27,16 @@
#define CLKSET0_PRIVATE BIT(0)
#define CLKSET0_EXTAL_ONLY (CLKSET0_INTCLK_EN | CLKSET0_PRIVATE)
+static const struct clk_bulk_data rcar_usb2_clocks[] = {
+ { .id = "ehci_ohci", },
+ { .id = "hs-usb-if", },
+};
+
struct usb2_clock_sel_priv {
void __iomem *base;
struct clk_hw hw;
+ struct clk_bulk_data clks[ARRAY_SIZE(rcar_usb2_clocks)];
+ struct reset_control *rsts;
bool extal;
bool xtal;
};
@@ -53,14 +61,32 @@ static void usb2_clock_sel_disable_extal_only(struct usb2_clock_sel_priv *priv)
static int usb2_clock_sel_enable(struct clk_hw *hw)
{
- usb2_clock_sel_enable_extal_only(to_priv(hw));
+ struct usb2_clock_sel_priv *priv = to_priv(hw);
+ int ret;
+
+ ret = reset_control_deassert(priv->rsts);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clks), priv->clks);
+ if (ret) {
+ reset_control_assert(priv->rsts);
+ return ret;
+ }
+
+ usb2_clock_sel_enable_extal_only(priv);
return 0;
}
static void usb2_clock_sel_disable(struct clk_hw *hw)
{
- usb2_clock_sel_disable_extal_only(to_priv(hw));
+ struct usb2_clock_sel_priv *priv = to_priv(hw);
+
+ usb2_clock_sel_disable_extal_only(priv);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clks), priv->clks);
+ reset_control_assert(priv->rsts);
}
/*
@@ -119,6 +145,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
struct usb2_clock_sel_priv *priv;
struct clk *clk;
struct clk_init_data init;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -128,6 +155,15 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ memcpy(priv->clks, rcar_usb2_clocks, sizeof(priv->clks));
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clks), priv->clks);
+ if (ret < 0)
+ return ret;
+
+ priv->rsts = devm_reset_control_array_get(dev, true, false);
+ if (IS_ERR(priv->rsts))
+ return PTR_ERR(priv->rsts);
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 4abe7ff31f53..975454a3dd72 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -51,9 +51,9 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
u16 degrees;
u32 delay_num = 0;
- /* See the comment for rockchip_mmc_set_phase below */
+ /* Constant signal, no measurable phase shift */
if (!rate)
- return -EINVAL;
+ return 0;
raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index d17cfb7a3ff4..d7243c09cc84 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" };
PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" };
-PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" };
-
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
@@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
RK2928_CLKGATE_CON(2), 8, GFLAGS),
- GATE(0, "cpll_gpu", "cpll", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "gpll_gpu", "gpll", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "hdmiphy_gpu", "hdmiphy", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "usb480m_gpu", "usb480m", 0,
+ COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0,
+ RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 13, GFLAGS),
- COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0,
- RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS),
COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
@@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
/* PD_GPU */
- GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
- GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
+ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+ GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
/* PD_BUS */
GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index dad31308c071..1949ae7851b2 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -356,10 +356,6 @@ struct samsung_clk_provider * __init samsung_cmu_register_one(
}
ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
- if (!ctx) {
- panic("%s: unable to allocate ctx\n", __func__);
- return ctx;
- }
if (cmu->pll_clks)
samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index 54a464fa63e0..8be4722f6064 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -65,54 +65,49 @@ static const struct clk_ops dbgclk_ops = {
.get_parent = socfpga_gate_get_parent,
};
-struct clk *s10_register_gate(const char *name, const char *parent_name,
- const char * const *parent_names,
- u8 num_parents, unsigned long flags,
- void __iomem *regbase, unsigned long gate_reg,
- unsigned long gate_idx, unsigned long div_reg,
- unsigned long div_offset, u8 div_width,
- unsigned long bypass_reg, u8 bypass_shift,
- u8 fixed_div)
+struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __iomem *regbase)
{
struct clk *clk;
struct socfpga_gate_clk *socfpga_clk;
struct clk_init_data init;
+ const char * const *parent_names = clks->parent_names;
+ const char *parent_name = clks->parent_name;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (!socfpga_clk)
return NULL;
- socfpga_clk->hw.reg = regbase + gate_reg;
- socfpga_clk->hw.bit_idx = gate_idx;
+ socfpga_clk->hw.reg = regbase + clks->gate_reg;
+ socfpga_clk->hw.bit_idx = clks->gate_idx;
gateclk_ops.enable = clk_gate_ops.enable;
gateclk_ops.disable = clk_gate_ops.disable;
- socfpga_clk->fixed_div = fixed_div;
+ socfpga_clk->fixed_div = clks->fixed_div;
- if (div_reg)
- socfpga_clk->div_reg = regbase + div_reg;
+ if (clks->div_reg)
+ socfpga_clk->div_reg = regbase + clks->div_reg;
else
socfpga_clk->div_reg = NULL;
- socfpga_clk->width = div_width;
- socfpga_clk->shift = div_offset;
+ socfpga_clk->width = clks->div_width;
+ socfpga_clk->shift = clks->div_offset;
- if (bypass_reg)
- socfpga_clk->bypass_reg = regbase + bypass_reg;
+ if (clks->bypass_reg)
+ socfpga_clk->bypass_reg = regbase + clks->bypass_reg;
else
socfpga_clk->bypass_reg = NULL;
- socfpga_clk->bypass_shift = bypass_shift;
+ socfpga_clk->bypass_shift = clks->bypass_shift;
- if (streq(name, "cs_pdbg_clk"))
+ if (streq(clks->name, "cs_pdbg_clk"))
init.ops = &dbgclk_ops;
else
init.ops = &gateclk_ops;
- init.name = name;
- init.flags = flags;
+ init.name = clks->name;
+ init.flags = clks->flags;
- init.num_parents = num_parents;
+ init.num_parents = clks->num_parents;
init.parent_names = parent_names ? parent_names : &parent_name;
socfpga_clk->hw.hw.init = &init;
@@ -121,6 +116,5 @@ struct clk *s10_register_gate(const char *name, const char *parent_name,
kfree(socfpga_clk);
return NULL;
}
-
return clk;
}
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 1a191eeeebba..dd6d4056e9de 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -73,26 +73,27 @@ static const struct clk_ops peri_cnt_clk_ops = {
.get_parent = clk_periclk_get_parent,
};
-struct clk *s10_register_periph(const char *name, const char *parent_name,
- const char * const *parent_names,
- u8 num_parents, unsigned long flags,
- void __iomem *reg, unsigned long offset)
+struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks,
+ void __iomem *reg)
{
struct clk *clk;
struct socfpga_periph_clk *periph_clk;
struct clk_init_data init;
+ const char *name = clks->name;
+ const char *parent_name = clks->parent_name;
+ const char * const *parent_names = clks->parent_names;
periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
if (WARN_ON(!periph_clk))
return NULL;
- periph_clk->hw.reg = reg + offset;
+ periph_clk->hw.reg = reg + clks->offset;
init.name = name;
init.ops = &peri_c_clk_ops;
- init.flags = flags;
+ init.flags = clks->flags;
- init.num_parents = num_parents;
+ init.num_parents = clks->num_parents;
init.parent_names = parent_names ? parent_names : &parent_name;
periph_clk->hw.hw.init = &init;
@@ -105,38 +106,37 @@ struct clk *s10_register_periph(const char *name, const char *parent_name,
return clk;
}
-struct clk *s10_register_cnt_periph(const char *name, const char *parent_name,
- const char * const *parent_names,
- u8 num_parents, unsigned long flags,
- void __iomem *regbase, unsigned long offset,
- u8 fixed_divider, unsigned long bypass_reg,
- unsigned long bypass_shift)
+struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks,
+ void __iomem *regbase)
{
struct clk *clk;
struct socfpga_periph_clk *periph_clk;
struct clk_init_data init;
+ const char *name = clks->name;
+ const char *parent_name = clks->parent_name;
+ const char * const *parent_names = clks->parent_names;
periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
if (WARN_ON(!periph_clk))
return NULL;
- if (offset)
- periph_clk->hw.reg = regbase + offset;
+ if (clks->offset)
+ periph_clk->hw.reg = regbase + clks->offset;
else
periph_clk->hw.reg = NULL;
- if (bypass_reg)
- periph_clk->bypass_reg = regbase + bypass_reg;
+ if (clks->bypass_reg)
+ periph_clk->bypass_reg = regbase + clks->bypass_reg;
else
periph_clk->bypass_reg = NULL;
- periph_clk->bypass_shift = bypass_shift;
- periph_clk->fixed_div = fixed_divider;
+ periph_clk->bypass_shift = clks->bypass_shift;
+ periph_clk->fixed_div = clks->fixed_divider;
init.name = name;
init.ops = &peri_cnt_clk_ops;
- init.flags = flags;
+ init.flags = clks->flags;
- init.num_parents = num_parents;
+ init.num_parents = clks->num_parents;
init.parent_names = parent_names ? parent_names : &parent_name;
periph_clk->hw.hw.init = &init;
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 4705eb544f01..a301bb22f36c 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -39,7 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
/* read VCO1 reg for numerator and denominator */
reg = readl(socfpgaclk->hw.reg);
refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
- vco_freq = (unsigned long long)parent_rate / refdiv;
+
+ vco_freq = parent_rate;
+ do_div(vco_freq, refdiv);
/* Read mdiv and fdiv from the fdbck register */
reg = readl(socfpgaclk->hw.reg + 0x4);
@@ -108,19 +110,20 @@ static struct clk_ops clk_boot_ops = {
.prepare = clk_pll_prepare,
};
-struct clk *s10_register_pll(const char *name, const char * const *parent_names,
- u8 num_parents, unsigned long flags,
- void __iomem *reg, unsigned long offset)
+struct clk *s10_register_pll(const struct stratix10_pll_clock *clks,
+ void __iomem *reg)
{
struct clk *clk;
struct socfpga_pll *pll_clk;
struct clk_init_data init;
+ const char *name = clks->name;
+ const char * const *parent_names = clks->parent_names;
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (WARN_ON(!pll_clk))
return NULL;
- pll_clk->hw.reg = reg + offset;
+ pll_clk->hw.reg = reg + clks->offset;
if (streq(name, SOCFPGA_BOOT_CLK))
init.ops = &clk_boot_ops;
@@ -128,9 +131,9 @@ struct clk *s10_register_pll(const char *name, const char * const *parent_names,
init.ops = &clk_pll_ops;
init.name = name;
- init.flags = flags;
+ init.flags = clks->flags;
- init.num_parents = num_parents;
+ init.num_parents = clks->num_parents;
init.parent_names = parent_names;
pll_clk->hw.hw.init = &init;
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 993f3a73c71e..dea7c6c7d269 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -177,9 +177,7 @@ static int s10_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
int i;
for (i = 0; i < nums; i++) {
- clk = s10_register_periph(clks[i].name, clks[i].parent_name,
- clks[i].parent_names, clks[i].num_parents,
- clks[i].flags, base, clks[i].offset);
+ clk = s10_register_periph(&clks[i], base);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
@@ -198,14 +196,7 @@ static int s10_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *cl
int i;
for (i = 0; i < nums; i++) {
- clk = s10_register_cnt_periph(clks[i].name, clks[i].parent_name,
- clks[i].parent_names,
- clks[i].num_parents,
- clks[i].flags, base,
- clks[i].offset,
- clks[i].fixed_divider,
- clks[i].bypass_reg,
- clks[i].bypass_shift);
+ clk = s10_register_cnt_periph(&clks[i], base);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
@@ -225,16 +216,7 @@ static int s10_clk_register_gate(const struct stratix10_gate_clock *clks,
int i;
for (i = 0; i < nums; i++) {
- clk = s10_register_gate(clks[i].name, clks[i].parent_name,
- clks[i].parent_names,
- clks[i].num_parents,
- clks[i].flags, base,
- clks[i].gate_reg,
- clks[i].gate_idx, clks[i].div_reg,
- clks[i].div_offset, clks[i].div_width,
- clks[i].bypass_reg,
- clks[i].bypass_shift,
- clks[i].fixed_div);
+ clk = s10_register_gate(&clks[i], base);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
@@ -254,10 +236,7 @@ static int s10_clk_register_pll(const struct stratix10_pll_clock *clks,
int i;
for (i = 0; i < nums; i++) {
- clk = s10_register_pll(clks[i].name, clks[i].parent_names,
- clks[i].num_parents,
- clks[i].flags, base,
- clks[i].offset);
+ clk = s10_register_pll(&clks[i], base);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
index e8e121907952..fcabef42249c 100644
--- a/drivers/clk/socfpga/stratix10-clk.h
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -60,21 +60,12 @@ struct stratix10_gate_clock {
u8 fixed_div;
};
-struct clk *s10_register_pll(const char *, const char *const *, u8,
- unsigned long, void __iomem *, unsigned long);
-
-struct clk *s10_register_periph(const char *, const char *,
- const char * const *, u8, unsigned long,
- void __iomem *, unsigned long);
-struct clk *s10_register_cnt_periph(const char *, const char *,
- const char * const *, u8,
- unsigned long, void __iomem *,
- unsigned long, u8, unsigned long,
- unsigned long);
-struct clk *s10_register_gate(const char *, const char *,
- const char * const *, u8,
- unsigned long, void __iomem *,
- unsigned long, unsigned long,
- unsigned long, unsigned long, u8,
- unsigned long, u8, u8);
+struct clk *s10_register_pll(const struct stratix10_pll_clock *,
+ void __iomem *);
+struct clk *s10_register_periph(const struct stratix10_perip_c_clock *,
+ void __iomem *);
+struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *,
+ void __iomem *);
+struct clk *s10_register_gate(const struct stratix10_gate_clock *,
+ void __iomem *);
#endif /* __STRATIX10_CLK_H */
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 3c219af25100..e18c80fbe804 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -13,4 +13,12 @@ config SPRD_SC9860_CLK
tristate "Support for the Spreadtrum SC9860 clocks"
depends on (ARM64 && ARCH_SPRD) || COMPILE_TEST
default ARM64 && ARCH_SPRD
+
+config SPRD_SC9863A_CLK
+ tristate "Support for the Spreadtrum SC9863A clocks"
+ depends on (ARM64 && ARCH_SPRD) || COMPILE_TEST
+ default ARM64 && ARCH_SPRD
+ help
+ Support for the global clock controller on sc9863a devices.
+ Say Y if you want to use peripheral devices on the sc9863a SoC.
endif
diff --git a/drivers/clk/sprd/Makefile b/drivers/clk/sprd/Makefile
index d4c00788d53c..41d90e0d7863 100644
--- a/drivers/clk/sprd/Makefile
+++ b/drivers/clk/sprd/Makefile
@@ -10,3 +10,4 @@ clk-sprd-y += pll.o
## SoC support
obj-$(CONFIG_SPRD_SC9860_CLK) += sc9860-clk.o
+obj-$(CONFIG_SPRD_SC9863A_CLK) += sc9863a-clk.o
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index c0af4779892b..d620bbbcdfc8 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -40,7 +40,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
const struct sprd_clk_desc *desc)
{
void __iomem *base;
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct regmap *regmap;
if (of_find_property(node, "sprd,syscon", NULL)) {
@@ -49,6 +50,13 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
pr_err("%s: failed to get syscon regmap\n", __func__);
return PTR_ERR(regmap);
}
+ } else if (of_device_is_compatible(of_get_parent(dev->of_node),
+ "syscon")) {
+ regmap = device_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get regmap from its parent.\n");
+ return PTR_ERR(regmap);
+ }
} else {
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
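
With the fallback added above, the regmap source is tried in order: an explicit "sprd,syscon" phandle, a parent node compatible with "syscon", and finally the device's own MMIO resource. A hedged sketch of a probe built on top of this (sprd_clk_probe and device_get_match_data come from the sprd common code and the driver core respectively; neither is part of this hunk):

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_clk_probe(struct platform_device *pdev)
{
	const struct sprd_clk_desc *desc;
	int ret;

	desc = device_get_match_data(&pdev->dev);
	if (!desc)
		return -ENODEV;

	/* Resolves the regmap via phandle, "syscon" parent or own MMIO. */
	ret = sprd_clk_regmap_init(pdev, desc);
	if (ret)
		return ret;

	return sprd_clk_probe(&pdev->dev, desc->hw_clks);
}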
diff --git a/drivers/clk/sprd/composite.h b/drivers/clk/sprd/composite.h
index 04ab3f587ee2..adbabbe596b7 100644
--- a/drivers/clk/sprd/composite.h
+++ b/drivers/clk/sprd/composite.h
@@ -18,26 +18,43 @@ struct sprd_comp {
struct sprd_clk_common common;
};
-#define SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, _table, \
- _mshift, _mwidth, _dshift, _dwidth, _flags) \
+#define SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, _dwidth, \
+ _flags, _fn) \
struct sprd_comp _struct = { \
.mux = _SPRD_MUX_CLK(_mshift, _mwidth, _table), \
.div = _SPRD_DIV_CLK(_dshift, _dwidth), \
.common = { \
.regmap = NULL, \
.reg = _reg, \
- .hw.init = CLK_HW_INIT_PARENTS(_name, \
- _parent, \
- &sprd_comp_ops, \
- _flags), \
+ .hw.init = _fn(_name, _parent, \
+ &sprd_comp_ops, _flags), \
} \
}
-#define SPRD_COMP_CLK(_struct, _name, _parent, _reg, _mshift, \
- _mwidth, _dshift, _dwidth, _flags) \
- SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, \
- NULL, _mshift, _mwidth, \
- _dshift, _dwidth, _flags)
+#define SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, _dwidth, _flags) \
+ SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, _dwidth, \
+ _flags, CLK_HW_INIT_PARENTS)
+
+#define SPRD_COMP_CLK(_struct, _name, _parent, _reg, _mshift, \
+ _mwidth, _dshift, _dwidth, _flags) \
+ SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, NULL, \
+ _mshift, _mwidth, _dshift, _dwidth, _flags)
+
+#define SPRD_COMP_CLK_DATA_TABLE(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, \
+ _dwidth, _flags) \
+ SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, _dwidth, \
+ _flags, CLK_HW_INIT_PARENTS_DATA)
+
+#define SPRD_COMP_CLK_DATA(_struct, _name, _parent, _reg, _mshift, \
+ _mwidth, _dshift, _dwidth, _flags) \
+ SPRD_COMP_CLK_DATA_TABLE(_struct, _name, _parent, _reg, NULL, \
+ _mshift, _mwidth, _dshift, _dwidth, \
+ _flags)
static inline struct sprd_comp *hw_to_sprd_comp(const struct clk_hw *hw)
{
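
This header (like div.h, gate.h, mux.h and pll.h below) now routes every variant through a single *_HW_INIT_FN macro that takes the CLK_HW_INIT-family initializer as its _fn argument, so each parent-addressing flavour is a one-line wrapper rather than a second copy of the whole struct initializer. A sketch of the new _DATA flavour with a mixed parent table (example_comp, the 0x100 offset and the shift/width values are invented for illustration; twpll_384m stands for an internal clk_hw such as those defined in the sc9863a driver below):

static const struct clk_parent_data example_parents[] = {
	{ .fw_name = "ext-26m" },	/* resolved through DT clock-names */
	{ .hw = &twpll_384m.hw },	/* direct link to an internal clk_hw */
};
static SPRD_COMP_CLK_DATA(example_comp, "example-comp", example_parents,
			  0x100, 0, 1, 8, 3, 0);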
diff --git a/drivers/clk/sprd/div.h b/drivers/clk/sprd/div.h
index 87510e3d0e14..6acfe6b179fc 100644
--- a/drivers/clk/sprd/div.h
+++ b/drivers/clk/sprd/div.h
@@ -35,20 +35,28 @@ struct sprd_div {
struct sprd_clk_common common;
};
-#define SPRD_DIV_CLK(_struct, _name, _parent, _reg, \
- _shift, _width, _flags) \
+#define SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags, _fn) \
struct sprd_div _struct = { \
.div = _SPRD_DIV_CLK(_shift, _width), \
.common = { \
.regmap = NULL, \
.reg = _reg, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &sprd_div_ops, \
- _flags), \
+ .hw.init = _fn(_name, _parent, \
+ &sprd_div_ops, _flags), \
} \
}
+#define SPRD_DIV_CLK(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags) \
+ SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags, CLK_HW_INIT)
+
+#define SPRD_DIV_CLK_HW(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags) \
+ SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags, CLK_HW_INIT_HW)
+
static inline struct sprd_div *hw_to_sprd_div(const struct clk_hw *hw)
{
struct sprd_clk_common *common = hw_to_sprd_clk_common(hw);
diff --git a/drivers/clk/sprd/gate.c b/drivers/clk/sprd/gate.c
index f59d1936b412..574cfc116bbc 100644
--- a/drivers/clk/sprd/gate.c
+++ b/drivers/clk/sprd/gate.c
@@ -79,6 +79,17 @@ static int sprd_sc_gate_enable(struct clk_hw *hw)
return 0;
}
+
+static int sprd_pll_sc_gate_prepare(struct clk_hw *hw)
+{
+ struct sprd_gate *sg = hw_to_sprd_gate(hw);
+
+ clk_sc_gate_toggle(sg, true);
+ udelay(sg->udelay);
+
+ return 0;
+}
+
static int sprd_gate_is_enabled(struct clk_hw *hw)
{
struct sprd_gate *sg = hw_to_sprd_gate(hw);
@@ -109,3 +120,9 @@ const struct clk_ops sprd_sc_gate_ops = {
};
EXPORT_SYMBOL_GPL(sprd_sc_gate_ops);
+const struct clk_ops sprd_pll_sc_gate_ops = {
+ .unprepare = sprd_sc_gate_disable,
+ .prepare = sprd_pll_sc_gate_prepare,
+ .is_enabled = sprd_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(sprd_pll_sc_gate_ops);
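
Note that sprd_pll_sc_gate_ops hooks the gate toggle into .prepare/.unprepare rather than .enable/.disable, so the post-enable settle time (sg->udelay, 240 us for the PLL gates registered later in this patch) is spent in the sleepable prepare path instead of under the enable spinlock with interrupts off. From a consumer's point of view (sketch):

#include <linux/clk.h>

/*
 * clk_prepare_enable() first runs the provider's .prepare callback
 * (sprd_pll_sc_gate_prepare above: toggle the gate, then busy-wait
 * sg->udelay microseconds for the PLL to settle); .enable is not
 * implemented by these ops, so the enable step is a no-op.
 */
static int example_enable_pll(struct clk *pll)
{
	return clk_prepare_enable(pll);
}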
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index dc352ea55e1f..b55817869367 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -14,37 +14,136 @@ struct sprd_gate {
u32 enable_mask;
u16 flags;
u16 sc_offset;
+ u16 udelay;
struct sprd_clk_common common;
};
-#define SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \
- _enable_mask, _flags, _gate_flags, _ops) \
+#define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops, _fn) \
struct sprd_gate _struct = { \
.enable_mask = _enable_mask, \
.sc_offset = _sc_offset, \
.flags = _gate_flags, \
+ .udelay = _udelay, \
.common = { \
.regmap = NULL, \
.reg = _reg, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- _ops, \
- _flags), \
+ .hw.init = _fn(_name, _parent, \
+ _ops, _flags), \
} \
}
+#define SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops) \
+ SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops, CLK_HW_INIT)
+
+#define SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \
+ _enable_mask, _flags, _gate_flags, _ops) \
+ SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, 0, _ops)
+
+#define SPRD_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \
+ _enable_mask, _flags, _gate_flags) \
+ SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \
+ _enable_mask, _flags, _gate_flags, \
+ &sprd_sc_gate_ops)
+
#define SPRD_GATE_CLK(_struct, _name, _parent, _reg, \
_enable_mask, _flags, _gate_flags) \
SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, 0, \
_enable_mask, _flags, _gate_flags, \
&sprd_gate_ops)
-#define SPRD_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \
- _enable_mask, _flags, _gate_flags) \
- SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \
+#define SPRD_PLL_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \
_enable_mask, _flags, _gate_flags, \
- &sprd_sc_gate_ops)
+ _udelay) \
+ SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, \
+ &sprd_pll_sc_gate_ops)
+
+#define SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, \
+ _flags, _gate_flags, \
+ _udelay, _ops) \
+ SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops, \
+ CLK_HW_INIT_HW)
+
+#define SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _ops) \
+ SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, \
+ _flags, _gate_flags, 0, _ops)
+
+#define SPRD_SC_GATE_CLK_HW(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags) \
+ SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, &sprd_sc_gate_ops)
+
+#define SPRD_GATE_CLK_HW(_struct, _name, _parent, _reg, \
+ _enable_mask, _flags, _gate_flags) \
+ SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, 0, \
+ _enable_mask, _flags, _gate_flags, \
+ &sprd_gate_ops)
+
+#define SPRD_PLL_SC_GATE_CLK_HW(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay) \
+ SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, \
+ _flags, _gate_flags, _udelay, \
+ &sprd_pll_sc_gate_ops)
+
+#define SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \
+ _reg, _sc_offset, \
+ _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops) \
+ SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops, \
+ CLK_HW_INIT_FW_NAME)
+
+#define SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _ops) \
+ SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \
+ _reg, _sc_offset, \
+ _enable_mask, _flags, \
+ _gate_flags, 0, _ops)
+
+#define SPRD_SC_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags) \
+ SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, &sprd_sc_gate_ops)
+
+#define SPRD_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \
+ _enable_mask, _flags, _gate_flags) \
+ SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, 0, \
+ _enable_mask, _flags, _gate_flags, \
+ &sprd_gate_ops)
+
+#define SPRD_PLL_SC_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay) \
+ SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \
+ _reg, _sc_offset, \
+ _enable_mask, _flags, \
+ _gate_flags, _udelay, \
+ &sprd_pll_sc_gate_ops)
static inline struct sprd_gate *hw_to_sprd_gate(const struct clk_hw *hw)
{
@@ -55,5 +154,6 @@ static inline struct sprd_gate *hw_to_sprd_gate(const struct clk_hw *hw)
extern const struct clk_ops sprd_gate_ops;
extern const struct clk_ops sprd_sc_gate_ops;
+extern const struct clk_ops sprd_pll_sc_gate_ops;
#endif /* _SPRD_GATE_H_ */
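
The gate macros now exist in three parent flavours: the legacy global-name form (CLK_HW_INIT), a direct struct clk_hw pointer form (*_HW, CLK_HW_INIT_HW) and a DT firmware-name form (*_FW_NAME, CLK_HW_INIT_FW_NAME). A sketch with one declaration per flavour (the foo/bar/baz names, offsets and bits are invented; parent_hw is a placeholder for some internal clock's clk_hw):

static struct clk_hw parent_hw;	/* placeholder for an internal clock's hw */

static SPRD_SC_GATE_CLK(foo_eb, "foo-eb", "ext-26m", 0x0,
			0x1000, BIT(0), 0, 0);
static SPRD_SC_GATE_CLK_HW(bar_eb, "bar-eb", &parent_hw, 0x0,
			   0x1000, BIT(1), 0, 0);
static SPRD_SC_GATE_CLK_FW_NAME(baz_eb, "baz-eb", "ext-26m", 0x0,
				0x1000, BIT(2), 0, 0);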
diff --git a/drivers/clk/sprd/mux.h b/drivers/clk/sprd/mux.h
index 892e4191cc7f..f3cc31dae06f 100644
--- a/drivers/clk/sprd/mux.h
+++ b/drivers/clk/sprd/mux.h
@@ -36,26 +36,40 @@ struct sprd_mux {
.table = _table, \
}
-#define SPRD_MUX_CLK_TABLE(_struct, _name, _parents, _table, \
- _reg, _shift, _width, \
- _flags) \
+#define SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags, _fn) \
struct sprd_mux _struct = { \
.mux = _SPRD_MUX_CLK(_shift, _width, _table), \
.common = { \
.regmap = NULL, \
.reg = _reg, \
- .hw.init = CLK_HW_INIT_PARENTS(_name, \
- _parents, \
- &sprd_mux_ops, \
- _flags), \
+ .hw.init = _fn(_name, _parents, \
+ &sprd_mux_ops, _flags), \
} \
}
+#define SPRD_MUX_CLK_TABLE(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags) \
+ SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags, \
+ CLK_HW_INIT_PARENTS)
+
#define SPRD_MUX_CLK(_struct, _name, _parents, _reg, \
_shift, _width, _flags) \
SPRD_MUX_CLK_TABLE(_struct, _name, _parents, NULL, \
_reg, _shift, _width, _flags)
+#define SPRD_MUX_CLK_DATA_TABLE(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags) \
+ SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags, \
+ CLK_HW_INIT_PARENTS_DATA)
+
+#define SPRD_MUX_CLK_DATA(_struct, _name, _parents, _reg, \
+ _shift, _width, _flags) \
+ SPRD_MUX_CLK_DATA_TABLE(_struct, _name, _parents, NULL, \
+ _reg, _shift, _width, _flags)
+
static inline struct sprd_mux *hw_to_sprd_mux(const struct clk_hw *hw)
{
struct sprd_clk_common *common = hw_to_sprd_clk_common(hw);
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 640270f51aa5..15791484388f 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -87,11 +87,12 @@ static u32 pll_get_ibias(u64 rate, const u64 *table)
{
u32 i, num = table[0];
- for (i = 1; i < num + 1; i++)
- if (rate <= table[i])
+ /* table[0] indicates the number of items in this table */
+ for (i = 0; i < num; i++)
+ if (rate <= table[i + 1])
break;
- return (i == num + 1) ? num : i;
+ return i == num ? num - 1 : i;
}
static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
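
The ibias fix is easiest to check against the itable used later in this patch, {4, 1000000000, 1200000000, 1400000000, 1600000000}: table[0] is the entry count, so the valid 0-based ibias indices are 0..3. Worked through the new loop:

/*
 * rate = 1100000000: i = 0 fails (not <= table[1] = 1000000000),
 *                    i = 1 matches (<= table[2] = 1200000000) -> ibias 1
 * rate = 1600000000: first match at i = 3 -> ibias 3
 * rate = 2000000000: no match, loop exits with i == num (4),
 *                    clamped to num - 1 = 3
 * The old loop started at i = 1 and could return num itself,
 * i.e. a value one past the last valid 0-based index.
 */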
diff --git a/drivers/clk/sprd/pll.h b/drivers/clk/sprd/pll.h
index e95f11e91ffe..6558f50d0296 100644
--- a/drivers/clk/sprd/pll.h
+++ b/drivers/clk/sprd/pll.h
@@ -61,27 +61,33 @@ struct sprd_pll {
struct sprd_clk_common common;
};
+#define SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _regs_num, _itable, _factors, \
+ _udelay, _k1, _k2, _fflag, \
+ _fvco, _fn) \
+ struct sprd_pll _struct = { \
+ .regs_num = _regs_num, \
+ .itable = _itable, \
+ .factors = _factors, \
+ .udelay = _udelay, \
+ .k1 = _k1, \
+ .k2 = _k2, \
+ .fflag = _fflag, \
+ .fvco = _fvco, \
+ .common = { \
+ .regmap = NULL, \
+ .reg = _reg, \
+ .hw.init = _fn(_name, _parent, \
+ &sprd_pll_ops, 0),\
+ }, \
+ }
+
#define SPRD_PLL_WITH_ITABLE_K_FVCO(_struct, _name, _parent, _reg, \
_regs_num, _itable, _factors, \
_udelay, _k1, _k2, _fflag, _fvco) \
- struct sprd_pll _struct = { \
- .regs_num = _regs_num, \
- .itable = _itable, \
- .factors = _factors, \
- .udelay = _udelay, \
- .k1 = _k1, \
- .k2 = _k2, \
- .fflag = _fflag, \
- .fvco = _fvco, \
- .common = { \
- .regmap = NULL, \
- .reg = _reg, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &sprd_pll_ops, \
- 0), \
- }, \
- }
+ SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \
+ _itable, _factors, _udelay, _k1, _k2, \
+ _fflag, _fvco, CLK_HW_INIT)
#define SPRD_PLL_WITH_ITABLE_K(_struct, _name, _parent, _reg, \
_regs_num, _itable, _factors, \
@@ -96,6 +102,19 @@ struct sprd_pll {
_regs_num, _itable, _factors, \
_udelay, 1000, 1000, 0, 0)
+#define SPRD_PLL_FW_NAME(_struct, _name, _parent, _reg, _regs_num, \
+ _itable, _factors, _udelay, _k1, _k2, \
+ _fflag, _fvco) \
+ SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \
+ _itable, _factors, _udelay, _k1, _k2, \
+ _fflag, _fvco, CLK_HW_INIT_FW_NAME)
+
+#define SPRD_PLL_HW(_struct, _name, _parent, _reg, _regs_num, _itable, \
+ _factors, _udelay, _k1, _k2, _fflag, _fvco) \
+ SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \
+ _itable, _factors, _udelay, _k1, _k2, \
+ _fflag, _fvco, CLK_HW_INIT_HW)
+
static inline struct sprd_pll *hw_to_sprd_pll(struct clk_hw *hw)
{
struct sprd_clk_common *common = hw_to_sprd_clk_common(hw);
diff --git a/drivers/clk/sprd/sc9863a-clk.c b/drivers/clk/sprd/sc9863a-clk.c
new file mode 100644
index 000000000000..2e2dfb2d48ff
--- /dev/null
+++ b/drivers/clk/sprd/sc9863a-clk.c
@@ -0,0 +1,1773 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Unisoc SC9863A clock driver
+ *
+ * Copyright (C) 2019 Unisoc, Inc.
+ * Author: Chunyan Zhang <chunyan.zhang@unisoc.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/sprd,sc9863a-clk.h>
+
+#include "common.h"
+#include "composite.h"
+#include "div.h"
+#include "gate.h"
+#include "mux.h"
+#include "pll.h"
+
+/* mpll*_gate clocks control cpu cores, so they are enabled by default */
+SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98,
+ 0x1000, BIT(0), 0, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c,
+ 0x1000, BIT(0), 0, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8,
+ 0x1000, BIT(0), 0, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc,
+ 0x1000, BIT(0), 0, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", 0x1e8,
+ 0x1000, BIT(0), 0, 0, 240);
+
+static struct sprd_clk_common *sc9863a_pmu_gate_clks[] = {
+ /* address base is 0x402b0000 */
+ &mpll0_gate.common,
+ &dpll0_gate.common,
+ &lpll_gate.common,
+ &gpll_gate.common,
+ &dpll1_gate.common,
+ &mpll1_gate.common,
+ &mpll2_gate.common,
+ &isppll_gate.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_pmu_gate_hws = {
+ .hws = {
+ [CLK_MPLL0_GATE] = &mpll0_gate.common.hw,
+ [CLK_DPLL0_GATE] = &dpll0_gate.common.hw,
+ [CLK_LPLL_GATE] = &lpll_gate.common.hw,
+ [CLK_GPLL_GATE] = &gpll_gate.common.hw,
+ [CLK_DPLL1_GATE] = &dpll1_gate.common.hw,
+ [CLK_MPLL1_GATE] = &mpll1_gate.common.hw,
+ [CLK_MPLL2_GATE] = &mpll2_gate.common.hw,
+ [CLK_ISPPLL_GATE] = &isppll_gate.common.hw,
+ },
+ .num = CLK_PMU_APB_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_pmu_gate_desc = {
+ .clk_clks = sc9863a_pmu_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_pmu_gate_clks),
+ .hw_clks = &sc9863a_pmu_gate_hws,
+};
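+/*
+ * Every register block in this driver repeats the three-piece pattern
+ * above: an array of sprd_clk_common pointers for regmap wiring, a
+ * clk_hw_onecell_data table indexed by the dt-binding IDs, and a
+ * sprd_clk_desc tying both together for the common probe path.
+ */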
+
+static const u64 itable[5] = {4, 1000000000, 1200000000,
+ 1400000000, 1600000000};
+
+static const struct clk_bit_field f_twpll[PLL_FACT_MAX] = {
+ { .shift = 95, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 1, .width = 1 }, /* mod_en */
+ { .shift = 2, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 3, .width = 3 }, /* ibias */
+ { .shift = 8, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 0, .width = 0 }, /* postdiv */
+};
+static SPRD_PLL_FW_NAME(twpll, "twpll", "ext-26m", 0x4, 3, itable,
+ f_twpll, 240, 1000, 1000, 0, 0);
+static CLK_FIXED_FACTOR_HW(twpll_768m, "twpll-768m", &twpll.common.hw, 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_384m, "twpll-384m", &twpll.common.hw, 4, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_192m, "twpll-192m", &twpll.common.hw, 8, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_96m, "twpll-96m", &twpll.common.hw, 16, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_48m, "twpll-48m", &twpll.common.hw, 32, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_24m, "twpll-24m", &twpll.common.hw, 64, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_12m, "twpll-12m", &twpll.common.hw, 128, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_512m, "twpll-512m", &twpll.common.hw, 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_256m, "twpll-256m", &twpll.common.hw, 6, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_128m, "twpll-128m", &twpll.common.hw, 12, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_64m, "twpll-64m", &twpll.common.hw, 24, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_307m2, "twpll-307m2", &twpll.common.hw, 5, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_219m4, "twpll-219m4", &twpll.common.hw, 7, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_170m6, "twpll-170m6", &twpll.common.hw, 9, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_153m6, "twpll-153m6", &twpll.common.hw, 10, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_76m8, "twpll-76m8", &twpll.common.hw, 20, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_51m2, "twpll-51m2", &twpll.common.hw, 30, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_38m4, "twpll-38m4", &twpll.common.hw, 40, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_19m2, "twpll-19m2", &twpll.common.hw, 80, 1, 0);
+
+static const struct clk_bit_field f_lpll[PLL_FACT_MAX] = {
+ { .shift = 95, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 1, .width = 1 }, /* mod_en */
+ { .shift = 2, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 6, .width = 2 }, /* ibias */
+ { .shift = 8, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 0, .width = 0 }, /* postdiv */
+};
+static SPRD_PLL_HW(lpll, "lpll", &lpll_gate.common.hw, 0x20, 3, itable,
+ f_lpll, 240, 1000, 1000, 0, 0);
+static CLK_FIXED_FACTOR_HW(lpll_409m6, "lpll-409m6", &lpll.common.hw, 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(lpll_245m76, "lpll-245m76", &lpll.common.hw, 5, 1, 0);
+
+static const struct clk_bit_field f_gpll[PLL_FACT_MAX] = {
+ { .shift = 95, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 1, .width = 1 }, /* mod_en */
+ { .shift = 2, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 6, .width = 2 }, /* ibias */
+ { .shift = 8, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 80, .width = 1 }, /* postdiv */
+};
+static SPRD_PLL_HW(gpll, "gpll", &gpll_gate.common.hw, 0x38, 3, itable,
+ f_gpll, 240, 1000, 1000, 1, 400000000);
+
+static SPRD_PLL_HW(isppll, "isppll", &isppll_gate.common.hw, 0x50, 3, itable,
+ f_gpll, 240, 1000, 1000, 0, 0);
+static CLK_FIXED_FACTOR_HW(isppll_468m, "isppll-468m", &isppll.common.hw, 2, 1, 0);
+
+static struct sprd_clk_common *sc9863a_pll_clks[] = {
+ /* address base is 0x40353000 */
+ &twpll.common,
+ &lpll.common,
+ &gpll.common,
+ &isppll.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_pll_hws = {
+ .hws = {
+ [CLK_TWPLL] = &twpll.common.hw,
+ [CLK_TWPLL_768M] = &twpll_768m.hw,
+ [CLK_TWPLL_384M] = &twpll_384m.hw,
+ [CLK_TWPLL_192M] = &twpll_192m.hw,
+ [CLK_TWPLL_96M] = &twpll_96m.hw,
+ [CLK_TWPLL_48M] = &twpll_48m.hw,
+ [CLK_TWPLL_24M] = &twpll_24m.hw,
+ [CLK_TWPLL_12M] = &twpll_12m.hw,
+ [CLK_TWPLL_512M] = &twpll_512m.hw,
+ [CLK_TWPLL_256M] = &twpll_256m.hw,
+ [CLK_TWPLL_128M] = &twpll_128m.hw,
+ [CLK_TWPLL_64M] = &twpll_64m.hw,
+ [CLK_TWPLL_307M2] = &twpll_307m2.hw,
+ [CLK_TWPLL_219M4] = &twpll_219m4.hw,
+ [CLK_TWPLL_170M6] = &twpll_170m6.hw,
+ [CLK_TWPLL_153M6] = &twpll_153m6.hw,
+ [CLK_TWPLL_76M8] = &twpll_76m8.hw,
+ [CLK_TWPLL_51M2] = &twpll_51m2.hw,
+ [CLK_TWPLL_38M4] = &twpll_38m4.hw,
+ [CLK_TWPLL_19M2] = &twpll_19m2.hw,
+ [CLK_LPLL] = &lpll.common.hw,
+ [CLK_LPLL_409M6] = &lpll_409m6.hw,
+ [CLK_LPLL_245M76] = &lpll_245m76.hw,
+ [CLK_GPLL] = &gpll.common.hw,
+ [CLK_ISPPLL] = &isppll.common.hw,
+ [CLK_ISPPLL_468M] = &isppll_468m.hw,
+
+ },
+ .num = CLK_ANLG_PHY_G1_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_pll_desc = {
+ .clk_clks = sc9863a_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_pll_clks),
+ .hw_clks = &sc9863a_pll_hws,
+};
+
+static const u64 itable_mpll[6] = {5, 1000000000, 1200000000, 1400000000,
+ 1600000000, 1800000000};
+static SPRD_PLL_HW(mpll0, "mpll0", &mpll0_gate.common.hw, 0x0, 3, itable_mpll,
+ f_gpll, 240, 1000, 1000, 1, 1000000000);
+static SPRD_PLL_HW(mpll1, "mpll1", &mpll1_gate.common.hw, 0x18, 3, itable_mpll,
+ f_gpll, 240, 1000, 1000, 1, 1000000000);
+static SPRD_PLL_HW(mpll2, "mpll2", &mpll2_gate.common.hw, 0x30, 3, itable_mpll,
+ f_gpll, 240, 1000, 1000, 1, 1000000000);
+static CLK_FIXED_FACTOR_HW(mpll2_675m, "mpll2-675m", &mpll2.common.hw, 2, 1, 0);
+
+static struct sprd_clk_common *sc9863a_mpll_clks[] = {
+ /* address base is 0x40359000 */
+ &mpll0.common,
+ &mpll1.common,
+ &mpll2.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_mpll_hws = {
+ .hws = {
+ [CLK_MPLL0] = &mpll0.common.hw,
+ [CLK_MPLL1] = &mpll1.common.hw,
+ [CLK_MPLL2] = &mpll2.common.hw,
+ [CLK_MPLL2_675M] = &mpll2_675m.hw,
+
+ },
+ .num = CLK_ANLG_PHY_G4_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_mpll_desc = {
+ .clk_clks = sc9863a_mpll_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_mpll_clks),
+ .hw_clks = &sc9863a_mpll_hws,
+};
+
+static SPRD_SC_GATE_CLK_FW_NAME(audio_gate, "audio-gate", "ext-26m",
+ 0x4, 0x1000, BIT(8), 0, 0);
+
+static SPRD_PLL_FW_NAME(rpll, "rpll", "ext-26m", 0x10,
+ 3, itable, f_lpll, 240, 1000, 1000, 0, 0);
+
+static CLK_FIXED_FACTOR_HW(rpll_390m, "rpll-390m", &rpll.common.hw, 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(rpll_260m, "rpll-260m", &rpll.common.hw, 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(rpll_195m, "rpll-195m", &rpll.common.hw, 4, 1, 0);
+static CLK_FIXED_FACTOR_HW(rpll_26m, "rpll-26m", &rpll.common.hw, 30, 1, 0);
+
+static struct sprd_clk_common *sc9863a_rpll_clks[] = {
+ /* address base is 0x4035c000 */
+ &audio_gate.common,
+ &rpll.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_rpll_hws = {
+ .hws = {
+ [CLK_AUDIO_GATE] = &audio_gate.common.hw,
+ [CLK_RPLL] = &rpll.common.hw,
+ [CLK_RPLL_390M] = &rpll_390m.hw,
+ [CLK_RPLL_260M] = &rpll_260m.hw,
+ [CLK_RPLL_195M] = &rpll_195m.hw,
+ [CLK_RPLL_26M] = &rpll_26m.hw,
+ },
+ .num = CLK_ANLG_PHY_G5_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_rpll_desc = {
+ .clk_clks = sc9863a_rpll_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_rpll_clks),
+ .hw_clks = &sc9863a_rpll_hws,
+};
+
+static const u64 itable_dpll[5] = {4, 1211000000, 1320000000, 1570000000,
+ 1866000000};
+static SPRD_PLL_HW(dpll0, "dpll0", &dpll0_gate.common.hw, 0x0, 3, itable_dpll,
+ f_lpll, 240, 1000, 1000, 0, 0);
+static SPRD_PLL_HW(dpll1, "dpll1", &dpll1_gate.common.hw, 0x18, 3, itable_dpll,
+ f_lpll, 240, 1000, 1000, 0, 0);
+
+static CLK_FIXED_FACTOR_HW(dpll0_933m, "dpll0-933m", &dpll0.common.hw, 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll0_622m3, "dpll0-622m3", &dpll0.common.hw, 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll1_400m, "dpll1-400m", &dpll0.common.hw, 4, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll1_266m7, "dpll1-266m7", &dpll0.common.hw, 6, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll1_123m1, "dpll1-123m1", &dpll0.common.hw, 13, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll1_50m, "dpll1-50m", &dpll0.common.hw, 32, 1, 0);
+
+static struct sprd_clk_common *sc9863a_dpll_clks[] = {
+ /* address base is 0x40363000 */
+ &dpll0.common,
+ &dpll1.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_dpll_hws = {
+ .hws = {
+ [CLK_DPLL0] = &dpll0.common.hw,
+ [CLK_DPLL1] = &dpll1.common.hw,
+ [CLK_DPLL0_933M] = &dpll0_933m.hw,
+ [CLK_DPLL0_622M3] = &dpll0_622m3.hw,
+ [CLK_DPLL0_400M] = &dpll1_400m.hw,
+ [CLK_DPLL0_266M7] = &dpll1_266m7.hw,
+ [CLK_DPLL0_123M1] = &dpll1_123m1.hw,
+ [CLK_DPLL0_50M] = &dpll1_50m.hw,
+
+ },
+ .num = CLK_ANLG_PHY_G7_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_dpll_desc = {
+ .clk_clks = sc9863a_dpll_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_dpll_clks),
+ .hw_clks = &sc9863a_dpll_hws,
+};
+
+static CLK_FIXED_FACTOR_FW_NAME(clk_6m5, "clk-6m5", "ext-26m", 4, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_4m3, "clk-4m3", "ext-26m", 6, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_2m, "clk-2m", "ext-26m", 13, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_250k, "clk-250k", "ext-26m", 104, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_25m, "rco-25m", "rco-100m", 4, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_4m, "rco-4m", "rco-100m", 25, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_2m, "rco-2m", "rco-100m", 50, 1, 0);
+
+#define SC9863A_MUX_FLAG \
+ (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT)
+
+static CLK_FIXED_FACTOR_FW_NAME(clk_13m, "clk-13m", "ext-26m", 2, 1, 0);
+static const struct clk_parent_data emc_clk_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &twpll_768m.hw },
+ { .hw = &twpll.common.hw },
+};
+static SPRD_MUX_CLK_DATA(emc_clk, "emc-clk", emc_clk_parents, 0x220,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data aon_apb_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(aon_apb, "aon-apb", aon_apb_parents, 0x224,
+ 0, 3, 8, 2, 0);
+
+static const struct clk_parent_data adi_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_38m4.hw },
+ { .hw = &twpll_51m2.hw },
+};
+static SPRD_MUX_CLK_DATA(adi_clk, "adi-clk", adi_parents, 0x228,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data aux_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rpll_26m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_COMP_CLK_DATA(aux0_clk, "aux0-clk", aux_parents, 0x22c,
+ 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(aux1_clk, "aux1-clk", aux_parents, 0x230,
+ 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(aux2_clk, "aux2-clk", aux_parents, 0x234,
+ 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(probe_clk, "probe-clk", aux_parents, 0x238,
+ 0, 5, 8, 4, 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rpll_26m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+};
+static SPRD_MUX_CLK_DATA(pwm0_clk, "pwm0-clk", pwm_parents, 0x23c,
+ 0, 2, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm1_clk, "pwm1-clk", pwm_parents, 0x240,
+ 0, 2, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm2_clk, "pwm2-clk", pwm_parents, 0x244,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data aon_thm_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &clk_250k.hw },
+};
+static SPRD_MUX_CLK_DATA(aon_thm_clk, "aon-thm-clk", aon_thm_parents, 0x25c,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data audif_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_38m4.hw },
+ { .hw = &twpll_51m2.hw },
+};
+static SPRD_MUX_CLK_DATA(audif_clk, "audif-clk", audif_parents, 0x264,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data cpu_dap_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(cpu_dap_clk, "cpu-dap-clk", cpu_dap_parents, 0x26c,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data cpu_ts_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(cpu_ts_clk, "cpu-ts-clk", cpu_ts_parents, 0x274,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data djtag_tck_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(djtag_tck_clk, "djtag-tck-clk", djtag_tck_parents, 0x28c,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data emc_ref_parents[] = {
+ { .hw = &clk_6m5.hw },
+ { .hw = &clk_13m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(emc_ref_clk, "emc-ref-clk", emc_ref_parents, 0x29c,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data cssys_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &mpll2_675m.hw },
+};
+static SPRD_COMP_CLK_DATA(cssys_clk, "cssys-clk", cssys_parents, 0x2a0,
+ 0, 4, 8, 2, 0);
+
+static const struct clk_parent_data aon_pmu_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-4m" },
+};
+static SPRD_MUX_CLK_DATA(aon_pmu_clk, "aon-pmu-clk", aon_pmu_parents, 0x2a8,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data pmu_26m_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(pmu_26m_clk, "26m-pmu-clk", pmu_26m_parents, 0x2ac,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data aon_tmr_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(aon_tmr_clk, "aon-tmr-clk", aon_tmr_parents, 0x2b0,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data power_cpu_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(power_cpu_clk, "power-cpu-clk", power_cpu_parents, 0x2c4,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data ap_axi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_axi, "ap-axi", ap_axi_parents, 0x2c8,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data sdio_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &rpll_390m.hw },
+ { .hw = &dpll1_400m.hw },
+ { .hw = &lpll_409m6.hw },
+};
+static SPRD_MUX_CLK_DATA(sdio0_2x, "sdio0-2x", sdio_parents, 0x2cc,
+ 0, 3, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio1_2x, "sdio1-2x", sdio_parents, 0x2d4,
+ 0, 3, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio2_2x, "sdio2-2x", sdio_parents, 0x2dc,
+ 0, 3, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(emmc_2x, "emmc-2x", sdio_parents, 0x2e4,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data dpu_parents[] = {
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(dpu_clk, "dpu", dpu_parents, 0x2f4,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data dpu_dpi_parents[] = {
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_COMP_CLK_DATA(dpu_dpi, "dpu-dpi", dpu_dpi_parents, 0x2f8,
+ 0, 2, 8, 4, 0);
+
+static const struct clk_parent_data otg_ref_parents[] = {
+ { .hw = &twpll_12m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(otg_ref_clk, "otg-ref-clk", otg_ref_parents, 0x308,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data sdphy_apb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+};
+static SPRD_MUX_CLK_DATA(sdphy_apb_clk, "sdphy-apb-clk", sdphy_apb_parents, 0x330,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data alg_io_apb_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_MUX_CLK_DATA(alg_io_apb_clk, "alg-io-apb-clk", alg_io_apb_parents, 0x33c,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data gpu_parents[] = {
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &gpll.common.hw },
+};
+static SPRD_COMP_CLK_DATA(gpu_core, "gpu-core", gpu_parents, 0x344,
+ 0, 3, 8, 2, 0);
+static SPRD_COMP_CLK_DATA(gpu_soc, "gpu-soc", gpu_parents, 0x348,
+ 0, 3, 8, 2, 0);
+
+static const struct clk_parent_data mm_emc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_emc, "mm-emc", mm_emc_parents, 0x350,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data mm_ahb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_ahb, "mm-ahb", mm_ahb_parents, 0x354,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data bpc_clk_parents[] = {
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+ { .hw = &dpll0_622m3.hw },
+};
+static SPRD_MUX_CLK_DATA(bpc_clk, "bpc-clk", bpc_clk_parents, 0x358,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data dcam_if_parents[] = {
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(dcam_if_clk, "dcam-if-clk", dcam_if_parents, 0x35c,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data isp_parents[] = {
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+};
+static SPRD_MUX_CLK_DATA(isp_clk, "isp-clk", isp_parents, 0x360,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data jpg_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+};
+static SPRD_MUX_CLK_DATA(jpg_clk, "jpg-clk", jpg_parents, 0x364,
+ 0, 2, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(cpp_clk, "cpp-clk", jpg_parents, 0x368,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data sensor_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_COMP_CLK_DATA(sensor0_clk, "sensor0-clk", sensor_parents, 0x36c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(sensor1_clk, "sensor1-clk", sensor_parents, 0x370,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(sensor2_clk, "sensor2-clk", sensor_parents, 0x374,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data mm_vemc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_vemc, "mm-vemc", mm_vemc_parents, 0x378,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static SPRD_MUX_CLK_DATA(mm_vahb, "mm-vahb", mm_ahb_parents, 0x37c,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data vsp_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(clk_vsp, "vsp-clk", vsp_parents, 0x380,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data core_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_512m.hw },
+ { .hw = &twpll_768m.hw },
+ { .hw = &lpll.common.hw },
+ { .hw = &dpll0.common.hw },
+ { .hw = &mpll2.common.hw },
+ { .hw = &mpll0.common.hw },
+ { .hw = &mpll1.common.hw },
+};
+static SPRD_COMP_CLK_DATA(core0_clk, "core0-clk", core_parents, 0xa20,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core1_clk, "core1-clk", core_parents, 0xa24,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core2_clk, "core2-clk", core_parents, 0xa28,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core3_clk, "core3-clk", core_parents, 0xa2c,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core4_clk, "core4-clk", core_parents, 0xa30,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core5_clk, "core5-clk", core_parents, 0xa34,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core6_clk, "core6-clk", core_parents, 0xa38,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core7_clk, "core7-clk", core_parents, 0xa3c,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(scu_clk, "scu-clk", core_parents, 0xa40,
+ 0, 3, 8, 3, 0);
+
+static SPRD_DIV_CLK_HW(ace_clk, "ace-clk", &scu_clk.common.hw, 0xa44,
+ 8, 3, 0);
+static SPRD_DIV_CLK_HW(axi_periph_clk, "axi-periph-clk", &scu_clk.common.hw, 0xa48,
+ 8, 3, 0);
+static SPRD_DIV_CLK_HW(axi_acp_clk, "axi-acp-clk", &scu_clk.common.hw, 0xa4c,
+ 8, 3, 0);
+
+static const struct clk_parent_data atb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &mpll2.common.hw },
+};
+static SPRD_COMP_CLK_DATA(atb_clk, "atb-clk", atb_parents, 0xa50,
+ 0, 2, 8, 3, 0);
+static SPRD_DIV_CLK_HW(debug_apb_clk, "debug-apb-clk", &atb_clk.common.hw, 0xa54,
+ 8, 3, 0);
+
+static const struct clk_parent_data gic_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_COMP_CLK_DATA(gic_clk, "gic-clk", gic_parents, 0xa58,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(periph_clk, "periph-clk", gic_parents, 0xa5c,
+ 0, 2, 8, 3, 0);
+
+static struct sprd_clk_common *sc9863a_aon_clks[] = {
+ /* address base is 0x402d0000 */
+ &emc_clk.common,
+ &aon_apb.common,
+ &adi_clk.common,
+ &aux0_clk.common,
+ &aux1_clk.common,
+ &aux2_clk.common,
+ &probe_clk.common,
+ &pwm0_clk.common,
+ &pwm1_clk.common,
+ &pwm2_clk.common,
+ &aon_thm_clk.common,
+ &audif_clk.common,
+ &cpu_dap_clk.common,
+ &cpu_ts_clk.common,
+ &djtag_tck_clk.common,
+ &emc_ref_clk.common,
+ &cssys_clk.common,
+ &aon_pmu_clk.common,
+ &pmu_26m_clk.common,
+ &aon_tmr_clk.common,
+ &power_cpu_clk.common,
+ &ap_axi.common,
+ &sdio0_2x.common,
+ &sdio1_2x.common,
+ &sdio2_2x.common,
+ &emmc_2x.common,
+ &dpu_clk.common,
+ &dpu_dpi.common,
+ &otg_ref_clk.common,
+ &sdphy_apb_clk.common,
+ &alg_io_apb_clk.common,
+ &gpu_core.common,
+ &gpu_soc.common,
+ &mm_emc.common,
+ &mm_ahb.common,
+ &bpc_clk.common,
+ &dcam_if_clk.common,
+ &isp_clk.common,
+ &jpg_clk.common,
+ &cpp_clk.common,
+ &sensor0_clk.common,
+ &sensor1_clk.common,
+ &sensor2_clk.common,
+ &mm_vemc.common,
+ &mm_vahb.common,
+ &clk_vsp.common,
+ &core0_clk.common,
+ &core1_clk.common,
+ &core2_clk.common,
+ &core3_clk.common,
+ &core4_clk.common,
+ &core5_clk.common,
+ &core6_clk.common,
+ &core7_clk.common,
+ &scu_clk.common,
+ &ace_clk.common,
+ &axi_periph_clk.common,
+ &axi_acp_clk.common,
+ &atb_clk.common,
+ &debug_apb_clk.common,
+ &gic_clk.common,
+ &periph_clk.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_aon_clk_hws = {
+ .hws = {
+ [CLK_13M] = &clk_13m.hw,
+ [CLK_6M5] = &clk_6m5.hw,
+ [CLK_4M3] = &clk_4m3.hw,
+ [CLK_2M] = &clk_2m.hw,
+ [CLK_250K] = &clk_250k.hw,
+ [CLK_RCO_25M] = &rco_25m.hw,
+ [CLK_RCO_4M] = &rco_4m.hw,
+ [CLK_RCO_2M] = &rco_2m.hw,
+ [CLK_EMC] = &emc_clk.common.hw,
+ [CLK_AON_APB] = &aon_apb.common.hw,
+ [CLK_ADI] = &adi_clk.common.hw,
+ [CLK_AUX0] = &aux0_clk.common.hw,
+ [CLK_AUX1] = &aux1_clk.common.hw,
+ [CLK_AUX2] = &aux2_clk.common.hw,
+ [CLK_PROBE] = &probe_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_AON_THM] = &aon_thm_clk.common.hw,
+ [CLK_AUDIF] = &audif_clk.common.hw,
+ [CLK_CPU_DAP] = &cpu_dap_clk.common.hw,
+ [CLK_CPU_TS] = &cpu_ts_clk.common.hw,
+ [CLK_DJTAG_TCK] = &djtag_tck_clk.common.hw,
+ [CLK_EMC_REF] = &emc_ref_clk.common.hw,
+ [CLK_CSSYS] = &cssys_clk.common.hw,
+ [CLK_AON_PMU] = &aon_pmu_clk.common.hw,
+ [CLK_PMU_26M] = &pmu_26m_clk.common.hw,
+ [CLK_AON_TMR] = &aon_tmr_clk.common.hw,
+ [CLK_POWER_CPU] = &power_cpu_clk.common.hw,
+ [CLK_AP_AXI] = &ap_axi.common.hw,
+ [CLK_SDIO0_2X] = &sdio0_2x.common.hw,
+ [CLK_SDIO1_2X] = &sdio1_2x.common.hw,
+ [CLK_SDIO2_2X] = &sdio2_2x.common.hw,
+ [CLK_EMMC_2X] = &emmc_2x.common.hw,
+ [CLK_DPU] = &dpu_clk.common.hw,
+ [CLK_DPU_DPI] = &dpu_dpi.common.hw,
+ [CLK_OTG_REF] = &otg_ref_clk.common.hw,
+ [CLK_SDPHY_APB] = &sdphy_apb_clk.common.hw,
+ [CLK_ALG_IO_APB] = &alg_io_apb_clk.common.hw,
+ [CLK_GPU_CORE] = &gpu_core.common.hw,
+ [CLK_GPU_SOC] = &gpu_soc.common.hw,
+ [CLK_MM_EMC] = &mm_emc.common.hw,
+ [CLK_MM_AHB] = &mm_ahb.common.hw,
+ [CLK_BPC] = &bpc_clk.common.hw,
+ [CLK_DCAM_IF] = &dcam_if_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_CPP] = &cpp_clk.common.hw,
+ [CLK_SENSOR0] = &sensor0_clk.common.hw,
+ [CLK_SENSOR1] = &sensor1_clk.common.hw,
+ [CLK_SENSOR2] = &sensor2_clk.common.hw,
+ [CLK_MM_VEMC] = &mm_vemc.common.hw,
+ [CLK_MM_VAHB] = &mm_vahb.common.hw,
+ [CLK_VSP] = &clk_vsp.common.hw,
+ [CLK_CORE0] = &core0_clk.common.hw,
+ [CLK_CORE1] = &core1_clk.common.hw,
+ [CLK_CORE2] = &core2_clk.common.hw,
+ [CLK_CORE3] = &core3_clk.common.hw,
+ [CLK_CORE4] = &core4_clk.common.hw,
+ [CLK_CORE5] = &core5_clk.common.hw,
+ [CLK_CORE6] = &core6_clk.common.hw,
+ [CLK_CORE7] = &core7_clk.common.hw,
+ [CLK_SCU] = &scu_clk.common.hw,
+ [CLK_ACE] = &ace_clk.common.hw,
+ [CLK_AXI_PERIPH] = &axi_periph_clk.common.hw,
+ [CLK_AXI_ACP] = &axi_acp_clk.common.hw,
+ [CLK_ATB] = &atb_clk.common.hw,
+ [CLK_DEBUG_APB] = &debug_apb_clk.common.hw,
+ [CLK_GIC] = &gic_clk.common.hw,
+ [CLK_PERIPH] = &periph_clk.common.hw,
+ },
+ .num = CLK_AON_CLK_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_aon_clk_desc = {
+ .clk_clks = sc9863a_aon_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_aon_clks),
+ .hw_clks = &sc9863a_aon_clk_hws,
+};
+
+static const struct clk_parent_data ap_apb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_apb, "ap-apb", ap_apb_parents, 0x20,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data ap_ce_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_256m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_ce, "ap-ce", ap_ce_parents, 0x24,
+ 0, 1, 8, 3, 0);
+
+static const struct clk_parent_data nandc_ecc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+};
+static SPRD_COMP_CLK_DATA(nandc_ecc, "nandc-ecc", nandc_ecc_parents, 0x28,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data nandc_26m_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(nandc_26m, "nandc-26m", nandc_26m_parents, 0x2c,
+ 0, 1, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(emmc_32k, "emmc-32k", nandc_26m_parents, 0x30,
+ 0, 1, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio0_32k, "sdio0-32k", nandc_26m_parents, 0x34,
+ 0, 1, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio1_32k, "sdio1-32k", nandc_26m_parents, 0x38,
+ 0, 1, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio2_32k, "sdio2-32k", nandc_26m_parents, 0x3c,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static SPRD_GATE_CLK_HW(otg_utmi, "otg-utmi", &aon_apb.common.hw, 0x40,
+ BIT(16), 0, 0);
+
+static const struct clk_parent_data ap_uart_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_uart0, "ap-uart0", ap_uart_parents, 0x44,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart1, "ap-uart1", ap_uart_parents, 0x48,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart2, "ap-uart2", ap_uart_parents, 0x4c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart3, "ap-uart3", ap_uart_parents, 0x50,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart4, "ap-uart4", ap_uart_parents, 0x54,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data i2c_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_i2c0, "ap-i2c0", i2c_parents, 0x58,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c1, "ap-i2c1", i2c_parents, 0x5c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c2, "ap-i2c2", i2c_parents, 0x60,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c3, "ap-i2c3", i2c_parents, 0x64,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c4, "ap-i2c4", i2c_parents, 0x68,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c5, "ap-i2c5", i2c_parents, 0x6c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c6, "ap-i2c6", i2c_parents, 0x70,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data spi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_spi0, "ap-spi0", spi_parents, 0x74,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi1, "ap-spi1", spi_parents, 0x78,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi2, "ap-spi2", spi_parents, 0x7c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi3, "ap-spi3", spi_parents, 0x80,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data iis_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_iis0, "ap-iis0", iis_parents, 0x84,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_iis1, "ap-iis1", iis_parents, 0x88,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_iis2, "ap-iis2", iis_parents, 0x8c,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data sim0_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(sim0, "sim0", sim0_parents, 0x90,
+ 0, 3, 8, 3, 0);
+
+static const struct clk_parent_data sim0_32k_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(sim0_32k, "sim0-32k", sim0_32k_parents, 0x94,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static struct sprd_clk_common *sc9863a_ap_clks[] = {
+ /* address base is 0x21500000 */
+ &ap_apb.common,
+ &ap_ce.common,
+ &nandc_ecc.common,
+ &nandc_26m.common,
+ &emmc_32k.common,
+ &sdio0_32k.common,
+ &sdio1_32k.common,
+ &sdio2_32k.common,
+ &otg_utmi.common,
+ &ap_uart0.common,
+ &ap_uart1.common,
+ &ap_uart2.common,
+ &ap_uart3.common,
+ &ap_uart4.common,
+ &ap_i2c0.common,
+ &ap_i2c1.common,
+ &ap_i2c2.common,
+ &ap_i2c3.common,
+ &ap_i2c4.common,
+ &ap_i2c5.common,
+ &ap_i2c6.common,
+ &ap_spi0.common,
+ &ap_spi1.common,
+ &ap_spi2.common,
+ &ap_spi3.common,
+ &ap_iis0.common,
+ &ap_iis1.common,
+ &ap_iis2.common,
+ &sim0.common,
+ &sim0_32k.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_ap_clk_hws = {
+ .hws = {
+ [CLK_AP_APB] = &ap_apb.common.hw,
+ [CLK_AP_CE] = &ap_ce.common.hw,
+ [CLK_NANDC_ECC] = &nandc_ecc.common.hw,
+ [CLK_NANDC_26M] = &nandc_26m.common.hw,
+ [CLK_EMMC_32K] = &emmc_32k.common.hw,
+ [CLK_SDIO0_32K] = &sdio0_32k.common.hw,
+ [CLK_SDIO1_32K] = &sdio1_32k.common.hw,
+ [CLK_SDIO2_32K] = &sdio2_32k.common.hw,
+ [CLK_OTG_UTMI] = &otg_utmi.common.hw,
+ [CLK_AP_UART0] = &ap_uart0.common.hw,
+ [CLK_AP_UART1] = &ap_uart1.common.hw,
+ [CLK_AP_UART2] = &ap_uart2.common.hw,
+ [CLK_AP_UART3] = &ap_uart3.common.hw,
+ [CLK_AP_UART4] = &ap_uart4.common.hw,
+ [CLK_AP_I2C0] = &ap_i2c0.common.hw,
+ [CLK_AP_I2C1] = &ap_i2c1.common.hw,
+ [CLK_AP_I2C2] = &ap_i2c2.common.hw,
+ [CLK_AP_I2C3] = &ap_i2c3.common.hw,
+ [CLK_AP_I2C4] = &ap_i2c4.common.hw,
+ [CLK_AP_I2C5] = &ap_i2c5.common.hw,
+ [CLK_AP_I2C6] = &ap_i2c6.common.hw,
+ [CLK_AP_SPI0] = &ap_spi0.common.hw,
+ [CLK_AP_SPI1] = &ap_spi1.common.hw,
+ [CLK_AP_SPI2] = &ap_spi2.common.hw,
+ [CLK_AP_SPI3] = &ap_spi3.common.hw,
+ [CLK_AP_IIS0] = &ap_iis0.common.hw,
+ [CLK_AP_IIS1] = &ap_iis1.common.hw,
+ [CLK_AP_IIS2] = &ap_iis2.common.hw,
+ [CLK_SIM0] = &sim0.common.hw,
+ [CLK_SIM0_32K] = &sim0_32k.common.hw,
+ },
+ .num = CLK_AP_CLK_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_ap_clk_desc = {
+ .clk_clks = sc9863a_ap_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_ap_clks),
+ .hw_clks = &sc9863a_ap_clk_hws,
+};
+
+static SPRD_SC_GATE_CLK_HW(otg_eb, "otg-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dma_eb, "dma-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ce_eb, "ce-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_HW(nandc_eb, "nandc-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio0_eb, "sdio0-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio1_eb, "sdio1-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio2_eb, "sdio2-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(emmc_eb, "emmc-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_HW(emmc_32k_eb, "emmc-32k-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(27), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio0_32k_eb, "sdio0-32k-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(28), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio1_32k_eb, "sdio1-32k-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(29), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio2_32k_eb, "sdio2-32k-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(30), 0, 0);
+static SPRD_SC_GATE_CLK_HW(nandc_26m_eb, "nandc-26m-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(31), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dma_eb2, "dma-eb2", &ap_axi.common.hw, 0x18,
+ 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ce_eb2, "ce-eb2", &ap_axi.common.hw, 0x18,
+ 0x1000, BIT(1), 0, 0);
+
+static struct sprd_clk_common *sc9863a_apahb_gate_clks[] = {
+ /* address base is 0x20e00000 */
+ &otg_eb.common,
+ &dma_eb.common,
+ &ce_eb.common,
+ &nandc_eb.common,
+ &sdio0_eb.common,
+ &sdio1_eb.common,
+ &sdio2_eb.common,
+ &emmc_eb.common,
+ &emmc_32k_eb.common,
+ &sdio0_32k_eb.common,
+ &sdio1_32k_eb.common,
+ &sdio2_32k_eb.common,
+ &nandc_26m_eb.common,
+ &dma_eb2.common,
+ &ce_eb2.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_apahb_gate_hws = {
+ .hws = {
+ [CLK_OTG_EB] = &otg_eb.common.hw,
+ [CLK_DMA_EB] = &dma_eb.common.hw,
+ [CLK_CE_EB] = &ce_eb.common.hw,
+ [CLK_NANDC_EB] = &nandc_eb.common.hw,
+ [CLK_SDIO0_EB] = &sdio0_eb.common.hw,
+ [CLK_SDIO1_EB] = &sdio1_eb.common.hw,
+ [CLK_SDIO2_EB] = &sdio2_eb.common.hw,
+ [CLK_EMMC_EB] = &emmc_eb.common.hw,
+ [CLK_EMMC_32K_EB] = &emmc_32k_eb.common.hw,
+ [CLK_SDIO0_32K_EB] = &sdio0_32k_eb.common.hw,
+ [CLK_SDIO1_32K_EB] = &sdio1_32k_eb.common.hw,
+ [CLK_SDIO2_32K_EB] = &sdio2_32k_eb.common.hw,
+ [CLK_NANDC_26M_EB] = &nandc_26m_eb.common.hw,
+ [CLK_DMA_EB2] = &dma_eb2.common.hw,
+ [CLK_CE_EB2] = &ce_eb2.common.hw,
+ },
+ .num = CLK_AP_AHB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_apahb_gate_desc = {
+ .clk_clks = sc9863a_apahb_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_apahb_gate_clks),
+ .hw_clks = &sc9863a_apahb_gate_hws,
+};
+
+/* aon gate clocks */
+static SPRD_SC_GATE_CLK_HW(gpio_eb, "gpio-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(pwm0_eb, "pwm0-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(pwm1_eb, "pwm1-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(pwm2_eb, "pwm2-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_HW(pwm3_eb, "pwm3-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(kpd_eb, "kpd-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_syst_eb, "aon-syst-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_syst_eb, "ap-syst-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aon_tmr_eb, "aon-tmr-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(efuse_eb, "efuse-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(13), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(eic_eb, "eic-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(intc_eb, "intc-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(adi_eb, "adi-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(audif_eb, "audif-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(17), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aud_eb, "aud-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_HW(vbc_eb, "vbc-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(19), 0, 0);
+static SPRD_SC_GATE_CLK_HW(pin_eb, "pin-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_wdg_eb, "ap-wdg-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(24), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_eb, "mm-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(25), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aon_apb_ckg_eb, "aon-apb-ckg-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(26), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_ts0_eb, "ca53-ts0-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(28), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_ts1_eb, "ca53-ts1-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(29), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_dap_eb, "ca53-dap-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(30), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(pmu_eb, "pmu-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(thm_eb, "thm-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aux0_eb, "aux0-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aux1_eb, "aux1-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aux2_eb, "aux2-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(probe_eb, "probe-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(emc_ref_eb, "emc-ref-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_wdg_eb, "ca53-wdg-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr1_eb, "ap-tmr1-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr2_eb, "ap-tmr2-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(disp_emc_eb, "disp-emc-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_HW(zip_emc_eb, "zip-emc-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(12), 0, 0);
+static SPRD_SC_GATE_CLK_HW(gsp_emc_eb, "gsp-emc-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_vsp_eb, "mm-vsp-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mdar_eb, "mdar-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(17), 0, 0);
+static SPRD_SC_GATE_CLK_HW(rtc4m0_cal_eb, "rtc4m0-cal-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_HW(rtc4m1_cal_eb, "rtc4m1-cal-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(19), 0, 0);
+static SPRD_SC_GATE_CLK_HW(djtag_eb, "djtag-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(20), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mbox_eb, "mbox-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(21), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_dma_eb, "aon-dma-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(22), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_apb_def_eb, "aon-apb-def-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(25), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ca5_ts0_eb, "ca5-ts0-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(26), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dbg_eb, "dbg-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(28), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dbg_emc_eb, "dbg-emc-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(29), 0, 0);
+static SPRD_SC_GATE_CLK_HW(cross_trig_eb, "cross-trig-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(30), 0, 0);
+static SPRD_SC_GATE_CLK_HW(serdes_dphy_eb, "serdes-dphy-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(31), 0, 0);
+static SPRD_SC_GATE_CLK_HW(arch_rtc_eb, "arch-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(kpd_rtc_eb, "kpd-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_syst_rtc_eb, "aon-syst-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_syst_rtc_eb, "ap-syst-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aon_tmr_rtc_eb, "aon-tmr-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr0_rtc_eb, "ap-tmr0-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(eic_rtc_eb, "eic-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(eic_rtcdv5_eb, "eic-rtcdv5-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_wdg_rtc_eb, "ap-wdg-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_wdg_rtc_eb, "ca53-wdg-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(thm_rtc_eb, "thm-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(athma_rtc_eb, "athma-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_HW(gthma_rtc_eb, "gthma-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(12), 0, 0);
+static SPRD_SC_GATE_CLK_HW(athma_rtc_a_eb, "athma-rtc-a-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_HW(gthma_rtc_a_eb, "gthma-rtc-a-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr1_rtc_eb, "ap-tmr1-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(15), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr2_rtc_eb, "ap-tmr2-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(16), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dxco_lc_rtc_eb, "dxco-lc-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(17), 0, 0);
+static SPRD_SC_GATE_CLK_HW(bb_cal_rtc_eb, "bb-cal-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_HW(gpu_eb, "gpu-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(disp_eb, "disp-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_emc_eb, "mm-emc-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(power_cpu_eb, "power-cpu-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(hw_i2c_eb, "hw-i2c-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_vsp_emc_eb, "mm-vsp-emc-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_HW(vsp_eb, "vsp-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(16), 0, 0);
+static SPRD_SC_GATE_CLK_HW(cssys_eb, "cssys-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dmc_eb, "dmc-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(rosc_eb, "rosc-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(s_d_cfg_eb, "s-d-cfg-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_HW(s_d_ref_eb, "s-d-ref-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_HW(b_dma_eb, "b-dma-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(anlg_eb, "anlg-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(anlg_apb_eb, "anlg-apb-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_HW(bsmtmr_eb, "bsmtmr-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ap_axi_eb, "ap-axi-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc0_eb, "ap-intc0-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc1_eb, "ap-intc1-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc2_eb, "ap-intc2-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc3_eb, "ap-intc3-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(19), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc4_eb, "ap-intc4-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc5_eb, "ap-intc5-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(21), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(scc_eb, "scc-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(22), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dphy_cfg_eb, "dphy-cfg-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(23), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dphy_ref_eb, "dphy-ref-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(24), 0, 0);
+static SPRD_SC_GATE_CLK_HW(cphy_cfg_eb, "cphy-cfg-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(25), 0, 0);
+static SPRD_SC_GATE_CLK_HW(otg_ref_eb, "otg-ref-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(26), 0, 0);
+static SPRD_SC_GATE_CLK_HW(serdes_eb, "serdes-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(27), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_ap_emc_eb, "aon-ap-emc-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(28), 0, 0);
+static struct sprd_clk_common *sc9863a_aonapb_gate_clks[] = {
+ /* address base is 0x402e0000 */
+ &gpio_eb.common,
+ &pwm0_eb.common,
+ &pwm1_eb.common,
+ &pwm2_eb.common,
+ &pwm3_eb.common,
+ &kpd_eb.common,
+ &aon_syst_eb.common,
+ &ap_syst_eb.common,
+ &aon_tmr_eb.common,
+ &efuse_eb.common,
+ &eic_eb.common,
+ &intc_eb.common,
+ &adi_eb.common,
+ &audif_eb.common,
+ &aud_eb.common,
+ &vbc_eb.common,
+ &pin_eb.common,
+ &ap_wdg_eb.common,
+ &mm_eb.common,
+ &aon_apb_ckg_eb.common,
+ &ca53_ts0_eb.common,
+ &ca53_ts1_eb.common,
+ &ca53_dap_eb.common,
+ &pmu_eb.common,
+ &thm_eb.common,
+ &aux0_eb.common,
+ &aux1_eb.common,
+ &aux2_eb.common,
+ &probe_eb.common,
+ &emc_ref_eb.common,
+ &ca53_wdg_eb.common,
+ &ap_tmr1_eb.common,
+ &ap_tmr2_eb.common,
+ &disp_emc_eb.common,
+ &zip_emc_eb.common,
+ &gsp_emc_eb.common,
+ &mm_vsp_eb.common,
+ &mdar_eb.common,
+ &rtc4m0_cal_eb.common,
+ &rtc4m1_cal_eb.common,
+ &djtag_eb.common,
+ &mbox_eb.common,
+ &aon_dma_eb.common,
+ &aon_apb_def_eb.common,
+ &ca5_ts0_eb.common,
+ &dbg_eb.common,
+ &dbg_emc_eb.common,
+ &cross_trig_eb.common,
+ &serdes_dphy_eb.common,
+ &arch_rtc_eb.common,
+ &kpd_rtc_eb.common,
+ &aon_syst_rtc_eb.common,
+ &ap_syst_rtc_eb.common,
+ &aon_tmr_rtc_eb.common,
+ &ap_tmr0_rtc_eb.common,
+ &eic_rtc_eb.common,
+ &eic_rtcdv5_eb.common,
+ &ap_wdg_rtc_eb.common,
+ &ca53_wdg_rtc_eb.common,
+ &thm_rtc_eb.common,
+ &athma_rtc_eb.common,
+ &gthma_rtc_eb.common,
+ &athma_rtc_a_eb.common,
+ &gthma_rtc_a_eb.common,
+ &ap_tmr1_rtc_eb.common,
+ &ap_tmr2_rtc_eb.common,
+ &dxco_lc_rtc_eb.common,
+ &bb_cal_rtc_eb.common,
+ &gpu_eb.common,
+ &disp_eb.common,
+ &mm_emc_eb.common,
+ &power_cpu_eb.common,
+ &hw_i2c_eb.common,
+ &mm_vsp_emc_eb.common,
+ &vsp_eb.common,
+ &cssys_eb.common,
+ &dmc_eb.common,
+ &rosc_eb.common,
+ &s_d_cfg_eb.common,
+ &s_d_ref_eb.common,
+ &b_dma_eb.common,
+ &anlg_eb.common,
+ &anlg_apb_eb.common,
+ &bsmtmr_eb.common,
+ &ap_axi_eb.common,
+ &ap_intc0_eb.common,
+ &ap_intc1_eb.common,
+ &ap_intc2_eb.common,
+ &ap_intc3_eb.common,
+ &ap_intc4_eb.common,
+ &ap_intc5_eb.common,
+ &scc_eb.common,
+ &dphy_cfg_eb.common,
+ &dphy_ref_eb.common,
+ &cphy_cfg_eb.common,
+ &otg_ref_eb.common,
+ &serdes_eb.common,
+ &aon_ap_emc_eb.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_aonapb_gate_hws = {
+ .hws = {
+ [CLK_GPIO_EB] = &gpio_eb.common.hw,
+ [CLK_PWM0_EB] = &pwm0_eb.common.hw,
+ [CLK_PWM1_EB] = &pwm1_eb.common.hw,
+ [CLK_PWM2_EB] = &pwm2_eb.common.hw,
+ [CLK_PWM3_EB] = &pwm3_eb.common.hw,
+ [CLK_KPD_EB] = &kpd_eb.common.hw,
+ [CLK_AON_SYST_EB] = &aon_syst_eb.common.hw,
+ [CLK_AP_SYST_EB] = &ap_syst_eb.common.hw,
+ [CLK_AON_TMR_EB] = &aon_tmr_eb.common.hw,
+ [CLK_EFUSE_EB] = &efuse_eb.common.hw,
+ [CLK_EIC_EB] = &eic_eb.common.hw,
+ [CLK_INTC_EB] = &intc_eb.common.hw,
+ [CLK_ADI_EB] = &adi_eb.common.hw,
+ [CLK_AUDIF_EB] = &audif_eb.common.hw,
+ [CLK_AUD_EB] = &aud_eb.common.hw,
+ [CLK_VBC_EB] = &vbc_eb.common.hw,
+ [CLK_PIN_EB] = &pin_eb.common.hw,
+ [CLK_AP_WDG_EB] = &ap_wdg_eb.common.hw,
+ [CLK_MM_EB] = &mm_eb.common.hw,
+ [CLK_AON_APB_CKG_EB] = &aon_apb_ckg_eb.common.hw,
+ [CLK_CA53_TS0_EB] = &ca53_ts0_eb.common.hw,
+ [CLK_CA53_TS1_EB] = &ca53_ts1_eb.common.hw,
+ [CLK_CA53_DAP_EB] = &ca53_dap_eb.common.hw,
+ [CLK_PMU_EB] = &pmu_eb.common.hw,
+ [CLK_THM_EB] = &thm_eb.common.hw,
+ [CLK_AUX0_EB] = &aux0_eb.common.hw,
+ [CLK_AUX1_EB] = &aux1_eb.common.hw,
+ [CLK_AUX2_EB] = &aux2_eb.common.hw,
+ [CLK_PROBE_EB] = &probe_eb.common.hw,
+ [CLK_EMC_REF_EB] = &emc_ref_eb.common.hw,
+ [CLK_CA53_WDG_EB] = &ca53_wdg_eb.common.hw,
+ [CLK_AP_TMR1_EB] = &ap_tmr1_eb.common.hw,
+ [CLK_AP_TMR2_EB] = &ap_tmr2_eb.common.hw,
+ [CLK_DISP_EMC_EB] = &disp_emc_eb.common.hw,
+ [CLK_ZIP_EMC_EB] = &zip_emc_eb.common.hw,
+ [CLK_GSP_EMC_EB] = &gsp_emc_eb.common.hw,
+ [CLK_MM_VSP_EB] = &mm_vsp_eb.common.hw,
+ [CLK_MDAR_EB] = &mdar_eb.common.hw,
+ [CLK_RTC4M0_CAL_EB] = &rtc4m0_cal_eb.common.hw,
+ [CLK_RTC4M1_CAL_EB] = &rtc4m1_cal_eb.common.hw,
+ [CLK_DJTAG_EB] = &djtag_eb.common.hw,
+ [CLK_MBOX_EB] = &mbox_eb.common.hw,
+ [CLK_AON_DMA_EB] = &aon_dma_eb.common.hw,
+ [CLK_AON_APB_DEF_EB] = &aon_apb_def_eb.common.hw,
+ [CLK_CA5_TS0_EB] = &ca5_ts0_eb.common.hw,
+ [CLK_DBG_EB] = &dbg_eb.common.hw,
+ [CLK_DBG_EMC_EB] = &dbg_emc_eb.common.hw,
+ [CLK_CROSS_TRIG_EB] = &cross_trig_eb.common.hw,
+ [CLK_SERDES_DPHY_EB] = &serdes_dphy_eb.common.hw,
+ [CLK_ARCH_RTC_EB] = &arch_rtc_eb.common.hw,
+ [CLK_KPD_RTC_EB] = &kpd_rtc_eb.common.hw,
+ [CLK_AON_SYST_RTC_EB] = &aon_syst_rtc_eb.common.hw,
+ [CLK_AP_SYST_RTC_EB] = &ap_syst_rtc_eb.common.hw,
+ [CLK_AON_TMR_RTC_EB] = &aon_tmr_rtc_eb.common.hw,
+ [CLK_AP_TMR0_RTC_EB] = &ap_tmr0_rtc_eb.common.hw,
+ [CLK_EIC_RTC_EB] = &eic_rtc_eb.common.hw,
+ [CLK_EIC_RTCDV5_EB] = &eic_rtcdv5_eb.common.hw,
+ [CLK_AP_WDG_RTC_EB] = &ap_wdg_rtc_eb.common.hw,
+ [CLK_CA53_WDG_RTC_EB] = &ca53_wdg_rtc_eb.common.hw,
+ [CLK_THM_RTC_EB] = &thm_rtc_eb.common.hw,
+ [CLK_ATHMA_RTC_EB] = &athma_rtc_eb.common.hw,
+ [CLK_GTHMA_RTC_EB] = &gthma_rtc_eb.common.hw,
+ [CLK_ATHMA_RTC_A_EB] = &athma_rtc_a_eb.common.hw,
+ [CLK_GTHMA_RTC_A_EB] = &gthma_rtc_a_eb.common.hw,
+ [CLK_AP_TMR1_RTC_EB] = &ap_tmr1_rtc_eb.common.hw,
+ [CLK_AP_TMR2_RTC_EB] = &ap_tmr2_rtc_eb.common.hw,
+ [CLK_DXCO_LC_RTC_EB] = &dxco_lc_rtc_eb.common.hw,
+ [CLK_BB_CAL_RTC_EB] = &bb_cal_rtc_eb.common.hw,
+ [CLK_GPU_EB] = &gpu_eb.common.hw,
+ [CLK_DISP_EB] = &disp_eb.common.hw,
+ [CLK_MM_EMC_EB] = &mm_emc_eb.common.hw,
+ [CLK_POWER_CPU_EB] = &power_cpu_eb.common.hw,
+ [CLK_HW_I2C_EB] = &hw_i2c_eb.common.hw,
+ [CLK_MM_VSP_EMC_EB] = &mm_vsp_emc_eb.common.hw,
+ [CLK_VSP_EB] = &vsp_eb.common.hw,
+ [CLK_CSSYS_EB] = &cssys_eb.common.hw,
+ [CLK_DMC_EB] = &dmc_eb.common.hw,
+ [CLK_ROSC_EB] = &rosc_eb.common.hw,
+ [CLK_S_D_CFG_EB] = &s_d_cfg_eb.common.hw,
+ [CLK_S_D_REF_EB] = &s_d_ref_eb.common.hw,
+ [CLK_B_DMA_EB] = &b_dma_eb.common.hw,
+ [CLK_ANLG_EB] = &anlg_eb.common.hw,
+ [CLK_ANLG_APB_EB] = &anlg_apb_eb.common.hw,
+ [CLK_BSMTMR_EB] = &bsmtmr_eb.common.hw,
+ [CLK_AP_AXI_EB] = &ap_axi_eb.common.hw,
+ [CLK_AP_INTC0_EB] = &ap_intc0_eb.common.hw,
+ [CLK_AP_INTC1_EB] = &ap_intc1_eb.common.hw,
+ [CLK_AP_INTC2_EB] = &ap_intc2_eb.common.hw,
+ [CLK_AP_INTC3_EB] = &ap_intc3_eb.common.hw,
+ [CLK_AP_INTC4_EB] = &ap_intc4_eb.common.hw,
+ [CLK_AP_INTC5_EB] = &ap_intc5_eb.common.hw,
+ [CLK_SCC_EB] = &scc_eb.common.hw,
+ [CLK_DPHY_CFG_EB] = &dphy_cfg_eb.common.hw,
+ [CLK_DPHY_REF_EB] = &dphy_ref_eb.common.hw,
+ [CLK_CPHY_CFG_EB] = &cphy_cfg_eb.common.hw,
+ [CLK_OTG_REF_EB] = &otg_ref_eb.common.hw,
+ [CLK_SERDES_EB] = &serdes_eb.common.hw,
+ [CLK_AON_AP_EMC_EB] = &aon_ap_emc_eb.common.hw,
+ },
+ .num = CLK_AON_APB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_aonapb_gate_desc = {
+ .clk_clks = sc9863a_aonapb_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_aonapb_gate_clks),
+ .hw_clks = &sc9863a_aonapb_gate_hws,
+};
+
+/* mm gate clocks */
+static SPRD_SC_GATE_CLK_HW(mahb_ckg_eb, "mahb-ckg-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mdcam_eb, "mdcam-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_HW(misp_eb, "misp-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mahbcsi_eb, "mahbcsi-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mcsi_s_eb, "mcsi-s-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mcsi_t_eb, "mcsi-t-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(5), 0, 0);
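+/*
+ * The gates below live in the 0x8 register and use plain
+ * SPRD_GATE_CLK_HW: unlike the SPRD_SC_GATE_CLK_HW entries above,
+ * they take no set/clear register offset (the 0x1000 argument).
+ */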
+static SPRD_GATE_CLK_HW(dcam_axi_eb, "dcam-axi-eb", &mm_ahb.common.hw, 0x8,
+ BIT(0), 0, 0);
+static SPRD_GATE_CLK_HW(isp_axi_eb, "isp-axi-eb", &mm_ahb.common.hw, 0x8,
+ BIT(1), 0, 0);
+static SPRD_GATE_CLK_HW(mcsi_eb, "mcsi-eb", &mm_ahb.common.hw, 0x8,
+ BIT(2), 0, 0);
+static SPRD_GATE_CLK_HW(mcsi_s_ckg_eb, "mcsi-s-ckg-eb", &mm_ahb.common.hw, 0x8,
+ BIT(3), 0, 0);
+static SPRD_GATE_CLK_HW(mcsi_t_ckg_eb, "mcsi-t-ckg-eb", &mm_ahb.common.hw, 0x8,
+ BIT(4), 0, 0);
+static SPRD_GATE_CLK_HW(sensor0_eb, "sensor0-eb", &mm_ahb.common.hw, 0x8,
+ BIT(5), 0, 0);
+static SPRD_GATE_CLK_HW(sensor1_eb, "sensor1-eb", &mm_ahb.common.hw, 0x8,
+ BIT(6), 0, 0);
+static SPRD_GATE_CLK_HW(sensor2_eb, "sensor2-eb", &mm_ahb.common.hw, 0x8,
+ BIT(7), 0, 0);
+static SPRD_GATE_CLK_HW(mcphy_cfg_eb, "mcphy-cfg-eb", &mm_ahb.common.hw, 0x8,
+ BIT(8), 0, 0);
+
+static struct sprd_clk_common *sc9863a_mm_gate_clks[] = {
+ /* address base is 0x60800000 */
+ &mahb_ckg_eb.common,
+ &mdcam_eb.common,
+ &misp_eb.common,
+ &mahbcsi_eb.common,
+ &mcsi_s_eb.common,
+ &mcsi_t_eb.common,
+ &dcam_axi_eb.common,
+ &isp_axi_eb.common,
+ &mcsi_eb.common,
+ &mcsi_s_ckg_eb.common,
+ &mcsi_t_ckg_eb.common,
+ &sensor0_eb.common,
+ &sensor1_eb.common,
+ &sensor2_eb.common,
+ &mcphy_cfg_eb.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_mm_gate_hws = {
+ .hws = {
+ [CLK_MAHB_CKG_EB] = &mahb_ckg_eb.common.hw,
+ [CLK_MDCAM_EB] = &mdcam_eb.common.hw,
+ [CLK_MISP_EB] = &misp_eb.common.hw,
+ [CLK_MAHBCSI_EB] = &mahbcsi_eb.common.hw,
+ [CLK_MCSI_S_EB] = &mcsi_s_eb.common.hw,
+ [CLK_MCSI_T_EB] = &mcsi_t_eb.common.hw,
+ [CLK_DCAM_AXI_EB] = &dcam_axi_eb.common.hw,
+ [CLK_ISP_AXI_EB] = &isp_axi_eb.common.hw,
+ [CLK_MCSI_EB] = &mcsi_eb.common.hw,
+ [CLK_MCSI_S_CKG_EB] = &mcsi_s_ckg_eb.common.hw,
+ [CLK_MCSI_T_CKG_EB] = &mcsi_t_ckg_eb.common.hw,
+ [CLK_SENSOR0_EB] = &sensor0_eb.common.hw,
+ [CLK_SENSOR1_EB] = &sensor1_eb.common.hw,
+ [CLK_SENSOR2_EB] = &sensor2_eb.common.hw,
+ [CLK_MCPHY_CFG_EB] = &mcphy_cfg_eb.common.hw,
+ },
+ .num = CLK_MM_GATE_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_mm_gate_desc = {
+ .clk_clks = sc9863a_mm_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_mm_gate_clks),
+ .hw_clks = &sc9863a_mm_gate_hws,
+};
+
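+/* ap apb gate clocks */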
+static SPRD_SC_GATE_CLK_FW_NAME(sim0_eb, "sim0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis0_eb, "iis0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis1_eb, "iis1-eb", "ext-26m", 0x0,
+ 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis2_eb, "iis2-eb", "ext-26m", 0x0,
+ 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi0_eb, "spi0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi1_eb, "spi1-eb", "ext-26m", 0x0,
+ 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi2_eb, "spi2-eb", "ext-26m", 0x0,
+ 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c0_eb, "i2c0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c1_eb, "i2c1-eb", "ext-26m", 0x0,
+ 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c2_eb, "i2c2-eb", "ext-26m", 0x0,
+ 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c3_eb, "i2c3-eb", "ext-26m", 0x0,
+ 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c4_eb, "i2c4-eb", "ext-26m", 0x0,
+ 0x1000, BIT(12), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart0_eb, "uart0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(13), 0, 0);
+/* uart1_eb is the console clock; don't gate it even if unused */
+static SPRD_SC_GATE_CLK_FW_NAME(uart1_eb, "uart1-eb", "ext-26m", 0x0,
+ 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart2_eb, "uart2-eb", "ext-26m", 0x0,
+ 0x1000, BIT(15), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart3_eb, "uart3-eb", "ext-26m", 0x0,
+ 0x1000, BIT(16), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart4_eb, "uart4-eb", "ext-26m", 0x0,
+ 0x1000, BIT(17), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sim0_32k_eb, "sim0-32k-eb", "ext-26m", 0x0,
+ 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi3_eb, "spi3-eb", "ext-26m", 0x0,
+ 0x1000, BIT(19), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c5_eb, "i2c5-eb", "ext-26m", 0x0,
+ 0x1000, BIT(20), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c6_eb, "i2c6-eb", "ext-26m", 0x0,
+ 0x1000, BIT(21), 0, 0);
+
+static struct sprd_clk_common *sc9863a_apapb_gate_clks[] = {
+ /* address base is 0x71300000 */
+ &sim0_eb.common,
+ &iis0_eb.common,
+ &iis1_eb.common,
+ &iis2_eb.common,
+ &spi0_eb.common,
+ &spi1_eb.common,
+ &spi2_eb.common,
+ &i2c0_eb.common,
+ &i2c1_eb.common,
+ &i2c2_eb.common,
+ &i2c3_eb.common,
+ &i2c4_eb.common,
+ &uart0_eb.common,
+ &uart1_eb.common,
+ &uart2_eb.common,
+ &uart3_eb.common,
+ &uart4_eb.common,
+ &sim0_32k_eb.common,
+ &spi3_eb.common,
+ &i2c5_eb.common,
+ &i2c6_eb.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_apapb_gate_hws = {
+ .hws = {
+ [CLK_SIM0_EB] = &sim0_eb.common.hw,
+ [CLK_IIS0_EB] = &iis0_eb.common.hw,
+ [CLK_IIS1_EB] = &iis1_eb.common.hw,
+ [CLK_IIS2_EB] = &iis2_eb.common.hw,
+ [CLK_SPI0_EB] = &spi0_eb.common.hw,
+ [CLK_SPI1_EB] = &spi1_eb.common.hw,
+ [CLK_SPI2_EB] = &spi2_eb.common.hw,
+ [CLK_I2C0_EB] = &i2c0_eb.common.hw,
+ [CLK_I2C1_EB] = &i2c1_eb.common.hw,
+ [CLK_I2C2_EB] = &i2c2_eb.common.hw,
+ [CLK_I2C3_EB] = &i2c3_eb.common.hw,
+ [CLK_I2C4_EB] = &i2c4_eb.common.hw,
+ [CLK_UART0_EB] = &uart0_eb.common.hw,
+ [CLK_UART1_EB] = &uart1_eb.common.hw,
+ [CLK_UART2_EB] = &uart2_eb.common.hw,
+ [CLK_UART3_EB] = &uart3_eb.common.hw,
+ [CLK_UART4_EB] = &uart4_eb.common.hw,
+ [CLK_SIM0_32K_EB] = &sim0_32k_eb.common.hw,
+ [CLK_SPI3_EB] = &spi3_eb.common.hw,
+ [CLK_I2C5_EB] = &i2c5_eb.common.hw,
+ [CLK_I2C6_EB] = &i2c6_eb.common.hw,
+ },
+ .num = CLK_AP_APB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_apapb_gate_desc = {
+ .clk_clks = sc9863a_apapb_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_apapb_gate_clks),
+ .hw_clks = &sc9863a_apapb_gate_hws,
+};
+
+static const struct of_device_id sprd_sc9863a_clk_ids[] = {
+ { .compatible = "sprd,sc9863a-ap-clk", /* 0x21500000 */
+ .data = &sc9863a_ap_clk_desc },
+ { .compatible = "sprd,sc9863a-pmu-gate", /* 0x402b0000 */
+ .data = &sc9863a_pmu_gate_desc },
+ { .compatible = "sprd,sc9863a-pll", /* 0x40353000 */
+ .data = &sc9863a_pll_desc },
+ { .compatible = "sprd,sc9863a-mpll", /* 0x40359000 */
+ .data = &sc9863a_mpll_desc },
+ { .compatible = "sprd,sc9863a-rpll", /* 0x4035c000 */
+ .data = &sc9863a_rpll_desc },
+ { .compatible = "sprd,sc9863a-dpll", /* 0x40363000 */
+ .data = &sc9863a_dpll_desc },
+ { .compatible = "sprd,sc9863a-aon-clk", /* 0x402d0000 */
+ .data = &sc9863a_aon_clk_desc },
+ { .compatible = "sprd,sc9863a-apahb-gate", /* 0x20e00000 */
+ .data = &sc9863a_apahb_gate_desc },
+ { .compatible = "sprd,sc9863a-aonapb-gate", /* 0x402e0000 */
+ .data = &sc9863a_aonapb_gate_desc },
+ { .compatible = "sprd,sc9863a-mm-gate", /* 0x60800000 */
+ .data = &sc9863a_mm_gate_desc },
+ { .compatible = "sprd,sc9863a-apapb-gate", /* 0x71300000 */
+ .data = &sc9863a_apapb_gate_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sprd_sc9863a_clk_ids);
+
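+/*
+ * All SC9863A clock controllers share a single probe path: fetch the
+ * sprd_clk_desc matched above, initialize the regmap behind it, then
+ * register the hw clocks through the common Spreadtrum helpers.
+ */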
+static int sc9863a_clk_probe(struct platform_device *pdev)
+{
+ const struct sprd_clk_desc *desc;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -ENODEV;
+
+ ret = sprd_clk_regmap_init(pdev, desc);
+ if (ret)
+ return ret;
+
+ return sprd_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static struct platform_driver sc9863a_clk_driver = {
+ .probe = sc9863a_clk_probe,
+ .driver = {
+ .name = "sc9863a-clk",
+ .of_match_table = sprd_sc9863a_clk_ids,
+ },
+};
+module_platform_driver(sc9863a_clk_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC9863A Clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 116e6f826d04..54d1f96f4b68 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -55,10 +55,6 @@
/* All the DRAM gates are exported */
-/* Some more module clocks are exported */
-
-#define CLK_MBUS 112
-
/* And the DSI and GPU module clocks are exported */
#define CLK_NUMBER (CLK_GPU + 1)
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index d9668493c3f9..524f33275bc7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -50,8 +50,10 @@ static SUNXI_CCU_M(mixer1_div_a83_clk, "mixer1-div", "pll-de", 0x0c, 4, 4,
CLK_SET_RATE_PARENT);
static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(rot_div_a83_clk, "rot-div", "pll-de", 0x0c, 0x0c, 4,
+ CLK_SET_RATE_PARENT);
-static struct ccu_common *sun50i_h6_de3_clks[] = {
+static struct ccu_common *sun8i_a83t_de2_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
&wb_clk.common,
@@ -60,16 +62,16 @@ static struct ccu_common *sun50i_h6_de3_clks[] = {
&bus_mixer1_clk.common,
&bus_wb_clk.common,
- &mixer0_div_clk.common,
- &mixer1_div_clk.common,
- &wb_div_clk.common,
+ &mixer0_div_a83_clk.common,
+ &mixer1_div_a83_clk.common,
+ &wb_div_a83_clk.common,
&bus_rot_clk.common,
&rot_clk.common,
- &rot_div_clk.common,
+ &rot_div_a83_clk.common,
};
-static struct ccu_common *sun8i_a83t_de2_clks[] = {
+static struct ccu_common *sun8i_h3_de2_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
&wb_clk.common,
@@ -78,34 +80,38 @@ static struct ccu_common *sun8i_a83t_de2_clks[] = {
&bus_mixer1_clk.common,
&bus_wb_clk.common,
- &mixer0_div_a83_clk.common,
- &mixer1_div_a83_clk.common,
- &wb_div_a83_clk.common,
+ &mixer0_div_clk.common,
+ &mixer1_div_clk.common,
+ &wb_div_clk.common,
};
-static struct ccu_common *sun8i_h3_de2_clks[] = {
+static struct ccu_common *sun8i_v3s_de2_clks[] = {
&mixer0_clk.common,
- &mixer1_clk.common,
&wb_clk.common,
&bus_mixer0_clk.common,
- &bus_mixer1_clk.common,
&bus_wb_clk.common,
&mixer0_div_clk.common,
- &mixer1_div_clk.common,
&wb_div_clk.common,
};
-static struct ccu_common *sun8i_v3s_de2_clks[] = {
+static struct ccu_common *sun50i_a64_de2_clks[] = {
&mixer0_clk.common,
+ &mixer1_clk.common,
&wb_clk.common,
&bus_mixer0_clk.common,
+ &bus_mixer1_clk.common,
&bus_wb_clk.common,
&mixer0_div_clk.common,
+ &mixer1_div_clk.common,
&wb_div_clk.common,
+
+ &bus_rot_clk.common,
+ &rot_clk.common,
+ &rot_div_clk.common,
};
static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
@@ -113,16 +119,19 @@ static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
[CLK_MIXER0] = &mixer0_clk.common.hw,
[CLK_MIXER1] = &mixer1_clk.common.hw,
[CLK_WB] = &wb_clk.common.hw,
+ [CLK_ROT] = &rot_clk.common.hw,
[CLK_BUS_MIXER0] = &bus_mixer0_clk.common.hw,
[CLK_BUS_MIXER1] = &bus_mixer1_clk.common.hw,
[CLK_BUS_WB] = &bus_wb_clk.common.hw,
+ [CLK_BUS_ROT] = &bus_rot_clk.common.hw,
[CLK_MIXER0_DIV] = &mixer0_div_a83_clk.common.hw,
[CLK_MIXER1_DIV] = &mixer1_div_a83_clk.common.hw,
[CLK_WB_DIV] = &wb_div_a83_clk.common.hw,
+ [CLK_ROT_DIV] = &rot_div_a83_clk.common.hw,
},
- .num = CLK_NUMBER_WITHOUT_ROT,
+ .num = CLK_NUMBER_WITH_ROT,
};
static struct clk_hw_onecell_data sun8i_h3_de2_hw_clks = {
@@ -156,7 +165,7 @@ static struct clk_hw_onecell_data sun8i_v3s_de2_hw_clks = {
.num = CLK_NUMBER_WITHOUT_ROT,
};
-static struct clk_hw_onecell_data sun50i_h6_de3_hw_clks = {
+static struct clk_hw_onecell_data sun50i_a64_de2_hw_clks = {
.hws = {
[CLK_MIXER0] = &mixer0_clk.common.hw,
[CLK_MIXER1] = &mixer1_clk.common.hw,
@@ -179,9 +188,19 @@ static struct clk_hw_onecell_data sun50i_h6_de3_hw_clks = {
static struct ccu_reset_map sun8i_a83t_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
/*
- * For A83T, H3 and R40, mixer1 reset line is shared with wb, so
- * only RST_WB is exported here.
- * For V3s there's just no mixer1, so it also shares this struct.
+ * Mixer1 reset line is shared with wb, so only RST_WB is
+ * exported here.
+ */
+ [RST_WB] = { 0x08, BIT(2) },
+ [RST_ROT] = { 0x08, BIT(3) },
+};
+
+static struct ccu_reset_map sun8i_h3_de2_resets[] = {
+ [RST_MIXER0] = { 0x08, BIT(0) },
+ /*
+ * Mixer1 reset line is shared with wb, so only RST_WB is
+ * exported here.
+ * V3s doesn't have mixer1, so it also shares this struct.
*/
[RST_WB] = { 0x08, BIT(2) },
};
@@ -190,13 +209,13 @@ static struct ccu_reset_map sun50i_a64_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
[RST_MIXER1] = { 0x08, BIT(1) },
[RST_WB] = { 0x08, BIT(2) },
+ [RST_ROT] = { 0x08, BIT(3) },
};
-static struct ccu_reset_map sun50i_h6_de3_resets[] = {
+static struct ccu_reset_map sun50i_h5_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
[RST_MIXER1] = { 0x08, BIT(1) },
[RST_WB] = { 0x08, BIT(2) },
- [RST_ROT] = { 0x08, BIT(3) },
};
static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
@@ -215,28 +234,18 @@ static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = {
.hw_clks = &sun8i_h3_de2_hw_clks,
- .resets = sun8i_a83t_de2_resets,
- .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets),
+ .resets = sun8i_h3_de2_resets,
+ .num_resets = ARRAY_SIZE(sun8i_h3_de2_resets),
};
-static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
- .ccu_clks = sun8i_h3_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = {
+ .ccu_clks = sun50i_a64_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
- .hw_clks = &sun8i_h3_de2_hw_clks,
+ .hw_clks = &sun50i_a64_de2_hw_clks,
- .resets = sun50i_a64_de2_resets,
- .num_resets = ARRAY_SIZE(sun50i_a64_de2_resets),
-};
-
-static const struct sunxi_ccu_desc sun50i_h6_de3_clk_desc = {
- .ccu_clks = sun50i_h6_de3_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_h6_de3_clks),
-
- .hw_clks = &sun50i_h6_de3_hw_clks,
-
- .resets = sun50i_h6_de3_resets,
- .num_resets = ARRAY_SIZE(sun50i_h6_de3_resets),
+ .resets = sun8i_a83t_de2_resets,
+ .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets),
};
static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
@@ -249,6 +258,26 @@ static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
.num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets),
};
+static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
+ .ccu_clks = sun50i_a64_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
+
+ .hw_clks = &sun50i_a64_de2_hw_clks,
+
+ .resets = sun50i_a64_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_a64_de2_resets),
+};
+
+static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
+ .ccu_clks = sun8i_h3_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+
+ .hw_clks = &sun8i_h3_de2_hw_clks,
+
+ .resets = sun50i_h5_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
+};
+
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -338,6 +367,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.data = &sun8i_h3_de2_clk_desc,
},
{
+ .compatible = "allwinner,sun8i-r40-de2-clk",
+ .data = &sun8i_r40_de2_clk_desc,
+ },
+ {
.compatible = "allwinner,sun8i-v3s-de2-clk",
.data = &sun8i_v3s_de2_clk_desc,
},
@@ -347,11 +380,11 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
},
{
.compatible = "allwinner,sun50i-h5-de2-clk",
- .data = &sun50i_a64_de2_clk_desc,
+ .data = &sun50i_h5_de2_clk_desc,
},
{
.compatible = "allwinner,sun50i-h6-de3-clk",
- .data = &sun50i_h6_de3_clk_desc,
+ .data = &sun50i_h5_de2_clk_desc,
},
{ }
};
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index df966ca06788..1f7c30f87ece 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -12,7 +12,6 @@ obj-y += clk-sdmmc-mux.o
obj-y += clk-super.o
obj-y += clk-tegra-audio.o
obj-y += clk-tegra-periph.o
-obj-y += clk-tegra-pmc.o
obj-y += clk-tegra-fixed.o
obj-y += clk-tegra-super-gen4.o
obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index c4faebd32760..ff7da2d3e94d 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -32,7 +32,6 @@ enum clk_id {
tegra_clk_audio4,
tegra_clk_audio4_2x,
tegra_clk_audio4_mux,
- tegra_clk_blink,
tegra_clk_bsea,
tegra_clk_bsev,
tegra_clk_cclk_g,
@@ -44,14 +43,9 @@ enum clk_id {
tegra_clk_clk72Mhz,
tegra_clk_clk72Mhz_8,
tegra_clk_clk_m,
- tegra_clk_clk_m_div2,
- tegra_clk_clk_m_div4,
- tegra_clk_clk_out_1,
- tegra_clk_clk_out_1_mux,
- tegra_clk_clk_out_2,
- tegra_clk_clk_out_2_mux,
- tegra_clk_clk_out_3,
- tegra_clk_clk_out_3_mux,
+ tegra_clk_osc,
+ tegra_clk_osc_div2,
+ tegra_clk_osc_div4,
tegra_clk_cml0,
tegra_clk_cml1,
tegra_clk_csi,
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index 7c6c8abfcde6..77c22cef5014 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -46,7 +46,28 @@ int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
return -EINVAL;
}
+ dt_clk = tegra_lookup_dt_id(tegra_clk_osc, clks);
+ if (!dt_clk)
+ return 0;
+
osc = clk_register_fixed_rate(NULL, "osc", NULL, 0, *osc_freq);
+ *dt_clk = osc;
+
+ /* osc_div2 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_osc_div2, clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_factor(NULL, "osc_div2", "osc",
+ 0, 1, 2);
+ *dt_clk = clk;
+ }
+
+ /* osc_div4 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_osc_div4, clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_factor(NULL, "osc_div4", "osc",
+ 0, 1, 4);
+ *dt_clk = clk;
+ }
dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, clks);
if (!dt_clk)
@@ -84,22 +105,6 @@ void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768);
*dt_clk = clk;
}
-
- /* clk_m_div2 */
- dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div2, tegra_clks);
- if (dt_clk) {
- clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
- CLK_SET_RATE_PARENT, 1, 2);
- *dt_clk = clk;
- }
-
- /* clk_m_div4 */
- dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div4, tegra_clks);
- if (dt_clk) {
- clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
- CLK_SET_RATE_PARENT, 1, 4);
- *dt_clk = clk;
- }
}
void tegra_clk_osc_resume(void __iomem *clk_base)
diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
deleted file mode 100644
index bec3e008335f..000000000000
--- a/drivers/clk/tegra/clk-tegra-pmc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
- */
-
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/clk/tegra.h>
-
-#include "clk.h"
-#include "clk-id.h"
-
-#define PMC_CLK_OUT_CNTRL 0x1a8
-#define PMC_DPD_PADS_ORIDE 0x1c
-#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
-#define PMC_CTRL 0
-#define PMC_CTRL_BLINK_ENB 7
-#define PMC_BLINK_TIMER 0x40
-
-struct pmc_clk_init_data {
- char *mux_name;
- char *gate_name;
- const char **parents;
- int num_parents;
- int mux_id;
- int gate_id;
- char *dev_name;
- u8 mux_shift;
- u8 gate_shift;
-};
-
-#define PMC_CLK(_num, _mux_shift, _gate_shift)\
- {\
- .mux_name = "clk_out_" #_num "_mux",\
- .gate_name = "clk_out_" #_num,\
- .parents = clk_out ##_num ##_parents,\
- .num_parents = ARRAY_SIZE(clk_out ##_num ##_parents),\
- .mux_id = tegra_clk_clk_out_ ##_num ##_mux,\
- .gate_id = tegra_clk_clk_out_ ##_num,\
- .dev_name = "extern" #_num,\
- .mux_shift = _mux_shift,\
- .gate_shift = _gate_shift,\
- }
-
-static DEFINE_SPINLOCK(clk_out_lock);
-
-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern1",
-};
-
-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern2",
-};
-
-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern3",
-};
-
-static struct pmc_clk_init_data pmc_clks[] = {
- PMC_CLK(1, 6, 2),
- PMC_CLK(2, 14, 10),
- PMC_CLK(3, 22, 18),
-};
-
-void __init tegra_pmc_clk_init(void __iomem *pmc_base,
- struct tegra_clk *tegra_clks)
-{
- struct clk *clk;
- struct clk **dt_clk;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmc_clks); i++) {
- struct pmc_clk_init_data *data;
-
- data = pmc_clks + i;
-
- dt_clk = tegra_lookup_dt_id(data->mux_id, tegra_clks);
- if (!dt_clk)
- continue;
-
- clk = clk_register_mux(NULL, data->mux_name, data->parents,
- data->num_parents,
- CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, data->mux_shift,
- 3, 0, &clk_out_lock);
- *dt_clk = clk;
-
-
- dt_clk = tegra_lookup_dt_id(data->gate_id, tegra_clks);
- if (!dt_clk)
- continue;
-
- clk = clk_register_gate(NULL, data->gate_name, data->mux_name,
- CLK_SET_RATE_PARENT,
- pmc_base + PMC_CLK_OUT_CNTRL,
- data->gate_shift, 0, &clk_out_lock);
- *dt_clk = clk;
- clk_register_clkdev(clk, data->dev_name, data->gate_name);
- }
-
- /* blink */
- writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
- clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
- pmc_base + PMC_DPD_PADS_ORIDE,
- PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
-
- dt_clk = tegra_lookup_dt_id(tegra_clk_blink, tegra_clks);
- if (!dt_clk)
- return;
-
- clk = clk_register_gate(NULL, "blink", "blink_override", 0,
- pmc_base + PMC_CTRL,
- PMC_CTRL_BLINK_ENB, 0, NULL);
- clk_register_clkdev(clk, "blink", NULL);
- *dt_clk = clk;
-}
-
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 4efcaaf51b3a..bc9e47a4cb60 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -735,8 +735,9 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_fuse_burn] = { .dt_id = TEGRA114_CLK_FUSE_BURN, .present = true },
[tegra_clk_clk_32k] = { .dt_id = TEGRA114_CLK_CLK_32K, .present = true },
[tegra_clk_clk_m] = { .dt_id = TEGRA114_CLK_CLK_M, .present = true },
- [tegra_clk_clk_m_div2] = { .dt_id = TEGRA114_CLK_CLK_M_DIV2, .present = true },
- [tegra_clk_clk_m_div4] = { .dt_id = TEGRA114_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_osc] = { .dt_id = TEGRA114_CLK_OSC, .present = true },
+ [tegra_clk_osc_div2] = { .dt_id = TEGRA114_CLK_OSC_DIV2, .present = true },
+ [tegra_clk_osc_div4] = { .dt_id = TEGRA114_CLK_OSC_DIV4, .present = true },
[tegra_clk_pll_ref] = { .dt_id = TEGRA114_CLK_PLL_REF, .present = true },
[tegra_clk_pll_c] = { .dt_id = TEGRA114_CLK_PLL_C, .present = true },
[tegra_clk_pll_c_out1] = { .dt_id = TEGRA114_CLK_PLL_C_OUT1, .present = true },
@@ -778,10 +779,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3] = { .dt_id = TEGRA114_CLK_AUDIO3, .present = true },
[tegra_clk_audio4] = { .dt_id = TEGRA114_CLK_AUDIO4, .present = true },
[tegra_clk_spdif] = { .dt_id = TEGRA114_CLK_SPDIF, .present = true },
- [tegra_clk_clk_out_1] = { .dt_id = TEGRA114_CLK_CLK_OUT_1, .present = true },
- [tegra_clk_clk_out_2] = { .dt_id = TEGRA114_CLK_CLK_OUT_2, .present = true },
- [tegra_clk_clk_out_3] = { .dt_id = TEGRA114_CLK_CLK_OUT_3, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA114_CLK_BLINK, .present = true },
[tegra_clk_xusb_host_src] = { .dt_id = TEGRA114_CLK_XUSB_HOST_SRC, .present = true },
[tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA114_CLK_XUSB_FALCON_SRC, .present = true },
[tegra_clk_xusb_fs_src] = { .dt_id = TEGRA114_CLK_XUSB_FS_SRC, .present = true },
@@ -803,9 +800,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3_mux] = { .dt_id = TEGRA114_CLK_AUDIO3_MUX, .present = true },
[tegra_clk_audio4_mux] = { .dt_id = TEGRA114_CLK_AUDIO4_MUX, .present = true },
[tegra_clk_spdif_mux] = { .dt_id = TEGRA114_CLK_SPDIF_MUX, .present = true },
- [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_1_MUX, .present = true },
- [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_2_MUX, .present = true },
- [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_dsia_mux] = { .dt_id = TEGRA114_CLK_DSIA_MUX, .present = true },
[tegra_clk_dsib_mux] = { .dt_id = TEGRA114_CLK_DSIB_MUX, .present = true },
[tegra_clk_cec] = { .dt_id = TEGRA114_CLK_CEC, .present = true },
@@ -815,8 +809,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "clk_m", .dt_id = TEGRA114_CLK_CLK_M },
{ .con_id = "pll_ref", .dt_id = TEGRA114_CLK_PLL_REF },
{ .con_id = "clk_32k", .dt_id = TEGRA114_CLK_CLK_32K },
- { .con_id = "clk_m_div2", .dt_id = TEGRA114_CLK_CLK_M_DIV2 },
- { .con_id = "clk_m_div4", .dt_id = TEGRA114_CLK_CLK_M_DIV4 },
+ { .con_id = "osc", .dt_id = TEGRA114_CLK_OSC },
+ { .con_id = "osc_div2", .dt_id = TEGRA114_CLK_OSC_DIV2 },
+ { .con_id = "osc_div4", .dt_id = TEGRA114_CLK_OSC_DIV4 },
{ .con_id = "pll_c", .dt_id = TEGRA114_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA114_CLK_PLL_C_OUT1 },
{ .con_id = "pll_c2", .dt_id = TEGRA114_CLK_PLL_C2 },
@@ -863,10 +858,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "audio3_2x", .dt_id = TEGRA114_CLK_AUDIO3_2X },
{ .con_id = "audio4_2x", .dt_id = TEGRA114_CLK_AUDIO4_2X },
{ .con_id = "spdif_2x", .dt_id = TEGRA114_CLK_SPDIF_2X },
- { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA114_CLK_EXTERN1 },
- { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA114_CLK_EXTERN2 },
- { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA114_CLK_EXTERN3 },
- { .con_id = "blink", .dt_id = TEGRA114_CLK_BLINK },
+ { .con_id = "extern1", .dt_id = TEGRA114_CLK_EXTERN1 },
+ { .con_id = "extern2", .dt_id = TEGRA114_CLK_EXTERN2 },
+ { .con_id = "extern3", .dt_id = TEGRA114_CLK_EXTERN3 },
{ .con_id = "cclk_g", .dt_id = TEGRA114_CLK_CCLK_G },
{ .con_id = "cclk_lp", .dt_id = TEGRA114_CLK_CCLK_LP },
{ .con_id = "sclk", .dt_id = TEGRA114_CLK_SCLK },
@@ -900,17 +894,6 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
/* clk_32k */
clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768);
clks[TEGRA114_CLK_CLK_32K] = clk;
-
- /* clk_m_div2 */
- clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
- CLK_SET_RATE_PARENT, 1, 2);
- clks[TEGRA114_CLK_CLK_M_DIV2] = clk;
-
- /* clk_m_div4 */
- clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
- CLK_SET_RATE_PARENT, 1, 4);
- clks[TEGRA114_CLK_CLK_M_DIV4] = clk;
-
}
static void __init tegra114_pll_init(void __iomem *clk_base,
@@ -1153,11 +1136,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_UARTB, TEGRA114_CLK_PLL_P, 408000000, 0 },
{ TEGRA114_CLK_UARTC, TEGRA114_CLK_PLL_P, 408000000, 0 },
{ TEGRA114_CLK_UARTD, TEGRA114_CLK_PLL_P, 408000000, 0 },
- { TEGRA114_CLK_PLL_A, TEGRA114_CLK_CLK_MAX, 564480000, 1 },
- { TEGRA114_CLK_PLL_A_OUT0, TEGRA114_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA114_CLK_EXTERN1, TEGRA114_CLK_PLL_A_OUT0, 0, 1 },
- { TEGRA114_CLK_CLK_OUT_1_MUX, TEGRA114_CLK_EXTERN1, 0, 1 },
- { TEGRA114_CLK_CLK_OUT_1, TEGRA114_CLK_CLK_MAX, 0, 1 },
+ { TEGRA114_CLK_PLL_A, TEGRA114_CLK_CLK_MAX, 564480000, 0 },
+ { TEGRA114_CLK_PLL_A_OUT0, TEGRA114_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA114_CLK_I2S0, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA114_CLK_I2S1, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA114_CLK_I2S2, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 },
@@ -1359,7 +1339,6 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks,
tegra114_audio_plls,
ARRAY_SIZE(tegra114_audio_plls), 24000000);
- tegra_pmc_clk_init(pmc_base, tegra114_clks);
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index b3110d5b5a6c..e931319dcc9d 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -860,8 +860,9 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_fuse_burn] = { .dt_id = TEGRA124_CLK_FUSE_BURN, .present = true },
[tegra_clk_clk_32k] = { .dt_id = TEGRA124_CLK_CLK_32K, .present = true },
[tegra_clk_clk_m] = { .dt_id = TEGRA124_CLK_CLK_M, .present = true },
- [tegra_clk_clk_m_div2] = { .dt_id = TEGRA124_CLK_CLK_M_DIV2, .present = true },
- [tegra_clk_clk_m_div4] = { .dt_id = TEGRA124_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_osc] = { .dt_id = TEGRA124_CLK_OSC, .present = true },
+ [tegra_clk_osc_div2] = { .dt_id = TEGRA124_CLK_OSC_DIV2, .present = true },
+ [tegra_clk_osc_div4] = { .dt_id = TEGRA124_CLK_OSC_DIV4, .present = true },
[tegra_clk_pll_ref] = { .dt_id = TEGRA124_CLK_PLL_REF, .present = true },
[tegra_clk_pll_c] = { .dt_id = TEGRA124_CLK_PLL_C, .present = true },
[tegra_clk_pll_c_out1] = { .dt_id = TEGRA124_CLK_PLL_C_OUT1, .present = true },
@@ -902,10 +903,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3] = { .dt_id = TEGRA124_CLK_AUDIO3, .present = true },
[tegra_clk_audio4] = { .dt_id = TEGRA124_CLK_AUDIO4, .present = true },
[tegra_clk_spdif] = { .dt_id = TEGRA124_CLK_SPDIF, .present = true },
- [tegra_clk_clk_out_1] = { .dt_id = TEGRA124_CLK_CLK_OUT_1, .present = true },
- [tegra_clk_clk_out_2] = { .dt_id = TEGRA124_CLK_CLK_OUT_2, .present = true },
- [tegra_clk_clk_out_3] = { .dt_id = TEGRA124_CLK_CLK_OUT_3, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA124_CLK_BLINK, .present = true },
[tegra_clk_xusb_host_src] = { .dt_id = TEGRA124_CLK_XUSB_HOST_SRC, .present = true },
[tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA124_CLK_XUSB_FALCON_SRC, .present = true },
[tegra_clk_xusb_fs_src] = { .dt_id = TEGRA124_CLK_XUSB_FS_SRC, .present = true },
@@ -931,9 +928,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3_mux] = { .dt_id = TEGRA124_CLK_AUDIO3_MUX, .present = true },
[tegra_clk_audio4_mux] = { .dt_id = TEGRA124_CLK_AUDIO4_MUX, .present = true },
[tegra_clk_spdif_mux] = { .dt_id = TEGRA124_CLK_SPDIF_MUX, .present = true },
- [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
- [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
- [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_cec] = { .dt_id = TEGRA124_CLK_CEC, .present = true },
};
@@ -941,8 +935,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "clk_m", .dt_id = TEGRA124_CLK_CLK_M },
{ .con_id = "pll_ref", .dt_id = TEGRA124_CLK_PLL_REF },
{ .con_id = "clk_32k", .dt_id = TEGRA124_CLK_CLK_32K },
- { .con_id = "clk_m_div2", .dt_id = TEGRA124_CLK_CLK_M_DIV2 },
- { .con_id = "clk_m_div4", .dt_id = TEGRA124_CLK_CLK_M_DIV4 },
+ { .con_id = "osc", .dt_id = TEGRA124_CLK_OSC },
+ { .con_id = "osc_div2", .dt_id = TEGRA124_CLK_OSC_DIV2 },
+ { .con_id = "osc_div4", .dt_id = TEGRA124_CLK_OSC_DIV4 },
{ .con_id = "pll_c", .dt_id = TEGRA124_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA124_CLK_PLL_C_OUT1 },
{ .con_id = "pll_c2", .dt_id = TEGRA124_CLK_PLL_C2 },
@@ -988,10 +983,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "audio3_2x", .dt_id = TEGRA124_CLK_AUDIO3_2X },
{ .con_id = "audio4_2x", .dt_id = TEGRA124_CLK_AUDIO4_2X },
{ .con_id = "spdif_2x", .dt_id = TEGRA124_CLK_SPDIF_2X },
- { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA124_CLK_EXTERN1 },
- { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA124_CLK_EXTERN2 },
- { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA124_CLK_EXTERN3 },
- { .con_id = "blink", .dt_id = TEGRA124_CLK_BLINK },
+ { .con_id = "extern1", .dt_id = TEGRA124_CLK_EXTERN1 },
+ { .con_id = "extern2", .dt_id = TEGRA124_CLK_EXTERN2 },
+ { .con_id = "extern3", .dt_id = TEGRA124_CLK_EXTERN3 },
{ .con_id = "cclk_g", .dt_id = TEGRA124_CLK_CCLK_G },
{ .con_id = "cclk_lp", .dt_id = TEGRA124_CLK_CCLK_LP },
{ .con_id = "sclk", .dt_id = TEGRA124_CLK_SCLK },
@@ -1298,11 +1292,8 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0 },
{ TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0 },
{ TEGRA124_CLK_UARTD, TEGRA124_CLK_PLL_P, 408000000, 0 },
- { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 1 },
- { TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA124_CLK_EXTERN1, TEGRA124_CLK_PLL_A_OUT0, 0, 1 },
- { TEGRA124_CLK_CLK_OUT_1_MUX, TEGRA124_CLK_EXTERN1, 0, 1 },
- { TEGRA124_CLK_CLK_OUT_1, TEGRA124_CLK_CLK_MAX, 0, 1 },
+ { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 282240000, 0 },
+ { TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA124_CLK_I2S0, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S1, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
@@ -1457,11 +1448,9 @@ static void __init tegra132_clock_apply_init_table(void)
* tegra124_132_clock_init_pre - clock initialization preamble for T124/T132
* @np: struct device_node * of the DT node for the SoC CAR IP block
*
- * Register most of the clocks controlled by the CAR IP block, along
- * with a few clocks controlled by the PMC IP block. Everything in
- * this function should be common to Tegra124 and Tegra132. XXX The
- * PMC clock initialization should probably be moved to PMC-specific
- * driver code. No return value.
+ * Register most of the clocks controlled by the CAR IP block.
+ * Everything in this function should be common to Tegra124 and Tegra132.
+ * No return value.
*/
static void __init tegra124_132_clock_init_pre(struct device_node *np)
{
@@ -1504,7 +1493,6 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks,
tegra124_audio_plls,
ARRAY_SIZE(tegra124_audio_plls), 24576000);
- tegra_pmc_clk_init(pmc_base, tegra124_clks);
/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
plld_base = readl(clk_base + PLLD_BASE);
@@ -1516,11 +1504,11 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
* tegra124_132_clock_init_post - clock initialization postamble for T124/T132
* @np: struct device_node * of the DT node for the SoC CAR IP block
*
- * Register most of the along with a few clocks controlled by the PMC
- * IP block. Everything in this function should be common to Tegra124
+ * Register most of the clocks controlled by the CAR IP block.
+ * Everything in this function should be common to Tegra124
* and Tegra132. This function must be called after
- * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will
- * not be set. No return value.
+ * tegra124_132_clock_init_pre(), otherwise clk_base will not be set.
+ * No return value.
*/
static void __init tegra124_132_clock_init_post(struct device_node *np)
{
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index fff5cba87637..085feb04e913 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -458,7 +458,6 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "cdev1", .dt_id = TEGRA20_CLK_CDEV1 },
{ .con_id = "cdev2", .dt_id = TEGRA20_CLK_CDEV2 },
{ .con_id = "clk_32k", .dt_id = TEGRA20_CLK_CLK_32K },
- { .con_id = "blink", .dt_id = TEGRA20_CLK_BLINK },
{ .con_id = "clk_m", .dt_id = TEGRA20_CLK_CLK_M },
{ .con_id = "pll_ref", .dt_id = TEGRA20_CLK_PLL_REF },
{ .dev_id = "tegra20-i2s.0", .dt_id = TEGRA20_CLK_I2S1 },
@@ -537,7 +536,6 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
[tegra_clk_csi] = { .dt_id = TEGRA20_CLK_CSI, .present = true },
[tegra_clk_isp] = { .dt_id = TEGRA20_CLK_ISP, .present = true },
[tegra_clk_clk_32k] = { .dt_id = TEGRA20_CLK_CLK_32K, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA20_CLK_BLINK, .present = true },
[tegra_clk_hclk] = { .dt_id = TEGRA20_CLK_HCLK, .present = true },
[tegra_clk_pclk] = { .dt_id = TEGRA20_CLK_PCLK, .present = true },
[tegra_clk_pll_p_out1] = { .dt_id = TEGRA20_CLK_PLL_P_OUT1, .present = true },
@@ -1031,10 +1029,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_UARTC, TEGRA20_CLK_PLL_P, 0, 0 },
{ TEGRA20_CLK_UARTD, TEGRA20_CLK_PLL_P, 0, 0 },
{ TEGRA20_CLK_UARTE, TEGRA20_CLK_PLL_P, 0, 0 },
- { TEGRA20_CLK_PLL_A, TEGRA20_CLK_CLK_MAX, 56448000, 1 },
- { TEGRA20_CLK_PLL_A_OUT0, TEGRA20_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA20_CLK_CDEV1, TEGRA20_CLK_CLK_MAX, 0, 1 },
- { TEGRA20_CLK_BLINK, TEGRA20_CLK_CLK_MAX, 32768, 1 },
+ { TEGRA20_CLK_PLL_A, TEGRA20_CLK_CLK_MAX, 56448000, 0 },
+ { TEGRA20_CLK_PLL_A_OUT0, TEGRA20_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA20_CLK_I2S1, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA20_CLK_I2S2, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA20_CLK_SDMMC1, TEGRA20_CLK_PLL_P, 48000000, 0 },
@@ -1146,7 +1142,6 @@ static void __init tegra20_clock_init(struct device_node *np)
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra20_clks, NULL);
tegra20_periph_clk_init();
tegra20_audio_clk_init();
- tegra_pmc_clk_init(pmc_base, tegra20_clks);
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA20_CLK_CLK_MAX);
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 762cd186f714..defe3b7ebfa4 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -2371,8 +2371,9 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_fuse_burn] = { .dt_id = TEGRA210_CLK_FUSE_BURN, .present = true },
[tegra_clk_clk_32k] = { .dt_id = TEGRA210_CLK_CLK_32K, .present = true },
[tegra_clk_clk_m] = { .dt_id = TEGRA210_CLK_CLK_M, .present = true },
- [tegra_clk_clk_m_div2] = { .dt_id = TEGRA210_CLK_CLK_M_DIV2, .present = true },
- [tegra_clk_clk_m_div4] = { .dt_id = TEGRA210_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_osc] = { .dt_id = TEGRA210_CLK_OSC, .present = true },
+ [tegra_clk_osc_div2] = { .dt_id = TEGRA210_CLK_OSC_DIV2, .present = true },
+ [tegra_clk_osc_div4] = { .dt_id = TEGRA210_CLK_OSC_DIV4, .present = true },
[tegra_clk_pll_ref] = { .dt_id = TEGRA210_CLK_PLL_REF, .present = true },
[tegra_clk_pll_c] = { .dt_id = TEGRA210_CLK_PLL_C, .present = true },
[tegra_clk_pll_c_out1] = { .dt_id = TEGRA210_CLK_PLL_C_OUT1, .present = true },
@@ -2417,10 +2418,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3] = { .dt_id = TEGRA210_CLK_AUDIO3, .present = true },
[tegra_clk_audio4] = { .dt_id = TEGRA210_CLK_AUDIO4, .present = true },
[tegra_clk_spdif] = { .dt_id = TEGRA210_CLK_SPDIF, .present = true },
- [tegra_clk_clk_out_1] = { .dt_id = TEGRA210_CLK_CLK_OUT_1, .present = true },
- [tegra_clk_clk_out_2] = { .dt_id = TEGRA210_CLK_CLK_OUT_2, .present = true },
- [tegra_clk_clk_out_3] = { .dt_id = TEGRA210_CLK_CLK_OUT_3, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA210_CLK_BLINK, .present = true },
[tegra_clk_xusb_gate] = { .dt_id = TEGRA210_CLK_XUSB_GATE, .present = true },
[tegra_clk_xusb_host_src_8] = { .dt_id = TEGRA210_CLK_XUSB_HOST_SRC, .present = true },
[tegra_clk_xusb_falcon_src_8] = { .dt_id = TEGRA210_CLK_XUSB_FALCON_SRC, .present = true },
@@ -2452,9 +2449,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3_mux] = { .dt_id = TEGRA210_CLK_AUDIO3_MUX, .present = true },
[tegra_clk_audio4_mux] = { .dt_id = TEGRA210_CLK_AUDIO4_MUX, .present = true },
[tegra_clk_spdif_mux] = { .dt_id = TEGRA210_CLK_SPDIF_MUX, .present = true },
- [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_1_MUX, .present = true },
- [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_2_MUX, .present = true },
- [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_maud] = { .dt_id = TEGRA210_CLK_MAUD, .present = true },
[tegra_clk_mipibif] = { .dt_id = TEGRA210_CLK_MIPIBIF, .present = true },
[tegra_clk_qspi] = { .dt_id = TEGRA210_CLK_QSPI, .present = true },
@@ -2497,8 +2491,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "clk_m", .dt_id = TEGRA210_CLK_CLK_M },
{ .con_id = "pll_ref", .dt_id = TEGRA210_CLK_PLL_REF },
{ .con_id = "clk_32k", .dt_id = TEGRA210_CLK_CLK_32K },
- { .con_id = "clk_m_div2", .dt_id = TEGRA210_CLK_CLK_M_DIV2 },
- { .con_id = "clk_m_div4", .dt_id = TEGRA210_CLK_CLK_M_DIV4 },
+ { .con_id = "osc", .dt_id = TEGRA210_CLK_OSC },
+ { .con_id = "osc_div2", .dt_id = TEGRA210_CLK_OSC_DIV2 },
+ { .con_id = "osc_div4", .dt_id = TEGRA210_CLK_OSC_DIV4 },
{ .con_id = "pll_c", .dt_id = TEGRA210_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA210_CLK_PLL_C_OUT1 },
{ .con_id = "pll_c2", .dt_id = TEGRA210_CLK_PLL_C2 },
@@ -2540,10 +2535,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "audio4", .dt_id = TEGRA210_CLK_AUDIO4 },
{ .con_id = "spdif", .dt_id = TEGRA210_CLK_SPDIF },
{ .con_id = "spdif_2x", .dt_id = TEGRA210_CLK_SPDIF_2X },
- { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA210_CLK_EXTERN1 },
- { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA210_CLK_EXTERN2 },
- { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA210_CLK_EXTERN3 },
- { .con_id = "blink", .dt_id = TEGRA210_CLK_BLINK },
+ { .con_id = "extern1", .dt_id = TEGRA210_CLK_EXTERN1 },
+ { .con_id = "extern2", .dt_id = TEGRA210_CLK_EXTERN2 },
+ { .con_id = "extern3", .dt_id = TEGRA210_CLK_EXTERN3 },
{ .con_id = "cclk_g", .dt_id = TEGRA210_CLK_CCLK_G },
{ .con_id = "cclk_lp", .dt_id = TEGRA210_CLK_CCLK_LP },
{ .con_id = "sclk", .dt_id = TEGRA210_CLK_SCLK },
@@ -2999,7 +2993,7 @@ static const char * const la_parents[] = {
};
static struct tegra_clk_periph tegra210_la =
- TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, 0);
+ TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, NULL);
static __init void tegra210_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
@@ -3448,11 +3442,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_UARTB, TEGRA210_CLK_PLL_P, 408000000, 0 },
{ TEGRA210_CLK_UARTC, TEGRA210_CLK_PLL_P, 408000000, 0 },
{ TEGRA210_CLK_UARTD, TEGRA210_CLK_PLL_P, 408000000, 0 },
- { TEGRA210_CLK_PLL_A, TEGRA210_CLK_CLK_MAX, 564480000, 1 },
- { TEGRA210_CLK_PLL_A_OUT0, TEGRA210_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA210_CLK_EXTERN1, TEGRA210_CLK_PLL_A_OUT0, 0, 1 },
- { TEGRA210_CLK_CLK_OUT_1_MUX, TEGRA210_CLK_EXTERN1, 0, 1 },
- { TEGRA210_CLK_CLK_OUT_1, TEGRA210_CLK_CLK_MAX, 0, 1 },
+ { TEGRA210_CLK_PLL_A, TEGRA210_CLK_CLK_MAX, 564480000, 0 },
+ { TEGRA210_CLK_PLL_A_OUT0, TEGRA210_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA210_CLK_I2S0, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA210_CLK_I2S1, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA210_CLK_I2S2, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
@@ -3693,7 +3684,6 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
tegra210_audio_plls,
ARRAY_SIZE(tegra210_audio_plls), 24576000);
- tegra_pmc_clk_init(pmc_base, tegra210_clks);
/* For Tegra210, PLLD is the only source for DSIA & DSIB */
value = readl(clk_base + PLLD_BASE);
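
The renamed devclk entries above keep the con_id strings in step with the new OSC-derived clock names, so lookups by connection id resolve to the moved clocks. For reference, the consumer side of such an entry is just a clk_get() by con_id; a minimal sketch (hypothetical driver, the rate print is illustrative):

#include <linux/clk.h>
#include <linux/device.h>

/*
 * Minimal consumer sketch (hypothetical driver): devm_clk_get() matches
 * the con_id against the devclks[] lookups registered above, so the
 * rename from "clk_m_div2" to "osc_div2" is what consumers now see.
 */
static int example_probe(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "osc_div2");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_prepare_enable(clk);
	dev_info(dev, "osc_div2 runs at %lu Hz\n", clk_get_rate(clk));
	return 0;
}
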
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index b20891489e11..3255f82e61b5 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -569,10 +569,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "audio3_2x", .dt_id = TEGRA30_CLK_AUDIO3_2X },
{ .con_id = "audio4_2x", .dt_id = TEGRA30_CLK_AUDIO4_2X },
{ .con_id = "spdif_2x", .dt_id = TEGRA30_CLK_SPDIF_2X },
- { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA30_CLK_EXTERN1 },
- { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA30_CLK_EXTERN2 },
- { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA30_CLK_EXTERN3 },
- { .con_id = "blink", .dt_id = TEGRA30_CLK_BLINK },
+ { .con_id = "extern1", .dt_id = TEGRA30_CLK_EXTERN1 },
+ { .con_id = "extern2", .dt_id = TEGRA30_CLK_EXTERN2 },
+ { .con_id = "extern3", .dt_id = TEGRA30_CLK_EXTERN3 },
{ .con_id = "cclk_g", .dt_id = TEGRA30_CLK_CCLK_G },
{ .con_id = "cclk_lp", .dt_id = TEGRA30_CLK_CCLK_LP },
{ .con_id = "sclk", .dt_id = TEGRA30_CLK_SCLK },
@@ -581,8 +580,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "twd", .dt_id = TEGRA30_CLK_TWD },
{ .con_id = "emc", .dt_id = TEGRA30_CLK_EMC },
{ .con_id = "clk_32k", .dt_id = TEGRA30_CLK_CLK_32K },
- { .con_id = "clk_m_div2", .dt_id = TEGRA30_CLK_CLK_M_DIV2 },
- { .con_id = "clk_m_div4", .dt_id = TEGRA30_CLK_CLK_M_DIV4 },
+ { .con_id = "osc", .dt_id = TEGRA30_CLK_OSC },
+ { .con_id = "osc_div2", .dt_id = TEGRA30_CLK_OSC_DIV2 },
+ { .con_id = "osc_div4", .dt_id = TEGRA30_CLK_OSC_DIV4 },
{ .con_id = "cml0", .dt_id = TEGRA30_CLK_CML0 },
{ .con_id = "cml1", .dt_id = TEGRA30_CLK_CML1 },
{ .con_id = "clk_m", .dt_id = TEGRA30_CLK_CLK_M },
@@ -683,8 +683,9 @@ static struct tegra_devclk devclks[] __initdata = {
static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_clk_32k] = { .dt_id = TEGRA30_CLK_CLK_32K, .present = true },
[tegra_clk_clk_m] = { .dt_id = TEGRA30_CLK_CLK_M, .present = true },
- [tegra_clk_clk_m_div2] = { .dt_id = TEGRA30_CLK_CLK_M_DIV2, .present = true },
- [tegra_clk_clk_m_div4] = { .dt_id = TEGRA30_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_osc] = { .dt_id = TEGRA30_CLK_OSC, .present = true },
+ [tegra_clk_osc_div2] = { .dt_id = TEGRA30_CLK_OSC_DIV2, .present = true },
+ [tegra_clk_osc_div4] = { .dt_id = TEGRA30_CLK_OSC_DIV4, .present = true },
[tegra_clk_pll_ref] = { .dt_id = TEGRA30_CLK_PLL_REF, .present = true },
[tegra_clk_spdif_in_sync] = { .dt_id = TEGRA30_CLK_SPDIF_IN_SYNC, .present = true },
[tegra_clk_i2s0_sync] = { .dt_id = TEGRA30_CLK_I2S0_SYNC, .present = true },
@@ -711,13 +712,6 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3_2x] = { .dt_id = TEGRA30_CLK_AUDIO3_2X, .present = true },
[tegra_clk_audio4_2x] = { .dt_id = TEGRA30_CLK_AUDIO4_2X, .present = true },
[tegra_clk_spdif_2x] = { .dt_id = TEGRA30_CLK_SPDIF_2X, .present = true },
- [tegra_clk_clk_out_1] = { .dt_id = TEGRA30_CLK_CLK_OUT_1, .present = true },
- [tegra_clk_clk_out_2] = { .dt_id = TEGRA30_CLK_CLK_OUT_2, .present = true },
- [tegra_clk_clk_out_3] = { .dt_id = TEGRA30_CLK_CLK_OUT_3, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA30_CLK_BLINK, .present = true },
- [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_1_MUX, .present = true },
- [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_2_MUX, .present = true },
- [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_hclk] = { .dt_id = TEGRA30_CLK_HCLK, .present = true },
[tegra_clk_pclk] = { .dt_id = TEGRA30_CLK_PCLK, .present = true },
[tegra_clk_i2s0] = { .dt_id = TEGRA30_CLK_I2S0, .present = true },
@@ -1227,12 +1221,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_UARTC, TEGRA30_CLK_PLL_P, 408000000, 0 },
{ TEGRA30_CLK_UARTD, TEGRA30_CLK_PLL_P, 408000000, 0 },
{ TEGRA30_CLK_UARTE, TEGRA30_CLK_PLL_P, 408000000, 0 },
- { TEGRA30_CLK_PLL_A, TEGRA30_CLK_CLK_MAX, 564480000, 1 },
- { TEGRA30_CLK_PLL_A_OUT0, TEGRA30_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA30_CLK_EXTERN1, TEGRA30_CLK_PLL_A_OUT0, 0, 1 },
- { TEGRA30_CLK_CLK_OUT_1_MUX, TEGRA30_CLK_EXTERN1, 0, 0 },
- { TEGRA30_CLK_CLK_OUT_1, TEGRA30_CLK_CLK_MAX, 0, 1 },
- { TEGRA30_CLK_BLINK, TEGRA30_CLK_CLK_MAX, 0, 1 },
+ { TEGRA30_CLK_PLL_A, TEGRA30_CLK_CLK_MAX, 564480000, 0 },
+ { TEGRA30_CLK_PLL_A_OUT0, TEGRA30_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA30_CLK_I2S0, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA30_CLK_I2S1, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA30_CLK_I2S2, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 },
@@ -1362,7 +1352,6 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks,
tegra30_audio_plls,
ARRAY_SIZE(tegra30_audio_plls), 24000000);
- tegra_pmc_clk_init(pmc_base, tegra30_clks);
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 416a6b09f6a3..2c9a68302e02 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -854,7 +854,6 @@ void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
struct tegra_clk *tegra_clks,
struct tegra_clk_pll_params *pll_params);
-void tegra_pmc_clk_init(void __iomem *pmc_base, struct tegra_clk *tegra_clks);
void tegra_fixed_clk_init(struct tegra_clk *tegra_clks);
int tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
unsigned long *input_freqs, unsigned int num,
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index e001b9bcb6bf..7dc30dd6c8d5 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -212,7 +212,7 @@ static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = {
};
static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = {
- { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+ { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk-24mhz-clkctrl:0000:0" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c
index 087cfa75ac24..4f8bd34ec1a5 100644
--- a/drivers/clk/ti/clk-814x.c
+++ b/drivers/clk/ti/clk-814x.c
@@ -25,7 +25,6 @@ static const struct omap_clkctrl_reg_data dm814_alwon_clkctrl_regs[] __initconst
{ DM814_WD_TIMER_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk18_ck" },
{ DM814_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
{ DM814_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
- { DM814_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
{ DM814_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "mpu_ck" },
{ DM814_RTC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk18_ck" },
{ DM814_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
@@ -39,9 +38,15 @@ static const struct omap_clkctrl_reg_data dm814_alwon_clkctrl_regs[] __initconst
{ 0 },
};
+static const struct
+omap_clkctrl_reg_data dm814_alwon_ethernet_clkctrl_regs[] __initconst = {
+ { 0, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
+	{ 0 },
+};
+
const struct omap_clkctrl_data dm814_clkctrl_data[] __initconst = {
{ 0x48180500, dm814_default_clkctrl_regs },
{ 0x48181400, dm814_alwon_clkctrl_regs },
+ { 0x481815d4, dm814_alwon_ethernet_clkctrl_regs },
{ 0 },
};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 062266034d84..864c484bde1b 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -255,24 +255,53 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
return entry->clk;
}
+/* Get clkctrl clock base name based on clkctrl_name or dts node */
+static const char * __init clkctrl_get_clock_name(struct device_node *np,
+ const char *clkctrl_name,
+ int offset, int index,
+ bool legacy_naming)
+{
+ char *clock_name;
+
+ /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
+ if (clkctrl_name && !legacy_naming) {
+ clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+ clkctrl_name, offset, index);
+ strreplace(clock_name, '_', '-');
+
+ return clock_name;
+ }
+
+ /* l4per:1234:0 old style naming based on clkctrl_name */
+ if (clkctrl_name)
+ return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
+ clkctrl_name, offset, index);
+
+ /* l4per_cm:1234:0 old style naming based on parent node name */
+ if (legacy_naming)
+ return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
+ np->parent, offset, index);
+
+ /* l4per-clkctrl:1234:0 style naming based on node name */
+ return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
+}
+
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
struct device_node *node, struct clk_hw *clk_hw,
u16 offset, u8 bit, const char * const *parents,
- int num_parents, const struct clk_ops *ops)
+ int num_parents, const struct clk_ops *ops,
+ const char *clkctrl_name)
{
struct clk_init_data init = { NULL };
struct clk *clk;
struct omap_clkctrl_clk *clkctrl_clk;
int ret = 0;
- if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
- init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
- node->parent, node, offset,
- bit);
- else
- init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node,
- offset, bit);
+ init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
+ ti_clk_get_features()->flags &
+ TI_CLK_CLKCTRL_COMPAT);
+
clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
if (!init.name || !clkctrl_clk) {
ret = -ENOMEM;
@@ -309,7 +338,7 @@ static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
struct device_node *node, u16 offset,
const struct omap_clkctrl_bit_data *data,
- void __iomem *reg)
+ void __iomem *reg, const char *clkctrl_name)
{
struct clk_hw_omap *clk_hw;
@@ -322,7 +351,7 @@ _ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
data->bit, data->parents, 1,
- &omap_gate_clk_ops))
+ &omap_gate_clk_ops, clkctrl_name))
kfree(clk_hw);
}
@@ -330,7 +359,7 @@ static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
struct device_node *node, u16 offset,
const struct omap_clkctrl_bit_data *data,
- void __iomem *reg)
+ void __iomem *reg, const char *clkctrl_name)
{
struct clk_omap_mux *mux;
int num_parents = 0;
@@ -357,7 +386,7 @@ _ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
data->bit, data->parents, num_parents,
- &ti_clk_mux_ops))
+ &ti_clk_mux_ops, clkctrl_name))
kfree(mux);
}
@@ -365,7 +394,7 @@ static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
struct device_node *node, u16 offset,
const struct omap_clkctrl_bit_data *data,
- void __iomem *reg)
+ void __iomem *reg, const char *clkctrl_name)
{
struct clk_omap_divider *div;
const struct omap_clkctrl_div_data *div_data = data->data;
@@ -393,7 +422,7 @@ _ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
data->bit, data->parents, 1,
- &ti_clk_divider_ops))
+ &ti_clk_divider_ops, clkctrl_name))
kfree(div);
}
@@ -401,7 +430,7 @@ static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
struct device_node *node,
const struct omap_clkctrl_reg_data *data,
- void __iomem *reg)
+ void __iomem *reg, const char *clkctrl_name)
{
const struct omap_clkctrl_bit_data *bits = data->bit_data;
@@ -412,17 +441,17 @@ _ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
switch (bits->type) {
case TI_CLK_GATE:
_ti_clkctrl_setup_gate(provider, node, data->offset,
- bits, reg);
+ bits, reg, clkctrl_name);
break;
case TI_CLK_DIVIDER:
_ti_clkctrl_setup_div(provider, node, data->offset,
- bits, reg);
+ bits, reg, clkctrl_name);
break;
case TI_CLK_MUX:
_ti_clkctrl_setup_mux(provider, node, data->offset,
- bits, reg);
+ bits, reg, clkctrl_name);
break;
default:
@@ -461,42 +490,10 @@ static char * __init clkctrl_get_name(struct device_node *np)
return name;
}
}
- of_node_put(np);
return NULL;
}
-/* Get clkctrl clock base name based on clkctrl_name or dts node */
-static const char * __init clkctrl_get_clock_name(struct device_node *np,
- const char *clkctrl_name,
- int offset, int index,
- bool legacy_naming)
-{
- char *clock_name;
-
- /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
- if (clkctrl_name && !legacy_naming) {
- clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
- clkctrl_name, offset, index);
- strreplace(clock_name, '_', '-');
-
- return clock_name;
- }
-
- /* l4per:1234:0 old style naming based on clkctrl_name */
- if (clkctrl_name)
- return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
- clkctrl_name, offset, index);
-
- /* l4per_cm:1234:0 old style naming based on parent node name */
- if (legacy_naming)
- return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
- np->parent, offset, index);
-
- /* l4per-clkctrl:1234:0 style naming based on node name */
- return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
-}
-
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
struct omap_clkctrl_provider *provider;
@@ -664,7 +661,7 @@ clkdm_found:
hw->enable_reg.ptr = provider->base + reg_data->offset;
_ti_clkctrl_setup_subclks(provider, node, reg_data,
- hw->enable_reg.ptr);
+ hw->enable_reg.ptr, clkctrl_name);
if (reg_data->flags & CLKF_SW_SUP)
hw->enable_bit = MODULEMODE_SWCTRL;
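
clkctrl_get_clock_name(), moved up and extended above, chooses between four naming schemes depending on whether a clkctrl_name was found and whether legacy (TI_CLK_CLKCTRL_COMPAT) naming is in force. A standalone sketch of the same selection logic, with snprintf standing in for kasprintf and an explicit loop for strreplace(), shows what each scheme yields for a made-up l4_per gate at offset 0x28, bit 8:

#include <stdio.h>
#include <stdbool.h>

/* Mirror of the four naming schemes, for illustration only. */
static void print_name(const char *clkctrl_name, const char *node,
		       const char *parent, int offset, int index,
		       bool legacy)
{
	char buf[64];

	if (clkctrl_name && !legacy) {
		snprintf(buf, sizeof(buf), "%s-clkctrl:%04x:%d",
			 clkctrl_name, offset, index);
		for (char *p = buf; *p; p++)	/* strreplace('_', '-') */
			if (*p == '_')
				*p = '-';
	} else if (clkctrl_name) {
		snprintf(buf, sizeof(buf), "%s_cm:clk:%04x:%d",
			 clkctrl_name, offset, index);
	} else if (legacy) {
		snprintf(buf, sizeof(buf), "%s:clk:%04x:%d",
			 parent, offset, index);
	} else {
		snprintf(buf, sizeof(buf), "%s:%04x:%d", node, offset, index);
	}
	puts(buf);
}

int main(void)
{
	/* Hypothetical l4_per gate at offset 0x28, bit 8. */
	print_name("l4_per", "clock", "l4per_cm", 0x28, 8, false);
	/* -> l4-per-clkctrl:0028:8 */
	print_name("l4_per", "clock", "l4per_cm", 0x28, 8, true);
	/* -> l4_per_cm:clk:0028:8 */
	print_name(NULL, "clock", "l4per_cm", 0x28, 8, true);
	/* -> l4per_cm:clk:0028:8 */
	print_name(NULL, "l4per-clkctrl", "cm", 0x28, 8, false);
	/* -> l4per-clkctrl:0028:8 */
	return 0;
}
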
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index fe686f77787f..692be2fd9261 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -34,18 +34,6 @@
#define INTEGRATOR_AP_PCI_25_33_MHZ BIT(8)
/**
- * enum icst_control_type - the type of ICST control register
- */
-enum icst_control_type {
- ICST_VERSATILE, /* The standard type, all control bits available */
- ICST_INTEGRATOR_AP_CM, /* Only 8 bits of VDW available */
- ICST_INTEGRATOR_AP_SYS, /* Only 8 bits of VDW available */
- ICST_INTEGRATOR_AP_PCI, /* Odd bit pattern storage */
- ICST_INTEGRATOR_CP_CM_CORE, /* Only 8 bits of VDW and 3 bits of OD */
- ICST_INTEGRATOR_CP_CM_MEM, /* Only 8 bits of VDW and 3 bits of OD */
-};
-
-/**
* struct clk_icst - ICST VCO clock wrapper
* @hw: corresponding clock hardware entry
* @vcoreg: VCO register address
@@ -344,12 +332,12 @@ static const struct clk_ops icst_ops = {
.set_rate = icst_set_rate,
};
-static struct clk *icst_clk_setup(struct device *dev,
- const struct clk_icst_desc *desc,
- const char *name,
- const char *parent_name,
- struct regmap *map,
- enum icst_control_type ctype)
+struct clk *icst_clk_setup(struct device *dev,
+ const struct clk_icst_desc *desc,
+ const char *name,
+ const char *parent_name,
+ struct regmap *map,
+ enum icst_control_type ctype)
{
struct clk *clk;
struct clk_icst *icst;
@@ -386,6 +374,7 @@ static struct clk *icst_clk_setup(struct device *dev,
return clk;
}
+EXPORT_SYMBOL_GPL(icst_clk_setup);
struct clk *icst_clk_register(struct device *dev,
const struct clk_icst_desc *desc,
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
index e36ca1a20e90..1a119ef11066 100644
--- a/drivers/clk/versatile/clk-icst.h
+++ b/drivers/clk/versatile/clk-icst.h
@@ -1,4 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
+struct regmap;
+
+/**
+ * enum icst_control_type - the type of ICST control register
+ */
+enum icst_control_type {
+ ICST_VERSATILE, /* The standard type, all control bits available */
+ ICST_INTEGRATOR_AP_CM, /* Only 8 bits of VDW available */
+ ICST_INTEGRATOR_AP_SYS, /* Only 8 bits of VDW available */
+ ICST_INTEGRATOR_AP_PCI, /* Odd bit pattern storage */
+ ICST_INTEGRATOR_CP_CM_CORE, /* Only 8 bits of VDW and 3 bits of OD */
+ ICST_INTEGRATOR_CP_CM_MEM, /* Only 8 bits of VDW and 3 bits of OD */
+ ICST_INTEGRATOR_IM_PD1, /* Like the Versatile, all control bits */
+};
+
/**
* struct clk_icst_desc - descriptor for the ICST VCO
* @params: ICST parameters
@@ -17,3 +32,10 @@ struct clk *icst_clk_register(struct device *dev,
const char *name,
const char *parent_name,
void __iomem *base);
+
+struct clk *icst_clk_setup(struct device *dev,
+ const struct clk_icst_desc *desc,
+ const char *name,
+ const char *parent_name,
+ struct regmap *map,
+ enum icst_control_type ctype);
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 1991f15a5db9..f9f4babe3ca6 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -7,7 +7,11 @@
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/clk-integrator.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "icst.h"
#include "clk-icst.h"
@@ -175,3 +179,79 @@ void integrator_impd1_clk_exit(unsigned int id)
kfree(imc->pclkname);
}
EXPORT_SYMBOL_GPL(integrator_impd1_clk_exit);
+
+static int integrator_impd1_clk_spawn(struct device *dev,
+ struct device_node *parent,
+ struct device_node *np)
+{
+ struct regmap *map;
+ struct clk *clk = ERR_PTR(-EINVAL);
+ const char *name = np->name;
+ const char *parent_name;
+ const struct clk_icst_desc *desc;
+ int ret;
+
+ map = syscon_node_to_regmap(parent);
+ if (IS_ERR(map)) {
+ pr_err("no regmap for syscon IM-PD1 ICST clock parent\n");
+ return PTR_ERR(map);
+ }
+
+ if (of_device_is_compatible(np, "arm,impd1-vco1")) {
+ desc = &impd1_icst1_desc;
+ } else if (of_device_is_compatible(np, "arm,impd1-vco2")) {
+ desc = &impd1_icst2_desc;
+ } else {
+ dev_err(dev, "not a clock node %s\n", name);
+ return -ENODEV;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+ parent_name = of_clk_get_parent_name(np, 0);
+ clk = icst_clk_setup(NULL, desc, name, parent_name, map,
+ ICST_INTEGRATOR_IM_PD1);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ ret = 0;
+ } else {
+ dev_err(dev, "error setting up IM-PD1 ICST clock\n");
+ ret = PTR_ERR(clk);
+ }
+
+ return ret;
+}
+
+static int integrator_impd1_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int ret = 0;
+
+ for_each_available_child_of_node(np, child) {
+ ret = integrator_impd1_clk_spawn(dev, np, child);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id impd1_syscon_match[] = {
+ { .compatible = "arm,im-pd1-syscon", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, impd1_syscon_match);
+
+static struct platform_driver impd1_clk_driver = {
+ .driver = {
+ .name = "impd1-clk",
+ .of_match_table = impd1_syscon_match,
+ },
+ .probe = integrator_impd1_clk_probe,
+};
+builtin_platform_driver(impd1_clk_driver);
+
+MODULE_AUTHOR("Linus Walleij <linusw@kernel.org>");
+MODULE_DESCRIPTION("Arm IM-PD1 module clock driver");
+MODULE_LICENSE("GPL v2");
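
icst_clk_setup() is made non-static and exported above so that this new DT probe path can hand it a syscon regmap instead of a raw __iomem base. The regmap access pattern behind that choice is roughly the following sketch; the offset and field macros are illustrative names, not taken from the driver:

#include <linux/bits.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/*
 * Sketch: drive a VCO control field through a syscon regmap. The
 * IMPD1_VCO_* names are illustrative only, not taken from the driver.
 */
#define IMPD1_VCO_OFFSET	0x00
#define IMPD1_VCO_VDW_MASK	GENMASK(7, 0)

static int impd1_vco_set_vdw(struct regmap *map, unsigned int vdw)
{
	return regmap_update_bits(map, IMPD1_VCO_OFFSET,
				  IMPD1_VCO_VDW_MASK, vdw);
}
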
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cc909e465823..f2142e6bbea3 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -697,6 +697,14 @@ config INGENIC_TIMER
help
Support for the timer/counter unit of the Ingenic JZ SoCs.
+config INGENIC_OST
+ bool "Clocksource for Ingenic OS Timer"
+ depends on MIPS || COMPILE_TEST
+ depends on COMMON_CLK
+ select MFD_SYSCON
+ help
+ Support for the Operating System Timer of the Ingenic JZ SoCs.
+
config MICROCHIP_PIT64B
bool "Microchip PIT64B support"
depends on OF || COMPILE_TEST
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 713686faa549..641ba5383ab5 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
+obj-$(CONFIG_INGENIC_OST) += ingenic-ost.o
obj-$(CONFIG_INGENIC_TIMER) += ingenic-timer.o
obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 9a5464c625b4..2204a444e801 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -69,7 +69,11 @@ static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
-static bool vdso_default = true;
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
@@ -560,8 +564,8 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
* change both the default value and the vdso itself.
*/
if (wa->read_cntvct_el0) {
- clocksource_counter.archdata.vdso_direct = false;
- vdso_default = false;
+ clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+ vdso_default = VDSO_CLOCKMODE_NONE;
}
}
@@ -885,6 +889,17 @@ static int arch_timer_starting_cpu(unsigned int cpu)
return 0;
}
+static int validate_timer_rate(void)
+{
+ if (!arch_timer_rate)
+ return -EINVAL;
+
+ /* Arch timer frequency < 1MHz can cause trouble */
+ WARN_ON(arch_timer_rate < 1000000);
+
+ return 0;
+}
+
/*
* For historical reasons, when probing with DT we use whichever (non-zero)
* rate was probed first, and don't verify that others match. If the first node
@@ -900,7 +915,7 @@ static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
arch_timer_rate = rate;
/* Check the timer frequency. */
- if (arch_timer_rate == 0)
+ if (validate_timer_rate())
pr_warn("frequency not available\n");
}
@@ -979,7 +994,7 @@ static void __init arch_counter_register(unsigned type)
}
arch_timer_read_counter = rd;
- clocksource_counter.archdata.vdso_direct = vdso_default;
+ clocksource_counter.vdso_clock_mode = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
@@ -1594,9 +1609,10 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
* CNTFRQ value. This *must* be correct.
*/
arch_timer_rate = arch_timer_get_cntfrq();
- if (!arch_timer_rate) {
+ ret = validate_timer_rate();
+ if (ret) {
pr_err(FW_BUG "frequency not available.\n");
- return -EINVAL;
+ return ret;
}
arch_timer_uses_ppi = arch_timer_select_ppi();
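
validate_timer_rate() folds the old zero-rate check together with a new WARN_ON for rates under 1 MHz, where a single counter tick is already coarser than a microsecond. A quick standalone computation makes the threshold concrete:

#include <stdio.h>

int main(void)
{
	/* ns per counter tick for a few candidate CNTFRQ values */
	const unsigned long rates[] = { 32768, 999999, 1000000, 19200000 };

	for (unsigned i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%9lu Hz -> %8.1f ns/tick%s\n", rates[i],
		       1e9 / rates[i],
		       rates[i] < 1000000 ? "  (would trigger WARN_ON)" : "");
	return 0;
}
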
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index b235f446ee50..1592650b2c92 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -31,7 +31,6 @@ struct bcm2835_timer {
void __iomem *compare;
int match_mask;
struct clock_event_device evt;
- struct irqaction act;
};
static void __iomem *system_clock __read_mostly;
@@ -113,12 +112,9 @@ static int __init bcm2835_timer_init(struct device_node *node)
timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
timer->evt.set_next_event = bcm2835_time_set_next_event;
timer->evt.cpumask = cpumask_of(0);
- timer->act.name = node->name;
- timer->act.flags = IRQF_TIMER | IRQF_SHARED;
- timer->act.dev_id = timer;
- timer->act.handler = bcm2835_time_interrupt;
- ret = setup_irq(irq, &timer->act);
+ ret = request_irq(irq, bcm2835_time_interrupt, IRQF_TIMER | IRQF_SHARED,
+ node->name, timer);
if (ret) {
pr_err("Can't set up timer IRQ\n");
goto err_timer_free;
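
This bcm2835 hunk is the first of a long series of identical conversions in this section: each driver drops its hand-built static struct irqaction plus setup_irq() in favor of request_irq(), which allocates and fills the irqaction internally. The shape of the conversion, with placeholder foo names standing in for each driver's own:

#include <linux/clockchips.h>
#include <linux/interrupt.h>

static struct clock_event_device foo_clockevent;

static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

/*
 * Before: a static struct irqaction { .name, .flags, .handler, .dev_id }
 * registered with setup_irq(irq, &act). After: one request_irq() call,
 * which builds the irqaction internally from the same four values.
 */
static int foo_timer_setup(unsigned int irq)
{
	return request_irq(irq, foo_timer_interrupt, IRQF_TIMER,
			   "foo_timer", &foo_clockevent);
}
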
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 5c40be9880f5..a50ab5c2154f 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -160,12 +160,6 @@ static irqreturn_t kona_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction kona_timer_irq = {
- .name = "Kona Timer Tick",
- .flags = IRQF_TIMER,
- .handler = kona_timer_interrupt,
-};
-
static int __init kona_timer_init(struct device_node *node)
{
u32 freq;
@@ -192,7 +186,9 @@ static int __init kona_timer_init(struct device_node *node)
kona_timer_disable_and_clear(timers.tmr_regs);
kona_timer_clockevents_init();
- setup_irq(timers.tmr_irq, &kona_timer_irq);
+ if (request_irq(timers.tmr_irq, kona_timer_interrupt, IRQF_TIMER,
+ "Kona Timer Tick", NULL))
+ pr_err("%s: request_irq() failed\n", "Kona Timer Tick");
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
return 0;
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 654766538f93..b207a77b0831 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -270,15 +270,10 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->ced.rating = rating;
dw_ced->ced.name = name;
- dw_ced->irqaction.name = dw_ced->ced.name;
- dw_ced->irqaction.handler = dw_apb_clockevent_irq;
- dw_ced->irqaction.dev_id = &dw_ced->ced;
- dw_ced->irqaction.irq = irq;
- dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL |
- IRQF_NOBALANCING;
-
dw_ced->eoi = apbt_eoi;
- err = setup_irq(irq, &dw_ced->irqaction);
+ err = request_irq(irq, dw_apb_clockevent_irq,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dw_ced->ced.name, &dw_ced->ced);
if (err) {
pr_err("failed to request timer irq\n");
kfree(dw_ced);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index a267fe31ef13..fabad79baafc 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -329,19 +329,15 @@ static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction mct_comp_event_irq = {
- .name = "mct_comp_irq",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = exynos4_mct_comp_isr,
- .dev_id = &mct_comp_device,
-};
-
static int exynos4_clockevent_init(void)
{
mct_comp_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&mct_comp_device, clk_rate,
0xf, 0xffffffff);
- setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
+ if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr,
+ IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq",
+ &mct_comp_device))
+ pr_err("%s: request_irq() failed\n", "mct_comp_irq");
return 0;
}
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index eb0ba7818eb0..09aa44cb8a91 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -370,6 +370,12 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
hv_set_reference_tsc(tsc_msr);
}
+static int hv_cs_enable(struct clocksource *cs)
+{
+ hv_enable_vdso_clocksource();
+ return 0;
+}
+
static struct clocksource hyperv_cs_tsc = {
.name = "hyperv_clocksource_tsc_page",
.rating = 250,
@@ -378,6 +384,7 @@ static struct clocksource hyperv_cs_tsc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.suspend= suspend_hv_clock_tsc,
.resume = resume_hv_clock_tsc,
+ .enable = hv_cs_enable,
};
static u64 notrace read_hv_clock_msr(void)
diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
new file mode 100644
index 000000000000..029efc2731b4
--- /dev/null
+++ b/drivers/clocksource/ingenic-ost.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * JZ47xx SoCs TCU Operating System Timer driver
+ *
+ * Copyright (C) 2016 Maarten ter Huurne <maarten@treewalker.org>
+ * Copyright (C) 2020 Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/mfd/ingenic-tcu.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/sched_clock.h>
+
+#define TCU_OST_TCSR_MASK 0xffc0
+#define TCU_OST_TCSR_CNT_MD BIT(15)
+
+#define TCU_OST_CHANNEL 15
+
+/*
+ * The TCU_REG_OST_CNT{L,H} from <linux/mfd/ingenic-tcu.h> are only for the
+ * regmap; these are for use with the __iomem pointer.
+ */
+#define OST_REG_CNTL 0x4
+#define OST_REG_CNTH 0x8
+
+struct ingenic_ost_soc_info {
+ bool is64bit;
+};
+
+struct ingenic_ost {
+ void __iomem *regs;
+ struct clk *clk;
+
+ struct clocksource cs;
+};
+
+static struct ingenic_ost *ingenic_ost;
+
+static u64 notrace ingenic_ost_read_cntl(void)
+{
+ /* Read using __iomem pointer instead of regmap to avoid locking */
+ return readl(ingenic_ost->regs + OST_REG_CNTL);
+}
+
+static u64 notrace ingenic_ost_read_cnth(void)
+{
+ /* Read using __iomem pointer instead of regmap to avoid locking */
+ return readl(ingenic_ost->regs + OST_REG_CNTH);
+}
+
+static u64 notrace ingenic_ost_clocksource_readl(struct clocksource *cs)
+{
+ return ingenic_ost_read_cntl();
+}
+
+static u64 notrace ingenic_ost_clocksource_readh(struct clocksource *cs)
+{
+ return ingenic_ost_read_cnth();
+}
+
+static int __init ingenic_ost_probe(struct platform_device *pdev)
+{
+ const struct ingenic_ost_soc_info *soc_info;
+ struct device *dev = &pdev->dev;
+ struct ingenic_ost *ost;
+ struct clocksource *cs;
+ struct regmap *map;
+ unsigned long rate;
+ int err;
+
+ soc_info = device_get_match_data(dev);
+ if (!soc_info)
+ return -EINVAL;
+
+ ost = devm_kzalloc(dev, sizeof(*ost), GFP_KERNEL);
+ if (!ost)
+ return -ENOMEM;
+
+ ingenic_ost = ost;
+
+ ost->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ost->regs))
+ return PTR_ERR(ost->regs);
+
+ map = device_node_to_regmap(dev->parent->of_node);
+ if (!map) {
+ dev_err(dev, "regmap not found");
+ return -EINVAL;
+ }
+
+ ost->clk = devm_clk_get(dev, "ost");
+ if (IS_ERR(ost->clk))
+ return PTR_ERR(ost->clk);
+
+ err = clk_prepare_enable(ost->clk);
+ if (err)
+ return err;
+
+ /* Clear counter high/low registers */
+ if (soc_info->is64bit)
+ regmap_write(map, TCU_REG_OST_CNTL, 0);
+ regmap_write(map, TCU_REG_OST_CNTH, 0);
+
+ /* Don't reset counter at compare value. */
+ regmap_update_bits(map, TCU_REG_OST_TCSR,
+ TCU_OST_TCSR_MASK, TCU_OST_TCSR_CNT_MD);
+
+ rate = clk_get_rate(ost->clk);
+
+ /* Enable OST TCU channel */
+ regmap_write(map, TCU_REG_TESR, BIT(TCU_OST_CHANNEL));
+
+ cs = &ost->cs;
+ cs->name = "ingenic-ost";
+ cs->rating = 320;
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->mask = CLOCKSOURCE_MASK(32);
+
+ if (soc_info->is64bit)
+ cs->read = ingenic_ost_clocksource_readl;
+ else
+ cs->read = ingenic_ost_clocksource_readh;
+
+ err = clocksource_register_hz(cs, rate);
+ if (err) {
+ dev_err(dev, "clocksource registration failed");
+ clk_disable_unprepare(ost->clk);
+ return err;
+ }
+
+ if (soc_info->is64bit)
+ sched_clock_register(ingenic_ost_read_cntl, 32, rate);
+ else
+ sched_clock_register(ingenic_ost_read_cnth, 32, rate);
+
+ return 0;
+}
+
+static int __maybe_unused ingenic_ost_suspend(struct device *dev)
+{
+ struct ingenic_ost *ost = dev_get_drvdata(dev);
+
+ clk_disable(ost->clk);
+
+ return 0;
+}
+
+static int __maybe_unused ingenic_ost_resume(struct device *dev)
+{
+ struct ingenic_ost *ost = dev_get_drvdata(dev);
+
+ return clk_enable(ost->clk);
+}
+
+static const struct dev_pm_ops __maybe_unused ingenic_ost_pm_ops = {
+ /* _noirq: We want the OST clock to be gated last / ungated first */
+ .suspend_noirq = ingenic_ost_suspend,
+ .resume_noirq = ingenic_ost_resume,
+};
+
+static const struct ingenic_ost_soc_info jz4725b_ost_soc_info = {
+ .is64bit = false,
+};
+
+static const struct ingenic_ost_soc_info jz4770_ost_soc_info = {
+ .is64bit = true,
+};
+
+static const struct of_device_id ingenic_ost_of_match[] = {
+ { .compatible = "ingenic,jz4725b-ost", .data = &jz4725b_ost_soc_info, },
+ { .compatible = "ingenic,jz4770-ost", .data = &jz4770_ost_soc_info, },
+ { }
+};
+
+static struct platform_driver ingenic_ost_driver = {
+ .driver = {
+ .name = "ingenic-ost",
+#ifdef CONFIG_PM_SUSPEND
+ .pm = &ingenic_ost_pm_ops,
+#endif
+ .of_match_table = ingenic_ost_of_match,
+ },
+};
+builtin_platform_driver_probe(ingenic_ost_driver, ingenic_ost_probe);
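
Note that although the jz4770 OST is a 64-bit counter, the driver above deliberately exposes only 32 bits (CLOCKSOURCE_MASK(32) and a 32-bit sched_clock), so one readl() per read is coherent without locking in the notrace paths. If the full 64 bits were ever wanted, the usual high-low-high sequence over the two halves would be needed; a sketch, not part of the driver:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Sketch: coherent 64-bit read from two 32-bit counter halves, using
 * OST_REG_CNTL/OST_REG_CNTH as defined in the driver above.
 */
static u64 ingenic_ost_read64(void __iomem *regs)
{
	u32 hi, lo, hi2;

	do {
		hi  = readl(regs + OST_REG_CNTH);
		lo  = readl(regs + OST_REG_CNTL);
		hi2 = readl(regs + OST_REG_CNTH);
	} while (hi != hi2);	/* retry if the low word wrapped mid-read */

	return ((u64)hi << 32) | lo;
}
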
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 4bbdb3d3d0c6..496333650de2 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -230,6 +230,7 @@ static const struct of_device_id ingenic_tcu_of_match[] = {
{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
+ { .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
{ /* sentinel */ }
};
@@ -302,7 +303,7 @@ err_free_ingenic_tcu:
TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init);
-
+TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init);
static int __init ingenic_tcu_probe(struct platform_device *pdev)
{
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 37671a5d4ed9..8b5f8ae723cb 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -155,10 +155,10 @@ static u64 gic_hpt_read(struct clocksource *cs)
}
static struct clocksource gic_clocksource = {
- .name = "GIC",
- .read = gic_hpt_read,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC },
+ .name = "GIC",
+ .read = gic_hpt_read,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .vdso_clock_mode = VDSO_CLOCKMODE_GIC,
};
static int __init __gic_clocksource_init(void)
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index f6ddae30933f..bc96a4cbf26c 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -117,13 +117,6 @@ static irqreturn_t mxs_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction mxs_timer_irq = {
- .name = "MXS Timer Tick",
- .dev_id = &mxs_clockevent_device,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = mxs_timer_interrupt,
-};
-
static void mxs_irq_clear(char *state)
{
/* Disable interrupt in timer module */
@@ -277,6 +270,7 @@ static int __init mxs_timer_init(struct device_node *np)
if (irq <= 0)
return -EINVAL;
- return setup_irq(irq, &mxs_timer_irq);
+ return request_irq(irq, mxs_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "MXS Timer Tick", &mxs_clockevent_device);
}
TIMER_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 3f7fa8c01367..f49a631d8f58 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -181,13 +181,6 @@ static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction nmdk_timer_irq = {
- .name = "Nomadik Timer Tick",
- .flags = IRQF_TIMER,
- .handler = nmdk_timer_interrupt,
- .dev_id = &nmdk_clkevt,
-};
-
static int __init nmdk_timer_init(void __iomem *base, int irq,
struct clk *pclk, struct clk *clk)
{
@@ -232,7 +225,9 @@ static int __init nmdk_timer_init(void __iomem *base, int irq,
sched_clock_register(nomadik_read_sched_clock, 32, rate);
/* Timer 1 is used for events, register irq and clockevents */
- setup_irq(irq, &nmdk_timer_irq);
+ if (request_irq(irq, nmdk_timer_interrupt, IRQF_TIMER,
+ "Nomadik Timer Tick", &nmdk_clkevt))
+ pr_err("%s: request_irq() failed\n", "Nomadik Timer Tick");
nmdk_clkevt.cpumask = cpumask_of(0);
nmdk_clkevt.irq = irq;
clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index dae1b2b5a0c5..f760229d0c7f 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -256,13 +256,6 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction samsung_clock_event_irq = {
- .name = "samsung_time_irq",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = samsung_clock_event_isr,
- .dev_id = &time_event_device,
-};
-
static void __init samsung_clockevent_init(void)
{
unsigned long pclk;
@@ -282,7 +275,10 @@ static void __init samsung_clockevent_init(void)
clock_rate, 1, pwm.tcnt_max);
irq_number = pwm.irq[pwm.event_id];
- setup_irq(irq_number, &samsung_clock_event_irq);
+ if (request_irq(irq_number, samsung_clock_event_isr,
+ IRQF_TIMER | IRQF_IRQPOLL, "samsung_time_irq",
+ &time_event_device))
+ pr_err("%s: request_irq() failed\n", "samsung_time_irq");
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 93c3ac6d72bd..c21c91c2bc56 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -159,29 +159,23 @@ static struct clocksource sirfsoc_clocksource = {
.resume = sirfsoc_clocksource_resume,
};
-static struct irqaction sirfsoc_timer_irq = {
- .name = "sirfsoc_timer0",
- .flags = IRQF_TIMER | IRQF_NOBALANCING,
- .handler = sirfsoc_timer_interrupt,
-};
-
-static struct irqaction sirfsoc_timer1_irq = {
- .name = "sirfsoc_timer1",
- .flags = IRQF_TIMER | IRQF_NOBALANCING,
- .handler = sirfsoc_timer_interrupt,
-};
+static unsigned int sirfsoc_timer_irq, sirfsoc_timer1_irq;
static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
- struct irqaction *action;
-
- if (cpu == 0)
- action = &sirfsoc_timer_irq;
- else
- action = &sirfsoc_timer1_irq;
+ unsigned int irq;
+ const char *name;
+
+ if (cpu == 0) {
+ irq = sirfsoc_timer_irq;
+ name = "sirfsoc_timer0";
+ } else {
+ irq = sirfsoc_timer1_irq;
+ name = "sirfsoc_timer1";
+ }
- ce->irq = action->irq;
+ ce->irq = irq;
ce->name = "local_timer";
ce->features = CLOCK_EVT_FEAT_ONESHOT;
ce->rating = 200;
@@ -196,9 +190,9 @@ static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
ce->min_delta_ticks = 2;
ce->cpumask = cpumask_of(cpu);
- action->dev_id = ce;
- BUG_ON(setup_irq(ce->irq, action));
- irq_force_affinity(action->irq, cpumask_of(cpu));
+ BUG_ON(request_irq(ce->irq, sirfsoc_timer_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING, name, ce));
+ irq_force_affinity(ce->irq, cpumask_of(cpu));
clockevents_register_device(ce);
return 0;
@@ -206,12 +200,14 @@ static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
{
+ struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
+
sirfsoc_timer_count_disable(1);
if (cpu == 0)
- remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+ free_irq(sirfsoc_timer_irq, ce);
else
- remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+ free_irq(sirfsoc_timer1_irq, ce);
return 0;
}
@@ -268,14 +264,14 @@ static int __init sirfsoc_of_timer_init(struct device_node *np)
return -ENXIO;
}
- sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
- if (!sirfsoc_timer_irq.irq) {
+ sirfsoc_timer_irq = irq_of_parse_and_map(np, 0);
+ if (!sirfsoc_timer_irq) {
pr_err("No irq passed for timer0 via DT\n");
return -EINVAL;
}
- sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
- if (!sirfsoc_timer1_irq.irq) {
+ sirfsoc_timer1_irq = irq_of_parse_and_map(np, 1);
+ if (!sirfsoc_timer1_irq) {
pr_err("No irq passed for timer1 via DT\n");
return -EINVAL;
}
diff --git a/drivers/clocksource/timer-cs5535.c b/drivers/clocksource/timer-cs5535.c
index 8f6bc536bef2..d47acfe848ae 100644
--- a/drivers/clocksource/timer-cs5535.c
+++ b/drivers/clocksource/timer-cs5535.c
@@ -131,14 +131,9 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction mfgptirq = {
- .handler = mfgpt_tick,
- .flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,
- .name = DRV_NAME,
-};
-
static int __init cs5535_mfgpt_init(void)
{
+ unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED;
struct cs5535_mfgpt_timer *timer;
int ret;
uint16_t val;
@@ -158,7 +153,7 @@ static int __init cs5535_mfgpt_init(void)
}
/* And register it with the kernel */
- ret = setup_irq(timer_irq, &mfgptirq);
+ ret = request_irq(timer_irq, mfgpt_tick, flags, DRV_NAME, timer);
if (ret) {
printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
goto err_irq;
diff --git a/drivers/clocksource/timer-efm32.c b/drivers/clocksource/timer-efm32.c
index 5a22cb079ad3..441a4b916841 100644
--- a/drivers/clocksource/timer-efm32.c
+++ b/drivers/clocksource/timer-efm32.c
@@ -119,13 +119,6 @@ static struct efm32_clock_event_ddata clock_event_ddata = {
},
};
-static struct irqaction efm32_clock_event_irq = {
- .name = "efm32 clockevent",
- .flags = IRQF_TIMER,
- .handler = efm32_clock_event_handler,
- .dev_id = &clock_event_ddata,
-};
-
static int __init efm32_clocksource_init(struct device_node *np)
{
struct clk *clk;
@@ -230,7 +223,8 @@ static int __init efm32_clockevent_init(struct device_node *np)
DIV_ROUND_CLOSEST(rate, 1024),
0xf, 0xffff);
- ret = setup_irq(irq, &efm32_clock_event_irq);
+ ret = request_irq(irq, efm32_clock_event_handler, IRQF_TIMER,
+ "efm32 clockevent", &clock_event_ddata);
if (ret) {
pr_err("Failed setup irq\n");
goto err_setup_irq;
diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c
index a9d9a3ca5996..12a2ed7cfaff 100644
--- a/drivers/clocksource/timer-fsl-ftm.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
@@ -176,13 +176,6 @@ static struct clock_event_device ftm_clockevent = {
.rating = 300,
};
-static struct irqaction ftm_timer_irq = {
- .name = "Freescale ftm timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = ftm_evt_interrupt,
- .dev_id = &ftm_clockevent,
-};
-
static int __init ftm_clockevent_init(unsigned long freq, int irq)
{
int err;
@@ -192,7 +185,8 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq)
ftm_reset_counter(priv->clkevt_base);
- err = setup_irq(irq, &ftm_timer_irq);
+ err = request_irq(irq, ftm_evt_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "Freescale ftm timer", &ftm_clockevent);
if (err) {
pr_err("ftm: setup irq failed: %d\n", err);
return err;
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index fadff7915dd9..edb1d5f193f5 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -38,6 +38,11 @@
#define TIMER_CR (0x30)
/*
+ * Control register set to clear for ast2600 only.
+ */
+#define AST2600_TIMER_CR_CLR (0x3c)
+
+/*
* Control register (TMC30) bit fields for fttmr010/gemini/moxart timers.
*/
#define TIMER_1_CR_ENABLE BIT(0)
@@ -97,6 +102,7 @@ struct fttmr010 {
bool is_aspeed;
u32 t1_enable_val;
struct clock_event_device clkevt;
+ int (*timer_shutdown)(struct clock_event_device *evt);
#ifdef CONFIG_ARM
struct delay_timer delay_timer;
#endif
@@ -140,9 +146,7 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
u32 cr;
/* Stop */
- cr = readl(fttmr010->base + TIMER_CR);
- cr &= ~fttmr010->t1_enable_val;
- writel(cr, fttmr010->base + TIMER_CR);
+ fttmr010->timer_shutdown(evt);
if (fttmr010->is_aspeed) {
/*
@@ -164,6 +168,16 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
return 0;
}
+static int ast2600_timer_shutdown(struct clock_event_device *evt)
+{
+ struct fttmr010 *fttmr010 = to_fttmr010(evt);
+
+ /* Stop */
+ writel(fttmr010->t1_enable_val, fttmr010->base + AST2600_TIMER_CR_CLR);
+
+ return 0;
+}
+
static int fttmr010_timer_shutdown(struct clock_event_device *evt)
{
struct fttmr010 *fttmr010 = to_fttmr010(evt);
@@ -183,9 +197,7 @@ static int fttmr010_timer_set_oneshot(struct clock_event_device *evt)
u32 cr;
/* Stop */
- cr = readl(fttmr010->base + TIMER_CR);
- cr &= ~fttmr010->t1_enable_val;
- writel(cr, fttmr010->base + TIMER_CR);
+ fttmr010->timer_shutdown(evt);
/* Setup counter start from 0 or ~0 */
writel(0, fttmr010->base + TIMER1_COUNT);
@@ -211,9 +223,7 @@ static int fttmr010_timer_set_periodic(struct clock_event_device *evt)
u32 cr;
/* Stop */
- cr = readl(fttmr010->base + TIMER_CR);
- cr &= ~fttmr010->t1_enable_val;
- writel(cr, fttmr010->base + TIMER_CR);
+ fttmr010->timer_shutdown(evt);
/* Setup timer to fire at 1/HZ intervals. */
if (fttmr010->is_aspeed) {
@@ -249,7 +259,21 @@ static irqreturn_t fttmr010_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
+static irqreturn_t ast2600_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct fttmr010 *fttmr010 = to_fttmr010(evt);
+
+ writel(0x1, fttmr010->base + TIMER_INTR_STATE);
+
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static int __init fttmr010_common_init(struct device_node *np,
+ bool is_aspeed,
+ int (*timer_shutdown)(struct clock_event_device *),
+ irq_handler_t irq_handler)
{
struct fttmr010 *fttmr010;
int irq;
@@ -350,6 +374,8 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
fttmr010->tick_rate);
}
+ fttmr010->timer_shutdown = timer_shutdown;
+
/*
* Setup clockevent timer (interrupt-driven) on timer 1.
*/
@@ -357,7 +383,7 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
writel(0, fttmr010->base + TIMER1_LOAD);
writel(0, fttmr010->base + TIMER1_MATCH1);
writel(0, fttmr010->base + TIMER1_MATCH2);
- ret = request_irq(irq, fttmr010_timer_interrupt, IRQF_TIMER,
+ ret = request_irq(irq, irq_handler, IRQF_TIMER,
"FTTMR010-TIMER1", &fttmr010->clkevt);
if (ret) {
pr_err("FTTMR010-TIMER1 no IRQ\n");
@@ -370,10 +396,10 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
fttmr010->clkevt.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
fttmr010->clkevt.set_next_event = fttmr010_timer_set_next_event;
- fttmr010->clkevt.set_state_shutdown = fttmr010_timer_shutdown;
+ fttmr010->clkevt.set_state_shutdown = fttmr010->timer_shutdown;
fttmr010->clkevt.set_state_periodic = fttmr010_timer_set_periodic;
fttmr010->clkevt.set_state_oneshot = fttmr010_timer_set_oneshot;
- fttmr010->clkevt.tick_resume = fttmr010_timer_shutdown;
+ fttmr010->clkevt.tick_resume = fttmr010->timer_shutdown;
fttmr010->clkevt.cpumask = cpumask_of(0);
fttmr010->clkevt.irq = irq;
clockevents_config_and_register(&fttmr010->clkevt,
@@ -404,14 +430,25 @@ out_disable_clock:
return ret;
}
+static __init int ast2600_timer_init(struct device_node *np)
+{
+ return fttmr010_common_init(np, true,
+ ast2600_timer_shutdown,
+ ast2600_timer_interrupt);
+}
+
static __init int aspeed_timer_init(struct device_node *np)
{
- return fttmr010_common_init(np, true);
+ return fttmr010_common_init(np, true,
+ fttmr010_timer_shutdown,
+ fttmr010_timer_interrupt);
}
static __init int fttmr010_timer_init(struct device_node *np)
{
- return fttmr010_common_init(np, false);
+ return fttmr010_common_init(np, false,
+ fttmr010_timer_shutdown,
+ fttmr010_timer_interrupt);
}
TIMER_OF_DECLARE(fttmr010, "faraday,fttmr010", fttmr010_timer_init);
@@ -419,3 +456,4 @@ TIMER_OF_DECLARE(gemini, "cortina,gemini-timer", fttmr010_timer_init);
TIMER_OF_DECLARE(moxart, "moxa,moxart-timer", fttmr010_timer_init);
TIMER_OF_DECLARE(ast2400, "aspeed,ast2400-timer", aspeed_timer_init);
TIMER_OF_DECLARE(ast2500, "aspeed,ast2500-timer", aspeed_timer_init);
+TIMER_OF_DECLARE(ast2600, "aspeed,ast2600-timer", ast2600_timer_init);
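
The ast2600 gets its own shutdown and interrupt handlers above because its control register is no longer read-modify-write: bits are set via TIMER_CR and cleared by writing ones to the separate AST2600_TIMER_CR_CLR register. The two stop idioms side by side (sketch; the register macros are the driver's own):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Classic fttmr010: stop by read-modify-write on TIMER_CR, which has a
 * race window if anything else writes the register concurrently.
 */
static void timer_stop_rmw(void __iomem *base, u32 enable_bit)
{
	u32 cr = readl(base + TIMER_CR);

	cr &= ~enable_bit;
	writel(cr, base + TIMER_CR);
}

/* ast2600: a single write-one-to-clear, no read and no race window. */
static void timer_stop_w1c(void __iomem *base, u32 enable_bit)
{
	writel(enable_bit, base + AST2600_TIMER_CR_CLR);
}
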
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 706c0d0ff56c..7b2c70f2f353 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -67,7 +67,6 @@ struct imx_timer {
struct clk *clk_ipg;
const struct imx_gpt_data *gpt;
struct clock_event_device ced;
- struct irqaction act;
};
struct imx_gpt_data {
@@ -273,7 +272,6 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
struct clock_event_device *ced = &imxtm->ced;
- struct irqaction *act = &imxtm->act;
ced->name = "mxc_timer1";
ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
@@ -287,12 +285,8 @@ static int __init mxc_clockevent_init(struct imx_timer *imxtm)
clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
0xff, 0xfffffffe);
- act->name = "i.MX Timer Tick";
- act->flags = IRQF_TIMER | IRQF_IRQPOLL;
- act->handler = mxc_timer_interrupt;
- act->dev_id = ced;
-
- return setup_irq(imxtm->irq, act);
+ return request_irq(imxtm->irq, mxc_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "i.MX Timer Tick", ced);
}
static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index b7c80a368a1b..18b90fc56bfc 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -4,8 +4,6 @@
#include <linux/interrupt.h>
#include <linux/clockchips.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include "timer-of.h"
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index c1d52d5264c2..6334a35fdc2f 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -8,8 +8,6 @@
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include "timer-of.h"
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index c90a69c7b5fa..b0fcbaac58b0 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -123,13 +123,6 @@ static struct clock_event_device integrator_clockevent = {
.rating = 300,
};
-static struct irqaction integrator_timer_irq = {
- .name = "timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = integrator_timer_interrupt,
- .dev_id = &integrator_clockevent,
-};
-
static int integrator_clockevent_init(unsigned long inrate,
void __iomem *base, int irq)
{
@@ -149,7 +142,9 @@ static int integrator_clockevent_init(unsigned long inrate,
timer_reload = rate / HZ;
writel(ctrl, clkevt_base + TIMER_CTRL);
- ret = setup_irq(irq, &integrator_timer_irq);
+ ret = request_irq(irq, integrator_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "timer",
+ &integrator_clockevent);
if (ret)
return ret;
diff --git a/drivers/clocksource/timer-meson6.c b/drivers/clocksource/timer-meson6.c
index 9e8b467c71da..99f5510a2b56 100644
--- a/drivers/clocksource/timer-meson6.c
+++ b/drivers/clocksource/timer-meson6.c
@@ -150,13 +150,6 @@ static irqreturn_t meson6_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction meson6_timer_irq = {
- .name = "meson6_timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = meson6_timer_interrupt,
- .dev_id = &meson6_clockevent,
-};
-
static int __init meson6_timer_init(struct device_node *node)
{
u32 val;
@@ -194,7 +187,9 @@ static int __init meson6_timer_init(struct device_node *node)
/* Stop the timer A */
meson6_clkevt_time_stop();
- ret = setup_irq(irq, &meson6_timer_irq);
+ ret = request_irq(irq, meson6_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "meson6_timer",
+ &meson6_clockevent);
if (ret) {
pr_warn("failed to setup irq %d\n", irq);
return ret;
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index bd63d3484838..59e11ca8ee73 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -264,6 +264,7 @@ static int __init mchp_pit64b_init_mode(struct mchp_pit64b_timer *timer,
if (!best_diff) {
timer->mode |= MCHP_PIT64B_MR_SGCLK;
+ clk_set_rate(timer->gclk, gclk_round);
goto done;
}
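
The added clk_set_rate() makes the exact-match case actually program the gclk; before this, the driver selected MCHP_PIT64B_MR_SGCLK in the mode mask but left the clock at whatever rate it already had. The completed round-then-set idiom, as a sketch following the driver's variable names:

#include <linux/clk.h>

/*
 * Sketch: prefer the gclk only when clk_round_rate() can hit the target
 * exactly, then commit that rate; the caller sets MCHP_PIT64B_MR_SGCLK.
 */
static bool pit64b_try_gclk(struct clk *gclk, unsigned long target)
{
	long rounded = clk_round_rate(gclk, target);

	if (rounded == (long)target) {
		clk_set_rate(gclk, rounded);
		return true;
	}

	return false;
}
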
diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c
index 7d487107e3cd..d01ff4181867 100644
--- a/drivers/clocksource/timer-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -114,12 +114,6 @@ static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction orion_clkevt_irq = {
- .name = "orion_event",
- .flags = IRQF_TIMER,
- .handler = orion_clkevt_irq_handler,
-};
-
static int __init orion_timer_init(struct device_node *np)
{
unsigned long rate;
@@ -172,7 +166,8 @@ static int __init orion_timer_init(struct device_node *np)
sched_clock_register(orion_read_sched_clock, 32, rate);
/* setup timer1 as clockevent timer */
- ret = setup_irq(irq, &orion_clkevt_irq);
+ ret = request_irq(irq, orion_clkevt_irq_handler, IRQF_TIMER,
+ "orion_event", NULL);
if (ret) {
pr_err("%pOFn: unable to setup irq\n", np);
return ret;
diff --git a/drivers/clocksource/timer-owl.c b/drivers/clocksource/timer-owl.c
index 900fe736145d..ac97420bfa7c 100644
--- a/drivers/clocksource/timer-owl.c
+++ b/drivers/clocksource/timer-owl.c
@@ -135,8 +135,11 @@ static int __init owl_timer_init(struct device_node *node)
}
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("Failed to get clock for clocksource (%d)\n", ret);
+ return ret;
+ }
rate = clk_get_rate(clk);
@@ -144,8 +147,12 @@ static int __init owl_timer_init(struct device_node *node)
owl_timer_set_enabled(owl_clksrc_base, true);
sched_clock_register(owl_timer_sched_read, 32, rate);
- clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name,
- rate, 200, 32, clocksource_mmio_readl_up);
+ ret = clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name,
+ rate, 200, 32, clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("Failed to register clocksource (%d)\n", ret);
+ return ret;
+ }
owl_timer_reset(owl_clkevt_base);
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index d4a9dcf5fba2..c5d469342a9d 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -165,14 +165,6 @@ static struct clocksource sirfsoc_clocksource = {
.resume = sirfsoc_clocksource_resume,
};
-static struct irqaction sirfsoc_timer_irq = {
- .name = "sirfsoc_timer0",
- .flags = IRQF_TIMER,
- .irq = 0,
- .handler = sirfsoc_timer_interrupt,
- .dev_id = &sirfsoc_clockevent,
-};
-
/* Overwrite weak default sched_clock with more precise one */
static u64 notrace sirfsoc_read_sched_clock(void)
{
@@ -190,6 +182,7 @@ static void __init sirfsoc_clockevent_init(void)
static int __init sirfsoc_prima2_timer_init(struct device_node *np)
{
unsigned long rate;
+ unsigned int irq;
struct clk *clk;
int ret;
@@ -218,7 +211,7 @@ static int __init sirfsoc_prima2_timer_init(struct device_node *np)
return -ENXIO;
}
- sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
+ irq = irq_of_parse_and_map(np, 0);
writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1,
sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
@@ -234,7 +227,8 @@ static int __init sirfsoc_prima2_timer_init(struct device_node *np)
sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
- ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+ ret = request_irq(irq, sirfsoc_timer_interrupt, IRQF_TIMER,
+ "sirfsoc_timer0", &sirfsoc_clockevent);
if (ret) {
pr_err("Failed to setup irq\n");
return ret;
diff --git a/drivers/clocksource/timer-pxa.c b/drivers/clocksource/timer-pxa.c
index 913a5d354a1f..7ad0e5adb2ff 100644
--- a/drivers/clocksource/timer-pxa.c
+++ b/drivers/clocksource/timer-pxa.c
@@ -143,13 +143,6 @@ static struct clock_event_device ckevt_pxa_osmr0 = {
.resume = pxa_timer_resume,
};
-static struct irqaction pxa_ost0_irq = {
- .name = "ost0",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = pxa_ost0_interrupt,
- .dev_id = &ckevt_pxa_osmr0,
-};
-
static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
{
int ret;
@@ -161,7 +154,8 @@ static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
ckevt_pxa_osmr0.cpumask = cpumask_of(0);
- ret = setup_irq(irq, &pxa_ost0_irq);
+ ret = request_irq(irq, pxa_ost0_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "ost0", &ckevt_pxa_osmr0);
if (ret) {
pr_err("Failed to setup irq\n");
return ret;
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 9c841980eed1..5cd0abf9b396 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -168,13 +168,6 @@ static struct clock_event_device sp804_clockevent = {
.rating = 300,
};
-static struct irqaction sp804_timer_irq = {
- .name = "timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = sp804_timer_interrupt,
- .dev_id = &sp804_clockevent,
-};
-
int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
@@ -200,7 +193,9 @@ int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct
writel(0, base + TIMER_CTRL);
- setup_irq(irq, &sp804_timer_irq);
+ if (request_irq(irq, sp804_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "timer", &sp804_clockevent))
+ pr_err("%s: request_irq() failed\n", "timer");
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
return 0;
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 269a994d6a99..2531eab3d6d7 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* linux/arch/arm/plat-omap/dmtimer.c
*
@@ -15,28 +16,11 @@
*
* Copyright (C) 2009 Texas Instruments
* Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/device.h>
@@ -109,6 +93,47 @@ static void omap_timer_restore_context(struct omap_dm_timer *timer)
timer->context.tclr);
}
+static void omap_timer_save_context(struct omap_dm_timer *timer)
+{
+ timer->context.tclr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ timer->context.twer =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG);
+ timer->context.tldr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_LOAD_REG);
+ timer->context.tmar =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_MATCH_REG);
+ timer->context.tier = readl_relaxed(timer->irq_ena);
+ timer->context.tsicr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_IF_CTRL_REG);
+}
+
+static int omap_timer_context_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
+{
+ struct omap_dm_timer *timer;
+
+ timer = container_of(nb, struct omap_dm_timer, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ if ((timer->capability & OMAP_TIMER_ALWON) ||
+ !atomic_read(&timer->enabled))
+ break;
+ omap_timer_save_context(timer);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ if ((timer->capability & OMAP_TIMER_ALWON) ||
+ !atomic_read(&timer->enabled))
+ break;
+ omap_timer_restore_context(timer);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static int omap_dm_timer_reset(struct omap_dm_timer *timer)
{
u32 l, timeout = 100000;
@@ -138,35 +163,6 @@ static int omap_dm_timer_reset(struct omap_dm_timer *timer)
return 0;
}
-static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
-{
- int ret;
- struct clk *parent;
-
- /*
- * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
- * do not call clk_get() for these devices.
- */
- if (!timer->fclk)
- return -ENODEV;
-
- parent = clk_get(&timer->pdev->dev, NULL);
- if (IS_ERR(parent))
- return -ENODEV;
-
- /* Bail out if both clocks point to fck */
- if (clk_is_match(parent, timer->fclk))
- return 0;
-
- ret = clk_set_parent(timer->fclk, parent);
- if (ret < 0)
- pr_err("%s: failed to set parent\n", __func__);
-
- clk_put(parent);
-
- return ret;
-}
-
static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
int ret;
@@ -225,21 +221,7 @@ static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
static void omap_dm_timer_enable(struct omap_dm_timer *timer)
{
- int c;
-
pm_runtime_get_sync(&timer->pdev->dev);
-
- if (!(timer->capability & OMAP_TIMER_ALWON)) {
- if (timer->get_context_loss_count) {
- c = timer->get_context_loss_count(&timer->pdev->dev);
- if (c != timer->ctx_loss_count) {
- omap_timer_restore_context(timer);
- timer->ctx_loss_count = c;
- }
- } else {
- omap_timer_restore_context(timer);
- }
- }
}
static void omap_dm_timer_disable(struct omap_dm_timer *timer)
@@ -276,9 +258,7 @@ static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
__omap_dm_timer_enable_posted(timer);
omap_dm_timer_disable(timer);
- rc = omap_dm_timer_of_set_source(timer);
- if (rc == -ENODEV)
- return omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
+ rc = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
return rc;
}
@@ -508,7 +488,7 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
int omap_dm_timer_trigger(struct omap_dm_timer *timer)
{
- if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return -EINVAL;
}
@@ -532,8 +512,6 @@ static int omap_dm_timer_start(struct omap_dm_timer *timer)
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
}
- /* Save the context */
- timer->context.tclr = l;
return 0;
}
@@ -549,38 +527,19 @@ static int omap_dm_timer_stop(struct omap_dm_timer *timer)
__omap_dm_timer_stop(timer, timer->posted, rate);
- /*
- * Since the register values are computed and written within
- * __omap_dm_timer_stop, we need to use read to retrieve the
- * context.
- */
- timer->context.tclr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+static int omap_dm_timer_set_load(struct omap_dm_timer *timer,
unsigned int load)
{
- u32 l;
-
if (unlikely(!timer))
return -EINVAL;
omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- if (autoreload)
- l |= OMAP_TIMER_CTRL_AR;
- else
- l &= ~OMAP_TIMER_CTRL_AR;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
- /* Save the context */
- timer->context.tclr = l;
- timer->context.tldr = load;
omap_dm_timer_disable(timer);
return 0;
}
@@ -602,15 +561,12 @@ static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
- /* Save the context */
- timer->context.tclr = l;
- timer->context.tmar = match;
omap_dm_timer_disable(timer);
return 0;
}
static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
- int toggle, int trigger)
+ int toggle, int trigger, int autoreload)
{
u32 l;
@@ -620,20 +576,34 @@ static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
omap_dm_timer_enable(timer);
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
- OMAP_TIMER_CTRL_PT | (0x03 << 10));
+ OMAP_TIMER_CTRL_PT | (0x03 << 10) | OMAP_TIMER_CTRL_AR);
if (def_on)
l |= OMAP_TIMER_CTRL_SCPWM;
if (toggle)
l |= OMAP_TIMER_CTRL_PT;
l |= trigger << 10;
+ if (autoreload)
+ l |= OMAP_TIMER_CTRL_AR;
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
- /* Save the context */
- timer->context.tclr = l;
omap_dm_timer_disable(timer);
return 0;
}
+static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *timer)
+{
+ u32 l;
+
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ omap_dm_timer_disable(timer);
+
+ return l;
+}
+
static int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer,
int prescaler)
{
@@ -651,8 +621,6 @@ static int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer,
}
omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
- /* Save the context */
- timer->context.tclr = l;
omap_dm_timer_disable(timer);
return 0;
}
@@ -666,9 +634,6 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
omap_dm_timer_enable(timer);
__omap_dm_timer_int_enable(timer, value);
- /* Save the context */
- timer->context.tier = value;
- timer->context.twer = value;
omap_dm_timer_disable(timer);
return 0;
}
@@ -696,9 +661,6 @@ static int omap_dm_timer_set_int_disable(struct omap_dm_timer *timer, u32 mask)
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
- /* Save the context */
- timer->context.tier &= ~mask;
- timer->context.twer &= ~mask;
omap_dm_timer_disable(timer);
return 0;
}
@@ -707,7 +669,7 @@ static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
{
unsigned int l;
- if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return 0;
}
@@ -719,7 +681,7 @@ static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
{
- if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev)))
+ if (unlikely(!timer || !atomic_read(&timer->enabled)))
return -EINVAL;
__omap_dm_timer_write_status(timer, value);
@@ -729,7 +691,7 @@ static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int
static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
{
- if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not iavailable or enabled.\n", __func__);
return 0;
}
@@ -739,7 +701,7 @@ static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
static int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
{
- if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return -EINVAL;
}
@@ -767,6 +729,37 @@ int omap_dm_timers_active(void)
return 0;
}
+static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
+{
+ struct omap_dm_timer *timer = dev_get_drvdata(dev);
+
+ atomic_set(&timer->enabled, 0);
+
+ if (timer->capability & OMAP_TIMER_ALWON || !timer->func_base)
+ return 0;
+
+ omap_timer_save_context(timer);
+
+ return 0;
+}
+
+static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev)
+{
+ struct omap_dm_timer *timer = dev_get_drvdata(dev);
+
+ if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base)
+ omap_timer_restore_context(timer);
+
+ atomic_set(&timer->enabled, 1);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_dm_timer_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_dm_timer_runtime_suspend,
+ omap_dm_timer_runtime_resume, NULL)
+};
+
static const struct of_device_id omap_timer_match[];
/**
@@ -808,6 +801,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
if (IS_ERR(timer->io_base))
return PTR_ERR(timer->io_base);
+ platform_set_drvdata(pdev, timer);
+
if (dev->of_node) {
if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
timer->capability |= OMAP_TIMER_ALWON;
@@ -821,7 +816,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
timer->id = pdev->id;
timer->capability = pdata->timer_capability;
timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
- timer->get_context_loss_count = pdata->get_context_loss_count;
+ }
+
+ if (!(timer->capability & OMAP_TIMER_ALWON)) {
+ timer->nb.notifier_call = omap_timer_context_notifier;
+ cpu_pm_register_notifier(&timer->nb);
}
if (pdata)
@@ -875,6 +874,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
list_for_each_entry(timer, &omap_timer_list, node)
if (!strcmp(dev_name(&timer->pdev->dev),
dev_name(&pdev->dev))) {
+ if (!(timer->capability & OMAP_TIMER_ALWON))
+ cpu_pm_unregister_notifier(&timer->nb);
list_del(&timer->node);
ret = 0;
break;
@@ -903,6 +904,7 @@ static const struct omap_dm_timer_ops dmtimer_ops = {
.set_load = omap_dm_timer_set_load,
.set_match = omap_dm_timer_set_match,
.set_pwm = omap_dm_timer_set_pwm,
+ .get_pwm_status = omap_dm_timer_get_pwm_status,
.set_prescaler = omap_dm_timer_set_prescaler,
.read_counter = omap_dm_timer_read_counter,
.write_counter = omap_dm_timer_write_counter,
@@ -953,6 +955,7 @@ static struct platform_driver omap_dm_timer_driver = {
.driver = {
.name = "omap_timer",
.of_match_table = of_match_ptr(omap_timer_match),
+ .pm = &omap_dm_timer_pm_ops,
},
};
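
The dmtimer rework moves context save/restore out of the enable/disable path into two mechanisms: runtime-PM callbacks that save and restore registers around each suspend/resume, and a cpu_pm notifier that covers cluster power transitions for non-always-on timers. A condensed sketch of the wiring, with structure and function names assumed to mirror the driver:

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Condensed illustration of the save/restore wiring; not the full driver. */
struct example_timer {
	struct notifier_block nb;
	atomic_t enabled;
	u32 saved_ctrl;		/* stands in for the full register context */
};

static void example_save_context(struct example_timer *t)    { /* read regs */ }
static void example_restore_context(struct example_timer *t) { /* write regs */ }

static int example_cpu_pm_notifier(struct notifier_block *nb,
				   unsigned long cmd, void *v)
{
	struct example_timer *t = container_of(nb, struct example_timer, nb);

	/* Only act on timers that are actually running */
	if (!atomic_read(&t->enabled))
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		example_save_context(t);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		example_restore_context(t);
		break;
	}

	return NOTIFY_OK;
}

static int example_register(struct example_timer *t)
{
	t->nb.notifier_call = example_cpu_pm_notifier;
	return cpu_pm_register_notifier(&t->nb);
}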
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
index 32adc3057dda..37cba8dfd45f 100644
--- a/drivers/clocksource/timer-u300.c
+++ b/drivers/clocksource/timer-u300.c
@@ -330,12 +330,6 @@ static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction u300_timer_irq = {
- .name = "U300 Timer Tick",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = u300_timer_interrupt,
-};
-
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
@@ -420,7 +414,8 @@ static int __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_RGPT1);
/* Set up the IRQ handler */
- ret = setup_irq(irq, &u300_timer_irq);
+ ret = request_irq(irq, u300_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "U300 Timer Tick", NULL);
if (ret)
return ret;
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
index fef0bb4e0c8c..1a86a4e7e344 100644
--- a/drivers/clocksource/timer-vf-pit.c
+++ b/drivers/clocksource/timer-vf-pit.c
@@ -123,19 +123,13 @@ static struct clock_event_device clockevent_pit = {
.rating = 300,
};
-static struct irqaction pit_timer_irq = {
- .name = "VF pit timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = pit_timer_interrupt,
- .dev_id = &clockevent_pit,
-};
-
static int __init pit_clockevent_init(unsigned long rate, int irq)
{
__raw_writel(0, clkevt_base + PITTCTRL);
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
- BUG_ON(setup_irq(irq, &pit_timer_irq));
+ BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "VF pit timer", &clockevent_pit));
clockevent_pit.cpumask = cpumask_of(0);
clockevent_pit.irq = irq;
diff --git a/drivers/clocksource/timer-vt8500.c b/drivers/clocksource/timer-vt8500.c
index bb424bcefbb3..a469b1b5f972 100644
--- a/drivers/clocksource/timer-vt8500.c
+++ b/drivers/clocksource/timer-vt8500.c
@@ -101,13 +101,6 @@ static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction irq = {
- .name = "vt8500_timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = vt8500_timer_interrupt,
- .dev_id = &clockevent,
-};
-
static int __init vt8500_timer_init(struct device_node *np)
{
int timer_irq, ret;
@@ -139,7 +132,9 @@ static int __init vt8500_timer_init(struct device_node *np)
clockevent.cpumask = cpumask_of(0);
- ret = setup_irq(timer_irq, &irq);
+ ret = request_irq(timer_irq, vt8500_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "vt8500_timer",
+ &clockevent);
if (ret) {
pr_err("%s: setup_irq failed for %s\n", __func__,
clockevent.name);
diff --git a/drivers/clocksource/timer-zevio.c b/drivers/clocksource/timer-zevio.c
index c0041561f1be..ecaa3568841c 100644
--- a/drivers/clocksource/timer-zevio.c
+++ b/drivers/clocksource/timer-zevio.c
@@ -53,7 +53,6 @@ struct zevio_timer {
struct clk *clk;
struct clock_event_device clkevt;
- struct irqaction clkevt_irq;
char clocksource_name[64];
char clockevent_name[64];
@@ -172,12 +171,12 @@ static int __init zevio_timer_add(struct device_node *node)
/* Interrupt to occur when timer value matches 0 */
writel(0, timer->base + IO_MATCH(TIMER_MATCH));
- timer->clkevt_irq.name = timer->clockevent_name;
- timer->clkevt_irq.handler = zevio_timer_interrupt;
- timer->clkevt_irq.dev_id = timer;
- timer->clkevt_irq.flags = IRQF_TIMER | IRQF_IRQPOLL;
-
- setup_irq(irqnr, &timer->clkevt_irq);
+ if (request_irq(irqnr, zevio_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL,
+ timer->clockevent_name, timer)) {
+ pr_err("%s: request_irq() failed\n",
+ timer->clockevent_name);
+ }
clockevents_config_and_register(&timer->clkevt,
clk_get_rate(timer->clk), 0x0001, 0xffff);
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 17e67a84777d..aa13708c2bc3 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -31,6 +31,7 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
/**
* struct quad8_iio - IIO device private data structure
* @counter: instance of the counter_device
+ * @fck_prescaler: array of filter clock prescaler configurations
* @preset: array of preset values
* @count_mode: array of count mode configurations
* @quadrature_mode: array of quadrature mode configurations
@@ -39,10 +40,13 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
* @preset_enable: array of set_to_preset_on_index attribute configurations
* @synchronous_mode: array of index function synchronous mode configurations
* @index_polarity: array of index function polarity configurations
+ * @cable_fault_enable: differential encoder cable status enable configurations
* @base: base port address of the IIO device
*/
struct quad8_iio {
+ struct mutex lock;
struct counter_device counter;
+ unsigned int fck_prescaler[QUAD8_NUM_COUNTERS];
unsigned int preset[QUAD8_NUM_COUNTERS];
unsigned int count_mode[QUAD8_NUM_COUNTERS];
unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
@@ -51,11 +55,13 @@ struct quad8_iio {
unsigned int preset_enable[QUAD8_NUM_COUNTERS];
unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
unsigned int index_polarity[QUAD8_NUM_COUNTERS];
+ unsigned int cable_fault_enable;
unsigned int base;
};
#define QUAD8_REG_CHAN_OP 0x11
#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
+#define QUAD8_DIFF_ENCODER_CABLE_STATUS 0x17
/* Borrow Toggle flip-flop */
#define QUAD8_FLAG_BT BIT(0)
/* Carry Toggle flip-flop */
@@ -84,6 +90,8 @@ struct quad8_iio {
#define QUAD8_RLD_PRESET_CNTR 0x08
/* Transfer Counter to Output Latch */
#define QUAD8_RLD_CNTR_OUT 0x10
+/* Transfer Preset Register LSB to FCK Prescaler */
+#define QUAD8_RLD_PRESET_PSC 0x18
#define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00
#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
#define QUAD8_CMR_QUADRATURE_X1 0x08
@@ -116,6 +124,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
/* Borrow XOR Carry effectively doubles count range */
*val = (borrow ^ carry) << 24;
+ mutex_lock(&priv->lock);
+
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
base_offset + 1);
@@ -123,6 +133,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
for (i = 0; i < 3; i++)
*val |= (unsigned int)inb(base_offset) << (8 * i);
+ mutex_unlock(&priv->lock);
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_ENABLE:
*val = priv->ab_enable[chan->channel];
@@ -153,6 +165,8 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
if ((unsigned int)val > 0xFFFFFF)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
/* Reset Byte Pointer */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
@@ -176,12 +190,16 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
/* Reset Error flag */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return 0;
case IIO_CHAN_INFO_ENABLE:
/* only boolean values accepted */
if (val < 0 || val > 1)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
priv->ab_enable[chan->channel] = val;
ior_cfg = val | priv->preset_enable[chan->channel] << 1;
@@ -189,11 +207,18 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
/* Load I/O control configuration */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return 0;
case IIO_CHAN_INFO_SCALE:
+ mutex_lock(&priv->lock);
+
/* Quadrature scaling only available in quadrature mode */
- if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+ if (!priv->quadrature_mode[chan->channel] &&
+ (val2 || val != 1)) {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
/* Only three gain states (1, 0.5, 0.25) */
if (val == 1 && !val2)
@@ -207,11 +232,15 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
priv->quadrature_scale[chan->channel] = 2;
break;
default:
+ mutex_unlock(&priv->lock);
return -EINVAL;
}
- else
+ else {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
+ mutex_unlock(&priv->lock);
return 0;
}
@@ -248,6 +277,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
if (preset > 0xFFFFFF)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
priv->preset[chan->channel] = preset;
/* Reset Byte Pointer */
@@ -257,6 +288,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
for (i = 0; i < 3; i++)
outb(preset >> (8 * i), base_offset);
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -286,6 +319,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
/* Preset enable is active low in Input/Output Control register */
preset_enable = !preset_enable;
+ mutex_lock(&priv->lock);
+
priv->preset_enable[chan->channel] = preset_enable;
ior_cfg = priv->ab_enable[chan->channel] |
@@ -294,6 +329,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
/* Load I/O control configuration to Input / Output Control Register */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -351,6 +388,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
unsigned int mode_cfg = cnt_mode << 1;
const int base_offset = priv->base + 2 * chan->channel + 1;
+ mutex_lock(&priv->lock);
+
priv->count_mode[chan->channel] = cnt_mode;
/* Add quadrature mode configuration */
@@ -360,6 +399,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -387,19 +428,26 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int synchronous_mode)
{
struct quad8_iio *const priv = iio_priv(indio_dev);
- const unsigned int idr_cfg = synchronous_mode |
- priv->index_polarity[chan->channel] << 1;
const int base_offset = priv->base + 2 * chan->channel + 1;
+ unsigned int idr_cfg = synchronous_mode;
+
+ mutex_lock(&priv->lock);
+
+ idr_cfg |= priv->index_polarity[chan->channel] << 1;
/* Index function must be non-synchronous in non-quadrature mode */
- if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+ if (synchronous_mode && !priv->quadrature_mode[chan->channel]) {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
priv->synchronous_mode[chan->channel] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -427,8 +475,12 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int quadrature_mode)
{
struct quad8_iio *const priv = iio_priv(indio_dev);
- unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
const int base_offset = priv->base + 2 * chan->channel + 1;
+ unsigned int mode_cfg;
+
+ mutex_lock(&priv->lock);
+
+ mode_cfg = priv->count_mode[chan->channel] << 1;
if (quadrature_mode)
mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
@@ -446,6 +498,8 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -473,15 +527,20 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int index_polarity)
{
struct quad8_iio *const priv = iio_priv(indio_dev);
- const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
- index_polarity << 1;
const int base_offset = priv->base + 2 * chan->channel + 1;
+ unsigned int idr_cfg = index_polarity << 1;
+
+ mutex_lock(&priv->lock);
+
+ idr_cfg |= priv->synchronous_mode[chan->channel];
priv->index_polarity[chan->channel] = index_polarity;
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -582,7 +641,7 @@ static int quad8_signal_read(struct counter_device *counter,
static int quad8_count_read(struct counter_device *counter,
struct counter_count *count, unsigned long *val)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
unsigned int flags;
unsigned int borrow;
@@ -596,6 +655,8 @@ static int quad8_count_read(struct counter_device *counter,
/* Borrow XOR Carry effectively doubles count range */
*val = (unsigned long)(borrow ^ carry) << 24;
+ mutex_lock(&priv->lock);
+
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
base_offset + 1);
@@ -603,13 +664,15 @@ static int quad8_count_read(struct counter_device *counter,
for (i = 0; i < 3; i++)
*val |= (unsigned long)inb(base_offset) << (8 * i);
+ mutex_unlock(&priv->lock);
+
return 0;
}
static int quad8_count_write(struct counter_device *counter,
struct counter_count *count, unsigned long val)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
int i;
@@ -617,6 +680,8 @@ static int quad8_count_write(struct counter_device *counter,
if (val > 0xFFFFFF)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
/* Reset Byte Pointer */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
@@ -640,6 +705,8 @@ static int quad8_count_write(struct counter_device *counter,
/* Reset Error flag */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -660,13 +727,13 @@ static enum counter_count_function quad8_count_functions_list[] = {
static int quad8_function_get(struct counter_device *counter,
struct counter_count *count, size_t *function)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
const int id = count->id;
- const unsigned int quadrature_mode = priv->quadrature_mode[id];
- const unsigned int scale = priv->quadrature_scale[id];
- if (quadrature_mode)
- switch (scale) {
+ mutex_lock(&priv->lock);
+
+ if (priv->quadrature_mode[id])
+ switch (priv->quadrature_scale[id]) {
case 0:
*function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1;
break;
@@ -680,6 +747,8 @@ static int quad8_function_get(struct counter_device *counter,
else
*function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION;
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -690,10 +759,15 @@ static int quad8_function_set(struct counter_device *counter,
const int id = count->id;
unsigned int *const quadrature_mode = priv->quadrature_mode + id;
unsigned int *const scale = priv->quadrature_scale + id;
- unsigned int mode_cfg = priv->count_mode[id] << 1;
unsigned int *const synchronous_mode = priv->synchronous_mode + id;
- const unsigned int idr_cfg = priv->index_polarity[id] << 1;
const int base_offset = priv->base + 2 * id + 1;
+ unsigned int mode_cfg;
+ unsigned int idr_cfg;
+
+ mutex_lock(&priv->lock);
+
+ mode_cfg = priv->count_mode[id] << 1;
+ idr_cfg = priv->index_polarity[id] << 1;
if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) {
*quadrature_mode = 0;
@@ -729,6 +803,8 @@ static int quad8_function_set(struct counter_device *counter,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -845,15 +921,20 @@ static int quad8_index_polarity_set(struct counter_device *counter,
{
struct quad8_iio *const priv = counter->priv;
const size_t channel_id = signal->id - 16;
- const unsigned int idr_cfg = priv->synchronous_mode[channel_id] |
- index_polarity << 1;
const int base_offset = priv->base + 2 * channel_id + 1;
+ unsigned int idr_cfg = index_polarity << 1;
+
+ mutex_lock(&priv->lock);
+
+ idr_cfg |= priv->synchronous_mode[channel_id];
priv->index_polarity[channel_id] = index_polarity;
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -880,19 +961,26 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
{
struct quad8_iio *const priv = counter->priv;
const size_t channel_id = signal->id - 16;
- const unsigned int idr_cfg = synchronous_mode |
- priv->index_polarity[channel_id] << 1;
const int base_offset = priv->base + 2 * channel_id + 1;
+ unsigned int idr_cfg = synchronous_mode;
+
+ mutex_lock(&priv->lock);
+
+ idr_cfg |= priv->index_polarity[channel_id] << 1;
/* Index function must be non-synchronous in non-quadrature mode */
- if (synchronous_mode && !priv->quadrature_mode[channel_id])
+ if (synchronous_mode && !priv->quadrature_mode[channel_id]) {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
priv->synchronous_mode[channel_id] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -957,6 +1045,8 @@ static int quad8_count_mode_set(struct counter_device *counter,
break;
}
+ mutex_lock(&priv->lock);
+
priv->count_mode[count->id] = cnt_mode;
/* Set count mode configuration value */
@@ -969,6 +1059,8 @@ static int quad8_count_mode_set(struct counter_device *counter,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -1010,6 +1102,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter,
if (err)
return err;
+ mutex_lock(&priv->lock);
+
priv->ab_enable[count->id] = ab_enable;
ior_cfg = ab_enable | priv->preset_enable[count->id] << 1;
@@ -1017,6 +1111,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter,
/* Load I/O control configuration */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -1045,14 +1141,28 @@ static ssize_t quad8_count_preset_read(struct counter_device *counter,
return sprintf(buf, "%u\n", priv->preset[count->id]);
}
+static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id,
+ unsigned int preset)
+{
+ const unsigned int base_offset = quad8iio->base + 2 * id;
+ int i;
+
+ quad8iio->preset[id] = preset;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+}
+
static ssize_t quad8_count_preset_write(struct counter_device *counter,
struct counter_count *count, void *private, const char *buf, size_t len)
{
struct quad8_iio *const priv = counter->priv;
- const int base_offset = priv->base + 2 * count->id;
unsigned int preset;
int ret;
- int i;
ret = kstrtouint(buf, 0, &preset);
if (ret)
@@ -1062,14 +1172,11 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter,
if (preset > 0xFFFFFF)
return -EINVAL;
- priv->preset[count->id] = preset;
+ mutex_lock(&priv->lock);
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ quad8_preset_register_set(priv, count->id, preset);
- /* Set Preset Register */
- for (i = 0; i < 3; i++)
- outb(preset >> (8 * i), base_offset);
+ mutex_unlock(&priv->lock);
return len;
}
@@ -1077,15 +1184,20 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter,
static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, void *private, char *buf)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
+
+ mutex_lock(&priv->lock);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) {
case 1:
case 3:
- return quad8_count_preset_read(counter, count, private, buf);
+ mutex_unlock(&priv->lock);
+ return sprintf(buf, "%u\n", priv->preset[count->id]);
}
+ mutex_unlock(&priv->lock);
+
/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
return sprintf(buf, "33554431\n");
}
@@ -1094,15 +1206,29 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, void *private, const char *buf, size_t len)
{
struct quad8_iio *const priv = counter->priv;
+ unsigned int ceiling;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &ceiling);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (ceiling > 0xFFFFFF)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) {
case 1:
case 3:
- return quad8_count_preset_write(counter, count, private, buf,
- len);
+ quad8_preset_register_set(priv, count->id, ceiling);
+ break;
}
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -1130,6 +1256,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
/* Preset enable is active low in Input/Output Control register */
preset_enable = !preset_enable;
+ mutex_lock(&priv->lock);
+
priv->preset_enable[count->id] = preset_enable;
ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1;
@@ -1137,9 +1265,124 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
/* Load I/O control configuration to Input / Output Control Register */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return len;
}
+static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id / 2;
+ const bool disabled = !(priv->cable_fault_enable & BIT(channel_id));
+ unsigned int status;
+ unsigned int fault;
+
+ if (disabled)
+ return -EINVAL;
+
+ /* Logic 0 = cable fault */
+ status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+
+ /* Mask respective channel and invert logic */
+ fault = !(status & BIT(channel_id));
+
+ return sprintf(buf, "%u\n", fault);
+}
+
+static ssize_t quad8_signal_cable_fault_enable_read(
+ struct counter_device *counter, struct counter_signal *signal,
+ void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id / 2;
+ const unsigned int enb = !!(priv->cable_fault_enable & BIT(channel_id));
+
+ return sprintf(buf, "%u\n", enb);
+}
+
+static ssize_t quad8_signal_cable_fault_enable_write(
+ struct counter_device *counter, struct counter_signal *signal,
+ void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id / 2;
+ bool enable;
+ int ret;
+ unsigned int cable_fault_enable;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret)
+ return ret;
+
+ if (enable)
+ priv->cable_fault_enable |= BIT(channel_id);
+ else
+ priv->cable_fault_enable &= ~BIT(channel_id);
+
+ /* Enable is active low in Differential Encoder Cable Status register */
+ cable_fault_enable = ~priv->cable_fault_enable;
+
+ outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+
+ return len;
+}
+
+static ssize_t quad8_signal_fck_prescaler_read(struct counter_device *counter,
+ struct counter_signal *signal, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id / 2;
+
+ return sprintf(buf, "%u\n", priv->fck_prescaler[channel_id]);
+}
+
+static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
+ struct counter_signal *signal, void *private, const char *buf,
+ size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id / 2;
+ const int base_offset = priv->base + 2 * channel_id;
+ u8 prescaler;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &prescaler);
+ if (ret)
+ return ret;
+
+ priv->fck_prescaler[channel_id] = prescaler;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set filter clock factor */
+ outb(prescaler, base_offset);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
+ base_offset + 1);
+
+ return len;
+}
+
+static const struct counter_signal_ext quad8_signal_ext[] = {
+ {
+ .name = "cable_fault",
+ .read = quad8_signal_cable_fault_read
+ },
+ {
+ .name = "cable_fault_enable",
+ .read = quad8_signal_cable_fault_enable_read,
+ .write = quad8_signal_cable_fault_enable_write
+ },
+ {
+ .name = "filter_clock_prescaler",
+ .read = quad8_signal_fck_prescaler_read,
+ .write = quad8_signal_fck_prescaler_write
+ }
+};
+
static const struct counter_signal_ext quad8_index_ext[] = {
COUNTER_SIGNAL_ENUM("index_polarity", &quad8_index_pol_enum),
COUNTER_SIGNAL_ENUM_AVAILABLE("index_polarity", &quad8_index_pol_enum),
@@ -1147,9 +1390,11 @@ static const struct counter_signal_ext quad8_index_ext[] = {
COUNTER_SIGNAL_ENUM_AVAILABLE("synchronous_mode", &quad8_syn_mode_enum)
};
-#define QUAD8_QUAD_SIGNAL(_id, _name) { \
- .id = (_id), \
- .name = (_name) \
+#define QUAD8_QUAD_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name), \
+ .ext = quad8_signal_ext, \
+ .num_ext = ARRAY_SIZE(quad8_signal_ext) \
}
#define QUAD8_INDEX_SIGNAL(_id, _name) { \
@@ -1307,6 +1552,9 @@ static int quad8_probe(struct device *dev, unsigned int id)
quad8iio->counter.priv = quad8iio;
quad8iio->base = base[id];
+ /* Initialize mutex */
+ mutex_init(&quad8iio->lock);
+
/* Reset all counters and disable interrupt function */
outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
/* Set initial configuration for all counters */
@@ -1314,6 +1562,12 @@ static int quad8_probe(struct device *dev, unsigned int id)
base_offset = base[id] + 2 * i;
/* Reset Byte Pointer */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ /* Reset filter clock factor */
+ outb(0, base_offset);
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
+ base_offset + 1);
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Reset Preset Register */
for (j = 0; j < 3; j++)
outb(0x00, base_offset);
@@ -1328,6 +1582,8 @@ static int quad8_probe(struct device *dev, unsigned int id)
/* Disable index function; negative index polarity */
outb(QUAD8_CTR_IDR, base_offset + 1);
}
+ /* Disable Differential Encoder Cable Status for all channels */
+ outb(0xFF, base[id] + QUAD8_DIFF_ENCODER_CABLE_STATUS);
/* Enable all counters */
outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
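
The 104-QUAD-8 exposes its 24-bit registers through a byte pointer that auto-increments across three sequential 8-bit port accesses, so each reset-pointer/transfer sequence must be serialized; that is what the new priv->lock protects in every path above. A sketch of the idiom, reusing the register constants defined in the driver:

#include <linux/io.h>
#include <linux/mutex.h>

/* Illustrative sketch of serializing a byte-pointer-based 24-bit read. */
struct example_quad {
	struct mutex lock;	/* assumed initialized with mutex_init() */
	unsigned int base;
};

static u32 example_read_count(struct example_quad *priv, int channel)
{
	const unsigned int base_offset = priv->base + 2 * channel;
	u32 val = 0;
	int i;

	mutex_lock(&priv->lock);

	/* Reset Byte Pointer; latch the counter into the output register */
	outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
	     base_offset + 1);

	/* Three sequential reads walk the auto-incrementing byte pointer */
	for (i = 0; i < 3; i++)
		val |= (u32)inb(base_offset) << (8 * i);

	mutex_unlock(&priv->lock);

	return val;
}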
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 3eafccec3beb..ef2a974a2f10 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -8,10 +8,10 @@
*
*/
#include <linux/counter.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/types.h>
#include <linux/mfd/stm32-timers.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#define TIM_CCMR_CCXS (BIT(8) | BIT(0))
@@ -20,11 +20,20 @@
#define TIM_CCER_MASK (TIM_CCER_CC1P | TIM_CCER_CC1NP | \
TIM_CCER_CC2P | TIM_CCER_CC2NP)
+struct stm32_timer_regs {
+ u32 cr1;
+ u32 cnt;
+ u32 smcr;
+ u32 arr;
+};
+
struct stm32_timer_cnt {
struct counter_device counter;
struct regmap *regmap;
struct clk *clk;
u32 ceiling;
+ bool enabled;
+ struct stm32_timer_regs bak;
};
/**
@@ -224,6 +233,9 @@ static ssize_t stm32_count_enable_write(struct counter_device *counter,
clk_disable(priv->clk);
}
+ /* Keep enabled state to properly handle low power states */
+ priv->enabled = enable;
+
return len;
}
@@ -358,10 +370,59 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
priv->counter.num_signals = ARRAY_SIZE(stm32_signals);
priv->counter.priv = priv;
+ platform_set_drvdata(pdev, priv);
+
/* Register Counter device */
return devm_counter_register(dev, &priv->counter);
}
+static int __maybe_unused stm32_timer_cnt_suspend(struct device *dev)
+{
+ struct stm32_timer_cnt *priv = dev_get_drvdata(dev);
+
+ /* Only take care of enabled counter: don't disturb other MFD child */
+ if (priv->enabled) {
+ /* Backup registers that may get lost in low power mode */
+ regmap_read(priv->regmap, TIM_SMCR, &priv->bak.smcr);
+ regmap_read(priv->regmap, TIM_ARR, &priv->bak.arr);
+ regmap_read(priv->regmap, TIM_CNT, &priv->bak.cnt);
+ regmap_read(priv->regmap, TIM_CR1, &priv->bak.cr1);
+
+ /* Disable the counter */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ clk_disable(priv->clk);
+ }
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int __maybe_unused stm32_timer_cnt_resume(struct device *dev)
+{
+ struct stm32_timer_cnt *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ if (priv->enabled) {
+ clk_enable(priv->clk);
+
+ /* Restore registers that may have been lost */
+ regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr);
+ regmap_write(priv->regmap, TIM_ARR, priv->bak.arr);
+ regmap_write(priv->regmap, TIM_CNT, priv->bak.cnt);
+
+ /* Also re-enables the counter */
+ regmap_write(priv->regmap, TIM_CR1, priv->bak.cr1);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_timer_cnt_pm_ops, stm32_timer_cnt_suspend,
+ stm32_timer_cnt_resume);
+
static const struct of_device_id stm32_timer_cnt_of_match[] = {
{ .compatible = "st,stm32-timer-counter", },
{},
@@ -373,6 +434,7 @@ static struct platform_driver stm32_timer_cnt_driver = {
.driver = {
.name = "stm32-timer-counter",
.of_match_table = stm32_timer_cnt_of_match,
+ .pm = &stm32_timer_cnt_pm_ops,
},
};
module_platform_driver(stm32_timer_cnt_driver);
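
The PM callbacks above only work because probe stored the private data with platform_set_drvdata(); __maybe_unused keeps the functions warning-free when CONFIG_PM is off, and SIMPLE_DEV_PM_OPS wires them into the driver. A minimal sketch of that plumbing, with all names illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

struct example_priv {
	bool enabled;	/* mirrors priv->enabled in the driver above */
};

static int __maybe_unused example_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	if (priv->enabled) {
		/* back up volatile registers, stop clocks ... */
	}

	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	if (priv->enabled) {
		/* restart clocks, restore registers ... */
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Required: the PM callbacks retrieve priv via dev_get_drvdata() */
	platform_set_drvdata(pdev, priv);

	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example",
		.pm = &example_pm_ops,
	},
};
module_platform_driver(example_driver);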
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index bff5295016ae..c3e6bd59e920 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -37,10 +37,12 @@ config CPU_FREQ_STAT
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
+ default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if BIG_LITTLE
+ default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP
default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
help
This option sets which CPUFreq governor shall be loaded at
- startup. If in doubt, select 'performance'.
+ startup. If in doubt, use the default setting.
config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
bool "performance"
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 3858d86cf409..15c1a1231516 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -128,7 +128,7 @@ config ARM_OMAP2PLUS_CPUFREQ
config ARM_QCOM_CPUFREQ_NVMEM
tristate "Qualcomm nvmem based CPUFreq"
- depends on ARM64
+ depends on ARCH_QCOM
depends on QCOM_QFPROM
depends on QCOM_SMEM
select PM_OPP
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index a6528388952e..bc58a0809d11 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -8,6 +8,8 @@ config X86_INTEL_PSTATE
depends on X86
select ACPI_PROCESSOR if ACPI
select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
+ select CPU_FREQ_GOV_PERFORMANCE
+ select CPU_FREQ_GOV_SCHEDUTIL if SMP
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
@@ -25,7 +27,7 @@ config X86_PCC_CPUFREQ
This driver adds support for the PCC interface.
For details, take a look at:
- <file:Documentation/cpu-freq/pcc-cpufreq.txt>.
+ <file:Documentation/admin-guide/pm/cpufreq_drivers.rst>.
To compile this driver as a module, choose M here: the
module will be called pcc-cpufreq.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d6f7df33ab8c..289e8ce3fd13 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -30,6 +30,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -991,8 +992,8 @@ late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
static const struct x86_cpu_id acpi_cpufreq_ids[] = {
- X86_FEATURE_MATCH(X86_FEATURE_ACPI),
- X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+ X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
+ X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
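
X86_FEATURE_MATCH() is replaced tree-wide by the newer match macros from <asm/cpu_device_id.h>, which take an explicit driver_data argument (NULL here). A sketch of a feature-based match table and the probe-time lookup, with illustrative names:

#include <asm/cpu_device_id.h>
#include <linux/module.h>

/* Illustrative feature-based match table using the new macro family. */
static const struct x86_cpu_id example_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, example_ids);

static int __init example_init(void)
{
	/* x86_match_cpu() returns the matching entry, or NULL */
	if (!x86_match_cpu(example_ids))
		return -ENODEV;

	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");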
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index e2df9d112106..f7c4206d4c90 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -18,6 +18,7 @@
#include <asm/msr.h>
#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
#include "cpufreq_ondemand.h"
@@ -144,7 +145,7 @@ static void __exit amd_freq_sensitivity_exit(void)
module_exit(amd_freq_sensitivity_exit);
static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
- X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK),
+ X86_MATCH_FEATURE(X86_FEATURE_PROC_FEEDBACK, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index f2ae9cd455c1..cb9db16bea61 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -141,6 +141,11 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "ti,dra7", },
{ .compatible = "ti,omap3", },
+ { .compatible = "qcom,ipq8064", },
+ { .compatible = "qcom,apq8064", },
+ { .compatible = "qcom,msm8974", },
+ { .compatible = "qcom,msm8960", },
+
{ }
};
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index d2b5f062a07b..26fe8dfb9ce6 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -363,6 +363,10 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
dt_cpufreq_driver.resume = data->resume;
if (data->suspend)
dt_cpufreq_driver.suspend = data->suspend;
+ if (data->get_intermediate) {
+ dt_cpufreq_driver.target_intermediate = data->target_intermediate;
+ dt_cpufreq_driver.get_intermediate = data->get_intermediate;
+ }
}
ret = cpufreq_register_driver(&dt_cpufreq_driver);
diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h
index a5a45b547d0b..28c8af7ec5ef 100644
--- a/drivers/cpufreq/cpufreq-dt.h
+++ b/drivers/cpufreq/cpufreq-dt.h
@@ -14,6 +14,10 @@ struct cpufreq_policy;
struct cpufreq_dt_platform_data {
bool have_governor_per_policy;
+ unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
+ int (*target_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
};
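
The two new hooks let a platform route frequency transitions through an intermediate step: the cpufreq core calls get_intermediate() to ask which intermediate frequency (if any, 0 meaning none) applies, then target_intermediate() before switching to the final frequency. A sketch of platform data using them, for a hypothetical platform that parks the CPU clock on a bypass source while its PLL is reprogrammed; all names and values are illustrative:

#include <linux/cpufreq.h>
#include "cpufreq-dt.h"

static unsigned int example_get_intermediate(struct cpufreq_policy *policy,
					     unsigned int index)
{
	/* Return the intermediate frequency in kHz, or 0 to skip the step */
	return 500000;
}

static int example_target_intermediate(struct cpufreq_policy *policy,
				       unsigned int index)
{
	/* Re-parent the CPU clock to the bypass source here */
	return 0;
}

static const struct cpufreq_dt_platform_data example_pdata = {
	.get_intermediate    = example_get_intermediate,
	.target_intermediate = example_target_intermediate,
};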
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 808874bccf4a..045f9fe157ce 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1733,6 +1733,26 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
+/**
+ * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
+ * @cpu: CPU number
+ *
+ * The default return value is the max_freq field of cpuinfo.
+ */
+__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
+
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
if (unlikely(policy_is_inactive(policy)))
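
Declaring cpufreq_get_hw_max_freq() __weak lets an architecture substitute a more accurate value while generic code keeps the cpuinfo.max_freq fallback; an arch file simply provides a non-weak definition of the same name, which wins at link time. A hypothetical override, with the returned constant purely illustrative:

#include <linux/cpufreq.h>

unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	/* e.g. derived from firmware or registers; value is illustrative */
	return 2800000;		/* kHz */
}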
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index f9bcf0f3ea30..94d959a8e954 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -90,35 +90,35 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
if (policy->fast_switch_enabled)
return 0;
- len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
- len += snprintf(buf + len, PAGE_SIZE - len, " : ");
+ len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
stats->freq_table[i]);
}
if (len >= PAGE_SIZE)
return PAGE_SIZE;
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
stats->trans_table[i*stats->max_state+j]);
}
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
}
if (len >= PAGE_SIZE) {
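
The reason for this conversion: snprintf() returns the length that *would* have been written, so summing its return values can push len past PAGE_SIZE and turn the subsequent buf + len into an out-of-bounds pointer; scnprintf() returns the number of bytes actually stored, keeping the running offset in bounds. A minimal illustration (hypothetical show function):

#include <linux/kernel.h>

static ssize_t example_show(char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < 1000; i++) {
		/*
		 * scnprintf() returns the bytes actually stored (excluding
		 * the NUL), so len never exceeds PAGE_SIZE and buf + len
		 * always stays inside the buffer; once the buffer is full,
		 * further calls simply return 0.
		 */
		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", i);
	}

	return len;
}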
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 45c18c6b8081..776a58bab0ff 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -385,7 +385,7 @@ static struct cpufreq_driver eps_driver = {
/* This driver will work only on Centaur C7 processors with
* Enhanced SpeedStep/PowerSaver registers */
static const struct x86_cpu_id eps_cpu_id[] = {
- { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_EST },
+ X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_EST, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, eps_cpu_id);
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 2242541f7ae3..4ce5eb35dc46 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -198,7 +198,7 @@ static struct cpufreq_driver elanfreq_driver = {
};
static const struct x86_cpu_id elan_id[] = {
- { X86_VENDOR_AMD, 4, 10, },
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 10, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, elan_id);
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 6cb8193421ea..de206d2745fe 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -19,6 +19,8 @@
#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
#define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6
#define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6)
+#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT 5
+#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 5)
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
@@ -31,6 +33,9 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
int speed_grade, mkt_segment;
int ret;
+ if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
+ return -ENODEV;
+
ret = nvmem_cell_read_u32(cpu_dev, "speed_grade", &cell_value);
if (ret)
return ret;
@@ -42,7 +47,13 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
else
speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
- mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
+
+ if (of_machine_is_compatible("fsl,imx8mp"))
+ mkt_segment = (cell_value & IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK)
+ >> IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT;
+ else
+ mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK)
+ >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
/*
* Early samples without fuses written report "0 0" which may NOT
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 648a09a1778a..fdb2ffffbd15 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -216,31 +216,41 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
#define OCOTP_CFG3_SPEED_996MHZ 0x2
#define OCOTP_CFG3_SPEED_852MHZ 0x1
-static void imx6q_opp_check_speed_grading(struct device *dev)
+static int imx6q_opp_check_speed_grading(struct device *dev)
{
struct device_node *np;
void __iomem *base;
u32 val;
+ int ret;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
- if (!np)
- return;
+ if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+ ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ if (ret)
+ return ret;
+ } else {
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+ if (!np)
+ return -ENOENT;
- base = of_iomap(np, 0);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- goto put_node;
+ base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ return -EFAULT;
+ }
+
+ /*
+ * SPEED_GRADING[1:0] defines the max speed of ARM:
+ * 2b'11: 1200000000Hz;
+ * 2b'10: 996000000Hz;
+ * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
+ * 2b'00: 792000000Hz;
+ * We need to set the max speed of ARM according to fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ iounmap(base);
}
- /*
- * SPEED_GRADING[1:0] defines the max speed of ARM:
- * 2b'11: 1200000000Hz;
- * 2b'10: 996000000Hz;
- * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
- * 2b'00: 792000000Hz;
- * We need to set the max speed of ARM according to fuse map.
- */
- val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
@@ -257,9 +267,8 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
if (dev_pm_opp_disable(dev, 1200000000))
dev_warn(dev, "failed to disable 1.2GHz OPP\n");
}
- iounmap(base);
-put_node:
- of_node_put(np);
+
+ return 0;
}
#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
@@ -281,6 +290,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "fsl,imx6ull-ocotp");
+ if (!np)
return -ENOENT;
base = of_iomap(np, 0);
@@ -378,23 +390,22 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
+ /* Because we have added the OPPs here, we must free them */
+ free_opp = true;
+
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
ret = imx6ul_opp_check_speed_grading(cpu_dev);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- goto put_node;
-
+ } else {
+ ret = imx6q_opp_check_speed_grading(cpu_dev);
+ }
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to read ocotp: %d\n",
ret);
- goto put_node;
- }
- } else {
- imx6q_opp_check_speed_grading(cpu_dev);
+ goto out_free_opp;
}
- /* Because we have added the OPPs here, we must free them */
- free_opp = true;
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
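
The imx6q rework prefers the nvmem cell when the device tree provides one and only falls back to mapping the OCOTP block directly. A condensed sketch of that precedence, reusing OCOTP_CFG3 as defined in the driver and keeping the patch's error codes:

#include <linux/io.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Condensed sketch of the "nvmem cell first, raw OCOTP second" fallback. */
static int example_read_speed_grade(struct device *dev, u32 *val)
{
	struct device_node *np;
	void __iomem *base;

	if (of_find_property(dev->of_node, "nvmem-cells", NULL))
		return nvmem_cell_read_u32(dev, "speed_grade", val);

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
	if (!np)
		return -ENOENT;

	base = of_iomap(np, 0);
	of_node_put(np);
	if (!base)
		return -EFAULT;

	*val = readl_relaxed(base + OCOTP_CFG3);
	iounmap(base);

	return 0;
}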
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index c81e1ff29069..4d3429b2058f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -922,6 +922,7 @@ static void intel_pstate_update_limits(unsigned int cpu)
*/
if (global.turbo_disabled_mf != global.turbo_disabled) {
global.turbo_disabled_mf = global.turbo_disabled;
+ arch_set_max_freq_ratio(global.turbo_disabled);
for_each_possible_cpu(cpu)
intel_pstate_update_max_freq(cpu);
} else {
@@ -1058,7 +1059,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
update_turbo_state();
if (global.turbo_disabled) {
- pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;
@@ -1908,51 +1909,51 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-#define ICPU(model, policy) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
- (unsigned long)&policy }
+#define X86_MATCH(model, policy) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ X86_FEATURE_APERFMPERF, &policy)
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
- ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs),
- ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
- ICPU(INTEL_FAM6_HASWELL, core_funcs),
- ICPU(INTEL_FAM6_BROADWELL, core_funcs),
- ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs),
- ICPU(INTEL_FAM6_HASWELL_X, core_funcs),
- ICPU(INTEL_FAM6_HASWELL_L, core_funcs),
- ICPU(INTEL_FAM6_HASWELL_G, core_funcs),
- ICPU(INTEL_FAM6_BROADWELL_G, core_funcs),
- ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs),
- ICPU(INTEL_FAM6_SKYLAKE_L, core_funcs),
- ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
- ICPU(INTEL_FAM6_SKYLAKE, core_funcs),
- ICPU(INTEL_FAM6_BROADWELL_D, core_funcs),
- ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
- ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
- ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
- ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs),
- ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
+ X86_MATCH(SANDYBRIDGE, core_funcs),
+ X86_MATCH(SANDYBRIDGE_X, core_funcs),
+ X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
+ X86_MATCH(IVYBRIDGE, core_funcs),
+ X86_MATCH(HASWELL, core_funcs),
+ X86_MATCH(BROADWELL, core_funcs),
+ X86_MATCH(IVYBRIDGE_X, core_funcs),
+ X86_MATCH(HASWELL_X, core_funcs),
+ X86_MATCH(HASWELL_L, core_funcs),
+ X86_MATCH(HASWELL_G, core_funcs),
+ X86_MATCH(BROADWELL_G, core_funcs),
+ X86_MATCH(ATOM_AIRMONT, airmont_funcs),
+ X86_MATCH(SKYLAKE_L, core_funcs),
+ X86_MATCH(BROADWELL_X, core_funcs),
+ X86_MATCH(SKYLAKE, core_funcs),
+ X86_MATCH(BROADWELL_D, core_funcs),
+ X86_MATCH(XEON_PHI_KNL, knl_funcs),
+ X86_MATCH(XEON_PHI_KNM, knl_funcs),
+ X86_MATCH(ATOM_GOLDMONT, core_funcs),
+ X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
+ X86_MATCH(SKYLAKE_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
- ICPU(INTEL_FAM6_BROADWELL_D, core_funcs),
- ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
- ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
+ X86_MATCH(BROADWELL_D, core_funcs),
+ X86_MATCH(BROADWELL_X, core_funcs),
+ X86_MATCH(SKYLAKE_X, core_funcs),
{}
};
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
- ICPU(INTEL_FAM6_KABYLAKE, core_funcs),
+ X86_MATCH(KABYLAKE, core_funcs),
{}
};
static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
- ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
- ICPU(INTEL_FAM6_SKYLAKE, core_funcs),
+ X86_MATCH(SKYLAKE_X, core_funcs),
+ X86_MATCH(SKYLAKE, core_funcs),
{}
};
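
The conversion is mechanical: X86_MATCH() pastes the INTEL_FAM6_ prefix onto the bare model name before handing off to X86_MATCH_VENDOR_FAM_MODEL_FEATURE(). A standalone sketch of the token-pasting idiom, with made-up values purely for illustration:

    #include <stdio.h>

    #define INTEL_FAM6_SKYLAKE 0x5e                 /* illustrative value */
    #define X86_MATCH(model)   match(INTEL_FAM6_##model)

    static void match(int model)
    {
            printf("matching model 0x%x\n", model);
    }

    int main(void)
    {
            X86_MATCH(SKYLAKE);     /* expands to match(INTEL_FAM6_SKYLAKE) */
            return 0;
    }
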
@@ -2155,15 +2156,19 @@ static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
}
}
-static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
+static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
+ struct cpufreq_policy_data *policy)
{
- struct cpudata *cpu = all_cpu_data[policy->cpu];
-
update_turbo_state();
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));
intel_pstate_adjust_policy_max(cpu, policy);
+}
+
+static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
+{
+ intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
return 0;
}
@@ -2243,10 +2248,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ /*
+ * Set the policy to powersave to provide a valid fallback value in case
+ * the default cpufreq governor is neither powersave nor performance.
+ */
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
return 0;
}
@@ -2268,12 +2274,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
- update_turbo_state();
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- intel_pstate_get_max_freq(cpu));
-
- intel_pstate_adjust_policy_max(cpu, policy);
-
+ intel_pstate_verify_cpu_policy(cpu, policy);
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
return 0;
@@ -2725,13 +2726,14 @@ static inline void intel_pstate_request_control_from_smm(void) {}
#define INTEL_PSTATE_HWP_BROADWELL 0x01
-#define ICPU_HWP(model, hwp_mode) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+#define X86_MATCH_HWP(model, hwp_mode) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ X86_FEATURE_HWP, hwp_mode)
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
- ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
- ICPU_HWP(INTEL_FAM6_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
- ICPU_HWP(X86_MODEL_ANY, 0),
+ X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(ANY, 0),
{}
};
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 92d92e67ae0a..123fb006810d 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -910,7 +910,7 @@ static struct cpufreq_driver longhaul_driver = {
};
static const struct x86_cpu_id longhaul_id[] = {
- { X86_VENDOR_CENTAUR, 6 },
+ X86_MATCH_VENDOR_FAM(CENTAUR, 6, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, longhaul_id);
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 0b08be8bff76..1caaec7c280b 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -281,8 +281,7 @@ static struct cpufreq_driver longrun_driver = {
};
static const struct x86_cpu_id longrun_ids[] = {
- { X86_VENDOR_TRANSMETA, X86_FAMILY_ANY, X86_MODEL_ANY,
- X86_FEATURE_LONGRUN },
+ X86_MATCH_VENDOR_FEATURE(TRANSMETA, X86_FEATURE_LONGRUN, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, longrun_ids);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index efc0b46efada..bb61677c11c7 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -231,7 +231,7 @@ static struct cpufreq_driver p4clockmod_driver = {
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ACC },
+ X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_ACC, NULL),
{}
};
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 0196f8129597..41eefef95d87 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -258,8 +258,8 @@ static struct cpufreq_driver powernow_k6_driver = {
};
static const struct x86_cpu_id powernow_k6_ids[] = {
- { X86_VENDOR_AMD, 5, 12 },
- { X86_VENDOR_AMD, 5, 13 },
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 12, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 13, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k6_ids);
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 5e5171d3eece..5d515fc34836 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -109,7 +109,7 @@ static int check_fsb(unsigned int fsbspeed)
}
static const struct x86_cpu_id powernow_k7_cpuids[] = {
- { X86_VENDOR_AMD, 6, },
+ X86_MATCH_VENDOR_FAM(AMD, 6, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k7_cpuids);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 2db2f1739e09..3984959eed1d 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -452,7 +452,7 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
static const struct x86_cpu_id powernow_k8_ids[] = {
/* IO based frequency switching */
- { X86_VENDOR_AMD, 0xf },
+ X86_MATCH_VENDOR_FAM(AMD, 0xf, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 56f4bc0d209e..8646eb197cd9 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -902,6 +902,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
+ struct cpufreq_policy *policy;
unsigned int cpu;
cpumask_t mask;
@@ -916,12 +917,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
chip->restore = false;
for_each_cpu(cpu, &mask) {
int index;
- struct cpufreq_policy policy;
- cpufreq_get_policy(&policy, cpu);
- index = cpufreq_table_find_index_c(&policy, policy.cur);
- powernv_cpufreq_target_index(&policy, index);
- cpumask_andnot(&mask, &mask, policy.cpus);
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ index = cpufreq_table_find_index_c(policy, policy->cur);
+ powernv_cpufreq_target_index(policy, index);
+ cpumask_andnot(&mask, &mask, policy->cpus);
+ cpufreq_cpu_put(policy);
}
out:
put_online_cpus();
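
The hunk above replaces the stack copy made by cpufreq_get_policy() with reference-counted access to the live policy; a condensed sketch of the resulting pattern (kernel context assumed):

    for_each_cpu(cpu, &mask) {
            struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

            if (!policy)
                    continue;       /* CPU has no policy; skip it */

            /* policy->cur and policy->cpus stay valid while the ref is held */
            cpumask_andnot(&mask, &mask, policy->cpus);
            cpufreq_cpu_put(policy);        /* drop the reference */
    }
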
@@ -1080,6 +1083,12 @@ free_and_return:
static inline void clean_chip_info(void)
{
+ int i;
+
+ /* flush any pending work items */
+ if (chips)
+ for (i = 0; i < nr_chips; i++)
+ cancel_work_sync(&chips[i].throttle);
kfree(chips);
}
@@ -1108,9 +1117,6 @@ static int __init powernv_cpufreq_init(void)
if (rc)
goto out;
- register_reboot_notifier(&powernv_cpufreq_reboot_nb);
- opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
-
if (powernv_pstate_info.wof_enabled)
powernv_cpufreq_driver.boost_enabled = true;
else
@@ -1119,15 +1125,17 @@ static int __init powernv_cpufreq_init(void)
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
pr_info("Failed to register the cpufreq driver (%d)\n", rc);
- goto cleanup_notifiers;
+ goto cleanup;
}
if (powernv_pstate_info.wof_enabled)
cpufreq_enable_boost_support();
+ register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+ opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
+
return 0;
-cleanup_notifiers:
- unregister_all_notifiers();
+cleanup:
clean_chip_info();
out:
pr_info("Platform driver disabled. System does not support PState control\n");
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index f0d2d5035413..a1b8238872a2 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -49,12 +49,14 @@ struct qcom_cpufreq_drv;
struct qcom_cpufreq_match_data {
int (*get_version)(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
struct qcom_cpufreq_drv *drv);
const char **genpd_names;
};
struct qcom_cpufreq_drv {
- struct opp_table **opp_tables;
+ struct opp_table **names_opp_tables;
+ struct opp_table **hw_opp_tables;
struct opp_table **genpd_opp_tables;
u32 versions;
const struct qcom_cpufreq_match_data *data;
@@ -62,6 +64,84 @@ struct qcom_cpufreq_drv {
static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
+static void get_krait_bin_format_a(struct device *cpu_dev,
+ int *speed, int *pvs, int *pvs_ver,
+ struct nvmem_cell *pvs_nvmem, u8 *buf)
+{
+ u32 pte_efuse;
+
+ pte_efuse = *((u32 *)buf);
+
+ *speed = pte_efuse & 0xf;
+ if (*speed == 0xf)
+ *speed = (pte_efuse >> 4) & 0xf;
+
+ if (*speed == 0xf) {
+ *speed = 0;
+ dev_warn(cpu_dev, "Speed bin: Defaulting to %d\n", *speed);
+ } else {
+ dev_dbg(cpu_dev, "Speed bin: %d\n", *speed);
+ }
+
+ *pvs = (pte_efuse >> 10) & 0x7;
+ if (*pvs == 0x7)
+ *pvs = (pte_efuse >> 13) & 0x7;
+
+ if (*pvs == 0x7) {
+ *pvs = 0;
+ dev_warn(cpu_dev, "PVS bin: Defaulting to %d\n", *pvs);
+ } else {
+ dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
+ }
+}
+
+static void get_krait_bin_format_b(struct device *cpu_dev,
+ int *speed, int *pvs, int *pvs_ver,
+ struct nvmem_cell *pvs_nvmem, u8 *buf)
+{
+ u32 pte_efuse, redundant_sel;
+
+ pte_efuse = *((u32 *)buf);
+ redundant_sel = (pte_efuse >> 24) & 0x7;
+
+ *pvs_ver = (pte_efuse >> 4) & 0x3;
+
+ switch (redundant_sel) {
+ case 1:
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *speed = (pte_efuse >> 27) & 0xf;
+ break;
+ case 2:
+ *pvs = (pte_efuse >> 27) & 0xf;
+ *speed = pte_efuse & 0x7;
+ break;
+ default:
+ /* 4 bits of PVS are in efuse register bits 31, 8-6. */
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *speed = pte_efuse & 0x7;
+ }
+
+ /* Check SPEED_BIN_BLOW_STATUS */
+ if (pte_efuse & BIT(3)) {
+ dev_dbg(cpu_dev, "Speed bin: %d\n", *speed);
+ } else {
+ dev_warn(cpu_dev, "Speed bin not set. Defaulting to 0!\n");
+ *speed = 0;
+ }
+
+ /* Check PVS_BLOW_STATUS */
+ pte_efuse = *(((u32 *)buf) + 4);
+ pte_efuse &= BIT(21);
+ if (pte_efuse) {
+ dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
+ } else {
+ dev_warn(cpu_dev, "PVS bin not set. Defaulting to 0!\n");
+ *pvs = 0;
+ }
+
+ dev_dbg(cpu_dev, "PVS version: %d\n", *pvs_ver);
+}
+
static enum _msm8996_version qcom_cpufreq_get_msm_id(void)
{
size_t len;
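
get_krait_bin_format_a() follows a primary/redundant fuse layout; a standalone sketch of just the speed-bin half of that decode:

    #include <stdio.h>
    #include <stdint.h>

    /* The low nibble holds the primary speed bin; 0xf means "blown out,
     * use the redundant copy" in the next nibble; if both read 0xf, fall
     * back to bin 0 as the driver does. */
    static int krait_a_speed_bin(uint32_t pte_efuse)
    {
            int speed = pte_efuse & 0xf;

            if (speed == 0xf)                       /* primary copy invalid */
                    speed = (pte_efuse >> 4) & 0xf; /* try redundant copy */

            if (speed == 0xf)                       /* both copies invalid */
                    speed = 0;

            return speed;
    }

    int main(void)
    {
            printf("%d\n", krait_a_speed_bin(0x2f)); /* redundant bin -> 2 */
            return 0;
    }
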
@@ -93,11 +173,13 @@ static enum _msm8996_version qcom_cpufreq_get_msm_id(void)
static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
struct qcom_cpufreq_drv *drv)
{
size_t len;
u8 *speedbin;
enum _msm8996_version msm8996_version;
+ *pvs_name = NULL;
msm8996_version = qcom_cpufreq_get_msm_id();
if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -125,10 +207,51 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
return 0;
}
+static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ int speed = 0, pvs = 0, pvs_ver = 0;
+ u8 *speedbin;
+ size_t len;
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ switch (len) {
+ case 4:
+ get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
+ speedbin_nvmem, speedbin);
+ break;
+ case 8:
+ get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
+ speedbin_nvmem, speedbin);
+ break;
+ default:
+		dev_err(cpu_dev, "Unable to read nvmem data\n");
+ return -ENODEV;
+ }
+
+ snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",
+ speed, pvs, pvs_ver);
+
+ drv->versions = (1 << speed);
+
+ kfree(speedbin);
+ return 0;
+}
+
static const struct qcom_cpufreq_match_data match_data_kryo = {
.get_version = qcom_cpufreq_kryo_name_version,
};
+static const struct qcom_cpufreq_match_data match_data_krait = {
+ .get_version = qcom_cpufreq_krait_name_version,
+};
+
static const char *qcs404_genpd_names[] = { "cpr", NULL };
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
@@ -141,6 +264,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
struct nvmem_cell *speedbin_nvmem;
struct device_node *np;
struct device *cpu_dev;
+	char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
+	char *pvs_name = pvs_name_buffer;
unsigned cpu;
const struct of_device_id *match;
int ret;
@@ -153,7 +277,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
+ ret = of_device_is_compatible(np, "operating-points-v2-qcom-cpu");
if (!ret) {
of_node_put(np);
return -ENOENT;
@@ -181,7 +305,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
goto free_drv;
}
- ret = drv->data->get_version(cpu_dev, speedbin_nvmem, drv);
+ ret = drv->data->get_version(cpu_dev,
+ speedbin_nvmem, &pvs_name, drv);
if (ret) {
nvmem_cell_put(speedbin_nvmem);
goto free_drv;
@@ -190,12 +315,20 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
of_node_put(np);
- drv->opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tables),
+ drv->names_opp_tables = kcalloc(num_possible_cpus(),
+ sizeof(*drv->names_opp_tables),
GFP_KERNEL);
- if (!drv->opp_tables) {
+ if (!drv->names_opp_tables) {
ret = -ENOMEM;
goto free_drv;
}
+ drv->hw_opp_tables = kcalloc(num_possible_cpus(),
+ sizeof(*drv->hw_opp_tables),
+ GFP_KERNEL);
+ if (!drv->hw_opp_tables) {
+ ret = -ENOMEM;
+ goto free_opp_names;
+ }
drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
sizeof(*drv->genpd_opp_tables),
@@ -213,11 +346,23 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
if (drv->data->get_version) {
- drv->opp_tables[cpu] =
- dev_pm_opp_set_supported_hw(cpu_dev,
- &drv->versions, 1);
- if (IS_ERR(drv->opp_tables[cpu])) {
- ret = PTR_ERR(drv->opp_tables[cpu]);
+
+ if (pvs_name) {
+ drv->names_opp_tables[cpu] = dev_pm_opp_set_prop_name(
+ cpu_dev,
+ pvs_name);
+ if (IS_ERR(drv->names_opp_tables[cpu])) {
+ ret = PTR_ERR(drv->names_opp_tables[cpu]);
+ dev_err(cpu_dev, "Failed to add OPP name %s\n",
+ pvs_name);
+ goto free_opp;
+ }
+ }
+
+ drv->hw_opp_tables[cpu] = dev_pm_opp_set_supported_hw(
+ cpu_dev, &drv->versions, 1);
+ if (IS_ERR(drv->hw_opp_tables[cpu])) {
+ ret = PTR_ERR(drv->hw_opp_tables[cpu]);
dev_err(cpu_dev,
"Failed to set supported hardware\n");
goto free_genpd_opp;
@@ -259,11 +404,18 @@ free_genpd_opp:
kfree(drv->genpd_opp_tables);
free_opp:
for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(drv->opp_tables[cpu]))
+ if (IS_ERR_OR_NULL(drv->names_opp_tables[cpu]))
+ break;
+ dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
+ }
+ for_each_possible_cpu(cpu) {
+ if (IS_ERR_OR_NULL(drv->hw_opp_tables[cpu]))
break;
- dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
+ dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
}
- kfree(drv->opp_tables);
+ kfree(drv->hw_opp_tables);
+free_opp_names:
+ kfree(drv->names_opp_tables);
free_drv:
kfree(drv);
@@ -278,13 +430,16 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu) {
- if (drv->opp_tables[cpu])
- dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
+ if (drv->names_opp_tables[cpu])
+ dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
+ if (drv->hw_opp_tables[cpu])
+ dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
if (drv->genpd_opp_tables[cpu])
dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
}
- kfree(drv->opp_tables);
+ kfree(drv->names_opp_tables);
+ kfree(drv->hw_opp_tables);
kfree(drv->genpd_opp_tables);
kfree(drv);
@@ -303,6 +458,10 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
+ { .compatible = "qcom,ipq8064", .data = &match_data_krait },
+ { .compatible = "qcom,apq8064", .data = &match_data_krait },
+ { .compatible = "qcom,msm8974", .data = &match_data_krait },
+ { .compatible = "qcom,msm8960", .data = &match_data_krait },
{},
};
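
Once the bin string is known, dev_pm_opp_set_prop_name() is expected to make the OPP core favor suffixed properties (e.g. opp-microvolt-speed1-pvs2-v0) over the plain ones, so a single table can carry values for every fused bin. The template string also bounds the snprintf(); a standalone check that typical bin values fit it:

    #include <stdio.h>

    int main(void)
    {
            /* writable buffer sized by the template string */
            char pvs_name[sizeof("speedXX-pvsXX-vXX")];

            snprintf(pvs_name, sizeof(pvs_name), "speed%d-pvs%d-v%d", 1, 2, 0);
            puts(pvs_name);         /* -> "speed1-pvs2-v0" */
            return 0;
    }
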
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index c6f647babaad..73a208559fe2 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -95,7 +95,7 @@ static struct cpufreq_driver sc520_freq_driver = {
};
static const struct x86_cpu_id sc520_ids[] = {
- { X86_VENDOR_AMD, 4, 9 },
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 9, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, sc520_ids);
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index b49f494e0285..75b10ecdb60f 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -520,18 +520,12 @@ static struct cpufreq_driver centrino_driver = {
* or ASCII model IDs.
*/
static const struct x86_cpu_id centrino_ids[] = {
- { X86_VENDOR_INTEL, 6, 9, X86_FEATURE_EST },
- { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
- { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
- { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
- { X86_VENDOR_INTEL, 15, 3, X86_FEATURE_EST },
- { X86_VENDOR_INTEL, 15, 4, X86_FEATURE_EST },
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL),
{}
};
-#if 0
-/* Autoload or not? Do not for now. */
-MODULE_DEVICE_TABLE(x86cpu, centrino_ids);
-#endif
/**
* centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 547fd7af5bf5..f2076d72bf39 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -319,15 +319,11 @@ static struct cpufreq_driver speedstep_driver = {
};
static const struct x86_cpu_id ss_smi_ids[] = {
- { X86_VENDOR_INTEL, 6, 0xb, },
- { X86_VENDOR_INTEL, 6, 0x8, },
- { X86_VENDOR_INTEL, 15, 2 },
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0),
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0),
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0),
{}
};
-#if 0
-/* Autoload or not? Do not for now. */
-MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids);
-#endif
/**
* speedstep_init - initializes the SpeedStep CPUFreq driver
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index eeb31bc21cc9..0ce9d4b6dfcc 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -299,15 +299,11 @@ static struct cpufreq_driver speedstep_driver = {
};
static const struct x86_cpu_id ss_smi_ids[] = {
- { X86_VENDOR_INTEL, 6, 0xb, },
- { X86_VENDOR_INTEL, 6, 0x8, },
- { X86_VENDOR_INTEL, 15, 2 },
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0),
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0),
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0),
{}
};
-#if 0
-/* Not auto loaded currently */
-MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids);
-#endif
/**
* speedstep_init - initializes the SpeedStep CPUFreq driver
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 557cb513bf7f..ab0de27539ad 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -25,11 +25,14 @@
#define DRA7_EFUSE_HAS_OD_MPU_OPP 11
#define DRA7_EFUSE_HAS_HIGH_MPU_OPP 15
+#define DRA76_EFUSE_HAS_PLUS_MPU_OPP 18
#define DRA7_EFUSE_HAS_ALL_MPU_OPP 23
+#define DRA76_EFUSE_HAS_ALL_MPU_OPP 24
#define DRA7_EFUSE_NOM_MPU_OPP BIT(0)
#define DRA7_EFUSE_OD_MPU_OPP BIT(1)
#define DRA7_EFUSE_HIGH_MPU_OPP BIT(2)
+#define DRA76_EFUSE_PLUS_MPU_OPP BIT(3)
#define OMAP3_CONTROL_DEVICE_STATUS 0x4800244C
#define OMAP3_CONTROL_IDCODE 0x4830A204
@@ -80,6 +83,10 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
*/
switch (efuse) {
+ case DRA76_EFUSE_HAS_PLUS_MPU_OPP:
+ case DRA76_EFUSE_HAS_ALL_MPU_OPP:
+ calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP;
+ /* Fall through */
case DRA7_EFUSE_HAS_ALL_MPU_OPP:
case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
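
The DRA76 cases reuse the cumulative fall-through of the existing DRA7 ones: each recognised efuse value enables its own OPP bit plus every bit below it. A standalone sketch with the defines inlined; the tail of the switch (the OD case) and the always-set nominal bit are assumed from the surrounding driver, not shown in the hunk:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t dra7_xlate(uint32_t efuse)
    {
            /* DRA7_EFUSE_NOM_MPU_OPP: nominal OPP assumed always set */
            uint32_t out = 1 << 0;

            switch (efuse) {
            case 18:                /* DRA76_EFUSE_HAS_PLUS_MPU_OPP */
            case 24:                /* DRA76_EFUSE_HAS_ALL_MPU_OPP */
                    out |= 1 << 3;  /* DRA76_EFUSE_PLUS_MPU_OPP */
                    /* fall through */
            case 23:                /* DRA7_EFUSE_HAS_ALL_MPU_OPP */
            case 15:                /* DRA7_EFUSE_HAS_HIGH_MPU_OPP */
                    out |= 1 << 2;  /* DRA7_EFUSE_HIGH_MPU_OPP */
                    /* fall through */
            case 11:                /* DRA7_EFUSE_HAS_OD_MPU_OPP */
                    out |= 1 << 1;  /* DRA7_EFUSE_OD_MPU_OPP */
            }

            return out;
    }

    int main(void)
    {
            printf("0x%x\n", (unsigned)dra7_xlate(24)); /* -> 0xf, all OPPs */
            return 0;
    }
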
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 62272ecfa771..99a2d72ac02b 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -86,3 +86,11 @@ config ARM_MVEBU_V7_CPUIDLE
depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
+
+config ARM_TEGRA_CPUIDLE
+ bool "CPU Idle Driver for NVIDIA Tegra SoCs"
+ depends on ARCH_TEGRA && !ARM64
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
+ select ARM_CPU_SUSPEND
+ help
+ Select this to enable cpuidle for NVIDIA Tegra20/30/114/124 SoCs.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cc8c769d7fa9..55a464f6a78b 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o
cpuidle_psci-y := cpuidle-psci.o
cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
+obj-$(CONFIG_ARM_TEGRA_CPUIDLE) += cpuidle-tegra.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index b0ce9bc78113..fcc53215bac8 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -18,6 +18,10 @@
#include <linux/kvm_para.h>
#include <linux/cpuidle_haltpoll.h>
+static bool force __read_mostly;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Load unconditionally");
+
static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
static enum cpuhp_state haltpoll_hp_state;
@@ -90,6 +94,11 @@ static void haltpoll_uninit(void)
haltpoll_cpuidle_devices = NULL;
}
+static bool haltpoll_want(void)
+{
+ return kvm_para_has_hint(KVM_HINTS_REALTIME) || force;
+}
+
static int __init haltpoll_init(void)
{
int ret;
@@ -101,8 +110,7 @@ static int __init haltpoll_init(void)
cpuidle_poll_state_init(drv);
- if (!kvm_para_available() ||
- !kvm_para_has_hint(KVM_HINTS_REALTIME))
+ if (!kvm_para_available() || !haltpoll_want())
return -ENODEV;
ret = cpuidle_register_driver(drv);
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index edd7a54ef0d3..bae9140a65a5 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -160,6 +160,29 @@ int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
return 0;
}
+static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+ struct psci_cpuidle_data *data,
+ unsigned int state_count, int cpu)
+{
+	/* Currently, limit use of the hierarchical topology to OSI mode. */
+ if (!psci_has_osi_support())
+ return 0;
+
+ data->dev = psci_dt_attach_cpu(cpu);
+ if (IS_ERR_OR_NULL(data->dev))
+ return PTR_ERR_OR_ZERO(data->dev);
+
+ /*
+ * Using the deepest state for the CPU to trigger a potential selection
+	 * of a shared state for the domain assumes the domain states are all
+ * deeper states.
+ */
+ drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
+ psci_cpuidle_use_cpuhp = true;
+
+ return 0;
+}
+
static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
struct device_node *cpu_node,
unsigned int state_count, int cpu)
@@ -193,25 +216,10 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
goto free_mem;
}
- /* Currently limit the hierarchical topology to be used in OSI mode. */
- if (psci_has_osi_support()) {
- data->dev = psci_dt_attach_cpu(cpu);
- if (IS_ERR(data->dev)) {
- ret = PTR_ERR(data->dev);
- goto free_mem;
- }
-
- /*
- * Using the deepest state for the CPU to trigger a potential
- * selection of a shared state for the domain, assumes the
- * domain states are all deeper states.
- */
- if (data->dev) {
- drv->states[state_count - 1].enter =
- psci_enter_domain_idle_state;
- psci_cpuidle_use_cpuhp = true;
- }
- }
+ /* Initialize optional data, used for the hierarchical topology. */
+ ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
+ if (ret < 0)
+ goto free_mem;
/* Idle states parsed correctly, store them in the per-cpu struct. */
data->psci_states = psci_states;
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
new file mode 100644
index 000000000000..313b0290e97b
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU idle driver for Tegra CPUs
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ * Copyright (c) 2011 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ * Gary King <gking@nvidia.com>
+ *
+ * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com>
+ *
+ * Tegra20/124 driver unification by Dmitry Osipenko <digetx@gmail.com>
+ */
+
+#define pr_fmt(fmt) "tegra-cpuidle: " fmt
+
+#include <linux/atomic.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/clk/tegra.h>
+#include <linux/firmware/trusted_foundations.h>
+
+#include <soc/tegra/cpuidle.h>
+#include <soc/tegra/flowctrl.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/irq.h>
+#include <soc/tegra/pm.h>
+#include <soc/tegra/pmc.h>
+
+#include <asm/cpuidle.h>
+#include <asm/firmware.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+enum tegra_state {
+ TEGRA_C1,
+ TEGRA_C7,
+ TEGRA_CC6,
+ TEGRA_STATE_COUNT,
+};
+
+static atomic_t tegra_idle_barrier;
+static atomic_t tegra_abort_flag;
+
+static inline bool tegra_cpuidle_using_firmware(void)
+{
+ return firmware_ops->prepare_idle && firmware_ops->do_idle;
+}
+
+static void tegra_cpuidle_report_cpus_state(void)
+{
+ unsigned long cpu, lcpu, csr;
+
+ for_each_cpu(lcpu, cpu_possible_mask) {
+ cpu = cpu_logical_map(lcpu);
+ csr = flowctrl_read_cpu_csr(cpu);
+
+ pr_err("cpu%lu: online=%d flowctrl_csr=0x%08lx\n",
+ cpu, cpu_online(lcpu), csr);
+ }
+}
+
+static int tegra_cpuidle_wait_for_secondary_cpus_parking(void)
+{
+ unsigned int retries = 3;
+
+ while (retries--) {
+ unsigned int delay_us = 10;
+ unsigned int timeout_us = 500 * 1000 / delay_us;
+
+ /*
+		 * The primary CPU0 core shall wait for the secondaries to
+		 * shut down in order to power off the CPU cluster safely.
+		 * The timeout value depends on the current CPU frequency;
+		 * it takes about 40-150us on average and over 1000us in
+		 * the worst-case scenario.
+ */
+ do {
+ if (tegra_cpu_rail_off_ready())
+ return 0;
+
+ udelay(delay_us);
+
+ } while (timeout_us--);
+
+ pr_err("secondary CPU taking too long to park\n");
+
+ tegra_cpuidle_report_cpus_state();
+ }
+
+	pr_err("timed out waiting for secondaries to park\n");
+
+ return -ETIMEDOUT;
+}
+
+static void tegra_cpuidle_unpark_secondary_cpus(void)
+{
+ unsigned int cpu, lcpu;
+
+ for_each_cpu(lcpu, cpu_online_mask) {
+ cpu = cpu_logical_map(lcpu);
+
+ if (cpu > 0) {
+ tegra_enable_cpu_clock(cpu);
+ tegra_cpu_out_of_reset(cpu);
+ flowctrl_write_cpu_halt(cpu, 0);
+ }
+ }
+}
+
+static int tegra_cpuidle_cc6_enter(unsigned int cpu)
+{
+ int ret;
+
+ if (cpu > 0) {
+ ret = cpu_suspend(cpu, tegra_pm_park_secondary_cpu);
+ } else {
+ ret = tegra_cpuidle_wait_for_secondary_cpus_parking();
+ if (!ret)
+ ret = tegra_pm_enter_lp2();
+
+ tegra_cpuidle_unpark_secondary_cpus();
+ }
+
+ return ret;
+}
+
+static int tegra_cpuidle_c7_enter(void)
+{
+ int err;
+
+ if (tegra_cpuidle_using_firmware()) {
+ err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
+ if (err)
+ return err;
+
+ return call_firmware_op(do_idle, 0);
+ }
+
+ return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
+}
+
+static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
+{
+ if (tegra_pending_sgi()) {
+ /*
+		 * The CPU got a local interrupt that will be lost after the
+		 * GIC's shutdown because the GIC driver doesn't save/restore the
+ * pending SGI state across CPU cluster PM. Abort and retry
+ * next time.
+ */
+ atomic_set(&tegra_abort_flag, 1);
+ }
+
+ cpuidle_coupled_parallel_barrier(dev, &tegra_idle_barrier);
+
+ if (atomic_read(&tegra_abort_flag)) {
+ cpuidle_coupled_parallel_barrier(dev, &tegra_idle_barrier);
+ atomic_set(&tegra_abort_flag, 0);
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ int index, unsigned int cpu)
+{
+ int ret;
+
+ /*
+ * CC6 state is the "CPU cluster power-off" state. In order to
+	 * enter this state, the secondary CPU cores first need to be
+	 * parked into offline mode; the last CPU should then clean out
+ * remaining dirty cache lines into DRAM and trigger Flow Controller
+ * logic that turns off the cluster's power domain (which includes
+ * CPU cores, GIC and L2 cache).
+ */
+ if (index == TEGRA_CC6) {
+ ret = tegra_cpuidle_coupled_barrier(dev);
+ if (ret)
+ return ret;
+ }
+
+ local_fiq_disable();
+ tegra_pm_set_cpu_in_lp2();
+ cpu_pm_enter();
+
+ switch (index) {
+ case TEGRA_C7:
+ ret = tegra_cpuidle_c7_enter();
+ break;
+
+ case TEGRA_CC6:
+ ret = tegra_cpuidle_cc6_enter(cpu);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ cpu_pm_exit();
+ tegra_pm_clear_cpu_in_lp2();
+ local_fiq_enable();
+
+ return ret;
+}
+
+static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
+{
+ /*
+	 * On Tegra30 CPU0 can't be power-gated separately from the secondary
+	 * cores because doing so gates the whole CPU cluster.
+ */
+ if (cpu > 0 || index != TEGRA_C7 || tegra_get_chip_id() != TEGRA30)
+ return index;
+
+ /* put CPU0 into C1 if C7 is requested and secondaries are online */
+ if (!IS_ENABLED(CONFIG_PM_SLEEP) || num_online_cpus() > 1)
+ index = TEGRA_C1;
+ else
+ index = TEGRA_CC6;
+
+ return index;
+}
+
+static int tegra_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned int cpu = cpu_logical_map(dev->cpu);
+ int err;
+
+ index = tegra_cpuidle_adjust_state_index(index, cpu);
+ if (dev->states_usage[index].disable)
+ return -1;
+
+ if (index == TEGRA_C1)
+ err = arm_cpuidle_simple_enter(dev, drv, index);
+ else
+ err = tegra_cpuidle_state_enter(dev, index, cpu);
+
+ if (err && (err != -EINTR || index != TEGRA_CC6))
+ pr_err_once("failed to enter state %d err: %d\n", index, err);
+
+ return err ? -1 : index;
+}
+
+static void tegra114_enter_s2idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ tegra_cpuidle_enter(dev, drv, index);
+}
+
+/*
+ * Previous versions of the Tegra CPUIDLE driver used a different "legacy"
+ * terminology for naming the idle states, while this driver uses the new
+ * terminology.
+ *
+ * Mapping of the old terms into the new ones:
+ *
+ * Old | New
+ * ---------
+ * LP3 | C1 (CPU core clock gating)
+ * LP2 | C7 (CPU core power gating)
+ * LP2 | CC6 (CPU cluster power gating)
+ *
+ * Note that the older CPUIDLE driver versions didn't explicitly
+ * differentiate the LP2 states, either because these states used the same
+ * code path or because CC6 wasn't supported.
+ */
+static struct cpuidle_driver tegra_idle_driver = {
+ .name = "tegra_idle",
+ .states = {
+ [TEGRA_C1] = ARM_CPUIDLE_WFI_STATE_PWR(600),
+ [TEGRA_C7] = {
+ .enter = tegra_cpuidle_enter,
+ .exit_latency = 2000,
+ .target_residency = 2200,
+ .power_usage = 100,
+ .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .name = "C7",
+ .desc = "CPU core powered off",
+ },
+ [TEGRA_CC6] = {
+ .enter = tegra_cpuidle_enter,
+ .exit_latency = 5000,
+ .target_residency = 10000,
+ .power_usage = 0,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_COUPLED,
+ .name = "CC6",
+ .desc = "CPU cluster powered off",
+ },
+ },
+ .state_count = TEGRA_STATE_COUNT,
+ .safe_state_index = TEGRA_C1,
+};
+
+static inline void tegra_cpuidle_disable_state(enum tegra_state state)
+{
+ cpuidle_driver_state_disabled(&tegra_idle_driver, state, true);
+}
+
+/*
+ * Tegra20 HW appears to have a bug such that PCIe device interrupts, whether
+ * they are legacy IRQs or MSI, are lost when CC6 is enabled. To work around
+ * this, simply disable CC6 if the PCI driver and DT node are both enabled.
+ */
+void tegra_cpuidle_pcie_irqs_in_use(void)
+{
+ struct cpuidle_state *state_cc6 = &tegra_idle_driver.states[TEGRA_CC6];
+
+ if ((state_cc6->flags & CPUIDLE_FLAG_UNUSABLE) ||
+ tegra_get_chip_id() != TEGRA20)
+ return;
+
+ pr_info("disabling CC6 state, since PCIe IRQs are in use\n");
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+}
+
+static void tegra_cpuidle_setup_tegra114_c7_state(void)
+{
+ struct cpuidle_state *s = &tegra_idle_driver.states[TEGRA_C7];
+
+ s->enter_s2idle = tegra114_enter_s2idle;
+ s->target_residency = 1000;
+ s->exit_latency = 500;
+}
+
+static int tegra_cpuidle_probe(struct platform_device *pdev)
+{
+ /* LP2 could be disabled in device-tree */
+ if (tegra_pmc_get_suspend_mode() < TEGRA_SUSPEND_LP2)
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+
+ /*
+ * Required suspend-resume functionality, which is provided by the
+	 * Tegra-arch core and PMC driver, is unavailable if the PM-sleep
+	 * option is disabled.
+ */
+ if (!IS_ENABLED(CONFIG_PM_SLEEP)) {
+ if (!tegra_cpuidle_using_firmware())
+ tegra_cpuidle_disable_state(TEGRA_C7);
+
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+ }
+
+ /*
+ * Generic WFI state (also known as C1 or LP3) and the coupled CPU
+	 * cluster power-off (CC6 or LP2) states are common to all Tegra SoCs.
+ */
+ switch (tegra_get_chip_id()) {
+ case TEGRA20:
+		/* Tegra20 isn't capable of powering off individual CPU cores */
+ tegra_cpuidle_disable_state(TEGRA_C7);
+ break;
+
+ case TEGRA30:
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+ break;
+
+ case TEGRA114:
+ case TEGRA124:
+ tegra_cpuidle_setup_tegra114_c7_state();
+
+ /* coupled CC6 (LP2) state isn't implemented yet */
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
+}
+
+static struct platform_driver tegra_cpuidle_driver = {
+ .probe = tegra_cpuidle_probe,
+ .driver = {
+ .name = "tegra-cpuidle",
+ },
+};
+builtin_platform_driver(tegra_cpuidle_driver);
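
Besides the platform driver hook, the file's one external entry point is tegra_cpuidle_pcie_irqs_in_use(). A hypothetical caller sketch (the function and call site are invented for illustration):

    #include <soc/tegra/cpuidle.h>

    static int hypothetical_pcie_enable_irqs(void)
    {
            /* ... request and enable legacy INTx/MSI interrupts here ... */

            /* keep CC6 from losing the IRQs on Tegra20 (see above) */
            tegra_cpuidle_pcie_irqs_in_use();
            return 0;
    }
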
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index de81298051b3..c149d9e20dfd 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -736,53 +736,15 @@ int cpuidle_register(struct cpuidle_driver *drv,
}
EXPORT_SYMBOL_GPL(cpuidle_register);
-#ifdef CONFIG_SMP
-
-/*
- * This function gets called when a part of the kernel has a new latency
- * requirement. This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
- */
-static int cpuidle_latency_notify(struct notifier_block *b,
- unsigned long l, void *v)
-{
- wake_up_all_idle_cpus();
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpuidle_latency_notifier = {
- .notifier_call = cpuidle_latency_notify,
-};
-
-static inline void latency_notifier_init(struct notifier_block *n)
-{
- pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
-}
-
-#else /* CONFIG_SMP */
-
-#define latency_notifier_init(x) do { } while (0)
-
-#endif /* CONFIG_SMP */
-
/**
* cpuidle_init - core initializer
*/
static int __init cpuidle_init(void)
{
- int ret;
-
if (cpuidle_disabled())
return -ENODEV;
- ret = cpuidle_add_interface(cpu_subsys.dev_root);
- if (ret)
- return ret;
-
- latency_notifier_init(&cpuidle_latency_notifier);
-
- return 0;
+ return cpuidle_add_interface(cpu_subsys.dev_root);
}
module_param(off, int, 0444);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index e48271e117a3..29acaf48e575 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -109,9 +109,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
*/
s64 cpuidle_governor_latency_req(unsigned int cpu)
{
- int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_resume_latency(device);
+ int global_req = cpu_latency_qos_limit();
if (device_req > global_req)
device_req = global_req;
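
cpu_latency_qos_limit() returns the aggregate of all CPU latency QoS requests, standing in for the old PM_QOS_CPU_DMA_LATENCY class; the governor then clamps the per-device resume latency against it. A sketch of how a driver would feed that aggregate, assuming the 5.7-era CPU latency QoS API (request name invented):

    #include <linux/pm_qos.h>

    static struct pm_qos_request hypothetical_req;  /* name is illustrative */

    static void hypothetical_start_low_latency(void)
    {
            /* cap CPU wakeup latency at 20us while this path is active */
            cpu_latency_qos_add_request(&hypothetical_req, 20);
    }

    static void hypothetical_stop_low_latency(void)
    {
            cpu_latency_qos_remove_request(&hypothetical_req);
    }
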
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index c2767ed54dfe..2c887e4d005a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -233,20 +233,6 @@ config CRYPTO_CRC32_S390
It is available with IBM z13 or later.
-config CRYPTO_DEV_MARVELL_CESA
- tristate "Marvell's Cryptographic Engine driver"
- depends on PLAT_ORION || ARCH_MVEBU
- select CRYPTO_LIB_AES
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select SRAM
- help
- This driver allows you to utilize the Cryptographic Engines and
- Security Accelerator (CESA) which can be found on MVEBU and ORION
- platforms.
- This driver supports CPU offload through DMA transfers.
-
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
@@ -606,6 +592,7 @@ config CRYPTO_DEV_MXS_DCP
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
source "drivers/crypto/cavium/nitrox/Kconfig"
+source "drivers/crypto/marvell/Kconfig"
config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
@@ -685,6 +672,29 @@ choice
endchoice
+config CRYPTO_DEV_QCE_SW_MAX_LEN
+ int "Default maximum request size to use software for AES"
+ depends on CRYPTO_DEV_QCE && CRYPTO_DEV_QCE_SKCIPHER
+ default 512
+ help
+ This sets the default maximum request size to perform AES requests
+ using software instead of the crypto engine. It can be changed by
+ setting the aes_sw_max_len parameter.
+
+ Small blocks are processed faster in software than hardware.
+	  Small blocks are processed faster in software than in hardware.
+	  With 256-bit ciphers, software is 2-3 times faster than the qce
+	  engine at 256 bytes, 30% faster at 512, and about even at 768
+	  bytes. With 128-bit keys, the break-even point is around 1024
+	  bytes.
+ The default is set a little lower, to 512 bytes, to balance the
+ cost in CPU usage. The minimum recommended setting is 16-bytes
+	  cost in CPU usage. The minimum recommended setting is 16 bytes
+	  (one AES block), since AES-GCM will fail if you set it lower.
+
+ Note that 192-bit keys are not supported by the hardware and are
+ always processed by the software fallback, and all DES requests
+ are done by the hardware.
+
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
depends on ARCH_QCOM || COMPILE_TEST
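
A sketch of how a driver might honor such a threshold, assuming a module parameter named aes_sw_max_len as the help text states; the registration details and helper below are illustrative, not the driver's actual code:

    #include <linux/module.h>

    static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
    module_param(aes_sw_max_len, uint, 0644);
    MODULE_PARM_DESC(aes_sw_max_len,
                     "AES requests this size or smaller use software");

    static bool use_sw_fallback(unsigned int req_len, unsigned int keylen)
    {
            /* 192-bit keys are unsupported by the engine per the help text */
            if (keylen == 24)
                    return true;

            return req_len <= aes_sw_max_len;
    }
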
@@ -731,6 +741,18 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
It supports the cbc/ecb chain modes and the aes/des/des3_ede ciphers.
+config CRYPTO_DEV_ZYNQMP_AES
+ tristate "Support for Xilinx ZynqMP AES hw accelerator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_AES
+ select CRYPTO_ENGINE
+ select CRYPTO_AEAD
+ help
+	  Xilinx ZynqMP has an AES-GCM engine used for symmetric-key
+	  encryption and decryption. This driver interfaces with the AES
+	  hw accelerator. Select this if you want to use the ZynqMP module
+	  for AES algorithms.
+
config CRYPTO_DEV_MEDIATEK
tristate "MediaTek's EIP97 Cryptographic Engine driver"
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 40229d499476..944ed7226e37 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
+obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
@@ -47,5 +47,6 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index f72346a44e69..3e4e4bbda34c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -565,10 +565,8 @@ static int sun8i_ce_probe(struct platform_device *pdev)
/* Get Non Secure IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(ce->dev, "Cannot get CryptoEngine Non-secure IRQ\n");
+ if (irq < 0)
return irq;
- }
ce->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(ce->reset)) {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 8f8404c84a4d..0e9eac397e1b 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -214,7 +214,7 @@ struct sun8i_cipher_tfm_ctx {
* this template
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
- * @stat_fb: total of all data len done on this template
+ * @stat_fb: number of requests which have fallen back
*/
struct sun8i_ce_alg_template {
u32 type;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index b5f855f3de10..29c44f279112 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -186,7 +186,7 @@ struct sun8i_cipher_tfm_ctx {
* this template
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
- * @stat_fb: total of all data len done on this template
+ * @stat_fb: number of requests which have fallen back
*/
struct sun8i_ss_alg_template {
u32 type;
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index 1d3355913b40..e8e8281e027d 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -176,7 +176,8 @@ static int atmel_i2c_wakeup(struct i2c_client *client)
* device is idle, asleep or during waking up. Don't check for error
* when waking up the device.
*/
- i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
+ i2c_transfer_buffer_flags(client, i2c_priv->wake_token,
+ i2c_priv->wake_token_sz, I2C_M_IGNORE_NAK);
/*
* Wait to wake the device. Typical execution times for ecdh and genkey
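
i2c_master_send() treats the NAK that a sleeping ATECC device gives its own wake token as an error, while i2c_transfer_buffer_flags() lets the caller pass I2C_M_IGNORE_NAK so the token still clocks out and the transfer reports success. A minimal sketch of the call pattern (helper name invented):

    #include <linux/i2c.h>

    static void hypothetical_send_wake(struct i2c_client *client,
                                       char *token, int len)
    {
            /* treat the device's expected NAK as success, not -ENXIO;
             * the return value is ignored, as in the wakeup path above */
            i2c_transfer_buffer_flags(client, token, len, I2C_M_IGNORE_NAK);
    }
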
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index cd7504101acd..2b304fc78059 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -366,88 +366,88 @@ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
ipriv = filp->private_data;
out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Number of SPUs.........%u\n",
ipriv->spu.num_spu);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Current sessions.......%u\n",
atomic_read(&ipriv->session_count));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Session count..........%u\n",
atomic_read(&ipriv->stream_count));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Cipher setkey..........%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Cipher Ops.............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
spu_alg_name(alg, mode), op_cnt);
}
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Hash Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"HMAC setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"HMAC Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"AEAD setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"AEAD Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
aead_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Bytes of req data......%llu\n",
(u64)atomic64_read(&ipriv->bytes_out));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Bytes of resp data.....%llu\n",
(u64)atomic64_read(&ipriv->bytes_in));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Mailbox full...........%u\n",
atomic_read(&ipriv->mb_no_spc));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Mailbox send failures..%u\n",
atomic_read(&ipriv->mb_send_fail));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Check ICV errors.......%u\n",
atomic_read(&ipriv->bad_icv));
if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
@@ -455,7 +455,7 @@ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
SPU_OFIFO_CTRL);
fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
"SPU %d output FIFO high water.....%u\n",
i, fifo_len);
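
The conversion matters because snprintf() returns the length that would have been written had the buffer been big enough, so chained "out_offset += snprintf(...)" calls can push the offset past the buffer, after which "out_count - out_offset" underflows; the kernel's scnprintf() returns the bytes actually written and keeps the offset bounded. A standalone demo of the snprintf() pitfall:

    #include <stdio.h>

    int main(void)
    {
            char buf[8];
            int off = 0;

            off += snprintf(buf + off, sizeof(buf) - off, "0123456789");
            /* off is now 10, two past the 8-byte buffer ... */
            printf("off=%d remaining=%zu\n", off, sizeof(buf) - off);
            /* ... so the size_t "remaining" has wrapped to a huge value */
            return 0;
    }
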
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index fac5b2e26610..a62f228be6da 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -13,6 +13,7 @@ config CRYPTO_DEV_FSL_CAAM
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
select SOC_BUS
select CRYPTO_DEV_FSL_CAAM_COMMON
+ imply FSL_MC_BUS
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -33,6 +34,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
menuconfig CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
+ select CRYPTO_ENGINE
default y
help
Enables the driver module for Job Rings which are part of
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ef1a65f4fc92..b2f9882bc010 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -56,6 +56,7 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
+#include <crypto/engine.h>
/*
* crypto alg
@@ -101,6 +102,7 @@ struct caam_skcipher_alg {
* per-session context
*/
struct caam_ctx {
+ struct crypto_engine_ctx enginectx;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
@@ -114,6 +116,14 @@ struct caam_ctx {
unsigned int authsize;
};
+struct caam_skcipher_req_ctx {
+ struct skcipher_edesc *edesc;
+};
+
+struct caam_aead_req_ctx {
+ struct aead_edesc *edesc;
+};
+
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -858,6 +868,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
* @mapped_src_nents: number of segments in input h/w link table
* @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: stored to determine if the request needs backlog
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -868,6 +879,7 @@ struct aead_edesc {
int mapped_src_nents;
int mapped_dst_nents;
int sec4_sg_bytes;
+ bool bklog;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
u32 hw_desc[];
@@ -881,6 +893,7 @@ struct aead_edesc {
* @mapped_dst_nents: number of segments in output h/w link table
* @iv_dma: dma address of iv for checking continuity and link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: stored to determine if the request needs backlog
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -893,9 +906,10 @@ struct skcipher_edesc {
int mapped_dst_nents;
dma_addr_t iv_dma;
int sec4_sg_bytes;
+ bool bklog;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
- u32 hw_desc[0];
+ u32 hw_desc[];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
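
The hw_desc[0] -> hw_desc[] change swaps a GCC zero-length array for a C99 flexible array member. The single-allocation layout is unchanged, but the compiler can now enforce correct use (the member must come last and adds nothing to sizeof). A standalone illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct edesc {
            int nents;
            unsigned int hw_desc[];         /* C99 flexible array member */
    };

    int main(void)
    {
            int words = 16;
            /* one allocation carries the header plus the descriptor tail */
            struct edesc *e = calloc(1, sizeof(*e) +
                                        words * sizeof(e->hw_desc[0]));

            if (!e)
                    return 1;
            e->hw_desc[words - 1] = 0xdeadbeef;
            printf("sizeof(struct edesc) = %zu\n", sizeof(*e));
            free(e);
            return 0;
    }
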
@@ -941,37 +955,20 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
-{
- struct aead_request *req = context;
- struct aead_edesc *edesc;
- int ecode = 0;
-
- dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
- edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
-
- if (err)
- ecode = caam_jr_strstatus(jrdev, err);
-
- aead_unmap(jrdev, edesc, req);
-
- kfree(edesc);
-
- aead_request_complete(req, ecode);
-}
-
-static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
struct aead_request *req = context;
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct aead_edesc *edesc;
int ecode = 0;
+ bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+ edesc = rctx->edesc;
+ has_bklog = edesc->bklog;
if (err)
ecode = caam_jr_strstatus(jrdev, err);
@@ -980,61 +977,32 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
kfree(edesc);
- aead_request_complete(req, ecode);
-}
-
-static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
-{
- struct skcipher_request *req = context;
- struct skcipher_edesc *edesc;
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- int ivsize = crypto_skcipher_ivsize(skcipher);
- int ecode = 0;
-
- dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
- edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
-
- if (err)
- ecode = caam_jr_strstatus(jrdev, err);
-
- skcipher_unmap(jrdev, edesc, req);
-
/*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block (CBC mode) or last counter (CTR mode).
- * This is used e.g. by the CTS mode.
+	 * If the backlog flag is not set, the completion of the request
+	 * is done by CAAM, not by the crypto engine.
*/
- if (ivsize && !ecode) {
- memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
- ivsize);
- print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- edesc->src_nents > 1 ? 100 : ivsize, 1);
- }
-
- caam_dump_sg("dst @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-
- kfree(edesc);
-
- skcipher_request_complete(req, ecode);
+ if (!has_bklog)
+ aead_request_complete(req, ecode);
+ else
+ crypto_finalize_aead_request(jrp->engine, req, ecode);
}
-static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
struct skcipher_request *req = context;
struct skcipher_edesc *edesc;
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
int ivsize = crypto_skcipher_ivsize(skcipher);
int ecode = 0;
+ bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
+ edesc = rctx->edesc;
+ has_bklog = edesc->bklog;
if (err)
ecode = caam_jr_strstatus(jrdev, err);
@@ -1060,7 +1028,14 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
kfree(edesc);
- skcipher_request_complete(req, ecode);
+ /*
+	 * If the backlog flag is not set, the completion of the request
+	 * is done by CAAM, not by the crypto engine.
+ */
+ if (!has_bklog)
+ skcipher_request_complete(req, ecode);
+ else
+ crypto_finalize_skcipher_request(jrp->engine, req, ecode);
}
/*
@@ -1306,6 +1281,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
@@ -1406,6 +1382,9 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
+
+ rctx->edesc = edesc;
+
*all_contig_ptr = !(mapped_src_nents > 1);
sec4_sg_index = 0;
@@ -1436,41 +1415,34 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return edesc;
}
-static int gcm_encrypt(struct aead_request *req)
+static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
{
- struct aead_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- bool all_contig;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor */
- init_gcm_job(req, edesc, all_contig, true);
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+ struct aead_edesc *edesc = rctx->edesc;
+ u32 *desc = edesc->hw_desc;
+ int ret;
- print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
+ /*
+ * Only backlogged requests are sent to the crypto engine, since the
+ * others can be handled by CAAM, if free, especially since the JR has
+ * up to 1024 entries (more than the 10 entries of the crypto engine).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
aead_unmap(jrdev, edesc, req);
- kfree(edesc);
+ kfree(rctx->edesc);
}
return ret;
}
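aead_enqueue_req routes on the MAY_BACKLOG flag: backlogged requests go through the crypto engine's serializing queue, everything else straight to the job ring, and resources are released unless the request is in flight (-EINPROGRESS) or backlogged (-EBUSY). A minimal userspace model of that decision, with an assumed flag value standing in for the kernel constant:

#include <errno.h>
#include <stdio.h>

#define MAY_BACKLOG	0x1	/* stands in for CRYPTO_TFM_REQ_MAY_BACKLOG */

static int engine_transfer(void) { return -EBUSY; }	/* queued, backlogged */
static int jr_enqueue(void) { return -EINPROGRESS; }	/* accepted by HW */

static int enqueue_req(unsigned int flags)
{
	int ret;

	if (flags & MAY_BACKLOG)
		ret = engine_transfer();
	else
		ret = jr_enqueue();

	if (ret != -EINPROGRESS && ret != -EBUSY)
		puts("unmap and free the descriptor");

	return ret;
}

int main(void)
{
	printf("backlogged: %d\n", enqueue_req(MAY_BACKLOG));
	printf("direct:     %d\n", enqueue_req(0));
	return 0;
}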
-static int chachapoly_encrypt(struct aead_request *req)
+static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -1478,180 +1450,130 @@ static int chachapoly_encrypt(struct aead_request *req)
struct device *jrdev = ctx->jrdev;
bool all_contig;
u32 *desc;
- int ret;
edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
- true);
+ encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
desc = edesc->hw_desc;
- init_chachapoly_job(req, edesc, all_contig, true);
+ init_chachapoly_job(req, edesc, all_contig, encrypt);
print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
+ return aead_enqueue_req(jrdev, req);
}
-static int chachapoly_decrypt(struct aead_request *req)
+static int chachapoly_encrypt(struct aead_request *req)
{
- struct aead_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- bool all_contig;
- u32 *desc;
- int ret;
-
- edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
- false);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- desc = edesc->hw_desc;
-
- init_chachapoly_job(req, edesc, all_contig, false);
- print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
- 1);
-
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
+ return chachapoly_crypt(req, true);
}
-static int ipsec_gcm_encrypt(struct aead_request *req)
+static int chachapoly_decrypt(struct aead_request *req)
{
- return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
+ return chachapoly_crypt(req, false);
}
-static int aead_encrypt(struct aead_request *req)
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
- u32 *desc;
- int ret = 0;
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
- &all_contig, true);
+ &all_contig, encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor */
- init_authenc_job(req, edesc, all_contig, true);
+ init_authenc_job(req, edesc, all_contig, encrypt);
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
+ return aead_enqueue_req(jrdev, req);
+}
- return ret;
+static int aead_encrypt(struct aead_request *req)
+{
+ return aead_crypt(req, true);
}
-static int gcm_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
- struct aead_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- bool all_contig;
- u32 *desc;
- int ret = 0;
+ return aead_crypt(req, false);
+}
- /* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
+static int aead_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = aead_request_cast(areq);
+ struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+ u32 *desc = rctx->edesc->hw_desc;
+ int ret;
- /* Create and submit job descriptor*/
- init_gcm_job(req, edesc, all_contig, false);
+ rctx->edesc->bklog = true;
- print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
+ ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
+ if (ret != -EINPROGRESS) {
+ aead_unmap(ctx->jrdev, rctx->edesc, req);
+ kfree(rctx->edesc);
} else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
+ ret = 0;
}
return ret;
}
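aead_do_one_req follows the crypto-engine do_one_request contract: return 0 once the job is in flight and let the completion callback finalize the request, or unwind locally and return the error so the engine finalizes it immediately. A standalone sketch of that contract (hw_submit and the error values are illustrative):

#include <errno.h>
#include <stdio.h>

/* pretend hardware: accept the job or fail the submission */
static int hw_submit(int accept)
{
	return accept ? -EINPROGRESS : -ENOMEM;
}

static int do_one_req(int hw_accepts)
{
	int ret = hw_submit(hw_accepts);

	if (ret != -EINPROGRESS) {
		puts("unmap and free the descriptor");
		return ret;	/* engine finalizes the request with this */
	}
	return 0;	/* completion happens later, in the callback */
}

int main(void)
{
	printf("accepted: %d\n", do_one_req(1));
	printf("rejected: %d\n", do_one_req(0));
	return 0;
}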
-static int ipsec_gcm_decrypt(struct aead_request *req)
-{
- return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
-}
-
-static int aead_decrypt(struct aead_request *req)
+static inline int gcm_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
- u32 *desc;
- int ret = 0;
-
- caam_dump_sg("dec src@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1);
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
- &all_contig, false);
+ edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
+ encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /* Create and submit job descriptor*/
- init_authenc_job(req, edesc, all_contig, false);
+ /* Create and submit job descriptor */
+ init_gcm_job(req, edesc, all_contig, encrypt);
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
+ return aead_enqueue_req(jrdev, req);
+}
- return ret;
+static int gcm_encrypt(struct aead_request *req)
+{
+ return gcm_crypt(req, true);
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+ return gcm_crypt(req, false);
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
}
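The ipsec_gcm_* wrappers rely on the GNU "?:" extension: "a ?: b" yields a when it is nonzero (here, the assoclen-check error) and b otherwise, without evaluating a twice. A compilable illustration with made-up check and operation functions:

#include <errno.h>
#include <stdio.h>

static int check_assoclen(int assoclen)
{
	return assoclen == 16 ? 0 : -EINVAL;
}

static int do_op(void)
{
	return 0;	/* pretend the actual operation ran */
}

static int op_checked(int assoclen)
{
	/* error from the check if nonzero, otherwise run the operation */
	return check_assoclen(assoclen) ?: do_op();
}

int main(void)
{
	printf("good assoclen: %d\n", op_checked(16));
	printf("bad assoclen:  %d\n", op_checked(3));
	return 0;
}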
/*
@@ -1662,6 +1584,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1760,6 +1683,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
+ rctx->edesc = edesc;
/* Make sure IV is located in a DMAable area */
if (ivsize) {
@@ -1791,7 +1715,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
if (ivsize || mapped_dst_nents > 1)
sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
- mapped_dst_nents);
+ mapped_dst_nents - 1 + !!ivsize);
if (sec4_sg_bytes) {
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1815,49 +1739,35 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return edesc;
}
-static int skcipher_encrypt(struct skcipher_request *req)
+static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
- struct skcipher_edesc *edesc;
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct device *jrdev = ctx->jrdev;
- u32 *desc;
- int ret = 0;
-
- if (!req->cryptlen)
- return 0;
-
- /* allocate extended descriptor */
- edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor*/
- init_skcipher_job(req, edesc, true);
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+ u32 *desc = rctx->edesc->hw_desc;
+ int ret;
- print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
+ rctx->edesc->bklog = true;
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
+ ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
+ if (ret != -EINPROGRESS) {
+ skcipher_unmap(ctx->jrdev, rctx->edesc, req);
+ kfree(rctx->edesc);
} else {
- skcipher_unmap(jrdev, edesc, req);
- kfree(edesc);
+ ret = 0;
}
return ret;
}
-static int skcipher_decrypt(struct skcipher_request *req)
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
u32 *desc;
int ret = 0;
@@ -1870,17 +1780,25 @@ static int skcipher_decrypt(struct skcipher_request *req)
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
- init_skcipher_job(req, edesc, false);
- desc = edesc->hw_desc;
+ init_skcipher_job(req, edesc, encrypt);
print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
- ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
+ desc = edesc->hw_desc;
+ /*
+ * Only backlogged requests are sent to the crypto engine, since the
+ * others can be handled by CAAM, if free, especially since the JR has
+ * up to 1024 entries (more than the 10 entries of the crypto engine).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
+
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
@@ -1888,6 +1806,16 @@ static int skcipher_decrypt(struct skcipher_request *req)
return ret;
}
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+ return skcipher_crypt(req, true);
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+ return skcipher_crypt(req, false);
+}
+
static struct caam_skcipher_alg driver_algs[] = {
{
.skcipher = {
@@ -3391,6 +3319,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
{
dma_addr_t dma_addr;
struct caam_drv_private *priv;
+ const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
+ sh_desc_enc);
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -3406,7 +3336,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
- sh_desc_enc_dma),
+ sh_desc_enc_dma) -
+ sh_desc_enc_offset,
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
@@ -3416,8 +3347,10 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->sh_desc_enc_dma = dma_addr;
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
- sh_desc_dec);
- ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+ sh_desc_dec) -
+ sh_desc_enc_offset;
+ ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
+ sh_desc_enc_offset;
/* copy descriptor header template value */
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
@@ -3431,6 +3364,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+
+ ctx->enginectx.op.do_one_request = skcipher_do_one_req;
return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
false);
@@ -3443,13 +3381,18 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
+
+ ctx->enginectx.op.do_one_request = aead_do_one_req;
+
return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
- offsetof(struct caam_ctx, sh_desc_enc_dma),
+ offsetof(struct caam_ctx, sh_desc_enc_dma) -
+ offsetof(struct caam_ctx, sh_desc_enc),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
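With the shared-descriptor DMA mapping now starting at sh_desc_enc rather than at the struct base, every derived DMA address subtracts that starting offset. A standalone sketch of the arithmetic on an illustrative struct (not the driver's actual layout):

#include <stddef.h>
#include <stdio.h>

struct ctx {
	unsigned int sh_desc_enc[4];
	unsigned int sh_desc_dec[4];
	unsigned char key[32];
	unsigned long sh_desc_enc_dma;	/* first member outside the mapping */
};

int main(void)
{
	size_t start = offsetof(struct ctx, sh_desc_enc);
	size_t len = offsetof(struct ctx, sh_desc_enc_dma) - start;
	unsigned long base = 0x80000000UL;	/* pretend DMA address */

	printf("mapped length: %zu bytes\n", len);
	printf("dec desc dma:  0x%lx\n",
	       base + offsetof(struct ctx, sh_desc_dec) - start);
	printf("key dma:       0x%lx\n",
	       base + offsetof(struct ctx, key) - start);
	return 0;
}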
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index aa9ccca67045..d6c58184bb57 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1379,6 +1379,9 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
+ u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT;
+ bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_CHACHA20);
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
@@ -1417,14 +1420,15 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
LDST_OFFSET_SHIFT));
/* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
- OP_ALG_ENCRYPT);
+ if (is_chacha20)
+ options |= OP_ALG_AS_FINALIZE;
+ append_operation(desc, options);
/* Perform operation */
skcipher_append_src_dst(desc);
/* Store IV */
- if (ivsize)
+ if (!is_chacha20 && ivsize)
append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | (ctx1_iv_off <<
LDST_OFFSET_SHIFT));
@@ -1451,6 +1455,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
+ bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_CHACHA20);
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
@@ -1499,7 +1505,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
skcipher_append_src_dst(desc);
/* Store IV */
- if (ivsize)
+ if (!is_chacha20 && ivsize)
append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | (ctx1_iv_off <<
LDST_OFFSET_SHIFT));
@@ -1518,7 +1524,13 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
*/
void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
{
- __be64 sector_size = cpu_to_be64(512);
+ /*
+ * Set the sector size to a large value, practically disabling
+ * sector size segmentation in the XTS implementation. We cannot
+ * take full advantage of this HW feature with the existing
+ * crypto API / dm-crypt SW architecture.
+ */
+ __be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
@@ -1571,7 +1583,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
*/
void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
{
- __be64 sector_size = cpu_to_be64(512);
+ /*
+ * Set the sector size to a large value, practically disabling
+ * sector size segmentation in the XTS implementation. We cannot
+ * take full advantage of this HW feature with the existing
+ * crypto API / dm-crypt SW architecture.
+ */
+ __be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
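For reference, BIT(15) is 32768, so the big-endian sector-size field becomes 00 00 00 00 00 00 80 00 and the hardware never rolls the XTS tweak within a single request. A quick standalone check, with cpu_to_be64() open-coded for the demo:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sector_size = 1ULL << 15;	/* BIT(15) = 32768 */
	unsigned char be[8];
	int i;

	for (i = 0; i < 8; i++)		/* open-coded cpu_to_be64() */
		be[i] = sector_size >> (56 - 8 * i);

	printf("sector size: %llu\nbig endian:  ",
	       (unsigned long long)sector_size);
	for (i = 0; i < 8; i++)
		printf("%02x ", be[i]);
	printf("\n");
	return 0;
}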
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 4a29e0ef9d63..27e36bdf6163 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -783,7 +783,7 @@ struct aead_edesc {
unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
- struct qm_sg_entry sgt[0];
+ struct qm_sg_entry sgt[];
};
/*
@@ -803,7 +803,7 @@ struct skcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
- struct qm_sg_entry sgt[0];
+ struct qm_sg_entry sgt[];
};
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 706736776b47..f29cb7bd7dd3 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -114,7 +114,7 @@ struct aead_edesc {
dma_addr_t qm_sg_dma;
unsigned int assoclen;
dma_addr_t assoclen_dma;
- struct dpaa2_sg_entry sgt[0];
+ struct dpaa2_sg_entry sgt[];
};
/*
@@ -132,7 +132,7 @@ struct skcipher_edesc {
dma_addr_t iv_dma;
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
- struct dpaa2_sg_entry sgt[0];
+ struct dpaa2_sg_entry sgt[];
};
/*
@@ -146,7 +146,7 @@ struct ahash_edesc {
dma_addr_t qm_sg_dma;
int src_nents;
int qm_sg_bytes;
- struct dpaa2_sg_entry sgt[0];
+ struct dpaa2_sg_entry sgt[];
};
/**
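The sgt[0] -> sgt[] conversions in these headers switch to C99 flexible array members, which declare the same trailing variable-length region but let the compiler (and bounds-checking tooling) know the array has no fixed size. A minimal sketch of the allocation pattern; the kernel would typically compute the size with the struct_size() helper instead of open-coding it as here:

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned long addr; unsigned int len; };

struct edesc {
	int nents;
	struct entry sgt[];	/* flexible array member, was sgt[0] */
};

int main(void)
{
	int i, n = 4;
	/* struct plus n trailing entries in one allocation */
	struct edesc *e = malloc(sizeof(*e) + n * sizeof(e->sgt[0]));

	if (!e)
		return 1;
	e->nents = n;
	for (i = 0; i < n; i++)
		e->sgt[i].len = 64;
	printf("allocated %d trailing entries\n", e->nents);
	free(e);
	return 0;
}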
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 8d9143407fc5..27ff4a3d037e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -65,6 +65,7 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
+#include <crypto/engine.h>
#define CAAM_CRA_PRIORITY 3000
@@ -86,6 +87,7 @@ static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
+ struct crypto_engine_ctx enginectx;
u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@@ -111,9 +113,12 @@ struct caam_hash_state {
int buflen;
int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
- int (*update)(struct ahash_request *req);
+ int (*update)(struct ahash_request *req) ____cacheline_aligned;
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
+ struct ahash_edesc *edesc;
+ void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
+ void *context);
};
struct caam_export_state {
@@ -395,7 +400,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
init_completion(&result.completion);
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
- if (!ret) {
+ if (ret == -EINPROGRESS) {
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
@@ -521,6 +526,7 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: stored to determine whether the request was backlogged
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* @sec4_sg: h/w link table
*/
@@ -528,8 +534,9 @@ struct ahash_edesc {
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
+ bool bklog;
u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
- struct sec4_sg_entry sec4_sg[0];
+ struct sec4_sg_entry sec4_sg[];
};
static inline void ahash_unmap(struct device *dev,
@@ -565,24 +572,28 @@ static inline void ahash_unmap_ctx(struct device *dev,
ahash_unmap(dev, edesc, req, dst_len);
}
-static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
+ void *context, enum dma_data_direction dir)
{
struct ahash_request *req = context;
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int ecode = 0;
+ bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+ edesc = state->edesc;
+ has_bklog = edesc->bklog;
+
if (err)
ecode = caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
@@ -590,95 +601,49 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, ecode);
+ /*
+ * If the backlog flag is not set, the completion of the request
+ * is done by CAAM, not by the crypto engine.
+ */
+ if (!has_bklog)
+ req->base.complete(&req->base, ecode);
+ else
+ crypto_finalize_hash_request(jrp->engine, req, ecode);
}
-static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ahash_request *req = context;
- struct ahash_edesc *edesc;
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
- int digestsize = crypto_ahash_digestsize(ahash);
- int ecode = 0;
-
- dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
- edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
- if (err)
- ecode = caam_jr_strstatus(jrdev, err);
-
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- kfree(edesc);
-
- scatterwalk_map_and_copy(state->buf, req->src,
- req->nbytes - state->next_buflen,
- state->next_buflen, 0);
- state->buflen = state->next_buflen;
-
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
- state->buflen, 1);
-
- print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-
- req->base.complete(&req->base, ecode);
+ ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct ahash_request *req = context;
- struct ahash_edesc *edesc;
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- int digestsize = crypto_ahash_digestsize(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- int ecode = 0;
-
- dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
- edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
- if (err)
- ecode = caam_jr_strstatus(jrdev, err);
-
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
- memcpy(req->result, state->caam_ctx, digestsize);
- kfree(edesc);
-
- print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
-
- req->base.complete(&req->base, ecode);
+ ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}
-static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
+ void *context, enum dma_data_direction dir)
{
struct ahash_request *req = context;
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
+ bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+ edesc = state->edesc;
+ has_bklog = edesc->bklog;
if (err)
ecode = caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
kfree(edesc);
scatterwalk_map_and_copy(state->buf, req->src,
@@ -698,18 +663,42 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
- req->base.complete(&req->base, ecode);
+ /*
+ * If the backlog flag is not set, the completion of the request
+ * is done by CAAM, not by the crypto engine.
+ */
+ if (!has_bklog)
+ req->base.complete(&req->base, ecode);
+ else
+ crypto_finalize_hash_request(jrp->engine, req, ecode);
+}
+
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
+}
+
+static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
/*
* Allocate an enhanced descriptor, which contains the hardware descriptor
* and space for hardware scatter table containing sg_num entries.
*/
-static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
int sg_num, u32 *sh_desc,
- dma_addr_t sh_desc_dma,
- gfp_t flags)
+ dma_addr_t sh_desc_dma)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
@@ -719,6 +708,8 @@ static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
return NULL;
}
+ state->edesc = edesc;
+
init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
HDR_SHARE_DEFER | HDR_REVERSE);
@@ -761,6 +752,62 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
return 0;
}
+static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = state->edesc->hw_desc;
+ int ret;
+
+ state->edesc->bklog = true;
+
+ ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
+
+ if (ret != -EINPROGRESS) {
+ ahash_unmap(jrdev, state->edesc, req, 0);
+ kfree(state->edesc);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int ahash_enqueue_req(struct device *jrdev,
+ void (*cbk)(struct device *jrdev, u32 *desc,
+ u32 err, void *context),
+ struct ahash_request *req,
+ int dst_len, enum dma_data_direction dir)
+{
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->edesc;
+ u32 *desc = edesc->hw_desc;
+ int ret;
+
+ state->ahash_op_done = cbk;
+
+ /*
+ * Only backlogged requests are sent to the crypto engine, since the
+ * others can be handled by CAAM, if free, especially since the JR has
+ * up to 1024 entries (more than the 10 entries of the crypto engine).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, cbk, req);
+
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
+ ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
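Because ahash has several completion callbacks, ahash_enqueue_req stashes the chosen one in the request state so ahash_do_one_req can replay it when the engine later runs a backlogged request. A standalone model of that indirection, with illustrative names:

#include <stdio.h>

struct state {
	void (*op_done)(int err);
};

static void done_ctx_src(int err) { printf("ctx_src done, err=%d\n", err); }
static void done_ctx_dst(int err) { printf("ctx_dst done, err=%d\n", err); }

static void enqueue(struct state *st, void (*cbk)(int err))
{
	st->op_done = cbk;	/* remember for the backlog path */
	/* the direct path would submit with cbk right here */
}

static void engine_do_one(struct state *st)
{
	/* backlog path: submit with the callback chosen earlier */
	st->op_done(0);
}

int main(void)
{
	struct state st;

	enqueue(&st, done_ctx_src);
	engine_do_one(&st);
	enqueue(&st, done_ctx_dst);
	engine_do_one(&st);
	return 0;
}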
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
@@ -768,8 +815,6 @@ static int ahash_update_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->buf;
int *buflen = &state->buflen;
int *next_buflen = &state->next_buflen;
@@ -823,8 +868,8 @@ static int ahash_update_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
- ctx->sh_desc_update_dma, flags);
+ edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
+ ctx->sh_desc_update_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -870,11 +915,8 @@ static int ahash_update_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
- if (ret)
- goto unmap_ctx;
-
- ret = -EINPROGRESS;
+ ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
+ ctx->ctx_len, DMA_BIDIRECTIONAL);
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
@@ -898,8 +940,6 @@ static int ahash_final_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes;
@@ -911,8 +951,8 @@ static int ahash_final_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
- ctx->sh_desc_fin_dma, flags);
+ edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
+ ctx->sh_desc_fin_dma);
if (!edesc)
return -ENOMEM;
@@ -947,11 +987,8 @@ static int ahash_final_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
- if (ret)
- goto unmap_ctx;
-
- return -EINPROGRESS;
+ return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
+ digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
@@ -964,8 +1001,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
int buflen = state->buflen;
u32 *desc;
int sec4_sg_src_index;
@@ -994,9 +1029,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
- flags);
+ edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1027,11 +1061,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
- if (ret)
- goto unmap_ctx;
-
- return -EINPROGRESS;
+ return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
+ digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
@@ -1044,8 +1075,6 @@ static int ahash_digest(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
int src_nents, mapped_nents;
@@ -1072,9 +1101,8 @@ static int ahash_digest(struct ahash_request *req)
}
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
- ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
- flags);
+ edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
+ ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1103,15 +1131,8 @@ static int ahash_digest(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
-
- return ret;
+ return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
+ DMA_FROM_DEVICE);
}
/* submit ahash final if it the first job descriptor */
@@ -1121,8 +1142,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->buf;
int buflen = state->buflen;
u32 *desc;
@@ -1131,8 +1150,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int ret;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
- ctx->sh_desc_digest_dma, flags);
+ edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
+ ctx->sh_desc_digest_dma);
if (!edesc)
return -ENOMEM;
@@ -1157,20 +1176,12 @@ static int ahash_final_no_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
-
- return ret;
+ return ahash_enqueue_req(jrdev, ahash_done, req,
+ digestsize, DMA_FROM_DEVICE);
unmap:
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
return -ENOMEM;
-
}
/* submit ahash update if it the first job descriptor after update */
@@ -1180,8 +1191,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->buf;
int *buflen = &state->buflen;
int *next_buflen = &state->next_buflen;
@@ -1234,10 +1243,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, pad_nents,
+ edesc = ahash_edesc_alloc(req, pad_nents,
ctx->sh_desc_update_first,
- ctx->sh_desc_update_first_dma,
- flags);
+ ctx->sh_desc_update_first_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1273,11 +1281,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
- if (ret)
- goto unmap_ctx;
-
- ret = -EINPROGRESS;
+ ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
+ ctx->ctx_len, DMA_TO_DEVICE);
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY))
+ return ret;
state->update = ahash_update_ctx;
state->finup = ahash_finup_ctx;
state->final = ahash_final_ctx;
@@ -1305,8 +1312,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
@@ -1336,9 +1341,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
- flags);
+ edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1368,15 +1372,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
-
- return ret;
+ return ahash_enqueue_req(jrdev, ahash_done, req,
+ digestsize, DMA_FROM_DEVICE);
unmap:
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
@@ -1391,8 +1388,6 @@ static int ahash_update_first(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->buf;
int *buflen = &state->buflen;
int *next_buflen = &state->next_buflen;
@@ -1440,11 +1435,10 @@ static int ahash_update_first(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+ edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
mapped_nents : 0,
ctx->sh_desc_update_first,
- ctx->sh_desc_update_first_dma,
- flags);
+ ctx->sh_desc_update_first_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1467,11 +1461,10 @@ static int ahash_update_first(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
- if (ret)
- goto unmap_ctx;
-
- ret = -EINPROGRESS;
+ ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
+ ctx->ctx_len, DMA_TO_DEVICE);
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY))
+ return ret;
state->update = ahash_update_ctx;
state->finup = ahash_finup_ctx;
state->final = ahash_final_ctx;
@@ -1774,6 +1767,8 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
+ sh_desc_update);
dma_addr_t dma_addr;
struct caam_drv_private *priv;
@@ -1826,7 +1821,8 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
}
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
- offsetof(struct caam_hash_ctx, key),
+ offsetof(struct caam_hash_ctx, key) -
+ sh_desc_update_offset,
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
@@ -1844,11 +1840,16 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->sh_desc_update_dma = dma_addr;
ctx->sh_desc_update_first_dma = dma_addr +
offsetof(struct caam_hash_ctx,
- sh_desc_update_first);
+ sh_desc_update_first) -
+ sh_desc_update_offset;
ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
- sh_desc_fin);
+ sh_desc_fin) -
+ sh_desc_update_offset;
ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
- sh_desc_digest);
+ sh_desc_digest) -
+ sh_desc_update_offset;
+
+ ctx->enginectx.op.do_one_request = ahash_do_one_req;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
@@ -1865,7 +1866,8 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
- offsetof(struct caam_hash_ctx, key),
+ offsetof(struct caam_hash_ctx, key) -
+ offsetof(struct caam_hash_ctx, sh_desc_update),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (ctx->key_dir != DMA_NONE)
dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 6619c512ef1a..2e44d685618f 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -117,76 +117,73 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
struct akcipher_request *req = context;
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct rsa_edesc *edesc;
int ecode = 0;
+ bool has_bklog;
if (err)
ecode = caam_jr_strstatus(dev, err);
- edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+ edesc = req_ctx->edesc;
+ has_bklog = edesc->bklog;
rsa_pub_unmap(dev, edesc, req);
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, ecode);
-}
-
-static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
- void *context)
-{
- struct akcipher_request *req = context;
- struct rsa_edesc *edesc;
- int ecode = 0;
-
- if (err)
- ecode = caam_jr_strstatus(dev, err);
-
- edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
-
- rsa_priv_f1_unmap(dev, edesc, req);
- rsa_io_unmap(dev, edesc, req);
- kfree(edesc);
-
- akcipher_request_complete(req, ecode);
-}
-
-static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
- void *context)
-{
- struct akcipher_request *req = context;
- struct rsa_edesc *edesc;
- int ecode = 0;
-
- if (err)
- ecode = caam_jr_strstatus(dev, err);
-
- edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
-
- rsa_priv_f2_unmap(dev, edesc, req);
- rsa_io_unmap(dev, edesc, req);
- kfree(edesc);
-
- akcipher_request_complete(req, ecode);
+ /*
+ * If the backlog flag is not set, the completion of the request
+ * is done by CAAM, not by the crypto engine.
+ */
+ if (!has_bklog)
+ akcipher_request_complete(req, ecode);
+ else
+ crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}
-static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
- void *context)
+static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
{
struct akcipher_request *req = context;
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc;
int ecode = 0;
+ bool has_bklog;
if (err)
ecode = caam_jr_strstatus(dev, err);
- edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+ edesc = req_ctx->edesc;
+ has_bklog = edesc->bklog;
+
+ switch (key->priv_form) {
+ case FORM1:
+ rsa_priv_f1_unmap(dev, edesc, req);
+ break;
+ case FORM2:
+ rsa_priv_f2_unmap(dev, edesc, req);
+ break;
+ case FORM3:
+ rsa_priv_f3_unmap(dev, edesc, req);
+ }
- rsa_priv_f3_unmap(dev, edesc, req);
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, ecode);
+ /*
+ * If the backlog flag is not set, the completion of the request
+ * is done by CAAM, not by the crypto engine.
+ */
+ if (!has_bklog)
+ akcipher_request_complete(req, ecode);
+ else
+ crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}
/**
@@ -334,6 +331,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ req_ctx->edesc = edesc;
+
if (!sec4_sg_bytes)
return edesc;
@@ -364,6 +363,33 @@ src_fail:
return ERR_PTR(-ENOMEM);
}
+static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct akcipher_request *req = container_of(areq,
+ struct akcipher_request,
+ base);
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *jrdev = ctx->dev;
+ u32 *desc = req_ctx->edesc->hw_desc;
+ int ret;
+
+ req_ctx->edesc->bklog = true;
+
+ ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
+
+ if (ret != -EINPROGRESS) {
+ rsa_pub_unmap(jrdev, req_ctx->edesc, req);
+ rsa_io_unmap(jrdev, req_ctx->edesc, req);
+ kfree(req_ctx->edesc);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int set_rsa_pub_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
@@ -627,6 +653,53 @@ unmap_p:
return -ENOMEM;
}
+static int akcipher_enqueue_req(struct device *jrdev,
+ void (*cbk)(struct device *jrdev, u32 *desc,
+ u32 err, void *context),
+ struct akcipher_request *req)
+{
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct rsa_edesc *edesc = req_ctx->edesc;
+ u32 *desc = edesc->hw_desc;
+ int ret;
+
+ req_ctx->akcipher_op_done = cbk;
+ /*
+ * Only backlogged requests are sent to the crypto engine, since the
+ * others can be handled by CAAM, if free, especially since the JR has
+ * up to 1024 entries (more than the 10 entries of the crypto engine).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, cbk, req);
+
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
+ switch (key->priv_form) {
+ case FORM1:
+ rsa_priv_f1_unmap(jrdev, edesc, req);
+ break;
+ case FORM2:
+ rsa_priv_f2_unmap(jrdev, edesc, req);
+ break;
+ case FORM3:
+ rsa_priv_f3_unmap(jrdev, edesc, req);
+ break;
+ default:
+ rsa_pub_unmap(jrdev, edesc, req);
+ }
+ rsa_io_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
static int caam_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
@@ -658,11 +731,7 @@ static int caam_rsa_enc(struct akcipher_request *req)
/* Initialize Job Descriptor */
init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
- ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
- if (!ret)
- return -EINPROGRESS;
-
- rsa_pub_unmap(jrdev, edesc, req);
+ return akcipher_enqueue_req(jrdev, rsa_pub_done, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
@@ -691,11 +760,7 @@ static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
/* Initialize Job Descriptor */
init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
- ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
- if (!ret)
- return -EINPROGRESS;
-
- rsa_priv_f1_unmap(jrdev, edesc, req);
+ return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
@@ -724,11 +789,7 @@ static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
/* Initialize Job Descriptor */
init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
- ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
- if (!ret)
- return -EINPROGRESS;
-
- rsa_priv_f2_unmap(jrdev, edesc, req);
+ return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
@@ -757,11 +818,7 @@ static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
/* Initialize Job Descriptor */
init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
- ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
- if (!ret)
- return -EINPROGRESS;
-
- rsa_priv_f3_unmap(jrdev, edesc, req);
+ return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
@@ -1054,6 +1111,8 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
return -ENOMEM;
}
+ ctx->enginectx.op.do_one_request = akcipher_do_one_req;
+
return 0;
}
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index c68fb4c03ee6..cc889a525e2f 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -12,6 +12,7 @@
#define _PKC_DESC_H_
#include "compat.h"
#include "pdb.h"
+#include <crypto/engine.h>
/**
* caam_priv_key_form - CAAM RSA private key representation
@@ -87,11 +88,13 @@ struct caam_rsa_key {
/**
* caam_rsa_ctx - per session context.
+ * @enginectx : crypto engine context
* @key : RSA key in DMA zone
* @dev : device structure
* @padding_dma : dma address of padding, for adding it to the input
*/
struct caam_rsa_ctx {
+ struct crypto_engine_ctx enginectx;
struct caam_rsa_key key;
struct device *dev;
dma_addr_t padding_dma;
@@ -103,11 +106,16 @@ struct caam_rsa_ctx {
* @src : input scatterlist (stripped of leading zeros)
* @fixup_src : input scatterlist (that might be stripped of leading zeros)
* @fixup_src_len : length of the fixup_src input scatterlist
+ * @edesc : s/w-extended rsa descriptor
+ * @akcipher_op_done : callback used when the operation is done
*/
struct caam_rsa_req_ctx {
struct scatterlist src[2];
struct scatterlist *fixup_src;
unsigned int fixup_src_len;
+ struct rsa_edesc *edesc;
+ void (*akcipher_op_done)(struct device *jrdev, u32 *desc, u32 err,
+ void *context);
};
/**
@@ -117,6 +125,7 @@ struct caam_rsa_req_ctx {
* @mapped_src_nents: number of segments in input h/w link table
* @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes : length of h/w link table
+ * @bklog : stored to determine whether the request was backlogged
* @sec4_sg_dma : dma address of h/w link table
* @sec4_sg : pointer to h/w link table
* @pdb : specific RSA Protocol Data Block (PDB)
@@ -128,6 +137,7 @@ struct rsa_edesc {
int mapped_src_nents;
int mapped_dst_nents;
int sec4_sg_bytes;
+ bool bklog;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
union {
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index e8baacaabe07..77d048dfe5d0 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -7,35 +7,12 @@
*
* Based on caamalg.c crypto API driver.
*
- * relationship between job descriptors to shared descriptors:
- *
- * --------------- --------------
- * | JobDesc #0 |-------------------->| ShareDesc |
- * | *(buffer 0) | |------------->| (generate) |
- * --------------- | | (move) |
- * | | (store) |
- * --------------- | --------------
- * | JobDesc #1 |------|
- * | *(buffer 1) |
- * ---------------
- *
- * A job desc looks like this:
- *
- * ---------------------
- * | Header |
- * | ShareDesc Pointer |
- * | SEQ_OUT_PTR |
- * | (output buffer) |
- * ---------------------
- *
- * The SharedDesc never changes, and each job descriptor points to one of two
- * buffers for each device, from which the data will be copied into the
- * requested destination
*/
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
+#include <linux/kfifo.h>
#include "compat.h"
@@ -45,278 +22,205 @@
#include "jr.h"
#include "error.h"
+#define CAAM_RNG_MAX_FIFO_STORE_SIZE 16
+
/*
- * Maximum buffer size: maximum number of random, cache-aligned bytes that
- * will be generated and moved to seq out ptr (extlen not allowed)
+ * Length of the descriptors used; see caam_init_desc()
*/
-#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
- L1_CACHE_BYTES)
-
-/* length of descriptors */
-#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ_MAX * 2)
-#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
-
-/* Buffer, its dma address and lock */
-struct buf_data {
- u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
- dma_addr_t addr;
- struct completion filled;
- u32 hw_desc[DESC_JOB_O_LEN];
-#define BUF_NOT_EMPTY 0
-#define BUF_EMPTY 1
-#define BUF_PENDING 2 /* Empty, but with job pending --don't submit another */
- atomic_t empty;
-};
+#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ + \
+ CAAM_CMD_SZ + \
+ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
/* rng per-device context */
struct caam_rng_ctx {
+ struct hwrng rng;
struct device *jrdev;
- dma_addr_t sh_desc_dma;
- u32 sh_desc[DESC_RNG_LEN];
- unsigned int cur_buf_idx;
- int current_buf;
- struct buf_data bufs[2];
+ struct device *ctrldev;
+ void *desc_async;
+ void *desc_sync;
+ struct work_struct worker;
+ struct kfifo fifo;
};
-static struct caam_rng_ctx *rng_ctx;
-
-/*
- * Variable used to avoid double free of resources in case
- * algorithm registration was unsuccessful
- */
-static bool init_done;
-
-static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
-{
- if (bd->addr)
- dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
- DMA_FROM_DEVICE);
-}
+struct caam_rng_job_ctx {
+ struct completion *done;
+ int *err;
+};
-static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
- struct device *jrdev = ctx->jrdev;
-
- if (ctx->sh_desc_dma)
- dma_unmap_single(jrdev, ctx->sh_desc_dma,
- desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
- rng_unmap_buf(jrdev, &ctx->bufs[0]);
- rng_unmap_buf(jrdev, &ctx->bufs[1]);
+ return (struct caam_rng_ctx *)r->priv;
}
-static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
+static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct buf_data *bd;
-
- bd = container_of(desc, struct buf_data, hw_desc[0]);
+ struct caam_rng_job_ctx *jctx = context;
if (err)
- caam_jr_strstatus(jrdev, err);
+ *jctx->err = caam_jr_strstatus(jrdev, err);
- atomic_set(&bd->empty, BUF_NOT_EMPTY);
- complete(&bd->filled);
-
- /* Buffer refilled, invalidate cache */
- dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
-
- print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- bd->buf, RN_BUF_SIZE, 1);
+ complete(jctx->done);
}
-static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
+static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
- struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
- struct device *jrdev = ctx->jrdev;
- u32 *desc = bd->hw_desc;
- int err;
-
- dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
- init_completion(&bd->filled);
- err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
- if (err)
- complete(&bd->filled); /* don't wait on failed job*/
- else
- atomic_inc(&bd->empty); /* note if pending */
-
- return err;
+ init_job_desc(desc, 0); /* + 1 cmd_sz */
+ /* Generate random bytes: + 1 cmd_sz */
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
+ OP_ALG_PR_ON);
+ /* Store bytes: + 1 cmd_sz + caam_ptr_sz */
+ append_fifo_store(desc, dst_dma,
+ CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);
+
+ print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, desc, desc_bytes(desc), 1);
+
+ return desc;
}
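CAAM_RNG_DESC_LEN above budgets exactly for the three commands that caam_init_desc() appends, the last of which carries a pointer. A standalone arithmetic check with assumed sizes (4-byte command words, up to 8-byte pointers):

#include <stdio.h>

#define CMD_SZ		4	/* stands in for CAAM_CMD_SZ */
#define PTR_SZ_MAX	8	/* stands in for CAAM_PTR_SZ_MAX */

/* header + operation + fifo-store command + its pointer */
#define RNG_DESC_LEN	(CMD_SZ + CMD_SZ + CMD_SZ + PTR_SZ_MAX)

int main(void)
{
	printf("descriptor buffer: %d bytes\n", RNG_DESC_LEN);	/* 20 */
	return 0;
}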
-static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
+static int caam_rng_read_one(struct device *jrdev,
+ void *dst, int len,
+ void *desc,
+ struct completion *done)
{
- struct caam_rng_ctx *ctx = rng_ctx;
- struct buf_data *bd = &ctx->bufs[ctx->current_buf];
- int next_buf_idx, copied_idx;
- int err;
-
- if (atomic_read(&bd->empty)) {
- /* try to submit job if there wasn't one */
- if (atomic_read(&bd->empty) == BUF_EMPTY) {
- err = submit_job(ctx, 1);
- /* if can't submit job, can't even wait */
- if (err)
- return 0;
- }
- /* no immediate data, so exit if not waiting */
- if (!wait)
- return 0;
-
- /* waiting for pending job */
- if (atomic_read(&bd->empty))
- wait_for_completion(&bd->filled);
+ dma_addr_t dst_dma;
+ int err, ret = 0;
+ struct caam_rng_job_ctx jctx = {
+ .done = done,
+ .err = &ret,
+ };
+
+ len = CAAM_RNG_MAX_FIFO_STORE_SIZE;
+
+ dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dst_dma)) {
+ dev_err(jrdev, "unable to map destination memory\n");
+ return -ENOMEM;
}
- next_buf_idx = ctx->cur_buf_idx + max;
- dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
- __func__, ctx->current_buf, ctx->cur_buf_idx);
-
- /* if enough data in current buffer */
- if (next_buf_idx < RN_BUF_SIZE) {
- memcpy(data, bd->buf + ctx->cur_buf_idx, max);
- ctx->cur_buf_idx = next_buf_idx;
- return max;
+ init_completion(done);
+ err = caam_jr_enqueue(jrdev,
+ caam_init_desc(desc, dst_dma),
+ caam_rng_done, &jctx);
+ if (err == -EINPROGRESS) {
+ wait_for_completion(done);
+ err = 0;
}
- /* else, copy what's left... */
- copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
- memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
- ctx->cur_buf_idx = 0;
- atomic_set(&bd->empty, BUF_EMPTY);
-
- /* ...refill... */
- submit_job(ctx, 1);
+ dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);
- /* and use next buffer */
- ctx->current_buf = !ctx->current_buf;
- dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
-
- /* since there already is some data read, don't wait */
- return copied_idx + caam_read(rng, data + copied_idx,
- max - copied_idx, false);
+ return err ?: (ret ?: len);
}
-static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
+static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
- struct device *jrdev = ctx->jrdev;
- u32 *desc = ctx->sh_desc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Generate random bytes */
- append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
-
- /* Store bytes */
- append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
+ struct scatterlist sg[1];
+ struct completion done;
+ int len, nents;
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
+ CAAM_RNG_MAX_FIFO_STORE_SIZE);
+ if (!nents)
+ return;
- ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
+ len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
+ sg[0].length,
+ ctx->desc_async,
+ &done);
+ if (len < 0)
+ return;
- print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
+ kfifo_dma_in_finish(&ctx->fifo, len);
+}
- return 0;
+static void caam_rng_worker(struct work_struct *work)
+{
+ struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
+ worker);
+ caam_rng_fill_async(ctx);
}
-static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
- struct device *jrdev = ctx->jrdev;
- struct buf_data *bd = &ctx->bufs[buf_id];
- u32 *desc = bd->hw_desc;
- int sh_len = desc_len(ctx->sh_desc);
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
+ int out;
- init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
- HDR_REVERSE);
+ if (wait) {
+ struct completion done;
- bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, bd->addr)) {
- dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ return caam_rng_read_one(ctx->jrdev, dst, max,
+ ctx->desc_sync, &done);
}
- append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
-
- print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
+ out = kfifo_out(&ctx->fifo, dst, max);
+ if (kfifo_is_empty(&ctx->fifo))
+ schedule_work(&ctx->worker);
- return 0;
+ return out;
}
static void caam_cleanup(struct hwrng *rng)
{
- int i;
- struct buf_data *bd;
-
- for (i = 0; i < 2; i++) {
- bd = &rng_ctx->bufs[i];
- if (atomic_read(&bd->empty) == BUF_PENDING)
- wait_for_completion(&bd->filled);
- }
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
- rng_unmap_ctx(rng_ctx);
+ flush_work(&ctx->worker);
+ caam_jr_free(ctx->jrdev);
+ kfifo_free(&ctx->fifo);
}
-static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init(struct hwrng *rng)
{
- struct buf_data *bd = &ctx->bufs[buf_id];
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
int err;
- err = rng_create_job_desc(ctx, buf_id);
- if (err)
- return err;
-
- atomic_set(&bd->empty, BUF_EMPTY);
- submit_job(ctx, buf_id == ctx->current_buf);
- wait_for_completion(&bd->filled);
+ ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
+ GFP_DMA | GFP_KERNEL);
+ if (!ctx->desc_sync)
+ return -ENOMEM;
- return 0;
-}
+ ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
+ GFP_DMA | GFP_KERNEL);
+ if (!ctx->desc_async)
+ return -ENOMEM;
-static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
-{
- int err;
+ if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
+ GFP_DMA | GFP_KERNEL))
+ return -ENOMEM;
- ctx->jrdev = jrdev;
+ INIT_WORK(&ctx->worker, caam_rng_worker);
- err = rng_create_sh_desc(ctx);
- if (err)
+ ctx->jrdev = caam_jr_alloc();
+ err = PTR_ERR_OR_ZERO(ctx->jrdev);
+ if (err) {
+ kfifo_free(&ctx->fifo);
+ pr_err("Job Ring Device allocation for transform failed\n");
return err;
+ }
- ctx->current_buf = 0;
- ctx->cur_buf_idx = 0;
+ /*
+ * Prefill the async fifo so that randomness is already
+ * available when the hw_random core asks for early entropy
+ */
+ caam_rng_fill_async(ctx);
- err = caam_init_buf(ctx, 0);
- if (err)
- return err;
-
- return caam_init_buf(ctx, 1);
+ return 0;
}
-static struct hwrng caam_rng = {
- .name = "rng-caam",
- .cleanup = caam_cleanup,
- .read = caam_read,
-};
+int caam_rng_init(struct device *ctrldev);
-void caam_rng_exit(void)
+void caam_rng_exit(struct device *ctrldev)
{
- if (!init_done)
- return;
-
- caam_jr_free(rng_ctx->jrdev);
- hwrng_unregister(&caam_rng);
- kfree(rng_ctx);
+ devres_release_group(ctrldev, caam_rng_init);
}
int caam_rng_init(struct device *ctrldev)
{
- struct device *dev;
+ struct caam_rng_ctx *ctx;
u32 rng_inst;
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- int err;
- init_done = false;
+ int ret;
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
@@ -328,31 +232,30 @@ int caam_rng_init(struct device *ctrldev)
if (!rng_inst)
return 0;
- dev = caam_jr_alloc();
- if (IS_ERR(dev)) {
- pr_err("Job Ring Device allocation for transform failed\n");
- return PTR_ERR(dev);
- }
- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
- if (!rng_ctx) {
- err = -ENOMEM;
- goto free_caam_alloc;
- }
- err = caam_init_rng(rng_ctx, dev);
- if (err)
- goto free_rng_ctx;
+ if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
+ return -ENOMEM;
- dev_info(dev, "registering rng-caam\n");
+ ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
- err = hwrng_register(&caam_rng);
- if (!err) {
- init_done = true;
- return err;
+ ctx->ctrldev = ctrldev;
+
+ ctx->rng.name = "rng-caam";
+ ctx->rng.init = caam_init;
+ ctx->rng.cleanup = caam_cleanup;
+ ctx->rng.read = caam_read;
+ ctx->rng.priv = (unsigned long)ctx;
+ ctx->rng.quality = 1024;
+
+ dev_info(ctrldev, "registering rng-caam\n");
+
+ ret = devm_hwrng_register(ctrldev, &ctx->rng);
+ if (ret) {
+ caam_rng_exit(ctrldev);
+ return ret;
}
-free_rng_ctx:
- kfree(rng_ctx);
-free_caam_alloc:
- caam_jr_free(dev);
- return err;
+ devres_close_group(ctrldev, caam_rng_init);
+ return 0;
}
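
A note on the teardown model above: every allocation in the reworked caamrng is tracked in a devres group keyed by the address of caam_rng_init(), so caam_rng_exit() can release the whole driver state with a single devres_release_group() call. A minimal sketch of that pattern, with hypothetical my_init()/my_exit() names standing in for the real entry points:

    /* Sketch of the devres-group pattern; my_init()/my_exit() are illustrative. */
    int my_init(struct device *dev)
    {
            void *buf;

            if (!devres_open_group(dev, my_init, GFP_KERNEL))
                    return -ENOMEM;

            buf = devm_kzalloc(dev, 64, GFP_KERNEL); /* tracked by the open group */
            if (!buf) {
                    devres_release_group(dev, my_init); /* frees the whole group */
                    return -ENOMEM;
            }

            devres_close_group(dev, my_init); /* keep resources until my_exit() */
            return 0;
    }

    void my_exit(struct device *dev)
    {
            devres_release_group(dev, my_init); /* one call releases everything */
    }
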
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 7139366da016..4fcdd262e581 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -10,6 +10,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>
#include "compat.h"
#include "regs.h"
@@ -36,7 +37,8 @@ static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
init_job_desc(desc, 0);
op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
+ OP_ALG_PR_ON;
/* INIT RNG in non-test mode */
append_operation(desc, op_flags);
@@ -196,7 +198,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
u32 *desc, status;
int sh_idx, ret = 0;
- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
if (!desc)
return -ENOMEM;
@@ -273,17 +275,30 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
if (!desc)
return -ENOMEM;
for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ const u32 rdsta_if = RDSTA_IF0 << sh_idx;
+ const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
+ const u32 rdsta_mask = rdsta_if | rdsta_pr;
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
*/
- if ((1 << sh_idx) & state_handle_mask)
- continue;
+ if (rdsta_if & state_handle_mask) {
+ if (rdsta_pr & state_handle_mask)
+ continue;
+
+ dev_info(ctrldev,
+ "RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n",
+ sh_idx);
+
+ ret = deinstantiate_rng(ctrldev, rdsta_if);
+ if (ret)
+ break;
+ }
/* Create the descriptor for instantiating RNG State Handle */
build_instantiation_desc(desc, sh_idx, gen_sk);
@@ -303,9 +318,9 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
if (ret)
break;
- rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+ rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
- !(rdsta_val & (1 << sh_idx))) {
+ (rdsta_val & rdsta_mask) != rdsta_mask) {
ret = -EAGAIN;
break;
}
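
Concretely, using the bit values from the regs.h hunk further below: for sh_idx == 1, rdsta_if is RDSTA_IF1 (0x02) and rdsta_pr is RDSTA_PR1 (0x20), so a handle now only passes when both its instantiated and prediction-resistance bits are set. A worked sketch of the check for that handle:

    /* sh_idx == 1: rdsta_if = 0x02, rdsta_pr = 0x20, rdsta_mask = 0x22 */
    rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK; /* RDSTA_MASK == 0x33 */
    if ((rdsta_val & rdsta_mask) != rdsta_mask)
            ret = -EAGAIN; /* IF1 and/or PR1 missing: retry with a longer entropy delay */
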
@@ -341,8 +356,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
r4tst = &ctrl->r4tst[0];
- /* put RNG4 into program mode */
- clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
+ /*
+ * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to
+ * properly invalidate the entropy in the entropy register and
+ * force re-generation.
+ */
+ clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC);
/*
* Performance-wise, it does not make sense to
@@ -372,7 +391,8 @@ start_rng:
* select raw sampling in both entropy shifter
 * and statistical checker; put RNG4 into run mode
*/
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC,
+ RTMCTL_SAMP_MODE_RAW_ES_SC);
}
static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
@@ -559,6 +579,26 @@ static void caam_remove_debugfs(void *root)
}
#endif
+#ifdef CONFIG_FSL_MC_BUS
+static bool check_version(struct fsl_mc_version *mc_version, u32 major,
+ u32 minor, u32 revision)
+{
+ if (mc_version->major > major)
+ return true;
+
+ if (mc_version->major == major) {
+ if (mc_version->minor > minor)
+ return true;
+
+ if (mc_version->minor == minor &&
+ mc_version->revision > revision)
+ return true;
+ }
+
+ return false;
+}
+#endif
+
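
check_version() is a lexicographic comparison on the (major, minor, revision) triple and returns true only for strictly newer firmware. A quick illustration with hypothetical values:

    /* Assuming the MC firmware reports version 10.20.1: */
    struct fsl_mc_version v = { .major = 10, .minor = 20, .revision = 1 };
    check_version(&v, 10, 20, 0); /* true: same 10.20, newer revision */
    check_version(&v, 10, 21, 0); /* false: 10.20.x predates 10.21.0 */
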
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -577,6 +617,7 @@ static int caam_probe(struct platform_device *pdev)
u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
+ bool pr_support = false;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
@@ -662,6 +703,21 @@ static int caam_probe(struct platform_device *pdev)
/* Get the IRQ of the controller (for security violations only) */
ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
+ np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+ ctrlpriv->mc_en = !!np;
+ of_node_put(np);
+
+#ifdef CONFIG_FSL_MC_BUS
+ if (ctrlpriv->mc_en) {
+ struct fsl_mc_version *mc_version;
+
+ mc_version = fsl_mc_get_version();
+ if (mc_version)
+ pr_support = check_version(mc_version, 10, 20, 0);
+ else
+ return -EPROBE_DEFER;
+ }
+#endif
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
@@ -669,10 +725,6 @@ static int caam_probe(struct platform_device *pdev)
* In case of SoCs with Management Complex, MC f/w performs
* the configuration.
*/
- np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
- ctrlpriv->mc_en = !!np;
- of_node_put(np);
-
if (!ctrlpriv->mc_en)
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
@@ -779,7 +831,7 @@ static int caam_probe(struct platform_device *pdev)
* already instantiated, do RNG instantiation
* In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!ctrlpriv->mc_en && rng_vid >= 4) {
+ if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
@@ -789,11 +841,11 @@ static int caam_probe(struct platform_device *pdev)
* to regenerate these keys before the next POR.
*/
gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
- ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+ ctrlpriv->rng4_sh_init &= RDSTA_MASK;
do {
int inst_handles =
rd_reg32(&ctrl->r4tst[0].rdsta) &
- RDSTA_IFMASK;
+ RDSTA_MASK;
/*
* If either SH were instantiated by somebody else
* (e.g. u-boot) then it is assumed that the entropy
@@ -833,7 +885,7 @@ static int caam_probe(struct platform_device *pdev)
* Set handles init'ed by this module as the complement of the
* already initialized ones
*/
- ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
/* Enable RDB bit so that RNG works faster */
clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 4b6854bf896a..e796d3cb9be8 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1254,6 +1254,8 @@
#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT)
#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_PR_ON BIT(1)
+
#define OP_ALG_DIR_SHIFT 0
#define OP_ALG_DIR_MASK 1
#define OP_ALG_DECRYPT 0
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index c7c10c90464b..402d6a362e8c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -11,6 +11,7 @@
#define INTERN_H
#include "ctrl.h"
+#include <crypto/engine.h>
/* Currently comes from Kconfig param as a ^2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
@@ -46,6 +47,7 @@ struct caam_drv_private_jr {
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
int irq; /* One per queue */
+ bool hwrng;
/* Number of scatterlist crypt transforms active on the JobR */
atomic_t tfm_count ____cacheline_aligned;
@@ -60,6 +62,7 @@ struct caam_drv_private_jr {
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
void *outring; /* Base of output ring, DMA-safe */
+ struct crypto_engine *engine;
};
/*
@@ -161,7 +164,7 @@ static inline void caam_pkc_exit(void)
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
int caam_rng_init(struct device *dev);
-void caam_rng_exit(void);
+void caam_rng_exit(struct device *dev);
#else
@@ -170,9 +173,7 @@ static inline int caam_rng_init(struct device *dev)
return 0;
}
-static inline void caam_rng_exit(void)
-{
-}
+static inline void caam_rng_exit(struct device *dev) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index fc97cde27059..4af22e7ceb4f 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -27,7 +27,8 @@ static struct jr_driver_data driver_data;
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
-static void register_algs(struct device *dev)
+static void register_algs(struct caam_drv_private_jr *jrpriv,
+ struct device *dev)
{
mutex_lock(&algs_lock);
@@ -37,7 +38,7 @@ static void register_algs(struct device *dev)
caam_algapi_init(dev);
caam_algapi_hash_init(dev);
caam_pkc_init(dev);
- caam_rng_init(dev);
+ jrpriv->hwrng = !caam_rng_init(dev);
caam_qi_algapi_init(dev);
algs_unlock:
@@ -53,7 +54,6 @@ static void unregister_algs(void)
caam_qi_algapi_exit();
- caam_rng_exit();
caam_pkc_exit();
caam_algapi_hash_exit();
caam_algapi_exit();
@@ -62,6 +62,15 @@ algs_unlock:
mutex_unlock(&algs_lock);
}
+static void caam_jr_crypto_engine_exit(void *data)
+{
+ struct device *jrdev = data;
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+
+ /* Free the resources of crypto-engine */
+ crypto_engine_exit(jrpriv->engine);
+}
+
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
@@ -126,6 +135,9 @@ static int caam_jr_remove(struct platform_device *pdev)
jrdev = &pdev->dev;
jrpriv = dev_get_drvdata(jrdev);
+ if (jrpriv->hwrng)
+ caam_rng_exit(jrdev->parent);
+
/*
* Return EBUSY if job ring already allocated.
*/
@@ -324,8 +336,8 @@ void caam_jr_free(struct device *rdev)
EXPORT_SYMBOL(caam_jr_free);
/**
- * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
- * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
+ * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
* descriptor.
* @dev: device of the job ring to be used. This device should have
* been assigned prior by caam_jr_register().
@@ -377,7 +389,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
- return -EBUSY;
+ return -ENOSPC;
}
head_entry = &jrp->entinfo[head];
@@ -414,7 +426,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
spin_unlock_bh(&jrp->inplock);
- return 0;
+ return -EINPROGRESS;
}
EXPORT_SYMBOL(caam_jr_enqueue);
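
Callers of caam_jr_enqueue() now have to treat -EINPROGRESS as the success case, as the key_gen.c hunk below does. A minimal sketch of the calling convention, with a hypothetical my_done() callback that records the job status and signals a completion:

    /* Sketch only; my_done() and res are illustrative, not driver code. */
    ret = caam_jr_enqueue(jrdev, desc, my_done, &res);
    if (ret == -EINPROGRESS) {                /* job accepted by the ring */
            wait_for_completion(&res.completion);
            ret = res.err;                    /* status filled in by my_done() */
    }
    /* otherwise: -ENOSPC (ring full) or -EIO (could not map the descriptor) */
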
@@ -505,7 +517,7 @@ static int caam_jr_probe(struct platform_device *pdev)
int error;
jrdev = &pdev->dev;
- jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
+ jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
if (!jrpriv)
return -ENOMEM;
@@ -538,6 +550,25 @@ static int caam_jr_probe(struct platform_device *pdev)
return error;
}
+ /* Initialize crypto engine */
+ jrpriv->engine = crypto_engine_alloc_init(jrdev, false);
+ if (!jrpriv->engine) {
+ dev_err(jrdev, "Could not init crypto-engine\n");
+ return -ENOMEM;
+ }
+
+ error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit,
+ jrdev);
+ if (error)
+ return error;
+
+ /* Start crypto engine */
+ error = crypto_engine_start(jrpriv->engine);
+ if (error) {
+ dev_err(jrdev, "Could not start crypto-engine\n");
+ return error;
+ }
+
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
if (!jrpriv->irq) {
@@ -562,7 +593,7 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
- register_algs(jrdev->parent);
+ register_algs(jrpriv, jrdev->parent);
return 0;
}
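
The probe path pairs crypto_engine_alloc_init() with a devm action so the engine's queue kthread is stopped and its memory freed automatically on unbind. A condensed sketch of the sequence, assuming a my_engine_exit() wrapper around crypto_engine_exit():

    jrpriv->engine = crypto_engine_alloc_init(jrdev, false); /* false: normal-priority kthread */
    if (!jrpriv->engine)
            return -ENOMEM;
    error = devm_add_action_or_reset(jrdev, my_engine_exit, jrdev);
    if (error)
            return error; /* the action has already run on failure */
    return crypto_engine_start(jrpriv->engine); /* begin servicing the queue */
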
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 5a851ddc48fb..b0e8a4939b4f 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -108,7 +108,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
init_completion(&result.completion);
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
- if (!ret) {
+ if (ret == -EINPROGRESS) {
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index dacf2fa4aa8e..b390b935db6d 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -4,7 +4,7 @@
* Queue Interface backend functionality
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017, 2019 NXP
+ * Copyright 2016-2017, 2019-2020 NXP
*/
#include <linux/cpumask.h>
@@ -124,8 +124,10 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
do {
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
- if (likely(!ret))
+ if (likely(!ret)) {
+ refcount_inc(&req->drv_ctx->refcnt);
return 0;
+ }
if (ret != -EBUSY)
break;
@@ -148,11 +150,6 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
fd = &msg->ern.fd;
- if (qm_fd_get_format(fd) != qm_fd_compound) {
- dev_err(qidev, "Non-compound FD from CAAM\n");
- return;
- }
-
drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (!drv_req) {
dev_err(qidev,
@@ -160,6 +157,13 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
return;
}
+ refcount_dec(&drv_req->drv_ctx->refcnt);
+
+ if (qm_fd_get_format(fd) != qm_fd_compound) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
+ return;
+ }
+
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
@@ -287,9 +291,10 @@ empty_fq:
return ret;
}
-static int empty_caam_fq(struct qman_fq *fq)
+static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
{
int ret;
+ int retries = 10;
struct qm_mcr_queryfq_np np;
/* Wait till the older CAAM FQ get empty */
@@ -304,11 +309,18 @@ static int empty_caam_fq(struct qman_fq *fq)
msleep(20);
} while (1);
- /*
- * Give extra time for pending jobs from this FQ in holding tanks
- * to get processed
- */
- msleep(20);
+ /* Wait until pending jobs from this FQ are processed by CAAM */
+ do {
+ if (refcount_read(&drv_ctx->refcnt) == 1)
+ break;
+
+ msleep(20);
+ } while (--retries);
+
+ if (!retries)
+ dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n",
+ refcount_read(&drv_ctx->refcnt), fq->fqid);
+
return 0;
}
@@ -340,7 +352,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = new_fq;
/* Empty and remove the older FQ */
- ret = empty_caam_fq(old_fq);
+ ret = empty_caam_fq(old_fq, drv_ctx);
if (ret) {
dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
@@ -453,6 +465,9 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
return ERR_PTR(-ENOMEM);
}
+ /* init reference counter used to track references to request FQ */
+ refcount_set(&drv_ctx->refcnt, 1);
+
drv_ctx->qidev = qidev;
return drv_ctx;
}
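
The new refcnt turns frame-queue draining from a fixed sleep into an observable condition: the counter starts at 1 (the base reference held by drv_ctx itself), gains one per enqueued frame and loses one per response or ERN callback. A sketch of the lifecycle stitched together from the hunks above:

    refcount_set(&drv_ctx->refcnt, 1);            /* base reference */
    refcount_inc(&drv_ctx->refcnt);               /* caam_qi_enqueue(): frame in flight */
    refcount_dec(&drv_ctx->refcnt);               /* dqrr/ern callback: frame consumed */
    while (refcount_read(&drv_ctx->refcnt) != 1)  /* empty_caam_fq(): wait for drain */
            msleep(20);                           /* bounded by the retry counter */
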
@@ -571,6 +586,16 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
return qman_cb_dqrr_stop;
fd = &dqrr->fd;
+
+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
+ if (unlikely(!drv_req)) {
+ dev_err(qidev,
+ "Can't find original request for caam response\n");
+ return qman_cb_dqrr_consume;
+ }
+
+ refcount_dec(&drv_req->drv_ctx->refcnt);
+
status = be32_to_cpu(fd->status);
if (unlikely(status)) {
u32 ssrc = status & JRSTA_SSRC_MASK;
@@ -588,13 +613,6 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
return qman_cb_dqrr_consume;
}
- drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
- if (unlikely(!drv_req)) {
- dev_err(qidev,
- "Can't find original request for caam response\n");
- return qman_cb_dqrr_consume;
- }
-
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 848958951f68..5894f16f8fe3 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -3,7 +3,7 @@
* Public definitions for the CAAM/QI (Queue Interface) backend.
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2017, 2020 NXP
*/
#ifndef __QI_H__
@@ -52,6 +52,7 @@ enum optype {
* @context_a: shared descriptor dma address
* @req_fq: to-CAAM request frame queue
* @rsp_fq: from-CAAM response frame queue
+ * @refcnt: reference counter incremented for each frame enqueued in to-CAAM FQ
* @cpu: cpu on which to receive CAAM response
* @op_type: operation type
* @qidev: device pointer for CAAM/QI backend
@@ -62,6 +63,7 @@ struct caam_drv_ctx {
dma_addr_t context_a;
struct qman_fq *req_fq;
struct qman_fq *rsp_fq;
+ refcount_t refcnt;
int cpu;
enum optype op_type;
struct device *qidev;
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 05127b70527d..0f810bc13b2b 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -487,7 +487,8 @@ struct rngtst {
/* RNG4 TRNG test registers */
struct rng4tst {
-#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_ACC BIT(5) /* TRNG access mode */
+#define RTMCTL_PRGM BIT(16) /* 1 -> program mode, 0 -> run mode */
#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in
both entropy shifter and
statistical checker */
@@ -523,9 +524,11 @@ struct rng4tst {
u32 rsvd1[40];
#define RDSTA_SKVT 0x80000000
#define RDSTA_SKVN 0x40000000
+#define RDSTA_PR0 BIT(4)
+#define RDSTA_PR1 BIT(5)
#define RDSTA_IF0 0x00000001
#define RDSTA_IF1 0x00000002
-#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
+#define RDSTA_MASK (RDSTA_PR1 | RDSTA_PR0 | RDSTA_IF1 | RDSTA_IF0)
u32 rdsta;
u32 rsvd2[15];
};
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index c4632d84c9a1..e91be9b8b083 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -71,7 +71,7 @@ struct ucode {
char version[VERSION_LEN - 1];
__be32 code_size;
u8 raz[12];
- u64 code[0];
+ u64 code[];
};
/**
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index e95e7aa5dbf1..ae7b44599914 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -215,6 +215,9 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
+
+ if (sp->clear_psp_master_device)
+ sp->clear_psp_master_device(sp);
}
void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e467860f797d..896f190b9a50 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -283,11 +283,11 @@ static int sev_get_platform_state(int *state, int *error)
return rc;
}
-static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
int state, rc;
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
/*
@@ -331,12 +331,12 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
return ret;
}
-static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
int rc;
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
if (sev->state == SEV_STATE_UNINIT) {
@@ -348,7 +348,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}
-static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
@@ -356,7 +356,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
void *blob = NULL;
int ret;
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
@@ -539,7 +539,7 @@ fw_err:
return ret;
}
-static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
@@ -547,7 +547,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
void *pek_blob, *oca_blob;
int ret;
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
@@ -698,7 +698,7 @@ static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
return ret;
}
-static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pdh_cert_export input;
@@ -708,7 +708,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
/* If platform is not in INIT state then transition it to INIT. */
if (sev->state != SEV_STATE_INIT) {
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
ret = __sev_platform_init_locked(&argp->error);
@@ -801,6 +801,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
void __user *argp = (void __user *)arg;
struct sev_issue_cmd input;
int ret = -EFAULT;
+ bool writable = file->f_mode & FMODE_WRITE;
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
@@ -819,25 +820,25 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
switch (input.cmd) {
case SEV_FACTORY_RESET:
- ret = sev_ioctl_do_reset(&input);
+ ret = sev_ioctl_do_reset(&input, writable);
break;
case SEV_PLATFORM_STATUS:
ret = sev_ioctl_do_platform_status(&input);
break;
case SEV_PEK_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
break;
case SEV_PDH_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
break;
case SEV_PEK_CSR:
- ret = sev_ioctl_do_pek_csr(&input);
+ ret = sev_ioctl_do_pek_csr(&input, writable);
break;
case SEV_PEK_CERT_IMPORT:
- ret = sev_ioctl_do_pek_import(&input);
+ ret = sev_ioctl_do_pek_import(&input, writable);
break;
case SEV_PDH_CERT_EXPORT:
- ret = sev_ioctl_do_pdh_export(&input);
+ ret = sev_ioctl_do_pdh_export(&input, writable);
break;
case SEV_GET_ID:
pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
@@ -896,9 +897,9 @@ EXPORT_SYMBOL_GPL(sev_guest_df_flush);
static void sev_exit(struct kref *ref)
{
- struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
-
misc_deregister(&misc_dev->misc);
+ kfree(misc_dev);
+ misc_dev = NULL;
}
static int sev_misc_init(struct sev_device *sev)
@@ -916,7 +917,7 @@ static int sev_misc_init(struct sev_device *sev)
if (!misc_dev) {
struct miscdevice *misc;
- misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
if (!misc_dev)
return -ENOMEM;
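
misc_dev must now outlive any single PSP device, so it moves from devm_kzalloc() (freed on device unbind) to a bare kzalloc() whose lifetime is governed entirely by the kref: the final put runs sev_exit(), which deregisters the misc device and frees the struct. Roughly:

    kref_init(&misc_dev->refcount);          /* first device: count = 1 */
    kref_get(&misc_dev->refcount);           /* each additional device */
    kref_put(&misc_dev->refcount, sev_exit); /* last put: deregister + kfree */
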
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 423594608ad1..f913f1494af9 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -90,6 +90,7 @@ struct sp_device {
/* get and set master device */
struct sp_device*(*get_psp_master_device)(void);
void (*set_psp_master_device)(struct sp_device *);
+ void (*clear_psp_master_device)(struct sp_device *);
bool irq_registered;
bool use_tasklet;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 56c1f61c0f84..cb6cb47053f4 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -146,6 +146,14 @@ static struct sp_device *psp_get_master(void)
return sp_dev_master;
}
+static void psp_clear_master(struct sp_device *sp)
+{
+ if (sp == sp_dev_master) {
+ sp_dev_master = NULL;
+ dev_dbg(sp->dev, "Cleared sp_dev_master\n");
+ }
+}
+
static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct sp_device *sp;
@@ -206,6 +214,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
sp->set_psp_master_device = psp_set_master;
sp->get_psp_master_device = psp_get_master;
+ sp->clear_psp_master_device = psp_clear_master;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 2fc0e0da790b..1cf51edbc4b9 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -6,8 +6,9 @@
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
-#include <crypto/internal/des.h>
+#include <crypto/gcm.h>
#include <linux/rtnetlink.h>
+#include <crypto/internal/des.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
@@ -26,7 +27,7 @@
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
struct cc_aead_handle {
- cc_sram_addr_t sram_workspace_addr;
+ u32 sram_workspace_addr;
struct list_head aead_list;
};
@@ -60,11 +61,6 @@ struct cc_aead_ctx {
enum drv_hash_mode auth_mode;
};
-static inline bool valid_assoclen(struct aead_request *req)
-{
- return ((req->assoclen == 16) || (req->assoclen == 20));
-}
-
static void cc_aead_exit(struct crypto_aead *tfm)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -417,7 +413,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
dma_addr_t key_dma_addr = 0;
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+ u32 larval_addr;
struct cc_crypto_req cc_req = {};
unsigned int blocksize;
unsigned int digestsize;
@@ -448,8 +444,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
if (!key)
return -ENOMEM;
- key_dma_addr = dma_map_single(dev, (void *)key, keylen,
- DMA_TO_DEVICE);
+ key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
@@ -460,6 +455,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
/* Load hash initial state */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hashmode);
+ larval_addr = cc_larval_digest_addr(ctx->drvdata,
+ ctx->auth_mode);
set_din_sram(&desc[idx], larval_addr, digestsize);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
@@ -796,7 +793,7 @@ static void cc_proc_authen_desc(struct aead_request *areq,
* assoc. + iv + data -compact in one table
* if assoclen is ZERO only IV perform
*/
- cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+ u32 mlli_addr = areq_ctx->assoc.sram_addr;
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
if (areq_ctx->is_single_pass) {
@@ -1170,7 +1167,7 @@ static void cc_mlli_to_sram(struct aead_request *req,
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
!req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
- (unsigned int)ctx->drvdata->mlli_sram_addr,
+ ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
/* Copy MLLI table host-to-sram */
hw_desc_init(&desc[*seq_size]);
@@ -1222,7 +1219,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
- /**
+ /*
* Single-pass flow
*/
cc_set_hmac_desc(req, desc, seq_size);
@@ -1234,7 +1231,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
return;
}
- /**
+ /*
* Double-pass flow
* Fallback for unsupported single-pass modes,
* i.e. using assoc. data of non-word-multiple
@@ -1275,7 +1272,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
- /**
+ /*
* Single-pass flow
*/
cc_set_xcbc_desc(req, desc, seq_size);
@@ -1286,7 +1283,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
return;
}
- /**
+ /*
* Double-pass flow
* Fallback for unsupported single-pass modes,
* i.e. using assoc. data of non-word-multiple
@@ -1611,7 +1608,6 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1799,12 +1795,6 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int cipher_flow_mode;
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- cipher_flow_mode = AES_and_HASH;
- } else { /* Encrypt */
- cipher_flow_mode = AES_to_HASH_and_DOUT;
- }
-
//in RFC4543 no data to encrypt. just copy data from src to dest.
if (req_ctx->plaintext_authenticate_only) {
cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
@@ -1816,6 +1806,12 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
return 0;
}
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_and_HASH;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ }
+
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
@@ -1870,8 +1866,7 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
- temp64 = cpu_to_be64((req_ctx->assoclen +
- GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1891,7 +1886,6 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1921,8 +1915,8 @@ static int cc_proc_aead(struct aead_request *req,
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_aead_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_aead_complete;
+ cc_req.user_arg = req;
/* Setup request context */
areq_ctx->gen_ctx.op_type = direct;
@@ -1989,7 +1983,6 @@ static int cc_proc_aead(struct aead_request *req,
/* Load MLLI tables to SRAM if necessary */
cc_mlli_to_sram(req, desc, &seq_len);
- /*TODO: move seq len by reference */
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
case DRV_HASH_SHA256:
@@ -2034,9 +2027,6 @@ static int cc_aead_encrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
- areq_ctx->is_gcm4543 = false;
-
- areq_ctx->plaintext_authenticate_only = false;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2050,22 +2040,17 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
/* Very similar to cc_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->assoclen = req->assoclen;
- areq_ctx->is_gcm4543 = true;
+ areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
cc_proc_rfc4309_ccm(req);
@@ -2086,9 +2071,6 @@ static int cc_aead_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
- areq_ctx->is_gcm4543 = false;
-
- areq_ctx->plaintext_authenticate_only = false;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2099,24 +2081,19 @@ static int cc_aead_decrypt(struct aead_request *req)
static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->assoclen = req->assoclen;
+ areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
- areq_ctx->is_gcm4543 = true;
cc_proc_rfc4309_ccm(req);
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
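
For the IPsec modes, the IV bytes that cc_proc_rfc4309_ccm()/cc_proc_rfc4_gcm() peel off are no longer counted in the driver-internal assoclen, and crypto_ipsec_check_assoclen() only admits the ESP layouts of 16 or 20 bytes. A worked example, assuming the usual ESP associated-data layout:

    /* req->assoclen == 16: SPI (4) + 32-bit sequence number (4) + IV (8) */
    areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE; /* 16 - 8 = 8 AAD bytes */
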
@@ -2216,28 +2193,20 @@ static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to cc_aead_encrypt() above. */
-
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->assoclen = req->assoclen;
- areq_ctx->plaintext_authenticate_only = false;
+ areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
cc_proc_rfc4_gcm(req);
- areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2248,17 +2217,12 @@ out:
static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to cc_aead_encrypt() above. */
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
@@ -2270,7 +2234,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
areq_ctx->assoclen = req->assoclen;
cc_proc_rfc4_gcm(req);
- areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2281,28 +2244,20 @@ out:
static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to cc_aead_decrypt() above. */
-
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->assoclen = req->assoclen;
- areq_ctx->plaintext_authenticate_only = false;
+ areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
cc_proc_rfc4_gcm(req);
- areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2313,17 +2268,12 @@ out:
static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to cc_aead_decrypt() above. */
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
@@ -2335,7 +2285,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
areq_ctx->assoclen = req->assoclen;
cc_proc_rfc4_gcm(req);
- areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2614,7 +2563,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
struct cc_crypto_alg *t_alg;
struct aead_alg *alg;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
@@ -2628,6 +2577,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_blocksize = tmpl->blocksize;
alg->init = cc_aead_init;
alg->exit = cc_aead_exit;
@@ -2643,19 +2593,12 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
int cc_aead_free(struct cc_drvdata *drvdata)
{
struct cc_crypto_alg *t_alg, *n;
- struct cc_aead_handle *aead_handle =
- (struct cc_aead_handle *)drvdata->aead_handle;
-
- if (aead_handle) {
- /* Remove registered algs */
- list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
- entry) {
- crypto_unregister_aead(&t_alg->aead_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
- }
- kfree(aead_handle);
- drvdata->aead_handle = NULL;
+ struct cc_aead_handle *aead_handle = drvdata->aead_handle;
+
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
+ crypto_unregister_aead(&t_alg->aead_alg);
+ list_del(&t_alg->entry);
}
return 0;
@@ -2669,7 +2612,7 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
int alg;
struct device *dev = drvdata_to_dev(drvdata);
- aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
+ aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
if (!aead_handle) {
rc = -ENOMEM;
goto fail0;
@@ -2682,7 +2625,6 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
MAX_HMAC_DIGEST_SIZE);
if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
- dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto fail1;
}
@@ -2705,18 +2647,16 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->aead_alg.base.cra_driver_name);
- goto fail2;
- } else {
- list_add_tail(&t_alg->entry, &aead_handle->aead_list);
- dev_dbg(dev, "Registered %s\n",
- t_alg->aead_alg.base.cra_driver_name);
+ goto fail1;
}
+
+ list_add_tail(&t_alg->entry, &aead_handle->aead_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->aead_alg.base.cra_driver_name);
}
return 0;
-fail2:
- kfree(t_alg);
fail1:
cc_aead_free(drvdata);
fail0:
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index f12169b57f9d..b69591550730 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -66,7 +66,7 @@ struct aead_req_ctx {
/* used to prevent cache coherence problem */
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /* store orig iv */
- u32 assoclen; /* internal assoclen */
+ u32 assoclen; /* size of AAD buffer to authenticate */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
@@ -79,7 +79,6 @@ struct aead_req_ctx {
dma_addr_t gcm_iv_inc2_dma_addr;
dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
- bool is_gcm4543;
u8 *icv_virt_addr; /* Virt. address of ICV */
struct async_gen_req_ctx gen_ctx;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index a72586eccd81..b2bd093e7013 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -13,16 +13,6 @@
#include "cc_hash.h"
#include "cc_aead.h"
-enum dma_buffer_type {
- DMA_NULL_TYPE = -1,
- DMA_SGL_TYPE = 1,
- DMA_BUFF_TYPE = 2,
-};
-
-struct buff_mgr_handle {
- struct dma_pool *mlli_buffs_pool;
-};
-
union buffer_array_entry {
struct scatterlist *sgl;
dma_addr_t buffer_dma;
@@ -34,7 +24,6 @@ struct buffer_array {
unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
- enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
@@ -64,11 +53,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
enum cc_sg_cpy_direct dir)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- u32 skip = areq_ctx->assoclen + req->cryptlen;
-
- if (areq_ctx->is_gcm4543)
- skip += crypto_aead_ivsize(tfm);
+ u32 skip = req->assoclen + req->cryptlen;
cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
(skip - areq_ctx->req_authsize), skip, dir);
@@ -77,9 +62,13 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
/**
* cc_get_sgl_nents() - Get scatterlist number of entries.
*
+ * @dev: Device object
* @sg_list: SG list
* @nbytes: [IN] Total SGL data bytes.
* @lbytes: [OUT] Returns the amount of bytes at the last entry
+ *
+ * Return:
+ * Number of entries in the scatterlist
*/
static unsigned int cc_get_sgl_nents(struct device *dev,
struct scatterlist *sg_list,
@@ -87,6 +76,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
{
unsigned int nents = 0;
+ *lbytes = 0;
+
while (nbytes && sg_list) {
nents++;
/* get the number of bytes in the last entry */
@@ -95,6 +86,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
nbytes : sg_list->length;
sg_list = sg_next(sg_list);
}
+
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
}
@@ -103,11 +95,13 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
* cc_copy_sg_portion() - Copy scatter list data,
* from to_skip to end, to dest and vice versa
*
- * @dest:
- * @sg:
- * @to_skip:
- * @end:
- * @direct:
+ * @dev: Device object
+ * @dest: Buffer to copy to/from
+ * @sg: SG list
+ * @to_skip: Number of bytes to skip before copying
+ * @end: Offset of last byte to copy
+ * @direct: Transfer direction (true == from SG list to buffer, false == from
+ * buffer to SG list)
*/
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
@@ -115,7 +109,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 nents;
nents = sg_nents_for_len(sg, end);
- sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+ sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
@@ -204,21 +198,15 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
goto build_mlli_exit;
}
/* Point to start of MLLI */
- mlli_p = (u32 *)mlli_params->mlli_virt_addr;
+ mlli_p = mlli_params->mlli_virt_addr;
/* go over all SG's and link it to one MLLI table */
for (i = 0; i < sg_data->num_of_buffers; i++) {
union buffer_array_entry *entry = &sg_data->entry[i];
u32 tot_len = sg_data->total_data_len[i];
u32 offset = sg_data->offset[i];
- if (sg_data->type[i] == DMA_SGL_TYPE)
- rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
- offset, &total_nents,
- &mlli_p);
- else /*DMA_BUFF_TYPE*/
- rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
- tot_len, &total_nents,
- &mlli_p);
+ rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
+ &total_nents, &mlli_p);
if (rc)
return rc;
@@ -244,27 +232,6 @@ build_mlli_exit:
return rc;
}
-static void cc_add_buffer_entry(struct device *dev,
- struct buffer_array *sgl_data,
- dma_addr_t buffer_dma, unsigned int buffer_len,
- bool is_last_entry, u32 *mlli_nents)
-{
- unsigned int index = sgl_data->num_of_buffers;
-
- dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
- index, &buffer_dma, buffer_len, is_last_entry);
- sgl_data->nents[index] = 1;
- sgl_data->entry[index].buffer_dma = buffer_dma;
- sgl_data->offset[index] = 0;
- sgl_data->total_data_len[index] = buffer_len;
- sgl_data->type[index] = DMA_BUFF_TYPE;
- sgl_data->is_last[index] = is_last_entry;
- sgl_data->mlli_nents[index] = mlli_nents;
- if (sgl_data->mlli_nents[index])
- *sgl_data->mlli_nents[index] = 0;
- sgl_data->num_of_buffers++;
-}
-
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
unsigned int nents, struct scatterlist *sgl,
unsigned int data_len, unsigned int data_offset,
@@ -278,7 +245,6 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
sgl_data->entry[index].sgl = sgl;
sgl_data->offset[index] = data_offset;
sgl_data->total_data_len[index] = data_len;
- sgl_data->type[index] = DMA_SGL_TYPE;
sgl_data->is_last[index] = is_last_table;
sgl_data->mlli_nents[index] = mlli_nents;
if (sgl_data->mlli_nents[index])
@@ -290,37 +256,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
- if (sg_is_last(sg)) {
- /* One entry only case -set to DLLI */
- if (dma_map_sg(dev, sg, 1, direction) != 1) {
- dev_err(dev, "dma_map_sg() single buffer failed\n");
- return -ENOMEM;
- }
- dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
- &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
- sg->offset, sg->length);
- *lbytes = nbytes;
- *nents = 1;
- *mapped_nents = 1;
- } else { /*sg_is_last*/
- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
- if (*nents > max_sg_nents) {
- *nents = 0;
- dev_err(dev, "Too many fragments. current %d max %d\n",
- *nents, max_sg_nents);
- return -ENOMEM;
- }
- /* In case of mmu the number of mapped nents might
- * be changed from the original sgl nents
- */
- *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (*mapped_nents == 0) {
- *nents = 0;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
+ int ret = 0;
+
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+ if (*nents > max_sg_nents) {
+ *nents = 0;
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ *nents, max_sg_nents);
+ return -ENOMEM;
+ }
+
+ ret = dma_map_sg(dev, sg, *nents, direction);
+ if (!ret) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+ return -ENOMEM;
}
+ *mapped_nents = ret;
+
return 0;
}
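
A convention worth noting in the flattened helper: dma_map_sg() reports failure by returning 0 mapped entries, never a poisoned address, so the returned count is what must be tested (dma_mapping_error() is only defined for dma_map_single()/dma_map_page() results). A minimal sketch:

    int n = dma_map_sg(dev, sg, nents, direction);
    if (!n)         /* zero mapped entries == mapping failure */
            return -ENOMEM;
    /* on success, n may be less than nents if the IOMMU merged entries */
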
@@ -411,7 +365,6 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
{
struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
struct mlli_params *mlli_params = &req_ctx->mlli_params;
- struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
u32 dummy = 0;
@@ -424,10 +377,9 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
/* Map IV buffer */
if (ivsize) {
- dump_byte_array("iv", (u8 *)info, ivsize);
+ dump_byte_array("iv", info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
- dma_map_single(dev, (void *)info,
- ivsize, DMA_BIDIRECTIONAL);
+ dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
@@ -476,7 +428,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
- mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
if (rc)
goto cipher_exit;
@@ -555,11 +507,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
+ DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -614,18 +567,6 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
- // TODO: what about CTR?? ask Ron
- if (do_chain && areq_ctx->plaintext_authenticate_only) {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
- unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
- /* Chain to given list */
- cc_add_buffer_entry(dev, sg_data,
- (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
- iv_size_to_authenc, is_last,
- &areq_ctx->assoc.mlli_nents);
- areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
- }
chain_iv_exit:
return rc;
@@ -639,13 +580,8 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
int mapped_nents = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
- if (areq_ctx->is_gcm4543)
- size_of_assoc += crypto_aead_ivsize(tfm);
-
if (!sg_data) {
rc = -EINVAL;
goto chain_assoc_exit;
@@ -661,7 +597,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+ mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
if (mapped_nents < 0)
return mapped_nents;
@@ -854,16 +790,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
- unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int size_for_map = req->assoclen + req->cryptlen;
u32 sg_index = 0;
- bool is_gcm4543 = areq_ctx->is_gcm4543;
- u32 size_to_skip = areq_ctx->assoclen;
+ u32 size_to_skip = req->assoclen;
struct scatterlist *sgl;
- if (is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
-
offset = size_to_skip;
if (!sg_data)
@@ -872,16 +803,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_sgl = req->src;
areq_ctx->dst_sgl = req->dst;
- if (is_gcm4543)
- size_for_map += crypto_aead_ivsize(tfm);
-
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
&src_last_bytes);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
- while (sg_index <= size_to_skip) {
+ while (src_mapped_nents && (sg_index <= size_to_skip)) {
src_mapped_nents--;
offset -= areq_ctx->src_sgl->length;
sgl = sg_next(areq_ctx->src_sgl);
@@ -901,14 +829,15 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
- size_for_map = areq_ctx->assoclen + req->cryptlen;
- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- authsize : 0;
- if (is_gcm4543)
- size_for_map += crypto_aead_ivsize(tfm);
+ size_for_map = req->assoclen + req->cryptlen;
+
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_for_map += authsize;
+ else
+ size_for_map -= authsize;
rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
- &areq_ctx->dst.nents,
+ &areq_ctx->dst.mapped_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
if (rc)
@@ -921,7 +850,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
offset = size_to_skip;
//check where the data starts
- while (sg_index <= size_to_skip) {
+ while (dst_mapped_nents && sg_index <= size_to_skip) {
dst_mapped_nents--;
offset -= areq_ctx->dst_sgl->length;
sgl = sg_next(areq_ctx->dst_sgl);
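
Both skip-walks (src above, dst here) now test the remaining entry count before advancing, so a scatterlist shorter than the assoc-data prefix can no longer underflow the counter or walk past the end of the chain. The same guard, factored into a stand-alone helper purely for illustration:

	#include <linux/scatterlist.h>
	#include <linux/types.h>

	/* Skip the first `skip` bytes of a mapped scatterlist; returns the
	 * entry holding the first data byte (with *offset set to the byte
	 * offset inside it), or NULL if the list is too short. */
	static struct scatterlist *sgl_skip(struct scatterlist *sg, u32 nents,
					    u32 skip, u32 *offset)
	{
		u32 seen = sg->length;

		*offset = skip;
		while (nents && seen <= skip) {
			nents--;
			*offset -= sg->length;
			sg = sg_next(sg);
			if (!sg)
				return NULL;
			seen += sg->length;
		}
		return nents ? sg : NULL;
	}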
@@ -1012,14 +941,11 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
unsigned int authsize = areq_ctx->req_authsize;
- struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
int rc = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- bool is_gcm4543 = areq_ctx->is_gcm4543;
dma_addr_t dma_addr;
u32 mapped_nents = 0;
u32 dummy = 0; /*used for the assoc data fragments */
- u32 size_to_map = 0;
+ u32 size_to_map;
gfp_t flags = cc_gfp_flags(&req->base);
mlli_params->curr_pool = NULL;
@@ -1116,14 +1042,15 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
- size_to_map = req->cryptlen + areq_ctx->assoclen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_map = req->cryptlen + req->assoclen;
+ /* If we do in-place encryption, we also need the auth tag */
+ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
+ (req->src == req->dst)) {
size_to_map += authsize;
+ }
- if (is_gcm4543)
- size_to_map += crypto_aead_ivsize(tfm);
rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
- &areq_ctx->src.nents,
+ &areq_ctx->src.mapped_nents,
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
@@ -1183,7 +1110,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
*/
if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
- mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
if (rc)
goto aead_map_failure;
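
The mapping size now distinguishes in-place from out-of-place requests: when src == dst the engine writes the authentication tag back into the same buffer during encryption, so the source mapping must cover assoclen + cryptlen + authsize; for out-of-place requests the tag space belongs to dst instead. The rule in isolation — a sketch assuming the ccree layout of assoc data followed by text:

	#include <crypto/aead.h>
	#include <linux/types.h>

	static unsigned int aead_src_map_size(struct aead_request *req,
					      unsigned int authsize,
					      bool encrypt)
	{
		unsigned int size = req->assoclen + req->cryptlen;

		if (encrypt && req->src == req->dst)
			size += authsize;	/* tag is appended in place */

		return size;
	}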
@@ -1211,7 +1138,6 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
- struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1229,7 +1155,6 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
return 0;
}
- /*TODO: copy data in case that buffer is enough for operation */
/* map the previous buffer */
if (*curr_buff_cnt) {
rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
@@ -1258,7 +1183,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/*build mlli */
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
- mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
@@ -1296,7 +1221,6 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
unsigned int update_data_len;
u32 total_in_len = nbytes + *curr_buff_cnt;
struct buffer_array sg_data;
- struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
unsigned int swap_index = 0;
int rc = 0;
u32 dummy = 0;
@@ -1371,7 +1295,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
}
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
- mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
@@ -1438,39 +1362,22 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
- struct buff_mgr_handle *buff_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
- buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
- if (!buff_mgr_handle)
- return -ENOMEM;
-
- drvdata->buff_mgr_handle = buff_mgr_handle;
-
- buff_mgr_handle->mlli_buffs_pool =
+ drvdata->mlli_buffs_pool =
dma_pool_create("dx_single_mlli_tables", dev,
MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
LLI_ENTRY_BYTE_SIZE,
MLLI_TABLE_MIN_ALIGNMENT, 0);
- if (!buff_mgr_handle->mlli_buffs_pool)
- goto error;
+ if (!drvdata->mlli_buffs_pool)
+ return -ENOMEM;
return 0;
-
-error:
- cc_buffer_mgr_fini(drvdata);
- return -ENOMEM;
}
int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
- struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
-
- if (buff_mgr_handle) {
- dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
- kfree(drvdata->buff_mgr_handle);
- drvdata->buff_mgr_handle = NULL;
- }
+ dma_pool_destroy(drvdata->mlli_buffs_pool);
return 0;
}
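
Folding the pool handle straight into cc_drvdata works because the dma_pool API needs no wrapper state and dma_pool_destroy() is documented to accept NULL, so the teardown path can stay unconditional. The resulting lifecycle, sketched with illustrative sizes (the driver derives them from the MLLI table geometry):

	#include <linux/dmapool.h>

	static int my_pool_init(struct device *dev, struct dma_pool **pool)
	{
		*pool = dma_pool_create("my_mlli_pool", dev, 4096, 32, 0);
		return *pool ? 0 : -ENOMEM;
	}

	static void my_pool_fini(struct dma_pool *pool)
	{
		/* dma_pool_destroy() accepts NULL, so no guard is needed —
		 * this is what lets the handle struct disappear entirely */
		dma_pool_destroy(pool);
	}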
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index af434872c6ff..653441b6542e 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -24,14 +24,15 @@ enum cc_sg_cpy_direct {
};
struct cc_mlli {
- cc_sram_addr_t sram_addr;
+ u32 sram_addr;
+ unsigned int mapped_nents;
unsigned int nents; //sg nents
unsigned int mlli_nents; //mlli nents might be different than the above
};
struct mlli_params {
struct dma_pool *curr_pool;
- u8 *mlli_virt_addr;
+ void *mlli_virt_addr;
dma_addr_t mlli_dma_addr;
u32 mlli_len;
};
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 7d6252d892d7..a84335328f37 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -20,10 +20,6 @@
#define template_skcipher template_u.skcipher
-struct cc_cipher_handle {
- struct list_head alg_list;
-};
-
struct cc_user_key_info {
u8 *key;
dma_addr_t key_dma_addr;
@@ -184,7 +180,7 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
ctx_p->user.key);
/* Map key buffer */
- ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+ ctx_p->user.key_dma_addr = dma_map_single(dev, ctx_p->user.key,
max_key_buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
@@ -284,7 +280,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n",
ctx_p, crypto_tfm_alg_name(tfm), keylen);
- dump_byte_array("key", (u8 *)key, keylen);
+ dump_byte_array("key", key, keylen);
/* STAT_PHASE_0: Init and sanity checks */
@@ -387,7 +383,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
ctx_p, crypto_tfm_alg_name(tfm), keylen);
- dump_byte_array("key", (u8 *)key, keylen);
+ dump_byte_array("key", key, keylen);
/* STAT_PHASE_0: Init and sanity checks */
@@ -533,14 +529,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
- unsigned int du_size = nbytes;
-
- struct cc_crypto_alg *cc_alg =
- container_of(tfm->__crt_alg, struct cc_crypto_alg,
- skcipher_alg.base);
-
- if (cc_alg->data_unit)
- du_size = cc_alg->data_unit;
switch (cipher_mode) {
case DRV_CIPHER_ECB:
@@ -753,7 +741,7 @@ static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
&req_ctx->mlli_params.mlli_dma_addr,
req_ctx->mlli_params.mlli_len,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ ctx_p->drvdata->mlli_sram_addr);
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI,
req_ctx->mlli_params.mlli_dma_addr,
@@ -801,16 +789,16 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
req_ctx->in_mlli_nents, NS_BIT);
if (req_ctx->out_nents == 0) {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
- (unsigned int)ctx_p->drvdata->mlli_sram_addr,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ ctx_p->drvdata->mlli_sram_addr,
+ ctx_p->drvdata->mlli_sram_addr);
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
(!last_desc ? 0 : 1));
} else {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
- (unsigned int)ctx_p->drvdata->mlli_sram_addr,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr +
+ ctx_p->drvdata->mlli_sram_addr,
+ ctx_p->drvdata->mlli_sram_addr +
(u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
set_dout_mlli(&desc[*seq_size],
(ctx_p->drvdata->mlli_sram_addr +
@@ -871,7 +859,6 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_0: Init and sanity checks */
- /* TODO: check data length according to mode */
if (validate_data_size(ctx_p, nbytes)) {
dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
rc = -EINVAL;
@@ -893,8 +880,8 @@ static int cc_cipher_process(struct skcipher_request *req,
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_cipher_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_cipher_complete;
+ cc_req.user_arg = req;
/* Setup CPP operation details */
if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
@@ -1228,6 +1215,10 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
+ /* See https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg40576.html
+ * for the reason why this differs from the generic
+ * implementation.
+ */
.name = "xts(aes)",
.driver_name = "xts-aes-ccree",
.blocksize = 1,
@@ -1423,7 +1414,7 @@ static const struct cc_alg_template skcipher_algs[] = {
{
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccree",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1576,7 +1567,7 @@ static const struct cc_alg_template skcipher_algs[] = {
{
.name = "ctr(sm4)",
.driver_name = "ctr-sm4-ccree",
- .blocksize = SM4_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
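
OFB and CTR are stream modes, so their skcipher blocksize is 1: with AES_BLOCK_SIZE (or SM4_BLOCK_SIZE) here the crypto API would reject any request whose length is not a multiple of the cipher block, which is wrong for a stream of arbitrary length. A sketch of how a stream-mode alg advertises this (all fields beyond the ones shown are elided; the chunksize line is an assumption about how a template would expose the underlying block):

	#include <crypto/internal/skcipher.h>
	#include <crypto/aes.h>

	static struct skcipher_alg ctr_sketch = {
		.base.cra_name		= "ctr(aes)",
		.base.cra_blocksize	= 1,		/* stream mode */
		.chunksize		= AES_BLOCK_SIZE, /* real block size */
		/* .setkey / .encrypt / .decrypt elided */
	};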
@@ -1634,7 +1625,7 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
struct cc_crypto_alg *t_alg;
struct skcipher_alg *alg;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
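
devm_kzalloc() ties the allocation's lifetime to the device binding, which is what lets the error paths below drop their kfree() calls and cc_cipher_free() shrink to a plain unregister loop. The pattern in miniature, with an illustrative type:

	#include <linux/device.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct my_alg {
		struct list_head entry;
		/* ... */
	};

	static int register_one(struct device *dev, struct list_head *algs)
	{
		struct my_alg *a;

		/* Freed automatically on probe failure or driver detach,
		 * so neither the error path nor the free path calls kfree() */
		a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
		if (!a)
			return -ENOMEM;

		list_add_tail(&a->entry, algs);
		return 0;
	}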
@@ -1665,36 +1656,23 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
int cc_cipher_free(struct cc_drvdata *drvdata)
{
struct cc_crypto_alg *t_alg, *n;
- struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;
-
- if (cipher_handle) {
- /* Remove registered algs */
- list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
- entry) {
- crypto_unregister_skcipher(&t_alg->skcipher_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
- }
- kfree(cipher_handle);
- drvdata->cipher_handle = NULL;
+
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &drvdata->alg_list, entry) {
+ crypto_unregister_skcipher(&t_alg->skcipher_alg);
+ list_del(&t_alg->entry);
}
return 0;
}
int cc_cipher_alloc(struct cc_drvdata *drvdata)
{
- struct cc_cipher_handle *cipher_handle;
struct cc_crypto_alg *t_alg;
struct device *dev = drvdata_to_dev(drvdata);
int rc = -ENOMEM;
int alg;
- cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
- if (!cipher_handle)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&cipher_handle->alg_list);
- drvdata->cipher_handle = cipher_handle;
+ INIT_LIST_HEAD(&drvdata->alg_list);
/* Linux crypto */
dev_dbg(dev, "Number of algorithms = %zu\n",
@@ -1723,14 +1701,12 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->skcipher_alg.base.cra_driver_name);
- kfree(t_alg);
goto fail0;
- } else {
- list_add_tail(&t_alg->entry,
- &cipher_handle->alg_list);
- dev_dbg(dev, "Registered %s\n",
- t_alg->skcipher_alg.base.cra_driver_name);
}
+
+ list_add_tail(&t_alg->entry, &drvdata->alg_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->skcipher_alg.base.cra_driver_name);
}
return 0;
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 566999738698..c454afce7781 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -8,10 +8,6 @@
#include "cc_crypto_ctx.h"
#include "cc_debugfs.h"
-struct cc_debugfs_ctx {
- struct dentry *dir;
-};
-
#define CC_DEBUG_REG(_X) { \
.name = __stringify(_X),\
.offset = CC_REG(_X) \
@@ -67,13 +63,8 @@ void __exit cc_debugfs_global_fini(void)
int cc_debugfs_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- struct cc_debugfs_ctx *ctx;
struct debugfs_regset32 *regset, *verset;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
return -ENOMEM;
@@ -81,16 +72,18 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
regset->regs = debug_regs;
regset->nregs = ARRAY_SIZE(debug_regs);
regset->base = drvdata->cc_base;
+ regset->dev = dev;
- ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
+ drvdata->dir = debugfs_create_dir(drvdata->plat_dev->name,
+ cc_debugfs_dir);
- debugfs_create_regset32("regs", 0400, ctx->dir, regset);
- debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
+ debugfs_create_regset32("regs", 0400, drvdata->dir, regset);
+ debugfs_create_bool("coherent", 0400, drvdata->dir, &drvdata->coherent);
verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
/* Failing here is not important enough to fail the module load */
if (!verset)
- goto out;
+ return 0;
if (drvdata->hw_rev <= CC_HW_REV_712) {
ver_sig_regs[0].offset = drvdata->sig_offset;
@@ -102,17 +95,13 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
verset->nregs = ARRAY_SIZE(pid_cid_regs);
}
verset->base = drvdata->cc_base;
+ verset->dev = dev;
- debugfs_create_regset32("version", 0400, ctx->dir, verset);
-
-out:
- drvdata->debugfs = ctx;
+ debugfs_create_regset32("version", 0400, drvdata->dir, verset);
return 0;
}
void cc_debugfs_fini(struct cc_drvdata *drvdata)
{
- struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
-
- debugfs_remove_recursive(ctx->dir);
+ debugfs_remove_recursive(drvdata->dir);
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 532bc95a8373..2d50991b9a17 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -14,6 +14,8 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include "cc_driver.h"
#include "cc_request_mgr.h"
@@ -134,7 +136,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
/* if driver suspended return, probably shared interrupt */
- if (cc_pm_is_dev_suspended(dev))
+ if (pm_runtime_suspended(dev))
return IRQ_NONE;
/* read the interrupt status */
@@ -269,7 +271,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
u32 val, hw_rev_pidr, sig_cidr;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
- const struct of_device_id *dev_id;
struct clk *clk;
int irq;
int rc = 0;
@@ -278,11 +279,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
if (!new_drvdata)
return -ENOMEM;
- dev_id = of_match_node(arm_ccree_dev_of_match, np);
- if (!dev_id)
- return -ENODEV;
-
- hw_rev = (struct cc_hw_data *)dev_id->data;
+ hw_rev = of_device_get_match_data(dev);
new_drvdata->hw_rev_name = hw_rev->name;
new_drvdata->hw_rev = hw_rev->rev;
new_drvdata->std_bodies = hw_rev->std_bodies;
@@ -302,22 +299,12 @@ static int init_cc_resources(struct platform_device *plat_dev)
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk))
- switch (PTR_ERR(clk)) {
- /* Clock is optional so this might be fine */
- case -ENOENT:
- break;
-
- /* Clock not available, let's try again soon */
- case -EPROBE_DEFER:
- return -EPROBE_DEFER;
-
- default:
- dev_err(dev, "Error getting clock: %ld\n",
- PTR_ERR(clk));
- return PTR_ERR(clk);
- }
+ clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "Error getting clock: %pe\n", clk);
+ return PTR_ERR(clk);
+ }
new_drvdata->clk = clk;
new_drvdata->coherent = of_dma_is_coherent(np);
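
devm_clk_get_optional() collapses the old switch statement: it returns NULL rather than -ENOENT when no clock is described for the device, and every clk_*() call treats a NULL clk as a no-op — which is also why the cc_clk_on()/cc_clk_off() wrappers further down can be deleted outright. The shape of the pattern (helper name is illustrative):

	#include <linux/clk.h>
	#include <linux/device.h>

	static int my_get_clk(struct device *dev, struct clk **out)
	{
		struct clk *clk;

		clk = devm_clk_get_optional(dev, NULL); /* NULL == no clock */
		if (IS_ERR(clk)) {
			if (PTR_ERR(clk) != -EPROBE_DEFER)
				dev_err(dev, "Error getting clock: %pe\n", clk);
			return PTR_ERR(clk);
		}

		*out = clk;
		return clk_prepare_enable(clk);	/* no-op when clk is NULL */
	}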
@@ -344,13 +331,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
init_completion(&new_drvdata->hw_queue_avail);
- if (!plat_dev->dev.dma_mask)
- plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
while (dma_mask > 0x7fffffffUL) {
- if (dma_supported(&plat_dev->dev, dma_mask)) {
- rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
+ if (dma_supported(dev, dma_mask)) {
+ rc = dma_set_coherent_mask(dev, dma_mask);
if (!rc)
break;
}
@@ -362,7 +349,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}
- rc = cc_clk_on(new_drvdata);
+ rc = clk_prepare_enable(new_drvdata->clk);
if (rc) {
dev_err(dev, "Failed to enable clock");
return rc;
@@ -370,7 +357,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->sec_disabled = cc_sec_disable;
- /* wait for Crytpcell reset completion */
+ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ rc = pm_runtime_get_sync(dev);
+ if (rc < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc);
+ goto post_pm_err;
+ }
+
+ /* Wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(new_drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
}
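
Runtime PM is now armed inside probe itself rather than in a separate cc_pm_init() step, and the ordering is the canonical one: mark the device active before pm_runtime_enable(), take a reference for the rest of probe, and undo with put_noidle/disable/set_suspended on failure, because pm_runtime_get_sync() takes its reference even when it fails. A skeleton of that sequence (the timeout value is an assumption; CC_SUSPEND_TIMEOUT is driver-defined):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	#define MY_SUSPEND_TIMEOUT_MS	1000	/* illustrative */

	static int my_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int rc;

		/* Device is already powered: tell the PM core, then enable */
		pm_runtime_set_autosuspend_delay(dev, MY_SUSPEND_TIMEOUT_MS);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);

		rc = pm_runtime_get_sync(dev);	/* hold across probe */
		if (rc < 0)
			goto err_pm;

		/* ... hardware init that needs the device powered ... */

		pm_runtime_put(dev);	/* allow autosuspend from here on */
		return 0;

	err_pm:
		pm_runtime_put_noidle(dev);	/* get_sync still counted */
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		return rc;
	}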
@@ -382,7 +379,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
val, hw_rev->sig);
rc = -EINVAL;
- goto post_clk_err;
+ goto post_pm_err;
}
sig_cidr = val;
hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
@@ -393,7 +390,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
val, hw_rev->pidr_0124);
rc = -EINVAL;
- goto post_clk_err;
+ goto post_pm_err;
}
hw_rev_pidr = val;
@@ -402,7 +399,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
val, hw_rev->cidr_0123);
rc = -EINVAL;
- goto post_clk_err;
+ goto post_pm_err;
}
sig_cidr = val;
@@ -421,7 +418,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
default:
dev_err(dev, "Unsupported engines configuration.\n");
rc = -EINVAL;
- goto post_clk_err;
+ goto post_pm_err;
}
/* Check security disable state */
@@ -447,14 +444,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata);
if (rc) {
dev_err(dev, "Could not register to interrupt %d\n", irq);
- goto post_clk_err;
+ goto post_pm_err;
}
dev_dbg(dev, "Registered to IRQ: %d\n", irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
dev_err(dev, "init_cc_regs failed\n");
- goto post_clk_err;
+ goto post_pm_err;
}
rc = cc_debugfs_init(new_drvdata);
@@ -477,15 +474,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->mlli_sram_addr =
cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
- dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
- goto post_sram_mgr_err;
+ goto post_fips_init_err;
}
rc = cc_req_mgr_init(new_drvdata);
if (rc) {
dev_err(dev, "cc_req_mgr_init failed\n");
- goto post_sram_mgr_err;
+ goto post_fips_init_err;
}
rc = cc_buffer_mgr_init(new_drvdata);
@@ -494,12 +490,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_req_mgr_err;
}
- rc = cc_pm_init(new_drvdata);
- if (rc) {
- dev_err(dev, "cc_pm_init failed\n");
- goto post_buf_mgr_err;
- }
-
/* Allocate crypto algs */
rc = cc_cipher_alloc(new_drvdata);
if (rc) {
@@ -520,15 +510,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_hash_err;
}
- /* All set, we can allow autosuspend */
- cc_pm_go(new_drvdata);
-
/* If we got here and FIPS mode is enabled
* it means all FIPS test passed, so let TEE
* know we're good.
*/
cc_set_ree_fips_status(new_drvdata, true);
+ pm_runtime_put(dev);
return 0;
post_hash_err:
@@ -539,16 +527,17 @@ post_buf_mgr_err:
cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
cc_req_mgr_fini(new_drvdata);
-post_sram_mgr_err:
- cc_sram_mgr_fini(new_drvdata);
post_fips_init_err:
cc_fips_fini(new_drvdata);
post_debugfs_err:
cc_debugfs_fini(new_drvdata);
post_regs_err:
fini_cc_regs(new_drvdata);
-post_clk_err:
- cc_clk_off(new_drvdata);
+post_pm_err:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ clk_disable_unprepare(new_drvdata->clk);
return rc;
}
@@ -560,36 +549,22 @@ void fini_cc_regs(struct cc_drvdata *drvdata)
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
+ struct device *dev = &plat_dev->dev;
struct cc_drvdata *drvdata =
(struct cc_drvdata *)platform_get_drvdata(plat_dev);
cc_aead_free(drvdata);
cc_hash_free(drvdata);
cc_cipher_free(drvdata);
- cc_pm_fini(drvdata);
cc_buffer_mgr_fini(drvdata);
cc_req_mgr_fini(drvdata);
- cc_sram_mgr_fini(drvdata);
cc_fips_fini(drvdata);
cc_debugfs_fini(drvdata);
fini_cc_regs(drvdata);
- cc_clk_off(drvdata);
-}
-
-int cc_clk_on(struct cc_drvdata *drvdata)
-{
- struct clk *clk = drvdata->clk;
- int rc;
-
- if (IS_ERR(clk))
- /* Not all devices have a clock associated with CCREE */
- return 0;
-
- rc = clk_prepare_enable(clk);
- if (rc)
- return rc;
-
- return 0;
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ clk_disable_unprepare(drvdata->clk);
}
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
@@ -600,17 +575,6 @@ unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
return HASH_LEN_SIZE_630;
}
-void cc_clk_off(struct cc_drvdata *drvdata)
-{
- struct clk *clk = drvdata->clk;
-
- if (IS_ERR(clk))
- /* Not all devices have a clock associated with CCREE */
- return;
-
- clk_disable_unprepare(clk);
-}
-
static int ccree_probe(struct platform_device *plat_dev)
{
int rc;
@@ -653,7 +617,6 @@ static struct platform_driver ccree_driver = {
static int __init ccree_init(void)
{
- cc_hash_global_init();
cc_debugfs_global_init();
return platform_driver_register(&ccree_driver);
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index c227718ba992..d938886390d2 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -26,7 +26,6 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
-/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
@@ -71,9 +70,7 @@ enum cc_std_body {
#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
-#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
- CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
- CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+#define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE)
#define CC_CPP_AES_ABORT_MASK ( \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
@@ -139,15 +136,15 @@ struct cc_drvdata {
int irq;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
- cc_sram_addr_t mlli_sram_addr;
- void *buff_mgr_handle;
- void *cipher_handle;
+ u32 mlli_sram_addr;
+ struct dma_pool *mlli_buffs_pool;
+ struct list_head alg_list;
void *hash_handle;
void *aead_handle;
void *request_mgr_handle;
void *fips_handle;
- void *sram_mgr_handle;
- void *debugfs;
+ u32 sram_free_offset; /* offset to non-allocated area in SRAM */
+ struct dentry *dir; /* for debugfs */
struct clk *clk;
bool coherent;
char *hw_rev_name;
@@ -158,7 +155,6 @@ struct cc_drvdata {
int std_bodies;
bool sec_disabled;
u32 comp_mask;
- bool pm_on;
};
struct cc_crypto_alg {
@@ -212,8 +208,6 @@ static inline void dump_byte_array(const char *name, const u8 *the_array,
bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
-int cc_clk_on(struct cc_drvdata *drvdata);
-void cc_clk_off(struct cc_drvdata *drvdata);
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 912e5ce5079d..d5310783af15 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -20,8 +20,8 @@
#define CC_SM3_HASH_LEN_SIZE 8
struct cc_hash_handle {
- cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
- cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+ u32 digest_len_sram_addr; /* const value in SRAM*/
+ u32 larval_digest_sram_addr; /* const value in SRAM */
struct list_head hash_list;
};
@@ -39,12 +39,19 @@ static const u32 cc_sha256_init[] = {
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static u64 cc_sha384_init[] = {
- SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
- SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static u64 cc_sha512_init[] = {
- SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
- SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+
+/*
+ * Due to the way the HW works, every double word in the SHA384 and SHA512
+ * larval hashes must be stored in hi/lo order
+ */
+#define hilo(x) upper_32_bits(x), lower_32_bits(x)
+static const u32 cc_sha384_init[] = {
+ hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
+ hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
+static const u32 cc_sha512_init[] = {
+ hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
+ hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
+
static const u32 cc_sm3_init[] = {
SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
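
The hilo() macro replaces the old boot-time cc_swap_dwords() pass: the hardware consumes each 64-bit larval word as a {high, low} pair of 32-bit values, and doing the split at build time lets the tables become const. How one entry unfolds (the constant is shown literally for clarity; it is the standard SHA-512 H0 value):

	#include <linux/kernel.h>	/* upper_32_bits() / lower_32_bits() */

	#define hilo(x)	upper_32_bits(x), lower_32_bits(x)

	static const u32 example_init[] = { hilo(0x6a09e667f3bcc908ULL) };
	/* example_init[0] == 0x6a09e667, example_init[1] == 0xf3bcc908 */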
@@ -342,7 +349,6 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
/* Get final MAC result */
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
- /* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -422,8 +428,7 @@ static int cc_hash_digest(struct ahash_request *req)
bool is_hmac = ctx->is_hmac;
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- cc_sram_addr_t larval_digest_addr =
- cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+ u32 larval_digest_addr;
int idx = 0;
int rc = 0;
gfp_t flags = cc_gfp_flags(&req->base);
@@ -465,6 +470,8 @@ static int cc_hash_digest(struct ahash_request *req)
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
} else {
+ larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
+ ctx->hash_mode);
set_din_sram(&desc[idx], larval_digest_addr,
ctx->inter_digestsize);
}
@@ -726,7 +733,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
int digestsize = 0;
int i, idx = 0, rc = 0;
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- cc_sram_addr_t larval_addr;
+ u32 larval_addr;
struct device *dev;
ctx = crypto_ahash_ctx(ahash);
@@ -752,7 +759,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
return -ENOMEM;
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+ dma_map_single(dev, ctx->key_params.key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
@@ -1067,8 +1074,8 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
ctx->key_params.keylen = 0;
ctx->digest_buff_dma_addr =
- dma_map_single(dev, (void *)ctx->digest_buff,
- sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
@@ -1079,7 +1086,7 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
&ctx->digest_buff_dma_addr);
ctx->opad_tmp_keys_dma_addr =
- dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+ dma_map_single(dev, ctx->opad_tmp_keys_buff,
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
@@ -1196,8 +1203,8 @@ static int cc_mac_update(struct ahash_request *req)
idx++;
/* Setup request structure */
- cc_req.user_cb = (void *)cc_update_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
@@ -1254,8 +1261,8 @@ static int cc_mac_final(struct ahash_request *req)
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_hash_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
if (state->xcbc_count && rem_cnt == 0) {
/* Load key for ECB decryption */
@@ -1311,7 +1318,6 @@ static int cc_mac_final(struct ahash_request *req)
/* Get final MAC result */
hw_desc_init(&desc[idx]);
- /* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
digestsize, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -1369,8 +1375,8 @@ static int cc_mac_finup(struct ahash_request *req)
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_hash_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
@@ -1393,7 +1399,6 @@ static int cc_mac_finup(struct ahash_request *req)
/* Get final MAC result */
hw_desc_init(&desc[idx]);
- /* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
digestsize, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -1448,8 +1453,8 @@ static int cc_mac_digest(struct ahash_request *req)
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_digest_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
@@ -1820,7 +1825,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
struct crypto_alg *alg;
struct ahash_alg *halg;
- t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
+ t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
if (!t_crypto_alg)
return ERR_PTR(-ENOMEM);
@@ -1857,104 +1862,85 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
return t_crypto_alg;
}
+static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
+ unsigned int size, u32 *sram_buff_ofs)
+{
+ struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ unsigned int larval_seq_len = 0;
+ int rc;
+
+ cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ return rc;
+
+ *sram_buff_ofs += size;
+ return 0;
+}
+
int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
- cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
- unsigned int larval_seq_len = 0;
- struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
int rc = 0;
/* Copy-to-sram digest-len */
- cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
- ARRAY_SIZE(cc_digest_len_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
+ sizeof(cc_digest_len_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_digest_len_init);
- larval_seq_len = 0;
-
if (large_sha_supported) {
/* Copy-to-sram digest-len for sha384/512 */
- cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
- ARRAY_SIZE(cc_digest_len_sha512_init),
- larval_seq, &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
+ sizeof(cc_digest_len_sha512_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
-
- sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
- larval_seq_len = 0;
}
/* The initial digests offset */
hash_handle->larval_digest_sram_addr = sram_buff_ofs;
/* Copy-to-sram initial SHA* digests */
- cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
- larval_seq, &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_md5_init);
- larval_seq_len = 0;
- cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
- ARRAY_SIZE(cc_sha1_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sha1_init);
- larval_seq_len = 0;
- cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
- ARRAY_SIZE(cc_sha224_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sha224_init);
- larval_seq_len = 0;
- cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
- ARRAY_SIZE(cc_sha256_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sha256_init);
- larval_seq_len = 0;
if (sm3_supported) {
- cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
- ARRAY_SIZE(cc_sm3_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sm3_init,
+ sizeof(cc_sm3_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sm3_init);
- larval_seq_len = 0;
}
if (large_sha_supported) {
- cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
- (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha384_init,
+ sizeof(cc_sha384_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sha384_init);
- larval_seq_len = 0;
- cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
- (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha512_init,
+ sizeof(cc_sha512_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
}
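
cc_init_copy_sram() folds the previously copy-pasted "build descriptor sequence, send it, advance the SRAM offset" triplet into one call per constant table, which is where most of this hunk's line savings come from. Taken one step further, the remaining call chain could even be table-driven — a sketch only, reusing the patch's helper and two of the existing tables:

	/* in cc_hash.c, after the larval tables above */
	static int copy_tables_to_sram(struct cc_drvdata *drvdata, u32 *ofs)
	{
		static const struct {
			const u32 *data;
			unsigned int size;
		} tables[] = {
			{ cc_md5_init, sizeof(cc_md5_init) },
			{ cc_sha1_init, sizeof(cc_sha1_init) },
			/* ... */
		};
		int i, rc;

		for (i = 0; i < ARRAY_SIZE(tables); i++) {
			rc = cc_init_copy_sram(drvdata, tables[i].data,
					       tables[i].size, ofs);
			if (rc)
				return rc;
		}
		return 0;
	}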
@@ -1963,38 +1949,16 @@ init_digest_const_err:
return rc;
}
-static void __init cc_swap_dwords(u32 *buf, unsigned long size)
-{
- int i;
- u32 tmp;
-
- for (i = 0; i < size; i += 2) {
- tmp = buf[i];
- buf[i] = buf[i + 1];
- buf[i + 1] = tmp;
- }
-}
-
-/*
- * Due to the way the HW works we need to swap every
- * double word in the SHA384 and SHA512 larval hashes
- */
-void __init cc_hash_global_init(void)
-{
- cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
- cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
-}
-
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle;
- cc_sram_addr_t sram_buff;
+ u32 sram_buff;
u32 sram_size_to_alloc;
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
int alg;
- hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
+ hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
if (!hash_handle)
return -ENOMEM;
@@ -2016,7 +1980,6 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
- dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto fail;
}
@@ -2056,12 +2019,10 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
- kfree(t_alg);
goto fail;
- } else {
- list_add_tail(&t_alg->entry,
- &hash_handle->hash_list);
}
+
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
if (hw_mode == DRV_CIPHER_XCBC_MAC ||
hw_mode == DRV_CIPHER_CMAC)
@@ -2081,18 +2042,16 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
- kfree(t_alg);
goto fail;
- } else {
- list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
+
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
return 0;
fail:
- kfree(drvdata->hash_handle);
- drvdata->hash_handle = NULL;
+ cc_hash_free(drvdata);
return rc;
}
@@ -2101,17 +2060,12 @@ int cc_hash_free(struct cc_drvdata *drvdata)
struct cc_hash_alg *t_hash_alg, *hash_n;
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
- if (hash_handle) {
- list_for_each_entry_safe(t_hash_alg, hash_n,
- &hash_handle->hash_list, entry) {
- crypto_unregister_ahash(&t_hash_alg->ahash_alg);
- list_del(&t_hash_alg->entry);
- kfree(t_hash_alg);
- }
-
- kfree(hash_handle);
- drvdata->hash_handle = NULL;
+ list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
+ entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
}
+
return 0;
}
@@ -2272,22 +2226,23 @@ static const void *cc_larval_digest(struct device *dev, u32 mode)
}
}
-/*!
- * Gets the address of the initial digest in SRAM
+/**
+ * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
* according to the given hash mode
*
- * \param drvdata
- * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
*
- * \return u32 The address of the initial digest in SRAM
+ * Return:
+ * The address of the initial digest in SRAM
*/
-cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
+u32 cc_larval_digest_addr(void *drvdata, u32 mode)
{
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
- cc_sram_addr_t addr;
+ u32 addr;
switch (mode) {
case DRV_HASH_NULL:
@@ -2339,12 +2294,11 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
return hash_handle->larval_digest_sram_addr;
}
-cc_sram_addr_t
-cc_digest_len_addr(void *drvdata, u32 mode)
+u32 cc_digest_len_addr(void *drvdata, u32 mode)
{
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
- cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+ u32 digest_len_addr = hash_handle->digest_len_sram_addr;
switch (mode) {
case DRV_HASH_SHA1:
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 0d6dc61484d7..3d0f2179e07e 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -80,30 +80,27 @@ int cc_hash_alloc(struct cc_drvdata *drvdata);
int cc_init_hash_sram(struct cc_drvdata *drvdata);
int cc_hash_free(struct cc_drvdata *drvdata);
-/*!
- * Gets the initial digest length
+/**
+ * cc_digest_len_addr() - Gets the initial digest length
*
- * \param drvdata
- * \param mode The Hash mode. Supported modes:
- * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
- * \return u32 returns the address of the initial digest length in SRAM
+ * Return:
+ * Returns the address of the initial digest length in SRAM
*/
-cc_sram_addr_t
-cc_digest_len_addr(void *drvdata, u32 mode);
+u32 cc_digest_len_addr(void *drvdata, u32 mode);
-/*!
- * Gets the address of the initial digest in SRAM
+/**
+ * cc_larval_digest_addr() - Gets the address of the initial digest in SRAM
* according to the given hash mode
*
- * \param drvdata
- * \param mode The Hash mode. Supported modes:
- * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
- * \return u32 The address of the initial digest in SRAM
+ * Return:
+ * The address of the initial digest in SRAM
*/
-cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
-
-void cc_hash_global_init(void);
+u32 cc_larval_digest_addr(void *drvdata, u32 mode);
#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 9f4db9956e91..15df58c66911 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -17,46 +17,43 @@
/* Define max. available slots in HW queue */
#define HW_QUEUE_SLOTS_MAX 15
-#define CC_REG_LOW(word, name) \
- (CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
-
-#define CC_REG_HIGH(word, name) \
- (CC_REG_LOW(word, name) + \
- CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
-
-#define CC_GENMASK(word, name) \
- GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
-
-#define WORD0_VALUE CC_GENMASK(0, VALUE)
-#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
-#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
-#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
-#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
-#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
-#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
-#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
-#define WORD2_VALUE CC_GENMASK(2, VALUE)
-#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
-#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
-#define WORD3_DOUT_SIZE CC_GENMASK(3, DOUT_SIZE)
-#define WORD3_HASH_XOR_BIT CC_GENMASK(3, HASH_XOR_BIT)
-#define WORD3_NS_BIT CC_GENMASK(3, NS_BIT)
-#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
-#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
-#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
-#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY)
-#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
-#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
-#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
-#define WORD4_CIPHER_CONF2 CC_GENMASK(4, CIPHER_CONF2)
-#define WORD4_CIPHER_DO CC_GENMASK(4, CIPHER_DO)
-#define WORD4_CIPHER_MODE CC_GENMASK(4, CIPHER_MODE)
-#define WORD4_CMAC_SIZE0 CC_GENMASK(4, CMAC_SIZE0)
-#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
-#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
-#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
-#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
-#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)
+#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
+#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
+#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))
+
+#define CC_HWQ_GENMASK(word, field) \
+ CC_GENMASK(CC_DSCRPTR_QUEUE_WORD ## word ## _ ## field)
+
+#define WORD0_VALUE CC_HWQ_GENMASK(0, VALUE)
+#define WORD0_CPP_CIPHER_MODE CC_HWQ_GENMASK(0, CPP_CIPHER_MODE)
+#define WORD1_DIN_CONST_VALUE CC_HWQ_GENMASK(1, DIN_CONST_VALUE)
+#define WORD1_DIN_DMA_MODE CC_HWQ_GENMASK(1, DIN_DMA_MODE)
+#define WORD1_DIN_SIZE CC_HWQ_GENMASK(1, DIN_SIZE)
+#define WORD1_NOT_LAST CC_HWQ_GENMASK(1, NOT_LAST)
+#define WORD1_NS_BIT CC_HWQ_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE CC_HWQ_GENMASK(1, LOCK_QUEUE)
+#define WORD2_VALUE CC_HWQ_GENMASK(2, VALUE)
+#define WORD3_DOUT_DMA_MODE CC_HWQ_GENMASK(3, DOUT_DMA_MODE)
+#define WORD3_DOUT_LAST_IND CC_HWQ_GENMASK(3, DOUT_LAST_IND)
+#define WORD3_DOUT_SIZE CC_HWQ_GENMASK(3, DOUT_SIZE)
+#define WORD3_HASH_XOR_BIT CC_HWQ_GENMASK(3, HASH_XOR_BIT)
+#define WORD3_NS_BIT CC_HWQ_GENMASK(3, NS_BIT)
+#define WORD3_QUEUE_LAST_IND CC_HWQ_GENMASK(3, QUEUE_LAST_IND)
+#define WORD4_ACK_NEEDED CC_HWQ_GENMASK(4, ACK_NEEDED)
+#define WORD4_AES_SEL_N_HASH CC_HWQ_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_AES_XOR_CRYPTO_KEY CC_HWQ_GENMASK(4, AES_XOR_CRYPTO_KEY)
+#define WORD4_BYTES_SWAP CC_HWQ_GENMASK(4, BYTES_SWAP)
+#define WORD4_CIPHER_CONF0 CC_HWQ_GENMASK(4, CIPHER_CONF0)
+#define WORD4_CIPHER_CONF1 CC_HWQ_GENMASK(4, CIPHER_CONF1)
+#define WORD4_CIPHER_CONF2 CC_HWQ_GENMASK(4, CIPHER_CONF2)
+#define WORD4_CIPHER_DO CC_HWQ_GENMASK(4, CIPHER_DO)
+#define WORD4_CIPHER_MODE CC_HWQ_GENMASK(4, CIPHER_MODE)
+#define WORD4_CMAC_SIZE0 CC_HWQ_GENMASK(4, CMAC_SIZE0)
+#define WORD4_DATA_FLOW_MODE CC_HWQ_GENMASK(4, DATA_FLOW_MODE)
+#define WORD4_KEY_SIZE CC_HWQ_GENMASK(4, KEY_SIZE)
+#define WORD4_SETUP_OPERATION CC_HWQ_GENMASK(4, SETUP_OPERATION)
+#define WORD5_DIN_ADDR_HIGH CC_HWQ_GENMASK(5, DIN_ADDR_HIGH)
+#define WORD5_DOUT_ADDR_HIGH CC_HWQ_GENMASK(5, DOUT_ADDR_HIGH)
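
Splitting the generic mask builder out of the queue-word macro means any register whose header provides NAME_BIT_SHIFT/NAME_BIT_SIZE pairs can use CC_GENMASK() — the cc_driver.h hunk above already switched AXIM_MON_COMP_VALUE over to it. One expansion traced through, with an invented field so the arithmetic is visible:

	#include <linux/bits.h>		/* GENMASK() */

	/* Illustrative generated constants, not the real DIN_SIZE values */
	#define CC_EXAMPLE_FIELD_BIT_SHIFT	2
	#define CC_EXAMPLE_FIELD_BIT_SIZE	24

	#define CC_REG_LOW(name)  (name ## _BIT_SHIFT)
	#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
	#define CC_GENMASK(name)  GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

	/* Expands to GENMASK(25, 2) == 0x03fffffc */
	#define EXAMPLE_FIELD_MASK CC_GENMASK(CC_EXAMPLE_FIELD)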
/******************************************************************************
* TYPE DEFINITIONS
@@ -207,31 +204,32 @@ enum cc_hash_cipher_pad {
/* Descriptor packing macros */
/*****************************/
-/*
- * Init a HW descriptor struct
- * @pdesc: pointer HW descriptor struct
+/**
+ * hw_desc_init() - Init a HW descriptor struct
+ * @pdesc: pointer to HW descriptor struct
*/
static inline void hw_desc_init(struct cc_hw_desc *pdesc)
{
memset(pdesc, 0, sizeof(struct cc_hw_desc));
}
-/*
- * Indicates the end of current HW descriptors flow and release the HW engines.
+/**
+ * set_queue_last_ind_bit() - Indicate the end of current HW descriptors flow
+ * and release the HW engines.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc)
{
pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
}
-/*
- * Set the DIN field of a HW descriptors
+/**
+ * set_din_type() - Set the DIN field of a HW descriptor
*
- * @pdesc: pointer HW descriptor struct
- * @dma_mode: dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
- * @addr: dinAdr DIN address
+ * @pdesc: Pointer to HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DIN address
* @size: Data size in bytes
* @axi_sec: AXI secure bit
*/
@@ -239,20 +237,20 @@ static inline void set_din_type(struct cc_hw_desc *pdesc,
enum cc_dma_mode dma_mode, dma_addr_t addr,
u32 size, enum cc_axi_sec axi_sec)
{
- pdesc->word[0] = (u32)addr;
+ pdesc->word[0] = lower_32_bits(addr);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
+ pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, upper_32_bits(addr));
#endif
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
FIELD_PREP(WORD1_DIN_SIZE, size) |
FIELD_PREP(WORD1_NS_BIT, axi_sec);
}
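
lower_32_bits()/upper_32_bits() state the intent directly and retire the old (u16)(addr >> 32) cast, which would silently discard bits 48..63 of a wide DMA address before FIELD_PREP() ever saw them (whether that could bite in practice depends on how wide the *_ADDR_HIGH field really is). The difference, isolated into a throwaway helper:

	#include <linux/kernel.h>	/* upper_32_bits() / lower_32_bits() */
	#include <linux/types.h>

	static void split_dma_addr(dma_addr_t addr, u32 *lo, u32 *hi)
	{
		*lo = lower_32_bits(addr);	/* bits 0..31  */
		*hi = upper_32_bits(addr);	/* bits 32..63 */
		/*
		 * (u16)(addr >> 32) drops bits 48..63: for
		 * addr = 0x0001234500000000ULL it yields 0x2345
		 * instead of 0x00012345.
		 */
	}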
-/*
- * Set the DIN field of a HW descriptors to NO DMA mode.
+/**
+ * set_din_no_dma() - Set the DIN field of a HW descriptor to NO DMA mode.
* Used for NOP descriptor, register patches and other special modes.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DIN address
* @size: Data size in bytes
*/
@@ -262,14 +260,11 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
}
-/*
- * Setup the special CPP descriptor
+/**
+ * set_cpp_crypto_key() - Setup the special CPP descriptor
*
- * @pdesc: pointer HW descriptor struct
- * @alg: cipher used (AES / SM4)
- * @mode: mode used (CTR or CBC)
- * @slot: slot number
- * @ksize: key size
+ * @pdesc: Pointer to HW descriptor struct
+ * @slot: Slot number
*/
static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
{
@@ -281,27 +276,26 @@ static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
}
-/*
- * Set the DIN field of a HW descriptors to SRAM mode.
+/**
+ * set_din_sram() - Set the DIN field of a HW descriptor to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
- * adaptor will enforce alignment check.
+ * the adaptor will enforce alignment checks.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DIN address
- * @size Data size in bytes
+ * @size: Data size in bytes
*/
-static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
- u32 size)
+static inline void set_din_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
{
- pdesc->word[0] = (u32)addr;
+ pdesc->word[0] = addr;
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
}
-/*
- * Set the DIN field of a HW descriptors to CONST mode
+/**
+ * set_din_const() - Set the DIN field of a HW descriptor to CONST mode
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @val: DIN const value
* @size: Data size in bytes
*/
@@ -313,20 +307,20 @@ static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
FIELD_PREP(WORD1_DIN_SIZE, size);
}
-/*
- * Set the DIN not last input data indicator
+/**
+ * set_din_not_last_indication() - Set the DIN not last input data indicator
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
{
pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
}
-/*
- * Set the DOUT field of a HW descriptors
+/**
+ * set_dout_type() - Set the DOUT field of a HW descriptor
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
* @addr: DOUT address
* @size: Data size in bytes
@@ -336,24 +330,24 @@ static inline void set_dout_type(struct cc_hw_desc *pdesc,
enum cc_dma_mode dma_mode, dma_addr_t addr,
u32 size, enum cc_axi_sec axi_sec)
{
- pdesc->word[2] = (u32)addr;
+ pdesc->word[2] = lower_32_bits(addr);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
+ pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, upper_32_bits(addr));
#endif
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
FIELD_PREP(WORD3_DOUT_SIZE, size) |
FIELD_PREP(WORD3_NS_BIT, axi_sec);
}
-/*
- * Set the DOUT field of a HW descriptors to DLLI type
+/**
+ * set_dout_dlli() - Set the DOUT field of a HW descriptor to DLLI type
* The LAST INDICATION is provided by the user
*
- * @pdesc pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DOUT address
* @size: Data size in bytes
- * @last_ind: The last indication bit
* @axi_sec: AXI secure bit
+ * @last_ind: The last indication bit
*/
static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
u32 size, enum cc_axi_sec axi_sec,
@@ -363,29 +357,28 @@ static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
}
-/*
- * Set the DOUT field of a HW descriptors to DLLI type
+/**
+ * set_dout_mlli() - Set the DOUT field of a HW descriptor to MLLI type
* The LAST INDICATION is provided by the user
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DOUT address
* @size: Data size in bytes
- * @last_ind: The last indication bit
* @axi_sec: AXI secure bit
+ * @last_ind: The last indication bit
*/
-static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
- u32 size, enum cc_axi_sec axi_sec,
- bool last_ind)
+static inline void set_dout_mlli(struct cc_hw_desc *pdesc, u32 addr, u32 size,
+ enum cc_axi_sec axi_sec, bool last_ind)
{
set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
}
-/*
- * Set the DOUT field of a HW descriptors to NO DMA mode.
+/**
+ * set_dout_no_dma() - Set the DOUT field of a HW descriptor to NO DMA mode.
* Used for NOP descriptor, register patches and other special modes.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: pointer to HW descriptor struct
* @addr: DOUT address
* @size: Data size in bytes
* @write_enable: Enables a write operation to a register
@@ -398,54 +391,55 @@ static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
}
-/*
- * Set the word for the XOR operation.
+/**
+ * set_xor_val() - Set the word for the XOR operation.
*
- * @pdesc: pointer HW descriptor struct
- * @val: xor data value
+ * @pdesc: Pointer to HW descriptor struct
+ * @val: XOR data value
*/
static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
{
pdesc->word[2] = val;
}
-/*
- * Sets the XOR indicator bit in the descriptor
+/**
+ * set_xor_active() - Set the XOR indicator bit in the descriptor
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_xor_active(struct cc_hw_desc *pdesc)
{
pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
}
-/*
- * Select the AES engine instead of HASH engine when setting up combined mode
- * with AES XCBC MAC
+/**
+ * set_aes_not_hash_mode() - Select the AES engine instead of HASH engine when
+ * setting up combined mode with AES XCBC MAC
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
{
pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
}
-/*
- * Set aes xor crypto key, this in some secenrios select SM3 engine
+/**
+ * set_aes_xor_crypto_key() - Set aes xor crypto key, which in some scenarios
+ * selects the SM3 engine
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc)
{
pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1);
}
-/*
- * Set the DOUT field of a HW descriptors to SRAM mode
+/**
+ * set_dout_sram() - Set the DOUT field of a HW descriptor to SRAM mode
* Note: No need to check SRAM alignment since host requests do not use SRAM and
- * adaptor will enforce alignment check.
+ * the adaptor will enforce alignment checks.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DOUT address
* @size: Data size in bytes
*/
@@ -456,32 +450,34 @@ static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
FIELD_PREP(WORD3_DOUT_SIZE, size);
}
-/*
- * Sets the data unit size for XEX mode in data_out_addr[15:0]
+/**
+ * set_xex_data_unit_size() - Set the data unit size for XEX mode in
+ * data_out_addr[15:0]
*
- * @pdesc: pDesc pointer HW descriptor struct
- * @size: data unit size for XEX mode
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Data unit size for XEX mode
*/
static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
{
pdesc->word[2] = size;
}
-/*
- * Set the number of rounds for Multi2 in data_out_addr[15:0]
+/**
+ * set_multi2_num_rounds() - Set the number of rounds for Multi2 in
+ * data_out_addr[15:0]
*
- * @pdesc: pointer HW descriptor struct
- * @num: number of rounds for Multi2
+ * @pdesc: Pointer to HW descriptor struct
+ * @num: Number of rounds for Multi2
*/
static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
{
pdesc->word[2] = num;
}
-/*
- * Set the flow mode.
+/**
+ * set_flow_mode() - Set the flow mode.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
static inline void set_flow_mode(struct cc_hw_desc *pdesc,
@@ -490,22 +486,22 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
}
-/*
- * Set the cipher mode.
+/**
+ * set_cipher_mode() - Set the cipher mode.
*
- * @pdesc: pointer HW descriptor struct
- * @mode: Any one of the modes defined in [CC7x-DESC]
+ * @pdesc: Pointer to HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
*/
static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
-/*
- * Set the cipher mode for hash algorithms.
+/**
+ * set_hash_cipher_mode() - Set the cipher mode for hash algorithms.
*
- * @pdesc: pointer HW descriptor struct
- * @cipher_mode: Any one of the modes defined in [CC7x-DESC]
+ * @pdesc: Pointer to HW descriptor struct
+ * @cipher_mode: Any one of the modes defined in [CC7x-DESC]
 * @hash_mode: Specifies which hash is being handled
*/
static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
@@ -517,10 +513,10 @@ static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
set_aes_xor_crypto_key(pdesc);
}
-/*
- * Set the cipher configuration fields.
+/**
+ * set_cipher_config0() - Set the cipher configuration fields.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
@@ -528,11 +524,11 @@ static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}
-/*
- * Set the cipher configuration fields.
+/**
+ * set_cipher_config1() - Set the cipher configuration fields.
*
- * @pdesc: pointer HW descriptor struct
- * @config: Any one of the modes defined in [CC7x-DESC]
+ * @pdesc: Pointer to HW descriptor struct
+ * @config: Padding mode
*/
static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
enum cc_hash_conf_pad config)
@@ -540,10 +536,10 @@ static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
}
-/*
- * Set HW key configuration fields.
+/**
+ * set_hw_crypto_key() - Set HW key configuration fields.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
 * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
*/
static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
@@ -555,64 +551,64 @@ static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
(hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
}
-/*
- * Set byte order of all setup-finalize descriptors.
+/**
+ * set_bytes_swap() - Set byte order of all setup-finalize descriptors.
*
- * @pdesc: pointer HW descriptor struct
- * @config: Any one of the modes defined in [CC7x-DESC]
+ * @pdesc: Pointer to HW descriptor struct
+ * @config: True to enable byte swapping
*/
static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
{
pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
}
-/*
- * Set CMAC_SIZE0 mode.
+/**
+ * set_cmac_size0_mode() - Set CMAC_SIZE0 mode.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
}
-/*
- * Set key size descriptor field.
+/**
+ * set_key_size() - Set key size descriptor field.
*
- * @pdesc: pointer HW descriptor struct
- * @size: key size in bytes (NOT size code)
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
*/
static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
{
pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
}
-/*
- * Set AES key size.
+/**
+ * set_key_size_aes() - Set AES key size.
*
- * @pdesc: pointer HW descriptor struct
- * @size: key size in bytes (NOT size code)
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
*/
static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
{
set_key_size(pdesc, ((size >> 3) - 2));
}
-/*
- * Set DES key size.
+/**
+ * set_key_size_des() - Set DES key size.
*
- * @pdesc: pointer HW descriptor struct
- * @size: key size in bytes (NOT size code)
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
*/
static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
{
set_key_size(pdesc, ((size >> 3) - 1));
}
-/*
- * Set the descriptor setup mode
+/**
+ * set_setup_mode() - Set the descriptor setup mode
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @mode: Any one of the setup modes defined in [CC7x-DESC]
*/
static inline void set_setup_mode(struct cc_hw_desc *pdesc,
@@ -621,10 +617,10 @@ static inline void set_setup_mode(struct cc_hw_desc *pdesc,
pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
}
-/*
- * Set the descriptor cipher DO
+/**
+ * set_cipher_do() - Set the descriptor cipher DO
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @config: Any one of the cipher do defined in [CC7x-DESC]
*/
static inline void set_cipher_do(struct cc_hw_desc *pdesc,
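
For context, these setters are meant to be composed on a zero-initialized
descriptor. A minimal sketch, assuming the hw_desc_init() helper and the
DRV_*/SETUP_*/S_DIN_to_AES enums defined elsewhere in the ccree driver
(illustrative only, not part of this patch):

	struct cc_hw_desc desc;

	hw_desc_init(&desc);				/* zero all descriptor words */
	set_cipher_mode(&desc, DRV_CIPHER_CBC);		/* mode per [CC7x-DESC] */
	set_cipher_config0(&desc, DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc, AES_KEYSIZE_128);	/* key size in bytes, NOT size code */
	set_setup_mode(&desc, SETUP_LOAD_KEY0);
	set_flow_mode(&desc, S_DIN_to_AES);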
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 24c368b866f6..d39e1664fc7e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -15,29 +15,25 @@
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
-const struct dev_pm_ops ccree_pm = {
- SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
-};
-
-int cc_pm_suspend(struct device *dev)
+static int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
- cc_clk_off(drvdata);
+ clk_disable_unprepare(drvdata->clk);
return 0;
}
-int cc_pm_resume(struct device *dev)
+static int cc_pm_resume(struct device *dev)
{
int rc;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
/* Enables the device source clk */
- rc = cc_clk_on(drvdata);
+ rc = clk_prepare_enable(drvdata->clk);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
@@ -62,53 +58,19 @@ int cc_pm_resume(struct device *dev)
return 0;
}
+const struct dev_pm_ops ccree_pm = {
+ SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
+};
+
int cc_pm_get(struct device *dev)
{
- int rc = 0;
- struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-
- if (drvdata->pm_on)
- rc = pm_runtime_get_sync(dev);
+ int rc = pm_runtime_get_sync(dev);
return (rc == 1 ? 0 : rc);
}
void cc_pm_put_suspend(struct device *dev)
{
- struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-
- if (drvdata->pm_on) {
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
-}
-
-bool cc_pm_is_dev_suspended(struct device *dev)
-{
- /* check device state using runtime api */
- return pm_runtime_suspended(dev);
-}
-
-int cc_pm_init(struct cc_drvdata *drvdata)
-{
- struct device *dev = drvdata_to_dev(drvdata);
-
- /* must be before the enabling to avoid redundant suspending */
- pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(dev);
- /* set us as active - note we won't do PM ops until cc_pm_go()! */
- return pm_runtime_set_active(dev);
-}
-
-/* enable the PM module*/
-void cc_pm_go(struct cc_drvdata *drvdata)
-{
- pm_runtime_enable(drvdata_to_dev(drvdata));
- drvdata->pm_on = true;
-}
-
-void cc_pm_fini(struct cc_drvdata *drvdata)
-{
- pm_runtime_disable(drvdata_to_dev(drvdata));
- drvdata->pm_on = false;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
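
The simplified helpers lean directly on runtime PM: pm_runtime_get_sync()
returns 1 when the device is already active, which cc_pm_get() maps to 0,
and cc_pm_put_suspend() arms autosuspend instead of suspending immediately.
A sketch of the expected call pattern around a request (illustrative only):

	rc = cc_pm_get(dev);		/* resume the HW if it was autosuspended */
	if (rc)
		return rc;

	/* ... enqueue descriptors, wait for completion ... */

	cc_pm_put_suspend(dev);		/* mark busy, let autosuspend power down */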
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 80a18e11cae4..50cac33de118 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -15,26 +15,11 @@
extern const struct dev_pm_ops ccree_pm;
-int cc_pm_init(struct cc_drvdata *drvdata);
-void cc_pm_go(struct cc_drvdata *drvdata);
-void cc_pm_fini(struct cc_drvdata *drvdata);
-int cc_pm_suspend(struct device *dev);
-int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
void cc_pm_put_suspend(struct device *dev);
-bool cc_pm_is_dev_suspended(struct device *dev);
#else
-static inline int cc_pm_init(struct cc_drvdata *drvdata)
-{
- return 0;
-}
-
-static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
-
-static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
-
static inline int cc_pm_get(struct device *dev)
{
return 0;
@@ -42,12 +27,6 @@ static inline int cc_pm_get(struct device *dev)
static inline void cc_pm_put_suspend(struct device *dev) {}
-static inline bool cc_pm_is_dev_suspended(struct device *dev)
-{
- /* if PM not supported device is never suspend */
- return false;
-}
-
#endif
#endif /*__POWER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 9d61e6f12478..1d7649ecf44e 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -206,12 +206,13 @@ static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
}
}
-/*!
- * Completion will take place if and only if user requested completion
- * by cc_send_sync_request().
+/**
+ * request_mgr_complete() - Completion will take place if and only if user
+ * requested completion by cc_send_sync_request().
*
- * \param dev
- * \param dx_compl_h The completion event to signal
+ * @dev: Device pointer
+ * @dx_compl_h: The completion event to signal
+ * @dummy: Unused error code
*/
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
int dummy)
@@ -264,15 +265,15 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
return -ENOSPC;
}
-/*!
- * Enqueue caller request to crypto hardware.
+/**
+ * cc_do_send_request() - Enqueue caller request to crypto hardware.
* Need to be called with HW lock held and PM running
*
- * \param drvdata
- * \param cc_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param add_comp If "true": add an artificial dout DMA to mark completion
+ * @drvdata: Associated device driver context
+ * @cc_req: The request to enqueue
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ * @add_comp: If "true": add an artificial dout DMA to mark completion
*
*/
static void cc_do_send_request(struct cc_drvdata *drvdata,
@@ -295,7 +296,6 @@ static void cc_do_send_request(struct cc_drvdata *drvdata,
req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
(MAX_REQUEST_QUEUE_SIZE - 1);
- /* TODO: Use circ_buf.h ? */
dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
@@ -377,7 +377,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
rc = cc_queues_status(drvdata, mgr, bli->len);
if (rc) {
/*
- * There is still not room in the FIFO for
+ * There is still no room in the FIFO for
* this request. Bail out. We'll return here
* on the next completion irq.
*/
@@ -476,10 +476,6 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
break;
spin_unlock_bh(&mgr->hw_lock);
- if (rc != -EAGAIN) {
- cc_pm_put_suspend(dev);
- return rc;
- }
wait_for_completion_interruptible(&drvdata->hw_queue_avail);
reinit_completion(&drvdata->hw_queue_avail);
}
@@ -490,16 +486,18 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
return 0;
}
-/*!
- * Enqueue caller request to crypto hardware during init process.
- * assume this function is not called in middle of a flow,
+/**
+ * send_request_init() - Enqueue caller request to crypto hardware during init
+ * process.
+ * Assume this function is not called in the middle of a flow,
* since we set QUEUE_LAST_IND flag in the last descriptor.
*
- * \param drvdata
- * \param desc The crypto sequence
- * \param len The crypto sequence length
+ * @drvdata: Associated device driver context
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
*
- * \return int Returns "0" upon success
+ * Return:
+ * Returns "0" upon success
*/
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
unsigned int len)
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index ff7746aaaf35..ae25ca843dce 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -12,18 +12,17 @@
int cc_req_mgr_init(struct cc_drvdata *drvdata);
-/*!
- * Enqueue caller request to crypto hardware.
+/**
+ * cc_send_request() - Enqueue caller request to crypto hardware.
*
- * \param drvdata
- * \param cc_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller
- * If "false": this function adds a dummy descriptor completion
- * and waits upon completion signal.
+ * @drvdata: Associated device driver context
+ * @cc_req: The request to enqueue
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ * @req: Asynchronous crypto request
*
- * \return int Returns -EINPROGRESS or error
+ * Return:
+ * Returns -EINPROGRESS or error
*/
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
struct cc_hw_desc *desc, unsigned int len,
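
As the reworked kernel-doc states, cc_send_request() completes asynchronously.
A sketch of the caller contract, assuming the cc_crypto_req completion
callback used elsewhere in the driver (illustrative only):

	rc = cc_send_request(drvdata, &cc_req, desc, seq_len, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		goto unmap;	/* hard error: undo DMA mappings */
	/* otherwise the completion arrives through cc_req's callback */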
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index 62c885e6e791..37a95856361f 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -5,88 +5,61 @@
#include "cc_sram_mgr.h"
/**
- * struct cc_sram_ctx -Internal RAM context manager
- * @sram_free_offset: the offset to the non-allocated area
- */
-struct cc_sram_ctx {
- cc_sram_addr_t sram_free_offset;
-};
-
-/**
- * cc_sram_mgr_fini() - Cleanup SRAM pool.
- *
- * @drvdata: Associated device driver context
- */
-void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
-{
- /* Nothing needed */
-}
-
-/**
* cc_sram_mgr_init() - Initializes SRAM pool.
* The pool starts right at the beginning of SRAM.
* Returns zero for success, negative value otherwise.
*
* @drvdata: Associated device driver context
+ *
+ * Return:
+ * 0 for success, negative error code for failure.
*/
int cc_sram_mgr_init(struct cc_drvdata *drvdata)
{
- struct cc_sram_ctx *ctx;
- dma_addr_t start = 0;
+ u32 start = 0;
struct device *dev = drvdata_to_dev(drvdata);
if (drvdata->hw_rev < CC_HW_REV_712) {
/* Pool starts after ROM bytes */
- start = (dma_addr_t)cc_ioread(drvdata,
- CC_REG(HOST_SEP_SRAM_THRESHOLD));
-
+ start = cc_ioread(drvdata, CC_REG(HOST_SEP_SRAM_THRESHOLD));
if ((start & 0x3) != 0) {
- dev_err(dev, "Invalid SRAM offset %pad\n", &start);
+ dev_err(dev, "Invalid SRAM offset 0x%x\n", start);
return -EINVAL;
}
}
- /* Allocate "this" context */
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-
- if (!ctx)
- return -ENOMEM;
-
- ctx->sram_free_offset = start;
- drvdata->sram_mgr_handle = ctx;
-
+ drvdata->sram_free_offset = start;
return 0;
}
-/*!
- * Allocated buffer from SRAM pool.
- * Note: Caller is responsible to free the LAST allocated buffer.
- * This function does not taking care of any fragmentation may occur
- * by the order of calls to alloc/free.
+/**
+ * cc_sram_alloc() - Allocate buffer from SRAM pool.
+ *
+ * @drvdata: Associated device driver context
+ * @size: The requested number of bytes to allocate
*
- * \param drvdata
- * \param size The requested bytes to allocate
+ * Return:
+ * Address offset in SRAM or NULL_SRAM_ADDR for failure.
*/
-cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
+u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
{
- struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
- cc_sram_addr_t p;
+ u32 p;
if ((size & 0x3)) {
dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
size);
return NULL_SRAM_ADDR;
}
- if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
- dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
- size, smgr_ctx->sram_free_offset);
+ if (size > (CC_CC_SRAM_SIZE - drvdata->sram_free_offset)) {
+ dev_err(dev, "Not enough space to allocate %u B (at offset %u)\n",
+ size, drvdata->sram_free_offset);
return NULL_SRAM_ADDR;
}
- p = smgr_ctx->sram_free_offset;
- smgr_ctx->sram_free_offset += size;
- dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
+ p = drvdata->sram_free_offset;
+ drvdata->sram_free_offset += size;
+ dev_dbg(dev, "Allocated %u B @ %u\n", size, p);
return p;
}
@@ -97,13 +70,12 @@ cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
*
* @src: A pointer to array of words to set as consts.
* @dst: The target SRAM buffer to set into
- * @nelements: The number of words in "src" array
+ * @nelement: The number of words in the "src" array
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
- unsigned int nelement, struct cc_hw_desc *seq,
- unsigned int *seq_len)
+void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement,
+ struct cc_hw_desc *seq, unsigned int *seq_len)
{
u32 i;
unsigned int idx = *seq_len;
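
cc_sram_alloc() is now a plain bump allocator over drvdata->sram_free_offset,
with no corresponding free. A usage sketch (illustrative; "nwords" is a
hypothetical count): the size must be a multiple of 4, and the result must be
checked against NULL_SRAM_ADDR, since offset 0 is a valid SRAM address:

	u32 off = cc_sram_alloc(drvdata, 4 * nwords);	/* 4-byte multiple */

	if (off == NULL_SRAM_ADDR)
		return -ENOMEM;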
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index 1d14de9ee8c3..1c965ef83002 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -10,42 +10,30 @@
struct cc_drvdata;
-/**
- * Address (offset) within CC internal SRAM
- */
-
-typedef u64 cc_sram_addr_t;
-
-#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
+#define NULL_SRAM_ADDR ((u32)-1)
-/*!
- * Initializes SRAM pool.
+/**
+ * cc_sram_mgr_init() - Initializes SRAM pool.
* The first X bytes of SRAM are reserved for ROM usage, hence, pool
* starts right after X bytes.
*
- * \param drvdata
+ * @drvdata: Associated device driver context
*
- * \return int Zero for success, negative value otherwise.
+ * Return:
+ * Zero for success, negative value otherwise.
*/
int cc_sram_mgr_init(struct cc_drvdata *drvdata);
-/*!
- * Uninits SRAM pool.
+/**
+ * cc_sram_alloc() - Allocate buffer from SRAM pool.
*
- * \param drvdata
- */
-void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
-
-/*!
- * Allocated buffer from SRAM pool.
- * Note: Caller is responsible to free the LAST allocated buffer.
- * This function does not taking care of any fragmentation may occur
- * by the order of calls to alloc/free.
+ * @drvdata: Associated device driver context
+ * @size: The requested bytes to allocate
*
- * \param drvdata
- * \param size The requested bytes to allocate
+ * Return:
+ * Address offset in SRAM or NULL_SRAM_ADDR for failure.
*/
-cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
+u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
/**
* cc_set_sram_desc() - Create const descriptors sequence to
@@ -54,12 +42,11 @@ cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
*
* @src: A pointer to array of words to set as consts.
* @dst: The target SRAM buffer to set into
- * @nelements: The number of words in "src" array
+ * @nelement: The number of words in the "src" array
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
- unsigned int nelement, struct cc_hw_desc *seq,
- unsigned int *seq_len);
+void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement,
+ struct cc_hw_desc *seq, unsigned int *seq_len);
#endif /*__CC_SRAM_MGR_H__*/
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index f078b2686418..f2756836093f 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -42,3 +42,14 @@ config CRYPTO_DEV_CHELSIO_TLS
To compile this driver as a module, choose M here: the module
will be called chtls.
+
+config CHELSIO_TLS_DEVICE
+ bool "Chelsio Inline KTLS Offload"
+ depends on CHELSIO_T4
+ depends on TLS_DEVICE
+ select CRYPTO_DEV_CHELSIO
+ default y
+ help
+	  This flag enables support for kernel TLS offload over the Chelsio T6
+	  crypto accelerator. CONFIG_CHELSIO_TLS_DEVICE can be enabled
+	  only if CONFIG_TLS and CONFIG_TLS_DEVICE are enabled.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index a3c05e2f4562..0e9d035927e9 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -3,5 +3,8 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+chcr-objs += chcr_ktls.o
+#endif
chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b4b9b22125d1..c29b80dd30d8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -715,6 +715,52 @@ static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
return err;
}
+
+static inline int get_qidxs(struct crypto_async_request *req,
+ unsigned int *txqidx, unsigned int *rxqidx)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ int ret = 0;
+
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ {
+ struct aead_request *aead_req =
+ container_of(req, struct aead_request, base);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ {
+ struct skcipher_request *sk_req =
+ container_of(req, struct skcipher_request, base);
+ struct chcr_skcipher_req_ctx *reqctx =
+ skcipher_request_ctx(sk_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ case CRYPTO_ALG_TYPE_AHASH:
+ {
+ struct ahash_request *ahash_req =
+ container_of(req, struct ahash_request, base);
+ struct chcr_ahash_req_ctx *reqctx =
+ ahash_request_ctx(ahash_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ /* should never get here */
+ BUG();
+ break;
+ }
+ return ret;
+}
+
static inline void create_wreq(struct chcr_context *ctx,
struct chcr_wr *chcr_req,
struct crypto_async_request *req,
@@ -725,7 +771,15 @@ static inline void create_wreq(struct chcr_context *ctx,
unsigned int lcb)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+ unsigned int tx_channel_id, rx_channel_id;
+ unsigned int txqidx = 0, rxqidx = 0;
+ unsigned int qid, fid;
+
+ get_qidxs(req, &txqidx, &rxqidx);
+ qid = u_ctx->lldi.rxq_ids[rxqidx];
+ fid = u_ctx->lldi.rxq_ids[0];
+ tx_channel_id = txqidx / ctx->txq_perchan;
+ rx_channel_id = rxqidx / ctx->rxq_perchan;
chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -734,15 +788,12 @@ static inline void create_wreq(struct chcr_context *ctx,
chcr_req->wreq.len16_pkd =
htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
- chcr_req->wreq.rx_chid_to_rx_q_id =
- FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
- !!lcb, ctx->tx_qidx);
+ chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
+ !!lcb, txqidx);
- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
- qid);
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
- ((sizeof(chcr_req->wreq)) >> 4)));
-
+ ((sizeof(chcr_req->wreq)) >> 4)));
chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
sizeof(chcr_req->key_ctx) + sc_len);
@@ -758,7 +809,8 @@ static inline void create_wreq(struct chcr_context *ctx,
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct chcr_context *ctx = c_ctx(tfm);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
@@ -771,7 +823,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
unsigned int kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
- struct adapter *adap = padap(c_ctx(tfm)->dev);
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
reqctx->dst_ofst);
@@ -791,7 +844,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1086,8 +1139,12 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
AES_BLOCK_SIZE));
- else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv, 1);
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
+ if (!reqctx->partial_req)
+ memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
+ else
+ ret = chcr_update_tweak(req, iv, 1);
+ }
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
/*Already updated for Decrypt*/
if (!reqctx->op)
@@ -1102,12 +1159,13 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_context *ctx = c_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
- struct cipher_wr_param wrparam;
+ struct cipher_wr_param wrparam;
struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
@@ -1152,7 +1210,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
- wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
+ wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
wrparam.req = req;
wrparam.bytes = bytes;
skb = create_cipher_wr(&wrparam);
@@ -1162,14 +1220,24 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
goto unmap;
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
reqctx->last_req_len = bytes;
reqctx->processed += bytes;
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+	    CRYPTO_TFM_REQ_MAY_SLEEP) {
+ complete(&ctx->cbc_aes_aio_done);
+ }
return 0;
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+	    CRYPTO_TFM_REQ_MAY_SLEEP) {
+ complete(&ctx->cbc_aes_aio_done);
+ }
chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
return err;
@@ -1188,6 +1256,7 @@ static int process_cipher(struct skcipher_request *req,
int bytes, err = -EINVAL;
reqctx->processed = 0;
+ reqctx->partial_req = 0;
if (!req->iv)
goto error;
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
@@ -1278,6 +1347,7 @@ static int process_cipher(struct skcipher_request *req,
}
reqctx->processed = bytes;
reqctx->last_req_len = bytes;
+ reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
return 0;
unmap:
@@ -1289,31 +1359,43 @@ error:
static int chcr_aes_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
- int err, isfull = 0;
+ int err;
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_context *ctx = c_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
err = chcr_inc_wrcount(dev);
if (err)
return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- c_ctx(tfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
err = -ENOSPC;
goto error;
- }
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
&skb, CHCR_ENCRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+	    CRYPTO_TFM_REQ_MAY_SLEEP) {
+ reqctx->partial_req = 1;
+ wait_for_completion(&ctx->cbc_aes_aio_done);
+ }
+ return -EINPROGRESS;
error:
chcr_dec_wrcount(dev);
return err;
@@ -1322,44 +1404,45 @@ error:
static int chcr_aes_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
- int err, isfull = 0;
+ int err;
+ struct chcr_context *ctx = c_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
err = chcr_inc_wrcount(dev);
if (err)
return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- c_ctx(tfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
return -ENOSPC;
- }
-
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
&skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
}
-
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx = NULL;
- unsigned int id;
- int txq_perchan, txq_idx, ntxq;
- int err = 0, rxq_perchan, rxq_idx;
+ int txq_perchan, ntxq;
+ int err = 0, rxq_perchan;
- id = smp_processor_id();
if (!ctx->dev) {
u_ctx = assign_chcr_device();
if (!u_ctx) {
- err = -ENXIO;
pr_err("chcr device assignment fails\n");
goto out;
}
@@ -1367,23 +1450,10 @@ static int chcr_device_init(struct chcr_context *ctx)
ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
- spin_lock(&ctx->dev->lock_chcr_dev);
- ctx->tx_chan_id = ctx->dev->tx_channel_id;
- ctx->dev->tx_channel_id =
- (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
- spin_unlock(&ctx->dev->lock_chcr_dev);
- rxq_idx = ctx->tx_chan_id * rxq_perchan;
- rxq_idx += id % rxq_perchan;
- txq_idx = ctx->tx_chan_id * txq_perchan;
- txq_idx += id % txq_perchan;
- ctx->rx_qidx = rxq_idx;
- ctx->tx_qidx = txq_idx;
- /* Channel Id used by SGE to forward packet to Host.
- * Same value should be used in cpl_fw6_pld RSS_CH field
- * by FW. Driver programs PCI channel ID to be used in fw
- * at the time of queue allocation with value "pi->tx_chan"
- */
- ctx->pci_chan_id = txq_idx / txq_perchan;
+ ctx->ntxq = ntxq;
+ ctx->nrxq = u_ctx->lldi.nrxq;
+ ctx->rxq_perchan = rxq_perchan;
+ ctx->txq_perchan = txq_perchan;
}
out:
return err;
@@ -1401,7 +1471,7 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
-
+ init_completion(&ctx->cbc_aes_aio_done);
crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
return chcr_device_init(ctx);
@@ -1485,9 +1555,10 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
+ struct chcr_context *ctx = h_ctx(tfm);
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_wr *chcr_req;
struct ulptx_sgl *ulptx;
unsigned int nents = 0, transhdr_len;
@@ -1496,6 +1567,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
GFP_ATOMIC;
struct adapter *adap = padap(h_ctx(tfm)->dev);
int error = 0;
+ unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
@@ -1513,7 +1585,8 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
+
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1576,16 +1649,22 @@ static int chcr_ahash_update(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
struct hash_wr_param params;
- int error, isfull = 0;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
if (nbytes + req_ctx->reqlen >= bs) {
remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1603,12 +1682,10 @@ static int chcr_ahash_update(struct ahash_request *req)
* inflight count for dev guarantees that lldi and padap is valid
*/
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
error = -ENOSPC;
goto err;
- }
}
chcr_init_hctx_per_wr(req_ctx);
@@ -1650,10 +1727,9 @@ static int chcr_ahash_update(struct ahash_request *req)
}
req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
chcr_send_wr(skb);
-
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
@@ -1678,16 +1754,22 @@ static int chcr_ahash_final(struct ahash_request *req)
struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct hash_wr_param params;
struct sk_buff *skb;
- struct uld_ctx *u_ctx = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
int error = -EINVAL;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
error = chcr_inc_wrcount(dev);
if (error)
return -ENXIO;
chcr_init_hctx_per_wr(req_ctx);
- u_ctx = ULD_CTX(h_ctx(rtfm));
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
@@ -1727,7 +1809,7 @@ static int chcr_ahash_final(struct ahash_request *req)
}
req_ctx->reqlen = 0;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
chcr_send_wr(skb);
return -EINPROGRESS;
err:
@@ -1740,25 +1822,29 @@ static int chcr_ahash_finup(struct ahash_request *req)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct chcr_dev *dev = h_ctx(rtfm)->dev;
- struct uld_ctx *u_ctx = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
- int error, isfull = 0;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
error = chcr_inc_wrcount(dev);
if (error)
return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
error = -ENOSPC;
goto err;
- }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
@@ -1816,10 +1902,9 @@ static int chcr_ahash_finup(struct ahash_request *req)
req_ctx->reqlen = 0;
req_ctx->hctx_wr.processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
chcr_send_wr(skb);
-
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
@@ -1832,11 +1917,18 @@ static int chcr_ahash_digest(struct ahash_request *req)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct chcr_dev *dev = h_ctx(rtfm)->dev;
- struct uld_ctx *u_ctx = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
- int error, isfull = 0;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
@@ -1844,14 +1936,11 @@ static int chcr_ahash_digest(struct ahash_request *req)
if (error)
return -ENXIO;
- u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
error = -ENOSPC;
goto err;
- }
}
chcr_init_hctx_per_wr(req_ctx);
@@ -1907,9 +1996,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
}
req_ctx->hctx_wr.processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
@@ -1922,14 +2011,20 @@ static int chcr_ahash_continue(struct ahash_request *req)
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = NULL;
+ struct chcr_context *ctx = h_ctx(rtfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
if (is_hmac(crypto_ahash_tfm(rtfm))) {
@@ -1969,7 +2064,7 @@ static int chcr_ahash_continue(struct ahash_request *req)
}
hctx_wr->processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
return 0;
err:
@@ -2315,7 +2410,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
int size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
@@ -2331,7 +2427,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(a_ctx(tfm)->dev);
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
if (req->cryptlen == 0)
return NULL;
@@ -2351,7 +2448,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
CHCR_SRC_SG_SIZE, 0);
dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
@@ -2383,7 +2480,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
null ? 0 : 1 + IV,
@@ -2471,8 +2568,9 @@ int chcr_aead_dma_map(struct device *dev,
else
reqctx->b0_dma = 0;
if (req->src == req->dst) {
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
+ error = dma_map_sg(dev, req->src,
+ sg_nents_for_len(req->src, dst_size),
+ DMA_BIDIRECTIONAL);
if (!error)
goto err;
} else {
@@ -2558,13 +2656,14 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
unsigned int authsize = crypto_aead_authsize(tfm);
struct chcr_context *ctx = a_ctx(tfm);
u32 temp;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
temp = req->assoclen + req->cryptlen +
(reqctx->op ? -authsize : authsize);
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
- dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
+ dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_cipher_src_ent(struct skcipher_request *req,
@@ -2599,14 +2698,14 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
reqctx->dst_ofst);
reqctx->dstsg = dsgl_walk.last_sg;
reqctx->dst_ofst = dsgl_walk.last_sg_len;
-
- dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
+ dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_hash_src_ent(struct ahash_request *req,
@@ -2804,10 +2903,12 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = a_ctx(tfm)->tx_chan_id;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2828,9 +2929,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
auth_offset = 0;
}
-
- sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, 1);
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
sec_cpl->pldlen =
htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
/* For CCM there wil be b0 always. So AAD start will be 1 always */
@@ -2973,7 +3072,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
int size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
@@ -2986,7 +3086,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(a_ctx(tfm)->dev);
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
assoclen = req->assoclen - 8;
@@ -3028,7 +3129,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
//Offset of tag from end
temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- a_ctx(tfm)->tx_chan_id, 2, 1);
+ rx_channel_id, 2, 1);
chcr_req->sec_cpl.pldlen =
htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
@@ -3576,9 +3677,9 @@ static int chcr_aead_op(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct uld_ctx *u_ctx;
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
- int isfull = 0;
struct chcr_dev *cdev;
cdev = a_ctx(tfm)->dev;
@@ -3594,18 +3695,15 @@ static int chcr_aead_op(struct aead_request *req,
return chcr_aead_fallback(req, reqctx->op);
}
- u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- a_ctx(tfm)->tx_qidx)) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
chcr_dec_wrcount(cdev);
return -ENOSPC;
- }
}
/* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
if (IS_ERR_OR_NULL(skb)) {
chcr_dec_wrcount(cdev);
@@ -3613,15 +3711,22 @@ static int chcr_aead_op(struct aead_request *req,
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
}
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_context *ctx = a_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
reqctx->verify = VERIFY_HW;
reqctx->op = CHCR_ENCRYPT_OP;
@@ -3643,9 +3748,16 @@ static int chcr_aead_encrypt(struct aead_request *req)
static int chcr_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int size;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
if (aeadctx->mayverify == VERIFY_SW) {
size = crypto_aead_maxauthsize(tfm);
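
The queue selection added throughout this file follows a single pattern,
shown here once for clarity (same code as in the hunks above):

	unsigned int cpu = get_cpu();

	reqctx->txqidx = cpu % ctx->ntxq;	/* spread requests across TX queues */
	reqctx->rxqidx = cpu % ctx->nrxq;	/* matching RX queue for responses */
	put_cpu();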
diff --git a/drivers/crypto/chelsio/chcr_common.h b/drivers/crypto/chelsio/chcr_common.h
new file mode 100644
index 000000000000..33f589cbfba1
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_common.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifndef __CHCR_COMMON_H__
+#define __CHCR_COMMON_H__
+
+#include "cxgb4.h"
+
+#define CHCR_MAX_SALT 4
+#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_CPL_TX_SEC_PDU_LEN_64BIT 2
+#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
+#define CHCR_SCMD_PROTO_VERSION_TLS 0
+#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
+#define AES_BLOCK_LEN 16
+
+enum chcr_state {
+ CHCR_INIT = 0,
+ CHCR_ATTACH,
+ CHCR_DETACH,
+};
+
+struct chcr_dev {
+ spinlock_t lock_chcr_dev; /* chcr dev structure lock */
+ enum chcr_state state;
+ atomic_t inflight;
+ int wqretry;
+ struct delayed_work detach_work;
+ struct completion detach_comp;
+ unsigned char tx_channel_id;
+};
+
+struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+ struct chcr_dev dev;
+};
+
+struct ktls_key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[CHCR_MAX_SALT];
+ __be64 iv_to_auth;
+ unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE +
+ TLS_CIPHER_AES_GCM_256_TAG_SIZE];
+};
+
+/* Crypto key context */
+#define KEY_CONTEXT_CTX_LEN_S 24
+#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S)
+
+#define KEY_CONTEXT_SALT_PRESENT_S 10
+#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S)
+#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U)
+
+#define KEY_CONTEXT_VALID_S 0
+#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S)
+#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U)
+
+#define KEY_CONTEXT_CK_SIZE_S 6
+#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S)
+
+#define KEY_CONTEXT_MK_SIZE_S 2
+#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S)
+
+#define KEY_CONTEXT_OPAD_PRESENT_S 11
+#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
+#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
+
+#define FILL_KEY_CTX_HDR(ck_size, mk_size, ctx_len) \
+ htonl(KEY_CONTEXT_MK_SIZE_V(mk_size) | \
+ KEY_CONTEXT_CK_SIZE_V(ck_size) | \
+ KEY_CONTEXT_VALID_F | \
+ KEY_CONTEXT_SALT_PRESENT_F | \
+ KEY_CONTEXT_CTX_LEN_V((ctx_len)))
+
+struct uld_ctx *assign_chcr_device(void);
+
+static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
+ void *pos, int length)
+{
+ int left = (void *)q->stat - pos;
+ u64 *p;
+
+ if (likely(length <= left)) {
+ memcpy(pos, src, length);
+ pos += length;
+ } else {
+ memcpy(pos, src, left);
+ memcpy(q->desc, src + left, length - left);
+ pos = (void *)q->desc + (length - left);
+ }
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8) {
+ *p = 0;
+ return p + 1;
+ }
+ return p;
+}
+
+static inline unsigned int chcr_txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static inline void chcr_txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+static inline void chcr_eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline unsigned int chcr_sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+static inline unsigned int chcr_flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+#endif /* __CHCR_COMMON_H__ */
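
The TX-ring helpers above are used together on the KTLS submit path. A hedged
sketch of the typical sequence (field names from cxgb4's struct sge_eth_txq;
"flits", "wr", "len" and "pos" are hypothetical locals; illustrative only):

	unsigned int ndesc = chcr_flits_to_desc(flits);

	if (chcr_txq_avail(&q->q) < ndesc) {		/* ring is too full */
		chcr_eth_txq_stop(q);
		return NETDEV_TX_BUSY;
	}
	pos = chcr_copy_to_txd(wr, &q->q, pos, len);	/* handles wrap at q->stat */
	chcr_txq_advance(&q->q, ndesc);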
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index e937605670ac..ffd4ec0c7374 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -28,13 +28,21 @@
static struct chcr_driver_data drv_data;
-typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
-static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
+static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+static void update_netdev_features(void);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_FW6_PLD] = cpl_fw6_pld_handler,
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
+ [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
+#endif
};
static struct cxgb4_uld_info chcr_uld_info = {
@@ -45,9 +53,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
.tx_handler = chcr_uld_tx_handler,
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
};
static void detach_work_fn(struct work_struct *work)
@@ -125,8 +133,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
atomic_set(&dev->inflight, 0);
mutex_lock(&drv_data.drv_mutex);
list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
- if (!drv_data.last_dev)
- drv_data.last_dev = u_ctx;
mutex_unlock(&drv_data.drv_mutex);
}
@@ -150,14 +156,13 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
return 0;
}
-static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+static int cpl_fw6_pld_handler(struct adapter *adap,
unsigned char *input)
{
struct crypto_async_request *req;
struct cpl_fw6_pld *fw6_pld;
u32 ack_err_status = 0;
int error_status = 0;
- struct adapter *adap = padap(dev);
fw6_pld = (struct cpl_fw6_pld *)input;
req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -190,6 +195,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
struct uld_ctx *u_ctx;
/* Create the device and add it in the device list */
+ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
return ERR_PTR(-EOPNOTSUPP);
@@ -201,10 +207,11 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
}
u_ctx->lldi = *lld;
chcr_dev_init(u_ctx);
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
- if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
- chcr_add_xfrmops(lld);
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_enable_ktls(padap(&u_ctx->dev));
+#endif
out:
return u_ctx;
}
@@ -214,26 +221,37 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
{
struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
struct chcr_dev *dev = &u_ctx->dev;
+ struct adapter *adap = padap(dev);
const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
- if (rpl->opcode != CPL_FW6_PLD) {
- pr_err("Unsupported opcode\n");
+ if (!work_handlers[rpl->opcode]) {
+ pr_err("Unsupported opcode %d received\n", rpl->opcode);
return 0;
}
if (!pgl)
- work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
+ work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
else
- work_handlers[rpl->opcode](dev, pgl->va);
+ work_handlers[rpl->opcode](adap, pgl->va);
return 0;
}
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
+	/* If the skb's decrypted bit is set, it is a NIC TLS packet;
+	 * otherwise it is an IPsec packet.
+	 */
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (skb->decrypted)
+ return chcr_ktls_xmit(skb, dev);
+#endif
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
return chcr_ipsec_xmit(skb, dev);
+#endif
+ return 0;
}
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
@@ -270,6 +288,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
case CXGB4_STATE_DETACH:
chcr_detach_device(u_ctx);
+ if (!atomic_read(&drv_data.dev_count))
+ stop_crypto();
break;
case CXGB4_STATE_START_RECOVERY:
@@ -280,6 +300,24 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
return ret;
}
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+static void update_netdev_features(void)
+{
+ struct uld_ctx *u_ctx, *tmp;
+
+ mutex_lock(&drv_data.drv_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(&u_ctx->lldi);
+ }
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(&u_ctx->lldi);
+ }
+ mutex_unlock(&drv_data.drv_mutex);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int __init chcr_crypto_init(void)
{
INIT_LIST_HEAD(&drv_data.act_dev);
@@ -289,6 +327,12 @@ static int __init chcr_crypto_init(void)
drv_data.last_dev = NULL;
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
+ #ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ rtnl_lock();
+ update_netdev_features();
+ rtnl_unlock();
+ #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
return 0;
}
@@ -304,12 +348,20 @@ static void __exit chcr_crypto_exit(void)
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_disable_ktls(adap);
+#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_disable_ktls(adap);
+#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index ad874d548aa5..2c09672e00a4 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -43,7 +43,8 @@
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
-#define DRV_VERSION "1.0.0.0"
+#define DRV_VERSION "1.0.0.0-ko"
+#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
#define MAX_PENDING_REQ_TO_HW 20
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
@@ -67,7 +68,7 @@ struct _key_ctx {
__be32 ctx_hdr;
u8 salt[MAX_SALT];
__be64 iv_to_auth;
- unsigned char key[0];
+ unsigned char key[];
};
#define KEYCTX_TX_WR_IV_S 55
@@ -147,7 +148,6 @@ struct chcr_dev {
int wqretry;
struct delayed_work detach_work;
struct completion detach_comp;
- unsigned char tx_channel_id;
};
struct uld_ctx {
@@ -222,4 +222,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+void chcr_enable_ktls(struct adapter *adap);
+void chcr_disable_ktls(struct adapter *adap);
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+#endif
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 6db2df8c8a05..542bebae001f 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -187,6 +187,8 @@ struct chcr_aead_reqctx {
unsigned int op;
u16 imm;
u16 verify;
+ u16 txqidx;
+ u16 rxqidx;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
u8 *scratch_pad;
};
@@ -250,10 +252,11 @@ struct __crypto_ctx {
struct chcr_context {
struct chcr_dev *dev;
- unsigned char tx_qidx;
- unsigned char rx_qidx;
- unsigned char tx_chan_id;
- unsigned char pci_chan_id;
+ unsigned char rxq_perchan;
+ unsigned char txq_perchan;
+ unsigned int ntxq;
+ unsigned int nrxq;
+ struct completion cbc_aes_aio_done;
struct __crypto_ctx crypto_ctx[0];
};
@@ -279,6 +282,8 @@ struct chcr_ahash_req_ctx {
u8 *skbfr;
/* SKB which is being sent to the hardware for processing */
u64 data_len; /* Data len till time */
+ u16 txqidx;
+ u16 rxqidx;
u8 reqlen;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -290,12 +295,15 @@ struct chcr_skcipher_req_ctx {
struct scatterlist *dstsg;
unsigned int processed;
unsigned int last_req_len;
+ unsigned int partial_req;
struct scatterlist *srcsg;
unsigned int src_ofst;
unsigned int dst_ofst;
unsigned int op;
u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ u16 txqidx;
+ u16 rxqidx;
};
struct chcr_alg_template {
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 9da0f93a330b..9fd3b9d1ec2f 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -99,9 +99,7 @@ void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
netdev->hw_enc_features |= NETIF_F_HW_ESP;
netdev->features |= NETIF_F_HW_ESP;
- rtnl_lock();
netdev_change_features(netdev);
- rtnl_unlock();
}
}
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
new file mode 100644
index 000000000000..43d9e2420110
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -0,0 +1,2028 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#include <linux/highmem.h>
+#include "chcr_ktls.h"
+#include "clip_tbl.h"
+
+static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+/*
+ * chcr_ktls_save_keys: calculate and save crypto keys.
+ * @tx_info - driver specific tls info.
+ * @crypto_info - tls crypto information.
+ * @direction - TX/RX direction.
+ * return - SUCCESS/FAILURE.
+ */
+static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
+ struct tls_crypto_info *crypto_info,
+ enum tls_offload_ctx_dir direction)
+{
+ int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
+ unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
+ struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
+ struct ktls_key_ctx *kctx = &tx_info->key_ctx;
+ struct crypto_cipher *cipher;
+ unsigned char *key, *salt;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ info_128_gcm =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+ tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);
+
+ ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+ key = info_128_gcm->key;
+ salt = info_128_gcm->salt;
+ tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
+
+		/* The SCMD fields used when encrypting a full TLS
+		 * record. It is a one-time calculation for the
+		 * lifetime of the connection.
+		 */
+ tx_info->scmd0_seqno_numivs =
+ SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) |
+ SCMD_CIPH_AUTH_SEQ_CTRL_F |
+ SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) |
+ SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
+ SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) |
+ SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) |
+ SCMD_NUM_IVS_V(1);
+
+ /* keys will be sent inline. */
+ tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F;
+
+ /* The SCMD fields used when encrypting a partial TLS
+ * record (no trailer and possibly a truncated payload).
+ */
+ tx_info->scmd0_short_seqno_numivs =
+ SCMD_CIPH_AUTH_SEQ_CTRL_F |
+ SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) |
+ SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) |
+ SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1);
+
+ tx_info->scmd0_short_ivgen_hdrlen =
+ tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F;
+
+ break;
+
+ default:
+ pr_err("GCM: cipher type 0x%x not supported\n",
+ crypto_info->cipher_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
+ roundup(keylen, 16) + ghash_size;
+	/* Calculate the GHASH subkey H = CIPH(K, 0 repeated 16 times).
+	 * It goes into the key context.
+	 */
+ cipher = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret)
+ goto out1;
+
+ memset(ghash_h, 0, ghash_size);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+
+ /* fill the Key context */
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ mac_key_size,
+ key_ctx_size >> 4);
+ } else {
+ ret = -EINVAL;
+ goto out1;
+ }
+
+ memcpy(kctx->salt, salt, tx_info->salt_size);
+ memcpy(kctx->key, key, keylen);
+ memcpy(kctx->key + keylen, ghash_h, ghash_size);
+ tx_info->key_ctx_len = key_ctx_size;
+
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
+
+static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
+ int new_state)
+{
+ /* This function can be called from both rx (interrupt context) and tx
+ * queue contexts.
+ */
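+	/* Expected progression: CLOSED -> ACT_OPEN_REQ -> ACT_OPEN_RPL ->
+	 * SET_TCB_REQ -> SET_TCB_RPL -> TX_READY.
+	 */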
+ spin_lock_bh(&tx_info->lock);
+ switch (tx_info->connection_state) {
+ case KTLS_CONN_CLOSED:
+ tx_info->connection_state = new_state;
+ break;
+
+ case KTLS_CONN_ACT_OPEN_REQ:
+ /* only go forward if state is greater than current state. */
+ if (new_state <= tx_info->connection_state)
+ break;
+ /* update to the next state and also initialize TCB */
+ tx_info->connection_state = new_state;
+ /* FALLTHRU */
+ case KTLS_CONN_ACT_OPEN_RPL:
+		/* If we are stuck in this state, the TCB init might not have
+		 * reached the HW; try sending it again.
+		 */
+ if (!chcr_init_tcb_fields(tx_info))
+ tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
+ break;
+
+ case KTLS_CONN_SET_TCB_REQ:
+ /* only go forward if state is greater than current state. */
+ if (new_state <= tx_info->connection_state)
+ break;
+ /* update to the next state and check if l2t_state is valid */
+ tx_info->connection_state = new_state;
+ /* FALLTHRU */
+ case KTLS_CONN_SET_TCB_RPL:
+ /* Check if l2t state is valid, then move to ready state. */
+ if (cxgb4_check_l2t_valid(tx_info->l2te)) {
+ tx_info->connection_state = KTLS_CONN_TX_READY;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ctx);
+ }
+ break;
+
+ case KTLS_CONN_TX_READY:
+ /* nothing to be done here */
+ break;
+
+ default:
+ pr_err("unknown KTLS connection state\n");
+ break;
+ }
+ spin_unlock_bh(&tx_info->lock);
+
+ return tx_info->connection_state;
+}
+/*
+ * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * @atid - connection active tid.
+ * return - send success/failure.
+ */
+static int chcr_ktls_act_open_req(struct sock *sk,
+ struct chcr_ktls_info *tx_info,
+ int atid)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct cpl_t6_act_open_req *cpl6;
+ struct cpl_act_open_req *cpl;
+ struct sk_buff *skb;
+ unsigned int len;
+ int qid_atid;
+ u64 options;
+
+ len = sizeof(*cpl6);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ /* mark it a control pkt */
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+
+ cpl6 = __skb_put_zero(skb, len);
+ cpl = (struct cpl_act_open_req *)cpl6;
+ INIT_TP_WR(cpl6, 0);
+ qid_atid = TID_QID_V(tx_info->rx_qid) |
+ TID_TID_V(atid);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
+ cpl->local_port = inet->inet_sport;
+ cpl->peer_port = inet->inet_dport;
+ cpl->local_ip = inet->inet_rcv_saddr;
+ cpl->peer_ip = inet->inet_daddr;
+
+ /* fill first 64 bit option field. */
+ options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
+ SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
+ cpl->opt0 = cpu_to_be64(options);
+
+ /* next 64 bit option field. */
+ options =
+ TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
+ cpl->opt2 = htonl(options);
+
+ return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
+}
+
+/*
+ * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * @atid - connection active tid.
+ * return - send success/failure.
+ */
+static int chcr_ktls_act_open_req6(struct sock *sk,
+ struct chcr_ktls_info *tx_info,
+ int atid)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct cpl_t6_act_open_req6 *cpl6;
+ struct cpl_act_open_req6 *cpl;
+ struct sk_buff *skb;
+ unsigned int len;
+ int qid_atid;
+ u64 options;
+
+ len = sizeof(*cpl6);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ /* mark it a control pkt */
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+
+ cpl6 = __skb_put_zero(skb, len);
+ cpl = (struct cpl_act_open_req6 *)cpl6;
+ INIT_TP_WR(cpl6, 0);
+ qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
+ cpl->local_port = inet->inet_sport;
+ cpl->peer_port = inet->inet_dport;
+ cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
+ cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
+ cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
+ cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
+
+ /* first 64 bit option field. */
+ options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
+ SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
+ cpl->opt0 = cpu_to_be64(options);
+ /* next 64 bit option field. */
+ options =
+ TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
+ cpl->opt2 = htonl(options);
+
+ return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
+}
+
+/*
+ * chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP
+ */
+static int chcr_setup_connection(struct sock *sk,
+ struct chcr_ktls_info *tx_info)
+{
+ struct tid_info *t = &tx_info->adap->tids;
+ int atid, ret = 0;
+
+ atid = cxgb4_alloc_atid(t, tx_info);
+ if (atid == -1)
+ return -EINVAL;
+
+ tx_info->atid = atid;
+ tx_info->ip_family = sk->sk_family;
+
+ if (sk->sk_family == AF_INET ||
+ (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ tx_info->ip_family = AF_INET;
+ ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+ } else {
+ tx_info->ip_family = AF_INET6;
+ ret =
+ cxgb4_clip_get(tx_info->netdev,
+ (const u32 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8,
+ 1);
+ if (ret)
+ goto out;
+ ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+ }
+
+	/* If the return type is NET_XMIT_CN, the msg will be sent but delayed;
+	 * treat it as success. On any other failure, free the atid and return
+	 * that failure.
+	 */
+ if (ret) {
+ if (ret == NET_XMIT_CN)
+ ret = 0;
+ else
+ cxgb4_free_atid(t, atid);
+ goto out;
+ }
+
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
+out:
+ return ret;
+}
+
+/*
+ * chcr_set_tcb_field: update tcb fields.
+ * @tx_info - driver specific tls info.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @no_reply - set 1 if not looking for TP response.
+ */
+static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
+ u64 mask, u64 val, int no_reply)
+{
+ struct cpl_set_tcb_field *req;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
+ INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid);
+ req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
+ NO_REPLY_V(no_reply));
+ req->word_cookie = htons(TCB_WORD_V(word));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+ return cxgb4_ofld_send(tx_info->netdev, skb);
+}
+
+/*
+ * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP.
+ */
+static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
+{
+ return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
+ TCB_T_STATE_V(TCB_T_STATE_M),
+ CHCR_TCB_STATE_CLOSED, 1);
+}
+
+/*
+ * chcr_ktls_dev_del: callback for tls_dev_del.
+ * Remove the tid and l2t entry and close the connection.
+ * This is done on a per-connection basis.
+ * @netdev - net device.
+ * @tls_ctx - tls context.
+ * @direction - TX/RX crypto direction.
+ */
+static void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx =
+ chcr_get_ktls_tx_context(tls_ctx);
+ struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+ struct sock *sk;
+
+ if (!tx_info)
+ return;
+ sk = tx_info->sk;
+
+ spin_lock(&tx_info->lock);
+ tx_info->connection_state = KTLS_CONN_CLOSED;
+ spin_unlock(&tx_info->lock);
+
+ /* clear l2t entry */
+ if (tx_info->l2te)
+ cxgb4_l2t_release(tx_info->l2te);
+
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(netdev,
+ (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
+ 1);
+
+ /* clear tid */
+ if (tx_info->tid != -1) {
+ /* clear tcb state and then release tid */
+ chcr_ktls_mark_tcb_close(tx_info);
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+ }
+
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
+ kvfree(tx_info);
+ tx_ctx->chcr_info = NULL;
+}
+
+/*
+ * chcr_ktls_dev_add: callback for tls_dev_add.
+ * Create a tcb entry for TP, add an l2t entry for the connection, and
+ * generate the crypto keys & save them locally.
+ * @netdev - net device.
+ * @sk - tcp socket.
+ * @direction - TX/RX crypto direction.
+ * @crypto_info - tls crypto information.
+ * @start_offload_tcp_sn - tcp sequence number from which offload starts.
+ * return: SUCCESS/FAILURE.
+ */
+static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_info *tx_info;
+ struct dst_entry *dst;
+ struct adapter *adap;
+ struct port_info *pi;
+ struct neighbour *n;
+ u8 daaddr[16];
+ int ret = -1;
+
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+
+ pi = netdev_priv(netdev);
+ adap = pi->adapter;
+ if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
+		pr_err("RX direction is not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (tx_ctx->chcr_info) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
+ if (!tx_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_init(&tx_info->lock);
+
+ /* clear connection state */
+ spin_lock(&tx_info->lock);
+ tx_info->connection_state = KTLS_CONN_CLOSED;
+ spin_unlock(&tx_info->lock);
+
+ tx_info->sk = sk;
+	/* initialize tid and atid to -1; 0 is also a valid id */
+ tx_info->tid = -1;
+ tx_info->atid = -1;
+
+ tx_info->adap = adap;
+ tx_info->netdev = netdev;
+ tx_info->first_qset = pi->first_qset;
+ tx_info->tx_chan = pi->tx_chan;
+ tx_info->smt_idx = pi->smt_idx;
+ tx_info->port_id = pi->port_id;
+
+ tx_info->rx_qid = chcr_get_first_rx_qid(adap);
+ if (unlikely(tx_info->rx_qid < 0))
+ goto out2;
+
+ tx_info->prev_seq = start_offload_tcp_sn;
+ tx_info->tcp_start_seq_number = start_offload_tcp_sn;
+
+ /* save crypto keys */
+ ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
+ if (ret < 0)
+ goto out2;
+
+ /* get peer ip */
+ if (sk->sk_family == AF_INET ||
+ (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ memcpy(daaddr, &sk->sk_daddr, 4);
+ } else {
+ memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+ }
+
+ /* get the l2t index */
+ dst = sk_dst_get(sk);
+ if (!dst) {
+ pr_err("DST entry not found\n");
+ goto out2;
+ }
+ n = dst_neigh_lookup(dst, daaddr);
+ if (!n || !n->dev) {
+ pr_err("neighbour not found\n");
+ dst_release(dst);
+ goto out2;
+ }
+ tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
+
+ neigh_release(n);
+ dst_release(dst);
+
+ if (!tx_info->l2te) {
+ pr_err("l2t entry not found\n");
+ goto out2;
+ }
+
+ tx_ctx->chcr_info = tx_info;
+
+ /* create a filter and call cxgb4_l2t_send to send the packet out, which
+ * will take care of updating l2t entry in hw if not already done.
+ */
+ ret = chcr_setup_connection(sk, tx_info);
+ if (ret)
+ goto out2;
+
+ atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
+ return 0;
+out2:
+ kvfree(tx_info);
+out:
+ atomic64_inc(&adap->chcr_stats.ktls_tx_connection_fail);
+ return ret;
+}
+
+static const struct tlsdev_ops chcr_ktls_ops = {
+ .tls_dev_add = chcr_ktls_dev_add,
+ .tls_dev_del = chcr_ktls_dev_del,
+};
+
+/*
+ * chcr_enable_ktls: add NETIF_F_HW_TLS_TX flag in all the ports.
+ */
+void chcr_enable_ktls(struct adapter *adap)
+{
+ struct net_device *netdev;
+ int i;
+
+ for_each_port(adap, i) {
+ netdev = adap->port[i];
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = &chcr_ktls_ops;
+ }
+}
+
+/*
+ * chcr_disable_ktls: remove NETIF_F_HW_TLS_TX flag from all the ports.
+ */
+void chcr_disable_ktls(struct adapter *adap)
+{
+ struct net_device *netdev;
+ int i;
+
+ for_each_port(adap, i) {
+ netdev = adap->port[i];
+ netdev->features &= ~NETIF_F_HW_TLS_TX;
+ netdev->hw_features &= ~NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = NULL;
+ }
+}
+
+/*
+ * chcr_init_tcb_fields: Initialize the tcb fields needed for TCP seq number
+ * handling.
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP
+ */
+static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
+{
+ int ret = 0;
+
+ /* set tcb in offload and bypass */
+ ret =
+ chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W,
+ TCB_T_FLAGS_V(TF_CORE_BYPASS_F | TF_NON_OFFLOAD_F),
+ TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1);
+ if (ret)
+ return ret;
+ /* reset snd_una and snd_next fields in tcb */
+ ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W,
+ TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
+ TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+ 0, 1);
+ if (ret)
+ return ret;
+
+ /* reset send max */
+ ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W,
+ TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
+ 0, 1);
+ if (ret)
+ return ret;
+
+ /* update l2t index and request for tp reply to confirm tcb is
+ * initialised to handle tx traffic.
+ */
+ ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W,
+ TCB_L2T_IX_V(TCB_L2T_IX_M),
+ TCB_L2T_IX_V(tx_info->l2te->idx), 0);
+ return ret;
+}
+
+/*
+ * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
+ */
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
+{
+ const struct cpl_act_open_rpl *p = (void *)input;
+ struct chcr_ktls_info *tx_info = NULL;
+ unsigned int atid, tid, status;
+ struct tid_info *t;
+
+ tid = GET_TID(p);
+ status = AOPEN_STATUS_G(ntohl(p->atid_status));
+ atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));
+
+ t = &adap->tids;
+ tx_info = lookup_atid(t, atid);
+
+ if (!tx_info || tx_info->atid != atid) {
+ pr_err("tx_info or atid is not correct\n");
+ return -1;
+ }
+
+ if (!status) {
+ tx_info->tid = tid;
+ cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+
+ cxgb4_free_atid(t, atid);
+ tx_info->atid = -1;
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info,
+ KTLS_CONN_ACT_OPEN_RPL);
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
+ */
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+{
+ const struct cpl_set_tcb_rpl *p = (void *)input;
+ struct chcr_ktls_info *tx_info = NULL;
+ struct tid_info *t;
+ u32 tid;
+
+ tid = GET_TID(p);
+
+ t = &adap->tids;
+ tx_info = lookup_tid(t, tid);
+ if (!tx_info || tx_info->tid != tid) {
+		pr_err("tx_info or tid is not correct\n");
+ return -1;
+ }
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);
+ return 0;
+}
+
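+/* __chcr_write_cpl_set_tcb_ulp: write a ULP_TXPKT + ULPTX_IDATA wrapped
+ * CPL_SET_TCB_FIELD, padded with a trailing ULPTX_NOOP, at @pos and return
+ * the next write position.
+ */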
+static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ u32 tid, void *pos, u16 word, u64 mask,
+ u64 val, u32 reply)
+{
+ struct cpl_set_tcb_field_core *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *txpkt;
+
+ /* ULP_TXPKT */
+ txpkt = pos;
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(txpkt + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ idata->len = htonl(sizeof(*cpl));
+ pos = idata + 1;
+
+ cpl = pos;
+ /* CPL_SET_TCB_FIELD */
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
+ NO_REPLY_V(!reply));
+ cpl->word_cookie = htons(TCB_WORD_V(word));
+ cpl->mask = cpu_to_be64(mask);
+ cpl->val = cpu_to_be64(val);
+
+ /* ULPTX_NOOP */
+ idata = (struct ulptx_idata *)(cpl + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+ idata->len = htonl(0);
+ pos = idata + 1;
+
+ return pos;
+}
+
+
+/*
+ * chcr_write_cpl_set_tcb_ulp: update tcb values.
+ * The TCB is responsible for creating the tcp headers, so all the related
+ * values should be kept correctly updated.
+ * @tx_info - driver specific tls info.
+ * @q - tx queue on which packet is going out.
+ * @tid - TCB identifier.
+ * @pos - current position to start writing at.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @reply - set 1 if looking for TP response.
+ * return - next position to write.
+ */
+static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u32 tid,
+ void *pos, u16 word, u64 mask,
+ u64 val, u32 reply)
+{
+ int left = (void *)q->q.stat - pos;
+
+ if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
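+		/* The CPL would cross the end of the ring: either restart at
+		 * the top (nothing written yet), or stage the CPL in a bounce
+		 * buffer and let chcr_copy_to_txd() handle the wrap.
+		 */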
+ if (!left) {
+ pos = q->q.desc;
+ } else {
+ u8 buf[48] = {0};
+
+ __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+ mask, val, reply);
+
+ return chcr_copy_to_txd(buf, &q->q, pos,
+ CHCR_SET_TCB_FIELD_LEN);
+ }
+ }
+
+ pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+ mask, val, reply);
+
+ /* check again if we are at the end of the queue */
+ if (left == CHCR_SET_TCB_FIELD_LEN)
+ pos = q->q.desc;
+
+ return pos;
+}
+
+/*
+ * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header
+ * with updated values like tcp seq, ack, window etc.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq
+ * @tcp_ack
+ * @tcp_win
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u64 tcp_seq,
+ u64 tcp_ack, u64 tcp_win)
+{
+ bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
+ u32 len, cpl = 0, ndesc, wr_len;
+ struct fw_ulptx_wr *wr;
+ int credits;
+ void *pos;
+
+ wr_len = sizeof(*wr);
+ /* there can be max 4 cpls, check if we have enough credits */
+ len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ ndesc = DIV_ROUND_UP(len, 64);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+	/* Make space for the WR; we'll fill it in later once we know all the
+	 * CPLs being sent out and have the complete length.
+	 */
+ wr = pos;
+ pos += wr_len;
+	/* update tx_max if it's a re-transmit or the first wr */
+ if (first_wr || tcp_seq != tx_info->prev_seq) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_TX_MAX_W,
+ TCB_TX_MAX_V(TCB_TX_MAX_M),
+ TCB_TX_MAX_V(tcp_seq), 0);
+ cpl++;
+ }
+ /* reset snd una if it's a re-transmit pkt */
+ if (tcp_seq != tx_info->prev_seq) {
+ /* reset snd_una */
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_SND_UNA_RAW_W,
+ TCB_SND_UNA_RAW_V
+ (TCB_SND_UNA_RAW_M),
+ TCB_SND_UNA_RAW_V(0), 0);
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ooo);
+ cpl++;
+ }
+ /* update ack */
+ if (first_wr || tx_info->prev_ack != tcp_ack) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_RCV_NXT_W,
+ TCB_RCV_NXT_V(TCB_RCV_NXT_M),
+ TCB_RCV_NXT_V(tcp_ack), 0);
+ tx_info->prev_ack = tcp_ack;
+ cpl++;
+ }
+ /* update receive window */
+ if (first_wr || tx_info->prev_win != tcp_win) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_RCV_WND_W,
+ TCB_RCV_WND_V(TCB_RCV_WND_M),
+ TCB_RCV_WND_V(tcp_win), 0);
+ tx_info->prev_win = tcp_win;
+ cpl++;
+ }
+
+ if (cpl) {
+ /* get the actual length */
+ len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ /* ULPTX wr */
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->cookie = 0;
+ /* fill len in wr field */
+ wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+
+ ndesc = DIV_ROUND_UP(len, 64);
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_skb_copy
+ * @nskb - new skb to which the frags are to be added.
+ * @skb - old skb from which frags will be copied.
+ */
+static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
+ __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+ }
+
+ skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
+ nskb->len += skb->data_len;
+ nskb->data_len = skb->data_len;
+ nskb->truesize += skb->data_len;
+}
+
+/*
+ * chcr_ktls_get_tx_flits
+ * returns the number of flits to be sent out; it includes the key context
+ * length, the WR size and the skb fragments.
+ */
+static unsigned int
+chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+{
+ return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+ DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
+}
+
+/*
+ * chcr_ktls_check_tcp_options: check if any TCP option other than NOP/EOL
+ * is present.
+ * @tcp - tcp header of the packet.
+ * return: 1 if such an option is found, else 0.
+ */
+static int
+chcr_ktls_check_tcp_options(struct tcphdr *tcp)
+{
+ int cnt, opt, optlen;
+ u_char *cp;
+
+ cp = (u_char *)(tcp + 1);
+ cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
+ for (; cnt > 0; cnt -= optlen, cp += optlen) {
+ opt = cp[0];
+ if (opt == TCPOPT_EOL)
+ break;
+ if (opt == TCPOPT_NOP) {
+ optlen = 1;
+ } else {
+ if (cnt < 2)
+ break;
+ optlen = cp[1];
+ if (optlen < 2 || optlen > cnt)
+ break;
+ }
+ switch (opt) {
+ case TCPOPT_NOP:
+ break;
+ default:
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_write_tcp_options: TP can't send out all the TCP options, so we
+ * need to send them out separately.
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing the partial record.
+ * @q - TX queue.
+ * @tx_chan - channel number.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int
+chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ struct sge_eth_txq *q, uint32_t tx_chan)
+{
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ u32 ctrl, iplen, maclen;
+ struct ipv6hdr *ip6;
+ unsigned int ndesc;
+ struct tcphdr *tcp;
+ int len16, pktlen;
+ struct iphdr *ip;
+ int credits;
+ u8 buf[150];
+ void *pos;
+
+ iplen = skb_network_header_len(skb);
+ maclen = skb_mac_header_len(skb);
+
+ /* packet length = eth hdr len + ip hdr len + tcp hdr len
+ * (including options).
+ */
+ pktlen = skb->len - skb->data_len;
+
+ ctrl = sizeof(*cpl) + pktlen;
+ len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(len16, 4);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ wr = pos;
+
+ /* Firmware work request header */
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(ctrl));
+
+ wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cpl->len = htons(pktlen);
+ /* checksum offload */
+ cpl->ctrl1 = 0;
+
+ pos = cpl + 1;
+
+ memcpy(buf, skb->data, pktlen);
+ if (tx_info->ip_family == AF_INET) {
+ /* we need to correct ip header len */
+ ip = (struct iphdr *)(buf + maclen);
+ ip->tot_len = htons(pktlen - maclen);
+ } else {
+ ip6 = (struct ipv6hdr *)(buf + maclen);
+ ip6->payload_len = htons(pktlen - maclen - iplen);
+ }
+	/* Now take care of the tcp header: if FIN is not set, clear the PSH
+	 * bit as well; if FIN is set, this packet will be sent out last, so
+	 * set the tcp sequence number to that of the last packet.
+	 */
+ tcp = (struct tcphdr *)(buf + maclen + iplen);
+
+ if (!tcp->fin)
+ tcp->psh = 0;
+ else
+ tcp->seq = htonl(tx_info->prev_seq);
+
+ chcr_copy_to_txd(buf, &q->q, pos, pktlen);
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
+/* chcr_ktls_skb_shift - shift up to @shiftlen bytes of paged data from one
+ * skb to another.
+ * @tgt - buffer into which the tail data gets added.
+ * @skb - buffer from which the paged data comes.
+ * @shiftlen - shift up to this many bytes.
+ */
+static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
+ int shiftlen)
+{
+ skb_frag_t *fragfrom, *fragto;
+ int from, to, todo;
+
+ WARN_ON(shiftlen > skb->data_len);
+
+ todo = shiftlen;
+ from = 0;
+ to = 0;
+ fragfrom = &skb_shinfo(skb)->frags[from];
+
+ while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
+ fragfrom = &skb_shinfo(skb)->frags[from];
+ fragto = &skb_shinfo(tgt)->frags[to];
+
+ if (todo >= skb_frag_size(fragfrom)) {
+ *fragto = *fragfrom;
+ todo -= skb_frag_size(fragfrom);
+ from++;
+ to++;
+
+ } else {
+ __skb_frag_ref(fragfrom);
+ skb_frag_page_copy(fragto, fragfrom);
+ skb_frag_off_copy(fragto, fragfrom);
+ skb_frag_size_set(fragto, todo);
+
+ skb_frag_off_add(fragfrom, todo);
+ skb_frag_size_sub(fragfrom, todo);
+ todo = 0;
+
+ to++;
+ break;
+ }
+ }
+
+ /* Ready to "commit" this state change to tgt */
+ skb_shinfo(tgt)->nr_frags = to;
+
+ /* Reposition in the original skb */
+ to = 0;
+ while (from < skb_shinfo(skb)->nr_frags)
+ skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
+
+ skb_shinfo(skb)->nr_frags = to;
+
+ WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
+
+ skb->len -= shiftlen;
+ skb->data_len -= shiftlen;
+ skb->truesize -= shiftlen;
+ tgt->len += shiftlen;
+ tgt->data_len += shiftlen;
+ tgt->truesize += shiftlen;
+
+ return shiftlen;
+}
+
+/*
+ * chcr_ktls_xmit_wr_complete: This sends out a complete record. If the
+ * received skb carries the end part of a record, send out the complete
+ * record so that the crypto block is able to generate the TAG/HASH.
+ * @skb - segment which has the complete or partial end part.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq
+ * @tcp_push - tcp push bit.
+ * @mss - segment size.
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u32 tcp_seq,
+ bool tcp_push, u32 mss)
+{
+ u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
+ struct adapter *adap = tx_info->adap;
+ int credits, left, last_desc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct cpl_tx_sec_pdu *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ void *pos;
+ u64 *end;
+
+ /* get the number of flits required */
+ flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+ /* number of descriptors */
+ ndesc = chcr_flits_to_desc(flits);
+ /* check if enough credits available */
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		/* Credits are below the threshold value, stop the queue after
+		 * injecting the Work Request for this packet.
+		 */
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
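+	/* the software descriptor of the last hardware descriptor used by
+	 * this WR keeps the DMA unmap addresses and, later, the skb.
+	 */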
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = pos;
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ /* idata length will include cpl_tx_sec_pdu + key context size +
+ * cpl_tx_data header.
+ */
+ idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
+ sizeof(*tx_data));
+ /* SEC CPL */
+ cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
+ cpl->op_ivinsrtofst =
+ htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
+ cpl->pldlen = htonl(skb->data_len);
+
+ /* encryption should start after tls header size + iv size */
+ cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
+
+ cpl->aadstart_cipherstop_hi =
+ htonl(CPL_TX_SEC_PDU_AADSTART_V(1) |
+ CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) |
+ CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
+
+ /* authentication will also start after tls header + iv size */
+ cpl->cipherstop_lo_authinsert =
+ htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) |
+ CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) |
+ CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE));
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs);
+ cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen);
+ cpl->scmd1 = cpu_to_be64(tx_info->record_no);
+
+ pos = cpl + 1;
+ /* check if space left to fill the keys */
+ left = (void *)q->q.stat - pos;
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
+ tx_info->key_ctx_len);
+ left = (void *)q->q.stat - pos;
+
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* CPL_TX_DATA */
+ tx_data = (void *)pos;
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
+
+ tx_data->rsvd = htonl(tcp_seq);
+
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ /* check left again, it might go beyond queue limit */
+ pos = tx_data + 1;
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ /* send the complete packet except the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ atomic64_inc(&adap->chcr_stats.ktls_tx_send_records);
+
+ return 0;
+}
+
+/*
+ * chcr_ktls_xmit_wr_short: This is to send out partial records. If it's
+ * the middle part of a record, fetch the prior data to make it 16 byte
+ * aligned and only then send it out.
+ *
+ * @skb - skb containing the partial record.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq
+ * @tcp_push - tcp push bit.
+ * @mss - segment size.
+ * @tls_rec_offset - offset from start of the tls record.
+ * @prior_data - data before the current segment, required to make this record
+ * 16 byte aligned.
+ * @prior_data_len - prior_data length (less than 16)
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q,
+ u32 tcp_seq, bool tcp_push, u32 mss,
+ u32 tls_rec_offset, u8 *prior_data,
+ u32 prior_data_len)
+{
+ struct adapter *adap = tx_info->adap;
+ u32 len16, wr_mid = 0, cipher_start;
+ unsigned int flits = 0, ndesc;
+ int credits, left, last_desc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct cpl_tx_sec_pdu *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ __be64 iv_record;
+ void *pos;
+ u64 *end;
+
+	/* get the number of flits required; it's a partial record, so 2
+	 * extra flits (AES_BLOCK_SIZE) will be added.
+	 */
+ flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+ /* get the correct 8 byte IV of this record */
+ iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
+	/* If it's a middle record and not 16 byte aligned for AES-CTR, we
+	 * need to make it 16 byte aligned, so at least 2 extra flits of
+	 * immediate data will be added.
+	 */
+ if (prior_data_len)
+ flits += 2;
+ /* number of descriptors */
+ ndesc = chcr_flits_to_desc(flits);
+ /* check if enough credits available */
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = pos;
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ /* idata length will include cpl_tx_sec_pdu + key context size +
+ * cpl_tx_data header.
+ */
+ idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
+ sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len);
+ /* SEC CPL */
+ cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
+	/* Cipher start has tls header + iv size extra if this is the header
+	 * part of a tls record; otherwise only the 16 byte IV is added.
+	 */
+ cipher_start =
+ AES_BLOCK_LEN + 1 +
+ (!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0);
+
+ cpl->op_ivinsrtofst =
+ htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
+ cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
+ cpl->aadstart_cipherstop_hi =
+ htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
+ cpl->cipherstop_lo_authinsert = 0;
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs);
+ cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen);
+ cpl->scmd1 = 0;
+
+ pos = cpl + 1;
+ /* check if space left to fill the keys */
+ left = (void *)q->q.stat - pos;
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
+ tx_info->key_ctx_len);
+ left = (void *)q->q.stat - pos;
+
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* CPL_TX_DATA */
+ tx_data = (void *)pos;
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) |
+ TX_LENGTH_V(skb->data_len + prior_data_len));
+ tx_data->rsvd = htonl(tcp_seq);
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ /* check left again, it might go beyond queue limit */
+ pos = tx_data + 1;
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+	/* Copy the 16 byte IV for AES-CTR: 4 bytes of salt, 8 bytes of the
+	 * record IV and a 4 byte block counter.
+	 */
+ memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size);
+ memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size);
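+	/* The payload block counter starts at 2 (GCM reserves counter 1 for
+	 * the tag); for a middle record, add the AES blocks already consumed.
+	 */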
+ *(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) =
+ htonl(2 + (tls_rec_offset ? ((tls_rec_offset -
+ (TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0));
+
+ pos += 16;
+	/* prior_data_len will always be less than 16 bytes; copy prior_data
+	 * in after the AES CTR block and clear the remaining bytes to 0.
+	 */
+ if (prior_data_len)
+ pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
+ /* send the complete packet except the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+
+ return 0;
+}
+
+/*
+ * chcr_ktls_tx_plaintxt: This handler takes care of records which have
+ * only plain text (only the tls header and iv).
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing the partial record.
+ * @tcp_seq
+ * @mss - segment size.
+ * @tcp_push - tcp push bit.
+ * @q - TX queue.
+ * @port_id - port number.
+ * @prior_data - data before the current segment, required to make this record
+ * 16 byte aligned.
+ * @prior_data_len - prior_data length (less than 16)
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb, u32 tcp_seq, u32 mss,
+ bool tcp_push, struct sge_eth_txq *q,
+ u32 port_id, u8 *prior_data,
+ u32 prior_data_len)
+{
+ int credits, left, len16, last_desc;
+ unsigned int flits = 0, ndesc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ u32 wr_mid = 0;
+ void *pos;
+ u64 *end;
+
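+	/* flits for the immediate headers (WR + ULP_TXPKT + ULPTX_IDATA +
+	 * CPL_TX_DATA) plus the SGL covering the payload frags.
+	 */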
+ flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
+ flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+ if (prior_data_len)
+ flits += 2;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(flits, 8);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = (struct ulp_txpkt *)(wr + 1);
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_DATAMODIFY_V(0) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_DEST_V(0) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ idata->len = htonl(sizeof(*tx_data) + prior_data_len);
+ /* CPL_TX_DATA */
+ tx_data = (struct cpl_tx_data *)(idata + 1);
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) |
+ TX_LENGTH_V(skb->data_len + prior_data_len));
+ /* set tcp seq number */
+ tx_data->rsvd = htonl(tcp_seq);
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ pos = tx_data + 1;
+	/* Apart from the prior_data_len bytes, the remaining part of the 16
+	 * bytes must be zero; prior_data is zero-initialized by the caller.
+	 */
+ if (prior_data_len)
+ pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
+
+ /* check left again, it might go beyond queue limit */
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* send the complete packet including the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
+/*
+ * chcr_ktls_copy_record_in_skb
+ * @nskb - new skb to which the frags are to be added.
+ * @record - record which holds the complete 16K record in frags.
+ */
+static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ struct tls_record_info *record)
+{
+ int i = 0;
+
+ for (i = 0; i < record->num_frags; i++) {
+ skb_shinfo(nskb)->frags[i] = record->frags[i];
+ /* increase the frag ref count */
+ __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+ }
+
+ skb_shinfo(nskb)->nr_frags = record->num_frags;
+ nskb->data_len = record->len;
+ nskb->len += record->len;
+ nskb->truesize += record->len;
+}
+
+/*
+ * chcr_ktls_update_snd_una: Reset SND_UNA. This is done to avoid sending the
+ * same segment again; the segment before the current tx max gets discarded.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * return: NET_TX_OK/NET_XMIT_DROP.
+ */
+static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q)
+{
+ struct fw_ulptx_wr *wr;
+ unsigned int ndesc;
+ int credits;
+ void *pos;
+ u32 len;
+
+ len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ ndesc = DIV_ROUND_UP(len, 64);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+
+ wr = pos;
+ /* ULPTX wr */
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->cookie = 0;
+ /* fill len in wr field */
+ wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+
+ pos += sizeof(*wr);
+
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_SND_UNA_RAW_W,
+ TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+ TCB_SND_UNA_RAW_V(0), 0);
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+
+ return 0;
+}
+
+/*
+ * chcr_end_part_handler: This handler handles a record which is complete, or
+ * whose end part has been received. The T6 adapter has an issue in that it
+ * can't send out the TAG with a partial record, so for an end part we have to
+ * send the TAG as well, for which we need to fetch the complete record and
+ * send it to the crypto module.
+ * @tx_info - driver specific tls info.
+ * @skb - skb contains partial record.
+ * @record - complete record of 16K size.
+ * @tcp_seq
+ * @mss - segment size in which TP needs to chop a packet.
+ * @tcp_push_no_fin - tcp push if fin is not set.
+ * @q - TX queue.
+ * @tls_end_offset - offset from end of the record.
+ * @last_wr - true if this is the last part of the skb going out.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct tls_record_info *record,
+ u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ struct sge_eth_txq *q,
+ u32 tls_end_offset, bool last_wr)
+{
+ struct sk_buff *nskb = NULL;
+ /* check if it is a complete record */
+ if (tls_end_offset == record->len) {
+ nskb = skb;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_complete_pkts);
+ } else {
+ dev_kfree_skb_any(skb);
+
+ nskb = alloc_skb(0, GFP_KERNEL);
+ if (!nskb)
+ return NETDEV_TX_BUSY;
+ /* copy complete record in skb */
+ chcr_ktls_copy_record_in_skb(nskb, record);
+ /* packet is being sent from the beginning, update the tcp_seq
+ * accordingly.
+ */
+ tcp_seq = tls_record_start_seq(record);
+ /* reset snd una, so the middle record won't send the already
+ * sent part.
+ */
+ if (chcr_ktls_update_snd_una(tx_info, q))
+ goto out;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_end_pkts);
+ }
+
+ if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+ (last_wr && tcp_push_no_fin),
+ mss)) {
+ goto out;
+ }
+ return 0;
+out:
+ dev_kfree_skb_any(nskb);
+ return NETDEV_TX_BUSY;
+}
+
+/*
+ * chcr_short_record_handler: This handler takes care of records which don't
+ * have the end part (the first part or the middle part(s) of a record). In
+ * such cases AES-CTR is used in place of AES-GCM to send out the partial
+ * packet. For a middle record we should fetch the prior data to make it 16
+ * byte aligned. If the skb has a partial tls header or iv, go back to the
+ * start of the tls header; and if it has a partial TAG, remove the complete
+ * TAG and send only the payload.
+ * One more possibility is receiving a partial header only; send that portion
+ * out as plaintext.
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing the partial record.
+ * @record - complete record of 16K size.
+ * @tcp_seq
+ * @mss - segment size in which TP needs to chop a packet.
+ * @tcp_push_no_fin - tcp push if fin is not set.
+ * @q - TX queue.
+ * @tls_end_offset - offset from end of the record.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct tls_record_info *record,
+ u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ struct sge_eth_txq *q, u32 tls_end_offset)
+{
+ u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
+ u8 prior_data[16] = {0};
+ u32 prior_data_len = 0;
+ u32 data_len;
+
+	/* If the skb ends in the middle of the TAG/HASH, that is trouble;
+	 * send only the part of the packet before the HASH.
+	 */
+ int remaining_record = tls_end_offset - skb->data_len;
+
+ if (remaining_record > 0 &&
+ remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
+ int trimmed_len = skb->data_len -
+ (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
+ struct sk_buff *tmp_skb = NULL;
+ /* don't process the pkt if it is only a partial tag */
+ if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ goto out;
+
+ WARN_ON(trimmed_len > skb->data_len);
+
+ /* shift the first trimmed_len bytes into a fresh skb */
+ tmp_skb = alloc_skb(0, GFP_KERNEL);
+ if (unlikely(!tmp_skb))
+ goto out;
+
+ chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
+ /* free the original skb; the trimmed-off tail goes with it */
+ dev_kfree_skb_any(skb);
+ skb = tmp_skb;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_trimmed_pkts);
+ }
+ data_len = skb->data_len;
+ /* check if the middle record's start point is 16-byte aligned. CTR
+ * needs a 16-byte aligned start point to begin encryption.
+ */
+ if (tls_rec_offset) {
+ /* there is an offset from the start, so it's a middle record */
+ int remaining = 0;
+
+ if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) {
+ prior_data_len = tls_rec_offset;
+ tls_rec_offset = 0;
+ remaining = 0;
+ } else {
+ prior_data_len =
+ (tls_rec_offset -
+ (TLS_HEADER_SIZE + tx_info->iv_size))
+ % AES_BLOCK_LEN;
+ remaining = tls_rec_offset - prior_data_len;
+ }
+
+ /* if prior_data_len is non-zero, we need to fetch prior data to make
+ * this record 16-byte aligned, or to reach the start offset.
+ */
+ if (prior_data_len) {
+ int i = 0;
+ u8 *data = NULL;
+ skb_frag_t *f;
+ u8 *vaddr;
+ int frag_size = 0, frag_delta = 0;
+
+ while (remaining > 0) {
+ frag_size = skb_frag_size(&record->frags[i]);
+ if (remaining < frag_size)
+ break;
+
+ remaining -= frag_size;
+ i++;
+ }
+ f = &record->frags[i];
+ vaddr = kmap_atomic(skb_frag_page(f));
+
+ data = vaddr + skb_frag_off(f) + remaining;
+ frag_delta = skb_frag_size(f) - remaining;
+
+ if (frag_delta >= prior_data_len) {
+ memcpy(prior_data, data, prior_data_len);
+ kunmap_atomic(vaddr);
+ } else {
+ memcpy(prior_data, data, frag_delta);
+ kunmap_atomic(vaddr);
+ /* get the next page */
+ f = &record->frags[i + 1];
+ vaddr = kmap_atomic(skb_frag_page(f));
+ data = vaddr + skb_frag_off(f);
+ memcpy(prior_data + frag_delta,
+ data, (prior_data_len - frag_delta));
+ kunmap_atomic(vaddr);
+ }
+ /* rewind tcp_seq by the prior data length */
+ tcp_seq -= prior_data_len;
+ /* include prior_data_len in further calculations */
+ data_len += prior_data_len;
+ }
+ /* reset snd_una, so the middle record won't resend the part that
+ * was already sent.
+ */
+ if (chcr_ktls_update_snd_una(tx_info, q))
+ goto out;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_middle_pkts);
+ } else {
+ /* Otherwise it's a partial first part of the record. If it's only
+ * the header, there is no need to send it for encryption.
+ */
+ if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
+ if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ tcp_push_no_fin, q,
+ tx_info->port_id,
+ prior_data,
+ prior_data_len)) {
+ goto out;
+ }
+ return 0;
+ }
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_start_pkts);
+ }
+
+ if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
+ mss, tls_rec_offset, prior_data,
+ prior_data_len)) {
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_BUSY;
+}
+
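The modulo arithmetic above is easiest to see with concrete numbers; a worked example as a comment, assuming TLS_HEADER_SIZE = 5, an 8-byte explicit IV and AES_BLOCK_LEN = 16:

/* Worked example of the prior-data alignment in chcr_short_record_handler():
 *
 *   tls_rec_offset = 100                    (offset into the record)
 *   header + iv    = 5 + 8 = 13
 *   prior_data_len = (100 - 13) % 16 = 7    (bytes to fetch from the record)
 *   remaining      = 100 - 7 = 93           (where the fragment walk stops)
 *
 * 93 - 13 = 80 ciphertext bytes, a whole number of AES blocks, so AES-CTR
 * can resume there once the 7 prior bytes are prepended and tcp_seq is
 * rewound by prior_data_len.
 */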
+/* NIC TLS TX handler */
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tcphdr *th = tcp_hdr(skb);
+ int data_len, qidx, ret = 0, mss;
+ struct tls_record_info *record;
+ struct chcr_stats_debug *stats;
+ struct chcr_ktls_info *tx_info;
+ u32 tls_end_offset, tcp_seq;
+ struct tls_context *tls_ctx;
+ struct sk_buff *local_skb;
+ int new_connection_state;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ unsigned long flags;
+
+ tcp_seq = ntohl(th->seq);
+
+ mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
+
+ /* skip if the socket is not set up for ktls offload */
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ goto out;
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (unlikely(tls_ctx->netdev != dev))
+ goto out;
+
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ tx_info = tx_ctx->chcr_info;
+
+ if (unlikely(!tx_info))
+ goto out;
+
+ /* Check the connection state; we don't need to pass a new connection
+ * state, the state machine will check and update the state if it is
+ * stuck because responses were not received from the HW.
+ * Start TX handling only if the state is KTLS_CONN_TX_READY.
+ */
+ new_connection_state = chcr_ktls_update_connection_state(tx_info, 0);
+ if (new_connection_state != KTLS_CONN_TX_READY)
+ goto out;
+
+ /* don't touch the original skb; make a new skb to extract each record
+ * and send the records separately.
+ */
+ local_skb = alloc_skb(0, GFP_KERNEL);
+
+ if (unlikely(!local_skb))
+ return NETDEV_TX_BUSY;
+
+ adap = tx_info->adap;
+ stats = &adap->chcr_stats;
+
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+ /* if tcp options are set but FIN is not, send the options first */
+ if (!th->fin && chcr_ktls_check_tcp_options(th)) {
+ ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
+ tx_info->tx_chan);
+ if (ret)
+ return NETDEV_TX_BUSY;
+ }
+ /* update tcb */
+ ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
+ ntohl(th->ack_seq),
+ ntohs(th->window));
+ if (ret) {
+ dev_kfree_skb_any(local_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* copy skb contents into local skb */
+ chcr_ktls_skb_copy(skb, local_skb);
+
+ /* go through the skb and send only one record at a time. */
+ data_len = skb->data_len;
+ /* TCP segments can be received either complete or partial.
+ * chcr_end_part_handler handles the cases where a complete record or
+ * the end part of a record is received. In case of a partial end part
+ * of a record, we send the complete record again.
+ */
+
+ do {
+ int i;
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+ /* lock taken */
+ spin_lock_irqsave(&tx_ctx->base.lock, flags);
+ /* fetch the tls record */
+ record = tls_get_record(&tx_ctx->base, tcp_seq,
+ &tx_info->record_no);
+ /* By the time the packet reached us, the ACK may already have been
+ * received and the record freed; it won't be found in that case, so
+ * handle it gracefully.
+ */
+ if (unlikely(!record)) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
+ goto out;
+ }
+
+ if (unlikely(tls_record_is_start_marker(record))) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
+ goto out;
+ }
+
+ /* take a reference on each page of the record, so there is no
+ * chance of a page being freed under us if the stack receives an
+ * ACK and tries to delete the record.
+ */
+ for (i = 0; i < record->num_frags; i++)
+ __skb_frag_ref(&record->frags[i]);
+ /* lock released */
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+
+ tls_end_offset = record->end_seq - tcp_seq;
+
+ pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
+ tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
+ /* if a tls record is finishing in this SKB */
+ if (tls_end_offset <= data_len) {
+ struct sk_buff *nskb = NULL;
+
+ if (tls_end_offset < data_len) {
+ nskb = alloc_skb(0, GFP_KERNEL);
+ if (unlikely(!nskb)) {
+ ret = -ENOMEM;
+ goto clear_ref;
+ }
+
+ chcr_ktls_skb_shift(nskb, local_skb,
+ tls_end_offset);
+ } else {
+ /* it's the only record in this skb, so point to it
+ * directly.
+ */
+ nskb = local_skb;
+ }
+ ret = chcr_end_part_handler(tx_info, nskb, record,
+ tcp_seq, mss,
+ (!th->fin && th->psh), q,
+ tls_end_offset,
+ (nskb == local_skb));
+
+ if (ret && nskb != local_skb)
+ dev_kfree_skb_any(local_skb);
+
+ data_len -= tls_end_offset;
+ /* advance tcp_seq to handle the next record */
+ tcp_seq += tls_end_offset;
+ } else {
+ ret = chcr_short_record_handler(tx_info, local_skb,
+ record, tcp_seq, mss,
+ (!th->fin && th->psh),
+ q, tls_end_offset);
+ data_len = 0;
+ }
+clear_ref:
+ /* drop the fragment references taken above */
+ for (i = 0; i < record->num_frags; i++)
+ __skb_frag_unref(&record->frags[i]);
+ /* on any failure, break out of the loop */
+ if (ret)
+ goto out;
+ /* length should never be less than 0 */
+ WARN_ON(data_len < 0);
+
+ } while (data_len > 0);
+
+ tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
+
+ atomic64_inc(&stats->ktls_tx_encrypted_packets);
+ atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
+
+ /* TCP FIN is set, so send a separate TCP msg including all the
+ * options as well.
+ */
+ if (th->fin)
+ chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+
+out:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
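The xmit loop above follows a strict lock/ref/unlock discipline: the record-list lock is held only for lookup and page pinning, never across the (potentially slow) hardware submission. Distilled into a hypothetical helper, with error paths simplified:

/* per-record discipline used by chcr_ktls_xmit() above */
static int chcr_process_one_record(struct chcr_ktls_ofld_ctx_tx *tx_ctx,
				   struct chcr_ktls_info *tx_info, u32 tcp_seq)
{
	struct tls_record_info *record;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tx_ctx->base.lock, flags);
	record = tls_get_record(&tx_ctx->base, tcp_seq, &tx_info->record_no);
	if (!record) {
		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
		return -ENOENT;
	}
	for (i = 0; i < record->num_frags; i++)
		__skb_frag_ref(&record->frags[i]);	/* pin the pages */
	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);

	/* ... build and post work requests without the lock held ... */

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i]);	/* unpin */

	return 0;
}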
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/crypto/chelsio/chcr_ktls.h
new file mode 100644
index 000000000000..5a7ae2ca446e
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ktls.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifndef __CHCR_KTLS_H__
+#define __CHCR_KTLS_H__
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#include <net/tls.h>
+#include "cxgb4.h"
+#include "t4_msg.h"
+#include "t4_tcb.h"
+#include "l2t.h"
+#include "chcr_common.h"
+#include "cxgb4_uld.h"
+
+#define CHCR_TCB_STATE_CLOSED 0
+#define CHCR_KTLS_KEY_CTX_LEN 16
+#define CHCR_SET_TCB_FIELD_LEN sizeof(struct cpl_set_tcb_field)
+#define CHCR_PLAIN_TX_DATA_LEN (sizeof(struct fw_ulptx_wr) +\
+ sizeof(struct ulp_txpkt) +\
+ sizeof(struct ulptx_idata) +\
+ sizeof(struct cpl_tx_data))
+
+#define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
+ sizeof(struct cpl_tx_sec_pdu))
+
+enum chcr_ktls_conn_state {
+ KTLS_CONN_CLOSED,
+ KTLS_CONN_ACT_OPEN_REQ,
+ KTLS_CONN_ACT_OPEN_RPL,
+ KTLS_CONN_SET_TCB_REQ,
+ KTLS_CONN_SET_TCB_RPL,
+ KTLS_CONN_TX_READY,
+};
+
+struct chcr_ktls_info {
+ struct sock *sk;
+ spinlock_t lock; /* state machine lock */
+ struct ktls_key_ctx key_ctx;
+ struct adapter *adap;
+ struct l2t_entry *l2te;
+ struct net_device *netdev;
+ u64 iv;
+ u64 record_no;
+ int tid;
+ int atid;
+ int rx_qid;
+ u32 iv_size;
+ u32 prev_seq;
+ u32 prev_ack;
+ u32 salt_size;
+ u32 key_ctx_len;
+ u32 scmd0_seqno_numivs;
+ u32 scmd0_ivgen_hdrlen;
+ u32 tcp_start_seq_number;
+ u32 scmd0_short_seqno_numivs;
+ u32 scmd0_short_ivgen_hdrlen;
+ enum chcr_ktls_conn_state connection_state;
+ u16 prev_win;
+ u8 tx_chan;
+ u8 smt_idx;
+ u8 port_id;
+ u8 ip_family;
+ u8 first_qset;
+};
+
+struct chcr_ktls_ofld_ctx_tx {
+ struct tls_offload_context_tx base;
+ struct chcr_ktls_info *chcr_info;
+};
+
+static inline struct chcr_ktls_ofld_ctx_tx *
+chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ return container_of(tls_offload_ctx_tx(tls_ctx),
+ struct chcr_ktls_ofld_ctx_tx,
+ base);
+}
+
+static inline int chcr_get_first_rx_qid(struct adapter *adap)
+{
+ /* u_ctx is saved in adap, fetch it */
+ struct uld_ctx *u_ctx = adap->uld[CXGB4_ULD_CRYPTO].handle;
+
+ if (!u_ctx)
+ return -1;
+ return u_ctx->lldi.rxq_ids[0];
+}
+
+void chcr_enable_ktls(struct adapter *adap);
+void chcr_disable_ktls(struct adapter *adap);
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#endif /* __CHCR_KTLS_H__ */
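chcr_get_ktls_tx_context() above is the usual container_of embedding trick, with the BUILD_BUG_ON guarding the space the TLS core reserves for driver state. A generic sketch of the pattern, with hypothetical names:

#include <linux/kernel.h>	/* container_of() */

struct core_ctx { int state; };		/* owned by the core */

struct drv_ctx {
	struct core_ctx base;		/* must fit in the core's reserved area */
	void *priv;			/* driver additions */
};

static inline struct drv_ctx *to_drv_ctx(struct core_ctx *c)
{
	return container_of(c, struct drv_ctx, base);
}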
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 9b2745ad9e38..d5720a859443 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -445,6 +445,7 @@ void chtls_destroy_sock(struct sock *sk)
chtls_purge_write_queue(sk);
free_tls_keyid(sk);
kref_put(&csk->kref, chtls_sock_release);
+ csk->cdev = NULL;
sk->sk_prot = &tcp_prot;
sk->sk_prot->destroy(sk);
}
@@ -759,8 +760,10 @@ static void chtls_release_resources(struct sock *sk)
csk->l2t_entry = NULL;
}
- cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
- sock_put(sk);
+ if (sk->sk_state != TCP_SYN_SENT) {
+ cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
+ sock_put(sk);
+ }
}
static void chtls_conn_done(struct sock *sk)
@@ -1716,6 +1719,9 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+ goto out;
+
sk->sk_shutdown |= RCV_SHUTDOWN;
sock_set_flag(sk, SOCK_DONE);
@@ -1748,6 +1754,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+out:
kfree_skb(skb);
}
@@ -1758,6 +1765,10 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp;
csk = rcu_dereference_sk_user_data(sk);
+
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+ goto out;
+
tp = tcp_sk(sk);
tp->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
@@ -1787,6 +1798,7 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
default:
pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
}
+out:
kfree_skb(skb);
}
@@ -1896,6 +1908,7 @@ static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
}
set_abort_rpl_wr(reply_skb, tid, status);
+ kfree_skb(skb);
set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
if (csk_conn_inline(csk)) {
struct l2t_entry *e = csk->l2t_entry;
@@ -1906,7 +1919,6 @@ static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
}
}
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
- kfree_skb(skb);
}
/*
@@ -2008,7 +2020,8 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
chtls_conn_done(sk);
}
- chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
+ chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
+ rst_status, queue);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
@@ -2042,6 +2055,7 @@ static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
void (*fn)(struct sock *sk, struct sk_buff *skb);
unsigned int hwtid = GET_TID(req);
+ struct chtls_sock *csk;
struct sock *sk;
u8 opcode;
@@ -2051,6 +2065,8 @@ static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (!sk)
goto rel_skb;
+ csk = sk->sk_user_data;
+
switch (opcode) {
case CPL_PEER_CLOSE:
fn = chtls_peer_close;
@@ -2059,6 +2075,11 @@ static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
fn = chtls_close_con_rpl;
break;
case CPL_ABORT_REQ_RSS:
+ /*
+ * Save the offload device in the skb; we may process this
+ * message after the socket has closed.
+ */
+ BLOG_SKB_CB(skb)->cdev = csk->cdev;
fn = chtls_abort_req_rss;
break;
case CPL_ABORT_RPL_RSS:
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 5cf9b021220b..e1401d9cc756 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
make_tx_data_wr(sk, skb, immdlen, len,
credits_needed, completion);
tp->snd_nxt += len;
- tp->lsndtime = tcp_time_stamp(tp);
+ tp->lsndtime = tcp_jiffies32;
if (completion)
ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
} else {
@@ -902,14 +902,6 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
return 0;
}
-/* Read TLS header to find content type and data length */
-static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
-{
- if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
- return -EFAULT;
- return (__force int)cpu_to_be16(thdr->length);
-}
-
static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
@@ -981,6 +973,37 @@ do_interrupted:
goto do_rm_wq;
}
+static int chtls_process_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type)
+{
+ struct cmsghdr *cmsg;
+ int rc = -EINVAL;
+
+ for_each_cmsghdr(cmsg, msg) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+ if (cmsg->cmsg_level != SOL_TLS)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case TLS_SET_RECORD_TYPE:
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
+ return -EINVAL;
+
+ if (msg->msg_flags & MSG_MORE)
+ return -EINVAL;
+
+ *record_type = *(unsigned char *)CMSG_DATA(cmsg);
+ rc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
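chtls_process_cmsg() is the receive side of the standard kTLS control-message interface; the userspace counterpart, per the kernel's TLS documentation, looks roughly like the sketch below (note the handler above rejects MSG_MORE together with a record type; error handling elided):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/tls.h>

/* send one non-data TLS record (e.g. an alert) on a kTLS socket */
static ssize_t tls_send_ctrl(int fd, unsigned char record_type,
			     const void *data, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_TLS;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
	*CMSG_DATA(cmsg) = record_type;

	return sendmsg(fd, &msg, 0);
}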
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -1022,15 +1045,21 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
goto wait_for_sndbuf;
if (is_tls_tx(csk) && !csk->tlshws.txleft) {
- struct tls_hdr hdr;
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
- recordsz = tls_header_read(&hdr, &msg->msg_iter);
- size -= TLS_HEADER_LENGTH;
- copied += TLS_HEADER_LENGTH;
+ if (unlikely(msg->msg_controllen)) {
+ err = chtls_process_cmsg(sk, msg,
+ &record_type);
+ if (err)
+ goto out_err;
+ }
+
+ recordsz = size;
csk->tlshws.txleft = recordsz;
- csk->tlshws.type = hdr.type;
+ csk->tlshws.type = record_type;
+
if (skb)
- ULP_SKB_CB(skb)->ulp.tls.type = hdr.type;
+ ULP_SKB_CB(skb)->ulp.tls.type = record_type;
}
if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
@@ -1081,10 +1110,10 @@ new_buf:
pg_size = page_size(page);
if (off < pg_size &&
skb_can_coalesce(skb, i, page, off)) {
- merge = 1;
+ merge = true;
goto copy;
}
- merge = 0;
+ merge = false;
if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
MAX_SKB_FRAGS))
goto new_buf;
@@ -1399,6 +1428,8 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct chtls_hws *hws = &csk->tlshws;
+ struct net_device *dev = csk->egress_dev;
+ struct adapter *adap = netdev2adap(dev);
struct tcp_sock *tp = tcp_sk(sk);
unsigned long avail;
int buffers_freed;
@@ -1521,6 +1552,22 @@ found_ok_skb:
}
}
}
+ /* Set record type if not already done. For a non-data record,
+ * do not proceed if record type could not be copied.
+ */
+ if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+ struct tls_hdr *thdr = (struct tls_hdr *)skb->data;
+ int cerr = 0;
+
+ cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
+ sizeof(thdr->type), &thdr->type);
+
+ if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
+ return -EIO;
+ /* don't send tls header, skip copy */
+ goto skip_copy;
+ }
+
if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
if (!copied) {
copied = -EFAULT;
@@ -1540,6 +1587,7 @@ skip_copy:
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
} else {
+ atomic_inc(&adap->chcr_stats.tls_pdu_rx);
tp->copied_seq += hws->rcvpld;
}
chtls_free_skb(sk, skb);
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index a038de90b2ea..2110d0893bc7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -174,9 +174,16 @@ static inline void chtls_dev_release(struct kref *kref)
{
struct tls_toe_device *dev;
struct chtls_dev *cdev;
+ struct adapter *adap;
dev = container_of(kref, struct tls_toe_device, kref);
cdev = to_chtls_dev(dev);
+
+ /* Reset tls rx/tx stats */
+ adap = pci_get_drvdata(cdev->pdev);
+ atomic_set(&adap->chcr_stats.tls_pdu_tx, 0);
+ atomic_set(&adap->chcr_stats.tls_pdu_rx, 0);
+
chtls_free_uld(cdev);
}
@@ -229,8 +236,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info)
struct chtls_dev *cdev;
int i, j;
- cdev = kzalloc(sizeof(*cdev) + info->nports *
- (sizeof(struct net_device *)), GFP_KERNEL);
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
goto out;
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 8851161f722f..f09c6cf7823e 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -27,6 +27,7 @@ config CRYPTO_DEV_HISI_SEC2
select CRYPTO_SHA256
select CRYPTO_SHA512
depends on PCI && PCI_MSI
+ depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
@@ -40,6 +41,7 @@ config CRYPTO_DEV_HISI_QM
tristate
depends on ARM64 || COMPILE_TEST
depends on PCI && PCI_MSI
+ depends on UACCE || UACCE=n
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine driver may use this module.
@@ -49,6 +51,7 @@ config CRYPTO_DEV_HISI_ZIP
depends on PCI && PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
+ depends on UACCE || UACCE=n
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon ZIP Driver
@@ -56,6 +59,7 @@ config CRYPTO_DEV_HISI_ZIP
config CRYPTO_DEV_HISI_HPRE
tristate "Support for HISI HPRE accelerator"
depends on PCI && PCI_MSI
+ depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index ddf13ea9862a..03d512ec6336 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -46,7 +46,6 @@ struct hpre_debug {
struct hpre {
struct hisi_qm qm;
- struct list_head list;
struct hpre_debug debug;
u32 num_vfs;
unsigned long status;
@@ -76,7 +75,7 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hpre *hpre_find_device(int node);
+struct hisi_qp *hpre_create_qp(void);
int hpre_algs_register(void);
void hpre_algs_unregister(void);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 5d400d69e8e4..65425250b2e9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -147,26 +147,18 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
static struct hisi_qp *hpre_get_qp_and_start(void)
{
struct hisi_qp *qp;
- struct hpre *hpre;
int ret;
- /* find the proper hpre device, which is near the current CPU core */
- hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
- if (!hpre) {
- pr_err("Can not find proper hpre device!\n");
- return ERR_PTR(-ENODEV);
- }
-
- qp = hisi_qm_create_qp(&hpre->qm, 0);
- if (IS_ERR(qp)) {
- pci_err(hpre->qm.pdev, "Can not create qp!\n");
+ qp = hpre_create_qp();
+ if (!qp) {
+ pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
}
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
- hisi_qm_release_qp(qp);
- pci_err(hpre->qm.pdev, "Can not start qp!\n");
+ hisi_qm_free_qps(&qp, 1);
+ pci_err(qp->qm->pdev, "Can not start qp!\n");
return ERR_PTR(-EINVAL);
}
@@ -338,7 +330,7 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
if (is_clear_all) {
idr_destroy(&ctx->req_idr);
kfree(ctx->req_list);
- hisi_qm_release_qp(ctx->qp);
+ hisi_qm_free_qps(&ctx->qp, 1);
}
ctx->crt_g2_mode = false;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 401747de67a8..88be53bf4a38 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -82,8 +82,7 @@
#define HPRE_VIA_MSI_DSM 1
-static LIST_HEAD(hpre_list);
-static DEFINE_MUTEX(hpre_list_lock);
+static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -196,43 +195,17 @@ static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)");
-static inline void hpre_add_to_list(struct hpre *hpre)
+struct hisi_qp *hpre_create_qp(void)
{
- mutex_lock(&hpre_list_lock);
- list_add_tail(&hpre->list, &hpre_list);
- mutex_unlock(&hpre_list_lock);
-}
-
-static inline void hpre_remove_from_list(struct hpre *hpre)
-{
- mutex_lock(&hpre_list_lock);
- list_del(&hpre->list);
- mutex_unlock(&hpre_list_lock);
-}
+ int node = cpu_to_node(smp_processor_id());
+ struct hisi_qp *qp = NULL;
+ int ret;
-struct hpre *hpre_find_device(int node)
-{
- struct hpre *hpre, *ret = NULL;
- int min_distance = INT_MAX;
- struct device *dev;
- int dev_node = 0;
-
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = &hpre->qm.pdev->dev;
-#ifdef CONFIG_NUMA
- dev_node = dev->numa_node;
- if (dev_node < 0)
- dev_node = 0;
-#endif
- if (node_distance(dev_node, node) < min_distance) {
- ret = hpre;
- min_distance = node_distance(dev_node, node);
- }
- }
- mutex_unlock(&hpre_list_lock);
+ ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
+ if (!ret)
+ return qp;
- return ret;
+ return NULL;
}
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
@@ -349,18 +322,14 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hpre_hw_error_disable(struct hpre *hpre)
+static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
-
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
}
-static void hpre_hw_error_enable(struct hpre *hpre)
+static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
-
/* enable hpre hw error interrupts */
writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
@@ -713,13 +682,39 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
return 0;
}
-static void hpre_hw_err_init(struct hpre *hpre)
+static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE,
- 0, QM_DB_RANDOM_INVALID);
- hpre_hw_error_enable(hpre);
+ const struct hpre_hw_error *err = hpre_hw_errors;
+ struct device *dev = &qm->pdev->dev;
+
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+ err++;
+ }
+
+ writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+}
+
+static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}
+static const struct hisi_qm_err_ini hpre_err_ini = {
+ .hw_err_enable = hpre_hw_error_enable,
+ .hw_err_disable = hpre_hw_error_disable,
+ .get_dev_hw_err_status = hpre_get_hw_err_status,
+ .log_dev_hw_err = hpre_log_hw_error,
+ .err_info = {
+ .ce = QM_BASE_CE,
+ .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
+ .fe = 0,
+ .msi = QM_DB_RANDOM_INVALID,
+ }
+};
+
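The table above plugs HPRE into the shared QM error path through struct hisi_qm_err_ini (added to qm.h later in this patch). A minimal sketch for a hypothetical engine, with made-up register names:

/* sketch: wiring a hypothetical engine into the shared QM error path */
static void foo_hw_error_enable(struct hisi_qm *qm)
{
	writel(FOO_CORE_INT_ENABLE, qm->io_base + FOO_INT_MASK); /* hypothetical */
}

static void foo_hw_error_disable(struct hisi_qm *qm)
{
	writel(FOO_CORE_INT_DISABLE, qm->io_base + FOO_INT_MASK);
}

static u32 foo_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + FOO_HAC_INT_STATUS);
}

static void foo_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	dev_warn(&qm->pdev->dev, "err status 0x%x\n", err_sts);
}

static const struct hisi_qm_err_ini foo_err_ini = {
	.hw_err_enable		= foo_hw_error_enable,
	.hw_err_disable		= foo_hw_error_disable,
	.get_dev_hw_err_status	= foo_get_hw_err_status,
	.log_dev_hw_err		= foo_log_hw_error,
	.err_info = {
		.ce  = QM_BASE_CE,
		.nfe = QM_BASE_NFE,
		.fe  = 0,
		.msi = QM_DB_RANDOM_INVALID,
	},
};

/* in probe: qm->err_ini = &foo_err_ini; hisi_qm_dev_err_init(qm); */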
static int hpre_pf_probe_init(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
@@ -731,7 +726,8 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
- hpre_hw_err_init(hpre);
+ qm->err_ini = &hpre_err_ini;
+ hisi_qm_dev_err_init(qm);
return 0;
}
@@ -776,22 +772,21 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
dev_warn(&pdev->dev, "init debugfs fail!\n");
- hpre_add_to_list(hpre);
+ hisi_qm_add_to_list(qm, &hpre_devices);
ret = hpre_algs_register();
if (ret < 0) {
- hpre_remove_from_list(hpre);
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
return 0;
err_with_qm_start:
+ hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm);
err_with_err_init:
- if (pdev->is_physfn)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
err_with_qm_init:
hisi_qm_uninit(qm);
@@ -907,7 +902,7 @@ static void hpre_remove(struct pci_dev *pdev)
int ret;
hpre_algs_unregister();
- hpre_remove_from_list(hpre);
+ hisi_qm_del_from_list(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
ret = hpre_sriov_disable(pdev);
if (ret) {
@@ -922,69 +917,13 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_debugfs_exit(hpre);
hisi_qm_stop(qm);
- if (qm->fun_type == QM_HW_PF)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
-static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts)
-{
- const struct hpre_hw_error *err = hpre_hw_errors;
- struct device *dev = &hpre->qm.pdev->dev;
-
- while (err->msg) {
- if (err->int_msk & err_sts)
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
- err++;
- }
-}
-
-static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre)
-{
- u32 err_sts;
-
- /* read err sts */
- err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS);
- if (err_sts) {
- hpre_log_hw_error(hpre, err_sts);
-
- /* clear error interrupts */
- writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT);
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, hpre_ret;
-
- /* log qm error */
- qm_ret = hisi_qm_hw_error_handle(&hpre->qm);
-
- /* log hpre error */
- hpre_ret = hpre_hw_error_handle(hpre);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- hpre_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hpre_process_hw_error(pdev);
-}
static const struct pci_error_handlers hpre_err_handler = {
- .error_detected = hpre_error_detected,
+ .error_detected = hisi_qm_dev_err_detected,
};
static struct pci_driver hpre_pci_driver = {
@@ -1013,6 +952,7 @@ static int __init hpre_init(void)
{
int ret;
+ hisi_qm_init_list(&hpre_devices);
hpre_register_debugfs();
ret = pci_register_driver(&hpre_pci_driver);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index b57da5ef8b5b..f795fb557630 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -9,6 +9,9 @@
#include <linux/log2.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/uacce.h>
+#include <linux/uaccess.h>
+#include <uapi/misc/uacce/hisi_qm.h>
#include "qm.h"
/* eq/aeq irq enable */
@@ -269,6 +272,12 @@ struct qm_doorbell {
__le16 priority;
};
+struct hisi_qm_resource {
+ struct hisi_qm *qm;
+ int distance;
+ struct list_head list;
+};
+
struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
@@ -277,6 +286,7 @@ struct hisi_qm_hw_ops {
int (*debug_init)(struct hisi_qm *qm);
void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi);
+ void (*hw_error_uninit)(struct hisi_qm *qm);
pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm);
};
@@ -465,9 +475,14 @@ static void qm_cq_head_update(struct hisi_qp *qp)
static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
- struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+ if (qp->event_cb) {
+ qp->event_cb(qp);
+ return;
+ }
if (qp->req_cb) {
+ struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+
while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
dma_rmb();
qp->req_cb(qp, qp->sqe + qm->sqe_size *
@@ -485,17 +500,9 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
}
}
-static void qm_qp_work_func(struct work_struct *work)
+static void qm_work_process(struct work_struct *work)
{
- struct hisi_qp *qp;
-
- qp = container_of(work, struct hisi_qp, work);
- qm_poll_qp(qp, qp->qm);
-}
-
-static irqreturn_t qm_irq_handler(int irq, void *data)
-{
- struct hisi_qm *qm = data;
+ struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
struct hisi_qp *qp;
int eqe_num = 0;
@@ -504,7 +511,7 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
eqe_num++;
qp = qm_to_hisi_qp(qm, eqe);
if (qp)
- queue_work(qp->wq, &qp->work);
+ qm_poll_qp(qp, qm);
if (qm->status.eq_head == QM_Q_DEPTH - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -522,6 +529,17 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
}
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+}
+
+static irqreturn_t do_qm_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = (struct hisi_qm *)data;
+
+ /* use the workqueue created by the QM's device driver, if any */
+ if (qm->wq)
+ queue_work(qm->wq, &qm->work);
+ else
+ schedule_work(&qm->work);
return IRQ_HANDLED;
}
@@ -531,7 +549,7 @@ static irqreturn_t qm_irq(int irq, void *data)
struct hisi_qm *qm = data;
if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
- return qm_irq_handler(irq, data);
+ return do_qm_irq(irq, data);
dev_err(&qm->pdev->dev, "invalid int source\n");
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
@@ -1011,43 +1029,45 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
+static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
+{
+ writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+}
+
static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
- const struct hisi_qm_hw_error *err = qm_hw_error;
+ const struct hisi_qm_hw_error *err;
struct device *dev = &qm->pdev->dev;
u32 reg_val, type, vf_num;
+ int i;
- while (err->msg) {
- if (err->int_msk & error_status) {
- dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
-
- if (error_status & QM_DB_TIMEOUT) {
- reg_val = readl(qm->io_base +
- QM_ABNORMAL_INF01);
- type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
- QM_DB_TIMEOUT_TYPE_SHIFT;
- vf_num = reg_val & QM_DB_TIMEOUT_VF;
- dev_err(dev, "qm %s doorbell timeout in function %u\n",
- qm_db_timeout[type], vf_num);
- }
-
- if (error_status & QM_OF_FIFO_OF) {
- reg_val = readl(qm->io_base +
- QM_ABNORMAL_INF00);
- type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
- QM_FIFO_OVERFLOW_TYPE_SHIFT;
- vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
-
- if (type < ARRAY_SIZE(qm_fifo_overflow))
- dev_err(dev, "qm %s fifo overflow in function %u\n",
- qm_fifo_overflow[type],
- vf_num);
- else
- dev_err(dev, "unknown error type\n");
- }
+ for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
+ err = &qm_hw_error[i];
+ if (!(err->int_msk & error_status))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & QM_DB_TIMEOUT) {
+ reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
+ type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
+ QM_DB_TIMEOUT_TYPE_SHIFT;
+ vf_num = reg_val & QM_DB_TIMEOUT_VF;
+ dev_err(dev, "qm %s doorbell timeout in function %u\n",
+ qm_db_timeout[type], vf_num);
+ } else if (err->int_msk & QM_OF_FIFO_OF) {
+ reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
+ type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
+ QM_FIFO_OVERFLOW_TYPE_SHIFT;
+ vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
+
+ if (type < ARRAY_SIZE(qm_fifo_overflow))
+ dev_err(dev, "qm %s fifo overflow in function %u\n",
+ qm_fifo_overflow[type], vf_num);
+ else
+ dev_err(dev, "unknown error type\n");
}
- err++;
}
}
@@ -1082,6 +1102,7 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.qm_db = qm_db_v2,
.get_irq_num = qm_get_irq_num_v2,
.hw_error_init = qm_hw_error_init_v2,
+ .hw_error_uninit = qm_hw_error_uninit_v2,
.hw_error_handle = qm_hw_error_handle_v2,
};
@@ -1147,20 +1168,9 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
qp->qp_id = qp_id;
qp->alg_type = alg_type;
- INIT_WORK(&qp->work, qm_qp_work_func);
- qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI |
- WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0);
- if (!qp->wq) {
- ret = -EFAULT;
- goto err_free_qp_mem;
- }
return qp;
-err_free_qp_mem:
- if (qm->use_dma_api)
- dma_free_coherent(dev, qp->qdma.size, qp->qdma.va,
- qp->qdma.dma);
err_clear_bit:
write_lock(&qm->qps_lock);
qm->qp_array[qp_id] = NULL;
@@ -1269,7 +1279,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
* @qp: The qp we want to start to run.
* @arg: Accelerator specific argument.
*
- * After this function, qp can receive request from user. Return qp_id if
+ * After this function, the qp can receive requests from the user. Return 0 if
* successful, or -EBUSY if it failed.
*/
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
@@ -1314,7 +1324,7 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
dev_dbg(dev, "queue %d started\n", qp_id);
- return qp_id;
+ return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
@@ -1395,6 +1405,214 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
}
}
+static void qm_qp_event_notifier(struct hisi_qp *qp)
+{
+ wake_up_interruptible(&qp->uacce_q->wait);
+}
+
+static int hisi_qm_get_available_instances(struct uacce_device *uacce)
+{
+ int i, ret;
+ struct hisi_qm *qm = uacce->priv;
+
+ read_lock(&qm->qps_lock);
+ for (i = 0, ret = 0; i < qm->qp_num; i++)
+ if (!qm->qp_array[i])
+ ret++;
+ read_unlock(&qm->qps_lock);
+
+ return ret;
+}
+
+static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
+ unsigned long arg,
+ struct uacce_queue *q)
+{
+ struct hisi_qm *qm = uacce->priv;
+ struct hisi_qp *qp;
+ u8 alg_type = 0;
+
+ qp = hisi_qm_create_qp(qm, alg_type);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ q->priv = qp;
+ q->uacce = uacce;
+ qp->uacce_q = q;
+ qp->event_cb = qm_qp_event_notifier;
+ qp->pasid = arg;
+
+ return 0;
+}
+
+static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
+{
+ struct hisi_qp *qp = q->priv;
+
+ hisi_qm_cache_wb(qp->qm);
+ hisi_qm_release_qp(qp);
+}
+
+/* map sq/cq/doorbell to user space */
+static int hisi_qm_uacce_mmap(struct uacce_queue *q,
+ struct vm_area_struct *vma,
+ struct uacce_qfile_region *qfr)
+{
+ struct hisi_qp *qp = q->priv;
+ struct hisi_qm *qm = qp->qm;
+ size_t sz = vma->vm_end - vma->vm_start;
+ struct pci_dev *pdev = qm->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned long vm_pgoff;
+ int ret;
+
+ switch (qfr->type) {
+ case UACCE_QFRT_MMIO:
+ if (qm->ver == QM_HW_V2) {
+ if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
+ QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
+ return -EINVAL;
+ } else {
+ if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ qm->phys_base >> PAGE_SHIFT,
+ sz, pgprot_noncached(vma->vm_page_prot));
+ case UACCE_QFRT_DUS:
+ if (sz != qp->qdma.size)
+ return -EINVAL;
+
+ /*
+ * dma_mmap_coherent() requires vm_pgoff to be 0;
+ * restore vm_pgoff to its initial value for mmap()
+ */
+ vm_pgoff = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
+ ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
+ qp->qdma.dma, sz);
+ vma->vm_pgoff = vm_pgoff;
+ return ret;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
+{
+ struct hisi_qp *qp = q->priv;
+
+ return hisi_qm_start_qp(qp, qp->pasid);
+}
+
+static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
+{
+ hisi_qm_stop_qp(q->priv);
+}
+
+static int qm_set_sqctype(struct uacce_queue *q, u16 type)
+{
+ struct hisi_qm *qm = q->uacce->priv;
+ struct hisi_qp *qp = q->priv;
+
+ write_lock(&qm->qps_lock);
+ qp->alg_type = type;
+ write_unlock(&qm->qps_lock);
+
+ return 0;
+}
+
+static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
+ unsigned long arg)
+{
+ struct hisi_qp *qp = q->priv;
+ struct hisi_qp_ctx qp_ctx;
+
+ if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
+ if (copy_from_user(&qp_ctx, (void __user *)arg,
+ sizeof(struct hisi_qp_ctx)))
+ return -EFAULT;
+
+ if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ return -EINVAL;
+
+ qm_set_sqctype(q, qp_ctx.qc_type);
+ qp_ctx.id = qp->qp_id;
+
+ if (copy_to_user((void __user *)arg, &qp_ctx,
+ sizeof(struct hisi_qp_ctx)))
+ return -EFAULT;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
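From userspace, this ioctl is reached through the uacce queue file descriptor. A sketch assuming the uapi definitions of struct hisi_qp_ctx and UACCE_CMD_QM_SET_QP_CTX from uapi/misc/uacce/hisi_qm.h (the installed header path may vary):

#include <stdio.h>
#include <sys/ioctl.h>
#include <misc/uacce/hisi_qm.h>	/* struct hisi_qp_ctx, UACCE_CMD_QM_SET_QP_CTX */

static int qm_set_qp_type(int q_fd, unsigned short qc_type)
{
	struct hisi_qp_ctx qp_ctx = { .qc_type = qc_type }; /* must be 0 or 1 */

	if (ioctl(q_fd, UACCE_CMD_QM_SET_QP_CTX, &qp_ctx) < 0)
		return -1;

	printf("qp id %u\n", qp_ctx.id);	/* filled in by the driver */
	return 0;
}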
+static const struct uacce_ops uacce_qm_ops = {
+ .get_available_instances = hisi_qm_get_available_instances,
+ .get_queue = hisi_qm_uacce_get_queue,
+ .put_queue = hisi_qm_uacce_put_queue,
+ .start_queue = hisi_qm_uacce_start_queue,
+ .stop_queue = hisi_qm_uacce_stop_queue,
+ .mmap = hisi_qm_uacce_mmap,
+ .ioctl = hisi_qm_uacce_ioctl,
+};
+
+static int qm_alloc_uacce(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct uacce_device *uacce;
+ unsigned long mmio_page_nr;
+ unsigned long dus_page_nr;
+ struct uacce_interface interface = {
+ .flags = UACCE_DEV_SVA,
+ .ops = &uacce_qm_ops,
+ };
+
+ strncpy(interface.name, pdev->driver->name, sizeof(interface.name));
+
+ uacce = uacce_alloc(&pdev->dev, &interface);
+ if (IS_ERR(uacce))
+ return PTR_ERR(uacce);
+
+ if (uacce->flags & UACCE_DEV_SVA) {
+ qm->use_sva = true;
+ } else {
+ /* only the SVA case is supported */
+ uacce_remove(uacce);
+ qm->uacce = NULL;
+ return -EINVAL;
+ }
+
+ uacce->is_vf = pdev->is_virtfn;
+ uacce->priv = qm;
+ uacce->algs = qm->algs;
+
+ if (qm->ver == QM_HW_V1) {
+ mmio_page_nr = QM_DOORBELL_PAGE_NR;
+ uacce->api_ver = HISI_QM_API_VER_BASE;
+ } else {
+ mmio_page_nr = QM_DOORBELL_PAGE_NR +
+ QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ }
+
+ dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
+ sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
+
+ uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
+ uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
+
+ qm->uacce = uacce;
+
+ return 0;
+}
+
/**
* hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
* @qm: The qm whose free qp count is wanted.
@@ -1437,10 +1655,14 @@ int hisi_qm_init(struct hisi_qm *qm)
return -EINVAL;
}
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
+
ret = pci_enable_device_mem(pdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable device mem!\n");
- return ret;
+ goto err_remove_uacce;
}
ret = pci_request_mem_regions(pdev, qm->dev_name);
@@ -1449,8 +1671,9 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_disable_pcidev;
}
- qm->io_base = ioremap(pci_resource_start(pdev, PCI_BAR_2),
- pci_resource_len(qm->pdev, PCI_BAR_2));
+ qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
+ qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
+ qm->io_base = ioremap(qm->phys_base, qm->phys_size);
if (!qm->io_base) {
ret = -EIO;
goto err_release_mem_regions;
@@ -1479,6 +1702,7 @@ int hisi_qm_init(struct hisi_qm *qm)
qm->qp_in_used = 0;
mutex_init(&qm->mailbox_lock);
rwlock_init(&qm->qps_lock);
+ INIT_WORK(&qm->work, qm_work_process);
dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
qm->use_dma_api ? "dma api" : "iommu api");
@@ -1493,6 +1717,9 @@ err_release_mem_regions:
pci_release_mem_regions(pdev);
err_disable_pcidev:
pci_disable_device(pdev);
+err_remove_uacce:
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
return ret;
}
@@ -1509,6 +1736,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+
if (qm->use_dma_api && qm->qdma.va) {
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
@@ -1856,43 +2086,30 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
-/**
- * hisi_qm_hw_error_init() - Configure qm hardware error report method.
- * @qm: The qm which we want to configure.
- * @ce: Bit mask of correctable error configure.
- * @nfe: Bit mask of non-fatal error configure.
- * @fe: Bit mask of fatal error configure.
- * @msi: Bit mask of error reported by message signal interrupt.
- *
- * Hardware errors of qm can be reported either by RAS interrupts which will
- * be handled by UEFI and then PCIe AER or by device MSI. User can configure
- * each error to use either of above two methods. For RAS interrupts, we can
- * configure an error as one of correctable error, non-fatal error or
- * fatal error.
- *
- * Bits indicating errors can be configured to ce, nfe, fe and msi to enable
- * related report methods. Error report will be masked if related error bit
- * does not configure.
- */
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init(struct hisi_qm *qm)
{
+ const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
+
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
- qm->ops->hw_error_init(qm, ce, nfe, fe, msi);
+ qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe,
+ err_info->fe, err_info->msi);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
-/**
- * hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors.
- * @qm: The qm which has non-fatal hardware errors.
- *
- * Accelerators use this function to handle qm non-fatal hardware errors.
- */
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
+static void qm_hw_error_uninit(struct hisi_qm *qm)
+{
+ if (!qm->ops->hw_error_uninit) {
+ dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
+ return;
+ }
+
+ qm->ops->hw_error_uninit(qm);
+}
+
+static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
@@ -1901,7 +2118,6 @@ pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
return qm->ops->hw_error_handle(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle);
/**
* hisi_qm_get_hw_version() - Get hardware version of a qm.
@@ -1922,6 +2138,229 @@ enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
+/**
+ * hisi_qm_dev_err_init() - Initialize device error configuration.
+ * @qm: The qm for which we want to do error initialization.
+ *
+ * Initialize QM and device error related configuration.
+ */
+void hisi_qm_dev_err_init(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_init(qm);
+
+ if (!qm->err_ini->hw_err_enable) {
+ dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
+ return;
+ }
+ qm->err_ini->hw_err_enable(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
+
+/**
+ * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
+ * @qm: The qm for which we want to do error uninitialization.
+ *
+ * Uninitialize QM and device error related configuration.
+ */
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_uninit(qm);
+
+ if (!qm->err_ini->hw_err_disable) {
+ dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
+ return;
+ }
+ qm->err_ini->hw_err_disable(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
+
+/**
+ * hisi_qm_free_qps() - free multiple queue pairs.
+ * @qps: The queue pairs need to be freed.
+ * @qp_num: The num of queue pairs.
+ */
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
+{
+ int i;
+
+ if (!qps || qp_num <= 0)
+ return;
+
+ for (i = qp_num - 1; i >= 0; i--)
+ hisi_qm_release_qp(qps[i]);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
+
+static void free_list(struct list_head *head)
+{
+ struct hisi_qm_resource *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, head, list) {
+ list_del(&res->list);
+ kfree(res);
+ }
+}
+
+static int hisi_qm_sort_devices(int node, struct list_head *head,
+ struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm_resource *res, *tmp;
+ struct hisi_qm *qm;
+ struct list_head *n;
+ struct device *dev;
+ int dev_node = 0;
+
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = &qm->pdev->dev;
+
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ dev_node = dev_to_node(dev);
+ if (dev_node < 0)
+ dev_node = 0;
+ }
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->qm = qm;
+ res->distance = node_distance(dev_node, node);
+ n = head;
+ list_for_each_entry(tmp, head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
+ }
+
+ return 0;
+}
+
+/**
+ * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
+ * @qm_list: The list of all available devices.
+ * @qp_num: The number of queue pairs to create.
+ * @alg_type: The algorithm type.
+ * @node: The numa node.
+ * @qps: Output array for the created queue pairs.
+ *
+ * This function sorts all available devices by NUMA distance, then tries to
+ * create all queue pairs on a single device; if no device meets the
+ * requirements, an error is returned.
+ */
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps)
+{
+ struct hisi_qm_resource *tmp;
+ int ret = -ENODEV;
+ LIST_HEAD(head);
+ int i;
+
+ if (!qps || !qm_list || qp_num <= 0)
+ return -EINVAL;
+
+ mutex_lock(&qm_list->lock);
+ if (hisi_qm_sort_devices(node, &head, qm_list)) {
+ mutex_unlock(&qm_list->lock);
+ goto err;
+ }
+
+ list_for_each_entry(tmp, &head, list) {
+ for (i = 0; i < qp_num; i++) {
+ qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
+ if (IS_ERR(qps[i])) {
+ hisi_qm_free_qps(qps, i);
+ break;
+ }
+ }
+
+ if (i == qp_num) {
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&qm_list->lock);
+ if (ret)
+ pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
+ node, alg_type, qp_num);
+
+err:
+ free_list(&head);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
+
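Callers pair hisi_qm_alloc_qps_node() with hisi_qm_free_qps(), as hpre_create_qp() does earlier in this patch. A sketch for a hypothetical multi-queue user (cleanup of already-started qps simplified):

static int foo_create_qps(struct hisi_qm_list *devices,
			  struct hisi_qp **qps, int num)
{
	int node = cpu_to_node(smp_processor_id());
	int i, ret;

	ret = hisi_qm_alloc_qps_node(devices, num, 0, node, qps);
	if (ret)
		return ret;

	for (i = 0; i < num; i++) {
		ret = hisi_qm_start_qp(qps[i], 0);
		if (ret < 0) {
			hisi_qm_free_qps(qps, num);
			return ret;
		}
	}

	return 0;
}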
+static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
+{
+ u32 err_sts;
+
+ if (!qm->err_ini->get_dev_hw_err_status) {
+ dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* get device hardware error status */
+ err_sts = qm->err_ini->get_dev_hw_err_status(qm);
+ if (err_sts) {
+ if (!qm->err_ini->log_dev_hw_err) {
+ dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ qm->err_ini->log_dev_hw_err(qm, err_sts);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, dev_ret;
+
+ /* log qm error */
+ qm_ret = qm_hw_error_handle(qm);
+
+ /* log device error */
+ dev_ret = qm_dev_err_handle(qm);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
+ * @pdev: The PCI device which needs to report the error.
+ * @state: The PCI channel state (connectivity between CPU and device).
+ *
+ * We register this function with the PCIe AER handlers; it reports the device
+ * or qm hardware error status when an error occurs.
+ */
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_NONE;
+
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return qm_process_dev_error(pdev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 078b8f1f1b77..ec5b6f48db6c 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -77,6 +77,9 @@
#define HISI_ACC_SGL_SGE_NR_MAX 255
+/* page number for queue file region */
+#define QM_DOORBELL_PAGE_NR 1
+
enum qp_state {
QP_STOP,
};
@@ -125,6 +128,28 @@ struct hisi_qm_status {
unsigned long flags;
};
+struct hisi_qm;
+
+struct hisi_qm_err_info {
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+ u32 msi;
+};
+
+struct hisi_qm_err_ini {
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
+ struct hisi_qm_err_info err_info;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+};
+
struct hisi_qm {
enum qm_hw_ver ver;
enum qm_fun_type fun_type;
@@ -136,6 +161,7 @@ struct hisi_qm {
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
+ struct list_head list;
struct qm_dma qdma;
struct qm_sqc *sqc;
@@ -148,6 +174,7 @@ struct hisi_qm {
dma_addr_t aeqe_dma;
struct hisi_qm_status status;
+ const struct hisi_qm_err_ini *err_ini;
rwlock_t qps_lock;
unsigned long *qp_bitmap;
@@ -162,7 +189,15 @@ struct hisi_qm {
u32 error_mask;
u32 msi_mask;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+
+ const char *algs;
bool use_dma_api;
+ bool use_sva;
+ resource_size_t phys_base;
+ resource_size_t phys_size;
+ struct uacce_device *uacce;
};
struct hisi_qp_status {
@@ -192,12 +227,35 @@ struct hisi_qp {
struct hisi_qp_ops *hw_ops;
void *qp_ctx;
void (*req_cb)(struct hisi_qp *qp, void *data);
- struct work_struct work;
- struct workqueue_struct *wq;
+ void (*event_cb)(struct hisi_qp *qp);
struct hisi_qm *qm;
+ u16 pasid;
+ struct uacce_queue *uacce_q;
};
+static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
+{
+ INIT_LIST_HEAD(&qm_list->list);
+ mutex_init(&qm_list->lock);
+}
+
+static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_from_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
@@ -211,11 +269,12 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi);
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
@@ -227,4 +286,7 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
struct hisi_acc_sgl_pool *pool);
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps);
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 13e2d8d7be94..3598fa17beb2 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -11,6 +11,8 @@
/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
+ u8 *pbuf;
+ dma_addr_t pbuf_dma;
u8 *c_ivin;
dma_addr_t c_ivin_dma;
u8 *out_mac;
@@ -23,6 +25,8 @@ struct sec_cipher_req {
dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
@@ -48,6 +52,7 @@ struct sec_req {
/* Status of the SEC request */
bool fake_busy;
+ bool use_pbuf;
};
/**
@@ -114,6 +119,7 @@ struct sec_ctx {
struct sec_qp_ctx *qp_ctx;
struct sec_dev *sec;
const struct sec_req_op *req_op;
+ struct hisi_qp **qps;
/* Half queues for encipher, and half for decipher */
u32 hlf_q_num;
@@ -128,6 +134,7 @@ struct sec_ctx {
atomic_t dec_qcyclic;
enum sec_alg_type alg_type;
+ bool pbuf_supported;
struct sec_cipher_ctx c_ctx;
struct sec_auth_ctx a_ctx;
};
@@ -162,14 +169,15 @@ struct sec_debug {
struct sec_dev {
struct hisi_qm qm;
- struct list_head list;
struct sec_debug debug;
u32 ctx_q_num;
+ bool iommu_used;
u32 num_vfs;
unsigned long status;
};
-struct sec_dev *sec_find_device(int node);
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
+struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(void);
void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index a2cfcc9ccd94..7f1c6a31b82f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -46,7 +46,21 @@
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
+#define SEC_MAX_AAD_LEN 65535
#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+
+#define SEC_PBUF_SZ 512
+#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
+#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
+#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
+ SEC_MAX_MAC_LEN * 2)
+#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
+#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
+ SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
+ SEC_PBUF_LEFT_SZ)
+
#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
@@ -110,12 +124,12 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
-static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+static int sec_aead_verify(struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac_out = req->aead_req.out_mac;
u8 *mac = mac_out + SEC_MAX_MAC_LEN;
struct scatterlist *sgl = aead_req->src;
size_t sz;
@@ -163,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
}
if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
- err = sec_aead_verify(req, qp_ctx);
+ err = sec_aead_verify(req);
atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
@@ -245,6 +259,50 @@ static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
res->out_mac, res->out_mac_dma);
}
+static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->pbuf)
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf, res->pbuf_dma);
+}
+
+/*
+ * To improve performance, a pre-mapped pbuffer is used for small
+ * packets (< 512 bytes), avoiding per-request IOMMU translation.
+ */
+static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int pbuf_page_offset;
+ int i, j, k;
+
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ &res->pbuf_dma, GFP_KERNEL);
+ if (!res->pbuf)
+ return -ENOMEM;
+
+	/*
+	 * A SEC_PBUF_PKG holds the data pbuf, the IV and the
+	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
+	 * Each page holds SEC_PBUF_NUM packages and the qp context
+	 * needs QM_Q_DEPTH of them, so SEC_PBUF_PAGE_NUM full pages
+	 * plus the SEC_PBUF_LEFT_SZ remainder make up
+	 * SEC_TOTAL_PBUF_SZ (a worked example follows this function).
+	 */
+ for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ pbuf_page_offset = PAGE_SIZE * i;
+ for (j = 0; j < SEC_PBUF_NUM; j++) {
+ k = i * SEC_PBUF_NUM + j;
+ if (k == QM_Q_DEPTH)
+ break;
+ res[k].pbuf = res->pbuf +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ res[k].pbuf_dma = res->pbuf_dma +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ }
+ }
+ return 0;
+}
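A quick sanity check of the sizing macros above, as a standalone sketch (not part of the patch; the PAGE_SIZE, QM_Q_DEPTH and SEC_IV_SIZE values are assumptions taken from the usual driver headers):

#include <stdio.h>

#define PAGE_SIZE          4096	/* assumed */
#define QM_Q_DEPTH         1024	/* assumed, from qm.h */
#define SEC_IV_SIZE        24	/* assumed, from sec.h */
#define SEC_MAX_MAC_LEN    64
#define SEC_PBUF_SZ        512
#define SEC_PBUF_PKG       (SEC_PBUF_SZ + SEC_IV_SIZE + SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM       (PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM  (QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ   (SEC_PBUF_PKG * (QM_Q_DEPTH - \
				SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ  (PAGE_SIZE * SEC_PBUF_PAGE_NUM + SEC_PBUF_LEFT_SZ)

int main(void)
{
	/* Prints pkg=664 per_page=6 pages=170 left=2656 total=698976.
	 * The leftover is why the copy loop above runs
	 * i = 0..SEC_PBUF_PAGE_NUM inclusive: the final, partial page
	 * carries the SEC_PBUF_LEFT_SZ remainder.
	 */
	printf("pkg=%d per_page=%d pages=%d left=%d total=%d\n",
	       SEC_PBUF_PKG, SEC_PBUF_NUM, SEC_PBUF_PAGE_NUM,
	       SEC_PBUF_LEFT_SZ, SEC_TOTAL_PBUF_SZ);
	return 0;
}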
+
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
@@ -259,11 +317,18 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
if (ctx->alg_type == SEC_AEAD) {
ret = sec_alloc_mac_resource(dev, res);
if (ret)
- goto get_fail;
+ goto alloc_fail;
+ }
+ if (ctx->pbuf_supported) {
+ ret = sec_alloc_pbuf_resource(dev, res);
+ if (ret) {
+ dev_err(dev, "fail to alloc pbuf dma resource!\n");
+ goto alloc_fail;
+ }
}
return 0;
-get_fail:
+alloc_fail:
sec_free_civ_resource(dev, res);
return ret;
@@ -276,6 +341,8 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
sec_free_civ_resource(dev, qp_ctx->res);
+ if (ctx->pbuf_supported)
+ sec_free_pbuf_resource(dev, qp_ctx->res);
if (ctx->alg_type == SEC_AEAD)
sec_free_mac_resource(dev, qp_ctx->res);
}
@@ -288,11 +355,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
struct hisi_qp *qp;
int ret = -ENOMEM;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
qp->req_type = 0;
qp->qp_ctx = qp_ctx;
qp->req_cb = sec_req_cb;
@@ -335,7 +399,6 @@ err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
- hisi_qm_release_qp(qp);
return ret;
}
@@ -352,7 +415,6 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx,
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
idr_destroy(&qp_ctx->req_idr);
- hisi_qm_release_qp(qp_ctx->qp);
}
static int sec_ctx_base_init(struct sec_ctx *ctx)
@@ -360,14 +422,18 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
struct sec_dev *sec;
int i, ret;
- sec = sec_find_device(cpu_to_node(smp_processor_id()));
- if (!sec) {
- pr_err("Can not find proper Hisilicon SEC device!\n");
+ ctx->qps = sec_create_qps();
+ if (!ctx->qps) {
+ pr_err("Can not create sec qps!\n");
return -ENODEV;
}
+
+ sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
ctx->hlf_q_num = sec->ctx_q_num >> 1;
+ ctx->pbuf_supported = ctx->sec->iommu_used;
+
/* Half of queue depth is taken as fake requests limit in the queue. */
ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
@@ -386,6 +452,7 @@ err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
return ret;
}
@@ -397,6 +464,7 @@ static void sec_ctx_base_uninit(struct sec_ctx *ctx)
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
kfree(ctx->qp_ctx);
}
@@ -447,7 +515,6 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- ctx = crypto_skcipher_ctx(tfm);
ctx->alg_type = SEC_SKCIPHER;
crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
@@ -591,11 +658,94 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int sec_cipher_map(struct device *dev, struct sec_req *req,
+static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
+ int req_id = req->req_id;
+
+ if (ctx->alg_type == SEC_AEAD)
+ copy_size = aead_req->cryptlen + aead_req->assoclen;
+ else
+ copy_size = c_req->c_len;
+
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
+ qp_ctx->res[req_id].pbuf,
+ copy_size);
+
+ if (unlikely(pbuf_length != copy_size)) {
+ dev_err(dev, "copy src data to pbuf error!\n");
+ return -EINVAL;
+ }
+
+ c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
+
+ if (!c_req->c_in_dma) {
+ dev_err(dev, "fail to set pbuffer address!\n");
+ return -ENOMEM;
+ }
+
+ c_req->c_out_dma = c_req->c_in_dma;
+
+ return 0;
+}
+
+static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *dst)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
+ int req_id = req->req_id;
+
+ if (ctx->alg_type == SEC_AEAD)
+ copy_size = c_req->c_len + aead_req->assoclen;
+ else
+ copy_size = c_req->c_len;
+
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
+ qp_ctx->res[req_id].pbuf,
+ copy_size);
+
+ if (unlikely(pbuf_length != copy_size))
+ dev_err(dev, "copy pbuf data to dst error!\n");
+}
+
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_alg_res *res = &qp_ctx->res[req->req_id];
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int ret;
+
+ if (req->use_pbuf) {
+ ret = sec_cipher_pbuf_map(ctx, req, src);
+ c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
+ c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
+ if (ctx->alg_type == SEC_AEAD) {
+ a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
+ a_req->out_mac_dma = res->pbuf_dma +
+ SEC_PBUF_MAC_OFFSET;
+ }
+
+ return ret;
+ }
+ c_req->c_ivin = res->c_ivin;
+ c_req->c_ivin_dma = res->c_ivin_dma;
+ if (ctx->alg_type == SEC_AEAD) {
+ a_req->out_mac = res->out_mac;
+ a_req->out_mac_dma = res->out_mac_dma;
+ }
c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
qp_ctx->c_in_pool,
@@ -626,29 +776,34 @@ static int sec_cipher_map(struct device *dev, struct sec_req *req,
return 0;
}
-static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
+static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
- if (dst != src)
- hisi_acc_sg_buf_unmap(dev, src, req->c_in);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ if (req->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
- hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ }
}
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sq = req->c_req.sk_req;
- return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
+ return sec_cipher_map(ctx, req, sq->src, sq->dst);
}
static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_req *c_req = &req->c_req;
- struct skcipher_request *sk_req = c_req->sk_req;
+ struct skcipher_request *sq = req->c_req.sk_req;
- sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
+ sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}
static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
@@ -759,16 +914,14 @@ static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aq = req->aead_req.aead_req;
- return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
+ return sec_cipher_map(ctx, req, aq->src, aq->dst);
}
static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_req *cq = &req->c_req;
struct aead_request *aq = req->aead_req.aead_req;
- sec_cipher_unmap(dev, cq, aq->src, aq->dst);
+ sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
@@ -801,9 +954,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
- u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+ struct sec_cipher_req *c_req = &req->c_req;
- memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -818,8 +971,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
memset(sec_sqe, 0, sizeof(struct sec_sqe));
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr =
- cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
@@ -836,7 +988,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
sec_sqe->type_cipher_auth = bd_type | cipher;
- sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ if (req->use_pbuf)
+ sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
+ else
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
if (c_req->c_in_dma != c_req->c_out_dma)
de = 0x1 << SEC_DE_OFFSET;
@@ -844,7 +999,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->sds_sa_type = (de | scene | sa_type);
/* Just set DST address type */
- da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ if (req->use_pbuf)
+ da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+ else
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
sec_sqe->sdm_addr_type |= da_type;
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
@@ -904,9 +1062,9 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+ struct sec_cipher_req *c_req = &req->c_req;
- memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
@@ -939,8 +1097,7 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
- sec_sqe->type2.mac_addr =
- cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
+ sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -964,6 +1121,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -979,7 +1137,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
struct scatterlist *sgl = a_req->dst;
sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- qp_ctx->res[req->req_id].out_mac,
+ aead_req->out_mac,
authsize, a_req->cryptlen +
a_req->assoclen);
@@ -1031,6 +1189,7 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
+ struct sec_cipher_req *c_req = &req->c_req;
int ret;
ret = sec_request_init(ctx, req);
@@ -1057,12 +1216,10 @@ err_send_req:
/* As failing, restore the IV from user */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
if (ctx->alg_type == SEC_SKCIPHER)
- memcpy(req->c_req.sk_req->iv,
- req->qp_ctx->res[req->req_id].c_ivin,
+ memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
ctx->c_ctx.ivsize);
else
- memcpy(req->aead_req.aead_req->iv,
- req->qp_ctx->res[req->req_id].c_ivin,
+ memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
ctx->c_ctx.ivsize);
}
@@ -1208,6 +1365,12 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
sreq->c_req.c_len = sk_req->cryptlen;
+
+ if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+ sreq->use_pbuf = true;
+ else
+ sreq->use_pbuf = false;
+
if (c_alg == SEC_CALG_3DES) {
if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher 3des input length error!\n");
@@ -1321,11 +1484,18 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t authsize = crypto_aead_authsize(tfm);
- if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+ if (unlikely(!req->src || !req->dst || !req->cryptlen ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
return -EINVAL;
}
+ if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
+ SEC_PBUF_SZ)
+ sreq->use_pbuf = true;
+ else
+ sreq->use_pbuf = false;
+
/* Support AES only */
if (unlikely(c_alg != SEC_CALG_AES)) {
dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2bbaf1e2dae7..1f54ebe164b6 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -7,6 +7,7 @@
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -89,8 +90,7 @@ struct sec_hw_error {
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
-static LIST_HEAD(sec_list);
-static DEFINE_MUTEX(sec_list_lock);
+static struct hisi_qm_list sec_devices;
static const struct sec_hw_error sec_hw_errors[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
@@ -105,37 +105,6 @@ static const struct sec_hw_error sec_hw_errors[] = {
{ /* sentinel */ }
};
-struct sec_dev *sec_find_device(int node)
-{
-#define SEC_NUMA_MAX_DISTANCE 100
- int min_distance = SEC_NUMA_MAX_DISTANCE;
- int dev_node = 0, free_qp_num = 0;
- struct sec_dev *sec, *ret = NULL;
- struct hisi_qm *qm;
- struct device *dev;
-
- mutex_lock(&sec_list_lock);
- list_for_each_entry(sec, &sec_list, list) {
- qm = &sec->qm;
- dev = &qm->pdev->dev;
-#ifdef CONFIG_NUMA
- dev_node = dev->numa_node;
- if (dev_node < 0)
- dev_node = 0;
-#endif
- if (node_distance(dev_node, node) < min_distance) {
- free_qp_num = hisi_qm_get_free_qp_num(qm);
- if (free_qp_num >= sec->ctx_q_num) {
- ret = sec;
- min_distance = node_distance(dev_node, node);
- }
- }
- }
- mutex_unlock(&sec_list_lock);
-
- return ret;
-}
-
static const char * const sec_dbg_file_name[] = {
[SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
@@ -238,6 +207,32 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
+{
+ hisi_qm_free_qps(qps, qp_num);
+ kfree(qps);
+}
+
+struct hisi_qp **sec_create_qps(void)
+{
+ int node = cpu_to_node(smp_processor_id());
+ u32 ctx_num = ctx_q_num;
+ struct hisi_qp **qps;
+ int ret;
+
+ qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
+ if (!qps)
+ return NULL;
+
+ ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
+ if (!ret)
+ return qps;
+
+ kfree(qps);
+ return NULL;
+}
+
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
@@ -245,20 +240,6 @@ static const struct pci_device_id sec_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
-static inline void sec_add_to_list(struct sec_dev *sec)
-{
- mutex_lock(&sec_list_lock);
- list_add_tail(&sec->list, &sec_list);
- mutex_unlock(&sec_list_lock);
-}
-
-static inline void sec_remove_from_list(struct sec_dev *sec)
-{
- mutex_lock(&sec_list_lock);
- list_del(&sec->list);
- mutex_unlock(&sec_list_lock);
-}
-
static u8 sec_get_endian(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
@@ -384,9 +365,8 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void sec_hw_error_enable(struct sec_dev *sec)
+static void sec_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
u32 val;
if (qm->ver == QM_HW_V1) {
@@ -414,9 +394,8 @@ static void sec_hw_error_enable(struct sec_dev *sec)
writel(val, qm->io_base + SEC_CONTROL_REG);
}
-static void sec_hw_error_disable(struct sec_dev *sec)
+static void sec_hw_error_disable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
u32 val;
val = readl(qm->io_base + SEC_CONTROL_REG);
@@ -435,27 +414,6 @@ static void sec_hw_error_disable(struct sec_dev *sec)
writel(val, qm->io_base + SEC_CONTROL_REG);
}
-static void sec_hw_error_init(struct sec_dev *sec)
-{
- if (sec->qm.fun_type == QM_HW_VF)
- return;
-
- hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
- | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- sec_hw_error_enable(sec);
-}
-
-static void sec_hw_error_uninit(struct sec_dev *sec)
-{
- if (sec->qm.fun_type == QM_HW_VF)
- return;
-
- sec_hw_error_disable(sec);
- writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK);
-}
-
static u32 sec_current_qm_read(struct sec_debug_file *file)
{
struct hisi_qm *qm = file->qm;
@@ -695,6 +653,51 @@ static void sec_debugfs_exit(struct sec_dev *sec)
debugfs_remove_recursive(sec->qm.debug.debug_root);
}
+static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+{
+ const struct sec_hw_error *errs = sec_hw_errors;
+ struct device *dev = &qm->pdev->dev;
+ u32 err_val;
+
+ while (errs->msg) {
+ if (errs->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ errs->msg, errs->int_msk);
+
+ if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
+ err_val = readl(qm->io_base +
+ SEC_CORE_SRAM_ECC_ERR_INFO);
+ dev_err(dev, "multi ecc sram num=0x%x\n",
+ SEC_ECC_NUM(err_val));
+ dev_err(dev, "multi ecc sram addr=0x%x\n",
+ SEC_ECC_ADDR(err_val));
+ }
+ }
+ errs++;
+ }
+
+ writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+}
+
+static u32 sec_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + SEC_CORE_INT_STATUS);
+}
+
+static const struct hisi_qm_err_ini sec_err_ini = {
+ .hw_err_enable = sec_hw_error_enable,
+ .hw_err_disable = sec_hw_error_disable,
+ .get_dev_hw_err_status = sec_get_hw_err_status,
+ .log_dev_hw_err = sec_log_hw_error,
+ .err_info = {
+ .ce = QM_BASE_CE,
+ .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+ QM_ACC_WB_NOT_READY_TIMEOUT,
+ .fe = 0,
+ .msi = QM_DB_RANDOM_INVALID,
+ }
+};
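The per-driver error_detected paths removed later in this patch are folded into the QM core, which drives the callbacks registered above. A simplified sketch of how the core can consume this table (assumed shape only, mirroring the removed sec_hw_error_handle(); the real logic lives in the shared qm code):

/* Sketch, not the actual qm.c handler (which also logs QM-level errors). */
static pci_ers_result_t qm_dev_err_handle_sketch(struct hisi_qm *qm)
{
	u32 err_sts = qm->err_ini->get_dev_hw_err_status(qm);

	if (err_sts) {
		/* The driver callback logs and clears the error source. */
		qm->err_ini->log_dev_hw_err(qm, err_sts);
		return PCI_ERS_RESULT_NEED_RESET;
	}

	return PCI_ERS_RESULT_RECOVERED;
}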
+
static int sec_pf_probe_init(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
@@ -713,11 +716,13 @@ static int sec_pf_probe_init(struct sec_dev *sec)
return -EINVAL;
}
+ qm->err_ini = &sec_err_ini;
+
ret = sec_set_user_domain_and_cache(sec);
if (ret)
return ret;
- sec_hw_error_init(sec);
+ hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
return 0;
@@ -750,12 +755,30 @@ static void sec_qm_uninit(struct hisi_qm *qm)
static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
{
+ int ret;
+
+	/*
+	 * WQ_HIGHPRI: SEC requests are latency sensitive, so they
+	 * need a high-priority workqueue.
+	 * WQ_UNBOUND: SEC tasks are likely long-running, CPU-intensive
+	 * workloads.
+	 */
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI |
+ WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
+ if (!qm->wq) {
+ pci_err(qm->pdev, "fail to alloc workqueue\n");
+ return -ENOMEM;
+ }
+
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = SEC_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
- return sec_pf_probe_init(sec);
+ ret = sec_pf_probe_init(sec);
+ if (ret)
+ goto err_probe_uninit;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
@@ -768,18 +791,43 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
} else if (qm->ver == QM_HW_V2) {
/* v2 starts to support get vft by mailbox */
- return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_probe_uninit;
}
} else {
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_probe_uninit;
}
return 0;
+err_probe_uninit:
+ destroy_workqueue(qm->wq);
+ return ret;
}
-static void sec_probe_uninit(struct sec_dev *sec)
+static void sec_probe_uninit(struct hisi_qm *qm)
{
- sec_hw_error_uninit(sec);
+ hisi_qm_dev_err_uninit(qm);
+
+ destroy_workqueue(qm->wq);
+}
+
+static void sec_iommu_used_check(struct sec_dev *sec)
+{
+ struct iommu_domain *domain;
+ struct device *dev = &sec->qm.pdev->dev;
+
+ domain = iommu_get_domain_for_dev(dev);
+
+ /* Check if iommu is used */
+ sec->iommu_used = false;
+ if (domain) {
+ if (domain->type & __IOMMU_DOMAIN_PAGING)
+ sec->iommu_used = true;
+		dev_info(dev, "SMMU opened, iommu domain type = %u\n",
+			 domain->type);
+ }
}
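Taken together with the use_pbuf checks in sec_crypto.c above, the decision chain introduced by this patch boils down to the following (hypothetical helper, distilled purely for illustration; no such function exists in the driver):

#include <stdbool.h>
#include <stdint.h>

/* pbuf pays off only when an IOMMU translates DMA (sec->iommu_used,
 * hence ctx->pbuf_supported) and the whole request fits one slot. */
static bool sec_would_use_pbuf(bool pbuf_supported, uint32_t total_len)
{
	return pbuf_supported && total_len <= 512 /* SEC_PBUF_SZ */;
}

For skcipher, total_len is cryptlen; for AEAD it is cryptlen + assoclen, as checked in sec_skcipher_param_check() and sec_aead_param_check().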
static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -795,6 +843,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, sec);
sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
qm = &sec->qm;
@@ -820,7 +869,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
- sec_add_to_list(sec);
+ hisi_qm_add_to_list(qm, &sec_devices);
ret = sec_register_to_crypto();
if (ret < 0) {
@@ -831,12 +880,12 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_remove_from_list:
- sec_remove_from_list(sec);
+ hisi_qm_del_from_list(qm, &sec_devices);
sec_debugfs_exit(sec);
hisi_qm_stop(qm);
err_probe_uninit:
- sec_probe_uninit(sec);
+ sec_probe_uninit(qm);
err_qm_uninit:
sec_qm_uninit(qm);
@@ -955,7 +1004,7 @@ static void sec_remove(struct pci_dev *pdev)
sec_unregister_from_crypto();
- sec_remove_from_list(sec);
+ hisi_qm_del_from_list(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && sec->num_vfs)
(void)sec_sriov_disable(pdev);
@@ -967,89 +1016,13 @@ static void sec_remove(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF)
sec_debug_regs_clear(qm);
- sec_probe_uninit(sec);
+ sec_probe_uninit(qm);
sec_qm_uninit(qm);
}
-static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts)
-{
- const struct sec_hw_error *errs = sec_hw_errors;
- struct device *dev = &sec->qm.pdev->dev;
- u32 err_val;
-
- while (errs->msg) {
- if (errs->int_msk & err_sts) {
- dev_err(dev, "%s [error status=0x%x] found\n",
- errs->msg, errs->int_msk);
-
- if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
- err_val = readl(sec->qm.io_base +
- SEC_CORE_SRAM_ECC_ERR_INFO);
- dev_err(dev, "multi ecc sram num=0x%x\n",
- SEC_ECC_NUM(err_val));
- dev_err(dev, "multi ecc sram addr=0x%x\n",
- SEC_ECC_ADDR(err_val));
- }
- }
- errs++;
- }
-}
-
-static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec)
-{
- u32 err_sts;
-
- /* read err sts */
- err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS);
- if (err_sts) {
- sec_log_hw_error(sec, err_sts);
-
- /* clear error interrupts */
- writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE);
-
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev)
-{
- struct sec_dev *sec = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, sec_ret;
-
- if (!sec) {
- pci_err(pdev, "Can't recover error during device init\n");
- return PCI_ERS_RESULT_NONE;
- }
-
- /* log qm error */
- qm_ret = hisi_qm_hw_error_handle(&sec->qm);
-
- /* log sec error */
- sec_ret = sec_hw_error_handle(sec);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- sec_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t sec_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return sec_process_hw_error(pdev);
-}
-
static const struct pci_error_handlers sec_err_handler = {
- .error_detected = sec_error_detected,
+ .error_detected = hisi_qm_dev_err_detected,
};
static struct pci_driver sec_pci_driver = {
@@ -1078,6 +1051,7 @@ static int __init sec_init(void)
{
int ret;
+ hisi_qm_init_list(&sec_devices);
sec_register_debugfs();
ret = pci_register_driver(&sec_pci_driver);
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index bc1db26598bb..82dc6f867171 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -68,7 +68,7 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
-struct hisi_zip *find_zip_device(int node);
+int zip_create_qps(struct hisi_qp **qps, int ctx_num);
int hisi_zip_register_to_crypto(void);
void hisi_zip_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 9815d5e3ccd0..369ec3220574 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -132,29 +132,25 @@ static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
sqe->dest_addr_h = upper_32_bits(d_addr);
}
-static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx,
- int alg_type, int req_type)
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
+ int alg_type, int req_type)
{
- struct hisi_qp *qp;
+ struct device *dev = &qp->qm->pdev->dev;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp->req_type = req_type;
+ qp->alg_type = alg_type;
qp->qp_ctx = ctx;
- ctx->qp = qp;
ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0)
- goto err_release_qp;
+ if (ret < 0) {
+ dev_err(dev, "start qp failed!\n");
+ return ret;
+ }
- return 0;
+ ctx->qp = qp;
-err_release_qp:
- hisi_qm_release_qp(qp);
- return ret;
+ return 0;
}
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
@@ -165,34 +161,34 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
{
+ struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip *hisi_zip;
- struct hisi_qm *qm;
int ret, i, j;
- /* find the proper zip device */
- hisi_zip = find_zip_device(cpu_to_node(smp_processor_id()));
- if (!hisi_zip) {
- pr_err("Failed to find a proper ZIP device!\n");
+ ret = zip_create_qps(qps, HZIP_CTX_Q_NUM);
+ if (ret) {
+ pr_err("Can not create zip qps!\n");
return -ENODEV;
}
- qm = &hisi_zip->qm;
+
+ hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
- ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i,
- req_type);
- if (ret)
- goto err;
+ ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
+ req_type);
+ if (ret) {
+ for (j = i - 1; j >= 0; j--)
+ hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
+
+ hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
+ return ret;
+ }
hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip;
}
return 0;
-err:
- for (j = i - 1; j >= 0; j--)
- hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]);
-
- return ret;
}
static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index e1bab1a91333..fcc85d2dbd07 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
+#include <linux/uacce.h>
#include "zip.h"
#define PCI_DEVICE_ID_ZIP_PF 0xa250
@@ -60,13 +61,17 @@
#define HZIP_CORE_DEBUG_DECOMP_5 0x309000
#define HZIP_CORE_INT_SOURCE 0x3010A0
-#define HZIP_CORE_INT_MASK 0x3010A4
+#define HZIP_CORE_INT_MASK_REG 0x3010A4
#define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
-#define SRAM_ECC_ERR_NUM_SHIFT 16
-#define SRAM_ECC_ERR_ADDR_SHIFT 24
-#define HZIP_CORE_INT_DISABLE 0x000007FF
+#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
+#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
+#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
+#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
+#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
+#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
+#define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0)
#define HZIP_COMP_CORE_NUM 2
#define HZIP_DECOMP_CORE_NUM 6
#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
@@ -83,77 +88,7 @@
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-static LIST_HEAD(hisi_zip_list);
-static DEFINE_MUTEX(hisi_zip_list_lock);
-
-struct hisi_zip_resource {
- struct hisi_zip *hzip;
- int distance;
- struct list_head list;
-};
-
-static void free_list(struct list_head *head)
-{
- struct hisi_zip_resource *res, *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_zip *find_zip_device(int node)
-{
- struct hisi_zip_resource *res, *tmp;
- struct hisi_zip *ret = NULL;
- struct hisi_zip *hisi_zip;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_zip_list_lock);
-
- if (IS_ENABLED(CONFIG_NUMA)) {
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_zip->qm.pdev->dev;
- res->hzip = hisi_zip;
- res->distance = node_distance(dev_to_node(dev), node);
-
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
- ret = tmp->hzip;
- break;
- }
- }
-
- free_list(&head);
- } else {
- ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
- }
-
- mutex_unlock(&hisi_zip_list_lock);
-
- return ret;
-
-err:
- free_list(&head);
- mutex_unlock(&hisi_zip_list_lock);
- return NULL;
-}
+static struct hisi_qm_list zip_devices;
struct hisi_zip_hw_error {
u32 int_msk;
@@ -297,9 +232,6 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
-static int uacce_mode;
-module_param(uacce_mode, int, 0);
-
static u32 vfs_num;
module_param(vfs_num, uint, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)");
@@ -311,18 +243,11 @@ static const struct pci_device_id hisi_zip_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
-static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
+int zip_create_qps(struct hisi_qp **qps, int qp_num)
{
- mutex_lock(&hisi_zip_list_lock);
- list_add_tail(&hisi_zip->list, &hisi_zip_list);
- mutex_unlock(&hisi_zip_list_lock);
-}
+ int node = cpu_to_node(smp_processor_id());
-static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
-{
- mutex_lock(&hisi_zip_list_lock);
- list_del(&hisi_zip->list);
- mutex_unlock(&hisi_zip_list_lock);
+ return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
@@ -353,8 +278,14 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
- writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
- writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
+
+ if (hisi_zip->qm.use_sva) {
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
+ } else {
+ writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
+ }
/* let's open all compression/decompression cores */
writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
@@ -366,27 +297,32 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
}
-static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
+static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
if (qm->ver == QM_HW_V1) {
- writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
+ writel(HZIP_CORE_INT_MASK_ALL,
+ qm->io_base + HZIP_CORE_INT_MASK_REG);
dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
return;
}
- if (state) {
- /* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
- HZIP_CORE_INT_SOURCE);
- /* enable ZIP hw error interrupts */
- writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
- } else {
- /* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_DISABLE,
- hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
- }
+	/* clear any pending ZIP hw error source */
+ writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ /* configure error type */
+ writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+
+ /* enable ZIP hw error interrupts */
+ writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+}
+
+static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
+{
+ /* disable ZIP hw error interrupts */
+ writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -638,14 +574,53 @@ static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
hisi_zip_debug_regs_clear(hisi_zip);
}
-static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip)
+static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+{
+ const struct hisi_zip_hw_error *err = zip_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 err_val;
+
+ while (err->msg) {
+ if (err->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
+ err_val = readl(qm->io_base +
+ HZIP_CORE_SRAM_ECC_ERR_INFO);
+ dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
+ ((err_val >>
+ HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
+ dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n",
+ (err_val >>
+ HZIP_SRAM_ECC_ERR_ADDR_SHIFT));
+ }
+ }
+ err++;
+ }
+
+ writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+}
+
+static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
- hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- hisi_zip_hw_error_set_state(hisi_zip, true);
+ return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
+static const struct hisi_qm_err_ini hisi_zip_err_ini = {
+ .hw_err_enable = hisi_zip_hw_error_enable,
+ .hw_err_disable = hisi_zip_hw_error_disable,
+ .get_dev_hw_err_status = hisi_zip_get_hw_err_status,
+ .log_dev_hw_err = hisi_zip_log_hw_error,
+ .err_info = {
+ .ce = QM_BASE_CE,
+ .nfe = QM_BASE_NFE |
+ QM_ACC_WB_NOT_READY_TIMEOUT,
+ .fe = 0,
+ .msi = QM_DB_RANDOM_INVALID,
+ }
+};
+
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
struct hisi_qm *qm = &hisi_zip->qm;
@@ -671,8 +646,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return -EINVAL;
}
+ qm->err_ini = &hisi_zip_err_ini;
+
hisi_zip_set_user_domain_and_cache(hisi_zip);
- hisi_zip_hw_error_init(hisi_zip);
+ hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(hisi_zip);
return 0;
@@ -791,27 +768,15 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hisi_zip);
qm = &hisi_zip->qm;
+ qm->use_dma_api = true;
qm->pdev = pdev;
qm->ver = rev_id;
+ qm->algs = "zlib\ngzip";
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
QM_HW_VF;
- switch (uacce_mode) {
- case 0:
- qm->use_dma_api = true;
- break;
- case 1:
- qm->use_dma_api = false;
- break;
- case 2:
- qm->use_dma_api = true;
- break;
- default:
- return -EINVAL;
- }
-
ret = hisi_qm_init(qm);
if (ret) {
dev_err(&pdev->dev, "Failed to init qm!\n");
@@ -849,7 +814,13 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
- hisi_zip_add_to_list(hisi_zip);
+ hisi_qm_add_to_list(qm, &zip_devices);
+
+ if (qm->uacce) {
+ ret = uacce_register(qm->uacce);
+ if (ret)
+ goto err_qm_uninit;
+ }
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
ret = hisi_zip_sriov_enable(pdev, vfs_num);
@@ -860,7 +831,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_remove_from_list:
- hisi_zip_remove_from_list(hisi_zip);
+ hisi_qm_del_from_list(qm, &zip_devices);
hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm);
err_qm_uninit:
@@ -887,92 +858,13 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm);
- if (qm->fun_type == QM_HW_PF)
- hisi_zip_hw_error_set_state(hisi_zip, false);
-
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_zip_remove_from_list(hisi_zip);
-}
-
-static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts)
-{
- const struct hisi_zip_hw_error *err = zip_hw_error;
- struct device *dev = &hisi_zip->qm.pdev->dev;
- u32 err_val;
-
- while (err->msg) {
- if (err->int_msk & err_sts) {
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
-
- if (HZIP_CORE_INT_STATUS_M_ECC & err->int_msk) {
- err_val = readl(hisi_zip->qm.io_base +
- HZIP_CORE_SRAM_ECC_ERR_INFO);
- dev_warn(dev, "hisi-zip multi ecc sram num=0x%x\n",
- ((err_val >> SRAM_ECC_ERR_NUM_SHIFT) &
- 0xFF));
- dev_warn(dev, "hisi-zip multi ecc sram addr=0x%x\n",
- (err_val >> SRAM_ECC_ERR_ADDR_SHIFT));
- }
- }
- err++;
- }
-}
-
-static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip)
-{
- u32 err_sts;
-
- /* read err sts */
- err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS);
-
- if (err_sts) {
- hisi_zip_log_hw_error(hisi_zip, err_sts);
- /* clear error interrupts */
- writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE);
-
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- pci_ers_result_t qm_ret, zip_ret;
-
- if (!hisi_zip) {
- dev_err(dev,
- "Can't recover ZIP-error occurred during device init\n");
- return PCI_ERS_RESULT_NONE;
- }
-
- qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm);
-
- zip_ret = hisi_zip_hw_error_handle(hisi_zip);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- zip_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_zip_process_hw_error(pdev);
+ hisi_qm_del_from_list(qm, &zip_devices);
}
static const struct pci_error_handlers hisi_zip_err_handler = {
- .error_detected = hisi_zip_error_detected,
+ .error_detected = hisi_qm_dev_err_detected,
};
static struct pci_driver hisi_zip_pci_driver = {
@@ -1002,6 +894,7 @@ static int __init hisi_zip_init(void)
{
int ret;
+ hisi_qm_init_list(&zip_devices);
hisi_zip_register_debugfs();
ret = pci_register_driver(&hisi_zip_pci_driver);
@@ -1010,12 +903,10 @@ static int __init hisi_zip_init(void)
goto err_pci;
}
- if (uacce_mode == 0 || uacce_mode == 2) {
- ret = hisi_zip_register_to_crypto();
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_crypto;
- }
+ ret = hisi_zip_register_to_crypto();
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_crypto;
}
return 0;
@@ -1030,8 +921,7 @@ err_pci:
static void __exit hisi_zip_exit(void)
{
- if (uacce_mode == 0 || uacce_mode == 2)
- hisi_zip_unregister_from_crypto();
+ hisi_zip_unregister_from_crypto();
pci_unregister_driver(&hisi_zip_pci_driver);
hisi_zip_unregister_debugfs();
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 25d5227f74a1..0e25fc3087f3 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -103,7 +103,7 @@ struct img_hash_request_ctx {
struct ahash_request fallback_req;
/* Zero length buffer must remain last member of struct */
- u8 buffer[0] __aligned(sizeof(u32));
+ u8 buffer[] __aligned(sizeof(u32));
};
struct img_hash_ctx {
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
new file mode 100644
index 000000000000..13063384f958
--- /dev/null
+++ b/drivers/crypto/marvell/Kconfig
@@ -0,0 +1,37 @@
+#
+# Marvell crypto drivers configuration
+#
+
+config CRYPTO_DEV_MARVELL
+ tristate
+
+config CRYPTO_DEV_MARVELL_CESA
+ tristate "Marvell's Cryptographic Engine driver"
+ depends on PLAT_ORION || ARCH_MVEBU
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select SRAM
+ select CRYPTO_DEV_MARVELL
+ help
+ This driver allows you to utilize the Cryptographic Engines and
+ Security Accelerator (CESA) which can be found on MVEBU and ORION
+ platforms.
+ This driver supports CPU offload through DMA transfers.
+
+config CRYPTO_DEV_OCTEONTX_CPT
+ tristate "Support for Marvell OcteonTX CPT driver"
+ depends on ARCH_THUNDER || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ depends on CRYPTO_LIB_AES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
+ select CRYPTO_DEV_MARVELL
+ help
+	  This driver allows you to utilize the Marvell Cryptographic
+	  Accelerator Unit (CPT) found in the OcteonTX series of processors.
+
+	  To compile this driver as a module, choose M here:
+	  the modules will be called octeontx-cpt and octeontx-cptvf.
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile
index b27cab65e696..6c6a1519b0f1 100644
--- a/drivers/crypto/marvell/Makefile
+++ b/drivers/crypto/marvell/Makefile
@@ -1,3 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
-marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx/
diff --git a/drivers/crypto/marvell/cesa/Makefile b/drivers/crypto/marvell/cesa/Makefile
new file mode 100644
index 000000000000..b27cab65e696
--- /dev/null
+++ b/drivers/crypto/marvell/cesa/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
+marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 8a5f0b0bdf77..8a5f0b0bdf77 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index f1ed3b85c0d2..e8632d5f343f 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -436,7 +436,7 @@ struct mv_cesa_dev {
* @queue: fifo of the pending crypto requests
* @load: engine load counter, useful for load balancing
* @chain: list of the current tdma descriptors being processed
- * by this engine.
+ * by this engine.
* @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.
@@ -467,7 +467,7 @@ struct mv_cesa_engine {
* @step: launch the crypto operation on the next chunk
* @cleanup: cleanup the crypto request (release associated data)
* @complete: complete the request, i.e copy result or context from sram when
- * needed.
+ * needed.
*/
struct mv_cesa_req_ops {
int (*process)(struct crypto_async_request *req, u32 status);
@@ -734,6 +734,7 @@ static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
for (i = 0; i < cesa_dev->caps->nengines; i++) {
struct mv_cesa_engine *engine = cesa_dev->engines + i;
u32 load = atomic_read(&engine->load);
+
if (load < min_load) {
min_load = load;
selected = engine;
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index c24f34a48cef..f133c2ccb5ae 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -106,8 +106,8 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
- BUG_ON(readl(engine->regs + CESA_SA_CMD) &
- CESA_SA_CMD_EN_CESA_SA_ACCL0);
+ WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -178,6 +178,7 @@ static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
{
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
+
creq->base.engine = engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
@@ -336,7 +337,8 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
do {
struct mv_cesa_op_ctx *op;
- op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
+ op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
+ flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
@@ -365,9 +367,10 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
} while (mv_cesa_skcipher_req_iter_next_op(&iter));
/* Add output data for IV */
- ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
- CESA_SA_DATA_SRAM_OFFSET,
- CESA_TDMA_SRC_IN_SRAM, flags);
+ ret = mv_cesa_dma_add_result_op(&basereq->chain,
+ CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/cesa/hash.c
index a2b35fb0fb89..b971284332b6 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -141,9 +141,11 @@ static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
if (creq->algo_le) {
__le64 bits = cpu_to_le64(creq->len << 3);
+
memcpy(buf + padlen, &bits, sizeof(bits));
} else {
__be64 bits = cpu_to_be64(creq->len << 3);
+
memcpy(buf + padlen, &bits, sizeof(bits));
}
@@ -168,7 +170,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
for (i = 0; i < digsize / 4; i++)
- writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+ writel_relaxed(creq->state[i],
+ engine->regs + CESA_IVDIG(i));
}
if (creq->cache_ptr)
@@ -245,8 +248,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
- BUG_ON(readl(engine->regs + CESA_SA_CMD) &
- CESA_SA_CMD_EN_CESA_SA_ACCL0);
+ WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -329,11 +332,12 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
- (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+ (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
+ CESA_TDMA_RESULT) {
__le32 *data = NULL;
/*
- * Result is already in the correct endianess when the SA is
+ * Result is already in the correct endianness when the SA is
* used
*/
data = creq->base.chain.last->op->ctx.hash.hash;
@@ -347,9 +351,9 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
CESA_IVDIG(i));
if (creq->last_req) {
/*
- * Hardware's MD5 digest is in little endian format, but
- * SHA in big endian format
- */
+ * Hardware's MD5 digest is in little endian format, but
+ * SHA in big endian format
+ */
if (creq->algo_le) {
__le32 *result = (void *)ahashreq->result;
@@ -439,7 +443,8 @@ static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
bool cached = false;
- if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
+ if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
+ !creq->last_req) {
cached = true;
if (!req->nbytes)
@@ -648,7 +653,8 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (!mv_cesa_ahash_req_iter_next_op(&iter))
break;
- op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
+ op = mv_cesa_dma_add_frag(&basereq->chain,
+ &creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
@@ -920,7 +926,7 @@ struct ahash_alg mv_md5_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -990,7 +996,7 @@ struct ahash_alg mv_sha1_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -1063,7 +1069,7 @@ struct ahash_alg mv_sha256_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -1297,7 +1303,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -1367,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -1437,6 +1443,6 @@ struct ahash_alg mv_ahmac_sha256_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 45939d53e8d6..b81ee276fe0e 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -50,8 +50,8 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
engine->regs + CESA_SA_CFG);
writel_relaxed(dreq->chain.first->cur_dma,
engine->regs + CESA_TDMA_NEXT_ADDR);
- BUG_ON(readl(engine->regs + CESA_SA_CMD) &
- CESA_SA_CMD_EN_CESA_SA_ACCL0);
+ WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -175,8 +175,10 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
break;
}
- /* Save the last request in error to engine->req, so that the core
- * knows which request was fautly */
+	/*
+	 * Save the last request in error to engine->req, so that the core
+	 * knows which request was faulty.
+	 */
if (res) {
spin_lock_bh(&engine->lock);
engine->req = req;
diff --git a/drivers/crypto/marvell/octeontx/Makefile b/drivers/crypto/marvell/octeontx/Makefile
new file mode 100644
index 000000000000..5e956fe1a85b
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx-cpt.o octeontx-cptvf.o
+
+octeontx-cpt-objs := otx_cptpf_main.o otx_cptpf_mbox.o otx_cptpf_ucode.o
+octeontx-cptvf-objs := otx_cptvf_main.o otx_cptvf_mbox.o otx_cptvf_reqmgr.o \
+ otx_cptvf_algs.o
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_common.h b/drivers/crypto/marvell/octeontx/otx_cpt_common.h
new file mode 100644
index 000000000000..ca704a7a265f
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_common.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPT_COMMON_H
+#define __OTX_CPT_COMMON_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#define OTX_CPT_MAX_MBOX_DATA_STR_SIZE 64
+
+enum otx_cptpf_type {
+ OTX_CPT_AE = 2,
+ OTX_CPT_SE = 3,
+ BAD_OTX_CPTPF_TYPE,
+};
+
+enum otx_cptvf_type {
+ OTX_CPT_AE_TYPES = 1,
+ OTX_CPT_SE_TYPES = 2,
+ BAD_OTX_CPTVF_TYPE,
+};
+
+/* VF-PF message opcodes */
+enum otx_cpt_mbox_opcode {
+ OTX_CPT_MSG_VF_UP = 1,
+ OTX_CPT_MSG_VF_DOWN,
+ OTX_CPT_MSG_READY,
+ OTX_CPT_MSG_QLEN,
+ OTX_CPT_MSG_QBIND_GRP,
+ OTX_CPT_MSG_VQ_PRIORITY,
+ OTX_CPT_MSG_PF_TYPE,
+ OTX_CPT_MSG_ACK,
+ OTX_CPT_MSG_NACK
+};
+
+/* OcteonTX CPT mailbox structure */
+struct otx_cpt_mbox {
+ u64 msg; /* Message type MBOX[0] */
+	u64 data; /* Data MBOX[1] */
+};
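To illustrate the VF-PF handshake these opcodes define, here is a hypothetical message as a VF might compose it (illustrative only; the actual send path, with its register writes and ACK handling, presumably lives in otx_cptvf_mbox.c):

/* Hypothetical example: a VF announcing itself to the PF. */
struct otx_cpt_mbox mbx = {
	.msg  = OTX_CPT_MSG_VF_UP,	/* opcode, carried in MBOX[0] */
	.data = 0,			/* optional payload, in MBOX[1] */
};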
+
+#endif /* __OTX_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
new file mode 100644
index 000000000000..b8bdb9f134f3
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -0,0 +1,824 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPT_HW_TYPES_H
+#define __OTX_CPT_HW_TYPES_H
+
+#include <linux/types.h>
+
+/* Device IDs */
+#define OTX_CPT_PCI_PF_DEVICE_ID 0xa040
+#define OTX_CPT_PCI_VF_DEVICE_ID 0xa041
+
+#define OTX_CPT_PCI_PF_SUBSYS_ID 0xa340
+#define OTX_CPT_PCI_VF_SUBSYS_ID 0xa341
+
+/* Configuration and status registers are in BAR0 on OcteonTX platform */
+#define OTX_CPT_PF_PCI_CFG_BAR 0
+#define OTX_CPT_VF_PCI_CFG_BAR 0
+
+#define OTX_CPT_BAR_E_CPTX_VFX_BAR0_OFFSET(a, b) \
+ (0x000020000000ll + 0x1000000000ll * (a) + 0x100000ll * (b))
+#define OTX_CPT_BAR_E_CPTX_VFX_BAR0_SIZE 0x400000
+
+/* Mailbox interrupts offset */
+#define OTX_CPT_PF_MBOX_INT 3
+#define OTX_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
+/* Number of MSIX supported in PF */
+#define OTX_CPT_PF_MSIX_VECTORS 4
+/* Maximum supported microcode groups */
+#define OTX_CPT_MAX_ENGINE_GROUPS 8
+
+/* CPT instruction size in bytes */
+#define OTX_CPT_INST_SIZE 64
+/* CPT queue next chunk pointer size in bytes */
+#define OTX_CPT_NEXT_CHUNK_PTR_SIZE 8
+
+/* OcteonTX CPT VF MSIX vectors and their offsets */
+#define OTX_CPT_VF_MSIX_VECTORS 2
+#define OTX_CPT_VF_INTR_MBOX_MASK BIT(0)
+#define OTX_CPT_VF_INTR_DOVF_MASK BIT(1)
+#define OTX_CPT_VF_INTR_IRDE_MASK BIT(2)
+#define OTX_CPT_VF_INTR_NWRP_MASK BIT(3)
+#define OTX_CPT_VF_INTR_SERR_MASK BIT(4)
+
+/* OcteonTX CPT PF registers */
+#define OTX_CPT_PF_CONSTANTS (0x0ll)
+#define OTX_CPT_PF_RESET (0x100ll)
+#define OTX_CPT_PF_DIAG (0x120ll)
+#define OTX_CPT_PF_BIST_STATUS (0x160ll)
+#define OTX_CPT_PF_ECC0_CTL (0x200ll)
+#define OTX_CPT_PF_ECC0_FLIP (0x210ll)
+#define OTX_CPT_PF_ECC0_INT (0x220ll)
+#define OTX_CPT_PF_ECC0_INT_W1S (0x230ll)
+#define OTX_CPT_PF_ECC0_ENA_W1S (0x240ll)
+#define OTX_CPT_PF_ECC0_ENA_W1C (0x250ll)
+#define OTX_CPT_PF_MBOX_INTX(b) (0x400ll | (u64)(b) << 3)
+#define OTX_CPT_PF_MBOX_INT_W1SX(b) (0x420ll | (u64)(b) << 3)
+#define OTX_CPT_PF_MBOX_ENA_W1CX(b) (0x440ll | (u64)(b) << 3)
+#define OTX_CPT_PF_MBOX_ENA_W1SX(b) (0x460ll | (u64)(b) << 3)
+#define OTX_CPT_PF_EXEC_INT (0x500ll)
+#define OTX_CPT_PF_EXEC_INT_W1S (0x520ll)
+#define OTX_CPT_PF_EXEC_ENA_W1C (0x540ll)
+#define OTX_CPT_PF_EXEC_ENA_W1S (0x560ll)
+#define OTX_CPT_PF_GX_EN(b) (0x600ll | (u64)(b) << 3)
+#define OTX_CPT_PF_EXEC_INFO (0x700ll)
+#define OTX_CPT_PF_EXEC_BUSY (0x800ll)
+#define OTX_CPT_PF_EXEC_INFO0 (0x900ll)
+#define OTX_CPT_PF_EXEC_INFO1 (0x910ll)
+#define OTX_CPT_PF_INST_REQ_PC (0x10000ll)
+#define OTX_CPT_PF_INST_LATENCY_PC (0x10020ll)
+#define OTX_CPT_PF_RD_REQ_PC (0x10040ll)
+#define OTX_CPT_PF_RD_LATENCY_PC (0x10060ll)
+#define OTX_CPT_PF_RD_UC_PC (0x10080ll)
+#define OTX_CPT_PF_ACTIVE_CYCLES_PC (0x10100ll)
+#define OTX_CPT_PF_EXE_CTL (0x4000000ll)
+#define OTX_CPT_PF_EXE_STATUS (0x4000008ll)
+#define OTX_CPT_PF_EXE_CLK (0x4000010ll)
+#define OTX_CPT_PF_EXE_DBG_CTL (0x4000018ll)
+#define OTX_CPT_PF_EXE_DBG_DATA (0x4000020ll)
+#define OTX_CPT_PF_EXE_BIST_STATUS (0x4000028ll)
+#define OTX_CPT_PF_EXE_REQ_TIMER (0x4000030ll)
+#define OTX_CPT_PF_EXE_MEM_CTL (0x4000038ll)
+#define OTX_CPT_PF_EXE_PERF_CTL (0x4001000ll)
+#define OTX_CPT_PF_EXE_DBG_CNTX(b) (0x4001100ll | (u64)(b) << 3)
+#define OTX_CPT_PF_EXE_PERF_EVENT_CNT (0x4001180ll)
+#define OTX_CPT_PF_EXE_EPCI_INBX_CNT(b) (0x4001200ll | (u64)(b) << 3)
+#define OTX_CPT_PF_EXE_EPCI_OUTBX_CNT(b) (0x4001240ll | (u64)(b) << 3)
+#define OTX_CPT_PF_ENGX_UCODE_BASE(b) (0x4002000ll | (u64)(b) << 3)
+#define OTX_CPT_PF_QX_CTL(b) (0x8000000ll | (u64)(b) << 20)
+#define OTX_CPT_PF_QX_GMCTL(b) (0x8000020ll | (u64)(b) << 20)
+#define OTX_CPT_PF_QX_CTL2(b) (0x8000100ll | (u64)(b) << 20)
+#define OTX_CPT_PF_VFX_MBOXX(b, c) (0x8001000ll | (u64)(b) << 20 | \
+ (u64)(c) << 8)
+
+/* OcteonTX CPT VF registers */
+#define OTX_CPT_VQX_CTL(b) (0x100ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_SADDR(b) (0x200ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_WAIT(b) (0x400ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_INPROG(b) (0x410ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE(b) (0x420ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_ACK(b) (0x440ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_INT_W1S(b) (0x460ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_INT_W1C(b) (0x468ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_ENA_W1S(b) (0x470ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_ENA_W1C(b) (0x478ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_MISC_INT(b) (0x500ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_MISC_INT_W1S(b) (0x508ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_MISC_ENA_W1S(b) (0x510ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_MISC_ENA_W1C(b) (0x518ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DOORBELL(b) (0x600ll | (u64)(b) << 20)
+#define OTX_CPT_VFX_PF_MBOXX(b, c) (0x1000ll | ((b) << 20) | ((c) << 3))
+
+/*
+ * Enumeration otx_cpt_ucode_error_code_e
+ *
+ * Enumerates ucode errors
+ */
+enum otx_cpt_ucode_error_code_e {
+ CPT_NO_UCODE_ERROR = 0x00,
+ ERR_OPCODE_UNSUPPORTED = 0x01,
+
+ /* Scatter gather */
+ ERR_SCATTER_GATHER_WRITE_LENGTH = 0x02,
+ ERR_SCATTER_GATHER_LIST = 0x03,
+ ERR_SCATTER_GATHER_NOT_SUPPORTED = 0x04,
+
+};
+
+/*
+ * Enumeration otx_cpt_comp_e
+ *
+ * CPT OcteonTX Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+enum otx_cpt_comp_e {
+ CPT_COMP_E_NOTDONE = 0x00,
+ CPT_COMP_E_GOOD = 0x01,
+ CPT_COMP_E_FAULT = 0x02,
+ CPT_COMP_E_SWERR = 0x03,
+ CPT_COMP_E_HWERR = 0x04,
+ CPT_COMP_E_LAST_ENTRY = 0x05
+};
+
+/*
+ * Enumeration otx_cpt_vf_int_vec_e
+ *
+ * CPT OcteonTX VF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx_cpt_vf_int_vec_e {
+ CPT_VF_INT_VEC_E_MISC = 0x00,
+ CPT_VF_INT_VEC_E_DONE = 0x01
+};
+
+/*
+ * Structure cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout. Instructions are
+ * stored in memory as little-endian unless CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_inst_s_s
+ * Word 0
+ * doneint:1 Done interrupt.
+ * 0 = No interrupts related to this instruction.
+ * 1 = When the instruction completes, CPT()_VQ()_DONE[DONE] will be
+ *	  incremented, and based on the rules described there an interrupt may
+ * occur.
+ * Word 1
+ * res_addr [127: 64] Result IOVA.
+ * If nonzero, specifies where to write CPT_RES_S.
+ * If zero, no result structure will be written.
+ * Address must be 16-byte aligned.
+ * Bits <63:49> are ignored by hardware; software should use a
+ * sign-extended bit <48> for forward compatibility.
+ * Word 2
+ * grp:10 [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to use when
+ *	  CPT submits work to SSO.
+ * For the SSO to not discard the add-work request, FPA_PF_MAP() must map
+ * [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ * tt:2 [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use when CPT
+ *	  submits work to SSO.
+ * tag:32 [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when CPT
+ * submits work to SSO.
+ * Word 3
+ * wq_ptr [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ * work-queue entry that CPT submits work to SSO after all context,
+ * output data, and result write operations are visible to other
+ * CNXXXX units and the cores. Bits <2:0> must be zero.
+ * Bits <63:49> are ignored by hardware; software should
+ * use a sign-extended bit <48> for forward compatibility.
+ * Internal:
+ * Bits <63:49>, <2:0> are ignored by hardware, treated as always 0x0.
+ * Word 4
+ * ei0; [319:256] Engine instruction word 0. Passed to the AE/SE.
+ * Word 5
+ * ei1; [383:320] Engine instruction word 1. Passed to the AE/SE.
+ * Word 6
+ *	ei2; [447:384] Engine instruction word 2. Passed to the AE/SE.
+ * Word 7
+ *	ei3; [511:448] Engine instruction word 3. Passed to the AE/SE.
+ *
+ */
+union otx_cpt_inst_s {
+ u64 u[8];
+
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_17_63:47;
+ u64 doneint:1;
+ u64 reserved_0_15:16;
+#else /* Word 0 - Little Endian */
+ u64 reserved_0_15:16;
+ u64 doneint:1;
+ u64 reserved_17_63:47;
+#endif /* Word 0 - End */
+ u64 res_addr;
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ u64 reserved_172_191:20;
+ u64 grp:10;
+ u64 tt:2;
+ u64 tag:32;
+#else /* Word 2 - Little Endian */
+ u64 tag:32;
+ u64 tt:2;
+ u64 grp:10;
+ u64 reserved_172_191:20;
+#endif /* Word 2 - End */
+ u64 wq_ptr;
+ u64 ei0;
+ u64 ei1;
+ u64 ei2;
+ u64 ei3;
+ } s;
+};
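+
+/*
+ * Illustrative sketch, not part of this patch: filling a CPT_INST_S for a
+ * request that wants a DONEINT and a result write, per the layout above.
+ * Assumes memset() (<linux/string.h>) is available here; 'rptr' must be a
+ * 16-byte-aligned result IOVA and ei0/ei1 carry opaque engine words.
+ */
+static inline void otx_cpt_example_fill_inst(union otx_cpt_inst_s *inst,
+					     u64 rptr, u64 ei0, u64 ei1)
+{
+	memset(inst, 0, sizeof(*inst));
+	inst->s.doneint = 1;		/* bump CPT()_VQ()_DONE on completion */
+	inst->s.res_addr = rptr;	/* where CPT_RES_S will be written */
+	inst->s.ei0 = ei0;
+	inst->s.ei1 = ei1;
+	/* wq_ptr left zero: no SSO work submission for this request */
+}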
+
+/*
+ * Structure cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and
+ * each instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_res_s_s
+ * Word 0
+ * doneint:1 [16:16] Done interrupt. This bit is copied from the
+ * corresponding instruction's CPT_INST_S[DONEINT].
+ * compcode:8 [7:0] Indicates completion/error status of the CPT coprocessor
+ * for the associated instruction, as enumerated by CPT_COMP_E.
+ * Core software may write the memory location containing [COMPCODE] to
+ * 0x0 before ringing the doorbell, and then poll for completion by
+ * checking for a nonzero value.
+ *	  Once the core observes a nonzero [COMPCODE] value in this case, the CPT
+ * coprocessor will have also completed L2/DRAM write operations.
+ * Word 1
+ * reserved
+ *
+ */
+union otx_cpt_res_s {
+ u64 u[2];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_17_63:47;
+ u64 doneint:1;
+ u64 reserved_8_15:8;
+ u64 compcode:8;
+#else /* Word 0 - Little Endian */
+ u64 compcode:8;
+ u64 reserved_8_15:8;
+ u64 doneint:1;
+ u64 reserved_17_63:47;
+#endif /* Word 0 - End */
+ u64 reserved_64_127;
+ } s;
+};
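+
+/*
+ * Illustrative sketch, not part of this patch: the polling scheme the
+ * [COMPCODE] description above suggests. Software zeroes the result word
+ * before ringing the doorbell, then spins until the coprocessor writes a
+ * nonzero completion code. Assumes READ_ONCE() (<linux/compiler.h>) is
+ * available here; a real caller would bound or relax the busy-wait loop.
+ */
+static inline u8 otx_cpt_example_poll_result(union otx_cpt_res_s *res)
+{
+	union otx_cpt_res_s snap;
+
+	do {
+		snap.u[0] = READ_ONCE(res->u[0]);
+	} while (snap.s.compcode == CPT_COMP_E_NOTDONE);
+
+	/* A nonzero [COMPCODE] also means L2/DRAM writes have completed */
+	return snap.s.compcode;
+}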
+
+/*
+ * Register (NCB) otx_cpt#_pf_bist_status
+ *
+ * CPT PF Control Bist Status Register
+ * This register has the BIST status of memories. Each bit is the BIST result
+ * of an individual memory (per bit, 0 = pass and 1 = fail).
+ * otx_cptx_pf_bist_status_s
+ * Word0
+ * bstatus [29:0](RO/H) BIST status. One bit per memory, enumerated by
+ * CPT_RAMS_E.
+ */
+union otx_cptx_pf_bist_status {
+ u64 u;
+ struct otx_cptx_pf_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_30_63:34;
+ u64 bstatus:30;
+#else /* Word 0 - Little Endian */
+ u64 bstatus:30;
+ u64 reserved_30_63:34;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_pf_constants
+ *
+ * CPT PF Constants Register
+ * This register contains implementation-related parameters of CPT in CNXXXX.
+ * otx_cptx_pf_constants_s
+ * Word 0
+ * reserved_40_63:24 [63:40] Reserved.
+ * epcis:8 [39:32](RO) Number of EPCI busses.
+ * grps:8 [31:24](RO) Number of engine groups implemented.
+ * ae:8 [23:16](RO/H) Number of AEs. In CNXXXX, for CPT0 returns 0x0,
+ * for CPT1 returns 0x18, or less if there are fuse-disables.
+ * se:8 [15:8](RO/H) Number of SEs. In CNXXXX, for CPT0 returns 0x30,
+ * or less if there are fuse-disables, for CPT1 returns 0x0.
+ * vq:8 [7:0](RO) Number of VQs.
+ */
+union otx_cptx_pf_constants {
+ u64 u;
+ struct otx_cptx_pf_constants_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_40_63:24;
+ u64 epcis:8;
+ u64 grps:8;
+ u64 ae:8;
+ u64 se:8;
+ u64 vq:8;
+#else /* Word 0 - Little Endian */
+ u64 vq:8;
+ u64 se:8;
+ u64 ae:8;
+ u64 grps:8;
+ u64 epcis:8;
+ u64 reserved_40_63:24;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_pf_exe_bist_status
+ *
+ * CPT PF Engine Bist Status Register
+ * This register has the BIST status of each engine. Each bit is the
+ * BIST result of an individual engine (per bit, 0 = pass and 1 = fail).
+ * otx_cptx_pf_exe_bist_status_s
+ * Word0
+ * reserved_48_63:16 [63:48] reserved
+ * bstatus:48 [47:0](RO/H) BIST status. One bit per engine.
+ *
+ */
+union otx_cptx_pf_exe_bist_status {
+ u64 u;
+ struct otx_cptx_pf_exe_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_48_63:16;
+ u64 bstatus:48;
+#else /* Word 0 - Little Endian */
+ u64 bstatus:48;
+ u64 reserved_48_63:16;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_pf_q#_ctl
+ *
+ * CPT Queue Control Register
+ * This register configures queues. It should be changed only when the
+ * queue is quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
+ * otx_cptx_pf_qx_ctl_s
+ * Word0
+ * reserved_60_63:4 [63:60] reserved.
+ * aura:12; [59:48](R/W) Guest-aura for returning this queue's
+ * instruction-chunk buffers to FPA. Only used when [INST_FREE] is set.
+ * For the FPA to not discard the request, FPA_PF_MAP() must map
+ * [AURA] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ * reserved_45_47:3 [47:45] reserved.
+ * size:13 [44:32](R/W) Command-buffer size, in number of 64-bit words per
+ * command buffer segment. Must be 8*n + 1, where n is the number of
+ * instructions per buffer segment.
+ * reserved_11_31:21 [31:11] Reserved.
+ * cont_err:1 [10:10](R/W) Continue on error.
+ * 0 = When CPT()_VQ()_MISC_INT[NWRP], CPT()_VQ()_MISC_INT[IRDE] or
+ * CPT()_VQ()_MISC_INT[DOVF] are set by hardware or software via
+ * CPT()_VQ()_MISC_INT_W1S, then CPT()_VQ()_CTL[ENA] is cleared. Due to
+ * pipelining, additional instructions may have been processed between the
+ * instruction causing the error and the next instruction in the disabled
+ * queue (the instruction at CPT()_VQ()_SADDR).
+ * 1 = Ignore errors and continue processing instructions.
+ * For diagnostic use only.
+ *	inst_free:1 [9:9](R/W) Instruction FPA free. When set, each instruction
+ *	  chunk is freed back to the FPA once CPT reaches the end of it.
+ * inst_be:1 [8:8](R/W) Instruction big-endian control. When set, instructions,
+ * instruction next chunk pointers, and result structures are stored in
+ * big-endian format in memory.
+ * iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
+ *	  0 = The hardware issues an NCB transient load (LDT) towards the cache,
+ *	  which, if the line hits and is dirty, will cause the line to be
+ *	  written back before being replaced.
+ * 1 = The hardware issues NCB LDWB read-and-invalidate command towards
+ * the cache when fetching the last word of instructions; as a result the
+ * line will not be written back when replaced. This improves
+ * performance, but software must not read the instructions after they are
+ * posted to the hardware. Reads that do not consume the last word of a
+ * cache line always use LDI.
+ * reserved_4_6:3 [6:4] Reserved.
+ * grp:3; [3:1](R/W) Engine group.
+ * pri:1; [0:0](R/W) Queue priority.
+ * 1 = This queue has higher priority. Round-robin between higher
+ * priority queues.
+ * 0 = This queue has lower priority. Round-robin between lower
+ * priority queues.
+ */
+union otx_cptx_pf_qx_ctl {
+ u64 u;
+ struct otx_cptx_pf_qx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_60_63:4;
+ u64 aura:12;
+ u64 reserved_45_47:3;
+ u64 size:13;
+ u64 reserved_11_31:21;
+ u64 cont_err:1;
+ u64 inst_free:1;
+ u64 inst_be:1;
+ u64 iqb_ldwb:1;
+ u64 reserved_4_6:3;
+ u64 grp:3;
+ u64 pri:1;
+#else /* Word 0 - Little Endian */
+ u64 pri:1;
+ u64 grp:3;
+ u64 reserved_4_6:3;
+ u64 iqb_ldwb:1;
+ u64 inst_be:1;
+ u64 inst_free:1;
+ u64 cont_err:1;
+ u64 reserved_11_31:21;
+ u64 size:13;
+ u64 reserved_45_47:3;
+ u64 aura:12;
+ u64 reserved_60_63:4;
+#endif /* Word 0 - End */
+ } s;
+};
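+
+/*
+ * Illustrative sketch, not part of this patch: programming one queue's
+ * [SIZE] per the rule above, 8*n + 1 64-bit words where n is the number
+ * of instructions per chunk (each instruction is 8 words, plus one word
+ * for the next-chunk pointer). Assumes readq()/writeq() (<linux/io.h>)
+ * are available here and that 'pf_base' is the PF BAR0 mapping.
+ */
+static inline void otx_cpt_example_set_qsize(void __iomem *pf_base, int vq,
+					     u32 insts_per_chunk)
+{
+	union otx_cptx_pf_qx_ctl qx_ctl;
+
+	qx_ctl.u = readq(pf_base + OTX_CPT_PF_QX_CTL(vq));
+	qx_ctl.s.size = 8 * insts_per_chunk + 1;
+	writeq(qx_ctl.u, pf_base + OTX_CPT_PF_QX_CTL(vq));
+}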
+
+/*
+ * Register (NCB) otx_cpt#_vq#_saddr
+ *
+ * CPT Queue Starting Buffer Address Registers
+ * These registers set the instruction buffer starting address.
+ * otx_cptx_vqx_saddr_s
+ * Word0
+ * reserved_49_63:15 [63:49] Reserved.
+ * ptr:43 [48:6](R/W/H) Instruction buffer IOVA <48:6> (64-byte aligned).
+ * When written, it is the initial buffer starting address; when read,
+ * it is the next read pointer to be requested from L2C. The PTR field
+ * is overwritten with the next pointer each time that the command buffer
+ * segment is exhausted. New commands will then be read from the newly
+ * specified command buffer pointer.
+ * reserved_0_5:6 [5:0] Reserved.
+ *
+ */
+union otx_cptx_vqx_saddr {
+ u64 u;
+ struct otx_cptx_vqx_saddr_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_49_63:15;
+ u64 ptr:43;
+ u64 reserved_0_5:6;
+#else /* Word 0 - Little Endian */
+ u64 reserved_0_5:6;
+ u64 ptr:43;
+ u64 reserved_49_63:15;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_misc_ena_w1s
+ *
+ * CPT Queue Misc Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ * otx_cptx_vqx_misc_ena_w1s_s
+ * Word0
+ * reserved_5_63:59 [63:5] Reserved.
+ * swerr:1 [4:4](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[SWERR].
+ * nwrp:1 [3:3](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[NWRP].
+ * irde:1 [2:2](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[IRDE].
+ * dovf:1 [1:1](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[DOVF].
+ * mbox:1 [0:0](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[MBOX].
+ *
+ */
+union otx_cptx_vqx_misc_ena_w1s {
+ u64 u;
+ struct otx_cptx_vqx_misc_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_5_63:59;
+ u64 swerr:1;
+ u64 nwrp:1;
+ u64 irde:1;
+ u64 dovf:1;
+ u64 mbox:1;
+#else /* Word 0 - Little Endian */
+ u64 mbox:1;
+ u64 dovf:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 swerr:1;
+ u64 reserved_5_63:59;
+#endif /* Word 0 - End */
+ } s;
+};
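+
+/*
+ * Illustrative sketch, not part of this patch: enabling all five misc
+ * interrupts for one VQ through the W1S register above. Assumes
+ * writeq() (<linux/io.h>) is available here and that 'vq_base' is the
+ * VF BAR0 mapping for the queue.
+ */
+static inline void otx_cpt_example_enable_misc_irqs(void __iomem *vq_base)
+{
+	union otx_cptx_vqx_misc_ena_w1s ena = { .u = 0 };
+
+	ena.s.mbox = 1;
+	ena.s.dovf = 1;
+	ena.s.irde = 1;
+	ena.s.nwrp = 1;
+	ena.s.swerr = 1;
+	writeq(ena.u, vq_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+}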
+
+/*
+ * Register (NCB) otx_cpt#_vq#_doorbell
+ *
+ * CPT Queue Doorbell Registers
+ * Doorbells for the CPT instruction queues.
+ * otx_cptx_vqx_doorbell_s
+ * Word0
+ * reserved_20_63:44 [63:20] Reserved.
+ *	dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
+ *	  to the CPT instruction doorbell count. The readback value is the
+ *	  current number of pending doorbell requests. If the counter overflows,
+ *	  CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
+ *	  zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
+ *	  then write a value of 2^20 minus the read [DBELL_CNT], then write one
+ *	  to CPT()_VQ()_MISC_INT_W1C[DBELL_DOVF] and
+ *	  CPT()_VQ()_MISC_INT_ENA_W1S[DBELL_DOVF]; a sketch of this sequence
+ *	  follows the union below. The value written must be a multiple of 8,
+ *	  since all CPT instructions are 8 words long.
+ */
+union otx_cptx_vqx_doorbell {
+ u64 u;
+ struct otx_cptx_vqx_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_20_63:44;
+ u64 dbell_cnt:20;
+#else /* Word 0 - Little Endian */
+ u64 dbell_cnt:20;
+ u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+ } s;
+};
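+
+/*
+ * Illustrative sketch, not part of this patch: the doorbell-overflow
+ * recovery sequence described above, using this header's VQ register
+ * macros and DOVF mask bit. Assumes readq()/writeq() (<linux/io.h>) and
+ * BIT() (<linux/bits.h>) are available here.
+ */
+static inline void otx_cpt_example_dbell_ovf_recover(void __iomem *vq_base)
+{
+	union otx_cptx_vqx_doorbell dbell;
+
+	/* 1. Mask the overflow interrupt */
+	writeq(OTX_CPT_VF_INTR_DOVF_MASK,
+	       vq_base + OTX_CPT_VQX_MISC_ENA_W1C(0));
+	/* 2. Rewind the count: write 2^20 minus the pending [DBELL_CNT] */
+	dbell.u = readq(vq_base + OTX_CPT_VQX_DOORBELL(0));
+	writeq((1ull << 20) - dbell.s.dbell_cnt,
+	       vq_base + OTX_CPT_VQX_DOORBELL(0));
+	/* 3. Clear the latched overflow, then re-enable the interrupt */
+	writeq(OTX_CPT_VF_INTR_DOVF_MASK, vq_base + OTX_CPT_VQX_MISC_INT(0));
+	writeq(OTX_CPT_VF_INTR_DOVF_MASK,
+	       vq_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+}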
+
+/*
+ * Register (NCB) otx_cpt#_vq#_inprog
+ *
+ * CPT Queue In Progress Count Registers
+ * These registers contain the per-queue instruction in flight registers.
+ * otx_cptx_vqx_inprog_s
+ * Word0
+ * reserved_8_63:56 [63:8] Reserved.
+ *	inflight:8 [7:0](RO/H) Inflight count. Counts the number of the VF's
+ *	  instructions that CPT is currently fetching, executing or responding
+ *	  to. However, this does not include any interrupts that are
+ *	  awaiting software handling (CPT()_VQ()_DONE[DONE] != 0x0).
+ *	  A queue may not be reconfigured until:
+ *	  1. CPT()_VQ()_CTL[ENA] is cleared by software.
+ *	  2. [INFLIGHT] is polled until it equals zero.
+ *	  A sketch of this quiesce sequence follows the union below.
+ */
+union otx_cptx_vqx_inprog {
+ u64 u;
+ struct otx_cptx_vqx_inprog_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_8_63:56;
+ u64 inflight:8;
+#else /* Word 0 - Little Endian */
+ u64 inflight:8;
+ u64 reserved_8_63:56;
+#endif /* Word 0 - End */
+ } s;
+};
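+
+/*
+ * Illustrative sketch, not part of this patch: the quiesce sequence the
+ * [INFLIGHT] description above requires before reconfiguring a queue.
+ * Assumes readq()/writeq() (<linux/io.h>) are available here; a real
+ * caller would bound the polling loop.
+ */
+static inline void otx_cpt_example_quiesce_queue(void __iomem *vq_base)
+{
+	union otx_cptx_vqx_inprog inprog;
+
+	/* 1. Clear CPT()_VQ()_CTL[ENA] (bit 0; other bits are reserved) */
+	writeq(0, vq_base + OTX_CPT_VQX_CTL(0));
+	/* 2. Poll [INFLIGHT] until no instructions remain in flight */
+	do {
+		inprog.u = readq(vq_base + OTX_CPT_VQX_INPROG(0));
+	} while (inprog.s.inflight);
+}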
+
+/*
+ * Register (NCB) otx_cpt#_vq#_misc_int
+ *
+ * CPT Queue Misc Interrupt Register
+ * These registers contain the per-queue miscellaneous interrupts.
+ * otx_cptx_vqx_misc_int_s
+ * Word 0
+ * reserved_5_63:59 [63:5] Reserved.
+ * swerr:1 [4:4](R/W1C/H) Software error from engines.
+ * nwrp:1 [3:3](R/W1C/H) NCB result write response error.
+ * irde:1 [2:2](R/W1C/H) Instruction NCB read response error.
+ * dovf:1 [1:1](R/W1C/H) Doorbell overflow.
+ * mbox:1 [0:0](R/W1C/H) PF to VF mailbox interrupt. Set when
+ * CPT()_VF()_PF_MBOX(0) is written.
+ *
+ */
+union otx_cptx_vqx_misc_int {
+ u64 u;
+ struct otx_cptx_vqx_misc_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_5_63:59;
+ u64 swerr:1;
+ u64 nwrp:1;
+ u64 irde:1;
+ u64 dovf:1;
+ u64 mbox:1;
+#else /* Word 0 - Little Endian */
+ u64 mbox:1;
+ u64 dovf:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 swerr:1;
+ u64 reserved_5_63:59;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_done_ack
+ *
+ * CPT Queue Done Count Ack Registers
+ * This register is written by software to acknowledge interrupts.
+ * otx_cptx_vqx_done_ack_s
+ * Word0
+ * reserved_20_63:44 [63:20] Reserved.
+ * done_ack:20 [19:0](R/W/H) Number of decrements to CPT()_VQ()_DONE[DONE].
+ * Reads CPT()_VQ()_DONE[DONE]. Written by software to acknowledge
+ * interrupts. If CPT()_VQ()_DONE[DONE] is still nonzero the interrupt
+ * will be re-sent if the conditions described in CPT()_VQ()_DONE[DONE]
+ * are satisfied.
+ *
+ */
+union otx_cptx_vqx_done_ack {
+ u64 u;
+ struct otx_cptx_vqx_done_ack_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_20_63:44;
+ u64 done_ack:20;
+#else /* Word 0 - Little Endian */
+ u64 done_ack:20;
+ u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_done
+ *
+ * CPT Queue Done Count Registers
+ * These registers contain the per-queue instruction done count.
+ * cptx_vqx_done_s
+ * Word0
+ * reserved_20_63:44 [63:20] Reserved.
+ *	done:20 [19:0](R/W/H) Done count. When CPT_INST_S[DONEINT] is set and
+ *	  that instruction completes, CPT()_VQ()_DONE[DONE] is incremented.
+ *	  Writes to this field are for diagnostic use only; instead, software
+ *	  writes CPT()_VQ()_DONE_ACK with the number of decrements for this
+ *	  field.
+ * Interrupts are sent as follows:
+ * * When CPT()_VQ()_DONE[DONE] = 0, then no results are pending, the
+ * interrupt coalescing timer is held to zero, and an interrupt is not
+ * sent.
+ * * When CPT()_VQ()_DONE[DONE] != 0, then the interrupt coalescing timer
+ * counts. If the counter is >= CPT()_VQ()_DONE_WAIT[TIME_WAIT]*1024, or
+ * CPT()_VQ()_DONE[DONE] >= CPT()_VQ()_DONE_WAIT[NUM_WAIT], i.e. enough
+ * time has passed or enough results have arrived, then the interrupt is
+ * sent.
+ *	  * When CPT()_VQ()_DONE_ACK is written (or CPT()_VQ()_DONE is written,
+ *	  though this is not typical), the interrupt coalescing timer restarts.
+ *	  Note that after this decrement the interrupt equation is recomputed;
+ *	  for example, if CPT()_VQ()_DONE[DONE] >= CPT()_VQ()_DONE_WAIT[NUM_WAIT]
+ *	  and the timer is zero, the interrupt will be resent immediately.
+ *	  (This covers the race case between software acknowledging an interrupt
+ *	  and a result returning.)
+ * * When CPT()_VQ()_DONE_ENA_W1S[DONE] = 0, interrupts are not sent,
+ * but the counting described above still occurs.
+ *	  Since CPT instructions complete out-of-order, if software is using
+ *	  completion interrupts the suggested scheme is to request a DONEINT on
+ *	  each request, and when an interrupt arrives perform a "greedy" scan for
+ *	  completions; even if a later command is acknowledged first this will
+ *	  not result in missing a completion (a sketch follows the union below).
+ *	  Software is responsible for making sure [DONE] does not overflow;
+ *	  for example by ensuring there are not more than 2^20-1 instructions in
+ *	  flight that may request interrupts.
+ *
+ */
+union otx_cptx_vqx_done {
+ u64 u;
+ struct otx_cptx_vqx_done_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_20_63:44;
+ u64 done:20;
+#else /* Word 0 - Little Endian */
+ u64 done:20;
+ u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+ } s;
+};
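+
+/*
+ * Illustrative sketch, not part of this patch: the "greedy" completion
+ * scan suggested above. On a DONEINT interrupt, read [DONE], scan all
+ * outstanding requests (they may finish out-of-order), then acknowledge
+ * via DONE_ACK. Assumes readq()/writeq() (<linux/io.h>) are available
+ * here; process_completions() is a hypothetical stand-in for the
+ * caller's result walk.
+ */
+static inline void otx_cpt_example_done_irq(void __iomem *vq_base)
+{
+	union otx_cptx_vqx_done done;
+
+	done.u = readq(vq_base + OTX_CPT_VQX_DONE(0));
+	if (!done.s.done)
+		return;
+	/* process_completions(); -- scan every pending request here */
+	/* Acknowledging restarts the coalescing timer and may re-raise IRQ */
+	writeq(done.s.done, vq_base + OTX_CPT_VQX_DONE_ACK(0));
+}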
+
+/*
+ * Register (NCB) otx_cpt#_vq#_done_wait
+ *
+ * CPT Queue Done Interrupt Coalescing Wait Registers
+ * Specifies the per-queue interrupt coalescing settings.
+ * cptx_vqx_done_wait_s
+ * Word0
+ * reserved_48_63:16 [63:48] Reserved.
+ *	time_wait:16; [47:32](R/W) Time hold-off. When CPT()_VQ()_DONE[DONE] = 0
+ *	  or CPT()_VQ()_DONE_ACK is written, a timer is cleared. When the timer
+ *	  reaches [TIME_WAIT]*1024, interrupt coalescing ends; see
+ *	  CPT()_VQ()_DONE[DONE]. If 0x0, time coalescing is disabled.
+ *	reserved_20_31:12 [31:20] Reserved.
+ *	num_wait:20 [19:0](R/W) Number of messages hold-off.
+ *	  When CPT()_VQ()_DONE[DONE] >= [NUM_WAIT], interrupt coalescing ends;
+ *	  see CPT()_VQ()_DONE[DONE]. If 0x0, same behavior as 0x1.
+ *
+ */
+union otx_cptx_vqx_done_wait {
+ u64 u;
+ struct otx_cptx_vqx_done_wait_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_48_63:16;
+ u64 time_wait:16;
+ u64 reserved_20_31:12;
+ u64 num_wait:20;
+#else /* Word 0 - Little Endian */
+ u64 num_wait:20;
+ u64 reserved_20_31:12;
+ u64 time_wait:16;
+ u64 reserved_48_63:16;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_done_ena_w1s
+ *
+ * CPT Queue Done Interrupt Enable Set Registers
+ * Writing 1 to this register enables the DONEINT interrupt for the queue.
+ * cptx_vqx_done_ena_w1s_s
+ * Word0
+ * reserved_1_63:63 [63:1] Reserved.
+ *	done:1 [0:0](R/W1S/H) Writing 1 enables DONEINT for this queue;
+ *	  writing 0 has no effect. Reads return the enable bit.
+ */
+union otx_cptx_vqx_done_ena_w1s {
+ u64 u;
+ struct otx_cptx_vqx_done_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_1_63:63;
+ u64 done:1;
+#else /* Word 0 - Little Endian */
+ u64 done:1;
+ u64 reserved_1_63:63;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_ctl
+ *
+ * CPT VF Queue Control Registers
+ * This register configures queues. It should be changed (other than
+ * clearing [ENA]) only when the queue is quiescent (see
+ * CPT()_VQ()_INPROG[INFLIGHT]).
+ * cptx_vqx_ctl_s
+ * Word0
+ * reserved_1_63:63 [63:1] Reserved.
+ * ena:1 [0:0](R/W/H) Enables the logical instruction queue.
+ * See also CPT()_PF_Q()_CTL[CONT_ERR] and CPT()_VQ()_INPROG[INFLIGHT].
+ * 1 = Queue is enabled.
+ * 0 = Queue is disabled.
+ */
+union otx_cptx_vqx_ctl {
+ u64 u;
+ struct otx_cptx_vqx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_1_63:63;
+ u64 ena:1;
+#else /* Word 0 - Little Endian */
+ u64 ena:1;
+ u64 reserved_1_63:63;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Error Address/Error Codes
+ *
+ * In the event of a severe error, microcode writes an 8-byte Error Code
+ * value (ECODE) to host memory at the Rptr address specified by the host
+ * system (in the 64-byte request).
+ *
+ * Word0
+ * [63:56](R) 8-bit completion code
+ * [55:48](R) Number of the core that reported the severe error
+ * [47:0] Lower 6 bytes of M-Inst word2. Used to assist in uniquely
+ * identifying which specific instruction caused the error. This assumes
+ * that each instruction has a unique result location (RPTR), at least
+ * for a given period of time.
+ */
+union otx_cpt_error_code {
+ u64 u;
+ struct otx_cpt_error_code_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t ccode:8;
+ uint64_t coreid:8;
+ uint64_t rptr6:48;
+#else /* Word 0 - Little Endian */
+ uint64_t rptr6:48;
+ uint64_t coreid:8;
+ uint64_t ccode:8;
+#endif /* Word 0 - End */
+ } s;
+};
+
+#endif /* __OTX_CPT_HW_TYPES_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf.h b/drivers/crypto/marvell/octeontx/otx_cptpf.h
new file mode 100644
index 000000000000..73cd0a9bc563
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPTPF_H
+#define __OTX_CPTPF_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include "otx_cptpf_ucode.h"
+
+/*
+ * OcteonTX CPT device structure
+ */
+struct otx_cpt_device {
+ void __iomem *reg_base; /* Register start address */
+	struct pci_dev *pdev;	/* PCI device handle */
+	struct otx_cpt_eng_grps eng_grps; /* Engine groups information */
+ struct list_head list;
+ u8 pf_type; /* PF type SE or AE */
+ u8 max_vfs; /* Maximum number of VFs supported by the CPT */
+ u8 vfs_enabled; /* Number of enabled VFs */
+};
+
+void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx);
+void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt);
+
+#endif /* __OTX_CPTPF_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
new file mode 100644
index 000000000000..200fb3303db0
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx_cpt_common.h"
+#include "otx_cptpf.h"
+
+#define DRV_NAME "octeontx-cpt"
+#define DRV_VERSION "1.0"
+
+static void otx_cpt_disable_mbox_interrupts(struct otx_cpt_device *cpt)
+{
+ /* Disable mbox(0) interrupts for all VFs */
+ writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1CX(0));
+}
+
+static void otx_cpt_enable_mbox_interrupts(struct otx_cpt_device *cpt)
+{
+ /* Enable mbox(0) interrupts for all VFs */
+ writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1SX(0));
+}
+
+static irqreturn_t otx_cpt_mbx0_intr_handler(int __always_unused irq,
+ void *cpt)
+{
+ otx_cpt_mbox_intr_handler(cpt, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void otx_cpt_reset(struct otx_cpt_device *cpt)
+{
+ writeq(1, cpt->reg_base + OTX_CPT_PF_RESET);
+}
+
+static void otx_cpt_find_max_enabled_cores(struct otx_cpt_device *cpt)
+{
+ union otx_cptx_pf_constants pf_cnsts = {0};
+
+ pf_cnsts.u = readq(cpt->reg_base + OTX_CPT_PF_CONSTANTS);
+ cpt->eng_grps.avail.max_se_cnt = pf_cnsts.s.se;
+ cpt->eng_grps.avail.max_ae_cnt = pf_cnsts.s.ae;
+}
+
+static u32 otx_cpt_check_bist_status(struct otx_cpt_device *cpt)
+{
+ union otx_cptx_pf_bist_status bist_sts = {0};
+
+ bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_BIST_STATUS);
+ return bist_sts.u;
+}
+
+static u64 otx_cpt_check_exe_bist_status(struct otx_cpt_device *cpt)
+{
+ union otx_cptx_pf_exe_bist_status bist_sts = {0};
+
+ bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_EXE_BIST_STATUS);
+ return bist_sts.u;
+}
+
+static int otx_cpt_device_init(struct otx_cpt_device *cpt)
+{
+ struct device *dev = &cpt->pdev->dev;
+ u16 sdevid;
+ u64 bist;
+
+	/* Reset the PF when it is first probed */
+ otx_cpt_reset(cpt);
+ mdelay(100);
+
+ pci_read_config_word(cpt->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+
+ /* Check BIST status */
+ bist = (u64)otx_cpt_check_bist_status(cpt);
+ if (bist) {
+ dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
+ return -ENODEV;
+ }
+
+ bist = otx_cpt_check_exe_bist_status(cpt);
+ if (bist) {
+ dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
+ return -ENODEV;
+ }
+
+ /* Get max enabled cores */
+ otx_cpt_find_max_enabled_cores(cpt);
+
+ if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
+ (cpt->eng_grps.avail.max_se_cnt == 0)) {
+ cpt->pf_type = OTX_CPT_AE;
+ } else if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
+ (cpt->eng_grps.avail.max_ae_cnt == 0)) {
+ cpt->pf_type = OTX_CPT_SE;
+ }
+
+ /* Get max VQs/VFs supported by the device */
+ cpt->max_vfs = pci_sriov_get_totalvfs(cpt->pdev);
+
+ /* Disable all cores */
+ otx_cpt_disable_all_cores(cpt);
+
+ return 0;
+}
+
+static int otx_cpt_register_interrupts(struct otx_cpt_device *cpt)
+{
+ struct device *dev = &cpt->pdev->dev;
+ u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
+ u32 num_vec = OTX_CPT_PF_MSIX_VECTORS;
+ int ret;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(cpt->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&cpt->pdev->dev,
+ "Request for #%d msix vectors failed\n",
+ num_vec);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handlers */
+ ret = request_irq(pci_irq_vector(cpt->pdev,
+ OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
+ otx_cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
+ if (ret) {
+ dev_err(dev, "Request irq failed\n");
+ pci_free_irq_vectors(cpt->pdev);
+ return ret;
+ }
+ /* Enable mailbox interrupt */
+ otx_cpt_enable_mbox_interrupts(cpt);
+ return 0;
+}
+
+static void otx_cpt_unregister_interrupts(struct otx_cpt_device *cpt)
+{
+ u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
+
+ otx_cpt_disable_mbox_interrupts(cpt);
+ free_irq(pci_irq_vector(cpt->pdev,
+ OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
+ cpt);
+ pci_free_irq_vectors(cpt->pdev);
+}
+
+static int otx_cpt_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (numvfs > cpt->max_vfs)
+ numvfs = cpt->max_vfs;
+
+ if (numvfs > 0) {
+ ret = otx_cpt_try_create_default_eng_grps(cpt->pdev,
+ &cpt->eng_grps,
+ cpt->pf_type);
+ if (ret)
+ return ret;
+
+ cpt->vfs_enabled = numvfs;
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret) {
+ cpt->vfs_enabled = 0;
+ return ret;
+ }
+ otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, true);
+ try_module_get(THIS_MODULE);
+ ret = numvfs;
+ } else {
+ pci_disable_sriov(pdev);
+ otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, false);
+ module_put(THIS_MODULE);
+ cpt->vfs_enabled = 0;
+ }
+ dev_notice(&cpt->pdev->dev, "VFs enabled: %d\n", ret);
+
+ return ret;
+}
+
+static int otx_cpt_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct otx_cpt_device *cpt;
+ int err;
+
+ cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
+ if (!cpt)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, cpt);
+ cpt->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_clear_drvdata;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ /* MAP PF's configuration registers */
+ cpt->reg_base = pci_iomap(pdev, OTX_CPT_PF_PCI_CFG_BAR, 0);
+ if (!cpt->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* CPT device HW initialization */
+ err = otx_cpt_device_init(cpt);
+ if (err)
+ goto err_unmap_region;
+
+ /* Register interrupts */
+ err = otx_cpt_register_interrupts(cpt);
+ if (err)
+ goto err_unmap_region;
+
+ /* Initialize engine groups */
+ err = otx_cpt_init_eng_grps(pdev, &cpt->eng_grps, cpt->pf_type);
+ if (err)
+ goto err_unregister_interrupts;
+
+ return 0;
+
+err_unregister_interrupts:
+ otx_cpt_unregister_interrupts(cpt);
+err_unmap_region:
+ pci_iounmap(pdev, cpt->reg_base);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void otx_cpt_remove(struct pci_dev *pdev)
+{
+ struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
+
+ if (!cpt)
+ return;
+
+ /* Disable VFs */
+ pci_disable_sriov(pdev);
+ /* Cleanup engine groups */
+ otx_cpt_cleanup_eng_grps(pdev, &cpt->eng_grps);
+ /* Disable CPT PF interrupts */
+ otx_cpt_unregister_interrupts(cpt);
+ /* Disengage SE and AE cores from all groups */
+ otx_cpt_disable_all_cores(cpt);
+ pci_iounmap(pdev, cpt->reg_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx_cpt_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX_CPT_PCI_PF_DEVICE_ID) },
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx_cpt_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = otx_cpt_id_table,
+ .probe = otx_cpt_probe,
+ .remove = otx_cpt_remove,
+ .sriov_configure = otx_cpt_sriov_configure
+};
+
+module_pci_driver(otx_cpt_pci_driver);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX CPT Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx_cpt_id_table);
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
new file mode 100644
index 000000000000..a6774232e9a3
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx_cpt_common.h"
+#include "otx_cptpf.h"
+
+static char *get_mbox_opcode_str(int msg_opcode)
+{
+ char *str = "Unknown";
+
+ switch (msg_opcode) {
+ case OTX_CPT_MSG_VF_UP:
+ str = "UP";
+ break;
+
+ case OTX_CPT_MSG_VF_DOWN:
+ str = "DOWN";
+ break;
+
+ case OTX_CPT_MSG_READY:
+ str = "READY";
+ break;
+
+ case OTX_CPT_MSG_QLEN:
+ str = "QLEN";
+ break;
+
+ case OTX_CPT_MSG_QBIND_GRP:
+ str = "QBIND_GRP";
+ break;
+
+ case OTX_CPT_MSG_VQ_PRIORITY:
+ str = "VQ_PRIORITY";
+ break;
+
+ case OTX_CPT_MSG_PF_TYPE:
+ str = "PF_TYPE";
+ break;
+
+ case OTX_CPT_MSG_ACK:
+ str = "ACK";
+ break;
+
+ case OTX_CPT_MSG_NACK:
+ str = "NACK";
+ break;
+ }
+
+ return str;
+}
+
+static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
+{
+ char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];
+
+ hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
+ raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
+ if (vf_id >= 0)
+ pr_debug("MBOX opcode %s received from VF%d raw_data %s",
+ get_mbox_opcode_str(mbox_msg->msg), vf_id,
+ raw_data_str);
+ else
+ pr_debug("MBOX opcode %s received from PF raw_data %s",
+ get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
+}
+
+static void otx_cpt_send_msg_to_vf(struct otx_cpt_device *cpt, int vf,
+ struct otx_cpt_mbox *mbx)
+{
+ /* Writing mbox(0) causes interrupt */
+ writeq(mbx->data, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
+ writeq(mbx->msg, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
+}
+
+/*
+ * ACKs VF's mailbox message
+ * @vf: VF to which the ACK is to be sent
+ */
+static void otx_cpt_mbox_send_ack(struct otx_cpt_device *cpt, int vf,
+ struct otx_cpt_mbox *mbx)
+{
+ mbx->data = 0ull;
+ mbx->msg = OTX_CPT_MSG_ACK;
+ otx_cpt_send_msg_to_vf(cpt, vf, mbx);
+}
+
+/*
+ * NACKs VF's mailbox message, indicating that the PF cannot complete
+ * the requested action
+ */
+static void otx_cptpf_mbox_send_nack(struct otx_cpt_device *cpt, int vf,
+ struct otx_cpt_mbox *mbx)
+{
+ mbx->data = 0ull;
+ mbx->msg = OTX_CPT_MSG_NACK;
+ otx_cpt_send_msg_to_vf(cpt, vf, mbx);
+}
+
+static void otx_cpt_clear_mbox_intr(struct otx_cpt_device *cpt, u32 vf)
+{
+ /* W1C for the VF */
+ writeq(1ull << vf, cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
+}
+
+/*
+ * Configure QLEN/Chunk sizes for VF
+ */
+static void otx_cpt_cfg_qlen_for_vf(struct otx_cpt_device *cpt, int vf,
+ u32 size)
+{
+ union otx_cptx_pf_qx_ctl pf_qx_ctl;
+
+ pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
+ pf_qx_ctl.s.size = size;
+ pf_qx_ctl.s.cont_err = true;
+ writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
+}
+
+/*
+ * Configure VQ priority
+ */
+static void otx_cpt_cfg_vq_priority(struct otx_cpt_device *cpt, int vf, u32 pri)
+{
+ union otx_cptx_pf_qx_ctl pf_qx_ctl;
+
+ pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
+ pf_qx_ctl.s.pri = pri;
+ writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
+}
+
+static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
+{
+ struct device *dev = &cpt->pdev->dev;
+ struct otx_cpt_eng_grp_info *eng_grp;
+ union otx_cptx_pf_qx_ctl pf_qx_ctl;
+ struct otx_cpt_ucode *ucode;
+
+ if (q >= cpt->max_vfs) {
+		dev_err(dev, "Requested queue %d exceeds maximum available %d",
+ q, cpt->max_vfs);
+ return -EINVAL;
+ }
+
+ if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
+		dev_err(dev, "Requested group %d exceeds maximum available %d",
+ grp, OTX_CPT_MAX_ENGINE_GROUPS);
+ return -EINVAL;
+ }
+
+ eng_grp = &cpt->eng_grps.grp[grp];
+ if (!eng_grp->is_enabled) {
+ dev_err(dev, "Requested engine group %d is disabled", grp);
+ return -EINVAL;
+ }
+
+ pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
+ pf_qx_ctl.s.grp = grp;
+ writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
+
+ if (eng_grp->mirror.is_ena)
+ ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
+ else
+ ucode = &eng_grp->ucode[0];
+
+ if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_SE_TYPES))
+ return OTX_CPT_SE_TYPES;
+ else if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_AE_TYPES))
+ return OTX_CPT_AE_TYPES;
+ else
+ return BAD_OTX_CPTVF_TYPE;
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
+{
+ int vftype = 0;
+ struct otx_cpt_mbox mbx = {};
+ struct device *dev = &cpt->pdev->dev;
+ /*
+ * MBOX[0] contains msg
+ * MBOX[1] contains data
+ */
+ mbx.msg = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
+ mbx.data = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
+
+ dump_mbox_msg(&mbx, vf);
+
+ switch (mbx.msg) {
+ case OTX_CPT_MSG_VF_UP:
+ mbx.msg = OTX_CPT_MSG_VF_UP;
+ mbx.data = cpt->vfs_enabled;
+ otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_READY:
+ mbx.msg = OTX_CPT_MSG_READY;
+ mbx.data = vf;
+ otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_VF_DOWN:
+ /* First msg in VF teardown sequence */
+ otx_cpt_mbox_send_ack(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_QLEN:
+ otx_cpt_cfg_qlen_for_vf(cpt, vf, mbx.data);
+ otx_cpt_mbox_send_ack(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_QBIND_GRP:
+ vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
+ if ((vftype != OTX_CPT_AE_TYPES) &&
+ (vftype != OTX_CPT_SE_TYPES)) {
+ dev_err(dev, "VF%d binding to eng group %llu failed",
+ vf, mbx.data);
+ otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
+ } else {
+ mbx.msg = OTX_CPT_MSG_QBIND_GRP;
+ mbx.data = vftype;
+ otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
+ }
+ break;
+ case OTX_CPT_MSG_PF_TYPE:
+ mbx.msg = OTX_CPT_MSG_PF_TYPE;
+ mbx.data = cpt->pf_type;
+ otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_VQ_PRIORITY:
+ otx_cpt_cfg_vq_priority(cpt, vf, mbx.data);
+ otx_cpt_mbox_send_ack(cpt, vf, &mbx);
+ break;
+ default:
+ dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n",
+ vf, mbx.msg);
+ break;
+ }
+}
+
+void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx)
+{
+ u64 intr;
+ u8 vf;
+
+ intr = readq(cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
+ pr_debug("PF interrupt mbox%d mask 0x%llx\n", mbx, intr);
+ for (vf = 0; vf < cpt->max_vfs; vf++) {
+ if (intr & (1ULL << vf)) {
+ otx_cpt_handle_mbox_intr(cpt, vf);
+ otx_cpt_clear_mbox_intr(cpt, vf);
+ }
+ }
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
new file mode 100644
index 000000000000..d04baa319592
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -0,0 +1,1686 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ctype.h>
+#include <linux/firmware.h>
+#include "otx_cpt_common.h"
+#include "otx_cptpf_ucode.h"
+#include "otx_cptpf.h"
+
+#define CSR_DELAY 30
+/* Tar archive defines */
+#define TAR_MAGIC "ustar"
+#define TAR_MAGIC_LEN 6
+#define TAR_BLOCK_LEN 512
+#define REGTYPE '0'
+#define AREGTYPE '\0'
+
+/* tar header as defined in POSIX 1003.1-1990. */
+struct tar_hdr_t {
+ char name[100];
+ char mode[8];
+ char uid[8];
+ char gid[8];
+ char size[12];
+ char mtime[12];
+ char chksum[8];
+ char typeflag;
+ char linkname[100];
+ char magic[6];
+ char version[2];
+ char uname[32];
+ char gname[32];
+ char devmajor[8];
+ char devminor[8];
+ char prefix[155];
+};
+
+struct tar_blk_t {
+ union {
+ struct tar_hdr_t hdr;
+ char block[TAR_BLOCK_LEN];
+ };
+};
+
+struct tar_arch_info_t {
+ struct list_head ucodes;
+ const struct firmware *fw;
+};
+
+static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp)
+{
+ struct otx_cpt_bitmap bmap = { {0} };
+ bool found = false;
+ int i;
+
+ if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
+ dev_err(dev, "unsupported number of engines %d on octeontx",
+ eng_grp->g->engs_num);
+ return bmap;
+ }
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (eng_grp->engs[i].type) {
+ bitmap_or(bmap.bits, bmap.bits,
+ eng_grp->engs[i].bmap,
+ eng_grp->g->engs_num);
+ bmap.size = eng_grp->g->engs_num;
+ found = true;
+ }
+ }
+
+ if (!found)
+ dev_err(dev, "No engines reserved for engine group %d",
+ eng_grp->idx);
+ return bmap;
+}
+
+static int is_eng_type(int val, int eng_type)
+{
+ return val & (1 << eng_type);
+}
+
+static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
+ int eng_type)
+{
+ return is_eng_type(eng_grps->eng_types_supported, eng_type);
+}
+
+static void set_ucode_filename(struct otx_cpt_ucode *ucode,
+ const char *filename)
+{
+ strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
+}
+
+static char *get_eng_type_str(int eng_type)
+{
+ char *str = "unknown";
+
+ switch (eng_type) {
+ case OTX_CPT_SE_TYPES:
+ str = "SE";
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ str = "AE";
+ break;
+ }
+ return str;
+}
+
+static char *get_ucode_type_str(int ucode_type)
+{
+ char *str = "unknown";
+
+ switch (ucode_type) {
+ case (1 << OTX_CPT_SE_TYPES):
+ str = "SE";
+ break;
+
+ case (1 << OTX_CPT_AE_TYPES):
+ str = "AE";
+ break;
+ }
+ return str;
+}
+
+static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
+{
+ char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
+ u32 i, val = 0;
+ u8 nn;
+
+ strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+ for (i = 0; i < strlen(tmp_ver_str); i++)
+ tmp_ver_str[i] = tolower(tmp_ver_str[i]);
+
+ nn = ucode_hdr->ver_num.nn;
+ if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
+ nn == OTX_CPT_SE_UC_TYPE3))
+ val |= 1 << OTX_CPT_SE_TYPES;
+ if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
+ nn == OTX_CPT_AE_UC_TYPE)
+ val |= 1 << OTX_CPT_AE_TYPES;
+
+ *ucode_type = val;
+
+ if (!val)
+ return -EINVAL;
+ if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
+ is_eng_type(val, OTX_CPT_SE_TYPES))
+ return -EINVAL;
+ return 0;
+}
+
+static int is_mem_zero(const char *ptr, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (ptr[i])
+ return 0;
+ }
+ return 1;
+}
+
+static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
+{
+ struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
+ dma_addr_t dma_addr;
+ struct otx_cpt_bitmap bmap;
+ int i;
+
+ bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ if (eng_grp->mirror.is_ena)
+ dma_addr =
+ eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
+ else
+ dma_addr = eng_grp->ucode[0].align_dma;
+
+ /*
+	 * Set UCODE_BASE only for the cores which are not used;
+	 * other cores should already have a valid UCODE_BASE set.
+ */
+ for_each_set_bit(i, bmap.bits, bmap.size)
+ if (!eng_grp->g->eng_ref_cnt[i])
+ writeq((u64) dma_addr, cpt->reg_base +
+ OTX_CPT_PF_ENGX_UCODE_BASE(i));
+ return 0;
+}
+
+static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
+ struct otx_cpt_bitmap bmap = { {0} };
+ int timeout = 10;
+ int i, busy;
+ u64 reg;
+
+ bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Detach the cores from group */
+ reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ if (reg & (1ull << i)) {
+ eng_grp->g->eng_ref_cnt[i]--;
+ reg &= ~(1ull << i);
+ }
+ }
+ writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
+ for_each_set_bit(i, bmap.bits, bmap.size)
+ if (reg & (1ull << i)) {
+ busy = 1;
+ break;
+ }
+ } while (busy);
+
+ /* Disable the cores only if they are not used anymore */
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+ for_each_set_bit(i, bmap.bits, bmap.size)
+ if (!eng_grp->g->eng_ref_cnt[i])
+ reg &= ~(1ull << i);
+ writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+
+ return 0;
+}
+
+static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
+ struct otx_cpt_bitmap bmap;
+ u64 reg;
+ int i;
+
+ bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Attach the cores to the group */
+ reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ if (!(reg & (1ull << i))) {
+ eng_grp->g->eng_ref_cnt[i]++;
+ reg |= 1ull << i;
+ }
+ }
+ writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
+
+ /* Enable the cores */
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+ for_each_set_bit(i, bmap.bits, bmap.size)
+ reg |= 1ull << i;
+ writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+
+ return 0;
+}
+
+static int process_tar_file(struct device *dev,
+ struct tar_arch_info_t *tar_arch, char *filename,
+ const u8 *data, u32 size)
+{
+ struct tar_ucode_info_t *tar_info;
+ struct otx_cpt_ucode_hdr *ucode_hdr;
+ int ucode_type, ucode_size;
+
+ /*
+	 * If the size is less than the microcode header size then don't
+	 * report an error, because this might not be a microcode file;
+	 * just process the next file from the archive.
+ */
+ if (size < sizeof(struct otx_cpt_ucode_hdr))
+ return 0;
+
+ ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
+ /*
+	 * If the microcode version can't be found, don't report an error,
+	 * because this might not be a microcode file; just process the
+	 * next file.
+ */
+ if (get_ucode_type(ucode_hdr, &ucode_type))
+ return 0;
+
+ ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ if (!ucode_size || (size < round_up(ucode_size, 16) +
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
+ dev_err(dev, "Ucode %s invalid size", filename);
+ return -EINVAL;
+ }
+
+ tar_info = kzalloc(sizeof(struct tar_ucode_info_t), GFP_KERNEL);
+ if (!tar_info)
+ return -ENOMEM;
+
+ tar_info->ucode_ptr = data;
+ set_ucode_filename(&tar_info->ucode, filename);
+ memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
+ OTX_CPT_UCODE_VER_STR_SZ);
+ tar_info->ucode.ver_num = ucode_hdr->ver_num;
+ tar_info->ucode.type = ucode_type;
+ tar_info->ucode.size = ucode_size;
+ list_add_tail(&tar_info->list, &tar_arch->ucodes);
+
+ return 0;
+}
+
+static void release_tar_archive(struct tar_arch_info_t *tar_arch)
+{
+ struct tar_ucode_info_t *curr, *temp;
+
+ if (!tar_arch)
+ return;
+
+ list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
+ list_del(&curr->list);
+ kfree(curr);
+ }
+
+ if (tar_arch->fw)
+ release_firmware(tar_arch->fw);
+ kfree(tar_arch);
+}
+
+static struct tar_ucode_info_t *get_uc_from_tar_archive(
+ struct tar_arch_info_t *tar_arch,
+ int ucode_type)
+{
+ struct tar_ucode_info_t *curr, *uc_found = NULL;
+
+ list_for_each_entry(curr, &tar_arch->ucodes, list) {
+ if (!is_eng_type(curr->ucode.type, ucode_type))
+ continue;
+
+ if (!uc_found) {
+ uc_found = curr;
+ continue;
+ }
+
+ switch (ucode_type) {
+ case OTX_CPT_AE_TYPES:
+ break;
+
+ case OTX_CPT_SE_TYPES:
+ if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
+ (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
+ && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
+ uc_found = curr;
+ break;
+ }
+ }
+
+ return uc_found;
+}
+
+static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
+ char *tar_filename)
+{
+ struct tar_ucode_info_t *curr;
+
+ pr_debug("Tar archive filename %s", tar_filename);
+ pr_debug("Tar archive pointer %p, size %ld", tar_arch->fw->data,
+ tar_arch->fw->size);
+ list_for_each_entry(curr, &tar_arch->ucodes, list) {
+ pr_debug("Ucode filename %s", curr->ucode.filename);
+ pr_debug("Ucode version string %s", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d",
+ curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
+ curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
+ pr_debug("Ucode type (%d) %s", curr->ucode.type,
+ get_ucode_type_str(curr->ucode.type));
+ pr_debug("Ucode size %d", curr->ucode.size);
+ pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
+ }
+}
+
+static struct tar_arch_info_t *load_tar_archive(struct device *dev,
+ char *tar_filename)
+{
+ struct tar_arch_info_t *tar_arch = NULL;
+ struct tar_blk_t *tar_blk;
+ unsigned int cur_size;
+ size_t tar_offs = 0;
+ size_t tar_size;
+ int ret;
+
+ tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
+ if (!tar_arch)
+ return NULL;
+
+ INIT_LIST_HEAD(&tar_arch->ucodes);
+
+ /* Load tar archive */
+ ret = request_firmware(&tar_arch->fw, tar_filename, dev);
+ if (ret)
+ goto release_tar_arch;
+
+ if (tar_arch->fw->size < TAR_BLOCK_LEN) {
+ dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ goto release_tar_arch;
+ }
+
+ tar_size = tar_arch->fw->size;
+ tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
+ if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
+ dev_err(dev, "Unsupported format of tar archive %s",
+ tar_filename);
+ goto release_tar_arch;
+ }
+
+ while (1) {
+ /* Read current file size */
+ ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
+ if (ret)
+ goto release_tar_arch;
+
+ if (tar_offs + cur_size > tar_size ||
+ tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
+ dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ goto release_tar_arch;
+ }
+
+ tar_offs += TAR_BLOCK_LEN;
+ if (tar_blk->hdr.typeflag == REGTYPE ||
+ tar_blk->hdr.typeflag == AREGTYPE) {
+ ret = process_tar_file(dev, tar_arch,
+ tar_blk->hdr.name,
+ &tar_arch->fw->data[tar_offs],
+ cur_size);
+ if (ret)
+ goto release_tar_arch;
+ }
+
+ tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
+ if (cur_size % TAR_BLOCK_LEN)
+ tar_offs += TAR_BLOCK_LEN;
+
+ /* Check for the end of the archive */
+ if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
+ dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ goto release_tar_arch;
+ }
+
+ if (is_mem_zero(&tar_arch->fw->data[tar_offs],
+ 2*TAR_BLOCK_LEN))
+ break;
+
+ /* Read next block from tar archive */
+ tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
+ }
+
+ print_tar_dbg_info(tar_arch, tar_filename);
+ return tar_arch;
+release_tar_arch:
+ release_tar_archive(tar_arch);
+ return NULL;
+}
+
+static struct otx_cpt_engs_rsvd *find_engines_by_type(
+ struct otx_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ int i;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ if (eng_grp->engs[i].type == eng_type)
+ return &eng_grp->engs[i];
+ }
+ return NULL;
+}
+
+int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
+{
+ return is_eng_type(ucode->type, eng_type);
+}
+EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
+
+int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ struct otx_cpt_engs_rsvd *engs;
+
+ engs = find_engines_by_type(eng_grp, eng_type);
+
+ return (engs != NULL ? 1 : 0);
+}
+EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);
+
+static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
+ char *buf, int size)
+{
+ if (eng_grp->mirror.is_ena) {
+ scnprintf(buf, size, "%s (shared with engine_group%d)",
+ eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
+ eng_grp->mirror.idx);
+ } else {
+ scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
+ }
+}
+
+static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
+ char *buf, int size, int idx)
+{
+ struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
+ struct otx_cpt_engs_rsvd *engs;
+ int len, i;
+
+ buf[0] = '\0';
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (idx != -1 && idx != i)
+ continue;
+
+ if (eng_grp->mirror.is_ena)
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ if (i > 0 && idx == -1) {
+ len = strlen(buf);
+ scnprintf(buf+len, size-len, ", ");
+ }
+
+ len = strlen(buf);
+ scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
+ engs->count + mirrored_engs->count : engs->count,
+ get_eng_type_str(engs->type));
+ if (mirrored_engs) {
+ len = strlen(buf);
+ scnprintf(buf+len, size-len,
+ "(%d shared with engine_group%d) ",
+ engs->count <= 0 ? engs->count +
+ mirrored_engs->count : mirrored_engs->count,
+ eng_grp->mirror.idx);
+ }
+ }
+}
+
+static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
+{
+ pr_debug("Ucode info");
+ pr_debug("Ucode version string %s", ucode->ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d", ucode->ver_num.nn,
+ ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
+ pr_debug("Ucode type %s", get_ucode_type_str(ucode->type));
+ pr_debug("Ucode size %d", ucode->size);
+ pr_debug("Ucode virt address %16.16llx", (u64)ucode->align_va);
+ pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
+}
+
+static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
+ struct device *dev, char *buf, int size)
+{
+ struct otx_cpt_bitmap bmap;
+ u32 mask[2];
+
+ bmap = get_cores_bmap(dev, eng_grp);
+ if (!bmap.size) {
+ scnprintf(buf, size, "unknown");
+ return;
+ }
+ bitmap_to_arr32(mask, bmap.bits, bmap.size);
+ scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
+}
+
+
+static void print_dbg_info(struct device *dev,
+ struct otx_cpt_eng_grps *eng_grps)
+{
+ char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
+ struct otx_cpt_eng_grp_info *mirrored_grp;
+ char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
+ struct otx_cpt_eng_grp_info *grp;
+ struct otx_cpt_engs_rsvd *engs;
+ u32 mask[4];
+ int i, j;
+
+ pr_debug("Engine groups global info");
+ pr_debug("max SE %d, max AE %d",
+ eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
+ pr_debug("free SE %d", eng_grps->avail.se_cnt);
+ pr_debug("free AE %d", eng_grps->avail.ae_cnt);
+
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ pr_debug("engine_group%d, state %s", i, grp->is_enabled ?
+ "enabled" : "disabled");
+ if (grp->is_enabled) {
+ mirrored_grp = &eng_grps->grp[grp->mirror.idx];
+ pr_debug("Ucode0 filename %s, version %s",
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].filename :
+ grp->ucode[0].filename,
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].ver_str :
+ grp->ucode[0].ver_str);
+ }
+
+ for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
+ engs = &grp->engs[j];
+ if (engs->type) {
+ print_engs_info(grp, engs_info,
+ 2*OTX_CPT_UCODE_NAME_LENGTH, j);
+ pr_debug("Slot%d: %s", j, engs_info);
+ bitmap_to_arr32(mask, engs->bmap,
+ eng_grps->engs_num);
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
+ mask[3], mask[2], mask[1], mask[0]);
+ } else
+ pr_debug("Slot%d not used", j);
+ }
+ if (grp->is_enabled) {
+ cpt_print_engines_mask(grp, dev, engs_mask,
+ OTX_CPT_UCODE_NAME_LENGTH);
+ pr_debug("Cmask: %s", engs_mask);
+ }
+ }
+}
+
+static int update_engines_avail_count(struct device *dev,
+ struct otx_cpt_engs_available *avail,
+ struct otx_cpt_engs_rsvd *engs, int val)
+{
+ switch (engs->type) {
+ case OTX_CPT_SE_TYPES:
+ avail->se_cnt += val;
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ avail->ae_cnt += val;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int update_engines_offset(struct device *dev,
+ struct otx_cpt_engs_available *avail,
+ struct otx_cpt_engs_rsvd *engs)
+{
+ switch (engs->type) {
+ case OTX_CPT_SE_TYPES:
+ engs->offset = 0;
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ engs->offset = avail->max_se_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type)
+ continue;
+
+ if (grp->engs[i].count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail,
+ &grp->engs[i],
+ grp->engs[i].count);
+ if (ret)
+ return ret;
+ }
+
+ grp->engs[i].type = 0;
+ grp->engs[i].count = 0;
+ grp->engs[i].offset = 0;
+ grp->engs[i].ucode = NULL;
+ bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
+ }
+
+ return 0;
+}
+
+static int do_reserve_engines(struct device *dev,
+ struct otx_cpt_eng_grp_info *grp,
+ struct otx_cpt_engines *req_engs)
+{
+ struct otx_cpt_engs_rsvd *engs = NULL;
+ int i, ret;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type) {
+ engs = &grp->engs[i];
+ break;
+ }
+ }
+
+ if (!engs)
+ return -ENOMEM;
+
+ engs->type = req_engs->type;
+ engs->count = req_engs->count;
+
+ ret = update_engines_offset(dev, &grp->g->avail, engs);
+ if (ret)
+ return ret;
+
+ if (engs->count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail, engs,
+ -engs->count);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int check_engines_availability(struct device *dev,
+ struct otx_cpt_eng_grp_info *grp,
+ struct otx_cpt_engines *req_eng)
+{
+ int avail_cnt = 0;
+
+ switch (req_eng->type) {
+ case OTX_CPT_SE_TYPES:
+ avail_cnt = grp->g->avail.se_cnt;
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ avail_cnt = grp->g->avail.ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", req_eng->type);
+ return -EINVAL;
+ }
+
+ if (avail_cnt < req_eng->count) {
+ dev_err(dev,
+			"Error: available %s engines %d < requested %d",
+ get_eng_type_str(req_eng->type),
+ avail_cnt, req_eng->count);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
+ struct otx_cpt_engines *req_engs, int req_cnt)
+{
+ int i, ret;
+
+	/* Validate that the requested number of engines is available */
+ for (i = 0; i < req_cnt; i++) {
+ ret = check_engines_availability(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Reserve requested engines for this engine group */
+ for (i = 0; i < req_cnt; i++) {
+ ret = do_reserve_engines(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static ssize_t eng_grp_info_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
+ char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
+ char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
+ struct otx_cpt_eng_grp_info *eng_grp;
+ int ret;
+
+ eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
+ mutex_lock(&eng_grp->g->lock);
+
+ print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
+ print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
+ cpt_print_engines_mask(eng_grp, dev, engs_mask,
+ OTX_CPT_UCODE_NAME_LENGTH);
+ ret = scnprintf(buf, PAGE_SIZE,
+ "Microcode : %s\nEngines: %s\nEngines mask: %s\n",
+ ucode_info, engs_info, engs_mask);
+
+ mutex_unlock(&eng_grp->g->lock);
+ return ret;
+}
+
+static int create_sysfs_eng_grps_info(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp)
+{
+ int ret;
+
+ eng_grp->info_attr.show = eng_grp_info_show;
+ eng_grp->info_attr.store = NULL;
+ eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
+ eng_grp->info_attr.attr.mode = 0440;
+ sysfs_attr_init(&eng_grp->info_attr.attr);
+ ret = device_create_file(dev, &eng_grp->info_attr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
+{
+ if (ucode->va) {
+ dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
+ ucode->va, ucode->dma);
+ ucode->va = NULL;
+ ucode->align_va = NULL;
+ ucode->dma = 0;
+ ucode->align_dma = 0;
+ ucode->size = 0;
+ }
+
+ memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
+ memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
+ set_ucode_filename(ucode, "");
+ ucode->type = 0;
+}
+
+static int copy_ucode_to_dma_mem(struct device *dev,
+ struct otx_cpt_ucode *ucode,
+ const u8 *ucode_data)
+{
+ u32 i;
+
+ /* Allocate DMAable space */
+ ucode->va = dma_alloc_coherent(dev, ucode->size +
+ OTX_CPT_UCODE_ALIGNMENT,
+ &ucode->dma, GFP_KERNEL);
+ if (!ucode->va) {
+ dev_err(dev, "Unable to allocate space for microcode");
+ return -ENOMEM;
+ }
+ ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
+ ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);
+
+ memcpy((void *) ucode->align_va, (void *) ucode_data +
+ sizeof(struct otx_cpt_ucode_hdr), ucode->size);
+
+ /* Byte swap 64-bit */
+ for (i = 0; i < (ucode->size / 8); i++)
+ ((u64 *)ucode->align_va)[i] =
+ cpu_to_be64(((u64 *)ucode->align_va)[i]);
+ /* Ucode needs 16-bit swap */
+ for (i = 0; i < (ucode->size / 2); i++)
+ ((u16 *)ucode->align_va)[i] =
+ cpu_to_be16(((u16 *)ucode->align_va)[i]);
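+	/*
+	 * Net effect on a little-endian host: the four 16-bit units in
+	 * each 64-bit word are reversed while the bytes inside each unit
+	 * keep their order (b0b1 b2b3 b4b5 b6b7 -> b6b7 b4b5 b2b3 b0b1).
+	 */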
+ return 0;
+}
+
+static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
+ const char *ucode_filename)
+{
+ struct otx_cpt_ucode_hdr *ucode_hdr;
+ const struct firmware *fw;
+ int ret;
+
+ set_ucode_filename(ucode, ucode_filename);
+ ret = request_firmware(&fw, ucode->filename, dev);
+ if (ret)
+ return ret;
+
+ ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
+ memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+ ucode->ver_num = ucode_hdr->ver_num;
+ ucode->size = ntohl(ucode_hdr->code_length) * 2;
+ if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
+ dev_err(dev, "Ucode %s invalid size", ucode_filename);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ ret = get_ucode_type(ucode_hdr, &ucode->type);
+ if (ret) {
+ dev_err(dev, "Microcode %s unknown type 0x%x", ucode->filename,
+ ucode->type);
+ goto release_fw;
+ }
+
+ ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
+ if (ret)
+ goto release_fw;
+
+ print_ucode_dbg_info(ucode);
+release_fw:
+ release_firmware(fw);
+ return ret;
+}
+
+static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int ret;
+
+ ret = cpt_set_ucode_base(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ ret = cpt_attach_and_enable_cores(eng_grp, obj);
+ return ret;
+}
+
+static int disable_eng_grp(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int i, ret;
+
+ ret = cpt_detach_and_disable_cores(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Unload ucode used by this engine group */
+ ucode_unload(dev, &eng_grp->ucode[0]);
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ eng_grp->engs[i].ucode = &eng_grp->ucode[0];
+ }
+
+ ret = cpt_set_ucode_base(eng_grp, obj);
+
+ return ret;
+}
+
+static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
+ struct otx_cpt_eng_grp_info *src_grp)
+{
+ /* Setup fields for engine group which is mirrored */
+ src_grp->mirror.is_ena = false;
+ src_grp->mirror.idx = 0;
+ src_grp->mirror.ref_count++;
+
+ /* Setup fields for mirroring engine group */
+ dst_grp->mirror.is_ena = true;
+ dst_grp->mirror.idx = src_grp->idx;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
+{
+ struct otx_cpt_eng_grp_info *src_grp;
+
+ if (!dst_grp->mirror.is_ena)
+ return;
+
+ src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
+
+ src_grp->mirror.ref_count--;
+ dst_grp->mirror.is_ena = false;
+ dst_grp->mirror.idx = 0;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
+ struct otx_cpt_engines *engs, int engs_cnt)
+{
+ struct otx_cpt_engs_rsvd *mirrored_engs;
+ int i;
+
+ for (i = 0; i < engs_cnt; i++) {
+ mirrored_engs = find_engines_by_type(mirrored_eng_grp,
+ engs[i].type);
+ if (!mirrored_engs)
+ continue;
+
+ /*
+		 * If the mirrored group has this type of engines attached,
+		 * three scenarios are possible:
+		 * 1) mirrored_engs.count == engs[i].count: all engines from
+		 * the mirrored engine group will be shared with this engine
+		 * group
+		 * 2) mirrored_engs.count > engs[i].count: only a subset of
+		 * engines from the mirrored engine group will be shared with
+		 * this engine group
+		 * 3) mirrored_engs.count < engs[i].count: all engines from
+		 * the mirrored engine group will be shared with this group
+		 * and additional engines will be reserved for exclusive use
+		 * by this engine group
+ */
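+		/*
+		 * e.g. scenario 3: if the mirrored group provides 10 SE
+		 * engines and 24 were requested, engs[i].count becomes 14
+		 * and those 14 are reserved exclusively for this group.
+		 */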
+ engs[i].count -= mirrored_engs->count;
+ }
+}
+
+static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
+ struct otx_cpt_eng_grp_info *grp)
+{
+ struct otx_cpt_eng_grps *eng_grps = grp->g;
+ int i;
+
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ continue;
+ if (eng_grps->grp[i].ucode[0].type)
+ continue;
+ if (grp->idx == i)
+ continue;
+ if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
+ grp->ucode[0].ver_str,
+ OTX_CPT_UCODE_VER_STR_SZ))
+ return &eng_grps->grp[i];
+ }
+
+ return NULL;
+}
+
+static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
+ struct otx_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ return &eng_grps->grp[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_update_masks(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp)
+{
+ struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
+ struct otx_cpt_bitmap tmp_bmap = { {0} };
+ int i, j, cnt, max_cnt;
+ int bit;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (engs->count <= 0)
+ continue;
+
+ switch (engs->type) {
+ case OTX_CPT_SE_TYPES:
+ max_cnt = eng_grp->g->avail.max_se_cnt;
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d", engs->type);
+ return -EINVAL;
+ }
+
+ cnt = engs->count;
+ WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
+ bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
+ for (j = engs->offset; j < engs->offset + max_cnt; j++) {
+ if (!eng_grp->g->eng_ref_cnt[j]) {
+ bitmap_set(tmp_bmap.bits, j, 1);
+ cnt--;
+ if (!cnt)
+ break;
+ }
+ }
+
+ if (cnt)
+ return -ENOSPC;
+
+ bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
+ }
+
+ if (!eng_grp->mirror.is_ena)
+ return 0;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ WARN_ON(!mirrored_engs && engs->count <= 0);
+ if (!mirrored_engs)
+ continue;
+
+ bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
+ eng_grp->g->engs_num);
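+		/*
+		 * A negative count means this group needs fewer engines
+		 * than the mirrored group provides; e.g. count == -4 drops
+		 * 4 engines from the shared set before the bitmaps are
+		 * OR-ed together below.
+		 */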
+ if (engs->count < 0) {
+ bit = find_first_bit(mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ bitmap_clear(tmp_bmap.bits, bit, -engs->count);
+ }
+ bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
+ eng_grp->g->engs_num);
+ }
+ return 0;
+}
+
+static int delete_engine_group(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp)
+{
+ int i, ret;
+
+ if (!eng_grp->is_enabled)
+ return -EINVAL;
+
+ if (eng_grp->mirror.ref_count) {
+ dev_err(dev, "Can't delete engine_group%d as it is used by:",
+ eng_grp->idx);
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (eng_grp->g->grp[i].mirror.is_ena &&
+ eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
+ dev_err(dev, "engine_group%d", i);
+ }
+ return -EINVAL;
+ }
+
+ /* Removing engine group mirroring if enabled */
+ remove_eng_grp_mirroring(eng_grp);
+
+ /* Disable engine group */
+ ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
+ if (ret)
+ return ret;
+
+ /* Release all engines held by this engine group */
+ ret = release_engines(dev, eng_grp);
+ if (ret)
+ return ret;
+
+ device_remove_file(dev, &eng_grp->info_attr);
+ eng_grp->is_enabled = false;
+
+ return 0;
+}
+
+static int validate_1_ucode_scenario(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp,
+ struct otx_cpt_engines *engs, int engs_cnt)
+{
+ int i;
+
+ /* Verify that ucode loaded supports requested engine types */
+ for (i = 0; i < engs_cnt; i++) {
+ if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
+ engs[i].type)) {
+ dev_err(dev,
+ "Microcode %s does not support %s engines",
+ eng_grp->ucode[0].filename,
+ get_eng_type_str(engs[i].type));
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
+{
+ struct otx_cpt_ucode *ucode;
+
+ if (eng_grp->mirror.is_ena)
+ ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
+ else
+ ucode = &eng_grp->ucode[0];
+ WARN_ON(!eng_grp->engs[0].type);
+ eng_grp->engs[0].ucode = ucode;
+}
+
+static int create_engine_group(struct device *dev,
+ struct otx_cpt_eng_grps *eng_grps,
+ struct otx_cpt_engines *engs, int engs_cnt,
+ void *ucode_data[], int ucodes_cnt,
+ bool use_uc_from_tar_arch)
+{
+ struct otx_cpt_eng_grp_info *mirrored_eng_grp;
+ struct tar_ucode_info_t *tar_info;
+ struct otx_cpt_eng_grp_info *eng_grp;
+ int i, ret = 0;
+
+ if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
+ return -EINVAL;
+
+ /* Validate if requested engine types are supported by this device */
+ for (i = 0; i < engs_cnt; i++)
+ if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
+ dev_err(dev, "Device does not support %s engines",
+ get_eng_type_str(engs[i].type));
+ return -EPERM;
+ }
+
+ /* Find engine group which is not used */
+ eng_grp = find_unused_eng_grp(eng_grps);
+ if (!eng_grp) {
+		dev_err(dev, "Error: all engine groups are in use");
+ return -ENOSPC;
+ }
+
+ /* Load ucode */
+ for (i = 0; i < ucodes_cnt; i++) {
+ if (use_uc_from_tar_arch) {
+ tar_info = (struct tar_ucode_info_t *) ucode_data[i];
+ eng_grp->ucode[i] = tar_info->ucode;
+ ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
+ tar_info->ucode_ptr);
+ } else
+ ret = ucode_load(dev, &eng_grp->ucode[i],
+ (char *) ucode_data[i]);
+ if (ret)
+ goto err_ucode_unload;
+ }
+
+ /* Validate scenario where 1 ucode is used */
+ ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
+ if (ret)
+ goto err_ucode_unload;
+
+ /* Check if this group mirrors another existing engine group */
+ mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
+ if (mirrored_eng_grp) {
+ /* Setup mirroring */
+ setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
+
+ /*
+ * Update count of requested engines because some
+ * of them might be shared with mirrored group
+ */
+ update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
+ }
+
+ /* Reserve engines */
+ ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
+ if (ret)
+ goto err_ucode_unload;
+
+ /* Update ucode pointers used by engines */
+ update_ucode_ptrs(eng_grp);
+
+ /* Update engine masks used by this group */
+ ret = eng_grp_update_masks(dev, eng_grp);
+ if (ret)
+ goto err_release_engs;
+
+ /* Create sysfs entry for engine group info */
+ ret = create_sysfs_eng_grps_info(dev, eng_grp);
+ if (ret)
+ goto err_release_engs;
+
+ /* Enable engine group */
+ ret = enable_eng_grp(eng_grp, eng_grps->obj);
+ if (ret)
+ goto err_release_engs;
+
+ /*
+ * If this engine group mirrors another engine group
+ * then we need to unload ucode as we will use ucode
+ * from mirrored engine group
+ */
+ if (eng_grp->mirror.is_ena)
+ ucode_unload(dev, &eng_grp->ucode[0]);
+
+ eng_grp->is_enabled = true;
+ if (eng_grp->mirror.is_ena)
+ dev_info(dev,
+ "Engine_group%d: reuse microcode %s from group %d",
+ eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
+ mirrored_eng_grp->idx);
+ else
+ dev_info(dev, "Engine_group%d: microcode loaded %s",
+ eng_grp->idx, eng_grp->ucode[0].ver_str);
+
+ return 0;
+
+err_release_engs:
+ release_engines(dev, eng_grp);
+err_ucode_unload:
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ return ret;
+}
+
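+/*
+ * The ucode_load sysfs file accepts writes of the following form (the
+ * sysfs path and the microcode file name below are illustrative):
+ *
+ *	# create an engine group with 24 SE engines running the given ucode
+ *	echo "se:24;ucode-se.out" > /sys/bus/pci/devices/<BDF>/ucode_load
+ *
+ *	# delete engine group 0
+ *	echo "engine_group0:null" > /sys/bus/pci/devices/<BDF>/ucode_load
+ */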
+static ssize_t ucode_load_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
+ char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
+ char *start, *val, *err_msg, *tmp;
+ struct otx_cpt_eng_grps *eng_grps;
+ int grp_idx = 0, ret = -EINVAL;
+	bool has_se, has_ae;
+ int del_grp_idx = -1;
+ int ucode_idx = 0;
+
+ if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
+ return -EINVAL;
+
+ eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
+ err_msg = "Invalid engine group format";
+ strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
+ start = tmp_buf;
+
+	has_se = has_ae = false;
+
+ for (;;) {
+ val = strsep(&start, ";");
+ if (!val)
+ break;
+ val = strim(val);
+ if (!*val)
+ continue;
+
+ if (!strncasecmp(val, "engine_group", 12)) {
+ if (del_grp_idx != -1)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 13)
+ goto err_print;
+ if (kstrtoint((tmp + 12), 10, &del_grp_idx))
+ goto err_print;
+ val = strim(val);
+ if (strncasecmp(val, "null", 4))
+ goto err_print;
+ if (strlen(val) != 4)
+ goto err_print;
+ } else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
+ if (has_se || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX_CPT_SE_TYPES;
+ has_se = true;
+ } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
+ if (has_ae || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX_CPT_AE_TYPES;
+ has_ae = true;
+ } else {
+ if (ucode_idx > 1)
+ goto err_print;
+ if (!strlen(val))
+ goto err_print;
+ if (strnstr(val, " ", strlen(val)))
+ goto err_print;
+ ucode_filename[ucode_idx++] = val;
+ }
+ }
+
+ /* Validate input parameters */
+ if (del_grp_idx == -1) {
+ if (!(grp_idx && ucode_idx))
+ goto err_print;
+
+ if (ucode_idx > 1 && grp_idx < 2)
+ goto err_print;
+
+ if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
+			err_msg = "Error: too many engine types attached";
+ goto err_print;
+ }
+
+ } else {
+ if (del_grp_idx < 0 ||
+ del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
+ dev_err(dev, "Invalid engine group index %d",
+ del_grp_idx);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if (!eng_grps->grp[del_grp_idx].is_enabled) {
+ dev_err(dev, "Error engine_group%d is not configured",
+ del_grp_idx);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if (grp_idx || ucode_idx)
+ goto err_print;
+ }
+
+ mutex_lock(&eng_grps->lock);
+
+ if (eng_grps->is_rdonly) {
+ dev_err(dev, "Disable VFs before modifying engine groups\n");
+ ret = -EACCES;
+ goto err_unlock;
+ }
+
+ if (del_grp_idx == -1)
+ /* create engine group */
+ ret = create_engine_group(dev, eng_grps, engs, grp_idx,
+ (void **) ucode_filename,
+ ucode_idx, false);
+ else
+ /* delete engine group */
+ ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
+ if (ret)
+ goto err_unlock;
+
+ print_dbg_info(dev, eng_grps);
+err_unlock:
+ mutex_unlock(&eng_grps->lock);
+ return ret ? ret : count;
+err_print:
+ dev_err(dev, "%s\n", err_msg);
+
+ return ret;
+}
+
+int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps,
+ int pf_type)
+{
+ struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = { 0 };
+ struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct tar_arch_info_t *tar_arch = NULL;
+ char *tar_filename;
+ int i, ret = 0;
+
+ mutex_lock(&eng_grps->lock);
+
+ /*
+	 * We don't create an engine group for kernel crypto if an attempt
+	 * to create one was already made (when the user enabled VFs for
+	 * the first time).
+ */
+ if (eng_grps->is_first_try)
+ goto unlock_mutex;
+ eng_grps->is_first_try = true;
+
+ /* We create group for kcrypto only if no groups are configured */
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
+ if (eng_grps->grp[i].is_enabled)
+ goto unlock_mutex;
+
+ switch (pf_type) {
+ case OTX_CPT_AE:
+ case OTX_CPT_SE:
+ tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ tar_arch = load_tar_archive(&pdev->dev, tar_filename);
+ if (!tar_arch)
+ goto unlock_mutex;
+
+ /*
+	 * If the device supports SE engines and there is SE microcode in
+	 * the tar archive, try to create an engine group with SE engines
+	 * for kernel crypto functionality (symmetric crypto).
+ */
+ tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
+ if (tar_info[0] &&
+ dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {
+
+ engs[0].type = OTX_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) tar_info, 1, true);
+ if (ret)
+ goto release_tar_arch;
+ }
+ /*
+	 * If the device supports AE engines and there is AE microcode in
+	 * the tar archive, try to create an engine group with AE engines
+	 * for asymmetric crypto functionality.
+ */
+ tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
+ if (tar_info[0] &&
+ dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {
+
+ engs[0].type = OTX_CPT_AE_TYPES;
+ engs[0].count = eng_grps->avail.max_ae_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) tar_info, 1, true);
+ if (ret)
+ goto release_tar_arch;
+ }
+
+ print_dbg_info(&pdev->dev, eng_grps);
+release_tar_arch:
+ release_tar_archive(tar_arch);
+unlock_mutex:
+ mutex_unlock(&eng_grps->lock);
+ return ret;
+}
+
+void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
+ bool is_rdonly)
+{
+ mutex_lock(&eng_grps->lock);
+
+ eng_grps->is_rdonly = is_rdonly;
+
+ mutex_unlock(&eng_grps->lock);
+}
+
+void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
+{
+ int grp, timeout = 100;
+ u64 reg;
+
+ /* Disengage the cores from groups */
+ for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
+ writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
+ udelay(CSR_DELAY);
+ }
+
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
+ while (reg) {
+ udelay(CSR_DELAY);
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
+		if (!timeout--) {
+ dev_warn(&cpt->pdev->dev, "Cores still busy");
+ break;
+ }
+ }
+
+ /* Disable the cores */
+ writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+}
+
+void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps)
+{
+ struct otx_cpt_eng_grp_info *grp;
+ int i, j;
+
+ mutex_lock(&eng_grps->lock);
+ if (eng_grps->is_ucode_load_created) {
+ device_remove_file(&pdev->dev,
+ &eng_grps->ucode_load_attr);
+ eng_grps->is_ucode_load_created = false;
+ }
+
+ /* First delete all mirroring engine groups */
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
+ if (eng_grps->grp[i].mirror.is_ena)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+
+ /* Delete remaining engine groups */
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+
+ /* Release memory */
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
+ kfree(grp->engs[j].bmap);
+ grp->engs[j].bmap = NULL;
+ }
+ }
+
+ mutex_unlock(&eng_grps->lock);
+}
+
+int otx_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps, int pf_type)
+{
+ struct otx_cpt_eng_grp_info *grp;
+ int i, j, ret = 0;
+
+ mutex_init(&eng_grps->lock);
+ eng_grps->obj = pci_get_drvdata(pdev);
+ eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
+ eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
+
+ eng_grps->engs_num = eng_grps->avail.max_se_cnt +
+ eng_grps->avail.max_ae_cnt;
+ if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
+ dev_err(&pdev->dev,
+			"Number of engines %d exceeds max supported %d",
+ eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ grp->g = eng_grps;
+ grp->idx = i;
+
+ snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
+ "engine_group%d", i);
+ for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
+ grp->engs[j].bmap =
+ kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
+ sizeof(long), GFP_KERNEL);
+ if (!grp->engs[j].bmap) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+ }
+
+ switch (pf_type) {
+ case OTX_CPT_SE:
+ /* OcteonTX 83XX SE CPT PF has only SE engines attached */
+ eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
+ break;
+
+ case OTX_CPT_AE:
+ /* OcteonTX 83XX AE CPT PF has only AE engines attached */
+ eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ eng_grps->ucode_load_attr.show = NULL;
+ eng_grps->ucode_load_attr.store = ucode_load_store;
+ eng_grps->ucode_load_attr.attr.name = "ucode_load";
+ eng_grps->ucode_load_attr.attr.mode = 0220;
+ sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
+ ret = device_create_file(&pdev->dev,
+ &eng_grps->ucode_load_attr);
+ if (ret)
+ goto err;
+ eng_grps->is_ucode_load_created = true;
+
+ print_dbg_info(&pdev->dev, eng_grps);
+ return ret;
+err:
+ otx_cpt_cleanup_eng_grps(pdev, eng_grps);
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
new file mode 100644
index 000000000000..14f02b60d0c2
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPTPF_UCODE_H
+#define __OTX_CPTPF_UCODE_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include "otx_cpt_hw_types.h"
+
+/* CPT ucode name maximum length */
+#define OTX_CPT_UCODE_NAME_LENGTH 64
+/*
+ * On the OcteonTX 83xx platform, only one type of engine may be
+ * attached to an engine group.
+ */
+#define OTX_CPT_MAX_ETYPES_PER_GRP 1
+
+/* Default tar archive file names */
+#define OTX_CPT_UCODE_TAR_FILE_NAME "cpt8x-mc.tar"
+
+/* CPT ucode alignment */
+#define OTX_CPT_UCODE_ALIGNMENT 128
+
+/* CPT ucode signature size */
+#define OTX_CPT_UCODE_SIGN_LEN 256
+
+/* Microcode version string length */
+#define OTX_CPT_UCODE_VER_STR_SZ 44
+
+/* Maximum number of supported engines/cores on OcteonTX 83XX platform */
+#define OTX_CPT_MAX_ENGINES 64
+
+#define OTX_CPT_ENGS_BITMASK_LEN (OTX_CPT_MAX_ENGINES/(BITS_PER_BYTE * \
+ sizeof(unsigned long)))
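+/* e.g. with 64 engines on a 64-bit host: 64 / (8 * 8) = 1 unsigned long */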
+
+/* Microcode types */
+enum otx_cpt_ucode_type {
+ OTX_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
+ OTX_CPT_SE_UC_TYPE1 = 20, /* SE-MAIN - combination of 21 and 22 */
+ OTX_CPT_SE_UC_TYPE2 = 21, /* Fast Path IPSec + AirCrypto */
+ OTX_CPT_SE_UC_TYPE3 = 22, /*
+ * Hash + HMAC + FlexiCrypto + RNG + Full
+ * Feature IPSec + AirCrypto + Kasumi
+ */
+};
+
+struct otx_cpt_bitmap {
+ unsigned long bits[OTX_CPT_ENGS_BITMASK_LEN];
+ int size;
+};
+
+struct otx_cpt_engines {
+ int type;
+ int count;
+};
+
+/* Microcode version number */
+struct otx_cpt_ucode_ver_num {
+ u8 nn;
+ u8 xx;
+ u8 yy;
+ u8 zz;
+};
+
+struct otx_cpt_ucode_hdr {
+ struct otx_cpt_ucode_ver_num ver_num;
+ u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ];
+ u32 code_length;
+ u32 padding[3];
+};
+
+struct otx_cpt_ucode {
+ u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ];/*
+ * ucode version in readable format
+ */
+ struct otx_cpt_ucode_ver_num ver_num;/* ucode version number */
+ char filename[OTX_CPT_UCODE_NAME_LENGTH]; /* ucode filename */
+ dma_addr_t dma; /* phys address of ucode image */
+ dma_addr_t align_dma; /* aligned phys address of ucode image */
+ void *va; /* virt address of ucode image */
+ void *align_va; /* aligned virt address of ucode image */
+ u32 size; /* ucode image size */
+ int type; /* ucode image type SE or AE */
+};
+
+struct tar_ucode_info_t {
+ struct list_head list;
+ struct otx_cpt_ucode ucode;/* microcode information */
+ const u8 *ucode_ptr; /* pointer to microcode in tar archive */
+};
+
+/* Maximum and current number of engines available for all engine groups */
+struct otx_cpt_engs_available {
+ int max_se_cnt;
+ int max_ae_cnt;
+ int se_cnt;
+ int ae_cnt;
+};
+
+/* Engines reserved to an engine group */
+struct otx_cpt_engs_rsvd {
+ int type; /* engine type */
+ int count; /* number of engines attached */
+ int offset; /* constant offset of engine type in the bitmap */
+ unsigned long *bmap; /* attached engines bitmap */
+ struct otx_cpt_ucode *ucode; /* ucode used by these engines */
+};
+
+struct otx_cpt_mirror_info {
+	int is_ena;	/*
+			 * whether mirroring is enabled; set only for an
+			 * engine group which mirrors another engine group
+			 */
+	int idx;	/*
+			 * index of the engine group which is mirrored by
+			 * this group; set only for an engine group which
+			 * mirrors another group
+			 */
+	int ref_count;	/*
+			 * number of times this engine group is mirrored by
+			 * other groups; set only for an engine group which
+			 * is mirrored by other group(s)
+			 */
+};
+
+struct otx_cpt_eng_grp_info {
+ struct otx_cpt_eng_grps *g; /* pointer to engine_groups structure */
+ struct device_attribute info_attr; /* group info entry attr */
+ /* engines attached */
+ struct otx_cpt_engs_rsvd engs[OTX_CPT_MAX_ETYPES_PER_GRP];
+ /* Microcode information */
+ struct otx_cpt_ucode ucode[OTX_CPT_MAX_ETYPES_PER_GRP];
+ /* sysfs info entry name */
+ char sysfs_info_name[OTX_CPT_UCODE_NAME_LENGTH];
+ /* engine group mirroring information */
+ struct otx_cpt_mirror_info mirror;
+ int idx; /* engine group index */
+	bool is_enabled;	/*
+				 * whether the engine group is enabled; a
+				 * group is enabled when it has engines
+				 * attached and ucode loaded
+				 */
+};
+
+struct otx_cpt_eng_grps {
+ struct otx_cpt_eng_grp_info grp[OTX_CPT_MAX_ENGINE_GROUPS];
+ struct device_attribute ucode_load_attr;/* ucode load attr */
+ struct otx_cpt_engs_available avail;
+ struct mutex lock;
+ void *obj;
+ int engs_num; /* total number of engines supported */
+ int eng_types_supported; /* engine types supported SE, AE */
+ u8 eng_ref_cnt[OTX_CPT_MAX_ENGINES];/* engines reference count */
+ bool is_ucode_load_created; /* is ucode_load sysfs entry created */
+ bool is_first_try; /* is this first try to create kcrypto engine grp */
+	bool is_rdonly; /* true if engine group configuration can't be modified */
+};
+
+int otx_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps, int pf_type);
+void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps);
+int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps,
+ int pf_type);
+void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
+ bool is_rdonly);
+int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type);
+int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
+ int eng_type);
+
+#endif /* __OTX_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf.h b/drivers/crypto/marvell/octeontx/otx_cptvf.h
new file mode 100644
index 000000000000..dd02f21659af
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPTVF_H
+#define __OTX_CPTVF_H
+
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include "otx_cpt_common.h"
+#include "otx_cptvf_reqmgr.h"
+
+/* Flags to indicate the features supported */
+#define OTX_CPT_FLAG_DEVICE_READY BIT(1)
+#define otx_cpt_device_ready(cpt) ((cpt)->flags & OTX_CPT_FLAG_DEVICE_READY)
+/* Default command queue length */
+#define OTX_CPT_CMD_QLEN (4*2046)
+#define OTX_CPT_CMD_QCHUNK_SIZE 1023
+#define OTX_CPT_NUM_QS_PER_VF 1
+
+struct otx_cpt_cmd_chunk {
+ u8 *head;
+ dma_addr_t dma_addr;
+ u32 size; /* Chunk size, max OTX_CPT_INST_CHUNK_MAX_SIZE */
+ struct list_head nextchunk;
+};
+
+struct otx_cpt_cmd_queue {
+ u32 idx; /* Command queue host write idx */
+ u32 num_chunks; /* Number of command chunks */
+ struct otx_cpt_cmd_chunk *qhead;/*
+ * Command queue head, instructions
+ * are inserted here
+ */
+ struct otx_cpt_cmd_chunk *base;
+ struct list_head chead;
+};
+
+struct otx_cpt_cmd_qinfo {
+ u32 qchunksize; /* Command queue chunk size */
+ struct otx_cpt_cmd_queue queue[OTX_CPT_NUM_QS_PER_VF];
+};
+
+struct otx_cpt_pending_qinfo {
+ u32 num_queues; /* Number of queues supported */
+ struct otx_cpt_pending_queue queue[OTX_CPT_NUM_QS_PER_VF];
+};
+
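+/*
+ * Iterates over all pending queues of a VF; a minimal usage sketch
+ * (free_pending_queue() is a hypothetical helper):
+ *
+ *	struct otx_cpt_pending_qinfo *pqinfo = &cptvf->pqinfo;
+ *	struct otx_cpt_pending_queue *queue;
+ *	u32 i;
+ *
+ *	for_each_pending_queue(pqinfo, queue, i)
+ *		free_pending_queue(queue);
+ */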
+#define for_each_pending_queue(qinfo, q, i) \
+ for (i = 0, q = &qinfo->queue[i]; i < qinfo->num_queues; i++, \
+ q = &qinfo->queue[i])
+
+struct otx_cptvf_wqe {
+ struct tasklet_struct twork;
+ struct otx_cptvf *cptvf;
+};
+
+struct otx_cptvf_wqe_info {
+ struct otx_cptvf_wqe vq_wqe[OTX_CPT_NUM_QS_PER_VF];
+};
+
+struct otx_cptvf {
+ u16 flags; /* Flags to hold device status bits */
+ u8 vfid; /* Device Index 0...OTX_CPT_MAX_VF_NUM */
+ u8 num_vfs; /* Number of enabled VFs */
+ u8 vftype; /* VF type of SE_TYPE(2) or AE_TYPE(1) */
+ u8 vfgrp; /* VF group (0 - 8) */
+ u8 node; /* Operating node: Bits (46:44) in BAR0 address */
+ u8 priority; /*
+			 * VF priority ring: 1 - high priority round
+			 * robin ring; 0 - low priority round robin ring
+ */
+ struct pci_dev *pdev; /* Pci device handle */
+ void __iomem *reg_base; /* Register start address */
+ void *wqe_info; /* BH worker info */
+ /* MSI-X */
+ cpumask_var_t affinity_mask[OTX_CPT_VF_MSIX_VECTORS];
+ /* Command and Pending queues */
+ u32 qsize;
+ u32 num_queues;
+ struct otx_cpt_cmd_qinfo cqinfo; /* Command queue information */
+ struct otx_cpt_pending_qinfo pqinfo; /* Pending queue information */
+ /* VF-PF mailbox communication */
+ bool pf_acked;
+ bool pf_nacked;
+};
+
+int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf);
+int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf);
+int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group);
+int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf);
+int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf);
+int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf);
+void otx_cptvf_handle_mbox_intr(struct otx_cptvf *cptvf);
+void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val);
+
+#endif /* __OTX_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
new file mode 100644
index 000000000000..06202bcffb33
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -0,0 +1,1746 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/cryptd.h>
+#include <crypto/des.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+#include <linux/rtnetlink.h>
+#include <linux/sort.h>
+#include <linux/module.h>
+#include "otx_cptvf.h"
+#include "otx_cptvf_algs.h"
+#include "otx_cptvf_reqmgr.h"
+
+#define CPT_MAX_VF_NUM 64
+/* Size of salt in AES GCM mode */
+#define AES_GCM_SALT_SIZE 4
+/* Size of IV in AES GCM mode */
+#define AES_GCM_IV_SIZE 8
+/* Size of ICV (Integrity Check Value) in AES GCM mode */
+#define AES_GCM_ICV_SIZE 16
+/* Offset of IV in AES GCM mode */
+#define AES_GCM_IV_OFFSET 8
+#define CONTROL_WORD_LEN 8
+#define KEY2_OFFSET 48
+#define DMA_MODE_FLAG(dma_mode) \
+ (((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)
+
+/* Truncated SHA digest sizes */
+#define SHA1_TRUNC_DIGEST_SIZE 12
+#define SHA256_TRUNC_DIGEST_SIZE 16
+#define SHA384_TRUNC_DIGEST_SIZE 24
+#define SHA512_TRUNC_DIGEST_SIZE 32
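+/*
+ * i.e. 96-, 128-, 192- and 256-bit truncations as used by IPsec
+ * (cf. RFC 2404 and RFC 4868)
+ */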
+
+static DEFINE_MUTEX(mutex);
+static int is_crypto_registered;
+
+struct cpt_device_desc {
+ enum otx_cptpf_type pf_type;
+ struct pci_dev *dev;
+ int num_queues;
+};
+
+struct cpt_device_table {
+ atomic_t count;
+ struct cpt_device_desc desc[CPT_MAX_VF_NUM];
+};
+
+static struct cpt_device_table se_devices = {
+ .count = ATOMIC_INIT(0)
+};
+
+static struct cpt_device_table ae_devices = {
+ .count = ATOMIC_INIT(0)
+};
+
+static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
+{
+ int count, ret = 0;
+
+ count = atomic_read(&se_devices.count);
+ if (count < 1)
+ return -ENODEV;
+
+ *cpu_num = get_cpu();
+
+ if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
+ /*
+ * On OcteonTX platform there is one CPT instruction queue bound
+ * to each VF. We get maximum performance if one CPT queue
+		 * is available for each cpu; otherwise CPT queues need to be
+ * shared between cpus.
+ */
+ if (*cpu_num >= count)
+ *cpu_num %= count;
+ *pdev = se_devices.desc[*cpu_num].dev;
+ } else {
+ pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
+ ret = -EINVAL;
+ }
+ put_cpu();
+
+ return ret;
+}
+
+static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
+{
+ struct otx_cpt_req_ctx *rctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+
+ req = container_of(cpt_req->areq, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ rctx = aead_request_ctx(req);
+ if (memcmp(rctx->fctx.hmac.s.hmac_calc,
+ rctx->fctx.hmac.s.hmac_recv,
+ crypto_aead_authsize(tfm)) != 0)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
+{
+ struct otx_cpt_info_buffer *cpt_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct otx_cpt_req_info *cpt_req;
+ struct pci_dev *pdev;
+
+ cpt_req = cpt_info->req;
+ if (!status) {
+ /*
+		 * When the selected cipher is NULL we need to manually
+		 * verify that the calculated hmac value matches the
+		 * received hmac value.
+ */
+ if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
+ !cpt_req->is_enc)
+ status = validate_hmac_cipher_null(cpt_req);
+ }
+ if (cpt_info) {
+ pdev = cpt_info->pdev;
+ do_request_cleanup(pdev, cpt_info);
+ }
+ if (areq)
+ areq->complete(areq, status);
+}
+
+static void output_iv_copyback(struct crypto_async_request *areq)
+{
+ struct otx_cpt_req_info *req_info;
+ struct skcipher_request *sreq;
+ struct crypto_skcipher *stfm;
+ struct otx_cpt_req_ctx *rctx;
+ struct otx_cpt_enc_ctx *ctx;
+ u32 start, ivsize;
+
+ sreq = container_of(areq, struct skcipher_request, base);
+ stfm = crypto_skcipher_reqtfm(sreq);
+ ctx = crypto_skcipher_ctx(stfm);
+ if (ctx->cipher_type == OTX_CPT_AES_CBC ||
+ ctx->cipher_type == OTX_CPT_DES3_CBC) {
+ rctx = skcipher_request_ctx(sreq);
+ req_info = &rctx->cpt_req;
+ ivsize = crypto_skcipher_ivsize(stfm);
+ start = sreq->cryptlen - ivsize;
+
+ if (req_info->is_enc) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
+ ivsize, 0);
+ } else {
+ if (sreq->src != sreq->dst) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->src,
+ start, ivsize, 0);
+ } else {
+ memcpy(sreq->iv, req_info->iv_out, ivsize);
+ kfree(req_info->iv_out);
+ }
+ }
+ }
+}
+
+static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
+{
+ struct otx_cpt_info_buffer *cpt_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct pci_dev *pdev;
+
+ if (areq) {
+ if (!status)
+ output_iv_copyback(areq);
+ if (cpt_info) {
+ pdev = cpt_info->pdev;
+ do_request_cleanup(pdev, cpt_info);
+ }
+ areq->complete(areq, status);
+ }
+}
+
+static inline void update_input_data(struct otx_cpt_req_info *req_info,
+ struct scatterlist *inp_sg,
+ u32 nbytes, u32 *argcnt)
+{
+ req_info->req.dlen += nbytes;
+
+ while (nbytes) {
+ u32 len = min(nbytes, inp_sg->length);
+ u8 *ptr = sg_virt(inp_sg);
+
+ req_info->in[*argcnt].vptr = (void *)ptr;
+ req_info->in[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ inp_sg = sg_next(inp_sg);
+ }
+}
+
+static inline void update_output_data(struct otx_cpt_req_info *req_info,
+ struct scatterlist *outp_sg,
+ u32 offset, u32 nbytes, u32 *argcnt)
+{
+ req_info->rlen += nbytes;
+
+ while (nbytes) {
+ u32 len = min(nbytes, outp_sg->length - offset);
+ u8 *ptr = sg_virt(outp_sg);
+
+ req_info->out[*argcnt].vptr = (void *) (ptr + offset);
+ req_info->out[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ offset = 0;
+ outp_sg = sg_next(outp_sg);
+ }
+}
+
+static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
+ int ivsize = crypto_skcipher_ivsize(stfm);
+ u32 start = req->cryptlen - ivsize;
+ u64 *ctrl_flags = NULL;
+ gfp_t flags;
+
+ flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
+ req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
+
+ req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
+ ctx->cipher_type == OTX_CPT_DES3_CBC) &&
+ req->src == req->dst) {
+ req_info->iv_out = kmalloc(ivsize, flags);
+ if (!req_info->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_info->iv_out, req->src,
+ start, ivsize, 0);
+ }
+ }
+ /* Encryption data length */
+ req_info->req.param1 = req->cryptlen;
+ /* Authentication data length */
+ req_info->req.param2 = 0;
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
+
+ if (ctx->cipher_type == OTX_CPT_AES_XTS)
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
+ else
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
+
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
+
+ ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
+ *ctrl_flags = cpu_to_be64(*ctrl_flags);
+
+ /*
+	 * Store the packet data information in the offset control
+	 * word (first 8 bytes).
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
+
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline int create_input_list(struct skcipher_request *req, u32 enc,
+ u32 enc_iv_len)
+{
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+ int ret;
+
+ ret = create_ctx_hdr(req, enc, &argcnt);
+ if (ret)
+ return ret;
+
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
+ req_info->incnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_output_list(struct skcipher_request *req,
+ u32 enc_iv_len)
+{
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+
+ /*
+	 * OUTPUT buffer processing:
+	 * AES encryption/decryption output is received in the
+	 * following format
+	 *
+	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
+	 * [ 16 bytes (AES CBC IV) ][ req->cryptlen bytes      ]
+ */
+ update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
+ req_info->outcnt = argcnt;
+}
+
+static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ /* Validate that request doesn't exceed maximum CPT supported size */
+ if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
+ return -E2BIG;
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.flags = 0;
+
+ status = create_input_list(req, enc, enc_iv_len);
+ if (status)
+ return status;
+ create_output_list(req, enc_iv_len);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->callback = (void *)otx_cpt_skcipher_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = OTX_CPT_ENC_DEC_REQ;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+ req_info->ctrl.s.grp = 0;
+
+ /*
+	 * We perform an asynchronous send; once the request
+	 * completes, the driver notifies us through the
+	 * registered callback functions.
+ */
+ status = otx_cpt_do_request(pdev, req_info, cpu_num);
+
+ return status;
+}
+
+static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, true);
+}
+
+static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, false);
+}
+
+static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const u8 *key2 = key + (keylen / 2);
+ const u8 *key1 = key;
+ int ret;
+
+ ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ if (ret)
+ return ret;
+ ctx->key_len = keylen;
+ memcpy(ctx->enc_key, key1, keylen / 2);
+ memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
+ ctx->cipher_type = OTX_CPT_AES_XTS;
+ switch (ctx->key_len) {
+ case 2 * AES_KEYSIZE_128:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ break;
+ case 2 * AES_KEYSIZE_256:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return 0;
+}
+
+static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return 0;
+}
+
+static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
+}
+
+static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
+}
+
+static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
+}
+
+static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
+}
+
+static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
+}
+
+static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
+{
+ struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+ /*
+ * Additional memory for skcipher_request is
+ * allocated since the cryptd daemon uses
+ * this memory for request_ctx information
+ */
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
+ sizeof(struct skcipher_request));
+
+ return 0;
+}
+
+static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->cipher_type = cipher_type;
+ ctx->mac_type = mac_type;
+
+ /*
+	 * When the selected cipher is NULL we use the HMAC opcode instead
+	 * of the FLEXICRYPTO opcode; therefore we don't need the HASH
+	 * algorithms for calculating ipad and opad.
+ */
+ if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
+ switch (ctx->mac_type) {
+ case OTX_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+ }
+ }
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));
+
+ return 0;
+}
+
+static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
+}
+
+static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
+}
+
+static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
+}
+
+static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
+}
+
+static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
+}
+
+static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
+}
+
+static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
+}
+
+static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
+}
+
+static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
+}
+
+static void otx_cpt_aead_exit(struct crypto_aead *tfm)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ kfree(ctx->ipad);
+ kfree(ctx->opad);
+ if (ctx->hashalg)
+ crypto_free_shash(ctx->hashalg);
+ kfree(ctx->sdesc);
+}
+
+/*
+ * Validate and set the Integrity Check Value (ICV) size, i.e. the
+ * authentication tag length.
+ */
+static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (ctx->mac_type) {
+ case OTX_CPT_SHA1:
+ if (authsize != SHA1_DIGEST_SIZE &&
+ authsize != SHA1_TRUNC_DIGEST_SIZE)
+ return -EINVAL;
+
+ if (authsize == SHA1_TRUNC_DIGEST_SIZE)
+ ctx->is_trunc_hmac = true;
+ break;
+
+ case OTX_CPT_SHA256:
+ if (authsize != SHA256_DIGEST_SIZE &&
+ authsize != SHA256_TRUNC_DIGEST_SIZE)
+ return -EINVAL;
+
+ if (authsize == SHA256_TRUNC_DIGEST_SIZE)
+ ctx->is_trunc_hmac = true;
+ break;
+
+ case OTX_CPT_SHA384:
+ if (authsize != SHA384_DIGEST_SIZE &&
+ authsize != SHA384_TRUNC_DIGEST_SIZE)
+ return -EINVAL;
+
+ if (authsize == SHA384_TRUNC_DIGEST_SIZE)
+ ctx->is_trunc_hmac = true;
+ break;
+
+ case OTX_CPT_SHA512:
+ if (authsize != SHA512_DIGEST_SIZE &&
+ authsize != SHA512_TRUNC_DIGEST_SIZE)
+ return -EINVAL;
+
+ if (authsize == SHA512_TRUNC_DIGEST_SIZE)
+ ctx->is_trunc_hmac = true;
+ break;
+
+ case OTX_CPT_MAC_NULL:
+ if (ctx->cipher_type == OTX_CPT_AES_GCM) {
+ if (authsize != AES_GCM_ICV_SIZE)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tfm->authsize = authsize;
+ return 0;
+}
+
+static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+ struct otx_cpt_sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return NULL;
+
+ sdesc->shash.tfm = alg;
+
+ return sdesc;
+}
+
+static inline void swap_data32(void *buf, u32 len)
+{
+ u32 *store = (u32 *) buf;
+ int i = 0;
+
+	for (i = 0; i < len / sizeof(u32); i++, store++)
+ *store = cpu_to_be32(*store);
+}
+
+static inline void swap_data64(void *buf, u32 len)
+{
+ u64 *store = (u64 *) buf;
+ int i = 0;
+
+	for (i = 0; i < len / sizeof(u64); i++, store++)
+ *store = cpu_to_be64(*store);
+}
+
+static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+{
+ struct sha512_state *sha512;
+ struct sha256_state *sha256;
+ struct sha1_state *sha1;
+
+ switch (mac_type) {
+ case OTX_CPT_SHA1:
+ sha1 = (struct sha1_state *) in_pad;
+ swap_data32(sha1->state, SHA1_DIGEST_SIZE);
+ memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
+ break;
+
+ case OTX_CPT_SHA256:
+ sha256 = (struct sha256_state *) in_pad;
+ swap_data32(sha256->state, SHA256_DIGEST_SIZE);
+ memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
+ break;
+
+ case OTX_CPT_SHA384:
+ case OTX_CPT_SHA512:
+ sha512 = (struct sha512_state *) in_pad;
+ swap_data64(sha512->state, SHA512_DIGEST_SIZE);
+ memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aead_hmac_init(struct crypto_aead *cipher)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ int state_size = crypto_shash_statesize(ctx->hashalg);
+ int ds = crypto_shash_digestsize(ctx->hashalg);
+ int bs = crypto_shash_blocksize(ctx->hashalg);
+ int authkeylen = ctx->auth_key_len;
+ u8 *ipad = NULL, *opad = NULL;
+ int ret = 0, icount = 0;
+
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc)
+ return -ENOMEM;
+
+ ctx->ipad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ctx->opad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ipad = kzalloc(state_size, GFP_KERNEL);
+ if (!ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ opad = kzalloc(state_size, GFP_KERNEL);
+ if (!opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
+ authkeylen, ipad);
+ if (ret)
+ goto calc_fail;
+
+ authkeylen = ds;
+ } else {
+ memcpy(ipad, ctx->key, authkeylen);
+ }
+
+ memset(ipad + authkeylen, 0, bs - authkeylen);
+ memcpy(opad, ipad, bs);
+
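+ /*
+ * Per RFC 2104, the inner pad is the zero-padded key XORed with
+ * 0x36 and the outer pad is the same key XORed with 0x5c.
+ */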
+ for (icount = 0; icount < bs; icount++) {
+ ipad[icount] ^= 0x36;
+ opad[icount] ^= 0x5c;
+ }
+
+ /*
+ * Compute the ipad and opad partial hashes with the software
+ * algorithm and export their states for the hardware to use.
+ */
+
+ /* IPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, ipad);
+ ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ if (ret)
+ goto calc_fail;
+
+ /* OPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, opad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, opad);
+ ret = copy_pad(ctx->mac_type, ctx->opad, opad);
+ if (ret)
+ goto calc_fail;
+
+ kfree(ipad);
+ kfree(opad);
+
+ return 0;
+
+calc_fail:
+ kfree(ctx->ipad);
+ ctx->ipad = NULL;
+ kfree(ctx->opad);
+ ctx->opad = NULL;
+ kfree(ipad);
+ kfree(opad);
+ kfree(ctx->sdesc);
+ ctx->sdesc = NULL;
+
+ return ret;
+}
+
+static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ int enckeylen = 0, authkeylen = 0;
+ struct rtattr *rta = (void *)key;
+ int status = -EINVAL;
+
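+ /*
+ * The authenc() key blob is an rtattr-wrapped
+ * crypto_authenc_key_param carrying enckeylen, followed by the
+ * authentication key and then the encryption key.
+ */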
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < enckeylen)
+ goto badkey;
+
+ if (keylen > OTX_CPT_MAX_KEY_SIZE)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+ memcpy(ctx->key, key, keylen);
+
+ switch (enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ goto badkey;
+ }
+
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = authkeylen;
+
+ status = aead_hmac_init(cipher);
+ if (status)
+ goto badkey;
+
+ return 0;
+badkey:
+ return status;
+}
+
+static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta = (void *)key;
+ int enckeylen = 0;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (enckeylen != 0)
+ goto badkey;
+
+ if (keylen > OTX_CPT_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = keylen;
+ return 0;
+badkey:
+ return -EINVAL;
+}
+
+static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+
+ /*
+ * For AES GCM we expect the encryption key (16, 24 or 32 bytes)
+ * followed by the salt (4 bytes).
+ */
+ switch (keylen) {
+ case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_128;
+ break;
+ case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX_CPT_AES_192_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_192;
+ break;
+ case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_256;
+ break;
+ default:
+ /* Invalid key and salt length */
+ return -EINVAL;
+ }
+
+ /* Store encryption key and salt */
+ memcpy(ctx->key, key, keylen);
+
+ return 0;
+}
+
+static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
+ int mac_len = crypto_aead_authsize(tfm);
+ int ds;
+
+ rctx->ctrl_word.e.enc_data_offset = req->assoclen;
+
+ switch (ctx->cipher_type) {
+ case OTX_CPT_AES_CBC:
+ fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
+ ctx->enc_key_len);
+ /* Copy IV to context */
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
+
+ ds = crypto_shash_digestsize(ctx->hashalg);
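+ /* The SHA384 partial hash state is SHA512 sized */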
+ if (ctx->mac_type == OTX_CPT_SHA384)
+ ds = SHA512_DIGEST_SIZE;
+ if (ctx->ipad)
+ memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
+ if (ctx->opad)
+ memcpy(fctx->hmac.e.opad, ctx->opad, ds);
+ break;
+
+ case OTX_CPT_AES_GCM:
+ fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
+ /* Copy salt to context */
+ memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
+ AES_GCM_SALT_SIZE);
+
+ rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
+ break;
+
+ default:
+ /* Unknown cipher type */
+ return -EINVAL;
+ }
+ rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags);
+
+ req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
+ req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
+ req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
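+ /*
+ * param1 is the length of the data to cipher and param2 the total
+ * input length including the associated data; on decryption the
+ * authentication tag is part of cryptlen, hence mac_len is
+ * subtracted out.
+ */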
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ req_info->req.param1 = req->cryptlen;
+ req_info->req.param2 = req->cryptlen + req->assoclen;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ req_info->req.param1 = req->cryptlen - mac_len;
+ req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
+ }
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
+ fctx->enc.enc_ctrl.e.mac_len = mac_len;
+ fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);
+
+ /*
+ * Store packet data layout information in the offset control
+ * word, the first 8 bytes of the input.
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
+ u32 enc)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+
+ req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
+ req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
+ req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
+ DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
+ req_info->is_trunc_hmac = ctx->is_trunc_hmac;
+
+ req_info->req.opcode.s.minor = 0;
+ req_info->req.param1 = ctx->auth_key_len;
+ req_info->req.param2 = ctx->mac_type << 8;
+
+ /* Add authentication key */
+ req_info->in[*argcnt].vptr = ctx->key;
+ req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
+ req_info->req.dlen += round_up(ctx->auth_key_len, 8);
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen = req->cryptlen + req->assoclen;
+ u32 status, argcnt = 0;
+
+ status = create_aead_ctx_hdr(req, enc, &argcnt);
+ if (status)
+ return status;
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->incnt = argcnt;
+
+ return 0;
+}
+
+static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
+ u32 mac_len)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0, outputlen = 0;
+
+ if (enc)
+ outputlen = req->cryptlen + req->assoclen + mac_len;
+ else
+ outputlen = req->cryptlen + req->assoclen - mac_len;
+
+ update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
+ req_info->outcnt = argcnt;
+
+ return 0;
+}
+
+static inline u32 create_aead_null_input_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen, argcnt = 0;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ create_hmac_ctx_hdr(req, &argcnt, enc);
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->incnt = argcnt;
+
+ return 0;
+}
+
+static inline u32 create_aead_null_output_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ struct scatterlist *dst;
+ u8 *ptr = NULL;
+ int argcnt = 0, status, offset;
+ u32 inputlen;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ /*
+ * If source and destination are different
+ * then copy payload to destination
+ */
+ if (req->src != req->dst) {
+
+ ptr = kmalloc(inputlen, (req_info->areq->flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ptr) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ kfree(ptr);
+ }
+
+ if (enc) {
+ /*
+ * When encrypting, the HMAC has to be appended
+ * after the payload.
+ */
+ dst = req->dst;
+ offset = inputlen;
+ while (offset >= dst->length) {
+ offset -= dst->length;
+ dst = sg_next(dst);
+ if (!dst) {
+ status = -ENOENT;
+ goto error;
+ }
+ }
+
+ update_output_data(req_info, dst, offset, mac_len, &argcnt);
+ } else {
+ /*
+ * When decrypting, the HMAC calculated over the received
+ * payload has to be compared with the HMAC received.
+ */
+ status = sg_copy_buffer(req->src, sg_nents(req->src),
+ rctx->fctx.hmac.s.hmac_recv, mac_len,
+ inputlen, true);
+ if (status != mac_len) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
+ req_info->out[argcnt].size = mac_len;
+ argcnt++;
+ }
+
+ req_info->outcnt = argcnt;
+ return 0;
+
+error_free:
+ kfree(ptr);
+error:
+ return status;
+}
+
+static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct pci_dev *pdev;
+ u32 status, cpu_num;
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.flags = 0;
+
+ req_info->callback = otx_cpt_aead_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = reg_type;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+
+ switch (reg_type) {
+ case OTX_CPT_AEAD_ENC_DEC_REQ:
+ status = create_aead_input_list(req, enc);
+ if (status)
+ return status;
+ status = create_aead_output_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ break;
+
+ case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
+ status = create_aead_null_input_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ status = create_aead_null_output_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate that the request doesn't exceed the maximum size supported by CPT */
+ if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
+ req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
+ return -E2BIG;
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->ctrl.s.grp = 0;
+
+ status = otx_cpt_do_request(pdev, req_info, cpu_num);
+ /*
+ * The request is sent asynchronously; once it
+ * completes, the driver notifies the caller through
+ * the registered callback.
+ */
+ return status;
+}
+
+static int otx_cpt_aead_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
+}
+
+static int otx_cpt_aead_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
+}
+
+static int otx_cpt_aead_null_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
+}
+
+static int otx_cpt_aead_null_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
+}
+
+static struct skcipher_alg otx_cpt_skciphers[] = { {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cpt_xts_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = otx_cpt_skcipher_xts_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cpt_cbc_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx_cpt_skcipher_cbc_aes_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cpt_ecb_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .ivsize = 0,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx_cpt_skcipher_ecb_aes_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "cpt_cfb_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx_cpt_skcipher_cfb_aes_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cpt_cbc_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = otx_cpt_skcipher_cbc_des3_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cpt_ecb_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ .setkey = otx_cpt_skcipher_ecb_des3_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+} };
+
+static struct aead_alg otx_cpt_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_cbc_aes_sha1_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_cbc_aes_sha256_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_cbc_aes_sha384_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_cbc_aes_sha512_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha1_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_ecb_null_sha1_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_null_encrypt,
+ .decrypt = otx_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha256_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_ecb_null_sha256_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_null_encrypt,
+ .decrypt = otx_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha384_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_ecb_null_sha384_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_null_encrypt,
+ .decrypt = otx_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha512_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_ecb_null_sha512_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_null_encrypt,
+ .decrypt = otx_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "cpt_rfc4106_gcm_aes",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_gcm_aes_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_gcm_aes_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_GCM_IV_SIZE,
+ .maxauthsize = AES_GCM_ICV_SIZE,
+} };
+
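+/*
+ * An algorithm's cra_refcnt is 1 right after registration, so any
+ * higher value means a transform still holds a reference to it.
+ */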
+static inline bool is_any_alg_used(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
+ if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
+ return true;
+ for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
+ if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
+ return true;
+ return false;
+}
+
+static inline int cpt_register_algs(void)
+{
+ int i, err = 0;
+
+ if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
+ for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
+ otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx_cpt_skciphers,
+ ARRAY_SIZE(otx_cpt_skciphers));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
+ otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
+ if (err) {
+ crypto_unregister_skciphers(otx_cpt_skciphers,
+ ARRAY_SIZE(otx_cpt_skciphers));
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void cpt_unregister_algs(void)
+{
+ crypto_unregister_skciphers(otx_cpt_skciphers,
+ ARRAY_SIZE(otx_cpt_skciphers));
+ crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
+}
+
+static int compare_func(const void *lptr, const void *rptr)
+{
+ const struct cpt_device_desc *ldesc = lptr;
+ const struct cpt_device_desc *rdesc = rptr;
+
+ if (ldesc->dev->devfn < rdesc->dev->devfn)
+ return -1;
+ if (ldesc->dev->devfn > rdesc->dev->devfn)
+ return 1;
+ return 0;
+}
+
+static void swap_func(void *lptr, void *rptr, int size)
+{
+ struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
+ struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
+ struct cpt_device_desc desc;
+
+ desc = *ldesc;
+ *ldesc = *rdesc;
+ *rdesc = desc;
+}
+
+int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ enum otx_cptpf_type pf_type,
+ enum otx_cptvf_type engine_type,
+ int num_queues, int num_devices)
+{
+ int ret = 0;
+ int count;
+
+ mutex_lock(&mutex);
+ switch (engine_type) {
+ case OTX_CPT_SE_TYPES:
+ count = atomic_read(&se_devices.count);
+ if (count >= CPT_MAX_VF_NUM) {
+ dev_err(&pdev->dev, "No space to add a new device");
+ ret = -ENOSPC;
+ goto err;
+ }
+ se_devices.desc[count].pf_type = pf_type;
+ se_devices.desc[count].num_queues = num_queues;
+ se_devices.desc[count++].dev = pdev;
+ atomic_inc(&se_devices.count);
+
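+ /*
+ * Register the crypto algorithms only once all expected SE
+ * devices have been probed, so that every device is available
+ * when requests start arriving.
+ */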
+ if (atomic_read(&se_devices.count) == num_devices &&
+ is_crypto_registered == false) {
+ if (cpt_register_algs()) {
+ dev_err(&pdev->dev,
+ "Error in registering crypto algorithms\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ try_module_get(mod);
+ is_crypto_registered = true;
+ }
+ sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
+ compare_func, swap_func);
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ count = atomic_read(&ae_devices.count);
+ if (count >= CPT_MAX_VF_NUM) {
+ dev_err(&pdev->dev, "No space to a add new device");
+ ret = -ENOSPC;
+ goto err;
+ }
+ ae_devices.desc[count].pf_type = pf_type;
+ ae_devices.desc[count].num_queues = num_queues;
+ ae_devices.desc[count++].dev = pdev;
+ atomic_inc(&ae_devices.count);
+ sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
+ compare_func, swap_func);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
+ ret = BAD_OTX_CPTVF_TYPE;
+ }
+err:
+ mutex_unlock(&mutex);
+ return ret;
+}
+
+void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
+ enum otx_cptvf_type engine_type)
+{
+ struct cpt_device_table *dev_tbl;
+ bool dev_found = false;
+ int i, j, count;
+
+ mutex_lock(&mutex);
+
+ dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
+ count = atomic_read(&dev_tbl->count);
+ for (i = 0; i < count; i++)
+ if (pdev == dev_tbl->desc[i].dev) {
+ for (j = i; j < count-1; j++)
+ dev_tbl->desc[j] = dev_tbl->desc[j+1];
+ dev_found = true;
+ break;
+ }
+
+ if (!dev_found) {
+ dev_err(&pdev->dev, "%s device not found", __func__);
+ goto exit;
+ }
+
+ if (engine_type != OTX_CPT_AE_TYPES) {
+ if (atomic_dec_and_test(&se_devices.count) &&
+ !is_any_alg_used()) {
+ cpt_unregister_algs();
+ module_put(mod);
+ is_crypto_registered = false;
+ }
+ } else {
+ atomic_dec(&ae_devices.count);
+ }
+exit:
+ mutex_unlock(&mutex);
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
new file mode 100644
index 000000000000..67cc0025f5d5
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPT_ALGS_H
+#define __OTX_CPT_ALGS_H
+
+#include <crypto/hash.h>
+#include "otx_cpt_common.h"
+
+#define OTX_CPT_MAX_ENC_KEY_SIZE 32
+#define OTX_CPT_MAX_HASH_KEY_SIZE 64
+#define OTX_CPT_MAX_KEY_SIZE (OTX_CPT_MAX_ENC_KEY_SIZE + \
+ OTX_CPT_MAX_HASH_KEY_SIZE)
+enum otx_cpt_request_type {
+ OTX_CPT_ENC_DEC_REQ = 0x1,
+ OTX_CPT_AEAD_ENC_DEC_REQ = 0x2,
+ OTX_CPT_AEAD_ENC_DEC_NULL_REQ = 0x3,
+ OTX_CPT_PASSTHROUGH_REQ = 0x4
+};
+
+enum otx_cpt_major_opcodes {
+ OTX_CPT_MAJOR_OP_MISC = 0x01,
+ OTX_CPT_MAJOR_OP_FC = 0x33,
+ OTX_CPT_MAJOR_OP_HMAC = 0x35,
+};
+
+enum otx_cpt_req_type {
+ OTX_CPT_AE_CORE_REQ,
+ OTX_CPT_SE_CORE_REQ
+};
+
+enum otx_cpt_cipher_type {
+ OTX_CPT_CIPHER_NULL = 0x0,
+ OTX_CPT_DES3_CBC = 0x1,
+ OTX_CPT_DES3_ECB = 0x2,
+ OTX_CPT_AES_CBC = 0x3,
+ OTX_CPT_AES_ECB = 0x4,
+ OTX_CPT_AES_CFB = 0x5,
+ OTX_CPT_AES_CTR = 0x6,
+ OTX_CPT_AES_GCM = 0x7,
+ OTX_CPT_AES_XTS = 0x8
+};
+
+enum otx_cpt_mac_type {
+ OTX_CPT_MAC_NULL = 0x0,
+ OTX_CPT_MD5 = 0x1,
+ OTX_CPT_SHA1 = 0x2,
+ OTX_CPT_SHA224 = 0x3,
+ OTX_CPT_SHA256 = 0x4,
+ OTX_CPT_SHA384 = 0x5,
+ OTX_CPT_SHA512 = 0x6,
+ OTX_CPT_GMAC = 0x7
+};
+
+enum otx_cpt_aes_key_len {
+ OTX_CPT_AES_128_BIT = 0x1,
+ OTX_CPT_AES_192_BIT = 0x2,
+ OTX_CPT_AES_256_BIT = 0x3
+};
+
+union otx_cpt_encr_ctrl {
+ u64 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 enc_cipher:4;
+ u64 reserved1:1;
+ u64 aes_key:2;
+ u64 iv_source:1;
+ u64 mac_type:4;
+ u64 reserved2:3;
+ u64 auth_input_type:1;
+ u64 mac_len:8;
+ u64 reserved3:8;
+ u64 encr_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 encr_offset:16;
+ u64 reserved3:8;
+ u64 mac_len:8;
+ u64 auth_input_type:1;
+ u64 reserved2:3;
+ u64 mac_type:4;
+ u64 iv_source:1;
+ u64 aes_key:2;
+ u64 reserved1:1;
+ u64 enc_cipher:4;
+#endif
+ } e;
+};
+
+struct otx_cpt_cipher {
+ const char *name;
+ u8 value;
+};
+
+struct otx_cpt_enc_context {
+ union otx_cpt_encr_ctrl enc_ctrl;
+ u8 encr_key[32];
+ u8 encr_iv[16];
+};
+
+union otx_cpt_fchmac_ctx {
+ struct {
+ u8 ipad[64];
+ u8 opad[64];
+ } e;
+ struct {
+ u8 hmac_calc[64]; /* HMAC calculated */
+ u8 hmac_recv[64]; /* HMAC received */
+ } s;
+};
+
+struct otx_cpt_fc_ctx {
+ struct otx_cpt_enc_context enc;
+ union otx_cpt_fchmac_ctx hmac;
+};
+
+struct otx_cpt_enc_ctx {
+ u32 key_len;
+ u8 enc_key[OTX_CPT_MAX_KEY_SIZE];
+ u8 cipher_type;
+ u8 key_type;
+};
+
+struct otx_cpt_des3_ctx {
+ u32 key_len;
+ u8 des3_key[OTX_CPT_MAX_KEY_SIZE];
+};
+
+union otx_cpt_offset_ctrl_word {
+ u64 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved:32;
+ u64 enc_data_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 enc_data_offset:16;
+ u64 reserved:32;
+#endif
+ } e;
+};
+
+struct otx_cpt_req_ctx {
+ struct otx_cpt_req_info cpt_req;
+ union otx_cpt_offset_ctrl_word ctrl_word;
+ struct otx_cpt_fc_ctx fctx;
+};
+
+struct otx_cpt_sdesc {
+ struct shash_desc shash;
+};
+
+struct otx_cpt_aead_ctx {
+ u8 key[OTX_CPT_MAX_KEY_SIZE];
+ struct crypto_shash *hashalg;
+ struct otx_cpt_sdesc *sdesc;
+ u8 *ipad;
+ u8 *opad;
+ u32 enc_key_len;
+ u32 auth_key_len;
+ u8 cipher_type;
+ u8 mac_type;
+ u8 key_type;
+ u8 is_trunc_hmac;
+};
+
+int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ enum otx_cptpf_type pf_type,
+ enum otx_cptvf_type engine_type,
+ int num_queues, int num_devices);
+void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
+ enum otx_cptvf_type engine_type);
+void otx_cpt_callback(int status, void *arg, void *req);
+
+#endif /* __OTX_CPT_ALGS_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
new file mode 100644
index 000000000000..a91860b5dc77
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include "otx_cptvf.h"
+#include "otx_cptvf_algs.h"
+#include "otx_cptvf_reqmgr.h"
+
+#define DRV_NAME "octeontx-cptvf"
+#define DRV_VERSION "1.0"
+
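+/*
+ * Tasklet scheduled by the done interrupt handler to post-process
+ * completed instructions.
+ */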
+static void vq_work_handler(unsigned long data)
+{
+ struct otx_cptvf_wqe_info *cwqe_info =
+ (struct otx_cptvf_wqe_info *) data;
+
+ otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
+}
+
+static int init_worker_threads(struct otx_cptvf *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ struct otx_cptvf_wqe_info *cwqe_info;
+ int i;
+
+ cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
+ if (!cwqe_info)
+ return -ENOMEM;
+
+ if (cptvf->num_queues) {
+ dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
+ cptvf->num_queues);
+ }
+
+ for (i = 0; i < cptvf->num_queues; i++) {
+ tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
+ (u64)cwqe_info);
+ cwqe_info->vq_wqe[i].cptvf = cptvf;
+ }
+ cptvf->wqe_info = cwqe_info;
+
+ return 0;
+}
+
+static void cleanup_worker_threads(struct otx_cptvf *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ struct otx_cptvf_wqe_info *cwqe_info;
+ int i;
+
+ cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
+ if (!cwqe_info)
+ return;
+
+ if (cptvf->num_queues) {
+ dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
+ cptvf->num_queues);
+ }
+
+ for (i = 0; i < cptvf->num_queues; i++)
+ tasklet_kill(&cwqe_info->vq_wqe[i].twork);
+
+ kzfree(cwqe_info);
+ cptvf->wqe_info = NULL;
+}
+
+static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
+{
+ struct otx_cpt_pending_queue *queue;
+ int i;
+
+ for_each_pending_queue(pqinfo, queue, i) {
+ if (!queue->head)
+ continue;
+
+ /* free single queue */
+ kzfree((queue->head));
+ queue->front = 0;
+ queue->rear = 0;
+ queue->qlen = 0;
+ }
+ pqinfo->num_queues = 0;
+}
+
+static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
+ u32 num_queues)
+{
+ struct otx_cpt_pending_queue *queue = NULL;
+ size_t size;
+ int ret;
+ u32 i;
+
+ pqinfo->num_queues = num_queues;
+ size = (qlen * sizeof(struct otx_cpt_pending_entry));
+
+ for_each_pending_queue(pqinfo, queue, i) {
+ queue->head = kzalloc((size), GFP_KERNEL);
+ if (!queue->head) {
+ ret = -ENOMEM;
+ goto pending_qfail;
+ }
+
+ queue->pending_count = 0;
+ queue->front = 0;
+ queue->rear = 0;
+ queue->qlen = qlen;
+
+ /* init queue spin lock */
+ spin_lock_init(&queue->lock);
+ }
+ return 0;
+
+pending_qfail:
+ free_pending_queues(pqinfo);
+
+ return ret;
+}
+
+static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
+ u32 num_queues)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ int ret;
+
+ if (!num_queues)
+ return 0;
+
+ ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
+ num_queues);
+ return ret;
+ }
+ return 0;
+}
+
+static void cleanup_pending_queues(struct otx_cptvf *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+
+ if (!cptvf->num_queues)
+ return;
+
+ dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
+ cptvf->num_queues);
+ free_pending_queues(&cptvf->pqinfo);
+}
+
+static void free_command_queues(struct otx_cptvf *cptvf,
+ struct otx_cpt_cmd_qinfo *cqinfo)
+{
+ struct otx_cpt_cmd_queue *queue = NULL;
+ struct otx_cpt_cmd_chunk *chunk = NULL;
+ struct pci_dev *pdev = cptvf->pdev;
+ int i;
+
+ /* clean up for each queue */
+ for (i = 0; i < cptvf->num_queues; i++) {
+ queue = &cqinfo->queue[i];
+
+ while (!list_empty(&cqinfo->queue[i].chead)) {
+ chunk = list_first_entry(&cqinfo->queue[i].chead,
+ struct otx_cpt_cmd_chunk, nextchunk);
+
+ dma_free_coherent(&pdev->dev, chunk->size,
+ chunk->head,
+ chunk->dma_addr);
+ chunk->head = NULL;
+ chunk->dma_addr = 0;
+ list_del(&chunk->nextchunk);
+ kzfree(chunk);
+ }
+ queue->num_chunks = 0;
+ queue->idx = 0;
+
+ }
+}
+
+static int alloc_command_queues(struct otx_cptvf *cptvf,
+ struct otx_cpt_cmd_qinfo *cqinfo,
+ u32 qlen)
+{
+ struct otx_cpt_cmd_chunk *curr, *first, *last;
+ struct otx_cpt_cmd_queue *queue = NULL;
+ struct pci_dev *pdev = cptvf->pdev;
+ size_t q_size, c_size, rem_q_size;
+ u32 qcsize_bytes;
+ int i;
+
+
+ /* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
+ cptvf->qsize = min(qlen, cqinfo->qchunksize) *
+ OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
+ /* Qsize in bytes to create space for alignment */
+ q_size = qlen * OTX_CPT_INST_SIZE;
+
+ qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;
+
+ /* per queue initialization */
+ for (i = 0; i < cptvf->num_queues; i++) {
+ c_size = 0;
+ rem_q_size = q_size;
+ first = NULL;
+ last = NULL;
+
+ queue = &cqinfo->queue[i];
+ INIT_LIST_HEAD(&queue->chead);
+ do {
+ curr = kzalloc(sizeof(*curr), GFP_KERNEL);
+ if (!curr)
+ goto cmd_qfail;
+
+ c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
+ rem_q_size;
+ curr->head = dma_alloc_coherent(&pdev->dev,
+ c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
+ &curr->dma_addr, GFP_KERNEL);
+ if (!curr->head) {
+ dev_err(&pdev->dev,
+ "Command Q (%d) chunk (%d) allocation failed\n",
+ i, queue->num_chunks);
+ goto free_curr;
+ }
+ curr->size = c_size;
+
+ if (queue->num_chunks == 0) {
+ first = curr;
+ queue->base = first;
+ }
+ list_add_tail(&curr->nextchunk,
+ &cqinfo->queue[i].chead);
+
+ queue->num_chunks++;
+ rem_q_size -= c_size;
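+ /*
+ * The trailing 8 bytes of each chunk hold the DMA address
+ * of the next chunk, letting the engine walk the chain.
+ */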
+ if (last)
+ *((u64 *)(&last->head[last->size])) =
+ (u64)curr->dma_addr;
+
+ last = curr;
+ } while (rem_q_size);
+
+ /*
+ * Make the queue circular, tie back last chunk entry to head
+ */
+ curr = first;
+ *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
+ queue->qhead = curr;
+ }
+ return 0;
+free_curr:
+ kfree(curr);
+cmd_qfail:
+ free_command_queues(cptvf, cqinfo);
+ return -ENOMEM;
+}
+
+static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ int ret;
+
+ /* setup command queues */
+ ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
+ cptvf->num_queues);
+ return ret;
+ }
+ return ret;
+}
+
+static void cleanup_command_queues(struct otx_cptvf *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+
+ if (!cptvf->num_queues)
+ return;
+
+ dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
+ cptvf->num_queues);
+ free_command_queues(cptvf, &cptvf->cqinfo);
+}
+
+static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
+{
+ cleanup_worker_threads(cptvf);
+ cleanup_pending_queues(cptvf);
+ cleanup_command_queues(cptvf);
+}
+
+static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ u32 max_dev_queues = 0;
+ int ret;
+
+ max_dev_queues = OTX_CPT_NUM_QS_PER_VF;
+ /* possible cpus */
+ num_queues = min_t(u32, num_queues, max_dev_queues);
+ cptvf->num_queues = num_queues;
+
+ ret = init_command_queues(cptvf, qlen);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
+ num_queues);
+ return ret;
+ }
+
+ ret = init_pending_queues(cptvf, qlen, num_queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
+ num_queues);
+ goto setup_pqfail;
+ }
+
+ /* Create worker threads for BH processing */
+ ret = init_worker_threads(cptvf);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to setup worker threads\n");
+ goto init_work_fail;
+ }
+ return 0;
+
+init_work_fail:
+ cleanup_worker_threads(cptvf);
+ cleanup_pending_queues(cptvf);
+
+setup_pqfail:
+ cleanup_command_queues(cptvf);
+
+ return ret;
+}
+
+static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
+{
+ irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
+ free_cpumask_var(cptvf->affinity_mask[vec]);
+}
+
+static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
+{
+ union otx_cptx_vqx_ctl vqx_ctl;
+
+ vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
+ vqx_ctl.s.ena = val;
+ writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
+}
+
+void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
+{
+ union otx_cptx_vqx_doorbell vqx_dbell;
+
+ vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
+ vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
+ writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
+}
+
+static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
+{
+ union otx_cptx_vqx_inprog vqx_inprg;
+
+ vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
+ vqx_inprg.s.inflight = val;
+ writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
+}
+
+static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
+{
+ union otx_cptx_vqx_done_wait vqx_dwait;
+
+ vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+ vqx_dwait.s.num_wait = val;
+ writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+}
+
+static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_done_wait vqx_dwait;
+
+ vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+ return vqx_dwait.s.num_wait;
+}
+
+static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
+{
+ union otx_cptx_vqx_done_wait vqx_dwait;
+
+ vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+ vqx_dwait.s.time_wait = time;
+ writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+}
+
+
+static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_done_wait vqx_dwait;
+
+ vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+ return vqx_dwait.s.time_wait;
+}
+
+static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;
+
+ vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+ /* Enable SWERR interrupts for the requested VF */
+ vqx_misc_ena.s.swerr = 1;
+ writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+}
+
+static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;
+
+ vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+ /* Enable MBOX interrupt for the requested VF */
+ vqx_misc_ena.s.mbox = 1;
+ writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+}
+
+static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_done_ena_w1s vqx_done_ena;
+
+ vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
+ /* Enable DONE interrupt for the requested VF */
+ vqx_done_ena.s.done = 1;
+ writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
+}
+
+static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.dovf = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.irde = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.nwrp = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.mbox = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.swerr = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
+{
+ return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
+ void *arg)
+{
+ struct otx_cptvf *cptvf = arg;
+ struct pci_dev *pdev = cptvf->pdev;
+ u64 intr;
+
+ intr = cptvf_read_vf_misc_intr_status(cptvf);
+ /* Check for MISC interrupt types */
+ if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
+ dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ otx_cptvf_handle_mbox_intr(cptvf);
+ cptvf_clear_mbox_intr(cptvf);
+ } else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
+ cptvf_clear_dovf_intr(cptvf);
+ /* Clear doorbell count */
+ otx_cptvf_write_vq_doorbell(cptvf, 0);
+ dev_err(&pdev->dev,
+ "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ } else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
+ cptvf_clear_irde_intr(cptvf);
+ dev_err(&pdev->dev,
+ "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ } else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
+ cptvf_clear_nwrp_intr(cptvf);
+ dev_err(&pdev->dev,
+ "NCB response write error interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ } else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
+ cptvf_clear_swerr_intr(cptvf);
+ dev_err(&pdev->dev,
+ "Software error interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ } else {
+ dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
+ cptvf->vfid);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
+ int qno)
+{
+ struct otx_cptvf_wqe_info *nwqe_info;
+
+ if (unlikely(qno >= cptvf->num_queues))
+ return NULL;
+ nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
+
+ return &nwqe_info->vq_wqe[qno];
+}
+
+static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_done vqx_done;
+
+ vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
+ return vqx_done.s.done;
+}
+
+static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
+ u32 ackcnt)
+{
+ union otx_cptx_vqx_done_ack vqx_dack_cnt;
+
+ vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
+ vqx_dack_cnt.s.done_ack = ackcnt;
+ writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
+}
+
+static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
+ void *cptvf_dev)
+{
+ struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
+ struct pci_dev *pdev = cptvf->pdev;
+ /* Read the number of completions */
+ u32 intr = cptvf_read_vq_done_count(cptvf);
+
+ if (intr) {
+ struct otx_cptvf_wqe *wqe;
+
+ /*
+ * Acknowledge the number of scheduled completions for
+ * processing
+ */
+ cptvf_write_vq_done_ack(cptvf, intr);
+ wqe = get_cptvf_vq_wqe(cptvf, 0);
+ if (unlikely(!wqe)) {
+ dev_err(&pdev->dev, "No work to schedule for VF (%d)",
+ cptvf->vfid);
+ return IRQ_NONE;
+ }
+ tasklet_hi_schedule(&wqe->twork);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
+ GFP_KERNEL)) {
+ dev_err(&pdev->dev,
+ "Allocation failed for affinity_mask for VF %d",
+ cptvf->vfid);
+ return;
+ }
+
+ cpu = cptvf->vfid % num_online_cpus();
+ cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
+ cptvf->affinity_mask[vec]);
+ irq_set_affinity_hint(pci_irq_vector(pdev, vec),
+ cptvf->affinity_mask[vec]);
+}
+
+static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
+{
+ union otx_cptx_vqx_saddr vqx_saddr;
+
+ vqx_saddr.u = val;
+ writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
+}
+
+static void cptvf_device_init(struct otx_cptvf *cptvf)
+{
+ u64 base_addr = 0;
+
+ /* Disable the VQ */
+ cptvf_write_vq_ctl(cptvf, 0);
+ /* Reset the doorbell */
+ otx_cptvf_write_vq_doorbell(cptvf, 0);
+ /* Clear inflight */
+ cptvf_write_vq_inprog(cptvf, 0);
+ /* Write VQ SADDR */
+ base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
+ cptvf_write_vq_saddr(cptvf, base_addr);
+ /* Configure timerhold / coalescence */
+ cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
+ cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
+ /* Enable the VQ */
+ cptvf_write_vq_ctl(cptvf, 1);
+ /* Flag the VF ready */
+ cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
+}
+
+static ssize_t vf_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+ char *msg;
+
+ switch (cptvf->vftype) {
+ case OTX_CPT_AE_TYPES:
+ msg = "AE";
+ break;
+
+ case OTX_CPT_SE_TYPES:
+ msg = "SE";
+ break;
+
+ default:
+ msg = "Invalid";
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
+}
+
+static ssize_t vf_engine_group_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
+}
+
+static ssize_t vf_engine_group_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+ int val, ret;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val < 0)
+ return -EINVAL;
+
+ if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
+ dev_err(dev, "Engine group >= than max available groups %d",
+ OTX_CPT_MAX_ENGINE_GROUPS);
+ return -EINVAL;
+ }
+
+ ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t vf_coalesc_time_wait_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ cptvf_read_vq_done_timewait(cptvf));
+}
+
+static ssize_t vf_coalesc_num_wait_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ cptvf_read_vq_done_numwait(cptvf));
+}
+
+static ssize_t vf_coalesc_time_wait_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
+ val > OTX_CPT_COALESC_MAX_TIME_WAIT)
+ return -EINVAL;
+
+ cptvf_write_vq_done_timewait(cptvf, val);
+ return count;
+}
+
+static ssize_t vf_coalesc_num_wait_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
+ val > OTX_CPT_COALESC_MAX_NUM_WAIT)
+ return -EINVAL;
+
+ cptvf_write_vq_done_numwait(cptvf, val);
+ return count;
+}
+
+static DEVICE_ATTR_RO(vf_type);
+static DEVICE_ATTR_RW(vf_engine_group);
+static DEVICE_ATTR_RW(vf_coalesc_time_wait);
+static DEVICE_ATTR_RW(vf_coalesc_num_wait);
+
+static struct attribute *otx_cptvf_attrs[] = {
+ &dev_attr_vf_type.attr,
+ &dev_attr_vf_engine_group.attr,
+ &dev_attr_vf_coalesc_time_wait.attr,
+ &dev_attr_vf_coalesc_num_wait.attr,
+ NULL
+};
+
+static const struct attribute_group otx_cptvf_sysfs_group = {
+ .attrs = otx_cptvf_attrs,
+};
+
+static int otx_cptvf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct otx_cptvf *cptvf;
+ int err;
+
+ cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
+ if (!cptvf)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, cptvf);
+ cptvf->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto disable_device;
+ }
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto release_regions;
+ }
+
+ /* Map VF's configuration registers */
+ cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
+ if (!cptvf->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto release_regions;
+ }
+
+ cptvf->node = dev_to_node(&pdev->dev);
+ err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
+ OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "Request for #%d msix vectors failed\n",
+ OTX_CPT_VF_MSIX_VECTORS);
+ goto unmap_region;
+ }
+
+ err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
+ cptvf_misc_intr_handler, 0, "CPT VF misc intr",
+ cptvf);
+ if (err) {
+ dev_err(dev, "Failed to request misc irq");
+ goto free_vectors;
+ }
+
+ /* Enable mailbox interrupt */
+ cptvf_enable_mbox_interrupts(cptvf);
+ cptvf_enable_swerr_interrupts(cptvf);
+
+ /* Check CPT PF status; get chip ID / device ID from the PF if ready */
+ err = otx_cptvf_check_pf_ready(cptvf);
+ if (err)
+ goto free_misc_irq;
+
+ /* CPT VF software resources initialization */
+ cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
+ err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
+ if (err) {
+ dev_err(dev, "cptvf_sw_init() failed");
+ goto free_misc_irq;
+ }
+ /* Convey VQ LEN to PF */
+ err = otx_cptvf_send_vq_size_msg(cptvf);
+ if (err)
+ goto sw_cleanup;
+
+ /* CPT VF device initialization */
+ cptvf_device_init(cptvf);
+ /* Send msg to PF to assign the current VQ to the required group */
+ err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
+ if (err)
+ goto sw_cleanup;
+
+ cptvf->priority = 1;
+ err = otx_cptvf_send_vf_priority_msg(cptvf);
+ if (err)
+ goto sw_cleanup;
+
+ err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
+ cptvf_done_intr_handler, 0, "CPT VF done intr",
+ cptvf);
+ if (err) {
+ dev_err(dev, "Failed to request done irq\n");
+ /* The done irq has not been requested at this point */
+ goto sw_cleanup;
+ }
+
+ /* Enable done interrupt */
+ cptvf_enable_done_interrupts(cptvf);
+
+ /* Set irq affinity masks */
+ cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+ cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+
+ err = otx_cptvf_send_vf_up(cptvf);
+ if (err)
+ goto free_irq_affinity;
+
+ /* Initialize algorithms and set ops */
+ err = otx_cpt_crypto_init(pdev, THIS_MODULE,
+ cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
+ cptvf->vftype, 1, cptvf->num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to register crypto algs\n");
+ goto free_irq_affinity;
+ }
+
+ err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
+ if (err) {
+ dev_err(dev, "Creating sysfs entries failed\n");
+ goto crypto_exit;
+ }
+
+ return 0;
+
+crypto_exit:
+ otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
+free_irq_affinity:
+ cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+ cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+ free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
+sw_cleanup:
+ cptvf_sw_cleanup(cptvf);
+free_misc_irq:
+ free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+free_vectors:
+ pci_free_irq_vectors(cptvf->pdev);
+unmap_region:
+ pci_iounmap(pdev, cptvf->reg_base);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void otx_cptvf_remove(struct pci_dev *pdev)
+{
+ struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
+
+ if (!cptvf) {
+ dev_err(&pdev->dev, "Invalid CPT-VF device\n");
+ return;
+ }
+
+ /* Convey DOWN to PF */
+ if (otx_cptvf_send_vf_down(cptvf)) {
+ dev_err(&pdev->dev, "PF not responding to DOWN msg");
+ } else {
+ sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
+ otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
+ cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+ cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+ free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
+ free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+ cptvf_sw_cleanup(cptvf);
+ pci_free_irq_vectors(cptvf->pdev);
+ pci_iounmap(pdev, cptvf->reg_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+/* Supported devices */
+static const struct pci_device_id otx_cptvf_id_table[] = {
+ {PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx_cptvf_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = otx_cptvf_id_table,
+ .probe = otx_cptvf_probe,
+ .remove = otx_cptvf_remove,
+};
+
+module_pci_driver(otx_cptvf_pci_driver);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
new file mode 100644
index 000000000000..5663787c7a62
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include "otx_cptvf.h"
+
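+/* Mailbox message ACK timeout in milliseconds (polled in 10 ms steps below) */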
+#define CPT_MBOX_MSG_TIMEOUT 2000
+
+static char *get_mbox_opcode_str(int msg_opcode)
+{
+ char *str = "Unknown";
+
+ switch (msg_opcode) {
+ case OTX_CPT_MSG_VF_UP:
+ str = "UP";
+ break;
+
+ case OTX_CPT_MSG_VF_DOWN:
+ str = "DOWN";
+ break;
+
+ case OTX_CPT_MSG_READY:
+ str = "READY";
+ break;
+
+ case OTX_CPT_MSG_QLEN:
+ str = "QLEN";
+ break;
+
+ case OTX_CPT_MSG_QBIND_GRP:
+ str = "QBIND_GRP";
+ break;
+
+ case OTX_CPT_MSG_VQ_PRIORITY:
+ str = "VQ_PRIORITY";
+ break;
+
+ case OTX_CPT_MSG_PF_TYPE:
+ str = "PF_TYPE";
+ break;
+
+ case OTX_CPT_MSG_ACK:
+ str = "ACK";
+ break;
+
+ case OTX_CPT_MSG_NACK:
+ str = "NACK";
+ break;
+ }
+ return str;
+}
+
+static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
+{
+ char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];
+
+ hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
+ raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
+ if (vf_id >= 0)
+ pr_debug("MBOX msg %s received from VF%d raw_data %s\n",
+ get_mbox_opcode_str(mbox_msg->msg), vf_id,
+ raw_data_str);
+ else
+ pr_debug("MBOX msg %s received from PF raw_data %s\n",
+ get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
+}
+
+static void cptvf_send_msg_to_pf(struct otx_cptvf *cptvf,
+ struct otx_cpt_mbox *mbx)
+{
+ /* Writing mbox(1) causes interrupt */
+ writeq(mbx->msg, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
+ writeq(mbx->data, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));
+}
+
+/* Interrupt handler to handle mailbox messages from the PF */
+void otx_cptvf_handle_mbox_intr(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+
+ /*
+ * MBOX[0] contains msg
+ * MBOX[1] contains data
+ */
+ mbx.msg = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
+ mbx.data = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));
+
+ dump_mbox_msg(&mbx, -1);
+
+ switch (mbx.msg) {
+ case OTX_CPT_MSG_VF_UP:
+ cptvf->pf_acked = true;
+ cptvf->num_vfs = mbx.data;
+ break;
+ case OTX_CPT_MSG_READY:
+ cptvf->pf_acked = true;
+ cptvf->vfid = mbx.data;
+ dev_dbg(&cptvf->pdev->dev, "Received VFID %d\n", cptvf->vfid);
+ break;
+ case OTX_CPT_MSG_QBIND_GRP:
+ cptvf->pf_acked = true;
+ cptvf->vftype = mbx.data;
+ dev_dbg(&cptvf->pdev->dev, "VF %d type %s group %d\n",
+ cptvf->vfid,
+ ((mbx.data == OTX_CPT_SE_TYPES) ? "SE" : "AE"),
+ cptvf->vfgrp);
+ break;
+ case OTX_CPT_MSG_ACK:
+ cptvf->pf_acked = true;
+ break;
+ case OTX_CPT_MSG_NACK:
+ cptvf->pf_nacked = true;
+ break;
+ default:
+ dev_err(&cptvf->pdev->dev, "Invalid msg from PF, msg 0x%llx\n",
+ mbx.msg);
+ break;
+ }
+}
+
+static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
+ struct otx_cpt_mbox *mbx)
+{
+ int timeout = CPT_MBOX_MSG_TIMEOUT;
+ int sleep = 10;
+
+ cptvf->pf_acked = false;
+ cptvf->pf_nacked = false;
+ cptvf_send_msg_to_pf(cptvf, mbx);
+ /* Wait for the message to be acked; timeout is 2 sec */
+ while (!cptvf->pf_acked) {
+ if (cptvf->pf_nacked)
+ return -EINVAL;
+ msleep(sleep);
+ if (cptvf->pf_acked)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ dev_err(&cptvf->pdev->dev,
+ "PF didn't ack to mbox msg %llx from VF%u\n",
+ mbx->msg, cptvf->vfid);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Checks if the VF is able to communicate with the PF
+ * and also gets the CPT number this VF is associated with.
+ */
+int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_READY;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
+
+/*
+ * Communicate VQs size to PF to program CPT(0)_PF_Q(0-15)_CTL of the VF.
+ * Must be ACKed.
+ */
+int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_QLEN;
+ mbx.data = cptvf->qsize;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
+
+/*
+ * Communicate the VF group required to the PF and get the VQ bound to that group
+ */
+int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_QBIND_GRP;
+ /* Convey group of the VF */
+ mbx.data = group;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+ if (ret)
+ return ret;
+ cptvf->vfgrp = group;
+
+ return 0;
+}
+
+/*
+ * Communicate the priority of this VF's queue to the PF
+ */
+int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
+ /* Convey priority of the VF */
+ mbx.data = cptvf->priority;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
+
+/*
+ * Communicate to PF that VF is UP and running
+ */
+int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_VF_UP;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
+
+/*
+ * Communicate to the PF that the VF is going DOWN
+ */
+int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_VF_DOWN;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
new file mode 100644
index 000000000000..df839b880354
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx_cptvf.h"
+#include "otx_cptvf_algs.h"
+
+/* Completion code size and initial value */
+#define COMPLETION_CODE_SIZE 8
+#define COMPLETION_CODE_INIT 0
+
+/* SG list header size in bytes */
+#define SG_LIST_HDR_SIZE 8
+
+/* Default timeout when waiting for free pending entry in us */
+#define CPT_PENTRY_TIMEOUT 1000
+#define CPT_PENTRY_STEP 50
+
+/* Default threshold for stopping and resuming sender requests */
+#define CPT_IQ_STOP_MARGIN 128
+#define CPT_IQ_RESUME_MARGIN 512
+
+#define CPT_DMA_ALIGN 128
+
+void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req)
+{
+ int i;
+
+ pr_debug("Gather list size %d\n", req->incnt);
+ for (i = 0; i < req->incnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->in[i].size, req->in[i].vptr,
+ (void *) req->in[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n",
+ req->in[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->in[i].vptr, req->in[i].size, false);
+ }
+
+ pr_debug("Scatter list size %d\n", req->outcnt);
+ for (i = 0; i < req->outcnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->out[i].size, req->out[i].vptr,
+ (void *) req->out[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->out[i].vptr, req->out[i].size, false);
+ }
+}
+
+static inline struct otx_cpt_pending_entry *get_free_pending_entry(
+ struct otx_cpt_pending_queue *q,
+ int qlen)
+{
+ struct otx_cpt_pending_entry *ent = NULL;
+
+ ent = &q->head[q->rear];
+ if (unlikely(ent->busy))
+ return NULL;
+
+ q->rear++;
+ if (unlikely(q->rear == qlen))
+ q->rear = 0;
+
+ return ent;
+}
+
+static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
+{
+ if (WARN_ON(inc > length))
+ inc = length;
+
+ index += inc;
+ if (unlikely(index >= length))
+ index -= length;
+
+ return index;
+}
+
+static inline void free_pentry(struct otx_cpt_pending_entry *pentry)
+{
+ pentry->completion_addr = NULL;
+ pentry->info = NULL;
+ pentry->callback = NULL;
+ pentry->areq = NULL;
+ pentry->resume_sender = false;
+ pentry->busy = false;
+}
+
+static inline int setup_sgio_components(struct pci_dev *pdev,
+ struct otx_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct otx_cpt_sglist_component *sg_ptr = NULL;
+ int ret = 0, i, j;
+ int components;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (likely(list[i].vptr)) {
+ list[i].dma_addr = dma_map_single(&pdev->dev,
+ list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev,
+ list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ ret = -EIO;
+ goto sg_cleanup;
+ }
+ }
+ }
+
+ components = buf_count / 4;
+ sg_ptr = (struct otx_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
+ sg_ptr++;
+ }
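+ /* Pack the remaining 1-3 buffers into a final, partially filled component */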
+ components = buf_count % 4;
+
+ switch (components) {
+ case 3:
+ sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ /* Fall through */
+ case 2:
+ sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ /* Fall through */
+ case 1:
+ sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ break;
+ default:
+ break;
+ }
+ return ret;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return ret;
+}
+
+static inline int setup_sgio_list(struct pci_dev *pdev,
+ struct otx_cpt_info_buffer **pinfo,
+ struct otx_cpt_req_info *req, gfp_t gfp)
+{
+ u32 dlen, align_dlen, info_len, rlen;
+ struct otx_cpt_info_buffer *info;
+ u16 g_sz_bytes, s_sz_bytes;
+ int align = CPT_DMA_ALIGN;
+ u32 total_mem_len;
+
+ if (unlikely(req->incnt > OTX_CPT_MAX_SG_IN_CNT ||
+ req->outcnt > OTX_CPT_MAX_SG_OUT_CNT)) {
+ dev_err(&pdev->dev, "Error too many sg components\n");
+ return -EINVAL;
+ }
+
+ g_sz_bytes = ((req->incnt + 3) / 4) *
+ sizeof(struct otx_cpt_sglist_component);
+ s_sz_bytes = ((req->outcnt + 3) / 4) *
+ sizeof(struct otx_cpt_sglist_component);
+
+ dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+ align_dlen = ALIGN(dlen, align);
+ info_len = ALIGN(sizeof(*info), align);
+ rlen = ALIGN(sizeof(union otx_cpt_res_s), align);
+ total_mem_len = align_dlen + info_len + rlen + COMPLETION_CODE_SIZE;
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ *pinfo = info;
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+
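+ /*
+ * SG list header: scatter count, gather count and two reserved
+ * words, byte-swapped to big endian below.
+ */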
+ ((u16 *)info->in_buffer)[0] = req->outcnt;
+ ((u16 *)info->in_buffer)[1] = req->incnt;
+ ((u16 *)info->in_buffer)[2] = 0;
+ ((u16 *)info->in_buffer)[3] = 0;
+ *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);
+
+ /* Setup gather (input) components */
+ if (setup_sgio_components(pdev, req->in, req->incnt,
+ &info->in_buffer[8])) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ return -EFAULT;
+ }
+
+ if (setup_sgio_components(pdev, req->out, req->outcnt,
+ &info->in_buffer[8 + g_sz_bytes])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ return -EFAULT;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, (void *)info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ return -EIO;
+ }
+ /*
+ * Get buffer for union otx_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = (u64 *)(info->in_buffer + align_dlen);
+ info->comp_baddr = info->dptr_baddr + align_dlen;
+
+ /* Create and initialize RPTR */
+ info->out_buffer = (u8 *)info->completion_addr + rlen;
+ info->rptr_baddr = info->comp_baddr + rlen;
+
+ *((u64 *) info->out_buffer) = ~((u64) COMPLETION_CODE_INIT);
+
+ return 0;
+}
+
+
+static void cpt_fill_inst(union otx_cpt_inst_s *inst,
+ struct otx_cpt_info_buffer *info,
+ struct otx_cpt_iq_cmd *cmd)
+{
+ inst->u[0] = 0x0;
+ inst->s.doneint = true;
+ inst->s.res_addr = (u64)info->comp_baddr;
+ inst->u[2] = 0x0;
+ inst->s.wq_ptr = 0;
+ inst->s.ei0 = cmd->cmd.u64;
+ inst->s.ei1 = cmd->dptr;
+ inst->s.ei2 = cmd->rptr;
+ inst->s.ei3 = cmd->cptr.u64;
+}
+
+/*
+ * On the OcteonTX platform the value used to ring the doorbell is a count
+ * of newly enqueued CPT instructions. The valid values are:
+ * 0 - one CPT instruction is enqueued, but CPT is not informed
+ * 1 - one CPT instruction is enqueued and CPT is informed
+ */
+static void cpt_send_cmd(union otx_cpt_inst_s *cptinst, struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_cmd_qinfo *qinfo = &cptvf->cqinfo;
+ struct otx_cpt_cmd_queue *queue;
+ struct otx_cpt_cmd_chunk *curr;
+ u8 *ent;
+
+ queue = &qinfo->queue[0];
+ /*
+ * cpt_send_cmd() is currently called only from within a critical
+ * section, therefore no locking is required for accessing the
+ * instruction queue.
+ */
+ ent = &queue->qhead->head[queue->idx * OTX_CPT_INST_SIZE];
+ memcpy(ent, (void *) cptinst, OTX_CPT_INST_SIZE);
+
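+ /* Each instruction slot is 64 bytes; move to the next chunk once this one is full */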
+ if (++queue->idx >= queue->qhead->size / 64) {
+ curr = queue->qhead;
+
+ if (list_is_last(&curr->nextchunk, &queue->chead))
+ queue->qhead = queue->base;
+ else
+ queue->qhead = list_next_entry(queue->qhead, nextchunk);
+ queue->idx = 0;
+ }
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+ otx_cptvf_write_vq_doorbell(cptvf, 1);
+}
+
+static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
+ struct otx_cpt_pending_queue *pqueue,
+ struct otx_cptvf *cptvf)
+{
+ struct otx_cptvf_request *cpt_req = &req->req;
+ struct otx_cpt_pending_entry *pentry = NULL;
+ union otx_cpt_ctrl_info *ctrl = &req->ctrl;
+ struct otx_cpt_info_buffer *info = NULL;
+ union otx_cpt_res_s *result = NULL;
+ struct otx_cpt_iq_cmd iq_cmd;
+ union otx_cpt_inst_s cptinst;
+ int retry, ret = 0;
+ u8 resume_sender;
+ gfp_t gfp;
+
+ gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+ ret = setup_sgio_list(pdev, &info, req, gfp);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev, "Setting up SG list failed");
+ goto request_cleanup;
+ }
+ cpt_req->dlen = info->dlen;
+
+ result = (union otx_cpt_res_s *) info->completion_addr;
+ result->s.compcode = COMPLETION_CODE_INIT;
+
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
+ while (unlikely(!pentry) && retry--) {
+ spin_unlock_bh(&pqueue->lock);
+ udelay(CPT_PENTRY_STEP);
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ }
+
+ if (unlikely(!pentry)) {
+ ret = -ENOSPC;
+ spin_unlock_bh(&pqueue->lock);
+ goto request_cleanup;
+ }
+
+ /*
+ * Check if we are close to filling the entire pending queue; if so,
+ * tell the sender to stop/sleep by returning -EBUSY. We do this only
+ * for contexts which can sleep (GFP_KERNEL).
+ */
+ if (gfp == GFP_KERNEL &&
+ pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN))
+ pentry->resume_sender = true;
+ else
+ pentry->resume_sender = false;
+ resume_sender = pentry->resume_sender;
+ pqueue->pending_count++;
+
+ pentry->completion_addr = info->completion_addr;
+ pentry->info = info;
+ pentry->callback = req->callback;
+ pentry->areq = req->areq;
+ pentry->busy = true;
+ info->pentry = pentry;
+ info->time_in = jiffies;
+ info->req = req;
+
+ /* Fill in the command */
+ iq_cmd.cmd.u64 = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
+ iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
+ iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
+ iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ iq_cmd.cmd.u64 = cpu_to_be64(iq_cmd.cmd.u64);
+ iq_cmd.dptr = info->dptr_baddr;
+ iq_cmd.rptr = info->rptr_baddr;
+ iq_cmd.cptr.u64 = 0;
+ iq_cmd.cptr.s.grp = ctrl->s.grp;
+
+ /* Fill in the CPT_INST_S type command for HW interpretation */
+ cpt_fill_inst(&cptinst, info, &iq_cmd);
+
+ /* Print debug info if enabled */
+ otx_cpt_dump_sg_list(pdev, req);
+ pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX_CPT_INST_SIZE);
+ print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX_CPT_INST_SIZE, false);
+ pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
+ print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
+ cpt_req->dlen, false);
+
+ /* Send CPT command */
+ cpt_send_cmd(&cptinst, cptvf);
+
+ /*
+ * We allocate and prepare the pending queue entry in the critical
+ * section together with submitting the CPT instruction to the CPT
+ * instruction queue, to make sure that the order of CPT requests is
+ * the same in both the pending and instruction queues.
+ */
+ spin_unlock_bh(&pqueue->lock);
+
+ ret = resume_sender ? -EBUSY : -EINPROGRESS;
+ return ret;
+
+request_cleanup:
+ do_request_cleanup(pdev, info);
+ return ret;
+}
+
+int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
+ int cpu_num)
+{
+ struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
+
+ if (!otx_cpt_device_ready(cptvf)) {
+ dev_err(&pdev->dev, "CPT device is not ready\n");
+ return -ENODEV;
+ }
+
+ if (cptvf->vftype == OTX_CPT_SE_TYPES && !req->ctrl.s.se_req) {
+ dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
+ cptvf->vfid);
+ return -EINVAL;
+ } else if (cptvf->vftype == OTX_CPT_AE_TYPES &&
+ req->ctrl.s.se_req) {
+ dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
+ cptvf->vfid);
+ return -EINVAL;
+ }
+
+ return process_request(pdev, req, &cptvf->pqinfo.queue[0], cptvf);
+}
+
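+/*
+ * Returns 1 if the request is still pending in hardware, 0 once a final
+ * completion code has been handled (with the result placed in *res_code).
+ */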
+static int cpt_process_ccode(struct pci_dev *pdev,
+ union otx_cpt_res_s *cpt_status,
+ struct otx_cpt_info_buffer *cpt_info,
+ struct otx_cpt_req_info *req, u32 *res_code)
+{
+ u8 ccode = cpt_status->s.compcode;
+ union otx_cpt_error_code ecode;
+
+ ecode.u = be64_to_cpu(*((u64 *) cpt_info->out_buffer));
+ switch (ccode) {
+ case CPT_COMP_E_FAULT:
+ dev_err(&pdev->dev,
+ "Request failed with DMA fault\n");
+ otx_cpt_dump_sg_list(pdev, req);
+ break;
+
+ case CPT_COMP_E_SWERR:
+ dev_err(&pdev->dev,
+ "Request failed with software error code %d\n",
+ ecode.s.ccode);
+ otx_cpt_dump_sg_list(pdev, req);
+ break;
+
+ case CPT_COMP_E_HWERR:
+ dev_err(&pdev->dev,
+ "Request failed with hardware error\n");
+ otx_cpt_dump_sg_list(pdev, req);
+ break;
+
+ case COMPLETION_CODE_INIT:
+ /* check for timeout */
+ if (time_after_eq(jiffies, cpt_info->time_in +
+ OTX_CPT_COMMAND_TIMEOUT * HZ)) {
+ dev_warn(&pdev->dev, "Request timed out 0x%p\n", req);
+ } else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
+ cpt_info->time_in = jiffies;
+ cpt_info->extra_time++;
+ }
+ return 1;
+
+ case CPT_COMP_E_GOOD:
+ /* Check microcode completion code */
+ if (ecode.s.ccode) {
+ /*
+ * If requested hmac is truncated and ucode returns
+ * s/g write length error then we report success
+ * because ucode writes as many bytes of calculated
+ * hmac as available in gather buffer and reports
+ * s/g write length error if number of bytes in gather
+ * buffer is less than full hmac size.
+ */
+ if (req->is_trunc_hmac &&
+ ecode.s.ccode == ERR_SCATTER_GATHER_WRITE_LENGTH) {
+ *res_code = 0;
+ break;
+ }
+
+ dev_err(&pdev->dev,
+ "Request failed with software error code 0x%x\n",
+ ecode.s.ccode);
+ otx_cpt_dump_sg_list(pdev, req);
+ break;
+ }
+
+ /* Request has been processed with success */
+ *res_code = 0;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Request returned invalid status\n");
+ break;
+ }
+
+ return 0;
+}
+
+static inline void process_pending_queue(struct pci_dev *pdev,
+ struct otx_cpt_pending_queue *pqueue)
+{
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct otx_cpt_pending_entry *resume_pentry = NULL;
+ struct otx_cpt_pending_entry *pentry = NULL;
+ struct otx_cpt_info_buffer *cpt_info = NULL;
+ union otx_cpt_res_s *cpt_status = NULL;
+ struct otx_cpt_req_info *req = NULL;
+ struct crypto_async_request *areq;
+ u32 res_code, resume_index;
+
+ while (1) {
+ spin_lock_bh(&pqueue->lock);
+ pentry = &pqueue->head[pqueue->front];
+
+ if (WARN_ON(!pentry)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ res_code = -EINVAL;
+ if (unlikely(!pentry->busy)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ if (unlikely(!pentry->callback)) {
+ dev_err(&pdev->dev, "Callback NULL\n");
+ goto process_pentry;
+ }
+
+ cpt_info = pentry->info;
+ if (unlikely(!cpt_info)) {
+ dev_err(&pdev->dev, "Pending entry post arg NULL\n");
+ goto process_pentry;
+ }
+
+ req = cpt_info->req;
+ if (unlikely(!req)) {
+ dev_err(&pdev->dev, "Request NULL\n");
+ goto process_pentry;
+ }
+
+ cpt_status = (union otx_cpt_res_s *) pentry->completion_addr;
+ if (unlikely(!cpt_status)) {
+ dev_err(&pdev->dev, "Completion address NULL\n");
+ goto process_pentry;
+ }
+
+ if (cpt_process_ccode(pdev, cpt_status, cpt_info, req,
+ &res_code)) {
+ spin_unlock_bh(&pqueue->lock);
+ return;
+ }
+ cpt_info->pdev = pdev;
+
+process_pentry:
+ /*
+ * Check if we should inform the sending side to resume. We do this
+ * CPT_IQ_RESUME_MARGIN elements in advance, before the pending queue
+ * becomes empty.
+ */
+ resume_index = modulo_inc(pqueue->front, pqueue->qlen,
+ CPT_IQ_RESUME_MARGIN);
+ resume_pentry = &pqueue->head[resume_index];
+ if (resume_pentry &&
+ resume_pentry->resume_sender) {
+ resume_pentry->resume_sender = false;
+ callback = resume_pentry->callback;
+ areq = resume_pentry->areq;
+
+ if (callback) {
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * -EINPROGRESS is an indication to the sending
+ * side that it can resume sending requests.
+ */
+ callback(-EINPROGRESS, areq, cpt_info);
+ spin_lock_bh(&pqueue->lock);
+ }
+ }
+
+ callback = pentry->callback;
+ areq = pentry->areq;
+ free_pentry(pentry);
+
+ pqueue->pending_count--;
+ pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * Call the callback after the current pending entry has been
+ * processed; we skip it if the callback pointer is NULL.
+ */
+ if (callback)
+ callback(res_code, areq, cpt_info);
+ }
+}
+
+void otx_cpt_post_process(struct otx_cptvf_wqe *wqe)
+{
+ process_pending_queue(wqe->cptvf->pdev, &wqe->cptvf->pqinfo.queue[0]);
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
new file mode 100644
index 000000000000..a4c9ff730b13
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPTVF_REQUEST_MANAGER_H
+#define __OTX_CPTVF_REQUEST_MANAGER_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/pci.h>
+#include "otx_cpt_hw_types.h"
+
+/*
+ * The maximum total number of SG buffers is 100; we divide it equally
+ * between input and output.
+ */
+#define OTX_CPT_MAX_SG_IN_CNT 50
+#define OTX_CPT_MAX_SG_OUT_CNT 50
+
+/* DMA mode direct or SG */
+#define OTX_CPT_DMA_DIRECT_DIRECT 0
+#define OTX_CPT_DMA_GATHER_SCATTER 1
+
+/* Context source CPTR or DPTR */
+#define OTX_CPT_FROM_CPTR 0
+#define OTX_CPT_FROM_DPTR 1
+
+/* CPT instruction queue alignment */
+#define OTX_CPT_INST_Q_ALIGNMENT 128
+#define OTX_CPT_MAX_REQ_SIZE 65535
+
+/* Default command timeout in seconds */
+#define OTX_CPT_COMMAND_TIMEOUT 4
+#define OTX_CPT_TIMER_HOLD 0x03F
+#define OTX_CPT_COUNT_HOLD 32
+#define OTX_CPT_TIME_IN_RESET_COUNT 5
+
+/* Minimum and maximum values for interrupt coalescing */
+#define OTX_CPT_COALESC_MIN_TIME_WAIT 0x0
+#define OTX_CPT_COALESC_MAX_TIME_WAIT ((1<<16)-1)
+#define OTX_CPT_COALESC_MIN_NUM_WAIT 0x0
+#define OTX_CPT_COALESC_MAX_NUM_WAIT ((1<<20)-1)
+
+union otx_cpt_opcode_info {
+ u16 flags;
+ struct {
+ u8 major;
+ u8 minor;
+ } s;
+};
+
+struct otx_cptvf_request {
+ u32 param1;
+ u32 param2;
+ u16 dlen;
+ union otx_cpt_opcode_info opcode;
+};
+
+struct otx_cpt_buf_ptr {
+ u8 *vptr;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+union otx_cpt_ctrl_info {
+ u32 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved0:26;
+ u32 grp:3; /* Group bits */
+ u32 dma_mode:2; /* DMA mode */
+ u32 se_req:1; /* To SE core */
+#else
+ u32 se_req:1; /* To SE core */
+ u32 dma_mode:2; /* DMA mode */
+ u32 grp:3; /* Group bits */
+ u32 reserved0:26;
+#endif
+ } s;
+};
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+union otx_cpt_iq_cmd_word0 {
+ u64 u64;
+ struct {
+ u16 opcode;
+ u16 param1;
+ u16 param2;
+ u16 dlen;
+ } s;
+};
+
+union otx_cpt_iq_cmd_word3 {
+ u64 u64;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 grp:3;
+ u64 cptr:61;
+#else
+ u64 cptr:61;
+ u64 grp:3;
+#endif
+ } s;
+};
+
+struct otx_cpt_iq_cmd {
+ union otx_cpt_iq_cmd_word0 cmd;
+ u64 dptr;
+ u64 rptr;
+ union otx_cpt_iq_cmd_word3 cptr;
+};
+
+struct otx_cpt_sglist_component {
+ union {
+ u64 len;
+ struct {
+ u16 len0;
+ u16 len1;
+ u16 len2;
+ u16 len3;
+ } s;
+ } u;
+ u64 ptr0;
+ u64 ptr1;
+ u64 ptr2;
+ u64 ptr3;
+};
+
+struct otx_cpt_pending_entry {
+ u64 *completion_addr; /* Completion address */
+ struct otx_cpt_info_buffer *info;
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ u8 resume_sender; /* Notify sender to resume sending requests */
+ u8 busy; /* Entry status (free/busy) */
+};
+
+struct otx_cpt_pending_queue {
+ struct otx_cpt_pending_entry *head; /* Head of the queue */
+ u32 front; /* Process work from here */
+ u32 rear; /* Append new work here */
+ u32 pending_count; /* Pending requests count */
+ u32 qlen; /* Queue length */
+ spinlock_t lock; /* Queue lock */
+};
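+
+/*
+ * A pending queue is a fixed-size ring: the submission path appends at
+ * @rear and the completion path consumes from @front, with both indices
+ * advancing modulo @qlen.
+ */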
+
+struct otx_cpt_req_info {
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ struct otx_cptvf_request req;/* Request information (core specific) */
+ union otx_cpt_ctrl_info ctrl;/* User control information */
+ struct otx_cpt_buf_ptr in[OTX_CPT_MAX_SG_IN_CNT];
+ struct otx_cpt_buf_ptr out[OTX_CPT_MAX_SG_OUT_CNT];
+ u8 *iv_out; /* IV to send back */
+ u16 rlen; /* Output length */
+ u8 incnt; /* Number of input buffers */
+ u8 outcnt; /* Number of output buffers */
+ u8 req_type; /* Type of request */
+ u8 is_enc; /* Is a request an encryption request */
+ u8 is_trunc_hmac;/* Is truncated hmac used */
+};
+
+struct otx_cpt_info_buffer {
+ struct otx_cpt_pending_entry *pentry;
+ struct otx_cpt_req_info *req;
+ struct pci_dev *pdev;
+ u64 *completion_addr;
+ u8 *out_buffer;
+ u8 *in_buffer;
+ dma_addr_t dptr_baddr;
+ dma_addr_t rptr_baddr;
+ dma_addr_t comp_baddr;
+ unsigned long time_in;
+ u32 dlen;
+ u32 dma_len;
+ u8 extra_time;
+};
+
+static inline void do_request_cleanup(struct pci_dev *pdev,
+ struct otx_cpt_info_buffer *info)
+{
+ struct otx_cpt_req_info *req;
+ int i;
+
+ if (!info)
+ return;
+
+ if (info->dptr_baddr)
+ dma_unmap_single(&pdev->dev, info->dptr_baddr,
+ info->dma_len, DMA_BIDIRECTIONAL);
+
+ if (info->req) {
+ req = info->req;
+ for (i = 0; i < req->outcnt; i++) {
+ if (req->out[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->out[i].dma_addr,
+ req->out[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ for (i = 0; i < req->incnt; i++) {
+ if (req->in[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->in[i].dma_addr,
+ req->in[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+ kzfree(info);
+}
+
+struct otx_cptvf_wqe;
+void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req);
+void otx_cpt_post_process(struct otx_cptvf_wqe *wqe);
+int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
+ int cpu_num);
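+
+/*
+ * Illustrative calling convention (a sketch, not code from this driver):
+ * -EINPROGRESS means the request was queued; -EBUSY means it was queued
+ * but the submitter should back off until its completion callback fires
+ * with -EINPROGRESS. The handle_submit_error() helper is hypothetical:
+ *
+ *	ret = otx_cpt_do_request(pdev, req, cpu_num);
+ *	if (ret != -EINPROGRESS && ret != -EBUSY)
+ *		handle_submit_error(ret);
+ */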
+
+#endif /* __OTX_CPTVF_REQUEST_MANAGER_H */
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 9e9f48bb7f85..bd6309e57ab8 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -107,7 +107,7 @@ struct mtk_sha_ctx {
u8 id;
u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));
- struct mtk_sha_hmac_ctx base[0];
+ struct mtk_sha_hmac_ctx base[];
};
struct mtk_sha_drv {
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 435ac1c83df9..d84530293036 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -20,6 +20,7 @@
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
@@ -611,49 +612,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
- const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
uint8_t *out_buf = sdcp->coh->sha_out_buf;
- uint8_t *src_buf;
-
struct scatterlist *src;
- unsigned int i, len, clen;
+ unsigned int i, len, clen, oft = 0;
int ret;
int fin = rctx->fini;
if (fin)
rctx->fini = 0;
- for_each_sg(req->src, src, nents, i) {
- src_buf = sg_virt(src);
- len = sg_dma_len(src);
-
- do {
- if (actx->fill + len > DCP_BUF_SZ)
- clen = DCP_BUF_SZ - actx->fill;
- else
- clen = len;
-
- memcpy(in_buf + actx->fill, src_buf, clen);
- len -= clen;
- src_buf += clen;
- actx->fill += clen;
+ src = req->src;
+ len = req->nbytes;
- /*
- * If we filled the buffer and still have some
- * more data, submit the buffer.
- */
- if (len && actx->fill == DCP_BUF_SZ) {
- ret = mxs_dcp_run_sha(req);
- if (ret)
- return ret;
- actx->fill = 0;
- rctx->init = 0;
- }
- } while (len);
+ while (len) {
+ if (actx->fill + len > DCP_BUF_SZ)
+ clen = DCP_BUF_SZ - actx->fill;
+ else
+ clen = len;
+
+ scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
+ 0);
+
+ len -= clen;
+ oft += clen;
+ actx->fill += clen;
+
+ /*
+ * If we filled the buffer and still have some
+ * more data, submit the buffer.
+ */
+ if (len && actx->fill == DCP_BUF_SZ) {
+ ret = mxs_dcp_run_sha(req);
+ if (ret)
+ return ret;
+ actx->fill = 0;
+ rctx->init = 0;
+ }
}
if (fin) {
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 91c54289124a..c6233173c612 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -37,7 +37,7 @@ struct max_sync_cop {
u32 fc;
u32 mode;
u32 triplets;
- struct msc_triplet trip[0];
+ struct msc_triplet trip[];
} __packed;
struct alg_props {
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 4f915a4ef5b0..e4072cd38585 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -159,7 +159,7 @@ struct omap_sham_reqctx {
int sg_len;
unsigned int total; /* total request */
- u8 buffer[0] OMAP_ALIGNED;
+ u8 buffer[] OMAP_ALIGNED;
};
struct omap_sham_hmac_ctx {
@@ -176,7 +176,7 @@ struct omap_sham_ctx {
/* fallback stuff */
struct crypto_shash *fallback;
- struct omap_sham_hmac_ctx base[0];
+ struct omap_sham_hmac_ctx base[];
};
#define OMAP_SHAM_QUEUE_LENGTH 10
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 594d6b1695d5..62c6fe88b212 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -474,7 +474,7 @@ static struct skcipher_alg cbc_aes_alg = {
};
static const struct x86_cpu_id padlock_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
+ X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index c826abe79e79..a697a4a3f2d0 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -490,7 +490,7 @@ static struct shash_alg sha256_alg_nano = {
};
static const struct x86_cpu_id padlock_sha_ids[] = {
- X86_FEATURE_MATCH(X86_FEATURE_PHE),
+ X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 833bb1d3a11b..e14d3dd291f0 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -97,7 +97,7 @@ struct qat_alg_cd {
struct icp_qat_hw_cipher_algo_blk cipher;
struct icp_qat_hw_auth_algo_blk hash;
} qat_enc_cd;
- struct qat_dec { /* Decrytp content desc */
+ struct qat_dec { /* Decrypt content desc */
struct icp_qat_hw_auth_algo_blk hash;
struct icp_qat_hw_cipher_algo_blk cipher;
} qat_dec_cd;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 3852d31ce0a4..fb504cee0305 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -250,8 +250,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
INIT_LIST_HEAD(&accel_dev->crypto_list);
- strlcpy(key, ADF_NUM_CY, sizeof(key));
- if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ if (adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val))
return -EFAULT;
if (kstrtoul(val, 0, &num_inst))
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 629e7f34dc09..5006e74c40cd 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,8 +15,6 @@
#include "regs-v5.h"
#include "sha.h"
-#define QCE_SECTOR_SIZE 512
-
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
return readl(qce->base + offset);
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 282d4317470d..9f989cba0f1b 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -12,6 +12,9 @@
#include <crypto/hash.h>
#include <crypto/internal/skcipher.h>
+/* XTS data unit size */
+#define QCE_SECTOR_SIZE 512
+
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
#define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 7da893dc00e7..46db5bf366b4 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -48,9 +48,10 @@ void qce_dma_release(struct qce_dma_data *dma)
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
- int max_ents)
+ unsigned int max_len)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
+ unsigned int new_len;
while (sg) {
if (!sg_page(sg))
@@ -61,13 +62,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg && max_ents) {
- sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
- new_sgl->offset);
+ while (new_sgl && sg && max_len) {
+ new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
+ sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
- max_ents--;
+ max_len -= new_len;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index ed25a0d9829e..786402169360 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -43,6 +43,6 @@ void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
- int max_ents);
+ unsigned int max_len);
#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 4217b745f124..9412433f3b21 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
@@ -12,6 +13,13 @@
#include "cipher.h"
+static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
+module_param(aes_sw_max_len, uint, 0644);
+MODULE_PARM_DESC(aes_sw_max_len,
+ "Only use hardware for AES requests larger than this "
+ "[0=always use hardware; anything <16 breaks AES-GCM; default="
+ __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");
+
static LIST_HEAD(skcipher_algs);
static void qce_skcipher_done(void *data)
@@ -97,13 +105,14 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
+ QCE_RESULT_BUF_SZ);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -165,15 +174,10 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
+ memcpy(ctx->enc_key, key, keylen);
break;
- default:
- goto fallback;
}
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
- return 0;
-fallback:
ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
@@ -223,8 +227,14 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
- keylen != AES_KEYSIZE_256) {
+ /*
+ * qce hangs when the AES-XTS request length is larger than
+ * QCE_SECTOR_SIZE and is not a multiple of it; pass such requests
+ * to the fallback.
+ */
+ if (IS_AES(rctx->flags) &&
+ (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+ req->cryptlen <= aes_sw_max_len) ||
+ (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
+ req->cryptlen % QCE_SECTOR_SIZE))) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index d66e20a2f54c..2a16800d2579 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -369,7 +369,7 @@ struct s5p_hash_reqctx {
bool error;
u32 bufcnt;
- u8 buffer[0];
+ u8 buffer[];
};
/**
diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore
index af4a7ce4738d..7aa71d83f739 100644
--- a/drivers/crypto/vmx/.gitignore
+++ b/drivers/crypto/vmx/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
aesp8-ppc.S
ghashp8-ppc.S
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
new file mode 100644
index 000000000000..534e32daf76a
--- /dev/null
+++ b/drivers/crypto/xilinx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
new file mode 100644
index 000000000000..09f7f468eef8
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP AES Driver.
+ * Copyright (c) 2020 Xilinx Inc.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/scatterwalk.h>
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_DMA_BIT_MASK 32U
+
+#define ZYNQMP_AES_KEY_SIZE AES_KEYSIZE_256
+#define ZYNQMP_AES_AUTH_SIZE 16U
+#define ZYNQMP_KEY_SRC_SEL_KEY_LEN 1U
+#define ZYNQMP_AES_BLK_SIZE 1U
+#define ZYNQMP_AES_MIN_INPUT_BLK_SIZE 4U
+#define ZYNQMP_AES_WORD_LEN 4U
+
+#define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR 0x01
+#define ZYNQMP_AES_WRONG_KEY_SRC_ERR 0x13
+#define ZYNQMP_AES_PUF_NOT_PROGRAMMED 0xE300
+
+enum zynqmp_aead_op {
+ ZYNQMP_AES_DECRYPT = 0,
+ ZYNQMP_AES_ENCRYPT
+};
+
+enum zynqmp_aead_keysrc {
+ ZYNQMP_AES_KUP_KEY = 0,
+ ZYNQMP_AES_DEV_KEY,
+ ZYNQMP_AES_PUF_KEY
+};
+
+struct zynqmp_aead_drv_ctx {
+ union {
+ struct aead_alg aead;
+ } alg;
+ struct device *dev;
+ struct crypto_engine *engine;
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+struct zynqmp_aead_hw_req {
+ u64 src;
+ u64 iv;
+ u64 key;
+ u64 dst;
+ u64 size;
+ u64 op;
+ u64 keysrc;
+};
+
+struct zynqmp_aead_tfm_ctx {
+ struct crypto_engine_ctx engine_ctx;
+ struct device *dev;
+ u8 key[ZYNQMP_AES_KEY_SIZE];
+ u8 *iv;
+ u32 keylen;
+ u32 authsize;
+ enum zynqmp_aead_keysrc keysrc;
+ struct crypto_aead *fbk_cipher;
+};
+
+struct zynqmp_aead_req_ctx {
+ enum zynqmp_aead_op op;
+};
+
+static int zynqmp_aes_aead_cipher(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct device *dev = tfm_ctx->dev;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct zynqmp_aead_hw_req *hwreq;
+ dma_addr_t dma_addr_data, dma_addr_hw_req;
+ unsigned int data_size;
+ unsigned int status;
+ size_t dma_size;
+ char *kbuf;
+ int err;
+
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+
+ if (!drv_ctx->eemi_ops->aes)
+ return -ENOTSUPP;
+
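+ /*
+ * Single DMA buffer layout: payload, then IV, then (only for the
+ * KUP key source) the user-supplied key.
+ */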
+ if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
+ dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
+ + GCM_AES_IV_SIZE;
+ else
+ dma_size = req->cryptlen + GCM_AES_IV_SIZE;
+
+ kbuf = dma_alloc_coherent(dev, dma_size, &dma_addr_data, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ hwreq = dma_alloc_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
+ &dma_addr_hw_req, GFP_KERNEL);
+ if (!hwreq) {
+ dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+ return -ENOMEM;
+ }
+
+ data_size = req->cryptlen;
+ scatterwalk_map_and_copy(kbuf, req->src, 0, req->cryptlen, 0);
+ memcpy(kbuf + data_size, req->iv, GCM_AES_IV_SIZE);
+
+ hwreq->src = dma_addr_data;
+ hwreq->dst = dma_addr_data;
+ hwreq->iv = hwreq->src + data_size;
+ hwreq->keysrc = tfm_ctx->keysrc;
+ hwreq->op = rq_ctx->op;
+
+ if (hwreq->op == ZYNQMP_AES_ENCRYPT)
+ hwreq->size = data_size;
+ else
+ hwreq->size = data_size - ZYNQMP_AES_AUTH_SIZE;
+
+ if (hwreq->keysrc == ZYNQMP_AES_KUP_KEY) {
+ memcpy(kbuf + data_size + GCM_AES_IV_SIZE,
+ tfm_ctx->key, ZYNQMP_AES_KEY_SIZE);
+
+ hwreq->key = hwreq->src + data_size + GCM_AES_IV_SIZE;
+ } else {
+ hwreq->key = 0;
+ }
+
+ drv_ctx->eemi_ops->aes(dma_addr_hw_req, &status);
+
+ if (status) {
+ switch (status) {
+ case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
+ dev_err(dev, "ERROR: Gcm Tag mismatch\n");
+ break;
+ case ZYNQMP_AES_WRONG_KEY_SRC_ERR:
+ dev_err(dev, "ERROR: Wrong KeySrc, enable secure mode\n");
+ break;
+ case ZYNQMP_AES_PUF_NOT_PROGRAMMED:
+ dev_err(dev, "ERROR: PUF is not registered\n");
+ break;
+ default:
+ dev_err(dev, "ERROR: Unknown error\n");
+ break;
+ }
+ err = -status;
+ } else {
+ if (hwreq->op == ZYNQMP_AES_ENCRYPT)
+ data_size = data_size + ZYNQMP_AES_AUTH_SIZE;
+ else
+ data_size = data_size - ZYNQMP_AES_AUTH_SIZE;
+
+ sg_copy_from_buffer(req->dst, sg_nents(req->dst),
+ kbuf, data_size);
+ err = 0;
+ }
+
+ if (kbuf) {
+ memzero_explicit(kbuf, dma_size);
+ dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+ }
+ if (hwreq) {
+ memzero_explicit(hwreq, sizeof(struct zynqmp_aead_hw_req));
+ dma_free_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
+ hwreq, dma_addr_hw_req);
+ }
+ return err;
+}
+
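+/*
+ * The hardware engine only handles full 16-byte tags, zero associated
+ * data, word-aligned payloads of at least one word and (for user-supplied
+ * keys) 256-bit keys; anything else is routed to the software fallback.
+ */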
+static int zynqmp_fallback_check(struct zynqmp_aead_tfm_ctx *tfm_ctx,
+ struct aead_request *req)
+{
+ int need_fallback = 0;
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ if (tfm_ctx->authsize != ZYNQMP_AES_AUTH_SIZE)
+ need_fallback = 1;
+
+ if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY &&
+ tfm_ctx->keylen != ZYNQMP_AES_KEY_SIZE) {
+ need_fallback = 1;
+ }
+ if (req->assoclen != 0 ||
+ req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE) {
+ need_fallback = 1;
+ }
+ if ((req->cryptlen % ZYNQMP_AES_WORD_LEN) != 0)
+ need_fallback = 1;
+
+ if (rq_ctx->op == ZYNQMP_AES_DECRYPT &&
+ req->cryptlen <= ZYNQMP_AES_AUTH_SIZE) {
+ need_fallback = 1;
+ }
+ return need_fallback;
+}
+
+static int zynqmp_handle_aes_req(struct crypto_engine *engine,
+ void *req)
+{
+ struct aead_request *areq =
+ container_of(req, struct aead_request, base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(areq);
+ struct aead_request *subreq = aead_request_ctx(req);
+ int need_fallback;
+ int err;
+
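+ /*
+ * subreq shares this request's context area, which
+ * zynqmp_aes_aead_init() sized to hold the larger of the driver's
+ * request context and a fallback aead_request.
+ */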
+ need_fallback = zynqmp_fallback_check(tfm_ctx, areq);
+
+ if (need_fallback) {
+ aead_request_set_tfm(subreq, tfm_ctx->fbk_cipher);
+
+ aead_request_set_callback(subreq, areq->base.flags,
+ NULL, NULL);
+ aead_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ aead_request_set_ad(subreq, areq->assoclen);
+ if (rq_ctx->op == ZYNQMP_AES_ENCRYPT)
+ err = crypto_aead_encrypt(subreq);
+ else
+ err = crypto_aead_decrypt(subreq);
+ } else {
+ err = zynqmp_aes_aead_cipher(areq);
+ }
+
+ crypto_finalize_aead_request(engine, areq, err);
+ return 0;
+}
+
+static int zynqmp_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ unsigned char keysrc;
+
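+ /*
+ * A one-byte "key" is a key-source selector: it picks one of the
+ * hardware key sources (KUP, device or PUF key) rather than carrying
+ * key material itself.
+ */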
+ if (keylen == ZYNQMP_KEY_SRC_SEL_KEY_LEN) {
+ keysrc = *key;
+ if (keysrc == ZYNQMP_AES_KUP_KEY ||
+ keysrc == ZYNQMP_AES_DEV_KEY ||
+ keysrc == ZYNQMP_AES_PUF_KEY) {
+ tfm_ctx->keysrc = (enum zynqmp_aead_keysrc)keysrc;
+ } else {
+ tfm_ctx->keylen = keylen;
+ }
+ } else {
+ tfm_ctx->keylen = keylen;
+ if (keylen == ZYNQMP_AES_KEY_SIZE) {
+ tfm_ctx->keysrc = ZYNQMP_AES_KUP_KEY;
+ memcpy(tfm_ctx->key, key, keylen);
+ }
+ }
+
+ tfm_ctx->fbk_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ tfm_ctx->fbk_cipher->base.crt_flags |= (aead->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
+}
+
+static int zynqmp_aes_aead_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+
+ tfm_ctx->authsize = authsize;
+ return crypto_aead_setauthsize(tfm_ctx->fbk_cipher, authsize);
+}
+
+static int zynqmp_aes_aead_encrypt(struct aead_request *req)
+{
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ rq_ctx->op = ZYNQMP_AES_ENCRYPT;
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+}
+
+static int zynqmp_aes_aead_decrypt(struct aead_request *req)
+{
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ rq_ctx->op = ZYNQMP_AES_DECRYPT;
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+}
+
+static int zynqmp_aes_aead_init(struct crypto_aead *aead)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ tfm_ctx->dev = drv_ctx->dev;
+
+ tfm_ctx->engine_ctx.op.do_one_request = zynqmp_handle_aes_req;
+ tfm_ctx->engine_ctx.op.prepare_request = NULL;
+ tfm_ctx->engine_ctx.op.unprepare_request = NULL;
+
+ tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.cra_name,
+ 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(tfm_ctx->fbk_cipher)) {
+ pr_err("%s() Error: failed to allocate fallback for %s\n",
+ __func__, drv_ctx->alg.aead.base.cra_name);
+ return PTR_ERR(tfm_ctx->fbk_cipher);
+ }
+
+ crypto_aead_set_reqsize(aead,
+ max(sizeof(struct zynqmp_aead_req_ctx),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(tfm_ctx->fbk_cipher)));
+ return 0;
+}
+
+static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+
+ if (tfm_ctx->fbk_cipher) {
+ crypto_free_aead(tfm_ctx->fbk_cipher);
+ tfm_ctx->fbk_cipher = NULL;
+ }
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_aead_tfm_ctx));
+}
+
+static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
+ .alg.aead = {
+ .setkey = zynqmp_aes_aead_setkey,
+ .setauthsize = zynqmp_aes_aead_setauthsize,
+ .encrypt = zynqmp_aes_aead_encrypt,
+ .decrypt = zynqmp_aes_aead_decrypt,
+ .init = zynqmp_aes_aead_init,
+ .exit = zynqmp_aes_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = ZYNQMP_AES_AUTH_SIZE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "xilinx-zynqmp-aes-gcm",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = ZYNQMP_AES_BLK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_aead_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+static int zynqmp_aes_aead_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int err;
+
+ /* ZynqMP AES driver supports only one instance */
+ if (!aes_drv_ctx.dev)
+ aes_drv_ctx.dev = dev;
+ else
+ return -ENODEV;
+
+ aes_drv_ctx.eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(aes_drv_ctx.eemi_ops)) {
+ dev_err(dev, "Failed to get ZynqMP EEMI interface\n");
+ return PTR_ERR(aes_drv_ctx.eemi_ops);
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ if (err < 0) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return err;
+ }
+
+ aes_drv_ctx.engine = crypto_engine_alloc_init(dev, 1);
+ if (!aes_drv_ctx.engine) {
+ dev_err(dev, "Cannot alloc AES engine\n");
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ err = crypto_engine_start(aes_drv_ctx.engine);
+ if (err) {
+ dev_err(dev, "Cannot start AES engine\n");
+ goto err_engine;
+ }
+
+ err = crypto_register_aead(&aes_drv_ctx.alg.aead);
+ if (err < 0) {
+ dev_err(dev, "Failed to register AEAD alg.\n");
+ goto err_engine;
+ }
+ return 0;
+
+err_engine:
+ if (aes_drv_ctx.engine)
+ crypto_engine_exit(aes_drv_ctx.engine);
+
+ return err;
+}
+
+static int zynqmp_aes_aead_remove(struct platform_device *pdev)
+{
+ crypto_engine_exit(aes_drv_ctx.engine);
+ crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_aes_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-aes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
+
+static struct platform_driver zynqmp_aes_driver = {
+ .probe = zynqmp_aes_aead_probe,
+ .remove = zynqmp_aes_aead_remove,
+ .driver = {
+ .name = "zynqmp-aes",
+ .of_match_table = zynqmp_aes_dt_ids,
+ },
+};
+
+module_platform_driver(zynqmp_aes_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 46e46047a1f7..df238c8b6ef2 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -421,8 +421,10 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
* device outside of mmap of the resulting character device.
*/
dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
- if (!dax_dev)
+ if (IS_ERR(dax_dev)) {
+ rc = PTR_ERR(dax_dev);
goto err;
+ }
/* a device_dax instance is dead while the driver is not attached */
kill_dax(dax_dev);
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 3d0a7e702c94..1e678bdf5aed 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -22,6 +22,7 @@ int dev_dax_kmem_probe(struct device *dev)
resource_size_t kmem_size;
resource_size_t kmem_end;
struct resource *new_res;
+ const char *new_res_name;
int numa_node;
int rc;
@@ -48,11 +49,16 @@ int dev_dax_kmem_probe(struct device *dev)
kmem_size &= ~(memory_block_size_bytes() - 1);
kmem_end = kmem_start + kmem_size;
- /* Region is permanently reserved. Hot-remove not yet implemented. */
- new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev));
+ new_res_name = kstrdup(dev_name(dev), GFP_KERNEL);
+ if (!new_res_name)
+ return -ENOMEM;
+
+ /* Region is permanently reserved if hotremove fails. */
+ new_res = request_mem_region(kmem_start, kmem_size, new_res_name);
if (!new_res) {
dev_warn(dev, "could not reserve region [%pa-%pa]\n",
&kmem_start, &kmem_end);
+ kfree(new_res_name);
return -EBUSY;
}
@@ -63,12 +69,12 @@ int dev_dax_kmem_probe(struct device *dev)
* unknown to us that will break add_memory() below.
*/
new_res->flags = IORESOURCE_SYSTEM_RAM;
- new_res->name = dev_name(dev);
rc = add_memory(numa_node, new_res->start, resource_size(new_res));
if (rc) {
release_resource(new_res);
kfree(new_res);
+ kfree(new_res_name);
return rc;
}
dev_dax->dax_kmem_res = new_res;
@@ -83,6 +89,7 @@ static int dev_dax_kmem_remove(struct device *dev)
struct resource *res = dev_dax->dax_kmem_res;
resource_size_t kmem_start = res->start;
resource_size_t kmem_size = resource_size(res);
+ const char *res_name = res->name;
int rc;
/*
@@ -102,6 +109,7 @@ static int dev_dax_kmem_remove(struct device *dev)
/* Release and free dax resources */
release_resource(res);
kfree(res);
+ kfree(res_name);
dev_dax->dax_kmem_res = NULL;
return 0;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 0aa4b6bc5101..8e32345be0f7 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -344,6 +344,23 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);
+int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ if (!dax_alive(dax_dev))
+ return -ENXIO;
+ /*
+ * There are no callers that want to zero more than one page as of now.
+ * Once users are there, this check can be removed after the
+ * device mapper code has been updated to split ranges across targets.
+ */
+ if (nr_pages != 1)
+ return -EIO;
+
+ return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
+}
+EXPORT_SYMBOL_GPL(dax_zero_page_range);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
@@ -551,9 +568,16 @@ struct dax_device *alloc_dax(void *private, const char *__host,
dev_t devt;
int minor;
+ if (ops && !ops->zero_page_range) {
+ pr_debug("%s: error: device does not provide dax"
+ " operation zero_page_range()\n",
+ __host ? __host : "Unknown");
+ return ERR_PTR(-EINVAL);
+ }
+
host = kstrdup(__host, GFP_KERNEL);
if (__host && !host)
- return NULL;
+ return ERR_PTR(-ENOMEM);
minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
if (minor < 0)
@@ -576,7 +600,7 @@ struct dax_device *alloc_dax(void *private, const char *__host,
ida_simple_remove(&dax_minor_ida, minor);
err_minor:
kfree(host);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 7dcf2093e531..6fecd11dafdd 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -550,14 +550,14 @@ out:
EXPORT_SYMBOL(devfreq_monitor_resume);
/**
- * devfreq_interval_update() - Update device devfreq monitoring interval
+ * devfreq_update_interval() - Update device devfreq monitoring interval
* @devfreq: the devfreq instance.
* @delay: new polling interval to be set.
*
* Helper function to set new load monitoring polling interval. Function
- * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
*/
-void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
{
unsigned int cur_delay = devfreq->profile->polling_ms;
unsigned int new_delay = *delay;
@@ -597,7 +597,7 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
out:
mutex_unlock(&devfreq->lock);
}
-EXPORT_SYMBOL(devfreq_interval_update);
+EXPORT_SYMBOL(devfreq_update_interval);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
@@ -705,13 +705,13 @@ static void devfreq_dev_release(struct device *dev)
if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
- if (err)
+ if (err < 0)
dev_warn(dev->parent,
"Failed to remove max_freq request: %d\n", err);
}
if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
- if (err)
+ if (err < 0)
dev_warn(dev->parent,
"Failed to remove min_freq request: %d\n", err);
}
@@ -1424,7 +1424,7 @@ static ssize_t polling_interval_store(struct device *dev,
if (ret != 1)
return -EINVAL;
- df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
+ df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
ret = count;
return ret;
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index dc7533ccc3db..ae4d0cc18359 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -18,7 +18,7 @@
/* Devfreq events */
#define DEVFREQ_GOV_START 0x1
#define DEVFREQ_GOV_STOP 0x2
-#define DEVFREQ_GOV_INTERVAL 0x3
+#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3
#define DEVFREQ_GOV_SUSPEND 0x4
#define DEVFREQ_GOV_RESUME 0x5
@@ -30,7 +30,7 @@
* @node: list node - contains registered devfreq governors
* @name: Governor's name
* @immutable: Immutable flag for governor. If the value is 1,
- * this govenror is never changeable to other governor.
+ * this governor is never changeable to other governor.
* @interrupt_driven: Devfreq core won't schedule polling work for this
* governor if value is set to 1.
* @get_target_freq: Returns desired operating frequency for the device.
@@ -57,17 +57,16 @@ struct devfreq_governor {
unsigned int event, void *data);
};
-extern void devfreq_monitor_start(struct devfreq *devfreq);
-extern void devfreq_monitor_stop(struct devfreq *devfreq);
-extern void devfreq_monitor_suspend(struct devfreq *devfreq);
-extern void devfreq_monitor_resume(struct devfreq *devfreq);
-extern void devfreq_interval_update(struct devfreq *devfreq,
- unsigned int *delay);
+void devfreq_monitor_start(struct devfreq *devfreq);
+void devfreq_monitor_stop(struct devfreq *devfreq);
+void devfreq_monitor_suspend(struct devfreq *devfreq);
+void devfreq_monitor_resume(struct devfreq *devfreq);
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
-extern int devfreq_add_governor(struct devfreq_governor *governor);
-extern int devfreq_remove_governor(struct devfreq_governor *governor);
+int devfreq_add_governor(struct devfreq_governor *governor);
+int devfreq_remove_governor(struct devfreq_governor *governor);
-extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
static inline int devfreq_update_stats(struct devfreq *df)
{
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index 3d809f228619..1b314e1df028 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -96,8 +96,8 @@ static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
devfreq_monitor_stop(devfreq);
break;
- case DEVFREQ_GOV_INTERVAL:
- devfreq_interval_update(devfreq, (unsigned int *)data);
+ case DEVFREQ_GOV_UPDATE_INTERVAL:
+ devfreq_update_interval(devfreq, (unsigned int *)data);
break;
case DEVFREQ_GOV_SUSPEND:
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index af94942fcf95..0fd6c4851071 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -131,7 +131,7 @@ static int devfreq_userspace_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_userspace = {
- .name = "userspace",
+ .name = DEVFREQ_GOV_USERSPACE,
.get_target_freq = devfreq_userspace_func,
.event_handler = devfreq_userspace_handler,
};
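New or out-of-tree governors follow the same mechanical conversion shown for simple_ondemand above; a hedged sketch of the handler shape after the rename (the governor itself is hypothetical):

static int my_governor_event_handler(struct devfreq *devfreq,
				     unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_UPDATE_INTERVAL:
		/* was DEVFREQ_GOV_INTERVAL + devfreq_interval_update() */
		devfreq_update_interval(devfreq, (unsigned int *)data);
		break;
	}
	return 0;
}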
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 0b65f89d74d5..28b2c7ca416e 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -734,7 +734,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
devfreq_monitor_stop(devfreq);
break;
- case DEVFREQ_GOV_INTERVAL:
+ case DEVFREQ_GOV_UPDATE_INTERVAL:
/*
* ACTMON hardware supports up to 256 milliseconds for the
* sampling period.
@@ -745,7 +745,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
}
tegra_actmon_pause(tegra);
- devfreq_interval_update(devfreq, new_delay);
+ devfreq_update_interval(devfreq, new_delay);
ret = tegra_actmon_resume(tegra);
break;
diff --git a/drivers/dio/dio-driver.c b/drivers/dio/dio-driver.c
index a7b174ef4c85..69c46935ffc7 100644
--- a/drivers/dio/dio-driver.c
+++ b/drivers/dio/dio-driver.c
@@ -25,7 +25,7 @@
* dio_device_id structure or %NULL if there is no match.
*/
-const struct dio_device_id *
+static const struct dio_device_id *
dio_match_device(const struct dio_device_id *ids,
const struct dio_dev *d)
{
@@ -105,9 +105,9 @@ void dio_unregister_driver(struct dio_driver *drv)
* @dev: the DIO device structure to match against
* @drv: the &device_driver that points to the array of DIO device id structures to search
*
- * Used by a driver to check whether a DIO device present in the
- * system is in its list of supported devices. Returns the matching
- * dio_device_id structure or %NULL if there is no match.
+ * Used by the driver core to check whether a DIO device present in the
+ * system is in a driver's list of supported devices. Returns 1 if supported,
+ * and 0 if there is no match.
*/
static int dio_bus_match(struct device *dev, struct device_driver *drv)
@@ -137,7 +137,6 @@ static int __init dio_driver_init(void)
postcore_initcall(dio_driver_init);
-EXPORT_SYMBOL(dio_match_device);
EXPORT_SYMBOL(dio_register_driver);
EXPORT_SYMBOL(dio_unregister_driver);
EXPORT_SYMBOL(dio_bus_type);
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 0613bb7770f5..9626673f1d83 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -6,7 +6,7 @@ config SYNC_FILE
default n
select DMA_SHARED_BUFFER
---help---
- The Sync File Framework adds explicit syncronization via
+ The Sync File Framework adds explicit synchronization via
userspace. It enables sending/receiving 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.
@@ -39,6 +39,17 @@ config UDMABUF
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
+config DMABUF_MOVE_NOTIFY
+ bool "Move notify between drivers (EXPERIMENTAL)"
+ default n
+ help
+ Don't pin buffers if the dynamic DMA-buf interface is available on
+ both the exporter as well as the importer. This fixes a security
+ problem where userspace is able to pin unrestricted amounts of memory
+ through DMA-buf.
+ This is marked experimental because we don't yet have a consistent
+ execution context and memory management between drivers.
+
config DMABUF_SELFTESTS
tristate "Selftests for the dma-buf interfaces"
default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index c343c7c10b4c..07df88f2e305 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file,
return ret;
- case DMA_BUF_SET_NAME:
+ case DMA_BUF_SET_NAME_A:
+ case DMA_BUF_SET_NAME_B:
return dma_buf_set_name(dmabuf, (const char __user *)arg);
default:
@@ -525,7 +526,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
}
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- exp_info->ops->dynamic_mapping))
+ (exp_info->ops->pin || exp_info->ops->unpin)))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
if (!try_module_get(exp_info->owner))
@@ -652,7 +656,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* calls attach() of dma_buf_ops to allow device-specific attach functionality
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
- * @dynamic_mapping: [in] calling convention for map/unmap
+ * @importer_ops: [in] importer operations for the attachment
+ * @importer_priv: [in] importer private pointer for the attachment
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@@ -668,7 +673,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
*/
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
- bool dynamic_mapping)
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv)
{
struct dma_buf_attachment *attach;
int ret;
@@ -676,13 +682,17 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
+ if (WARN_ON(importer_ops && !importer_ops->move_notify))
+ return ERR_PTR(-EINVAL);
+
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
if (!attach)
return ERR_PTR(-ENOMEM);
attach->dev = dev;
attach->dmabuf = dmabuf;
- attach->dynamic_mapping = dynamic_mapping;
+ attach->importer_ops = importer_ops;
+ attach->importer_priv = importer_priv;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
@@ -701,15 +711,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
dma_buf_is_dynamic(dmabuf)) {
struct sg_table *sgt;
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_unlock;
+ }
sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
- goto err_unlock;
+ goto err_unpin;
}
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
@@ -723,6 +737,10 @@ err_attach:
kfree(attach);
return ERR_PTR(ret);
+err_unpin:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_buf_unpin(attach);
+
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
@@ -743,7 +761,7 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
{
- return dma_buf_dynamic_attach(dmabuf, dev, false);
+ return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
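For importers, the new signature means supplying an ops table, whose move_notify is mandatory per the WARN_ON above, plus a private pointer; a hedged sketch with hypothetical importer names (the callback body is sketched after dma_buf_move_notify() below):

static const struct dma_buf_attach_ops my_importer_ops = {
	.move_notify = my_move_notify,	/* required once ops are supplied */
};

	/* inside the importer's attach path */
	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, imp);
	if (IS_ERR(attach))
		return PTR_ERR(attach);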
@@ -766,8 +784,10 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dma_buf_unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
+ }
}
dma_resv_lock(dmabuf->resv, NULL);
@@ -781,6 +801,44 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
+ * dma_buf_pin - Lock down the DMA-buf
+ *
+ * @attach: [in] attachment which should be pinned
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+ int ret = 0;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->pin)
+ ret = dmabuf->ops->pin(attach);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_pin);
+
+/**
+ * dma_buf_unpin - Remove lock from DMA-buf
+ *
+ * @attach: [in] attachment which should be unpinned
+ */
+void dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->unpin)
+ dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_unpin);
+
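Exporters opt in by implementing pin() and unpin() as a pair; dma_buf_export() above rejects one without the other, and pin support is mutually exclusive with cache_sgt_mapping. A hedged sketch of the exporter hookup, all my_* names hypothetical:

static int my_exporter_pin(struct dma_buf_attachment *attach)
{
	/* called with attach->dmabuf->resv held; make the storage unmovable */
	return my_backing_storage_pin(attach->dmabuf->priv);
}

static void my_exporter_unpin(struct dma_buf_attachment *attach)
{
	my_backing_storage_unpin(attach->dmabuf->priv);
}

static const struct dma_buf_ops my_exporter_ops = {
	.pin		= my_exporter_pin,
	.unpin		= my_exporter_unpin,
	.map_dma_buf	= my_map_dma_buf,
	.unmap_dma_buf	= my_unmap_dma_buf,
	.release	= my_release,
};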
+/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
* mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
* dma_buf_ops.
@@ -799,6 +857,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
+ int r;
might_sleep();
@@ -820,13 +879,23 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ r = dma_buf_pin(attach);
+ if (r)
+ return ERR_PTR(r);
+ }
+ }
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ dma_buf_unpin(attach);
+
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
attach->sgt = sg_table;
attach->dir = direction;
@@ -865,10 +934,34 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+
+ if (dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
/**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf: [in] buffer which is moving
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+ struct dma_buf_attachment *attach;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (attach->importer_ops)
+ attach->importer_ops->move_notify(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+
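And the matching importer callback; the reservation object is already locked when move_notify() runs, so the callback only invalidates cached state and defers re-mapping. A hedged sketch, names hypothetical:

static void my_move_notify(struct dma_buf_attachment *attach)
{
	struct my_importer *imp = attach->importer_priv;

	/* attach->dmabuf->resv is held by the caller */
	my_invalidate_cached_mapping(imp);
	/* re-create the mapping lazily, e.g. on the next submission */
}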
+/**
* DOC: cpu access
*
 * There are multiple reasons for supporting CPU access to a dma buffer object:
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5142da401db3..023db6883d05 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -241,7 +241,8 @@ config FSL_RAID
config HISI_DMA
tristate "HiSilicon DMA Engine support"
- depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+ depends on ARM64 || COMPILE_TEST
+ depends on PCI_MSI
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -616,8 +617,8 @@ config TXX9_DMAC
integrated in chips such as the Toshiba TX4927/38/39.
config TEGRA20_APB_DMA
- bool "NVIDIA Tegra20 APB DMA support"
- depends on ARCH_TEGRA
+ tristate "NVIDIA Tegra20 APB DMA support"
+ depends on ARCH_TEGRA || COMPILE_TEST
select DMA_ENGINE
help
Support for the NVIDIA Tegra20 APB DMA controller driver. The
@@ -658,6 +659,17 @@ config UNIPHIER_MDMAC
UniPhier platform. This DMA controller is used as the external
DMA engine of the SD/eMMC controllers of the LD4, Pro4, sLD8 SoCs.
+config UNIPHIER_XDMAC
+ tristate "UniPhier XDMAC support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the XDMAC (external DMA controller) on the
+ UniPhier platform. This DMA controller can transfer data from
+ memory to memory, memory to peripheral and peripheral to memory.
+
config XGENE_DMA
tristate "APM X-Gene DMA support"
depends on ARCH_XGENE || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 1d908394fbea..e60f81331d4c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
+obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 672c73b4a2d4..73a20780744b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -146,17 +146,8 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
"scanned %u descriptors on freelist\n", i);
/* no more descriptor available in initial pool: create one more */
- if (!ret) {
- ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
- if (ret) {
- spin_lock_irqsave(&atchan->lock, flags);
- atchan->descs_allocated++;
- spin_unlock_irqrestore(&atchan->lock, flags);
- } else {
- dev_err(chan2dev(&atchan->chan_common),
- "not enough descriptors available\n");
- }
- }
+ if (!ret)
+ ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);
return ret;
}
@@ -435,17 +426,19 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
* atc_chain_complete - finish work for one transaction chain
* @atchan: channel we work on
* @desc: descriptor at the head of the chain we want do complete
- *
- * Called with atchan->lock held and bh disabled */
+ */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+ unsigned long flags;
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
+ spin_lock_irqsave(&atchan->lock, flags);
+
/* mark the descriptor as complete for non cyclic cases only */
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);
@@ -462,16 +455,13 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
dma_descriptor_unmap(txd);
/* for cyclic transfers,
* no need to replay callback function while stopping */
- if (!atc_chan_is_cyclic(atchan)) {
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
+ if (!atc_chan_is_cyclic(atchan))
dmaengine_desc_get_callback_invoke(txd, NULL);
- }
dma_run_dependencies(txd);
}
@@ -489,9 +479,12 @@ static void atc_complete_all(struct at_dma_chan *atchan)
{
struct at_desc *desc, *_desc;
LIST_HEAD(list);
+ unsigned long flags;
dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
+ spin_lock_irqsave(&atchan->lock, flags);
+
/*
* Submit queued descriptors ASAP, i.e. before we go through
* the completed ones.
@@ -503,6 +496,8 @@ static void atc_complete_all(struct at_dma_chan *atchan)
/* empty queue list by moving descriptors (if any) to active_list */
list_splice_init(&atchan->queue, &atchan->active_list);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
}
@@ -510,38 +505,44 @@ static void atc_complete_all(struct at_dma_chan *atchan)
/**
* atc_advance_work - at the end of a transaction, move forward
* @atchan: channel where the transaction ended
- *
- * Called with atchan->lock held and bh disabled
*/
static void atc_advance_work(struct at_dma_chan *atchan)
{
+ unsigned long flags;
+ int ret;
+
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
- if (atc_chan_is_enabled(atchan))
+ spin_lock_irqsave(&atchan->lock, flags);
+ ret = atc_chan_is_enabled(atchan);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+ if (ret)
return;
if (list_empty(&atchan->active_list) ||
- list_is_singular(&atchan->active_list)) {
- atc_complete_all(atchan);
- } else {
- atc_chain_complete(atchan, atc_first_active(atchan));
- /* advance work */
- atc_dostart(atchan, atc_first_active(atchan));
- }
+ list_is_singular(&atchan->active_list))
+ return atc_complete_all(atchan);
+
+ atc_chain_complete(atchan, atc_first_active(atchan));
+
+ /* advance work */
+ spin_lock_irqsave(&atchan->lock, flags);
+ atc_dostart(atchan, atc_first_active(atchan));
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
* atc_handle_error - handle errors reported by DMA controller
* @atchan: channel where error occurs
- *
- * Called with atchan->lock held and bh disabled
*/
static void atc_handle_error(struct at_dma_chan *atchan)
{
struct at_desc *bad_desc;
struct at_desc *child;
+ unsigned long flags;
+ spin_lock_irqsave(&atchan->lock, flags);
/*
* The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
@@ -573,6 +574,8 @@ static void atc_handle_error(struct at_dma_chan *atchan)
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
atc_dump_lli(atchan, &child->lli);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
/* Pretend the descriptor completed successfully */
atc_chain_complete(atchan, bad_desc);
}
@@ -580,8 +583,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
/**
* atc_handle_cyclic - at the end of a period, run callback function
* @atchan: channel used for cyclic operations
- *
- * Called with atchan->lock held and bh disabled
*/
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
@@ -600,17 +601,14 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
- unsigned long flags;
- spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
- atc_handle_error(atchan);
- else if (atc_chan_is_cyclic(atchan))
- atc_handle_cyclic(atchan);
- else
- atc_advance_work(atchan);
+ return atc_handle_error(atchan);
- spin_unlock_irqrestore(&atchan->lock, flags);
+ if (atc_chan_is_cyclic(atchan))
+ return atc_handle_cyclic(atchan);
+
+ atc_advance_work(atchan);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -940,7 +938,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return NULL;
}
- vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+ vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
if (!vaddr) {
dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
__func__);
@@ -998,7 +996,7 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
return NULL;
}
- vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+ vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
if (!vaddr) {
dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
__func__);
@@ -1446,6 +1444,8 @@ static int atc_terminate_all(struct dma_chan *chan)
list_splice_init(&atchan->queue, &list);
list_splice_init(&atchan->active_list, &list);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
@@ -1454,8 +1454,6 @@ static int atc_terminate_all(struct dma_chan *chan)
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);
- spin_unlock_irqrestore(&atchan->lock, flags);
-
return 0;
}
@@ -1516,7 +1514,6 @@ atc_tx_status(struct dma_chan *chan,
static void atc_issue_pending(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n");
@@ -1524,15 +1521,12 @@ static void atc_issue_pending(struct dma_chan *chan)
if (atc_chan_is_cyclic(atchan))
return;
- spin_lock_irqsave(&atchan->lock, flags);
atc_advance_work(atchan);
- spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
- * @client: current client requesting the channel be ready for requests
*
* return - the number of allocated descriptors
*/
@@ -1542,10 +1536,8 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc;
struct at_dma_slave *atslave;
- unsigned long flags;
int i;
u32 cfg;
- LIST_HEAD(tmp_list);
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -1555,6 +1547,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
return -EIO;
}
+ if (!list_empty(&atchan->free_list)) {
+ dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
+ return -EIO;
+ }
+
cfg = ATC_DEFAULT_CFG;
atslave = chan->private;
@@ -1570,11 +1567,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
cfg = atslave->cfg;
}
- /* have we already been set up?
- * reconfigure channel but no need to reallocate descriptors */
- if (!list_empty(&atchan->free_list))
- return atchan->descs_allocated;
-
/* Allocate initial pool of descriptors */
for (i = 0; i < init_nr_desc_per_channel; i++) {
desc = atc_alloc_descriptor(chan, GFP_KERNEL);
@@ -1583,23 +1575,18 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
"Only %d initial descriptors\n", i);
break;
}
- list_add_tail(&desc->desc_node, &tmp_list);
+ list_add_tail(&desc->desc_node, &atchan->free_list);
}
- spin_lock_irqsave(&atchan->lock, flags);
- atchan->descs_allocated = i;
- list_splice(&tmp_list, &atchan->free_list);
dma_cookie_init(chan);
- spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */
channel_writel(atchan, CFG, cfg);
dev_dbg(chan2dev(chan),
- "alloc_chan_resources: allocated %d descriptors\n",
- atchan->descs_allocated);
+ "alloc_chan_resources: allocated %d descriptors\n", i);
- return atchan->descs_allocated;
+ return i;
}
/**
@@ -1613,9 +1600,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
struct at_desc *desc, *_desc;
LIST_HEAD(list);
- dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
- atchan->descs_allocated);
-
/* ASSERT: channel is idle */
BUG_ON(!list_empty(&atchan->active_list));
BUG_ON(!list_empty(&atchan->queue));
@@ -1628,7 +1612,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
}
list_splice_init(&atchan->free_list, &list);
- atchan->descs_allocated = 0;
atchan->status = 0;
/*
@@ -1671,7 +1654,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
+ atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
if (!atslave)
return NULL;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index fe8a5853ec49..397692e937b3 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -243,7 +243,6 @@ enum atc_status {
* @active_list: list of descriptors dmaengine is being running on
* @queue: list of descriptors ready to be submitted to engine
* @free_list: list of descriptors usable by the channel
- * @descs_allocated: records the actual size of the descriptor pool
*/
struct at_dma_chan {
struct dma_chan chan_common;
@@ -264,7 +263,6 @@ struct at_dma_chan {
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
- unsigned int descs_allocated;
};
#define channel_readl(atchan, name) \
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index f71c9f77d405..bb0eaf38b594 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1543,9 +1543,6 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
struct at_xdmac_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&atchan->lock, flags);
/*
* If channel is enabled, do nothing, advance_work will be triggered
@@ -1559,8 +1556,6 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
if (!desc->active_xfer)
at_xdmac_start_xfer(atchan, desc);
}
-
- spin_unlock_irqrestore(&atchan->lock, flags);
}
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1596,7 +1591,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
- spin_lock_bh(&atchan->lock);
+ spin_lock_irq(&atchan->lock);
/* Channel must be disabled first as it's not done automatically */
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
@@ -1607,7 +1602,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
struct at_xdmac_desc,
xfer_node);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irq(&atchan->lock);
/* Print bad descriptor's details if needed */
dev_dbg(chan2dev(&atchan->chan),
@@ -1640,31 +1635,31 @@ static void at_xdmac_tasklet(unsigned long data)
if (atchan->irq_status & error_mask)
at_xdmac_handle_error(atchan);
- spin_lock(&atchan->lock);
+ spin_lock_irq(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc,
xfer_node);
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
if (!desc->active_xfer) {
dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
- spin_unlock(&atchan->lock);
+ spin_unlock_irq(&atchan->lock);
return;
}
txd = &desc->tx_dma_desc;
at_xdmac_remove_xfer(atchan, desc);
- spin_unlock(&atchan->lock);
+ spin_unlock_irq(&atchan->lock);
- if (!at_xdmac_chan_is_cyclic(atchan)) {
- dma_cookie_complete(txd);
- if (txd->flags & DMA_PREP_INTERRUPT)
- dmaengine_desc_get_callback_invoke(txd, NULL);
- }
+ dma_cookie_complete(txd);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
dma_run_dependencies(txd);
+ spin_lock_irq(&atchan->lock);
at_xdmac_advance_work(atchan);
+ spin_unlock_irq(&atchan->lock);
}
}
@@ -1725,11 +1720,15 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
static void at_xdmac_issue_pending(struct dma_chan *chan)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ unsigned long flags;
dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
- if (!at_xdmac_chan_is_cyclic(atchan))
+ if (!at_xdmac_chan_is_cyclic(atchan)) {
+ spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_advance_work(atchan);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+ }
return;
}
@@ -1822,26 +1821,21 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac_desc *desc;
int i;
- unsigned long flags;
-
- spin_lock_irqsave(&atchan->lock, flags);
if (at_xdmac_chan_is_enabled(atchan)) {
dev_err(chan2dev(chan),
"can't allocate channel resources (channel enabled)\n");
- i = -EIO;
- goto spin_unlock;
+ return -EIO;
}
if (!list_empty(&atchan->free_descs_list)) {
dev_err(chan2dev(chan),
"can't allocate channel resources (channel not free from a previous use)\n");
- i = -EIO;
- goto spin_unlock;
+ return -EIO;
}
for (i = 0; i < init_nr_desc_per_channel; i++) {
- desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
+ desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
if (!desc) {
dev_warn(chan2dev(chan),
"only %d descriptors have been allocated\n", i);
@@ -1854,8 +1848,6 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
-spin_unlock:
- spin_unlock_irqrestore(&atchan->lock, flags);
return i;
}
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 275e90fa829d..64239da02e74 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -120,7 +120,7 @@ struct sba_request {
struct brcm_message msg;
struct dma_async_tx_descriptor tx;
/* SBA commands */
- struct brcm_sba_command cmds[0];
+ struct brcm_sba_command cmds[];
};
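This and the matching req_slots[0], sg[0] and channels[0] conversions later in the series switch zero-length arrays to C99 flexible array members; the allocation pattern is unchanged and typically uses struct_size(). A hedged sketch, with ncmds as a hypothetical count:

	struct sba_request *req;

	/* one allocation covers the header plus ncmds trailing commands */
	req = kzalloc(struct_size(req, cmds, ncmds), GFP_KERNEL);
	if (!req)
		return NULL;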
enum sba_version {
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 17909fd1820f..d31076d9ef25 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -58,6 +58,87 @@ static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+
+static void dmaengine_debug_register(struct dma_device *dma_dev)
+{
+ dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
+ rootdir);
+ if (IS_ERR(dma_dev->dbg_dev_root))
+ dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_debug_unregister(struct dma_device *dma_dev)
+{
+ debugfs_remove_recursive(dma_dev->dbg_dev_root);
+ dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_dbg_summary_show(struct seq_file *s,
+ struct dma_device *dma_dev)
+{
+ struct dma_chan *chan;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count) {
+ seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+ chan->dbg_client_name ?: "in-use");
+
+ if (chan->router)
+ seq_printf(s, " (via router: %s)\n",
+ dev_name(chan->router->dev));
+ else
+ seq_puts(s, "\n");
+ }
+ }
+}
+
+static int dmaengine_summary_show(struct seq_file *s, void *data)
+{
+ struct dma_device *dma_dev = NULL;
+
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry(dma_dev, &dma_device_list, global_node) {
+ seq_printf(s, "dma%d (%s): number of channels: %u\n",
+ dma_dev->dev_id, dev_name(dma_dev->dev),
+ dma_dev->chancnt);
+
+ if (dma_dev->dbg_summary_show)
+ dma_dev->dbg_summary_show(s, dma_dev);
+ else
+ dmaengine_dbg_summary_show(s, dma_dev);
+
+ if (!list_is_last(&dma_dev->global_node, &dma_device_list))
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
+
+static void __init dmaengine_debugfs_init(void)
+{
+ rootdir = debugfs_create_dir("dmaengine", NULL);
+
+ /* /sys/kernel/debug/dmaengine/summary */
+ debugfs_create_file("summary", 0444, rootdir, NULL,
+ &dmaengine_summary_fops);
+}
+#else
+static inline void dmaengine_debugfs_init(void) { }
+static inline void dmaengine_debug_register(struct dma_device *dma_dev) { }
+
+static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
+#endif /* CONFIG_DEBUG_FS */
+
/* --- sysfs implementation --- */
#define DMA_SLAVE_NAME "slave"
@@ -151,10 +232,6 @@ static void chan_dev_release(struct device *dev)
struct dma_chan_dev *chan_dev;
chan_dev = container_of(dev, typeof(*chan_dev), device);
- if (atomic_dec_and_test(chan_dev->idr_ref)) {
- ida_free(&dma_ida, chan_dev->dev_id);
- kfree(chan_dev->idr_ref);
- }
kfree(chan_dev);
}
@@ -760,6 +837,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
return chan ? chan : ERR_PTR(-EPROBE_DEFER);
found:
+#ifdef CONFIG_DEBUG_FS
+ chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
+ name);
+#endif
+
chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
if (!chan->name)
return chan;
@@ -837,6 +919,11 @@ void dma_release_channel(struct dma_chan *chan)
chan->name = NULL;
chan->slave = NULL;
}
+
+#ifdef CONFIG_DEBUG_FS
+ kfree(chan->dbg_client_name);
+ chan->dbg_client_name = NULL;
+#endif
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -952,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
}
static int __dma_async_device_channel_register(struct dma_device *device,
- struct dma_chan *chan,
- int chan_id)
+ struct dma_chan *chan)
{
int rc = 0;
- int chancnt = device->chancnt;
- atomic_t *idr_ref;
- struct dma_chan *tchan;
-
- tchan = list_first_entry_or_null(&device->channels,
- struct dma_chan, device_node);
- if (!tchan)
- return -ENODEV;
-
- if (tchan->dev) {
- idr_ref = tchan->dev->idr_ref;
- } else {
- idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
- if (!idr_ref)
- return -ENOMEM;
- atomic_set(idr_ref, 0);
- }
chan->local = alloc_percpu(typeof(*chan->local));
if (!chan->local)
@@ -988,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
* When the chan_id is a negative value, we are dynamically adding
* the channel. Otherwise we are static enumerating.
*/
- chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+ mutex_lock(&device->chan_mutex);
+ chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+ mutex_unlock(&device->chan_mutex);
+ if (chan->chan_id < 0) {
+ pr_err("%s: unable to alloc ida for chan: %d\n",
+ __func__, chan->chan_id);
+ goto err_out;
+ }
+
chan->dev->device.class = &dma_devclass;
chan->dev->device.parent = device->dev;
chan->dev->chan = chan;
- chan->dev->idr_ref = idr_ref;
chan->dev->dev_id = device->dev_id;
- atomic_inc(idr_ref);
dev_set_name(&chan->dev->device, "dma%dchan%d",
device->dev_id, chan->chan_id);
-
rc = device_register(&chan->dev->device);
if (rc)
- goto err_out;
+ goto err_out_ida;
chan->client_count = 0;
- device->chancnt = chan->chan_id + 1;
+ device->chancnt++;
return 0;
+ err_out_ida:
+ mutex_lock(&device->chan_mutex);
+ ida_free(&device->chan_ida, chan->chan_id);
+ mutex_unlock(&device->chan_mutex);
err_out:
free_percpu(chan->local);
kfree(chan->dev);
- if (atomic_dec_return(idr_ref) == 0)
- kfree(idr_ref);
return rc;
}
@@ -1019,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
{
int rc;
- rc = __dma_async_device_channel_register(device, chan, -1);
+ rc = __dma_async_device_channel_register(device, chan);
if (rc < 0)
return rc;
@@ -1039,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
device->chancnt--;
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
+ mutex_lock(&device->chan_mutex);
+ ida_free(&device->chan_ida, chan->chan_id);
+ mutex_unlock(&device->chan_mutex);
device_unregister(&chan->dev->device);
free_percpu(chan->local);
}
@@ -1061,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
*/
int dma_async_device_register(struct dma_device *device)
{
- int rc, i = 0;
+ int rc;
struct dma_chan* chan;
if (!device)
@@ -1166,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
if (rc != 0)
return rc;
+ mutex_init(&device->chan_mutex);
+ ida_init(&device->chan_ida);
+
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
- rc = __dma_async_device_channel_register(device, chan, i++);
+ rc = __dma_async_device_channel_register(device, chan);
if (rc < 0)
goto err_out;
}
@@ -1196,6 +1278,8 @@ int dma_async_device_register(struct dma_device *device)
dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
+ dmaengine_debug_register(device);
+
return 0;
err_out:
@@ -1229,6 +1313,8 @@ void dma_async_device_unregister(struct dma_device *device)
{
struct dma_chan *chan, *n;
+ dmaengine_debug_unregister(device);
+
list_for_each_entry_safe(chan, n, &device->channels, device_node)
__dma_async_device_channel_unregister(device, chan);
@@ -1239,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
*/
dma_cap_set(DMA_PRIVATE, device->cap_mask);
dma_channel_rebalance();
+ ida_free(&dma_ida, device->dev_id);
dma_device_put(device);
mutex_unlock(&dma_list_mutex);
}
@@ -1559,6 +1646,11 @@ static int __init dma_bus_init(void)
if (err)
return err;
- return class_register(&dma_devclass);
+
+ err = class_register(&dma_devclass);
+ if (!err)
+ dmaengine_debugfs_init();
+
+ return err;
}
arch_initcall(dma_bus_init);
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index e8a320c9e57c..1bfbd64b1371 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -182,4 +182,20 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev) {
+ return dma_dev->dbg_dev_root;
+}
+#else
+struct dentry;
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
#endif
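A hedged sketch of a dmaengine driver hanging an extra file off the new per-device directory; the device struct, file name and fops are hypothetical, and a NULL root (CONFIG_DEBUG_FS off) is safe to pass through:

	struct dentry *root = dmaengine_get_debugfs_root(&mydev->ddev);

	/* debugfs helpers tolerate a NULL or error parent */
	debugfs_create_file("regs", 0444, root, mydev, &my_regs_fops);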
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a2cadfa2e6d7..0425984db118 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
struct dmatest_thread *thread;
list_for_each_entry(thread, &dtc->threads, node) {
- if (!thread->done)
+ if (!thread->done && !thread->pending)
return true;
}
}
@@ -662,8 +662,8 @@ static int dmatest_func(void *data)
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
ktime = ktime_get();
- while (!kthread_should_stop()
- && !(params->iterations && total_tests >= params->iterations)) {
+ while (!(kthread_should_stop() ||
+ (params->iterations && total_tests >= params->iterations))) {
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
@@ -1166,10 +1166,11 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
mutex_unlock(&info->lock);
return ret;
} else if (dmatest_run) {
- if (is_threaded_test_pending(info))
- start_threaded_tests(info);
- else
- pr_info("Could not start test, no channels configured\n");
+ if (!is_threaded_test_pending(info)) {
+			pr_info("No channels configured, continuing with any available channel\n");
+ add_threaded_test(info);
+ }
+ start_threaded_tests(info);
} else {
stop_threaded_test(info);
}
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index c70a7965f140..4ec909e0b810 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -790,6 +790,20 @@ static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
return 0;
}
+static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
+{
+ struct dpaa2_qdma_priv *priv;
+ struct device *dev;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ dpaa2_dpdmai_dpio_unbind(priv);
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
@@ -805,6 +819,7 @@ static struct fsl_mc_driver dpaa2_qdma_driver = {
},
.probe = dpaa2_qdma_probe,
.remove = dpaa2_qdma_remove,
+ .shutdown = dpaa2_qdma_shutdown,
.match_id_table = dpaa2_qdma_id_table
};
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index f8d22115154a..878662aaa1c2 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -160,6 +160,27 @@ int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
}
/**
+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
+ cmd_flags, token);
+
+	/* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_destroy);
+
+/**
* dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
index 6d785093da8e..b13b9bf0c003 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
@@ -18,6 +18,7 @@
#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
#define DPDMAI_CMDID_OPEN DPDMAI_CMDID_FORMAT(0x80E)
#define DPDMAI_CMDID_CREATE DPDMAI_CMDID_FORMAT(0x90E)
+#define DPDMAI_CMDID_DESTROY DPDMAI_CMDID_FORMAT(0x900)
#define DPDMAI_CMDID_ENABLE DPDMAI_CMDID_FORMAT(0x002)
#define DPDMAI_CMDID_DISABLE DPDMAI_CMDID_FORMAT(0x003)
@@ -160,6 +161,7 @@ struct dpdmai_rx_queue_attr {
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token);
int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
const struct dpdmai_cfg *cfg, u16 *token);
int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 989b7a25ca61..ff49847e37a8 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -74,12 +74,10 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_device *idxd;
struct idxd_wq *wq;
struct device *dev;
- struct idxd_cdev *idxd_cdev;
wq = inode_wq(inode);
idxd = wq->idxd;
dev = &idxd->pdev->dev;
- idxd_cdev = &wq->idxd_cdev;
dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
@@ -139,6 +137,8 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
dev_dbg(&pdev->dev, "%s called\n", __func__);
rc = check_vma(wq, vma, __func__);
+ if (rc < 0)
+ return rc;
vma->vm_flags |= VM_DONTCOPY;
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ada69e722f84..8d79a8787104 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -62,6 +62,13 @@ int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
perm.ignore = 0;
iowrite32(perm.bits, idxd->reg_base + offset);
+ /*
+ * A readback from the device ensures that any previously generated
+ * completion record writes are visible to software based on PCI
+ * ordering rules.
+ */
+ perm.bits = ioread32(idxd->reg_base + offset);
+
return 0;
}
@@ -584,11 +591,11 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
struct idxd_group *group = &idxd->groups[i];
if (group->tc_a == -1)
- group->grpcfg.flags.tc_a = 0;
+ group->tc_a = group->grpcfg.flags.tc_a = 0;
else
group->grpcfg.flags.tc_a = group->tc_a;
if (group->tc_b == -1)
- group->grpcfg.flags.tc_b = 1;
+ group->tc_b = group->grpcfg.flags.tc_b = 1;
else
group->grpcfg.flags.tc_b = group->tc_b;
group->grpcfg.flags.use_token_limit = group->use_token_limit;
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index d6fcd2e60103..6510791b9921 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -173,6 +173,7 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
struct llist_node *head;
int queued = 0;
+ *processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
return 0;
@@ -197,6 +198,7 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
struct list_head *node, *next;
int queued = 0;
+ *processed = 0;
if (list_empty(&irq_entry->work_list))
return 0;
@@ -218,10 +220,9 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
return queued;
}
-irqreturn_t idxd_wq_thread(int irq, void *data)
+static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
- struct idxd_irq_entry *irq_entry = data;
- int rc, processed = 0, retry = 0;
+ int rc, processed, total = 0;
/*
* There are two lists we are processing. The pending_llist is where
@@ -244,15 +245,26 @@ irqreturn_t idxd_wq_thread(int irq, void *data)
*/
do {
rc = irq_process_work_list(irq_entry, &processed);
- if (rc != 0) {
- retry++;
+ total += processed;
+ if (rc != 0)
continue;
- }
rc = irq_process_pending_llist(irq_entry, &processed);
- } while (rc != 0 && retry != 10);
+ total += processed;
+ } while (rc != 0);
+
+ return total;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ int processed;
+ processed = idxd_desc_process(irq_entry);
idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+ /* catch anything unprocessed after unmasking */
+ processed += idxd_desc_process(irq_entry);
if (processed == 0)
return IRQ_NONE;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 6ca6e520a2fa..3999827970ab 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -419,7 +419,7 @@ static ssize_t engine_group_id_store(struct device *dev,
struct idxd_device *idxd = engine->idxd;
long id;
int rc;
- struct idxd_group *prevg, *group;
+ struct idxd_group *prevg;
rc = kstrtol(buf, 10, &id);
if (rc < 0)
@@ -439,7 +439,6 @@ static ssize_t engine_group_id_store(struct device *dev,
return count;
}
- group = &idxd->groups[id];
prevg = engine->group;
if (prevg)
@@ -513,9 +512,6 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->token_limit == 0)
- return -EPERM;
-
if (val > idxd->max_tokens)
return -EINVAL;
@@ -561,8 +557,6 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->token_limit == 0)
- return -EPERM;
if (val < 4 * group->num_engines ||
val > group->tokens_reserved + idxd->nr_tokens)
return -EINVAL;
@@ -1180,6 +1174,16 @@ static ssize_t op_cap_show(struct device *dev,
}
static DEVICE_ATTR_RO(op_cap);
+static ssize_t gen_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
+}
+static DEVICE_ATTR_RO(gen_cap);
+
static ssize_t configurable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1317,6 +1321,7 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_max_batch_size.attr,
&dev_attr_max_transfer_size.attr,
&dev_attr_op_cap.attr,
+ &dev_attr_gen_cap.attr,
&dev_attr_configurable.attr,
&dev_attr_clients.attr,
&dev_attr_state.attr,
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index be61c32a876f..0be385587c4c 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -102,7 +102,7 @@ struct ioat_dca_priv {
int max_requesters;
int requester_count;
u8 tag_map[IOAT_TAG_MAP_LEN];
- struct ioat_dca_slot req_slots[0];
+ struct ioat_dca_slot req_slots[];
};
static int ioat_dca_dev_managed(struct dca_provider *dca,
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 10117f271b12..d683232d7fea 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
size);
tdmac->desc_arr = NULL;
+ if (tdmac->status == DMA_ERROR)
+ tdmac->status = DMA_COMPLETE;
return;
}
@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc)
goto err_out;
- mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+ if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
+ goto err_out;
while (buf < buf_len) {
desc = &tdmac->desc_arr[i];
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index c683051257fd..66ef70b00ec0 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -175,13 +175,11 @@ struct owl_dma_txd {
* @id: physical index to this channel
* @base: virtual memory base for the dma channel
* @vchan: the virtual channel currently being served by this physical channel
- * @lock: a lock to use when altering an instance of this struct
*/
struct owl_dma_pchan {
u32 id;
void __iomem *base;
struct owl_dma_vchan *vchan;
- spinlock_t lock;
};
/**
@@ -437,14 +435,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
for (i = 0; i < od->nr_pchans; i++) {
pchan = &od->pchans[i];
- spin_lock_irqsave(&pchan->lock, flags);
+ spin_lock_irqsave(&od->lock, flags);
if (!pchan->vchan) {
pchan->vchan = vchan;
- spin_unlock_irqrestore(&pchan->lock, flags);
+ spin_unlock_irqrestore(&od->lock, flags);
break;
}
- spin_unlock_irqrestore(&pchan->lock, flags);
+ spin_unlock_irqrestore(&od->lock, flags);
}
return pchan;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 581e7a290d98..a3b0b4c56a19 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
}
pci_set_master(pdev);
+ pd->dma.dev = &pdev->dev;
err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
if (err) {
@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_free_irq;
}
- pd->dma.dev = &pdev->dev;
INIT_LIST_HEAD(&pd->dma.channels);
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fbabd2e88a18..4db000d5f01c 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4303,7 +4303,7 @@ static ssize_t devices_show(struct device_driver *dev, char *buf)
for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
if (ppc440spe_adma_devices[i] == -1)
continue;
- size += snprintf(buf + size, PAGE_SIZE - size,
+ size += scnprintf(buf + size, PAGE_SIZE - size,
"PPC440SP(E)-ADMA.%d: %s\n", i,
ppc_adma_errors[ppc440spe_adma_devices[i]]);
}
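The snprintf() to scnprintf() switch is the standard fix for accumulated sysfs output: snprintf() returns the length that would have been written, so on truncation size can run past PAGE_SIZE and the next iteration's PAGE_SIZE - size underflows, while scnprintf() returns the bytes actually stored. A minimal sketch of the safe pattern:

	ssize_t size = 0;
	int i;

	for (i = 0; i < n; i++)
		size += scnprintf(buf + size, PAGE_SIZE - size,
				  "entry %d\n", i);
	return size;	/* never exceeds PAGE_SIZE - 1 */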
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index afb68055ed1b..0fa7f14a65a1 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -78,7 +78,7 @@ struct sa11x0_dma_desc {
bool cyclic;
unsigned sglen;
- struct sa11x0_dma_sg sg[0];
+ struct sa11x0_dma_sg sg[];
};
struct sa11x0_dma_phy;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index f06016d38a05..59b36ab5d684 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1219,7 +1219,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
sg_len = buf_len / period_len;
if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
dev_err(chan->device->dev,
- "chan%u: sg length %d exceds limit %d",
+ "chan%u: sg length %d exceeds limit %d",
rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
return NULL;
}
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index c51de498b5b4..2deeaab078a4 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -709,7 +709,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
BUG_ON(!schan->desc_num);
if (sg_len > SHDMA_MAX_SG_LEN) {
- dev_err(schan->dev, "sg length %d exceds limit %d",
+ dev_err(schan->dev, "sg length %d exceeds limit %d",
sg_len, SHDMA_MAX_SG_LEN);
return NULL;
}
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 9a31a315dbef..0ef5ca81ba4d 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -212,7 +212,7 @@ struct sprd_dma_dev {
struct clk *ashb_clk;
int irq;
u32 total_chns;
- struct sprd_dma_chn channels[0];
+ struct sprd_dma_chn channels[];
};
static void sprd_dma_free_desc(struct virt_dma_desc *vd);
@@ -486,6 +486,28 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
return 0;
}
+static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 reg, val, req_id;
+
+ if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+ return;
+
+ /* The DMA request id always starts from 0. */
+ req_id = schan->dev_id - 1;
+
+ if (req_id < 32) {
+ reg = SPRD_DMA_GLB_REQ_PEND0_EN;
+ val = BIT(req_id);
+ } else {
+ reg = SPRD_DMA_GLB_REQ_PEND1_EN;
+ val = BIT(req_id - 32);
+ }
+
+ sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
+}
+
static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
struct sprd_dma_desc *sdesc)
{
@@ -532,6 +554,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
*/
sprd_dma_set_chn_config(schan, schan->cur_desc);
sprd_dma_set_uid(schan);
+ sprd_dma_set_pending(schan, true);
sprd_dma_enable_chn(schan);
if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
@@ -543,6 +566,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
sprd_dma_stop_and_disable(schan);
+ sprd_dma_set_pending(schan, false);
sprd_dma_unset_uid(schan);
sprd_dma_clear_int(schan);
schan->cur_desc = NULL;
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 5989b0893521..0ddbaa4b4f0b 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -207,7 +208,6 @@ struct stm32_dma_device {
struct dma_device ddev;
void __iomem *base;
struct clk *clk;
- struct reset_control *rst;
bool mem2mem;
struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};
@@ -422,29 +422,19 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- unsigned long timeout = jiffies + msecs_to_jiffies(5000);
- u32 dma_scr, id;
+ u32 dma_scr, id, reg;
id = chan->id;
- dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ reg = STM32_DMA_SCR(id);
+ dma_scr = stm32_dma_read(dmadev, reg);
if (dma_scr & STM32_DMA_SCR_EN) {
dma_scr &= ~STM32_DMA_SCR_EN;
- stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);
-
- do {
- dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
- dma_scr &= STM32_DMA_SCR_EN;
- if (!dma_scr)
- break;
-
- if (time_after_eq(jiffies, timeout)) {
- dev_err(chan2dev(chan), "%s: timeout!\n",
- __func__);
- return -EBUSY;
- }
- cond_resched();
- } while (1);
+ stm32_dma_write(dmadev, reg, dma_scr);
+
+ return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
+ dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
+ 10, 1000000);
}
return 0;
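The hunk above swaps an open-coded jiffies loop for readl_relaxed_poll_timeout_atomic(); a rough user-space analogue of such a poll-until-clear helper, assuming a plain pointer in place of the MMIO read:

#include <stdint.h>
#include <time.h>

/* Spin until the masked bits clear or timeout_us expires. The kernel
 * helper also inserts a small delay between reads; omitted here. */
static int poll_until_clear(const volatile uint32_t *reg, uint32_t mask,
                            unsigned long timeout_us)
{
    struct timespec ts;
    uint64_t now, deadline;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    deadline = (uint64_t)ts.tv_sec * 1000000ull + ts.tv_nsec / 1000 + timeout_us;

    for (;;) {
        if (!(*reg & mask))
            return 0;               /* bits cleared in time */
        clock_gettime(CLOCK_MONOTONIC, &ts);
        now = (uint64_t)ts.tv_sec * 1000000ull + ts.tv_nsec / 1000;
        if (now > deadline)
            return -1;              /* the kernel helper returns -ETIMEDOUT */
    }
}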
@@ -488,8 +478,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (chan->busy) {
- stm32_dma_stop(chan);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vdesc);
+ if (chan->busy)
+ stm32_dma_stop(chan);
chan->desc = NULL;
}
@@ -545,6 +537,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
if (!vdesc)
return;
+ list_del(&vdesc->node);
+
chan->desc = to_stm32_dma_desc(vdesc);
chan->next_sg = 0;
}
@@ -555,6 +549,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
sg_req = &chan->desc->sg_req[chan->next_sg];
reg = &sg_req->chan_reg;
+ reg->dma_scr &= ~STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
@@ -622,7 +617,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
} else {
chan->busy = false;
if (chan->next_sg == chan->desc->num_sgs) {
- list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL;
}
@@ -1275,6 +1269,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
struct dma_device *dd;
const struct of_device_id *match;
struct resource *res;
+ struct reset_control *rst;
int i, ret;
match = of_match_device(stm32_dma_of_match, &pdev->dev);
@@ -1296,8 +1291,10 @@ static int stm32_dma_probe(struct platform_device *pdev)
dmadev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dmadev->clk)) {
- dev_err(&pdev->dev, "Error: Missing controller clock\n");
- return PTR_ERR(dmadev->clk);
+ ret = PTR_ERR(dmadev->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get clock\n");
+ return ret;
}
ret = clk_prepare_enable(dmadev->clk);
@@ -1309,13 +1306,19 @@ static int stm32_dma_probe(struct platform_device *pdev)
dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
"st,mem2mem");
- dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(dmadev->rst)) {
- reset_control_assert(dmadev->rst);
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret == -EPROBE_DEFER)
+ goto clk_free;
+ } else {
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(dmadev->rst);
+ reset_control_deassert(rst);
}
+ dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
+
dma_cap_set(DMA_SLAVE, dd->cap_mask);
dma_cap_set(DMA_PRIVATE, dd->cap_mask);
dma_cap_set(DMA_CYCLIC, dd->cap_mask);
@@ -1336,7 +1339,9 @@ static int stm32_dma_probe(struct platform_device *pdev)
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
dd->max_burst = STM32_DMA_MAX_BURST;
+ dd->descriptor_reuse = true;
dd->dev = &pdev->dev;
INIT_LIST_HEAD(&dd->channels);
@@ -1427,7 +1432,39 @@ static int stm32_dma_runtime_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dma_suspend(struct device *dev)
+{
+ struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+ int id, ret, scr;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
+ scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ if (scr & STM32_DMA_SCR_EN) {
+ dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+ return -EBUSY;
+ }
+ }
+
+ pm_runtime_put_sync(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int stm32_dma_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
static const struct dev_pm_ops stm32_dma_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
stm32_dma_runtime_resume, NULL)
};
@@ -1438,10 +1475,11 @@ static struct platform_driver stm32_dma_driver = {
.of_match_table = stm32_dma_of_match,
.pm = &stm32_dma_pm_ops,
},
+ .probe = stm32_dma_probe,
};
static int __init stm32_dma_init(void)
{
- return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
+ return platform_driver_register(&stm32_dma_driver);
}
subsys_initcall(stm32_dma_init);
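The system-sleep hook added above refuses to suspend while any stream is still enabled; a compact sketch of that policy, with chan_scr[] standing in for per-channel SCR register reads:

#include <errno.h>
#include <stdint.h>

#define NCHAN  8            /* stand-in for STM32_DMA_MAX_CHANNELS */
#define SCR_EN (1u << 0)    /* stand-in for STM32_DMA_SCR_EN */

/* Walk every channel and veto suspend if one is still running. */
static int dma_suspend_check(const uint32_t chan_scr[NCHAN])
{
    for (int id = 0; id < NCHAN; id++) {
        if (chan_scr[id] & SCR_EN)
            return -EBUSY;  /* same verdict the driver returns */
    }
    return 0;
}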
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index 3c89bd39e096..12f7637e13a1 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -35,12 +35,14 @@ struct stm32_dmamux {
struct stm32_dmamux_data {
struct dma_router dmarouter;
struct clk *clk;
- struct reset_control *rst;
void __iomem *iomem;
u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
spinlock_t lock; /* Protects register access */
unsigned long *dma_inuse; /* Used DMA channel */
+ u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
+ * in suspend
+ */
u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
* [0] holds number of DMA Masters.
* To be kept at the very end of this structure
@@ -179,6 +181,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
struct stm32_dmamux_data *stm32_dmamux;
struct resource *res;
void __iomem *iomem;
+ struct reset_control *rst;
int i, count, ret;
u32 dma_req;
@@ -251,16 +254,26 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(stm32_dmamux->clk)) {
ret = PTR_ERR(stm32_dmamux->clk);
- if (ret == -EPROBE_DEFER)
- dev_info(&pdev->dev, "Missing controller clock\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Missing clock controller\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(stm32_dmamux->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
return ret;
}
- stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(stm32_dmamux->rst)) {
- reset_control_assert(stm32_dmamux->rst);
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk;
+ } else {
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(stm32_dmamux->rst);
+ reset_control_deassert(rst);
}
stm32_dmamux->iomem = iomem;
@@ -271,14 +284,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- if (!IS_ERR(stm32_dmamux->clk)) {
- ret = clk_prepare_enable(stm32_dmamux->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
- return ret;
- }
- }
-
pm_runtime_get_noresume(&pdev->dev);
/* Reset the dmamux */
@@ -287,8 +292,17 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- return of_dma_router_register(node, stm32_dmamux_route_allocate,
+ ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
+ if (ret)
+ goto err_clk;
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(stm32_dmamux->clk);
+
+ return ret;
}
#ifdef CONFIG_PM
@@ -318,7 +332,54 @@ static int stm32_dmamux_runtime_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dmamux_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+ int i, ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < stm32_dmamux->dma_requests; i++)
+ stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
+ STM32_DMAMUX_CCR(i));
+
+ pm_runtime_put_sync(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int stm32_dmamux_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+ int i, ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < stm32_dmamux->dma_requests; i++)
+ stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
+ stm32_dmamux->ccr[i]);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops stm32_dmamux_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
stm32_dmamux_runtime_resume, NULL)
};
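The new suspend/resume pair snapshots every DMAMUX channel-configuration register and replays it after resume; a sketch of the save/restore idea, with regs[] standing in for the STM32_DMAMUX_CCR(i) register bank:

#include <stdint.h>

#define MAX_REQUESTS 64     /* stand-in for STM32_DMAMUX_MAX_DMA_REQUESTS */

struct dmamux_ctx {
    uint32_t ccr[MAX_REQUESTS]; /* backup taken at suspend */
    unsigned int dma_requests;
};

static void dmamux_save(struct dmamux_ctx *ctx, const volatile uint32_t *regs)
{
    for (unsigned int i = 0; i < ctx->dma_requests; i++)
        ctx->ccr[i] = regs[i];      /* read back each CCR */
}

static void dmamux_restore(const struct dmamux_ctx *ctx, volatile uint32_t *regs)
{
    for (unsigned int i = 0; i < ctx->dma_requests; i++)
        regs[i] = ctx->ccr[i];      /* replay the routing after resume */
}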
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 5838311cf990..5469563703d1 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -273,7 +273,6 @@ struct stm32_mdma_device {
void __iomem *base;
struct clk *clk;
int irq;
- struct reset_control *rst;
u32 nr_channels;
u32 nr_requests;
u32 nr_ahb_addr_masks;
@@ -1127,6 +1126,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
return;
}
+ list_del(&vdesc->node);
+
chan->desc = to_stm32_mdma_desc(vdesc);
hwdesc = chan->desc->node[0].hwdesc;
chan->curr_hwdesc = 0;
@@ -1242,8 +1243,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
LIST_HEAD(head);
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (chan->busy) {
- stm32_mdma_stop(chan);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vdesc);
+ if (chan->busy)
+ stm32_mdma_stop(chan);
chan->desc = NULL;
}
vchan_get_all_descriptors(&chan->vchan, &head);
@@ -1331,7 +1334,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
{
- list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL;
chan->busy = false;
@@ -1532,6 +1534,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
struct dma_device *dd;
struct device_node *of_node;
struct resource *res;
+ struct reset_control *rst;
u32 nr_channels, nr_requests;
int i, count, ret;
@@ -1579,8 +1582,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
dmadev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dmadev->clk)) {
ret = PTR_ERR(dmadev->clk);
- if (ret == -EPROBE_DEFER)
- dev_info(&pdev->dev, "Missing controller clock\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Missing clock controller\n");
return ret;
}
@@ -1590,11 +1593,15 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret;
}
- dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(dmadev->rst)) {
- reset_control_assert(dmadev->rst);
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk;
+ } else {
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(dmadev->rst);
+ reset_control_deassert(rst);
}
dd = &dmadev->ddev;
@@ -1614,6 +1621,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
dd->device_resume = stm32_mdma_resume;
dd->device_terminate_all = stm32_mdma_terminate_all;
dd->device_synchronize = stm32_mdma_synchronize;
+ dd->descriptor_reuse = true;
+
dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
@@ -1637,25 +1646,27 @@ static int stm32_mdma_probe(struct platform_device *pdev)
}
dmadev->irq = platform_get_irq(pdev, 0);
- if (dmadev->irq < 0)
- return dmadev->irq;
+ if (dmadev->irq < 0) {
+ ret = dmadev->irq;
+ goto err_clk;
+ }
ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
0, dev_name(&pdev->dev), dmadev);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ\n");
- return ret;
+ goto err_clk;
}
ret = dmaenginem_async_device_register(dd);
if (ret)
- return ret;
+ goto err_clk;
ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
if (ret < 0) {
dev_err(&pdev->dev,
"STM32 MDMA DMA OF registration failed %d\n", ret);
- goto err_unregister;
+ goto err_clk;
}
platform_set_drvdata(pdev, dmadev);
@@ -1668,7 +1679,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return 0;
-err_unregister:
+err_clk:
+ clk_disable_unprepare(dmadev->clk);
+
return ret;
}
@@ -1697,7 +1710,40 @@ static int stm32_mdma_runtime_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int stm32_mdma_pm_suspend(struct device *dev)
+{
+ struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+ u32 ccr, id;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ for (id = 0; id < dmadev->nr_channels; id++) {
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+ if (ccr & STM32_MDMA_CCR_EN) {
+ dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+ return -EBUSY;
+ }
+ }
+
+ pm_runtime_put_sync(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int stm32_mdma_pm_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
static const struct dev_pm_ops stm32_mdma_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
stm32_mdma_runtime_resume, NULL)
};
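The probe rework above funnels every post-clock failure through a single err_clk label; a minimal sketch of that unwind pattern, using stand-in helpers (none of these are real kernel calls):

#include <errno.h>

static int clk_state;
static int fake_clk_enable(void)     { clk_state = 1; return 0; }
static void fake_clk_disable(void)   { clk_state = 0; }
static int fake_request_irq(int irq) { return irq < 0 ? -EINVAL : 0; }

/* Once the clock is enabled, every later failure must unwind through
 * one label that disables it again. */
static int probe(int irq)
{
    int ret = fake_clk_enable();
    if (ret)
        return ret;

    ret = fake_request_irq(irq);
    if (ret)
        goto err_clk;

    return 0;

err_clk:
    fake_clk_disable();    /* clk_disable_unprepare() in the driver */
    return ret;
}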
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index bbc2bda3b902..e7ff09a5031d 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -697,11 +697,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
dest = sconfig->dst_addr;
endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
- SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
} else {
src = sconfig->src_addr;
dest = buf;
endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
}
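The cyclic fix above makes the RAM-side address mode explicit instead of relying on a zero default; a sketch of how the two endpoint modes pair up per direction (field positions are illustrative, not the real SUN4I register layout):

#include <stdint.h>

#define CFG_SRC_MODE(m) ((uint32_t)(m) << 5)   /* illustrative shift */
#define CFG_DST_MODE(m) ((uint32_t)(m) << 21)  /* illustrative shift */
enum { MODE_LINEAR = 0, MODE_IO = 1 };

/* Each direction pairs one linear (RAM) endpoint with one IO (device)
 * endpoint; the patch makes the linear side explicit rather than implied. */
static uint32_t cyclic_cfg(int dev_to_mem)
{
    if (dev_to_mem)
        return CFG_SRC_MODE(MODE_IO) | CFG_DST_MODE(MODE_LINEAR);
    return CFG_SRC_MODE(MODE_LINEAR) | CFG_DST_MODE(MODE_IO);
}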
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 4a750e29bfb5..b9f0d9636620 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -24,6 +24,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/wait.h>
#include "dmaengine.h"
@@ -59,7 +60,7 @@
#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
#define TEGRA_APBDMA_CHAN_CSRE 0x00C
-#define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31)
+#define TEGRA_APBDMA_CHAN_CSRE_PAUSE BIT(31)
/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR 0x010
@@ -120,21 +121,21 @@ struct tegra_dma;
* @support_separate_wcount_reg: Support separate word count register.
*/
struct tegra_dma_chip_data {
- int nr_channels;
- int channel_reg_size;
- int max_dma_count;
+ unsigned int nr_channels;
+ unsigned int channel_reg_size;
+ unsigned int max_dma_count;
bool support_channel_pause;
bool support_separate_wcount_reg;
};
/* DMA channel registers */
struct tegra_dma_channel_regs {
- unsigned long csr;
- unsigned long ahb_ptr;
- unsigned long apb_ptr;
- unsigned long ahb_seq;
- unsigned long apb_seq;
- unsigned long wcount;
+ u32 csr;
+ u32 ahb_ptr;
+ u32 apb_ptr;
+ u32 ahb_seq;
+ u32 apb_seq;
+ u32 wcount;
};
/*
@@ -168,7 +169,7 @@ struct tegra_dma_desc {
struct list_head node;
struct list_head tx_list;
struct list_head cb_node;
- int cb_count;
+ unsigned int cb_count;
};
struct tegra_dma_channel;
@@ -181,8 +182,7 @@ struct tegra_dma_channel {
struct dma_chan dma_chan;
char name[12];
bool config_init;
- int id;
- int irq;
+ unsigned int id;
void __iomem *chan_addr;
spinlock_t lock;
bool busy;
@@ -202,7 +202,9 @@ struct tegra_dma_channel {
/* Channel-slave specific configuration */
unsigned int slave_id;
struct dma_slave_config dma_sconfig;
- struct tegra_dma_channel_regs channel_reg;
+ struct tegra_dma_channel_regs channel_reg;
+
+ struct wait_queue_head wq;
};
/* tegra_dma: Tegra DMA specific information */
@@ -222,9 +224,6 @@ struct tegra_dma {
*/
u32 global_pause_count;
- /* Some register need to be cache before suspend */
- u32 reg_gen;
-
/* Last member of the structure */
struct tegra_dma_channel channels[0];
};
@@ -240,7 +239,7 @@ static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
}
static inline void tdc_write(struct tegra_dma_channel *tdc,
- u32 reg, u32 val)
+ u32 reg, u32 val)
{
writel(val, tdc->chan_addr + reg);
}
@@ -255,8 +254,8 @@ static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
return container_of(dc, struct tegra_dma_channel, dma_chan);
}
-static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
- struct dma_async_tx_descriptor *td)
+static inline struct tegra_dma_desc *
+txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
{
return container_of(td, struct tegra_dma_desc, txd);
}
@@ -267,12 +266,9 @@ static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
}
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
-static int tegra_dma_runtime_suspend(struct device *dev);
-static int tegra_dma_runtime_resume(struct device *dev);
/* Get DMA desc from free list, if not there then allocate it. */
-static struct tegra_dma_desc *tegra_dma_desc_get(
- struct tegra_dma_channel *tdc)
+static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
{
struct tegra_dma_desc *dma_desc;
unsigned long flags;
@@ -299,11 +295,12 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
dma_desc->txd.tx_submit = tegra_dma_tx_submit;
dma_desc->txd.flags = 0;
+
return dma_desc;
}
static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
- struct tegra_dma_desc *dma_desc)
+ struct tegra_dma_desc *dma_desc)
{
unsigned long flags;
@@ -314,29 +311,29 @@ static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
spin_unlock_irqrestore(&tdc->lock, flags);
}
-static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
- struct tegra_dma_channel *tdc)
+static struct tegra_dma_sg_req *
+tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
{
- struct tegra_dma_sg_req *sg_req = NULL;
+ struct tegra_dma_sg_req *sg_req;
unsigned long flags;
spin_lock_irqsave(&tdc->lock, flags);
if (!list_empty(&tdc->free_sg_req)) {
- sg_req = list_first_entry(&tdc->free_sg_req,
- typeof(*sg_req), node);
+ sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
+ node);
list_del(&sg_req->node);
spin_unlock_irqrestore(&tdc->lock, flags);
return sg_req;
}
spin_unlock_irqrestore(&tdc->lock, flags);
- sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);
+ sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);
return sg_req;
}
static int tegra_dma_slave_config(struct dma_chan *dc,
- struct dma_slave_config *sconfig)
+ struct dma_slave_config *sconfig)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
@@ -353,11 +350,12 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
tdc->slave_id = sconfig->slave_id;
}
tdc->config_init = true;
+
return 0;
}
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
- bool wait_for_burst_complete)
+ bool wait_for_burst_complete)
{
struct tegra_dma *tdma = tdc->tdma;
@@ -392,13 +390,13 @@ out:
}
static void tegra_dma_pause(struct tegra_dma_channel *tdc,
- bool wait_for_burst_complete)
+ bool wait_for_burst_complete)
{
struct tegra_dma *tdma = tdc->tdma;
if (tdma->chip_data->support_channel_pause) {
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
- TEGRA_APBDMA_CHAN_CSRE_PAUSE);
+ TEGRA_APBDMA_CHAN_CSRE_PAUSE);
if (wait_for_burst_complete)
udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
} else {
@@ -410,17 +408,15 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
struct tegra_dma *tdma = tdc->tdma;
- if (tdma->chip_data->support_channel_pause) {
+ if (tdma->chip_data->support_channel_pause)
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
- } else {
+ else
tegra_dma_global_resume(tdc);
- }
}
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
- u32 csr;
- u32 status;
+ u32 csr, status;
/* Disable interrupts */
csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
@@ -441,7 +437,7 @@ static void tegra_dma_stop(struct tegra_dma_channel *tdc)
}
static void tegra_dma_start(struct tegra_dma_channel *tdc,
- struct tegra_dma_sg_req *sg_req)
+ struct tegra_dma_sg_req *sg_req)
{
struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
@@ -455,11 +451,11 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc,
/* Start DMA */
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
- ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
+ ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}
static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
- struct tegra_dma_sg_req *nsg_req)
+ struct tegra_dma_sg_req *nsg_req)
{
unsigned long status;
@@ -493,9 +489,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
if (tdc->tdma->chip_data->support_separate_wcount_reg)
tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
- nsg_req->ch_regs.wcount);
+ nsg_req->ch_regs.wcount);
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
- nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
+ nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
nsg_req->words_xferred = 0;
@@ -506,11 +502,7 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
struct tegra_dma_sg_req *sg_req;
- if (list_empty(&tdc->pending_sg_req))
- return;
-
- sg_req = list_first_entry(&tdc->pending_sg_req,
- typeof(*sg_req), node);
+ sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
tegra_dma_start(tdc, sg_req);
sg_req->configured = true;
sg_req->words_xferred = 0;
@@ -519,34 +511,32 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc)
static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
- struct tegra_dma_sg_req *hsgreq;
- struct tegra_dma_sg_req *hnsgreq;
-
- if (list_empty(&tdc->pending_sg_req))
- return;
+ struct tegra_dma_sg_req *hsgreq, *hnsgreq;
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
- hnsgreq = list_first_entry(&hsgreq->node,
- typeof(*hnsgreq), node);
+ hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
+ node);
tegra_dma_configure_for_next(tdc, hnsgreq);
}
}
-static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
- struct tegra_dma_sg_req *sg_req, unsigned long status)
+static inline unsigned int
+get_current_xferred_count(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *sg_req,
+ unsigned long status)
{
return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
- struct tegra_dma_sg_req *sgreq;
struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sgreq;
while (!list_empty(&tdc->pending_sg_req)) {
- sgreq = list_first_entry(&tdc->pending_sg_req,
- typeof(*sgreq), node);
+ sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
+ node);
list_move_tail(&sgreq->node, &tdc->free_sg_req);
if (sgreq->last_sg) {
dma_desc = sgreq->dma_desc;
@@ -556,7 +546,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
/* Add in cb list if it is not there. */
if (!dma_desc->cb_count)
list_add_tail(&dma_desc->cb_node,
- &tdc->cb_desc);
+ &tdc->cb_desc);
dma_desc->cb_count++;
}
}
@@ -564,15 +554,9 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
}
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
- struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
+ bool to_terminate)
{
- struct tegra_dma_sg_req *hsgreq = NULL;
-
- if (list_empty(&tdc->pending_sg_req)) {
- dev_err(tdc2dev(tdc), "DMA is running without req\n");
- tegra_dma_stop(tdc);
- return false;
- }
+ struct tegra_dma_sg_req *hsgreq;
/*
* Check that head req on list should be in flight.
@@ -582,7 +566,8 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
if (!hsgreq->configured) {
tegra_dma_stop(tdc);
- dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
+ pm_runtime_put(tdc->tdma->dev);
+ dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
tegra_dma_abort_all(tdc);
return false;
}
@@ -590,14 +575,15 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
/* Configure next request */
if (!to_terminate)
tdc_configure_next_head_desc(tdc);
+
return true;
}
static void handle_once_dma_done(struct tegra_dma_channel *tdc,
- bool to_terminate)
+ bool to_terminate)
{
- struct tegra_dma_sg_req *sgreq;
struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sgreq;
tdc->busy = false;
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
@@ -616,17 +602,22 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
list_add_tail(&sgreq->node, &tdc->free_sg_req);
/* Do not start DMA if it is going to be terminated */
- if (to_terminate || list_empty(&tdc->pending_sg_req))
+ if (to_terminate)
return;
+ if (list_empty(&tdc->pending_sg_req)) {
+ pm_runtime_put(tdc->tdma->dev);
+ return;
+ }
+
tdc_start_head_req(tdc);
}
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
- bool to_terminate)
+ bool to_terminate)
{
- struct tegra_dma_sg_req *sgreq;
struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sgreq;
bool st;
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
@@ -647,7 +638,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
list_move_tail(&sgreq->node, &tdc->pending_sg_req);
sgreq->configured = false;
- st = handle_continuous_head_request(tdc, sgreq, to_terminate);
+ st = handle_continuous_head_request(tdc, to_terminate);
if (!st)
dma_desc->dma_status = DMA_ERROR;
}
@@ -658,13 +649,13 @@ static void tegra_dma_tasklet(unsigned long data)
struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
struct dmaengine_desc_callback cb;
struct tegra_dma_desc *dma_desc;
+ unsigned int cb_count;
unsigned long flags;
- int cb_count;
spin_lock_irqsave(&tdc->lock, flags);
while (!list_empty(&tdc->cb_desc)) {
- dma_desc = list_first_entry(&tdc->cb_desc,
- typeof(*dma_desc), cb_node);
+ dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
+ cb_node);
list_del(&dma_desc->cb_node);
dmaengine_desc_get_callback(&dma_desc->txd, &cb);
cb_count = dma_desc->cb_count;
@@ -682,10 +673,9 @@ static void tegra_dma_tasklet(unsigned long data)
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
struct tegra_dma_channel *tdc = dev_id;
- unsigned long status;
- unsigned long flags;
+ u32 status;
- spin_lock_irqsave(&tdc->lock, flags);
+ spin_lock(&tdc->lock);
trace_tegra_dma_isr(&tdc->dma_chan, irq);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
@@ -693,13 +683,15 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
tdc->isr_handler(tdc, false);
tasklet_schedule(&tdc->tasklet);
- spin_unlock_irqrestore(&tdc->lock, flags);
+ wake_up_all(&tdc->wq);
+ spin_unlock(&tdc->lock);
return IRQ_HANDLED;
}
- spin_unlock_irqrestore(&tdc->lock, flags);
- dev_info(tdc2dev(tdc),
- "Interrupt already served status 0x%08lx\n", status);
+ spin_unlock(&tdc->lock);
+ dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
+ status);
+
return IRQ_NONE;
}
@@ -715,6 +707,7 @@ static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
cookie = dma_cookie_assign(&dma_desc->txd);
list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
spin_unlock_irqrestore(&tdc->lock, flags);
+
return cookie;
}
@@ -722,6 +715,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned long flags;
+ int err;
spin_lock_irqsave(&tdc->lock, flags);
if (list_empty(&tdc->pending_sg_req)) {
@@ -729,6 +723,12 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
goto end;
}
if (!tdc->busy) {
+ err = pm_runtime_get_sync(tdc->tdma->dev);
+ if (err < 0) {
+ dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
+ goto end;
+ }
+
tdc_start_head_req(tdc);
/* Continuous single mode: Configure next req */
@@ -748,11 +748,10 @@ end:
static int tegra_dma_terminate_all(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
- struct tegra_dma_sg_req *sgreq;
struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sgreq;
unsigned long flags;
- unsigned long status;
- unsigned long wcount;
+ u32 status, wcount;
bool was_busy;
spin_lock_irqsave(&tdc->lock, flags);
@@ -778,30 +777,69 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
tegra_dma_stop(tdc);
if (!list_empty(&tdc->pending_sg_req) && was_busy) {
- sgreq = list_first_entry(&tdc->pending_sg_req,
- typeof(*sgreq), node);
+ sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
+ node);
sgreq->dma_desc->bytes_transferred +=
get_current_xferred_count(tdc, sgreq, wcount);
}
tegra_dma_resume(tdc);
+ pm_runtime_put(tdc->tdma->dev);
+ wake_up_all(&tdc->wq);
+
skip_dma_stop:
tegra_dma_abort_all(tdc);
while (!list_empty(&tdc->cb_desc)) {
- dma_desc = list_first_entry(&tdc->cb_desc,
- typeof(*dma_desc), cb_node);
+ dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
+ cb_node);
list_del(&dma_desc->cb_node);
dma_desc->cb_count = 0;
}
spin_unlock_irqrestore(&tdc->lock, flags);
+
return 0;
}
+static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
+{
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
+}
+
+static void tegra_dma_synchronize(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ int err;
+
+ err = pm_runtime_get_sync(tdc->tdma->dev);
+ if (err < 0) {
+ dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+ return;
+ }
+
+ /*
+ * The CPU that handles the interrupt could be busy in an
+ * uninterruptible state; in that case a sibling CPU should
+ * wait until the interrupt has been handled.
+ */
+ wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
+
+ tasklet_kill(&tdc->tasklet);
+
+ pm_runtime_put(tdc->tdma->dev);
+}
+
static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
struct tegra_dma_sg_req *sg_req)
{
- unsigned long status, wcount = 0;
+ u32 status, wcount = 0;
if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
return 0;
@@ -858,7 +896,8 @@ static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
}
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc;
@@ -905,11 +944,12 @@ found:
trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
spin_unlock_irqrestore(&tdc->lock, flags);
+
return ret;
}
-static inline int get_bus_width(struct tegra_dma_channel *tdc,
- enum dma_slave_buswidth slave_bw)
+static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
+ enum dma_slave_buswidth slave_bw)
{
switch (slave_bw) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -922,16 +962,17 @@ static inline int get_bus_width(struct tegra_dma_channel *tdc,
return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
default:
dev_warn(tdc2dev(tdc),
- "slave bw is not supported, using 32bits\n");
+ "slave bw is not supported, using 32bits\n");
return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
}
}
-static inline int get_burst_size(struct tegra_dma_channel *tdc,
- u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
+static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
+ u32 burst_size,
+ enum dma_slave_buswidth slave_bw,
+ u32 len)
{
- int burst_byte;
- int burst_ahb_width;
+ unsigned int burst_byte, burst_ahb_width;
/*
* burst_size from client is in terms of the bus_width.
@@ -958,9 +999,12 @@ static inline int get_burst_size(struct tegra_dma_channel *tdc,
}
static int get_transfer_param(struct tegra_dma_channel *tdc,
- enum dma_transfer_direction direction, unsigned long *apb_addr,
- unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
- enum dma_slave_buswidth *slave_bw)
+ enum dma_transfer_direction direction,
+ u32 *apb_addr,
+ u32 *apb_seq,
+ u32 *csr,
+ unsigned int *burst_size,
+ enum dma_slave_buswidth *slave_bw)
{
switch (direction) {
case DMA_MEM_TO_DEV:
@@ -981,13 +1025,15 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
default:
dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
- return -EINVAL;
+ break;
}
+
return -EINVAL;
}
static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
- struct tegra_dma_channel_regs *ch_regs, u32 len)
+ struct tegra_dma_channel_regs *ch_regs,
+ u32 len)
{
u32 len_field = (len - 4) & 0xFFFC;
@@ -997,20 +1043,23 @@ static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
ch_regs->csr |= len_field;
}
-static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
- struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_slave_sg(struct dma_chan *dc,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags,
+ void *context)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_sg_req *sg_req = NULL;
+ u32 csr, ahb_seq, apb_ptr, apb_seq;
+ enum dma_slave_buswidth slave_bw;
struct tegra_dma_desc *dma_desc;
- unsigned int i;
- struct scatterlist *sg;
- unsigned long csr, ahb_seq, apb_ptr, apb_seq;
struct list_head req_list;
- struct tegra_dma_sg_req *sg_req = NULL;
- u32 burst_size;
- enum dma_slave_buswidth slave_bw;
+ struct scatterlist *sg;
+ unsigned int burst_size;
+ unsigned int i;
if (!tdc->config_init) {
dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
@@ -1022,7 +1071,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
}
if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
- &burst_size, &slave_bw) < 0)
+ &burst_size, &slave_bw) < 0)
return NULL;
INIT_LIST_HEAD(&req_list);
@@ -1068,7 +1117,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
len = sg_dma_len(sg);
if ((len & 3) || (mem & 3) ||
- (len > tdc->tdma->chip_data->max_dma_count)) {
+ len > tdc->tdma->chip_data->max_dma_count) {
dev_err(tdc2dev(tdc),
"DMA length/memory address is not supported\n");
tegra_dma_desc_put(tdc, dma_desc);
@@ -1120,20 +1169,21 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
return &dma_desc->txd;
}
-static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
- struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
+ size_t buf_len,
+ size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
- struct tegra_dma_desc *dma_desc = NULL;
struct tegra_dma_sg_req *sg_req = NULL;
- unsigned long csr, ahb_seq, apb_ptr, apb_seq;
- int len;
- size_t remain_len;
- dma_addr_t mem = buf_addr;
- u32 burst_size;
+ u32 csr, ahb_seq, apb_ptr, apb_seq;
enum dma_slave_buswidth slave_bw;
+ struct tegra_dma_desc *dma_desc;
+ dma_addr_t mem = buf_addr;
+ unsigned int burst_size;
+ size_t len, remain_len;
if (!buf_len || !period_len) {
dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
@@ -1167,13 +1217,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
len = period_len;
if ((len & 3) || (buf_addr & 3) ||
- (len > tdc->tdma->chip_data->max_dma_count)) {
+ len > tdc->tdma->chip_data->max_dma_count) {
dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
return NULL;
}
if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
- &burst_size, &slave_bw) < 0)
+ &burst_size, &slave_bw) < 0)
return NULL;
ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
@@ -1259,15 +1309,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
- struct tegra_dma *tdma = tdc->tdma;
- int ret;
dma_cookie_init(&tdc->dma_chan);
- tdc->config_init = false;
-
- ret = pm_runtime_get_sync(tdma->dev);
- if (ret < 0)
- return ret;
return 0;
}
@@ -1275,33 +1318,29 @@ static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
- struct tegra_dma *tdma = tdc->tdma;
struct tegra_dma_desc *dma_desc;
struct tegra_dma_sg_req *sg_req;
struct list_head dma_desc_list;
struct list_head sg_req_list;
- unsigned long flags;
INIT_LIST_HEAD(&dma_desc_list);
INIT_LIST_HEAD(&sg_req_list);
dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
- if (tdc->busy)
- tegra_dma_terminate_all(dc);
+ tegra_dma_terminate_all(dc);
+ tasklet_kill(&tdc->tasklet);
- spin_lock_irqsave(&tdc->lock, flags);
list_splice_init(&tdc->pending_sg_req, &sg_req_list);
list_splice_init(&tdc->free_sg_req, &sg_req_list);
list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
INIT_LIST_HEAD(&tdc->cb_desc);
tdc->config_init = false;
tdc->isr_handler = NULL;
- spin_unlock_irqrestore(&tdc->lock, flags);
while (!list_empty(&dma_desc_list)) {
- dma_desc = list_first_entry(&dma_desc_list,
- typeof(*dma_desc), node);
+ dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
+ node);
list_del(&dma_desc->node);
kfree(dma_desc);
}
@@ -1311,7 +1350,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
list_del(&sg_req->node);
kfree(sg_req);
}
- pm_runtime_put(tdma->dev);
tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}
@@ -1320,8 +1358,8 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct tegra_dma *tdma = ofdma->of_dma_data;
- struct dma_chan *chan;
struct tegra_dma_channel *tdc;
+ struct dma_chan *chan;
if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
@@ -1374,23 +1412,48 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
.support_separate_wcount_reg = true,
};
+static int tegra_dma_init_hw(struct tegra_dma *tdma)
+{
+ int err;
+
+ err = reset_control_assert(tdma->rst);
+ if (err) {
+ dev_err(tdma->dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_enable(tdma->dma_clk);
+ if (err) {
+ dev_err(tdma->dev, "failed to enable clk: %d\n", err);
+ return err;
+ }
+
+ /* reset DMA controller */
+ udelay(2);
+ reset_control_deassert(tdma->rst);
+
+ /* enable global DMA registers */
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+ tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
+ tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);
+
+ clk_disable(tdma->dma_clk);
+
+ return 0;
+}
+
static int tegra_dma_probe(struct platform_device *pdev)
{
- struct resource *res;
+ const struct tegra_dma_chip_data *cdata;
struct tegra_dma *tdma;
+ unsigned int i;
+ size_t size;
int ret;
- int i;
- const struct tegra_dma_chip_data *cdata;
cdata = of_device_get_match_data(&pdev->dev);
- if (!cdata) {
- dev_err(&pdev->dev, "Error: No device match data found\n");
- return -ENODEV;
- }
+ size = struct_size(tdma, channels, cdata->nr_channels);
- tdma = devm_kzalloc(&pdev->dev,
- struct_size(tdma, channels, cdata->nr_channels),
- GFP_KERNEL);
+ tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!tdma)
return -ENOMEM;
@@ -1398,8 +1461,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma->chip_data = cdata;
platform_set_drvdata(pdev, tdma);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
@@ -1417,64 +1479,54 @@ static int tegra_dma_probe(struct platform_device *pdev)
spin_lock_init(&tdma->global_lock);
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev))
- ret = tegra_dma_runtime_resume(&pdev->dev);
- else
- ret = pm_runtime_get_sync(&pdev->dev);
-
- if (ret < 0) {
- pm_runtime_disable(&pdev->dev);
+ ret = clk_prepare(tdma->dma_clk);
+ if (ret)
return ret;
- }
- /* Reset DMA controller */
- reset_control_assert(tdma->rst);
- udelay(2);
- reset_control_deassert(tdma->rst);
-
- /* Enable global DMA registers */
- tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
- tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
- tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+ ret = tegra_dma_init_hw(tdma);
+ if (ret)
+ goto err_clk_unprepare;
- pm_runtime_put(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < cdata->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
+ int irq;
tdc->chan_addr = tdma->base_addr +
TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
(i * cdata->channel_reg_size);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res) {
- ret = -EINVAL;
- dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
- goto err_irq;
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = irq;
+ goto err_pm_disable;
}
- tdc->irq = res->start;
+
snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
- ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
+ ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
+ tdc->name, tdc);
if (ret) {
dev_err(&pdev->dev,
"request_irq failed with err %d channel %d\n",
ret, i);
- goto err_irq;
+ goto err_pm_disable;
}
tdc->dma_chan.device = &tdma->dma_dev;
dma_cookie_init(&tdc->dma_chan);
list_add_tail(&tdc->dma_chan.device_node,
- &tdma->dma_dev.channels);
+ &tdma->dma_dev.channels);
tdc->tdma = tdma;
tdc->id = i;
tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
- (unsigned long)tdc);
+ (unsigned long)tdc);
spin_lock_init(&tdc->lock);
+ init_waitqueue_head(&tdc->wq);
INIT_LIST_HEAD(&tdc->pending_sg_req);
INIT_LIST_HEAD(&tdc->free_sg_req);
@@ -1506,6 +1558,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
tdma->dma_dev.device_config = tegra_dma_slave_config;
tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
+ tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
@@ -1513,7 +1566,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"Tegra20 APB DMA driver registration failed %d\n", ret);
- goto err_irq;
+ goto err_pm_disable;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -1524,118 +1577,92 @@ static int tegra_dma_probe(struct platform_device *pdev)
goto err_unregister_dma_dev;
}
- dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n",
- cdata->nr_channels);
+ dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n",
+ cdata->nr_channels);
+
return 0;
err_unregister_dma_dev:
dma_async_device_unregister(&tdma->dma_dev);
-err_irq:
- while (--i >= 0) {
- struct tegra_dma_channel *tdc = &tdma->channels[i];
-
- free_irq(tdc->irq, tdc);
- tasklet_kill(&tdc->tasklet);
- }
+err_pm_disable:
pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- tegra_dma_runtime_suspend(&pdev->dev);
+
+err_clk_unprepare:
+ clk_unprepare(tdma->dma_clk);
+
return ret;
}
static int tegra_dma_remove(struct platform_device *pdev)
{
struct tegra_dma *tdma = platform_get_drvdata(pdev);
- int i;
- struct tegra_dma_channel *tdc;
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
-
- for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
- tdc = &tdma->channels[i];
- free_irq(tdc->irq, tdc);
- tasklet_kill(&tdc->tasklet);
- }
-
pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- tegra_dma_runtime_suspend(&pdev->dev);
+ clk_unprepare(tdma->dma_clk);
return 0;
}
-static int tegra_dma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
- int i;
-
- tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
- for (i = 0; i < tdma->chip_data->nr_channels; i++) {
- struct tegra_dma_channel *tdc = &tdma->channels[i];
- struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
-
- /* Only save the state of DMA channels that are in use */
- if (!tdc->config_init)
- continue;
-
- ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
- ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
- ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
- ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
- ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
- if (tdma->chip_data->support_separate_wcount_reg)
- ch_reg->wcount = tdc_read(tdc,
- TEGRA_APBDMA_CHAN_WCOUNT);
- }
- clk_disable_unprepare(tdma->dma_clk);
+ clk_disable(tdma->dma_clk);
return 0;
}
-static int tegra_dma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
- int i, ret;
- ret = clk_prepare_enable(tdma->dma_clk);
- if (ret < 0) {
- dev_err(dev, "clk_enable failed: %d\n", ret);
- return ret;
- }
+ return clk_enable(tdma->dma_clk);
+}
- tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
- tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
- tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+ bool busy;
for (i = 0; i < tdma->chip_data->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
- struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
-
- /* Only restore the state of DMA channels that are in use */
- if (!tdc->config_init)
- continue;
-
- if (tdma->chip_data->support_separate_wcount_reg)
- tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
- ch_reg->wcount);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
- (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
+
+ tasklet_kill(&tdc->tasklet);
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ busy = tdc->busy;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ if (busy) {
+ dev_err(tdma->dev, "channel %u busy\n", i);
+ return -EBUSY;
+ }
}
- return 0;
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ int err;
+
+ err = tegra_dma_init_hw(tdma);
+ if (err)
+ return err;
+
+ return pm_runtime_force_resume(dev);
}
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
};
static const struct of_device_id tegra_dma_of_match[] = {
@@ -1668,7 +1695,6 @@ static struct platform_driver tegra_dmac_driver = {
module_platform_driver(tegra_dmac_driver);
-MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");
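The device_synchronize() scheme added above has the ISR wake tdc->wq once the EOC status deasserts; a user-space analogue using a condition variable in place of the kernel waitqueue:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static bool eoc_pending;

/* "Interrupt" side: clear the pending flag and wake all waiters,
 * analogous to wake_up_all(&tdc->wq) in the ISR. */
static void isr_done(void)
{
    pthread_mutex_lock(&lock);
    eoc_pending = false;
    pthread_cond_broadcast(&wq);
    pthread_mutex_unlock(&lock);
}

/* Caller side: block until the flag clears, analogous to
 * wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc)). */
static void synchronize(void)
{
    pthread_mutex_lock(&lock);
    while (eoc_pending)
        pthread_cond_wait(&wq, &lock);
    pthread_mutex_unlock(&lock);
}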
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 6e1268552f74..db58d7e4f9fe 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -164,7 +164,7 @@ struct tegra_adma {
const struct tegra_adma_chip_data *cdata;
/* Last member of the structure */
- struct tegra_adma_chan channels[0];
+ struct tegra_adma_chan channels[];
};
static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
@@ -900,7 +900,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&tdma->dma_dev);
if (ret < 0) {
dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
- goto irq_dispose;
+ goto rpm_put;
}
ret = of_dma_controller_register(pdev->dev.of_node,
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index f255056696ee..4ba8fa5d9c36 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -133,7 +133,6 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct device_node *dma_node;
struct ti_am335x_xbar_data *xbar;
- struct resource *res;
void __iomem *iomem;
int i, ret;
@@ -173,8 +172,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
xbar->xbar_events = TI_AM335X_XBAR_LINES;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
@@ -323,7 +321,6 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
struct device_node *dma_node;
struct ti_dra7_xbar_data *xbar;
struct property *prop;
- struct resource *res;
u32 safe_val;
int sz;
void __iomem *iomem;
@@ -403,8 +400,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
kfree(rsv_events);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
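These hunks replace the platform_get_resource()/devm_ioremap_resource() pair with devm_platform_ioremap_resource(); simplified, the helper folds the same two calls together (kernel context assumed, sketching what the real helper in drivers/base/platform.c does):

void __iomem *devm_platform_ioremap_resource_sketch(struct platform_device *pdev,
                                                    unsigned int index)
{
    struct resource *res;

    res = platform_get_resource(pdev, IORESOURCE_MEM, index);
    return devm_ioremap_resource(&pdev->dev, res);
}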
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 03a7f647f7b2..c4a5c170c1f9 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1275,6 +1275,81 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
+static struct dma_async_tx_descriptor *
+edma_prep_dma_interleaved(struct dma_chan *chan,
+ struct dma_interleaved_template *xt,
+ unsigned long tx_flags)
+{
+ struct device *dev = chan->device->dev;
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct edmacc_param *param;
+ struct edma_desc *edesc;
+ size_t src_icg, dst_icg;
+ int src_bidx, dst_bidx;
+
+ /* Slave mode is not supported */
+ if (is_slave_direction(xt->dir))
+ return NULL;
+
+ if (xt->frame_size != 1 || xt->numf == 0)
+ return NULL;
+
+ if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
+ return NULL;
+
+ src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+ if (src_icg) {
+ src_bidx = src_icg + xt->sgl[0].size;
+ } else if (xt->src_inc) {
+ src_bidx = xt->sgl[0].size;
+ } else {
+ dev_err(dev, "%s: SRC constant addressing is not supported\n",
+ __func__);
+ return NULL;
+ }
+
+ dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+ if (dst_icg) {
+ dst_bidx = dst_icg + xt->sgl[0].size;
+ } else if (xt->dst_inc) {
+ dst_bidx = xt->sgl[0].size;
+ } else {
+ dev_err(dev, "%s: DST constant addressing is not supported\n",
+ __func__);
+ return NULL;
+ }
+
+ if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
+ return NULL;
+
+ edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
+ if (!edesc)
+ return NULL;
+
+ edesc->direction = DMA_MEM_TO_MEM;
+ edesc->echan = echan;
+ edesc->pset_nr = 1;
+
+ param = &edesc->pset[0].param;
+
+ param->src = xt->src_start;
+ param->dst = xt->dst_start;
+ param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
+ param->ccnt = 1;
+ param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ param->src_dst_cidx = 0;
+
+ param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ param->opt |= ITCCHEN;
+ /* Enable transfer complete interrupt if requested */
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ param->opt |= TCINTEN;
+ else
+ edesc->polled = true;
+
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
@@ -1917,7 +1992,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
"Legacy memcpy is enabled, things might not work\n");
dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+ s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
s_ddev->directions = BIT(DMA_MEM_TO_MEM);
}
@@ -1953,8 +2030,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
dma_cap_zero(m_ddev->cap_mask);
dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+ m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
m_ddev->device_free_chan_resources = edma_free_chan_resources;
m_ddev->device_issue_pending = edma_issue_pending;
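The interleaved-template support above derives the EDMA B-index from the chunk size plus the inter-chunk gap; a small sketch of that stride math:

#include <stddef.h>
#include <stdio.h>

/* B-index = byte distance between the starts of consecutive rows:
 * chunk size plus gap when an inter-chunk gap exists, just the chunk
 * size when the side increments densely, and unsupported (like the
 * driver's error path) for a constant address. */
static int bidx(size_t icg, size_t size, int inc)
{
    if (icg)
        return (int)(icg + size);
    if (inc)
        return (int)size;
    return -1;
}

int main(void)
{
    printf("%d\n", bidx(16, 64, 1)); /* 80: 64-byte rows with a 16-byte gap */
    printf("%d\n", bidx(0, 64, 1));  /* 64: densely packed rows */
    return 0;
}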
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index d7b965049ccb..fb7c8150b0d1 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
soc_ep_map = &j721e_ep_map;
} else {
pr_err("PSIL: No compatible machine found for map\n");
+ mutex_unlock(&ep_map_mutex);
return ERR_PTR(-ENOTSUPP);
}
pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
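The one-line fix above closes a lock leak: the error path returned with ep_map_mutex still held. A sketch of the pattern, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *soc_map;

/* Every exit from the locked region must release the mutex, including
 * the early error return. */
static const char *get_map(void)
{
    const char *map;

    pthread_mutex_lock(&map_mutex);
    if (!soc_map) {
        pthread_mutex_unlock(&map_mutex); /* the unlock the patch adds */
        return NULL;                      /* ERR_PTR(-ENOTSUPP) in the driver */
    }
    map = soc_map;
    pthread_mutex_unlock(&map_mutex);
    return map;
}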
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 4d7561a1b3e3..64c8955e0cf1 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -32,6 +32,7 @@ struct k3_udma_glue_common {
bool epib;
u32 psdata_size;
u32 swdata_size;
+ u32 atype;
};
struct k3_udma_glue_tx_channel {
@@ -121,6 +122,15 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
return -ENOENT;
thread_id = dma_spec.args[0];
+ if (dma_spec.args_count == 2) {
+ if (dma_spec.args[1] > 2) {
+ dev_err(common->dev, "Invalid channel atype: %u\n",
+ dma_spec.args[1]);
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+ common->atype = dma_spec.args[1];
+ }
if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
ret = -EINVAL;
@@ -202,7 +212,8 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
req.nav_id = tisci_rm->tisci_dev_id;
req.index = tx_chn->udma_tchan_id;
if (tx_chn->tx_pause_on_err)
@@ -216,6 +227,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
req.tx_supr_tdpkt = 1;
req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+ req.tx_atype = tx_chn->common.atype;
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
@@ -502,7 +514,8 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
req.nav_id = tisci_rm->tisci_dev_id;
req.index = rx_chn->udma_rchan_id;
@@ -519,6 +532,7 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
req.flowid_cnt = rx_chn->flow_num;
}
req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ req.rx_atype = rx_chn->common.atype;
ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
if (ret)
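The parsing added above accepts an optional second DT cell for the address type and bounds it to 0..2; a sketch of that validation, with args/args_count standing in for the of_phandle_args fields:

#include <errno.h>
#include <stdint.h>

static int parse_atype(const uint32_t *args, int args_count, uint32_t *atype)
{
    *atype = 0;                 /* default when the cell is absent */
    if (args_count == 2) {
        if (args[1] > 2)
            return -EINVAL;     /* reject unknown atype values */
        *atype = args[1];
    }
    return 0;
}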
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 0536866a58ce..a90e154b0ae0 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -149,6 +149,7 @@ struct udma_dev {
struct udma_chan *channels;
u32 psil_base;
+ u32 atype;
};
struct udma_desc {
@@ -192,6 +193,7 @@ struct udma_chan_config {
u32 hdesc_size; /* Size of a packet descriptor in packet mode */
bool notdpkt; /* Suppress sending TDC packet */
int remote_thread_id;
+ u32 atype;
u32 src_thread;
u32 dst_thread;
enum psil_endpoint_type ep_type;
@@ -1569,7 +1571,8 @@ err_rflow:
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
#define TISCI_RCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
@@ -1579,7 +1582,8 @@ err_rflow:
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
@@ -1601,6 +1605,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
req_tx.txcq_qnum = tc_ring;
+ req_tx.tx_atype = ud->atype;
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret) {
@@ -1614,6 +1619,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
req_rx.rxcq_qnum = tc_ring;
req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+ req_rx.rx_atype = ud->atype;
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
if (ret)
@@ -1649,6 +1655,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
req_tx.tx_supr_tdpkt = uc->config.notdpkt;
req_tx.tx_fetch_size = fetch_size >> 2;
req_tx.txcq_qnum = tc_ring;
+ req_tx.tx_atype = uc->config.atype;
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret)
@@ -1685,6 +1692,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
req_rx.rx_fetch_size = fetch_size >> 2;
req_rx.rxcq_qnum = rx_ring;
req_rx.rx_chan_type = mode;
+ req_rx.rx_atype = uc->config.atype;
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
if (ret) {
@@ -2148,7 +2156,8 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
d->residue += sg_dma_len(sgent);
}
- cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
+ CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
return d;
}
@@ -2725,7 +2734,8 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
tr_req[1].dicnt3 = 1;
}
- cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
+ CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
if (uc->config.metadata_size)
d->vd.tx.metadata_ops = &metadata_ops;
@@ -3063,13 +3073,18 @@ static void udma_free_chan_resources(struct dma_chan *chan)
static struct platform_driver udma_driver;
+struct udma_filter_param {
+ int remote_thread_id;
+ u32 atype;
+};
+
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
{
struct udma_chan_config *ucc;
struct psil_endpoint_config *ep_config;
+ struct udma_filter_param *filter_param;
struct udma_chan *uc;
struct udma_dev *ud;
- u32 *args;
if (chan->device->dev->driver != &udma_driver.driver)
return false;
@@ -3077,9 +3092,16 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
uc = to_udma_chan(chan);
ucc = &uc->config;
ud = uc->ud;
- args = param;
+ filter_param = param;
+
+ if (filter_param->atype > 2) {
+ dev_err(ud->dev, "Invalid channel atype: %u\n",
+ filter_param->atype);
+ return false;
+ }
- ucc->remote_thread_id = args[0];
+ ucc->remote_thread_id = filter_param->remote_thread_id;
+ ucc->atype = filter_param->atype;
if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
ucc->dir = DMA_MEM_TO_DEV;
@@ -3092,6 +3114,7 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->remote_thread_id);
ucc->dir = DMA_MEM_TO_MEM;
ucc->remote_thread_id = -1;
+ ucc->atype = 0;
return false;
}
@@ -3130,13 +3153,20 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
{
struct udma_dev *ud = ofdma->of_dma_data;
dma_cap_mask_t mask = ud->ddev.cap_mask;
+ struct udma_filter_param filter_param;
struct dma_chan *chan;
- if (dma_spec->args_count != 1)
+ if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
return NULL;
- chan = __dma_request_channel(&mask, udma_dma_filter_fn,
- &dma_spec->args[0], ofdma->of_node);
+ filter_param.remote_thread_id = dma_spec->args[0];
+ if (dma_spec->args_count == 2)
+ filter_param.atype = dma_spec->args[1];
+ else
+ filter_param.atype = 0;
+
+ chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
return ERR_PTR(-EINVAL);
@@ -3473,6 +3503,66 @@ static int udma_setup_rx_flush(struct udma_dev *ud)
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+static void udma_dbg_summary_show_chan(struct seq_file *s,
+ struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_chan_config *ucc = &uc->config;
+
+ seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+ chan->dbg_client_name ?: "in-use");
+ seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
+ ucc->src_thread, ucc->dst_thread);
+ break;
+ case DMA_DEV_TO_MEM:
+ seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
+ ucc->src_thread, ucc->dst_thread);
+ break;
+ case DMA_MEM_TO_DEV:
+ seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
+ ucc->src_thread, ucc->dst_thread);
+ break;
+ default:
+ seq_printf(s, ")\n");
+ return;
+ }
+
+ if (ucc->ep_type == PSIL_EP_NATIVE) {
+ seq_printf(s, "PSI-L Native");
+ if (ucc->metadata_size) {
+ seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
+ if (ucc->psd_size)
+ seq_printf(s, " PSDsize:%u", ucc->psd_size);
+ seq_printf(s, " ]");
+ }
+ } else {
+ seq_printf(s, "PDMA");
+ if (ucc->enable_acc32 || ucc->enable_burst)
+ seq_printf(s, "[%s%s ]",
+ ucc->enable_acc32 ? " ACC32" : "",
+ ucc->enable_burst ? " BURST" : "");
+ }
+
+ seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
+}
+
+static void udma_dbg_summary_show(struct seq_file *s,
+ struct dma_device *dma_dev)
+{
+ struct dma_chan *chan;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count)
+ udma_dbg_summary_show_chan(s, chan);
+ }
+}
+#endif /* CONFIG_DEBUG_FS */
+
#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
@@ -3519,6 +3609,12 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
+ if (!ret && ud->atype > 2) {
+ dev_err(dev, "Invalid atype: %u\n", ud->atype);
+ return -EINVAL;
+ }
+
ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
@@ -3553,6 +3649,9 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.device_resume = udma_resume;
ud->ddev.device_terminate_all = udma_terminate_all;
ud->ddev.device_synchronize = udma_synchronize;
+#ifdef CONFIG_DEBUG_FS
+ ud->ddev.dbg_summary_show = udma_dbg_summary_show;
+#endif
ud->ddev.device_free_chan_resources = udma_free_chan_resources;
ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index a014ab96e673..918301e17552 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -124,7 +124,7 @@ struct omap_desc {
uint32_t csdp; /* CSDP value */
unsigned sglen;
- struct omap_sg sg[0];
+ struct omap_sg sg[];
};
enum {
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index 21b8f1131d55..618839df0748 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -68,7 +68,7 @@ struct uniphier_mdmac_device {
struct dma_device ddev;
struct clk *clk;
void __iomem *reg_base;
- struct uniphier_mdmac_chan channels[0];
+ struct uniphier_mdmac_chan channels[];
};
static struct uniphier_mdmac_chan *
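
The omap-dma and uniphier-mdmac hunks above (and the new uniphier-xdmac driver
below) use C99 flexible array members ("sg[]", "channels[]") instead of the old
GNU zero-length "[0]" idiom, which pairs with struct_size() for overflow-safe
allocation. A minimal sketch with a hypothetical struct:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo {
	unsigned int n;
	u32 items[];		/* flexible array member, not items[0] */
};

static struct demo *demo_alloc(unsigned int n)
{
	/*
	 * struct_size() evaluates to sizeof(*d) + n * sizeof(d->items[0]),
	 * saturating to SIZE_MAX if the multiplication would overflow.
	 */
	struct demo *d = kzalloc(struct_size(d, items, n), GFP_KERNEL);

	if (d)
		d->n = n;
	return d;
}
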
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
new file mode 100644
index 000000000000..7b2f8a8c2d31
--- /dev/null
+++ b/drivers/dma/uniphier-xdmac.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * External DMA controller driver for UniPhier SoCs
+ * Copyright 2019 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define XDMAC_CH_WIDTH 0x100
+
+#define XDMAC_TFA 0x08
+#define XDMAC_TFA_MCNT_MASK GENMASK(23, 16)
+#define XDMAC_TFA_MASK GENMASK(5, 0)
+#define XDMAC_SADM 0x10
+#define XDMAC_SADM_STW_MASK GENMASK(25, 24)
+#define XDMAC_SADM_SAM BIT(4)
+#define XDMAC_SADM_SAM_FIXED XDMAC_SADM_SAM
+#define XDMAC_SADM_SAM_INC 0
+#define XDMAC_DADM 0x14
+#define XDMAC_DADM_DTW_MASK XDMAC_SADM_STW_MASK
+#define XDMAC_DADM_DAM XDMAC_SADM_SAM
+#define XDMAC_DADM_DAM_FIXED XDMAC_SADM_SAM_FIXED
+#define XDMAC_DADM_DAM_INC XDMAC_SADM_SAM_INC
+#define XDMAC_EXSAD 0x18
+#define XDMAC_EXDAD 0x1c
+#define XDMAC_SAD 0x20
+#define XDMAC_DAD 0x24
+#define XDMAC_ITS 0x28
+#define XDMAC_ITS_MASK GENMASK(25, 0)
+#define XDMAC_TNUM 0x2c
+#define XDMAC_TNUM_MASK GENMASK(15, 0)
+#define XDMAC_TSS 0x30
+#define XDMAC_TSS_REQ BIT(0)
+#define XDMAC_IEN 0x34
+#define XDMAC_IEN_ERRIEN BIT(1)
+#define XDMAC_IEN_ENDIEN BIT(0)
+#define XDMAC_STAT 0x40
+#define XDMAC_STAT_TENF BIT(0)
+#define XDMAC_IR 0x44
+#define XDMAC_IR_ERRF BIT(1)
+#define XDMAC_IR_ENDF BIT(0)
+#define XDMAC_ID 0x48
+#define XDMAC_ID_ERRIDF BIT(1)
+#define XDMAC_ID_ENDIDF BIT(0)
+
+#define XDMAC_MAX_CHANS 16
+#define XDMAC_INTERVAL_CLKS 20
+#define XDMAC_MAX_WORDS XDMAC_TNUM_MASK
+
+/* cut lower bits to maintain alignment of the maximum transfer size */
+#define XDMAC_MAX_WORD_SIZE (XDMAC_ITS_MASK & ~GENMASK(3, 0))
+
+#define UNIPHIER_XDMAC_BUSWIDTHS \
+ (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct uniphier_xdmac_desc_node {
+ dma_addr_t src;
+ dma_addr_t dst;
+ u32 burst_size;
+ u32 nr_burst;
+};
+
+struct uniphier_xdmac_desc {
+ struct virt_dma_desc vd;
+
+ unsigned int nr_node;
+ unsigned int cur_node;
+ enum dma_transfer_direction dir;
+ struct uniphier_xdmac_desc_node nodes[];
+};
+
+struct uniphier_xdmac_chan {
+ struct virt_dma_chan vc;
+ struct uniphier_xdmac_device *xdev;
+ struct uniphier_xdmac_desc *xd;
+ void __iomem *reg_ch_base;
+ struct dma_slave_config sconfig;
+ int id;
+ unsigned int req_factor;
+};
+
+struct uniphier_xdmac_device {
+ struct dma_device ddev;
+ void __iomem *reg_base;
+ int nr_chans;
+ struct uniphier_xdmac_chan channels[];
+};
+
+static struct uniphier_xdmac_chan *
+to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
+{
+ return container_of(vc, struct uniphier_xdmac_chan, vc);
+}
+
+static struct uniphier_xdmac_desc *
+to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct uniphier_xdmac_desc, vd);
+}
+
+/* xc->vc.lock must be held by caller */
+static struct uniphier_xdmac_desc *
+uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&xc->vc);
+ if (!vd)
+ return NULL;
+
+ list_del(&vd->node);
+
+ return to_uniphier_xdmac_desc(vd);
+}
+
+/* xc->vc.lock must be held by caller */
+static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
+ struct uniphier_xdmac_desc *xd)
+{
+ u32 src_mode, src_addr, src_width;
+ u32 dst_mode, dst_addr, dst_width;
+ u32 val, its, tnum;
+ enum dma_slave_buswidth buswidth;
+
+ src_addr = xd->nodes[xd->cur_node].src;
+ dst_addr = xd->nodes[xd->cur_node].dst;
+ its = xd->nodes[xd->cur_node].burst_size;
+ tnum = xd->nodes[xd->cur_node].nr_burst;
+
+ /*
+	 * The width of the MEM side must be 4 or 8 bytes; this affects
+	 * neither the DEV side width nor the transfer size.
+ */
+ if (xd->dir == DMA_DEV_TO_MEM) {
+ src_mode = XDMAC_SADM_SAM_FIXED;
+ buswidth = xc->sconfig.src_addr_width;
+ } else {
+ src_mode = XDMAC_SADM_SAM_INC;
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ }
+ src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));
+
+ if (xd->dir == DMA_MEM_TO_DEV) {
+ dst_mode = XDMAC_DADM_DAM_FIXED;
+ buswidth = xc->sconfig.dst_addr_width;
+ } else {
+ dst_mode = XDMAC_DADM_DAM_INC;
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ }
+ dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));
+
+ /* setup transfer factor */
+ val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
+ val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
+ writel(val, xc->reg_ch_base + XDMAC_TFA);
+
+ /* setup the channel */
+ writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
+ writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);
+
+ writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
+ writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);
+
+ src_mode |= src_width;
+ dst_mode |= dst_width;
+ writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
+ writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);
+
+ writel(its, xc->reg_ch_base + XDMAC_ITS);
+ writel(tnum, xc->reg_ch_base + XDMAC_TNUM);
+
+ /* enable interrupt */
+ writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
+ xc->reg_ch_base + XDMAC_IEN);
+
+ /* start XDMAC */
+ val = readl(xc->reg_ch_base + XDMAC_TSS);
+ val |= XDMAC_TSS_REQ;
+ writel(val, xc->reg_ch_base + XDMAC_TSS);
+}
+
+/* xc->vc.lock must be held by caller */
+static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
+{
+ u32 val;
+
+ /* disable interrupt */
+ val = readl(xc->reg_ch_base + XDMAC_IEN);
+ val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
+ writel(val, xc->reg_ch_base + XDMAC_IEN);
+
+ /* stop XDMAC */
+ val = readl(xc->reg_ch_base + XDMAC_TSS);
+ val &= ~XDMAC_TSS_REQ;
+	writel(val, xc->reg_ch_base + XDMAC_TSS);
+
+ /* wait until transfer is stopped */
+ return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
+ !(val & XDMAC_STAT_TENF), 100, 1000);
+}
+
+/* xc->vc.lock must be held by caller */
+static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
+{
+ struct uniphier_xdmac_desc *xd;
+
+ xd = uniphier_xdmac_next_desc(xc);
+ if (xd)
+ uniphier_xdmac_chan_start(xc, xd);
+
+	/* set the desc on the channel even when xd is NULL */
+ xc->xd = xd;
+}
+
+static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
+{
+ u32 stat;
+ int ret;
+
+ spin_lock(&xc->vc.lock);
+
+ stat = readl(xc->reg_ch_base + XDMAC_ID);
+
+ if (stat & XDMAC_ID_ERRIDF) {
+ ret = uniphier_xdmac_chan_stop(xc);
+ if (ret)
+ dev_err(xc->xdev->ddev.dev,
+ "DMA transfer error with aborting issue\n");
+ else
+ dev_err(xc->xdev->ddev.dev,
+ "DMA transfer error\n");
+
+ } else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
+ xc->xd->cur_node++;
+ if (xc->xd->cur_node >= xc->xd->nr_node) {
+ vchan_cookie_complete(&xc->xd->vd);
+ uniphier_xdmac_start(xc);
+ } else {
+ uniphier_xdmac_chan_start(xc, xc->xd);
+ }
+ }
+
+ /* write bits to clear */
+ writel(stat, xc->reg_ch_base + XDMAC_IR);
+
+ spin_unlock(&xc->vc.lock);
+}
+
+static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
+{
+ struct uniphier_xdmac_device *xdev = dev_id;
+ int i;
+
+ for (i = 0; i < xdev->nr_chans; i++)
+ uniphier_xdmac_chan_irq(&xdev->channels[i]);
+
+ return IRQ_HANDLED;
+}
+
+static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static struct dma_async_tx_descriptor *
+uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_desc *xd;
+ unsigned int nr;
+ size_t burst_size, tlen;
+ int i;
+
+ if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
+ return NULL;
+
+ nr = 1 + len / XDMAC_MAX_WORD_SIZE;
+
+ xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
+ if (!xd)
+ return NULL;
+
+	/* stop once len is consumed; burst_size would otherwise reach 0 */
+	for (i = 0; i < nr && len; i++) {
+ burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
+ xd->nodes[i].src = src;
+ xd->nodes[i].dst = dst;
+ xd->nodes[i].burst_size = burst_size;
+ xd->nodes[i].nr_burst = len / burst_size;
+ tlen = rounddown(len, burst_size);
+ src += tlen;
+ dst += tlen;
+ len -= tlen;
+ }
+
+ xd->dir = DMA_MEM_TO_MEM;
+	xd->nr_node = i;
+ xd->cur_node = 0;
+
+ return vchan_tx_prep(vc, &xd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
+ struct uniphier_xdmac_desc *xd;
+ struct scatterlist *sg;
+ enum dma_slave_buswidth buswidth;
+ u32 maxburst;
+ int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ buswidth = xc->sconfig.src_addr_width;
+ maxburst = xc->sconfig.src_maxburst;
+ } else {
+ buswidth = xc->sconfig.dst_addr_width;
+ maxburst = xc->sconfig.dst_maxburst;
+ }
+
+ if (!maxburst)
+ maxburst = 1;
+ if (maxburst > xc->xdev->ddev.max_burst) {
+ dev_err(xc->xdev->ddev.dev,
+ "Exceed maximum number of burst words\n");
+ return NULL;
+ }
+
+ xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
+ if (!xd)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
+ ? xc->sconfig.src_addr : sg_dma_address(sg);
+ xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
+ ? xc->sconfig.dst_addr : sg_dma_address(sg);
+ xd->nodes[i].burst_size = maxburst * buswidth;
+ xd->nodes[i].nr_burst =
+ sg_dma_len(sg) / xd->nodes[i].burst_size;
+
+ /*
+		 * A transfer whose size is not aligned to the unit size
+		 * (the number of burst words * bus-width) is currently
+		 * rejected, because the driver cannot transfer the
+		 * residue. To transfer an arbitrary size, set
+		 * 'src_maxburst' or 'dst_maxburst' of dma_slave_config
+		 * to 1.
+ */
+ if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
+ dev_err(xc->xdev->ddev.dev,
+ "Unaligned transfer size: %d", sg_dma_len(sg));
+ kfree(xd);
+ return NULL;
+ }
+
+ if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
+ dev_err(xc->xdev->ddev.dev,
+ "Exceed maximum transfer size");
+ kfree(xd);
+ return NULL;
+ }
+ }
+
+ xd->dir = direction;
+ xd->nr_node = sg_len;
+ xd->cur_node = 0;
+
+ return vchan_tx_prep(vc, &xd->vd, flags);
+}
+
+static int uniphier_xdmac_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
+
+ memcpy(&xc->sconfig, config, sizeof(*config));
+
+ return 0;
+}
+
+static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
+ unsigned long flags;
+ int ret = 0;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (xc->xd) {
+ vchan_terminate_vdesc(&xc->xd->vd);
+ xc->xd = NULL;
+ ret = uniphier_xdmac_chan_stop(xc);
+ }
+
+ vchan_get_all_descriptors(vc, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+
+ return ret;
+}
+
+static void uniphier_xdmac_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(to_virt_chan(chan));
+}
+
+static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (vchan_issue_pending(vc) && !xc->xd)
+ uniphier_xdmac_start(xc);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_uniphier_xdmac_desc(vd));
+}
+
+static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
+ int ch)
+{
+ struct uniphier_xdmac_chan *xc = &xdev->channels[ch];
+
+ xc->xdev = xdev;
+ xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
+ xc->vc.desc_free = uniphier_xdmac_desc_free;
+
+ vchan_init(&xc->vc, &xdev->ddev);
+}
+
+static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
+ int chan_id = dma_spec->args[0];
+
+ if (chan_id >= xdev->nr_chans)
+ return NULL;
+
+ xdev->channels[chan_id].id = chan_id;
+ xdev->channels[chan_id].req_factor = dma_spec->args[1];
+
+ return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
+}
+
+static int uniphier_xdmac_probe(struct platform_device *pdev)
+{
+ struct uniphier_xdmac_device *xdev;
+ struct device *dev = &pdev->dev;
+ struct dma_device *ddev;
+ int irq;
+ int nr_chans;
+ int i, ret;
+
+ if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
+ return -EINVAL;
+ if (nr_chans > XDMAC_MAX_CHANS)
+ nr_chans = XDMAC_MAX_CHANS;
+
+ xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
+ GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->nr_chans = nr_chans;
+ xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xdev->reg_base))
+ return PTR_ERR(xdev->reg_base);
+
+ ddev = &xdev->ddev;
+ ddev->dev = dev;
+ dma_cap_zero(ddev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
+ ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+ BIT(DMA_MEM_TO_MEM);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ ddev->max_burst = XDMAC_MAX_WORDS;
+ ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
+ ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
+ ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
+ ddev->device_config = uniphier_xdmac_slave_config;
+ ddev->device_terminate_all = uniphier_xdmac_terminate_all;
+ ddev->device_synchronize = uniphier_xdmac_synchronize;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = uniphier_xdmac_issue_pending;
+ INIT_LIST_HEAD(&ddev->channels);
+
+ for (i = 0; i < nr_chans; i++)
+ uniphier_xdmac_chan_init(xdev, i);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
+ IRQF_SHARED, "xdmac", xdev);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = dma_async_device_register(ddev);
+ if (ret) {
+ dev_err(dev, "Failed to register XDMA device\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(dev->of_node,
+ of_dma_uniphier_xlate, xdev);
+ if (ret) {
+ dev_err(dev, "Failed to register XDMA controller\n");
+ goto out_unregister_dmac;
+ }
+
+ platform_set_drvdata(pdev, xdev);
+
+ dev_info(&pdev->dev, "UniPhier XDMAC driver (%d channels)\n",
+ nr_chans);
+
+ return 0;
+
+out_unregister_dmac:
+ dma_async_device_unregister(ddev);
+
+ return ret;
+}
+
+static int uniphier_xdmac_remove(struct platform_device *pdev)
+{
+ struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
+ struct dma_device *ddev = &xdev->ddev;
+ struct dma_chan *chan;
+ int ret;
+
+ /*
+ * Before reaching here, almost all descriptors have been freed by the
+ * ->device_free_chan_resources() hook. However, each channel might
+ * still be holding one descriptor that was in flight at that moment.
+ * Terminate it to make sure the hardware is no longer running. Then
+ * free the channel resources once again to avoid a memory leak.
+ */
+ list_for_each_entry(chan, &ddev->channels, device_node) {
+ ret = dmaengine_terminate_sync(chan);
+ if (ret)
+ return ret;
+ uniphier_xdmac_free_chan_resources(chan);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(ddev);
+
+ return 0;
+}
+
+static const struct of_device_id uniphier_xdmac_match[] = {
+ { .compatible = "socionext,uniphier-xdmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
+
+static struct platform_driver uniphier_xdmac_driver = {
+ .probe = uniphier_xdmac_probe,
+ .remove = uniphier_xdmac_remove,
+ .driver = {
+ .name = "uniphier-xdmac",
+ .of_match_table = uniphier_xdmac_match,
+ },
+};
+module_platform_driver(uniphier_xdmac_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier external DMA controller driver");
+MODULE_LICENSE("GPL v2");
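
A consumer reaches this driver through the generic dmaengine slave API; the
sketch below is not part of the patch, and the channel name and FIFO address
are assumptions. Note src_maxburst = 1, which matches the driver's restriction
that arbitrary transfer sizes need a maxburst of 1:

#include <linux/dmaengine.h>

static int demo_start_rx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.src_addr = 0x18000000,		/* assumed device FIFO */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 1,		/* allows unaligned sizes */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* assumed dma-names entry */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out_release;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto out_release;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;

out_release:
	dma_release_channel(chan);
	return ret;
}
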
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a9c5d5cc9f2b..5429497d3560 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -125,7 +125,9 @@
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
/* HW specific definitions */
-#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
+#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
+#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -468,6 +470,7 @@ struct xilinx_dma_config {
struct clk **tx_clk, struct clk **txs_clk,
struct clk **rx_clk, struct clk **rxs_clk);
irqreturn_t (*irq_handler)(int irq, void *data);
+ const int max_channels;
};
/**
@@ -485,16 +488,15 @@ struct xilinx_dma_config {
* @txs_clk: DMA mm2s stream clock
* @rx_clk: DMA s2mm clock
* @rxs_clk: DMA s2mm stream clock
- * @nr_channels: Number of channels DMA device supports
- * @chan_id: DMA channel identifier
+ * @s2mm_chan_id: DMA s2mm channel identifier
+ * @mm2s_chan_id: DMA mm2s channel identifier
* @max_buffer_len: Max buffer length
- * @s2mm_index: S2MM channel index
*/
struct xilinx_dma_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
- struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
+ struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
u32 flush_on_fsync;
bool ext_addr;
struct platform_device *pdev;
@@ -504,10 +506,9 @@ struct xilinx_dma_device {
struct clk *txs_clk;
struct clk *rx_clk;
struct clk *rxs_clk;
- u32 nr_channels;
- u32 chan_id;
+ u32 s2mm_chan_id;
+ u32 mm2s_chan_id;
u32 max_buffer_len;
- u32 s2mm_index;
};
/* Macros */
@@ -1229,16 +1230,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
return ret;
spin_lock_irqsave(&chan->lock, flags);
-
- desc = list_last_entry(&chan->active_list,
- struct xilinx_dma_tx_descriptor, node);
- /*
- * VDMA and simple mode do not support residue reporting, so the
- * residue field will always be 0.
- */
- if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
- residue = xilinx_dma_get_residue(chan, desc);
-
+ if (!list_empty(&chan->active_list)) {
+ desc = list_last_entry(&chan->active_list,
+ struct xilinx_dma_tx_descriptor, node);
+ /*
+ * VDMA and simple mode do not support residue reporting, so the
+ * residue field will always be 0.
+ */
+ if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+ residue = xilinx_dma_get_residue(chan, desc);
+ }
spin_unlock_irqrestore(&chan->lock, flags);
dma_set_residue(txstate, residue);
@@ -1745,7 +1746,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
return IRQ_NONE;
if (chan->direction == DMA_DEV_TO_MEM)
- chan_offset = chan->xdev->s2mm_index;
+ chan_offset = chan->xdev->dma_config->max_channels / 2;
chan_offset = chan_offset + (chan_id - 1);
chan = chan->xdev->chan[chan_offset];
@@ -2404,16 +2405,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
u32 reg;
int err;
- if (chan->cyclic)
- xilinx_dma_chan_reset(chan);
-
- err = chan->stop_transfer(chan);
- if (err) {
- dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- chan->err = true;
+ if (!chan->cyclic) {
+ err = chan->stop_transfer(chan);
+ if (err) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, dma_ctrl_read(chan,
+ XILINX_DMA_REG_DMASR));
+ chan->err = true;
+ }
}
+ xilinx_dma_chan_reset(chan);
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
chan->idle = true;
@@ -2730,12 +2732,11 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
*
* @xdev: Driver specific device structure
* @node: Device node
- * @chan_id: DMA Channel id
*
* Return: '0' on success and failure value on error
*/
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
- struct device_node *node, int chan_id)
+ struct device_node *node)
{
struct xilinx_dma_chan *chan;
bool has_dre = false;
@@ -2787,8 +2788,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
chan->direction = DMA_MEM_TO_DEV;
- chan->id = chan_id;
- chan->tdest = chan_id;
+ chan->id = xdev->mm2s_chan_id++;
+ chan->tdest = chan->id;
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2804,9 +2805,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
of_device_is_compatible(node,
"xlnx,axi-dma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
- chan->id = chan_id;
- xdev->s2mm_index = xdev->nr_channels;
- chan->tdest = chan_id - xdev->nr_channels;
+ chan->id = xdev->s2mm_chan_id++;
+ chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
chan->has_vflip = of_property_read_bool(node,
"xlnx,enable-vert-flip");
if (chan->has_vflip) {
@@ -2908,9 +2908,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
dev_warn(xdev->dev, "missing dma-channels property\n");
for (i = 0; i < nr_channels; i++)
- xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
-
- xdev->nr_channels += nr_channels;
+ xilinx_dma_chan_probe(xdev, node);
return 0;
}
@@ -2928,7 +2926,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct xilinx_dma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
+ if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
@@ -2938,23 +2936,27 @@ static const struct xilinx_dma_config axidma_config = {
.dmatype = XDMA_TYPE_AXIDMA,
.clk_init = axidma_clk_init,
.irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
};
static const struct xilinx_dma_config aximcdma_config = {
.dmatype = XDMA_TYPE_AXIMCDMA,
.clk_init = axidma_clk_init,
.irq_handler = xilinx_mcdma_irq_handler,
+ .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
};
static const struct xilinx_dma_config axicdma_config = {
.dmatype = XDMA_TYPE_CDMA,
.clk_init = axicdma_clk_init,
.irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
};
static const struct xilinx_dma_config axivdma_config = {
.dmatype = XDMA_TYPE_VDMA,
.clk_init = axivdma_clk_init,
.irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
};
static const struct of_device_id xilinx_dma_of_ids[] = {
@@ -3011,6 +3013,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+ xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
@@ -3104,7 +3107,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- for (i = 0; i < xdev->nr_channels; i++)
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xdev->chan[i]->num_frms = num_frames;
}
@@ -3134,7 +3137,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
disable_clks:
xdma_disable_allclks(xdev);
error:
- for (i = 0; i < xdev->nr_channels; i++)
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
@@ -3156,7 +3159,7 @@ static int xilinx_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&xdev->common);
- for (i = 0; i < xdev->nr_channels; i++)
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
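
The tx_status hunk above guards list_last_entry() with list_empty(): on an
empty list, list_last_entry() would decode the list head itself as if it were
an entry. A minimal sketch of the pattern with a hypothetical item type:

#include <linux/list.h>

struct item {
	struct list_head node;
	int val;
};

static int last_val_or_zero(struct list_head *head)
{
	struct item *it;

	/* only dereference the last entry when one actually exists */
	if (list_empty(head))
		return 0;

	it = list_last_entry(head, struct item, node);
	return it->val;
}
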
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d47749a35863..ff253696d183 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -434,6 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
struct zynqmp_dma_desc_sw *child, *next;
chan->desc_free_cnt++;
+ list_del(&sdesc->node);
list_add_tail(&sdesc->node, &chan->free_list);
list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
chan->desc_free_cnt++;
@@ -608,8 +609,6 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
dma_async_tx_callback callback;
void *callback_param;
- list_del(&desc->node);
-
callback = desc->async_tx.callback;
callback_param = desc->async_tx.callback_param;
if (callback) {
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index b3c99bb5fe77..fe2eb892a1bd 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -523,4 +523,11 @@ config EDAC_BLUEFIELD
Support for error detection and correction on the
Mellanox BlueField SoCs.
+config EDAC_DMC520
+ tristate "ARM DMC-520 ECC"
+ depends on ARM64
+ help
+	  Support for error detection and correction on
+	  SoCs with the ARM DMC-520 DRAM controller.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index d77200c9680b..269e15118cea 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -87,3 +87,4 @@ obj-$(CONFIG_EDAC_TI) += ti_edac.o
obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
+obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9fbad908a854..f91f3bc1e0b2 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3626,13 +3626,13 @@ static void setup_pci_device(void)
}
static const struct x86_cpu_id amd64_cpuids[] = {
- { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
+ X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index 7f227bdcbc84..a7502ebe9bdc 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -429,26 +429,26 @@ static void aurora_l2_check(struct edac_device_ctl_info *dci)
src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF;
if (src <= 3)
- len += snprintf(msg+len, size-len, "src=CPU%d ", src);
+ len += scnprintf(msg+len, size-len, "src=CPU%d ", src);
else
- len += snprintf(msg+len, size-len, "src=IO ");
+ len += scnprintf(msg+len, size-len, "src=IO ");
txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF;
switch (txn) {
case 0:
- len += snprintf(msg+len, size-len, "txn=Data-Read ");
+ len += scnprintf(msg+len, size-len, "txn=Data-Read ");
break;
case 1:
- len += snprintf(msg+len, size-len, "txn=Isn-Read ");
+ len += scnprintf(msg+len, size-len, "txn=Isn-Read ");
break;
case 2:
- len += snprintf(msg+len, size-len, "txn=Clean-Flush ");
+ len += scnprintf(msg+len, size-len, "txn=Clean-Flush ");
break;
case 3:
- len += snprintf(msg+len, size-len, "txn=Eviction ");
+ len += scnprintf(msg+len, size-len, "txn=Eviction ");
break;
case 4:
- len += snprintf(msg+len, size-len,
+ len += scnprintf(msg+len, size-len,
"txn=Read-Modify-Write ");
break;
}
@@ -456,19 +456,19 @@ static void aurora_l2_check(struct edac_device_ctl_info *dci)
err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF;
switch (err) {
case 0:
- len += snprintf(msg+len, size-len, "err=CorrECC ");
+ len += scnprintf(msg+len, size-len, "err=CorrECC ");
break;
case 1:
- len += snprintf(msg+len, size-len, "err=UnCorrECC ");
+ len += scnprintf(msg+len, size-len, "err=UnCorrECC ");
break;
case 2:
- len += snprintf(msg+len, size-len, "err=TagParity ");
+ len += scnprintf(msg+len, size-len, "err=TagParity ");
break;
}
- len += snprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
- len += snprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
- len += snprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);
+ len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
+ len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
+ len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);
/* clear error capture registers */
writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);
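
The snprintf() to scnprintf() conversion above matters because snprintf()
returns the length the output would have had without truncation, so once a
print truncates, len can exceed size and the next "size - len" underflows.
scnprintf() returns the bytes actually written, keeping the accumulation
bounded. A minimal sketch with a hypothetical buffer:

#include <linux/kernel.h>

static void demo_format(char *buf, size_t size)
{
	int len = 0;

	/*
	 * scnprintf() never reports more than the space that was left,
	 * so len stays <= size even after the buffer fills up.
	 */
	len += scnprintf(buf + len, size - len, "src=CPU%d ", 1);
	len += scnprintf(buf + len, size - len, "txn=Data-Read ");
}
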
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
new file mode 100644
index 000000000000..fc1153ab1ebb
--- /dev/null
+++ b/drivers/edac/dmc520_edac.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * EDAC driver for DMC-520 memory controller.
+ *
+ * The driver supports 10 interrupt lines,
+ * though only dram_ecc_errc and dram_ecc_errd are currently handled.
+ *
+ * Authors: Rui Zhao <ruizhao@microsoft.com>
+ * Lei Wang <lewan@microsoft.com>
+ * Shiping Ji <shji@microsoft.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include "edac_mc.h"
+
+/* DMC-520 registers */
+#define REG_OFFSET_FEATURE_CONFIG 0x130
+#define REG_OFFSET_ECC_ERRC_COUNT_31_00 0x158
+#define REG_OFFSET_ECC_ERRC_COUNT_63_32 0x15C
+#define REG_OFFSET_ECC_ERRD_COUNT_31_00 0x160
+#define REG_OFFSET_ECC_ERRD_COUNT_63_32 0x164
+#define REG_OFFSET_INTERRUPT_CONTROL 0x500
+#define REG_OFFSET_INTERRUPT_CLR 0x508
+#define REG_OFFSET_INTERRUPT_STATUS 0x510
+#define REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_31_00 0x528
+#define REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_63_32 0x52C
+#define REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_31_00 0x530
+#define REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_63_32 0x534
+#define REG_OFFSET_ADDRESS_CONTROL_NOW 0x1010
+#define REG_OFFSET_MEMORY_TYPE_NOW 0x1128
+#define REG_OFFSET_SCRUB_CONTROL0_NOW 0x1170
+#define REG_OFFSET_FORMAT_CONTROL 0x18
+
+/* DMC-520 types, masks and bitfields */
+#define RAM_ECC_INT_CE_BIT BIT(0)
+#define RAM_ECC_INT_UE_BIT BIT(1)
+#define DRAM_ECC_INT_CE_BIT BIT(2)
+#define DRAM_ECC_INT_UE_BIT BIT(3)
+#define FAILED_ACCESS_INT_BIT BIT(4)
+#define FAILED_PROG_INT_BIT BIT(5)
+#define LINK_ERR_INT_BIT BIT(6)
+#define TEMPERATURE_EVENT_INT_BIT BIT(7)
+#define ARCH_FSM_INT_BIT BIT(8)
+#define PHY_REQUEST_INT_BIT BIT(9)
+#define MEMORY_WIDTH_MASK GENMASK(1, 0)
+#define SCRUB_TRIGGER0_NEXT_MASK GENMASK(1, 0)
+#define REG_FIELD_DRAM_ECC_ENABLED GENMASK(1, 0)
+#define REG_FIELD_MEMORY_TYPE GENMASK(2, 0)
+#define REG_FIELD_DEVICE_WIDTH GENMASK(9, 8)
+#define REG_FIELD_ADDRESS_CONTROL_COL GENMASK(2, 0)
+#define REG_FIELD_ADDRESS_CONTROL_ROW GENMASK(10, 8)
+#define REG_FIELD_ADDRESS_CONTROL_BANK GENMASK(18, 16)
+#define REG_FIELD_ADDRESS_CONTROL_RANK GENMASK(25, 24)
+#define REG_FIELD_ERR_INFO_LOW_VALID BIT(0)
+#define REG_FIELD_ERR_INFO_LOW_COL GENMASK(10, 1)
+#define REG_FIELD_ERR_INFO_LOW_ROW GENMASK(28, 11)
+#define REG_FIELD_ERR_INFO_LOW_RANK GENMASK(31, 29)
+#define REG_FIELD_ERR_INFO_HIGH_BANK GENMASK(3, 0)
+#define REG_FIELD_ERR_INFO_HIGH_VALID BIT(31)
+
+#define DRAM_ADDRESS_CONTROL_MIN_COL_BITS 8
+#define DRAM_ADDRESS_CONTROL_MIN_ROW_BITS 11
+
+#define DMC520_SCRUB_TRIGGER_ERR_DETECT 2
+#define DMC520_SCRUB_TRIGGER_IDLE 3
+
+/* Driver settings */
+/*
+ * The max-length message would be: "rank:7 bank:15 row:262143 col:1023".
+ * That is 34 characters, so a 40-byte buffer is enough.
+ */
+#define DMC520_MSG_BUF_SIZE 40
+#define EDAC_MOD_NAME "dmc520-edac"
+#define EDAC_CTL_NAME "dmc520"
+
+/* the data bus width for the attached memory chips. */
+enum dmc520_mem_width {
+ MEM_WIDTH_X32 = 2,
+ MEM_WIDTH_X64 = 3
+};
+
+/* memory type */
+enum dmc520_mem_type {
+ MEM_TYPE_DDR3 = 1,
+ MEM_TYPE_DDR4 = 2
+};
+
+/* memory device width */
+enum dmc520_dev_width {
+ DEV_WIDTH_X4 = 0,
+ DEV_WIDTH_X8 = 1,
+ DEV_WIDTH_X16 = 2
+};
+
+struct ecc_error_info {
+ u32 col;
+ u32 row;
+ u32 bank;
+ u32 rank;
+};
+
+/* The interrupt config */
+struct dmc520_irq_config {
+ char *name;
+ int mask;
+};
+
+/* The interrupt mappings */
+static struct dmc520_irq_config dmc520_irq_configs[] = {
+ {
+ .name = "ram_ecc_errc",
+ .mask = RAM_ECC_INT_CE_BIT
+ },
+ {
+ .name = "ram_ecc_errd",
+ .mask = RAM_ECC_INT_UE_BIT
+ },
+ {
+ .name = "dram_ecc_errc",
+ .mask = DRAM_ECC_INT_CE_BIT
+ },
+ {
+ .name = "dram_ecc_errd",
+ .mask = DRAM_ECC_INT_UE_BIT
+ },
+ {
+ .name = "failed_access",
+ .mask = FAILED_ACCESS_INT_BIT
+ },
+ {
+ .name = "failed_prog",
+ .mask = FAILED_PROG_INT_BIT
+ },
+ {
+ .name = "link_err",
+ .mask = LINK_ERR_INT_BIT
+ },
+ {
+ .name = "temperature_event",
+ .mask = TEMPERATURE_EVENT_INT_BIT
+ },
+ {
+ .name = "arch_fsm",
+ .mask = ARCH_FSM_INT_BIT
+ },
+ {
+ .name = "phy_request",
+ .mask = PHY_REQUEST_INT_BIT
+ }
+};
+
+#define NUMBER_OF_IRQS ARRAY_SIZE(dmc520_irq_configs)
+
+/*
+ * The EDAC driver private data.
+ * error_lock protects against concurrent writes to mci->error_desc via
+ * edac_mc_handle_error().
+ */
+struct dmc520_edac {
+ void __iomem *reg_base;
+ spinlock_t error_lock;
+ u32 mem_width_in_bytes;
+ int irqs[NUMBER_OF_IRQS];
+ int masks[NUMBER_OF_IRQS];
+};
+
+static int dmc520_mc_idx;
+
+static u32 dmc520_read_reg(struct dmc520_edac *pvt, u32 offset)
+{
+ return readl(pvt->reg_base + offset);
+}
+
+static void dmc520_write_reg(struct dmc520_edac *pvt, u32 val, u32 offset)
+{
+ writel(val, pvt->reg_base + offset);
+}
+
+static u32 dmc520_calc_dram_ecc_error(u32 value)
+{
+ u32 total = 0;
+
+ /* Each rank's error counter takes one byte. */
+ while (value > 0) {
+ total += (value & 0xFF);
+ value >>= 8;
+ }
+ return total;
+}
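+
+/*
+ * Worked example (illustrative value): a counter register reading of
+ * 0x01020304 packs the per-rank counts 0x04, 0x03, 0x02 and 0x01, so
+ * the helper above returns 4 + 3 + 2 + 1 = 10.
+ */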
+
+static u32 dmc520_get_dram_ecc_error_count(struct dmc520_edac *pvt,
+ bool is_ce)
+{
+ u32 reg_offset_low, reg_offset_high;
+ u32 err_low, err_high;
+ u32 err_count;
+
+ reg_offset_low = is_ce ? REG_OFFSET_ECC_ERRC_COUNT_31_00 :
+ REG_OFFSET_ECC_ERRD_COUNT_31_00;
+ reg_offset_high = is_ce ? REG_OFFSET_ECC_ERRC_COUNT_63_32 :
+ REG_OFFSET_ECC_ERRD_COUNT_63_32;
+
+ err_low = dmc520_read_reg(pvt, reg_offset_low);
+ err_high = dmc520_read_reg(pvt, reg_offset_high);
+ /* Reset error counters */
+ dmc520_write_reg(pvt, 0, reg_offset_low);
+ dmc520_write_reg(pvt, 0, reg_offset_high);
+
+ err_count = dmc520_calc_dram_ecc_error(err_low) +
+ dmc520_calc_dram_ecc_error(err_high);
+
+ return err_count;
+}
+
+static void dmc520_get_dram_ecc_error_info(struct dmc520_edac *pvt,
+ bool is_ce,
+ struct ecc_error_info *info)
+{
+ u32 reg_offset_low, reg_offset_high;
+ u32 reg_val_low, reg_val_high;
+ bool valid;
+
+ reg_offset_low = is_ce ? REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_31_00 :
+ REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_31_00;
+ reg_offset_high = is_ce ? REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_63_32 :
+ REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_63_32;
+
+ reg_val_low = dmc520_read_reg(pvt, reg_offset_low);
+ reg_val_high = dmc520_read_reg(pvt, reg_offset_high);
+
+ valid = (FIELD_GET(REG_FIELD_ERR_INFO_LOW_VALID, reg_val_low) != 0) &&
+ (FIELD_GET(REG_FIELD_ERR_INFO_HIGH_VALID, reg_val_high) != 0);
+
+ if (valid) {
+ info->col = FIELD_GET(REG_FIELD_ERR_INFO_LOW_COL, reg_val_low);
+ info->row = FIELD_GET(REG_FIELD_ERR_INFO_LOW_ROW, reg_val_low);
+ info->rank = FIELD_GET(REG_FIELD_ERR_INFO_LOW_RANK, reg_val_low);
+ info->bank = FIELD_GET(REG_FIELD_ERR_INFO_HIGH_BANK, reg_val_high);
+ } else {
+ memset(info, 0, sizeof(*info));
+ }
+}
+
+static bool dmc520_is_ecc_enabled(void __iomem *reg_base)
+{
+ u32 reg_val = readl(reg_base + REG_OFFSET_FEATURE_CONFIG);
+
+ return FIELD_GET(REG_FIELD_DRAM_ECC_ENABLED, reg_val);
+}
+
+static enum scrub_type dmc520_get_scrub_type(struct dmc520_edac *pvt)
+{
+ enum scrub_type type = SCRUB_NONE;
+ u32 reg_val, scrub_cfg;
+
+ reg_val = dmc520_read_reg(pvt, REG_OFFSET_SCRUB_CONTROL0_NOW);
+ scrub_cfg = FIELD_GET(SCRUB_TRIGGER0_NEXT_MASK, reg_val);
+
+ if (scrub_cfg == DMC520_SCRUB_TRIGGER_ERR_DETECT ||
+ scrub_cfg == DMC520_SCRUB_TRIGGER_IDLE)
+ type = SCRUB_HW_PROG;
+
+ return type;
+}
+
+/* Get the memory data bus width, in number of bytes. */
+static u32 dmc520_get_memory_width(struct dmc520_edac *pvt)
+{
+ enum dmc520_mem_width mem_width_field;
+ u32 mem_width_in_bytes = 0;
+ u32 reg_val;
+
+ reg_val = dmc520_read_reg(pvt, REG_OFFSET_FORMAT_CONTROL);
+ mem_width_field = FIELD_GET(MEMORY_WIDTH_MASK, reg_val);
+
+ if (mem_width_field == MEM_WIDTH_X32)
+ mem_width_in_bytes = 4;
+ else if (mem_width_field == MEM_WIDTH_X64)
+ mem_width_in_bytes = 8;
+ return mem_width_in_bytes;
+}
+
+static enum mem_type dmc520_get_mtype(struct dmc520_edac *pvt)
+{
+ enum mem_type mt = MEM_UNKNOWN;
+ enum dmc520_mem_type type;
+ u32 reg_val;
+
+ reg_val = dmc520_read_reg(pvt, REG_OFFSET_MEMORY_TYPE_NOW);
+ type = FIELD_GET(REG_FIELD_MEMORY_TYPE, reg_val);
+
+ switch (type) {
+ case MEM_TYPE_DDR3:
+ mt = MEM_DDR3;
+ break;
+
+ case MEM_TYPE_DDR4:
+ mt = MEM_DDR4;
+ break;
+ }
+
+ return mt;
+}
+
+static enum dev_type dmc520_get_dtype(struct dmc520_edac *pvt)
+{
+ enum dmc520_dev_width device_width;
+ enum dev_type dt = DEV_UNKNOWN;
+ u32 reg_val;
+
+ reg_val = dmc520_read_reg(pvt, REG_OFFSET_MEMORY_TYPE_NOW);
+ device_width = FIELD_GET(REG_FIELD_DEVICE_WIDTH, reg_val);
+
+ switch (device_width) {
+ case DEV_WIDTH_X4:
+ dt = DEV_X4;
+ break;
+
+ case DEV_WIDTH_X8:
+ dt = DEV_X8;
+ break;
+
+ case DEV_WIDTH_X16:
+ dt = DEV_X16;
+ break;
+ }
+
+ return dt;
+}
+
+static u32 dmc520_get_rank_count(void __iomem *reg_base)
+{
+ u32 reg_val, rank_bits;
+
+ reg_val = readl(reg_base + REG_OFFSET_ADDRESS_CONTROL_NOW);
+ rank_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_RANK, reg_val);
+
+ return BIT(rank_bits);
+}
+
+static u64 dmc520_get_rank_size(struct dmc520_edac *pvt)
+{
+ u32 reg_val, col_bits, row_bits, bank_bits;
+
+ reg_val = dmc520_read_reg(pvt, REG_OFFSET_ADDRESS_CONTROL_NOW);
+
+ col_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_COL, reg_val) +
+ DRAM_ADDRESS_CONTROL_MIN_COL_BITS;
+ row_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_ROW, reg_val) +
+ DRAM_ADDRESS_CONTROL_MIN_ROW_BITS;
+ bank_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_BANK, reg_val);
+
+ return (u64)pvt->mem_width_in_bytes << (col_bits + row_bits + bank_bits);
+}
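+
+/*
+ * Worked example (illustrative values): with an 8-byte data bus and
+ * ADDRESS_CONTROL_NOW decoding to 10 column bits, 17 row bits and
+ * 4 bank bits, the rank size is 8 << (10 + 17 + 4) = 16 GiB.
+ */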
+
+static void dmc520_handle_dram_ecc_errors(struct mem_ctl_info *mci,
+ bool is_ce)
+{
+ struct dmc520_edac *pvt = mci->pvt_info;
+ char message[DMC520_MSG_BUF_SIZE];
+ struct ecc_error_info info;
+ u32 cnt;
+
+ dmc520_get_dram_ecc_error_info(pvt, is_ce, &info);
+
+ cnt = dmc520_get_dram_ecc_error_count(pvt, is_ce);
+ if (!cnt)
+ return;
+
+ snprintf(message, ARRAY_SIZE(message),
+ "rank:%d bank:%d row:%d col:%d",
+ info.rank, info.bank,
+ info.row, info.col);
+
+ spin_lock(&pvt->error_lock);
+ edac_mc_handle_error((is_ce ? HW_EVENT_ERR_CORRECTED :
+ HW_EVENT_ERR_UNCORRECTED),
+ mci, cnt, 0, 0, 0, info.rank, -1, -1,
+ message, "");
+ spin_unlock(&pvt->error_lock);
+}
+
+static irqreturn_t dmc520_edac_dram_ecc_isr(int irq, struct mem_ctl_info *mci,
+ bool is_ce)
+{
+ struct dmc520_edac *pvt = mci->pvt_info;
+ u32 i_mask;
+
+ i_mask = is_ce ? DRAM_ECC_INT_CE_BIT : DRAM_ECC_INT_UE_BIT;
+
+ dmc520_handle_dram_ecc_errors(mci, is_ce);
+
+ dmc520_write_reg(pvt, i_mask, REG_OFFSET_INTERRUPT_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dmc520_edac_dram_all_isr(int irq, struct mem_ctl_info *mci,
+ u32 irq_mask)
+{
+ struct dmc520_edac *pvt = mci->pvt_info;
+ irqreturn_t irq_ret = IRQ_NONE;
+ u32 status;
+
+ status = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_STATUS);
+
+ if ((irq_mask & DRAM_ECC_INT_CE_BIT) &&
+ (status & DRAM_ECC_INT_CE_BIT))
+ irq_ret = dmc520_edac_dram_ecc_isr(irq, mci, true);
+
+ if ((irq_mask & DRAM_ECC_INT_UE_BIT) &&
+ (status & DRAM_ECC_INT_UE_BIT))
+ irq_ret = dmc520_edac_dram_ecc_isr(irq, mci, false);
+
+ return irq_ret;
+}
+
+static irqreturn_t dmc520_isr(int irq, void *data)
+{
+ struct mem_ctl_info *mci = data;
+ struct dmc520_edac *pvt = mci->pvt_info;
+ u32 mask = 0;
+ int idx;
+
+ for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
+ if (pvt->irqs[idx] == irq) {
+ mask = pvt->masks[idx];
+ break;
+ }
+ }
+ return dmc520_edac_dram_all_isr(irq, mci, mask);
+}
+
+static void dmc520_init_csrow(struct mem_ctl_info *mci)
+{
+ struct dmc520_edac *pvt = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ u32 pages_per_rank;
+ enum dev_type dt;
+ enum mem_type mt;
+ int row, ch;
+ u64 rs;
+
+ dt = dmc520_get_dtype(pvt);
+ mt = dmc520_get_mtype(pvt);
+ rs = dmc520_get_rank_size(pvt);
+ pages_per_rank = rs >> PAGE_SHIFT;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+
+ for (ch = 0; ch < csi->nr_channels; ch++) {
+ dimm = csi->channels[ch]->dimm;
+ dimm->grain = pvt->mem_width_in_bytes;
+ dimm->dtype = dt;
+ dimm->mtype = mt;
+ dimm->edac_mode = EDAC_FLAG_SECDED;
+ dimm->nr_pages = pages_per_rank / csi->nr_channels;
+ }
+ }
+}
+
+static int dmc520_edac_probe(struct platform_device *pdev)
+{
+ bool registered[NUMBER_OF_IRQS] = { false };
+ int irqs[NUMBER_OF_IRQS] = { -ENXIO };
+ int masks[NUMBER_OF_IRQS] = { 0 };
+ struct edac_mc_layer layers[1];
+ struct dmc520_edac *pvt = NULL;
+ struct mem_ctl_info *mci;
+ void __iomem *reg_base;
+ u32 irq_mask_all = 0;
+ struct resource *res;
+ struct device *dev;
+ int ret, idx, irq;
+ u32 reg_val;
+
+ /* Parse the device node */
+ dev = &pdev->dev;
+
+ for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
+ irq = platform_get_irq_byname(pdev, dmc520_irq_configs[idx].name);
+ irqs[idx] = irq;
+ masks[idx] = dmc520_irq_configs[idx].mask;
+ if (irq >= 0) {
+ irq_mask_all |= dmc520_irq_configs[idx].mask;
+ edac_dbg(0, "Discovered %s, irq: %d.\n", dmc520_irq_configs[idx].name, irq);
+ }
+ }
+
+ if (!irq_mask_all) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "At least one valid interrupt line is expected.\n");
+ return -EINVAL;
+ }
+
+ /* Initialize dmc520 edac */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ if (!dmc520_is_ecc_enabled(reg_base))
+ return -ENXIO;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = dmc520_get_rank_count(reg_base);
+ layers[0].is_virt_csrow = true;
+
+ mci = edac_mc_alloc(dmc520_mc_idx++, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "Failed to allocate memory for mc instance\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ pvt = mci->pvt_info;
+
+ pvt->reg_base = reg_base;
+ spin_lock_init(&pvt->error_lock);
+ memcpy(pvt->irqs, irqs, sizeof(irqs));
+ memcpy(pvt->masks, masks, sizeof(masks));
+
+ platform_set_drvdata(pdev, mci);
+
+ mci->pdev = dev;
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+ mci->scrub_mode = dmc520_get_scrub_type(pvt);
+ mci->ctl_name = EDAC_CTL_NAME;
+ mci->dev_name = dev_name(mci->pdev);
+ mci->mod_name = EDAC_MOD_NAME;
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ pvt->mem_width_in_bytes = dmc520_get_memory_width(pvt);
+
+ dmc520_init_csrow(mci);
+
+	/* Clear these interrupts without disturbing unrelated ones */
+ reg_val = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_CONTROL);
+ dmc520_write_reg(pvt, reg_val & (~irq_mask_all),
+ REG_OFFSET_INTERRUPT_CONTROL);
+ dmc520_write_reg(pvt, irq_mask_all, REG_OFFSET_INTERRUPT_CLR);
+
+ for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
+ irq = irqs[idx];
+ if (irq >= 0) {
+ ret = devm_request_irq(&pdev->dev, irq,
+ dmc520_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), mci);
+ if (ret < 0) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to request irq %d\n", irq);
+ goto err;
+ }
+ registered[idx] = true;
+ }
+ }
+
+ /* Reset DRAM CE/UE counters */
+ if (irq_mask_all & DRAM_ECC_INT_CE_BIT)
+ dmc520_get_dram_ecc_error_count(pvt, true);
+
+ if (irq_mask_all & DRAM_ECC_INT_UE_BIT)
+ dmc520_get_dram_ecc_error_count(pvt, false);
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "Failed to register with EDAC core\n");
+ goto err;
+ }
+
+	/* Enable these interrupts without disturbing unrelated ones */
+ dmc520_write_reg(pvt, reg_val | irq_mask_all,
+ REG_OFFSET_INTERRUPT_CONTROL);
+
+ return 0;
+
+err:
+ for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
+ if (registered[idx])
+ devm_free_irq(&pdev->dev, pvt->irqs[idx], mci);
+ }
+ if (mci)
+ edac_mc_free(mci);
+
+ return ret;
+}
+
+static int dmc520_edac_remove(struct platform_device *pdev)
+{
+ u32 reg_val, idx, irq_mask_all = 0;
+ struct mem_ctl_info *mci;
+ struct dmc520_edac *pvt;
+
+ mci = platform_get_drvdata(pdev);
+ pvt = mci->pvt_info;
+
+	/* Collect the masks of the requested interrupt lines */
+	for (idx = 0; idx < NUMBER_OF_IRQS; idx++)
+		if (pvt->irqs[idx] >= 0)
+			irq_mask_all |= pvt->masks[idx];
+
+	/* Disable interrupts */
+	reg_val = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_CONTROL);
+	dmc520_write_reg(pvt, reg_val & (~irq_mask_all),
+			 REG_OFFSET_INTERRUPT_CONTROL);
+
+	/* Free the interrupt lines */
+	for (idx = 0; idx < NUMBER_OF_IRQS; idx++)
+		if (pvt->irqs[idx] >= 0)
+			devm_free_irq(&pdev->dev, pvt->irqs[idx], mci);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static const struct of_device_id dmc520_edac_driver_id[] = {
+ { .compatible = "arm,dmc-520", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, dmc520_edac_driver_id);
+
+static struct platform_driver dmc520_edac_driver = {
+ .driver = {
+ .name = "dmc520",
+ .of_match_table = dmc520_edac_driver_id,
+ },
+
+ .probe = dmc520_edac_probe,
+ .remove = dmc520_edac_remove
+};
+
+module_platform_driver(dmc520_edac_driver);
+
+MODULE_AUTHOR("Rui Zhao <ruizhao@microsoft.com>");
+MODULE_AUTHOR("Lei Wang <lewan@microsoft.com>");
+MODULE_AUTHOR("Shiping Ji <shji@microsoft.com>");
+MODULE_DESCRIPTION("DMC-520 ECC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 69e0d90460e6..75ede27bdf6a 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -55,6 +55,11 @@ static LIST_HEAD(mc_devices);
*/
static const char *edac_mc_owner;
+static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)
+{
+ return container_of(e, struct mem_ctl_info, error_desc);
+}
+
int edac_get_report_status(void)
{
return edac_report;
@@ -278,6 +283,12 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
static void _edac_mc_free(struct mem_ctl_info *mci)
{
+ put_device(&mci->dev);
+}
+
+static void mci_release(struct device *dev)
+{
+ struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
struct csrow_info *csr;
int i, chn, row;
@@ -305,103 +316,26 @@ static void _edac_mc_free(struct mem_ctl_info *mci)
kfree(mci);
}
-struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
- unsigned int n_layers,
- struct edac_mc_layer *layers,
- unsigned int sz_pvt)
+static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
{
- struct mem_ctl_info *mci;
- struct edac_mc_layer *layer;
- struct csrow_info *csr;
- struct rank_info *chan;
- struct dimm_info *dimm;
- u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
- unsigned int pos[EDAC_MAX_LAYERS];
- unsigned int idx, size, tot_dimms = 1, count = 1;
- unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
- void *pvt, *p, *ptr = NULL;
- int i, j, row, chn, n, len;
- bool per_rank = false;
-
- if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
- return NULL;
-
- /*
- * Calculate the total amount of dimms and csrows/cschannels while
- * in the old API emulation mode
- */
- for (idx = 0; idx < n_layers; idx++) {
- tot_dimms *= layers[idx].size;
-
- if (layers[idx].is_virt_csrow)
- tot_csrows *= layers[idx].size;
- else
- tot_channels *= layers[idx].size;
-
- if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
- per_rank = true;
- }
-
- /* Figure out the offsets of the various items from the start of an mc
- * structure. We want the alignment of each item to be at least as
- * stringent as what the compiler would provide if we could simply
- * hardcode everything into a single struct.
- */
- mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
- layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
- for (i = 0; i < n_layers; i++) {
- count *= layers[i].size;
- edac_dbg(4, "errcount layer %d size %d\n", i, count);
- ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
- ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
- tot_errcount += 2 * count;
- }
-
- edac_dbg(4, "allocating %d error counters\n", tot_errcount);
- pvt = edac_align_ptr(&ptr, sz_pvt, 1);
- size = ((unsigned long)pvt) + sz_pvt;
-
- edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
- size,
- tot_dimms,
- per_rank ? "ranks" : "dimms",
- tot_csrows * tot_channels);
-
- mci = kzalloc(size, GFP_KERNEL);
- if (mci == NULL)
- return NULL;
-
- /* Adjust pointers so they point within the memory we just allocated
- * rather than an imaginary chunk of memory located at address 0.
- */
- layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
- for (i = 0; i < n_layers; i++) {
- mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
- mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
- }
- pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
-
- /* setup index and various internal pointers */
- mci->mc_idx = mc_num;
- mci->tot_dimms = tot_dimms;
- mci->pvt_info = pvt;
- mci->n_layers = n_layers;
- mci->layers = layer;
- memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
- mci->nr_csrows = tot_csrows;
- mci->num_cschannel = tot_channels;
- mci->csbased = per_rank;
+ unsigned int tot_channels = mci->num_cschannel;
+ unsigned int tot_csrows = mci->nr_csrows;
+ unsigned int row, chn;
/*
* Allocate and fill the csrow/channels structs
*/
mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
- goto error;
+ return -ENOMEM;
+
for (row = 0; row < tot_csrows; row++) {
+ struct csrow_info *csr;
+
csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
if (!csr)
- goto error;
+ return -ENOMEM;
+
mci->csrows[row] = csr;
csr->csrow_idx = row;
csr->mci = mci;
@@ -409,34 +343,51 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
GFP_KERNEL);
if (!csr->channels)
- goto error;
+ return -ENOMEM;
for (chn = 0; chn < tot_channels; chn++) {
+ struct rank_info *chan;
+
chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
if (!chan)
- goto error;
+ return -ENOMEM;
+
csr->channels[chn] = chan;
chan->chan_idx = chn;
chan->csrow = csr;
}
}
+ return 0;
+}
+
+static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
+{
+ unsigned int pos[EDAC_MAX_LAYERS];
+ unsigned int row, chn, idx;
+ int layer;
+ void *p;
+
/*
* Allocate and fill the dimm structs
*/
- mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
+ mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
if (!mci->dimms)
- goto error;
+ return -ENOMEM;
memset(&pos, 0, sizeof(pos));
row = 0;
chn = 0;
- for (idx = 0; idx < tot_dimms; idx++) {
+ for (idx = 0; idx < mci->tot_dimms; idx++) {
+ struct dimm_info *dimm;
+ struct rank_info *chan;
+ int n, len;
+
chan = mci->csrows[row]->channels[chn];
dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
if (!dimm)
- goto error;
+ return -ENOMEM;
mci->dimms[idx] = dimm;
dimm->mci = mci;
dimm->idx = idx;
@@ -446,16 +397,16 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
*/
len = sizeof(dimm->label);
p = dimm->label;
- n = snprintf(p, len, "mc#%u", mc_num);
+ n = snprintf(p, len, "mc#%u", mci->mc_idx);
p += n;
len -= n;
- for (j = 0; j < n_layers; j++) {
+ for (layer = 0; layer < mci->n_layers; layer++) {
n = snprintf(p, len, "%s#%u",
- edac_layer_name[layers[j].type],
- pos[j]);
+ edac_layer_name[mci->layers[layer].type],
+ pos[layer]);
p += n;
len -= n;
- dimm->location[j] = pos[j];
+ dimm->location[layer] = pos[layer];
if (len <= 0)
break;
@@ -467,29 +418,109 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
dimm->cschannel = chn;
/* Increment csrow location */
- if (layers[0].is_virt_csrow) {
+ if (mci->layers[0].is_virt_csrow) {
chn++;
- if (chn == tot_channels) {
+ if (chn == mci->num_cschannel) {
chn = 0;
row++;
}
} else {
row++;
- if (row == tot_csrows) {
+ if (row == mci->nr_csrows) {
row = 0;
chn++;
}
}
/* Increment dimm location */
- for (j = n_layers - 1; j >= 0; j--) {
- pos[j]++;
- if (pos[j] < layers[j].size)
+ for (layer = mci->n_layers - 1; layer >= 0; layer--) {
+ pos[layer]++;
+ if (pos[layer] < mci->layers[layer].size)
break;
- pos[j] = 0;
+ pos[layer] = 0;
}
}
+ return 0;
+}
+
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+ unsigned int n_layers,
+ struct edac_mc_layer *layers,
+ unsigned int sz_pvt)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer *layer;
+ unsigned int idx, size, tot_dimms = 1;
+ unsigned int tot_csrows = 1, tot_channels = 1;
+ void *pvt, *ptr = NULL;
+ bool per_rank = false;
+
+ if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
+ return NULL;
+
+ /*
+ * Calculate the total number of DIMMs and csrows/cschannels while
+ * in the old API emulation mode
+ */
+ for (idx = 0; idx < n_layers; idx++) {
+ tot_dimms *= layers[idx].size;
+
+ if (layers[idx].is_virt_csrow)
+ tot_csrows *= layers[idx].size;
+ else
+ tot_channels *= layers[idx].size;
+
+ if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
+ per_rank = true;
+ }
+
+ /* Figure out the offsets of the various items from the start of an mc
+ * structure. We want the alignment of each item to be at least as
+ * stringent as what the compiler would provide if we could simply
+ * hardcode everything into a single struct.
+ */
+ mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
+ layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
+ pvt = edac_align_ptr(&ptr, sz_pvt, 1);
+ size = ((unsigned long)pvt) + sz_pvt;
+
+ edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+ size,
+ tot_dimms,
+ per_rank ? "ranks" : "dimms",
+ tot_csrows * tot_channels);
+
+ mci = kzalloc(size, GFP_KERNEL);
+ if (mci == NULL)
+ return NULL;
+
+ mci->dev.release = mci_release;
+ device_initialize(&mci->dev);
+
+ /* Adjust pointers so they point within the memory we just allocated
+ * rather than an imaginary chunk of memory located at address 0.
+ */
+ layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
+ pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
+
+ /* setup index and various internal pointers */
+ mci->mc_idx = mc_num;
+ mci->tot_dimms = tot_dimms;
+ mci->pvt_info = pvt;
+ mci->n_layers = n_layers;
+ mci->layers = layer;
+ memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
+ mci->nr_csrows = tot_csrows;
+ mci->num_cschannel = tot_channels;
+ mci->csbased = per_rank;
+
+ if (edac_mc_alloc_csrows(mci))
+ goto error;
+
+ if (edac_mc_alloc_dimms(mci))
+ goto error;
+
mci->op_state = OP_ALLOC;
return mci;
@@ -505,9 +536,6 @@ void edac_mc_free(struct mem_ctl_info *mci)
{
edac_dbg(1, "\n");
- if (device_is_registered(&mci->dev))
- edac_unregister_sysfs(mci);
-
_edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
@@ -902,88 +930,51 @@ const char *edac_layer_name[] = {
};
EXPORT_SYMBOL_GPL(edac_layer_name);
-static void edac_inc_ce_error(struct mem_ctl_info *mci,
- bool enable_per_layer_report,
- const int pos[EDAC_MAX_LAYERS],
- const u16 count)
+static void edac_inc_ce_error(struct edac_raw_error_desc *e)
{
- int i, index = 0;
-
- mci->ce_mc += count;
-
- if (!enable_per_layer_report) {
- mci->ce_noinfo_count += count;
- return;
- }
+ int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
+ struct mem_ctl_info *mci = error_desc_to_mci(e);
+ struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);
- for (i = 0; i < mci->n_layers; i++) {
- if (pos[i] < 0)
- break;
- index += pos[i];
- mci->ce_per_layer[i][index] += count;
+ mci->ce_mc += e->error_count;
- if (i < mci->n_layers - 1)
- index *= mci->layers[i + 1].size;
- }
+ if (dimm)
+ dimm->ce_count += e->error_count;
+ else
+ mci->ce_noinfo_count += e->error_count;
}
-static void edac_inc_ue_error(struct mem_ctl_info *mci,
- bool enable_per_layer_report,
- const int pos[EDAC_MAX_LAYERS],
- const u16 count)
+static void edac_inc_ue_error(struct edac_raw_error_desc *e)
{
- int i, index = 0;
-
- mci->ue_mc += count;
-
- if (!enable_per_layer_report) {
- mci->ue_noinfo_count += count;
- return;
- }
+ int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
+ struct mem_ctl_info *mci = error_desc_to_mci(e);
+ struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);
- for (i = 0; i < mci->n_layers; i++) {
- if (pos[i] < 0)
- break;
- index += pos[i];
- mci->ue_per_layer[i][index] += count;
+ mci->ue_mc += e->error_count;
- if (i < mci->n_layers - 1)
- index *= mci->layers[i + 1].size;
- }
+ if (dimm)
+ dimm->ue_count += e->error_count;
+ else
+ mci->ue_noinfo_count += e->error_count;
}
-static void edac_ce_error(struct mem_ctl_info *mci,
- const u16 error_count,
- const int pos[EDAC_MAX_LAYERS],
- const char *msg,
- const char *location,
- const char *label,
- const char *detail,
- const char *other_detail,
- const bool enable_per_layer_report,
- const unsigned long page_frame_number,
- const unsigned long offset_in_page,
- long grain)
+static void edac_ce_error(struct edac_raw_error_desc *e)
{
+ struct mem_ctl_info *mci = error_desc_to_mci(e);
unsigned long remapped_page;
- char *msg_aux = "";
-
- if (*msg)
- msg_aux = " ";
if (edac_mc_get_log_ce()) {
- if (other_detail && *other_detail)
- edac_mc_printk(mci, KERN_WARNING,
- "%d CE %s%son %s (%s %s - %s)\n",
- error_count, msg, msg_aux, label,
- location, detail, other_detail);
- else
- edac_mc_printk(mci, KERN_WARNING,
- "%d CE %s%son %s (%s %s)\n",
- error_count, msg, msg_aux, label,
- location, detail);
+ edac_mc_printk(mci, KERN_WARNING,
+ "%d CE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx%s%s)\n",
+ e->error_count, e->msg,
+ *e->msg ? " " : "",
+ e->label, e->location, e->page_frame_number, e->offset_in_page,
+ e->grain, e->syndrome,
+ *e->other_detail ? " - " : "",
+ e->other_detail);
}
- edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);
+
+ edac_inc_ce_error(e);
if (mci->scrub_mode == SCRUB_SW_SRC) {
/*
@@ -998,60 +989,64 @@ static void edac_ce_error(struct mem_ctl_info *mci,
* be scrubbed.
*/
remapped_page = mci->ctl_page_to_phys ?
- mci->ctl_page_to_phys(mci, page_frame_number) :
- page_frame_number;
+ mci->ctl_page_to_phys(mci, e->page_frame_number) :
+ e->page_frame_number;
- edac_mc_scrub_block(remapped_page,
- offset_in_page, grain);
+ edac_mc_scrub_block(remapped_page, e->offset_in_page, e->grain);
}
}
-static void edac_ue_error(struct mem_ctl_info *mci,
- const u16 error_count,
- const int pos[EDAC_MAX_LAYERS],
- const char *msg,
- const char *location,
- const char *label,
- const char *detail,
- const char *other_detail,
- const bool enable_per_layer_report)
+static void edac_ue_error(struct edac_raw_error_desc *e)
{
- char *msg_aux = "";
-
- if (*msg)
- msg_aux = " ";
+ struct mem_ctl_info *mci = error_desc_to_mci(e);
if (edac_mc_get_log_ue()) {
- if (other_detail && *other_detail)
- edac_mc_printk(mci, KERN_WARNING,
- "%d UE %s%son %s (%s %s - %s)\n",
- error_count, msg, msg_aux, label,
- location, detail, other_detail);
- else
- edac_mc_printk(mci, KERN_WARNING,
- "%d UE %s%son %s (%s %s)\n",
- error_count, msg, msg_aux, label,
- location, detail);
+ edac_mc_printk(mci, KERN_WARNING,
+ "%d UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
+ e->error_count, e->msg,
+ *e->msg ? " " : "",
+ e->label, e->location, e->page_frame_number, e->offset_in_page,
+ e->grain,
+ *e->other_detail ? " - " : "",
+ e->other_detail);
}
if (edac_mc_get_panic_on_ue()) {
- if (other_detail && *other_detail)
- panic("UE %s%son %s (%s%s - %s)\n",
- msg, msg_aux, label, location, detail, other_detail);
- else
- panic("UE %s%son %s (%s%s)\n",
- msg, msg_aux, label, location, detail);
+ panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
+ e->msg,
+ *e->msg ? " " : "",
+ e->label, e->location, e->page_frame_number, e->offset_in_page,
+ e->grain,
+ *e->other_detail ? " - " : "",
+ e->other_detail);
}
- edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
+ edac_inc_ue_error(e);
}
-void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
- struct mem_ctl_info *mci,
- struct edac_raw_error_desc *e)
+static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan)
{
- char detail[80];
- int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
+ struct mem_ctl_info *mci = error_desc_to_mci(e);
+ enum hw_event_mc_err_type type = e->type;
+ u16 count = e->error_count;
+
+ if (row < 0)
+ return;
+
+ edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
+
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ mci->csrows[row]->ce_count += count;
+ if (chan >= 0)
+ mci->csrows[row]->channels[chan]->ce_count += count;
+ } else {
+ mci->csrows[row]->ue_count += count;
+ }
+}
+
+void edac_raw_mc_handle_error(struct edac_raw_error_desc *e)
+{
+ struct mem_ctl_info *mci = error_desc_to_mci(e);
u8 grain_bits;
/* Sanity-check driver-supplied grain value. */
@@ -1062,31 +1057,16 @@ void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
/* Report the error via the trace interface */
if (IS_ENABLED(CONFIG_RAS))
- trace_mc_event(type, e->msg, e->label, e->error_count,
+ trace_mc_event(e->type, e->msg, e->label, e->error_count,
mci->mc_idx, e->top_layer, e->mid_layer,
e->low_layer,
(e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
grain_bits, e->syndrome, e->other_detail);
- /* Memory type dependent details about the error */
- if (type == HW_EVENT_ERR_CORRECTED) {
- snprintf(detail, sizeof(detail),
- "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
- e->page_frame_number, e->offset_in_page,
- e->grain, e->syndrome);
- edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
- detail, e->other_detail, e->enable_per_layer_report,
- e->page_frame_number, e->offset_in_page, e->grain);
- } else {
- snprintf(detail, sizeof(detail),
- "page:0x%lx offset:0x%lx grain:%ld",
- e->page_frame_number, e->offset_in_page, e->grain);
-
- edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
- detail, e->other_detail, e->enable_per_layer_report);
- }
-
-
+ if (e->type == HW_EVENT_ERR_CORRECTED)
+ edac_ce_error(e);
+ else
+ edac_ue_error(e);
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
@@ -1108,25 +1088,27 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
int i, n_labels = 0;
struct edac_raw_error_desc *e = &mci->error_desc;
+ bool any_memory = true;
edac_dbg(3, "MC%d\n", mci->mc_idx);
/* Fills the error report buffer */
memset(e, 0, sizeof (*e));
e->error_count = error_count;
+ e->type = type;
e->top_layer = top_layer;
e->mid_layer = mid_layer;
e->low_layer = low_layer;
e->page_frame_number = page_frame_number;
e->offset_in_page = offset_in_page;
e->syndrome = syndrome;
- e->msg = msg;
- e->other_detail = other_detail;
+ /* need valid strings here for both: */
+ e->msg = msg ?: "";
+ e->other_detail = other_detail ?: "";
/*
- * Check if the event report is consistent and if the memory
- * location is known. If it is known, enable_per_layer_report will be
- * true, the DIMM(s) label info will be filled and the per-layer
+ * Check if the event report is consistent and if the memory location is
+ * known. If it is, the DIMM(s) label info will be filled and the DIMM's
* error counters will be incremented.
*/
for (i = 0; i < mci->n_layers; i++) {
@@ -1145,7 +1127,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
pos[i] = -1;
}
if (pos[i] >= 0)
- e->enable_per_layer_report = true;
+ any_memory = false;
}
/*
@@ -1176,24 +1158,25 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
/*
* If the error is memory-controller wide, there's no need to
- * seek for the affected DIMMs because the whole
- * channel/memory controller/... may be affected.
- * Also, don't show errors for empty DIMM slots.
+ * seek for the affected DIMMs because the whole channel/memory
+ * controller/... may be affected. Also, don't show errors for
+ * empty DIMM slots.
*/
- if (!e->enable_per_layer_report || !dimm->nr_pages)
+ if (!dimm->nr_pages)
continue;
- if (n_labels >= EDAC_MAX_LABELS) {
- e->enable_per_layer_report = false;
- break;
- }
n_labels++;
- if (p != e->label) {
- strcpy(p, OTHER_LABEL);
- p += strlen(OTHER_LABEL);
+ if (n_labels > EDAC_MAX_LABELS) {
+ p = e->label;
+ *p = '\0';
+ } else {
+ if (p != e->label) {
+ strcpy(p, OTHER_LABEL);
+ p += strlen(OTHER_LABEL);
+ }
+ strcpy(p, dimm->label);
+ p += strlen(p);
}
- strcpy(p, dimm->label);
- p += strlen(p);
/*
* get csrow/channel of the DIMM, in order to allow
@@ -1213,22 +1196,12 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
chan = -2;
}
- if (!e->enable_per_layer_report) {
+ if (any_memory)
strcpy(e->label, "any memory");
- } else {
- edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
- if (p == e->label)
- strcpy(e->label, "unknown memory");
- if (type == HW_EVENT_ERR_CORRECTED) {
- if (row >= 0) {
- mci->csrows[row]->ce_count += error_count;
- if (chan >= 0)
- mci->csrows[row]->channels[chan]->ce_count += error_count;
- }
- } else
- if (row >= 0)
- mci->csrows[row]->ue_count += error_count;
- }
+ else if (!*e->label)
+ strcpy(e->label, "unknown memory");
+
+ edac_inc_csrow(e, row, chan);
/* Fill the RAM location data */
p = e->location;
@@ -1244,6 +1217,6 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
if (p > e->location)
*(p - 1) = '\0';
- edac_raw_mc_handle_error(type, mci, e);
+ edac_raw_mc_handle_error(e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
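
The heart of the edac_mc.c change above is lifetime management: struct mem_ctl_info now embeds a refcounted struct device whose release callback (mci_release) frees the csrows, dimms and layers, so _edac_mc_free() collapses into a put_device(). A minimal sketch of that pattern, using hypothetical names (my_ctl, my_ctl_release - not the driver's identifiers):

	struct my_ctl {
		struct device dev;	/* embedded; owns the object's lifetime */
		int payload;
	};

	static void my_ctl_release(struct device *dev)
	{
		/* recover the enclosing object from the embedded member */
		struct my_ctl *ctl = container_of(dev, struct my_ctl, dev);

		kfree(ctl);		/* last reference gone: free everything */
	}

	static struct my_ctl *my_ctl_alloc(void)
	{
		struct my_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

		if (!ctl)
			return NULL;

		ctl->dev.release = my_ctl_release;
		device_initialize(&ctl->dev);	/* refcount starts at 1 */
		return ctl;
	}

	static void my_ctl_free(struct my_ctl *ctl)
	{
		put_device(&ctl->dev);	/* my_ctl_release() runs on the last put */
	}
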
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 02aac5c61d00..881b00eadf7a 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -212,17 +212,13 @@ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
* edac_raw_mc_handle_error() - Reports a memory event to userspace without
* doing anything to discover the error location.
*
- * @type: severity of the error (CE/UE/Fatal)
- * @mci: a struct mem_ctl_info pointer
* @e: error description
*
* This raw function is used internally by edac_mc_handle_error(). It should
* only be called directly when the hardware error comes directly from BIOS,
* like in the case of the APEI GHES driver.
*/
-void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
- struct mem_ctl_info *mci,
- struct edac_raw_error_desc *e);
+void edac_raw_mc_handle_error(struct edac_raw_error_desc *e);
/**
* edac_mc_handle_error() - Reports a memory event to userspace.
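
The new calling convention is visible in ghes_edac.c below: the reporter fills the edac_raw_error_desc embedded in its mem_ctl_info, including the type field that used to be a separate argument, and passes only the descriptor; edac_raw_mc_handle_error() recovers the mci with container_of(). A hedged usage sketch with purely illustrative values:

	struct edac_raw_error_desc *e = &mci->error_desc;

	memset(e, 0, sizeof(*e));
	e->type = HW_EVENT_ERR_CORRECTED;	/* severity now travels in the desc */
	e->error_count = 1;
	e->grain = 1;
	e->page_frame_number = 0x12345;		/* illustrative location */
	e->offset_in_page = 0x40;
	e->msg = "read error";			/* must be valid strings, never NULL */
	e->other_detail = "";
	strscpy(e->label, "DIMM 0", sizeof(e->label));

	edac_raw_mc_handle_error(e);		/* mci derived via container_of() */
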
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index c70ec0a306d8..4e6aca595133 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -274,14 +274,8 @@ static const struct attribute_group *csrow_attr_groups[] = {
NULL
};
-static void csrow_attr_release(struct device *dev)
-{
- /* release device with _edac_mc_free() */
-}
-
static const struct device_type csrow_attr_type = {
.groups = csrow_attr_groups,
- .release = csrow_attr_release,
};
/*
@@ -387,6 +381,14 @@ static const struct attribute_group *csrow_dev_groups[] = {
NULL
};
+static void csrow_release(struct device *dev)
+{
+ /*
+ * Nothing to free here; the csrow data is owned by the mci
+ * device, which will also release it.
+ */
+}
+
static inline int nr_pages_per_csrow(struct csrow_info *csrow)
{
int chan, nr_pages = 0;
@@ -405,6 +407,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
csrow->dev.type = &csrow_attr_type;
csrow->dev.groups = csrow_dev_groups;
+ csrow->dev.release = csrow_release;
device_initialize(&csrow->dev);
csrow->dev.parent = &mci->dev;
csrow->mci = mci;
@@ -441,10 +444,8 @@ static int edac_create_csrow_objects(struct mem_ctl_info *mci)
error:
for (--i; i >= 0; i--) {
- csrow = mci->csrows[i];
- if (!nr_pages_per_csrow(csrow))
- continue;
- device_unregister(&mci->csrows[i]->dev);
+ if (device_is_registered(&mci->csrows[i]->dev))
+ device_unregister(&mci->csrows[i]->dev);
}
return err;
@@ -453,15 +454,13 @@ error:
static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
{
int i;
- struct csrow_info *csrow;
- for (i = mci->nr_csrows - 1; i >= 0; i--) {
- csrow = mci->csrows[i];
- if (!nr_pages_per_csrow(csrow))
- continue;
- device_unregister(&mci->csrows[i]->dev);
+ for (i = 0; i < mci->nr_csrows; i++) {
+ if (device_is_registered(&mci->csrows[i]->dev))
+ device_unregister(&mci->csrows[i]->dev);
}
}
+
#endif
/*
@@ -552,10 +551,8 @@ static ssize_t dimmdev_ce_count_show(struct device *dev,
char *data)
{
struct dimm_info *dimm = to_dimm(dev);
- u32 count;
- count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][dimm->idx];
- return sprintf(data, "%u\n", count);
+ return sprintf(data, "%u\n", dimm->ce_count);
}
static ssize_t dimmdev_ue_count_show(struct device *dev,
@@ -563,10 +560,8 @@ static ssize_t dimmdev_ue_count_show(struct device *dev,
char *data)
{
struct dimm_info *dimm = to_dimm(dev);
- u32 count;
- count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][dimm->idx];
- return sprintf(data, "%u\n", count);
+ return sprintf(data, "%u\n", dimm->ue_count);
}
/* dimm/rank attribute files */
@@ -602,16 +597,18 @@ static const struct attribute_group *dimm_attr_groups[] = {
NULL
};
-static void dimm_attr_release(struct device *dev)
-{
- /* release device with _edac_mc_free() */
-}
-
static const struct device_type dimm_attr_type = {
.groups = dimm_attr_groups,
- .release = dimm_attr_release,
};
+static void dimm_release(struct device *dev)
+{
+ /*
+ * Nothing to free here; the dimm data is owned by the mci
+ * device, which will also release it.
+ */
+}
+
+/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
struct dimm_info *dimm)
@@ -620,6 +617,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
dimm->mci = mci;
dimm->dev.type = &dimm_attr_type;
+ dimm->dev.release = dimm_release;
device_initialize(&dimm->dev);
dimm->dev.parent = &mci->dev;
@@ -659,7 +657,9 @@ static ssize_t mci_reset_counters_store(struct device *dev,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
- int cnt, row, chan, i;
+ struct dimm_info *dimm;
+ int row, chan;
+
mci->ue_mc = 0;
mci->ce_mc = 0;
mci->ue_noinfo_count = 0;
@@ -675,11 +675,9 @@ static ssize_t mci_reset_counters_store(struct device *dev,
ri->channels[chan]->ce_count = 0;
}
- cnt = 1;
- for (i = 0; i < mci->n_layers; i++) {
- cnt *= mci->layers[i].size;
- memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
- memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
+ mci_for_each_dimm(mci, dimm) {
+ dimm->ue_count = 0;
+ dimm->ce_count = 0;
}
mci->start_time = jiffies;
@@ -884,14 +882,8 @@ static const struct attribute_group *mci_attr_groups[] = {
NULL
};
-static void mci_attr_release(struct device *dev)
-{
- /* release device with _edac_mc_free() */
-}
-
static const struct device_type mci_attr_type = {
.groups = mci_attr_groups,
- .release = mci_attr_release,
};
/*
@@ -910,8 +902,6 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
/* get the /sys/devices/system/edac subsys reference */
mci->dev.type = &mci_attr_type;
- device_initialize(&mci->dev);
-
mci->dev.parent = mci_pdev;
mci->dev.groups = groups;
dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
@@ -921,7 +911,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
- put_device(&mci->dev);
+ /* no put_device() here, free mci with _edac_mc_free() */
return err;
}
@@ -937,24 +927,20 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
err = edac_create_dimm_object(mci, dimm);
if (err)
- goto fail_unregister_dimm;
+ goto fail;
}
#ifdef CONFIG_EDAC_LEGACY_SYSFS
err = edac_create_csrow_objects(mci);
if (err < 0)
- goto fail_unregister_dimm;
+ goto fail;
#endif
edac_create_debugfs_nodes(mci);
return 0;
-fail_unregister_dimm:
- mci_for_each_dimm(mci, dimm) {
- if (device_is_registered(&dimm->dev))
- device_unregister(&dimm->dev);
- }
- device_unregister(&mci->dev);
+fail:
+ edac_remove_sysfs_mci_device(mci);
return err;
}
@@ -966,6 +952,9 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
struct dimm_info *dimm;
+ if (!device_is_registered(&mci->dev))
+ return;
+
edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
@@ -976,17 +965,14 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
#endif
mci_for_each_dimm(mci, dimm) {
- if (dimm->nr_pages == 0)
+ if (!device_is_registered(&dimm->dev))
continue;
edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
device_unregister(&dimm->dev);
}
-}
-void edac_unregister_sysfs(struct mem_ctl_info *mci)
-{
- edac_dbg(1, "unregistering device %s\n", dev_name(&mci->dev));
- device_unregister(&mci->dev);
+ /* only remove the device, but keep mci */
+ device_del(&mci->dev);
}
static void mc_attr_release(struct device *dev)
@@ -1000,9 +986,6 @@ static void mc_attr_release(struct device *dev)
kfree(dev);
}
-static const struct device_type mc_attr_type = {
- .release = mc_attr_release,
-};
/*
* Init/exit code for the module. Basically, creates/removes the parent 'mc' device under the edac sysfs subsystem
*/
@@ -1015,11 +998,10 @@ int __init edac_mc_sysfs_init(void)
return -ENOMEM;
mci_pdev->bus = edac_get_sysfs_subsys();
- mci_pdev->type = &mc_attr_type;
- device_initialize(mci_pdev);
- dev_set_name(mci_pdev, "mc");
+ mci_pdev->release = mc_attr_release;
+ mci_pdev->init_name = "mc";
- err = device_add(mci_pdev);
+ err = device_register(mci_pdev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev));
put_device(mci_pdev);
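
Underlying the sysfs rework is the split between device_del() and put_device() that the mci device now exploits: deleting removes the sysfs entry while the backing data stays alive until the last reference is dropped. A sketch of the resulting lifecycle (error handling hypothetical):

	device_initialize(&mci->dev);	/* in edac_mc_alloc(): refcount = 1 */

	err = device_add(&mci->dev);	/* registration: sysfs entry appears */
	if (err)
		goto out_free;		/* never registered: just drop the ref */

	/* ... controller in use ... */

	device_del(&mci->dev);		/* teardown: sysfs gone, data still valid */
out_free:
	put_device(&mci->dev);		/* final ref dropped: mci_release() frees */
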
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 388427d378b1..aa1f91688eb8 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -28,7 +28,6 @@ void edac_mc_sysfs_exit(void);
extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups);
extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
-void edac_unregister_sysfs(struct mem_ctl_info *mci);
extern int edac_get_log_ue(void);
extern int edac_get_log_ce(void);
extern int edac_get_panic_on_ue(void);
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index b99080d8a10c..cb3dab56a875 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -201,7 +201,6 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
- enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
struct ghes_edac_pvt *pvt;
@@ -240,17 +239,17 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
switch (sev) {
case GHES_SEV_CORRECTED:
- type = HW_EVENT_ERR_CORRECTED;
+ e->type = HW_EVENT_ERR_CORRECTED;
break;
case GHES_SEV_RECOVERABLE:
- type = HW_EVENT_ERR_UNCORRECTED;
+ e->type = HW_EVENT_ERR_UNCORRECTED;
break;
case GHES_SEV_PANIC:
- type = HW_EVENT_ERR_FATAL;
+ e->type = HW_EVENT_ERR_FATAL;
break;
default:
case GHES_SEV_NO:
- type = HW_EVENT_ERR_INFO;
+ e->type = HW_EVENT_ERR_INFO;
}
edac_dbg(1, "error validation_bits: 0x%08llx\n",
@@ -356,11 +355,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
mem_err->mem_dev_handle);
index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
- if (index >= 0) {
+ if (index >= 0)
e->top_layer = index;
- e->enable_per_layer_report = true;
- }
-
}
if (p > e->location)
*(p - 1) = '\0';
@@ -442,7 +438,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
if (p > pvt->other_detail)
*(p - 1) = '\0';
- edac_raw_mc_handle_error(type, mci, e);
+ edac_raw_mc_handle_error(e);
unlock:
spin_unlock_irqrestore(&ghes_lock, flags);
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 059eccf0582b..df08de963d10 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -123,10 +123,10 @@ static int i10nm_get_all_munits(void)
}
static const struct x86_cpu_id i10nm_cpuids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_D, 0, 0 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, 0 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_D, 0, 0 },
- { }
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
+ {}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ea980c556f2e..8874b7722b2f 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1239,7 +1239,7 @@ static int __init mce_amd_init(void)
case 0x17:
case 0x18:
- pr_warn("Decoding supported only on Scalable MCA processors.\n");
+ pr_warn_once("Decoding supported only on Scalable MCA processors.\n");
return -EINVAL;
default:
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 933f7722b893..bc47328eb485 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1537,8 +1537,8 @@ static struct dunit_ops dnv_ops = {
};
static const struct x86_cpu_id pnd2_cpuids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_D, 0, (kernel_ulong_t)&dnv_ops },
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4957e8ee1879..7d51c82be62b 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3420,13 +3420,13 @@ fail0:
}
static const struct x86_cpu_id sbridge_cpuids[] = {
- INTEL_CPU_FAM6(SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
- INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table),
- INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table),
- INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table),
- INTEL_CPU_FAM6(BROADWELL_D, pci_dev_descr_broadwell_table),
- INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table),
- INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table),
+ X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
+ X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &pci_dev_descr_ibridge_table),
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &pci_dev_descr_haswell_table),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &pci_dev_descr_broadwell_table),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &pci_dev_descr_broadwell_table),
+ X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &pci_dev_descr_knl_table),
+ X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
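
The same conversion recurs across the i10nm, pnd2, sb_edac and skx drivers in this series: the open-coded { vendor, family, model, feature, data } initializers become X86_MATCH_INTEL_FAM6_MODEL(model, data), which also moves the per-model pointer into the standard driver_data slot. A hedged sketch of how such a table is consumed (the my_* names are made up for illustration):

	#include <asm/cpu_device_id.h>
	#include <asm/intel-family.h>

	struct my_cfg { int channels; };		/* hypothetical per-model data */
	static struct my_cfg skl_cfg = { .channels = 6 };

	static const struct x86_cpu_id my_cpuids[] = {
		X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skl_cfg),
		{ }
	};
	MODULE_DEVICE_TABLE(x86cpu, my_cpuids);

	static int __init my_init(void)
	{
		const struct x86_cpu_id *id = x86_match_cpu(my_cpuids);

		if (!id)
			return -ENODEV;			/* CPU model not listed */

		/* the macro stashes the second argument in driver_data */
		return my_setup((struct my_cfg *)id->driver_data);
	}
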
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 83545b4facb7..46a3a3440f5e 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -158,7 +158,7 @@ fail:
}
static const struct x86_cpu_id skx_cpuids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 },
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 880ffd833718..12211dc040e8 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -477,16 +477,16 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
if (p->ce_cnt) {
pinf = &p->ceinfo;
- if (!priv->p_data->quirks) {
+ if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
- "CE", pinf->row, pinf->bank, pinf->col,
+ "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
+ "CE", pinf->row, pinf->bank,
+ pinf->bankgrpnr, pinf->blknr,
pinf->bitpos, pinf->data);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
+ "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
"CE", pinf->row, pinf->bank, pinf->col,
- pinf->bankgrpnr, pinf->blknr,
pinf->bitpos, pinf->data);
}
@@ -497,15 +497,15 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
if (p->ue_cnt) {
pinf = &p->ueinfo;
- if (!priv->p_data->quirks) {
+ if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d Col %d ",
- "UE", pinf->row, pinf->bank, pinf->col);
+ "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
+ "UE", pinf->row, pinf->bank,
+ pinf->bankgrpnr, pinf->blknr);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d",
- "UE", pinf->row, pinf->bank, pinf->col,
- pinf->bankgrpnr, pinf->blknr);
+ "DDR ECC error type :%s Row %d Bank %d Col %d ",
+ "UE", pinf->row, pinf->bank, pinf->col);
}
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
diff --git a/drivers/eisa/.gitignore b/drivers/eisa/.gitignore
index 4b335c0aedb0..7d0a2ad5abe2 100644
--- a/drivers/eisa/.gitignore
+++ b/drivers/eisa/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
devlist.h
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index a7f216191493..525345367260 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -107,7 +107,7 @@ struct axp288_extcon_info {
};
static const struct x86_cpu_id cherry_trail_cpu_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT, X86_FEATURE_ANY },
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};
@@ -443,9 +443,40 @@ static int axp288_extcon_probe(struct platform_device *pdev)
/* Start charger cable type detection */
axp288_extcon_enable(info);
+ device_init_wakeup(dev, true);
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+}
+
+static int __maybe_unused axp288_extcon_suspend(struct device *dev)
+{
+ struct axp288_extcon_info *info = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(info->irq[VBUS_RISING_IRQ]);
+
return 0;
}
+static int __maybe_unused axp288_extcon_resume(struct device *dev)
+{
+ struct axp288_extcon_info *info = dev_get_drvdata(dev);
+
+ /*
+ * Wakeup when a charger is connected to do charger-type
+ * connection and generate an extcon event which makes the
+ * axp288 charger driver set the input current limit.
+ */
+ if (device_may_wakeup(dev))
+ disable_irq_wake(info->irq[VBUS_RISING_IRQ]);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(axp288_extcon_pm_ops, axp288_extcon_suspend,
+ axp288_extcon_resume);
+
static const struct platform_device_id axp288_extcon_table[] = {
{ .name = "axp288_extcon" },
{},
@@ -457,6 +488,7 @@ static struct platform_driver axp288_extcon_driver = {
.id_table = axp288_extcon_table,
.driver = {
.name = "axp288_extcon",
+ .pm = &axp288_extcon_pm_ops,
},
};
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index edc5016f46f1..cea58d0cb457 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -205,14 +205,18 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id",
GPIOD_IN);
- if (IS_ERR(palmas_usb->id_gpiod)) {
+ if (PTR_ERR(palmas_usb->id_gpiod) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(palmas_usb->id_gpiod)) {
dev_err(&pdev->dev, "failed to get id gpio\n");
return PTR_ERR(palmas_usb->id_gpiod);
}
palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
GPIOD_IN);
- if (IS_ERR(palmas_usb->vbus_gpiod)) {
+ if (PTR_ERR(palmas_usb->vbus_gpiod) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(palmas_usb->vbus_gpiod)) {
dev_err(&pdev->dev, "failed to get vbus gpio\n");
return PTR_ERR(palmas_usb->vbus_gpiod);
}
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e055893fd5c3..2dfbfec572f9 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1406,6 +1406,7 @@ const char *extcon_get_edev_name(struct extcon_dev *edev)
{
return !edev ? NULL : edev->name;
}
+EXPORT_SYMBOL_GPL(extcon_get_edev_name);
static int __init extcon_class_init(void)
{
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index ea869addc89b..8007d4aa76dc 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -206,7 +206,7 @@ config FW_CFG_SYSFS_CMDLINE
config INTEL_STRATIX10_SERVICE
tristate "Intel Stratix10 Service Layer"
- depends on ARCH_STRATIX10 && HAVE_ARM_SMCCC
+ depends on (ARCH_STRATIX10 || ARCH_AGILEX) && HAVE_ARM_SMCCC
default n
help
Intel Stratix10 service layer runs at privileged exception level,
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 5f298f00a82e..6694d0d908d6 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
+obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
+scmi-transport-y = mailbox.o shmem.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index df35358ff324..5ac06469b01c 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -33,8 +33,8 @@ enum scmi_common_cmd {
/**
* struct scmi_msg_resp_prot_version - Response for a message
*
- * @major_version: Major version of the ABI that firmware supports
* @minor_version: Minor version of the ABI that firmware supports
+ * @major_version: Major version of the ABI that firmware supports
*
* In general, ABI version changes follow the rule that minor version increments
* are backward compatible. Major revision changes in ABI may not be
@@ -47,6 +47,19 @@ struct scmi_msg_resp_prot_version {
__le16 major_version;
};
+#define MSG_ID_MASK GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
+#define MSG_TYPE_MASK GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND 0
+#define MSG_TYPE_DELAYED_RESP 2
+#define MSG_TYPE_NOTIFICATION 3
+#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
+#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
+#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
+#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
@@ -68,6 +81,33 @@ struct scmi_msg_hdr {
};
/**
+ * pack_scmi_header() - packs and returns 32-bit header
+ *
+ * @hdr: pointer to header containing all the information on message id,
+ * protocol id and sequence id.
+ *
+ * Return: 32-bit packed message header to be sent to the platform.
+ */
+static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
+{
+ return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+ FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
+ FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
+}
+
+/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+ hdr->id = MSG_XTRACT_ID(msg_hdr);
+ hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
+
+/**
* struct scmi_msg - Message(Tx/Rx) structure
*
* @buf: Buffer pointer
@@ -88,7 +128,7 @@ struct scmi_msg {
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
* @done: command message transmit completion event
- * @async: pointer to delayed response message received event completion
+ * @async_done: pointer to delayed response message received event completion
*/
struct scmi_xfer {
int transfer_id;
@@ -113,3 +153,74 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
u8 *prot_imp);
int scmi_base_protocol_init(struct scmi_handle *h);
+
+/* SCMI Transport */
+/**
+ * struct scmi_chan_info - Structure representing SCMI channel information
+ *
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ * channel
+ * @handle: Pointer to SCMI entity handle
+ * @transport_info: Transport layer related information
+ */
+struct scmi_chan_info {
+ struct device *dev;
+ struct scmi_handle *handle;
+ void *transport_info;
+};
+
+/**
+ * struct scmi_transport_ops - Structure describing the SCMI transport operations
+ *
+ * @chan_available: Callback to check if channel is available or not
+ * @chan_setup: Callback to allocate and setup a channel
+ * @chan_free: Callback to free a channel
+ * @send_message: Callback to send a message
+ * @mark_txdone: Callback to mark tx as done
+ * @fetch_response: Callback to fetch response
+ * @poll_done: Callback to poll transfer status
+ */
+struct scmi_transport_ops {
+ bool (*chan_available)(struct device *dev, int idx);
+ int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx);
+ int (*chan_free)(int id, void *p, void *data);
+ int (*send_message)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
+ void (*fetch_response)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
+};
+
+/**
+ * struct scmi_desc - Description of SoC integration
+ *
+ * @ops: Pointer to the transport specific ops structure
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in milliseconds)
+ * @max_msg: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct scmi_desc {
+ struct scmi_transport_ops *ops;
+ int max_rx_timeout_ms;
+ int max_msg;
+ int max_msg_size;
+};
+
+extern const struct scmi_desc scmi_mailbox_desc;
+
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
+
+/* shmem related declarations */
+struct scmi_shared_mem;
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
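
The MSG_* macros moved here from driver.c define the 32-bit SCMI message header layout: message id in bits [7:0], message type in [9:8], protocol id in [17:10] and the sequence token in [27:18]. A worked example of pack_scmi_header() with illustrative values (the type field is left at zero, i.e. MSG_TYPE_COMMAND, since pack_scmi_header() never sets it):

	struct scmi_msg_hdr hdr = {
		.id = 0x06,		/* illustrative message id */
		.protocol_id = 0x15,	/* the sensor protocol, for instance */
		.seq = 3,		/* sequence token */
	};
	u32 packed = pack_scmi_header(&hdr);

	/*
	 *   FIELD_PREP(MSG_ID_MASK,          0x06) = 0x00000006
	 * | FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x15) = 0x00005400
	 * | FIELD_PREP(MSG_TOKEN_ID_MASK,    0x03) = 0x000c0000
	 *                                   packed = 0x000c5406
	 */
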
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 2c96f6b5a7d8..dbec767222e9 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -19,12 +19,10 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
-#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
-#include <linux/semaphore.h>
#include <linux/slab.h>
#include "common.h"
@@ -32,19 +30,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
-#define MSG_ID_MASK GENMASK(7, 0)
-#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
-#define MSG_TYPE_MASK GENMASK(9, 8)
-#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
-#define MSG_TYPE_COMMAND 0
-#define MSG_TYPE_DELAYED_RESP 2
-#define MSG_TYPE_NOTIFICATION 3
-#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
-#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
-#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
-#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
-#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
-
enum scmi_error_codes {
SCMI_SUCCESS = 0, /* Success */
SCMI_ERR_SUPPORT = -1, /* Not supported */
@@ -83,45 +68,13 @@ struct scmi_xfers_info {
};
/**
- * struct scmi_desc - Description of SoC integration
- *
- * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
- * @max_msg: Maximum number of messages that can be pending
- * simultaneously in the system
- * @max_msg_size: Maximum size of data per message that can be handled.
- */
-struct scmi_desc {
- int max_rx_timeout_ms;
- int max_msg;
- int max_msg_size;
-};
-
-/**
- * struct scmi_chan_info - Structure representing a SCMI channel information
- *
- * @cl: Mailbox Client
- * @chan: Transmit/Receive mailbox channel
- * @payload: Transmit/Receive mailbox channel payload area
- * @dev: Reference to device in the SCMI hierarchy corresponding to this
- * channel
- * @handle: Pointer to SCMI entity handle
- */
-struct scmi_chan_info {
- struct mbox_client cl;
- struct mbox_chan *chan;
- void __iomem *payload;
- struct device *dev;
- struct scmi_handle *handle;
-};
-
-/**
* struct scmi_info - Structure representing a SCMI instance
*
* @dev: Device pointer
* @desc: SoC description for this instance
- * @handle: Instance of SCMI handle to send to clients
* @version: SCMI revision information containing protocol version,
* implementation version and (sub-)vendor identification.
+ * @handle: Instance of SCMI handle to send to clients
* @tx_minfo: Universal Transmit Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
@@ -143,27 +96,8 @@ struct scmi_info {
int users;
};
-#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
-/*
- * SCMI specification requires all parameters, message headers, return
- * arguments or any protocol data to be expressed in little endian
- * format only.
- */
-struct scmi_shared_mem {
- __le32 reserved;
- __le32 channel_status;
-#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
-#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
- __le32 reserved1[2];
- __le32 flags;
-#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
- __le32 length;
- __le32 msg_header;
- u8 msg_payload[0];
-};
-
static const int scmi_linux_errmap[] = {
/* better than switch case as long as return value is continuous */
0, /* SCMI_SUCCESS */
@@ -199,77 +133,6 @@ static inline void scmi_dump_header_dbg(struct device *dev,
hdr->id, hdr->seq, hdr->protocol_id);
}
-static void scmi_fetch_response(struct scmi_xfer *xfer,
- struct scmi_shared_mem __iomem *mem)
-{
- xfer->hdr.status = ioread32(mem->msg_payload);
- /* Skip the length of header and status in payload area i.e 8 bytes */
- xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
-
- /* Take a copy to the rx buffer.. */
- memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
-}
-
-/**
- * pack_scmi_header() - packs and returns 32-bit header
- *
- * @hdr: pointer to header containing all the information on message id,
- * protocol id and sequence id.
- *
- * Return: 32-bit packed message header to be sent to the platform.
- */
-static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
-{
- return FIELD_PREP(MSG_ID_MASK, hdr->id) |
- FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
- FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
-}
-
-/**
- * unpack_scmi_header() - unpacks and records message and protocol id
- *
- * @msg_hdr: 32-bit packed message header sent from the platform
- * @hdr: pointer to header to fetch message and protocol id.
- */
-static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
-{
- hdr->id = MSG_XTRACT_ID(msg_hdr);
- hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
-}
-
-/**
- * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
- *
- * @cl: client pointer
- * @m: mailbox message
- *
- * This function prepares the shared memory which contains the header and the
- * payload.
- */
-static void scmi_tx_prepare(struct mbox_client *cl, void *m)
-{
- struct scmi_xfer *t = m;
- struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
- struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
- /*
- * Ideally channel must be free by now unless OS timeout last
- * request and platform continued to process the same, wait
- * until it releases the shared memory, otherwise we may endup
- * overwriting its response with new message payload or vice-versa
- */
- spin_until_cond(ioread32(&mem->channel_status) &
- SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
- /* Mark channel busy + clear error */
- iowrite32(0x0, &mem->channel_status);
- iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
- &mem->flags);
- iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
- iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
- if (t->tx.buf)
- memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
-}
-
/**
* scmi_xfer_get() - Allocate one message
*
@@ -338,10 +201,10 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
}
/**
- * scmi_rx_callback() - mailbox client callback for receive messages
+ * scmi_rx_callback() - callback for receiving messages
*
- * @cl: client pointer
- * @m: mailbox message
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
@@ -349,21 +212,14 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
* NOTE: This function will be invoked in IRQ context, hence it should
* be as fast as possible.
*/
-static void scmi_rx_callback(struct mbox_client *cl, void *m)
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
- u8 msg_type;
- u32 msg_hdr;
- u16 xfer_id;
- struct scmi_xfer *xfer;
- struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
- struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
- struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
- msg_hdr = ioread32(&mem->msg_header);
- msg_type = MSG_XTRACT_TYPE(msg_hdr);
- xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+ struct device *dev = cinfo->dev;
+ struct scmi_xfer *xfer;
if (msg_type == MSG_TYPE_NOTIFICATION)
return; /* Notifications not yet supported */
@@ -378,7 +234,7 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
scmi_dump_header_dbg(dev, &xfer->hdr);
- scmi_fetch_response(xfer, mem);
+ info->desc->ops->fetch_response(cinfo, xfer);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
@@ -403,28 +259,15 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
__scmi_xfer_put(&info->tx_minfo, xfer);
}
-static bool
-scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
-{
- struct scmi_shared_mem __iomem *mem = cinfo->payload;
- u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
-
- if (xfer->hdr.seq != xfer_id)
- return false;
-
- return ioread32(&mem->channel_status) &
- (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
- SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
-}
-
#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
-static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
+static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop)
{
- ktime_t __cur = ktime_get();
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
+ return info->desc->ops->poll_done(cinfo, xfer) ||
+ ktime_after(ktime_get(), stop);
}
/**
@@ -453,29 +296,26 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
- ret = mbox_send_message(cinfo->chan, xfer);
+ ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
- dev_dbg(dev, "mbox send fail %d\n", ret);
+ dev_dbg(dev, "Failed to send message %d\n", ret);
return ret;
}
- /* mbox_send_message returns non-negative value on success, so reset */
- ret = 0;
-
if (xfer->hdr.poll_completion) {
ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
if (ktime_before(ktime_get(), stop))
- scmi_fetch_response(xfer, cinfo->payload);
+ info->desc->ops->fetch_response(cinfo, xfer);
else
ret = -ETIMEDOUT;
} else {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout)) {
- dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
+ dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
@@ -484,13 +324,8 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
- /*
- * NOTE: we might prefer not to need the mailbox ticker to manage the
- * transfer queueing since the protocol layer queues things by itself.
- * Unfortunately, we have to kick the mailbox framework after we have
- * received our message.
- */
- mbox_client_txdone(cinfo->chan, ret);
+ if (info->desc->ops->mark_txdone)
+ info->desc->ops->mark_txdone(cinfo, ret);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
@@ -731,23 +566,12 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return 0;
}
-static int scmi_mailbox_check(struct device_node *np, int idx)
-{
- return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
- idx, NULL);
-}
-
-static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
- int prot_id, bool tx)
+static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
+ int prot_id, bool tx)
{
int ret, idx;
- struct resource res;
- resource_size_t size;
- struct device_node *shmem, *np = dev->of_node;
struct scmi_chan_info *cinfo;
- struct mbox_client *cl;
struct idr *idr;
- const char *desc = tx ? "Tx" : "Rx";
/* Transmit channel is first entry i.e. index 0 */
idx = tx ? 0 : 1;
@@ -758,7 +582,7 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
if (cinfo)
return 0;
- if (scmi_mailbox_check(np, idx)) {
+ if (!info->desc->ops->chan_available(dev, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
return -EINVAL;
@@ -771,36 +595,9 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
cinfo->dev = dev;
- cl = &cinfo->cl;
- cl->dev = dev;
- cl->rx_callback = scmi_rx_callback;
- cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
- cl->tx_block = false;
- cl->knows_txdone = tx;
-
- shmem = of_parse_phandle(np, "shmem", idx);
- ret = of_address_to_resource(shmem, 0, &res);
- of_node_put(shmem);
- if (ret) {
- dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
- return ret;
- }
-
- size = resource_size(&res);
- cinfo->payload = devm_ioremap(info->dev, res.start, size);
- if (!cinfo->payload) {
- dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
- return -EADDRNOTAVAIL;
- }
-
- cinfo->chan = mbox_request_channel(cl, idx);
- if (IS_ERR(cinfo->chan)) {
- ret = PTR_ERR(cinfo->chan);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to request SCMI %s mailbox\n",
- desc);
+ ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
+ if (ret)
return ret;
- }
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
@@ -814,12 +611,12 @@ idr_alloc:
}
static inline int
-scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
- int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
+ int ret = scmi_chan_setup(info, dev, prot_id, true);
if (!ret) /* Rx is optional, hence no error check */
- scmi_mbox_chan_setup(info, dev, prot_id, false);
+ scmi_chan_setup(info, dev, prot_id, false);
return ret;
}
@@ -837,7 +634,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
return;
}
- if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
+ if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
return;
@@ -890,12 +687,6 @@ static int scmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
- /* Only mailbox method supported, check for the presence of one */
- if (scmi_mailbox_check(np, 0)) {
- dev_err(dev, "no mailbox found in %pOF\n", np);
- return -EINVAL;
- }
-
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
@@ -920,7 +711,7 @@ static int scmi_probe(struct platform_device *pdev)
handle->dev = info->dev;
handle->version = &info->version;
- ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+ ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
@@ -955,19 +746,9 @@ static int scmi_probe(struct platform_device *pdev)
return 0;
}
-static int scmi_mbox_free_channel(int id, void *p, void *data)
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
- struct scmi_chan_info *cinfo = p;
- struct idr *idr = data;
-
- if (!IS_ERR_OR_NULL(cinfo->chan)) {
- mbox_free_channel(cinfo->chan);
- cinfo->chan = NULL;
- }
-
idr_remove(idr, id);
-
- return 0;
}
static int scmi_remove(struct platform_device *pdev)
@@ -987,11 +768,11 @@ static int scmi_remove(struct platform_device *pdev)
return ret;
/* Safe to free channels since no more users */
- ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
idr_destroy(&info->tx_idr);
idr = &info->rx_idr;
- ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
idr_destroy(&info->rx_idr);
return ret;
@@ -1043,15 +824,9 @@ static struct attribute *versions_attrs[] = {
};
ATTRIBUTE_GROUPS(versions);
-static const struct scmi_desc scmi_generic_desc = {
- .max_rx_timeout_ms = 30, /* We may increase this if required */
- .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
-};
-
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
- { .compatible = "arm,scmi", .data = &scmi_generic_desc },
+ { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
{ /* Sentinel */ },
};
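
The point of routing everything through info->desc->ops above is that driver.c no longer knows about mailboxes at all; a future transport only has to supply its own ops table and descriptor. As a rough, hypothetical skeleton (all smc_* names are invented for illustration and are not part of this patch):

static struct scmi_transport_ops scmi_smc_ops = {
        .chan_available = smc_chan_available,   /* e.g. probe DT for an SMC id */
        .chan_setup     = smc_chan_setup,       /* map shmem, stash private state */
        .chan_free      = smc_chan_free,
        .send_message   = smc_send_message,     /* e.g. issue an SMCCC call */
        /* .mark_txdone is optional; driver.c checks for NULL before calling */
        .fetch_response = smc_fetch_response,
        .poll_done      = smc_poll_done,
};

const struct scmi_desc scmi_smc_desc = {
        .ops               = &scmi_smc_ops,
        .max_rx_timeout_ms = 30,
        .max_msg           = 20,
        .max_msg_size      = 128,
};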
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
new file mode 100644
index 000000000000..73077bbc4ad9
--- /dev/null
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Mailbox Transport
+ * driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_mailbox - Structure representing a SCMI mailbox transport
+ *
+ * @cl: Mailbox Client
+ * @chan: Transmit/Receive mailbox channel
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ */
+struct scmi_mailbox {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+};
+
+#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
+
+static void tx_prepare(struct mbox_client *cl, void *m)
+{
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+ shmem_tx_prepare(smbox->shmem, m);
+}
+
+static void rx_callback(struct mbox_client *cl, void *m)
+{
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+ scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem));
+}
+
+static bool mailbox_chan_available(struct device *dev, int idx)
+{
+ return !of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", idx, NULL);
+}
+
+static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ const char *desc = tx ? "Tx" : "Rx";
+ struct device *cdev = cinfo->dev;
+ struct scmi_mailbox *smbox;
+ struct device_node *shmem;
+ int ret, idx = tx ? 0 : 1;
+ struct mbox_client *cl;
+ resource_size_t size;
+ struct resource res;
+
+ smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
+ if (!smbox)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
+ return ret;
+ }
+
+ size = resource_size(&res);
+ smbox->shmem = devm_ioremap(dev, res.start, size);
+ if (!smbox->shmem) {
+ dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
+ return -EADDRNOTAVAIL;
+ }
+
+ cl = &smbox->cl;
+ cl->dev = cdev;
+ cl->tx_prepare = tx ? tx_prepare : NULL;
+ cl->rx_callback = rx_callback;
+ cl->tx_block = false;
+ cl->knows_txdone = tx;
+
+ smbox->chan = mbox_request_channel(cl, tx ? 0 : 1);
+ if (IS_ERR(smbox->chan)) {
+ ret = PTR_ERR(smbox->chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cdev, "failed to request SCMI %s mailbox\n",
+ tx ? "Tx" : "Rx");
+ return ret;
+ }
+
+ cinfo->transport_info = smbox;
+ smbox->cinfo = cinfo;
+
+ return 0;
+}
+
+static int mailbox_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ if (!IS_ERR(smbox->chan)) {
+ mbox_free_channel(smbox->chan);
+ cinfo->transport_info = NULL;
+ smbox->chan = NULL;
+ smbox->cinfo = NULL;
+ }
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static int mailbox_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+ int ret;
+
+ ret = mbox_send_message(smbox->chan, xfer);
+
+ /* mbox_send_message returns a non-negative value on success, so reset */
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(smbox->chan, ret);
+}
+
+static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_response(smbox->shmem, xfer);
+}
+
+static bool
+mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ return shmem_poll_done(smbox->shmem, xfer);
+}
+
+static struct scmi_transport_ops scmi_mailbox_ops = {
+ .chan_available = mailbox_chan_available,
+ .chan_setup = mailbox_chan_setup,
+ .chan_free = mailbox_chan_free,
+ .send_message = mailbox_send_message,
+ .mark_txdone = mailbox_mark_txdone,
+ .fetch_response = mailbox_fetch_response,
+ .poll_done = mailbox_poll_done,
+};
+
+const struct scmi_desc scmi_mailbox_desc = {
+ .ops = &scmi_mailbox_ops,
+ .max_rx_timeout_ms = 30, /* We may increase this if required */
+ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
+ .max_msg_size = 128,
+};
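
Taken together with the driver.c hunks above, the transmit path any transport plugs into reduces to roughly the following hedged condensation (the real scmi_do_xfer() also arms a timeout, waits on a completion in the interrupt-driven case, and converts the SCMI status to a Linux errno):

static int scmi_do_xfer_sketch(struct scmi_info *info,
                               struct scmi_chan_info *cinfo,
                               struct scmi_xfer *xfer)
{
        int ret = info->desc->ops->send_message(cinfo, xfer);

        if (ret)
                return ret;

        if (xfer->hdr.poll_completion) {
                /* polling mode: spin until the platform flips the channel */
                while (!info->desc->ops->poll_done(cinfo, xfer))
                        cpu_relax();
                info->desc->ops->fetch_response(cinfo, xfer);
        }
        /* otherwise the reply arrives via scmi_rx_callback() */

        if (info->desc->ops->mark_txdone)
                info->desc->ops->mark_txdone(cinfo, ret);

        return xfer->hdr.status;
}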
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index ec81e6f7e7a4..34f3a917dd8d 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -89,7 +89,7 @@ struct scmi_msg_resp_perf_describe_levels {
__le32 power;
__le16 transition_latency_us;
__le16 reserved;
- } opp[0];
+ } opp[];
};
struct scmi_perf_get_fc_info {
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
new file mode 100644
index 000000000000..e1e816e0018c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transports using a shared memory structure.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/processor.h>
+#include <linux/types.h>
+
+#include "common.h"
+
+/*
+ * The SCMI specification requires all parameters, message headers, return
+ * arguments and any other protocol data to be expressed in little-endian
+ * format only.
+ */
+struct scmi_shared_mem {
+ __le32 reserved;
+ __le32 channel_status;
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
+ __le32 reserved1[2];
+ __le32 flags;
+#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
+ __le32 length;
+ __le32 msg_header;
+ u8 msg_payload[];
+};
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ /*
+ * Ideally the channel must be free by now, unless the OS timed out
+ * the last request and the platform continued to process it; wait
+ * until the platform releases the shared memory, otherwise we may
+ * end up overwriting its response with a new message payload or
+ * vice versa.
+ */
+ spin_until_cond(ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ /* Mark channel busy + clear error */
+ iowrite32(0x0, &shmem->channel_status);
+ iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+ &shmem->flags);
+ iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
+ iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
+ if (xfer->tx.buf)
+ memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+{
+ return ioread32(&shmem->msg_header);
+}
+
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ xfer->hdr.status = ioread32(shmem->msg_payload);
+ /* Skip the length of the header and status in the shmem area, i.e. 8 bytes */
+ xfer->rx.len = min_t(size_t, xfer->rx.len,
+ ioread32(&shmem->length) - 8);
+
+ /* Take a copy into the rx buffer. */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+}
+
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ u16 xfer_id;
+
+ xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
+
+ if (xfer->hdr.seq != xfer_id)
+ return false;
+
+ return ioread32(&shmem->channel_status) &
+ (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
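
The channel_status dance above is the generic SCMI shared-memory handshake: the agent spins for CHANNEL_FREE, clears the word to claim the channel, writes the message, and rings whatever doorbell its transport provides; the platform sets CHANNEL_FREE again (plus CHANNEL_ERROR on failure) once the reply is in place. A sketch of how a transport is expected to glue these helpers to its doorbell (the xyz_* names are invented):

static int xyz_send_message(struct scmi_chan_info *cinfo,
                            struct scmi_xfer *xfer)
{
        struct xyz_chan *ch = cinfo->transport_info;    /* hypothetical */

        shmem_tx_prepare(ch->shmem, xfer);      /* claim channel, write message */
        xyz_ring_doorbell(ch);                  /* notify the platform */
        return 0;
}

/* doorbell/IRQ handler on the receive side */
static void xyz_rx_isr(struct xyz_chan *ch)
{
        scmi_rx_callback(ch->cinfo, shmem_read_header(ch->shmem));
}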
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index a80c331c3a6e..d0dee37ad522 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -262,12 +262,12 @@ struct scpi_drvinfo {
struct scpi_shared_mem {
__le32 command;
__le32 status;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct legacy_scpi_shared_mem {
__le32 status;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct scp_capabilities {
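
The payload[0] to payload[] conversions here and in perf.c above swap the old GNU zero-length-array extension for a C99 flexible array member. The layout is unchanged, but the compiler can now reject misuse, such as taking sizeof of the member or placing it anywhere but last. The usual allocation idiom, for reference:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo {
        __le32 status;
        u8 payload[];           /* must be the last member */
};

static struct demo *demo_alloc(size_t count)
{
        struct demo *d;

        /* struct_size() adds sizeof(*d) and count bytes, overflow-checked */
        d = kzalloc(struct_size(d, payload, count), GFP_KERNEL);
        return d;
}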
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index a479023fa036..334c8be0c11f 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -267,26 +267,19 @@ static struct sdei_event *sdei_event_create(u32 event_num,
event->private_registered = regs;
}
- if (sdei_event_find(event_num)) {
- kfree(event->registered);
- kfree(event);
- event = ERR_PTR(-EBUSY);
- } else {
- spin_lock(&sdei_list_lock);
- list_add(&event->list, &sdei_list);
- spin_unlock(&sdei_list_lock);
- }
+ spin_lock(&sdei_list_lock);
+ list_add(&event->list, &sdei_list);
+ spin_unlock(&sdei_list_lock);
return event;
}
-static void sdei_event_destroy(struct sdei_event *event)
+static void sdei_event_destroy_llocked(struct sdei_event *event)
{
lockdep_assert_held(&sdei_events_lock);
+ lockdep_assert_held(&sdei_list_lock);
- spin_lock(&sdei_list_lock);
list_del(&event->list);
- spin_unlock(&sdei_list_lock);
if (event->type == SDEI_EVENT_TYPE_SHARED)
kfree(event->registered);
@@ -296,6 +289,13 @@ static void sdei_event_destroy(struct sdei_event *event)
kfree(event);
}
+static void sdei_event_destroy(struct sdei_event *event)
+{
+ spin_lock(&sdei_list_lock);
+ sdei_event_destroy_llocked(event);
+ spin_unlock(&sdei_list_lock);
+}
+
static int sdei_api_get_version(u64 *version)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
@@ -412,14 +412,19 @@ int sdei_event_enable(u32 event_num)
return -ENOENT;
}
- spin_lock(&sdei_list_lock);
- event->reenable = true;
- spin_unlock(&sdei_list_lock);
+ cpus_read_lock();
if (event->type == SDEI_EVENT_TYPE_SHARED)
err = sdei_api_event_enable(event->event_num);
else
err = sdei_do_cross_call(_local_event_enable, event);
+
+ if (!err) {
+ spin_lock(&sdei_list_lock);
+ event->reenable = true;
+ spin_unlock(&sdei_list_lock);
+ }
+ cpus_read_unlock();
mutex_unlock(&sdei_events_lock);
return err;
@@ -491,11 +496,6 @@ static int _sdei_event_unregister(struct sdei_event *event)
{
lockdep_assert_held(&sdei_events_lock);
- spin_lock(&sdei_list_lock);
- event->reregister = false;
- event->reenable = false;
- spin_unlock(&sdei_list_lock);
-
if (event->type == SDEI_EVENT_TYPE_SHARED)
return sdei_api_event_unregister(event->event_num);
@@ -518,6 +518,11 @@ int sdei_event_unregister(u32 event_num)
break;
}
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
err = _sdei_event_unregister(event);
if (err)
break;
@@ -585,26 +590,15 @@ static int _sdei_event_register(struct sdei_event *event)
lockdep_assert_held(&sdei_events_lock);
- spin_lock(&sdei_list_lock);
- event->reregister = true;
- spin_unlock(&sdei_list_lock);
-
if (event->type == SDEI_EVENT_TYPE_SHARED)
return sdei_api_event_register(event->event_num,
sdei_entry_point,
event->registered,
SDEI_EVENT_REGISTER_RM_ANY, 0);
-
err = sdei_do_cross_call(_local_event_register, event);
- if (err) {
- spin_lock(&sdei_list_lock);
- event->reregister = false;
- event->reenable = false;
- spin_unlock(&sdei_list_lock);
-
+ if (err)
sdei_do_cross_call(_local_event_unregister, event);
- }
return err;
}
@@ -632,12 +626,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
break;
}
+ cpus_read_lock();
err = _sdei_event_register(event);
if (err) {
sdei_event_destroy(event);
pr_warn("Failed to register event %u: %d\n", event_num,
err);
+ } else {
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
}
+ cpus_read_unlock();
} while (0);
mutex_unlock(&sdei_events_lock);
@@ -645,16 +645,17 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
}
EXPORT_SYMBOL(sdei_event_register);
-static int sdei_reregister_event(struct sdei_event *event)
+static int sdei_reregister_event_llocked(struct sdei_event *event)
{
int err;
lockdep_assert_held(&sdei_events_lock);
+ lockdep_assert_held(&sdei_list_lock);
err = _sdei_event_register(event);
if (err) {
pr_err("Failed to re-register event %u\n", event->event_num);
- sdei_event_destroy(event);
+ sdei_event_destroy_llocked(event);
return err;
}
@@ -683,7 +684,7 @@ static int sdei_reregister_shared(void)
continue;
if (event->reregister) {
- err = sdei_reregister_event(event);
+ err = sdei_reregister_event_llocked(event);
if (err)
break;
}
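
The _llocked variants introduced above follow the usual kernel convention for lock refactors: the core helper asserts that the caller already holds the lock, and a thin wrapper takes it for external callers, so reregistration can run with sdei_list_lock held across the whole list walk. In miniature, with invented names:

static DEFINE_SPINLOCK(example_list_lock);

struct example {
        struct list_head list;
};

static void example_destroy_locked(struct example *e)
{
        lockdep_assert_held(&example_list_lock);        /* caller holds the lock */
        list_del(&e->list);
        kfree(e);
}

static void example_destroy(struct example *e)
{
        spin_lock(&example_list_lock);
        example_destroy_locked(e);
        spin_unlock(&example_list_lock);
}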
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 2045566d622f..f59163cb7cba 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -11,6 +11,10 @@
#include <asm/dmi.h>
#include <asm/unaligned.h>
+#ifndef SMBIOS_ENTRY_POINT_SCAN_START
+#define SMBIOS_ENTRY_POINT_SCAN_START 0xF0000
+#endif
+
struct kobject *dmi_kobj;
EXPORT_SYMBOL_GPL(dmi_kobj);
@@ -663,7 +667,7 @@ static void __init dmi_scan_machine(void)
return;
}
} else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
- p = dmi_early_remap(0xF0000, 0x10000);
+ p = dmi_early_remap(SMBIOS_ENTRY_POINT_SCAN_START, 0x10000);
if (p == NULL)
goto error;
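
With the #ifndef guard above, an architecture whose legacy SMBIOS entry point does not live at 0xF0000 can relocate the fallback scan window simply by defining the macro in its own <asm/dmi.h>, which dmi_scan.c includes first. A hypothetical override (the address is illustrative only):

/* arch/foo/include/asm/dmi.h -- hypothetical */
#define SMBIOS_ENTRY_POINT_SCAN_START   0xFFF80000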
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 29906e39ab4b..14d0970a7198 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -341,7 +341,7 @@ edd_show_legacy_max_cylinder(struct edd_device *edev, char *buf)
if (!info || !buf)
return -EINVAL;
- p += snprintf(p, left, "%u\n", info->legacy_max_cylinder);
+ p += scnprintf(p, left, "%u\n", info->legacy_max_cylinder);
return (p - buf);
}
@@ -356,7 +356,7 @@ edd_show_legacy_max_head(struct edd_device *edev, char *buf)
if (!info || !buf)
return -EINVAL;
- p += snprintf(p, left, "%u\n", info->legacy_max_head);
+ p += scnprintf(p, left, "%u\n", info->legacy_max_head);
return (p - buf);
}
@@ -371,7 +371,7 @@ edd_show_legacy_sectors_per_track(struct edd_device *edev, char *buf)
if (!info || !buf)
return -EINVAL;
- p += snprintf(p, left, "%u\n", info->legacy_sectors_per_track);
+ p += scnprintf(p, left, "%u\n", info->legacy_sectors_per_track);
return (p - buf);
}
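
The snprintf() to scnprintf() swaps above are not cosmetic: snprintf() returns the length the output would have had, so p += snprintf(...) can push p past the end of the buffer when truncation occurs, while scnprintf() returns the number of bytes actually written. A minimal contrast:

char buf[8];
int n;

n = snprintf(buf, sizeof(buf), "%s", "0123456789");
/* n == 10 (would-be length); buf holds "0123456" plus the NUL */

n = scnprintf(buf, sizeof(buf), "%s", "0123456789");
/* n == 7 (bytes actually stored), safe for pointer arithmetic */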
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index ecc83e2f032c..613828d3f106 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -239,6 +239,11 @@ config EFI_DISABLE_PCI_DMA
endmenu
+config EFI_EMBEDDED_FIRMWARE
+ bool
+ depends on EFI
+ select CRYPTO_LIB_SHA256
+
config UEFI_CPER
bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 554d795270d9..7a216984552b 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -13,19 +13,21 @@ KASAN_SANITIZE_runtime-wrappers.o := n
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
obj-$(CONFIG_EFI) += capsule.o memmap.o
+obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o
obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
-obj-$(CONFIG_EFI_STUB) += libstub/
+subdir-$(CONFIG_EFI_STUB) += libstub
obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_map.o
obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
+obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
fake_map-y += fake_mem.o
fake_map-$(CONFIG_X86) += x86_fake_mem.o
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 5ccf39986a14..34f53d898acb 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -31,7 +31,7 @@ __setup("dump_apple_properties", dump_properties_enable);
struct dev_header {
u32 len;
u32 prop_count;
- struct efi_dev_path path[0];
+ struct efi_dev_path path[];
/*
* followed by key/value pairs, each key and value preceded by u32 len,
* len includes itself, value may be empty (in which case its len is 4)
@@ -42,11 +42,11 @@ struct properties_header {
u32 len;
u32 version;
u32 dev_count;
- struct dev_header dev_header[0];
+ struct dev_header dev_header[];
};
static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
- struct device *dev, void *ptr,
+ struct device *dev, const void *ptr,
struct property_entry entry[])
{
int i;
@@ -117,10 +117,10 @@ static int __init unmarshal_devices(struct properties_header *properties)
while (offset + sizeof(struct dev_header) < properties->len) {
struct dev_header *dev_header = (void *)properties + offset;
struct property_entry *entry = NULL;
+ const struct efi_dev_path *ptr;
struct device *dev;
size_t len;
int ret, i;
- void *ptr;
if (offset + dev_header->len > properties->len ||
dev_header->len <= sizeof(*dev_header)) {
@@ -131,10 +131,10 @@ static int __init unmarshal_devices(struct properties_header *properties)
ptr = dev_header->path;
len = dev_header->len - sizeof(*dev_header);
- dev = efi_get_device_by_path((struct efi_dev_path **)&ptr, &len);
+ dev = efi_get_device_by_path(&ptr, &len);
if (IS_ERR(dev)) {
pr_err("device path parse error %ld at %#zx:\n",
- PTR_ERR(dev), ptr - (void *)dev_header);
+ PTR_ERR(dev), (void *)ptr - (void *)dev_header);
print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
16, 1, dev_header, dev_header->len, true);
dev = NULL;
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index d99f5b0c8a09..9e5e62f5f94d 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -22,8 +22,6 @@
#include <asm/efi.h>
-u64 efi_system_table;
-
static int __init is_memory(efi_memory_desc_t *md)
{
if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC))
@@ -36,7 +34,7 @@ static int __init is_memory(efi_memory_desc_t *md)
* as some data members of the EFI system table are virtually remapped after
* SetVirtualAddressMap() has been called.
*/
-static phys_addr_t efi_to_phys(unsigned long addr)
+static phys_addr_t __init efi_to_phys(unsigned long addr)
{
efi_memory_desc_t *md;
@@ -55,7 +53,7 @@ static phys_addr_t efi_to_phys(unsigned long addr)
static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
-static __initdata efi_config_table_type_t arch_tables[] = {
+static const efi_config_table_type_t arch_tables[] __initconst = {
{LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table},
{NULL_GUID, NULL, NULL}
};
@@ -83,17 +81,15 @@ static void __init init_screen_info(void)
memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
}
-static int __init uefi_init(void)
+static int __init uefi_init(u64 efi_system_table)
{
- efi_char16_t *c16;
- void *config_tables;
+ efi_config_table_t *config_tables;
+ efi_system_table_t *systab;
size_t table_size;
- char vendor[100] = "unknown";
- int i, retval;
+ int retval;
- efi.systab = early_memremap_ro(efi_system_table,
- sizeof(efi_system_table_t));
- if (efi.systab == NULL) {
+ systab = early_memremap_ro(efi_system_table, sizeof(efi_system_table_t));
+ if (systab == NULL) {
pr_warn("Unable to map EFI system table.\n");
return -ENOMEM;
}
@@ -102,53 +98,29 @@ static int __init uefi_init(void)
if (IS_ENABLED(CONFIG_64BIT))
set_bit(EFI_64BIT, &efi.flags);
- /*
- * Verify the EFI Table
- */
- if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
- pr_err("System table signature incorrect\n");
- retval = -EINVAL;
+ retval = efi_systab_check_header(&systab->hdr, 2);
+ if (retval)
goto out;
- }
- if ((efi.systab->hdr.revision >> 16) < 2)
- pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
- efi.systab->hdr.revision >> 16,
- efi.systab->hdr.revision & 0xffff);
-
- efi.runtime_version = efi.systab->hdr.revision;
-
- /* Show what we know for posterity */
- c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
- sizeof(vendor) * sizeof(efi_char16_t));
- if (c16) {
- for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
- vendor[i] = c16[i];
- vendor[i] = '\0';
- early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
- }
- pr_info("EFI v%u.%.02u by %s\n",
- efi.systab->hdr.revision >> 16,
- efi.systab->hdr.revision & 0xffff, vendor);
+ efi.runtime = systab->runtime;
+ efi.runtime_version = systab->hdr.revision;
- table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
- config_tables = early_memremap_ro(efi_to_phys(efi.systab->tables),
+ efi_systab_report_header(&systab->hdr, efi_to_phys(systab->fw_vendor));
+
+ table_size = sizeof(efi_config_table_t) * systab->nr_tables;
+ config_tables = early_memremap_ro(efi_to_phys(systab->tables),
table_size);
if (config_tables == NULL) {
pr_warn("Unable to map EFI config table array.\n");
retval = -ENOMEM;
goto out;
}
- retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
- sizeof(efi_config_table_t),
+ retval = efi_config_parse_tables(config_tables, systab->nr_tables,
arch_tables);
- if (!retval)
- efi.config_table = (unsigned long)efi.systab->tables;
-
early_memunmap(config_tables, table_size);
out:
- early_memunmap(efi.systab, sizeof(efi_system_table_t));
+ early_memunmap(systab, sizeof(efi_system_table_t));
return retval;
}
@@ -233,19 +205,13 @@ static __init void reserve_regions(void)
void __init efi_init(void)
{
struct efi_memory_map_data data;
- struct efi_fdt_params params;
+ u64 efi_system_table;
/* Grab UEFI information placed in FDT by stub */
- if (!efi_get_fdt_params(&params))
+ efi_system_table = efi_get_fdt_params(&data);
+ if (!efi_system_table)
return;
- efi_system_table = params.system_table;
-
- data.desc_version = params.desc_ver;
- data.desc_size = params.desc_size;
- data.size = params.mmap_size;
- data.phys_map = params.mmap;
-
if (efi_memmap_init_early(&data) < 0) {
/*
* If we are booting via UEFI, the UEFI memory map is the only
@@ -259,7 +225,7 @@ void __init efi_init(void)
"Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
efi.memmap.desc_version);
- if (uefi_init() < 0) {
+ if (uefi_init(efi_system_table) < 0) {
efi_memmap_unmap();
return;
}
@@ -267,9 +233,8 @@ void __init efi_init(void)
reserve_regions();
efi_esrt_init();
- memblock_reserve(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
+ memblock_reserve(data.phys_map & PAGE_MASK,
+ PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
init_screen_info();
@@ -349,7 +314,7 @@ static int efifb_add_links(const struct fwnode_handle *fwnode,
* If this fails, retrying this function at a later point won't
* change anything. So, don't return an error after this.
*/
- if (!device_link_add(dev, sup_dev, 0))
+ if (!device_link_add(dev, sup_dev, fw_devlink_get_flags()))
dev_warn(dev, "device_link_add() failed\n");
put_device(sup_dev);
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 9dda2602c862..b876373f2297 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -25,8 +25,6 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-extern u64 efi_system_table;
-
#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64)
#include <asm/ptdump.h>
@@ -54,13 +52,11 @@ device_initcall(ptdump_init);
static bool __init efi_virtmap_init(void)
{
efi_memory_desc_t *md;
- bool systab_found;
efi_mm.pgd = pgd_alloc(&efi_mm);
mm_init_cpumask(&efi_mm);
init_new_context(NULL, &efi_mm);
- systab_found = false;
for_each_efi_memory_desc(md) {
phys_addr_t phys = md->phys_addr;
int ret;
@@ -76,20 +72,6 @@ static bool __init efi_virtmap_init(void)
&phys, ret);
return false;
}
- /*
- * If this entry covers the address of the UEFI system table,
- * calculate and record its virtual address.
- */
- if (efi_system_table >= phys &&
- efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
- efi.systab = (void *)(unsigned long)(efi_system_table -
- phys + md->virt_addr);
- systab_found = true;
- }
- }
- if (!systab_found) {
- pr_err("No virtual mapping found for the UEFI System Table\n");
- return false;
}
if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index d3067cbd5114..4dde8edd53b6 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -168,7 +168,7 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
size_t count, loff_t *offp)
{
- int ret = 0;
+ int ret;
struct capsule_info *cap_info = file->private_data;
struct page *page;
void *kbuff = NULL;
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index b1af0de2e100..f564e15fbc7e 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -101,7 +101,7 @@ void cper_print_bits(const char *pfx, unsigned int bits,
if (!len)
len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
else
- len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
+ len += scnprintf(buf+len, sizeof(buf)-len, ", %s", str);
}
if (len)
printk("%s\n", buf);
@@ -407,6 +407,58 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
}
}
+static const char * const fw_err_rec_type_strs[] = {
+ "IPF SAL Error Record",
+ "SOC Firmware Error Record Type1 (Legacy CrashLog Support)",
+ "SOC Firmware Error Record Type2",
+};
+
+static void cper_print_fw_err(const char *pfx,
+ struct acpi_hest_generic_data *gdata,
+ const struct cper_sec_fw_err_rec_ref *fw_err)
+{
+ void *buf = acpi_hest_get_payload(gdata);
+ u32 offset, length = gdata->error_data_length;
+
+ printk("%s""Firmware Error Record Type: %s\n", pfx,
+ fw_err->record_type < ARRAY_SIZE(fw_err_rec_type_strs) ?
+ fw_err_rec_type_strs[fw_err->record_type] : "unknown");
+ printk("%s""Revision: %d\n", pfx, fw_err->revision);
+
+ /* Record Type based on UEFI 2.7 */
+ if (fw_err->revision == 0) {
+ printk("%s""Record Identifier: %08llx\n", pfx,
+ fw_err->record_identifier);
+ } else if (fw_err->revision == 2) {
+ printk("%s""Record Identifier: %pUl\n", pfx,
+ &fw_err->record_identifier_guid);
+ }
+
+ /*
+ * The FW error record may contain trailing data beyond the
+ * structure defined by the specification. As the fields
+ * defined (and hence the offset of any trailing data) vary
+ * with the revision, set the offset to account for this
+ * variation.
+ */
+ if (fw_err->revision == 0) {
+ /* record_identifier_guid not defined */
+ offset = offsetof(struct cper_sec_fw_err_rec_ref,
+ record_identifier_guid);
+ } else if (fw_err->revision == 1) {
+ /* record_identifier not defined */
+ offset = offsetof(struct cper_sec_fw_err_rec_ref,
+ record_identifier);
+ } else {
+ offset = sizeof(*fw_err);
+ }
+
+ buf += offset;
+ length -= offset;
+
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, buf, length, true);
+}
+
static void cper_print_tstamp(const char *pfx,
struct acpi_hest_generic_data_v300 *gdata)
{
@@ -494,6 +546,16 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
#endif
+ } else if (guid_equal(sec_type, &CPER_SEC_FW_ERR_REC_REF)) {
+ struct cper_sec_fw_err_rec_ref *fw_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: Firmware Error Record Reference\n",
+ newpfx);
+ /* The minimal FW Error Record contains 16 bytes */
+ if (gdata->error_data_length >= SZ_16)
+ cper_print_fw_err(newpfx, gdata, fw_err);
+ else
+ goto err_section_too_small;
} else {
const void *err = acpi_hest_get_payload(gdata);
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 20123384271c..5c9625e552f4 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -31,13 +31,13 @@ static int __init match_acpi_dev(struct device *dev, const void *data)
return !strcmp("0", hid_uid.uid);
}
-static long __init parse_acpi_path(struct efi_dev_path *node,
+static long __init parse_acpi_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
struct acpi_hid_uid hid_uid = {};
struct device *phys_dev;
- if (node->length != 12)
+ if (node->header.length != 12)
return -EINVAL;
sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
@@ -69,12 +69,12 @@ static int __init match_pci_dev(struct device *dev, void *data)
return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
}
-static long __init parse_pci_path(struct efi_dev_path *node,
+static long __init parse_pci_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
unsigned int devfn;
- if (node->length != 6)
+ if (node->header.length != 6)
return -EINVAL;
if (!parent)
return -EINVAL;
@@ -105,19 +105,19 @@ static long __init parse_pci_path(struct efi_dev_path *node,
* search for a device.
*/
-static long __init parse_end_path(struct efi_dev_path *node,
+static long __init parse_end_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
- if (node->length != 4)
+ if (node->header.length != 4)
return -EINVAL;
- if (node->sub_type != EFI_DEV_END_INSTANCE &&
- node->sub_type != EFI_DEV_END_ENTIRE)
+ if (node->header.sub_type != EFI_DEV_END_INSTANCE &&
+ node->header.sub_type != EFI_DEV_END_ENTIRE)
return -EINVAL;
if (!parent)
return -ENODEV;
*child = get_device(parent);
- return node->sub_type;
+ return node->header.sub_type;
}
/**
@@ -156,7 +156,7 @@ static long __init parse_end_path(struct efi_dev_path *node,
* %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len,
* %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented.
*/
-struct device * __init efi_get_device_by_path(struct efi_dev_path **node,
+struct device * __init efi_get_device_by_path(const struct efi_dev_path **node,
size_t *len)
{
struct device *parent = NULL, *child;
@@ -166,16 +166,16 @@ struct device * __init efi_get_device_by_path(struct efi_dev_path **node,
return NULL;
while (!ret) {
- if (*len < 4 || *len < (*node)->length)
+ if (*len < 4 || *len < (*node)->header.length)
ret = -EINVAL;
- else if ((*node)->type == EFI_DEV_ACPI &&
- (*node)->sub_type == EFI_DEV_BASIC_ACPI)
+ else if ((*node)->header.type == EFI_DEV_ACPI &&
+ (*node)->header.sub_type == EFI_DEV_BASIC_ACPI)
ret = parse_acpi_path(*node, parent, &child);
- else if ((*node)->type == EFI_DEV_HW &&
- (*node)->sub_type == EFI_DEV_PCI)
+ else if ((*node)->header.type == EFI_DEV_HW &&
+ (*node)->header.sub_type == EFI_DEV_PCI)
ret = parse_pci_path(*node, parent, &child);
- else if (((*node)->type == EFI_DEV_END_PATH ||
- (*node)->type == EFI_DEV_END_PATH2))
+ else if (((*node)->header.type == EFI_DEV_END_PATH ||
+ (*node)->header.type == EFI_DEV_END_PATH2))
ret = parse_end_path(*node, parent, &child);
else
ret = -ENOTSUPP;
@@ -185,8 +185,8 @@ struct device * __init efi_get_device_by_path(struct efi_dev_path **node,
return ERR_PTR(ret);
parent = child;
- *node = (void *)*node + (*node)->length;
- *len -= (*node)->length;
+ *node = (void *)*node + (*node)->header.length;
+ *len -= (*node)->header.length;
}
if (ret == EFI_DEV_END_ENTIRE)
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index 5d4f84781aa0..a52236e11e5f 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -114,14 +114,16 @@ static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h)
const u32 color_black = 0x00000000;
const u32 color_white = 0x00ffffff;
const u8 *src;
- u8 s8;
- int m;
+ int m, n, bytes;
+ u8 x;
- src = font->data + c * font->height;
- s8 = *(src + h);
+ bytes = BITS_TO_BYTES(font->width);
+ src = font->data + c * font->height * bytes + h * bytes;
- for (m = 0; m < 8; m++) {
- if ((s8 >> (7 - m)) & 1)
+ for (m = 0; m < font->width; m++) {
+ n = m % 8;
+ x = *(src + m / 8);
+ if ((x >> (7 - n)) & 1)
*dst = color_white;
else
*dst = color_black;
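
The earlycon change generalizes glyph addressing from an assumed 8-pixel width to arbitrary widths: each glyph row now spans BITS_TO_BYTES(font->width) bytes, and pixel m of a row lives in bit (7 - m % 8) of byte m / 8. The indexing in isolation:

/* row points at the first byte of one glyph row; MSB-first, byte-padded */
static inline bool glyph_pixel_set(const u8 *row, unsigned int m)
{
        return (row[m / 8] >> (7 - m % 8)) & 1;
}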
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index b07c17643210..6aafdb67dbca 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -42,7 +42,12 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
return;
}
*bgrt = *(struct acpi_table_bgrt *)table;
- if (bgrt->version != 1) {
+ /*
+ * Only version 1 is defined but some older laptops (seen on Lenovo
+ * Ivy Bridge models) have a correct version 1 BGRT table with the
+ * version set to 0, so we accept versions 0 and 1.
+ */
+ if (bgrt->version > 1) {
pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
bgrt->version);
goto out;
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 9ea13e8d12ec..c2f1d4e6630b 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -161,7 +161,7 @@ static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
*
* @record: pstore record to pass to callback
*
- * You MUST call efivar_enter_iter_begin() before this function, and
+ * You MUST call efivar_entry_iter_begin() before this function, and
* efivar_entry_iter_end() afterwards.
*
*/
@@ -356,7 +356,7 @@ static struct pstore_info efi_pstore_info = {
static __init int efivars_pstore_init(void)
{
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
return 0;
if (!efivars_kobject())
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 21ea99f65113..4e3055238f31 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -17,10 +17,10 @@
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
-#include <linux/of_fdt.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
@@ -35,27 +35,21 @@
#include <asm/early_ioremap.h>
struct efi __read_mostly efi = {
- .mps = EFI_INVALID_TABLE_ADDR,
+ .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
.acpi = EFI_INVALID_TABLE_ADDR,
.acpi20 = EFI_INVALID_TABLE_ADDR,
.smbios = EFI_INVALID_TABLE_ADDR,
.smbios3 = EFI_INVALID_TABLE_ADDR,
- .boot_info = EFI_INVALID_TABLE_ADDR,
- .hcdp = EFI_INVALID_TABLE_ADDR,
- .uga = EFI_INVALID_TABLE_ADDR,
- .fw_vendor = EFI_INVALID_TABLE_ADDR,
- .runtime = EFI_INVALID_TABLE_ADDR,
- .config_table = EFI_INVALID_TABLE_ADDR,
.esrt = EFI_INVALID_TABLE_ADDR,
- .properties_table = EFI_INVALID_TABLE_ADDR,
- .mem_attr_table = EFI_INVALID_TABLE_ADDR,
- .rng_seed = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
.tpm_final_log = EFI_INVALID_TABLE_ADDR,
- .mem_reserve = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
+unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
+
struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
.mm_users = ATOMIC_INIT(2),
@@ -122,8 +116,6 @@ static ssize_t systab_show(struct kobject *kobj,
if (!kobj || !buf)
return -EINVAL;
- if (efi.mps != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "MPS=0x%lx\n", efi.mps);
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
if (efi.acpi != EFI_INVALID_TABLE_ADDR)
@@ -137,67 +129,39 @@ static ssize_t systab_show(struct kobject *kobj,
str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
- if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
- if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
- if (efi.uga != EFI_INVALID_TABLE_ADDR)
- str += sprintf(str, "UGA=0x%lx\n", efi.uga);
+
+ if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
+ str = efi_systab_show_arch(str);
return str - buf;
}
static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
-#define EFI_FIELD(var) efi.var
-
-#define EFI_ATTR_SHOW(name) \
-static ssize_t name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
-}
-
-EFI_ATTR_SHOW(fw_vendor);
-EFI_ATTR_SHOW(runtime);
-EFI_ATTR_SHOW(config_table);
-
static ssize_t fw_platform_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}
-static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
-static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
-static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
+extern __weak struct kobj_attribute efi_attr_fw_vendor;
+extern __weak struct kobj_attribute efi_attr_runtime;
+extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
__ATTR_RO(fw_platform_size);
static struct attribute *efi_subsys_attrs[] = {
&efi_attr_systab.attr,
+ &efi_attr_fw_platform_size.attr,
&efi_attr_fw_vendor.attr,
&efi_attr_runtime.attr,
&efi_attr_config_table.attr,
- &efi_attr_fw_platform_size.attr,
NULL,
};
-static umode_t efi_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
+umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
{
- if (attr == &efi_attr_fw_vendor.attr) {
- if (efi_enabled(EFI_PARAVIRT) ||
- efi.fw_vendor == EFI_INVALID_TABLE_ADDR)
- return 0;
- } else if (attr == &efi_attr_runtime.attr) {
- if (efi.runtime == EFI_INVALID_TABLE_ADDR)
- return 0;
- } else if (attr == &efi_attr_config_table.attr) {
- if (efi.config_table == EFI_INVALID_TABLE_ADDR)
- return 0;
- }
-
return attr->mode;
}
@@ -325,6 +289,59 @@ free_entry:
static inline int efivar_ssdt_load(void) { return 0; }
#endif
+#ifdef CONFIG_DEBUG_FS
+
+#define EFI_DEBUGFS_MAX_BLOBS 32
+
+static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
+
+static void __init efi_debugfs_init(void)
+{
+ struct dentry *efi_debugfs;
+ efi_memory_desc_t *md;
+ char name[32];
+ int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
+ int i = 0;
+
+ efi_debugfs = debugfs_create_dir("efi", NULL);
+ if (IS_ERR_OR_NULL(efi_debugfs))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ switch (md->type) {
+ case EFI_BOOT_SERVICES_CODE:
+ snprintf(name, sizeof(name), "boot_services_code%d",
+ type_count[md->type]++);
+ break;
+ case EFI_BOOT_SERVICES_DATA:
+ snprintf(name, sizeof(name), "boot_services_data%d",
+ type_count[md->type]++);
+ break;
+ default:
+ continue;
+ }
+
+ if (i >= EFI_DEBUGFS_MAX_BLOBS) {
+ pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
+ EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
+ break;
+ }
+
+ debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
+ debugfs_blob[i].data = memremap(md->phys_addr,
+ debugfs_blob[i].size,
+ MEMREMAP_WB);
+ if (!debugfs_blob[i].data)
+ continue;
+
+ debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
+ i++;
+ }
+}
+#else
+static inline void efi_debugfs_init(void) {}
+#endif
+
/*
* We register the efi subsystem with the firmware subsystem and the
* efivars subsystem with the efi subsystem, if the system was booted with
@@ -334,21 +351,30 @@ static int __init efisubsys_init(void)
{
int error;
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ efi.runtime_supported_mask = 0;
+
if (!efi_enabled(EFI_BOOT))
return 0;
- /*
- * Since we process only one efi_runtime_service() at a time, an
- * ordered workqueue (which creates only one execution context)
- * should suffice all our needs.
- */
- efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
- if (!efi_rts_wq) {
- pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
- clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
- return 0;
+ if (efi.runtime_supported_mask) {
+ /*
+ * Since we process only one efi_runtime_service() at a time, an
+ * ordered workqueue (which creates only one execution context)
+ * should suffice for all our needs.
+ */
+ efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+ if (!efi_rts_wq) {
+ pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ efi.runtime_supported_mask = 0;
+ return 0;
+ }
}
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
+ platform_device_register_simple("rtc-efi", 0, NULL, 0);
+
/* We register the efi directory at /sys/firmware/efi */
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
@@ -356,12 +382,13 @@ static int __init efisubsys_init(void)
return -ENOMEM;
}
- error = generic_ops_register();
- if (error)
- goto err_put;
-
- if (efi_enabled(EFI_RUNTIME_SERVICES))
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
efivar_ssdt_load();
+ error = generic_ops_register();
+ if (error)
+ goto err_put;
+ platform_device_register_simple("efivars", 0, NULL, 0);
+ }
error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
if (error) {
@@ -381,12 +408,16 @@ static int __init efisubsys_init(void)
goto err_remove_group;
}
+ if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
+ efi_debugfs_init();
+
return 0;
err_remove_group:
sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
- generic_ops_unregister();
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
+ generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
return error;
@@ -467,30 +498,27 @@ void __init efi_mem_reserve(phys_addr_t addr, u64 size)
efi_arch_mem_reserve(addr, size);
}
-static __initdata efi_config_table_type_t common_tables[] = {
+static const efi_config_table_type_t common_tables[] __initconst = {
{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
- {HCDP_TABLE_GUID, "HCDP", &efi.hcdp},
- {MPS_TABLE_GUID, "MPS", &efi.mps},
{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
{SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
- {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
- {EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
- {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
- {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
+ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi_mem_attr_table},
+ {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi_rng_seed},
{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
{LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
- {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve},
+ {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &mem_reserve},
+ {EFI_RT_PROPERTIES_TABLE_GUID, "RTPROP", &rt_prop},
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys},
#endif
{NULL_GUID, NULL, NULL},
};
-static __init int match_config_table(efi_guid_t *guid,
+static __init int match_config_table(const efi_guid_t *guid,
unsigned long table,
- efi_config_table_type_t *table_types)
+ const efi_config_table_type_t *table_types)
{
int i;
@@ -509,48 +537,47 @@ static __init int match_config_table(efi_guid_t *guid,
return 0;
}
-int __init efi_config_parse_tables(void *config_tables, int count, int sz,
- efi_config_table_type_t *arch_tables)
+int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
+ int count,
+ const efi_config_table_type_t *arch_tables)
{
- void *tablep;
+ const efi_config_table_64_t *tbl64 = (void *)config_tables;
+ const efi_config_table_32_t *tbl32 = (void *)config_tables;
+ const efi_guid_t *guid;
+ unsigned long table;
int i;
- tablep = config_tables;
pr_info("");
for (i = 0; i < count; i++) {
- efi_guid_t guid;
- unsigned long table;
-
- if (efi_enabled(EFI_64BIT)) {
- u64 table64;
- guid = ((efi_config_table_64_t *)tablep)->guid;
- table64 = ((efi_config_table_64_t *)tablep)->table;
- table = table64;
-#ifndef CONFIG_64BIT
- if (table64 >> 32) {
+ if (!IS_ENABLED(CONFIG_X86)) {
+ guid = &config_tables[i].guid;
+ table = (unsigned long)config_tables[i].table;
+ } else if (efi_enabled(EFI_64BIT)) {
+ guid = &tbl64[i].guid;
+ table = tbl64[i].table;
+
+ if (IS_ENABLED(CONFIG_X86_32) &&
+ tbl64[i].table > U32_MAX) {
pr_cont("\n");
pr_err("Table located above 4GB, disabling EFI.\n");
return -EINVAL;
}
-#endif
} else {
- guid = ((efi_config_table_32_t *)tablep)->guid;
- table = ((efi_config_table_32_t *)tablep)->table;
+ guid = &tbl32[i].guid;
+ table = tbl32[i].table;
}
- if (!match_config_table(&guid, table, common_tables))
- match_config_table(&guid, table, arch_tables);
-
- tablep += sz;
+ if (!match_config_table(guid, table, common_tables))
+ match_config_table(guid, table, arch_tables);
}
pr_cont("\n");
set_bit(EFI_CONFIG_TABLES, &efi.flags);
- if (efi.rng_seed != EFI_INVALID_TABLE_ADDR) {
+ if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
struct linux_efi_random_seed *seed;
u32 size = 0;
- seed = early_memremap(efi.rng_seed, sizeof(*seed));
+ seed = early_memremap(efi_rng_seed, sizeof(*seed));
if (seed != NULL) {
size = READ_ONCE(seed->size);
early_memunmap(seed, sizeof(*seed));
@@ -558,7 +585,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
pr_err("Could not map UEFI random seed!\n");
}
if (size > 0) {
- seed = early_memremap(efi.rng_seed,
+ seed = early_memremap(efi_rng_seed,
sizeof(*seed) + size);
if (seed != NULL) {
pr_notice("seeding entropy pool\n");
@@ -570,35 +597,17 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
}
}
- if (efi_enabled(EFI_MEMMAP))
+ if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
efi_memattr_init();
efi_tpm_eventlog_init();
- /* Parse the EFI Properties table if it exists */
- if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
- efi_properties_table_t *tbl;
-
- tbl = early_memremap(efi.properties_table, sizeof(*tbl));
- if (tbl == NULL) {
- pr_err("Could not map Properties table!\n");
- return -ENOMEM;
- }
-
- if (tbl->memory_protection_attribute &
- EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
- set_bit(EFI_NX_PE_DATA, &efi.flags);
-
- early_memunmap(tbl, sizeof(*tbl));
- }
-
- if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
- unsigned long prsv = efi.mem_reserve;
+ if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
+ unsigned long prsv = mem_reserve;
while (prsv) {
struct linux_efi_memreserve *rsv;
u8 *p;
- int i;
/*
* Just map a full page: that is what we will get
@@ -627,186 +636,78 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
}
}
- return 0;
-}
-
-int __init efi_config_init(efi_config_table_type_t *arch_tables)
-{
- void *config_tables;
- int sz, ret;
-
- if (efi.systab->nr_tables == 0)
- return 0;
-
- if (efi_enabled(EFI_64BIT))
- sz = sizeof(efi_config_table_64_t);
- else
- sz = sizeof(efi_config_table_32_t);
+ if (rt_prop != EFI_INVALID_TABLE_ADDR) {
+ efi_rt_properties_table_t *tbl;
- /*
- * Let's see what config tables the firmware passed to us.
- */
- config_tables = early_memremap(efi.systab->tables,
- efi.systab->nr_tables * sz);
- if (config_tables == NULL) {
- pr_err("Could not map Configuration table!\n");
- return -ENOMEM;
+ tbl = early_memremap(rt_prop, sizeof(*tbl));
+ if (tbl) {
+ efi.runtime_supported_mask &= tbl->runtime_services_supported;
+ early_memunmap(tbl, sizeof(*tbl));
+ }
}
- ret = efi_config_parse_tables(config_tables, efi.systab->nr_tables, sz,
- arch_tables);
-
- early_memunmap(config_tables, efi.systab->nr_tables * sz);
- return ret;
+ return 0;
}
-#ifdef CONFIG_EFI_VARS_MODULE
-static int __init efi_load_efivars(void)
+int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
+ int min_major_version)
{
- struct platform_device *pdev;
-
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
- return 0;
-
- pdev = platform_device_register_simple("efivars", 0, NULL, 0);
- return PTR_ERR_OR_ZERO(pdev);
-}
-device_initcall(efi_load_efivars);
-#endif
-
-#ifdef CONFIG_EFI_PARAMS_FROM_FDT
-
-#define UEFI_PARAM(name, prop, field) \
- { \
- { name }, \
- { prop }, \
- offsetof(struct efi_fdt_params, field), \
- sizeof_field(struct efi_fdt_params, field) \
+ if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ pr_err("System table signature incorrect!\n");
+ return -EINVAL;
}
-struct params {
- const char name[32];
- const char propname[32];
- int offset;
- int size;
-};
-
-static __initdata struct params fdt_params[] = {
- UEFI_PARAM("System Table", "linux,uefi-system-table", system_table),
- UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap),
- UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size),
- UEFI_PARAM("MemMap Desc. Size", "linux,uefi-mmap-desc-size", desc_size),
- UEFI_PARAM("MemMap Desc. Version", "linux,uefi-mmap-desc-ver", desc_ver)
-};
-
-static __initdata struct params xen_fdt_params[] = {
- UEFI_PARAM("System Table", "xen,uefi-system-table", system_table),
- UEFI_PARAM("MemMap Address", "xen,uefi-mmap-start", mmap),
- UEFI_PARAM("MemMap Size", "xen,uefi-mmap-size", mmap_size),
- UEFI_PARAM("MemMap Desc. Size", "xen,uefi-mmap-desc-size", desc_size),
- UEFI_PARAM("MemMap Desc. Version", "xen,uefi-mmap-desc-ver", desc_ver)
-};
-
-#define EFI_FDT_PARAMS_SIZE ARRAY_SIZE(fdt_params)
+ if ((systab_hdr->revision >> 16) < min_major_version)
+ pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
+ systab_hdr->revision >> 16,
+ systab_hdr->revision & 0xffff,
+ min_major_version);
-static __initdata struct {
- const char *uname;
- const char *subnode;
- struct params *params;
-} dt_params[] = {
- { "hypervisor", "uefi", xen_fdt_params },
- { "chosen", NULL, fdt_params },
-};
-
-struct param_info {
- int found;
- void *params;
- const char *missing;
-};
+ return 0;
+}
-static int __init __find_uefi_params(unsigned long node,
- struct param_info *info,
- struct params *params)
+#ifndef CONFIG_IA64
+static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
+ size_t size)
{
- const void *prop;
- void *dest;
- u64 val;
- int i, len;
-
- for (i = 0; i < EFI_FDT_PARAMS_SIZE; i++) {
- prop = of_get_flat_dt_prop(node, params[i].propname, &len);
- if (!prop) {
- info->missing = params[i].name;
- return 0;
- }
+ const efi_char16_t *ret;
- dest = info->params + params[i].offset;
- info->found++;
-
- val = of_read_number(prop, len / sizeof(u32));
-
- if (params[i].size == sizeof(u32))
- *(u32 *)dest = val;
- else
- *(u64 *)dest = val;
-
- if (efi_enabled(EFI_DBG))
- pr_info(" %s: 0x%0*llx\n", params[i].name,
- params[i].size * 2, val);
- }
-
- return 1;
+ ret = early_memremap_ro(fw_vendor, size);
+ if (!ret)
+ pr_err("Could not map the firmware vendor!\n");
+ return ret;
}
-static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
- int depth, void *data)
+static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
- struct param_info *info = data;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
- const char *subnode = dt_params[i].subnode;
-
- if (depth != 1 || strcmp(uname, dt_params[i].uname) != 0) {
- info->missing = dt_params[i].params[0].name;
- continue;
- }
-
- if (subnode) {
- int err = of_get_flat_dt_subnode_by_name(node, subnode);
-
- if (err < 0)
- return 0;
-
- node = err;
- }
-
- return __find_uefi_params(node, info, dt_params[i].params);
- }
-
- return 0;
+ early_memunmap((void *)fw_vendor, size);
}
+#else
+#define map_fw_vendor(p, s) __va(p)
+#define unmap_fw_vendor(v, s)
+#endif
-int __init efi_get_fdt_params(struct efi_fdt_params *params)
+void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+ unsigned long fw_vendor)
{
- struct param_info info;
- int ret;
+ char vendor[100] = "unknown";
+ const efi_char16_t *c16;
+ size_t i;
- pr_info("Getting EFI parameters from FDT:\n");
+ c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
+ if (c16) {
+ for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
+ vendor[i] = c16[i];
+ vendor[i] = '\0';
- info.found = 0;
- info.params = params;
-
- ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
- if (!info.found)
- pr_info("UEFI not found.\n");
- else if (!ret)
- pr_err("Can't find '%s' in device tree!\n",
- info.missing);
+ unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
+ }
- return ret;
+ pr_info("EFI v%u.%.02u by %s\n",
+ systab_hdr->revision >> 16,
+ systab_hdr->revision & 0xffff,
+ vendor);
}
-#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
static __initdata char memory_type_name[][20] = {
"Reserved",
@@ -968,10 +869,10 @@ static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
static int __init efi_memreserve_map_root(void)
{
- if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+ if (mem_reserve == EFI_INVALID_TABLE_ADDR)
return -ENODEV;
- efi_memreserve_root = memremap(efi.mem_reserve,
+ efi_memreserve_root = memremap(mem_reserve,
sizeof(*efi_memreserve_root),
MEMREMAP_WB);
if (WARN_ON_ONCE(!efi_memreserve_root))
@@ -1076,7 +977,7 @@ static int update_efi_random_seed(struct notifier_block *nb,
if (!kexec_in_progress)
return NOTIFY_DONE;
- seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB);
+ seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
if (seed != NULL) {
size = min(seed->size, EFI_RANDOM_SEED_SIZE);
memunmap(seed);
@@ -1084,7 +985,7 @@ static int update_efi_random_seed(struct notifier_block *nb,
pr_err("Could not map UEFI random seed!\n");
}
if (size > 0) {
- seed = memremap(efi.rng_seed, sizeof(*seed) + size,
+ seed = memremap(efi_rng_seed, sizeof(*seed) + size,
MEMREMAP_WB);
if (seed != NULL) {
seed->size = size;
@@ -1101,9 +1002,9 @@ static struct notifier_block efi_random_seed_nb = {
.notifier_call = update_efi_random_seed,
};
-static int register_update_efi_random_seed(void)
+static int __init register_update_efi_random_seed(void)
{
- if (efi.rng_seed == EFI_INVALID_TABLE_ADDR)
+ if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
return 0;
return register_reboot_notifier(&efi_random_seed_nb);
}
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index aff3dfb4d7ba..78ad1ba8c987 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -678,7 +678,7 @@ int efivars_sysfs_init(void)
struct kobject *parent_kobj = efivars_kobject();
int error = 0;
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
return -ENODEV;
/* No efivars have been registered yet */
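
efi_rt_services_supported() tests the runtime_supported_mask that efi.c now derives from the EFI_RT_PROPERTIES_TABLE (see the rt_prop handling earlier in this patch), so callers probe for the specific service group they need rather than the coarse EFI_RUNTIME_SERVICES flag. A sketch of the typical call-site shape:

static int __init example_init(void)
{
        /* bail out unless the firmware advertises variable services */
        if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
                return -ENODEV;

        /* ... efivar_* calls are safe from here on ... */
        return 0;
}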
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
new file mode 100644
index 000000000000..a1b199de9006
--- /dev/null
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for extracting embedded firmware for peripherals from EFI code,
+ *
+ * Copyright (c) 2018 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/efi_embedded_fw.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <crypto/sha.h>
+
+/* Exported for use by lib/test_firmware.c only */
+LIST_HEAD(efi_embedded_fw_list);
+EXPORT_SYMBOL_GPL(efi_embedded_fw_list);
+
+static bool checked_for_fw;
+
+static const struct dmi_system_id * const embedded_fw_table[] = {
+#ifdef CONFIG_TOUCHSCREEN_DMI
+ touchscreen_dmi_table,
+#endif
+ NULL
+};
+
+/*
+ * Note the efi_check_for_embedded_firmwares() code currently makes the
+ * following two assumptions. This may need to be revisited if embedded firmware
+ * is found where this is not true:
+ * 1) The firmware is only found in EFI_BOOT_SERVICES_CODE memory segments
+ * 2) The firmware always starts at an offset which is a multiple of 8 bytes
+ */
+static int __init efi_check_md_for_embedded_firmware(
+ efi_memory_desc_t *md, const struct efi_embedded_fw_desc *desc)
+{
+ struct sha256_state sctx;
+ struct efi_embedded_fw *fw;
+ u8 sha256[32];
+ u64 i, size;
+ u8 *map;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ map = memremap(md->phys_addr, size, MEMREMAP_WB);
+ if (!map) {
+ pr_err("Error mapping EFI mem at %#llx\n", md->phys_addr);
+ return -ENOMEM;
+ }
+
+ for (i = 0; (i + desc->length) <= size; i += 8) {
+ if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN))
+ continue;
+
+ sha256_init(&sctx);
+ sha256_update(&sctx, map + i, desc->length);
+ sha256_final(&sctx, sha256);
+ if (memcmp(sha256, desc->sha256, 32) == 0)
+ break;
+ }
+ if ((i + desc->length) > size) {
+ memunmap(map);
+ return -ENOENT;
+ }
+
+ pr_info("Found EFI embedded fw '%s'\n", desc->name);
+
+ fw = kmalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw) {
+ memunmap(map);
+ return -ENOMEM;
+ }
+
+ fw->data = kmemdup(map + i, desc->length, GFP_KERNEL);
+ memunmap(map);
+ if (!fw->data) {
+ kfree(fw);
+ return -ENOMEM;
+ }
+
+ fw->name = desc->name;
+ fw->length = desc->length;
+ list_add(&fw->list, &efi_embedded_fw_list);
+
+ return 0;
+}
+
+void __init efi_check_for_embedded_firmwares(void)
+{
+ const struct efi_embedded_fw_desc *fw_desc;
+ const struct dmi_system_id *dmi_id;
+ efi_memory_desc_t *md;
+ int i, r;
+
+ for (i = 0; embedded_fw_table[i]; i++) {
+ dmi_id = dmi_first_match(embedded_fw_table[i]);
+ if (!dmi_id)
+ continue;
+
+ fw_desc = dmi_id->driver_data;
+
+ /*
+ * In some drivers, driver_data may contain other driver-specific
+ * data after the fw_desc struct, and the fw_desc struct itself
+ * may be empty; skip these.
+ */
+ if (!fw_desc->name)
+ continue;
+
+ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_BOOT_SERVICES_CODE)
+ continue;
+
+ r = efi_check_md_for_embedded_firmware(md, fw_desc);
+ if (r == 0)
+ break;
+ }
+ }
+
+ checked_for_fw = true;
+}
+
+int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size)
+{
+ struct efi_embedded_fw *iter, *fw = NULL;
+
+ if (!checked_for_fw) {
+ pr_warn("Warning %s called while we did not check for embedded fw\n",
+ __func__);
+ return -ENOENT;
+ }
+
+ list_for_each_entry(iter, &efi_embedded_fw_list, list) {
+ if (strcmp(name, iter->name) == 0) {
+ fw = iter;
+ break;
+ }
+ }
+
+ if (!fw)
+ return -ENOENT;
+
+ *data = fw->data;
+ *size = fw->length;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efi_get_embedded_fw);
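For context, a minimal consumer-side sketch of the API added above. The descriptor fields mirror the efi_embedded_fw_desc usage in efi_check_md_for_embedded_firmware(); the device name, prefix bytes and hash are placeholders rather than values from any real board (real descriptors live in the touchscreen DMI table).

/* Hypothetical example only: a descriptor the boot-time scan would match,
 * and a driver asking for the resulting image. All values are placeholders. */
static const struct efi_embedded_fw_desc example_fw_desc __initconst = {
	.name   = "example-ts.fw",	/* key used for the lookup below */
	.prefix = { 0x55, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
	.length = 16384,		/* bytes hashed and copied */
	.sha256 = { 0 },		/* SHA-256 of the 16384-byte image */
};

static int example_driver_get_fw(void)
{
	const u8 *data;
	size_t size;
	int ret;

	ret = efi_get_embedded_fw("example-ts.fw", &data, &size);
	if (ret)
		return ret;	/* -ENOENT if the scan found nothing */

	/* data/size remain valid: list entries are never freed. */
	return 0;
}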
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 2762e0662bf4..e3d692696583 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -240,7 +240,6 @@ void __init efi_esrt_init(void)
{
void *va;
struct efi_system_resource_table tmpesrt;
- struct efi_system_resource_entry_v1 *v1_entries;
size_t size, max, entry_size, entries_size;
efi_memory_desc_t md;
int rc;
@@ -288,14 +287,13 @@ void __init efi_esrt_init(void)
memcpy(&tmpesrt, va, sizeof(tmpesrt));
early_memunmap(va, size);
- if (tmpesrt.fw_resource_version == 1) {
- entry_size = sizeof (*v1_entries);
- } else {
+ if (tmpesrt.fw_resource_version != 1) {
pr_err("Unsupported ESRT version %lld.\n",
tmpesrt.fw_resource_version);
return;
}
+ entry_size = sizeof(struct efi_system_resource_entry_v1);
if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) {
pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n",
max - size, entry_size);
diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
new file mode 100644
index 000000000000..bb042ab7c2be
--- /dev/null
+++ b/drivers/firmware/efi/fdtparams.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+
+#include <asm/unaligned.h>
+
+enum {
+ SYSTAB,
+ MMBASE,
+ MMSIZE,
+ DCSIZE,
+ DCVERS,
+
+ PARAMCOUNT
+};
+
+static __initconst const char name[][22] = {
+ [SYSTAB] = "System Table ",
+ [MMBASE] = "MemMap Address ",
+ [MMSIZE] = "MemMap Size ",
+ [DCSIZE] = "MemMap Desc. Size ",
+ [DCVERS] = "MemMap Desc. Version ",
+};
+
+static __initconst const struct {
+ const char path[17];
+ const char params[PARAMCOUNT][26];
+} dt_params[] = {
+ {
+#ifdef CONFIG_XEN // <-------17------>
+ .path = "/hypervisor/uefi",
+ .params = {
+ [SYSTAB] = "xen,uefi-system-table",
+ [MMBASE] = "xen,uefi-mmap-start",
+ [MMSIZE] = "xen,uefi-mmap-size",
+ [DCSIZE] = "xen,uefi-mmap-desc-size",
+ [DCVERS] = "xen,uefi-mmap-desc-ver",
+ }
+ }, {
+#endif
+ .path = "/chosen",
+ .params = { // <-----------26----------->
+ [SYSTAB] = "linux,uefi-system-table",
+ [MMBASE] = "linux,uefi-mmap-start",
+ [MMSIZE] = "linux,uefi-mmap-size",
+ [DCSIZE] = "linux,uefi-mmap-desc-size",
+ [DCVERS] = "linux,uefi-mmap-desc-ver",
+ }
+ }
+};
+
+static int __init efi_get_fdt_prop(const void *fdt, int node, const char *pname,
+ const char *rname, void *var, int size)
+{
+ const void *prop;
+ int len;
+ u64 val;
+
+ prop = fdt_getprop(fdt, node, pname, &len);
+ if (!prop)
+ return 1;
+
+ val = (len == 4) ? (u64)be32_to_cpup(prop) : get_unaligned_be64(prop);
+
+ if (size == 8)
+ *(u64 *)var = val;
+ else
+ *(u32 *)var = (val < U32_MAX) ? val : U32_MAX; // saturate
+
+ if (efi_enabled(EFI_DBG))
+ pr_info(" %s: 0x%0*llx\n", rname, size * 2, val);
+
+ return 0;
+}
+
+u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
+{
+ const void *fdt = initial_boot_params;
+ unsigned long systab;
+ int i, j, node;
+ struct {
+ void *var;
+ int size;
+ } target[] = {
+ [SYSTAB] = { &systab, sizeof(systab) },
+ [MMBASE] = { &mm->phys_map, sizeof(mm->phys_map) },
+ [MMSIZE] = { &mm->size, sizeof(mm->size) },
+ [DCSIZE] = { &mm->desc_size, sizeof(mm->desc_size) },
+ [DCVERS] = { &mm->desc_version, sizeof(mm->desc_version) },
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+
+ for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+ node = fdt_path_offset(fdt, dt_params[i].path);
+ if (node < 0)
+ continue;
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Getting UEFI parameters from %s in DT:\n",
+ dt_params[i].path);
+
+ for (j = 0; j < ARRAY_SIZE(target); j++) {
+ const char *pname = dt_params[i].params[j];
+
+ if (!efi_get_fdt_prop(fdt, node, pname, name[j],
+ target[j].var, target[j].size))
+ continue;
+ if (!j)
+ goto notfound;
+ pr_err("Can't find property '%s' in DT!\n", pname);
+ return 0;
+ }
+ return systab;
+ }
+notfound:
+ pr_info("UEFI not found.\n");
+ return 0;
+}
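A sketch of how an early-boot caller might consume efi_get_fdt_params(); the function and flow below are illustrative assumptions (the real caller sits in the arch EFI init code), but the efi_memory_map_data fields are exactly the ones filled in above.

/* Illustrative caller only; not part of this patch. */
static void __init example_fdt_efi_init(void)
{
	struct efi_memory_map_data data = {};
	u64 systab;

	systab = efi_get_fdt_params(&data);
	if (!systab)
		return;	/* no usable UEFI description found in the DT */

	/*
	 * data.phys_map, data.size, data.desc_size and data.desc_version
	 * now describe the EFI memory map handed over by the bootloader.
	 */
}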
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 98a81576213d..094eabdecfe6 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -12,7 +12,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
$(call cc-disable-warning, address-of-packed-member) \
- $(call cc-disable-warning, gnu)
+ $(call cc-disable-warning, gnu) \
+ -fno-asynchronous-unwind-tables
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
@@ -25,6 +26,7 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
+ -include $(srctree)/drivers/firmware/efi/libstub/hidden.h \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector) \
@@ -39,11 +41,11 @@ OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
- random.o pci.o
+ file.o mem.o random.o randomalloc.o pci.o \
+ skip_spaces.o lib-cmdline.o lib-ctype.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
-arm-deps-$(CONFIG_ARM64) += sort.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
@@ -53,6 +55,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
+lib-$(CONFIG_X86) += x86-stub.o
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 7bbef4a67350..48161b1dd098 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -10,7 +10,7 @@
*/
#include <linux/efi.h>
-#include <linux/sort.h>
+#include <linux/libfdt.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -36,6 +36,7 @@
#endif
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
+static bool __efistub_global flat_va_mapping;
static efi_system_table_t *__efistub_global sys_table;
@@ -59,7 +60,11 @@ static struct screen_info *setup_graphics(void)
si = alloc_screen_info();
if (!si)
return NULL;
- efi_setup_gop(si, &gop_proto, size);
+ status = efi_setup_gop(si, &gop_proto, size);
+ if (status != EFI_SUCCESS) {
+ free_screen_info(si);
+ return NULL;
+ }
}
return si;
}
@@ -87,6 +92,39 @@ void install_memreserve_table(void)
pr_efi_err("Failed to install memreserve config table!\n");
}
+static unsigned long get_dram_base(void)
+{
+ efi_status_t status;
+ unsigned long map_size, buff_size;
+ unsigned long membase = EFI_ERROR;
+ struct efi_memory_map map;
+ efi_memory_desc_t *md;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = (efi_memory_desc_t **)&map.map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &map.desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&boot_map);
+ if (status != EFI_SUCCESS)
+ return membase;
+
+ map.map_end = map.map + map_size;
+
+ for_each_efi_memory_desc_in_map(&map, md) {
+ if (md->attribute & EFI_MEMORY_WB) {
+ if (membase > md->phys_addr)
+ membase = md->phys_addr;
+ }
+ }
+
+ efi_bs_call(free_pool, map.map);
+
+ return membase;
+}
/*
* This function handles the architecture-specific differences between arm and
@@ -100,38 +138,46 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *reserve_size,
unsigned long dram_base,
efi_loaded_image_t *image);
+
+asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
+ unsigned long fdt_addr,
+ unsigned long fdt_size);
+
/*
* EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
* that is described in the PE/COFF header. Most of the code is the same
* for both architectures, with the arch-specific code provided in the
* handle_kernel_image() function.
*/
-unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
- unsigned long *image_addr)
+efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
{
efi_loaded_image_t *image;
efi_status_t status;
+ unsigned long image_addr;
unsigned long image_size = 0;
unsigned long dram_base;
/* address and size pairs for memory management */
- unsigned long initrd_addr;
- u64 initrd_size = 0;
+ unsigned long initrd_addr = 0;
+ unsigned long initrd_size = 0;
unsigned long fdt_addr = 0; /* Original DTB */
unsigned long fdt_size = 0;
char *cmdline_ptr = NULL;
int cmdline_size = 0;
- unsigned long new_fdt_addr;
efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
unsigned long reserve_addr = 0;
unsigned long reserve_size = 0;
enum efi_secureboot_mode secure_boot;
struct screen_info *si;
+ efi_properties_table_t *prop_tbl;
+ unsigned long max_addr;
sys_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
- if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ status = EFI_INVALID_PARAMETER;
goto fail;
+ }
status = check_platform_features();
if (status != EFI_SUCCESS)
@@ -152,6 +198,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
dram_base = get_dram_base();
if (dram_base == EFI_ERROR) {
pr_efi_err("Failed to find DRAM base\n");
+ status = EFI_LOAD_ERROR;
goto fail;
}
@@ -160,9 +207,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
+ cmdline_ptr = efi_convert_cmdline(image, &cmdline_size, ULONG_MAX);
if (!cmdline_ptr) {
pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
+ status = EFI_OUT_OF_RESOURCES;
goto fail;
}
@@ -178,7 +226,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
si = setup_graphics();
- status = handle_kernel_image(image_addr, &image_size,
+ status = handle_kernel_image(&image_addr, &image_size,
&reserve_addr,
&reserve_size,
dram_base, image);
@@ -204,8 +252,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
if (strstr(cmdline_ptr, "dtb="))
pr_efi("Ignoring DTB from command line.\n");
} else {
- status = handle_cmdline_files(image, cmdline_ptr, "dtb=",
- ~0UL, &fdt_addr, &fdt_size);
+ status = efi_load_dtb(image, &fdt_addr, &fdt_size);
if (status != EFI_SUCCESS) {
pr_efi_err("Failed to load device tree!\n");
@@ -225,18 +272,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
if (!fdt_addr)
pr_efi("Generating empty DTB\n");
- status = handle_cmdline_files(image, cmdline_ptr, "initrd=",
- efi_get_max_initrd_addr(dram_base,
- *image_addr),
- (unsigned long *)&initrd_addr,
- (unsigned long *)&initrd_size);
- if (status != EFI_SUCCESS)
- pr_efi_err("Failed initrd from command line!\n");
+ if (!noinitrd()) {
+ max_addr = efi_get_max_initrd_addr(dram_base, image_addr);
+ status = efi_load_initrd_dev_path(&initrd_addr, &initrd_size,
+ max_addr);
+ if (status == EFI_SUCCESS) {
+ pr_efi("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
+ } else if (status == EFI_NOT_FOUND) {
+ status = efi_load_initrd(image, &initrd_addr, &initrd_size,
+ ULONG_MAX, max_addr);
+ if (status == EFI_SUCCESS && initrd_size > 0)
+ pr_efi("Loaded initrd from command line option\n");
+ }
+ if (status != EFI_SUCCESS)
+ pr_efi_err("Failed to load initrd!\n");
+ }
efi_random_get_seed();
+ /*
+ * If the NX PE data feature is enabled in the properties table, we
+ * should take care not to create a virtual mapping that changes the
+ * relative placement of runtime services code and data regions, as
+ * they may belong to the same PE/COFF executable image in memory.
+ * The easiest way to achieve that is to simply use a 1:1 mapping.
+ */
+ prop_tbl = get_efi_config_table(EFI_PROPERTIES_TABLE_GUID);
+ flat_va_mapping = prop_tbl &&
+ (prop_tbl->memory_protection_attribute &
+ EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
+
/* hibernation expects the runtime regions to stay in the same place */
- if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
+ if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr() && !flat_va_mapping) {
/*
* Randomize the base of the UEFI runtime services region.
* Preserve the 2 MB alignment of the region by taking a
@@ -257,71 +324,30 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
install_memreserve_table();
- new_fdt_addr = fdt_addr;
- status = allocate_new_fdt_and_exit_boot(handle,
- &new_fdt_addr, efi_get_max_fdt_addr(dram_base),
- initrd_addr, initrd_size, cmdline_ptr,
- fdt_addr, fdt_size);
+ status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr,
+ efi_get_max_fdt_addr(dram_base),
+ initrd_addr, initrd_size,
+ cmdline_ptr, fdt_addr, fdt_size);
+ if (status != EFI_SUCCESS)
+ goto fail_free_initrd;
- /*
- * If all went well, we need to return the FDT address to the
- * calling function so it can be passed to kernel as part of
- * the kernel boot protocol.
- */
- if (status == EFI_SUCCESS)
- return new_fdt_addr;
+ efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
+ /* not reached */
+fail_free_initrd:
pr_efi_err("Failed to update FDT and exit boot services\n");
efi_free(initrd_size, initrd_addr);
efi_free(fdt_size, fdt_addr);
fail_free_image:
- efi_free(image_size, *image_addr);
+ efi_free(image_size, image_addr);
efi_free(reserve_size, reserve_addr);
fail_free_cmdline:
free_screen_info(si);
efi_free(cmdline_size, (unsigned long)cmdline_ptr);
fail:
- return EFI_ERROR;
-}
-
-static int cmp_mem_desc(const void *l, const void *r)
-{
- const efi_memory_desc_t *left = l, *right = r;
-
- return (left->phys_addr > right->phys_addr) ? 1 : -1;
-}
-
-/*
- * Returns whether region @left ends exactly where region @right starts,
- * or false if either argument is NULL.
- */
-static bool regions_are_adjacent(efi_memory_desc_t *left,
- efi_memory_desc_t *right)
-{
- u64 left_end;
-
- if (left == NULL || right == NULL)
- return false;
-
- left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
-
- return left_end == right->phys_addr;
-}
-
-/*
- * Returns whether region @left and region @right have compatible memory type
- * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
- */
-static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
- efi_memory_desc_t *right)
-{
- static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
- EFI_MEMORY_WC | EFI_MEMORY_UC |
- EFI_MEMORY_RUNTIME;
-
- return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+ return status;
}
/*
@@ -336,23 +362,10 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
int *count)
{
u64 efi_virt_base = virtmap_base;
- efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
+ efi_memory_desc_t *in, *out = runtime_map;
int l;
- /*
- * To work around potential issues with the Properties Table feature
- * introduced in UEFI 2.5, which may split PE/COFF executable images
- * in memory into several RuntimeServicesCode and RuntimeServicesData
- * regions, we need to preserve the relative offsets between adjacent
- * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
- * The easiest way to find adjacent regions is to sort the memory map
- * before traversing it.
- */
- if (IS_ENABLED(CONFIG_ARM64))
- sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc,
- NULL);
-
- for (l = 0; l < map_size; l += desc_size, prev = in) {
+ for (l = 0; l < map_size; l += desc_size) {
u64 paddr, size;
in = (void *)memory_map + l;
@@ -362,8 +375,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
paddr = in->phys_addr;
size = in->num_pages * EFI_PAGE_SIZE;
+ in->virt_addr = in->phys_addr;
if (novamap()) {
- in->virt_addr = in->phys_addr;
continue;
}
@@ -372,9 +385,7 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
* a 4k page size kernel to kexec a 64k page size kernel and
* vice versa.
*/
- if ((IS_ENABLED(CONFIG_ARM64) &&
- !regions_are_adjacent(prev, in)) ||
- !regions_have_compatible_memory_type_attrs(prev, in)) {
+ if (!flat_va_mapping) {
paddr = round_down(in->phys_addr, SZ_64K);
size += in->phys_addr - paddr;
@@ -389,10 +400,10 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
efi_virt_base = round_up(efi_virt_base, SZ_2M);
else
efi_virt_base = round_up(efi_virt_base, SZ_64K);
- }
- in->virt_addr = efi_virt_base + in->phys_addr - paddr;
- efi_virt_base += size;
+ in->virt_addr += efi_virt_base - paddr;
+ efi_virt_base += size;
+ }
memcpy(out, in, desc_size);
out = (void *)out + desc_size;
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 7b2a6382b647..7826553af2ba 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -227,6 +227,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
* Relocate the zImage, so that it appears in the lowest 128 MB
* memory window.
*/
+ *image_addr = (unsigned long)image->image_base;
*image_size = image->image_size;
status = efi_relocate_kernel(image_addr, *image_size, *image_size,
kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 2915b44132e6..fc9f8ab533a7 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -6,17 +6,11 @@
* Adapted from ARM version by Mark Salter <msalter@redhat.com>
*/
-/*
- * To prevent the compiler from emitting GOT-indirected (and thus absolute)
- * references to the section markers, override their visibility as 'hidden'
- */
-#pragma GCC visibility push(hidden)
-#include <asm/sections.h>
-#pragma GCC visibility pop
#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/memory.h>
+#include <asm/sections.h>
#include <asm/sysreg.h>
#include "efistub.h"
@@ -49,7 +43,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
- void *old_image_addr = (void *)*image_addr;
unsigned long preferred_offset;
u64 phys_seed = 0;
@@ -82,14 +75,12 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
- * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
- * displacement in the interval [0, MIN_KIMG_ALIGN) that
- * doesn't violate this kernel's de-facto alignment
+ * Produce a displacement in the interval [0, MIN_KIMG_ALIGN)
+ * that doesn't violate this kernel's de-facto alignment
* constraints.
*/
u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1);
- u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
- (phys_seed >> 32) & mask : TEXT_OFFSET;
+ u32 offset = (phys_seed >> 32) & mask;
/*
* With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
@@ -123,6 +114,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
* Mustang), we can still place the kernel at the address
* 'dram_base + TEXT_OFFSET'.
*/
+ *image_addr = (unsigned long)_text;
if (*image_addr == preferred_offset)
return EFI_SUCCESS;
@@ -147,7 +139,11 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
*image_addr = *reserve_addr + TEXT_OFFSET;
}
- memcpy((void *)*image_addr, old_image_addr, kernel_size);
+
+ if (image->image_base != _text)
+ pr_efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+
+ memcpy((void *)*image_addr, _text, kernel_size);
return EFI_SUCCESS;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 74ddfb496140..9f34c7242939 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -12,34 +12,27 @@
#include "efistub.h"
-/*
- * Some firmware implementations have problems reading files in one go.
- * A read chunk size of 1MB seems to work for most platforms.
- *
- * Unfortunately, reading files in chunks triggers *other* bugs on some
- * platforms, so we provide a way to disable this workaround, which can
- * be done by passing "efi=nochunk" on the EFI boot stub command line.
- *
- * If you experience issues with initrd images being corrupt it's worth
- * trying efi=nochunk, but chunking is enabled by default because there
- * are far more machines that require the workaround than those that
- * break with it enabled.
- */
-#define EFI_READ_CHUNK_SIZE (1024 * 1024)
-
-static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE;
-
+static bool __efistub_global efi_nochunk;
static bool __efistub_global efi_nokaslr;
+static bool __efistub_global efi_noinitrd;
static bool __efistub_global efi_quiet;
static bool __efistub_global efi_novamap;
static bool __efistub_global efi_nosoftreserve;
static bool __efistub_global efi_disable_pci_dma =
IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+bool __pure nochunk(void)
+{
+ return efi_nochunk;
+}
bool __pure nokaslr(void)
{
return efi_nokaslr;
}
+bool __pure noinitrd(void)
+{
+ return efi_noinitrd;
+}
bool __pure is_quiet(void)
{
return efi_quiet;
@@ -53,13 +46,6 @@ bool __pure __efi_soft_reserve_enabled(void)
return !efi_nosoftreserve;
}
-#define EFI_MMAP_NR_SLACK_SLOTS 8
-
-struct file_info {
- efi_file_handle_t *handle;
- u64 size;
-};
-
void efi_printk(char *str)
{
char *s8;
@@ -77,369 +63,6 @@ void efi_printk(char *str)
}
}
-static inline bool mmap_has_headroom(unsigned long buff_size,
- unsigned long map_size,
- unsigned long desc_size)
-{
- unsigned long slack = buff_size - map_size;
-
- return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
-}
-
-efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
-{
- efi_memory_desc_t *m = NULL;
- efi_status_t status;
- unsigned long key;
- u32 desc_version;
-
- *map->desc_size = sizeof(*m);
- *map->map_size = *map->desc_size * 32;
- *map->buff_size = *map->map_size;
-again:
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
- *map->map_size, (void **)&m);
- if (status != EFI_SUCCESS)
- goto fail;
-
- *map->desc_size = 0;
- key = 0;
- status = efi_bs_call(get_memory_map, map->map_size, m,
- &key, map->desc_size, &desc_version);
- if (status == EFI_BUFFER_TOO_SMALL ||
- !mmap_has_headroom(*map->buff_size, *map->map_size,
- *map->desc_size)) {
- efi_bs_call(free_pool, m);
- /*
- * Make sure there is some entries of headroom so that the
- * buffer can be reused for a new map after allocations are
- * no longer permitted. Its unlikely that the map will grow to
- * exceed this headroom once we are ready to trigger
- * ExitBootServices()
- */
- *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
- *map->buff_size = *map->map_size;
- goto again;
- }
-
- if (status != EFI_SUCCESS)
- efi_bs_call(free_pool, m);
-
- if (map->key_ptr && status == EFI_SUCCESS)
- *map->key_ptr = key;
- if (map->desc_ver && status == EFI_SUCCESS)
- *map->desc_ver = desc_version;
-
-fail:
- *map->map = m;
- return status;
-}
-
-
-unsigned long get_dram_base(void)
-{
- efi_status_t status;
- unsigned long map_size, buff_size;
- unsigned long membase = EFI_ERROR;
- struct efi_memory_map map;
- efi_memory_desc_t *md;
- struct efi_boot_memmap boot_map;
-
- boot_map.map = (efi_memory_desc_t **)&map.map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &map.desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&boot_map);
- if (status != EFI_SUCCESS)
- return membase;
-
- map.map_end = map.map + map_size;
-
- for_each_efi_memory_desc_in_map(&map, md) {
- if (md->attribute & EFI_MEMORY_WB) {
- if (membase > md->phys_addr)
- membase = md->phys_addr;
- }
- }
-
- efi_bs_call(free_pool, map.map);
-
- return membase;
-}
-
-/*
- * Allocate at the highest possible address that is not above 'max'.
- */
-efi_status_t efi_high_alloc(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long max)
-{
- unsigned long map_size, desc_size, buff_size;
- efi_memory_desc_t *map;
- efi_status_t status;
- unsigned long nr_pages;
- u64 max_addr = 0;
- int i;
- struct efi_boot_memmap boot_map;
-
- boot_map.map = &map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&boot_map);
- if (status != EFI_SUCCESS)
- goto fail;
-
- /*
- * Enforce minimum alignment that EFI or Linux requires when
- * requesting a specific address. We are doing page-based (or
- * larger) allocations, and both the address and size must meet
- * alignment constraints.
- */
- if (align < EFI_ALLOC_ALIGN)
- align = EFI_ALLOC_ALIGN;
-
- size = round_up(size, EFI_ALLOC_ALIGN);
- nr_pages = size / EFI_PAGE_SIZE;
-again:
- for (i = 0; i < map_size / desc_size; i++) {
- efi_memory_desc_t *desc;
- unsigned long m = (unsigned long)map;
- u64 start, end;
-
- desc = efi_early_memdesc_ptr(m, desc_size, i);
- if (desc->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-
- if (efi_soft_reserve_enabled() &&
- (desc->attribute & EFI_MEMORY_SP))
- continue;
-
- if (desc->num_pages < nr_pages)
- continue;
-
- start = desc->phys_addr;
- end = start + desc->num_pages * EFI_PAGE_SIZE;
-
- if (end > max)
- end = max;
-
- if ((start + size) > end)
- continue;
-
- if (round_down(end - size, align) < start)
- continue;
-
- start = round_down(end - size, align);
-
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL.
- */
- if (start == 0x0)
- continue;
-
- if (start > max_addr)
- max_addr = start;
- }
-
- if (!max_addr)
- status = EFI_NOT_FOUND;
- else {
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages, &max_addr);
- if (status != EFI_SUCCESS) {
- max = max_addr;
- max_addr = 0;
- goto again;
- }
-
- *addr = max_addr;
- }
-
- efi_bs_call(free_pool, map);
-fail:
- return status;
-}
-
-/*
- * Allocate at the lowest possible address that is not below 'min'.
- */
-efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long min)
-{
- unsigned long map_size, desc_size, buff_size;
- efi_memory_desc_t *map;
- efi_status_t status;
- unsigned long nr_pages;
- int i;
- struct efi_boot_memmap boot_map;
-
- boot_map.map = &map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&boot_map);
- if (status != EFI_SUCCESS)
- goto fail;
-
- /*
- * Enforce minimum alignment that EFI or Linux requires when
- * requesting a specific address. We are doing page-based (or
- * larger) allocations, and both the address and size must meet
- * alignment constraints.
- */
- if (align < EFI_ALLOC_ALIGN)
- align = EFI_ALLOC_ALIGN;
-
- size = round_up(size, EFI_ALLOC_ALIGN);
- nr_pages = size / EFI_PAGE_SIZE;
- for (i = 0; i < map_size / desc_size; i++) {
- efi_memory_desc_t *desc;
- unsigned long m = (unsigned long)map;
- u64 start, end;
-
- desc = efi_early_memdesc_ptr(m, desc_size, i);
-
- if (desc->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-
- if (efi_soft_reserve_enabled() &&
- (desc->attribute & EFI_MEMORY_SP))
- continue;
-
- if (desc->num_pages < nr_pages)
- continue;
-
- start = desc->phys_addr;
- end = start + desc->num_pages * EFI_PAGE_SIZE;
-
- if (start < min)
- start = min;
-
- start = round_up(start, align);
- if ((start + size) > end)
- continue;
-
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages, &start);
- if (status == EFI_SUCCESS) {
- *addr = start;
- break;
- }
- }
-
- if (i == map_size / desc_size)
- status = EFI_NOT_FOUND;
-
- efi_bs_call(free_pool, map);
-fail:
- return status;
-}
-
-void efi_free(unsigned long size, unsigned long addr)
-{
- unsigned long nr_pages;
-
- if (!size)
- return;
-
- nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- efi_bs_call(free_pages, addr, nr_pages);
-}
-
-static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16,
- void **handle, u64 *file_sz)
-{
- efi_file_handle_t *h, *fh = __fh;
- efi_file_info_t *info;
- efi_status_t status;
- efi_guid_t info_guid = EFI_FILE_INFO_ID;
- unsigned long info_sz;
-
- status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to open file: ");
- efi_char16_printk(filename_16);
- efi_printk("\n");
- return status;
- }
-
- *handle = h;
-
- info_sz = 0;
- status = h->get_info(h, &info_guid, &info_sz, NULL);
- if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk("Failed to get file info size\n");
- return status;
- }
-
-grow:
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz,
- (void **)&info);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to alloc mem for file info\n");
- return status;
- }
-
- status = h->get_info(h, &info_guid, &info_sz, info);
- if (status == EFI_BUFFER_TOO_SMALL) {
- efi_bs_call(free_pool, info);
- goto grow;
- }
-
- *file_sz = info->file_size;
- efi_bs_call(free_pool, info);
-
- if (status != EFI_SUCCESS)
- efi_printk("Failed to get initrd info\n");
-
- return status;
-}
-
-static efi_status_t efi_file_read(efi_file_handle_t *handle,
- unsigned long *size, void *addr)
-{
- return handle->read(handle, size, addr);
-}
-
-static efi_status_t efi_file_close(efi_file_handle_t *handle)
-{
- return handle->close(handle);
-}
-
-static efi_status_t efi_open_volume(efi_loaded_image_t *image,
- efi_file_handle_t **__fh)
-{
- efi_file_io_interface_t *io;
- efi_file_handle_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- efi_handle_t handle = image->device_handle;
-
- status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk("Failed to handle fs_proto\n");
- return status;
- }
-
- status = io->open_volume(io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk("Failed to open volume\n");
- else
- *__fh = fh;
-
- return status;
-}
-
/*
* Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
* option, e.g. efi=nochunk.
@@ -450,316 +73,42 @@ static efi_status_t efi_open_volume(efi_loaded_image_t *image,
*/
efi_status_t efi_parse_options(char const *cmdline)
{
- char *str;
-
- str = strstr(cmdline, "nokaslr");
- if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
- efi_nokaslr = true;
-
- str = strstr(cmdline, "quiet");
- if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
- efi_quiet = true;
-
- /*
- * If no EFI parameters were specified on the cmdline we've got
- * nothing to do.
- */
- str = strstr(cmdline, "efi=");
- if (!str)
- return EFI_SUCCESS;
-
- /* Skip ahead to first argument */
- str += strlen("efi=");
-
- /*
- * Remember, because efi= is also used by the kernel we need to
- * skip over arguments we don't understand.
- */
- while (*str && *str != ' ') {
- if (!strncmp(str, "nochunk", 7)) {
- str += strlen("nochunk");
- efi_chunk_size = -1UL;
- }
-
- if (!strncmp(str, "novamap", 7)) {
- str += strlen("novamap");
- efi_novamap = true;
- }
-
- if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
- !strncmp(str, "nosoftreserve", 7)) {
- str += strlen("nosoftreserve");
- efi_nosoftreserve = true;
- }
-
- if (!strncmp(str, "disable_early_pci_dma", 21)) {
- str += strlen("disable_early_pci_dma");
- efi_disable_pci_dma = true;
- }
-
- if (!strncmp(str, "no_disable_early_pci_dma", 24)) {
- str += strlen("no_disable_early_pci_dma");
- efi_disable_pci_dma = false;
- }
-
- /* Group words together, delimited by "," */
- while (*str && *str != ' ' && *str != ',')
- str++;
-
- if (*str == ',')
- str++;
- }
-
- return EFI_SUCCESS;
-}
-
-/*
- * Check the cmdline for a LILO-style file= arguments.
- *
- * We only support loading a file from the same filesystem as
- * the kernel image.
- */
-efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
- char *cmd_line, char *option_string,
- unsigned long max_addr,
- unsigned long *load_addr,
- unsigned long *load_size)
-{
- struct file_info *files;
- unsigned long file_addr;
- u64 file_size_total;
- efi_file_handle_t *fh = NULL;
+ size_t len = strlen(cmdline) + 1;
efi_status_t status;
- int nr_files;
- char *str;
- int i, j, k;
-
- file_addr = 0;
- file_size_total = 0;
-
- str = cmd_line;
-
- j = 0; /* See close_handles */
-
- if (!load_addr || !load_size)
- return EFI_INVALID_PARAMETER;
-
- *load_addr = 0;
- *load_size = 0;
-
- if (!str || !*str)
- return EFI_SUCCESS;
-
- for (nr_files = 0; *str; nr_files++) {
- str = strstr(str, option_string);
- if (!str)
- break;
-
- str += strlen(option_string);
-
- /* Skip any leading slashes */
- while (*str == '/' || *str == '\\')
- str++;
-
- while (*str && *str != ' ' && *str != '\n')
- str++;
- }
-
- if (!nr_files)
- return EFI_SUCCESS;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
- nr_files * sizeof(*files), (void **)&files);
- if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to alloc mem for file handle list\n");
- goto fail;
- }
-
- str = cmd_line;
- for (i = 0; i < nr_files; i++) {
- struct file_info *file;
- efi_char16_t filename_16[256];
- efi_char16_t *p;
-
- str = strstr(str, option_string);
- if (!str)
- break;
-
- str += strlen(option_string);
-
- file = &files[i];
- p = filename_16;
-
- /* Skip any leading slashes */
- while (*str == '/' || *str == '\\')
- str++;
-
- while (*str && *str != ' ' && *str != '\n') {
- if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
- break;
-
- if (*str == '/') {
- *p++ = '\\';
- str++;
- } else {
- *p++ = *str++;
- }
- }
-
- *p = '\0';
+ char *str, *buf;
- /* Only open the volume once. */
- if (!i) {
- status = efi_open_volume(image, &fh);
- if (status != EFI_SUCCESS)
- goto free_files;
- }
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
+ if (status != EFI_SUCCESS)
+ return status;
- status = efi_file_size(fh, filename_16, (void **)&file->handle,
- &file->size);
- if (status != EFI_SUCCESS)
- goto close_handles;
+ str = skip_spaces(memcpy(buf, cmdline, len));
- file_size_total += file->size;
- }
+ while (*str) {
+ char *param, *val;
- if (file_size_total) {
- unsigned long addr;
+ str = next_arg(str, &param, &val);
- /*
- * Multiple files need to be at consecutive addresses in memory,
- * so allocate enough memory for all the files. This is used
- * for loading multiple files.
- */
- status = efi_high_alloc(file_size_total, 0x1000, &file_addr,
- max_addr);
- if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to alloc highmem for files\n");
- goto close_handles;
- }
+ if (!strcmp(param, "nokaslr")) {
+ efi_nokaslr = true;
+ } else if (!strcmp(param, "quiet")) {
+ efi_quiet = true;
+ } else if (!strcmp(param, "noinitrd")) {
+ efi_noinitrd = true;
+ } else if (!strcmp(param, "efi") && val) {
+ efi_nochunk = parse_option_str(val, "nochunk");
+ efi_novamap = parse_option_str(val, "novamap");
- /* We've run out of free low memory. */
- if (file_addr > max_addr) {
- pr_efi_err("We've run out of free low memory\n");
- status = EFI_INVALID_PARAMETER;
- goto free_file_total;
- }
+ efi_nosoftreserve = IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
+ parse_option_str(val, "nosoftreserve");
- addr = file_addr;
- for (j = 0; j < nr_files; j++) {
- unsigned long size;
-
- size = files[j].size;
- while (size) {
- unsigned long chunksize;
-
- if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size)
- chunksize = efi_chunk_size;
- else
- chunksize = size;
-
- status = efi_file_read(files[j].handle,
- &chunksize,
- (void *)addr);
- if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to read file\n");
- goto free_file_total;
- }
- addr += chunksize;
- size -= chunksize;
- }
-
- efi_file_close(files[j].handle);
+ if (parse_option_str(val, "disable_early_pci_dma"))
+ efi_disable_pci_dma = true;
+ if (parse_option_str(val, "no_disable_early_pci_dma"))
+ efi_disable_pci_dma = false;
}
-
- }
-
- efi_bs_call(free_pool, files);
-
- *load_addr = file_addr;
- *load_size = file_size_total;
-
- return status;
-
-free_file_total:
- efi_free(file_size_total, file_addr);
-
-close_handles:
- for (k = j; k < i; k++)
- efi_file_close(files[k].handle);
-free_files:
- efi_bs_call(free_pool, files);
-fail:
- *load_addr = 0;
- *load_size = 0;
-
- return status;
-}
-/*
- * Relocate a kernel image, either compressed or uncompressed.
- * In the ARM64 case, all kernel images are currently
- * uncompressed, and as such when we relocate it we need to
- * allocate additional space for the BSS segment. Any low
- * memory that this function should avoid needs to be
- * unavailable in the EFI memory map, as if the preferred
- * address is not available the lowest available address will
- * be used.
- */
-efi_status_t efi_relocate_kernel(unsigned long *image_addr,
- unsigned long image_size,
- unsigned long alloc_size,
- unsigned long preferred_addr,
- unsigned long alignment,
- unsigned long min_addr)
-{
- unsigned long cur_image_addr;
- unsigned long new_addr = 0;
- efi_status_t status;
- unsigned long nr_pages;
- efi_physical_addr_t efi_addr = preferred_addr;
-
- if (!image_addr || !image_size || !alloc_size)
- return EFI_INVALID_PARAMETER;
- if (alloc_size < image_size)
- return EFI_INVALID_PARAMETER;
-
- cur_image_addr = *image_addr;
-
- /*
- * The EFI firmware loader could have placed the kernel image
- * anywhere in memory, but the kernel has restrictions on the
- * max physical address it can run at. Some architectures
- * also have a prefered address, so first try to relocate
- * to the preferred address. If that fails, allocate as low
- * as possible while respecting the required alignment.
- */
- nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages, &efi_addr);
- new_addr = efi_addr;
- /*
- * If preferred address allocation failed allocate as low as
- * possible.
- */
- if (status != EFI_SUCCESS) {
- status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
- min_addr);
}
- if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate usable memory for kernel.\n");
- return status;
- }
-
- /*
- * We know source/dest won't overlap since both memory ranges
- * have been allocated by UEFI, so we can safely use memcpy.
- */
- memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
-
- /* Return the new address of the relocated image. */
- *image_addr = new_addr;
-
- return status;
+ efi_bs_call(free_pool, buf);
+ return EFI_SUCCESS;
}
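To make the new parsing loop concrete, a worked illustration (input string invented for the example): next_arg() splits the copied command line into param/val pairs, and parse_option_str() matches the comma-separated sub-options of efi=.

/*
 * Illustration only: parsing "quiet efi=nochunk,novamap noinitrd" yields
 *
 *   param="quiet",    val=NULL              -> efi_quiet    = true
 *   param="efi",      val="nochunk,novamap" -> efi_nochunk  = true,
 *                                              efi_novamap  = true
 *   param="noinitrd", val=NULL              -> efi_noinitrd = true
 *
 * Unrecognized efi= sub-options are silently ignored, since the same
 * efi= string is also parsed by the core kernel.
 */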
/*
@@ -811,23 +160,19 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
return dst;
}
-#ifndef MAX_CMDLINE_ADDRESS
-#define MAX_CMDLINE_ADDRESS ULONG_MAX
-#endif
-
/*
* Convert the Unicode UEFI command line to ASCII to pass to the kernel.
* The size of the allocated memory is returned in *cmd_line_len.
* Returns NULL on error.
*/
char *efi_convert_cmdline(efi_loaded_image_t *image,
- int *cmd_line_len)
+ int *cmd_line_len, unsigned long max_addr)
{
const u16 *s2;
u8 *s1 = NULL;
unsigned long cmdline_addr = 0;
- int load_options_chars = image->load_options_size / 2; /* UTF-16 */
- const u16 *options = image->load_options;
+ int load_options_chars = efi_table_attr(image, load_options_size) / 2;
+ const u16 *options = efi_table_attr(image, load_options);
int options_bytes = 0; /* UTF-8 bytes */
int options_chars = 0; /* UTF-16 chars */
efi_status_t status;
@@ -849,8 +194,7 @@ char *efi_convert_cmdline(efi_loaded_image_t *image,
options_bytes++; /* NUL termination */
- status = efi_high_alloc(options_bytes, 0, &cmdline_addr,
- MAX_CMDLINE_ADDRESS);
+ status = efi_allocate_pages(options_bytes, &cmdline_addr, max_addr);
if (status != EFI_SUCCESS)
return NULL;
@@ -962,3 +306,89 @@ void efi_char16_printk(efi_char16_t *str)
efi_call_proto(efi_table_attr(efi_system_table(), con_out),
output_string, str);
}
+
+/*
+ * The LINUX_EFI_INITRD_MEDIA_GUID vendor media device path below provides a way
+ * for the firmware or bootloader to expose the initrd data directly to the stub
+ * via the trivial LoadFile2 protocol, which is defined in the UEFI spec, and is
+ * very easy to implement. It is a simple, Linux initrd-specific conduit between
+ * the kernel and the firmware, allowing us to put the EFI stub (being part of the
+ * kernel) in charge of where and when to load the initrd, while leaving it up
+ * to the firmware to decide whether it needs to expose its filesystem hierarchy
+ * via EFI protocols.
+ */
+static const struct {
+ struct efi_vendor_dev_path vendor;
+ struct efi_generic_dev_path end;
+} __packed initrd_dev_path = {
+ {
+ {
+ EFI_DEV_MEDIA,
+ EFI_DEV_MEDIA_VENDOR,
+ sizeof(struct efi_vendor_dev_path),
+ },
+ LINUX_EFI_INITRD_MEDIA_GUID
+ }, {
+ EFI_DEV_END_PATH,
+ EFI_DEV_END_ENTIRE,
+ sizeof(struct efi_generic_dev_path)
+ }
+};
+
+/**
+ * efi_load_initrd_dev_path - load the initrd from the Linux initrd device path
+ * @load_addr: pointer to store the address where the initrd was loaded
+ * @load_size: pointer to store the size of the loaded initrd
+ * @max: upper limit for the initrd memory allocation
+ * @return: %EFI_SUCCESS if the initrd was loaded successfully, in which
+ * case @load_addr and @load_size are assigned accordingly
+ * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd
+ * device path
+ * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
+ * %EFI_OUT_OF_RESOURCES if memory allocation failed
+ * %EFI_LOAD_ERROR in all other cases
+ */
+efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long max)
+{
+ efi_guid_t lf2_proto_guid = EFI_LOAD_FILE2_PROTOCOL_GUID;
+ efi_device_path_protocol_t *dp;
+ efi_load_file2_protocol_t *lf2;
+ unsigned long initrd_addr;
+ unsigned long initrd_size;
+ efi_handle_t handle;
+ efi_status_t status;
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ dp = (efi_device_path_protocol_t *)&initrd_dev_path;
+ status = efi_bs_call(locate_device_path, &lf2_proto_guid, &dp, &handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(handle_protocol, handle, &lf2_proto_guid,
+ (void **)&lf2);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd_size, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ status = efi_allocate_pages(initrd_size, &initrd_addr, max);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd_size,
+ (void *)initrd_addr);
+ if (status != EFI_SUCCESS) {
+ efi_free(initrd_size, initrd_addr);
+ return EFI_LOAD_ERROR;
+ }
+
+ *load_addr = initrd_addr;
+ *load_size = initrd_size;
+ return EFI_SUCCESS;
+}
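A minimal usage sketch of the helper above, mirroring the fallback order the ARM stub applies in efi_entry() (vendor media device path first, then the command line); the wrapper itself is illustrative, not part of this patch.

/* Illustrative wrapper; the real call sites live in the arch stubs. */
static efi_status_t example_load_initrd(efi_loaded_image_t *image,
					unsigned long *addr,
					unsigned long *size,
					unsigned long max)
{
	efi_status_t status;

	status = efi_load_initrd_dev_path(addr, size, max);
	if (status == EFI_NOT_FOUND)	/* no LoadFile2 initrd exposed */
		status = efi_load_initrd(image, addr, size, ULONG_MAX, max);

	return status;
}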
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index c244b165005e..62943992f02f 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -25,13 +25,15 @@
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
-#ifdef CONFIG_ARM
+#if defined(CONFIG_ARM) || defined(CONFIG_X86)
#define __efistub_global __section(.data)
#else
#define __efistub_global
#endif
+extern bool __pure nochunk(void);
extern bool __pure nokaslr(void);
+extern bool __pure noinitrd(void);
extern bool __pure is_quiet(void);
extern bool __pure novamap(void);
@@ -43,10 +45,562 @@ extern __pure efi_system_table_t *efi_system_table(void);
#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg)
-void efi_char16_printk(efi_char16_t *);
-void efi_char16_printk(efi_char16_t *);
+/* Helper macros for the usual case of using simple C variables: */
+#ifndef fdt_setprop_inplace_var
+#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
+ fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#ifndef fdt_setprop_var
+#define fdt_setprop_var(fdt, node_offset, name, var) \
+ fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#define get_efi_var(name, vendor, ...) \
+ efi_rt_call(get_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+ efi_rt_call(set_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define efi_get_handle_at(array, idx) \
+ (efi_is_native() ? (array)[idx] \
+ : (efi_handle_t)(unsigned long)((u32 *)(array))[idx])
+
+#define efi_get_handle_num(size) \
+ ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
+
+#define for_each_efi_handle(handle, array, size, i) \
+ for (i = 0; \
+ i < efi_get_handle_num(size) && \
+ ((handle = efi_get_handle_at((array), i)) || true); \
+ i++)
+
+/*
+ * Allocation types for calls to boottime->allocate_pages.
+ */
+#define EFI_ALLOCATE_ANY_PAGES 0
+#define EFI_ALLOCATE_MAX_ADDRESS 1
+#define EFI_ALLOCATE_ADDRESS 2
+#define EFI_MAX_ALLOCATE_TYPE 3
+
+/*
+ * The type of search to perform when calling boottime->locate_handle
+ */
+#define EFI_LOCATE_ALL_HANDLES 0
+#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
+#define EFI_LOCATE_BY_PROTOCOL 2
+
+/*
+ * An efi_boot_memmap is used by efi_get_memory_map() to return the
+ * EFI memory map in a dynamically allocated buffer.
+ *
+ * The buffer allocated for the EFI memory map includes extra room for
+ * a minimum of EFI_MMAP_NR_SLACK_SLOTS additional EFI memory descriptors.
+ * This facilitates the reuse of the EFI memory map buffer when a second
+ * call to ExitBootServices() is needed because of intervening changes to
+ * the EFI memory map. Other related structures, e.g. x86 e820ext, need
+ * to factor in this headroom requirement as well.
+ */
+#define EFI_MMAP_NR_SLACK_SLOTS 8
+
+struct efi_boot_memmap {
+ efi_memory_desc_t **map;
+ unsigned long *map_size;
+ unsigned long *desc_size;
+ u32 *desc_ver;
+ unsigned long *key_ptr;
+ unsigned long *buff_size;
+};
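+
+/*
+ * Illustration only (not part of the API): the headroom check applied to a
+ * buffer described by this struct, mirroring the mmap_has_headroom() helper
+ * that this series moves out of efi-stub-helper.c:
+ *
+ *	slack = *map->buff_size - *map->map_size;
+ *	ok    = slack / *map->desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+ */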
+
+typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+
+/*
+ * EFI Boot Services table
+ */
+union efi_boot_services {
+ struct {
+ efi_table_hdr_t hdr;
+ void *raise_tpl;
+ void *restore_tpl;
+ efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long,
+ efi_physical_addr_t *);
+ efi_status_t (__efiapi *free_pages)(efi_physical_addr_t,
+ unsigned long);
+ efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *,
+ unsigned long *,
+ unsigned long *, u32 *);
+ efi_status_t (__efiapi *allocate_pool)(int, unsigned long,
+ void **);
+ efi_status_t (__efiapi *free_pool)(void *);
+ void *create_event;
+ void *set_timer;
+ void *wait_for_event;
+ void *signal_event;
+ void *close_event;
+ void *check_event;
+ void *install_protocol_interface;
+ void *reinstall_protocol_interface;
+ void *uninstall_protocol_interface;
+ efi_status_t (__efiapi *handle_protocol)(efi_handle_t,
+ efi_guid_t *, void **);
+ void *__reserved;
+ void *register_protocol_notify;
+ efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *,
+ void *, unsigned long *,
+ efi_handle_t *);
+ efi_status_t (__efiapi *locate_device_path)(efi_guid_t *,
+ efi_device_path_protocol_t **,
+ efi_handle_t *);
+ efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *,
+ void *);
+ void *load_image;
+ void *start_image;
+ efi_status_t __noreturn (__efiapi *exit)(efi_handle_t,
+ efi_status_t,
+ unsigned long,
+ efi_char16_t *);
+ void *unload_image;
+ efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
+ unsigned long);
+ void *get_next_monotonic_count;
+ void *stall;
+ void *set_watchdog_timer;
+ void *connect_controller;
+ efi_status_t (__efiapi *disconnect_controller)(efi_handle_t,
+ efi_handle_t,
+ efi_handle_t);
+ void *open_protocol;
+ void *close_protocol;
+ void *open_protocol_information;
+ void *protocols_per_handle;
+ void *locate_handle_buffer;
+ efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
+ void **);
+ void *install_multiple_protocol_interfaces;
+ void *uninstall_multiple_protocol_interfaces;
+ void *calculate_crc32;
+ void *copy_mem;
+ void *set_mem;
+ void *create_event_ex;
+ };
+ struct {
+ efi_table_hdr_t hdr;
+ u32 raise_tpl;
+ u32 restore_tpl;
+ u32 allocate_pages;
+ u32 free_pages;
+ u32 get_memory_map;
+ u32 allocate_pool;
+ u32 free_pool;
+ u32 create_event;
+ u32 set_timer;
+ u32 wait_for_event;
+ u32 signal_event;
+ u32 close_event;
+ u32 check_event;
+ u32 install_protocol_interface;
+ u32 reinstall_protocol_interface;
+ u32 uninstall_protocol_interface;
+ u32 handle_protocol;
+ u32 __reserved;
+ u32 register_protocol_notify;
+ u32 locate_handle;
+ u32 locate_device_path;
+ u32 install_configuration_table;
+ u32 load_image;
+ u32 start_image;
+ u32 exit;
+ u32 unload_image;
+ u32 exit_boot_services;
+ u32 get_next_monotonic_count;
+ u32 stall;
+ u32 set_watchdog_timer;
+ u32 connect_controller;
+ u32 disconnect_controller;
+ u32 open_protocol;
+ u32 close_protocol;
+ u32 open_protocol_information;
+ u32 protocols_per_handle;
+ u32 locate_handle_buffer;
+ u32 locate_protocol;
+ u32 install_multiple_protocol_interfaces;
+ u32 uninstall_multiple_protocol_interfaces;
+ u32 calculate_crc32;
+ u32 copy_mem;
+ u32 set_mem;
+ u32 create_event_ex;
+ } mixed_mode;
+};
+
+typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
+
+union efi_uga_draw_protocol {
+ struct {
+ efi_status_t (__efiapi *get_mode)(efi_uga_draw_protocol_t *,
+ u32*, u32*, u32*, u32*);
+ void *set_mode;
+ void *blt;
+ };
+ struct {
+ u32 get_mode;
+ u32 set_mode;
+ u32 blt;
+ } mixed_mode;
+};
+
+union efi_simple_text_output_protocol {
+ struct {
+ void *reset;
+ efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *,
+ efi_char16_t *);
+ void *test_string;
+ };
+ struct {
+ u32 reset;
+ u32 output_string;
+ u32 test_string;
+ } mixed_mode;
+};
+
+#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
+#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
+#define PIXEL_BIT_MASK 2
+#define PIXEL_BLT_ONLY 3
+#define PIXEL_FORMAT_MAX 4
+
+typedef struct {
+ u32 red_mask;
+ u32 green_mask;
+ u32 blue_mask;
+ u32 reserved_mask;
+} efi_pixel_bitmask_t;
+
+typedef struct {
+ u32 version;
+ u32 horizontal_resolution;
+ u32 vertical_resolution;
+ int pixel_format;
+ efi_pixel_bitmask_t pixel_information;
+ u32 pixels_per_scan_line;
+} efi_graphics_output_mode_info_t;
+
+typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t;
+
+union efi_graphics_output_protocol_mode {
+ struct {
+ u32 max_mode;
+ u32 mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long size_of_info;
+ efi_physical_addr_t frame_buffer_base;
+ unsigned long frame_buffer_size;
+ };
+ struct {
+ u32 max_mode;
+ u32 mode;
+ u32 info;
+ u32 size_of_info;
+ u64 frame_buffer_base;
+ u32 frame_buffer_size;
+ } mixed_mode;
+};
+
+typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t;
+
+union efi_graphics_output_protocol {
+ struct {
+ void *query_mode;
+ void *set_mode;
+ void *blt;
+ efi_graphics_output_protocol_mode_t *mode;
+ };
+ struct {
+ u32 query_mode;
+ u32 set_mode;
+ u32 blt;
+ u32 mode;
+ } mixed_mode;
+};
+
+typedef union {
+ struct {
+ u32 revision;
+ efi_handle_t parent_handle;
+ efi_system_table_t *system_table;
+ efi_handle_t device_handle;
+ void *file_path;
+ void *reserved;
+ u32 load_options_size;
+ void *load_options;
+ void *image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ efi_status_t (__efiapi *unload)(efi_handle_t image_handle);
+ };
+ struct {
+ u32 revision;
+ u32 parent_handle;
+ u32 system_table;
+ u32 device_handle;
+ u32 file_path;
+ u32 reserved;
+ u32 load_options_size;
+ u32 load_options;
+ u32 image_base;
+ __aligned_u64 image_size;
+ u32 image_code_type;
+ u32 image_data_type;
+ u32 unload;
+ } mixed_mode;
+} efi_loaded_image_t;
+
+typedef struct {
+ u64 size;
+ u64 file_size;
+ u64 phys_size;
+ efi_time_t create_time;
+ efi_time_t last_access_time;
+ efi_time_t modification_time;
+ __aligned_u64 attribute;
+ efi_char16_t filename[];
+} efi_file_info_t;
+
+typedef struct efi_file_protocol efi_file_protocol_t;
-unsigned long get_dram_base(void);
+struct efi_file_protocol {
+ u64 revision;
+ efi_status_t (__efiapi *open) (efi_file_protocol_t *,
+ efi_file_protocol_t **,
+ efi_char16_t *, u64, u64);
+ efi_status_t (__efiapi *close) (efi_file_protocol_t *);
+ efi_status_t (__efiapi *delete) (efi_file_protocol_t *);
+ efi_status_t (__efiapi *read) (efi_file_protocol_t *,
+ unsigned long *, void *);
+ efi_status_t (__efiapi *write) (efi_file_protocol_t *,
+ unsigned long, void *);
+ efi_status_t (__efiapi *get_position)(efi_file_protocol_t *, u64 *);
+ efi_status_t (__efiapi *set_position)(efi_file_protocol_t *, u64);
+ efi_status_t (__efiapi *get_info) (efi_file_protocol_t *,
+ efi_guid_t *, unsigned long *,
+ void *);
+ efi_status_t (__efiapi *set_info) (efi_file_protocol_t *,
+ efi_guid_t *, unsigned long,
+ void *);
+ efi_status_t (__efiapi *flush) (efi_file_protocol_t *);
+};
+
+typedef struct efi_simple_file_system_protocol efi_simple_file_system_protocol_t;
+
+struct efi_simple_file_system_protocol {
+ u64 revision;
+ int (__efiapi *open_volume)(efi_simple_file_system_protocol_t *,
+ efi_file_protocol_t **);
+};
+
+#define EFI_FILE_MODE_READ 0x0000000000000001
+#define EFI_FILE_MODE_WRITE 0x0000000000000002
+#define EFI_FILE_MODE_CREATE 0x8000000000000000
+
+typedef enum {
+ EfiPciIoWidthUint8,
+ EfiPciIoWidthUint16,
+ EfiPciIoWidthUint32,
+ EfiPciIoWidthUint64,
+ EfiPciIoWidthFifoUint8,
+ EfiPciIoWidthFifoUint16,
+ EfiPciIoWidthFifoUint32,
+ EfiPciIoWidthFifoUint64,
+ EfiPciIoWidthFillUint8,
+ EfiPciIoWidthFillUint16,
+ EfiPciIoWidthFillUint32,
+ EfiPciIoWidthFillUint64,
+ EfiPciIoWidthMaximum
+} EFI_PCI_IO_PROTOCOL_WIDTH;
+
+typedef enum {
+ EfiPciIoAttributeOperationGet,
+ EfiPciIoAttributeOperationSet,
+ EfiPciIoAttributeOperationEnable,
+ EfiPciIoAttributeOperationDisable,
+ EfiPciIoAttributeOperationSupported,
+ EfiPciIoAttributeOperationMaximum
+} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
+
+typedef struct {
+ u32 read;
+ u32 write;
+} efi_pci_io_protocol_access_32_t;
+
+typedef union efi_pci_io_protocol efi_pci_io_protocol_t;
+
+typedef
+efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *,
+ EFI_PCI_IO_PROTOCOL_WIDTH,
+ u32 offset,
+ unsigned long count,
+ void *buffer);
+
+typedef struct {
+ void *read;
+ void *write;
+} efi_pci_io_protocol_access_t;
+
+typedef struct {
+ efi_pci_io_protocol_cfg_t read;
+ efi_pci_io_protocol_cfg_t write;
+} efi_pci_io_protocol_config_access_t;
+
+union efi_pci_io_protocol {
+ struct {
+ void *poll_mem;
+ void *poll_io;
+ efi_pci_io_protocol_access_t mem;
+ efi_pci_io_protocol_access_t io;
+ efi_pci_io_protocol_config_access_t pci;
+ void *copy_mem;
+ void *map;
+ void *unmap;
+ void *allocate_buffer;
+ void *free_buffer;
+ void *flush;
+ efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *,
+ unsigned long *segment_nr,
+ unsigned long *bus_nr,
+ unsigned long *device_nr,
+ unsigned long *func_nr);
+ void *attributes;
+ void *get_bar_attributes;
+ void *set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+ };
+ struct {
+ u32 poll_mem;
+ u32 poll_io;
+ efi_pci_io_protocol_access_32_t mem;
+ efi_pci_io_protocol_access_32_t io;
+ efi_pci_io_protocol_access_32_t pci;
+ u32 copy_mem;
+ u32 map;
+ u32 unmap;
+ u32 allocate_buffer;
+ u32 free_buffer;
+ u32 flush;
+ u32 get_location;
+ u32 attributes;
+ u32 get_bar_attributes;
+ u32 set_bar_attributes;
+ u64 romsize;
+ u32 romimage;
+ } mixed_mode;
+};
+
+#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
+#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
+#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
+#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
+#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
+#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
+#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
+
+struct efi_dev_path;
+
+typedef union apple_properties_protocol apple_properties_protocol_t;
+
+union apple_properties_protocol {
+ struct {
+ unsigned long version;
+ efi_status_t (__efiapi *get)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *, void *, u32 *);
+ efi_status_t (__efiapi *set)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *, void *, u32);
+ efi_status_t (__efiapi *del)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *);
+ efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *,
+ void *buffer, u32 *);
+ };
+ struct {
+ u32 version;
+ u32 get;
+ u32 set;
+ u32 del;
+ u32 get_all;
+ } mixed_mode;
+};
+
+typedef u32 efi_tcg2_event_log_format;
+
+typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
+
+union efi_tcg2_protocol {
+ struct {
+ void *get_capability;
+ efi_status_t (__efiapi *get_event_log)(efi_handle_t,
+ efi_tcg2_event_log_format,
+ efi_physical_addr_t *,
+ efi_physical_addr_t *,
+ efi_bool_t *);
+ void *hash_log_extend_event;
+ void *submit_command;
+ void *get_active_pcr_banks;
+ void *set_active_pcr_banks;
+ void *get_result_of_set_active_pcr_banks;
+ };
+ struct {
+ u32 get_capability;
+ u32 get_event_log;
+ u32 hash_log_extend_event;
+ u32 submit_command;
+ u32 get_active_pcr_banks;
+ u32 set_active_pcr_banks;
+ u32 get_result_of_set_active_pcr_banks;
+ } mixed_mode;
+};
+
+typedef union efi_load_file_protocol efi_load_file_protocol_t;
+typedef union efi_load_file_protocol efi_load_file2_protocol_t;
+
+union efi_load_file_protocol {
+ struct {
+ efi_status_t (__efiapi *load_file)(efi_load_file_protocol_t *,
+ efi_device_path_protocol_t *,
+ bool, unsigned long *, void *);
+ };
+ struct {
+ u32 load_file;
+ } mixed_mode;
+};
+
+void efi_pci_disable_bridge_busmaster(void);
+
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+ struct efi_boot_memmap *map,
+ void *priv);
+
+efi_status_t efi_exit_boot_services(void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func);
+
+void efi_char16_printk(efi_char16_t *);
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
@@ -71,23 +625,57 @@ efi_status_t check_platform_features(void);
void *get_efi_config_table(efi_guid_t guid);
-/* Helper macros for the usual case of using simple C variables: */
-#ifndef fdt_setprop_inplace_var
-#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
- fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var))
-#endif
+void efi_printk(char *str);
-#ifndef fdt_setprop_var
-#define fdt_setprop_var(fdt, node_offset, name, var) \
- fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
-#endif
+void efi_free(unsigned long size, unsigned long addr);
-#define get_efi_var(name, vendor, ...) \
- efi_rt_call(get_variable, (efi_char16_t *)(name), \
- (efi_guid_t *)(vendor), __VA_ARGS__)
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len,
+ unsigned long max_addr);
-#define set_efi_var(name, vendor, ...) \
- efi_rt_call(set_variable, (efi_char16_t *)(name), \
- (efi_guid_t *)(vendor), __VA_ARGS__)
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
+
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min);
+
+static inline
+efi_status_t efi_low_alloc(unsigned long size, unsigned long align,
+ unsigned long *addr)
+{
+ /*
+ * Don't allocate at 0x0. It will confuse code that
+ * checks pointers against NULL. Skip the first 8
+ * bytes so we start at a nice even number.
+ */
+ return efi_low_alloc_above(size, align, addr, 0x8);
+}
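
A minimal usage sketch of the allocator declared above (illustrative fragment only; error handling elided):

	unsigned long addr;
	efi_status_t status;

	/* one page, page-aligned, as low as possible but never at 0x0 */
	status = efi_low_alloc(EFI_PAGE_SIZE, EFI_PAGE_SIZE, &addr);
	if (status == EFI_SUCCESS)
		efi_free(EFI_PAGE_SIZE, addr);
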
+
+efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ unsigned long max);
+
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr);
+
+efi_status_t efi_parse_options(char const *cmdline);
+
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size);
+
+efi_status_t efi_load_dtb(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size);
+
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit);
+
+efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long max);
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0a91e5232127..46cffac7a5f1 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -199,10 +199,6 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
return EFI_SUCCESS;
}
-#ifndef EFI_FDT_ALIGN
-# define EFI_FDT_ALIGN EFI_PAGE_SIZE
-#endif
-
struct exit_boot_struct {
efi_memory_desc_t *runtime_map;
int *runtime_entry_count;
@@ -281,8 +277,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
pr_efi("Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
- status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN,
- new_fdt_addr, max_addr);
+ status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, max_addr);
if (status != EFI_SUCCESS) {
pr_efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
new file mode 100644
index 000000000000..ea66b1f16a79
--- /dev/null
+++ b/drivers/firmware/efi/libstub/file.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures to load auxiliary files (initrd, dtb) from
+ * the volume the kernel image was loaded from.
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#define MAX_FILENAME_SIZE 256
+
+/*
+ * Some firmware implementations have problems reading files in one go.
+ * A read chunk size of 1MB seems to work for most platforms.
+ *
+ * Unfortunately, reading files in chunks triggers *other* bugs on some
+ * platforms, so we provide a way to disable this workaround, which can
+ * be done by passing "efi=nochunk" on the EFI boot stub command line.
+ *
+ * If you experience issues with initrd images being corrupt it's worth
+ * trying efi=nochunk, but chunking is enabled by default on x86 because
+ * there are far more machines that require the workaround than those that
+ * break with it enabled.
+ */
+#define EFI_READ_CHUNK_SIZE SZ_1M
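
The workaround amounts to capping the byte count passed to each EFI read call. A condensed sketch of the pattern, which appears in full in handle_cmdline_files() below:

	while (size) {
		/* never ask the firmware for more than 1 MiB at a time */
		unsigned long chunk = min(size, (unsigned long)EFI_READ_CHUNK_SIZE);

		status = file->read(file, &chunk, addr);	/* chunk is in/out */
		if (status != EFI_SUCCESS)
			break;
		addr += chunk;
		size -= chunk;
	}
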
+
+struct finfo {
+ efi_file_info_t info;
+ efi_char16_t filename[MAX_FILENAME_SIZE];
+};
+
+static efi_status_t efi_open_file(efi_file_protocol_t *volume,
+ struct finfo *fi,
+ efi_file_protocol_t **handle,
+ unsigned long *file_size)
+{
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ efi_file_protocol_t *fh;
+ unsigned long info_sz;
+ efi_status_t status;
+
+ status = volume->open(volume, &fh, fi->filename, EFI_FILE_MODE_READ, 0);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to open file: ");
+ efi_char16_printk(fi->filename);
+ efi_printk("\n");
+ return status;
+ }
+
+ info_sz = sizeof(struct finfo);
+ status = fh->get_info(fh, &info_guid, &info_sz, fi);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to get file info\n");
+ fh->close(fh);
+ return status;
+ }
+
+ *handle = fh;
+ *file_size = fi->info.file_size;
+ return EFI_SUCCESS;
+}
+
+static efi_status_t efi_open_volume(efi_loaded_image_t *image,
+ efi_file_protocol_t **fh)
+{
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_simple_file_system_protocol_t *io;
+ efi_status_t status;
+
+ status = efi_bs_call(handle_protocol, image->device_handle, &fs_proto,
+ (void **)&io);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to handle fs_proto\n");
+ return status;
+ }
+
+ status = io->open_volume(io, fh);
+ if (status != EFI_SUCCESS)
+ pr_efi_err("Failed to open volume\n");
+
+ return status;
+}
+
+static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
+ const efi_char16_t *prefix, int prefix_size,
+ efi_char16_t *result, int result_len)
+{
+ int prefix_len = prefix_size / 2;
+ bool found = false;
+ int i;
+
+ for (i = prefix_len; i < cmdline_len; i++) {
+ if (!memcmp(&cmdline[i - prefix_len], prefix, prefix_size)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ while (--result_len > 0 && i < cmdline_len) {
+ if (cmdline[i] == L'\0' ||
+ cmdline[i] == L'\n' ||
+ cmdline[i] == L' ')
+ break;
+ *result++ = cmdline[i++];
+ }
+ *result = L'\0';
+ return i;
+}
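
A hedged usage sketch (names local to this example): scanning the UTF-16 load options for an initrd= argument. The return value is the offset just past the parsed value, or 0 if the prefix was not found:

	efi_char16_t path[MAX_FILENAME_SIZE];
	int consumed;

	consumed = find_file_option(cmdline, cmdline_len,
				    L"initrd=", sizeof(L"initrd=") - 2,
				    path, ARRAY_SIZE(path));
	if (consumed) {
		/* 'path' now holds the NUL-terminated file name */
	}
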
+
+/*
+ * Check the cmdline for LILO-style file= arguments.
+ *
+ * We only support loading a file from the same filesystem as
+ * the kernel image.
+ */
+static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ const efi_char16_t *cmdline = image->load_options;
+ int cmdline_len = image->load_options_size / 2;
+ unsigned long efi_chunk_size = ULONG_MAX;
+ efi_file_protocol_t *volume = NULL;
+ efi_file_protocol_t *file;
+ unsigned long alloc_addr;
+ unsigned long alloc_size;
+ efi_status_t status;
+ int offset;
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ if (IS_ENABLED(CONFIG_X86) && !nochunk())
+ efi_chunk_size = EFI_READ_CHUNK_SIZE;
+
+ alloc_addr = alloc_size = 0;
+ do {
+ struct finfo fi;
+ unsigned long size;
+ void *addr;
+
+ offset = find_file_option(cmdline, cmdline_len,
+ optstr, optstr_size,
+ fi.filename, ARRAY_SIZE(fi.filename));
+
+ if (!offset)
+ break;
+
+ cmdline += offset;
+ cmdline_len -= offset;
+
+ if (!volume) {
+ status = efi_open_volume(image, &volume);
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+
+ status = efi_open_file(volume, &fi, &file, &size);
+ if (status != EFI_SUCCESS)
+ goto err_close_volume;
+
+ /*
+ * Check whether the existing allocation needs to grow to
+ * contain the next file. This condition also triggers
+ * naturally during the first (and typically only) iteration
+ * of the loop, given that alloc_size == 0 in that case.
+ */
+ if (round_up(alloc_size + size, EFI_ALLOC_ALIGN) >
+ round_up(alloc_size, EFI_ALLOC_ALIGN)) {
+ unsigned long old_addr = alloc_addr;
+
+ status = EFI_OUT_OF_RESOURCES;
+ if (soft_limit < hard_limit)
+ status = efi_allocate_pages(alloc_size + size,
+ &alloc_addr,
+ soft_limit);
+ if (status == EFI_OUT_OF_RESOURCES)
+ status = efi_allocate_pages(alloc_size + size,
+ &alloc_addr,
+ hard_limit);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to allocate memory for files\n");
+ goto err_close_file;
+ }
+
+ if (old_addr != 0) {
+ /*
+ * This is not the first time we've gone
+ * around this loop, and so we are loading
+ * multiple files that need to be concatenated
+ * and returned in a single buffer.
+ */
+ memcpy((void *)alloc_addr, (void *)old_addr, alloc_size);
+ efi_free(alloc_size, old_addr);
+ }
+ }
+
+ addr = (void *)alloc_addr + alloc_size;
+ alloc_size += size;
+
+ while (size) {
+ unsigned long chunksize = min(size, efi_chunk_size);
+
+ status = file->read(file, &chunksize, addr);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to read file\n");
+ goto err_close_file;
+ }
+ addr += chunksize;
+ size -= chunksize;
+ }
+ file->close(file);
+ } while (offset > 0);
+
+ *load_addr = alloc_addr;
+ *load_size = alloc_size;
+
+ if (volume)
+ volume->close(volume);
+ return EFI_SUCCESS;
+
+err_close_file:
+ file->close(file);
+
+err_close_volume:
+ volume->close(volume);
+ efi_free(alloc_size, alloc_addr);
+ return status;
+}
+
+efi_status_t efi_load_dtb(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ return handle_cmdline_files(image, L"dtb=", sizeof(L"dtb=") - 2,
+ ULONG_MAX, ULONG_MAX, load_addr, load_size);
+}
+
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
+ soft_limit, hard_limit, load_addr, load_size);
+}
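
A note on the prefix-size arithmetic above: with the 2-byte wide characters the stub is built with (-fshort-wchar), sizeof(L"dtb=") is 5 characters x 2 bytes = 10; subtracting 2 drops the wide NUL terminator, giving 8 bytes, which find_file_option() halves back into a 4-character prefix length.
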
diff --git a/drivers/firmware/efi/libstub/hidden.h b/drivers/firmware/efi/libstub/hidden.h
new file mode 100644
index 000000000000..3493b041f419
--- /dev/null
+++ b/drivers/firmware/efi/libstub/hidden.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * To prevent the compiler from emitting GOT-indirected (and thus absolute)
+ * references to any global symbols, override their visibility as 'hidden'
+ */
+#pragma GCC visibility push(hidden)
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
new file mode 100644
index 000000000000..09f4fa01914e
--- /dev/null
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+static inline bool mmap_has_headroom(unsigned long buff_size,
+ unsigned long map_size,
+ unsigned long desc_size)
+{
+ unsigned long slack = buff_size - map_size;
+
+ return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
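
Worked example, assuming EFI_MMAP_NR_SLACK_SLOTS is 8 (its value in this series): with buff_size = 4096, map_size = 3600 and desc_size = 48, slack is 496 bytes, or 496 / 48 = 10 spare descriptor slots, so the map still has headroom.
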
+
+/**
+ * efi_get_memory_map() - get memory map
+ * @map: on return pointer to memory map
+ *
+ * Retrieve the UEFI memory map. The allocated memory leaves room for
+ * up to EFI_MMAP_NR_SLACK_SLOTS additional memory map entries.
+ *
+ * Return: status code
+ */
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
+{
+ efi_memory_desc_t *m = NULL;
+ efi_status_t status;
+ unsigned long key;
+ u32 desc_version;
+
+ *map->desc_size = sizeof(*m);
+ *map->map_size = *map->desc_size * 32;
+ *map->buff_size = *map->map_size;
+again:
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ *map->map_size, (void **)&m);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ *map->desc_size = 0;
+ key = 0;
+ status = efi_bs_call(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
+ if (status == EFI_BUFFER_TOO_SMALL ||
+ !mmap_has_headroom(*map->buff_size, *map->map_size,
+ *map->desc_size)) {
+ efi_bs_call(free_pool, m);
+ /*
+ * Make sure there are a few entries of headroom so that the
+ * buffer can be reused for a new map after allocations are
+ * no longer permitted. It's unlikely that the map will grow to
+ * exceed this headroom once we are ready to trigger
+ * ExitBootServices().
+ */
+ *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ *map->buff_size = *map->map_size;
+ goto again;
+ }
+
+ if (status == EFI_SUCCESS) {
+ if (map->key_ptr)
+ *map->key_ptr = key;
+ if (map->desc_ver)
+ *map->desc_ver = desc_version;
+ } else {
+ efi_bs_call(free_pool, m);
+ }
+
+fail:
+ *map->map = m;
+ return status;
+}
+
+/**
+ * efi_allocate_pages() - Allocate memory pages
+ * @size: minimum number of bytes to allocate
+ * @addr: On return the address of the first allocated page. The first
+ * allocated page has alignment EFI_ALLOC_ALIGN which is an
+ * architecture dependent multiple of the page size.
+ * @max: the address that the last allocated memory page shall not
+ * exceed
+ *
+ * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
+ * to EFI_ALLOC_ALIGN. The last allocated page will not exceed the address
+ * given by @max.
+ *
+ * Return: status code
+ */
+efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ unsigned long max)
+{
+ efi_physical_addr_t alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
+ int slack = EFI_ALLOC_ALIGN / EFI_PAGE_SIZE - 1;
+ efi_status_t status;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *addr = ALIGN((unsigned long)alloc_addr, EFI_ALLOC_ALIGN);
+
+ if (slack > 0) {
+ int l = (alloc_addr % EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+
+ if (l) {
+ efi_bs_call(free_pages, alloc_addr, slack - l + 1);
+ slack = l - 1;
+ }
+ if (slack)
+ efi_bs_call(free_pages, *addr + size, slack);
+ }
+ return EFI_SUCCESS;
+}
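
Worked example of the slack trimming above, assuming 4 KiB EFI pages and a 64 KiB EFI_ALLOC_ALIGN (the arm64 case): slack = 16 - 1 = 15 extra pages are requested. If the firmware returns alloc_addr = 0x10003000, then l = 3, *addr is rounded up to 0x10010000, the 15 - 3 + 1 = 13 head pages starting at 0x10003000 are freed (ending exactly at *addr), and the remaining l - 1 = 2 tail pages past *addr + size are freed as well.
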
+/**
+ * efi_low_alloc_above() - allocate pages at or above given address
+ * @size: size of the memory area to allocate
+ * @align: minimum alignment of the allocated memory area. It should
+ * be a power of two.
+ * @addr: on exit the address of the allocated memory
+ * @min: minimum address to use for the memory allocation
+ *
+ * Allocate at the lowest possible address that is not below @min as
+ * EFI_LOADER_DATA. The allocated pages are aligned according to @align but at
+ * least EFI_ALLOC_ALIGN. The first allocated page will not be below the address
+ * given by @min.
+ *
+ * Return: status code
+ */
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
+{
+ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
+ */
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
+
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+ if (start < min)
+ start = min;
+
+ start = round_up(start, align);
+ if ((start + size) > end)
+ continue;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
+ if (status == EFI_SUCCESS) {
+ *addr = start;
+ break;
+ }
+ }
+
+ if (i == map_size / desc_size)
+ status = EFI_NOT_FOUND;
+
+ efi_bs_call(free_pool, map);
+fail:
+ return status;
+}
+
+/**
+ * efi_free() - free memory pages
+ * @size: size of the memory area to free in bytes
+ * @addr: start of the memory area to free (must be EFI_PAGE_SIZE
+ * aligned)
+ *
+ * @size is rounded up to a multiple of EFI_ALLOC_ALIGN which is an
+ * architecture specific multiple of EFI_PAGE_SIZE. So this function should
+ * only be used to return pages allocated with efi_allocate_pages() or
+ * efi_low_alloc_above().
+ */
+void efi_free(unsigned long size, unsigned long addr)
+{
+ unsigned long nr_pages;
+
+ if (!size)
+ return;
+
+ nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ efi_bs_call(free_pages, addr, nr_pages);
+}
+
+/**
+ * efi_relocate_kernel() - copy memory area
+ * @image_addr: pointer to address of memory area to copy
+ * @image_size: size of memory area to copy
+ * @alloc_size: minimum size of memory to allocate, must be greater or
+ * equal to image_size
+ * @preferred_addr: preferred target address
+ * @alignment: minimum alignment of the allocated memory area. It
+ * should be a power of two.
+ * @min_addr: minimum target address
+ *
+ * Copy a memory area to a newly allocated memory area aligned according
+ * to @alignment but at least EFI_ALLOC_ALIGN. If the preferred address
+ * is not available, the allocated address will not be below @min_addr.
+ * On exit, @image_addr is updated to the target copy address that was used.
+ *
+ * This function is used to copy the Linux kernel verbatim. It does not apply
+ * any relocation changes.
+ *
+ * Return: status code
+ */
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr)
+{
+ unsigned long cur_image_addr;
+ unsigned long new_addr = 0;
+ efi_status_t status;
+ unsigned long nr_pages;
+ efi_physical_addr_t efi_addr = preferred_addr;
+
+ if (!image_addr || !image_size || !alloc_size)
+ return EFI_INVALID_PARAMETER;
+ if (alloc_size < image_size)
+ return EFI_INVALID_PARAMETER;
+
+ cur_image_addr = *image_addr;
+
+ /*
+ * The EFI firmware loader could have placed the kernel image
+ * anywhere in memory, but the kernel has restrictions on the
+ * max physical address it can run at. Some architectures
+ * also have a preferred address, so first try to relocate
+ * to the preferred address. If that fails, allocate as low
+ * as possible while respecting the required alignment.
+ */
+ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &efi_addr);
+ new_addr = efi_addr;
+ /*
+ * If preferred address allocation failed allocate as low as
+ * possible.
+ */
+ if (status != EFI_SUCCESS) {
+ status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+ min_addr);
+ }
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to allocate usable memory for kernel.\n");
+ return status;
+ }
+
+ /*
+ * We know source/dest won't overlap since both memory ranges
+ * have been allocated by UEFI, so we can safely use memcpy.
+ */
+ memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
+
+ /* Return the new address of the relocated image. */
+ *image_addr = new_addr;
+
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 316ce9ff0193..24aa37535372 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -4,7 +4,6 @@
*/
#include <linux/efi.h>
-#include <linux/log2.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -26,6 +25,17 @@ union efi_rng_protocol {
} mixed_mode;
};
+/**
+ * efi_get_random_bytes() - fill a buffer with random bytes
+ * @size: size of the buffer
+ * @out: caller allocated buffer to receive the random bytes
+ *
+ * The call will fail if either the firmware does not implement the
+ * EFI_RNG_PROTOCOL or there are not enough random bytes available to fill
+ * the buffer.
+ *
+ * Return: status code
+ */
efi_status_t efi_get_random_bytes(unsigned long size, u8 *out)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
@@ -39,119 +49,19 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out)
return efi_call_proto(rng, get_rng, NULL, size, out);
}
-/*
- * Return the number of slots covered by this entry, i.e., the number of
- * addresses it covers that are suitably aligned and supply enough room
- * for the allocation.
+/**
+ * efi_random_get_seed() - provide random seed as configuration table
+ *
+ * The EFI_RNG_PROTOCOL is used to read random bytes. These random bytes are
+ * saved as a configuration table which can be used as entropy by the kernel
+ * for the initialization of its pseudo random number generator.
+ *
+ * If the EFI_RNG_PROTOCOL is not available or there are not enough random bytes
+ * available, the configuration table will not be installed and an error code
+ * will be returned.
+ *
+ * Return: status code
*/
-static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
- unsigned long size,
- unsigned long align_shift)
-{
- unsigned long align = 1UL << align_shift;
- u64 first_slot, last_slot, region_end;
-
- if (md->type != EFI_CONVENTIONAL_MEMORY)
- return 0;
-
- if (efi_soft_reserve_enabled() &&
- (md->attribute & EFI_MEMORY_SP))
- return 0;
-
- region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1);
-
- first_slot = round_up(md->phys_addr, align);
- last_slot = round_down(region_end - size + 1, align);
-
- if (first_slot > last_slot)
- return 0;
-
- return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
-}
-
-/*
- * The UEFI memory descriptors have a virtual address field that is only used
- * when installing the virtual mapping using SetVirtualAddressMap(). Since it
- * is unused here, we can reuse it to keep track of each descriptor's slot
- * count.
- */
-#define MD_NUM_SLOTS(md) ((md)->virt_addr)
-
-efi_status_t efi_random_alloc(unsigned long size,
- unsigned long align,
- unsigned long *addr,
- unsigned long random_seed)
-{
- unsigned long map_size, desc_size, total_slots = 0, target_slot;
- unsigned long buff_size;
- efi_status_t status;
- efi_memory_desc_t *memory_map;
- int map_offset;
- struct efi_boot_memmap map;
-
- map.map = &memory_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = NULL;
- map.key_ptr = NULL;
- map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&map);
- if (status != EFI_SUCCESS)
- return status;
-
- if (align < EFI_ALLOC_ALIGN)
- align = EFI_ALLOC_ALIGN;
-
- /* count the suitable slots in each memory map entry */
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
- unsigned long slots;
-
- slots = get_entry_num_slots(md, size, ilog2(align));
- MD_NUM_SLOTS(md) = slots;
- total_slots += slots;
- }
-
- /* find a random number between 0 and total_slots */
- target_slot = (total_slots * (u16)random_seed) >> 16;
-
- /*
- * target_slot is now a value in the range [0, total_slots), and so
- * it corresponds with exactly one of the suitable slots we recorded
- * when iterating over the memory map the first time around.
- *
- * So iterate over the memory map again, subtracting the number of
- * slots of each entry at each iteration, until we have found the entry
- * that covers our chosen slot. Use the residual value of target_slot
- * to calculate the randomly chosen address, and allocate it directly
- * using EFI_ALLOCATE_ADDRESS.
- */
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
- efi_physical_addr_t target;
- unsigned long pages;
-
- if (target_slot >= MD_NUM_SLOTS(md)) {
- target_slot -= MD_NUM_SLOTS(md);
- continue;
- }
-
- target = round_up(md->phys_addr, align) + target_slot * align;
- pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
-
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, pages, &target);
- if (status == EFI_SUCCESS)
- *addr = target;
- break;
- }
-
- efi_bs_call(free_pool, memory_map);
-
- return status;
-}
-
efi_status_t efi_random_get_seed(void)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
new file mode 100644
index 000000000000..4578f59e160c
--- /dev/null
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/efi.h>
+#include <linux/log2.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/*
+ * Return the number of slots covered by this entry, i.e., the number of
+ * addresses it covers that are suitably aligned and supply enough room
+ * for the allocation.
+ */
+static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+ unsigned long align_shift)
+{
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ return 0;
+
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return 0;
+
+ region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
+ (u64)ULONG_MAX);
+
+ first_slot = round_up(md->phys_addr, align);
+ last_slot = round_down(region_end - size + 1, align);
+
+ if (first_slot > last_slot)
+ return 0;
+
+ return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
+}
+
+/*
+ * The UEFI memory descriptors have a virtual address field that is only used
+ * when installing the virtual mapping using SetVirtualAddressMap(). Since it
+ * is unused here, we can reuse it to keep track of each descriptor's slot
+ * count.
+ */
+#define MD_NUM_SLOTS(md) ((md)->virt_addr)
+
+efi_status_t efi_random_alloc(unsigned long size,
+ unsigned long align,
+ unsigned long *addr,
+ unsigned long random_seed)
+{
+ unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ unsigned long buff_size;
+ efi_status_t status;
+ efi_memory_desc_t *memory_map;
+ int map_offset;
+ struct efi_boot_memmap map;
+
+ map.map = &memory_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = NULL;
+ map.key_ptr = NULL;
+ map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&map);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ /* count the suitable slots in each memory map entry */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ unsigned long slots;
+
+ slots = get_entry_num_slots(md, size, ilog2(align));
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ }
+
+ /* find a random number between 0 and total_slots */
+ target_slot = (total_slots * (u16)random_seed) >> 16;
+
+ /*
+ * target_slot is now a value in the range [0, total_slots), and so
+ * it corresponds with exactly one of the suitable slots we recorded
+ * when iterating over the memory map the first time around.
+ *
+ * So iterate over the memory map again, subtracting the number of
+ * slots of each entry at each iteration, until we have found the entry
+ * that covers our chosen slot. Use the residual value of target_slot
+ * to calculate the randomly chosen address, and allocate it directly
+ * using EFI_ALLOCATE_ADDRESS.
+ */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ efi_physical_addr_t target;
+ unsigned long pages;
+
+ if (target_slot >= MD_NUM_SLOTS(md)) {
+ target_slot -= MD_NUM_SLOTS(md);
+ continue;
+ }
+
+ target = round_up(md->phys_addr, align) + target_slot * align;
+ pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, pages, &target);
+ if (status == EFI_SUCCESS)
+ *addr = target;
+ break;
+ }
+
+ efi_bs_call(free_pool, memory_map);
+
+ return status;
+}
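
Worked example of the selection math: (total_slots * (u16)random_seed) >> 16 scales the 16-bit seed onto [0, total_slots), e.g. total_slots = 1000 and a seed of 0x8000 give 1000 * 32768 >> 16 = 500. For the slot count itself, a 256 MiB conventional-memory descriptor at 0x40000000 with size = 16 MiB and align = 2 MiB yields first_slot = 0x40000000, last_slot = 0x4f000000 and ((0x4f000000 - 0x40000000) >> 21) + 1 = 121 candidate slots.
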
diff --git a/drivers/firmware/efi/libstub/skip_spaces.c b/drivers/firmware/efi/libstub/skip_spaces.c
new file mode 100644
index 000000000000..a700b3c7f7d0
--- /dev/null
+++ b/drivers/firmware/efi/libstub/skip_spaces.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ctype.h>
+#include <linux/types.h>
+
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
diff --git a/drivers/firmware/efi/libstub/string.c b/drivers/firmware/efi/libstub/string.c
index ed10e3f602c5..1ac2f8764715 100644
--- a/drivers/firmware/efi/libstub/string.c
+++ b/drivers/firmware/efi/libstub/string.c
@@ -6,6 +6,7 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*/
+#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -56,3 +57,58 @@ int strncmp(const char *cs, const char *ct, size_t count)
return 0;
}
#endif
+
+/* Works only for digits and letters, but small and fast */
+#define TOLOWER(x) ((x) | 0x20)
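
The trick works because ASCII letters differ from their lowercase forms only in bit 0x20: 'A' (0x41) | 0x20 = 0x61 = 'a', while digits such as '7' (0x37) already have the bit set and pass through unchanged.
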
+
+static unsigned int simple_guess_base(const char *cp)
+{
+ if (cp[0] == '0') {
+ if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
+ return 16;
+ else
+ return 8;
+ } else {
+ return 10;
+ }
+}
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+{
+ unsigned long long result = 0;
+
+ if (!base)
+ base = simple_guess_base(cp);
+
+ if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+ cp += 2;
+
+ while (isxdigit(*cp)) {
+ unsigned int value;
+
+ value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+ if (value >= base)
+ break;
+ result = result * base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+
+ return result;
+}
+
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+ if (*cp == '-')
+ return -simple_strtoull(cp + 1, endp, base);
+
+ return simple_strtoull(cp, endp, base);
+}
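
A short usage sketch of the base auto-detection (expected values shown in comments):

	char *end;

	simple_strtoull("0x1f", &end, 0);	/* 31: "0x" selects base 16 */
	simple_strtoull("017", &end, 0);	/* 15: leading '0' selects octal */
	simple_strtol("-42", &end, 10);		/* -42 */
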
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 1d59e103a2e3..e9a684637b70 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -54,7 +54,7 @@ void efi_retrieve_tpm2_eventlog(void)
efi_status_t status;
efi_physical_addr_t log_location = 0, log_last_entry = 0;
struct linux_efi_tpm_eventlog *log_tbl = NULL;
- struct efi_tcg2_final_events_table *final_events_table;
+ struct efi_tcg2_final_events_table *final_events_table = NULL;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
efi_bool_t truncated;
@@ -127,7 +127,8 @@ void efi_retrieve_tpm2_eventlog(void)
* Figure out whether any events have already been logged to the
* final events structure, and if so how much space they take up
*/
- final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
+ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
int offset;
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
new file mode 100644
index 000000000000..f0339b5d3658
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* -----------------------------------------------------------------------
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+#include <asm/efi.h>
+#include <asm/e820/types.h>
+#include <asm/setup.h>
+#include <asm/desc.h>
+#include <asm/boot.h>
+
+#include "efistub.h"
+
+/* Maximum physical address for 64-bit kernel with 4-level paging */
+#define MAXMEM_X86_64_4LEVEL (1ull << 46)
+
+static efi_system_table_t *sys_table __efistub_global;
+extern const bool efi_is64;
+extern u32 image_offset;
+
+__pure efi_system_table_t *efi_system_table(void)
+{
+ return sys_table;
+}
+
+__attribute_const__ bool efi_is_64bit(void)
+{
+ if (IS_ENABLED(CONFIG_EFI_MIXED))
+ return efi_is64;
+ return IS_ENABLED(CONFIG_X86_64);
+}
+
+static efi_status_t
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+{
+ struct pci_setup_rom *rom = NULL;
+ efi_status_t status;
+ unsigned long size;
+ uint64_t romsize;
+ void *romimage;
+
+ /*
+ * Some firmware images contain EFI function pointers at the place where
+ * the romimage and romsize fields are supposed to be. Typically the EFI
+ * code is mapped at high addresses, translating to an unrealistically
+ * large romsize. The UEFI spec limits the size of option ROMs to 16
+ * MiB so we reject any ROMs over 16 MiB in size to catch this.
+ */
+ romimage = efi_table_attr(pci, romimage);
+ romsize = efi_table_attr(pci, romsize);
+ if (!romimage || !romsize || romsize > SZ_16M)
+ return EFI_INVALID_PARAMETER;
+
+ size = romsize + sizeof(*rom);
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&rom);
+ if (status != EFI_SUCCESS) {
+ efi_printk("Failed to allocate memory for 'rom'\n");
+ return status;
+ }
+
+ memset(rom, 0, sizeof(*rom));
+
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = romsize;
+ *__rom = rom;
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_VENDOR_ID, 1, &rom->vendor);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk("Failed to read rom->vendor\n");
+ goto free_struct;
+ }
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_DEVICE_ID, 1, &rom->devid);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk("Failed to read rom->devid\n");
+ goto free_struct;
+ }
+
+ status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
+ &rom->device, &rom->function);
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ memcpy(rom->romdata, romimage, romsize);
+ return status;
+
+free_struct:
+ efi_bs_call(free_pool, rom);
+ return status;
+}
+
+/*
+ * There's no way to return an informative status from this function,
+ * because any analysis (and printing of error messages) needs to be
+ * done directly at the EFI function call-site.
+ *
+ * For example, EFI_INVALID_PARAMETER could indicate a bug or maybe we
+ * just didn't find any PCI devices, but there's no way to tell outside
+ * the context of the call.
+ */
+static void setup_efi_pci(struct boot_params *params)
+{
+ efi_status_t status;
+ void **pci_handle = NULL;
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long size = 0;
+ struct setup_data *data;
+ efi_handle_t h;
+ int i;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &size, pci_handle);
+
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&pci_handle);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk("Failed to allocate memory for 'pci_handle'\n");
+ return;
+ }
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &size, pci_handle);
+ }
+
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ for_each_efi_handle(h, pci_handle, size, i) {
+ efi_pci_io_protocol_t *pci = NULL;
+ struct pci_setup_rom *rom;
+
+ status = efi_bs_call(handle_protocol, h, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = preserve_pci_rom_image(pci, &rom);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (data)
+ data->next = (unsigned long)rom;
+ else
+ params->hdr.setup_data = (unsigned long)rom;
+
+ data = (struct setup_data *)rom;
+ }
+
+free_handle:
+ efi_bs_call(free_pool, pci_handle);
+}
+
+static void retrieve_apple_device_properties(struct boot_params *boot_params)
+{
+ efi_guid_t guid = APPLE_PROPERTIES_PROTOCOL_GUID;
+ struct setup_data *data, *new;
+ efi_status_t status;
+ u32 size = 0;
+ apple_properties_protocol_t *p;
+
+ status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&p);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (efi_table_attr(p, version) != 0x10000) {
+ efi_printk("Unsupported properties proto version\n");
+ return;
+ }
+
+ efi_call_proto(p, get_all, NULL, &size);
+ if (!size)
+ return;
+
+ do {
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ size + sizeof(struct setup_data),
+ (void **)&new);
+ if (status != EFI_SUCCESS) {
+ efi_printk("Failed to allocate memory for 'properties'\n");
+ return;
+ }
+
+ status = efi_call_proto(p, get_all, new->data, &size);
+
+ if (status == EFI_BUFFER_TOO_SMALL)
+ efi_bs_call(free_pool, new);
+ } while (status == EFI_BUFFER_TOO_SMALL);
+
+ new->type = SETUP_APPLE_PROPERTIES;
+ new->len = size;
+ new->next = 0;
+
+ data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
+ if (!data) {
+ boot_params->hdr.setup_data = (unsigned long)new;
+ } else {
+ while (data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+ data->next = (unsigned long)new;
+ }
+}
+
+static const efi_char16_t apple[] = L"Apple";
+
+static void setup_quirks(struct boot_params *boot_params)
+{
+ efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
+ efi_table_attr(efi_system_table(), fw_vendor);
+
+ if (!memcmp(fw_vendor, apple, sizeof(apple))) {
+ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+ retrieve_apple_device_properties(boot_params);
+ }
+}
+
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
+static efi_status_t
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
+{
+ efi_status_t status;
+ u32 width, height;
+ void **uga_handle = NULL;
+ efi_uga_draw_protocol_t *uga = NULL, *first_uga;
+ efi_handle_t handle;
+ int i;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&uga_handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ uga_proto, NULL, &size, uga_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ height = 0;
+ width = 0;
+
+ first_uga = NULL;
+ for_each_efi_handle(handle, uga_handle, size, i) {
+ efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ u32 w, h, depth, refresh;
+ void *pciio;
+
+ status = efi_bs_call(handle_protocol, handle, uga_proto,
+ (void **)&uga);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pciio = NULL;
+ efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
+
+ status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
+ if (status == EFI_SUCCESS && (!first_uga || pciio)) {
+ width = w;
+ height = h;
+
+ /*
+ * Once we've found a UGA supporting PCIIO,
+ * don't bother looking any further.
+ */
+ if (pciio)
+ break;
+
+ first_uga = uga;
+ }
+ }
+
+ if (!width && !height)
+ goto free_handle;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_depth = 32;
+ si->lfb_width = width;
+ si->lfb_height = height;
+
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
+
+free_handle:
+ efi_bs_call(free_pool, uga_handle);
+
+ return status;
+}
+
+static void setup_graphics(struct boot_params *boot_params)
+{
+ efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+ struct screen_info *si;
+ efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+ efi_status_t status;
+ unsigned long size;
+ void **gop_handle = NULL;
+ void **uga_handle = NULL;
+
+ si = &boot_params->screen_info;
+ memset(si, 0, sizeof(*si));
+
+ size = 0;
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &graphics_proto, NULL, &size, gop_handle);
+ if (status == EFI_BUFFER_TOO_SMALL)
+ status = efi_setup_gop(si, &graphics_proto, size);
+
+ if (status != EFI_SUCCESS) {
+ size = 0;
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &uga_proto, NULL, &size, uga_handle);
+ if (status == EFI_BUFFER_TOO_SMALL)
+ setup_uga(si, &uga_proto, size);
+ }
+}
+
+
+static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
+{
+ efi_bs_call(exit, handle, status, 0, NULL);
+ for(;;)
+ asm("hlt");
+}
+
+void startup_32(struct boot_params *boot_params);
+
+void __noreturn efi_stub_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+
+/*
+ * Because the x86 boot code expects to be passed a boot_params we
+ * need to create one ourselves (usually the bootloader would create
+ * one for us).
+ */
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
+{
+ struct boot_params *boot_params;
+ struct setup_header *hdr;
+ efi_loaded_image_t *image;
+ void *image_base;
+ efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
+ int options_size = 0;
+ efi_status_t status;
+ char *cmdline_ptr;
+ unsigned long ramdisk_addr;
+ unsigned long ramdisk_size;
+
+ sys_table = sys_table_arg;
+
+ /* Check if we were booted by the EFI firmware */
+ if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ efi_exit(handle, EFI_INVALID_PARAMETER);
+
+ status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image);
+ if (status != EFI_SUCCESS) {
+ efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
+ efi_exit(handle, status);
+ }
+
+ image_base = efi_table_attr(image, image_base);
+ image_offset = (void *)startup_32 - image_base;
+
+ status = efi_allocate_pages(0x4000, (unsigned long *)&boot_params, ULONG_MAX);
+ if (status != EFI_SUCCESS) {
+ efi_printk("Failed to allocate lowmem for boot params\n");
+ efi_exit(handle, status);
+ }
+
+ memset(boot_params, 0x0, 0x4000);
+
+ hdr = &boot_params->hdr;
+
+ /* Copy the second sector to boot_params */
+ memcpy(&hdr->jump, image_base + 512, 512);
+
+ /*
+ * Fill out some of the header fields ourselves because the
+ * EFI firmware loader doesn't load the first sector.
+ */
+ hdr->root_flags = 1;
+ hdr->vid_mode = 0xffff;
+ hdr->boot_flag = 0xAA55;
+
+ hdr->type_of_loader = 0x21;
+
+ /* Convert unicode cmdline to ascii */
+ cmdline_ptr = efi_convert_cmdline(image, &options_size, ULONG_MAX);
+ if (!cmdline_ptr)
+ goto fail;
+
+ hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
+ /* Fill in upper bits of command line address, NOP on 32 bit */
+ boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
+
+ hdr->ramdisk_image = 0;
+ hdr->ramdisk_size = 0;
+
+ if (efi_is_native()) {
+ status = efi_parse_options(cmdline_ptr);
+ if (status != EFI_SUCCESS)
+ goto fail2;
+
+ if (!noinitrd()) {
+ status = efi_load_initrd(image, &ramdisk_addr,
+ &ramdisk_size,
+ hdr->initrd_addr_max,
+ ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ goto fail2;
+ hdr->ramdisk_image = ramdisk_addr & 0xffffffff;
+ hdr->ramdisk_size = ramdisk_size & 0xffffffff;
+ boot_params->ext_ramdisk_image = (u64)ramdisk_addr >> 32;
+ boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32;
+ }
+ }
+
+ efi_stub_entry(handle, sys_table, boot_params);
+ /* not reached */
+
+fail2:
+ efi_free(options_size, (unsigned long)cmdline_ptr);
+fail:
+ efi_free(0x4000, (unsigned long)boot_params);
+
+ efi_exit(handle, status);
+}
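
The boot_params layout splits 64-bit addresses across a legacy 32-bit field and an ext_* high-half field, as done for the command line and ramdisk above. A sketch of the round trip (the reassembly appears in efi_main() below):

	u64 addr64 = 0x123456789ULL, back;

	hdr->cmd_line_ptr = (u32)addr64;			/* 0x23456789 */
	boot_params->ext_cmd_line_ptr = (u32)(addr64 >> 32);	/* 0x1 */

	back = hdr->cmd_line_ptr |
	       ((u64)boot_params->ext_cmd_line_ptr << 32);	/* 0x123456789 */
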
+
+static void add_e820ext(struct boot_params *params,
+ struct setup_data *e820ext, u32 nr_entries)
+{
+ struct setup_data *data;
+
+ e820ext->type = SETUP_E820_EXT;
+ e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+ e820ext->next = 0;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ if (data)
+ data->next = (unsigned long)e820ext;
+ else
+ params->hdr.setup_data = (unsigned long)e820ext;
+}
+
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
+{
+ struct boot_e820_entry *entry = params->e820_table;
+ struct efi_info *efi = &params->efi_info;
+ struct boot_e820_entry *prev = NULL;
+ u32 nr_entries;
+ u32 nr_desc;
+ int i;
+
+ nr_entries = 0;
+ nr_desc = efi->efi_memmap_size / efi->efi_memdesc_size;
+
+ for (i = 0; i < nr_desc; i++) {
+ efi_memory_desc_t *d;
+ unsigned int e820_type = 0;
+ unsigned long m = efi->efi_memmap;
+
+#ifdef CONFIG_X86_64
+ m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
+ d = efi_early_memdesc_ptr(m, efi->efi_memdesc_size, i);
+ switch (d->type) {
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ case EFI_PAL_CODE:
+ e820_type = E820_TYPE_RESERVED;
+ break;
+
+ case EFI_UNUSABLE_MEMORY:
+ e820_type = E820_TYPE_UNUSABLE;
+ break;
+
+ case EFI_ACPI_RECLAIM_MEMORY:
+ e820_type = E820_TYPE_ACPI;
+ break;
+
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ if (efi_soft_reserve_enabled() &&
+ (d->attribute & EFI_MEMORY_SP))
+ e820_type = E820_TYPE_SOFT_RESERVED;
+ else
+ e820_type = E820_TYPE_RAM;
+ break;
+
+ case EFI_ACPI_MEMORY_NVS:
+ e820_type = E820_TYPE_NVS;
+ break;
+
+ case EFI_PERSISTENT_MEMORY:
+ e820_type = E820_TYPE_PMEM;
+ break;
+
+ default:
+ continue;
+ }
+
+ /* Merge adjacent mappings */
+ if (prev && prev->type == e820_type &&
+ (prev->addr + prev->size) == d->phys_addr) {
+ prev->size += d->num_pages << 12;
+ continue;
+ }
+
+ if (nr_entries == ARRAY_SIZE(params->e820_table)) {
+ u32 need = (nr_desc - i) * sizeof(struct e820_entry) +
+ sizeof(struct setup_data);
+
+ if (!e820ext || e820ext_size < need)
+ return EFI_BUFFER_TOO_SMALL;
+
+ /* boot_params map full, switch to e820 extended */
+ entry = (struct boot_e820_entry *)e820ext->data;
+ }
+
+ entry->addr = d->phys_addr;
+ entry->size = d->num_pages << PAGE_SHIFT;
+ entry->type = e820_type;
+ prev = entry++;
+ nr_entries++;
+ }
+
+ if (nr_entries > ARRAY_SIZE(params->e820_table)) {
+ u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_table);
+
+ add_e820ext(params, e820ext, nr_e820ext);
+ nr_entries -= nr_e820ext;
+ }
+
+ params->e820_entries = (u8)nr_entries;
+
+ return EFI_SUCCESS;
+}
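
Worked example of the "Merge adjacent mappings" branch: two EFI_CONVENTIONAL_MEMORY descriptors of 16 pages each at 0x100000 and 0x110000 both map to E820_TYPE_RAM, and 0x100000 + 16 * 4096 == 0x110000, so they collapse into a single 128 KiB E820_TYPE_RAM entry instead of consuming two of the limited boot_params e820 slots.
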
+
+static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
+ u32 *e820ext_size)
+{
+ efi_status_t status;
+ unsigned long size;
+
+ size = sizeof(struct setup_data) +
+ sizeof(struct e820_entry) * nr_desc;
+
+ if (*e820ext) {
+ efi_bs_call(free_pool, *e820ext);
+ *e820ext = NULL;
+ *e820ext_size = 0;
+ }
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)e820ext);
+ if (status == EFI_SUCCESS)
+ *e820ext_size = size;
+
+ return status;
+}
+
+static efi_status_t allocate_e820(struct boot_params *params,
+ struct setup_data **e820ext,
+ u32 *e820ext_size)
+{
+ unsigned long map_size, desc_size, map_key;
+ efi_status_t status;
+ __u32 nr_desc, desc_version;
+
+ /* Only need the size of the mem map and size of each mem descriptor */
+ map_size = 0;
+ status = efi_bs_call(get_memory_map, &map_size, NULL, &map_key,
+ &desc_size, &desc_version);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return (status != EFI_SUCCESS) ? status : EFI_UNSUPPORTED;
+
+ nr_desc = map_size / desc_size + EFI_MMAP_NR_SLACK_SLOTS;
+
+ if (nr_desc > ARRAY_SIZE(params->e820_table)) {
+ u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
+
+ status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+
+ return EFI_SUCCESS;
+}
+
+struct exit_boot_struct {
+ struct boot_params *boot_params;
+ struct efi_info *efi;
+};
+
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
+ void *priv)
+{
+ const char *signature;
+ struct exit_boot_struct *p = priv;
+
+ signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+ : EFI32_LOADER_SIGNATURE;
+ memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
+
+ p->efi->efi_systab = (unsigned long)efi_system_table();
+ p->efi->efi_memdesc_size = *map->desc_size;
+ p->efi->efi_memdesc_version = *map->desc_ver;
+ p->efi->efi_memmap = (unsigned long)*map->map;
+ p->efi->efi_memmap_size = *map->map_size;
+
+#ifdef CONFIG_X86_64
+ p->efi->efi_systab_hi = (unsigned long)efi_system_table() >> 32;
+ p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
+#endif
+
+ return EFI_SUCCESS;
+}
+
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
+{
+ unsigned long map_sz, key, desc_size, buff_size;
+ efi_memory_desc_t *mem_map;
+ struct setup_data *e820ext = NULL;
+ __u32 e820ext_size = 0;
+ efi_status_t status;
+ __u32 desc_version;
+ struct efi_boot_memmap map;
+ struct exit_boot_struct priv;
+
+ map.map = &mem_map;
+ map.map_size = &map_sz;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_version;
+ map.key_ptr = &key;
+ map.buff_size = &buff_size;
+ priv.boot_params = boot_params;
+ priv.efi = &boot_params->efi_info;
+
+ status = allocate_e820(boot_params, &e820ext, &e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Might as well exit boot services now */
+ status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Historic? */
+ boot_params->alt_mem_k = 32 * 1024;
+
+ status = setup_e820(boot_params, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return EFI_SUCCESS;
+}
+
+/*
+ * On success, we return the address of startup_32, which has potentially been
+ * relocated by efi_relocate_kernel.
+ * On failure, we exit to the firmware via efi_exit instead of returning.
+ */
+unsigned long efi_main(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params)
+{
+ unsigned long bzimage_addr = (unsigned long)startup_32;
+ unsigned long buffer_start, buffer_end;
+ struct setup_header *hdr = &boot_params->hdr;
+ efi_status_t status;
+ unsigned long cmdline_paddr;
+
+ sys_table = sys_table_arg;
+
+ /* Check if we were booted by the EFI firmware */
+ if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ efi_exit(handle, EFI_INVALID_PARAMETER);
+
+ /*
+ * If the kernel isn't already loaded at a suitable address,
+ * relocate it.
+ *
+ * It must be loaded above LOAD_PHYSICAL_ADDR.
+ *
+ * The maximum address for 64-bit is 1 << 46 for 4-level paging. This
+ * is defined as the macro MAXMEM, but unfortunately that is not a
+ * compile-time constant if 5-level paging is configured, so we instead
+ * define our own macro for use here.
+ *
+ * For 32-bit, the maximum address is complicated to figure out, for
+ * now use KERNEL_IMAGE_SIZE, which will be 512MiB, the same as what
+ * KASLR uses.
+ *
+ * Also relocate it if image_offset is zero, i.e. the kernel wasn't
+ * loaded by LoadImage, but rather by a bootloader that called the
+ * handover entry. The reason we must always relocate in this case is
+ * to handle the case of systemd-boot booting a unified kernel image,
+ * which is a PE executable that contains the bzImage and an initrd as
+ * COFF sections. The initrd section is placed after the bzImage
+ * without ensuring that there are at least init_size bytes available
+ * for the bzImage, and thus the compressed kernel's startup code may
+ * overwrite the initrd unless it is moved out of the way.
+ */
+
+ buffer_start = ALIGN(bzimage_addr - image_offset,
+ hdr->kernel_alignment);
+ buffer_end = buffer_start + hdr->init_size;
+
+ if ((buffer_start < LOAD_PHYSICAL_ADDR) ||
+ (IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) ||
+ (IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
+ (image_offset == 0)) {
+ status = efi_relocate_kernel(&bzimage_addr,
+ hdr->init_size, hdr->init_size,
+ hdr->pref_address,
+ hdr->kernel_alignment,
+ LOAD_PHYSICAL_ADDR);
+ if (status != EFI_SUCCESS) {
+ efi_printk("efi_relocate_kernel() failed!\n");
+ goto fail;
+ }
+ /*
+ * Now that we've copied the kernel elsewhere, we no longer
+ * have a setup block before startup_32(), so reset image_offset
+ * to zero in case it was set earlier.
+ */
+ image_offset = 0;
+ }
+
+ /*
+ * efi_pe_entry() may have been called before efi_main(), in which
+ * case this is the second time we parse the cmdline. This is ok;
+ * parsing the cmdline multiple times has no side effects.
+ */
+ cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ ((u64)boot_params->ext_cmd_line_ptr << 32));
+ efi_parse_options((char *)cmdline_paddr);
+
+ /*
+ * At this point, an initrd may already have been loaded, either by
+ * the bootloader and passed via bootparams, or loaded from a initrd=
+ * command line option by efi_pe_entry() above. In either case, we
+ * permit an initrd loaded from the LINUX_EFI_INITRD_MEDIA_GUID device
+ * path to supersede it.
+ */
+ if (!noinitrd()) {
+ unsigned long addr, size;
+
+ status = efi_load_initrd_dev_path(&addr, &size, ULONG_MAX);
+ if (status == EFI_SUCCESS) {
+ hdr->ramdisk_image = (u32)addr;
+ hdr->ramdisk_size = (u32)size;
+ boot_params->ext_ramdisk_image = (u64)addr >> 32;
+ boot_params->ext_ramdisk_size = (u64)size >> 32;
+ } else if (status != EFI_NOT_FOUND) {
+ efi_printk("efi_load_initrd_dev_path() failed!\n");
+ goto fail;
+ }
+ }
+
+ /*
+ * If the boot loader gave us a value for secure_boot then we use that,
+ * otherwise we ask the BIOS.
+ */
+ if (boot_params->secure_boot == efi_secureboot_mode_unset)
+ boot_params->secure_boot = efi_get_secureboot();
+
+ /* Ask the firmware to clear memory on unclean shutdown */
+ efi_enable_reset_attack_mitigation();
+
+ efi_random_get_seed();
+
+ efi_retrieve_tpm2_eventlog();
+
+ setup_graphics(boot_params);
+
+ setup_efi_pci(boot_params);
+
+ setup_quirks(boot_params);
+
+ status = exit_boot(boot_params, handle);
+ if (status != EFI_SUCCESS) {
+ efi_printk("exit_boot() failed!\n");
+ goto fail;
+ }
+
+ return bzimage_addr;
+fail:
+ efi_printk("efi_main() failed!\n");
+
+ efi_exit(handle, status);
+}
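A standalone sketch, not part of the patch, of the split-field convention efi_main() relies on above: 64-bit physical addresses are carried across pairs of legacy 32-bit boot_params fields (cmd_line_ptr/ext_cmd_line_ptr, ramdisk_image/ext_ramdisk_image) and reassembled by the consumer. The address value is invented for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x123456789aULL;        /* hypothetical initrd address */
        uint32_t ramdisk_image = (uint32_t)addr;                /* low 32 bits */
        uint32_t ext_ramdisk_image = (uint32_t)(addr >> 32);    /* high 32 bits */

        /* Reassembly, mirroring cmdline_paddr in efi_main() above: */
        uint64_t joined = ((uint64_t)ext_ramdisk_image << 32) | ramdisk_image;

        printf("%d\n", joined == addr); /* prints 1 */
        return 0;
}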
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 58452fde92cc..5737cb0fcd44 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -13,6 +13,7 @@
#include <asm/early_ioremap.h>
static int __initdata tbl_size;
+unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
/*
* Reserve the memory associated with the Memory Attributes configuration
@@ -22,13 +23,13 @@ int __init efi_memattr_init(void)
{
efi_memory_attributes_table_t *tbl;
- if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
+ if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
return 0;
- tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
+ tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));
if (!tbl) {
pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
- efi.mem_attr_table);
+ efi_mem_attr_table);
return -ENOMEM;
}
@@ -39,7 +40,7 @@ int __init efi_memattr_init(void)
}
tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
- memblock_reserve(efi.mem_attr_table, tbl_size);
+ memblock_reserve(efi_mem_attr_table, tbl_size);
set_bit(EFI_MEM_ATTR, &efi.flags);
unmap:
@@ -147,10 +148,10 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
return 0;
- tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
+ tbl = memremap(efi_mem_attr_table, tbl_size, MEMREMAP_WB);
if (!tbl) {
pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
- efi.mem_attr_table);
+ efi_mem_attr_table);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 7effff969eb9..73089a24f04b 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -15,7 +15,7 @@ void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
const char *str[] = { "cold", "warm", "shutdown", "platform" };
int efi_mode, cap_reset_mode;
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_RESET_SYSTEM))
return;
switch (reboot_mode) {
@@ -64,7 +64,7 @@ static void efi_power_off(void)
static int __init efi_shutdown_init(void)
{
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_RESET_SYSTEM))
return -ENODEV;
if (efi_poweroff_required()) {
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 65fffaa22210..1410beaef5c3 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -40,9 +40,9 @@
* code doesn't get too cluttered:
*/
#define efi_call_virt(f, args...) \
- efi_call_virt_pointer(efi.systab->runtime, f, args)
+ efi_call_virt_pointer(efi.runtime, f, args)
#define __efi_call_virt(f, args...) \
- __efi_call_virt_pointer(efi.systab->runtime, f, args)
+ __efi_call_virt_pointer(efi.runtime, f, args)
struct efi_runtime_work efi_rts_work;
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 31f9f0e369b9..c1955d320fec 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -16,7 +16,7 @@
int efi_tpm_final_log_size;
EXPORT_SYMBOL(efi_tpm_final_log_size);
-static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
+static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
{
struct tcg_pcr_event2_head *header;
int event_size, size = 0;
@@ -62,8 +62,11 @@ int __init efi_tpm_eventlog_init(void)
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
- if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR)
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+ log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
goto out;
+ }
final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 436d1776bc7b..5f2a4d162795 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -1071,7 +1071,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_iter_end);
* entry on the list. It is safe for @func to remove entries in the
* list via efivar_entry_delete().
*
- * You MUST call efivar_enter_iter_begin() before this function, and
+ * You MUST call efivar_entry_iter_begin() before this function, and
* efivar_entry_iter_end() afterwards.
*
* It is possible to begin iteration from an arbitrary entry within
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index af3ae0087de4..fb5523aa16ee 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -93,7 +93,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "kpp", IMX_SC_R_KPP, 1, false, 0 },
{ "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
{ "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
- { "mu_b", IMX_SC_R_MU_13B, 1, true, 13 },
+ { "mu_b", IMX_SC_R_MU_5B, 9, true, 5 },
/* CONN SS */
{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
@@ -109,6 +109,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
{ "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
{ "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
+ { "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 },
{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
{ "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
{ "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
@@ -116,7 +117,13 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
{ "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
{ "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
+ { "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 },
{ "sai", IMX_SC_R_SAI_0, 3, true, 0 },
+ { "sai3", IMX_SC_R_SAI_3, 1, false, 0 },
+ { "sai4", IMX_SC_R_SAI_4, 1, false, 0 },
+ { "sai5", IMX_SC_R_SAI_5, 1, false, 0 },
+ { "sai6", IMX_SC_R_SAI_6, 1, false, 0 },
+ { "sai7", IMX_SC_R_SAI_7, 1, false, 0 },
{ "amix", IMX_SC_R_AMIX, 1, false, 0 },
{ "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
{ "dsp", IMX_SC_R_DSP, 1, false, 0 },
@@ -158,6 +165,10 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* DC SS */
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+
+ /* CM40 SS */
+ { "cm40_i2c", IMX_SC_R_M4_0_I2C, 1, 0 },
+ { "cm40_intmux", IMX_SC_R_M4_0_INTMUX, 1, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 1d5b4d74f96d..2854b56f6e0b 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -44,6 +44,8 @@ static const struct meson_sm_chip gxbb_chip = {
CMD(SM_EFUSE_WRITE, 0x82000031),
CMD(SM_EFUSE_USER_MAX, 0x82000033),
CMD(SM_GET_CHIP_ID, 0x82000044),
+ CMD(SM_A1_PWRC_SET, 0x82000093),
+ CMD(SM_A1_PWRC_GET, 0x82000095),
{ /* sentinel */ },
},
};
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 4adeb7a2bdf5..715a45442d1c 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -80,6 +80,8 @@ setup_vga_console(struct pcdp_device *dev)
#endif
}
+extern unsigned long hcdp_phys;
+
int __init
efi_setup_pcdp_console(char *cmdline)
{
@@ -89,11 +91,11 @@ efi_setup_pcdp_console(char *cmdline)
int i, serial = 0;
int rc = -ENODEV;
- if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
+ if (hcdp_phys == EFI_INVALID_TABLE_ADDR)
return -ENODEV;
- pcdp = early_memremap(efi.hcdp, 4096);
- printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
+ pcdp = early_memremap(hcdp_phys, 4096);
+ printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, hcdp_phys);
if (strstr(cmdline, "console=hcdp")) {
if (pcdp->rev < 3)
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 6a445397771c..873841af8d57 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -84,7 +84,7 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
/* Try to power down all CPUs in the mask. */
for_each_cpu(cpu, cpus) {
- int ret = cpu_down(cpu);
+ int ret = remove_cpu(cpu);
/*
* cpu_down() checks the number of online CPUs before the TOS
@@ -116,7 +116,7 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
/* Try to power up all the CPUs that have been offlined. */
for_each_cpu(cpu, offlined_cpus) {
- int ret = cpu_up(cpu);
+ int ret = add_cpu(cpu);
if (ret != 0) {
pr_err("Error occurred (%d) while trying "
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 7ffb42b0775e..d5f0769f3761 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -966,6 +966,7 @@ EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
static const struct of_device_id stratix10_svc_drv_match[] = {
{.compatible = "intel,stratix10-svc"},
+ {.compatible = "intel,agilex-svc"},
{},
};
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
index a887731f50d6..1c8ba1f47c7c 100644
--- a/drivers/firmware/tegra/Kconfig
+++ b/drivers/firmware/tegra/Kconfig
@@ -7,7 +7,7 @@ config TEGRA_IVC
help
IVC (Inter-VM Communication) protocol is part of the IPC
(Inter Processor Communication) framework on Tegra. It maintains the
- data and the different commuication channels in SysRAM or RAM and
+ data and the different communication channels in SysRAM or RAM and
keeps the content in synchronization between host CPU and remote
processors.
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
index bd33bbf70daf..9a9bd190888e 100644
--- a/drivers/firmware/xilinx/Kconfig
+++ b/drivers/firmware/xilinx/Kconfig
@@ -6,6 +6,8 @@ menu "Zynq MPSoC Firmware Drivers"
config ZYNQMP_FIRMWARE
bool "Enable Xilinx Zynq MPSoC firmware interface"
+ depends on ARCH_ZYNQMP
+ default y if ARCH_ZYNQMP
select MFD_CORE
help
Firmware interface driver is used by different
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index c6d0724da4db..43bc6cfdab45 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -35,7 +35,7 @@ static struct pm_api_info pm_api_list[] = {
PM_API(PM_QUERY_DATA),
};
-struct dentry *firmware_debugfs_root;
+static struct dentry *firmware_debugfs_root;
/**
* zynqmp_pm_argument_value() - Extract argument value from a PM-API request
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index ecc339d846de..41b65164a367 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -512,6 +512,8 @@ static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
{
switch (ioctl_id) {
+ case IOCTL_SD_DLL_RESET:
+ case IOCTL_SET_SD_TAPDELAY:
case IOCTL_SET_PLL_FRAC_MODE:
case IOCTL_GET_PLL_FRAC_MODE:
case IOCTL_SET_PLL_FRAC_DATA:
@@ -707,6 +709,30 @@ static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
qos, ack, NULL);
}
+/**
+ * zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using
+ * AES-GCM core.
+ * @address: Address of the AesParams structure.
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
+ lower_32_bits(address),
+ 0, 0, ret_payload);
+ *out = ret_payload[1];
+
+ return ret;
+}
+
static const struct zynqmp_eemi_ops eemi_ops = {
.get_api_version = zynqmp_pm_get_api_version,
.get_chipid = zynqmp_pm_get_chipid,
@@ -730,6 +756,7 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.set_requirement = zynqmp_pm_set_requirement,
.fpga_load = zynqmp_pm_fpga_load,
.fpga_get_status = zynqmp_pm_fpga_get_status,
+ .aes = zynqmp_pm_aes_engine,
};
/**
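A hypothetical caller for the new .aes op, assuming <linux/firmware/xlnx-zynqmp.h>; zynqmp_pm_get_eemi_ops() is the existing lookup helper for this ops table, while the wrapper below and its error policy are illustrative only.

static int example_do_aes(u64 aes_params_addr)
{
        const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();
        u32 status;
        int ret;

        if (IS_ERR(ops))
                return PTR_ERR(ops);
        if (!ops->aes)
                return -ENOTSUPP;

        /* Firmware reads the AesParams structure at aes_params_addr. */
        ret = ops->aes(aes_params_addr, &status);
        if (ret)
                return ret;

        return status;  /* firmware status word from ret_payload[1] */
}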
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 89ca292236ad..538755062ab7 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -248,11 +248,13 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
return ret;
ret = pci_enable_sriov(pcidev, num_vfs);
- if (ret)
+ if (ret) {
dfl_fpga_cdev_config_ports_pf(cdev);
+ return ret;
+ }
}
- return ret;
+ return num_vfs;
}
static void cci_pci_remove(struct pci_dev *pcidev)
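The fix above follows the PCI core convention that a sriov_configure() callback returns the number of VFs actually enabled on success, or a negative errno. A minimal conforming skeleton (hypothetical driver, function name invented):

static int example_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
        int ret;

        if (num_vfs == 0) {
                pci_disable_sriov(pcidev);
                return 0;
        }

        ret = pci_enable_sriov(pcidev, num_vfs);
        if (ret)
                return ret;             /* negative errno on failure */

        return num_vfs;                 /* success: report VFs enabled */
}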
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index ee7765049607..07fa8d9ec675 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -583,7 +583,8 @@ static int zynq_fpga_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(dev, "ref_clk");
if (IS_ERR(priv->clk)) {
- dev_err(dev, "input clock not found\n");
+ if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
+ dev_err(dev, "input clock not found\n");
return PTR_ERR(priv->clk);
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b8013cf90064..1b96169d84f7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -394,13 +394,13 @@ config GPIO_MVEBU
config GPIO_MXC
def_bool y
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
config GPIO_MXS
def_bool y
- depends on ARCH_MXS
+ depends on ARCH_MXS || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
@@ -1399,6 +1399,13 @@ config GPIO_MLXBF
help
Say Y here if you want GPIO support on Mellanox BlueField SoC.
+config GPIO_MLXBF2
+ tristate "Mellanox BlueField 2 SoC GPIO"
+ depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
+ select GPIO_GENERIC
+ help
+ Say Y here if you want GPIO support on Mellanox BlueField 2 SoC.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on X86 || COMPILE_TEST
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 0b571264ddbc..b2cfc21a97f3 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
+obj-$(CONFIG_GPIO_MLXBF2) += gpio-mlxbf2.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index baee8c3f06ad..cf3687a7925f 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -625,7 +625,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_gpio->reg_base)) {
- ret = -ENXIO;
+ ret = PTR_ERR(kona_gpio->reg_base);
goto err_irq_domain;
}
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 05e3f99ae59c..fcfc1a1f1a5c 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -603,6 +603,49 @@ static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
.resume_noirq = brcmstb_gpio_resume,
};
+static void brcmstb_gpio_set_names(struct device *dev,
+ struct brcmstb_gpio_bank *bank)
+{
+ struct device_node *np = dev->of_node;
+ const char **names;
+ int nstrings, base;
+ unsigned int i;
+
+ base = bank->id * MAX_GPIO_PER_BANK;
+
+ nstrings = of_property_count_strings(np, "gpio-line-names");
+ if (nstrings <= base)
+ /* Line names not present */
+ return;
+
+ names = devm_kcalloc(dev, MAX_GPIO_PER_BANK, sizeof(*names),
+ GFP_KERNEL);
+ if (!names)
+ return;
+
+ /*
+ * Make sure to not index beyond the end of the number of descriptors
+ * of the GPIO device.
+ */
+ for (i = 0; i < bank->width; i++) {
+ const char *name;
+ int ret;
+
+ ret = of_property_read_string_index(np, "gpio-line-names",
+ base + i, &name);
+ if (ret) {
+ if (ret != -ENODATA)
+ dev_err(dev, "unable to name line %d: %d\n",
+ base + i, ret);
+ break;
+ }
+ if (*name)
+ names[i] = name;
+ }
+
+ bank->gc.names = names;
+}
+
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -726,6 +769,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
+ brcmstb_gpio_set_names(dev, bank);
err = gpiochip_add_data(gc, bank);
if (err) {
dev_err(dev, "Could not add gpiochip for bank %d\n",
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index e0b025689625..085b874db2a9 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -259,11 +259,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.of_gpio_n_cells = 2;
chips->chip.parent = dev;
chips->chip.of_node = dev->of_node;
-
- if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- chips->chip.request = gpiochip_generic_request;
- chips->chip.free = gpiochip_generic_free;
- }
+ chips->chip.request = gpiochip_generic_request;
+ chips->chip.free = gpiochip_generic_free;
#endif
spin_lock_init(&chips->lock);
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index bb287f35cf40..8c9757774010 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -569,6 +569,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
const struct sprd_eic_variant_data *pdata;
struct gpio_irq_chip *irq;
struct sprd_eic *sprd_eic;
+ struct resource *res;
int ret, i;
pdata = of_device_get_match_data(&pdev->dev);
@@ -595,9 +596,13 @@ static int sprd_eic_probe(struct platform_device *pdev)
* have one bank EIC, thus base[1] and base[2] can be
* optional.
*/
- sprd_eic->base[i] = devm_platform_ioremap_resource(pdev, i);
- if (IS_ERR(sprd_eic->base[i]))
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
continue;
+
+ sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sprd_eic->base[i]))
+ return PTR_ERR(sprd_eic->base[i]);
}
sprd_eic->chip.label = sprd_eic_label_name[sprd_eic->type];
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index da1ef0b1c291..b1accfba017d 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev)
mutex_init(&exar_gpio->lock);
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
- if (index < 0)
- goto err_destroy;
+ if (index < 0) {
+ ret = index;
+ goto err_mutex_destroy;
+ }
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
@@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
err_destroy:
ida_simple_remove(&ida_index, index);
+err_mutex_destroy:
mutex_destroy(&exar_gpio->lock);
return ret;
}
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
new file mode 100644
index 000000000000..da570e63589d
--- /dev/null
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/resource.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+/*
+ * There are 3 YU GPIO blocks:
+ * gpio[0]: HOST_GPIO0->HOST_GPIO31
+ * gpio[1]: HOST_GPIO32->HOST_GPIO63
+ * gpio[2]: HOST_GPIO64->HOST_GPIO69
+ */
+#define MLXBF2_GPIO_MAX_PINS_PER_BLOCK 32
+
+/*
+ * arm_gpio_lock register:
+ * bit[31] lock status: active if set
+ * bit[15:0] set lock
+ * The lock is enabled only if 0xd42f is written to this field
+ */
+#define YU_ARM_GPIO_LOCK_ADDR 0x2801088
+#define YU_ARM_GPIO_LOCK_SIZE 0x8
+#define YU_LOCK_ACTIVE_BIT(val) ((val) >> 31)
+#define YU_ARM_GPIO_LOCK_ACQUIRE 0xd42f
+#define YU_ARM_GPIO_LOCK_RELEASE 0x0
+
+/*
+ * gpio[x] block registers and their offset
+ */
+#define YU_GPIO_DATAIN 0x04
+#define YU_GPIO_MODE1 0x08
+#define YU_GPIO_MODE0 0x0c
+#define YU_GPIO_DATASET 0x14
+#define YU_GPIO_DATACLEAR 0x18
+#define YU_GPIO_MODE1_CLEAR 0x50
+#define YU_GPIO_MODE0_SET 0x54
+#define YU_GPIO_MODE0_CLEAR 0x58
+
+#ifdef CONFIG_PM
+struct mlxbf2_gpio_context_save_regs {
+ u32 gpio_mode0;
+ u32 gpio_mode1;
+};
+#endif
+
+/* BlueField-2 gpio block context structure. */
+struct mlxbf2_gpio_context {
+ struct gpio_chip gc;
+
+ /* YU GPIO blocks address */
+ void __iomem *gpio_io;
+
+#ifdef CONFIG_PM
+ struct mlxbf2_gpio_context_save_regs *csave_regs;
+#endif
+};
+
+/* BlueField-2 gpio shared structure. */
+struct mlxbf2_gpio_param {
+ void __iomem *io;
+ struct resource *res;
+ struct mutex *lock;
+};
+
+static struct resource yu_arm_gpio_lock_res = {
+ .start = YU_ARM_GPIO_LOCK_ADDR,
+ .end = YU_ARM_GPIO_LOCK_ADDR + YU_ARM_GPIO_LOCK_SIZE - 1,
+ .name = "YU_ARM_GPIO_LOCK",
+};
+
+static DEFINE_MUTEX(yu_arm_gpio_lock_mutex);
+
+static struct mlxbf2_gpio_param yu_arm_gpio_lock_param = {
+ .res = &yu_arm_gpio_lock_res,
+ .lock = &yu_arm_gpio_lock_mutex,
+};
+
+/* Request memory region and map yu_arm_gpio_lock resource */
+static int mlxbf2_gpio_get_lock_res(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ resource_size_t size;
+ int ret = 0;
+
+ mutex_lock(yu_arm_gpio_lock_param.lock);
+
+ /* Check if the memory map already exists */
+ if (yu_arm_gpio_lock_param.io)
+ goto exit;
+
+ res = yu_arm_gpio_lock_param.res;
+ size = resource_size(res);
+
+ if (!devm_request_mem_region(dev, res->start, size, res->name)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ yu_arm_gpio_lock_param.io = devm_ioremap(dev, res->start, size);
+ /* devm_ioremap() returns NULL on failure, not an ERR_PTR */
+ if (!yu_arm_gpio_lock_param.io)
+ ret = -ENOMEM;
+
+exit:
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
+
+ return ret;
+}
+
+/*
+ * Acquire the YU arm_gpio_lock to be able to change the direction
+ * mode. If the lock_active bit is already set, return an error.
+ */
+static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
+{
+ u32 arm_gpio_lock_val;
+
+ mutex_lock(yu_arm_gpio_lock_param.lock);
+ spin_lock(&gs->gc.bgpio_lock);
+
+ arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
+
+ /*
+ * When lock active bit[31] is set, ModeX is write enabled
+ */
+ if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
+ spin_unlock(&gs->gc.bgpio_lock);
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
+ return -EINVAL;
+ }
+
+ writel(YU_ARM_GPIO_LOCK_ACQUIRE, yu_arm_gpio_lock_param.io);
+
+ return 0;
+}
+
+/*
+ * Release the YU arm_gpio_lock after changing the direction mode.
+ */
+static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
+{
+ writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
+ spin_unlock(&gs->gc.bgpio_lock);
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
+}
+
+/*
+ * mode0 and mode1 are both locked by the gpio_lock field.
+ *
+ * Together, mode0 and mode1 define the gpio Mode depending also
+ * on Reg_DataOut.
+ *
+ * {mode1,mode0}:{Reg_DataOut=0,Reg_DataOut=1}->{DataOut=0,DataOut=1}
+ *
+ * {0,0}:Reg_DataOut{0,1}->{Z,Z} Input PAD
+ * {0,1}:Reg_DataOut{0,1}->{0,1} Full drive Output PAD
+ * {1,0}:Reg_DataOut{0,1}->{0,Z} 0-set PAD to low, 1-float
+ * {1,1}:Reg_DataOut{0,1}->{Z,1} 0-float, 1-set PAD to high
+ */
+
+/*
+ * Set input direction:
+ * {mode1,mode0} = {0,0}
+ */
+static int mlxbf2_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mlxbf2_gpio_context *gs = gpiochip_get_data(chip);
+ int ret;
+
+ /*
+ * Although the arm_gpio_lock was set in the probe function, check again
+ * if it is still enabled to be able to write to the ModeX registers.
+ */
+ ret = mlxbf2_gpio_lock_acquire(gs);
+ if (ret < 0)
+ return ret;
+
+ writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE0_CLEAR);
+ writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE1_CLEAR);
+
+ mlxbf2_gpio_lock_release(gs);
+
+ return ret;
+}
+
+/*
+ * Set output direction:
+ * {mode1,mode0} = {0,1}
+ */
+static int mlxbf2_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset,
+ int value)
+{
+ struct mlxbf2_gpio_context *gs = gpiochip_get_data(chip);
+ int ret = 0;
+
+ /*
+ * Although the arm_gpio_lock was set in the probe function,
+ * check again that it is still enabled to be able to write to the
+ * ModeX registers.
+ */
+ ret = mlxbf2_gpio_lock_acquire(gs);
+ if (ret < 0)
+ return ret;
+
+ writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE1_CLEAR);
+ writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE0_SET);
+
+ mlxbf2_gpio_lock_release(gs);
+
+ return ret;
+}
+
+/* BlueField-2 GPIO driver initialization routine. */
+static int
+mlxbf2_gpio_probe(struct platform_device *pdev)
+{
+ struct mlxbf2_gpio_context *gs;
+ struct device *dev = &pdev->dev;
+ struct gpio_chip *gc;
+ struct resource *res;
+ unsigned int npins;
+ int ret;
+
+ gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
+ if (!gs)
+ return -ENOMEM;
+
+#ifdef CONFIG_PM
+ /*
+ * Allocate the ModeX save area used by the suspend/resume
+ * callbacks below; without it they would dereference NULL.
+ */
+ gs->csave_regs = devm_kzalloc(dev, sizeof(*gs->csave_regs),
+ GFP_KERNEL);
+ if (!gs->csave_regs)
+ return -ENOMEM;
+#endif
+
+ /* YU GPIO block address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ gs->gpio_io = devm_ioremap(dev, res->start, resource_size(res));
+ if (!gs->gpio_io)
+ return -ENOMEM;
+
+ ret = mlxbf2_gpio_get_lock_res(pdev);
+ if (ret) {
+ dev_err(dev, "Failed to get yu_arm_gpio_lock resource\n");
+ return ret;
+ }
+
+ if (device_property_read_u32(dev, "npins", &npins))
+ npins = MLXBF2_GPIO_MAX_PINS_PER_BLOCK;
+
+ gc = &gs->gc;
+
+ ret = bgpio_init(gc, dev, 4,
+ gs->gpio_io + YU_GPIO_DATAIN,
+ gs->gpio_io + YU_GPIO_DATASET,
+ gs->gpio_io + YU_GPIO_DATACLEAR,
+ NULL,
+ NULL,
+ 0);
+ if (ret) {
+ dev_err(dev, "bgpio_init() failed\n");
+ return ret;
+ }
+
+ gc->direction_input = mlxbf2_gpio_direction_input;
+ gc->direction_output = mlxbf2_gpio_direction_output;
+ gc->ngpio = npins;
+ gc->owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, gs);
+
+ ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ if (ret) {
+ dev_err(dev, "Failed adding memory mapped gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mlxbf2_gpio_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct mlxbf2_gpio_context *gs = platform_get_drvdata(pdev);
+
+ gs->csave_regs->gpio_mode0 = readl(gs->gpio_io +
+ YU_GPIO_MODE0);
+ gs->csave_regs->gpio_mode1 = readl(gs->gpio_io +
+ YU_GPIO_MODE1);
+
+ return 0;
+}
+
+static int mlxbf2_gpio_resume(struct platform_device *pdev)
+{
+ struct mlxbf2_gpio_context *gs = platform_get_drvdata(pdev);
+
+ writel(gs->csave_regs->gpio_mode0, gs->gpio_io +
+ YU_GPIO_MODE0);
+ writel(gs->csave_regs->gpio_mode1, gs->gpio_io +
+ YU_GPIO_MODE1);
+
+ return 0;
+}
+#endif
+
+static const struct acpi_device_id mlxbf2_gpio_acpi_match[] = {
+ { "MLNXBF22", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf2_gpio_acpi_match);
+
+static struct platform_driver mlxbf2_gpio_driver = {
+ .driver = {
+ .name = "mlxbf2_gpio",
+ .acpi_match_table = ACPI_PTR(mlxbf2_gpio_acpi_match),
+ },
+ .probe = mlxbf2_gpio_probe,
+#ifdef CONFIG_PM
+ .suspend = mlxbf2_gpio_suspend,
+ .resume = mlxbf2_gpio_resume,
+#endif
+};
+
+module_platform_driver(mlxbf2_gpio_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField-2 GPIO Driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL v2");
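A standalone model, not kernel code, of the arm_gpio_lock handshake the driver above implements: bit 31 reports the lock as held, writing 0xd42f acquires it, and writing 0 releases it. That the hardware reflects a successful acquisition in bit 31 is an assumption taken from the driver's comment block; the MMIO register is modeled as a plain variable.

#include <stdint.h>
#include <stdio.h>

#define LOCK_ACTIVE_BIT(val)    ((val) >> 31)
#define LOCK_ACQUIRE            0xd42fu
#define LOCK_RELEASE            0x0u

static uint32_t lock_reg;       /* stands in for the MMIO register */

static int lock_acquire(void)
{
        if (LOCK_ACTIVE_BIT(lock_reg))
                return -1;      /* another agent holds the lock */

        /* Assumed: hardware sets bit 31 once 0xd42f is written. */
        lock_reg = (1u << 31) | LOCK_ACQUIRE;
        return 0;
}

static void lock_release(void)
{
        lock_reg = LOCK_RELEASE;
}

int main(void)
{
        printf("%d\n", lock_acquire());  /* 0: acquired */
        printf("%d\n", lock_acquire());  /* -1: already held */
        lock_release();
        printf("%d\n", lock_acquire());  /* 0: acquired again */
        return 0;
}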
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f729e3e9e983..b778f33cc6af 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -389,12 +389,10 @@ static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
return GPIO_LINE_DIRECTION_IN;
}
-static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
- gc->set(gc, gpio, val);
-
spin_lock_irqsave(&gc->bgpio_lock, flags);
gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
@@ -405,7 +403,21 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
+ int val)
+{
+ bgpio_dir_out(gc, gpio, val);
+ gc->set(gc, gpio, val);
+ return 0;
+}
+
+static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
+ int val)
+{
+ gc->set(gc, gpio, val);
+ bgpio_dir_out(gc, gpio, val);
return 0;
}
@@ -538,7 +550,10 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
if (dirout || dirin) {
gc->reg_dir_out = dirout;
gc->reg_dir_in = dirin;
- gc->direction_output = bgpio_dir_out;
+ if (flags & BGPIOF_NO_SET_ON_INPUT)
+ gc->direction_output = bgpio_dir_out_dir_first;
+ else
+ gc->direction_output = bgpio_dir_out_val_first;
gc->direction_input = bgpio_dir_in;
gc->get_direction = bgpio_get_dir;
} else {
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 7d343bea784a..3eb94f3740d1 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -171,7 +171,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
/* Change the value unless we're actively driving the line. */
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
- !test_bit(FLAG_IS_OUT, &desc->flags))
+ !test_bit(FLAG_IS_OUT, &desc->flags))
__gpio_mockup_set(chip, offset, value);
out:
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index b992321bb852..82fb20dca53a 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -227,8 +227,8 @@ mediatek_gpio_bank_probe(struct device *dev,
ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
- ret = bgpio_init(&rg->chip, dev, 4,
- dat, set, ctrl, diro, NULL, 0);
+ ret = bgpio_init(&rg->chip, dev, 4, dat, set, ctrl, diro, NULL,
+ BGPIOF_NO_SET_ON_INPUT);
if (ret) {
dev_err(dev, "bgpio_init() failed\n");
return ret;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index d2b999c7987f..bd65114eb170 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -782,6 +782,15 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
"marvell,armada-370-gpio"))
return 0;
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+ * for the first two GPIO chips. So if the resource is missing
+ * we can't treat it as an error.
+ */
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
+ return 0;
+
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
@@ -804,12 +813,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
- /*
- * There are only two sets of PWM configuration registers for
- * all the GPIO lines on those SoCs which this driver reserves
- * for the first two GPIO chips. So if the resource is missing
- * we can't treat it as an error.
- */
mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
if (IS_ERR(mvpwm->membase))
return PTR_ERR(mvpwm->membase);
@@ -1247,7 +1250,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
* pins.
*/
for (i = 0; i < 4; i++) {
- int irq = platform_get_irq(pdev, i);
+ int irq = platform_get_irq_optional(pdev, i);
if (irq < 0)
continue;
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index c77d474185f3..64278a4756f0 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -485,11 +485,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (err)
goto out_bgio;
- if (of_property_read_bool(np, "gpio-ranges")) {
- port->gc.request = gpiochip_generic_request;
- port->gc.free = gpiochip_generic_free;
- }
-
+ port->gc.request = gpiochip_generic_request;
+ port->gc.free = gpiochip_generic_free;
port->gc.to_irq = mxc_gpio_to_irq;
port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
pdev->id * 32;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 3bd8adaeed9e..b8e2ecc3eade 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1102,23 +1102,13 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
void __iomem *base = bank->base;
- u32 mask, nowake;
+ u32 nowake;
bank->saved_datain = readl_relaxed(base + bank->regs->datain);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
- /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
- mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
- mask &= ~bank->context.risingdetect;
- bank->saved_datain |= mask;
-
- /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
- mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
- mask &= ~bank->context.fallingdetect;
- bank->saved_datain &= ~mask;
-
if (!may_lose_context)
goto update_gpio_context_count;
@@ -1237,26 +1227,35 @@ static int gpio_omap_cpu_notifier(struct notifier_block *nb,
{
struct gpio_bank *bank;
unsigned long flags;
+ int ret = NOTIFY_OK;
+ u32 isr, mask;
bank = container_of(nb, struct gpio_bank, nb);
raw_spin_lock_irqsave(&bank->lock, flags);
+ if (bank->is_suspended)
+ goto out_unlock;
+
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
- if (bank->is_suspended)
+ mask = omap_get_gpio_irqbank_mask(bank);
+ isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
+ if (isr) {
+ ret = NOTIFY_BAD;
break;
+ }
omap_gpio_idle(bank, true);
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
- if (bank->is_suspended)
- break;
omap_gpio_unidle(bank);
break;
}
+
+out_unlock:
raw_spin_unlock_irqrestore(&bank->lock, flags);
- return NOTIFY_OK;
+ return ret;
}
static const struct omap_gpio_reg_offs omap2_gpio_regs = {
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 5638b4e5355f..4269ea9a817e 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -531,7 +531,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- switch (config) {
+ switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
return pca953x_gpio_set_pull_up_down(chip, offset, config);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 5df7782e348f..e241fb884c12 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -298,11 +298,8 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(pl061->base);
raw_spin_lock_init(&pl061->lock);
- if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- pl061->gc.request = gpiochip_generic_request;
- pl061->gc.free = gpiochip_generic_free;
- }
-
+ pl061->gc.request = gpiochip_generic_request;
+ pl061->gc.free = gpiochip_generic_free;
pl061->gc.base = -1;
pl061->gc.get_direction = pl061_get_direction;
pl061->gc.direction_input = pl061_direction_input;
@@ -326,10 +323,8 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
writeb(0, pl061->base + GPIOIE); /* disable irqs */
irq = adev->irq[0];
- if (irq < 0) {
- dev_err(&adev->dev, "invalid IRQ\n");
- return -ENODEV;
- }
+ if (!irq)
+ dev_warn(&adev->dev, "IRQ support disabled\n");
pl061->parent_irq = irq;
girq = &pl061->gc.irq;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 9888b62f37af..0cb6600b8eee 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -361,11 +361,8 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
-
- if (pxa_gpio_has_pinctrl()) {
- pchip->chip.request = gpiochip_generic_request;
- pchip->chip.free = gpiochip_generic_free;
- }
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
@@ -652,8 +649,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (!pchip->irqdomain)
return -ENOMEM;
- irq0 = platform_get_irq_byname(pdev, "gpio0");
- irq1 = platform_get_irq_byname(pdev, "gpio1");
+ irq0 = platform_get_irq_byname_optional(pdev, "gpio0");
+ irq1 = platform_get_irq_byname_optional(pdev, "gpio1");
irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
|| (irq_mux <= 0))
@@ -663,8 +660,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
pchip->irq1 = irq1;
gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!gpio_reg_base)
- return -EINVAL;
+ if (IS_ERR(gpio_reg_base))
+ return PTR_ERR(gpio_reg_base);
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index f800b250971c..7284473c9fe3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -116,7 +116,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
spin_lock_irqsave(&p->lock, flags);
- /* Configure postive or negative logic in POSNEG */
+ /* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
/* Configure edge or level trigger in EDGLEVEL */
@@ -228,7 +228,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
spin_lock_irqsave(&p->lock, flags);
- /* Configure postive logic in POSNEG */
+ /* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
/* Select "General Input/Output Mode" in IOINTSEL */
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 311f66757b92..26e1fe092304 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -15,7 +15,7 @@ struct gpio_siox_ddata {
u8 setdata[1];
u8 getdata[3];
- spinlock_t irqlock;
+ raw_spinlock_t irqlock;
u32 irq_enable;
u32 irq_status;
u32 irq_type[20];
@@ -44,7 +44,7 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
mutex_lock(&ddata->lock);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock_irq(&ddata->irqlock);
for (offset = 0; offset < 12; ++offset) {
unsigned int bitpos = 11 - offset;
@@ -66,7 +66,7 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
trigger = ddata->irq_status & ddata->irq_enable;
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock_irq(&ddata->irqlock);
ddata->getdata[0] = buf[0];
ddata->getdata[1] = buf[1];
@@ -84,9 +84,9 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
* handler of the irq chip. But it doesn't, so we have
* to clean the irq_status here.
*/
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock_irq(&ddata->irqlock);
ddata->irq_status &= ~(1 << offset);
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock_irq(&ddata->irqlock);
handle_nested_irq(irq);
}
@@ -101,9 +101,9 @@ static void gpio_siox_irq_ack(struct irq_data *d)
struct gpio_siox_ddata *ddata =
container_of(ic, struct gpio_siox_ddata, ichip);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock(&ddata->irqlock);
ddata->irq_status &= ~(1 << d->hwirq);
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock(&ddata->irqlock);
}
static void gpio_siox_irq_mask(struct irq_data *d)
@@ -112,9 +112,9 @@ static void gpio_siox_irq_mask(struct irq_data *d)
struct gpio_siox_ddata *ddata =
container_of(ic, struct gpio_siox_ddata, ichip);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock(&ddata->irqlock);
ddata->irq_enable &= ~(1 << d->hwirq);
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock(&ddata->irqlock);
}
static void gpio_siox_irq_unmask(struct irq_data *d)
@@ -123,9 +123,9 @@ static void gpio_siox_irq_unmask(struct irq_data *d)
struct gpio_siox_ddata *ddata =
container_of(ic, struct gpio_siox_ddata, ichip);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock(&ddata->irqlock);
ddata->irq_enable |= 1 << d->hwirq;
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock(&ddata->irqlock);
}
static int gpio_siox_irq_set_type(struct irq_data *d, u32 type)
@@ -134,9 +134,9 @@ static int gpio_siox_irq_set_type(struct irq_data *d, u32 type)
struct gpio_siox_ddata *ddata =
container_of(ic, struct gpio_siox_ddata, ichip);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock(&ddata->irqlock);
ddata->irq_type[d->hwirq] = type;
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock(&ddata->irqlock);
return 0;
}
@@ -222,7 +222,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
dev_set_drvdata(dev, ddata);
mutex_init(&ddata->lock);
- spin_lock_init(&ddata->irqlock);
+ raw_spin_lock_init(&ddata->irqlock);
ddata->gchip.base = -1;
ddata->gchip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index acb99eff9939..86568154cdb3 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -368,6 +368,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
struct tegra_gpio_info *tgi = bank->tgi;
unsigned int gpio = d->hwirq;
+ tegra_gpio_irq_mask(d);
gpiochip_unlock_as_irq(&tgi->gc, gpio);
}
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index de241263d4be..79b553dc39a3 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -58,11 +58,20 @@ struct tegra_gpio_port {
unsigned int pins;
};
+struct tegra186_pin_range {
+ unsigned int offset;
+ const char *group;
+};
+
struct tegra_gpio_soc {
const struct tegra_gpio_port *ports;
unsigned int num_ports;
const char *name;
unsigned int instance;
+
+ const struct tegra186_pin_range *pin_ranges;
+ unsigned int num_pin_ranges;
+ const char *pinmux;
};
struct tegra_gpio {
@@ -254,6 +263,50 @@ static int tegra186_gpio_set_config(struct gpio_chip *chip,
return 0;
}
+static int tegra186_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ struct pinctrl_dev *pctldev;
+ struct device_node *np;
+ unsigned int i, j;
+ int err;
+
+ if (!gpio->soc->pinmux || gpio->soc->num_pin_ranges == 0)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, gpio->soc->pinmux);
+ if (!np)
+ return -ENODEV;
+
+ pctldev = of_pinctrl_get(np);
+ of_node_put(np);
+ if (!pctldev)
+ return -EPROBE_DEFER;
+
+ for (i = 0; i < gpio->soc->num_pin_ranges; i++) {
+ unsigned int pin = gpio->soc->pin_ranges[i].offset, port;
+ const char *group = gpio->soc->pin_ranges[i].group;
+
+ port = pin / 8;
+ pin = pin % 8;
+
+ if (port >= gpio->soc->num_ports) {
+ dev_warn(chip->parent, "invalid port %u for %s\n",
+ port, group);
+ continue;
+ }
+
+ for (j = 0; j < port; j++)
+ pin += gpio->soc->ports[j].pins;
+
+ err = gpiochip_add_pingroup_range(chip, pctldev, pin, group);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec,
u32 *flags)
@@ -578,12 +631,15 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.label = gpio->soc->name;
gpio->gpio.parent = &pdev->dev;
+ gpio->gpio.request = gpiochip_generic_request;
+ gpio->gpio.free = gpiochip_generic_free;
gpio->gpio.get_direction = tegra186_gpio_get_direction;
gpio->gpio.direction_input = tegra186_gpio_direction_input;
gpio->gpio.direction_output = tegra186_gpio_direction_output;
gpio->gpio.get = tegra186_gpio_get,
gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
+ gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
gpio->gpio.base = -1;
@@ -783,11 +839,19 @@ static const struct tegra_gpio_port tegra194_main_ports[] = {
TEGRA194_MAIN_GPIO_PORT(GG, 0, 0, 2)
};
+static const struct tegra186_pin_range tegra194_main_pin_ranges[] = {
+ { TEGRA194_MAIN_GPIO(GG, 0), "pex_l5_clkreq_n_pgg0" },
+ { TEGRA194_MAIN_GPIO(GG, 1), "pex_l5_rst_n_pgg1" },
+};
+
static const struct tegra_gpio_soc tegra194_main_soc = {
.num_ports = ARRAY_SIZE(tegra194_main_ports),
.ports = tegra194_main_ports,
.name = "tegra194-gpio",
.instance = 0,
+ .num_pin_ranges = ARRAY_SIZE(tegra194_main_pin_ranges),
+ .pin_ranges = tegra194_main_pin_ranges,
+ .pinmux = "nvidia,tegra194-pinmux",
};
#define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \
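A standalone sketch of the translation performed by tegra186_gpio_add_pin_ranges() above: an SoC-level pin offset encodes port = offset / 8 and line = offset % 8, and the chip-relative GPIO number is that line plus the widths of all preceding ports. The port widths below are invented for illustration.

#include <stdio.h>

static const unsigned int port_pins[] = { 8, 7, 8, 6 }; /* hypothetical */

static unsigned int soc_offset_to_gpio(unsigned int offset)
{
        unsigned int port = offset / 8; /* which port */
        unsigned int pin = offset % 8;  /* line within the port */
        unsigned int j;

        for (j = 0; j < port; j++)
                pin += port_pins[j];    /* skip preceding ports */

        return pin;
}

int main(void)
{
        /* Port 2, line 1 -> 8 + 7 + 1 = 16 */
        printf("%u\n", soc_offset_to_gpio(17));
        return 0;
}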
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 7ec97499b7f7..f99f3c10bed0 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -30,7 +30,7 @@ struct uniphier_gpio_priv {
struct irq_domain *domain;
void __iomem *regs;
spinlock_t lock;
- u32 saved_vals[0];
+ u32 saved_vals[];
};
static unsigned int uniphier_gpio_bank_to_reg(unsigned int bank)
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 74913f2e5697..1cbce5990855 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -57,16 +57,19 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
{
struct wcd_gpio_data *data = gpiochip_get_data(chip);
- int value;
+ unsigned int value;
regmap_read(data->map, WCD_REG_VAL_CTL_OFFSET, &value);
- return !!(value && WCD_PIN_MASK(pin));
+ return !!(value & WCD_PIN_MASK(pin));
}
static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
{
- wcd_gpio_direction_output(chip, pin, val);
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+ regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ WCD_PIN_MASK(pin), val ? WCD_PIN_MASK(pin) : 0);
}
static int wcd_gpio_probe(struct platform_device *pdev)
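The one-character change in wcd_gpio_get() above replaces a logical AND with a bitwise AND. A standalone illustration of why `value && WCD_PIN_MASK(pin)` was wrong: logical && is true for any pair of nonzero operands, regardless of whether the pin's bit is actually set.

#include <stdio.h>

int main(void)
{
        unsigned int value = 0x2;       /* pin 1 set */
        unsigned int mask  = 0x4;       /* testing pin 2 */

        printf("&&: %d\n", !!(value && mask));  /* 1 -- wrong answer */
        printf("&:  %d\n", !!(value & mask));   /* 0 -- correct */
        return 0;
}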
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 98cbaf0e415e..64bfb722756a 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -226,13 +226,11 @@ static int zx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- raw_spin_lock_init(&chip->lock);
- if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- chip->gc.request = gpiochip_generic_request;
- chip->gc.free = gpiochip_generic_free;
- }
-
id = of_alias_get_id(dev->of_node, "gpio");
+
+ raw_spin_lock_init(&chip->lock);
+ chip->gc.request = gpiochip_generic_request;
+ chip->gc.free = gpiochip_generic_free;
chip->gc.direction_input = zx_direction_input;
chip->gc.direction_output = zx_direction_output;
chip->gc.get = zx_get_value;
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 72b6001c56ef..5c91c4365da1 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -478,3 +478,49 @@ void devm_gpio_free(struct device *dev, unsigned int gpio)
&gpio));
}
EXPORT_SYMBOL_GPL(devm_gpio_free);
+
+static void devm_gpio_chip_release(struct device *dev, void *res)
+{
+ struct gpio_chip *gc = *(struct gpio_chip **)res;
+
+ gpiochip_remove(gc);
+}
+
+/**
+ * devm_gpiochip_add_data() - Resource managed gpiochip_add_data()
+ * @dev: pointer to the device that gpio_chip belongs to.
+ * @gc: the GPIO chip to register
+ * @data: driver-private data associated with this chip
+ *
+ * Context: potentially before irqs will work
+ *
+ * The gpio chip will automatically be released when the device is unbound.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * gc->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
+ */
+int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc,
+ void *data)
+{
+ struct gpio_chip **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = gpiochip_add_data(gc, data);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = gc;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_gpiochip_add_data);
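A hedged usage sketch for the devres variant added above; the driver, its private structure, and the chip parameters are invented for illustration.

struct example_gpio {
        struct gpio_chip gc;
};

static int example_probe(struct platform_device *pdev)
{
        struct example_gpio *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->gc.label = dev_name(&pdev->dev);
        priv->gc.parent = &pdev->dev;
        priv->gc.base = -1;     /* dynamic base allocation */
        priv->gc.ngpio = 8;

        /*
         * No remove() counterpart needed: the chip is removed
         * automatically when the device is unbound.
         */
        return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
}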
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index c6d30f73df07..ccc449df3792 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -605,6 +605,39 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
}
/**
+ * of_gpiochip_add_hog - Add all hogs in a hog device node
+ * @chip: gpio chip to act on
+ * @hog: device node describing the hogs
+ *
+ * Returns an error if it fails, otherwise 0 on success.
+ */
+static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog)
+{
+ enum gpiod_flags dflags;
+ struct gpio_desc *desc;
+ unsigned long lflags;
+ const char *name;
+ unsigned int i;
+ int ret;
+
+ for (i = 0;; i++) {
+ desc = of_parse_own_gpio(hog, chip, i, &name, &lflags, &dflags);
+ if (IS_ERR(desc))
+ break;
+
+ ret = gpiod_hog(desc, name, lflags, dflags);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_OF_DYNAMIC
+ desc->hog = hog;
+#endif
+ }
+
+ return 0;
+}
+
+/**
* of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
* @chip: gpio chip to act on
*
@@ -614,35 +647,109 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*/
static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
{
- struct gpio_desc *desc = NULL;
struct device_node *np;
- const char *name;
- unsigned long lflags;
- enum gpiod_flags dflags;
- unsigned int i;
int ret;
for_each_available_child_of_node(chip->of_node, np) {
if (!of_property_read_bool(np, "gpio-hog"))
continue;
- for (i = 0;; i++) {
- desc = of_parse_own_gpio(np, chip, i, &name, &lflags,
- &dflags);
- if (IS_ERR(desc))
- break;
-
- ret = gpiod_hog(desc, name, lflags, dflags);
- if (ret < 0) {
- of_node_put(np);
- return ret;
- }
+ ret = of_gpiochip_add_hog(chip, np);
+ if (ret < 0) {
+ of_node_put(np);
+ return ret;
}
+
+ of_node_set_flag(np, OF_POPULATED);
}
return 0;
}
+#ifdef CONFIG_OF_DYNAMIC
+/**
+ * of_gpiochip_remove_hog - Remove all hogs in a hog device node
+ * @chip: gpio chip to act on
+ * @hog: device node describing the hogs
+ */
+static void of_gpiochip_remove_hog(struct gpio_chip *chip,
+ struct device_node *hog)
+{
+ struct gpio_desc *descs = chip->gpiodev->descs;
+ unsigned int i;
+
+ for (i = 0; i < chip->ngpio; i++) {
+ if (test_bit(FLAG_IS_HOGGED, &descs[i].flags) &&
+ descs[i].hog == hog)
+ gpiochip_free_own_desc(&descs[i]);
+ }
+}
+
+static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+{
+ return chip->gpiodev->dev.of_node == data;
+}
+
+static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+{
+ return gpiochip_find(np, of_gpiochip_match_node);
+}
+
+static int of_gpio_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct gpio_chip *chip;
+ int ret;
+
+ /*
+ * This only supports adding and removing complete gpio-hog nodes.
+ * Modifying an existing gpio-hog node is not supported (except for
+ * changing its "status" property, which is treated the same as
+ * addition/removal).
+ */
+ switch (of_reconfig_get_state_change(action, arg)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ if (!of_property_read_bool(rd->dn, "gpio-hog"))
+ return NOTIFY_OK; /* not for us */
+
+ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
+ chip = of_find_gpiochip_by_node(rd->dn->parent);
+ if (chip == NULL)
+ return NOTIFY_OK; /* not for us */
+
+ ret = of_gpiochip_add_hog(chip, rd->dn);
+ if (ret < 0) {
+ pr_err("%s: failed to add hogs for %pOF\n", __func__,
+ rd->dn);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
+ return notifier_from_errno(ret);
+ }
+ break;
+
+ case OF_RECONFIG_CHANGE_REMOVE:
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK; /* already depopulated */
+
+ chip = of_find_gpiochip_by_node(rd->dn->parent);
+ if (chip == NULL)
+ return NOTIFY_OK; /* not for us */
+
+ of_gpiochip_remove_hog(chip, rd->dn);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+struct notifier_block gpio_of_notifier = {
+ .notifier_call = of_gpio_notify,
+};
+#endif /* CONFIG_OF_DYNAMIC */
+
/**
* of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 9768831b1fe2..ed26664f1537 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -35,4 +35,6 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
}
#endif /* CONFIG_OF_GPIO */
+extern struct notifier_block gpio_of_notifier;
+
#endif /* GPIOLIB_OF_H */
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 00fb91feba70..c14f0784274a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -81,14 +81,14 @@ LIST_HEAD(gpio_devices);
static DEFINE_MUTEX(gpio_machine_hogs_mutex);
static LIST_HEAD(gpio_machine_hogs);
-static void gpiochip_free_hogs(struct gpio_chip *chip);
-static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+static void gpiochip_free_hogs(struct gpio_chip *gc);
+static int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
-static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip);
-static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
-static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
+static void gpiochip_irqchip_remove(struct gpio_chip *gc);
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gc);
+static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc);
+static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc);
static bool gpiolib_initialized;
@@ -132,23 +132,24 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
/**
* gpiochip_get_desc - get the GPIO descriptor corresponding to the given
* hardware number for this chip
- * @chip: GPIO chip
+ * @gc: GPIO chip
* @hwnum: hardware number of the GPIO for this chip
*
* Returns:
- * A pointer to the GPIO descriptor or %ERR_PTR(-EINVAL) if no GPIO exists
+ * A pointer to the GPIO descriptor or ``ERR_PTR(-EINVAL)`` if no GPIO exists
* in the given chip for the specified hardware number.
*/
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc,
unsigned int hwnum)
{
- struct gpio_device *gdev = chip->gpiodev;
+ struct gpio_device *gdev = gc->gpiodev;
if (hwnum >= gdev->ngpio)
return ERR_PTR(-EINVAL);
return &gdev->descs[hwnum];
}
+EXPORT_SYMBOL_GPL(gpiochip_get_desc);
/**
* desc_to_gpio - convert a GPIO descriptor to the integer namespace
@@ -213,11 +214,11 @@ static int gpiochip_find_base(int ngpio)
*/
int gpiod_get_direction(struct gpio_desc *desc)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
unsigned offset;
int ret;
- chip = gpiod_to_chip(desc);
+ gc = gpiod_to_chip(desc);
offset = gpio_chip_hwgpio(desc);
/*
@@ -228,10 +229,10 @@ int gpiod_get_direction(struct gpio_desc *desc)
test_bit(FLAG_IS_OUT, &desc->flags))
return 0;
- if (!chip->get_direction)
+ if (!gc->get_direction)
return -ENOTSUPP;
- ret = chip->get_direction(chip, offset);
+ ret = gc->get_direction(gc, offset);
if (ret < 0)
return ret;
@@ -301,6 +302,9 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
struct gpio_device *gdev;
unsigned long flags;
+ if (!name)
+ return NULL;
+
spin_lock_irqsave(&gpio_lock, flags);
list_for_each_entry(gdev, &gpio_devices, list) {
@@ -309,7 +313,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
for (i = 0; i != gdev->ngpio; ++i) {
struct gpio_desc *desc = &gdev->descs[i];
- if (!desc->name || !name)
+ if (!desc->name)
continue;
if (!strcmp(desc->name, name)) {
@@ -356,16 +360,16 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
return 0;
}
-static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip)
+static unsigned long *gpiochip_allocate_mask(struct gpio_chip *gc)
{
unsigned long *p;
- p = bitmap_alloc(chip->ngpio, GFP_KERNEL);
+ p = bitmap_alloc(gc->ngpio, GFP_KERNEL);
if (!p)
return NULL;
/* Assume by default all GPIOs are valid */
- bitmap_fill(p, chip->ngpio);
+ bitmap_fill(p, gc->ngpio);
return p;
}
@@ -392,10 +396,10 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
return 0;
}
-static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip)
+static void gpiochip_free_valid_mask(struct gpio_chip *gc)
{
- bitmap_free(gpiochip->valid_mask);
- gpiochip->valid_mask = NULL;
+ bitmap_free(gc->valid_mask);
+ gc->valid_mask = NULL;
}
static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
@@ -406,13 +410,13 @@ static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
return 0;
}
-bool gpiochip_line_is_valid(const struct gpio_chip *gpiochip,
+bool gpiochip_line_is_valid(const struct gpio_chip *gc,
unsigned int offset)
{
/* No mask means all valid */
- if (likely(!gpiochip->valid_mask))
+ if (likely(!gc->valid_mask))
return true;
- return test_bit(offset, gpiochip->valid_mask);
+ return test_bit(offset, gc->valid_mask);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
@@ -546,6 +550,9 @@ static long linehandle_set_config(struct linehandle_state *lh,
if (ret)
return ret;
}
+
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_CONFIG, desc);
}
return 0;
}
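The hunk above makes linehandle_set_config() emit a GPIOLINE_CHANGED_CONFIG
notification on every successful reconfiguration. A hedged userspace sketch
that would trigger it, assuming lhfd is a line-handle fd obtained earlier via
GPIO_GET_LINEHANDLE_IOCTL (needs <linux/gpio.h> and <sys/ioctl.h>):

    /* Sketch: make a requested output active-low; watchers of that line
     * now see a GPIOLINE_CHANGED_CONFIG event on the chip's chardev. */
    struct gpiohandle_config hcfg = {
    	.flags = GPIOHANDLE_REQUEST_OUTPUT | GPIOHANDLE_REQUEST_ACTIVE_LOW,
    };
    if (ioctl(lhfd, GPIOHANDLE_SET_CONFIG_IOCTL, &hcfg) < 0)
    	perror("GPIOHANDLE_SET_CONFIG_IOCTL");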
@@ -722,6 +729,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_descs;
}
+
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
offset);
}
@@ -787,8 +798,6 @@ out_free_lh:
 * @irq: the interrupt that triggers in response to events on this GPIO
* @wait: wait queue that handles blocking reads of events
* @events: KFIFO for the GPIO events
- * @read_lock: mutex lock to protect reads from colliding with adding
- * new events to the FIFO
 * @timestamp: cache for the timestamp, storing it between the hardirq
 * handler and the IRQ thread, used to bring the timestamp close to the
 * actual event
@@ -801,7 +810,6 @@ struct lineevent_state {
int irq;
wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpioevent_data, 16);
- struct mutex read_lock;
u64 timestamp;
};
@@ -817,7 +825,7 @@ static __poll_t lineevent_poll(struct file *filep,
poll_wait(filep, &le->wait, wait);
- if (!kfifo_is_empty(&le->events))
+ if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
events = EPOLLIN | EPOLLRDNORM;
return events;
@@ -830,43 +838,52 @@ static ssize_t lineevent_read(struct file *filep,
loff_t *f_ps)
{
struct lineevent_state *le = filep->private_data;
- unsigned int copied;
+ struct gpioevent_data ge;
+ ssize_t bytes_read = 0;
int ret;
- if (count < sizeof(struct gpioevent_data))
+ if (count < sizeof(ge))
return -EINVAL;
do {
+ spin_lock(&le->wait.lock);
if (kfifo_is_empty(&le->events)) {
- if (filep->f_flags & O_NONBLOCK)
+ if (bytes_read) {
+ spin_unlock(&le->wait.lock);
+ return bytes_read;
+ }
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock(&le->wait.lock);
return -EAGAIN;
+ }
- ret = wait_event_interruptible(le->wait,
+ ret = wait_event_interruptible_locked(le->wait,
!kfifo_is_empty(&le->events));
- if (ret)
+ if (ret) {
+ spin_unlock(&le->wait.lock);
return ret;
+ }
}
- if (mutex_lock_interruptible(&le->read_lock))
- return -ERESTARTSYS;
- ret = kfifo_to_user(&le->events, buf, count, &copied);
- mutex_unlock(&le->read_lock);
-
- if (ret)
- return ret;
-
- /*
- * If we couldn't read anything from the fifo (a different
- * thread might have been faster) we either return -EAGAIN if
- * the file descriptor is non-blocking, otherwise we go back to
- * sleep and wait for more data to arrive.
- */
- if (copied == 0 && (filep->f_flags & O_NONBLOCK))
- return -EAGAIN;
+ ret = kfifo_out(&le->events, &ge, 1);
+ spin_unlock(&le->wait.lock);
+ if (ret != 1) {
+ /*
+ * This should never happen - we were holding the lock
+ * from the moment we learned the fifo is no longer
+ * empty until now.
+ */
+ ret = -EIO;
+ break;
+ }
- } while (copied == 0);
+ if (copy_to_user(buf + bytes_read, &ge, sizeof(ge)))
+ return -EFAULT;
+ bytes_read += sizeof(ge);
+ } while (count >= bytes_read + sizeof(ge));
- return copied;
+ return bytes_read;
}
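The rewrite above drops the read_lock mutex in favour of the waitqueue's own
spinlock and copies events out one record at a time, so a single read() can
now return as many complete gpioevent_data records as the buffer holds; the
timestamps also switch to the monotonic clock via the ktime_get_ns() changes
below. A hedged userspace sketch, assuming lefd is the fd returned by
GPIO_GET_LINEEVENT_IOCTL (needs <linux/gpio.h>, <unistd.h>, <stdio.h>):

    /* Sketch: drain several edge events with one read() */
    struct gpioevent_data ev[16];
    ssize_t rd = read(lefd, ev, sizeof(ev));
    if (rd > 0) {
    	for (size_t i = 0; i < rd / sizeof(ev[0]); i++)
    		printf("%s edge at %llu ns\n",
    		       ev[i].id == GPIOEVENT_EVENT_RISING_EDGE ?
    				"rising" : "falling",
    		       (unsigned long long)ev[i].timestamp);
    }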
static int lineevent_release(struct inode *inode, struct file *filep)
@@ -945,7 +962,7 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
* we didn't get the timestamp from lineevent_irq_handler().
*/
if (!le->timestamp)
- ge.timestamp = ktime_get_real_ns();
+ ge.timestamp = ktime_get_ns();
else
ge.timestamp = le->timestamp;
@@ -968,9 +985,12 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
return IRQ_NONE;
}
- ret = kfifo_put(&le->events, ge);
+ ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
+ 1, &le->wait.lock);
if (ret)
wake_up_poll(&le->wait, EPOLLIN);
+ else
+ pr_debug_ratelimited("event FIFO is full - event dropped\n");
return IRQ_HANDLED;
}
@@ -983,7 +1003,7 @@ static irqreturn_t lineevent_irq_handler(int irq, void *p)
* Just store the timestamp in hardirq context so we get it as
* close in time as possible to the actual event.
*/
- le->timestamp = ktime_get_real_ns();
+ le->timestamp = ktime_get_ns();
return IRQ_WAKE_THREAD;
}
@@ -1067,6 +1087,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_desc;
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
le->irq = gpiod_to_irq(desc);
if (le->irq <= 0) {
ret = -ENODEV;
@@ -1083,7 +1106,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
INIT_KFIFO(le->events);
init_waitqueue_head(&le->wait);
- mutex_init(&le->read_lock);
/* Request a thread to read the events */
ret = request_threaded_irq(le->irq,
@@ -1139,17 +1161,94 @@ out_free_le:
return ret;
}
+static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
+ struct gpioline_info *info)
+{
+ struct gpio_chip *gc = desc->gdev->chip;
+ bool ok_for_pinctrl;
+ unsigned long flags;
+
+ /*
+ * This function takes a mutex so we must check this before taking
+ * the spinlock.
+ *
+ * FIXME: find a non-racy way to retrieve this information. Maybe a
+ * lock common to both frameworks?
+ */
+ ok_for_pinctrl =
+ pinctrl_gpio_can_use_line(gc->base + info->line_offset);
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (desc->name) {
+ strncpy(info->name, desc->name, sizeof(info->name));
+ info->name[sizeof(info->name) - 1] = '\0';
+ } else {
+ info->name[0] = '\0';
+ }
+
+ if (desc->label) {
+ strncpy(info->consumer, desc->label, sizeof(info->consumer));
+ info->consumer[sizeof(info->consumer) - 1] = '\0';
+ } else {
+ info->consumer[0] = '\0';
+ }
+
+ /*
+ * Userspace only needs to know that the kernel is using this GPIO so
+ * it can't use it.
+ */
+ info->flags = 0;
+ if (test_bit(FLAG_REQUESTED, &desc->flags) ||
+ test_bit(FLAG_IS_HOGGED, &desc->flags) ||
+ test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
+ test_bit(FLAG_EXPORT, &desc->flags) ||
+ test_bit(FLAG_SYSFS, &desc->flags) ||
+ !ok_for_pinctrl)
+ info->flags |= GPIOLINE_FLAG_KERNEL;
+ if (test_bit(FLAG_IS_OUT, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_IS_OUT;
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ info->flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+ GPIOLINE_FLAG_IS_OUT);
+ if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ info->flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+ GPIOLINE_FLAG_IS_OUT);
+ if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
+ if (test_bit(FLAG_PULL_DOWN, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
+ if (test_bit(FLAG_PULL_UP, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+}
+
+struct gpio_chardev_data {
+ struct gpio_device *gdev;
+ wait_queue_head_t wait;
+ DECLARE_KFIFO(events, struct gpioline_info_changed, 32);
+ struct notifier_block lineinfo_changed_nb;
+ unsigned long *watched_lines;
+};
+
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct gpio_device *gdev = filp->private_data;
- struct gpio_chip *chip = gdev->chip;
+ struct gpio_chardev_data *priv = filp->private_data;
+ struct gpio_device *gdev = priv->gdev;
+ struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
+ struct gpio_desc *desc;
+ __u32 offset;
+ int hwgpio;
/* We fail any subsequent ioctl()s when the chip is gone */
- if (!chip)
+ if (!gc)
return -ENODEV;
/* Fill in the struct and pass to userspace */
@@ -1168,68 +1267,51 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
struct gpioline_info lineinfo;
- struct gpio_desc *desc;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- desc = gpiochip_get_desc(chip, lineinfo.line_offset);
+ desc = gpiochip_get_desc(gc, lineinfo.line_offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
- if (desc->name) {
- strncpy(lineinfo.name, desc->name,
- sizeof(lineinfo.name));
- lineinfo.name[sizeof(lineinfo.name)-1] = '\0';
- } else {
- lineinfo.name[0] = '\0';
- }
- if (desc->label) {
- strncpy(lineinfo.consumer, desc->label,
- sizeof(lineinfo.consumer));
- lineinfo.consumer[sizeof(lineinfo.consumer)-1] = '\0';
- } else {
- lineinfo.consumer[0] = '\0';
- }
+ hwgpio = gpio_chip_hwgpio(desc);
- /*
- * Userspace only need to know that the kernel is using
- * this GPIO so it can't use it.
- */
- lineinfo.flags = 0;
- if (test_bit(FLAG_REQUESTED, &desc->flags) ||
- test_bit(FLAG_IS_HOGGED, &desc->flags) ||
- test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
- test_bit(FLAG_EXPORT, &desc->flags) ||
- test_bit(FLAG_SYSFS, &desc->flags) ||
- !pinctrl_gpio_can_use_line(chip->base + lineinfo.line_offset))
- lineinfo.flags |= GPIOLINE_FLAG_KERNEL;
- if (test_bit(FLAG_IS_OUT, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_IS_OUT;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
- GPIOLINE_FLAG_IS_OUT);
- if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
- GPIOLINE_FLAG_IS_OUT);
- if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_BIAS_DISABLE;
- if (test_bit(FLAG_PULL_DOWN, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
- if (test_bit(FLAG_PULL_UP, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
+ if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL &&
+ test_bit(hwgpio, priv->watched_lines))
+ return -EBUSY;
+
+ gpio_desc_to_lineinfo(desc, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
+
+ if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL)
+ set_bit(hwgpio, priv->watched_lines);
+
return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
+ } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
+ if (copy_from_user(&offset, ip, sizeof(offset)))
+ return -EFAULT;
+
+ desc = gpiochip_get_desc(gc, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ hwgpio = gpio_chip_hwgpio(desc);
+
+ if (!test_bit(hwgpio, priv->watched_lines))
+ return -EBUSY;
+
+ clear_bit(hwgpio, priv->watched_lines);
+ return 0;
}
return -EINVAL;
}
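With the branches added above, the chardev ioctl handler can now arm and
disarm per-line status watches. A hedged userspace sketch, assuming chipfd is
an open /dev/gpiochipN descriptor (needs <linux/gpio.h> and <sys/ioctl.h>):

    /* Sketch: watch line 3, then stop watching it */
    struct gpioline_info linfo = { .line_offset = 3 };
    if (ioctl(chipfd, GPIO_GET_LINEINFO_WATCH_IOCTL, &linfo) < 0)
    	perror("GPIO_GET_LINEINFO_WATCH_IOCTL");
    /* ... consume gpioline_info_changed records from chipfd ... */
    __u32 offset = 3;
    if (ioctl(chipfd, GPIO_GET_LINEINFO_UNWATCH_IOCTL, &offset) < 0)
    	perror("GPIO_GET_LINEINFO_UNWATCH_IOCTL");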
@@ -1242,6 +1324,101 @@ static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
}
#endif
+static struct gpio_chardev_data *
+to_gpio_chardev_data(struct notifier_block *nb)
+{
+ return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
+}
+
+static int lineinfo_changed_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct gpio_chardev_data *priv = to_gpio_chardev_data(nb);
+ struct gpioline_info_changed chg;
+ struct gpio_desc *desc = data;
+ int ret;
+
+ if (!test_bit(gpio_chip_hwgpio(desc), priv->watched_lines))
+ return NOTIFY_DONE;
+
+ memset(&chg, 0, sizeof(chg));
+ chg.info.line_offset = gpio_chip_hwgpio(desc);
+ chg.event_type = action;
+ chg.timestamp = ktime_get_ns();
+ gpio_desc_to_lineinfo(desc, &chg.info);
+
+ ret = kfifo_in_spinlocked(&priv->events, &chg, 1, &priv->wait.lock);
+ if (ret)
+ wake_up_poll(&priv->wait, EPOLLIN);
+ else
+ pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
+
+ return NOTIFY_OK;
+}
+
+static __poll_t lineinfo_watch_poll(struct file *filep,
+ struct poll_table_struct *pollt)
+{
+ struct gpio_chardev_data *priv = filep->private_data;
+ __poll_t events = 0;
+
+ poll_wait(filep, &priv->wait, pollt);
+
+ if (!kfifo_is_empty_spinlocked_noirqsave(&priv->events,
+ &priv->wait.lock))
+ events = EPOLLIN | EPOLLRDNORM;
+
+ return events;
+}
+
+static ssize_t lineinfo_watch_read(struct file *filep, char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct gpio_chardev_data *priv = filep->private_data;
+ struct gpioline_info_changed event;
+ ssize_t bytes_read = 0;
+ int ret;
+
+ if (count < sizeof(event))
+ return -EINVAL;
+
+ do {
+ spin_lock(&priv->wait.lock);
+ if (kfifo_is_empty(&priv->events)) {
+ if (bytes_read) {
+ spin_unlock(&priv->wait.lock);
+ return bytes_read;
+ }
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock(&priv->wait.lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_locked(priv->wait,
+ !kfifo_is_empty(&priv->events));
+ if (ret) {
+ spin_unlock(&priv->wait.lock);
+ return ret;
+ }
+ }
+
+ ret = kfifo_out(&priv->events, &event, 1);
+ spin_unlock(&priv->wait.lock);
+ if (ret != 1) {
+ /* We should never get here. See lineevent_read(). */
+ ret = -EIO;
+ break;
+ }
+
+ if (copy_to_user(buf + bytes_read, &event, sizeof(event)))
+ return -EFAULT;
+ bytes_read += sizeof(event);
+ } while (count >= bytes_read + sizeof(event));
+
+ return bytes_read;
+}
+
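lineinfo_watch_read() above is what serves those watches: once a line is
watched, its requested/released/reconfigured transitions arrive as
gpioline_info_changed records on the same chip fd. A hedged sketch of the
userspace consumer side (needs <linux/gpio.h>, <unistd.h>, <stdio.h>):

    /* Sketch: blocking loop draining line-state change events */
    struct gpioline_info_changed chg;
    while (read(chipfd, &chg, sizeof(chg)) == sizeof(chg)) {
    	const char *what =
    		chg.event_type == GPIOLINE_CHANGED_REQUESTED ? "requested" :
    		chg.event_type == GPIOLINE_CHANGED_RELEASED ? "released" :
    		"reconfigured";
    	printf("line %u %s at %llu ns\n", chg.info.line_offset, what,
    	       (unsigned long long)chg.timestamp);
    }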
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
@@ -1252,14 +1429,48 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp)
{
struct gpio_device *gdev = container_of(inode->i_cdev,
struct gpio_device, chrdev);
+ struct gpio_chardev_data *priv;
+ int ret = -ENOMEM;
/* Fail on open if the backing gpiochip is gone */
if (!gdev->chip)
return -ENODEV;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
+ if (!priv->watched_lines)
+ goto out_free_priv;
+
+ init_waitqueue_head(&priv->wait);
+ INIT_KFIFO(priv->events);
+ priv->gdev = gdev;
+
+ priv->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
+ ret = atomic_notifier_chain_register(&gdev->notifier,
+ &priv->lineinfo_changed_nb);
+ if (ret)
+ goto out_free_bitmap;
+
get_device(&gdev->dev);
- filp->private_data = gdev;
+ filp->private_data = priv;
+
+ ret = nonseekable_open(inode, filp);
+ if (ret)
+ goto out_unregister_notifier;
- return nonseekable_open(inode, filp);
+ return ret;
+
+out_unregister_notifier:
+ atomic_notifier_chain_unregister(&gdev->notifier,
+ &priv->lineinfo_changed_nb);
+out_free_bitmap:
+ bitmap_free(priv->watched_lines);
+out_free_priv:
+ kfree(priv);
+ return ret;
}
/**
@@ -1270,17 +1481,23 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp)
*/
static int gpio_chrdev_release(struct inode *inode, struct file *filp)
{
- struct gpio_device *gdev = container_of(inode->i_cdev,
- struct gpio_device, chrdev);
+ struct gpio_chardev_data *priv = filp->private_data;
+ struct gpio_device *gdev = priv->gdev;
+ bitmap_free(priv->watched_lines);
+ atomic_notifier_chain_unregister(&gdev->notifier,
+ &priv->lineinfo_changed_nb);
put_device(&gdev->dev);
+ kfree(priv);
+
return 0;
}
-
static const struct file_operations gpio_fileops = {
.release = gpio_chrdev_release,
.open = gpio_chrdev_open,
+ .poll = lineinfo_watch_poll,
+ .read = lineinfo_watch_read,
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = gpio_ioctl,
@@ -1332,12 +1549,12 @@ err_remove_device:
return ret;
}
-static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog)
+static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
{
struct gpio_desc *desc;
int rv;
- desc = gpiochip_get_desc(chip, hog->chip_hwnum);
+ desc = gpiochip_get_desc(gc, hog->chip_hwnum);
if (IS_ERR(desc)) {
pr_err("%s: unable to get GPIO desc: %ld\n",
__func__, PTR_ERR(desc));
@@ -1350,18 +1567,18 @@ static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog)
rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags);
if (rv)
pr_err("%s: unable to hog GPIO line (%s:%u): %d\n",
- __func__, chip->label, hog->chip_hwnum, rv);
+ __func__, gc->label, hog->chip_hwnum, rv);
}
-static void machine_gpiochip_add(struct gpio_chip *chip)
+static void machine_gpiochip_add(struct gpio_chip *gc)
{
struct gpiod_hog *hog;
mutex_lock(&gpio_machine_hogs_mutex);
list_for_each_entry(hog, &gpio_machine_hogs, list) {
- if (!strcmp(chip->label, hog->chip_label))
- gpiochip_machine_hog(chip, hog);
+ if (!strcmp(gc->label, hog->chip_label))
+ gpiochip_machine_hog(gc, hog);
}
mutex_unlock(&gpio_machine_hogs_mutex);
@@ -1380,14 +1597,14 @@ static void gpiochip_setup_devs(void)
}
}
-int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
unsigned long flags;
int ret = 0;
unsigned i;
- int base = chip->base;
+ int base = gc->base;
struct gpio_device *gdev;
/*
@@ -1398,19 +1615,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (!gdev)
return -ENOMEM;
gdev->dev.bus = &gpio_bus_type;
- gdev->chip = chip;
- chip->gpiodev = gdev;
- if (chip->parent) {
- gdev->dev.parent = chip->parent;
- gdev->dev.of_node = chip->parent->of_node;
+ gdev->chip = gc;
+ gc->gpiodev = gdev;
+ if (gc->parent) {
+ gdev->dev.parent = gc->parent;
+ gdev->dev.of_node = gc->parent->of_node;
}
#ifdef CONFIG_OF_GPIO
/* If the gpiochip has an assigned OF node this takes precedence */
- if (chip->of_node)
- gdev->dev.of_node = chip->of_node;
+ if (gc->of_node)
+ gdev->dev.of_node = gc->of_node;
else
- chip->of_node = gdev->dev.of_node;
+ gc->of_node = gdev->dev.of_node;
#endif
gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
@@ -1421,37 +1638,37 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
- if (chip->parent && chip->parent->driver)
- gdev->owner = chip->parent->driver->owner;
- else if (chip->owner)
+ if (gc->parent && gc->parent->driver)
+ gdev->owner = gc->parent->driver->owner;
+ else if (gc->owner)
/* TODO: remove chip->owner */
- gdev->owner = chip->owner;
+ gdev->owner = gc->owner;
else
gdev->owner = THIS_MODULE;
- gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
+ gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
ret = -ENOMEM;
goto err_free_ida;
}
- if (chip->ngpio == 0) {
- chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
+ if (gc->ngpio == 0) {
+ chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
ret = -EINVAL;
goto err_free_descs;
}
- if (chip->ngpio > FASTPATH_NGPIO)
- chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n",
- chip->ngpio, FASTPATH_NGPIO);
+ if (gc->ngpio > FASTPATH_NGPIO)
+ chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
+ gc->ngpio, FASTPATH_NGPIO);
- gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
+ gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
ret = -ENOMEM;
goto err_free_descs;
}
- gdev->ngpio = chip->ngpio;
+ gdev->ngpio = gc->ngpio;
gdev->data = data;
spin_lock_irqsave(&gpio_lock, flags);
@@ -1464,7 +1681,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
* of the sysfs interface anyways.
*/
if (base < 0) {
- base = gpiochip_find_base(chip->ngpio);
+ base = gpiochip_find_base(gc->ngpio);
if (base < 0) {
ret = base;
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -1476,7 +1693,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
* see if anyone makes use of this, else drop this and assign
* a poison instead.
*/
- chip->base = base;
+ gc->base = base;
}
gdev->base = base;
@@ -1486,60 +1703,62 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_label;
}
- for (i = 0; i < chip->ngpio; i++)
+ for (i = 0; i < gc->ngpio; i++)
gdev->descs[i].gdev = gdev;
spin_unlock_irqrestore(&gpio_lock, flags);
+ ATOMIC_INIT_NOTIFIER_HEAD(&gdev->notifier);
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
- ret = gpiochip_set_desc_names(chip);
+ ret = gpiochip_set_desc_names(gc);
if (ret)
goto err_remove_from_list;
- ret = gpiochip_alloc_valid_mask(chip);
+ ret = gpiochip_alloc_valid_mask(gc);
if (ret)
goto err_remove_from_list;
- ret = of_gpiochip_add(chip);
+ ret = of_gpiochip_add(gc);
if (ret)
goto err_free_gpiochip_mask;
- ret = gpiochip_init_valid_mask(chip);
+ ret = gpiochip_init_valid_mask(gc);
if (ret)
goto err_remove_of_chip;
- for (i = 0; i < chip->ngpio; i++) {
+ for (i = 0; i < gc->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
- if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+ if (gc->get_direction && gpiochip_line_is_valid(gc, i)) {
assign_bit(FLAG_IS_OUT,
- &desc->flags, !chip->get_direction(chip, i));
+ &desc->flags, !gc->get_direction(gc, i));
} else {
assign_bit(FLAG_IS_OUT,
- &desc->flags, !chip->direction_input);
+ &desc->flags, !gc->direction_input);
}
}
- ret = gpiochip_add_pin_ranges(chip);
+ ret = gpiochip_add_pin_ranges(gc);
if (ret)
goto err_remove_of_chip;
- acpi_gpiochip_add(chip);
+ acpi_gpiochip_add(gc);
- machine_gpiochip_add(chip);
+ machine_gpiochip_add(gc);
- ret = gpiochip_irqchip_init_valid_mask(chip);
+ ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
goto err_remove_acpi_chip;
- ret = gpiochip_irqchip_init_hw(chip);
+ ret = gpiochip_irqchip_init_hw(gc);
if (ret)
goto err_remove_acpi_chip;
- ret = gpiochip_add_irqchip(chip, lock_key, request_key);
+ ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
goto err_remove_irqchip_mask;
@@ -1559,17 +1778,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
return 0;
err_remove_irqchip:
- gpiochip_irqchip_remove(chip);
+ gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
- gpiochip_irqchip_free_valid_mask(chip);
+ gpiochip_irqchip_free_valid_mask(gc);
err_remove_acpi_chip:
- acpi_gpiochip_remove(chip);
+ acpi_gpiochip_remove(gc);
err_remove_of_chip:
- gpiochip_free_hogs(chip);
- of_gpiochip_remove(chip);
+ gpiochip_free_hogs(gc);
+ of_gpiochip_remove(gc);
err_free_gpiochip_mask:
- gpiochip_remove_pin_ranges(chip);
- gpiochip_free_valid_mask(chip);
+ gpiochip_remove_pin_ranges(gc);
+ gpiochip_free_valid_mask(gc);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
@@ -1584,7 +1803,7 @@ err_free_gdev:
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
gdev->base, gdev->base + gdev->ngpio - 1,
- chip->label ? : "generic", ret);
+ gc->label ? : "generic", ret);
kfree(gdev);
return ret;
}
@@ -1592,41 +1811,39 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
/**
* gpiochip_get_data() - get per-subdriver data for the chip
- * @chip: GPIO chip
+ * @gc: GPIO chip
*
* Returns:
* The per-subdriver data for the chip.
*/
-void *gpiochip_get_data(struct gpio_chip *chip)
+void *gpiochip_get_data(struct gpio_chip *gc)
{
- return chip->gpiodev->data;
+ return gc->gpiodev->data;
}
EXPORT_SYMBOL_GPL(gpiochip_get_data);
/**
* gpiochip_remove() - unregister a gpio_chip
- * @chip: the chip to unregister
+ * @gc: the chip to unregister
*
* A gpio_chip with any GPIOs still requested may not be removed.
*/
-void gpiochip_remove(struct gpio_chip *chip)
+void gpiochip_remove(struct gpio_chip *gc)
{
- struct gpio_device *gdev = chip->gpiodev;
- struct gpio_desc *desc;
+ struct gpio_device *gdev = gc->gpiodev;
unsigned long flags;
- unsigned i;
- bool requested = false;
+ unsigned int i;
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
- gpiochip_free_hogs(chip);
+ gpiochip_free_hogs(gc);
/* Numb the device, cancelling all outstanding operations */
gdev->chip = NULL;
- gpiochip_irqchip_remove(chip);
- acpi_gpiochip_remove(chip);
- of_gpiochip_remove(chip);
- gpiochip_remove_pin_ranges(chip);
- gpiochip_free_valid_mask(chip);
+ gpiochip_irqchip_remove(gc);
+ acpi_gpiochip_remove(gc);
+ of_gpiochip_remove(gc);
+ gpiochip_remove_pin_ranges(gc);
+ gpiochip_free_valid_mask(gc);
/*
* We accept no more calls into the driver from this point, so
* NULL the driver data pointer
@@ -1635,13 +1852,12 @@ void gpiochip_remove(struct gpio_chip *chip)
spin_lock_irqsave(&gpio_lock, flags);
for (i = 0; i < gdev->ngpio; i++) {
- desc = &gdev->descs[i];
- if (test_bit(FLAG_REQUESTED, &desc->flags))
- requested = true;
+ if (gpiochip_is_requested(gc, i))
+ break;
}
spin_unlock_irqrestore(&gpio_lock, flags);
- if (requested)
+ if (i != gdev->ngpio)
dev_crit(&gdev->dev,
"REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
@@ -1656,52 +1872,6 @@ void gpiochip_remove(struct gpio_chip *chip)
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
-static void devm_gpio_chip_release(struct device *dev, void *res)
-{
- struct gpio_chip *chip = *(struct gpio_chip **)res;
-
- gpiochip_remove(chip);
-}
-
-/**
- * devm_gpiochip_add_data() - Resource managed gpiochip_add_data()
- * @dev: pointer to the device that gpio_chip belongs to.
- * @chip: the chip to register, with chip->base initialized
- * @data: driver-private data associated with this chip
- *
- * Context: potentially before irqs will work
- *
- * The gpio chip automatically be released when the device is unbound.
- *
- * Returns:
- * A negative errno if the chip can't be registered, such as because the
- * chip->base is invalid or already associated with a different chip.
- * Otherwise it returns zero as a success code.
- */
-int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
- void *data)
-{
- struct gpio_chip **ptr;
- int ret;
-
- ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = gpiochip_add_data(chip, data);
- if (ret < 0) {
- devres_free(ptr);
- return ret;
- }
-
- *ptr = chip;
- devres_add(dev, ptr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_gpiochip_add_data);
-
/**
* gpiochip_find() - iterator for locating a specific gpio_chip
* @data: data to pass to match function
@@ -1714,31 +1884,31 @@ EXPORT_SYMBOL_GPL(devm_gpiochip_add_data);
* more gpio_chips.
*/
struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *chip,
+ int (*match)(struct gpio_chip *gc,
void *data))
{
struct gpio_device *gdev;
- struct gpio_chip *chip = NULL;
+ struct gpio_chip *gc = NULL;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
list_for_each_entry(gdev, &gpio_devices, list)
if (gdev->chip && match(gdev->chip, data)) {
- chip = gdev->chip;
+ gc = gdev->chip;
break;
}
spin_unlock_irqrestore(&gpio_lock, flags);
- return chip;
+ return gc;
}
EXPORT_SYMBOL_GPL(gpiochip_find);
-static int gpiochip_match_name(struct gpio_chip *chip, void *data)
+static int gpiochip_match_name(struct gpio_chip *gc, void *data)
{
const char *name = data;
- return !strcmp(chip->label, name);
+ return !strcmp(gc->label, name);
}
static struct gpio_chip *find_chip_by_name(const char *name)
@@ -1778,21 +1948,21 @@ static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
return 0;
}
-static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
+static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
{
- bitmap_free(gpiochip->irq.valid_mask);
- gpiochip->irq.valid_mask = NULL;
+ bitmap_free(gc->irq.valid_mask);
+ gc->irq.valid_mask = NULL;
}
-bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
unsigned int offset)
{
- if (!gpiochip_line_is_valid(gpiochip, offset))
+ if (!gpiochip_line_is_valid(gc, offset))
return false;
/* No mask means all valid */
- if (likely(!gpiochip->irq.valid_mask))
+ if (likely(!gc->irq.valid_mask))
return true;
- return test_bit(offset, gpiochip->irq.valid_mask);
+ return test_bit(offset, gc->irq.valid_mask);
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
@@ -1844,16 +2014,16 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
/**
* gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
- * @gpiochip: the gpiochip to set the irqchip nested handler to
+ * @gc: the gpiochip to set the irqchip nested handler to
* @irqchip: the irqchip to nest to the gpiochip
* @parent_irq: the irq number corresponding to the parent IRQ for this
* nested irqchip
*/
-void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
+void gpiochip_set_nested_irqchip(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int parent_irq)
{
- gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, NULL);
+ gpiochip_set_cascaded_irqchip(gc, parent_irq, NULL);
}
EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
@@ -2030,7 +2200,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
return ret;
}
-static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *chip,
+static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *gc,
unsigned int offset)
{
return offset;
@@ -2090,7 +2260,7 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
return !!gc->irq.parent_domain;
}
-void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type)
{
@@ -2100,7 +2270,7 @@ void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
if (!fwspec)
return NULL;
- fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->fwnode = gc->irq.parent_domain->fwnode;
fwspec->param_count = 2;
fwspec->param[0] = parent_hwirq;
fwspec->param[1] = parent_type;
@@ -2109,7 +2279,7 @@ void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
-void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type)
{
@@ -2119,7 +2289,7 @@ void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
if (!fwspec)
return NULL;
- fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->fwnode = gc->irq.parent_domain->fwnode;
fwspec->param_count = 4;
fwspec->param[0] = 0;
fwspec->param[1] = parent_hwirq;
@@ -2157,28 +2327,28 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- struct gpio_chip *chip = d->host_data;
+ struct gpio_chip *gc = d->host_data;
int ret = 0;
- if (!gpiochip_irqchip_irq_valid(chip, hwirq))
+ if (!gpiochip_irqchip_irq_valid(gc, hwirq))
return -ENXIO;
- irq_set_chip_data(irq, chip);
+ irq_set_chip_data(irq, gc);
/*
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
*/
- irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
- irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
+ irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
+ irq_set_chip_and_handler(irq, gc->irq.chip, gc->irq.handler);
/* Chips that use nested thread handlers have them marked */
- if (chip->irq.threaded)
+ if (gc->irq.threaded)
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
- if (chip->irq.num_parents == 1)
- ret = irq_set_parent(irq, chip->irq.parents[0]);
- else if (chip->irq.map)
- ret = irq_set_parent(irq, chip->irq.map[hwirq]);
+ if (gc->irq.num_parents == 1)
+ ret = irq_set_parent(irq, gc->irq.parents[0]);
+ else if (gc->irq.map)
+ ret = irq_set_parent(irq, gc->irq.map[hwirq]);
if (ret < 0)
return ret;
@@ -2187,8 +2357,8 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
* No set-up of the hardware will happen if IRQ_TYPE_NONE
* is passed as default type.
*/
- if (chip->irq.default_type != IRQ_TYPE_NONE)
- irq_set_irq_type(irq, chip->irq.default_type);
+ if (gc->irq.default_type != IRQ_TYPE_NONE)
+ irq_set_irq_type(irq, gc->irq.default_type);
return 0;
}
@@ -2196,9 +2366,9 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_map);
void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
- struct gpio_chip *chip = d->host_data;
+ struct gpio_chip *gc = d->host_data;
- if (chip->irq.threaded)
+ if (gc->irq.threaded)
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
@@ -2230,9 +2400,9 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
int gpiochip_irq_domain_activate(struct irq_domain *domain,
struct irq_data *data, bool reserve)
{
- struct gpio_chip *chip = domain->host_data;
+ struct gpio_chip *gc = domain->host_data;
- return gpiochip_lock_as_irq(chip, data->hwirq);
+ return gpiochip_lock_as_irq(gc, data->hwirq);
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate);
@@ -2248,17 +2418,17 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate);
void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *data)
{
- struct gpio_chip *chip = domain->host_data;
+ struct gpio_chip *gc = domain->host_data;
- return gpiochip_unlock_as_irq(chip, data->hwirq);
+ return gpiochip_unlock_as_irq(gc, data->hwirq);
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate);
-static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
+static int gpiochip_to_irq(struct gpio_chip *gc, unsigned offset)
{
- struct irq_domain *domain = chip->irq.domain;
+ struct irq_domain *domain = gc->irq.domain;
- if (!gpiochip_irqchip_irq_valid(chip, offset))
+ if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -2267,7 +2437,7 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
spec.fwnode = domain->fwnode;
spec.param_count = 2;
- spec.param[0] = chip->irq.child_offset_to_irq(chip, offset);
+ spec.param[0] = gc->irq.child_offset_to_irq(gc, offset);
spec.param[1] = IRQ_TYPE_NONE;
return irq_create_fwspec_mapping(&spec);
@@ -2279,32 +2449,32 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
static int gpiochip_irq_reqres(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- return gpiochip_reqres_irq(chip, d->hwirq);
+ return gpiochip_reqres_irq(gc, d->hwirq);
}
static void gpiochip_irq_relres(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- gpiochip_relres_irq(chip, d->hwirq);
+ gpiochip_relres_irq(gc, d->hwirq);
}
static void gpiochip_irq_enable(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- gpiochip_enable_irq(chip, d->hwirq);
- if (chip->irq.irq_enable)
- chip->irq.irq_enable(d);
+ gpiochip_enable_irq(gc, d->hwirq);
+ if (gc->irq.irq_enable)
+ gc->irq.irq_enable(d);
else
- chip->irq.chip->irq_unmask(d);
+ gc->irq.chip->irq_unmask(d);
}
static void gpiochip_irq_disable(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
/*
* Since we override .irq_disable() we need to mimic the
@@ -2313,23 +2483,23 @@ static void gpiochip_irq_disable(struct irq_data *d)
* behaviour of mask_irq() which calls .irq_mask() if
* it exists.
*/
- if (chip->irq.irq_disable)
- chip->irq.irq_disable(d);
- else if (chip->irq.chip->irq_mask)
- chip->irq.chip->irq_mask(d);
- gpiochip_disable_irq(chip, d->hwirq);
+ if (gc->irq.irq_disable)
+ gc->irq.irq_disable(d);
+ else if (gc->irq.chip->irq_mask)
+ gc->irq.chip->irq_mask(d);
+ gpiochip_disable_irq(gc, d->hwirq);
}
-static void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip)
+static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
{
- struct irq_chip *irqchip = gpiochip->irq.chip;
+ struct irq_chip *irqchip = gc->irq.chip;
if (!irqchip->irq_request_resources &&
!irqchip->irq_release_resources) {
irqchip->irq_request_resources = gpiochip_irq_reqres;
irqchip->irq_release_resources = gpiochip_irq_relres;
}
- if (WARN_ON(gpiochip->irq.irq_enable))
+ if (WARN_ON(gc->irq.irq_enable))
return;
/* Check if the irqchip already has this hook... */
if (irqchip->irq_enable == gpiochip_irq_enable) {
@@ -2337,27 +2507,27 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip)
* ...and if so, give a gentle warning that this is bad
* practice.
*/
- chip_info(gpiochip,
+ chip_info(gc,
"detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
return;
}
- gpiochip->irq.irq_enable = irqchip->irq_enable;
- gpiochip->irq.irq_disable = irqchip->irq_disable;
+ gc->irq.irq_enable = irqchip->irq_enable;
+ gc->irq.irq_disable = irqchip->irq_disable;
irqchip->irq_enable = gpiochip_irq_enable;
irqchip->irq_disable = gpiochip_irq_disable;
}
/**
* gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
- * @gpiochip: the GPIO chip to add the IRQ chip to
+ * @gc: the GPIO chip to add the IRQ chip to
* @lock_key: lockdep class for IRQ lock
* @request_key: lockdep class for IRQ request
*/
-static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+static int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
- struct irq_chip *irqchip = gpiochip->irq.chip;
+ struct irq_chip *irqchip = gc->irq.chip;
const struct irq_domain_ops *ops = NULL;
struct device_node *np;
unsigned int type;
@@ -2366,13 +2536,13 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
if (!irqchip)
return 0;
- if (gpiochip->irq.parent_handler && gpiochip->can_sleep) {
- chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n");
+ if (gc->irq.parent_handler && gc->can_sleep) {
+ chip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n");
return -EINVAL;
}
- np = gpiochip->gpiodev->dev.of_node;
- type = gpiochip->irq.default_type;
+ np = gc->gpiodev->dev.of_node;
+ type = gc->irq.default_type;
/*
* Specifying a default trigger is a terrible idea if DT or ACPI is
@@ -2383,74 +2553,74 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
"%s: Ignoring %u default trigger\n", np->full_name, type))
type = IRQ_TYPE_NONE;
- if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
- acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
+ if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) {
+ acpi_handle_warn(ACPI_HANDLE(gc->parent),
"Ignoring %u default trigger\n", type);
type = IRQ_TYPE_NONE;
}
- gpiochip->to_irq = gpiochip_to_irq;
- gpiochip->irq.default_type = type;
- gpiochip->irq.lock_key = lock_key;
- gpiochip->irq.request_key = request_key;
+ gc->to_irq = gpiochip_to_irq;
+ gc->irq.default_type = type;
+ gc->irq.lock_key = lock_key;
+ gc->irq.request_key = request_key;
/* If a parent irqdomain is provided, let's build a hierarchy */
- if (gpiochip_hierarchy_is_hierarchical(gpiochip)) {
- int ret = gpiochip_hierarchy_add_domain(gpiochip);
+ if (gpiochip_hierarchy_is_hierarchical(gc)) {
+ int ret = gpiochip_hierarchy_add_domain(gc);
if (ret)
return ret;
} else {
/* Some drivers provide custom irqdomain ops */
- if (gpiochip->irq.domain_ops)
- ops = gpiochip->irq.domain_ops;
+ if (gc->irq.domain_ops)
+ ops = gc->irq.domain_ops;
if (!ops)
ops = &gpiochip_domain_ops;
- gpiochip->irq.domain = irq_domain_add_simple(np,
- gpiochip->ngpio,
- gpiochip->irq.first,
- ops, gpiochip);
- if (!gpiochip->irq.domain)
+ gc->irq.domain = irq_domain_add_simple(np,
+ gc->ngpio,
+ gc->irq.first,
+ ops, gc);
+ if (!gc->irq.domain)
return -EINVAL;
}
- if (gpiochip->irq.parent_handler) {
- void *data = gpiochip->irq.parent_handler_data ?: gpiochip;
+ if (gc->irq.parent_handler) {
+ void *data = gc->irq.parent_handler_data ?: gc;
- for (i = 0; i < gpiochip->irq.num_parents; i++) {
+ for (i = 0; i < gc->irq.num_parents; i++) {
/*
* The parent IRQ chip is already using the chip_data
* for this IRQ chip, so our callbacks simply use the
* handler_data.
*/
- irq_set_chained_handler_and_data(gpiochip->irq.parents[i],
- gpiochip->irq.parent_handler,
+ irq_set_chained_handler_and_data(gc->irq.parents[i],
+ gc->irq.parent_handler,
data);
}
}
- gpiochip_set_irq_hooks(gpiochip);
+ gpiochip_set_irq_hooks(gc);
- acpi_gpiochip_request_interrupts(gpiochip);
+ acpi_gpiochip_request_interrupts(gc);
return 0;
}
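gpiochip_add_irqchip() consumes the gpio_irq_chip template a driver fills in
before registration; the renames above do not change that contract. A hedged
driver-side sketch (my_irq_chip and my_priv are illustrative names, not taken
from this patch):

    /* Sketch: populate the irq template, then register the chip */
    struct gpio_irq_chip *girq = &gc->irq;

    girq->chip = &my_irq_chip;
    girq->handler = handle_edge_irq;
    girq->default_type = IRQ_TYPE_NONE;

    ret = gpiochip_add_data(gc, my_priv);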
/**
* gpiochip_irqchip_remove() - removes an irqchip added to a gpiochip
- * @gpiochip: the gpiochip to remove the irqchip from
+ * @gc: the gpiochip to remove the irqchip from
*
* This is called only from gpiochip_remove()
*/
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
+static void gpiochip_irqchip_remove(struct gpio_chip *gc)
{
- struct irq_chip *irqchip = gpiochip->irq.chip;
+ struct irq_chip *irqchip = gc->irq.chip;
unsigned int offset;
- acpi_gpiochip_free_interrupts(gpiochip);
+ acpi_gpiochip_free_interrupts(gc);
- if (irqchip && gpiochip->irq.parent_handler) {
- struct gpio_irq_chip *irq = &gpiochip->irq;
+ if (irqchip && gc->irq.parent_handler) {
+ struct gpio_irq_chip *irq = &gc->irq;
unsigned int i;
for (i = 0; i < irq->num_parents; i++)
@@ -2459,18 +2629,18 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
/* Remove all IRQ mappings and delete the domain */
- if (gpiochip->irq.domain) {
+ if (gc->irq.domain) {
unsigned int irq;
- for (offset = 0; offset < gpiochip->ngpio; offset++) {
- if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
+ for (offset = 0; offset < gc->ngpio; offset++) {
+ if (!gpiochip_irqchip_irq_valid(gc, offset))
continue;
- irq = irq_find_mapping(gpiochip->irq.domain, offset);
+ irq = irq_find_mapping(gc->irq.domain, offset);
irq_dispose_mapping(irq);
}
- irq_domain_remove(gpiochip->irq.domain);
+ irq_domain_remove(gc->irq.domain);
}
if (irqchip) {
@@ -2479,20 +2649,20 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
irqchip->irq_release_resources = NULL;
}
if (irqchip->irq_enable == gpiochip_irq_enable) {
- irqchip->irq_enable = gpiochip->irq.irq_enable;
- irqchip->irq_disable = gpiochip->irq.irq_disable;
+ irqchip->irq_enable = gc->irq.irq_enable;
+ irqchip->irq_disable = gc->irq.irq_disable;
}
}
- gpiochip->irq.irq_enable = NULL;
- gpiochip->irq.irq_disable = NULL;
- gpiochip->irq.chip = NULL;
+ gc->irq.irq_enable = NULL;
+ gc->irq.irq_disable = NULL;
+ gc->irq.chip = NULL;
- gpiochip_irqchip_free_valid_mask(gpiochip);
+ gpiochip_irqchip_free_valid_mask(gc);
}
/**
* gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
- * @gpiochip: the gpiochip to add the irqchip to
+ * @gc: the gpiochip to add the irqchip to
* @irqchip: the irqchip to add to the gpiochip
* @first_irq: if not dynamically assigned, the base (first) IRQ to
* allocate gpiochip irqs from
@@ -2517,7 +2687,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* the pins on the gpiochip can generate a unique IRQ. Everything else
 * needs to be open coded.
*/
-int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
@@ -2528,23 +2698,23 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
{
struct device_node *of_node;
- if (!gpiochip || !irqchip)
+ if (!gc || !irqchip)
return -EINVAL;
- if (!gpiochip->parent) {
+ if (!gc->parent) {
pr_err("missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
- gpiochip->irq.threaded = threaded;
- of_node = gpiochip->parent->of_node;
+ gc->irq.threaded = threaded;
+ of_node = gc->parent->of_node;
#ifdef CONFIG_OF_GPIO
/*
* If the gpiochip has an assigned OF node this takes precedence
- * FIXME: get rid of this and use gpiochip->parent->of_node
+ * FIXME: get rid of this and use gc->parent->of_node
* everywhere
*/
- if (gpiochip->of_node)
- of_node = gpiochip->of_node;
+ if (gc->of_node)
+ of_node = gc->of_node;
#endif
/*
* Specifying a default trigger is a terrible idea if DT or ACPI is
@@ -2554,29 +2724,29 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
if (WARN(of_node && type != IRQ_TYPE_NONE,
"%pOF: Ignoring %d default trigger\n", of_node, type))
type = IRQ_TYPE_NONE;
- if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
- acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
+ if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) {
+ acpi_handle_warn(ACPI_HANDLE(gc->parent),
"Ignoring %d default trigger\n", type);
type = IRQ_TYPE_NONE;
}
- gpiochip->irq.chip = irqchip;
- gpiochip->irq.handler = handler;
- gpiochip->irq.default_type = type;
- gpiochip->to_irq = gpiochip_to_irq;
- gpiochip->irq.lock_key = lock_key;
- gpiochip->irq.request_key = request_key;
- gpiochip->irq.domain = irq_domain_add_simple(of_node,
- gpiochip->ngpio, first_irq,
- &gpiochip_domain_ops, gpiochip);
- if (!gpiochip->irq.domain) {
- gpiochip->irq.chip = NULL;
+ gc->irq.chip = irqchip;
+ gc->irq.handler = handler;
+ gc->irq.default_type = type;
+ gc->to_irq = gpiochip_to_irq;
+ gc->irq.lock_key = lock_key;
+ gc->irq.request_key = request_key;
+ gc->irq.domain = irq_domain_add_simple(of_node,
+ gc->ngpio, first_irq,
+ &gpiochip_domain_ops, gc);
+ if (!gc->irq.domain) {
+ gc->irq.chip = NULL;
return -EINVAL;
}
- gpiochip_set_irq_hooks(gpiochip);
+ gpiochip_set_irq_hooks(gc);
- acpi_gpiochip_request_interrupts(gpiochip);
+ acpi_gpiochip_request_interrupts(gc);
return 0;
}
@@ -2584,60 +2754,65 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
-static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+static inline int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
return 0;
}
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+static void gpiochip_irqchip_remove(struct gpio_chip *gc) {}
-static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip)
+static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gc)
{
return 0;
}
-static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
+static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
{
return 0;
}
-static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
+static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
{ }
#endif /* CONFIG_GPIOLIB_IRQCHIP */
/**
* gpiochip_generic_request() - request the gpio function for a pin
- * @chip: the gpiochip owning the GPIO
+ * @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to request for GPIO function
*/
-int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset)
{
- return pinctrl_gpio_request(chip->gpiodev->base + offset);
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return 0;
+#endif
+
+ return pinctrl_gpio_request(gc->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_request);
/**
* gpiochip_generic_free() - free the gpio function from a pin
- * @chip: the gpiochip to request the gpio function for
+ * @gc: the gpiochip to request the gpio function for
* @offset: the offset of the GPIO to free from GPIO function
*/
-void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset)
{
- pinctrl_gpio_free(chip->gpiodev->base + offset);
+ pinctrl_gpio_free(gc->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
/**
* gpiochip_generic_config() - apply configuration for a pin
- * @chip: the gpiochip owning the GPIO
+ * @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to apply the configuration
* @config: the configuration to be applied
*/
-int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset,
unsigned long config)
{
- return pinctrl_gpio_set_config(chip->gpiodev->base + offset, config);
+ return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_config);
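The CONFIG_PINCTRL guard added to gpiochip_generic_request() above means
chips that registered no pin ranges no longer bounce every request off
pinctrl. A hedged sketch of a driver wiring these generic helpers up (the
callback fields are real gpio_chip ops; the surrounding driver is
hypothetical):

    /* Sketch: use the generic pinctrl-backed helpers as chip callbacks */
    gc->request = gpiochip_generic_request;
    gc->free = gpiochip_generic_free;
    gc->set_config = gpiochip_generic_config;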
@@ -2645,7 +2820,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config);
/**
* gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping
- * @chip: the gpiochip to add the range for
+ * @gc: the gpiochip to add the range for
* @pctldev: the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_group: name of the pin group inside the pin controller
@@ -2655,24 +2830,24 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config);
* Documentation/devicetree/bindings/gpio/gpio.txt on how to
* bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group)
{
struct gpio_pin_range *pin_range;
- struct gpio_device *gdev = chip->gpiodev;
+ struct gpio_device *gdev = gc->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
if (!pin_range) {
- chip_err(chip, "failed to allocate pin ranges\n");
+ chip_err(gc, "failed to allocate pin ranges\n");
return -ENOMEM;
}
/* Use local offset as range ID */
pin_range->range.id = gpio_offset;
- pin_range->range.gc = chip;
- pin_range->range.name = chip->label;
+ pin_range->range.gc = gc;
+ pin_range->range.name = gc->label;
pin_range->range.base = gdev->base + gpio_offset;
pin_range->pctldev = pctldev;
@@ -2686,7 +2861,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
pinctrl_add_gpio_range(pctldev, &pin_range->range);
- chip_dbg(chip, "created GPIO range %d->%d ==> %s PINGRP %s\n",
+ chip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n",
gpio_offset, gpio_offset + pin_range->range.npins - 1,
pinctrl_dev_get_devname(pctldev), pin_group);
@@ -2698,7 +2873,7 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
/**
* gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
- * @chip: the gpiochip to add the range for
+ * @gc: the gpiochip to add the range for
* @pinctl_name: the dev_name() of the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_offset: the start offset in the pin controller number space
@@ -2713,24 +2888,24 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
* Documentation/devicetree/bindings/gpio/gpio.txt on how to
* bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
struct gpio_pin_range *pin_range;
- struct gpio_device *gdev = chip->gpiodev;
+ struct gpio_device *gdev = gc->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
if (!pin_range) {
- chip_err(chip, "failed to allocate pin ranges\n");
+ chip_err(gc, "failed to allocate pin ranges\n");
return -ENOMEM;
}
/* Use local offset as range ID */
pin_range->range.id = gpio_offset;
- pin_range->range.gc = chip;
- pin_range->range.name = chip->label;
+ pin_range->range.gc = gc;
+ pin_range->range.name = gc->label;
pin_range->range.base = gdev->base + gpio_offset;
pin_range->range.pin_base = pin_offset;
pin_range->range.npins = npins;
@@ -2738,11 +2913,11 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
&pin_range->range);
if (IS_ERR(pin_range->pctldev)) {
ret = PTR_ERR(pin_range->pctldev);
- chip_err(chip, "could not create pin range\n");
+ chip_err(gc, "could not create pin range\n");
kfree(pin_range);
return ret;
}
- chip_dbg(chip, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
+ chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
gpio_offset, gpio_offset + npins - 1,
pinctl_name,
pin_offset, pin_offset + npins - 1);
@@ -2755,12 +2930,12 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);
/**
* gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings
- * @chip: the chip to remove all the mappings for
+ * @gc: the chip to remove all the mappings for
*/
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+void gpiochip_remove_pin_ranges(struct gpio_chip *gc)
{
struct gpio_pin_range *pin_range, *tmp;
- struct gpio_device *gdev = chip->gpiodev;
+ struct gpio_device *gdev = gc->gpiodev;
list_for_each_entry_safe(pin_range, tmp, &gdev->pin_ranges, node) {
list_del(&pin_range->node);
@@ -2779,7 +2954,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
*/
static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
{
- struct gpio_chip *chip = desc->gdev->chip;
+ struct gpio_chip *gc = desc->gdev->chip;
int ret;
unsigned long flags;
unsigned offset;
@@ -2805,12 +2980,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
goto done;
}
- if (chip->request) {
- /* chip->request may sleep */
+ if (gc->request) {
+ /* gc->request may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
offset = gpio_chip_hwgpio(desc);
- if (gpiochip_line_is_valid(chip, offset))
- ret = chip->request(chip, offset);
+ if (gpiochip_line_is_valid(gc, offset))
+ ret = gc->request(gc, offset);
else
ret = -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
@@ -2822,8 +2997,8 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
goto done;
}
}
- if (chip->get_direction) {
- /* chip->get_direction may sleep */
+ if (gc->get_direction) {
+ /* gc->get_direction may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
gpiod_get_direction(desc);
spin_lock_irqsave(&gpio_lock, flags);
@@ -2897,7 +3072,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
{
bool ret = false;
unsigned long flags;
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
might_sleep();
@@ -2905,12 +3080,12 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
spin_lock_irqsave(&gpio_lock, flags);
- chip = desc->gdev->chip;
- if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
- if (chip->free) {
+ gc = desc->gdev->chip;
+ if (gc && test_bit(FLAG_REQUESTED, &desc->flags)) {
+ if (gc->free) {
spin_unlock_irqrestore(&gpio_lock, flags);
- might_sleep_if(chip->can_sleep);
- chip->free(chip, gpio_chip_hwgpio(desc));
+ might_sleep_if(gc->can_sleep);
+ gc->free(gc, gpio_chip_hwgpio(desc));
spin_lock_irqsave(&gpio_lock, flags);
}
kfree_const(desc->label);
@@ -2923,10 +3098,16 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
clear_bit(FLAG_PULL_DOWN, &desc->flags);
clear_bit(FLAG_BIAS_DISABLE, &desc->flags);
clear_bit(FLAG_IS_HOGGED, &desc->flags);
+#ifdef CONFIG_OF_DYNAMIC
+ desc->hog = NULL;
+#endif
ret = true;
}
spin_unlock_irqrestore(&gpio_lock, flags);
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_RELEASED, desc);
+
return ret;
}
@@ -2942,7 +3123,7 @@ void gpiod_free(struct gpio_desc *desc)
/**
* gpiochip_is_requested - return string iff signal was requested
- * @chip: controller managing the signal
+ * @gc: controller managing the signal
* @offset: of signal within controller's 0..(ngpio - 1) range
*
* Returns NULL if the GPIO is not currently requested, else a string.
@@ -2953,14 +3134,16 @@ void gpiod_free(struct gpio_desc *desc)
* help with diagnostics, and knowing that the signal is used as a GPIO
* can help avoid accidentally multiplexing it to another controller.
*/
-const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
+const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned offset)
{
struct gpio_desc *desc;
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return NULL;
- desc = &chip->gpiodev->descs[offset];
+ desc = gpiochip_get_desc(gc, offset);
+ if (IS_ERR(desc))
+ return NULL;
if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
return NULL;
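As a usage sketch, a chip's dbg_show() callback (the hook appears near the end of this file) could walk its lines with this helper to report which ones are in use; the function name here is hypothetical:

static void example_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
	unsigned int i;

	for (i = 0; i < gc->ngpio; i++) {
		const char *label = gpiochip_is_requested(gc, i);

		if (label)	/* NULL means the line is free */
			seq_printf(s, "gpio-%u in use as \"%s\"\n", i, label);
	}
}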
@@ -2970,7 +3153,7 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
/**
* gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
- * @chip: GPIO chip
+ * @gc: GPIO chip
* @hwnum: hardware number of the GPIO for which to request the descriptor
* @label: label for the GPIO
* @lflags: lookup flags for this GPIO or 0 if default, this can be used to
@@ -2989,17 +3172,17 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error
* code on failure.
*/
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
unsigned int hwnum,
const char *label,
enum gpio_lookup_flags lflags,
enum gpiod_flags dflags)
{
- struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum);
+ struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum);
int ret;
if (IS_ERR(desc)) {
- chip_err(chip, "failed to get GPIO descriptor\n");
+ chip_err(gc, "failed to get GPIO descriptor\n");
return desc;
}
@@ -3009,7 +3192,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret) {
- chip_err(chip, "setup of own GPIO %s failed\n", label);
+ chip_err(gc, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
return ERR_PTR(ret);
}
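A sketch of the intended caller — a gpio_chip driver claiming one of its own lines after registration; the hardware number and label are illustrative only:

	struct gpio_desc *desc;

	desc = gpiochip_request_own_desc(gc, 0, "internal-reset",
					 GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);
	/* ... drive the line, then gpiochip_free_own_desc(desc) when done */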
@@ -3051,9 +3234,9 @@ static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
return gc->set_config(gc, offset, config);
}
-static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
- enum pin_config_param mode)
+static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode)
{
+ struct gpio_chip *gc = desc->gdev->chip;
unsigned long config;
unsigned arg;
@@ -3068,10 +3251,10 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
}
config = PIN_CONF_PACKED(mode, arg);
- return gpio_do_set_config(gc, offset, config);
+ return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
}
-static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
+static int gpio_set_bias(struct gpio_desc *desc)
{
int bias = 0;
int ret = 0;
@@ -3084,7 +3267,7 @@ static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
bias = PIN_CONFIG_BIAS_PULL_DOWN;
if (bias) {
- ret = gpio_set_config(chip, gpio_chip_hwgpio(desc), bias);
+ ret = gpio_set_config(desc, bias);
if (ret != -ENOTSUPP)
return ret;
}
@@ -3102,18 +3285,18 @@ static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
*/
int gpiod_direction_input(struct gpio_desc *desc)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
int ret = 0;
VALIDATE_DESC(desc);
- chip = desc->gdev->chip;
+ gc = desc->gdev->chip;
/*
* It is legal to have no .get() and .direction_input() specified if
* the chip is output-only, but you can't specify .direction_input()
* and not support the .get() operation; that doesn't make sense.
*/
- if (!chip->get && chip->direction_input) {
+ if (!gc->get && gc->direction_input) {
gpiod_warn(desc,
"%s: missing get() but have direction_input()\n",
__func__);
@@ -3126,10 +3309,10 @@ int gpiod_direction_input(struct gpio_desc *desc)
* direction (if .get_direction() is supported) else we silently
* assume we are in input mode after this.
*/
- if (chip->direction_input) {
- ret = chip->direction_input(chip, gpio_chip_hwgpio(desc));
- } else if (chip->get_direction &&
- (chip->get_direction(chip, gpio_chip_hwgpio(desc)) != 1)) {
+ if (gc->direction_input) {
+ ret = gc->direction_input(gc, gpio_chip_hwgpio(desc));
+ } else if (gc->get_direction &&
+ (gc->get_direction(gc, gpio_chip_hwgpio(desc)) != 1)) {
gpiod_warn(desc,
"%s: missing direction_input() operation and line is output\n",
__func__);
@@ -3137,7 +3320,7 @@ int gpiod_direction_input(struct gpio_desc *desc)
}
if (ret == 0) {
clear_bit(FLAG_IS_OUT, &desc->flags);
- ret = gpio_set_bias(chip, desc);
+ ret = gpio_set_bias(desc);
}
trace_gpio_direction(desc_to_gpio(desc), 1, ret);
@@ -3221,7 +3404,6 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- struct gpio_chip *gc;
int ret;
VALIDATE_DESC(desc);
@@ -3239,11 +3421,9 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
return -EIO;
}
- gc = desc->gdev->chip;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
/* First see if we can enable open drain in hardware */
- ret = gpio_set_config(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_OPEN_DRAIN);
+ ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN);
if (!ret)
goto set_output_value;
/* Emulate open drain by not actively driving the line high */
@@ -3253,8 +3433,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
}
}
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
- ret = gpio_set_config(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_OPEN_SOURCE);
+ ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
/* Emulate open source by not actively driving the line low */
@@ -3263,12 +3442,11 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
goto set_output_flag;
}
} else {
- gpio_set_config(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_PUSH_PULL);
+ gpio_set_config(desc, PIN_CONFIG_DRIVE_PUSH_PULL);
}
set_output_value:
- ret = gpio_set_bias(gc, desc);
+ ret = gpio_set_bias(desc);
if (ret)
return ret;
return gpiod_direction_output_raw_commit(desc, value);
@@ -3287,6 +3465,26 @@ set_output_flag:
EXPORT_SYMBOL_GPL(gpiod_direction_output);
/**
+ * gpiod_set_config - sets @config for a GPIO
+ * @desc: descriptor of the GPIO for which to set the configuration
+ * @config: Same packed config format as generic pinconf
+ *
+ * Returns:
+ * 0 on success, %-ENOTSUPP if the controller doesn't support setting the
+ * configuration.
+ */
+int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
+{
+ struct gpio_chip *gc;
+
+ VALIDATE_DESC(desc);
+ gc = desc->gdev->chip;
+
+ return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
+}
+EXPORT_SYMBOL_GPL(gpiod_set_config);
+
+/**
* gpiod_set_debounce - sets @debounce time for a GPIO
* @desc: descriptor of the GPIO for which to set debounce time
* @debounce: debounce time in microseconds
@@ -3297,14 +3495,10 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output);
*/
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
- struct gpio_chip *chip;
- unsigned long config;
-
- VALIDATE_DESC(desc);
- chip = desc->gdev->chip;
+ unsigned long config;
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config);
+ return gpiod_set_config(desc, config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
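With this change gpiod_set_debounce() becomes a thin wrapper over the newly exported gpiod_set_config(), and a consumer can pack any generic pinconf parameter the same way. A sketch, assuming a valid descriptor:

static int example_debounce(struct gpio_desc *desc)
{
	/* the same packing gpiod_set_debounce() now performs internally */
	unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
						     5000 /* us */);

	return gpiod_set_config(desc, cfg);	/* -ENOTSUPP if unsupported */
}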
@@ -3318,7 +3512,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_debounce);
*/
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
unsigned long packed;
int gpio;
int rc;
@@ -3331,14 +3525,14 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
- chip = desc->gdev->chip;
- if (!chip->set_config)
+ gc = desc->gdev->chip;
+ if (!gc->set_config)
return 0;
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = gpio_do_set_config(chip, gpio, packed);
+ rc = gpio_do_set_config(gc, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
@@ -3397,28 +3591,28 @@ EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
int offset;
int value;
- chip = desc->gdev->chip;
+ gc = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
- value = chip->get ? chip->get(chip, offset) : -EIO;
+ value = gc->get ? gc->get(gc, offset) : -EIO;
value = value < 0 ? value : !!value;
trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
-static int gpio_chip_get_multiple(struct gpio_chip *chip,
+static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
- if (chip->get_multiple) {
- return chip->get_multiple(chip, mask, bits);
- } else if (chip->get) {
+ if (gc->get_multiple) {
+ return gc->get_multiple(gc, mask, bits);
+ } else if (gc->get) {
int i, value;
- for_each_set_bit(i, mask, chip->ngpio) {
- value = chip->get(chip, i);
+ for_each_set_bit(i, mask, gc->ngpio) {
+ value = gc->get(gc, i);
if (value < 0)
return value;
__assign_bit(i, bits, value);
@@ -3466,26 +3660,26 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
}
while (i < array_size) {
- struct gpio_chip *chip = desc_array[i]->gdev->chip;
+ struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
int first, j, ret;
- if (likely(chip->ngpio <= FASTPATH_NGPIO)) {
+ if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
} else {
- mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio),
+ mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio),
sizeof(*mask),
can_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!mask)
return -ENOMEM;
}
- bits = mask + BITS_TO_LONGS(chip->ngpio);
- bitmap_zero(mask, chip->ngpio);
+ bits = mask + BITS_TO_LONGS(gc->ngpio);
+ bitmap_zero(mask, gc->ngpio);
if (!can_sleep)
- WARN_ON(chip->can_sleep);
+ WARN_ON(gc->can_sleep);
/* collect all inputs belonging to the same chip */
first = i;
@@ -3500,9 +3694,9 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
i = find_next_zero_bit(array_info->get_mask,
array_size, i);
} while ((i < array_size) &&
- (desc_array[i]->gdev->chip == chip));
+ (desc_array[i]->gdev->chip == gc));
- ret = gpio_chip_get_multiple(chip, mask, bits);
+ ret = gpio_chip_get_multiple(gc, mask, bits);
if (ret) {
if (mask != fastpath)
kfree(mask);
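For context, the consumer-facing path this fastpath serves — reading several lines of one chip in a single call. A sketch; the "buttons" con_id is hypothetical:

	struct gpio_descs *btns;
	unsigned long values = 0;

	btns = gpiod_get_array(dev, "buttons", GPIOD_IN);
	if (IS_ERR(btns))
		return PTR_ERR(btns);

	/* btns->info is the gpio_array that enables the fast bitmap path */
	return gpiod_get_array_value(btns->ndescs, btns->desc, btns->info,
				     &values);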
@@ -3640,13 +3834,13 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0;
- struct gpio_chip *chip = desc->gdev->chip;
+ struct gpio_chip *gc = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
- ret = chip->direction_input(chip, offset);
+ ret = gc->direction_input(gc, offset);
} else {
- ret = chip->direction_output(chip, offset, 0);
+ ret = gc->direction_output(gc, offset, 0);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
}
@@ -3665,15 +3859,15 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0;
- struct gpio_chip *chip = desc->gdev->chip;
+ struct gpio_chip *gc = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
- ret = chip->direction_output(chip, offset, 1);
+ ret = gc->direction_output(gc, offset, 1);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
} else {
- ret = chip->direction_input(chip, offset);
+ ret = gc->direction_input(gc, offset);
}
trace_gpio_direction(desc_to_gpio(desc), !value, ret);
if (ret < 0)
@@ -3684,33 +3878,34 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
- chip = desc->gdev->chip;
+ gc = desc->gdev->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- chip->set(chip, gpio_chip_hwgpio(desc), value);
+ gc->set(gc, gpio_chip_hwgpio(desc), value);
}
/*
* set multiple outputs on the same chip;
* use the chip's set_multiple function if available;
* otherwise set the outputs sequentially;
+ * @gc: the GPIO chip we operate on
* @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
* defines which outputs are to be changed
* @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
* defines the values the outputs specified by mask are to be set to
*/
-static void gpio_chip_set_multiple(struct gpio_chip *chip,
+static void gpio_chip_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
- if (chip->set_multiple) {
- chip->set_multiple(chip, mask, bits);
+ if (gc->set_multiple) {
+ gc->set_multiple(gc, mask, bits);
} else {
unsigned int i;
/* set outputs if the corresponding mask bit is set */
- for_each_set_bit(i, mask, chip->ngpio)
- chip->set(chip, i, test_bit(i, bits));
+ for_each_set_bit(i, mask, gc->ngpio)
+ gc->set(gc, i, test_bit(i, bits));
}
}
@@ -3749,26 +3944,26 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
}
while (i < array_size) {
- struct gpio_chip *chip = desc_array[i]->gdev->chip;
+ struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
int count = 0;
- if (likely(chip->ngpio <= FASTPATH_NGPIO)) {
+ if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
} else {
- mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio),
+ mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio),
sizeof(*mask),
can_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!mask)
return -ENOMEM;
}
- bits = mask + BITS_TO_LONGS(chip->ngpio);
- bitmap_zero(mask, chip->ngpio);
+ bits = mask + BITS_TO_LONGS(gc->ngpio);
+ bitmap_zero(mask, gc->ngpio);
if (!can_sleep)
- WARN_ON(chip->can_sleep);
+ WARN_ON(gc->can_sleep);
do {
struct gpio_desc *desc = desc_array[i];
@@ -3804,10 +3999,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
i = find_next_zero_bit(array_info->set_mask,
array_size, i);
} while ((i < array_size) &&
- (desc_array[i]->gdev->chip == chip));
+ (desc_array[i]->gdev->chip == gc));
/* push collected bits to outputs */
if (count != 0)
- gpio_chip_set_multiple(chip, mask, bits);
+ gpio_chip_set_multiple(gc, mask, bits);
if (mask != fastpath)
kfree(mask);
@@ -3969,7 +4164,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
*/
int gpiod_to_irq(const struct gpio_desc *desc)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
int offset;
/*
@@ -3980,10 +4175,10 @@ int gpiod_to_irq(const struct gpio_desc *desc)
if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
return -EINVAL;
- chip = desc->gdev->chip;
+ gc = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
- if (chip->to_irq) {
- int retirq = chip->to_irq(chip, offset);
+ if (gc->to_irq) {
+ int retirq = gc->to_irq(gc, offset);
/* Zero means NO_IRQ */
if (!retirq)
@@ -3997,17 +4192,17 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
/**
* gpiochip_lock_as_irq() - lock a GPIO to be used as IRQ
- * @chip: the chip the GPIO to lock belongs to
+ * @gc: the chip the GPIO to lock belongs to
* @offset: the offset of the GPIO to lock as IRQ
*
* This is used directly by GPIO drivers that want to lock down
* a certain GPIO line to be used for IRQs.
*/
-int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_desc *desc;
- desc = gpiochip_get_desc(chip, offset);
+ desc = gpiochip_get_desc(gc, offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -4015,18 +4210,20 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
* If it's fast: flush the direction setting if something changed
* behind our back
*/
- if (!chip->can_sleep && chip->get_direction) {
+ if (!gc->can_sleep && gc->get_direction) {
int dir = gpiod_get_direction(desc);
if (dir < 0) {
- chip_err(chip, "%s: cannot get GPIO direction\n",
+ chip_err(gc, "%s: cannot get GPIO direction\n",
__func__);
return dir;
}
}
- if (test_bit(FLAG_IS_OUT, &desc->flags)) {
- chip_err(chip,
+ /* To be valid for IRQ the line needs to be input or open drain */
+ if (test_bit(FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ chip_err(gc,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
@@ -4049,17 +4246,17 @@ EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
/**
* gpiochip_unlock_as_irq() - unlock a GPIO used as IRQ
- * @chip: the chip the GPIO to lock belongs to
+ * @gc: the chip the GPIO to lock belongs to
* @offset: the offset of the GPIO to lock as IRQ
*
* This is used directly by GPIO drivers that want to indicate
* that a certain GPIO is no longer used exclusively for IRQ.
*/
-void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_desc *desc;
- desc = gpiochip_get_desc(chip, offset);
+ desc = gpiochip_get_desc(gc, offset);
if (IS_ERR(desc))
return;
@@ -4072,9 +4269,9 @@ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
-void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset)
{
- struct gpio_desc *desc = gpiochip_get_desc(chip, offset);
+ struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
!WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags)))
@@ -4082,75 +4279,80 @@ void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_disable_irq);
-void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
{
- struct gpio_desc *desc = gpiochip_get_desc(chip, offset);
+ struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
!WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags));
+ /*
+ * We must not be output when using IRQ UNLESS we are
+ * open drain.
+ */
+ WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
}
}
EXPORT_SYMBOL_GPL(gpiochip_enable_irq);
-bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset)
+bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
-int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset)
+int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset)
{
int ret;
- if (!try_module_get(chip->gpiodev->owner))
+ if (!try_module_get(gc->gpiodev->owner))
return -ENODEV;
- ret = gpiochip_lock_as_irq(chip, offset);
+ ret = gpiochip_lock_as_irq(gc, offset);
if (ret) {
- chip_err(chip, "unable to lock HW IRQ %u for IRQ\n", offset);
- module_put(chip->gpiodev->owner);
+ chip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset);
+ module_put(gc->gpiodev->owner);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_reqres_irq);
-void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset)
{
- gpiochip_unlock_as_irq(chip, offset);
- module_put(chip->gpiodev->owner);
+ gpiochip_unlock_as_irq(gc, offset);
+ module_put(gc->gpiodev->owner);
}
EXPORT_SYMBOL_GPL(gpiochip_relres_irq);
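gpiochip_reqres_irq()/gpiochip_relres_irq() are meant to back an irq_chip's resource hooks; a sketch of the canonical pair (the function names are made up):

static int example_irq_reqres(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	return gpiochip_reqres_irq(gc, irqd_to_hwirq(d));
}

static void example_irq_relres(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_relres_irq(gc, irqd_to_hwirq(d));
}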
-bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset)
+bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_DRAIN, &chip->gpiodev->descs[offset].flags);
+ return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain);
-bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset)
+bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_SOURCE, &chip->gpiodev->descs[offset].flags);
+ return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
-bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
+bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return false;
- return !test_bit(FLAG_TRANSITORY, &chip->gpiodev->descs[offset].flags);
+ return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -4388,7 +4590,7 @@ EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table);
*/
void gpiod_add_hogs(struct gpiod_hog *hogs)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
struct gpiod_hog *hog;
mutex_lock(&gpio_machine_hogs_mutex);
@@ -4400,9 +4602,9 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
* The chip may have been registered earlier, so check if it
* exists and, if so, try to hog the line now.
*/
- chip = find_chip_by_name(hog->chip_label);
- if (chip)
- gpiochip_machine_hog(chip, hog);
+ gc = find_chip_by_name(hog->chip_label);
+ if (gc)
+ gpiochip_machine_hog(gc, hog);
}
mutex_unlock(&gpio_machine_hogs_mutex);
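A board-code sketch of the hog table this function consumes; the chip label, line number and name are invented, while GPIO_HOG() is the existing machine.h helper:

static struct gpiod_hog example_hogs[] = {
	/* chip label, hw line number, line name, lookup flags, dflags */
	GPIO_HOG("gpio-foo", 3, "usb-enable", GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH),
	{ },	/* a NULL chip_label terminates the table */
};

/* gpiod_add_hogs(example_hogs); */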
@@ -4452,7 +4654,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return desc;
for (p = &table->table[0]; p->chip_label; p++) {
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
/* idx must always match exactly */
if (p->idx != idx)
@@ -4462,9 +4664,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (p->con_id && (!con_id || strcmp(p->con_id, con_id)))
continue;
- chip = find_chip_by_name(p->chip_label);
+ gc = find_chip_by_name(p->chip_label);
- if (!chip) {
+ if (!gc) {
/*
* As the lookup table indicates a chip with
* p->chip_label should exist, assume it may
@@ -4477,15 +4679,15 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return ERR_PTR(-EPROBE_DEFER);
}
- if (chip->ngpio <= p->chip_hwnum) {
+ if (gc->ngpio <= p->chip_hwnum) {
dev_err(dev,
"requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
- idx, p->chip_hwnum, chip->ngpio - 1,
- chip->label);
+ idx, p->chip_hwnum, gc->ngpio - 1,
+ gc->label);
return ERR_PTR(-EINVAL);
}
- desc = gpiochip_get_desc(chip, p->chip_hwnum);
+ desc = gpiochip_get_desc(gc, p->chip_hwnum);
*flags = p->flags;
return desc;
@@ -4771,6 +4973,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return ERR_PTR(ret);
}
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
return desc;
}
EXPORT_SYMBOL_GPL(gpiod_get_index);
@@ -4836,6 +5041,9 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
return ERR_PTR(ret);
}
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
return desc;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -4880,20 +5088,20 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
struct gpio_desc *local_desc;
int hwnum;
int ret;
- chip = gpiod_to_chip(desc);
+ gc = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);
- local_desc = gpiochip_request_own_desc(chip, hwnum, name,
+ local_desc = gpiochip_request_own_desc(gc, hwnum, name,
lflags, dflags);
if (IS_ERR(local_desc)) {
ret = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
- name, chip->label, hwnum, ret);
+ name, gc->label, hwnum, ret);
return ret;
}
@@ -4911,15 +5119,15 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/**
* gpiochip_free_hogs - Scan gpio-controller chip and release GPIO hogs
- * @chip: gpio chip to act on
+ * @gc: gpio chip to act on
*/
-static void gpiochip_free_hogs(struct gpio_chip *chip)
+static void gpiochip_free_hogs(struct gpio_chip *gc)
{
int id;
- for (id = 0; id < chip->ngpio; id++) {
- if (test_bit(FLAG_IS_HOGGED, &chip->gpiodev->descs[id].flags))
- gpiochip_free_own_desc(&chip->gpiodev->descs[id]);
+ for (id = 0; id < gc->ngpio; id++) {
+ if (test_bit(FLAG_IS_HOGGED, &gc->gpiodev->descs[id].flags))
+ gpiochip_free_own_desc(&gc->gpiodev->descs[id]);
}
}
@@ -4942,7 +5150,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
struct gpio_desc *desc;
struct gpio_descs *descs;
struct gpio_array *array_info = NULL;
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
int count, bitmap_size;
count = gpiod_count(dev, con_id);
@@ -4962,7 +5170,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
descs->desc[descs->ndescs] = desc;
- chip = gpiod_to_chip(desc);
+ gc = gpiod_to_chip(desc);
/*
* If pin hardware number of array member 0 is also 0, select
* its chip as a candidate for fast bitmap processing path.
@@ -4970,8 +5178,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
struct gpio_descs *array;
- bitmap_size = BITS_TO_LONGS(chip->ngpio > count ?
- chip->ngpio : count);
+ bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
+ gc->ngpio : count);
array = kzalloc(struct_size(descs, desc, count) +
struct_size(array_info, invert_mask,
@@ -4994,7 +5202,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->desc = descs->desc;
array_info->size = count;
- array_info->chip = chip;
+ array_info->chip = gc;
bitmap_set(array_info->get_mask, descs->ndescs,
count - descs->ndescs);
bitmap_set(array_info->set_mask, descs->ndescs,
@@ -5002,7 +5210,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
descs->info = array_info;
}
/* Unmark array members which don't belong to the 'fast' chip */
- if (array_info && array_info->chip != chip) {
+ if (array_info && array_info->chip != gc) {
__clear_bit(descs->ndescs, array_info->get_mask);
__clear_bit(descs->ndescs, array_info->set_mask);
}
@@ -5027,8 +5235,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
}
} else if (array_info) {
/* Exclude open drain or open source from fast output */
- if (gpiochip_line_is_open_drain(chip, descs->ndescs) ||
- gpiochip_line_is_open_source(chip, descs->ndescs))
+ if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
+ gpiochip_line_is_open_source(gc, descs->ndescs))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require inversion */
@@ -5116,10 +5324,16 @@ static int __init gpiolib_dev_init(void)
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
bus_unregister(&gpio_bus_type);
- } else {
- gpiolib_initialized = true;
- gpiochip_setup_devs();
+ return ret;
}
+
+ gpiolib_initialized = true;
+ gpiochip_setup_devs();
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC) && IS_ENABLED(CONFIG_OF_GPIO)
+ WARN_ON(of_reconfig_notifier_register(&gpio_of_notifier));
+#endif /* CONFIG_OF_DYNAMIC && CONFIG_OF_GPIO */
+
return ret;
}
core_initcall(gpiolib_dev_init);
@@ -5129,7 +5343,7 @@ core_initcall(gpiolib_dev_init);
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
unsigned i;
- struct gpio_chip *chip = gdev->chip;
+ struct gpio_chip *gc = gdev->chip;
unsigned gpio = gdev->base;
struct gpio_desc *gdesc = &gdev->descs[0];
bool is_out;
@@ -5152,7 +5366,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s",
gpio, gdesc->name ? gdesc->name : "", gdesc->label,
is_out ? "out" : "in ",
- chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? ",
+ gc->get ? (gc->get(gc, i) ? "hi" : "lo") : "? ",
is_irq ? "IRQ " : "",
active_low ? "ACTIVE LOW" : "");
seq_printf(s, "\n");
@@ -5204,10 +5418,10 @@ static void gpiolib_seq_stop(struct seq_file *s, void *v)
static int gpiolib_seq_show(struct seq_file *s, void *v)
{
struct gpio_device *gdev = v;
- struct gpio_chip *chip = gdev->chip;
+ struct gpio_chip *gc = gdev->chip;
struct device *parent;
- if (!chip) {
+ if (!gc) {
seq_printf(s, "%s%s: (dangling chip)", (char *)s->private,
dev_name(&gdev->dev));
return 0;
@@ -5216,19 +5430,19 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
seq_printf(s, "%s%s: GPIOs %d-%d", (char *)s->private,
dev_name(&gdev->dev),
gdev->base, gdev->base + gdev->ngpio - 1);
- parent = chip->parent;
+ parent = gc->parent;
if (parent)
seq_printf(s, ", parent: %s/%s",
parent->bus ? parent->bus->name : "no-bus",
dev_name(parent));
- if (chip->label)
- seq_printf(s, ", %s", chip->label);
- if (chip->can_sleep)
+ if (gc->label)
+ seq_printf(s, ", %s", gc->label);
+ if (gc->can_sleep)
seq_printf(s, ", can sleep");
seq_printf(s, ":\n");
- if (chip->dbg_show)
- chip->dbg_show(s, chip);
+ if (gc->dbg_show)
+ gc->dbg_show(s, gc);
else
gpiolib_dbg_show(s, gdev);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 3e0aab2945d8..853ce681b4a4 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -56,6 +56,7 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
+ struct atomic_notifier_head notifier;
#ifdef CONFIG_PINCTRL
/*
@@ -119,6 +120,9 @@ struct gpio_desc {
const char *label;
/* Name of the GPIO */
const char *name;
+#ifdef CONFIG_OF_DYNAMIC
+ struct device_node *hog;
+#endif
};
int gpiod_request(struct gpio_desc *desc, const char *label);
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index f17d01f076c7..835c88318cec 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-y += drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
+obj-$(CONFIG_TRACE_GPU_MEM) += trace/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d0aa6cff2e02..43594978958e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -54,9 +54,6 @@ config DRM_DEBUG_MM
If in doubt, say "N".
-config DRM_EXPORT_FOR_TESTS
- bool
-
config DRM_DEBUG_SELFTEST
tristate "kselftests for DRM"
depends on DRM
@@ -389,6 +386,8 @@ source "drivers/gpu/drm/aspeed/Kconfig"
source "drivers/gpu/drm/mcde/Kconfig"
+source "drivers/gpu/drm/tidss/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
@@ -468,6 +467,9 @@ config DRM_SAVAGE
endif # DRM_LEGACY
+config DRM_EXPORT_FOR_TESTS
+ bool
+
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
config DRM_PANEL_ORIENTATION_QUIRKS
tristate
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6493088a0fdd..7f72ef5e7811 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
+drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
+ drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
@@ -122,3 +123,4 @@ obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
+obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 13340f353ea8..216d932a7831 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: MIT
menu "ACP (Audio CoProcessor) Configuration"
+ depends on DRM_AMDGPU
config DRM_AMD_ACP
bool "Enable AMD Audio CoProcessor IP support"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index da3bcff61b97..8ac1581a6b53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -579,6 +579,7 @@ struct amdgpu_asic_funcs {
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+ void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
/* check if the asic needs a full reset or if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
/* initialize doorbell layout for specific asic*/
@@ -944,6 +945,7 @@ struct amdgpu_device {
/* s3/s4 mask */
bool in_suspend;
+ bool in_hibernate;
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
@@ -969,6 +971,7 @@ struct amdgpu_device {
int pstate;
/* enable runtime pm on the device */
bool runpm;
+ bool in_runpm;
bool pm_sysfs_en;
bool ucode_sysfs_en;
@@ -992,6 +995,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
@@ -1174,9 +1179,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8609287620ea..abfbe89e805e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
+#include <uapi/linux/kfd_ioctl.h>
static const unsigned int compute_vmid_bitmap = 0xFF00;
@@ -126,7 +127,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
/* this is going to have a few of the MSBs set that we need to
* clear
*/
- bitmap_complement(gpu_resources.queue_bitmap,
+ bitmap_complement(gpu_resources.cp_queue_bitmap,
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
@@ -137,7 +138,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
- clear_bit(i, gpu_resources.queue_bitmap);
+ clear_bit(i, gpu_resources.cp_queue_bitmap);
amdgpu_doorbell_get_kfd_info(adev,
&gpu_resources.doorbell_physical_address,
@@ -178,18 +179,18 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev);
+ kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev);
+ r = kgd2kfd_resume(adev->kfd.dev, run_pm);
return r;
}
@@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9)
+ void **cpu_ptr, bool cp_mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
- if (mqd_gfx9)
- bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+ if (cp_mqd_gfx9)
+ bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
@@ -402,7 +403,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
if (amdgpu_sriov_vf(adev))
mem_info->mem_clk_max = adev->clock.default_mclk / 100;
- else if (adev->powerplay.pp_funcs) {
+ else if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0;
else
@@ -427,7 +428,7 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
/* the sclk is in quantas of 10kHz */
if (amdgpu_sriov_vf(adev))
return adev->clock.default_sclk / 100;
- else if (adev->powerplay.pp_funcs)
+ else if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100;
else
return 100;
@@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
metadata_size, &metadata_flags);
if (flags) {
*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM
+ : KFD_IOC_ALLOC_MEM_FLAGS_GTT;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
}
out_put:
@@ -525,6 +527,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
return adev->gmc.xgmi.hive_id;
}
+
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->unique_id;
+}
+
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
@@ -647,13 +657,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- uint32_t flush_type = 0;
+ const uint32_t flush_type = 0;
bool all_hub = false;
- if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20)
- flush_type = 2;
-
if (adev->family == AMDGPU_FAMILY_AI)
all_hub = true;
@@ -677,6 +683,11 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ return 0;
+}
+
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
@@ -713,11 +724,11 @@ void kgd2kfd_exit(void)
{
}
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 47b0f2957d1f..13feb313e9b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
struct amdkfd_process_info {
/* List head of all VMs that belong to a KFD process */
@@ -122,8 +123,8 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -171,6 +172,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
@@ -240,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config);
+
/* KGD2KFD callbacks */
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
@@ -249,8 +254,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd);
-int kgd2kfd_resume(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 4bcc175a149d..6529caca88fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -79,7 +79,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
- /* fall through */
+ fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
@@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index a7b17c8deb00..4ec6d0c03201 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -42,38 +42,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
-#if 0
-/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
- * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
- * changes commented out related code, doing the same here for now but
- * need to sync with Ken et al
- */
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-#endif
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -805,7 +773,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
+ .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 8f052e98a3c6..0b7e78748540 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19a10db93d68..ccd635b812b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -41,31 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 8562afe5b761..df841c2ac5e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -48,28 +48,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -736,7 +714,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
+ .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 63d3e6683dfe..aedf67d57449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa8ac9d19a7a..6a5b91d23fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -29,6 +29,7 @@
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
+#include <uapi/linux/kfd_ioctl.h>
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -276,6 +277,42 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ struct amdgpu_bo *root = bo;
+ struct amdgpu_vm_bo_base *vm_bo;
+ struct amdgpu_vm *vm;
+ struct amdkfd_process_info *info;
+ struct amdgpu_amdkfd_fence *ef;
+ int ret;
+
+ /* We can always get vm_bo from the root PD BO. */
+ while (root->parent)
+ root = root->parent;
+
+ vm_bo = root->vm_bo;
+ if (!vm_bo)
+ return 0;
+
+ vm = vm_bo->vm;
+ if (!vm)
+ return 0;
+
+ info = vm->process_info;
+ if (!info || !info->eviction_fence)
+ return 0;
+
+ ef = container_of(dma_fence_get(&info->eviction_fence->base),
+ struct amdgpu_amdkfd_fence, base);
+
+ BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
+ ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
+ dma_resv_unlock(bo->tbo.base.resv);
+
+ dma_fence_put(&ef->base);
+ return ret;
+}
+
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait)
{
@@ -364,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
- bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
uint32_t mapping_flags;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
switch (adev->asic_type) {
case CHIP_ARCTURUS:
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_adev == adev)
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -847,9 +884,9 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
vm_list_node) {
struct amdgpu_bo *pd = peer_vm->root.base.bo;
- ret = amdgpu_sync_resv(NULL,
- sync, pd->tbo.base.resv,
- AMDGPU_FENCE_OWNER_KFD, false);
+ ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_FENCE_OWNER_KFD);
if (ret)
return ret;
}
@@ -1044,6 +1081,8 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
list_del(&vm->vm_list_node);
mutex_unlock(&process_info->lock);
+ vm->process_info = NULL;
+
/* Release per-process resources when last compute VM is destroyed */
if (!process_info->n_vms) {
WARN_ON(!list_empty(&process_info->kfd_bo_list));
@@ -1122,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
/*
* Check on which domain to allocate BO
*/
- if (flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
- alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
+ alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- } else if (flags & ALLOC_MEM_FLAGS_GTT) {
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
- } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
alloc_flags = 0;
if (!offset || !*offset)
return -EINVAL;
user_addr = untagged_addr(*offset);
- } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
- ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
bo_type = ttm_bo_type_sg;
@@ -1160,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
}
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
+ (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
/* Workaround for AQL queue wraparound bug. Map the same
* memory twice. That means we only actually allocate half
@@ -1304,7 +1343,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
/* Free the BO*/
- amdgpu_bo_unref(&mem->bo);
+ drm_gem_object_put_unlocked(&mem->bo->tbo.base);
mutex_destroy(&mem->lock);
kfree(mem);
@@ -1642,12 +1681,15 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
+
(*mem)->alloc_flags =
((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
- ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
+ | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
- (*mem)->bo = amdgpu_bo_ref(bo);
+ drm_gem_object_get(&bo->tbo.base);
+ (*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
@@ -2204,3 +2246,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
kfree(mem);
return 0;
}
+
+/* Returns GPU-specific tiling mode information */
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ /* Those values are not set from GFX9 onwards */
+ config->num_banks = adev->gfx.config.num_banks;
+ config->num_ranks = adev->gfx.config.num_ranks;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 50dff69a0f6e..b1172d93c99c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = adev->pdev->rom;
+ size_t romlen = adev->pdev->romlen;
+ void __iomem *bios;
adev->bios = NULL;
- bios = pci_platform_rom(adev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- adev->bios = kzalloc(size, GFP_KERNEL);
- if (adev->bios == NULL)
+ adev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!adev->bios)
return false;
- memcpy_fromio(adev->bios, bios, size);
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
- return false;
- }
+ memcpy_fromio(adev->bios, bios, romlen);
+ iounmap(bios);
- adev->bios_size = size;
+ if (!check_atom_bios(adev->bios, romlen))
+ goto free_bios;
+
+ adev->bios_size = romlen;
return true;
+free_bios:
+ kfree(adev->bios);
+ return false;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index a62cbc8199de..f355d9a752d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1461,6 +1461,20 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
return MODE_OK;
}
+static int
+amdgpu_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ int r = 0;
+
+ if (amdgpu_connector->ddc_bus->has_aux) {
+ amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
+ r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
+ }
+
+ return r;
+}
+
static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
.get_modes = amdgpu_connector_dp_get_modes,
.mode_valid = amdgpu_connector_dp_mode_valid,
@@ -1475,6 +1489,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
@@ -1485,6 +1500,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
void
@@ -1931,7 +1947,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_connector_register(connector);
if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a52a084158b1..af91627b19b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -28,6 +28,7 @@
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -415,7 +416,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
/* Don't move this buffer if we have depleted our allowance
* to move it. Don't move anything if the threshold is zero.
*/
- if (p->bytes_moved < p->bytes_moved_threshold) {
+ if (p->bytes_moved < p->bytes_moved_threshold &&
+ (!bo->tbo.base.dma_buf ||
+ list_empty(&bo->tbo.base.dma_buf->attachments))) {
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
@@ -651,16 +654,19 @@ out:
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
int r;
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv;
+ enum amdgpu_sync_mode sync_mode;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
- amdgpu_bo_explicit_sync(bo));
-
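+		/* explicitly synced BOs wait only on explicitly added fences;
+		 * everything else also waits on fences from other owners in
+		 * the reservation object
+		 */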
+ sync_mode = amdgpu_bo_explicit_sync(bo) ?
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
+ &fpriv->vm);
if (r)
return r;
}
@@ -1202,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@@ -1211,7 +1216,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, entity, p->filp);
+ r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
if (r)
goto error_unlock;
@@ -1255,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 94a6c42f29ea..6ed36a2c5f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
+static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+{
+ switch (prio) {
+ case DRM_SCHED_PRIORITY_HIGH_HW:
+ case DRM_SCHED_PRIORITY_KERNEL:
+ return AMDGPU_GFX_PIPE_PRIO_HIGH;
+ default:
+ return AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ }
+}
+
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
+ enum gfx_pipe_priority hw_prio;
enum drm_sched_priority priority;
int r;
@@ -79,46 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
switch (hw_ip) {
- case AMDGPU_HW_IP_GFX:
- sched = &adev->gfx.gfx_ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- scheds = adev->gfx.compute_sched;
- num_scheds = adev->gfx.num_compute_sched;
- break;
- case AMDGPU_HW_IP_DMA:
- scheds = adev->sdma.sdma_sched;
- num_scheds = adev->sdma.num_sdma_sched;
- break;
- case AMDGPU_HW_IP_UVD:
- sched = &adev->uvd.inst[0].ring.sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- sched = &adev->vce.ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- sched = &adev->uvd.inst[0].ring_enc[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- scheds = adev->vcn.vcn_dec_sched;
- num_scheds = adev->vcn.num_vcn_dec_sched;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- scheds = adev->vcn.vcn_enc_sched;
- num_scheds = adev->vcn.num_vcn_enc_sched;
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- scheds = adev->jpeg.jpeg_sched;
- num_scheds = adev->jpeg.num_jpeg_sched;
- break;
+ case AMDGPU_HW_IP_GFX:
+ sched = &adev->gfx.gfx_ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+ scheds = adev->gfx.compute_prio_sched[hw_prio];
+ num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ break;
+ case AMDGPU_HW_IP_DMA:
+ scheds = adev->sdma.sdma_sched;
+ num_scheds = adev->sdma.num_sdma_sched;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ sched = &adev->uvd.inst[0].ring.sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ sched = &adev->vce.ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ sched = &adev->uvd.inst[0].ring_enc[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
+ adev->vcn.num_vcn_dec_sched);
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
+ adev->vcn.num_vcn_enc_sched);
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ scheds = adev->jpeg.jpeg_sched;
+ num_scheds = adev->jpeg.num_jpeg_sched;
+ break;
}
r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -502,6 +519,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence;
}
+static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
+ struct amdgpu_ctx_entity *aentity,
+ int hw_ip,
+ enum drm_sched_priority priority)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ enum gfx_pipe_priority hw_prio;
+ struct drm_gpu_scheduler **scheds = NULL;
+ unsigned num_scheds;
+
+ /* set sw priority */
+ drm_sched_entity_set_priority(&aentity->entity, priority);
+
+ /* set hw priority */
+ if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
+ hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+ scheds = adev->gfx.compute_prio_sched[hw_prio];
+ num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ drm_sched_entity_modify_sched(&aentity->entity, scheds,
+ num_scheds);
+ }
+}
+
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
@@ -514,13 +554,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
ctx->init_priority : ctx->override_priority;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
- struct drm_sched_entity *entity;
-
if (!ctx->entities[i][j])
continue;
- entity = &ctx->entities[i][j]->entity;
- drm_sched_entity_set_priority(entity, ctx_prio);
+ amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
+ i, ctx_prio);
}
}
}
@@ -628,20 +666,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
mutex_destroy(&mgr->lock);
}
+
+static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
+{
+ int num_compute_sched_normal = 0;
+ int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+ int i;
+
+ /* use one drm sched array, gfx.compute_sched to store both high and
+ * normal priority drm compute schedulers */
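+	/* normal priority schedulers fill the array from index 0 upwards,
+	 * high priority ones from the last slot downwards, so the two sets
+	 * never overlap
+	 */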
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ if (!adev->gfx.compute_ring[i].has_high_prio)
+ adev->gfx.compute_sched[num_compute_sched_normal++] =
+ &adev->gfx.compute_ring[i].sched;
+ else
+ adev->gfx.compute_sched[num_compute_sched_high--] =
+ &adev->gfx.compute_ring[i].sched;
+ }
+
+	/* compute rings only have two priorities for now */
+ i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+ adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+ i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+		/* When compute has no high priority rings, fall back to the
+		 * normal priority sched array
+		 */
+ adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+ adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+ } else {
+ adev->gfx.compute_prio_sched[i] =
+			&adev->gfx.compute_sched[num_compute_sched_high + 1];
+ adev->gfx.num_compute_sched[i] =
+ adev->gfx.num_compute_rings - num_compute_sched_normal;
+ }
+}
+
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
int i, j;
+ amdgpu_ctx_init_compute_sched(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
adev->gfx.num_gfx_sched++;
}
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
- adev->gfx.num_compute_sched++;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
adev->sdma.num_sdma_sched++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 337d7cdce8e9..c0f9a651dc06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -31,6 +31,9 @@
#include <drm/drm_debugfs.h>
#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dm_debugfs.h"
+#include "amdgpu_ras.h"
/**
* amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -176,7 +179,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
- WREG32(*pos >> 2, value);
+ amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
}
if (r) {
result = r;
@@ -840,6 +843,55 @@ err:
return result;
}
+/**
+ * amdgpu_debugfs_regs_gfxoff_write - Enable/disable GFXOFF
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit zero to disable or a 32-bit non-zero to enable
+ */
+static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return r;
+ }
+
+		amdgpu_gfx_off_ctrl(adev, !!value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return result;
+}
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -888,6 +940,11 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
+ .owner = THIS_MODULE,
+ .write = amdgpu_debugfs_gfxoff_write,
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -897,6 +954,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_sensors_fops,
&amdgpu_debugfs_wave_fops,
&amdgpu_debugfs_gpr_fops,
+ &amdgpu_debugfs_gfxoff_fops,
};
static const char *debugfs_regs_names[] = {
@@ -908,6 +966,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_sensors",
"amdgpu_wave",
"amdgpu_gpr",
+ "amdgpu_gfxoff",
};
/**
@@ -934,18 +993,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- }
-}
-
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1211,11 +1258,47 @@ failure:
return 0;
}
+static int amdgpu_debugfs_sclk_set(void *data, u64 val)
+{
+ int ret = 0;
+ uint32_t max_freq, min_freq;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
+		if (ret || val > max_freq || val < min_freq)
+			ret = -EINVAL;
+		else
+			ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+	}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
amdgpu_debugfs_ib_preempt, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+ amdgpu_debugfs_sclk_set, "%llu\n");
+
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
+ int r, i;
+
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
adev->ddev->primary->debugfs_root, adev,
@@ -1225,24 +1308,78 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return -EIO;
}
+ adev->smu.debugfs_sclk =
+ debugfs_create_file("amdgpu_force_sclk", 0200,
+ adev->ddev->primary->debugfs_root, adev,
+ &fops_sclk_set);
+ if (!(adev->smu.debugfs_sclk)) {
+ DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
+ return -EIO;
+ }
+
+ /* Register debugfs entries for amdgpu_ttm */
+ r = amdgpu_ttm_debugfs_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init debugfs\n");
+ return r;
+ }
+
+ r = amdgpu_debugfs_pm_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to register debugfs file for dpm!\n");
+ return r;
+ }
+
+	if (amdgpu_debugfs_sa_init(adev))
+		dev_err(adev->dev, "failed to register debugfs file for SA\n");
+
+ if (amdgpu_debugfs_fence_init(adev))
+ dev_err(adev->dev, "fence debugfs file creation failed\n");
+
+ r = amdgpu_debugfs_gem_init(adev);
+ if (r)
+ DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+
+ r = amdgpu_debugfs_regs_init(adev);
+ if (r)
+ DRM_ERROR("registering register debugfs failed (%d).\n", r);
+
+ r = amdgpu_debugfs_firmware_init(adev);
+ if (r)
+ DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (amdgpu_device_has_dc_support(adev)) {
+ if (dtn_debugfs_init(adev))
+ DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
+ }
+#endif
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring)
+ continue;
+
+		if (amdgpu_debugfs_ring_init(adev, ring))
+			DRM_ERROR("Failed to register debugfs file for rings!\n");
+ }
+
+ amdgpu_ras_debugfs_create_all(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
-{
- debugfs_remove(adev->debugfs_preempt);
-}
-
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
return 0;
}
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index f289d28ad6b2..de12d1101526 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,9 +32,8 @@ struct amdgpu_debugfs {
};
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev);
+void amdgpu_debugfs_fini(struct amdgpu_device *adev);
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
const struct drm_info_list *files,
unsigned nfiles);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b8975857d60d..affde2de2a0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -183,20 +183,51 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write)
{
- uint64_t last;
unsigned long flags;
+ uint32_t hi = ~0;
+ uint64_t last;
+
+
+#ifdef CONFIG_64BIT
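+	/* try the CPU-visible VRAM BAR aperture first; anything beyond it is
+	 * accessed through the MM_INDEX/MM_DATA window below
+	 */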
+ last = min(pos + size, adev->gmc.visible_vram_size);
+ if (last > pos) {
+ void __iomem *addr = adev->mman.aper_base_kaddr + pos;
+ size_t count = last - pos;
+
+ if (write) {
+ memcpy_toio(addr, buf, count);
+ mb();
+ amdgpu_asic_flush_hdp(adev, NULL);
+ } else {
+ amdgpu_asic_invalidate_hdp(adev, NULL);
+ mb();
+ memcpy_fromio(buf, addr, count);
+ }
+
+ if (count == size)
+ return;
+
+ pos += count;
+ buf += count / 4;
+ size -= count;
+ }
+#endif
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ for (last = pos + size; pos < last; pos += 4) {
+ uint32_t tmp = pos >> 31;
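+		/* only rewrite MM_INDEX_HI when the upper bits change; this
+		 * saves one MMIO write per dword for accesses below 2GB
+		 */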
- last = size - 4;
- for (last += pos; pos <= last; pos += 4) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+ if (tmp != hi) {
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ hi = tmp;
+ }
if (write)
WREG32_NO_KIQ(mmMM_DATA, *buf++);
else
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
/*
@@ -275,6 +306,26 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
BUG();
}
+static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+{
+ trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
+ if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+ writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+ udelay(500);
+ }
+}
+
/**
* amdgpu_mm_wreg - write to a memory mapped IO register
*
@@ -288,8 +339,6 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
adev->last_mm_index = v;
}
@@ -297,20 +346,26 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
return amdgpu_kiq_wreg(adev, reg, v);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
- writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
+ amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
+/*
+ * amdgpu_mm_wreg_mmio_rlc - write a register either via MMIO or via the RLC path if in range
+ *
+ * This function is invoked only for debugfs register access.
+ */
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
+{
+ if (amdgpu_sriov_fullaccess(adev) &&
+ adev->gfx.rlc.funcs &&
+ adev->gfx.rlc.funcs->is_rlcg_access_range) {
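+		/* registers in the RLCG range must be written through the RLC
+		 * interface when SR-IOV has full GPU access
+		 */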
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
+ if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+ return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
}
+
+ amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}
/**
@@ -1136,7 +1191,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
@@ -1953,8 +2008,24 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
*/
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
- return !!memcmp(adev->gart.ptr, adev->reset_magic,
- AMDGPU_RESET_MAGIC_NUM);
+ if (memcmp(adev->gart.ptr, adev->reset_magic,
+ AMDGPU_RESET_MAGIC_NUM))
+ return true;
+
+ if (!adev->in_gpu_reset)
+ return false;
+
+ /*
+ * For all ASICs with baco/mode1 reset, the VRAM is
+ * always assumed to be lost.
+ */
+ switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_BACO:
+ case AMD_RESET_METHOD_MODE1:
+ return true;
+ default:
+ return false;
+ }
}
/**
@@ -2344,15 +2415,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
- if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
+	if (!amdgpu_sriov_vf(adev)) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
+ }
}
}
-
adev->ip_blocks[i].status.hw = false;
}
@@ -2686,6 +2758,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (adev->asic_reset_res)
goto fail;
+
+ if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+ adev->mmhub.funcs->reset_ras_error_count(adev);
} else {
task_barrier_full(&hive->tb);
@@ -2800,7 +2875,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
- adev->usec_timeout *= 2;
+ adev->usec_timeout *= 10;
adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
@@ -3088,22 +3163,6 @@ fence_driver_init:
} else
adev->ucode_sysfs_en = true;
- r = amdgpu_debugfs_gem_init(adev);
- if (r)
- DRM_ERROR("registering gem debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_regs_init(adev);
- if (r)
- DRM_ERROR("registering register debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_firmware_init(adev);
- if (r)
- DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_init(adev);
- if (r)
- DRM_ERROR("Creating debugfs files failed (%d).\n", r);
-
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -3177,6 +3236,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+	/* make sure the IB test has finished before entering exclusive mode
+	 * to avoid preempting the IB test
+	 */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -3219,13 +3284,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- amdgpu_debugfs_regs_cleanup(adev);
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- amdgpu_debugfs_preempt_cleanup(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
amdgpu_discovery_fini(adev);
}
@@ -3309,12 +3372,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
}
}
- amdgpu_amdkfd_suspend(adev);
-
amdgpu_ras_suspend(adev);
r = amdgpu_device_ip_suspend_phase1(adev);
+ amdgpu_amdkfd_suspend(adev, !fbcon);
+
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
@@ -3393,7 +3456,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
}
}
- r = amdgpu_amdkfd_resume(adev);
+ r = amdgpu_amdkfd_resume(adev, !fbcon);
if (r)
return r;
@@ -3866,8 +3929,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
}
- if (!r && amdgpu_ras_intr_triggered())
+ if (!r && amdgpu_ras_intr_triggered()) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (tmp_adev->mmhub.funcs &&
+ tmp_adev->mmhub.funcs->reset_ras_error_count)
+ tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+ }
+
amdgpu_ras_intr_cleared();
+ }
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (need_full_reset) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index f95092741c38..27d8ae19a7a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -307,7 +307,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
- DRM_INFO("set register base offset for %s\n",
+ DRM_DEBUG("set register base offset for %s\n",
hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->number_instance] =
ip->base_address;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6d520a3eec40..84cee27cd7ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -99,7 +99,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(crtc)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -219,7 +219,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
if (!adev->enable_virtual_display)
work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+ amdgpu_get_vblank_counter_kms(crtc);
/* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -924,3 +924,15 @@ int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
return AMDGPU_CRTC_IRQ_NONE;
}
}
+
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index a59cd47aa6c1..ffeb20f11c07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -223,6 +223,37 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
}
/**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ /* pin buffer into GTT */
+ return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ amdgpu_bo_unpin(bo);
+}
+
+/**
* amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
* @dir: DMA direction
@@ -244,9 +275,19 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct sg_table *sgt;
long r;
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
- if (r)
- return ERR_PTR(r);
+ if (!bo->pin_count) {
+ /* move buffer into GTT */
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return ERR_PTR(r);
+
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ AMDGPU_GEM_DOMAIN_GTT)) {
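+		/* the BO is pinned outside of GTT and cannot be moved there,
+		 * so no DMA mapping can be created for it
+		 */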
+ return ERR_PTR(-EBUSY);
+ }
sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
if (IS_ERR(sgt))
@@ -277,13 +318,9 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
- amdgpu_bo_unpin(bo);
}
/**
@@ -327,9 +364,10 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .dynamic_mapping = true,
.attach = amdgpu_dma_buf_attach,
.detach = amdgpu_dma_buf_detach,
+ .pin = amdgpu_dma_buf_pin,
+ .unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
.unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
@@ -413,6 +451,73 @@ error:
}
/**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_placement placement = {};
+ struct amdgpu_vm_bo_base *bo_base;
+ int r;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ return;
+
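+	/* validating against an empty placement kicks the BO out of its
+	 * current placement, which invalidates the existing mapping
+	 */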
+ r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
+ return;
+ }
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+
+ if (ticket) {
+ /* When we get an error here it means that somebody
+			 * else is holding the VM lock and updating page tables,
+			 * so we can just continue here.
+ */
+ r = dma_resv_lock(resv, ticket);
+ if (r)
+ continue;
+
+ } else {
+ /* TODO: This is more problematic and we actually need
+			 * to allow page table updates without holding the
+ * lock.
+ */
+ if (!dma_resv_trylock(resv))
+ continue;
+ }
+
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (!r)
+ r = amdgpu_vm_handle_moved(adev, vm);
+
+ if (r && r != -EBUSY)
+ DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+ r);
+
+ dma_resv_unlock(resv);
+ }
+}
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .move_notify = amdgpu_dma_buf_move_notify
+};
+
+/**
* amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
* @dev: DRM device
* @dma_buf: Shared DMA buffer
@@ -444,7 +549,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
if (IS_ERR(obj))
return obj;
- attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
+ &amdgpu_dma_buf_attach_ops, obj);
if (IS_ERR(attach)) {
drm_gem_object_put(obj);
return ERR_CAST(attach);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index a2e8c3dfb4f1..ba1bb95a3cf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -1171,3 +1171,20 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+ uint32_t cstate)
+{
+ int ret = 0;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_set_df_cstate(smu, cstate);
+ else if (pp_funcs &&
+ pp_funcs->set_df_cstate)
+ ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 902ca6c00cca..936d85aa0fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -448,6 +448,8 @@ struct amdgpu_pm {
/* powerplay feature */
uint32_t pp_feature;
+ /* Used for I2C access to various EEPROMs on relevant ASICs */
+ struct i2c_adapter smu_i2c;
};
#define R600_SSTU_DFLT 0
@@ -533,4 +535,7 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+ uint32_t cstate);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 42f4febe24c6..a735d79a717b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -85,9 +85,10 @@
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
* - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
* - 3.36.0 - Allow reading more status registers on si/cik
+ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 36
+#define KMS_DRIVER_MINOR 37
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -1021,6 +1022,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct drm_device *dev;
+ struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
int ret, retry = 0;
bool supports_atomic = false;
@@ -1090,6 +1092,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ amdgpu_driver_load_kms(dev, ent->driver_data);
+
retry_init:
ret = drm_dev_register(dev, ent->driver_data);
if (ret == -EAGAIN && ++retry <= 3) {
@@ -1100,6 +1104,11 @@ retry_init:
} else if (ret)
goto err_pci;
+ adev = dev->dev_private;
+ ret = amdgpu_debugfs_init(adev);
+ if (ret)
+ DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
+
return 0;
err_pci:
@@ -1119,9 +1128,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
#endif
DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
- drm_dev_put(dev);
+ amdgpu_driver_unload_kms(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ drm_dev_put(dev);
}
static void
@@ -1171,7 +1181,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_dev->dev_private;
int r;
+ adev->in_hibernate = true;
r = amdgpu_device_suspend(drm_dev, true);
+ adev->in_hibernate = false;
if (r)
return r;
return amdgpu_asic_reset(adev);
@@ -1220,11 +1232,15 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
}
+ adev->in_runpm = true;
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
ret = amdgpu_device_suspend(drm_dev, false);
+ if (ret)
+ return ret;
+
if (amdgpu_device_supports_boco(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
@@ -1278,6 +1294,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ adev->in_runpm = false;
return 0;
}
@@ -1285,24 +1302,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_dev->dev_private;
- struct drm_crtc *crtc;
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ int ret = 1;
if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
+ if (amdgpu_device_has_dc_support(adev)) {
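+		/* with DC, the atomic CRTC state tells us whether any display
+		 * is still active; without DC we fall back to connector DPMS
+		 * state below
+		 */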
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(drm_dev);
+
+ drm_for_each_crtc(crtc, drm_dev) {
+ if (crtc->state->active) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ drm_modeset_unlock_all(drm_dev);
+
+ } else {
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+ ret = -EBUSY;
+ break;
+ }
}
+
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+ mutex_unlock(&drm_dev->mode_config.mutex);
}
+ if (ret == -EBUSY)
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
- /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
- return 1;
+ return ret;
}
long amdgpu_drm_ioctl(struct file *filp,
@@ -1377,32 +1425,15 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-static bool
-amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE,
- .load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms,
- .unload = amdgpu_driver_unload_kms,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = amdgpu_get_crtc_scanout_position,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 2672dc64a310..25ddb482466a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
u32 cpp;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ AMDGPU_GEM_CREATE_VRAM_CLEARED;
info = drm_get_format_info(adev->ddev, mode_cmd);
cpp = info->cpp[0];
@@ -336,15 +335,12 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
&amdgpu_fb_helper_funcs);
- ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
- AMDGPUFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
if (ret) {
kfree(rfbdev);
return ret;
}
- drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
-
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!amdgpu_device_has_dc_support(adev))
drm_helper_disable_unused_functions(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3c01252b1e0e..7531527067df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -503,9 +503,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
- if (amdgpu_debugfs_fence_init(adev))
- dev_err(adev->dev, "fence debugfs file creation failed\n");
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 0f960b498792..6b9c9193cdfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
return adev->gfx.mec.num_mec > 1;
}
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ int queue)
+{
+	/* Policy: make queue 0 of each pipe a high priority compute queue */
+	return (queue == 0);
+}
+
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
@@ -477,7 +485,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
- return amdgpu_ring_test_ring(kiq_ring);
+ return amdgpu_ring_test_helper(kiq_ring);
}
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
@@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
int r;
struct ras_fs_if fs_info = {
.sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
};
struct ras_ih_if ih_info = {
.cb = amdgpu_gfx_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ca17ffb01301..5825692d07e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -41,6 +41,15 @@
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+enum gfx_pipe_priority {
+ AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
+ AMDGPU_GFX_PIPE_PRIO_HIGH,
+ AMDGPU_GFX_PIPE_PRIO_MAX
+};
+
+#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
+#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+
struct amdgpu_mec {
struct amdgpu_bo *hpd_eop_obj;
u64 hpd_eop_gpu_addr;
@@ -151,6 +160,8 @@ struct amdgpu_gfx_config {
unsigned num_gpus;
unsigned multi_gpu_tile_size;
unsigned mc_arb_ramcfg;
+ unsigned num_banks;
+ unsigned num_ranks;
unsigned gb_addr_config;
unsigned num_rbs;
unsigned gs_vgt_table_depth;
@@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs {
u32 queue, u32 vmid);
int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
+ void (*reset_ras_error_count) (struct amdgpu_device *adev);
};
struct sq_work {
@@ -278,8 +290,9 @@ struct amdgpu_gfx {
uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+ struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
- uint32_t num_compute_sched;
+ uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ int queue);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 60655834d649..ccbd7acfc4cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -48,7 +48,6 @@
* produce command buffers which are sent to the kernel and
* put in IBs for execution by the requested ring.
*/
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
/**
* amdgpu_ib_get - request an IB (Indirect Buffer)
@@ -295,9 +294,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
}
adev->ib_pool_ready = true;
- if (amdgpu_debugfs_sa_init(adev)) {
- dev_err(adev->dev, "failed to register debugfs file for SA\n");
- }
+
return 0;
}
@@ -421,7 +418,7 @@ static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
#endif
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d42be880a236..4981e443a884 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
drm_sched_job_cleanup(s_job);
- amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
int r;
if (!f)
@@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 60591dbc2097..fd1dc3236eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
goto done_free;
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_request_full_gpu(adev, false);
-
if (adev->runpm) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -170,10 +167,17 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_supports_boco(dev) &&
- (amdgpu_runtime_pm != 0)) /* enable runpm by default */
+ (amdgpu_runtime_pm != 0)) /* enable runpm by default for boco */
+ adev->runpm = true;
+ else if (amdgpu_device_supports_baco(dev) &&
+ (amdgpu_runtime_pm != 0) &&
+ (adev->asic_type >= CHIP_TOPAZ) &&
+ (adev->asic_type != CHIP_VEGA10) &&
+ (adev->asic_type != CHIP_VEGA20) &&
+ (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */
adev->runpm = true;
else if (amdgpu_device_supports_baco(dev) &&
- (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
+ (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 on CI */
adev->runpm = true;
/* Call ACPI methods: require modeset init
@@ -1110,14 +1114,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
/**
* amdgpu_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int vpos, hpos, stat;
u32 count;
@@ -1177,14 +1182,15 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to enable vblank interrupt for
+ * @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
@@ -1194,13 +1200,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to disable vblank interrupt for
+ * @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
index 676c48c02d77..ead3dc572ec5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "mmhub_err_count",
- .debugfs_name = "mmhub_err_inject",
};
if (!adev->mmhub.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1cd78940cf82..e89fb35fec71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index eb9975f4decb..37ba07e2feb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -612,6 +612,11 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
/* fbdev layer */
int amdgpu_fbdev_init(struct amdgpu_device *adev);
void amdgpu_fbdev_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 7d5c3a9de9ea..6201a5f4b4fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "pcie_bif_err_count",
- .debugfs_name = "pcie_bif_err_inject",
};
if (!adev->nbio.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e3f16b49e970..c687f5415b3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,6 +31,7 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
@@ -925,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+ if (bo->tbo.base.import_attach)
+ dma_buf_pin(bo->tbo.base.import_attach);
+
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
@@ -1008,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
amdgpu_bo_subtract_pin_size(bo);
+ if (bo->tbo.base.import_attach)
+ dma_buf_unpin(bo->tbo.base.import_attach);
+
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
@@ -1274,6 +1281,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
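+	/* notify dynamic DMA-buf importers so they can tear down their
+	 * cached mappings before the buffer actually moves
+	 */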
+ if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ bo->mem.mem_type != TTM_PL_SYSTEM)
+ dma_buf_move_notify(abo->tbo.base.dma_buf);
+
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
@@ -1307,6 +1318,12 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_unreserve_memory_limit(abo);
+ /* We only remove the fence if the resv has individualized. */
+ WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
+ && bo->base.resv != &bo->base._resv);
+ if (bo->base.resv == &bo->base._resv)
+ amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+
if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
@@ -1403,30 +1420,52 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
}
/**
- * amdgpu_sync_wait_resv - Wait for BO reservation fences
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
*
- * @bo: buffer object
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
* @owner: fence owner
* @intr: Whether the wait is interruptible
*
+ * Extracts the fences from the reservation object and waits for them to finish.
+ *
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
+ amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
-
return r;
}
/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER, owner, intr);
+}
+
+/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
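The new amdgpu_bo_sync_wait_resv() decouples the wait from a specific BO: any reservation object can be synced against with an explicit amdgpu_sync_mode. A minimal sketch of a hypothetical caller follows (the helper name and the choice of the root PD resv are illustrative, not part of this patch):

static int wait_for_vm_idle(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;

	/* Wait interruptibly for all fences not owned by the VM itself */
	return amdgpu_bo_sync_wait_resv(adev, resv, AMDGPU_SYNC_NE_OWNER,
					AMDGPU_FENCE_OWNER_VM, true);
}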
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 36dec51d1ef1..5e39ecd8cc28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
@@ -316,6 +319,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
#endif
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
bool amdgpu_bo_support_uswc(u64 bo_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b03b1eb7ba04..abe94a55ecad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -41,8 +41,6 @@
#include "hwmgr.h"
#define WIDTH_4K 3840
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
-
static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
@@ -91,9 +89,13 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.ac_power = true;
else
adev->pm.ac_power = false;
- if (adev->powerplay.pp_funcs->enable_bapm)
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
+
+ if (is_support_sw_smu(adev))
+ smu_set_ac_dc(&adev->smu);
}
}
@@ -3398,11 +3400,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file unique_id\n");
return ret;
}
- ret = amdgpu_debugfs_pm_init(adev);
- if (ret) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- return ret;
- }
if ((adev->asic_type >= CHIP_VEGA10) &&
!(adev->flags & AMD_IS_APU)) {
@@ -3669,7 +3666,7 @@ static const struct drm_info_list amdgpu_pm_info_list[] = {
};
#endif
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 3da1da277805..5db0ef86e84c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -43,4 +43,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 146f96661b6b..deaa26808841 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -24,6 +24,7 @@
*/
#include <linux/firmware.h>
+#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -38,6 +39,42 @@
static void psp_set_funcs(struct amdgpu_device *adev);
+static int psp_sysfs_init(struct amdgpu_device *adev);
+static void psp_sysfs_fini(struct amdgpu_device *adev);
+
+/*
+ * Because DF C-state management is centralized in the PMFW,
+ * the firmware loading sequence is updated as below:
+ * - Load KDB
+ * - Load SYS_DRV
+ * - Load tOS
+ * - Load PMFW
+ * - Setup TMR
+ * - Load other non-psp fw
+ * - Load ASD
+ * - Load XGMI/RAS/HDCP/DTM TA if any
+ *
+ * This new sequence is required for
+ * - Arcturus
+ * - Navi12 and onwards
+ */
+static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ psp->pmfw_centralized_cstate_management = false;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ if ((adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type >= CHIP_NAVI12))
+ psp->pmfw_centralized_cstate_management = true;
+}
+
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -75,6 +112,8 @@ static int psp_early_init(void *handle)
psp->adev = adev;
+ psp_check_pmfw_centralized_cstate_management(psp);
+
return 0;
}
@@ -101,6 +140,13 @@ static int psp_sw_init(void *handle)
return ret;
}
+ if (adev->asic_type == CHIP_NAVI10) {
+ ret = psp_sysfs_init(adev);
+ if (ret) {
+ return ret;
+ }
+ }
+
return 0;
}
@@ -117,6 +163,10 @@ static int psp_sw_fini(void *handle)
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
}
+
+ if (adev->asic_type == CHIP_NAVI10)
+ psp_sysfs_fini(adev);
+
return 0;
}
@@ -150,6 +200,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
int ret;
int index;
int timeout = 2000;
+ bool ras_intr = false;
mutex_lock(&psp->mutex);
@@ -174,7 +225,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
* because gpu reset thread triggered and lock resource should
* be released for psp resume sequence.
*/
- if (amdgpu_ras_intr_triggered())
+ ras_intr = amdgpu_ras_intr_triggered();
+ if (ras_intr)
break;
msleep(1);
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
@@ -187,7 +239,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if (psp->cmd_buf_mem->resp.status || !timeout) {
+ if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
@@ -558,7 +610,7 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}
-static int psp_xgmi_terminate(struct psp_context *psp)
+int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
@@ -579,7 +631,7 @@ static int psp_xgmi_terminate(struct psp_context *psp)
return 0;
}
-static int psp_xgmi_initialize(struct psp_context *psp)
+int psp_xgmi_initialize(struct psp_context *psp)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -766,7 +818,7 @@ static int psp_ras_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_ras_ucode_size ||
!psp->adev->psp.ta_ras_start_addr) {
- dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
@@ -850,7 +902,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_hdcp_ucode_size ||
!psp->adev->psp.ta_hdcp_start_addr) {
- dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}
@@ -996,7 +1048,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_dtm_ucode_size ||
!psp->adev->psp.ta_dtm_start_addr) {
- dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}
@@ -1081,7 +1133,7 @@ static int psp_hw_start(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
int ret;
- if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
+ if (!amdgpu_sriov_vf(adev)) {
if (psp->kdb_bin_size &&
(psp->funcs->bootloader_load_kdb != NULL)) {
ret = psp_bootloader_load_kdb(psp);
@@ -1116,10 +1168,17 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
+ /*
+ * For ASICs whose DF C-state management is centralized
+ * in the PMFW, TMR setup must be performed after the PMFW
+ * is loaded and before any other non-PSP firmware is loaded.
+ */
+ if (!psp->pmfw_centralized_cstate_management) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
+ }
}
return 0;
@@ -1316,9 +1375,10 @@ static int psp_np_fw_load(struct psp_context *psp)
struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev;
- if (psp->autoload_supported) {
+ if (psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
- if (!ucode->fw)
+ if (!ucode->fw || amdgpu_sriov_vf(adev))
goto out;
ret = psp_execute_np_fw_load(psp, ucode);
@@ -1326,6 +1386,14 @@ static int psp_np_fw_load(struct psp_context *psp)
return ret;
}
+ if (psp->pmfw_centralized_cstate_management) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
+ }
+ }
+
out:
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
@@ -1333,7 +1401,9 @@ out:
continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
- (psp_smu_reload_quirk(psp) || psp->autoload_supported))
+ (psp_smu_reload_quirk(psp) ||
+ psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management))
continue;
if (amdgpu_sriov_vf(adev) &&
@@ -1444,16 +1514,6 @@ skip_memalloc:
return ret;
}
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- ret = psp_xgmi_initialize(psp);
- /* Warning the XGMI seesion initialize failure
- * Instead of stop driver initialization
- */
- if (ret)
- dev_err(psp->adev->dev,
- "XGMI: Failed to initialize XGMI session\n");
- }
-
if (psp->adev->psp.ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
@@ -1518,10 +1578,6 @@ static int psp_hw_fini(void *handle)
void *tmr_buf;
void **pptr;
- if (adev->gmc.xgmi.num_physical_nodes > 1 &&
- psp->xgmi_context.initialized == 1)
- psp_xgmi_terminate(psp);
-
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
psp_dtm_terminate(psp);
@@ -1777,6 +1833,97 @@ static int psp_set_powergating_state(void *handle,
return 0;
}
+static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t fw_ver;
+ int ret;
+
+ if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ DRM_INFO("PSP block is not ready yet.");
+ return -EBUSY;
+ }
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
+ mutex_unlock(&adev->psp.mutex);
+
+ if (ret) {
+ DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
+ return ret;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+}
+
+static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int ret;
+ char fw_name[100];
+ const struct firmware *usbc_pd_fw;
+
+ if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ DRM_INFO("PSP block is not ready yet.");
+ return -EBUSY;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
+ ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
+ if (ret)
+ goto fail;
+
+ /* We need contiguous physical memory for the FW so the PSP can access it */
+ cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
+
+ ret = dma_mapping_error(adev->dev, dma_addr);
+ if (ret)
+ goto rel_buf;
+
+ memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
+
+ /*
+ * x86-specific workaround.
+ * Without it the buffer is invisible to the PSP.
+ *
+ * TODO Remove once PSP starts snooping CPU cache
+ */
+#ifdef CONFIG_X86
+ clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
+#endif
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
+ mutex_unlock(&adev->psp.mutex);
+
+rel_buf:
+ dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
+ release_firmware(usbc_pd_fw);
+
+fail:
+ if (ret) {
+ DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
+ psp_usbc_pd_fw_sysfs_read,
+ psp_usbc_pd_fw_sysfs_write);
+
+
+
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
@@ -1795,6 +1942,21 @@ const struct amd_ip_funcs psp_ip_funcs = {
.set_powergating_state = psp_set_powergating_state,
};
+static int psp_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
+
+ if (ret)
+ DRM_ERROR("Failed to create USBC PD FW control file!");
+
+ return ret;
+}
+
+static void psp_sysfs_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
+}
+
static const struct amdgpu_psp_funcs psp_funcs = {
.check_fw_loading_status = psp_check_fw_loading_status,
};
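Since the new usbc_pd_fw attribute is an ordinary sysfs file, it can be exercised from user space without any driver-side tooling. A minimal sketch, assuming the device is card0 and a firmware blob named usbc_pd.bin has been installed under /lib/firmware/amdgpu/ (both names are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	/* Path and firmware name are illustrative assumptions. */
	const char *node = "/sys/class/drm/card0/device/usbc_pd_fw";
	FILE *f = fopen(node, "w");

	if (!f)
		return 1;
	/* The write handler prepends "amdgpu/", so pass only the name. */
	fputs("usbc_pd.bin", f);
	fclose(f);

	f = fopen(node, "r");
	if (f) {
		char ver[16] = "";

		if (fgets(ver, sizeof(ver), f))
			printf("USBC PD FW version: %s", ver);
		fclose(f);
	}
	return 0;
}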
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 611021514c52..297435c0c7c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -114,6 +114,8 @@ struct psp_funcs
int (*mem_training)(struct psp_context *psp, uint32_t ops);
uint32_t (*ring_get_wptr)(struct psp_context *psp);
void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
+ int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
+ int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -264,6 +266,8 @@ struct psp_context
atomic_t fence_value;
/* flag to mark whether gfx fw autoload is supported or not */
bool autoload_supported;
+ /* flag to mark whether df cstate management centralized to PMFW */
+ bool pmfw_centralized_cstate_management;
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
@@ -349,6 +353,14 @@ struct amdgpu_psp_funcs {
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
+#define psp_load_usbc_pd_fw(psp, dma_addr) \
+ ((psp)->funcs->load_usbc_pd_fw ? \
+ (psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
+
+#define psp_read_usbc_pd_fw(psp, fw_ver) \
+ ((psp)->funcs->read_usbc_pd_fw ? \
+ (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -362,6 +374,8 @@ int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
uint64_t cmd_gpu_addr, int cmd_size);
+int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
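The psp_load_usbc_pd_fw()/psp_read_usbc_pd_fw() wrappers above follow the driver's usual optional-hook convention: a missing per-ASIC implementation degrades to -EINVAL rather than a NULL dereference. A sketch of the same guard written out as a function (the helper name is illustrative):

static int psp_try_read_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
{
	/* Hook is only wired up on ASICs with USBC PD FW support */
	if (!psp->funcs->read_usbc_pd_fw)
		return -EINVAL;

	return psp->funcs->read_usbc_pd_fw(psp, fw_ver);
}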
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index cef94e2169fe..ab379b44679c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -31,6 +31,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
@@ -280,6 +281,11 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
struct ras_debug_if data;
int ret = 0;
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+ return size;
+ }
+
ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
if (ret)
return -EINVAL;
@@ -393,6 +399,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
.head = obj->head,
};
+ if (amdgpu_ras_intr_triggered())
+ return snprintf(buf, PAGE_SIZE,
+ "Query currently inaccessible\n");
+
if (amdgpu_ras_error_query(obj->adev, &info))
return -EINVAL;
@@ -720,6 +730,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
if (adev->nbio.funcs->query_ras_error_count)
adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+ break;
default:
break;
}
@@ -742,20 +755,6 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0;
}
-uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
-{
- uint32_t df_inst_id;
-
- if ((!adev->df.funcs) ||
- (!adev->df.funcs->get_df_inst_id) ||
- (!adev->df.funcs->get_dram_base_addr))
- return addr;
-
- df_inst_id = adev->df.funcs->get_df_inst_id(adev);
-
- return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
-}
-
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -775,8 +774,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
/* Calculate XGMI relative offset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- block_info.address = get_xgmi_relative_phy_addr(adev,
- block_info.address);
+ block_info.address =
+ amdgpu_xgmi_get_relative_phy_addr(adev,
+ block_info.address);
}
switch (info->head.block) {
@@ -1122,6 +1122,32 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
&amdgpu_ras_debugfs_ops);
}
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ struct ras_fs_if fs_info;
+
+ /*
+ * this is not called in the resume path, so there is no
+ * need to check the suspend and gpu reset status
+ */
+ if (!con)
+ return;
+
+ amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+ list_for_each_entry(obj, &con->head, node) {
+ if (amdgpu_ras_is_supported(adev, obj->head.block) &&
+ (obj->attr_inuse == 1)) {
+ sprintf(fs_info.debugfs_name, "%s_err_inject",
+ ras_block_str(obj->head.block));
+ fs_info.head = obj->head;
+ amdgpu_ras_debugfs_create(adev, &fs_info);
+ }
+ }
+}
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
@@ -1154,7 +1180,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
amdgpu_ras_sysfs_create_feature_node(adev);
- amdgpu_ras_debugfs_create_ctrl_node(adev);
return 0;
}
@@ -1319,6 +1344,33 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
}
/* ih end */
+/* traverse all IPs except NBIO to query the error counters */
+static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ /*
+ * The PCIE_BIF IP has a separate ISR for the RAS controller
+ * interrupt, and its RAS counter query is done in that ISR,
+ * so skip this block in the common sync-flood interrupt
+ * ISR path.
+ */
+ if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
+ continue;
+
+ amdgpu_ras_error_query(adev, &info);
+ }
+}
+
/* recovery begin */
/* return 0 on success.
@@ -1372,6 +1424,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
{
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
+ struct amdgpu_device *remote_adev = NULL;
+ struct amdgpu_device *adev = ras->adev;
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+
+ /* Build list of devices to query RAS related errors */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_ras_log_on_err_counter(remote_adev);
+ }
if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0);
@@ -1713,18 +1781,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*hw_supported = 0;
*supported = 0;
- if (amdgpu_sriov_vf(adev) ||
+ if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
(adev->asic_type != CHIP_VEGA20 &&
adev->asic_type != CHIP_ARCTURUS))
return;
- if (adev->is_atom_fw &&
- (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
- amdgpu_atomfirmware_sram_ecc_supported(adev)))
- *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ DRM_INFO("HBM ECC is active.\n");
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ DRM_INFO("HBM ECC is not presented.\n");
+
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ DRM_INFO("SRAM ECC is active.\n");
+ *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ DRM_INFO("SRAM ECC is not presented.\n");
+
+ /* hw_supported needs to be aligned with RAS block mask. */
+ *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
*supported = amdgpu_ras_enable == 0 ?
- 0 : *hw_supported & amdgpu_ras_mask;
+ 0 : *hw_supported & amdgpu_ras_mask;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -1825,8 +1905,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
goto interrupt;
}
- amdgpu_ras_debugfs_create(adev, fs_info);
-
r = amdgpu_ras_sysfs_create(adev, fs_info);
if (r)
goto sysfs;
@@ -1835,7 +1913,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
cleanup:
amdgpu_ras_sysfs_remove(adev, ras_block);
sysfs:
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
interrupt:
@@ -1852,7 +1929,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
return;
amdgpu_ras_sysfs_remove(adev, ras_block);
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
amdgpu_ras_feature_enable(adev, ras_block, 0);
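With the per-block debugfs_name fields removed from the ras_fs_if initializers, debugfs node creation is concentrated in amdgpu_ras_debugfs_create_all(), which derives each "<block>_err_inject" name itself. A hedged sketch of a central call site (the wrapper name and its placement are assumptions; only amdgpu_ras_debugfs_create_all() is introduced by this patch):

static void amdgpu_debugfs_register_ras(struct amdgpu_device *adev)
{
	/* Creates one <block>_err_inject node per supported RAS block,
	 * replacing the per-IP debugfs_name plumbing removed above.
	 */
	amdgpu_ras_debugfs_create_all(adev);
}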
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index a5fe29a9373e..55c3eceb390d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head);
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2a8e04895595..c0096097bbcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -25,10 +25,11 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include <linux/bits.h>
-#include "smu_v11_0_i2c.h"
+#include "atom.h"
-#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
-#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -55,6 +56,45 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
+static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
+ uint16_t *i2c_addr)
+{
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+ if (!i2c_addr || !atom_ctx)
+ return false;
+
+ if (strnstr(atom_ctx->vbios_version,
+ "D342",
+ sizeof(atom_ctx->vbios_version)))
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342;
+ else
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
+
+ return true;
+}
+
+static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ uint16_t *i2c_addr)
+{
+ if (!i2c_addr)
+ return false;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
+ break;
+
+ case CHIP_ARCTURUS:
+ return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
unsigned char *buff)
{
@@ -83,6 +123,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
unsigned char *buff)
{
int ret = 0;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
struct i2c_msg msg = {
.addr = 0,
.flags = 0,
@@ -96,15 +137,13 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
msg.addr = control->i2c_address;
- ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1)
DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
return ret;
}
-
-
static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
{
int i;
@@ -212,32 +251,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
.buf = buff,
};
- mutex_init(&control->tbl_mutex);
-
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
- ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
- break;
-
- case CHIP_ARCTURUS:
- control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
- ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
- break;
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.smu_i2c.algo)
+ return -ENOENT;
- default:
- return 0;
- }
+ if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
+ return -EINVAL;
- if (ret) {
- DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
- return ret;
- }
+ mutex_init(&control->tbl_mutex);
msg.addr = control->i2c_address;
-
/* Read/Create table header from EEPROM address 0 */
- ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1) {
DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
return ret;
@@ -263,23 +288,6 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return ret == 1 ? 0 : -EIO;
}
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
-{
- struct amdgpu_device *adev = to_amdgpu_device(control);
-
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
- break;
- case CHIP_ARCTURUS:
- smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
- break;
-
- default:
- return;
- }
-}
-
static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *record,
unsigned char *buff)
@@ -436,7 +444,7 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
control->next_addr += EEPROM_TABLE_RECORD_SIZE;
}
- ret = i2c_transfer(&control->eeprom_accessor, msgs, num);
+ ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num);
if (ret < 1) {
DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ca78f812d436..7e8647a05df7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -44,7 +44,6 @@ struct amdgpu_ras_eeprom_table_header {
struct amdgpu_ras_eeprom_control {
struct amdgpu_ras_eeprom_table_header tbl_hdr;
- struct i2c_adapter eeprom_accessor;
uint32_t next_addr;
unsigned int num_recs;
struct mutex tbl_mutex;
@@ -79,7 +78,6 @@ struct eeprom_table_record {
}__attribute__((__packed__));
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
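With the private eeprom_accessor adapter gone, every EEPROM transaction is routed through the shared adev->pm.smu_i2c adapter. A minimal sketch of a raw read over that adapter, mirroring the i2c_msg setup visible in amdgpu_ras_eeprom_init() (the helper name and the single-message addressing are illustrative):

static int eeprom_read_bytes(struct amdgpu_device *adev, u16 slave,
			     u8 *buf, u16 len)
{
	struct i2c_msg msg = {
		.addr	= slave,
		.flags	= I2C_M_RD,
		.len	= len,
		.buf	= buf,
	};

	/* i2c_transfer() returns the number of messages transferred */
	return i2c_transfer(&adev->pm.smu_i2c, &msg, 1) == 1 ? 0 : -EIO;
}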
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e5c83e164d82..a7e1d0425ed0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -48,9 +48,6 @@
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@@ -154,76 +151,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
/**
- * amdgpu_ring_priority_put - restore a ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Release a request for executing at @priority
- */
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- int i;
-
- if (!ring->funcs->set_priority)
- return;
-
- if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
- return;
-
- /* no need to restore if the job is already at the lowest priority */
- if (priority == DRM_SCHED_PRIORITY_NORMAL)
- return;
-
- mutex_lock(&ring->priority_mutex);
- /* something higher prio is executing, no need to decay */
- if (ring->priority > priority)
- goto out_unlock;
-
- /* decay priority to the next level with a job available */
- for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
- if (i == DRM_SCHED_PRIORITY_NORMAL
- || atomic_read(&ring->num_jobs[i])) {
- ring->priority = i;
- ring->funcs->set_priority(ring, i);
- break;
- }
- }
-
-out_unlock:
- mutex_unlock(&ring->priority_mutex);
-}
-
-/**
- * amdgpu_ring_priority_get - change the ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Request a ring's priority to be raised to @priority (refcounted).
- */
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- if (!ring->funcs->set_priority)
- return;
-
- if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
- return;
-
- mutex_lock(&ring->priority_mutex);
- if (priority <= ring->priority)
- goto out_unlock;
-
- ring->priority = priority;
- ring->funcs->set_priority(ring, priority);
-
-out_unlock:
- mutex_unlock(&ring->priority_mutex);
-}
-
-/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
@@ -334,10 +261,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
- if (amdgpu_debugfs_ring_init(adev, ring)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
- }
-
return 0;
}
@@ -351,12 +274,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- ring->sched.ready = false;
/* Not to finish a ring which is not initialized */
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
+ ring->sched.ready = false;
+
amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
@@ -367,8 +291,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
&ring->gpu_addr,
(void **)&ring->ring);
- amdgpu_debugfs_ring_fini(ring);
-
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
@@ -485,8 +407,8 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
#endif
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev->ddev->primary;
@@ -507,13 +429,6 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
return 0;
}
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
-{
-#if defined(CONFIG_DEBUG_FS)
- debugfs_remove(ring->ent);
-#endif
-}
-
/**
* amdgpu_ring_test_helper - tests ring and set sched readiness status
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 930316e60155..9a443013d70d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -167,9 +167,6 @@ struct amdgpu_ring_funcs {
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
- /* priority functions */
- void (*set_priority) (struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
/* Try to soft recover the ring to make the fence signal */
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
@@ -222,6 +219,7 @@ struct amdgpu_ring {
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
+ bool has_high_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
@@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -328,4 +322,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
+
#endif
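amdgpu_debugfs_ring_init() is now exported so ring debugfs registration can happen in one central place rather than inside amdgpu_ring_init(). A hedged sketch of such a central loop (the call site is an assumption; the helper is the one declared above):

static int register_all_ring_debugfs(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		r = amdgpu_debugfs_ring_init(adev, ring);
		if (r)
			return r;
	}
	return 0;
}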
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index d3d4707f2168..60bb3e8b3118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -126,6 +126,9 @@ struct amdgpu_rlc_funcs {
void (*stop)(struct amdgpu_device *adev);
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
+ void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+ void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+ bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
struct amdgpu_rlc {
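The three new RLC callbacks are optional per generation, so callers are expected to NULL-check them, as amdgpu_vm_flush() does for update_spm_vmid later in this patch. A sketch of that convention (the wrapper name is illustrative):

static void rlc_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
	/* Hook may be absent on generations without SPM support */
	if (adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, vmid);
}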
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index a2ee30b16212..250a309e4dee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -70,7 +70,8 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
uint32_t index = 0;
int r;
- if (vmid == 0 || !amdgpu_mcbp)
+ /* don't enable OS preemption on SDMA under SRIOV */
+ if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
return 0;
r = amdgpu_sdma_get_index_from_ring(ring, &index);
@@ -92,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
struct ras_fs_if fs_info = {
.sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
};
if (!ih_info)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 485335267d78..4b352206354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs {
void (*ras_fini)(struct amdgpu_device *adev);
int (*query_ras_error_count)(struct amdgpu_device *adev,
uint32_t instance, void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
};
struct amdgpu_sdma {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a09b6b9c27d1..b86392253696 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -202,18 +202,17 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
- * @explicit_sync: true if we should only sync to the exclusive fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
*
* Sync to the fence
*/
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct dma_resv *resv,
- void *owner, bool explicit_sync)
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
+ void *owner)
{
struct dma_resv_list *flist;
struct dma_fence *f;
- void *fence_owner;
unsigned i;
int r = 0;
@@ -229,30 +228,46 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r;
for (i = 0; i < flist->shared_count; ++i) {
+ void *fence_owner;
+
f = rcu_dereference_protected(flist->shared[i],
dma_resv_held(resv));
+
+ fence_owner = amdgpu_sync_get_owner(f);
+
+ /* Always sync to moves, no matter what */
+ if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
+ r = amdgpu_sync_fence(sync, f, false);
+ if (r)
+ break;
+ }
+
/* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise.
*/
- fence_owner = amdgpu_sync_get_owner(f);
if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
- if (amdgpu_sync_same_dev(adev, f)) {
- /* VM updates only sync with moves but not with user
- * command submissions or KFD evictions fences
- */
- if (owner == AMDGPU_FENCE_OWNER_VM &&
- fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ /* Ignore fences depending on the sync mode */
+ switch (mode) {
+ case AMDGPU_SYNC_ALWAYS:
+ break;
+
+ case AMDGPU_SYNC_NE_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner == owner)
continue;
+ break;
- /* Ignore fence from the same owner and explicit one as
- * long as it isn't undefined.
- */
- if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
- (fence_owner == owner || explicit_sync))
+ case AMDGPU_SYNC_EQ_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner != owner)
continue;
+ break;
+
+ case AMDGPU_SYNC_EXPLICIT:
+ continue;
}
r = amdgpu_sync_fence(sync, f, false);
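The four amdgpu_sync_mode values replace the old owner/explicit_sync heuristics with an explicit statement of intent. A sketch showing how two common callers map onto the new interface (the wrapper function is illustrative; the calls match the signature introduced above):

static int sync_mode_examples(struct amdgpu_device *adev,
			      struct amdgpu_sync *sync,
			      struct dma_resv *resv, void *owner)
{
	int r;

	/* A buffer move must wait for everything on the resv. */
	r = amdgpu_sync_resv(adev, sync, resv, AMDGPU_SYNC_ALWAYS,
			     AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		return r;

	/* A user submission waits for fences from *other* owners only. */
	return amdgpu_sync_resv(adev, sync, resv, AMDGPU_SYNC_NE_OWNER,
				owner);
}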
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index d62c2b81d92b..cfbe5788b8b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -31,6 +31,13 @@ struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
+enum amdgpu_sync_mode {
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_SYNC_EQ_OWNER,
+ AMDGPU_SYNC_EXPLICIT
+};
+
/*
* Container for fences used to sync command submissions.
*/
@@ -43,11 +50,9 @@ void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
bool explicit);
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct dma_resv *resv,
- void *owner,
- bool explicit_sync);
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
+ void *owner);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c6e9885c071f..6309ff72bd78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -60,20 +60,14 @@
#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
+#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
+
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem, unsigned num_pages,
uint64_t offset, unsigned window,
struct amdgpu_ring *ring,
uint64_t *addr);
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
-static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
/**
* amdgpu_init_mem_type - Initialize a memory manager for a specific type of
* memory request.
@@ -776,7 +770,6 @@ struct amdgpu_ttm_tt {
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
(1 << 0), /* HMM_PFN_VALID */
(1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
};
static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
@@ -857,7 +850,7 @@ retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
- r = hmm_range_fault(range, 0);
+ r = hmm_range_fault(range);
up_read(&mm->mmap_sem);
if (unlikely(r <= 0)) {
/*
@@ -1034,7 +1027,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
- if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
+ if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
uint64_t page_idx = 1;
r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
@@ -1042,7 +1035,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
if (r)
goto gart_bind_fail;
- /* Patch mtype of the second part BO */
+ /* The memory type of the first page defaults to UC. Now
+ * modify the memory type to NC from the second page of
+ * the BO onward.
+ */
flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
@@ -1596,7 +1592,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3;
- uint32_t bytes = 4 - (pos & 3);
+ uint64_t bytes = 4 - (pos & 3);
uint32_t shift = (pos & 3) * 8;
uint32_t mask = 0xffffffff << shift;
@@ -1605,20 +1601,28 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
bytes = len;
}
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
- if (!write || mask != 0xffffffff)
- value = RREG32_NO_KIQ(mmMM_DATA);
- if (write) {
- value &= ~mask;
- value |= (*(uint32_t *)buf << shift) & mask;
- WREG32_NO_KIQ(mmMM_DATA, value);
- }
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- if (!write) {
- value = (value & mask) >> shift;
- memcpy(buf, &value, bytes);
+ if (mask != 0xffffffff) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
+ if (!write || mask != 0xffffffff)
+ value = RREG32_NO_KIQ(mmMM_DATA);
+ if (write) {
+ value &= ~mask;
+ value |= (*(uint32_t *)buf << shift) & mask;
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ }
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if (!write) {
+ value = (value & mask) >> shift;
+ memcpy(buf, &value, bytes);
+ }
+ } else {
+ bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
+ bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
+
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
+ bytes, write);
}
ret += bytes;
@@ -1638,7 +1642,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
@@ -1836,9 +1839,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*The reserved vram for memory training must be pinned to the specified
*place on the VRAM, so reserve it early.
*/
- r = amdgpu_ttm_training_reserve_vram_init(adev);
- if (r)
- return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_ttm_training_reserve_vram_init(adev);
+ if (r)
+ return r;
+ }
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
@@ -1911,12 +1916,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
- /* Register debugfs entries for amdgpu_ttm */
- r = amdgpu_ttm_debugfs_init(adev);
- if (r) {
- DRM_ERROR("Failed to init debugfs\n");
- return r;
- }
return 0;
}
@@ -1938,7 +1937,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized)
return;
- amdgpu_ttm_debugfs_fini(adev);
amdgpu_ttm_training_reserve_vram_fini(adev);
/* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
@@ -2113,8 +2111,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
}
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -2198,7 +2196,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -2279,7 +2278,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
- int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@@ -2287,27 +2285,19 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO;
+ size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
while (size) {
- unsigned long flags;
- uint32_t value;
-
- if (*pos >= adev->gmc.mc_vram_size)
- return result;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
+ uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ amdgpu_device_vram_access(adev, *pos, value, bytes, false);
+ if (copy_to_user(buf, value, bytes))
+ return -EFAULT;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
+ result += bytes;
+ buf += bytes;
+ *pos += bytes;
+ size -= bytes;
}
return result;
@@ -2544,7 +2534,7 @@ static const struct {
#endif
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned count;
@@ -2579,13 +2569,3 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return 0;
#endif
}
-
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
- debugfs_remove(adev->mman.debugfs_entries[i]);
-#endif
-}
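The rewritten debugfs VRAM reader above copies up to AMDGPU_TTM_VRAM_MAX_DW_READ dwords per amdgpu_device_vram_access() call instead of one MMIO dword per iteration. The per-iteration clamp reduces to the following (a sketch of the arithmetic only; the helper name is illustrative):

static size_t vram_read_chunk(size_t remaining)
{
	/* 128 dwords * 4 bytes = 512 bytes per chunk at most */
	const size_t max_bytes = AMDGPU_TTM_VRAM_MAX_DW_READ * 4;

	return remaining < max_bytes ? remaining : max_bytes;
}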
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0dddedc06ae3..bd05bbb4878d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -133,4 +133,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f4d40855147b..9dd51f0d2c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
int r;
struct ras_fs_if fs_info = {
.sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
};
struct ras_ih_if ih_info = {
.cb = amdgpu_umc_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a92f3b18e657..5fd32ad1c575 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1099,7 +1099,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err_free;
} else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f96464e2c157..a41272fbcba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
long r;
- /* temporarily disable ib test for sriov */
- if (amdgpu_sriov_vf(adev))
- return 0;
-
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
@@ -656,15 +654,10 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = NULL;
long r;
- /* temporarily disable ib test for sriov */
- if (amdgpu_sriov_vf(adev))
- return 0;
-
r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index daaf909d009a..f0128f745bd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -270,6 +270,9 @@ struct amdgpu_virt {
#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
+#define amdgpu_sriov_fullaccess(adev) \
+(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d16231d6a790..6d9252a27916 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -120,23 +120,17 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
{
- unsigned shift = 0xff;
-
switch (level) {
case AMDGPU_VM_PDB2:
case AMDGPU_VM_PDB1:
case AMDGPU_VM_PDB0:
- shift = 9 * (AMDGPU_VM_PDB0 - level) +
+ return 9 * (AMDGPU_VM_PDB0 - level) +
adev->vm_manager.block_size;
- break;
case AMDGPU_VM_PTB:
- shift = 0;
- break;
+ return 0;
default:
- dev_err(adev->dev, "the level%d isn't supported.\n", level);
+ return ~0;
}
-
- return shift;
}
/**
@@ -235,19 +229,6 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
}
-
-/**
- * amdgpu_vm_bo_relocated - vm_bo is reloacted
- *
- * @vm_bo: vm_bo which is relocated
- *
- * State for PDs/PTs which needs to update their parent PD.
- */
-static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
-{
- list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
-}
-
/**
* amdgpu_vm_bo_moved - vm_bo is moved
*
@@ -291,6 +272,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
}
/**
+ * amdgpu_vm_bo_relocated - vm_bo is relocated
+ *
+ * @vm_bo: vm_bo which is relocated
+ *
+ * State for PDs/PTs which needs to update their parent PD.
+ * For the root PD, just move to idle state.
+ */
+static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
+{
+ if (vm_bo->bo->parent)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+ else
+ amdgpu_vm_bo_idle(vm_bo);
+}
+
+/**
* amdgpu_vm_bo_done - vm_bo is done
*
* @vm_bo: vm_bo which is now done
@@ -588,8 +585,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- /* One for TTM and one for the CS job */
- entry->tv.num_shared = 2;
+ /* Two for VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 4;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -697,10 +694,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_moved(bo_base);
} else {
vm->update_funcs->map_table(bo);
- if (bo->parent)
- amdgpu_vm_bo_relocated(bo_base);
- else
- amdgpu_vm_bo_idle(bo_base);
+ amdgpu_vm_bo_relocated(bo_base);
}
}
@@ -803,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
params.vm = vm;
params.direct = direct;
- r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
return r;
@@ -1086,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct dma_fence *fence = NULL;
bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
+ bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
int r;
+ if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
@@ -1299,7 +1297,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.direct = direct;
- r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
return r;
@@ -1448,21 +1446,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- /* make sure that the page tables covering the address range are
- * actually allocated
- */
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
- params->direct);
- if (r)
- return r;
-
- pt = cursor.entry->base.bo;
-
- /* The root level can't be a huge page */
- if (cursor.level == adev->vm_manager.root_level) {
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
+ if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ /* make sure that the page tables covering the
+ * address range are actually allocated
+ */
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm,
+ &cursor, params->direct);
+ if (r)
+ return r;
}
shift = amdgpu_vm_level_shift(adev, cursor.level);
@@ -1480,25 +1471,38 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* smaller than the address shift. Go to the next
* child entry and try again.
*/
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
- } else if (frag >= parent_shift &&
- cursor.level - 1 != adev->vm_manager.root_level) {
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (frag >= parent_shift) {
/* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again
- * unless one level up is the root level.
+ * shift we should go up one level and check it again.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
- return -ENOENT;
+ return -EINVAL;
continue;
}
+ pt = cursor.entry->base.bo;
+ if (!pt) {
+ /* We need all PDs and PTs for mapping something, */
+ if (flags & AMDGPU_PTE_VALID)
+ return -ENOENT;
+
+ /* but unmapping something can happen at a higher
+ * level.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -EINVAL;
+
+ pt = cursor.entry->base.bo;
+ shift = parent_shift;
+ }
+
/* Looks good so far, calculate parameters for the update */
incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = (uint64_t)(mask + 1) << shift;
+ entry_end = ((uint64_t)mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
@@ -1506,6 +1510,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t upd_end = min(entry_end, frag_end);
unsigned nptes = (upd_end - frag_start) >> shift;
+ /* This can happen when we set higher level PDs to
+ * silent to stop fault floods.
+ */
+ nptes = max(nptes, 1u);
amdgpu_vm_update_flags(params, pt, cursor.level,
pe_start, dst, nptes, incr,
flags | AMDGPU_PTE_FRAG(frag));
@@ -1550,7 +1558,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* @adev: amdgpu_device pointer
* @vm: requested vm
* @direct: direct submission in a page fault
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
@@ -1565,14 +1573,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool direct,
- struct dma_fence *exclusive,
+ struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
- void *owner = AMDGPU_FENCE_OWNER_VM;
+ enum amdgpu_sync_mode sync_mode;
int r;
memset(&params, 0, sizeof(params));
@@ -1581,9 +1589,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.direct = direct;
params.pages_addr = pages_addr;
- /* sync to everything except eviction fences on unmapping */
+ /* Implicitly sync to command submissions in the same VM before
+ * unmapping. Sync to moving fences before mapping.
+ */
if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_KFD;
+ sync_mode = AMDGPU_SYNC_EQ_OWNER;
+ else
+ sync_mode = AMDGPU_SYNC_EXPLICIT;
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
@@ -1591,7 +1603,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_unlock;
}
- r = vm->update_funcs->prepare(&params, owner, exclusive);
+ if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ struct amdgpu_bo *root = vm->root.base.bo;
+
+ if (!dma_fence_is_signaled(vm->last_direct))
+ amdgpu_bo_fence(root, vm->last_direct, true);
+ }
+
+ r = vm->update_funcs->prepare(&params, resv, sync_mode);
if (r)
goto error_unlock;
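The prepare() contract changes from an owner cookie plus one exclusive fence to a reservation object plus a sync mode, and the comment above spells out the rule. It is compact enough to restate standalone (a sketch; the enum values mirror amdgpu_sync_mode, the helper itself is illustrative):

	#include <stdint.h>

	#define PTE_VALID (1ull << 0)	/* stand-in for AMDGPU_PTE_VALID */

	enum sync_mode { SYNC_EXPLICIT, SYNC_EQ_OWNER };

	/* Mapping waits only for explicitly requested fences (PD/PT moves
	 * are synced per table in the update callbacks); unmapping also
	 * waits for command submissions of the same VM so in-flight work
	 * never sees its PTEs disappear underneath it. */
	static enum sync_mode pick_sync_mode(uint64_t flags)
	{
		return (flags & PTE_VALID) ? SYNC_EXPLICIT : SYNC_EQ_OWNER;
	}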
@@ -1610,7 +1629,7 @@ error_unlock:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
@@ -1626,7 +1645,7 @@ error_unlock:
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct dma_fence *exclusive,
+ struct dma_resv *resv,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
@@ -1696,13 +1715,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
- } else if (flags & AMDGPU_PTE_VALID) {
+ } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
start, last, flags, addr,
dma_addr, fence);
if (r)
@@ -1741,7 +1760,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
dma_addr_t *pages_addr = NULL;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
- struct dma_fence *exclusive, **last_update;
+ struct dma_fence **last_update;
+ struct dma_resv *resv;
uint64_t flags;
struct amdgpu_device *bo_adev = adev;
int r;
@@ -1749,7 +1769,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (clear || !bo) {
mem = NULL;
nodes = NULL;
- exclusive = NULL;
+ resv = vm->root.base.bo->tbo.base.resv;
} else {
struct ttm_dma_tt *ttm;
@@ -1759,7 +1779,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = bo->tbo.moving;
+ resv = bo->tbo.base.resv;
}
if (bo) {
@@ -1769,7 +1789,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
flags = 0x0;
}
- if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
+ if (clear || (bo && bo->tbo.base.resv ==
+ vm->root.base.bo->tbo.base.resv))
last_update = &vm->last_update;
else
last_update = &bo_va->last_pt_update;
@@ -1783,7 +1804,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
}
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
mapping, flags, bo_adev, nodes,
last_update);
if (r)
@@ -1978,6 +1999,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping;
uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
@@ -1992,7 +2014,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
mapping->start, mapping->last,
init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2563,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Don't evict VM page tables while they are updated */
- if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
- !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+ if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
@@ -2741,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
if (timeout <= 0)
return timeout;
- timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
- if (timeout <= 0)
- return timeout;
-
- return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
+ return dma_fence_wait_timeout(vm->last_direct, true, timeout);
}
/**
@@ -2818,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
vm->last_direct = dma_fence_get_stub();
- vm->last_delayed = dma_fence_get_stub();
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2873,7 +2889,6 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_direct);
- dma_fence_put(vm->last_delayed);
drm_sched_entity_destroy(&vm->delayed);
error_free_direct:
@@ -3076,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_wait(vm->last_direct, false);
dma_fence_put(vm->last_direct);
- dma_fence_wait(vm->last_delayed, false);
- dma_fence_put(vm->last_delayed);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3188,6 +3201,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ long timeout = msecs_to_jiffies(2000);
int r;
switch (args->in.op) {
@@ -3199,6 +3213,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
+ if (amdgpu_sriov_runtime(adev))
+ timeout = 8 * timeout;
+
+ /* Wait for the VM to become idle to make sure the VMID set in
+ * SPM_VMID is not referenced anymore.
+ */
+ r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
+ if (r)
+ return r;
+
+ r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
+ if (r < 0) {
+ amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
+ return r;
+ }
+
+ amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
break;
default:
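The new UNRESERVE_VMID path reserves the root PD, waits up to 2 s (16 s under SR-IOV, where a world switch can stretch the wait) for the VM to go idle, and only then frees the reserved VMID. The root PD must be unreserved on every exit path; a hypothetical kernel-context wrapper making that pattern explicit (function names follow the driver, the wrapper itself is not part of the patch):

	/* Hypothetical helper, for illustration: reserve the root PD, wait
	 * for the VM to go idle, and unreserve on all paths. */
	static int example_wait_vm_idle(struct amdgpu_fpriv *fpriv, long timeout)
	{
		struct amdgpu_bo *root = fpriv->vm.root.base.bo;
		int r;

		r = amdgpu_bo_reserve(root, true);
		if (r)
			return r;

		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
		amdgpu_bo_unreserve(root);

		return r < 0 ? r : 0;
	}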
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index b4640ab38c95..06fe30e1492d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -227,8 +227,8 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo *bo);
- int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
- struct dma_fence *exclusive);
+ int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -276,7 +276,6 @@ struct amdgpu_vm {
/* Last submission to the scheduler entities */
struct dma_fence *last_direct;
- struct dma_fence *last_delayed;
unsigned int pasid;
/* dedicated to vm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 73fec7a0ced5..e38516304070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
* Returns:
* Negative errno, 0 for success.
*/
-static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
- struct dma_fence *exclusive)
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
+ struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode)
{
- int r;
-
- /* Wait for any BO move to be completed */
- if (exclusive) {
- r = dma_fence_wait(exclusive, true);
- if (unlikely(r))
- return r;
- }
-
- /* Don't wait for submissions during page fault */
- if (p->direct)
+ if (!resv)
return 0;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+ return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
}
/**
@@ -86,6 +74,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{
unsigned int i;
uint64_t value;
+ int r;
+
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r)
+ return r;
+ }
pe += (unsigned long)amdgpu_bo_kptr(bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 19b7f80758f1..cf96c335b258 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- void *owner, struct dma_fence *exclusive)
+ struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode)
{
- struct amdgpu_bo *root = p->vm->root.base.bo;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
@@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
p->num_dw_left = ndw;
- /* Wait for moves to be completed */
- r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
- if (r)
- return r;
-
- /* Don't wait for any submissions during page fault handling */
- if (p->direct)
+ if (!resv)
return 0;
- return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
- owner, false);
+ return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
}
/**
@@ -111,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r)
goto error;
- tmp = dma_fence_get(f);
- if (p->direct)
+ if (p->direct) {
+ tmp = dma_fence_get(f);
swap(p->vm->last_direct, tmp);
- else
- swap(p->vm->last_delayed, tmp);
- dma_fence_put(tmp);
+ dma_fence_put(tmp);
+ } else {
+ dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+ }
if (fence && !p->direct)
swap(*fence, f);
@@ -147,7 +141,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
- pe += amdgpu_bo_gpu_offset(bo);
+ pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -174,7 +168,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
- pe += amdgpu_bo_gpu_offset(bo);
+ pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
@@ -208,6 +202,11 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t *pte;
int r;
+ /* Wait for PD/PT moves to be completed */
+ r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
+ if (r)
+ return r;
+
do {
ndw = p->num_dw_left;
ndw -= p->job->ibs->length_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a97af422575a..95b3327168ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -26,7 +26,12 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
+#include "soc15.h"
#include "df/df_3_6_offset.h"
+#include "xgmi/xgmi_4_0_0_smn.h"
+#include "xgmi/xgmi_4_0_0_sh_mask.h"
+#include "wafl/wafl2_4_0_0_smn.h"
+#include "wafl/wafl2_4_0_0_sh_mask.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex);
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
+static const int xgmi_pcs_err_status_reg_vg20[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int wafl_pcs_err_status_reg_vg20[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int xgmi_pcs_err_status_reg_arct[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
+};
+
+/* same as vg20 */
+static const int wafl_pcs_err_status_reg_arct[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
+ {"XGMI PCS DataLossErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
+ {"XGMI PCS TrainingErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
+ {"XGMI PCS CRCErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
+ {"XGMI PCS BERExceededErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
+ {"XGMI PCS TxMetaDataErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"XGMI PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"XGMI PCS DataParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
+ {"XGMI PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"XGMI PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"XGMI PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"XGMI PCS DeskewErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
+ {"XGMI PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"XGMI PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"XGMI PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"XGMI PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"XGMI PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"XGMI PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"XGMI PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
+static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
+ {"WAFL PCS DataLossErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
+ {"WAFL PCS TrainingErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
+ {"WAFL PCS CRCErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
+ {"WAFL PCS BERExceededErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
+ {"WAFL PCS TxMetaDataErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"WAFL PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"WAFL PCS DataParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
+ {"WAFL PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"WAFL PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"WAFL PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"WAFL PCS DeskewErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
+ {"WAFL PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"WAFL PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"WAFL PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"WAFL PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"WAFL PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"WAFL PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"WAFL PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
return &hive->device_list;
@@ -365,6 +473,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
return 0;
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ ret = psp_xgmi_initialize(&adev->psp);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to initialize xgmi session\n");
+ return ret;
+ }
+
ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
if (ret) {
dev_err(adev->dev,
@@ -451,16 +566,16 @@ exit:
return ret;
}
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
struct amdgpu_hive_info *hive;
if (!adev->gmc.xgmi.supported)
- return;
+ return -EINVAL;
hive = amdgpu_get_xgmi_hive(adev, 1);
if (!hive)
- return;
+ return -EINVAL;
if (!(hive->number_devices--)) {
amdgpu_xgmi_sysfs_destroy(adev, hive);
@@ -471,6 +586,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
+
+ return psp_xgmi_terminate(&adev->psp);
}
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
@@ -481,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "xgmi_wafl_err_count",
- .debugfs_name = "xgmi_wafl_err_inject",
};
if (!adev->gmc.xgmi.supported ||
@@ -521,3 +637,129 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
kfree(ras_if);
}
}
+
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ uint32_t df_inst_id;
+ uint64_t dram_base_addr = 0;
+ const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
+
+ if ((!df_funcs) ||
+ (!df_funcs->get_df_inst_id) ||
+ (!df_funcs->get_dram_base_addr)) {
+ dev_warn(adev->dev,
+ "XGMI: relative phy_addr algorithm is not supported\n");
+ return addr;
+ }
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
+ dev_warn(adev->dev,
+ "failed to disable DF-Cstate, DF register may not be accessible\n");
+ return addr;
+ }
+
+ df_inst_id = df_funcs->get_df_inst_id(adev);
+ dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+
+ return addr + dram_base_addr;
+}
+
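amdgpu_xgmi_get_relative_phy_addr has to keep the DF out of C-states while it reads the DF registers, hence the disallow/allow bracket around the two accessor calls. The bracket generalizes to a small guard pattern (a hypothetical kernel-context helper, shown only to make the shape explicit):

	/* Hypothetical guard: keep DF C-states disallowed for the duration
	 * of a register-dependent read, restoring them afterwards. */
	static int df_guarded_read(struct amdgpu_device *adev, uint64_t *out,
				   uint64_t (*read_fn)(struct amdgpu_device *))
	{
		if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
			return -EBUSY;	/* DF registers may not be accessible */

		*out = read_fn(adev);

		if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
			dev_warn(adev->dev, "failed to re-enable DF-Cstate\n");

		return 0;
	}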
+static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
+ uint32_t value,
+ uint32_t *ue_count,
+ uint32_t *ce_count,
+ bool is_xgmi_pcs)
+{
+ int i;
+ int ue_cnt;
+
+ if (is_xgmi_pcs) {
+ /* query xgmi pcs error status,
+ * only ue is supported */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
+ ue_cnt = (value &
+ xgmi_pcs_ras_fields[i].pcs_err_mask) >>
+ xgmi_pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ xgmi_pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+ }
+ } else {
+ /* query wafl pcs error status,
+ * only ue is supported */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
+ ue_cnt = (value &
+ wafl_pcs_ras_fields[i].pcs_err_mask) >>
+ wafl_pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ wafl_pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ int i;
+ uint32_t data;
+ uint32_t ue_cnt = 0, ce_cnt = 0;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ case CHIP_VEGA20:
+ default:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ }
+
+ err_data->ue_count += ue_cnt;
+ err_data->ce_count += ce_cnt;
+
+ return 0;
+}
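Both query loops decode one status word against a table of per-field mask/shift pairs and sum whatever counts are present. A standalone C sketch of that decode, with an invented two-field layout and a pretend register readout:

	#include <stdint.h>
	#include <stdio.h>

	struct pcs_field { const char *name; uint32_t mask; unsigned shift; };

	int main(void)
	{
		/* invented layout: bit 0 plus bits 5:4 of the status word */
		static const struct pcs_field fields[] = {
			{ "DataLossErr", 0x00000001u, 0 },
			{ "CRCErr",      0x00000030u, 4 },
		};
		uint32_t status = 0x21;	/* pretend RREG32_PCIE() result */
		uint32_t ue_count = 0;

		for (unsigned i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
			uint32_t cnt = (status & fields[i].mask) >> fields[i].shift;
			if (cnt) {
				printf("%s detected (count %u)\n",
				       fields[i].name, cnt);
				ue_count += cnt;
			}
		}
		printf("total uncorrectable errors: %u\n", ue_count);
		return 0;
	}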
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 74011fbc2251..4a92067fe595 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -37,15 +37,25 @@ struct amdgpu_hive_info {
struct task_barrier tb;
};
+struct amdgpu_pcs_ras_field {
+ const char *err_name;
+ uint32_t pcs_err_mask;
+ uint32_t pcs_err_shift;
+};
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index dd30f4e61a8c..cae426c7c086 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 5000)) {
- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
+ if ((jiffies_to_msecs(cjiffies) > 10000)) {
+ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
ctx->abort = true;
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index ea702a64f807..9b74cfdba7b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -186,16 +186,10 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
- int ret;
-
amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
- amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
- ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
- if (!ret)
- amdgpu_connector->ddc_bus->has_aux = true;
-
- WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+ drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
+ amdgpu_connector->ddc_bus->has_aux = true;
}
/***** general DP utility functions *****/
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 006f21ef7ddf..62635e58e45e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1358,8 +1358,6 @@ static int cik_asic_reset(struct amdgpu_device *adev)
int r;
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
r = amdgpu_dpm_baco_reset(adev);
} else {
r = cik_asic_pci_config_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 40d2ac723dd6..2512e7ebfedf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2494,6 +2494,10 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2685,6 +2689,7 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.prepare = dce_v10_0_crtc_prepare,
.commit = dce_v10_0_crtc_commit,
.disable = dce_v10_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 898ef72d423c..0dde22db9848 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2573,6 +2573,10 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v11_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2793,6 +2797,7 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.prepare = dce_v11_0_crtc_prepare,
.commit = dce_v11_0_crtc_commit,
.disable = dce_v11_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index db15a112becc..84219534bd38 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2388,6 +2388,10 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v6_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2575,6 +2579,7 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.prepare = dce_v6_0_crtc_prepare,
.commit = dce_v6_0_crtc_commit,
.disable = dce_v6_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f06c9022c1fd..3a640702d7d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2395,6 +2395,10 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v8_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2593,6 +2597,7 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.prepare = dce_v8_0_crtc_prepare,
.commit = dce_v8_0_crtc_commit,
.disable = dce_v8_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4f94863332c..13e12be667fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -123,6 +123,10 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_virtual_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -218,6 +222,7 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
.prepare = dce_virtual_crtc_prepare,
.commit = dce_virtual_crtc_commit,
.disable = dce_virtual_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
@@ -609,7 +614,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_register(connector);
/* link them */
drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 02702597ddeb..0e0daf0021b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -35,6 +35,8 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
+#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
@@ -222,6 +224,49 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+}
+
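gfx_v10_rlcg_wreg is a mailbox handshake with the RLC: publish the value and the target offset (with bit 31 as a request flag) in scratch registers, ring the spare interrupt, then poll until the RLC clears the flag. Stripped of the offset bookkeeping, the core pattern is (a kernel-context sketch; the iomem pointers stand in for scratch_reg0, scratch_reg1 and spare_int):

	static int rlcg_write_poll(void __iomem *data_reg, void __iomem *addr_reg,
				   void __iomem *doorbell, u32 offset, u32 v)
	{
		unsigned int retries = 50000;

		writel(v, data_reg);
		writel(offset | 0x80000000, addr_reg);	/* bit 31: request pending */
		writel(1, doorbell);			/* kick the RLC */

		while (retries--) {
			if (!(readl(addr_reg) & 0x80000000))
				return 0;	/* RLC performed the write */
			udelay(10);
		}

		return -ETIMEDOUT;
	}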
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
/* Pending on emulation bring up */
@@ -500,29 +545,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
+ unsigned index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
long r;
- r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
return r;
- }
-
- WREG32(scratch, 0xCAFEDEAD);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ if (r)
goto err1;
- }
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
@@ -530,15 +574,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
- tmp = RREG32(scratch);
+ tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF)
r = 0;
else
@@ -547,8 +589,7 @@ err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
-
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1016,6 +1057,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1068,7 +1113,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
return r;
}
- memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
+ memset(hpd, 0, mec_hpd_size);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
@@ -1783,11 +1828,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
- WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
- WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
- WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
return 0;
}
@@ -1895,6 +1940,11 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
gfx_v10_0_rlc_enable_srm(adev);
} else {
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v10_0_init_csb(adev);
+ return 0;
+ }
+
adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
@@ -2395,7 +2445,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].sched.ready = false;
}
- WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
@@ -3168,12 +3218,7 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
- DRM_ERROR("kfq enable failed\n");
- kiq_ring->sched.ready = false;
- }
- return r;
+ return amdgpu_ring_test_helper(kiq_ring);
}
#endif
@@ -3216,6 +3261,22 @@ done:
return r;
}
+static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3341,6 +3402,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
+ /* set static priority for a compute queue/ring */
+ gfx_v10_0_compute_mqd_set_priority(ring, mqd);
+
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
*/
@@ -3790,7 +3854,7 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);
- return amdgpu_ring_test_ring(kiq_ring);
+ return amdgpu_ring_test_helper(kiq_ring);
}
#endif
@@ -3930,9 +3994,8 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
@@ -4041,6 +4104,12 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 0 - Disable some blocks' MGCG */
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ WREG32_SOC15(GC, 0, mmCGTT_WD_CLK_CTRL, 0xff000000);
+ WREG32_SOC15(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xff000000);
+ WREG32_SOC15(GC, 0, mmCGTT_IA_CLK_CTRL, 0xff000000);
+
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4080,19 +4149,20 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* 2 - disable MGLS in RLC */
+ /* 2 - disable MGLS in CP */
+ data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+ }
+
+ /* 3 - disable MGLS in RLC */
data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
}
- /* 3 - disable MGLS in CP */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
- if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
- data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
- }
}
}
@@ -4220,6 +4290,45 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
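gfx_v10_0_update_spm_vmid (and its v7/v8 twins further down) is a plain read-modify-write of a single register field. The generic shape, with illustrative mask/shift parameters:

	/* Generic field update, for illustration: clear the field, then
	 * OR in the new value shifted into place and clipped to the mask. */
	static inline u32 rmw_field(u32 reg, u32 mask, unsigned int shift, u32 val)
	{
		reg &= ~mask;
		reg |= (val << shift) & mask;
		return reg;
	}

Schematically, each update_spm_vmid body reads the register, feeds it through such a helper, and writes the result back.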
+static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0);
+}
+
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
.set_safe_mode = gfx_v10_0_set_safe_mode,
@@ -4230,7 +4339,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.resume = gfx_v10_0_rlc_resume,
.stop = gfx_v10_0_rlc_stop,
.reset = gfx_v10_0_rlc_reset,
- .start = gfx_v10_0_rlc_start
+ .start = gfx_v10_0_rlc_start,
+ .update_spm_vmid = gfx_v10_0_update_spm_vmid,
+ .rlcg_wreg = gfx_v10_rlcg_wreg,
+ .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
static int gfx_v10_0_set_powergating_state(void *handle,
@@ -4241,11 +4353,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
- if (!enable) {
- amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- } else
- amdgpu_gfx_off_ctrl(adev, true);
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@@ -4419,15 +4527,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v10_0_ring_emit_de_meta(ring,
- flags & AMDGPU_IB_PREEMPTED ? true : false);
+ (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
amdgpu_ring_write(ring, header);
@@ -4574,9 +4682,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
{
uint32_t dw2 = 0;
- if (amdgpu_mcbp)
+ if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev))
gfx_v10_0_ring_emit_ce_meta(ring,
- flags & AMDGPU_IB_PREEMPTED ? true : false);
+ (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
gfx_v10_0_ring_emit_tmz(ring, true);
@@ -4806,6 +4914,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
+static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
+ unsigned vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t value = 0;
+
+ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+}
+
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -5197,6 +5318,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v10_0_ring_soft_recovery,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8f20a5dd44fe..733d398c61cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32(mmRLC_SPM_VMID);
+
+ data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+ WREG32(mmRLC_SPM_VMID, data);
+}
+
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
u32 data, orig, tmp, tmp2;
@@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
.resume = gfx_v7_0_rlc_resume,
.stop = gfx_v7_0_rlc_stop,
.reset = gfx_v7_0_rlc_reset,
- .start = gfx_v7_0_rlc_start
+ .start = gfx_v7_0_rlc_start,
+ .update_spm_vmid = gfx_v7_0_update_spm_vmid
};
static int gfx_v7_0_early_init(void *handle)
@@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+ adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fa245973de12..fc32586ef80b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+ adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
@@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
return r;
}
+static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
/* defaults */
mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
- mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
- mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
- mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
@@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
+ /* set static priority for a queue/ring */
+ gfx_v8_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
*/
@@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
}
}
+static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32(mmRLC_SPM_VMID);
+
+ data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+ WREG32(mmRLC_SPM_VMID, data);
+}
+
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
.set_safe_mode = gfx_v8_0_set_safe_mode,
@@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.resume = gfx_v8_0_rlc_resume,
.stop = gfx_v8_0_rlc_stop,
.reset = gfx_v8_0_rlc_reset,
- .start = gfx_v8_0_rlc_start
+ .start = gfx_v8_0_rlc_start,
+ .update_spm_vmid = gfx_v8_0_update_spm_vmid
};
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v8_0_ring_emit_de_meta(ring);
}
@@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
-static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v8_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v8_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v8_0_hqd_set_priority(adev, ring, acquire);
- gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v8_0_ring_set_priority_compute,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 889154a78c4a..d2d9dce68c2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -697,6 +697,11 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
+static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
+ {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
+ {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
+};
+
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
@@ -721,6 +726,59 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
+void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
+ if (offset == grbm_cntl || offset == grbm_idx) {
+ if (offset == grbm_cntl)
+ writel(v, scratch_reg2);
+ else if (offset == grbm_idx)
+ writel(v, scratch_reg3);
+
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+ }
+
+}
+
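The function above is the guest-side half of the RLCG protocol: writes to RLC-guarded registers are posted through scratch registers and handed off via a spare interrupt. It only takes effect once a register-write helper dispatches to it; a minimal sketch of that dispatch, assuming the is_rlcg_access_range/rlcg_wreg callbacks wired into the rlc_funcs table later in this patch (the helper name and exact SRIOV condition are illustrative):

static void wreg_maybe_rlcg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	/* Route RLC-guarded registers through the RLCG scratch protocol
	 * on SRIOV; everything else goes straight to MMIO.
	 */
	if (amdgpu_sriov_vf(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
		adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
	else
		WREG32(reg, v);
}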
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -738,9 +796,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
@@ -1106,10 +1164,11 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
- if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+ if ((adev->asic_type != CHIP_ARCTURUS) &&
+ ((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
- (adev->gfx.pfp_feature_version < 46))
+ (adev->gfx.pfp_feature_version < 46)))
DRM_WARN_ONCE("CP firmware version too old, please update!");
switch (adev->asic_type) {
@@ -1158,6 +1217,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.mec_fw_write_wait = true;
break;
default:
+ adev->gfx.me_fw_write_wait = true;
+ adev->gfx.mec_fw_write_wait = true;
break;
}
}
@@ -1173,6 +1234,10 @@ struct amdgpu_gfxoff_quirk {
static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
+ { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+ /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
{ 0, 0, 0, 0, 0 },
};
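Each quirk entry is a (vendor, device, subsystem vendor, subsystem device, revision) tuple matched against the PCI identity of the GPU. A sketch of the comparison, assuming the struct amdgpu_gfxoff_quirk field names chip_vendor/chip_device/subsys_vendor/subsys_device/revision, which this hunk elides:

static bool gfxoff_quirk_matches(const struct amdgpu_gfxoff_quirk *q,
				 struct pci_dev *pdev)
{
	/* All five fields must match for the quirk (disabling GFXOFF)
	 * to apply to this board.
	 */
	return pdev->vendor == q->chip_vendor &&
	       pdev->device == q->chip_device &&
	       pdev->subsystem_vendor == q->subsys_vendor &&
	       pdev->subsystem_device == q->subsys_device &&
	       pdev->revision == q->revision;
}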
@@ -1846,6 +1911,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
break;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1883,7 +1952,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
return r;
}
- memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
+ memset(hpd, 0, mec_hpd_size);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
@@ -1916,7 +1985,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1928,7 +1997,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1992,7 +2061,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
.ras_error_inject = &gfx_v9_0_ras_error_inject,
- .query_ras_error_count = &gfx_v9_0_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
@@ -2003,7 +2073,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
.ras_error_inject = &gfx_v9_4_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -3309,6 +3380,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
+static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3445,6 +3532,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
+ /* set static priority for a queue/ring */
+ gfx_v9_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
/* map_queues packet doesn't need to activate the queue,
* so only kiq needs to set this field.
*/
@@ -3963,6 +4054,63 @@ static int gfx_v9_0_soft_reset(void *handle)
return 0;
}
+static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+ amdgpu_ring_write(ring, 9 | /* src: register */
+ (5 << 8) | /* dst: memory */
+ (1 << 16) | /* count sel */
+ (1 << 20)); /* write confirm */
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+ kiq->reg_val_offs * 4));
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+ kiq->reg_val_offs * 4));
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't keep waiting in the GPU reset case, because doing so may
+ * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
+ * path is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting in virt_kiq_rreg, which causes
+ * gpu_recover() to hang there.
+ *
+ * Also don't wait in IRQ context.
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
+ (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1] << 32ULL;
+
+failed_kiq_read:
+ pr_err("failed to read gpu clock\n");
+ return ~0;
+}
+
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -3970,16 +4118,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
- uint32_t tmp, lsb, msb, i = 0;
- do {
- if (i != 0)
- udelay(1);
- tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
- msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- i++;
- } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
- clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
@@ -4053,6 +4192,101 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf810000,
};
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+ 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
+ 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
+ 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
+ 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
+ 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
+ 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
+ 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
+ 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
+ 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
+ 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
+ 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
+ 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
+ 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
+ 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
+ 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
+ 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
+ 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
+ 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
+ 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
+ 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
+ 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
+ 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
+ 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
+ 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
+ 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
+ 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
+ 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
+ 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
+ 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
+ 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
+ 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
+ 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
+ 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
+ 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
+ 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
+ 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
+ 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
+ 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
+ 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
+ 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
+ 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
+ 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
+ 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
+ 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
+ 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
+ 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
+ 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
+ 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
+ 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
+ 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
+ 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
+ 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
+ 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
+ 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
+ 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
+ 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
+ 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
+ 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
+ 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
+ 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
+ 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
+ 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
+ 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
+ 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
+ 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
+ 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
+ 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
+ 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
+ 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
+ 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
+ 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
+ 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
+ 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
+ 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
+ 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
+ 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
+ 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
+ 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
+ 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
+ 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
+ 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
+ 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
+ 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
+ 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
+ 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
+ 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
+ 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
+ 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
+ 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
+ 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
+ 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
+ 0xbf84fff8, 0xbf810000,
+};
+
/* When the register arrays below are changed, please update gpr_reg_size
and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds
to cover all gfx9 ASICs */
@@ -4073,6 +4307,23 @@ static const struct soc15_reg_entry vgpr_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
+static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
static const struct soc15_reg_entry sgpr1_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
@@ -4141,7 +4392,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
- { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
};
static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
@@ -4204,7 +4454,10 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se;
int sgpr_work_group_size = 5;
- int gpr_reg_size = compute_dim_x / 16 + 6;
+ int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
+ int vgpr_init_shader_size;
+ const u32 *vgpr_init_shader_ptr;
+ const struct soc15_reg_entry *vgpr_init_regs_ptr;
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
@@ -4214,6 +4467,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
+ vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
+ } else {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
+ vgpr_init_regs_ptr = vgpr_init_regs;
+ }
+
total_size =
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
total_size +=
@@ -4222,7 +4485,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
- total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
+ total_size += ALIGN(vgpr_init_shader_size, 256);
sgpr_offset = total_size;
total_size += sizeof(sgpr_init_compute_shader);
@@ -4235,8 +4498,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* load the compute shaders */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
- ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+ for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
+ ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
@@ -4248,9 +4511,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write the register state for the compute dispatch */
for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
- PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
+ ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
@@ -4262,7 +4525,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4342,18 +4605,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
goto fail;
}
- switch (adev->asic_type)
- {
- case CHIP_VEGA20:
- gfx_v9_0_clear_ras_edc_counter(adev);
- break;
- case CHIP_ARCTURUS:
- gfx_v9_4_clear_ras_edc_counter(adev);
- break;
- default:
- break;
- }
-
fail:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -4401,6 +4652,10 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
+ if (adev->gfx.funcs &&
+ adev->gfx.funcs->reset_ras_error_count)
+ adev->gfx.funcs->reset_ras_error_count(adev);
+
r = amdgpu_gfx_ras_late_init(adev);
if (r)
return r;
@@ -4705,6 +4960,47 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v9_0_check_rlcg_range(adev, offset,
+ (void *)rlcg_access_gc_9_0,
+ ARRAY_SIZE(rlcg_access_gc_9_0));
+}
+
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
.set_safe_mode = gfx_v9_0_set_safe_mode,
@@ -4716,7 +5012,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.resume = gfx_v9_0_rlc_resume,
.stop = gfx_v9_0_rlc_stop,
.reset = gfx_v9_0_rlc_reset,
- .start = gfx_v9_0_rlc_start
+ .start = gfx_v9_0_rlc_start,
+ .update_spm_vmid = gfx_v9_0_update_spm_vmid,
+ .rlcg_wreg = gfx_v9_0_rlcg_wreg,
+ .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4728,10 +5027,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
switch (adev->asic_type) {
case CHIP_RAVEN:
case CHIP_RENOIR:
- if (!enable) {
+ if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- }
+
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@@ -4755,12 +5053,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
amdgpu_gfx_off_ctrl(adev, true);
break;
case CHIP_VEGA12:
- if (!enable) {
- amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- } else {
- amdgpu_gfx_off_ctrl(adev, true);
- }
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@@ -4919,7 +5212,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v9_0_ring_emit_de_meta(ring);
}
@@ -5044,105 +5337,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
return wptr;
}
-static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- soc15_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
-static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v9_0_hqd_set_priority(adev, ring, acquire);
- gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -6322,10 +6516,13 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
return 0;
}
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
/* read back registers to clear the counters */
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
@@ -6513,7 +6710,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v9_0_ring_set_priority_compute,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index f099f13d7f1e..dce945ef21a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -710,14 +710,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -893,10 +895,13 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 2e3f6f755ad4..1ffecc5c0f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
+
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index b70c7b483c24..cc866c367939 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Disable AGP. */
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
-
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
-
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * The new L1 policy blocks the SRIOV guest from writing
+ * these regs; they are programmed on the host instead,
+ * so skip programming them here.
+ */
+ /* Disable AGP. */
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ + adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+ }
/* Program "protection fault". */
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
@@ -135,6 +142,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ /* These regs are not accessible to the VF; the PF programs them under SRIOV */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -256,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
gfxhub_v2_0_init_gart_aperture_regs(adev);
gfxhub_v2_0_init_system_aperture_regs(adev);
@@ -298,9 +297,11 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
- /* Setup L2 cache */
- WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index cc0c273a86f9..8606f877478f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -476,13 +476,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17;
- u32 j, inv_req, tmp;
+ u32 j, inv_req, inv_req2, tmp;
struct amdgpu_vmhub *hub;
BUG_ON(vmhub >= adev->num_vmhubs);
hub = &adev->vmhub[vmhub];
- inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ if (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20) {
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a
+ * heavy-weight TLB flush (type 2), which flushes
+ * both. Due to a race condition with concurrent
+ * memory accesses using the same TLB cache line, we
+ * still need a second TLB flush after this.
+ */
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
+ inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ } else {
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ inv_req2 = 0;
+ }
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
@@ -521,21 +534,27 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
+ do {
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
- /*
- * Issue a dummy read to wait for the ACK register to be cleared
- * to avoid a false ACK due to the new fast GRBM interface.
- */
- if (vmhub == AMDGPU_GFXHUB_0)
- RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
+ /*
+ * Issue a dummy read to wait for the ACK register to
+ * be cleared to avoid a false ACK due to the new fast
+ * GRBM interface.
+ */
+ if (vmhub == AMDGPU_GFXHUB_0)
+ RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- if (tmp & (1 << vmid))
- break;
- udelay(1);
- }
+ for (j = 0; j < adev->usec_timeout; j++) {
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
+ if (tmp & (1 << vmid))
+ break;
+ udelay(1);
+ }
+
+ inv_req = inv_req2;
+ inv_req2 = 0;
+ } while (inv_req);
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
if (use_semaphore)
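For reference, the loop above simply reissues whatever request word gmc_v9_0_get_invalidate_req() builds, first with flush type 2 (heavy-weight) and then with the caller's type. A sketch of how such a request word is typically composed from the VM_INVALIDATE_ENG0_REQ bitfields — reconstructed from memory of kernels of this vintage, not copied from this diff:

static u32 invalidate_req_sketch(unsigned int vmid, uint32_t flush_type)
{
	u32 req = 0;

	/* One invalidate bit per VMID, plus the flush type; type 2 also
	 * flushes TC-cached PTEs, which is why the Vega20 XGMI path
	 * issues it before the requested flush.
	 */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE,
			    flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    INVALIDATE_L1_PTES, 1);

	return req;
}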
@@ -577,9 +596,26 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
return -EIO;
if (ring->sched.ready) {
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a
+ * heavy-weight TLB flush (type 2), which flushes
+ * both. Due to a race condition with concurrent
+ * memory accesses using the same TLB cache line, we
+ * still need a second TLB flush after this.
+ */
+ bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20);
+ /* 2 dwords flush + 8 dwords fence */
+ unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
+
+ if (vega20_xgmi_wa)
+ ndw += kiq->pmf->invalidate_tlbs_size;
+
spin_lock(&adev->gfx.kiq.ring_lock);
/* 2 dwords flush + 8 dwords fence */
- amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+ amdgpu_ring_alloc(ring, ndw);
+ if (vega20_xgmi_wa)
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, 2, all_hub);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq);
@@ -886,32 +922,25 @@ static int gmc_v9_0_late_init(void *handle)
if (r)
return r;
/* Check if ecc is available */
- if (!amdgpu_sriov_vf(adev)) {
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- r = amdgpu_atomfirmware_mem_ecc_supported(adev);
- if (!r) {
- DRM_INFO("ECC is not present.\n");
- if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
- adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else {
- DRM_INFO("ECC is active.\n");
- }
-
- r = amdgpu_atomfirmware_sram_ecc_supported(adev);
- if (!r) {
- DRM_INFO("SRAM ECC is not present.\n");
- } else {
- DRM_INFO("SRAM ECC is active.\n");
- }
- break;
- default:
- break;
- }
+ if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+ r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("ECC is not present.\n");
+ if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ } else
+ DRM_INFO("ECC is active.\n");
+
+ r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+ if (!r)
+ DRM_INFO("SRAM ECC is not present.\n");
+ else
+ DRM_INFO("SRAM ECC is active.\n");
}
+ if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+ adev->mmhub.funcs->reset_ras_error_count(adev);
+
r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 49a3a56ec017..396c2a624de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ue_count += ded_count;
}
+static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* read back edc counter registers to reset the counters to 0 */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index bde189680521..fb3f228458e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * The new L1 policy blocks the SRIOV guest from writing
+ * these regs; they are programmed on the host instead,
+ * so skip programming them here.
+ */
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+ }
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
@@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
mmhub_v2_0_init_gart_aperture_regs(adev);
mmhub_v2_0_init_system_aperture_regs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index a5281df8d84f..c0e3efcb09bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1539,8 +1539,11 @@ static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
};
-static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
- uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
+ uint32_t value,
+ uint32_t *sec_count,
+ uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
@@ -1553,7 +1556,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v9_4_ras_fields[i].sec_count_mask) >>
mmhub_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
mmhub_v9_4_ras_fields[i].name,
sec_cnt);
*sec_count += sec_cnt;
@@ -1563,7 +1566,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v9_4_ras_fields[i].ded_count_mask) >>
mmhub_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
mmhub_v9_4_ras_fields[i].name,
ded_cnt);
*ded_count += ded_cnt;
@@ -1588,7 +1591,7 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
if (reg_value)
- mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+ mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i],
reg_value, &sec_count, &ded_count);
}
@@ -1596,7 +1599,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
err_data->ue_count += ded_count;
}
+static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* read back edc counter registers to reset the counters to 0 */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
new file mode 100644
index 000000000000..1b5086c7d4e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V2_0_H__
+#define __MMSCH_V2_0_H__
+
+// addressBlock: uvd0_mmsch_dec
+// base address: 0x1e000
+#define mmMMSCH_UCODE_ADDR 0x0000
+#define mmMMSCH_UCODE_ADDR_BASE_IDX 0
+#define mmMMSCH_UCODE_DATA 0x0001
+#define mmMMSCH_UCODE_DATA_BASE_IDX 0
+#define mmMMSCH_SRAM_ADDR 0x0002
+#define mmMMSCH_SRAM_ADDR_BASE_IDX 0
+#define mmMMSCH_SRAM_DATA 0x0003
+#define mmMMSCH_SRAM_DATA_BASE_IDX 0
+#define mmMMSCH_VF_SRAM_OFFSET 0x0004
+#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_DB_SRAM_OFFSET 0x0005
+#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_CTX_SRAM_OFFSET 0x0006
+#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_CTL 0x0007
+#define mmMMSCH_CTL_BASE_IDX 0
+#define mmMMSCH_INTR 0x0008
+#define mmMMSCH_INTR_BASE_IDX 0
+#define mmMMSCH_INTR_ACK 0x0009
+#define mmMMSCH_INTR_ACK_BASE_IDX 0
+#define mmMMSCH_INTR_STATUS 0x000a
+#define mmMMSCH_INTR_STATUS_BASE_IDX 0
+#define mmMMSCH_VF_VMID 0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_CTX_SIZE 0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f
+#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010
+#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_SIZE 0x0011
+#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_HOST 0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_RESP 0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_0 0x0014
+#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015
+#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_1 0x0016
+#define mmMMSCH_VF_MAILBOX_1_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017
+#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0
+#define mmMMSCH_CNTL 0x001c
+#define mmMMSCH_CNTL_BASE_IDX 0
+#define mmMMSCH_NONCACHE_OFFSET0 0x001d
+#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0
+#define mmMMSCH_NONCACHE_SIZE0 0x001e
+#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0
+#define mmMMSCH_NONCACHE_OFFSET1 0x001f
+#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0
+#define mmMMSCH_NONCACHE_SIZE1 0x0020
+#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0
+#define mmMMSCH_PDEBUG_STATUS 0x0021
+#define mmMMSCH_PDEBUG_STATUS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_EPC 0x0024
+#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0
+#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025
+#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0
+#define mmMMSCH_PROC_STATE1 0x0026
+#define mmMMSCH_PROC_STATE1_BASE_IDX 0
+#define mmMMSCH_LAST_MC_ADDR 0x0027
+#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0
+#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028
+#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0
+#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029
+#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0
+#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a
+#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmMMSCH_SCRATCH_0 0x002b
+#define mmMMSCH_SCRATCH_0_BASE_IDX 0
+#define mmMMSCH_SCRATCH_1 0x002c
+#define mmMMSCH_SCRATCH_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f
+#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_0 0x0033
+#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_0 0x0034
+#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_0 0x0035
+#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038
+#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_1 0x003c
+#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_1 0x003d
+#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_1 0x003e
+#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CNTXT 0x003f
+#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0
+#define mmMMSCH_SCRATCH_2 0x0040
+#define mmMMSCH_SCRATCH_2_BASE_IDX 0
+#define mmMMSCH_SCRATCH_3 0x0041
+#define mmMMSCH_SCRATCH_3_BASE_IDX 0
+#define mmMMSCH_SCRATCH_4 0x0042
+#define mmMMSCH_SCRATCH_4_BASE_IDX 0
+#define mmMMSCH_SCRATCH_5 0x0043
+#define mmMMSCH_SCRATCH_5_BASE_IDX 0
+#define mmMMSCH_SCRATCH_6 0x0044
+#define mmMMSCH_SCRATCH_6_BASE_IDX 0
+#define mmMMSCH_SCRATCH_7 0x0045
+#define mmMMSCH_SCRATCH_7_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046
+#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047
+#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_1 0x0048
+#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049
+#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0
+#define mmMMSCH_NACK_STATUS 0x004a
+#define mmMMSCH_NACK_STATUS_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX0_DATA 0x004b
+#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX1_DATA 0x004c
+#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053
+#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056
+#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_2 0x005a
+#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_2 0x005b
+#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_2 0x005c
+#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060
+#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061
+#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_0 0x0062
+#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_1 0x0063
+#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_2 0x0064
+#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0
+
+#define MMSCH_VERSION_MAJOR 2
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+enum mmsch_v2_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v2_0_init_header {
+ uint32_t version;
+ uint32_t header_size;
+ uint32_t vcn_init_status;
+ uint32_t vcn_table_offset;
+ uint32_t vcn_table_size;
+};
+
+struct mmsch_v2_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_direct_write {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v2_0_cmd_direct_read_modify_write {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v2_0_cmd_direct_polling {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v2_0_cmd_end {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v2_0_cmd_indirect_write {
+ struct mmsch_v2_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t value)
+{
+ direct_wt->cmd_header.reg_offset = reg_offset;
+ direct_wt->reg_value = value;
+ memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t data)
+{
+ direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
+ direct_rd_mod_wt->mask_value = mask;
+ direct_rd_mod_wt->write_data = data;
+ memcpy((void *)init_table, direct_rd_mod_wt,
+ sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t wait)
+{
+ direct_poll->cmd_header.reg_offset = reg_offset;
+ direct_poll->mask_value = mask;
+ direct_poll->wait_value = wait;
+ memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling));
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
+ init_table, (reg), \
+ (mask), (data)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \
+ mmsch_v2_0_insert_direct_wt(&direct_wt, \
+ init_table, (reg), \
+ (value)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ mmsch_v2_0_insert_direct_poll(&direct_poll, \
+ init_table, (reg), \
+ (mask), (wait)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+}
+
+#endif
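The INSERT macros above deliberately expand against locals named init_table, table_size, and the matching command struct, so any caller has to declare all three. A minimal sketch of building a table using registers defined in this header (the real consumer is the VCN 2.0 SRIOV start path, which is not part of this hunk):

static uint32_t mmsch_v2_0_build_table_sketch(uint32_t *table)
{
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v2_0_cmd_end end = { {0} };
	uint32_t *init_table = table;
	uint32_t table_size = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	/* Queue one write, then poll a status register until bit 0 sets. */
	MMSCH_V2_0_INSERT_DIRECT_WT(mmMMSCH_SCRATCH_0, 0x1);
	MMSCH_V2_0_INSERT_DIRECT_POLL(mmMMSCH_INTR_STATUS, 0x1, 0x1);

	/* Terminate the table with an END command. */
	memcpy((void *)init_table, &end, sizeof(struct mmsch_v2_0_cmd_end));
	table_size += sizeof(struct mmsch_v2_0_cmd_end) / 4;

	return table_size;
}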
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index cf557a428298..e08245a446fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -32,6 +32,7 @@
#include "soc15_common.h"
#include "navi10_ih.h"
+#define MAX_REARM_RETRY 10
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@@ -284,6 +285,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
}
/**
+ * navi10_ih_irq_rearm - rearm the IRQ if the doorbell write was lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to rearm
+ */
+static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t reg_rptr = 0;
+ uint32_t v = 0;
+ uint32_t i = 0;
+
+ if (ih == &adev->irq.ih)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ else if (ih == &adev->irq.ih1)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ else
+ return;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(reg_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
* navi10_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
@@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ navi10_ih_irq_rearm(adev, ih);
} else
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
index 074a9a09c0a7..a5b60c9a2418 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
@@ -73,6 +73,22 @@
#define SDMA_OP_AQL_COPY 0
#define SDMA_OP_AQL_BARRIER_OR 0
+#define SDMA_GCR_RANGE_IS_PA (1 << 18)
+#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16)
+#define SDMA_GCR_GL2_WB (1 << 15)
+#define SDMA_GCR_GL2_INV (1 << 14)
+#define SDMA_GCR_GL2_DISCARD (1 << 13)
+#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11)
+#define SDMA_GCR_GL2_US (1 << 10)
+#define SDMA_GCR_GL1_INV (1 << 9)
+#define SDMA_GCR_GLV_INV (1 << 8)
+#define SDMA_GCR_GLK_INV (1 << 7)
+#define SDMA_GCR_GLK_WB (1 << 6)
+#define SDMA_GCR_GLM_INV (1 << 5)
+#define SDMA_GCR_GLM_WB (1 << 4)
+#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2)
+#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0)
+
/*define for op field*/
#define SDMA_PKT_HEADER_op_offset 0
#define SDMA_PKT_HEADER_op_mask 0x000000FF
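
The new SDMA_GCR_* bits are consumed by sdma_v5_0_ring_emit_ib() later in this patch, which packs them into the upper half of a GCR_REQ payload dword. A hedged sketch of composing the same cache-maintenance request (variable names are illustrative):

    /* GL2 writeback+invalidate plus GLM writeback+invalidate, as used by
     * the emit_ib change below */
    uint32_t gcr_bits = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
                        SDMA_GCR_GLM_INV | SDMA_GCR_GLM_WB;
    uint32_t payload_dw = gcr_bits << 16;   /* bits live in the high half */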
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 65eb378fa035..149d386590df 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -318,6 +318,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
{
uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
+ struct ras_err_data err_data = {0, 0, 0, NULL};
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -332,7 +333,19 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
* clear error status after ras_controller_intr according to
* hw team and count ue number for query
*/
- nbio_v7_4_query_ras_error_count(adev, &obj->err_data);
+ nbio_v7_4_query_ras_error_count(adev, &err_data);
+
+ /* accumulate the error counts and log them for awareness */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+
+ if (err_data.ce_count)
+ DRM_INFO("%ld correctable errors detected in %s block\n",
+ obj->err_data.ce_count, adev->nbio.ras_if->name);
+
+ if (err_data.ue_count)
+ DRM_INFO("%ld uncorrectable errors detected in %s block\n",
+ obj->err_data.ue_count, adev->nbio.ras_if->name);
DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 2d1bebdf1603..52318b03c424 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -351,8 +351,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
ret = smu_baco_enter(smu);
if (ret)
return ret;
@@ -360,8 +358,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
if (ret)
return ret;
} else {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
ret = nv_asic_mode1_reset(adev);
}
@@ -516,7 +512,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 36b65797434e..a44fd6060d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -31,6 +31,9 @@
#define GFX_CMD_RESERVED_MASK 0x7FF00000
#define GFX_CMD_RESPONSE_MASK 0x80000000
+/* USBC PD FW version retrieval command */
+#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000
+
/* TEE Gfx Command IDs for the register interface.
* Command ID must be between 0x00010000 and 0x000F0000.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0829188c1a5c..0afd610a1263 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v11_0.h"
@@ -65,6 +66,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
+/* For large FW files the time to complete can be very long */
+#define USBC_PD_POLLING_LIMIT_S 240
+
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -420,7 +424,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
- psp_v11_0_reroute_ih(psp);
+ if (!amdgpu_sriov_vf(adev))
+ psp_v11_0_reroute_ih(psp);
ring = &psp->km_ring;
@@ -864,6 +869,11 @@ static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
if (ret)
return -EINVAL;
+ /* If err_event_athub occurs, the error injection was successful; however,
+ the return status from the TA is no longer reliable */
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
return ras_cmd->ras_status;
}
@@ -1108,6 +1118,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
+static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t reg_status;
+ int ret, i = 0;
+
+ /* Write lower 32-bit address of the PD Controller FW */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr));
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire up an interrupt so PSP can pick up the lower address */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ /* Write upper 32-bit address of the PD Controller FW */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr));
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire up an interrupt so PSP can pick up the upper address */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000);
+
+ /* FW load can take a very long time */
+ do {
+ msleep(1000);
+ reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+ if (reg_status & 0x80000000)
+ goto done;
+
+ } while (++i < USBC_PD_POLLING_LIMIT_S);
+
+ return -ETIME;
+done:
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (!ret)
+ *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
+
+ return ret;
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -1132,6 +1218,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.mem_training = psp_v11_0_memory_training,
.ring_get_wptr = psp_v11_0_ring_get_wptr,
.ring_set_wptr = psp_v11_0_ring_set_wptr,
+ .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
+ .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
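
With load_usbc_pd_fw/read_usbc_pd_fw wired into psp_v11_0_funcs, callers can reach them through the function table. A minimal sketch, assuming psp->funcs has been populated by psp_v11_0_set_psp_funcs(); the wrapper name is hypothetical (the real dispatch helpers live in amdgpu_psp, outside this hunk):

    static int example_read_usbc_pd_fw_ver(struct psp_context *psp,
                                           uint32_t *fw_ver)
    {
            /* guard against ASICs whose psp_funcs lack the new callback */
            if (!psp->funcs || !psp->funcs->read_usbc_pd_fw)
                    return -EOPNOTSUPP;

            return psp->funcs->read_usbc_pd_fw(psp, fw_ver);
    }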
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e55884d204bd..5f3a5ee2a3f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -677,7 +677,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
- * sdma_v4_0_ring_set_wptr - commit the write pointer
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
@@ -977,7 +977,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
@@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle)
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- int i;
- /* read back edc counter registers to clear the counters */
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- for (i = 0; i < adev->sdma.num_instances; i++)
- RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
- }
+ if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
+ adev->sdma.funcs->reset_ras_error_count(adev);
if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
return adev->sdma.funcs->ras_late_init(adev, &ih_info);
@@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
};
+static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ int i;
+
+ /* read back edc counter registers to clear the counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
+ }
+}
+
static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
.ras_late_init = amdgpu_sdma_ras_late_init,
.ras_fini = amdgpu_sdma_ras_fini,
.query_ras_error_count = sdma_v4_0_query_ras_error_count,
+ .reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 67b9830b7c7e..d2840c2f6286 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
+ /* Invalidate L2, because if we don't do it, we might get stale cache
+ * lines from previous IBs.
+ */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
+ SDMA_GCR_GL2_WB |
+ SDMA_GCR_GLM_INV |
+ SDMA_GCR_GLM_WB) << 16);
+ amdgpu_ring_write(ring, 0xffffff80);
+ amdgpu_ring_write(ring, 0xffff);
+
/* An IB packet must end on an 8-DW boundary--the next dword
* must be on an 8-dword boundary. Our IB packet below is 6
* dwords long, thus add x number of NOPs, such that, in
@@ -746,11 +758,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
sdma_v5_0_enable(adev, true);
}
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -1597,7 +1607,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
- .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
+ .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib,
.emit_fence = sdma_v5_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
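
The emit_ib_size bump reflects the five extra dwords the GCR_REQ packet adds ahead of the indirect-buffer packet. A hedged dword accounting, derived from the hunk above:

    /* illustrative accounting for .emit_ib_size = 5 + 7 + 6 */
    #define GCR_REQ_DWORDS   5  /* header + 4 payload dwords emitted above */
    #define IB_ALIGN_NOPS    7  /* worst-case NOP padding to an 8-DW boundary */
    #define INDIRECT_DWORDS  6  /* the SDMA indirect-buffer packet itself */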
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 4cb4c891120b..0860e85a2d35 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3439,7 +3439,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0xC3) ||
(adev->pdev->device == 0x6664) ||
(adev->pdev->device == 0x6665) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index c902f26cf50d..9bffbab35041 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -46,8 +46,7 @@
#define I2C_NO_STOP 1
#define I2C_RESTART 2
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
-#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor)
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
{
@@ -592,7 +591,8 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_lock(i2c)) {
DRM_ERROR("Failed to lock the bus from SMU");
@@ -610,7 +610,8 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_unlock(i2c)) {
DRM_ERROR("Failed to unlock the bus from SMU");
@@ -630,7 +631,8 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
int i, ret;
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!control->bus_locked) {
DRM_ERROR("I2C bus unlocked, stopping transaction!");
@@ -679,7 +681,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
res = i2c_add_adapter(control);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index d8945c31b622..d42a8d8a0dea 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -569,14 +569,10 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
return amdgpu_dpm_mode2_reset(adev);
default:
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
return soc15_asic_mode1_reset(adev);
}
}
@@ -852,6 +848,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev)
/* change this when we implement soft reset */
return true;
}
+
+static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+ return;
+ /* read back HDP RAS counter to reset it to 0 */
+ RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
@@ -1019,6 +1024,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.get_config_memsize = &soc15_get_config_memsize,
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
+ .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1264,6 +1270,10 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
+ if (adev->asic_funcs &&
+ adev->asic_funcs->reset_hdp_ras_error_count)
+ adev->asic_funcs->reset_hdp_ras_error_count(adev);
+
if (adev->nbio.funcs->ras_late_init)
r = adev->nbio.funcs->ras_late_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index d0fb7a67c1a3..b03f950c486c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -42,6 +42,13 @@ struct soc15_reg_golden {
u32 or_mask;
};
+struct soc15_reg_rlcg {
+ u32 hwip;
+ u32 instance;
+ u32 segment;
+ u32 reg;
+};
+
struct soc15_reg_entry {
uint32_t hwip;
uint32_t inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 19e870c79896..c893c645a4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -70,10 +70,9 @@
} \
} while (0)
-#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
#define WREG32_RLC(reg, value) \
do { \
- if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t i = 0; \
uint32_t retries = 50000; \
uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
@@ -98,7 +97,7 @@
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
- if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 793bf70e64b1..14d346321a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_disable_umc_index_mode(adev);
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ DRM_WARN("Fail to disable DF-Cstate.\n");
+
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
umc_inst,
@@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
&(err_data->ue_count));
}
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ DRM_WARN("Fail to enable DF-Cstate\n");
+
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
}
@@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
}
- /* skip error address process if -ENOMEM */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ if (mc_umc_status == 0)
+ return;
+
if (!err_data->err_addr) {
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
@@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
}
err_rec = &err_data->err_addr[err_data->err_addr_cnt];
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_disable_umc_index_mode(adev);
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ DRM_WARN("Fail to disable DF-Cstate.\n");
+
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
umc_inst,
@@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
umc_inst);
}
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ DRM_WARN("Fail to enable DF-Cstate\n");
+
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
}
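
On Arcturus, both query paths now bracket the UMC register access with DF C-state control. A minimal sketch of that bracketing pattern, with the helper name invented for illustration:

    /* hypothetical helper capturing the disallow/allow bracketing above */
    static void umc_df_cstate_ctrl(struct amdgpu_device *adev, bool disallow)
    {
            if (adev->asic_type != CHIP_ARCTURUS)
                    return;

            if (amdgpu_dpm_set_df_cstate(adev, disallow ?
                                         DF_CSTATE_DISALLOW : DF_CSTATE_ALLOW))
                    DRM_WARN("Failed to %s DF-Cstate\n",
                             disallow ? "disable" : "enable");
    }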
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index b7f17342bbf0..ec8091a661df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -29,6 +29,7 @@
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
+#include "mmsch_v2_0.h"
#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
@@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
-
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
* vcn_v2_0_early_init - set function pointers
*
@@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->vcn.num_vcn_inst = 1;
- adev->vcn.num_enc_rings = 2;
+ if (amdgpu_sriov_vf(adev))
+ adev->vcn.num_enc_rings = 1;
+ else
+ adev->vcn.num_enc_rings = 2;
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
@@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+ else
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
if (r)
@@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle)
adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle)
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, 0);
+ if (amdgpu_sriov_vf(adev))
+ vcn_v2_0_start_sriov(adev);
+
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
@@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* UVD disable CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* enable UVD CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
uint32_t data = 0;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
uint32_t data = 0;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
/* Before power off, this indicator has to be turned on */
data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
@@ -1215,6 +1246,9 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (enable) {
/* wait for STATUS to clear */
if (!vcn_v2_0_is_idle(handle))
@@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 4);
if (r)
@@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle,
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->vcn.cur_state)
return 0;
@@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle,
return ret;
}
+static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
+ struct amdgpu_mm_table *table)
+{
+ uint32_t data = 0, loop;
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v2_0_init_header *header;
+ uint32_t size;
+ int i;
+
+ header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
+ size = header->header_size + header->vcn_table_size;
+
+ /* 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
+ * of memory descriptor location
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+ /* 2, update vmid of descriptor */
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+ adev->vcn.inst->ring_dec.wptr = 0;
+ adev->vcn.inst->ring_dec.wptr_old = 0;
+ vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ adev->vcn.inst->ring_enc[i].wptr = 0;
+ adev->vcn.inst->ring_enc[i].wptr_old = 0;
+ vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
+ }
+
+ /* 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop = 1000;
+ while ((data & 0x10000002) != 0x10000002) {
+ udelay(10);
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop--;
+ if (!loop)
+ break;
+ }
+
+ if (!loop) {
+ DRM_ERROR("failed to init MMSCH, " \
+ "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
+{
+ int r;
+ uint32_t tmp;
+ struct amdgpu_ring *ring;
+ uint32_t offset, size;
+ uint32_t table_size = 0;
+ struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
+ struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
+ struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
+ struct mmsch_v2_0_cmd_end end = { {0} };
+ struct mmsch_v2_0_init_header *header;
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ uint8_t i = 0;
+
+ header = (struct mmsch_v2_0_init_header *)init_table;
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ direct_poll.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_POLLING;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
+ header->version = MMSCH_VERSION;
+ header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;
+
+ header->vcn_table_offset = header->header_size;
+
+ init_table += header->vcn_table_offset;
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+ MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ 0xFFFFFFFF, 0x00000004);
+
+ /* mc resume*/
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ tmp = AMDGPU_UCODE_ID_VCN;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[tmp].tmr_mc_addr_lo);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[tmp].tmr_mc_addr_hi);
+ offset = 0;
+ } else {
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr));
+ offset = size;
+ }
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ size);
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
+ ring = &adev->vcn.inst->ring_enc[r];
+ ring->wptr = 0;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ ring->ring_size / 4);
+ }
+
+ ring = &adev->vcn.inst->ring_dec;
+ ring->wptr = 0;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+ /* force RBC into idle state */
+ tmp = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+ /* add end packet */
+ tmp = sizeof(struct mmsch_v2_0_cmd_end);
+ memcpy((void *)init_table, &end, tmp);
+ table_size += (tmp / 4);
+ header->vcn_table_size = table_size;
+
+ }
+ return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
+}
+
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 678253d81154..c6363f5ad564 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -74,29 +74,30 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v2_5_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS) {
- u32 harvest;
- int i;
-
- adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->vcn.harvest_config |= 1 << i;
- }
-
- if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
- /* both instances are harvested, disable the block */
- return -ENOENT;
- } else
- adev->vcn.num_vcn_inst = 1;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
adev->vcn.harvest_config = 0;
adev->vcn.num_enc_rings = 1;
} else {
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ u32 harvest;
+ int i;
+
+ adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->vcn.harvest_config |= 1 << i;
+ }
+
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else
+ adev->vcn.num_vcn_inst = 1;
+
adev->vcn.num_enc_rings = 2;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 78b35901643b..3ce10e05d0d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -765,8 +765,6 @@ static int vi_asic_reset(struct amdgpu_device *adev)
int r;
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
r = amdgpu_dpm_baco_reset(adev);
} else {
r = vi_asic_pci_config_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 3f0300e53727..0ec5f25adf56 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -127,6 +127,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
return PTR_ERR(process);
if (kfd_is_locked()) {
+ dev_dbg(kfd_device, "kfd is locked! process %d unreferenced\n",
+ process->pasid);
kfd_unref_process(process);
return -EAGAIN;
}
@@ -1167,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
if (!dev)
return -EINVAL;
- dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+ amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
args->gb_addr_config = config.gb_addr_config;
args->num_banks = config.num_banks;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 2a9e40131735..05bc6d96ec52 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -648,6 +648,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->get_hive_id)
kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);
+ if (kfd->kfd2kgd->get_unique_id)
+ kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd);
+
if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
@@ -710,7 +713,7 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- kgd2kfd_suspend(kfd);
+ kgd2kfd_suspend(kfd, false);
device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
@@ -731,7 +734,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
kfd->dqm->ops.pre_reset(kfd->dqm);
- kgd2kfd_suspend(kfd);
+ kgd2kfd_suspend(kfd, false);
kfd_signal_reset_event(kfd);
return 0;
@@ -765,21 +768,23 @@ bool kfd_is_locked(void)
return (atomic_read(&kfd_locked) > 0);
}
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
if (!kfd->init_complete)
return;
- /* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_locked) == 1)
- kfd_suspend_all_processes();
+ /* for runtime suspend, skip locking kfd */
+ if (!run_pm) {
+ /* For first KFD device suspend all the KFD processes */
+ if (atomic_inc_return(&kfd_locked) == 1)
+ kfd_suspend_all_processes();
+ }
kfd->dqm->ops.stop(kfd->dqm);
-
kfd_iommu_suspend(kfd);
}
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
int ret, count;
@@ -790,10 +795,13 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret)
return ret;
- count = atomic_dec_return(&kfd_locked);
- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
- if (count == 0)
- ret = kfd_resume_all_processes();
+ /* for runtime resume, skip unlocking kfd */
+ if (!run_pm) {
+ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+ if (count == 0)
+ ret = kfd_resume_all_processes();
+ }
return ret;
}
@@ -1104,9 +1112,9 @@ kfd_gtt_out:
return 0;
kfd_gtt_no_free_chunk:
- pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
+ pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
- kfree(mem_obj);
+ kfree(*mem_obj);
return -ENOMEM;
}
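
The new run_pm flag means a suspend and its matching resume must agree on whether the kfd_locked count was taken. A sketch of the pairing rule, with hypothetical callers:

    /* illustrative callers: only run_pm == false touches kfd_locked */
    static void example_system_sleep(struct kfd_dev *kfd)
    {
            kgd2kfd_suspend(kfd, false);  /* kfd_locked++, processes evicted */
            /* ... platform sleep ... */
            kgd2kfd_resume(kfd, false);   /* kfd_locked--, processes restored */
    }

    static void example_runtime_sleep(struct kfd_dev *kfd)
    {
            kgd2kfd_suspend(kfd, true);   /* refcount untouched */
            kgd2kfd_resume(kfd, true);
    }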
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 80d22bf702e8..77ea0f0cb163 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
return true;
return false;
}
-unsigned int get_queues_num(struct device_queue_manager *dqm)
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
- return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+ return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
KGD_MAX_QUEUES);
}
@@ -109,6 +109,11 @@ static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_xgmi_sdma_engines;
}
+static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
+{
+ return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
+}
+
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return dqm->dev->device_info->num_sdma_engines
@@ -132,6 +137,22 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
qpd->sh_mem_bases);
}
+void increment_queue_count(struct device_queue_manager *dqm,
+ enum kfd_queue_type type)
+{
+ dqm->active_queue_count++;
+ if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ dqm->active_cp_queue_count++;
+}
+
+void decrement_queue_count(struct device_queue_manager *dqm,
+ enum kfd_queue_type type)
+{
+ dqm->active_queue_count--;
+ if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ dqm->active_cp_queue_count--;
+}
+
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
struct kfd_dev *dev = qpd->dqm->dev;
@@ -281,8 +302,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
int retval;
- print_queue(q);
-
dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
@@ -359,12 +378,7 @@ add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
- dqm->queue_count++;
-
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
+ increment_queue_count(dqm, q->properties.type);
/*
* Unconditionally increment this counter, regardless of the queue's
@@ -446,15 +460,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
- if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- } else {
+ else {
pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
return -EINVAL;
@@ -494,7 +506,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
}
qpd->queue_count--;
if (q->properties.is_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
return retval;
}
@@ -563,13 +575,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
/*
* check active state vs. the previous state and modify
* counter accordingly. map_queues_cpsch uses the
- * dqm->queue_count to determine whether a new runlist must be
+ * dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
if (q->properties.is_active && !prev_active)
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
else if (!q->properties.is_active && prev_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
@@ -618,7 +630,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -662,7 +674,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = false;
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
}
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
@@ -731,7 +743,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -786,7 +798,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = true;
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
}
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,16 +911,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
- dqm->queue_count = dqm->next_pipe_to_allocate = 0;
- dqm->sdma_queue_count = 0;
- dqm->xgmi_sdma_queue_count = 0;
+ dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->active_cp_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue;
}
@@ -924,7 +935,7 @@ static void uninitialize(struct device_queue_manager *dqm)
{
int i;
- WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+ WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -966,8 +977,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (dqm->sdma_bitmap == 0)
+ if (dqm->sdma_bitmap == 0) {
+ pr_err("No more SDMA queue to allocate\n");
return -ENOMEM;
+ }
+
bit = __ffs64(dqm->sdma_bitmap);
dqm->sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
@@ -976,8 +990,10 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
get_num_sdma_engines(dqm);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (dqm->xgmi_sdma_bitmap == 0)
+ if (dqm->xgmi_sdma_bitmap == 0) {
+ pr_err("No more XGMI SDMA queue to allocate\n");
return -ENOMEM;
+ }
bit = __ffs64(dqm->xgmi_sdma_bitmap);
dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
@@ -1029,7 +1045,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
/ dqm->dev->shared_resources.num_pipe_per_mec;
- if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+ if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
continue;
/* only acquire queues from the first MEC */
@@ -1064,9 +1080,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
- dqm->queue_count = dqm->processes_count = 0;
- dqm->sdma_queue_count = 0;
- dqm->xgmi_sdma_queue_count = 0;
+ dqm->active_queue_count = dqm->processes_count = 0;
+ dqm->active_cp_queue_count = 0;
+
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1158,7 +1174,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
- dqm->queue_count++;
+ increment_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1172,7 +1188,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{
dqm_lock(dqm);
list_del(&kq->list);
- dqm->queue_count--;
+ decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
@@ -1238,13 +1254,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
-
if (q->properties.is_active) {
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
+
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
@@ -1298,20 +1310,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0;
}
-static int unmap_sdma_queues(struct device_queue_manager *dqm)
-{
- int i, retval = 0;
-
- for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
- dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
- retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
- if (retval)
- return retval;
- }
- return retval;
-}
-
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
@@ -1319,7 +1317,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
if (!dqm->sched_running)
return 0;
- if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+ if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
return 0;
if (dqm->active_runlist)
return 0;
@@ -1349,12 +1347,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
return retval;
- pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
- dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
-
- if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
- unmap_sdma_queues(dqm);
-
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0);
if (retval)
@@ -1427,18 +1419,15 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- }
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
@@ -1648,7 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
- dqm->queue_count--;
+ decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1656,16 +1645,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) {
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- }
if (q->properties.is_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
dqm->total_queue_count--;
}
@@ -1742,14 +1728,13 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- (dev->device_info->num_sdma_engines +
- dev->device_info->num_xgmi_sdma_engines) *
+ get_num_all_sdma_engines(dqm) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
- (void *)&(mem_obj->cpu_ptr), true);
+ (void *)&(mem_obj->cpu_ptr), false);
return retval;
}
@@ -1979,7 +1964,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
if (!test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
continue;
r = dqm->dev->kfd2kgd->hqd_dump(
@@ -1995,8 +1980,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
- get_num_xgmi_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 871d3b628d2d..50d919f814e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,9 +180,8 @@ struct device_queue_manager {
struct list_head queues;
unsigned int saved_flags;
unsigned int processes_count;
- unsigned int queue_count;
- unsigned int sdma_queue_count;
- unsigned int xgmi_sdma_queue_count;
+ unsigned int active_queue_count;
+ unsigned int active_cp_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
@@ -219,7 +218,7 @@ void device_queue_manager_init_v10_navi10(
struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 1f8365575b12..15476fca8fa6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd,
if (p->signal_mapped_size &&
p->signal_event_count == p->signal_mapped_size / 8) {
if (!p->signal_event_limit_reached) {
- pr_warn("Signal event wasn't created because limit was reached\n");
+ pr_debug("Signal event wasn't created because limit was reached\n");
p->signal_event_limit_reached = true;
}
return -ENOSPC;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index bb77b8890e77..78714f9a8b11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -316,7 +316,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
{
/*
* node id couldn't be 0 - the three MSB bits of
- * aperture shoudn't be 0
+ * aperture shouldn't be 0
*/
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 436b7f518979..48cda3073b70 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
int retval;
struct kfd_mem_obj *mqd_mem_obj = NULL;
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
+ /* For V9 only, due to a HW bug, the control stack of a user mode
+ * compute queue needs to be allocated just behind the page boundary
+ * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
+ * the first page of the buffer serves the regular MQD purpose and
+ * the remainder holds the control stack. Although the two parts are
+ * in the same buffer object, they need different memory types: the
+ * MQD part needs UC (uncached) as usual, while the control stack
+ * needs NC (non-coherent), unlike the UC type used when the control
+ * stack is allocated in user space.
+ *
+ * Because of all this, we use the gtt allocation function instead of
+ * the sub-allocation function for this enlarged MQD buffer. Moreover,
+ * in order to achieve two memory types in a single buffer object, we
+ * pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct
+ * amdgpu memory functions to do so.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
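
Given the comment above, the allocation presumably continues along these lines (a sketch; the exact size comes from the queue's control-stack requirements, and the final boolean is assumed to select the special bo flag):

    /* enlarged GTT allocation: MQD page(s) followed by the control stack;
     * the trailing 'true' asks amdgpu to apply AMDGPU_GEM_CREATE_CP_MQD_GFX9 */
    retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
                    ALIGN(q->ctl_stack_size, PAGE_SIZE) +
                    ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
                    &(mqd_mem_obj->gtt_mem),
                    &(mqd_mem_obj->gpu_addr),
                    (void *)&(mqd_mem_obj->cpu_ptr), true);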
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index dc406e6dee23..efdb75e7677b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -47,9 +47,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
struct kfd_dev *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
- queue_count = pm->dqm->queue_count;
- compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
- pm->dqm->xgmi_sdma_queue_count;
+ queue_count = pm->dqm->active_queue_count;
+ compute_queue_count = pm->dqm->active_cp_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
@@ -62,7 +61,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_queues_num(pm->dqm)) {
+ compute_queue_count > get_cp_queues_num(pm->dqm)) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
@@ -141,7 +140,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pm->ib_size_bytes = alloc_size_bytes;
pr_debug("Building runlist ib process count: %d queues count %d\n",
- pm->dqm->processes_count, pm->dqm->queue_count);
+ pm->dqm->processes_count, pm->dqm->active_queue_count);
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
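
The oversubscription test above reduces to a predicate over the active
counts. A hedged sketch follows; the stub type is hypothetical and mirrors
only the dqm fields that pm_calc_rlib_size() reads after this patch.

	struct dqm_counts_stub {
		unsigned int processes_count;
		unsigned int active_cp_queue_count;
	};

	static bool runlist_over_subscribed(const struct dqm_counts_stub *dqm,
					    unsigned int max_proc_per_quantum,
					    unsigned int num_cp_queues)
	{
		return dqm->processes_count > max_proc_per_quantum ||
		       dqm->active_cp_queue_count > num_cp_queues;
	}
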
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6af1b5881f43..c24cad3c64ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -41,6 +41,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
+#include <linux/swap.h>
#include "amd_shared.h"
@@ -293,6 +294,9 @@ struct kfd_dev {
/* xGMI */
uint64_t hive_id;
+
+ /* UUID */
+ uint64_t unique_id;
bool pci_atomic_requested;
@@ -502,6 +506,9 @@ struct queue {
struct kfd_process *process;
struct kfd_dev *device;
void *gws;
+
+ /* procfs */
+ struct kobject kobj;
};
/*
@@ -646,6 +653,7 @@ struct kfd_process_device {
* function.
*/
bool already_dequeued;
+ bool runtime_inuse;
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound;
@@ -729,6 +737,7 @@ struct kfd_process {
/* Kobj for our procfs */
struct kobject *kobj;
+ struct kobject *kobj_queues;
struct attribute attr_pasid;
};
@@ -835,6 +844,8 @@ extern struct device *kfd_device;
/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
+int kfd_procfs_add_queue(struct queue *q);
+void kfd_procfs_del_queue(struct queue *q);
/* Topology */
int kfd_topology_init(void);
@@ -1039,7 +1050,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev);
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
{
-#if defined(CONFIG_CGROUP_DEVICE)
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = kfd->ddev;
return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 25b90f70aecd..fe0cd49d4ea7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
+#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
@@ -132,6 +133,88 @@ void kfd_procfs_shutdown(void)
}
}
+static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct queue *q = container_of(kobj, struct queue, kobj);
+
+ if (!strcmp(attr->name, "size"))
+ return snprintf(buffer, PAGE_SIZE, "%llu",
+ q->properties.queue_size);
+ else if (!strcmp(attr->name, "type"))
+ return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
+ else if (!strcmp(attr->name, "gpuid"))
+ return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
+ else
+ pr_err("Invalid attribute");
+
+ return 0;
+}
+
+static struct attribute attr_queue_size = {
+ .name = "size",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_type = {
+ .name = "type",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_gpuid = {
+ .name = "gpuid",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute *procfs_queue_attrs[] = {
+ &attr_queue_size,
+ &attr_queue_type,
+ &attr_queue_gpuid,
+ NULL
+};
+
+static const struct sysfs_ops procfs_queue_ops = {
+ .show = kfd_procfs_queue_show,
+};
+
+static struct kobj_type procfs_queue_type = {
+ .sysfs_ops = &procfs_queue_ops,
+ .default_attrs = procfs_queue_attrs,
+};
+
+int kfd_procfs_add_queue(struct queue *q)
+{
+ struct kfd_process *proc;
+ int ret;
+
+ if (!q || !q->process)
+ return -EINVAL;
+ proc = q->process;
+
+ /* Create proc/<pid>/queues/<queue id> folder */
+ if (!proc->kobj_queues)
+ return -EFAULT;
+ ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
+ proc->kobj_queues, "%u", q->properties.queue_id);
+ if (ret < 0) {
+ pr_warn("Creating proc/<pid>/queues/%u failed",
+ q->properties.queue_id);
+ kobject_put(&q->kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+void kfd_procfs_del_queue(struct queue *q)
+{
+ if (!q)
+ return;
+
+ kobject_del(&q->kobj);
+ kobject_put(&q->kobj);
+}
+
int kfd_process_create_wq(void)
{
if (!kfd_process_wq)
@@ -244,10 +327,10 @@ err_alloc_mem:
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
- ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
- ALLOC_MEM_FLAGS_WRITABLE |
- ALLOC_MEM_FLAGS_EXECUTABLE;
+ uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
+ KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
+ KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
+ KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
@@ -323,6 +406,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (ret)
pr_warn("Creating pasid for pid %d failed",
(int)process->lead_thread->pid);
+
+ process->kobj_queues = kobject_create_and_add("queues",
+ process->kobj);
+ if (!process->kobj_queues)
+ pr_warn("Creating KFD proc/queues folder failed");
}
out:
if (!IS_ERR(process))
@@ -440,6 +528,16 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfree(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
+ /*
+ * Before destroying the pdd, make sure to report availability
+ * for auto suspend.
+ */
+ if (pdd->runtime_inuse) {
+ pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
+ pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
+ pdd->runtime_inuse = false;
+ }
+
kfree(pdd);
}
}
@@ -457,6 +555,9 @@ static void kfd_process_wq_release(struct work_struct *work)
/* Remove the procfs files */
if (p->kobj) {
sysfs_remove_file(p->kobj, &p->attr_pasid);
+ kobject_del(p->kobj_queues);
+ kobject_put(p->kobj_queues);
+ p->kobj_queues = NULL;
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@@ -540,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
+ /* Signal the eviction fence after user mode queues are
+ * destroyed. This allows any BOs to be freed without
+ * triggering pointless evictions or waiting for fences.
+ */
+ dma_fence_signal(p->ef);
mutex_unlock(&p->mutex);
@@ -591,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
struct kfd_dev *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
+ | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
@@ -754,6 +861,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
+ pdd->runtime_inuse = false;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
@@ -843,15 +951,41 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
+ /*
+ * Signal the runtime-PM system to auto-resume, and prevent
+ * further runtime suspend once the device pdd is created and
+ * until the pdd is destroyed.
+ */
+ if (!pdd->runtime_inuse) {
+ err = pm_runtime_get_sync(dev->ddev->dev);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
err = kfd_iommu_bind_process_to_device(pdd);
if (err)
- return ERR_PTR(err);
+ goto out;
err = kfd_process_device_init_vm(pdd, NULL);
if (err)
- return ERR_PTR(err);
+ goto out;
+
+ /*
+ * make sure the runtime_usage counter is incremented only once
+ * per pdd
+ */
+ pdd->runtime_inuse = true;
return pdd;
+
+out:
+ /* balance runpm reference count and exit with error */
+ if (!pdd->runtime_inuse) {
+ pm_runtime_mark_last_busy(dev->ddev->dev);
+ pm_runtime_put_autosuspend(dev->ddev->dev);
+ }
+
+ return ERR_PTR(err);
}
struct kfd_process_device *kfd_get_first_process_device_data(
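
The runtime-PM handling above is a get-on-first-bind / put-on-destroy
pattern guarded by pdd->runtime_inuse. A condensed, hypothetical sketch of
the balancing: do_bind_work() stands in for the iommu bind and VM init
steps, and 'dev' for pdd->dev->ddev->dev.

	#include <linux/pm_runtime.h>

	static int do_bind_work(void)
	{
		return 0;	/* placeholder for the real bind steps */
	}

	static int bind_with_runpm(struct device *dev, bool *runtime_inuse)
	{
		int err;

		if (!*runtime_inuse) {
			err = pm_runtime_get_sync(dev);	/* auto-resume */
			if (err < 0)
				return err;
		}

		err = do_bind_work();
		if (err)
			goto out;

		*runtime_inuse = true;	/* count the reference only once */
		return 0;

	out:
		if (!*runtime_inuse) {	/* balance the reference on failure */
			pm_runtime_mark_last_busy(dev);
			pm_runtime_put_autosuspend(dev);
		}
		return err;
	}
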
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 31fcd1b51f00..084c35f55d59 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,23 +241,18 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
- if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
- >= get_num_sdma_queues(dev->dqm)) ||
- (type == KFD_QUEUE_TYPE_SDMA_XGMI &&
- dev->dqm->xgmi_sdma_queue_count
- >= get_num_xgmi_sdma_queues(dev->dqm))) {
- pr_debug("Over-subscription is not allowed for SDMA.\n");
- retval = -EPERM;
- goto err_create_queue;
- }
-
+ /* SDMA queues are always allocated statically no matter
+ * which scheduler mode is used. We also do not need to
+ * check whether an SDMA queue can be allocated here, because
+ * allocate_sdma_queue() in create_queue() has the
+ * corresponding check logic.
+ */
retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
- pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
@@ -266,7 +261,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((dev->dqm->sched_policy ==
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
- (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+ (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
@@ -278,7 +273,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
- pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -299,7 +293,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
@@ -322,12 +316,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (q) {
pr_debug("PQM done creating queue\n");
+ kfd_procfs_add_queue(q);
print_queue_properties(&q->properties);
}
return retval;
err_create_queue:
+ uninit_queue(q);
+ if (kq)
+ kernel_queue_uninit(kq, false);
kfree(pqn);
err_allocate_pqn:
/* check if queues list is empty unregister process from device */
@@ -378,6 +376,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
}
if (pqn->q) {
+ kfd_procfs_del_queue(pqn->q);
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
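
With kfd_procfs_add_queue() in place, each queue exposes size/type/gpuid
attributes under the process's "queues" kobject. A hypothetical userspace
sketch for reading one of them; the base path is an assumption about where
the KFD proc tree is exposed in sysfs.

	#include <stdio.h>

	static int read_queue_attr(const char *pid, const char *qid,
				   const char *attr)
	{
		char path[256], buf[64];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/kfd/kfd/proc/%s/queues/%s/%s",
			 pid, qid, attr);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fgets(buf, sizeof(buf), f))
			printf("%s = %s\n", attr, buf);
		fclose(f);
		return 0;
	}
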
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 203c823d65f1..aa0bfa78a667 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -490,6 +490,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_queues_per_engine);
sysfs_show_32bit_prop(buffer, "num_cp_queues",
dev->node_props.num_cp_queues);
+ sysfs_show_64bit_prop(buffer, "unique_id",
+ dev->node_props.unique_id);
if (dev->gpu) {
log_max_watch_addr =
@@ -1318,7 +1320,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
- dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
+ dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
+ dev->node_props.unique_id = gpu->unique_id;
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 74e9b1682af8..46eeecaf1b68 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -54,6 +54,7 @@
struct kfd_node_properties {
uint64_t hive_id;
+ uint64_t unique_id;
uint32_t cpu_cores_count;
uint32_t simd_count;
uint32_t mem_banks_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6240259b3a93..7fc15b82fe48 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -98,6 +98,9 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -129,9 +132,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
-
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs,
@@ -383,8 +383,8 @@ static void dm_pflip_high_irq(void *interrupt_params)
* of pageflip completion, so last_flip_vblank is the forbidden count
* for queueing new pageflips if vsync + VRR is enabled.
*/
- amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
- amdgpu_crtc->crtc_id);
+ amdgpu_crtc->last_flip_vblank =
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -407,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling is done here after end of front-porch in
* vrr mode, as vblank timestamping will give valid results
@@ -440,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
/**
* dm_crtc_high_irq() - Handles CRTC interrupt
- * @interrupt_params: ignored
+ * @interrupt_params: used for determining the CRTC instance
*
* Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
* event handler.
@@ -454,94 +455,44 @@ static void dm_crtc_high_irq(void *interrupt_params)
unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
- if (acrtc) {
- acrtc_state = to_dm_crtc_state(acrtc->base.state);
-
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
-
- /* Core vblank handling at start of front-porch is only possible
- * in non-vrr mode, as only there vblank timestamping will give
- * valid results while done in front-porch. Otherwise defer it
- * to dm_vupdate_high_irq after end of front-porch.
- */
- if (!amdgpu_dm_vrr_active(acrtc_state))
- drm_crtc_handle_vblank(&acrtc->base);
-
- /* Following stuff must happen at start of vblank, for crc
- * computation and below-the-range btr support in vrr mode.
- */
- amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
- if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
- acrtc_state->vrr_params.supported &&
- acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
- mod_freesync_handle_v_update(
- adev->dm.freesync_module,
- acrtc_state->stream,
- &acrtc_state->vrr_params);
-
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- }
- }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-/**
- * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
- * @interrupt params - interrupt parameters
- *
- * Notify DRM's vblank event handler at VSTARTUP
- *
- * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
- * * We are close enough to VUPDATE - the point of no return for hw
- * * We are in the fixed portion of variable front porch when vrr is enabled
- * * We are before VUPDATE, where double-buffered vrr registers are swapped
- *
- * It is therefore the correct place to signal vblank, send user flip events,
- * and update VRR.
- */
-static void dm_dcn_crtc_high_irq(void *interrupt_params)
-{
- struct common_irq_params *irq_params = interrupt_params;
- struct amdgpu_device *adev = irq_params->adev;
- struct amdgpu_crtc *acrtc;
- struct dm_crtc_state *acrtc_state;
- unsigned long flags;
-
- acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
if (!acrtc)
return;
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state),
acrtc_state->active_planes);
+ /*
+ * Core vblank handling at the start of the front porch is only
+ * possible in non-vrr mode, as only then does vblank timestamping
+ * give valid results while done in the front porch. Otherwise
+ * defer it to dm_vupdate_high_irq after the end of the front porch.
+ */
+ if (!amdgpu_dm_vrr_active(acrtc_state))
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /*
+ * The following must happen at the start of vblank, for CRC
+ * computation and below-the-range (BTR) support in vrr mode.
+ */
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
- drm_crtc_handle_vblank(&acrtc->base);
+
+ /* BTR updates need to happen before VUPDATE on Vega and above. */
+ if (adev->family < AMDGPU_FAMILY_AI)
+ return;
spin_lock_irqsave(&adev->ddev->event_lock, flags);
- if (acrtc_state->vrr_params.supported &&
+ if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
- mod_freesync_handle_v_update(
- adev->dm.freesync_module,
- acrtc_state->stream,
- &acrtc_state->vrr_params);
+ mod_freesync_handle_v_update(adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
}
/*
@@ -554,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
* avoid race conditions between flip programming and completion,
* which could cause too early flip completion events.
*/
- if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+ if (adev->family >= AMDGPU_FAMILY_RV &&
+ acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
acrtc_state->active_planes == 0) {
if (acrtc->event) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
@@ -566,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
-#endif
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
@@ -813,10 +764,20 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
- memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
- fw_inst_const_size);
+ /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+ * amdgpu_ucode_init_single_fw will load the dmub firmware's
+ * fw_inst_const part to cw0; otherwise, the firmware backdoor load
+ * is done by dm_dmub_hw_init.
+ */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ }
+
memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
fw_bss_data_size);
+
+ /* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
adev->bios_size);
@@ -835,6 +796,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb_base = adev->gmc.fb_start;
hw_params.fb_offset = adev->gmc.aper_base;
+ /* backdoor load firmware and trigger dmub running */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ hw_params.load_inst_const = true;
+
if (dmcu)
hw_params.psp_version = dmcu->psp_version;
@@ -897,7 +862,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.chip_family = adev->family;
- init_data.asic_id.pci_revision_id = adev->rev_id;
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
init_data.asic_id.vram_width = adev->gmc.vram_width;
@@ -972,7 +937,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN) {
- adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
@@ -1003,11 +968,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
-#if defined(CONFIG_DEBUG_FS)
- if (dtn_debugfs_init(adev))
- DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
-#endif
-
DRM_DEBUG_DRIVER("KMS initialized.\n");
return 0;
@@ -1091,9 +1051,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA20:
case CHIP_NAVI10:
case CHIP_NAVI14:
- case CHIP_NAVI12:
case CHIP_RENOIR:
return 0;
+ case CHIP_NAVI12:
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+ break;
case CHIP_RAVEN:
if (ASICREV_IS_PICASSO(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
@@ -1204,22 +1166,21 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
- return 0;
- }
-
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
- AMDGPU_UCODE_ID_DMCUB;
- adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+ adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
- adev->dm.dmcub_fw_version);
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+ }
+
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
dmub_srv = adev->dm.dmub_srv;
@@ -1839,8 +1800,63 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+ u32 max_cll, min_cll, max, min, q, r;
+ struct amdgpu_dm_backlight_caps *caps;
+ struct amdgpu_display_manager *dm;
+ struct drm_connector *conn_base;
+ struct amdgpu_device *adev;
+ static const u8 pre_computed_values[] = {
+ 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
+ 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
+
+ if (!aconnector || !aconnector->dc_link)
+ return;
+
+ conn_base = &aconnector->base;
+ adev = conn_base->dev->dev_private;
+ dm = &adev->dm;
+ caps = &dm->backlight_caps;
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ caps->aux_support = false;
+ max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+ min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
+
+ if (caps->ext_caps->bits.oled == 1 ||
+ caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+ caps->aux_support = true;
+
+ /* From the specification (CTA-861-G), the maximum luminance is
+ * calculated as:
+ * Luminance = 50*2**(CV/32)
+ * where CV is a one-byte value.
+ * Evaluating this expression would require floating-point
+ * precision; to avoid that complexity, we take advantage of the
+ * fact that CV is divided by a constant. From Euclid's division
+ * algorithm, we know that CV can be written as CV = 32*q + r.
+ * Substituting CV in the luminance expression gives
+ * 50*(2**q)*(2**(r/32)), so we only need to pre-compute the
+ * values of 50*2**(r/32) for r in 0..31. They were generated
+ * with the following Ruby line:
+ * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
+ * The results can be verified against pre_computed_values above.
+ */
+ q = max_cll >> 5;
+ r = max_cll % 32;
+ max = (1 << q) * pre_computed_values[r];
+
+ // min luminance: maxLum * (CV/255)^2 / 100
+ q = DIV_ROUND_CLOSEST(min_cll, 255);
+ min = max * DIV_ROUND_CLOSEST((q * q), 100);
+
+ caps->aux_max_input_signal = max;
+ caps->aux_min_input_signal = min;
+}
+
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
@@ -1941,19 +1957,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->edid = NULL;
- drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+ if (aconnector->dc_link->aux_mode) {
+ drm_dp_cec_unset_edid(
+ &aconnector->dm_dp_aux.aux);
+ }
} else {
aconnector->edid =
- (struct edid *) sink->dc_edid.raw_edid;
-
+ (struct edid *)sink->dc_edid.raw_edid;
drm_connector_update_edid_property(connector,
- aconnector->edid);
- drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
- aconnector->edid);
+ aconnector->edid);
+
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
}
- amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ update_connector_ext_caps(aconnector);
} else {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
@@ -2169,10 +2190,10 @@ static void handle_hpd_rx_irq(void *param)
}
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
- if (adev->dm.hdcp_workqueue)
- hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
- }
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
@@ -2373,8 +2394,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
+ amdgpu_dm_irq_register_interrupt(
+ adev, &int_params, dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at the end of each vblank, regardless of the state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_dcn_crtc_high_irq, c_irq_params);
+ dm_vupdate_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
@@ -2567,6 +2616,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@ -2581,9 +2631,11 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
if (caps.caps_valid) {
+ dm->backlight_caps.caps_valid = true;
+ if (caps.aux_support)
+ return;
dm->backlight_caps.min_input_signal = caps.min_input_signal;
dm->backlight_caps.max_input_signal = caps.max_input_signal;
- dm->backlight_caps.caps_valid = true;
} else {
dm->backlight_caps.min_input_signal =
AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
@@ -2591,40 +2643,95 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
}
#else
+ if (dm->backlight_caps.aux_support)
+ return;
+
dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
+static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
+{
+ bool rc;
+
+ if (!link)
+ return 1;
+
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+
+ return rc ? 0 : 1;
+}
+
+static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ const uint32_t user_brightness)
+{
+ u32 min, max, conversion_pace;
+ u32 brightness = user_brightness;
+
+ if (!caps)
+ goto out;
+
+ if (!caps->aux_support) {
+ max = caps->max_input_signal;
+ min = caps->min_input_signal;
+ /*
+ * The brightness input is in the range 0-255.
+ * It needs to be rescaled to lie between the
+ * requested min and max input signal.
+ * It also needs to be scaled up by 0x101 to
+ * match the DC interface, which has a range of
+ * 0 to 0xffff.
+ */
+ conversion_pace = 0x101;
+ brightness =
+ user_brightness
+ * conversion_pace
+ * (max - min)
+ / AMDGPU_MAX_BL_LEVEL
+ + min * conversion_pace;
+ } else {
+ /* TODO
+ * We are doing a linear interpolation here, which is OK but
+ * does not provide the optimal result. We probably want
+ * something close to the Perceptual Quantizer (PQ) curve.
+ */
+ max = caps->aux_max_input_signal;
+ min = caps->aux_min_input_signal;
+
+ brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
+ + user_brightness * max;
+ // Multiply the value by 1000 since we use millinits
+ brightness *= 1000;
+ brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+ }
+
+out:
+ return brightness;
+}
+
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps;
- uint32_t brightness = bd->props.brightness;
+ struct dc_link *link = NULL;
+ u32 brightness;
+ bool rc;
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
- /*
- * The brightness input is in the range 0-255
- * It needs to be rescaled to be between the
- * requested min and max input signal
- *
- * It also needs to be scaled up by 0x101 to
- * match the DC interface which has a range of
- * 0 to 0xffff
- */
- brightness =
- brightness
- * 0x101
- * (caps.max_input_signal - caps.min_input_signal)
- / AMDGPU_MAX_BL_LEVEL
- + caps.min_input_signal * 0x101;
-
- if (dc_link_set_backlight_level(dm->backlight_link,
- brightness, 0))
- return 0;
- else
- return 1;
+
+ link = (struct dc_link *)dm->backlight_link;
+
+ brightness = convert_brightness(&caps, bd->props.brightness);
+ // Change brightness based on AUX property
+ if (caps.aux_support)
+ return set_backlight_via_aux(link, brightness);
+
+ rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+
+ return rc ? 0 : 1;
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -2909,6 +3016,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ /* No userspace support. */
+ dm->dc->debug.disable_tri_buf = true;
+
return 0;
fail:
kfree(aencoder);
@@ -3212,7 +3322,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
const union dc_tiling_info *tiling_info,
const uint64_t info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool force_disable_dcc)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
@@ -3224,6 +3335,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
memset(&input, 0, sizeof(input));
memset(&output, 0, sizeof(output));
+ if (force_disable_dcc)
+ return 0;
+
if (!offset)
return 0;
@@ -3273,7 +3387,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool force_disable_dcc)
{
const struct drm_framebuffer *fb = &afb->base;
int ret;
@@ -3379,7 +3494,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
plane_size, tiling_info,
- tiling_flags, dcc, address);
+ tiling_flags, dcc, address,
+ force_disable_dcc);
if (ret)
return ret;
}
@@ -3471,7 +3587,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state,
const uint64_t tiling_flags,
struct dc_plane_info *plane_info,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool force_disable_dcc)
{
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
@@ -3511,6 +3628,9 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
case DRM_FORMAT_NV12:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
break;
+ case DRM_FORMAT_P010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
+ break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
@@ -3550,7 +3670,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
- &plane_info->dcc, address);
+ &plane_info->dcc, address,
+ force_disable_dcc);
if (ret)
return ret;
@@ -3573,6 +3694,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_info plane_info;
uint64_t tiling_flags;
int ret;
+ bool force_disable_dcc = false;
ret = fill_dc_scaling_info(plane_state, &scaling_info);
if (ret)
@@ -3587,9 +3709,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
if (ret)
return ret;
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
&plane_info,
- &dc_plane_state->address);
+ &dc_plane_state->address,
+ force_disable_dcc);
if (ret)
return ret;
@@ -4200,9 +4324,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct dmcu *dmcu = core_dc->res_pool->dmcu;
stream->psr_version = dmcu->dmcu_version.psr_version;
- mod_build_vsc_infopacket(stream,
- &stream->vsc_infopacket,
- &stream->use_vsc_sdp_for_colorimetry);
+
+ //
+ // Decide whether the stream supports the VSC SDP colorimetry
+ // capability before building the VSC info packet.
+ //
+ stream->use_vsc_sdp_for_colorimetry = false;
+ if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ stream->use_vsc_sdp_for_colorimetry =
+ aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+ } else {
+ if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ stream->use_vsc_sdp_for_colorimetry = true;
+ }
+ }
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
}
}
finish:
@@ -4293,10 +4430,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;
- /* Do not set vupdate for DCN hardware */
- if (adev->family > AMDGPU_FAMILY_AI)
- return 0;
-
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -4352,8 +4485,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static enum drm_connector_status
@@ -4518,6 +4653,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
i2c_del_adapter(&aconnector->i2c->base);
kfree(aconnector->i2c);
}
+ kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
}
@@ -4574,6 +4710,28 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
return &new_state->base;
}
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int r;
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.reset = amdgpu_dm_connector_funcs_reset,
.detect = amdgpu_dm_connector_detect,
@@ -4583,6 +4741,7 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
.early_unregister = amdgpu_dm_connector_unregister
};
@@ -4959,7 +5118,8 @@ static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
.disable = dm_crtc_helper_disable,
.atomic_check = dm_crtc_helper_atomic_check,
- .mode_fixup = dm_crtc_helper_mode_fixup
+ .mode_fixup = dm_crtc_helper_mode_fixup,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
@@ -5171,6 +5331,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
uint64_t tiling_flags;
uint32_t domain;
int r;
+ bool force_disable_dcc = false;
dm_plane_state_old = to_dm_plane_state(plane->state);
dm_plane_state_new = to_dm_plane_state(new_state);
@@ -5229,11 +5390,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
tiling_flags, &plane_state->tiling_info,
&plane_state->plane_size, &plane_state->dcc,
- &plane_state->address);
+ &plane_state->address,
+ force_disable_dcc);
}
return 0;
@@ -5377,6 +5540,8 @@ static int get_plane_formats(const struct drm_plane *plane,
if (plane_cap && plane_cap->pixel_format_support.nv12)
formats[num_formats++] = DRM_FORMAT_NV12;
+ if (plane_cap && plane_cap->pixel_format_support.p010)
+ formats[num_formats++] = DRM_FORMAT_P010;
break;
case DRM_PLANE_TYPE_OVERLAY:
@@ -5429,12 +5594,15 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
}
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
- plane_cap && plane_cap->pixel_format_support.nv12) {
+ plane_cap &&
+ (plane_cap->pixel_format_support.nv12 ||
+ plane_cap->pixel_format_support.p010)) {
/* This only affects YUV formats. */
drm_plane_create_color_properties(
plane,
BIT(DRM_COLOR_YCBCR_BT601) |
- BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
@@ -5763,7 +5931,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.underscan_vborder_property,
0);
- drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+ if (!aconnector->mst_port)
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
@@ -5782,8 +5951,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
&aconnector->base.base,
dm->ddev->mode_config.hdr_output_metadata_property, 0);
- drm_connector_attach_vrr_capable_property(
- &aconnector->base);
+ if (!aconnector->mst_port)
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
@@ -5922,16 +6092,9 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
- drm_connector_register(&aconnector->base);
-#if defined(CONFIG_DEBUG_FS)
- connector_debugfs_init(aconnector);
- aconnector->debugfs_dpcd_address = 0;
- aconnector->debugfs_dpcd_size = 0;
-#endif
-
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_dm_initialize_dp_connector(dm, aconnector);
+ amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
out_free:
if (res) {
@@ -6113,12 +6276,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
y <= -amdgpu_crtc->max_cursor_height)
return 0;
- if (crtc->primary->state) {
- /* avivo cursor are offset into the total surface */
- x += crtc->primary->state->src_x >> 16;
- y += crtc->primary->state->src_y >> 16;
- }
-
if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0;
@@ -6128,6 +6285,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
y = 0;
}
position->enable = true;
+ position->translate_by_source = true;
position->x = x;
position->y = y;
position->x_hotspot = xorigin;
@@ -6414,7 +6572,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
- bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -6460,9 +6617,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane = dm_new_plane_state->dc_state;
- if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
- swizzle = false;
-
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -6514,7 +6668,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
&bundle->plane_infos[planes_count],
- &bundle->flip_addrs[planes_count].address);
+ &bundle->flip_addrs[planes_count].address,
+ false);
+
+ DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+ new_plane_state->plane->index,
+ bundle->plane_infos[planes_count].dcc.enable);
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
@@ -6563,7 +6722,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* clients using the GLX_OML_sync_control extension or
* DRI3/Present extension with defined target_msc.
*/
- last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
}
else {
/* For variable refresh rate mode only:
@@ -6592,7 +6751,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(target_vblank -
- amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
usleep_range(1000, 1100);
}
@@ -6671,8 +6830,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_feature_enabled &&
- !acrtc_state->stream->link->psr_allow_active &&
- swizzle) {
+ !acrtc_state->stream->link->psr_allow_active) {
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -7697,6 +7855,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ struct amdgpu_crtc *new_acrtc;
bool needs_reset;
int ret = 0;
@@ -7706,9 +7865,23 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state = to_dm_plane_state(new_plane_state);
dm_old_plane_state = to_dm_plane_state(old_plane_state);
- /*TODO Implement atomic check for cursor plane */
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ /*TODO Implement better atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
+ (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
+ DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
+ new_plane_state->crtc_w, new_plane_state->crtc_h);
+ return -EINVAL;
+ }
+
return 0;
+ }
needs_reset = should_reset_plane(state, plane, old_plane_state,
new_plane_state);
@@ -7935,7 +8108,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
ret = fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
plane_info,
- &flip_addr->address);
+ &flip_addr->address,
+ false);
if (ret)
goto cleanup;
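
For reference, the non-AUX branch of convert_brightness() above rescales the
0-255 input into [min, max] and widens it by 0x101 so that 255 maps to
0xffff. A standalone sketch with a worked value; MAX_BL_LEVEL here is an
assumed stand-in for AMDGPU_MAX_BL_LEVEL (255 in the driver).

	#define MAX_BL_LEVEL 255	/* assumed AMDGPU_MAX_BL_LEVEL */

	static unsigned int scale_brightness(unsigned int user,
					     unsigned int min,
					     unsigned int max)
	{
		return user * 0x101 * (max - min) / MAX_BL_LEVEL
			+ min * 0x101;
	}

	/* E.g. user=255, min=12, max=255:
	 * 255*257*243/255 + 12*257 = 62451 + 3084 = 65535 (0xffff).
	 */
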
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 7ea9acb0358d..5cab3e65d992 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -90,15 +90,41 @@ struct dm_comressor_info {
};
/**
- * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
- * @min_input_signal: minimum possible input in range 0-255
- * @max_input_signal: maximum possible input in range 0-255
- * @caps_valid: true if these values are from the ACPI interface
+ * struct amdgpu_dm_backlight_caps - Information about backlight
+ *
+ * Describe the backlight support for ACPI or eDP AUX.
*/
struct amdgpu_dm_backlight_caps {
+ /**
+ * @ext_caps: Keep the data struct with all the information about the
+ * display support for HDR.
+ */
+ union dpcd_sink_ext_caps *ext_caps;
+ /**
+ * @aux_min_input_signal: Min brightness value supported by the display
+ */
+ u32 aux_min_input_signal;
+ /**
+ * @aux_max_input_signal: Max brightness value supported by the display
+ * in nits.
+ */
+ u32 aux_max_input_signal;
+ /**
+ * @min_input_signal: minimum possible input in range 0-255.
+ */
int min_input_signal;
+ /**
+ * @max_input_signal: maximum possible input in range 0-255.
+ */
int max_input_signal;
+ /**
+ * @caps_valid: true if these values are from the ACPI interface.
+ */
bool caps_valid;
+ /**
+ * @aux_support: Describes if the display supports AUX backlight.
+ */
+ bool aux_support;
};
/**
@@ -457,6 +483,9 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector);
+
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f81d3439ee8c..0461fecd68db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -32,6 +32,19 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
+#include "dmub/inc/dmub_srv.h"
+
+struct dmub_debugfs_trace_header {
+ uint32_t entry_count;
+ uint32_t reserved[3];
+};
+
+struct dmub_debugfs_trace_entry {
+ uint32_t trace_code;
+ uint32_t tick_count;
+ uint32_t param0;
+ uint32_t param1;
+};
/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -675,6 +688,73 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return bytes_from_user;
}
+/**
+ * Returns the DMCUB tracebuffer contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
+ */
+static int dmub_tracebuffer_show(struct seq_file *m, void *data)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ struct dmub_debugfs_trace_entry *entries;
+ uint8_t *tbuf_base;
+ uint32_t tbuf_size, max_entries, num_entries, i;
+
+ if (!fb_info)
+ return 0;
+
+ tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
+ if (!tbuf_base)
+ return 0;
+
+ tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+ max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
+ sizeof(struct dmub_debugfs_trace_entry);
+
+ num_entries =
+ ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+
+ num_entries = min(num_entries, max_entries);
+
+ entries = (struct dmub_debugfs_trace_entry
+ *)(tbuf_base +
+ sizeof(struct dmub_debugfs_trace_header));
+
+ for (i = 0; i < num_entries; ++i) {
+ struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+ seq_printf(m,
+ "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+ entry->trace_code, entry->tick_count, entry->param0,
+ entry->param1);
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the DMCUB firmware state contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
+ */
+static int dmub_fw_state_show(struct seq_file *m, void *data)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ uint8_t *state_base;
+ uint32_t state_size;
+
+ if (!fb_info)
+ return 0;
+
+ state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
+ if (!state_base)
+ return 0;
+
+ state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
+
+ return seq_write(m, state_base, state_size);
+}
+
/*
* Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
@@ -880,6 +960,8 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
return read_size - r;
}
+DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
+DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(vrr_range);
@@ -1008,6 +1090,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);
+ connector->debugfs_dpcd_address = 0;
+ connector->debugfs_dpcd_size = 0;
+
}
/*
@@ -1188,5 +1273,11 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops);
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
+ adev, &dmub_tracebuffer_fops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
+ adev, &dmub_fw_state_fops);
+
return 0;
}
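
The tracebuffer parsed by dmub_tracebuffer_show() is a 16-byte header
followed by packed 16-byte entries, clamped to what fits in the window. A
hypothetical standalone walk over the same in-memory layout (the debugfs
file itself already emits formatted text; this only restates the kernel-side
logic).

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct trace_header { uint32_t entry_count; uint32_t reserved[3]; };
	struct trace_entry { uint32_t trace_code, tick_count, param0, param1; };

	static void parse_tracebuffer(const uint8_t *buf, size_t size)
	{
		struct trace_header hdr;
		uint32_t max, i;

		if (size < sizeof(hdr))
			return;
		memcpy(&hdr, buf, sizeof(hdr));
		max = (size - sizeof(hdr)) / sizeof(struct trace_entry);
		if (hdr.entry_count < max)
			max = hdr.entry_count;	/* clamp like the kernel does */

		for (i = 0; i < max; i++) {
			struct trace_entry e;

			memcpy(&e, buf + sizeof(hdr) + i * sizeof(e), sizeof(e));
			printf("trace_code=%u tick_count=%u param0=%u param1=%u\n",
			       e.trace_code, e.tick_count, e.param0, e.param1);
		}
	}
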
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 0acd3409dd6c..dcf84a61de37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -28,6 +28,13 @@
#include "amdgpu_dm.h"
#include "dm_helpers.h"
#include <drm/drm_hdcp.h>
+#include "hdcp_psp.h"
+
+/*
+ * If the SRM version being loaded is less than or equal to the
+ * currently loaded SRM, psp will return 0xFFFF as the version
+ */
+#define PSP_SRM_VERSION_MAX 0xFFFF
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
@@ -67,6 +74,59 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
+{
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+ return NULL;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return NULL;
+
+ *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
+ *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
+
+ return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
+}
+
+static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+{
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+ return -EINVAL;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
+ hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+ hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
+ return -EINVAL;
+
+ *srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version;
+ return 0;
+}
+
static void process_output(struct hdcp_workqueue *hdcp_work)
{
struct mod_hdcp_output output = hdcp_work->output;
@@ -88,6 +148,18 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
}
+static void link_lock(struct hdcp_workqueue *work, bool lock)
+{
+ int i = 0;
+
+ for (i = 0; i < work->max_link; i++) {
+ if (lock)
+ mutex_lock(&work[i].mutex);
+ else
+ mutex_unlock(&work[i].mutex);
+ }
+}
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int link_index,
struct amdgpu_dm_connector *aconnector,
@@ -112,11 +184,21 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
if (enable_encryption) {
+ /* Explicitly set the saved SRM, as the sysfs call will come after we have
+ * already enabled hdcp (s3 resume case).
+ */
+ if (hdcp_work->srm_size > 0)
+ psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
+ &hdcp_work->srm_version);
+
display->adjust.disable = 0;
- if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
+ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
+ hdcp_w->link.adjust.hdcp1.disable = 0;
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
- else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1)
+ } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
+ hdcp_w->link.adjust.hdcp1.disable = 1;
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
+ }
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
@@ -184,7 +266,7 @@ static void event_callback(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
- cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+ cancel_delayed_work(&hdcp_work->callback_dwork);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
&hdcp_work->output);
@@ -265,6 +347,8 @@ static void event_watchdog_timer(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
mod_hdcp_process_event(&hdcp_work->hdcp,
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
&hdcp_work->output);
@@ -301,8 +385,9 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
}
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
-
}
static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -313,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
- memset(display, 0, sizeof(*display));
- memset(link, 0, sizeof(*link));
-
- display->index = aconnector->base.index;
-
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
return;
}
+
+ memset(display, 0, sizeof(*display));
+ memset(link, 0, sizeof(*link));
+
+ display->index = aconnector->base.index;
display->state = MOD_HDCP_DISPLAY_ACTIVE;
if (aconnector->dc_sink != NULL)
@@ -332,26 +417,171 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dig_be = config->link_enc_inst;
link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ link->dp.mst_supported = config->mst_supported;
display->adjust.disable = 1;
- link->adjust.auth_delay = 2;
+ link->adjust.auth_delay = 3;
+ link->adjust.hdcp1.disable = 0;
hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
}
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+
+/* NOTE: From usermode's perspective you only need to call write *ONCE*; the kernel
+ * will automatically call it once or twice depending on the size
+ *
+ * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
+ *
+ * The kernel can only accept PAGE_SIZE at once and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096),
+ * srm_data_write can be called multiple times.
+ *
+ * The sysfs interface doesn't tell us the total size up front, so we send partial SRMs to psp;
+ * only the last call carries the full SRM, and PSP will fail on every call before the last.
+ *
+ * This means we don't know if the SRM is good until the last call, and because of this limitation
+ * we cannot return errors early, as that would abort the remaining sysfs writes.
+ *
+ * Example 1:
+ * Good SRM, size = 5096
+ * first call to write 4096 -> PSP fails
+ * second call to write 1000 -> PSP passes -> SRM is set
+ *
+ * Example 2:
+ * Bad SRM, size = 4096
+ * first call to write 4096 -> PSP fails (this is the same as above, but we don't know if this
+ * is the last call)
+ *
+ * Solution?:
+ * 1: Parse the SRM? -> It is signed, so we cannot tell where it ends
+ * 2: Add another sysfs node that passes the size before calling set -> a simpler solution
+ * follows below
+ *
+ * Easy Solution:
+ * Always call get after set to verify whether set was successful (see the sketch after
+ * srm_data_write below).
+ * +----------------------+
+ * | Why it works:        |
+ * +----------------------+
+ * PSP only replaces its stored SRM when the stored one is older than the one we are trying
+ * to load.
+ * Always do set first, then get.
+ * -if we try to "1. SET" an older version, PSP will reject it and we can "2. GET" the newer
+ * version and save it
+ *
+ * -if we try to "1. SET" a newer version, PSP will accept it and we can "2. GET" the
+ * same (newer) version back and save it
+ *
+ * -if we try to "1. SET" a newer version and PSP rejects it, the format is
+ * incorrect/corrupted and we should correct our cached SRM by getting it from PSP
+ */
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct hdcp_workqueue *work;
+ uint32_t srm_version = 0;
+
+ work = container_of(bin_attr, struct hdcp_workqueue, attr);
+ link_lock(work, true);
+
+ memcpy(work->srm_temp + pos, buffer, count);
+
+ if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
+ DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
+ memcpy(work->srm, work->srm_temp, pos + count);
+ work->srm_size = pos + count;
+ work->srm_version = srm_version;
+ }
+
+ link_lock(work, false);
+
+ return count;
+}
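
A minimal sketch of the set-then-get flow described above (illustrative only, not part of the patch; srm_reconcile_with_psp is a hypothetical name, while psp_set_srm()/psp_get_srm() and the work->srm* cache fields are the ones this patch adds):

	static void srm_reconcile_with_psp(struct hdcp_workqueue *work,
					   uint8_t *candidate, uint32_t size)
	{
		uint32_t version = 0, psp_size = 0;
		uint8_t *psp_srm;

		/* 1. SET: PSP keeps whichever SRM is newer; the return value is
		 * intentionally ignored, since the GET below reconciles either way */
		psp_set_srm(work->hdcp.config.psp.handle, candidate, size, &version);

		/* 2. GET: read back the SRM PSP actually holds and cache it */
		psp_srm = psp_get_srm(work->hdcp.config.psp.handle, &version, &psp_size);
		if (psp_srm) {
			memcpy(work->srm, psp_srm, psp_size);
			work->srm_size = psp_size;
			work->srm_version = version;
		}
	}
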
+
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct hdcp_workqueue *work;
+ uint8_t *srm = NULL;
+ uint32_t srm_version;
+ uint32_t srm_size;
+ size_t ret = count;
+
+ work = container_of(bin_attr, struct hdcp_workqueue, attr);
+
+ link_lock(work, true);
+
+ srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
+
+	if (!srm) {
+		ret = -EINVAL;
+		goto ret;
+	}
+
+	if (pos >= srm_size) {
+		ret = 0;
+		goto ret;
+	}
+
+ if (srm_size - pos < count) {
+ memcpy(buffer, srm + pos, srm_size - pos);
+ ret = srm_size - pos;
+ goto ret;
+ }
+
+ memcpy(buffer, srm + pos, count);
+
+ret:
+ link_lock(work, false);
+ return ret;
+}
+
+/* From the HDCP spec (5. Renewability), the SRM needs to be stored in non-volatile memory.
+ *
+ * For example,
+ * if application "A" sets the SRM (ver 2) and we reboot/suspend, and later application "B"
+ * needs to use HDCP, the version in PSP should still be SRM(ver 2). So the SRM should be
+ * persistent across boot/reboot/suspend/resume/shutdown.
+ *
+ * Currently, when the system goes down (suspend/shutdown) the SRM is cleared from PSP, so for
+ * HDCP we need to make the SRM persistent ourselves.
+ *
+ * -PSP owns the checking of the SRM but doesn't have the ability to store it in non-volatile memory.
+ * -The kernel cannot write to the file system.
+ * -So we need usermode to do this for us, which is why an interface for usermode is needed.
+ *
+ * Usermode can read/write to/from PSP using the sysfs interface
+ * For example:
+ * to save the SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ * (a self-contained usermode sketch follows the attribute definition below)
+ */
+static const struct bin_attribute data_attr = {
+ .attr = {.name = "hdcp_srm", .mode = 0664},
+ .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
+ .write = srm_data_write,
+ .read = srm_data_read,
+};
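
A self-contained usermode sketch of the save path described in the comment above (illustrative only; the sysfs path is the node created below, "srmfile" is an arbitrary example name, and the restore path is the same copy run in the other direction):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define SRM_SYSFS "/sys/class/drm/card0/device/hdcp_srm"

	static int copy_fd(int from, int to)
	{
		char buf[4096];
		ssize_t n;

		while ((n = read(from, buf, sizeof(buf))) > 0)
			if (write(to, buf, n) != n)
				return -1;
		return n < 0 ? -1 : 0;
	}

	int main(void)
	{
		/* save: SRM from PSP out to persistent storage */
		int srm = open(SRM_SYSFS, O_RDONLY);
		int file = open("srmfile", O_WRONLY | O_CREAT | O_TRUNC, 0600);

		if (srm < 0 || file < 0 || copy_fd(srm, file))
			perror("save srm");
		/* restore on next boot: open the same paths the other way and
		 * copy_fd(file, srm); the kernel may split the write into
		 * PAGE_SIZE chunks as the earlier comment describes */
		return 0;
	}
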
+
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
{
int max_caps = dc->caps.max_links;
- struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL);
+ struct hdcp_workqueue *hdcp_work;
int i = 0;
+ hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
if (hdcp_work == NULL)
+ return NULL;
+
+ hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+
+ if (hdcp_work->srm == NULL)
+ goto fail_alloc_context;
+
+ hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+
+ if (hdcp_work->srm_temp == NULL)
goto fail_alloc_context;
hdcp_work->max_link = max_caps;
for (i = 0; i < max_caps; i++) {
-
mutex_init(&hdcp_work[i].mutex);
INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
@@ -360,7 +590,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
@@ -371,9 +601,17 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
cp_psp->funcs.update_stream_config = update_config;
cp_psp->handle = hdcp_work;
+	/* File created at /sys/class/drm/card0/device/hdcp_srm */
+ hdcp_work[0].attr = data_attr;
+
+ if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
+ DRM_WARN("Failed to create device file hdcp_srm");
+
return hdcp_work;
fail_alloc_context:
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
return NULL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 6abde86bce4a..5159b3a5e5b0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -30,6 +30,7 @@
#include "hdcp.h"
#include "dc.h"
#include "dm_cp_psp.h"
+#include "amdgpu.h"
struct mod_hdcp;
struct mod_hdcp_link;
@@ -52,6 +53,12 @@ struct hdcp_workqueue {
enum mod_hdcp_encryption_status encryption_status;
uint8_t max_link;
+
+ uint8_t *srm;
+ uint8_t *srm_temp;
+ uint32_t srm_version;
+ uint32_t srm_size;
+ struct bin_attribute attr;
};
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
@@ -64,6 +71,6 @@ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_destroy(struct hdcp_workqueue *work);
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 318b474ff20e..c20fb08c450b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
- return false;
+ DRM_ERROR("Failed to find connector for link!");
+ return false;
}
if (boot) {
@@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
- return;
+ DRM_ERROR("Failed to find connector for link!");
+ return;
}
DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c(
bool result;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link)
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- BUG_ON("Failed to found connector for link!");
+ BUG_ON("Failed to find connector for link!");
return true;
}
@@ -580,6 +580,20 @@ enum dc_edid_status dm_helpers_read_local_edid(
/* We don't need the original edid anymore */
kfree(edid);
+	/* connector->display_info will be parsed from the EDID and saved
+	 * into drm_connector->display_info by the call stack
+	 * below:
+ * drm_parse_ycbcr420_deep_color_info
+ * drm_parse_hdmi_forum_vsdb
+ * drm_parse_cea_ext
+ * drm_add_display_info
+ * drm_connector_update_edid_property
+ *
+ * drm_connector->display_info will be used by amdgpu_dm funcs,
+ * like fill_stream_properties_from_drm_display_mode
+ */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
edid_status = dm_helpers_parse_edid_caps(
ctx,
&sink->dc_edid,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index da73161043d5..d2917759b7ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -154,15 +154,18 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+ int r;
+
+ r = drm_dp_mst_connector_late_register(connector,
+ amdgpu_dm_connector->port);
+ if (r < 0)
+ return r;
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
- amdgpu_dm_connector->debugfs_dpcd_address = 0;
- amdgpu_dm_connector->debugfs_dpcd_size = 0;
#endif
- return drm_dp_mst_connector_late_register(connector, port);
+ return 0;
}
static void
@@ -204,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
dsc_caps, NULL,
- &dc_sink->sink_dsc_caps.dsc_dec_caps))
+ &dc_sink->dsc_caps.dsc_dec_caps))
return false;
return true;
@@ -259,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!validate_dsc_caps_on_connector(aconnector))
- memset(&aconnector->dc_sink->sink_dsc_caps,
- 0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+ memset(&aconnector->dc_sink->dsc_caps,
+ 0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
}
}
@@ -407,6 +410,14 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_attach_encoder(&aconnector->base,
&aconnector->mst_encoder->base);
+ connector->max_bpc_property = master->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 8, 16);
+
+ connector->vrr_capable_property = master->base.vrr_capable_property;
+ if (connector->vrr_capable_property)
+ drm_connector_attach_vrr_capable_property(connector);
+
drm_object_attach_property(
&connector->base,
dev->mode_config.path_property,
@@ -437,9 +448,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
- struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -455,39 +463,25 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
}
drm_connector_unregister(connector);
- if (adev->mode_info.rfbdev)
- drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
drm_connector_put(connector);
}
-static void dm_dp_mst_register_connector(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- if (adev->mode_info.rfbdev)
- drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
- else
- DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
-
- drm_connector_register(connector);
-}
-
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
.destroy_connector = dm_dp_destroy_mst_connector,
- .register_connector = dm_dp_mst_register_connector
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector)
+ struct amdgpu_dm_connector *aconnector,
+ int link_index)
{
- aconnector->dm_dp_aux.aux.name = "dmdc";
- aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
+ aconnector->dm_dp_aux.aux.name =
+ kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
+ link_index);
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
- drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
@@ -548,7 +542,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
if (vars[i].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
- &params[i].sink->sink_dsc_caps.dsc_dec_caps,
+ &params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
0,
params[i].timing,
@@ -569,7 +563,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
- &param.sink->sink_dsc_caps.dsc_dec_caps,
+ &param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
(int) kbps, param.timing, &dsc_config);
@@ -766,14 +760,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
params[count].port = aconnector->port;
- params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+ params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
dsc_policy.min_target_bpp,
dsc_policy.max_target_bpp,
- &stream->sink->sink_dsc_caps.dsc_dec_caps,
+ &stream->sink->dsc_caps.dsc_dec_caps,
&stream->timing, &params[count].bw_range))
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
@@ -855,7 +849,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (!aconnector || !aconnector->dc_sink)
continue;
- if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+ if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
continue;
if (computed_streams[i])
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index d6813ce67bbd..d2c56579a2cc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -32,7 +32,8 @@ struct amdgpu_dm_connector;
int dm_mst_get_pbn_divider(struct dc_link *link);
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector);
+ struct amdgpu_dm_connector *aconnector,
+ int link_index);
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 2f1c9584ac32..37fa7b48250e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
- /* fall through */
+ fallthrough;
case OBJECT_TYPE_CONNECTOR:
case OBJECT_TYPE_GENERIC:
/* Both Generic and Connector Object ID
@@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
- /* fall through */
+ fallthrough;
default:
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index c4ba6e84db65..8edc2506d49e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -221,8 +221,8 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev;
- if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
- BREAK_TO_DEBUGGER();
+ BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
+
switch (crev) {
case 6:
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7388c987c595..204d7942a6e5 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -53,25 +53,18 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
+ case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
-#endif
-
case DCN_VERSION_2_0:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
case DCN_VERSION_2_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
- case DCE_VERSION_12_0:
- case DCE_VERSION_12_1:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
+#endif
default:
/* Unsupported DCE */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 5d081c42e81b..2c6db379afae 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx,
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[0].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[4], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[1].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[5], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[2].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[6], bw_int_to_fixed(1000)));
- if (ctx->dc->caps.max_slave_planes) {
- calcs_output->stutter_entry_wm_ns[3].b_mark =
+ calcs_output->stutter_entry_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[0], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[4].b_mark =
+ stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[1].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[1], bw_int_to_fixed(1000)));
- } else {
- calcs_output->stutter_entry_wm_ns[3].b_mark =
+ stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[2].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[7], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[4].b_mark =
+ stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_entry_wm_ns[5].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[8], bw_int_to_fixed(1000)));
- }
- calcs_output->stutter_entry_wm_ns[5].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[9], bw_int_to_fixed(1000)));
+ stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 1a37550731de..3960a8db94cb 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -703,11 +703,24 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
}
-unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
+unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{
- /* for dali & pollock, the highest voltage level we want is 0 */
- if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
- return 0;
+ /* for low power RV2 variants, the highest voltage level we want is 0 */
+ if (ASICREV_IS_RAVEN2(hw_internal_rev))
+ switch (pci_revision_id) {
+ case PRID_DALI_DE:
+ case PRID_DALI_DF:
+ case PRID_DALI_E3:
+ case PRID_DALI_E4:
+ case PRID_POLLOCK_94:
+ case PRID_POLLOCK_95:
+ case PRID_POLLOCK_E9:
+ case PRID_POLLOCK_EA:
+ case PRID_POLLOCK_EB:
+ return 0;
+ default:
+ break;
+ }
/* we are ok with all levels */
return 4;
@@ -1277,7 +1290,9 @@ bool dcn_validate_bandwidth(
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
- if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev))
+ if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
+ dc->ctx->asic_id.hw_internal_rev,
+ dc->ctx->asic_id.pci_revision_id))
return true;
else
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a78e5c74c79c..8ec2dfe45d40 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -63,6 +63,25 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
+int clk_mgr_helper_get_active_plane_cnt(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, total_plane_count;
+
+ total_plane_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_status stream_status = context->stream_status[i];
+
+ /*
+		 * Sum up plane_count for all streams (active and virtual).
+ */
+ total_plane_count += stream_status.plane_count;
+ }
+
+ return total_plane_count;
+}
+
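dcn2_update_clocks() in the dcn20 hunk below uses this count to allow memory p-state changes when nothing is on screen; a condensed sketch of that derivation (names taken from that hunk):

	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
	/* with zero active planes nothing is being scanned out, so p-state
	 * switching is always safe regardless of what the new clocks request */
	p_state_change_support = new_clocks->p_state_change_support ||
				 (total_plane_count == 0);
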
void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
struct dc_link *edp_link = get_edp_link(dc);
@@ -134,13 +153,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
- if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
- ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
- /* TEMP: this check has to come before ASICREV_IS_RENOIR */
- /* which also incorrectly returns true for Dali/Pollock*/
- rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
- break;
- }
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 49ce46b543ea..55d09adbf0d9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -115,12 +115,11 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
- if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) {
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
- }
}
}
@@ -158,6 +157,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
+ bool p_state_change_support;
+ int total_plane_count;
if (dc->work_arounds.skip_clock_update)
return;
@@ -213,9 +214,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000);
}
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 9ef3f7b91a1d..24c5765890fa 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -46,6 +46,7 @@
/* Constants */
#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
+#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */
/* Macros */
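As a hedged aside, the constant above appears to pack major.minor.patch one byte each (55 = 0x37, 51 = 0x33, 0 = 0x00); the following illustrative check confirms the arithmetic (SMU_VERSION is a hypothetical helper, not part of the patch):

	#define SMU_VERSION(maj, min, patch)	(((maj) << 16) | ((min) << 8) | (patch))
	_Static_assert(SMU_VERSION(55, 51, 0) == 0x373300, "55.51.0 encodes as 0x373300");
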
@@ -405,7 +406,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
}
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
{
int i, num_valid_sets;
@@ -465,16 +466,15 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
- struct pp_smu_wm_range_sets ranges = {0};
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
if (!debug->disable_pplib_wm_range) {
- build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+ build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges);
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
- pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &clk_mgr_base->ranges);
}
}
@@ -504,7 +504,7 @@ static struct clk_mgr_funcs dcn21_funcs = {
.notify_wm_ranges = rn_notify_wm_ranges
};
-struct clk_bw_params rn_bw_params = {
+static struct clk_bw_params rn_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.clk_table = {
@@ -544,7 +544,7 @@ struct clk_bw_params rn_bw_params = {
};
-struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
@@ -581,7 +581,7 @@ struct wm_table ddr4_wm_table = {
}
};
-struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
@@ -643,7 +643,7 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
/* Find lowest DPM, FCLK is filled in reverse order*/
for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
- if (clock_table->FClocks[i].Freq != 0) {
+ if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
j = i;
break;
}
@@ -721,6 +721,13 @@ void rn_clk_mgr_construct(
} else {
struct clk_log_info log_info = {0};
+ clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
+
+ /* SMU Version 55.51.0 and up no longer have an issue
+ * that needs to limit minimum dispclk */
+ if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
+ debug->min_disp_clk_khz = 0;
+
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04441dbcba76..47431ca6986d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
int i = 0;
bool ret = false;
+ stream->adjust = *adjust;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -701,7 +703,7 @@ static bool dc_construct(struct dc *dc,
dc_ctx->created_bios = true;
}
-
+ dc->vendor_signature = init_params->vendor_signature;
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
@@ -761,6 +763,28 @@ static bool disable_all_writeback_pipes_for_stream(
return true;
}
+void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+{
+ int i = 0;
+
+	/* If the interdependent update lock hook is not implemented (the DCE110 case), fall back to per-pipe locking */
+ if (dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, lock);
+ else {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Copied conditions that were previously in dce110_apply_ctx_for_surface
+ if (stream == pipe_ctx->stream) {
+ if (!pipe_ctx->top_pipe &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
+ }
+ }
+ }
+}
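
A usage sketch mirroring how disable_dangling_plane() below brackets a front-end reprogram with this helper (illustrative; stream, plane_count and context stand in for the caller's real values):

	apply_ctx_interdependent_lock(dc, context, stream, true);
	dc->hwss.apply_ctx_for_surface(dc, stream, plane_count, context);
	apply_ctx_interdependent_lock(dc, context, stream, false);
	dc->hwss.post_unlock_program_front_end(dc, context);
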
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -786,11 +810,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (dc->hwss.apply_ctx_for_surface)
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
}
- if (dc->hwss.program_front_end_for_ctx)
- dc->hwss.program_front_end_for_ctx(dc, dangling_context);
}
current_ctx = dc->current_state;
@@ -801,11 +834,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
- int count = 0;
- struct pipe_ctx *pipe;
PERF_TRACE();
for (i = 0; i < MAX_PIPES; i++) {
- pipe = &context->res_ctx.pipe_ctx[i];
+ int count = 0;
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state)
continue;
@@ -1210,16 +1242,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mode_changed)
continue;
-
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
}
+ }
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1238,19 +1273,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
- if (dc->hwss.program_front_end_for_ctx)
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context);
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
/*
* enable stereo
@@ -1318,10 +1361,24 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
-bool dc_is_hw_initialized(struct dc *dc)
+static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
- struct dc_bios *dcb = dc->ctx->dc_bios;
- return dcb->funcs->is_accelerated_mode(dcb);
+ int i;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state)
+ continue;
+
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (pipe->plane_state->status.is_flip_pending)
+ return true;
+ }
+ return false;
}
bool dc_post_update_surfaces_to_stream(struct dc *dc)
@@ -1329,11 +1386,14 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
int i;
struct dc_state *context = dc->current_state;
- if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
+ if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
return true;
post_surface_trace(dc);
+ if (is_flip_pending_in_pipes(dc, context))
+ return true;
+
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
@@ -1341,9 +1401,11 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+ dc->hwss.optimize_bandwidth(dc, context);
+
dc->optimized_required = false;
+ dc->wm_optimized_required = false;
- dc->hwss.optimize_bandwidth(dc, context);
return true;
}
@@ -1665,6 +1727,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->coeff_reduction_factor)
update_flags->bits.coeff_reduction_change = 1;
+ if (u->gamut_remap_matrix)
+ update_flags->bits.gamut_remap_change = 1;
+
if (u->gamma) {
enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
@@ -1690,7 +1755,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (update_flags->bits.input_csc_change
|| update_flags->bits.coeff_reduction_change
- || update_flags->bits.gamma_change) {
+ || update_flags->bits.gamma_change
+ || update_flags->bits.gamut_remap_change) {
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}
@@ -1734,14 +1800,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->wb_update)
su_flags->bits.wb_update = 1;
+
+ if (stream_update->dsc_config)
+ su_flags->bits.dsc_changed = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1;
-
- if (stream_update->dsc_config)
- overall_type = UPDATE_TYPE_FULL;
}
for (i = 0 ; i < surface_count; i++) {
@@ -1776,8 +1843,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL) {
- if (stream_update)
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
@@ -1791,6 +1861,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
}
+
+ dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -1829,6 +1901,8 @@ static void copy_surface_update_to_plane(
surface->time.index++;
if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
surface->time.index = 0;
+
+ surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
}
if (srf_update->scaling_info) {
@@ -1928,6 +2002,10 @@ static void copy_surface_update_to_plane(
if (srf_update->coeff_reduction_factor)
surface->coeff_reduction_factor =
*srf_update->coeff_reduction_factor;
+
+ if (srf_update->gamut_remap_matrix)
+ surface->gamut_remap_matrix =
+ *srf_update->gamut_remap_matrix;
}
static void copy_stream_update_to_stream(struct dc *dc,
@@ -2093,18 +2171,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
- if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
- dp_update_dsc_config(pipe_ctx);
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
- }
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
- if (stream_update->dpms_off) {
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (stream_update->dsc_config)
+ dp_update_dsc_config(pipe_ctx);
+ if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx);
/* for dpms, keep acquired resources*/
@@ -2118,8 +2192,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
core_link_enable_stream(dc->current_state, pipe_ctx);
}
-
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -2175,6 +2247,32 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream &&
+ pipe_ctx->stream == stream) {
+ top_pipe_to_program = pipe_ctx;
+ }
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, true);
+ else
+ /* Lock the top pipe while updating plane addrs, since freesync requires
+ * plane addr update event triggers to be synchronized.
+ * top_pipe_to_program is expected to never be NULL
+ */
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+
// Stream updates
if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -2189,6 +2287,12 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+
+ dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -2224,8 +2328,6 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream == stream) {
struct dc_stream_status *stream_status = NULL;
- top_pipe_to_program = pipe_ctx;
-
if (!pipe_ctx->plane_state)
continue;
@@ -2270,12 +2372,6 @@ static void commit_planes_for_stream(struct dc *dc,
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
- /* Lock the top pipe while updating plane addrs, since freesync requires
- * plane addr update event triggers to be synchronized.
- * top_pipe_to_program is expected to never be NULL
- */
- dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
-
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2317,9 +2413,30 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
+ }
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
- }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+ top_pipe_to_program->stream_res.tg);
+ }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ dc->hwss.post_unlock_program_front_end(dc, context);
// Fire manual trigger only when bottom plane is flipped
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -2347,7 +2464,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
enum surface_update_type update_type;
struct dc_state *context;
struct dc_context *dc_ctx = dc->ctx;
- int i;
+ int i, j;
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -2385,6 +2502,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_surface_update_to_plane(surface, &srf_updates[i]);
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
}
copy_stream_update_to_stream(dc, context, stream, stream_update);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a09119c10d7c..67cfff1586e9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,7 +45,7 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
#define DC_LOGGER_INIT(logger)
@@ -585,20 +585,23 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
}
-static bool detect_dp(
- struct dc_link *link,
- struct display_sink_capability *sink_caps,
- bool *converter_disable_audio,
- struct audio_support *audio_support,
- enum dc_detect_reason reason)
+static bool detect_dp(struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ bool *converter_disable_audio,
+ struct audio_support *audio_support,
+ enum dc_detect_reason reason)
{
bool boot = false;
+
sink_caps->signal = link_detect_sink(link, reason);
sink_caps->transaction_type =
get_ddc_transaction_type(sink_caps->signal);
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ dpcd_set_source_specific_data(link);
+
if (!detect_dp_sink_caps(link))
return false;
@@ -606,9 +609,8 @@ static bool detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
- dal_ddc_service_set_transaction_type(
- link->ddc,
- sink_caps->transaction_type);
+ dal_ddc_service_set_transaction_type(link->ddc,
+ sink_caps->transaction_type);
/*
* This call will initiate MST topology discovery. Which
@@ -637,13 +639,10 @@ static bool detect_dp(
if (reason == DETECT_REASON_BOOT)
boot = true;
- dm_helpers_dp_update_branch_info(
- link->ctx,
- link);
+ dm_helpers_dp_update_branch_info(link->ctx, link);
- if (!dm_helpers_dp_mst_start_top_mgr(
- link->ctx,
- link, boot)) {
+ if (!dm_helpers_dp_mst_start_top_mgr(link->ctx,
+ link, boot)) {
/* MST not supported */
link->type = dc_connection_single;
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
@@ -651,7 +650,7 @@ static bool detect_dp(
}
if (link->type != dc_connection_mst_branch &&
- is_dp_active_dongle(link)) {
+ is_dp_active_dongle(link)) {
/* DP active dongles */
link->type = dc_connection_active_dongle;
if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
@@ -662,14 +661,15 @@ static bool detect_dp(
return true;
}
- if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ if (link->dpcd_caps.dongle_type !=
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER)
*converter_disable_audio = true;
}
} else {
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
- sink_caps,
- audio_support);
+ sink_caps,
+ audio_support);
}
return true;
@@ -769,8 +769,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
link->connector_signal == SIGNAL_TYPE_EDP) &&
- link->local_sink)
+ link->local_sink) {
+
+ // need to re-write OUI and brightness in resume case
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ dpcd_set_source_specific_data(link);
+ dc_link_set_default_brightness_aux(link); //TODO: use cached
+ }
+
return true;
+ }
if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
@@ -818,6 +826,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_EDP: {
+ read_current_link_settings_on_detect(link);
+
+ dpcd_set_source_specific_data(link);
+
detect_edp_sink_caps(link);
read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -958,10 +970,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
}
+ if (link->local_sink->edid_caps.panel_patch.disable_fec)
+ link->ctx->dc->debug.disable_fec = true;
+
// Check if edid is the same
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+ if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ link->ctx->dc->debug.hdmi20_disable = true;
+
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
@@ -1480,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
}
}
-static enum dc_status enable_link_dp(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
+static enum dc_status enable_link_dp(struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
@@ -1492,6 +1509,7 @@ static enum dc_status enable_link_dp(
bool fec_enable;
int i;
bool apply_seamless_boot_optimization = false;
+ uint32_t bl_oled_enable_delay = 50; // in ms
// check for seamless boot
for (i = 0; i < state->stream_count; i++) {
@@ -1513,31 +1531,45 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
if (state->clk_mgr && !apply_seamless_boot_optimization)
- state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+ state, false);
+
+ // during mode switch we do DP_SET_POWER off then on, and OUI is lost
+ dpcd_set_source_specific_data(link);
skip_video_pattern = true;
if (link_settings.link_rate == LINK_RATE_LOW)
- skip_video_pattern = false;
-
- if (perform_link_training_with_retries(
- &link_settings,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- pipe_ctx,
- pipe_ctx->stream->signal)) {
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(&link_settings,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal)) {
link->cur_link_settings = link_settings;
status = DC_OK;
- }
- else
+ } else {
status = DC_FAIL_DP_LINK_TRAINING;
+ }
- if (link->preferred_training_settings.fec_enable != NULL)
+ if (link->preferred_training_settings.fec_enable)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
dp_set_fec_enable(link, fec_enable);
+
+ // during mode set we do DP_SET_POWER off then on, aux writes are lost
+ if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+ dc_link_set_default_brightness_aux(link); // TODO: use cached if known
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ dc_link_backlight_enable_aux(link, true);
+ }
+
return status;
}
@@ -1733,8 +1765,7 @@ static void write_i2c_retimer_setting(
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1752,8 +1783,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1765,8 +1795,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1786,8 +1815,7 @@ static void write_i2c_retimer_setting(
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1805,8 +1833,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1818,8 +1845,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1837,8 +1863,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1849,8 +1874,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1861,10 +1885,14 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set retimer failed");
}
static void write_i2c_default_retimer_setting(
@@ -1889,8 +1917,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1901,8 +1928,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0B to 0xDA or 0xD8 */
buffer[0] = 0x0B;
@@ -1913,8 +1939,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1925,8 +1950,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0C to 0x1D or 0x91 */
buffer[0] = 0x0C;
@@ -1937,8 +1961,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1949,8 +1972,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
if (is_vga_mode) {
@@ -1965,8 +1987,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1977,8 +1998,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1989,9 +2009,13 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set default retimer failed");
}
static void write_i2c_redriver_setting(
@@ -2020,8 +2044,7 @@ static void write_i2c_redriver_setting(
slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ DC_LOG_DEBUG("Set redriver failed");
}
static void disable_link(struct dc_link *link, enum signal_type signal)
@@ -2400,8 +2423,8 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if ((psr != NULL) && link->psr_feature_enabled)
- psr->funcs->set_psr_enable(psr, allow_active);
+ if (psr != NULL && link->psr_feature_enabled)
+ psr->funcs->psr_enable(psr, allow_active);
else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
@@ -2417,7 +2440,7 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
struct dmub_psr *psr = dc->res_pool->psr;
if (psr != NULL && link->psr_feature_enabled)
- psr->funcs->get_psr_state(psr_state);
+ psr->funcs->psr_get_state(psr, psr_state);
else if (dmcu != NULL && link->psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
@@ -2589,7 +2612,7 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->frame_delay = 0;
if (psr)
- link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+ link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
else
link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
@@ -2922,10 +2945,13 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
memset(&config, 0, sizeof(config));
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ /* use the stream encoder instance rather than the encoder engine id */
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ config.mst_supported = (pipe_ctx->stream->signal ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
}
@@ -3061,6 +3087,9 @@ void core_link_enable_stream(
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
+ if (stream->sink_patches.delay_ignore_msa > 0)
+ msleep(stream->sink_patches.delay_ignore_msa);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_HDCP)
@@ -3373,7 +3402,7 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
- if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
/* Account for FEC overhead.
* We have to do it based on caps,
* and not based on FEC being set ready,
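/* A worked example, assuming DP 1.4's ~2.4% FEC parity overhead (the exact
 * scaling factor the driver applies sits outside this hunk): HBR2 x4 gives
 *
 *	540000 kHz symbols * 8 bits * 4 lanes = 17280000 kbps
 *
 * after 8b/10b, and accounting for FEC trims that budget to roughly
 *
 *	17280000 * 976 / 1000 = 16865280 kbps
 */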
@@ -3417,3 +3446,11 @@ void dc_link_overwrite_extended_receiver_cap(
dp_overwrite_extended_receiver_cap(link);
}
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+ return (dc_is_dp_signal(link->connector_signal) &&
+ link->link_enc->features.fec_supported &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+ !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index a49c10d5df26..256889eed93e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -126,22 +126,16 @@ struct aux_payloads {
struct vector payloads;
};
-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+static bool dal_ddc_i2c_payloads_create(
+ struct dc_context *ctx,
+ struct i2c_payloads *payloads,
+ uint32_t count)
{
- struct i2c_payloads *payloads;
-
- payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
-
- if (!payloads)
- return NULL;
-
if (dal_vector_construct(
&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
- return payloads;
-
- kfree(payloads);
- return NULL;
+ return true;
+ return false;
}
static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
@@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
return p->payloads.count;
}
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
{
- if (!p || !*p)
+ if (!p)
return;
- dal_vector_destruct(&(*p)->payloads);
- kfree(*p);
- *p = NULL;
+ dal_vector_destruct(&p->payloads);
}
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
@@ -524,9 +516,13 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads;
+
if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
return false;
+ if (!payloads_num)
+ return false;
+
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
@@ -557,23 +553,25 @@ bool dal_ddc_service_query_ddc_data(
ret = dal_ddc_submit_aux_command(ddc, &payload);
}
} else {
- struct i2c_payloads *payloads =
- dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+ struct i2c_command command = {0};
+ struct i2c_payloads payloads;
- struct i2c_command command = {
- .payloads = dal_ddc_i2c_payloads_get(payloads),
- .number_of_payloads = 0,
- .engine = DDC_I2C_COMMAND_ENGINE,
- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+ if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+ return false;
+
+ command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+ command.number_of_payloads = 0;
+ command.engine = DDC_I2C_COMMAND_ENGINE;
+ command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
dal_ddc_i2c_payloads_add(
- payloads, address, write_size, write_buf, true);
+ &payloads, address, write_size, write_buf, true);
dal_ddc_i2c_payloads_add(
- payloads, address, read_size, read_buf, false);
+ &payloads, address, read_size, read_buf, false);
command.number_of_payloads =
- dal_ddc_i2c_payloads_get_count(payloads);
+ dal_ddc_i2c_payloads_get_count(&payloads);
ret = dm_helpers_submit_i2c(
ddc->ctx,
@@ -686,6 +684,10 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
uint8_t write_buffer[2] = {0};
/*Lower than 340 Scramble bit from SCDC caps*/
+ if (ddc_service->link->local_sink &&
+ ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ return;
+
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &sink_version, sizeof(sink_version));
if (sink_version == 1) {
@@ -715,6 +717,10 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
uint8_t tmds_config = 0;
+ if (ddc_service->link->local_sink &&
+ ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ return;
+
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index fd9e69634c50..caa090d0b6ac 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -220,6 +220,30 @@ static enum dpcd_training_patterns
return dpcd_tr_pattern;
}
+static uint8_t dc_dp_initialize_scrambling_data_symbols(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern)
+{
+ uint8_t disable_scrambled_data_symbols = 0;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ disable_scrambled_data_symbols = 1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ disable_scrambled_data_symbols = 0;
+ break;
+ default:
+ ASSERT(0);
+ DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+ return disable_scrambled_data_symbols;
+}
+
static inline bool is_repeater(struct dc_link *link, uint32_t offset)
{
return (!link->is_lttpr_mode_transparent && offset != 0);
@@ -252,6 +276,9 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+ dc_dp_initialize_scrambling_data_symbols(link, pattern);
+
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
@@ -945,6 +972,17 @@ static enum link_training_result perform_channel_equalization_sequence(
}
#define TRAINING_AUX_RD_INTERVAL 100 //us
+static void start_clock_recovery_pattern_early(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
+ __func__);
+ dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+ dp_set_hw_lane_settings(link, lt_settings, offset);
+ udelay(400);
+}
+
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings,
@@ -962,7 +1000,8 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr = 0;
retry_count = 0;
- dp_set_hw_training_pattern(link, tr_pattern, offset);
+ if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ dp_set_hw_training_pattern(link, tr_pattern, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -1434,6 +1473,13 @@ enum link_training_result dc_link_dp_perform_link_training(
&link->preferred_training_settings,
&lt_settings);
+ /* Configure lttpr mode */
+ if (!link->is_lttpr_mode_transparent)
+ configure_lttpr_mode(link);
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -1445,8 +1491,6 @@ enum link_training_result dc_link_dp_perform_link_training(
dp_set_fec_ready(link, fec_enable);
if (!link->is_lttpr_mode_transparent) {
- /* Configure lttpr mode */
- configure_lttpr_mode(link);
/* 2. perform link training (set link training done
* to false is done as well)
@@ -1654,6 +1698,8 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_set_panel_mode(link, panel_mode);
/* Attempt to train with given link training settings */
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
/* Set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -1892,6 +1938,16 @@ bool dp_verify_link_cap(
/* disable PHY done possible by BIOS, will be done by driver itself */
dp_disable_link_phy(link, link->connector_signal);
+ dp_cs_id = get_clock_source_id(link);
+
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+ cur_link_setting = initial_link_settings;
+
/* Temporary Renoir-specific workaround for SWDEV-215184;
* PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
* so add extra cycle of enabling and disabling the PHY before first link training.
@@ -1902,15 +1958,6 @@ bool dp_verify_link_cap(
dp_disable_link_phy(link, link->connector_signal);
}
- dp_cs_id = get_clock_source_id(link);
-
- /* link training starts with the maximum common settings
- * supported by both sink and ASIC.
- */
- initial_link_settings = get_common_supported_link_settings(
- *known_limit_link_setting,
- max_link_cap);
- cur_link_setting = initial_link_settings;
do {
skip_video_pattern = true;
@@ -2654,9 +2701,12 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
break;
}
- test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+ if (dpcd_test_params.bits.CLR_FORMAT == 0)
+ test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
+ else
+ test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
dc_link_dp_set_test_pattern(
link,
@@ -2888,6 +2938,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ link->dc->hwss.blank_stream(pipe_ctx);
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
break;
}
@@ -2904,6 +2960,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_link_reallocate_mst_payload(link);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+ }
+
status = false;
if (out_link_loss)
*out_link_loss = true;
@@ -3165,6 +3227,23 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
link->wa_flags.dp_keep_receiver_powered = false;
}
+/* Read additional sink caps defined in the source-specific DPCD area.
+ * This function currently reads only the SinkCapability address (DP_SOURCE_SINK_CAP).
+ */
+static bool dpcd_read_sink_ext_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data;
+
+ if (!link)
+ return false;
+
+ if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
+ return false;
+
+ link->dpcd_sink_ext_caps.raw = dpcd_data;
+ return true;
+}
+
static bool retrieve_link_cap(struct dc_link *link)
{
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -3448,6 +3527,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
}
+ if (!dpcd_read_sink_ext_caps(link))
+ link->dpcd_sink_ext_caps.raw = 0;
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -3600,6 +3682,8 @@ void detect_edp_sink_caps(struct dc_link *link)
}
}
link->verified_link_cap = link->reported_link_cap;
+
+ dc_link_set_default_brightness_aux(link);
}
void dc_link_dp_enable_hpd(const struct dc_link *link)
@@ -3691,7 +3775,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *odm_pipe;
enum controller_dp_color_space controller_color_space;
int opp_cnt = 1;
- int count;
+ int offset = 0;
+ int dpg_width = width;
switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -3713,33 +3798,30 @@ static void set_crtc_test_pattern(struct dc_link *link,
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
+ dpg_width = width / opp_cnt;
+ offset = dpg_width;
- width /= opp_cnt;
+ opp->funcs->opp_set_disp_pattern_generator(opp,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
controller_test_pattern,
controller_color_space,
color_depth,
NULL,
- width,
- height);
- }
- opp->funcs->opp_set_disp_pattern_generator(opp,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- width,
- height);
- /* wait for dpg to blank pixel data with test pattern */
- for (count = 0; count < 1000; count++) {
- if (opp->funcs->dpg_is_blanked(opp))
- break;
- udelay(100);
+ dpg_width,
+ height,
+ offset);
+ offset += offset;
}
}
}
@@ -3757,11 +3839,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
else if (opp->funcs->opp_set_disp_pattern_generator) {
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
+ int dpg_width = width;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
- width /= opp_cnt;
+ dpg_width = width / opp_cnt;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
@@ -3771,16 +3854,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- width,
- height);
+ dpg_width,
+ height,
+ 0);
}
opp->funcs->opp_set_disp_pattern_generator(opp,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- width,
- height);
+ dpg_width,
+ height,
+ 0);
}
}
break;
@@ -3958,6 +4043,11 @@ bool dc_link_dp_set_test_pattern(
default:
break;
}
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable)
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+ pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
/* update MSA to requested color space */
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream->timing,
@@ -3965,9 +4055,27 @@ bool dc_link_dp_set_test_pattern(
pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+ if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
+ else
+ pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
+ resource_build_info_frame(pipe_ctx);
+ link->dc->hwss.update_info_frame(pipe_ctx);
+ }
+
/* CRTC Patterns */
set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
-
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable)
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+ pipe_ctx->stream_res.tg);
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
@@ -4097,8 +4205,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
struct link_encoder *link_enc = link->link_enc;
uint8_t fec_config = 0;
- if (link->dc->debug.disable_fec ||
- IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
return;
if (link_enc->funcs->fec_set_ready &&
@@ -4133,8 +4240,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
{
struct link_encoder *link_enc = link->link_enc;
- if (link->dc->debug.disable_fec ||
- IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
return;
if (link_enc->funcs->fec_set_enable &&
@@ -4157,3 +4263,163 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
}
}
+void dpcd_set_source_specific_data(struct dc_link *link)
+{
+ const uint32_t post_oui_delay = 30; // 30ms
+ uint8_t dspc = 0;
+ enum dc_status ret = DC_ERROR_UNEXPECTED;
+
+ ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+ sizeof(dspc));
+
+ if (ret != DC_OK) {
+ DC_LOG_ERROR("Error in DP aux read transaction,"
+ " not writing source specific data\n");
+ return;
+ }
+
+ /* Return if OUI unsupported */
+ if (!(dspc & DP_OUI_SUPPORT))
+ return;
+
+ if (!link->dc->vendor_signature.is_valid) {
+ struct dpcd_amd_signature amd_signature;
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+ amd_signature.device_id_byte1 =
+ (uint8_t)(link->ctx->asic_id.chip_id);
+ amd_signature.device_id_byte2 =
+ (uint8_t)(link->ctx->asic_id.chip_id >> 8);
+ memset(&amd_signature.zero, 0, 4);
+ amd_signature.dce_version =
+ (uint8_t)(link->ctx->dce_version);
+ amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
+ amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+
+ } else {
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ link->dc->vendor_signature.data.raw,
+ sizeof(link->dc->vendor_signature.data.raw));
+ }
+
+ // Sink may need to configure internals based on vendor, so allow some
+ // time before proceeding with possibly vendor-specific transactions
+ msleep(post_oui_delay);
+}
+
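/* Byte layout of the 12-byte DP_SOURCE_OUI write above, matching the
 * dpcd_amd_signature definition added later in this diff:
 *
 *	[0..2]   IEEE OUI: 0x00 0x00 0x1A
 *	[3..4]   device id: chip_id low byte, then high byte
 *	[5..8]   four zero bytes
 *	[9]      dce_version
 *	[10..11] dal_version bytes (left 0x0 here)
 */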
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ struct dpcd_source_backlight_set dpcd_backlight_set;
+ uint8_t backlight_control = isHDR ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ // OLEDs have no PWM, they can only use AUX
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ backlight_control = 1;
+
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)(&dpcd_backlight_set),
+ sizeof(dpcd_backlight_set)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_control, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
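/* Usage sketch, with illustrative values: program 250 nits over AUX with no
 * ramp. The interface takes millinits, so 1 nit = 1000 units.
 */
static inline bool set_panel_250_nits_sketch(struct dc_link *link, bool is_hdr)
{
	return dc_link_set_backlight_level_nits(link, is_hdr,
			250 * 1000 /* millinits */, 0 /* ms ramp */);
}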
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ union dpcd_source_backlight_get dpcd_backlight_get;
+
+ memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ dpcd_backlight_get.raw,
+ sizeof(union dpcd_source_backlight_get)))
+ return false;
+
+ *backlight_millinits_avg =
+ dpcd_backlight_get.bytes.backlight_millinits_avg;
+ *backlight_millinits_peak =
+ dpcd_backlight_get.bytes.backlight_millinits_peak;
+
+ /* On unsupported panels the DPCD read usually succeeds but returns 0 */
+ if (*backlight_millinits_avg == 0 ||
+ *backlight_millinits_avg > *backlight_millinits_peak)
+ return false;
+
+ return true;
+}
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable)
+{
+ uint8_t backlight_enable = enable ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
+ &backlight_enable, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+// We read the default from 0x320 because we expect the BIOS to have written it there.
+// The regular get-backlight-nits path reads the panel-set value at 0x326.
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
+{
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *) backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+
+ return true;
+}
+
+bool dc_link_set_default_brightness_aux(struct dc_link *link)
+{
+ uint32_t default_backlight;
+
+ if (link &&
+ (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+ if (!dc_link_read_default_bl_aux(link, &default_backlight))
+ default_backlight = 150000;
+ // if < 5 nits or > 5000 nits, it is likely a bad readback
+ if (default_backlight < 5000 || default_backlight > 5000000)
+ default_backlight = 150000; // 150 nits
+
+ return dc_link_set_backlight_level_nits(link, true,
+ default_backlight, 0);
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index ddb855045767..51e0ee6e7695 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -153,18 +153,19 @@ bool edp_receiver_ready_T9(struct dc_link *link)
unsigned char edpRev = 0;
enum dc_status result = DC_OK;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
- if (edpRev < DP_EDP_12)
- return true;
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- do {
- sinkstatus = 1;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 0)
- break;
- if (result != DC_OK)
- break;
- udelay(100); //MAx T9
- } while (++tries < 50);
+
+ /* Starting with eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(100); // Max T9
+ } while (++tries < 50);
+ }
if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
@@ -183,21 +184,22 @@ bool edp_receiver_ready_T7(struct dc_link *link)
unsigned long long time_taken_in_ns = 0;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
- if (result == DC_OK && edpRev < DP_EDP_12)
- return true;
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- enter_timestamp = dm_get_timestamp(link->ctx);
- do {
- sinkstatus = 0;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 1)
- break;
- if (result != DC_OK)
- break;
- udelay(25);
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
- } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
+
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ /* Starting with eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ enter_timestamp = dm_get_timestamp(link->ctx);
+ do {
+ sinkstatus = 0;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 1)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50 ms
+ }
if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
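/* Timing arithmetic for the bounded waits above: the T7 loop polls
 * DP_SINK_STATUS every 25 us until the elapsed time crosses
 * 50 * 1000000 ns = 50 ms, i.e. at most ~2000 reads plus AUX transaction
 * time; the T9 loop is capped at 50 polls of 100 us each.
 */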
@@ -429,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -533,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
DC_LOG_DSC(" ");
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a0eb9e533a61..f4bcc71b2920 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -46,12 +46,12 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
+#include "dce120/dce120_resource.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/dcn10_resource.h"
-#endif
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
-#include "dce120/dce120_resource.h"
+#endif
#define DC_LOGGER_INIT(logger)
@@ -532,6 +532,51 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+int get_num_odm_splits(struct pipe_ctx *pipe)
+{
+ int odm_split_count = 0;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+ while (next_pipe) {
+ odm_split_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_split_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+ return odm_split_count;
+}
+
+static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+{
+ *split_count = get_num_odm_splits(pipe_ctx);
+ *split_idx = 0;
+ if (*split_count == 0) {
+ /*Check for mpc split*/
+ struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ (*split_idx)++;
+ (*split_count)++;
+ split_pipe = split_pipe->top_pipe;
+ }
+ split_pipe = pipe_ctx->bottom_pipe;
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ (*split_count)++;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ } else {
+ /*Get odm split index*/
+ struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+
+ while (split_pipe) {
+ (*split_idx)++;
+ split_pipe = split_pipe->prev_odm_pipe;
+ }
+ }
+}
+
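/* Example: for a three-pipe ODM chain P0 <-> P1 <-> P2, get_num_odm_splits()
 * returns 2 from any pipe in the chain, and calling
 * calculate_split_count_and_index() on P1 yields split_count = 2 and
 * split_idx = 1 (one prev_odm_pipe hop back to P0).
 */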
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -541,16 +586,16 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
struct rect clip, dest;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- bool pri_split = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
- bool sec_split = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ int split_count = 0;
+ int split_idx = 0;
bool orthogonal_rotation, flip_y_start, flip_x_start;
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pri_split = false;
- sec_split = false;
+ split_count = 0;
+ split_idx = 0;
}
/* The actual clip is an intersection between stream
@@ -609,23 +654,32 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport.height = clip.height * surf_src.height / dest.height;
/* Handle split */
- if (pri_split || sec_split) {
+ if (split_count) {
+ /* Extra pixels from the division remainder go to the pipes whose index
+ * exceeds the "extra pixel index minus one" (epimo), defined here as:
+ */
+ int epimo = 0;
+
if (orthogonal_rotation) {
- if (flip_y_start != pri_split)
- data->viewport.height /= 2;
- else {
- data->viewport.y += data->viewport.height / 2;
- /* Ceil offset pipe */
- data->viewport.height = (data->viewport.height + 1) / 2;
- }
+ if (flip_y_start)
+ split_idx = split_count - split_idx;
+
+ epimo = split_count - data->viewport.height % (split_count + 1);
+
+ data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->viewport.y += split_idx - epimo - 1;
+ data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
} else {
- if (flip_x_start != pri_split)
- data->viewport.width /= 2;
- else {
- data->viewport.x += data->viewport.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- }
+ if (flip_x_start)
+ split_idx = split_count - split_idx;
+
+ epimo = split_count - data->viewport.width % (split_count + 1);
+
+ data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->viewport.x += split_idx - epimo - 1;
+ data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
}
}
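/* A self-contained sketch of the split arithmetic above (illustrative, not
 * driver code): distribute `width` pixels over split_count + 1 pipes, with
 * pipes indexed above epimo absorbing the remainder one pixel each.
 */
#include <stdio.h>

int main(void)
{
	int width = 1003, split_count = 2; /* three pipes sharing 1003 px */
	int base = width / (split_count + 1);
	int epimo = split_count - width % (split_count + 1);
	int idx;

	for (idx = 0; idx <= split_count; idx++) {
		int x = base * idx + (idx > epimo ? idx - epimo - 1 : 0);
		int w = base + (idx > epimo ? 1 : 0);
		printf("pipe %d: x = %d, width = %d\n", idx, x, w);
	}
	/* prints 0/334, 334/334, 668/335 -- the slices sum back to 1003 */
	return 0;
}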
@@ -644,58 +698,58 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_clip = plane_state->clip_rect;
- bool pri_split = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
- bool sec_split = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
- bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-
- pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+ bool pri_split_tb = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ bool sec_split_tb = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ int split_count = 0;
+ int split_idx = 0;
+
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
+ data->recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x)
- pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
- - stream->src.x) * stream->dst.width
+ data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
/ stream->src.width;
- pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
- stream->dst.width / stream->src.width;
- if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
- stream->dst.x + stream->dst.width)
- pipe_ctx->plane_res.scl_data.recout.width =
- stream->dst.x + stream->dst.width
- - pipe_ctx->plane_res.scl_data.recout.x;
+ data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
+ if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
+ data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
- pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+ data->recout.y = stream->dst.y;
if (stream->src.y < surf_clip.y)
- pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
- - stream->src.y) * stream->dst.height
+ data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
/ stream->src.height;
- pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
- stream->dst.height / stream->src.height;
- if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
- stream->dst.y + stream->dst.height)
- pipe_ctx->plane_res.scl_data.recout.height =
- stream->dst.y + stream->dst.height
- - pipe_ctx->plane_res.scl_data.recout.y;
+ data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
+ if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
+ data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
/* Handle h & v split, handle rotation using viewport */
- if (sec_split && top_bottom_split) {
- pipe_ctx->plane_res.scl_data.recout.y +=
- pipe_ctx->plane_res.scl_data.recout.height / 2;
+ if (sec_split_tb) {
+ data->recout.y += data->recout.height / 2;
/* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height =
- (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
- } else if (pri_split && top_bottom_split)
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else if (sec_split) {
- pipe_ctx->plane_res.scl_data.recout.x +=
- pipe_ctx->plane_res.scl_data.recout.width / 2;
- /* Ceil offset pipe */
- pipe_ctx->plane_res.scl_data.recout.width =
- (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- } else if (pri_split)
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ data->recout.height = (data->recout.height + 1) / 2;
+ } else if (pri_split_tb)
+ data->recout.height /= 2;
+ else if (split_count) {
+ /* Extra pixels from the division remainder go to the pipes whose index
+ * exceeds the "extra pixel index minus one" (epimo), defined here as:
+ */
+ int epimo = split_count - data->recout.width % (split_count + 1);
+
+ /* no recout offset due to ODM */
+ if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
+ data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->recout.x += split_idx - epimo - 1;
+ }
+ data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+ }
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -832,12 +886,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
+ int odm_idx = 0;
/*
* Need to calculate the scan direction for viewport to make adjustments
@@ -869,6 +925,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* stream->dst.width / stream->src.width -
src.x * plane_state->dst_rect.width / src.width
* stream->dst.width / stream->src.width);
+ /* adjust recout_skip_h because ODM pipes carry no recout offset */
+ while (odm_pipe) {
+ odm_idx++;
+ odm_pipe = odm_pipe->prev_odm_pipe;
+ }
+ if (odm_idx)
+ recout_skip_h += odm_idx * data->recout.width;
+
recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
src.y * plane_state->dst_rect.height / src.height
@@ -1013,6 +1077,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
* on certain displays, such as the Sharp 4k
*/
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
@@ -1021,6 +1086,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
store_h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
timing->v_border_top + timing->v_border_bottom;
+ if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
+ pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -2034,7 +2101,7 @@ enum dc_status resource_map_pool_resources(
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
- context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+ context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst;
context->stream_status[i].audio_inst =
pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;
@@ -2108,10 +2175,10 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
- if (dc->res_pool->funcs->get_default_swizzle_mode &&
+ if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
- result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
+ result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state);
if (result != DC_OK)
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 6ddbb00ed37a..4f0e7203dba4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
-{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- unsigned int vupdate_line;
- unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
- struct dc_stream_state *stream = pipe_ctx->stream;
- unsigned int us_per_line;
-
- if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
- ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
-
- vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
- return;
-
- if (vpos >= vupdate_line)
- return;
-
- us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
- lines_to_vupdate = vupdate_line - vpos;
- us_to_vupdate = lines_to_vupdate * us_per_line;
-
- /* 70 us is a conservative estimate of cursor update time*/
- if (us_to_vupdate < 70)
- udelay(us_to_vupdate);
- }
-#endif
-}
/**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
-
- delay_cursor_until_vupdate(pipe_ctx, dc);
- dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
}
if (pipe_to_program)
- dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
return true;
}
@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
-
- delay_cursor_until_vupdate(pipe_ctx, dc);
- dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_position(pipe_ctx);
}
if (pipe_to_program)
- dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index a96d8de9380e..64cf24a9ab08 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
+
+ if (pa_config->is_hvm_enabled == 0)
+ dc->debug.nv12_iflip_vm_wa = false;
}
return num_vmids;
@@ -62,7 +65,7 @@ int dc_get_vmid_use_vector(struct dc *dc)
int i;
int in_use = 0;
- for (i = 0; i < dc->vm_helper->num_vmid; i++)
+ for (i = 0; i < MAX_HUBP; i++)
in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0]
| dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1];
return in_use;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8ff25b5dd2f6..1935cf6601eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.69"
+#define DC_VER "3.2.76"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -126,6 +126,7 @@ struct dc_bug_wa {
bool no_connect_phy_config;
bool dedcn20_305_wa;
bool skip_clock_update;
+ bool lt_early_cr_pattern;
};
struct dc_dcc_surface_param {
@@ -229,6 +230,7 @@ struct dc_config {
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
+ bool psr_on_dmub;
};
enum visual_confirm {
@@ -388,6 +390,7 @@ struct dc_debug_options {
int always_scale;
bool disable_pplib_clock_request;
bool disable_clock_gate;
+ bool disable_mem_low_power;
bool disable_dmcu;
bool disable_psr;
bool force_abm_enable;
@@ -453,6 +456,7 @@ struct dc_phy_addr_space_config {
} gart_config;
bool valid;
+ bool is_hvm_enabled;
uint64_t page_table_default_page_addr;
};
@@ -518,6 +522,7 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ bool wm_optimized_required;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
int optimize_seamless_boot_streams;
@@ -526,6 +531,7 @@ struct dc {
struct compressor *fbc_compressor;
struct dc_debug_data debug_data;
+ struct dpcd_vendor_signature vendor_signature;
const char *build_id;
struct vm_helper *vm_helper;
@@ -565,12 +571,14 @@ struct dc_init_data {
struct dc_reg_helper_state *dmub_offload;
struct dc_config flags;
- uint32_t log_mask;
+ uint64_t log_mask;
+
/**
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
+ struct dpcd_vendor_signature vendor_signature;
};
struct dc_callback_init {
@@ -682,7 +690,6 @@ struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
struct fixed31_32 hdr_multiplier;
- bool initialized; /*remove after diag fix*/
union dc_3dlut_state state;
struct dc_context *ctx;
};
@@ -719,6 +726,7 @@ union surface_update_flags {
uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
+ uint32_t gamut_remap_change:1;
/* Full updates */
uint32_t new_plane:1;
@@ -753,6 +761,7 @@ struct dc_plane_state {
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
struct fixed31_32 hdr_mult;
+ struct colorspace_transform gamut_remap_matrix;
// TODO: No longer used, remove
struct dc_hdr_static_metadata hdr_static_ctx;
@@ -832,6 +841,7 @@ struct dc_surface_update {
const struct dc_transfer_func *func_shaper;
const struct dc_3dlut *lut3d_func;
const struct dc_transfer_func *blend_tf;
+ const struct colorspace_transform *gamut_remap_matrix;
};
/*
@@ -865,6 +875,7 @@ struct dc_flip_addrs {
unsigned int flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
+ bool triplebuffer_flips;
};
bool dc_post_update_surfaces_to_stream(
@@ -979,6 +990,20 @@ struct dpcd_caps {
};
+union dpcd_sink_ext_caps {
+ struct {
+ /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
+ * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
+ */
+ uint8_t sdr_aux_backlight_control : 1;
+ uint8_t hdr_aux_backlight_control : 1;
+ uint8_t reserved_1 : 2;
+ uint8_t oled : 1;
+ uint8_t reserved : 3;
+ } bits;
+ uint8_t raw;
+};
+
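/* Decode example: raw = 0x12 (0b00010010) sets hdr_aux_backlight_control
 * (bit 1) and oled (bit 4) while leaving sdr_aux_backlight_control (bit 0)
 * clear.
 */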
#include "dc_link.h"
/*******************************************************************************
@@ -1004,6 +1029,11 @@ struct dc_sink_dsc_caps {
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
+struct dc_sink_fec_caps {
+ bool is_rx_fec_supported;
+ bool is_topology_fec_supported;
+};
+
/*
* The sink structure contains EDID and other display device properties
*/
@@ -1017,7 +1047,10 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
- struct dc_sink_dsc_caps sink_dsc_caps;
+ struct dc_sink_dsc_caps dsc_caps;
+ struct dc_sink_fec_caps fec_caps;
+
+ bool is_vsc_sdp_colorimetry_supported;
/* private to DC core */
struct dc_link *link;
@@ -1075,7 +1108,6 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
unsigned int dc_get_target_backlight_pwm(struct dc *dc);
bool dc_is_dmcu_initialized(struct dc *dc);
-bool dc_is_hw_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index dfe4472c9e40..bb2730e9521e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -432,6 +432,54 @@ struct dp_sink_hw_fw_revision {
uint8_t ieee_fw_rev[2];
};
+struct dpcd_vendor_signature {
+ bool is_valid;
+
+ union dpcd_ieee_vendor_signature {
+ struct {
+ uint8_t ieee_oui[3]; /* 24-bit IEEE OUI */
+ uint8_t ieee_device_id[6]; /* usually a 6-byte ASCII name */
+ uint8_t ieee_hw_rev;
+ uint8_t ieee_fw_rev[2];
+ };
+ uint8_t raw[12];
+ } data;
+};
+
+struct dpcd_amd_signature {
+ uint8_t AMD_IEEE_TxSignature_byte1;
+ uint8_t AMD_IEEE_TxSignature_byte2;
+ uint8_t AMD_IEEE_TxSignature_byte3;
+ uint8_t device_id_byte1;
+ uint8_t device_id_byte2;
+ uint8_t zero[4];
+ uint8_t dce_version;
+ uint8_t dal_version_byte1;
+ uint8_t dal_version_byte2;
+};
+
+struct dpcd_source_backlight_set {
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ } backlight_level_millinits;
+
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ } backlight_transition_time_ms;
+};
+
+union dpcd_source_backlight_get {
+ struct {
+ uint32_t backlight_millinits_peak; /* 326h */
+ uint32_t backlight_millinits_avg; /* 32Ah */
+ } bytes;
+ uint8_t raw[8];
+};
+
/*DPCD register of DP receiver capability field bits-*/
union edp_configuration_cap {
struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 25c50bcab9e9..a8dc3082e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -385,6 +385,8 @@ struct dc_cursor_position {
*/
bool enable;
+ /* Translate cursor x/y by the source rectangle for each plane. */
+ bool translate_by_source;
};
struct dc_cursor_mi_param {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d25603128394..00ff5e98278c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -26,6 +26,7 @@
#ifndef DC_LINK_H_
#define DC_LINK_H_
+#include "dc.h"
#include "dc_types.h"
#include "grph_object_defs.h"
@@ -128,6 +129,7 @@ struct dc_link {
enum edp_revision edp_revision;
bool psr_feature_enabled;
bool psr_allow_active;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
/* MST record stream using this link */
struct link_flags {
@@ -178,6 +180,21 @@ bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp);
+/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits,
+ uint32_t *backlight_millinits_peak);
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable);
+
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits);
+bool dc_link_set_default_brightness_aux(struct dc_link *link);
+
int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
@@ -316,4 +333,7 @@ bool dc_submit_i2c_oem(
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
+
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 92096de79dec..a5c7ef47b8d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -118,6 +118,7 @@ union stream_update_flags {
uint32_t dpms_off:1;
uint32_t gamut_remap:1;
uint32_t wb_update:1;
+ uint32_t dsc_changed : 1;
} bits;
uint32_t raw;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index e59532d98cb4..0d210104ba0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -229,7 +229,9 @@ struct dc_panel_patch {
unsigned int extra_t12_ms;
unsigned int extra_delay_backlight_off;
unsigned int extra_t7_ms;
- unsigned int manage_secondary_link;
+ unsigned int skip_scdc_overwrite;
+ unsigned int delay_ignore_msa;
+ unsigned int disable_fec;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index fdf3d8f87eee..fbfcff700971 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 68c4049cbc2a..743042d5905a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
retry_on_defer = true;
- /* fall through */
+ fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
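/* fallthrough here is the kernel macro from <linux/compiler_attributes.h>,
 * expanding to __attribute__((__fallthrough__)) on compilers that support
 * it, so -Wimplicit-fallthrough stays satisfied without relying on comment
 * parsing:
 *
 *	case AUX_TRANSACTION_REPLY_AUX_DEFER:
 *		retry_on_defer = true;
 *		fallthrough;	// deliberately continue into the NACK case
 *	case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
 */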
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 30d953acd016..f0cebe721bcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -378,6 +378,11 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
+ struct dc_context *ctx = dmcu->ctx;
+ unsigned int i;
+ // 5 4 3 2 1 0
+ // F E D C B A - bit 0 is A, bit 5 is F
+ unsigned int tx_interrupt_mask = 0;
PERF_TRACE();
/* Definition of DC_DMCU_SCRATCH
@@ -387,6 +392,15 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
*/
dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH);
+ for (i = 0; i < ctx->dc->link_count; i++) {
+ if (ctx->dc->links[i]->link_enc->features.flags.bits.DP_IS_USB_C) {
+ if (ctx->dc->links[i]->link_enc->transmitter >= TRANSMITTER_UNIPHY_A &&
+ ctx->dc->links[i]->link_enc->transmitter <= TRANSMITTER_UNIPHY_F) {
+ tx_interrupt_mask |= 1 << ctx->dc->links[i]->link_enc->transmitter;
+ }
+ }
+ }
+
switch (dmcu->dmcu_state) {
case DMCU_UNLOADED:
status = false;
@@ -401,6 +415,8 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
/* Set backlight ramping stepsize */
REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
+ REG_WRITE(MASTER_COMM_DATA_REG3, tx_interrupt_mask);
+
/* Set command to initialize microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_INIT_DMCU);
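/*
 * Editor's note: the loop added above packs "this link's PHY is a USB-C DP
 * transmitter" into one bit per UNIPHY instance and hands the mask to the
 * DMCU firmware through MASTER_COMM_DATA_REG3. A hypothetical worked example
 * (user-space C), assuming, as the in-line comment suggests, that
 * TRANSMITTER_UNIPHY_A shifts to bit 0 and TRANSMITTER_UNIPHY_F to bit 5:
 */
#include <stdio.h>

enum transmitter { UNIPHY_A, UNIPHY_B, UNIPHY_C, UNIPHY_D, UNIPHY_E, UNIPHY_F };

int main(void)
{
	/* Say two links are USB-C, wired to PHYs B and E. */
	const enum transmitter usb_c_phy[] = { UNIPHY_B, UNIPHY_E };
	unsigned int mask = 0;
	unsigned int i;

	for (i = 0; i < sizeof(usb_c_phy) / sizeof(usb_c_phy[0]); i++)
		mask |= 1u << usb_c_phy[i];

	printf("tx_interrupt_mask = 0x%02x\n", mask);	/* 0x12: bits 1, 4 */
	return 0;
}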
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 066188ba7949..24adec407972 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -267,6 +267,9 @@ static void set_speed(
uint32_t xtal_ref_div = 0;
uint32_t prescale = 0;
+ if (speed == 0)
+ return;
+
REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
if (xtal_ref_div == 0)
@@ -274,17 +277,15 @@ static void set_speed(
prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
- if (speed) {
- if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
- REG_UPDATE_N(SPEED, 3,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
- else
- REG_UPDATE_N(SPEED, 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
- }
+ if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+ REG_UPDATE_N(SPEED, 3,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
+ else
+ REG_UPDATE_N(SPEED, 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
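/*
 * Editor's note: the rewrite above replaces the "if (speed)" wrapper with an
 * early return, which also keeps the prescale division from ever running with
 * a zero divisor. A sketch of the arithmetic (sample values are invented, and
 * the xtal_ref_div fallback value is a stand-in for whatever the elided
 * kernel line assigns; only the formula itself comes from the code):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t i2c_prescale(uint32_t ref_freq, uint32_t xtal_ref_div,
			     uint32_t speed)
{
	if (speed == 0)		/* mirrors the new early return */
		return 0;
	if (xtal_ref_div == 0)
		xtal_ref_div = 2;	/* stand-in default for this sketch */
	return ((ref_freq * 2) / xtal_ref_div) / speed;
}

int main(void)
{
	/* e.g. ref clock 50000, divider 2, speed 100 -> prescale 500 */
	printf("%u\n", i2c_prescale(50000, 2, 100));
	return 0;
}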
static bool setup_engine(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 8aa937f496c4..51481e922eb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -479,7 +479,7 @@ static void program_grph_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
sign = 1;
floating = 1;
- /* fall through */
+ fallthrough;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
grph_depth = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
index 48862bebf29e..7311f312369f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -22,1004 +22,1330 @@
* Authors: AMD
*
*/
-
#include "transform.h"
+//=========================================
+// <num_taps> = 2
+// <num_phases> = 16
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = s1.10
+// <CoefOut> = s1.12
+//=========================================
static const uint16_t filter_2tap_16p[18] = {
- 4096, 0,
- 3840, 256,
- 3584, 512,
- 3328, 768,
- 3072, 1024,
- 2816, 1280,
- 2560, 1536,
- 2304, 1792,
- 2048, 2048
+ 0x1000, 0x0000,
+ 0x0FF0, 0x0010,
+ 0x0FB0, 0x0050,
+ 0x0F34, 0x00CC,
+ 0x0E68, 0x0198,
+ 0x0D44, 0x02BC,
+ 0x0BC4, 0x043C,
+ 0x09FC, 0x0604,
+ 0x0800, 0x0800
};
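/*
 * Editor's note: in this file the upscale tables change only in notation
 * (compare the old 1708, 2424, 16348 row with the new 0x06AC, 0x0978, 0x3FDC
 * one), while the _117/_150 ratio tables are renamed to _116/_149 with
 * refreshed coefficients. Either way the encoding is the s1.12 output format
 * the header comments name (written "1.12" on some tables): 0x1000 is +1.0
 * and values with bit 13 set are negative. A standalone decoder sketch; note
 * that the decoded phase's taps sum to 1.0 (0x1000):
 */
#include <stdint.h>
#include <stdio.h>

static double s1_12(uint16_t v)
{
	int32_t raw = v & 0x3FFF;	/* 14-bit s1.12 field */

	if (raw & 0x2000)		/* sign bit set -> negative */
		raw -= 0x4000;
	return raw / 4096.0;		/* 2^12 fractional bits */
}

int main(void)
{
	/* One phase of filter_3tap_16p_upscale below. */
	const uint16_t taps[3] = { 0x06AC, 0x0978, 0x3FDC };
	double sum = 0.0;
	int i;

	for (i = 0; i < 3; i++) {
		printf("tap %d = %+.6f\n", i, s1_12(taps[i]));
		sum += s1_12(taps[i]);
	}
	printf("sum = %.6f\n", sum);	/* 1.000000 */
	return 0;
}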
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_16p_upscale[27] = {
- 2048, 2048, 0,
- 1708, 2424, 16348,
- 1372, 2796, 16308,
- 1056, 3148, 16272,
- 768, 3464, 16244,
- 512, 3728, 16236,
- 296, 3928, 16252,
- 124, 4052, 16296,
- 0, 4096, 0
+ 0x0804, 0x07FC, 0x0000,
+ 0x06AC, 0x0978, 0x3FDC,
+ 0x055C, 0x0AF0, 0x3FB4,
+ 0x0420, 0x0C50, 0x3F90,
+ 0x0300, 0x0D88, 0x3F78,
+ 0x0200, 0x0E90, 0x3F70,
+ 0x0128, 0x0F5C, 0x3F7C,
+ 0x007C, 0x0FD8, 0x3FAC,
+ 0x0000, 0x1000, 0x0000
};
-static const uint16_t filter_3tap_16p_117[27] = {
- 2048, 2048, 0,
- 1824, 2276, 16376,
- 1600, 2496, 16380,
- 1376, 2700, 16,
- 1156, 2880, 52,
- 948, 3032, 108,
- 756, 3144, 192,
- 580, 3212, 296,
- 428, 3236, 428
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_116[27] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x0700, 0x0914, 0x3FEC,
+ 0x0604, 0x0A1C, 0x3FE0,
+ 0x050C, 0x0B14, 0x3FE0,
+ 0x041C, 0x0BF4, 0x3FF0,
+ 0x0340, 0x0CB0, 0x0010,
+ 0x0274, 0x0D3C, 0x0050,
+ 0x01C0, 0x0D94, 0x00AC,
+ 0x0128, 0x0DB4, 0x0124
};
-static const uint16_t filter_3tap_16p_150[27] = {
- 2048, 2048, 0,
- 1872, 2184, 36,
- 1692, 2308, 88,
- 1516, 2420, 156,
- 1340, 2516, 236,
- 1168, 2592, 328,
- 1004, 2648, 440,
- 844, 2684, 560,
- 696, 2696, 696
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_149[27] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x0730, 0x08CC, 0x0004,
+ 0x0660, 0x098C, 0x0014,
+ 0x0590, 0x0A3C, 0x0034,
+ 0x04C4, 0x0AD4, 0x0068,
+ 0x0400, 0x0B54, 0x00AC,
+ 0x0348, 0x0BB0, 0x0108,
+ 0x029C, 0x0BEC, 0x0178,
+ 0x0200, 0x0C00, 0x0200
};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_16p_183[27] = {
- 2048, 2048, 0,
- 1892, 2104, 92,
- 1744, 2152, 196,
- 1592, 2196, 300,
- 1448, 2232, 412,
- 1304, 2256, 528,
- 1168, 2276, 648,
- 1032, 2288, 772,
- 900, 2292, 900
+ 0x0804, 0x07FC, 0x0000,
+ 0x0754, 0x0880, 0x002C,
+ 0x06A8, 0x08F0, 0x0068,
+ 0x05FC, 0x0954, 0x00B0,
+ 0x0550, 0x09AC, 0x0104,
+ 0x04A8, 0x09F0, 0x0168,
+ 0x0408, 0x0A20, 0x01D8,
+ 0x036C, 0x0A40, 0x0254,
+ 0x02DC, 0x0A48, 0x02DC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_16p_upscale[36] = {
- 0, 4096, 0, 0,
- 16240, 4056, 180, 16380,
- 16136, 3952, 404, 16364,
- 16072, 3780, 664, 16344,
- 16040, 3556, 952, 16312,
- 16036, 3284, 1268, 16272,
- 16052, 2980, 1604, 16224,
- 16084, 2648, 1952, 16176,
- 16128, 2304, 2304, 16128
+ 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+ 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+ 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+ 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+ 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+ 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+ 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+ 0x3F00, 0x08FC, 0x0900, 0x3F04
};
-static const uint16_t filter_4tap_16p_117[36] = {
- 428, 3236, 428, 0,
- 276, 3232, 604, 16364,
- 148, 3184, 800, 16340,
- 44, 3104, 1016, 16312,
- 16344, 2984, 1244, 16284,
- 16284, 2832, 1488, 16256,
- 16244, 2648, 1732, 16236,
- 16220, 2440, 1976, 16220,
- 16212, 2216, 2216, 16212
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_116[36] = {
+ 0x01A8, 0x0CB4, 0x01A4, 0x0000,
+ 0x0110, 0x0CB0, 0x0254, 0x3FEC,
+ 0x0090, 0x0C80, 0x031C, 0x3FD4,
+ 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+ 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+ 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+ 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+ 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+ 0x3F54, 0x08AC, 0x08AC, 0x3F54
};
-static const uint16_t filter_4tap_16p_150[36] = {
- 696, 2700, 696, 0,
- 560, 2700, 848, 16364,
- 436, 2676, 1008, 16348,
- 328, 2628, 1180, 16336,
- 232, 2556, 1356, 16328,
- 152, 2460, 1536, 16328,
- 84, 2344, 1716, 16332,
- 28, 2208, 1888, 16348,
- 16376, 2052, 2052, 16376
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_149[36] = {
+ 0x02B8, 0x0A90, 0x02B8, 0x0000,
+ 0x0230, 0x0A90, 0x0350, 0x3FF0,
+ 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+ 0x0148, 0x0A48, 0x049C, 0x3FD4,
+ 0x00E8, 0x0A00, 0x054C, 0x3FCC,
+ 0x0098, 0x09A0, 0x0600, 0x3FC8,
+ 0x0054, 0x0928, 0x06B4, 0x3FD0,
+ 0x001C, 0x08A4, 0x0760, 0x3FE0,
+ 0x3FFC, 0x0804, 0x0804, 0x3FFC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_16p_183[36] = {
- 940, 2208, 940, 0,
- 832, 2200, 1052, 4,
- 728, 2180, 1164, 16,
- 628, 2148, 1280, 36,
- 536, 2100, 1392, 60,
- 448, 2044, 1504, 92,
- 368, 1976, 1612, 132,
- 296, 1900, 1716, 176,
- 232, 1812, 1812, 232
+ 0x03B0, 0x08A0, 0x03B0, 0x0000,
+ 0x0348, 0x0898, 0x041C, 0x0004,
+ 0x02DC, 0x0884, 0x0490, 0x0010,
+ 0x0278, 0x0864, 0x0500, 0x0024,
+ 0x021C, 0x0838, 0x0570, 0x003C,
+ 0x01C8, 0x07FC, 0x05E0, 0x005C,
+ 0x0178, 0x07B8, 0x064C, 0x0084,
+ 0x0130, 0x076C, 0x06B0, 0x00B4,
+ 0x00F0, 0x0714, 0x0710, 0x00EC
};
+//=========================================
+// <num_taps> = 2
+// <num_phases> = 64
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = s1.10
+// <CoefOut> = s1.12
+//=========================================
static const uint16_t filter_2tap_64p[66] = {
- 4096, 0,
- 4032, 64,
- 3968, 128,
- 3904, 192,
- 3840, 256,
- 3776, 320,
- 3712, 384,
- 3648, 448,
- 3584, 512,
- 3520, 576,
- 3456, 640,
- 3392, 704,
- 3328, 768,
- 3264, 832,
- 3200, 896,
- 3136, 960,
- 3072, 1024,
- 3008, 1088,
- 2944, 1152,
- 2880, 1216,
- 2816, 1280,
- 2752, 1344,
- 2688, 1408,
- 2624, 1472,
- 2560, 1536,
- 2496, 1600,
- 2432, 1664,
- 2368, 1728,
- 2304, 1792,
- 2240, 1856,
- 2176, 1920,
- 2112, 1984,
- 2048, 2048 };
+ 0x1000, 0x0000,
+ 0x1000, 0x0000,
+ 0x0FFC, 0x0004,
+ 0x0FF8, 0x0008,
+ 0x0FF0, 0x0010,
+ 0x0FE4, 0x001C,
+ 0x0FD8, 0x0028,
+ 0x0FC4, 0x003C,
+ 0x0FB0, 0x0050,
+ 0x0F98, 0x0068,
+ 0x0F7C, 0x0084,
+ 0x0F58, 0x00A8,
+ 0x0F34, 0x00CC,
+ 0x0F08, 0x00F8,
+ 0x0ED8, 0x0128,
+ 0x0EA4, 0x015C,
+ 0x0E68, 0x0198,
+ 0x0E28, 0x01D8,
+ 0x0DE4, 0x021C,
+ 0x0D98, 0x0268,
+ 0x0D44, 0x02BC,
+ 0x0CEC, 0x0314,
+ 0x0C90, 0x0370,
+ 0x0C2C, 0x03D4,
+ 0x0BC4, 0x043C,
+ 0x0B58, 0x04A8,
+ 0x0AE8, 0x0518,
+ 0x0A74, 0x058C,
+ 0x09FC, 0x0604,
+ 0x0980, 0x0680,
+ 0x0900, 0x0700,
+ 0x0880, 0x0780,
+ 0x0800, 0x0800
+};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_64p_upscale[99] = {
- 2048, 2048, 0,
- 1960, 2140, 16376,
- 1876, 2236, 16364,
- 1792, 2328, 16356,
- 1708, 2424, 16348,
- 1620, 2516, 16336,
- 1540, 2612, 16328,
- 1456, 2704, 16316,
- 1372, 2796, 16308,
- 1292, 2884, 16296,
- 1212, 2976, 16288,
- 1136, 3060, 16280,
- 1056, 3148, 16272,
- 984, 3228, 16264,
- 908, 3312, 16256,
- 836, 3388, 16248,
- 768, 3464, 16244,
- 700, 3536, 16240,
- 636, 3604, 16236,
- 572, 3668, 16236,
- 512, 3728, 16236,
- 456, 3784, 16236,
- 400, 3836, 16240,
- 348, 3884, 16244,
- 296, 3928, 16252,
- 252, 3964, 16260,
- 204, 4000, 16268,
- 164, 4028, 16284,
- 124, 4052, 16296,
- 88, 4072, 16316,
- 56, 4084, 16336,
- 24, 4092, 16356,
- 0, 4096, 0
+ 0x0804, 0x07FC, 0x0000,
+ 0x07A8, 0x0860, 0x3FF8,
+ 0x0754, 0x08BC, 0x3FF0,
+ 0x0700, 0x0918, 0x3FE8,
+ 0x06AC, 0x0978, 0x3FDC,
+ 0x0654, 0x09D8, 0x3FD4,
+ 0x0604, 0x0A34, 0x3FC8,
+ 0x05B0, 0x0A90, 0x3FC0,
+ 0x055C, 0x0AF0, 0x3FB4,
+ 0x050C, 0x0B48, 0x3FAC,
+ 0x04BC, 0x0BA0, 0x3FA4,
+ 0x0470, 0x0BF4, 0x3F9C,
+ 0x0420, 0x0C50, 0x3F90,
+ 0x03D8, 0x0C9C, 0x3F8C,
+ 0x038C, 0x0CF0, 0x3F84,
+ 0x0344, 0x0D40, 0x3F7C,
+ 0x0300, 0x0D88, 0x3F78,
+ 0x02BC, 0x0DD0, 0x3F74,
+ 0x027C, 0x0E14, 0x3F70,
+ 0x023C, 0x0E54, 0x3F70,
+ 0x0200, 0x0E90, 0x3F70,
+ 0x01C8, 0x0EC8, 0x3F70,
+ 0x0190, 0x0EFC, 0x3F74,
+ 0x015C, 0x0F2C, 0x3F78,
+ 0x0128, 0x0F5C, 0x3F7C,
+ 0x00FC, 0x0F7C, 0x3F88,
+ 0x00CC, 0x0FA4, 0x3F90,
+ 0x00A4, 0x0FC0, 0x3F9C,
+ 0x007C, 0x0FD8, 0x3FAC,
+ 0x0058, 0x0FE8, 0x3FC0,
+ 0x0038, 0x0FF4, 0x3FD4,
+ 0x0018, 0x1000, 0x3FE8,
+ 0x0000, 0x1000, 0x0000
};
-static const uint16_t filter_3tap_64p_117[99] = {
- 2048, 2048, 0,
- 1992, 2104, 16380,
- 1936, 2160, 16380,
- 1880, 2220, 16376,
- 1824, 2276, 16376,
- 1768, 2332, 16376,
- 1712, 2388, 16376,
- 1656, 2444, 16376,
- 1600, 2496, 16380,
- 1544, 2548, 0,
- 1488, 2600, 4,
- 1432, 2652, 8,
- 1376, 2700, 16,
- 1320, 2748, 20,
- 1264, 2796, 32,
- 1212, 2840, 40,
- 1156, 2880, 52,
- 1104, 2920, 64,
- 1052, 2960, 80,
- 1000, 2996, 92,
- 948, 3032, 108,
- 900, 3060, 128,
- 852, 3092, 148,
- 804, 3120, 168,
- 756, 3144, 192,
- 712, 3164, 216,
- 668, 3184, 240,
- 624, 3200, 268,
- 580, 3212, 296,
- 540, 3220, 328,
- 500, 3228, 360,
- 464, 3232, 392,
- 428, 3236, 428
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_116[99] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x07C0, 0x0844, 0x3FFC,
+ 0x0780, 0x0888, 0x3FF8,
+ 0x0740, 0x08D0, 0x3FF0,
+ 0x0700, 0x0914, 0x3FEC,
+ 0x06C0, 0x0958, 0x3FE8,
+ 0x0684, 0x0998, 0x3FE4,
+ 0x0644, 0x09DC, 0x3FE0,
+ 0x0604, 0x0A1C, 0x3FE0,
+ 0x05C4, 0x0A5C, 0x3FE0,
+ 0x0588, 0x0A9C, 0x3FDC,
+ 0x0548, 0x0ADC, 0x3FDC,
+ 0x050C, 0x0B14, 0x3FE0,
+ 0x04CC, 0x0B54, 0x3FE0,
+ 0x0490, 0x0B8C, 0x3FE4,
+ 0x0458, 0x0BC0, 0x3FE8,
+ 0x041C, 0x0BF4, 0x3FF0,
+ 0x03E0, 0x0C28, 0x3FF8,
+ 0x03A8, 0x0C58, 0x0000,
+ 0x0374, 0x0C88, 0x0004,
+ 0x0340, 0x0CB0, 0x0010,
+ 0x0308, 0x0CD8, 0x0020,
+ 0x02D8, 0x0CFC, 0x002C,
+ 0x02A0, 0x0D20, 0x0040,
+ 0x0274, 0x0D3C, 0x0050,
+ 0x0244, 0x0D58, 0x0064,
+ 0x0214, 0x0D70, 0x007C,
+ 0x01E8, 0x0D84, 0x0094,
+ 0x01C0, 0x0D94, 0x00AC,
+ 0x0198, 0x0DA0, 0x00C8,
+ 0x0170, 0x0DAC, 0x00E4,
+ 0x014C, 0x0DB0, 0x0104,
+ 0x0128, 0x0DB4, 0x0124
};
-static const uint16_t filter_3tap_64p_150[99] = {
- 2048, 2048, 0,
- 2004, 2080, 8,
- 1960, 2116, 16,
- 1916, 2148, 28,
- 1872, 2184, 36,
- 1824, 2216, 48,
- 1780, 2248, 60,
- 1736, 2280, 76,
- 1692, 2308, 88,
- 1648, 2336, 104,
- 1604, 2368, 120,
- 1560, 2392, 136,
- 1516, 2420, 156,
- 1472, 2444, 172,
- 1428, 2472, 192,
- 1384, 2492, 212,
- 1340, 2516, 236,
- 1296, 2536, 256,
- 1252, 2556, 280,
- 1212, 2576, 304,
- 1168, 2592, 328,
- 1124, 2608, 356,
- 1084, 2624, 384,
- 1044, 2636, 412,
- 1004, 2648, 440,
- 964, 2660, 468,
- 924, 2668, 500,
- 884, 2676, 528,
- 844, 2684, 560,
- 808, 2688, 596,
- 768, 2692, 628,
- 732, 2696, 664,
- 696, 2696, 696
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_149[99] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x07CC, 0x0834, 0x0000,
+ 0x0798, 0x0868, 0x0000,
+ 0x0764, 0x089C, 0x0000,
+ 0x0730, 0x08CC, 0x0004,
+ 0x0700, 0x08FC, 0x0004,
+ 0x06CC, 0x092C, 0x0008,
+ 0x0698, 0x095C, 0x000C,
+ 0x0660, 0x098C, 0x0014,
+ 0x062C, 0x09B8, 0x001C,
+ 0x05FC, 0x09E4, 0x0020,
+ 0x05C4, 0x0A10, 0x002C,
+ 0x0590, 0x0A3C, 0x0034,
+ 0x055C, 0x0A64, 0x0040,
+ 0x0528, 0x0A8C, 0x004C,
+ 0x04F8, 0x0AB0, 0x0058,
+ 0x04C4, 0x0AD4, 0x0068,
+ 0x0490, 0x0AF8, 0x0078,
+ 0x0460, 0x0B18, 0x0088,
+ 0x0430, 0x0B38, 0x0098,
+ 0x0400, 0x0B54, 0x00AC,
+ 0x03D0, 0x0B6C, 0x00C4,
+ 0x03A0, 0x0B88, 0x00D8,
+ 0x0374, 0x0B9C, 0x00F0,
+ 0x0348, 0x0BB0, 0x0108,
+ 0x0318, 0x0BC4, 0x0124,
+ 0x02EC, 0x0BD4, 0x0140,
+ 0x02C4, 0x0BE0, 0x015C,
+ 0x029C, 0x0BEC, 0x0178,
+ 0x0274, 0x0BF4, 0x0198,
+ 0x024C, 0x0BFC, 0x01B8,
+ 0x0228, 0x0BFC, 0x01DC,
+ 0x0200, 0x0C00, 0x0200
};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_64p_183[99] = {
- 2048, 2048, 0,
- 2008, 2060, 20,
- 1968, 2076, 44,
- 1932, 2088, 68,
- 1892, 2104, 92,
- 1856, 2116, 120,
- 1816, 2128, 144,
- 1780, 2140, 168,
- 1744, 2152, 196,
- 1704, 2164, 220,
- 1668, 2176, 248,
- 1632, 2188, 272,
- 1592, 2196, 300,
- 1556, 2204, 328,
- 1520, 2216, 356,
- 1484, 2224, 384,
- 1448, 2232, 412,
- 1412, 2240, 440,
- 1376, 2244, 468,
- 1340, 2252, 496,
- 1304, 2256, 528,
- 1272, 2264, 556,
- 1236, 2268, 584,
- 1200, 2272, 616,
- 1168, 2276, 648,
- 1132, 2280, 676,
- 1100, 2284, 708,
- 1064, 2288, 740,
- 1032, 2288, 772,
- 996, 2292, 800,
- 964, 2292, 832,
- 932, 2292, 868,
- 900, 2292, 900
+ 0x0804, 0x07FC, 0x0000,
+ 0x07D4, 0x0824, 0x0008,
+ 0x07AC, 0x0840, 0x0014,
+ 0x0780, 0x0860, 0x0020,
+ 0x0754, 0x0880, 0x002C,
+ 0x0728, 0x089C, 0x003C,
+ 0x0700, 0x08B8, 0x0048,
+ 0x06D4, 0x08D4, 0x0058,
+ 0x06A8, 0x08F0, 0x0068,
+ 0x067C, 0x090C, 0x0078,
+ 0x0650, 0x0924, 0x008C,
+ 0x0628, 0x093C, 0x009C,
+ 0x05FC, 0x0954, 0x00B0,
+ 0x05D0, 0x096C, 0x00C4,
+ 0x05A8, 0x0980, 0x00D8,
+ 0x0578, 0x0998, 0x00F0,
+ 0x0550, 0x09AC, 0x0104,
+ 0x0528, 0x09BC, 0x011C,
+ 0x04FC, 0x09D0, 0x0134,
+ 0x04D4, 0x09E0, 0x014C,
+ 0x04A8, 0x09F0, 0x0168,
+ 0x0480, 0x09FC, 0x0184,
+ 0x045C, 0x0A08, 0x019C,
+ 0x0434, 0x0A14, 0x01B8,
+ 0x0408, 0x0A20, 0x01D8,
+ 0x03E0, 0x0A2C, 0x01F4,
+ 0x03B8, 0x0A34, 0x0214,
+ 0x0394, 0x0A38, 0x0234,
+ 0x036C, 0x0A40, 0x0254,
+ 0x0348, 0x0A44, 0x0274,
+ 0x0324, 0x0A48, 0x0294,
+ 0x0300, 0x0A48, 0x02B8,
+ 0x02DC, 0x0A48, 0x02DC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_64p_upscale[132] = {
- 0, 4096, 0, 0,
- 16344, 4092, 40, 0,
- 16308, 4084, 84, 16380,
- 16272, 4072, 132, 16380,
- 16240, 4056, 180, 16380,
- 16212, 4036, 232, 16376,
- 16184, 4012, 288, 16372,
- 16160, 3984, 344, 16368,
- 16136, 3952, 404, 16364,
- 16116, 3916, 464, 16360,
- 16100, 3872, 528, 16356,
- 16084, 3828, 596, 16348,
- 16072, 3780, 664, 16344,
- 16060, 3728, 732, 16336,
- 16052, 3676, 804, 16328,
- 16044, 3616, 876, 16320,
- 16040, 3556, 952, 16312,
- 16036, 3492, 1028, 16300,
- 16032, 3424, 1108, 16292,
- 16032, 3356, 1188, 16280,
- 16036, 3284, 1268, 16272,
- 16036, 3212, 1352, 16260,
- 16040, 3136, 1436, 16248,
- 16044, 3056, 1520, 16236,
- 16052, 2980, 1604, 16224,
- 16060, 2896, 1688, 16212,
- 16064, 2816, 1776, 16200,
- 16076, 2732, 1864, 16188,
- 16084, 2648, 1952, 16176,
- 16092, 2564, 2040, 16164,
- 16104, 2476, 2128, 16152,
- 16116, 2388, 2216, 16140,
- 16128, 2304, 2304, 16128 };
+ 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x3FDC, 0x0FFC, 0x0028, 0x0000,
+ 0x3FB4, 0x0FF8, 0x0054, 0x0000,
+ 0x3F94, 0x0FE8, 0x0084, 0x0000,
+ 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+ 0x3F58, 0x0FC4, 0x00E8, 0x3FFC,
+ 0x3F3C, 0x0FAC, 0x0120, 0x3FF8,
+ 0x3F24, 0x0F90, 0x0158, 0x3FF4,
+ 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+ 0x3EF8, 0x0F4C, 0x01D0, 0x3FEC,
+ 0x3EE8, 0x0F20, 0x0210, 0x3FE8,
+ 0x3ED8, 0x0EF4, 0x0254, 0x3FE0,
+ 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+ 0x3EC0, 0x0E90, 0x02DC, 0x3FD4,
+ 0x3EB8, 0x0E58, 0x0324, 0x3FCC,
+ 0x3EB0, 0x0E20, 0x036C, 0x3FC4,
+ 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+ 0x3EA8, 0x0DA4, 0x0404, 0x3FB0,
+ 0x3EA4, 0x0D60, 0x0454, 0x3FA8,
+ 0x3EA4, 0x0D1C, 0x04A4, 0x3F9C,
+ 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+ 0x3EA8, 0x0C88, 0x0548, 0x3F88,
+ 0x3EAC, 0x0C3C, 0x059C, 0x3F7C,
+ 0x3EB0, 0x0BF0, 0x05F0, 0x3F70,
+ 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+ 0x3EBC, 0x0B54, 0x0698, 0x3F58,
+ 0x3EC4, 0x0B00, 0x06F0, 0x3F4C,
+ 0x3ECC, 0x0AAC, 0x0748, 0x3F40,
+ 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+ 0x3EE0, 0x0A04, 0x07F8, 0x3F24,
+ 0x3EEC, 0x09AC, 0x0850, 0x3F18,
+ 0x3EF8, 0x0954, 0x08A8, 0x3F0C,
+ 0x3F00, 0x08FC, 0x0900, 0x3F04
+};
-static const uint16_t filter_4tap_64p_117[132] = {
- 420, 3248, 420, 0,
- 380, 3248, 464, 16380,
- 344, 3248, 508, 16372,
- 308, 3248, 552, 16368,
- 272, 3240, 596, 16364,
- 236, 3236, 644, 16356,
- 204, 3224, 692, 16352,
- 172, 3212, 744, 16344,
- 144, 3196, 796, 16340,
- 116, 3180, 848, 16332,
- 88, 3160, 900, 16324,
- 60, 3136, 956, 16320,
- 36, 3112, 1012, 16312,
- 16, 3084, 1068, 16304,
- 16380, 3056, 1124, 16296,
- 16360, 3024, 1184, 16292,
- 16340, 2992, 1244, 16284,
- 16324, 2956, 1304, 16276,
- 16308, 2920, 1364, 16268,
- 16292, 2880, 1424, 16264,
- 16280, 2836, 1484, 16256,
- 16268, 2792, 1548, 16252,
- 16256, 2748, 1608, 16244,
- 16248, 2700, 1668, 16240,
- 16240, 2652, 1732, 16232,
- 16232, 2604, 1792, 16228,
- 16228, 2552, 1856, 16224,
- 16220, 2500, 1916, 16220,
- 16216, 2444, 1980, 16216,
- 16216, 2388, 2040, 16216,
- 16212, 2332, 2100, 16212,
- 16212, 2276, 2160, 16212,
- 16212, 2220, 2220, 16212 };
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_116[132] = {
+ 0x01A8, 0x0CB4, 0x01A4, 0x0000,
+ 0x017C, 0x0CB8, 0x01D0, 0x3FFC,
+ 0x0158, 0x0CB8, 0x01F8, 0x3FF8,
+ 0x0130, 0x0CB4, 0x0228, 0x3FF4,
+ 0x0110, 0x0CB0, 0x0254, 0x3FEC,
+ 0x00EC, 0x0CA8, 0x0284, 0x3FE8,
+ 0x00CC, 0x0C9C, 0x02B4, 0x3FE4,
+ 0x00AC, 0x0C90, 0x02E8, 0x3FDC,
+ 0x0090, 0x0C80, 0x031C, 0x3FD4,
+ 0x0070, 0x0C70, 0x0350, 0x3FD0,
+ 0x0058, 0x0C5C, 0x0384, 0x3FC8,
+ 0x003C, 0x0C48, 0x03BC, 0x3FC0,
+ 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+ 0x0010, 0x0C10, 0x042C, 0x3FB4,
+ 0x3FFC, 0x0BF4, 0x0464, 0x3FAC,
+ 0x3FE8, 0x0BD4, 0x04A0, 0x3FA4,
+ 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+ 0x3FC4, 0x0B8C, 0x0518, 0x3F98,
+ 0x3FB4, 0x0B68, 0x0554, 0x3F90,
+ 0x3FA8, 0x0B40, 0x0590, 0x3F88,
+ 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+ 0x3F90, 0x0AEC, 0x0608, 0x3F7C,
+ 0x3F84, 0x0ABC, 0x0648, 0x3F78,
+ 0x3F7C, 0x0A90, 0x0684, 0x3F70,
+ 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+ 0x3F6C, 0x0A2C, 0x0700, 0x3F68,
+ 0x3F64, 0x09F8, 0x0740, 0x3F64,
+ 0x3F60, 0x09C4, 0x077C, 0x3F60,
+ 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+ 0x3F58, 0x0958, 0x07F8, 0x3F58,
+ 0x3F58, 0x091C, 0x0834, 0x3F58,
+ 0x3F54, 0x08E4, 0x0870, 0x3F58,
+ 0x3F54, 0x08AC, 0x08AC, 0x3F54
+};
-static const uint16_t filter_4tap_64p_150[132] = {
- 696, 2700, 696, 0,
- 660, 2704, 732, 16380,
- 628, 2704, 768, 16376,
- 596, 2704, 804, 16372,
- 564, 2700, 844, 16364,
- 532, 2696, 884, 16360,
- 500, 2692, 924, 16356,
- 472, 2684, 964, 16352,
- 440, 2676, 1004, 16352,
- 412, 2668, 1044, 16348,
- 384, 2656, 1088, 16344,
- 360, 2644, 1128, 16340,
- 332, 2632, 1172, 16336,
- 308, 2616, 1216, 16336,
- 284, 2600, 1260, 16332,
- 260, 2580, 1304, 16332,
- 236, 2560, 1348, 16328,
- 216, 2540, 1392, 16328,
- 196, 2516, 1436, 16328,
- 176, 2492, 1480, 16324,
- 156, 2468, 1524, 16324,
- 136, 2440, 1568, 16328,
- 120, 2412, 1612, 16328,
- 104, 2384, 1656, 16328,
- 88, 2352, 1700, 16332,
- 72, 2324, 1744, 16332,
- 60, 2288, 1788, 16336,
- 48, 2256, 1828, 16340,
- 36, 2220, 1872, 16344,
- 24, 2184, 1912, 16352,
- 12, 2148, 1952, 16356,
- 4, 2112, 1996, 16364,
- 16380, 2072, 2036, 16372 };
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_149[132] = {
+ 0x02B8, 0x0A90, 0x02B8, 0x0000,
+ 0x0294, 0x0A94, 0x02DC, 0x3FFC,
+ 0x0274, 0x0A94, 0x0300, 0x3FF8,
+ 0x0250, 0x0A94, 0x0328, 0x3FF4,
+ 0x0230, 0x0A90, 0x0350, 0x3FF0,
+ 0x0214, 0x0A8C, 0x0374, 0x3FEC,
+ 0x01F0, 0x0A88, 0x03A0, 0x3FE8,
+ 0x01D4, 0x0A80, 0x03C8, 0x3FE4,
+ 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+ 0x0198, 0x0A70, 0x041C, 0x3FDC,
+ 0x0180, 0x0A64, 0x0444, 0x3FD8,
+ 0x0164, 0x0A54, 0x0470, 0x3FD8,
+ 0x0148, 0x0A48, 0x049C, 0x3FD4,
+ 0x0130, 0x0A38, 0x04C8, 0x3FD0,
+ 0x0118, 0x0A24, 0x04F4, 0x3FD0,
+ 0x0100, 0x0A14, 0x0520, 0x3FCC,
+ 0x00E8, 0x0A00, 0x054C, 0x3FCC,
+ 0x00D4, 0x09E8, 0x057C, 0x3FC8,
+ 0x00C0, 0x09D0, 0x05A8, 0x3FC8,
+ 0x00AC, 0x09B8, 0x05D4, 0x3FC8,
+ 0x0098, 0x09A0, 0x0600, 0x3FC8,
+ 0x0084, 0x0984, 0x0630, 0x3FC8,
+ 0x0074, 0x0964, 0x065C, 0x3FCC,
+ 0x0064, 0x0948, 0x0688, 0x3FCC,
+ 0x0054, 0x0928, 0x06B4, 0x3FD0,
+ 0x0044, 0x0908, 0x06E0, 0x3FD4,
+ 0x0038, 0x08E8, 0x070C, 0x3FD4,
+ 0x002C, 0x08C4, 0x0738, 0x3FD8,
+ 0x001C, 0x08A4, 0x0760, 0x3FE0,
+ 0x0014, 0x087C, 0x078C, 0x3FE4,
+ 0x0008, 0x0858, 0x07B4, 0x3FEC,
+ 0x0000, 0x0830, 0x07DC, 0x3FF4,
+ 0x3FFC, 0x0804, 0x0804, 0x3FFC
+};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_64p_183[132] = {
- 944, 2204, 944, 0,
- 916, 2204, 972, 0,
- 888, 2200, 996, 0,
- 860, 2200, 1024, 4,
- 832, 2196, 1052, 4,
- 808, 2192, 1080, 8,
- 780, 2188, 1108, 12,
- 756, 2180, 1140, 12,
- 728, 2176, 1168, 16,
- 704, 2168, 1196, 20,
- 680, 2160, 1224, 24,
- 656, 2152, 1252, 28,
- 632, 2144, 1280, 36,
- 608, 2132, 1308, 40,
- 584, 2120, 1336, 48,
- 560, 2112, 1364, 52,
- 536, 2096, 1392, 60,
- 516, 2084, 1420, 68,
- 492, 2072, 1448, 76,
- 472, 2056, 1476, 84,
- 452, 2040, 1504, 92,
- 428, 2024, 1532, 100,
- 408, 2008, 1560, 112,
- 392, 1992, 1584, 120,
- 372, 1972, 1612, 132,
- 352, 1956, 1636, 144,
- 336, 1936, 1664, 156,
- 316, 1916, 1688, 168,
- 300, 1896, 1712, 180,
- 284, 1876, 1736, 192,
- 268, 1852, 1760, 208,
- 252, 1832, 1784, 220,
- 236, 1808, 1808, 236 };
+ 0x03B0, 0x08A0, 0x03B0, 0x0000,
+ 0x0394, 0x08A0, 0x03CC, 0x0000,
+ 0x037C, 0x089C, 0x03E8, 0x0000,
+ 0x0360, 0x089C, 0x0400, 0x0004,
+ 0x0348, 0x0898, 0x041C, 0x0004,
+ 0x032C, 0x0894, 0x0438, 0x0008,
+ 0x0310, 0x0890, 0x0454, 0x000C,
+ 0x02F8, 0x0888, 0x0474, 0x000C,
+ 0x02DC, 0x0884, 0x0490, 0x0010,
+ 0x02C4, 0x087C, 0x04AC, 0x0014,
+ 0x02AC, 0x0874, 0x04C8, 0x0018,
+ 0x0290, 0x086C, 0x04E4, 0x0020,
+ 0x0278, 0x0864, 0x0500, 0x0024,
+ 0x0264, 0x0858, 0x051C, 0x0028,
+ 0x024C, 0x084C, 0x0538, 0x0030,
+ 0x0234, 0x0844, 0x0554, 0x0034,
+ 0x021C, 0x0838, 0x0570, 0x003C,
+ 0x0208, 0x0828, 0x058C, 0x0044,
+ 0x01F0, 0x081C, 0x05A8, 0x004C,
+ 0x01DC, 0x080C, 0x05C4, 0x0054,
+ 0x01C8, 0x07FC, 0x05E0, 0x005C,
+ 0x01B4, 0x07EC, 0x05FC, 0x0064,
+ 0x019C, 0x07DC, 0x0618, 0x0070,
+ 0x018C, 0x07CC, 0x0630, 0x0078,
+ 0x0178, 0x07B8, 0x064C, 0x0084,
+ 0x0164, 0x07A8, 0x0664, 0x0090,
+ 0x0150, 0x0794, 0x0680, 0x009C,
+ 0x0140, 0x0780, 0x0698, 0x00A8,
+ 0x0130, 0x076C, 0x06B0, 0x00B4,
+ 0x0120, 0x0758, 0x06C8, 0x00C0,
+ 0x0110, 0x0740, 0x06E0, 0x00D0,
+ 0x0100, 0x072C, 0x06F8, 0x00DC,
+ 0x00F0, 0x0714, 0x0710, 0x00EC
+};
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_5tap_64p_upscale[165] = {
- 15936, 2496, 2496, 15936, 0,
- 15948, 2404, 2580, 15924, 0,
- 15960, 2312, 2664, 15912, 4,
- 15976, 2220, 2748, 15904, 8,
- 15992, 2128, 2832, 15896, 12,
- 16004, 2036, 2912, 15888, 16,
- 16020, 1944, 2992, 15880, 20,
- 16036, 1852, 3068, 15876, 20,
- 16056, 1760, 3140, 15876, 24,
- 16072, 1668, 3216, 15872, 28,
- 16088, 1580, 3284, 15872, 32,
- 16104, 1492, 3352, 15876, 32,
- 16120, 1404, 3420, 15876, 36,
- 16140, 1316, 3480, 15884, 40,
- 16156, 1228, 3540, 15892, 40,
- 16172, 1144, 3600, 15900, 40,
- 16188, 1060, 3652, 15908, 44,
- 16204, 980, 3704, 15924, 44,
- 16220, 900, 3756, 15936, 44,
- 16236, 824, 3800, 15956, 44,
- 16248, 744, 3844, 15972, 44,
- 16264, 672, 3884, 15996, 44,
- 16276, 600, 3920, 16020, 44,
- 16292, 528, 3952, 16044, 40,
- 16304, 460, 3980, 16072, 40,
- 16316, 396, 4008, 16104, 36,
- 16328, 332, 4032, 16136, 32,
- 16336, 272, 4048, 16172, 28,
- 16348, 212, 4064, 16208, 24,
- 16356, 156, 4080, 16248, 16,
- 16368, 100, 4088, 16292, 12,
- 16376, 48, 4092, 16336, 4,
- 0, 0, 4096, 0, 0 };
+ 0x3E40, 0x09C0, 0x09C0, 0x3E40, 0x0000,
+ 0x3E50, 0x0964, 0x0A18, 0x3E34, 0x0000,
+ 0x3E5C, 0x0908, 0x0A6C, 0x3E2C, 0x0004,
+ 0x3E6C, 0x08AC, 0x0AC0, 0x3E20, 0x0008,
+ 0x3E78, 0x0850, 0x0B14, 0x3E18, 0x000C,
+ 0x3E88, 0x07F4, 0x0B60, 0x3E14, 0x0010,
+ 0x3E98, 0x0798, 0x0BB0, 0x3E0C, 0x0014,
+ 0x3EA8, 0x073C, 0x0C00, 0x3E08, 0x0014,
+ 0x3EB8, 0x06E4, 0x0C48, 0x3E04, 0x0018,
+ 0x3ECC, 0x0684, 0x0C90, 0x3E04, 0x001C,
+ 0x3EDC, 0x062C, 0x0CD4, 0x3E04, 0x0020,
+ 0x3EEC, 0x05D4, 0x0D1C, 0x3E04, 0x0020,
+ 0x3EFC, 0x057C, 0x0D5C, 0x3E08, 0x0024,
+ 0x3F0C, 0x0524, 0x0D98, 0x3E10, 0x0028,
+ 0x3F20, 0x04CC, 0x0DD8, 0x3E14, 0x0028,
+ 0x3F30, 0x0478, 0x0E14, 0x3E1C, 0x0028,
+ 0x3F40, 0x0424, 0x0E48, 0x3E28, 0x002C,
+ 0x3F50, 0x03D4, 0x0E7C, 0x3E34, 0x002C,
+ 0x3F60, 0x0384, 0x0EAC, 0x3E44, 0x002C,
+ 0x3F6C, 0x0338, 0x0EDC, 0x3E54, 0x002C,
+ 0x3F7C, 0x02E8, 0x0F08, 0x3E68, 0x002C,
+ 0x3F8C, 0x02A0, 0x0F2C, 0x3E7C, 0x002C,
+ 0x3F98, 0x0258, 0x0F50, 0x3E94, 0x002C,
+ 0x3FA4, 0x0210, 0x0F74, 0x3EB0, 0x0028,
+ 0x3FB0, 0x01CC, 0x0F90, 0x3ECC, 0x0028,
+ 0x3FC0, 0x018C, 0x0FA8, 0x3EE8, 0x0024,
+ 0x3FC8, 0x014C, 0x0FC0, 0x3F0C, 0x0020,
+ 0x3FD4, 0x0110, 0x0FD4, 0x3F2C, 0x001C,
+ 0x3FE0, 0x00D4, 0x0FE0, 0x3F54, 0x0018,
+ 0x3FE8, 0x009C, 0x0FF0, 0x3F7C, 0x0010,
+ 0x3FF0, 0x0064, 0x0FFC, 0x3FA4, 0x000C,
+ 0x3FFC, 0x0030, 0x0FFC, 0x3FD4, 0x0004,
+ 0x0000, 0x0000, 0x1000, 0x0000, 0x0000
+};
-static const uint16_t filter_5tap_64p_117[165] = {
- 16056, 2372, 2372, 16056, 0,
- 16052, 2312, 2432, 16060, 0,
- 16052, 2252, 2488, 16064, 0,
- 16052, 2188, 2548, 16072, 0,
- 16052, 2124, 2600, 16076, 0,
- 16052, 2064, 2656, 16088, 0,
- 16052, 2000, 2708, 16096, 0,
- 16056, 1932, 2760, 16108, 0,
- 16060, 1868, 2808, 16120, 0,
- 16064, 1804, 2856, 16132, 0,
- 16068, 1740, 2904, 16148, 16380,
- 16076, 1676, 2948, 16164, 16380,
- 16080, 1612, 2992, 16180, 16376,
- 16088, 1544, 3032, 16200, 16372,
- 16096, 1480, 3072, 16220, 16372,
- 16104, 1420, 3108, 16244, 16368,
- 16112, 1356, 3144, 16268, 16364,
- 16120, 1292, 3180, 16292, 16360,
- 16128, 1232, 3212, 16320, 16356,
- 16136, 1168, 3240, 16344, 16352,
- 16144, 1108, 3268, 16376, 16344,
- 16156, 1048, 3292, 20, 16340,
- 16164, 988, 3316, 52, 16332,
- 16172, 932, 3336, 88, 16328,
- 16184, 872, 3356, 124, 16320,
- 16192, 816, 3372, 160, 16316,
- 16204, 760, 3388, 196, 16308,
- 16212, 708, 3400, 236, 16300,
- 16220, 656, 3412, 276, 16292,
- 16232, 604, 3420, 320, 16284,
- 16240, 552, 3424, 364, 16276,
- 16248, 504, 3428, 408, 16268,
- 16256, 456, 3428, 456, 16256 };
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_116[165] = {
+ 0x3EDC, 0x0924, 0x0924, 0x3EDC, 0x0000,
+ 0x3ED8, 0x08EC, 0x095C, 0x3EE0, 0x0000,
+ 0x3ED4, 0x08B0, 0x0994, 0x3EE8, 0x0000,
+ 0x3ED0, 0x0878, 0x09C8, 0x3EF0, 0x0000,
+ 0x3ED0, 0x083C, 0x09FC, 0x3EF8, 0x0000,
+ 0x3ED0, 0x0800, 0x0A2C, 0x3F04, 0x0000,
+ 0x3ED0, 0x07C4, 0x0A5C, 0x3F10, 0x0000,
+ 0x3ED0, 0x0788, 0x0A8C, 0x3F1C, 0x0000,
+ 0x3ED0, 0x074C, 0x0AC0, 0x3F28, 0x3FFC,
+ 0x3ED4, 0x0710, 0x0AE8, 0x3F38, 0x3FFC,
+ 0x3ED8, 0x06D0, 0x0B18, 0x3F48, 0x3FF8,
+ 0x3EDC, 0x0694, 0x0B3C, 0x3F5C, 0x3FF8,
+ 0x3EE0, 0x0658, 0x0B68, 0x3F6C, 0x3FF4,
+ 0x3EE4, 0x061C, 0x0B90, 0x3F80, 0x3FF0,
+ 0x3EEC, 0x05DC, 0x0BB4, 0x3F98, 0x3FEC,
+ 0x3EF0, 0x05A0, 0x0BD8, 0x3FB0, 0x3FE8,
+ 0x3EF8, 0x0564, 0x0BF8, 0x3FC8, 0x3FE4,
+ 0x3EFC, 0x0528, 0x0C1C, 0x3FE0, 0x3FE0,
+ 0x3F04, 0x04EC, 0x0C38, 0x3FFC, 0x3FDC,
+ 0x3F0C, 0x04B4, 0x0C54, 0x0014, 0x3FD8,
+ 0x3F14, 0x047C, 0x0C70, 0x0030, 0x3FD0,
+ 0x3F1C, 0x0440, 0x0C88, 0x0050, 0x3FCC,
+ 0x3F24, 0x0408, 0x0CA0, 0x0070, 0x3FC4,
+ 0x3F2C, 0x03D0, 0x0CB0, 0x0094, 0x3FC0,
+ 0x3F34, 0x0398, 0x0CC4, 0x00B8, 0x3FB8,
+ 0x3F3C, 0x0364, 0x0CD4, 0x00DC, 0x3FB0,
+ 0x3F48, 0x032C, 0x0CE0, 0x0100, 0x3FAC,
+ 0x3F50, 0x02F8, 0x0CEC, 0x0128, 0x3FA4,
+ 0x3F58, 0x02C4, 0x0CF8, 0x0150, 0x3F9C,
+ 0x3F60, 0x0290, 0x0D00, 0x017C, 0x3F94,
+ 0x3F68, 0x0260, 0x0D04, 0x01A8, 0x3F8C,
+ 0x3F74, 0x0230, 0x0D04, 0x01D4, 0x3F84,
+ 0x3F7C, 0x0200, 0x0D08, 0x0200, 0x3F7C
+};
-static const uint16_t filter_5tap_64p_150[165] = {
- 16368, 2064, 2064, 16368, 0,
- 16352, 2028, 2100, 16380, 16380,
- 16340, 1996, 2132, 12, 16376,
- 16328, 1960, 2168, 24, 16376,
- 16316, 1924, 2204, 44, 16372,
- 16308, 1888, 2236, 60, 16368,
- 16296, 1848, 2268, 76, 16364,
- 16288, 1812, 2300, 96, 16360,
- 16280, 1772, 2328, 116, 16356,
- 16272, 1736, 2360, 136, 16352,
- 16268, 1696, 2388, 160, 16348,
- 16260, 1656, 2416, 180, 16344,
- 16256, 1616, 2440, 204, 16340,
- 16248, 1576, 2464, 228, 16336,
- 16244, 1536, 2492, 252, 16332,
- 16240, 1496, 2512, 276, 16324,
- 16240, 1456, 2536, 304, 16320,
- 16236, 1416, 2556, 332, 16316,
- 16232, 1376, 2576, 360, 16312,
- 16232, 1336, 2592, 388, 16308,
- 16232, 1296, 2612, 416, 16300,
- 16232, 1256, 2628, 448, 16296,
- 16232, 1216, 2640, 480, 16292,
- 16232, 1172, 2652, 512, 16288,
- 16232, 1132, 2664, 544, 16284,
- 16232, 1092, 2676, 576, 16280,
- 16236, 1056, 2684, 608, 16272,
- 16236, 1016, 2692, 644, 16268,
- 16240, 976, 2700, 680, 16264,
- 16240, 936, 2704, 712, 16260,
- 16244, 900, 2708, 748, 16256,
- 16248, 860, 2708, 788, 16252,
- 16248, 824, 2708, 824, 16248 };
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_149[165] = {
+ 0x3FF4, 0x080C, 0x080C, 0x3FF4, 0x0000,
+ 0x3FE8, 0x07E8, 0x0830, 0x0000, 0x0000,
+ 0x3FDC, 0x07C8, 0x0850, 0x0010, 0x3FFC,
+ 0x3FD0, 0x07A4, 0x0878, 0x001C, 0x3FF8,
+ 0x3FC4, 0x0780, 0x0898, 0x0030, 0x3FF4,
+ 0x3FB8, 0x075C, 0x08B8, 0x0040, 0x3FF4,
+ 0x3FB0, 0x0738, 0x08D8, 0x0050, 0x3FF0,
+ 0x3FA8, 0x0710, 0x08F8, 0x0064, 0x3FEC,
+ 0x3FA0, 0x06EC, 0x0914, 0x0078, 0x3FE8,
+ 0x3F98, 0x06C4, 0x0934, 0x008C, 0x3FE4,
+ 0x3F90, 0x06A0, 0x094C, 0x00A4, 0x3FE0,
+ 0x3F8C, 0x0678, 0x0968, 0x00B8, 0x3FDC,
+ 0x3F84, 0x0650, 0x0984, 0x00D0, 0x3FD8,
+ 0x3F80, 0x0628, 0x099C, 0x00E8, 0x3FD4,
+ 0x3F7C, 0x0600, 0x09B8, 0x0100, 0x3FCC,
+ 0x3F78, 0x05D8, 0x09D0, 0x0118, 0x3FC8,
+ 0x3F74, 0x05B0, 0x09E4, 0x0134, 0x3FC4,
+ 0x3F70, 0x0588, 0x09F8, 0x0150, 0x3FC0,
+ 0x3F70, 0x0560, 0x0A08, 0x016C, 0x3FBC,
+ 0x3F6C, 0x0538, 0x0A20, 0x0188, 0x3FB4,
+ 0x3F6C, 0x0510, 0x0A30, 0x01A4, 0x3FB0,
+ 0x3F6C, 0x04E8, 0x0A3C, 0x01C4, 0x3FAC,
+ 0x3F6C, 0x04C0, 0x0A48, 0x01E4, 0x3FA8,
+ 0x3F6C, 0x0498, 0x0A58, 0x0200, 0x3FA4,
+ 0x3F6C, 0x0470, 0x0A60, 0x0224, 0x3FA0,
+ 0x3F6C, 0x0448, 0x0A70, 0x0244, 0x3F98,
+ 0x3F70, 0x0420, 0x0A78, 0x0264, 0x3F94,
+ 0x3F70, 0x03F8, 0x0A80, 0x0288, 0x3F90,
+ 0x3F74, 0x03D4, 0x0A84, 0x02A8, 0x3F8C,
+ 0x3F74, 0x03AC, 0x0A8C, 0x02CC, 0x3F88,
+ 0x3F78, 0x0384, 0x0A90, 0x02F0, 0x3F84,
+ 0x3F7C, 0x0360, 0x0A90, 0x0314, 0x3F80,
+ 0x3F7C, 0x033C, 0x0A90, 0x033C, 0x3F7C
+};
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_5tap_64p_183[165] = {
- 228, 1816, 1816, 228, 0,
- 216, 1792, 1836, 248, 16380,
- 200, 1772, 1860, 264, 16376,
- 184, 1748, 1884, 280, 16376,
- 168, 1728, 1904, 300, 16372,
- 156, 1704, 1928, 316, 16368,
- 144, 1680, 1948, 336, 16364,
- 128, 1656, 1968, 356, 16364,
- 116, 1632, 1988, 376, 16360,
- 104, 1604, 2008, 396, 16356,
- 96, 1580, 2024, 416, 16356,
- 84, 1556, 2044, 440, 16352,
- 72, 1528, 2060, 460, 16348,
- 64, 1504, 2076, 484, 16348,
- 52, 1476, 2092, 504, 16344,
- 44, 1448, 2104, 528, 16344,
- 36, 1424, 2120, 552, 16340,
- 28, 1396, 2132, 576, 16340,
- 20, 1368, 2144, 600, 16340,
- 12, 1340, 2156, 624, 16336,
- 4, 1312, 2168, 652, 16336,
- 0, 1284, 2180, 676, 16336,
- 16376, 1256, 2188, 700, 16332,
- 16372, 1228, 2196, 728, 16332,
- 16368, 1200, 2204, 752, 16332,
- 16364, 1172, 2212, 780, 16332,
- 16356, 1144, 2216, 808, 16332,
- 16352, 1116, 2220, 836, 16332,
- 16352, 1084, 2224, 860, 16332,
- 16348, 1056, 2228, 888, 16336,
- 16344, 1028, 2232, 916, 16336,
- 16340, 1000, 2232, 944, 16336,
- 16340, 972, 2232, 972, 16340 };
+ 0x0168, 0x069C, 0x0698, 0x0164, 0x0000,
+ 0x0154, 0x068C, 0x06AC, 0x0174, 0x0000,
+ 0x0144, 0x0674, 0x06C0, 0x0188, 0x0000,
+ 0x0138, 0x0664, 0x06D0, 0x0198, 0x3FFC,
+ 0x0128, 0x0654, 0x06E0, 0x01A8, 0x3FFC,
+ 0x0118, 0x0640, 0x06F0, 0x01BC, 0x3FFC,
+ 0x010C, 0x0630, 0x0700, 0x01CC, 0x3FF8,
+ 0x00FC, 0x061C, 0x0710, 0x01E0, 0x3FF8,
+ 0x00F0, 0x060C, 0x071C, 0x01F0, 0x3FF8,
+ 0x00E4, 0x05F4, 0x072C, 0x0204, 0x3FF8,
+ 0x00D8, 0x05E4, 0x0738, 0x0218, 0x3FF4,
+ 0x00CC, 0x05D0, 0x0744, 0x022C, 0x3FF4,
+ 0x00C0, 0x05B8, 0x0754, 0x0240, 0x3FF4,
+ 0x00B4, 0x05A4, 0x0760, 0x0254, 0x3FF4,
+ 0x00A8, 0x0590, 0x076C, 0x0268, 0x3FF4,
+ 0x009C, 0x057C, 0x0778, 0x027C, 0x3FF4,
+ 0x0094, 0x0564, 0x0780, 0x0294, 0x3FF4,
+ 0x0088, 0x0550, 0x0788, 0x02A8, 0x3FF8,
+ 0x0080, 0x0538, 0x0794, 0x02BC, 0x3FF8,
+ 0x0074, 0x0524, 0x079C, 0x02D4, 0x3FF8,
+ 0x006C, 0x0510, 0x07A4, 0x02E8, 0x3FF8,
+ 0x0064, 0x04F4, 0x07AC, 0x0300, 0x3FFC,
+ 0x005C, 0x04E4, 0x07B0, 0x0314, 0x3FFC,
+ 0x0054, 0x04C8, 0x07B8, 0x032C, 0x0000,
+ 0x004C, 0x04B4, 0x07C0, 0x0340, 0x0000,
+ 0x0044, 0x04A0, 0x07C4, 0x0358, 0x0000,
+ 0x003C, 0x0488, 0x07C8, 0x0370, 0x0004,
+ 0x0038, 0x0470, 0x07CC, 0x0384, 0x0008,
+ 0x0030, 0x045C, 0x07D0, 0x039C, 0x0008,
+ 0x002C, 0x0444, 0x07D0, 0x03B4, 0x000C,
+ 0x0024, 0x042C, 0x07D4, 0x03CC, 0x0010,
+ 0x0020, 0x0414, 0x07D4, 0x03E0, 0x0018,
+ 0x001C, 0x03FC, 0x07D4, 0x03F8, 0x001C
+};
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_6tap_64p_upscale[198] = {
- 0, 0, 4092, 0, 0, 0,
- 12, 16332, 4092, 52, 16368, 0,
- 24, 16280, 4088, 108, 16356, 0,
- 36, 16236, 4080, 168, 16340, 0,
- 44, 16188, 4064, 228, 16324, 0,
- 56, 16148, 4052, 292, 16308, 0,
- 64, 16108, 4032, 356, 16292, 4,
- 72, 16072, 4008, 424, 16276, 4,
- 80, 16036, 3980, 492, 16256, 4,
- 88, 16004, 3952, 564, 16240, 8,
- 96, 15972, 3920, 636, 16220, 8,
- 100, 15944, 3884, 712, 16204, 12,
- 108, 15916, 3844, 788, 16184, 16,
- 112, 15896, 3800, 864, 16164, 20,
- 116, 15872, 3756, 944, 16144, 20,
- 120, 15852, 3708, 1024, 16124, 24,
- 120, 15836, 3656, 1108, 16104, 28,
- 124, 15824, 3600, 1192, 16084, 32,
- 124, 15808, 3544, 1276, 16064, 36,
- 124, 15800, 3484, 1360, 16044, 40,
- 128, 15792, 3420, 1448, 16024, 44,
- 128, 15784, 3352, 1536, 16004, 48,
- 124, 15780, 3288, 1624, 15988, 52,
- 124, 15776, 3216, 1712, 15968, 56,
- 124, 15776, 3144, 1800, 15948, 64,
- 120, 15776, 3068, 1888, 15932, 68,
- 120, 15780, 2992, 1976, 15912, 72,
- 116, 15784, 2916, 2064, 15896, 76,
- 112, 15792, 2836, 2152, 15880, 80,
- 108, 15796, 2752, 2244, 15868, 84,
- 104, 15804, 2672, 2328, 15852, 88,
- 104, 15816, 2588, 2416, 15840, 92,
- 100, 15828, 2504, 2504, 15828, 100 };
+ 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000,
+ 0x000C, 0x3FD0, 0x0FFC, 0x0034, 0x3FF4, 0x0000,
+ 0x0018, 0x3F9C, 0x0FF8, 0x006C, 0x3FE8, 0x0000,
+ 0x0024, 0x3F6C, 0x0FF0, 0x00A8, 0x3FD8, 0x0000,
+ 0x002C, 0x3F44, 0x0FE4, 0x00E4, 0x3FC8, 0x0000,
+ 0x0038, 0x3F18, 0x0FD4, 0x0124, 0x3FB8, 0x0000,
+ 0x0040, 0x3EF0, 0x0FC0, 0x0164, 0x3FA8, 0x0004,
+ 0x0048, 0x3EC8, 0x0FAC, 0x01A8, 0x3F98, 0x0004,
+ 0x0050, 0x3EA8, 0x0F94, 0x01EC, 0x3F84, 0x0004,
+ 0x0058, 0x3E84, 0x0F74, 0x0234, 0x3F74, 0x0008,
+ 0x0060, 0x3E68, 0x0F54, 0x027C, 0x3F60, 0x0008,
+ 0x0064, 0x3E4C, 0x0F30, 0x02C8, 0x3F4C, 0x000C,
+ 0x006C, 0x3E30, 0x0F04, 0x0314, 0x3F3C, 0x0010,
+ 0x0070, 0x3E18, 0x0EDC, 0x0360, 0x3F28, 0x0014,
+ 0x0074, 0x3E04, 0x0EB0, 0x03B0, 0x3F14, 0x0014,
+ 0x0078, 0x3DF0, 0x0E80, 0x0400, 0x3F00, 0x0018,
+ 0x0078, 0x3DE0, 0x0E4C, 0x0454, 0x3EEC, 0x001C,
+ 0x007C, 0x3DD0, 0x0E14, 0x04A8, 0x3ED8, 0x0020,
+ 0x007C, 0x3DC4, 0x0DDC, 0x04FC, 0x3EC4, 0x0024,
+ 0x007C, 0x3DBC, 0x0DA0, 0x0550, 0x3EB0, 0x0028,
+ 0x0080, 0x3DB4, 0x0D5C, 0x05A8, 0x3E9C, 0x002C,
+ 0x0080, 0x3DAC, 0x0D1C, 0x0600, 0x3E88, 0x0030,
+ 0x007C, 0x3DA8, 0x0CDC, 0x0658, 0x3E74, 0x0034,
+ 0x007C, 0x3DA4, 0x0C94, 0x06B0, 0x3E64, 0x0038,
+ 0x007C, 0x3DA4, 0x0C48, 0x0708, 0x3E50, 0x0040,
+ 0x0078, 0x3DA4, 0x0C00, 0x0760, 0x3E40, 0x0044,
+ 0x0078, 0x3DA8, 0x0BB4, 0x07B8, 0x3E2C, 0x0048,
+ 0x0074, 0x3DAC, 0x0B68, 0x0810, 0x3E1C, 0x004C,
+ 0x0070, 0x3DB4, 0x0B18, 0x0868, 0x3E0C, 0x0050,
+ 0x006C, 0x3DBC, 0x0AC4, 0x08C4, 0x3DFC, 0x0054,
+ 0x0068, 0x3DC4, 0x0A74, 0x0918, 0x3DF0, 0x0058,
+ 0x0068, 0x3DCC, 0x0A20, 0x0970, 0x3DE0, 0x005C,
+ 0x0064, 0x3DD4, 0x09C8, 0x09C8, 0x3DD4, 0x0064
+};
-static const uint16_t filter_6tap_64p_117[198] = {
- 16168, 476, 3568, 476, 16168, 0,
- 16180, 428, 3564, 528, 16156, 0,
- 16192, 376, 3556, 584, 16144, 4,
- 16204, 328, 3548, 636, 16128, 4,
- 16216, 280, 3540, 692, 16116, 8,
- 16228, 232, 3524, 748, 16104, 12,
- 16240, 188, 3512, 808, 16092, 12,
- 16252, 148, 3492, 864, 16080, 16,
- 16264, 104, 3472, 924, 16068, 16,
- 16276, 64, 3452, 984, 16056, 20,
- 16284, 28, 3428, 1044, 16048, 24,
- 16296, 16376, 3400, 1108, 16036, 24,
- 16304, 16340, 3372, 1168, 16024, 28,
- 16316, 16304, 3340, 1232, 16016, 32,
- 16324, 16272, 3308, 1296, 16004, 32,
- 16332, 16244, 3272, 1360, 15996, 36,
- 16344, 16212, 3236, 1424, 15988, 36,
- 16352, 16188, 3200, 1488, 15980, 40,
- 16360, 16160, 3160, 1552, 15972, 40,
- 16368, 16136, 3116, 1616, 15964, 40,
- 16372, 16112, 3072, 1680, 15956, 44,
- 16380, 16092, 3028, 1744, 15952, 44,
- 0, 16072, 2980, 1808, 15948, 44,
- 8, 16052, 2932, 1872, 15944, 48,
- 12, 16036, 2880, 1936, 15940, 48,
- 16, 16020, 2828, 2000, 15936, 48,
- 20, 16008, 2776, 2064, 15936, 48,
- 24, 15996, 2724, 2128, 15936, 48,
- 28, 15984, 2668, 2192, 15936, 48,
- 32, 15972, 2612, 2252, 15940, 44,
- 36, 15964, 2552, 2316, 15940, 44,
- 40, 15956, 2496, 2376, 15944, 44,
- 40, 15952, 2436, 2436, 15952, 40 };
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_116[198] = {
+ 0x3F0C, 0x0240, 0x0D68, 0x0240, 0x3F0C, 0x0000,
+ 0x3F18, 0x0210, 0x0D64, 0x0274, 0x3F00, 0x0000,
+ 0x3F24, 0x01E0, 0x0D58, 0x02A8, 0x3EF8, 0x0004,
+ 0x3F2C, 0x01B0, 0x0D58, 0x02DC, 0x3EEC, 0x0004,
+ 0x3F38, 0x0180, 0x0D50, 0x0310, 0x3EE0, 0x0008,
+ 0x3F44, 0x0154, 0x0D40, 0x0348, 0x3ED8, 0x0008,
+ 0x3F50, 0x0128, 0x0D34, 0x037C, 0x3ECC, 0x000C,
+ 0x3F5C, 0x00FC, 0x0D20, 0x03B4, 0x3EC4, 0x0010,
+ 0x3F64, 0x00D4, 0x0D14, 0x03EC, 0x3EB8, 0x0010,
+ 0x3F70, 0x00AC, 0x0CFC, 0x0424, 0x3EB0, 0x0014,
+ 0x3F78, 0x0084, 0x0CE8, 0x0460, 0x3EA8, 0x0014,
+ 0x3F84, 0x0060, 0x0CCC, 0x0498, 0x3EA0, 0x0018,
+ 0x3F90, 0x003C, 0x0CB4, 0x04D0, 0x3E98, 0x0018,
+ 0x3F98, 0x0018, 0x0C9C, 0x050C, 0x3E90, 0x0018,
+ 0x3FA0, 0x3FFC, 0x0C78, 0x0548, 0x3E88, 0x001C,
+ 0x3FAC, 0x3FDC, 0x0C54, 0x0584, 0x3E84, 0x001C,
+ 0x3FB4, 0x3FBC, 0x0C3C, 0x05BC, 0x3E7C, 0x001C,
+ 0x3FBC, 0x3FA0, 0x0C14, 0x05F8, 0x3E78, 0x0020,
+ 0x3FC4, 0x3F84, 0x0BF0, 0x0634, 0x3E74, 0x0020,
+ 0x3FCC, 0x3F68, 0x0BCC, 0x0670, 0x3E70, 0x0020,
+ 0x3FD4, 0x3F50, 0x0BA4, 0x06AC, 0x3E6C, 0x0020,
+ 0x3FDC, 0x3F38, 0x0B78, 0x06E8, 0x3E6C, 0x0020,
+ 0x3FE0, 0x3F24, 0x0B50, 0x0724, 0x3E68, 0x0020,
+ 0x3FE8, 0x3F0C, 0x0B24, 0x0760, 0x3E68, 0x0020,
+ 0x3FF0, 0x3EFC, 0x0AF4, 0x0798, 0x3E68, 0x0020,
+ 0x3FF4, 0x3EE8, 0x0AC8, 0x07D4, 0x3E68, 0x0020,
+ 0x3FFC, 0x3ED8, 0x0A94, 0x0810, 0x3E6C, 0x001C,
+ 0x0000, 0x3EC8, 0x0A64, 0x0848, 0x3E70, 0x001C,
+ 0x0000, 0x3EB8, 0x0A38, 0x0880, 0x3E74, 0x001C,
+ 0x0004, 0x3EAC, 0x0A04, 0x08BC, 0x3E78, 0x0018,
+ 0x0008, 0x3EA4, 0x09D0, 0x08F4, 0x3E7C, 0x0014,
+ 0x000C, 0x3E98, 0x0998, 0x092C, 0x3E84, 0x0014,
+ 0x0010, 0x3E90, 0x0964, 0x0960, 0x3E8C, 0x0010
+};
-static const uint16_t filter_6tap_64p_150[198] = {
- 16148, 920, 2724, 920, 16148, 0,
- 16152, 880, 2724, 956, 16148, 0,
- 16152, 844, 2720, 996, 16144, 0,
- 16156, 804, 2716, 1032, 16144, 0,
- 16156, 768, 2712, 1072, 16144, 0,
- 16160, 732, 2708, 1112, 16144, 16380,
- 16164, 696, 2700, 1152, 16144, 16380,
- 16168, 660, 2692, 1192, 16148, 16380,
- 16172, 628, 2684, 1232, 16148, 16380,
- 16176, 592, 2672, 1272, 16152, 16376,
- 16180, 560, 2660, 1312, 16152, 16376,
- 16184, 524, 2648, 1348, 16156, 16376,
- 16192, 492, 2632, 1388, 16160, 16372,
- 16196, 460, 2616, 1428, 16164, 16372,
- 16200, 432, 2600, 1468, 16168, 16368,
- 16204, 400, 2584, 1508, 16176, 16364,
- 16212, 368, 2564, 1548, 16180, 16364,
- 16216, 340, 2544, 1588, 16188, 16360,
- 16220, 312, 2524, 1628, 16196, 16356,
- 16228, 284, 2504, 1668, 16204, 16356,
- 16232, 256, 2480, 1704, 16212, 16352,
- 16240, 232, 2456, 1744, 16224, 16348,
- 16244, 204, 2432, 1780, 16232, 16344,
- 16248, 180, 2408, 1820, 16244, 16340,
- 16256, 156, 2380, 1856, 16256, 16336,
- 16260, 132, 2352, 1896, 16268, 16332,
- 16268, 108, 2324, 1932, 16280, 16328,
- 16272, 88, 2296, 1968, 16292, 16324,
- 16276, 64, 2268, 2004, 16308, 16320,
- 16284, 44, 2236, 2036, 16324, 16312,
- 16288, 24, 2204, 2072, 16340, 16308,
- 16292, 8, 2172, 2108, 16356, 16304,
- 16300, 16372, 2140, 2140, 16372, 16300 };
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_149[198] = {
+ 0x3F14, 0x0394, 0x0AB0, 0x0394, 0x3F14, 0x0000,
+ 0x3F18, 0x036C, 0x0AB0, 0x03B8, 0x3F14, 0x0000,
+ 0x3F18, 0x0348, 0x0AAC, 0x03E0, 0x3F14, 0x0000,
+ 0x3F1C, 0x0320, 0x0AAC, 0x0408, 0x3F10, 0x0000,
+ 0x3F20, 0x02FC, 0x0AA8, 0x042C, 0x3F10, 0x0000,
+ 0x3F24, 0x02D8, 0x0AA0, 0x0454, 0x3F10, 0x0000,
+ 0x3F28, 0x02B4, 0x0A98, 0x047C, 0x3F10, 0x0000,
+ 0x3F28, 0x0290, 0x0A90, 0x04A4, 0x3F14, 0x0000,
+ 0x3F30, 0x026C, 0x0A84, 0x04CC, 0x3F14, 0x0000,
+ 0x3F34, 0x024C, 0x0A7C, 0x04F4, 0x3F14, 0x3FFC,
+ 0x3F38, 0x0228, 0x0A70, 0x051C, 0x3F18, 0x3FFC,
+ 0x3F3C, 0x0208, 0x0A64, 0x0544, 0x3F1C, 0x3FF8,
+ 0x3F40, 0x01E8, 0x0A54, 0x056C, 0x3F20, 0x3FF8,
+ 0x3F44, 0x01C8, 0x0A48, 0x0594, 0x3F24, 0x3FF4,
+ 0x3F4C, 0x01A8, 0x0A34, 0x05BC, 0x3F28, 0x3FF4,
+ 0x3F50, 0x0188, 0x0A28, 0x05E4, 0x3F2C, 0x3FF0,
+ 0x3F54, 0x016C, 0x0A10, 0x060C, 0x3F34, 0x3FF0,
+ 0x3F5C, 0x014C, 0x09FC, 0x0634, 0x3F3C, 0x3FEC,
+ 0x3F60, 0x0130, 0x09EC, 0x065C, 0x3F40, 0x3FE8,
+ 0x3F68, 0x0114, 0x09D0, 0x0684, 0x3F48, 0x3FE8,
+ 0x3F6C, 0x00F8, 0x09B8, 0x06AC, 0x3F54, 0x3FE4,
+ 0x3F74, 0x00E0, 0x09A0, 0x06D0, 0x3F5C, 0x3FE0,
+ 0x3F78, 0x00C4, 0x098C, 0x06F8, 0x3F64, 0x3FDC,
+ 0x3F7C, 0x00AC, 0x0970, 0x0720, 0x3F70, 0x3FD8,
+ 0x3F84, 0x0094, 0x0954, 0x0744, 0x3F7C, 0x3FD4,
+ 0x3F88, 0x007C, 0x093C, 0x0768, 0x3F88, 0x3FD0,
+ 0x3F90, 0x0064, 0x091C, 0x0790, 0x3F94, 0x3FCC,
+ 0x3F94, 0x0050, 0x08FC, 0x07B4, 0x3FA4, 0x3FC8,
+ 0x3F98, 0x003C, 0x08E0, 0x07D8, 0x3FB0, 0x3FC4,
+ 0x3FA0, 0x0024, 0x08C0, 0x07FC, 0x3FC0, 0x3FC0,
+ 0x3FA4, 0x0014, 0x08A4, 0x081C, 0x3FD0, 0x3FB8,
+ 0x3FAC, 0x0000, 0x0880, 0x0840, 0x3FE0, 0x3FB4,
+ 0x3FB0, 0x3FF0, 0x0860, 0x0860, 0x3FF0, 0x3FB0
+};
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_6tap_64p_183[198] = {
- 16296, 1032, 2196, 1032, 16296, 0,
- 16292, 1004, 2200, 1060, 16304, 16380,
- 16288, 976, 2200, 1088, 16308, 16380,
- 16284, 952, 2196, 1116, 16312, 16376,
- 16284, 924, 2196, 1144, 16320, 16376,
- 16280, 900, 2192, 1172, 16324, 16372,
- 16276, 872, 2192, 1200, 16332, 16368,
- 16276, 848, 2188, 1228, 16340, 16368,
- 16272, 820, 2180, 1256, 16348, 16364,
- 16272, 796, 2176, 1280, 16356, 16360,
- 16268, 768, 2168, 1308, 16364, 16360,
- 16268, 744, 2164, 1336, 16372, 16356,
- 16268, 716, 2156, 1364, 16380, 16352,
- 16264, 692, 2148, 1392, 4, 16352,
- 16264, 668, 2136, 1420, 16, 16348,
- 16264, 644, 2128, 1448, 28, 16344,
- 16264, 620, 2116, 1472, 36, 16340,
- 16264, 596, 2108, 1500, 48, 16340,
- 16268, 572, 2096, 1524, 60, 16336,
- 16268, 548, 2080, 1552, 72, 16332,
- 16268, 524, 2068, 1576, 88, 16328,
- 16268, 504, 2056, 1604, 100, 16324,
- 16272, 480, 2040, 1628, 112, 16324,
- 16272, 456, 2024, 1652, 128, 16320,
- 16272, 436, 2008, 1680, 144, 16316,
- 16276, 416, 1992, 1704, 156, 16312,
- 16276, 392, 1976, 1724, 172, 16308,
- 16280, 372, 1956, 1748, 188, 16308,
- 16280, 352, 1940, 1772, 204, 16304,
- 16284, 332, 1920, 1796, 224, 16300,
- 16288, 312, 1900, 1816, 240, 16296,
- 16288, 296, 1880, 1840, 256, 16296,
- 16292, 276, 1860, 1860, 276, 16292 };
+ 0x002C, 0x0420, 0x076C, 0x041C, 0x002C, 0x0000,
+ 0x0028, 0x040C, 0x0768, 0x0430, 0x0034, 0x0000,
+ 0x0020, 0x03F8, 0x0768, 0x0448, 0x003C, 0x3FFC,
+ 0x0018, 0x03E4, 0x0768, 0x045C, 0x0044, 0x3FFC,
+ 0x0014, 0x03D0, 0x0768, 0x0470, 0x004C, 0x3FF8,
+ 0x000C, 0x03BC, 0x0764, 0x0484, 0x0058, 0x3FF8,
+ 0x0008, 0x03A4, 0x0764, 0x049C, 0x0060, 0x3FF4,
+ 0x0004, 0x0390, 0x0760, 0x04B0, 0x0068, 0x3FF4,
+ 0x0000, 0x037C, 0x0760, 0x04C4, 0x0070, 0x3FF0,
+ 0x3FFC, 0x0364, 0x075C, 0x04D8, 0x007C, 0x3FF0,
+ 0x3FF8, 0x0350, 0x0758, 0x04F0, 0x0084, 0x3FEC,
+ 0x3FF4, 0x033C, 0x0750, 0x0504, 0x0090, 0x3FEC,
+ 0x3FF0, 0x0328, 0x074C, 0x0518, 0x009C, 0x3FE8,
+ 0x3FEC, 0x0314, 0x0744, 0x052C, 0x00A8, 0x3FE8,
+ 0x3FE8, 0x0304, 0x0740, 0x0540, 0x00B0, 0x3FE4,
+ 0x3FE4, 0x02EC, 0x073C, 0x0554, 0x00BC, 0x3FE4,
+ 0x3FE0, 0x02DC, 0x0734, 0x0568, 0x00C8, 0x3FE0,
+ 0x3FE0, 0x02C4, 0x072C, 0x057C, 0x00D4, 0x3FE0,
+ 0x3FDC, 0x02B4, 0x0724, 0x058C, 0x00E4, 0x3FDC,
+ 0x3FDC, 0x02A0, 0x0718, 0x05A0, 0x00F0, 0x3FDC,
+ 0x3FD8, 0x028C, 0x0714, 0x05B4, 0x00FC, 0x3FD8,
+ 0x3FD8, 0x0278, 0x0704, 0x05C8, 0x010C, 0x3FD8,
+ 0x3FD4, 0x0264, 0x0700, 0x05D8, 0x0118, 0x3FD8,
+ 0x3FD4, 0x0254, 0x06F0, 0x05EC, 0x0128, 0x3FD4,
+ 0x3FD0, 0x0244, 0x06E8, 0x05FC, 0x0134, 0x3FD4,
+ 0x3FD0, 0x0230, 0x06DC, 0x060C, 0x0144, 0x3FD4,
+ 0x3FD0, 0x021C, 0x06D0, 0x0620, 0x0154, 0x3FD0,
+ 0x3FD0, 0x0208, 0x06C4, 0x0630, 0x0164, 0x3FD0,
+ 0x3FD0, 0x01F8, 0x06B8, 0x0640, 0x0170, 0x3FD0,
+ 0x3FCC, 0x01E8, 0x06AC, 0x0650, 0x0180, 0x3FD0,
+ 0x3FCC, 0x01D8, 0x069C, 0x0660, 0x0190, 0x3FD0,
+ 0x3FCC, 0x01C4, 0x068C, 0x0670, 0x01A4, 0x3FD0,
+ 0x3FCC, 0x01B8, 0x0680, 0x067C, 0x01B4, 0x3FCC
+};
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_7tap_64p_upscale[231] = {
- 176, 15760, 2488, 2488, 15760, 176, 0,
- 172, 15772, 2404, 2572, 15752, 180, 16380,
- 168, 15784, 2324, 2656, 15740, 184, 16380,
- 164, 15800, 2240, 2736, 15732, 188, 16376,
- 160, 15812, 2152, 2816, 15728, 192, 16376,
- 152, 15828, 2068, 2896, 15724, 192, 16376,
- 148, 15848, 1984, 2972, 15720, 196, 16372,
- 140, 15864, 1896, 3048, 15720, 196, 16372,
- 136, 15884, 1812, 3124, 15720, 196, 16368,
- 128, 15900, 1724, 3196, 15720, 196, 16368,
- 120, 15920, 1640, 3268, 15724, 196, 16368,
- 116, 15940, 1552, 3336, 15732, 196, 16364,
- 108, 15964, 1468, 3400, 15740, 196, 16364,
- 104, 15984, 1384, 3464, 15748, 192, 16364,
- 96, 16004, 1300, 3524, 15760, 188, 16364,
- 88, 16028, 1216, 3584, 15776, 184, 16364,
- 84, 16048, 1132, 3640, 15792, 180, 16360,
- 76, 16072, 1048, 3692, 15812, 176, 16360,
- 68, 16092, 968, 3744, 15832, 168, 16360,
- 64, 16116, 888, 3788, 15856, 160, 16360,
- 56, 16140, 812, 3832, 15884, 152, 16360,
- 52, 16160, 732, 3876, 15912, 144, 16360,
- 44, 16184, 656, 3912, 15944, 136, 16364,
- 40, 16204, 584, 3944, 15976, 124, 16364,
- 32, 16228, 512, 3976, 16012, 116, 16364,
- 28, 16248, 440, 4004, 16048, 104, 16364,
- 24, 16268, 372, 4028, 16092, 88, 16368,
- 20, 16288, 304, 4048, 16132, 76, 16368,
- 12, 16308, 240, 4064, 16180, 60, 16372,
- 8, 16328, 176, 4076, 16228, 48, 16372,
- 4, 16348, 112, 4088, 16276, 32, 16376,
- 0, 16364, 56, 4092, 16328, 16, 16380,
- 0, 0, 0, 4096, 0, 0, 0 };
+ 0x00B0, 0x3D98, 0x09BC, 0x09B8, 0x3D94, 0x00B0, 0x0000,
+ 0x00AC, 0x3DA0, 0x0968, 0x0A10, 0x3D88, 0x00B4, 0x0000,
+ 0x00A8, 0x3DAC, 0x0914, 0x0A60, 0x3D80, 0x00B8, 0x0000,
+ 0x00A4, 0x3DB8, 0x08C0, 0x0AB4, 0x3D78, 0x00BC, 0x3FFC,
+ 0x00A0, 0x3DC8, 0x0868, 0x0B00, 0x3D74, 0x00C0, 0x3FFC,
+ 0x0098, 0x3DD8, 0x0818, 0x0B54, 0x3D6C, 0x00C0, 0x3FF8,
+ 0x0094, 0x3DE8, 0x07C0, 0x0B9C, 0x3D6C, 0x00C4, 0x3FF8,
+ 0x008C, 0x3DFC, 0x0768, 0x0BEC, 0x3D68, 0x00C4, 0x3FF8,
+ 0x0088, 0x3E0C, 0x0714, 0x0C38, 0x3D68, 0x00C4, 0x3FF4,
+ 0x0080, 0x3E20, 0x06BC, 0x0C80, 0x3D6C, 0x00C4, 0x3FF4,
+ 0x0078, 0x3E34, 0x0668, 0x0CC4, 0x3D70, 0x00C4, 0x3FF4,
+ 0x0074, 0x3E48, 0x0610, 0x0D08, 0x3D78, 0x00C4, 0x3FF0,
+ 0x006C, 0x3E5C, 0x05BC, 0x0D48, 0x3D80, 0x00C4, 0x3FF0,
+ 0x0068, 0x3E74, 0x0568, 0x0D84, 0x3D88, 0x00C0, 0x3FF0,
+ 0x0060, 0x3E88, 0x0514, 0x0DC8, 0x3D94, 0x00BC, 0x3FEC,
+ 0x0058, 0x3E9C, 0x04C0, 0x0E04, 0x3DA4, 0x00B8, 0x3FEC,
+ 0x0054, 0x3EB4, 0x046C, 0x0E38, 0x3DB4, 0x00B4, 0x3FEC,
+ 0x004C, 0x3ECC, 0x0418, 0x0E6C, 0x3DC8, 0x00B0, 0x3FEC,
+ 0x0044, 0x3EE0, 0x03C8, 0x0EA4, 0x3DDC, 0x00A8, 0x3FEC,
+ 0x0040, 0x3EF8, 0x0378, 0x0ED0, 0x3DF4, 0x00A0, 0x3FEC,
+ 0x0038, 0x3F0C, 0x032C, 0x0EFC, 0x3E10, 0x0098, 0x3FEC,
+ 0x0034, 0x3F24, 0x02DC, 0x0F24, 0x3E2C, 0x0090, 0x3FEC,
+ 0x002C, 0x3F38, 0x0294, 0x0F4C, 0x3E48, 0x0088, 0x3FEC,
+ 0x0028, 0x3F50, 0x0248, 0x0F68, 0x3E6C, 0x007C, 0x3FF0,
+ 0x0020, 0x3F64, 0x0200, 0x0F88, 0x3E90, 0x0074, 0x3FF0,
+ 0x001C, 0x3F7C, 0x01B8, 0x0FA4, 0x3EB4, 0x0068, 0x3FF0,
+ 0x0018, 0x3F90, 0x0174, 0x0FBC, 0x3EDC, 0x0058, 0x3FF4,
+ 0x0014, 0x3FA4, 0x0130, 0x0FD0, 0x3F08, 0x004C, 0x3FF4,
+ 0x000C, 0x3FB8, 0x00F0, 0x0FE4, 0x3F34, 0x003C, 0x3FF8,
+ 0x0008, 0x3FCC, 0x00B0, 0x0FF0, 0x3F64, 0x0030, 0x3FF8,
+ 0x0004, 0x3FDC, 0x0070, 0x0FFC, 0x3F98, 0x0020, 0x3FFC,
+ 0x0000, 0x3FF0, 0x0038, 0x0FFC, 0x3FCC, 0x0010, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000
+};
-static const uint16_t filter_7tap_64p_117[231] = {
- 92, 15868, 2464, 2464, 15868, 92, 0,
- 96, 15864, 2404, 2528, 15876, 88, 0,
- 100, 15860, 2344, 2584, 15884, 84, 0,
- 104, 15856, 2280, 2644, 15892, 76, 0,
- 108, 15852, 2216, 2700, 15904, 72, 0,
- 108, 15852, 2152, 2756, 15916, 64, 0,
- 112, 15852, 2088, 2812, 15932, 60, 0,
- 112, 15852, 2024, 2864, 15948, 52, 0,
- 112, 15856, 1960, 2916, 15964, 44, 0,
- 116, 15860, 1892, 2964, 15984, 36, 0,
- 116, 15864, 1828, 3016, 16004, 24, 4,
- 116, 15868, 1760, 3060, 16024, 16, 4,
- 116, 15876, 1696, 3108, 16048, 8, 8,
- 116, 15884, 1628, 3152, 16072, 16380, 8,
- 112, 15892, 1564, 3192, 16100, 16372, 8,
- 112, 15900, 1496, 3232, 16124, 16360, 12,
- 112, 15908, 1428, 3268, 16156, 16348, 12,
- 108, 15920, 1364, 3304, 16188, 16336, 16,
- 108, 15928, 1300, 3340, 16220, 16324, 20,
- 104, 15940, 1232, 3372, 16252, 16312, 20,
- 104, 15952, 1168, 3400, 16288, 16300, 24,
- 100, 15964, 1104, 3428, 16328, 16284, 28,
- 96, 15980, 1040, 3452, 16364, 16272, 28,
- 96, 15992, 976, 3476, 20, 16256, 32,
- 92, 16004, 916, 3496, 64, 16244, 36,
- 88, 16020, 856, 3516, 108, 16228, 40,
- 84, 16032, 792, 3532, 152, 16216, 44,
- 80, 16048, 732, 3544, 200, 16200, 48,
- 80, 16064, 676, 3556, 248, 16184, 48,
- 76, 16080, 616, 3564, 296, 16168, 52,
- 72, 16092, 560, 3568, 344, 16156, 56,
- 68, 16108, 504, 3572, 396, 16140, 60,
- 64, 16124, 452, 3576, 452, 16124, 64 };
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_116[231] = {
+ 0x0020, 0x3E58, 0x0988, 0x0988, 0x3E58, 0x0020, 0x0000,
+ 0x0024, 0x3E4C, 0x0954, 0x09C0, 0x3E64, 0x0018, 0x0000,
+ 0x002C, 0x3E44, 0x091C, 0x09F4, 0x3E70, 0x0010, 0x0000,
+ 0x0030, 0x3E3C, 0x08E8, 0x0A24, 0x3E80, 0x0008, 0x0000,
+ 0x0034, 0x3E34, 0x08AC, 0x0A5C, 0x3E90, 0x0000, 0x0000,
+ 0x003C, 0x3E30, 0x0870, 0x0A84, 0x3EA0, 0x3FFC, 0x0004,
+ 0x0040, 0x3E28, 0x0838, 0x0AB4, 0x3EB4, 0x3FF4, 0x0004,
+ 0x0044, 0x3E24, 0x07FC, 0x0AE4, 0x3EC8, 0x3FEC, 0x0004,
+ 0x0048, 0x3E24, 0x07C4, 0x0B08, 0x3EDC, 0x3FE4, 0x0008,
+ 0x0048, 0x3E20, 0x0788, 0x0B3C, 0x3EF4, 0x3FD8, 0x0008,
+ 0x004C, 0x3E20, 0x074C, 0x0B60, 0x3F0C, 0x3FD0, 0x000C,
+ 0x0050, 0x3E20, 0x0710, 0x0B8C, 0x3F24, 0x3FC4, 0x000C,
+ 0x0050, 0x3E20, 0x06D4, 0x0BB0, 0x3F40, 0x3FBC, 0x0010,
+ 0x0054, 0x3E24, 0x0698, 0x0BD4, 0x3F5C, 0x3FB0, 0x0010,
+ 0x0054, 0x3E24, 0x065C, 0x0BFC, 0x3F78, 0x3FA4, 0x0014,
+ 0x0054, 0x3E28, 0x0624, 0x0C1C, 0x3F98, 0x3F98, 0x0014,
+ 0x0058, 0x3E2C, 0x05E4, 0x0C3C, 0x3FB8, 0x3F8C, 0x0018,
+ 0x0058, 0x3E34, 0x05A8, 0x0C58, 0x3FD8, 0x3F80, 0x001C,
+ 0x0058, 0x3E38, 0x0570, 0x0C78, 0x3FF8, 0x3F74, 0x001C,
+ 0x0058, 0x3E40, 0x0534, 0x0C94, 0x0018, 0x3F68, 0x0020,
+ 0x0058, 0x3E48, 0x04F4, 0x0CAC, 0x0040, 0x3F5C, 0x0024,
+ 0x0058, 0x3E50, 0x04BC, 0x0CC4, 0x0064, 0x3F50, 0x0024,
+ 0x0054, 0x3E58, 0x0484, 0x0CD8, 0x008C, 0x3F44, 0x0028,
+ 0x0054, 0x3E60, 0x0448, 0x0CEC, 0x00B4, 0x3F38, 0x002C,
+ 0x0054, 0x3E68, 0x0410, 0x0CFC, 0x00E0, 0x3F28, 0x0030,
+ 0x0054, 0x3E74, 0x03D4, 0x0D0C, 0x010C, 0x3F1C, 0x0030,
+ 0x0050, 0x3E7C, 0x03A0, 0x0D18, 0x0138, 0x3F10, 0x0034,
+ 0x0050, 0x3E88, 0x0364, 0x0D24, 0x0164, 0x3F04, 0x0038,
+ 0x004C, 0x3E94, 0x0330, 0x0D30, 0x0194, 0x3EF4, 0x0038,
+ 0x004C, 0x3EA0, 0x02F8, 0x0D34, 0x01C4, 0x3EE8, 0x003C,
+ 0x0048, 0x3EAC, 0x02C0, 0x0D3C, 0x01F4, 0x3EDC, 0x0040,
+ 0x0048, 0x3EB8, 0x0290, 0x0D3C, 0x0224, 0x3ED0, 0x0040,
+ 0x0044, 0x3EC4, 0x0258, 0x0D40, 0x0258, 0x3EC4, 0x0044
+};
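The header comments give the coefficient quantization (<CoefQuant> = 1.10) and output format (<CoefOut> = 1.12) but not the bit layout. Judging from the values themselves (each phase row sums to 0x1000, and entries such as 0x3E58 behave as small negatives), the stored words read as 14-bit two's-complement with 12 fractional bits, so 0x1000 is +1.0 and 0x3FFC is -4/4096. A minimal standalone sketch of that decode, under that assumption; the helper name is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: decode one stored coefficient as 14-bit
 * two's-complement with 12 fractional bits (inferred from the table
 * values, not documented here): 0x1000 -> +1.0, 0x3FFC -> -4/4096. */
static double scl_coef_to_double(uint16_t raw)
{
	int32_t v = raw & 0x3FFF;	/* keep the 14-bit field */

	if (v & 0x2000)			/* sign bit of the field */
		v -= 0x4000;		/* sign-extend           */
	return v / 4096.0;		/* 12 fractional bits    */
}

int main(void)
{
	/* First phase row of filter_7tap_64p_116 above. */
	static const uint16_t row[7] = {
		0x0020, 0x3E58, 0x0988, 0x0988, 0x3E58, 0x0020, 0x0000
	};
	double sum = 0.0;
	int i;

	for (i = 0; i < 7; i++)
		sum += scl_coef_to_double(row[i]);
	printf("row sum = %f\n", sum);	/* 1.000000: unity gain */
	return 0;
}

Under this reading, every phase row in these tables sums to unity, which is a quick sanity check when touching the coefficients.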
-static const uint16_t filter_7tap_64p_150[231] = {
- 16224, 16380, 2208, 2208, 16380, 16224, 0,
- 16232, 16360, 2172, 2236, 16, 16216, 0,
- 16236, 16340, 2140, 2268, 40, 16212, 0,
- 16244, 16324, 2104, 2296, 60, 16204, 4,
- 16252, 16304, 2072, 2324, 84, 16196, 4,
- 16256, 16288, 2036, 2352, 108, 16192, 4,
- 16264, 16268, 2000, 2380, 132, 16184, 8,
- 16272, 16252, 1960, 2408, 160, 16176, 8,
- 16276, 16240, 1924, 2432, 184, 16172, 8,
- 16284, 16224, 1888, 2456, 212, 16164, 8,
- 16288, 16212, 1848, 2480, 240, 16160, 12,
- 16296, 16196, 1812, 2500, 268, 16152, 12,
- 16300, 16184, 1772, 2524, 296, 16144, 12,
- 16308, 16172, 1736, 2544, 324, 16140, 12,
- 16312, 16164, 1696, 2564, 356, 16136, 12,
- 16320, 16152, 1656, 2584, 388, 16128, 12,
- 16324, 16144, 1616, 2600, 416, 16124, 12,
- 16328, 16136, 1576, 2616, 448, 16116, 12,
- 16332, 16128, 1536, 2632, 480, 16112, 12,
- 16340, 16120, 1496, 2648, 516, 16108, 12,
- 16344, 16112, 1456, 2660, 548, 16104, 12,
- 16348, 16104, 1416, 2672, 580, 16100, 12,
- 16352, 16100, 1376, 2684, 616, 16096, 12,
- 16356, 16096, 1336, 2696, 652, 16092, 12,
- 16360, 16092, 1296, 2704, 688, 16088, 12,
- 16364, 16088, 1256, 2712, 720, 16084, 12,
- 16368, 16084, 1220, 2720, 760, 16084, 8,
- 16368, 16080, 1180, 2724, 796, 16080, 8,
- 16372, 16080, 1140, 2732, 832, 16080, 8,
- 16376, 16076, 1100, 2732, 868, 16076, 4,
- 16380, 16076, 1060, 2736, 908, 16076, 4,
- 16380, 16076, 1020, 2740, 944, 16076, 0,
- 0, 16076, 984, 2740, 984, 16076, 0 };
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_149[231] = {
+ 0x3F68, 0x3FEC, 0x08A8, 0x08AC, 0x3FF0, 0x3F68, 0x0000,
+ 0x3F70, 0x3FDC, 0x0888, 0x08CC, 0x0000, 0x3F60, 0x0000,
+ 0x3F74, 0x3FC8, 0x0868, 0x08F0, 0x0014, 0x3F58, 0x0000,
+ 0x3F7C, 0x3FB4, 0x0844, 0x0908, 0x002C, 0x3F54, 0x0004,
+ 0x3F84, 0x3FA4, 0x0820, 0x0924, 0x0044, 0x3F4C, 0x0004,
+ 0x3F88, 0x3F90, 0x0800, 0x0944, 0x005C, 0x3F44, 0x0004,
+ 0x3F90, 0x3F80, 0x07D8, 0x095C, 0x0074, 0x3F40, 0x0008,
+ 0x3F98, 0x3F70, 0x07B0, 0x097C, 0x008C, 0x3F38, 0x0008,
+ 0x3F9C, 0x3F60, 0x0790, 0x0994, 0x00A8, 0x3F30, 0x0008,
+ 0x3FA4, 0x3F54, 0x0764, 0x09B0, 0x00C4, 0x3F28, 0x0008,
+ 0x3FA8, 0x3F48, 0x0740, 0x09C4, 0x00DC, 0x3F24, 0x000C,
+ 0x3FB0, 0x3F38, 0x0718, 0x09DC, 0x00FC, 0x3F1C, 0x000C,
+ 0x3FB4, 0x3F2C, 0x06F0, 0x09F4, 0x0118, 0x3F18, 0x000C,
+ 0x3FBC, 0x3F24, 0x06C8, 0x0A08, 0x0134, 0x3F10, 0x000C,
+ 0x3FC0, 0x3F18, 0x06A0, 0x0A1C, 0x0154, 0x3F08, 0x0010,
+ 0x3FC8, 0x3F10, 0x0678, 0x0A2C, 0x0170, 0x3F04, 0x0010,
+ 0x3FCC, 0x3F04, 0x0650, 0x0A40, 0x0190, 0x3F00, 0x0010,
+ 0x3FD0, 0x3EFC, 0x0628, 0x0A54, 0x01B0, 0x3EF8, 0x0010,
+ 0x3FD4, 0x3EF4, 0x0600, 0x0A64, 0x01D0, 0x3EF4, 0x0010,
+ 0x3FDC, 0x3EEC, 0x05D8, 0x0A6C, 0x01F4, 0x3EF0, 0x0010,
+ 0x3FE0, 0x3EE8, 0x05B0, 0x0A7C, 0x0214, 0x3EE8, 0x0010,
+ 0x3FE4, 0x3EE0, 0x0588, 0x0A88, 0x0238, 0x3EE4, 0x0010,
+ 0x3FE8, 0x3EDC, 0x055C, 0x0A98, 0x0258, 0x3EE0, 0x0010,
+ 0x3FEC, 0x3ED8, 0x0534, 0x0AA0, 0x027C, 0x3EDC, 0x0010,
+ 0x3FF0, 0x3ED4, 0x050C, 0x0AAC, 0x02A0, 0x3ED8, 0x000C,
+ 0x3FF4, 0x3ED0, 0x04E4, 0x0AB4, 0x02C4, 0x3ED4, 0x000C,
+ 0x3FF4, 0x3ECC, 0x04C0, 0x0ABC, 0x02E8, 0x3ED0, 0x000C,
+ 0x3FF8, 0x3ECC, 0x0494, 0x0AC0, 0x030C, 0x3ED0, 0x000C,
+ 0x3FFC, 0x3EC8, 0x046C, 0x0AC8, 0x0334, 0x3ECC, 0x0008,
+ 0x0000, 0x3EC8, 0x0444, 0x0AC8, 0x0358, 0x3ECC, 0x0008,
+ 0x0000, 0x3EC8, 0x041C, 0x0ACC, 0x0380, 0x3EC8, 0x0008,
+ 0x0000, 0x3EC8, 0x03F4, 0x0AD0, 0x03A8, 0x3EC8, 0x0004,
+ 0x0004, 0x3EC8, 0x03CC, 0x0AD0, 0x03CC, 0x3EC8, 0x0004
+};
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_7tap_64p_183[231] = {
- 16216, 324, 1884, 1884, 324, 16216, 0,
- 16220, 304, 1864, 1904, 344, 16216, 0,
- 16224, 284, 1844, 1924, 364, 16216, 0,
- 16224, 264, 1824, 1944, 384, 16212, 16380,
- 16228, 248, 1804, 1960, 408, 16212, 16380,
- 16228, 228, 1784, 1976, 428, 16208, 16380,
- 16232, 212, 1760, 1996, 452, 16208, 16380,
- 16236, 192, 1740, 2012, 472, 16208, 16376,
- 16240, 176, 1716, 2028, 496, 16208, 16376,
- 16240, 160, 1696, 2040, 516, 16208, 16376,
- 16244, 144, 1672, 2056, 540, 16208, 16376,
- 16248, 128, 1648, 2068, 564, 16208, 16372,
- 16252, 112, 1624, 2084, 588, 16208, 16372,
- 16256, 96, 1600, 2096, 612, 16208, 16368,
- 16256, 84, 1576, 2108, 636, 16208, 16368,
- 16260, 68, 1552, 2120, 660, 16208, 16368,
- 16264, 56, 1524, 2132, 684, 16212, 16364,
- 16268, 40, 1500, 2140, 712, 16212, 16364,
- 16272, 28, 1476, 2152, 736, 16216, 16360,
- 16276, 16, 1448, 2160, 760, 16216, 16356,
- 16280, 4, 1424, 2168, 788, 16220, 16356,
- 16284, 16376, 1396, 2176, 812, 16224, 16352,
- 16288, 16368, 1372, 2184, 840, 16224, 16352,
- 16292, 16356, 1344, 2188, 864, 16228, 16348,
- 16292, 16344, 1320, 2196, 892, 16232, 16344,
- 16296, 16336, 1292, 2200, 916, 16236, 16344,
- 16300, 16324, 1264, 2204, 944, 16240, 16340,
- 16304, 16316, 1240, 2208, 972, 16248, 16336,
- 16308, 16308, 1212, 2212, 996, 16252, 16332,
- 16312, 16300, 1184, 2216, 1024, 16256, 16332,
- 16316, 16292, 1160, 2216, 1052, 16264, 16328,
- 16316, 16284, 1132, 2216, 1076, 16268, 16324,
- 16320, 16276, 1104, 2216, 1104, 16276, 16320 };
+ 0x3FA4, 0x01E8, 0x0674, 0x0674, 0x01E8, 0x3FA4, 0x0000,
+ 0x3FA4, 0x01D4, 0x0668, 0x0684, 0x01F8, 0x3FA4, 0x0000,
+ 0x3FA4, 0x01C4, 0x0658, 0x0690, 0x0208, 0x3FA8, 0x0000,
+ 0x3FA0, 0x01B4, 0x064C, 0x06A0, 0x021C, 0x3FA8, 0x3FFC,
+ 0x3FA0, 0x01A4, 0x063C, 0x06AC, 0x022C, 0x3FAC, 0x3FFC,
+ 0x3FA0, 0x0194, 0x0630, 0x06B4, 0x0240, 0x3FAC, 0x3FFC,
+ 0x3FA0, 0x0184, 0x0620, 0x06C4, 0x0250, 0x3FB0, 0x3FF8,
+ 0x3FA0, 0x0174, 0x0614, 0x06CC, 0x0264, 0x3FB0, 0x3FF8,
+ 0x3FA0, 0x0164, 0x0604, 0x06D8, 0x0278, 0x3FB4, 0x3FF4,
+ 0x3FA0, 0x0154, 0x05F4, 0x06E4, 0x0288, 0x3FB8, 0x3FF4,
+ 0x3FA0, 0x0148, 0x05E4, 0x06EC, 0x029C, 0x3FBC, 0x3FF0,
+ 0x3FA0, 0x0138, 0x05D4, 0x06F4, 0x02B0, 0x3FC0, 0x3FF0,
+ 0x3FA0, 0x0128, 0x05C4, 0x0704, 0x02C4, 0x3FC0, 0x3FEC,
+ 0x3FA0, 0x011C, 0x05B4, 0x0708, 0x02D8, 0x3FC4, 0x3FEC,
+ 0x3FA4, 0x010C, 0x05A4, 0x0714, 0x02E8, 0x3FC8, 0x3FE8,
+ 0x3FA4, 0x0100, 0x0590, 0x0718, 0x02FC, 0x3FD0, 0x3FE8,
+ 0x3FA4, 0x00F0, 0x0580, 0x0724, 0x0310, 0x3FD4, 0x3FE4,
+ 0x3FA4, 0x00E4, 0x056C, 0x072C, 0x0324, 0x3FD8, 0x3FE4,
+ 0x3FA8, 0x00D8, 0x055C, 0x0730, 0x0338, 0x3FDC, 0x3FE0,
+ 0x3FA8, 0x00CC, 0x0548, 0x0738, 0x034C, 0x3FE4, 0x3FDC,
+ 0x3FA8, 0x00BC, 0x0538, 0x0740, 0x0360, 0x3FE8, 0x3FDC,
+ 0x3FAC, 0x00B0, 0x0528, 0x0744, 0x0374, 0x3FEC, 0x3FD8,
+ 0x3FAC, 0x00A4, 0x0514, 0x0748, 0x0388, 0x3FF4, 0x3FD8,
+ 0x3FB0, 0x0098, 0x0500, 0x074C, 0x039C, 0x3FFC, 0x3FD4,
+ 0x3FB0, 0x0090, 0x04EC, 0x0750, 0x03B0, 0x0000, 0x3FD4,
+ 0x3FB0, 0x0084, 0x04DC, 0x0758, 0x03C4, 0x0004, 0x3FD0,
+ 0x3FB4, 0x0078, 0x04CC, 0x0758, 0x03D8, 0x000C, 0x3FCC,
+ 0x3FB4, 0x006C, 0x04B8, 0x075C, 0x03EC, 0x0014, 0x3FCC,
+ 0x3FB8, 0x0064, 0x04A0, 0x0760, 0x0400, 0x001C, 0x3FC8,
+ 0x3FB8, 0x0058, 0x0490, 0x0760, 0x0414, 0x0024, 0x3FC8,
+ 0x3FBC, 0x0050, 0x047C, 0x0760, 0x0428, 0x002C, 0x3FC4,
+ 0x3FBC, 0x0048, 0x0464, 0x0764, 0x043C, 0x0034, 0x3FC4,
+ 0x3FC0, 0x003C, 0x0454, 0x0764, 0x0450, 0x003C, 0x3FC0
+};
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_8tap_64p_upscale[264] = {
- 0, 0, 0, 4096, 0, 0, 0, 0,
- 16376, 20, 16328, 4092, 56, 16364, 4, 0,
- 16372, 36, 16272, 4088, 116, 16340, 12, 0,
- 16364, 56, 16220, 4080, 180, 16320, 20, 0,
- 16360, 76, 16172, 4064, 244, 16296, 24, 16380,
- 16356, 92, 16124, 4048, 312, 16276, 32, 16380,
- 16352, 108, 16080, 4032, 380, 16252, 40, 16380,
- 16344, 124, 16036, 4008, 452, 16228, 48, 16380,
- 16340, 136, 15996, 3980, 524, 16204, 56, 16380,
- 16340, 152, 15956, 3952, 600, 16180, 64, 16376,
- 16336, 164, 15920, 3920, 672, 16156, 76, 16376,
- 16332, 176, 15888, 3884, 752, 16132, 84, 16376,
- 16328, 188, 15860, 3844, 828, 16104, 92, 16372,
- 16328, 200, 15828, 3800, 908, 16080, 100, 16372,
- 16324, 208, 15804, 3756, 992, 16056, 108, 16372,
- 16324, 216, 15780, 3708, 1072, 16032, 120, 16368,
- 16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
- 16320, 232, 15740, 3604, 1240, 15984, 136, 16364,
- 16320, 240, 15724, 3548, 1324, 15960, 144, 16364,
- 16320, 244, 15708, 3488, 1412, 15936, 152, 16360,
- 16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
- 16320, 252, 15688, 3364, 1584, 15892, 172, 16356,
- 16320, 256, 15680, 3296, 1672, 15868, 180, 16352,
- 16320, 256, 15672, 3228, 1756, 15848, 188, 16352,
- 16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
- 16320, 260, 15668, 3084, 1932, 15808, 200, 16348,
- 16320, 256, 15668, 3012, 2020, 15792, 208, 16344,
- 16324, 256, 15668, 2936, 2108, 15772, 216, 16344,
- 16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
- 16324, 252, 15676, 2776, 2280, 15740, 228, 16336,
- 16328, 252, 15684, 2696, 2364, 15728, 232, 16336,
- 16328, 248, 15692, 2616, 2448, 15716, 240, 16332,
- 16332, 244, 15704, 2532, 2532, 15704, 244, 16332 };
+ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3FFC, 0x0014, 0x3FC8, 0x1000, 0x0038, 0x3FEC, 0x0004, 0x0000,
+ 0x3FF4, 0x0024, 0x3F94, 0x0FFC, 0x0074, 0x3FD8, 0x000C, 0x0000,
+ 0x3FF0, 0x0038, 0x3F60, 0x0FEC, 0x00B4, 0x3FC4, 0x0014, 0x0000,
+ 0x3FEC, 0x004C, 0x3F2C, 0x0FE4, 0x00F4, 0x3FAC, 0x0018, 0x0000,
+ 0x3FE4, 0x005C, 0x3F00, 0x0FD4, 0x0138, 0x3F94, 0x0020, 0x0000,
+ 0x3FE0, 0x006C, 0x3ED0, 0x0FC4, 0x017C, 0x3F7C, 0x0028, 0x0000,
+ 0x3FDC, 0x007C, 0x3EA8, 0x0FA4, 0x01C4, 0x3F68, 0x0030, 0x0000,
+ 0x3FD8, 0x0088, 0x3E80, 0x0F90, 0x020C, 0x3F50, 0x0038, 0x3FFC,
+ 0x3FD4, 0x0098, 0x3E58, 0x0F70, 0x0258, 0x3F38, 0x0040, 0x3FFC,
+ 0x3FD0, 0x00A4, 0x3E34, 0x0F54, 0x02A0, 0x3F1C, 0x004C, 0x3FFC,
+ 0x3FD0, 0x00B0, 0x3E14, 0x0F28, 0x02F0, 0x3F04, 0x0054, 0x3FFC,
+ 0x3FCC, 0x00BC, 0x3DF4, 0x0F08, 0x033C, 0x3EEC, 0x005C, 0x3FF8,
+ 0x3FC8, 0x00C8, 0x3DD8, 0x0EDC, 0x038C, 0x3ED4, 0x0064, 0x3FF8,
+ 0x3FC8, 0x00D0, 0x3DC0, 0x0EAC, 0x03E0, 0x3EBC, 0x006C, 0x3FF4,
+ 0x3FC4, 0x00D8, 0x3DA8, 0x0E7C, 0x0430, 0x3EA4, 0x0078, 0x3FF4,
+ 0x3FC4, 0x00E0, 0x3D94, 0x0E48, 0x0484, 0x3E8C, 0x0080, 0x3FF0,
+ 0x3FC4, 0x00E8, 0x3D80, 0x0E10, 0x04D8, 0x3E74, 0x0088, 0x3FF0,
+ 0x3FC4, 0x00F0, 0x3D70, 0x0DD8, 0x052C, 0x3E5C, 0x0090, 0x3FEC,
+ 0x3FC0, 0x00F4, 0x3D60, 0x0DA0, 0x0584, 0x3E44, 0x0098, 0x3FEC,
+ 0x3FC0, 0x00F8, 0x3D54, 0x0D68, 0x05D8, 0x3E2C, 0x00A0, 0x3FE8,
+ 0x3FC0, 0x00FC, 0x3D48, 0x0D20, 0x0630, 0x3E18, 0x00AC, 0x3FE8,
+ 0x3FC0, 0x0100, 0x3D40, 0x0CE0, 0x0688, 0x3E00, 0x00B4, 0x3FE4,
+ 0x3FC4, 0x0100, 0x3D3C, 0x0C98, 0x06DC, 0x3DEC, 0x00BC, 0x3FE4,
+ 0x3FC4, 0x0100, 0x3D38, 0x0C58, 0x0734, 0x3DD8, 0x00C0, 0x3FE0,
+ 0x3FC4, 0x0104, 0x3D38, 0x0C0C, 0x078C, 0x3DC4, 0x00C8, 0x3FDC,
+ 0x3FC4, 0x0100, 0x3D38, 0x0BC4, 0x07E4, 0x3DB0, 0x00D0, 0x3FDC,
+ 0x3FC4, 0x0100, 0x3D38, 0x0B78, 0x083C, 0x3DA0, 0x00D8, 0x3FD8,
+ 0x3FC8, 0x0100, 0x3D3C, 0x0B28, 0x0890, 0x3D90, 0x00DC, 0x3FD8,
+ 0x3FC8, 0x00FC, 0x3D40, 0x0ADC, 0x08E8, 0x3D80, 0x00E4, 0x3FD4,
+ 0x3FCC, 0x00FC, 0x3D48, 0x0A84, 0x093C, 0x3D74, 0x00E8, 0x3FD4,
+ 0x3FCC, 0x00F8, 0x3D50, 0x0A38, 0x0990, 0x3D64, 0x00F0, 0x3FD0,
+ 0x3FD0, 0x00F4, 0x3D58, 0x09E0, 0x09E4, 0x3D5C, 0x00F4, 0x3FD0
+};
-static const uint16_t filter_8tap_64p_117[264] = {
- 116, 16100, 428, 3564, 428, 16100, 116, 0,
- 112, 16116, 376, 3564, 484, 16084, 120, 16380,
- 104, 16136, 324, 3560, 540, 16064, 124, 16380,
- 100, 16152, 272, 3556, 600, 16048, 128, 16380,
- 96, 16168, 220, 3548, 656, 16032, 136, 16376,
- 88, 16188, 172, 3540, 716, 16016, 140, 16376,
- 84, 16204, 124, 3528, 780, 16000, 144, 16376,
- 80, 16220, 76, 3512, 840, 15984, 148, 16372,
- 76, 16236, 32, 3496, 904, 15968, 152, 16372,
- 68, 16252, 16376, 3480, 968, 15952, 156, 16372,
- 64, 16268, 16332, 3456, 1032, 15936, 160, 16372,
- 60, 16284, 16292, 3432, 1096, 15920, 164, 16368,
- 56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
- 48, 16316, 16216, 3380, 1228, 15892, 168, 16368,
- 44, 16332, 16180, 3348, 1296, 15880, 168, 16368,
- 40, 16348, 16148, 3316, 1364, 15868, 172, 16364,
- 36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
- 32, 16376, 16084, 3248, 1496, 15848, 176, 16364,
- 28, 4, 16052, 3208, 1564, 15836, 176, 16364,
- 24, 16, 16028, 3168, 1632, 15828, 176, 16364,
- 20, 28, 16000, 3124, 1700, 15820, 176, 16364,
- 16, 40, 15976, 3080, 1768, 15812, 176, 16364,
- 12, 52, 15952, 3036, 1836, 15808, 176, 16364,
- 8, 64, 15932, 2988, 1904, 15800, 176, 16364,
- 4, 76, 15912, 2940, 1972, 15800, 172, 16364,
- 4, 84, 15892, 2888, 2040, 15796, 172, 16364,
- 0, 96, 15876, 2836, 2104, 15792, 168, 16364,
- 16380, 104, 15864, 2780, 2172, 15792, 164, 16364,
- 16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
- 16376, 120, 15836, 2668, 2300, 15796, 156, 16368,
- 16376, 128, 15828, 2608, 2364, 15800, 152, 16368,
- 16372, 136, 15816, 2548, 2428, 15804, 148, 16368,
- 16372, 140, 15812, 2488, 2488, 15812, 140, 16372 };
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_116[264] = {
+ 0x0080, 0x3E90, 0x0268, 0x0D14, 0x0264, 0x3E90, 0x0080, 0x0000,
+ 0x007C, 0x3E9C, 0x0238, 0x0D14, 0x0298, 0x3E84, 0x0080, 0x0000,
+ 0x0078, 0x3EAC, 0x0200, 0x0D10, 0x02D0, 0x3E78, 0x0084, 0x0000,
+ 0x0078, 0x3EB8, 0x01D0, 0x0D0C, 0x0304, 0x3E6C, 0x0084, 0x0000,
+ 0x0074, 0x3EC8, 0x01A0, 0x0D00, 0x033C, 0x3E60, 0x0088, 0x0000,
+ 0x0070, 0x3ED4, 0x0170, 0x0D00, 0x0374, 0x3E54, 0x0088, 0x3FFC,
+ 0x006C, 0x3EE4, 0x0140, 0x0CF8, 0x03AC, 0x3E48, 0x0088, 0x3FFC,
+ 0x006C, 0x3EF0, 0x0114, 0x0CE8, 0x03E4, 0x3E3C, 0x008C, 0x3FFC,
+ 0x0068, 0x3F00, 0x00E8, 0x0CD8, 0x041C, 0x3E34, 0x008C, 0x3FFC,
+ 0x0064, 0x3F10, 0x00BC, 0x0CCC, 0x0454, 0x3E28, 0x008C, 0x3FFC,
+ 0x0060, 0x3F1C, 0x0090, 0x0CBC, 0x0490, 0x3E20, 0x008C, 0x3FFC,
+ 0x005C, 0x3F2C, 0x0068, 0x0CA4, 0x04CC, 0x3E18, 0x008C, 0x3FFC,
+ 0x0058, 0x3F38, 0x0040, 0x0C94, 0x0504, 0x3E10, 0x008C, 0x3FFC,
+ 0x0054, 0x3F48, 0x001C, 0x0C7C, 0x0540, 0x3E08, 0x0088, 0x3FFC,
+ 0x0050, 0x3F54, 0x3FF8, 0x0C60, 0x057C, 0x3E04, 0x0088, 0x3FFC,
+ 0x004C, 0x3F64, 0x3FD4, 0x0C44, 0x05B8, 0x3DFC, 0x0088, 0x3FFC,
+ 0x0048, 0x3F70, 0x3FB4, 0x0C28, 0x05F4, 0x3DF8, 0x0084, 0x3FFC,
+ 0x0044, 0x3F80, 0x3F90, 0x0C0C, 0x0630, 0x3DF4, 0x0080, 0x3FFC,
+ 0x0040, 0x3F8C, 0x3F70, 0x0BE8, 0x066C, 0x3DF4, 0x0080, 0x3FFC,
+ 0x003C, 0x3F9C, 0x3F50, 0x0BC8, 0x06A8, 0x3DF0, 0x007C, 0x3FFC,
+ 0x0038, 0x3FA8, 0x3F34, 0x0BA0, 0x06E4, 0x3DF0, 0x0078, 0x0000,
+ 0x0034, 0x3FB4, 0x3F18, 0x0B80, 0x071C, 0x3DF0, 0x0074, 0x0000,
+ 0x0030, 0x3FC0, 0x3EFC, 0x0B5C, 0x0758, 0x3DF0, 0x0070, 0x0000,
+ 0x002C, 0x3FCC, 0x3EE4, 0x0B34, 0x0794, 0x3DF4, 0x0068, 0x0000,
+ 0x002C, 0x3FDC, 0x3ECC, 0x0B08, 0x07CC, 0x3DF4, 0x0064, 0x0000,
+ 0x0028, 0x3FE4, 0x3EB4, 0x0AE0, 0x0808, 0x3DF8, 0x0060, 0x0000,
+ 0x0024, 0x3FF0, 0x3EA0, 0x0AB0, 0x0840, 0x3E00, 0x0058, 0x0004,
+ 0x0020, 0x3FFC, 0x3E90, 0x0A84, 0x0878, 0x3E04, 0x0050, 0x0004,
+ 0x001C, 0x0004, 0x3E7C, 0x0A54, 0x08B0, 0x3E0C, 0x004C, 0x0008,
+ 0x0018, 0x000C, 0x3E68, 0x0A28, 0x08E8, 0x3E18, 0x0044, 0x0008,
+ 0x0018, 0x0018, 0x3E54, 0x09F4, 0x0920, 0x3E20, 0x003C, 0x000C,
+ 0x0014, 0x0020, 0x3E48, 0x09C0, 0x0954, 0x3E2C, 0x0034, 0x0010,
+ 0x0010, 0x002C, 0x3E3C, 0x098C, 0x0988, 0x3E38, 0x002C, 0x0010
+};
-static const uint16_t filter_8tap_64p_150[264] = {
- 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
- 0, 16020, 992, 2756, 1068, 16024, 16376, 0,
- 4, 16020, 952, 2752, 1108, 16024, 16372, 0,
- 8, 16020, 916, 2748, 1148, 16028, 16368, 0,
- 12, 16020, 876, 2744, 1184, 16032, 16364, 4,
- 16, 16020, 840, 2740, 1224, 16036, 16356, 4,
- 20, 16024, 800, 2732, 1264, 16040, 16352, 4,
- 20, 16024, 764, 2724, 1304, 16044, 16348, 8,
- 24, 16028, 728, 2716, 1344, 16052, 16340, 8,
- 28, 16028, 692, 2704, 1380, 16056, 16336, 12,
- 28, 16032, 656, 2696, 1420, 16064, 16328, 12,
- 32, 16036, 620, 2684, 1460, 16072, 16324, 12,
- 36, 16040, 584, 2668, 1500, 16080, 16316, 16,
- 36, 16044, 548, 2656, 1536, 16088, 16308, 16,
- 36, 16048, 516, 2640, 1576, 16096, 16304, 20,
- 40, 16052, 480, 2624, 1612, 16108, 16296, 20,
- 40, 16060, 448, 2608, 1652, 16120, 16288, 20,
- 44, 16064, 416, 2588, 1692, 16132, 16280, 24,
- 44, 16068, 384, 2568, 1728, 16144, 16276, 24,
- 44, 16076, 352, 2548, 1764, 16156, 16268, 28,
- 44, 16080, 320, 2528, 1804, 16168, 16260, 28,
- 44, 16088, 292, 2508, 1840, 16184, 16252, 28,
- 44, 16096, 264, 2484, 1876, 16200, 16244, 32,
- 48, 16100, 232, 2460, 1912, 16216, 16236, 32,
- 48, 16108, 204, 2436, 1948, 16232, 16228, 32,
- 48, 16116, 176, 2412, 1980, 16248, 16220, 36,
- 48, 16124, 152, 2384, 2016, 16264, 16216, 36,
- 44, 16128, 124, 2356, 2052, 16284, 16208, 36,
- 44, 16136, 100, 2328, 2084, 16304, 16200, 40,
- 44, 16144, 72, 2300, 2116, 16324, 16192, 40,
- 44, 16152, 48, 2272, 2148, 16344, 16184, 40,
- 44, 16160, 24, 2244, 2180, 16364, 16176, 40,
- 44, 16168, 4, 2212, 2212, 4, 16168, 44 };
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_149[264] = {
+ 0x0008, 0x3E8C, 0x03F8, 0x0AE8, 0x03F8, 0x3E8C, 0x0008, 0x0000,
+ 0x000C, 0x3E8C, 0x03D0, 0x0AE8, 0x0420, 0x3E90, 0x0000, 0x0000,
+ 0x000C, 0x3E8C, 0x03AC, 0x0AE8, 0x0444, 0x3E90, 0x0000, 0x0000,
+ 0x0010, 0x3E90, 0x0384, 0x0AE0, 0x046C, 0x3E94, 0x3FFC, 0x0000,
+ 0x0014, 0x3E90, 0x035C, 0x0ADC, 0x0494, 0x3E94, 0x3FF8, 0x0004,
+ 0x0018, 0x3E90, 0x0334, 0x0AD8, 0x04BC, 0x3E98, 0x3FF4, 0x0004,
+ 0x001C, 0x3E94, 0x0310, 0x0AD0, 0x04E4, 0x3E9C, 0x3FEC, 0x0004,
+ 0x0020, 0x3E98, 0x02E8, 0x0AC4, 0x050C, 0x3EA0, 0x3FE8, 0x0008,
+ 0x0020, 0x3E98, 0x02C4, 0x0AC0, 0x0534, 0x3EA4, 0x3FE4, 0x0008,
+ 0x0024, 0x3E9C, 0x02A0, 0x0AB4, 0x055C, 0x3EAC, 0x3FDC, 0x0008,
+ 0x0024, 0x3EA0, 0x027C, 0x0AA8, 0x0584, 0x3EB0, 0x3FD8, 0x000C,
+ 0x0028, 0x3EA4, 0x0258, 0x0A9C, 0x05AC, 0x3EB8, 0x3FD0, 0x000C,
+ 0x0028, 0x3EA8, 0x0234, 0x0A90, 0x05D4, 0x3EC0, 0x3FC8, 0x0010,
+ 0x002C, 0x3EAC, 0x0210, 0x0A80, 0x05FC, 0x3EC8, 0x3FC4, 0x0010,
+ 0x002C, 0x3EB4, 0x01F0, 0x0A70, 0x0624, 0x3ED0, 0x3FBC, 0x0010,
+ 0x002C, 0x3EB8, 0x01CC, 0x0A60, 0x064C, 0x3EDC, 0x3FB4, 0x0014,
+ 0x0030, 0x3EBC, 0x01A8, 0x0A50, 0x0674, 0x3EE4, 0x3FB0, 0x0014,
+ 0x0030, 0x3EC4, 0x0188, 0x0A38, 0x069C, 0x3EF0, 0x3FA8, 0x0018,
+ 0x0030, 0x3ECC, 0x0168, 0x0A28, 0x06C0, 0x3EFC, 0x3FA0, 0x0018,
+ 0x0030, 0x3ED0, 0x0148, 0x0A14, 0x06E8, 0x3F08, 0x3F98, 0x001C,
+ 0x0030, 0x3ED8, 0x012C, 0x0A00, 0x070C, 0x3F14, 0x3F90, 0x001C,
+ 0x0034, 0x3EE0, 0x0108, 0x09E4, 0x0734, 0x3F24, 0x3F8C, 0x001C,
+ 0x0034, 0x3EE4, 0x00EC, 0x09CC, 0x0758, 0x3F34, 0x3F84, 0x0020,
+ 0x0034, 0x3EEC, 0x00D0, 0x09B8, 0x077C, 0x3F40, 0x3F7C, 0x0020,
+ 0x0034, 0x3EF4, 0x00B4, 0x0998, 0x07A4, 0x3F50, 0x3F74, 0x0024,
+ 0x0030, 0x3EFC, 0x0098, 0x0980, 0x07C8, 0x3F64, 0x3F6C, 0x0024,
+ 0x0030, 0x3F04, 0x0080, 0x0968, 0x07E8, 0x3F74, 0x3F64, 0x0024,
+ 0x0030, 0x3F0C, 0x0060, 0x094C, 0x080C, 0x3F88, 0x3F5C, 0x0028,
+ 0x0030, 0x3F14, 0x0048, 0x0930, 0x0830, 0x3F98, 0x3F54, 0x0028,
+ 0x0030, 0x3F1C, 0x0030, 0x0914, 0x0850, 0x3FAC, 0x3F4C, 0x0028,
+ 0x0030, 0x3F24, 0x0018, 0x08F0, 0x0874, 0x3FC0, 0x3F44, 0x002C,
+ 0x002C, 0x3F2C, 0x0000, 0x08D4, 0x0894, 0x3FD8, 0x3F3C, 0x002C,
+ 0x002C, 0x3F34, 0x3FEC, 0x08B4, 0x08B4, 0x3FEC, 0x3F34, 0x002C
+};
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_8tap_64p_183[264] = {
- 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
- 16268, 16256, 1136, 2240, 1188, 16272, 16260, 0,
- 16272, 16248, 1108, 2240, 1216, 16280, 16256, 0,
- 16276, 16240, 1080, 2236, 1240, 16292, 16252, 0,
- 16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
- 16284, 16224, 1028, 2232, 1292, 16312, 16244, 0,
- 16288, 16216, 1000, 2228, 1320, 16324, 16240, 0,
- 16292, 16212, 976, 2224, 1344, 16336, 16236, 0,
- 16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
- 16300, 16200, 920, 2212, 1396, 16360, 16228, 4,
- 16304, 16196, 896, 2204, 1424, 16372, 16224, 4,
- 16308, 16188, 868, 2200, 1448, 0, 16220, 4,
- 16312, 16184, 844, 2192, 1472, 12, 16216, 4,
- 16316, 16180, 816, 2184, 1500, 28, 16212, 4,
- 16320, 16176, 792, 2172, 1524, 40, 16208, 4,
- 16324, 16172, 764, 2164, 1548, 56, 16204, 0,
- 16328, 16172, 740, 2156, 1572, 72, 16200, 0,
- 16328, 16168, 712, 2144, 1596, 88, 16196, 0,
- 16332, 16164, 688, 2132, 1620, 100, 16192, 0,
- 16336, 16164, 664, 2120, 1644, 120, 16192, 0,
- 16340, 16160, 640, 2108, 1668, 136, 16188, 0,
- 16344, 16160, 616, 2096, 1688, 152, 16184, 0,
- 16344, 16160, 592, 2080, 1712, 168, 16180, 0,
- 16348, 16156, 568, 2068, 1736, 188, 16176, 16380,
- 16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
- 16352, 16156, 520, 2036, 1780, 224, 16172, 16380,
- 16356, 16156, 496, 2024, 1800, 244, 16172, 16380,
- 16360, 16156, 472, 2008, 1820, 260, 16168, 16376,
- 16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
- 16364, 16156, 428, 1972, 1860, 300, 16164, 16376,
- 16364, 16156, 408, 1956, 1880, 320, 16164, 16372,
- 16368, 16160, 384, 1936, 1900, 344, 16160, 16372,
- 16368, 16160, 364, 1920, 1920, 364, 16160, 16368 };
+ 0x3F88, 0x0048, 0x047C, 0x0768, 0x047C, 0x0048, 0x3F88, 0x0000,
+ 0x3F88, 0x003C, 0x0468, 0x076C, 0x0490, 0x0054, 0x3F84, 0x0000,
+ 0x3F8C, 0x0034, 0x0454, 0x0768, 0x04A4, 0x005C, 0x3F84, 0x0000,
+ 0x3F8C, 0x0028, 0x0444, 0x076C, 0x04B4, 0x0068, 0x3F80, 0x0000,
+ 0x3F90, 0x0020, 0x042C, 0x0768, 0x04C8, 0x0074, 0x3F80, 0x0000,
+ 0x3F90, 0x0018, 0x041C, 0x0764, 0x04DC, 0x0080, 0x3F7C, 0x0000,
+ 0x3F94, 0x0010, 0x0408, 0x075C, 0x04F0, 0x008C, 0x3F7C, 0x0000,
+ 0x3F94, 0x0004, 0x03F8, 0x0760, 0x0500, 0x0098, 0x3F7C, 0x3FFC,
+ 0x3F98, 0x0000, 0x03E0, 0x075C, 0x0514, 0x00A4, 0x3F78, 0x3FFC,
+ 0x3F9C, 0x3FF8, 0x03CC, 0x0754, 0x0528, 0x00B0, 0x3F78, 0x3FFC,
+ 0x3F9C, 0x3FF0, 0x03B8, 0x0754, 0x0538, 0x00BC, 0x3F78, 0x3FFC,
+ 0x3FA0, 0x3FE8, 0x03A4, 0x0750, 0x054C, 0x00CC, 0x3F74, 0x3FF8,
+ 0x3FA4, 0x3FE0, 0x0390, 0x074C, 0x055C, 0x00D8, 0x3F74, 0x3FF8,
+ 0x3FA4, 0x3FDC, 0x037C, 0x0744, 0x0570, 0x00E4, 0x3F74, 0x3FF8,
+ 0x3FA8, 0x3FD4, 0x0368, 0x0740, 0x0580, 0x00F4, 0x3F74, 0x3FF4,
+ 0x3FA8, 0x3FCC, 0x0354, 0x073C, 0x0590, 0x0104, 0x3F74, 0x3FF4,
+ 0x3FAC, 0x3FC8, 0x0340, 0x0730, 0x05A4, 0x0110, 0x3F74, 0x3FF4,
+ 0x3FB0, 0x3FC0, 0x0330, 0x0728, 0x05B4, 0x0120, 0x3F74, 0x3FF0,
+ 0x3FB0, 0x3FBC, 0x031C, 0x0724, 0x05C4, 0x0130, 0x3F70, 0x3FF0,
+ 0x3FB4, 0x3FB4, 0x0308, 0x0720, 0x05D4, 0x013C, 0x3F70, 0x3FF0,
+ 0x3FB8, 0x3FB0, 0x02F4, 0x0714, 0x05E4, 0x014C, 0x3F74, 0x3FEC,
+ 0x3FB8, 0x3FAC, 0x02E0, 0x0708, 0x05F8, 0x015C, 0x3F74, 0x3FEC,
+ 0x3FBC, 0x3FA8, 0x02CC, 0x0704, 0x0604, 0x016C, 0x3F74, 0x3FE8,
+ 0x3FC0, 0x3FA0, 0x02BC, 0x06F8, 0x0614, 0x017C, 0x3F74, 0x3FE8,
+ 0x3FC0, 0x3F9C, 0x02A8, 0x06F4, 0x0624, 0x018C, 0x3F74, 0x3FE4,
+ 0x3FC4, 0x3F98, 0x0294, 0x06E8, 0x0634, 0x019C, 0x3F74, 0x3FE4,
+ 0x3FC8, 0x3F94, 0x0284, 0x06D8, 0x0644, 0x01AC, 0x3F78, 0x3FE0,
+ 0x3FC8, 0x3F90, 0x0270, 0x06D4, 0x0650, 0x01BC, 0x3F78, 0x3FE0,
+ 0x3FCC, 0x3F8C, 0x025C, 0x06C8, 0x0660, 0x01D0, 0x3F78, 0x3FDC,
+ 0x3FCC, 0x3F8C, 0x024C, 0x06B8, 0x066C, 0x01E0, 0x3F7C, 0x3FDC,
+ 0x3FD0, 0x3F88, 0x0238, 0x06B0, 0x067C, 0x01F0, 0x3F7C, 0x3FD8,
+ 0x3FD4, 0x3F84, 0x0228, 0x069C, 0x0688, 0x0204, 0x3F80, 0x3FD8,
+ 0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
+};
const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_3tap_16p_117;
+ return filter_3tap_16p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_3tap_16p_150;
+ return filter_3tap_16p_149;
else
return filter_3tap_16p_183;
}
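All of the get_filter_*() selectors here share the same ratio bands, matching the <scale_ratio> values in the table headers: below 1.0 the upscale set, below 4/3 the _116 set, below 5/3 the _149 set, otherwise _183. An illustrative standalone sketch with plain doubles; the driver itself compares dc fixed31_32 values:

#include <stdio.h>

/* Illustrative only: shows the band boundaries used by the selectors,
 * with doubles standing in for dc_fixpt_from_fraction() comparisons. */
static const char *pick_filter_band(double ratio)
{
	if (ratio < 1.0)
		return "upscale";
	else if (ratio < 4.0 / 3.0)
		return "_116";	/* tuned around ratio 1.16666 */
	else if (ratio < 5.0 / 3.0)
		return "_149";	/* tuned around ratio 1.49999 */
	else
		return "_183";	/* tuned around ratio 1.83332 */
}

int main(void)
{
	const double samples[] = { 0.5, 1.2, 1.5, 2.0 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%.2f -> %s\n", samples[i], pick_filter_band(samples[i]));
	return 0;	/* prints: upscale, _116, _149, _183 */
}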
@@ -1029,9 +1355,9 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_3tap_64p_117;
+ return filter_3tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_3tap_64p_150;
+ return filter_3tap_64p_149;
else
return filter_3tap_64p_183;
}
@@ -1041,9 +1367,9 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_4tap_16p_117;
+ return filter_4tap_16p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_4tap_16p_150;
+ return filter_4tap_16p_149;
else
return filter_4tap_16p_183;
}
@@ -1053,9 +1379,9 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_4tap_64p_117;
+ return filter_4tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_4tap_64p_150;
+ return filter_4tap_64p_149;
else
return filter_4tap_64p_183;
}
@@ -1065,9 +1391,9 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_5tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_5tap_64p_117;
+ return filter_5tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_5tap_64p_150;
+ return filter_5tap_64p_149;
else
return filter_5tap_64p_183;
}
@@ -1077,9 +1403,9 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_6tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_6tap_64p_117;
+ return filter_6tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_6tap_64p_150;
+ return filter_6tap_64p_149;
else
return filter_6tap_64p_183;
}
@@ -1089,9 +1415,9 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_7tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_7tap_64p_117;
+ return filter_7tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_7tap_64p_150;
+ return filter_7tap_64p_149;
else
return filter_7tap_64p_183;
}
@@ -1101,9 +1427,9 @@ const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_8tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_8tap_64p_117;
+ return filter_8tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_8tap_64p_150;
+ return filter_8tap_64p_149;
else
return filter_8tap_64p_183;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
index f78cbae9db88..bb0e1b80ec3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,15 +23,3 @@
*
*/
-#ifndef __DC_COMMON_DEFS_H__
-#define __DC_COMMON_DEFS_H__
-
-#include "dm_services.h"
-#include "dc_features.h"
-#include "display_mode_structs.h"
-#include "display_mode_enums.h"
-
-
-double dml_round(double a);
-
-#endif /* __DC_COMMON_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 225955ec6d39..bc109d4fc6e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -27,25 +27,55 @@
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../../dmub/inc/dmub_srv.h"
-#include "dmub_fw_state.h"
+#include "../../dmub/inc/dmub_gpint_cmd.h"
#include "core_types.h"
-#include "ipp.h"
#define MAX_PIPES 6
/**
* Get PSR state from firmware.
*/
-static void dmub_get_psr_state(uint32_t *psr_state)
+static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
{
- // Not yet implemented
- // Trigger GPINT interrupt from firmware
+ struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+
+ // Send GPINT command and wait for ack
+ dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+
+ dmub_srv_get_gpint_response(srv, psr_state);
+}
+
+/**
+ * Set PSR version.
+ */
+static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ cmd.psr_set_version.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
+
+ if (stream->psr_version == 0x0) // Unsupported
+ return false;
+ else if (stream->psr_version == 0x1)
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
+ else if (stream->psr_version == 0x2)
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
+
+ cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
}
/**
* Enable/Disable PSR.
*/
-static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -67,13 +97,13 @@ static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
/**
* Set PSR level.
*/
-static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
{
union dmub_rb_cmd cmd;
uint32_t psr_state = 0;
struct dc_context *dc = dmub->ctx;
- dmub_get_psr_state(&psr_state);
+ dmub_psr_get_state(dmub, &psr_state);
if (psr_state == 0)
return;
@@ -91,7 +121,7 @@ static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
/**
* Set up PSR by programming PHY registers and sending PSR HW context values to firmware.
*/
-static bool dmub_setup_psr(struct dmub_psr *dmub,
+static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -101,21 +131,22 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
= &cmd.psr_copy_settings.psr_copy_settings_data;
struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
- for (int i = 0; i < MAX_PIPES; i++) {
- if (res_ctx &&
- res_ctx->pipe_ctx[i].stream &&
- res_ctx->pipe_ctx[i].stream->link &&
- res_ctx->pipe_ctx[i].stream->link == link &&
- res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
pipe_ctx = &res_ctx->pipe_ctx[i];
break;
}
}
- if (!pipe_ctx ||
- !&pipe_ctx->plane_res ||
- !&pipe_ctx->stream_res)
+ if (!pipe_ctx)
+ return false;
+
+ // First, set the psr version
+ if (!dmub_psr_set_version(dmub, pipe_ctx->stream))
return false;
// Program DP DPHY fast training registers
@@ -138,10 +169,6 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
- if (pipe_ctx->plane_res.hubp)
- copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst;
- else
- copy_settings_data->hubp_inst = 0;
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -157,18 +184,9 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
// Misc
copy_settings_data->psr_level = psr_context->psr_level.u32all;
- copy_settings_data->hyst_frames = psr_context->timehyst_frames;
- copy_settings_data->hyst_lines = psr_context->hyst_lines;
- copy_settings_data->phy_type = psr_context->phyType;
- copy_settings_data->aux_repeat = psr_context->aux_repeats;
- copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
- copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock;
+ copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
- copy_settings_data->smu_phy_id = psr_context->smuPhyId;
- copy_settings_data->num_of_controllers = psr_context->numberOfControllers;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
- copy_settings_data->phy_num = psr_context->frame_delay & 0x7;
- copy_settings_data->link_rate = psr_context->frame_delay & 0xF;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -178,10 +196,10 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
}
static const struct dmub_psr_funcs psr_funcs = {
- .set_psr_enable = dmub_set_psr_enable,
- .setup_psr = dmub_setup_psr,
- .get_psr_state = dmub_get_psr_state,
- .set_psr_level = dmub_set_psr_level,
+ .psr_copy_settings = dmub_psr_copy_settings,
+ .psr_enable = dmub_psr_enable,
+ .psr_get_state = dmub_psr_get_state,
+ .psr_set_level = dmub_psr_set_level,
};
/**
@@ -215,6 +233,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
*/
void dmub_psr_destroy(struct dmub_psr **dmub)
{
- kfree(dmub);
+ kfree(*dmub);
*dmub = NULL;
}
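For orientation, a hedged sketch of the call order a user of the renamed hook table follows: copy settings (which now also sets the PSR version), enable, then a state query that reaches firmware over GPINT. Only the hook names and signatures come from this patch; the mock types below are stand-ins for the dc structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mock of the funcs table; everything except the hook names and
 * signatures is assumed for the sake of a runnable example. */
struct mock_psr;
struct mock_psr_funcs {
	bool (*psr_copy_settings)(struct mock_psr *psr);
	void (*psr_enable)(struct mock_psr *psr, bool enable);
	void (*psr_get_state)(struct mock_psr *psr, uint32_t *state);
};
struct mock_psr {
	const struct mock_psr_funcs *funcs;
	bool enabled;
};

static bool mock_copy(struct mock_psr *psr) { (void)psr; return true; }
static void mock_enable(struct mock_psr *psr, bool e) { psr->enabled = e; }
static void mock_state(struct mock_psr *psr, uint32_t *s) { *s = psr->enabled; }

static const struct mock_psr_funcs funcs = {
	.psr_copy_settings = mock_copy,
	.psr_enable = mock_enable,
	.psr_get_state = mock_state,
};

int main(void)
{
	struct mock_psr psr = { .funcs = &funcs };
	uint32_t state = 0;

	/* Mirrors the driver's order: settings first (this is where the
	 * PSR version now gets set), then enable, then a state query
	 * that the real code routes to firmware via GPINT. */
	if (psr.funcs->psr_copy_settings(&psr))
		psr.funcs->psr_enable(&psr, true);
	psr.funcs->psr_get_state(&psr, &state);
	printf("psr state = %u\n", (unsigned)state);	/* 1 */
	return 0;
}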
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 229958de3035..f404fecd6410 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -27,6 +27,7 @@
#define _DMUB_PSR_H_
#include "os_types.h"
+#include "dc_link.h"
struct dmub_psr {
struct dc_context *ctx;
@@ -34,14 +35,14 @@ struct dmub_psr {
};
struct dmub_psr_funcs {
- void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
- bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
- void (*get_psr_state)(uint32_t *psr_state);
- void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+ bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+ void (*psr_enable)(struct dmub_psr *dmub, bool enable);
+ void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
+ void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
void dmub_psr_destroy(struct dmub_psr **dmub);
-#endif /* _DCE_DMUB_H_ */
+#endif /* _DMUB_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5b689273ff44..10527593868c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -71,6 +71,8 @@
#define PANEL_POWER_UP_TIMEOUT 300
#define PANEL_POWER_DOWN_TIMEOUT 500
#define HPD_CHECK_INTERVAL 10
+#define OLED_POST_T7_DELAY 100
+#define OLED_PRE_T11_DELAY 150
#define CTX \
hws->ctx
@@ -696,8 +698,10 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
}
/*todo: cloned in stream enc, fix*/
-static bool is_panel_backlight_on(struct dce_hwseq *hws)
+bool dce110_is_panel_backlight_on(struct dc_link *link)
{
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t value;
REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
@@ -705,11 +709,12 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
return value;
}
-static bool is_panel_powered_on(struct dce_hwseq *hws)
+bool dce110_is_panel_powered_on(struct dc_link *link)
{
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
@@ -816,7 +821,7 @@ void dce110_edp_power_control(
return;
}
- if (power_up != is_panel_powered_on(hwseq)) {
+ if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
/* Send VBIOS command to prompt eDP panel power */
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -896,7 +901,7 @@ void dce110_edp_backlight_control(
return;
}
- if (enable && is_panel_backlight_on(hws)) {
+ if (enable && hws->funcs.is_panel_backlight_on(link)) {
DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
@@ -936,9 +941,21 @@ void dce110_edp_backlight_control(
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
edp_receiver_ready_T7(link);
link_transmitter_control(ctx->dc_bios, &cntl);
+
+ if (enable && link->dpcd_sink_ext_caps.bits.oled)
+ msleep(OLED_POST_T7_DELAY);
+
+ if (link->dpcd_sink_ext_caps.bits.oled ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
+ dc_link_backlight_enable_aux(link, enable);
+
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
edp_receiver_ready_T9(link);
+
+ if (!enable && link->dpcd_sink_ext_caps.bits.oled)
+ msleep(OLED_PRE_T11_DELAY);
}
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -2576,17 +2593,6 @@ static void dce110_apply_ctx_for_surface(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (stream == pipe_ctx->stream) {
- if (!pipe_ctx->top_pipe &&
- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream != stream)
continue;
@@ -2607,20 +2613,16 @@ static void dce110_apply_ctx_for_surface(
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if ((stream == pipe_ctx->stream) &&
- (!pipe_ctx->top_pipe) &&
- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
- }
-
if (dc->fbc_compressor)
enable_fbc(dc, context);
}
+static void dce110_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+}
+
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
@@ -2683,6 +2685,23 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ /**
+ * If the cursor's source viewport is clipped then we need to
+ * translate the cursor to appear in the correct position on
+ * the screen.
+ *
+ * This translation isn't affected by scaling, so it needs to be
+ * done *after* we adjust the position for the scale factor.
+ *
+ * This is opt-in only for now, since there are still some
+ * use cases like tiled display that might enable the
+ * cursor on both streams while expecting dc to clip it.
+ */
+ if (pos_cpy.translate_by_source) {
+ pos_cpy.x += pipe_ctx->plane_state->src_rect.x;
+ pos_cpy.y += pipe_ctx->plane_state->src_rect.y;
+ }
+
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
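A standalone sketch of the opt-in translation above with concrete numbers; the struct mirrors only the fields used here, and the src_rect origin is an example value:

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: not the full dc_cursor_position type. */
struct cursor_pos { int x, y; bool translate_by_source; };

int main(void)
{
	struct cursor_pos pos = { .x = 10, .y = 20,
				  .translate_by_source = true };
	int src_x = 128, src_y = 64;	/* assumed src_rect origin */

	/* Applied after any scale adjustment, since the source offset
	 * itself is not affected by scaling. */
	if (pos.translate_by_source) {
		pos.x += src_x;
		pos.y += src_y;
	}
	printf("cursor at (%d, %d)\n", pos.x, pos.y);	/* (138, 84) */
	return 0;
}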
@@ -2722,6 +2741,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.init_hw = init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+ .post_unlock_program_front_end = dce110_post_unlock_program_front_end,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
@@ -2736,6 +2756,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
+ .interdependent_update_lock = NULL,
+ .cursor_lock = dce_pipe_control_lock,
.prepare_bandwidth = dce110_prepare_bandwidth,
.optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr,
@@ -2763,6 +2785,8 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 26a9c14a58b1..34be166e8ff0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -85,5 +85,9 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);
+bool dce110_is_panel_backlight_on(struct dc_link *link);
+
+bool dce110_is_panel_powered_on(struct dc_link *link);
+
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index bbd6e01b3eca..47a39eb9400b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -316,6 +316,7 @@ bool cm_helper_translate_curve_to_hw_format(
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
+ struct pwl_result_data *rgb_minus_1;
int32_t region_start, region_end;
int32_t i;
@@ -465,9 +466,20 @@ bool cm_helper_translate_curve_to_hw_format(
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
+ rgb_minus_1 = rgb;
i = 1;
while (i != hw_points + 1) {
+
+ if (i >= hw_points - 1) {
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
+ }
+
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
@@ -482,6 +494,7 @@ bool cm_helper_translate_curve_to_hw_format(
}
++rgb_plus_1;
+ rgb_minus_1 = rgb;
++rgb;
++i;
}
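A standalone sketch of the tail clamp added above, with plain floats in place of dc fixed31_32: when one of the last points dips below its predecessor, it is rebuilt from the previous segment's delta so the programmed deltas never go negative:

#include <stdio.h>

/* Approximation of the rgb_minus_1 fix-up: only the last couple of
 * points are checked, and a dip is replaced by extrapolating the
 * previous segment's delta. */
static void clamp_tail_monotonic(float *pts, int n)
{
	int i;

	for (i = 1; i + 1 < n; i++) {
		if (i >= n - 2 && pts[i + 1] < pts[i]) {
			float prev_delta = pts[i] - pts[i - 1];

			pts[i + 1] = pts[i] + prev_delta;
		}
	}
}

int main(void)
{
	float curve[] = { 0.0f, 0.5f, 0.9f, 0.85f };	/* dips at the end */

	clamp_tail_monotonic(curve, 4);
	printf("%f\n", curve[3]);	/* 0.9 + (0.9 - 0.5) = 1.3 */
	return 0;
}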
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 446ba0a7a4b3..deccab0228d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -128,8 +128,8 @@ bool hubbub1_verify_allow_pstate_change_high(
* pstate takes around 100us on Linux. It is currently unknown
* why it takes that long.
*/
- static unsigned int pstate_wait_timeout_us = 200;
- static unsigned int pstate_wait_expected_timeout_us = 40;
+ const unsigned int pstate_wait_timeout_us = 200;
+ const unsigned int pstate_wait_expected_timeout_us = 40;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
@@ -147,8 +147,9 @@ bool hubbub1_verify_allow_pstate_change_high(
forced_pstate_allow = false;
}
- /* RV2:
- * dchubbubdebugind, at: 0xB
+ /* The following table only applies to DCN1 and DCN2;
+ * for newer DCNs, consult the HW IP team to read the RTL.
+ * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
* description
* 0: Pipe0 Plane0 Allow Pstate Change
* 1: Pipe0 Plane1 Allow Pstate Change
@@ -181,64 +182,6 @@ bool hubbub1_verify_allow_pstate_change_high(
* 28: WB0 Allow Pstate Change
* 29: WB1 Allow Pstate Change
* 30: Arbiter's allow_pstate_change
- * 31: SOC pstate change request"
- */
- /*DCN2.x:
- HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
- 0: Pipe0 Plane0 Allow P-state Change
- 1: Pipe0 Plane1 Allow P-state Change
- 2: Pipe0 Cursor0 Allow P-state Change
- 3: Pipe0 Cursor1 Allow P-state Change
- 4: Pipe1 Plane0 Allow P-state Change
- 5: Pipe1 Plane1 Allow P-state Change
- 6: Pipe1 Cursor0 Allow P-state Change
- 7: Pipe1 Cursor1 Allow P-state Change
- 8: Pipe2 Plane0 Allow P-state Change
- 9: Pipe2 Plane1 Allow P-state Change
- 10: Pipe2 Cursor0 Allow P-state Change
- 11: Pipe2 Cursor1 Allow P-state Change
- 12: Pipe3 Plane0 Allow P-state Change
- 13: Pipe3 Plane1 Allow P-state Change
- 14: Pipe3 Cursor0 Allow P-state Change
- 15: Pipe3 Cursor1 Allow P-state Change
- 16: Pipe4 Plane0 Allow P-state Change
- 17: Pipe4 Plane1 Allow P-state Change
- 18: Pipe4 Cursor0 Allow P-state Change
- 19: Pipe4 Cursor1 Allow P-state Change
- 20: Pipe5 Plane0 Allow P-state Change
- 21: Pipe5 Plane1 Allow P-state Change
- 22: Pipe5 Cursor0 Allow P-state Change
- 23: Pipe5 Cursor1 Allow P-state Change
- 24: Pipe6 Plane0 Allow P-state Change
- 25: Pipe6 Plane1 Allow P-state Change
- 26: Pipe6 Cursor0 Allow P-state Change
- 27: Pipe6 Cursor1 Allow P-state Change
- 28: WB0 Allow P-state Change
- 29: WB1 Allow P-state Change
- 30: Arbiter`s Allow P-state Change
- 31: SOC P-state Change request
- */
- /* RV1:
- * dchubbubdebugind, at: 0x7
- * description "3-0: Pipe0 cursor0 QOS
- * 7-4: Pipe1 cursor0 QOS
- * 11-8: Pipe2 cursor0 QOS
- * 15-12: Pipe3 cursor0 QOS
- * 16: Pipe0 Plane0 Allow Pstate Change
- * 17: Pipe1 Plane0 Allow Pstate Change
- * 18: Pipe2 Plane0 Allow Pstate Change
- * 19: Pipe3 Plane0 Allow Pstate Change
- * 20: Pipe0 Plane1 Allow Pstate Change
- * 21: Pipe1 Plane1 Allow Pstate Change
- * 22: Pipe2 Plane1 Allow Pstate Change
- * 23: Pipe3 Plane1 Allow Pstate Change
- * 24: Pipe0 cursor0 Allow Pstate Change
- * 25: Pipe1 cursor0 Allow Pstate Change
- * 26: Pipe2 cursor0 Allow Pstate Change
- * 27: Pipe3 cursor0 Allow Pstate Change
- * 28: WB0 Allow Pstate Change
- * 29: WB1 Allow Pstate Change
- * 30: Arbiter's allow_pstate_change
* 31: SOC pstate change request
*/
@@ -300,7 +243,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -308,6 +251,7 @@ void hubbub1_program_urgent_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
@@ -321,7 +265,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
@@ -331,7 +276,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -344,7 +290,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
@@ -354,7 +301,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -367,7 +315,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
@@ -377,7 +326,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -390,7 +340,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
@@ -400,10 +351,13 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -411,6 +365,7 @@ void hubbub1_program_stutter_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -425,7 +380,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -439,7 +396,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -454,7 +413,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -468,7 +429,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -483,7 +446,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -497,7 +462,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -512,7 +479,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -526,11 +495,14 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+ return wm_pending;
}
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -538,6 +510,7 @@ void hubbub1_program_pstate_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
@@ -552,7 +525,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -567,7 +542,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -582,7 +559,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -597,23 +576,33 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
@@ -627,6 +616,7 @@ void hubbub1_program_watermarks(
DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
+ return wm_pending;
}
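/*
 * Editor's note (not part of the patch): DCHUBBUB_ARB_SAT_LEVEL is
 * programmed in refclk cycles, so 60 * refclk_mhz encodes a 60 us
 * saturation window (refclk_mhz cycles elapse per microsecond). The
 * watermark writes in the elided calculation blocks above presumably use
 * the same ns-to-refclk conversion before landing in the 21-bit registers
 * mentioned in the comment.
 */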
void hubbub1_update_dchub(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index af57751253de..343a537172c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -308,7 +308,7 @@ bool hubbub1_verify_allow_pstate_change_high(
void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -331,17 +331,17 @@ void hubbub1_construct(struct hubbub *hubbub,
const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask);
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1008ac8a0f2a..416afb99529d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -48,8 +48,8 @@
#include "dc_link_dp.h"
#include "dccg.h"
#include "clk_mgr.h"
-
-
+#include "link_hwss.h"
+#include "dpcd_defs.h"
#include "dsc.h"
#define DC_LOGGER_INIT(logger)
@@ -82,7 +82,7 @@ void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
-static void dcn10_lock_all_pipes(struct dc *dc,
+void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
{
@@ -93,6 +93,7 @@ static void dcn10_lock_all_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
+
/*
* Only lock the top pipe's tg to prevent redundant
* (un)locking. Also skip if pipe is disabled.
@@ -103,9 +104,9 @@ static void dcn10_lock_all_pipes(struct dc *dc,
continue;
if (lock)
- tg->funcs->lock(tg);
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
else
- tg->funcs->unlock(tg);
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
}
@@ -900,6 +901,10 @@ static void dcn10_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ if (pipe_ctx->stream_res.abm)
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
@@ -1263,7 +1268,8 @@ void dcn10_init_hw(struct dc *dc)
}
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
return;
}
@@ -1317,6 +1323,31 @@ void dcn10_init_hw(struct dc *dc)
if (hws->funcs.dsc_pg_control != NULL)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+
+ /*
+ * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
+ * which needs to read dpcd info with the help of aconnector.
+ * If aconnector (dc->links[i]->priv) is NULL, then dpcd status
+ * cannot be read.
+ */
+ if (dc->links[i]->priv) {
+ /* if any of the displays are lit up, turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
+
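/*
 * Editor's note on the loop above (not part of the patch): DP_SET_POWER is
 * DPCD address 0x600; the read returns the sink's current power state, and
 * dp_receiver_power_ctrl(link, false) writes DP_POWER_STATE_D3 back to the
 * same register, putting the receiver to sleep before link detection runs.
 * Links without an aconnector (priv == NULL) are skipped because the DPCD
 * read helpers route through it.
 */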
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -1325,6 +1356,9 @@ void dcn10_init_hw(struct dc *dc)
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
hws->funcs.init_pipes(dc, dc->current_state);
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
for (i = 0; i < res_pool->audio_count; i++) {
@@ -1355,8 +1389,8 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
-
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -1576,7 +1610,7 @@ void dcn10_pipe_control_lock(
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
- if (pipe->top_pipe)
+ if (!pipe || pipe->top_pipe)
return;
if (dc->debug.sanity_checks)
@@ -1591,6 +1625,85 @@ void dcn10_pipe_control_lock(
hws->funcs.verify_allow_pstate_change_high(dc);
}
+/**
+ * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
+ *
+ * Software keepout workaround to prevent cursor update locking from stalling
+ * out cursor updates indefinitely, and to keep old values from being retained
+ * when the viewport changes in the same frame as the cursor.
+ *
+ * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
+ * too close to VUPDATE, then stall out until VUPDATE finishes.
+ *
+ * TODO: Optimize cursor programming to be once per frame before VUPDATE
+ * to avoid the need for this workaround.
+ */
+static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct crtc_position position;
+ uint32_t vupdate_start, vupdate_end;
+ unsigned int lines_to_vupdate, us_to_vupdate, vpos;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
+ return;
+
+ if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ /* Avoid wraparound calculation issues */
+ vupdate_start += stream->timing.v_total;
+ vupdate_end += stream->timing.v_total;
+ vpos += stream->timing.v_total;
+
+ if (vpos <= vupdate_start) {
+ /* VPOS is in VACTIVE or back porch. */
+ lines_to_vupdate = vupdate_start - vpos;
+ } else if (vpos > vupdate_end) {
+ /* VPOS is in the front porch. */
+ return;
+ } else {
+ /* VPOS is in VUPDATE. */
+ lines_to_vupdate = 0;
+ }
+
+ /* Calculate time until VUPDATE in microseconds. */
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ /* 70 us is a conservative estimate of cursor update time */
+ if (us_to_vupdate > 70)
+ return;
+
+ /* Stall out until the cursor update completes. */
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+ udelay(us_to_vupdate + us_vupdate);
+}
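/*
 * Editor's note (not part of the patch): unit check for the math above.
 * pix_clk_100hz is the pixel clock in 100 Hz units, so
 *   us_per_line = h_total / (pix_clk_100hz * 100) s
 *               = h_total * 10000 / pix_clk_100hz us,
 * matching the expression in the code. For a 1080p60 timing (h_total =
 * 2200, 148.5 MHz pixel clock -> pix_clk_100hz = 1485000) this gives about
 * 14.8 us per line, so the 70 us keepout covers roughly five scanlines
 * before VUPDATE.
 */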
+
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
+{
+ /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
+ if (!pipe || pipe->top_pipe)
+ return;
+
+ /* Prevent cursor lock from stalling out cursor updates. */
+ if (lock)
+ delay_cursor_until_vupdate(dc, pipe);
+
+ dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+ pipe->stream_res.opp->inst, lock);
+}
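/*
 * Editor's sketch (hypothetical caller, not part of the patch): the
 * intended use of the new hook is to bracket cursor programming so the
 * double-buffered cursor registers latch atomically at VUPDATE. Assumes
 * the set_cursor_* hooks present in hw_sequencer_funcs of this era.
 */
static void program_cursor_locked(struct dc *dc, struct pipe_ctx *pipe)
{
	dc->hwss.cursor_lock(dc, pipe, true);

	if (dc->hwss.set_cursor_position)
		dc->hwss.set_cursor_position(pipe);
	if (dc->hwss.set_cursor_attribute)
		dc->hwss.set_cursor_attribute(pipe);

	dc->hwss.cursor_lock(dc, pipe, false);
}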
+
static bool wait_for_reset_trigger_to_occur(
struct dc_context *dc_ctx,
struct timing_generator *tg)
@@ -1970,6 +2083,12 @@ void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
adjust.temperature_matrix[i] =
pipe_ctx->stream->gamut_remap_matrix.matrix[i];
+ } else if (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
@@ -2090,6 +2209,10 @@ void dcn10_get_hdr_visual_confirm_color(
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
+ } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ /* FreeSync 2 ARGB2101010 - set border color to pink */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
}
break;
case PIXEL_FORMAT_FP16:
@@ -2512,12 +2635,17 @@ void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
uint32_t underflow_check_delay_us;
- bool removed_pipe[4] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
dcn10_find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
+ // Clear pipe_ctx flag
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe_ctx->update_flags.raw = 0;
+ }
+
if (!top_pipe_to_program)
return;
@@ -2531,11 +2659,6 @@ void dcn10_apply_ctx_for_surface(
if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
- if (interdependent_update)
- dcn10_lock_all_pipes(dc, context, true);
- else
- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
-
if (underflow_check_delay_us != 0xFFFFFFFF)
udelay(underflow_check_delay_us);
@@ -2552,18 +2675,6 @@ void dcn10_apply_ctx_for_surface(
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dc->hwss.disable_plane(dc, old_pipe_ctx);
- }
if ((!pipe_ctx->plane_state ||
pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
@@ -2571,7 +2682,7 @@ void dcn10_apply_ctx_for_surface(
old_pipe_ctx->stream_res.tg == tg) {
hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ pipe_ctx->update_flags.bits.disable = 1;
DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
@@ -2597,21 +2708,35 @@ void dcn10_apply_ctx_for_surface(
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
}
+}
- if (interdependent_update)
- dcn10_lock_all_pipes(dc, context, false);
- else
- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+void dcn10_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream) {
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
- if (num_planes == 0)
- false_optc_underflow_wa(dc, stream, tg);
+ if (context->stream_status[i].plane_count == 0)
+ false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
+ }
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i]) {
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
dc->hwss.optimize_bandwidth(dc, context);
break;
}
@@ -2656,7 +2781,7 @@ void dcn10_prepare_bandwidth(
false);
}
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -2693,6 +2818,7 @@ void dcn10_optimize_bandwidth(
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
+
dcn10_stereo_hw_frame_pack_wa(dc, context);
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
@@ -2884,6 +3010,7 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
bool flip_pending;
+ struct dc *dc;

if (plane_state == NULL)
return;
+
+ dc = plane_state->ctx->dc; /* safe: plane_state checked above */
@@ -2901,6 +3028,19 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
}
+
+ if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
+ struct dce_hwseq *hwseq = dc->hwseq;
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+ unsigned int cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
+ }
+ }
}
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
@@ -2960,12 +3100,50 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
- // translate cursor from stream space to plane space
+ /**
+ * DC cursor is stream space, HW cursor is plane space and drawn
+ * as part of the framebuffer.
+ *
+ * Cursor position can't be negative, but hotspot can be used to
+ * shift cursor out of the plane bounds. Hotspot must be smaller
+ * than the cursor size.
+ */
+
+ /**
+ * Translate cursor from stream space to plane space.
+ *
+ * If the cursor is scaled then we need to scale the position
+ * to be in the approximately correct place. We can't do anything
+ * about the actual size being incorrect, that's a limitation of
+ * the hardware.
+ */
x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ /**
+ * If the cursor's source viewport is clipped then we need to
+ * translate the cursor to appear in the correct position on
+ * the screen.
+ *
+ * This translation isn't affected by scaling so it needs to be
+ * done *after* we adjust the position for the scale factor.
+ *
+ * This is only done by opt-in for now since there are still
+ * some usecases like tiled display that might enable the
+ * cursor on both streams while expecting dc to clip it.
+ */
+ if (pos_cpy.translate_by_source) {
+ x_pos += pipe_ctx->plane_state->src_rect.x;
+ y_pos += pipe_ctx->plane_state->src_rect.y;
+ }
+
+ /**
+ * If the position is negative then we need to add to the hotspot
+ * to shift the cursor outside the plane.
+ */
+
if (x_pos < 0) {
pos_cpy.x_hotspot -= x_pos;
x_pos = 0;
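/*
 * Editor's worked example (illustrative numbers, not part of the patch):
 * for a plane scaled from src_rect 1920x1080 down to dst_rect 960x540 with
 * x_plane = 100, a stream-space cursor at x = 580 maps to
 * (580 - 100) * 1920 / 960 = 960 in plane space. With translate_by_source
 * set and src_rect.x = 200, the final position is 960 + 200 = 1160. Had
 * the result gone negative, the x_pos < 0 branch above would fold the
 * overshoot into the hotspot instead.
 */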
@@ -3127,7 +3305,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
-static void dcn10_calc_vupdate_position(
+void dcn10_calc_vupdate_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 4d20f6586bb5..42b6e016d71e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -34,6 +34,11 @@ struct dc;
void dcn10_hw_sequencer_construct(struct dc *dc);
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void dcn10_calc_vupdate_position(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+ uint32_t *end_line);
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
@@ -49,6 +54,7 @@ void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
void dcn10_blank_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -70,11 +76,18 @@ void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_lock_all_pipes(
+ struct dc *dc,
+ struct dc_state *context,
+ bool lock);
void dcn10_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
int num_planes,
struct dc_state *context);
+void dcn10_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
void dcn10_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index e7e5352ec424..9e8e32629e47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -32,6 +32,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.init_hw = dcn10_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -49,6 +50,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn10_pipe_control_lock,
+ .cursor_lock = dcn10_cursor_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
.set_drr = dcn10_set_drr,
@@ -69,6 +72,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
@@ -85,6 +89,8 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 1a37c90e9d43..d3617d6785a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -782,6 +782,11 @@ bool dcn10_link_encoder_validate_output_with_stream(
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
bool is_valid;
+ //if SCDC (340-600MHz) is disabled, set to HDMI 1.4 timing limit
+ if (stream->sink->edid_caps.panel_patch.skip_scdc_overwrite &&
+ enc10->base.features.max_hdmi_pixel_clock > 300000)
+ enc10->base.features.max_hdmi_pixel_clock = 300000;
+
switch (stream->signal) {
case SIGNAL_TYPE_DVI_SINGLE_LINK:
case SIGNAL_TYPE_DVI_DUAL_LINK:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index eb13589b9a81..762109174fb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -62,11 +62,11 @@
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
- SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
#define LE_DCN10_REG_LIST(id)\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
LE_DCN_COMMON_REG_LIST(id)
struct dcn10_link_enc_aux_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 04f863499cfb..3fcd408e9103 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
+ /* Configure VUPDATE lock set for this MPCC to map to the OPP */
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
+
/* update mpc tree mux setting */
if (tree->opp_list == insert_above_mpcc) {
/* insert the topmost mpcc */
@@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
/* mark this mpcc as not in use */
mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
@@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
}
}
@@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
}
@@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
@@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
MPCC_BUSY, &s->busy);
}
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
+}
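/*
 * Editor's note (not part of the patch): this pairs with the new
 * MPCC_UPDATE_LOCK_SEL programming above. mpc1_insert_plane() maps every
 * MPCC in a tree to its OPP's VUPDATE lock group (and removal/init reset
 * the mapping to 0xf), so asserting CUR_VUPDATE_LOCK_SET for one opp_id
 * holds the double-buffered cursor state for all pipes blended into that
 * stream. That is why dcn10_cursor_lock() only locks once per MPCC tree.
 */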
+
static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.update_blending = mpc1_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
.set_denorm = NULL,
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 962a68e322ee..66a4719c22a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -39,11 +39,12 @@
SRII(MPCC_BG_G_Y, MPCC, inst),\
SRII(MPCC_BG_R_CR, MPCC, inst),\
SRII(MPCC_BG_B_CB, MPCC, inst),\
- SRII(MPCC_BG_B_CB, MPCC, inst),\
- SRII(MPCC_SM_CONTROL, MPCC, inst)
+ SRII(MPCC_SM_CONTROL, MPCC, inst),\
+ SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)
#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
- SRII(MUX, MPC_OUT, inst)
+ SRII(MUX, MPC_OUT, inst),\
+ VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)
#define MPC_COMMON_REG_VARIABLE_LIST \
uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
@@ -55,7 +56,9 @@
uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
- uint32_t MUX[MAX_OPP];
+ uint32_t MUX[MAX_OPP]; \
+ uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
+ uint32_t CUR[MAX_OPP];
#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
@@ -78,7 +81,8 @@
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
- SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+ SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
+ SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)
#define MPC_REG_FIELD_LIST(type) \
type MPCC_TOP_SEL;\
@@ -101,7 +105,9 @@
type MPCC_SM_FIELD_ALT;\
type MPCC_SM_FORCE_NEXT_FRAME_POL;\
type MPCC_SM_FORCE_NEXT_TOP_POL;\
- type MPC_OUT_MUX;
+ type MPC_OUT_MUX;\
+ type MPCC_UPDATE_LOCK_SEL;\
+ type CUR_VUPDATE_LOCK_SET;
struct dcn_mpc_registers {
MPC_COMMON_REG_VARIABLE_LIST
@@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
int mpcc_inst,
struct mpcc_state *s);
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index a9a43b397db9..17d96ec6acd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc,
uint32_t asic_blank_end;
uint32_t v_init;
uint32_t v_fp2 = 0;
- int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc,
patched_crtc_timing.v_border_top;
/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
- vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
- if (vertical_line_start < 0)
- v_fp2 = -vertical_line_start;
+ if (optc1->vstartup_start > asic_blank_end)
+ v_fp2 = optc1->vstartup_start - asic_blank_end;
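/*
 * Editor's note (not part of the patch): besides avoiding a negative
 * intermediate, the rewrite shifts v_fp2 by one line. The old code yielded
 * v_fp2 = vstartup_start - asic_blank_end - 1 (and only when
 * vstartup_start > asic_blank_end + 1); the new code yields
 * vstartup_start - asic_blank_end whenever vstartup_start > asic_blank_end,
 * e.g. 1000 - 996 = 4 where the old math gave 3.
 */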
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
@@ -345,6 +343,23 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
}
/**
+ * optc1_set_timing_double_buffer() - DRR double buffering control
+ *
+ * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN,
+ * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers.
+ *
+ * Options: any time, start of frame, DP start of frame (range timing)
+ */
+void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t mode = enable ? 2 : 0;
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mode);
+}
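/*
 * Editor's note (assumption, not part of the patch): per the kdoc above,
 * OTG_RANGE_TIMING_DBUF_UPDATE_MODE selects when the V/H_TOTAL register
 * group latches. The values used here suggest the mapping:
 *
 *   0 - update any time (double buffering off, used on disable)
 *   1 - update at start of frame (unused here)
 *   2 - update at DP start of frame / range timing (used on enable, so DRR
 *       VTOTAL_MIN/MAX changes land atomically)
 *
 * Only 0 and 2 are confirmed by the code; the meaning of 1 is inferred
 * from the "Options:" list in the comment.
 */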
+
+/**
* unblank_crtc
* Call ASIC Control Object to UnBlank CRTC.
*/
@@ -1195,7 +1210,7 @@ static void optc1_enable_stereo(struct timing_generator *optc,
REG_UPDATE_3(OTG_STEREO_CONTROL,
OTG_STEREO_EN, stereo_en,
OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
- OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+ OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
if (flags->PROGRAM_POLARITY)
REG_UPDATE(OTG_STEREO_CONTROL,
@@ -1355,6 +1370,7 @@ void optc1_clear_optc_underflow(struct timing_generator *optc)
void optc1_tg_init(struct timing_generator *optc)
{
optc1_set_blank_data_double_buffer(optc, true);
+ optc1_set_timing_double_buffer(optc, true);
optc1_clear_optc_underflow(optc);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index f277656d5464..9a459a8fe8a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -185,6 +185,7 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
@@ -643,6 +644,8 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable);
+
bool optc1_get_otg_active_size(struct timing_generator *optc,
uint32_t *otg_active_width,
uint32_t *otg_active_height);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 3b71898e859e..ba849aa31e6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id {
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## 0 ## _ ## block ## id
+
+/* set field/register/bitfield name */
+#define SFRB(field_name, reg_name, bitfield, post_fix)\
+ .field_name = reg_name ## __ ## bitfield ## post_fix
+
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIF_BASE__INST0_SEG ## seg
@@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = {
};
static const struct dcn_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
+ SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
};
static const struct dcn_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
+ SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
};
#define tg_regs(id)\
@@ -552,7 +562,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -570,7 +581,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -584,7 +595,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = true,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
@@ -598,7 +609,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -1233,7 +1244,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
return DC_OK;
}
-static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -1295,7 +1306,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
- .get_default_swizzle_mode = dcn10_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 376c4264d295..7eba9333c328 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1667,5 +1667,6 @@ void dcn10_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 50bffbfdd394..62cc2651e00c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -70,6 +70,8 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
+
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 13e057d7ee93..42bba7c9548b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes(
}
}
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
-
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps)
-{
- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
- dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
- return false;
-
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
- /* TODO: add lb check */
-
- /* No support for programming ratio of 8, drop to 7.99999.. */
- if (scl_data->ratios.horz.value == (8ll << 32))
- scl_data->ratios.horz.value--;
- if (scl_data->ratios.vert.value == (8ll << 32))
- scl_data->ratios.vert.value--;
- if (scl_data->ratios.horz_c.value == (8ll << 32))
- scl_data->ratios.horz_c.value--;
- if (scl_data->ratios.vert_c.value == (8ll << 32))
- scl_data->ratios.vert_c.value--;
-
- /* Set default taps if none are provided */
- if (in_taps->h_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
- scl_data->taps.h_taps = 8;
- else
- scl_data->taps.h_taps = 4;
- } else
- scl_data->taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
- scl_data->taps.v_taps = 8;
- else
- scl_data->taps.v_taps = 4;
- } else
- scl_data->taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
- scl_data->taps.v_taps_c = 4;
- else
- scl_data->taps.v_taps_c = 2;
- } else
- scl_data->taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
- scl_data->taps.h_taps_c = 4;
- else
- scl_data->taps.h_taps_c = 2;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- scl_data->taps.h_taps_c = in_taps->h_taps_c;
-
- if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
- scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
- scl_data->taps.v_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.horz_c))
- scl_data->taps.h_taps_c = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert_c))
- scl_data->taps.v_taps_c = 1;
- }
-
- return true;
-}
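/*
 * Editor's note on the block removed above (the helper is presumably
 * consolidated with the common DCN implementation elsewhere; the diff only
 * shows the removal). IDENTITY_RATIO tested a 31.32 fixed-point ratio for
 * exactly 1.0 by converting to unsigned 3.19 format, where 1.0 == 1 << 19.
 * A standalone equivalent, ignoring the driver's rounding:
 */
#include <stdbool.h>
#include <stdint.h>

#define U3D19_ONE (1u << 19)

static bool identity_ratio_u3d19(uint64_t ratio_31_32)
{
	/* drop 13 fractional bits: 32-bit fraction -> 19-bit fraction */
	uint32_t u3d19 = (uint32_t)(ratio_31_32 >> 13) & ((1u << 22) - 1);

	return u3d19 == U3D19_ONE;
}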
-
void oppn20_dummy_program_regamma_pwl(
struct dpp *dpp,
const struct pwl_params *params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 6bdfee20b6a7..1b1ae9ce2799 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
+ dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
@@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons
reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
- reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf;
}
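/*
 * Editor's note (not part of the patch): ich_reset_at_eol is a 4-bit
 * per-slice mask controlling the DSC indexed-color-history reset at end of
 * line. The old code derived it late, from num_slices_h alone; the new
 * assignment in dsc_prepare_config() also forces the reset (0xF = all four
 * slices) when ODM combine is active, since each DSC instance then encodes
 * only a horizontal slice of the picture.
 */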
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 9235f7d29454..c0b21d7450d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -562,19 +562,23 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
}
}
-static void hubbub2_program_watermarks(
+static bool hubbub2_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
/*
* There's a special case when going from p-state support to p-state unsupported
@@ -592,6 +596,7 @@ static void hubbub2_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ return wm_pending;
}
static const struct hubbub_funcs hubbub2_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index a444fed94184..a023a4d59f41 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -307,7 +307,8 @@ void dcn20_init_blank(
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
if (num_opps == 2) {
bottom_opp->funcs->opp_set_disp_pattern_generator(
@@ -317,7 +318,8 @@ void dcn20_init_blank(
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
}
hws->funcs.wait_for_blank_complete(opp);
@@ -645,6 +647,9 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
+ dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -974,7 +979,8 @@ void dcn20_blank_pixel_data(
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
@@ -985,7 +991,8 @@ void dcn20_blank_pixel_data(
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
}
if (!blank)
@@ -1088,29 +1095,6 @@ void dcn20_enable_plane(
// }
}
-
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock)
-{
- if (lock) {
- pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
- } else {
- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe->stream_res.tg);
- }
-}
-
void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1121,7 +1105,7 @@ void dcn20_pipe_control_lock(
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
- if (pipe->top_pipe)
+ if (!pipe || pipe->top_pipe)
return;
if (pipe->plane_state != NULL)
@@ -1389,6 +1373,7 @@ static void dcn20_update_dchubp_dpp(
}
if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
@@ -1536,48 +1521,32 @@ static void dcn20_program_pipe(
}
}
-static bool does_pipe_need_lock(struct pipe_ctx *pipe)
-{
- if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
- || pipe->update_flags.raw)
- return true;
- if (pipe->bottom_pipe)
- return does_pipe_need_lock(pipe->bottom_pipe);
-
- return false;
-}
-
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
{
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
int i;
struct dce_hwseq *hws = dc->hwseq;
- bool pipe_locked[MAX_PIPES] = {false};
DC_LOGGER_INIT(dc->ctx->logger);
- /* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
- context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
- dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL &&
+ !dc->debug.disable_tri_buf) {
+ /* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+ }
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (!context->res_ctx.pipe_ctx[i].top_pipe &&
- does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
- if (!pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
- pipe_locked[i] = true;
- }
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1615,17 +1584,17 @@ void dcn20_program_front_end_for_ctx(
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}
+}
- /* Unlock all locked pipes */
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (pipe_locked[i]) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ struct dce_hwseq *hwseq = dc->hwseq;
- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
- if (!pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
- }
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -1651,11 +1620,26 @@ void dcn20_program_front_end_for_ctx(
}
/* WA to apply WM setting*/
- if (dc->hwseq->wa.DEGVIDCN21)
+ if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-}
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
+ }
+ }
+}
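/*
 * Editor's note (not part of the patch): this workaround is a two-step
 * state machine shared with dcn10_update_pending_status() earlier in the
 * diff. Sketch of the lifecycle:
 *
 *   frame N:   stream goes from 1 plane to 2 or more ->
 *              allow_self_refresh_control(hubbub, false);
 *              wa_state.applied = true; wa_state.applied_on_frame = N;
 *   later:     update_pending_status() sees get_frame_count() != N ->
 *              allow_self_refresh_control(hubbub, !debug.disable_stutter);
 *              wa_state.applied = false;
 *
 * i.e. stutter/self-refresh is held off until a frame after the one in
 * which the second plane appeared.
 */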
+
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
@@ -1668,7 +1652,7 @@ void dcn20_prepare_bandwidth(
false);
/* program dchubbub watermarks */
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2052,6 +2036,10 @@ static void dcn20_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ if (pipe_ctx->stream_res.abm)
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
@@ -2306,7 +2294,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(REFCLK_CNTL, 0);
+ if (REG(REFCLK_CNTL))
+ REG_WRITE(REFCLK_CNTL, 0);
//
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 02c9be5ebd47..63ce763f148e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -35,6 +35,9 @@ bool dcn20_set_shaper_3dlut(
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context);
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
@@ -58,10 +61,6 @@ void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 5e640f17d3d4..8334bbd6eabb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -33,6 +33,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -50,7 +51,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -81,6 +83,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
@@ -96,6 +99,8 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
index 3fccd5eeecbb..7bcee5894d2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
@@ -36,26 +36,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SR(reg_name)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SRI2(reg_name, block, id)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
-
#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index de9c857ab3e9..570dfd9a243f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.mpc_init = mpc1_mpc_init,
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
.wait_for_idle = mpc2_assert_idle_mpcc,
.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
index c78fd5123497..496658f420db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -179,7 +179,8 @@
SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
- SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+ SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
/*
* DCN2 MPC_OCSC debug status register:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 023cc71fad0f..138321e151eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator(
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height)
+ int height,
+ int offset)
{
struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
enum test_pattern_color_format bit_depth;
@@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator(
DPG_ACTIVE_WIDTH, width,
DPG_ACTIVE_HEIGHT, height);
+ /* set DPG offset */
+ REG_SET_2(DPG_OFFSET_SEGMENT, 0,
+ DPG_X_OFFSET, offset,
+ DPG_SEGMENT_WIDTH, 0);
+
switch (test_pattern) {
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 4093bec172c1..64c5b429c79a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -36,6 +36,7 @@
#define OPP_DPG_REG_LIST(id) \
SRI(DPG_CONTROL, DPG, id), \
SRI(DPG_DIMENSIONS, DPG, id), \
+ SRI(DPG_OFFSET_SEGMENT, DPG, id), \
SRI(DPG_COLOUR_B_CB, DPG, id), \
SRI(DPG_COLOUR_G_Y, DPG, id), \
SRI(DPG_COLOUR_R_CR, DPG, id), \
@@ -53,6 +54,7 @@
uint32_t FMT_422_CONTROL; \
uint32_t DPG_CONTROL; \
uint32_t DPG_DIMENSIONS; \
+ uint32_t DPG_OFFSET_SEGMENT; \
uint32_t DPG_COLOUR_B_CB; \
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
@@ -68,6 +70,8 @@
OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \
@@ -97,6 +101,8 @@
type DPG_HRES; \
type DPG_ACTIVE_WIDTH; \
type DPG_ACTIVE_HEIGHT; \
+ type DPG_X_OFFSET; \
+ type DPG_SEGMENT_WIDTH; \
type DPG_COLOUR0_R_CR; \
type DPG_COLOUR1_R_CR; \
type DPG_COLOUR0_B_CB; \
@@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator(
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e310d67c399a..e4348e3b6389 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
@@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
- .ptoi_supported = 0
+ .ptoi_supported = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
@@ -506,6 +508,10 @@ enum dcn20_clk_src_array_id {
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
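This variant differs from the SRII defined just above only in token order: the pasted register symbol puts the register name before the block and instance. A hypothetical expansion (VERT_INT_CONTROL is an illustrative name, not from this patch):

/* VUPDATE_SRII(VERT_INT_CONTROL, OTG, 0) expands roughly to: */
.VERT_INT_CONTROL[0] = BASE(mmVERT_INT_CONTROL_OTG0_BASE_IDX) +
	mmVERT_INT_CONTROL_OTG0
/* plain SRII would instead paste mmOTG0_VERT_INT_CONTROL; the dedicated
 * macro presumably exists because these registers are named with the
 * register first and the block/instance last. */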
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
@@ -1010,7 +1016,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -1039,7 +1046,7 @@ static const struct resource_caps res_cap_nv14 = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -1058,7 +1065,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -1254,6 +1261,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1668,7 +1676,7 @@ static void acquire_dsc(struct resource_context *res_ctx,
}
}
-static void release_dsc(struct resource_context *res_ctx,
+void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc)
{
@@ -1728,7 +1736,7 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream_res.dsc)
- release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
+ dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
}
}
@@ -1972,22 +1980,6 @@ void dcn20_populate_dml_writeback_from_context(
}
-static int get_num_odm_heads(struct pipe_ctx *pipe)
-{
- int odm_head_count = 0;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
- while (next_pipe) {
- odm_head_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
- odm_head_count++;
- pipe = pipe->prev_odm_pipe;
- }
- return odm_head_count ? odm_head_count + 1 : 0;
-}
-
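The deleted helper above counted ODM heads, returning 2 for a single 2-to-1 combine, while the shared get_num_odm_splits() that replaces it counts the splits between pipes, returning 1 for the same topology; that is why the switch case in the hunk below changes from 2 to 1. A hedged sketch of the assumed replacement semantics (the real helper lives in the core resource code):

static int get_num_odm_splits_sketch(struct pipe_ctx *pipe)
{
	int splits = 0;
	struct pipe_ctx *next = pipe->next_odm_pipe;

	while (next) {			/* pipes combined after this one */
		splits++;
		next = next->next_odm_pipe;
	}
	pipe = pipe->prev_odm_pipe;
	while (pipe) {			/* pipes combined before this one */
		splits++;
		pipe = pipe->prev_odm_pipe;
	}
	return splits;			/* 1 for a 2-to-1 combine, 0 if unsplit */
}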
int dcn20_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
@@ -2067,8 +2059,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
- case 2:
+ switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
+ case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
default:
@@ -2076,9 +2068,14 @@ int dcn20_populate_dml_pipes_from_context(
}
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
- == res_ctx->pipe_ctx[i].plane_state)
- pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
- else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
+ == res_ctx->pipe_ctx[i].plane_state) {
+ struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe;
+
+ while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
+ == res_ctx->pipe_ctx[i].plane_state)
+ first_pipe = first_pipe->top_pipe;
+ pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ } else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
while (first_pipe->prev_odm_pipe)
@@ -2163,16 +2160,20 @@ int dcn20_populate_dml_pipes_from_context(
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
- * Use max cursor settings for calculations to minimize
+	 * For a graphics plane the cursor count is 1 and for nv12 it is 0, avoiding extra
* bw calculations due to cursor on/off
*/
- pipes[pipe_cnt].pipe.src.num_cursors = 2;
+ if (res_ctx->pipe_ctx[i].plane_state &&
+ res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pipes[pipe_cnt].pipe.src.num_cursors = 0;
+ else
+ pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
+
pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
- pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
- pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
if (!res_ctx->pipe_ctx[i].plane_state) {
+ pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
@@ -2198,19 +2199,21 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
- pipes[pipe_cnt].pipe.src.is_hsplit = 0;
- pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
+
+ if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) {
+ pipes[pipe_cnt].pipe.src.viewport_width /= 2;
+ pipes[pipe_cnt].pipe.dest.recout_width /= 2;
+ }
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
- pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
- && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
- || (res_ctx->pipe_ctx[i].top_pipe
- && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
+ pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
+ || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln)
+ || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
@@ -2235,18 +2238,22 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
- pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
- if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
- } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
+ pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
+ if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
+ pipes[pipe_cnt].pipe.dest.full_recout_width *= 2;
+ else {
+ struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pln) {
+ pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ split_pipe = res_ctx->pipe_ctx[i].top_pipe;
+ while (split_pipe && split_pipe->plane_state == pln) {
+ pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->top_pipe;
+ }
}
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
@@ -2413,6 +2420,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+ stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -2499,7 +2507,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
}
-void dcn20_merge_pipes_for_validate(
+static void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context)
{
@@ -2524,7 +2532,7 @@ void dcn20_merge_pipes_for_validate(
odm_pipe->prev_odm_pipe = NULL;
odm_pipe->next_odm_pipe = NULL;
if (odm_pipe->stream_res.dsc)
- release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
+ dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
/* Clear plane_res and stream_res */
memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@@ -2562,41 +2570,29 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split)
+ bool *split,
+ bool *merge)
{
int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
bool force_split = false;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
- /* Single display loop, exits if there is more than one display */
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- bool exit_loop = false;
- if (!pipe->stream || pipe->top_pipe)
- continue;
-
- if (dc->debug.force_single_disp_pipe_split) {
- if (!force_split)
- force_split = true;
- else {
- force_split = false;
- exit_loop = true;
- }
- }
- if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
- if (avoid_split)
- avoid_split = false;
- else {
- avoid_split = true;
- exit_loop = true;
- }
- }
- if (exit_loop)
- break;
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
}
- /* TODO: fix dc bugs and remove this split threshold thing */
- if (context->stream_count > dc->res_pool->pipe_count / 2)
+ if (plane_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
@@ -2619,11 +2615,12 @@ int dcn20_validate_apply_pipe_split_flags(
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+ if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
split[i] = true;
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
@@ -2636,10 +2633,44 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
+ /*Already split odm pipe tree, don't try to split again*/
+ split[i] = false;
+ split[pipe->prev_odm_pipe->pipe_idx] = false;
+ } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
+ && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
+ split[i] = false;
+ split[pipe->top_pipe->pipe_idx] = false;
+ } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
+ if (split[i] == false) {
+ /*Exiting mpc/odm combine*/
+ merge[i] = true;
+ if (pipe->prev_odm_pipe) {
+ ASSERT(0); /*should not actually happen yet*/
+ merge[pipe->prev_odm_pipe->pipe_idx] = true;
+ } else
+ merge[pipe->top_pipe->pipe_idx] = true;
+ } else {
+ /*Transition from mpc combine to odm combine or vice versa*/
+ ASSERT(0); /*should not actually happen yet*/
+ split[i] = true;
+ merge[i] = true;
+ if (pipe->prev_odm_pipe) {
+ split[pipe->prev_odm_pipe->pipe_idx] = true;
+ merge[pipe->prev_odm_pipe->pipe_idx] = true;
+ } else {
+ split[pipe->top_pipe->pipe_idx] = true;
+ merge[pipe->top_pipe->pipe_idx] = true;
+ }
+ }
+ }
+
/* Adjust dppclk when split is forced, do not bother with dispclk */
if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
@@ -2681,7 +2712,7 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)
@@ -2901,6 +2932,9 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
+
/*
* An artifact of dml pipe split/odm is that pipes get merged back together for
* calculation. Therefore we need to only extract for first pipe in ascending index order
@@ -3034,25 +3068,32 @@ validate_out:
return out;
}
-
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+/*
+ * This must be noinline to ensure that anything dealing with FP registers
+ * is contained within this call; previously, compiling with hard-float
+ * would result in fp instructions being emitted outside of the boundaries
+ * of the DC_FP_START/END macros, which makes sense as the compiler has no
+ * idea about what is wrapped and what is not.
+ *
+ * This is largely just a workaround to avoid breakage introduced with 5.6;
+ * ideally all fp-using code should be moved into its own file, only that
+ * file should be compiled with hard-float, and all code exported from there
+ * should be strictly wrapped with DC_FP_START/END.
+ */
+static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
+ struct dc_state *context, bool fast_validate)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
bool dummy_pstate_supported = false;
double p_state_latency_us;
- DC_FP_START();
p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
dc->debug.disable_dram_clock_change_vactive_support;
if (fast_validate) {
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
-
- DC_FP_END();
- return voltage_supported;
+ return dcn20_validate_bandwidth_internal(dc, context, true);
}
// Best case, we support full UCLK switch latency
@@ -3081,7 +3122,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
restore_dml_state:
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+ return voltage_supported;
+}
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported = false;
+ DC_FP_START();
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
return voltage_supported;
}
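A minimal sketch of the wrapping pattern described in the comment above dcn20_validate_bandwidth_fp(), with hypothetical function names: all floating-point math stays inside the noinline body, and the thin exported wrapper brackets the call with the FP-state macros.

static noinline bool check_ratio_fp(int num, int den)
{
	/* all FP instructions are confined to this noinline body */
	return ((double)num / (double)den) > 0.5;
}

bool check_ratio(int num, int den)
{
	bool ok;

	DC_FP_START();		/* save FP context, permit kernel FP use */
	ok = check_ratio_fp(num, den);
	DC_FP_END();		/* restore FP context */
	return ok;
}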
@@ -3138,7 +3187,7 @@ static struct dc_cap_funcs cap_funcs = {
};
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -3164,7 +3213,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
@@ -3313,7 +3362,7 @@ void dcn20_cap_soc_clocks(
void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
+ struct _vcs_dpi_voltage_scaling_st calculated_states[DC__VOLTAGE_STATES];
int i;
int num_calculated_states = 0;
int min_dcfclk = 0;
@@ -3886,6 +3935,15 @@ static bool dcn20_resource_construct(
dcn20_hw_sequencer_construct(dc);
+ // If NV12 (Navi12), set the PG function pointer to NULL. It's not that
+ // PG isn't supported for NV12; it's that we don't want to program the
+ // registers, because that would cause more power to be consumed. We
+ // could have created dcn20_init_hw to get the same effect by checking
+ // the ASIC rev, but there was a request at some point not to check the
+ // ASIC rev in the hw sequencer.
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ dc->hwseq->funcs.enable_power_gating_plane = NULL;
+
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index f5893840b79b..9d5bff9455fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -119,14 +119,15 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
-void dcn20_merge_pipes_for_validate(
- struct dc *dc,
- struct dc_state *context);
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split);
+ bool *split,
+ bool *merge);
+void dcn20_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc);
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
@@ -159,7 +160,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
void dcn20_patch_bounding_box(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 9b70a1e7b962..99a7ef6ab878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -616,5 +616,6 @@ void dcn20_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
index 02fafb013fc6..f1ef46e8da5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
@@ -34,13 +34,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
#define DCN20_VMID_REG_LIST(id)\
SRI(CNTL, DCN_VM_CONTEXT, id),\
SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index f546260c15b7..5e2d14b897af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -141,7 +141,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
return NUM_VMID;
}
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -149,6 +149,7 @@ void hubbub21_program_urgent_watermarks(
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
@@ -163,7 +164,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -172,7 +174,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -180,14 +184,18 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
+
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -201,7 +209,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -210,7 +219,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -218,7 +229,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
@@ -226,7 +239,8 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -240,7 +254,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -249,7 +264,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -257,7 +274,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
@@ -265,7 +284,8 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -279,7 +299,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -288,7 +309,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -296,7 +319,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
@@ -304,10 +329,13 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -315,6 +343,7 @@ void hubbub21_program_stutter_watermarks(
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -330,7 +359,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -345,7 +376,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -361,7 +394,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -376,7 +411,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -392,7 +429,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -407,7 +446,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -423,7 +464,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -438,10 +481,14 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -450,6 +497,8 @@ void hubbub21_program_pstate_watermarks(
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
+
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
@@ -464,7 +513,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -480,7 +531,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -496,7 +549,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -512,20 +567,30 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+
+ if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
- hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
/*
* The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
@@ -549,6 +614,8 @@ void hubbub21_program_watermarks(
DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+ return wm_pending;
}
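Each of the three helpers above now reports whether lowering a watermark had to be deferred, and hubbub21_program_watermarks() ORs the results together. The per-register decision reduces to this hedged sketch (names hypothetical):

static bool program_one_watermark(uint32_t *shadow, uint32_t wanted,
				  bool safe_to_lower)
{
	if (safe_to_lower || wanted > *shadow) {
		*shadow = wanted;
		/* the real code would REG_SET() the new value here */
		return false;		/* applied, nothing pending */
	}
	return wanted < *shadow;	/* lowering deferred until safe */
}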
void hubbub21_wm_read_state(struct hubbub *hubbub,
@@ -635,6 +702,7 @@ static const struct hubbub_funcs hubbub21_funcs = {
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index c4840dfb1fa5..ef3ef28509ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -113,22 +113,22 @@
void dcn21_dchvm_init(struct hubbub *hubbub);
int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index cf09b9335728..d285ba622d61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -79,32 +79,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- uint32_t cur_value;
+ uint32_t refcyc_per_vm_group_vblank;
+ uint32_t refcyc_per_vm_req_vblank;
+ uint32_t refcyc_per_vm_group_flip;
+ uint32_t refcyc_per_vm_req_flip;
+ const uint32_t uninitialized_hw_default = 0;
- REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_group_vblank)
+ REG_GET(VBLANK_PARAMETERS_5,
+ REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank);
+
+ if (refcyc_per_vm_group_vblank == uninitialized_hw_default ||
+ refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank)
REG_SET(VBLANK_PARAMETERS_5, 0,
REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
REG_GET(VBLANK_PARAMETERS_6,
- REFCYC_PER_VM_REQ_VBLANK,
- &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_req_vblank)
+ REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank);
+
+ if (refcyc_per_vm_req_vblank == uninitialized_hw_default ||
+ refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank)
REG_SET(VBLANK_PARAMETERS_6, 0,
REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
- REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_group_flip)
+ REG_GET(FLIP_PARAMETERS_3,
+ REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip);
+
+ if (refcyc_per_vm_group_flip == uninitialized_hw_default ||
+ refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip)
REG_SET(FLIP_PARAMETERS_3, 0,
REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
- REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_req_flip)
+ REG_GET(FLIP_PARAMETERS_4,
+ REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip);
+
+ if (refcyc_per_vm_req_flip == uninitialized_hw_default ||
+ refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip)
REG_SET(FLIP_PARAMETERS_4, 0,
REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
REG_SET(FLIP_PARAMETERS_5, 0,
REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c);
+
REG_SET(FLIP_PARAMETERS_6, 0,
REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c);
}
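The rewrite above gives each register its own named temporary and treats a hardware value of 0 as never programmed. The resulting decision rule, distilled into a sketch with hypothetical names:

static uint32_t pick_vm_deadline(uint32_t hw_value, uint32_t wanted)
{
	const uint32_t uninitialized_hw_default = 0;

	/* program if untouched, or if the new deadline is stricter (smaller) */
	if (hw_value == uninitialized_hw_default || hw_value > wanted)
		return wanted;
	return hw_value;
}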
@@ -325,13 +340,9 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
- // The format of default addr is 48:12 of the 48 bit addr
- mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
-
// The format of high/low are 48:18 of the 48 bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 081ad8e43d58..ada65b1a7eb1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -112,3 +112,25 @@ void dcn21_optimize_pwr_state(
true);
}
+/* If the user hotplugs an HDMI monitor while the monitor is off,
+ * the OS will do a mode set (with output timing) but keep the output off.
+ * In this case DAL will ask the vbios to power up the PLL in the PHY.
+ * If the user unplugs the monitor (while we are in monitor off) or the
+ * system attempts to enter modern standby (in which we will disable the PLL),
+ * the PHY will hang on the next mode set attempt.
+ * If a PLL enable is followed by a PLL disable (without executing lane enable/disable),
+ * RDPCS_PHY_DP_MPLLB_STATE remains 1,
+ * which indicates that the PLL disable attempt didn't actually go through.
+ * As a workaround, insert a PHY lane enable/disable before the PLL disable.
+ */
+void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx->stream->dpms_off)
+ return;
+
+ pipe_ctx->stream->dpms_off = false;
+ core_link_enable_stream(context, pipe_ctx);
+ core_link_disable_stream(pipe_ctx);
+ pipe_ctx->stream->dpms_off = true;
+}
+
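A hypothetical call site for the hook registered below in dcn21_private_funcs; the guard and invocation are an assumption for illustration, not part of this patch:

	/* hypothetical caller: not every ASIC registers the workaround */
	if (dc->hwseq->funcs.PLAT_58856_wa)
		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);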
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index 182736096123..26bf24d3b59f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -44,4 +44,7 @@ void dcn21_optimize_pwr_state(
const struct dc *dc,
struct dc_state *context);
+void dcn21_PLAT_58856_wa(struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index fddbd59bf4f9..4dd634118df2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -51,7 +52,8 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -84,6 +86,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
@@ -104,6 +107,8 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -127,6 +132,7 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn20_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+ .PLAT_58856_wa = dcn21_PLAT_58856_wa,
};
void dcn21_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 33d0a176841a..a721bb401ef0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -84,7 +84,7 @@
#include "dcn21_resource.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -156,17 +156,18 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
- .ptoi_supported = 0
+ .ptoi_supported = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.clock_limits = {
{
.state = 0,
- .dcfclk_mhz = 304.0,
- .fabricclk_mhz = 600.0,
- .dispclk_mhz = 618.0,
- .dppclk_mhz = 440.0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 400.00,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
@@ -174,10 +175,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
{
.state = 1,
- .dcfclk_mhz = 304.0,
- .fabricclk_mhz = 600.0,
- .dispclk_mhz = 618.0,
- .dppclk_mhz = 618.0,
+ .dcfclk_mhz = 464.52,
+ .fabricclk_mhz = 800.0,
+ .dispclk_mhz = 654.55,
+ .dppclk_mhz = 626.09,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
@@ -185,32 +186,65 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
{
.state = 2,
- .dcfclk_mhz = 608.0,
- .fabricclk_mhz = 1066.0,
- .dispclk_mhz = 888.0,
- .dppclk_mhz = 888.0,
- .phyclk_mhz = 810.0,
+ .dcfclk_mhz = 514.29,
+ .fabricclk_mhz = 933.0,
+ .dispclk_mhz = 757.89,
+ .dppclk_mhz = 685.71,
+ .phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 287.67,
- .dram_speed_mts = 2133.0,
+ .dram_speed_mts = 1866.0,
},
{
.state = 3,
- .dcfclk_mhz = 676.0,
- .fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dcfclk_mhz = 576.00,
+ .fabricclk_mhz = 1067.0,
+ .dispclk_mhz = 847.06,
+ .dppclk_mhz = 757.89,
+ .phyclk_mhz = 600.0,
.socclk_mhz = 715.0,
.dscclk_mhz = 318.334,
- .dram_speed_mts = 4266.0,
+ .dram_speed_mts = 2134.0,
},
{
.state = 4,
- .dcfclk_mhz = 810.0,
+ .dcfclk_mhz = 626.09,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 900.00,
+ .dppclk_mhz = 847.06,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 953.0,
+ .dscclk_mhz = 489.0,
+ .dram_speed_mts = 2400.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 685.71,
+ .fabricclk_mhz = 1333.0,
+ .dispclk_mhz = 1028.57,
+ .dppclk_mhz = 960.00,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 278.0,
+ .dscclk_mhz = 287.67,
+ .dram_speed_mts = 2666.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 757.89,
+ .fabricclk_mhz = 1467.0,
+ .dispclk_mhz = 1107.69,
+ .dppclk_mhz = 1028.57,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 715.0,
+ .dscclk_mhz = 318.334,
+ .dram_speed_mts = 3200.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
- .dppclk_mhz = 1285.0,
+ .dppclk_mhz = 1285.00,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
@@ -218,8 +252,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
/*Extra state, no dispclk ramping*/
{
- .state = 5,
- .dcfclk_mhz = 810.0,
+ .state = 8,
+ .dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.0,
@@ -250,7 +284,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.dram_channel_width_bytes = 4,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.dcn_downspread_percent = 0.5,
- .downspread_percent = 0.5,
+ .downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
@@ -266,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
- .num_states = 5
+ .num_states = 8
};
#ifndef MAX
@@ -306,6 +340,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIF0_BASE__INST0_SEG ## seg
@@ -804,7 +842,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -821,11 +860,12 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
+ .min_disp_clk_khz = 100000,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
@@ -841,7 +881,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -962,6 +1002,9 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
@@ -1335,26 +1378,50 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
{
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
- int i;
-
- dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
- dcn2_1_soc.num_chans = bw_params->num_channels;
-
- for (i = 0; i < clk_table->num_entries; i++) {
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int j;
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
+ dcn2_1_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
- dcn2_1_soc.clock_limits[i].state = i;
- dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+ clock_limits[i].state = i;
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn2_1_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn2_1_soc.num_states = clk_table->num_entries;
+ /* duplicate last level */
+ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
+ }
}
- dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
- dcn2_1_soc.num_states = i;
- // diags does not retrieve proper values from SMU, do not update DML instance for diags
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
+ dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
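Note: the rewritten update_bw_bounding_box() above no longer copies SMU entries one-to-one. For each DCFCLK level the SMU reports, it scans the static dcn2_1_soc table backwards for the highest state whose dcfclk does not exceed that entry, then borrows that state's derived clocks (dispclk, dppclk, dscclk, phyclk, dram bandwidth per channel). Be aware that j is declared unsigned, so the `j >= 0` condition can never go false; the hunk relies on always matching at some state. A minimal standalone sketch of the same mapping, with a signed index so the backwards scan terminates unconditionally (helper name and array layout are illustrative, not from the patch):

	/* Sketch of the closest-level mapping used above. */
	static unsigned int closest_level(const unsigned int *soc_dcfclk_mhz,
					  int num_states, unsigned int target_mhz)
	{
		int j;

		/* Scan backwards: highest SoC state not exceeding the SMU entry. */
		for (j = num_states - 1; j >= 0; j--) {
			if (soc_dcfclk_mhz[j] <= target_mhz)
				return (unsigned int)j;
		}
		return 0; /* fall back to the lowest state */
	}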
/* Temporary Place holder until we can get them from fuse */
@@ -1476,6 +1543,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN21 = true;
+ hws->wa.disallow_self_refresh_during_multi_plane_transition = true;
}
return hws;
}
@@ -1499,6 +1567,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1639,6 +1708,19 @@ static int dcn21_populate_dml_pipes_from_context(
return pipe_cnt;
}
+enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ enum dc_status result = DC_OK;
+
+ if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
+ plane_state->dcc.enable = 1;
+ /* align to our worst case block width */
+ plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
+ }
+ result = dcn20_patch_unknown_plane_state(plane_state);
+ return result;
+}
+
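Note: the meta_pitch expression in dcn21_patch_unknown_plane_state() is the standard integer round-up-to-multiple idiom, matching the 1024-pixel worst-case DCC block width named in the comment. A quick sketch of what it computes:

	/* Round-up-to-multiple idiom used for dcc.meta_pitch above. */
	static unsigned int align_to_1024(unsigned int width)
	{
		return ((width + 1023) / 1024) * 1024;  /* 1920 -> 2048, 4096 -> 4096 */
	}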
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
@@ -1648,7 +1730,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = update_bw_bounding_box
@@ -1695,6 +1777,7 @@ static bool dcn21_resource_construct(
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1758,9 +1841,15 @@ static bool dcn21_resource_construct(
goto create_fail;
}
- // Leave as NULL to not affect current dmcu psr programming sequence
- // Will be uncommented when functionality is confirmed to be working
- pool->base.psr = NULL;
+ if (dc->debug.disable_dmcu) {
+ pool->base.psr = dmub_psr_create(ctx);
+
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 626d22d437f4..968c46dfb506 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -32,6 +32,7 @@ struct cp_psp_stream_config {
uint8_t otg_inst;
uint8_t link_enc_inst;
uint8_t stream_enc_inst;
+ uint8_t mst_supported;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 7ee8b8460a9b..e34c3376efc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -63,10 +63,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
- dml_common_defs.o
ifdef CONFIG_DRM_AMD_DC_DCN
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
index ea4cde952f4f..2a1983324629 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
@@ -29,7 +29,7 @@
#define DC__PRESENT 1
#define DC__PRESENT__1 1
#define DC__NUM_DPP 4
-#define DC__VOLTAGE_STATES 7
+#define DC__VOLTAGE_STATES 9
#define DC__NUM_DPP__4 1
#define DC__NUM_DPP__0_PRESENT 1
#define DC__NUM_DPP__1_PRESENT 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 485a9c62ec58..5bbbafacc720 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2614,6 +2614,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
+ }
+ }
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
index 8c86b63ddf07..1e557ddcb638 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
@@ -26,7 +26,6 @@
#ifndef __DML20_DISPLAY_RQ_DLG_CALC_H__
#define __DML20_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
index 0378406bf7e7..0d53e871a9d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
@@ -26,7 +26,6 @@
#ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__
#define __DML20V2_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index a38baa73d484..b8ec08e3b7a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -1200,7 +1200,7 @@ static void dml_rq_dlg_get_dlg_params(
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
- if (htaps_l <= 1)
+ if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
@@ -1216,7 +1216,7 @@ static void dml_rq_dlg_get_dlg_params(
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
- if (htaps_c <= 1)
+ if (hratio_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
@@ -1522,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
- disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
- disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+ disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
+ disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
// Clamp to max for now
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
index 83e95f8cbff2..e8f7785e3fc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
@@ -26,7 +26,7 @@
#ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__
#define __DML21_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
+#include "dm_services.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index cf2758ca5b02..c77c3d827e4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -25,8 +25,10 @@
#ifndef __DISPLAY_MODE_LIB_H__
#define __DISPLAY_MODE_LIB_H__
-
-#include "dml_common_defs.h"
+#include "dm_services.h"
+#include "dc_features.h"
+#include "display_mode_structs.h"
+#include "display_mode_enums.h"
#include "display_mode_vba.h"
enum dml_project {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 658f81e757e9..687010c17324 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -22,11 +22,12 @@
* Authors: AMD
*
*/
+
+#include "dc_features.h"
+
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
-#define MAX_CLOCK_LIMIT_STATES 8
-
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
typedef struct _vcs_dpi_ip_params_st ip_params_st;
@@ -61,12 +62,15 @@ struct _vcs_dpi_voltage_scaling_st {
double dram_speed_mts;
double fabricclk_mhz;
double dispclk_mhz;
+ double dram_bw_per_chan_gbps;
double phyclk_mhz;
double dppclk_mhz;
double dtbclk_mhz;
};
struct _vcs_dpi_soc_bounding_box_st {
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
double urgent_latency_us;
@@ -109,8 +113,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
- unsigned int num_states;
- struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ double min_dcfclk;
bool do_urgent_latency_adjustment;
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
@@ -189,7 +192,7 @@ struct _vcs_dpi_ip_params_st {
unsigned int min_vblank_lines;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
- unsigned int dcfclk_cstate_latency;
+ double dcfclk_cstate_latency;
unsigned int dppclk_delay_scl;
unsigned int dppclk_delay_scl_lb_only;
unsigned int dppclk_delay_cnvc_formatter;
@@ -202,6 +205,7 @@ struct _vcs_dpi_ip_params_st {
unsigned int LineBufferFixedBpp;
unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
unsigned int bug_forcing_LC_req_same_size_fixed;
+ unsigned int number_of_cursors;
};
struct _vcs_dpi_display_xfc_params_st {
@@ -325,7 +329,6 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
- unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index b3c96d9b472f..6b525c52124c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -266,8 +266,6 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
}
- mode_lib->vba.MinVoltageLevel = 0;
- mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
mode_lib->vba.DoUrgentLatencyAdjustment =
soc->do_urgent_latency_adjustment;
@@ -379,7 +377,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
- mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -396,11 +393,11 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_y_c;
mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
- mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height;
- mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width;
+ mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_y;
+ mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_y;
mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
- mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_c;
- mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_c;
+ mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_c;
+ mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_c;
mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
mode_lib->vba.DCCMetaPitchC[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch_c;
mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 2875efd85467..3a734171f083 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -27,8 +27,6 @@
#ifndef __DML2_DISPLAY_MODE_VBA_H__
#define __DML2_DISPLAY_MODE_VBA_H__
-#include "dml_common_defs.h"
-
struct display_mode_lib;
void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
@@ -389,7 +387,6 @@ struct vba_vars_st {
/* vba mode support */
/*inputs*/
- bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
@@ -842,8 +839,6 @@ struct vba_vars_st {
double DCCRateChroma[DC__NUM_DPP__MAX];
double PHYCLKD18PerState[DC__VOLTAGE_STATES + 1];
- int MinVoltageLevel;
- int MaxVoltageLevel;
bool WritebackSupportInterleaveAndUsingWholeBufferForASingleStream;
bool NumberOfHDMIFRLSupport;
@@ -880,7 +875,6 @@ struct vba_vars_st {
double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
- bool UseMinimumRequiredDCFCLK;
double WritebackDelayTime[DC__NUM_DPP__MAX];
unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
index 1f24db830737..2555ef0358c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -26,7 +26,6 @@
#ifndef __DISPLAY_RQ_DLG_HELPERS_H__
#define __DISPLAY_RQ_DLG_HELPERS_H__
-#include "dml_common_defs.h"
#include "display_mode_lib.h"
/* Function: Printer functions
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
index 304164986bd8..9c06913ad767 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -26,8 +26,6 @@
#ifndef __DISPLAY_RQ_DLG_CALC_H__
#define __DISPLAY_RQ_DLG_CALC_H__
-#include "dml_common_defs.h"
-
struct display_mode_lib;
#include "display_rq_dlg_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index ded71ea82413..02e06c9b3230 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -26,7 +26,6 @@
#ifndef __DML_INLINE_DEFS_H__
#define __DML_INLINE_DEFS_H__
-#include "dml_common_defs.h"
#include "dcn_calc_math.h"
#include "dml_logger.h"
@@ -75,6 +74,18 @@ static inline double dml_floor(double a, double granularity)
return (double) dcn_bw_floor2(a, granularity);
}
+static inline double dml_round(double a)
+{
+ double round_pt = 0.5;
+ double ceil = dml_ceil(a, 1);
+ double floor = dml_floor(a, 1);
+
+ if (a - floor >= round_pt)
+ return ceil;
+ else
+ return floor;
+}
+
static inline int dml_log2(double x)
{
return dml_round((double)dcn_bw_log(x, 2));
@@ -112,7 +123,7 @@ static inline double dml_log(double x, double base)
static inline unsigned int dml_round_to_multiple(unsigned int num,
unsigned int multiple,
- bool up)
+ unsigned char up)
{
unsigned int remainder;
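Note: the new dml_round() rounds half away from the floor (a - floor >= 0.5 selects the ceiling), which is what dml_log2() needs when snapping dcn_bw_log() results to integers; the dml_round_to_multiple() change only widens the bool flag to unsigned char. A behavioral sketch for non-negative inputs (standalone, not the patch's implementation, which goes through dml_ceil/dml_floor):

	/* Behavioral sketch of dml_round() for a >= 0. */
	static double round_half_up(double a)
	{
		double floor_a = (double)(long long)a;  /* truncation == floor for a >= 0 */

		return (a - floor_a >= 0.5) ? floor_a + 1.0 : floor_a;  /* 2.5 -> 3, 2.49 -> 2 */
	}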
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index d2d36d48caaa..f252af1947c3 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -47,9 +47,9 @@
#include "dce120/hw_factory_dce120.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_factory_dcn10.h"
-#endif
#include "dcn20/hw_factory_dcn20.h"
#include "dcn21/hw_factory_dcn21.h"
+#endif
#include "diagnostics/hw_factory_diag.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 5d396657a1ee..04e2c0f74cb0 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -45,9 +45,9 @@
#include "dce120/hw_translate_dce120.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_translate_dcn10.h"
-#endif
#include "dcn20/hw_translate_dcn20.h"
#include "dcn21/hw_translate_dcn21.h"
+#endif
#include "diagnostics/hw_translate_diag.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f285b76888fb..d523fc9547e7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -124,7 +124,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream);
- enum dc_status (*get_default_swizzle_mode)(
+ enum dc_status (*patch_unknown_plane_state)(
struct dc_plane_state *plane_state);
struct stream_encoder *(*find_first_free_match_stream_enc_for_link)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 8b1f0ce6c2a7..e94e5fbf2aa2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -78,6 +78,8 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+void dpcd_set_source_specific_data(struct dc_link *link);
+
void dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index ac530c057ddd..ce65678c03b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -27,6 +27,7 @@
#define __DAL_CLK_MGR_H__
#include "dc.h"
+#include "dm_pp_smu.h"
#define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000
@@ -193,6 +194,7 @@ struct clk_mgr {
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
+ struct pp_smu_wm_range_sets ranges;
};
/* forward declarations */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 862952c0286a..9311d0de377f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -296,6 +296,10 @@ int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context);
+int clk_mgr_helper_get_active_plane_cnt(
+ struct dc *dc,
+ struct dc_state *context);
+
#endif //__DAL_CLK_MGR_INTERNAL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 05ee5295d2c1..336c80a18175 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -27,11 +27,12 @@
#define __DAL_DCCG_H__
#include "dc_types.h"
+#include "hw_shared.h"
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
-
+ int pipe_dppclk_khz[MAX_PIPES];
int ref_dppclk;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index c0dc1d0f5cae..f5dd0cc73c63 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -134,7 +134,7 @@ struct hubbub_funcs {
unsigned int dccg_ref_freq_inKhz,
unsigned int *dchub_ref_freq_inKhz);
- void (*program_watermarks)(
+ bool (*program_watermarks)(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index c59740084ebc..7c2a3328b208 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -39,6 +39,7 @@ struct dsc_config {
uint32_t pic_height;
enum dc_pixel_encoding pixel_encoding;
enum dc_color_depth color_depth; /* Bits per component */
+ bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 459f95f52486..f30ab4916242 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -25,16 +25,15 @@
#ifndef __DC_DWBC_H__
#define __DC_DWBC_H__
+#include "dal_types.h"
#include "dc_hw_types.h"
-
#define DWB_SW_V2 1
#define DWB_MCIF_BUF_COUNT 4
/* forward declaration of mcif_wb struct */
struct mcif_wb;
-enum dce_version;
enum dwb_sw_version {
dwb_ver_1_0 = 1,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index fb748f082c56..c2b392a533b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -68,6 +68,7 @@ struct encoder_feature_support {
unsigned int max_hdmi_pixel_clock;
bool hdmi_ycbcr420_supported;
bool dp_ycbcr420_supported;
+ bool fec_supported;
};
union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 094afc4c8173..50ee8aa7ec3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -210,6 +210,22 @@ struct mpc_funcs {
struct mpcc_blnd_cfg *blnd_cfg,
int mpcc_id);
+ /*
+ * Lock cursor updates for the specified OPP.
+ * OPP defines the set of MPCC that are locked together for cursor.
+ *
+ * Parameters:
+ * [in] mpc - MPC context.
+ * [in] opp_id - The OPP to lock cursor updates on
+ * [in] lock - lock/unlock the OPP
+ *
+ * Return: void
+ */
+ void (*cursor_lock)(
+ struct mpc *mpc,
+ int opp_id,
+ bool lock);
+
struct mpcc* (*get_mpcc_for_dpp)(
struct mpc_tree *tree,
int dpp_id);
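Note: the new cursor_lock hook lets callers fence cursor programming per OPP. A hedged sketch of the intended call pattern; the pipe_ctx/stream_res.opp->inst plumbing is assumed from how other MPC hooks are driven in dc, not spelled out in this patch:

	/* Sketch: guard a cursor update with the new MPC cursor lock. */
	static void update_cursor_locked(struct mpc *mpc, struct pipe_ctx *pipe)
	{
		int opp_id = pipe->stream_res.opp->inst;

		mpc->funcs->cursor_lock(mpc, opp_id, true);   /* lock the OPP's MPCC set */
		/* ... program HUBP/DPP cursor position and attributes ... */
		mpc->funcs->cursor_lock(mpc, opp_id, false);  /* unlock */
	}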
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7575564b2265..2717352eb697 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -310,7 +310,8 @@ struct opp_funcs {
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 351b387ad606..ac6523c0828e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -103,6 +103,7 @@ struct stream_encoder {
struct dc_context *ctx;
struct dc_bios *bp;
enum engine_id id;
+ uint32_t stream_enc_inst;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 209118f9f193..08307f3796e3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -66,6 +66,8 @@ struct hw_sequencer_funcs {
int num_planes, struct dc_state *context);
void (*program_front_end_for_ctx)(struct dc *dc,
struct dc_state *context);
+ void (*post_unlock_program_front_end)(struct dc *dc,
+ struct dc_state *context);
void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
void (*update_dchub)(struct dce_hwseq *hws,
@@ -78,17 +80,23 @@ struct hw_sequencer_funcs {
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
/* Pipe Lock Related */
- void (*pipe_control_lock_global)(struct dc *dc,
- struct pipe_ctx *pipe, bool lock);
void (*pipe_control_lock)(struct dc *dc,
struct pipe_ctx *pipe, bool lock);
+ void (*interdependent_update_lock)(struct dc *dc,
+ struct dc_state *context, bool lock);
void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
bool flip_immediate);
+ void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);
/* Timing Related */
void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
struct crtc_position *position);
int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
+ void (*calc_vupdate_position)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+ uint32_t *end_line);
void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
int group_size, struct pipe_ctx *grouped_pipes[]);
void (*enable_timing_synchronization)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index ecf566378ccd..52a26e6be066 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -40,10 +40,13 @@ struct dce_hwseq_wa {
bool false_optc_underflow;
bool DEGVIDCN10_254;
bool DEGVIDCN21;
+ bool disallow_self_refresh_during_multi_plane_transition;
};
struct hwseq_wa_state {
bool DEGVIDCN10_253_applied;
+ bool disallow_self_refresh_during_multi_plane_transition_applied;
+ unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
};
struct pipe_ctx;
@@ -97,6 +100,8 @@ struct hwseq_private_funcs {
struct dc *dc);
void (*edp_backlight_control)(struct dc_link *link,
bool enable);
+ bool (*is_panel_backlight_on)(struct dc_link *link);
+ bool (*is_panel_powered_on)(struct dc_link *link);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
@@ -140,6 +145,8 @@ struct hwseq_private_funcs {
const struct dc_plane_state *plane_state);
bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
+ void (*PLAT_58856_wa)(struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5ae8ada154ef..ca4c36c0c9bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -179,4 +179,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+
+int get_num_odm_splits(struct pipe_ctx *pipe);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index c34eba19860a..6d7bca562eec 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -108,7 +108,7 @@
#define ASSERT(expr) ASSERT_CRITICAL(expr)
#else
-#define ASSERT(expr) WARN_ON(!(expr))
+#define ASSERT(expr) WARN_ON_ONCE(!(expr))
#endif
#define BREAK_TO_DEBUGGER() ASSERT(0)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index cd9532b4f14d..10b5fa9d2588 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -50,6 +50,7 @@ enum dmub_cmd_type {
DMUB_CMD__REG_REG_WAIT = 4,
DMUB_CMD__PLAT_54186_WA = 5,
DMUB_CMD__PSR = 64,
+ DMUB_CMD__ABM = 66,
DMUB_CMD__VBIOS = 128,
};
@@ -216,7 +217,6 @@ struct dmub_rb_cmd_dpphy_init {
struct dmub_cmd_psr_copy_settings_data {
uint16_t psr_level;
- uint8_t hubp_inst;
uint8_t dpp_inst;
uint8_t mpcc_inst;
uint8_t opp_inst;
@@ -225,17 +225,8 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t digbe_inst;
uint8_t dpphy_inst;
uint8_t aux_inst;
- uint8_t hyst_frames;
- uint8_t hyst_lines;
- uint8_t phy_num;
- uint8_t phy_type;
- uint8_t aux_repeat;
uint8_t smu_optimizations_en;
- uint8_t skip_wait_for_pll_lock;
uint8_t frame_delay;
- uint8_t smu_phy_id;
- uint8_t num_of_controllers;
- uint8_t link_rate;
uint8_t frame_cap_ind;
};
@@ -257,13 +248,59 @@ struct dmub_rb_cmd_psr_enable {
struct dmub_cmd_header header;
};
-struct dmub_cmd_psr_setup_data {
+struct dmub_cmd_psr_set_version_data {
enum psr_version version; // PSR version 1 or 2
};
-struct dmub_rb_cmd_psr_setup {
+struct dmub_rb_cmd_psr_set_version {
struct dmub_cmd_header header;
- struct dmub_cmd_psr_setup_data psr_setup_data;
+ struct dmub_cmd_psr_set_version_data psr_set_version_data;
+};
+
+struct dmub_cmd_abm_set_pipe_data {
+ uint32_t ramping_boundary;
+ uint32_t otg_inst;
+};
+
+struct dmub_rb_cmd_abm_set_pipe {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data;
+};
+
+struct dmub_cmd_abm_set_backlight_data {
+ uint32_t frame_ramp;
+};
+
+struct dmub_rb_cmd_abm_set_backlight {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data;
+};
+
+struct dmub_cmd_abm_set_level_data {
+ uint32_t level;
+};
+
+struct dmub_rb_cmd_abm_set_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_level_data abm_set_level_data;
+};
+
+struct dmub_cmd_abm_set_ambient_level_data {
+ uint32_t ambient_lux;
+};
+
+struct dmub_rb_cmd_abm_set_ambient_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data;
+};
+
+struct dmub_cmd_abm_set_pwm_frac_data {
+ uint32_t fractional_pwm;
+};
+
+struct dmub_rb_cmd_abm_set_pwm_frac {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
};
union dmub_rb_cmd {
@@ -277,11 +314,16 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
struct dmub_rb_cmd_dpphy_init dpphy_init;
struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
- struct dmub_rb_cmd_psr_enable psr_enable;
+ struct dmub_rb_cmd_psr_set_version psr_set_version;
struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
+ struct dmub_rb_cmd_psr_enable psr_enable;
struct dmub_rb_cmd_psr_set_level psr_set_level;
struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
- struct dmub_rb_cmd_psr_setup psr_setup;
+ struct dmub_rb_cmd_abm_set_pipe abm_set_pipe;
+ struct dmub_rb_cmd_abm_set_backlight abm_set_backlight;
+ struct dmub_rb_cmd_abm_set_level abm_set_level;
+ struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
+ struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
};
#pragma pack(pop)
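Note: the new ABM commands follow the same header-plus-payload layout as the PSR commands in this file. A sketch of queueing one, assuming the dmub_cmd_header type/sub_type/payload_bytes fields and the dc_dmub_srv_cmd_queue/execute/wait_idle helpers used by the existing PSR path:

	static void abm_set_level_sketch(struct dc *dc, uint32_t level)
	{
		union dmub_rb_cmd cmd = { 0 };

		cmd.abm_set_level.header.type = DMUB_CMD__ABM;
		cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
		cmd.abm_set_level.header.payload_bytes =
				sizeof(struct dmub_cmd_abm_set_level_data);
		cmd.abm_set_level.abm_set_level_data.level = level;

		dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd.abm_set_level.header);
		dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
		dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
	}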
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index 7b69eb37f762..d37535d21928 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,7 +32,7 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_SETUP = 0,
+ DMUB_CMD__PSR_SET_VERSION = 0,
DMUB_CMD__PSR_COPY_SETTINGS = 1,
DMUB_CMD__PSR_ENABLE = 2,
DMUB_CMD__PSR_DISABLE = 3,
@@ -42,7 +42,16 @@ enum dmub_cmd_psr_type {
enum psr_version {
PSR_VERSION_1 = 0x10, // PSR Version 1
PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
- PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+ PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+};
+
+enum dmub_cmd_abm_type {
+ DMUB_CMD__ABM_INIT_CONFIG = 0,
+ DMUB_CMD__ABM_SET_PIPE = 1,
+ DMUB_CMD__ABM_SET_BACKLIGHT = 2,
+ DMUB_CMD__ABM_SET_LEVEL = 3,
+ DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4,
+ DMUB_CMD__ABM_SET_PWM_FRAC = 5,
};
#endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
new file mode 100644
index 000000000000..652d6fc061b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_GPINT_CMD_H_
+#define _DMUB_GPINT_CMD_H_
+
+#include "dmub_types.h"
+
+/**
+ * The register format for sending a command via the GPINT.
+ */
+union dmub_gpint_data_register {
+ struct {
+ uint32_t param : 16;
+ uint32_t command_code : 12;
+ uint32_t status : 4;
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * The shifts and masks below may alternatively be used to format and read
+ * the command register bits.
+ */
+
+#define DMUB_GPINT_DATA_PARAM_MASK 0xFFFF
+#define DMUB_GPINT_DATA_PARAM_SHIFT 0
+
+#define DMUB_GPINT_DATA_COMMAND_CODE_MASK 0xFFF
+#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16
+
+#define DMUB_GPINT_DATA_STATUS_MASK 0xF
+#define DMUB_GPINT_DATA_STATUS_SHIFT 28
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_gpint_command {
+ DMUB_GPINT__INVALID_COMMAND = 0,
+ DMUB_GPINT__GET_FW_VERSION = 1,
+ DMUB_GPINT__STOP_FW = 2,
+ DMUB_GPINT__GET_PSR_STATE = 7,
+};
+
+/**
+ * Command responses.
+ */
+
+#define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD
+
+#endif /* _DMUB_GPINT_CMD_H_ */
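Note: the bitfield union and the shift/mask macros above describe the same 32-bit word: param in bits [15:0], command code in [27:16], status in [31:28]. A sketch showing the mask/shift form, which produces the same register value as the bitfield form on the low-bit-first layouts the kernel builds for:

	/* Sketch: pack a GPINT word with the mask/shift macros. */
	static uint32_t pack_gpint(uint16_t param, uint16_t code)
	{
		uint32_t word = 0;

		word |= ((uint32_t)param & DMUB_GPINT_DATA_PARAM_MASK)
				<< DMUB_GPINT_DATA_PARAM_SHIFT;
		word |= ((uint32_t)code & DMUB_GPINT_DATA_COMMAND_CODE_MASK)
				<< DMUB_GPINT_DATA_COMMAND_CODE_SHIFT;
		word |= (1u & DMUB_GPINT_DATA_STATUS_MASK)
				<< DMUB_GPINT_DATA_STATUS_SHIFT;  /* status = 1: pending */
		return word;  /* e.g. GET_FW_VERSION: 0x10010000 */
	}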
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index f8917594036a..c2671f2616c8 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -66,6 +66,7 @@
#include "dmub_types.h"
#include "dmub_cmd.h"
+#include "dmub_gpint_cmd.h"
#include "dmub_rb.h"
#if defined(__cplusplus)
@@ -103,7 +104,7 @@ enum dmub_window_id {
DMUB_WINDOW_4_MAILBOX,
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
- DMUB_WINDOW_7_RESERVED,
+ DMUB_WINDOW_7_SCRATCH_MEM,
DMUB_WINDOW_TOTAL,
};
@@ -262,6 +263,14 @@ struct dmub_srv_hw_funcs {
bool (*is_phy_init)(struct dmub_srv *dmub);
bool (*is_auto_load_done)(struct dmub_srv *dmub);
+
+ void (*set_gpint)(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+ bool (*is_gpint_acked)(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+ uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
};
/**
@@ -307,6 +316,7 @@ struct dmub_srv {
enum dmub_asic asic;
void *user_ctx;
bool is_virtual;
+ struct dmub_fb scratch_mem_fb;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -516,6 +526,45 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us);
+/**
+ * dmub_srv_send_gpint_command() - Sends a GPINT based command.
+ * @dmub: the dmub service
+ * @command_code: the command code to send
+ * @param: the command parameter to send
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Sends a command via the general purpose interrupt (GPINT).
+ * Waits for the number of microseconds specified by timeout_us
+ * for the command ACK before returning.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for ACK timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+ enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t timeout_us);
+
+/**
+ * dmub_srv_get_gpint_response() - Queries the GPINT response.
+ * @dmub: the dmub service
+ * @response: the response for the last GPINT
+ *
+ * Returns the response code for the last GPINT interrupt.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+ uint32_t *response);
+
#if defined(__cplusplus)
}
#endif
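Note: a hedged usage sketch of the two new entry points, querying the firmware version over GPINT. The 30 us timeout is illustrative; real callers should size it to the command being issued.

	static uint32_t query_fw_version(struct dmub_srv *dmub)
	{
		uint32_t version = 0;

		if (dmub_srv_send_gpint_command(dmub, DMUB_GPINT__GET_FW_VERSION,
						0, 30) == DMUB_STATUS_OK)
			dmub_srv_get_gpint_response(dmub, &version);

		return version;
	}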
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index b2ca8e0dbac9..63bb9e2c81de 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -60,6 +60,12 @@ static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
{
uint32_t tmp;
+ if (dmub->fb_base || dmub->fb_offset) {
+ *fb_base = dmub->fb_base;
+ *fb_offset = dmub->fb_offset;
+ return;
+ }
+
REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
*fb_base = (uint64_t)tmp << 24;
@@ -77,11 +83,52 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn20_reset(struct dmub_srv *dmub)
{
+ union dmub_gpint_data_register cmd;
+ const uint32_t timeout = 30;
+ uint32_t in_reset, scratch, i;
+
+ REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &in_reset);
+
+ if (in_reset == 0) {
+ cmd.bits.status = 1;
+ cmd.bits.command_code = DMUB_GPINT__STOP_FW;
+ cmd.bits.param = 0;
+
+ dmub->hw_funcs.set_gpint(dmub, cmd);
+
+ /**
+ * Timeout covers both the ACK and the wait
+ * for remaining work to finish.
+ *
+ * This is mostly bound by the PHY disable sequence.
+ * Each register check will be greater than 1us, so
+ * don't bother using udelay.
+ */
+
+ for (i = 0; i < timeout; ++i) {
+ if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ break;
+ }
+
+ for (i = 0; i < timeout; ++i) {
+ scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ break;
+ }
+
+ /* Clear the GPINT command manually so we don't reset again. */
+ cmd.all = 0;
+ dmub->hw_funcs.set_gpint(dmub, cmd);
+
+ /* Force reset in case we timed out; DMCUB is likely hung. */
+ }
+
REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+ REG_WRITE(DMCUB_SCRATCH0, 0);
}
void dmub_dcn20_reset_release(struct dmub_srv *dmub)
@@ -217,3 +264,25 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
return supported;
}
+
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg)
+{
+ REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
+}
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg)
+{
+ union dmub_gpint_data_register test;
+
+ reg.bits.status = 0;
+ test.all = REG_READ(DMCUB_GPINT_DATAIN1);
+
+ return test.all == reg.all;
+}
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_SCRATCH7);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 04b0fa13153d..7f046c73927e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -91,6 +91,7 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH13) \
DMUB_SR(DMCUB_SCRATCH14) \
DMUB_SR(DMCUB_SCRATCH15) \
+ DMUB_SR(DMCUB_GPINT_DATAIN1) \
DMUB_SR(CC_DC_PIPE_DIS) \
DMUB_SR(MMHUBBUB_SOFT_RESET) \
DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
@@ -183,4 +184,12 @@ bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 85a518bf8a76..ce32cc7933c4 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -52,8 +52,11 @@
/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (1024)
+/* Default scratch mem size. */
+#define DMUB_SCRATCH_MEM_SIZE (256)
+
/* Number of windows in use. */
-#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
+#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */
#define DMUB_CW0_BASE (0x60000000)
@@ -126,6 +129,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
funcs->is_hw_init = dmub_dcn20_is_hw_init;
+ funcs->set_gpint = dmub_dcn20_set_gpint;
+ funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
+ funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
if (asic == DMUB_ASIC_DCN21) {
dmub->regs = &dmub_srv_dcn21_regs;
@@ -208,9 +214,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+ struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@@ -253,7 +261,10 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
fw_state->base = dmub_align(trace_buff->top, 256);
fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
- out->fb_size = dmub_align(fw_state->top, 4096);
+ scratch_mem->base = dmub_align(fw_state->top, 256);
+ scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+
+ out->fb_size = dmub_align(scratch_mem->top, 4096);
return DMUB_STATUS_OK;
}
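Note: the scratch window is appended after the FW-state window with the same base/top bookkeeping as its neighbors. Assuming dmub_align() is the usual round-up idiom, the arithmetic works out as in this sketch:

	/* Round-up helper, assuming the usual idiom behind dmub_align(). */
	static uint32_t align_up(uint32_t x, uint32_t factor)
	{
		return (x + factor - 1) / factor * factor;
	}

	/* e.g. fw_state->top == 0x1234:
	 *   scratch_mem->base = align_up(0x1234, 256)        == 0x1300
	 *   scratch_mem->top  = 0x1300 + align_up(256, 64)   == 0x1400
	 *   out->fb_size      = align_up(0x1400, 4096)       == 0x2000
	 */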
@@ -331,6 +342,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
+ struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
struct dmub_rb_init_params rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
@@ -367,7 +379,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->hw_funcs.reset(dmub);
if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
- fw_state_fb) {
+ fw_state_fb && scratch_mem_fb) {
cw2.offset.quad_part = data_fb->gpu_addr;
cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
cw2.region.top = cw2.region.base + data_fb->size;
@@ -393,6 +405,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->scratch_mem_fb = *scratch_mem_fb;
+
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
&cw5, &cw6);
@@ -522,3 +536,50 @@ enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+ enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t timeout_us)
+{
+ union dmub_gpint_data_register reg;
+ uint32_t i;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.set_gpint)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.is_gpint_acked)
+ return DMUB_STATUS_INVALID;
+
+ reg.bits.status = 1;
+ reg.bits.command_code = command_code;
+ reg.bits.param = param;
+
+ dmub->hw_funcs.set_gpint(dmub, reg);
+
+ for (i = 0; i < timeout_us; ++i) {
+ if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
+ return DMUB_STATUS_OK;
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+ uint32_t *response)
+{
+ *response = 0;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.get_gpint_response)
+ return DMUB_STATUS_INVALID;
+
+ *response = dmub->hw_funcs.get_gpint_response(dmub);
+
+ return DMUB_STATUS_OK;
+}
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index a2903985b9e8..2359e88d6029 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,31 +134,28 @@
#define PICASSO_A0 0x41
/* DCN1_01 */
#define RAVEN2_A0 0x81
-#define RAVEN2_15D8_REV_94 0x94
-#define RAVEN2_15D8_REV_95 0x95
-#define RAVEN2_15D8_REV_E3 0xE3
-#define RAVEN2_15D8_REV_E4 0xE4
-#define RAVEN2_15D8_REV_E9 0xE9
-#define RAVEN2_15D8_REV_EA 0xEA
-#define RAVEN2_15D8_REV_EB 0xEB
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
+#define RENOIR_A0 0x91
#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#endif
+#define PRID_DALI_DE 0xDE
+#define PRID_DALI_DF 0xDF
+#define PRID_DALI_E3 0xE3
+#define PRID_DALI_E4 0xE4
+
+#define PRID_POLLOCK_94 0x94
+#define PRID_POLLOCK_95 0x95
+#define PRID_POLLOCK_E9 0xE9
+#define PRID_POLLOCK_EA 0xEA
+#define PRID_POLLOCK_EB 0xEB
#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
#ifndef ASICREV_IS_RAVEN2
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RENOIR_A0))
#endif
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
-#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
- || (eChipRev == RAVEN2_15D8_REV_E4))
-#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \
- || eChipRev == RAVEN2_15D8_REV_95 \
- || eChipRev == RAVEN2_15D8_REV_E9 \
- || eChipRev == RAVEN2_15D8_REV_EA \
- || eChipRev == RAVEN2_15D8_REV_EB)
#define FAMILY_RV 142 /* DCN 1*/
@@ -175,9 +172,7 @@ enum {
#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0)
#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#define RENOIR_A0 0x91
-#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
-#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
+#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
/*
* ASIC chip ID
@@ -187,6 +182,9 @@ enum {
#define DEVICE_ID_TEMASH_9839 0x9839
#define DEVICE_ID_TEMASH_983D 0x983D
+/* RENOIR */
+#define DEVICE_ID_RENOIR_1636 0x1636
+
/* Asic Family IDs for different asic family. */
#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 2c90d1b46c8b..3d29646c7cb4 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -149,4 +149,12 @@ enum dpcd_psr_sink_states {
PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
};
+#define DP_SOURCE_TABLE_REVISION 0x310
+#define DP_SOURCE_PAYLOAD_SIZE 0x311
+#define DP_SOURCE_SINK_CAP 0x317
+#define DP_SOURCE_BACKLIGHT_LEVEL 0x320
+#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
+#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
+#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
+
#endif /* __DAL_DPCD_DEFS_H__ */
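Note: these DP_SOURCE_* addresses are vendor-specific DPCD registers written by the new dpcd_set_source_specific_data() path. A hedged sketch using core_link_write_dpcd(), whose signature is assumed from its other call sites in dc rather than from this patch:

	static void enable_source_backlight(struct dc_link *link)
	{
		uint8_t enable = 1;

		core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
				     &enable, sizeof(enable));
	}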
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 89a709267019..d66f9d8eefb4 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -124,36 +124,37 @@ enum dc_log_type {
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
(1 << LOG_DETECTION_EDID_PARSER))
-#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
- (1 << LOG_WARNING) | \
- (1 << LOG_EVENT_MODE_SET) | \
- (1 << LOG_EVENT_DETECTION) | \
- (1 << LOG_EVENT_LINK_TRAINING) | \
- (1 << LOG_EVENT_LINK_LOSS) | \
- (1 << LOG_EVENT_UNDERFLOW) | \
- (1 << LOG_RESOURCE) | \
- (1 << LOG_FEATURE_OVERRIDE) | \
- (1 << LOG_DETECTION_EDID_PARSER) | \
- (1 << LOG_DC) | \
- (1 << LOG_HW_HOTPLUG) | \
- (1 << LOG_HW_SET_MODE) | \
- (1 << LOG_HW_RESUME_S3) | \
- (1 << LOG_HW_HPD_IRQ) | \
- (1 << LOG_SYNC) | \
- (1 << LOG_BANDWIDTH_VALIDATION) | \
- (1 << LOG_MST) | \
- (1 << LOG_DETECTION_DP_CAPS) | \
- (1 << LOG_BACKLIGHT)) | \
- (1 << LOG_I2C_AUX) | \
- (1 << LOG_IF_TRACE) | \
- (1 << LOG_DTN) /* | \
- (1 << LOG_DEBUG) | \
- (1 << LOG_BIOS) | \
- (1 << LOG_SURFACE) | \
- (1 << LOG_SCALER) | \
- (1 << LOG_DML) | \
- (1 << LOG_HW_LINK_TRAINING) | \
- (1 << LOG_HW_AUDIO)| \
- (1 << LOG_BANDWIDTH_CALCS)*/
+#define DC_DEFAULT_LOG_MASK ((1ULL << LOG_ERROR) | \
+ (1ULL << LOG_WARNING) | \
+ (1ULL << LOG_EVENT_MODE_SET) | \
+ (1ULL << LOG_EVENT_DETECTION) | \
+ (1ULL << LOG_EVENT_LINK_TRAINING) | \
+ (1ULL << LOG_EVENT_LINK_LOSS) | \
+ (1ULL << LOG_EVENT_UNDERFLOW) | \
+ (1ULL << LOG_RESOURCE) | \
+ (1ULL << LOG_FEATURE_OVERRIDE) | \
+ (1ULL << LOG_DETECTION_EDID_PARSER) | \
+ (1ULL << LOG_DC) | \
+ (1ULL << LOG_HW_HOTPLUG) | \
+ (1ULL << LOG_HW_SET_MODE) | \
+ (1ULL << LOG_HW_RESUME_S3) | \
+ (1ULL << LOG_HW_HPD_IRQ) | \
+ (1ULL << LOG_SYNC) | \
+ (1ULL << LOG_BANDWIDTH_VALIDATION) | \
+ (1ULL << LOG_MST) | \
+ (1ULL << LOG_DETECTION_DP_CAPS) | \
+ (1ULL << LOG_BACKLIGHT)) | \
+ (1ULL << LOG_I2C_AUX) | \
+ (1ULL << LOG_IF_TRACE) | \
+ (1ULL << LOG_HDMI_FRL) | \
+ (1ULL << LOG_DTN) /* | \
+ (1ULL << LOG_DEBUG) | \
+ (1ULL << LOG_BIOS) | \
+ (1ULL << LOG_SURFACE) | \
+ (1ULL << LOG_SCALER) | \
+ (1ULL << LOG_DML) | \
+ (1ULL << LOG_HW_LINK_TRAINING) | \
+ (1ULL << LOG_HW_AUDIO)| \
+ (1ULL << LOG_BANDWIDTH_CALCS)*/
#endif /* __DAL_LOGGER_TYPES_H__ */
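Note: moving to 1ULL makes the mask 64-bit clean. With a plain int, 1 << n is undefined once enum dc_log_type reaches bit 31, which the table is approaching now that LOG_HDMI_FRL joins the default mask. (The doubled parenthesis after the LOG_BACKLIGHT term is carried over from the old macro; it closes the outer grouping early, which is harmless when the macro is used as a plain value but worth knowing.) A two-line illustration:

	/* Why 1ULL matters: plain int shifts overflow at bit 31. */
	uint64_t bit_ok  = 1ULL << 33;  /* 0x200000000, well defined */
	/* int  bit_bad = 1    << 33;      undefined behavior on 32-bit int */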
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b9992ebf77a6..c33454a9e0b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -524,12 +524,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
infopacket->sb[6] |= 0x04;
/* PB7 = FreeSync Minimum refresh rate (Hz) */
- infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000);
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
/* PB8 = FreeSync Maximum refresh rate (Hz)
* Note: We should never go above the field rate of the mode timing set.
*/
- infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
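Note: with the +500000 bias the micro-Hz rates round to the nearest Hz instead of truncating, so e.g. a 59.94 Hz limit is now reported as 60 rather than 59:

	/* Nearest-Hz rounding introduced above: */
	unsigned char pb7 = (unsigned char)((59940000u + 500000u) / 1000000u);  /* 60; truncation gave 59 */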
//FreeSync HDR
@@ -734,6 +734,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
{
struct core_freesync *core_freesync = NULL;
unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned long long rounded_nominal_in_uhz = 0;
unsigned int refresh_range = 0;
unsigned long long min_refresh_in_uhz = 0;
unsigned long long max_refresh_in_uhz = 0;
@@ -747,24 +748,23 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
- /* Rounded to the nearest Hz */
- nominal_field_rate_in_uhz = 1000000ULL *
- div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
-
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
- // Don't allow min > max
- if (min_refresh_in_uhz > max_refresh_in_uhz)
- min_refresh_in_uhz = max_refresh_in_uhz;
-
// Full range may be larger than current video timing, so cap at nominal
if (max_refresh_in_uhz > nominal_field_rate_in_uhz)
max_refresh_in_uhz = nominal_field_rate_in_uhz;
// Full range may be larger than current video timing, so cap at nominal
- if (min_refresh_in_uhz > nominal_field_rate_in_uhz)
- min_refresh_in_uhz = nominal_field_rate_in_uhz;
+ if (min_refresh_in_uhz > max_refresh_in_uhz)
+ min_refresh_in_uhz = max_refresh_in_uhz;
+
+ // If a monitor reports a max refresh rate of exactly 2x the min, enforce it on the nominal
+ rounded_nominal_in_uhz =
+ div_u64(nominal_field_rate_in_uhz + 50000, 100000) * 100000;
+ if (in_config->max_refresh_in_uhz == (2 * in_config->min_refresh_in_uhz) &&
+ in_config->max_refresh_in_uhz == rounded_nominal_in_uhz)
+ min_refresh_in_uhz = div_u64(nominal_field_rate_in_uhz, 2);
if (!vrr_settings_require_update(core_freesync,
in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz,
@@ -796,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
- in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
- 2 * in_out_vrr->min_duration_in_us;
- if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
- in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
-
in_out_vrr->supported = true;
}
@@ -808,9 +803,14 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.btr_enabled = in_config->btr;
- if (in_out_vrr->max_refresh_in_uhz <
- 2 * in_out_vrr->min_refresh_in_uhz)
+ if (in_out_vrr->max_refresh_in_uhz < (2 * in_out_vrr->min_refresh_in_uhz))
in_out_vrr->btr.btr_enabled = false;
+ else {
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+ }
in_out_vrr->btr.btr_active = false;
in_out_vrr->btr.inserted_duration_in_us = 0;
@@ -1012,8 +1012,8 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
unsigned int total = stream->timing.h_total * stream->timing.v_total;
/* Calculate nominal field rate for stream, rounded up to nearest integer */
- nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
- nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
+ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz;
+ nominal_field_rate_in_uhz *= 100000000ULL;
nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
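
The rework scales pix_clk_100hz (units of 100 Hz) straight up to micro-Hz before the single division, avoiding the precision lost by the old pre-division by 10. A worked example for a 1080p60 CEA timing:

	#include <stdio.h>

	int main(void)
	{
		/* 148.5 MHz pixel clock in 100 Hz units, 2200 x 1125 total */
		unsigned long long pix_clk_100hz = 1485000ULL;
		unsigned int total = 2200 * 1125; /* 2475000 */
		unsigned long long rate_uhz = pix_clk_100hz * 100000000ULL / total;

		/* 60000000 uHz, i.e. exactly 60 Hz with no rounding loss */
		printf("%llu uHz = %llu Hz\n", rate_uhz, rate_uhz / 1000000ULL);
		return 0;
	}
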
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 8aa528e874c4..cc1d3f470b99 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -52,8 +52,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
* hdcp is not desired
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->connection.displays[i].adjust.disable) {
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->displays[i].adjust.disable) {
is_auth_needed = 1;
break;
}
@@ -61,7 +61,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
is_auth_needed &&
- !hdcp->connection.link.adjust.hdcp1.disable;
+ !hdcp->connection.link.adjust.hdcp1.disable &&
+ !hdcp->connection.is_hdcp1_revoked;
}
static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
@@ -72,8 +73,8 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
* hdcp is not desired
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->connection.displays[i].adjust.disable) {
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->displays[i].adjust.disable) {
is_auth_needed = 1;
break;
}
@@ -103,8 +104,6 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- /* update topology event if hdcp is not desired */
- status = mod_hdcp_add_display_topology(hdcp);
} else if (is_in_hdcp1_states(hdcp)) {
status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
} else if (is_in_hdcp1_dp_states(hdcp)) {
@@ -115,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
} else if (is_in_hdcp2_dp_states(hdcp)) {
status = mod_hdcp_hdcp2_dp_execution(hdcp,
event_ctx, &input->hdcp2);
+ } else {
+ event_ctx->unexpected_event = 1;
+ goto out;
}
out:
return status;
@@ -191,14 +193,7 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
mod_hdcp_hdcp1_destroy_session(hdcp);
}
- if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
- }
+
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -212,25 +207,12 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
goto out;
}
}
- if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
- }
+
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
set_state_id(hdcp, output, HDCP_INITIALIZED);
} else if (is_in_cp_not_desired_state(hdcp)) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -337,16 +319,19 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- /* add display to connection */
- hdcp->connection.link = *link;
- *display_container = *display;
-
/* reset retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+ status = mod_hdcp_add_display_to_topology(hdcp, display->index);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
/* request authentication */
if (current_state(hdcp) != HDCP_INITIALIZED)
set_state_id(hdcp, output, HDCP_INITIALIZED);
@@ -379,17 +364,20 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- /* remove display */
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
-
/* clear retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
- /* request authentication for remaining displays*/
- if (get_active_display_count(hdcp) > 0)
+ /* remove display */
+ status = mod_hdcp_remove_display_from_topology(hdcp, index);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+
+ /* request authentication when connection is not reset */
+ if (current_state(hdcp) != HDCP_UNINITIALIZED)
callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
output);
out:
@@ -496,10 +484,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
- mode = MOD_HDCP_MODE_DP;
- break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
- mode = MOD_HDCP_MODE_DP_MST;
+ mode = MOD_HDCP_MODE_DP;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index af78e4f1be68..5cb4546be0ef 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -41,7 +41,6 @@ enum mod_hdcp_trans_input_result {
struct mod_hdcp_transition_input_hdcp1 {
uint8_t bksv_read;
uint8_t bksv_validation;
- uint8_t add_topology;
uint8_t create_session;
uint8_t an_write;
uint8_t aksv_write;
@@ -71,7 +70,6 @@ struct mod_hdcp_transition_input_hdcp1 {
struct mod_hdcp_transition_input_hdcp2 {
uint8_t hdcp2version_read;
uint8_t hdcp2_capable_check;
- uint8_t add_topology;
uint8_t create_session;
uint8_t ake_init_prepare;
uint8_t ake_init_write;
@@ -167,9 +165,9 @@ struct mod_hdcp_auth_counters {
/* contains values per connection */
struct mod_hdcp_connection {
struct mod_hdcp_link link;
- struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
uint8_t is_repeater;
uint8_t is_km_stored;
+ uint8_t is_hdcp1_revoked;
uint8_t is_hdcp2_revoked;
struct mod_hdcp_trace trace;
uint8_t hdcp1_retry_count;
@@ -202,6 +200,8 @@ struct mod_hdcp {
struct mod_hdcp_config config;
/* per connection */
struct mod_hdcp_connection connection;
+ /* per display */
+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
/* per authentication attempt */
struct mod_hdcp_authentication auth;
/* per state in an authentication */
@@ -327,10 +327,10 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* TODO: add adjustment log */
/* psp functions */
-enum mod_hdcp_status mod_hdcp_add_display_topology(
- struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_remove_display_topology(
- struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+ struct mod_hdcp *hdcp, uint8_t index);
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+ struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
@@ -392,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
{
- return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
- hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP);
}
static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
{
- return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
+ hdcp->connection.link.dp.mst_supported);
}
static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
@@ -519,7 +519,7 @@ static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_active(&hdcp->connection.displays[i]))
+ if (is_display_active(&hdcp->displays[i]))
added_count++;
return added_count;
}
@@ -530,7 +530,7 @@ static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->connection.displays[i]))
+ if (is_display_added(&hdcp->displays[i]))
added_count++;
return added_count;
}
@@ -542,8 +542,8 @@ static inline struct mod_hdcp_display *get_first_added_display(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (is_display_added(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
@@ -556,9 +556,9 @@ static inline struct mod_hdcp_display *get_active_display_at_index(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (hdcp->connection.displays[i].index == index &&
- is_display_active(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (hdcp->displays[i].index == index &&
+ is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
@@ -571,8 +571,8 @@ static inline struct mod_hdcp_display *get_empty_display_container(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (!is_display_active(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (!is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 37670db64855..37c8c05497d6 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -168,10 +168,6 @@ static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
- &input->add_topology, &status,
- hdcp, "add_topology"))
- goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
&input->create_session, &status,
hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 76edcbe51f71..f3711914364e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -46,8 +46,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
break;
case H1_A1_EXCHANGE_KSVS:
- if (input->add_topology != PASS ||
- input->create_session != PASS) {
+ if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
@@ -173,8 +172,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
break;
case D1_A1_EXCHANGE_KSVS:
- if (input->add_topology != PASS ||
- input->create_session != PASS) {
+ if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
@@ -210,7 +208,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->rx_validation != PASS) {
- if (hdcp->state.stay_count < 2) {
+ if (hdcp->state.stay_count < 2 &&
+ !hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
@@ -231,6 +230,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
(!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
+ } else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
}
if (conn->is_repeater) {
set_watchdog_in_ms(hdcp, 5000, output);
@@ -290,7 +292,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->ksvlist_vp_validation != PASS) {
- if (hdcp->state.stay_count < 2) {
+ if (hdcp->state.stay_count < 2 &&
+ !hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 55246711700b..491c00f48026 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -34,7 +34,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
if (is_dp_hdcp(hdcp))
is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
else
- is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) &&
+ is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
(HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
return is_ready ? MOD_HDCP_STATUS_SUCCESS :
@@ -67,7 +67,7 @@ static inline enum mod_hdcp_status check_reauthentication_request(
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
else
- ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ?
+ ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ?
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
return ret;
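
Both fixes follow the HDCP 2.2 HDMI RxStatus layout: byte 0 holds the low eight bits of the message size, while byte 1 carries the two size high bits plus the READY and REAUTH_REQ flags. A sketch of the extraction (bit positions per the HDCP 2.2 spec; sample values illustrative):

	#include <stdio.h>

	int main(void)
	{
		/* RxStatus: bits 9:0 msg size, bit 10 READY, bit 11 REAUTH_REQ */
		unsigned char rxstatus[2] = { 0x2B, 0x05 }; /* size 299, READY set */
		unsigned int msg_size = ((rxstatus[1] & 0x3) << 8) | rxstatus[0];
		int ready = !!(rxstatus[1] & (1 << 2));
		int reauth = !!(rxstatus[1] & (1 << 3));

		printf("size=%u ready=%d reauth=%d\n", msg_size, ready, reauth);
		return 0;
	}
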
@@ -259,6 +259,7 @@ static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
+
if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
&input->hdcp2version_read, &status,
hdcp, "hdcp2version_read"))
@@ -281,10 +282,7 @@ static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
- &input->add_topology, &status,
- hdcp, "add_topology"))
- goto out;
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
&input->create_session, &status,
hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index 8cae3e3aacd5..e738c7ae66ec 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -47,8 +47,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
}
break;
case H2_A1_SEND_AKE_INIT:
- if (input->add_topology != PASS ||
- input->create_session != PASS ||
+ if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
@@ -389,8 +388,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
}
break;
case D2_A1_SEND_AKE_INIT:
- if (input->add_topology != PASS ||
- input->create_session != PASS ||
+ if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index ff9d54812e62..bb5130f4228d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id {
MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
@@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = {
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
@@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = {
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
@@ -405,7 +408,7 @@ enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
+ hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
hdcp->auth.msg.hdcp2.ake_cert+1,
sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
@@ -423,7 +426,7 @@ enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
+ hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
hdcp->auth.msg.hdcp2.ake_h_prime+1,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
@@ -441,7 +444,7 @@ enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
+ hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
hdcp->auth.msg.hdcp2.ake_pairing_info+1,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
@@ -459,7 +462,7 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
+ hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
hdcp->auth.msg.hdcp2.lc_l_prime+1,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
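
The magic numbers replaced in these hunks are the HDCP 2.2 message IDs; the old values are visible in the '-' lines. Collected as a compilable sketch (names suffixed with _ID to avoid clashing with the real drm_hdcp.h macros):

	#include <stdio.h>

	/* HDCP 2.2 message IDs matching the constants replaced above */
	enum {
		AKE_SEND_CERT_ID         = 3,
		AKE_SEND_HPRIME_ID       = 7,
		AKE_SEND_PAIRING_INFO_ID = 8,
		LC_SEND_LPRIME_ID        = 10,
		REP_SEND_RECVID_LIST_ID  = 12,
		REP_STREAM_READY_ID      = 17,
	};

	int main(void)
	{
		printf("AKE_Send_Cert msg_id = %d\n", AKE_SEND_CERT_ID);
		return 0;
	}
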
@@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
{
- enum mod_hdcp_status status;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
- status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
- hdcp->auth.msg.hdcp2.rx_id_list+1,
- sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
+ uint32_t device_count = 0;
+ uint32_t rx_id_list_size = 0;
+ uint32_t bytes_read = 0;
+ hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ hdcp->auth.msg.hdcp2.rx_id_list+1,
+ HDCP_MAX_AUX_TRANSACTION_SIZE);
+ if (status == MOD_HDCP_STATUS_SUCCESS) {
+ bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE;
+ device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+ (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+ rx_id_list_size = MIN((21 + 5 * device_count),
+ (sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1));
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
+ hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read,
+ (rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE);
+ }
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
hdcp->auth.msg.hdcp2.rx_id_list,
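
The DP path now reads the receiver ID list in two chunks and sizes the second read from the device count parsed out of RxInfo. A sketch of the sizing math, with HDCP_MAX_AUX_TRANSACTION_SIZE assumed to be 16 (the DP AUX read limit):

	#include <stdio.h>

	#define HDCP_MAX_AUX_TRANSACTION_SIZE 16 /* assumed DP AUX read limit */

	int main(void)
	{
		unsigned int device_count = 4;
		/* 21 fixed bytes (RxInfo, seq_num_V, V') plus 5 per receiver ID */
		unsigned int list_size = 21 + 5 * device_count;
		/* length of the second read, in whole AUX transactions, as above */
		unsigned int part2 = (list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE
				     * HDCP_MAX_AUX_TRANSACTION_SIZE;

		printf("list=%u bytes, second read=%u bytes\n", list_size, part2);
		return 0;
	}
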
@@ -495,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 724ebcee9a19..44956f9ba178 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -90,10 +90,14 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index ff91373ebada..d3192b9d0c3d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -37,10 +37,11 @@
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
HDCP_LOG_ERR(hdcp, \
- "[Link %d] WARNING %s IN STATE %s", \
+ "[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \
hdcp->config.index, \
mod_hdcp_status_to_str(status), \
- mod_hdcp_state_id_to_str(hdcp->state.id))
+ mod_hdcp_state_id_to_str(hdcp->state.id), \
+ hdcp->state.stay_count)
#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 1.4 enabled on display %d", \
@@ -49,6 +50,15 @@
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 2.2 enabled on display %d", \
hdcp->config.index, displayIndex)
+#define HDCP_HDCP1_DISABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 disabled on display %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_HDCP2_DISABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 2.2 disabled on display %d", \
+ hdcp->config.index, displayIndex)
+
/* state machine logs */
#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
HDCP_LOG_FSM(hdcp, \
@@ -102,6 +112,9 @@
sizeof(hdcp->auth.msg.hdcp1.bksv)); \
HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
+ sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
sizeof(hdcp->auth.msg.hdcp1.an)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 7911dc157d5a..c2929815c3ee 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -44,83 +44,80 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp,
in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg3_desc.msg_size = 0;
}
-enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
-{
-
- struct psp_context *psp = hdcp->config.psp.handle;
- struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display = NULL;
- uint8_t i;
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+ struct mod_hdcp *hdcp, uint8_t index)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display =
+ get_active_display_at_index(hdcp, index);
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (is_display_added(&(hdcp->connection.displays[i]))) {
-
- memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
-
- display = &hdcp->connection.displays[i];
+ if (!display || !is_display_added(display))
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
- dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
- dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
- dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
- dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
- psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- display->state = MOD_HDCP_DISPLAY_ACTIVE;
- HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
- }
- }
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+
+ return MOD_HDCP_STATUS_SUCCESS;
}
-
-enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
+ uint8_t index)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display = NULL;
+ struct mod_hdcp_display *display =
+ get_active_display_at_index(hdcp, index);
struct mod_hdcp_link *link = &hdcp->connection.link;
- uint8_t i;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
return MOD_HDCP_STATUS_FAILURE;
}
+ if (!display || is_display_added(display))
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
- display = &hdcp->connection.displays[i];
-
- memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
-
- dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
- dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
- dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
- dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
- dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
- dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
- dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
- dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
- dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
- TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
- dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
-
- psp_dtm_invoke(psp, dtm_cmd->cmd_id);
-
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
- display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
- }
- }
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ if (is_dp_hdcp(hdcp))
+ dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported;
+
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -164,6 +161,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ uint8_t i = 0;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -177,6 +175,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(
+ &hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+ HDCP_HDCP1_DISABLED_TRACE(hdcp,
+ hdcp->displays[i].index);
+ }
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -210,6 +216,10 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
hdcp->connection.is_repeater = 0;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+ hdcp->connection.is_hdcp1_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
} else
return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
@@ -245,6 +255,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -264,10 +275,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+ hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+ hdcp->connection.is_hdcp1_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -281,14 +301,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->connection.displays[i].adjust.disable)
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->displays[i].adjust.disable)
continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
- hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -296,8 +316,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
- hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
- HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
return MOD_HDCP_STATUS_SUCCESS;
@@ -385,6 +405,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ uint8_t i = 0;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -398,6 +419,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(
+ &hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+ HDCP_HDCP2_DISABLED_TRACE(hdcp,
+ hdcp->displays[i].index);
+ }
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -473,9 +502,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
return MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
}
- return MOD_HDCP_STATUS_FAILURE;
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -630,20 +662,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct mod_hdcp_display *display = get_first_added_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
- msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
-
- hdcp2_message_init(hdcp, msg_in);
-
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
- hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -695,6 +722,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
return MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
}
@@ -717,10 +747,10 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->connection.displays[i].adjust.disable)
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->displays[i].adjust.disable)
continue;
- hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
@@ -729,8 +759,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
break;
- hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
- HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 82a5e997d573..1a663dbbf810 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -117,6 +117,8 @@ struct ta_dtm_shared_memory {
int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
uint64_t fence_mc_addr);
+enum { PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE = 5120 };
+
enum ta_hdcp_command {
TA_HDCP_COMMAND__INITIALIZE,
TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
@@ -134,7 +136,10 @@ enum ta_hdcp_command {
TA_HDCP_COMMAND__UNUSED_3,
TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2,
TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2,
- TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION
+ TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP_DESTROY_ALL_SESSIONS,
+ TA_HDCP_COMMAND__HDCP_SET_SRM,
+ TA_HDCP_COMMAND__HDCP_GET_SRM
};
enum ta_hdcp2_msg_id {
@@ -235,7 +240,8 @@ enum ta_hdcp_authentication_status {
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06,
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07,
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,
- TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED = 0x0A
};
enum ta_hdcp2_msg_authentication_status {
@@ -253,7 +259,8 @@ enum ta_hdcp2_msg_authentication_status {
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM,
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE,
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH,
- TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED
};
enum ta_hdcp_content_type {
@@ -415,6 +422,22 @@ struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input {
uint32_t display_handle;
};
+struct ta_hdcp_cmd_set_srm_input {
+ uint32_t srm_buf_size;
+ uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_set_srm_output {
+ uint8_t valid_signature;
+ uint32_t srm_version;
+};
+
+struct ta_hdcp_cmd_get_srm_output {
+ uint32_t srm_version;
+ uint32_t srm_buf_size;
+ uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
/**********************************************************/
/* Common input structure for HDCP callbacks */
union ta_hdcp_cmd_input {
@@ -432,6 +455,7 @@ union ta_hdcp_cmd_input {
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2
hdcp2_prepare_process_authentication_message_v2;
struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_set_srm_input hdcp_set_srm;
};
/* Common output structure for HDCP callbacks */
@@ -444,6 +468,8 @@ union ta_hdcp_cmd_output {
struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2
hdcp2_prepare_process_authentication_message_v2;
+ struct ta_hdcp_cmd_set_srm_output hdcp_set_srm;
+ struct ta_hdcp_cmd_get_srm_output hdcp_get_srm;
};
/**********************************************************/
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f2a0e1a064da..c088602bc1a0 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -56,8 +56,10 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED,
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
@@ -100,6 +102,7 @@ enum mod_hdcp_status {
struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_supported;
+ uint8_t mst_supported;
};
struct mod_hdcp_hdmi {
@@ -108,8 +111,7 @@ struct mod_hdcp_hdmi {
enum mod_hdcp_operation_mode {
MOD_HDCP_MODE_OFF,
MOD_HDCP_MODE_DEFAULT,
- MOD_HDCP_MODE_DP,
- MOD_HDCP_MODE_DP_MST
+ MOD_HDCP_MODE_DP
};
enum mod_hdcp_display_state {
@@ -155,7 +157,8 @@ struct mod_hdcp_display_adjustment {
struct mod_hdcp_link_adjustment_hdcp1 {
uint8_t disable : 1;
uint8_t postpone_encryption : 1;
- uint8_t reserved : 6;
+ uint8_t min_auth_retries_wa : 1;
+ uint8_t reserved : 5;
};
enum mod_hdcp_force_hdcp_type {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 42cbeffac640..13c57ff2abdc 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -34,8 +34,7 @@ struct dc_info_packet;
struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet,
- bool *use_vsc_sdp_for_colorimetry);
+ struct dc_info_packet *info_packet);
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 6a8a056424b8..cff3ab15fc0c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -130,8 +130,7 @@ enum ColorimetryYCCDP {
};
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet,
- bool *use_vsc_sdp_for_colorimetry)
+ struct dc_info_packet *info_packet)
{
unsigned int vsc_packet_revision = vsc_packet_undefined;
unsigned int i;
@@ -139,11 +138,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
unsigned int colorimetryFormat = 0;
bool stereo3dSupport = false;
- /* Initialize first, later if infopacket is valid determine if VSC SDP
- * should be used to signal colorimetry format and pixel encoding.
- */
- *use_vsc_sdp_for_colorimetry = false;
-
if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
vsc_packet_revision = vsc_packet_rev1;
stereo3dSupport = true;
@@ -153,9 +147,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
if (stream->psr_version != 0)
vsc_packet_revision = vsc_packet_rev2;
- /* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
- if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+ /* Update to revision 5 for extended colorimetry support */
+ if (stream->use_vsc_sdp_for_colorimetry)
vsc_packet_revision = vsc_packet_rev5;
/* VSC packet not needed based on the features
@@ -269,13 +262,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
- /* If we are using VSC SDP revision 05h, use this to signal for
- * colorimetry format and pixel encoding. HW should later be
- * programmed to set MSA MISC1 bit 6 to indicate ignore
- * colorimetry format and pixel encoding in the MSA.
- */
- *use_vsc_sdp_for_colorimetry = true;
-
/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
* Data Bytes DB 18~16
* Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
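
Per the DB18-16 comment above (colorimetry format in bits 3:0, pixel encoding in bits 7:4), a minimal packing sketch with illustrative field values:

	#include <stdio.h>

	int main(void)
	{
		unsigned char pixel_encoding = 0x1; /* illustrative enum value */
		unsigned char colorimetry = 0x3;    /* illustrative enum value */
		unsigned char db16 = (unsigned char)((pixel_encoding << 4) |
						     (colorimetry & 0xF));

		printf("DB16 = 0x%02X\n", db16); /* 0x13 */
		return 0;
	}
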
diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
index f0a153704f6e..00f132f8ad55 100644
--- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
+++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
@@ -40,14 +40,18 @@ struct core_vmid {
static void add_ptb_to_table(struct core_vmid *core_vmid, unsigned int vmid, uint64_t ptb)
{
- core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
- core_vmid->num_vmids_available--;
+ if (vmid < MAX_VMID) {
+ core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
+ core_vmid->num_vmids_available--;
+ }
}
static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned int vmid)
{
- core_vmid->ptb_assigned_to_vmid[vmid] = 0;
- core_vmid->num_vmids_available++;
+ if (vmid < MAX_VMID) {
+ core_vmid->ptb_assigned_to_vmid[vmid] = 0;
+ core_vmid->num_vmids_available++;
+ }
}
static void evict_vmids(struct core_vmid *core_vmid)
@@ -57,7 +61,7 @@ static void evict_vmids(struct core_vmid *core_vmid)
// At this point any positions with value 0 are unused vmids, evict them
for (i = 1; i < core_vmid->num_vmid; i++) {
- if (ord & (1u << i))
+ if (!(ord & (1u << i)))
clear_entry_from_vmid_table(core_vmid, i);
}
}
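
The fix inverts the bitmask test so that VMIDs whose ordinal bit is clear (i.e. unused) are the ones evicted, rather than the ones still referenced. A standalone sketch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ord = 0x0A; /* bits 1 and 3 set: vmids 1 and 3 in use */
		unsigned int num_vmid = 4;
		unsigned int i;

		for (i = 1; i < num_vmid; i++)
			if (!(ord & (1u << i)))           /* fixed condition */
				printf("evict vmid %u\n", i); /* prints vmid 2 only */
		return 0;
	}
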
@@ -91,7 +95,7 @@ static int get_next_available_vmid(struct core_vmid *core_vmid)
uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
{
struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid);
- unsigned int vmid = 0;
+ int vmid = 0;
// Physical address gets vmid 0
if (ptb == 0)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
new file mode 100644
index 000000000000..82b6cc25205e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SH_MASK_HEADER
+#define _wafl2_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
index 723af0b2dda0..4a51a90c611a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,25 +19,11 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#include "dml_common_defs.h"
-#include "dcn_calc_math.h"
-
-#include "dml_inline_defs.h"
-
-double dml_round(double a)
-{
- double round_pt = 0.5;
- double ceil = dml_ceil(a, 1);
- double floor = dml_floor(a, 1);
-
- if (a - floor >= round_pt)
- return ceil;
- else
- return floor;
-}
+#ifndef _wafl2_4_0_0_SMN_HEADER
+#define _wafl2_4_0_0_SMN_HEADER
+#define smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS 0x11cf0210
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
new file mode 100644
index 000000000000..f37712f05b03
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SH_MASK_HEADER
+#define _xgmi_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX16_PCS_ERROR_STATUS
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
new file mode 100644
index 000000000000..6ccbac4ce87e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SMN_HEADER
+#define _xgmi_4_0_0_SMN_HEADER
+
+#define smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS 0x11af0210
+
+#endif
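
The pair of headers above supply an SMN aperture address plus per-field shift/mask constants. A minimal sketch of how a driver might read and decode one field with these definitions (the helper name and the use of RREG32_PCIE are illustrative assumptions, not part of this patch):

/* Hypothetical helper: read the XGMI0 PCS error status and right-align
 * the BER accumulator field using the mask/shift pair defined above. */
static u32 xgmi0_read_ber_accum(struct amdgpu_device *adev)
{
	u32 status = RREG32_PCIE(smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS);

	return (status & XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK) >>
	       XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT;
}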
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index a607b1034962..a3c238c39ef5 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources {
uint32_t num_queue_per_pipe;
/* Bit n == 1 means Queue n is available for KFD */
- DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
+ DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
/* SDMA doorbell assignments (SOC15 and later chips only). Only
* specific doorbells are routed to each SDMA engine. Others
@@ -151,6 +151,7 @@ struct kgd2kfd_shared_resources {
/* Minor device number of the render node */
int drm_render_minor;
+
};
struct tile_config {
@@ -166,27 +167,6 @@ struct tile_config {
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
-/*
- * Allocation flag domains
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
-#define ALLOC_MEM_FLAGS_GTT (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
-#define ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
-
-/*
- * Allocation flags attributes/access options.
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
-#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
-#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
-#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
-#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
-#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
-
/**
* struct kfd2kgd_calls
*
@@ -222,8 +202,6 @@ struct tile_config {
* @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
* Only used for no cp scheduling mode
*
- * @get_tile_config: Returns GPU-specific tiling mode information
- *
* @set_vm_context_page_table_base: Program page table base for a VMID
*
* @invalidate_tlbs: Invalidate TLBs for a specific PASID
@@ -236,6 +214,8 @@ struct tile_config {
*
* @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
*
+ * @get_unique_id: Returns uuid id of current device
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -307,12 +287,11 @@ struct kfd2kgd_calls {
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
- int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
-
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
+ uint64_t (*get_unique_id)(struct kgd_dev *kgd);
};
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index c195575366a3..8e2acb4df860 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -319,12 +319,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
hwmgr->en_umd_pstate = true;
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->get_asic_baco_capability)
return 0;
mutex_lock(&hwmgr->smu_lock);
@@ -1469,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->set_asic_baco_state)
return 0;
mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 96e81c7bc266..e77046931e4c 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -23,15 +23,12 @@
#include <linux/firmware.h>
#include <linux/pci.h>
-#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
-#include "amd_pcie.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
@@ -121,20 +118,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
if (enabled) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
+ feature_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
+ feature_high, NULL);
if (ret)
return ret;
} else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
+ feature_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
+ feature_high, NULL);
if (ret)
return ret;
}
@@ -195,21 +192,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
return -EINVAL;
if (if_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, if_version);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
if (ret)
return ret;
}
if (smu_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, smu_version);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
if (ret)
return ret;
}
@@ -218,17 +207,19 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
}
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+ uint32_t min, uint32_t max, bool lock_needed)
{
int ret = 0;
- if (min < 0 && max < 0)
- return -EINVAL;
-
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -251,7 +242,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -259,7 +250,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -335,12 +326,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
- ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
- param);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, &param);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+ param, &param);
if (ret)
return ret;
@@ -542,7 +529,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
- table_id | ((argument & 0xFFFF) << 16));
+ table_id | ((argument & 0xFFFF) << 16),
+ NULL);
if (ret)
return ret;
@@ -900,6 +888,7 @@ static int smu_sw_init(void *handle)
mutex_init(&smu->sensor_lock);
mutex_init(&smu->metrics_lock);
+ mutex_init(&smu->message_lock);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -943,6 +932,13 @@ static int smu_sw_init(void *handle)
return ret;
}
+ if (adev->smu.ppt_funcs->i2c_eeprom_init) {
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -952,6 +948,9 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
+ if (adev->smu.ppt_funcs->i2c_eeprom_fini)
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
kfree(smu->irq_source);
smu->irq_source = NULL;
@@ -1113,12 +1112,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ ret = smu_set_driver_table_location(smu);
+ if (ret)
+ return ret;
+
/* smu_dump_pptable(smu); */
if (!amdgpu_sriov_vf(adev)) {
- ret = smu_set_driver_table_location(smu);
- if (ret)
- return ret;
-
/*
* Copy pptable bo in the vram to smc with SMU MSGs such as
* SetDriverDramAddr and TransferTableDram2Smu.
@@ -1155,6 +1154,21 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
}
}
}
+
+ if (smu->ppt_funcs->set_power_source) {
+ /*
+ * For Navi1X, manually switch it to AC mode as PMFW
+ * may boot it with DC mode.
+ */
+ if (adev->pm.ac_power)
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ else
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+ if (ret) {
+ pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
+ return ret;
+ }
+ }
}
if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_notify_display_change(smu);
@@ -1454,29 +1468,84 @@ int smu_reset(struct smu_context *smu)
return ret;
}
+static int smu_disable_dpm(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t smu_version;
+ int ret = 0;
+ bool use_baco = !smu->is_apu &&
+ ((adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version.\n");
+ return ret;
+ }
+
+ /*
+ * Disable all enabled SMU features.
+	 * This should be handled in SMU FW; as a backup, the
+	 * driver can issue this call to SMU FW until the sequence
+	 * in SMU FW is operational.
+ */
+ ret = smu_system_features_control(smu, false);
+ if (ret) {
+ pr_err("Failed to disable smu features.\n");
+ return ret;
+ }
+
+ /*
+ * Arcturus does not have BACO bit in disable feature mask.
+ * Enablement of BACO bit on Arcturus should be skipped.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ if (use_baco && (smu_version > 0x360e00))
+ return 0;
+ }
+
+ /* For baco, need to leave BACO feature enabled */
+ if (use_baco) {
+ /*
+		 * Check whether SMU_FEATURE_BACO_BIT is supported.
+		 *
+		 * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' would
+		 * always return false here, since the
+		 * 'smu_system_features_control(smu, false)' call above just
+		 * disabled all SMU features.
+		 *
+		 * Thus 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is
+		 * used for the check instead.
+ */
+ if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
+ if (ret) {
+ pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
static int smu_suspend(void *handle)
{
- int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = false;
+ int ret;
- if (!smu->pm_enabled)
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if(!smu->is_apu)
- baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
-
- ret = smu_system_features_control(smu, false);
- if (ret)
- return ret;
+ if (!smu->pm_enabled)
+ return 0;
- if (baco_feature_is_enabled) {
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
- if (ret) {
- pr_warn("set BACO feature enabled failed, return %d\n", ret);
+	if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_disable_dpm(smu);
+ if (ret)
return ret;
- }
}
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
@@ -1675,12 +1744,12 @@ static int smu_enable_umd_pstate(void *handle,
if (*level & profile_mode_mask) {
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
smu_dpm_ctx->enable_umd_pstate = true;
- amdgpu_device_ip_set_clockgating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1942,7 +2011,7 @@ int smu_set_mp1_state(struct smu_context *smu,
return 0;
}
- ret = smu_send_smc_msg(smu, msg);
+ ret = smu_send_smc_msg(smu, msg, NULL);
if (ret)
pr_err("[PrepareMp1] Failed!\n");
@@ -2018,6 +2087,29 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
return 0;
}
+int smu_set_ac_dc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ /* controlled by firmware */
+ if (smu->dc_controlled_by_gpio)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->set_power_source) {
+ if (smu->adev->pm.ac_power)
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ else
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+ if (ret)
+ pr_err("Failed to switch to %s mode!\n",
+ smu->adev->pm.ac_power ? "AC" : "DC");
+ }
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
@@ -2620,12 +2712,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
return ret;
}
-
-int smu_send_smc_msg(struct smu_context *smu,
- enum smu_message_type msg)
-{
- int ret;
-
- ret = smu_send_smc_msg_with_param(smu, msg, 0);
- return ret;
-}
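
The hunks above fold argument readback into the message-send path: smu_send_smc_msg_with_param() now takes an optional read_arg pointer (NULL means no readback) and the whole send/wait/read sequence is serialized under the new message_lock. A minimal sketch of the resulting calling convention, reusing message names from this patch:

/* Query the SMU firmware version, then fire a message with no readback. */
uint32_t smu_version;
int ret;

ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, &smu_version);
if (ret)
	return ret;

ret = smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);	/* no readback */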
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 14ba6aa876e2..1ef0923f7190 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -36,13 +35,14 @@
#include "arcturus_ppt.h"
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
+#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/i2c.h>
#include <linux/pci.h>
#include "amdgpu_ras.h"
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
@@ -127,6 +127,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(WaflTest, PPSMC_MSG_WaflTest),
MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -373,13 +374,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ &num_of_levels);
if (ret) {
pr_err("[%s] failed to get dpm levels!\n", __func__);
return ret;
}
- smu_read_smc_arg(smu, &num_of_levels);
if (!num_of_levels) {
pr_err("[%s] number of clk levels is invalid!\n", __func__);
return -EINVAL;
@@ -389,12 +390,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | i));
+ (clk_id << 16 | i),
+ &clk);
if (ret) {
pr_err("[%s] failed to get dpm freq by index!\n", __func__);
return ret;
}
- smu_read_smc_arg(smu, &clk);
if (!clk) {
pr_err("[%s] clk value is invalid!\n", __func__);
return -EINVAL;
@@ -552,13 +553,13 @@ static int arcturus_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
if (ret) {
pr_err("RunAfllBtc failed!\n");
return ret;
}
- return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -743,7 +744,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
@@ -758,7 +760,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_UCLK << 16) | (freq & 0xffff));
+ (PPCLK_UCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s memclk !\n",
max ? "max" : "min");
@@ -773,7 +776,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s socclk !\n",
max ? "max" : "min");
@@ -790,8 +794,21 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
struct arcturus_dpm_table *dpm_table;
struct arcturus_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level;
+ uint32_t smu_version;
int ret = 0;
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ if (smu_version >= 0x361200) {
+ pr_err("Forcing clock level is not supported with "
+ "54.18 and onwards SMU firmwares\n");
+ return -EOPNOTSUPP;
+ }
+
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -1288,12 +1305,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- power_src << 16);
+ power_src << 16, &asic_default_power_limit);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
}
- smu_read_smc_arg(smu, &asic_default_power_limit);
} else {
/* the last hope to figure out the ppt limit */
if (!pptable) {
@@ -1497,7 +1513,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (ret) {
pr_err("Fail to set workload type %d\n", workload_type);
return ret;
@@ -1508,6 +1525,38 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return 0;
}
+static int arcturus_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ if (smu_version >= 0x361200) {
+ pr_err("Forcing clock level is not supported with "
+ "54.18 and onwards SMU firmwares\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return smu_v11_0_set_performance_level(smu, level);
+}
+
static void arcturus_dump_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -2187,7 +2236,7 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &arcturus_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
res = i2c_add_adapter(control);
if (res)
@@ -2207,6 +2256,18 @@ static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
i2c_del_adapter(control);
}
+static bool arcturus_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+}
+
static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
@@ -2214,6 +2275,27 @@ static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
+static int arcturus_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_DFCstateControl is supported by 54.15.0 and onwards */
+ if (smu_version < 0x360F00) {
+ pr_err("DFCstateControl is only supported by PMFW 54.15.0 and onwards\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2248,7 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
- .set_performance_level = smu_v11_0_set_performance_level,
+ .set_performance_level = arcturus_set_performance_level,
/* debug (internal used) */
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
@@ -2277,7 +2359,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -2298,7 +2379,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+	.baco_is_support = arcturus_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
@@ -2307,6 +2388,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
+ .set_df_cstate = arcturus_set_df_cstate,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
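
The firmware gates added in this file compare smu_version against packed constants; the pairs quoted in the patch (0x360F00 alongside "54.15.0", 0x361200 alongside "54.18") suggest a (major << 16) | (minor << 8) | patch encoding. A hedged sketch of that reading (the helper is hypothetical):

/* Hypothetical helper: pack a PMFW version the way the checks above
 * appear to expect, e.g. pmfw_version(54, 18, 0) == 0x361200. */
static inline uint32_t pmfw_version(uint32_t major, uint32_t minor, uint32_t patch)
{
	return (major << 16) | (minor << 8) | patch;
}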
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 77c14671866c..719597c5d27d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -984,6 +984,32 @@ static int init_thermal_controller(
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
+ hwmgr->thermal_controller.ucType =
+ powerplay_table->sThermalController.ucType;
+ hwmgr->thermal_controller.ucI2cLine =
+ powerplay_table->sThermalController.ucI2cLine;
+ hwmgr->thermal_controller.ucI2cAddress =
+ powerplay_table->sThermalController.ucI2cAddress;
+
+ hwmgr->thermal_controller.fanInfo.bNoFan =
+ (0 != (powerplay_table->sThermalController.ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN));
+
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+ powerplay_table->sThermalController.ucFanParameters &
+ ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+
+ hwmgr->thermal_controller.fanInfo.ulMinRPM
+ = powerplay_table->sThermalController.ucFanMinRPM * 100UL;
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM
+ = powerplay_table->sThermalController.ucFanMaxRPM * 100UL;
+
+ set_hw_cap(hwmgr,
+ ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
+ PHM_PlatformCaps_ThermalController);
+
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
+
return 0;
}
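
The ATOM table stores the fan RPM limits divided by 100 so each fits in a byte, which is why init_thermal_controller() above multiplies them back. Worked example (table value assumed for illustration):

uint8_t ucFanMaxRPM = 60;			/* as stored in the table */
unsigned long ulMaxRPM = ucFanMaxRPM * 100UL;	/* decodes to 6000 RPM */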
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index bf04cfefb283..4795eb66b2b2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
switch (sources) {
default:
pr_err("Unknown throttling event sources.");
- /* fall through */
+ fallthrough;
case 0:
protection = false;
/* src is unused */
@@ -3698,12 +3698,12 @@ static int smu7_request_link_speed_change_before_state_change(
data->force_pcie_gen = PP_PCIEGen2;
if (current_link_speed == PP_PCIEGen2)
break;
- /* fall through */
+ fallthrough;
case PP_PCIEGen2:
if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
break;
+ fallthrough;
#endif
- /* fall through */
default:
data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
break;
@@ -3804,9 +3804,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
{
uint32_t i;
+ /* force the trim if mclk_switching is disabled to prevent flicker */
+ bool force_trim = (low_limit == high_limit);
for (i = 0; i < dpm_table->count; i++) {
/*skip the trim if od is enabled*/
- if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+ if ((!hwmgr->od_enabled || force_trim)
+ && (dpm_table->dpm_levels[i].value < low_limit
|| dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 92a65e3daff4..f29f95be1e56 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3382,7 +3382,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
@@ -3390,7 +3390,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
result = vega10_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3b3ec5666051..08b6ba39a6d7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -487,15 +487,16 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
int ret = 0;
+ bool use_baco = (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ (adev->in_runpm && amdgpu_asic_supports_baco(adev));
ret = vega20_init_sclk_threshold(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to init sclk threshold!",
return ret);
- if (adev->in_gpu_reset &&
- (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
-
+ if (use_baco) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
pr_err("Failed to apply vega20 baco workaround!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 97b6714e83e6..ae2c318dd6fa 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -362,6 +362,7 @@ struct smu_context
struct mutex mutex;
struct mutex sensor_lock;
struct mutex metrics_lock;
+ struct mutex message_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
@@ -371,6 +372,9 @@ struct smu_context
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
void *od_settings;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_sclk;
+#endif
uint32_t pstate_sclk;
uint32_t pstate_mclk;
@@ -404,6 +408,7 @@ struct smu_context
uint32_t smc_if_version;
bool uploading_custom_pp_table;
+ bool dc_controlled_by_gpio;
};
struct i2c_adapter;
@@ -514,8 +519,7 @@ struct pptable_funcs {
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg_with_param)(struct smu_context *smu,
- enum smu_message_type msg, uint32_t param);
- int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
+ enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
@@ -567,6 +571,7 @@ struct pptable_funcs {
int (*override_pcie_parameters)(struct smu_context *smu);
uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
+ int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
};
int smu_load_microcode(struct smu_context *smu);
@@ -707,7 +712,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool lock_needed);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -715,6 +720,7 @@ int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
+int smu_set_ac_dc(struct smu_context *smu);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index e3291259b249..f736d773f9d6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -110,7 +110,11 @@
//Others
#define PPSMC_MSG_SetMemoryChannelEnable 0x39
-#define PPSMC_Message_Count 0x3A
+//OOB
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A
+
+#define PPSMC_MSG_DFCstateControl 0x3B
+#define PPSMC_Message_Count 0x3C
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index 822cd8b5bf90..cea65093b6ad 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -37,7 +37,7 @@
#define PP_ASSERT_WITH_CODE(cond, msg, code) \
do { \
if (!(cond)) { \
- pr_warn("%s\n", msg); \
+ pr_warn_ratelimited("%s\n", msg); \
code; \
} \
} while (0)
@@ -45,7 +45,7 @@
#define PP_ASSERT(cond, msg) \
do { \
if (!(cond)) { \
- pr_warn("%s\n", msg); \
+ pr_warn_ratelimited("%s\n", msg); \
} \
} while (0)
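
pr_warn_ratelimited() takes the same arguments as pr_warn() but suppresses output once the printk rate limit is hit, so a PP_ASSERT that fails on every iteration of a hot loop can no longer flood the log. Usage is unchanged for callers; a sketch reusing a message from this patch:

/* Emits at most a rate-limited number of lines, however often it runs. */
pr_warn_ratelimited("%s\n", "Failed to populate SCLK during PopulateNewDPMClocksStates Function!");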
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
index ac0120e384be..4b2da98afcd2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
@@ -701,7 +701,8 @@ typedef struct {
// APCC Settings
uint16_t PccThresholdLow;
uint16_t PccThresholdHigh;
- uint32_t PaddingAPCC[6]; //FIXME pending SPEC
+ uint32_t MGpuFanBoostLimitRpm;
+ uint32_t PaddingAPCC[5];
// Temperature Dependent Vmin
uint16_t VDDGFX_TVmin; //Celsius
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index d5314d12628a..674e426ed59b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -28,8 +28,9 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x33
-#define SMU11_DRIVER_IF_VERSION_NV14 0x34
+#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_NV12 0x33
+#define SMU11_DRIVER_IF_VERSION_NV14 0x36
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -182,9 +183,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param);
-
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+ uint32_t param,
+ uint32_t *read_arg);
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
@@ -267,4 +267,7 @@ uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
int smu_v11_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
+int smu_v11_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index d79e54b5ebf6..7fbebc1979cf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -40,14 +40,13 @@ struct smu_12_0_cmn2aisc_mapping {
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg);
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
-
int smu_v12_0_wait_for_response(struct smu_context *smu);
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param);
+ uint32_t param,
+ uint32_t *read_arg);
int smu_v12_0_check_fw_status(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index aed4d6e60907..15030284b444 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
@@ -29,14 +28,15 @@
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
-#include "soc15_common.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
-#include "nbio/nbio_7_4_sh_mask.h"
+#include "nbio/nbio_2_3_offset.h"
+#include "nbio/nbio_2_3_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -349,7 +349,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
- | FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
| FEATURE_MASK(FEATURE_FW_CTF_BIT)
@@ -393,6 +392,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+ if (smu->dc_controlled_by_gpio)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+
/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
if (is_asic_secure(smu)) {
/* only for navi10 A0 */
@@ -527,6 +529,9 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
+ if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
mutex_lock(&smu_baco->mutex);
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
@@ -661,14 +666,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -686,14 +691,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
if (ret)
return ret;
}
@@ -970,7 +975,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
if (ret)
return size;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return size;
break;
@@ -1042,7 +1047,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
int ret = 0;
uint32_t max_freq = 0;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
if (ret)
return ret;
@@ -1066,7 +1071,8 @@ static int navi10_display_config_changed(struct smu_context *smu)
smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
- smu->display_config->num_display);
+ smu->display_config->num_display,
+ NULL);
if (ret)
return ret;
}
@@ -1093,7 +1099,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -1119,7 +1125,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -1391,7 +1397,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
if (workload_type < 0)
return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type, NULL);
return ret;
}
@@ -1456,7 +1462,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcef_clock_in_sr/100);
+ min_clocks.dcef_clock_in_sr/100,
+ NULL);
if (ret) {
pr_err("Attempt to set divider for DCEFCLK Failed!");
return ret;
@@ -1678,10 +1685,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu)
return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
}
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -1746,10 +1753,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -1859,12 +1866,11 @@ static int navi10_get_power_limit(struct smu_context *smu,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- power_src << 16);
+ power_src << 16, &asic_default_power_limit);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
}
- smu_read_smc_arg(smu, &asic_default_power_limit);
} else {
/* the last hope to figure out the ppt limit */
if (!pptable) {
@@ -1904,7 +1910,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] : pcie_width_cap);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
+ smu_pcie_arg,
+ NULL);
if (ret)
return ret;
@@ -1950,13 +1957,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetVoltageByDpm,
- param);
+ param,
+ &value);
if (ret) {
pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
return ret;
}
- smu_read_smc_arg(smu, &value);
*voltage = (uint16_t)value;
return 0;
@@ -1980,6 +1987,18 @@ static int navi10_setup_od_limits(struct smu_context *smu) {
return 0;
}
+static bool navi10_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+}
+
static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
OverDriveTable_t *od_table, *boot_od_table;
int ret = 0;
@@ -2213,7 +2232,7 @@ static int navi10_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
if (ret)
pr_err("RunBtc failed!\n");
@@ -2225,9 +2244,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
int result = 0;
if (!enable)
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
else
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
return result;
}
@@ -2336,7 +2355,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -2357,7 +2375,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+	.baco_is_support = navi10_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
@@ -2370,6 +2388,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_pptable_power_limit = navi10_get_pptable_power_limit,
.run_btc = navi10_run_btc,
.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
+ .set_power_source = smu_v11_0_set_power_source,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 3ad0f4aa3aa3..b0ed1b3fe79a 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -24,7 +24,6 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
#include "smu_v12_0.h"
@@ -240,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+ bool cur_value_match_level = false;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -298,8 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
}
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+
return size;
}
@@ -342,14 +347,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -367,14 +372,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
@@ -423,7 +428,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -456,7 +461,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -622,22 +627,24 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
soft_max_level == 0 ? min_freq :
- soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
+ NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
soft_min_level == 2 ? max_freq :
- soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
+ NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -645,10 +652,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -672,14 +679,19 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
if (workload_type < 0) {
- pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
+ /*
+		 * TODO: If some case needs to switch to powersave/default
+		 * power mode, consider entering WORKLOAD_COMPUTE/
+		 * WORKLOAD_CUSTOM for power saving.
+ */
+ pr_err_once("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
return -EINVAL;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (ret) {
- pr_err("Fail to set workload type %d\n", workload_type);
+ pr_err_once("Fail to set workload type %d\n", workload_type);
return ret;
}
@@ -697,7 +709,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
@@ -705,7 +717,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -881,6 +893,22 @@ static int renoir_read_sensor(struct smu_context *smu,
return ret;
}
+static bool renoir_is_dpm_running(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /*
+	 * Until now, the pmfw hasn't exported the SMU feature mask
+	 * interface to APU SKUs, so just force all features on at
+	 * the early initialization stage.
+ */
+	return !adev->in_suspend;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_clk_index = renoir_get_smu_clk_index,
@@ -910,7 +938,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.powergate_vcn = smu_v12_0_powergate_vcn,
.powergate_jpeg = smu_v12_0_powergate_jpeg,
.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
- .read_smc_arg = smu_v12_0_read_arg,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
.gfx_off_control = smu_v12_0_gfx_off_control,
.init_smc_tables = smu_v12_0_init_smc_tables,
@@ -922,6 +949,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.mode2_reset = smu_v12_0_mode2_reset,
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
.set_driver_table_location = smu_v12_0_set_driver_table_location,
+ .is_dpm_running = renoir_is_dpm_running,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
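
With the renoir_print_clk_levels() change above, the sysfs clock listing still marks the live frequency even when it matches no DPM level: the loop stars an exact match, and the fallback line appends the raw current value otherwise. Possible output (values illustrative):

0: 200Mhz
1: 400Mhz
   333Mhz *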
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq; \
break; \
case SMU_MCLK: \
- freq = table->MemClocks[dpm_level].Freq; \
+ freq = table->FClocks[dpm_level].Freq; \
break; \
case SMU_DCEFCLK: \
freq = table->DcfClocks[dpm_level].Freq; \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 7bd200ffcda8..40c35bcc5a0a 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -79,12 +79,13 @@
#define smu_set_default_od_settings(smu, initialize) \
((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \
+	((smu)->ppt_funcs->send_smc_msg_with_param ? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0)
+
+static inline int smu_send_smc_msg(struct smu_context *smu,
+				   enum smu_message_type msg,
+				   uint32_t *read_arg)
+{
+	return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
+}
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
#define smu_alloc_dpm_context(smu) \
((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
#define smu_init_display_count(smu, count) \
@@ -210,4 +211,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
#define smu_disable_umc_cdr_12gbps_workaround(smu) \
((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
+#define smu_set_power_source(smu, power_src) \
+ ((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
+
#endif
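Note: with smu_read_smc_arg() gone, an optional read_arg pointer is threaded through smu_send_smc_msg_with_param() and the new inline smu_send_smc_msg(), so a query is one locked call instead of a send/read pair. A minimal usage sketch (the message name is taken from the smu_v12_0.c hunks below; error handling trimmed):

        uint32_t freq;
        int ret;

        /* Sends the message and, because read_arg is non-NULL,
         * reads the response register in the same critical section. */
        ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, &freq);
        if (ret)
                return ret;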
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c9e5ce135fd4..655ba4fb05dc 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,7 +26,6 @@
#define SMU_11_0_PARTIAL_PPTABLE
-#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
@@ -43,8 +42,6 @@
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
-#include "asic_reg/nbio/nbio_7_4_offset.h"
-#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
@@ -64,7 +61,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -92,7 +89,8 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param)
+ uint32_t param,
+ uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -101,11 +99,12 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
if (index < 0)
return index;
+ mutex_lock(&smu->message_lock);
ret = smu_v11_0_wait_for_response(smu);
if (ret) {
pr_err("Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
- return ret;
+ goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -115,10 +114,21 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v11_0_wait_for_response(smu);
- if (ret)
+ if (ret) {
pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
smu_get_message_name(smu, msg), index, param, ret);
-
+ goto out;
+ }
+ if (read_arg) {
+ ret = smu_v11_0_read_arg(smu, read_arg);
+ if (ret) {
+ pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param, ret);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&smu->message_lock);
return ret;
}
@@ -262,6 +272,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
case CHIP_NAVI10:
smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
+ case CHIP_NAVI12:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ break;
case CHIP_NAVI14:
smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
@@ -671,12 +684,14 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
- address_high);
+ address_high,
+ NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
- address_low);
+ address_low,
+ NULL);
if (ret)
return ret;
@@ -685,15 +700,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_low = (uint32_t)lower_32_bits(address);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
- address_high);
+ address_high, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
- address_low);
+ address_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
- (uint32_t)memory_pool->size);
+ (uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
@@ -757,7 +772,7 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
int ret;
ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk);
+ SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
@@ -784,11 +799,13 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address));
+ upper_32_bits(driver_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address));
+ lower_32_bits(driver_table->mc_address),
+ NULL);
}
return ret;
@@ -802,11 +819,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
if (tool_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
- upper_32_bits(tool_table->mc_address));
+ upper_32_bits(tool_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
- lower_32_bits(tool_table->mc_address));
+ lower_32_bits(tool_table->mc_address),
+ NULL);
}
return ret;
@@ -819,7 +838,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
if (!smu->pm_enabled)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
return ret;
}
@@ -837,12 +856,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
- feature_mask[1]);
+ feature_mask[1], NULL);
if (ret)
goto failed;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
- feature_mask[0]);
+ feature_mask[0], NULL);
if (ret)
goto failed;
@@ -862,17 +881,11 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
return -EINVAL;
if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -894,7 +907,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
int ret = 0;
ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures));
+ SMU_MSG_DisableAllSmuFeatures), NULL);
if (ret)
return ret;
@@ -923,7 +936,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
return ret;
}
@@ -947,30 +960,24 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
- clk_id << 16);
+ clk_id << 16, clock);
if (ret) {
pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
}
- ret = smu_read_smc_arg(smu, clock);
- if (ret)
- return ret;
-
if (*clock != 0)
return 0;
/* if DC limit is zero, return AC limit */
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
- clk_id << 16);
+ clk_id << 16, clock);
if (ret) {
pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
}
- ret = smu_read_smc_arg(smu, clock);
-
- return ret;
+ return 0;
}
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
@@ -1106,7 +1113,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return -EOPNOTSUPP;
}
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
pr_err("[%s] Set power limit Failed!\n", __func__);
return ret;
@@ -1136,11 +1143,7 @@ int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
- (asic_clk_id << 16));
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, &freq);
+ (asic_clk_id << 16), &freq);
if (ret)
return ret;
}
@@ -1375,9 +1378,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
else
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
break;
default:
break;
@@ -1515,10 +1518,18 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
int ret = 0;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+ NULL);
return ret;
}
+static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
+}
+
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
@@ -1552,6 +1563,9 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
break;
}
+ } else if (client_id == SOC15_IH_CLIENTID_MP1) {
+ if (src_id == 0xfe)
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
}
return 0;
@@ -1591,6 +1605,12 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
if (ret)
return ret;
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ 0xfe,
+ irq_src);
+ if (ret)
+ return ret;
+
return ret;
}
@@ -1628,21 +1648,19 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
+ ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
return ret;
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
- return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
- uint32_t val;
bool baco_support;
mutex_lock(&smu_baco->mutex);
@@ -1657,11 +1675,7 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
- val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
- if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- return true;
-
- return false;
+ return true;
}
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
@@ -1678,11 +1692,9 @@ enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
-
struct smu_baco_context *smu_baco = &smu->smu_baco;
struct amdgpu_device *adev = smu->adev;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint32_t bif_doorbell_intr_cntl;
uint32_t data;
int ret = 0;
@@ -1691,32 +1703,26 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
mutex_lock(&smu_baco->mutex);
- bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
-
if (state == SMU_BACO_STATE_ENTER) {
- bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
- BIF_DOORBELL_INT_CNTL,
- DOORBELL_INTERRUPT_DISABLE, 1);
- WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
-
if (!ras || !ras->supported) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
}
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+ ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
goto out;
- bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
- BIF_DOORBELL_INT_CNTL,
- DOORBELL_INTERRUPT_DISABLE, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+ if (ras && ras->supported) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ if (ret)
+ goto out;
+ }
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
@@ -1777,19 +1783,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
param = (clk_id & 0xffff) << 16;
if (max) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
if (ret)
goto failed;
}
@@ -1811,7 +1811,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -1819,7 +1819,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -1939,3 +1939,18 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
return ret;
}
+int smu_v11_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src)
+{
+ int pwr_source;
+
+ pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+ if (pwr_source < 0)
+ return -EINVAL;
+
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_NotifyPowerSource,
+ pwr_source,
+ NULL);
+}
+
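Note: the heart of the smu_v11_0.c change is that the response-argument read now happens under the same message_lock as the send, closing the window the old standalone read_smc_arg entry point left open. The pattern, condensed from the function above rather than new code:

        mutex_lock(&smu->message_lock);
        ret = smu_v11_0_wait_for_response(smu); /* pre-check */
        if (ret)
                goto out;
        /* write param and message index, then wait for the response */
        if (read_arg)
                ret = smu_v11_0_read_arg(smu, read_arg);
out:
        mutex_unlock(&smu->message_lock);
        return ret;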
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 518e6597bf2d..169ebdad87b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -50,7 +49,7 @@ int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -78,7 +77,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param)
+ uint32_t param,
+ uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -87,11 +87,12 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
if (index < 0)
return index;
+ mutex_lock(&smu->message_lock);
ret = smu_v12_0_wait_for_response(smu);
if (ret) {
pr_err("Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
- return ret;
+ goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -101,10 +102,21 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v12_0_wait_for_response(smu);
- if (ret)
+ if (ret) {
pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
index, ret, param);
-
+ goto out;
+ }
+ if (read_arg) {
+ ret = smu_v12_0_read_arg(smu, read_arg);
+ if (ret) {
+ pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&smu->message_lock);
return ret;
}
@@ -163,9 +175,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
}
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
@@ -174,9 +186,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL);
}
int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
@@ -185,9 +197,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
else
- return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
}
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
@@ -196,7 +208,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
return 0;
return smu_v12_0_send_msg_with_param(smu,
- SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
+ SMU_MSG_SetGfxCGPG,
+ enable ? 1 : 0,
+ NULL);
}
int smu_v12_0_read_sensor(struct smu_context *smu,
@@ -262,10 +276,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0, timeout = 500;
if (enable) {
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
/* confirm gfx is back to "on" state, timeout is 0.5 second */
while (smu_v12_0_get_gfxoff_status(smu) != 2) {
@@ -331,17 +345,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu,
if (!feature_mask || num < 2)
return -EINVAL;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -388,14 +396,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
if (ret) {
pr_err("Attempt to get max GX frequency from SMC Failed !\n");
goto failed;
}
- ret = smu_read_smc_arg(smu, max);
- if (ret)
- goto failed;
break;
case SMU_UCLK:
case SMU_FCLK:
@@ -419,14 +424,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
if (ret) {
pr_err("Attempt to get min GX frequency from SMC Failed !\n");
goto failed;
}
- ret = smu_read_smc_arg(smu, min);
- if (ret)
- goto failed;
break;
case SMU_UCLK:
case SMU_FCLK:
@@ -450,7 +452,7 @@ failed:
}
int smu_v12_0_mode2_reset(struct smu_context *smu)
{
- return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
}
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -461,39 +463,39 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
if (ret)
return ret;
break;
case SMU_FCLK:
case SMU_MCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_VCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
if (ret)
return ret;
break;
@@ -512,11 +514,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address));
+ upper_32_bits(driver_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address));
+ lower_32_bits(driver_table->mc_address),
+ NULL);
}
return ret;
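Note: the smu_v12_0.c hunks apply the same consolidation on the APU path: passing the output pointer straight into smu_send_smc_msg() replaces each message-then-smu_read_smc_arg() pair. Querying the GFX clock range, for instance, becomes two locked round-trips (sketch, error handling trimmed):

        uint32_t min_freq, max_freq;

        ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, &min_freq);
        if (!ret)
                ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, &max_freq);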
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 49e5ef3e3876..16aa171971d3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -33,6 +33,8 @@
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"
+#include "smu_v11_0_i2c.h"
+
/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000
@@ -406,6 +408,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
struct vega20_smumgr *priv;
unsigned long tools_size = 0x19000;
int ret = 0;
+ struct amdgpu_device *adev = hwmgr->adev;
struct cgs_firmware_info info = {0};
@@ -505,6 +508,10 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
+ ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+ if (ret)
+ goto err4;
+
return 0;
err4:
@@ -537,6 +544,9 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
@@ -560,6 +570,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
}
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 4ad8d6c14ee5..3f1044326dcb 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -36,6 +35,7 @@
#include "vega20_ppt.h"
#include "vega20_pptable.h"
#include "vega20_ppsmc.h"
+#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -587,7 +587,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu)
static int vega20_run_btc_afll(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
}
#define FEATURE_MASK(feature) (1ULL << feature)
@@ -670,13 +670,13 @@ vega20_set_single_dpm_table(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ &num_of_levels);
if (ret) {
pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
return ret;
}
- smu_read_smc_arg(smu, &num_of_levels);
if (!num_of_levels) {
pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
return -EINVAL;
@@ -687,12 +687,12 @@ vega20_set_single_dpm_table(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | i));
+ (clk_id << 16 | i),
+ &clk);
if (ret) {
pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
return ret;
}
- smu_read_smc_arg(smu, &clk);
if (!clk) {
pr_err("[GetDpmFreqByIndex] clk value is invalid!");
return -EINVAL;
@@ -1200,7 +1200,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
@@ -1215,7 +1216,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_UCLK << 16) | (freq & 0xffff));
+ (PPCLK_UCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s memclk !\n",
max ? "max" : "min");
@@ -1230,7 +1232,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s socclk !\n",
max ? "max" : "min");
@@ -1245,7 +1248,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_FCLK << 16) | (freq & 0xffff));
+ (PPCLK_FCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s fclk !\n",
max ? "max" : "min");
@@ -1260,7 +1264,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
if (!max) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+ (PPCLK_DCEFCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set hard min dcefclk !\n");
return ret;
@@ -1421,7 +1426,9 @@ static int vega20_force_clk_levels(struct smu_context *smu,
}
ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ SMU_MSG_SetMinLinkDpmByIndex,
+ soft_min_level,
+ NULL);
if (ret)
pr_err("Failed to set min link dpm level!\n");
@@ -1477,13 +1484,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetAVFSVoltageByDpm,
- ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+ voltage);
if (ret) {
pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
return ret;
}
- smu_read_smc_arg(smu, voltage);
*voltage = *voltage / VOLTAGE_SCALE;
return 0;
@@ -1956,8 +1963,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type,
+ NULL);
return ret;
}
@@ -2029,7 +2038,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
+ NULL);
if (ret) {
pr_err("[%s] Set hard min uclk failed!", __func__);
return ret;
@@ -2047,7 +2057,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu)
if (!smu->smu_dpm.dpm_context)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
ret = vega20_set_uclk_to_highest_dpm_level(smu,
&dpm_table->mem_table);
if (ret)
@@ -2074,7 +2084,8 @@ static int vega20_display_config_changed(struct smu_context *smu)
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
smu_send_smc_msg_with_param(smu,
SMU_MSG_NumOfDisplays,
- smu->display_config->num_display);
+ smu->display_config->num_display,
+ NULL);
}
return ret;
@@ -2247,7 +2258,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcef_clock_in_sr/100);
+ min_clocks.dcef_clock_in_sr/100,
+ NULL);
if (ret) {
pr_err("Attempt to set divider for DCEFCLK Failed!");
return ret;
@@ -2262,7 +2274,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+ (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level,
+ NULL);
if (ret) {
pr_err("[%s] Set hard min uclk failed!", __func__);
return ret;
@@ -2853,8 +2866,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
- (uint32_t)pptable->FanTargetTemperature);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetFanTemperatureTarget,
+ (uint32_t)pptable->FanTargetTemperature,
+ NULL);
return ret;
}
@@ -2864,15 +2879,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu,
{
int ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed);
if (ret) {
pr_err("Attempt to get current RPM from SMC Failed!\n");
return ret;
}
- smu_read_smc_arg(smu, speed);
-
return 0;
}
@@ -3137,7 +3150,7 @@ static int vega20_set_df_cstate(struct smu_context *smu,
return -EINVAL;
}
- return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
static int vega20_update_pcie_parameters(struct smu_context *smu,
@@ -3155,12 +3168,24 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] : pcie_width_cap);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
+ smu_pcie_arg,
+ NULL);
}
return ret;
}
+static bool vega20_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+}
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
@@ -3229,7 +3254,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -3250,7 +3274,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+ .baco_is_support= vega20_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
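Note: the BACO rework splits policy from mechanism: the common smu_v11_0_baco_is_support() keeps only the feature-mask checks, while the ASIC-specific PX-capability strap read moves into vega20_is_baco_supported(), which the function table now points at. The wrapper shape, condensed from the hunk above:

        static bool vega20_is_baco_supported(struct smu_context *smu)
        {
                uint32_t val;

                if (!smu_v11_0_baco_is_support(smu))  /* common checks first */
                        return false;

                val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
                return !!(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK);
        }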
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 8ae1e1f97a73..be7c29cec318 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -9,7 +9,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
@@ -138,24 +137,9 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
~ARCPGU_CTRL_ENABLE_MASK);
}
-static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- struct drm_pending_vblank_event *event = crtc->state->event;
-
- if (event) {
- crtc->state->event = NULL;
-
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
- }
-}
-
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
.mode_valid = arc_pgu_crtc_mode_valid,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
- .atomic_begin = arc_pgu_crtc_atomic_begin,
.atomic_enable = arc_pgu_crtc_atomic_enable,
.atomic_disable = arc_pgu_crtc_atomic_disable,
};
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 8fd7094beece..52839934f2fb 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -40,7 +40,7 @@ int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
return ret;
/* Link drm_bridge to encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
drm_encoder_cleanup(encoder);
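Note: drm_bridge_attach() gained a flags argument of type enum drm_bridge_attach_flags. Passing 0 preserves the old behavior in which the bridge driver creates its own connector, while DRM_BRIDGE_ATTACH_NO_CONNECTOR (checked by the bridge hunks further below) asks the display controller to create the connector instead. The updated call site in sketch form:

        /* 0: legacy behavior, the bridge creates the connector itself. */
        ret = drm_bridge_attach(encoder, bridge, NULL, 0);
        if (ret)
                drm_encoder_cleanup(encoder);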
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ac8a78bfda03..f2dc371bd8e5 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -129,18 +129,12 @@ int armada_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fbh, 1);
+ ret = drm_fb_helper_init(dev, fbh);
if (ret) {
DRM_ERROR("failed to initialize drm fb helper\n");
goto err_fb_helper;
}
- ret = drm_fb_helper_single_add_all_connectors(fbh);
- if (ret) {
- DRM_ERROR("failed to add fb connectors\n");
- goto err_fb_setup;
- }
-
ret = drm_fb_helper_initial_config(fbh, 32);
if (ret) {
DRM_ERROR("failed to set initial config\n");
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index f5d8780776ae..656d591b154b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -121,6 +121,7 @@ struct ast_private {
unsigned int next_index;
} cursor;
+ struct drm_encoder encoder;
struct drm_plane primary_plane;
struct drm_plane cursor_plane;
@@ -238,13 +239,8 @@ struct ast_crtc {
u8 offset_x, offset_y;
};
-struct ast_encoder {
- struct drm_encoder base;
-};
-
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
#define to_ast_connector(x) container_of(x, struct ast_connector, base)
-#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
struct ast_vbios_stdtable {
u8 misc;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index b79f484e9bd2..18a0a4ce00f6 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -388,31 +388,9 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
- const struct drm_display_mode *mode)
-{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGBA8888 */
-
- struct ast_private *ast = dev->dev_private;
- unsigned long fbsize, fbpages, max_fbpages;
-
- /* To support double buffering, a framebuffer may not
- * consume more than half of the available VRAM.
- */
- max_fbpages = (ast->vram_size / 2) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
- return MODE_MEM;
-
- return MODE_OK;
-}
-
static const struct drm_mode_config_funcs ast_mode_funcs = {
.fb_create = drm_gem_fb_create,
- .mode_valid = ast_mode_config_mode_valid,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 34608f0499eb..cdd6c46d6557 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -40,6 +40,7 @@
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "ast_drv.h"
#include "ast_tables.h"
@@ -833,8 +834,6 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct ast_vbios_mode_info *vbios_mode_info;
struct drm_display_mode *adjusted_mode;
- crtc->state->no_vblank = true;
-
ast_state = to_ast_crtc_state(crtc->state);
format = ast_state->format;
@@ -959,28 +958,18 @@ err_kfree:
* Encoder
*/
-static void ast_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_funcs ast_enc_funcs = {
- .destroy = ast_encoder_destroy,
-};
-
static int ast_encoder_init(struct drm_device *dev)
{
- struct ast_encoder *ast_encoder;
+ struct ast_private *ast = dev->dev_private;
+ struct drm_encoder *encoder = &ast->encoder;
+ int ret;
- ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
- if (!ast_encoder)
- return -ENOMEM;
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ if (ret)
+ return ret;
- drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ encoder->possible_crtcs = 1;
- ast_encoder->base.possible_crtcs = 1;
return 0;
}
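Note: drm_simple_encoder_init() initializes an embedded encoder with a stock cleanup-only destroy hook, which is what lets ast drop its kzalloc'd ast_encoder wrapper and custom funcs above. For any driver whose single encoder lives as long as the device, the pattern is (sketch):

        struct drm_encoder *encoder = &ast->encoder; /* embedded, not allocated */
        int ret;

        ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
        if (ret)
                return ret;
        encoder->possible_crtcs = 1;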
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 121b62682d80..e2019fe97fff 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -114,7 +114,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
}
if (bridge) {
- ret = drm_bridge_attach(&output->encoder, bridge, NULL);
+ ret = drm_bridge_attach(&output->encoder, bridge, NULL, 0);
if (!ret)
return 0;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 10460878414e..addb0568c1af 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -23,7 +23,6 @@ static void bochs_unload(struct drm_device *dev)
bochs_kms_fini(bochs);
bochs_mm_fini(bochs);
- bochs_hw_fini(dev);
kfree(bochs);
dev->dev_private = NULL;
}
@@ -69,6 +68,7 @@ static struct drm_driver bochs_driver = {
.major = 1,
.minor = 0,
DRM_GEM_VRAM_DRIVER,
+ .release = bochs_unload,
};
/* ---------------------------------------------------------------------- */
@@ -148,9 +148,9 @@ static void bochs_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- drm_dev_unregister(dev);
- bochs_unload(dev);
+ bochs_hw_fini(dev);
drm_dev_put(dev);
}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index a4fc4e6aee39..dce4672e3fc8 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -4,6 +4,7 @@
#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include "bochs.h"
@@ -192,6 +193,8 @@ void bochs_hw_fini(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
+ /* TODO: shut down existing vram mappings */
+
if (bochs->mmio)
iounmap(bochs->mmio);
if (bochs->ioports)
@@ -205,6 +208,11 @@ void bochs_hw_fini(struct drm_device *dev)
void bochs_hw_setmode(struct bochs_device *bochs,
struct drm_display_mode *mode)
{
+ int idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
+
bochs->xres = mode->hdisplay;
bochs->yres = mode->vdisplay;
bochs->bpp = 32;
@@ -230,11 +238,18 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+
+ drm_dev_exit(idx);
}
void bochs_hw_setformat(struct bochs_device *bochs,
const struct drm_format_info *format)
{
+ int idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
+
DRM_DEBUG_DRIVER("format %c%c%c%c\n",
(format->format >> 0) & 0xff,
(format->format >> 8) & 0xff,
@@ -254,13 +269,18 @@ void bochs_hw_setformat(struct bochs_device *bochs,
__func__, format->format);
break;
}
+
+ drm_dev_exit(idx);
}
void bochs_hw_setbase(struct bochs_device *bochs,
int x, int y, int stride, u64 addr)
{
unsigned long offset;
- unsigned int vx, vy, vwidth;
+ unsigned int vx, vy, vwidth, idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
bochs->stride = stride;
offset = (unsigned long)addr +
@@ -275,4 +295,6 @@ void bochs_hw_setbase(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+
+ drm_dev_exit(idx);
}
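Note: the bochs_hw.c hunks wrap every register-touching path in drm_dev_enter()/drm_dev_exit(), the standard guard for safe hardware access once drm_dev_unplug() may have run: if the device is already gone, drm_dev_enter() returns false and the function bails out before any I/O. The guard in isolation:

        int idx;

        if (!drm_dev_enter(dev, &idx))
                return; /* device unplugged, skip hardware access */
        /* ... MMIO and port writes ... */
        drm_dev_exit(idx);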
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 3f0006c2470d..8066d7d370d5 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "bochs.h"
@@ -57,16 +56,8 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct bochs_device *bochs = pipe->crtc.dev->dev_private;
- struct drm_crtc *crtc = &pipe->crtc;
bochs_plane_update(bochs, pipe->plane.state);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
@@ -92,32 +83,11 @@ static int bochs_connector_get_modes(struct drm_connector *connector)
return count;
}
-static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct bochs_device *bochs =
- container_of(connector, struct bochs_device, connector);
- unsigned long size = mode->hdisplay * mode->vdisplay * 4;
-
- /*
- * Make sure we can fit two framebuffers into video memory.
- * This allows up to 1600x1200 with 16 MB (default size).
- * If you want more try this:
- * 'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
- */
- if (size * 2 > bochs->fb_size)
- return MODE_BAD;
-
- return MODE_OK;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
- .mode_valid = bochs_connector_mode_valid,
};
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -157,6 +127,7 @@ bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_config_funcs bochs_mode_funcs = {
.fb_create = bochs_gem_fb_create,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -192,6 +163,9 @@ int bochs_kms_init(struct bochs_device *bochs)
void bochs_kms_fini(struct bochs_device *bochs)
{
+ if (!bochs->dev->mode_config.num_connector)
+ return;
+
drm_atomic_helper_shutdown(bochs->dev);
drm_mode_config_cleanup(bochs->dev);
}
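Note: both ast and bochs replace an open-coded size check with drm_vram_helper_mode_valid as the mode_config .mode_valid hook; judging by the removed code, the helper centralizes the same "a framebuffer may not consume more than half of VRAM" test. Wiring it up is one line in the mode-config funcs (sketch mirroring the hunks above):

        static const struct drm_mode_config_funcs funcs = {
                .fb_create     = drm_gem_fb_create,
                .mode_valid    = drm_vram_helper_mode_valid,
                .atomic_check  = drm_atomic_helper_check,
                .atomic_commit = drm_atomic_helper_commit,
        };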
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 0b9ca5862455..aaed2347ace9 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,13 +27,16 @@ config DRM_CDNS_DSI
Support Cadence DPI to DSI bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
-config DRM_DUMB_VGA_DAC
- tristate "Dumb VGA DAC Bridge support"
+config DRM_DISPLAY_CONNECTOR
+ tristate "Display connector support"
depends on OF
- select DRM_KMS_HELPER
help
- Support for non-programmable RGB to VGA DAC bridges, such as ADI
- ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
+ Driver for display connectors with support for DDC and hot-plug
+ detection. Most display controllers handle display connectors
+ internally and don't need this driver, but the DRM subsystem is
+ moving towards separating connector handling from display controllers
+ on ARM-based platforms. Saying Y here when this driver is not needed
+ will not cause any issue.
config DRM_LVDS_CODEC
tristate "Transparent LVDS encoders and decoders support"
@@ -72,6 +75,17 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_PARADE_PS8640
+ tristate "Parade PS8640 MIPI DSI to eDP Converter"
+ depends on OF
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ help
+ Choose this option if you have a PS8640 for display.
+ The PS8640 is a high-performance and low-power
+ MIPI DSI to eDP converter.
+
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
@@ -87,6 +101,7 @@ config DRM_SII902X
select DRM_KMS_HELPER
select REGMAP_I2C
select I2C_MUX
+ select SND_SOC_HDMI_CODEC if SND_SOC
---help---
Silicon Image sii902x bridge chip driver.
@@ -98,6 +113,14 @@ config DRM_SII9234
It is an I2C driver, that detects connection of MHL bridge
and starts encapsulation of HDMI signal.
+config DRM_SIMPLE_BRIDGE
+ tristate "Simple DRM bridge support"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Support for non-programmable DRM bridges, such as ADI ADV7123, TI
+ THS8134 and THS8135 or passive resistor ladder DACs.
+
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
@@ -122,6 +145,16 @@ config DRM_TOSHIBA_TC358767
---help---
Toshiba TC358767 eDP bridge chip driver.
+config DRM_TOSHIBA_TC358768
+ tristate "Toshiba TC358768 MIPI DSI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+
config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
@@ -139,6 +172,14 @@ config DRM_TI_SN65DSI86
help
Texas Instruments SN65DSI86 DSI to eDP Bridge driver
+config DRM_TI_TPD12S015
+ tristate "TI TPD12S015 HDMI level shifter and ESD protection"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Texas Instruments TPD12S015 HDMI level shifter and ESD protection
+ driver.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cd16ce830270..6fb062b5b0f0 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,19 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
-obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
+obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
+obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 8a56ff81f4fb..47d4eb9e845d 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -4,8 +4,9 @@ config DRM_I2C_ADV7511
depends on OF
select DRM_KMS_HELPER
select REGMAP_I2C
+ select DRM_MIPI_DSI
help
- Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+ Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
config DRM_I2C_ADV7511_AUDIO
bool "ADV7511 HDMI Audio driver"
@@ -15,16 +16,8 @@ config DRM_I2C_ADV7511_AUDIO
Support the ADV7511 HDMI Audio interface. This is used in
conjunction with the ADV7511 HDMI driver.
-config DRM_I2C_ADV7533
- bool "ADV7533 encoder"
- depends on DRM_I2C_ADV7511
- select DRM_MIPI_DSI
- default y
- help
- Support for the Analog Devices ADV7533 DSI to HDMI encoder.
-
config DRM_I2C_ADV7511_CEC
- bool "ADV7511/33 HDMI CEC driver"
+ bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
select CEC_CORE
default y
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index b46ebeb35fd4..d8ceb534b51f 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-adv7511-y := adv7511_drv.o
+adv7511-y := adv7511_drv.o adv7533.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
-adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 52b2adfdc877..a9bb734366ae 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -320,6 +320,7 @@ struct adv7511_video_config {
enum adv7511_type {
ADV7511,
ADV7533,
+ ADV7535,
};
#define ADV7511_MAX_ADDRS 3
@@ -393,7 +394,6 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
}
#endif
-#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
@@ -402,44 +402,6 @@ int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
void adv7533_detach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
-#else
-static inline void adv7533_dsi_power_on(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_dsi_power_off(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_mode_set(struct adv7511 *adv,
- const struct drm_display_mode *mode)
-{
-}
-
-static inline int adv7533_patch_registers(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline int adv7533_attach_dsi(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline void adv7533_detach_dsi(struct adv7511 *adv)
-{
-}
-
-static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
-{
- return -ENODEV;
-}
-#endif
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 9e13e466e72c..87b58c1acff4 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -367,7 +367,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_on(adv7511);
adv7511->powered = true;
}
@@ -387,7 +387,7 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
static void adv7511_power_off(struct adv7511 *adv7511)
{
__adv7511_power_off(adv7511);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_off(adv7511);
adv7511->powered = false;
}
@@ -761,7 +761,7 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_mode_set(adv7511, adj_mode);
drm_mode_copy(&adv7511->curr_mode, adj_mode);
@@ -847,11 +847,17 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
adv7511_mode_set(adv, mode, adj_mode);
}
-static int adv7511_bridge_attach(struct drm_bridge *bridge)
+static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -874,7 +880,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
&adv7511_connector_helper_funcs);
drm_connector_attach_encoder(&adv->connector, bridge->encoder);
- if (adv->type == ADV7533)
+ if (adv->type == ADV7533 || adv->type == ADV7535)
ret = adv7533_attach_dsi(adv);
if (adv->i2c_main->irq)
@@ -952,7 +958,7 @@ static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
struct i2c_client *i2c = to_i2c_client(dev);
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
reg -= ADV7533_REG_CEC_OFFSET;
switch (reg) {
@@ -994,7 +1000,7 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
goto err;
}
- if (adv->type == ADV7533) {
+ if (adv->type == ADV7533 || adv->type == ADV7535) {
ret = adv7533_patch_cec_registers(adv);
if (ret)
goto err;
@@ -1242,7 +1248,7 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
@@ -1266,9 +1272,8 @@ static const struct i2c_device_id adv7511_i2c_ids[] = {
{ "adv7511", ADV7511 },
{ "adv7511w", ADV7511 },
{ "adv7513", ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
{ "adv7533", ADV7533 },
-#endif
+ { "adv7535", ADV7535 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
@@ -1277,9 +1282,8 @@ static const struct of_device_id adv7511_of_ids[] = {
{ .compatible = "adi,adv7511", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7513", .data = (void *)ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
{ .compatible = "adi,adv7533", .data = (void *)ADV7533 },
-#endif
+ { .compatible = "adi,adv7535", .data = (void *)ADV7535 },
{ }
};
MODULE_DEVICE_TABLE(of, adv7511_of_ids);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 2dfa2fd2a23b..9af39ec958db 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -485,6 +485,9 @@ static int anx6345_get_modes(struct drm_connector *connector)
num_modes += drm_add_edid_modes(connector, anx6345->edid);
+ /* Driver currently supports only 6bpc */
+ connector->display_info.bpc = 6;
+
unlock:
if (power_off)
anx6345_poweroff(anx6345);
@@ -519,11 +522,17 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int anx6345_bridge_attach(struct drm_bridge *bridge)
+static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
int err;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -711,16 +720,20 @@ static int anx6345_i2c_probe(struct i2c_client *client,
DRM_DEBUG("No panel found\n");
/* 1.2V digital core power regulator */
- anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12-supply");
+ anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
if (IS_ERR(anx6345->dvdd12)) {
- DRM_ERROR("dvdd12-supply not found\n");
+ if (PTR_ERR(anx6345->dvdd12) != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get dvdd12 supply (%ld)\n",
+ PTR_ERR(anx6345->dvdd12));
return PTR_ERR(anx6345->dvdd12);
}
/* 2.5V digital core power regulator */
- anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25-supply");
+ anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
if (IS_ERR(anx6345->dvdd25)) {
- DRM_ERROR("dvdd25-supply not found\n");
+ if (PTR_ERR(anx6345->dvdd25) != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get dvdd25 supply (%ld)\n",
+ PTR_ERR(anx6345->dvdd25));
return PTR_ERR(anx6345->dvdd25);
}
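Editor's note: the regulator rename above matters because devm_regulator_get() takes the bare supply name and the regulator core appends the "-supply" suffix itself when it looks up the device-tree property; passing "dvdd12-supply" made the core search for a "dvdd12-supply-supply" property that can never exist. A short sketch of the corrected lookup, with the matching DT property noted in a comment:

	/*
	 * devm_regulator_get(dev, "dvdd12") resolves against this DT property:
	 *
	 *     dvdd12-supply = <&reg_dvdd12>;
	 *
	 * so the "-supply" suffix must not be part of the name passed in.
	 */
	struct regulator *dvdd12 = devm_regulator_get(dev, "dvdd12");

	if (IS_ERR(dvdd12) && PTR_ERR(dvdd12) != -EPROBE_DEFER)
		dev_err(dev, "failed to get dvdd12 supply\n"); /* defer is routine */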
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 41867be03751..0d5a5ad0c9ee 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -722,10 +722,9 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+ SP_DP_MAIN_LINK_BW_SET_REG,
+ anx78xx->dpcd[DP_MAX_LINK_RATE]);
if (err)
return err;
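Editor's note: the link-rate change above removes a lossy round trip. DP_MAX_LINK_RATE in the sink's DPCD already holds the bandwidth code that SP_DP_MAIN_LINK_BW_SET_REG expects, while the old code expanded it to a kHz link rate, stored that in a u8, and converted back. An illustrative sketch (values per the DisplayPort spec, not driver code):

	u8 code = dpcd[DP_MAX_LINK_RATE];	/* e.g. 0x0a for 2.70 Gbps/lane */
	int rate = drm_dp_bw_code_to_link_rate(code);	/* 270000 kHz */

	/*
	 * 270000 does not fit in a u8: storing it back into dpcd[0]
	 * truncated it, so the code -> rate -> code round trip corrupted
	 * the value. Writing dpcd[DP_MAX_LINK_RATE] to the register
	 * directly is both shorter and correct.
	 */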
@@ -887,11 +886,17 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int anx78xx_bridge_attach(struct drm_bridge *bridge)
+static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
int err;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6effe532f820..76736fb8ed94 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1216,13 +1216,19 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
+static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -1289,19 +1295,21 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
return conn_state->crtc;
}
-static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Don't touch the panel if we're coming back from PSR */
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
@@ -1366,20 +1374,22 @@ out_dp_clk_pre:
return ret;
}
-static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Not a full enable, just disable PSR and continue */
if (old_crtc_state && old_crtc_state->self_refresh_active) {
ret = analogix_dp_disable_psr(dp);
@@ -1440,18 +1450,20 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
goto out;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state)
goto out;
@@ -1463,20 +1475,21 @@ out:
analogix_dp_bridge_disable(bridge);
}
-static
-void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state || !new_crtc_state->self_refresh_active)
return;
@@ -1563,6 +1576,9 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
}
static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_pre_enable = analogix_dp_bridge_atomic_pre_enable,
.atomic_enable = analogix_dp_bridge_atomic_enable,
.atomic_disable = analogix_dp_bridge_atomic_disable,
@@ -1588,7 +1604,7 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev,
bridge->driver_private = dp;
bridge->funcs = &analogix_dp_bridge_funcs;
- ret = drm_bridge_attach(dp->encoder, bridge, NULL);
+ ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to attach drm bridge\n");
return -EINVAL;
@@ -1636,8 +1652,7 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
}
struct analogix_dp_device *
-analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
- struct analogix_dp_plat_data *plat_data)
+analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
struct platform_device *pdev = to_platform_device(dev);
struct analogix_dp_device *dp;
@@ -1740,22 +1755,30 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_disable_pm_runtime;
+ return ERR_PTR(ret);
}
disable_irq(dp->irq);
+ return dp;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_probe);
+
+int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
+{
+ int ret;
+
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
dp->aux.name = "DP-AUX";
dp->aux.transfer = analogix_dpaux_transfer;
- dp->aux.dev = &pdev->dev;
+ dp->aux.dev = dp->dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- pm_runtime_enable(dev);
+ pm_runtime_enable(dp->dev);
ret = analogix_dp_create_bridge(drm_dev, dp);
if (ret) {
@@ -1763,13 +1786,12 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
goto err_disable_pm_runtime;
}
- return dp;
+ return 0;
err_disable_pm_runtime:
+ pm_runtime_disable(dp->dev);
- pm_runtime_disable(dev);
-
- return ERR_PTR(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(analogix_dp_bind);
@@ -1786,10 +1808,15 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
drm_dp_aux_unregister(&dp->aux);
pm_runtime_disable(dp->dev);
- clk_disable_unprepare(dp->clock);
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
+void analogix_dp_remove(struct analogix_dp_device *dp)
+{
+ clk_disable_unprepare(dp->clock);
+}
+EXPORT_SYMBOL_GPL(analogix_dp_remove);
+
#ifdef CONFIG_PM
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index b7c97f060241..69c3892caee5 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -644,7 +644,8 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
return 0;
}
-static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
+static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -656,7 +657,8 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge);
+ return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ flags);
}
static enum drm_mode_status
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
new file mode 100644
index 000000000000..4d278573cdb9
--- /dev/null
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+
+struct display_connector {
+ struct drm_bridge bridge;
+
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+};
+
+static inline struct display_connector *
+to_display_connector(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct display_connector, bridge);
+}
+
+static int display_connector_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static enum drm_connector_status
+display_connector_detect(struct drm_bridge *bridge)
+{
+ struct display_connector *conn = to_display_connector(bridge);
+
+ if (conn->hpd_gpio) {
+ if (gpiod_get_value_cansleep(conn->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ if (conn->bridge.ddc && drm_probe_ddc(conn->bridge.ddc))
+ return connector_status_connected;
+
+ switch (conn->bridge.type) {
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ /*
+ * For DVI and HDMI connectors, a DDC probe failure indicates
+ * that no cable is connected.
+ */
+ return connector_status_disconnected;
+
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_VGA:
+ default:
+ /*
+ * Composite and S-Video connectors have no other detection
+ * means than the HPD GPIO. For VGA connectors, even if we have
+ * an I2C bus, we can't assume that the cable is disconnected
+ * if drm_probe_ddc fails, as some cables don't wire the DDC
+ * pins.
+ */
+ return connector_status_unknown;
+ }
+}
+
+static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct display_connector *conn = to_display_connector(bridge);
+
+ return drm_get_edid(connector, conn->bridge.ddc);
+}
+
+static const struct drm_bridge_funcs display_connector_bridge_funcs = {
+ .attach = display_connector_attach,
+ .detect = display_connector_detect,
+ .get_edid = display_connector_get_edid,
+};
+
+static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
+{
+ struct display_connector *conn = arg;
+ struct drm_bridge *bridge = &conn->bridge;
+
+ drm_bridge_hpd_notify(bridge, display_connector_detect(bridge));
+
+ return IRQ_HANDLED;
+}
+
+static int display_connector_probe(struct platform_device *pdev)
+{
+ struct display_connector *conn;
+ unsigned int type;
+ const char *label;
+ int ret;
+
+ conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, conn);
+
+ type = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ /* Get the exact connector type. */
+ switch (type) {
+ case DRM_MODE_CONNECTOR_DVII: {
+ bool analog, digital;
+
+ analog = of_property_read_bool(pdev->dev.of_node, "analog");
+ digital = of_property_read_bool(pdev->dev.of_node, "digital");
+ if (analog && !digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVIA;
+ } else if (!analog && digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVID;
+ } else if (analog && digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVII;
+ } else {
+ dev_err(&pdev->dev, "DVI connector with no type\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ case DRM_MODE_CONNECTOR_HDMIA: {
+ const char *hdmi_type;
+
+ ret = of_property_read_string(pdev->dev.of_node, "type",
+ &hdmi_type);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "HDMI connector with no type\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(hdmi_type, "a") || !strcmp(hdmi_type, "c") ||
+ !strcmp(hdmi_type, "d") || !strcmp(hdmi_type, "e")) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ } else if (!strcmp(hdmi_type, "b")) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_HDMIB;
+ } else {
+ dev_err(&pdev->dev,
+ "Unsupported HDMI connector type '%s'\n",
+ hdmi_type);
+ return -EINVAL;
+ }
+
+ break;
+ }
+
+ default:
+ conn->bridge.type = type;
+ break;
+ }
+
+ /* All the supported connector types support interlaced modes. */
+ conn->bridge.interlace_allowed = true;
+
+ /* Get the optional connector label. */
+ of_property_read_string(pdev->dev.of_node, "label", &label);
+
+ /*
+ * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+ * edge interrupts, register an interrupt handler.
+ */
+ if (type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_HDMIA) {
+ conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
+ GPIOD_IN);
+ if (IS_ERR(conn->hpd_gpio)) {
+ if (PTR_ERR(conn->hpd_gpio) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to retrieve HPD GPIO\n");
+ return PTR_ERR(conn->hpd_gpio);
+ }
+
+ conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio);
+ } else {
+ conn->hpd_irq = -EINVAL;
+ }
+
+ if (conn->hpd_irq >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, conn->hpd_irq,
+ NULL, display_connector_hpd_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "HPD", conn);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "Failed to request HPD edge interrupt, falling back to polling\n");
+ conn->hpd_irq = -EINVAL;
+ }
+ }
+
+ /* Retrieve the DDC I2C adapter for DVI, HDMI and VGA connectors. */
+ if (type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_VGA) {
+ struct device_node *phandle;
+
+ phandle = of_parse_phandle(pdev->dev.of_node, "ddc-i2c-bus", 0);
+ if (phandle) {
+ conn->bridge.ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!conn->bridge.ddc)
+ return -EPROBE_DEFER;
+ } else {
+ dev_dbg(&pdev->dev,
+ "No I2C bus specified, disabling EDID readout\n");
+ }
+ }
+
+ conn->bridge.funcs = &display_connector_bridge_funcs;
+ conn->bridge.of_node = pdev->dev.of_node;
+
+ if (conn->bridge.ddc)
+ conn->bridge.ops |= DRM_BRIDGE_OP_EDID
+ | DRM_BRIDGE_OP_DETECT;
+ if (conn->hpd_gpio)
+ conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
+ if (conn->hpd_irq >= 0)
+ conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
+ dev_dbg(&pdev->dev,
+ "Found %s display connector '%s' %s DDC bus and %s HPD GPIO (ops 0x%x)\n",
+ drm_get_connector_type_name(conn->bridge.type),
+ label ? label : "<unlabelled>",
+ conn->bridge.ddc ? "with" : "without",
+ conn->hpd_gpio ? "with" : "without",
+ conn->bridge.ops);
+
+ drm_bridge_add(&conn->bridge);
+
+ return 0;
+}
+
+static int display_connector_remove(struct platform_device *pdev)
+{
+ struct display_connector *conn = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&conn->bridge);
+
+ if (!IS_ERR(conn->bridge.ddc))
+ i2c_put_adapter(conn->bridge.ddc);
+
+ return 0;
+}
+
+static const struct of_device_id display_connector_match[] = {
+ {
+ .compatible = "composite-video-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_Composite,
+ }, {
+ .compatible = "dvi-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_DVII,
+ }, {
+ .compatible = "hdmi-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_HDMIA,
+ }, {
+ .compatible = "svideo-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_SVIDEO,
+ }, {
+ .compatible = "vga-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_VGA,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, display_connector_match);
+
+static struct platform_driver display_connector_driver = {
+ .probe = display_connector_probe,
+ .remove = display_connector_remove,
+ .driver = {
+ .name = "display-connector",
+ .of_match_table = display_connector_match,
+ },
+};
+module_platform_driver(display_connector_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Display connector driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
deleted file mode 100644
index cc33dc411b9e..000000000000
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015-2016 Free Electrons
- * Copyright (C) 2015-2016 NextThing Co
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- */
-
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-
-struct dumb_vga {
- struct drm_bridge bridge;
- struct drm_connector connector;
-
- struct i2c_adapter *ddc;
- struct regulator *vdd;
-};
-
-static inline struct dumb_vga *
-drm_bridge_to_dumb_vga(struct drm_bridge *bridge)
-{
- return container_of(bridge, struct dumb_vga, bridge);
-}
-
-static inline struct dumb_vga *
-drm_connector_to_dumb_vga(struct drm_connector *connector)
-{
- return container_of(connector, struct dumb_vga, connector);
-}
-
-static int dumb_vga_get_modes(struct drm_connector *connector)
-{
- struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
- struct edid *edid;
- int ret;
-
- if (!vga->ddc)
- goto fallback;
-
- edid = drm_get_edid(connector, vga->ddc);
- if (!edid) {
- DRM_INFO("EDID readout failed, falling back to standard modes\n");
- goto fallback;
- }
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- return ret;
-
-fallback:
- /*
- * In case we cannot retrieve the EDIDs (broken or missing i2c
- * bus), fallback on the XGA standards
- */
- ret = drm_add_modes_noedid(connector, 1920, 1200);
-
- /* And prefer a mode pretty much anyone can handle */
- drm_set_preferred_mode(connector, 1024, 768);
-
- return ret;
-}
-
-static const struct drm_connector_helper_funcs dumb_vga_con_helper_funcs = {
- .get_modes = dumb_vga_get_modes,
-};
-
-static enum drm_connector_status
-dumb_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
-
- /*
- * Even if we have an I2C bus, we can't assume that the cable
- * is disconnected if drm_probe_ddc fails. Some cables don't
- * wire the DDC pins, or the I2C bus might not be working at
- * all.
- */
- if (vga->ddc && drm_probe_ddc(vga->ddc))
- return connector_status_connected;
-
- return connector_status_unknown;
-}
-
-static const struct drm_connector_funcs dumb_vga_con_funcs = {
- .detect = dumb_vga_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int dumb_vga_attach(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
- int ret;
-
- if (!bridge->encoder) {
- DRM_ERROR("Missing encoder\n");
- return -ENODEV;
- }
-
- drm_connector_helper_add(&vga->connector,
- &dumb_vga_con_helper_funcs);
- ret = drm_connector_init_with_ddc(bridge->dev, &vga->connector,
- &dumb_vga_con_funcs,
- DRM_MODE_CONNECTOR_VGA,
- vga->ddc);
- if (ret) {
- DRM_ERROR("Failed to initialize connector\n");
- return ret;
- }
-
- drm_connector_attach_encoder(&vga->connector,
- bridge->encoder);
-
- return 0;
-}
-
-static void dumb_vga_enable(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
- int ret = 0;
-
- if (vga->vdd)
- ret = regulator_enable(vga->vdd);
-
- if (ret)
- DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
-}
-
-static void dumb_vga_disable(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
-
- if (vga->vdd)
- regulator_disable(vga->vdd);
-}
-
-static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
- .attach = dumb_vga_attach,
- .enable = dumb_vga_enable,
- .disable = dumb_vga_disable,
-};
-
-static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
-{
- struct device_node *phandle, *remote;
- struct i2c_adapter *ddc;
-
- remote = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!remote)
- return ERR_PTR(-EINVAL);
-
- phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- of_node_put(remote);
- if (!phandle)
- return ERR_PTR(-ENODEV);
-
- ddc = of_get_i2c_adapter_by_node(phandle);
- of_node_put(phandle);
- if (!ddc)
- return ERR_PTR(-EPROBE_DEFER);
-
- return ddc;
-}
-
-static int dumb_vga_probe(struct platform_device *pdev)
-{
- struct dumb_vga *vga;
-
- vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
- if (!vga)
- return -ENOMEM;
- platform_set_drvdata(pdev, vga);
-
- vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
- if (IS_ERR(vga->vdd)) {
- int ret = PTR_ERR(vga->vdd);
- if (ret == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- vga->vdd = NULL;
- dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
- }
-
- vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
- if (IS_ERR(vga->ddc)) {
- if (PTR_ERR(vga->ddc) == -ENODEV) {
- dev_dbg(&pdev->dev,
- "No i2c bus specified. Disabling EDID readout\n");
- vga->ddc = NULL;
- } else {
- dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
- return PTR_ERR(vga->ddc);
- }
- }
-
- vga->bridge.funcs = &dumb_vga_bridge_funcs;
- vga->bridge.of_node = pdev->dev.of_node;
- vga->bridge.timings = of_device_get_match_data(&pdev->dev);
-
- drm_bridge_add(&vga->bridge);
-
- return 0;
-}
-
-static int dumb_vga_remove(struct platform_device *pdev)
-{
- struct dumb_vga *vga = platform_get_drvdata(pdev);
-
- drm_bridge_remove(&vga->bridge);
-
- if (vga->ddc)
- i2c_put_adapter(vga->ddc);
-
- return 0;
-}
-
-/*
- * We assume the ADV7123 DAC is the "default" for historical reasons
- * Information taken from the ADV7123 datasheet, revision D.
- * NOTE: the ADV7123EP seems to have other timings and need a new timings
- * set if used.
- */
-static const struct drm_bridge_timings default_dac_timings = {
- /* Timing specifications, datasheet page 7 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- .setup_time_ps = 500,
- .hold_time_ps = 1500,
-};
-
-/*
- * Information taken from the THS8134, THS8134A, THS8134B datasheet named
- * "SLVS205D", dated May 1990, revised March 2000.
- */
-static const struct drm_bridge_timings ti_ths8134_dac_timings = {
- /* From timing diagram, datasheet page 9 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- /* From datasheet, page 12 */
- .setup_time_ps = 3000,
- /* I guess this means latched input */
- .hold_time_ps = 0,
-};
-
-/*
- * Information taken from the THS8135 datasheet named "SLAS343B", dated
- * May 2001, revised April 2013.
- */
-static const struct drm_bridge_timings ti_ths8135_dac_timings = {
- /* From timing diagram, datasheet page 14 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- /* From datasheet, page 16 */
- .setup_time_ps = 2000,
- .hold_time_ps = 500,
-};
-
-static const struct of_device_id dumb_vga_match[] = {
- {
- .compatible = "dumb-vga-dac",
- .data = NULL,
- },
- {
- .compatible = "adi,adv7123",
- .data = &default_dac_timings,
- },
- {
- .compatible = "ti,ths8135",
- .data = &ti_ths8135_dac_timings,
- },
- {
- .compatible = "ti,ths8134",
- .data = &ti_ths8134_dac_timings,
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, dumb_vga_match);
-
-static struct platform_driver dumb_vga_driver = {
- .probe = dumb_vga_probe,
- .remove = dumb_vga_remove,
- .driver = {
- .name = "dumb-vga-dac",
- .of_match_table = dumb_vga_match,
- },
-};
-module_platform_driver(dumb_vga_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_DESCRIPTION("Dumb VGA DAC bridge driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 5f04cc11227e..24fb1befdfa2 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -21,19 +21,23 @@ struct lvds_codec {
u32 connector_type;
};
-static int lvds_codec_attach(struct drm_bridge *bridge)
+static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ return container_of(bridge, struct lvds_codec, bridge);
+}
+
+static int lvds_codec_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
- bridge);
+ bridge, flags);
}
static void lvds_codec_enable(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
@@ -41,14 +45,13 @@ static void lvds_codec_enable(struct drm_bridge *bridge)
static void lvds_codec_disable(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
}
-static struct drm_bridge_funcs funcs = {
+static const struct drm_bridge_funcs funcs = {
.attach = lvds_codec_attach,
.enable = lvds_codec_enable,
.disable = lvds_codec_disable,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index e8a49f6146c6..6200f12a37e6 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -206,13 +206,19 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
+static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct drm_connector *connector = &ge_b850v3_lvds_ptr->connector;
struct i2c_client *stdp4028_i2c
= ge_b850v3_lvds_ptr->stdp4028_i2c;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 57ff01339559..438e566ce0a4 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -236,11 +236,17 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index f66777e24968..8461ee8304ba 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -53,12 +53,16 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int panel_bridge_attach(struct drm_bridge *bridge)
+static int panel_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
return -ENODEV;
@@ -120,6 +124,14 @@ static void panel_bridge_post_disable(struct drm_bridge *bridge)
drm_panel_unprepare(panel_bridge->panel);
}
+static int panel_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ return drm_panel_get_modes(panel_bridge->panel, connector);
+}
+
static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.attach = panel_bridge_attach,
.detach = panel_bridge_detach,
@@ -127,6 +139,11 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.enable = panel_bridge_enable,
.disable = panel_bridge_disable,
.post_disable = panel_bridge_post_disable,
+ .get_modes = panel_bridge_get_modes,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
};
/**
@@ -151,7 +168,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* known type. Calling this function with a panel whose connector type is
* DRM_MODE_CONNECTOR_Unknown will return NULL.
*
- * See devm_drm_panel_bridge_add() for an automatically manged version of this
+ * See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
*/
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
@@ -196,6 +213,8 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
#ifdef CONFIG_OF
panel_bridge->bridge.of_node = panel->dev->of_node;
#endif
+ panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
+ panel_bridge->bridge.type = connector_type;
drm_bridge_add(&panel_bridge->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 10c47c008b40..d789ea2a7fb9 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -476,11 +476,17 @@ static const struct drm_connector_funcs ps8622_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ps8622_attach(struct drm_bridge *bridge)
+static int ps8622_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
new file mode 100644
index 000000000000..d3a53442d449
--- /dev/null
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define PAGE2_GPIO_H 0xa7
+#define PS_GPIO9 BIT(1)
+#define PAGE2_I2C_BYPASS 0xea
+#define I2C_BYPASS_EN 0xd0
+#define PAGE2_MCS_EN 0xf3
+#define MCS_EN BIT(0)
+#define PAGE3_SET_ADD 0xfe
+#define VDO_CTL_ADD 0x13
+#define VDO_DIS 0x18
+#define VDO_EN 0x1c
+#define DP_NUM_LANES 4
+
+/*
+ * PS8640 uses multiple addresses:
+ * page[0]: for DP control
+ * page[1]: for VIDEO Bridge
+ * page[2]: for control top
+ * page[3]: for DSI Link Control1
+ * page[4]: for MIPI Phy
+ * page[5]: for VPLL
+ * page[6]: for DSI Link Control2
+ * page[7]: for SPI ROM mapping
+ */
+enum page_addr_offset {
+ PAGE0_DP_CNTL = 0,
+ PAGE1_VDO_BDG,
+ PAGE2_TOP_CNTL,
+ PAGE3_DSI_CNTL1,
+ PAGE4_MIPI_PHY,
+ PAGE5_VPLL,
+ PAGE6_DSI_CNTL2,
+ PAGE7_SPI_CNTL,
+ MAX_DEVS
+};
+
+enum ps8640_vdo_control {
+ DISABLE = VDO_DIS,
+ ENABLE = VDO_EN,
+};
+
+struct ps8640 {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+ struct mipi_dsi_device *dsi;
+ struct i2c_client *page[MAX_DEVS];
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_powerdown;
+};
+
+static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
+{
+ return container_of(e, struct ps8640, bridge);
+}
+
+static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
+ const enum ps8640_vdo_control ctrl)
+{
+ struct i2c_client *client = ps_bridge->page[PAGE3_DSI_CNTL1];
+ u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
+ sizeof(vdo_ctrl_buf),
+ vdo_ctrl_buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void ps8640_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
+ unsigned long timeout;
+ int ret, status;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret < 0) {
+ DRM_ERROR("cannot enable regulators %d\n", ret);
+ return;
+ }
+
+ gpiod_set_value(ps_bridge->gpio_powerdown, 0);
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ usleep_range(2000, 2500);
+ gpiod_set_value(ps_bridge->gpio_reset, 0);
+
+ /*
+ * Wait for the ps8640 embedded MCU to be ready
+ * First wait 200ms and then check the MCU ready flag every 20ms
+ */
+ msleep(200);
+
+ timeout = jiffies + msecs_to_jiffies(200) + 1;
+
+ while (time_is_after_jiffies(timeout)) {
+ status = i2c_smbus_read_byte_data(client, PAGE2_GPIO_H);
+ if (status < 0) {
+ DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", status);
+ goto err_regulators_disable;
+ }
+ if ((status & PS_GPIO9) == PS_GPIO9)
+ break;
+
+ msleep(20);
+ }
+
+ msleep(50);
+
+ /*
+ * The Manufacturer Command Set (MCS) is a device dependent interface
+ * intended for factory programming of the display module default
+ * parameters. Once the display module is configured, the MCS shall be
+ * disabled by the manufacturer. Once disabled, all MCS commands are
+ * ignored by the display interface.
+ */
+ status = i2c_smbus_read_byte_data(client, PAGE2_MCS_EN);
+ if (status < 0) {
+ DRM_ERROR("failed read PAGE2_MCS_EN: %d\n", status);
+ goto err_regulators_disable;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, PAGE2_MCS_EN,
+ status & ~MCS_EN);
+ if (ret < 0) {
+ DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+ if (ret) {
+ DRM_ERROR("failed to enable VDO: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ /* Switch to accessing the eDP panel's EDID through I2C */
+ ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
+ I2C_BYPASS_EN);
+ if (ret < 0) {
+ DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ return;
+
+err_regulators_disable:
+ regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+}
+
+static void ps8640_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ int ret;
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+ if (ret < 0)
+ DRM_ERROR("failed to disable VDO: %d\n", ret);
+
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ gpiod_set_value(ps_bridge->gpio_powerdown, 1);
+ ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret < 0)
+ DRM_ERROR("cannot disable regulators %d\n", ret);
+}
+
+static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct device *dev = &ps_bridge->page[0]->dev;
+ struct device_node *in_ep, *dsi_node;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+ const struct mipi_dsi_device_info info = { .type = "ps8640",
+ .channel = 0,
+ .node = NULL,
+ };
+ /* port@0 is ps8640 dsi input port */
+ in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+ if (!in_ep)
+ return -ENODEV;
+
+ dsi_node = of_graph_get_remote_port_parent(in_ep);
+ of_node_put(in_ep);
+ if (!dsi_node)
+ return -ENODEV;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+ of_node_put(dsi_node);
+ if (!host)
+ return -ENODEV;
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ return ret;
+ }
+
+ ps_bridge->dsi = dsi;
+
+ dsi->host = host;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = DP_NUM_LANES;
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ goto err_dsi_attach;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ &ps_bridge->bridge, flags);
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+ return ret;
+}
+
+static const struct drm_bridge_funcs ps8640_bridge_funcs = {
+ .attach = ps8640_bridge_attach,
+ .post_disable = ps8640_post_disable,
+ .pre_enable = ps8640_pre_enable,
+};
+
+static int ps8640_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct ps8640 *ps_bridge;
+ struct drm_panel *panel;
+ int ret;
+ u32 i;
+
+ ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
+ if (!ps_bridge)
+ return -ENOMEM;
+
+ /* port@1 is ps8640 output port */
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+ if (ret < 0)
+ return ret;
+ if (!panel)
+ return -ENODEV;
+
+ panel->connector_type = DRM_MODE_CONNECTOR_eDP;
+
+ ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ps_bridge->panel_bridge))
+ return PTR_ERR(ps_bridge->panel_bridge);
+
+ ps_bridge->supplies[0].supply = "vdd33";
+ ps_bridge->supplies[1].supply = "vdd12";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret)
+ return ret;
+
+ ps_bridge->gpio_powerdown = devm_gpiod_get(&client->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_powerdown))
+ return PTR_ERR(ps_bridge->gpio_powerdown);
+
+ /*
+ * Assert the reset to avoid the bridge being initialized prematurely
+ */
+ ps_bridge->gpio_reset = devm_gpiod_get(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_reset))
+ return PTR_ERR(ps_bridge->gpio_reset);
+
+ ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
+ ps_bridge->bridge.of_node = dev->of_node;
+
+ ps_bridge->page[PAGE0_DP_CNTL] = client;
+
+ for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) {
+ ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ client->addr + i);
+ if (IS_ERR(ps_bridge->page[i])) {
+ dev_err(dev, "failed i2c dummy device, address %02x\n",
+ client->addr + i);
+ return PTR_ERR(ps_bridge->page[i]);
+ }
+ }
+
+ i2c_set_clientdata(client, ps_bridge);
+
+ drm_bridge_add(&ps_bridge->bridge);
+
+ return 0;
+}
+
+static int ps8640_remove(struct i2c_client *client)
+{
+ struct ps8640 *ps_bridge = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&ps_bridge->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ps8640_match[] = {
+ { .compatible = "parade,ps8640" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ps8640_match);
+
+static struct i2c_driver ps8640_driver = {
+ .probe_new = ps8640_probe,
+ .remove = ps8640_remove,
+ .driver = {
+ .name = "ps8640",
+ .of_match_table = ps8640_match,
+ },
+};
+module_i2c_driver(ps8640_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>");
+MODULE_AUTHOR("CK Hu <ck.hu@mediatek.com>");
+MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
+MODULE_DESCRIPTION("PARADE ps8640 DSI-eDP converter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index b70e8c5cf2e1..6dad025f8da7 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -399,12 +399,18 @@ out:
mutex_unlock(&sii902x->mutex);
}
-static int sii902x_bridge_attach(struct drm_bridge *bridge)
+static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
drm_connector_helper_add(&sii902x->connector,
&sii902x_connector_helper_funcs);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 4c0eef406eb1..92acd336aa89 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2202,7 +2202,8 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
return container_of(bridge, struct sii8620, bridge);
}
-static int sii8620_attach(struct drm_bridge *bridge)
+static int sii8620_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
new file mode 100644
index 000000000000..a2dca7a3ef03
--- /dev/null
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015-2016 Free Electrons
+ * Copyright (C) 2015-2016 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+struct simple_bridge_info {
+ const struct drm_bridge_timings *timings;
+ unsigned int connector_type;
+};
+
+struct simple_bridge {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ const struct simple_bridge_info *info;
+
+ struct i2c_adapter *ddc;
+ struct regulator *vdd;
+ struct gpio_desc *enable;
+};
+
+static inline struct simple_bridge *
+drm_bridge_to_simple_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct simple_bridge, bridge);
+}
+
+static inline struct simple_bridge *
+drm_connector_to_simple_bridge(struct drm_connector *connector)
+{
+ return container_of(connector, struct simple_bridge, connector);
+}
+
+static int simple_bridge_get_modes(struct drm_connector *connector)
+{
+ struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+ struct edid *edid;
+ int ret;
+
+ if (!sbridge->ddc)
+ goto fallback;
+
+ edid = drm_get_edid(connector, sbridge->ddc);
+ if (!edid) {
+ DRM_INFO("EDID readout failed, falling back to standard modes\n");
+ goto fallback;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
+
+fallback:
+ /*
+ * In case we cannot retrieve the EDID (broken or missing i2c
+ * bus), fall back on the XGA standards
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+ /* And prefer a mode pretty much anyone can handle */
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs simple_bridge_con_helper_funcs = {
+ .get_modes = simple_bridge_get_modes,
+};
+
+static enum drm_connector_status
+simple_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+
+ /*
+ * Even if we have an I2C bus, we can't assume that the cable
+ * is disconnected if drm_probe_ddc fails. Some cables don't
+ * wire the DDC pins, or the I2C bus might not be working at
+ * all.
+ */
+ if (sbridge->ddc && drm_probe_ddc(sbridge->ddc))
+ return connector_status_connected;
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs simple_bridge_con_funcs = {
+ .detect = simple_bridge_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int simple_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(&sbridge->connector,
+ &simple_bridge_con_helper_funcs);
+ ret = drm_connector_init_with_ddc(bridge->dev, &sbridge->connector,
+ &simple_bridge_con_funcs,
+ sbridge->info->connector_type,
+ sbridge->ddc);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_attach_encoder(&sbridge->connector,
+ bridge->encoder);
+
+ return 0;
+}
+
+static void simple_bridge_enable(struct drm_bridge *bridge)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+ int ret;
+
+ if (sbridge->vdd) {
+ ret = regulator_enable(sbridge->vdd);
+ if (ret)
+ DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
+ }
+
+ gpiod_set_value_cansleep(sbridge->enable, 1);
+}
+
+static void simple_bridge_disable(struct drm_bridge *bridge)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+
+ gpiod_set_value_cansleep(sbridge->enable, 0);
+
+ if (sbridge->vdd)
+ regulator_disable(sbridge->vdd);
+}
+
+static const struct drm_bridge_funcs simple_bridge_bridge_funcs = {
+ .attach = simple_bridge_attach,
+ .enable = simple_bridge_enable,
+ .disable = simple_bridge_disable,
+};
+
+static struct i2c_adapter *simple_bridge_retrieve_ddc(struct device *dev)
+{
+ struct device_node *phandle, *remote;
+ struct i2c_adapter *ddc;
+
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return ERR_PTR(-EINVAL);
+
+ phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!phandle)
+ return ERR_PTR(-ENODEV);
+
+ ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return ddc;
+}
+
+static int simple_bridge_probe(struct platform_device *pdev)
+{
+ struct simple_bridge *sbridge;
+
+ sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
+ if (!sbridge)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, sbridge);
+
+ sbridge->info = of_device_get_match_data(&pdev->dev);
+
+ sbridge->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+ if (IS_ERR(sbridge->vdd)) {
+ int ret = PTR_ERR(sbridge->vdd);
+ if (ret == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ sbridge->vdd = NULL;
+ dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
+ }
+
+ sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sbridge->enable)) {
+ if (PTR_ERR(sbridge->enable) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to retrieve enable GPIO\n");
+ return PTR_ERR(sbridge->enable);
+ }
+
+ sbridge->ddc = simple_bridge_retrieve_ddc(&pdev->dev);
+ if (IS_ERR(sbridge->ddc)) {
+ if (PTR_ERR(sbridge->ddc) == -ENODEV) {
+ dev_dbg(&pdev->dev,
+ "No i2c bus specified. Disabling EDID readout\n");
+ sbridge->ddc = NULL;
+ } else {
+ dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
+ return PTR_ERR(sbridge->ddc);
+ }
+ }
+
+ sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
+ sbridge->bridge.of_node = pdev->dev.of_node;
+ sbridge->bridge.timings = sbridge->info->timings;
+
+ drm_bridge_add(&sbridge->bridge);
+
+ return 0;
+}
+
+static int simple_bridge_remove(struct platform_device *pdev)
+{
+ struct simple_bridge *sbridge = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&sbridge->bridge);
+
+ if (sbridge->ddc)
+ i2c_put_adapter(sbridge->ddc);
+
+ return 0;
+}
+
+/*
+ * We assume the ADV7123 DAC is the "default" for historical reasons
+ * Information taken from the ADV7123 datasheet, revision D.
+ * NOTE: the ADV7123EP seems to have other timings and needs a new
+ * timing set if used.
+ */
+static const struct drm_bridge_timings default_bridge_timings = {
+ /* Timing specifications, datasheet page 7 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ .setup_time_ps = 500,
+ .hold_time_ps = 1500,
+};
+
+/*
+ * Information taken from the THS8134, THS8134A, THS8134B datasheet named
+ * "SLVS205D", dated May 1990, revised March 2000.
+ */
+static const struct drm_bridge_timings ti_ths8134_bridge_timings = {
+ /* From timing diagram, datasheet page 9 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ /* From datasheet, page 12 */
+ .setup_time_ps = 3000,
+ /* I guess this means latched input */
+ .hold_time_ps = 0,
+};
+
+/*
+ * Information taken from the THS8135 datasheet named "SLAS343B", dated
+ * May 2001, revised April 2013.
+ */
+static const struct drm_bridge_timings ti_ths8135_bridge_timings = {
+ /* From timing diagram, datasheet page 14 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ /* From datasheet, page 16 */
+ .setup_time_ps = 2000,
+ .hold_time_ps = 500,
+};
+
+static const struct of_device_id simple_bridge_match[] = {
+ {
+ .compatible = "dumb-vga-dac",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "adi,adv7123",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &default_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "ti,opa362",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_Composite,
+ },
+ }, {
+ .compatible = "ti,ths8135",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &ti_ths8135_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "ti,ths8134",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &ti_ths8134_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, simple_bridge_match);
+
+static struct platform_driver simple_bridge_driver = {
+ .probe = simple_bridge_probe,
+ .remove = simple_bridge_remove,
+ .driver = {
+ .name = "simple-bridge",
+ .of_match_table = simple_bridge_match,
+ },
+};
+module_platform_driver(simple_bridge_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Simple DRM bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 24965e53d351..383b1073d7de 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1820,13 +1820,32 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
unsigned int vdisplay, hdisplay;
- vmode->mtmdsclock = vmode->mpixelclock = mode->clock * 1000;
+ vmode->mpixelclock = mode->clock * 1000;
dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
+ vmode->mtmdsclock = vmode->mpixelclock;
+
+ if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
+ switch (hdmi_bus_fmt_color_depth(
+ hdmi->hdmi_data.enc_out_bus_format)) {
+ case 16:
+ vmode->mtmdsclock = vmode->mpixelclock * 2;
+ break;
+ case 12:
+ vmode->mtmdsclock = vmode->mpixelclock * 3 / 2;
+ break;
+ case 10:
+ vmode->mtmdsclock = vmode->mpixelclock * 5 / 4;
+ break;
+ }
+ }
+
if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
vmode->mtmdsclock /= 2;
+ dev_dbg(hdmi->dev, "final tmdsclock = %d\n", vmode->mtmdsclock);
+
/* Set up HDMI_FC_INVIDCONF */
inv_val = (hdmi->hdmi_data.hdcp_enable ||
(dw_hdmi_support_scdc(hdmi) &&
@@ -2084,11 +2103,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
- /* TOFIX: Get input format from plat data or fallback to RGB888 */
if (hdmi->plat_data->input_bus_format)
hdmi->hdmi_data.enc_in_bus_format =
hdmi->plat_data->input_bus_format;
- else
+ else if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED)
hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
/* TOFIX: Get input encoding from plat data or fallback to none */
@@ -2098,8 +2116,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
else
hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
- /* TOFIX: Default to RGB888 output format */
- hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
+ hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
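Editor's note: the TMDS clock scaling added above follows directly from the link arithmetic. At 8 bpc a TMDS character carries one pixel, so deeper color multiplies the clock by bpc/8 (2x at 16 bpc, 3/2 at 12, 5/4 at 10); YUV 4:2:2 packs its 12-bit components into the same character stream so it stays 1:1, and YUV 4:2:0 halves the pixel rate. A standalone restatement for checking values (units are kHz, as in mode->clock):

	static unsigned long tmds_clock_khz(unsigned long pixelclock_khz,
					    unsigned int bpc,
					    bool yuv422, bool yuv420)
	{
		unsigned long tmds = pixelclock_khz;

		if (!yuv422) {		/* 4:2:2 keeps a 1:1 clock ratio */
			switch (bpc) {
			case 16: tmds = pixelclock_khz * 2;     break;
			case 12: tmds = pixelclock_khz * 3 / 2; break;
			case 10: tmds = pixelclock_khz * 5 / 4; break;
			}
		}
		if (yuv420)		/* 4:2:0 halves the TMDS clock */
			tmds /= 2;

		return tmds;	/* e.g. 1080p60: 148500 @ 12 bpc -> 222750 */
	}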
@@ -2377,7 +2395,279 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs =
.atomic_check = dw_hdmi_connector_atomic_check,
};
-static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
+/*
+ * Possible output formats :
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48,
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_YUV16_1X48,
+ * - MEDIA_BUS_FMT_RGB161616_1X48,
+ * - MEDIA_BUS_FMT_UYVY12_1X24,
+ * - MEDIA_BUS_FMT_YUV12_1X36,
+ * - MEDIA_BUS_FMT_RGB121212_1X36,
+ * - MEDIA_BUS_FMT_UYVY10_1X20,
+ * - MEDIA_BUS_FMT_YUV10_1X30,
+ * - MEDIA_BUS_FMT_RGB101010_1X30,
+ * - MEDIA_BUS_FMT_UYVY8_1X16,
+ * - MEDIA_BUS_FMT_YUV8_1X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
+ */
+
+/* Can return a maximum of 11 possible output formats for a mode/connector */
+#define MAX_OUTPUT_SEL_FORMATS 11
+
+static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_display_info *info = &conn->display_info;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ u8 max_bpc = conn_state->max_requested_bpc;
+ bool is_hdmi2_sink = info->hdmi.scdc.supported ||
+ (info->color_formats & DRM_COLOR_FORMAT_YCRCB420);
+ u32 *output_fmts;
+ unsigned int i = 0;
+
+ *num_output_fmts = 0;
+
+ output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts),
+ GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ /* If dw-hdmi is the only bridge, avoid negotiating with ourselves */
+ if (list_is_singular(&bridge->encoder->bridge_chain)) {
+ *num_output_fmts = 1;
+ output_fmts[0] = MEDIA_BUS_FMT_FIXED;
+
+ return output_fmts;
+ }
+
+ /*
+ * If the current mode enforces 4:2:0, force the output bus format
+ * to 4:2:0 and do not add the YUV422/444/RGB formats
+ */
+ if (conn->ycbcr_420_allowed &&
+ (drm_mode_is_420_only(info, mode) ||
+ (is_hdmi2_sink && drm_mode_is_420_also(info, mode)))) {
+
+ /* Order bus formats from 16bit to 8bit if supported */
+ if (max_bpc >= 16 && info->bpc == 16 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY16_0_5X48;
+
+ if (max_bpc >= 12 && info->bpc >= 12 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY12_0_5X36;
+
+ if (max_bpc >= 10 && info->bpc >= 10 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
+
+ /* Default 8bit fallback */
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
+
+ *num_output_fmts = i;
+
+ return output_fmts;
+ }
+
+ /*
+ * Order bus formats from 16bit to 8bit and from YUV422 to RGB
+ * if supported. In any case, the default RGB888 format is added
+ */
+
+ if (max_bpc >= 16 && info->bpc == 16) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ }
+
+ if (max_bpc >= 12 && info->bpc >= 12) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ }
+
+ if (max_bpc >= 10 && info->bpc >= 10) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ }
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+
+ /* Default 8bit RGB fallback */
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+
+ *num_output_fmts = i;
+
+ return output_fmts;
+}
+
+/*
+ * Possible input formats:
+ * - MEDIA_BUS_FMT_RGB888_1X24
+ * - MEDIA_BUS_FMT_YUV8_1X24
+ * - MEDIA_BUS_FMT_UYVY8_1X16
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24
+ * - MEDIA_BUS_FMT_RGB101010_1X30
+ * - MEDIA_BUS_FMT_YUV10_1X30
+ * - MEDIA_BUS_FMT_UYVY10_1X20
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30
+ * - MEDIA_BUS_FMT_RGB121212_1X36
+ * - MEDIA_BUS_FMT_YUV12_1X36
+ * - MEDIA_BUS_FMT_UYVY12_1X24
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36
+ * - MEDIA_BUS_FMT_RGB161616_1X48
+ * - MEDIA_BUS_FMT_YUV16_1X48
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48
+ */
+
+/* Can return a maximum of 3 possible input formats for an output format */
+#define MAX_INPUT_SEL_FORMATS 3
+
+static u32 *dw_hdmi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+ unsigned int i = 0;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ switch (output_fmt) {
+ /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
+ case MEDIA_BUS_FMT_FIXED:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ /* 8bit */
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ break;
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+
+ /* 10bit */
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ break;
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+
+ /* 12bit */
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ break;
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ break;
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ break;
+
+ /* 16bit */
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+ break;
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ break;
+
+ /* YUV 4:2:0 */
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ input_fmts[i++] = output_fmt;
+ break;
+ }
+
+ *num_input_fmts = i;
+
+ if (*num_input_fmts == 0) {
+ kfree(input_fmts);
+ input_fmts = NULL;
+ }
+
+ return input_fmts;
+}
+
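Taken together, the two hooks above drive the core's bus-format negotiation: the core walks the chain from the connector backwards, asking each bridge for its preferred output formats and then, per output format, for the input formats it can accept. A simplified, self-contained model of that first-match selection (assumed behaviour of the chain-selection code in drm_bridge.c, not the actual helper):

#include <stddef.h>

/* Pick the first (output, input) pair the preceding element can drive;
 * both lists are ordered most- to least-preferred, as above. */
static int negotiate(const unsigned int *out_fmts, size_t n_out,
		     size_t (*get_in_fmts)(unsigned int out,
					   unsigned int *in, size_t max),
		     int (*prev_supports)(unsigned int fmt),
		     unsigned int *chosen_out, unsigned int *chosen_in)
{
	unsigned int in_fmts[3];	/* MAX_INPUT_SEL_FORMATS */
	size_t i, j, n_in;

	for (i = 0; i < n_out; i++) {
		n_in = get_in_fmts(out_fmts[i], in_fmts, 3);
		for (j = 0; j < n_in; j++) {
			if (prev_supports(in_fmts[j])) {
				*chosen_out = out_fmts[i];
				*chosen_in = in_fmts[j];
				return 0;
			}
		}
	}

	return -1;	/* no agreeable pair: the atomic check fails */
}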
+static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ hdmi->hdmi_data.enc_out_bus_format =
+ bridge_state->output_bus_cfg.format;
+
+ hdmi->hdmi_data.enc_in_bus_format =
+ bridge_state->input_bus_cfg.format;
+
+ dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n",
+ bridge_state->input_bus_cfg.format,
+ bridge_state->output_bus_cfg.format);
+
+ return 0;
+}
+
+static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
struct drm_encoder *encoder = bridge->encoder;
@@ -2385,6 +2675,11 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
struct cec_connector_info conn_info;
struct cec_notifier *notifier;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
connector->interlace_allowed = 1;
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2395,6 +2690,14 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
+ /*
+ * drm_connector_attach_max_bpc_property() requires the
+ * connector to have a state.
+ */
+ drm_atomic_helper_connector_reset(connector);
+
+ drm_connector_attach_max_bpc_property(connector, 8, 16);
+
if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
drm_object_attach_property(&connector->base,
connector->dev->mode_config.hdr_output_metadata_property, 0);
@@ -2479,8 +2782,14 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
}
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.attach = dw_hdmi_bridge_attach,
.detach = dw_hdmi_bridge_detach,
+ .atomic_check = dw_hdmi_bridge_atomic_check,
+ .atomic_get_output_bus_fmts = dw_hdmi_bridge_atomic_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = dw_hdmi_bridge_atomic_get_input_bus_fmts,
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
@@ -2949,6 +3258,12 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
+ if (hdmi->version >= 0x200a)
+ hdmi->connector.ycbcr_420_allowed =
+ hdmi->plat_data->ycbcr_420_allowed;
+ else
+ hdmi->connector.ycbcr_420_allowed = false;
+
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
@@ -3082,7 +3397,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
if (IS_ERR(hdmi))
return hdmi;
- ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0);
if (ret) {
dw_hdmi_remove(hdmi);
DRM_ERROR("Failed to initialize bridge with drm\n");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b18351b6760a..5ef0f154aa7b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -824,7 +824,8 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
* This needs to be fixed in the drm_bridge framework and the API
* needs to be updated to manage our own call chains...
*/
- dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+ if (dsi->panel_bridge->funcs->post_disable)
+ dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
if (phy_ops->power_off)
phy_ops->power_off(dsi->plat_data->priv_data);
@@ -935,7 +936,8 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return mode_status;
}
-static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
+static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -948,7 +950,8 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ flags);
}
static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
@@ -1119,7 +1122,7 @@ int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder)
{
int ret;
- ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 96207fcfde19..5ac1430fab04 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -349,12 +349,18 @@ static void tc358764_enable(struct drm_bridge *bridge)
dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
-static int tc358764_attach(struct drm_bridge *bridge)
+static int tc358764_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(drm, &ctx->connector,
&tc358764_connector_funcs,
@@ -369,7 +375,6 @@ static int tc358764_attach(struct drm_bridge *bridge)
drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
drm_panel_attach(ctx->panel, &ctx->connector);
ctx->connector.funcs->reset(&ctx->connector);
- drm_fb_helper_add_one_connector(drm->fb_helper, &ctx->connector);
drm_connector_register(&ctx->connector);
return 0;
@@ -378,10 +383,8 @@ static int tc358764_attach(struct drm_bridge *bridge)
static void tc358764_detach(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- struct drm_device *drm = bridge->dev;
drm_connector_unregister(&ctx->connector);
- drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
drm_panel_detach(ctx->panel);
ctx->panel = NULL;
drm_connector_put(&ctx->connector);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index fbdb42d4e772..e4c0ea03ae3a 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -31,6 +31,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/* Registers */
@@ -1403,13 +1404,19 @@ static const struct drm_connector_funcs tc_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int tc_bridge_attach(struct drm_bridge *bridge)
+static int tc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
struct tc_data *tc = bridge_to_tc(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
/* Create DP/eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
new file mode 100644
index 000000000000..1b39e8d37834
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+/* Global (16-bit addressable) */
+#define TC358768_CHIPID 0x0000
+#define TC358768_SYSCTL 0x0002
+#define TC358768_CONFCTL 0x0004
+#define TC358768_VSDLY 0x0006
+#define TC358768_DATAFMT 0x0008
+#define TC358768_GPIOEN 0x000E
+#define TC358768_GPIODIR 0x0010
+#define TC358768_GPIOIN 0x0012
+#define TC358768_GPIOOUT 0x0014
+#define TC358768_PLLCTL0 0x0016
+#define TC358768_PLLCTL1 0x0018
+#define TC358768_CMDBYTE 0x0022
+#define TC358768_PP_MISC 0x0032
+#define TC358768_DSITX_DT 0x0050
+#define TC358768_FIFOSTATUS 0x00F8
+
+/* Debug (16-bit addressable) */
+#define TC358768_VBUFCTRL 0x00E0
+#define TC358768_DBG_WIDTH 0x00E2
+#define TC358768_DBG_VBLANK 0x00E4
+#define TC358768_DBG_DATA 0x00E8
+
+/* TX PHY (32-bit addressable) */
+#define TC358768_CLW_DPHYCONTTX 0x0100
+#define TC358768_D0W_DPHYCONTTX 0x0104
+#define TC358768_D1W_DPHYCONTTX 0x0108
+#define TC358768_D2W_DPHYCONTTX 0x010C
+#define TC358768_D3W_DPHYCONTTX 0x0110
+#define TC358768_CLW_CNTRL 0x0140
+#define TC358768_D0W_CNTRL 0x0144
+#define TC358768_D1W_CNTRL 0x0148
+#define TC358768_D2W_CNTRL 0x014C
+#define TC358768_D3W_CNTRL 0x0150
+
+/* TX PPI (32-bit addressable) */
+#define TC358768_STARTCNTRL 0x0204
+#define TC358768_DSITXSTATUS 0x0208
+#define TC358768_LINEINITCNT 0x0210
+#define TC358768_LPTXTIMECNT 0x0214
+#define TC358768_TCLK_HEADERCNT 0x0218
+#define TC358768_TCLK_TRAILCNT 0x021C
+#define TC358768_THS_HEADERCNT 0x0220
+#define TC358768_TWAKEUP 0x0224
+#define TC358768_TCLK_POSTCNT 0x0228
+#define TC358768_THS_TRAILCNT 0x022C
+#define TC358768_HSTXVREGCNT 0x0230
+#define TC358768_HSTXVREGEN 0x0234
+#define TC358768_TXOPTIONCNTRL 0x0238
+#define TC358768_BTACNTRL1 0x023C
+
+/* TX CTRL (32-bit addressable) */
+#define TC358768_DSI_CONTROL 0x040C
+#define TC358768_DSI_STATUS 0x0410
+#define TC358768_DSI_INT 0x0414
+#define TC358768_DSI_INT_ENA 0x0418
+#define TC358768_DSICMD_RDFIFO 0x0430
+#define TC358768_DSI_ACKERR 0x0434
+#define TC358768_DSI_ACKERR_INTENA 0x0438
+#define TC358768_DSI_ACKERR_HALT 0x043c
+#define TC358768_DSI_RXERR 0x0440
+#define TC358768_DSI_RXERR_INTENA 0x0444
+#define TC358768_DSI_RXERR_HALT 0x0448
+#define TC358768_DSI_ERR 0x044C
+#define TC358768_DSI_ERR_INTENA 0x0450
+#define TC358768_DSI_ERR_HALT 0x0454
+#define TC358768_DSI_CONFW 0x0500
+#define TC358768_DSI_LPCMD 0x0500
+#define TC358768_DSI_RESET 0x0504
+#define TC358768_DSI_INT_CLR 0x050C
+#define TC358768_DSI_START 0x0518
+
+/* DSITX CTRL (16-bit addressable) */
+#define TC358768_DSICMD_TX 0x0600
+#define TC358768_DSICMD_TYPE 0x0602
+#define TC358768_DSICMD_WC 0x0604
+#define TC358768_DSICMD_WD0 0x0610
+#define TC358768_DSICMD_WD1 0x0612
+#define TC358768_DSICMD_WD2 0x0614
+#define TC358768_DSICMD_WD3 0x0616
+#define TC358768_DSI_EVENT 0x0620
+#define TC358768_DSI_VSW 0x0622
+#define TC358768_DSI_VBPR 0x0624
+#define TC358768_DSI_VACT 0x0626
+#define TC358768_DSI_HSW 0x0628
+#define TC358768_DSI_HBPR 0x062A
+#define TC358768_DSI_HACT 0x062C
+
+/* TC358768_DSI_CONTROL (0x040C) register */
+#define TC358768_DSI_CONTROL_DIS_MODE BIT(15)
+#define TC358768_DSI_CONTROL_TXMD BIT(7)
+#define TC358768_DSI_CONTROL_HSCKMD BIT(5)
+#define TC358768_DSI_CONTROL_EOTDIS BIT(0)
+
+/* TC358768_DSI_CONFW (0x0500) register */
+#define TC358768_DSI_CONFW_MODE_SET (5 << 29)
+#define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
+#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
+
+static const char * const tc358768_supplies[] = {
+ "vddc", "vddmipi", "vddio"
+};
+
+struct tc358768_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+};
+
+struct tc358768_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(tc358768_supplies)];
+ struct clk *refclk;
+ int enabled;
+ int error;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct tc358768_dsi_output output;
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
+
+ /* Parameters for PLL programming */
+ u32 fbd; /* PLL feedback divider */
+ u32 prd; /* PLL input divider */
+ u32 frs; /* PLL Freqency range for HSCK (post divider) */
+
+ u32 dsiclk; /* pll_clk / 2 */
+};
+
+static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host
+ *host)
+{
+ return container_of(host, struct tc358768_priv, dsi_host);
+}
+
+static inline struct tc358768_priv *bridge_to_tc358768(struct drm_bridge
+ *bridge)
+{
+ return container_of(bridge, struct tc358768_priv, bridge);
+}
+
+static int tc358768_clear_error(struct tc358768_priv *priv)
+{
+ int ret = priv->error;
+
+ priv->error = 0;
+ return ret;
+}
+
+static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
+{
+ size_t count = 2;
+
+ if (priv->error)
+ return;
+
+ /* 16-bit register? */
+ if (reg < 0x100 || reg >= 0x600)
+ count = 1;
+
+ priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+}
+
+static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
+{
+ size_t count = 2;
+
+ if (priv->error)
+ return;
+
+ /* 16-bit register? */
+ if (reg < 0x100 || reg >= 0x600) {
+ *val = 0;
+ count = 1;
+ }
+
+ priv->error = regmap_bulk_read(priv->regmap, reg, val, count);
+}
+
+static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ u32 val)
+{
+ u32 tmp, orig;
+
+ tc358768_read(priv, reg, &orig);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ if (tmp != orig)
+ tc358768_write(priv, reg, tmp);
+}
+
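The width split in the helpers above mirrors the register map at the top of the file: the global/debug block (below 0x100) and the DSITX block (0x600 and up) are 16-bit registers, while the TX PHY/PPI/CTRL blocks in between are 32-bit and go over the wire as two big-endian 16-bit regmap words. A sketch of the predicate (an inference from the register list, not a function in the driver):

static unsigned int tc358768_reg_words(unsigned int reg)
{
	/* 0x0000..0x00ff and 0x0600.. are 16-bit; 0x0100..0x05ff are
	 * 32-bit and need two 16-bit bus words (count == 2 above). */
	return (reg < 0x100 || reg >= 0x600) ? 1 : 2;
}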
+static int tc358768_sw_reset(struct tc358768_priv *priv)
+{
+ /* Assert Reset */
+ tc358768_write(priv, TC358768_SYSCTL, 1);
+ /* Release Reset, Exit Sleep */
+ tc358768_write(priv, TC358768_SYSCTL, 0);
+
+ return tc358768_clear_error(priv);
+}
+
+static void tc358768_hw_enable(struct tc358768_priv *priv)
+{
+ int ret;
+
+ if (priv->enabled)
+ return;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
+ if (priv->reset_gpio)
+ usleep_range(200, 300);
+
+ /*
+ * The RESX is active low (GPIO_ACTIVE_LOW).
+ * DEASSERT (value = 0) the reset_gpio to enable the chip
+ */
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+
+ /* wait for encoder clocks to stabilize */
+ usleep_range(1000, 2000);
+
+ priv->enabled = true;
+}
+
+static void tc358768_hw_disable(struct tc358768_priv *priv)
+{
+ int ret;
+
+ if (!priv->enabled)
+ return;
+
+ /*
+ * The RESX is active low (GPIO_ACTIVE_LOW).
+ * ASSERT (value = 1) the reset_gpio to disable the chip
+ */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+ priv->enabled = false;
+}
+
+static u32 tc358768_pll_to_pclk(struct tc358768_priv *priv, u32 pll_clk)
+{
+ return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->pd_lines);
+}
+
+static u32 tc358768_pclk_to_pll(struct tc358768_priv *priv, u32 pclk)
+{
+ return (u32)div_u64((u64)pclk * priv->pd_lines, priv->dsi_lanes);
+}
+
+static int tc358768_calc_pll(struct tc358768_priv *priv,
+ const struct drm_display_mode *mode,
+ bool verify_only)
+{
+ const u32 frs_limits[] = {
+ 1000000000,
+ 500000000,
+ 250000000,
+ 125000000,
+ 62500000
+ };
+ unsigned long refclk;
+ u32 prd, target_pll, i, max_pll, min_pll;
+ u32 frs, best_diff, best_pll, best_prd, best_fbd;
+
+ target_pll = tc358768_pclk_to_pll(priv, mode->clock * 1000);
+
+ /* pll_clk = RefClk * [(FBD + 1)/ (PRD + 1)] * [1 / (2^FRS)] */
+
+ for (i = 0; i < ARRAY_SIZE(frs_limits); i++)
+ if (target_pll >= frs_limits[i])
+ break;
+
+ if (i == ARRAY_SIZE(frs_limits) || i == 0)
+ return -EINVAL;
+
+ frs = i - 1;
+ max_pll = frs_limits[i - 1];
+ min_pll = frs_limits[i];
+
+ refclk = clk_get_rate(priv->refclk);
+
+ best_diff = UINT_MAX;
+ best_pll = 0;
+ best_prd = 0;
+ best_fbd = 0;
+
+ for (prd = 0; prd < 16; ++prd) {
+ u32 divisor = (prd + 1) * (1 << frs);
+ u32 fbd;
+
+ for (fbd = 0; fbd < 512; ++fbd) {
+ u32 pll, diff;
+
+ pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
+
+ if (pll >= max_pll || pll < min_pll)
+ continue;
+
+ diff = max(pll, target_pll) - min(pll, target_pll);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_pll = pll;
+ best_prd = prd;
+ best_fbd = fbd;
+
+ if (best_diff == 0)
+ goto found;
+ }
+ }
+ }
+
+ if (best_diff == UINT_MAX) {
+ dev_err(priv->dev, "could not find suitable PLL setup\n");
+ return -EINVAL;
+ }
+
+found:
+ if (verify_only)
+ return 0;
+
+ priv->fbd = best_fbd;
+ priv->prd = best_prd;
+ priv->frs = frs;
+ priv->dsiclk = best_pll / 2;
+
+ return 0;
+}
+
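To make the search concrete, a worked example with assumed numbers, none of which come from the patch: 1080p60 RGB888 (pclk 148500 kHz, 24 parallel input lines) over four DSI lanes with a 26 MHz refclk.

/* target_pll = 148500 kHz * 24 / 4 = 891 MHz, inside the 500..1000 MHz
 * band, so FRS = 0; PRD = 12, FBD = 445 then give
 * pll = 26 MHz * 446 / 13 = 892 MHz (1 MHz off) and dsiclk = 446 MHz. */
static unsigned long long example_pll_hz(void)
{
	const unsigned long long refclk = 26000000;	/* assumed */
	const unsigned int fbd = 445, prd = 12, frs = 0;

	return refclk * (fbd + 1) / ((prd + 1) * (1u << frs));
}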
+static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct device_node *ep;
+ int ret;
+
+ if (dev->lanes > 4) {
+ dev_err(priv->dev, "unsupported number of data lanes (%u)\n",
+ dev->lanes);
+ return -EINVAL;
+ }
+
+ /*
+ * tc358768 supports both Video and Pulse mode, but the driver only
+ * implements Video (event) mode currently
+ */
+ if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * tc358768 supports RGB888, RGB666, RGB666_PACKED and RGB565, but only
+ * RGB888 is verified.
+ */
+ if (dev->format != MIPI_DSI_FMT_RGB888) {
+ dev_warn(priv->dev, "Only MIPI_DSI_FMT_RGB888 tested!\n");
+ return -ENOTSUPP;
+ }
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel,
+ &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ priv->output.dev = dev;
+ priv->output.bridge = bridge;
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+ ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+ if (ep) {
+ ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
+
+ of_node_put(ep);
+ }
+
+ if (ret)
+ priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ drm_bridge_add(&priv->bridge);
+
+ return 0;
+}
+
+static int tc358768_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+
+ drm_bridge_remove(&priv->bridge);
+ if (priv->output.panel)
+ drm_panel_bridge_remove(priv->output.bridge);
+
+ return 0;
+}
+
+static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+ struct mipi_dsi_packet packet;
+ int ret;
+
+ if (!priv->enabled) {
+ dev_err(priv->dev, "Bridge is not enabled\n");
+ return -ENODEV;
+ }
+
+ if (msg->rx_len) {
+ dev_warn(priv->dev, "MIPI rx is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (msg->tx_len > 8) {
+ dev_warn(priv->dev, "Maximum 8 byte MIPI tx is supported\n");
+ return -ENOTSUPP;
+ }
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret)
+ return ret;
+
+ if (mipi_dsi_packet_format_is_short(msg->type)) {
+ tc358768_write(priv, TC358768_DSICMD_TYPE,
+ (0x10 << 8) | (packet.header[0] & 0x3f));
+ tc358768_write(priv, TC358768_DSICMD_WC, 0);
+ tc358768_write(priv, TC358768_DSICMD_WD0,
+ (packet.header[2] << 8) | packet.header[1]);
+ } else {
+ int i;
+
+ tc358768_write(priv, TC358768_DSICMD_TYPE,
+ (0x40 << 8) | (packet.header[0] & 0x3f));
+ tc358768_write(priv, TC358768_DSICMD_WC, packet.payload_length);
+ for (i = 0; i < packet.payload_length; i += 2) {
+ u16 val = packet.payload[i];
+
+ if (i + 1 < packet.payload_length)
+ val |= packet.payload[i + 1] << 8;
+
+ tc358768_write(priv, TC358768_DSICMD_WD0 + i, val);
+ }
+ }
+
+ /* start transfer */
+ tc358768_write(priv, TC358768_DSICMD_TX, 1);
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
+ dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+ else
+ ret = packet.size;
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
+ .attach = tc358768_dsi_host_attach,
+ .detach = tc358768_dsi_host_detach,
+ .transfer = tc358768_dsi_host_transfer,
+};
+
+static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
+ dev_err(priv->dev, "needs atomic updates support\n");
+ return -ENOTSUPP;
+ }
+
+ return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ flags);
+}
+
+static enum drm_mode_status
+tc358768_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ if (tc358768_calc_pll(priv, mode, true))
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static void tc358768_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ int ret;
+
+ /* set FrmStop */
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(15), BIT(15));
+
+ /* wait at least for one frame */
+ msleep(50);
+
+ /* clear PP_en */
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), 0);
+
+ /* set RstPtr */
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(14), BIT(14));
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
+ dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+}
+
+static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ tc358768_hw_disable(priv);
+}
+
+static int tc358768_setup_pll(struct tc358768_priv *priv,
+ const struct drm_display_mode *mode)
+{
+ u32 fbd, prd, frs;
+ int ret;
+
+ ret = tc358768_calc_pll(priv, mode, false);
+ if (ret) {
+ dev_err(priv->dev, "PLL calculation failed: %d\n", ret);
+ return ret;
+ }
+
+ fbd = priv->fbd;
+ prd = priv->prd;
+ frs = priv->frs;
+
+ dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ clk_get_rate(priv->refclk), fbd, prd, frs);
+ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
+ priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+ mode->clock * 1000);
+
+ /* PRD[15:12] FBD[8:0] */
+ tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd);
+
+ /* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
+ tc358768_write(priv, TC358768_PLLCTL1,
+ (frs << 10) | (0x2 << 8) | BIT(1) | BIT(0));
+
+ /* wait for lock */
+ usleep_range(1000, 2000);
+
+ /* FRS[11:10] LBWS[9:8] CKEN[4] PLL_CKEN[4] RESETB[1] EN[0] */
+ tc358768_write(priv, TC358768_PLLCTL1,
+ (frs << 10) | (0x2 << 8) | BIT(4) | BIT(1) | BIT(0));
+
+ return tc358768_clear_error(priv);
+}
+
+#define TC358768_PRECISION 1000
+static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
+{
+ return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
+}
+
+static u32 tc358768_to_ns(u32 nsk)
+{
+ return (nsk / TC358768_PRECISION);
+}
+
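These helpers keep clock periods in 1/1000-ns fixed point ("nsk"), so sub-nanosecond periods survive the integer math below. A worked example with an assumed byte clock:

/* Assume a 111.5 MHz DSI byte clock -> period = 8968 nsk (8.968 ns).
 * The 100 us LP11 init window then needs
 * (100000 * 1000 + 8968) / 8968 - 1 = 11150 byte-clock cycles. */
static unsigned int example_lineinitcnt(void)
{
	const unsigned int dsibclk_nsk = 8968;	/* assumed */

	return (100000u * 1000u + dsibclk_nsk) / dsibclk_nsk - 1;
}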
+static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ u32 val, val2, lptxcnt, hact, data_type;
+ const struct drm_display_mode *mode;
+ u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+ u32 dsiclk, dsibclk;
+ int ret, i;
+
+ tc358768_hw_enable(priv);
+
+ ret = tc358768_sw_reset(priv);
+ if (ret) {
+ dev_err(priv->dev, "Software reset failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ mode = &bridge->encoder->crtc->state->adjusted_mode;
+ ret = tc358768_setup_pll(priv, mode);
+ if (ret) {
+ dev_err(priv->dev, "PLL setup failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ dsiclk = priv->dsiclk;
+ dsibclk = dsiclk / 4;
+
+ /* Data Format Control Register */
+ val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB888:
+ val |= (0x3 << 4);
+ hact = mode->hdisplay * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= (0x4 << 4);
+ hact = mode->hdisplay * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= (0x4 << 4) | BIT(3);
+ hact = mode->hdisplay * 18 / 8;
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ val |= (0x5 << 4);
+ hact = mode->hdisplay * 2;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ dev_err(priv->dev, "Invalid data format (%u)\n",
+ dsi_dev->format);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ /* VSDly[9:0] */
+ tc358768_write(priv, TC358768_VSDLY, 1);
+
+ tc358768_write(priv, TC358768_DATAFMT, val);
+ tc358768_write(priv, TC358768_DSITX_DT, data_type);
+
+ /* Enable D-PHY (HiZ->LP11) */
+ tc358768_write(priv, TC358768_CLW_CNTRL, 0x0000);
+ /* Enable lanes */
+ for (i = 0; i < dsi_dev->lanes; i++)
+ tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+
+ /* DSI Timings */
+ dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+ dsibclk);
+ dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+ ui_nsk = dsiclk_nsk / 2;
+ phy_delay_nsk = dsibclk_nsk + 2 * dsiclk_nsk;
+ dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+ dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+ dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
+ dev_dbg(priv->dev, "phy_delay_nsk: %u\n", phy_delay_nsk);
+
+ /* LP11 > 100us for D-PHY Rx Init */
+ val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+ dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_LINEINITCNT, val);
+
+ /* LPTimeCnt > 50ns */
+ val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
+ lptxcnt = val;
+ dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+
+ /* 38ns < TCLK_PREPARE < 95ns */
+ val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
+ /* TCLK_ZERO > 300ns */
+ val2 = tc358768_ns_to_cnt(300 + tc358768_to_ns(3 * ui_nsk),
+ dsibclk_nsk);
+ val |= (val2 - tc358768_to_ns(phy_delay_nsk - dsibclk_nsk)) << 8;
+ dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+ /* TCLK_TRAIL > 60ns + 3*UI */
+ val = 60 + tc358768_to_ns(3 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 5;
+ dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+ /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+ val = 50 + tc358768_to_ns(4 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ /* THS_ZERO > 145ns + 10*UI */
+ val2 = tc358768_ns_to_cnt(145 - tc358768_to_ns(ui_nsk), dsibclk_nsk);
+ val |= (val2 - tc358768_to_ns(phy_delay_nsk)) << 8;
+ dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+ /* TWAKEUP > 1ms in lptxcnt steps */
+ val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
+ val = val / (lptxcnt + 1) - 1;
+ dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TWAKEUP, val);
+
+ /* TCLK_POSTCNT > 60ns + 52*UI */
+ val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+ dsibclk_nsk) - 3;
+ dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+ /* 60ns + 4*UI < THS_TRAIL < 105ns + 12*UI */
+ val = tc358768_ns_to_cnt(60 + tc358768_to_ns(15 * ui_nsk),
+ dsibclk_nsk) - 5;
+ dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+ val = BIT(0);
+ for (i = 0; i < dsi_dev->lanes; i++)
+ val |= BIT(i + 1);
+ tc358768_write(priv, TC358768_HSTXVREGEN, val);
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
+
+ /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+ val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+ dsibclk_nsk) - 2;
+ val |= val2 << 16;
+ dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ tc358768_write(priv, TC358768_BTACNTRL1, val);
+
+ /* START[0] */
+ tc358768_write(priv, TC358768_STARTCNTRL, 1);
+
+ /* Set event mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+ /* vsw (+ vbp) */
+ tc358768_write(priv, TC358768_DSI_VSW,
+ mode->vtotal - mode->vsync_start);
+ /* vbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_VBPR, 0);
+ /* vact */
+ tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+
+ /* (hsw + hbp) * byteclk * ndl / pclk */
+ val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+ ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+ mode->clock * 1000);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+ /* hbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_HBPR, 0);
+ /* hact (bytes) */
+ tc358768_write(priv, TC358768_DSI_HACT, hact);
+
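Continuing the assumed 1080p60 numbers (byte clock 111.5 MHz, four lanes), the blanking conversion above scales pixel-clock time into byte-clock ticks; a sketch with assumed values, not driver code:

/* hsw + hbp = htotal - hsync_start = 2200 - 2008 = 192 pixels;
 * 192 * 111500000 * 4 / 148500000 = 576 byte-clock ticks. */
static unsigned int example_dsi_hsw(void)
{
	const unsigned long long byteclk = 111500000, pclk = 148500000;

	return (unsigned int)((2200 - 2008) * byteclk * 4 / pclk);
}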
+ /* VSYNC polarity */
+ if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
+ /* HSYNC polarity */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
+
+ /* Start DSI Tx */
+ tc358768_write(priv, TC358768_DSI_START, 0x1);
+
+ /* Configure DSI_Control register */
+ val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= TC358768_DSI_CONTROL_TXMD | TC358768_DSI_CONTROL_HSCKMD |
+ 0x3 << 1 | TC358768_DSI_CONTROL_EOTDIS;
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ val = TC358768_DSI_CONFW_MODE_SET | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= (dsi_dev->lanes - 1) << 1;
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM))
+ val |= TC358768_DSI_CONTROL_TXMD;
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ val |= TC358768_DSI_CONTROL_HSCKMD;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
+ val |= TC358768_DSI_CONTROL_EOTDIS;
+
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= TC358768_DSI_CONTROL_DIS_MODE; /* DSI mode */
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+ dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+}
+
+static void tc358768_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ int ret;
+
+ if (!priv->enabled) {
+ dev_err(priv->dev, "Bridge is not enabled\n");
+ return;
+ }
+
+ /* clear FrmStop and RstPtr */
+ tc358768_update_bits(priv, TC358768_PP_MISC, 0x3 << 14, 0);
+
+ /* set PP_en */
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+ dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+}
+
+static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+ .attach = tc358768_bridge_attach,
+ .mode_valid = tc358768_bridge_mode_valid,
+ .pre_enable = tc358768_bridge_pre_enable,
+ .enable = tc358768_bridge_enable,
+ .disable = tc358768_bridge_disable,
+ .post_disable = tc358768_bridge_post_disable,
+};
+
+static const struct drm_bridge_timings default_tc358768_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static bool tc358768_is_reserved_reg(unsigned int reg)
+{
+ switch (reg) {
+ case 0x114 ... 0x13f:
+ case 0x200:
+ case 0x20c:
+ case 0x400 ... 0x408:
+ case 0x41c ... 0x42f:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tc358768_writeable_reg(struct device *dev, unsigned int reg)
+{
+ if (tc358768_is_reserved_reg(reg))
+ return false;
+
+ switch (reg) {
+ case TC358768_CHIPID:
+ case TC358768_FIFOSTATUS:
+ case TC358768_DSITXSTATUS ... (TC358768_DSITXSTATUS + 2):
+ case TC358768_DSI_CONTROL ... (TC358768_DSI_INT_ENA + 2):
+ case TC358768_DSICMD_RDFIFO ... (TC358768_DSI_ERR_HALT + 2):
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool tc358768_readable_reg(struct device *dev, unsigned int reg)
+{
+ if (tc358768_is_reserved_reg(reg))
+ return false;
+
+ switch (reg) {
+ case TC358768_STARTCNTRL:
+ case TC358768_DSI_CONFW ... (TC358768_DSI_CONFW + 2):
+ case TC358768_DSI_INT_CLR ... (TC358768_DSI_INT_CLR + 2):
+ case TC358768_DSI_START ... (TC358768_DSI_START + 2):
+ case TC358768_DBG_DATA:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config tc358768_regmap_config = {
+ .name = "tc358768",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = TC358768_DSI_HACT,
+ .cache_type = REGCACHE_NONE,
+ .writeable_reg = tc358768_writeable_reg,
+ .readable_reg = tc358768_readable_reg,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
+static const struct i2c_device_id tc358768_i2c_ids[] = {
+ { "tc358768", 0 },
+ { "tc358778", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);
+
+static const struct of_device_id tc358768_of_ids[] = {
+ { .compatible = "toshiba,tc358768", },
+ { .compatible = "toshiba,tc358778", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358768_of_ids);
+
+static int tc358768_get_regulators(struct tc358768_priv *priv)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(priv->supplies); ++i)
+ priv->supplies[i].supply = tc358768_supplies[i];
+
+ ret = devm_regulator_bulk_get(priv->dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "failed to get regulators: %d\n", ret);
+
+ return ret;
+}
+
+static int tc358768_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tc358768_priv *priv;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->dev = dev;
+
+ ret = tc358768_get_regulators(priv);
+ if (ret)
+ return ret;
+
+ priv->refclk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(priv->refclk))
+ return PTR_ERR(priv->refclk);
+
+ /*
+ * RESX is active low; to disable the tc358768 initially (keep it in reset),
+ * the gpio line must be LOW. This is the ASSERTED state of
+ * GPIO_ACTIVE_LOW (GPIOD_OUT_HIGH == ASSERTED).
+ */
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ priv->regmap = devm_regmap_init_i2c(client, &tc358768_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "Failed to init regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->dsi_host.dev = dev;
+ priv->dsi_host.ops = &tc358768_dsi_host_ops;
+
+ priv->bridge.funcs = &tc358768_bridge_funcs;
+ priv->bridge.timings = &default_tc358768_timings;
+ priv->bridge.of_node = np;
+
+ i2c_set_clientdata(client, priv);
+
+ return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static int tc358768_i2c_remove(struct i2c_client *client)
+{
+ struct tc358768_priv *priv = i2c_get_clientdata(client);
+
+ mipi_dsi_host_unregister(&priv->dsi_host);
+
+ return 0;
+}
+
+static struct i2c_driver tc358768_driver = {
+ .driver = {
+ .name = "tc358768",
+ .of_match_table = tc358768_of_ids,
+ },
+ .id_table = tc358768_i2c_ids,
+ .probe = tc358768_i2c_probe,
+ .remove = tc358768_i2c_remove,
+};
+module_i2c_driver(tc358768_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("TC358768AXBG/TC358778XBG DSI bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 3d74129b2995..97d8129760e9 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -42,11 +42,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
return container_of(bridge, struct thc63_dev, bridge);
}
-static int thc63_attach(struct drm_bridge *bridge)
+static int thc63_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge);
+ return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 9a2dd986afa5..6ad688b320ae 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -51,6 +51,7 @@
#define SN_ENH_FRAME_REG 0x5A
#define VSTREAM_ENABLE BIT(3)
#define SN_DATA_FORMAT_REG 0x5B
+#define BPP_18_RGB BIT(0)
#define SN_HPD_DISABLE_REG 0x5C
#define HPD_DISABLE BIT(0)
#define SN_AUX_WDATA_REG(x) (0x64 + (x))
@@ -100,6 +101,7 @@ struct ti_sn_bridge {
struct drm_panel *panel;
struct gpio_desc *enable_gpio;
struct regulator_bulk_data supplies[SN_REGULATOR_SUPPLY_NUM];
+ int dp_lanes;
};
static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
@@ -264,7 +266,8 @@ static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
pdata->supplies);
}
-static int ti_sn_bridge_attach(struct drm_bridge *bridge)
+static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
int ret, val;
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
@@ -275,6 +278,11 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
.node = NULL,
};
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ret = drm_connector_init(bridge->dev, &pdata->connector,
&ti_sn_bridge_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
@@ -312,7 +320,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
goto err_dsi_host;
}
- /* TODO: setting to 4 lanes always for now */
+ /* TODO: setting to 4 MIPI lanes always for now */
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
@@ -417,6 +425,32 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn_bridge *pdata)
REFCLK_FREQ(i));
}
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn_bridge *pdata)
+{
+ unsigned int bit_rate_mhz, clk_freq_mhz;
+ unsigned int val;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ /* set DSIA clk frequency */
+ bit_rate_mhz = (mode->clock / 1000) *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+
+ /* for each increment in val, frequency increases by 5MHz */
+ val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
+ (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
+ regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+}
+
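As a worked example with assumed numbers (1080p60 RGB888 over four DSI lanes, and assuming MIN_DSI_CLK_FREQ_MHZ is 40, which is defined outside this hunk):

/* bit rate = (148500 / 1000) * 24 = 3552 MHz, per-lane DDR clock
 * = 3552 / (4 * 2) = 444 MHz; the register encodes 5 MHz steps, so
 * val = 40/5 + (444 - 40)/5 = 8 + 80 = 88, i.e. 440 MHz programmed. */
static unsigned int example_dsia_clk_val(void)
{
	const unsigned int min_mhz = 40;	/* assumed */
	unsigned int clk_freq_mhz = ((148500 / 1000) * 24) / (4 * 2);

	return (min_mhz / 5) + (((clk_freq_mhz - min_mhz) / 5) & 0xFF);
}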
+static unsigned int ti_sn_bridge_get_bpp(struct ti_sn_bridge *pdata)
+{
+ if (pdata->connector.display_info.bpc <= 6)
+ return 18;
+ else
+ return 24;
+}
+
/**
* LUT index corresponds to the register value and
* LUT values correspond to the DP data rates supported
@@ -426,32 +460,106 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static void ti_sn_bridge_set_dsi_dp_rate(struct ti_sn_bridge *pdata)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn_bridge *pdata)
{
- unsigned int bit_rate_mhz, clk_freq_mhz, dp_rate_mhz;
- unsigned int val, i;
+ unsigned int bit_rate_khz, dp_rate_mhz;
+ unsigned int i;
struct drm_display_mode *mode =
&pdata->bridge.encoder->crtc->state->adjusted_mode;
- /* set DSIA clk frequency */
- bit_rate_mhz = (mode->clock / 1000) *
- mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
- clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+ /* Calculate minimum bit rate based on our pixel clock. */
+ bit_rate_khz = mode->clock * ti_sn_bridge_get_bpp(pdata);
- /* for each increment in val, frequency increases by 5MHz */
- val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
- (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
- regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+ /* Calculate minimum DP data rate, taking 80% as per DP spec */
+ dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * DP_CLK_FUDGE_NUM,
+ 1000 * pdata->dp_lanes * DP_CLK_FUDGE_DEN);
- /* set DP data rate */
- dp_rate_mhz = ((bit_rate_mhz / pdata->dsi->lanes) * DP_CLK_FUDGE_NUM) /
- DP_CLK_FUDGE_DEN;
- for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
+ for (i = 1; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
if (ti_sn_bridge_dp_rate_lut[i] > dp_rate_mhz)
break;
- regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
- DP_DATARATE_MASK, DP_DATARATE(i));
+ return i;
+}
+
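With the same assumed 1080p60 stream at 24 bpp on four DP lanes, and assuming DP_CLK_FUDGE_NUM/DEN encode the 10/8 (80% efficiency) factor defined outside this hunk:

/* bit rate = 148500 kHz * 24 = 3564000 kbps; minimum per-lane rate
 * = DIV_ROUND_UP(3564000 * 10, 1000 * 4 * 8) = 1114 MHz, so the scan
 * starts at index 1 of the LUT (1620 MHz, RBR). */
static unsigned int example_min_dp_rate_mhz(void)
{
	const unsigned int num = 10, den = 8;	/* assumed fudge values */
	const unsigned int bit_rate_khz = 148500 * 24;
	const unsigned int div = 1000 * 4 * den;

	return (bit_rate_khz * num + div - 1) / div;
}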
+static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata,
+ bool rate_valid[])
+{
+ unsigned int rate_per_200khz;
+ unsigned int rate_mhz;
+ u8 dpcd_val;
+ int ret;
+ int i, j;
+
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_EDP_DPCD_REV, &dpcd_val);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read eDP rev (%d), assuming 1.1\n", ret);
+ dpcd_val = DP_EDP_11;
+ }
+
+ if (dpcd_val >= DP_EDP_14) {
+ /* eDP 1.4 devices must provide a custom table */
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ ret = drm_dp_dpcd_read(&pdata->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+
+ if (ret != sizeof(sink_rates)) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read supported rate table (%d)\n", ret);
+
+ /* By zeroing we'll fall back to DP_MAX_LINK_RATE. */
+ memset(sink_rates, 0, sizeof(sink_rates));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ rate_per_200khz = le16_to_cpu(sink_rates[i]);
+
+ if (!rate_per_200khz)
+ break;
+
+ rate_mhz = rate_per_200khz * 200 / 1000;
+ for (j = 0;
+ j < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+ j++) {
+ if (ti_sn_bridge_dp_rate_lut[j] == rate_mhz)
+ rate_valid[j] = true;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); i++) {
+ if (rate_valid[i])
+ return;
+ }
+ DRM_DEV_ERROR(pdata->dev,
+ "No matching eDP rates in table; falling back\n");
+ }
+
+ /* On older versions best we can do is use DP_MAX_LINK_RATE */
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LINK_RATE, &dpcd_val);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read max rate (%d); assuming 5.4 GHz\n",
+ ret);
+ dpcd_val = DP_LINK_BW_5_4;
+ }
+
+ switch (dpcd_val) {
+ default:
+ DRM_DEV_ERROR(pdata->dev,
+ "Unexpected max rate (%#x); assuming 5.4 GHz\n",
+ (int)dpcd_val);
+ /* fall through */
+ case DP_LINK_BW_5_4:
+ rate_valid[7] = 1;
+ /* fall through */
+ case DP_LINK_BW_2_7:
+ rate_valid[4] = 1;
+ /* fall through */
+ case DP_LINK_BW_1_62:
+ rate_valid[1] = 1;
+ break;
+ }
}
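The table read above holds little-endian counts of 200 kHz units (per the eDP 1.4 DPCD layout), so mapping an entry onto the MHz-based LUT is a plain rescale:

/* e.g. 8100 -> 1620 MHz (RBR), 13500 -> 2700 MHz (HBR),
 * 27000 -> 5400 MHz (HBR2) */
static unsigned int sink_rate_to_mhz(unsigned int rate_per_200khz)
{
	return rate_per_200khz * 200 / 1000;
}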
static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
@@ -493,24 +601,30 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
usleep_range(10000, 10500); /* 10ms delay recommended by spec */
}
-static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+static unsigned int ti_sn_get_max_lanes(struct ti_sn_bridge *pdata)
{
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
- unsigned int val;
+ u8 data;
int ret;
- /* DSI_A lane config */
- val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
- regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
- CHA_DSI_LANES_MASK, val);
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LANE_COUNT, &data);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read lane count (%d); assuming 4\n", ret);
+ return 4;
+ }
- /* DP lane config */
- val = DP_NUM_LANES(pdata->dsi->lanes - 1);
- regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
- val);
+ return data & DP_LANE_COUNT_MASK;
+}
- /* set dsi/dp clk frequency value */
- ti_sn_bridge_set_dsi_dp_rate(pdata);
+static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
+ const char **last_err_str)
+{
+ unsigned int val;
+ int ret;
+
+ /* set dp clk frequency value */
+ regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
+ DP_DATARATE_MASK, DP_DATARATE(dp_rate_idx));
/* enable DP PLL */
regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 1);
@@ -519,10 +633,62 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
val & DPPLL_SRC_DP_PLL_LOCK, 1000,
50 * 1000);
if (ret) {
- DRM_ERROR("DP_PLL_LOCK polling failed (%d)\n", ret);
- return;
+ *last_err_str = "DP_PLL_LOCK polling failed";
+ goto exit;
+ }
+
+ /* Semi auto link training mode */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+ val == ML_TX_MAIN_LINK_OFF ||
+ val == ML_TX_NORMAL_MODE, 1000,
+ 500 * 1000);
+ if (ret) {
+ *last_err_str = "Training complete polling failed";
+ } else if (val == ML_TX_MAIN_LINK_OFF) {
+ *last_err_str = "Link training failed, link is off";
+ ret = -EIO;
}
+exit:
+ /* Disable the PLL if we failed */
+ if (ret)
+ regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);
+
+ return ret;
+}
+
+static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ bool rate_valid[ARRAY_SIZE(ti_sn_bridge_dp_rate_lut)] = { };
+ const char *last_err_str = "No supported DP rate";
+ int dp_rate_idx;
+ unsigned int val;
+ int ret = -EINVAL;
+
+ /*
+ * Run with the maximum number of lanes that the DP sink supports.
+ *
+ * Depending on the use case, we might want to revisit this later because:
+ * - It's plausible that someone may have run fewer lines to the
+ * sink than the sink actually supports, assuming that the lines
+ * will just be driven at a higher rate.
+ * - The DP spec seems to indicate that it's more important to minimize
+ * the number of lanes than the link rate.
+ *
+ * If we do revisit, it would be important to measure the power impact.
+ */
+ pdata->dp_lanes = ti_sn_get_max_lanes(pdata);
+
+ /* DSI_A lane config */
+ val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
+ regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
+ CHA_DSI_LANES_MASK, val);
+
+ /* set dsi clk frequency value */
+ ti_sn_bridge_set_dsi_rate(pdata);
+
/**
* The SN65DSI86 only supports ASSR Display Authentication method and
* this method is enabled by default. An eDP panel must support this
@@ -532,17 +698,30 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
- /* Semi auto link training mode */
- regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
- ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
- val == ML_TX_MAIN_LINK_OFF ||
- val == ML_TX_NORMAL_MODE, 1000,
- 500 * 1000);
+ /* Set the DP output format (18 bpp or 24 bpp) */
+ val = (ti_sn_bridge_get_bpp(pdata) == 18) ? BPP_18_RGB : 0;
+ regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);
+
+ /* DP lane config */
+ val = DP_NUM_LANES(min(pdata->dp_lanes, 3));
+ regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
+ val);
+
+ ti_sn_bridge_read_valid_rates(pdata, rate_valid);
+
+ /* Train until we run out of rates */
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata);
+ dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+ dp_rate_idx++) {
+ if (!rate_valid[dp_rate_idx])
+ continue;
+
+ ret = ti_sn_link_training(pdata, dp_rate_idx, &last_err_str);
+ if (!ret)
+ break;
+ }
if (ret) {
- DRM_ERROR("Training complete polling failed (%d)\n", ret);
- return;
- } else if (val == ML_TX_MAIN_LINK_OFF) {
- DRM_ERROR("Link training failed, link is off\n");
+ DRM_DEV_ERROR(pdata->dev, "%s (%d)\n", last_err_str, ret);
return;
}
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index f195a4732e0b..e3eb6364c0f7 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -4,14 +4,12 @@
* Author: Jyri Sarha <jsarha@ti.com>
*/
-#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -24,16 +22,13 @@
struct tfp410 {
struct drm_bridge bridge;
struct drm_connector connector;
- unsigned int connector_type;
u32 bus_format;
- struct i2c_adapter *ddc;
- struct gpio_desc *hpd;
- int hpd_irq;
struct delayed_work hpd_work;
struct gpio_desc *powerdown;
struct drm_bridge_timings timings;
+ struct drm_bridge *next_bridge;
struct device *dev;
};
@@ -56,13 +51,18 @@ static int tfp410_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (!dvi->ddc)
- goto fallback;
+ edid = drm_bridge_get_edid(dvi->next_bridge, connector);
+ if (IS_ERR_OR_NULL(edid)) {
+ if (edid != ERR_PTR(-ENOTSUPP))
+ DRM_INFO("EDID read failed. Fallback to standard modes\n");
- edid = drm_get_edid(connector, dvi->ddc);
- if (!edid) {
- DRM_INFO("EDID read failed. Fallback to standard modes\n");
- goto fallback;
+ /*
+ * No EDID: fall back on the XGA standard modes and prefer a mode
+ * pretty much anything can handle.
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+ drm_set_preferred_mode(connector, 1024, 768);
+ return ret;
}
drm_connector_update_edid_property(connector, edid);
@@ -72,15 +72,6 @@ static int tfp410_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
-
-fallback:
- /* No EDID, fallback on the XGA standard modes */
- ret = drm_add_modes_noedid(connector, 1920, 1200);
-
- /* And prefer a mode pretty much anything can handle */
- drm_set_preferred_mode(connector, 1024, 768);
-
- return ret;
}
static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
@@ -92,21 +83,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- if (dvi->hpd) {
- if (gpiod_get_value_cansleep(dvi->hpd))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
-
- if (dvi->ddc) {
- if (drm_probe_ddc(dvi->ddc))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
-
- return connector_status_unknown;
+ return drm_bridge_detect(dvi->next_bridge);
}
static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -118,27 +95,60 @@ static const struct drm_connector_funcs tfp410_con_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int tfp410_attach(struct drm_bridge *bridge)
+static void tfp410_hpd_work_func(struct work_struct *work)
+{
+ struct tfp410 *dvi;
+
+ dvi = container_of(work, struct tfp410, hpd_work.work);
+
+ if (dvi->bridge.dev)
+ drm_helper_hpd_irq_event(dvi->bridge.dev);
+}
+
+static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
+{
+ struct tfp410 *dvi = arg;
+
+ mod_delayed_work(system_wq, &dvi->hpd_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+}
+
+static int tfp410_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
+ ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
if (!bridge->encoder) {
dev_err(dvi->dev, "Missing encoder\n");
return -ENODEV;
}
- if (dvi->hpd_irq >= 0)
+ if (dvi->next_bridge->ops & DRM_BRIDGE_OP_DETECT)
dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
else
dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ if (dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
+ drm_bridge_hpd_enable(dvi->next_bridge, tfp410_hpd_callback,
+ dvi);
+ }
+
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
&tfp410_con_funcs,
- dvi->connector_type,
- dvi->ddc);
+ dvi->next_bridge->type,
+ dvi->next_bridge->ddc);
if (ret) {
dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n",
ret);
@@ -148,12 +158,21 @@ static int tfp410_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector,
- bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
return 0;
}
+static void tfp410_detach(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ if (dvi->connector.dev && dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_disable(dvi->next_bridge);
+ cancel_delayed_work_sync(&dvi->hpd_work);
+ }
+}
+
static void tfp410_enable(struct drm_bridge *bridge)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
@@ -168,31 +187,25 @@ static void tfp410_disable(struct drm_bridge *bridge)
gpiod_set_value_cansleep(dvi->powerdown, 1);
}
-static const struct drm_bridge_funcs tfp410_bridge_funcs = {
- .attach = tfp410_attach,
- .enable = tfp410_enable,
- .disable = tfp410_disable,
-};
-
-static void tfp410_hpd_work_func(struct work_struct *work)
+static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
- struct tfp410 *dvi;
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
- dvi = container_of(work, struct tfp410, hpd_work.work);
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
- if (dvi->bridge.dev)
- drm_helper_hpd_irq_event(dvi->bridge.dev);
+ return MODE_OK;
}
-static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
-{
- struct tfp410 *dvi = arg;
-
- mod_delayed_work(system_wq, &dvi->hpd_work,
- msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
-
- return IRQ_HANDLED;
-}
+static const struct drm_bridge_funcs tfp410_bridge_funcs = {
+ .attach = tfp410_attach,
+ .detach = tfp410_detach,
+ .enable = tfp410_enable,
+ .disable = tfp410_disable,
+ .mode_valid = tfp410_mode_valid,
+};
static const struct drm_bridge_timings tfp410_default_timings = {
.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
@@ -271,51 +284,9 @@ static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
return 0;
}
-static int tfp410_get_connector_properties(struct tfp410 *dvi)
-{
- struct device_node *connector_node, *ddc_phandle;
- int ret = 0;
-
- /* port@1 is the connector node */
- connector_node = of_graph_get_remote_node(dvi->dev->of_node, 1, -1);
- if (!connector_node)
- return -ENODEV;
-
- if (of_device_is_compatible(connector_node, "hdmi-connector"))
- dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- else
- dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
- "hpd", 0, GPIOD_IN, "hpd");
- if (IS_ERR(dvi->hpd)) {
- ret = PTR_ERR(dvi->hpd);
- dvi->hpd = NULL;
- if (ret == -ENOENT)
- ret = 0;
- else
- goto fail;
- }
-
- ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
- if (!ddc_phandle)
- goto fail;
-
- dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
- if (dvi->ddc)
- dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
- else
- ret = -EPROBE_DEFER;
-
- of_node_put(ddc_phandle);
-
-fail:
- of_node_put(connector_node);
- return ret;
-}
-
static int tfp410_init(struct device *dev, bool i2c)
{
+ struct device_node *node;
struct tfp410 *dvi;
int ret;
@@ -327,21 +298,31 @@ static int tfp410_init(struct device *dev, bool i2c)
dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
if (!dvi)
return -ENOMEM;
+
+ dvi->dev = dev;
dev_set_drvdata(dev, dvi);
dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
- dvi->dev = dev;
+ dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
ret = tfp410_parse_timings(dvi, i2c);
if (ret)
- goto fail;
+ return ret;
- ret = tfp410_get_connector_properties(dvi);
- if (ret)
- goto fail;
+ /* Get the next bridge, connected to port@1. */
+ node = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!node)
+ return -ENODEV;
+
+ dvi->next_bridge = of_drm_find_bridge(node);
+ of_node_put(node);
+ if (!dvi->next_bridge)
+ return -EPROBE_DEFER;
+
+ /* Get the powerdown GPIO. */
dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
GPIOD_OUT_HIGH);
if (IS_ERR(dvi->powerdown)) {
@@ -349,48 +330,18 @@ static int tfp410_init(struct device *dev, bool i2c)
return PTR_ERR(dvi->powerdown);
}
- if (dvi->hpd)
- dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
- else
- dvi->hpd_irq = -ENXIO;
-
- if (dvi->hpd_irq >= 0) {
- INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
-
- ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
- NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "hdmi-hpd", dvi);
- if (ret) {
- DRM_ERROR("failed to register hpd interrupt\n");
- goto fail;
- }
- }
-
+ /* Register the DRM bridge. */
drm_bridge_add(&dvi->bridge);
return 0;
-fail:
- i2c_put_adapter(dvi->ddc);
- if (dvi->hpd)
- gpiod_put(dvi->hpd);
- return ret;
}
static int tfp410_fini(struct device *dev)
{
struct tfp410 *dvi = dev_get_drvdata(dev);
- if (dvi->hpd_irq >= 0)
- cancel_delayed_work_sync(&dvi->hpd_work);
-
drm_bridge_remove(&dvi->bridge);
- if (dvi->ddc)
- i2c_put_adapter(dvi->ddc);
- if (dvi->hpd)
- gpiod_put(dvi->hpd);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
new file mode 100644
index 000000000000..514cbf0eac75
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPD12S015 HDMI ESD protection & level shifter chip driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific encoder-opa362 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+
+struct tpd12s015_device {
+ struct drm_bridge bridge;
+
+ struct gpio_desc *ct_cp_hpd_gpio;
+ struct gpio_desc *ls_oe_gpio;
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+
+ struct drm_bridge *next_bridge;
+};
+
+static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tpd12s015_device, bridge);
+}
+
+static int tpd12s015_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ bridge, flags);
+ if (ret < 0)
+ return ret;
+
+ gpiod_set_value_cansleep(tpd->ls_oe_gpio, 1);
+
+	/* The DC-DC converter needs at most 300 us to reach 90% of 5 V. */
+ usleep_range(300, 1000);
+
+ return 0;
+}
+
+static void tpd12s015_detach(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ls_oe_gpio, 0);
+}
+
+static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ if (gpiod_get_value_cansleep(tpd->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 1);
+}
+
+static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 0);
+}
+
+static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
+ .attach = tpd12s015_attach,
+ .detach = tpd12s015_detach,
+ .detect = tpd12s015_detect,
+ .hpd_enable = tpd12s015_hpd_enable,
+ .hpd_disable = tpd12s015_hpd_disable,
+};
+
+static irqreturn_t tpd12s015_hpd_isr(int irq, void *data)
+{
+ struct tpd12s015_device *tpd = data;
+ struct drm_bridge *bridge = &tpd->bridge;
+
+ drm_bridge_hpd_notify(bridge, tpd12s015_detect(bridge));
+
+ return IRQ_HANDLED;
+}
+
+static int tpd12s015_probe(struct platform_device *pdev)
+{
+ struct tpd12s015_device *tpd;
+ struct device_node *node;
+ struct gpio_desc *gpio;
+ int ret;
+
+ tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
+ if (!tpd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, tpd);
+
+ tpd->bridge.funcs = &tpd12s015_bridge_funcs;
+ tpd->bridge.of_node = pdev->dev.of_node;
+ tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
+
+ /* Get the next bridge, connected to port@1. */
+ node = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
+ if (!node)
+ return -ENODEV;
+
+ tpd->next_bridge = of_drm_find_bridge(node);
+ of_node_put(node);
+
+ if (!tpd->next_bridge)
+ return -EPROBE_DEFER;
+
+ /* Get the control and HPD GPIOs. */
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->ct_cp_hpd_gpio = gpio;
+
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->ls_oe_gpio = gpio;
+
+ gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2, GPIOD_IN);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->hpd_gpio = gpio;
+
+ /* Register the IRQ if the HPD GPIO is IRQ-capable. */
+ tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio);
+	if (tpd->hpd_irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL,
+ tpd12s015_hpd_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "tpd12s015 hpd", tpd);
+ if (ret)
+ return ret;
+
+ tpd->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ }
+
+ /* Register the DRM bridge. */
+ drm_bridge_add(&tpd->bridge);
+
+ return 0;
+}
+
+static int tpd12s015_remove(struct platform_device *pdev)
+{
+ struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&tpd->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id tpd12s015_of_match[] = {
+ { .compatible = "ti,tpd12s015", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
+
+static struct platform_driver tpd12s015_driver = {
+ .probe = tpd12s015_probe,
+	.remove	= tpd12s015_remove,
+ .driver = {
+ .name = "tpd12s015",
+ .of_match_table = tpd12s015_of_match,
+ },
+};
+
+module_platform_driver(tpd12s015_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("TPD12S015 HDMI level shifter and ESD protection driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 248c9f765c45..d2ff63ce8eaf 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -38,7 +38,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
#define DRIVER_NAME "cirrus"
#define DRIVER_DESC "qemu cirrus vga"
@@ -152,9 +151,13 @@ static int cirrus_pitch(struct drm_framebuffer *fb)
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
+ int idx;
u32 addr;
u8 tmp;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return;
+
addr = offset >> 2;
wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
@@ -169,6 +172,8 @@ static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
tmp &= 0x7f;
tmp |= (addr >> 12) & 0x80;
wreg_crt(cirrus, 0x1d, tmp);
+
+ drm_dev_exit(idx);
}
static int cirrus_mode_set(struct cirrus_device *cirrus,
@@ -177,9 +182,12 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
{
int hsyncstart, hsyncend, htotal, hdispend;
int vtotal, vdispend;
- int tmp;
+ int tmp, idx;
int sr07 = 0, hdr = 0;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return -1;
+
htotal = mode->htotal / 8;
hsyncend = mode->hsync_end / 8;
hsyncstart = mode->hsync_start / 8;
@@ -265,6 +273,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
hdr = 0xc5;
break;
default:
+ drm_dev_exit(idx);
return -1;
}
@@ -293,6 +302,8 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
outb(0x20, 0x3c0);
+
+ drm_dev_exit(idx);
return 0;
}
@@ -301,10 +312,16 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
{
struct cirrus_device *cirrus = fb->dev->dev_private;
void *vmap;
+ int idx, ret;
+
+ ret = -ENODEV;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ goto out;
+ ret = -ENOMEM;
vmap = drm_gem_shmem_vmap(fb->obj[0]);
if (!vmap)
- return -ENOMEM;
+ goto out_dev_exit;
if (cirrus->cpp == fb->format->cpp[0])
drm_fb_memcpy_dstclip(cirrus->vram,
@@ -324,7 +341,12 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
WARN_ON_ONCE("cpp mismatch");
drm_gem_shmem_vunmap(fb->obj[0], vmap);
- return 0;
+ ret = 0;
+
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
+ return ret;
}
static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
@@ -434,13 +456,6 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
@@ -510,6 +525,14 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
+static void cirrus_release(struct drm_device *dev)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+
+ drm_mode_config_cleanup(dev);
+ kfree(cirrus);
+}
+
DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
@@ -523,6 +546,7 @@ static struct drm_driver cirrus_driver = {
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ .release = cirrus_release,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -606,12 +630,11 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct cirrus_device *cirrus = dev->dev_private;
- drm_dev_unregister(dev);
- drm_mode_config_cleanup(dev);
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
iounmap(cirrus->mmio);
iounmap(cirrus->vram);
drm_dev_put(dev);
- kfree(cirrus);
pci_release_regions(pdev);
}
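
The cirrus conversion above pairs drm_dev_unplug() with drm_dev_enter()/drm_dev_exit() guards around every hardware access. A minimal sketch of the pattern, with a hypothetical example_write_reg() helper that is not part of the patch:

#include <linux/io.h>

#include <drm/drm_drv.h>

/* Hypothetical helper: skip the MMIO write once the device is gone. */
static void example_write_reg(struct drm_device *dev, void __iomem *mmio,
			      u32 reg, u32 val)
{
	int idx;

	/* Fails (returns false) after drm_dev_unplug() has been called. */
	if (!drm_dev_enter(dev, &idx))
		return;

	writel(val, mmio + reg);

	drm_dev_exit(idx);
}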
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index d33691512a8e..9ccfbf213d72 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -1018,6 +1019,122 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_get_bridge_state - get bridge state
+ * @state: global atomic state object
+ * @bridge: bridge to get state object for
+ *
+ * This function returns the bridge state for the given bridge, allocating it
+ * if needed. It will also grab the relevant bridge lock to make sure that the
+ * state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted.
+ */
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
+ if (IS_ERR(obj_state))
+ return ERR_CAST(obj_state);
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_bridge_state);
+
+/**
+ * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the old bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
+ if (!obj_state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
+
+/**
+ * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the new bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
+ if (!obj_state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
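
As a usage illustration, a bridge driver's atomic hooks can look up their own new state through this accessor. A minimal hypothetical sketch (my_bridge_atomic_enable() is invented, and assumes the driver implements the atomic state hooks):

/* Hypothetical atomic_enable hook reading the negotiated input format. */
static void my_bridge_atomic_enable(struct drm_bridge *bridge,
				    struct drm_bridge_state *old_bridge_state)
{
	struct drm_atomic_state *state = old_bridge_state->base.state;
	struct drm_bridge_state *new_bridge_state;

	new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (WARN_ON(!new_bridge_state))
		return;

	/* Program the hardware for new_bridge_state->input_bus_cfg.format. */
}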
+
+/**
+ * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
+ * @state: atomic state
+ * @encoder: DRM encoder
+ *
+ * This function adds all bridges attached to @encoder. This is needed to add
+ * bridge states to @state and make them available when
+ * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
+ * &drm_bridge_funcs.atomic_enable(), &drm_bridge_funcs.atomic_disable() or
+ * &drm_bridge_funcs.atomic_post_disable() are called.
+ *
+ * Returns:
+ * 0 on success, or -EDEADLK or -ENOMEM on failure. When the error is -EDEADLK,
+ * the w/w mutex code has detected a deadlock and the entire atomic sequence
+ * must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_state *bridge_state;
+ struct drm_bridge *bridge;
+
+ if (!encoder)
+ return 0;
+
+ DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n",
+ encoder->base.id, encoder->name, state);
+
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ /* Skip bridges that don't implement the atomic state hooks. */
+ if (!bridge->funcs->atomic_duplicate_state)
+ continue;
+
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ if (IS_ERR(bridge_state))
+ return PTR_ERR(bridge_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
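
The atomic helpers call this automatically (see the drm_atomic_helper.c hunk below); drivers with hand-rolled checks call it themselves. A hypothetical sketch, where my_driver_atomic_check(), struct my_device and its encoder field are all invented for illustration:

/* Hypothetical hand-rolled check that pulls in the bridge states. */
static int my_driver_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct my_device *mdev = to_my_device(dev);	/* hypothetical */
	int ret;

	ret = drm_atomic_add_encoder_bridges(state, mdev->encoder);
	if (ret)
		return ret;

	return drm_atomic_helper_check(dev, state);
}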
+
+/**
* drm_atomic_add_affected_connectors - add connectors for CRTC
* @state: atomic state
* @crtc: DRM CRTC
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4511c2e07bb9..85d163f16801 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -437,12 +437,12 @@ mode_fixup(struct drm_atomic_state *state)
funcs = encoder->helper_private;
bridge = drm_bridge_chain_get_first_bridge(encoder);
- ret = drm_bridge_chain_mode_fixup(bridge,
- &new_crtc_state->mode,
- &new_crtc_state->adjusted_mode);
- if (!ret) {
- DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
- return -EINVAL;
+ ret = drm_atomic_bridge_chain_check(bridge,
+ new_crtc_state,
+ new_conn_state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
+ return ret;
}
if (funcs && funcs->atomic_check) {
@@ -583,6 +583,7 @@ mode_valid(struct drm_atomic_state *state)
* &drm_crtc_state.connectors_changed is set when a connector is added or
* removed from the CRTC. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
+ * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
@@ -649,6 +650,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return -EINVAL;
}
+
+ new_crtc_state->no_vblank = !drm_dev_has_vblank(dev);
}
ret = handle_conflicting_encoders(state, false);
@@ -730,6 +736,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return ret;
}
+ /*
+ * Iterate over all connectors again, and add all affected bridges to
+ * the state.
+ */
+ for_each_oldnew_connector_in_state(state, connector,
+ old_connector_state,
+ new_connector_state, i) {
+ struct drm_encoder *encoder;
+
+ encoder = old_connector_state->best_encoder;
+ ret = drm_atomic_add_encoder_bridges(state, encoder);
+ if (ret)
+ return ret;
+
+ encoder = new_connector_state->best_encoder;
+ ret = drm_atomic_add_encoder_bridges(state, encoder);
+ if (ret)
+ return ret;
+ }
+
ret = mode_valid(state);
if (ret)
return ret;
@@ -2215,7 +2241,9 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* when a job is queued, and any change to the pipeline that does not touch the
* connector is leading to timeouts when calling
* drm_atomic_helper_wait_for_vblanks() or
- * drm_atomic_helper_wait_for_flip_done().
+ * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
+ * connectors, this function can also fake VBLANK events for CRTCs without
+ * a VBLANK interrupt.
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@@ -3508,3 +3536,44 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
+
+/**
+ * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
+ * the input end of a bridge
+ * @bridge: bridge control structure
+ * @bridge_state: new bridge state
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ * @output_fmt: tested output bus format
+ * @num_input_fmts: will contain the size of the returned array
+ *
+ * This helper is a pluggable implementation of the
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
+ * modify the bus configuration between their input and their output. It
+ * returns an array of input formats with a single element set to @output_fmt.
+ *
+ * RETURNS:
+ * a valid format array of size @num_input_fmts, or NULL if the allocation
+ * failed
+ */
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
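
A pass-through bridge could wire this helper up as follows; my_bridge_get_output_bus_fmts() and the choice of MEDIA_BUS_FMT_RGB888_1X24 are illustrative assumptions, not mandated by the patch:

/* Hypothetical bridge that does not transcode between input and output. */
static u32 *
my_bridge_get_output_bus_fmts(struct drm_bridge *bridge,
			      struct drm_bridge_state *bridge_state,
			      struct drm_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state,
			      unsigned int *num_output_fmts)
{
	u32 *fmts = kmalloc(sizeof(*fmts), GFP_KERNEL);

	if (!fmts) {
		*num_output_fmts = 0;
		return NULL;
	}

	*num_output_fmts = 1;
	fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
	return fmts;
}

static const struct drm_bridge_funcs my_bridge_funcs = {
	/* ... attach/enable/disable hooks elided ... */
	.atomic_get_output_bus_fmts = my_bridge_get_output_bus_fmts,
	.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
};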
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 7cf3cf936547..8fce6a115dfe 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -551,3 +552,104 @@ void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj
memcpy(state, obj->state, sizeof(*state));
}
EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
+
+/**
+ * __drm_atomic_helper_bridge_duplicate_state() - Copy atomic bridge state
+ * @bridge: bridge object
+ * @state: atomic bridge state
+ *
+ * Copies the bridge's current atomic state into @state and re-initializes the
+ * backpointer to @bridge. This is useful for drivers that subclass the bridge
+ * state.
+ */
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ __drm_atomic_helper_private_obj_duplicate_state(&bridge->base,
+ &state->base);
+ state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_duplicate_state() - Duplicate a bridge state object
+ * @bridge: bridge object
+ *
+ * Allocates a new bridge state and initializes it with the current bridge
+ * state values. This helper is meant to be used as a bridge
+ * &drm_bridge_funcs.atomic_duplicate_state hook for bridges that don't
+ * subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge)
+{
+ struct drm_bridge_state *new;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (new)
+ __drm_atomic_helper_bridge_duplicate_state(bridge, new);
+
+ return new;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_destroy_state() - Destroy a bridge state object
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to destroy
+ *
+ * Destroys a bridge state previously created by
+ * &drm_atomic_helper_bridge_reset() or
+ * &drm_atomic_helper_bridge_duplicate_state(). This helper is meant to be
+ * used as a bridge &drm_bridge_funcs.atomic_destroy_state hook for bridges
+ * that don't subclass the bridge state.
+ */
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_destroy_state);
+
+/**
+ * __drm_atomic_helper_bridge_reset() - Initialize a bridge state to its
+ * default
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to initialize
+ *
+ * Initializes the bridge state to default values. This is meant to be called
+ * by the bridge &drm_bridge_funcs.atomic_reset hook for bridges that subclass
+ * the bridge state.
+ */
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ memset(state, 0, sizeof(*state));
+ state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset);
+
+/**
+ * drm_atomic_helper_bridge_reset() - Allocate and initialize a bridge state
+ * to its default
+ * @bridge: the bridge this state refers to
+ *
+ * Allocates the bridge state and initializes it to default values. This helper
+ * is meant to be used as a bridge &drm_bridge_funcs.atomic_reset hook for
+ * bridges that don't subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge)
+{
+ struct drm_bridge_state *bridge_state;
+
+ bridge_state = kzalloc(sizeof(*bridge_state), GFP_KERNEL);
+ if (!bridge_state)
+ return ERR_PTR(-ENOMEM);
+
+ __drm_atomic_helper_bridge_reset(bridge, bridge_state);
+ return bridge_state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_reset);
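
For drivers that subclass the bridge state, the double-underscore variants above are meant to be called from the driver's own hooks. A hypothetical sketch, with struct my_bridge_state and its private field invented for illustration:

/* Hypothetical subclassed bridge state. */
struct my_bridge_state {
	struct drm_bridge_state base;
	u32 my_private_setting;
};

static struct drm_bridge_state *
my_bridge_duplicate_state(struct drm_bridge *bridge)
{
	struct my_bridge_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Copies the base private state; driver fields start from zero. */
	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);

	return &state->base;
}

static struct drm_bridge_state *my_bridge_reset(struct drm_bridge *bridge)
{
	struct my_bridge_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	__drm_atomic_helper_bridge_reset(bridge, &state->base);

	return &state->base;
}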
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index cc9acd986c68..531b876d0ed8 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -153,11 +153,6 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
return -ENOMEM;
}
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, fpriv->master);
- if (ret)
- goto out_err;
- }
fpriv->is_master = 1;
fpriv->authenticated = 1;
@@ -332,9 +327,6 @@ static void drm_master_destroy(struct kref *kref)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_lease_destroy(master);
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
drm_legacy_master_rmmaps(dev, master);
idr_destroy(&master->magic_map);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c2cf0c90fa26..afdec8e5fc68 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
@@ -38,26 +39,56 @@
* encoder chain.
*
* A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge::
+ * either connected to it directly, or through a chain of bridges::
*
- * encoder ---> bridge B ---> bridge A
+ * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
*
- * Here, the output of the encoder feeds to bridge B, and that furthers feeds to
- * bridge A.
+ * Here, the output of the encoder feeds to bridge A, which in turn feeds to
+ * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
+ * Chaining multiple bridges to the output of a bridge, or the same bridge to
+ * the output of different bridges, is not supported.
*
- * The driver using the bridge is responsible to make the associations between
- * the encoder and bridges. Once these links are made, the bridges will
- * participate along with encoder functions to perform mode_set/enable/disable
- * through the ops provided in &drm_bridge_funcs.
+ * Display drivers are responsible for linking encoders with the first bridge
+ * in the chain. This is done by acquiring the appropriate bridge with
+ * of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
+ * panel with drm_panel_bridge_add_typed() (or the managed version
+ * devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
+ * attached to the encoder with a call to drm_bridge_attach().
*
- * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
+ * Bridges are responsible for linking themselves with the next bridge in the
+ * chain, if any. This is done the same way as for encoders, with the call to
+ * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
+ *
+ * Once these links are created, the bridges can participate along with encoder
+ * functions to perform mode validation and fixup (through
+ * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
+ * setting (through drm_bridge_chain_mode_set()), enable (through
+ * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
+ * and disable (through drm_atomic_bridge_chain_disable() and
+ * drm_atomic_bridge_chain_post_disable()). Those functions call the
+ * corresponding operations provided in &drm_bridge_funcs in sequence for all
+ * bridges in the chain.
+ *
+ * For display drivers that use the atomic helpers
+ * drm_atomic_helper_check_modeset(),
+ * drm_atomic_helper_commit_modeset_enables() and
+ * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
+ * commit check and commit tail handlers, or through the higher-level
+ * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
+ * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
+ * requires no intervention from the driver. For other drivers, the relevant
+ * DRM bridge chain functions shall be called manually.
+ *
+ * Bridges also participate in implementing the &drm_connector at the end of
+ * the bridge chain. Display drivers may use the drm_bridge_connector_init()
+ * helper to create the &drm_connector, or implement it manually on top of the
+ * connector-related operations exposed by the bridge (see the overview
+ * documentation of bridge operations for more details).
+ *
+ * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
* CRTCs, encoders or connectors and hence are not visible to userspace. They
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
- *
- * Bridges can also be chained up using the &drm_bridge.chain_node field.
- *
- * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
static DEFINE_MUTEX(bridge_lock);
@@ -70,6 +101,8 @@ static LIST_HEAD(bridge_list);
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
+ mutex_init(&bridge->hpd_mutex);
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
@@ -86,15 +119,43 @@ void drm_bridge_remove(struct drm_bridge *bridge)
mutex_lock(&bridge_lock);
list_del_init(&bridge->list);
mutex_unlock(&bridge_lock);
+
+ mutex_destroy(&bridge->hpd_mutex);
}
EXPORT_SYMBOL(drm_bridge_remove);
+static struct drm_private_state *
+drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
+{
+ struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+ struct drm_bridge_state *state;
+
+ state = bridge->funcs->atomic_duplicate_state(bridge);
+ return state ? &state->base : NULL;
+}
+
+static void
+drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
+ struct drm_private_state *s)
+{
+ struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
+ struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+
+ bridge->funcs->atomic_destroy_state(bridge, state);
+}
+
+static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
+ .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
+ .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
+};
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
* @encoder: DRM encoder
* @bridge: bridge to attach
* @previous: previous bridge in the chain (optional)
+ * @flags: DRM_BRIDGE_ATTACH_* flags
*
* Called by a kms driver to link the bridge to an encoder's chain. The previous
* argument specifies the previous bridge in the chain. If NULL, the bridge is
@@ -112,7 +173,8 @@ EXPORT_SYMBOL(drm_bridge_remove);
* Zero on success, error code on failure
*/
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
- struct drm_bridge *previous)
+ struct drm_bridge *previous,
+ enum drm_bridge_attach_flags flags)
{
int ret;
@@ -134,16 +196,36 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge);
- if (ret < 0) {
- list_del(&bridge->chain_node);
- bridge->dev = NULL;
- bridge->encoder = NULL;
- return ret;
+ ret = bridge->funcs->attach(bridge, flags);
+ if (ret < 0)
+ goto err_reset_bridge;
+ }
+
+ if (bridge->funcs->atomic_reset) {
+ struct drm_bridge_state *state;
+
+ state = bridge->funcs->atomic_reset(bridge);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ goto err_detach_bridge;
}
+
+ drm_atomic_private_obj_init(bridge->dev, &bridge->base,
+ &state->base,
+ &drm_bridge_priv_state_funcs);
}
return 0;
+
+err_detach_bridge:
+ if (bridge->funcs->detach)
+ bridge->funcs->detach(bridge);
+
+err_reset_bridge:
+ bridge->dev = NULL;
+ bridge->encoder = NULL;
+ list_del(&bridge->chain_node);
+ return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
@@ -155,6 +237,9 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
+ if (bridge->funcs->atomic_reset)
+ drm_atomic_private_obj_fini(&bridge->base);
+
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
@@ -163,14 +248,92 @@ void drm_bridge_detach(struct drm_bridge *bridge)
}
/**
- * DOC: bridge callbacks
+ * DOC: bridge operations
+ *
+ * Bridge drivers expose operations through the &drm_bridge_funcs structure.
+ * The DRM internals (atomic and CRTC helpers) use the helpers defined in
+ * drm_bridge.c to call bridge operations. Those operations are divided into
+ * three big categories to support different parts of the bridge usage.
+ *
+ * - The encoder-related operations support control of the bridges in the
+ * chain, and are roughly counterparts to the &drm_encoder_helper_funcs
+ * operations. They are used by the legacy CRTC and the atomic modeset
+ * helpers to perform mode validation, fixup and setting, and enable and
+ * disable the bridge automatically.
+ *
+ * The enable and disable operations are split in
+ * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
+ * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
+ * finer-grained control.
+ *
+ * Bridge drivers may implement the legacy version of those operations, or
+ * the atomic version (prefixed with atomic\_), in which case they shall also
+ * implement the atomic state bookkeeping operations
+ * (&drm_bridge_funcs.atomic_duplicate_state,
+ * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
+ * Mixing atomic and non-atomic versions of the operations is not supported.
+ *
+ * - The bus format negotiation operations
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
+ * negotiate the formats transmitted between bridges in the chain when
+ * multiple formats are supported. Negotiation for formats is performed
+ * transparently for display drivers by the atomic modeset helpers. Only
+ * atomic versions of those operations exist, bridge drivers that need to
+ * implement them shall thus also implement the atomic version of the
+ * encoder-related operations. This feature is not supported by the legacy
+ * CRTC helpers.
+ *
+ * - The connector-related operations support implementing a &drm_connector
+ * based on a chain of bridges. DRM bridges traditionally create a
+ * &drm_connector for bridges meant to be used at the end of the chain. This
+ * puts additional burden on bridge drivers, especially for bridges that may
+ * be used in the middle of a chain or at the end of it. Furthermore, it
+ * requires all operations of the &drm_connector to be handled by a single
+ * bridge, which doesn't always match the hardware architecture.
+ *
+ * To simplify bridge drivers and make the connector implementation more
+ * flexible, a new model allows bridges to unconditionally skip the creation of
+ * a &drm_connector and instead expose &drm_bridge_funcs operations to support
+ * an externally-implemented &drm_connector. Those operations are
+ * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
+ * &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
+ * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
+ * implemented, display drivers shall create a &drm_connector instance for
+ * each chain of bridges, and implement those connector instances based on
+ * the bridge connector operations.
*
- * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
- * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c
- * These helpers call a specific &drm_bridge_funcs op for all the bridges
- * during encoder configuration.
+ * Bridge drivers shall implement the connector-related operations for all
+ * the features that the bridge hardware supports. For instance, if a bridge
+ * supports reading EDID, the &drm_bridge_funcs.get_edid shall be
+ * implemented. This however doesn't mean that the DDC lines are wired to the
+ * bridge on a particular platform, as they could also be connected to an I2C
+ * controller of the SoC. Support for the connector-related operations on the
+ * running platform is reported through the &drm_bridge.ops flags. Bridge
+ * drivers shall detect which operations they can support on the platform
+ * (usually this information is provided by ACPI or DT), and set the
+ * &drm_bridge.ops flags for all supported operations. A flag shall only be
+ * set if the corresponding &drm_bridge_funcs operation is implemented, but
+ * an implemented operation doesn't necessarily imply that the corresponding
+ * flag will be set. Display drivers shall use the &drm_bridge.ops flags to
+ * decide which bridge to delegate a connector operation to. This mechanism
+ * allows providing a single static const &drm_bridge_funcs instance in
+ * bridge drivers, improving security by storing function pointers in
+ * read-only memory.
*
- * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
+ * In order to ease the transition, bridge drivers may support both the old and
+ * new models by making connector creation optional and implementing the
+ * connector-related bridge operations. Connector creation is then controlled
+ * by the flags argument to the drm_bridge_attach() function. Display drivers
+ * that support the new model and create connectors themselves shall set the
+ * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
+ * connector creation. For intermediate bridges in the chain, the flag shall
+ * be passed to the drm_bridge_attach() call for the downstream bridge.
+ * Bridge drivers that implement only the new model shall return an error
+ * from their &drm_bridge_funcs.attach handler when the
+ * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
+ * should use the new model, and convert the bridge drivers they use if
+ * needed, in order to gradually transition to the new model.
*/
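
As an illustration of the transition rules above, a hypothetical new-model-only bridge rejects legacy attachment and forwards the flag down the chain (struct my_bridge and bridge_to_my_bridge() are invented for the sketch):

/* Hypothetical attach hook for a bridge implementing only the new model. */
static int my_bridge_attach(struct drm_bridge *bridge,
			    enum drm_bridge_attach_flags flags)
{
	struct my_bridge *mb = bridge_to_my_bridge(bridge);	/* hypothetical */

	/* This driver never creates a connector itself. */
	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	/* Pass the flag on to the downstream bridge. */
	return drm_bridge_attach(bridge->encoder, mb->next_bridge, bridge,
				 flags);
}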
/**
@@ -409,10 +572,19 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_disable)
- iter->funcs->atomic_disable(iter, old_state);
- else if (iter->funcs->disable)
+ if (iter->funcs->atomic_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ iter);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ iter->funcs->atomic_disable(iter, old_bridge_state);
+ } else if (iter->funcs->disable) {
iter->funcs->disable(iter);
+ }
if (iter == bridge)
break;
@@ -443,10 +615,20 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_post_disable)
- bridge->funcs->atomic_post_disable(bridge, old_state);
- else if (bridge->funcs->post_disable)
+ if (bridge->funcs->atomic_post_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_post_disable(bridge,
+ old_bridge_state);
+ } else if (bridge->funcs->post_disable) {
bridge->funcs->post_disable(bridge);
+ }
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
@@ -475,10 +657,19 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_pre_enable)
- iter->funcs->atomic_pre_enable(iter, old_state);
- else if (iter->funcs->pre_enable)
+ if (iter->funcs->atomic_pre_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ iter);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ iter->funcs->atomic_pre_enable(iter, old_bridge_state);
+ } else if (iter->funcs->pre_enable) {
iter->funcs->pre_enable(iter);
+ }
if (iter == bridge)
break;
@@ -508,14 +699,498 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_enable)
- bridge->funcs->atomic_enable(bridge, old_state);
- else if (bridge->funcs->enable)
+ if (bridge->funcs->atomic_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_enable(bridge, old_bridge_state);
+ } else if (bridge->funcs->enable) {
bridge->funcs->enable(bridge);
+ }
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
+static int drm_atomic_bridge_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ if (bridge->funcs->atomic_check) {
+ struct drm_bridge_state *bridge_state;
+ int ret;
+
+ bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ bridge);
+ if (WARN_ON(!bridge_state))
+ return -EINVAL;
+
+ ret = bridge->funcs->atomic_check(bridge, bridge_state,
+ crtc_state, conn_state);
+ if (ret)
+ return ret;
+ } else if (bridge->funcs->mode_fixup) {
+ if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
+ &crtc_state->adjusted_mode))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
+ struct drm_bridge *cur_bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 out_bus_fmt)
+{
+ struct drm_bridge_state *cur_state;
+ unsigned int num_in_bus_fmts, i;
+ struct drm_bridge *prev_bridge;
+ u32 *in_bus_fmts;
+ int ret;
+
+ prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
+ cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ cur_bridge);
+
+ /*
+ * If bus format negotiation is not supported by this bridge, let's
+ * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
+ * hope that it can handle this situation gracefully (by providing
+ * appropriate default values).
+ */
+ if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
+ if (cur_bridge != first_bridge) {
+ ret = select_bus_fmt_recursive(first_bridge,
+ prev_bridge, crtc_state,
+ conn_state,
+ MEDIA_BUS_FMT_FIXED);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Driver does not implement the atomic state hooks, but that's
+ * fine, as long as it does not access the bridge state.
+ */
+ if (cur_state) {
+ cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ }
+
+ return 0;
+ }
+
+ /*
+ * If the driver implements ->atomic_get_input_bus_fmts() it
+ * should also implement the atomic state hooks.
+ */
+ if (WARN_ON(!cur_state))
+ return -EINVAL;
+
+ in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
+ cur_state,
+ crtc_state,
+ conn_state,
+ out_bus_fmt,
+ &num_in_bus_fmts);
+ if (!num_in_bus_fmts)
+ return -ENOTSUPP;
+ else if (!in_bus_fmts)
+ return -ENOMEM;
+
+ if (first_bridge == cur_bridge) {
+ cur_state->input_bus_cfg.format = in_bus_fmts[0];
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ kfree(in_bus_fmts);
+ return 0;
+ }
+
+ for (i = 0; i < num_in_bus_fmts; i++) {
+ ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
+ crtc_state, conn_state,
+ in_bus_fmts[i]);
+ if (ret != -ENOTSUPP)
+ break;
+ }
+
+ if (!ret) {
+ cur_state->input_bus_cfg.format = in_bus_fmts[i];
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ }
+
+ kfree(in_bus_fmts);
+ return ret;
+}
+
+/*
+ * This function is called by &drm_atomic_bridge_chain_check() just before
+ * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
+ * It performs bus format negotiation between bridge elements. The negotiation
+ * happens in reverse order, starting from the last element in the chain up to
+ * @bridge.
+ *
+ * Negotiation starts by retrieving supported output bus formats on the last
+ * bridge element and testing them one by one. The test is recursive, meaning
+ * that for each tested output format, the whole chain will be walked backward,
+ * and each element will have to choose an input bus format that can be
+ * transcoded to the requested output format. When a bridge element does not
+ * support transcoding into a specific output format, -ENOTSUPP is returned and
+ * the next bridge element will have to try a different format. If none of the
+ * combinations work, -ENOTSUPP is returned and the atomic modeset will fail.
+ *
+ * This implementation relies on
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
+ * input/output formats.
+ *
+ * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
+ * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
+ * tries a single format: &drm_connector.display_info.bus_formats[0] if
+ * available, MEDIA_BUS_FMT_FIXED otherwise.
+ *
+ * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
+ * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
+ * bridge element that lacks this hook and asks the previous element in the
+ * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
+ * to do in that case (fail if they want to enforce bus format negotiation, or
+ * provide a reasonable default if they need to support pipelines where not
+ * all elements support bus format negotiation).
+ */
+static int
+drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_bridge_state *last_bridge_state;
+ unsigned int i, num_out_bus_fmts;
+ struct drm_bridge *last_bridge;
+ u32 *out_bus_fmts;
+ int ret = 0;
+
+ last_bridge = list_last_entry(&encoder->bridge_chain,
+ struct drm_bridge, chain_node);
+ last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ last_bridge);
+
+ if (last_bridge->funcs->atomic_get_output_bus_fmts) {
+ const struct drm_bridge_funcs *funcs = last_bridge->funcs;
+
+ /*
+ * If the driver implements ->atomic_get_output_bus_fmts() it
+ * should also implement the atomic state hooks.
+ */
+ if (WARN_ON(!last_bridge_state))
+ return -EINVAL;
+
+ out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
+ last_bridge_state,
+ crtc_state,
+ conn_state,
+ &num_out_bus_fmts);
+ if (!num_out_bus_fmts)
+ return -ENOTSUPP;
+ else if (!out_bus_fmts)
+ return -ENOMEM;
+ } else {
+ num_out_bus_fmts = 1;
+ out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
+ if (!out_bus_fmts)
+ return -ENOMEM;
+
+ if (conn->display_info.num_bus_formats &&
+ conn->display_info.bus_formats)
+ out_bus_fmts[0] = conn->display_info.bus_formats[0];
+ else
+ out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ }
+
+ for (i = 0; i < num_out_bus_fmts; i++) {
+ ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
+ conn_state, out_bus_fmts[i]);
+ if (ret != -ENOTSUPP)
+ break;
+ }
+
+ kfree(out_bus_fmts);
+
+ return ret;
+}
+
+static void
+drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
+ struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_state *bridge_state, *next_bridge_state;
+ struct drm_bridge *next_bridge;
+ u32 output_flags = 0;
+
+ bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+
+ /* No bridge state attached to this bridge => nothing to propagate. */
+ if (!bridge_state)
+ return;
+
+ next_bridge = drm_bridge_get_next_bridge(bridge);
+
+ /*
+ * Let's try to apply the most common case here, that is, propagate
+ * display_info flags for the last bridge, and propagate the input
+ * flags of the next bridge element to the output end of the current
+ * bridge when the bridge is not the last one.
+ * There are exceptions to this rule, like when signal inversion is
+ * happening at the board level, but that's something drivers can deal
+ * with from their &drm_bridge_funcs.atomic_check() implementation by
+ * simply overriding the flags value we've set here.
+ */
+ if (!next_bridge) {
+ output_flags = conn->display_info.bus_flags;
+ } else {
+ next_bridge_state = drm_atomic_get_new_bridge_state(state,
+ next_bridge);
+ /*
+ * No bridge state attached to the next bridge, just leave the
+ * flags at 0.
+ */
+ if (next_bridge_state)
+ output_flags = next_bridge_state->input_bus_cfg.flags;
+ }
+
+ bridge_state->output_bus_cfg.flags = output_flags;
+
+ /*
+ * Propagate the output flags to the input end of the bridge. Again, it's
+ * not necessarily what all bridges want, but that's what most of them
+ * do, and by doing that by default we avoid forcing drivers to
+ * duplicate the "dummy propagation" logic.
+ */
+ bridge_state->input_bus_cfg.flags = output_flags;
+}
+
+/**
+ * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
+ * @bridge: bridge control structure
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ *
+ * First triggers bus format negotiation, then calls the
+ * &drm_bridge_funcs.atomic_check() (falling back on
+ * &drm_bridge_funcs.mode_fixup()) operation for all the bridges in the encoder
+ * chain, starting from the last bridge and moving to the first. These are
+ * called before &drm_encoder_helper_funcs.atomic_check().
+ *
+ * RETURNS:
+ * 0 on success, a negative error code on failure
+ */
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+ int ret;
+
+ if (!bridge)
+ return 0;
+
+ ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
+ conn_state);
+ if (ret)
+ return ret;
+
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ int ret;
+
+ /*
+ * Bus flags are propagated by default. If a bridge needs to
+ * tweak the input bus flags for any reason, it should happen
+ * in its &drm_bridge_funcs.atomic_check() implementation such
+ * that preceding bridges in the chain can propagate the new
+ * bus flags.
+ */
+ drm_atomic_bridge_propagate_bus_flags(iter, conn,
+ crtc_state->state);
+
+ ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ if (iter == bridge)
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
+
+/**
+ * drm_bridge_detect - check if anything is attached to the bridge output
+ * @bridge: bridge control structure
+ *
+ * If the bridge supports output detection, as reported by the
+ * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
+ * bridge and return the connection status. Otherwise return
+ * connector_status_unknown.
+ *
+ * RETURNS:
+ * The detection status on success, or connector_status_unknown if the bridge
+ * doesn't support output detection.
+ */
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
+ return connector_status_unknown;
+
+ return bridge->funcs->detect(bridge);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_detect);
+
+/**
+ * drm_bridge_get_modes - fill all modes currently valid for the sink into the @connector
+ * @bridge: bridge control structure
+ * @connector: the connector to fill with modes
+ *
+ * If the bridge supports output modes retrieval, as reported by the
+ * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
+ * fill the connector with all valid modes and return the number of modes
+ * added. Otherwise return 0.
+ *
+ * RETURNS:
+ * The number of modes added to the connector.
+ */
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
+ return 0;
+
+ return bridge->funcs->get_modes(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
+
+/**
+ * drm_bridge_get_edid - get the EDID data of the connected display
+ * @bridge: bridge control structure
+ * @connector: the connector to read EDID for
+ *
+ * If the bridge supports output EDID retrieval, as reported by the
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
+ * get the EDID and return it. Otherwise return ERR_PTR(-ENOTSUPP).
+ *
+ * RETURNS:
+ * The retrieved EDID on success, or an error pointer otherwise.
+ */
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
+ return ERR_PTR(-ENOTSUPP);
+
+ return bridge->funcs->get_edid(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
+
+/**
+ * drm_bridge_hpd_enable - enable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ * @cb: hot-plug detection callback
+ * @data: data to be passed to the hot-plug detection callback
+ *
+ * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
+ * and @data as hot plug notification callback. From now on the @cb will be
+ * called with @data when an output status change is detected by the bridge,
+ * until hot plug notification gets disabled with drm_bridge_hpd_disable().
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ *
+ * Only one hot plug detection callback can be registered at a time; it is an
+ * error to call this function when hot plug detection is already enabled for
+ * the bridge.
+ */
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+ void (*cb)(void *data,
+ enum drm_connector_status status),
+ void *data)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+ return;
+
+ mutex_lock(&bridge->hpd_mutex);
+
+ if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
+ goto unlock;
+
+ bridge->hpd_cb = cb;
+ bridge->hpd_data = data;
+
+ if (bridge->funcs->hpd_enable)
+ bridge->funcs->hpd_enable(bridge);
+
+unlock:
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
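drm_bridge_connector_enable_hpd(), added later in this patch, is the in-tree
caller; the general shape of a consumer looks like the following sketch (the
foo_* names are hypothetical):

	/* Forward bridge status changes to userspace via the probe helper. */
	static void foo_hpd_cb(void *data, enum drm_connector_status status)
	{
		struct drm_device *drm = data;

		drm_kms_helper_hotplug_event(drm);
	}

	static void foo_enable_hpd(struct drm_device *drm,
				   struct drm_bridge *bridge)
	{
		if (bridge->ops & DRM_BRIDGE_OP_HPD)
			drm_bridge_hpd_enable(bridge, foo_hpd_cb, drm);
	}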
+
+/**
+ * drm_bridge_hpd_disable - disable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ *
+ * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
+ * plug detection callback previously registered with drm_bridge_hpd_enable().
+ * Once this function returns the callback will not be called by the bridge
+ * when an output status change occurs.
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ */
+void drm_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+ return;
+
+ mutex_lock(&bridge->hpd_mutex);
+ if (bridge->funcs->hpd_disable)
+ bridge->funcs->hpd_disable(bridge);
+
+ bridge->hpd_cb = NULL;
+ bridge->hpd_data = NULL;
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
+
+/**
+ * drm_bridge_hpd_notify - notify hot plug detection events
+ * @bridge: bridge control structure
+ * @status: output connection status
+ *
+ * Bridge drivers shall call this function to report hot plug events when they
+ * detect a change in the output status, provided hot plug detection has been
+ * enabled by drm_bridge_hpd_enable().
+ *
+ * This function shall be called in a context that can sleep.
+ */
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
+{
+ mutex_lock(&bridge->hpd_mutex);
+ if (bridge->hpd_cb)
+ bridge->hpd_cb(bridge->hpd_data, status);
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
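A bridge driver that reports DRM_BRIDGE_OP_HPD would typically call this from
a threaded interrupt handler, which satisfies the sleepable-context
requirement; a sketch with hypothetical foo_* names:

	static irqreturn_t foo_bridge_hpd_isr(int irq, void *arg)
	{
		struct foo_bridge *ctx = arg;
		/* foo_bridge_sense_hpd() is a made-up hardware query. */
		bool connected = foo_bridge_sense_hpd(ctx);

		drm_bridge_hpd_notify(&ctx->bridge,
				      connected ? connector_status_connected
						: connector_status_disconnected);

		return IRQ_HANDLED;
	}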
+
#ifdef CONFIG_OF
/**
* of_drm_find_bridge - find the bridge corresponding to the device node in
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
new file mode 100644
index 000000000000..c6994fe673f3
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The DRM bridge connector helper object provides a DRM connector
+ * implementation that wraps a chain of &struct drm_bridge. The connector
+ * operations are fully implemented based on the operations of the bridges in
+ * the chain, and don't require any intervention from the display controller
+ * driver at runtime.
+ *
+ * To use the helper, display controller drivers create a bridge connector with
+ * a call to drm_bridge_connector_init(). This associates the newly created
+ * connector with the chain of bridges passed to the function and registers it
+ * with the DRM device. At that point the connector becomes fully usable; no
+ * further operation is needed.
+ *
+ * The DRM bridge connector operations are implemented based on the operations
+ * provided by the bridges in the chain. Each connector operation is delegated
+ * to the bridge closest to the connector (at the end of the chain) that
+ * provides the relevant functionality.
+ *
+ * To make use of this helper, all bridges in the chain shall report bridge
+ * operation flags (&drm_bridge->ops) and bridge output type
+ * (&drm_bridge->type), as well as the DRM_BRIDGE_ATTACH_NO_CONNECTOR attach
+ * flag (none of the bridges shall create a DRM connector directly).
+ */
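A rough usage sketch, assuming a driver with one encoder and a bridge chain
discovered beforehand (foo_output_init() and its arguments are hypothetical;
error unwinding is trimmed):

	static int foo_output_init(struct drm_device *drm,
				   struct drm_encoder *encoder,
				   struct drm_bridge *first_bridge)
	{
		struct drm_connector *connector;
		int ret;

		/* Bridges must not create their own connector. */
		ret = drm_bridge_attach(encoder, first_bridge, NULL,
					DRM_BRIDGE_ATTACH_NO_CONNECTOR);
		if (ret)
			return ret;

		/* Wrap the whole chain in one connector and wire it up. */
		connector = drm_bridge_connector_init(drm, encoder);
		if (IS_ERR(connector))
			return PTR_ERR(connector);

		return drm_connector_attach_encoder(connector, encoder);
	}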
+
+/**
+ * struct drm_bridge_connector - A connector backed by a chain of bridges
+ */
+struct drm_bridge_connector {
+ /**
+ * @base: The base DRM connector
+ */
+ struct drm_connector base;
+ /**
+ * @encoder:
+ *
+ * The encoder at the start of the bridges chain.
+ */
+ struct drm_encoder *encoder;
+ /**
+ * @bridge_edid:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * EDID read support, if any (see &DRM_BRIDGE_OP_EDID).
+ */
+ struct drm_bridge *bridge_edid;
+ /**
+ * @bridge_hpd:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * hot-plug detection notification, if any (see &DRM_BRIDGE_OP_HPD).
+ */
+ struct drm_bridge *bridge_hpd;
+ /**
+ * @bridge_detect:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector detection, if any (see &DRM_BRIDGE_OP_DETECT).
+ */
+ struct drm_bridge *bridge_detect;
+ /**
+ * @bridge_modes:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector modes detection, if any (see &DRM_BRIDGE_OP_MODES).
+ */
+ struct drm_bridge *bridge_modes;
+};
+
+#define to_drm_bridge_connector(x) \
+ container_of(x, struct drm_bridge_connector, base)
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Hot-Plug Handling
+ */
+
+static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /* Notify all bridges in the pipeline of hotplug events. */
+ drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ if (bridge->funcs->hpd_notify)
+ bridge->funcs->hpd_notify(bridge, status);
+ }
+}
+
+static void drm_bridge_connector_hpd_cb(void *cb_data,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *drm_bridge_connector = cb_data;
+ struct drm_connector *connector = &drm_bridge_connector->base;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+
+ mutex_lock(&dev->mode_config.mutex);
+ old_status = connector->status;
+ connector->status = status;
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (old_status == status)
+ return;
+
+ drm_bridge_connector_hpd_notify(connector, status);
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+/**
+ * drm_bridge_connector_enable_hpd - Enable hot-plug detection for the connector
+ * @connector: The DRM bridge connector
+ *
+ * This function enables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their resume handler.
+ */
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
+ bridge_connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_enable_hpd);
+
+/**
+ * drm_bridge_connector_disable_hpd - Disable hot-plug detection for the
+ * connector
+ * @connector: The DRM bridge connector
+ *
+ * This function disables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their suspend handler.
+ */
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_disable(hpd);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_disable_hpd);
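A sketch of the intended suspend/resume pairing (struct foo_display and the
placement of these helpers in dev_pm_ops are assumptions):

	static int foo_display_suspend(struct device *dev)
	{
		struct foo_display *priv = dev_get_drvdata(dev);

		drm_bridge_connector_disable_hpd(priv->connector);

		return drm_mode_config_helper_suspend(priv->drm);
	}

	static int foo_display_resume(struct device *dev)
	{
		struct foo_display *priv = dev_get_drvdata(dev);
		int ret;

		ret = drm_mode_config_helper_resume(priv->drm);
		drm_bridge_connector_enable_hpd(priv->connector);

		return ret;
	}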
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Functions
+ */
+
+static enum drm_connector_status
+drm_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *detect = bridge_connector->bridge_detect;
+ enum drm_connector_status status;
+
+ if (detect) {
+ status = detect->funcs->detect(detect);
+
+ drm_bridge_connector_hpd_notify(connector, status);
+ } else {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
+ status = connector_status_connected;
+ break;
+ default:
+ status = connector_status_unknown;
+ break;
+ }
+ }
+
+ return status;
+}
+
+static void drm_bridge_connector_destroy(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hpd) {
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ drm_bridge_hpd_disable(hpd);
+ }
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+
+ kfree(bridge_connector);
+}
+
+static const struct drm_connector_funcs drm_bridge_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = drm_bridge_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_bridge_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Helper Functions
+ */
+
+static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ enum drm_connector_status status;
+ struct edid *edid;
+ int n;
+
+ status = drm_bridge_connector_detect(connector, false);
+ if (status != connector_status_connected)
+ goto no_edid;
+
+ edid = bridge->funcs->get_edid(bridge, connector);
+ if (!edid || !drm_edid_is_valid(edid)) {
+ kfree(edid);
+ goto no_edid;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+ return n;
+
+no_edid:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
+static int drm_bridge_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /*
+	 * If the display exposes an EDID, then we parse that in the normal way
+	 * to build a table of supported modes.
+ */
+ bridge = bridge_connector->bridge_edid;
+ if (bridge)
+ return drm_bridge_connector_get_modes_edid(connector, bridge);
+
+ /*
+ * Otherwise if the display pipeline reports modes (e.g. with a fixed
+ * resolution panel or an analog TV output), query it.
+ */
+ bridge = bridge_connector->bridge_modes;
+ if (bridge)
+ return bridge->funcs->get_modes(bridge, connector);
+
+ /*
+ * We can't retrieve modes, which can happen for instance for a DVI or
+ * VGA output with the DDC bus unconnected. The KMS core will add the
+ * default modes.
+ */
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
+ .get_modes = drm_bridge_connector_get_modes,
+ /* No need for .mode_valid(), the bridges are checked by the core. */
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Initialisation
+ */
+
+/**
+ * drm_bridge_connector_init - Initialise a connector for a chain of bridges
+ * @drm: the DRM device
+ * @encoder: the encoder where the bridge chain starts
+ *
+ * Allocate, initialise and register a &drm_bridge_connector with the @drm
+ * device. The connector is associated with a chain of bridges that starts at
+ * the @encoder. All bridges in the chain shall report bridge operation flags
+ * (&drm_bridge->ops) and bridge output type (&drm_bridge->type), and none of
+ * them may create a DRM connector directly.
+ *
+ * Returns a pointer to the new connector on success, or an error pointer
+ * otherwise.
+ */
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_connector *bridge_connector;
+ struct drm_connector *connector;
+ struct i2c_adapter *ddc = NULL;
+ struct drm_bridge *bridge;
+ int connector_type;
+
+ bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
+ if (!bridge_connector)
+ return ERR_PTR(-ENOMEM);
+
+ bridge_connector->encoder = encoder;
+
+ /*
+ * TODO: Handle doublescan_allowed, stereo_allowed and
+ * ycbcr_420_allowed.
+ */
+ connector = &bridge_connector->base;
+ connector->interlace_allowed = true;
+
+ /*
+ * Initialise connector status handling. First locate the furthest
+ * bridges in the pipeline that support HPD and output detection. Then
+ * initialise the connector polling mode, using HPD if available and
+ * falling back to polling if supported. If neither HPD nor output
+	 * detection is available, we don't support hotplug detection at all.
+ */
+ connector_type = DRM_MODE_CONNECTOR_Unknown;
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->interlace_allowed)
+ connector->interlace_allowed = false;
+
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ bridge_connector->bridge_edid = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ bridge_connector->bridge_hpd = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ bridge_connector->bridge_detect = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ bridge_connector->bridge_modes = bridge;
+
+ if (!drm_bridge_get_next_bridge(bridge))
+ connector_type = bridge->type;
+
+ if (bridge->ddc)
+ ddc = bridge->ddc;
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
+ kfree(bridge_connector);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drm_connector_init_with_ddc(drm, connector, &drm_bridge_connector_funcs,
+ connector_type, ddc);
+ drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
+
+ if (bridge_connector->bridge_hpd)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (bridge_connector->bridge_detect)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return connector;
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 8ce9d73fab4f..dcabf5698333 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -134,7 +134,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
shift, add);
}
-/**
+/*
* Core function to create a range of memory available for mapping by a
* non-root process.
*
@@ -149,7 +149,6 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
{
struct drm_local_map *map;
struct drm_map_list *list;
- drm_dma_handle_t *dmah;
unsigned long user_token;
int ret;
@@ -324,14 +323,14 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size);
- if (!dmah) {
+ map->handle = dma_alloc_coherent(&dev->pdev->dev,
+ map->size,
+ &map->offset,
+ GFP_KERNEL);
+ if (!map->handle) {
kfree(map);
return -ENOMEM;
}
- map->handle = dmah->vaddr;
- map->offset = (unsigned long)dmah->busaddr;
- kfree(dmah);
break;
default:
kfree(map);
@@ -399,7 +398,7 @@ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_legacy_findmap);
-/**
+/*
* Ioctl to specify a range of memory that is available for mapping by a
* non-root process.
*
@@ -500,7 +499,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Remove a map private from list and deallocate resources if the mapping
* isn't in use.
*
@@ -513,7 +512,6 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
- drm_dma_handle_t dmah;
int found = 0;
struct drm_master *master;
@@ -554,10 +552,10 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_legacy_pci_free(dev, &dmah);
+ dma_free_coherent(&dev->pdev->dev,
+ map->size,
+ map->handle,
+ map->offset);
break;
}
kfree(map);
@@ -661,7 +659,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
return ret;
}
-/**
+/*
* Cleanup after an error on one of the addbufs() functions.
*
* \param dev DRM device.
@@ -696,7 +694,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev,
}
#if IS_ENABLED(CONFIG_AGP)
-/**
+/*
* Add AGP buffers for DMA transfers.
*
* \param dev struct drm_device to which the buffers are to be added.
@@ -1232,7 +1230,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
return 0;
}
-/**
+/*
* Add buffers for DMA transfers (ioctl).
*
* \param inode device inode.
@@ -1273,7 +1271,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
return ret;
}
-/**
+/*
* Get information about the buffer mappings.
*
 * This was originally meant for debugging purposes, or by a sophisticated
@@ -1364,7 +1362,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
-/**
+/*
* Specifies a low and high water mark for buffer allocation
*
* \param inode device inode.
@@ -1413,7 +1411,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Unreserve the buffers in list, previously reserved using drmDMA.
*
* \param inode device inode.
@@ -1465,7 +1463,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Maps all of the DMA buffers into client-virtual space (ioctl).
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index b031b45aa8ef..6b0c6ef8b9b3 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright 2018 Noralf Trønnes
*/
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 3035584f6dc7..7443114bd713 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -1095,15 +1095,17 @@ out:
}
/**
- * drm_client_modeset_commit_force() - Force commit CRTC configuration
+ * drm_client_modeset_commit_locked() - Force commit CRTC configuration
* @client: DRM client
*
- * Commit modeset configuration to crtcs without checking if there is a DRM master.
+ * Commit modeset configuration to crtcs without checking if there is a DRM
+ * master. The assumption is that the caller already holds an internal DRM
+ * master reference acquired with drm_master_internal_acquire().
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_client_modeset_commit_force(struct drm_client_dev *client)
+int drm_client_modeset_commit_locked(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret;
@@ -1117,7 +1119,7 @@ int drm_client_modeset_commit_force(struct drm_client_dev *client)
return ret;
}
-EXPORT_SYMBOL(drm_client_modeset_commit_force);
+EXPORT_SYMBOL(drm_client_modeset_commit_locked);
/**
* drm_client_modeset_commit() - Commit CRTC configuration
@@ -1136,7 +1138,7 @@ int drm_client_modeset_commit(struct drm_client_dev *client)
if (!drm_master_internal_acquire(dev))
return -EBUSY;
- ret = drm_client_modeset_commit_force(client);
+ ret = drm_client_modeset_commit_locked(client);
drm_master_internal_release(dev);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2166000ed057..644f0ad10671 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -112,6 +112,21 @@ void drm_connector_ida_destroy(void)
}
/**
+ * drm_get_connector_type_name - return a string for connector type
+ * @type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * Returns: the name of the connector type, or NULL if the type is not valid.
+ */
+const char *drm_get_connector_type_name(unsigned int type)
+{
+ if (type < ARRAY_SIZE(drm_connector_enum_list))
+ return drm_connector_enum_list[type].name;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_get_connector_type_name);
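A small, hedged example of the intended use in debug output (fragment only;
the surrounding function is assumed):

	const char *name =
		drm_get_connector_type_name(connector->connector_type);

	DRM_DEBUG_KMS("connector type %u (%s)\n",
		      connector->connector_type, name ? name : "invalid");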
+
+/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
 * @connector: connector to query
*
@@ -140,6 +155,13 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
connector->force = mode->force;
}
+ if (mode->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) {
+ DRM_INFO("cmdline forces connector %s panel_orientation to %d\n",
+ connector->name, mode->panel_orientation);
+ drm_connector_set_panel_orientation(connector,
+ mode->panel_orientation);
+ }
+
DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
connector->name, mode->name,
mode->xres, mode->yres,
@@ -1139,7 +1161,8 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* coordinates, so if userspace rotates the picture to adjust for
* the orientation it must also apply the same transformation to the
* touchscreen input coordinates. This property is initialized by calling
- * drm_connector_init_panel_orientation_property().
+ * drm_connector_set_panel_orientation() or
+ * drm_connector_set_panel_orientation_with_quirk()
*
* scaling mode:
* This property defines how a non-native mode is upscaled to the native
@@ -2046,38 +2069,41 @@ void drm_connector_set_vrr_capable_property(
EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
/**
- * drm_connector_init_panel_orientation_property -
- * initialize the connecters panel_orientation property
- * @connector: connector for which to init the panel-orientation property.
- * @width: width in pixels of the panel, used for panel quirk detection
- * @height: height in pixels of the panel, used for panel quirk detection
+ * drm_connector_set_panel_orientation - sets the connector's panel_orientation
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ *
+ * This function sets the connector's panel_orientation and attaches
+ * a "panel orientation" property to the connector.
*
- * This function should only be called for built-in panels, after setting
- * connector->display_info.panel_orientation first (if known).
+ * Calling this function on a connector where the panel_orientation has
+ * already been set is a no-op (e.g. the orientation has been overridden with
+ * a kernel commandline option).
*
- * This function will check for platform specific (e.g. DMI based) quirks
- * overriding display_info.panel_orientation first, then if panel_orientation
- * is not DRM_MODE_PANEL_ORIENTATION_UNKNOWN it will attach the
- * "panel orientation" property to the connector.
+ * It is allowed to call this function with a panel_orientation of
+ * DRM_MODE_PANEL_ORIENTATION_UNKNOWN, in which case it is a no-op.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_connector_init_panel_orientation_property(
- struct drm_connector *connector, int width, int height)
+int drm_connector_set_panel_orientation(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation)
{
struct drm_device *dev = connector->dev;
struct drm_display_info *info = &connector->display_info;
struct drm_property *prop;
- int orientation_quirk;
- orientation_quirk = drm_get_panel_orientation_quirk(width, height);
- if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- info->panel_orientation = orientation_quirk;
+ /* Already set? */
+ if (info->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return 0;
- if (info->panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ /* Don't attach the property if the orientation is unknown */
+ if (panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return 0;
+ info->panel_orientation = panel_orientation;
+
prop = dev->mode_config.panel_orientation_property;
if (!prop) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
@@ -2094,7 +2120,37 @@ int drm_connector_init_panel_orientation_property(
info->panel_orientation);
return 0;
}
-EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
+EXPORT_SYMBOL(drm_connector_set_panel_orientation);
+
+/**
+ * drm_connector_set_panel_orientation_with_quirk -
+ * set the connector's panel_orientation after checking for quirks
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ * @width: width in pixels of the panel, used for panel quirk detection
+ * @height: height in pixels of the panel, used for panel quirk detection
+ *
+ * Like drm_connector_set_panel_orientation(), but with a check for platform
+ * specific (e.g. DMI based) quirks overriding the passed in panel_orientation.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_set_panel_orientation_with_quirk(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation,
+ int width, int height)
+{
+ int orientation_quirk;
+
+ orientation_quirk = drm_get_panel_orientation_quirk(width, height);
+ if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ panel_orientation = orientation_quirk;
+
+ return drm_connector_set_panel_orientation(connector,
+ panel_orientation);
+}
+EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk);
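A sketch of a typical caller, assuming the panel's native mode is in "mode"
and any firmware-reported orientation (or DRM_MODE_PANEL_ORIENTATION_UNKNOWN
when nothing is known) is in "orientation":

	drm_connector_set_panel_orientation_with_quirk(connector, orientation,
						       mode->hdisplay,
						       mode->vdisplay);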
int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 1f802d8e5681..c99be950bf17 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -47,7 +47,7 @@ struct drm_ctx_list {
/** \name Context bitmap support */
/*@{*/
-/**
+/*
* Free a handle from the context bitmap.
*
* \param dev DRM device.
@@ -68,7 +68,7 @@ void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* Context bitmap allocation.
*
* \param dev DRM device.
@@ -88,7 +88,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
return ret;
}
-/**
+/*
* Context bitmap initialization.
*
* \param dev DRM device.
@@ -104,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
idr_init(&dev->ctx_idr);
}
-/**
+/*
* Context bitmap cleanup.
*
* \param dev DRM device.
@@ -163,7 +163,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
/** \name Per Context SAREA Support */
/*@{*/
-/**
+/*
* Get per-context SAREA.
*
* \param inode device inode.
@@ -211,7 +211,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Set per-context SAREA.
*
* \param inode device inode.
@@ -263,7 +263,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
/** \name The actual DRM context handling routines */
/*@{*/
-/**
+/*
* Switch context.
*
* \param dev DRM device.
@@ -290,7 +290,7 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
return 0;
}
-/**
+/*
* Complete context switch.
*
* \param dev DRM device.
@@ -318,7 +318,7 @@ static int drm_context_switch_complete(struct drm_device *dev,
return 0;
}
-/**
+/*
* Reserve contexts.
*
* \param inode device inode.
@@ -351,7 +351,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Add context.
*
* \param inode device inode.
@@ -404,7 +404,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Get context.
*
* \param inode device inode.
@@ -428,7 +428,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Switch context.
*
* \param inode device inode.
@@ -452,7 +452,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
-/**
+/*
* New context.
*
* \param inode device inode.
@@ -478,7 +478,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Remove context.
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 93a4eec429e8..a4d36aca45ea 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -244,10 +244,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
/* Disable unused encoders */
if (encoder->crtc == NULL)
drm_encoder_disable(encoder);
- /* Disable encoders whose CRTC is about to change */
- if (encoder_funcs->get_crtc &&
- encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
- drm_encoder_disable(encoder);
}
}
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c7d5e4c21423..16f2413403aa 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -216,6 +216,8 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index eab0f2687cd6..4e673d318503 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -182,8 +182,7 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
for (i = 0; i < count; i++) {
u32 features = files[i].driver_features;
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
+ if (features && !drm_core_check_all_features(dev, features))
continue;
tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index e22b812c4b80..5d67a41f7c3a 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -372,7 +372,7 @@ void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
- debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+ debugfs_create_file("control", S_IRUGO | S_IWUSR, crc_ent, crtc,
&drm_crtc_crc_control_fops);
debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
&drm_crtc_crc_data_fops);
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index e45b07890c5a..a7add55a85b4 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -42,10 +42,10 @@
#include "drm_legacy.h"
/**
- * Initialize the DMA data.
+ * drm_legacy_dma_setup() - Initialize the DMA data.
*
- * \param dev DRM device.
- * \return zero on success or a negative value on failure.
+ * @dev: DRM device.
+ * Return: zero on success or a negative value on failure.
*
* Allocate and initialize a drm_device_dma structure.
*/
@@ -71,9 +71,9 @@ int drm_legacy_dma_setup(struct drm_device *dev)
}
/**
- * Cleanup the DMA resources.
+ * drm_legacy_dma_takedown() - Cleanup the DMA resources.
*
- * \param dev DRM device.
+ * @dev: DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
@@ -120,10 +120,10 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
}
/**
- * Free a buffer.
+ * drm_legacy_free_buffer() - Free a buffer.
*
- * \param dev DRM device.
- * \param buf buffer to free.
+ * @dev: DRM device.
+ * @buf: buffer to free.
*
* Resets the fields of \p buf.
*/
@@ -139,9 +139,10 @@ void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
}
/**
- * Reclaim the buffers.
+ * drm_legacy_reclaim_buffers() - Reclaim the buffers.
*
- * \param file_priv DRM file private.
+ * @dev: DRM device.
+ * @file_priv: DRM file private.
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a5364b5192b8..c6fbe6e6bc9d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -362,6 +362,65 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
+ * drm_dp_send_real_edid_checksum() - send back real edid checksum value
+ * @aux: DisplayPort AUX channel
+ * @real_edid_checksum: real edid checksum for the last block
+ *
+ * Returns:
+ * True on success
+ */
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum)
+{
+ u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
+
+ if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req, 1) < 1) {
+ DRM_ERROR("DPCD failed read at register 0x%x\n",
+ DP_DEVICE_SERVICE_IRQ_VECTOR);
+ return false;
+ }
+ auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
+
+ if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ DRM_ERROR("DPCD failed read at register 0x%x\n",
+ DP_TEST_REQUEST);
+ return false;
+ }
+ link_edid_read &= DP_TEST_LINK_EDID_READ;
+
+ if (!auto_test_req || !link_edid_read) {
+ DRM_DEBUG_KMS("Source DUT does not support TEST_EDID_READ\n");
+ return false;
+ }
+
+ if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_DEVICE_SERVICE_IRQ_VECTOR);
+ return false;
+ }
+
+ /* send back checksum for the last edid extension block data */
+ if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
+ &real_edid_checksum, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_TEST_EDID_CHECKSUM);
+ return false;
+ }
+
+ test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
+ if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_TEST_RESPONSE);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_send_real_edid_checksum);
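A hedged sketch of a caller, e.g. from a driver's HPD short-pulse handling
(the aux and connector variables are assumed; the helper itself verifies that
TEST_EDID_READ was actually requested before replying, as the code above
shows):

	/* real_edid_checksum is recorded by connector_bad_edid(). */
	if (!drm_dp_send_real_edid_checksum(aux,
					    connector->real_edid_checksum))
		DRM_DEBUG_KMS("EDID checksum reply failed\n");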
+
+/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
@@ -470,8 +529,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
- bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT;
+ bool branch_device = drm_dp_is_branch(dpcd);
seq_printf(m, "\tDP branch device present: %s\n",
branch_device ? "yes" : "no");
@@ -1222,6 +1280,85 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+struct edid_quirk {
+ u8 mfg_id[2];
+ u8 prod_id[2];
+ u32 quirks;
+};
+
+#define MFG(first, second) { (first), (second) }
+#define PROD_ID(first, second) { (first), (second) }
+
+/*
+ * Some devices have unreliable OUIDs where they don't set the device ID
+ * correctly, and as a result we need to use the EDID for finding additional
+ * DP quirks in such cases.
+ */
+static const struct edid_quirk edid_quirk_list[] = {
+ /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
+ * only supports DPCD backlight controls
+ */
+ { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ /*
+ * Some Dell CML 2020 systems have panels support both AUX and PWM
+ * backlight control, and some only support AUX backlight control. All
+ * said panels start up in AUX mode by default, and we don't have any
+ * support for disabling HDR mode on these panels which would be
+ * required to switch to PWM backlight control mode (plus, I'm not
+ * even sure we want PWM backlight controls over DPCD backlight
+ * controls anyway...). Until we have a better way of detecting these,
+ * force DPCD backlight mode on all of them.
+ */
+ { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+};
+
+#undef MFG
+#undef PROD_ID
+
+/**
+ * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
+ * DP-specific quirks
+ * @edid: The EDID to check
+ *
+ * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
+ * of manufacturers don't seem to like following standards and neglect to fill
+ * the dev-ID in, making it impossible to only use OUIDs for determining
+ * quirks in some cases. This function can be used to check the EDID and look
+ * up any additional DP quirks. The bits returned by this function correspond
+ * to the quirk bits in &drm_dp_quirk.
+ *
+ * Returns: a bitmask of quirks, if any. The driver can check this using
+ * drm_dp_has_quirk().
+ */
+u32 drm_dp_get_edid_quirks(const struct edid *edid)
+{
+ const struct edid_quirk *quirk;
+ u32 quirks = 0;
+ int i;
+
+ if (!edid)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+ if (memcmp(quirk->mfg_id, edid->mfg_id,
+ sizeof(edid->mfg_id)) == 0 &&
+ memcmp(quirk->prod_id, edid->prod_code,
+ sizeof(edid->prod_code)) == 0)
+ quirks |= quirk->quirks;
+ }
+
+ DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
+ (int)sizeof(edid->mfg_id), edid->mfg_id,
+ (int)sizeof(edid->prod_code), edid->prod_code, quirks);
+
+ return quirks;
+}
+EXPORT_SYMBOL(drm_dp_get_edid_quirks);
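A sketch of how this pairs with drm_dp_has_quirk(), whose signature gains an
edid_quirks argument later in this patch (desc and edid are assumed to have
been read already; use_dpcd_backlight is a hypothetical driver flag):

	u32 edid_quirks = drm_dp_get_edid_quirks(edid);

	if (drm_dp_has_quirk(&desc, edid_quirks,
			     DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
		/* Prefer AUX/DPCD backlight control on this panel. */
		use_dpcd_backlight = true;
	}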
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ed0fea2ac322..9d89ebf3a749 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -736,6 +736,10 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+ if (crc4 != msg->chunk[msg->curchunk_len - 1])
+ print_hex_dump(KERN_DEBUG, "wrong crc",
+ DUMP_PREFIX_NONE, 16, 1,
+ msg->chunk, msg->curchunk_len, false);
/* copy chunk into bigger msg */
memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
msg->curlen += msg->curchunk_len - 1;
@@ -1035,7 +1039,8 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
}
}
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
+ u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1045,17 +1050,14 @@ static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
req.u.dpcd_write.num_bytes = num_bytes;
req.u.dpcd_write.bytes = bytes;
drm_dp_encode_sideband_req(&req, msg);
-
- return 0;
}
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_LINK_ADDRESS;
drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
@@ -1067,7 +1069,8 @@ static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
return 0;
}
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
+ int port_num)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1078,10 +1081,11 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por
return 0;
}
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
- u8 vcpi, uint16_t pbn,
- u8 number_sdp_streams,
- u8 *sdp_stream_sink)
+static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
+ int port_num,
+ u8 vcpi, uint16_t pbn,
+ u8 number_sdp_streams,
+ u8 *sdp_stream_sink)
{
struct drm_dp_sideband_msg_req_body req;
memset(&req, 0, sizeof(req));
@@ -1094,11 +1098,10 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
number_sdp_streams);
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
- return 0;
}
-static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
- int port_num, bool power_up)
+static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+ int port_num, bool power_up)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1110,7 +1113,6 @@ static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
req.u.port_num.port_number = port_num;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
- return 0;
}
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
@@ -2061,7 +2063,7 @@ ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
* sideband messaging as drm_dp_dpcd_write() does for local
* devices via actual AUX CH.
*
- * Return: 0 on success, negative error code on failure.
+ * Return: number of bytes written on success, negative error code on failure.
*/
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size)
@@ -2073,29 +2075,27 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
offset, size, buffer);
}
-static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
- int ret;
+ int ret = 0;
memcpy(mstb->guid, guid, 16);
if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(
- mstb->mgr,
- mstb->port_parent,
- DP_GUID,
- 16,
- mstb->guid);
+ ret = drm_dp_send_dpcd_write(mstb->mgr,
+ mstb->port_parent,
+ DP_GUID, 16, mstb->guid);
} else {
-
- ret = drm_dp_dpcd_write(
- mstb->mgr->aux,
- DP_GUID,
- mstb->guid,
- 16);
+ ret = drm_dp_dpcd_write(mstb->mgr->aux,
+ DP_GUID, mstb->guid, 16);
}
}
+
+ if (ret < 16 && ret > 0)
+ return -EPROTO;
+
+ return ret == 16 ? 0 : ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2178,7 +2178,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
drm_connector_set_tile_property(port->connector);
}
- mgr->cbs->register_connector(port->connector);
+ drm_connector_register(port->connector);
return;
error:
@@ -2641,7 +2641,8 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
return false;
}
-static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
+ u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -2650,8 +2651,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
req.u.dpcd_read.dpcd_address = offset;
req.u.dpcd_read.num_bytes = num_bytes;
drm_dp_encode_sideband_req(&req, msg);
-
- return 0;
}
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
@@ -2877,7 +2876,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_link_address_ack_reply *reply;
struct drm_dp_mst_port *port, *tmp;
- int i, len, ret, port_mask = 0;
+ int i, ret, port_mask = 0;
bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2885,7 +2884,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- len = build_link_address(txmsg);
+ build_link_address(txmsg);
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -2906,7 +2905,15 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
drm_dp_dump_link_address(reply);
- drm_dp_check_mstb_guid(mstb, reply->guid);
+ ret = drm_dp_check_mstb_guid(mstb, reply->guid);
+ if (ret) {
+ char buf[64];
+
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
+ DRM_ERROR("GUID check on %s failed: %d\n",
+ buf, ret);
+ goto out;
+ }
for (i = 0; i < reply->nports; i++) {
port_mask |= BIT(reply->ports[i].port_number);
@@ -2947,14 +2954,14 @@ void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_sideband_msg_tx *txmsg;
- int len, ret;
+ int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return;
txmsg->dst = mstb;
- len = build_clear_payload_id_table(txmsg);
+ build_clear_payload_id_table(txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -2972,7 +2979,6 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_enum_path_resources_ack_reply *path_res;
struct drm_dp_sideband_msg_tx *txmsg;
- int len;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2980,7 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- len = build_enum_path_resources(txmsg, port->port_num);
+ build_enum_path_resources(txmsg, port->port_num);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3073,7 +3079,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- int len, ret, port_num;
+ int ret, port_num;
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
@@ -3098,9 +3104,9 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
sinks[i] = i;
txmsg->dst = mstb;
- len = build_allocate_payload(txmsg, port_num,
- id,
- pbn, port->num_sdp_streams, sinks);
+ build_allocate_payload(txmsg, port_num,
+ id,
+ pbn, port->num_sdp_streams, sinks);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3129,7 +3135,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up)
{
struct drm_dp_sideband_msg_tx *txmsg;
- int len, ret;
+ int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
@@ -3142,7 +3148,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
}
txmsg->dst = port->parent;
- len = build_power_updown_phy(txmsg, port->port_num, power_up);
+ build_power_updown_phy(txmsg, port->port_num, power_up);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
@@ -3364,7 +3370,6 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
- int len;
int ret = 0;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
@@ -3379,7 +3384,7 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put;
}
- len = build_dpcd_read(txmsg, port->port_num, offset, size);
+ build_dpcd_read(txmsg, port->port_num, offset, size);
txmsg->dst = port->parent;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3417,7 +3422,6 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
- int len;
int ret;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
@@ -3432,7 +3436,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put;
}
- len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+ build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
txmsg->dst = mstb;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3442,8 +3446,9 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
ret = -EIO;
else
- ret = 0;
+ ret = size;
}
+
kfree(txmsg);
fail_put:
drm_dp_mst_topology_put_mstb(mstb);
@@ -3504,9 +3509,9 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
- int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
+ mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@@ -3514,6 +3519,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_state = mst_state;
/* set the device into MST mode */
if (mst_state) {
+ struct drm_dp_payload reset_pay;
+
WARN_ON(mgr->mst_primary);
/* get dpcd info */
@@ -3543,17 +3550,15 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0)
goto out_unlock;
- }
- {
- struct drm_dp_payload reset_pay;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
- }
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
queue_work(system_long_wq, &mgr->work);
@@ -3565,27 +3570,19 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
- mutex_lock(&mgr->payload_lock);
- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ memset(mgr->payloads, 0,
+ mgr->max_payloads * sizeof(mgr->payloads[0]));
+ memset(mgr->proposed_vcpis, 0,
+ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
- for (i = 0; i < mgr->max_payloads; i++) {
- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
-
- if (vcpi) {
- vcpi->vcpi = 0;
- vcpi->num_slots = 0;
- }
- mgr->proposed_vcpis[i] = NULL;
- }
mgr->vcpi_mask = 0;
- mutex_unlock(&mgr->payload_lock);
-
mgr->payload_id_table_cleared = false;
}
out_unlock:
mutex_unlock(&mgr->lock);
+ mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
return ret;
@@ -3682,7 +3679,12 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
+ ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ if (ret) {
+ DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
+ goto out_fail;
+ }
/*
* For the final step of resuming the topology, we need to bring the
@@ -3709,7 +3711,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
- int replylen, origlen, curreply;
+ int replylen, curreply;
int ret;
struct drm_dp_sideband_msg_rx *msg;
int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
@@ -3729,7 +3731,6 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
- origlen = replylen;
replylen -= len;
curreply = len;
while (replylen > 0) {
@@ -4298,6 +4299,7 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
if (pos->vcpi) {
drm_dp_mst_put_port_malloc(port);
pos->vcpi = 0;
+ pos->pbn = 0;
}
return 0;
@@ -4625,15 +4627,34 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
int ret;
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+ if (ret) {
+ seq_printf(m, "dpcd read failed\n");
+ goto out;
+ }
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
+
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret) {
+ seq_printf(m, "faux/mst read failed\n");
+ goto out;
+ }
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
+
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret) {
+ seq_printf(m, "mst ctrl read failed\n");
+ goto out;
+ }
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret) {
+ seq_printf(m, "branch oui read failed\n");
+ goto out;
+ }
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
+
for (i = 0x3; i < 0x8 && buf[i]; i++)
seq_printf(m, "%c", buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
@@ -4642,6 +4663,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
}
+out:
mutex_unlock(&mgr->lock);
}
@@ -4657,11 +4679,23 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
+static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+{
+ if (!port->connector)
+ return;
+
+ if (port->mgr->cbs->destroy_connector) {
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+ } else {
+ drm_connector_unregister(port->connector);
+ drm_connector_put(port->connector);
+ }
+}
+
static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- if (port->connector)
- port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+ drm_dp_destroy_connector(port);
drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
@@ -5529,7 +5563,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ if (drm_dp_has_quirk(&desc, 0,
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7c18a980cd4b..7b1a628d1f6e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -946,7 +946,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
struct drm_driver *driver = dev->driver;
int ret;
- mutex_lock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
ret = drm_minor_register(dev, DRM_MINOR_RENDER);
if (ret)
@@ -986,7 +987,8 @@ err_minors:
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
- mutex_unlock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_register);
@@ -1079,17 +1081,14 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
DRM_DEBUG("\n");
- mutex_lock(&drm_global_mutex);
minor = drm_minor_acquire(iminor(inode));
- if (IS_ERR(minor)) {
- err = PTR_ERR(minor);
- goto out_unlock;
- }
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
new_fops = fops_get(minor->dev->driver->fops);
if (!new_fops) {
err = -ENODEV;
- goto out_release;
+ goto out;
}
replace_fops(filp, new_fops);
@@ -1098,10 +1097,9 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
else
err = 0;
-out_release:
+out:
drm_minor_release(minor);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
+
return err;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 805fb004c8eb..d96e3ce3e535 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -191,10 +191,11 @@ static const struct edid_quirk {
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
- /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+ /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
{ "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
{ "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
/* Windows Mixed Reality Headsets */
{ "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
@@ -1590,11 +1591,22 @@ static int validate_displayid(u8 *displayid, int length, int idx);
static int drm_edid_block_checksum(const u8 *raw_edid)
{
int i;
- u8 csum = 0;
- for (i = 0; i < EDID_LENGTH; i++)
+ u8 csum = 0, crc = 0;
+
+ for (i = 0; i < EDID_LENGTH - 1; i++)
csum += raw_edid[i];
- return csum;
+ crc = 0x100 - csum;
+
+ return crc;
+}
+
+static bool drm_edid_block_checksum_diff(const u8 *raw_edid, u8 real_checksum)
+{
+ if (raw_edid[EDID_LENGTH - 1] != real_checksum)
+ return true;
+ else
+ return false;
}
static bool drm_edid_is_zero(const u8 *in_edid, int length)
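
The checksum rework above encodes the EDID rule that all 128 bytes of a block, including the final checksum byte, must sum to 0 modulo 256: the helper now computes the expected value from the first 127 bytes, and the new _diff() helper compares it against byte 127. A minimal standalone sketch of the same rule (names here are illustrative, not kernel API):

	#include <stdbool.h>
	#include <stdint.h>

	#define EDID_LENGTH 128

	/* Expected checksum: the value that makes all 128 bytes sum to 0 (mod 256). */
	static uint8_t edid_expected_checksum(const uint8_t *block)
	{
		uint8_t csum = 0;
		int i;

		for (i = 0; i < EDID_LENGTH - 1; i++)
			csum += block[i];
		return (uint8_t)(0x100 - csum);
	}

	static bool edid_block_valid(const uint8_t *block)
	{
		return block[EDID_LENGTH - 1] == edid_expected_checksum(block);
	}
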
@@ -1652,7 +1664,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
}
csum = drm_edid_block_checksum(raw_edid);
- if (csum) {
+ if (drm_edid_block_checksum_diff(raw_edid, csum)) {
if (edid_corrupt)
*edid_corrupt = true;
@@ -1793,6 +1805,11 @@ static void connector_bad_edid(struct drm_connector *connector,
u8 *edid, int num_blocks)
{
int i;
+ u8 num_of_ext = edid[0x7e];
+
+	/* Calculate the real checksum for the last EDID extension block data */

+ connector->real_edid_checksum =
+ drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
@@ -2196,15 +2213,29 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_find_dmt);
+static bool is_display_descriptor(const u8 d[18], u8 tag)
+{
+ return d[0] == 0x00 && d[1] == 0x00 &&
+ d[2] == 0x00 && d[3] == tag;
+}
+
+static bool is_detailed_timing_descriptor(const u8 d[18])
+{
+ return d[0] != 0x00 || d[1] != 0x00;
+}
+
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
- int i, n = 0;
+ int i, n;
u8 d = ext[0x02];
u8 *det_base = ext + d;
+ if (d < 4 || d > 127)
+ return;
+
n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
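
is_display_descriptor() and is_detailed_timing_descriptor() above capture the EDID layout rule for the 18-byte data fields: a field whose first two bytes (the pixel clock) are zero is a display descriptor identified by the tag in byte 3, while a non-zero pixel clock marks a detailed timing descriptor. A hedged sketch of a dispatcher built on that rule (tag values are from EDID 1.4; the function itself is illustrative):

	/* EDID 1.4 display descriptor tags (subset). */
	#define EDID_DETAIL_MONITOR_RANGE 0xfd
	#define EDID_DETAIL_MONITOR_NAME  0xfc

	static void parse_18_byte_field(const uint8_t d[18])
	{
		if (d[0] || d[1]) {
			/* Non-zero pixel clock: detailed timing descriptor. */
			return;
		}
		switch (d[3]) {
		case EDID_DETAIL_MONITOR_RANGE:
			/* Bytes 5 and 6 hold min/max vertical refresh. */
			break;
		case EDID_DETAIL_MONITOR_NAME:
			/* Bytes 5..17 hold the monitor name string. */
			break;
		}
	}
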
@@ -2254,9 +2285,12 @@ static void
is_rb(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE)
- if (r[15] & 0x10)
- *(bool *)data = true;
+
+ if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ if (r[15] & 0x10)
+ *(bool *)data = true;
}
/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
@@ -2276,7 +2310,11 @@ static void
find_gtf2(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+
+ if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ if (r[10] == 0x02)
*(u8 **)data = r;
}
@@ -2815,13 +2853,13 @@ do_inferred_modes(struct detailed_timing *timing, void *c)
struct detailed_non_pixel *data = &timing->data.other_data;
struct detailed_data_monitor_range *range = &data->data.range;
- if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
return;
closure->modes += drm_dmt_modes_for_range(closure->connector,
closure->edid,
timing);
-
+
if (!version_greater(closure->edid, 1, 1))
return; /* GTF not defined yet */
@@ -2894,10 +2932,11 @@ static void
do_established_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- if (data->type == EDID_DETAIL_EST_TIMINGS)
- closure->modes += drm_est3_modes(closure->connector, timing);
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_EST_TIMINGS))
+ return;
+
+ closure->modes += drm_est3_modes(closure->connector, timing);
}
/**
@@ -2946,19 +2985,19 @@ do_standard_modes(struct detailed_timing *timing, void *c)
struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_connector *connector = closure->connector;
struct edid *edid = closure->edid;
+ int i;
- if (data->type == EDID_DETAIL_STD_MODES) {
- int i;
- for (i = 0; i < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_STD_MODES))
+ return;
- std = &data->data.timings[i];
- newmode = drm_mode_std(connector, edid, std);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- closure->modes++;
- }
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std = &data->data.timings[i];
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid, std);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
}
}
}
@@ -3053,15 +3092,16 @@ static void
do_cvt_mode(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- if (data->type == EDID_DETAIL_CVT_3BYTE)
- closure->modes += drm_cvt_modes(closure->connector, timing);
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_CVT_3BYTE))
+ return;
+
+ closure->modes += drm_cvt_modes(closure->connector, timing);
}
static int
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
-{
+{
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
@@ -3083,27 +3123,28 @@ do_detailed_mode(struct detailed_timing *timing, void *c)
struct detailed_mode_closure *closure = c;
struct drm_display_mode *newmode;
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(closure->connector->dev,
- closure->edid, timing,
- closure->quirks);
- if (!newmode)
- return;
+ if (!is_detailed_timing_descriptor((const u8 *)timing))
+ return;
- if (closure->preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
- /*
- * Detailed modes are limited to 10kHz pixel clock resolution,
- * so fix up anything that looks like CEA/HDMI mode, but the clock
- * is just slightly off.
- */
- fixup_detailed_cea_mode_clock(newmode);
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(closure->connector, newmode);
- closure->modes++;
- closure->preferred = false;
- }
+ /*
+ * Detailed modes are limited to 10kHz pixel clock resolution,
+ * so fix up anything that looks like CEA/HDMI mode, but the clock
+ * is just slightly off.
+ */
+ fixup_detailed_cea_mode_clock(newmode);
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = false;
}
/*
@@ -3953,6 +3994,13 @@ cea_db_tag(const u8 *db)
static int
cea_revision(const u8 *cea)
{
+ /*
+ * FIXME is this correct for the DispID variant?
+ * The DispID spec doesn't really specify whether
+ * this is the revision of the CEA extension or
+ * the DispID CEA data block. And the only value
+ * given as an example is 0.
+ */
return cea[1];
}
@@ -3977,6 +4025,10 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
* no non-DTD data.
*/
if (cea[0] == DATA_BLOCK_CTA) {
+ /*
+ * for_each_displayid_db() has already verified
+ * that these stay within expected bounds.
+ */
*start = 3;
*end = *start + cea[2];
} else if (cea[0] == CEA_EXT) {
@@ -4282,8 +4334,10 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
static void
monitor_name(struct detailed_timing *t, void *data)
{
- if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
- *(u8 **)data = t->data.other_data.data.str.str;
+ if (!is_display_descriptor((const u8 *)t, EDID_DETAIL_MONITOR_NAME))
+ return;
+
+ *(u8 **)data = t->data.other_data.data.str.str;
}
static int get_monitor_name(struct edid *edid, char name[13])
@@ -4316,7 +4370,7 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize)
{
int name_length;
char buf[13];
-
+
if (bufsize <= 0)
return;
@@ -4381,6 +4435,7 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
if (cea_revision(cea) >= 3) {
int i, start, end;
+ int sad_count;
if (cea_db_offsets(cea, &start, &end)) {
start = 0;
@@ -4392,8 +4447,6 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
- int sad_count;
-
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = min(dbl / 3, 15 - total_sad_count);
@@ -4594,6 +4647,9 @@ EXPORT_SYMBOL(drm_av_sync_delay);
*
* Parse the CEA extension according to CEA-861-B.
*
+ * Drivers that have added the modes parsed from EDID to drm_display_info
+ * should use &drm_display_info.is_hdmi instead of calling this function.
+ *
* Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
@@ -4828,6 +4884,8 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
struct drm_display_info *info = &connector->display_info;
u8 len = cea_db_payload_len(db);
+ info->is_hdmi = true;
+
if (len >= 6)
info->dvi_dual = db[6] & 1;
if (len >= 7)
@@ -4880,6 +4938,47 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
}
+static
+void get_monitor_range(struct detailed_timing *timing,
+ void *info_monitor_range)
+{
+ struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ const struct detailed_non_pixel *data = &timing->data.other_data;
+ const struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ /*
+	 * Check for the range-limits-only flag. If flags == 1, then no
+	 * additional timing information is provided. The default GTF,
+	 * the GTF secondary curve and CVT are not supported.
+ */
+ if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
+ return;
+
+ monitor_range->min_vfreq = range->min_vfreq;
+ monitor_range->max_vfreq = range->max_vfreq;
+}
+
+static
+void drm_get_monitor_range(struct drm_connector *connector,
+ const struct edid *edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ if (!version_greater(edid, 1, 1))
+ return;
+
+ drm_for_each_detailed_block((u8 *)edid, get_monitor_range,
+ &info->monitor_range);
+
+ DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
+ info->monitor_range.min_vfreq,
+ info->monitor_range.max_vfreq);
+}
+
/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
* all of the values which would have been set from EDID
*/
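
drm_get_monitor_range() only honors range descriptors whose flags byte reports "range limits only", i.e. no GTF/CVT secondary timing data, and stores the vertical refresh bounds in &drm_display_info.monitor_range. A hedged sketch of how a consumer might use the result, for instance to gate variable-refresh support (the threshold logic is illustrative, not from this series):

	/* A panel advertises a usable refresh window only if both bounds
	 * were filled in from EDID and the window is non-empty. */
	static bool panel_has_vrr_range(const struct drm_display_info *info)
	{
		const struct drm_monitor_range_info *r = &info->monitor_range;

		return r->min_vfreq > 0 && r->max_vfreq > r->min_vfreq;
	}
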
@@ -4896,11 +4995,13 @@ drm_reset_display_info(struct drm_connector *connector)
info->cea_rev = 0;
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->is_hdmi = false;
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
info->non_desktop = 0;
+ memset(&info->monitor_range, 0, sizeof(info->monitor_range));
}
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
@@ -4916,6 +5017,8 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+ drm_get_monitor_range(connector, edid);
+
DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
if (edid->revision < 3)
@@ -5009,7 +5112,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
struct drm_display_mode *mode;
unsigned pixel_clock = (timings->pixel_clock[0] |
(timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16));
+ (timings->pixel_clock[2] << 16)) + 1;
unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
@@ -5396,14 +5499,11 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
{
enum hdmi_picture_aspect picture_aspect;
u8 vic, hdmi_vic;
- int err;
if (!frame || !mode)
return -EINVAL;
- err = hdmi_avi_infoframe_init(frame);
- if (err < 0)
- return err;
+ hdmi_avi_infoframe_init(frame);
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4c7cbce7bae7..a9771de4d17e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -250,17 +250,7 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
- /*
- * TODO:
- * We should bail out here if there is a master by dropping _force.
- * Currently these igt tests fail if we do that:
- * - kms_fbcon_fbt@psr
- * - kms_fbcon_fbt@psr-suspend
- *
- * So first these tests need to be fixed so they drop master or don't
- * have an fd open.
- */
- ret = drm_client_modeset_commit_force(&fb_helper->client);
+ ret = drm_client_modeset_commit(&fb_helper->client);
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
@@ -294,7 +284,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
continue;
mutex_lock(&helper->lock);
- ret = drm_client_modeset_commit_force(&helper->client);
+ ret = drm_client_modeset_commit_locked(&helper->client);
if (ret)
error = true;
mutex_unlock(&helper->lock);
@@ -460,7 +450,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* drm_fb_helper_init - initialize a &struct drm_fb_helper
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
- * @max_conn_count: max connector count (not used)
*
* This allocates the structures for the fbdev helper with the given limits.
* Note that this won't yet touch the hardware (through the driver interfaces)
@@ -473,8 +462,7 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* Zero if everything went ok, nonzero otherwise.
*/
int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- int max_conn_count)
+ struct drm_fb_helper *fb_helper)
{
int ret;
@@ -1357,7 +1345,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
pan_set(fb_helper, var->xoffset, var->yoffset);
- ret = drm_client_modeset_commit_force(&fb_helper->client);
+ ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
@@ -2135,7 +2123,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, 0);
+ ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 92d16724f949..eb009d3ab48f 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -48,9 +48,45 @@
#include "drm_internal.h"
#include "drm_legacy.h"
+#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#include <uapi/asm/mman.h>
+#include <drm/drm_vma_manager.h>
+#endif
+
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
+bool drm_dev_needs_global_mutex(struct drm_device *dev)
+{
+ /*
+ * Legacy drivers rely on all kinds of BKL locking semantics, don't
+ * bother. They also still need BKL locking for their ioctls, so better
+ * safe than sorry.
+ */
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ return true;
+
+ /*
+ * The deprecated ->load callback must be called after the driver is
+ * already registered. This means such drivers rely on the BKL to make
+ * sure an open can't proceed until the driver is actually fully set up.
+ * Similar hilarity holds for the unload callback.
+ */
+ if (dev->driver->load || dev->driver->unload)
+ return true;
+
+ /*
+ * Drivers with the lastclose callback assume that it's synchronized
+ * against concurrent opens, which again needs the BKL. The proper fix
+ * is to use the drm_client infrastructure with proper locking for each
+ * client.
+ */
+ if (dev->driver->lastclose)
+ return true;
+
+ return false;
+}
+
/**
* DOC: file operations
*
@@ -220,7 +256,7 @@ void drm_file_free(struct drm_file *file)
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
(long)old_encode_dev(file->minor->kdev->devt),
- dev->open_count);
+ atomic_read(&dev->open_count));
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
dev->driver->preclose)
@@ -379,7 +415,10 @@ int drm_open(struct inode *inode, struct file *filp)
return PTR_ERR(minor);
dev = minor->dev;
- if (!dev->open_count++)
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
+
+ if (!atomic_fetch_inc(&dev->open_count))
need_setup = 1;
/* share address_space across all char-devs of a single device */
@@ -395,10 +434,16 @@ int drm_open(struct inode *inode, struct file *filp)
goto err_undo;
}
}
+
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
+
return 0;
err_undo:
- dev->open_count--;
+ atomic_dec(&dev->open_count);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
return retcode;
}
@@ -438,16 +483,18 @@ int drm_release(struct inode *inode, struct file *filp)
struct drm_minor *minor = file_priv->minor;
struct drm_device *dev = minor->dev;
- mutex_lock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
- DRM_DEBUG("open_count = %d\n", dev->open_count);
+ DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
drm_close_helper(filp);
- if (!--dev->open_count)
+ if (atomic_dec_and_test(&dev->open_count))
drm_lastclose(dev);
- mutex_unlock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
@@ -456,6 +503,40 @@ int drm_release(struct inode *inode, struct file *filp)
EXPORT_SYMBOL(drm_release);
/**
+ * drm_release_noglobal - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
+ *
+ * This function may be used by drivers as their &file_operations.release
+ * method. It frees any resources associated with the open file prior to taking
+ * the drm_global_mutex, which then calls the &drm_driver.postclose driver
+ * callback. If this is the last open file for the DRM device also proceeds to
+ * call the &drm_driver.lastclose driver callback.
+ *
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
+ */
+int drm_release_noglobal(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
+
+ drm_close_helper(filp);
+
+ if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
+ drm_lastclose(dev);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ drm_minor_release(minor);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_release_noglobal);
+
+/**
* drm_read - read method for DRM file
* @filp: file pointer
* @buffer: userspace destination pointer for the read
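
drm_release_noglobal() keeps the common close path lock-free: atomic_dec_and_mutex_lock() only takes drm_global_mutex when the reference actually drops to zero, which is the sole case where drm_lastclose() must be serialized. A driver opts in by pointing its file_operations at the new helper; a hedged sketch (the fops table is illustrative):

	static const struct file_operations foo_driver_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release_noglobal, /* lock-free unless last close */
		.unlocked_ioctl	= drm_ioctl,
		.mmap		= drm_gem_mmap,
		.poll		= drm_poll,
		.read		= drm_read,
	};
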
@@ -796,3 +877,139 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
+
+#ifdef CONFIG_MMU
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * drm_addr_inflate() attempts to construct an aligned area by inflating
+ * the area size and skipping the unaligned start of the area.
+ * Adapted from shmem_get_unmapped_area().
+ */
+static unsigned long drm_addr_inflate(unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags,
+ unsigned long huge_size)
+{
+ unsigned long offset, inflated_len;
+ unsigned long inflated_addr;
+ unsigned long inflated_offset;
+
+ offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
+ if (offset && offset + len < 2 * huge_size)
+ return addr;
+ if ((addr & (huge_size - 1)) == offset)
+ return addr;
+
+ inflated_len = len + huge_size - PAGE_SIZE;
+ if (inflated_len > TASK_SIZE)
+ return addr;
+ if (inflated_len < len)
+ return addr;
+
+ inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
+ 0, flags);
+ if (IS_ERR_VALUE(inflated_addr))
+ return addr;
+ if (inflated_addr & ~PAGE_MASK)
+ return addr;
+
+ inflated_offset = inflated_addr & (huge_size - 1);
+ inflated_addr += offset - inflated_offset;
+ if (inflated_offset > offset)
+ inflated_addr += huge_size;
+
+ if (inflated_addr > TASK_SIZE - len)
+ return addr;
+
+ return inflated_addr;
+}
+
+/**
+ * drm_get_unmapped_area() - Get an unused user-space virtual memory area
+ * suitable for huge page table entries.
+ * @file: The struct file representing the address space being mmap()'d.
+ * @uaddr: Start address suggested by user-space.
+ * @len: Length of the area.
+ * @pgoff: The page offset into the address space.
+ * @flags: mmap flags
+ * @mgr: The address space manager used by the drm driver. This argument can
+ * probably be removed at some point when all drivers use the same
+ * address space manager.
+ *
+ * This function attempts to find an unused user-space virtual memory area
+ * that can accommodate the size we want to map, and that is properly
+ * aligned to facilitate huge page table entries matching actual
+ * huge pages or huge page aligned memory in buffer objects. Buffer objects
+ * are assumed to start at huge page boundary pfns (io memory) or be
+ * populated by huge pages aligned to the start of the buffer object
+ * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
+ *
+ * Return: aligned user-space address.
+ */
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr)
+{
+ unsigned long addr;
+ unsigned long inflated_addr;
+ struct drm_vma_offset_node *node;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /*
+ * @pgoff is the file page-offset the huge page boundaries of
+ * which typically aligns to physical address huge page boundaries.
+ * That's not true for DRM, however, where physical address huge
+ * page boundaries instead are aligned with the offset from
+ * buffer object start. So adjust @pgoff to be the offset from
+ * buffer object start.
+ */
+ drm_vma_offset_lock_lookup(mgr);
+ node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
+ if (node)
+ pgoff -= node->vm_node.start;
+ drm_vma_offset_unlock_lookup(mgr);
+
+ addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ if (addr & ~PAGE_MASK)
+ return addr;
+ if (addr > TASK_SIZE - len)
+ return addr;
+
+ if (len < HPAGE_PMD_SIZE)
+ return addr;
+ if (flags & MAP_FIXED)
+ return addr;
+ /*
+ * Our priority is to support MAP_SHARED mapped hugely;
+ * and support MAP_PRIVATE mapped hugely too, until it is COWed.
+ * But if caller specified an address hint, respect that as before.
+ */
+ if (uaddr)
+ return addr;
+
+ inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
+ HPAGE_PMD_SIZE);
+
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
+ len >= HPAGE_PUD_SIZE)
+ inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
+ flags, HPAGE_PUD_SIZE);
+ return inflated_addr;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr)
+{
+ return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
+#endif /* CONFIG_MMU */
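
The inflation trick in drm_addr_inflate() is to ask for len + huge_size - PAGE_SIZE, then slide the returned address forward so that its offset within a huge page equals the buffer-object-relative offset, which lets the fault handler insert PMD/PUD entries. The core arithmetic, extracted as a hedged standalone sketch:

	/* Shift an address inside an inflated area so that
	 * (addr & (huge_size - 1)) matches the wanted offset. */
	static unsigned long align_in_inflated_area(unsigned long inflated_addr,
						    unsigned long offset,
						    unsigned long huge_size)
	{
		unsigned long inflated_offset = inflated_addr & (huge_size - 1);
		unsigned long addr = inflated_addr + offset - inflated_offset;

		if (inflated_offset > offset)
			addr += huge_size;
		return addr;
	}
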
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 0897cb9aeaff..3b818f2b2392 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright (C) 2016 Noralf Trønnes
*
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57564318ceea..57ac94ce9b9e 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -31,6 +31,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
@@ -548,7 +549,128 @@ int drm_mode_getfb(struct drm_device *dev,
out:
drm_framebuffer_put(fb);
+ return ret;
+}
+
+/**
+ * drm_mode_getfb2 - get extended FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
+ if (!fb)
+ return -ENOENT;
+
+ /* For multi-plane framebuffers, we require the driver to place the
+ * GEM objects directly in the drm_framebuffer. For single-plane
+ * framebuffers, we can fall back to create_handle.
+ */
+ if (!fb->obj[0] &&
+ (fb->format->num_planes > 1 || !fb->funcs->create_handle)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->pixel_format = fb->format->format;
+
+ r->flags = 0;
+ if (dev->mode_config.allow_fb_modifiers)
+ r->flags |= DRM_MODE_FB_MODIFIERS;
+
+ for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+ r->handles[i] = 0;
+ r->pitches[i] = 0;
+ r->offsets[i] = 0;
+ r->modifier[i] = 0;
+ }
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ r->pitches[i] = fb->pitches[i];
+ r->offsets[i] = fb->offsets[i];
+ if (dev->mode_config.allow_fb_modifiers)
+ r->modifier[i] = fb->modifier;
+ }
+
+ /* GET_FB2() is an unprivileged ioctl so we must not return a
+ * buffer-handle to non master/root processes! To match GET_FB()
+ * just return invalid handles (0) for non masters/root
+ * rather than making GET_FB2() privileged.
+ */
+ if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
+ ret = 0;
+ goto out;
+ }
+ for (i = 0; i < fb->format->num_planes; i++) {
+ int j;
+
+ /* If we reuse the same object for multiple planes, also
+ * return the same handle.
+ */
+ for (j = 0; j < i; j++) {
+ if (fb->obj[i] == fb->obj[j]) {
+ r->handles[i] = r->handles[j];
+ break;
+ }
+ }
+
+ if (r->handles[i])
+ continue;
+
+ if (fb->obj[i]) {
+ ret = drm_gem_handle_create(file_priv, fb->obj[i],
+ &r->handles[i]);
+ } else {
+ WARN_ON(i > 0);
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handles[i]);
+ }
+
+ if (ret != 0)
+ goto out;
+ }
+
+out:
+ if (ret != 0) {
+ /* Delete any previously-created handles on failure. */
+ for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+ int j;
+
+ if (r->handles[i])
+ drm_gem_handle_delete(file_priv, r->handles[i]);
+
+ /* Zero out any handles identical to the one we just
+ * deleted.
+ */
+ for (j = i + 1; j < ARRAY_SIZE(r->handles); j++) {
+ if (r->handles[j] == r->handles[i])
+ r->handles[j] = 0;
+ }
+ }
+ }
+
+ drm_framebuffer_put(fb);
return ret;
}
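
GETFB2 hands back per-plane pitches, offsets and modifiers for everyone, but GEM handles only for master/CAP_SYS_ADMIN callers, and those handles are newly created, so the caller owns them and must close them. A hedged user-space sketch against libdrm (assumes a uapi header that already defines DRM_IOCTL_MODE_GETFB2; error handling trimmed, a repeated handle just makes the second close fail harmlessly):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>

	static int dump_fb2(int fd, uint32_t fb_id)
	{
		struct drm_mode_fb_cmd2 r;
		unsigned int i;

		memset(&r, 0, sizeof(r));
		r.fb_id = fb_id;
		if (drmIoctl(fd, DRM_IOCTL_MODE_GETFB2, &r))
			return -1;

		printf("%ux%u fourcc %.4s pitch0 %u modifier0 0x%llx\n",
		       r.width, r.height, (char *)&r.pixel_format,
		       r.pitches[0], (unsigned long long)r.modifier[0]);

		/* Handles are only non-zero for master/root and are owned
		 * by us now: close them to avoid leaking the objects. */
		for (i = 0; i < 4; i++) {
			if (r.handles[i]) {
				struct drm_gem_close gc = { .handle = r.handles[i] };
				drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gc);
			}
		}
		return 0;
	}
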
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a9e4a610445a..37627d06fb06 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -218,7 +218,7 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
bool final = false;
- if (WARN_ON(obj->handle_count == 0))
+ if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
return;
/*
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index a4863326061a..92a11bb42365 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1141,3 +1141,64 @@ void drm_vram_helper_release_mm(struct drm_device *dev)
dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);
+
+/*
+ * Mode-config helpers
+ */
+
+static enum drm_mode_status
+drm_vram_helper_mode_valid_internal(struct drm_device *dev,
+ const struct drm_display_mode *mode,
+ unsigned long max_bpp)
+{
+ struct drm_vram_mm *vmm = dev->vram_mm;
+ unsigned long fbsize, fbpages, max_fbpages;
+
+ if (WARN_ON(!dev->vram_mm))
+ return MODE_BAD;
+
+ max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
+
+ fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+ if (fbpages > max_fbpages)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+/**
+ * drm_vram_helper_mode_valid - Tests if a display mode's
+ * framebuffer fits into the available video memory.
+ * @dev: the DRM device
+ * @mode: the mode to test
+ *
+ * This function tests if enough video memory is available for using the
+ * specified display mode. Atomic modesetting requires importing the
+ * designated framebuffer into video memory before evicting the active
+ * one. Hence, any framebuffer may consume at most half of the available
+ * VRAM. Display modes that require a larger framebuffer cannot be used,
+ * even if the CRTC does support them. Each framebuffer is assumed to
+ * have 32-bit color depth.
+ *
+ * Note:
+ * The function can only test if the display mode is supported in
+ * general. If there are too many framebuffers pinned to video memory,
+ * a display mode may still not be usable in practice. The color depth
+ * of 32 bit fits all current use cases. A more flexible test can be
+ * added when necessary.
+ *
+ * Returns:
+ * MODE_OK if the display mode is supported, or an error code of type
+ * enum drm_mode_status otherwise.
+ */
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+
+ return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
+}
+EXPORT_SYMBOL(drm_vram_helper_mode_valid);
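
As a worked example of the test: a 1920x1080 mode at the assumed 4 bytes per pixel needs 1920 * 1080 * 4 = 8294400 bytes, i.e. 2025 pages with 4 KiB pages; since any one framebuffer may occupy at most half of VRAM, the device needs at least 4050 pages (~15.8 MiB) of VRAM for the mode to pass. A hedged sketch of wiring the helper into a connector (the wrapper and names are illustrative):

	static enum drm_mode_status
	foo_connector_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
	{
		return drm_vram_helper_mode_valid(connector->dev, mode);
	}

	static const struct drm_connector_helper_funcs foo_connector_helpers = {
		.get_modes  = foo_connector_get_modes, /* hypothetical */
		.mode_valid = foo_connector_mode_valid,
	};
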
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index 9191633a3c43..910108ccaae1 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -23,14 +23,6 @@
#include "drm_internal.h"
-static struct hdcp_srm {
- u32 revoked_ksv_cnt;
- u8 *revoked_ksv_list;
-
- /* Mutex to protect above struct member */
- struct mutex mutex;
-} *srm_data;
-
static inline void drm_hdcp_print_ksv(const u8 *ksv)
{
DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
@@ -60,11 +52,11 @@ static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
return ksv_count;
}
-static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
+static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 **revoked_ksv_list,
u32 vrls_length)
{
- u32 parsed_bytes = 0, ksv_count = 0;
u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
+ u32 parsed_bytes = 0, ksv_count = 0;
do {
vrl_ksv_cnt = *buf;
@@ -74,10 +66,10 @@ static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
vrl_ksv_cnt);
- memcpy(revoked_ksv_list, buf, vrl_ksv_sz);
+ memcpy((*revoked_ksv_list) + (ksv_count * DRM_HDCP_KSV_LEN),
+ buf, vrl_ksv_sz);
ksv_count += vrl_ksv_cnt;
- revoked_ksv_list += vrl_ksv_sz;
buf += vrl_ksv_sz;
parsed_bytes += (vrl_ksv_sz + 1);
@@ -91,7 +83,8 @@ static inline u32 get_vrl_length(const u8 *buf)
return drm_hdcp_be24_to_cpu(buf);
}
-static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count;
@@ -131,29 +124,28 @@ static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
- return count;
+ return 0;
}
- kfree(srm_data->revoked_ksv_list);
- srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
- GFP_KERNEL);
- if (!srm_data->revoked_ksv_list) {
+ *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
- if (drm_hdcp_get_revoked_ksvs(buf, srm_data->revoked_ksv_list,
+ if (drm_hdcp_get_revoked_ksvs(buf, revoked_ksv_list,
vrl_length) != ksv_count) {
- srm_data->revoked_ksv_cnt = 0;
- kfree(srm_data->revoked_ksv_list);
+ *revoked_ksv_cnt = 0;
+ kfree(*revoked_ksv_list);
return -EINVAL;
}
- srm_data->revoked_ksv_cnt = ksv_count;
- return count;
+ *revoked_ksv_cnt = ksv_count;
+ return 0;
}
-static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count, ksv_sz;
@@ -195,13 +187,11 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
- return count;
+ return 0;
}
- kfree(srm_data->revoked_ksv_list);
- srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
- GFP_KERNEL);
- if (!srm_data->revoked_ksv_list) {
+ *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
@@ -210,10 +200,10 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
- memcpy(srm_data->revoked_ksv_list, buf, ksv_sz);
+ memcpy(*revoked_ksv_list, buf, ksv_sz);
- srm_data->revoked_ksv_cnt = ksv_count;
- return count;
+ *revoked_ksv_cnt = ksv_count;
+ return 0;
}
static inline bool is_srm_version_hdcp1(const u8 *buf)
@@ -226,34 +216,45 @@ static inline bool is_srm_version_hdcp2(const u8 *buf)
return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
}
-static void drm_hdcp_srm_update(const u8 *buf, size_t count)
+static int drm_hdcp_srm_update(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
if (count < sizeof(struct hdcp_srm_header))
- return;
+ return -EINVAL;
if (is_srm_version_hdcp1(buf))
- drm_hdcp_parse_hdcp1_srm(buf, count);
+ return drm_hdcp_parse_hdcp1_srm(buf, count, revoked_ksv_list,
+ revoked_ksv_cnt);
else if (is_srm_version_hdcp2(buf))
- drm_hdcp_parse_hdcp2_srm(buf, count);
+ return drm_hdcp_parse_hdcp2_srm(buf, count, revoked_ksv_list,
+ revoked_ksv_cnt);
+ else
+ return -EINVAL;
}
-static void drm_hdcp_request_srm(struct drm_device *drm_dev)
+static int drm_hdcp_request_srm(struct drm_device *drm_dev,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
char fw_name[36] = "display_hdcp_srm.bin";
const struct firmware *fw;
-
int ret;
ret = request_firmware_direct(&fw, (const char *)fw_name,
drm_dev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ *revoked_ksv_cnt = 0;
+ *revoked_ksv_list = NULL;
+ ret = 0;
goto exit;
+ }
if (fw->size && fw->data)
- drm_hdcp_srm_update(fw->data, fw->size);
+ ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
+ revoked_ksv_cnt);
exit:
release_firmware(fw);
+ return ret;
}
/**
@@ -279,71 +280,36 @@ exit:
* https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
*
* Returns:
- * TRUE on any of the KSV is revoked, else FALSE.
+ * Count of the revoked KSVs, or a negative error number in case of failure.
*/
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
- u32 ksv_count)
+int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
+ u32 ksv_count)
{
- u32 rev_ksv_cnt, cnt, i, j;
- u8 *rev_ksv_list;
-
- if (!srm_data)
- return false;
-
- mutex_lock(&srm_data->mutex);
- drm_hdcp_request_srm(drm_dev);
-
- rev_ksv_cnt = srm_data->revoked_ksv_cnt;
- rev_ksv_list = srm_data->revoked_ksv_list;
-
- /* If the Revoked ksv list is empty */
- if (!rev_ksv_cnt || !rev_ksv_list) {
- mutex_unlock(&srm_data->mutex);
- return false;
- }
-
- for (cnt = 0; cnt < ksv_count; cnt++) {
- rev_ksv_list = srm_data->revoked_ksv_list;
- for (i = 0; i < rev_ksv_cnt; i++) {
- for (j = 0; j < DRM_HDCP_KSV_LEN; j++)
- if (ksvs[j] != rev_ksv_list[j]) {
- break;
- } else if (j == (DRM_HDCP_KSV_LEN - 1)) {
- DRM_DEBUG("Revoked KSV is ");
- drm_hdcp_print_ksv(ksvs);
- mutex_unlock(&srm_data->mutex);
- return true;
- }
- /* Move the offset to next KSV in the revoked list */
- rev_ksv_list += DRM_HDCP_KSV_LEN;
- }
-
- /* Iterate to next ksv_offset */
- ksvs += DRM_HDCP_KSV_LEN;
- }
- mutex_unlock(&srm_data->mutex);
- return false;
+ u32 revoked_ksv_cnt = 0, i, j;
+ u8 *revoked_ksv_list = NULL;
+ int ret = 0;
+
+ ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
+ &revoked_ksv_cnt);
+ if (ret)
+ return ret;
+
+	/* revoked_ksv_cnt will be zero when the above function fails */
+ for (i = 0; i < revoked_ksv_cnt; i++)
+ for (j = 0; j < ksv_count; j++)
+ if (!memcmp(&ksvs[j * DRM_HDCP_KSV_LEN],
+ &revoked_ksv_list[i * DRM_HDCP_KSV_LEN],
+ DRM_HDCP_KSV_LEN)) {
+ DRM_DEBUG("Revoked KSV is ");
+ drm_hdcp_print_ksv(&ksvs[j * DRM_HDCP_KSV_LEN]);
+ ret++;
+ }
+
+ kfree(revoked_ksv_list);
+ return ret;
}
EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
-int drm_setup_hdcp_srm(struct class *drm_class)
-{
- srm_data = kzalloc(sizeof(*srm_data), GFP_KERNEL);
- if (!srm_data)
- return -ENOMEM;
- mutex_init(&srm_data->mutex);
-
- return 0;
-}
-
-void drm_teardown_hdcp_srm(struct class *drm_class)
-{
- if (srm_data) {
- kfree(srm_data->revoked_ksv_list);
- kfree(srm_data);
- }
-}
-
static struct drm_prop_enum_list drm_cp_enum_list[] = {
{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
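
With the global SRM cache gone, each revocation check is now self-contained: fetch display_hdcp_srm.bin, parse the vendor revocation lists into a temporary KSV array, count 5-byte memcmp() matches, and free the array. Since the return value changed from bool to a count, callers need updating; a hedged sketch of the new calling convention (details illustrative):

	ret = drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, device_count);
	if (ret < 0)
		return ret;	/* SRM could not be fetched or parsed */
	if (ret > 0)
		return -EPERM;	/* ret of the downstream KSVs are revoked */
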
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 6937bf923f05..5714a78365ac 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -41,6 +41,7 @@ struct drm_printer;
/* drm_file.c */
extern struct mutex drm_global_mutex;
+bool drm_dev_needs_global_mutex(struct drm_device *dev);
struct drm_file *drm_file_alloc(struct drm_minor *minor);
void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
@@ -235,7 +236,3 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
int drm_framebuffer_debugfs_init(struct drm_minor *minor);
-
-/* drm_hdcp.c */
-int drm_setup_hdcp_srm(struct class *drm_class);
-void drm_teardown_hdcp_srm(struct class *drm_class);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 5afb39688b55..9e41972c4bbc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -671,6 +671,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB2, drm_mode_getfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 03bce566a8c3..588be45abd7a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -111,10 +111,6 @@ int drm_irq_install(struct drm_device *dev, int irq)
if (irq == 0)
return -EINVAL;
- /* Driver must have been initialized */
- if (!dev->dev_private)
- return -EINVAL;
-
if (dev->irq_enabled)
return -EBUSY;
dev->irq_enabled = true;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 2c79e8199e3c..f16eefbf2829 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -46,7 +46,7 @@
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-/**
+/*
* Take the heavyweight lock.
*
* \param lock lock pointer.
@@ -93,7 +93,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
return 0;
}
-/**
+/*
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
*
@@ -150,7 +150,7 @@ static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
return 0;
}
-/**
+/*
* Lock ioctl.
*
* \param inode device inode.
@@ -243,7 +243,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Unlock ioctl.
*
* \param inode device inode.
@@ -275,7 +275,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
return 0;
}
-/**
+/*
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
@@ -287,7 +287,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
-
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 16bff1be4b8a..558baf989f5a 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -24,7 +24,6 @@
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -238,6 +237,23 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(mipi_dbi_buf_copy);
+static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
+ unsigned int xs, unsigned int xe,
+ unsigned int ys, unsigned int ye)
+{
+ struct mipi_dbi *dbi = &dbidev->dbi;
+
+ xs += dbidev->left_offset;
+ xe += dbidev->left_offset;
+ ys += dbidev->top_offset;
+ ye += dbidev->top_offset;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, (xs >> 8) & 0xff,
+ xs & 0xff, (xe >> 8) & 0xff, xe & 0xff);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, (ys >> 8) & 0xff,
+ ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
+}
+
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
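
mipi_dbi_set_window_address() centralizes the DCS window setup and now applies the panel's left/top offsets for controllers whose RAM is larger than the visible area; each 16-bit coordinate is split into two big-endian parameter bytes. A worked example (the offsets are illustrative): for a panel mapped 80 columns into the controller RAM, flushing a full 240x320 screen with rect x1=0, x2=240, y1=0, y2=320 sends

	/* xs = 0 + 80 = 80 (0x00 0x50), xe = 239 + 80 = 319 (0x01 0x3f) */
	MIPI_DCS_SET_COLUMN_ADDRESS: 0x00 0x50 0x01 0x3f
	/* ys = 0 (0x00 0x00), ye = 319 (0x01 0x3f) with no top offset */
	MIPI_DCS_SET_PAGE_ADDRESS:   0x00 0x00 0x01 0x3f
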
@@ -271,12 +287,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
tr = cma_obj->vaddr;
}
- mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
- (rect->x1 >> 8) & 0xff, rect->x1 & 0xff,
- ((rect->x2 - 1) >> 8) & 0xff, (rect->x2 - 1) & 0xff);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
- (rect->y1 >> 8) & 0xff, rect->y1 & 0xff,
- ((rect->y2 - 1) >> 8) & 0xff, (rect->y2 - 1) & 0xff);
+ mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
+ rect->y2 - 1);
ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr,
width * height * 2);
@@ -299,18 +311,10 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
mipi_dbi_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
EXPORT_SYMBOL(mipi_dbi_pipe_update);
@@ -366,10 +370,7 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
memset(dbidev->tx_buf, 0, len);
- mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
- ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
- ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
+ mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)dbidev->tx_buf, len);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2a6e34663146..8981abe8b7c9 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -399,10 +399,10 @@ next_hole(struct drm_mm *mm,
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- u64 end = node->start + node->size;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
u64 adj_start, adj_end;
+ u64 end;
end = node->start + node->size;
if (unlikely(end <= node->start))
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f2e43d341980..81aa21561982 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -51,8 +51,6 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
- unsigned long addr;
- size_t sz;
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -68,47 +66,17 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
dmah->size = size;
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
&dmah->busaddr,
- GFP_KERNEL | __GFP_COMP);
+ GFP_KERNEL);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page((void *)addr));
- }
-
return dmah;
}
-
EXPORT_SYMBOL(drm_pci_alloc);
-/*
- * Free a PCI consistent memory block without freeing its descriptor.
- *
- * This function is for internal use in the Linux-specific DRM core code.
- */
-void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
- unsigned long addr;
- size_t sz;
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page((void *)addr));
- }
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
- dmah->busaddr);
- }
-}
-
/**
* drm_pci_free - Free a PCI consistent memory block
* @dev: DRM device
@@ -119,7 +87,8 @@ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- __drm_legacy_pci_free(dev, dmah);
+ dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dmah->busaddr);
kfree(dmah);
}
@@ -197,6 +166,18 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return drm_pci_irq_by_busid(dev, p);
}
+void drm_pci_agp_destroy(struct drm_device *dev)
+{
+ if (dev->agp) {
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ drm_legacy_agp_clear(dev);
+ kfree(dev->agp);
+ dev->agp = NULL;
+ }
+}
+
+#ifdef CONFIG_DRM_LEGACY
+
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
@@ -211,33 +192,9 @@ static void drm_pci_agp_init(struct drm_device *dev)
}
}
-void drm_pci_agp_destroy(struct drm_device *dev)
-{
- if (dev->agp) {
- arch_phys_wc_del(dev->agp->agp_mtrr);
- drm_legacy_agp_clear(dev);
- kfree(dev->agp);
- dev->agp = NULL;
- }
-}
-
-/**
- * drm_get_pci_dev - Register a PCI device with the DRM subsystem
- * @pdev: PCI device
- * @ent: entry from the PCI ID table that matches @pdev
- * @driver: DRM device driver
- *
- * Attempt to gets inter module "drm" information. If we are first
- * then register the character device and inter module information.
- * Try and register, if we fail to register, backout previous work.
- *
- * NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your &drm_driver.load callback.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
+static int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -280,9 +237,6 @@ err_free:
drm_dev_put(dev);
return ret;
}
-EXPORT_SYMBOL(drm_get_pci_dev);
-
-#ifdef CONFIG_DRM_LEGACY
/**
* drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1de2cde2277c..282774e469ac 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -962,27 +962,40 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
unsigned count;
struct scatterlist *sg;
struct page *page;
- u32 len, index;
+ u32 page_len, page_index;
dma_addr_t addr;
+ u32 dma_len, dma_index;
- index = 0;
+ /*
+	 * Scatterlist elements contain both pages and DMA addresses, but
+	 * one should not assume a 1:1 relation between them. The sg->length is
+ * the size of the physical memory chunk described by the sg->page,
+ * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
+ * described by the sg_dma_address(sg).
+ */
+ page_index = 0;
+ dma_index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
- len = sg_dma_len(sg);
+ page_len = sg->length;
page = sg_page(sg);
+ dma_len = sg_dma_len(sg);
addr = sg_dma_address(sg);
- while (len > 0) {
- if (WARN_ON(index >= max_entries))
+ while (pages && page_len > 0) {
+ if (WARN_ON(page_index >= max_entries))
return -1;
- if (pages)
- pages[index] = page;
- if (addrs)
- addrs[index] = addr;
-
+ pages[page_index] = page;
page++;
+ page_len -= PAGE_SIZE;
+ page_index++;
+ }
+ while (addrs && dma_len > 0) {
+ if (WARN_ON(dma_index >= max_entries))
+ return -1;
+ addrs[dma_index] = addr;
addr += PAGE_SIZE;
- len -= PAGE_SIZE;
- index++;
+ dma_len -= PAGE_SIZE;
+ dma_index++;
}
}
return 0;
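
The rewrite drops the assumption that sg->length and sg_dma_len() describe the same chunks: behind an IOMMU, dma_map_sg() may coalesce several physical segments into fewer, larger IOVA ranges, so the page and address arrays are now filled with independent cursors. An illustrative shape of such a table:

	/*
	 * Three contiguous 4 KiB pages mapped behind an IOMMU might yield
	 * one entry where both views agree:
	 *   entry 0: sg->length = 12288, sg_dma_len = 12288
	 * or three entries whose DMA side collapsed into entry 0:
	 *   entry 0: sg->length = 4096, sg_dma_len = 12288
	 *   entry 1: sg->length = 4096, sg_dma_len = 0
	 *   entry 2: sg->length = 4096, sg_dma_len = 0
	 * Either way, the CPU-page walk and the IOVA walk must not share
	 * one index, which is exactly what the two cursors above provide.
	 */
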
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d5c386154246..ca520028b2cb 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -99,6 +99,9 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
+ if (request->size > SIZE_MAX - PAGE_SIZE)
+ return -EINVAL;
+
if (dev->sg)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 15fb516ae2d8..74946690aba4 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -26,12 +26,51 @@
* entity. Some flexibility for code reuse is provided through a separately
* allocated &drm_connector object and supporting optional &drm_bridge
* encoder drivers.
+ *
+ * Many drivers require only a very simple encoder that fulfills the minimum
+ * requirements of the display pipeline and does not add additional
+ * functionality. The function drm_simple_encoder_init() provides an
+ * implementation of such an encoder.
*/
-static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
+static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
.destroy = drm_encoder_cleanup,
};
+/**
+ * drm_simple_encoder_init - Initialize a preallocated encoder with
+ * basic functionality.
+ * @dev: drm device
+ * @encoder: the encoder to initialize
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initialises a preallocated encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * The encoder will be cleaned up automatically as part of the mode-setting
+ * cleanup.
+ *
+ * The caller of drm_simple_encoder_init() is responsible for freeing
+ * the encoder's memory after the encoder has been cleaned up. At the
+ * moment this only works reliably if the encoder data structure is
+ * stored in the device structure. Free the encoder's memory as part of
+ * the device release function.
+ *
+ * FIXME: Later improvements to DRM's resource management may allow for
+ * an automated kfree() of the encoder's memory.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_simple_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ int encoder_type)
+{
+ return drm_encoder_init(dev, encoder,
+ &drm_simple_encoder_funcs_cleanup,
+ encoder_type, NULL);
+}
+EXPORT_SYMBOL(drm_simple_encoder_init);
+
static enum drm_mode_status
drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
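
drm_simple_encoder_init() covers encoders that do nothing beyond tying a CRTC to a connector: only the cleanup hook is wired, and the memory must outlive the encoder, hence the recommendation to embed it in the device structure. A hedged sketch of the intended embedding (types and names illustrative):

	struct foo_device {
		struct drm_device drm;
		struct drm_encoder encoder; /* freed together with foo_device */
	};

	static int foo_encoder_create(struct foo_device *foo)
	{
		struct drm_encoder *enc = &foo->encoder;
		int ret;

		ret = drm_simple_encoder_init(&foo->drm, enc,
					      DRM_MODE_ENCODER_NONE);
		if (ret)
			return ret;

		enc->possible_crtcs = 1; /* first CRTC only, illustrative */
		return 0;
	}
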
@@ -229,7 +268,7 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
struct drm_bridge *bridge)
{
- return drm_bridge_attach(&pipe->encoder, bridge, NULL);
+ return drm_bridge_attach(&pipe->encoder, bridge, NULL, 0);
}
EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
@@ -288,8 +327,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret || !connector)
return ret;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 669c93fe2500..42d46414f767 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -43,27 +43,66 @@
* - Signal a syncobj (set a trivially signaled fence)
* - Wait for a syncobj's fence to appear and be signaled
*
+ * The syncobj userspace API also allows manipulating a syncobj in terms of
+ * a timeline of struct &dma_fence_chain rather than a single struct
+ * &dma_fence, through the following operations:
+ *
+ * - Signal a given point on the timeline
+ * - Wait for a given point to appear and/or be signaled
+ * - Import and export from/to a given point of a timeline
+ *
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
* &dma_fence which may be NULL.
* When a syncobj is first created, its pointer is either NULL or a pointer
* to an already signaled fence depending on whether the
* &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
* &DRM_IOCTL_SYNCOBJ_CREATE.
- * When GPU work which signals a syncobj is enqueued in a DRM driver,
- * the syncobj fence is replaced with a fence which will be signaled by the
- * completion of that work.
- * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
- * driver retrieves syncobj's current fence at the time the work is enqueued
- * waits on that fence before submitting the work to hardware.
- * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
- * All manipulation of the syncobjs's fence happens in terms of the current
- * fence at the time the ioctl is called by userspace regardless of whether
- * that operation is an immediate host-side operation (signal or reset) or
- * or an operation which is enqueued in some driver queue.
- * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
- * manipulate a syncobj from the host by resetting its pointer to NULL or
+ *
+ * If the syncobj is considered as a binary (its state is either signaled or
+ * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
+ * the syncobj, the syncobj's fence is replaced with a fence which will be
+ * signaled by the completion of that work.
+ * If the syncobj is considered as a timeline primitive, when GPU work is
+ * enqueued in a DRM driver to signal a given point of the syncobj, a new
+ * struct &dma_fence_chain is created, pointing to the DRM driver's fence
+ * and also to the previous fence that was in the syncobj. The new struct
+ * &dma_fence_chain fence replaces the syncobj's fence and will be signaled
+ * by completion of the DRM driver's work and also of any work associated
+ * with the fence previously in the syncobj.
+ *
+ * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
+ * time the work is enqueued, it waits on the syncobj's fence before
+ * submitting the work to hardware. That fence is either:
+ *
+ * - The syncobj's current fence if the syncobj is considered as a binary
+ * primitive.
+ * - The struct &dma_fence associated with a given point if the syncobj is
+ * considered as a timeline primitive.
+ *
+ * If the syncobj's fence is NULL or not present in the syncobj's timeline,
+ * the enqueue operation is expected to fail.
+ *
+ * With binary syncobj, all manipulation of the syncobjs's fence happens in
+ * terms of the current fence at the time the ioctl is called by userspace
+ * regardless of whether that operation is an immediate host-side operation
+ * (signal or reset) or an operation which is enqueued in some driver
+ * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
+ * to manipulate a syncobj from the host by resetting its pointer to NULL or
* setting its pointer to a fence which is already signaled.
*
+ * With a timeline syncobj, all manipulation of the syncobj's fence happens in
+ * terms of a u64 value referring to a point in the timeline. See
+ * dma_fence_chain_find_seqno() to see how a given point is found in the
+ * timeline.
+ *
+ * Note that applications should be careful to always use the timeline set
+ * of ioctl() when dealing with a syncobj considered as a timeline. Using the
+ * binary set of ioctl() with a timeline syncobj could result in incorrect
+ * synchronization. The use of a binary syncobj is still supported through
+ * the timeline set of ioctl() by using a point value of 0; this reproduces
+ * the behavior of the binary set of ioctl() (for example, replacing the
+ * syncobj's fence when signaling).
+ *
*
* Host-side wait on syncobjs
* --------------------------
@@ -87,6 +126,16 @@
* synchronize between the two.
* This requirement is inherited from the Vulkan fence API.
*
+ * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
+ * handles as well as an array of u64 points and does a host-side wait on all
+ * of the syncobj fences at the given points simultaneously.
+ *
+ * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
+ * fence to materialize on the timeline without waiting for the fence to be
+ * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
+ * requirement is inherited from the wait-before-signal behavior required by
+ * the Vulkan timeline semaphore API.
+ *
*
* Import/export of syncobjs
* -------------------------
@@ -120,6 +169,18 @@
* Because sync files are immutable, resetting or signaling the syncobj
* will not affect any sync files whose fences have been imported into the
* syncobj.
+ *
+ *
+ * Import/export of timeline points in timeline syncobjs
+ * -----------------------------------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer the struct
+ * &dma_fence_chain found at a given u64 point of one syncobj to another u64
+ * point of another syncobj.
+ *
+ * Note that when transferring between a timeline syncobj and a binary
+ * syncobj, you can use point 0 on the binary side to mean take/replace the
+ * fence in that syncobj.
*/
#include <linux/anon_inodes.h>
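
To make the timeline semantics above concrete, here is a minimal userspace
sketch (illustrative only, not part of this patch). It waits for point 3 to
merely materialize on a timeline syncobj, using the
&DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag, then transfers that point into a
binary syncobj via point 0. The names fd, tl_handle and bin_handle are
assumptions: an open DRM device file descriptor and two pre-created syncobj
handles; the drm.h include path may differ per system.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int wait_and_transfer(int fd, uint32_t tl_handle, uint32_t bin_handle)
{
	uint64_t handles[1] = { tl_handle };
	uint64_t points[1] = { 3 };
	struct drm_syncobj_timeline_wait wait = {
		.handles = (uintptr_t)handles,
		.points = (uintptr_t)points,
		.timeout_nsec = INT64_MAX,	/* absolute timeout */
		.count_handles = 1,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
	};
	struct drm_syncobj_transfer xfer = {
		.src_handle = tl_handle,
		.dst_handle = bin_handle,
		.src_point = 3,
		.dst_point = 0,	/* point 0: replace the binary syncobj's fence */
	};
	int ret;

	/* Wait for the fence at point 3 to appear, not to be signaled. */
	ret = ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);
	if (ret)
		return ret;

	/* Copy the fence found at point 3 into the binary syncobj. */
	return ioctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &xfer);
}
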
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index dd2bc85f43cc..939f0032aab1 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -85,7 +85,6 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
- drm_setup_hdcp_srm(drm_class);
return 0;
}
@@ -98,7 +97,6 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
- drm_teardown_hdcp_srm(drm_class);
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
@@ -230,7 +228,7 @@ static ssize_t modes_show(struct device *device,
mutex_lock(&connector->dev->mode_config.mutex);
list_for_each_entry(mode, &connector->modes, head) {
- written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+ written += scnprintf(buf + written, PAGE_SIZE - written, "%s\n",
mode->name);
}
mutex_unlock(&connector->dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 1659b13b178c..da7b0b0c1090 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -69,6 +70,12 @@
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
+ *
+ * Drivers for hardware without support for vertical-blanking interrupts
+ * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * automatically generate fake vblank events as part of the display update.
+ * This functionality can also be controlled by the driver by enabling and
+ * disabling struct drm_crtc_state.no_vblank.
*/
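/*
 * Illustrative sketch, not part of this patch: per the note above, a driver
 * can also force fake vblank events for a commit by setting the flag from
 * its atomic check code, assuming a crtc_state in scope:
 *
 *	crtc_state->no_vblank = true;
 */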
/* Retry timestamp calculation up to 3 times to satisfy
@@ -137,10 +144,9 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
- }
-
- if (dev->driver->get_vblank_counter)
+ } else if (dev->driver->get_vblank_counter) {
return dev->driver->get_vblank_counter(dev, pipe);
+ }
return drm_vblank_no_hw_counter(dev, pipe);
}
@@ -332,7 +338,8 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) &&
+ !crtc->funcs->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -354,13 +361,11 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (WARN_ON(!crtc))
return;
- if (crtc->funcs->disable_vblank) {
+ if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
- return;
- }
+ } else {
+ dev->driver->disable_vblank(dev, pipe);
}
-
- dev->driver->disable_vblank(dev, pipe);
}
/*
@@ -480,19 +485,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
- /* Driver specific high-precision vblank timestamping supported? */
- if (dev->driver->get_vblank_timestamp)
- DRM_INFO("Driver supports precise vblank timestamp query.\n");
- else
- DRM_INFO("No driver support for vblank timestamp query.\n");
-
- /* Must have precise timestamping for reliable vblank instant disable */
- if (dev->vblank_disable_immediate && !dev->driver->get_vblank_timestamp) {
- dev->vblank_disable_immediate = false;
- DRM_INFO("Setting vblank_disable_immediate to false because "
- "get_vblank_timestamp == NULL\n");
- }
-
return 0;
err:
@@ -502,6 +494,28 @@ err:
EXPORT_SYMBOL(drm_vblank_init);
/**
+ * drm_dev_has_vblank - test if vblanking has been initialized for
+ * a device
+ * @dev: the device
+ *
+ * Drivers may call this function to test if vblank support is
+ * initialized for a device. For most hardware this means that vblanking
+ * can also be enabled.
+ *
+ * Atomic helpers use this function to initialize
+ * &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset().
+ *
+ * Returns:
+ * True if vblanking has been initialized for the given device, false
+ * otherwise.
+ */
+bool drm_dev_has_vblank(const struct drm_device *dev)
+{
+ return dev->num_crtcs != 0;
+}
+EXPORT_SYMBOL(drm_dev_has_vblank);
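/*
 * Illustrative sketch, not part of this patch: this is roughly how atomic
 * helpers consume drm_dev_has_vblank() when initializing per-CRTC state,
 * assuming a new_crtc_state in scope:
 *
 *	new_crtc_state->no_vblank = !drm_dev_has_vblank(dev);
 */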
+
+/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
@@ -523,9 +537,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
*
* Calculate and store various constants which are later needed by vblank and
* swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos(). They are derived from CRTC's true
- * scanout timing, so they take things like panel scaling or other adjustments
- * into account.
+ * drm_crtc_vblank_helper_get_vblank_timestamp(). They are derived from
+ * CRTC's true scanout timing, so they take things like panel scaling or
+ * other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -576,9 +590,9 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
- * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * drm_crtc_vblank_helper_get_vblank_timestamp_internal - precise vblank
+ * timestamp helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to time which should receive the timestamp
@@ -586,11 +600,12 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
* if flag is set.
+ * @get_scanout_position:
+ * Callback function to retrieve the scanout position. See
+ * &struct drm_crtc_helper_funcs.get_scanout_position.
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
- * timings and current video scanout position of a CRTC. This can be directly
- * used as the &drm_driver.get_vblank_timestamp implementation of a kms driver
- * if &drm_driver.get_scanout_position is implemented.
+ * timings and current video scanout position of a CRTC.
*
* The current implementation only handles standard video modes. For double scan
* and interlaced modes the driver is supposed to adjust the hardware mode
@@ -606,34 +621,30 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe,
- int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq)
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq,
+ drm_vblank_get_scanout_position_func get_scanout_position)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct timespec64 ts_etime, ts_vblank_time;
ktime_t stime, etime;
bool vbl_status;
- struct drm_crtc *crtc;
const struct drm_display_mode *mode;
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int vpos, hpos, i;
int delta_ns, duration_ns;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return false;
-
- crtc = drm_crtc_from_index(dev, pipe);
-
- if (pipe >= dev->num_crtcs || !crtc) {
+ if (pipe >= dev->num_crtcs) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return false;
}
/* Scanout position query not supported? Should not happen. */
- if (!dev->driver->get_scanout_position) {
- DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ if (!get_scanout_position) {
+ DRM_ERROR("Called from CRTC w/o get_scanout_position()!?\n");
return false;
}
@@ -648,7 +659,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
if (mode->crtc_clock == 0) {
DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
WARN_ON_ONCE(drm_drv_uses_atomic_modeset(dev));
-
return false;
}
@@ -664,11 +674,10 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
- vbl_status = dev->driver->get_scanout_position(dev, pipe,
- in_vblank_irq,
- &vpos, &hpos,
- &stime, &etime,
- mode);
+ vbl_status = get_scanout_position(crtc, in_vblank_irq,
+ &vpos, &hpos,
+ &stime, &etime,
+ mode);
/* Return as no-op if scanout query unsupported or failed. */
if (!vbl_status) {
@@ -720,7 +729,49 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
return true;
}
-EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp - precise vblank timestamp
+ * helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ * On return contains true maximum error of timestamp
+ * @vblank_time: Pointer to time which should receive the timestamp
+ * @in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq quirks
+ * if flag is set.
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be directly
+ * used as the &drm_crtc_funcs.get_vblank_timestamp implementation of a kms
+ * driver if &drm_crtc_helper_funcs.get_scanout_position is implemented.
+ *
+ * The current implementation only handles standard video modes. For double scan
+ * and interlaced modes the driver is supposed to adjust the hardware mode
+ * (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
+ * match the scanout position reported.
+ *
+ * Note that atomic drivers must call drm_calc_timestamping_constants() before
+ * enabling a CRTC. The atomic helpers already take care of that in
+ * drm_atomic_helper_update_legacy_modeset_state().
+ *
+ * Returns:
+ *
+ * Returns true on success, and false on failure, i.e. when no accurate
+ * timestamp could be acquired.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ crtc->helper_private->get_scanout_position);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
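/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this patch):
 * a KMS driver that implements &drm_crtc_helper_funcs.get_scanout_position
 * can plug the helper above straight into its CRTC funcs, as the gma500
 * conversion further down does for the enable/disable hooks:
 *
 *	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
 *		.get_scanout_position = foo_get_scanout_position,
 *	};
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.enable_vblank = foo_enable_vblank,
 *		.disable_vblank = foo_disable_vblank,
 *		.get_vblank_timestamp =
 *			drm_crtc_vblank_helper_get_vblank_timestamp,
 *	};
 */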
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
@@ -747,15 +798,19 @@ static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
ktime_t *tvblank, bool in_vblank_irq)
{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
bool ret = false;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
/* Query driver if possible and precision timestamping enabled. */
- if (dev->driver->get_vblank_timestamp && (max_error > 0))
- ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
+ if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) {
+ ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
tvblank, in_vblank_irq);
+ }
/* GPU high precision timestamp query unsupported or failed.
* Return current monotonic/gettimeofday timestamp as best estimate.
@@ -977,9 +1032,11 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
+ } else if (dev->driver->enable_vblank) {
+ return dev->driver->enable_vblank(dev, pipe);
}
- return dev->driver->enable_vblank(dev, pipe);
+ return -EINVAL;
}
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
@@ -1738,6 +1795,8 @@ done:
static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ bool high_prec = false;
struct drm_pending_vblank_event *e, *t;
ktime_t now;
u64 seq;
@@ -1760,8 +1819,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
send_vblank_event(dev, e, seq, now);
}
- trace_drm_vblank_event(pipe, seq, now,
- dev->driver->get_vblank_timestamp != NULL);
+ if (crtc && crtc->funcs->get_vblank_timestamp)
+ high_prec = true;
+
+ trace_drm_vblank_event(pipe, seq, now, high_prec);
}
/**
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 52e87e4869a5..aa88911bbc06 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -102,7 +102,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
return tmp;
}
-/**
+/*
* \c fault method for AGP virtual memory.
*
* \param vma virtual memory area.
@@ -192,7 +192,7 @@ static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
}
#endif
-/**
+/*
* \c nopage method for shared virtual memory.
*
* \param vma virtual memory area.
@@ -225,7 +225,7 @@ static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
return 0;
}
-/**
+/*
* \c close method for shared virtual memory.
*
* \param vma virtual memory area.
@@ -269,8 +269,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
}
if (!found_maps) {
- drm_dma_handle_t dmah;
-
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
@@ -284,10 +282,10 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_legacy_pci_free(dev, &dmah);
+ dma_free_coherent(&dev->pdev->dev,
+ map->size,
+ map->handle,
+ map->offset);
break;
}
kfree(map);
@@ -296,7 +294,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* \c fault method for DMA virtual memory.
*
* \param address access address.
@@ -331,7 +329,7 @@ static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
return 0;
}
-/**
+/*
* \c fault method for scatter-gather virtual memory.
*
* \param address access address.
@@ -437,7 +435,7 @@ static void drm_vm_close_locked(struct drm_device *dev,
}
}
-/**
+/*
* \c close method for all virtual memory types.
*
* \param vma virtual memory area.
@@ -455,7 +453,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
@@ -515,7 +513,7 @@ static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
#endif
}
-/**
+/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 32d9fac587f9..76d38561c910 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -12,6 +12,7 @@
#include "common.xml.h"
#include "state.xml.h"
+#include "state_blt.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"
@@ -233,6 +234,8 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 link_target, flush = 0;
+ bool has_blt = !!(gpu->identity.minor_features5 &
+ chipMinorFeatures5_BLT_ENGINE);
lockdep_assert_held(&gpu->lock);
@@ -248,16 +251,38 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
if (flush) {
unsigned int dwords = 7;
+ if (has_blt)
+ dwords += 10;
+
link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
- if (gpu->exec_state == ETNA_PIPE_3D)
- CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
- VIVS_TS_FLUSH_CACHE_FLUSH);
+ if (gpu->exec_state == ETNA_PIPE_3D) {
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ }
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
CMD_END(buffer);
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
@@ -323,6 +348,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
bool switch_mmu_context = gpu->mmu_context != mmu_context;
unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
+ bool has_blt = !!(gpu->identity.minor_features5 &
+ chipMinorFeatures5_BLT_ENGINE);
lockdep_assert_held(&gpu->lock);
@@ -433,6 +460,15 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
* 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
return_dwords = 7;
+
+ /*
+ * When the BLT engine is present we need 6 more dwords in the return
+ * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
+ * but we don't need the normal TS flush state.
+ */
+ if (has_blt)
+ return_dwords += 6;
+
return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);
@@ -447,11 +483,25 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
VIVS_GL_FLUSH_CACHE_DEPTH |
VIVS_GL_FLUSH_CACHE_COLOR);
- CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
- VIVS_TS_FLUSH_CACHE_FLUSH);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
}
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
+
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 6b43c1c94e8f..a8685b2e1803 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -551,6 +551,7 @@ static int etnaviv_bind(struct device *dev)
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
+ priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
if (IS_ERR(priv->cmdbuf_suballoc)) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index efc656efeb0f..4d8dc9236e5f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -35,6 +35,7 @@ struct etnaviv_drm_private {
int num_gpus;
struct device_dma_parameters dma_parms;
struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+ gfp_t shm_gfp_mask;
struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
struct etnaviv_iommu_global *mmu_global;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 6adea180d629..dc9ef302f517 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -602,6 +602,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
+ struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_gem_object *obj = NULL;
int ret;
@@ -624,8 +625,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
* above new_inode() why this is required _and_ expected if you're
* going to pin these pages.
*/
- mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
- __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);
etnaviv_gem_obj_add(dev, obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 6b68fe16041b..98e60df882b6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -105,7 +105,7 @@ struct etnaviv_gem_submit {
unsigned int nr_pmrs;
struct etnaviv_perfmon_request *pmrs;
unsigned int nr_bos;
- struct etnaviv_gem_submit_bo bos[0];
+ struct etnaviv_gem_submit_bo bos[];
/* No new members here, the previous one is variable-length! */
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3b0afa156d92..54def341c1db 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -238,8 +238,10 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
}
if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
- submit->bos[i].va != mapping->iova)
+ submit->bos[i].va != mapping->iova) {
+ etnaviv_gem_mapping_unreference(mapping);
return -EINVAL;
+ }
atomic_inc(&etnaviv_obj->gpu_active);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 799ec20b267d..a31eeff2b297 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -333,9 +333,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.revision = etnaviv_field(chipIdentity,
VIVS_HI_CHIP_IDENTITY_REVISION);
} else {
+ u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
+ gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
+ gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
+ gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
/*
* !!!! HACK ALERT !!!!
@@ -350,7 +354,6 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
/* Another special case */
if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
- u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
if (chipDate == 0x20080814 && chipTime == 0x12051100) {
@@ -373,6 +376,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.model = chipModel_GC3000;
gpu->identity.revision &= 0xffff;
}
+
+ if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
+ gpu->identity.eco_id = 1;
+
+ if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
+ gpu->identity.eco_id = 1;
}
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
@@ -506,7 +515,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* read idle register. */
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
- /* try reseting again if FE it not idle */
+ /* try resetting again if FE is not idle */
if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
dev_dbg(gpu->dev, "FE is not idle\n");
continue;
@@ -772,6 +781,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}
+ /*
+ * If the GPU is part of a system with DMA addressing limitations,
+ * request pages for our SHM backend buffers from the DMA32 zone to
+ * hopefully avoid performance killing SWIOTLB bounce buffering.
+ */
+ if (dma_addressing_limited(gpu->dev))
+ priv->shm_gfp_mask |= GFP_DMA32;
+
/* Create buffer: */
ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
PAGE_SIZE);
@@ -851,6 +868,13 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
verify_dma(gpu, &debug);
+ seq_puts(m, "\tidentity\n");
+ seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
+ seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
+ seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
+ seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
+ seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
+
seq_puts(m, "\tfeatures\n");
seq_printf(m, "\t major_features: 0x%08x\n",
gpu->identity.features);
@@ -930,6 +954,20 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
seq_puts(m, "\t FP is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
seq_puts(m, "\t TS is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
+ seq_puts(m, "\t BL is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
+ seq_puts(m, "\t ASYNCFE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
+ seq_puts(m, "\t MC is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
+ seq_puts(m, "\t PPA is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
+ seq_puts(m, "\t WD is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
+ seq_puts(m, "\t NN is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
+ seq_puts(m, "\t TP is not idle\n");
if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
seq_puts(m, "\t AXI low power mode\n");
@@ -1805,11 +1843,15 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
if (atomic_read(&gpu->sched.hw_rq_count))
return -EBUSY;
- /* Check whether the hardware (except FE) is idle */
- mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
+ /* Check whether the hardware (except FE and MC) is idle */
+ mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
+ VIVS_HI_IDLE_STATE_MC);
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
- if (idle != mask)
+ if (idle != mask) {
+ dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
+ idle);
return -EBUSY;
+ }
return etnaviv_gpu_hw_suspend(gpu);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 97bb48042b4d..8ea48697d132 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -15,11 +15,11 @@ struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;
struct etnaviv_chip_identity {
- /* Chip model. */
u32 model;
-
- /* Revision value.*/
u32 revision;
+ u32 product_id;
+ u32 customer_id;
+ u32 eco_id;
/* Supported feature fields. */
u32 features;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 39b463db76c9..167971a09be7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -7,8 +7,42 @@
static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
{
+ .model = 0x400,
+ .revision = 0x4652,
+ .product_id = 0x70001,
+ .customer_id = 0x100,
+ .eco_id = 0,
+ .stream_count = 4,
+ .register_max = 64,
+ .thread_count = 128,
+ .shader_core_count = 1,
+ .vertex_cache_size = 8,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 256,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 8,
+ .features = 0xa0e9e004,
+ .minor_features0 = 0xe1299fff,
+ .minor_features1 = 0xbe13b219,
+ .minor_features2 = 0xce110010,
+ .minor_features3 = 0x8000001,
+ .minor_features4 = 0x20102,
+ .minor_features5 = 0x120000,
+ .minor_features6 = 0x0,
+ .minor_features7 = 0x0,
+ .minor_features8 = 0x0,
+ .minor_features9 = 0x0,
+ .minor_features10 = 0x0,
+ .minor_features11 = 0x0,
+ },
+ {
.model = 0x7000,
.revision = 0x6214,
+ .product_id = ~0U,
+ .customer_id = ~0U,
+ .eco_id = ~0U,
.stream_count = 16,
.register_max = 64,
.thread_count = 1024,
@@ -43,7 +77,13 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
if (etnaviv_chip_identities[i].model == ident->model &&
- etnaviv_chip_identities[i].revision == ident->revision) {
+ etnaviv_chip_identities[i].revision == ident->revision &&
+ (etnaviv_chip_identities[i].product_id == ident->product_id ||
+ etnaviv_chip_identities[i].product_id == ~0U) &&
+ (etnaviv_chip_identities[i].customer_id == ident->customer_id ||
+ etnaviv_chip_identities[i].customer_id == ~0U) &&
+ (etnaviv_chip_identities[i].eco_id == ident->eco_id ||
+ etnaviv_chip_identities[i].eco_id == ~0U)) {
memcpy(ident, &etnaviv_chip_identities[i],
sizeof(*ident));
return true;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 8adbf2861bff..75f9db8f7bec 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -32,6 +32,7 @@ struct etnaviv_pm_domain {
};
struct etnaviv_pm_domain_meta {
+ unsigned int feature;
const struct etnaviv_pm_domain *domains;
u32 nr_domains;
};
@@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = {
static const struct etnaviv_pm_domain_meta doms_meta[] = {
{
+ .feature = chipFeatures_PIPE_3D,
.nr_domains = ARRAY_SIZE(doms_3d),
.domains = &doms_3d[0]
},
{
+ .feature = chipFeatures_PIPE_2D,
.nr_domains = ARRAY_SIZE(doms_2d),
.domains = &doms_2d[0]
},
{
+ .feature = chipFeatures_PIPE_VG,
.nr_domains = ARRAY_SIZE(doms_vg),
.domains = &doms_vg[0]
}
};
+static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
+{
+ unsigned int num = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+ if (gpu->identity.features & meta->feature)
+ num += meta->nr_domains;
+ }
+
+ return num;
+}
+
+static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
+ unsigned int index)
+{
+ const struct etnaviv_pm_domain *domain = NULL;
+ unsigned int offset = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+ if (!(gpu->identity.features & meta->feature))
+ continue;
+
+ if (index - offset >= meta->nr_domains) {
+ offset += meta->nr_domains;
+ continue;
+ }
+
+ domain = meta->domains + (index - offset);
+ }
+
+ return domain;
+}
+
int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_domain *domain)
{
- const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
+ const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
- if (domain->iter >= meta->nr_domains)
+ if (domain->iter >= nr_domains)
return -EINVAL;
- dom = meta->domains + domain->iter;
+ dom = pm_domain(gpu, domain->iter);
+ if (!dom)
+ return -EINVAL;
domain->id = domain->iter;
domain->nr_signals = dom->nr_signals;
strncpy(domain->name, dom->name, sizeof(domain->name));
domain->iter++;
- if (domain->iter == meta->nr_domains)
+ if (domain->iter == nr_domains)
domain->iter = 0xff;
return 0;
@@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_signal *signal)
{
- const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
+ const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
const struct etnaviv_pm_signal *sig;
- if (signal->domain >= meta->nr_domains)
+ if (signal->domain >= nr_domains)
return -EINVAL;
- dom = meta->domains + signal->domain;
+ dom = pm_domain(gpu, signal->domain);
+ if (!dom)
+ return -EINVAL;
if (signal->iter >= dom->nr_signals)
return -EINVAL;
diff --git a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h
index daae55995def..0e8bcf9dcc93 100644
--- a/drivers/gpu/drm/etnaviv/state_blt.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h
@@ -46,6 +46,8 @@ DEALINGS IN THE SOFTWARE.
/* This is a cut-down version of the state_blt.xml.h file */
+#define VIVS_BLT_SET_COMMAND 0x000140ac
+
#define VIVS_BLT_ENABLE 0x000140b8
#define VIVS_BLT_ENABLE_ENABLE 0x00000001
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 41d8da2b6f4f..deaaa99fa654 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
-- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
-- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
-- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
-- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
-- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
-- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
-- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
-- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
-
-Copyright (C) 2012-2018 by the following authors:
+- state.xml ( 26666 bytes, from 2019-12-20 21:20:35)
+- common.xml ( 35468 bytes, from 2018-02-10 13:09:26)
+- common_3d.xml ( 15058 bytes, from 2019-12-28 20:02:03)
+- state_hi.xml ( 30552 bytes, from 2019-12-28 20:02:48)
+- copyright.xml ( 1597 bytes, from 2018-02-10 13:09:26)
+- state_2d.xml ( 51552 bytes, from 2018-02-10 13:09:26)
+- state_3d.xml ( 83098 bytes, from 2019-12-28 20:02:03)
+- state_blt.xml ( 14252 bytes, from 2019-10-20 19:59:15)
+- state_vg.xml ( 5975 bytes, from 2018-02-10 13:09:26)
+
+Copyright (C) 2012-2019 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
@@ -48,6 +48,9 @@ DEALINGS IN THE SOFTWARE.
#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001
#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002
#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003
+#define MMU_EXCEPTION_OUT_OF_BOUND 0x00000004
+#define MMU_EXCEPTION_READ_SECURITY_VIOLATION 0x00000005
+#define MMU_EXCEPTION_WRITE_SECURITY_VIOLATION 0x00000006
#define VIVS_HI 0x00000000
#define VIVS_HI_CLOCK_CONTROL 0x00000000
@@ -81,6 +84,13 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_HI_IDLE_STATE_IM 0x00000200
#define VIVS_HI_IDLE_STATE_FP 0x00000400
#define VIVS_HI_IDLE_STATE_TS 0x00000800
+#define VIVS_HI_IDLE_STATE_BL 0x00001000
+#define VIVS_HI_IDLE_STATE_ASYNCFE 0x00002000
+#define VIVS_HI_IDLE_STATE_MC 0x00004000
+#define VIVS_HI_IDLE_STATE_PPA 0x00008000
+#define VIVS_HI_IDLE_STATE_WD 0x00010000
+#define VIVS_HI_IDLE_STATE_NN 0x00020000
+#define VIVS_HI_IDLE_STATE_TP 0x00040000
#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000
#define VIVS_HI_AXI_CONFIG 0x00000008
@@ -140,6 +150,8 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_HI_CHIP_TIME 0x0000002c
+#define VIVS_HI_CHIP_CUSTOMER_ID 0x00000030
+
#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034
#define VIVS_HI_CACHE_CONTROL 0x00000038
@@ -237,6 +249,8 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_HI_BLT_INTR 0x000000d4
+#define VIVS_HI_CHIP_ECO_ID 0x000000e8
+
#define VIVS_HI_AUXBIT 0x000000ec
#define VIVS_PM 0x00000000
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 1eed3327999f..f2d87a7445c7 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -140,7 +140,7 @@ static void decon_ctx_remove(struct decon_context *ctx)
static u32 decon_calc_clkdiv(struct decon_context *ctx,
const struct drm_display_mode *mode)
{
- unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+ unsigned long ideal_clk = mode->clock;
u32 clkdiv;
/* Find the clock divider value that gets us closest to ideal_clk */
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4785885c0f4f..5ee090691390 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -106,7 +106,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
/* Pre-empt DP connector creation if there's a bridge */
if (dp->ptn_bridge) {
- ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
+ ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge,
+ 0);
if (ret) {
DRM_DEV_ERROR(dp->dev,
"Failed to attach bridge to drm\n");
@@ -158,15 +159,8 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
int ret;
- dp->dev = dev;
dp->drm_dev = drm_dev;
- dp->plat_data.dev_type = EXYNOS_DP;
- dp->plat_data.power_on_start = exynos_dp_poweron;
- dp->plat_data.power_off = exynos_dp_poweroff;
- dp->plat_data.attach = exynos_dp_bridge_attach;
- dp->plat_data.get_modes = exynos_dp_get_modes;
-
if (!dp->plat_data.panel && !dp->ptn_bridge) {
ret = exynos_dp_dt_parse_panel(dp);
if (ret)
@@ -184,13 +178,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
dp->plat_data.encoder = encoder;
- dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
- if (IS_ERR(dp->adp)) {
+ ret = analogix_dp_bind(dp->adp, dp->drm_dev);
+ if (ret)
dp->encoder.funcs->destroy(&dp->encoder);
- return PTR_ERR(dp->adp);
- }
- return 0;
+ return ret;
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -221,6 +213,7 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
+ dp->dev = dev;
/*
* We just use the drvdata until driver run into component
* add function, and then we would set drvdata to null, so
@@ -246,16 +239,29 @@ static int exynos_dp_probe(struct platform_device *pdev)
/* The remote port can be either a panel or a bridge */
dp->plat_data.panel = panel;
+ dp->plat_data.dev_type = EXYNOS_DP;
+ dp->plat_data.power_on_start = exynos_dp_poweron;
+ dp->plat_data.power_off = exynos_dp_poweroff;
+ dp->plat_data.attach = exynos_dp_bridge_attach;
+ dp->plat_data.get_modes = exynos_dp_get_modes;
dp->plat_data.skip_connector = !!bridge;
+
dp->ptn_bridge = bridge;
out:
+ dp->adp = analogix_dp_probe(dev, &dp->plat_data);
+ if (IS_ERR(dp->adp))
+ return PTR_ERR(dp->adp);
+
return component_add(&pdev->dev, &exynos_dp_ops);
}
static int exynos_dp_remove(struct platform_device *pdev)
{
+ struct exynos_dp_device *dp = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &exynos_dp_ops);
+ analogix_dp_remove(dp->adp);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ba0f868b2477..57defeb44522 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -270,7 +270,7 @@ static int exynos_drm_bind(struct device *dev)
struct drm_encoder *encoder;
struct drm_device *drm;
unsigned int clone_mask;
- int cnt, ret;
+ int ret;
drm = drm_dev_alloc(&exynos_drm_driver, dev);
if (IS_ERR(drm))
@@ -293,10 +293,9 @@ static int exynos_drm_bind(struct device *dev)
exynos_drm_mode_config_init(drm);
/* setup possible_clones. */
- cnt = 0;
clone_mask = 0;
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
- clone_mask |= (1 << (cnt++));
+ clone_mask |= drm_encoder_mask(encoder);
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
encoder->possible_clones = clone_mask;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a85365c56d4d..e080aa92338c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1514,7 +1514,6 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return 0;
connector->funcs->reset(connector);
- drm_fb_helper_add_one_connector(drm->fb_helper, connector);
drm_connector_register(connector);
return 0;
}
@@ -1540,7 +1539,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
out_bridge = of_drm_find_bridge(device->dev.of_node);
if (out_bridge) {
- drm_bridge_attach(encoder, out_bridge, NULL);
+ drm_bridge_attach(encoder, out_bridge, NULL, 0);
dsi->out_bridge = out_bridge;
list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
} else {
@@ -1717,7 +1716,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
if (dsi->in_bridge_node) {
in_bridge = of_drm_find_bridge(dsi->in_bridge_node);
if (in_bridge)
- drm_bridge_attach(encoder, in_bridge, NULL);
+ drm_bridge_attach(encoder, in_bridge, NULL, 0);
}
return mipi_dsi_host_register(&dsi->dsi_host);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 647a1fd1d815..e6ceaf36fb04 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -200,21 +200,13 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"failed to initialize drm fb helper.\n");
goto err_init;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "failed to register drm_fb_helper_connector.\n");
- goto err_setup;
-
- }
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index f141916eade6..1a7c828fc41d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -960,7 +960,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
- ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
+ ret = drm_bridge_attach(encoder, hdata->bridge, NULL, 0);
if (ret)
DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 9598ee3cc4d2..cff344367f81 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -151,5 +151,5 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
return fsl_dcu_attach_panel(fsl_dev, panel);
}
- return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
+ return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL, 0);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 1ed854f498b7..686385a66167 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -977,6 +977,9 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs cdv_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1459076d1980..1d8f67e4795a 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -513,14 +513,10 @@ int psb_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret)
- goto fini;
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index a1f9ce9465a5..0e6facf21e33 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -227,7 +227,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* sizeof(child_device_config);
*/
- struct child_device_config devices[0];
+ struct child_device_config devices[];
};
struct bdb_lvds_options {
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 6956c8e7501c..2411eb9827b8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -363,7 +363,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
drm_irq_install(dev, dev->pdev->irq);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- dev->driver->get_vblank_counter = psb_get_vblank_counter;
psb_modeset_init(dev);
psb_fbdev_init(dev);
@@ -507,9 +506,6 @@ static struct drm_driver driver = {
.irq_postinstall = psb_irq_postinstall,
.irq_uninstall = psb_irq_uninstall,
.irq_handler = psb_irq_handler,
- .enable_vblank = psb_enable_vblank,
- .disable_vblank = psb_disable_vblank,
- .get_vblank_counter = psb_get_vblank_counter,
.gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 3d4ef3071d45..956926341316 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -681,15 +681,15 @@ extern void psb_irq_turn_off_dpst(struct drm_device *dev);
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
+extern int psb_enable_vblank(struct drm_crtc *crtc);
+extern void psb_disable_vblank(struct drm_crtc *crtc);
void
psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
void
psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
-extern u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+extern u32 psb_get_vblank_counter(struct drm_crtc *crtc);
/* framebuffer.c */
extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index fed3b563e62e..531c5485be17 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -433,6 +433,9 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs psb_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 91f90016dba9..15eb3770d817 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -506,8 +506,10 @@ int psb_irq_disable_dpst(struct drm_device *dev)
/*
* It is used to enable VBLANK interrupt
*/
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int psb_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t reg_val = 0;
@@ -545,8 +547,10 @@ int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
/*
* It is used to disable VBLANK interrupt
*/
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void psb_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -618,8 +622,10 @@ void mdfld_disable_te(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 psb_get_vblank_counter(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
uint32_t high_frame = PIPEAFRAMEHIGH;
uint32_t low_frame = PIPEAFRAMEPIXEL;
uint32_t pipeconf_reg = PIPEACONF;
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 58fd502e3b9d..4f73998848d1 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -12,6 +12,7 @@
#ifndef _PSB_IRQ_H_
#define _PSB_IRQ_H_
+struct drm_crtc;
struct drm_device;
bool sysirq_init(struct drm_device *dev);
@@ -26,9 +27,9 @@ int psb_irq_enable_dpst(struct drm_device *dev);
int psb_irq_disable_dpst(struct drm_device *dev);
void psb_irq_turn_on_dpst(struct drm_device *dev);
void psb_irq_turn_off_dpst(struct drm_device *dev);
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int psb_enable_vblank(struct drm_crtc *crtc);
+void psb_disable_vblank(struct drm_crtc *crtc);
+u32 psb_get_vblank_counter(struct drm_crtc *crtc);
int mdfld_enable_te(struct drm_device *dev, int pipe);
void mdfld_disable_te(struct drm_device *dev, int pipe);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 7fa7d4933f60..55b46a7150a5 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -40,6 +40,7 @@ struct hibmc_dislay_pll_config {
};
static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
+ {640, 480, CRT_PLL1_HS_25MHZ, CRT_PLL2_HS_25MHZ},
{800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
{1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
{1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
@@ -47,6 +48,8 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
{1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
{1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1440, 900, CRT_PLL1_HS_106MHZ, CRT_PLL2_HS_106MHZ},
+ {1600, 900, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
{1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
@@ -80,6 +83,9 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (!crtc_state->enable)
+ return 0;
+
if (state->crtc_x + state->crtc_w >
crtc_state->adjusted_mode.hdisplay ||
state->crtc_y + state->crtc_h >
@@ -184,6 +190,20 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
return plane;
}
+static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ unsigned int reg;
+
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
+ reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, dpms);
+ reg &= ~HIBMC_CRT_DISP_CTL_TIMING_MASK;
+ if (dpms == HIBMC_CRT_DPMS_ON)
+ reg |= HIBMC_CRT_DISP_CTL_TIMING(1);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -200,6 +220,7 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
reg |= HIBMC_CURR_GATE_DISPLAY(1);
hibmc_set_current_gate(priv, reg);
drm_crtc_vblank_on(crtc);
+ hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_ON);
}
static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -208,6 +229,7 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
drm_crtc_vblank_off(crtc);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
@@ -221,6 +243,25 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
hibmc_set_current_gate(priv, reg);
}
+static enum drm_mode_status
+hibmc_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int i = 0;
+ int vrefresh = drm_mode_vrefresh(mode);
+
+ if (vrefresh < 59 || vrefresh > 61)
+ return MODE_NOCLOCK;
+
+ for (i = 0; i < ARRAY_SIZE(hibmc_pll_table); i++) {
+ if (hibmc_pll_table[i].hdisplay == mode->hdisplay &&
+ hibmc_pll_table[i].vdisplay == mode->vdisplay)
+ return MODE_OK;
+ }
+
+ return MODE_BAD;
+}
+
static unsigned int format_pll_reg(void)
{
unsigned int pllreg = 0;
@@ -435,6 +476,42 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
}
+static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ void __iomem *mmio = priv->mmio;
+ u16 *r, *g, *b;
+ unsigned int reg;
+ int i;
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ unsigned int offset = i << 2;
+ u8 red = *r++ >> 8;
+ u8 green = *g++ >> 8;
+ u8 blue = *b++ >> 8;
+ u32 rgb = (red << 16) | (green << 8) | blue;
+
+ writel(rgb, mmio + HIBMC_CRT_PALETTE + offset);
+ }
+
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg |= HIBMC_FIELD(HIBMC_CTL_DISP_CTL_GAMMA, 1);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
+static int hibmc_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ hibmc_crtc_load_lut(crtc);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.set_config = drm_atomic_helper_set_config,
@@ -444,6 +521,7 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = hibmc_crtc_enable_vblank,
.disable_vblank = hibmc_crtc_disable_vblank,
+ .gamma_set = hibmc_crtc_gamma_set,
};
static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
@@ -452,6 +530,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
.atomic_flush = hibmc_crtc_atomic_flush,
.atomic_enable = hibmc_crtc_atomic_enable,
.atomic_disable = hibmc_crtc_atomic_disable,
+ .mode_valid = hibmc_crtc_mode_valid,
};
int hibmc_de_init(struct hibmc_drm_private *priv)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 4a8a4cfb4b75..222356a4f9a8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -91,11 +91,11 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
priv->dev->mode_config.min_width = 0;
priv->dev->mode_config.min_height = 0;
priv->dev->mode_config.max_width = 1920;
- priv->dev->mode_config.max_height = 1440;
+ priv->dev->mode_config.max_height = 1200;
priv->dev->mode_config.fb_base = priv->fb_base;
priv->dev->mode_config.preferred_depth = 24;
- priv->dev->mode_config.prefer_shadow = 0;
+ priv->dev->mode_config.prefer_shadow = 1;
priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -327,6 +327,11 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
struct drm_device *dev;
int ret;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+ "hibmcdrmfb");
+ if (ret)
+ return ret;
+
dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
if (IS_ERR(dev)) {
DRM_ERROR("failed to allocate drm_device\n");
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
index b63a1ee15ceb..17b30c393b10 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
@@ -68,6 +68,12 @@
#define HIBMC_CRT_DISP_CTL 0x80200
+#define HIBMC_CRT_DISP_CTL_DPMS(x) ((x) << 30)
+#define HIBMC_CRT_DISP_CTL_DPMS_MASK 0xc0000000
+
+#define HIBMC_CRT_DPMS_ON 0
+#define HIBMC_CRT_DPMS_OFF 3
+
#define HIBMC_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25)
#define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000
@@ -85,6 +91,9 @@
#define HIBMC_CRT_DISP_CTL_TIMING(x) ((x) << 8)
#define HIBMC_CRT_DISP_CTL_TIMING_MASK 0x100
+#define HIBMC_CTL_DISP_CTL_GAMMA(x) ((x) << 3)
+#define HIBMC_CTL_DISP_CTL_GAMMA_MASK 0x08
+
#define HIBMC_CRT_DISP_CTL_PLANE(x) ((x) << 2)
#define HIBMC_CRT_DISP_CTL_PLANE_MASK 4
@@ -170,6 +179,7 @@
#define CRT_PLL1_HS_74MHZ 0x23941dc2
#define CRT_PLL1_HS_80MHZ 0x23941001
#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2
+#define CRT_PLL1_HS_106MHZ 0x237C1641
#define CRT_PLL1_HS_108MHZ 0x23b41b01
#define CRT_PLL1_HS_162MHZ 0x23480681
#define CRT_PLL1_HS_148MHZ 0x23541dc2
@@ -182,10 +192,13 @@
#define CRT_PLL2_HS_78MHZ 0x50E147AE
#define CRT_PLL2_HS_74MHZ 0x602B6AE7
#define CRT_PLL2_HS_80MHZ 0x70000000
+#define CRT_PLL2_HS_106MHZ 0x0075c28f
#define CRT_PLL2_HS_108MHZ 0x80000000
#define CRT_PLL2_HS_162MHZ 0xA0000000
#define CRT_PLL2_HS_148MHZ 0xB0CCCCCD
#define CRT_PLL2_HS_193MHZ 0xC0872B02
+#define HIBMC_CRT_PALETTE 0x80C00
+
#define HIBMC_FIELD(field, value) (field(value) & field##_MASK)
#endif
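[Editorial note] The hibmc_crtc_dpms() calls added in the atomic enable/disable hooks earlier have their body outside the hunks shown here. A minimal sketch consistent with the DPMS field definitions above (register names real, body assumed):

	static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
	{
		struct hibmc_drm_private *priv = crtc->dev->dev_private;
		unsigned int reg;

		reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
		reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
		reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, dpms);
		writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
	}

Expanding the helper by hand for the off case: HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, HIBMC_CRT_DPMS_OFF) = (3 << 30) & 0xc0000000 = 0xc0000000, so only the two DPMS bits change and neighbouring fields are untouched.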
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 6d98fdc06f6c..678ac2ef2a93 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -11,8 +11,10 @@
* Jianhua Li <lijianhua@huawei.com>
*/
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include "hibmc_drm_drv.h"
@@ -20,7 +22,14 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
- return drm_add_modes_noedid(connector, 800, 600);
+ int count;
+
+ count = drm_add_modes_noedid(connector,
+ connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return count;
}
static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 50b988fdd5cc..99397ac3b363 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -54,6 +54,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index bdcf9c6ae9e9..f31068d74b18 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -777,7 +777,7 @@ static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
int ret;
/* associate the bridge to dsi encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to attach external bridge\n");
return ret;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a63790d32d75..c3332209f27a 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1356,10 +1356,16 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
-static int tda998x_bridge_attach(struct drm_bridge *bridge)
+static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
return tda998x_connector_init(priv, bridge->dev);
}
@@ -2022,7 +2028,7 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
if (ret)
goto err_encoder;
- ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
+ ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL, 0);
if (ret)
goto err_bridge;
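[Editorial note] Both the kirin and tda998x hunks above adapt to the drm_bridge_attach() signature that now carries an attach-flags argument. A minimal sketch of an attach callback under the new API (the demo_* names are hypothetical):

	static int demo_bridge_attach(struct drm_bridge *bridge,
				      enum drm_bridge_attach_flags flags)
	{
		/* A bridge that still creates its own connector must reject
		 * the NO_CONNECTOR request, exactly as tda998x does above. */
		if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
			return -EINVAL;

		return demo_connector_init(bridge->dev);
	}

Callers that want the legacy connector-creating behaviour pass 0 for the flags, as both converted call sites do:

	ret = drm_bridge_attach(encoder, bridge, NULL, 0);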
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
index d9a77f3b59b2..81972dce1aff 100644
--- a/drivers/gpu/drm/i915/.gitignore
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.hdrtest
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 907c4471f591..9afa5c4a6bf0 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -42,16 +42,9 @@ config DRM_I915
If "M" is selected, the module will be called i915.
-config DRM_I915_ALPHA_SUPPORT
- bool "Enable alpha quality support for new Intel hardware by default"
- depends on DRM_I915
- help
- This option is deprecated. Use DRM_I915_FORCE_PROBE option instead.
-
config DRM_I915_FORCE_PROBE
string "Force probe driver for selected new Intel hardware"
depends on DRM_I915
- default "*" if DRM_I915_ALPHA_SUPPORT
help
This is the default value for the i915.force_probe module
parameter. Using the module parameter overrides this option.
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index c280b6ae38eb..0bfd276c19fe 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -20,6 +20,9 @@ config DRM_I915_HEARTBEAT_INTERVAL
check the health of the GPU and undertake regular house-keeping of
internal driver state.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/heartbeat_interval_ms
+
May be 0 to disable heartbeats and therefore disable automatic GPU
hang detection.
@@ -33,11 +36,18 @@ config DRM_I915_PREEMPT_TIMEOUT
expires, the HW will be reset to allow the more important context
to execute.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/preempt_timeout_ms
+
May be 0 to disable the timeout.
-config DRM_I915_SPIN_REQUEST
- int "Busywait for request completion (us)"
- default 5 # microseconds
+ The compiled-in default may be overridden at driver probe time on
+ certain platforms and certain engines, which will be reflected in the
+ sysfs control.
+
+config DRM_I915_MAX_REQUEST_BUSYWAIT
+ int "Busywait for request completion limit (ns)"
+ default 8000 # nanoseconds
help
Before sleeping waiting for a request (GPU operation) to complete,
we may spend some time polling for its completion. As the IRQ may
@@ -45,6 +55,9 @@ config DRM_I915_SPIN_REQUEST
check if the request will complete in the time it would have taken
us to enable the interrupt.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/max_busywait_duration_ns
+
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
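[Editorial note] The help text above describes a classic spin-then-sleep optimisation: poll briefly for completion, since arming the interrupt itself costs a few microseconds. A generic sketch of the pattern, illustrative only and not the driver's implementation (request_complete() is an assumed predicate):

	static bool request_complete(void);	/* assumed completion predicate */

	static void wait_for_request(u64 budget_ns)
	{
		u64 deadline = ktime_get_ns() + budget_ns;

		/* Spin while completing is likely cheaper than enabling the IRQ. */
		while (ktime_get_ns() < deadline) {
			if (request_complete())
				return;
			cpu_relax();
		}

		/* Budget exhausted: arm the completion interrupt and sleep here. */
	}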
@@ -60,6 +73,9 @@ config DRM_I915_STOP_TIMEOUT
that the reset itself may take longer and so be more disruptive to
interactive or low latency workloads.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/stop_timeout_ms
+
config DRM_I915_TIMESLICE_DURATION
int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
default 1 # milliseconds
@@ -73,4 +89,7 @@ config DRM_I915_TIMESLICE_DURATION
is scheduled for execution for the timeslice duration, before
switching to the next context.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/timeslice_duration_ms
+
May be 0 to disable timeslicing.
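[Editorial note] Each of the profile options above advertises a per-engine sysfs control with the same path layout. A small userspace sketch that retunes one of them at runtime (illustrative; "card0" and "rcs0" are assumptions about the running system, the path layout is taken from the help texts):

	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/class/drm/card0/engine/rcs0/preempt_timeout_ms";
		FILE *f = fopen(path, "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", 640);	/* new timeout in milliseconds */
		return fclose(f) ? 1 : 0;
	}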
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a1f2411aa21b..6cd1f6253814 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,9 +28,6 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-subdir-ccflags-y += \
- $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
-
subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
@@ -46,15 +43,16 @@ i915-y += i915_drv.o \
i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
- intel_csr.o \
intel_device_info.o \
+ intel_dram.o \
intel_memory_region.o \
intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
intel_sideband.o \
intel_uncore.o \
- intel_wakeref.o
+ intel_wakeref.o \
+ vlv_suspend.o
# core library code
i915-y += \
@@ -66,7 +64,11 @@ i915-y += \
i915_user_extensions.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
+i915-$(CONFIG_DEBUG_FS) += \
+ i915_debugfs.o \
+ i915_debugfs_params.o \
+ display/intel_display_debugfs.o \
+ display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
@@ -75,9 +77,12 @@ gt-y += \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
gt/gen6_ppgtt.o \
+ gt/gen7_renderclear.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
+ gt/intel_context_param.o \
+ gt/intel_context_sseu.o \
gt/intel_engine_cs.o \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
@@ -102,7 +107,8 @@ gt-y += \
gt/intel_rps.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
- gt/intel_workarounds.o
+ gt/intel_workarounds.o \
+ gt/sysfs_engines.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -179,6 +185,7 @@ i915-y += \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
+ display/intel_csr.o \
display/intel_display.o \
display/intel_display_power.o \
display/intel_dpio_phy.o \
@@ -187,6 +194,7 @@ i915-y += \
display/intel_fbc.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
+ display/intel_global_state.o \
display/intel_hdcp.o \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index f8e882101396..17cee6f80d8b 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -39,14 +39,14 @@
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
- return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
>> FREE_HEADER_CREDIT_SHIFT;
}
static inline int payload_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
- return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
>> FREE_PLOAD_CREDIT_SHIFT;
}
@@ -55,7 +55,7 @@ static void wait_for_header_credits(struct drm_i915_private *dev_priv,
{
if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
MAX_HEADER_CREDIT, 100))
- DRM_ERROR("DSI header credits not released\n");
+ drm_err(&dev_priv->drm, "DSI header credits not released\n");
}
static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
@@ -63,7 +63,7 @@ static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
{
if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
MAX_PLOAD_CREDIT, 100))
- DRM_ERROR("DSI payload credits not released\n");
+ drm_err(&dev_priv->drm, "DSI payload credits not released\n");
}
static enum transcoder dsi_port_to_transcoder(enum port port)
@@ -97,7 +97,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
dsi->channel = 0;
ret = mipi_dsi_dcs_nop(dsi);
if (ret < 0)
- DRM_ERROR("error sending DCS NOP command\n");
+ drm_err(&dev_priv->drm,
+ "error sending DCS NOP command\n");
}
/* wait for header credits to be released */
@@ -109,9 +110,9 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+ if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
LPTX_IN_PROGRESS), 20))
- DRM_ERROR("LPTX bit not cleared\n");
+ drm_err(&dev_priv->drm, "LPTX bit not cleared\n");
}
}
@@ -129,14 +130,15 @@ static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
free_credits = payload_credits_available(dev_priv, dsi_trans);
if (free_credits < 1) {
- DRM_ERROR("Payload credit not available\n");
+ drm_err(&dev_priv->drm,
+ "Payload credit not available\n");
return false;
}
for (j = 0; j < min_t(u32, len - i, 4); j++)
tmp |= *data++ << 8 * j;
- I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp);
}
return true;
@@ -154,11 +156,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
/* check if header credit available */
free_credits = header_credits_available(dev_priv, dsi_trans);
if (free_credits < 1) {
- DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+ drm_err(&dev_priv->drm,
+ "send pkt header failed, not enough hdr credits\n");
return -1;
}
- tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
if (pkt.payload)
tmp |= PAYLOAD_PRESENT;
@@ -175,7 +178,7 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
- I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
return 0;
}
@@ -212,53 +215,55 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+ intel_de_write(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
}
@@ -270,7 +275,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 dss_ctl1;
- dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -286,20 +291,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
- DRM_ERROR("DL buffer depth exceed max value\n");
+ drm_err(&dev_priv->drm,
+ "DL buffer depth exceed max value\n");
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- I915_WRITE(DSS_CTL2, dss_ctl2);
+ intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- I915_WRITE(DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
}
/* aka DSI 8X clock */
@@ -330,15 +336,15 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
- esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
- POSTING_READ(ICL_DSI_ESC_CLK_DIV(port));
+ intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port));
}
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port),
- esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
- POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port));
+ intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
}
}
@@ -348,7 +354,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- WARN_ON(intel_dsi->io_wakeref[port]);
+ drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]);
intel_dsi->io_wakeref[port] =
intel_display_power_get(dev_priv,
port == PORT_A ?
@@ -365,9 +371,9 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
tmp |= COMBO_PHY_MODE_DSI;
- I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
}
get_dsi_io_power_domains(dev_priv, intel_dsi);
@@ -394,40 +400,46 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+ intel_de_write(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0);
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0x1);
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
+ tmp);
}
}
@@ -442,12 +454,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* clear common keeper enable bit */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
}
/*
@@ -456,19 +468,19 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* as part of lane phy sequence configuration
*/
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_CL_DW5(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
tmp |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
}
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
}
/* Program swing and de-emphasis */
@@ -476,12 +488,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Set training enable to trigger update */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
}
}
@@ -493,14 +505,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
tmp |= DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), tmp);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
- if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+ if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
500))
- DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+ drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n",
+ port_name(port));
}
}
@@ -516,28 +529,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
tmp &= ~MASTER_INIT_TIMER_MASK;
tmp |= intel_dsi->init_count;
- I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
}
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
/* shadow register inside display core */
- I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
}
/* Program DPHY data lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
- intel_dsi->dphy_data_lane_reg);
+ intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
/* shadow register inside display core */
- I915_WRITE(DSI_DATA_TIMING_PARAM(port),
- intel_dsi->dphy_data_lane_reg);
+ intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
}
/*
@@ -549,25 +564,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
if (IS_GEN(dev_priv, 11)) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp = intel_de_read(dev_priv,
+ DPHY_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+ intel_de_write(dev_priv,
+ DPHY_TA_TIMING_PARAM(port),
+ tmp);
/* shadow register inside display core */
- tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp = intel_de_read(dev_priv,
+ DSI_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ intel_de_write(dev_priv,
+ DSI_TA_TIMING_PARAM(port), tmp);
}
}
}
if (IS_ELKHARTLAKE(dev_priv)) {
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_DPHY_CHKN(phy));
+ tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
- I915_WRITE(ICL_DPHY_CHKN(phy), tmp);
+ intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
}
}
}
@@ -579,13 +599,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ mutex_lock(&dev_priv->dpll.lock);
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -595,13 +615,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ mutex_lock(&dev_priv->dpll.lock);
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
@@ -613,14 +633,14 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
}
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) {
if (INTEL_GEN(dev_priv) >= 12)
@@ -628,11 +648,11 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
else
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(ICL_DPCLKA_CFGCR0);
+ intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void
@@ -649,7 +669,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
if (intel_dsi->eotp_pkt)
tmp &= ~EOTP_DISABLED;
@@ -726,16 +746,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
}
}
- I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
}
/* enable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans));
tmp |= PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
/* configure stream splitting */
@@ -746,7 +768,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* select data lane width */
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
tmp &= ~DDI_PORT_WIDTH_MASK;
tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
@@ -772,15 +794,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* enable DDI buffer */
tmp |= TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
}
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
- LINK_READY), 2500))
- DRM_ERROR("DSI link not ready\n");
+ if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ LINK_READY), 2500))
+ drm_err(&dev_priv->drm, "DSI link not ready\n");
}
}
@@ -836,17 +858,18 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* minimum hactive as per bspec: 256 pixels */
if (adjusted_mode->crtc_hdisplay < 256)
- DRM_ERROR("hactive is less then 256 pixels\n");
+ drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n");
/* if RGB666 format, then hactive must be multiple of 4 pixels */
if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
- DRM_ERROR("hactive pixels are not multiple of 4\n");
+ drm_err(&dev_priv->drm,
+ "hactive pixels are not multiple of 4\n");
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(HTOTAL(dsi_trans),
- (hactive - 1) | ((htotal - 1) << 16));
+ intel_de_write(dev_priv, HTOTAL(dsi_trans),
+ (hactive - 1) | ((htotal - 1) << 16));
}
/* TRANS_HSYNC register to be programmed only for video mode */
@@ -855,11 +878,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
/* BSPEC: hsync size should be at least 16 pixels */
if (hsync_size < 16)
- DRM_ERROR("hsync size < 16 pixels\n");
+ drm_err(&dev_priv->drm,
+ "hsync size < 16 pixels\n");
}
if (hback_porch < 16)
- DRM_ERROR("hback porch < 16 pixels\n");
+ drm_err(&dev_priv->drm, "hback porch < 16 pixels\n");
if (intel_dsi->dual_link) {
hsync_start /= 2;
@@ -868,8 +892,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(HSYNC(dsi_trans),
- (hsync_start - 1) | ((hsync_end - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(dsi_trans),
+ (hsync_start - 1) | ((hsync_end - 1) << 16));
}
}
@@ -882,21 +906,21 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
- I915_WRITE(VTOTAL(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, VTOTAL(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
- DRM_ERROR("Invalid vsync_end value\n");
+ drm_err(&dev_priv->drm, "Invalid vsync_end value\n");
if (vsync_start < vactive)
- DRM_ERROR("vsync_start less than vactive\n");
+ drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
/* program TRANS_VSYNC register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
}
/*
@@ -907,15 +931,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
*/
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+ intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
}
/* program TRANS_VBLANK register, should be same as vtotal programmed */
if (INTEL_GEN(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VBLANK(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
}
}
}
@@ -930,14 +954,15 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
tmp |= PIPECONF_ENABLE;
- I915_WRITE(PIPECONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 10))
- DRM_ERROR("DSI transcoder not enabled\n");
+ drm_err(&dev_priv->drm,
+ "DSI transcoder not enabled\n");
}
}
@@ -968,26 +993,26 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
- tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
- I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
- tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
- I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
- tmp = I915_READ(DSI_TA_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
tmp &= ~TA_TIMEOUT_VALUE_MASK;
tmp |= TA_TIMEOUT_VALUE(ta_timeout);
- I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
}
}
@@ -1041,14 +1066,15 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
* FIXME: This uses the number of DW's currently in the payload
* receive queue. This is probably not what we want here.
*/
- tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans));
tmp &= NUMBER_RX_PLOAD_DW_MASK;
/* multiply "Number Rx Payload DW" by 4 to get max value */
tmp = tmp * 4;
dsi = intel_dsi->dsi_hosts[port]->device;
ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
if (ret < 0)
- DRM_ERROR("error setting max return pkt size%d\n", tmp);
+ drm_err(&dev_priv->drm,
+ "error setting max return pkt size%d\n", tmp);
}
/* panel power on related mipi dsi vbt sequences */
@@ -1077,8 +1103,6 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
/* step3b */
gen11_dsi_map_pll(encoder, pipe_config);
@@ -1092,13 +1116,24 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+}
+
+static void gen11_dsi_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ WARN_ON(crtc_state->has_pch_encoder);
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
/* step7: enable backlight */
- intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_panel_enable_backlight(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+
+ intel_crtc_vblank_on(crtc_state);
}
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
@@ -1113,14 +1148,15 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
tmp &= ~PIPECONF_ENABLE;
- I915_WRITE(PIPECONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 50))
- DRM_ERROR("DSI trancoder not disabled\n");
+ drm_err(&dev_priv->drm,
+ "DSI trancoder not disabled\n");
}
}
@@ -1147,32 +1183,34 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* put dsi link in ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans));
tmp |= LINK_ENTER_ULPS;
tmp &= ~LINK_ULPS_TYPE_LP11;
- I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp);
- if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
- LINK_IN_ULPS),
+ if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
+ LINK_IN_ULPS),
10))
- DRM_ERROR("DSI link not in ULPS\n");
+ drm_err(&dev_priv->drm, "DSI link not in ULPS\n");
}
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
tmp &= ~TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
}
/* disable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans));
tmp &= ~PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
}
}
@@ -1186,15 +1224,16 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
tmp &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), tmp);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
- if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+ if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
8))
- DRM_ERROR("DDI port:%c buffer not idle\n",
- port_name(port));
+ drm_err(&dev_priv->drm,
+ "DDI port:%c buffer not idle\n",
+ port_name(port));
}
gen11_dsi_gate_clocks(encoder);
}
@@ -1219,9 +1258,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
/* set mode to DDI */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
tmp &= ~COMBO_PHY_MODE_DSI;
- I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
}
}
@@ -1311,15 +1350,15 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsc_get_config(encoder, pipe_config);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pipe_config->port_clock =
- cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+ pipe_config->port_clock = intel_dpll_get_freq(i915,
+ pipe_config->shared_dpll);
pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
@@ -1357,11 +1396,13 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
return ret;
/* DSI specific sanity checks on the common code */
- WARN_ON(vdsc_cfg->vbr_enable);
- WARN_ON(vdsc_cfg->simple_422);
- WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width);
- WARN_ON(vdsc_cfg->slice_height < 8);
- WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422);
+ drm_WARN_ON(&dev_priv->drm,
+ vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8);
+ drm_WARN_ON(&dev_priv->drm,
+ vdsc_cfg->pic_height % vdsc_cfg->slice_height);
ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
if (ret)
@@ -1443,7 +1484,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
*pipe = PIPE_A;
@@ -1458,11 +1499,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
*pipe = PIPE_D;
break;
default:
- DRM_ERROR("Invalid PIPE input\n");
+ drm_err(&dev_priv->drm, "Invalid PIPE input\n");
goto out;
}
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
ret = tmp & PIPECONF_ENABLE;
}
out:
@@ -1582,7 +1623,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
*/
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+ drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n",
+ prepare_cnt);
prepare_cnt = ICL_PREPARE_CNT_MAX;
}
@@ -1590,28 +1632,33 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
ths_prepare_ns, tlpx_ns);
if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
}
/* trail cnt in escape clocks */
trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+ drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n",
+ trail_cnt);
trail_cnt = ICL_TRAIL_CNT_MAX;
}
/* tclk pre count in escape clocks */
tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
}
/* tclk post count in escape clocks */
tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "tclk_post_cnt out of range (%d)\n",
+ tclk_post_cnt);
tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
}
@@ -1619,14 +1666,17 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
ths_prepare_ns, tlpx_ns);
if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n",
+ hs_zero_cnt);
hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
}
/* hs exit zero cnt in escape clocks */
exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "exit_zero_cnt out of range (%d)\n",
+ exit_zero_cnt);
exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
}
@@ -1668,9 +1718,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- connector->base.display_info.panel_orientation =
- intel_dsi_get_panel_orientation(connector);
- drm_connector_init_panel_orientation_property(&connector->base,
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
@@ -1708,6 +1757,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->enable = gen11_dsi_enable;
encoder->disable = gen11_dsi_disable;
encoder->post_disable = gen11_dsi_post_disable;
encoder->port = port;
@@ -1738,7 +1788,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
- DRM_ERROR("DSI fixed mode info missing\n");
+ drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
goto err;
}
@@ -1764,7 +1814,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- DRM_DEBUG_KMS("no device found\n");
+ drm_dbg_kms(&dev_priv->drm, "no device found\n");
goto err;
}
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 3456d33feb46..e21fb14d5e07 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_acpi.h"
+#include "intel_display_types.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
@@ -156,3 +157,91 @@ void intel_register_dsm_handler(void)
void intel_unregister_dsm_handler(void)
{
}
+
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT 0
+#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT 8
+#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT 12
+#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT (1 << 16)
+#define ACPI_DEPENDS_ON_VGA (1 << 17)
+#define ACPI_PIPE_ID_SHIFT 18
+#define ACPI_PIPE_ID_MASK (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME (1ULL << 31)
+
+static u32 acpi_display_type(struct intel_connector *connector)
+{
+ u32 display_type;
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ display_type = ACPI_DISPLAY_TYPE_VGA;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ case DRM_MODE_CONNECTOR_TV:
+ display_type = ACPI_DISPLAY_TYPE_TV;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ default:
+ MISSING_CASE(connector->base.connector_type);
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ }
+
+ return display_type;
+}
+
+void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *drm_dev = &dev_priv->drm;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 display_index[16] = {};
+
+ /* Populate the ACPI IDs for all connectors for a given drm_device */
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
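[Editorial note] To make the _DOD bit layout concrete, a worked example for a hypothetical three-connector system; the values follow directly from the masks defined above:

	/* Worked example, not part of the patch:
	 *   eDP panel  -> ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL | index 0 = 0x0400
	 *   DP port #1 -> ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL | index 0 = 0x0300
	 *   DP port #2 -> ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL | index 1 = 0x0301
	 * display_index[] keeps one counter per display type, so the two DP
	 * ports receive consecutive indices while the eDP panel stays at
	 * index 0. */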
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 1c576b3fb712..e8b068661d22 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -6,12 +6,17 @@
#ifndef __INTEL_ACPI_H__
#define __INTEL_ACPI_H__
+struct drm_i915_private;
+
#ifdef CONFIG_ACPI
void intel_register_dsm_handler(void);
void intel_unregister_dsm_handler(void);
+void intel_acpi_device_id_update(struct drm_i915_private *i915);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
+static inline
+void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index c362eecdd414..d043057d2fa0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -35,7 +35,9 @@
#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -64,8 +66,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
else if (property == dev_priv->broadcast_rgb_property)
*val = intel_conn_state->broadcast_rgb;
else {
- DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
+ drm_dbg_atomic(&dev_priv->drm,
+ "Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -101,8 +104,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return 0;
}
- DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
+ drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -178,6 +181,8 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
/**
* intel_connector_needs_modeset - check if connector needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ * @connector: the connector
*/
bool
intel_connector_needs_modeset(struct intel_atomic_state *state,
@@ -314,7 +319,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
}
}
- if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+ if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+ "Cannot find scaler for %s:%d\n", name, idx))
return;
/* set scaler mode */
@@ -357,8 +363,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
mode = SKL_PS_SCALER_MODE_DYN;
}
- DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
+ drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
}
@@ -409,8 +415,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
/* fail if required scalers > available scalers */
if (num_scalers_need > intel_crtc->num_scalers){
- DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
- num_scalers_need, intel_crtc->num_scalers);
+ drm_dbg_kms(&dev_priv->drm,
+ "Too many scaling requests %d > %d\n",
+ num_scalers_need, intel_crtc->num_scalers);
return -EINVAL;
}
@@ -455,8 +462,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
- DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
- plane->base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to add [PLANE:%d] to drm_state\n",
+ plane->base.id);
return PTR_ERR(state);
}
}
@@ -465,7 +473,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
- if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_plane->pipe != intel_crtc->pipe))
continue;
plane_state = intel_atomic_get_new_plane_state(intel_state,
@@ -494,18 +503,28 @@ intel_atomic_state_alloc(struct drm_device *dev)
return &state->base;
}
+void intel_atomic_state_free(struct drm_atomic_state *_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+
+ drm_atomic_state_default_release(&state->base);
+ kfree(state->global_objs);
+
+ i915_sw_fence_fini(&state->commit_ready);
+
+ kfree(state);
+}
+
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
+
drm_atomic_state_default_clear(&state->base);
+ intel_atomic_clear_global_state(state);
+
state->dpll_set = state->modeset = false;
state->global_state_changed = false;
state->active_pipes = 0;
- memset(&state->min_cdclk, 0, sizeof(state->min_cdclk));
- memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level));
- memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical));
- memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual));
- state->cdclk.pipe = INVALID_PIPE;
}
struct intel_crtc_state *
@@ -520,7 +539,7 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
-int intel_atomic_lock_global_state(struct intel_atomic_state *state)
+int _intel_atomic_lock_global_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -539,7 +558,7 @@ int intel_atomic_lock_global_state(struct intel_atomic_state *state)
return 0;
}
-int intel_atomic_serialize_global_state(struct intel_atomic_state *state)
+int _intel_atomic_serialize_global_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 74c749dbfb4f..11146292b06f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -45,6 +45,7 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc,
void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_free(struct drm_atomic_state *state);
void intel_atomic_state_clear(struct drm_atomic_state *state);
struct intel_crtc_state *
@@ -55,8 +56,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-int intel_atomic_lock_global_state(struct intel_atomic_state *state);
+int _intel_atomic_lock_global_state(struct intel_atomic_state *state);
-int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+int _intel_atomic_serialize_global_state(struct intel_atomic_state *state);
#endif /* __INTEL_ATOMIC_H__ */
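
A plausible reading of the underscore renames above (not spelled out in this patch): the un-prefixed names appear to move to a per-object global-state API, since the intel_audio.c hunk further down calls intel_atomic_lock_global_state(&cdclk_state->base). Under that assumption the new entry points would be thin wrappers; the obj_state->state backpointer below is an inferred field name, shown only as a sketch:

int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
	/* delegate to the renamed helper, which locks on the full state */
	return _intel_atomic_lock_global_state(obj_state->state);
}

int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
	return _intel_atomic_serialize_global_state(obj_state->state);
}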
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 3e97af682b1b..457b258683d3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -37,6 +37,7 @@
#include "i915_trace.h"
#include "intel_atomic_plane.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sprite.h"
@@ -132,15 +133,37 @@ intel_plane_destroy_state(struct drm_plane *plane,
kfree(plane_state);
}
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h),
+ dst_w * dst_h);
+}
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp;
+ unsigned int pixel_rate;
if (!plane_state->uapi.visible)
return 0;
+ pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
cpp = fb->format->cpp[0];
/*
@@ -152,45 +175,67 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
if (fb->format->is_yuv && fb->format->num_planes > 1)
cpp *= 4;
- return cpp * crtc_state->pixel_rate;
+ return pixel_rate * cpp;
}
-bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane)
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane,
+ bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
- struct intel_crtc_state *crtc_state;
+ const struct intel_cdclk_state *cdclk_state;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
if (!plane_state->uapi.visible || !plane->min_cdclk)
- return false;
+ return 0;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ new_crtc_state->min_cdclk[plane->id] =
+ plane->min_cdclk(new_crtc_state, plane_state);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ /*
+ * No need to check against the cdclk state if
+ * the min cdclk for the plane doesn't increase.
+ *
+ * Ie. we only ever increase the cdclk due to plane
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_crtc_state->min_cdclk[plane->id] <=
+ old_crtc_state->min_cdclk[plane->id])
+ return 0;
- crtc_state->min_cdclk[plane->id] =
- plane->min_cdclk(crtc_state, plane_state);
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
/*
- * Does the cdclk need to be bumbed up?
+ * No need to recalculate the cdclk state if
+ * the min cdclk for the pipe doesn't increase.
*
- * Note: we obviously need to be called before the new
- * cdclk frequency is calculated so state->cdclk.logical
- * hasn't been populated yet. Hence we look at the old
- * cdclk state under dev_priv->cdclk.logical. This is
- * safe as long we hold at least one crtc mutex (which
- * must be true since we have crtc_state).
+ * Ie. we only ever increase the cdclk due to plane
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
*/
- if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
- plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id],
- dev_priv->cdclk.logical.cdclk);
- return true;
- }
+ if (new_crtc_state->min_cdclk[plane->id] <=
+ cdclk_state->min_cdclk[crtc->pipe])
+ return 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
+ plane->base.base.id, plane->base.name,
+ new_crtc_state->min_cdclk[plane->id],
+ crtc->base.base.id, crtc->base.name,
+ cdclk_state->min_cdclk[crtc->pipe]);
+ *need_cdclk_calc = true;
- return false;
+ return 0;
}
static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
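
A worked example of the intel_plane_pixel_rate() math introduced above, assuming a CRTC pixel rate of 148500 kHz and a plane downscaled from 3840x2160 to 1920x1080: the plane must fetch four source pixels per output pixel, so the effective rate is 148500 * (3840 * 2160) / (1920 * 1080) = 594000 kHz. Upscaling never lowers the rate because dst_w/dst_h are clamped to the source size first, and mul_u32_u32() widens the intermediate product to 64 bits, since 148500 * 8294400 would overflow a u32.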
@@ -225,12 +270,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane_state *new_plane_state)
{
struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
- const struct drm_framebuffer *fb;
+ const struct drm_framebuffer *fb = new_plane_state->hw.fb;
int ret;
- intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
- fb = new_plane_state->hw.fb;
-
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
@@ -292,6 +334,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
new_plane_state->uapi.visible = false;
if (!crtc)
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 5cedafdddb55..a6bbf42bae1f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -18,6 +18,9 @@ struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
@@ -46,7 +49,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *plane_state);
-bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane);
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane,
+ bool *need_cdclk_calc);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
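
With the new signature, errors propagate through the int return while the bool out-parameter accumulates whether a full cdclk recomputation is needed. A hedged sketch of a caller (the surrounding function and the final recompute step are illustrative; the plane-state iterator is the existing i915 macro):

static int example_check_planes_min_cdclk(struct intel_atomic_state *state)
{
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	bool need_cdclk_calc = false;
	int ret = 0, i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane,
						 &need_cdclk_calc);
		if (ret)
			return ret;
	}

	if (need_cdclk_calc) {
		/* only here does the full cdclk state get recomputed */
	}

	return ret;
}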
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index b18040793d9e..62f234f641de 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -30,6 +30,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
@@ -148,6 +149,10 @@ static const struct {
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+ { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 },
+ { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 },
+ { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 },
+ { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 },
};
/* HDMI N/CTS table */
@@ -233,6 +238,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -242,6 +248,9 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
+ if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+ i = ARRAY_SIZE(hdmi_audio_clock);
+
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
@@ -291,18 +300,18 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
u32 tmp;
int i;
- tmp = I915_READ(reg_eldv);
+ tmp = intel_de_read(dev_priv, reg_eldv);
tmp &= bits_eldv;
if (!tmp)
return false;
- tmp = I915_READ(reg_elda);
+ tmp = intel_de_read(dev_priv, reg_elda);
tmp &= ~bits_elda;
- I915_WRITE(reg_elda, tmp);
+ intel_de_write(dev_priv, reg_elda, tmp);
for (i = 0; i < drm_eld_size(eld) / 4; i++)
- if (I915_READ(reg_edid) != *((const u32 *)eld + i))
+ if (intel_de_read(dev_priv, reg_edid) != *((const u32 *)eld + i))
return false;
return true;
@@ -315,18 +324,18 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 eldv, tmp;
- DRM_DEBUG_KMS("Disable audio codec\n");
+ drm_dbg_kms(&dev_priv->drm, "Disable audio codec\n");
- tmp = I915_READ(G4X_AUD_VID_DID);
+ tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
eldv = G4X_ELDV_DEVCTG;
/* Invalidate ELD */
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp &= ~eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
}
static void g4x_audio_codec_enable(struct intel_encoder *encoder,
@@ -340,9 +349,10 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm, "Enable audio codec, %u bytes ELD\n",
+ drm_eld_size(eld));
- tmp = I915_READ(G4X_AUD_VID_DID);
+ tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
@@ -354,19 +364,20 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
G4X_HDMIW_HDMIEDID))
return;
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
len = (tmp >> 9) & 0x1f; /* ELD buffer size */
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
len = min(drm_eld_size(eld) / 4, len);
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ drm_dbg(&dev_priv->drm, "ELD size %d\n", len);
for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
+ intel_de_write(dev_priv, G4X_HDMIW_HDMIEDID,
+ *((const u32 *)eld + i));
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp |= eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
}
static void
@@ -384,11 +395,12 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
nm = audio_config_dp_get_n_m(crtc_state, rate);
if (nm)
- DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+ drm_dbg_kms(&dev_priv->drm, "using Maud %u, Naud %u\n", nm->m,
+ nm->n);
else
- DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+ drm_dbg_kms(&dev_priv->drm, "using automatic Maud, Naud\n");
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -400,9 +412,9 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
@@ -413,7 +425,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_M_CTS_M_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -429,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -437,25 +449,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
- DRM_DEBUG_KMS("using N %d\n", n);
+ drm_dbg_kms(&dev_priv->drm, "using N %d\n", n);
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
- DRM_DEBUG_KMS("using automatic N\n");
+ drm_dbg_kms(&dev_priv->drm, "using automatic N\n");
}
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -476,26 +488,26 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n",
+ transcoder_name(cpu_transcoder));
mutex_lock(&dev_priv->av_mutex);
/* Disable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
/* Invalidate ELD */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
mutex_unlock(&dev_priv->av_mutex);
}
@@ -511,16 +523,17 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
- transcoder_name(cpu_transcoder), drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm,
+ "Enable audio codec on transcoder %s, %u bytes ELD\n",
+ transcoder_name(cpu_transcoder), drm_eld_size(eld));
mutex_lock(&dev_priv->av_mutex);
/* Enable audio presence detect, invalidate ELD */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
@@ -530,19 +543,20 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
*/
/* Reset ELD write address */
- tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
tmp &= ~IBX_ELD_ADDRESS_MASK;
- I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i));
+ intel_de_write(dev_priv, HSW_AUD_EDID_DATA(cpu_transcoder),
+ *((const u32 *)eld + i));
/* ELD valid */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_ELD_VALID(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
@@ -561,11 +575,12 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
- DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe));
- if (WARN_ON(port == PORT_A))
+ if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
return;
if (HAS_PCH_IBX(dev_priv)) {
@@ -580,21 +595,21 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
}
/* Disable timestamps */
- tmp = I915_READ(aud_config);
+ tmp = intel_de_read(dev_priv, aud_config);
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
- I915_WRITE(aud_config, tmp);
+ intel_de_write(dev_priv, aud_config, tmp);
eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp &= ~eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
}
static void ilk_audio_codec_enable(struct intel_encoder *encoder,
@@ -611,11 +626,12 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
- DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
- encoder->base.base.id, encoder->base.name,
- pipe_name(pipe), drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm,
+ "Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(eld));
- if (WARN_ON(port == PORT_A))
+ if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
return;
/*
@@ -646,27 +662,28 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp &= ~eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
/* Reset ELD write address */
- tmp = I915_READ(aud_cntl_st);
+ tmp = intel_de_read(dev_priv, aud_cntl_st);
tmp &= ~IBX_ELD_ADDRESS_MASK;
- I915_WRITE(aud_cntl_st, tmp);
+ intel_de_write(dev_priv, aud_cntl_st, tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
+ intel_de_write(dev_priv, hdmiw_hdmiedid,
+ *((const u32 *)eld + i));
/* ELD valid */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp |= eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
/* Enable timestamps */
- tmp = I915_READ(aud_config);
+ tmp = intel_de_read(dev_priv, aud_config);
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
@@ -674,7 +691,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(crtc_state);
- I915_WRITE(aud_config, tmp);
+ intel_de_write(dev_priv, aud_config, tmp);
}
/**
@@ -701,14 +718,15 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
/* FIXME precompute the ELD in .compute_config() */
if (!connector->eld[0])
- DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
- DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id,
- connector->name,
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg(&dev_priv->drm, "ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ encoder->base.base.id,
+ encoder->base.name);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -800,37 +818,61 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
+static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ bool enable)
+{
+ struct intel_cdclk_state *cdclk_state;
+ int ret;
+
+ /* need to hold at least one crtc lock for the global state */
+ ret = drm_modeset_lock(&crtc->base.mutex, state->base.acquire_ctx);
+ if (ret)
+ return ret;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ cdclk_state->force_min_cdclk_changed = true;
+ cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+
+ return drm_atomic_commit(&state->base);
+}
+
static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
+ struct intel_crtc *crtc;
int ret;
+ crtc = intel_get_first_crtc(dev_priv);
+ if (!crtc)
+ return;
+
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(&dev_priv->drm);
- if (WARN_ON(!state))
+ if (drm_WARN_ON(&dev_priv->drm, !state))
return;
state->acquire_ctx = &ctx;
retry:
- to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
- to_intel_atomic_state(state)->cdclk.force_min_cdclk =
- enable ? 2 * 96000 : 0;
-
- /* Protects dev_priv->cdclk.force_min_cdclk */
- ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
- if (!ret)
- ret = drm_atomic_commit(state);
-
+ ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc,
+ enable);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
- WARN_ON(ret);
+ drm_WARN_ON(&dev_priv->drm, ret);
drm_atomic_state_put(state);
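
The retry loop above is the standard atomic locking dance: any step may return -EDEADLK when a modeset lock is contended, in which case all partially built state must be dropped and the contended lock re-acquired before retrying. A condensed, self-contained sketch of the pattern (the commit step stands in for glk_force_audio_cdclk_commit()):

static void example_commit_with_backoff(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		goto out;
	state->acquire_ctx = &ctx;
retry:
	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);	/* forget partial state */
		drm_modeset_backoff(&ctx);	/* wait for contended lock */
		goto retry;
	}
	drm_atomic_state_put(state);
out:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}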
@@ -850,9 +892,11 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
if (dev_priv->audio_power_refcount++ == 0) {
if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
- I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
- DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
- dev_priv->audio_freq_cntrl);
+ intel_de_write(dev_priv, AUD_FREQ_CNTRL,
+ dev_priv->audio_freq_cntrl);
+ drm_dbg_kms(&dev_priv->drm,
+ "restored AUD_FREQ_CNTRL to 0x%x\n",
+ dev_priv->audio_freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@@ -860,9 +904,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
glk_force_audio_cdclk(dev_priv, true);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE(AUD_PIN_BUF_CTL,
- (I915_READ(AUD_PIN_BUF_CTL) |
- AUD_PIN_BUF_ENABLE));
+ intel_de_write(dev_priv, AUD_PIN_BUF_CTL,
+ (intel_de_read(dev_priv, AUD_PIN_BUF_CTL) | AUD_PIN_BUF_ENABLE));
}
return ret;
@@ -897,15 +940,15 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
*/
- tmp = I915_READ(HSW_AUD_CHICKENBIT);
+ tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
- I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+ intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
if (enable) {
- tmp = I915_READ(HSW_AUD_CHICKENBIT);
+ tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
- I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+ intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
}
@@ -917,7 +960,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
return -ENODEV;
return dev_priv->cdclk.hw.cdclk;
@@ -940,7 +983,8 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
/* MST */
if (pipe >= 0) {
- if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
return NULL;
encoder = dev_priv->av_enc_map[pipe];
@@ -992,7 +1036,8 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* 1. get the pipe */
encoder = get_saved_enc(dev_priv, port, pipe);
if (!encoder || !encoder->base.crtc) {
- DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ port_name(port));
err = -ENODEV;
goto unlock;
}
@@ -1023,7 +1068,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
- DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ port_name(port));
mutex_unlock(&dev_priv->av_mutex);
return ret;
}
@@ -1057,10 +1103,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
int i;
- if (WARN_ON(acomp->base.ops || acomp->base.dev))
+ if (drm_WARN_ON(&dev_priv->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
- if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !device_link_add(hda_kdev, i915_kdev,
+ DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(&dev_priv->drm);
@@ -1119,15 +1167,18 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
&i915_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
- DRM_ERROR("failed to add audio component (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
- dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
- DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
- dev_priv->audio_freq_cntrl);
+ dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
+ AUD_FREQ_CNTRL);
+ drm_dbg_kms(&dev_priv->drm,
+ "init value of AUD_FREQ_CNTRL of 0x%x\n",
+ dev_priv->audio_freq_cntrl);
}
dev_priv->audio_component_registered = true;
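
All of the intel_audio.c changes above are the same two substitutions: the implicit-dev_priv I915_READ()/I915_WRITE() macros become the explicit display-engine accessors intel_de_read()/intel_de_write(), and the logging macros gain a device argument. A typical read-modify-write under the new helpers, using a register/bit pair from this file:

static void example_codec_wake_rmw(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
	tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;	/* assert the wake override */
	intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
}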
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index ef4017a1baba..839124647202 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_dp_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_display.h"
#include "display/intel_display_types.h"
@@ -228,17 +227,20 @@ parse_panel_options(struct drm_i915_private *dev_priv,
ret = intel_opregion_get_panel_type(dev_priv);
if (ret >= 0) {
- WARN_ON(ret > 0xf);
+ drm_WARN_ON(&dev_priv->drm, ret > 0xf);
panel_type = ret;
- DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+ drm_dbg_kms(&dev_priv->drm, "Panel type: %d (OpRegion)\n",
+ panel_type);
} else {
if (lvds_options->panel_type > 0xf) {
- DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
- lvds_options->panel_type);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT panel type 0x%x\n",
+ lvds_options->panel_type);
return;
}
panel_type = lvds_options->panel_type;
- DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+ drm_dbg_kms(&dev_priv->drm, "Panel type: %d (VBT)\n",
+ panel_type);
}
dev_priv->vbt.panel_type = panel_type;
@@ -253,15 +255,17 @@ parse_panel_options(struct drm_i915_private *dev_priv,
switch (drrs_mode) {
case 0:
dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
- DRM_DEBUG_KMS("DRRS supported mode is static\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS supported mode is static\n");
break;
case 2:
dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
- DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS supported mode is seamless\n");
break;
default:
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
- DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS not supported (VBT input)\n");
break;
}
}
@@ -298,7 +302,8 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found panel mode in BIOS VBT legacy lfp table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -309,8 +314,9 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
- DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
- dev_priv->vbt.bios_lvds_val);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT initial LVDS value %x\n",
+ dev_priv->vbt.bios_lvds_val);
}
}
}
@@ -329,20 +335,22 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
return;
if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
- DRM_ERROR("GDTD size %u is too small.\n",
- generic_dtd->gdtd_size);
+ drm_err(&dev_priv->drm, "GDTD size %u is too small.\n",
+ generic_dtd->gdtd_size);
return;
} else if (generic_dtd->gdtd_size !=
sizeof(struct generic_dtd_entry)) {
- DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size);
+ drm_err(&dev_priv->drm, "Unexpected GDTD size %u\n",
+ generic_dtd->gdtd_size);
/* DTD has unknown fields, but keep going */
}
num_dtd = (get_blocksize(generic_dtd) -
sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
if (dev_priv->vbt.panel_type >= num_dtd) {
- DRM_ERROR("Panel type %d not found in table of %d DTD's\n",
- dev_priv->vbt.panel_type, num_dtd);
+ drm_err(&dev_priv->drm,
+ "Panel type %d not found in table of %d DTD's\n",
+ dev_priv->vbt.panel_type, num_dtd);
return;
}
@@ -385,7 +393,8 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found panel mode in BIOS VBT generic dtd table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
@@ -422,8 +431,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return;
if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
- DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
- backlight_data->entry_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported backlight data entry size %u\n",
+ backlight_data->entry_size);
return;
}
@@ -431,8 +441,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!dev_priv->vbt.backlight.present) {
- DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
- entry->type);
+ drm_dbg_kms(&dev_priv->drm,
+ "PWM backlight not present in VBT (type %u)\n",
+ entry->type);
return;
}
@@ -449,13 +460,14 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
- DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
- "active %s, min brightness %u, level %u, controller %u\n",
- dev_priv->vbt.backlight.pwm_freq_hz,
- dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
- dev_priv->vbt.backlight.min_brightness,
- backlight_data->level[panel_type],
- dev_priv->vbt.backlight.controller);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT backlight PWM modulation frequency %u Hz, "
+ "active %s, min brightness %u, level %u, controller %u\n",
+ dev_priv->vbt.backlight.pwm_freq_hz,
+ dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+ dev_priv->vbt.backlight.min_brightness,
+ backlight_data->level[panel_type],
+ dev_priv->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
@@ -469,7 +481,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
index = i915_modparams.vbt_sdvo_panel_type;
if (index == -2) {
- DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
}
@@ -495,7 +508,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
}
@@ -540,13 +554,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
} else {
dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- dev_priv->vbt.int_tv_support,
- dev_priv->vbt.int_crt_support,
- dev_priv->vbt.lvds_use_ssc,
- dev_priv->vbt.lvds_ssc_freq,
- dev_priv->vbt.display_clock_mode,
- dev_priv->vbt.fdi_rx_polarity_inverted);
+ drm_dbg_kms(&dev_priv->drm,
+ "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+ dev_priv->vbt.int_tv_support,
+ dev_priv->vbt.int_crt_support,
+ dev_priv->vbt.lvds_use_ssc,
+ dev_priv->vbt.lvds_ssc_freq,
+ dev_priv->vbt.display_clock_mode,
+ dev_priv->vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -568,7 +583,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
* accurate and doesn't have to be, as long as it's not too strict.
*/
if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
- DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
+ drm_dbg_kms(&dev_priv->drm, "Skipping SDVO device mapping\n");
return;
}
@@ -586,14 +601,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
if (child->dvo_port != DEVICE_PORT_DVOB &&
child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Incorrect SDVO port. Skip it\n");
continue;
}
- DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
- " %s port\n",
- child->slave_addr,
- (child->dvo_port == DEVICE_PORT_DVOB) ?
- "SDVOB" : "SDVOC");
+ drm_dbg_kms(&dev_priv->drm,
+ "the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ child->slave_addr,
+ (child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
@@ -602,28 +619,30 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
mapping->ddc_pin = child->ddc_pin;
mapping->i2c_pin = child->i2c_pin;
mapping->initialized = 1;
- DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
- mapping->dvo_port,
- mapping->slave_addr,
- mapping->dvo_wiring,
- mapping->ddc_pin,
- mapping->i2c_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ mapping->dvo_port, mapping->slave_addr,
+ mapping->dvo_wiring, mapping->ddc_pin,
+ mapping->i2c_pin);
} else {
- DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
- "two SDVO device.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
}
if (child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
- " is a SDVO device with multiple inputs.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
}
count++;
}
if (!count) {
/* No SDVO device info is found */
- DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No SDVO device info is found in VBT\n");
}
}
@@ -664,7 +683,8 @@ parse_driver_features(struct drm_i915_private *dev_priv,
}
if (bdb->version < 228) {
- DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ drm_dbg_kms(&dev_priv->drm, "DRRS State Enabled:%d\n",
+ driver->drrs_enabled);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
* This is because, VBT is configured in such a way that
@@ -688,7 +708,7 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
if (bdb->version < 228)
return;
- power = find_section(bdb, BDB_LVDS_POWER);
+ power = find_section(bdb, BDB_LFP_POWER);
if (!power)
return;
@@ -742,8 +762,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
- edp_link_params->rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP link rate value %u\n",
+ edp_link_params->rate);
break;
}
@@ -758,8 +779,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.lanes = 4;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
- edp_link_params->lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP lane count value %u\n",
+ edp_link_params->lanes);
break;
}
@@ -777,8 +799,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
- edp_link_params->preemphasis);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP pre-emphasis value %u\n",
+ edp_link_params->preemphasis);
break;
}
@@ -796,8 +819,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
- edp_link_params->vswing);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP voltage swing value %u\n",
+ edp_link_params->vswing);
break;
}
@@ -824,7 +848,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
psr = find_section(bdb, BDB_PSR);
if (!psr) {
- DRM_DEBUG_KMS("No PSR BDB found.\n");
+ drm_dbg_kms(&dev_priv->drm, "No PSR BDB found.\n");
return;
}
@@ -851,8 +875,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
- psr_table->lines_to_wait);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown PSR lines to wait %u\n",
+ psr_table->lines_to_wait);
break;
}
@@ -874,8 +899,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
- DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
- psr_table->tp1_wakeup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp1_wakeup_time);
/* fallthrough */
case 2:
dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
@@ -893,8 +919,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
- DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
- psr_table->tp2_tp3_wakeup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp2_tp3_wakeup_time);
/* fallthrough */
case 2:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
@@ -1000,12 +1027,12 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
*/
start = find_section(bdb, BDB_MIPI_CONFIG);
if (!start) {
- DRM_DEBUG_KMS("No MIPI config BDB found");
+ drm_dbg_kms(&dev_priv->drm, "No MIPI config BDB found");
return;
}
- DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
- panel_type);
+ drm_dbg(&dev_priv->drm, "Found MIPI Config block, panel index = %d\n",
+ panel_type);
/*
* get hold of the correct configuration block and pps data as per
@@ -1220,7 +1247,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
int index, len;
- if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !data || dev_priv->vbt.dsi.seq_version != 1))
return 0;
/* index = 1 to skip sequence byte */
@@ -1273,7 +1301,8 @@ static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
if (!len)
return;
- DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Using init OTP fragment to deassert reset\n");
/* Copy the fragment, update seq byte and terminate it */
init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
@@ -1308,18 +1337,21 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
- DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No MIPI Sequence found, parsing complete\n");
return;
}
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 4) {
- DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
- sequence->version);
+ drm_err(&dev_priv->drm,
+ "Unable to parse MIPI Sequence Block v%u\n",
+ sequence->version);
return;
}
- DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+ drm_dbg(&dev_priv->drm, "Found MIPI sequence block v%u\n",
+ sequence->version);
seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
if (!seq_data)
@@ -1336,13 +1368,15 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
break;
if (seq_id >= MIPI_SEQ_MAX) {
- DRM_ERROR("Unknown sequence %u\n", seq_id);
+ drm_err(&dev_priv->drm, "Unknown sequence %u\n",
+ seq_id);
goto err;
}
/* Log about presence of sequences we won't run. */
if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
- DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported sequence %u\n", seq_id);
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
@@ -1351,7 +1385,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
else
index = goto_next_sequence(data, index, seq_size);
if (!index) {
- DRM_ERROR("Invalid sequence %u\n", seq_id);
+ drm_err(&dev_priv->drm, "Invalid sequence %u\n",
+ seq_id);
goto err;
}
}
@@ -1362,7 +1397,7 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
fixup_mipi_sequences(dev_priv);
- DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
+ drm_dbg(&dev_priv->drm, "MIPI related VBT parsing complete\n");
return;
err:
@@ -1387,13 +1422,15 @@ parse_compression_parameters(struct drm_i915_private *i915,
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
- DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: unsupported compression param entry size\n");
return;
}
block_size = get_blocksize(params);
if (block_size < sizeof(*params)) {
- DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: expected 16 compression param entries\n");
return;
}
}
@@ -1405,12 +1442,14 @@ parse_compression_parameters(struct drm_i915_private *i915,
continue;
if (!params) {
- DRM_DEBUG_KMS("VBT: compression params not available\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: compression params not available\n");
continue;
}
if (child->compression_method_cps) {
- DRM_DEBUG_KMS("VBT: CPS compression not supported\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: CPS compression not supported\n");
continue;
}
@@ -1458,10 +1497,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
if (p != PORT_NONE) {
- DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
- "disabling port %c DVI/HDMI support\n",
- port_name(port), info->alternate_ddc_pin,
- port_name(p), port_name(p));
+ drm_dbg_kms(&dev_priv->drm,
+ "port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(port), info->alternate_ddc_pin,
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1509,10 +1549,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
if (p != PORT_NONE) {
- DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
- "disabling port %c DP support\n",
- port_name(port), info->alternate_aux_channel,
- port_name(p), port_name(p));
+ drm_dbg_kms(&dev_priv->drm,
+ "port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(port), info->alternate_aux_channel,
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1572,8 +1613,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
return ddc_pin_map[vbt_pin];
- DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
- vbt_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
return 0;
}
@@ -1624,8 +1666,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info = &dev_priv->vbt.ddi_port_info[port];
if (info->child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
return;
}
@@ -1636,8 +1679,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
- DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
- is_hdmi ? "/HDMI" : "");
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
is_dvi = false;
is_hdmi = false;
}
@@ -1653,11 +1697,12 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
- DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
- port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
- HAS_LSPCON(dev_priv) && child->lspcon,
- info->supports_typec_usb, info->supports_tbt,
- devdata->dsc != NULL);
+ drm_dbg_kms(&dev_priv->drm,
+ "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
+ HAS_LSPCON(dev_priv) && child->lspcon,
+ info->supports_typec_usb, info->supports_tbt,
+ devdata->dsc != NULL);
if (is_dvi) {
u8 ddc_pin;
@@ -1667,9 +1712,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info->alternate_ddc_pin = ddc_pin;
sanitize_ddc_pin(dev_priv, port);
} else {
- DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
- "sticking to defaults\n",
- port_name(port), ddc_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "Port %c has invalid DDC pin %d, "
+ "sticking to defaults\n",
+ port_name(port), ddc_pin);
}
}
@@ -1682,9 +1728,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
u8 hdmi_level_shift = child->hdmi_level_shifter_value;
- DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
- port_name(port),
- hdmi_level_shift);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI level shift for port %c: %d\n",
+ port_name(port),
+ hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
info->hdmi_level_shift_set = true;
}
@@ -1708,19 +1755,22 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
}
if (max_tmds_clock)
- DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
- port_name(port), max_tmds_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI max TMDS clock for port %c: %d kHz\n",
+ port_name(port), max_tmds_clock);
info->max_tmds_clock = max_tmds_clock;
}
/* Parse the I_boost config for SKL and above */
if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);
- DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
- port_name(port), info->dp_boost_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT (e)DP boost level for port %c: %d\n",
+ port_name(port), info->dp_boost_level);
info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
- DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
- port_name(port), info->hdmi_boost_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI boost level for port %c: %d\n",
+ port_name(port), info->hdmi_boost_level);
}
/* DP max link rate for CNL+ */
@@ -1740,8 +1790,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info->dp_max_link_rate = 162000;
break;
}
- DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
- port_name(port), info->dp_max_link_rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT DP max link rate for port %c: %d\n",
+ port_name(port), info->dp_max_link_rate);
}
info->child = child;
@@ -1775,19 +1826,21 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!defs) {
- DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No general definition block is found, no devices defined.\n");
return;
}
block_size = get_blocksize(defs);
if (block_size < sizeof(*defs)) {
- DRM_DEBUG_KMS("General definitions block too small (%u)\n",
- block_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "General definitions block too small (%u)\n",
+ block_size);
return;
}
bus_pin = defs->crt_ddc_gmbus_pin;
- DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ drm_dbg_kms(&dev_priv->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
dev_priv->vbt.crt_ddc_pin = bus_pin;
@@ -1806,19 +1859,22 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
} else {
expected_size = sizeof(*child);
BUILD_BUG_ON(sizeof(*child) < 39);
- DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
- bdb->version, expected_size);
+ drm_dbg(&dev_priv->drm,
+ "Expected child device config size for VBT version %u not known; assuming %u\n",
+ bdb->version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (defs->child_dev_size != expected_size)
- DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, bdb->version);
+ drm_err(&dev_priv->drm,
+ "Unexpected child device config size %u (expected %u for VBT version %u)\n",
+ defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- DRM_DEBUG_KMS("Child device config size %u is too small.\n",
- defs->child_dev_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Child device config size %u is too small.\n",
+ defs->child_dev_size);
return;
}
@@ -1830,8 +1886,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (!child->device_type)
continue;
- DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
- child->device_type);
+ drm_dbg_kms(&dev_priv->drm,
+ "Found VBT child device with type 0x%x\n",
+ child->device_type);
devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
if (!devdata)
@@ -1849,7 +1906,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
}
if (list_empty(&dev_priv->vbt.display_devices))
- DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no child dev is parsed from VBT\n");
}
/* Common defaults which may be overridden by VBT. */
@@ -1882,7 +1940,8 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
*/
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
!HAS_PCH_SPLIT(dev_priv));
- DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
+ drm_dbg_kms(&dev_priv->drm, "Set default to SSC at %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
}
/* Defaults to initialize only if there is no VBT. */
@@ -1992,13 +2051,14 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
goto err_unmap_oprom;
if (sizeof(struct vbt_header) > size) {
- DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ drm_dbg(&dev_priv->drm, "VBT header incomplete\n");
goto err_unmap_oprom;
}
vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
if (vbt_size > size) {
- DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ drm_dbg(&dev_priv->drm,
+ "VBT incomplete (vbt_size overflows)\n");
goto err_unmap_oprom;
}
@@ -2041,7 +2101,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
- DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping VBT init due to disabled display.\n");
return;
}
@@ -2055,13 +2116,14 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
vbt = oprom_vbt;
- DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
+ drm_dbg_kms(&dev_priv->drm, "Found valid VBT in PCI ROM\n");
}
bdb = get_bdb_header(vbt);
- DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, bdb->version);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT signature \"%.*s\", BDB version %d\n",
+ (int)sizeof(vbt->signature), vbt->signature, bdb->version);
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
@@ -2086,7 +2148,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
out:
if (!vbt) {
- DRM_INFO("Failed to find VBIOS tables (VBT)\n");
+ drm_info(&dev_priv->drm,
+ "Failed to find VBIOS tables (VBT)\n");
init_vbt_missing_defaults(dev_priv);
}
@@ -2238,13 +2301,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
const struct ddi_vbt_port_info *port_info =
&dev_priv->vbt.ddi_port_info[port];
- return port_info->supports_dp ||
- port_info->supports_dvi ||
- port_info->supports_hdmi;
+ return port_info->child;
}
/* FIXME maybe deal with port A as well? */
- if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ if (drm_WARN_ON(&dev_priv->drm,
+ port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
@@ -2373,8 +2435,9 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
} else if (dvo_port == DVO_PORT_MIPIB ||
dvo_port == DVO_PORT_MIPIC ||
dvo_port == DVO_PORT_MIPID) {
- DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
- port_name(dvo_port - DVO_PORT_MIPIA));
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unsupported DSI port %c\n",
+ port_name(dvo_port - DVO_PORT_MIPIA));
}
}
@@ -2493,7 +2556,7 @@ intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
const struct child_device_config *child =
i915->vbt.ddi_port_info[port].child;
- if (WARN_ON_ONCE(!IS_GEN9_LP(i915)))
+ if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEN9_LP(i915)))
return false;
return child && child->hpd_invert;
@@ -2526,8 +2589,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
if (!info->alternate_aux_channel) {
aux_ch = (enum aux_ch)port;
- DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "using AUX %c for port %c (platform default)\n",
+ aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}
@@ -2559,8 +2623,78 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
break;
}
- DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "using AUX %c for port %c (VBT)\n",
+ aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}
+
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock;
+}
+
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct ddi_vbt_port_info *info =
+ &i915->vbt.ddi_port_info[encoder->port];
+
+ return info->hdmi_level_shift_set ? info->hdmi_level_shift : -1;
+}
+
+int intel_bios_dp_boost_level(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].dp_boost_level;
+}
+
+int intel_bios_hdmi_boost_level(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].hdmi_boost_level;
+}
+
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].dp_max_link_rate;
+}
+
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin;
+}
+
+bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_dvi;
+}
+
+bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_hdmi;
+}
+
+bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_dp;
+}
+
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915,
+ enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_typec_usb;
+}
+
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_tbt;
+}
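
The accessors added above are thin wrappers that keep callers from reaching into i915->vbt.ddi_port_info directly. A minimal usage sketch (not part of this patch; the helper name and the fallback ceiling are illustrative assumptions):

static int example_hdmi_max_tmds_clock(struct intel_encoder *encoder)
{
	int max_tmds_clock = intel_bios_max_tmds_clock(encoder);

	/* 0 means the VBT put no TMDS clock limit on this port. */
	if (max_tmds_clock <= 0)
		max_tmds_clock = 594000; /* illustrative cap, kHz */

	return max_tmds_clock;
}

Note that intel_bios_hdmi_level_shift() signals "not set in VBT" with -1 rather than 0, since 0 is a valid level shift value.
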
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index d6a0c29d37ac..e29e79faa01b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,8 +32,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
@@ -247,5 +245,16 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
+int intel_bios_dp_boost_level(struct intel_encoder *encoder);
+int intel_bios_hdmi_boost_level(struct intel_encoder *encoder);
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
+bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
#endif /* _INTEL_BIOS_H_ */
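
The new per-port capability helpers simply report what the VBT child device advertised for a port. A hypothetical caller sketch for illustration only (the function below is an assumption, not an addition from this patch):

static bool example_port_has_digital_output(struct drm_i915_private *i915,
					    enum port port)
{
	return intel_bios_port_supports_dp(i915, port) ||
	       intel_bios_port_supports_hdmi(i915, port) ||
	       intel_bios_port_supports_dvi(i915, port);
}
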
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index b228671d5a5d..58b264bc318d 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -122,7 +122,8 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
@@ -132,9 +133,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
- i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
- sp->t_rcd, sp->t_rc);
+ drm_dbg_kms(&dev_priv->drm,
+ "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
+ i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
+ sp->t_rcd, sp->t_rc);
}
return 0;
@@ -187,7 +189,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
ret = icl_get_qgv_points(dev_priv, &qi);
if (ret) {
- DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
num_channels = qi.num_channels;
@@ -228,8 +231,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * 9 / 10); /* 90% */
- DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
- i, j, bi->num_planes, bi->deratedbw[j]);
+ drm_dbg_kms(&dev_priv->drm,
+ "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
+ i, j, bi->num_planes, bi->deratedbw[j]);
}
if (bi->num_planes == 1)
@@ -374,10 +378,9 @@ static struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_private_state *bw_state;
+ struct intel_global_state *bw_state;
- bw_state = drm_atomic_get_private_obj_state(&state->base,
- &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
@@ -392,7 +395,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int data_rate, max_data_rate;
unsigned int num_active_planes;
struct intel_crtc *crtc;
- int i;
+ int i, ret;
/* FIXME earlier gens need some checks too */
if (INTEL_GEN(dev_priv) < 11)
@@ -424,15 +427,20 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
bw_state->data_rate[crtc->pipe] = new_data_rate;
bw_state->num_active_planes[crtc->pipe] = new_active_planes;
- DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
}
if (!bw_state)
return 0;
+ ret = intel_atomic_lock_global_state(&bw_state->base);
+ if (ret)
+ return ret;
+
data_rate = intel_bw_data_rate(dev_priv, bw_state);
num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
@@ -441,15 +449,17 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
data_rate = DIV_ROUND_UP(data_rate, 1000);
if (data_rate > max_data_rate) {
- DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
- data_rate, max_data_rate, num_active_planes);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
+ data_rate, max_data_rate, num_active_planes);
return -EINVAL;
}
return 0;
}
-static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
+static struct intel_global_state *
+intel_bw_duplicate_state(struct intel_global_obj *obj)
{
struct intel_bw_state *state;
@@ -457,18 +467,16 @@ static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj
if (!state)
return NULL;
- __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
-
return &state->base;
}
-static void intel_bw_destroy_state(struct drm_private_obj *obj,
- struct drm_private_state *state)
+static void intel_bw_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
{
kfree(state);
}
-static const struct drm_private_state_funcs intel_bw_funcs = {
+static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_duplicate_state = intel_bw_duplicate_state,
.atomic_destroy_state = intel_bw_destroy_state,
};
@@ -481,13 +489,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
- &state->base, &intel_bw_funcs);
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
+ &state->base, &intel_bw_funcs);
return 0;
}
-
-void intel_bw_cleanup(struct drm_i915_private *dev_priv)
-{
- drm_atomic_private_obj_fini(&dev_priv->bw_obj);
-}
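
With the bandwidth object converted from drm_private_state to the driver's intel_global_state infrastructure, any atomic check that touches the shared state must lock the global object before using it, as intel_bw_atomic_check() now does. A reduced sketch of that pattern (illustrative only; intel_atomic_get_bw_state() returns the container type as shown above):

static int example_check_bw(struct intel_atomic_state *state)
{
	struct intel_bw_state *bw_state;
	int ret;

	bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(bw_state))
		return PTR_ERR(bw_state);

	/* Serializes against other commits touching the global bw state. */
	ret = intel_atomic_lock_global_state(&bw_state->base);
	if (ret)
		return ret;

	return 0;
}
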
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 20b9ad241802..a8aa7624c5aa 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -9,13 +9,14 @@
#include <drm/drm_atomic.h>
#include "intel_display.h"
+#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_bw_state {
- struct drm_private_state base;
+ struct intel_global_state base;
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
@@ -25,7 +26,6 @@ struct intel_bw_state {
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
-void intel_bw_cleanup(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
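
Since struct intel_bw_state now embeds struct intel_global_state as its base, the usual container_of() downcast applies. A sketch of the accessor such a conversion implies (the macro name follows driver convention but is an assumption here, not part of this patch):

#define to_intel_bw_state(global_state) \
	container_of((global_state), struct intel_bw_state, base)
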
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0ce5926006ca..979a0241fdcb 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -55,43 +55,43 @@
*/
static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
}
static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
}
static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
}
static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
}
static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 400000;
+ cdclk_config->cdclk = 400000;
}
static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
}
static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 hpllcc = 0;
@@ -102,7 +102,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
* FIXME is this the right way to detect 852GM/852GMV?
*/
if (pdev->revision == 0x1) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
@@ -116,24 +116,24 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
case GC_CLOCK_133_200:
case GC_CLOCK_133_200_2:
case GC_CLOCK_100_200:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
case GC_CLOCK_166_250:
- cdclk_state->cdclk = 250000;
+ cdclk_config->cdclk = 250000;
break;
case GC_CLOCK_100_133:
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
break;
case GC_CLOCK_133_266:
case GC_CLOCK_133_266_2:
case GC_CLOCK_166_266:
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
break;
}
}
static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -141,23 +141,23 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
pci_read_config_word(pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_320_MHZ:
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
break;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
- cdclk_state->cdclk = 190000;
+ cdclk_config->cdclk = 190000;
break;
}
}
static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -165,17 +165,17 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
pci_read_config_word(pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_320_MHZ:
- cdclk_state->cdclk = 320000;
+ cdclk_config->cdclk = 320000;
break;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
}
}
@@ -237,20 +237,21 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
else
return 0;
- tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
- HPLLVCO_MOBILE : HPLLVCO);
+ tmp = intel_de_read(dev_priv,
+ IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
- DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+ drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
+ tmp);
else
- DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+ drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);
return vco;
}
static void g33_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
@@ -261,7 +262,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -270,7 +271,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
if (cdclk_sel >= ARRAY_SIZE(div_3200))
goto fail;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 3200000:
div_table = div_3200;
break;
@@ -287,18 +288,19 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
goto fail;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
- div_table[cdclk_sel]);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+ div_table[cdclk_sel]);
return;
fail:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 190476;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 190476;
}
static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -307,31 +309,32 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_267_MHZ_PNV:
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
break;
case GC_DISPLAY_CLOCK_333_MHZ_PNV:
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
break;
case GC_DISPLAY_CLOCK_444_MHZ_PNV:
- cdclk_state->cdclk = 444444;
+ cdclk_config->cdclk = 444444;
break;
case GC_DISPLAY_CLOCK_200_MHZ_PNV:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
default:
- DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ drm_err(&dev_priv->drm,
+ "Unknown pnv display core clock 0x%04x\n", gcfgc);
/* fall through */
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
break;
case GC_DISPLAY_CLOCK_167_MHZ_PNV:
- cdclk_state->cdclk = 166667;
+ cdclk_config->cdclk = 166667;
break;
}
}
static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
static const u8 div_3200[] = { 16, 10, 8 };
@@ -341,7 +344,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -350,7 +353,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
if (cdclk_sel >= ARRAY_SIZE(div_3200))
goto fail;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 3200000:
div_table = div_3200;
break;
@@ -364,62 +367,64 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
goto fail;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
- div_table[cdclk_sel]);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+ div_table[cdclk_sel]);
return;
fail:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 200000;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 200000;
}
static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
cdclk_sel = (tmp >> 12) & 0x1;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 2666667:
case 4000000:
case 5333333:
- cdclk_state->cdclk = cdclk_sel ? 333333 : 222222;
+ cdclk_config->cdclk = cdclk_sel ? 333333 : 222222;
break;
case 3200000:
- cdclk_state->cdclk = cdclk_sel ? 320000 : 228571;
+ cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
break;
default:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 222222;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 222222;
break;
}
}
static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
- cdclk_state->cdclk = 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 800000;
+ else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
else if (IS_HSW_ULT(dev_priv))
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
else
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
}
static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
@@ -462,17 +467,17 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
}
static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val;
vlv_iosf_sb_get(dev_priv,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
- cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
- CCK_DISPLAY_CLOCK_CONTROL,
- cdclk_state->vco);
+ cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
+ cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+ CCK_DISPLAY_CLOCK_CONTROL,
+ cdclk_config->vco);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -480,10 +485,10 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
if (IS_VALLEYVIEW(dev_priv))
- cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
+ cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
DSPFREQGUAR_SHIFT;
else
- cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
+ cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
DSPFREQGUAR_SHIFT_CHV;
}
@@ -510,25 +515,26 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
* WA - write default credits before re-programming
* FIXME: should we also set the resend bit here?
*/
- I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
- default_credits);
+ intel_de_write(dev_priv, GCI_CONTROL,
+ VGA_FAST_MODE_DISABLE | default_credits);
- I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
- credits | PFI_CREDIT_RESEND);
+ intel_de_write(dev_priv, GCI_CONTROL,
+ VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);
/*
* FIXME is this guaranteed to clear
* immediately or should we poll for it?
*/
- WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
}
static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- u32 val, cmd = cdclk_state->voltage_level;
+ int cdclk = cdclk_config->cdclk;
+ u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
switch (cdclk) {
@@ -563,7 +569,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
if (cdclk == 400000) {
@@ -581,7 +588,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
/* adjust self-refresh exit latency value */
@@ -611,11 +619,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- u32 val, cmd = cdclk_state->voltage_level;
+ int cdclk = cdclk_config->cdclk;
+ u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
switch (cdclk) {
@@ -645,7 +653,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
vlv_punit_put(dev_priv);
@@ -685,68 +694,70 @@ static u8 bdw_calc_voltage_level(int cdclk)
}
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
- cdclk_state->cdclk = 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 800000;
+ else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_54O_BDW)
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
else
- cdclk_state->cdclk = 675000;
+ cdclk_config->cdclk = 675000;
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- bdw_calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ bdw_calc_voltage_level(cdclk_config->cdclk);
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
+ int cdclk = cdclk_config->cdclk;
u32 val;
int ret;
- if (WARN((I915_READ(LCPLL_CTL) &
- (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
- LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
- LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
- LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
- "trying to change cdclk frequency with cdclk not enabled\n"))
+ if (drm_WARN(&dev_priv->drm,
+ (intel_de_read(dev_priv, LCPLL_CTL) &
+ (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+ LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+ LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+ LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+ "trying to change cdclk frequency with cdclk not enabled\n"))
return;
ret = sandybridge_pcode_write(dev_priv,
BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
- DRM_ERROR("failed to inform pcode about cdclk change\n");
+ drm_err(&dev_priv->drm,
+ "failed to inform pcode about cdclk change\n");
return;
}
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
/*
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- if (wait_for_us(I915_READ(LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 100))
- DRM_ERROR("Switching to FCLK failed\n");
+ drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CLK_FREQ_MASK;
switch (cdclk) {
@@ -767,20 +778,21 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
break;
}
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
+ if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
- I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+ intel_de_write(dev_priv, CDCLK_FREQ,
+ DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
intel_update_cdclk(dev_priv);
}
@@ -821,26 +833,27 @@ static u8 skl_calc_voltage_level(int cdclk)
}
static void skl_dpll0_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val;
- cdclk_state->ref = 24000;
- cdclk_state->vco = 0;
+ cdclk_config->ref = 24000;
+ cdclk_config->vco = 0;
- val = I915_READ(LCPLL1_CTL);
+ val = intel_de_read(dev_priv, LCPLL1_CTL);
if ((val & LCPLL_PLL_ENABLE) == 0)
return;
- if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+ if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
return;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
- if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
- DPLL_CTRL1_SSC(SKL_DPLL0) |
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
return;
switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
@@ -848,11 +861,11 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
- cdclk_state->vco = 8100000;
+ cdclk_config->vco = 8100000;
break;
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
- cdclk_state->vco = 8640000;
+ cdclk_config->vco = 8640000;
break;
default:
MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
@@ -861,32 +874,32 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
}
static void skl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 cdctl;
- skl_dpll0_update(dev_priv, cdclk_state);
+ skl_dpll0_update(dev_priv, cdclk_config);
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+ cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;
- if (cdclk_state->vco == 0)
+ if (cdclk_config->vco == 0)
goto out;
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
- if (cdclk_state->vco == 8640000) {
+ if (cdclk_config->vco == 8640000) {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
- cdclk_state->cdclk = 432000;
+ cdclk_config->cdclk = 432000;
break;
case CDCLK_FREQ_337_308:
- cdclk_state->cdclk = 308571;
+ cdclk_config->cdclk = 308571;
break;
case CDCLK_FREQ_540:
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
break;
case CDCLK_FREQ_675_617:
- cdclk_state->cdclk = 617143;
+ cdclk_config->cdclk = 617143;
break;
default:
MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
@@ -895,16 +908,16 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
} else {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
break;
case CDCLK_FREQ_337_308:
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
break;
case CDCLK_FREQ_540:
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
break;
case CDCLK_FREQ_675_617:
- cdclk_state->cdclk = 675000;
+ cdclk_config->cdclk = 675000;
break;
default:
MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
@@ -917,8 +930,8 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- skl_calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ skl_calc_voltage_level(cdclk_config->cdclk);
}
/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
@@ -942,7 +955,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
u32 val;
- WARN_ON(vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
* We always enable DPLL0 with the lowest link rate possible, but still
@@ -953,7 +966,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
* rate later on, with the constraint of choosing a frequency that
* works with vco.
*/
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
@@ -965,13 +978,14 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
SKL_DPLL0);
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
+ intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_posting_read(dev_priv, DPLL_CTRL1);
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, LCPLL1_CTL,
+ intel_de_read(dev_priv, LCPLL1_CTL) | LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
- DRM_ERROR("DPLL0 not locked\n");
+ drm_err(&dev_priv->drm, "DPLL0 not locked\n");
dev_priv->cdclk.hw.vco = vco;
@@ -981,19 +995,20 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, LCPLL1_CTL,
+ intel_de_read(dev_priv, LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
- DRM_ERROR("Couldn't disable DPLL0\n");
+ drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
}
static void skl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
u32 freq_select, cdclk_ctl;
int ret;
@@ -1005,23 +1020,25 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
* use the corresponding VCO freq as that always leads to using the
* minimum 308MHz CDCLK.
*/
- WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ IS_SKYLAKE(dev_priv) && vco == 8640000);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (%d)\n", ret);
return;
}
/* Choose frequency for this cdclk */
switch (cdclk) {
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
/* fall through */
case 308571:
case 337500:
@@ -1044,38 +1061,38 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
- cdclk_ctl = I915_READ(CDCLK_CTL);
+ cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
if (dev_priv->cdclk.hw.vco != vco) {
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
}
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
- POSTING_READ(CDCLK_CTL);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(dev_priv, CDCLK_CTL);
if (dev_priv->cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
- POSTING_READ(CDCLK_CTL);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(dev_priv, CDCLK_CTL);
/* inform PCU of the change */
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
intel_update_cdclk(dev_priv);
}
@@ -1089,11 +1106,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* There is SWF18 scratchpad register defined which is set by the
* pre-os which can be used by the OS drivers to check the status
*/
- if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+ if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk.hw.vco == 0 ||
@@ -1106,7 +1123,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* decimal part is programmed wrong from BIOS where pre-os does not
* enable display. Verify the same as well.
*/
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
if (cdctl == expected)
@@ -1114,7 +1131,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
return;
sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
dev_priv->cdclk.hw.cdclk = 0;
@@ -1122,9 +1139,9 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-static void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state;
+ struct intel_cdclk_config cdclk_config;
skl_sanitize_cdclk(dev_priv);
@@ -1140,26 +1157,26 @@ static void skl_init_cdclk(struct drm_i915_private *dev_priv)
return;
}
- cdclk_state = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.vco = dev_priv->skl_preferred_vco_freq;
- if (cdclk_state.vco == 0)
- cdclk_state.vco = 8100000;
- cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
- cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
+ if (cdclk_config.vco == 0)
+ cdclk_config.vco = 8100000;
+ cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
+ cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
-static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = cdclk_config.bypass;
+ cdclk_config.vco = 0;
+ cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
static const struct intel_cdclk_vals bxt_cdclk_table[] = {
@@ -1223,8 +1240,9 @@ static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
table[i].cdclk >= min_cdclk)
return table[i].cdclk;
- WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
- min_cdclk, dev_priv->cdclk.hw.ref);
+ drm_WARN(&dev_priv->drm, 1,
+ "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->cdclk.hw.ref);
return 0;
}
@@ -1241,8 +1259,8 @@ static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
table[i].cdclk == cdclk)
return dev_priv->cdclk.hw.ref * table[i].ratio;
- WARN(1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
return 0;
}
@@ -1283,56 +1301,68 @@ static u8 ehl_calc_voltage_level(int cdclk)
return 0;
}
+static u8 tgl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 3;
+ else if (cdclk > 326400)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
+
static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
- cdclk_state->ref = 24000;
+ if (intel_de_read(dev_priv, SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+ cdclk_config->ref = 24000;
else
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
}
static void icl_readout_refclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
+ u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
switch (dssm) {
default:
MISSING_CASE(dssm);
/* fall through */
case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
- cdclk_state->ref = 24000;
+ cdclk_config->ref = 24000;
break;
case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
break;
case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
- cdclk_state->ref = 38400;
+ cdclk_config->ref = 38400;
break;
}
}
static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val, ratio;
if (INTEL_GEN(dev_priv) >= 11)
- icl_readout_refclk(dev_priv, cdclk_state);
+ icl_readout_refclk(dev_priv, cdclk_config);
else if (IS_CANNONLAKE(dev_priv))
- cnl_readout_refclk(dev_priv, cdclk_state);
+ cnl_readout_refclk(dev_priv, cdclk_config);
else
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
- val = I915_READ(BXT_DE_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
(val & BXT_DE_PLL_LOCK) == 0) {
/*
* CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
* setting it to zero is a way to signal that.
*/
- cdclk_state->vco = 0;
+ cdclk_config->vco = 0;
return;
}
@@ -1343,47 +1373,49 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 10)
ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
else
- ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+ ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- cdclk_state->vco = ratio * cdclk_state->ref;
+ cdclk_config->vco = ratio * cdclk_config->ref;
}
static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 divider;
int div;
- bxt_de_pll_readout(dev_priv, cdclk_state);
+ bxt_de_pll_readout(dev_priv, cdclk_config);
if (INTEL_GEN(dev_priv) >= 12)
- cdclk_state->bypass = cdclk_state->ref / 2;
+ cdclk_config->bypass = cdclk_config->ref / 2;
else if (INTEL_GEN(dev_priv) >= 11)
- cdclk_state->bypass = 50000;
+ cdclk_config->bypass = 50000;
else
- cdclk_state->bypass = cdclk_state->ref;
+ cdclk_config->bypass = cdclk_config->ref;
- if (cdclk_state->vco == 0) {
- cdclk_state->cdclk = cdclk_state->bypass;
+ if (cdclk_config->vco == 0) {
+ cdclk_config->cdclk = cdclk_config->bypass;
goto out;
}
- divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+ divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
switch (divider) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
- "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm,
+ IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 8;
break;
default:
@@ -1391,25 +1423,25 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
return;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
out:
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
if (intel_de_wait_for_clear(dev_priv,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL unlock\n");
+ drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
}
@@ -1419,17 +1451,17 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
u32 val;
- val = I915_READ(BXT_DE_PLL_CTL);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_CTL);
val &= ~BXT_DE_PLL_RATIO_MASK;
val |= BXT_DE_PLL_RATIO(ratio);
- I915_WRITE(BXT_DE_PLL_CTL, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_CTL, val);
- I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
if (intel_de_wait_for_set(dev_priv,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL lock\n");
+ drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
}
@@ -1438,13 +1470,14 @@ static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(BXT_DE_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
val &= ~BXT_DE_PLL_PLL_ENABLE;
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
- DRM_ERROR("timeout waiting for CDCLK PLL unlock\n");
+ if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+ drm_err(&dev_priv->drm,
+ "timeout waiting for CDCLK PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
}
@@ -1455,14 +1488,15 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
u32 val;
val = CNL_CDCLK_PLL_RATIO(ratio);
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
val |= BXT_DE_PLL_PLL_ENABLE;
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
- DRM_ERROR("timeout waiting for CDCLK PLL lock\n");
+ if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+ drm_err(&dev_priv->drm,
+ "timeout waiting for CDCLK PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
}
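
Beyond the state-to-config rename, the conversions in this file follow one mechanical pattern: the implicit-dev_priv I915_READ()/I915_WRITE()/POSTING_READ() macros become explicit display-engine helpers, and WARN()/DRM_ERROR() gain a struct drm_device pointer so messages identify the device. A read-modify-write sketch using registers that appear above; the wrapper function itself is illustrative only:

static void example_rmw(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, CDCLK_CTL);	/* was I915_READ() */
	val &= ~CDCLK_DIVMUX_CD_OVERRIDE;
	intel_de_write(dev_priv, CDCLK_CTL, val);	/* was I915_WRITE() */
	intel_de_posting_read(dev_priv, CDCLK_CTL);	/* was POSTING_READ() */
}
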
@@ -1488,11 +1522,11 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
}
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
u32 val, divider;
int ret;
@@ -1512,30 +1546,34 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
0x80000000, 150, 2);
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n",
- ret, cdclk);
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
/* fall through */
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
case 3:
- WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
- "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm,
+ IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 8:
- WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
}
@@ -1566,14 +1604,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
*/
if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- I915_WRITE(CDCLK_CTL, val);
+ intel_de_write(dev_priv, CDCLK_CTL, val);
if (pipe != INVALID_PIPE)
intel_wait_for_vblank(dev_priv, pipe);
if (INTEL_GEN(dev_priv) >= 10) {
ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
} else {
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -1583,13 +1621,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
*/
ret = sandybridge_pcode_write_timeout(dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level,
+ cdclk_config->voltage_level,
150, 2);
}
if (ret) {
- DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, cdclk);
+ drm_err(&dev_priv->drm,
+ "PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
@@ -1600,7 +1639,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+ dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
@@ -1609,7 +1648,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
int cdclk, vco;
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
@@ -1621,7 +1660,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
* set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
* so sanitize this register.
*/
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
/*
* Let's ignore the pipe field, since BIOS could have configured the
* dividers both synching to an active pipe, or asynchronously
@@ -1672,7 +1711,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
return;
sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
dev_priv->cdclk.hw.cdclk = 0;
@@ -1681,9 +1720,9 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state;
+ struct intel_cdclk_config cdclk_config;
bxt_sanitize_cdclk(dev_priv);
@@ -1691,35 +1730,35 @@ static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
dev_priv->cdclk.hw.vco != 0)
return;
- cdclk_state = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->cdclk.hw;
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
* Need to make this change after VBT has changes for BXT.
*/
- cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0);
- cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
- cdclk_state.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
+ cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
+ cdclk_config.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = cdclk_config.bypass;
+ cdclk_config.vco = 0;
+ cdclk_config.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
/**
- * intel_cdclk_init - Initialize CDCLK
+ * intel_cdclk_init_hw - Initialize CDCLK hardware
* @i915: i915 device
*
* Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
@@ -1727,39 +1766,41 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
* during the display core initialization sequence, after which the DMC will
* take care of turning CDCLK off/on as needed.
*/
-void intel_cdclk_init(struct drm_i915_private *i915)
+void intel_cdclk_init_hw(struct drm_i915_private *i915)
{
if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
- bxt_init_cdclk(i915);
+ bxt_cdclk_init_hw(i915);
else if (IS_GEN9_BC(i915))
- skl_init_cdclk(i915);
+ skl_cdclk_init_hw(i915);
}
/**
- * intel_cdclk_uninit - Uninitialize CDCLK
+ * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware
* @i915: i915 device
*
* Uninitialize CDCLK. This is done only during the display core
* uninitialization sequence.
*/
-void intel_cdclk_uninit(struct drm_i915_private *i915)
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
- bxt_uninit_cdclk(i915);
+ bxt_cdclk_uninit_hw(i915);
else if (IS_GEN9_BC(i915))
- skl_uninit_cdclk(i915);
+ skl_cdclk_uninit_hw(i915);
}
/**
- * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_needs_modeset - Determine if changing between the CDCLK
+ * configurations requires a modeset on all pipes
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states require pipes to be off during reprogramming, false if not.
+ * True if changing between the two CDCLK configurations
+ * requires all pipes to be off, false if not.
*/
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
return a->cdclk != b->cdclk ||
a->vco != b->vco ||
@@ -1767,17 +1808,19 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
}
/**
- * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
- * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK
+ * configurations requires only a cd2x divider update
+ * @dev_priv: i915 device
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states require just a cd2x divider update, false if not.
+ * True if changing between the two CDCLK configurations
+ * can be done with just a cd2x divider update, false if not.
*/
-static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
/* Older hw doesn't have the capability */
if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
@@ -1789,117 +1832,138 @@ static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
}
/**
- * intel_cdclk_changed - Determine if two CDCLK states are different
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_changed - Determine if two CDCLK configurations are different
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states don't match, false if they do.
+ * True if the CDCLK configurations don't match, false if they do.
*/
-static bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
return intel_cdclk_needs_modeset(a, b) ||
a->voltage_level != b->voltage_level;
}
-/**
- * intel_cdclk_swap_state - make atomic CDCLK configuration effective
- * @state: atomic state
- *
- * This is the CDCLK version of drm_atomic_helper_swap_state() since the
- * helper does not handle driver-specific global state.
- *
- * Similarly to the atomic helpers this function does a complete swap,
- * i.e. it also puts the old state into @state. This is used by the commit
- * code to determine how CDCLK has changed (for instance did it increase or
- * decrease).
- */
-void intel_cdclk_swap_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
- swap(state->cdclk.logical, dev_priv->cdclk.logical);
- swap(state->cdclk.actual, dev_priv->cdclk.actual);
-}
-
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context)
+void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
+ const char *context)
{
DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
- context, cdclk_state->cdclk, cdclk_state->vco,
- cdclk_state->ref, cdclk_state->bypass,
- cdclk_state->voltage_level);
+ context, cdclk_config->cdclk, cdclk_config->vco,
+ cdclk_config->ref, cdclk_config->bypass,
+ cdclk_config->voltage_level);
}
/**
- * intel_set_cdclk - Push the CDCLK state to the hardware
+ * intel_set_cdclk - Push the CDCLK configuration to the hardware
* @dev_priv: i915 device
- * @cdclk_state: new CDCLK state
+ * @cdclk_config: new CDCLK configuration
* @pipe: pipe with which to synchronize the update
*
* Program the hardware based on the passed in CDCLK state,
* if necessary.
*/
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
+ struct intel_encoder *encoder;
+
+ if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
return;
- if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk))
return;
- intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
+ intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
- dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
+ /*
+ * Lock aux/gmbus while we change cdclk in case those
+ * functions use cdclk. Not all platforms/ports do,
+ * but we'll lock them all for simplicity.
+ */
+ mutex_lock(&dev_priv->gmbus_mutex);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
+ &dev_priv->gmbus_mutex);
+ }
+
+ dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe);
- if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
- "cdclk state doesn't match!\n")) {
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
- intel_dump_cdclk_state(cdclk_state, "[sw state]");
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_unlock(&intel_dp->aux.hw_mutex);
+ }
+ mutex_unlock(&dev_priv->gmbus_mutex);
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+ "cdclk state doesn't match!\n")) {
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]");
+ intel_dump_cdclk_config(cdclk_config, "[sw state]");
}
}
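A note on the locking above: mutex_lock_nest_lock() tells lockdep that every per-encoder aux hw_mutex acquisition is serialized by gmbus_mutex, so taking several locks of the same class back to back does not trip the nesting checks. A minimal userspace sketch of the same acquire/release ordering, using pthreads instead of the kernel mutex API (all names here are illustrative, not from i915):

	#include <pthread.h>
	#include <stdio.h>

	#define NUM_AUX 4

	static pthread_mutex_t gmbus_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t aux_lock[NUM_AUX];

	static void change_cdclk_locked(void)
	{
		int i;

		/* Outer lock first: it serializes all the inner acquisitions. */
		pthread_mutex_lock(&gmbus_lock);
		for (i = 0; i < NUM_AUX; i++)
			pthread_mutex_lock(&aux_lock[i]);

		printf("reprogramming cdclk with aux/gmbus quiesced\n");

		/* Drop the inner locks, then the outer one. */
		for (i = 0; i < NUM_AUX; i++)
			pthread_mutex_unlock(&aux_lock[i]);
		pthread_mutex_unlock(&gmbus_lock);
	}

	int main(void)
	{
		int i;

		for (i = 0; i < NUM_AUX; i++)
			pthread_mutex_init(&aux_lock[i], NULL);
		change_cdclk_locked();
		return 0;
	}

The userspace version loses the lockdep annotation, of course; the point is only the ordering: one coarse outer lock that dominates many same-class inner locks.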
/**
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
+ * @state: intel atomic state
*
- * Program the hardware before updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
+ * Program the hardware before updating the HW plane state based on the
+ * new CDCLK state, if necessary.
*/
void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe)
+intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
{
- if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
- intel_set_cdclk(dev_priv, new_state, pipe);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe = new_cdclk_state->pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+ return;
+
+ if (pipe == INVALID_PIPE ||
+ old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ }
}
/**
* intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
+ * @state: intel atomic state
*
- * Program the hardware after updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
+ * Program the hardware after updating the HW plane state based on the
+ * new CDCLK state, if necessary.
*/
void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe)
+intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
{
- if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
- intel_set_cdclk(dev_priv, new_state, pipe);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe = new_cdclk_state->pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+ return;
+
+ if (pipe != INVALID_PIPE &&
+ old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ }
}
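The pre/post pair above encodes one rule: raise cdclk before the plane update and lower it after, so the pipes never run with less cdclk than their currently programmed state needs. A standalone sketch of just that decision (function name hypothetical, frequencies illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	/* True if cdclk must be reprogrammed before the plane update. */
	static bool program_before_planes(int old_cdclk, int new_cdclk)
	{
		/* Raising (or keeping) cdclk is safe before planes switch over;
		 * lowering must wait until the new, smaller demand is in effect. */
		return new_cdclk >= old_cdclk;
	}

	int main(void)
	{
		printf("337500 -> 675000: before=%d\n",
		       program_before_planes(337500, 675000));	/* 1: raise first */
		printf("675000 -> 337500: before=%d\n",
		       program_before_planes(675000, 337500));	/* 0: lower after */
		return 0;
	}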
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2017,25 +2081,24 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
if (min_cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
- min_cdclk, dev_priv->max_cdclk_freq);
+ drm_dbg_kms(&dev_priv->drm,
+ "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+ min_cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
enum pipe pipe;
- memcpy(state->min_cdclk, dev_priv->min_cdclk,
- sizeof(state->min_cdclk));
-
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret;
@@ -2043,19 +2106,19 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
if (min_cdclk < 0)
return min_cdclk;
- if (state->min_cdclk[i] == min_cdclk)
+ if (cdclk_state->min_cdclk[i] == min_cdclk)
continue;
- state->min_cdclk[i] = min_cdclk;
+ cdclk_state->min_cdclk[i] = min_cdclk;
- ret = intel_atomic_lock_global_state(state);
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
return ret;
}
- min_cdclk = state->cdclk.force_min_cdclk;
+ min_cdclk = cdclk_state->force_min_cdclk;
for_each_pipe(dev_priv, pipe)
- min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
return min_cdclk;
}
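intel_compute_min_cdclk() reduces the per-pipe minimums to one number: the maximum over all pipes, floored by the audio workaround's force_min_cdclk. The same reduction as a standalone sketch (pipe count and values are made up):

	#include <stdio.h>

	#define MAX_PIPES 3

	static int compute_min_cdclk(const int min_cdclk[MAX_PIPES],
				     int force_min_cdclk)
	{
		int i, min = force_min_cdclk;

		/* The most demanding pipe sets the floor for everyone. */
		for (i = 0; i < MAX_PIPES; i++)
			if (min_cdclk[i] > min)
				min = min_cdclk[i];

		return min;
	}

	int main(void)
	{
		int per_pipe[MAX_PIPES] = { 308571, 163800, 0 };

		/* Prints 308571: the highest per-pipe requirement wins. */
		printf("min cdclk: %d kHz\n", compute_min_cdclk(per_pipe, 0));
		return 0;
	}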
@@ -2073,8 +2136,9 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* future platforms this code will need to be
* adjusted.
*/
-static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
+static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
@@ -2082,9 +2146,6 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
int i;
enum pipe pipe;
- memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
- sizeof(state->min_voltage_level));
-
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret;
@@ -2093,57 +2154,58 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
else
min_voltage_level = 0;
- if (state->min_voltage_level[i] == min_voltage_level)
+ if (cdclk_state->min_voltage_level[i] == min_voltage_level)
continue;
- state->min_voltage_level[i] = min_voltage_level;
+ cdclk_state->min_voltage_level[i] = min_voltage_level;
- ret = intel_atomic_lock_global_state(state);
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
return ret;
}
min_voltage_level = 0;
for_each_pipe(dev_priv, pipe)
- min_voltage_level = max(state->min_voltage_level[pipe],
+ min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
min_voltage_level);
return min_voltage_level;
}
-static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!state->active_pipes) {
- cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
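All the *_modeset_calc_cdclk() variants share this shape: compute the logical configuration as if every enabled crtc were active, then let the actual configuration drop to the force_min_cdclk floor only when no pipes are active. A sketch of that split, with a made-up rounding helper standing in for any platform's "round min_cdclk up to a legal frequency" function:

	#include <stdio.h>

	struct cdclk_config { int cdclk; };

	/* Stand-in for vlv_calc_cdclk()/bdw_calc_cdclk()/etc. */
	static int platform_calc_cdclk(int min_cdclk)
	{
		return min_cdclk <= 320000 ? 320000 : 400000;
	}

	int main(void)
	{
		int active_pipes = 0;	/* bitmask; 0 = everything DPMS off */
		int force_min_cdclk = 0;
		struct cdclk_config logical, actual;

		logical.cdclk = platform_calc_cdclk(333000);

		if (!active_pipes)
			actual.cdclk = platform_calc_cdclk(force_min_cdclk);
		else
			actual = logical;

		/* logical 400000, actual 320000: hw may idle lower than the sw plan */
		printf("logical %d kHz, actual %d kHz\n",
		       logical.cdclk, actual.cdclk);
		return 0;
	}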
-static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk, cdclk;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
@@ -2153,31 +2215,32 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
/*
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
cdclk = bdw_calc_cdclk(min_cdclk);
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!state->active_pipes) {
- cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk);
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
bdw_calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
-static int skl_dpll0_vco(struct intel_atomic_state *state)
+static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int vco, i;
- vco = state->cdclk.logical.vco;
+ vco = cdclk_state->logical.vco;
if (!vco)
vco = dev_priv->skl_preferred_vco_freq;
@@ -2206,15 +2269,15 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
return vco;
}
-static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk, cdclk, vco;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
- vco = skl_dpll0_vco(state);
+ vco = skl_dpll0_vco(cdclk_state);
/*
* FIXME should also account for plane ratio
@@ -2222,57 +2285,58 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
* once 64bpp pixel formats are supported.
*/
cdclk = skl_calc_cdclk(min_cdclk, vco);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.vco = vco;
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!state->active_pipes) {
- cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
+ if (!cdclk_state->active_pipes) {
+ cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco);
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.vco = vco;
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
skl_calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
-static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, min_voltage_level, cdclk, vco;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
- min_voltage_level = bxt_compute_min_voltage_level(state);
+ min_voltage_level = bxt_compute_min_voltage_level(cdclk_state);
if (min_voltage_level < 0)
return min_voltage_level;
cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.vco = vco;
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
max_t(int, min_voltage_level,
dev_priv->display.calc_voltage_level(cdclk));
- if (!state->active_pipes) {
- cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.vco = vco;
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
dev_priv->display.calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
@@ -2317,7 +2381,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
return 0;
}
-static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk;
@@ -2326,54 +2390,113 @@ static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
* check that the required minimum frequency doesn't exceed
* the actual cdclk frequency.
*/
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
return 0;
}
+static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL);
+ if (!cdclk_state)
+ return NULL;
+
+ cdclk_state->force_min_cdclk_changed = false;
+ cdclk_state->pipe = INVALID_PIPE;
+
+ return &cdclk_state->base;
+}
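intel_cdclk_duplicate_state() follows the usual atomic duplicate-state recipe: copy the old state wholesale (kmemdup) so every persistent field carries over, then explicitly reset the fields that only describe a single commit (pipe, force_min_cdclk_changed). A userspace model of the pattern, with field names mirroring the code above but everything else simplified:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define INVALID_PIPE (-1)

	struct cdclk_state {
		int cdclk;			/* persistent: carried over */
		int pipe;			/* per-commit: re-derived each time */
		int force_min_cdclk_changed;	/* per-commit flag */
	};

	static struct cdclk_state *duplicate_state(const struct cdclk_state *old)
	{
		struct cdclk_state *new = malloc(sizeof(*new));

		if (!new)
			return NULL;
		memcpy(new, old, sizeof(*new));	/* kmemdup() equivalent */

		/* Transient, per-commit fields start from a clean slate. */
		new->force_min_cdclk_changed = 0;
		new->pipe = INVALID_PIPE;

		return new;
	}

	int main(void)
	{
		struct cdclk_state old = {
			.cdclk = 652800, .pipe = 0, .force_min_cdclk_changed = 1,
		};
		struct cdclk_state *new = duplicate_state(&old);

		/* Prints: cdclk 652800, pipe -1, changed 0 */
		printf("cdclk %d, pipe %d, changed %d\n",
		       new->cdclk, new->pipe, new->force_min_cdclk_changed);
		free(new);
		return 0;
	}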
+
+static void intel_cdclk_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_cdclk_funcs = {
+ .atomic_duplicate_state = intel_cdclk_duplicate_state,
+ .atomic_destroy_state = intel_cdclk_destroy_state,
+};
+
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj);
+ if (IS_ERR(cdclk_state))
+ return ERR_CAST(cdclk_state);
+
+ return to_intel_cdclk_state(cdclk_state);
+}
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
+ if (!cdclk_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj,
+ &cdclk_state->base, &intel_cdclk_funcs);
+
+ return 0;
+}
+
int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state;
+ struct intel_cdclk_state *new_cdclk_state;
enum pipe pipe;
int ret;
- ret = dev_priv->display.modeset_calc_cdclk(state);
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ new_cdclk_state->active_pipes =
+ intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
+
+ ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state);
if (ret)
return ret;
- /*
- * Writes to dev_priv->cdclk.{actual,logical} must protected
- * by holding all the crtc mutexes even if we don't end up
- * touching the hardware
- */
- if (intel_cdclk_changed(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ if (intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
/*
* Also serialize commits across all crtcs
* if the actual hw needs to be poked.
*/
- ret = intel_atomic_serialize_global_state(state);
+ ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- } else if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &state->cdclk.logical)) {
- ret = intel_atomic_lock_global_state(state);
+ } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
+ intel_cdclk_changed(&old_cdclk_state->logical,
+ &new_cdclk_state->logical)) {
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
return ret;
} else {
return 0;
}
- if (is_power_of_2(state->active_pipes) &&
- intel_cdclk_needs_cd2x_update(dev_priv,
- &dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ if (is_power_of_2(new_cdclk_state->active_pipes) &&
+ intel_cdclk_can_cd2x_update(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- pipe = ilog2(state->active_pipes);
+ pipe = ilog2(new_cdclk_state->active_pipes);
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -2387,28 +2510,32 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
}
if (pipe != INVALID_PIPE) {
- state->cdclk.pipe = pipe;
+ new_cdclk_state->pipe = pipe;
- DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n",
- pipe_name(pipe));
- } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk with pipe %c active\n",
+ pipe_name(pipe));
+ } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
/* All pipes must be switched off while we change the cdclk. */
ret = intel_modeset_all_pipes(state);
if (ret)
return ret;
- state->cdclk.pipe = INVALID_PIPE;
+ new_cdclk_state->pipe = INVALID_PIPE;
- DRM_DEBUG_KMS("Modeset required for cdclk change\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset required for cdclk change\n");
}
- DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- state->cdclk.logical.cdclk,
- state->cdclk.actual.cdclk);
- DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- state->cdclk.logical.voltage_level,
- state->cdclk.actual.voltage_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ new_cdclk_state->logical.cdclk,
+ new_cdclk_state->actual.cdclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "New voltage level calculated to be logical %u, actual %u\n",
+ new_cdclk_state->logical.voltage_level,
+ new_cdclk_state->actual.voltage_level);
return 0;
}
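The cd2x fast path above leans on a small bitmask trick: active_pipes has exactly one bit set iff is_power_of_2() is true, and ilog2() then recovers the index of that pipe to synchronize the update against. A userspace equivalent, with the kernel's is_power_of_2()/ilog2() replaced by open-coded versions:

	#include <stdbool.h>
	#include <stdio.h>

	static bool is_power_of_2(unsigned int n)
	{
		/* Exactly one bit set: clearing the lowest bit leaves zero. */
		return n != 0 && (n & (n - 1)) == 0;
	}

	static int ilog2(unsigned int n)
	{
		int log = -1;

		while (n) {
			n >>= 1;
			log++;
		}
		return log;
	}

	int main(void)
	{
		unsigned int active_pipes = 1 << 2;	/* only pipe C active */

		if (is_power_of_2(active_pipes))
			printf("single active pipe: %d\n",
			       ilog2(active_pipes));	/* prints 2 */
		return 0;
	}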
@@ -2453,11 +2580,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->max_cdclk_freq = 528000;
} else if (IS_GEN9_BC(dev_priv)) {
- u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
vco = dev_priv->skl_preferred_vco_freq;
- WARN_ON(vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
* Use the lower (vco 8640) cdclk values as a
@@ -2485,7 +2612,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
* How can we know if extra cooling is
* available? PCI ID, VTB, something else?
*/
- if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->max_cdclk_freq = 450000;
else if (IS_BDW_ULX(dev_priv))
dev_priv->max_cdclk_freq = 450000;
@@ -2504,11 +2631,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
- DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
- dev_priv->max_cdclk_freq);
+ drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
+ dev_priv->max_cdclk_freq);
- DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
- dev_priv->max_dotclk_freq);
+ drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
+ dev_priv->max_dotclk_freq);
}
/**
@@ -2528,8 +2655,8 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
* generate GMBus clock. This will vary with the cdclk freq.
*/
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- I915_WRITE(GMBUSFREQ_VLV,
- DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+ intel_de_write(dev_priv, GMBUSFREQ_VLV,
+ DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
}
static int cnp_rawclk(struct drm_i915_private *dev_priv)
@@ -2537,7 +2664,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
u32 rawclk;
int divider, fraction;
- if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
+ if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
/* 24 MHz */
divider = 24000;
fraction = 0;
@@ -2557,13 +2684,13 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
- I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk);
return divider + fraction;
}
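cnp_rawclk() reports the frequency in kHz as divider + fraction: the 24 MHz fuse setting yields 24000 + 0, and the other leg is 19.2 MHz upstream (19000 + 200), though that branch falls outside the visible hunk, so treat those two values as an assumption here. A trivial check of the arithmetic, ignoring the register encoding entirely:

	#include <stdbool.h>
	#include <stdio.h>

	/* Mirrors only the kHz bookkeeping, not the PCH_RAWCLK_FREQ encoding. */
	static int rawclk_khz(bool fuse_24mhz)
	{
		int divider, fraction;

		if (fuse_24mhz) {
			divider = 24000;	/* 24 MHz */
			fraction = 0;
		} else {
			divider = 19000;	/* 19.2 MHz: integer kHz... */
			fraction = 200;		/* ...plus the fractional part */
		}
		return divider + fraction;
	}

	int main(void)
	{
		/* Prints: 24000 19200 */
		printf("%d %d\n", rawclk_khz(true), rawclk_khz(false));
		return 0;
	}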
static int pch_rawclk(struct drm_i915_private *dev_priv)
{
- return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+ return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
static int vlv_hrawclk(struct drm_i915_private *dev_priv)
@@ -2578,7 +2705,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
u32 clkcfg;
/* hrawclock is 1/4 the FSB frequency */
- clkcfg = I915_READ(CLKCFG);
+ clkcfg = intel_de_read(dev_priv, CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
return 100000;
@@ -2600,27 +2727,29 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
}
/**
- * intel_update_rawclk - Determine the current RAWCLK frequency
+ * intel_read_rawclk - Determine the current RAWCLK frequency
* @dev_priv: i915 device
*
* Determine the current RAWCLK frequency. RAWCLK is a fixed
* frequency clock so this needs to be done only once.
*/
-void intel_update_rawclk(struct drm_i915_private *dev_priv)
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
{
+ u32 freq;
+
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
+ freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->rawclk_freq = pch_rawclk(dev_priv);
+ freq = pch_rawclk(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
+ freq = vlv_hrawclk(dev_priv);
else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
- dev_priv->rawclk_freq = g4x_hrawclk(dev_priv);
+ freq = g4x_hrawclk(dev_priv);
else
/* no rawclk on other platforms, or no need to know it */
- return;
+ return 0;
- DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+ return freq;
}
/**
@@ -2629,7 +2758,12 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ELKHARTLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
+ } else if (IS_ELKHARTLAKE(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
@@ -2709,8 +2843,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
else if (IS_I845G(dev_priv))
dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
else { /* 830 */
- WARN(!IS_I830(dev_priv),
- "Unknown platform. Assuming 133 MHz CDCLK\n");
+ drm_WARN(&dev_priv->drm, !IS_I830(dev_priv),
+ "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index cf71394cc79c..5731806e4cee 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,11 +8,12 @@
#include <linux/types.h>
+#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
-struct intel_cdclk_state;
struct intel_crtc_state;
struct intel_cdclk_vals {
@@ -22,28 +23,62 @@ struct intel_cdclk_vals {
u8 ratio;
};
+struct intel_cdclk_state {
+ struct intel_global_state base;
+
+ /*
+ * Logical configuration of cdclk (used for all scaling,
+ * watermark, etc. calculations and checks). This is
+ * computed as if all enabled crtcs were active.
+ */
+ struct intel_cdclk_config logical;
+
+ /*
+ * Actual configuration of cdclk, can be different from the
+ * logical configuration only when all crtc's are DPMS off.
+ */
+ struct intel_cdclk_config actual;
+
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
+
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
+
+ /* forced minimum cdclk for glk+ audio w/a */
+ int force_min_cdclk;
+ bool force_min_cdclk_changed;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+};
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_cdclk_init(struct drm_i915_private *i915);
-void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_cdclk_init_hw(struct drm_i915_private *i915);
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-void intel_cdclk_swap_state(struct intel_atomic_state *state);
-void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe);
-void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe);
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context);
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b);
+void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
+void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
+void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
+ const char *context);
int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
+
+#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
+#define intel_atomic_get_old_cdclk_state(state) \
+ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+#define intel_atomic_get_new_cdclk_state(state) \
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv);
+
#endif /* __INTEL_CDCLK_H__ */
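to_intel_cdclk_state() above is the standard container_of() downcast: the global-state core only ever sees the embedded intel_global_state, and the driver recovers the enclosing wrapper by subtracting the member's offset. A standalone model of the pattern with abbreviated struct names:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct global_state { int serial; };

	struct cdclk_state {
		struct global_state base;	/* any offset works; container_of subtracts it */
		int cdclk;
	};

	#define to_cdclk_state(x) container_of((x), struct cdclk_state, base)

	int main(void)
	{
		struct cdclk_state s = { .base = { .serial = 1 }, .cdclk = 652800 };
		struct global_state *base = &s.base;	/* what the core hands back */

		printf("cdclk = %d\n", to_cdclk_state(base)->cdclk); /* 652800 */
		return 0;
	}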
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 3980e8b50c28..c1cce93a1c25 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -157,23 +157,29 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
if (INTEL_GEN(dev_priv) >= 7) {
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
+ postoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
+ postoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
+ postoff[2]);
}
}
@@ -185,22 +191,28 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
+ coeff[2] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
+ coeff[5] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
+ coeff[8] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
}
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
@@ -297,14 +309,16 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv));
ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
ilk_csc_coeff_identity,
ilk_csc_off_zero);
}
- I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+ intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
@@ -330,51 +344,74 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_postoff_limited_range);
}
- I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+ intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
-/*
- * Set up the pipe CSC unit on CherryView.
- */
-static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void chv_load_cgm_csc(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_ctm *ctm = blob->data;
enum pipe pipe = crtc->pipe;
+ u16 coeffs[9];
+ int i;
- if (crtc_state->hw.ctm) {
- const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
- u16 coeffs[9] = {};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- u64 abs_coeff =
- ((1ULL << 63) - 1) & ctm->matrix[i];
-
- /* Round coefficient. */
- abs_coeff += 1 << (32 - 13);
- /* Clamp to hardware limits. */
- abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
-
- /* Write coefficients in S3.12 format. */
- if (ctm->matrix[i] & (1ULL << 63))
- coeffs[i] = 1 << 15;
- coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
- coeffs[i] |= (abs_coeff >> 20) & 0xfff;
- }
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i];
+
+ /* Round coefficient. */
+ abs_coeff += 1 << (32 - 13);
+ /* Clamp to hardware limits. */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+
+ coeffs[i] = 0;
+
+ /* Write coefficients in S3.12 format. */
+ if (ctm->matrix[i] & (1ULL << 63))
+ coeffs[i] |= 1 << 15;
- I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
- coeffs[1] << 16 | coeffs[0]);
- I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
- coeffs[3] << 16 | coeffs[2]);
- I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
- coeffs[5] << 16 | coeffs[4]);
- I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
- coeffs[7] << 16 | coeffs[6]);
- I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+ coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+ coeffs[i] |= (abs_coeff >> 20) & 0xfff;
}
- I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+ coeffs[8]);
+}
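chv_load_cgm_csc() converts each CTM coefficient from the DRM S31.32 sign-magnitude fixed-point format to the CGM unit's S3.12 format: round into 12 fractional bits, clamp the magnitude below 8.0, then repack the sign, 3 integer bits, and 12 fraction bits. A standalone restatement of the conversion with a couple of worked values:

	#include <stdint.h>
	#include <stdio.h>

	#define CTM_COEFF_8_0 (8ULL << 32)

	/* S31.32 sign-magnitude (drm_color_ctm layout) -> S3.12 sign-magnitude. */
	static uint16_t ctm_to_s3_12(uint64_t coeff)
	{
		uint64_t abs_coeff = coeff & ~(1ULL << 63);
		uint16_t out = 0;

		abs_coeff += 1 << (32 - 13);		/* round to 12 fraction bits */
		if (abs_coeff > CTM_COEFF_8_0 - 1)	/* clamp to the hw range */
			abs_coeff = CTM_COEFF_8_0 - 1;

		if (coeff & (1ULL << 63))
			out |= 1U << 15;		/* sign */
		out |= ((abs_coeff >> 32) & 7) << 12;	/* 3 integer bits */
		out |= (abs_coeff >> 20) & 0xfff;	/* 12 fraction bits */

		return out;
	}

	int main(void)
	{
		printf("1.0  -> 0x%04x\n", ctm_to_s3_12(1ULL << 32));	/* 0x1000 */
		printf("1.5  -> 0x%04x\n", ctm_to_s3_12(3ULL << 31));	/* 0x1800 */
		printf("-1.0 -> 0x%04x\n",
		       ctm_to_s3_12(1ULL << 63 | 1ULL << 32));		/* 0x9000 */
		return 0;
	}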
+
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, int bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
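intel_color_lut_pack() widens an N-bit hardware LUT sample back into the 16-bit drm_color_lut range by clamping and left-shifting; since the widening is a plain shift, the low (16 - N) bits always come back as zero. A quick standalone check:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t color_lut_pack(uint32_t val, int bit_precision)
	{
		uint32_t max = 0xffff >> (16 - bit_precision);

		if (val > max)		/* clamp_val() equivalent */
			val = max;

		if (bit_precision < 16)
			val <<= 16 - bit_precision;

		return val;
	}

	int main(void)
	{
		printf("0x%04x\n", color_lut_pack(0x3ff, 10));	/* 0xffc0: full-scale 10-bit */
		printf("0x%04x\n", color_lut_pack(0x80, 8));	/* 0x8000: mid-scale 8-bit */
		return 0;
	}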
+
+static u32 i9xx_lut_8(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 8) << 16 |
+ drm_color_lut_extract(color->green, 8) << 8 |
+ drm_color_lut_extract(color->blue, 8);
+}
+
+static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
+{
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8);
}
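i9xx_lut_8() and i9xx_lut_8_pack() are a pack/unpack pair around the legacy 8-bit palette word (red in bits 23:16, green in 15:8, blue in 7:0). Here is a standalone round trip, with drm_color_lut_extract() re-implemented from its drm_color_mgmt.h definition as best understood (rounding shift plus clamp); treat that re-implementation as a sketch rather than the authoritative helper:

	#include <stdint.h>
	#include <stdio.h>

	struct color { uint16_t red, green, blue; };

	/* Narrow a 16-bit channel to bit_precision bits; assumes precision < 16. */
	static uint32_t lut_extract(uint32_t user_input, int bit_precision)
	{
		uint32_t max = 0xffff >> (16 - bit_precision);

		user_input += 1U << (16 - bit_precision - 1);	/* round */
		user_input >>= 16 - bit_precision;

		return user_input > max ? max : user_input;	/* clamp */
	}

	static uint32_t lut_8(const struct color *c)
	{
		return lut_extract(c->red, 8) << 16 |
		       lut_extract(c->green, 8) << 8 |
		       lut_extract(c->blue, 8);
	}

	static uint32_t lut_pack(uint32_t val, int bit_precision)
	{
		return val << (16 - bit_precision);	/* widen; low bits are gone */
	}

	int main(void)
	{
		struct color c = { 0x8000, 0x4000, 0xffff };
		uint32_t word = lut_8(&c);

		printf("palette word 0x%06x\n", word);	/* 0x8040ff */
		printf("red back: 0x%04x\n",
		       lut_pack((word >> 16) & 0xff, 8));	/* 0x8000 */
		return 0;
	}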
/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
@@ -393,49 +430,34 @@ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
(color->blue >> 8);
}
-static u32 ilk_lut_10(const struct drm_color_lut *color)
+static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
- return drm_color_lut_extract(color->red, 10) << 20 |
- drm_color_lut_extract(color->green, 10) << 10 |
- drm_color_lut_extract(color->blue, 10);
+ entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, ldw);
+ entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
}
-/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
- const struct drm_property_blob *blob)
+static u16 i965_lut_11p6_max_pack(u32 val)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int i;
-
- if (HAS_GMCH(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- assert_dsi_pll_enabled(dev_priv);
- else
- assert_pll_enabled(dev_priv, pipe);
- }
-
- if (blob) {
- const struct drm_color_lut *lut = blob->data;
-
- for (i = 0; i < 256; i++) {
- u32 word =
- (drm_color_lut_extract(lut[i].red, 8) << 16) |
- (drm_color_lut_extract(lut[i].green, 8) << 8) |
- drm_color_lut_extract(lut[i].blue, 8);
+ /* PIPEGCMAX is 11.6, clamp to 10.6 */
+ return clamp_val(val, 0, 0xffff);
+}
- if (HAS_GMCH(dev_priv))
- I915_WRITE(PALETTE(pipe, i), word);
- else
- I915_WRITE(LGC_PALETTE(pipe, i), word);
- }
- }
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
}
-static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
{
- i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
@@ -445,10 +467,10 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val;
- val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, PIPECONF(pipe));
val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- I915_WRITE(PIPECONF(pipe), val);
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
}
static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
@@ -458,10 +480,10 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val;
- val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, PIPECONF(pipe));
val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- I915_WRITE(PIPECONF(pipe), val);
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
ilk_load_csc_matrix(crtc_state);
}
@@ -471,7 +493,8 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+ intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
ilk_load_csc_matrix(crtc_state);
}
@@ -492,9 +515,10 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
if (crtc_state->csc_enable)
val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
- I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
+ intel_de_write(dev_priv, SKL_BOTTOM_COLOR(pipe), val);
- I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+ intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
if (INTEL_GEN(dev_priv) >= 11)
icl_load_csc_matrix(crtc_state);
@@ -502,6 +526,35 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
+static void i9xx_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
+ i9xx_load_lut_8(crtc, gamma_lut);
+}
+
static void i965_load_lut_10p6(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -511,28 +564,52 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- I915_WRITE(PALETTE(pipe, 2 * i + 0),
- i965_lut_10p6_ldw(&lut[i]));
- I915_WRITE(PALETTE(pipe, 2 * i + 1),
- i965_lut_10p6_udw(&lut[i]));
+ intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0),
+ i965_lut_10p6_ldw(&lut[i]));
+ intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1),
+ i965_lut_10p6_udw(&lut[i]));
}
- I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
- I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
- I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
}
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ i9xx_load_lut_8(crtc, gamma_lut);
else
i965_load_lut_10p6(crtc, gamma_lut);
}
+static void ilk_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
static void ilk_load_lut_10(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -542,7 +619,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++)
- I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+ intel_de_write(dev_priv, PREC_PALETTE(pipe, i),
+ ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -551,7 +629,7 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
else
ilk_load_lut_10(crtc, gamma_lut);
}
@@ -584,15 +662,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc,
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
+ intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
}
/*
* Reset the index, otherwise it prevents the legacy palette from being
* written properly.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
/* On BDW+ the index auto increment mode actually works */
@@ -606,22 +685,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
- PAL_PREC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
/* We discard half the user entries in split gamma mode */
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
}
/*
* Reset the index, otherwise it prevents the legacy palette from being
* written properly.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
@@ -659,7 +739,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -682,7 +762,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -703,17 +783,17 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
- u32 i;
/*
* When setting the auto-increment bit, the hardware seems to
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
/*
@@ -729,12 +809,13 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe),
+ lut[i].green);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
@@ -742,26 +823,26 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- u32 i;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
/*
* When setting the auto-increment bit, the hardware seems to
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
u32 v = (i << 16) / (lut_size - 1);
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
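glk_load_degamma_lut_linear() synthesizes an identity curve in the 0.16 fixed-point input range: entry i is i/(lut_size - 1) scaled by 2^16, so the ramp ends exactly at 1.0 (0x10000), and the trailing writes up to index 35 pin the >1.0 extension at 1.0 as well. The ramp arithmetic on its own (33 is the glk degamma LUT size upstream, assumed here):

	#include <stdio.h>

	int main(void)
	{
		int i, lut_size = 33;

		for (i = 0; i < lut_size; i++) {
			unsigned int v = ((unsigned int)i << 16) / (lut_size - 1);

			/* Prints 0x00000, 0x08000 and 0x10000: a linear 0..1.0 ramp. */
			if (i == 0 || i == 16 || i == lut_size - 1)
				printf("entry %2d = 0x%05x\n", i, v);
		}
		return 0;
	}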
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -783,7 +864,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
glk_load_degamma_lut_linear(crtc_state);
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
@@ -827,7 +908,7 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Super Fine segment (let's call it seg1)...
@@ -860,7 +941,7 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Fine segment (let's call it seg2)...
@@ -919,7 +1000,7 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
break;
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
@@ -945,6 +1026,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
return drm_color_lut_extract(color->red, 14);
}
+static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
+}
+
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -954,10 +1042,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0),
- chv_cgm_degamma_ldw(&lut[i]));
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1),
- chv_cgm_degamma_udw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
+ chv_cgm_degamma_ldw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
+ chv_cgm_degamma_udw(&lut[i]));
}
}
@@ -981,31 +1069,34 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0),
- chv_cgm_gamma_ldw(&lut[i]));
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1),
- chv_cgm_gamma_udw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
+ chv_cgm_gamma_ldw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
+ chv_cgm_gamma_udw(&lut[i]));
}
}
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *ctm = crtc_state->hw.ctm;
- cherryview_load_csc_matrix(crtc_state);
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
+ chv_load_cgm_csc(crtc, ctm);
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts(crtc_state);
- return;
- }
-
- if (degamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
chv_load_cgm_degamma(crtc, degamma_lut);
- if (gamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
chv_load_cgm_gamma(crtc, gamma_lut);
+ else
+ i965_load_luts(crtc_state);
+
+ intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
+ crtc_state->cgm_mode);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1167,7 +1258,8 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
/* C8 relies on its palette being stored in the legacy LUT */
if (crtc_state->c8_planes) {
- DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "C8 pixelformat requires the legacy LUT\n");
return -EINVAL;
}
@@ -1630,28 +1722,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
return true;
}
-/* convert hw value with given bit_precision to lut property val */
-static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
{
- u32 max = 0xffff >> (16 - bit_precision);
-
- val = clamp_val(val, 0, max);
-
- if (bit_precision < 16)
- val <<= 16 - bit_precision;
-
- return val;
-}
-
-static struct drm_property_blob *
-i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
+ int i;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
@@ -1659,20 +1736,12 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- if (HAS_GMCH(dev_priv))
- val = I915_READ(PALETTE(pipe, i));
- else
- val = I915_READ(LGC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_RED_MASK, val), 8);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_GREEN_MASK, val), 8);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_BLUE_MASK, val), 8);
+ u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
}
return blob;
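Note: intel_color_lut_pack() is removed from this spot because the new *_pack() helpers take over its job. Its behavior is worth keeping in mind when reading them: the hardware value is clamped to the field's maximum and left-shifted up to 16 bits, so an 8-bit full-scale value of 0xff packs to 0xff00, not 0xffff. A sketch of the new helper, reconstructed from the removed field extraction above:
	/* sketch -- the real i9xx_lut_8_pack() lives earlier in the file */
	static void i9xx_lut_8_pack(struct drm_color_lut *lut, u32 val)
	{
		lut->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8);
		lut->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8);
		lut->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8);
	}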
@@ -1680,22 +1749,21 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
}
-static struct drm_property_blob *
-i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val1, val2;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1703,51 +1771,42 @@ i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
- val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
-
- blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_RED_MASK, val1);
- blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
- blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
+ u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
+
+ i965_lut_10p6_pack(&lut[i], ldw, udw);
}
- blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 0)));
- blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 1)));
- blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 2)));
+ lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
+ lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
+ lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
return blob;
}
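Note: in the 10.6 format each 16-bit component is split across two consecutive PALETTE registers, low 8 bits in the even register and high 8 bits in the odd one. A sketch of i965_lut_10p6_pack(), reconstructed from the removed code above:
	static void i965_lut_10p6_pack(struct drm_color_lut *lut, u32 ldw, u32 udw)
	{
		lut->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
			REG_FIELD_GET(PALETTE_RED_MASK, ldw);
		lut->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
			REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
		lut->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
			REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
	}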
static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
}
-static struct drm_property_blob *
-chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1755,18 +1814,13 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
-
- val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
+ u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
+
+ chv_cgm_gamma_pack(&lut[i], ldw, udw);
}
return blob;
@@ -1774,22 +1828,46 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
+ crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
else
i965_read_luts(crtc_state);
}
-static struct drm_property_blob *
-ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
+ }
+
+ return blob;
+}
+
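Note: ilk_read_lut_8() is a near copy of i9xx_read_lut_8() differing only in the register bank; it exists so the readers no longer need the runtime branch the removed i9xx version carried:
	/* the removed code selected the bank per platform: */
	if (HAS_GMCH(dev_priv))
		val = I915_READ(PALETTE(pipe, i));	/* gmch platforms */
	else
		val = I915_READ(LGC_PALETTE(pipe, i));	/* ilk+ */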
+static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1797,17 +1875,12 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = I915_READ(PREC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
+
+ ilk_lut_10_pack(&lut[i], val);
}
return blob;
@@ -1815,6 +1888,8 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
@@ -1822,21 +1897,19 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
}
-static struct drm_property_blob *
-glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc,
+ u32 prec_index)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int hw_lut_size = ivb_lut_10_size(prec_index);
+ int i, hw_lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * hw_lut_size,
@@ -1844,36 +1917,33 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
- PAL_PREC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
- val = I915_READ(PREC_PAL_DATA(pipe));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
+
+ ilk_lut_10_pack(&lut[i], val);
}
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
return blob;
}
static void glk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}
void intel_color_init(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 44bbc7e74fc3..9ff05ec12115 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -48,7 +48,7 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
const struct cnl_procmon *procmon;
u32 val;
- val = I915_READ(ICL_PORT_COMP_DW3(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -81,26 +81,27 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- val = I915_READ(ICL_PORT_COMP_DW1(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
- I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
static bool check_phy_reg(struct drm_i915_private *dev_priv,
enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
if ((val & mask) != expected_val) {
- DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: "
- "current %08x mask %08x expected %08x\n",
- phy_name(phy),
- reg.reg, val, mask, expected_val);
+ drm_dbg(&dev_priv->drm,
+ "Combo PHY %c reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ phy_name(phy),
+ reg.reg, val, mask, expected_val);
return false;
}
@@ -127,8 +128,8 @@ static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
{
- return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
- (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+ return !(intel_de_read(dev_priv, CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+ (intel_de_read(dev_priv, CNL_PORT_COMP_DW0) & COMP_INIT);
}
static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
@@ -151,20 +152,20 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(CHICKEN_MISC_2);
+ val = intel_de_read(dev_priv, CHICKEN_MISC_2);
val &= ~CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_2, val);
/* Dummy PORT_A to get the correct CNL register from the ICL macro */
cnl_set_procmon_ref_values(dev_priv, PHY_A);
- val = I915_READ(CNL_PORT_COMP_DW0);
+ val = intel_de_read(dev_priv, CNL_PORT_COMP_DW0);
val |= COMP_INIT;
- I915_WRITE(CNL_PORT_COMP_DW0, val);
+ intel_de_write(dev_priv, CNL_PORT_COMP_DW0, val);
- val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
}
static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
@@ -172,11 +173,12 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
u32 val;
if (!cnl_combo_phy_verify_state(dev_priv))
- DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+ drm_warn(&dev_priv->drm,
+ "Combo PHY HW state changed unexpectedly.\n");
- val = I915_READ(CHICKEN_MISC_2);
+ val = intel_de_read(dev_priv, CHICKEN_MISC_2);
val |= CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_2, val);
}
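Note: the CHICKEN_MISC_2, CNL_PORT_COMP_DW0 and CNL_PORT_CL1CM_DW5 updates above all follow the same read-modify-write shape. A wrapper along these lines captures the pattern (hypothetical at this point in the series; later kernels grew a similar intel_de_rmw() helper):
	/* sketch: clear 'clear' bits, set 'set' bits, return the old value */
	static u32 intel_de_rmw_sketch(struct drm_i915_private *i915,
				       i915_reg_t reg, u32 clear, u32 set)
	{
		u32 old = intel_de_read(i915, reg);
		intel_de_write(i915, reg, (old & ~clear) | set);
		return old;
	}
	/* e.g. the power-down in cnl_combo_phys_uninit() would become: */
	/* intel_de_rmw_sketch(dev_priv, CHICKEN_MISC_2, 0, CNL_COMP_PWR_DOWN); */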
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
@@ -184,27 +186,65 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
{
/* The PHY C added by EHL has no PHY_MISC register */
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
- return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
- return !(I915_READ(ICL_PHY_MISC(phy)) &
+ return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
- (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+ (intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+}
+
+static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
+{
+ bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A);
+ bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D);
+ bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+ /*
+ * VBT's 'dvo port' field for child devices references the DDI, not
+ * the PHY. So if combo PHY A is wired up to drive an external
+ * display, we should see a child device present on PORT_D and
+ * nothing on PORT_A and no DSI.
+ */
+ if (ddi_d_present && !ddi_a_present && !dsi_present)
+ return true;
+
+ /*
+ * If we encounter a VBT that claims to have an external display on
+ * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
+ * in the log and let the internal display win.
+ */
+ if (ddi_d_present)
+ drm_err(&i915->drm,
+ "VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
+
+ return false;
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum phy phy)
{
bool ret;
+ u32 expected_val = 0;
if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- if (phy == PHY_A)
+ if (phy == PHY_A) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ if (ehl_vbt_ddi_d_present(dev_priv))
+ expected_val = ICL_PHY_MISC_MUX_DDID;
+
+ ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy),
+ ICL_PHY_MISC_MUX_DDID,
+ expected_val);
+ }
+ }
+
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
@@ -219,7 +259,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
u32 val;
if (is_dsi) {
- WARN_ON(lane_reversal);
+ drm_WARN_ON(&dev_priv->drm, lane_reversal);
switch (lane_count) {
case 1:
@@ -257,36 +297,10 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = I915_READ(ICL_PORT_CL_DW10(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
val |= lane_mask << PWR_DOWN_LN_SHIFT;
- I915_WRITE(ICL_PORT_CL_DW10(phy), val);
-}
-
-static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
-{
- bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
- bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
- bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
-
- /*
- * VBT's 'dvo port' field for child devices references the DDI, not
- * the PHY. So if combo PHY A is wired up to drive an external
- * display, we should see a child device present on PORT_D and
- * nothing on PORT_A and no DSI.
- */
- if (ddi_d_present && !ddi_a_present && !dsi_present)
- return val | ICL_PHY_MISC_MUX_DDID;
-
- /*
- * If we encounter a VBT that claims to have an external display on
- * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
- * in the log and let the internal display win.
- */
- if (ddi_d_present)
- DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
-
- return val & ~ICL_PHY_MISC_MUX_DDID;
+ intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
}
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
@@ -297,8 +311,9 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
u32 val;
if (icl_combo_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n",
- phy_name(phy));
+ drm_dbg(&dev_priv->drm,
+ "Combo PHY %c already enabled, won't reprogram it.\n",
+ phy_name(phy));
continue;
}
@@ -318,28 +333,33 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
* based on whether our VBT indicates the presence of any
* "internal" child devices.
*/
- val = I915_READ(ICL_PHY_MISC(phy));
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A)
- val = ehl_combo_phy_a_mux(dev_priv, val);
+ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) {
+ val &= ~ICL_PHY_MISC_MUX_DDID;
+
+ if (ehl_vbt_ddi_d_present(dev_priv))
+ val |= ICL_PHY_MISC_MUX_DDID;
+ }
+
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(phy), val);
+ intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
cnl_set_procmon_ref_values(dev_priv, phy);
if (phy == PHY_A) {
- val = I915_READ(ICL_PORT_COMP_DW8(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
- I915_WRITE(ICL_PORT_COMP_DW8(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
}
- val = I915_READ(ICL_PORT_COMP_DW0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
- val = I915_READ(ICL_PORT_CL_DW5(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
}
}
@@ -352,7 +372,8 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (phy == PHY_A &&
!icl_combo_phy_verify_state(dev_priv, phy))
- DRM_WARN("Combo PHY %c HW state changed unexpectedly\n",
+ drm_warn(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
/*
@@ -363,14 +384,14 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
goto skip_phy_misc;
- val = I915_READ(ICL_PHY_MISC(phy));
+ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(phy), val);
+ intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
- val = I915_READ(ICL_PORT_COMP_DW0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
val &= ~COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 1133c4e97bb4..903e49659f56 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -153,7 +153,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
enum pipe pipe = 0;
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
return encoder->get_hw_state(encoder, &pipe);
}
@@ -162,7 +162,8 @@ enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(dev,
+ !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (!connector->base.state->crtc)
return INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index f976b800b245..78f9b6cde810 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -32,7 +32,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -75,7 +74,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(adpa_reg);
+ val = intel_de_read(dev_priv, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -112,7 +111,7 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
- tmp = I915_READ(crt->adpa_reg);
+ tmp = intel_de_read(dev_priv, crt->adpa_reg);
if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -184,7 +183,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv))
- I915_WRITE(BCLRPAT(crtc->pipe), 0);
+ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -201,7 +200,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
break;
}
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
}
static void intel_disable_crt(struct intel_encoder *encoder,
@@ -230,7 +229,7 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!old_crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -258,7 +257,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
- WARN_ON(!old_crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -269,7 +268,7 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -282,7 +281,7 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -299,7 +298,13 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+
+ intel_enable_pipe(crtc_state);
+
+ lpt_pch_enable(crtc_state);
+
+ intel_crtc_vblank_on(crtc_state);
intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
@@ -414,7 +419,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
- DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LPT only supports 24bpp\n");
return -EINVAL;
}
@@ -442,34 +448,37 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = false;
- save_adpa = adpa = I915_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+ save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
if (intel_de_wait_for_clear(dev_priv,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- I915_WRITE(crt->adpa_reg, save_adpa);
- POSTING_READ(crt->adpa_reg);
+ intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
+ intel_de_posting_read(dev_priv, crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
- DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+ drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n",
+ adpa, ret);
return ret;
}
@@ -498,27 +507,30 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*/
reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
- save_adpa = adpa = I915_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+ save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
- I915_WRITE(crt->adpa_reg, save_adpa);
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_TRIGGER");
+ intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
- DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
if (reenable_hpd)
intel_hpd_enable(dev_priv, crt->base.hpd_pin);
@@ -558,15 +570,16 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* wait for FORCE_DETECT to go off */
if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 1000))
- DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_DETECT to go off");
}
- stat = I915_READ(PORT_HOTPLUG_STAT);
+ stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT);
if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
ret = true;
/* clear the interrupt we just generated, if any */
- I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+ intel_de_write(dev_priv, PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
@@ -629,13 +642,16 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
* have to check the EDID input spec of the attached device.
*/
if (!is_digital) {
- DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT detected via DDC:0x50 [EDID]\n");
ret = true;
} else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
} else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
kfree(edid);
@@ -660,7 +676,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
u8 st00;
enum drm_connector_status status;
- DRM_DEBUG_KMS("starting load-detect on CRT\n");
+ drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
bclrpat_reg = BCLRPAT(pipe);
vtotal_reg = VTOTAL(pipe);
@@ -706,7 +722,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = I915_READ(vsync_reg);
+ u32 vsync = intel_de_read(dev_priv, vsync_reg);
u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
@@ -801,9 +817,9 @@ intel_crt_detect(struct drm_connector *connector,
int status, ret;
struct intel_load_detect_pipe tmp;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, connector->name,
- force);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name,
+ force);
if (i915_modparams.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
@@ -824,11 +840,13 @@ intel_crt_detect(struct drm_connector *connector,
* only trust an assertion that the monitor is connected.
*/
if (intel_crt_detect_hotplug(connector)) {
- DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT detected via hotplug\n");
status = connector_status_connected;
goto out;
} else
- DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via hotplug\n");
}
if (intel_crt_detect_ddc(connector)) {
@@ -918,13 +936,13 @@ void intel_crt_reset(struct drm_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 5) {
u32 adpa;
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(crt->adpa_reg, adpa);
- POSTING_READ(crt->adpa_reg);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
+ intel_de_posting_read(dev_priv, crt->adpa_reg);
- DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
+ drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = true;
}
@@ -969,7 +987,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
adpa_reg = ADPA;
- adpa = I915_READ(adpa_reg);
+ adpa = intel_de_read(dev_priv, adpa_reg);
if ((adpa & ADPA_DAC_ENABLE) == 0) {
/*
* On some machines (some IVB at least) CRT can be
@@ -979,11 +997,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
* take. So the only way to tell is attempt to enable
* it and see what happens.
*/
- I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
- ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
- if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
+ intel_de_write(dev_priv, adpa_reg,
+ adpa | ADPA_DAC_ENABLE |
+ ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0)
return;
- I915_WRITE(adpa_reg, adpa);
+ intel_de_write(dev_priv, adpa_reg, adpa);
}
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
@@ -1027,6 +1045,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
if (HAS_DDI(dev_priv)) {
@@ -1057,14 +1078,6 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- if (!I915_HAS_HOTPLUG(dev_priv))
- intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-
- /*
- * Configure the automatic hotplug detection stuff
- */
- crt->force_hotplug_required = false;
-
/*
* TODO: find a proper way to discover whether we need to set the
* polarity and link reversal bits or not, instead of relying on the
@@ -1074,7 +1087,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
+ dev_priv->fdi_rx_config = intel_de_read(dev_priv,
+ FDI_RX_CTL(PIPE_A)) & fdi_config;
}
intel_crt_reset(&crt->base.base);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 09870a31b4f0..3112572cfb7d 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_csr.h"
+#include "intel_de.h"
/**
* DOC: csr support for dmc
@@ -39,8 +40,8 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin"
-#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4)
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
@@ -276,11 +277,11 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
- val = I915_READ(DC_STATE_DEBUG);
+ val = intel_de_read(dev_priv, DC_STATE_DEBUG);
if ((val & mask) != mask) {
val |= mask;
- I915_WRITE(DC_STATE_DEBUG, val);
- POSTING_READ(DC_STATE_DEBUG);
+ intel_de_write(dev_priv, DC_STATE_DEBUG, val);
+ intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
}
@@ -298,12 +299,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
u32 i, fw_size;
if (!HAS_CSR(dev_priv)) {
- DRM_ERROR("No CSR support available for this platform\n");
+ drm_err(&dev_priv->drm,
+ "No CSR support available for this platform\n");
return;
}
if (!dev_priv->csr.dmc_payload) {
- DRM_ERROR("Tried to program CSR with empty payload\n");
+ drm_err(&dev_priv->drm,
+ "Tried to program CSR with empty payload\n");
return;
}
@@ -313,13 +316,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
preempt_disable();
for (i = 0; i < fw_size; i++)
- I915_WRITE_FW(CSR_PROGRAM(i), payload[i]);
+ intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i),
+ payload[i]);
preempt_enable();
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
- I915_WRITE(dev_priv->csr.mmioaddr[i],
- dev_priv->csr.mmiodata[i]);
+ intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i],
+ dev_priv->csr.mmiodata[i]);
}
dev_priv->csr.dc_state = 0;
@@ -607,7 +611,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- WARN_ON(dev_priv->csr.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
dev_priv->csr.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
@@ -636,16 +640,16 @@ static void csr_load_work_fn(struct work_struct *work)
intel_csr_load_program(dev_priv);
intel_csr_runtime_pm_put(dev_priv);
- DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->csr.fw_path,
- CSR_VERSION_MAJOR(csr->version),
+ drm_info(&dev_priv->drm,
+ "Finished loading DMC firmware %s (v%u.%u)\n",
+ dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
- dev_notice(dev_priv->drm.dev,
+ drm_notice(&dev_priv->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
csr->fw_path);
- dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+ drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -712,7 +716,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (i915_modparams.dmc_firmware_path) {
if (strlen(i915_modparams.dmc_firmware_path) == 0) {
csr->fw_path = NULL;
- DRM_INFO("Disabling CSR firmware and runtime PM\n");
+ drm_info(&dev_priv->drm,
+ "Disabling CSR firmware and runtime PM\n");
return;
}
@@ -722,11 +727,12 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
}
if (csr->fw_path == NULL) {
- DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No known CSR firmware for platform, disabling runtime PM\n");
return;
}
- DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+ drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path);
schedule_work(&dev_priv->csr.work);
}
@@ -783,7 +789,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_csr_ucode_suspend(dev_priv);
- WARN_ON(dev_priv->csr.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
kfree(dev_priv->csr.dmc_payload);
}
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/display/intel_csr.h
index 03c64f8af7ab..03c64f8af7ab 100644
--- a/drivers/gpu/drm/i915/intel_csr.h
+++ b/drivers/gpu/drm/i915/display/intel_csr.h
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index d9a61f341070..52db7852827b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -568,6 +568,20 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
+ { 0xC, 0x64, 0x30, 0x00, 0x0F }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x64, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
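Note: the five hex columns in these translation tables map, in order, to the cnl_ddi_buf_trans fields defined earlier in this file (field names as used by this era of the driver, listed here for reference):
	struct cnl_ddi_buf_trans {
		u8 dw2_swing_sel;	/* first column: 0xA / 0xC / 0x6 */
		u8 dw7_n_scalar;
		u8 dw4_cursor_coeff;
		u8 dw4_post_cursor_2;
		u8 dw4_post_cursor_1;
	};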
struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_11_6;
@@ -622,6 +636,34 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
{ 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
};
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -818,7 +860,7 @@ bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
@@ -839,7 +881,7 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
@@ -860,7 +902,7 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
@@ -901,15 +943,43 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return icl_combo_phy_ddi_translations_dp_hbr2;
}
-static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
+{
+ if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP &&
+ rate > 270000) {
+ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
+ return ehl_combo_phy_ddi_translations_hbr2_hbr3;
+ }
+
+ return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
+{
+ if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
+ return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+ } else if (rate > 270000) {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_combo_phy_ddi_translations_dp_hbr2;
+ }
+
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+}
+
+static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
{
- struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port];
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries, level, default_entry;
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
@@ -937,19 +1007,18 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
default_entry = 6;
} else {
- WARN(1, "ddi translation table missing\n");
+ drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
return 0;
}
- if (WARN_ON_ONCE(n_entries == 0))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
return 0;
- if (port_info->hdmi_level_shift_set)
- level = port_info->hdmi_level_shift;
- else
+ level = intel_bios_hdmi_level_shift(encoder);
+ if (level < 0)
level = default_entry;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
return level;
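Note: intel_bios_hdmi_level_shift() replaces the open-coded ddi_port_info lookup removed above. A sketch consistent with that removed code, with a negative return meaning "VBT does not specify a level shift":
	int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
	{
		struct drm_i915_private *i915 = to_i915(encoder->base.dev);
		const struct ddi_vbt_port_info *info =
			&i915->vbt.ddi_port_info[encoder->port];
		return info->hdmi_level_shift_set ? info->hdmi_level_shift : -1;
	}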
@@ -980,15 +1049,14 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
&n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) &&
- dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ if (IS_GEN9_BC(dev_priv) && intel_bios_dp_boost_level(encoder))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- I915_WRITE(DDI_BUF_TRANS_LO(port, i),
- ddi_translations[i].trans1 | iboost_bit);
- I915_WRITE(DDI_BUF_TRANS_HI(port, i),
- ddi_translations[i].trans2);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ ddi_translations[i].trans1 | iboost_bit);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ ddi_translations[i].trans2);
}
}
@@ -1008,21 +1076,20 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) &&
- dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+ if (IS_GEN9_BC(dev_priv) && intel_bios_hdmi_boost_level(encoder))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
- ddi_translations[level].trans1 | iboost_bit);
- I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
- ddi_translations[level].trans2);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ ddi_translations[level].trans1 | iboost_bit);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ ddi_translations[level].trans2);
}
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -1033,7 +1100,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
for (i = 0; i < 16; i++) {
udelay(1);
- if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
return;
}
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
@@ -1124,70 +1191,64 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
- I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) |
- FDI_RX_PWRDN_LANE0_VAL(2) |
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) |
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
udelay(220);
/* Switch from Rawclk to PCDclk */
rx_ctl_val |= FDI_PCDCLK;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
- I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
- WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
+ intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+ drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
- I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 |
- DP_TP_CTL_ENABLE);
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
* DDI E does not support port reversal, the functionality is
* achieved on the PCH side in FDI_RX_CTL, so no need to set the
* port reversal bit */
- I915_WRITE(DDI_BUF_CTL(PORT_E),
- DDI_BUF_CTL_ENABLE |
- ((crtc_state->fdi_lanes - 1) << 1) |
- DDI_BUF_TRANS_SELECT(i / 2));
- POSTING_READ(DDI_BUF_CTL(PORT_E));
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE |
+ ((crtc_state->fdi_lanes - 1) << 1) |
+ DDI_BUF_TRANS_SELECT(i / 2));
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
udelay(600);
/* Program PCH FDI Receiver TU */
- I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
/* Enable PCH FDI Receiver with auto-training */
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
/* Wait for FDI receiver lane calibration */
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- temp = I915_READ(FDI_RX_MISC(PIPE_A));
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
- POSTING_READ(FDI_RX_MISC(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
udelay(5);
- temp = I915_READ(DP_TP_STATUS(PORT_E));
+ temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
break;
@@ -1203,37 +1264,34 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
}
rx_ctl_val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
temp &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
- POSTING_READ(DDI_BUF_CTL(PORT_E));
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(PORT_E), temp);
- POSTING_READ(DP_TP_CTL(PORT_E));
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- temp = I915_READ(FDI_RX_MISC(PIPE_A));
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
- POSTING_READ(FDI_RX_MISC(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
}
/* Enable normal pixel sending for FDI */
- I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE);
}
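Note on the training loop above: it runs 2 * ARRAY_SIZE(hsw_ddi_translations_fdi) iterations while DDI_BUF_TRANS_SELECT(i / 2) advances only every other pass, so each voltage/emphasis entry is attempted twice before moving to the next:
	/* i = 0,1 -> entry 0; i = 2,3 -> entry 1; ... */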
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1260,175 +1318,18 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
}
if (num_encoders != 1)
- WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(crtc->pipe));
+ drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
+ num_encoders,
+ pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
}
-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int refclk;
- int n, p, r;
- u32 wrpll;
-
- wrpll = I915_READ(reg);
- switch (wrpll & WRPLL_REF_MASK) {
- case WRPLL_REF_SPECIAL_HSW:
- /*
- * muxed-SSC for BDW.
- * non-SSC for non-ULT HSW. Check FUSE_STRAP3
- * for the non-SSC reference frequency.
- */
- if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
- if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- refclk = 24;
- else
- refclk = 135;
- break;
- }
- /* fall through */
- case WRPLL_REF_PCH_SSC:
- /*
- * We could calculate spread here, but our checking
- * code only cares about 5% accuracy, and spread is a max of
- * 0.5% downspread.
- */
- refclk = 135;
- break;
- case WRPLL_REF_LCPLL:
- refclk = 2700;
- break;
- default:
- MISSING_CASE(wrpll);
- return 0;
- }
-
- r = wrpll & WRPLL_DIVIDER_REF_MASK;
- p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
- n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
-
- /* Convert to KHz, p & r have a fixed point portion */
- return (refclk * n * 100) / (p * r);
-}
-
-static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq;
-
- p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
- p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
-
- if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR2_PDIV_1:
- p0 = 1;
- break;
- case DPLL_CFGCR2_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR2_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR2_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR2_KDIV_5:
- p2 = 5;
- break;
- case DPLL_CFGCR2_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR2_KDIV_3:
- p2 = 3;
- break;
- case DPLL_CFGCR2_KDIV_1:
- p2 = 1;
- break;
- }
-
- dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
- * 24 * 1000;
-
- dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
- * 24 * 1000) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq, ref_clock;
-
- p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
-
- if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
- DPLL_CFGCR1_QDIV_RATIO_SHIFT;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR1_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR1_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR1_PDIV_5:
- p0 = 5;
- break;
- case DPLL_CFGCR1_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR1_KDIV_1:
- p2 = 1;
- break;
- case DPLL_CFGCR1_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR1_KDIV_3:
- p2 = 3;
- break;
- }
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
- dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
- * ref_clock;
-
- dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
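For reference, the arithmetic removed in skl_calc_wrpll_link() and cnl_calc_wrpll_link() evaluates the same closed form, differing only in the reference clock (fixed 24 MHz on skl, cnl_hdmi_pll_ref_clock() on cnl):
	/*
	 * dco (kHz) = ref (kHz) * (DCO_INTEGER + DCO_FRACTION / 2^15)
	 * port_clock (kHz) = dco / (5 * p0 * p1 * p2)
	 */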
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
- u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+ u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
switch (val) {
case DDI_CLK_SEL_NONE:
@@ -1447,77 +1348,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *pll_state)
-{
- u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
- u64 tmp;
-
- ref_clock = dev_priv->cdclk.hw.ref;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
- m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
- m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
- m2_frac = pll_state->mg_pll_bias &
- DKL_PLL_BIAS_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
- } else {
- m2_frac = 0;
- }
- } else {
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
- m2_frac = pll_state->mg_pll_div0 &
- MG_PLL_DIV0_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
- } else {
- m2_frac = 0;
- }
- }
-
- switch (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
- div1 = 2;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
- div1 = 3;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
- div1 = 5;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
- div1 = 7;
- break;
- default:
- MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
- return 0;
- }
-
- div2 = (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
-
- /* div2 value of 0 is same as 1 means no div */
- if (div2 == 0)
- div2 = 1;
-
- /*
- * Adjust the original formula to delay the division by 2^22 in order to
- * minimize possible rounding errors.
- */
- tmp = (u64)m1 * m2_int * ref_clock +
- (((u64)m1 * m2_frac * ref_clock) >> 22);
- tmp = div_u64(tmp, 5 * div1 * div2);
-
- return tmp;
-}
-
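/*
 * Editor's note (not part of the patch): the removed icl_calc_mg_pll_link()
 * keeps m2's 22-bit fraction scaled up until after the multiplications, so
 * the fractional feedback contribution survives instead of being truncated
 * to zero early.  A minimal sketch mirroring the removed arithmetic
 * (kernel-style u32/u64 types, div_u64() from <linux/math64.h>):
 */
static u32 mg_pll_link_khz_sketch(u32 m1, u32 m2_int, u32 m2_frac,
				  u32 ref_khz, u32 div1, u32 div2)
{
	/* integer part, plus the 2^22-scaled fraction shifted out last */
	u64 tmp = (u64)m1 * m2_int * ref_khz +
		  (((u64)m1 * m2_frac * ref_khz) >> 22);

	/* 5 * div1 * div2 is the post-divider chain decoded above */
	return div_u64(tmp, 5 * div1 * div2);
}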
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
@@ -1543,214 +1373,22 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
-static void icl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- int link_clock;
-
- if (intel_phy_is_combo(dev_priv, phy)) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
- pipe_config->shared_dpll);
-
- if (pll_id == DPLL_ID_ICL_TBTPLL)
- link_clock = icl_calc_tbt_pll_link(dev_priv, port);
- else
- link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void cnl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
-
- switch (link_clock) {
- case DPLL_CFGCR0_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2700:
- link_clock = 270000;
- break;
- case DPLL_CFGCR0_LINK_RATE_3240:
- link_clock = 324000;
- break;
- case DPLL_CFGCR0_LINK_RATE_4050:
- link_clock = 405000;
- break;
- default:
- WARN(1, "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- /*
- * ctrl1 register is already shifted for each pll, just use 0 to get
- * the internal shift for each field
- */
- if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
- link_clock = skl_calc_wrpll_link(pll_state);
- } else {
- link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
-
- switch (link_clock) {
- case DPLL_CTRL1_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CTRL1_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CTRL1_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CTRL1_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CTRL1_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CTRL1_LINK_RATE_2700:
- link_clock = 270000;
- break;
- default:
- WARN(1, "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void hsw_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 val, pll;
-
- val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
- switch (val & PORT_CLK_SEL_MASK) {
- case PORT_CLK_SEL_LCPLL_810:
- link_clock = 81000;
- break;
- case PORT_CLK_SEL_LCPLL_1350:
- link_clock = 135000;
- break;
- case PORT_CLK_SEL_LCPLL_2700:
- link_clock = 270000;
- break;
- case PORT_CLK_SEL_WRPLL1:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
- break;
- case PORT_CLK_SEL_WRPLL2:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
- break;
- case PORT_CLK_SEL_SPLL:
- pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK;
- if (pll == SPLL_FREQ_810MHz)
- link_clock = 81000;
- else if (pll == SPLL_FREQ_1350MHz)
- link_clock = 135000;
- else if (pll == SPLL_FREQ_2700MHz)
- link_clock = 270000;
- else {
- WARN(1, "bad spll freq\n");
- return;
- }
- break;
- default:
- WARN(1, "bad port clock sel\n");
- return;
- }
-
- pipe_config->port_clock = link_clock * 2;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
-{
- struct dpll clock;
-
- clock.m1 = 2;
- clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
-
- return chv_calc_dpll_params(100000, &clock);
-}
-
-static void bxt_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->port_clock =
- bxt_calc_pll_link(&pipe_config->dpll_hw_state);
-
- ddi_dotclock_get(pipe_config);
-}
-
static void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_ddi_clock_get(encoder, pipe_config);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_ddi_clock_get(encoder, pipe_config);
- else if (INTEL_GEN(dev_priv) <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
+ if (intel_phy_is_tc(dev_priv, phy) &&
+ intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) ==
+ DPLL_ID_ICL_TBTPLL)
+ pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv,
+ encoder->port);
+ else
+ pipe_config->port_clock =
+ intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll);
+
+ ddi_dotclock_get(pipe_config);
}
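/*
 * Editor's note (not part of the patch): the per-platform hsw/skl/bxt/cnl/
 * icl_ddi_clock_get() variants removed above are folded into
 * intel_dpll_get_freq(), which derives the port clock from the saved PLL
 * state.  Only the ICL+ TBT PLL keeps a special case, because its link
 * rate is selected by DDI_CLK_SEL rather than by the PLL configuration.
 */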
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
@@ -1764,7 +1402,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- WARN_ON(transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -1787,8 +1425,8 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
temp |= DP_MSA_MISC_COLOR_CEA_RGB;
@@ -1810,7 +1448,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
}
/*
@@ -1904,7 +1542,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- WARN_ON(master == INVALID_TRANSCODER);
+ drm_WARN_ON(&dev_priv->drm,
+ master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
} else {
@@ -1925,7 +1564,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
/*
@@ -1942,7 +1581,7 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
temp &= ~TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
@@ -1952,16 +1591,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
- val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
val &= ~TRANS_DDI_FUNC_ENABLE;
if (INTEL_GEN(dev_priv) >= 12) {
- if (!intel_dp_mst_is_master_trans(crtc_state))
- val &= ~TGL_TRANS_DDI_PORT_MASK;
+ if (!intel_dp_mst_is_master_trans(crtc_state)) {
+ val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_MODE_SELECT_MASK);
+ }
} else {
- val &= ~TRANS_DDI_PORT_MASK;
+ val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -1983,20 +1624,21 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
- if (WARN_ON(!wakeref))
+ if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
+ if (drm_WARN_ON(dev,
+ !intel_encoder->get_hw_state(intel_encoder, &pipe))) {
ret = -EIO;
goto out;
}
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe));
if (enable)
tmp |= TRANS_DDI_HDCP_SIGNALLING;
else
tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
- I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp);
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
@@ -2006,7 +1648,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
enum transcoder cpu_transcoder;
@@ -2030,7 +1672,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
else
cpu_transcoder = (enum transcoder) pipe;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
@@ -2083,12 +1725,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!wakeref)
return;
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -2128,7 +1771,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
@@ -2162,7 +1806,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
out:
if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
- tmp = I915_READ(BXT_PHY_CTL(port));
+ tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
@@ -2221,11 +1865,16 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* happen since fake-MST encoders don't set their get_power_domains()
* hook.
*/
- if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
dig_port = enc_to_dig_port(encoder);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2254,11 +1903,13 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
if (cpu_transcoder != TRANSCODER_EDP) {
if (INTEL_GEN(dev_priv) >= 12)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_PORT(port));
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_PORT(port));
else
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_PORT(port));
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
}
}
@@ -2269,11 +1920,13 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
if (cpu_transcoder != TRANSCODER_EDP) {
if (INTEL_GEN(dev_priv) >= 12)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_DISABLED);
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_DISABLED);
else
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
}
}
@@ -2282,13 +1935,13 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
{
u32 tmp;
- tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
@@ -2300,9 +1953,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
- iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+ iboost = intel_bios_hdmi_boost_level(encoder);
else
- iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+ iboost = intel_bios_dp_boost_level(encoder);
if (iboost == 0) {
const struct ddi_buf_trans *ddi_translations;
@@ -2315,9 +1968,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
else
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
iboost = ddi_translations[level].i_boost;
@@ -2350,9 +2003,9 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
else
ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
bxt_ddi_phy_set_signal_level(dev_priv, port,
@@ -2372,12 +2025,15 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, encoder->type,
+ tgl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
} else if (INTEL_GEN(dev_priv) == 11) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (IS_ELKHARTLAKE(dev_priv))
+ ehl_get_combo_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
+ else if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
@@ -2399,9 +2055,10 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
}
- if (WARN_ON(n_entries < 1))
+ if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
n_entries = 1;
- if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
return index_to_dp_signal_levels[n_entries - 1] &
@@ -2444,52 +2101,52 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
else
ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~SCALING_MODE_SEL_MASK;
val |= SCALING_MODE_SEL(2);
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(CNL_PORT_TX_DW2_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Rcomp scalar is fixed as 0x98 for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val);
/* Program PORT_TX_DW4 */
	/* We cannot write to GRP. It would overwrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW5 */
/* All DW5 values are fixed for every table entry */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~RTERM_SELECT_MASK;
val |= RTERM_SELECT(6);
val |= TAP3_DISABLE;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW7 */
- val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val);
}
static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2515,12 +2172,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_PCS_DW1_LN0(port));
if (type != INTEL_OUTPUT_HDMI)
val |= COMMON_KEEPER_EN;
else
val &= ~COMMON_KEEPER_EN;
- I915_WRITE(CNL_PORT_PCS_DW1_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_PCS_DW1_GRP(port), val);
/* 2. Program loadgen select */
/*
@@ -2530,33 +2187,33 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~TX_TRAINING_EN;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
cnl_ddi_vswing_program(encoder, level, type);
/* 6. Set training enable to trigger update */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val |= TX_TRAINING_EN;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
@@ -2567,8 +2224,15 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
u32 n_entries, val;
int ln;
- ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
- &n_entries);
+ if (INTEL_GEN(dev_priv) >= 12)
+ ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
+ else if (IS_ELKHARTLAKE(dev_priv))
+ ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
+ else
+ ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
if (!ddi_translations)
return;
@@ -2578,41 +2242,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
}
/* Set PORT_TX_DW5 */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
- val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2641,12 +2305,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
if (type == INTEL_OUTPUT_HDMI)
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -2656,33 +2320,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(ICL_PORT_CL_DW5(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
/* 6. Set training enable to trigger update */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2706,33 +2370,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2740,9 +2404,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2750,7 +2414,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2761,17 +2425,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2779,9 +2443,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val);
- val = I915_READ(MG_TX2_DCC(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2789,18 +2453,22 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
+ val = intel_de_read(dev_priv,
+ MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ val);
- val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
+ val = intel_de_read(dev_priv,
+ MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ val);
}
}
@@ -2846,24 +2514,25 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
for (ln = 0; ln < 2; ln++) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, ln));
- I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0);
+ intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
/* All the registers are RMW */
- val = I915_READ(DKL_TX_DPCNTL0(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
- I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val);
- val = I915_READ(DKL_TX_DPCNTL1(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
- I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val);
- val = I915_READ(DKL_TX_DPCNTL2(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
- I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
}
}
@@ -2963,10 +2632,11 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ drm_WARN_ON(&dev_priv->drm,
+ (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
if (intel_phy_is_combo(dev_priv, phy)) {
/*
@@ -2981,14 +2651,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
*/
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(ICL_DPCLKA_CFGCR0);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
}
val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
@@ -2997,13 +2667,13 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
@@ -3012,7 +2682,7 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
enum port port;
u32 val;
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_port_masked(port, port_mask) {
enum phy phy = intel_port_to_phy(dev_priv, port);
bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
@@ -3025,13 +2695,13 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
* Punt on the case now where clock is gated, but it would
* be needed by the port. Something else is really broken then.
*/
- if (WARN_ON(ddi_clk_needed))
+ if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
continue;
DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
phy_name(phy));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
}
}
@@ -3057,7 +2727,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (WARN_ON(is_mst))
+ if (drm_WARN_ON(&dev_priv->drm, is_mst))
return;
}
@@ -3076,7 +2746,8 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (other_encoder == encoder)
continue;
- if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ port_mask & BIT(other_encoder->port)))
return;
}
/*
@@ -3098,52 +2769,54 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (WARN_ON(!pll))
+ if (drm_WARN_ON(&dev_priv->drm, !pll))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_phy_is_combo(dev_priv, phy))
- I915_WRITE(DDI_CLK_SEL(port),
- icl_pll_to_ddi_clk_sel(encoder, crtc_state));
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ icl_pll_to_ddi_clk_sel(encoder, crtc_state));
else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
/*
* MG does not exist but the programming is required
* to ungate DDIC and DDID
*/
- I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ DDI_CLK_SEL_MG);
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
- val = I915_READ(DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
/*
* Configure DPCLKA_CFGCR0 to turn on the clock for the DDI.
* This step and the step before must be done with separate
* register writes.
*/
- val = I915_READ(DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- I915_WRITE(DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
} else if (IS_GEN9_BC(dev_priv)) {
/* DDI -> PLL mapping */
- val = I915_READ(DPLL_CTRL2);
+ val = intel_de_read(dev_priv, DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- I915_WRITE(DPLL_CTRL2, val);
+ intel_de_write(dev_priv, DPLL_CTRL2, val);
} else if (INTEL_GEN(dev_priv) < 9) {
- I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(dev_priv, PORT_CLK_SEL(port),
+ hsw_pll_to_ddi_pll_sel(pll));
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
@@ -3155,15 +2828,17 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_phy_is_combo(dev_priv, phy) ||
(IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
- I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
- I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
- DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+ intel_de_write(dev_priv, DPCLKA_CFGCR0,
+ intel_de_read(dev_priv, DPCLKA_CFGCR0) | DPCLKA_CFGCR0_DDI_CLK_OFF(port));
} else if (IS_GEN9_BC(dev_priv)) {
- I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
- DPLL_CTRL2_DDI_CLK_OFF(port));
+ intel_de_write(dev_priv, DPLL_CTRL2,
+ intel_de_read(dev_priv, DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port));
} else if (INTEL_GEN(dev_priv) < 9) {
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(dev_priv, PORT_CLK_SEL(port),
+ PORT_CLK_SEL_NONE);
}
}
@@ -3180,13 +2855,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
return;
if (INTEL_GEN(dev_priv) >= 12) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
- ln0 = I915_READ(DKL_DP_MODE(tc_port));
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
- ln1 = I915_READ(DKL_DP_MODE(tc_port));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x0));
+ ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x1));
+ ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
} else {
- ln0 = I915_READ(MG_DP_MODE(0, tc_port));
- ln1 = I915_READ(MG_DP_MODE(1, tc_port));
+ ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
+ ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
}
	ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
@@ -3198,7 +2875,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
switch (pin_assignment) {
case 0x0:
- WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dig_port->tc_mode != TC_PORT_LEGACY);
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
@@ -3243,13 +2921,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
}
if (INTEL_GEN(dev_priv) >= 12) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
- I915_WRITE(DKL_DP_MODE(tc_port), ln0);
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
- I915_WRITE(DKL_DP_MODE(tc_port), ln1);
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x0));
+ intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0);
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x1));
+ intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1);
} else {
- I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
- I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
+ intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
+ intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
}
}
@@ -3274,9 +2954,9 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
return;
intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
@@ -3294,90 +2974,10 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
return;
intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
-}
-
-static void
-tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
-{
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
- u32 val;
-
- if (!cstate->dc3co_exitline)
- return;
-
- val = I915_READ(EXITLINE(cstate->cpu_transcoder));
- val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
- I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
-}
-
-static void
-tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
-{
- u32 val, exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
-
- if (!cstate->dc3co_exitline)
- return;
-
- exit_scanlines = cstate->dc3co_exitline;
- exit_scanlines <<= EXITLINE_SHIFT;
- val = I915_READ(EXITLINE(cstate->cpu_transcoder));
- val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
- val |= exit_scanlines;
- val |= EXITLINE_ENABLE;
- I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
-}
-
-static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *cstate)
-{
- u32 exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
- u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay;
-
- cstate->dc3co_exitline = 0;
-
- if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
- return;
-
- /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
- if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A ||
- encoder->port != PORT_A)
- return;
-
- if (!cstate->has_psr2 || !cstate->hw.active)
- return;
-
- /*
- * DC3CO Exit time 200us B.Spec 49196
- * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
- */
- exit_scanlines =
- intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1;
-
- if (WARN_ON(exit_scanlines > crtc_vdisplay))
- return;
-
- cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines;
- DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline);
-}
-
-static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
-{
- u32 val;
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (INTEL_GEN(dev_priv) < 12)
- return;
-
- val = I915_READ(EXITLINE(crtc_state->cpu_transcoder));
-
- if (val & EXITLINE_ENABLE)
- crtc_state->dc3co_exitline = val & EXITLINE_MASK;
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
}
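/*
 * Editor's illustration (not part of the patch): the DC3CO exitline code
 * removed above converted the fixed 200 us DC3CO exit latency (B.Spec
 * 49196) into scanlines and programmed EXITLINE that many lines before
 * the end of the visible area.  A minimal sketch of the conversion,
 * assuming a hypothetical 1080-line mode at a 67.5 kHz line rate
 * (~14.8 us per line; DIV_ROUND_UP() comes from <linux/kernel.h>):
 */
static u32 dc3co_exitline_sketch(u32 crtc_vdisplay)
{
	u32 line_time_ns = 14815;	/* hypothetical: 1 / 67.5 kHz */
	u32 exit_scanlines = DIV_ROUND_UP(200 * 1000, line_time_ns) + 1;

	/* e.g. 1080 - (14 + 1) = 1065 */
	return crtc_vdisplay - exit_scanlines;
}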
static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
@@ -3392,7 +2992,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
int level = intel_ddi_dp_level(intel_dp);
enum transcoder transcoder = crtc_state->cpu_transcoder;
- tgl_set_psr2_transcoder_exitline(crtc_state);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -3534,16 +3133,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
int level = intel_ddi_dp_level(intel_dp);
if (INTEL_GEN(dev_priv) < 11)
- WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ drm_WARN_ON(&dev_priv->drm,
+ is_mst && (port == PORT_A || port == PORT_E));
else
- WARN_ON(is_mst && port == PORT_A);
+ drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
- intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
- intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
-
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
@@ -3607,8 +3204,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
	/* The MST encoder programs the MSA after allocating the Virtual
	 * Channel, from its pre_enable callback.
	 */
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_ddi_set_dp_msa(crtc_state, conn_state);
+
+ intel_dp_set_m_n(crtc_state, M1_N1);
+ }
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -3618,8 +3218,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- int level = intel_ddi_hdmi_level(dev_priv, port);
+ int level = intel_ddi_hdmi_level(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
@@ -3673,7 +3272,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
	 * the DP link parameters
*/
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
if (INTEL_GEN(dev_priv) >= 11)
icl_map_plls_to_ports(encoder, crtc_state);
@@ -3706,20 +3305,20 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
bool wait = false;
u32 val;
- val = I915_READ(DDI_BUF_CTL(port));
+ val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), val);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
wait = true;
}
if (intel_crtc_has_dp_encoder(crtc_state)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
}
/* Disable FEC in DP Sink */
@@ -3751,9 +3350,13 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 val;
- val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~TGL_TRANS_DDI_PORT_MASK;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_MODE_SELECT_MASK);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ val);
}
} else {
if (!is_mst)
@@ -3779,7 +3382,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
- tgl_clear_psr2_transcoder_exitline(old_crtc_state);
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3816,7 +3418,8 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
transcoder_name(old_crtc_state->cpu_transcoder));
- I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
}
static void intel_ddi_post_disable(struct intel_encoder *encoder,
@@ -3890,25 +3493,25 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_clk_disable(encoder);
- val = I915_READ(FDI_RX_MISC(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_PCDCLK;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}
static void intel_enable_ddi_dp(struct intel_encoder *encoder,
@@ -3944,9 +3547,9 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
[PORT_E] = TRANSCODER_A,
};
- WARN_ON(INTEL_GEN(dev_priv) < 9);
+ drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) < 9);
- if (WARN_ON(port < PORT_A || port > PORT_E))
+ if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
port = PORT_A;
return CHICKEN_TRANS(trans[port]);
@@ -3964,8 +3567,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
+ "scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
@@ -3978,7 +3582,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3987,8 +3591,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
udelay(1);
@@ -3999,15 +3603,15 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
	/* In HDMI/DVI mode, the port width and swing/emphasis values
	 * are ignored, so nothing special needs to be done besides
	 * enabling the port.
	 */
- I915_WRITE(DDI_BUF_CTL(port),
- dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port),
+ dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
if (crtc_state->has_audio)
intel_audio_codec_enable(encoder, crtc_state, conn_state);
@@ -4017,6 +3621,12 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ WARN_ON(crtc_state->has_pch_encoder);
+
+ intel_enable_pipe(crtc_state);
+
+ intel_crtc_vblank_on(crtc_state);
+
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
else
@@ -4096,43 +3706,11 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct intel_hdcp *hdcp = &connector->hdcp;
- bool content_protection_type_changed =
- (conn_state->hdcp_content_type != hdcp->content_type &&
- conn_state->content_protection !=
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
- /*
- * During the HDCP encryption session if Type change is requested,
- * disable the HDCP and reenable it with new TYPE value.
- */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
- content_protection_type_changed)
- intel_hdcp_disable(connector);
-
- /*
- * Mark the hdcp state as DESIRED after the hdcp disable of type
- * change procedure.
- */
- if (content_protection_type_changed) {
- mutex_lock(&hdcp->mutex);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- mutex_unlock(&hdcp->mutex);
- }
-
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED ||
- content_protection_type_changed)
- intel_hdcp_enable(connector,
- crtc_state->cpu_transcoder,
- (u8)conn_state->hdcp_content_type);
+ intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
}
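/*
 * Editor's note (not part of the patch): the open-coded handling removed
 * above (disable HDCP on an UNDESIRED request or a content-type change,
 * mark the state DESIRED again, then re-enable with the new type) is
 * consolidated behind intel_hdcp_update_pipe(), which presumably carries
 * the same state transitions on the HDCP side.
 */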
static void
@@ -4197,20 +3775,20 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
u32 dp_tp_ctl, ddi_buf_ctl;
bool wait = false;
- dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl);
+ dp_tp_ctl = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
- ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
- I915_WRITE(DDI_BUF_CTL(port),
- ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port),
+ ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
wait = true;
}
dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -4225,12 +3803,12 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
- POSTING_READ(DDI_BUF_CTL(port));
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
udelay(600);
}
@@ -4244,14 +3822,16 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
return false;
- return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ if (INTEL_GEN(dev_priv) >= 12 && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
+ else if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 3;
else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
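/*
 * Editor's note (not part of the patch): 594000 kHz corresponds to the
 * 4k@60 8bpc TMDS clock; port clocks beyond it require the PHY to run at
 * a raised minimum voltage level.  The thresholds differ per platform:
 * this hunk adds level 2 for gen12, while EHL keeps 3 and other gen11
 * parts keep 1.
 */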
@@ -4265,15 +3845,21 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
- if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder);
+ intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder);
+ }
+
intel_dsc_get_config(encoder, pipe_config);
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4342,7 +3928,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
pipe_config->fec_enable =
- I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+ intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
encoder->base.base.id, encoder->base.name,
@@ -4365,9 +3951,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (encoder->type == INTEL_OUTPUT_EDP)
- tgl_dc3co_exitline_get_config(pipe_config);
-
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -4449,7 +4032,6 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
} else {
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
- tgl_dc3co_exitline_compute_config(encoder, pipe_config);
}
if (ret)
@@ -4470,6 +4052,112 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
return 0;
}
+static bool mode_equal(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2)
+{
+ return drm_mode_match(mode1, mode2,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode1->clock == mode2->clock; /* we want an exact match */
+}
+
+static bool m_n_equal(const struct intel_link_m_n *m_n_1,
+ const struct intel_link_m_n *m_n_2)
+{
+ return m_n_1->tu == m_n_2->tu &&
+ m_n_1->gmch_m == m_n_2->gmch_m &&
+ m_n_1->gmch_n == m_n_2->gmch_n &&
+ m_n_1->link_m == m_n_2->link_m &&
+ m_n_1->link_n == m_n_2->link_n;
+}
+
+static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
+ const struct intel_crtc_state *crtc_state2)
+{
+ return crtc_state1->hw.active && crtc_state2->hw.active &&
+ crtc_state1->output_types == crtc_state2->output_types &&
+ crtc_state1->output_format == crtc_state2->output_format &&
+ crtc_state1->lane_count == crtc_state2->lane_count &&
+ crtc_state1->port_clock == crtc_state2->port_clock &&
+ mode_equal(&crtc_state1->hw.adjusted_mode,
+ &crtc_state2->hw.adjusted_mode) &&
+ m_n_equal(&crtc_state1->dp_m_n, &crtc_state2->dp_m_n);
+}
+
+static u8
+intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
+ int tile_group_id)
+{
+ struct drm_connector *connector;
+ const struct drm_connector_state *conn_state;
+ struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(ref_crtc_state->uapi.state);
+ u8 transcoders = 0;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
+ return 0;
+
+ for_each_new_connector_in_state(&state->base, connector, conn_state, i) {
+ struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
+ const struct intel_crtc_state *crtc_state;
+
+ if (!crtc)
+ continue;
+
+		if (!connector->has_tile ||
+		    connector->tile_group->id != tile_group_id)
+			continue;
+		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+		if (!crtcs_port_sync_compatible(ref_crtc_state, crtc_state))
+			continue;
+ transcoders |= BIT(crtc_state->cpu_transcoder);
+ }
+
+ return transcoders;
+}
+
+static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *connector = conn_state->connector;
+ u8 port_sync_transcoders = 0;
+
+	DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name,
+ crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+
+ if (connector->has_tile)
+ port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
+ connector->tile_group->id);
+
+ /*
+	 * EDP transcoders cannot be enslaved;
+	 * always make them a master when present.
+ */
+ if (port_sync_transcoders & BIT(TRANSCODER_EDP))
+ crtc_state->master_transcoder = TRANSCODER_EDP;
+ else
+ crtc_state->master_transcoder = ffs(port_sync_transcoders) - 1;
+
+ if (crtc_state->master_transcoder == crtc_state->cpu_transcoder) {
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->sync_mode_slaves_mask =
+ port_sync_transcoders & ~BIT(crtc_state->cpu_transcoder);
+ }
+
+ return 0;
+}
+
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
@@ -4488,6 +4176,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
struct intel_connector *connector;
enum port port = intel_dig_port->base.port;
@@ -4498,6 +4187,10 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_dig_port->dp.prepare_link_retrain =
intel_ddi_prepare_link_retrain;
+ if (INTEL_GEN(dev_priv) < 12) {
+ intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+ }
if (!intel_dp_init_connector(intel_dig_port, connector)) {
kfree(connector);
@@ -4569,7 +4262,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
+ drm_WARN_ON(&dev_priv->drm,
+ !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
return 0;
@@ -4636,7 +4330,8 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
/*
* Unpowered type-c dongles can take some time to boot and be
@@ -4716,7 +4411,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4739,15 +4434,14 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
- struct ddi_vbt_port_info *port_info =
- &dev_priv->vbt.ddi_port_info[port];
struct intel_digital_port *intel_dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum phy phy = intel_port_to_phy(dev_priv, port);
- init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
- init_dp = port_info->supports_dp;
+ init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) ||
+ intel_bios_port_supports_hdmi(dev_priv, port);
+ init_dp = intel_bios_port_supports_dp(dev_priv, port);
if (intel_bios_is_lspcon_present(dev_priv, port)) {
/*
@@ -4779,6 +4473,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
+ encoder->compute_config_late = intel_ddi_compute_config_late;
encoder->enable = intel_enable_ddi;
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
@@ -4797,10 +4492,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
- intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
+ DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL;
else
- intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
+ DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
@@ -4808,8 +4505,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (intel_phy_is_tc(dev_priv, phy)) {
- bool is_legacy = !port_info->supports_typec_usb &&
- !port_info->supports_tbt;
+ bool is_legacy =
+ !intel_bios_port_supports_typec_usb(dev_priv, port) &&
+ !intel_bios_port_supports_tbt(dev_priv, port);
intel_tc_port_init(intel_dig_port, is_legacy);
@@ -4817,7 +4515,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->update_complete = intel_ddi_update_complete;
}
- WARN_ON(port > PORT_I);
+ drm_WARN_ON(&dev_priv->drm, port > PORT_I);
intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
port - PORT_A;
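The intel_ddi_compute_config_late() hook added above resolves Transcoder Port
Sync roles for tiled displays: EDP can never be enslaved, otherwise the
lowest-numbered compatible transcoder becomes the master, and the CRTC that
turns out to be its own master instead records the remaining transcoders in
its slave mask. A minimal standalone sketch of that selection, with
hypothetical enum values and a hypothetical helper name (only the bit
manipulation mirrors the function above):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	/* Illustrative stand-ins for the driver's transcoder enum. */
	enum { XCODER_A, XCODER_B, XCODER_EDP, XCODER_INVALID = -1 };

	static int pick_master(unsigned int sync_mask, int cpu_transcoder,
			       unsigned int *slave_mask)
	{
		int master;

		/* EDP transcoders cannot be enslaved, so EDP always wins. */
		if (sync_mask & (1u << XCODER_EDP))
			master = XCODER_EDP;
		else
			master = ffs(sync_mask) - 1; /* lowest set bit; -1 if empty */

		*slave_mask = 0;
		if (master == cpu_transcoder) {
			/* This CRTC is the master; the rest are its slaves. */
			master = XCODER_INVALID;
			*slave_mask = sync_mask & ~(1u << cpu_transcoder);
		}
		return master;
	}

	int main(void)
	{
		unsigned int slaves;
		/* A tile spanning transcoders A and B, while configuring A. */
		int master = pick_master((1u << XCODER_A) | (1u << XCODER_B),
					 XCODER_A, &slaves);

		printf("master=%d slaves=0x%x\n", master, slaves); /* -1 0x2 */
		return 0;
	}

Configuring transcoder B with the same mask would instead return XCODER_A and
an empty slave mask, which is how the Transcoder Port Sync enabling code
further below can tell the two roles apart.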
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 167c6579d972..55fd72b901fe 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -6,8 +6,6 @@
#ifndef __INTEL_DDI_H__
#define __INTEL_DDI_H__
-#include <drm/i915_drm.h>
-
#include "intel_display.h"
struct drm_connector_state;
@@ -47,7 +45,5 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *state);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
new file mode 100644
index 000000000000..00da10bf35f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DE_H__
+#define __INTEL_DE_H__
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_uncore.h"
+
+static inline u32
+intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ intel_uncore_posting_read(&i915->uncore, reg);
+}
+
+/* Note: read the warnings for intel_uncore_*_fw() functions! */
+static inline u32
+intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read_fw(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write(&i915->uncore, reg, val);
+}
+
+/* Note: read the warnings for intel_uncore_*_fw() functions! */
+static inline void
+intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(&i915->uncore, reg, val);
+}
+
+static inline void
+intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
+{
+ intel_uncore_rmw(&i915->uncore, reg, clear, set);
+}
+
+static inline int
+intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
+{
+ return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
+}
+
+static inline int
+intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
+}
+
+static inline int
+intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
+}
+
+#endif /* __INTEL_DE_H__ */
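The new intel_de.h is a thin display-engine layer over intel_uncore: the same
MMIO accessors, but with the device passed explicitly rather than the implicit
dev_priv that the old I915_READ()/I915_WRITE()/POSTING_READ() macros captured
from the enclosing scope, which is what enables the mechanical conversion in
the rest of this patch. The _fw variants inherit intel_uncore's caveat that
the caller handles forcewake and locking itself; see the skl_detach_scaler()
hunk below, which now takes uncore.lock around its intel_de_write_fw() calls.
A sketch of the conversion pattern, using the PIPE_CHICKEN read-modify-write
that appears later in this diff:

	/* Before: dev_priv is picked up implicitly by the macros. */
	tmp = I915_READ(PIPE_CHICKEN(pipe));
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);

	/* After: the same uncore access with an explicit device argument. */
	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);

	/* Single-register updates could also go through the rmw helper. */
	intel_de_rmw(dev_priv, PIPE_CHICKEN(pipe), 0,
		     PIXEL_ROUNDING_TRUNC_FB_PASSTHRU);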
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index aa453953908b..346846609f45 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,7 +41,6 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
@@ -203,9 +202,9 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
val = vlv_cck_read(dev_priv, reg);
divider = val & CCK_FREQUENCY_VALUES;
- WARN((val & CCK_FREQUENCY_STATUS) !=
- (divider << CCK_FREQUENCY_STATUS_SHIFT),
- "%s change in progress\n", name);
+ drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+ (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ "%s change in progress\n", name);
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
@@ -235,7 +234,8 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
CCK_CZ_CLOCK_CONTROL);
- DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
+ drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
+ dev_priv->czclk_freq);
}
static inline u32 /* units of 100MHz */
@@ -518,13 +518,11 @@ static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) |
- DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) &
- ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
/* Wa_2006604312:icl */
@@ -533,11 +531,11 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
if (enable)
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
else
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}
static bool
@@ -883,7 +881,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
return calculated_clock->p > best_clock->p;
}
- if (WARN_ON_ONCE(!target_freq))
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
@@ -1049,9 +1047,9 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
else
line_mask = DSL_LINEMASK_GEN3;
- line1 = I915_READ(reg) & line_mask;
+ line1 = intel_de_read(dev_priv, reg) & line_mask;
msleep(5);
- line2 = I915_READ(reg) & line_mask;
+ line2 = intel_de_read(dev_priv, reg) & line_mask;
return line1 != line2;
}
@@ -1063,8 +1061,9 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
/* Wait for the display line to settle/start moving */
if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
- DRM_ERROR("pipe %c scanline %s wait timed out\n",
- pipe_name(pipe), onoff(state));
+ drm_err(&dev_priv->drm,
+ "pipe %c scanline %s wait timed out\n",
+ pipe_name(pipe), onoff(state));
}
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
@@ -1090,7 +1089,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
/* Wait for the Pipe State to go off */
if (intel_de_wait_for_clear(dev_priv, reg,
I965_PIPECONF_ACTIVE, 100))
- WARN(1, "pipe_off wait timed out\n");
+ drm_WARN(&dev_priv->drm, 1,
+ "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
@@ -1103,7 +1103,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(DPLL(pipe));
+ val = intel_de_read(dev_priv, DPLL(pipe));
cur_state = !!(val & DPLL_VCO_ENABLE);
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
@@ -1139,10 +1139,11 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
* so pipe->transcoder cast is fine here.
*/
enum transcoder cpu_transcoder = (enum transcoder)pipe;
- u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
- u32 val = I915_READ(FDI_TX_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
cur_state = !!(val & FDI_TX_ENABLE);
}
I915_STATE_WARN(cur_state != state,
@@ -1158,7 +1159,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(FDI_RX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
@@ -1180,7 +1181,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
if (HAS_DDI(dev_priv))
return;
- val = I915_READ(FDI_TX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
@@ -1190,7 +1191,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(FDI_RX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_PLL_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
@@ -1204,14 +1205,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
- if (WARN_ON(HAS_DDI(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
return;
if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
pp_reg = PP_CONTROL(0);
- port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
@@ -1238,13 +1239,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 port_sel;
pp_reg = PP_CONTROL(0);
- port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
- WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+ drm_WARN_ON(&dev_priv->drm,
+ port_sel != PANEL_PORT_SELECT_LVDS);
intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
- val = I915_READ(pp_reg);
+ val = intel_de_read(dev_priv, pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
@@ -1268,7 +1270,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
- u32 val = I915_READ(PIPECONF(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
intel_display_power_put(dev_priv, power_domain, wakeref);
@@ -1318,7 +1320,7 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
u32 val;
bool enabled;
- val = I915_READ(PCH_TRANSCONF(pipe));
+ val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
enabled = !!(val & TRANS_ENABLE);
I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
@@ -1392,12 +1394,12 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- DRM_ERROR("DPLL %d failed to lock\n", pipe);
+ drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
static void vlv_enable_pll(struct intel_crtc *crtc,
@@ -1414,8 +1416,9 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
_vlv_enable_pll(crtc, pipe_config);
- I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
@@ -1442,11 +1445,11 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
udelay(1);
/* Enable PLL */
- I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- DRM_ERROR("PLL %d failed to lock\n", pipe);
+ drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
static void chv_enable_pll(struct intel_crtc *crtc,
@@ -1470,19 +1473,23 @@ static void chv_enable_pll(struct intel_crtc *crtc,
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
- I915_WRITE(CBR4_VLV, 0);
+ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, CBR4_VLV, 0);
dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
} else {
- I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
}
@@ -1513,29 +1520,29 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
- I915_WRITE(reg, dpll);
+ intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, reg, dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE(DPLL_MD(crtc->pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
+ crtc_state->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
- I915_WRITE(reg, dpll);
+ intel_de_write(dev_priv, reg, dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, dpll);
+ intel_de_posting_read(dev_priv, reg);
udelay(150); /* wait for warmup */
}
}
@@ -1553,8 +1560,8 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
- I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1569,8 +1576,8 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1586,8 +1593,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
vlv_dpio_get(dev_priv);
@@ -1626,9 +1633,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
if (intel_de_wait_for_register(dev_priv, dpll_reg,
port_mask, expected_mask, 1000))
- WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
- dport->base.base.base.id, dport->base.base.name,
- I915_READ(dpll_reg) & port_mask, expected_mask);
+ drm_WARN(&dev_priv->drm, 1,
+ "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dport->base.base.base.id, dport->base.base.name,
+ intel_de_read(dev_priv, dpll_reg) & port_mask,
+ expected_mask);
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -1648,7 +1657,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
if (HAS_PCH_CPT(dev_priv)) {
reg = TRANS_CHICKEN2(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
/*
* Workaround: Set the timing override bit
* before enabling the pch transcoder.
@@ -1657,12 +1666,12 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
reg = PCH_TRANSCONF(pipe);
- val = I915_READ(reg);
- pipeconf_val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, reg);
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
@@ -1692,9 +1701,10 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_PROGRESSIVE;
}
- I915_WRITE(reg, val | TRANS_ENABLE);
+ intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
- DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
+ pipe_name(pipe));
}
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1706,16 +1716,16 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, PIPE_A);
- val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
PIPECONF_INTERLACED_ILK)
@@ -1723,10 +1733,10 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
else
val |= TRANS_PROGRESSIVE;
- I915_WRITE(LPT_TRANSCONF, val);
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
- DRM_ERROR("Failed to enable PCH transcoder\n");
+ drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1743,19 +1753,20 @@ static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_ENABLE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -1763,18 +1774,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(LPT_TRANSCONF);
+ val = intel_de_read(dev_priv, LPT_TRANSCONF);
val &= ~TRANS_ENABLE;
- I915_WRITE(LPT_TRANSCONF, val);
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
- DRM_ERROR("Failed to disable PCH transcoder\n");
+ drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
@@ -1807,7 +1818,7 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state
return 0; /* Gen2 doesn't have a hardware frame counter */
}
-static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1825,7 +1836,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
assert_vblank_disabled(&crtc->base);
}
-static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1834,7 +1845,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
i915_reg_t reg;
u32 val;
- DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
assert_planes_disabled(crtc);
@@ -1862,15 +1873,15 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
trace_intel_pipe_enable(crtc);
reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
/* we keep both pipes enabled on 830 */
- WARN_ON(!IS_I830(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- I915_WRITE(reg, val | PIPECONF_ENABLE);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
/*
* Until the pipe starts PIPEDSL reads will return a stale value,
@@ -1892,7 +1903,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg;
u32 val;
- DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
/*
* Make sure planes won't keep trying to pump pixels to us,
@@ -1903,7 +1914,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
trace_intel_pipe_disable(crtc);
reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
@@ -1918,7 +1929,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~PIPECONF_ENABLE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
if ((val & PIPECONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -2211,11 +2222,11 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int pinctl;
u32 alignment;
- if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
- if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
/* Note that the w/a also requires 64 PTE of padding following the
@@ -2386,7 +2397,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int cpp = fb->format->cpp[color_plane];
- WARN_ON(new_offset > old_offset);
+ drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
@@ -2537,8 +2548,9 @@ static int intel_fb_offset_to_xy(int *x, int *y,
alignment = 0;
if (alignment != 0 && fb->offsets[color_plane] % alignment) {
- DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
- fb->offsets[color_plane], color_plane);
+ drm_dbg_kms(&dev_priv->drm,
+ "Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
return -EINVAL;
}
@@ -2548,9 +2560,10 @@ static int intel_fb_offset_to_xy(int *x, int *y,
/* Catch potential overflows early */
if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
fb->offsets[color_plane])) {
- DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
- fb->offsets[color_plane], fb->pitches[color_plane],
- color_plane);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
return -ERANGE;
}
@@ -2706,9 +2719,10 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
/*
* We assume the primary plane for pipe A has
- * the highest stride limits of them all.
+	 * the highest stride limits of them all;
+	 * if pipe A is disabled, use the first pipe from pipe_mask.
*/
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ crtc = intel_get_first_crtc(dev_priv);
if (!crtc)
return 0;
@@ -3034,8 +3048,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
ret = intel_fb_offset_to_xy(&x, &y, fb, i);
if (ret) {
- DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
+ drm_dbg_kms(&dev_priv->drm,
+ "bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
return ret;
}
@@ -3054,8 +3069,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
*/
if (i == 0 && i915_gem_object_is_tiled(obj) &&
(x + width) * cpp > fb->pitches[i]) {
- DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
+ drm_dbg_kms(&dev_priv->drm,
+ "bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
return -EINVAL;
}
@@ -3111,8 +3127,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
}
if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
- DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
- mul_u32_u32(max_size, tile_size), obj->base.size);
+ drm_dbg_kms(&dev_priv->drm,
+ "fb too big for bo (need %llu bytes, have %zu bytes)\n",
+ mul_u32_u32(max_size, tile_size), obj->base.size);
return -EINVAL;
}
@@ -3143,7 +3160,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- WARN_ON(is_ccs_modifier(fb->modifier));
+ drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
drm_rect_translate(&plane_state->uapi.src,
@@ -3184,7 +3201,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
DRM_MODE_ROTATE_0, tile_size);
offset /= tile_size;
- WARN_ON(i >= ARRAY_SIZE(info->plane));
+ drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
info->plane[i].offset = offset;
info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
tile_width * cpp);
@@ -3377,6 +3394,67 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
}
}
+static struct i915_vma *
+initial_plane_vma(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 base, size;
+
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base,
+ I915_GTT_MIN_ALIGNMENT);
+ size = round_up(plane_config->base + plane_config->size,
+ I915_GTT_MIN_ALIGNMENT);
+ size -= base;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (size * 2 > i915->stolen_usable_size)
+ return NULL;
+
+ obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+ if (IS_ERR(obj))
+ return NULL;
+
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ break;
+ case I915_TILING_X:
+ case I915_TILING_Y:
+ obj->tiling_and_stride =
+ plane_config->fb->base.pitches[0] |
+ plane_config->tiling;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ goto err_obj;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ !i915_vma_is_map_and_fenceable(vma))
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return NULL;
+}
+
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
@@ -3385,22 +3463,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
- u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
- u32 size_aligned = round_up(plane_config->base + plane_config->size,
- PAGE_SIZE);
- struct drm_i915_gem_object *obj;
- bool ret = false;
-
- size_aligned -= base_aligned;
-
- if (plane_config->size == 0)
- return false;
-
- /* If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features. */
- if (size_aligned * 2 > dev_priv->stolen_usable_size)
- return false;
+ struct i915_vma *vma;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -3408,30 +3471,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
case I915_FORMAT_MOD_Y_TILED:
break;
default:
- DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
- fb->modifier);
+ drm_dbg(&dev_priv->drm,
+ "Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
return false;
}
- obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
- base_aligned,
- base_aligned,
- size_aligned);
- if (IS_ERR(obj))
+ vma = initial_plane_vma(dev_priv, plane_config);
+ if (!vma)
return false;
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- break;
- case I915_TILING_X:
- case I915_TILING_Y:
- obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto out;
- }
-
mode_cmd.pixel_format = fb->format->format;
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
@@ -3439,17 +3488,18 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
- if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
- DRM_DEBUG_KMS("intel fb init failed\n");
- goto out;
+ if (intel_framebuffer_init(to_intel_framebuffer(fb),
+ vma->obj, &mode_cmd)) {
+ drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ goto err_vma;
}
+ plane_config->vma = vma;
+ return true;
- DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
- ret = true;
-out:
- i915_gem_object_put(obj);
- return ret;
+err_vma:
+ i915_vma_put(vma);
+ return false;
}
static void
@@ -3493,9 +3543,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
- plane->base.base.id, plane->base.name,
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+ plane->base.base.id, plane->base.name,
+ crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
@@ -3547,17 +3598,17 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_plane_state *intel_state =
to_intel_plane_state(plane_state);
struct drm_framebuffer *fb;
+ struct i915_vma *vma;
if (!plane_config->fb)
return;
if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
fb = &plane_config->fb->base;
+ vma = plane_config->vma;
goto valid_fb;
}
- kfree(plane_config->fb);
-
/*
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
@@ -3577,7 +3628,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = state->hw.fb;
- drm_framebuffer_get(fb);
+ vma = state->vma;
goto valid_fb;
}
}
@@ -3600,21 +3651,11 @@ valid_fb:
intel_state->color_plane[0].stride =
intel_fb_pitch(fb, 0, intel_state->hw.rotation);
- intel_state->vma =
- intel_pin_and_fence_fb_obj(fb,
- &intel_state->view,
- intel_plane_uses_fence(intel_state),
- &intel_state->flags);
- if (IS_ERR(intel_state->vma)) {
- DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
- intel_crtc->pipe, PTR_ERR(intel_state->vma));
-
- intel_state->vma = NULL;
- drm_framebuffer_put(fb);
- return;
- }
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+ __i915_vma_pin(vma);
+ intel_state->vma = i915_vma_get(vma);
+ if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
+ if (vma->fence)
+ intel_state->flags |= PLANE_HAS_FENCE;
plane_state->src_x = 0;
plane_state->src_y = 0;
@@ -3633,9 +3674,13 @@ valid_fb:
dev_priv->preserve_bios_swizzle = true;
plane_state->fb = fb;
+ drm_framebuffer_get(fb);
+
plane_state->crtc = &intel_crtc->base;
intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
&to_intel_frontbuffer(fb)->bits);
}
@@ -3798,15 +3843,16 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
max_height = skl_max_plane_height();
if (w > max_width || h > max_height) {
- DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
return -EINVAL;
}
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
- if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
/*
@@ -3829,7 +3875,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
while ((x + w) * cpp > plane_state->color_plane[0].stride) {
if (offset == 0) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
@@ -3854,7 +3901,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (x != plane_state->color_plane[aux_plane].x ||
y != plane_state->color_plane[aux_plane].y) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
}
@@ -3875,6 +3923,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
int uv_plane = 1;
@@ -3892,8 +3941,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
/* FIXME not quite sure how/if these apply to the chroma plane */
if (w > max_width || h > max_height) {
- DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
+ drm_dbg_kms(&i915->drm,
+ "CbCr source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
return -EINVAL;
}
@@ -3922,7 +3972,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
if (x != plane_state->color_plane[ccs_plane].x ||
y != plane_state->color_plane[ccs_plane].y) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
}
@@ -4331,7 +4382,8 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+ plane_state->color_plane[0].stride);
if (INTEL_GEN(dev_priv) < 4) {
/*
@@ -4339,21 +4391,26 @@ static void i9xx_update_plane(struct intel_plane *plane,
* generator but let's assume we still need to
* program whatever is there.
*/
- I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(DSPSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(PRIMSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+ (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
- I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+ linear_offset);
+ intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+ (y << 16) | x);
}
/*
@@ -4361,15 +4418,13 @@ static void i9xx_update_plane(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE_FW(DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
else
- I915_WRITE_FW(DSPADDR(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -4396,11 +4451,11 @@ static void i9xx_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
else
- I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -4425,7 +4480,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- val = I915_READ(DSPCNTR(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
ret = val & DISPLAY_PLANE_ENABLE;
@@ -4444,10 +4499,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
@@ -4791,7 +4851,7 @@ __intel_display_resume(struct drm_device *dev,
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- WARN_ON(ret == -EDEADLK);
+ drm_WARN_ON(dev, ret == -EDEADLK);
return ret;
}
@@ -4819,7 +4879,8 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
- DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset potentially stuck, unbreaking through wedging\n");
intel_gt_set_wedged(&dev_priv->gt);
}
@@ -4843,13 +4904,15 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
state = drm_atomic_helper_duplicate_state(dev, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
- DRM_ERROR("Duplicating state failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+ ret);
return;
}
ret = drm_atomic_helper_disable_all(dev, ctx);
if (ret) {
- DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
drm_atomic_state_put(state);
return;
}
@@ -4878,7 +4941,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
/* for testing only restore the display */
ret = __intel_display_resume(dev, state, ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
@@ -4895,7 +4959,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
ret = __intel_display_resume(dev, state, ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
intel_hpd_init(dev_priv);
}
@@ -4915,7 +4980,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = I915_READ(PIPE_CHICKEN(pipe));
+ tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
/*
* Display WA #1153: icl
@@ -4930,7 +4995,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* across pipe
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
- I915_WRITE(PIPE_CHICKEN(pipe), tmp);
+ intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
@@ -4959,8 +5024,9 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state
/* Enable Transcoder Port Sync */
trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
- trans_ddi_func_ctl2_val);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
+ trans_ddi_func_ctl2_val);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
@@ -4973,7 +5039,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
/* enable normal train */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (IS_IVYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
@@ -4981,10 +5047,10 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
@@ -4992,16 +5058,16 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE;
}
- I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
/* wait one idle pattern time */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(1000);
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
- FDI_FE_ERRC_ENABLE);
+ intel_de_write(dev_priv, reg,
+ intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -5020,81 +5086,83 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
- I915_READ(reg);
+ intel_de_write(dev_priv, reg, temp);
+ intel_de_read(dev_priv, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable */
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
- FDI_RX_PHASE_SYNC_POINTER_EN);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
- DRM_DEBUG_KMS("FDI train 1 done.\n");
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
- DRM_ERROR("FDI train 1 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
break;
}
}
if (tries == 5)
- DRM_ERROR("FDI train 2 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
- DRM_DEBUG_KMS("FDI train done\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}
@@ -5118,17 +5186,17 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -5136,13 +5204,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -5150,28 +5218,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done.\n");
break;
}
udelay(50);
@@ -5180,11 +5250,11 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- DRM_ERROR("FDI train 1 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
if (IS_GEN(dev_priv, 6)) {
@@ -5192,10 +5262,10 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -5203,28 +5273,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done.\n");
break;
}
udelay(50);
@@ -5233,9 +5305,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- DRM_ERROR("FDI train 2 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
- DRM_DEBUG_KMS("FDI train done.\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
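/*
 * The logging half of the same migration, sketched with an illustrative
 * helper (not from this patch): DRM_DEBUG_KMS()/DRM_ERROR() log without
 * device context, while drm_dbg_kms()/drm_err() take the drm_device so
 * every message is tagged with the GPU it concerns, which matters on
 * multi-device systems.
 */
static void __maybe_unused log_train_status(struct drm_i915_private *dev_priv,
					    u32 iir, bool failed)
{
	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", iir);
	if (failed)
		drm_err(&dev_priv->drm, "FDI train fail!\n");
}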
/* Manual link training for Ivy Bridge A0 parts */
@@ -5251,111 +5323,117 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
- DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
/* Try each vswing and preemphasis setting twice before moving on */
for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
/* disable first in case we need to retry */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_TX_ENABLE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp &= ~FDI_RX_ENABLE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(1); /* should be 0.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK ||
- (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
- i);
+ (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done, level %i.\n",
+ i);
break;
}
udelay(1); /* should be 0.5us */
}
if (i == 4) {
- DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 fail on vswing %d\n", j / 2);
continue;
}
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK ||
- (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
- i);
+ (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done, level %i.\n",
+ i);
goto train_done;
}
udelay(2); /* should be 1.5us */
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 fail on vswing %d\n", j / 2);
}
train_done:
- DRM_DEBUG_KMS("FDI train done.\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
@@ -5368,29 +5446,29 @@ static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- temp = I915_READ(reg);
- I915_WRITE(reg, temp | FDI_PCDCLK);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
}
@@ -5405,23 +5483,23 @@ static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_PCDCLK);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
/* Disable CPU FDI TX PLL */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
/* Wait for the clocks to turn off. */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
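/*
 * Why the posting reads matter (illustrative helper, not from this
 * patch): MMIO writes are posted, so reading the register back forces
 * them out to the hardware before the following delay starts counting.
 * intel_de_posting_read() is the explicit-device form of POSTING_READ().
 */
static void __maybe_unused disable_pll_and_settle(struct drm_i915_private *dev_priv,
						  i915_reg_t reg)
{
	u32 temp = intel_de_read(dev_priv, reg);

	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
	intel_de_posting_read(dev_priv, reg);	/* flush the posted write */
	udelay(100);				/* ...so the wait is real */
}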
@@ -5434,32 +5512,33 @@ static void ilk_fdi_disable(struct intel_crtc *crtc)
/* disable CPU FDI tx and PCH FDI rx */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev_priv))
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -5469,10 +5548,10 @@ static void ilk_fdi_disable(struct intel_crtc *crtc)
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
@@ -5505,7 +5584,7 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
- I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
mutex_lock(&dev_priv->sb_lock);
@@ -5552,17 +5631,14 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
}
/* This should not happen with any sane values */
- WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
- ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
- ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
-
- DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- clock,
- auxdiv,
- divsel,
- phasedir,
- phaseinc);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ clock, auxdiv, divsel, phasedir, phaseinc);
mutex_lock(&dev_priv->sb_lock);
@@ -5592,7 +5668,7 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
/* Wait for initialization time */
udelay(24);
- I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
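/*
 * WARN_ON() likewise becomes drm_WARN_ON() (sketch, not from this
 * patch): the same backtrace-producing sanity check, but emitted
 * through the drm_device so the splat identifies which GPU tripped it.
 */
static void __maybe_unused check_divsel(struct drm_i915_private *dev_priv, u32 divsel)
{
	drm_WARN_ON(&dev_priv->drm,
		    SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
}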
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
@@ -5603,7 +5679,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
u32 desired_divisor;
u32 temp;
- if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
mutex_lock(&dev_priv->sb_lock);
@@ -5639,41 +5715,46 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
- I915_READ(HTOTAL(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
- I915_READ(HBLANK(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
- I915_READ(HSYNC(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
- I915_READ(VTOTAL(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
- I915_READ(VBLANK(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
- I915_READ(VSYNC(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- I915_READ(VSYNCSHIFT(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
u32 temp;
- temp = I915_READ(SOUTH_CHICKEN1);
+ temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ FDI_RX_ENABLE);
temp &= ~FDI_BC_BIFURCATION_SELECT;
if (enable)
temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- POSTING_READ(SOUTH_CHICKEN1);
+ drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+ enable ? "en" : "dis");
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
+ intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
@@ -5723,8 +5804,9 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
num_encoders++;
}
- WARN(num_encoders != 1, "%d encoders for pipe %c\n",
- num_encoders, pipe_name(crtc->pipe));
+ drm_WARN(encoder->base.dev, num_encoders != 1,
+ "%d encoders for pipe %c\n",
+ num_encoders, pipe_name(crtc->pipe));
return encoder;
}
@@ -5753,8 +5835,8 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
/* Write the TU size bits before fdi link training, so that error
* detection works. */
- I915_WRITE(FDI_RX_TUSIZE1(pipe),
- I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc, crtc_state);
@@ -5764,7 +5846,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
- temp = I915_READ(PCH_DPLL_SEL);
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (crtc_state->shared_dpll ==
@@ -5772,7 +5854,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
temp |= sel;
else
temp &= ~sel;
- I915_WRITE(PCH_DPLL_SEL, temp);
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
/* XXX: pch pll's can be enabled any time before we enable the PCH
@@ -5795,11 +5877,11 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
TRANS_DP_BPC_MASK);
@@ -5812,17 +5894,16 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
port = intel_get_crtc_new_encoder(state, crtc_state)->port;
- WARN_ON(port < PORT_B || port > PORT_D);
+ drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
temp |= TRANS_DP_PORT_SEL(port);
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
}
ilk_enable_pch_transcoder(crtc_state);
}
-static void lpt_pch_enable(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
+void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
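/*
 * lpt_pch_enable() drops its unused intel_atomic_state argument and
 * loses 'static' so it can be called from outside this file (its old
 * call site in hsw_crtc_enable() is removed further below).  A matching
 * prototype would land in a display header; hypothetical declaration:
 *
 *	void lpt_pch_enable(const struct intel_crtc_state *crtc_state);
 */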
@@ -5844,11 +5925,13 @@ static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
- temp = I915_READ(dslreg);
+ temp = intel_de_read(dev_priv, dslreg);
udelay(500);
- if (wait_for(I915_READ(dslreg) != temp, 5)) {
- if (wait_for(I915_READ(dslreg) != temp, 5))
- DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+ if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
+ if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
+ drm_err(&dev_priv->drm,
+ "mode set failed: pipe %c stuck\n",
+ pipe_name(pipe));
}
}
@@ -5963,7 +6046,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -5982,10 +6066,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
- DRM_DEBUG_KMS("scaler_user index %u.%u: "
- "Staged freeing scaler id %d scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, *scaler_id,
- scaler_state->scaler_users);
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: "
+ "Staged freeing scaler id %d scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, *scaler_id,
+ scaler_state->scaler_users);
*scaler_id = -1;
}
return 0;
@@ -5993,7 +6078,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Planar YUV: src dimensions not met\n");
return -EINVAL;
}
@@ -6006,18 +6092,20 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
(INTEL_GEN(dev_priv) < 11 &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
- DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
- "size is out of scaler range\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "size is out of scaler range\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h,
+ dst_w, dst_h);
return -EINVAL;
}
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
- DRM_DEBUG_KMS("scaler_user index %u.%u: "
- "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
- scaler_state->scaler_users);
+ drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ scaler_state->scaler_users);
return 0;
}
@@ -6036,7 +6124,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
bool need_scaler = false;
- if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ state->pch_pfit.enabled)
need_scaler = true;
return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
@@ -6088,9 +6177,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* check colorkey */
if (plane_state->ckey.flags) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
- intel_plane->base.base.id,
- intel_plane->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
return -EINVAL;
}
@@ -6128,9 +6218,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
break;
/* fall through */
default:
- DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, intel_plane->base.name,
- fb->base.id, fb->format->format);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->format->format);
return -EINVAL;
}
@@ -6157,9 +6248,11 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
if (crtc_state->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
int pfit_w, pfit_h, hscale, vscale;
+ unsigned long irqflags;
int id;
- if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
return;
pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
@@ -6172,14 +6265,21 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
id = scaler_state->scaler_id;
- I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
- PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
- I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
- I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+ PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ crtc_state->pch_pfit.pos);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ crtc_state->pch_pfit.size);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
}
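/*
 * This hunk does more than rename (sketch, not from this patch): the
 * scaler writes switch to the intel_de_write_fw() variants, which skip
 * the per-access forcewake/lock handling, so the patch batches them
 * under dev_priv->uncore.lock.  The shape of that pattern:
 */
static void __maybe_unused write_scaler_window_locked(struct drm_i915_private *dev_priv,
						      enum pipe pipe, int id,
						      u32 pos, u32 size)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	/* raw writes: the caller holds the uncore lock for the batch */
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), pos);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), size);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}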
@@ -6195,12 +6295,15 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
- PF_PIPE_SEL_IVB(pipe));
+ intel_de_write(dev_priv, PF_CTL(pipe),
+ PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
else
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
+ intel_de_write(dev_priv, PF_CTL(pipe),
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe),
+ crtc_state->pch_pfit.pos);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe),
+ crtc_state->pch_pfit.size);
}
}
@@ -6218,25 +6321,26 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* This function is called from post_plane_update, which is run after
* a vblank wait.
*/
- WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+ drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
- IPS_ENABLE | IPS_PCODE_CONTROL));
+ drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
/* Quoting Art Runyan: "it's not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
* so we need to just enable it and continue on.
*/
} else {
- I915_WRITE(IPS_CTL, IPS_ENABLE);
+ intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
/* The bit only becomes 1 in the next vblank, so this wait here
* is essentially intel_wait_for_vblank. If we don't have this
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
- DRM_ERROR("Timed out waiting for IPS enable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for IPS enable\n");
}
}
@@ -6250,17 +6354,19 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
return;
if (IS_BROADWELL(dev_priv)) {
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+ drm_WARN_ON(dev,
+ sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
- DRM_ERROR("Timed out waiting for IPS disable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for IPS disable\n");
} else {
- I915_WRITE(IPS_CTL, 0);
- POSTING_READ(IPS_CTL);
+ intel_de_write(dev_priv, IPS_CTL, 0);
+ intel_de_posting_read(dev_priv, IPS_CTL);
}
/* We need to wait for a vblank before we can disable the plane. */
@@ -6382,13 +6488,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(state, primary);
enum pipe pipe = crtc->pipe;
intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
@@ -6399,8 +6502,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
hsw_enable_ips(new_crtc_state);
- if (new_primary_state)
- intel_fbc_post_update(crtc);
+ intel_fbc_post_update(state, crtc);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
@@ -6415,20 +6517,16 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(state, primary);
enum pipe pipe = crtc->pipe;
if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
hsw_disable_ips(old_crtc_state);
- if (new_primary_state &&
- intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
+ if (intel_fbc_pre_update(state, crtc))
intel_wait_for_vblank(dev_priv, pipe);
/* Display WA 827 */
@@ -6770,7 +6868,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
/*
@@ -6863,7 +6961,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
enum pipe pipe, bool apply)
{
- u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
+ u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
if (apply)
@@ -6871,7 +6969,7 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
else
val &= ~mask;
- I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}
static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
@@ -6890,7 +6988,17 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
val |= MBUS_DBOX_B_CREDIT(8);
}
- I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
+ intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
+}
+
+static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
+ HSW_LINETIME(crtc_state->linetime) |
+ HSW_IPS_LINETIME(crtc_state->ips_linetime));
}
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
@@ -6900,10 +7008,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
val |= HSW_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
static void hsw_crtc_enable(struct intel_atomic_state *state,
@@ -6916,7 +7024,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
bool psl_clkgate_wa;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
intel_encoders_pre_pll_enable(state, crtc);
@@ -6926,9 +7034,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_enable(state, crtc);
- if (intel_crtc_has_dp_encoder(new_crtc_state))
- intel_dp_set_m_n(new_crtc_state, M1_N1);
-
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(new_crtc_state);
@@ -6939,8 +7044,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(cpu_transcoder))
- I915_WRITE(PIPE_MULT(cpu_transcoder),
- new_crtc_state->pixel_multiplier - 1);
+ intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ new_crtc_state->pixel_multiplier - 1);
if (new_crtc_state->has_pch_encoder)
intel_cpu_transcoder_set_m_n(new_crtc_state,
@@ -6977,6 +7082,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) < 9)
intel_disable_primary_plane(new_crtc_state);
+ hsw_set_linetime_wm(new_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
@@ -6989,15 +7096,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 11)
icl_pipe_mbus_enable(crtc);
- /* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_enable_pipe(new_crtc_state);
-
- if (new_crtc_state->has_pch_encoder)
- lpt_pch_enable(state, new_crtc_state);
-
- intel_crtc_vblank_on(new_crtc_state);
-
intel_encoders_enable(state, crtc);
if (psl_clkgate_wa) {
@@ -7023,9 +7121,9 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
/* To avoid upsetting the power well on haswell, only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (old_crtc_state->pch_pfit.enabled) {
- I915_WRITE(PF_CTL(pipe), 0);
- I915_WRITE(PF_WIN_POS(pipe), 0);
- I915_WRITE(PF_WIN_SZ(pipe), 0);
+ intel_de_write(dev_priv, PF_CTL(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
}
@@ -7067,16 +7165,16 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_PORT_SEL_MASK);
temp |= TRANS_DP_PORT_SEL_NONE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
/* disable DPLL_SEL */
- temp = I915_READ(PCH_DPLL_SEL);
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- I915_WRITE(PCH_DPLL_SEL, temp);
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
ilk_fdi_pll_disable(crtc);
@@ -7109,15 +7207,17 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
* The panel fitter should only be adjusted whilst the pipe is disabled,
* according to register description and PRM.
*/
- WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
- I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
- I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
+ intel_de_write(dev_priv, PFIT_PGM_RATIOS,
+ crtc_state->gmch_pfit.pgm_ratios);
+ intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
/* Border color in case we don't scale up to the full screen. Black by
* default, change to something else for debugging. */
- I915_WRITE(BCLRPAT(crtc->pipe), 0);
+ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
@@ -7304,7 +7404,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
if (intel_crtc_has_dp_encoder(new_crtc_state))
@@ -7314,8 +7414,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
- I915_WRITE(CHV_CANVAS(pipe), 0);
+ intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+ intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
}
i9xx_set_pipeconf(new_crtc_state);
@@ -7356,8 +7456,10 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
+ intel_de_write(dev_priv, FP0(crtc->pipe),
+ crtc_state->dpll_hw_state.fp0);
+ intel_de_write(dev_priv, FP1(crtc->pipe),
+ crtc_state->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct intel_atomic_state *state,
@@ -7368,7 +7470,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
i9xx_set_pll_dividers(new_crtc_state);
@@ -7418,9 +7520,9 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
- DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
- I915_READ(PFIT_CONTROL));
- I915_WRITE(PFIT_CONTROL, 0);
+ drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
+ intel_de_read(dev_priv, PFIT_CONTROL));
+ intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
static void i9xx_crtc_disable(struct intel_atomic_state *state,
@@ -7477,6 +7579,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(dev_priv->bw_obj.state);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(dev_priv->cdclk.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
@@ -7500,8 +7604,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
- DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.base.id, crtc->base.name);
return;
}
@@ -7511,19 +7616,21 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
ret = drm_atomic_add_affected_connectors(state, &crtc->base);
- WARN_ON(IS_ERR(temp_crtc_state) || ret);
+ drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
- DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.base.id, crtc->base.name);
crtc->active = false;
crtc->base.enabled = false;
- WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ drm_WARN_ON(&dev_priv->drm,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
crtc_state->uapi.active = false;
crtc_state->uapi.connector_mask = 0;
crtc_state->uapi.encoder_mask = 0;
@@ -7543,8 +7650,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
crtc->enabled_power_domains = 0;
dev_priv->active_pipes &= ~BIT(pipe);
- dev_priv->min_cdclk[pipe] = 0;
- dev_priv->min_voltage_level[pipe] = 0;
+ cdclk_state->min_cdclk[pipe] = 0;
+ cdclk_state->min_voltage_level[pipe] = 0;
+ cdclk_state->active_pipes &= ~BIT(pipe);
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
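/*
 * Per-pipe cdclk bookkeeping moves from loose arrays in
 * drm_i915_private into the atomic intel_cdclk_state object; clearing a
 * pipe's contribution now looks like this (sketch, not from this patch,
 * using the same accessors as the hunk above):
 */
static void __maybe_unused clear_pipe_cdclk_limits(struct drm_i915_private *dev_priv,
						   enum pipe pipe)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);

	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);
}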
@@ -7563,7 +7671,8 @@ int intel_display_suspend(struct drm_device *dev)
state = drm_atomic_helper_suspend(dev);
ret = PTR_ERR_OR_ZERO(state);
if (ret)
- DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
else
dev_priv->modeset_restore_state = state;
return ret;
@@ -7583,13 +7692,13 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.base.id,
- connector->base.name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
if (connector->get_hw_state(connector)) {
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
I915_STATE_WARN(!crtc_state,
"connector enabled without attached crtc\n");
@@ -7632,18 +7741,21 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
- DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
- DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
- pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
return -EINVAL;
} else {
return 0;
@@ -7668,15 +7780,17 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
return 0;
case PIPE_C:
if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
@@ -7687,7 +7801,8 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "fdi link B uses too many lanes to enable link C\n");
return -EINVAL;
}
return 0;
@@ -7701,6 +7816,7 @@ static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
bool needs_recompute = false;
@@ -7713,7 +7829,7 @@ retry:
* Hence the bw of each lane in terms of the mode signal
* is:
*/
- link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
+ link_bw = intel_fdi_link_freq(i915, pipe_config);
fdi_dotclock = adjusted_mode->crtc_clock;
@@ -7731,8 +7847,9 @@ retry:
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
- DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
needs_recompute = true;
pipe_config->bw_constrained = true;
@@ -7774,15 +7891,17 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
return true;
}
-static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
+static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->uapi.crtc->dev);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
+ crtc_state->ips_enabled = false;
+
if (!hsw_crtc_state_ips_capable(crtc_state))
- return false;
+ return 0;
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -7791,18 +7910,27 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
* completely disable it.
*/
if (crtc_state->crc_enabled)
- return false;
+ return 0;
/* IPS should be fine as long as at least one plane is enabled. */
if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
- return false;
+ return 0;
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) &&
- crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
- return false;
+ if (IS_BROADWELL(dev_priv)) {
+ const struct intel_cdclk_state *cdclk_state;
- return true;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ return 0;
+ }
+
+ crtc_state->ips_enabled = true;
+
+ return 0;
}
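/*
 * hsw_compute_ips_config() changes from bool to int because the cdclk
 * state is now fetched through intel_atomic_get_cdclk_state(), which
 * can fail; the decision is reported via crtc_state->ips_enabled and
 * the return value carries only errors.  The general shape (sketch,
 * not from this patch, helper name illustrative):
 */
static int __maybe_unused compute_feature_config(struct intel_crtc_state *crtc_state,
						 struct intel_atomic_state *state)
{
	const struct intel_cdclk_state *cdclk_state;

	crtc_state->ips_enabled = false;	/* default off */

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);	/* propagate failure */

	crtc_state->ips_enabled = true;
	return 0;				/* success, not "enabled" */
}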
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
@@ -7884,9 +8012,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
if (adjusted_mode->crtc_clock > clock_limit) {
- DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
- adjusted_mode->crtc_clock, clock_limit,
- yesno(pipe_config->double_wide));
+ drm_dbg_kms(&dev_priv->drm,
+ "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ adjusted_mode->crtc_clock, clock_limit,
+ yesno(pipe_config->double_wide));
return -EINVAL;
}
@@ -7898,7 +8027,8 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* for output conversion from RGB->YCBCR. So if CTM is already
* applied we can't support YCBCR420 output.
*/
- DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR420 and CTM together are not possible\n");
return -EINVAL;
}
@@ -7910,13 +8040,15 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
*/
if (pipe_config->pipe_src_w & 1) {
if (pipe_config->double_wide) {
- DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Odd pipe source width not supported with double wide pipe\n");
return -EINVAL;
}
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev_priv)) {
- DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Odd pipe source width not supported with dual link LVDS\n");
return -EINVAL;
}
}
@@ -7997,13 +8129,15 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
* indicates as much.
*/
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
+ bool bios_lvds_use_ssc = intel_de_read(dev_priv,
+ PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
- enableddisabled(bios_lvds_use_ssc),
- enableddisabled(dev_priv->vbt.lvds_use_ssc));
+ drm_dbg_kms(&dev_priv->drm,
+ "SSC %s by BIOS, overriding VBT which says %s\n",
+ enableddisabled(bios_lvds_use_ssc),
+ enableddisabled(dev_priv->vbt.lvds_use_ssc));
dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
@@ -8090,10 +8224,11 @@ static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
- I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
- I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+ intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+ intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+ intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
@@ -8119,33 +8254,42 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
enum transcoder transcoder = crtc_state->cpu_transcoder;
if (INTEL_GEN(dev_priv) >= 5) {
- I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
- I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
- I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
+ m_n->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
+ m_n->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
+ m_n->link_n);
/*
* M2_N2 registers are set only if DRRS is supported
* (to make sure the registers are not unnecessarily accessed).
*/
if (m2_n2 && crtc_state->has_drrs &&
transcoder_has_m2_n2(dev_priv, transcoder)) {
- I915_WRITE(PIPE_DATA_M2(transcoder),
- TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
- I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
- I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
- I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
+ TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
+ m2_n2->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
+ m2_n2->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
+ m2_n2->link_n);
}
} else {
- I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
- I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
- I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
}
}
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (m_n == M1_N1) {
dp_m_n = &crtc_state->dp_m_n;
@@ -8158,7 +8302,7 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
*/
dp_m_n = &crtc_state->dp_m2_n2;
} else {
- DRM_ERROR("Unsupported divider value\n");
+ drm_err(&i915->drm, "Unsupported divider value\n");
return;
}
@@ -8212,9 +8356,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
u32 coreclk, reg_val;
/* Enable Refclk */
- I915_WRITE(DPLL(pipe),
- pipe_config->dpll_hw_state.dpll &
- ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
/* No need to actually set up the DPLL with DSI */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
@@ -8314,8 +8457,8 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
int vco;
/* Enable Refclk and SSC */
- I915_WRITE(DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
/* No need to actually set up the DPLL with DSI */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
@@ -8614,27 +8757,22 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
}
if (INTEL_GEN(dev_priv) > 3)
- I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
-
- I915_WRITE(HTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(cpu_transcoder),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(cpu_transcoder),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(cpu_transcoder),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(cpu_transcoder),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
+ vsyncshift);
+
+ intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ intel_de_write(dev_priv, HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
@@ -8642,7 +8780,8 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
* bits. */
if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, VTOTAL(pipe),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
}
@@ -8655,9 +8794,8 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
- I915_WRITE(PIPESRC(pipe),
- ((crtc_state->pipe_src_w - 1) << 16) |
- (crtc_state->pipe_src_h - 1));
+ intel_de_write(dev_priv, PIPESRC(pipe),
+ ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -8670,9 +8808,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
else
- return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+ return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -8683,33 +8821,33 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 tmp;
- tmp = I915_READ(HTOTAL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = I915_READ(HBLANK(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hblank_start =
(tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_hblank_end =
((tmp >> 16) & 0xffff) + 1;
}
- tmp = I915_READ(HSYNC(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
- tmp = I915_READ(VTOTAL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = I915_READ(VBLANK(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vblank_start =
(tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vblank_end =
((tmp >> 16) & 0xffff) + 1;
}
- tmp = I915_READ(VSYNC(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
@@ -8727,7 +8865,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
- tmp = I915_READ(PIPESRC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
@@ -8768,7 +8906,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
+ pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
if (crtc_state->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
@@ -8815,8 +8953,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
- POSTING_READ(PIPECONF(crtc->pipe));
+ intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
+ intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -8833,7 +8971,9 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &intel_limits_i8xx_lvds;
@@ -8846,7 +8986,8 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8868,7 +9009,9 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
if (intel_is_dual_link_lvds(dev_priv))
@@ -8888,7 +9031,8 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8911,7 +9055,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &pnv_limits_lvds;
@@ -8922,7 +9068,8 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8945,7 +9092,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &intel_limits_i9xx_lvds;
@@ -8956,7 +9105,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8970,6 +9120,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_chv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8977,7 +9128,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8991,6 +9142,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_vlv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8998,7 +9150,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -9025,7 +9177,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
if (!i9xx_has_pfit(dev_priv))
return;
- tmp = I915_READ(PFIT_CONTROL);
+ tmp = intel_de_read(dev_priv, PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;
@@ -9039,7 +9191,8 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
}
pipe_config->gmch_pfit.control = tmp;
- pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+ pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
+ PFIT_PGM_RATIOS);
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9087,11 +9240,11 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- WARN_ON(pipe != crtc->pipe);
+ drm_WARN_ON(dev, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- DRM_DEBUG_KMS("failed to alloc fb\n");
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
return;
}
@@ -9099,7 +9252,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
- val = I915_READ(DSPCNTR(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
@@ -9120,34 +9273,37 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->format = drm_format_info(fourcc);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = I915_READ(DSPOFFSET(i9xx_plane));
- base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+ offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
} else if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
- offset = I915_READ(DSPTILEOFF(i9xx_plane));
+ offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i9xx_plane));
else
- offset = I915_READ(DSPLINOFF(i9xx_plane));
- base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+ offset = intel_de_read(dev_priv,
+ DSPLINOFF(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
} else {
- base = I915_READ(DSPADDR(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
}
plane_config->base = base;
- val = I915_READ(PIPESRC(pipe));
+ val = intel_de_read(dev_priv, PIPESRC(pipe));
fb->width = ((val >> 16) & 0xfff) + 1;
fb->height = ((val >> 0) & 0xfff) + 1;
- val = I915_READ(DSPSTRIDE(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
plane_config->fb = intel_fb;
}
@@ -9192,11 +9348,12 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
if (tmp & PIPEMISC_YUV420_ENABLE) {
/* We support 4:2:0 in full blend mode only */
- WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
@@ -9214,7 +9371,7 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 tmp;
- tmp = I915_READ(DSPCNTR(i9xx_plane));
+ tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
if (tmp & DISPPLANE_GAMMA_ENABLE)
crtc_state->gamma_enable = true;
@@ -9245,7 +9402,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = false;
- tmp = I915_READ(PIPECONF(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
goto out;
@@ -9274,7 +9431,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
PIPECONF_GAMMA_MODE_SHIFT;
if (IS_CHERRYVIEW(dev_priv))
- pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+ pipe_config->cgm_mode = intel_de_read(dev_priv,
+ CGM_PIPE_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
@@ -9292,14 +9450,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
else
- tmp = I915_READ(DPLL_MD(crtc->pipe));
+ tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- tmp = I915_READ(DPLL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
@@ -9309,10 +9467,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
- pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+ pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
+ DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
- pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+ pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
+ FP0(crtc->pipe));
+ pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
+ FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
@@ -9381,8 +9542,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- u32 temp = I915_READ(PCH_DPLL(i));
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
continue;
@@ -9394,15 +9555,16 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
- has_panel, has_lvds, has_ck505, using_ssc_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- val = I915_READ(PCH_DREF_CONTROL);
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
/* As we must carefully and slowly disable/enable each source in turn,
* compute the final state we want first and check if we need to
@@ -9454,14 +9616,14 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- DRM_DEBUG_KMS("Using SSC on panel\n");
+ drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else
val &= ~DREF_SSC1_ENABLE;
/* Get SSC going before enabling the outputs */
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -9469,30 +9631,31 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- DRM_DEBUG_KMS("Using SSC on eDP\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
} else {
- DRM_DEBUG_KMS("Disabling CPU source output\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Turn off CPU output */
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
if (!using_ssc_source) {
- DRM_DEBUG_KMS("Disabling SSC source\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
@@ -9501,8 +9664,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Turn off SSC1 */
val &= ~DREF_SSC1_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
}
}
@@ -9514,21 +9677,21 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
- if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+ if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- DRM_ERROR("FDI mPHY reset assert timeout\n");
+ drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
- tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
- if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+ if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
@@ -9617,10 +9780,11 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
{
u32 reg, tmp;
- if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+ if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ "FDI requires downspread\n"))
with_spread = true;
- if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
- with_fdi, "LP PCH doesn't have FDI\n"))
+ if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
@@ -9714,10 +9878,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
u32 tmp;
int idx = BEND_IDX(steps);
- if (WARN_ON(steps % 5 != 0))
+ if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
return;
- if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
mutex_lock(&dev_priv->sb_lock);
@@ -9740,8 +9904,8 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 ctl = I915_READ(SPLL_CTL);
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
@@ -9760,8 +9924,8 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 ctl = I915_READ(WRPLL_CTL(id));
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
@@ -9810,17 +9974,17 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
dev_priv->pch_ssc_use = 0;
if (spll_uses_pch_ssc(dev_priv)) {
- DRM_DEBUG_KMS("SPLL using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
@@ -9885,8 +10049,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* This would end up with an odd purple hue over
* the entire display. Make sure we don't do it.
*/
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
@@ -9898,8 +10062,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(PIPECONF(pipe), val);
- POSTING_READ(PIPECONF(pipe));
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
@@ -9921,8 +10085,8 @@ static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
- I915_WRITE(PIPECONF(cpu_transcoder), val);
- POSTING_READ(PIPECONF(cpu_transcoder));
+ intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
@@ -9965,7 +10129,10 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
BIT(PLANE_CURSOR))) == 0)
val |= PIPEMISC_HDR_MODE_PRECISION;
- I915_WRITE(PIPEMISC(crtc->pipe), val);
+ if (INTEL_GEN(dev_priv) >= 12)
+ val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+
+ intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
@@ -9973,7 +10140,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
case PIPEMISC_DITHER_6_BPC:
@@ -10126,8 +10293,9 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
refclk = dev_priv->vbt.lvds_ssc_freq;
}
@@ -10149,15 +10317,17 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
ilk_compute_dpll(crtc, crtc_state, NULL);
if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
- DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
}
@@ -10171,12 +10341,12 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
- m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
- m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
- m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+ m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
+ m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
+ m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
- m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+ m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
+ m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
@@ -10189,30 +10359,38 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
if (INTEL_GEN(dev_priv) >= 5) {
- m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
- m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
- m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+ m_n->link_m = intel_de_read(dev_priv,
+ PIPE_LINK_M1(transcoder));
+ m_n->link_n = intel_de_read(dev_priv,
+ PIPE_LINK_N1(transcoder));
+ m_n->gmch_m = intel_de_read(dev_priv,
+ PIPE_DATA_M1(transcoder))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
- m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+ m_n->gmch_n = intel_de_read(dev_priv,
+ PIPE_DATA_N1(transcoder));
+ m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
- m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
- m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
- m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
+ m2_n2->link_m = intel_de_read(dev_priv,
+ PIPE_LINK_M2(transcoder));
+ m2_n2->link_n = intel_de_read(dev_priv,
+ PIPE_LINK_N2(transcoder));
+ m2_n2->gmch_m = intel_de_read(dev_priv,
+ PIPE_DATA_M2(transcoder))
& ~TU_SIZE_MASK;
- m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
- m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+ m2_n2->gmch_n = intel_de_read(dev_priv,
+ PIPE_DATA_N2(transcoder));
+ m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
} else {
- m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
- m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
- m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+ m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
+ m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
+ m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
- m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+ m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
+ m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
}
@@ -10247,12 +10425,14 @@ static void skl_get_pfit_config(struct intel_crtc *crtc,
/* find scaler attached to this pipe */
for (i = 0; i < crtc->num_scalers; i++) {
- ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
+ ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
id = i;
pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
- pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+ pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
+ SKL_PS_WIN_POS(crtc->pipe, i));
+ pipe_config->pch_pfit.size = intel_de_read(dev_priv,
+ SKL_PS_WIN_SZ(crtc->pipe, i));
scaler_state->scalers[i].in_use = true;
break;
}
@@ -10284,11 +10464,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- WARN_ON(pipe != crtc->pipe);
+ drm_WARN_ON(dev, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- DRM_DEBUG_KMS("failed to alloc fb\n");
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
return;
}
@@ -10296,7 +10476,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
- val = I915_READ(PLANE_CTL(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
if (INTEL_GEN(dev_priv) >= 11)
pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
@@ -10304,7 +10484,8 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & PLANE_CTL_FORMAT_MASK;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
+ alpha = intel_de_read(dev_priv,
+ PLANE_COLOR_CTL(pipe, plane_id));
alpha &= PLANE_COLOR_ALPHA_MASK;
} else {
alpha = val & PLANE_CTL_ALPHA_MASK;
@@ -10368,16 +10549,16 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
val & PLANE_CTL_FLIP_HORIZONTAL)
plane_config->rotation |= DRM_MODE_REFLECT_X;
- base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
+ base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
- offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
+ offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
- val = I915_READ(PLANE_SIZE(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
fb->height = ((val >> 16) & 0xffff) + 1;
fb->width = ((val >> 0) & 0xffff) + 1;
- val = I915_READ(PLANE_STRIDE(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
@@ -10385,10 +10566,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
plane_config->fb = intel_fb;
return;
@@ -10404,19 +10586,21 @@ static void ilk_get_pfit_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
- tmp = I915_READ(PF_CTL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
if (tmp & PF_ENABLE) {
pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
- pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+ pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
+ PF_WIN_POS(crtc->pipe));
+ pipe_config->pch_pfit.size = intel_de_read(dev_priv,
+ PF_WIN_SZ(crtc->pipe));
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
if (IS_GEN(dev_priv, 7)) {
- WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
- PF_PIPE_SEL_IVB(crtc->pipe));
+ drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
+ PF_PIPE_SEL_IVB(crtc->pipe));
}
}
}
@@ -10441,7 +10625,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
- tmp = I915_READ(PIPECONF(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
goto out;
@@ -10478,18 +10662,19 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
PIPECONF_GAMMA_MODE_SHIFT;
- pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+ pipe_config->csc_mode = intel_de_read(dev_priv,
+ PIPE_CSC_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
- if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+ if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
pipe_config->has_pch_encoder = true;
- tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
@@ -10502,7 +10687,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
*/
pll_id = (enum intel_dpll_id) crtc->pipe;
} else {
- tmp = I915_READ(PCH_DPLL_SEL);
+ tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
pll_id = DPLL_ID_PCH_PLL_B;
else
@@ -10513,8 +10698,8 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_shared_dpll_by_id(dev_priv, pll_id);
pll = pipe_config->shared_dpll;
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
@@ -10552,8 +10737,9 @@ static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
}
}
@@ -10567,10 +10753,10 @@ static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
enum intel_dpll_id id;
u32 temp;
- temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
+ if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10585,24 +10771,25 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
u32 temp;
if (intel_phy_is_combo(dev_priv, phy)) {
- temp = I915_READ(ICL_DPCLKA_CFGCR0) &
+ temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
} else if (intel_phy_is_tc(dev_priv, phy)) {
- u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+ u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
if (clk_sel == DDI_CLK_SEL_MG) {
id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
port));
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
} else {
- WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+ drm_WARN_ON(&dev_priv->drm,
+ clk_sel < DDI_CLK_SEL_TBT_162);
id = DPLL_ID_ICL_TBTPLL;
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
}
} else {
- WARN(1, "Invalid port %x\n", port);
+ drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
return;
}
@@ -10629,7 +10816,7 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
id = DPLL_ID_SKL_DPLL2;
break;
default:
- DRM_ERROR("Incorrect port type\n");
+ drm_err(&dev_priv->drm, "Incorrect port type\n");
return;
}
@@ -10642,10 +10829,10 @@ static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
enum intel_dpll_id id;
u32 temp;
- temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+ temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
id = temp >> (port * 3 + 1);
- if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
+ if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10655,7 +10842,7 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
- u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+ u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
switch (ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
@@ -10723,7 +10910,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
bool force_thru = false;
enum pipe trans_pipe;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(panel_transcoder));
if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
@@ -10738,8 +10926,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to transcoder %s\n",
- transcoder_name(panel_transcoder));
+ drm_WARN(dev, 1,
+ "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
force_thru = true;
@@ -10767,11 +10956,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
/*
* Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
*/
- WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
- enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+ drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10780,7 +10969,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
- tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
}
@@ -10805,7 +10994,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
cpu_transcoder = TRANSCODER_DSI_C;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10825,11 +11014,11 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
break;
/* XXX: this works for video mode only */
- tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
+ tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
if (!(tmp & DPI_ENABLE))
continue;
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
continue;
@@ -10853,7 +11042,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
port = (cpu_transcoder == TRANSCODER_DSI_A) ?
PORT_A : PORT_B;
} else {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 12)
port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
else
@@ -10873,7 +11063,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
pll = pipe_config->shared_dpll;
if (pll) {
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+ drm_WARN_ON(&dev_priv->drm,
+ !pll->info->funcs->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
}
@@ -10883,10 +11074,10 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
* the PCH transcoder is on.
*/
if (INTEL_GEN(dev_priv) < 9 &&
- (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
- tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
@@ -10899,7 +11090,8 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr
{
u32 trans_port_sync, master_select;
- trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+ trans_port_sync = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder));
if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -10943,8 +11135,9 @@ static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
intel_display_power_put(dev_priv, power_domain, trans_wakeref);
}
- WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
+ drm_WARN_ON(&dev_priv->drm,
+ crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
}
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
@@ -10955,6 +11148,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain power_domain;
u64 power_domain_mask;
bool active;
+ u32 tmp;
pipe_config->master_transcoder = INVALID_TRANSCODER;
@@ -10974,7 +11168,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (IS_GEN9_LP(dev_priv) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config,
&power_domain_mask, wakerefs)) {
- WARN_ON(active);
+ drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
@@ -10990,7 +11184,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_src_size(crtc, pipe_config);
if (IS_HASWELL(dev_priv)) {
- u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+ u32 tmp = intel_de_read(dev_priv,
+ PIPECONF(pipe_config->cpu_transcoder));
if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
@@ -11013,12 +11208,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
}
- pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+ pipe_config->gamma_mode = intel_de_read(dev_priv,
+ GAMMA_MODE(crtc->pipe));
- pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+ pipe_config->csc_mode = intel_de_read(dev_priv,
+ PIPE_CSC_MODE(crtc->pipe));
if (INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+ tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
pipe_config->gamma_enable = true;
@@ -11031,8 +11228,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
+ tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
+ pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ pipe_config->ips_linetime =
+ REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
+
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wf) {
@@ -11047,7 +11250,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (hsw_crtc_supports_ips(crtc)) {
if (IS_HASWELL(dev_priv))
- pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
+ pipe_config->ips_enabled = intel_de_read(dev_priv,
+ IPS_CTL) & IPS_ENABLE;
else {
/*
* We cannot readout IPS state on broadwell, set to
@@ -11061,7 +11265,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
- I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ intel_de_read(dev_priv,
+ PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -11150,7 +11355,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
plane_state, 0);
if (src_x != 0 || src_y != 0) {
- DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Arbitrary cursor panning not supported\n");
return -EINVAL;
}
@@ -11181,10 +11387,11 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
int ret;
if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
return -EINVAL;
}
@@ -11255,6 +11462,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
int ret;
ret = intel_check_cursor(crtc_state, plane_state);
@@ -11267,14 +11475,15 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
- DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
+ drm_dbg_kms(&i915->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
switch (fb->pitches[0]) {
case 256:
@@ -11283,8 +11492,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
case 2048:
break;
default:
- DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
- fb->pitches[0]);
+ drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
return -EINVAL;
}
@@ -11322,17 +11531,17 @@ static void i845_update_cursor(struct intel_plane *plane,
if (plane->cursor.base != base ||
plane->cursor.size != size ||
plane->cursor.cntl != cntl) {
- I915_WRITE_FW(CURCNTR(PIPE_A), 0);
- I915_WRITE_FW(CURBASE(PIPE_A), base);
- I915_WRITE_FW(CURSIZE, size);
- I915_WRITE_FW(CURPOS(PIPE_A), pos);
- I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+ intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+ intel_de_write_fw(dev_priv, CURSIZE, size);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
plane->cursor.base = base;
plane->cursor.size = size;
plane->cursor.cntl = cntl;
} else {
- I915_WRITE_FW(CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -11357,7 +11566,7 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+ ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
*pipe = PIPE_A;
@@ -11483,20 +11692,22 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
- DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
+ drm_dbg(&dev_priv->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
return -EINVAL;
}
@@ -11512,7 +11723,8 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -11573,17 +11785,18 @@ static void i9xx_update_cursor(struct intel_plane *plane,
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
if (HAS_CUR_FBC(dev_priv))
- I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
- I915_WRITE_FW(CURCNTR(pipe), cntl);
- I915_WRITE_FW(CURPOS(pipe), pos);
- I915_WRITE_FW(CURBASE(pipe), base);
+ intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+ fbc_ctl);
+ intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
plane->cursor.base = base;
plane->cursor.size = fbc_ctl;
plane->cursor.cntl = cntl;
} else {
- I915_WRITE_FW(CURPOS(pipe), pos);
- I915_WRITE_FW(CURBASE(pipe), base);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -11614,7 +11827,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- val = I915_READ(CURCNTR(plane->pipe));
+ val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
ret = val & MCURSOR_MODE;
@@ -11700,13 +11913,13 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_crtc_state *crtc_state;
int ret, i = -1;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, connector->name,
- encoder->base.id, encoder->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
old->restore_state = NULL;
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+ drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
/*
* Algorithm gets a little messy:
@@ -11753,7 +11966,8 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
* If we didn't find an unused CRTC, don't use any.
*/
if (!crtc) {
- DRM_DEBUG_KMS("no pipe available for load-detect\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no pipe available for load-detect\n");
ret = -ENODEV;
goto fail;
}
@@ -11804,13 +12018,16 @@ found:
if (!ret)
ret = drm_atomic_add_affected_planes(restore_state, crtc);
if (ret) {
- DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to create a copy of old state to restore: %i\n",
+ ret);
goto fail;
}
ret = drm_atomic_commit(state);
if (ret) {
- DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to set mode on load-detect pipe\n");
goto fail;
}
@@ -11843,20 +12060,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
{
struct intel_encoder *intel_encoder =
intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_atomic_state *state = old->restore_state;
int ret;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, connector->name,
- encoder->base.id, encoder->name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
if (!state)
return;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
if (ret)
- DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Couldn't release load detect pipe: %i\n", ret);
drm_atomic_state_put(state);
}
@@ -11921,8 +12140,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7 : 14;
break;
default:
- DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
- "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
return;
}
@@ -11931,7 +12151,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
+ u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
+ LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
@@ -12142,7 +12363,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
was_visible = old_plane_state->uapi.visible;
visible = plane_state->uapi.visible;
- if (!was_crtc_enabled && WARN_ON(was_visible))
+ if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
was_visible = false;
/*
@@ -12168,11 +12389,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- crtc->base.base.id, crtc->base.name,
- plane->base.base.id, plane->base.name,
- was_visible, visible,
- turn_off, turn_on, mode_changed);
+ drm_dbg_atomic(&dev_priv->drm,
+ "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ was_visible, visible,
+ turn_off, turn_on, mode_changed);
if (turn_on) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@@ -12349,8 +12571,9 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
}
if (!linked_state) {
- DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
- hweight8(crtc_state->nv12_planes));
+ drm_dbg_kms(&dev_priv->drm,
+ "Need %d free Y planes for planar YUV\n",
+ hweight8(crtc_state->nv12_planes));
return -EINVAL;
}
@@ -12361,7 +12584,8 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->planar_linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
- DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+ drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
+ linked->base.name, plane->base.name);
/* Copy parameters to slave plane */
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
@@ -12398,120 +12622,74 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static bool
-intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
+static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc)
- continue;
- if (connector->has_tile &&
- connector->tile_h_loc == connector->num_h_tile - 1 &&
- connector->tile_v_loc == connector->num_v_tile - 1)
- return true;
- }
+ if (!crtc_state->hw.enable)
+ return 0;
- return false;
+ return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ adjusted_mode->crtc_clock);
}
-static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
+static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_cdclk_state *cdclk_state)
{
- crtc_state->master_transcoder = INVALID_TRANSCODER;
- crtc_state->sync_mode_slaves_mask = 0;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ cdclk_state->logical.cdclk);
}
-static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
- struct intel_crtc_state *crtc_state,
- int num_tiled_conns)
+static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct drm_connector *master_connector;
- struct drm_connector_list_iter conn_iter;
- struct drm_crtc *master_crtc = NULL;
- struct drm_crtc_state *master_crtc_state;
- struct intel_crtc_state *master_pipe_config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ u16 linetime_wm;
- if (INTEL_GEN(dev_priv) < 11)
+ if (!crtc_state->hw.enable)
return 0;
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
- return 0;
+ linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
+ crtc_state->pixel_rate);
- /*
- * In case of tiled displays there could be one or more slaves but there is
- * only one master. Lets make the CRTC used by the connector corresponding
- * to the last horizonal and last vertical tile a master/genlock CRTC.
- * All the other CRTCs corresponding to other tiles of the same Tile group
- * are the slave CRTCs and hold a pointer to their genlock CRTC.
- * If all tiles not present do not make master slave assignments.
- */
- if (!connector->has_tile ||
- crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
- crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
- num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- reset_port_sync_mode_state(crtc_state);
- return 0;
- }
- /* Last Horizontal and last vertical tile connector is a master
- * Master's crtc state is already populated in slave for port sync
- */
- if (connector->tile_h_loc == connector->num_h_tile - 1 &&
- connector->tile_v_loc == connector->num_v_tile - 1)
- return 0;
+ /* Display WA #1135: BXT:ALL GLK:ALL */
+ if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
+ linetime_wm /= 2;
- /* Loop through all connectors and configure the Slave crtc_state
- * to point to the correct master.
- */
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(master_connector, &conn_iter) {
- struct drm_connector_state *master_conn_state = NULL;
+ return linetime_wm;
+}
- if (!(master_connector->has_tile &&
- master_connector->tile_group->id == connector->tile_group->id))
- continue;
- if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
- master_connector->tile_v_loc != master_connector->num_v_tile - 1)
- continue;
+static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_cdclk_state *cdclk_state;
- master_conn_state = drm_atomic_get_connector_state(&state->base,
- master_connector);
- if (IS_ERR(master_conn_state)) {
- drm_connector_list_iter_end(&conn_iter);
- return PTR_ERR(master_conn_state);
- }
- if (master_conn_state->crtc) {
- master_crtc = master_conn_state->crtc;
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (INTEL_GEN(dev_priv) >= 9)
+ crtc_state->linetime = skl_linetime_wm(crtc_state);
+ else
+ crtc_state->linetime = hsw_linetime_wm(crtc_state);
- if (!master_crtc) {
- DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
- crtc->base.id);
- return -EINVAL;
- }
+ if (!hsw_crtc_supports_ips(crtc))
+ return 0;
- master_crtc_state = drm_atomic_get_crtc_state(&state->base,
- master_crtc);
- if (IS_ERR(master_crtc_state))
- return PTR_ERR(master_crtc_state);
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- master_pipe_config = to_intel_crtc_state(master_crtc_state);
- crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
- master_pipe_config->sync_mode_slaves_mask |=
- BIT(crtc_state->cpu_transcoder);
- DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
- transcoder_name(crtc_state->master_transcoder),
- crtc->base.id,
- master_pipe_config->sync_mode_slaves_mask);
+ crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
+ cdclk_state);
return 0;
}
@@ -12531,7 +12709,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (mode_changed && crtc_state->hw.enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(crtc_state->shared_dpll)) {
+ !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
return ret;
@@ -12551,17 +12729,18 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- ret = 0;
if (dev_priv->display.compute_pipe_wm) {
ret = dev_priv->display.compute_pipe_wm(crtc_state);
if (ret) {
- DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Target pipe watermarks are invalid\n");
return ret;
}
}
if (dev_priv->display.compute_intermediate_wm) {
- if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !dev_priv->display.compute_pipe_wm))
return 0;
/*
@@ -12571,23 +12750,39 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
*/
ret = dev_priv->display.compute_intermediate_wm(crtc_state);
if (ret) {
- DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No valid intermediate pipe watermarks are possible\n");
return ret;
}
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed || crtc_state->update_pipe)
+ if (mode_changed || crtc_state->update_pipe) {
ret = skl_update_scaler_crtc(crtc_state);
- if (!ret)
- ret = intel_atomic_setup_scalers(dev_priv, crtc,
- crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+ if (ret)
+ return ret;
}
- if (HAS_IPS(dev_priv))
- crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
+ if (HAS_IPS(dev_priv)) {
+ ret = hsw_compute_ips_config(crtc_state);
+ if (ret)
+ return ret;
+ }
- return ret;
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ ret = hsw_compute_linetime_wm(state, crtc);
+ if (ret)
+ return ret;
+
+ }
+
+ return 0;
}
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
@@ -12620,6 +12815,7 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
struct intel_crtc_state *pipe_config)
{
struct drm_connector *connector = conn_state->connector;
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
const struct drm_display_info *info = &connector->display_info;
int bpp;
@@ -12641,11 +12837,13 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
}
if (bpp < pipe_config->pipe_bpp) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
- "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
- connector->base.id, connector->name,
- bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+ "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+ connector->base.id, connector->name,
+ bpp, 3 * info->bpc,
+ 3 * conn_state->max_requested_bpc,
+ pipe_config->pipe_bpp);
pipe_config->pipe_bpp = bpp;
}
@@ -12705,10 +12903,13 @@ intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
{
- DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- id, lane_count,
- m_n->gmch_m, m_n->gmch_n,
- m_n->link_m, m_n->link_n, m_n->tu);
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+
+ drm_dbg_kms(&i915->drm,
+ "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->gmch_m, m_n->gmch_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
}
static void
@@ -12784,27 +12985,31 @@ static const char *output_formats(enum intel_output_format format)
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_format_name_buf format_name;
if (!fb) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
- plane->base.base.id, plane->base.name,
- yesno(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ yesno(plane_state->uapi.visible));
return;
}
- DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
- plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->format->format, &format_name),
- yesno(plane_state->uapi.visible));
- DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id);
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->format->format, &format_name),
+ yesno(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id);
if (plane_state->uapi.visible)
- DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst));
+ drm_dbg_kms(&i915->drm,
+ "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
@@ -12818,22 +13023,24 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
char buf[64];
int i;
- DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
- crtc->base.base.id, crtc->base.name,
- yesno(pipe_config->hw.enable), context);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
+ crtc->base.base.id, crtc->base.name,
+ yesno(pipe_config->hw.enable), context);
if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
- yesno(pipe_config->hw.active),
- buf, pipe_config->output_types,
- output_formats(pipe_config->output_format));
+ drm_dbg_kms(&dev_priv->drm,
+ "active: %s, output_types: %s (0x%x), output format: %s\n",
+ yesno(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ output_formats(pipe_config->output_format));
- DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
- transcoder_name(pipe_config->cpu_transcoder),
- pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
@@ -12849,13 +13056,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
&pipe_config->dp_m2_n2);
}
- DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
- pipe_config->has_audio, pipe_config->has_infoframe,
- pipe_config->infoframes.enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
- DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+ drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
+ pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
@@ -12866,50 +13075,59 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
- DRM_DEBUG_KMS("requested mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
- DRM_DEBUG_KMS("adjusted mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
- pipe_config->port_clock,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h,
- pipe_config->pixel_rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
+ pipe_config->port_clock,
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+ pipe_config->pixel_rate);
+
+ drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
if (INTEL_GEN(dev_priv) >= 9)
- DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ drm_dbg_kms(&dev_priv->drm,
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
if (HAS_GMCH(dev_priv))
- DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
+ drm_dbg_kms(&dev_priv->drm,
+ "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
else
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
- enableddisabled(pipe_config->pch_pfit.enabled),
- yesno(pipe_config->pch_pfit.force_thru));
+ drm_dbg_kms(&dev_priv->drm,
+ "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ enableddisabled(pipe_config->pch_pfit.enabled),
+ yesno(pipe_config->pch_pfit.force_thru));
- DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide);
+ drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide);
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
if (IS_CHERRYVIEW(dev_priv))
- DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->cgm_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
else
- DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->csc_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
- DRM_DEBUG_KMS("MST master transcoder: %s\n",
- transcoder_name(pipe_config->mst_master_transcoder));
+ drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
dump_planes:
if (!state)
@@ -12957,24 +13175,21 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
encoder = to_intel_encoder(connector_state->best_encoder);
- WARN_ON(!connector_state->crtc);
+ drm_WARN_ON(dev, !connector_state->crtc);
switch (encoder->type) {
- unsigned int port_mask;
case INTEL_OUTPUT_DDI:
- if (WARN_ON(!HAS_DDI(to_i915(dev))))
+ if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
break;
/* else, fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
- port_mask = 1 << encoder->port;
-
/* the same port mustn't appear more than once */
- if (used_ports & port_mask)
+ if (used_ports & BIT(encoder->port))
ret = false;
- used_ports |= port_mask;
+ used_ports |= BIT(encoder->port);
break;
case INTEL_OUTPUT_DP_MST:
used_mst_ports |=
@@ -13055,15 +13270,6 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
- /*
- * Save the slave bitmask which gets filled for master crtc state during
- * slave atomic check call. For all other CRTCs reset the port sync variables
- * crtc_state->master_transcoder needs to be set to INVALID
- */
- reset_port_sync_mode_state(saved_state);
- if (intel_atomic_is_master_connector(crtc_state))
- saved_state->sync_mode_slaves_mask =
- crtc_state->sync_mode_slaves_mask;
memcpy(crtc_state, saved_state, sizeof(*crtc_state));
kfree(saved_state);
@@ -13078,11 +13284,10 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
struct drm_crtc *crtc = pipe_config->uapi.crtc;
struct drm_atomic_state *state = pipe_config->uapi.state;
- struct intel_encoder *encoder;
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int base_bpp, ret;
- int i, tile_group_id = -1, num_tiled_conns = 0;
+ int base_bpp, ret, i;
bool retry = true;
pipe_config->cpu_transcoder =
@@ -13121,13 +13326,15 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
&pipe_config->pipe_src_h);
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector_state->best_encoder);
+
if (connector_state->crtc != crtc)
continue;
- encoder = to_intel_encoder(connector_state->best_encoder);
-
if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
- DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ drm_dbg_kms(&i915->drm,
+ "rejecting invalid cloning configuration\n");
return -EINVAL;
}
@@ -13152,47 +13359,24 @@ encoder_retry:
drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
- /* Get tile_group_id of tiled connector */
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc == crtc &&
- connector->has_tile) {
- tile_group_id = connector->tile_group->id;
- break;
- }
- }
-
- /* Get total number of tiled connectors in state that belong to
- * this tile group.
- */
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector->has_tile &&
- connector->tile_group->id == tile_group_id)
- num_tiled_conns++;
- }
-
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector_state->best_encoder);
+
if (connector_state->crtc != crtc)
continue;
- ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
- num_tiled_conns);
- if (ret) {
- DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
- ret);
- return ret;
- }
-
- encoder = to_intel_encoder(connector_state->best_encoder);
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
if (ret < 0) {
if (ret != -EDEADLK)
- DRM_DEBUG_KMS("Encoder config failure: %d\n",
- ret);
+ drm_dbg_kms(&i915->drm,
+ "Encoder config failure: %d\n",
+ ret);
return ret;
}
}
@@ -13207,15 +13391,16 @@ encoder_retry:
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- DRM_DEBUG_KMS("CRTC fixup failed\n");
+ drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
return ret;
}
if (ret == RETRY) {
- if (WARN(!retry, "loop in pipe configuration computation\n"))
+ if (drm_WARN(&i915->drm, !retry,
+ "loop in pipe configuration computation\n"))
return -EINVAL;
- DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+ drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
retry = false;
goto encoder_retry;
}
@@ -13226,8 +13411,9 @@ encoder_retry:
*/
pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
!pipe_config->dither_force_disable;
- DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
- base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&i915->drm,
+ "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+ base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
/*
* Make drm_calc_timestamping_constants in
@@ -13238,6 +13424,35 @@ encoder_retry:
return 0;
}
+static int
+intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
+{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector,
+ conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ int ret;
+
+ if (conn_state->crtc != &crtc->base ||
+ !encoder->compute_config_late)
+ continue;
+
+ ret = encoder->compute_config_late(encoder, crtc_state,
+ conn_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;
@@ -13316,16 +13531,17 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
- DRM_DEBUG_KMS("expected:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s infoframe\n", name);
+ drm_dbg_kms(&dev_priv->drm, "expected:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- DRM_DEBUG_KMS("found:\n");
+ drm_dbg_kms(&dev_priv->drm, "found:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
} else {
- DRM_ERROR("mismatch in %s infoframe\n", name);
- DRM_ERROR("expected:\n");
+ drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
+ drm_err(&dev_priv->drm, "expected:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- DRM_ERROR("found:\n");
+ drm_err(&dev_priv->drm, "found:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
}
}
@@ -13334,6 +13550,7 @@ static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name, const char *format, ...)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct va_format vaf;
va_list args;
@@ -13342,11 +13559,12 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
vaf.va = &args;
if (fastset)
- DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
else
- DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
va_end(args);
}
@@ -13382,7 +13600,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
- DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "initial modeset and fastboot not set\n");
ret = false;
}
@@ -13584,7 +13803,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
- PIPE_CONF_CHECK_I(dc3co_exitline);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -13644,10 +13862,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(gamma_enable);
PIPE_CONF_CHECK_BOOL(csc_enable);
+ PIPE_CONF_CHECK_I(linetime);
+ PIPE_CONF_CHECK_I(ips_linetime);
+
bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
if (bp_gamma)
PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
-
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -13703,7 +13923,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
- PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
+ PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
PIPE_CONF_CHECK_I(dsc.compression_enable);
@@ -13737,9 +13957,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
- WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
- "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- fdi_dotclock, dotclock);
+ drm_WARN(&dev_priv->drm,
+ !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
}
}
@@ -13750,12 +13971,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
struct skl_hw_state {
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
- struct skl_ddb_allocation ddb;
struct skl_pipe_wm wm;
} *hw;
- struct skl_ddb_allocation *sw_ddb;
struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ u8 hw_enabled_slices;
const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -13771,14 +13991,14 @@ static void verify_wm_state(struct intel_crtc *crtc,
skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
- skl_ddb_get_hw_state(dev_priv, &hw->ddb);
- sw_ddb = &dev_priv->wm.skl_hw.ddb;
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 &&
- hw->ddb.enabled_slices != sw_ddb->enabled_slices)
- DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
- sw_ddb->enabled_slices,
- hw->ddb.enabled_slices);
+ hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
+ drm_err(&dev_priv->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ dev_priv->enabled_dbuf_slices_mask,
+ hw_enabled_slices);
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
@@ -13793,26 +14013,28 @@ static void verify_wm_state(struct intel_crtc *crtc,
&sw_plane_wm->wm[level]))
continue;
- DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1, level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1, level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
}
if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
&sw_plane_wm->trans_wm)) {
- DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1,
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1,
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
}
/* DDB */
@@ -13820,10 +14042,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
+ drm_err(&dev_priv->drm,
+ "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
@@ -13845,26 +14068,28 @@ static void verify_wm_state(struct intel_crtc *crtc,
&sw_plane_wm->wm[level]))
continue;
- DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
}
if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
&sw_plane_wm->trans_wm)) {
- DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe),
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe),
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
}
/* DDB */
@@ -13872,10 +14097,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe),
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
+ drm_err(&dev_priv->drm,
+ "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
@@ -13919,9 +14145,9 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat
bool enabled = false, found = false;
enum pipe pipe;
- DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ encoder->base.name);
for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
new_conn_state, i) {
@@ -13973,7 +14199,8 @@ verify_crtc_state(struct intel_crtc *crtc,
intel_crtc_state_reset(old_crtc_state, crtc);
old_crtc_state->uapi.state = state;
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
+ crtc->base.name);
active = dev_priv->display.get_pipe_config(crtc, pipe_config);
@@ -14048,7 +14275,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- DRM_DEBUG_KMS("%s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
@@ -14075,11 +14302,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
if (new_crtc_state->hw.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -14108,10 +14335,10 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
}
}
@@ -14135,8 +14362,10 @@ verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
int i;
- for (i = 0; i < dev_priv->num_shared_dpll; i++)
- verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(dev_priv,
+ &dev_priv->dpll.shared_dplls[i],
+ NULL, NULL);
}
static void
@@ -14280,35 +14509,35 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_modeset_checks(struct intel_atomic_state *state)
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+ u8 active_pipes)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- int ret, i;
+ int i;
- /* keep the current setting */
- if (!state->cdclk.force_min_cdclk_changed)
- state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->hw.active)
+ active_pipes |= BIT(crtc->pipe);
+ else
+ active_pipes &= ~BIT(crtc->pipe);
+ }
- state->modeset = true;
- state->active_pipes = dev_priv->active_pipes;
- state->cdclk.logical = dev_priv->cdclk.logical;
- state->cdclk.actual = dev_priv->cdclk.actual;
+ return active_pipes;
+}
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (new_crtc_state->hw.active)
- state->active_pipes |= BIT(crtc->pipe);
- else
- state->active_pipes &= ~BIT(crtc->pipe);
+static int intel_modeset_checks(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
- if (old_crtc_state->hw.active != new_crtc_state->hw.active)
- state->active_pipe_changes |= BIT(crtc->pipe);
- }
+ state->modeset = true;
+ state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
+
+ state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
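+
+ /*
+ * The XOR picks out exactly the pipes whose active state toggled:
+ * e.g. an old mask of 0x3 (pipes A, B) and a new mask of 0x6
+ * (pipes B, C) yield 0x5, i.e. pipe A turned off and pipe C
+ * turned on.
+ */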
if (state->active_pipe_changes) {
- ret = intel_atomic_lock_global_state(state);
+ ret = _intel_atomic_lock_global_state(state);
if (ret)
return ret;
}
@@ -14399,7 +14628,7 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
}
static int intel_atomic_check_planes(struct intel_atomic_state *state,
- bool *need_modeset)
+ bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -14415,8 +14644,9 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
ret = intel_plane_atomic_check(state, plane);
if (ret) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
- plane->base.base.id, plane->base.name);
+ drm_dbg_atomic(&dev_priv->drm,
+ "[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
return ret;
}
}
@@ -14453,8 +14683,11 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
* affected planes are part of the state. We can now
* compute the minimum cdclk for each plane.
*/
- for_each_new_intel_plane_in_state(state, plane, plane_state, i)
- *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -14467,9 +14700,11 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret = intel_crtc_atomic_check(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_atomic(&i915->drm,
+ "[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.base.id, crtc->base.name);
return ret;
}
}
@@ -14494,76 +14729,6 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
return false;
}
-static int
-intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- int ret = 0;
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
-
- if (!connector->has_tile ||
- connector->tile_group->id != tile_grp_id)
- continue;
- conn_state = drm_atomic_get_connector_state(&state->base,
- connector);
- if (IS_ERR(conn_state)) {
- ret = PTR_ERR(conn_state);
- break;
- }
-
- if (!conn_state->crtc)
- continue;
-
- crtc_state = drm_atomic_get_crtc_state(&state->base,
- conn_state->crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- break;
- }
- crtc_state->mode_changed = true;
- ret = drm_atomic_add_affected_connectors(&state->base,
- conn_state->crtc);
- if (ret)
- break;
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return ret;
-}
-
-static int
-intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_connector *connector;
- struct drm_connector_state *old_conn_state, *new_conn_state;
- int i, ret;
-
- if (INTEL_GEN(dev_priv) < 11)
- return 0;
-
- /* If tiled, mark all other tiled CRTCs as needing a modeset */
- for_each_oldnew_connector_in_state(&state->base, connector,
- old_conn_state, new_conn_state, i) {
- if (!connector->has_tile)
- continue;
- if (!intel_connector_needs_modeset(state, connector))
- continue;
-
- ret = intel_modeset_all_tiles(state, connector->tile_group->id);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -14575,6 +14740,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_cdclk_state *new_cdclk_state;
struct intel_crtc *crtc;
int ret, i;
bool any_ms = false;
@@ -14582,8 +14748,8 @@ static int intel_atomic_check(struct drm_device *dev,
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->hw.mode.private_flags !=
- old_crtc_state->hw.mode.private_flags)
+ if (new_crtc_state->uapi.mode.private_flags !=
+ old_crtc_state->uapi.mode.private_flags)
new_crtc_state->uapi.mode_changed = true;
}
@@ -14591,21 +14757,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /**
- * This check adds all the connectors in current state that belong to
- * the same tile group to a full modeset.
- * This function directly sets the mode_changed to true and we also call
- * drm_atomic_add_affected_connectors(). Hence we are not explicitly
- * calling drm_atomic_helper_check_modeset() after this.
- *
- * Fixme: Handle some corner cases where one of the
- * tiled connectors gets disconnected and tile info is lost but since it
- * was previously synced to other conn, we need to add that to the modeset.
- */
- ret = intel_atomic_check_tiled_conns(state);
- if (ret)
- goto fail;
-
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!needs_modeset(new_crtc_state)) {
@@ -14615,18 +14766,26 @@ static int intel_atomic_check(struct drm_device *dev,
continue;
}
- if (!new_crtc_state->uapi.enable) {
- intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
- continue;
- }
-
ret = intel_crtc_prepare_cleared_state(new_crtc_state);
if (ret)
goto fail;
+ if (!new_crtc_state->hw.enable)
+ continue;
+
ret = intel_modeset_pipe_config(new_crtc_state);
if (ret)
goto fail;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
+ continue;
+
+ ret = intel_modeset_pipe_config_late(new_crtc_state);
+ if (ret)
+ goto fail;
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -14656,8 +14815,10 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (is_trans_port_sync_mode(new_crtc_state)) {
- u8 trans = new_crtc_state->sync_mode_slaves_mask |
- BIT(new_crtc_state->master_transcoder);
+ u8 trans = new_crtc_state->sync_mode_slaves_mask;
+
+ if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
+ trans |= BIT(new_crtc_state->master_transcoder);
if (intel_cpu_transcoders_need_modeset(state, trans)) {
new_crtc_state->uapi.mode_changed = true;
@@ -14680,7 +14841,8 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (any_ms && !check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
}
@@ -14689,18 +14851,32 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- any_ms |= state->cdclk.force_min_cdclk_changed;
-
ret = intel_atomic_check_planes(state, &any_ms);
if (ret)
goto fail;
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
+ any_ms = true;
+
+ /*
+ * distrust_bios_wm will force a full dbuf recomputation
+ * but the hardware state will only get updated accordingly
+ * if state->modeset==true. Hence distrust_bios_wm==true &&
+ * state->modeset==false is an invalid combination which
+ * would cause the hardware and software dbuf state to get
+ * out of sync. We must prevent that.
+ *
+ * FIXME clean up this mess and introduce better
+ * state tracking for dbuf.
+ */
+ if (dev_priv->wm.distrust_bios_wm)
+ any_ms = true;
+
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
- } else {
- state->cdclk.logical = dev_priv->cdclk.logical;
}
ret = intel_atomic_check_crtcs(state);
@@ -14806,6 +14982,18 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
ilk_pfit_disable(old_crtc_state);
}
+ /*
+ * The register is supposedly single buffered so perhaps
+ * not 100% correct to do this here. But SKL+ calculate
+ * this based on the adjusted pixel rate, so pfit changes do
+ * affect it and so it must be updated for fastsets.
+ * HSW/BDW only really need this here for fastboot, after
+ * that the value should not change without a full modeset.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_set_linetime_wm(new_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
}
@@ -14848,9 +15036,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
- struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state,
- to_intel_plane(crtc->base.primary));
if (modeset) {
intel_crtc_update_active_timings(new_crtc_state);
@@ -14873,8 +15058,8 @@ static void intel_update_crtc(struct intel_crtc *crtc,
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
intel_fbc_disable(crtc);
- else if (new_plane_state)
- intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+ else
+ intel_fbc_enable(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -14904,7 +15089,8 @@ static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *ne
struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
enum transcoder slave_transcoder;
- WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
+ drm_WARN_ON(&dev_priv->drm,
+ !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
return intel_get_crtc_for_pipe(dev_priv,
@@ -15021,7 +15207,7 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
if (conn_state->crtc == &crtc->base)
break;
}
- intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
+ intel_dp = intel_attached_dp(to_intel_connector(conn));
intel_dp_stop_link_train(intel_dp);
}
@@ -15036,15 +15222,12 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state,
- to_intel_plane(crtc->base.primary));
bool modeset = needs_modeset(new_crtc_state);
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
intel_fbc_disable(crtc);
- else if (new_plane_state)
- intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+ else
+ intel_fbc_enable(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -15068,18 +15251,20 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
struct intel_crtc_state *new_slave_crtc_state =
intel_atomic_get_new_crtc_state(state, slave_crtc);
struct intel_crtc_state *old_slave_crtc_state =
intel_atomic_get_old_crtc_state(state, slave_crtc);
- WARN_ON(!slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
+ drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
- DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
- crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
- slave_crtc->base.name);
+ drm_dbg_kms(&i915->drm,
+ "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
+ crtc->base.base.id, crtc->base.name,
+ slave_crtc->base.base.id, slave_crtc->base.name);
/* Enable seq for slave with DP_TP_CTL left Idle until the
* master is ready
@@ -15109,35 +15294,53 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
state);
}
+static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+ u8 required_slices = state->enabled_dbuf_slices_mask;
+ u8 slices_union = hw_enabled_slices | required_slices;
+
+ /* If a 2nd DBuf slice is required, enable it here */
+ if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, slices_union);
+}
+
+static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+ u8 required_slices = state->enabled_dbuf_slices_mask;
+
+ /* If the 2nd DBuf slice is no longer required, disable it */
+ if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, required_slices);
+}
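+
+/*
+ * A sketch of the intended ordering, as these two helpers read: the
+ * pre-update hook programs the union of the old and new slice masks so
+ * that every slice needed by either configuration stays powered while
+ * the planes are reprogrammed, and the post-update hook then trims back
+ * to exactly the new mask. E.g. going from slice mask 0x1 to 0x2, the
+ * pre-update step programs 0x3 and the post-update step drops it to 0x2.
+ */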
+
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
u8 update_pipes = 0, modeset_pipes = 0;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
if (!new_crtc_state->hw.active)
continue;
/* ignore allocations for crtc's that have been turned off. */
if (!needs_modeset(new_crtc_state)) {
- entries[i] = old_crtc_state->wm.skl.ddb;
- update_pipes |= BIT(crtc->pipe);
+ entries[pipe] = old_crtc_state->wm.skl.ddb;
+ update_pipes |= BIT(pipe);
} else {
- modeset_pipes |= BIT(crtc->pipe);
+ modeset_pipes |= BIT(pipe);
}
}
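+ /*
+ * Note that entries[] is now indexed by pipe rather than by the
+ * crtc's position in the atomic state, so each pipe keeps a stable
+ * slot no matter which subset of crtcs is in the state.
+ */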
- /* If 2nd DBuf slice required, enable it here */
- if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, required_slices);
-
/*
* Whenever the number of active pipes changes, we need to make sure we
* update the pipes in the right order so that their ddb allocations
@@ -15156,10 +15359,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i))
+ entries, I915_MAX_PIPES, pipe))
continue;
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
intel_update_crtc(crtc, state, old_crtc_state,
@@ -15193,10 +15396,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
is_trans_port_sync_slave(new_crtc_state))
continue;
- WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i));
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
if (is_trans_port_sync_mode(new_crtc_state)) {
@@ -15228,20 +15431,17 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
- WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i));
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
- WARN_ON(modeset_pipes);
+ drm_WARN_ON(&dev_priv->drm, modeset_pipes);
- /* If 2nd DBuf slice is no more required disable it */
- if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, required_slices);
}
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -15338,10 +15538,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
- intel_set_cdclk_pre_plane_update(dev_priv,
- &state->cdclk.actual,
- &dev_priv->cdclk.actual,
- state->cdclk.pipe);
+ intel_set_cdclk_pre_plane_update(state);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -15371,16 +15568,17 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_encoders_update_prepare(state);
+ /* Enable all the new DBuf slices we might need */
+ if (state->modeset)
+ icl_dbuf_slice_pre_update(state);
+
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
- intel_set_cdclk_post_plane_update(dev_priv,
- &state->cdclk.actual,
- &dev_priv->cdclk.actual,
- state->cdclk.pipe);
+ intel_set_cdclk_post_plane_update(state);
}
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -15427,6 +15625,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
dev_priv->display.optimize_watermarks(state, crtc);
}
+ /* Disable all the DBuf slices we no longer need */
+ if (state->modeset)
+ icl_dbuf_slice_post_update(state);
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@@ -15570,7 +15772,8 @@ static int intel_atomic_commit(struct drm_device *dev,
ret = intel_atomic_prepare_commit(state);
if (ret) {
- DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ drm_dbg_atomic(&dev_priv->drm,
+ "Preparing state failed with %i\n", ret);
i915_sw_fence_commit(&state->commit_ready);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@@ -15579,6 +15782,8 @@ static int intel_atomic_commit(struct drm_device *dev,
ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
if (!ret)
ret = drm_atomic_helper_swap_state(&state->base, true);
+ if (!ret)
+ intel_atomic_swap_global_state(state);
if (ret) {
i915_sw_fence_commit(&state->commit_ready);
@@ -15594,14 +15799,7 @@ static int intel_atomic_commit(struct drm_device *dev,
if (state->global_state_changed) {
assert_global_state_locked(dev_priv);
- memcpy(dev_priv->min_cdclk, state->min_cdclk,
- sizeof(state->min_cdclk));
- memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
- sizeof(state->min_voltage_level));
dev_priv->active_pipes = state->active_pipes;
- dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
-
- intel_cdclk_swap_state(state);
}
drm_atomic_state_get(&state->base);
@@ -15729,7 +15927,7 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
- * @plane: drm plane to prepare for
+ * @_plane: drm plane to prepare for
* @_new_plane_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
@@ -15740,23 +15938,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
* Returns 0 on success, negative error code on failure.
*/
int
-intel_prepare_plane_fb(struct drm_plane *plane,
+intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_plane_state->hw.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
int ret;
if (old_obj) {
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(intel_state,
- to_intel_crtc(plane->state->crtc));
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state,
+ to_intel_crtc(old_plane_state->hw.crtc));
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -15770,7 +15970,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* can safely continue.
*/
if (needs_modeset(crtc_state)) {
- ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv, NULL,
false, 0,
GFP_KERNEL);
@@ -15780,7 +15980,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
if (new_plane_state->uapi.fence) { /* explicit fencing */
- ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
@@ -15807,12 +16007,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (!new_plane_state->uapi.fence) { /* implicit fencing */
struct dma_fence *fence;
- ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
obj->base.resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
- return ret;
+ goto unpin_fb;
fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
@@ -15833,12 +16033,17 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- if (!intel_state->rps_interactive) {
+ if (!state->rps_interactive) {
intel_rps_mark_interactive(&dev_priv->gt.rps, true);
- intel_state->rps_interactive = true;
+ state->rps_interactive = true;
}
return 0;
+
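+/*
+ * Late failures must unwind the fb pin taken earlier in this function
+ * (the pin itself is outside this hunk), otherwise the pin would be
+ * leaked on the error path.
+ */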
+unpin_fb:
+ intel_plane_unpin_fb(new_plane_state);
+
+ return ret;
}
/**
@@ -15854,13 +16059,17 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
{
struct intel_plane_state *old_plane_state =
to_intel_plane_state(_old_plane_state);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(old_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+
+ if (!obj)
+ return;
- if (intel_state->rps_interactive) {
+ if (state->rps_interactive) {
intel_rps_mark_interactive(&dev_priv->gt.rps, false);
- intel_state->rps_interactive = false;
+ state->rps_interactive = false;
}
/* Should only be called after a successful intel_prepare_plane_fb()! */
@@ -16029,6 +16238,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
new_plane_state->uapi.crtc_w = crtc_w;
new_plane_state->uapi.crtc_h = crtc_h;
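+ /*
+ * The legacy cursor fast path builds the uapi state by hand and skips
+ * the full atomic check that would normally mirror it into the hw
+ * state, so (as the uapi/hw split seems to require) the copy is done
+ * explicitly here.
+ */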
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
+
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
old_plane_state, new_plane_state);
if (ret)
@@ -16113,7 +16324,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u32 *formats;
int num_formats;
int ret, zpos;
@@ -16194,18 +16404,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
- possible_crtcs = BIT(pipe);
-
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -16247,7 +16455,6 @@ static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- unsigned int possible_crtcs;
struct intel_plane *cursor;
int ret, zpos;
@@ -16280,10 +16487,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- possible_crtcs, &intel_cursor_plane_funcs,
+ 0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
@@ -16328,6 +16533,7 @@ static const struct drm_crtc_funcs bdw_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = bdw_enable_vblank,
.disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs ilk_crtc_funcs = {
@@ -16336,6 +16542,7 @@ static const struct drm_crtc_funcs ilk_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = ilk_enable_vblank,
.disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs g4x_crtc_funcs = {
@@ -16344,6 +16551,7 @@ static const struct drm_crtc_funcs g4x_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = i965_enable_vblank,
.disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i965_crtc_funcs = {
@@ -16352,6 +16560,7 @@ static const struct drm_crtc_funcs i965_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i965_enable_vblank,
.disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
@@ -16360,6 +16569,7 @@ static const struct drm_crtc_funcs i915gm_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915gm_enable_vblank,
.disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i915_crtc_funcs = {
@@ -16368,6 +16578,7 @@ static const struct drm_crtc_funcs i915_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i8xx_enable_vblank,
.disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
@@ -16376,6 +16587,7 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
/* no hw vblank counter */
.enable_vblank = i8xx_enable_vblank,
.disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static struct intel_crtc *intel_crtc_alloc(void)
@@ -16405,6 +16617,18 @@ static void intel_crtc_free(struct intel_crtc *crtc)
kfree(crtc);
}
+static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ plane->pipe);
+
+ plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+ }
+}
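
possible_crtcs is a per-plane bitmask of CRTC indices, and the helper just added defers filling it until all CRTCs have been registered, so the mask can be derived from the plane's fixed pipe. The drm core helper it relies on is, as defined in include/drm/drm_crtc.h, essentially a one-liner:

	static inline u32 drm_crtc_mask(const struct drm_crtc *crtc)
	{
		return 1 << drm_crtc_index(crtc);	/* bit = CRTC registration order */
	}

which is why the plane-create paths above can now pass 0 for possible_crtcs at init time.
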
+
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary, *cursor;
@@ -16483,7 +16707,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_color_init(crtc);
- WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
+ intel_crtc_crc_init(crtc);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
@@ -16543,10 +16769,10 @@ static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
if (!IS_MOBILE(dev_priv))
return false;
- if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+ if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
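
From here on, most of the churn is the mechanical I915_READ()/I915_WRITE() to intel_de_read()/intel_de_write() conversion. The display-engine accessors are thin wrappers that make the device pointer explicit instead of relying on an implicit dev_priv in scope; a sketch of the wrapper shape (see intel_de.h for the authoritative definitions):

	static inline u32 intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
	{
		return intel_uncore_read(&i915->uncore, reg);	/* same MMIO path underneath */
	}

	static inline void intel_de_write(struct drm_i915_private *i915,
					  i915_reg_t reg, u32 val)
	{
		intel_uncore_write(&i915->uncore, reg, val);
	}
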
@@ -16561,11 +16787,11 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return false;
if (HAS_PCH_LPT_H(dev_priv) &&
- I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@@ -16591,10 +16817,10 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
pps_num = 1;
for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- u32 val = I915_READ(PP_CONTROL(pps_idx));
+ u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
- I915_WRITE(PP_CONTROL(pps_idx), val);
+ intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
}
}
@@ -16674,14 +16900,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* On SKL pre-D0 the strap isn't connected, so we assume
* it's there.
*/
- found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_GEN9_BC(dev_priv))
intel_ddi_init(dev_priv, PORT_A);
/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
* register */
- found = I915_READ(SFUSE_STRAP);
+ found = intel_de_read(dev_priv, SFUSE_STRAP);
if (found & SFUSE_STRAP_DDIB_DETECTED)
intel_ddi_init(dev_priv, PORT_B);
@@ -16714,25 +16940,25 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (ilk_has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
- if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+ if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
if (!found)
intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
- if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+ if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
}
- if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+ if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
- if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+ if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
- if (I915_READ(PCH_DP_C) & DP_DETECTED)
+ if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
- if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
@@ -16757,16 +16983,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
*/
has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
- if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+ if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
- if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+ if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
if (IS_CHERRYVIEW(dev_priv)) {
@@ -16775,9 +17001,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
@@ -16793,11 +17019,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_crt_init(dev_priv);
- if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
- DRM_DEBUG_KMS("probing SDVOB\n");
+ if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
if (!found && IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "probing HDMI on SDVOB\n");
intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
}
@@ -16807,22 +17034,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/* Before G4X, SDVOC doesn't have its own detect register */
- if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
- DRM_DEBUG_KMS("probing SDVOC\n");
+ if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
}
- if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
+ if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
if (IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "probing HDMI on SDVOC\n");
intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
}
if (IS_G4X(dev_priv))
intel_dp_init(dev_priv, DP_C, PORT_C);
}
- if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
+ if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
intel_dp_init(dev_priv, DP_D, PORT_D);
if (SUPPORTS_TV(dev_priv))
@@ -16864,9 +17092,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
unsigned int *handle)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
if (obj->userptr.mm) {
- DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
+ drm_dbg(&i915->drm,
+ "attempting to use a userptr for a framebuffer, denied\n");
return -EINVAL;
}
@@ -16920,14 +17150,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (tiling != I915_TILING_NONE &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode doesn't match fb modifier\n");
goto err;
}
} else {
if (tiling == I915_TILING_X) {
mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
} else if (tiling == I915_TILING_Y) {
- DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No Y tiling for legacy addfb\n");
goto err;
}
}
@@ -16937,10 +17169,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->modifier[0])) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name),
- mode_cmd->modifier[0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
+ mode_cmd->modifier[0]);
goto err;
}
@@ -16950,17 +17183,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (INTEL_GEN(dev_priv) < 4 &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
goto err;
}
max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > max_stride) {
- DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
- "tiled" : "linear",
- mode_cmd->pitches[0], max_stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s pitch (%u) must be at most %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
+ "tiled" : "linear",
+ mode_cmd->pitches[0], max_stride);
goto err;
}
@@ -16969,15 +17204,17 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* the fb pitch and fence stride match.
*/
if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
goto err;
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0) {
- DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
- mode_cmd->offsets[0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
goto err;
}
@@ -16987,14 +17224,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
u32 stride_alignment;
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
- DRM_DEBUG_KMS("bad plane %d handle\n", i);
+ drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
+ i);
goto err;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
if (fb->pitches[i] & (stride_alignment - 1)) {
- DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
- i, fb->pitches[i], stride_alignment);
+ drm_dbg_kms(&dev_priv->drm,
+ "plane %d pitch (%d) must be at least %u byte aligned\n",
+ i, fb->pitches[i], stride_alignment);
goto err;
}
@@ -17002,9 +17241,10 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
- DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
- i,
- fb->pitches[i], ccs_aux_stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
goto err;
}
}
@@ -17018,7 +17258,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
+ drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
goto err;
}
@@ -17048,17 +17288,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
-static void intel_atomic_state_free(struct drm_atomic_state *state)
-{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-
- drm_atomic_state_default_release(state);
-
- i915_sw_fence_fini(&intel_state->commit_ready);
-
- kfree(state);
-}
-
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -17290,9 +17519,36 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->cdclk.obj.state);
+
intel_update_cdclk(i915);
- intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
- i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
+ intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+}
+
+static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, state->dev) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ drm_for_each_plane(plane, state->dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
}
/*
@@ -17305,9 +17561,8 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-static void sanitize_watermarks(struct drm_device *dev)
+static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
struct intel_crtc *crtc;
@@ -17320,26 +17575,17 @@ static void sanitize_watermarks(struct drm_device *dev)
if (!dev_priv->display.optimize_watermarks)
return;
- /*
- * We need to hold connection_mutex before calling duplicate_state so
- * that the connector loop is protected.
- */
- drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- } else if (WARN_ON(ret)) {
- goto fail;
- }
-
- state = drm_atomic_helper_duplicate_state(dev, &ctx);
- if (WARN_ON(IS_ERR(state)))
- goto fail;
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
intel_state = to_intel_atomic_state(state);
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
@@ -17348,22 +17594,13 @@ retry:
if (!HAS_GMCH(dev_priv))
intel_state->skip_intermediate_wm = true;
- ret = intel_atomic_check(dev, state);
- if (ret) {
- /*
- * If we fail here, it means that the hardware appears to be
- * programmed in a way that shouldn't be possible, given our
- * understanding of watermark requirements. This might mean a
- * mistake in the hardware readout code or a mistake in the
- * watermark calculations for a given platform. Raise a WARN
- * so that this is noticeable.
- *
- * If this actually happens, we'll have to just leave the
- * BIOS-programmed watermarks untouched and hope for the best.
- */
- WARN(true, "Could not determine valid watermarks for inherited state\n");
- goto put_state;
- }
+ ret = sanitize_watermarks_add_affected(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check(&dev_priv->drm, state);
+ if (ret)
+ goto fail;
/* Write calculated watermark values back */
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
@@ -17373,9 +17610,29 @@ retry:
to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
-put_state:
- drm_atomic_state_put(state);
fail:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
+
+ drm_atomic_state_put(state);
+
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
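
The rewritten sanitize_watermarks() now follows the standard drm modeset-lock retry idiom: run the work, and on -EDEADLK clear the atomic state, back off until the contended lock is free, and retry. In outline (a sketch; do_atomic_work() stands in for the add-affected/check/write-back steps above):

	drm_modeset_acquire_init(&ctx, 0);
retry:
	state->acquire_ctx = &ctx;

	ret = do_atomic_work(state);		/* may fail with -EDEADLK on contention */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);	/* drop the half-built state */
		drm_modeset_backoff(&ctx);	/* wait for the blocking lock holder */
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
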
@@ -17384,7 +17641,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
if (IS_GEN(dev_priv, 5)) {
u32 fdi_pll_clk =
- I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+ intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
@@ -17393,7 +17650,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
return;
}
- DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+ drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}
static int intel_initial_commit(struct drm_device *dev)
@@ -17476,6 +17733,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
struct drm_mode_config *mode_config = &i915->drm.mode_config;
drm_mode_config_init(&i915->drm);
+ INIT_LIST_HEAD(&i915->global_obj_list);
mode_config->min_width = 0;
mode_config->min_height = 0;
@@ -17517,11 +17775,31 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
}
}
-int intel_modeset_init(struct drm_i915_private *i915)
+static void intel_mode_config_cleanup(struct drm_i915_private *i915)
+{
+ intel_atomic_global_obj_cleanup(i915);
+ drm_mode_config_cleanup(&i915->drm);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+ if (plane_config->fb) {
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+
+ /* We may only have the stub and not a full framebuffer */
+ if (drm_framebuffer_read_refcount(fb))
+ drm_framebuffer_put(fb);
+ else
+ kfree(fb);
+ }
+
+ if (plane_config->vma)
+ i915_vma_put(plane_config->vma);
+}
+
+/* part #1: call before irq install */
+int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
- struct drm_device *dev = &i915->drm;
- enum pipe pipe;
- struct intel_crtc *crtc;
int ret;
i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
@@ -17530,6 +17808,10 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_mode_config_init(i915);
+ ret = intel_cdclk_init(i915);
+ if (ret)
+ return ret;
+
ret = intel_bw_init(i915);
if (ret)
return ret;
@@ -17542,26 +17824,38 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_fbc_init(i915);
+ return 0;
+}
+
+/* part #2: call after irq install */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
+
intel_init_pm(i915);
intel_panel_sanitize_ssc(i915);
intel_gmbus_setup(i915);
- DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_NUM_PIPES(i915),
- INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+ drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
for_each_pipe(i915, pipe) {
ret = intel_crtc_init(i915, pipe);
if (ret) {
- drm_mode_config_cleanup(dev);
+ intel_mode_config_cleanup(i915);
return ret;
}
}
}
+ intel_plane_possible_crtcs_init(i915);
intel_shared_dpll_init(dev);
intel_update_fdi_pll_freq(i915);
@@ -17601,6 +17895,8 @@ int intel_modeset_init(struct drm_i915_private *i915)
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, &plane_config);
+
+ plane_config_fini(&plane_config);
}
/*
@@ -17609,7 +17905,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(i915))
- sanitize_watermarks(dev);
+ sanitize_watermarks(i915);
/*
* Force all active planes to recompute their states. So that on
@@ -17619,7 +17915,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
*/
ret = intel_initial_commit(dev);
if (ret)
- DRM_DEBUG_KMS("Initial commit in probe failed.\n");
+ drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");
return 0;
}
@@ -17638,10 +17934,12 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 dpll, fp;
int i;
- WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
+ drm_WARN_ON(&dev_priv->drm,
+ i9xx_calc_dpll_params(48000, &clock) != 25154);
- DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
- pipe_name(pipe), clock.vco, clock.dot);
+ drm_dbg_kms(&dev_priv->drm,
+ "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
+ pipe_name(pipe), clock.vco, clock.dot);
fp = i9xx_dpll_compute_fp(&clock);
dpll = DPLL_DVO_2X_MODE |
@@ -17651,27 +17949,27 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- I915_WRITE(FP0(pipe), fp);
- I915_WRITE(FP1(pipe), fp);
+ intel_de_write(dev_priv, FP0(pipe), fp);
+ intel_de_write(dev_priv, FP1(pipe), fp);
- I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
- I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
- I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
- I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
- I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
- I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
- I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
+ intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
+ intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
+ intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
- I915_WRITE(DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -17679,17 +17977,18 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
*
* So write it again.
*/
- I915_WRITE(DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
/* We do this three times for luck */
for (i = 0; i < 3 ; i++) {
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150); /* wait for warmup */
}
- I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
- POSTING_READ(PIPECONF(pipe));
+ intel_de_write(dev_priv, PIPECONF(pipe),
+ PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
intel_wait_for_pipe_scanline_moving(crtc);
}
@@ -17698,22 +17997,30 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
- pipe_name(pipe));
-
- WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
- WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
-
- I915_WRITE(PIPECONF(pipe), 0);
- POSTING_READ(PIPECONF(pipe));
+ drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
+ pipe_name(pipe));
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
+
+ intel_de_write(dev_priv, PIPECONF(pipe), 0);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
intel_wait_for_pipe_scanline_stopped(crtc);
- I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
static void
@@ -17736,8 +18043,9 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
if (pipe == crtc->pipe)
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
- plane->base.base.id, plane->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
@@ -17787,18 +18095,18 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
if (transcoder_is_dsi(cpu_transcoder))
return;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
val |= HSW_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
} else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~PIPECONF_FRAME_START_DELAY_MASK;
val |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
if (!crtc_state->has_pch_encoder)
@@ -17808,19 +18116,19 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
} else {
enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -17852,9 +18160,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
* gamma and CSC to match how we program our planes.
*/
if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE |
- SKL_BOTTOM_COLOR_CSC_ENABLE);
+ intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
+ SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
}
/* Adjust the state of the output pipe according to whether we
@@ -17926,16 +18233,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+ pipe_name(crtc->pipe));
has_active_crtc = false;
}
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ encoder->base.name);
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
@@ -17943,9 +18252,10 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (crtc_state) {
struct drm_encoder *best_encoder;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
/* avoid oopsing in case the hooks consult best_encoder */
best_encoder = connector->base.state->best_encoder;
@@ -17998,9 +18308,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
intel_set_plane_visible(crtc_state, plane_state, visible);
- DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
- plane->base.base.id, plane->base.name,
- enableddisabled(visible), pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ enableddisabled(visible), pipe_name(pipe));
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -18014,14 +18325,14 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(dev_priv->cdclk.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- int i;
-
- dev_priv->active_pipes = 0;
+ u8 active_pipes = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -18038,41 +18349,19 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = crtc_state->hw.active;
if (crtc_state->hw.active)
- dev_priv->active_pipes |= BIT(crtc->pipe);
+ active_pipes |= BIT(crtc->pipe);
- DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
- crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc_state->hw.active));
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ enableddisabled(crtc_state->hw.active));
}
- readout_plane_state(dev_priv);
-
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
- pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
- &pll->state.hw_state);
-
- if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- pll->wakeref = intel_display_power_get(dev_priv,
- POWER_DOMAIN_DPLL_DC_OFF);
- }
-
- pll->state.crtc_mask = 0;
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (crtc_state->hw.active &&
- crtc_state->shared_dpll == pll)
- pll->state.crtc_mask |= 1 << crtc->pipe;
- }
- pll->active_mask = pll->state.crtc_mask;
+ readout_plane_state(dev_priv);
- DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->info->name, pll->state.crtc_mask, pll->on);
- }
+ intel_dpll_readout_hw_state(dev_priv);
for_each_intel_encoder(dev, encoder) {
pipe = 0;
@@ -18089,10 +18378,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->base.crtc = NULL;
}
- DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- enableddisabled(encoder->base.crtc),
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ enableddisabled(encoder->base.crtc),
+ pipe_name(pipe));
}
drm_connector_list_iter_begin(dev, &conn_iter);
@@ -18103,7 +18393,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.dpms = DRM_MODE_DPMS_ON;
- encoder = connector->encoder;
+ encoder = intel_attached_encoder(connector);
connector->base.encoder = &encoder->base;
crtc = to_intel_crtc(encoder->base.crtc);
@@ -18124,9 +18414,10 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id, connector->base.name,
- enableddisabled(connector->base.encoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id, connector->base.name,
+ enableddisabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
@@ -18190,19 +18481,20 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state->min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
- plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
}
if (crtc_state->hw.active) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (WARN_ON(min_cdclk < 0))
+ if (drm_WARN_ON(dev, min_cdclk < 0))
min_cdclk = 0;
}
- dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
- dev_priv->min_voltage_level[crtc->pipe] =
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ cdclk_state->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
intel_bw_crtc_update(bw_state, crtc_state);
@@ -18241,53 +18533,55 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
* Also known as Wa_14010480278.
*/
if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
- DARBF_GATING_DIS);
+ intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
+ intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
if (IS_HASWELL(dev_priv)) {
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ intel_de_write(dev_priv, CHICKEN_PAR1_1,
+ intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
}
}
static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
enum port port, i915_reg_t hdmi_reg)
{
- u32 val = I915_READ(hdmi_reg);
+ u32 val = intel_de_read(dev_priv, hdmi_reg);
if (val & SDVO_ENABLE ||
(val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
return;
- DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for HDMI %c\n",
+ port_name(port));
val &= ~SDVO_PIPE_SEL_MASK;
val |= SDVO_PIPE_SEL(PIPE_A);
- I915_WRITE(hdmi_reg, val);
+ intel_de_write(dev_priv, hdmi_reg, val);
}
static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
enum port port, i915_reg_t dp_reg)
{
- u32 val = I915_READ(dp_reg);
+ u32 val = intel_de_read(dev_priv, dp_reg);
if (val & DP_PORT_EN ||
(val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
return;
- DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for DP %c\n",
+ port_name(port));
val &= ~DP_PIPE_SEL_MASK;
val |= DP_PIPE_SEL(PIPE_A);
- I915_WRITE(dp_reg, val);
+ intel_de_write(dev_priv, dp_reg, val);
}
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
@@ -18324,7 +18618,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
- int i;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
@@ -18377,18 +18670,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_update_connector_atomic_state(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- if (!pll->on || pll->active_mask)
- continue;
-
- DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
- pll->info->name);
-
- pll->info->funcs->disable(dev_priv, pll);
- pll->on = false;
- }
+ intel_dpll_sanitize_state(dev_priv);
if (IS_G4X(dev_priv)) {
g4x_wm_get_hw_state(dev_priv);
@@ -18408,7 +18690,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
u64 put_domains;
put_domains = modeset_get_crtc_power_domains(crtc_state);
- if (WARN_ON(put_domains))
+ if (drm_WARN_ON(dev, put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -18444,7 +18726,8 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
if (state)
drm_atomic_state_put(state);
}
@@ -18467,21 +18750,19 @@ static void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
+/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
flush_workqueue(i915->flip_wq);
flush_workqueue(i915->modeset_wq);
flush_work(&i915->atomic_helper.free_work);
- WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
-
- /*
- * Interrupts and polling as the first thing to avoid creating havoc.
- * Too much stuff here (turning of connectors, ...) would
- * experience fancy races otherwise.
- */
- intel_irq_uninstall(i915);
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
+}
+/* part #2: call after irq uninstall */
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
+{
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -18507,14 +18788,12 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
intel_hdcp_component_fini(i915);
- drm_mode_config_cleanup(&i915->drm);
+ intel_mode_config_cleanup(i915);
intel_overlay_cleanup(i915);
intel_gmbus_teardown(i915);
- intel_bw_cleanup(i915);
-
destroy_workqueue(i915->flip_wq);
destroy_workqueue(i915->modeset_wq);
@@ -18523,6 +18802,15 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+static bool
+has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+{
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return HAS_TRANSCODER_EDP(dev_priv);
+ else
+ return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
+}
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -18589,7 +18877,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
+ error->power_well_driver = intel_de_read(dev_priv,
+ HSW_PWR_WELL_CTL2);
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
@@ -18598,33 +18887,39 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
if (!error->pipe[i].power_domain_on)
continue;
- error->cursor[i].control = I915_READ(CURCNTR(i));
- error->cursor[i].position = I915_READ(CURPOS(i));
- error->cursor[i].base = I915_READ(CURBASE(i));
+ error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
+ error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
+ error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
- error->plane[i].control = I915_READ(DSPCNTR(i));
- error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
+ error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
if (INTEL_GEN(dev_priv) <= 3) {
- error->plane[i].size = I915_READ(DSPSIZE(i));
- error->plane[i].pos = I915_READ(DSPPOS(i));
+ error->plane[i].size = intel_de_read(dev_priv,
+ DSPSIZE(i));
+ error->plane[i].pos = intel_de_read(dev_priv,
+ DSPPOS(i));
}
if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
- error->plane[i].addr = I915_READ(DSPADDR(i));
+ error->plane[i].addr = intel_de_read(dev_priv,
+ DSPADDR(i));
if (INTEL_GEN(dev_priv) >= 4) {
- error->plane[i].surface = I915_READ(DSPSURF(i));
- error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ error->plane[i].surface = intel_de_read(dev_priv,
+ DSPSURF(i));
+ error->plane[i].tile_offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i));
}
- error->pipe[i].source = I915_READ(PIPESRC(i));
+ error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
if (HAS_GMCH(dev_priv))
- error->pipe[i].stat = I915_READ(PIPESTAT(i));
+ error->pipe[i].stat = intel_de_read(dev_priv,
+ PIPESTAT(i));
}
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+ if (!has_transcoder(dev_priv, cpu_transcoder))
continue;
error->transcoder[i].available = true;
@@ -18636,13 +18931,20 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->transcoder[i].cpu_transcoder = cpu_transcoder;
- error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
- error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ error->transcoder[i].conf = intel_de_read(dev_priv,
+ PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = intel_de_read(dev_priv,
+ HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = intel_de_read(dev_priv,
+ HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = intel_de_read(dev_priv,
+ HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = intel_de_read(dev_priv,
+ VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = intel_de_read(dev_priv,
+ VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = intel_de_read(dev_priv,
+ VSYNC(cpu_transcoder));
}
return error;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 028aab728514..adb1225a3480 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -26,7 +26,6 @@
#define _INTEL_DISPLAY_H_
#include <drm/drm_util.h>
-#include <drm/i915_drm.h>
enum link_m_n_set;
struct dpll;
@@ -40,12 +39,15 @@ struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
struct drm_i915_private;
+struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
struct i915_ggtt_view;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
@@ -54,7 +56,6 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
-struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -312,10 +313,11 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
+ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
+ for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
+ for_each_pipe(__dev_priv, __p) \
for_each_if((__mask) & BIT(__p))
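
The iterator rework leans on for_each_if() from <drm/drm_util.h>, which lets a macro append a filter to a for-loop without creating a dangling-else hazard; its definition is essentially:

	#define for_each_if(condition) if (!(condition)) {} else

With pipe_mask driving the filter, for_each_pipe() can now walk all I915_MAX_PIPES slots and skip fused-off pipes, rather than assuming pipes are numbered contiguously from 0.
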
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -469,6 +471,8 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+ u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
@@ -486,6 +490,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state);
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -495,6 +500,7 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
+void lpt_pch_enable(const struct intel_crtc_state *crtc_state);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
@@ -520,6 +526,7 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
@@ -608,8 +615,10 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init_noirq(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
void intel_modeset_driver_remove(struct drm_i915_private *i915);
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
new file mode 100644
index 000000000000..1e6eb7f2f72d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -0,0 +1,2134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_debugfs.h"
+#include "intel_csr.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_fbc.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sideband.h"
+
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+ return to_i915(node->minor->dev);
+}
+
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+
+ seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+ dev_priv->fb_tracking.busy_bits);
+
+ seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+ dev_priv->fb_tracking.flip_bits);
+
+ return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&fbc->lock);
+
+ if (intel_fbc_is_active(dev_priv))
+ seq_puts(m, "FBC enabled\n");
+ else
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+ if (intel_fbc_is_active(dev_priv)) {
+ u32 mask;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
+ else if (IS_G4X(dev_priv))
+ mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+ else
+ mask = intel_de_read(dev_priv, FBC_STATUS) &
+ (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
+
+ seq_printf(m, "Compressing: %s\n", yesno(mask));
+ }
+
+ mutex_unlock(&fbc->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_fbc_false_color_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ *val = dev_priv->fbc.false_color;
+
+ return 0;
+}
+
+static int i915_fbc_false_color_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ u32 reg;
+
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ mutex_lock(&dev_priv->fbc.lock);
+
+ reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
+ dev_priv->fbc.false_color = val;
+
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL,
+ val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
+
+ mutex_unlock(&dev_priv->fbc.lock);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
+ i915_fbc_false_color_get, i915_fbc_false_color_set,
+ "%llu\n");
+
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ yesno(i915_modparams.enable_ips));
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ bool sr_enabled = false;
+
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ /* no global SR status; inspect per-plane WM */;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
+
+ return 0;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ return 0;
+}
+
+static int i915_vbt(struct seq_file *m, void *unused)
+{
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+ if (opregion->vbt)
+ seq_write(m, opregion->vbt, opregion->vbt_size);
+
+ return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_framebuffer *fbdev_fb = NULL;
+ struct drm_framebuffer *drm_fb;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
+ fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fbdev_fb->base.width,
+ fbdev_fb->base.height,
+ fbdev_fb->base.format->depth,
+ fbdev_fb->base.format->cpp[0] * 8,
+ fbdev_fb->base.modifier,
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
+ seq_putc(m, '\n');
+ }
+#endif
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, dev) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.format->depth,
+ fb->base.format->cpp[0] * 8,
+ fb->base.modifier,
+ drm_framebuffer_read_refcount(&fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
+ seq_putc(m, '\n');
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ u8 val;
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+ int ret;
+
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
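+ /* drm_dp_dpcd_readb() returns the number of bytes read, i.e. 1 on success */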
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, status_val;
+ const char *status = "unknown";
+
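+ /*
+ * PSR1 and PSR2 report the live state machine through different
+ * registers, with different state encodings.
+ */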
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_STATUS(dev_priv->psr.transcoder));
+ status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR_STATUS(dev_priv->psr.transcoder));
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ }
+
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_psr *psr = &dev_priv->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
+ if (psr->dp)
+ seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
+
+ if (!psr->sink_support)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+ else
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
+
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ yesno(psr->sink_not_reliable));
+
+ goto unlock;
+ }
+
+ if (psr->psr2_enabled) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ enableddisabled(enabled), val);
+ psr_source_status(dev_priv, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
+
+ /*
+ * The perf counter on SKL+ is reset to 0 every time a DC state is
+ * entered, so only report it on HSW/BDW.
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
+
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+ }
+
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
+
+ /*
+ * Read all three registers up front to minimize the chance of
+ * crossing a frame boundary between reads; each register packs
+ * the SU block counts for three frames.
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(dev_priv,
+ PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
+ }
+
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ intel_wakeref_t wakeref;
+ int ret;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ ret = intel_psr_debug_set(dev_priv, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ *val = READ_ONCE(dev_priv->psr.debug);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", power_well->desc->name,
+ power_well->count);
+
+ for_each_power_domain(power_domain, power_well->desc->domains)
+ seq_printf(m, " %-23s %d\n",
+ intel_display_power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ return 0;
+}
+
+static int i915_dmc_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ struct intel_csr *csr;
+ i915_reg_t dc5_reg, dc6_reg = {};
+
+ if (!HAS_CSR(dev_priv))
+ return -ENODEV;
+
+ csr = &dev_priv->csr;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
+ seq_printf(m, "path: %s\n", csr->fw_path);
+
+ if (!csr->dmc_payload)
+ goto out;
+
+ seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
+ CSR_VERSION_MINOR(csr->version));
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ /*
+ * NOTE: DMC_DEBUG3 is a general-purpose register.
+ * Per Bspec 49196 the DMC firmware reuses the DC5/6 counter
+ * register for DC3CO debugging and validation, but the TGL DMC
+ * firmware uses DMC_DEBUG3 for the DC3CO counter instead.
+ */
+ seq_printf(m, "DC3CO count: %d\n",
+ intel_de_read(dev_priv, DMC_DEBUG3));
+ } else {
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT;
+ if (!IS_GEN9_LP(dev_priv))
+ dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ }
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n",
+ intel_de_read(dev_priv, dc5_reg));
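+ /* dc6_reg stays zero-initialized on platforms without a DC5->DC6 counter */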
+ if (dc6_reg.reg)
+ seq_printf(m, "DC5 -> DC6 count: %d\n",
+ intel_de_read(dev_priv, dc6_reg));
+
+out:
+ seq_printf(m, "program base: 0x%08x\n",
+ intel_de_read(dev_priv, CSR_PROGRAM(0)));
+ seq_printf(m, "ssp base: 0x%08x\n",
+ intel_de_read(dev_priv, CSR_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+ const struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ seq_putc(m, '\t');
+
+ seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_encoder_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+ encoder->base.base.id, encoder->base.name);
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_state *conn_state =
+ connector->state;
+
+ if (conn_state->best_encoder != &encoder->base)
+ continue;
+
+ seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+ const struct drm_display_mode *mode = panel->fixed_mode;
+
+ seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_hdcp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ bool hdcp_cap, hdcp2_cap;
+
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+ seq_puts(m, "\n");
+}
+
+static void intel_dp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+
+ seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+ seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
+ if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ intel_panel_info(m, &intel_connector->panel);
+
+ drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+ &intel_dp->aux);
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
+}
+
+static void intel_dp_mst_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp_mst_encoder *intel_mst =
+ enc_to_mst(intel_encoder);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+ intel_connector->port);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
+}
+
+static void intel_lvds_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ const struct drm_display_mode *mode;
+
+ seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(connector->status));
+
+ if (connector->status == connector_status_disconnected)
+ return;
+
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
+
+ if (!encoder)
+ return;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ intel_dp_mst_info(m, intel_connector);
+ else
+ intel_dp_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (encoder->type == INTEL_OUTPUT_HDMI ||
+ encoder->type == INTEL_OUTPUT_DDI)
+ intel_hdmi_info(m, intel_connector);
+ break;
+ default:
+ break;
+ }
+
+ seq_printf(m, "\tmodes:\n");
+ list_for_each_entry(mode, &connector->modes, head)
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static const char *plane_type(enum drm_plane_type type)
+{
+ switch (type) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ return "OVL";
+ case DRM_PLANE_TYPE_PRIMARY:
+ return "PRI";
+ case DRM_PLANE_TYPE_CURSOR:
+ return "CUR";
+ /*
+ * Deliberately omitting default: to generate compiler warnings
+ * when a new drm_plane_type gets added.
+ */
+ }
+
+ return "unknown";
+}
+
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
+{
+ /*
+ * Only one DRM_MODE_ROTATE_ value is supposed to be set at a time,
+ * but print them all so any misuse of the mask is visible.
+ */
+ snprintf(buf, bufsize,
+ "%s%s%s%s%s%s(0x%08x)",
+ (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
+ (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
+ (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
+ (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
+ (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
+ (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
+ rotation);
+}
+
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->uapi.fb;
+ struct drm_format_name_buf format_name;
+ struct drm_rect src, dst;
+ char rot_str[48];
+
+ src = drm_plane_state_src(&plane_state->uapi);
+ dst = drm_plane_state_dest(&plane_state->uapi);
+
+ if (fb)
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->uapi.rotation);
+
+ seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+ fb ? fb->width : 0, fb ? fb->height : 0,
+ DRM_RECT_FP_ARG(&src),
+ DRM_RECT_ARG(&dst),
+ rot_str);
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_format_name_buf format_name;
+ char rot_str[48];
+
+ if (!fb)
+ return;
+
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->hw.rotation);
+
+ seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, format_name.str,
+ fb->width, fb->height,
+ yesno(plane_state->uapi.visible),
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst),
+ rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+ plane->base.base.id, plane->base.name,
+ plane_type(plane->base.type));
+ intel_plane_uapi_info(m, plane);
+ intel_plane_hw_info(m, plane);
+ }
+}
+
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int num_scalers = crtc->num_scalers;
+ int i;
+
+ /* Not all platforms have a scaler */
+ if (num_scalers) {
+ seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+ num_scalers,
+ crtc_state->scaler_state.scaler_users,
+ crtc_state->scaler_state.scaler_id);
+
+ for (i = 0; i < num_scalers; i++) {
+ const struct intel_scaler *sc =
+ &crtc_state->scaler_state.scalers[i];
+
+ seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
+ i, yesno(sc->in_use), sc->mode);
+ }
+ seq_puts(m, "\n");
+ } else {
+ seq_puts(m, "\tNo scalers available on this platform\n");
+ }
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_encoder *encoder;
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->uapi.enable),
+ yesno(crtc_state->uapi.active),
+ DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+ if (crtc_state->hw.enable) {
+ seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->hw.active),
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+ seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+ intel_scaler_info(m, crtc);
+ }
+
+ for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask)
+ intel_encoder_info(m, crtc, encoder);
+
+ intel_plane_info(m, crtc);
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "CRTC info\n");
+ seq_printf(m, "---------\n");
+ for_each_intel_crtc(dev, crtc)
+ intel_crtc_info(m, crtc);
+
+ seq_printf(m, "\n");
+ seq_printf(m, "Connector info\n");
+ seq_printf(m, "--------------\n");
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
+ intel_connector_info(m, connector);
+ drm_connector_list_iter_end(&conn_iter);
+
+ drm_modeset_unlock_all(dev);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ int i;
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ dev_priv->dpll.ref_clks.nssc,
+ dev_priv->dpll.ref_clks.ssc);
+
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
+ pll->info->id);
+ seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+ pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n",
+ pll->state.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
+ seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
+ seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
+ seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
+ pll->state.hw_state.mg_refclkin_ctl);
+ seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_coreclkctl1);
+ seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_hsclkctl);
+ seq_printf(m, " mg_pll_div0: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div0);
+ seq_printf(m, " mg_pll_div1: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div1);
+ seq_printf(m, " mg_pll_lf: 0x%08x\n",
+ pll->state.hw_state.mg_pll_lf);
+ seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
+ pll->state.hw_state.mg_pll_frac_lock);
+ seq_printf(m, " mg_pll_ssc: 0x%08x\n",
+ pll->state.hw_state.mg_pll_ssc);
+ seq_printf(m, " mg_pll_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_bias);
+ seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_tdc_coldst_bias);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int i915_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ yesno(dev_priv->ipc_enabled));
+ return 0;
+}
+
+static int i915_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (!HAS_IPC(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, i915_ipc_status_show, dev_priv);
+}
+
+static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ intel_wakeref_t wakeref;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ if (!dev_priv->ipc_enabled && enable)
+ drm_info(&dev_priv->drm,
+ "Enabling IPC: WM will be proper only after next commit\n");
+ dev_priv->wm.distrust_bios_wm = true;
+ dev_priv->ipc_enabled = enable;
+ intel_enable_ipc(dev_priv);
+ }
+
+ return len;
+}
+
+static const struct file_operations i915_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_ipc_status_write
+};
+
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct skl_ddb_entry *entry;
+ struct intel_crtc *crtc;
+
+ if (INTEL_GEN(dev_priv) < 9)
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+
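+ /* Start/End/Size are in DDB buffer blocks */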
+ seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
+ seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
+ entry->start, entry->end,
+ skl_ddb_entry_size(entry));
+ }
+
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
+ entry->end, skl_ddb_entry_size(entry));
+ }
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static void drrs_status_per_crtc(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_drrs *drrs = &dev_priv->drrs;
+ int vrefresh = 0;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->state->crtc != &intel_crtc->base)
+ continue;
+
+ seq_printf(m, "%s:\n", connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ seq_puts(m, "\n");
+
+ if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
+ struct intel_panel *panel;
+
+ mutex_lock(&drrs->mutex);
+ /* DRRS Supported */
+ seq_puts(m, "\tDRRS Supported: Yes\n");
+
+ /* intel_edp_drrs_disable() sets drrs->dp to NULL */
+ if (!drrs->dp) {
+ seq_puts(m, "Idleness DRRS: Disabled\n");
+ if (dev_priv->psr.enabled)
+ seq_puts(m,
+ "\tAs PSR is enabled, DRRS is not enabled\n");
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+
+ panel = &drrs->dp->attached_connector->panel;
+ seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+ drrs->busy_frontbuffer_bits);
+
+ seq_puts(m, "\n\t\t");
+ if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+ seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+ vrefresh = panel->fixed_mode->vrefresh;
+ } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+ seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+ vrefresh = panel->downclock_mode->vrefresh;
+ } else {
+ seq_printf(m, "DRRS_State: Unknown(%d)\n",
+ drrs->refresh_rate_type);
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+ seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+ seq_puts(m, "\n\t\t");
+ mutex_unlock(&drrs->mutex);
+ } else {
+ /* DRRS not supported */
+ seq_puts(m, "\tDRRS Supported: No");
+ }
+ seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *intel_crtc;
+ int active_crtc_cnt = 0;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_crtc(dev, intel_crtc) {
+ if (intel_crtc->base.state->active) {
+ active_crtc_cnt++;
+ seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+
+ drrs_status_per_crtc(m, dev, intel_crtc);
+ }
+ }
+ drm_modeset_unlock_all(dev);
+
+ if (!active_crtc_cnt)
+ seq_puts(m, "No active crtc found\n");
+
+ return 0;
+}
+
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ intel_encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ intel_dig_port = enc_to_dig_port(intel_encoder);
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
+ drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static ssize_t i915_displayport_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *input_buffer;
+ int status = 0;
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+ int val = 0;
+
+ dev = ((struct seq_file *)file->private_data)->private;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = memdup_user_nul(ubuf, len);
+ if (IS_ERR(input_buffer))
+ return PTR_ERR(input_buffer);
+
+ drm_dbg(&to_i915(dev)->drm,
+ "Copied %d bytes from user\n", (unsigned int)len);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0)
+ break;
+ drm_dbg(&to_i915(dev)->drm,
+ "Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ intel_dp->compliance.test_active = true;
+ else
+ intel_dp->compliance.test_active = false;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ kfree(input_buffer);
+ if (status < 0)
+ return status;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_active)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_displayport_test_active_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_displayport_test_active_write
+};
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_EDID_READ)
+ seq_printf(m, "%lx",
+ intel_dp->compliance.test_data.edid);
+ else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_VIDEO_PATTERN) {
+ seq_printf(m, "hdisplay: %d\n",
+ intel_dp->compliance.test_data.hdisplay);
+ seq_printf(m, "vdisplay: %d\n",
+ intel_dp->compliance.test_data.vdisplay);
+ seq_printf(m, "bpc: %u\n",
+ intel_dp->compliance.test_data.bpc);
+ }
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ int level;
+ int num_levels;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev_priv))
+ num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
+ else
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /*
+ * Print in units of 0.1 us:
+ * - pre-gen9 WM0 latency is already in 0.1 us units
+ * - pre-gen9 WM1+ latencies are in 0.5 us units
+ * - gen9+/vlv/chv/g4x latencies are in 1 us units
+ */
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level], latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.pri_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.spr_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.cur_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, u16 wm[8])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ u16 new[8] = { 0 };
+ int num_levels;
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (IS_CHERRYVIEW(dev_priv))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev_priv))
+ num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
+ else
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
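+ /* sscanf() returns the number of values converted; require one per WM level */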
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
+ if (ret != num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(dev);
+
+ return len;
+}
+
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.cur_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+
+ /*
+ * Synchronize with outstanding IRQ and hotplug work first, in case
+ * an HPD storm occurred that the kernel hasn't finished handling.
+ */
+ intel_synchronize_irq(dev_priv);
+ flush_work(&dev_priv->hotplug.dig_port_work);
+ flush_delayed_work(&dev_priv->hotplug.hotplug_work);
+
+ seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
+ seq_printf(m, "Detected: %s\n",
+ yesno(delayed_work_pending(&hotplug->reenable_work)));
+
+ return 0;
+}
+
+static ssize_t i915_hpd_storm_ctl_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ unsigned int new_threshold;
+ int i;
+ char *newline;
+ char tmp[16];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
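+ /* "reset" restores the default threshold; 0 disables storm detection */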
+ if (strcmp(tmp, "reset") == 0)
+ new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ else if (kstrtouint(tmp, 10, &new_threshold) != 0)
+ return -EINVAL;
+
+ if (new_threshold > 0)
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting HPD storm detection threshold to %d\n",
+ new_threshold);
+ else
+ drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_storm_threshold = new_threshold;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
+}
+
+static const struct file_operations i915_hpd_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_storm_ctl_write
+};
+
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Enabled: %s\n",
+ yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+ return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_short_storm_ctl_show,
+ inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ char *newline;
+ char tmp[16];
+ int i;
+ bool new_state;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ /* Reset to the "default" state for this system */
+ if (strcmp(tmp, "reset") == 0)
+ new_state = !HAS_DP_MST(dev_priv);
+ else if (kstrtobool(tmp, &new_state) != 0)
+ return -EINVAL;
+
+ drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ new_state ? "En" : "Dis");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_short_storm_enabled = new_state;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_short_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_short_storm_ctl_write,
+};
+
+static int i915_drrs_ctl_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+
+ if (INTEL_GEN(dev_priv) < 7)
+ return -ENODEV;
+
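+ /*
+ * Walk all CRTCs, toggling DRRS on each active eDP output; wait
+ * for any pending commit so we don't race the hardware programming.
+ */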
+ for_each_intel_crtc(dev, crtc) {
+ struct drm_connector_list_iter conn_iter;
+ struct intel_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_crtc_commit *commit;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->hw.active ||
+ !crtc_state->has_drrs)
+ goto out;
+
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ goto out;
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp;
+
+ if (!(crtc_state->uapi.connector_mask &
+ drm_connector_mask(connector)))
+ continue;
+
+ encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ drm_dbg(&dev_priv->drm,
+ "Manually %sabling DRRS. %llu\n",
+ val ? "en" : "dis", val);
+
+ intel_dp = enc_to_intel_dp(encoder);
+ if (val)
+ intel_edp_drrs_enable(intel_dp,
+ crtc_state);
+ else
+ intel_edp_drrs_disable(intel_dp,
+ crtc_state);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+out:
+ drm_modeset_unlock(&crtc->base.mutex);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
+
+static ssize_t
+i915_fifo_underrun_reset_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct drm_i915_private *dev_priv = filp->private_data;
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = &dev_priv->drm;
+ int ret;
+ bool reset;
+
+ ret = kstrtobool_from_user(ubuf, cnt, &reset);
+ if (ret)
+ return ret;
+
+ if (!reset)
+ return cnt;
+
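+ /*
+ * Wait for any pending commit on each CRTC to complete before
+ * re-arming FIFO underrun reporting for it.
+ */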
+ for_each_intel_crtc(dev, intel_crtc) {
+ struct drm_crtc_commit *commit;
+ struct intel_crtc_state *crtc_state;
+
+ ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(intel_crtc->base.state);
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (!ret)
+ ret = wait_for_completion_interruptible(&commit->flip_done);
+ }
+
+ if (!ret && crtc_state->hw.active) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Re-arming FIFO underruns on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+
+ intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
+ }
+
+ drm_modeset_unlock(&intel_crtc->base.mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_fbc_reset_underrun(dev_priv);
+ if (ret)
+ return ret;
+
+ return cnt;
+}
+
+static const struct file_operations i915_fifo_underrun_reset_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = i915_fifo_underrun_reset_write,
+ .llseek = default_llseek,
+};
+
+static const struct drm_info_list intel_display_debugfs_list[] = {
+ {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_ips_status", i915_ips_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
+ {"i915_opregion", i915_opregion, 0},
+ {"i915_vbt", i915_vbt, 0},
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ {"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_power_domain_info", i915_power_domain_info, 0},
+ {"i915_dmc_info", i915_dmc_info, 0},
+ {"i915_display_info", i915_display_info, 0},
+ {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+ {"i915_dp_mst_info", i915_dp_mst_info, 0},
+ {"i915_ddb_info", i915_ddb_info, 0},
+ {"i915_drrs_status", i915_drrs_status, 0},
+};
+
+static const struct {
+ const char *name;
+ const struct file_operations *fops;
+} intel_display_debugfs_files[] = {
+ {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
+ {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+ {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+ {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
+ {"i915_fbc_false_color", &i915_fbc_false_color_fops},
+ {"i915_dp_test_data", &i915_displayport_test_data_fops},
+ {"i915_dp_test_type", &i915_displayport_test_type_fops},
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+ {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
+ {"i915_ipc_status", &i915_ipc_status_fops},
+ {"i915_drrs_ctl", &i915_drrs_ctl_fops},
+ {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
+};
+
+int intel_display_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
+ debugfs_create_file(intel_display_debugfs_files[i].name,
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ to_i915(minor->dev),
+ intel_display_debugfs_files[i].fops);
+ }
+
+ return drm_debugfs_create_files(intel_display_debugfs_list,
+ ARRAY_SIZE(intel_display_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->backlight_off_delay);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_panel);
+
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ /* HDCP is supported by the connector only if a shim is registered */
+ if (!intel_connector->hdcp.shim)
+ return -EINVAL;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name,
+ connector->base.id);
+ intel_hdcp_info(m, intel_connector);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ struct drm_modeset_acquire_ctx ctx;
+ struct intel_crtc_state *crtc_state = NULL;
+ int ret = 0;
+ bool try_again = false;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
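+ /*
+ * Standard modeset-lock retry dance: on -EDEADLK, back off and
+ * retry with the same acquire context.
+ */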
+ do {
+ try_again = false;
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ &ctx);
+ if (ret) {
+ if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+ try_again = true;
+ continue;
+ }
+ break;
+ }
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ break;
+ }
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret) {
+ try_again = true;
+ continue;
+ }
+ break;
+ } else if (ret) {
+ break;
+ }
+ intel_dp = intel_attached_dp(to_intel_connector(connector));
+ crtc_state = to_intel_crtc_state(crtc->state);
+ seq_printf(m, "DSC_Enabled: %s\n",
+ yesno(crtc_state->dsc.compression_enable));
+ seq_printf(m, "DSC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ seq_printf(m, "Force_DSC_Enable: %s\n",
+ yesno(intel_dp->force_dsc_en));
+ if (!intel_dp_is_edp(intel_dp))
+ seq_printf(m, "FEC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+ } while (try_again);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ bool dsc_enable = false;
+ int ret;
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (len == 0)
+ return 0;
+
+ drm_dbg(&i915->drm,
+ "Copied %zu bytes from user to force DSC\n", len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
+ (dsc_enable) ? "true" : "false");
+ intel_dp->force_dsc_en = dsc_enable;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fec_support_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fec_support_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fec_support_write
+};
+
+/**
+ * intel_connector_debugfs_add - add i915 specific connector debugfs files
+ * @connector: pointer to a registered drm_connector
+ *
+ * Cleanup will be done by drm_connector_unregister() through a call to
+ * drm_debugfs_connector_remove().
+ *
+ * Returns 0 on success, negative error codes on error.
+ */
+int intel_connector_debugfs_add(struct drm_connector *connector)
+{
+ struct dentry *root = connector->debugfs_entry;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
+ /* The connector must have been registered beforehand. */
+ if (!root)
+ return -ENODEV;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ connector, &i915_hdcp_sink_capability_fops);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+ debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+ connector, &i915_dsc_fec_support_fops);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
new file mode 100644
index 000000000000..a3bea1ce04c2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_H__
+#define __INTEL_DISPLAY_DEBUGFS_H__
+
+struct drm_connector;
+struct drm_i915_private;
+
+#ifdef CONFIG_DEBUG_FS
+int intel_display_debugfs_register(struct drm_i915_private *i915);
+int intel_connector_debugfs_add(struct drm_connector *connector);
+#else
+static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+#endif
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 46c40db992dd..84ecf8e58523 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -15,6 +15,7 @@
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
+#include "intel_pm.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
@@ -159,7 +160,7 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
power_well->desc->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
@@ -167,7 +168,7 @@ static void intel_power_well_enable(struct drm_i915_private *dev_priv,
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
power_well->hw_enabled = false;
power_well->desc->ops->disable(dev_priv, power_well);
}
@@ -182,8 +183,9 @@ static void intel_power_well_get(struct drm_i915_private *dev_priv,
static void intel_power_well_put(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN(!power_well->count, "Use count on power well %s is already zero",
- power_well->desc->name);
+ drm_WARN(&dev_priv->drm, !power_well->count,
+ "Use count on power well %s is already zero",
+ power_well->desc->name);
if (!--power_well->count)
intel_power_well_disable(dev_priv, power_well);
@@ -289,11 +291,11 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
if (intel_de_wait_for_set(dev_priv, regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
- DRM_DEBUG_KMS("%s power well enable timeout\n",
- power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ power_well->desc->name);
/* An AUX timeout is expected if the TBT DP tunnel is down. */
- WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+ drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
}
}
@@ -304,11 +306,11 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
u32 ret;
- ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
- ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+ ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
if (regs->kvmr.reg)
- ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
- ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
+ ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
return ret;
}
@@ -330,23 +332,25 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(I915_READ(regs->driver) &
+ wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
(reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
if (disabled)
return;
- DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
- power_well->desc->name,
- !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+ drm_dbg_kms(&dev_priv->drm,
+ "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ power_well->desc->name,
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
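
intel_de_wait_for_set() polls until every bit in the mask reads back as set and returns non-zero on timeout, so the drm_WARN_ON() wrapper above fires only when the wait fails. Roughly (a behavioral sketch, not the actual implementation):

	/* Sketch: approximate semantics of intel_de_wait_for_set(). */
	static int de_wait_for_set_sketch(struct drm_i915_private *dev_priv,
					  i915_reg_t reg, u32 mask,
					  unsigned int timeout_ms)
	{
		/* wait_for() is the usual i915 polling helper, timeout in ms */
		return wait_for((intel_de_read(dev_priv, reg) & mask) == mask,
				timeout_ms);
	}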
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -372,17 +376,18 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well);
/* Display WA #1178: cnl */
if (IS_CANNONLAKE(dev_priv) &&
pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
- val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
+ val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
+ intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
}
if (wait_fuses)
@@ -403,8 +408,9 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_power_well_pre_disable(dev_priv,
power_well->desc->hsw.irq_pipe_mask);
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -419,14 +425,16 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
if (INTEL_GEN(dev_priv) < 12) {
- val = I915_READ(ICL_PORT_CL_DW12(phy));
- I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val | ICL_LANE_ENABLE_AUX);
}
hsw_wait_for_power_well_enable(dev_priv, power_well);
@@ -434,9 +442,9 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
!intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+ val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+ intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
}
}
@@ -449,13 +457,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = I915_READ(ICL_PORT_CL_DW12(phy));
- I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val & ~ICL_LANE_ENABLE_AUX);
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -485,7 +495,7 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
int refs = hweight64(power_well->desc->domains &
async_put_domains_mask(&dev_priv->power_domains));
- WARN_ON(refs > power_well->count);
+ drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
return refs;
}
@@ -515,7 +525,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
continue;
dig_port = enc_to_dig_port(encoder);
- if (WARN_ON(!dig_port))
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
continue;
if (dig_port->aux_ch != aux_ch) {
@@ -526,10 +536,10 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
break;
}
- if (WARN_ON(!dig_port))
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
- WARN_ON(!intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}
#else
@@ -552,11 +562,11 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
icl_tc_port_assert_ref_held(dev_priv, power_well);
- val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+ val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
if (power_well->desc->hsw.is_tc_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
- I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+ intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
@@ -564,11 +574,13 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
DKL_CMN_UC_DW27_UC_HEALTH, 1))
- DRM_WARN("Timeout waiting TC uC health\n");
+ drm_warn(&dev_priv->drm,
+ "Timeout waiting TC uC health\n");
}
}
@@ -596,7 +608,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
HSW_PWR_WELL_CTL_STATE(pw_idx);
u32 val;
- val = I915_READ(regs->driver);
+ val = intel_de_read(dev_priv, regs->driver);
/*
* On GEN9 big core due to a DMC bug the driver's request bits for PW1
@@ -606,22 +618,26 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
*/
if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= I915_READ(regs->bios);
+ val |= intel_de_read(dev_priv, regs->bios);
return (val & mask) == mask;
}
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
- HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
- "Power well 2 on.\n");
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
/*
* TODO: check for the following to verify the conditions to enter DC9
@@ -634,10 +650,12 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
/*
* TODO: check for the following to verify DC9 state was indeed
@@ -655,7 +673,7 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
int rereads = 0;
u32 v;
- I915_WRITE(DC_STATE_EN, state);
+ intel_de_write(dev_priv, DC_STATE_EN, state);
/* It has been observed that disabling the dc6 state sometimes
* doesn't stick and dmc keeps returning old value. Make sure
@@ -663,10 +681,10 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
* we are confident that state is exactly what we want.
*/
do {
- v = I915_READ(DC_STATE_EN);
+ v = intel_de_read(dev_priv, DC_STATE_EN);
if (v != state) {
- I915_WRITE(DC_STATE_EN, state);
+ intel_de_write(dev_priv, DC_STATE_EN, state);
rewrites++;
rereads = 0;
} else if (rereads++ > 5) {
@@ -676,13 +694,15 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
} while (rewrites < 100);
if (v != state)
- DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
- state, v);
+ drm_err(&dev_priv->drm,
+ "Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
/* Most of the time we need one retry, avoid spam */
if (rewrites > 1)
- DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
- state, rewrites);
+ drm_dbg_kms(&dev_priv->drm,
+ "Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
}
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
@@ -708,10 +728,11 @@ static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+ val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
- DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
- dev_priv->csr.dc_state, val);
+ drm_dbg_kms(&dev_priv->drm,
+ "Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->csr.dc_state, val);
dev_priv->csr.dc_state = val;
}
@@ -743,18 +764,19 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
u32 val;
u32 mask;
- if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;
- val = I915_READ(DC_STATE_EN);
+ val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
- DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
- val & mask, state);
+ drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
+ val & mask, state);
/* Check if DMC is ignoring our DC state requests */
if ((val & mask) != dev_priv->csr.dc_state)
- DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->csr.dc_state, val & mask);
+ drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
val &= ~mask;
val |= state;
@@ -791,7 +813,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_KMS("Enabling DC3CO\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
@@ -799,10 +821,10 @@ static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Disabling DC3CO\n");
- val = I915_READ(DC_STATE_EN);
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
+ val = intel_de_read(dev_priv, DC_STATE_EN);
val &= ~DC_STATE_DC3CO_STATUS;
- I915_WRITE(DC_STATE_EN, val);
+ intel_de_write(dev_priv, DC_STATE_EN, val);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* Delay of 200us DC3CO Exit time B.Spec 49196
@@ -814,7 +836,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
- DRM_DEBUG_KMS("Enabling DC9\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
/*
* Power sequencer reset is not needed on
* platforms with South Display Engine on PCH,
@@ -829,7 +851,7 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc9(dev_priv);
- DRM_DEBUG_KMS("Disabling DC9\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -838,10 +860,13 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
- "CSR program storage start is NULL\n");
- WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
- WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ !intel_de_read(dev_priv, CSR_PROGRAM(0)),
+ "CSR program storage start is NULL\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
+ "CSR SSP Base Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
+ "CSR HTP Not fine\n");
}
static struct i915_power_well *
@@ -861,7 +886,9 @@ lookup_power_well(struct drm_i915_private *dev_priv,
* the first power well and hope the WARN gets reported so we can fix
* our driver.
*/
- WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+ drm_WARN(&dev_priv->drm, 1,
+ "Power well %d not defined for this platform\n",
+ power_well_id);
return &dev_priv->power_domains.power_wells[0];
}
@@ -884,7 +911,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
- if (WARN_ON(!power_well))
+ if (drm_WARN_ON(&dev_priv->drm, !power_well))
goto unlock;
state = sanitize_target_dc_state(dev_priv, state);
@@ -912,13 +939,22 @@ unlock:
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
- bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
- SKL_DISP_PW_2);
+ enum i915_power_well_id high_pg;
- WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+ /* Power wells at this level and above must be disabled for DC5 entry */
+ if (INTEL_GEN(dev_priv) >= 12)
+ high_pg = TGL_DISP_PW_3;
+ else
+ high_pg = SKL_DISP_PW_2;
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
- "DC5 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_display_power_well_is_enabled(dev_priv, high_pg),
+ "Power wells above platform's DC5 limit still enabled.\n");
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
assert_csr_loaded(dev_priv);
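
The hunk above makes the DC5 gating level platform-dependent; condensed into a helper, the selection is simply (a sketch using the same symbols as the code above):

	/* Sketch: highest power well that must be off before DC5 entry. */
	static enum i915_power_well_id dc5_gating_pw(struct drm_i915_private *i915)
	{
		return INTEL_GEN(i915) >= 12 ? TGL_DISP_PW_3 : SKL_DISP_PW_2;
	}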
@@ -928,22 +964,25 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
- DRM_DEBUG_KMS("Enabling DC5\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
/* Wa Display #1183: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
assert_csr_loaded(dev_priv);
}
@@ -952,12 +991,12 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
- DRM_DEBUG_KMS("Enabling DC6\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
/* Wa Display #1183: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -968,15 +1007,15 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = I915_READ(regs->bios);
+ u32 bios_req = intel_de_read(dev_priv, regs->bios);
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
- u32 drv_req = I915_READ(regs->driver);
+ u32 drv_req = intel_de_read(dev_priv, regs->driver);
if (!(drv_req & mask))
- I915_WRITE(regs->driver, drv_req | mask);
- I915_WRITE(regs->bios, bios_req & ~mask);
+ intel_de_write(dev_priv, regs->driver, drv_req | mask);
+ intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
}
}
@@ -1022,22 +1061,25 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
- (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
+ return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
- u32 tmp = I915_READ(DBUF_CTL);
+ u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
- WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
- (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
- "Unexpected DBuf power power state (0x%08x)\n", tmp);
+ drm_WARN(&dev_priv->drm,
+ hw_enabled_dbuf_slices != enabled_dbuf_slices,
+ "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+ hw_enabled_dbuf_slices,
+ enabled_dbuf_slices);
}
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = {};
+ struct intel_cdclk_config cdclk_config = {};
if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
@@ -1046,9 +1088,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+ dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
- WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
+ drm_WARN_ON(&dev_priv->drm,
+ intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ &cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1108,9 +1152,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_A);
- if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_B);
}
@@ -1124,8 +1168,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+ intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1163,9 +1207,10 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
#undef COND
@@ -1204,8 +1249,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
- state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
if (state == ctrl)
enabled = true;
@@ -1214,7 +1259,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
+ drm_WARN_ON(&dev_priv->drm, ctrl != state);
vlv_punit_put(dev_priv);
@@ -1231,21 +1276,22 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val &= DPOUNIT_CLOCK_GATE_DISABLE;
val |= VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
/*
* Disable trickle feed and enable pnd deadline calculation
*/
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- I915_WRITE(CBR1_VLV, 0);
-
- WARN_ON(dev_priv->rawclk_freq == 0);
+ intel_de_write(dev_priv, MI_ARB_VLV,
+ MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ intel_de_write(dev_priv, CBR1_VLV, 0);
- I915_WRITE(RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
+ drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+ 1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
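
rawclk_freq is tracked in kHz, so the DIV_ROUND_CLOSEST(..., 1000) above programs RAWCLK_FREQ_VLV in MHz; a worked example (the kHz unit and the 200000 figure are assumptions for illustration):

	/* Sketch: the kHz -> MHz conversion done for RAWCLK_FREQ_VLV. */
	u32 rawclk_khz = 200000;			   /* hypothetical */
	u32 reg_val = DIV_ROUND_CLOSEST(rawclk_khz, 1000); /* = 200 */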
@@ -1262,13 +1308,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
for_each_pipe(dev_priv, pipe) {
- u32 val = I915_READ(DPLL(pipe));
+ u32 val = intel_de_read(dev_priv, DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
+ intel_de_write(dev_priv, DPLL(pipe), val);
}
vlv_init_display_clock_gating(dev_priv);
@@ -1348,7 +1394,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1360,7 +1407,8 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
vlv_set_power_well(dev_priv, power_well, false);
}
@@ -1422,7 +1470,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
*/
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
- (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
if (BITS_SET(phy_control,
@@ -1467,9 +1515,10 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
*/
if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
phy_status_mask, phy_status, 10))
- DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
+ drm_err(&dev_priv->drm,
+ "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
}
#undef BITS_SET
@@ -1481,8 +1530,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
u32 tmp;
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
pipe = PIPE_A;
@@ -1499,7 +1549,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
/* Poll for phypwrgood signal */
if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy), 1))
- DRM_ERROR("Display PHY %d is not power up\n", phy);
+ drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
+ phy);
vlv_dpio_get(dev_priv);
@@ -1527,10 +1578,12 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_put(dev_priv);
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
}
@@ -1540,8 +1593,9 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
phy = DPIO_PHY0;
@@ -1553,12 +1607,14 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
}
dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
- DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
/* PHY is fully reset now, so we can enable the PHY state asserts */
dev_priv->chv_phy_assert[phy] = true;
@@ -1621,11 +1677,13 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
- WARN(actual != expected,
- "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
- !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
- !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
- reg, val);
+ drm_WARN(&dev_priv->drm, actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN),
+ !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN),
+ !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
}
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
@@ -1646,10 +1704,12 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
else
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1677,10 +1737,12 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
else
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1703,7 +1765,8 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
/*
@@ -1711,7 +1774,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
+ drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
vlv_punit_put(dev_priv);
@@ -1742,9 +1805,10 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
#undef COND
@@ -1752,6 +1816,13 @@ out:
vlv_punit_put(dev_priv);
}
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+}
+
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -1981,12 +2052,13 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
power_domains = &dev_priv->power_domains;
- WARN(!power_domains->domain_use_count[domain],
- "Use count on domain %s is already zero\n",
- name);
- WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
- "Async disabling of domain %s is pending\n",
- name);
+ drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+ "Use count on domain %s is already zero\n",
+ name);
+ drm_WARN(&dev_priv->drm,
+ async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ "Async disabling of domain %s is pending\n",
+ name);
power_domains->domain_use_count[domain]--;
@@ -2131,7 +2203,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
goto out_verify;
}
- WARN_ON(power_domains->domain_use_count[domain] != 1);
+ drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
@@ -2206,7 +2278,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
verify_async_put_domains_state(power_domains);
- WARN_ON(power_domains->async_put_wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -2674,7 +2746,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- TGL_PW_2_POWER_DOMAINS | \
+ TGL_PW_3_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -2734,7 +2806,7 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
};
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
+ .sync_hw = chv_pipe_power_well_sync_hw,
.enable = chv_pipe_power_well_enable,
.disable = chv_pipe_power_well_disable,
.is_enabled = chv_pipe_power_well_enabled,
@@ -3870,7 +3942,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "power well 3",
.domains = TGL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = TGL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4068,7 +4140,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX D TBT1",
.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4079,7 +4151,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX E TBT2",
.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4090,7 +4162,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX F TBT3",
.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4101,7 +4173,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX G TBT4",
.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4112,7 +4184,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX H TBT5",
.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4123,7 +4195,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX I TBT6",
.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4204,11 +4276,13 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
} else if (enable_dc == -1) {
requested_dc = max_dc;
} else if (enable_dc > max_dc && enable_dc <= 4) {
- DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
- enable_dc, max_dc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adjusting requested max DC state (%d->%d)\n",
+ enable_dc, max_dc);
requested_dc = max_dc;
} else {
- DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+ drm_err(&dev_priv->drm,
+ "Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}
@@ -4227,7 +4301,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
break;
}
- DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+ drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
return mask;
}
@@ -4371,16 +4445,16 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
{
u32 val, status;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
udelay(10);
- status = I915_READ(reg) & DBUF_POWER_STATE;
+ status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
if ((enable && !status) || (!enable && status)) {
- DRM_ERROR("DBus power %s timeout!\n",
- enable ? "enable" : "disable");
+ drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
+ enable ? "enable" : "disable");
return false;
}
return true;
@@ -4388,80 +4462,60 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+ icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
-}
-
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 11)
- return 1;
- return 2;
+ icl_dbuf_slices_update(dev_priv, 0);
}
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- bool ret;
+ int i;
+ int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
- if (req_slices > intel_dbuf_max_slices(dev_priv)) {
- DRM_ERROR("Invalid number of dbuf slices requested\n");
- return;
- }
+ drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
+ "Invalid number of dbuf slices requested\n");
- if (req_slices == hw_enabled_slices || req_slices == 0)
- return;
+ DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
- if (req_slices > hw_enabled_slices)
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
- else
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+ /*
+ * This might run in parallel with gen9_dc_off_power_well_enable
+ * (called e.g. from intel_dp_detect); without the lock,
+ * gen9_assert_dbuf_enabled could race with us and trip its
+ * assertion when the registers are already updated but
+ * dev_priv is not yet.
+ */
+ mutex_lock(&power_domains->lock);
+
+ for (i = 0; i < max_slices; i++) {
+ intel_dbuf_slice_set(dev_priv,
+ DBUF_CTL_S(i),
+ (req_slices & BIT(i)) != 0);
+ }
- if (ret)
- dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+ dev_priv->enabled_dbuf_slices_mask = req_slices;
+
+ mutex_unlock(&power_domains->lock);
}
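
Callers of icl_dbuf_slices_update() now pass a bitmask of slices rather than a count; for example (a sketch, using the DBUF_S1/DBUF_S2 enum added to intel_display_power.h below):

	/* Sketch: request both DBuf slices on a two-slice platform. */
	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2));

	/* Sketch: power every slice down, as icl_dbuf_disable() does. */
	icl_dbuf_slices_update(dev_priv, 0);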
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
- else
- /*
- * FIXME: for now pretend that we only have 1 slice, see
- * intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+ skl_ddb_get_hw_state(dev_priv);
+ /*
+ * Power up at least one slice; we will figure out later
+ * which slices we have and which we need.
+ */
+ icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
+ BIT(DBUF_S1));
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power disable timeout!\n");
- else
- /*
- * FIXME: for now pretend that the first slice is always
- * enabled, see intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+ icl_dbuf_slices_update(dev_priv, 0);
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@ -4472,19 +4526,21 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
MBUS_ABOX_B_CREDIT_MASK |
MBUS_ABOX_BW_CREDIT_MASK;
-
- val = I915_READ(MBUS_ABOX_CTL);
- val &= ~mask;
- val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
+ val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
MBUS_ABOX_BT_CREDIT_POOL2(16) |
MBUS_ABOX_B_CREDIT(1) |
MBUS_ABOX_BW_CREDIT(1);
- I915_WRITE(MBUS_ABOX_CTL, val);
+
+ intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
+ intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
+ }
}
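
intel_de_rmw(dev_priv, reg, clear, set) replaces the open-coded read/modify/write sequences; it is roughly equivalent to the sketch below (hedged: the real helper also skips the write when nothing changed and returns the old value):

	/* Sketch: approximate expansion of intel_de_rmw(dev_priv, reg, clear, set). */
	u32 old = intel_de_read(dev_priv, reg);
	u32 new = (old & ~clear) | set;

	if (old != new)
		intel_de_write(dev_priv, reg, new);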
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
- u32 val = I915_READ(LCPLL_CTL);
+ u32 val = intel_de_read(dev_priv, LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@@ -4493,13 +4549,13 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
*/
if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
+ drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
+ drm_err(&dev_priv->drm, "LCPLL is disabled\n");
if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
- DRM_ERROR("LCPLL not using non-SSC reference\n");
+ drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -4511,26 +4567,26 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
+ I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
"Display power well on\n");
- I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
"SPLL enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
"WRPLL1 enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
"WRPLL2 enabled\n");
- I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
+ I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
"Panel power on\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev_priv))
- I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
- I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
- I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
"PCH GTC enabled\n");
/*
@@ -4545,9 +4601,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv))
- return I915_READ(D_COMP_HSW);
+ return intel_de_read(dev_priv, D_COMP_HSW);
else
- return I915_READ(D_COMP_BDW);
+ return intel_de_read(dev_priv, D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
@@ -4555,10 +4611,11 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
if (IS_HASWELL(dev_priv)) {
if (sandybridge_pcode_write(dev_priv,
GEN6_PCODE_WRITE_D_COMP, val))
- DRM_DEBUG_KMS("Failed to write to D_COMP\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to write to D_COMP\n");
} else {
- I915_WRITE(D_COMP_BDW, val);
- POSTING_READ(D_COMP_BDW);
+ intel_de_write(dev_priv, D_COMP_BDW, val);
+ intel_de_posting_read(dev_priv, D_COMP_BDW);
}
}
@@ -4577,25 +4634,25 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
assert_can_disable_lcpll(dev_priv);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us(I915_READ(LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
- DRM_ERROR("Switching to FCLK failed\n");
+ drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
}
val |= LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
- DRM_ERROR("LCPLL still locked\n");
+ drm_err(&dev_priv->drm, "LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
@@ -4604,13 +4661,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
if (wait_for((hsw_read_dcomp(dev_priv) &
D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
- DRM_ERROR("D_COMP RCOMP still in progress\n");
+ drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val |= LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
}
}
@@ -4622,7 +4679,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
@@ -4636,8 +4693,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
}
val = hsw_read_dcomp(dev_priv);
@@ -4645,27 +4702,28 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
- DRM_ERROR("LCPLL not locked yet\n");
+ drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us((I915_READ(LCPLL_CTL) &
+ if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
+ drm_err(&dev_priv->drm,
+ "Switching back to LCPLL failed\n");
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
}
/*
@@ -4695,12 +4753,12 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Enabling package C8+\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
lpt_disable_clkout_dp(dev_priv);
@@ -4711,15 +4769,15 @@ static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Disabling package C8+\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
}
@@ -4737,14 +4795,14 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (enable)
val |= reset_bits;
else
val &= ~reset_bits;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
@@ -4769,7 +4827,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -4786,7 +4844,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
@@ -4830,7 +4888,7 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -4847,7 +4905,7 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -4889,7 +4947,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
/* 5. Enable CD clock */
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
@@ -4911,7 +4969,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -4970,25 +5028,23 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
break;
if (table[i].page_mask == 0) {
- DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
- I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
- I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+ drm_dbg(&dev_priv->drm,
+ "Unknown memory configuration; disabling address buddy logic.\n");
+ intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
+ intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
} else {
- u32 val;
-
- I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
- I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+ intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
+ table[i].page_mask);
+ intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
+ table[i].page_mask);
/* Wa_22010178259:tgl */
- val = I915_READ(BW_BUDDY1_CTL);
- val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
- val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
- I915_WRITE(BW_BUDDY1_CTL, val);
-
- val = I915_READ(BW_BUDDY2_CTL);
- val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
- val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
- I915_WRITE(BW_BUDDY2_CTL, val);
+ intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
+ intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
}
}
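
REG_FIELD_PREP(mask, value) shifts the value into the register field described by a contiguous mask, so the intel_de_rmw() calls above clear the TLB request timer field and reprogram it to 0x8; schematically (the 0x003f0000 mask value is an assumption for illustration):

	/* Sketch: what REG_FIELD_PREP(mask, val) computes for a contiguous mask. */
	u32 mask  = 0x003f0000;				/* hypothetical field mask */
	u32 field = (0x8 << __ffs(mask)) & mask;	/* -> 0x00080000 */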
@@ -5016,7 +5072,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
/* 4. Enable CDCLK. */
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
/* 5. Enable DBUF. */
icl_dbuf_enable(dev_priv);
@@ -5045,7 +5101,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
icl_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -5090,7 +5146,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* current lane status.
*/
if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- u32 status = I915_READ(DPLL(PIPE_A));
+ u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
@@ -5121,7 +5177,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
}
if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- u32 status = I915_READ(DPIO_PHY_STATUS);
+ u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@@ -5142,10 +5198,10 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
dev_priv->chv_phy_assert[DPIO_PHY1] = true;
}
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
- dev_priv->chv_phy_control);
+ /* Defer application of the initial phy_control to enabling the power well */
}
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
@@ -5158,10 +5214,10 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
/* If the display might be already active skip this */
if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
- I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
return;
- DRM_DEBUG_KMS("toggling display PHY side reset\n");
+ drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
disp2d->desc->ops->enable(dev_priv, disp2d);
@@ -5189,8 +5245,9 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
- WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
- "VED not power gated\n");
+ drm_WARN(&dev_priv->drm,
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
}
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
@@ -5201,9 +5258,9 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{}
};
- WARN(!pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
- "ISP not power gated\n");
+ drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
}
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
@@ -5230,9 +5287,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
- /* Must happen before power domain init on VLV/CHV */
- intel_update_rawclk(i915);
-
if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
@@ -5336,7 +5390,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- WARN_ON(power_domains->wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
@@ -5418,7 +5472,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- WARN_ON(power_domains->wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
@@ -5436,13 +5490,13 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
- DRM_DEBUG_DRIVER("%-25s %d\n",
- power_well->desc->name, power_well->count);
+ drm_dbg(&i915->drm, "%-25s %d\n",
+ power_well->desc->name, power_well->count);
for_each_power_domain(domain, power_well->desc->domains)
- DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg(&i915->drm, " %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
}
@@ -5475,19 +5529,21 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
enabled = power_well->desc->ops->is_enabled(i915, power_well);
if ((power_well->count || power_well->desc->always_on) !=
enabled)
- DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
- power_well->desc->name,
- power_well->count, enabled);
+ drm_err(&i915->drm,
+ "power well %s state mismatch (refcount %d/enabled %d)",
+ power_well->desc->name,
+ power_well->count, enabled);
domains_count = 0;
for_each_power_domain(domain, power_well->desc->domains)
domains_count += power_domains->domain_use_count[domain];
if (power_well->count != domains_count) {
- DRM_ERROR("power well %s refcount/domain refcount mismatch "
- "(refcount %d/domains refcount %d)\n",
- power_well->desc->name, power_well->count,
- domains_count);
+ drm_err(&i915->drm,
+ "power well %s refcount/domain refcount mismatch "
+ "(refcount %d/domains refcount %d)\n",
+ power_well->desc->name, power_well->count,
+ domains_count);
dump_domain_info = true;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2608a65af7fa..da64a5edae7a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -100,6 +100,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
+ TGL_DISP_PW_3,
SKL_DISP_DC_OFF,
};
@@ -307,6 +308,11 @@ intel_display_power_put_async(struct drm_i915_private *i915,
}
#endif
+enum dbuf_slice {
+ DBUF_S1,
+ DBUF_S2,
+};
+
#define with_intel_display_power(i915, domain, wf) \
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
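
The with_intel_display_power() macro above is a for-loop wrapper that pairs the power-domain get with a deferred asynchronous put, so the wakeref cannot escape the block. A hedged usage sketch; do_aux_work() and the choice of POWER_DOMAIN_AUX_A are purely illustrative:

    intel_wakeref_t wf;

    with_intel_display_power(i915, POWER_DOMAIN_AUX_A, wf) {
            /* The domain is guaranteed powered inside the block; on
             * exit the macro calls intel_display_power_put_async()
             * and zeroes wf, terminating the loop after one pass. */
            do_aux_work(i915);
    }
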
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 888ea8a170d1..5e00e611f077 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -39,13 +39,14 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/i915_drm.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
#include "i915_drv.h"
+#include "intel_de.h"
struct drm_printer;
+struct __intel_global_objs_state;
/*
* Display related stuff
@@ -139,6 +140,9 @@ struct intel_encoder {
int (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
+ int (*compute_config_late)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
void (*update_prepare)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
@@ -214,6 +218,9 @@ struct intel_panel {
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
+ /* DPCD backlight */
+ u8 pwmgen_bit_count;
+
struct backlight_device *device;
/* Connector and platform specific backlight functions */
@@ -458,25 +465,8 @@ struct intel_atomic_state {
intel_wakeref_t wakeref;
- struct {
- /*
- * Logical state of cdclk (used for all scaling, watermark,
- * etc. calculations and checks). This is computed as if all
- * enabled crtcs were active.
- */
- struct intel_cdclk_state logical;
-
- /*
- * Actual state of cdclk, can be different from the logical
- * state only when all crtc's are DPMS off.
- */
- struct intel_cdclk_state actual;
-
- int force_min_cdclk;
- bool force_min_cdclk_changed;
- /* pipe to which cd2x update is synchronized */
- enum pipe pipe;
- } cdclk;
+ struct __intel_global_objs_state *global_objs;
+ int num_global_objs;
bool dpll_set, modeset;
@@ -491,10 +481,6 @@ struct intel_atomic_state {
u8 active_pipe_changes;
u8 active_pipes;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -508,14 +494,11 @@ struct intel_atomic_state {
/*
* active_pipes
- * min_cdclk[]
- * min_voltage_level[]
- * cdclk.*
*/
bool global_state_changed;
- /* Gen9+ only */
- struct skl_ddb_values wm_results;
+ /* Number of enabled DBuf slices */
+ u8 enabled_dbuf_slices_mask;
struct i915_sw_fence commit_ready;
@@ -611,6 +594,7 @@ struct intel_plane_state {
struct intel_initial_plane_config {
struct intel_framebuffer *fb;
+ struct i915_vma *vma;
unsigned int tiling;
int size;
u32 base;
@@ -657,15 +641,30 @@ struct intel_crtc_scaler_state {
/* Flag to use the scanline counter instead of the pixel counter */
#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+struct intel_wm_level {
+ bool enable;
+ u32 pri_val;
+ u32 spr_val;
+ u32 cur_val;
+ u32 fbc_val;
+};
+
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- u32 linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
bool sprites_scaled;
};
+struct skl_wm_level {
+ u16 min_ddb_alloc;
+ u16 plane_res_b;
+ u8 plane_res_l;
+ bool plane_en;
+ bool ignore_lines;
+};
+
struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
@@ -675,7 +674,6 @@ struct skl_plane_wm {
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
- u32 linetime;
};
enum vlv_wm_level {
@@ -1046,6 +1044,10 @@ struct intel_crtc_state {
struct drm_dsc_config config;
} dsc;
+ /* HSW+ linetime watermarks */
+ u16 linetime;
+ u16 ips_linetime;
+
/* Forward Error correction State */
bool fec_enable;
@@ -1059,6 +1061,32 @@ struct intel_crtc_state {
enum transcoder mst_master_transcoder;
};
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ int skipped;
+ enum intel_pipe_crc_source source;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -1102,6 +1130,10 @@ struct intel_crtc {
/* per pipe DSB related info */
struct intel_dsb dsb;
+
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc;
+#endif
};
struct intel_plane {
@@ -1181,8 +1213,6 @@ struct intel_hdmi {
};
struct intel_dp_mst_encoder;
-#define DP_MAX_DOWNSTREAM_PORTS 0x10
-
/*
* enum link_m_n_set:
* When platform provides two set of M_N registers for dp, we can
@@ -1250,6 +1280,7 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
+ u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
@@ -1422,8 +1453,17 @@ vlv_pipe_to_channel(enum pipe pipe)
}
static inline struct intel_crtc *
+intel_get_first_crtc(struct drm_i915_private *dev_priv)
+{
+ return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0));
+}
+
+static inline struct intel_crtc *
intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ /* pipe_to_crtc_mapping may have holes on systems with three display pipes */
+ drm_WARN_ON(&dev_priv->drm,
+ !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe)));
return dev_priv->pipe_to_crtc_mapping[pipe];
}
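
The drm_WARN_ON() added to intel_get_crtc_for_pipe() exists because pipe_to_crtc_mapping[] can contain NULL holes once a platform fuses off a middle pipe; catching the bad pipe at the lookup gives a far clearer backtrace than the eventual NULL dereference would. A standalone sketch of the mask check, with an invented pipe_mask:

    #include <stdio.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C };
    #define BIT(n) (1u << (n))

    int main(void)
    {
            /* Hypothetical part with pipe B fused off. */
            unsigned int pipe_mask = BIT(PIPE_A) | BIT(PIPE_C);
            enum pipe pipe = PIPE_B;

            if (!(pipe_mask & BIT(pipe)))
                    fprintf(stderr, "pipe %c not present\n", 'A' + pipe);
            return 0;
    }
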
@@ -1469,7 +1509,7 @@ enc_to_dig_port(struct intel_encoder *encoder)
}
static inline struct intel_digital_port *
-conn_to_dig_port(struct intel_connector *connector)
+intel_attached_dig_port(struct intel_connector *connector)
{
return enc_to_dig_port(intel_attached_encoder(connector));
}
@@ -1486,6 +1526,11 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
return &enc_to_dig_port(encoder)->dp;
}
+static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector)
+{
+ return enc_to_intel_dp(intel_attached_encoder(connector));
+}
+
static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
{
switch (encoder->type) {
@@ -1608,11 +1653,15 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_DP_MST) |
(1 << INTEL_OUTPUT_EDP));
}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(&dev_priv->drm, pipe);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_wait_one_vblank(&crtc->base);
}
+
static inline void
intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
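
The intel_wait_for_vblank() rewrite is the consumer side of the same fused-off-pipe problem: drm_wait_one_vblank() is indexed by drm crtc number, which stops matching the hardware pipe number as soon as the pipe space has holes, so the helper now resolves the pipe to its crtc and lets drm_crtc_wait_one_vblank() derive the index itself. A small runnable illustration of the two numbering spaces diverging; the popcount mapping is the assumed convention, not code from this patch:

    #include <stdio.h>

    /* drm index of a hw pipe = number of enabled pipes below it. */
    static int drm_index_for_pipe(unsigned int pipe_mask, int pipe)
    {
            return __builtin_popcount(pipe_mask & ((1u << pipe) - 1));
    }

    int main(void)
    {
            unsigned int mask = 0x5;   /* pipes A and C, B fused off */

            /* hw pipe C is numbered 2, but is only the second crtc */
            printf("pipe C -> drm index %d\n",
                   drm_index_for_pipe(mask, 2));
            return 0;
    }
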
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index c7424e2a04a3..a2fafd4499f2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -40,7 +40,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -49,6 +48,7 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -146,11 +146,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
-{
- return enc_to_intel_dp(intel_attached_encoder(connector));
-}
-
static void intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -272,7 +267,7 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
/* Low voltage SKUs are limited to max of 5.4G */
if (voltage == VOLTAGE_INFO_0_85V)
@@ -323,14 +318,14 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
162000, 270000
};
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[dig_port->base.port];
const int *source_rates;
- int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
+ int size, max_rate = 0, vbt_max_rate;
/* This should only be done once */
- WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->source_rates || intel_dp->num_source_rates);
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
@@ -354,6 +349,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
size = ARRAY_SIZE(g4x_rates);
}
+ vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
if (max_rate && vbt_max_rate)
max_rate = min(max_rate, vbt_max_rate);
else if (vbt_max_rate)
@@ -519,12 +515,13 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
*/
bits_per_pixel = (link_clock * lane_count * 8) /
intel_dp_mode_to_fec_clock(mode_clock);
- DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+ drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
mode_hdisplay;
- DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+ drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
+ max_bpp_small_joiner_ram);
/*
* Greatest allowed DSC BPP = MIN (output BPP from available Link BW
@@ -534,8 +531,8 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
/* Error out if the max bpp is less than smallest allowed valid bpp */
if (bits_per_pixel < valid_dsc_bpp[0]) {
- DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
- bits_per_pixel, valid_dsc_bpp[0]);
+ drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
return 0;
}
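
For a feel of the bound computed above: with the FEC overhead folded in the same way as intel_dp_mode_to_fec_clock() (the 972261 factor is assumed from this file's definition), an HBR2 x4 link driving a ~533 MHz 4K@60 mode tops out around 31 bpp. A worked sketch with those illustrative numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t link_clock = 540000;   /* kHz, HBR2 symbol clock */
            uint64_t lane_count = 4;
            uint64_t mode_clock = 533250;   /* kHz, 4K@60 pixel clock */

            /* mode clock inflated by the ~2.85% FEC overhead */
            uint64_t fec_clock = mode_clock * 1000000ULL / 972261ULL;
            uint64_t bpp = link_clock * lane_count * 8 / fec_clock;

            printf("max link bpp: %llu\n",
                   (unsigned long long)bpp);   /* -> 31 */
            return 0;
    }
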
@@ -760,20 +757,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
- if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name))
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name))
return;
- DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
@@ -783,7 +782,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
else
DP |= DP_PIPE_SEL(pipe);
- pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+ pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
/*
* The DPLL for the pipe must be enabled for this to work.
@@ -795,8 +794,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
- DRM_ERROR("Failed to force on pll for pipe %c!\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm,
+ "Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
return;
}
}
@@ -807,14 +807,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
- I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
- I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
if (!pll_enabled) {
vlv_force_pll_off(dev_priv, pipe);
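
The kick is easier to follow as an ordered sequence; this is a reading of the hunks above, not additional behavior:

    /* Sequence performed by vlv_power_sequencer_kick():
     *
     *  1. build a minimal port value: keep only the read-only
     *     DP_DETECTED bit, then add 1 lane, TP1 and the pipe select;
     *  2. force the pipe's DPLL on if it is not already running;
     *  3. write the port register, write it again with DP_PORT_EN set,
     *     then once more with it cleared -- each write chased by a
     *     posting read so it reaches the hardware in order;
     *  4. drop the forced PLL again if it was off on entry.
     *
     * The momentary enable is what makes the power sequencer latch
     * onto the port; without it even the VDD force bit is ignored. */
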
@@ -837,13 +837,16 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe !=
+ intel_dp->pps_pipe);
if (intel_dp->pps_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->pps_pipe);
} else {
- WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps_pipe != INVALID_PIPE);
if (intel_dp->active_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->active_pipe);
@@ -866,10 +869,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe != intel_dp->pps_pipe);
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
@@ -880,16 +883,17 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (WARN_ON(pipe == INVALID_PIPE))
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
pipe = PIPE_A;
vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
- DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe),
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe),
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -913,7 +917,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps_reset)
return backlight_controller;
@@ -935,13 +939,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- return I915_READ(PP_STATUS(pipe)) & PP_ON;
+ return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+ return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
@@ -958,7 +962,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe;
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
+ u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel != PANEL_PORT_SELECT_VLV(port))
@@ -997,16 +1001,18 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "no initial power sequencer for [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return;
}
- DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
- pipe_name(intel_dp->pps_pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@ -1016,8 +1022,10 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !(IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_GEN9_LP(dev_priv))))
return;
/*
@@ -1033,7 +1041,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
@@ -1119,12 +1128,13 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
pp_ctrl_reg = PP_CONTROL(pipe);
pp_div_reg = PP_DIVISOR(pipe);
- pp_div = I915_READ(pp_div_reg);
+ pp_div = intel_de_read(dev_priv, pp_div_reg);
pp_div &= PP_REFERENCE_DIVIDER_MASK;
/* 0x1F write to PP_DIV_REG sets max cycle delay */
- I915_WRITE(pp_div_reg, pp_div | 0x1F);
- I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
+ intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
+ intel_de_write(dev_priv, pp_ctrl_reg,
+ PANEL_UNLOCK_REGS);
msleep(intel_dp->panel_power_cycle_delay);
}
}
@@ -1142,7 +1152,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
intel_dp->pps_pipe == INVALID_PIPE)
return false;
- return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
+ return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
@@ -1155,7 +1165,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
intel_dp->pps_pipe == INVALID_PIPE)
return false;
- return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+ return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
static void
@@ -1167,10 +1177,11 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- WARN(1, "eDP powered off while attempting aux channel communication.\n");
- DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
- I915_READ(_pp_stat_reg(intel_dp)),
- I915_READ(_pp_ctrl_reg(intel_dp)));
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
+ drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+ intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
}
}
@@ -1191,8 +1202,9 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (!done)
- DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
- intel_dp->aux.name, timeout_ms, status);
+ drm_err(&i915->drm,
+ "%s: did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
#undef C
return status;
@@ -1209,13 +1221,14 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 freq;
if (index)
return 0;
@@ -1226,9 +1239,10 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
+ freq = dev_priv->cdclk.hw.cdclk;
else
- return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+ freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ return DIV_ROUND_CLOSEST(freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
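
The divider math in both helpers above is the same one-liner: rawclk_freq is kept in kHz and the AUX engine wants roughly a 2 MHz clock, so dividing by 2000 yields the divider directly. A worked example with an illustrative 19.2 MHz raw clock:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            unsigned int rawclk_khz = 19200;    /* illustrative */

            /* 19200 / 2000 -> 10, i.e. 19.2 MHz / 10 = 1.92 MHz */
            printf("aux divider: %u\n",
                   DIV_ROUND_CLOSEST(rawclk_khz, 2000));
            return 0;
    }
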
@@ -1360,7 +1374,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- pm_qos_update_request(&i915->pm_qos, 0);
+ cpu_latency_qos_update_request(&i915->pm_qos, 0);
intel_dp_check_edp(intel_dp);
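
The pm_qos hunk is the kernel 5.7 API rename, not a behavior change: pm_qos_update_request() on the CPU-DMA-latency class became cpu_latency_qos_update_request(), naming the one constraint (CPU wakeup latency) this request actually expresses. A hedged sketch of the pairing; the request is assumed to have been registered with cpu_latency_qos_add_request() at init:

    #include <linux/pm_qos.h>

    static struct pm_qos_request aux_qos;

    static void aux_xfer_begin(void)
    {
            /* Forbid deep C-states for the duration of the transfer. */
            cpu_latency_qos_update_request(&aux_qos, 0);
    }

    static void aux_xfer_end(void)
    {
            /* Back to "no constraint". */
            cpu_latency_qos_update_request(&aux_qos,
                                           PM_QOS_DEFAULT_VALUE);
    }
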
@@ -1378,8 +1392,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
const u32 status = intel_uncore_read(uncore, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
- WARN(1, "dp_aux_ch not started status 0x%08x\n",
- status);
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
intel_dp->aux_busy_last_status = status;
}
@@ -1388,7 +1403,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
/* Only 5 data registers! */
- if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}
@@ -1440,7 +1455,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -EBUSY;
goto out;
}
@@ -1450,7 +1466,8 @@ done:
* Timeouts occur when the sink is not connected
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -EIO;
goto out;
}
@@ -1458,7 +1475,8 @@ done:
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -ETIMEDOUT;
goto out;
}
@@ -1473,8 +1491,9 @@ done:
* drm layer takes care for the necessary retries.
*/
if (recv_bytes == 0 || recv_bytes > 20) {
- DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
- recv_bytes);
+ drm_dbg_kms(&i915->drm,
+ "%s: Forbidden recv_bytes = %d on aux transaction\n",
+ intel_dp->aux.name, recv_bytes);
ret = -EBUSY;
goto out;
}
@@ -1488,7 +1507,7 @@ done:
ret = recv_bytes;
out:
- pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
@@ -1742,7 +1761,8 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
+ aux_ch_name(dig_port->aux_ch),
port_name(encoder->port));
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
@@ -1918,8 +1938,9 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
/* Get bpp from vbt only for panels that don't have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
- DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp.bpp);
bpp = dev_priv->vbt.edp.bpp;
}
}
@@ -2115,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
/* Min Input BPC for ICL+ is 8 */
if (pipe_bpp < 8 * 3) {
- DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No DSC support for less than 8bpc\n");
return -EINVAL;
}
@@ -2150,7 +2172,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay);
if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
- DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Compressed BPP/Slice Count not supported\n");
return -EINVAL;
}
pipe_config->dsc.compressed_bpp = min_t(u16,
@@ -2167,26 +2190,28 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (pipe_config->dsc.slice_count > 1) {
pipe_config->dsc.dsc_split = true;
} else {
- DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
}
}
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
- DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
- "Compressed BPP = %d\n",
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp);
return ret;
}
pipe_config->dsc.compression_enable = true;
- DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
- "Compressed Bpp = %d Slice Count = %d\n",
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp,
- pipe_config->dsc.slice_count);
+ drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
return 0;
}
@@ -2214,7 +2239,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->max_link_rate);
/* No common link rates between source and sink */
- WARN_ON(common_len <= 0);
+ drm_WARN_ON(encoder->base.dev, common_len <= 0);
limits.min_clock = 0;
limits.max_clock = common_len - 1;
@@ -2373,7 +2398,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
@@ -2492,9 +2517,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
- intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
- intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
-
/*
* There are four kinds of DP registers:
*
@@ -2515,7 +2537,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
@@ -2539,12 +2561,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
trans_dp |= TRANS_DP_ENH_FRAMING;
else
trans_dp &= ~TRANS_DP_ENH_FRAMING;
- I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
+ intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
@@ -2590,18 +2612,20 @@ static void wait_panel_status(struct intel_dp *intel_dp,
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
- mask, value,
- I915_READ(pp_stat_reg),
- I915_READ(pp_ctrl_reg));
+ drm_dbg_kms(&dev_priv->drm,
+ "mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
mask, value, 5000))
- DRM_ERROR("Panel status timeout: status %08x control %08x\n",
- I915_READ(pp_stat_reg),
- I915_READ(pp_ctrl_reg));
+ drm_err(&dev_priv->drm,
+ "Panel status timeout: status %08x control %08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
- DRM_DEBUG_KMS("Wait complete\n");
+ drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
static void wait_panel_on(struct intel_dp *intel_dp)
@@ -2660,9 +2684,9 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- control = I915_READ(_pp_ctrl_reg(intel_dp));
- if (WARN_ON(!HAS_DDI(dev_priv) &&
- (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
}
@@ -2696,9 +2720,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
- DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2709,17 +2733,19 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
- DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] panel power wasn't enabled\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2759,14 +2785,14 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- WARN_ON(intel_dp->want_panel_vdd);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2774,12 +2800,13 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
/* Make sure sequencer is idle before allowing subsequent activity */
- DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
@@ -2851,14 +2878,14 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
- if (WARN(edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
@@ -2868,24 +2895,24 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (IS_GEN(dev_priv, 5)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
pp |= PANEL_POWER_ON;
if (!IS_GEN(dev_priv, 5))
pp |= PANEL_POWER_RESET;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
if (IS_GEN(dev_priv, 5)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
}
@@ -2913,11 +2940,12 @@ static void edp_panel_off(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
- WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2929,8 +2957,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->want_panel_vdd = false;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
wait_panel_off(intel_dp);
intel_dp->panel_power_off_time = ktime_get_boottime();
@@ -2971,8 +2999,8 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
}
@@ -3007,8 +3035,8 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
intel_dp->last_backlight_off = jiffies;
@@ -3059,7 +3087,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
+ bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
@@ -3070,7 +3098,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
- bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
+ bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -3089,8 +3117,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
- DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
- pipe_config->port_clock);
+ drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+ pipe_config->port_clock);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
@@ -3099,8 +3127,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(500);
/*
@@ -3114,8 +3142,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
intel_dp->DP |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(200);
}
@@ -3129,12 +3157,12 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
- DRM_DEBUG_KMS("disabling eDP PLL\n");
+ drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
intel_dp->DP &= ~DP_PLL_ENABLE;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(200);
}
@@ -3149,7 +3177,7 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
* FIXME should really check all downstream ports...
*/
return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
- intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+ drm_dp_is_branch(intel_dp->dpcd) &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
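
The removed open-coded test and drm_dp_is_branch() are equivalent: the helper checks the same DP_DOWNSTREAMPORT_PRESENT bit, so this hunk only trades a raw DPCD poke for the named helper. Roughly, as the helper reads in drm_dp_helper.h (quoted from memory, not from this patch):

    static inline bool
    drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
            return dpcd[DP_DOWNSTREAMPORT_PRESENT] &
                    DP_DWN_STRM_PORT_PRESENT;
    }
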
@@ -3214,7 +3242,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
enum pipe p;
for_each_pipe(dev_priv, p) {
- u32 val = I915_READ(TRANS_DP_CTL(p));
+ u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
*pipe = p;
@@ -3222,7 +3250,8 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
}
}
- DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+ port_name(port));
/* must initialize pipe to something for the asserts */
*pipe = PIPE_A;
@@ -3237,7 +3266,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
bool ret;
u32 val;
- val = I915_READ(dp_reg);
+ val = intel_de_read(dev_priv, dp_reg);
ret = val & DP_PORT_EN;
@@ -3289,12 +3318,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
else
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- tmp = I915_READ(intel_dp->output_reg);
+ tmp = intel_de_read(dev_priv, intel_dp->output_reg);
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ u32 trans_dp = intel_de_read(dev_priv,
+ TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -3328,7 +3358,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dp_get_m_n(crtc, pipe_config);
if (port == PORT_A) {
- if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -3353,8 +3383,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
}
@@ -3447,11 +3478,12 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
if (dp_train_pat & train_pat_mask)
- DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
- dp_train_pat & train_pat_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DP training pattern TPS%d\n",
+ dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
+ u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3477,7 +3509,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
- I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -3494,7 +3526,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
- DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
@@ -3513,7 +3546,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2;
break;
}
@@ -3539,8 +3573,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
if (old_crtc_state->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_encoder *encoder,
@@ -3550,11 +3584,11 @@ static void intel_enable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- u32 dp_reg = I915_READ(intel_dp->output_reg);
+ u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
- if (WARN_ON(dp_reg & DP_PORT_EN))
+ if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
return;
with_pps_lock(intel_dp, wakeref) {
@@ -3583,8 +3617,8 @@ static void intel_enable_dp(struct intel_encoder *encoder,
intel_dp_stop_link_train(intel_dp);
if (pipe_config->has_audio) {
- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
- pipe_name(pipe));
+ drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
+ pipe_name(pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
@@ -3625,9 +3659,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return;
edp_panel_vdd_off_sync(intel_dp);
@@ -3641,11 +3675,12 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- I915_WRITE(pp_on_reg, 0);
- POSTING_READ(pp_on_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
+ intel_de_write(dev_priv, pp_on_reg, 0);
+ intel_de_posting_read(dev_priv, pp_on_reg);
intel_dp->pps_pipe = INVALID_PIPE;
}
@@ -3660,17 +3695,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- WARN(intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
- DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
@@ -3686,7 +3722,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
lockdep_assert_held(&dev_priv->pps_mutex);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
if (intel_dp->pps_pipe != INVALID_PIPE &&
intel_dp->pps_pipe != crtc->pipe) {
@@ -3712,9 +3748,10 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
- DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
+ encoder->base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -4140,18 +4177,22 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
}
if (mask)
- DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
-
- DRM_DEBUG_KMS("Using vswing level %d\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
- DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
+ drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "");
intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
void
@@ -4164,8 +4205,8 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -4178,10 +4219,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (!HAS_DDI(dev_priv))
return;
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
@@ -4195,7 +4236,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
- DRM_ERROR("Timed out waiting for DP idle patterns\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DP idle patterns\n");
}
static void
@@ -4208,10 +4250,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 DP = intel_dp->DP;
- if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+ if (drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, intel_dp->output_reg) &
+ DP_PORT_EN) == 0))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -4221,12 +4265,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
DP &= ~DP_LINK_TRAIN_MASK;
DP |= DP_LINK_TRAIN_PAT_IDLE;
}
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -4245,12 +4289,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
DP_LINK_TRAIN_PAT_1;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
DP &= ~DP_PORT_EN;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -4370,7 +4414,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
/* this function is meant to be called only once */
- WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -4390,8 +4434,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
- DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
- intel_dp->edp_dpcd);
+ drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+ (int)sizeof(intel_dp->edp_dpcd),
+ intel_dp->edp_dpcd);
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
@@ -4466,7 +4511,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* we don't care about reading it here and in intel_edp_init_dpcd().
*/
if (!intel_dp_is_edp(intel_dp) &&
- !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
+ !drm_dp_has_quirk(&intel_dp->desc, 0,
+ DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -5125,7 +5171,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
+ drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
return 0;
@@ -5195,7 +5241,8 @@ intel_dp_hotplug(struct intel_encoder *encoder,
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
/*
* Keeping it consistent with intel_ddi_hotplug() and
@@ -5281,7 +5328,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
intel_psr_short_pulse(intel_dp);
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
- DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
drm_kms_helper_hotplug_event(&dev_priv->drm);
}
@@ -5370,7 +5418,7 @@ static bool ibx_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
@@ -5393,7 +5441,7 @@ static bool cpt_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool spt_digital_port_connected(struct intel_encoder *encoder)
@@ -5412,7 +5460,7 @@ static bool spt_digital_port_connected(struct intel_encoder *encoder)
return cpt_digital_port_connected(encoder);
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
@@ -5435,7 +5483,7 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
@@ -5458,7 +5506,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
@@ -5466,7 +5514,7 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
else
return ibx_digital_port_connected(encoder);
}
@@ -5476,7 +5524,7 @@ static bool snb_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(encoder);
}
@@ -5486,7 +5534,7 @@ static bool ivb_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
else
return cpt_digital_port_connected(encoder);
}
@@ -5496,7 +5544,7 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(encoder);
}
@@ -5521,16 +5569,16 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
enum phy phy)
{
if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
- return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+ return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
- return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
+ return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
}
static bool icp_digital_port_connected(struct intel_encoder *encoder)
@@ -5631,6 +5679,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = drm_detect_monitor_audio(edid);
drm_dp_cec_set_edid(&intel_dp->aux, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -5643,6 +5692,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_connector->detect_edid = NULL;
intel_dp->has_audio = false;
+ intel_dp->edid_quirks = 0;
}
static int
@@ -5656,9 +5706,10 @@ intel_dp_detect(struct drm_connector *connector,
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
@@ -5673,9 +5724,10 @@ intel_dp_detect(struct drm_connector *connector,
memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
if (intel_dp->is_mst) {
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst,
- intel_dp->mst_mgr.mst_state);
+ drm_dbg_kms(&dev_priv->drm,
+ "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -5763,8 +5815,8 @@ intel_dp_force(struct drm_connector *connector)
intel_aux_power_domain(dig_port);
intel_wakeref_t wakeref;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
intel_dp_unset_edid(intel_dp);
if (connector->status != connector_status_connected)
@@ -5815,7 +5867,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- i915_debugfs_connector_add(connector);
+ intel_connector_debugfs_add(connector);
DRM_DEBUG_KMS("registering %s bus for %s\n",
intel_dp->aux.name, connector->kdev->kobj.name);
@@ -6396,6 +6448,7 @@ static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
bool is_repeater, u8 content_type)
{
+ int ret;
struct hdcp2_dp_errata_stream_type stream_type_msg;
if (is_repeater)
@@ -6411,8 +6464,11 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
stream_type_msg.stream_type = content_type;
- return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
sizeof(stream_type_msg));
+
+ return ret < 0 ? ret : 0;
}
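A hedged aside on the return-convention fix above, assuming (as with drm_dp_dpcd_write()) that the underlying write helper returns the number of bytes written on success while HDCP shim hooks are expected to return 0 on success; the caller below is hypothetical, for illustration only:

/* Hypothetical caller sketch -- not from the patch. Any nonzero return
 * is treated as failure here, so a positive byte count leaking out of
 * config_stream_type() would wrongly read as an error. */
ret = shim->config_stream_type(dig_port, is_repeater, content_type);
if (ret)
	return ret;	/* only negative errnos should get this far */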
static
@@ -6492,7 +6548,8 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
- DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD left on by BIOS, adjusting state tracking\n");
intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
edp_panel_vdd_schedule_off(intel_dp);
@@ -6519,7 +6576,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
- intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
if (lspcon->active)
lspcon_resume(lspcon);
@@ -6545,6 +6602,140 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
}
}
+static int intel_modeset_tile_group(struct intel_atomic_state *state,
+ int tile_group_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_group_id)
+ continue;
+
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ break;
+ }
+
+ crtc = to_intel_crtc(conn_state->crtc);
+
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
+ u8 transcoders)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ if (transcoders == 0)
+ return 0;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->hw.enable)
+ continue;
+
+ if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
+ continue;
+
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ transcoders &= ~BIT(crtc_state->cpu_transcoder);
+ }
+
+ drm_WARN_ON(&dev_priv->drm, transcoders != 0);
+
+ return 0;
+}
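An illustrative restatement of the invariant the drm_WARN_ON above enforces (names taken from the hunk itself):

/* Each set bit in 'transcoders' names a transcoder that must join the
 * modeset. Bits are cleared as the owning enabled CRTC is added to the
 * state, so any bit still set after the loop belongs to no enabled
 * CRTC -- a dangling sync reference, hence the warning. */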
+
+static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
+ struct drm_connector *connector)
+{
+ const struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(&state->base, connector);
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc;
+ u8 transcoders;
+
+ crtc = to_intel_crtc(old_conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+
+ if (!old_crtc_state->hw.active)
+ return 0;
+
+ transcoders = old_crtc_state->sync_mode_slaves_mask;
+ if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
+ transcoders |= BIT(old_crtc_state->master_transcoder);
+
+ return intel_modeset_affected_transcoders(state, transcoders);
+}
+
+static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(conn->dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(conn, &state->base);
+ if (ret)
+ return ret;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ if (!intel_connector_needs_modeset(state, conn))
+ return 0;
+
+ if (conn->has_tile) {
+ ret = intel_modeset_tile_group(state, conn->tile_group->id);
+ if (ret)
+ return ret;
+ }
+
+ return intel_modeset_synced_crtcs(state, conn);
+}
+
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -6561,7 +6752,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.detect_ctx = intel_dp_detect,
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .atomic_check = intel_digital_connector_atomic_check,
+ .atomic_check = intel_dp_connector_atomic_check,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -6697,10 +6888,10 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
/* Ensure PPS is unlocked */
if (!HAS_DDI(dev_priv))
- I915_WRITE(regs.pp_ctrl, pp_ctl);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- pp_on = I915_READ(regs.pp_on);
- pp_off = I915_READ(regs.pp_off);
+ pp_on = intel_de_read(dev_priv, regs.pp_on);
+ pp_off = intel_de_read(dev_priv, regs.pp_off);
/* Pull timing values out of registers */
seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
@@ -6711,7 +6902,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
- pp_div = I915_READ(regs.pp_div);
+ pp_div = intel_de_read(dev_priv, regs.pp_div);
seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
@@ -6768,8 +6959,9 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
- DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
}
/* T11_T12 delay is special and actually in units of 100ms, but zero
* based in the hw (so we need to add 100 ms). But the sw vbt
@@ -6811,12 +7003,15 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay,
+ intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+ drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay,
+ intel_dp->backlight_off_delay);
/*
* We override the HW backlight delays to 1 because we do manual waits
@@ -6841,7 +7036,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp_on, pp_off, port_sel = 0;
- int div = dev_priv->rawclk_freq / 1000;
+ int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
@@ -6865,14 +7060,16 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
if (force_disable_vdd) {
u32 pp = ilk_get_pp_control(intel_dp);
- WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
if (pp & EDP_FORCE_VDD)
- DRM_DEBUG_KMS("VDD already on, disabling first\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD already on, disabling first\n");
pp &= ~EDP_FORCE_VDD;
- I915_WRITE(regs.pp_ctrl, pp);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp);
}
pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
@@ -6903,31 +7100,31 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
pp_on |= port_sel;
- I915_WRITE(regs.pp_on, pp_on);
- I915_WRITE(regs.pp_off, pp_off);
+ intel_de_write(dev_priv, regs.pp_on, pp_on);
+ intel_de_write(dev_priv, regs.pp_off, pp_off);
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
if (i915_mmio_reg_valid(regs.pp_div)) {
- I915_WRITE(regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ intel_de_write(dev_priv, regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
} else {
u32 pp_ctl;
- pp_ctl = I915_READ(regs.pp_ctrl);
+ pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- I915_WRITE(regs.pp_ctrl, pp_ctl);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
}
- DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- I915_READ(regs.pp_on),
- I915_READ(regs.pp_off),
- i915_mmio_reg_valid(regs.pp_div) ?
- I915_READ(regs.pp_div) :
- (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ intel_de_read(dev_priv, regs.pp_on),
+ intel_de_read(dev_priv, regs.pp_off),
+ i915_mmio_reg_valid(regs.pp_div) ?
+ intel_de_read(dev_priv, regs.pp_div) :
+ (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
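A hedged worked example of the pp-clock divisor arithmetic above, assuming a 24 MHz raw clock (rawclk_freq == 24000, in kHz) and PPS delays kept in the eDP-standard 100 us units; the values are illustrative only:

/* Illustrative arithmetic -- not part of the patch. */
int div = 24000 / 1000;			/* rawclk_freq in kHz -> 24 */
int ref_div = (100 * div) / 2 - 1;	/* (100 * 24) / 2 - 1 = 1199 */
/* PP_REFERENCE_DIVIDER is programmed with 1199, while
 * DIV_ROUND_UP(seq->t11_t12, 1000) presumably converts the power-cycle
 * delay from 100 us units to the register's 100 ms units. */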
static void intel_dp_pps_init(struct intel_dp *intel_dp)
@@ -6964,22 +7161,24 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
- DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Refresh rate should be positive non-zero.\n");
return;
}
if (intel_dp == NULL) {
- DRM_DEBUG_KMS("DRRS not supported.\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
return;
}
if (!intel_crtc) {
- DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS: intel_crtc not initialized\n");
return;
}
if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
- DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
+ drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
return;
}
@@ -6988,13 +7187,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
index = DRRS_LOW_RR;
if (index == dev_priv->drrs.refresh_rate_type) {
- DRM_DEBUG_KMS(
- "DRRS requested for previously set RR...ignoring\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS requested for previously set RR...ignoring\n");
return;
}
if (!crtc_state->hw.active) {
- DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "eDP encoder disabled. CRTC not Active\n");
return;
}
@@ -7008,13 +7208,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
break;
case DRRS_MAX_RR:
default:
- DRM_ERROR("Unsupported refreshrate type\n");
+ drm_err(&dev_priv->drm,
+ "Unsupported refreshrate type\n");
}
} else if (INTEL_GEN(dev_priv) > 6) {
i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (index > DRRS_HIGH_RR) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
@@ -7026,12 +7227,13 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
else
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
dev_priv->drrs.refresh_rate_type = index;
- DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+ drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
+ refresh_rate);
}
/**
@@ -7047,18 +7249,19 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!crtc_state->has_drrs) {
- DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+ drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
return;
}
if (dev_priv->psr.enabled) {
- DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR enabled. Not enabling DRRS.\n");
return;
}
mutex_lock(&dev_priv->drrs.mutex);
if (dev_priv->drrs.dp) {
- DRM_DEBUG_KMS("DRRS already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
goto unlock;
}
@@ -7284,25 +7487,28 @@ intel_dp_drrs_init(struct intel_connector *connector,
mutex_init(&dev_priv->drrs.mutex);
if (INTEL_GEN(dev_priv) <= 6) {
- DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS supported for Gen7 and above\n");
return NULL;
}
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
- DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
+ drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
return NULL;
}
downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
if (!downclock_mode) {
- DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Downclock mode is not found. DRRS not supported\n");
return NULL;
}
dev_priv->drrs.type = dev_priv->vbt.drrs_type;
dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
- DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
@@ -7331,8 +7537,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(dev_priv)) {
- WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- DRM_INFO("LVDS was detected, not registering eDP\n");
+ drm_WARN_ON(dev,
+ !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ drm_info(&dev_priv->drm,
+ "LVDS was detected, not registering eDP\n");
return false;
}
@@ -7348,7 +7556,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!has_dpcd) {
/* if this fails, presume the device is a ghost */
- DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ drm_info(&dev_priv->drm,
+ "failed to retrieve link info, disabling eDP\n");
goto out_vdd_off;
}
@@ -7356,8 +7565,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector,
- edid);
+ drm_connector_update_edid_property(connector, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7393,17 +7602,20 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
- DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "using pipe %c for initial backlight setup\n",
+ pipe_name(pipe));
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
- if (fixed_mode)
- drm_connector_init_panel_orientation_property(
- connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+ if (fixed_mode) {
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ dev_priv->vbt.orientation,
+ fixed_mode->hdisplay, fixed_mode->vdisplay);
+ }
return true;
@@ -7459,10 +7671,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
INIT_WORK(&intel_connector->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
- if (WARN(intel_dig_port->max_lanes < 1,
- "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@@ -7472,7 +7684,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->active_pipe = INVALID_PIPE;
/* Preserve the current hw state. */
- intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
if (intel_dp_is_port_edp(dev_priv, port)) {
@@ -7480,7 +7692,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- WARN_ON(intel_phy_is_tc(dev_priv, phy));
+ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -7498,14 +7710,16 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp_is_edp(intel_dp) &&
- port != PORT_B && port != PORT_C))
+ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp_is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
return false;
- DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
- type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adding %s connector on [ENCODER:%d:%s]\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ intel_encoder->base.base.id, intel_encoder->base.name);
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -7518,6 +7732,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->ycbcr_420_allowed = true;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_dp_aux_init(intel_dp);
@@ -7543,7 +7758,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP init failed, skipping.\n");
}
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -7551,8 +7767,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* generated on the port when a cable is not attached.
*/
if (IS_G45(dev_priv)) {
- u32 temp = I915_READ(PEG_BAND_GAP_DATA);
- I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+ intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+ (temp & ~0xf) | 0xd);
}
return true;
@@ -7616,6 +7833,8 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
+ intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_port_to_power_domain(port);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 3da166054788..0c7be8ed1423 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
enum pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7c653f8c307f..dbfa6895795b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -57,10 +57,27 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
*/
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
u8 read_val[2] = { 0x0 };
+ u8 mode_reg;
u16 level = 0;
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &mode_reg) != 1) {
+ DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ return 0;
+ }
+
+ /*
+ * If we're not in DPCD control mode yet, the programmed brightness
+ * value is meaningless and we should assume max brightness
+ */
+ if ((mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) !=
+ DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)
+ return connector->panel.backlight.max;
+
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
@@ -82,7 +99,7 @@ static void
intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -110,62 +127,29 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
- u8 pn, pn_min, pn_max;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+ int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
- /* Find desired value of (F x P)
- * Note that, if F x P is out of supported range, the maximum value or
- * minimum value will applied automatically. So no need to check that.
- */
freq = dev_priv->vbt.backlight.pwm_freq_hz;
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
if (!freq) {
DRM_DEBUG_KMS("Use panel default backlight frequency\n");
return false;
}
fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+ f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+ fxp_actual = f << pn;
- /* Use highest possible value of Pn for more granularity of brightness
- * adjustment while satifying the conditions below.
- * - Pn is in the range of Pn_min and Pn_max
- * - F is in the range of 1 and 255
- * - FxP is within 25% of desired value.
- * Note: 25% is arbitrary value and may need some tweak.
- */
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
- return false;
- }
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
- return false;
- }
- pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
- pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-
+ /* Ensure frequency is within 25% of desired value */
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
- if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
- DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
- return false;
- }
-
- for (pn = pn_max; pn >= pn_min; pn--) {
- f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
- fxp_actual = f << pn;
- if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
- break;
- }
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
+ DRM_DEBUG_KMS("Actual frequency out of range\n");
return false;
}
+
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
@@ -178,7 +162,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
@@ -197,6 +182,12 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT,
+ panel->backlight.pwmgen_bit_count) < 0)
+ DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+
break;
/* Do nothing when it is already DPCD mode */
@@ -216,8 +207,9 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
}
+ intel_dp_aux_set_backlight(conn_state,
+ connector->panel.backlight.level);
set_aux_backlight_enable(intel_dp, true);
- intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
}
static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -226,20 +218,91 @@ static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old
false);
}
+static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_panel *panel = &connector->panel;
+ u32 max_backlight = 0;
+ int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
+ u8 pn, pn_min, pn_max;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT, &pn) == 1) {
+ pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ max_backlight = (1 << pn) - 1;
+ }
+
+ /* Find desired value of (F x P)
+ * Note that, if F x P is out of supported range, the maximum value or
+ * minimum value will be applied automatically. So no need to check that.
+ */
+ freq = i915->vbt.backlight.pwm_freq_hz;
+ DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+ if (!freq) {
+ DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ return max_backlight;
+ }
+
+ fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+
+ /* Use highest possible value of Pn for more granularity of brightness
+ * adjustment while satisfying the conditions below.
+ * - Pn is in the range of Pn_min and Pn_max
+ * - F is in the range of 1 and 255
+ * - FxP is within 25% of desired value.
+ * Note: 25% is an arbitrary value and may need some tweak.
+ */
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+ return max_backlight;
+ }
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+ return max_backlight;
+ }
+ pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
+ fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
+ if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
+ DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+ return max_backlight;
+ }
+
+ for (pn = pn_max; pn >= pn_min; pn--) {
+ f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+ fxp_actual = f << pn;
+ if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
+ break;
+ }
+
+ DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ return max_backlight;
+ }
+ panel->backlight.pwmgen_bit_count = pn;
+
+ max_backlight = (1 << pn) - 1;
+
+ return max_backlight;
+}
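A hedged worked example of the F x P search above, assuming DP_EDP_BACKLIGHT_FREQ_BASE_KHZ is 27000 (the 27 MHz base frequency) and a VBT PWM frequency of 1000 Hz; all values are illustrative:

/* Illustrative arithmetic -- not part of the patch. */
int freq = 1000;					/* vbt pwm_freq_hz */
int fxp = DIV_ROUND_CLOSEST(27000 * 1000, freq);	/* 27000 */
int fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);		/* 20250 */
int fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);		/* 33750 */
/* First candidate pn = pn_max, say 7:
 *   f = clamp(DIV_ROUND_CLOSEST(27000, 1 << 7), 1, 255) = 211
 *   fxp_actual = 211 << 7 = 27008, inside [20250, 33750],
 * so pn = 7 is kept and max_backlight = (1 << 7) - 1 = 127. */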
+
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct intel_panel *panel = &connector->panel;
- if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
- panel->backlight.max = 0xFFFF;
- else
- panel->backlight.max = 0xFF;
+ panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+ if (!panel->backlight.max)
+ return -ENODEV;
panel->backlight.min = 0;
panel->backlight.level = intel_dp_aux_get_backlight(connector);
-
panel->backlight.enabled = panel->backlight.level != 0;
return 0;
@@ -248,7 +311,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
@@ -265,15 +328,32 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
- struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (i915_modparams.enable_dpcd_backlight == 0 ||
- (i915_modparams.enable_dpcd_backlight == -1 &&
- dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
+ !intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
- if (!intel_dp_aux_display_control_capable(intel_connector))
+ /*
+ * There are a lot of machines that don't properly advertise the
+ * backlight control interface in their VBIOS, :\
+ */
+ if (dev_priv->vbt.backlight.type !=
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
+ i915_modparams.enable_dpcd_backlight != 1 &&
+ !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
+ DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
+ DRM_DEV_INFO(dev->dev,
+ "Panel advertises DPCD backlight support, but "
+ "VBT disagrees. If your backlight controls "
+ "don't work try booting with "
+ "i915.enable_dpcd_backlight=1. If your machine "
+ "needs this, please file a _new_ bug report on "
+ "drm/i915, see " FDO_BUG_URL " for details.\n");
return -ENODEV;
+ }
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2a1130dd1ad0..a7defb37ab00 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -130,6 +130,7 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;
@@ -143,9 +144,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
&link_bw, &rate_select);
if (link_bw)
- DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw);
+ drm_dbg_kms(&i915->drm,
+ "Using LINK_BW_SET value %02x\n", link_bw);
else
- DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select);
+ drm_dbg_kms(&i915->drm,
+ "Using LINK_RATE_SET value %02x\n", rate_select);
/* Write the link configuration data */
link_config[0] = link_bw;
@@ -169,7 +172,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
- DRM_ERROR("failed to enable link training\n");
+ drm_err(&i915->drm, "failed to enable link training\n");
return false;
}
@@ -193,22 +196,23 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("failed to get link status\n");
+ drm_err(&i915->drm, "failed to get link status\n");
return false;
}
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
- DRM_DEBUG_KMS("clock recovery OK\n");
+ drm_dbg_kms(&i915->drm, "clock recovery OK\n");
return true;
}
if (voltage_tries == 5) {
- DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+ drm_dbg_kms(&i915->drm,
+ "Same voltage tried 5 times\n");
return false;
}
if (max_vswing_reached) {
- DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+ drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
return false;
}
@@ -217,7 +221,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
- DRM_ERROR("failed to update link training\n");
+ drm_err(&i915->drm,
+ "failed to update link training\n");
return false;
}
@@ -231,7 +236,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
max_vswing_reached = true;
}
- DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries);
+ drm_err(&i915->drm,
+ "Failed clock recovery %d times, giving up!\n", max_cr_tries);
return false;
}
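A compressed restatement of the clock-recovery loop's exit paths, as the hunks above implement them:

/* Exit conditions, paraphrased for illustration:
 *  - drm_dp_clock_recovery_ok()           -> success
 *  - same voltage requested 5 times       -> fail (sink is stuck)
 *  - max voltage swing reached without CR -> fail
 *  - max_cr_tries iterations exhausted    -> fail; the caller falls
 *                                            back to a lower link
 *                                            rate or lane count
 */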
@@ -256,9 +262,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
return DP_TRAINING_PATTERN_4;
} else if (intel_dp->link_rate == 810000) {
if (!source_tps4)
- DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "8.1 Gbps link rate without source HBR3/TPS4 support\n");
if (!sink_tps4)
- DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "8.1 Gbps link rate without sink TPS4 support\n");
}
/*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
@@ -271,9 +279,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
return DP_TRAINING_PATTERN_3;
} else if (intel_dp->link_rate >= 540000) {
if (!source_tps3)
- DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
- DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
}
return DP_TRAINING_PATTERN_2;
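A hedged summary of the training-pattern selection above, as the code reads:

/* Pattern chosen for channel equalization:
 *   both ends support TPS4 -> TPS4 (needed for the 8.1 Gbps HBR3 rate)
 *   both ends support TPS3 -> TPS3 (expected for HBR2, >= 5.4 Gbps)
 *   otherwise              -> TPS2, with a debug note if the link rate
 *                             really called for TPS3/TPS4
 */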
@@ -282,6 +292,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int tries;
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
@@ -295,7 +306,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
training_pattern)) {
- DRM_ERROR("failed to start channel equalization\n");
+ drm_err(&i915->drm, "failed to start channel equalization\n");
return false;
}
@@ -303,7 +314,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("failed to get link status\n");
+ drm_err(&i915->drm,
+ "failed to get link status\n");
break;
}
@@ -311,23 +323,25 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp_dump_link_status(link_status);
- DRM_DEBUG_KMS("Clock recovery check failed, cannot "
- "continue channel equalization\n");
+ drm_dbg_kms(&i915->drm,
+ "Clock recovery check failed, cannot "
+ "continue channel equalization\n");
break;
}
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
- DRM_DEBUG_KMS("Channel EQ done. DP Training "
- "successful\n");
+ drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
+ "successful\n");
break;
}
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
- DRM_ERROR("failed to update link training\n");
+ drm_err(&i915->drm,
+ "failed to update link training\n");
break;
}
}
@@ -335,7 +349,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* Try 5 times, else fail and try at lower BW */
if (tries == 5) {
intel_dp_dump_link_status(link_status);
- DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
+ drm_dbg_kms(&i915->drm,
+ "Channel equalization failed 5 times\n");
}
intel_dp_set_idle_link_train(intel_dp);
@@ -362,17 +377,19 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (!intel_dp_link_training_channel_equalization(intel_dp))
goto failure_handling;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
return;
failure_handling:
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
if (!intel_dp_get_link_train_fallback_values(intel_dp,
intel_dp->link_rate,
intel_dp->lane_count))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index cba68c5a80fa..44f3fd251ca1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -50,7 +50,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
void *port = connector->port;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
@@ -352,8 +352,9 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
- WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
- !intel_dp_mst_is_master_trans(old_crtc_state));
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+ !intel_dp_mst_is_master_trans(old_crtc_state));
intel_crtc_vblank_off(old_crtc_state);
@@ -361,9 +362,12 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
- val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
+ val);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
@@ -437,8 +441,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
connector->encoder = encoder;
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
- WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
- !intel_dp_mst_is_master_trans(pipe_config));
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+ !intel_dp_mst_is_master_trans(pipe_config));
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
@@ -459,8 +464,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = I915_READ(intel_dp->regs.dp_tp_status);
- I915_WRITE(intel_dp->regs.dp_tp_status, temp);
+ temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
@@ -475,6 +480,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_enable_pipe_clock(pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
+
+ intel_dp_set_m_n(pipe_config, M1_N1);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -486,6 +493,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+
+ intel_enable_pipe(pipe_config);
+
+ intel_crtc_vblank_on(pipe_config);
+
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
@@ -535,12 +548,41 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
+static int
+intel_dp_mst_connector_late_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = drm_dp_mst_connector_late_register(connector,
+ intel_connector->port);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_connector_register(connector);
+ if (ret < 0)
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+
+ return ret;
+}
+
+static void
+intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_connector_unregister(connector);
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+}
+
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
- .early_unregister = intel_connector_unregister,
+ .late_register = intel_dp_mst_connector_late_register,
+ .early_unregister = intel_dp_mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -632,9 +674,9 @@ static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
{
- if (connector->encoder && connector->base.state->crtc) {
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ if (encoder && connector->base.state->crtc) {
enum pipe pipe;
- if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+ if (!encoder->get_hw_state(encoder, &pipe))
return false;
return true;
}
@@ -706,36 +748,8 @@ err:
return NULL;
}
-static void intel_dp_register_mst_connector(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- if (dev_priv->fbdev)
- drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
- connector);
-
- drm_connector_register(connector);
-}
-
-static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
- drm_connector_unregister(connector);
-
- if (dev_priv->fbdev)
- drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
- connector);
-
- drm_connector_put(connector);
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
- .register_connector = intel_dp_register_mst_connector,
- .destroy_connector = intel_dp_destroy_mst_connector,
};
static struct intel_dp_mst_encoder *
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 6fb1f7a7364e..399a7edb4568 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -259,7 +259,8 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
}
}
- WARN(1, "PHY not found for PORT %c", port_name(port));
+ drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c",
+ port_name(port));
*phy = DPIO_PHY0;
*ch = DPIO_CH0;
}
@@ -278,33 +279,34 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
val &= ~SCALE_DCOMP_METHOD;
if (enable)
val |= SCALE_DCOMP_METHOD;
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
+ drm_err(&dev_priv->drm,
+ "Disabled scaling while ouniqetrangenmethod was set");
- I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
val &= ~DE_EMPHASIS;
val |= deemphasis << DEEMPH_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
}
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
@@ -314,20 +316,20 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
phy_info = bxt_get_phy_info(dev_priv, phy);
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
+ if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
return false;
- if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) &
(PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
- phy);
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d powered, but power hasn't settled\n", phy);
return false;
}
- if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
- phy);
+ if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d powered, but still in reset\n", phy);
return false;
}
@@ -337,7 +339,7 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+ u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
@@ -347,7 +349,8 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
{
if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
GRC_DONE, 10))
- DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+ drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n",
+ phy);
}
static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
@@ -364,18 +367,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
- "won't reprogram it\n", phy);
+ drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
return;
}
- DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
- "force reprogramming it\n", phy);
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
}
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
val |= phy_info->pwron_mask;
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
/*
* The PHY registers start out inaccessible and respond to reads with
@@ -390,29 +394,30 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
PHY_RESERVED | PHY_POWER_GOOD,
PHY_POWER_GOOD,
1))
- DRM_ERROR("timeout during PHY%d power on\n", phy);
+ drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
+ phy);
/* Program PLL Rcomp code offset */
- val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
val &= ~IREF0RC_OFFSET_MASK;
val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
- val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
val &= ~IREF1RC_OFFSET_MASK;
val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
/* Program power gating */
- val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
SUS_CLK_CONFIG;
- I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
if (phy_info->dual_channel) {
- val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
}
if (phy_info->rcomp_phy != -1) {
@@ -430,19 +435,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
- I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+ intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
- val = I915_READ(BXT_PORT_REF_DW8(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
val |= GRC_DIS | GRC_RDY_OVRD;
- I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
}
if (phy_info->reset_delay)
udelay(phy_info->reset_delay);
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -452,13 +457,13 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
phy_info = bxt_get_phy_info(dev_priv, phy);
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
val &= ~phy_info->pwron_mask;
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -496,7 +501,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
va_list args;
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if ((val & mask) == expected)
return true;
@@ -504,7 +509,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
vaf.fmt = reg_fmt;
vaf.va = &args;
- DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
"current %08x, expected %08x (mask %08x)\n",
phy, &vaf, reg.reg, val, (val & ~mask) | expected,
mask);
@@ -599,7 +604,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+ u32 val = intel_de_read(dev_priv,
+ BXT_PORT_TX_DW14_LN(phy, ch, lane));
/*
* Note that on CHV this flag is called UPAR, but has
@@ -609,7 +615,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
if (lane_lat_optim_mask & BIT(lane))
val |= LATENCY_OPTIM;
- I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+ val);
}
}
@@ -627,7 +634,8 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
mask = 0;
for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+ u32 val = intel_de_read(dev_priv,
+ BXT_PORT_TX_DW14_LN(phy, ch, lane));
if (val & LATENCY_OPTIM)
mask |= BIT(lane);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c75e34d87111..2d47f1f756a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -45,6 +45,22 @@
* commit phase.
*/
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ bool (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*update_ref_clks)(struct drm_i915_private *i915);
+ void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state);
+};
+
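The struct added here is a per-platform ops table: each display generation fills in the callbacks it supports (update_ref_clks and update_active_dpll may be left NULL) and the DPLL core dispatches through whichever manager was selected at init. A minimal standalone sketch of that dispatch pattern, using purely hypothetical names rather than the driver's real types:

/*
 * Minimal sketch of the ops-table pattern used by struct
 * intel_dpll_mgr above. All names here are hypothetical; this is
 * not the driver's actual dispatch code.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_state { int crtc_id; };

struct fake_mgr {
	const char *name;
	bool (*get_plls)(struct fake_state *state);
	void (*put_plls)(struct fake_state *state);
};

static bool demo_get_plls(struct fake_state *state)
{
	printf("get_plls for crtc %d\n", state->crtc_id);
	return true;
}

static void demo_put_plls(struct fake_state *state)
{
	printf("put_plls for crtc %d\n", state->crtc_id);
}

/* One static table per platform; the core dispatches blindly. */
static const struct fake_mgr demo_mgr = {
	.name = "demo",
	.get_plls = demo_get_plls,
	.put_plls = demo_put_plls,
};

int main(void)
{
	struct fake_state state = { .crtc_id = 0 };
	const struct fake_mgr *mgr = &demo_mgr;

	printf("using %s manager\n", mgr->name);
	if (mgr->get_plls(&state))
		mgr->put_plls(&state);
	return 0;
}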
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll_state *shared_dpll)
@@ -52,8 +68,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id i;
/* Copy shared dpll state */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
shared_dpll[i] = pll->state;
}
@@ -88,7 +104,7 @@ struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- return &dev_priv->shared_dplls[id];
+ return &dev_priv->dpll.shared_dplls[id];
}
/**
@@ -103,11 +119,14 @@ enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- if (WARN_ON(pll < dev_priv->shared_dplls||
- pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+ long pll_idx = pll - dev_priv->dpll.shared_dplls;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ pll_idx < 0 ||
+ pll_idx >= dev_priv->dpll.num_shared_dpll))
return -1;
- return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+ return pll_idx;
}
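The reworked helper derives the PLL id by pointer subtraction, which yields an element index (not a byte offset) and is only meaningful for pointers into the shared_dplls array, hence the explicit range check. A small standalone illustration, using hypothetical types:

/*
 * Sketch of the pointer-difference index computation used above
 * (hypothetical types, not driver code). Subtracting the array base
 * from an element pointer yields the element index; the range check
 * rejects pointers that do not point into the array.
 */
#include <stdio.h>

#define NUM_PLLS 4

struct pll { int dummy; };

static struct pll plls[NUM_PLLS];

static long pll_index(const struct pll *pll)
{
	long idx = pll - plls;	/* element count, not bytes */

	if (idx < 0 || idx >= NUM_PLLS)
		return -1;
	return idx;
}

int main(void)
{
	printf("%ld\n", pll_index(&plls[2]));	/* prints 2 */
	return 0;
}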
/* For ILK+ */
@@ -118,7 +137,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
+ if (drm_WARN(&dev_priv->drm, !pll,
+ "asserting DPLL %s with no DPLL\n", onoff(state)))
return;
cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
@@ -140,19 +160,19 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (WARN_ON(pll == NULL))
+ if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
- WARN_ON(!pll->state.crtc_mask);
+ mutex_lock(&dev_priv->dpll.lock);
+ drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
if (!pll->active_mask) {
- DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
- WARN_ON(pll->on);
+ drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
+ drm_WARN_ON(&dev_priv->drm, pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
pll->info->funcs->prepare(dev_priv, pll);
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -169,35 +189,36 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
unsigned int old_mask;
- if (WARN_ON(pll == NULL))
+ if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
old_mask = pll->active_mask;
- if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
- WARN_ON(pll->active_mask & crtc_mask))
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
+ drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
goto out;
pll->active_mask |= crtc_mask;
- DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
- pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "enable %s (active %x, on? %d) for crtc %d\n",
+ pll->info->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
if (old_mask) {
- WARN_ON(!pll->on);
+ drm_WARN_ON(&dev_priv->drm, !pll->on);
assert_shared_dpll_enabled(dev_priv, pll);
goto out;
}
- WARN_ON(pll->on);
+ drm_WARN_ON(&dev_priv->drm, pll->on);
- DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
pll->info->funcs->enable(dev_priv, pll);
pll->on = true;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -220,27 +241,28 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (pll == NULL)
return;
- mutex_lock(&dev_priv->dpll_lock);
- if (WARN_ON(!(pll->active_mask & crtc_mask)))
+ mutex_lock(&dev_priv->dpll.lock);
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
goto out;
- DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
- pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "disable %s (active %x, on? %d) for crtc %d\n",
+ pll->info->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
assert_shared_dpll_enabled(dev_priv, pll);
- WARN_ON(!pll->on);
+ drm_WARN_ON(&dev_priv->drm, !pll->on);
pll->active_mask &= ~crtc_mask;
if (pll->active_mask)
goto out;
- DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
pll->info->funcs->disable(dev_priv, pll);
pll->on = false;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static struct intel_shared_dpll *
@@ -256,10 +278,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+ drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
/* Only want to check enabled timings first */
if (shared_dpll[i].crtc_mask == 0) {
@@ -271,20 +293,21 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
if (memcmp(pll_state,
&shared_dpll[i].hw_state,
sizeof(*pll_state)) == 0) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
- crtc->base.base.id, crtc->base.name,
- pll->info->name,
- shared_dpll[i].crtc_mask,
- pll->active_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name,
+ shared_dpll[i].crtc_mask,
+ pll->active_mask);
return pll;
}
}
/* Ok no matching timings, maybe there's a free one? */
if (unused_pll) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
- crtc->base.base.id, crtc->base.name,
- unused_pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ unused_pll->info->name);
return unused_pll;
}
@@ -297,6 +320,7 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_shared_dpll_state *shared_dpll;
const enum intel_dpll_id id = pll->info->id;
@@ -305,8 +329,8 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
if (shared_dpll[id].crtc_mask == 0)
shared_dpll[id].hw_state = *pll_state;
- DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
- pipe_name(crtc->pipe));
+ drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
+ pipe_name(crtc->pipe));
shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
@@ -357,9 +381,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
struct intel_shared_dpll *pll =
- &dev_priv->shared_dplls[i];
+ &dev_priv->dpll.shared_dplls[i];
swap(pll->state, shared_dpll[i]);
}
@@ -378,10 +402,10 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(PCH_DPLL(id));
+ val = intel_de_read(dev_priv, PCH_DPLL(id));
hw_state->dpll = val;
- hw_state->fp0 = I915_READ(PCH_FP0(id));
- hw_state->fp1 = I915_READ(PCH_FP1(id));
+ hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
+ hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -393,8 +417,8 @@ static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
- I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
+ intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
+ intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -404,7 +428,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- val = I915_READ(PCH_DREF_CONTROL);
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
@@ -418,10 +442,10 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(id));
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -429,8 +453,8 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
- POSTING_READ(PCH_DPLL(id));
+ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(200);
}
@@ -439,8 +463,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(PCH_DPLL(id), 0);
- POSTING_READ(PCH_DPLL(id));
+ intel_de_write(dev_priv, PCH_DPLL(id), 0);
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(200);
}
@@ -457,11 +481,12 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
if (HAS_PCH_IBX(dev_priv)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
- DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
- crtc->base.base.id, crtc->base.name,
- pll->info->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name);
} else {
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
@@ -484,12 +509,13 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
@@ -499,21 +525,34 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
.get_hw_state = ibx_pch_dpll_get_hw_state,
};
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+ { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .dump_hw_state = ibx_dump_hw_state,
+};
+
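Note the empty { } entry terminating the table: dpll_info arrays are sentinel-terminated, so consumers walk them until they hit a NULL name instead of passing a length around. A standalone sketch of that walk, with a hypothetical struct:

/*
 * Sketch of walking a sentinel-terminated info table like pch_plls
 * above (hypothetical struct; the real loop lives in the DPLL core).
 */
#include <stdio.h>

struct info { const char *name; int id; };

static const struct info plls[] = {
	{ "PLL A", 0 },
	{ "PLL B", 1 },
	{ },	/* sentinel: name == NULL terminates the walk */
};

int main(void)
{
	const struct info *i;

	for (i = plls; i->name; i++)
		printf("%d: %s\n", i->id, i->name);
	return 0;
}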
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
- POSTING_READ(WRPLL_CTL(id));
+ intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
+ intel_de_posting_read(dev_priv, WRPLL_CTL(id));
udelay(20);
}
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
- POSTING_READ(SPLL_CTL);
+ intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
+ intel_de_posting_read(dev_priv, SPLL_CTL);
udelay(20);
}
@@ -523,9 +562,9 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(WRPLL_CTL(id));
- I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL(id));
+ val = intel_de_read(dev_priv, WRPLL_CTL(id));
+ intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/*
* Try to set up the PCH reference clock once all DPLLs
@@ -541,9 +580,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(SPLL_CTL);
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
+ val = intel_de_read(dev_priv, SPLL_CTL);
+ intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, SPLL_CTL);
/*
* Try to set up the PCH reference clock once all DPLLs
@@ -566,7 +605,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(WRPLL_CTL(id));
+ val = intel_de_read(dev_priv, WRPLL_CTL(id));
hw_state->wrpll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -586,7 +625,7 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(SPLL_CTL);
+ val = intel_de_read(dev_priv, SPLL_CTL);
hw_state->spll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -811,8 +850,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
}
static struct intel_shared_dpll *
-hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -839,8 +878,47 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
return pll;
}
+static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ int refclk;
+ int n, p, r;
+ u32 wrpll = pll->state.hw_state.wrpll;
+
+ switch (wrpll & WRPLL_REF_MASK) {
+ case WRPLL_REF_SPECIAL_HSW:
+ /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
+ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ refclk = dev_priv->dpll.ref_clks.nssc;
+ break;
+ }
+ /* fall through */
+ case WRPLL_REF_PCH_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = dev_priv->dpll.ref_clks.ssc;
+ break;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700000;
+ break;
+ default:
+ MISSING_CASE(wrpll);
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to KHz, p & r have a fixed point portion */
+ return (refclk * n / 10) / (p * r) * 2;
+}
+
static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
+hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
@@ -858,7 +936,8 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
pll_id = DPLL_ID_LCPLL_2700;
break;
default:
- DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+ drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
+ clock);
return NULL;
}
@@ -870,6 +949,69 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
+static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->info->id) {
+ case DPLL_ID_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case DPLL_ID_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_ID_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad port clock sel\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ return NULL;
+
+ crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
+ SPLL_REF_MUXED_SSC;
+
+ return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SPLL));
+}
+
+static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
+ case SPLL_FREQ_810MHz:
+ link_clock = 81000;
+ break;
+ case SPLL_FREQ_1350MHz:
+ link_clock = 135000;
+ break;
+ case SPLL_FREQ_2700MHz:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad spll freq\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool hsw_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -881,23 +1023,14 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(state, crtc);
- } else if (intel_crtc_has_dp_encoder(crtc_state)) {
- pll = hsw_ddi_dp_get_dpll(crtc_state);
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- if (WARN_ON(crtc_state->port_clock / 2 != 135000))
- return false;
-
- crtc_state->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
-
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SPLL));
- } else {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ pll = hsw_ddi_wrpll_get_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ pll = hsw_ddi_lcpll_get_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ pll = hsw_ddi_spll_get_dpll(state, crtc);
+ else
return false;
- }
if (!pll)
return false;
@@ -910,23 +1043,35 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 135000;
+ /* Non-SSC is only used on non-ULT HSW. */
+ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ i915->dpll.ref_clks.nssc = 24000;
+ else
+ i915->dpll.ref_clks.nssc = 135000;
+}
+
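update_ref_clks runs once at init, sampling the fuse strap and caching the reference frequencies in i915->dpll.ref_clks; the get_freq callbacks added throughout this patch then consume the cached kHz values instead of rereading hardware. A standalone sketch of the pattern, with hypothetical names and strap encoding:

/*
 * Sketch of the cached-reference-clock pattern this hunk introduces
 * (hypothetical names): the strap is sampled once at init and every
 * later frequency calculation reads the cached kHz values.
 */
#include <stdio.h>

struct ref_clks { int ssc; int nssc; };

static struct ref_clks ref;

static void update_ref_clks(unsigned int strap)
{
	ref.ssc = 135000;				/* kHz */
	ref.nssc = (strap & 1) ? 24000 : 135000;	/* kHz */
}

static int wrpll_freq_khz(int n)
{
	return ref.nssc * n;	/* consumers never reread the strap */
}

int main(void)
{
	update_ref_clks(1);
	printf("%d kHz\n", wrpll_freq_khz(10));	/* 240000 */
	return 0;
}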
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- hw_state->wrpll, hw_state->spll);
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
}
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
.get_hw_state = hsw_ddi_wrpll_get_hw_state,
+ .get_freq = hsw_ddi_wrpll_get_freq,
};
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
.enable = hsw_ddi_spll_enable,
.disable = hsw_ddi_spll_disable,
.get_hw_state = hsw_ddi_spll_get_hw_state,
+ .get_freq = hsw_ddi_spll_get_freq,
};
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
@@ -950,6 +1095,25 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
.enable = hsw_ddi_lcpll_enable,
.disable = hsw_ddi_lcpll_disable,
.get_hw_state = hsw_ddi_lcpll_get_hw_state,
+ .get_freq = hsw_ddi_lcpll_get_freq,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
+ { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
+ { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
+ { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+ { },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = hsw_update_dpll_ref_clks,
+ .dump_hw_state = hsw_dump_hw_state,
};
struct skl_dpll_regs {
@@ -989,15 +1153,15 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
DPLL_CTRL1_SSC(id) |
DPLL_CTRL1_LINK_RATE_MASK(id));
val |= pll->state.hw_state.ctrl1 << (id * 6);
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
+ intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1008,17 +1172,17 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
skl_ddi_pll_write_ctrl1(dev_priv, pll);
- I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
- I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
- POSTING_READ(regs[id].cfgcr1);
- POSTING_READ(regs[id].cfgcr2);
+ intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
+ intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+ intel_de_posting_read(dev_priv, regs[id].cfgcr1);
+ intel_de_posting_read(dev_priv, regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- I915_WRITE(regs[id].ctl,
- I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, regs[id].ctl,
+ intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
- DRM_ERROR("DPLL %d not locked\n", id);
+ drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
@@ -1034,9 +1198,9 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- I915_WRITE(regs[id].ctl,
- I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
- POSTING_READ(regs[id].ctl);
+ intel_de_write(dev_priv, regs[id].ctl,
+ intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, regs[id].ctl);
}
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
@@ -1061,17 +1225,17 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(regs[id].ctl);
+ val = intel_de_read(dev_priv, regs[id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
goto out;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CTRL1_HDMI_MODE(id)) {
- hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
- hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
+ hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
+ hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
}
ret = true;
@@ -1099,11 +1263,11 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
/* DPLL0 is always enabled since it drives CDCLK */
- val = I915_READ(regs[id].ctl);
- if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
+ val = intel_de_read(dev_priv, regs[id].ctl);
+ if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
goto out;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
ret = true;
@@ -1222,6 +1386,7 @@ struct skl_wrpll_params {
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
u64 afe_clock,
+ int ref_clock,
u64 central_freq,
u32 p0, u32 p1, u32 p2)
{
@@ -1281,14 +1446,15 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
* Intermediate values are in Hz.
* Divide by MHz to match bsepc
*/
- params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
params->dco_fraction =
- div_u64((div_u64(dco_freq, 24) -
+ div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
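With the reference clock now passed in rather than hardcoded to 24 MHz, the integer/fraction split works for any platform reference. A worked example of the arithmetic, assuming a hypothetical 24 MHz reference and an 8.1 GHz DCO target:

/*
 * Worked example of the DCO integer/fraction split above, assuming a
 * hypothetical 24 MHz reference and an 8.1 GHz DCO target. Mirrors
 * the arithmetic of skl_wrpll_params_populate() with plain integers.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dco_freq = 8100000000ULL;	/* Hz */
	int ref_clock = 24000;			/* kHz */
	uint32_t dco_integer, dco_fraction;

	dco_integer = dco_freq / (ref_clock * 1000ULL);
	dco_fraction = (dco_freq / (ref_clock / 1000) -
			dco_integer * 1000000ULL) * 0x8000 / 1000000;

	/* Prints 337 and 0x4000, i.e. 337.5 x 24 MHz = 8.1 GHz. */
	printf("integer=%u fraction=0x%x\n",
	       (unsigned)dco_integer, (unsigned)dco_fraction);
	return 0;
}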
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
@@ -1354,14 +1520,15 @@ skip_remaining_dividers:
*/
p0 = p1 = p2 = 0;
skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
- skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
- p0, p1, p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
+ ctx.central_freq, p0, p1, p2);
return true;
}
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
@@ -1374,6 +1541,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->dpll.ref_clks.nssc,
&wrpll_params))
return false;
@@ -1396,6 +1564,64 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ int ref_clock = i915->dpll.ref_clks.nssc;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ }
+
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ ref_clock / 0x8000;
+
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
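The readback path is the inverse of skl_wrpll_params_populate(): reassemble the DCO frequency from the integer and 15-bit fraction fields, then divide by the post dividers (the trailing 5 reflects the AFE clock running at 5x the port clock). A worked example consistent with the encode sketch above, again assuming a 24 MHz reference:

/*
 * Worked example of the readback math above, assuming the same
 * hypothetical 24 MHz reference: DCO integer 337, fraction 0x4000 and
 * dividers p0=2, p1=1, p2=3 decode to a 270000 kHz port clock.
 */
#include <stdio.h>

int main(void)
{
	int ref_clock = 24000;		/* kHz */
	unsigned int dco_integer = 337, dco_fraction = 0x4000;
	unsigned int p0 = 2, p1 = 1, p2 = 3;
	unsigned int dco_freq;

	dco_freq = dco_integer * ref_clock;		/* 8088000 kHz */
	dco_freq += dco_fraction * ref_clock / 0x8000;	/* +12000 kHz */

	printf("%u kHz\n", dco_freq / (p0 * p1 * p2 * 5));	/* 270000 */
	return 0;
}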
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -1436,25 +1662,62 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch ((pll->state.hw_state.ctrl1 &
+ DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
+ case DPLL_CTRL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool skl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
bool bret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not get HDMI pll dividers.\n");
return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not set DP dpll HW state.\n");
return false;
}
} else {
@@ -1482,10 +1745,29 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * ctrl1 register is already shifted for each pll, just use 0 to get
+ * the internal shift for each field
+ */
+ if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return skl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: "
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
hw_state->ctrl1,
hw_state->cfgcr1,
@@ -1496,12 +1778,30 @@ static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
.get_hw_state = skl_ddi_pll_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
};
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
.enable = skl_ddi_dpll0_enable,
.disable = skl_ddi_dpll0_disable,
.get_hw_state = skl_ddi_dpll0_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = skl_update_dpll_ref_clks,
+ .dump_hw_state = skl_dump_hw_state,
};
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1515,113 +1815,114 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_REF_SEL;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_POWER_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
- if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+ if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
- DRM_ERROR("Power state not set for PLL:%d\n", port);
+ drm_err(&dev_priv->drm,
+ "Power state not set for PLL:%d\n", port);
}
/* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->state.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->state.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->state.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->state.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->state.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->state.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->state.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->state.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->state.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->state.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+ if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
200))
- DRM_ERROR("PLL %d not locked\n", port);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
temp |= DCC_DELAY_RANGE_2;
- I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
}
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->state.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1630,19 +1931,20 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
u32 temp;
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_POWER_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
- if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
- DRM_ERROR("Power state not reset for PLL:%d\n", port);
+ if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ drm_err(&dev_priv->drm,
+ "Power state not reset for PLL:%d\n", port);
}
}
@@ -1666,40 +1968,40 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
goto out;
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+ hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+ hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+ hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+ hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+ hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+ hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+ hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+ hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+ hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@@ -1708,11 +2010,14 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
- DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
- hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
+ hw_state->pcsdw12 = intel_de_read(dev_priv,
+ BXT_PORT_PCS_DW12_LN01(phy, ch));
+ if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
+ drm_dbg(&dev_priv->drm,
+ "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+ hw_state->pcsdw12,
+ intel_de_read(dev_priv,
+ BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;
@@ -1751,6 +2056,7 @@ static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct dpll best_clock;
@@ -1760,9 +2066,9 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
* i9xx_crtc_compute_clock
*/
if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
- DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- crtc_state->port_clock,
- pipe_name(crtc->pipe));
+ drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
+ crtc_state->port_clock,
+ pipe_name(crtc->pipe));
return false;
}
@@ -1799,6 +2105,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
const struct bxt_clk_div *clk_div)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
int clock = crtc_state->port_clock;
int vco = clk_div->vco;
@@ -1824,7 +2131,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
gain_ctl = 1;
targ_cnt = 9;
} else {
- DRM_ERROR("Invalid VCO\n");
+ drm_err(&i915->drm, "Invalid VCO\n");
return false;
}
@@ -1885,6 +2192,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ struct dpll clock;
+
+ clock.m1 = 2;
+ clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+ return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+}
+
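bxt_ddi_pll_get_freq() rebuilds the classic m/n/p divider set from the register fields (m2 keeps its fractional part below bit 22, hence the << 22 on the integer portion) and lets chv_calc_dpll_params() do the math; roughly, vco = ref * m1 * m2 / (n << 22) and dot clock = vco / (p1 * p2). An approximate standalone sketch of that calculation, not the shared helper itself:

/*
 * Sketch of the divider math behind chv_calc_dpll_params() as used
 * above (an approximation for illustration, not the driver helper):
 * m2 is a fixed-point value with 22 fractional bits, so the product
 * is scaled back down by 2^22 before dividing by n and the post
 * dividers.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int calc_dot_khz(unsigned int ref_khz, unsigned int m1,
				 uint64_t m2_fp22, unsigned int n,
				 unsigned int p1, unsigned int p2)
{
	uint64_t vco = (uint64_t)ref_khz * m1 * m2_fp22 / ((uint64_t)n << 22);

	return vco / (p1 * p2);
}

int main(void)
{
	/* Hypothetical values: m2 = 32 with no fractional bits set. */
	printf("%u kHz\n",
	       calc_dot_khz(100000, 2, 32ULL << 22, 1, 3, 2)); /* 1066666 */
	return 0;
}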
static bool bxt_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1907,8 +2231,8 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
id = (enum intel_dpll_id) encoder->port;
pll = intel_get_shared_dpll_by_id(dev_priv, id);
- DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
- crtc->base.base.id, crtc->base.name, pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->info->name);
intel_reference_shared_dpll(state, crtc,
pll, &crtc_state->dpll_hw_state);
@@ -1918,89 +2242,37 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 100000;
+ i915->dpll.ref_clks.nssc = 100000;
+ /* DSI non-SSC ref 19.2MHz */
+}
+
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
- "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- hw_state->ebb0,
- hw_state->ebb4,
- hw_state->pll0,
- hw_state->pll1,
- hw_state->pll2,
- hw_state->pll3,
- hw_state->pll6,
- hw_state->pll8,
- hw_state->pll9,
- hw_state->pll10,
- hw_state->pcsdw12);
+	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0,
+ hw_state->ebb4,
+ hw_state->pll0,
+ hw_state->pll1,
+ hw_state->pll2,
+ hw_state->pll3,
+ hw_state->pll6,
+ hw_state->pll8,
+ hw_state->pll9,
+ hw_state->pll10,
+ hw_state->pcsdw12);
}
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
.get_hw_state = bxt_ddi_pll_get_hw_state,
-};
-
-struct intel_dpll_mgr {
- const struct dpll_info *dpll_info;
-
- bool (*get_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*put_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*update_active_dpll)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*dump_hw_state)(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *hw_state);
-};
-
-static const struct dpll_info pch_plls[] = {
- { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
- { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr pch_pll_mgr = {
- .dpll_info = pch_plls,
- .get_dplls = ibx_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = ibx_dump_hw_state,
-};
-
-static const struct dpll_info hsw_plls[] = {
- { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
- { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
- { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
- { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
- { },
-};
-
-static const struct intel_dpll_mgr hsw_pll_mgr = {
- .dpll_info = hsw_plls,
- .get_dplls = hsw_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = hsw_dump_hw_state,
-};
-
-static const struct dpll_info skl_plls[] = {
- { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
- { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr skl_pll_mgr = {
- .dpll_info = skl_plls,
- .get_dplls = skl_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = skl_dump_hw_state,
+ .get_freq = bxt_ddi_pll_get_freq,
};
static const struct dpll_info bxt_plls[] = {
@@ -2014,6 +2286,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
.get_dplls = bxt_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = bxt_update_dpll_ref_clks,
.dump_hw_state = bxt_dump_hw_state,
};
@@ -2024,32 +2297,32 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
u32 val;
/* 1. Enable DPLL power in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val |= PLL_POWER_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
PLL_POWER_STATE, 5))
- DRM_ERROR("PLL %d Power not enabled\n", id);
+ drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
/*
* 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
* select DP mode, and set DP link rate.
*/
val = pll->state.hw_state.cfgcr0;
- I915_WRITE(CNL_DPLL_CFGCR0(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
	/* 4. Read back to ensure writes completed */
- POSTING_READ(CNL_DPLL_CFGCR0(id));
+ intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
/* 3. Configure DPLL_CFGCR0 */
	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
val = pll->state.hw_state.cfgcr1;
- I915_WRITE(CNL_DPLL_CFGCR1(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
- POSTING_READ(CNL_DPLL_CFGCR1(id));
+ intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
}
/*
@@ -2062,13 +2335,13 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
*/
/* 6. Enable DPLL in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val |= PLL_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
- DRM_ERROR("PLL %d not locked\n", id);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
/*
* 8. If the frequency will result in a change to the voltage
@@ -2106,13 +2379,13 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
*/
/* 3. Disable DPLL through DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val &= ~PLL_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
- DRM_ERROR("PLL %d locked\n", id);
+ drm_err(&dev_priv->drm, "PLL %d locked\n", id);
/*
* 5. If the frequency will result in a change to the voltage
@@ -2124,14 +2397,14 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
*/
/* 6. Disable DPLL power in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val &= ~PLL_POWER_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
PLL_POWER_STATE, 5))
- DRM_ERROR("PLL %d Power not disabled\n", id);
+ drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2150,16 +2423,17 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
if (!(val & PLL_ENABLE))
goto out;
- val = I915_READ(CNL_DPLL_CFGCR0(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
hw_state->cfgcr0 = val;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CFGCR0_HDMI_MODE) {
- hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ CNL_DPLL_CFGCR1(id));
}
ret = true;
@@ -2256,27 +2530,12 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
params->dco_fraction = dco & 0x7fff;
}
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
-{
- int ref_clock = dev_priv->cdclk.hw.ref;
-
- /*
- * For ICL+, the spec states: if reference frequency is 38.4,
- * use 19.2 because the DPLL automatically divides that by 2.
- */
- if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
- ref_clock = 19200;
-
- return ref_clock;
-}
-
static bool
-cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *wrpll_params)
+__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params,
+ int ref_clock)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 afe_clock = crtc_state->port_clock * 5;
- u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2308,15 +2567,22 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
return false;
cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
pdiv, qdiv, kdiv);
return true;
}
+static bool
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ i915->dpll.ref_clks.nssc);
+}
+
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
u32 cfgcr0, cfgcr1;
@@ -2344,6 +2610,68 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll,
+ int ref_clock)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR1_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR1_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR1_PDIV_5:
+ p0 = 5;
+ break;
+ case DPLL_CFGCR1_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR1_KDIV_1:
+ p2 = 1;
+ break;
+ case DPLL_CFGCR1_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
+ break;
+ }
+
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
+
+ if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
+}
+
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -2389,30 +2717,72 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
+ case DPLL_CFGCR0_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_3240:
+ link_clock = 324000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_4050:
+ link_clock = 405000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool cnl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
bool bret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not get HDMI pll dividers.\n");
return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not set DP dpll HW state.\n");
return false;
}
} else {
- DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
- crtc_state->output_types);
+ drm_dbg_kms(&i915->drm,
+ "Skip DPLL setup for output_types 0x%x\n",
+ crtc_state->output_types);
return false;
}
@@ -2422,7 +2792,7 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_SKL_DPLL1) |
BIT(DPLL_ID_SKL_DPLL0));
if (!pll) {
- DRM_DEBUG_KMS("No PLL selected\n");
+ drm_dbg_kms(&i915->drm, "No PLL selected\n");
return false;
}
@@ -2434,19 +2804,35 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
+ return cnl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return cnl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC reference */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
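This one-liner is the other half of a refactor visible throughout the hunks below: rather than every frequency calculation reaching into dev_priv->cdclk.hw.ref directly, the non-SSC reference is cached once per readout (the new update_ref_clks hook is called from intel_dpll_readout_hw_state() further down) and all readers switch to the cached copy:

	int ref = i915->dpll.ref_clks.nssc;	/* was: i915->cdclk.hw.ref */
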
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: "
- "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
- hw_state->cfgcr0,
- hw_state->cfgcr1);
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
+ "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
+ hw_state->cfgcr0,
+ hw_state->cfgcr1);
}
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
.enable = cnl_ddi_pll_enable,
.disable = cnl_ddi_pll_disable,
.get_hw_state = cnl_ddi_pll_get_hw_state,
+ .get_freq = cnl_ddi_pll_get_freq,
};
static const struct dpll_info cnl_plls[] = {
@@ -2460,6 +2846,7 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
.get_dplls = cnl_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = cnl_update_dpll_ref_clks,
.dump_hw_state = cnl_dump_hw_state,
};
@@ -2555,7 +2942,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
- dev_priv->cdclk.hw.ref == 24000 ?
+ dev_priv->dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2578,9 +2965,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) >= 12) {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2591,9 +2978,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2608,6 +2995,49 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * The PLL outputs multiple frequencies at the same time; the
+ * selection is made at the DDI clock mux level.
+ */
+ drm_WARN_ON(&i915->drm, 1);
+
+ return 0;
+}
+
+static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
+{
+ int ref_clock = i915->dpll.ref_clks.nssc;
+
+ /*
+ * For ICL+, the spec states: if reference frequency is 38.4,
+ * use 19.2 because the DPLL automatically divides that by 2.
+ */
+ if (ref_clock == 38400)
+ ref_clock = 19200;
+
+ return ref_clock;
+}
+
+static bool
+icl_calc_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ icl_wrpll_ref_clock(i915));
+}
+
+static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll,
+ icl_wrpll_ref_clock(i915));
+}
+
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
struct intel_dpll_hw_state *pll_state)
@@ -2622,7 +3052,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
+ ret = icl_calc_wrpll(crtc_state, &pll_params);
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
@@ -2745,7 +3175,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int refclk_khz = dev_priv->cdclk.hw.ref;
+ int refclk_khz = dev_priv->dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2761,7 +3191,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state, is_dkl)) {
- DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to find divisors for clock %d\n", clock);
return false;
}
@@ -2774,8 +3205,9 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
if (m2div_int > 255) {
- DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
- clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to find mdiv for clock %d\n",
+ clock);
return false;
}
}
@@ -2944,6 +3376,78 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
+ u64 tmp;
+
+ ref_clock = dev_priv->dpll.ref_clks.nssc;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
+
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+ div1 = 2;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+ div1 = 3;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+ div1 = 5;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+ div1 = 7;
+ break;
+ default:
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ return 0;
+ }
+
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
+ /* a div2 value of 0 is the same as 1, i.e. no division */
+ if (div2 == 0)
+ div2 = 1;
+
+ /*
+ * Adjust the original formula to delay the division by 2^22 in order to
+ * minimize possible rounding errors.
+ */
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
+ tmp = div_u64(tmp, 5 * div1 * div2);
+
+ return tmp;
+}
+
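icl_ddi_mg_pll_get_freq() uses the same shape of fixed-point math, now with a 22-bit fraction: m2 = m2_int + m2_frac / 2^22 and frequency = ref * m1 * m2 / (5 * div1 * div2), with the shift applied last so the intermediate product stays exact, as the in-code comment explains. A standalone check with invented divider values (not real register state):

	/* Sketch only: mirrors the MG/DKL math above with made-up values. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ref = 38400;		/* kHz */
		uint64_t m1 = 2, m2_int = 32;
		uint64_t m2_frac = 0x200000;	/* 2^21/2^22 = 0.5 */
		uint64_t div1 = 2, div2 = 1;

		uint64_t tmp = m1 * m2_int * ref +
			       ((m1 * m2_frac * ref) >> 22);
		/* (2457600 + 38400) / (5 * 2 * 1) = 249600 kHz */
		printf("%llu kHz\n",
		       (unsigned long long)(tmp / (5 * div1 * div2)));
		return 0;
	}
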
/**
* icl_set_active_port_dpll - select the active port DPLL for a given CRTC
* @crtc_state: state for the CRTC to select the DPLL for
@@ -2996,7 +3500,8 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
unsigned long dpll_mask;
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate combo PHY PLL state.\n");
return false;
}
@@ -3013,8 +3518,9 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
&port_dpll->hw_state,
dpll_mask);
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
- encoder->base.base.id, encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "No combo PHY PLL found for [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name);
return false;
}
@@ -3038,7 +3544,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate TBT PLL state.\n");
return false;
}
@@ -3046,7 +3553,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
&port_dpll->hw_state,
BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
+ drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
return false;
}
intel_reference_shared_dpll(state, crtc,
@@ -3055,7 +3562,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate MG PHY PLL state.\n");
goto err_unreference_tbt_pll;
}
@@ -3065,7 +3573,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
&port_dpll->hw_state,
BIT(dpll_id));
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No MG PHY PLL found\n");
+ drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
}
intel_reference_shared_dpll(state, crtc,
@@ -3140,37 +3648,39 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
if (!(val & PLL_ENABLE))
goto out;
- hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+ MG_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
hw_state->mg_clktop2_hsclkctl =
- I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
- hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
- hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
- hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
- hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+ hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
+ MG_PLL_FRAC_LOCK(tc_port));
+ hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
- hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->cdclk.hw.ref == 38400) {
+ if (dev_priv->dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3202,7 +3712,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
if (!(val & PLL_ENABLE))
goto out;
@@ -3210,13 +3720,15 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
* All registers read here have the same HIP_INDEX_REG even though
* they are on different building blocks
*/
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
- hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+ DKL_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_hsclkctl =
- I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
@@ -3224,32 +3736,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
DKL_PLL_DIV0_PROP_COEFF_MASK |
DKL_PLL_DIV0_FBPREDIV_MASK |
DKL_PLL_DIV0_FBDIV_INT_MASK);
- hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
- hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
- hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
@@ -3274,20 +3786,26 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
if (!(val & PLL_ENABLE))
goto out;
if (INTEL_GEN(dev_priv) >= 12) {
- hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ TGL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ TGL_DPLL_CFGCR1(id));
} else {
if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR0(4));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR1(4));
} else {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR1(id));
}
}
@@ -3338,9 +3856,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
}
}
- I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
- I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
- POSTING_READ(cfgcr1_reg);
+ intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
+ intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
+ intel_de_posting_read(dev_priv, cfgcr1_reg);
}
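The register-access conversion here is likewise mechanical across the patch: the I915_READ/I915_WRITE/POSTING_READ macros, which picked up dev_priv implicitly from scope, become explicit intel_de_* calls. The correspondence, sketched with placeholder reg/val (the signatures are the ones used in the hunks above):

	u32 val = intel_de_read(dev_priv, reg);     /* was: I915_READ(reg) */
	intel_de_write(dev_priv, reg, val | bit);   /* was: I915_WRITE(reg, ...) */
	intel_de_posting_read(dev_priv, reg);       /* was: POSTING_READ(reg) */
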
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
@@ -3356,41 +3874,42 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- val = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
+ intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
- val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
- I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
- I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
- I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
- I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
- I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
+ intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+ intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+ intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+ intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
+ hw_state->mg_pll_frac_lock);
+ intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- val = I915_READ(MG_PLL_BIAS(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
val &= ~hw_state->mg_pll_bias_mask;
val |= hw_state->mg_pll_bias;
- I915_WRITE(MG_PLL_BIAS(tc_port), val);
+ intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
- val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
val |= hw_state->mg_pll_tdc_coldst_bias;
- I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
- POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
static void dkl_pll_write(struct drm_i915_private *dev_priv,
@@ -3404,62 +3923,63 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
* All registers programmed here have the same HIP_INDEX_REG even
* though they are on different building blocks
*/
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
/* All the registers are RMW */
- val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
+ intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
- val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
- val = I915_READ(DKL_PLL_DIV0(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
DKL_PLL_DIV0_PROP_COEFF_MASK |
DKL_PLL_DIV0_FBPREDIV_MASK |
DKL_PLL_DIV0_FBDIV_INT_MASK);
val |= hw_state->mg_pll_div0;
- I915_WRITE(DKL_PLL_DIV0(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
- val = I915_READ(DKL_PLL_DIV1(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
val |= hw_state->mg_pll_div1;
- I915_WRITE(DKL_PLL_DIV1(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
- val = I915_READ(DKL_PLL_SSC(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
val |= hw_state->mg_pll_ssc;
- I915_WRITE(DKL_PLL_SSC(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
- val = I915_READ(DKL_PLL_BIAS(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
val |= hw_state->mg_pll_bias;
- I915_WRITE(DKL_PLL_BIAS(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
- val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
val |= hw_state->mg_pll_tdc_coldst_bias;
- I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
- POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
@@ -3468,16 +3988,17 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val |= PLL_POWER_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/*
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
+ pll->info->id);
}
static void icl_pll_enable(struct drm_i915_private *dev_priv,
@@ -3486,13 +4007,13 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val |= PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/* Timeout is actually 600us. */
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
- DRM_ERROR("PLL %d not locked\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
static void combo_pll_enable(struct drm_i915_private *dev_priv,
@@ -3584,26 +4105,27 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* nothing here.
*/
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val &= ~PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/* Timeout is actually 1us. */
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
- DRM_ERROR("PLL %d locked\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val &= ~PLL_POWER_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/*
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
+ pll->info->id);
}
static void combo_pll_disable(struct drm_i915_private *dev_priv,
@@ -3639,44 +4161,54 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
icl_pll_disable(dev_priv, pll, enable_reg);
}
+static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
- "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
- "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
- "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
- "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
- "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
- hw_state->cfgcr0, hw_state->cfgcr1,
- hw_state->mg_refclkin_ctl,
- hw_state->mg_clktop2_coreclkctl1,
- hw_state->mg_clktop2_hsclkctl,
- hw_state->mg_pll_div0,
- hw_state->mg_pll_div1,
- hw_state->mg_pll_lf,
- hw_state->mg_pll_frac_lock,
- hw_state->mg_pll_ssc,
- hw_state->mg_pll_bias,
- hw_state->mg_pll_tdc_coldst_bias);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+ "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
+ "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+ "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
+ "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+ "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+ hw_state->cfgcr0, hw_state->cfgcr1,
+ hw_state->mg_refclkin_ctl,
+ hw_state->mg_clktop2_coreclkctl1,
+ hw_state->mg_clktop2_hsclkctl,
+ hw_state->mg_pll_div0,
+ hw_state->mg_pll_div1,
+ hw_state->mg_pll_lf,
+ hw_state->mg_pll_frac_lock,
+ hw_state->mg_pll_ssc,
+ hw_state->mg_pll_bias,
+ hw_state->mg_pll_tdc_coldst_bias);
}
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
.get_hw_state = combo_pll_get_hw_state,
+ .get_freq = icl_ddi_combo_pll_get_freq,
};
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
+ .get_freq = icl_ddi_tbt_pll_get_freq,
};
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = mg_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info icl_plls[] = {
@@ -3695,6 +4227,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3709,6 +4242,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3716,6 +4250,7 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info tgl_plls[] = {
@@ -3736,6 +4271,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3770,22 +4306,22 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
- dev_priv->num_shared_dpll = 0;
+ dev_priv->dpll.num_shared_dpll = 0;
return;
}
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
- WARN_ON(i != dpll_info[i].id);
- dev_priv->shared_dplls[i].info = &dpll_info[i];
+ drm_WARN_ON(dev, i != dpll_info[i].id);
+ dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
}
- dev_priv->dpll_mgr = dpll_mgr;
- dev_priv->num_shared_dpll = i;
- mutex_init(&dev_priv->dpll_lock);
+ dev_priv->dpll.mgr = dpll_mgr;
+ dev_priv->dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->dpll.lock);
- BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+ BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
/**
@@ -3812,9 +4348,9 @@ bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
- if (WARN_ON(!dpll_mgr))
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return false;
return dpll_mgr->get_dplls(state, crtc, encoder);
@@ -3835,7 +4371,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -3864,35 +4400,114 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
- if (WARN_ON(!dpll_mgr))
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return;
dpll_mgr->update_active_dpll(state, crtc, encoder);
}
/**
+ * intel_dpll_get_freq - calculate the DPLL's output frequency
+ * @i915: i915 device
+ * @pll: DPLL for which to calculate the output frequency
+ *
+ * Return the output frequency corresponding to @pll's current state.
+ */
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
+ return 0;
+
+ return pll->info->funcs->get_freq(i915, pll);
+}
+
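A hypothetical caller of the new helper; crtc_state and the fallback message are illustrative, only intel_dpll_get_freq() itself comes from this patch:

	/* Returns the PLL output in kHz, or 0 when the platform wires up
	 * no .get_freq hook (the drm_WARN_ON above fires in that case). */
	int freq = intel_dpll_get_freq(i915, crtc_state->shared_dpll);
	if (freq == 0)
		drm_dbg_kms(&i915->drm, "PLL frequency readout unavailable\n");
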
+static void readout_dpll_hw_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_crtc *crtc;
+
+ pll->on = pll->info->funcs->get_hw_state(i915, pll,
+ &pll->state.hw_state);
+
+ if (IS_ELKHARTLAKE(i915) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
+ pll->state.crtc_mask = 0;
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
+ pll->state.crtc_mask |= 1 << crtc->pipe;
+ }
+ pll->active_mask = pll->state.crtc_mask;
+
+ drm_dbg_kms(&i915->drm,
+ "%s hw state readout: crtc_mask 0x%08x, on %i\n",
+ pll->info->name, pll->state.crtc_mask, pll->on);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
+ i915->dpll.mgr->update_ref_clks(i915);
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+static void sanitize_dpll_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (!pll->on || pll->active_mask)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "%s enabled but not in use, disabling\n",
+ pll->info->name);
+
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+}
+
+void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+}
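A sketch of the init-time ordering these two entry points imply; the actual call sites live outside this diff, so the sequence below is an assumption based on the function semantics (readout must populate pll->on and crtc_mask before sanitize can decide what is unused):

	intel_dpll_readout_hw_state(i915);	/* fill pll->on / crtc_mask */
	/* ... CRTC and encoder state readout would run here ... */
	intel_dpll_sanitize_state(i915);	/* disable enabled-but-unused PLLs */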
+
+/**
* intel_shared_dpll_dump_hw_state - write hw_state to dmesg
* @dev_priv: i915 drm device
* @hw_state: hw state to be written to the log
*
- * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
+ * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
*/
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- if (dev_priv->dpll_mgr) {
- dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+ if (dev_priv->dpll.mgr) {
+ dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
- DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2a104c64291d..5d9a2bc371e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -278,6 +278,15 @@ struct intel_shared_dpll_funcs {
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
+
+ /**
+ * @get_freq:
+ *
+ * Hook for calculating the pll's output frequency based on its
+ * current state.
+ */
+ int (*get_freq)(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
};
/**
@@ -372,15 +381,18 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
+void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state);
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index ada006a690df..d7a6bf2277df 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -40,7 +40,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
+ return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
}
static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
@@ -50,16 +50,16 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl |= DSB_ENABLE;
- I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
- POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
return true;
}
@@ -70,16 +70,16 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl &= ~DSB_ENABLE;
- I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
- POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
return true;
}
@@ -115,20 +115,20 @@ intel_dsb_get(struct intel_crtc *crtc)
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
- DRM_ERROR("Gem object creation failed\n");
+ drm_err(&i915->drm, "Gem object creation failed\n");
goto out;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
- DRM_ERROR("Vma creation failed\n");
+ drm_err(&i915->drm, "Vma creation failed\n");
i915_gem_object_put(obj);
goto out;
}
buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
- DRM_ERROR("Command buffer creation failed\n");
+ drm_err(&i915->drm, "Command buffer creation failed\n");
goto out;
}
@@ -165,7 +165,7 @@ void intel_dsb_put(struct intel_dsb *dsb)
if (!HAS_DSB(i915))
return;
- if (WARN_ON(dsb->refcount == 0))
+ if (drm_WARN_ON(&i915->drm, dsb->refcount == 0))
return;
if (--dsb->refcount == 0) {
@@ -198,12 +198,12 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
u32 reg_val;
if (!buf) {
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
return;
}
- if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -272,12 +272,12 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
u32 *buf = dsb->cmd_buf;
if (!buf) {
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
return;
}
- if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -310,10 +310,12 @@ void intel_dsb_commit(struct intel_dsb *dsb)
goto reset;
if (is_dsb_busy(dsb)) {
- DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
}
- I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
+ intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma));
tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
if (tail > dsb->free_pos * 4)
@@ -321,14 +323,18 @@ void intel_dsb_commit(struct intel_dsb *dsb)
(tail - dsb->free_pos * 4));
if (is_dsb_busy(dsb)) {
- DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
}
- DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma), tail);
- I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
+ drm_dbg_kms(&dev_priv->drm,
+ "DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
+ intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma) + tail);
if (wait_for(!is_dsb_busy(dsb), 1)) {
- DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSB workload completion.\n");
goto reset;
}
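Taken together, the DSB hunks above suggest the following usage of the helpers touched here; reg and val are placeholders, while the function names and signatures are the ones visible in this file:

	struct intel_dsb *dsb = intel_dsb_get(crtc);	/* refcounted acquire */
	intel_dsb_reg_write(dsb, reg, val);	/* queued, or direct MMIO fallback */
	intel_dsb_commit(dsb);			/* program head/tail, wait for engine */
	intel_dsb_put(dsb);
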
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index c87838843d0b..b53c50372918 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -45,7 +45,7 @@
static u32 dcs_get_backlight(struct intel_connector *connector)
{
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi_device;
u8 data = 0;
@@ -160,13 +160,13 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
struct intel_panel *panel = &intel_connector->panel;
if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
return -ENODEV;
- if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+ if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
return -EINVAL;
panel->backlight.setup = dcs_setup_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 04f953ba8f00..574dcfec9577 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -36,7 +36,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <video/mipi_display.h>
@@ -136,7 +135,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u16 len;
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
flags = *data++;
type = *data++;
@@ -158,7 +157,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
- DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ port_name(port));
goto out;
}
@@ -182,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
+ drm_dbg(&dev_priv->drm,
+ "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -194,7 +195,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
+ drm_dbg(&dev_priv->drm,
+ "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
@@ -212,9 +214,10 @@ out:
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
u32 delay = *((const u32 *) data);
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
usleep_range(delay, delay + 10);
data += 4;
@@ -231,7 +234,8 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
u8 port;
if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
- DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+ drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
+ gpio_index);
return;
}
@@ -244,10 +248,11 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
- DRM_DEBUG_KMS("SC gpio not supported\n");
+ drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
return;
} else {
- DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "unknown gpio source %u\n", gpio_source);
return;
}
}
@@ -291,13 +296,15 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
- DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
- DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
- gpio_index);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid gpio index %u for GPIO N\n",
+ gpio_index);
return;
}
@@ -332,8 +339,9 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
GPIOD_OUT_HIGH);
if (IS_ERR_OR_NULL(gpio_desc)) {
- DRM_ERROR("GPIO index %u request failed (%ld)\n",
- gpio_index, PTR_ERR(gpio_desc));
+ drm_err(&dev_priv->drm,
+ "GPIO index %u request failed (%ld)\n",
+ gpio_index, PTR_ERR(gpio_desc));
return;
}
@@ -346,7 +354,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
static void icl_exec_gpio(struct drm_i915_private *dev_priv,
u8 gpio_source, u8 gpio_index, bool value)
{
- DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+ drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
@@ -356,7 +364,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (dev_priv->vbt.dsi.seq_version >= 3)
gpio_index = *data++;
@@ -494,13 +502,16 @@ err_bus:
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
- DRM_DEBUG_KMS("Skipping SPI element execution\n");
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
#ifdef CONFIG_PMIC_OPREGION
u32 value, mask, reg_address;
u16 i2c_address;
@@ -516,9 +527,10 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
reg_address,
value, mask);
if (ret)
- DRM_ERROR("%s failed, error: %d\n", __func__, ret);
+ drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
#else
- DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+ drm_err(&i915->drm,
+ "Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
#endif
return data + 15;
@@ -570,17 +582,18 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
return;
data = dev_priv->vbt.dsi.sequence[seq_id];
if (!data)
return;
- WARN_ON(*data != seq_id);
+ drm_WARN_ON(&dev_priv->drm, *data != seq_id);
- DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
- seq_id, sequence_name(seq_id));
+ drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
data++;
@@ -612,18 +625,21 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
/* Consistency check if we have size. */
if (operation_size && data != next) {
- DRM_ERROR("Inconsistent operation size\n");
+ drm_err(&dev_priv->drm,
+ "Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
- DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
- operation_byte);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported MIPI operation byte %u\n",
+ operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
- DRM_ERROR("Unsupported MIPI operation byte %u\n",
- operation_byte);
+ drm_err(&dev_priv->drm,
+ "Unsupported MIPI operation byte %u\n",
+ operation_byte);
return;
}
}
@@ -658,40 +674,54 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
- DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
- DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
- DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- DRM_DEBUG_KMS("Video mode format %s\n",
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
- "burst" : "<unknown>");
- DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
- DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
- DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
- DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
- DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
+ drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
+ intel_dsi->pixel_overlap);
+ drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
+ drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_dbg_kms(&i915->drm, "Video mode format %s\n",
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
+ "burst" : "<unknown>");
+ drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
+ intel_dsi->burst_mode_ratio);
+ drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_dbg_kms(&i915->drm, "Eot %s\n",
+ enableddisabled(intel_dsi->eotp_pkt));
+ drm_dbg_kms(&i915->drm, "Clockstop %s\n",
+ enableddisabled(!intel_dsi->clock_stop));
+ drm_dbg_kms(&i915->drm, "Mode %s\n",
+ intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ drm_dbg_kms(&i915->drm,
+ "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ drm_dbg_kms(&i915->drm,
+ "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
- DRM_DEBUG_KMS("Dual link: NONE\n");
- DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
- DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
- DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
- DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
- DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
- DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
- DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
- DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
- DRM_DEBUG_KMS("BTA %s\n",
- enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
+ drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
+ intel_dsi->lp_rx_timeout);
+ drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
+ intel_dsi->turn_arnd_val);
+ drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
+ intel_dsi->hs_to_lp_count);
+ drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
+ intel_dsi->clk_lp_to_hs_count);
+ drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
+ intel_dsi->clk_hs_to_lp_count);
+ drm_dbg_kms(&i915->drm, "BTA %s\n",
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
@@ -704,7 +734,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u16 burst_mode_ratio;
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -763,7 +793,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
- DRM_ERROR("Burst mode freq is less than computed\n");
+ drm_err(&dev_priv->drm,
+ "Burst mode freq is less than computed\n");
return false;
}
@@ -773,7 +804,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
} else {
- DRM_ERROR("Burst mode target is not set\n");
+ drm_err(&dev_priv->drm,
+ "Burst mode target is not set\n");
return false;
}
} else
@@ -856,17 +888,20 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
ARRAY_SIZE(soc_pwm_pinctrl_map));
if (ret)
- DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+ drm_err(&dev_priv->drm,
+ "Failed to register pwm0 pinmux mapping\n");
pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
if (IS_ERR(pinctrl))
- DRM_ERROR("Failed to set pinmux to PWM\n");
+ drm_err(&dev_priv->drm,
+ "Failed to set pinmux to PWM\n");
}
if (want_panel_gpio) {
intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
- DRM_ERROR("Failed to own gpio for panel control\n");
+ drm_err(&dev_priv->drm,
+ "Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
}
@@ -875,7 +910,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
intel_dsi->gpio_backlight =
gpiod_get(dev->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
- DRM_ERROR("Failed to own gpio for backlight control\n");
+ drm_err(&dev_priv->drm,
+ "Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 86a337c9d85d..341d5ce8b062 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -30,7 +30,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -44,6 +43,7 @@
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
+#define INTEL_DVO_CHIP_LVDS_NO_FIXED 5
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
@@ -101,13 +101,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.dev_ops = &ch7017_ops,
},
{
- .type = INTEL_DVO_CHIP_TMDS,
+ .type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
.name = "ns2501",
.dvo_reg = DVOB,
.dvo_srcdim_reg = DVOB_SRCDIM,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
- }
+ },
};
struct intel_dvo {
@@ -137,7 +137,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
u32 tmp;
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
if (!(tmp & DVO_ENABLE))
return false;
@@ -152,7 +152,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
*pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
@@ -168,7 +168,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -190,11 +190,11 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = I915_READ(dvo_reg);
+ u32 temp = intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE);
+ intel_de_read(dev_priv, dvo_reg);
}
static void intel_enable_dvo(struct intel_encoder *encoder,
@@ -204,14 +204,14 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = I915_READ(dvo_reg);
+ u32 temp = intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- I915_WRITE(dvo_reg, temp | DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE);
+ intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -286,7 +286,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
/* Save the data order, since I don't know what it should be set to. */
- dvo_val = I915_READ(dvo_reg) &
+ dvo_val = intel_de_read(dev_priv, dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
@@ -301,11 +301,10 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
- I915_WRITE(dvo_srcdim_reg,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
- (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+ intel_de_write(dev_priv, dvo_srcdim_reg,
+ (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
/*I915_WRITE(DVOB, dvo_val);*/
- I915_WRITE(dvo_reg, dvo_val);
+ intel_de_write(dev_priv, dvo_reg, dvo_val);
}
static enum drm_connector_status
@@ -481,15 +480,16 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
* initialize the device.
*/
for_each_pipe(dev_priv, pipe) {
- dpll[pipe] = I915_READ(DPLL(pipe));
- I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE);
+ dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe),
+ dpll[pipe] | DPLL_DVO_2X_MODE);
}
dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(DPLL(pipe), dpll[pipe]);
+ intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
}
intel_gmbus_force_bit(i2c, false);
@@ -507,17 +507,21 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->port = port;
intel_encoder->pipe_mask = ~0;
- switch (dvo->type) {
- case INTEL_DVO_CHIP_TMDS:
+ if (dvo->type != INTEL_DVO_CHIP_LVDS)
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
(1 << INTEL_OUTPUT_DVO);
+
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
+ case INTEL_DVO_CHIP_LVDS_NO_FIXED:
case INTEL_DVO_CHIP_LVDS:
- intel_encoder->cloneable = 0;
drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
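
Almost all of the churn in intel_dvo.c is mechanical: the I915_READ()/I915_WRITE() macros, which picked up dev_priv implicitly from the enclosing scope, become explicit intel_de_read()/intel_de_write() calls on the display engine. The recurring enable/disable idiom they convert is a read-modify-write followed by a posting read; in sketch form (dvo_reg and DVO_ENABLE as in the hunks above):

u32 tmp;

/* read the current control bits */
tmp = intel_de_read(dev_priv, dvo_reg);
/* set (or clear) the enable bit without disturbing the rest */
intel_de_write(dev_priv, dvo_reg, tmp | DVO_ENABLE);
/* posting read: flush the write before the caller proceeds */
intel_de_read(dev_priv, dvo_reg);
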
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a1048ece541e..c125ca9ab9b3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -41,15 +41,12 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-static inline bool fbc_supported(struct drm_i915_private *dev_priv)
-{
- return HAS_FBC(dev_priv);
-}
-
/*
On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -97,12 +94,12 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 fbc_ctl;
/* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
+ intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
@@ -132,7 +129,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG(i), 0);
+ intel_de_write(dev_priv, FBC_TAG(i), 0);
if (IS_GEN(dev_priv, 4)) {
u32 fbc_ctl2;
@@ -142,12 +139,13 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
if (params->fence_id >= 0)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
+ intel_de_write(dev_priv, FBC_FENCE_OFF,
+ params->crtc.fence_y_offset);
}
/* enable it... */
- fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev_priv))
@@ -155,12 +153,12 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
if (params->fence_id >= 0)
fbc_ctl |= params->fence_id;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
+ intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
}
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+ return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
@@ -176,13 +174,14 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
- I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, DPFC_FENCE_YOFF,
+ params->crtc.fence_y_offset);
} else {
- I915_WRITE(DPFC_FENCE_YOFF, 0);
+ intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
}
/* enable it... */
- I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
@@ -190,23 +189,27 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl);
}
}
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
}
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
- I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
- POSTING_READ(MSG_FBC_REND_STATE);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ trace_intel_fbc_nuke(fbc->crtc);
+
+ intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
@@ -237,22 +240,22 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 5))
dpfc_ctl |= params->fence_id;
if (IS_GEN(dev_priv, 6)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE |
- params->fence_id);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET,
- params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | params->fence_id);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
+ params->crtc.fence_y_offset);
}
} else {
if (IS_GEN(dev_priv, 6)) {
- I915_WRITE(SNB_DPFC_CTL_SA, 0);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
}
- I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
+ params->crtc.fence_y_offset);
/* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
}
@@ -262,16 +265,16 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
}
}
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
@@ -282,14 +285,14 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
/* Display WA #0529: skl, kbl, bxt. */
if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
- u32 val = I915_READ(CHICKEN_MISC_4);
+ u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
if (params->gen9_wa_cfb_stride)
val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
- I915_WRITE(CHICKEN_MISC_4, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_4, val);
}
dpfc_ctl = 0;
@@ -314,13 +317,13 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE |
- params->fence_id);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
- } else {
- I915_WRITE(SNB_DPFC_CTL_SA,0);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | params->fence_id);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
+ params->crtc.fence_y_offset);
+ } else if (dev_priv->ggtt.num_fences) {
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
if (dev_priv->fbc.false_color)
@@ -328,21 +331,20 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_IVYBRIDGE(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
+ intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
+ intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
- I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
- HSW_FBCQ_DIS);
+ intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
+ intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
}
if (INTEL_GEN(dev_priv) >= 11)
/* Wa_1409120013:icl,ehl,tgl */
- I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
}
@@ -361,6 +363,8 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
+ trace_intel_fbc_activate(fbc->crtc);
+
fbc->active = true;
fbc->activated = true;
@@ -378,6 +382,8 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
+ trace_intel_fbc_deactivate(fbc->crtc);
+
fbc->active = false;
if (INTEL_GEN(dev_priv) >= 5)
@@ -407,7 +413,7 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
@@ -471,23 +477,25 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
- WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
+ drm_WARN_ON(&dev_priv->drm,
+ drm_mm_node_allocated(&fbc->compressed_fb));
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
- DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
+ DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
fbc->threshold = ret;
if (INTEL_GEN(dev_priv) >= 5)
- I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
+ intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
+ fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
- I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
+ intel_de_write(dev_priv, DPFC_CB_BASE,
+ fbc->compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
@@ -500,16 +508,16 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
fbc->compressed_llb = compressed_llb;
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_fb.start,
- U32_MAX));
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_llb->start,
- U32_MAX));
- I915_WRITE(FBC_CFB_BASE,
- dev_priv->dsm.start + fbc->compressed_fb.start);
- I915_WRITE(FBC_LL_BASE,
- dev_priv->dsm.start + compressed_llb->start);
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_fb.start,
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_llb->start,
+ U32_MAX));
+ intel_de_write(dev_priv, FBC_CFB_BASE,
+ dev_priv->dsm.start + fbc->compressed_fb.start);
+ intel_de_write(dev_priv, FBC_LL_BASE,
+ dev_priv->dsm.start + compressed_llb->start);
}
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
@@ -530,20 +538,22 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (drm_mm_node_allocated(&fbc->compressed_fb))
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+ if (!drm_mm_node_allocated(&fbc->compressed_fb))
+ return;
if (fbc->compressed_llb) {
i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
kfree(fbc->compressed_llb);
}
+
+ i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
}
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
@@ -555,7 +565,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
unsigned int stride)
{
/* This should have been caught earlier. */
- if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
return false;
/* Below are the additional FBC restrictions. */
@@ -663,8 +673,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
- WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
- !plane_state->vma->fence);
+ drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ !plane_state->vma->fence);
if (plane_state->flags & PLANE_HAS_FENCE &&
plane_state->vma->fence)
@@ -681,12 +691,37 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
fbc->compressed_fb.size * fbc->threshold;
}
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (intel_vgpu_active(dev_priv)) {
+ fbc->no_fbc_reason = "VGPU is active";
+ return false;
+ }
+
+ if (!i915_modparams.enable_fbc) {
+ fbc->no_fbc_reason = "disabled per module param or by default";
+ return false;
+ }
+
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
+ return true;
+}
+
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ if (!intel_fbc_can_enable(dev_priv))
+ return false;
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -785,28 +820,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- if (intel_vgpu_active(dev_priv)) {
- fbc->no_fbc_reason = "VGPU is active";
- return false;
- }
-
- if (!i915_modparams.enable_fbc) {
- fbc->no_fbc_reason = "disabled per module param or by default";
- return false;
- }
-
- if (fbc->underrun_detected) {
- fbc->no_fbc_reason = "underrun detected";
- return false;
- }
-
- return true;
-}
-
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
struct intel_fbc_reg_params *params)
{
@@ -867,16 +880,20 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
return true;
}
-bool intel_fbc_pre_update(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
const char *reason = "update pending";
bool need_vblank_wait = false;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return need_vblank_wait;
mutex_lock(&fbc->lock);
@@ -926,9 +943,9 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_crtc *crtc = fbc->crtc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!fbc->crtc);
- WARN_ON(fbc->active);
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
+ drm_WARN_ON(&dev_priv->drm, fbc->active);
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
@@ -942,7 +959,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
if (fbc->crtc != crtc)
return;
@@ -967,12 +984,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
-void intel_fbc_post_update(struct intel_crtc *crtc)
+void intel_fbc_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return;
mutex_lock(&fbc->lock);
@@ -994,7 +1015,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
@@ -1015,7 +1036,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
@@ -1099,24 +1120,26 @@ out:
/**
* intel_fbc_enable: tries to enable FBC on the CRTC
* @crtc: the CRTC
- * @crtc_state: corresponding &drm_crtc_state for @crtc
- * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
+ * @state: corresponding &drm_crtc_state for @crtc
*
* This function checks if the given CRTC was chosen for FBC, then enables it if
* possible. Notice that it doesn't activate FBC. It is valid to call
* intel_fbc_enable multiple times for the same pipe without an
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
-void intel_fbc_enable(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+void intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return;
mutex_lock(&fbc->lock);
@@ -1129,7 +1152,7 @@ void intel_fbc_enable(struct intel_crtc *crtc,
__intel_fbc_disable(dev_priv);
}
- WARN_ON(fbc->active);
+ drm_WARN_ON(&dev_priv->drm, fbc->active);
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
@@ -1139,14 +1162,14 @@ void intel_fbc_enable(struct intel_crtc *crtc,
if (intel_fbc_alloc_cfb(dev_priv,
intel_fbc_calculate_cfb_size(dev_priv, cache),
- fb->format->cpp[0])) {
+ plane_state->hw.fb->format->cpp[0])) {
cache->plane.visible = false;
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
- fb->modifier != I915_FORMAT_MOD_X_TILED)
+ plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
cache->gen9_wa_cfb_stride =
DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
else
@@ -1169,9 +1192,10 @@ out:
void intel_fbc_disable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc)
return;
mutex_lock(&fbc->lock);
@@ -1190,12 +1214,12 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
if (fbc->crtc) {
- WARN_ON(fbc->crtc->active);
+ drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&fbc->lock);
@@ -1267,7 +1291,7 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
/* There's no guarantee that underrun_detected won't be set to true
@@ -1348,7 +1372,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
/* This value was pulled out of someone's hat */
if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+ intel_de_write(dev_priv, FBC_CONTROL,
+ 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
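
Two related cleanups run through intel_fbc.c. The one-line fbc_supported() wrapper is dropped in favor of a direct HAS_FBC(dev_priv) check at the device-global entry points, and the per-CRTC entry points (pre_update, post_update, enable) now receive the whole intel_atomic_state, resolve the primary plane's new state themselves, and bail early when that plane cannot compress or is not part of this commit. The resulting guard, in sketch form:

struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
	intel_atomic_get_new_plane_state(state, plane);

/* plane can't do FBC, or it isn't touched by this commit */
if (!plane->has_fbc || !plane_state)
	return;
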
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index c8a5e5098687..6dc1edefe81b 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,14 +19,13 @@ struct intel_plane_state;
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-bool intel_fbc_pre_update(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void intel_fbc_post_update(struct intel_crtc *crtc);
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_fbc_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
+void intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
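
With the header change, modeset-path callers hand these helpers the full atomic state plus the CRTC instead of pre-resolved crtc_state/plane_state pointers. A hypothetical call site (the surrounding function and the vblank handling are illustrative, not part of this patch):

static void commit_pipe(struct intel_atomic_state *state,
			struct intel_crtc *crtc)
{
	/*
	 * pre_update returns true when the caller must wait for a
	 * vblank before touching the planes; the helpers resolve
	 * the crtc/plane states from @state themselves.
	 */
	bool need_vblank_wait = intel_fbc_pre_update(state, crtc);

	/* ... vblank wait (if needed), then plane/pipe programming ... */

	intel_fbc_post_update(state, crtc);
}
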
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1e98e432c9fa..3bc804212a99 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -40,7 +40,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -191,7 +190,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
- if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
+ if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
@@ -410,9 +409,9 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
if (!crtc->state->active)
continue;
- WARN(!crtc->primary->state->fb,
- "re-used BIOS config but lost an fb on crtc %d\n",
- crtc->base.id);
+ drm_WARN(dev, !crtc->primary->state->fb,
+ "re-used BIOS config but lost an fb on crtc %d\n",
+ crtc->base.id);
}
@@ -439,7 +438,8 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
+ if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) ||
+ !INTEL_DISPLAY_ENABLED(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -452,7 +452,7 @@ int intel_fbdev_init(struct drm_device *dev)
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
- ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
+ ret = drm_fb_helper_init(dev, &ifbdev->helper);
if (ret) {
kfree(ifbdev);
return ret;
@@ -461,8 +461,6 @@ int intel_fbdev_init(struct drm_device *dev)
dev_priv->fbdev = ifbdev;
INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
- drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
-
return 0;
}
@@ -569,7 +567,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
- WARN_ON(state != FBINFO_STATE_RUNNING);
+ drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
if (!console_trylock()) {
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
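
Beyond the logging conversion, intel_fbdev.c picks up two drm core API changes: drm_fb_helper_init() lost its max-connector-count argument, and the fbdev helper now tracks connectors itself, so the explicit drm_fb_helper_single_add_all_connectors() call disappears. Before/after, lifted from the hunks:

/* before: tell the helper how many connectors to expect,
 * then populate it by hand */
ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);

/* after: the core manages the connector list for the helper */
ret = drm_fb_helper_init(dev, &ifbdev->helper);
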
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 6c83b350525d..813a4f7033e1 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -95,15 +95,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
lockdep_assert_held(&dev_priv->irq_lock);
- if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ if ((intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
- I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(dev_priv, reg);
trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
- DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -118,11 +118,13 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
if (enable) {
u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg,
+ enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(dev_priv, reg);
} else {
- if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS)
+ drm_err(&dev_priv->drm, "pipe %c underrun\n",
+ pipe_name(pipe));
}
}
@@ -143,18 +145,18 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
- POSTING_READ(GEN7_ERR_INT);
+ intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+ intel_de_posting_read(dev_priv, GEN7_ERR_INT);
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
}
static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -163,7 +165,8 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+ intel_de_write(dev_priv, GEN7_ERR_INT,
+ ERR_INT_FIFO_UNDERRUN(pipe));
if (!ivb_can_enable_err_int(dev))
return;
@@ -173,9 +176,10 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
if (old &&
- I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- DRM_ERROR("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
+ intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ drm_err(&dev_priv->drm,
+ "uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
}
}
}
@@ -209,19 +213,20 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
- u32 serr_int = I915_READ(SERR_INT);
+ u32 serr_int = intel_de_read(dev_priv, SERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
- I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
- POSTING_READ(SERR_INT);
+ intel_de_write(dev_priv, SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+ intel_de_posting_read(dev_priv, SERR_INT);
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -231,8 +236,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
- I915_WRITE(SERR_INT,
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+ intel_de_write(dev_priv, SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
if (!cpt_can_enable_serr_int(dev))
return;
@@ -241,10 +246,11 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
} else {
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- if (old && I915_READ(SERR_INT) &
+ if (old && intel_de_read(dev_priv, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm,
+ "uncleared pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
}
}
@@ -378,8 +384,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("CPU pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
@@ -400,8 +406,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false)) {
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("PCH transcoder %c FIFO underrun\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
+ pipe_name(pch_transcoder));
}
}
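
A detail worth noting in the underrun handlers: clearing a sticky error bit is always followed by a posting read, so the write is flushed to the hardware before reporting is re-enabled or the IRQ handler returns. The pattern, as in the ivb hunk above:

/* clear the sticky underrun bit ... */
intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
/* ... and flush the write so the clear lands before we continue */
intel_de_posting_read(dev_priv, GEN7_ERR_INT);
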
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
new file mode 100644
index 000000000000..a0cc894c3868
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_global_state.h"
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+ struct intel_global_obj *obj,
+ struct intel_global_state *state,
+ const struct intel_global_state_funcs *funcs)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ obj->state = state;
+ obj->funcs = funcs;
+ list_add_tail(&obj->head, &dev_priv->global_obj_list);
+}
+
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
+{
+ struct intel_global_obj *obj, *next;
+
+ list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
+ list_del(&obj->head);
+ obj->funcs->atomic_destroy_state(obj, obj->state);
+ }
+}
+
+static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ drm_modeset_lock_assert_held(&crtc->base.mutex);
+}
+
+static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
+ struct drm_modeset_lock *lock)
+{
+ struct drm_modeset_lock *l;
+
+ list_for_each_entry(l, &ctx->locked, head) {
+ if (lock == l)
+ return true;
+ }
+
+ return false;
+}
+
+static void assert_global_state_read_locked(struct intel_atomic_state *state)
+{
+ struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (modeset_lock_is_held(ctx, &crtc->base.mutex))
+ return;
+ }
+
+ WARN(1, "Global state not read locked\n");
+}
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int index, num_objs, i;
+ size_t size;
+ struct __intel_global_objs_state *arr;
+ struct intel_global_state *obj_state;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].state;
+
+ assert_global_state_read_locked(state);
+
+ num_objs = state->num_global_objs + 1;
+ size = sizeof(*state->global_objs) * num_objs;
+ arr = krealloc(state->global_objs, size, GFP_KERNEL);
+ if (!arr)
+ return ERR_PTR(-ENOMEM);
+
+ state->global_objs = arr;
+ index = state->num_global_objs;
+ memset(&state->global_objs[index], 0, sizeof(*state->global_objs));
+
+ obj_state = obj->funcs->atomic_duplicate_state(obj);
+ if (!obj_state)
+ return ERR_PTR(-ENOMEM);
+
+ obj_state->changed = false;
+
+ state->global_objs[index].state = obj_state;
+ state->global_objs[index].old_state = obj->state;
+ state->global_objs[index].new_state = obj_state;
+ state->global_objs[index].ptr = obj;
+ obj_state->state = state;
+
+ state->num_global_objs = num_objs;
+
+ DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
+ obj, obj_state, state);
+
+ return obj_state;
+}
+
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].old_state;
+
+ return NULL;
+}
+
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].new_state;
+
+ return NULL;
+}
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *old_obj_state, *new_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
+ new_obj_state, i) {
+ WARN_ON(obj->state != old_obj_state);
+
+ /*
+ * If the new state wasn't modified (and properly
+ * locked for write access) we throw it away.
+ */
+ if (!new_obj_state->changed)
+ continue;
+
+ assert_global_state_write_locked(dev_priv);
+
+ old_obj_state->state = state;
+ new_obj_state->state = NULL;
+
+ state->global_objs[i].state = old_obj_state;
+ obj->state = new_obj_state;
+ }
+}
+
+void intel_atomic_clear_global_state(struct intel_atomic_state *state)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++) {
+ struct intel_global_obj *obj = state->global_objs[i].ptr;
+
+ obj->funcs->atomic_destroy_state(obj,
+ state->global_objs[i].state);
+ state->global_objs[i].ptr = NULL;
+ state->global_objs[i].state = NULL;
+ state->global_objs[i].old_state = NULL;
+ state->global_objs[i].new_state = NULL;
+ }
+ state->num_global_objs = 0;
+}
+
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
+{
+ struct intel_atomic_state *state = obj_state->state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex,
+ state->base.acquire_ctx);
+ if (ret)
+ return ret;
+ }
+
+ obj_state->changed = true;
+
+ return 0;
+}
+
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
+{
+ struct intel_atomic_state *state = obj_state->state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ obj_state->changed = true;
+
+ return 0;
+}
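
intel_global_state.c introduces a small framework for driver-global objects that want atomic semantics: a duplicated state per intel_atomic_state, old/new pointers swapped at commit, read access under any single CRTC lock and write access under all of them. A client would plug in roughly like this (the cdclk naming and payload are illustrative; this patch adds only the framework):

struct intel_cdclk_state {
	struct intel_global_state base;	/* must be embedded first */
	unsigned int cdclk;		/* example payload */
};

static struct intel_global_state *
intel_cdclk_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_cdclk_state *state;

	/* obj->state points at the currently committed state */
	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);

	return state ? &state->base : NULL;
}

static void intel_cdclk_destroy_state(struct intel_global_obj *obj,
				      struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_cdclk_funcs = {
	.atomic_duplicate_state = intel_cdclk_duplicate_state,
	.atomic_destroy_state = intel_cdclk_destroy_state,
};

At init time such an object would be registered once via intel_atomic_global_obj_init() with an initial state and these funcs, and torn down through intel_atomic_global_obj_cleanup().
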
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
new file mode 100644
index 000000000000..e6163a469029
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GLOBAL_STATE_H__
+#define __INTEL_GLOBAL_STATE_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_global_obj;
+struct intel_global_state;
+
+struct intel_global_state_funcs {
+ struct intel_global_state *(*atomic_duplicate_state)(struct intel_global_obj *obj);
+ void (*atomic_destroy_state)(struct intel_global_obj *obj,
+ struct intel_global_state *state);
+};
+
+struct intel_global_obj {
+ struct list_head head;
+ struct intel_global_state *state;
+ const struct intel_global_state_funcs *funcs;
+};
+
+#define intel_for_each_global_obj(obj, dev_priv) \
+ list_for_each_entry(obj, &(dev_priv)->global_obj_list, head)
+
+#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_old_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+struct intel_global_state {
+ struct intel_atomic_state *state;
+ bool changed;
+};
+
+struct __intel_global_objs_state {
+ struct intel_global_obj *ptr;
+ struct intel_global_state *state, *old_state, *new_state;
+};
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+ struct intel_global_obj *obj,
+ struct intel_global_state *state,
+ const struct intel_global_state_funcs *funcs);
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv);
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state);
+void intel_atomic_clear_global_state(struct intel_atomic_state *state);
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
+
+#endif
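
Of the two locking helpers at the bottom of the header, intel_atomic_lock_global_state() takes every CRTC's modeset lock (write access without dragging each pipe's state into the commit), while intel_atomic_serialize_global_state() additionally adds every CRTC's state to the commit, serializing the change against all pipes. Checking and modifying a global object from atomic check would look roughly like this (the cdclk object name continues the illustration above and is not defined by this patch):

struct intel_global_state *obj_state;
int ret;

obj_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk_obj);
if (IS_ERR(obj_state))
	return PTR_ERR(obj_state);

/* we are going to modify it: lock all pipes for write access */
ret = intel_atomic_lock_global_state(obj_state);
if (ret)
	return ret;
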
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 3d4d19ac1d14..1fd3a5a6296b 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -143,8 +142,8 @@ to_intel_gmbus(struct i2c_adapter *i2c)
void
intel_gmbus_reset(struct drm_i915_private *dev_priv)
{
- I915_WRITE(GMBUS0, 0);
- I915_WRITE(GMBUS4, 0);
+ intel_de_write(dev_priv, GMBUS0, 0);
+ intel_de_write(dev_priv, GMBUS4, 0);
}
static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -153,12 +152,12 @@ static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
if (!enable)
val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -166,12 +165,12 @@ static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
if (!enable)
val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -179,12 +178,12 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(GEN9_CLKGATE_DIS_4);
+ val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4);
if (!enable)
val |= BXT_GMBUS_GATING_DIS;
else
val &= ~BXT_GMBUS_GATING_DIS;
- I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+ intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val);
}
static u32 get_reserved(struct intel_gmbus *bus)
@@ -337,14 +336,16 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
irq_en = 0;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- I915_WRITE_FW(GMBUS4, irq_en);
+ intel_de_write_fw(dev_priv, GMBUS4, irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+ ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ 2);
if (ret)
- ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
+ ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ 50);
- I915_WRITE_FW(GMBUS4, 0);
+ intel_de_write_fw(dev_priv, GMBUS4, 0);
remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
@@ -366,13 +367,13 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
irq_enable = GMBUS_IDLE_EN;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- I915_WRITE_FW(GMBUS4, irq_enable);
+ intel_de_write_fw(dev_priv, GMBUS4, irq_enable);
ret = intel_wait_for_register_fw(&dev_priv->uncore,
GMBUS2, GMBUS_ACTIVE, 0,
10);
- I915_WRITE_FW(GMBUS4, 0);
+ intel_de_write_fw(dev_priv, GMBUS4, 0);
remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
return ret;
@@ -404,15 +405,12 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
len++;
}
size = len % 256 + 256;
- I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+ intel_de_write_fw(dev_priv, GMBUS0,
+ gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
}
- I915_WRITE_FW(GMBUS1,
- gmbus1_index |
- GMBUS_CYCLE_WAIT |
- (size << GMBUS_BYTE_COUNT_SHIFT) |
- (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS1,
+ gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
@@ -421,7 +419,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- val = I915_READ_FW(GMBUS3);
+ val = intel_de_read_fw(dev_priv, GMBUS3);
do {
if (extra_byte_added && len == 1)
break;
@@ -432,7 +430,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (burst_read && len == size - 4)
/* Reset the override bit */
- I915_WRITE_FW(GMBUS0, gmbus0_reg);
+ intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg);
}
return 0;
@@ -489,12 +487,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
len -= 1;
}
- I915_WRITE_FW(GMBUS3, val);
- I915_WRITE_FW(GMBUS1,
- gmbus1_index | GMBUS_CYCLE_WAIT |
- (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
- (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS3, val);
+ intel_de_write_fw(dev_priv, GMBUS1,
+ gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -503,7 +498,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- I915_WRITE_FW(GMBUS3, val);
+ intel_de_write_fw(dev_priv, GMBUS3, val);
ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
@@ -568,7 +563,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* GMBUS5 holds 16-bit index */
if (gmbus5)
- I915_WRITE_FW(GMBUS5, gmbus5);
+ intel_de_write_fw(dev_priv, GMBUS5, gmbus5);
if (msgs[1].flags & I2C_M_RD)
ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
@@ -578,7 +573,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
- I915_WRITE_FW(GMBUS5, 0);
+ intel_de_write_fw(dev_priv, GMBUS5, 0);
return ret;
}
@@ -601,7 +596,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
pch_gmbus_clock_gating(dev_priv, false);
retry:
- I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0);
+ intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
@@ -629,18 +624,19 @@ retry:
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
- I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
- I915_WRITE_FW(GMBUS0, 0);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
ret = ret ?: i;
goto out;
@@ -660,8 +656,9 @@ clear_err:
*/
ret = -ENXIO;
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out after NAK\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
@@ -669,13 +666,13 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
- I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
- I915_WRITE_FW(GMBUS1, 0);
- I915_WRITE_FW(GMBUS0, 0);
+ intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT);
+ intel_de_write_fw(dev_priv, GMBUS1, 0);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
- DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
- adapter->name, msgs[i].addr,
- (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
/*
* Passive adapters sometimes NAK the first probe. Retry the first
@@ -684,17 +681,19 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
goto retry;
}
goto out;
timeout:
- DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
- bus->adapter.name, bus->reg0 & 0xff);
- I915_WRITE_FW(GMBUS0, 0);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -908,7 +907,8 @@ err:
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_gmbus_is_valid_pin(dev_priv, pin)))
return NULL;
return &dev_priv->gmbus[pin].adapter;
@@ -929,9 +929,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_lock(&dev_priv->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
- DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
- force_bit ? "en" : "dis", adapter->name,
- bus->force_bit);
+ drm_dbg_kms(&dev_priv->drm,
+ "%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
mutex_unlock(&dev_priv->gmbus_mutex);
}
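
Note that the gmbus conversion maps the old I915_READ_FW()/I915_WRITE_FW() macros onto the matching intel_de_read_fw()/intel_de_write_fw() variants. The _fw forms skip the per-access forcewake bookkeeping and are meant for hot paths whose callers already hold the required power references, as the gmbus wait loops do. The polling idiom from gmbus_wait(), in sketch form:

/* arm the wanted gmbus interrupt (caller keeps the device awake) */
intel_de_write_fw(dev_priv, GMBUS4, irq_en);

/* poll the status register until a status bit sets or we time out */
ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status, 2);

/* disarm again before going back to sleep */
intel_de_write_fw(dev_priv, GMBUS4, 0);
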
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 0fdbd39f6641..ee0f27ea2810 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -43,6 +43,7 @@ static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
@@ -54,7 +55,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
break;
}
if (i == tries) {
- DRM_DEBUG_KMS("Bksv is invalid\n");
+ drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
return -ENODEV;
}
@@ -64,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bool capable = false;
u8 bksv[5];
@@ -85,8 +86,8 @@ bool intel_hdcp_capable(struct intel_connector *connector)
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
@@ -112,7 +113,8 @@ static inline
bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
- return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ return intel_de_read(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}
@@ -120,7 +122,8 @@ static inline
bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
- return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ return intel_de_read(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS;
}
@@ -184,9 +187,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
- I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
- I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
- HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
@@ -194,7 +197,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
int ret;
u32 val;
- val = I915_READ(HDCP_KEY_STATUS);
+ val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
return 0;
@@ -203,7 +206,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
out of reset. So if the key is not already loaded, it's an error state.
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+ if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
return -ENXIO;
/*
@@ -217,12 +220,13 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
ret = sandybridge_pcode_write(dev_priv,
SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
- DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to initiate HDCP key load (%d)\n",
+ ret);
return ret;
}
} else {
- I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
}
/* Wait for the keys to load (500us) */
@@ -235,7 +239,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
return -ENXIO;
/* Send Aksv over to PCH display for use in authentication */
- I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
return 0;
}
@@ -243,9 +247,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
/* Returns updated SHA-1 index */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
- I915_WRITE(HDCP_SHA_TEXT, sha_text);
+ intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 ready\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
return 0;
@@ -270,7 +274,8 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
return HDCP_TRANSD_REP_PRESENT |
HDCP_TRANSD_SHA1_M0;
default:
- DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
+ drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
+ cpu_transcoder);
return -EINVAL;
}
}
@@ -287,7 +292,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
- DRM_ERROR("Unknown port %d\n", port);
+ drm_err(&dev_priv->drm, "Unknown port %d\n", port);
return -EINVAL;
}
}
@@ -297,21 +302,19 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
- struct drm_i915_private *dev_priv;
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port = intel_dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
- dev_priv = intel_dig_port->base.base.dev->dev_private;
-
/* Process V' values from the receiver */
for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
if (ret)
return ret;
- I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
+ intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
}
/*
@@ -328,7 +331,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_text = 0;
sha_leftovers = 0;
rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
@@ -345,7 +348,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
/* Programming guide writes this every 64 bytes */
sha_idx += sizeof(sha_text);
if (!(sha_idx % 64))
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
/* Store the leftover bytes from the ksv in sha_text */
sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
@@ -377,7 +381,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
*/
if (sha_leftovers == 0) {
/* Write 16 bits of text, 16 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_16);
ret = intel_write_sha_text(dev_priv,
bstatus[0] << 8 | bstatus[1]);
if (ret < 0)
@@ -385,14 +390,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 16 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_16);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
@@ -400,7 +407,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
} else if (sha_leftovers == 1) {
/* Write 24 bits of text, 8 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_24);
sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
/* Only 24-bits of data, must be in the LSB */
sha_text = (sha_text & 0xffffff00) >> 8;
@@ -410,14 +418,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 24 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_8);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
@@ -425,7 +435,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
} else if (sha_leftovers == 2) {
/* Write 32 bits of text */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
@@ -433,7 +444,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 64 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
for (i = 0; i < 2; i++) {
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
@@ -442,7 +454,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
}
} else if (sha_leftovers == 3) {
/* Write 32 bits of text */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
sha_text |= bstatus[0] << 24;
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
@@ -450,32 +463,35 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 8 bits of text, 24 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_8);
ret = intel_write_sha_text(dev_priv, bstatus[1]);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 8 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_24);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
} else {
- DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
- sha_leftovers);
+ drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
+ sha_leftovers);
return -EINVAL;
}
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
ret = intel_write_sha_text(dev_priv, 0);
@@ -495,14 +511,15 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
return ret;
/* Tell the HW we're done with the hash and wait for it to ACK */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_COMPLETE_HASH);
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 complete\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
- if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
- DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
+ if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+ drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
return -ENXIO;
}
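
(Editor's note: the four sha_leftovers branches above exist because each KSV is DRM_HDCP_KSV_LEN (5) bytes while the SHA engine takes 32-bit writes, so after the KSV loop the number of bytes still parked in sha_text is (5 * num_downstream) modulo 4; each branch then picks the HDCP_SHA1_TEXT_* split that lands the 2-byte BStatus and 8-byte M0 on the right byte lanes. The invariant, as a hypothetical helper, not kernel code:)

/* Bytes left over in sha_text after hashing the KSV list: each KSV is
 * 5 bytes, writes are 4 bytes wide, so only the remainder mod 4 survives. */
static unsigned int sha_leftovers_after_ksvs(unsigned int num_downstream)
{
	return (num_downstream * 5) % 4;	/* 0..3, one value per branch */
}

(With a single downstream device this yields 1, which is why the common one-repeater case ends up in the sha_leftovers == 1 branch.)
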
@@ -513,15 +530,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
- struct drm_device *dev = connector->base.dev;
u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, tries = 3;
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
- DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -531,7 +549,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -544,13 +562,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
if (num_downstream == 0) {
- DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Repeater with zero downstream devices\n");
return -EINVAL;
}
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
if (!ksv_fifo) {
- DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
+ drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
return -ENOMEM;
}
@@ -558,8 +577,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (ret)
goto err;
- if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
- DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
+ num_downstream)) {
+ drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
ret = -EPERM;
goto err;
}
@@ -577,12 +597,13 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
}
if (i == tries) {
- DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "V Prime validation failed.(%d)\n", ret);
goto err;
}
- DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
- num_downstream);
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
+ num_downstream);
ret = 0;
err:
kfree(ksv_fifo);
@@ -592,13 +613,12 @@ err:
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
const struct intel_hdcp_shim *shim = hdcp->shim;
- struct drm_i915_private *dev_priv;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
- enum port port;
+ enum port port = intel_dig_port->base.port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
union {
@@ -615,10 +635,6 @@ static int intel_hdcp_auth(struct intel_connector *connector)
} ri;
bool repeater_present, hdcp_capable;
- dev_priv = intel_dig_port->base.base.dev->dev_private;
-
- port = intel_dig_port->base.port;
-
/*
* Detects whether the display is HDCP capable. Although we check for
* valid Bksv below, the HDCP over DP spec requires that we check
@@ -630,28 +646,32 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret)
return ret;
if (!hdcp_capable) {
- DRM_DEBUG_KMS("Panel is not HDCP capable\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel is not HDCP capable\n");
return -EINVAL;
}
}
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
- I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
- get_random_u32());
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
- HDCP_CONF_CAPTURE_AN);
+ intel_de_write(dev_priv,
+ HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+ get_random_u32());
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
- DRM_ERROR("Timed out waiting for An\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for An\n");
return -ETIMEDOUT;
}
- an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
- an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
+ an.reg[0] = intel_de_read(dev_priv,
+ HDCP_ANLO(dev_priv, cpu_transcoder, port));
+ an.reg[1] = intel_de_read(dev_priv,
+ HDCP_ANHI(dev_priv, cpu_transcoder, port));
ret = shim->write_an_aksv(intel_dig_port, an.shim);
if (ret)
return ret;
@@ -664,33 +684,34 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret < 0)
return ret;
- if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
- DRM_ERROR("BKSV is revoked\n");
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
+ drm_err(&dev_priv->drm, "BKSV is revoked\n");
return -EPERM;
}
- I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
- I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
+ intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
+ bksv.reg[0]);
+ intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
+ bksv.reg[1]);
ret = shim->repeater_present(intel_dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
- I915_WRITE(HDCP_REP_CTL,
- intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
- port));
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
ret = shim->toggle_signalling(intel_dig_port, true);
if (ret)
return ret;
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
- HDCP_CONF_AUTH_AND_ENC);
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
- DRM_ERROR("Timed out waiting for R0 ready\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -716,19 +737,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(intel_dig_port, ri.shim);
if (ret)
return ret;
- I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
+ intel_de_write(dev_priv,
+ HDCP_RPRIME(dev_priv, cpu_transcoder, port),
+ ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+ if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
- DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ drm_dbg_kms(&dev_priv->drm,
+ "Timed out waiting for Ri prime match (%x)\n",
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
+ cpu_transcoder, port)));
return -ETIMEDOUT;
}
@@ -737,7 +760,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- DRM_ERROR("Timed out waiting for encryption\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -749,52 +772,53 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
- DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
return 0;
}
static int _intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
+ connector->base.name, connector->base.base.id);
hdcp->hdcp_encrypted = false;
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
+ drm_err(&dev_priv->drm,
+ "Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
- DRM_ERROR("Failed to disable HDCP signalling\n");
+ drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
return ret;
}
- DRM_DEBUG_KMS("HDCP is disabled\n");
+ drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
return 0;
}
static int _intel_hdcp_enable(struct intel_connector *connector)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
int i, ret, tries = 3;
- DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
+ connector->base.name, connector->base.base.id);
if (!hdcp_key_loadable(dev_priv)) {
- DRM_ERROR("HDCP key Load is not possible\n");
+ drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
return -ENXIO;
}
@@ -805,7 +829,8 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
intel_hdcp_clear_keys(dev_priv);
}
if (ret) {
- DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
+ ret);
return ret;
}
@@ -817,13 +842,14 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
return 0;
}
- DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
/* Ensuring HDCP encryption and signalling are stopped. */
_intel_hdcp_disable(connector);
}
- DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP authentication failed (%d tries/%d)\n", tries, ret);
return ret;
}
@@ -836,9 +862,9 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -853,11 +879,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
- DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
- connector->base.name, connector->base.base.id,
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
+ drm_err(&dev_priv->drm,
+ "%s:%d HDCP link stopped encryption,%x\n",
+ connector->base.name, connector->base.base.id,
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -872,12 +899,13 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
ret = _intel_hdcp_disable(connector);
if (ret) {
- DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -885,7 +913,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
ret = _intel_hdcp_enable(connector);
if (ret) {
- DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -901,9 +929,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
- struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
/*
@@ -916,13 +944,13 @@ static void intel_hdcp_prop_work(struct work_struct *work)
hdcp->value);
mutex_unlock(&hdcp->mutex);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
}
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
- /* PORT E doesn't have HDCP, and PORT F is disabled */
- return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
+ return INTEL_INFO(dev_priv)->display.has_hdcp &&
+ (INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
}
static int
@@ -944,7 +972,8 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
if (ret)
- DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -974,7 +1003,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
rx_cert, paired,
ek_pub_km, msg_sz);
if (ret < 0)
- DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -998,7 +1028,7 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
if (ret < 0)
- DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1023,7 +1053,8 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
if (ret < 0)
- DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1048,7 +1079,8 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
if (ret < 0)
- DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1073,7 +1105,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
if (ret < 0)
- DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1097,7 +1130,8 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
if (ret < 0)
- DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1126,7 +1160,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
rep_topology,
rep_send_ack);
if (ret < 0)
- DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Verify rep topology failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1151,7 +1186,7 @@ hdcp2_verify_mprime(struct intel_connector *connector,
ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
if (ret < 0)
- DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1174,7 +1209,8 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
if (ret < 0)
- DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
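
(Editor's note: every hdcp2_* wrapper above follows one shape — take dev_priv->hdcp_comp_mutex, call the bound MEI component op, drm_dbg_kms() on failure, drop the mutex, and pass the return value through. A condensed standalone sketch of that shape follows; the types and the unbound-component guard are illustrative, paraphrased from context elided between hunks, not i915 API.)

/* Shape of the hdcp2_* MEI wrappers above, reduced to a userspace sketch. */
#include <errno.h>
#include <pthread.h>

struct mei_ops {
	int (*initiate_locality_check)(void *mei_dev, void *data, void *lc_init);
};

struct hdcp_master {
	void *mei_dev;
	const struct mei_ops *ops;
};

static pthread_mutex_t hdcp_comp_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct hdcp_master *hdcp_master;	/* set by the component bind callback */

static int prepare_lc_init(void *data, void *lc_init)
{
	int ret;

	pthread_mutex_lock(&hdcp_comp_mutex);
	if (!hdcp_master || !hdcp_master->ops) {
		pthread_mutex_unlock(&hdcp_comp_mutex);
		return -EINVAL;		/* component not bound yet */
	}

	ret = hdcp_master->ops->initiate_locality_check(hdcp_master->mei_dev,
							data, lc_init);
	/* the real wrappers drm_dbg_kms() here when ret < 0 */
	pthread_mutex_unlock(&hdcp_comp_mutex);
	return ret;
}
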
@@ -1209,9 +1245,9 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_ake_init ake_init;
struct hdcp2_ake_send_cert send_cert;
@@ -1242,15 +1278,16 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
return ret;
if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
- DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
+ drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
}
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
- if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+ msgs.send_cert.cert_rx.receiver_id,
1)) {
- DRM_ERROR("Receiver ID is revoked\n");
+ drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
return -EPERM;
}
@@ -1297,7 +1334,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
static int hdcp2_locality_check(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_lc_init lc_init;
@@ -1333,7 +1370,7 @@ static int hdcp2_locality_check(struct intel_connector *connector)
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp2_ske_send_eks send_eks;
int ret;
@@ -1353,7 +1390,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1404,9 +1441,9 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_rep_send_receiverid_list recvid_list;
struct hdcp2_rep_send_ack rep_ack;
@@ -1425,7 +1462,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
- DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
return -EINVAL;
}
@@ -1433,17 +1470,24 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
+ if (!hdcp->hdcp2_encrypted && seq_num_v) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Non zero Seq_num_v at first RecvId_List msg\n");
+ return -EINVAL;
+ }
+
if (seq_num_v < hdcp->seq_num_v) {
/* Roll over of the seq_num_v from repeater. Reauthenticate. */
- DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+ drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
return -EINVAL;
}
device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
- if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+ msgs.recvid_list.receiver_ids,
device_cnt)) {
- DRM_ERROR("Revoked receiver ID(s) is in list\n");
+ drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
return -EPERM;
}
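
(Editor's note: the new check above tightens the repeater link-integrity rules — the first RecvId_List of a session, i.e. before hdcp2_encrypted is set, must carry seq_num_v == 0, and any later value may only grow; a smaller value means the 24-bit counter rolled over and the link must reauthenticate. Distilled into a standalone predicate with a hypothetical name:)

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirror of the seq_num_v rules added above: zero is required on the first
 * list of a session, and the value must be non-decreasing afterwards. */
static int check_seq_num_v(bool hdcp2_encrypted, uint32_t last_seq_num_v,
			   uint32_t seq_num_v)
{
	if (!hdcp2_encrypted && seq_num_v)
		return -EINVAL;	/* first RecvId_List must start at zero */
	if (seq_num_v < last_seq_num_v)
		return -EINVAL;	/* 24-bit counter rolled over: reauthenticate */
	return 0;
}
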
@@ -1475,26 +1519,28 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
int ret;
ret = hdcp2_authentication_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_locality_check(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Locality Check failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_session_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
return ret;
}
@@ -1509,7 +1555,8 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
if (hdcp->is_repeater) {
ret = hdcp2_authenticate_repeater(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Repeater Auth Failed. Err: %d\n", ret);
return ret;
}
}
@@ -1524,31 +1571,32 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
- DRM_ERROR("Failed to enable HDCP signalling. %d\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to enable HDCP signalling. %d\n",
+ ret);
return ret;
}
}
- if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_AUTH_STATUS) {
/* Link is Authenticated. Now set for Encryption */
- I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
- I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
- port)) |
- CTL_LINK_ENCRYPTION_REQ);
+ intel_de_write(dev_priv,
+ HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
}
ret = intel_de_wait_for_set(dev_priv,
@@ -1562,19 +1610,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS));
+ drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
- I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
- I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
- ~CTL_LINK_ENCRYPTION_REQ);
+ intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
ret = intel_de_wait_for_clear(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -1582,13 +1629,14 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
- DRM_DEBUG_KMS("Disable Encryption Timedout");
+ drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
- DRM_ERROR("Failed to disable HDCP signalling. %d\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to disable HDCP signalling. %d\n",
+ ret);
return ret;
}
}
@@ -1598,6 +1646,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret, i, tries = 3;
for (i = 0; i < tries; i++) {
@@ -1606,10 +1655,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
break;
/* Clearing the mei hdcp session */
- DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
- i + 1, tries, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
if (i != tries) {
@@ -1620,9 +1669,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
ret = hdcp2_enable_encryption(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Encryption Enable Failed.(%d)\n", ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
}
@@ -1631,23 +1681,24 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
- DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
- hdcp->content_type, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
return ret;
}
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
hdcp->hdcp2_encrypted = true;
return 0;
@@ -1655,15 +1706,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
@@ -1673,10 +1725,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -1690,10 +1742,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
- DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
- I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
- port)));
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
+ drm_err(&dev_priv->drm,
+ "HDCP2.2 link stopped the encryption, %x\n",
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -1713,25 +1766,29 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
- DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP2.2 Downstream topology change\n");
ret = hdcp2_authenticate_repeater_topology(connector);
if (!ret) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&hdcp->prop_work);
goto out;
}
- DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
- connector->base.name, connector->base.base.id,
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] Repeater topology auth failed.(%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
} else {
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] HDCP2.2 link failed, retrying auth\n",
+ connector->base.name, connector->base.base.id);
}
ret = _intel_hdcp2_disable(connector);
if (ret) {
- DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id, ret);
+ drm_err(&dev_priv->drm,
+ "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id, ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -1739,9 +1796,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
ret = _intel_hdcp2_enable(connector);
if (ret) {
- DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id,
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -1772,7 +1830,7 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
{
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
- DRM_DEBUG("I915 HDCP comp bind\n");
+ drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
dev_priv->hdcp_master->mei_dev = mei_kdev;
@@ -1786,7 +1844,7 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
{
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
- DRM_DEBUG("I915 HDCP comp unbind\n");
+ drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_master = NULL;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1830,7 +1888,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
if (INTEL_GEN(dev_priv) < 12)
data->fw_ddi =
- intel_get_mei_fw_ddi_index(connector->encoder->port);
+ intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
else
/*
* As per ME FW API expectation, for GEN 12+, fw_ddi is filled
@@ -1854,7 +1912,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
sizeof(struct hdcp2_streamid_type),
GFP_KERNEL);
if (!data->streams) {
- DRM_ERROR("Out of Memory\n");
+ drm_err(&dev_priv->drm, "Out of Memory\n");
return -ENOMEM;
}
@@ -1881,14 +1939,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
return;
mutex_lock(&dev_priv->hdcp_comp_mutex);
- WARN_ON(dev_priv->hdcp_comp_added);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
dev_priv->hdcp_comp_added = true;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
I915_COMPONENT_HDCP);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
+ ret);
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_comp_added = false;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1899,12 +1958,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
static void intel_hdcp2_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
- DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+ drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
}
@@ -1954,7 +2014,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
return -ENOENT;
mutex_lock(&hdcp->mutex);
- WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_WARN_ON(&dev_priv->drm,
+ hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
if (INTEL_GEN(dev_priv) >= 12) {
@@ -2014,6 +2075,46 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
+void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool content_protection_type_changed =
+ (conn_state->hdcp_content_type != hdcp->content_type &&
+ conn_state->content_protection !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
+ /*
+ * During the HDCP encryption session if Type change is requested,
+ * disable the HDCP and reenable it with new TYPE value.
+ */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_disable(connector);
+
+ /*
+ * Mark the hdcp state as DESIRED after the hdcp disable of type
+ * change procedure.
+ */
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_enable(connector,
+ crtc_state->cpu_transcoder,
+ (u8)conn_state->hdcp_content_type);
+}
+
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->hdcp_comp_mutex);
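
(Editor's note: the new intel_hdcp_update_pipe() added above gives modesets a single entry point for content-protection changes. Its three cases reduce to: disable when userspace requests UNDESIRED or when the HDCP content type changed mid-session, re-arm the state as DESIRED after a type-change disable, and (re)enable when the state is DESIRED or after a type change. The same decision as a table-style sketch; enum names are abbreviated and the helper is hypothetical:)

#include <stdbool.h>

enum cp_state { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

struct hdcp_action {
	bool disable;		/* tear down the current HDCP session */
	bool mark_desired;	/* re-arm the property after a type change */
	bool enable;		/* start (or restart) authentication */
};

/* Decision table implemented by intel_hdcp_update_pipe() above. */
static struct hdcp_action hdcp_update_action(enum cp_state requested,
					     bool type_changed)
{
	/* a type change only matters while protection is still wanted */
	bool type_switch = type_changed && requested != CP_UNDESIRED;

	return (struct hdcp_action){
		.disable = requested == CP_UNDESIRED || type_switch,
		.mark_desired = type_switch,
		.enable = requested == CP_DESIRED || type_switch,
	};
}
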
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index f3c3272e712a..7c12ad609b1f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,12 +8,12 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
struct intel_hdcp_shim;
enum port;
enum transcoder;
@@ -26,6 +26,9 @@ int intel_hdcp_init(struct intel_connector *connector,
int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
+void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
bool intel_hdcp2_capable(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 93ac0f296852..821411b93dac 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -36,7 +36,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
-#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
#include "i915_debugfs.h"
@@ -45,6 +44,7 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -72,17 +72,19 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
- "HDMI port enabled, expecting disabled\n");
+ drm_WARN(dev,
+ intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
}
static void
assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
- WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
- TRANS_DDI_FUNC_ENABLE,
- "HDMI transcoder function enabled, expecting disabled\n");
+ drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+ TRANS_DDI_FUNC_ENABLE,
+ "HDMI transcoder function enabled, expecting disabled\n");
}
struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
@@ -215,32 +217,33 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(VIDEO_DIP_CTL, val);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(VIDEO_DIP_DATA, *data);
+ intel_de_write(dev_priv, VIDEO_DIP_DATA, *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(VIDEO_DIP_DATA, 0);
+ intel_de_write(dev_priv, VIDEO_DIP_DATA, 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(VIDEO_DIP_CTL, val);
- POSTING_READ(VIDEO_DIP_CTL);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_posting_read(dev_priv, VIDEO_DIP_CTL);
}
static void g4x_read_infoframe(struct intel_encoder *encoder,
@@ -252,22 +255,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(VIDEO_DIP_CTL);
+ val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(VIDEO_DIP_CTL, val);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(VIDEO_DIP_DATA);
+ *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
}
static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
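
(Editor's note: the g4x helpers above establish the DIP programming sequence that the ibx/cpt/vlv/hsw variants further down repeat with per-pipe or per-transcoder registers: disable the infoframe type, stream the payload into the data register, zero-fill the remaining buffer bytes so the hardware computes the ECC over known data, then re-enable the type at vsync frequency and flush with a posting read. A compacted userspace sketch of that ordering follows; the register names and bit layouts are mocks, and the hardware's single auto-incrementing DATA register is modelled as an array.)

#include <stdint.h>

#define DIP_SELECT_MASK		(3u << 19)	/* mock bit positions */
#define DIP_SELECT(type)	((uint32_t)(type) << 19)
#define DIP_TYPE_ENABLE(type)	(1u << (21 + (type)))
#define DIP_FREQ_MASK		(3u << 16)
#define DIP_FREQ_VSYNC		(1u << 16)
#define DIP_DATA_SIZE		32	/* bytes; stand-in for VIDEO_DIP_DATA_SIZE */

static uint32_t dip_ctl;
static uint32_t dip_data[DIP_DATA_SIZE / 4];

static void write_infoframe(unsigned int type, const uint32_t *payload,
			    unsigned int len)
{
	uint32_t val = dip_ctl;
	unsigned int i;

	val &= ~(DIP_SELECT_MASK | 0xf);	/* clear select + data offset */
	val |= DIP_SELECT(type);
	val &= ~DIP_TYPE_ENABLE(type);		/* disable type while rewriting */
	dip_ctl = val;

	for (i = 0; i < len / 4; i++)		/* payload, one dword at a time */
		dip_data[i] = payload[i];
	for (; i < DIP_DATA_SIZE / 4; i++)	/* zero the rest for correct ECC */
		dip_data[i] = 0;

	val |= DIP_TYPE_ENABLE(type);		/* re-enable, send every vsync */
	val &= ~DIP_FREQ_MASK;
	val |= DIP_FREQ_VSYNC;
	dip_ctl = val;				/* real code adds a posting read */
}
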
@@ -288,32 +291,34 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void ibx_read_infoframe(struct intel_encoder *encoder,
@@ -326,15 +331,15 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
@@ -343,7 +348,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -365,10 +370,11 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
@@ -378,22 +384,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void cpt_read_infoframe(struct intel_encoder *encoder,
@@ -406,15 +413,15 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
@@ -422,7 +429,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -441,32 +448,35 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv,
+ VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv,
+ VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void vlv_read_infoframe(struct intel_encoder *encoder,
@@ -479,15 +489,16 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv,
+ VLV_TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
@@ -495,7 +506,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -519,28 +530,30 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
int data_size;
int i;
- u32 val = I915_READ(ctl_reg);
+ u32 val = intel_de_read(dev_priv, ctl_reg);
data_size = hsw_dip_data_size(dev_priv, type);
- WARN_ON(len > data_size);
+ drm_WARN_ON(&dev_priv->drm, len > data_size);
val &= ~hsw_infoframe_enable(type);
- I915_WRITE(ctl_reg, val);
+ intel_de_write(dev_priv, ctl_reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2), *data);
+ intel_de_write(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < data_size; i += 4)
- I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2), 0);
+ intel_de_write(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ 0);
val |= hsw_infoframe_enable(type);
- I915_WRITE(ctl_reg, val);
- POSTING_READ(ctl_reg);
+ intel_de_write(dev_priv, ctl_reg, val);
+ intel_de_posting_read(dev_priv, ctl_reg);
}
static void hsw_read_infoframe(struct intel_encoder *encoder,
@@ -553,18 +566,19 @@ static void hsw_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv, HSW_TVIDEO_DIP_CTL(cpu_transcoder));
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2));
+ *data++ = intel_de_read(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
}
static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
u32 mask;
mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -655,12 +669,12 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(type)) == 0)
return;
- if (WARN_ON(frame->any.type != type))
+ if (drm_WARN_ON(encoder->base.dev, frame->any.type != type))
return;
/* see comment above for the reason for this offset */
len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
- if (WARN_ON(len < 0))
+ if (drm_WARN_ON(encoder->base.dev, len < 0))
return;
/* Insert the 'hole' (see big comment above) at position 3 */
@@ -734,8 +748,8 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
/* nonsense combination */
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
drm_hdmi_avi_infoframe_quant_range(frame, connector,
@@ -753,7 +767,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
/* TODO: handle pixel repetition for YCBCR420 outputs */
ret = hdmi_avi_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -774,13 +788,13 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
frame->sdi = HDMI_SPD_SDI_PC;
ret = hdmi_spd_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -806,11 +820,11 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
&crtc_state->hw.adjusted_mode);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
ret = hdmi_vendor_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -844,7 +858,7 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
}
ret = hdmi_drm_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(&dev_priv->drm, ret))
return false;
return true;
@@ -859,7 +873,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -885,8 +899,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
@@ -904,8 +918,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -982,7 +996,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
- I915_WRITE(reg, crtc_state->infoframes.gcp);
+ intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp);
return true;
}
@@ -1007,7 +1021,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
else
return;
- crtc_state->infoframes.gcp = I915_READ(reg);
+ crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg);
}
static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
@@ -1042,7 +1056,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1056,15 +1070,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- WARN(val & VIDEO_DIP_ENABLE,
- "DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
@@ -1077,8 +1091,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1100,7 +1114,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1113,8 +1127,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
@@ -1126,8 +1140,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1149,7 +1163,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1163,15 +1177,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- WARN(val & VIDEO_DIP_ENABLE,
- "DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
@@ -1184,8 +1198,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1205,7 +1219,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
assert_hdmi_transcoder_func_disabled(dev_priv,
crtc_state->cpu_transcoder);
@@ -1216,16 +1230,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_DRM_GLK);
if (!enable) {
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1260,10 +1274,9 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
unsigned int offset, void *buffer, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
u8 start = offset & 0xff;
@@ -1290,10 +1303,9 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
unsigned int offset, void *buffer, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
u8 *write_buf;
@@ -1325,10 +1337,9 @@ static
int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
@@ -1447,7 +1458,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_crtc *crtc = connector->base.state->crtc;
struct intel_crtc *intel_crtc = container_of(crtc,
struct intel_crtc, base);
@@ -1455,7 +1466,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
int ret;
for (;;) {
- scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+ scanline = intel_de_read(dev_priv, PIPEDSL(intel_crtc->pipe));
if (scanline > 100 && scanline < 200)
break;
usleep_range(25, 50);
@@ -1507,8 +1518,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_connector *connector =
intel_dig_port->hdmi.attached_connector;
enum port port = intel_dig_port->base.port;
@@ -1523,14 +1533,14 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (ret)
return false;
- I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
+ intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
return false;
}
return true;
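/*
 * Aside: beyond the register-helper conversion, the hunk above also
 * tightens the Ri' check -- the masked HDCP_STATUS value must now equal
 * both HDCP_STATUS_RI_MATCH and HDCP_STATUS_ENC, whereas the old
 * wait_for() condition was satisfied as soon as either bit happened to
 * be set.
 */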
@@ -1767,8 +1777,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
- I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -1802,7 +1812,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- tmp = I915_READ(intel_hdmi->hdmi_reg);
+ tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -1863,7 +1873,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- WARN_ON(!pipe_config->has_hdmi_sink);
+ drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
@@ -1878,14 +1888,14 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
temp |= HDMI_AUDIO_ENABLE;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
if (pipe_config->has_audio)
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
@@ -1900,7 +1910,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
@@ -1910,10 +1920,10 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to toggle enable bit off and on
@@ -1924,17 +1934,18 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 24 &&
pipe_config->pixel_multiplier > 1) {
- I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
+ temp & ~SDVO_ENABLE);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
if (pipe_config->has_audio)
@@ -1952,7 +1963,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
enum pipe pipe = crtc->pipe;
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
@@ -1969,27 +1980,25 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 24) {
- I915_WRITE(TRANS_CHICKEN1(pipe),
- I915_READ(TRANS_CHICKEN1(pipe)) |
- TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
}
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- I915_WRITE(TRANS_CHICKEN1(pipe),
- I915_READ(TRANS_CHICKEN1(pipe)) &
- ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
if (pipe_config->has_audio)
@@ -2014,11 +2023,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -2039,14 +2048,14 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
temp &= ~SDVO_ENABLE;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
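/*
 * Aside: the enable/disable paths above repeat a write-post-write-post
 * sequence for the "first write getting masked" workaround; each posting
 * read flushes the preceding write before the next one is issued. A
 * hypothetical helper capturing the pattern (the patch keeps it
 * open-coded):
 */
static void hdmi_reg_write_twice(struct drm_i915_private *dev_priv,
				 i915_reg_t reg, u32 val)
{
	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);
}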
@@ -2090,9 +2099,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder,
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[encoder->port];
- int max_tmds_clock;
+ int max_tmds_clock, vbt_max_tmds_clock;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
max_tmds_clock = 594000;
@@ -2103,15 +2110,23 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
else
max_tmds_clock = 165000;
- if (info->max_tmds_clock)
- max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+ vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+ if (vbt_max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
return max_tmds_clock;
}
+static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi,
+ const struct drm_connector_state *conn_state)
+{
+ return hdmi->has_hdmi_sink &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
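/*
 * Aside: the new helper above folds the old open-coded force_dvi test
 * (force_audio == HDMI_AUDIO_OFF_DVI) into one predicate, so callers
 * below pass a single has_hdmi_sink flag instead of threading force_dvi
 * through every clock-limit check. Illustrative use:
 *
 *	if (!intel_has_hdmi_sink(hdmi, connector->state))
 *		max_tmds_clock = min(max_tmds_clock, 165000);
 */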
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
- bool force_dvi)
+ bool has_hdmi_sink)
{
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
@@ -2127,7 +2142,7 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
if (info->max_tmds_clock)
max_tmds_clock = min(max_tmds_clock,
info->max_tmds_clock);
- else if (!hdmi->has_hdmi_sink || force_dvi)
+ else if (!has_hdmi_sink)
max_tmds_clock = min(max_tmds_clock, 165000);
}
@@ -2137,13 +2152,14 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits,
- bool force_dvi)
+ bool has_hdmi_sink)
{
struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
- if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
+ if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits,
+ has_hdmi_sink))
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
@@ -2165,16 +2181,13 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
- int clock;
+ int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- bool force_dvi =
- READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- clock = mode->clock;
-
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
@@ -2188,18 +2201,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock /= 2;
/* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
+ status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
- if (hdmi->has_hdmi_sink && !force_dvi) {
+ if (has_hdmi_sink) {
/* if we can't do 8bpc we may still be able to do 12bpc */
if (status != MODE_OK && !HAS_GMCH(dev_priv))
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
- true, force_dvi);
+ true, has_hdmi_sink);
/* if we can't do 8,12bpc we may still be able to do 10bpc */
if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
- true, force_dvi);
+ true, has_hdmi_sink);
}
if (status != MODE_OK)
return status;
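/*
 * Aside: the deep-color fallbacks above scale the 8bpc TMDS clock by
 * bits-per-channel -- 12bpc needs 12/8 = 3/2 of the base clock and 10bpc
 * needs 10/8 = 5/4. For a 297000 kHz 8bpc mode, for example:
 *
 *	12bpc: 297000 * 3 / 2 = 445500 kHz
 *	10bpc: 297000 * 5 / 4 = 371250 kHz
 */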
@@ -2263,14 +2276,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
}
}
- /* Display WA #1139: glk */
- if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
- adjusted_mode->htotal > 5460)
- return false;
-
- /* Display Wa_1405510057:icl */
+ /* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
+ bpc == 10 && IS_GEN(dev_priv, 11) &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -2315,7 +2323,7 @@ static int intel_hdmi_port_clock(int clock, int bpc)
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- int clock, bool force_dvi)
+ int clock)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int bpc;
@@ -2324,7 +2332,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
if (hdmi_deep_color_possible(crtc_state, bpc) &&
hdmi_port_clock_valid(intel_hdmi,
intel_hdmi_port_clock(clock, bpc),
- true, force_dvi) == MODE_OK)
+ true, crtc_state->has_hdmi_sink) == MODE_OK)
return bpc;
}
@@ -2332,8 +2340,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
}
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- bool force_dvi)
+ struct intel_crtc_state *crtc_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
@@ -2347,8 +2354,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
clock /= 2;
- bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
- clock, force_dvi);
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock);
crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
@@ -2364,7 +2370,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
bpc, crtc_state->pipe_bpp);
if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
- false, force_dvi) != MODE_OK) {
+ false, crtc_state->has_hdmi_sink) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
crtc_state->port_clock);
return -EINVAL;
@@ -2412,14 +2418,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
+ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_hdmi,
+ conn_state);
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
@@ -2448,7 +2454,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
- ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
+ ret = intel_hdmi_compute_clock(encoder, pipe_config);
if (ret)
return ret;
@@ -2808,7 +2814,7 @@ intel_hdmi_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- i915_debugfs_connector_add(connector);
+ intel_connector_debugfs_add(connector);
intel_hdmi_create_i2c_symlink(connector);
@@ -3002,7 +3008,7 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
else if (intel_phy_is_tc(dev_priv, phy))
return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
- WARN(1, "Unknown port:%c\n", port_name(port));
+ drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
@@ -3052,17 +3058,17 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
-static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
u8 ddc_pin;
- if (info->alternate_ddc_pin) {
+ ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+ if (ddc_pin) {
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
- info->alternate_ddc_pin, port_name(port));
- return info->alternate_ddc_pin;
+ ddc_pin, port_name(port));
+ return ddc_pin;
}
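/*
 * Aside: same pattern as the max-TMDS-clock change earlier in this file --
 * instead of poking dev_priv->vbt.ddi_port_info directly, the encoder asks
 * the VBT layer (here intel_bios_alternate_ddc_pin()) and falls back to
 * the platform default pin when the VBT stays silent.
 */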
if (HAS_PCH_MCC(dev_priv))
@@ -3139,16 +3145,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
- if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A))
+ if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
- if (WARN(intel_dig_port->max_lanes < 4,
- "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ if (drm_WARN(dev, intel_dig_port->max_lanes < 4,
+ "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return;
- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder);
ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
drm_connector_init_with_ddc(dev, connector,
@@ -3165,6 +3171,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->ycbcr_420_allowed = true;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -3188,8 +3195,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* generated on the port when a cable is not attached.
*/
if (IS_G45(dev_priv)) {
- u32 temp = I915_READ(PEG_BAND_GAP_DATA);
- I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+ intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+ (temp & ~0xf) | 0xd);
}
cec_fill_conn_info_from_drm(&conn_info, connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index d3659d0b408b..8ff1f76a63df 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -9,8 +9,6 @@
#include <linux/hdmi.h>
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 99d3a3c7989e..a091442efba4 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -23,8 +23,6 @@
#include <linux/kernel.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
@@ -89,29 +87,16 @@
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port)
{
- switch (port) {
- case PORT_A:
- return HPD_PORT_A;
- case PORT_B:
- return HPD_PORT_B;
- case PORT_C:
- return HPD_PORT_C;
- case PORT_D:
- return HPD_PORT_D;
- case PORT_E:
- return HPD_PORT_E;
- case PORT_F:
- if (IS_CNL_WITH_PORT_F(dev_priv))
- return HPD_PORT_E;
- return HPD_PORT_F;
- case PORT_G:
- return HPD_PORT_G;
- case PORT_H:
- return HPD_PORT_H;
- case PORT_I:
- return HPD_PORT_I;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ switch (phy) {
+ case PHY_F:
+ return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
+ case PHY_A ... PHY_E:
+ case PHY_G ... PHY_I:
+ return HPD_PORT_A + phy - PHY_A;
default:
- MISSING_CASE(port);
+ MISSING_CASE(phy);
return HPD_NONE;
}
}
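/*
 * Aside: the rewritten default-pin lookup leans on the phy and hpd_pin
 * enums being declared in the same contiguous order, so most of the old
 * switch collapses to "HPD_PORT_A + phy - PHY_A":
 *
 *	PHY_A -> HPD_PORT_A, ..., PHY_E -> HPD_PORT_E,
 *	PHY_G -> HPD_PORT_G, PHY_H -> HPD_PORT_H, PHY_I -> HPD_PORT_I
 *
 * with PHY_F kept as a special case because CNL routes port F to the
 * HPD_PORT_E pin.
 */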
@@ -120,6 +105,20 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
#define HPD_RETRY_DELAY 1000
+static enum hpd_pin
+intel_connector_hpd_pin(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ /*
+ * MST connectors get their encoder attached dynamically
+ * so need to make sure we have an encoder here. But since
+ * MST encoders have their hpd_pin set to HPD_NONE we don't
+ * have to special case them beyond that.
+ */
+ return encoder ? encoder->hpd_pin : HPD_NONE;
+}
+
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
* @dev_priv: private driver data pointer
@@ -171,10 +170,13 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+ drm_dbg_kms(&dev_priv->drm,
+ "Received HPD interrupt on PIN %d - cnt: %d\n",
+ pin,
hpd->stats[pin].count);
}
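/*
 * Aside: storm detection stays a plain counter -- each interrupt bumps
 * stats[pin].count by the caller-provided increment, and crossing the
 * threshold flips the pin to HPD_MARK_DISABLED so the switch-to-polling
 * work below takes over. The conversion here only moves the messages
 * onto the device-aware drm_dbg_kms().
 */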
@@ -185,37 +187,32 @@ static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *intel_connector;
- struct intel_encoder *intel_encoder;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- enum hpd_pin pin;
+ struct intel_connector *connector;
bool hpd_disabled = false;
lockdep_assert_held(&dev_priv->irq_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->polled != DRM_CONNECTOR_POLL_HPD)
- continue;
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
- intel_connector = to_intel_connector(connector);
- intel_encoder = intel_connector->encoder;
- if (!intel_encoder)
+ if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
continue;
- pin = intel_encoder->hpd_pin;
+ pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- DRM_INFO("HPD interrupt storm detected on connector %s: "
+ drm_info(&dev_priv->drm,
+ "HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
- connector->name);
+ connector->base.name);
dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT
- | DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
}
drm_connector_list_iter_end(&conn_iter);
@@ -234,40 +231,38 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
intel_wakeref_t wakeref;
enum hpd_pin pin;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_hpd_pin(pin) {
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE ||
+ dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
continue;
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- /* Don't check MST ports, they don't have pins */
- if (!intel_connector->mst_port &&
- intel_connector->encoder->hpd_pin == pin) {
- if (connector->polled != intel_connector->polled)
- DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- connector->name);
- connector->polled = intel_connector->polled;
- if (!connector->polled)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (connector->base.polled != connector->polled)
+ drm_dbg(&dev_priv->drm,
+ "Reenabling HPD on connector %s\n",
+ connector->base.name);
+ connector->base.polled = connector->polled;
}
+ drm_connector_list_iter_end(&conn_iter);
+
+ for_each_hpd_pin(pin) {
+ if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ }
+
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
+
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -281,7 +276,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->base.status;
connector->base.status =
@@ -290,11 +285,12 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
if (old_status == connector->base.status)
return INTEL_HOTPLUG_UNCHANGED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.base.id,
- connector->base.name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->base.status));
+ drm_dbg_kms(&to_i915(dev)->drm,
+ "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.base.id,
+ connector->base.name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->base.status));
return INTEL_HOTPLUG_CHANGED;
}
@@ -361,16 +357,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
container_of(work, struct drm_i915_private,
hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *intel_connector;
- struct intel_encoder *intel_encoder;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
u32 changed = 0, retry = 0;
u32 hpd_event_bits;
u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
+ drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
spin_lock_irq(&dev_priv->irq_lock);
@@ -385,21 +379,25 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
u32 hpd_bit;
- intel_connector = to_intel_connector(connector);
- if (!intel_connector->encoder)
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
continue;
- intel_encoder = intel_connector->encoder;
- hpd_bit = BIT(intel_encoder->hpd_pin);
+
+ hpd_bit = BIT(pin);
if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
- DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- connector->name, intel_encoder->hpd_pin);
+ struct intel_encoder *encoder =
+ intel_attached_encoder(connector);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s (pin %i) received hotplug event.\n",
+ connector->base.name, pin);
- switch (intel_encoder->hotplug(intel_encoder,
- intel_connector,
- hpd_event_bits & hpd_bit)) {
+ switch (encoder->hotplug(encoder, connector,
+ hpd_event_bits & hpd_bit)) {
case INTEL_HOTPLUG_UNCHANGED:
break;
case INTEL_HOTPLUG_CHANGED:
@@ -481,9 +479,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
- encoder->base.base.id, encoder->base.name,
- long_hpd ? "long" : "short");
+ drm_dbg(&dev_priv->drm,
+ "digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
+ long_hpd ? "long" : "short");
queue_dig = true;
if (long_hpd) {
@@ -509,8 +508,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- WARN_ONCE(!HAS_GMCH(dev_priv),
- "Received HPD interrupt on pin %d although disabled\n", pin);
+ drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ "Received HPD interrupt on pin %d although disabled\n",
+ pin);
continue;
}
@@ -601,8 +601,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
container_of(work, struct drm_i915_private,
hotplug.poll_init_work);
struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
bool enabled;
mutex_lock(&dev->mode_config.mutex);
@@ -610,23 +610,18 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_connector *intel_connector =
- to_intel_connector(connector);
- connector->polled = intel_connector->polled;
-
- /* MST has a dynamic intel_connector->encoder and it's reprobing
- * is all handled by the MST helpers. */
- if (intel_connector->mst_port)
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
+
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
- intel_connector->encoder->hpd_pin > HPD_NONE) {
- connector->polled = enabled ?
- DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT :
- DRM_CONNECTOR_POLL_HPD;
- }
+ connector->base.polled = connector->polled;
+
+ if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
}
drm_connector_list_iter_end(&conn_iter);
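/*
 * Aside (reading across the hunks above): connector->polled now holds the
 * connector's default poll mode, set at init time -- e.g. the new
 * DRM_CONNECTOR_POLL_HPD default for HDMI earlier in this series -- while
 * connector->base.polled is the live DRM state. This work restores the
 * default and, when polling is enabled, demotes HPD-capable connectors to
 * explicit connect/disconnect polling; MST connectors drop out naturally
 * via the HPD_NONE check instead of the old mst_port special case.
 */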
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 087b5f57b321..1e6b4fda2900 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 0b67f7887cd0..ad5cc13037ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -71,6 +71,7 @@
#include <drm/intel_lpe_audio.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_lpe_audio.h"
#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
@@ -126,7 +127,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(pdata);
if (IS_ERR(platdev)) {
- DRM_ERROR("Failed to allocate LPE audio platform device\n");
+ drm_err(&dev_priv->drm,
+ "Failed to allocate LPE audio platform device\n");
return platdev;
}
@@ -166,7 +168,7 @@ static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
int irq = dev_priv->lpe_audio.irq;
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
&lpe_audio_irqchip,
handle_simple_irq,
@@ -189,7 +191,8 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
+ drm_info(&dev_priv->drm,
+ "HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
@@ -202,18 +205,19 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
dev_priv->lpe_audio.irq = irq_alloc_desc(0);
if (dev_priv->lpe_audio.irq < 0) {
- DRM_ERROR("Failed to allocate IRQ desc: %d\n",
+ drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
dev_priv->lpe_audio.irq);
ret = dev_priv->lpe_audio.irq;
goto err;
}
- DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq);
ret = lpe_audio_irq_init(dev_priv);
if (ret) {
- DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to initialize irqchip for lpe audio: %d\n",
ret);
goto err_free_irq;
}
@@ -222,7 +226,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
if (IS_ERR(dev_priv->lpe_audio.platdev)) {
ret = PTR_ERR(dev_priv->lpe_audio.platdev);
- DRM_ERROR("Failed to create lpe audio platform device: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to create lpe audio platform device: %d\n",
ret);
goto err_free_irq;
}
@@ -230,7 +235,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
/* enable chicken bit; at least this is required for Dell Wyse 3040
* with DP outputs (but only sometimes by some reason!)
*/
- I915_WRITE(VLV_AUD_CHICKEN_BIT_REG, VLV_CHICKEN_BIT_DBG_ENABLE);
+ intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG,
+ VLV_CHICKEN_BIT_DBG_ENABLE);
return 0;
err_free_irq:
@@ -257,8 +263,8 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
ret = generic_handle_irq(dev_priv->lpe_audio.irq);
if (ret)
- DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
- ret);
+ drm_err_ratelimited(&dev_priv->drm,
+ "error handling LPE audio irq: %d\n", ret);
}
/**
@@ -276,7 +282,8 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
if (lpe_audio_detect(dev_priv)) {
ret = lpe_audio_setup(dev_priv);
if (ret < 0)
- DRM_ERROR("failed to setup LPE Audio bridge\n");
+ drm_err(&dev_priv->drm,
+ "failed to setup LPE Audio bridge\n");
}
return ret;
}
@@ -334,7 +341,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
- audio_enable = I915_READ(VLV_AUD_PORT_EN_DBG(port));
+ audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port));
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
@@ -343,8 +350,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = dp_output;
/* Unmute the amp for both DP and HDMI */
- I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
- audio_enable & ~VLV_AMP_MUTE);
+ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ audio_enable & ~VLV_AMP_MUTE);
} else {
memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
ppdata->pipe = -1;
@@ -352,8 +359,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = false;
/* Mute the amp for both DP and HDMI */
- I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
- audio_enable | VLV_AMP_MUTE);
+ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ audio_enable | VLV_AMP_MUTE);
}
if (pdata->notify_audio_lpe)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 10696bb99dcf..9a067effcfa0 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,7 +37,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -85,7 +84,7 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(lvds_reg);
+ val = intel_de_read(dev_priv, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -125,7 +124,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
- tmp = I915_READ(lvds_encoder->reg);
+ tmp = intel_de_read(dev_priv, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@@ -143,7 +142,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_GEN(dev_priv) < 4) {
- tmp = I915_READ(PFIT_CONTROL);
+ tmp = intel_de_read(dev_priv, PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
@@ -156,18 +155,18 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
{
u32 val;
- pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
+ pps->powerdown_on_reset = intel_de_read(dev_priv, PP_CONTROL(0)) & PANEL_POWER_RESET;
- val = I915_READ(PP_ON_DELAYS(0));
+ val = intel_de_read(dev_priv, PP_ON_DELAYS(0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
- val = I915_READ(PP_OFF_DELAYS(0));
+ val = intel_de_read(dev_priv, PP_OFF_DELAYS(0));
pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
- val = I915_READ(PP_DIVISOR(0));
+ val = intel_de_read(dev_priv, PP_DIVISOR(0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
@@ -182,8 +181,9 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) <= 4 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
- DRM_DEBUG_KMS("Panel power timings uninitialized, "
- "setting defaults\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel power timings uninitialized, "
+ "setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
pps->t1_t2 = 40 * 10;
pps->t5 = 200 * 10;
@@ -192,10 +192,10 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->tx = 200 * 10;
}
- DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
- "divider %d port %d powerdown_on_reset %d\n",
- pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
- pps->divider, pps->port, pps->powerdown_on_reset);
+ drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ "divider %d port %d powerdown_on_reset %d\n",
+ pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+ pps->divider, pps->port, pps->powerdown_on_reset);
}
static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -203,25 +203,21 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(PP_CONTROL(0));
- WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
+ val = intel_de_read(dev_priv, PP_CONTROL(0));
+ drm_WARN_ON(&dev_priv->drm,
+ (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
if (pps->powerdown_on_reset)
val |= PANEL_POWER_RESET;
- I915_WRITE(PP_CONTROL(0), val);
+ intel_de_write(dev_priv, PP_CONTROL(0), val);
- I915_WRITE(PP_ON_DELAYS(0),
- REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
- REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ intel_de_write(dev_priv, PP_ON_DELAYS(0),
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
- I915_WRITE(PP_OFF_DELAYS(0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
- REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ intel_de_write(dev_priv, PP_OFF_DELAYS(0),
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
- I915_WRITE(PP_DIVISOR(0),
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
- DIV_ROUND_UP(pps->t4, 1000) + 1));
+ intel_de_write(dev_priv, PP_DIVISOR(0),
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
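/*
 * Aside: the long intel_de_write() lines above are the mechanical
 * conversion preserving the same REG_FIELD_PREP() packing -- each helper
 * shifts a delay value into the register field described by its mask,
 * e.g. (mirroring the code above):
 *
 *	REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
 *	REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)
 *
 * and DIV_ROUND_UP(pps->t4, 1000) + 1 rounds the power-cycle delay up
 * into the coarser units the PANEL_POWER_CYCLE_DELAY field apparently
 * expects, plus the extra unit its encoding requires.
 */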
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@@ -299,7 +295,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(lvds_encoder->reg, temp);
+ intel_de_write(dev_priv, lvds_encoder->reg, temp);
}
/*
@@ -313,13 +309,16 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dev);
- I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
+ intel_de_write(dev_priv, lvds_encoder->reg,
+ intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
- I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
- POSTING_READ(lvds_encoder->reg);
+ intel_de_write(dev_priv, PP_CONTROL(0),
+ intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+ intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
- DRM_ERROR("timed out waiting for panel to power on\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
}
@@ -331,12 +330,15 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ intel_de_write(dev_priv, PP_CONTROL(0),
+ intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power off\n");
- I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_encoder->reg);
+ intel_de_write(dev_priv, lvds_encoder->reg,
+ intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+ intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
static void gmch_disable_lvds(struct intel_encoder *encoder,
@@ -398,7 +400,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
- DRM_ERROR("Can't support LVDS on pipe A\n");
+ drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -408,8 +410,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
lvds_bpp = 6*3;
if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
pipe_config->pipe_bpp = lvds_bpp;
}
@@ -791,7 +794,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = I915_READ(lvds_encoder->reg);
+ val = intel_de_read(dev_priv, lvds_encoder->reg);
if (HAS_PCH_CPT(dev_priv))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
@@ -827,13 +830,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- WARN(!dev_priv->vbt.int_lvds_support,
- "Useless DMI match. Internal LVDS support disabled by VBT\n");
+ drm_WARN(dev, !dev_priv->vbt.int_lvds_support,
+ "Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
if (!dev_priv->vbt.int_lvds_support) {
- DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Internal LVDS support disabled by VBT\n");
return;
}
@@ -842,7 +846,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
else
lvds_reg = LVDS;
- lvds = I915_READ(lvds_reg);
+ lvds = intel_de_read(dev_priv, lvds_reg);
if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
@@ -852,10 +856,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT\n");
return;
}
- DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT, but enabled anyway\n");
}
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
@@ -969,7 +975,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
*/
fixed_mode = intel_encoder_current_mode(intel_encoder);
if (fixed_mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_dbg_kms(&dev_priv->drm, "using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
@@ -985,8 +991,8 @@ out:
intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
- lvds_encoder->is_dual_link ? "dual" : "single");
+ drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -995,7 +1001,7 @@ out:
failed:
mutex_unlock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+ drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(lvds_encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index e59b4992ba1b..cc6b00959586 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,11 +30,10 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_panel.h"
#include "i915_drv.h"
+#include "intel_acpi.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
@@ -242,29 +241,6 @@ struct opregion_asle_ext {
#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
-/*
- * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
- * Attached to the Display Adapter).
- */
-#define ACPI_DISPLAY_INDEX_SHIFT 0
-#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
-#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
-#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
-#define ACPI_DISPLAY_TYPE_SHIFT 8
-#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
-#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
-#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
-#define ACPI_DISPLAY_TYPE_TV (2 << 8)
-#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
-#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
-#define ACPI_VENDOR_SPECIFIC_SHIFT 12
-#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
-#define ACPI_BIOS_CAN_DETECT (1 << 16)
-#define ACPI_DEPENDS_ON_VGA (1 << 17)
-#define ACPI_PIPE_ID_SHIFT 18
-#define ACPI_PIPE_ID_MASK (7 << 18)
-#define ACPI_DEVICE_ID_SCHEME (1 << 31)
-
#define MAX_DSLP 1500
static int swsci(struct drm_i915_private *dev_priv,
@@ -311,7 +287,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* The spec tells us to do this, but we are the only user... */
scic = swsci->scic;
if (scic & SWSCI_SCIC_INDICATOR) {
- DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+ drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n");
return -EBUSY;
}
@@ -335,7 +311,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
- DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+ drm_dbg(&dev_priv->drm, "SWSCI request timed out\n");
return -ETIMEDOUT;
}
@@ -344,7 +320,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Note: scic == 0 is an error! */
if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
- DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+ drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic);
return -EIO;
}
@@ -403,8 +379,9 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
break;
default:
- WARN_ONCE(1, "unsupported intel_encoder type %d\n",
- intel_encoder->type);
+ drm_WARN_ONCE(&dev_priv->drm, 1,
+ "unsupported intel_encoder type %d\n",
+ intel_encoder->type);
return -EINVAL;
}
@@ -448,10 +425,11 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
struct opregion_asle *asle = dev_priv->opregion.asle;
struct drm_device *dev = &dev_priv->drm;
- DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+ drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
if (acpi_video_get_backlight_type() == acpi_backlight_native) {
- DRM_DEBUG_KMS("opregion backlight request ignored\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "opregion backlight request ignored\n");
return 0;
}
@@ -468,7 +446,8 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
* Update backlight on all connectors that support backlight (usually
* only one).
*/
- DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+ drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n",
+ bclp);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
@@ -485,13 +464,13 @@ static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
- DRM_DEBUG_DRIVER("Illum is not supported\n");
+ drm_dbg(&dev_priv->drm, "Illum is not supported\n");
return ASLC_ALS_ILLUM_FAILED;
}
static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
{
- DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+ drm_dbg(&dev_priv->drm, "PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
@@ -499,30 +478,36 @@ static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
- DRM_DEBUG_DRIVER("Pfit is not supported\n");
+ drm_dbg(&dev_priv->drm, "Pfit is not supported\n");
return ASLC_PFIT_FAILED;
}
static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
{
- DRM_DEBUG_DRIVER("SROT is not supported\n");
+ drm_dbg(&dev_priv->drm, "SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
{
if (!iuer)
- DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (nothing)\n");
if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (rotation lock)\n");
if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (volume down)\n");
if (iuer & ASLE_IUER_VOLUME_UP_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (volume up)\n");
if (iuer & ASLE_IUER_WINDOWS_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (windows)\n");
if (iuer & ASLE_IUER_POWER_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (power)\n");
return ASLC_BUTTON_ARRAY_FAILED;
}
@@ -530,9 +515,11 @@ static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
- DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+ drm_dbg(&dev_priv->drm,
+ "Convertible is not supported (clamshell)\n");
else
- DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+ drm_dbg(&dev_priv->drm,
+ "Convertible is not supported (slate)\n");
return ASLC_CONVERTIBLE_FAILED;
}
@@ -540,16 +527,17 @@ static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
- DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+ drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n");
else
- DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+ drm_dbg(&dev_priv->drm,
+ "Docking is not supported (undocked)\n");
return ASLC_DOCKING_FAILED;
}
static u32 asle_isct_state(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_DRIVER("ISCT is not supported\n");
+ drm_dbg(&dev_priv->drm, "ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
}
@@ -569,8 +557,8 @@ static void asle_work(struct work_struct *work)
aslc_req = asle->aslc;
if (!(aslc_req & ASLC_REQ_MSK)) {
- DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
- aslc_req);
+ drm_dbg(&dev_priv->drm,
+ "No request on ASLC interrupt 0x%08x\n", aslc_req);
return;
}
@@ -662,54 +650,12 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static u32 acpi_display_type(struct intel_connector *connector)
-{
- u32 display_type;
-
- switch (connector->base.connector_type) {
- case DRM_MODE_CONNECTOR_VGA:
- case DRM_MODE_CONNECTOR_DVIA:
- display_type = ACPI_DISPLAY_TYPE_VGA;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_9PinDIN:
- case DRM_MODE_CONNECTOR_TV:
- display_type = ACPI_DISPLAY_TYPE_TV;
- break;
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- case DRM_MODE_CONNECTOR_eDP:
- case DRM_MODE_CONNECTOR_DSI:
- display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
- break;
- case DRM_MODE_CONNECTOR_Unknown:
- case DRM_MODE_CONNECTOR_VIRTUAL:
- display_type = ACPI_DISPLAY_TYPE_OTHER;
- break;
- default:
- MISSING_CASE(connector->base.connector_type);
- display_type = ACPI_DISPLAY_TYPE_OTHER;
- break;
- }
-
- return display_type;
-}
-
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
- int display_index[16] = {};
/*
* In theory, did2, the extended didl, gets added at opregion version
@@ -721,29 +667,22 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
+ intel_acpi_device_id_update(dev_priv);
+
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
- u32 device_id, type;
-
- device_id = acpi_display_type(connector);
-
- /* Use display type specific display index. */
- type = (device_id & ACPI_DISPLAY_TYPE_MASK)
- >> ACPI_DISPLAY_TYPE_SHIFT;
- device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
-
- connector->acpi_device_id = device_id;
if (i < max_outputs)
- set_did(opregion, i, device_id);
+ set_did(opregion, i, connector->acpi_device_id);
i++;
}
drm_connector_list_iter_end(&conn_iter);
- DRM_DEBUG_KMS("%d outputs detected\n", i);
+ drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i);
if (i > max_outputs)
- DRM_ERROR("More than %d outputs in connector list\n",
- max_outputs);
+ drm_err(&dev_priv->drm,
+ "More than %d outputs in connector list\n",
+ max_outputs);
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
@@ -823,7 +762,9 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
if (requested_callbacks) {
u32 req = opregion->swsci_sbcb_sub_functions;
if ((req & tmp) != req)
- DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+ drm_dbg(&dev_priv->drm,
+ "SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n",
+ req, tmp);
/* XXX: for now, trust the requested callbacks */
/* opregion->swsci_sbcb_sub_functions &= tmp; */
} else {
@@ -831,9 +772,10 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
}
}
- DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
- opregion->swsci_gbda_sub_functions,
- opregion->swsci_sbcb_sub_functions);
+ drm_dbg(&dev_priv->drm,
+ "SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+ opregion->swsci_gbda_sub_functions,
+ opregion->swsci_sbcb_sub_functions);
}
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -867,15 +809,17 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
if (ret) {
- DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
- name, ret);
+ drm_err(&dev_priv->drm,
+ "Requesting VBT firmware \"%s\" failed (%d)\n",
+ name, ret);
return ret;
}
if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (opregion->vbt_firmware) {
- DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT firmware \"%s\"\n", name);
opregion->vbt = opregion->vbt_firmware;
opregion->vbt_size = fw->size;
ret = 0;
@@ -883,7 +827,8 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
ret = -ENOMEM;
}
} else {
- DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name);
+ drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
+ name);
ret = -EINVAL;
}
@@ -910,9 +855,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
pci_read_config_dword(pdev, ASLS, &asls);
- DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+ drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n",
+ asls);
if (asls == 0) {
- DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+ drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
@@ -925,21 +871,21 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
memcpy(buf, base, sizeof(buf));
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ drm_dbg(&dev_priv->drm, "opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
opregion->header = base;
opregion->lid_state = base + ACPI_CLID;
- DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
- opregion->header->over.major,
- opregion->header->over.minor,
- opregion->header->over.revision);
+ drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
+ opregion->header->over.major,
+ opregion->header->over.minor,
+ opregion->header->over.revision);
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
/*
* Indicate we handle monitor hotplug events ourselves so we do
@@ -951,20 +897,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
}
if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG_DRIVER("SWSCI supported\n");
+ drm_dbg(&dev_priv->drm, "SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
swsci_setup(dev_priv);
}
if (mboxes & MBOX_ASLE) {
- DRM_DEBUG_DRIVER("ASLE supported\n");
+ drm_dbg(&dev_priv->drm, "ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
}
if (mboxes & MBOX_ASLE_EXT)
- DRM_DEBUG_DRIVER("ASLE extension supported\n");
+ drm_dbg(&dev_priv->drm, "ASLE extension supported\n");
if (intel_load_vbt_firmware(dev_priv) == 0)
goto out;
@@ -984,7 +930,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
*/
if (opregion->header->over.major > 2 ||
opregion->header->over.minor >= 1) {
- WARN_ON(rvda < OPREGION_SIZE);
+ drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE);
rvda += asls;
}
@@ -995,12 +941,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
goto out;
} else {
- DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT in ACPI OpRegion (RVDA)\n");
memunmap(opregion->rvda);
opregion->rvda = NULL;
}
@@ -1018,11 +966,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
vbt_size -= OPREGION_VBT_OFFSET;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
} else {
- DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
}
out:
@@ -1058,20 +1008,22 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret) {
- DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get panel details from OpRegion (%d)\n",
+ ret);
return ret;
}
ret = (panel_details >> 8) & 0xff;
if (ret > 0x10) {
- DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid OpRegion panel type 0x%x\n", ret);
return -EINVAL;
}
/* fall back to VBT panel type? */
if (ret == 0x0) {
- DRM_DEBUG_KMS("No panel type in OpRegion\n");
+ drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n");
return -ENODEV;
}
@@ -1081,7 +1033,8 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* via a quirk list :(
*/
if (!dmi_check_system(intel_use_opregion_panel_type)) {
- DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring OpRegion panel type (%d)\n", ret - 1);
return -ENODEV;
}
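
The intel_overlay.c and intel_panel.c hunks below make the matching change on the register side: I915_READ()/I915_WRITE(), which captured dev_priv implicitly from the enclosing scope, become explicit intel_de_read()/intel_de_write() calls on the display engine. The read-modify-write shape of the code is untouched. A hedged user-space model of that shape, with the register offset and bit name invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Toy MMIO model: registers live in a per-device table rather than
 * behind a macro that assumes a dev_priv in scope. */
struct fake_display {
	uint32_t regs[32];
};

#define BLC_PWM_CPU_CTL2	0x10	/* illustrative offset */
#define BLM_PWM_ENABLE		(1u << 31)

static uint32_t intel_de_read(struct fake_display *de, uint32_t reg)
{
	return de->regs[reg];
}

static void intel_de_write(struct fake_display *de, uint32_t reg, uint32_t val)
{
	de->regs[reg] = val;
}

int main(void)
{
	struct fake_display de = { .regs[BLC_PWM_CPU_CTL2] = BLM_PWM_ENABLE | 0x7f };
	uint32_t tmp;

	/* Read-modify-write, mirroring pch_disable_backlight() below: clear
	 * the enable bit while preserving every other field. */
	tmp = intel_de_read(&de, BLC_PWM_CPU_CTL2);
	intel_de_write(&de, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);

	printf("ctl2 = 0x%08x\n", (unsigned int)intel_de_read(&de, BLC_PWM_CPU_CTL2));
	return 0;
}

Passing the device explicitly costs a little line length (hence the extra wrapping in many hunks) but makes each access greppable and testable per device.
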
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index e40c3a0e2cd7..481187223101 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,7 +27,6 @@
*/
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
#include "gt/intel_ring.h"
@@ -204,9 +203,10 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- I915_WRITE(DSPCLK_GATE_D, 0);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, 0);
else
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+ intel_de_write(dev_priv, DSPCLK_GATE_D,
+ OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
pci_bus_read_config_byte(pdev->bus,
@@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs;
- WARN_ON(overlay->active);
+ drm_WARN_ON(&dev_priv->drm, overlay->active);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -315,15 +315,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
- WARN_ON(!overlay->active);
+ drm_WARN_ON(&dev_priv->drm, !overlay->active);
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
/* check for underruns */
- tmp = I915_READ(DOVSTA);
+ tmp = intel_de_read(dev_priv, DOVSTA);
if (tmp & (1 << 17))
- DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+ drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -456,7 +456,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -759,7 +759,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct i915_vma *vma;
int ret, tmp_width;
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -857,7 +858,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -891,7 +893,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 pfit_control = intel_de_read(dev_priv, PFIT_CONTROL);
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
@@ -899,12 +901,12 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
*/
if (INTEL_GEN(dev_priv) >= 4) {
/* on i965 use the PGM reg to read out the autoscaler values */
- ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
if (pfit_control & VERT_AUTO_SCALE)
- ratio = I915_READ(PFIT_AUTO_RATIOS);
+ ratio = intel_de_read(dev_priv, PFIT_AUTO_RATIOS);
else
- ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS);
ratio >>= PFIT_VERT_SCALE_SHIFT;
}
@@ -1066,7 +1068,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1090,7 +1092,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
if (i915_gem_object_is_tiled(new_bo)) {
- DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
}
@@ -1225,7 +1228,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1239,12 +1242,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->saturation = overlay->saturation;
if (!IS_GEN(dev_priv, 2)) {
- attrs->gamma0 = I915_READ(OGAMC0);
- attrs->gamma1 = I915_READ(OGAMC1);
- attrs->gamma2 = I915_READ(OGAMC2);
- attrs->gamma3 = I915_READ(OGAMC3);
- attrs->gamma4 = I915_READ(OGAMC4);
- attrs->gamma5 = I915_READ(OGAMC5);
+ attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
+ attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
+ attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
+ attrs->gamma3 = intel_de_read(dev_priv, OGAMC3);
+ attrs->gamma4 = intel_de_read(dev_priv, OGAMC4);
+ attrs->gamma5 = intel_de_read(dev_priv, OGAMC5);
}
} else {
if (attrs->brightness < -128 || attrs->brightness > 127)
@@ -1274,12 +1277,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out_unlock;
- I915_WRITE(OGAMC0, attrs->gamma0);
- I915_WRITE(OGAMC1, attrs->gamma1);
- I915_WRITE(OGAMC2, attrs->gamma2);
- I915_WRITE(OGAMC3, attrs->gamma3);
- I915_WRITE(OGAMC4, attrs->gamma4);
- I915_WRITE(OGAMC5, attrs->gamma5);
+ intel_de_write(dev_priv, OGAMC0, attrs->gamma0);
+ intel_de_write(dev_priv, OGAMC1, attrs->gamma1);
+ intel_de_write(dev_priv, OGAMC2, attrs->gamma2);
+ intel_de_write(dev_priv, OGAMC3, attrs->gamma3);
+ intel_de_write(dev_priv, OGAMC4, attrs->gamma4);
+ intel_de_write(dev_priv, OGAMC5, attrs->gamma5);
}
}
overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
@@ -1369,7 +1372,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_reg_attrs(overlay, overlay->regs);
dev_priv->overlay = overlay;
- DRM_INFO("Initialized overlay support.\n");
+ drm_info(&dev_priv->drm, "Initialized overlay support.\n");
return;
out_free:
@@ -1389,7 +1392,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already.
*/
- WARN_ON(overlay->active);
+ drm_WARN_ON(&dev_priv->drm, overlay->active);
i915_gem_object_put(overlay->reg_bo);
i915_active_fini(&overlay->last_flip);
@@ -1419,8 +1422,8 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
if (error == NULL)
return NULL;
- error->dovsta = I915_READ(DOVSTA);
- error->isr = I915_READ(GEN2_ISR);
+ error->dovsta = intel_de_read(dev_priv, DOVSTA);
+ error->isr = intel_de_read(dev_priv, GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
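
One pattern worth calling out in the intel_panel.c hunks that follow: the backlight enable paths (lpt_enable_backlight(), pch_enable_backlight(), i965_enable_backlight(), and the bxt/cnp variants) all program the control register in two writes with a posting read in between, and the old POSTING_READ() macro becomes intel_de_posting_read() along the way. A sketch of that write/flush/enable sequencing, assuming a made-up offset and a posting read that only models the flush:

#include <stdint.h>
#include <stdio.h>

struct fake_display {
	uint32_t regs[8];
};

#define BLC_PWM_PCH_CTL1	0x1	/* illustrative offset */
#define BLM_PCH_POLARITY	(1u << 29)
#define BLM_PCH_PWM_ENABLE	(1u << 30)

static void intel_de_write(struct fake_display *de, uint32_t reg, uint32_t val)
{
	de->regs[reg] = val;
}

static uint32_t intel_de_posting_read(struct fake_display *de, uint32_t reg)
{
	/* On hardware this read forces the preceding write out of any
	 * posted-write buffer before the next write is issued. */
	return de->regs[reg];
}

int main(void)
{
	struct fake_display de = { { 0 } };
	uint32_t pch_ctl1 = BLM_PCH_POLARITY;

	/* Program the control fields, flush, then flip the enable bit in a
	 * second write -- the shape lpt_enable_backlight() keeps through
	 * the conversion. */
	intel_de_write(&de, BLC_PWM_PCH_CTL1, pch_ctl1);
	intel_de_posting_read(&de, BLC_PWM_PCH_CTL1);
	intel_de_write(&de, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);

	printf("ctl1 = 0x%08x\n", (unsigned int)de.regs[BLC_PWM_PCH_CTL1]);
	return 0;
}
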
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 7b3ec6eb3382..276f43870802 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -96,8 +96,9 @@ intel_panel_edid_downclock_mode(struct intel_connector *connector,
if (!downclock_mode)
return NULL;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using downclock mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(downclock_mode);
return downclock_mode;
@@ -122,8 +123,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
if (!fixed_mode)
return NULL;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using preferred mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
return fixed_mode;
@@ -138,8 +140,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using first mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
return fixed_mode;
@@ -162,8 +165,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] using mode from VBT: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
info->width_mm = fixed_mode->width_mm;
@@ -423,15 +426,15 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
default:
- WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+ drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
+ fitting_mode);
return;
}
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_GEN(dev_priv) >= 4)
- pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
- PFIT_FILTER_FUZZY);
+ pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -520,7 +523,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
if (i915_modparams.invert_brightness < 0)
return val;
@@ -537,14 +540,14 @@ static u32 lpt_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 pch_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 i9xx_get_backlight(struct intel_connector *connector)
@@ -553,7 +556,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
u32 val;
- val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (INTEL_GEN(dev_priv) < 4)
val >>= 1;
@@ -569,10 +572,10 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
- return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 vlv_get_backlight(struct intel_connector *connector)
@@ -588,7 +591,8 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
+ return intel_de_read(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
static u32 pwm_get_backlight(struct intel_connector *connector)
@@ -605,8 +609,8 @@ static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+ u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
}
static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -615,8 +619,8 @@ static void pch_set_backlight(const struct drm_connector_state *conn_state, u32
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
}
static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -626,7 +630,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
@@ -643,8 +647,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
}
- tmp = I915_READ(BLC_PWM_CTL) & ~mask;
- I915_WRITE(BLC_PWM_CTL, tmp | level);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
+ intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
}
static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -654,8 +658,8 @@ static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 tmp;
- tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
}
static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -664,7 +668,8 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -709,7 +714,7 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
mutex_lock(&dev_priv->backlight_lock);
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -742,14 +747,16 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
* This needs rework if we need to add support for CPU PWM on PCH split
* platforms.
*/
- tmp = I915_READ(BLC_PWM_CPU_CTL2);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("cpu backlight was enabled, disabling\n");
- I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu backlight was enabled, disabling\n");
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ tmp & ~BLM_PWM_ENABLE);
}
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -760,11 +767,11 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BLC_PWM_CPU_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -779,8 +786,8 @@ static void i965_disable_backlight(const struct drm_connector_state *old_conn_st
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -792,8 +799,9 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ tmp & ~BLM_PWM_ENABLE);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -805,14 +813,15 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
val &= ~UTIL_PIN_ENABLE;
- I915_WRITE(UTIL_PIN_CTL, val);
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
}
}
@@ -825,9 +834,10 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
}
static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -857,7 +867,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
* another client is not activated.
*/
if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ drm_dbg(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
return;
}
@@ -879,31 +890,31 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, schicken;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- DRM_DEBUG_KMS("pch backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (HAS_PCH_LPT(dev_priv)) {
- schicken = I915_READ(SOUTH_CHICKEN2);
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
if (panel->backlight.alternate_pwm_increment)
schicken |= LPT_PWM_GRANULARITY;
else
schicken &= ~LPT_PWM_GRANULARITY;
- I915_WRITE(SOUTH_CHICKEN2, schicken);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
} else {
- schicken = I915_READ(SOUTH_CHICKEN1);
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
if (panel->backlight.alternate_pwm_increment)
schicken |= SPT_PWM_GRANULARITY;
else
schicken &= ~SPT_PWM_GRANULARITY;
- I915_WRITE(SOUTH_CHICKEN1, schicken);
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
}
pch_ctl2 = panel->backlight.max << 16;
- I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
@@ -913,9 +924,10 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (HAS_PCH_LPT(dev_priv))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
- POSTING_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -930,41 +942,42 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("cpu backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
cpu_ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- DRM_DEBUG_KMS("pch backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (cpu_transcoder == TRANSCODER_EDP)
cpu_ctl2 = BLM_TRANSCODER_EDP;
else
cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
- POSTING_READ(BLC_PWM_CPU_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
pch_ctl2 = panel->backlight.max << 16;
- I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
- POSTING_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -975,10 +988,10 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- DRM_DEBUG_KMS("backlight already enabled\n");
- I915_WRITE(BLC_PWM_CTL, 0);
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ intel_de_write(dev_priv, BLC_PWM_CTL, 0);
}
freq = panel->backlight.max;
@@ -991,8 +1004,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
- I915_WRITE(BLC_PWM_CTL, ctl);
- POSTING_READ(BLC_PWM_CTL);
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL);
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1003,7 +1016,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* that has backlight.
*/
if (IS_GEN(dev_priv, 2))
- I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1015,11 +1028,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 ctl, ctl2, freq;
- ctl2 = I915_READ(BLC_PWM_CTL2);
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(BLC_PWM_CTL2, ctl2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
}
freq = panel->backlight.max;
@@ -1027,16 +1040,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
freq /= 0xff;
ctl = freq << 16;
- I915_WRITE(BLC_PWM_CTL, ctl);
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- I915_WRITE(BLC_PWM_CTL2, ctl2);
- POSTING_READ(BLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
}
@@ -1050,15 +1063,15 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
ctl = panel->backlight.max << 16;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1066,9 +1079,10 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = 0;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
- POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ ctl2 | BLM_PWM_ENABLE);
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1082,30 +1096,34 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- DRM_DEBUG_KMS("util pin already enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "util pin already enabled\n");
val &= ~UTIL_PIN_ENABLE;
- I915_WRITE(UTIL_PIN_CTL, val);
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
}
val = 0;
if (panel->backlight.util_pin_active_low)
val |= UTIL_PIN_POLARITY;
- I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) |
- UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
+ intel_de_write(dev_priv, UTIL_PIN_CTL,
+ val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
}
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
}
- I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.max);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1113,10 +1131,12 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
}
static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1127,16 +1147,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
}
- I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.max);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1144,10 +1167,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
}
static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1194,7 +1219,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
if (!panel->backlight.present)
return;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
mutex_lock(&dev_priv->backlight_lock);
@@ -1219,7 +1244,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_unlock(&dev_priv->backlight_lock);
- DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
}
@@ -1237,7 +1262,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
mutex_lock(&dev_priv->backlight_lock);
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
hw_level = scale_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -1380,7 +1405,8 @@ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz);
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz);
}
/*
@@ -1441,7 +1467,8 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz * 128);
}
/*
@@ -1458,7 +1485,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_PINEVIEW(dev_priv))
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
clock = KHz(dev_priv->cdclk.hw.cdclk);
@@ -1476,7 +1503,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_G4X(dev_priv))
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
clock = KHz(dev_priv->cdclk.hw.cdclk);
@@ -1493,14 +1520,14 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int mul, clock;
- if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
if (IS_CHERRYVIEW(dev_priv))
clock = KHz(19200);
else
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
mul = 128;
}
@@ -1515,22 +1542,26 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
u32 pwm;
if (!panel->backlight.hz_to_pwm) {
- DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion not supported\n");
return 0;
}
if (pwm_freq_hz) {
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
- pwm_freq_hz);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT defined backlight frequency %u Hz\n",
+ pwm_freq_hz);
} else {
pwm_freq_hz = 200;
- DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
- pwm_freq_hz);
+ drm_dbg_kms(&dev_priv->drm,
+ "default backlight frequency %u Hz\n",
+ pwm_freq_hz);
}
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
- DRM_DEBUG_KMS("backlight frequency conversion failed\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion failed\n");
return 0;
}
@@ -1546,7 +1577,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
int min;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1557,8 +1588,9 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
*/
min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
if (min != dev_priv->vbt.backlight.min_brightness) {
- DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
- dev_priv->vbt.backlight.min_brightness, min);
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping VBT min backlight %d/255 to %d/255\n",
+ dev_priv->vbt.backlight.min_brightness, min);
}
/* vbt value is a coefficient in range [0..255] */
@@ -1573,18 +1605,18 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
bool alt, cpu_mode;
if (HAS_PCH_LPT(dev_priv))
- alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
- alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
panel->backlight.alternate_pwm_increment = alt;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1608,13 +1640,16 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.max);
if (cpu_mode) {
- DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, panel->backlight.level);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ cpu_ctl2 & ~BLM_PWM_ENABLE);
}
return 0;
@@ -1626,10 +1661,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
if (!panel->backlight.max)
@@ -1645,7 +1680,7 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
@@ -1658,7 +1693,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
@@ -1697,11 +1732,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
- ctl2 = I915_READ(BLC_PWM_CTL2);
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
@@ -1731,13 +1766,13 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
@@ -1767,18 +1802,20 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.controller = dev_priv->vbt.backlight.controller;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
val & UTIL_PIN_POLARITY;
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.max =
- I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1812,11 +1849,13 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
*/
panel->backlight.controller = 0;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.max =
- I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1843,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
const char *desc;
+ u32 level, ns;
int retval;
/* Get the right PWM chip for DSI backlight according to VBT */
@@ -1855,7 +1895,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
}
if (IS_ERR(panel->backlight.pwm)) {
- DRM_ERROR("Failed to get the %s PWM chip\n", desc);
+ drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
+ desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1866,23 +1907,27 @@ static int pwm_setup_backlight(struct intel_connector *connector,
*/
pwm_apply_args(panel->backlight.pwm);
- retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
- CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.min = 0; /* 0% */
+ panel->backlight.max = 100; /* 100% */
+ level = intel_panel_compute_brightness(connector, 100);
+ ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+ retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS);
if (retval < 0) {
- DRM_ERROR("Failed to configure the pwm chip\n");
+ drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n");
pwm_put(panel->backlight.pwm);
panel->backlight.pwm = NULL;
return retval;
}
- panel->backlight.min = 0; /* 0% */
- panel->backlight.max = 100; /* 100% */
- panel->backlight.level = DIV_ROUND_UP(
- pwm_get_duty_cycle(panel->backlight.pwm) * 100,
- CRC_PMIC_PWM_PERIOD_NS);
+ level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+ CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.level =
+ intel_panel_compute_brightness(connector, level);
panel->backlight.enabled = panel->backlight.level != 0;
- DRM_INFO("Using %s PWM for LCD backlight control\n", desc);
+ drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+ desc);
return 0;
}
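
With min/max fixed at 0 and 100, backlight.level becomes a plain percentage, and the two conversions above are simply level% of the PWM period rounded up, and back again (the inversion quirk handled by intel_panel_compute_brightness() is left out here). A self-contained sketch, assuming an illustrative 21333ns period in place of the driver's CRC_PMIC_PWM_PERIOD_NS:

#define PWM_PERIOD_NS	21333U	/* assumed stand-in for CRC_PMIC_PWM_PERIOD_NS */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* brightness percentage (0..100) -> duty cycle in ns */
static unsigned int level_to_ns(unsigned int level)
{
	return DIV_ROUND_UP(level * PWM_PERIOD_NS, 100);
}

/* duty cycle in ns -> brightness percentage (0..100) */
static unsigned int ns_to_level(unsigned int ns)
{
	return DIV_ROUND_UP(ns * 100, PWM_PERIOD_NS);
}

/* level_to_ns(100) == 21333, level_to_ns(50) == 10667, ns_to_level(10667) == 51:
 * rounding up biases the round trip upward by at most one step. */
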
@@ -1913,15 +1958,17 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
if (!dev_priv->vbt.backlight.present) {
if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
- DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT, but present per quirk\n");
} else {
- DRM_DEBUG_KMS("no backlight present per VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT\n");
return 0;
}
}
/* ensure intel_panel has been initialized first */
- if (WARN_ON(!panel->backlight.setup))
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
return -ENODEV;
/* set level and max in panel struct */
@@ -1930,17 +1977,19 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
- DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
- connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to setup backlight for connector %s\n",
+ connector->name);
return ret;
}
panel->backlight.present = true;
- DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->name,
- enableddisabled(panel->backlight.enabled),
- panel->backlight.level, panel->backlight.max);
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->name,
+ enableddisabled(panel->backlight.enabled),
+ panel->backlight.level, panel->backlight.max);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 520408e83681..a9a5df2fee4d 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -110,8 +110,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- WARN(1, "nonexisting DP port %c\n",
- port_name(dig_port->base.port));
+ drm_WARN(dev, 1, "nonexisting DP port %c\n",
+ port_name(dig_port->base.port));
break;
}
break;
@@ -172,7 +172,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- u32 tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -188,7 +188,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
default:
return -EINVAL;
}
- I915_WRITE(PORT_DFT2_G4X, tmp);
+ intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
}
return 0;
@@ -237,7 +237,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
switch (pipe) {
case PIPE_A:
@@ -254,7 +254,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
- I915_WRITE(PORT_DFT2_G4X, tmp);
+ intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -328,7 +328,8 @@ put_state:
drm_atomic_state_put(state);
unlock:
- WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+ drm_WARN(&dev_priv->drm, ret,
+ "Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
@@ -440,15 +441,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
+void intel_crtc_crc_init(struct intel_crtc *crtc)
{
- enum pipe pipe;
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
- for_each_pipe(dev_priv, pipe) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
- spin_lock_init(&pipe_crc->lock);
- }
+ spin_lock_init(&pipe_crc->lock);
}
static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
@@ -570,7 +567,7 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
@@ -586,7 +583,8 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
intel_wakeref_t wakeref;
@@ -595,14 +593,15 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(crtc->index);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref) {
- DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -615,8 +614,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
goto out;
pipe_crc->source = source;
- I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
if (!source) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -638,7 +637,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
u32 val = 0;
if (!crtc->crc.opened)
@@ -650,22 +649,22 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
}
void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
/* Swallow crc's until we stop generating them. */
spin_lock_irq(&pipe_crc->lock);
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), 0);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index db258a756fc6..43012b189415 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -13,7 +13,7 @@ struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
+void intel_crtc_crc_init(struct intel_crtc *crtc);
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source_name, size_t *values_cnt);
@@ -22,7 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
+static inline void intel_crtc_crc_init(struct intel_crtc *crtc) {}
#define intel_crtc_set_crc_source NULL
#define intel_crtc_verify_crc_source NULL
#define intel_crtc_get_crc_sources NULL
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 83025052c965..fd9b146e3aba 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -59,11 +59,28 @@
* get called by the frontbuffer tracking code. Note that because of locking
* issues the self-refresh re-enable code is done from a work queue, which
* must be correctly synchronized/cancelled when shutting down the pipe."
+ *
+ * DC3CO (DC3 clock off)
+ *
+ * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
+ * the clock off automatically during the PSR2 idle state.
+ * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
+ * entry/exit allows the HW to enter a low-power state even when page flipping
+ * periodically (for instance a 30fps video playback scenario).
+ *
+ * Every time a flip occurs, PSR2 exits deep sleep state (if it was in it), so
+ * DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
+ * frames; if no other flip occurs and that work function executes, DC3CO is
+ * disabled and PSR2 is configured to enter deep sleep again, resetting once
+ * more on the next flip.
+ * Frontbuffer modifications intentionally do not trigger DC3CO activation, as
+ * that would bring a lot of complexity and most modern systems will only use
+ * page flips.
*/
-static bool psr_global_enabled(u32 debug)
+static bool psr_global_enabled(struct drm_i915_private *i915)
{
- switch (debug & I915_PSR_DEBUG_MODE_MASK) {
+ switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
return i915_modparams.enable_psr;
case I915_PSR_DEBUG_DISABLE:
@@ -77,8 +94,8 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
/* Cannot enable DSC and PSR2 simultaneously */
- WARN_ON(crtc_state->dsc.compression_enable &&
- crtc_state->has_psr2);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable &&
+ crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
@@ -114,10 +131,10 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
EDP_PSR_PRE_ENTRY(trans_shift);
/* Warning: it is masking/setting reserved bits too */
- val = I915_READ(imr_reg);
+ val = intel_de_read(dev_priv, imr_reg);
val &= ~EDP_PSR_TRANS_MASK(trans_shift);
val |= ~mask;
- I915_WRITE(imr_reg, val);
+ intel_de_write(dev_priv, imr_reg, val);
}
static void psr_event_print(u32 val, bool psr2_enabled)
@@ -174,20 +191,24 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
}
if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
dev_priv->psr.last_exit = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 9) {
- u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ PSR_EVENT(cpu_transcoder));
bool psr2_enabled = dev_priv->psr.psr2_enabled;
- I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
+ val);
psr_event_print(val, psr2_enabled);
}
}
@@ -195,7 +216,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
u32 val;
- DRM_WARN("[transcoder %s] PSR aux error\n",
+ drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
dev_priv->psr.irq_aux_error = true;
@@ -208,9 +229,9 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 * again so we don't care about unmasking the interrupt
 * or unsetting irq_aux_error.
*/
- val = I915_READ(imr_reg);
+ val = intel_de_read(dev_priv, imr_reg);
val |= EDP_PSR_ERROR(trans_shift);
- I915_WRITE(imr_reg, val);
+ intel_de_write(dev_priv, imr_reg, val);
schedule_work(&dev_priv->psr.work);
}
@@ -270,7 +291,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
if (dev_priv->psr.dp) {
- DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
+ drm_warn(&dev_priv->drm,
+ "More than one eDP panel found, PSR support should be extended\n");
return;
}
@@ -279,16 +301,18 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp->psr_dpcd[0])
return;
- DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
- intel_dp->psr_dpcd[0]);
+ drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+ if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -316,8 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* GTC first.
*/
dev_priv->psr.sink_psr2_support = y_req && alpm;
- DRM_DEBUG_KMS("PSR2 %ssupported\n",
- dev_priv->psr.sink_psr2_support ? "" : "not ");
+ drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
+ dev_priv->psr.sink_psr2_support ? "" : "not ");
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
@@ -380,8 +404,9 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
- intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+ intel_de_write(dev_priv,
+ EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+ intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -391,7 +416,8 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
+ intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+ aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@@ -454,22 +480,30 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
return val;
}
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
+static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 max_sleep_time = 0x1f;
- u32 val = EDP_PSR_ENABLE;
+ int idle_frames;
/* Let's use 6 as the minimum to cover all known cases including the
* off-by-one issue that HW has in some cases.
*/
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- /* sink_sync_latency of 8 means source has to wait for more than 8
- * frames, we'll go with 9 frames for now
- */
+ idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+ if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+ idle_frames = 0xf;
+
+ return idle_frames;
+}
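
psr_compute_idle_frames() is a max-of-three followed by a cap; a few assumed inputs make the behavior concrete:

/* idle_frames = min(0xf, max(6, vbt.psr.idle_frames, sink_sync_latency + 1)) */
/* vbt = 4,  latency = 2  -> max(6, 4, 3)   = 6                               */
/* vbt = 10, latency = 12 -> max(6, 10, 13) = 13                              */
/* vbt = 20, latency = 2  -> max(6, 20, 3)  = 20 -> WARNs, clamped to 0xf     */
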
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
+
+ val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
if (IS_HASWELL(dev_priv))
@@ -483,9 +517,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
@@ -493,13 +527,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- /* Let's use 6 as the minimum to cover all known cases including the
- * off-by-one issue that HW has in some cases.
- */
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+ val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -521,9 +549,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
 * recommends keeping this bit unset while PSR2 is enabled.
*/
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+ intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}
static bool
@@ -552,10 +580,10 @@ static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
val &= ~EDP_PSR2_IDLE_FRAME_MASK;
val |= idle_frames;
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}
static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -566,29 +594,22 @@ static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
- int idle_frames;
+ struct intel_dp *intel_dp = dev_priv->psr.dp;
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- /*
- * Restore PSR2 idle frame let's use 6 as the minimum to cover all known
- * cases including the off-by-one issue that HW has in some cases.
- */
- idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- psr2_program_idle_frames(dev_priv, idle_frames);
+ psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
}
-static void tgl_dc5_idle_thread(struct work_struct *work)
+static void tgl_dc3co_disable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.idle_work.work);
+ container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
mutex_lock(&dev_priv->psr.lock);
/* If delayed work is pending, it is not idle */
- if (delayed_work_pending(&dev_priv->psr.idle_work))
+ if (delayed_work_pending(&dev_priv->psr.dc3co_work))
goto unlock;
- DRM_DEBUG_KMS("DC5/6 idle thread\n");
tgl_psr2_disable_dc3co(dev_priv);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -599,11 +620,41 @@ static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.dc3co_enabled)
return;
- cancel_delayed_work(&dev_priv->psr.idle_work);
+ cancel_delayed_work(&dev_priv->psr.dc3co_work);
 /* Before PSR2 exit, disallow DC3CO */
tgl_psr2_disable_dc3co(dev_priv);
}
+static void
+tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 exit_scanlines;
+
+ if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ return;
+
+	/* Bspec 49196: DC3CO only works with pipe A and DDI A. */
+ if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
+ dig_port->base.port != PORT_A)
+ return;
+
+ /*
+	 * DC3CO exit time is 200us (Bspec 49196).
+ * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
+ */
+ exit_scanlines =
+ intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
+
+ if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
+ return;
+
+ crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+}
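
As a worked example with assumed numbers: a 1920x1080 mode with vtotal 1125 at 60Hz has a line time of about 1e6 / (60 * 1125) = 14.8us, so:

/* exit_scanlines = ROUNDUP(200 / 14.8) + 1 = 14 + 1 = 15
 * dc3co_exitline = crtc_vdisplay - exit_scanlines = 1080 - 15 = 1065 */
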
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -616,8 +667,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
- DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
- transcoder_name(crtc_state->cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not supported in transcoder %s\n",
+ transcoder_name(crtc_state->cpu_transcoder));
return false;
}
@@ -627,7 +679,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable) {
- DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
@@ -646,15 +699,17 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
- DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
- crtc_hdisplay, crtc_vdisplay,
- psr_max_h, psr_max_v);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ crtc_hdisplay, crtc_vdisplay,
+ psr_max_h, psr_max_v);
return false;
}
if (crtc_state->pipe_bpp > max_bpp) {
- DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
- crtc_state->pipe_bpp, max_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, pipe bpp %d > max supported %d\n",
+ crtc_state->pipe_bpp, max_bpp);
return false;
}
@@ -665,16 +720,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* x granularity.
*/
if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
- DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
- crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+ crtc_hdisplay, dev_priv->psr.su_x_granularity);
return false;
}
if (crtc_state->crc_enabled) {
- DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
return false;
}
+ tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
}
@@ -700,31 +758,36 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* hardcoded to PORT_A
*/
if (dig_port->base.port != PORT_A) {
- DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Port not supported\n");
return;
}
if (dev_priv->psr.sink_not_reliable) {
- DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR sink implementation is not reliable\n");
return;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Interlaced mode enabled\n");
return;
}
psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
if (psr_setup_time < 0) {
- DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
- intel_dp->psr_dpcd[1]);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
return;
}
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
- psr_setup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
return;
}
@@ -737,10 +800,12 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
- WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
- WARN_ON(dev_priv->psr.active);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
/* psr1 and psr2 are mutually exclusive.*/
@@ -768,11 +833,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
- u32 chicken = I915_READ(reg);
+ u32 chicken = intel_de_read(dev_priv, reg);
chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT;
- I915_WRITE(reg, chicken);
+ intel_de_write(dev_priv, reg, chicken);
}
/*
@@ -789,9 +854,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
+ intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+ mask);
psr_irq_control(dev_priv);
+
+ if (crtc_state->dc3co_exitline) {
+ u32 val;
+
+ /*
+	 * TODO: if a future platform supports DC3CO in more than one
+ * transcoder, EXITLINE will need to be unset when disabling PSR
+ */
+ val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
+ val &= ~EXITLINE_MASK;
+ val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
+ val |= EXITLINE_ENABLE;
+ intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
+ }
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -800,14 +880,16 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
struct intel_dp *intel_dp = dev_priv->psr.dp;
u32 val;
- WARN_ON(dev_priv->psr.enabled);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
- dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+ /* DC5/DC6 requires at least 6 idle frames */
+ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
+ dev_priv->psr.dc3co_exit_delay = val;
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -818,20 +900,22 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
* to avoid any rendering problems.
*/
if (INTEL_GEN(dev_priv) >= 12) {
- val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv,
+ TRANS_PSR_IIR(dev_priv->psr.transcoder));
val &= EDP_PSR_ERROR(0);
} else {
- val = I915_READ(EDP_PSR_IIR);
+ val = intel_de_read(dev_priv, EDP_PSR_IIR);
val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
}
if (val) {
dev_priv->psr.sink_not_reliable = true;
- DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR interruption error set, not enabling PSR\n");
return;
}
- DRM_DEBUG_KMS("Enabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ dev_priv->psr.psr2_enabled ? "2" : "1");
intel_psr_setup_vsc(intel_dp, crtc_state);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
@@ -860,12 +944,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
if (!crtc_state->has_psr)
return;
- WARN_ON(dev_priv->drrs.dp);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
- if (!psr_global_enabled(dev_priv->psr.debug)) {
- DRM_DEBUG_KMS("PSR disabled by flag\n");
+ if (!psr_global_enabled(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
goto unlock;
}
@@ -881,27 +965,33 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.active) {
if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- WARN_ON(val & EDP_PSR2_ENABLE);
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- WARN_ON(val & EDP_PSR_ENABLE);
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
return;
}
if (dev_priv->psr.psr2_enabled) {
tgl_disallow_dc3co_on_psr2_exit(dev_priv);
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- WARN_ON(!(val & EDP_PSR2_ENABLE));
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
val &= ~EDP_PSR2_ENABLE;
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
} else {
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- WARN_ON(!(val & EDP_PSR_ENABLE));
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
val &= ~EDP_PSR_ENABLE;
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
dev_priv->psr.active = false;
}
@@ -917,8 +1007,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (!dev_priv->psr.enabled)
return;
- DRM_DEBUG_KMS("Disabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ dev_priv->psr.psr2_enabled ? "2" : "1");
intel_psr_exit(dev_priv);
@@ -933,7 +1023,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Wait till PSR is idle */
if (intel_de_wait_for_clear(dev_priv, psr_status,
psr_status_mask, 2000))
- DRM_ERROR("Timed out waiting PSR idle state\n");
+ drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -959,7 +1049,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
if (!old_crtc_state->has_psr)
return;
- if (WARN_ON(!CAN_PSR(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
return;
mutex_lock(&dev_priv->psr.lock);
@@ -968,7 +1058,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
- cancel_delayed_work_sync(&dev_priv->psr.idle_work);
+ cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
@@ -983,7 +1073,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
 * but it makes more sense to write to the currently active
* pipe.
*/
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+ intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
else
/*
 * A write to CURSURFLIVE does not cause HW tracking to exit PSR
@@ -1015,7 +1105,7 @@ void intel_psr_update(struct intel_dp *intel_dp,
mutex_lock(&dev_priv->psr.lock);
- enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+ enable = crtc_state->has_psr && psr_global_enabled(dev_priv);
psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
@@ -1103,7 +1193,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&dev_priv->psr.lock);
@@ -1167,7 +1258,7 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_FORCE_PSR1) {
- DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
+ drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
}
@@ -1279,14 +1370,12 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 * Once we rely completely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
 * event as well, and therefore tgl_dc3co_flush() will need to be changed
- * accrodingly in future.
+ * accordingly in future.
*/
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
- u32 delay;
-
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.dc3co_enabled)
@@ -1304,10 +1393,8 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
goto unlock;
tgl_psr2_enable_dc3co(dev_priv);
- /* DC5/DC6 required idle frames = 6 */
- delay = 6 * dev_priv->psr.dc3co_exit_delay;
- mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
- usecs_to_jiffies(delay));
+ mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
+ dev_priv->psr.dc3co_exit_delay);
unlock:
mutex_unlock(&dev_priv->psr.lock);
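
Since dc3co_exit_delay is now precomputed in jiffies when PSR is enabled (six frame times, per the DC5/DC6 requirement), the re-arm above is a single cheap mod_delayed_work() call. With assumed numbers:

/* 60Hz panel: frame time ~= 16667us, so
 * dc3co_exit_delay = usecs_to_jiffies(6 * 16667)
 *                 ~= usecs_to_jiffies(100000) = 25 jiffies at HZ=250 */
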
@@ -1391,7 +1478,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
INIT_WORK(&dev_priv->psr.work, intel_psr_work);
- INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
+ INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
mutex_init(&dev_priv->psr.lock);
}
@@ -1427,14 +1514,15 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
if (r != 1) {
- DRM_ERROR("Error reading ALPM status\n");
+ drm_err(&dev_priv->drm, "Error reading ALPM status\n");
return;
}
if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "ALPM lock timeout error, disabling PSR\n");
/* Clearing error */
drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
@@ -1450,14 +1538,15 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
if (r != 1) {
- DRM_ERROR("Error reading DP_PSR_ESI\n");
+ drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
return;
}
if (val & DP_PSR_CAPS_CHANGE) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Sink PSR capability changed, disabling PSR\n");
/* Clearing it */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
@@ -1482,7 +1571,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
- DRM_ERROR("Error reading PSR status or error status\n");
+ drm_err(&dev_priv->drm,
+ "Error reading PSR status or error status\n");
goto exit;
}
@@ -1492,17 +1582,22 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
}
if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
- DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR RFB storage error, disabling PSR\n");
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR VSC SDP uncorrectable error, disabling PSR\n");
if (error_status & DP_PSR_LINK_CRC_ERROR)
- DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR Link CRC error, disabling PSR\n");
if (error_status & ~errors)
- DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
- error_status & ~errors);
+ drm_err(&dev_priv->drm,
+ "PSR_ERROR_STATUS unhandled errors %x\n",
+ error_status & ~errors);
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
@@ -1542,7 +1637,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
return;
intel_connector = to_intel_connector(connector);
- dig_port = enc_to_dig_port(intel_connector->encoder);
+ dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
if (dev_priv->psr.dp != &dig_port->dp)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 399b1542509f..46beb155d835 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -14,7 +14,7 @@
static void quirk_ssc_force_disable(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
- DRM_INFO("applying lvds SSC disable quirk\n");
+ drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
}
/*
@@ -24,14 +24,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915)
static void quirk_invert_brightness(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
- DRM_INFO("applying inverted panel brightness quirk\n");
+ drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
- DRM_INFO("applying backlight present quirk\n");
+ drm_info(&i915->drm, "applying backlight present quirk\n");
}
/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
@@ -40,7 +40,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915)
static void quirk_increase_t12_delay(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INCREASE_T12_DELAY;
- DRM_INFO("Applying T12 delay quirk\n");
+ drm_info(&i915->drm, "Applying T12 delay quirk\n");
}
/*
@@ -50,7 +50,7 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915)
static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
- DRM_INFO("Applying Increase DDI Disabled quirk\n");
+ drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
}
struct intel_quirk {
@@ -82,6 +82,16 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
DMI_MATCH(DMI_PRODUCT_NAME, ""),
},
},
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "Thundersoft TST178 tablet",
+ /* DMI strings are too generic, also match on BIOS date */
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"),
+ },
+ },
{ } /* terminating entry */
},
.hook = quirk_invert_brightness,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index e8819fd21e03..637d8fe2f8c2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -34,7 +34,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -217,23 +216,23 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
int i;
if (HAS_PCH_SPLIT(dev_priv)) {
- I915_WRITE(intel_sdvo->sdvo_reg, val);
- POSTING_READ(intel_sdvo->sdvo_reg);
+ intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
if (HAS_PCH_IBX(dev_priv)) {
- I915_WRITE(intel_sdvo->sdvo_reg, val);
- POSTING_READ(intel_sdvo->sdvo_reg);
+ intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
}
return;
}
if (intel_sdvo->port == PORT_B)
- cval = I915_READ(GEN3_SDVOC);
+ cval = intel_de_read(dev_priv, GEN3_SDVOC);
else
- bval = I915_READ(GEN3_SDVOB);
+ bval = intel_de_read(dev_priv, GEN3_SDVOB);
/*
* Write the registers twice for luck. Sometimes,
@@ -241,11 +240,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* The BIOS does this too. Yay, magic
*/
for (i = 0; i < 2; i++) {
- I915_WRITE(GEN3_SDVOB, bval);
- POSTING_READ(GEN3_SDVOB);
+ intel_de_write(dev_priv, GEN3_SDVOB, bval);
+ intel_de_posting_read(dev_priv, GEN3_SDVOB);
- I915_WRITE(GEN3_SDVOC, cval);
- POSTING_READ(GEN3_SDVOC);
+ intel_de_write(dev_priv, GEN3_SDVOC, cval);
+ intel_de_posting_read(dev_priv, GEN3_SDVOC);
}
}
@@ -414,12 +413,10 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
{
const char *cmd_name;
int i, pos = 0;
-#define BUF_LEN 256
- char buffer[BUF_LEN];
+ char buffer[64];
#define BUF_PRINT(args...) \
- pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
-
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
for (i = 0; i < args_len; i++) {
BUF_PRINT("%02X ", ((u8 *)args)[i]);
@@ -433,9 +430,9 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
BUF_PRINT("(%s)", cmd_name);
else
BUF_PRINT("(%02X)", cmd);
- BUG_ON(pos >= BUF_LEN - 1);
+
+ WARN_ON(pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
-#undef BUF_LEN
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}
@@ -540,8 +537,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i, pos = 0;
-#define BUF_LEN 256
- char buffer[BUF_LEN];
+ char buffer[64];
buffer[0] = '\0';
@@ -581,7 +577,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
}
#define BUF_PRINT(args...) \
- pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
cmd_status = sdvo_cmd_status(status);
if (cmd_status)
@@ -600,9 +596,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
goto log_fail;
BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- BUG_ON(pos >= BUF_LEN - 1);
+
+ WARN_ON(pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
-#undef BUF_LEN
DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
return true;
@@ -1267,6 +1263,13 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
pipe_config->clock_set = true;
}
+static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
+ const struct drm_connector_state *conn_state)
+{
+ return sdvo->has_hdmi_monitor &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1322,12 +1325,15 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
- if (intel_sdvo_state->base.force_audio != HDMI_AUDIO_OFF_DVI)
- pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
- if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON ||
- (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO && intel_sdvo->has_hdmi_audio))
- pipe_config->has_audio = true;
+ if (pipe_config->has_hdmi_sink) {
+ if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO)
+ pipe_config->has_audio = intel_sdvo->has_hdmi_audio;
+ else
+ pipe_config->has_audio =
+ intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
+ }
if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
@@ -1470,7 +1476,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
- DRM_INFO("Setting output timings on %s failed\n",
+ drm_info(&dev_priv->drm,
+ "Setting output timings on %s failed\n",
SDVO_NAME(intel_sdvo));
/* Set the input timing to the screen. Assume always input 0. */
@@ -1494,12 +1501,14 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
- DRM_INFO("Setting input timings on %s failed\n",
+ drm_info(&dev_priv->drm,
+ "Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
switch (crtc_state->pixel_multiplier) {
default:
- WARN(1, "unknown pixel multiplier specified\n");
+ drm_WARN(&dev_priv->drm, 1,
+ "unknown pixel multiplier specified\n");
/* fall through */
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
@@ -1518,7 +1527,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
- sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
if (intel_sdvo->port == PORT_B)
sdvox &= SDVOB_PRESERVE_MASK;
else
@@ -1564,7 +1573,7 @@ bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(sdvo_reg);
+ val = intel_de_read(dev_priv, sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -1607,7 +1616,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
- sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
@@ -1615,7 +1624,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* Some sdvo encoders are not spec compliant and don't
* implement the mandatory get_timings function.
*/
- DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
+ drm_dbg(&dev_priv->drm, "failed to retrieve SDVO DTD\n");
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
@@ -1667,9 +1676,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
}
- WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
- "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
- pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+ drm_WARN(dev,
+ encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+ "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -1734,7 +1744,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_OFF);
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1791,7 +1801,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
int i;
bool success;
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
temp |= SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1806,8 +1816,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
 * a given that if the status is a success, we succeeded.
*/
if (success && !input1) {
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
}
if (0)
@@ -2219,8 +2230,8 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -2709,6 +2720,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_encoder->hotplug = intel_sdvo_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
} else {
@@ -3229,9 +3241,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
enum port port)
{
if (HAS_PCH_SPLIT(dev_priv))
- WARN_ON(port != PORT_B);
+ drm_WARN_ON(&dev_priv->drm, port != PORT_B);
else
- WARN_ON(port != PORT_B && port != PORT_C);
+ drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C);
}
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
@@ -3269,8 +3281,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- DRM_DEBUG_KMS("No SDVO device found on %s\n",
- SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
}
@@ -3293,8 +3306,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
- DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
- SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
/* Output_setup can leave behind connectors! */
goto err_output;
}
@@ -3331,7 +3345,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
&intel_sdvo->pixel_clock_max))
goto err_output;
- DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index a66f224aa17d..72065e4360d5 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index fca77ec1e0dd..33d886141138 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -37,10 +37,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_atomic_plane.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
@@ -104,7 +104,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (min <= 0 || max <= 0)
goto irq_disable;
- if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
+ if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;
/*
@@ -113,8 +113,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
* re-entry as well.
*/
if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
- DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
- psr_status);
+ drm_err(&dev_priv->drm,
+ "PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
local_irq_disable();
@@ -135,8 +136,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
break;
if (!timeout) {
- DRM_ERROR("Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_err(&dev_priv->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
break;
}
@@ -204,7 +206,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
* event outside of the critical section - the spinlock might spin for a
* while ... */
if (new_crtc_state->uapi.event) {
- WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
drm_crtc_arm_vblank_event(&crtc->base,
@@ -221,17 +224,20 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
if (crtc->debug.start_vbl_count &&
crtc->debug.start_vbl_count != end_vbl_count) {
- DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
- pipe_name(pipe), crtc->debug.start_vbl_count,
- end_vbl_count,
- ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
- crtc->debug.min_vbl, crtc->debug.max_vbl,
- crtc->debug.scanline_start, scanline_end);
+ drm_err(&dev_priv->drm,
+ "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+ pipe_name(pipe), crtc->debug.start_vbl_count,
+ end_vbl_count,
+ ktime_us_delta(end_vbl_time,
+ crtc->debug.start_vbl_time),
+ crtc->debug.min_vbl, crtc->debug.max_vbl,
+ crtc->debug.scanline_start, scanline_end);
}
#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
VBLANK_EVASION_TIME_US)
- DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ drm_warn(&dev_priv->drm,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
pipe_name(pipe),
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
VBLANK_EVASION_TIME_US);
@@ -278,6 +284,16 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
+
+ /*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
@@ -291,26 +307,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
drm_rect_init(src, src_x << 16, src_y << 16,
src_w << 16, src_h << 16);
- if (!fb->format->is_yuv)
- return 0;
-
- /* YUV specific checks */
- if (!rotated) {
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
- } else {
- hsub = vsub = max(fb->format->hsub, fb->format->vsub);
}
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
+
if (src_x % hsub || src_w % hsub) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_x, src_w, hsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, yesno(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
- DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_y, src_h, vsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, yesno(rotated));
return -EINVAL;
}
@@ -349,9 +365,8 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- unsigned int pixel_rate = crtc_state->pixel_rate;
- unsigned int src_w, src_h, dst_w, dst_h;
unsigned int num, den;
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
skl_plane_ratio(crtc_state, plane_state, &num, &den);
@@ -359,17 +374,7 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
den *= 2;
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- dst_w = drm_rect_width(&plane_state->uapi.dst);
- dst_h = drm_rect_height(&plane_state->uapi.dst);
-
- /* Downscaling limits the maximum pixel rate */
- dst_w = min(src_w, dst_w);
- dst_h = min(src_h, dst_h);
-
- return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
- mul_u32_u32(den, dst_w * dst_h));
+ return DIV_ROUND_UP(pixel_rate * num, den);
}
static unsigned int
@@ -434,14 +439,16 @@ skl_program_scaler(struct intel_plane *plane,
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
}
- I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
- PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
- I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
- I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ (crtc_x << 16) | crtc_y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ (crtc_w << 16) | crtc_h);
}
/* Preoffset values for YUV to RGB Conversion */
@@ -547,28 +554,37 @@ icl_program_input_csc(struct intel_plane *plane,
else
csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
- GOFF(csc[1]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
- GOFF(csc[4]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
- GOFF(csc[7]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
-
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
- PREOFF_YUV_TO_RGB_HI);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+ ROFF(csc[0]) | GOFF(csc[1]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+ BOFF(csc[2]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+ ROFF(csc[3]) | GOFF(csc[4]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+ BOFF(csc[5]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+ ROFF(csc[6]) | GOFF(csc[7]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+ BOFF(csc[8]));
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ 0);
else
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
- PREOFF_YUV_TO_RGB_LO);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
}
static void
@@ -623,44 +639,49 @@ skl_program_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+ intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
+ intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+ (src_h << 16) | src_w);
if (INTEL_GEN(dev_priv) < 12)
aux_dist |= aux_stride;
- I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
if (icl_is_hdr_plane(dev_priv, plane_id))
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl);
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
+ plane_state->cus_ctl);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
- I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
- I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax);
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ (y << 16) | x);
if (INTEL_GEN(dev_priv) < 11)
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) |
- plane_state->color_plane[1].x);
+ intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
if (plane_state->scaler_id >= 0)
skl_program_scaler(plane, crtc_state, plane_state);
@@ -693,12 +714,12 @@ skl_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (icl_is_hdr_plane(dev_priv, plane_id))
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
skl_write_plane_wm(plane, crtc_state);
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -718,7 +739,7 @@ skl_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+ ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
*pipe = plane->pipe;
@@ -774,23 +795,36 @@ chv_update_csc(const struct intel_plane_state *plane_state)
if (!fb->format->is_yuv)
return;
- I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-
- I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
- I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
- I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
- I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
- I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(csc[8]));
-
- I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
- I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
- I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
-
- I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ intel_de_write_fw(dev_priv, SPCSCC01(plane_id),
+ SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
+ intel_de_write_fw(dev_priv, SPCSCC23(plane_id),
+ SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
+ intel_de_write_fw(dev_priv, SPCSCC45(plane_id),
+ SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
+ intel_de_write_fw(dev_priv, SPCSCC67(plane_id),
+ SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
+ intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
+
+ intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id),
+ SPCSC_IMAX(1023) | SPCSC_IMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id),
+ SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+ intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id),
+ SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+
+ intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
#define SIN_0 0
@@ -829,10 +863,10 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
}
/* FIXME these register are single buffered :( */
- I915_WRITE_FW(SPCLRC0(pipe, plane_id),
- SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
- I915_WRITE_FW(SPCLRC1(pipe, plane_id),
- SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
+ intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id),
+ SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
+ intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id),
+ SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
static void
@@ -1019,10 +1053,8 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these register are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
- gamma[i] << 16 |
- gamma[i] << 8 |
- gamma[i]);
+ intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1),
+ gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
static void
@@ -1055,32 +1087,37 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
+ (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
if (key->flags) {
- I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
- I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+ intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id),
+ key->max_value);
}
- I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
- I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
- I915_WRITE_FW(SPSURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl);
+ intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_update_clrc(plane_state);
vlv_update_gamma(plane_state);
@@ -1099,8 +1136,8 @@ vlv_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
- I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1120,7 +1157,7 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+ ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
*pipe = plane->pipe;
@@ -1424,19 +1461,17 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these register are single buffered :( */
for (i = 0; i < 16; i++)
- I915_WRITE_FW(SPRGAMC(pipe, i),
- gamma[i] << 20 |
- gamma[i] << 10 |
- gamma[i]);
-
- I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
- I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
- I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC(pipe, i),
+ gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
+
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]);
i++;
- I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
- I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
- I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]);
i++;
}
@@ -1476,25 +1511,27 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
if (key->flags) {
- I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
- I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value);
}
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPROFFSET(pipe), (y << 16) | x);
} else {
- I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
- I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), (y << 16) | x);
}
/*
@@ -1502,9 +1539,9 @@ ivb_update_plane(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(SPRCTL(pipe), sprctl);
- I915_WRITE_FW(SPRSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl);
+ intel_de_write_fw(dev_priv, SPRSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
ivb_update_gamma(plane_state);
@@ -1521,11 +1558,11 @@ ivb_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPRCTL(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRCTL(pipe), 0);
/* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), 0);
- I915_WRITE_FW(SPRSURF(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRSURF(pipe), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1544,7 +1581,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+ ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
*pipe = plane->pipe;
@@ -1710,10 +1747,8 @@ static void g4x_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these register are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
- gamma[i] << 16 |
- gamma[i] << 8 |
- gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1),
+ gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
static void ilk_sprite_linear_gamma(u16 gamma[17])
@@ -1741,14 +1776,12 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these register are single buffered :( */
for (i = 0; i < 16; i++)
- I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
- gamma[i] << 20 |
- gamma[i] << 10 |
- gamma[i]);
-
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i),
+ gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
+
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
i++;
}
@@ -1788,28 +1821,30 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
if (key->flags) {
- I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
- I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
- I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
- I915_WRITE_FW(DVSSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr);
+ intel_de_write_fw(dev_priv, DVSSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
if (IS_G4X(dev_priv))
g4x_update_gamma(plane_state);
@@ -1829,10 +1864,10 @@ g4x_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DVSCNTR(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0);
/* Disable the scaler */
- I915_WRITE_FW(DVSSCALE(pipe), 0);
- I915_WRITE_FW(DVSSURF(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSSURF(pipe), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1851,7 +1886,7 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+ ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE;
*pipe = plane->pipe;
@@ -1999,7 +2034,8 @@ int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
if (IS_CHERRYVIEW(dev_priv) &&
rotation & DRM_MODE_ROTATE_180 &&
rotation & DRM_MODE_REFLECT_X) {
- DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot rotate and reflect at the same time\n");
return -EINVAL;
}
@@ -2040,6 +2076,18 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
+static bool intel_format_is_p01x(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -2054,21 +2102,24 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
is_ccs_modifier(fb->modifier)) {
- DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
- rotation);
+ drm_dbg_kms(&dev_priv->drm,
+ "RC support only with 0/180 degree rotation (%x)\n",
+ rotation);
return -EINVAL;
}
if (rotation & DRM_MODE_REFLECT_X &&
fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "horizontal flip is not supported with linear surface formats\n");
return -EINVAL;
}
if (drm_rotation_90_or_270(rotation)) {
if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
- DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
@@ -2091,9 +2142,10 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
return -EINVAL;
default:
break;
@@ -2109,7 +2161,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
- DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /* Wa_1606054188:tgl */
+ if (IS_TIGERLAKE(dev_priv) &&
+ plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+ intel_format_is_p01x(fb->format->format)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Source color keying not supported with P01x formats\n");
return -EINVAL;
}
@@ -2136,10 +2198,11 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
*/
if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
(crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
- DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
- crtc_x + crtc_w < 4 ? "end" : "start",
- crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
- 4, pipe_src_w - 4);
+ drm_dbg_kms(&dev_priv->drm,
+ "requested plane X %s position %d invalid (valid range %d-%d)\n",
+ crtc_x + crtc_w < 4 ? "end" : "start",
+ crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+ 4, pipe_src_w - 4);
return -ERANGE;
}
@@ -2754,19 +2817,25 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
+static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
{
+ /* Wa_14010477008:tgl[a0..c0] */
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
+ return false;
+
return plane_id < PLANE_SPRITE4;
}
static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
+ struct drm_i915_private *dev_priv = to_i915(_plane->dev);
struct intel_plane *plane = to_intel_plane(_plane);
switch (modifier) {
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (!gen12_plane_supports_mc_ccs(plane->id))
+ if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
return false;
/* fall through */
case DRM_FORMAT_MOD_LINEAR:
@@ -2935,9 +3004,10 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
}
}
-static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
+static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
{
- if (gen12_plane_supports_mc_ccs(plane_id))
+ if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
return gen12_plane_format_modifiers_mc_ccs;
else
return gen12_plane_format_modifiers_rc_ccs;
@@ -2968,7 +3038,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u64 *modifiers;
const u32 *formats;
int num_formats;
@@ -3008,7 +3077,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (INTEL_GEN(dev_priv) >= 12) {
- modifiers = gen12_get_plane_modifiers(plane_id);
+ modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
plane_funcs = &gen12_plane_funcs;
} else {
if (plane->has_ccs)
@@ -3023,10 +3092,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -3077,7 +3144,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
- unsigned long possible_crtcs;
unsigned int supported_rotations;
const u64 *modifiers;
const u32 *formats;
@@ -3162,10 +3228,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->id = PLANE_SPRITE0 + sprite;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, sprite));
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 7773169b7331..9b850c11aa78 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -61,7 +61,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
lane_mask = intel_uncore_read(uncore,
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
- WARN_ON(lane_mask == 0xffffffff);
+ drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -76,7 +76,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
pin_mask = intel_uncore_read(uncore,
PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
- WARN_ON(pin_mask == 0xffffffff);
+ drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -120,7 +120,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
struct intel_uncore *uncore = &i915->uncore;
u32 val;
- WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+ drm_WARN_ON(&i915->drm,
+ lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
@@ -181,8 +182,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, nothing connected\n",
+ dig_port->tc_port_name);
return mask;
}
@@ -195,7 +197,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
- if (!WARN_ON(hweight32(mask) > 1))
+ if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
tc_port_fixup_legacy_flag(dig_port, mask);
return mask;
@@ -210,8 +212,9 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
return false;
}
@@ -228,8 +231,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
- dig_port->tc_port_name,
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
+ dig_port->tc_port_name,
enableddisabled(enable));
return false;
@@ -243,8 +247,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
- DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY complete clear timed out\n",
+ dig_port->tc_port_name);
return true;
}
@@ -258,8 +263,9 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assume safe mode\n",
+ dig_port->tc_port_name);
return true;
}
@@ -409,16 +415,17 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
- WARN_ON(intel_display_power_is_enabled(i915,
- intel_aux_power_domain(dig_port)));
+ drm_WARN_ON(&i915->drm,
+ intel_display_power_is_enabled(i915,
+ intel_aux_power_domain(dig_port)));
icl_tc_phy_disconnect(dig_port);
icl_tc_phy_connect(dig_port, required_lanes);
- DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(old_tc_mode),
- tc_port_mode_name(dig_port->tc_mode));
+ drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(old_tc_mode),
+ tc_port_mode_name(dig_port->tc_mode));
}
static void
@@ -503,7 +510,7 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);
- WARN_ON(dig_port->tc_lock_wakeref);
+ drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
dig_port->tc_lock_wakeref = wakeref;
}
@@ -550,7 +557,7 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(i915, port);
- if (WARN_ON(tc_port == PORT_TC_NONE))
+ if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE))
return;
snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
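intel_tc.c receives the matching assertion conversion: bare WARN_ON() becomes drm_WARN_ON(), which takes the drm_device and prints its name alongside the backtrace. A sketch of the pattern as it appears in the lane-mask hunk above:

	/* Before: generic kernel warning, no device context. */
	WARN_ON(lane_mask == 0xffffffff);

	/* After: the warning is attributed to the drm_device. */
	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);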
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index c75e0ceecee6..d2e3a3a323e9 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,7 +33,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -907,7 +906,7 @@ static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 tmp = I915_READ(TV_CTL);
+ u32 tmp = intel_de_read(dev_priv, TV_CTL);
*pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
@@ -926,7 +925,8 @@ intel_enable_tv(struct intel_encoder *encoder,
intel_wait_for_vblank(dev_priv,
to_intel_crtc(pipe_config->uapi.crtc)->pipe);
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+ intel_de_write(dev_priv, TV_CTL,
+ intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
}
static void
@@ -937,7 +937,8 @@ intel_disable_tv(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+ intel_de_write(dev_priv, TV_CTL,
+ intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
@@ -1095,11 +1096,11 @@ intel_tv_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
- tv_ctl = I915_READ(TV_CTL);
- hctl1 = I915_READ(TV_H_CTL_1);
- hctl3 = I915_READ(TV_H_CTL_3);
- vctl1 = I915_READ(TV_V_CTL_1);
- vctl2 = I915_READ(TV_V_CTL_2);
+ tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ hctl1 = intel_de_read(dev_priv, TV_H_CTL_1);
+ hctl3 = intel_de_read(dev_priv, TV_H_CTL_3);
+ vctl1 = intel_de_read(dev_priv, TV_V_CTL_1);
+ vctl2 = intel_de_read(dev_priv, TV_V_CTL_2);
tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
@@ -1134,17 +1135,17 @@ intel_tv_get_config(struct intel_encoder *encoder,
break;
}
- tmp = I915_READ(TV_WIN_POS);
+ tmp = intel_de_read(dev_priv, TV_WIN_POS);
xpos = tmp >> 16;
ypos = tmp & 0xffff;
- tmp = I915_READ(TV_WIN_SIZE);
+ tmp = intel_de_read(dev_priv, TV_WIN_SIZE);
xsize = tmp >> 16;
ysize = tmp & 0xffff;
intel_tv_mode_to_mode(&mode, &tv_mode);
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(&mode);
intel_tv_scale_mode_horiz(&mode, hdisplay,
@@ -1200,7 +1201,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+ drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
pipe_config->port_clock = tv_mode->clock;
@@ -1215,7 +1216,8 @@ intel_tv_compute_config(struct intel_encoder *encoder,
extra = adjusted_mode->crtc_vdisplay - vdisplay;
if (extra < 0) {
- DRM_DEBUG_KMS("No vertical scaling for >1024 pixel wide modes\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No vertical scaling for >1024 pixel wide modes\n");
return -EINVAL;
}
@@ -1248,7 +1250,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
tv_conn_state->bypass_vfilter = false;
}
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(adjusted_mode);
/*
@@ -1380,16 +1382,16 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
- I915_WRITE(TV_H_CTL_1, hctl1);
- I915_WRITE(TV_H_CTL_2, hctl2);
- I915_WRITE(TV_H_CTL_3, hctl3);
- I915_WRITE(TV_V_CTL_1, vctl1);
- I915_WRITE(TV_V_CTL_2, vctl2);
- I915_WRITE(TV_V_CTL_3, vctl3);
- I915_WRITE(TV_V_CTL_4, vctl4);
- I915_WRITE(TV_V_CTL_5, vctl5);
- I915_WRITE(TV_V_CTL_6, vctl6);
- I915_WRITE(TV_V_CTL_7, vctl7);
+ intel_de_write(dev_priv, TV_H_CTL_1, hctl1);
+ intel_de_write(dev_priv, TV_H_CTL_2, hctl2);
+ intel_de_write(dev_priv, TV_H_CTL_3, hctl3);
+ intel_de_write(dev_priv, TV_V_CTL_1, vctl1);
+ intel_de_write(dev_priv, TV_V_CTL_2, vctl2);
+ intel_de_write(dev_priv, TV_V_CTL_3, vctl3);
+ intel_de_write(dev_priv, TV_V_CTL_4, vctl4);
+ intel_de_write(dev_priv, TV_V_CTL_5, vctl5);
+ intel_de_write(dev_priv, TV_V_CTL_6, vctl6);
+ intel_de_write(dev_priv, TV_V_CTL_7, vctl7);
}
static void set_color_conversion(struct drm_i915_private *dev_priv,
@@ -1398,18 +1400,18 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
if (!color_conversion)
return;
- I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
- color_conversion->gy);
- I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
- color_conversion->ay);
- I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
- color_conversion->gu);
- I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
- color_conversion->au);
- I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
- color_conversion->gv);
- I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
- color_conversion->av);
+ intel_de_write(dev_priv, TV_CSC_Y,
+ (color_conversion->ry << 16) | color_conversion->gy);
+ intel_de_write(dev_priv, TV_CSC_Y2,
+ (color_conversion->by << 16) | color_conversion->ay);
+ intel_de_write(dev_priv, TV_CSC_U,
+ (color_conversion->ru << 16) | color_conversion->gu);
+ intel_de_write(dev_priv, TV_CSC_U2,
+ (color_conversion->bu << 16) | color_conversion->au);
+ intel_de_write(dev_priv, TV_CSC_V,
+ (color_conversion->rv << 16) | color_conversion->gv);
+ intel_de_write(dev_priv, TV_CSC_V2,
+ (color_conversion->bv << 16) | color_conversion->av);
}
static void intel_tv_pre_enable(struct intel_encoder *encoder,
@@ -1434,7 +1436,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
- tv_ctl = I915_READ(TV_CTL);
+ tv_ctl = intel_de_read(dev_priv, TV_CTL);
tv_ctl &= TV_CTL_SAVE;
switch (intel_tv->type) {
@@ -1511,21 +1513,20 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
- I915_WRITE(TV_SC_CTL_1, scctl1);
- I915_WRITE(TV_SC_CTL_2, scctl2);
- I915_WRITE(TV_SC_CTL_3, scctl3);
+ intel_de_write(dev_priv, TV_SC_CTL_1, scctl1);
+ intel_de_write(dev_priv, TV_SC_CTL_2, scctl2);
+ intel_de_write(dev_priv, TV_SC_CTL_3, scctl3);
set_color_conversion(dev_priv, color_conversion);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000);
else
- I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+ intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000);
if (video_levels)
- I915_WRITE(TV_CLR_LEVEL,
- ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
- (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+ intel_de_write(dev_priv, TV_CLR_LEVEL,
+ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
@@ -1533,7 +1534,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
tv_filter_ctl = TV_AUTO_SCALE;
if (tv_conn_state->bypass_vfilter)
tv_filter_ctl |= TV_V_FILTER_BYPASS;
- I915_WRITE(TV_FILTER_CTL_1, tv_filter_ctl);
+ intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl);
xsize = tv_mode->hblank_start - tv_mode->hblank_end;
ysize = intel_tv_mode_vdisplay(tv_mode);
@@ -1544,20 +1545,25 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
conn_state->tv.margins.right);
ysize -= (tv_conn_state->margins.top +
tv_conn_state->margins.bottom);
- I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
- I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+ intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos);
+ intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize);
j = 0;
for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_H_LUMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_H_CHROMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_V_LUMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]);
- I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
- I915_WRITE(TV_CTL, tv_ctl);
+ intel_de_write(dev_priv, TV_V_CHROMA(i),
+ tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_DAC,
+ intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE);
+ intel_de_write(dev_priv, TV_CTL, tv_ctl);
}
static int
@@ -1581,8 +1587,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
spin_unlock_irq(&dev_priv->irq_lock);
}
- save_tv_dac = tv_dac = I915_READ(TV_DAC);
- save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+ save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC);
+ save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL);
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
@@ -1608,15 +1614,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
- I915_WRITE(TV_CTL, tv_ctl);
- I915_WRITE(TV_DAC, tv_dac);
- POSTING_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_CTL, tv_ctl);
+ intel_de_write(dev_priv, TV_DAC, tv_dac);
+ intel_de_posting_read(dev_priv, TV_DAC);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
type = -1;
- tv_dac = I915_READ(TV_DAC);
- DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ tv_dac = intel_de_read(dev_priv, TV_DAC);
+ drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
/*
* A B C
* 0 1 1 Composite
@@ -1624,22 +1630,25 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* 0 0 0 Component
*/
if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected Composite TV connection\n");
type = DRM_MODE_CONNECTOR_Composite;
} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected S-Video TV connection\n");
type = DRM_MODE_CONNECTOR_SVIDEO;
} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n");
type = -1;
}
- I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- I915_WRITE(TV_CTL, save_tv_ctl);
- POSTING_READ(TV_CTL);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ intel_de_write(dev_priv, TV_CTL, save_tv_ctl);
+ intel_de_posting_read(dev_priv, TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -1794,7 +1803,7 @@ intel_tv_get_modes(struct drm_connector *connector)
*/
intel_tv_mode_to_mode(mode, tv_mode);
if (count == 0) {
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(mode);
}
intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
@@ -1870,11 +1879,11 @@ intel_tv_init(struct drm_i915_private *dev_priv)
int i, initial_mode = 0;
struct drm_connector_state *state;
- if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
if (!intel_bios_is_tv_present(dev_priv)) {
- DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n");
return;
}
@@ -1882,15 +1891,15 @@ intel_tv_init(struct drm_i915_private *dev_priv)
* Sanity check the TV output by checking to see if the
* DAC register holds a value
*/
- save_tv_dac = I915_READ(TV_DAC);
+ save_tv_dac = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
- tv_dac_on = I915_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- tv_dac_off = I915_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac);
/*
* If the register does not hold the state change enable
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 4d0c23b29248..05c7cbe32eb4 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -111,7 +111,7 @@ enum bdb_block_id {
BDB_LVDS_LFP_DATA_PTRS = 41,
BDB_LVDS_LFP_DATA = 42,
BDB_LVDS_BACKLIGHT = 43,
- BDB_LVDS_POWER = 44,
+ BDB_LFP_POWER = 44,
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
BDB_COMPRESSION_PARAMETERS = 56,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 9e6aaa302e40..95ad87d4ccb3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -6,8 +6,6 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -374,7 +372,7 @@ static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
return false;
/* There's no pipe A DSC engine on ICL */
- WARN_ON(crtc->pipe == PIPE_A);
+ drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A);
return true;
}
@@ -518,119 +516,149 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
pps_val |= DSC_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
- DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_0,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_1 registers */
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
- DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_1,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_2 registers */
pps_val = 0;
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
- DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_2,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_3 registers */
pps_val = 0;
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
- DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_3,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_4 registers */
pps_val = 0;
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
- DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_4,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_5 registers */
pps_val = 0;
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
- DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_5,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_6 registers */
@@ -639,80 +667,100 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
- DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_6,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_7 registers */
pps_val = 0;
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
- DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_7,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_8 registers */
pps_val = 0;
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
- DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_8,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_9 registers */
pps_val = 0;
pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
- DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_9,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_10 registers */
@@ -721,20 +769,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
- DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ intel_de_write(dev_priv,
+ DSCC_PICTURE_PARAMETER_SET_10, pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
}
/* Populate Picture parameter set 16 */
@@ -744,20 +797,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
vdsc_cfg->slice_width) |
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
- DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ intel_de_write(dev_priv,
+ DSCC_PICTURE_PARAMETER_SET_16, pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
}
/* Populate the RC_BUF_THRESH registers */
@@ -766,42 +824,50 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
rc_buf_thresh_dword[i / 4] |=
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
- DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+ drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
- I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
- I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
- I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(DSCC_RC_BUF_THRESH_0,
- rc_buf_thresh_dword[0]);
- I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
- rc_buf_thresh_dword[1]);
- I915_WRITE(DSCC_RC_BUF_THRESH_1,
- rc_buf_thresh_dword[2]);
- I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
}
} else {
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
- rc_buf_thresh_dword[0]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
- rc_buf_thresh_dword[1]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
- rc_buf_thresh_dword[2]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
- rc_buf_thresh_dword[0]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
- rc_buf_thresh_dword[1]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
- rc_buf_thresh_dword[2]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
}
}
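
For readers tracking the conversion rather than the values: the loop feeding these writes packs the DSC rate-control buffer thresholds, one byte each, into 32-bit register words. A minimal sketch of that packing, assuming the usual fourteen-entry rc_buf_thresh array (illustration only, not code from the patch):

	u32 dword[4] = {};
	int i;

	/* threshold i lands in dword i/4, byte lane i%4 (little-endian) */
	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
		dword[i / 4] |= (u32)vdsc_cfg->rc_buf_thresh[i] <<
				(BITS_PER_BYTE * (i % 4));
	/* dword[0] now holds thresholds 0..3; dword[3] only the last two */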
@@ -815,78 +881,94 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
RC_MAX_QP_SHIFT) |
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
- DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
- rc_range_params_dword[0]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
- rc_range_params_dword[1]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
- rc_range_params_dword[2]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
- rc_range_params_dword[3]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
- rc_range_params_dword[4]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
- rc_range_params_dword[5]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
- rc_range_params_dword[6]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
- rc_range_params_dword[0]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
- rc_range_params_dword[1]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
- rc_range_params_dword[2]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
- rc_range_params_dword[3]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
- rc_range_params_dword[4]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
- rc_range_params_dword[5]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
- rc_range_params_dword[6]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
}
} else {
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
- rc_range_params_dword[0]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
- rc_range_params_dword[1]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
- rc_range_params_dword[2]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
- rc_range_params_dword[3]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
- rc_range_params_dword[4]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
- rc_range_params_dword[5]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
- rc_range_params_dword[6]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
- rc_range_params_dword[0]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
- rc_range_params_dword[1]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
- rc_range_params_dword[2]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
- rc_range_params_dword[3]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
- rc_range_params_dword[4]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
- rc_range_params_dword[5]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
- rc_range_params_dword[6]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
}
}
}
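
Every PPS dword above fans out the same way: DSCA/DSCC for the eDP/DSI transcoder engines, ICL_DSC0/ICL_DSC1 for pipe DSC, with the second engine written only when the stream is split across two VDSC instances. A hypothetical helper (not part of this patch) capturing that pattern in the new intel_de_write() style:

	static void intel_dsc_write_pps_sketch(struct drm_i915_private *dev_priv,
					       const struct intel_crtc_state *crtc_state,
					       i915_reg_t dsca_reg, i915_reg_t dscc_reg,
					       i915_reg_t icl_dsc0_reg,
					       i915_reg_t icl_dsc1_reg, u32 pps_val)
	{
		if (!is_pipe_dsc(crtc_state)) {
			intel_de_write(dev_priv, dsca_reg, pps_val);
			if (crtc_state->dsc.dsc_split)
				intel_de_write(dev_priv, dscc_reg, pps_val);
		} else {
			intel_de_write(dev_priv, icl_dsc0_reg, pps_val);
			if (crtc_state->dsc.dsc_split)
				intel_de_write(dev_priv, icl_dsc1_reg, pps_val);
		}
	}

Callers would pass the per-pipe ICL registers already evaluated, e.g. ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe).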
@@ -912,11 +994,11 @@ void intel_dsc_get_config(struct intel_encoder *encoder,
return;
if (!is_pipe_dsc(crtc_state)) {
- dss_ctl1 = I915_READ(DSS_CTL1);
- dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
} else {
- dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe));
- dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe));
+ dss_ctl1 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+ dss_ctl2 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL2(pipe));
}
crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
@@ -930,9 +1012,10 @@ void intel_dsc_get_config(struct intel_encoder *encoder,
/* PPS1 */
if (!is_pipe_dsc(crtc_state))
- val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1);
+ val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
else
- val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+ val = intel_de_read(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
vdsc_cfg->bits_per_pixel = val;
crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
out:
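
The shift by four works because the PPS bits_per_pixel field is fixed point with four fractional bits, so the integer bpp is the raw register value divided by 16. A one-line illustration (example values, not from the patch):

	u32 raw = 0x140;	/* PPS1 bits_per_pixel, 4 fractional bits */
	u32 bpp = raw >> 4;	/* 0x140 / 16 == 20 bpp */
	u32 frac = raw & 0xf;	/* fractional sixteenths, 0 here */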
@@ -1013,8 +1096,8 @@ void intel_dsc_enable(struct intel_encoder *encoder,
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
- I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
- I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val);
}
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
@@ -1035,17 +1118,17 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
}
- dss_ctl1_val = I915_READ(dss_ctl1_reg);
+ dss_ctl1_val = intel_de_read(dev_priv, dss_ctl1_reg);
if (dss_ctl1_val & JOINER_ENABLE)
dss_ctl1_val &= ~JOINER_ENABLE;
- I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val);
- dss_ctl2_val = I915_READ(dss_ctl2_reg);
+ dss_ctl2_val = intel_de_read(dev_priv, dss_ctl2_reg);
if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
RIGHT_BRANCH_VDSC_ENABLE);
- I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val);
/* Disable Power wells for VDSC/joining */
intel_display_power_put_unchecked(dev_priv,
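
The disable path is a plain read-modify-write in the new accessor style. As a sketch of the idiom (a hypothetical wrapper, not an API this patch adds):

	static u32 intel_de_rmw_sketch(struct drm_i915_private *dev_priv,
				       i915_reg_t reg, u32 clear, u32 set)
	{
		u32 old = intel_de_read(dev_priv, reg);

		intel_de_write(dev_priv, reg, (old & ~clear) | set);
		return old;	/* callers can inspect what was cleared */
	}

intel_dsc_disable() above is exactly this shape, with clear = JOINER_ENABLE and then clear = LEFT/RIGHT_BRANCH_VDSC_ENABLE, set = 0.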
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 2ff7293986d4..be333699c515 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -9,6 +9,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_vga.h"
static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
@@ -36,16 +37,17 @@ void intel_vga_disable(struct drm_i915_private *dev_priv)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
+ intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE);
+ intel_de_posting_read(dev_priv, vga_reg);
}
void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
{
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Something enabled VGA plane, disabling it\n");
intel_vga_disable(dev_priv);
}
}
@@ -98,7 +100,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
u16 gmch_ctrl;
if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
- DRM_ERROR("failed to read control word\n");
+ drm_err(&i915->drm, "failed to read control word\n");
return -EIO;
}
@@ -111,7 +113,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
- DRM_ERROR("failed to write control word\n");
+ drm_err(&i915->drm, "failed to write control word\n");
return -EIO;
}
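
The logging half of the conversion follows one rule throughout: the struct-aware macros take a struct drm_device *, derived from the driver-private pointer. A minimal sketch of the mapping (the encoder variable is only an assumed source for the device pointer, and the messages are placeholders):

	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	drm_err(&i915->drm, "hard failure\n");		/* was DRM_ERROR() */
	drm_dbg_kms(&i915->drm, "modeset detail\n");	/* was DRM_DEBUG_KMS() */
	drm_info(&i915->drm, "state dump\n");		/* was DRM_INFO() */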
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index daf4fc3dab6f..f4c362dc6e15 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -85,7 +85,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
mask, 100))
- DRM_ERROR("DPI FIFOs are not empty\n");
+ drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n");
}
static void write_data(struct drm_i915_private *dev_priv,
@@ -100,7 +100,7 @@ static void write_data(struct drm_i915_private *dev_priv,
for (j = 0; j < min_t(u32, len - i, 4); j++)
val |= *data++ << 8 * j;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -111,7 +111,7 @@ static void read_data(struct drm_i915_private *dev_priv,
u32 i, j;
for (i = 0; i < len; i += 4) {
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
for (j = 0; j < min_t(u32, len - i, 4); j++)
*data++ = val >> 8 * j;
@@ -154,29 +154,34 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
if (packet.payload_length) {
if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
data_mask, 50))
- DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
packet.payload_length);
}
if (msg->rx_len) {
- I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port),
+ GEN_READ_DATA_AVAIL);
}
if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 50)) {
- DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for HS/LP CTRL FIFO !full\n");
}
- I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
+ intel_de_write(dev_priv, ctrl_reg,
+ header[2] << 16 | header[1] << 8 | header[0]);
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
data_mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
}
@@ -223,17 +228,19 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
cmd |= DPI_LP_MODE;
/* clear bit */
- I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
- if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
- DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd);
+ if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port)))
+ drm_dbg_kms(&dev_priv->drm,
+ "Same special packet %02x twice in a row.\n", cmd);
- I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
+ intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
- DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+ drm_err(&dev_priv->drm,
+ "Video mode command 0x%08x send failed.\n", cmd);
return 0;
}
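
dpi_send_cmd() is a textbook clear-kick-poll handshake on a latched status bit; distilled from the converted hunk above:

	/* 1. clear the latched completion bit from any earlier packet */
	intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
	/* 2. kick the special packet */
	intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
	/* 3. poll for the bit to latch again; non-zero return means timeout */
	if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
				  SPL_PKT_SENT_INTERRUPT, 100))
		drm_err(&dev_priv->drm,
			"Video mode command 0x%08x send failed.\n", cmd);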
@@ -265,7 +272,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (fixed_mode) {
@@ -328,36 +335,37 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
- I915_WRITE(MIPI_CTRL(port), tmp | GLK_MIPIIO_ENABLE);
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ tmp | GLK_MIPIIO_ENABLE);
}
/* Put the IO into reset */
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
tmp &= ~GLK_LP_WAKE;
else
tmp |= GLK_LP_WAKE;
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 20))
- DRM_ERROR("MIPIO port is powergated\n");
+ drm_err(&dev_priv->drm, "MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
cold_boot |=
- !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
+ !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY);
}
return cold_boot;
@@ -374,48 +382,49 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not ON\n");
+ drm_err(&dev_priv->drm, "PHY is not ON\n");
}
/* Get IO out of reset */
- val = I915_READ(MIPI_CTRL(PORT_A));
- I915_WRITE(MIPI_CTRL(PORT_A), val | GLK_MIPIIO_RESET_RELEASED);
+ val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ val | GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= DEVICE_READY;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_ULPS_NOT_ACTIVE, 20))
- DRM_ERROR("ULPS not active\n");
+ drm_err(&dev_priv->drm, "ULPS not active\n");
/* Exit ULPS */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
/* Enter Normal Mode */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
- val = I915_READ(MIPI_CTRL(port));
+ val = intel_de_read(dev_priv, MIPI_CTRL(port));
val &= ~GLK_LP_WAKE;
- I915_WRITE(MIPI_CTRL(port), val);
+ intel_de_write(dev_priv, MIPI_CTRL(port), val);
}
}
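
The ULPS choreography above (enter, wait, exit, normal operation) repeats one field update over and over. A hypothetical helper showing that single step with the new accessors (sketch only, not part of the patch):

	static void glk_dsi_set_ulps_sketch(struct drm_i915_private *dev_priv,
					    enum port port, u32 state)
	{
		u32 val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));

		val &= ~ULPS_STATE_MASK;
		val |= state | DEVICE_READY;	/* ULPS_STATE_ENTER/EXIT/... */
		intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
	}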
@@ -423,14 +432,16 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_DATA_LANE_STOP_STATE, 20))
- DRM_ERROR("Date lane not in STOP state\n");
+ drm_err(&dev_priv->drm,
+ "Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT, 20))
- DRM_ERROR("D-PHY not entering LP-11 state\n");
+ drm_err(&dev_priv->drm,
+ "D-PHY not entering LP-11 state\n");
}
}
@@ -441,23 +452,24 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(BXT_MIPI_PORT_CTRL(port));
- I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+ intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ val | LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
/* Clear ULPS and set device ready */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
usleep_range(2000, 2500);
val |= DEVICE_READY;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
}
}
@@ -468,7 +480,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_flisdsi_get(dev_priv);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
@@ -481,21 +493,25 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_ENTER);
usleep_range(2500, 3000);
/* Enable MIPI PHY transparent latch
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- val = I915_READ(MIPI_PORT_CTRL(PORT_A));
- I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
+ intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
+ val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_EXIT);
usleep_range(2500, 3000);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY);
usleep_range(2500, 3000);
}
}
@@ -521,24 +537,25 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Enter ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
}
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not turning OFF\n");
+ drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 20))
- DRM_ERROR("MIPI IO Port is not powergated\n");
+ drm_err(&dev_priv->drm,
+ "MIPI IO Port is not powergated\n");
}
}
@@ -550,22 +567,22 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
u32 tmp;
/* Put the IO into reset */
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not turning OFF\n");
+ drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
}
/* Clear MIPI mode */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~GLK_MIPIIO_ENABLE;
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
}
@@ -581,23 +598,23 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_EXIT);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
/*
@@ -607,14 +624,14 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
- DRM_ERROR("DSI LP not going Low\n");
+ drm_err(&dev_priv->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- val = I915_READ(port_ctrl);
- I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
}
@@ -631,18 +648,20 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
u32 temp;
if (IS_GEN9_LP(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports) {
- temp = I915_READ(MIPI_CTRL(port));
+ temp = intel_de_read(dev_priv,
+ MIPI_CTRL(port));
temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
BXT_PIXEL_OVERLAP_CNT_SHIFT;
- I915_WRITE(MIPI_CTRL(port), temp);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ temp);
}
} else {
- temp = I915_READ(VLV_CHICKEN_3);
+ temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
- I915_WRITE(VLV_CHICKEN_3, temp);
+ intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
}
}
@@ -651,7 +670,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
- temp = I915_READ(port_ctrl);
+ temp = intel_de_read(dev_priv, port_ctrl);
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
@@ -671,8 +690,8 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
temp |= DITHERING_ENABLE;
/* assert ip_tg_enable signal */
- I915_WRITE(port_ctrl, temp | DPI_ENABLE);
- POSTING_READ(port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE);
+ intel_de_posting_read(dev_priv, port_ctrl);
}
}
@@ -689,9 +708,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
u32 temp;
/* de-assert ip_tg_enable signal */
- temp = I915_READ(port_ctrl);
- I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
- POSTING_READ(port_ctrl);
+ temp = intel_de_read(dev_priv, port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+ intel_de_posting_read(dev_priv, port_ctrl);
}
}
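
Note that the POSTING_READ() calls become intel_de_posting_read(): a throwaway read of the register just written, flushing the posted MMIO write to hardware before the code moves on. Distilled from the port disable path above:

	temp = intel_de_read(dev_priv, port_ctrl);
	intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
	intel_de_posting_read(dev_priv, port_ctrl); /* write lands before return */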
@@ -753,7 +772,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
u32 val;
bool glk_cold_boot = false;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -771,22 +790,22 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
- val | MIPIO_RST_CTRL);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+ val | MIPIO_RST_CTRL);
/* Power up DSI regulator */
- I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
/* Disable DPOunit clock gating, can stall pipe */
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val |= DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -820,7 +839,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* recommendation, port should be enabled before plane & pipe */
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ intel_de_write(dev_priv,
+ MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
} else {
@@ -838,6 +858,15 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
+static void bxt_dsi_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ WARN_ON(crtc_state->has_pch_encoder);
+
+ intel_crtc_vblank_on(crtc_state);
+}
+
/*
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
@@ -886,7 +915,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (IS_GEN9_LP(dev_priv)) {
intel_crtc_vblank_off(old_crtc_state);
@@ -917,13 +946,14 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv)) {
/* Power down DSI regulator to save power */
- I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL,
+ HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
- val & ~MIPIO_RST_CTRL);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+ val & ~MIPIO_RST_CTRL);
}
if (IS_GEN9_LP(dev_priv)) {
@@ -933,9 +963,9 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
vlv_dsi_pll_disable(encoder);
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
/* Assert reset */
@@ -960,7 +990,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum port port;
bool active = false;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
@@ -979,7 +1009,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
+ bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -988,26 +1018,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
- u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ u32 tmp = intel_de_read(dev_priv,
+ MIPI_DSI_FUNC_PRG(port));
enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
}
if (!enabled)
continue;
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
continue;
if (IS_GEN9_LP(dev_priv)) {
- u32 tmp = I915_READ(MIPI_CTRL(port));
+ u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
- if (WARN_ON(tmp > PIPE_C))
+ if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C))
continue;
*pipe = tmp;
@@ -1051,11 +1082,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
* encoder->get_hw_state() returns true.
*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
break;
}
- fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
@@ -1067,21 +1098,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
- I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_HACTIVE(port));
adjusted_mode->crtc_vdisplay =
- I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
- I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_VTOTAL(port));
hactive = adjusted_mode->crtc_hdisplay;
- hfp = I915_READ(MIPI_HFP_COUNT(port));
+ hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port));
/*
* Meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes
*/
- hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
- hbp = I915_READ(MIPI_HBP_COUNT(port));
+ hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port));
+ hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port));
/* harizontal values are in terms of high speed byte clock */
hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
@@ -1098,9 +1132,9 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
}
/* vertical values are in terms of lines */
- vfp = I915_READ(MIPI_VFP_COUNT(port));
- vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
- vbp = I915_READ(MIPI_VBP_COUNT(port));
+ vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
+ vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
+ vbp = intel_de_read(dev_priv, MIPI_VBP_COUNT(port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
@@ -1191,7 +1225,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
@@ -1268,26 +1302,29 @@ static void set_dsi_timings(struct drm_encoder *encoder,
* vactive, as they are calculated per channel basis,
* whereas these values should be based on resolution.
*/
- I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
- adjusted_mode->crtc_hdisplay);
- I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
- adjusted_mode->crtc_vdisplay);
- I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
- adjusted_mode->crtc_vtotal);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port),
+ adjusted_mode->crtc_hdisplay);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port),
+ adjusted_mode->crtc_vdisplay);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port),
+ adjusted_mode->crtc_vtotal);
}
- I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
- I915_WRITE(MIPI_HFP_COUNT(port), hfp);
+ intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port),
+ hactive);
+ intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp);
/* meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes */
- I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
- I915_WRITE(MIPI_HBP_COUNT(port), hbp);
+ intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port),
+ hsync);
+ intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp);
/* vertical values are in terms of lines */
- I915_WRITE(MIPI_VFP_COUNT(port), vfp);
- I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
- I915_WRITE(MIPI_VBP_COUNT(port), vbp);
+ intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp);
+ intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port),
+ vsync);
+ intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp);
}
}
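
The horizontal counts written here are in high-speed TX byte clocks, not pixels. Roughly the shape of the pixel-to-byteclk conversion the values come from (burst_mode_ratio is in hundredths; treat this as a sketch rather than the exact helper):

	static u16 txbyteclkhs_sketch(u16 pixels, int bpp, int lane_count,
				      u16 burst_mode_ratio)
	{
		/* pixels * bpp bits, scaled by burst ratio, spread over lanes */
		return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
						 8 * 100), lane_count);
	}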
@@ -1322,7 +1359,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
u32 val, tmp;
u16 mode_hdisplay;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(intel_crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
@@ -1338,35 +1375,35 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
*/
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp |
- ESCAPE_CLOCK_DIVIDER_1);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
- I915_WRITE(MIPI_CTRL(port), tmp |
- READ_REQUEST_PRIORITY_HIGH);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ tmp | READ_REQUEST_PRIORITY_HIGH);
} else if (IS_GEN9_LP(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
tmp |= BXT_PIPE_SELECT(pipe);
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
/* XXX: why here, why like this? handling in irq handler?! */
- I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
- I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff);
+ intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff);
- I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, MIPI_DPHY_PARAM(port),
+ intel_dsi->dphy_reg);
- I915_WRITE(MIPI_DPI_RESOLUTION(port),
- adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
- mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port),
+ adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
set_dsi_timings(encoder, adjusted_mode);
@@ -1393,7 +1430,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
@@ -1414,28 +1451,24 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
- txbyteclkhs(adjusted_mode->crtc_htotal, bpp,
- intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
+ intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
} else {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
- txbyteclkhs(adjusted_mode->crtc_vtotal *
- adjusted_mode->crtc_htotal,
- bpp, intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
+ intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
}
- I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
- I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
- intel_dsi->turn_arnd_val);
- I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
- intel_dsi->rst_timer_val);
+ intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port),
+ intel_dsi->lp_rx_timeout);
+ intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_dsi->turn_arnd_val);
+ intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port),
+ intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(port),
- txclkesc(intel_dsi->escape_clk_div, 100));
+ intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ txclkesc(intel_dsi->escape_clk_div, 100));
if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
/*
@@ -1444,24 +1477,25 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* getting used. So write the other port
* if not in dual link mode.
*/
- I915_WRITE(MIPI_INIT_COUNT(port ==
- PORT_A ? PORT_C : PORT_A),
- intel_dsi->init_count);
+ intel_de_write(dev_priv,
+ MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A),
+ intel_dsi->init_count);
}
/* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp);
/* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
+ intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
- I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
- intel_dsi->hs_to_lp_count);
+ intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock.
* the number of byte clocks occupied in one low power clock.
@@ -1469,14 +1503,15 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
* ) / 105.???
*/
- I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+ intel_de_write(dev_priv, MIPI_LP_BYTECLK(port),
+ intel_dsi->lp_byte_clk);
if (IS_GEMINILAKE(dev_priv)) {
- I915_WRITE(MIPI_TLPX_TIME_COUNT(port),
- intel_dsi->lp_byte_clk);
+ intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port),
+ intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
- I915_WRITE(MIPI_CLK_LANE_TIMING(port),
- intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port),
+ intel_dsi->dphy_reg);
}
/* the bw essential for transmitting 16 long packets containing
@@ -1484,21 +1519,18 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
- I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
+ intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port),
+ intel_dsi->bw_timer);
- I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
- intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
- intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+ intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
/* Some panels might have resolution which is not a
* multiple of 64 like 1366 x 768. Enable RANDOM
* resolution support for such panels by default */
- I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
- intel_dsi->video_frmt_cfg_bits |
- intel_dsi->video_mode_format |
- IP_TG_CONFIG |
- RANDOM_DPI_DISPLAY_RESOLUTION);
+ intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port),
+ intel_dsi->video_frmt_cfg_bits | intel_dsi->video_mode_format | IP_TG_CONFIG | RANDOM_DPI_DISPLAY_RESOLUTION);
}
}
@@ -1514,19 +1546,19 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
if (IS_GEN9_LP(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
- val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
val &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
}
}
@@ -1559,59 +1591,6 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static enum drm_panel_orientation
-vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_encoder *encoder = connector->encoder;
- enum intel_display_power_domain power_domain;
- enum drm_panel_orientation orientation;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- intel_wakeref_t wakeref;
- enum pipe pipe;
- u32 val;
-
- if (!encoder->get_hw_state(encoder, &pipe))
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- plane = to_intel_plane(crtc->base.primary);
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- val = I915_READ(DSPCNTR(plane->i9xx_plane));
-
- if (!(val & DISPLAY_PLANE_ENABLE))
- orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- else if (val & DISPPLANE_ROTATE_180)
- orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
- else
- orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return orientation;
-}
-
-static enum drm_panel_orientation
-vlv_dsi_get_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum drm_panel_orientation orientation;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- orientation = vlv_dsi_get_hw_panel_orientation(connector);
- if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- return orientation;
- }
-
- return intel_dsi_get_panel_orientation(connector);
-}
-
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1628,10 +1607,9 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- connector->base.display_info.panel_orientation =
- vlv_dsi_get_panel_orientation(connector);
- drm_connector_init_panel_orientation_property(
+ drm_connector_set_panel_orientation_with_quirk(
&connector->base,
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
@@ -1703,7 +1681,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
+ drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ prepare_cnt);
prepare_cnt = PREPARE_CNT_MAX;
}
@@ -1723,7 +1702,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
exit_zero_cnt += 1;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ exit_zero_cnt);
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
}
@@ -1733,7 +1713,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* ui_den, ui_num * mul);
if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ clk_zero_cnt);
clk_zero_cnt = CLK_ZERO_CNT_MAX;
}
@@ -1742,7 +1723,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (trail_cnt > TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
+ drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
}
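
Every D-PHY counter in this function follows the same compute-warn-clamp shape; schematically (all names below are placeholders, not registers from the patch):

	u32 cnt = DIV_ROUND_UP(time_ns * ui_den, ui_num * mul);

	if (cnt > CNT_FIELD_MAX) {	/* hypothetical field limit */
		drm_dbg_kms(&dev_priv->drm, "count too high %u\n", cnt);
		cnt = CNT_FIELD_MAX;	/* clamp into the register field */
	}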
@@ -1817,7 +1799,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
enum port port;
enum pipe pipe;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -1849,6 +1831,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
+ if (IS_GEN9_LP(dev_priv))
+ intel_encoder->enable = bxt_dsi_enable;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1894,18 +1878,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- DRM_DEBUG_KMS("no device found\n");
+ drm_dbg_kms(&dev_priv->drm, "no device found\n");
goto err;
}
/* Use clock read-back from current hw-state for fastboot */
current_mode = intel_encoder_current_mode(intel_encoder);
if (current_mode) {
- DRM_DEBUG_KMS("Calculated pclk %d GOP %d\n",
- intel_dsi->pclk, current_mode->clock);
+ drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ intel_dsi->pclk, current_mode->clock);
if (intel_fuzzy_clock_check(intel_dsi->pclk,
current_mode->clock)) {
- DRM_DEBUG_KMS("Using GOP pclk\n");
+ drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
intel_dsi->pclk = current_mode->clock;
}
@@ -1933,7 +1917,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
+ drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 6b89e67b120f..d0a514301575 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -64,7 +64,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
- DRM_ERROR("DSI CLK Out of Range\n");
+ drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
return -ECHRNG;
}
@@ -126,7 +126,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
if (ret) {
- DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+ drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
return ret;
}
@@ -138,8 +138,8 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
- DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
- config->dsi_pll.div, config->dsi_pll.ctrl);
+ drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ config->dsi_pll.div, config->dsi_pll.ctrl);
return 0;
}
@@ -149,7 +149,7 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
@@ -169,12 +169,12 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DSI_PLL_LOCK, 20)) {
vlv_cck_put(dev_priv);
- DRM_ERROR("DSI PLL lock failed\n");
+ drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
return;
}
vlv_cck_put(dev_priv);
- DRM_DEBUG_KMS("DSI PLL locked\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
}
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
@@ -182,7 +182,7 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
@@ -201,7 +201,7 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
u32 mask;
mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;
if (!enabled)
@@ -215,15 +215,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
- val = I915_READ(BXT_DSI_PLL_CTL);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
if (IS_GEMINILAKE(dev_priv)) {
if (!(val & BXT_DSIA_16X_MASK)) {
- DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
@@ -236,11 +238,11 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
val &= ~BXT_DSI_PLL_DO_ENABLE;
- I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
/*
* PLL lock should deassert within 200us.
@@ -248,7 +250,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
*/
if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
- DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for PLL lock deassertion\n");
}
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
@@ -263,7 +266,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
int i;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
@@ -292,7 +295,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
p--;
if (!p) {
- DRM_ERROR("wrong P1 divisor\n");
+ drm_err(&dev_priv->drm, "wrong P1 divisor\n");
return 0;
}
@@ -302,7 +305,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
}
if (i == ARRAY_SIZE(lfsr_converts)) {
- DRM_ERROR("wrong m_seed programmed\n");
+ drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
return 0;
}
@@ -325,7 +328,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
+ config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
@@ -333,7 +336,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
- DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
+ drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
return pclk;
}
@@ -343,11 +346,10 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- temp = I915_READ(MIPI_CTRL(port));
+ temp = intel_de_read(dev_priv, MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(port), temp |
- intel_dsi->escape_clk_div <<
- ESCAPE_CLOCK_DIVIDER_SHIFT);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
static void glk_dsi_program_esc_clock(struct drm_device *dev,
@@ -393,8 +395,10 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
else
txesc2_div = 10;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
@@ -412,7 +416,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
u32 mipi_8by3_divider;
/* Clear old configurations */
- tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -448,7 +452,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
- I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
}
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
@@ -478,10 +482,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
- DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
+ drm_err(&dev_priv->drm,
+ "Cant get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
- DRM_DEBUG_KMS("DSI PLL calculation is Done!!\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -507,11 +512,11 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* Configure PLL values */
- I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
- POSTING_READ(BXT_DSI_PLL_CTL);
+ intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
if (IS_BROXTON(dev_priv)) {
@@ -522,18 +527,19 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
}
/* Enable DSI PLL */
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
val |= BXT_DSI_PLL_DO_ENABLE;
- I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
- DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSI PLL to lock\n");
return;
}
- DRM_DEBUG_KMS("DSI PLL locked\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
}
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@@ -544,20 +550,20 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
/* Clear old configurations */
if (IS_BROXTON(dev_priv)) {
- tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
- I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- tmp = I915_READ(MIPIO_TXESC_CLK_DIV1);
+ tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
- tmp = I915_READ(MIPIO_TXESC_CLK_DIV2);
+ tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
}
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
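The hunks above are a mechanical conversion: the global DRM_DEBUG_KMS()/DRM_ERROR() macros become the device-aware drm_dbg_kms()/drm_err() helpers, and raw I915_READ()/I915_WRITE()/POSTING_READ() accesses become intel_de_read()/intel_de_write()/intel_de_posting_read(). A minimal sketch of the before/after shape, assuming the usual i915 headers are in scope:

    /* Sketch only: the pattern applied throughout vlv_dsi.c and
     * vlv_dsi_pll.c.
     * old:
     *     val = I915_READ(BXT_DSI_PLL_ENABLE);
     *     DRM_DEBUG_KMS("pll %08x\n", val);
     * new (messages are tagged with the owning drm_device):
     */
    static void log_pll_state(struct drm_i915_private *dev_priv)
    {
            u32 val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);

            drm_dbg_kms(&dev_priv->drm, "pll %08x\n", val);
            intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
    }

On a multi-GPU system the old macros gave no way to tell which device emitted a message; the new helpers prefix each line with the device name.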
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 81366aa4812b..0598e5382a1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -217,7 +217,7 @@ static void clear_pages_worker(struct work_struct *work)
0);
out_request:
if (unlikely(err)) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err = 0;
}
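i915_request_skip() is replaced by i915_request_set_error_once(), whose name signals that only the first error recorded on a request's fence sticks. A self-contained sketch of that set-once idiom (illustrative, not the driver's implementation):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Record an error only if none has been recorded yet. */
    static void set_error_once(atomic_int *slot, int error)
    {
            int expected = 0;

            /* compare-and-swap succeeds only while the slot is still 0 */
            atomic_compare_exchange_strong(slot, &expected, error);
    }

    int main(void)
    {
            atomic_int err = 0;

            set_error_once(&err, -5);   /* first error wins */
            set_error_once(&err, -22);  /* ignored */
            printf("%d\n", atomic_load(&err));  /* prints -5 */
            return 0;
    }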
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 151a1e8ae36a..68326ad3b2e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -67,14 +67,11 @@
#include <linux/log2.h>
#include <linux/nospec.h>
-#include <drm/i915_drm.h>
-
#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
+#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
-#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
#include "i915_gem_context.h"
@@ -245,7 +242,6 @@ static void __free_engines(struct i915_gem_engines *e, unsigned int count)
if (!e->engines[count])
continue;
- RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
intel_context_put(e->engines[count]);
}
kfree(e);
@@ -258,7 +254,51 @@ static void free_engines(struct i915_gem_engines *e)
static void free_engines_rcu(struct rcu_head *rcu)
{
- free_engines(container_of(rcu, struct i915_gem_engines, rcu));
+ struct i915_gem_engines *engines =
+ container_of(rcu, struct i915_gem_engines, rcu);
+
+ i915_sw_fence_fini(&engines->fence);
+ free_engines(engines);
+}
+
+static int __i915_sw_fence_call
+engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct i915_gem_engines *engines =
+ container_of(fence, typeof(*engines), fence);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ if (!list_empty(&engines->link)) {
+ struct i915_gem_context *ctx = engines->ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->stale.lock, flags);
+ list_del(&engines->link);
+ spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ }
+ i915_gem_context_put(engines->ctx);
+ break;
+
+ case FENCE_FREE:
+ init_rcu_head(&engines->rcu);
+ call_rcu(&engines->rcu, free_engines_rcu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct i915_gem_engines *alloc_engines(unsigned int count)
+{
+ struct i915_gem_engines *e;
+
+ e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ i915_sw_fence_init(&e->fence, engines_notify);
+ return e;
}
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
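The new alloc_engines() centralises the flexible-array allocation and attaches an i915_sw_fence whose notify callback (engines_notify() above) unlinks the set on FENCE_COMPLETE and defers the actual free to RCU on FENCE_FREE. The allocation itself uses struct_size() to avoid integer overflow; in miniature, assuming kernel headers and illustrative names:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct engines_sketch {
            unsigned int num_engines;
            void *engines[];                /* flexible array member */
    };

    static struct engines_sketch *alloc_sketch(unsigned int count)
    {
            /* struct_size() = sizeof(*e) + count * sizeof(e->engines[0]),
             * saturating on overflow rather than wrapping */
            struct engines_sketch *e;

            e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
            if (e)
                    e->num_engines = count;
            return e;
    }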
@@ -268,11 +308,10 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
struct i915_gem_engines *e;
enum intel_engine_id id;
- e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+ e = alloc_engines(I915_NUM_ENGINES);
if (!e)
return ERR_PTR(-ENOMEM);
- init_rcu_head(&e->rcu);
for_each_engine(engine, gt, id) {
struct intel_context *ce;
@@ -306,7 +345,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
- free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
@@ -421,7 +459,7 @@ static struct intel_engine_cs *__active_engine(struct i915_request *rq)
}
engine = NULL;
- if (i915_request_is_active(rq) && !rq->fence.error)
+ if (i915_request_is_active(rq) && rq->fence.error != -EIO)
engine = rq->engine;
spin_unlock_irq(&locked->active.lock);
@@ -452,7 +490,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
return engine;
}
-static void kill_context(struct i915_gem_context *ctx)
+static void kill_engines(struct i915_gem_engines *engines)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -464,7 +502,7 @@ static void kill_context(struct i915_gem_context *ctx)
* However, we only care about pending requests, so only include
* engines on which there are incomplete requests.
*/
- for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+ for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
if (intel_context_set_banned(ce))
@@ -486,8 +524,82 @@ static void kill_context(struct i915_gem_context *ctx)
* the context from the GPU, we have to resort to a full
* reset. We hope the collateral damage is worth it.
*/
- __reset_context(ctx, engine);
+ __reset_context(engines->ctx, engine);
+ }
+}
+
+static void kill_stale_engines(struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *pos, *next;
+
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
+ if (!i915_sw_fence_await(&pos->fence)) {
+ list_del_init(&pos->link);
+ continue;
+ }
+
+ spin_unlock_irq(&ctx->stale.lock);
+
+ kill_engines(pos);
+
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
+ list_safe_reset_next(pos, next, link);
+ list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
+
+ i915_sw_fence_complete(&pos->fence);
}
+ spin_unlock_irq(&ctx->stale.lock);
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+ kill_stale_engines(ctx);
+}
+
+static void engines_idle_release(struct i915_gem_context *ctx,
+ struct i915_gem_engines *engines)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ INIT_LIST_HEAD(&engines->link);
+
+ engines->ctx = i915_gem_context_get(ctx);
+
+ for_each_gem_engine(ce, engines, it) {
+ struct dma_fence *fence;
+ int err = 0;
+
+ /* serialises with execbuf */
+ set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+ if (!intel_context_pin_if_active(ce))
+ continue;
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ err = i915_sw_fence_await_dma_fence(&engines->fence,
+ fence, 0,
+ GFP_KERNEL);
+ dma_fence_put(fence);
+ }
+ intel_context_unpin(ce);
+ if (err < 0)
+ goto kill;
+ }
+
+ spin_lock_irq(&ctx->stale.lock);
+ if (!i915_gem_context_is_closed(ctx))
+ list_add_tail(&engines->link, &ctx->stale.engines);
+ spin_unlock_irq(&ctx->stale.lock);
+
+kill:
+ if (list_empty(&engines->link)) /* raced, already closed */
+ kill_engines(engines);
+
+ i915_sw_fence_commit(&engines->fence);
}
static void set_closed_name(struct i915_gem_context *ctx)
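kill_stale_engines() above shows the locking bracket for walking a list whose entries can complete concurrently: i915_sw_fence_await() takes a completion reference so the entry cannot be freed, the spinlock is dropped while the heavyweight kill_engines() runs, and list_safe_reset_next() revalidates the iterator before the reference is released. The shape, reduced to its essentials (a sketch with a hypothetical do_work(), not a drop-in):

    spin_lock_irq(&lock);
    list_for_each_entry_safe(pos, next, &head, link) {
            if (!i915_sw_fence_await(&pos->fence))
                    continue;                       /* already completing */

            spin_unlock_irq(&lock);
            do_work(pos);                           /* safe: fence held */
            spin_lock_irq(&lock);

            list_safe_reset_next(pos, next, link);  /* list may have changed */
            list_del_init(&pos->link);
            i915_sw_fence_complete(&pos->fence);
    }
    spin_unlock_irq(&lock);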
@@ -511,11 +623,16 @@ static void context_close(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
+ /* Flush any concurrent set_engines() */
+ mutex_lock(&ctx->engines_mutex);
+ engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
i915_gem_context_set_closed(ctx);
- set_closed_name(ctx);
+ mutex_unlock(&ctx->engines_mutex);
mutex_lock(&ctx->mutex);
+ set_closed_name(ctx);
+
vm = i915_gem_context_vm(ctx);
if (vm)
i915_vm_close(vm);
@@ -604,6 +721,9 @@ __create_context(struct drm_i915_private *i915)
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
+ spin_lock_init(&ctx->stale.lock);
+ INIT_LIST_HEAD(&ctx->stale.engines);
+
mutex_init(&ctx->engines_mutex);
e = default_engines(ctx);
if (IS_ERR(e)) {
@@ -637,23 +757,30 @@ err_free:
return ERR_PTR(err);
}
-static void
+static int
context_apply_all(struct i915_gem_context *ctx,
- void (*fn)(struct intel_context *ce, void *data),
+ int (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ int err = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
- fn(ce, data);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ err = fn(ce, data);
+ if (err)
+ break;
+ }
i915_gem_context_unlock_engines(ctx);
+
+ return err;
}
-static void __apply_ppgtt(struct intel_context *ce, void *vm)
+static int __apply_ppgtt(struct intel_context *ce, void *vm)
{
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(vm);
+ return 0;
}
static struct i915_address_space *
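context_apply_all() and its callbacks are converted from void to int so a per-engine failure can abort the walk; this is what lets set_ringsize()/get_ringsize() further down reuse the same iterator. The early-exit shape in a self-contained form:

    #include <stddef.h>

    /* Apply fn to each item, stopping at the first non-zero return. */
    static int apply_all(int (*fn)(int item, void *data),
                         const int *items, size_t n, void *data)
    {
            int err = 0;
            size_t i;

            for (i = 0; i < n && !err; i++)
                    err = fn(items[i], data);
            return err;
    }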
@@ -691,9 +818,10 @@ static void __set_timeline(struct intel_timeline **dst,
intel_timeline_put(old);
}
-static void __apply_timeline(struct intel_context *ce, void *timeline)
+static int __apply_timeline(struct intel_context *ce, void *timeline)
{
__set_timeline(&ce->timeline, timeline);
+ return 0;
}
static void __assign_timeline(struct i915_gem_context *ctx,
@@ -724,8 +852,8 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt)) {
- DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
- PTR_ERR(ppgtt));
+ drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
+ PTR_ERR(ppgtt));
context_close(ctx);
return ERR_CAST(ppgtt);
}
@@ -767,20 +895,15 @@ static void init_contexts(struct i915_gem_contexts *gc)
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
init_contexts(&i915->gem.contexts);
- DRM_DEBUG_DRIVER("%s context support initialized\n",
- DRIVER_CAPS(i915)->has_logical_contexts ?
- "logical" : "fake");
+ drm_dbg(&i915->drm, "%s context support initialized\n",
+ DRIVER_CAPS(i915)->has_logical_contexts ?
+ "logical" : "fake");
}
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
flush_work(&i915->gem.contexts.free_work);
-}
-
-static int vm_idr_cleanup(int id, void *p, void *data)
-{
- i915_vm_put(p);
- return 0;
+ rcu_barrier(); /* and flush the left over RCU frees */
}
static int gem_context_register(struct i915_gem_context *ctx,
@@ -820,8 +943,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
- mutex_init(&file_priv->vm_idr_lock);
- idr_init_base(&file_priv->vm_idr, 1);
+ /* 0 reserved for invalid/unassigned ppgtt */
+ xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx)) {
@@ -839,9 +962,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
err_ctx:
context_close(ctx);
err:
- idr_destroy(&file_priv->vm_idr);
+ xa_destroy(&file_priv->vm_xa);
xa_destroy(&file_priv->context_xa);
- mutex_destroy(&file_priv->vm_idr_lock);
return err;
}
@@ -849,6 +971,7 @@ void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx;
@@ -856,9 +979,9 @@ void i915_gem_context_close(struct drm_file *file)
context_close(ctx);
xa_destroy(&file_priv->context_xa);
- idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
- idr_destroy(&file_priv->vm_idr);
- mutex_destroy(&file_priv->vm_idr_lock);
+ xa_for_each(&file_priv->vm_xa, idx, vm)
+ i915_vm_put(vm);
+ xa_destroy(&file_priv->vm_xa);
contexts_flush_free(&i915->gem.contexts);
}
@@ -870,6 +993,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_vm_control *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_ppgtt *ppgtt;
+ u32 id;
int err;
if (!HAS_FULL_PPGTT(i915))
@@ -892,23 +1016,15 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
goto err_put;
}
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
+ xa_limit_32b, GFP_KERNEL);
if (err)
goto err_put;
- err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
- if (err < 0)
- goto err_unlock;
-
- GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
-
- mutex_unlock(&file_priv->vm_idr_lock);
-
- args->vm_id = err;
+ GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+ args->vm_id = id;
return 0;
-err_unlock:
- mutex_unlock(&file_priv->vm_idr_lock);
err_put:
i915_vm_put(&ppgtt->vm);
return err;
@@ -920,8 +1036,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_vm_control *args = data;
struct i915_address_space *vm;
- int err;
- u32 id;
if (args->flags)
return -EINVAL;
@@ -929,17 +1043,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
if (args->extensions)
return -EINVAL;
- id = args->vm_id;
- if (!id)
- return -ENOENT;
-
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (err)
- return err;
-
- vm = idr_remove(&file_priv->vm_idr, id);
-
- mutex_unlock(&file_priv->vm_idr_lock);
+ vm = xa_erase(&file_priv->vm_xa, args->vm_id);
if (!vm)
return -ENOENT;
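The mutex-protected IDR (vm_idr plus vm_idr_lock) becomes a self-locking xarray; XA_FLAGS_ALLOC1 makes xa_alloc() hand out ids starting at 1, so 0 stays reserved for "no ppgtt" without the old GEM_BUG_ON dance. A minimal usage sketch of the same API, with illustrative names:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC1(vm_xa);     /* ids allocated from 1 */

    static int stash_vm(void *vm, u32 *id)
    {
            /* no external mutex: the xarray takes its own lock */
            return xa_alloc(&vm_xa, id, vm, xa_limit_32b, GFP_KERNEL);
    }

    static void *take_vm(u32 id)
    {
            return xa_erase(&vm_xa, id);    /* NULL if id was never allocated */
    }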
@@ -965,6 +1069,30 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *engines;
+
+ rcu_read_lock();
+ do {
+ engines = rcu_dereference(ctx->engines);
+ if (unlikely(!engines))
+ break;
+
+ if (unlikely(!i915_sw_fence_await(&engines->fence)))
+ continue;
+
+ if (likely(engines == rcu_access_pointer(ctx->engines)))
+ break;
+
+ i915_sw_fence_complete(&engines->fence);
+ } while (1);
+ rcu_read_unlock();
+
+ return engines;
+}
+
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
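__context_engines_await() is the lookup half of the fence scheme: under rcu_read_lock() it loads ctx->engines, takes a completion reference, then re-checks that the pointer is still current, looping if it lost a race with set_engines(). Annotated shape of that retry loop (commentary only; the code is as above):

    rcu_read_lock();
    do {
            engines = rcu_dereference(ctx->engines);
            if (unlikely(!engines))
                    break;                  /* context already closed */

            if (unlikely(!i915_sw_fence_await(&engines->fence)))
                    continue;               /* mid-teardown, reload */

            if (likely(engines == rcu_access_pointer(ctx->engines)))
                    break;                  /* still current: success */

            i915_sw_fence_complete(&engines->fence);        /* raced, retry */
    } while (1);
    rcu_read_unlock();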
@@ -975,6 +1103,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
{
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
+ struct i915_gem_engines *e;
struct intel_context *ce;
int err = 0;
@@ -991,7 +1120,13 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ e = __context_engines_await(ctx);
+ if (!e) {
+ i915_active_release(&cb->base);
+ return -ENOENT;
+ }
+
+ for_each_gem_engine(ce, e, it) {
struct i915_request *rq;
if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
@@ -1022,7 +1157,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (err)
break;
}
- i915_gem_context_unlock_engines(ctx);
+ i915_sw_fence_complete(&e->fence);
cb->task = err ? NULL : task; /* caller needs to unwind instead */
cb->data = data;
@@ -1037,7 +1172,8 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct drm_i915_gem_context_param *args)
{
struct i915_address_space *vm;
- int ret;
+ int err;
+ u32 id;
if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
@@ -1045,27 +1181,22 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
rcu_read_lock();
vm = context_get_vm_rcu(ctx);
rcu_read_unlock();
+ if (!vm)
+ return -ENODEV;
- ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (ret)
+ err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+ if (err)
goto err_put;
- ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
- GEM_BUG_ON(!ret);
- if (ret < 0)
- goto err_unlock;
-
i915_vm_open(vm);
+ GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+ args->value = id;
args->size = 0;
- args->value = ret;
- ret = 0;
-err_unlock:
- mutex_unlock(&file_priv->vm_idr_lock);
err_put:
i915_vm_put(vm);
- return ret;
+ return err;
}
static void set_ppgtt_barrier(void *data)
@@ -1167,7 +1298,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
return -ENOENT;
rcu_read_lock();
- vm = idr_find(&file_priv->vm_idr, args->value);
+ vm = xa_load(&file_priv->vm_xa, args->value);
if (vm && !kref_get_unless_zero(&vm->ref))
vm = NULL;
rcu_read_unlock();
@@ -1213,87 +1344,61 @@ out:
return err;
}
-static int gen8_emit_rpcs_config(struct i915_request *rq,
- struct intel_context *ce,
- struct intel_sseu sseu)
+static int __apply_ringsize(struct intel_context *ce, void *sz)
{
- u64 offset;
- u32 *cs;
+ return intel_context_set_ring_size(ce, (unsigned long)sz);
+}
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+static int set_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
+
+ if (args->size)
+ return -EINVAL;
- offset = i915_ggtt_offset(ce->state) +
- LRC_STATE_PN * PAGE_SIZE +
- CTX_R_PWR_CLK_STATE * 4;
+ if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
+ return -EINVAL;
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+ if (args->value < I915_GTT_PAGE_SIZE)
+ return -EINVAL;
- intel_ring_advance(rq, cs);
+ if (args->value > 128 * I915_GTT_PAGE_SIZE)
+ return -EINVAL;
- return 0;
+ return context_apply_all(ctx,
+ __apply_ringsize,
+ __intel_context_ring_size(args->value));
}
-static int
-gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
+static int __get_ringsize(struct intel_context *ce, void *arg)
{
- struct i915_request *rq;
- int ret;
-
- lockdep_assert_held(&ce->pin_mutex);
-
- /*
- * If the context is not idle, we have to submit an ordered request to
- * modify its context image via the kernel context (writing to our own
- * image, or into the registers directory, does not stick). Pristine
- * and idle contexts will be configured on pinning.
- */
- if (!intel_context_pin_if_active(ce))
- return 0;
+ long sz;
- rq = intel_engine_create_kernel_request(ce->engine);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- goto out_unpin;
- }
-
- /* Serialise with the remote context */
- ret = intel_context_prepare_remote_request(ce, rq);
- if (ret == 0)
- ret = gen8_emit_rpcs_config(rq, ce, sseu);
+ sz = intel_context_get_ring_size(ce);
+ GEM_BUG_ON(sz > INT_MAX);
- i915_request_add(rq);
-out_unpin:
- intel_context_unpin(ce);
- return ret;
+ return sz; /* stop on first engine */
}
-static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
+static int get_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- int ret;
+ int sz;
- GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
-
- ret = intel_context_lock_pinned(ce);
- if (ret)
- return ret;
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
- /* Nothing to do if unmodified. */
- if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
- goto unlock;
+ if (args->size)
+ return -EINVAL;
- ret = gen8_modify_rpcs(ce, sseu);
- if (!ret)
- ce->sseu = sseu;
+ sz = context_apply_all(ctx, __get_ringsize, NULL);
+ if (sz < 0)
+ return sz;
-unlock:
- intel_context_unlock_pinned(ce);
- return ret;
+ args->value = sz;
+ return 0;
}
static int
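set_ringsize()/get_ringsize() back the new I915_CONTEXT_PARAM_RINGSIZE context parameter: per the checks above, the value must be a page-aligned byte size between one page and 128 pages (512 KiB). A hedged userspace sketch, assuming the uapi additions from this series are available in drm/i915_drm.h:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Ask for a 256 KiB ring on the given context (illustrative). */
    static int set_ring_size(int drm_fd, __u32 ctx_id)
    {
            struct drm_i915_gem_context_param p = {
                    .ctx_id = ctx_id,
                    .param  = I915_CONTEXT_PARAM_RINGSIZE,
                    .value  = 64 * 4096,    /* page-aligned, <= 128 pages */
            };

            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
    }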
@@ -1460,6 +1565,7 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
struct i915_context_engines_load_balance __user *ext =
container_of_user(base, typeof(*ext), base);
const struct set_engines *set = data;
+ struct drm_i915_private *i915 = set->ctx->i915;
struct intel_engine_cs *stack[16];
struct intel_engine_cs **siblings;
struct intel_context *ce;
@@ -1467,24 +1573,25 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
unsigned int n;
int err;
- if (!HAS_EXECLISTS(set->ctx->i915))
+ if (!HAS_EXECLISTS(i915))
return -ENODEV;
- if (USES_GUC_SUBMISSION(set->ctx->i915))
+ if (intel_uc_uses_guc_submission(&i915->gt.uc))
return -ENODEV; /* not implemented yet */
if (get_user(idx, &ext->engine_index))
return -EFAULT;
if (idx >= set->engines->num_engines) {
- DRM_DEBUG("Invalid placement value, %d >= %d\n",
- idx, set->engines->num_engines);
+ drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+ idx, set->engines->num_engines);
return -EINVAL;
}
idx = array_index_nospec(idx, set->engines->num_engines);
if (set->engines->engines[idx]) {
- DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
+ drm_dbg(&i915->drm,
+ "Invalid placement[%d], already occupied\n", idx);
return -EEXIST;
}
@@ -1516,12 +1623,13 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
goto out_siblings;
}
- siblings[n] = intel_engine_lookup_user(set->ctx->i915,
+ siblings[n] = intel_engine_lookup_user(i915,
ci.engine_class,
ci.engine_instance);
if (!siblings[n]) {
- DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Invalid sibling[%d]: { class:%d, inst:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
err = -EINVAL;
goto out_siblings;
}
@@ -1554,6 +1662,7 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
struct i915_context_engines_bond __user *ext =
container_of_user(base, typeof(*ext), base);
const struct set_engines *set = data;
+ struct drm_i915_private *i915 = set->ctx->i915;
struct i915_engine_class_instance ci;
struct intel_engine_cs *virtual;
struct intel_engine_cs *master;
@@ -1564,14 +1673,15 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
return -EFAULT;
if (idx >= set->engines->num_engines) {
- DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
- idx, set->engines->num_engines);
+ drm_dbg(&i915->drm,
+ "Invalid index for virtual engine: %d >= %d\n",
+ idx, set->engines->num_engines);
return -EINVAL;
}
idx = array_index_nospec(idx, set->engines->num_engines);
if (!set->engines->engines[idx]) {
- DRM_DEBUG("Invalid engine at %d\n", idx);
+ drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
return -EINVAL;
}
virtual = set->engines->engines[idx]->engine;
@@ -1589,11 +1699,12 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
if (copy_from_user(&ci, &ext->master, sizeof(ci)))
return -EFAULT;
- master = intel_engine_lookup_user(set->ctx->i915,
+ master = intel_engine_lookup_user(i915,
ci.engine_class, ci.engine_instance);
if (!master) {
- DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
- ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Unrecognised master engine: { class:%u, instance:%u }\n",
+ ci.engine_class, ci.engine_instance);
return -EINVAL;
}
@@ -1606,12 +1717,13 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
return -EFAULT;
- bond = intel_engine_lookup_user(set->ctx->i915,
+ bond = intel_engine_lookup_user(i915,
ci.engine_class,
ci.engine_instance);
if (!bond) {
- DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+ n, ci.engine_class, ci.engine_instance);
return -EINVAL;
}
@@ -1640,6 +1752,7 @@ static int
set_engines(struct i915_gem_context *ctx,
const struct drm_i915_gem_context_param *args)
{
+ struct drm_i915_private *i915 = ctx->i915;
struct i915_context_param_engines __user *user =
u64_to_user_ptr(args->value);
struct set_engines set = { .ctx = ctx };
@@ -1661,8 +1774,8 @@ set_engines(struct i915_gem_context *ctx,
BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
if (args->size < sizeof(*user) ||
!IS_ALIGNED(args->size, sizeof(*user->engines))) {
- DRM_DEBUG("Invalid size for engine array: %d\n",
- args->size);
+ drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
+ args->size);
return -EINVAL;
}
@@ -1671,13 +1784,10 @@ set_engines(struct i915_gem_context *ctx,
* first 64 engines defined here.
*/
num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
-
- set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
- GFP_KERNEL);
+ set.engines = alloc_engines(num_engines);
if (!set.engines)
return -ENOMEM;
- init_rcu_head(&set.engines->rcu);
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
@@ -1698,8 +1808,9 @@ set_engines(struct i915_gem_context *ctx,
ci.engine_class,
ci.engine_instance);
if (!engine) {
- DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Invalid engine[%d]: { class:%d, instance:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
__free_engines(set.engines, n);
return -ENOENT;
}
@@ -1729,6 +1840,11 @@ set_engines(struct i915_gem_context *ctx,
replace:
mutex_lock(&ctx->engines_mutex);
+ if (i915_gem_context_is_closed(ctx)) {
+ mutex_unlock(&ctx->engines_mutex);
+ free_engines(set.engines);
+ return -ENOENT;
+ }
if (args->size)
i915_gem_context_set_user_engines(ctx);
else
@@ -1736,7 +1852,8 @@ replace:
set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
mutex_unlock(&ctx->engines_mutex);
- call_rcu(&set.engines->rcu, free_engines_rcu);
+ /* Keep track of old engine sets for kill_context() */
+ engines_idle_release(ctx, set.engines);
return 0;
}
@@ -1747,11 +1864,10 @@ __copy_engines(struct i915_gem_engines *e)
struct i915_gem_engines *copy;
unsigned int n;
- copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ copy = alloc_engines(e->num_engines);
if (!copy)
return ERR_PTR(-ENOMEM);
- init_rcu_head(&copy->rcu);
for (n = 0; n < e->num_engines; n++) {
if (e->engines[n])
copy->engines[n] = intel_context_get(e->engines[n]);
@@ -1852,17 +1968,19 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
-static void __apply_priority(struct intel_context *ce, void *arg)
+static int __apply_priority(struct intel_context *ce, void *arg)
{
struct i915_gem_context *ctx = arg;
if (!intel_engine_has_semaphores(ce->engine))
- return;
+ return 0;
if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
intel_context_set_use_semaphores(ce);
else
intel_context_clear_use_semaphores(ce);
+
+ return 0;
}
static int set_priority(struct i915_gem_context *ctx,
@@ -1955,6 +2073,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_persistence(ctx, args);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = set_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1983,6 +2105,18 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}
+static int copy_ring_size(struct intel_context *dst,
+ struct intel_context *src)
+{
+ long sz;
+
+ sz = intel_context_get_ring_size(src);
+ if (sz < 0)
+ return sz;
+
+ return intel_context_set_ring_size(dst, sz);
+}
+
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
@@ -1991,11 +2125,10 @@ static int clone_engines(struct i915_gem_context *dst,
bool user_engines;
unsigned long n;
- clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
- init_rcu_head(&clone->rcu);
for (n = 0; n < e->num_engines; n++) {
struct intel_engine_cs *engine;
@@ -2025,6 +2158,12 @@ static int clone_engines(struct i915_gem_context *dst,
}
intel_context_set_gem(clone->engines[n], dst);
+
+ /* Copy across the preferred ringsize */
+ if (copy_ring_size(clone->engines[n], e->engines[n])) {
+ __free_engines(clone, n + 1);
+ goto err_unlock;
+ }
}
clone->num_engines = n;
@@ -2032,8 +2171,7 @@ static int clone_engines(struct i915_gem_context *dst,
i915_gem_context_unlock_engines(src);
/* Serialised by constructor */
- free_engines(__context_engines_static(dst));
- RCU_INIT_POINTER(dst->engines, clone);
+ engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
if (user_engines)
i915_gem_context_set_user_engines(dst);
else
@@ -2213,8 +2351,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
ext_data.fpriv = file->driver_priv;
if (client_is_banned(ext_data.fpriv)) {
- DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm, task_pid_nr(current));
+ drm_dbg(&i915->drm,
+ "client %s[%d] banned from creating ctx\n",
+ current->comm, task_pid_nr(current));
return -EIO;
}
@@ -2236,7 +2375,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
goto err_ctx;
args->ctx_id = id;
- DRM_DEBUG("HW context %d created\n", args->ctx_id);
+ drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
return 0;
@@ -2386,6 +2525,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_persistent(ctx);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = get_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2459,6 +2602,9 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
const struct i915_gem_engines *e = it->engines;
struct intel_context *ctx;
+ if (unlikely(!e))
+ return NULL;
+
do {
if (it->idx >= e->num_engines)
return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 3ae61a355d87..f1d884d304bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -192,12 +192,16 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
- struct intel_context *ce = ERR_PTR(-EINVAL);
+ struct intel_context *ce;
rcu_read_lock(); {
struct i915_gem_engines *e = rcu_dereference(ctx->engines);
- if (likely(idx < e->num_engines && e->engines[idx]))
+ if (unlikely(!e)) /* context was closed! */
+ ce = ERR_PTR(-ENOENT);
+ else if (likely(idx < e->num_engines && e->engines[idx]))
ce = intel_context_get(e->engines[idx]);
+ else
+ ce = ERR_PTR(-EINVAL);
} rcu_read_unlock();
return ce;
@@ -207,7 +211,6 @@ static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
struct i915_gem_engines *engines)
{
- GEM_BUG_ON(!engines);
it->engines = engines;
it->idx = 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 017ca803ab47..28760bd03265 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -20,6 +20,7 @@
#include "gt/intel_context_types.h"
#include "i915_scheduler.h"
+#include "i915_sw_fence.h"
struct pid;
@@ -30,7 +31,12 @@ struct intel_timeline;
struct intel_ring;
struct i915_gem_engines {
- struct rcu_head rcu;
+ union {
+ struct list_head link;
+ struct rcu_head rcu;
+ };
+ struct i915_sw_fence fence;
+ struct i915_gem_context *ctx;
unsigned int num_engines;
struct intel_context *engines[];
};
@@ -173,6 +179,11 @@ struct i915_gem_context {
* context in messages.
*/
char name[TASK_COMM_LEN + 8];
+
+ struct {
+ spinlock_t lock;
+ struct list_head engines;
+ } stale;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
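The union of link and rcu in struct i915_gem_engines encodes the two lifetime phases: while live, the set sits on ctx->stale.engines via link; once torn down, the same storage holds the RCU callback head. The members are never needed simultaneously, so the union both saves space and documents the phase change. In sketch form (kernel headers assumed, names illustrative):

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    struct phased_object {
            union {
                    struct list_head link;  /* live: on an owner's list */
                    struct rcu_head rcu;    /* dying: queued via call_rcu() */
            };
    };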
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 372b57ca0efc..7db5a793739d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -48,7 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
src = sg_next(src);
}
- if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ if (!dma_map_sg_attrs(attachment->dev,
+ st->sgl, st->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
ret = -ENOMEM;
goto err_free_sg;
}
@@ -71,7 +73,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ dma_unmap_sg_attrs(attachment->dev,
+ sg->sgl, sg->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sg);
kfree(sg);
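Both directions of the dma-buf map now pass DMA_ATTR_SKIP_CPU_SYNC, so mapping or unmapping the pages for the importing device no longer implies a CPU cache flush/invalidate; i915 manages coherency for these objects itself. The call shape, as a sketch:

    #include <linux/dma-mapping.h>

    static int map_for_device(struct device *dev, struct sg_table *st,
                              enum dma_data_direction dir)
    {
            /* returns the number of mapped entries, 0 on failure */
            if (!dma_map_sg_attrs(dev, st->sgl, st->nents, dir,
                                  DMA_ATTR_SKIP_CPU_SYNC))
                    return -ENOMEM;
            return 0;
    }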
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 0cc40e77bbd2..4f96c8788a2e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -368,7 +368,6 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
if (!atomic_read(&obj->bind_count))
return;
@@ -400,12 +399,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj = vma->obj;
-
- assert_object_held(obj);
-
/* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(obj);
+ i915_gem_object_bump_inactive_ggtt(vma->obj);
i915_vma_unpin(vma);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7643a30ba4cd..b7440f06c5e2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -10,7 +10,6 @@
#include <linux/uaccess.h>
#include <drm/drm_syncobj.h>
-#include <drm/i915_drm.h>
#include "display/intel_frontbuffer.h"
@@ -28,6 +27,19 @@
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
+struct eb_vma {
+ struct i915_vma *vma;
+ unsigned int flags;
+
+ /** This vma's place in the execbuf reservation list */
+ struct drm_i915_gem_exec_object2 *exec;
+ struct list_head bind_link;
+ struct list_head reloc_link;
+
+ struct hlist_node node;
+ u32 handle;
+};
+
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -35,17 +47,15 @@ enum {
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
-#define __EXEC_OBJECT_HAS_REF BIT(31)
-#define __EXEC_OBJECT_HAS_PIN BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_HAS_PIN BIT(31)
+#define __EXEC_OBJECT_HAS_FENCE BIT(30)
+#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
+#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
+#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
-#define __EXEC_VALIDATED BIT(30)
-#define __EXEC_INTERNAL_FLAGS (~0u << 30)
+#define __EXEC_INTERNAL_FLAGS (~0u << 31)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
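With __EXEC_OBJECT_HAS_REF gone (the execbuffer now always owns a vma reference through eb_vma), the internal flags shift up one bit and the mask shrinks accordingly: ~0u << 28 is 0xf0000000, i.e. exactly bits 28 through 31. A two-line check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            printf("%08x\n", ~0u << 28);                    /* f0000000 */
            printf("%08x\n", (1u << 31) | (1u << 30) |
                             (1u << 29) | (1u << 28));      /* f0000000 */
            return 0;
    }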
@@ -220,15 +230,14 @@ struct i915_execbuffer {
struct drm_file *file; /** per-file lookup tables and limits */
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
- struct i915_vma **vma;
- unsigned int *flags;
+ struct eb_vma *vma;
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
struct i915_request *request; /** our request to build */
- struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct eb_vma *batch; /** identity of the batch obj/vma */
struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -276,8 +285,6 @@ struct i915_execbuffer {
struct hlist_head *buckets; /** ht for relocation handles */
};
-#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -364,9 +371,9 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
- struct i915_vma *vma)
+ struct eb_vma *ev)
{
- unsigned int exec_flags = *vma->exec_flags;
+ struct i915_vma *vma = ev->vma;
u64 pin_flags;
if (vma->node.size)
@@ -375,24 +382,24 @@ eb_pin_vma(struct i915_execbuffer *eb,
pin_flags = entry->offset & PIN_OFFSET_MASK;
pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
return false;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_pin_fence(vma))) {
i915_vma_unpin(vma);
return false;
}
if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- return !eb_vma_misplaced(entry, vma, exec_flags);
+ ev->flags |= __EXEC_OBJECT_HAS_PIN;
+ return !eb_vma_misplaced(entry, vma, ev->flags);
}
static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
@@ -406,13 +413,13 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
}
static inline void
-eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
+eb_unreserve_vma(struct eb_vma *ev)
{
- if (!(*flags & __EXEC_OBJECT_HAS_PIN))
+ if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
return;
- __eb_unreserve_vma(vma, *flags);
- *flags &= ~__EXEC_OBJECT_RESERVED;
+ __eb_unreserve_vma(ev->vma, ev->flags);
+ ev->flags &= ~__EXEC_OBJECT_RESERVED;
}
static int
@@ -442,13 +449,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
} else {
entry->pad_to_size = 0;
}
-
- if (unlikely(vma->exec_flags)) {
- DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
- entry->handle, (int)(entry - eb->exec));
- return -EINVAL;
- }
-
/*
* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
@@ -471,41 +471,29 @@ eb_validate_vma(struct i915_execbuffer *eb,
return 0;
}
-static int
+static void
eb_add_vma(struct i915_execbuffer *eb,
unsigned int i, unsigned batch_idx,
struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- int err;
+ struct eb_vma *ev = &eb->vma[i];
GEM_BUG_ON(i915_vma_is_closed(vma));
- if (!(eb->args->flags & __EXEC_VALIDATED)) {
- err = eb_validate_vma(eb, entry, vma);
- if (unlikely(err))
- return err;
- }
+ ev->vma = i915_vma_get(vma);
+ ev->exec = entry;
+ ev->flags = entry->flags;
if (eb->lut_size > 0) {
- vma->exec_handle = entry->handle;
- hlist_add_head(&vma->exec_node,
+ ev->handle = entry->handle;
+ hlist_add_head(&ev->node,
&eb->buckets[hash_32(entry->handle,
eb->lut_size)]);
}
if (entry->relocation_count)
- list_add_tail(&vma->reloc_link, &eb->relocs);
-
- /*
- * Stash a pointer from the vma to execobj, so we can query its flags,
- * size, alignment etc as provided by the user. Also we stash a pointer
- * to the vma inside the execobj so that we can use a direct lookup
- * to find the right target VMA when doing relocations.
- */
- eb->vma[i] = vma;
- eb->flags[i] = entry->flags;
- vma->exec_flags = &eb->flags[i];
+ list_add_tail(&ev->reloc_link, &eb->relocs);
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -518,30 +506,23 @@ eb_add_vma(struct i915_execbuffer *eb,
*/
if (i == batch_idx) {
if (entry->relocation_count &&
- !(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ !(ev->flags & EXEC_OBJECT_PINNED))
+ ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+ ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
- eb->batch = vma;
+ eb->batch = ev;
}
- err = 0;
- if (eb_pin_vma(eb, entry, vma)) {
+ if (eb_pin_vma(eb, entry, ev)) {
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
- eb_unreserve_vma(vma, vma->exec_flags);
-
- list_add_tail(&vma->exec_link, &eb->unbound);
- if (drm_mm_node_allocated(&vma->node))
- err = i915_vma_unbind(vma);
- if (unlikely(err))
- vma->exec_flags = NULL;
+ eb_unreserve_vma(ev);
+ list_add_tail(&ev->bind_link, &eb->unbound);
}
- return err;
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -562,14 +543,14 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
}
static int eb_reserve_vma(const struct i915_execbuffer *eb,
- struct i915_vma *vma)
+ struct eb_vma *ev,
+ u64 pin_flags)
{
- struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- unsigned int exec_flags = *vma->exec_flags;
- u64 pin_flags;
+ struct drm_i915_gem_exec_object2 *entry = ev->exec;
+ unsigned int exec_flags = ev->flags;
+ struct i915_vma *vma = ev->vma;
int err;
- pin_flags = PIN_USER | PIN_NONBLOCK;
if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
pin_flags |= PIN_GLOBAL;
@@ -583,11 +564,16 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
pin_flags |= PIN_MAPPABLE;
- if (exec_flags & EXEC_OBJECT_PINNED) {
+ if (exec_flags & EXEC_OBJECT_PINNED)
pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
- } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
+ else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ if (drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(entry, vma, ev->flags)) {
+ err = i915_vma_unbind(vma);
+ if (err)
+ return err;
}
err = i915_vma_pin(vma,
@@ -612,8 +598,8 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
+ ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
}
@@ -621,10 +607,11 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
static int eb_reserve(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
+ unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
struct list_head last;
- struct i915_vma *vma;
+ struct eb_vma *ev;
unsigned int i, pass;
- int err;
+ int err = 0;
/*
* Attempt to pin all of the buffers into the GTT.
@@ -640,44 +627,54 @@ static int eb_reserve(struct i915_execbuffer *eb)
* room for the earlier objects *unless* we need to defragment.
*/
+ if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
+ return -EINTR;
+
pass = 0;
- err = 0;
do {
- list_for_each_entry(vma, &eb->unbound, exec_link) {
- err = eb_reserve_vma(eb, vma);
+ list_for_each_entry(ev, &eb->unbound, bind_link) {
+ err = eb_reserve_vma(eb, ev, pin_flags);
if (err)
break;
}
- if (err != -ENOSPC)
- return err;
+ if (!(err == -ENOSPC || err == -EAGAIN))
+ break;
/* Resort *all* the objects into priority order */
INIT_LIST_HEAD(&eb->unbound);
INIT_LIST_HEAD(&last);
for (i = 0; i < count; i++) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ unsigned int flags;
+ ev = &eb->vma[i];
+ flags = ev->flags;
if (flags & EXEC_OBJECT_PINNED &&
flags & __EXEC_OBJECT_HAS_PIN)
continue;
- eb_unreserve_vma(vma, &eb->flags[i]);
+ eb_unreserve_vma(ev);
if (flags & EXEC_OBJECT_PINNED)
/* Pinned must have their slot */
- list_add(&vma->exec_link, &eb->unbound);
+ list_add(&ev->bind_link, &eb->unbound);
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
/* Map require the lowest 256MiB (aperture) */
- list_add_tail(&vma->exec_link, &eb->unbound);
+ list_add_tail(&ev->bind_link, &eb->unbound);
else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
/* Prioritise 4GiB region for restricted bo */
- list_add(&vma->exec_link, &last);
+ list_add(&ev->bind_link, &last);
else
- list_add_tail(&vma->exec_link, &last);
+ list_add_tail(&ev->bind_link, &last);
}
list_splice_tail(&last, &eb->unbound);
+ if (err == -EAGAIN) {
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ flush_workqueue(eb->i915->mm.userptr_wq);
+ mutex_lock(&eb->i915->drm.struct_mutex);
+ continue;
+ }
+
switch (pass++) {
case 0:
break;
@@ -688,13 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb)
err = i915_gem_evict_vm(eb->context->vm);
mutex_unlock(&eb->context->vm->mutex);
if (err)
- return err;
+ goto unlock;
break;
default:
- return -ENOSPC;
+ err = -ENOSPC;
+ goto unlock;
}
+
+ pin_flags = PIN_USER;
} while (1);
+
+unlock:
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ return err;
}
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
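eb_reserve() keeps its escalating multi-pass strategy but now takes struct_mutex itself and relaxes pin_flags from PIN_USER | PIN_NONBLOCK to plain PIN_USER after the first pass; an -EAGAIN from a userptr object flushes the userptr workqueue (with the mutex dropped) and retries without consuming a pass. The control flow, compressed into an annotated outline of the code above (helpers are elided placeholders, not real functions):

    pass = 0;
    do {
            err = try_to_pin_everything(pin_flags);
            if (err != -ENOSPC && err != -EAGAIN)
                    break;                  /* done, or a hard failure */

            resort_unbound_by_placement();  /* pinned, mappable, rest */

            if (err == -EAGAIN) {
                    flush_userptr_work();   /* mutex dropped inside */
                    continue;               /* retry, same pass */
            }

            switch (pass++) {
            case 0:
                    break;                  /* simple retry */
            case 1:
                    err = evict_entire_vm();        /* make room */
                    if (err)
                            goto unlock;
                    break;
            default:
                    err = -ENOSPC;
                    goto unlock;
            }

            pin_flags = PIN_USER;           /* allow blocking from now on */
    } while (1);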
@@ -731,17 +735,14 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
+ return -ENOENT;
+
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
batch = eb_batch_index(eb);
- mutex_lock(&eb->gem_context->mutex);
- if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
- err = -ENOENT;
- goto err_ctx;
- }
-
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -786,45 +787,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
i915_gem_object_unlock(obj);
add_vma:
- err = eb_add_vma(eb, i, batch, vma);
+ err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err))
goto err_vma;
- GEM_BUG_ON(vma != eb->vma[i]);
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
- eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
+ eb_add_vma(eb, i, batch, vma);
}
- mutex_unlock(&eb->gem_context->mutex);
-
- eb->args->flags |= __EXEC_VALIDATED;
- return eb_reserve(eb);
+ return 0;
err_obj:
i915_gem_object_put(obj);
err_vma:
- eb->vma[i] = NULL;
-err_ctx:
- mutex_unlock(&eb->gem_context->mutex);
+ eb->vma[i].vma = NULL;
return err;
}
-static struct i915_vma *
+static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
if (eb->lut_size < 0) {
if (handle >= -eb->lut_size)
return NULL;
- return eb->vma[handle];
+ return &eb->vma[handle];
} else {
struct hlist_head *head;
- struct i915_vma *vma;
+ struct eb_vma *ev;
head = &eb->buckets[hash_32(handle, eb->lut_size)];
- hlist_for_each_entry(vma, head, exec_node) {
- if (vma->exec_handle == handle)
- return vma;
+ hlist_for_each_entry(ev, head, node) {
+ if (ev->handle == handle)
+ return ev;
}
return NULL;
}
@@ -836,32 +829,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
unsigned int i;
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
- unsigned int flags = eb->flags[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
if (!vma)
break;
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- vma->exec_flags = NULL;
- eb->vma[i] = NULL;
+ eb->vma[i].vma = NULL;
- if (flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, flags);
+ if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+ __eb_unreserve_vma(vma, ev->flags);
- if (flags & __EXEC_OBJECT_HAS_REF)
- i915_vma_put(vma);
+ i915_vma_put(vma);
}
}
-static void eb_reset_vmas(const struct i915_execbuffer *eb)
-{
- eb_release_vmas(eb);
- if (eb->lut_size > 0)
- memset(eb->buckets, 0,
- sizeof(struct hlist_head) << eb->lut_size);
-}
-
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
@@ -914,11 +896,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
static void reloc_gpu_flush(struct reloc_cache *cache)
{
- GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
+ struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+
+ GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
- i915_gem_object_unpin_map(cache->rq->batch->obj);
+ __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1197,7 +1181,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto out_pool;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1328,10 +1312,11 @@ out:
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+ struct eb_vma *ev,
const struct drm_i915_gem_relocation_entry *reloc)
{
- struct i915_vma *target;
+ struct drm_i915_private *i915 = eb->i915;
+ struct eb_vma *target;
int err;
/* we've already hold a reference to all valid objects */
@@ -1341,7 +1326,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
- DRM_DEBUG("reloc with multiple write domains: "
+ drm_dbg(&i915->drm, "reloc with multiple write domains: "
"target %d offset %d "
"read %08x write %08x",
reloc->target_handle,
@@ -1352,7 +1337,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
- DRM_DEBUG("reloc with read/write non-GPU domains: "
+ drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
"target %d offset %d "
"read %08x write %08x",
reloc->target_handle,
@@ -1363,7 +1348,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (reloc->write_domain) {
- *target->exec_flags |= EXEC_OBJECT_WRITE;
+ target->flags |= EXEC_OBJECT_WRITE;
/*
* Sandybridge PPGTT errata: We need a global gtt mapping
@@ -1373,7 +1358,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
- err = i915_vma_bind(target, target->obj->cache_level,
+ err = i915_vma_bind(target->vma,
+ target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
@@ -1386,21 +1372,21 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
+ gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset >
- vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
- DRM_DEBUG("Relocation beyond object bounds: "
+ ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
+ drm_dbg(&i915->drm, "Relocation beyond object bounds: "
"target %d offset %d size %d.\n",
reloc->target_handle,
(int)reloc->offset,
- (int)vma->size);
+ (int)ev->vma->size);
return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
- DRM_DEBUG("Relocation not 4-byte aligned: "
+ drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
"target %d offset %d.\n",
reloc->target_handle,
(int)reloc->offset);
@@ -1415,18 +1401,18 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* do relocations we are already stalling, disable the user's opt
* out of our synchronisation.
*/
- *vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+ ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(vma, reloc, eb, target);
+ return relocate_entry(ev->vma, reloc, eb, target->vma);
}
-static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
+static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *urelocs;
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
+ const struct drm_i915_gem_exec_object2 *entry = ev->exec;
unsigned int remain;
urelocs = u64_to_user_ptr(entry->relocs_ptr);
@@ -1456,9 +1442,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* we would try to acquire the struct mutex again. Obviously
* this is bad and so lockdep complains vehemently.
*/
- pagefault_disable();
- copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
- pagefault_enable();
+ copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
if (unlikely(copied)) {
remain = -EFAULT;
goto out;
@@ -1466,7 +1450,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
remain -= count;
do {
- u64 offset = eb_relocate_entry(eb, vma, r);
+ u64 offset = eb_relocate_entry(eb, ev, r);
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
@@ -1495,10 +1479,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* can read from this userspace address.
*/
offset = gen8_canonical_addr(offset & ~UPDATE);
- if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
- remain = -EFAULT;
- goto out;
- }
+ __put_user(offset,
+ &urelocs[r - stack].presumed_offset);
}
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
@@ -1508,281 +1490,34 @@ out:
return remain;
}
-static int
-eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
-{
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- struct drm_i915_gem_relocation_entry *relocs =
- u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- unsigned int i;
- int err;
-
- for (i = 0; i < entry->relocation_count; i++) {
- u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
-
- if ((s64)offset < 0) {
- err = (int)offset;
- goto err;
- }
- }
- err = 0;
-err:
- reloc_cache_reset(&eb->reloc_cache);
- return err;
-}
-
-static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
-{
- const char __user *addr, *end;
- unsigned long size;
- char __maybe_unused c;
-
- size = entry->relocation_count;
- if (size == 0)
- return 0;
-
- if (size > N_RELOC(ULONG_MAX))
- return -EINVAL;
-
- addr = u64_to_user_ptr(entry->relocs_ptr);
- size *= sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(addr, size))
- return -EFAULT;
-
- end = addr + size;
- for (; addr < end; addr += PAGE_SIZE) {
- int err = __get_user(c, addr);
- if (err)
- return err;
- }
- return __get_user(c, end - 1);
-}
-
-static int eb_copy_relocations(const struct i915_execbuffer *eb)
+static int eb_relocate(struct i915_execbuffer *eb)
{
- struct drm_i915_gem_relocation_entry *relocs;
- const unsigned int count = eb->buffer_count;
- unsigned int i;
int err;
- for (i = 0; i < count; i++) {
- const unsigned int nreloc = eb->exec[i].relocation_count;
- struct drm_i915_gem_relocation_entry __user *urelocs;
- unsigned long size;
- unsigned long copied;
-
- if (nreloc == 0)
- continue;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- goto err;
-
- urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
- size = nreloc * sizeof(*relocs);
-
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
- if (!relocs) {
- err = -ENOMEM;
- goto err;
- }
-
- /* copy_from_user is limited to < 4GiB */
- copied = 0;
- do {
- unsigned int len =
- min_t(u64, BIT_ULL(31), size - copied);
-
- if (__copy_from_user((char *)relocs + copied,
- (char __user *)urelocs + copied,
- len))
- goto end;
-
- copied += len;
- } while (copied < size);
-
- /*
- * As we do not update the known relocation offsets after
- * relocating (due to the complexities in lock handling),
- * we need to mark them as invalid now so that we force the
- * relocation processing next time. Just in case the target
- * object is evicted and then rebound into its old
- * presumed_offset before the next execbuffer - if that
- * happened we would make the mistake of assuming that the
- * relocations were valid.
- */
- if (!user_access_begin(urelocs, size))
- goto end;
-
- for (copied = 0; copied < nreloc; copied++)
- unsafe_put_user(-1,
- &urelocs[copied].presumed_offset,
- end_user);
- user_access_end();
-
- eb->exec[i].relocs_ptr = (uintptr_t)relocs;
- }
-
- return 0;
-
-end_user:
- user_access_end();
-end:
- kvfree(relocs);
- err = -EFAULT;
-err:
- while (i--) {
- relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
- if (eb->exec[i].relocation_count)
- kvfree(relocs);
- }
- return err;
-}
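/*
 * Aside: the eb_copy_relocations() being removed above chunked its copies
 * because a single copy_from_user() cannot move 4GiB or more at once. The
 * same pattern in user-space terms, with memcpy() standing in for the
 * userspace copy (a sketch, not kernel code):
 */
#include <stdint.h>
#include <string.h>

static void copy_chunked(char *dst, const char *src, uint64_t size)
{
	uint64_t copied = 0;

	while (copied < size) {
		/* cap each pass below 2^31 bytes, as the removed loop did */
		uint64_t len = size - copied;
		if (len > (1ull << 31))
			len = 1ull << 31;
		memcpy(dst + copied, src + copied, len);
		copied += len;
	}
}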
-
-static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- if (unlikely(i915_modparams.prefault_disable))
- return 0;
-
- for (i = 0; i < count; i++) {
- int err;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
-{
- struct drm_device *dev = &eb->i915->drm;
- bool have_copy = false;
- struct i915_vma *vma;
- int err = 0;
-
-repeat:
- if (signal_pending(current)) {
- err = -ERESTARTSYS;
- goto out;
- }
-
- /* We may process another execbuffer during the unlock... */
- eb_reset_vmas(eb);
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * We take 3 passes through the slowpath.
- *
- * 1 - we try to just prefault all the user relocation entries and
- * then attempt to reuse the atomic pagefault disabled fast path again.
- *
- * 2 - we copy the user entries to a local buffer here outside of the
- * lock and allow ourselves to wait upon any rendering before
- * relocations
- *
- * 3 - we already have a local copy of the relocation entries, but
- * were interrupted (EAGAIN) whilst waiting for the objects, try again.
- */
- if (!err) {
- err = eb_prefault_relocations(eb);
- } else if (!have_copy) {
- err = eb_copy_relocations(eb);
- have_copy = err == 0;
- } else {
- cond_resched();
- err = 0;
- }
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* A frequent cause of EAGAIN is currently unavailable client pages */
- flush_workqueue(eb->i915->mm.userptr_wq);
-
- err = i915_mutex_lock_interruptible(dev);
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* reacquire the objects */
+ mutex_lock(&eb->gem_context->mutex);
err = eb_lookup_vmas(eb);
+ mutex_unlock(&eb->gem_context->mutex);
if (err)
- goto err;
-
- GEM_BUG_ON(!eb->batch);
-
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (!have_copy) {
- pagefault_disable();
- err = eb_relocate_vma(eb, vma);
- pagefault_enable();
- if (err)
- goto repeat;
- } else {
- err = eb_relocate_vma_slow(eb, vma);
- if (err)
- goto err;
- }
- }
-
- /*
- * Leave the user relocations as are, this is the painfully slow path,
- * and we want to avoid the complication of dropping the lock whilst
- * having buffers reserved in the aperture and so causing spurious
- * ENOSPC for random operations.
- */
-
-err:
- if (err == -EAGAIN)
- goto repeat;
-
-out:
- if (have_copy) {
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry =
- &eb->exec[i];
- struct drm_i915_gem_relocation_entry *relocs;
-
- if (!entry->relocation_count)
- continue;
+ return err;
- relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- kvfree(relocs);
- }
+ if (!list_empty(&eb->unbound)) {
+ err = eb_reserve(eb);
+ if (err)
+ return err;
}
- return err;
-}
-
-static int eb_relocate(struct i915_execbuffer *eb)
-{
- if (eb_lookup_vmas(eb))
- goto slow;
-
/* The objects are in their final locations, apply the relocations. */
if (eb->args->flags & __EXEC_HAS_RELOC) {
- struct i915_vma *vma;
+ struct eb_vma *ev;
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (eb_relocate_vma(eb, vma))
- goto slow;
+ list_for_each_entry(ev, &eb->relocs, reloc_link) {
+ err = eb_relocate_vma(eb, ev);
+ if (err)
+ return err;
}
}
return 0;
-
-slow:
- return eb_relocate_slow(eb);
}
static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1795,27 +1530,19 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_init(&acquire, &reservation_ww_class);
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
- if (!err)
- continue;
-
- GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
-
if (err == -EDEADLK) {
GEM_BUG_ON(i == 0);
do {
int j = i - 1;
- ww_mutex_unlock(&eb->vma[j]->resv->lock);
+ ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
- swap(eb->flags[i], eb->flags[j]);
swap(eb->vma[i], eb->vma[j]);
- eb->vma[i]->exec_flags = &eb->flags[i];
} while (--i);
- GEM_BUG_ON(vma != eb->vma[0]);
- vma->exec_flags = &eb->flags[0];
err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
&acquire);
@@ -1826,8 +1553,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_done(&acquire);
while (i--) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+ unsigned int flags = ev->flags;
struct drm_i915_gem_object *obj = vma->obj;
assert_vma_held(vma);
@@ -1871,10 +1599,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
i915_vma_unlock(vma);
__eb_unreserve_vma(vma, flags);
- vma->exec_flags = NULL;
+ i915_vma_put(vma);
- if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
- i915_vma_put(vma);
+ ev->vma = NULL;
}
ww_acquire_fini(&acquire);
@@ -1888,7 +1615,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
return 0;
err_skip:
- i915_request_skip(eb->request, err);
+ i915_request_set_error_once(eb->request, err);
return err;
}
@@ -1922,7 +1649,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
int i;
if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
- DRM_DEBUG("sol reset is gen7/rcs only\n");
+ drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2009,7 +1736,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (!pw)
return -ENOMEM;
- err = i915_active_acquire(&eb->batch->active);
+ err = i915_active_acquire(&eb->batch->vma->active);
if (err)
goto err_free;
@@ -2026,7 +1753,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
- pw->batch = eb->batch;
+ pw->batch = eb->batch->vma;
pw->batch_offset = eb->batch_start_offset;
pw->batch_length = eb->batch_len;
pw->shadow = shadow;
@@ -2068,7 +1795,7 @@ err_trampoline:
err_shadow:
i915_active_release(&shadow->active);
err_batch:
- i915_active_release(&eb->batch->active);
+ i915_active_release(&eb->batch->vma->active);
err_free:
kfree(pw);
return err;
@@ -2076,6 +1803,7 @@ err_free:
static int eb_parse(struct i915_execbuffer *eb)
{
+ struct drm_i915_private *i915 = eb->i915;
struct intel_engine_pool_node *pool;
struct i915_vma *shadow, *trampoline;
unsigned int len;
@@ -2091,7 +1819,8 @@ static int eb_parse(struct i915_execbuffer *eb)
* post-scan tampering
*/
if (!eb->context->vm->has_read_only) {
- DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ drm_dbg(&i915->drm,
+ "Cannot prevent post-scan tampering without RO capable vm\n");
return -EINVAL;
}
} else {
@@ -2129,15 +1858,12 @@ static int eb_parse(struct i915_execbuffer *eb)
if (err)
goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(shadow);
- eb->flags[eb->buffer_count] =
- __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- shadow->exec_flags = &eb->flags[eb->buffer_count];
- eb->buffer_count++;
+ eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
+ eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batch = &eb->vma[eb->buffer_count++];
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- eb->batch = shadow;
shadow->private = pool;
return 0;
@@ -2164,7 +1890,7 @@ add_to_client(struct i915_request *rq, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
-static int eb_submit(struct i915_execbuffer *eb)
+static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
@@ -2191,7 +1917,7 @@ static int eb_submit(struct i915_execbuffer *eb)
}
err = eb->engine->emit_bb_start(eb->request,
- eb->batch->node.start +
+ batch->node.start +
eb->batch_start_offset,
eb->batch_len,
eb->batch_flags);
@@ -2326,15 +2052,22 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
intel_context_timeline_unlock(tl);
if (rq) {
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- i915_request_put(rq);
- err = -EINTR;
- goto err_exit;
- }
+ bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ long timeout;
+
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (nonblock)
+ timeout = 0;
+
+ timeout = i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ timeout);
i915_request_put(rq);
+
+ if (timeout < 0) {
+ err = nonblock ? -EWOULDBLOCK : timeout;
+ goto err_exit;
+ }
}
eb->engine = ce->engine;
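/*
 * Aside: the hunk above teaches engine pinning to honour O_NONBLOCK by
 * waiting with a zero timeout and reporting -EWOULDBLOCK instead of
 * sleeping. The same decision in isolation; wait_for() is a hypothetical
 * stand-in for i915_request_wait() (sketch only):
 */
#include <errno.h>
#include <limits.h>
#include <stdbool.h>

/* < 0 on error/timeout, otherwise the time remaining */
static long wait_for(long timeout)
{
	return timeout ? timeout : -ETIME;   /* pretend a 0 timeout expires */
}

static int wait_for_idle_ring(bool nonblock)
{
	long timeout = nonblock ? 0 : LONG_MAX;   /* ~MAX_SCHEDULE_TIMEOUT */

	timeout = wait_for(timeout);
	if (timeout < 0)
		return nonblock ? -EWOULDBLOCK : (int)timeout;
	return 0;
}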
@@ -2372,8 +2105,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
if (user_ring_id != I915_EXEC_BSD &&
(args->flags & I915_EXEC_BSD_MASK)) {
- DRM_DEBUG("execbuf with non bsd ring but with invalid "
- "bsd dispatch flags: %d\n", (int)(args->flags));
+ drm_dbg(&i915->drm,
+ "execbuf with non bsd ring but with invalid "
+ "bsd dispatch flags: %d\n", (int)(args->flags));
return -1;
}
@@ -2387,8 +2121,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
bsd_idx >>= I915_EXEC_BSD_SHIFT;
bsd_idx--;
} else {
- DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
- bsd_idx);
+ drm_dbg(&i915->drm,
+ "execbuf with unknown bsd ring: %u\n",
+ bsd_idx);
return -1;
}
@@ -2396,7 +2131,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
}
if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
- DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+ drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
+ user_ring_id);
return -1;
}
@@ -2556,6 +2292,73 @@ signal_fence_array(struct i915_execbuffer *eb,
}
}
+static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (rq == end || !i915_request_retire(rq))
+ break;
+}
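/*
 * Aside: retire_requests() above walks the timeline in order and stops at
 * the first request that is either the supplied bound or not yet
 * retirable. A minimal singly-linked sketch of that shape (hypothetical
 * struct req; user-space illustration only):
 */
#include <stdbool.h>
#include <stddef.h>

struct req { struct req *next; bool completed; };

/* retire completed requests up to, but not including, 'end' */
static void retire_upto(struct req **head, const struct req *end)
{
	struct req *rq;

	while ((rq = *head) != NULL) {
		if (rq == end || !rq->completed)
			break;
		*head = rq->next;   /* "retire": unlink (and free, in real code) */
	}
}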
+
+static void eb_request_add(struct i915_execbuffer *eb)
+{
+ struct i915_request *rq = eb->request;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
+ struct i915_request *prev;
+
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+
+ prev = __i915_request_commit(rq);
+
+ /* Check that the context wasn't destroyed before submission */
+ if (likely(!intel_context_is_closed(eb->context))) {
+ attr = eb->gem_context->sched;
+
+ /*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distance past over useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+ } else {
+ /* Serialise with context_close via the add_to_timeline */
+ i915_request_set_error_once(rq, -ENOENT);
+ __i915_request_skip(rq);
+ }
+
+ local_bh_disable();
+ __i915_request_queue(rq, &attr);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /* Try to clean up the client's timeline after submitting the request */
+ if (prev)
+ retire_requests(tl, prev);
+
+ mutex_unlock(&tl->mutex);
+}
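/*
 * Aside: eb_request_add() above derives the scheduling attributes from
 * the context's base priority plus the two boosts its comments describe.
 * A hedged sketch of that composition -- the flag values here are
 * placeholders, not the kernel's I915_PRIORITY_* bits:
 */
#include <stdbool.h>

#define PRIO_NOSEMAPHORE 0x1   /* placeholder for I915_PRIORITY_NOSEMAPHORE */
#define PRIO_WAIT        0x2   /* placeholder for I915_PRIORITY_WAIT */

static int effective_priority(int base, bool used_semaphores,
			      bool has_signalers)
{
	int prio = base;

	if (!used_semaphores)   /* boost real work past semaphore spinning */
		prio |= PRIO_NOSEMAPHORE;
	if (!has_signalers)     /* FQ_CODEL-style boost for new request flows */
		prio |= PRIO_WAIT;
	return prio;
}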
+
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
@@ -2568,6 +2371,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
+ struct i915_vma *batch;
int out_fence_fd = -1;
int err;
@@ -2582,9 +2386,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
- eb.vma[0] = NULL;
- eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
+ eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+ eb.vma[0].vma = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2652,10 +2455,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_context;
- err = i915_mutex_lock_interruptible(dev);
- if (err)
- goto err_engine;
-
err = eb_relocate(&eb);
if (err) {
/*
@@ -2669,20 +2468,23 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
- DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+ if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
+ drm_dbg(&i915->drm,
+ "Attempting to use self-modifying batch buffer\n");
err = -EINVAL;
goto err_vma;
}
- if (eb.batch_start_offset > eb.batch->size ||
- eb.batch_len > eb.batch->size - eb.batch_start_offset) {
- DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+
+ if (range_overflows_t(u64,
+ eb.batch_start_offset, eb.batch_len,
+ eb.batch->vma->size)) {
+ drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
err = -EINVAL;
goto err_vma;
}
if (eb.batch_len == 0)
- eb.batch_len = eb.batch->size - eb.batch_start_offset;
+ eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
err = eb_parse(&eb);
if (err)
@@ -2692,6 +2494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
+ batch = eb.batch->vma;
if (eb.batch_flags & I915_DISPATCH_SECURE) {
struct i915_vma *vma;
@@ -2705,13 +2508,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* fitting due to fragmentation.
* So this is actually safe.
*/
- vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_vma;
+ goto err_parse;
}
- eb.batch = vma;
+ batch = vma;
}
/* All GPU relocation batches must be submitted prior to the user rq */
@@ -2758,16 +2561,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
- eb.request->batch = eb.batch;
- if (eb.batch->private)
- intel_engine_pool_mark_active(eb.batch->private, eb.request);
+ eb.request->batch = batch;
+ if (batch->private)
+ intel_engine_pool_mark_active(batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
- err = eb_submit(&eb);
+ err = eb_submit(&eb, batch);
err_request:
add_to_client(eb.request, file);
i915_request_get(eb.request);
- i915_request_add(eb.request);
+ eb_request_add(&eb);
if (fences)
signal_fence_array(&eb, fences);
@@ -2786,16 +2589,15 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
- i915_vma_unpin(eb.batch);
- if (eb.batch->private)
- intel_engine_pool_put(eb.batch->private);
+ i915_vma_unpin(batch);
+err_parse:
+ if (batch->private)
+ intel_engine_pool_put(batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
- mutex_unlock(&dev->struct_mutex);
-err_engine:
eb_unpin_engine(&eb);
err_context:
i915_gem_context_put(eb.gem_context);
@@ -2813,9 +2615,7 @@ err_in_fence:
static size_t eb_element_size(void)
{
- return (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
+ return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
}
static bool check_buffer_count(size_t count)
@@ -2839,6 +2639,7 @@ int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -2848,7 +2649,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
int err;
if (!check_buffer_count(count)) {
- DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+ drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2873,8 +2674,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
+ drm_dbg(&i915->drm,
+ "Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
kvfree(exec_list);
kvfree(exec2_list);
return -ENOMEM;
@@ -2883,8 +2685,8 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * count);
if (err) {
- DRM_DEBUG("copy %d exec entries failed %d\n",
- args->buffer_count, err);
+ drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
+ args->buffer_count, err);
kvfree(exec_list);
kvfree(exec2_list);
return -EFAULT;
@@ -2931,6 +2733,7 @@ int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
struct drm_syncobj **fences = NULL;
@@ -2938,7 +2741,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
int err;
if (!check_buffer_count(count)) {
- DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+ drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2950,14 +2753,14 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
- count);
+ drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
+ count);
return -ENOMEM;
}
if (copy_from_user(exec2_list,
u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * count)) {
- DRM_DEBUG("copy %zd exec entries failed\n", count);
+ drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
kvfree(exec2_list);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 9cfb0e41ff06..cbbff81aa0af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -8,8 +8,6 @@
#include <linux/slab.h>
#include <linux/swiotlb.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0b6a442108de..b39c24dae64e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -613,8 +613,7 @@ __assign_mmap_offset(struct drm_file *file,
if (!obj)
return -ENOENT;
- if (mmap_type == I915_MMAP_TYPE_GTT &&
- i915_gem_object_never_bind_ggtt(obj)) {
+ if (i915_gem_object_never_mmap(obj)) {
err = -ENODEV;
goto out;
}
@@ -776,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
struct file *file;
rcu_read_lock();
- file = i915->gem.mmap_singleton;
+ file = READ_ONCE(i915->gem.mmap_singleton);
if (file && !get_file_rcu(file))
file = NULL;
rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 9c86f2dea947..2faa481cc18f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -11,8 +11,6 @@
#include <drm/drm_file.h>
#include <drm/drm_device.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
@@ -194,9 +192,9 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
}
static inline bool
-i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 70809d8897cd..e00792158f13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -186,7 +186,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
0);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
@@ -196,6 +196,17 @@ out_unpin:
return err;
}
+/* Wa_1209644611:icl,ehl */
+static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
+{
+ u32 height = size >> PAGE_SHIFT;
+
+ if (!IS_GEN(i915, 11))
+ return false;
+
+ return height % 4 == 3 && height <= 8;
+}
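/*
 * Aside: wa_1209644611_applies() above steers gen11 away from the gen9
 * fast-copy BLT when the transfer height in pages is 3 or 7 -- the only
 * values <= 8 with height % 4 == 3. A quick self-check of that predicate
 * (illustration only):
 */
#include <assert.h>
#include <stdint.h>

static int wa_applies(uint32_t pages) { return pages % 4 == 3 && pages <= 8; }

int main(void)
{
	for (uint32_t h = 0; h <= 16; h++)
		assert(wa_applies(h) == (h == 3 || h == 7));
	return 0;
}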
+
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *src,
struct i915_vma *dst)
@@ -237,7 +248,8 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
size = min_t(u64, rem, block_size);
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (INTEL_GEN(i915) >= 9) {
+ if (INTEL_GEN(i915) >= 9 &&
+ !wa_1209644611_applies(i915, size)) {
*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
*cmd++ = 0;
@@ -385,7 +397,7 @@ out_unlock:
drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c2174da35bb0..a0b10bcd8d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -34,7 +34,7 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
#define I915_GEM_OBJECT_IS_PROXY BIT(3)
-#define I915_GEM_OBJECT_NO_GGTT BIT(4)
+#define I915_GEM_OBJECT_NO_MMAP BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 54aca5c9101e..24f4cadea114 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -83,10 +83,12 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
int err;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
- DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ drm_dbg(&i915->drm,
+ "Attempting to obtain a purgeable object\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index b07bb40edd5a..698e22420dc5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -194,10 +194,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages)) {
+ if (!IS_ERR_OR_NULL(pages))
i915_gem_shmem_ops.put_pages(obj, pages);
- i915_gem_object_release_memory_region(obj);
- }
+
+ i915_gem_object_release_memory_region(obj);
+
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index c8264eb036bf..3d215164dd5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -85,7 +85,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ drm_WARN_ON(&i915->drm,
+ i915_gem_object_set_to_gtt_domain(obj, false));
i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index a2a980d9d241..5d5d7eef3f43 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -148,7 +148,8 @@ rebuild_st:
last_pfn = page_to_pfn(page);
/* Check that the i965g/gm workaround works. */
- WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+ drm_WARN_ON(&i915->drm,
+ (gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
if (sg) { /* loop terminated early; short sg table */
sg_page_sizes |= sg->length;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 59b387ade49c..03e5eb4c99d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
-#include <drm/i915_drm.h>
#include "i915_trace.h"
@@ -401,19 +400,22 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
i915->mm.shrinker.seeks = DEFAULT_SEEKS;
i915->mm.shrinker.batch = 4096;
- WARN_ON(register_shrinker(&i915->mm.shrinker));
+ drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
+ drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
- WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
+ drm_WARN_ON(&i915->drm,
+ register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
- WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
- WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
+ drm_WARN_ON(&i915->drm,
+ unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
+ drm_WARN_ON(&i915->drm,
+ unregister_oom_notifier(&i915->mm.oom_notifier));
unregister_shrinker(&i915->mm.shrinker);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 451f3078d60d..5557dfa83a7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,7 @@
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "i915_vgpu.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -110,8 +111,11 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
- DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
- DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
+ drm_dbg(&i915->drm,
+ "GTT within stolen memory at %pR\n",
+ &ggtt_res);
+ drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
+ dsm);
}
}
@@ -142,8 +146,9 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
* range. Apparently this works.
*/
if (!r && !IS_GEN(i915, 3)) {
- DRM_ERROR("conflict detected with stolen region: %pR\n",
- dsm);
+ drm_err(&i915->drm,
+ "conflict detected with stolen region: %pR\n",
+ dsm);
return -EBUSY;
}
@@ -171,8 +176,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
ELK_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
- IS_GM45(i915) ? "CTG" : "ELK", reg_val);
+ drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
+ IS_GM45(i915) ? "CTG" : "ELK", reg_val);
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
return;
@@ -181,14 +186,16 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
- reg_val);
+ drm_WARN(&i915->drm, IS_GEN(i915, 5),
+ "ILK stolen reserved found? 0x%08x\n",
+ reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
return;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
- WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
+ drm_WARN_ON(&i915->drm,
+ (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
*size = stolen_top - *base;
}
@@ -200,7 +207,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -234,7 +241,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -262,7 +269,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -289,7 +296,7 @@ static void chv_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -323,7 +330,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -342,7 +349,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
{
u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
@@ -453,8 +460,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
* it likely means we failed to read the registers correctly.
*/
if (!reserved_base) {
- DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
- &reserved_base, &reserved_size);
+ drm_err(&i915->drm,
+ "inconsistent reservation %pa + %pa; ignoring\n",
+ &reserved_base, &reserved_size);
reserved_base = stolen_top;
reserved_size = 0;
}
@@ -463,8 +471,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
- DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
- &i915->dsm_reserved, &i915->dsm);
+ drm_err(&i915->drm,
+ "Stolen reserved area %pR outside stolen memory %pR\n",
+ &i915->dsm_reserved, &i915->dsm);
return 0;
}
@@ -472,9 +481,10 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&i915->dsm) >> 10,
- ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
+ drm_dbg(&i915->drm,
+ "Memory reserved for graphics device: %lluK, usable: %lluK\n",
+ (u64)resource_size(&i915->dsm) >> 10,
+ ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
i915->stolen_usable_size =
resource_size(&i915->dsm) - reserved_total;
@@ -677,26 +687,24 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
- resource_size_t gtt_offset,
resource_size_t size)
{
struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
- struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
- struct i915_vma *vma;
int ret;
if (!drm_mm_initialized(&i915->mm.stolen))
return ERR_PTR(-ENODEV);
- DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
- &stolen_offset, &gtt_offset, &size);
+ drm_dbg(&i915->drm,
+ "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
+ &stolen_offset, &size);
/* KISS and expect everything to be page-aligned */
- if (WARN_ON(size == 0) ||
- WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
- WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
+ if (GEM_WARN_ON(size == 0) ||
+ GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+ GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -709,68 +717,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
mutex_unlock(&i915->mm.stolen_lock);
if (ret) {
- DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
- kfree(stolen);
- return ERR_PTR(ret);
+ obj = ERR_PTR(ret);
+ goto err_free;
}
obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj)) {
- DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
- i915_gem_stolen_remove_node(i915, stolen);
- kfree(stolen);
- return obj;
- }
-
- /* Some objects just need physical mem from stolen space */
- if (gtt_offset == I915_GTT_OFFSET_NONE)
- return obj;
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- vma = i915_vma_instance(obj, &ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_pages;
- }
-
- /* To simplify the initialisation sequence between KMS and GTT,
- * we allow construction of the stolen object prior to
- * setting up the GTT space. The actual reservation will occur
- * later.
- */
- mutex_lock(&ggtt->vm.mutex);
- ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- size, gtt_offset, obj->cache_level,
- 0);
- if (ret) {
- DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
- mutex_unlock(&ggtt->vm.mutex);
- goto err_pages;
- }
-
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
- GEM_BUG_ON(vma->pages);
- vma->pages = obj->mm.pages;
- atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
-
- set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
- __i915_vma_set_map_and_fenceable(vma);
-
- list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
- mutex_unlock(&ggtt->vm.mutex);
-
- GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
- atomic_inc(&obj->bind_count);
+ if (IS_ERR(obj))
+ goto err_stolen;
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
return obj;
-err_pages:
- i915_gem_object_unpin_pages(obj);
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(ret);
+err_stolen:
+ i915_gem_stolen_remove_node(i915, stolen);
+err_free:
+ kfree(stolen);
+ return obj;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index c1040627fbf3..e15c0adad8af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -28,7 +28,6 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
resource_size_t stolen_offset,
- resource_size_t gtt_offset,
resource_size_t size);
#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 6c7825a2dc2a..0158e49bf9bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -6,7 +6,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem.h"
@@ -183,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
- struct i915_vma *vma;
+ struct i915_vma *vma, *vn;
+ LIST_HEAD(unbind);
int ret = 0;
if (tiling_mode == I915_TILING_NONE)
return 0;
mutex_lock(&ggtt->vm.mutex);
+
+ spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
+ GEM_BUG_ON(vma->vm != &ggtt->vm);
+
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
+ list_move(&vma->vm_link, &unbind);
+ }
+ spin_unlock(&obj->vma.lock);
+
+ list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
ret = __i915_vma_unbind(vma);
- if (ret)
+ if (ret) {
+ /* Restore the remaining vma on an error */
+ list_splice(&unbind, &ggtt->vm.bound_list);
break;
+ }
}
+
mutex_unlock(&ggtt->vm.mutex);
return ret;
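/*
 * Aside: the fence_prepare hunk above drains matching vmas onto a private
 * list under the short spinlock, then does the heavy unbinding outside
 * it. A generic user-space sketch of that "collect under lock, act after"
 * pattern, with a pthread mutex standing in for the kernel locks:
 */
#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; int needs_work; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *bound;   /* shared list, guarded by list_lock */

static void do_unbind(struct node *n) { (void)n; /* slow, lock-free work */ }

static void drain_and_process(void)
{
	struct node *todo = NULL, **pp, *n;

	pthread_mutex_lock(&list_lock);
	pp = &bound;
	while ((n = *pp) != NULL) {
		if (n->needs_work) {       /* unlink onto the private list */
			*pp = n->next;
			n->next = todo;
			todo = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	while ((n = todo) != NULL) {   /* heavy work outside the lock */
		todo = n->next;
		do_unbind(n);
	}
}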
@@ -269,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
}
mutex_unlock(&obj->mm.lock);
+ spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
vma->fence_size =
i915_gem_fence_size(i915, vma->size, tiling, stride);
@@ -279,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
if (vma->fence)
vma->fence->dirty = true;
}
+ spin_unlock(&obj->vma.lock);
obj->tiling_and_stride = tiling | stride;
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 580319b7bf1a..7ffd7afeb7a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -10,8 +10,6 @@
#include <linux/swap.h>
#include <linux/sched/mm.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -704,7 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
- I915_GEM_OBJECT_NO_GGTT |
+ I915_GEM_OBJECT_NO_MMAP |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
@@ -770,6 +768,23 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
+ /*
+ * XXX: There is a prevalence of the assumption that we fit the
+ * object's page count inside a 32bit _signed_ variable. Let's document
+ * this and catch if we ever need to fix it. In the meantime, if you do
+ * spot such a local variable, please consider fixing!
+ *
+ * Aside from our own locals (for which we have no excuse!):
+ * - sg_table embeds unsigned int for num_pages
+ * - get_user_pages*() mixes ints with longs
+ */
+
+ if (args->user_size >> PAGE_SHIFT > INT_MAX)
+ return -E2BIG;
+
+ if (overflows_type(args->user_size, obj->base.size))
+ return -E2BIG;
+
if (!args->user_size)
return -EINVAL;
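/*
 * Aside: the new userptr checks above guard two distinct overflows -- the
 * page count must fit a signed int, and user_size must fit the
 * object-size field (which matters on 32-bit builds). The same tests in
 * isolation, assuming a 4KiB page (sketch):
 */
#include <limits.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12   /* assumed 4KiB pages */

static int userptr_size_ok(uint64_t user_size)
{
	if (user_size >> SKETCH_PAGE_SHIFT > INT_MAX)  /* page count vs int */
		return 0;
	if (user_size != (size_t)user_size)  /* truncation on 32-bit size_t */
		return 0;
	return user_size != 0;               /* zero-sized objects rejected */
}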
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 9311250d7d6f..d4f94ca9ae0d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1208,107 +1208,6 @@ static int igt_write_huge(struct i915_gem_context *ctx,
return err;
}
-static int igt_ppgtt_exhaust_huge(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
- static unsigned int pages[ARRAY_SIZE(page_sizes)];
- struct drm_i915_gem_object *obj;
- unsigned int size_mask;
- unsigned int page_mask;
- int n, i;
- int err = -ENODEV;
-
- if (supported == I915_GTT_PAGE_SIZE_4K)
- return 0;
-
- /*
- * Sanity check creating objects with a varying mix of page sizes --
- * ensuring that our writes land in the right place.
- */
-
- n = 0;
- for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
- pages[n++] = BIT(i);
-
- for (size_mask = 2; size_mask < BIT(n); size_mask++) {
- unsigned int size = 0;
-
- for (i = 0; i < n; i++) {
- if (size_mask & BIT(i))
- size |= pages[i];
- }
-
- /*
- * For our page mask we want to enumerate all the page-size
- * combinations which will fit into our chosen object size.
- */
- for (page_mask = 2; page_mask <= size_mask; page_mask++) {
- unsigned int page_sizes = 0;
-
- for (i = 0; i < n; i++) {
- if (page_mask & BIT(i))
- page_sizes |= pages[i];
- }
-
- /*
- * Ensure that we can actually fill the given object
- * with our chosen page mask.
- */
- if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
- continue;
-
- obj = huge_pages_object(i915, size, page_sizes);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_device;
- }
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- i915_gem_object_put(obj);
-
- if (err == -ENOMEM) {
- pr_info("unable to get pages, size=%u, pages=%u\n",
- size, page_sizes);
- err = 0;
- break;
- }
-
- pr_err("pin_pages failed, size=%u, pages=%u\n",
- size_mask, page_mask);
-
- goto out_device;
- }
-
- /* Force the page-size for the gtt insertion */
- obj->mm.page_sizes.sg = page_sizes;
-
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("exhaust write-huge failed with size=%u\n",
- size);
- goto out_unpin;
- }
-
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj);
- i915_gem_object_put(obj);
- }
- }
-
- goto out_device;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
- i915_gem_object_put(obj);
-out_device:
- mkwrite_device_info(i915)->page_sizes = supported;
-
- return err;
-}
-
typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
@@ -1578,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg)
unsigned int page_size = BIT(first);
obj = i915_gem_object_create_internal(dev_priv, page_size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_vm;
+ }
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
@@ -1632,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg)
}
obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_vm;
+ }
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
@@ -1900,7 +1803,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_shrink_thp),
SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
- SUBTEST(igt_ppgtt_exhaust_huge),
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7fc46861a54d..54b86cf7f5d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1004,7 +1004,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
@@ -1465,9 +1465,12 @@ out_file:
static int check_scratch(struct i915_address_space *vm, u64 offset)
{
- struct drm_mm_node *node =
- __drm_mm_interval_first(&vm->mm,
- offset, offset + sizeof(u32) - 1);
+ struct drm_mm_node *node;
+
+ mutex_lock(&vm->mutex);
+ node = __drm_mm_interval_first(&vm->mm,
+ offset, offset + sizeof(u32) - 1);
+ mutex_unlock(&vm->mutex);
if (!node || node->start > offset)
return 0;
@@ -1492,6 +1495,10 @@ static int write_to_scratch(struct i915_gem_context *ctx,
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+ err = check_scratch(ctx_vm(ctx), offset);
+ if (err)
+ return err;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1528,10 +1535,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto out_vm;
- err = check_scratch(vm, offset);
- if (err)
- goto err_unpin;
-
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
@@ -1556,7 +1559,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1575,64 +1578,95 @@ static int read_from_scratch(struct i915_gem_context *ctx,
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
- const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100;
struct i915_request *rq;
struct i915_vma *vma;
+ unsigned int flags;
u32 *cmd;
int err;
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+ err = check_scratch(ctx_vm(ctx), offset);
+ if (err)
+ return err;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto out;
- }
-
- memset(cmd, POISON_INUSE, PAGE_SIZE);
if (INTEL_GEN(i915) >= 8) {
+ const u32 GPR0 = engine->mmio_base + 0x600;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_vm;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto out_vm;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
- *cmd++ = RCS_GPR0;
+ *cmd++ = GPR0;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
- *cmd++ = RCS_GPR0;
+ *cmd++ = GPR0;
*cmd++ = result;
*cmd++ = 0;
+ *cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ flags = 0;
} else {
+ const u32 reg = engine->mmio_base + 0x420;
+
+ /* hsw: register access even to 3DPRIM! is protected */
+ vm = i915_vm_get(&engine->gt->ggtt->vm);
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_vm;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_vm;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
*cmd++ = MI_LOAD_REGISTER_MEM;
- *cmd++ = RCS_GPR0;
+ *cmd++ = reg;
*cmd++ = offset;
- *cmd++ = MI_STORE_REGISTER_MEM;
- *cmd++ = RCS_GPR0;
- *cmd++ = result;
- }
- *cmd = MI_BATCH_BUFFER_END;
+ *cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+ *cmd++ = reg;
+ *cmd++ = vma->node.start + result;
+ *cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(engine->gt);
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_vm;
+ flags = I915_DISPATCH_SECURE;
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
- if (err)
- goto out_vm;
-
- err = check_scratch(vm, offset);
- if (err)
- goto err_unpin;
+ intel_gt_chipset_flush(engine->gt);
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -1640,7 +1674,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
if (err)
goto err_request;
@@ -1674,7 +1708,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1686,6 +1720,39 @@ out:
return err;
}
+static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
+{
+ struct i915_address_space *vm;
+ struct page *page;
+ u32 *vaddr;
+ int err = 0;
+
+ vm = ctx_vm(ctx);
+ if (!vm)
+ return -ENODEV;
+
+ page = vm->scratch[0].base.page;
+ if (!page) {
+ pr_err("No scratch page!\n");
+ return -EINVAL;
+ }
+
+ vaddr = kmap(page);
+ if (!vaddr) {
+ pr_err("No (mappable) scratch page!\n");
+ return -EINVAL;
+ }
+
+ memcpy(out, vaddr, sizeof(*out));
+ if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
+ pr_err("Inconsistent initial state of scratch page!\n");
+ err = -EINVAL;
+ }
+ kunmap(page);
+
+ return err;
+}
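/*
 * Aside: check_scratch_page() above relies on memchr_inv() to prove the
 * scratch page is uniformly filled. A compact user-space equivalent of
 * that uniformity test (sketch; the overlapping memcmp checks each byte
 * against its neighbour):
 */
#include <string.h>

/* return 1 if all 'len' bytes of 'buf' equal 'c' */
static int all_bytes_equal(const void *buf, unsigned char c, size_t len)
{
	const unsigned char *p = buf;

	return len == 0 || (p[0] == c && memcmp(p, p + 1, len - 1) == 0);
}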
+
static int igt_vm_isolation(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -1696,6 +1763,7 @@ static int igt_vm_isolation(void *arg)
I915_RND_STATE(prng);
struct file *file;
u64 vm_total;
+ u32 expected;
int err;
if (INTEL_GEN(i915) < 7)
@@ -1730,9 +1798,17 @@ static int igt_vm_isolation(void *arg)
if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
goto out_file;
+ /* Read the initial state of the scratch page */
+ err = check_scratch_page(ctx_a, &expected);
+ if (err)
+ goto out_file;
+
+ err = check_scratch_page(ctx_b, &expected);
+ if (err)
+ goto out_file;
+
vm_total = ctx_vm(ctx_a)->total;
GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
- vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
num_engines = 0;
@@ -1743,14 +1819,18 @@ static int igt_vm_isolation(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
+ /* Not all engines have their own GPR! */
+ if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
+ continue;
+
while (!__igt_timeout(end_time, NULL)) {
u32 value = 0xc5c5c5c5;
u64 offset;
- div64_u64_rem(i915_prandom_u64_state(&prng),
- vm_total, &offset);
- offset = round_down(offset, alignof_dword);
- offset += I915_GTT_PAGE_SIZE;
+ /* Leave enough space at offset 0 for the batch */
+ offset = igt_random_offset(&prng,
+ I915_GTT_PAGE_SIZE, vm_total,
+ sizeof(u32), alignof_dword);
err = write_to_scratch(ctx_a, engine,
offset, 0xdeadbeef);
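/*
 * Aside: igt_random_offset() above replaces the open-coded
 * div64_u64_rem() dance -- pick a random offset inside [start, end) that
 * leaves 'len' bytes of room and satisfies 'align'. A sketch of that
 * computation with rand() standing in for the selftest PRNG (assumes
 * end - start >= len, power-of-two align, and an aligned start):
 */
#include <stdint.h>
#include <stdlib.h>

static uint64_t random_offset(uint64_t start, uint64_t end,
			      uint64_t len, uint64_t align)
{
	uint64_t span = end - start - len;            /* largest usable delta */
	uint64_t off = (((uint64_t)rand() << 32) | (uint32_t)rand()) % (span + 1);

	return start + (off & ~(align - 1));          /* align down into range */
}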
@@ -1760,7 +1840,7 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_file;
- if (value) {
+ if (value != expected) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
engine->name, value,
upper_32_bits(offset),
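The hunk above replaces the open-coded div64_u64_rem() dance with igt_random_offset(), which picks a dword-aligned offset in [I915_GTT_PAGE_SIZE, vm_total) so that offset 0 stays free for the batch. A minimal userspace sketch of that selection follows; the names are hypothetical and plain stdlib randomness stands in for the i915 prandom state:

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative only: pick a random offset in [start, end - len),
     * rounded down to 'align' (a power of two). Assumes start is aligned
     * and the range is non-empty. */
    static uint64_t random_offset(uint64_t start, uint64_t end,
                                  uint64_t len, uint64_t align)
    {
            uint64_t span = end - len - start;
            uint64_t off = (uint64_t)rand() % span;

            return start + (off & ~(align - 1));
    }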
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..31549ad83fa6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total, max;
int err;
ctx = thread->ctx;
@@ -225,27 +226,32 @@ static int igt_fill_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ /*
+ * If we have a tiny shared address space, like for the GGTT,
+ * then we can't be too greedy.
+ */
+ max = ce->vm->total;
+ if (i915_is_ggtt(ce->vm) || thread->ctx)
+ max = div_u64(max, thread->n_cpus);
+ max >>= 4;
+
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- /*
- * If we have a tiny shared address space, like for the GGTT
- * then we can't be too greedy.
- */
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, max);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size + 1;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
+ phys_sz = min(phys_sz, sz);
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
@@ -276,13 +282,14 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -293,6 +300,8 @@ static int igt_fill_blt_thread(void *arg)
i915_gem_object_unpin_map(obj);
i915_gem_object_put(obj);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
@@ -319,6 +328,7 @@ static int igt_copy_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total, max;
int err;
ctx = thread->ctx;
@@ -334,23 +344,32 @@ static int igt_copy_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ /*
+ * If we have a tiny shared address space, like for the GGTT,
+ * then we can't be too greedy.
+ */
+ max = ce->vm->total;
+ if (i915_is_ggtt(ce->vm) || thread->ctx)
+ max = div_u64(max, thread->n_cpus);
+ max >>= 4;
+
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, max);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size + 1;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
+ phys_sz = min(phys_sz, sz);
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
@@ -397,13 +416,14 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -416,6 +436,8 @@ static int igt_copy_blt_thread(void *arg)
i915_gem_object_put(src);
i915_gem_object_put(dst);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
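Both blt threads now grow their working-set bound geometrically instead of sampling up to a sixteenth of the whole address space on every pass: total starts at one page, doubles per iteration, and is clamped to a per-thread share of the vm. A sketch of that schedule, with assumed names:

    #include <stdint.h>

    /* Illustrative: compute the next iteration's size cap under the
     * doubling schedule used by the selftests above. */
    static uint64_t next_cap(uint64_t total, uint64_t max)
    {
            total <<= 1;                      /* double each pass */
            return total < max ? total : max; /* clamp to our share */
    }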
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 6718da20f35d..772d8cba7da9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -159,7 +159,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
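i915_request_set_error_once() differs from the old i915_request_skip() call in that only the first reported error sticks; later or concurrent reporters cannot overwrite it. The compare-and-swap idea, sketched with C11 atomics (the driver uses its own fence primitives, so this is illustrative only):

    #include <stdatomic.h>

    /* Illustrative: record err only if no error has been set yet. */
    static void set_error_once(_Atomic int *error, int err)
    {
            int expected = 0;

            atomic_compare_exchange_strong(error, &expected, err);
    }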
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 384143aa7776..e7e3c620f542 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,9 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ spin_lock_init(&ctx->stale.lock);
+ INIT_LIST_HEAD(&ctx->stale.engines);
+
i915_gem_context_set_persistence(ctx);
mutex_init(&ctx->engines_mutex);
@@ -37,7 +40,7 @@ mock_context(struct drm_i915_private *i915,
if (name) {
struct i915_ppgtt *ppgtt;
- strncpy(ctx->name, name, sizeof(ctx->name));
+ strncpy(ctx->name, name, sizeof(ctx->name) - 1);
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
@@ -83,6 +86,8 @@ live_context(struct drm_i915_private *i915, struct file *file)
if (IS_ERR(ctx))
return ctx;
+ i915_gem_context_set_no_error_capture(ctx);
+
err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
if (err < 0)
goto err_ctx;
@@ -105,6 +110,7 @@ kernel_context(struct drm_i915_private *i915)
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_persistence(ctx);
+ i915_gem_context_set_no_error_capture(ctx);
return ctx;
}
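The mock_context() change above caps the copy at sizeof(ctx->name) - 1 because strncpy() leaves the buffer unterminated when the source is at least as long as the limit. A self-contained illustration of the safe pattern (the kernel struct is zero-allocated, which is why reserving the final byte is already sufficient there; the explicit terminator below is belt-and-braces):

    #include <string.h>

    struct named { char name[24]; };

    static void set_name(struct named *n, const char *src)
    {
            strncpy(n->name, src, sizeof(n->name) - 1);
            n->name[sizeof(n->name) - 1] = '\0'; /* guarantee termination */
    }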
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
new file mode 100644
index 000000000000..de595b66a746
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "intel_gpu_commands.h"
+
+#define MAX_URB_ENTRIES 64
+#define STATE_SIZE (4 * 1024)
+#define GT3_INLINE_DATA_DELAYS 0x1E00
+#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
+
+struct cb_kernel {
+ const void *data;
+ u32 size;
+};
+
+#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }
+
+#include "ivb_clear_kernel.c"
+static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);
+
+#include "hsw_clear_kernel.c"
+static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
+
+struct batch_chunk {
+ struct i915_vma *vma;
+ u32 offset;
+ u32 *start;
+ u32 *end;
+ u32 max_items;
+};
+
+struct batch_vals {
+ u32 max_primitives;
+ u32 max_urb_entries;
+ u32 cmd_size;
+ u32 state_size;
+ u32 state_start;
+ u32 batch_size;
+ u32 surface_height;
+ u32 surface_width;
+ u32 scratch_size;
+ u32 max_size;
+};
+
+static void
+batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
+{
+ if (IS_HASWELL(i915)) {
+ bv->max_primitives = 280;
+ bv->max_urb_entries = MAX_URB_ENTRIES;
+ bv->surface_height = 16 * 16;
+ bv->surface_width = 32 * 2 * 16;
+ } else {
+ bv->max_primitives = 128;
+ bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ bv->surface_height = 16 * 8;
+ bv->surface_width = 32 * 16;
+ }
+ bv->cmd_size = bv->max_primitives * 4096;
+ bv->state_size = STATE_SIZE;
+ bv->state_start = bv->cmd_size;
+ bv->batch_size = bv->cmd_size + bv->state_size;
+ bv->scratch_size = bv->surface_height * bv->surface_width;
+ bv->max_size = bv->batch_size + bv->scratch_size;
+}
+
+static void batch_init(struct batch_chunk *bc,
+ struct i915_vma *vma,
+ u32 *start, u32 offset, u32 max_bytes)
+{
+ bc->vma = vma;
+ bc->offset = offset;
+ bc->start = start + bc->offset / sizeof(*bc->start);
+ bc->end = bc->start;
+ bc->max_items = max_bytes / sizeof(*bc->start);
+}
+
+static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
+{
+ return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
+}
+
+static u32 batch_addr(const struct batch_chunk *bc)
+{
+ return bc->vma->node.start;
+}
+
+static void batch_add(struct batch_chunk *bc, const u32 d)
+{
+ GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
+ *bc->end++ = d;
+}
+
+static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
+{
+ u32 *map;
+
+ if (align) {
+ u32 *end = PTR_ALIGN(bc->end, align);
+
+ memset32(bc->end, 0, end - bc->end);
+ bc->end = end;
+ }
+
+ map = bc->end;
+ bc->end += items;
+
+ return map;
+}
+
+static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
+{
+ GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
+ return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
+}
+
+static u32
+gen7_fill_surface_state(struct batch_chunk *state,
+ const u32 dst_offset,
+ const struct batch_vals *bv)
+{
+ u32 surface_h = bv->surface_height;
+ u32 surface_w = bv->surface_width;
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+#define SURFACE_2D 1
+#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
+#define RENDER_CACHE_READ_WRITE 1
+
+ *cs++ = SURFACE_2D << 29 |
+ (SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
+ (RENDER_CACHE_READ_WRITE << 8);
+
+ *cs++ = batch_addr(state) + dst_offset;
+
+ *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
+ *cs++ = surface_w;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+#define SHADER_CHANNELS(r, g, b, a) \
+ (((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
+ *cs++ = SHADER_CHANNELS(4, 5, 6, 7);
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_binding_table(struct batch_chunk *state,
+ const struct batch_vals *bv)
+{
+ u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = surface_start - state->offset;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_kernel_data(struct batch_chunk *state,
+ const u32 *data,
+ const u32 size)
+{
+ return batch_offset(state,
+ memcpy(batch_alloc_bytes(state, 64, size),
+ data, size));
+}
+
+static u32
+gen7_fill_interface_descriptor(struct batch_chunk *state,
+ const struct batch_vals *bv,
+ const struct cb_kernel *kernel,
+ unsigned int count)
+{
+ u32 kernel_offset =
+ gen7_fill_kernel_data(state, kernel->data, kernel->size);
+ u32 binding_table = gen7_fill_binding_table(state, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8 * count);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = kernel_offset;
+ *cs++ = (1 << 7) | (1 << 13);
+ *cs++ = 0;
+ *cs++ = (binding_table - state->offset) | 1;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* 1 - 63 dummy idds */
+ memset32(cs, 0x00, (count - 1) * 8);
+ batch_advance(state, cs + (count - 1) * 8);
+
+ return offset;
+}
+
+static void
+gen7_emit_state_base_address(struct batch_chunk *batch,
+ u32 surface_state_base)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 12);
+
+ *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ /* general */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* surface */
+ *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+ /* dynamic */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* indirect */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* instruction */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+
+ /* general/dynamic/indirect/instruction access bounds */
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_vfe_state(struct batch_chunk *batch,
+ const struct batch_vals *bv,
+ u32 urb_size, u32 curbe_size,
+ u32 mode)
+{
+ u32 urb_entries = bv->max_urb_entries;
+ u32 threads = bv->max_primitives - 1;
+ u32 *cs = batch_alloc_items(batch, 32, 8);
+
+ *cs++ = MEDIA_VFE_STATE | (8 - 2);
+
+ /* scratch buffer */
+ *cs++ = 0;
+
+ /* number of threads & urb entries for GPGPU vs Media Mode */
+ *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+
+ *cs++ = 0;
+
+ /* urb entry size & curbe size in 256 bits unit */
+ *cs++ = urb_size << 16 | curbe_size;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
+ const u32 interface_descriptor,
+ unsigned int count)
+{
+ u32 *cs = batch_alloc_items(batch, 8, 4);
+
+ *cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
+ *cs++ = 0;
+ *cs++ = count * 8 * sizeof(*cs);
+
+ /*
+ * interface descriptor address - it is relative to the dynamic
+ * state base address
+ */
+ *cs++ = interface_descriptor;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_media_object(struct batch_chunk *batch,
+ unsigned int media_object_index)
+{
+ unsigned int x_offset = (media_object_index % 16) * 64;
+ unsigned int y_offset = (media_object_index / 16) * 16;
+ unsigned int inline_data_size;
+ unsigned int media_batch_size;
+ unsigned int i;
+ u32 *cs;
+
+ inline_data_size = 112 * 8;
+ media_batch_size = inline_data_size + 6;
+
+ cs = batch_alloc_items(batch, 8, media_batch_size);
+
+ *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+
+ /* interface descriptor offset */
+ *cs++ = 0;
+
+ /* without indirect data */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* inline */
+ *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = 0;
+ *cs++ = GT3_INLINE_DATA_DELAYS;
+ for (i = 3; i < inline_data_size; i++)
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 5);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void emit_batch(struct i915_vma * const vma,
+ u32 *start,
+ const struct batch_vals *bv)
+{
+ struct drm_i915_private *i915 = vma->vm->i915;
+ unsigned int desc_count = 64;
+ const u32 urb_size = 112;
+ struct batch_chunk cmds, state;
+ u32 interface_descriptor;
+ unsigned int i;
+
+ batch_init(&cmds, vma, start, 0, bv->cmd_size);
+ batch_init(&state, vma, start, bv->state_start, bv->state_size);
+
+ interface_descriptor =
+ gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+ gen7_emit_pipeline_flush(&cmds);
+ batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+ batch_add(&cmds, MI_NOOP);
+ gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_flush(&cmds);
+
+ gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+
+ gen7_emit_interface_descriptor_load(&cmds,
+ interface_descriptor,
+ desc_count);
+
+ for (i = 0; i < bv->max_primitives; i++)
+ gen7_emit_media_object(&cmds, i);
+
+ batch_add(&cmds, MI_BATCH_BUFFER_END);
+}
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ struct batch_vals bv;
+ u32 *batch;
+
+ batch_get_defaults(engine->i915, &bv);
+ if (!vma)
+ return bv.max_size;
+
+ GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+
+ batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return 0;
+}
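gen7_renderclear.c builds its batch with a tiny bump allocator: batch_alloc_items() zero-pads up to the requested alignment and then hands out the next run of dwords from the chunk. The same pattern in standalone form (userspace types; the GEM_BUG_ON bounds check is omitted for brevity):

    #include <stdint.h>
    #include <string.h>

    struct chunk { uint32_t *start, *end; };

    /* Illustrative: zero-pad to 'align' bytes, then bump-allocate
     * 'items' dwords and return a pointer to them. */
    static uint32_t *alloc_items(struct chunk *c, uintptr_t align,
                                 uint32_t items)
    {
            uint32_t *map;

            if (align) {
                    uint32_t *aligned =
                            (uint32_t *)(((uintptr_t)c->end + align - 1) &
                                         ~(align - 1));

                    memset(c->end, 0, (char *)aligned - (char *)c->end);
                    c->end = aligned;
            }

            map = c->end;
            c->end += items;
            return map;
    }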
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.h b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
new file mode 100644
index 000000000000..bb100748e2c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __GEN7_RENDERCLEAR_H__
+#define __GEN7_RENDERCLEAR_H__
+
+struct intel_engine_cs;
+struct i915_vma;
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma);
+
+#endif /* __GEN7_RENDERCLEAR_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 4d1de2d97d5c..94e746af8926 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -8,6 +8,7 @@
#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"
@@ -25,6 +26,30 @@ static u64 gen8_pde_encode(const dma_addr_t addr,
return pde;
}
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
+
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *i915 = ppgtt->vm.i915;
@@ -706,6 +731,8 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
+ ppgtt->vm.pte_encode = gen8_pte_encode;
+
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
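gen8_pte_encode() above composes a page-table entry from the DMA address plus present/RW bits and a PPAT cacheability field. A standalone restatement of that shape; the bit positions are restated locally for illustration and should not be read as the authoritative hardware layout:

    #include <stdbool.h>
    #include <stdint.h>

    #define PTE_PRESENT (1ULL << 0)
    #define PTE_RW      (1ULL << 1)
    #define PTE_PPAT_UC (1ULL << 2) /* illustrative uncached PPAT index */

    static uint64_t pte_encode(uint64_t addr, bool readonly, bool uncached)
    {
            uint64_t pte = addr | PTE_PRESENT | PTE_RW;

            if (readonly)
                    pte &= ~PTE_RW;
            if (uncached)
                    pte |= PTE_PPAT_UC;

            return pte;
    }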
diff --git a/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
new file mode 100644
index 000000000000..b47f9d4a0848
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC
+ */
+
+static const u32 hsw_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x00000160,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffe0,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffc0,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 57e8a051ddc2..aea992e46c42 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -51,6 +51,11 @@ int intel_context_alloc_state(struct intel_context *ce)
return -EINTR;
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_is_banned(ce)) {
+ err = -EIO;
+ goto unlock;
+ }
+
err = ce->ops->alloc(ce);
if (unlikely(err))
goto unlock;
@@ -92,6 +97,8 @@ int __intel_context_do_pin(struct intel_context *ce)
{
int err;
+ GEM_BUG_ON(intel_context_is_closed(ce));
+
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = intel_context_alloc_state(ce);
if (err)
@@ -116,7 +123,8 @@ int __intel_context_do_pin(struct intel_context *ce)
if (unlikely(err))
goto err_active;
- CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+ CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
+ i915_ggtt_offset(ce->ring->vma),
ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
@@ -219,7 +227,9 @@ static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
- CE_TRACE(ce, "retire\n");
+ CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
+ intel_context_get_total_runtime_ns(ce),
+ intel_context_get_avg_runtime_ns(ce));
set_bit(CONTEXT_VALID_BIT, &ce->flags);
if (ce->state)
@@ -280,6 +290,8 @@ intel_context_init(struct intel_context *ce,
ce->sseu = engine->sseu;
ce->ring = __intel_context_ring_size(SZ_4K);
+ ewma_runtime_init(&ce->runtime.avg);
+
ce->vm = i915_vm_get(engine->gt->vm);
INIT_LIST_HEAD(&ce->signal_link);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 30bd248827d8..07be021882cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include "i915_active.h"
+#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
@@ -35,6 +36,9 @@ int intel_context_alloc_state(struct intel_context *ce);
void intel_context_free(struct intel_context *ce);
+int intel_context_reconfigure_sseu(struct intel_context *ce,
+ const struct intel_sseu sseu);
+
/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
@@ -169,6 +173,11 @@ static inline bool intel_context_is_barrier(const struct intel_context *ce)
return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}
+static inline bool intel_context_is_closed(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+}
+
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -224,4 +233,20 @@ intel_context_clear_nopreempt(struct intel_context *ce)
clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
+static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
+{
+ const u32 period =
+ RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+
+ return READ_ONCE(ce->runtime.total) * period;
+}
+
+static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+{
+ const u32 period =
+ RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+
+ return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
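The new runtime accounting feeds each context's per-switch runtime into DECLARE_EWMA(runtime, 3, 8), i.e. an exponentially weighted moving average that folds new samples in with weight 1/8 (plus a fixed-point precision shift, elided here). The averaging step, reduced to its arithmetic core as a sketch:

    #include <stdint.h>

    /* Illustrative EWMA step: fold 'val' in with a 1/8 weighting,
     * seeding the average with the first sample. */
    static uint64_t ewma_add(uint64_t avg, uint64_t val)
    {
            return avg ? (avg * 7 + val) / 8 : val;
    }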
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c
new file mode 100644
index 000000000000..65dcd090245d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_active.h"
+#include "intel_context.h"
+#include "intel_context_param.h"
+#include "intel_ring.h"
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz)
+{
+ int err;
+
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ err = i915_active_wait(&ce->active);
+ if (err < 0)
+ goto unlock;
+
+ if (intel_context_is_pinned(ce)) {
+ err = -EBUSY; /* In active use, come back later! */
+ goto unlock;
+ }
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ struct intel_ring *ring;
+
+ /* Replace the existing ringbuffer */
+ ring = intel_engine_create_ring(ce->engine, sz);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto unlock;
+ }
+
+ intel_ring_put(ce->ring);
+ ce->ring = ring;
+
+ /* Context image will be updated on next pin */
+ } else {
+ ce->ring = __intel_context_ring_size(sz);
+ }
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return err;
+}
+
+long intel_context_get_ring_size(struct intel_context *ce)
+{
+ long sz = (unsigned long)READ_ONCE(ce->ring);
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ sz = ce->ring->size;
+ intel_context_unlock_pinned(ce);
+ }
+
+ return sz;
+}
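A hypothetical caller of the new helpers, showing the error contract: the setter may fail with -EINTR (interrupted lock), -EBUSY (context still pinned) or a ring allocation error, and the getter returns either a size or a negative errno. This sketch only compiles inside the driver tree, and SZ_32K is just an arbitrary example size:

    static int resize_ring_to_32k(struct intel_context *ce)
    {
            long sz;
            int err;

            err = intel_context_set_ring_size(ce, SZ_32K);
            if (err)
                    return err;

            sz = intel_context_get_ring_size(ce);
            return sz < 0 ? (int)sz : 0;
    }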
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
new file mode 100644
index 000000000000..f053d8633fe2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_CONTEXT_PARAM_H
+#define INTEL_CONTEXT_PARAM_H
+
+struct intel_context;
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz);
+long intel_context_get_ring_size(struct intel_context *ce);
+
+#endif /* INTEL_CONTEXT_PARAM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
new file mode 100644
index 000000000000..57a30956c922
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_ring.h"
+#include "intel_sseu.h"
+
+static int gen8_emit_rpcs_config(struct i915_request *rq,
+ const struct intel_context *ce,
+ const struct intel_sseu sseu)
+{
+ u64 offset;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ offset = i915_ggtt_offset(ce->state) +
+ LRC_STATE_PN * PAGE_SIZE +
+ CTX_R_PWR_CLK_STATE * 4;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
+{
+ struct i915_request *rq;
+ int ret;
+
+ lockdep_assert_held(&ce->pin_mutex);
+
+ /*
+ * If the context is not idle, we have to submit an ordered request to
+ * modify its context image via the kernel context (writing to our own
+ * image, or into the registers directly, does not stick). Pristine
+ * and idle contexts will be configured on pinning.
+ */
+ if (!intel_context_pin_if_active(ce))
+ return 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ /* Serialise with the remote context */
+ ret = intel_context_prepare_remote_request(ce, rq);
+ if (ret == 0)
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
+
+ i915_request_add(rq);
+out_unpin:
+ intel_context_unpin(ce);
+ return ret;
+}
+
+int
+intel_context_reconfigure_sseu(struct intel_context *ce,
+ const struct intel_sseu sseu)
+{
+ int ret;
+
+ GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+
+ ret = intel_context_lock_pinned(ce);
+ if (ret)
+ return ret;
+
+ /* Nothing to do if unmodified. */
+ if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
+ goto unlock;
+
+ ret = gen8_modify_rpcs(ce, sseu);
+ if (!ret)
+ ce->sseu = sseu;
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index ca1420fb8b53..ca0d4f4f3615 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -7,6 +7,7 @@
#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__
+#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -19,6 +20,8 @@
#define CONTEXT_REDZONE POISON_INUSE
+DECLARE_EWMA(runtime, 3, 8);
+
struct i915_gem_context;
struct i915_vma;
struct intel_context;
@@ -42,8 +45,8 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
@@ -59,15 +62,31 @@ struct intel_context {
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
#define CONTEXT_VALID_BIT 2
-#define CONTEXT_USE_SEMAPHORES 3
-#define CONTEXT_BANNED 4
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
-#define CONTEXT_NOPREEMPT 6
+#define CONTEXT_CLOSED_BIT 3
+#define CONTEXT_USE_SEMAPHORES 4
+#define CONTEXT_BANNED 5
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
+#define CONTEXT_NOPREEMPT 7
u32 *lrc_reg_state;
- u64 lrc_desc;
+ union {
+ struct {
+ u32 lrca;
+ u32 ccid;
+ };
+ u64 desc;
+ } lrc;
u32 tag; /* cookie passed to HW to track this context on submission */
+ /* Time on GPU as tracked by the hw. */
+ struct {
+ struct ewma_runtime avg;
+ u64 total;
+ u32 last;
+ I915_SELFTEST_DECLARE(u32 num_underflow);
+ I915_SELFTEST_DECLARE(u32 max_underflow);
+ } runtime;
+
unsigned int active_count; /* protected by timeline->mutex */
atomic_t pin_count;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 5df003061e44..a1aa0d3e8be1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -107,7 +107,20 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
- return *READ_ONCE(execlists->active);
+ struct i915_request * const *cur, * const *old, *active;
+
+ cur = READ_ONCE(execlists->active);
+ smp_rmb(); /* pairs with overwrite protection in process_csb() */
+ do {
+ old = cur;
+
+ active = READ_ONCE(*cur);
+ cur = READ_ONCE(execlists->active);
+
+ smp_rmb(); /* and complete the seqlock retry */
+ } while (unlikely(cur != old));
+
+ return active;
}
static inline void
@@ -192,6 +205,8 @@ void intel_engines_free(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
+int intel_engine_resume(struct intel_engine_cs *engine);
+
int intel_ring_submission_setup(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
@@ -303,26 +318,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine);
u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-
-static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
-{
- if (!execlists->preempt_hang.inject_hang)
- return false;
-
- complete(&execlists->preempt_hang.completion);
- return true;
-}
-
-#else
-
-static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
-{
- return false;
-}
-
-#endif
-
void intel_engine_init_active(struct intel_engine_cs *engine,
unsigned int subclass);
#define ENGINE_PHYSICAL 0
@@ -338,13 +333,4 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
return intel_engine_has_preemption(engine);
}
-static inline bool
-intel_engine_has_timeslices(const struct intel_engine_cs *engine)
-{
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return false;
-
- return intel_engine_has_semaphores(engine);
-}
-
#endif /* _INTEL_RINGBUFFER_H_ */
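execlists_active() above turns a racy double dereference into a seqlock-style retry loop: sample the slot, re-read the array pointer, and retry until both reads agree, so a concurrent republish in process_csb() can never be observed half-way. A generic C11 rendering of the idea, with atomics standing in for READ_ONCE()/smp_rmb():

    #include <stdatomic.h>

    struct port { void *request; };

    /* Illustrative: read through a republishable pointer without
     * observing a torn update. */
    static void *stable_read(_Atomic(struct port *) *slot)
    {
            struct port *cur, *old;
            void *val;

            cur = atomic_load(slot);
            do {
                    old = cur;
                    val = cur->request;      /* sample the slot */
                    cur = atomic_load(slot); /* republished under us? */
            } while (cur != old);

            return val;
    }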
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 06ff7695fa29..883a9b7fe88d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -35,6 +35,7 @@
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
+#include "intel_gt_pm.h"
#include "intel_lrc.h"
#include "intel_reset.h"
#include "intel_ring.h"
@@ -199,10 +200,10 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
* out in the wash.
*/
cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
- DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
- INTEL_GEN(gt->i915),
- cxt_size * 64,
- cxt_size - 1);
+ drm_dbg(&gt->i915->drm,
+ "gen%d CXT_SIZE = %d bytes [0x%08x]\n",
+ INTEL_GEN(gt->i915), cxt_size * 64,
+ cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
case 2:
@@ -274,6 +275,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
@@ -300,11 +302,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
- engine->i915 = gt->i915;
+ engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+ engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
@@ -312,6 +314,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.heartbeat_interval_ms =
CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.max_busywait_duration_ns =
+ CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
engine->props.preempt_timeout_ms =
CONFIG_DRM_I915_PREEMPT_TIMEOUT;
engine->props.stop_timeout_ms =
@@ -319,11 +323,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
+ /* Override to uninterruptible for OpenCL workloads. */
+ if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+ engine->props.preempt_timeout_ms = 0;
+
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
- DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
+ DRIVER_CAPS(i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -339,7 +347,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- gt->i915->engine[id] = engine;
+ i915->engine[id] = engine;
return 0;
}
@@ -392,8 +400,24 @@ void intel_engines_release(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /*
+ * Before we release the resources held by engine, we must be certain
+ * that the HW is no longer accessing them -- having the GPU scribble
+ * to or read from a page being used for something else causes no end
+ * of fun.
+ *
+ * The GPU should be reset by this point, but assume the worst just
+ * in case we aborted before completely initialising the engines.
+ */
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(gt, ALL_ENGINES);
+
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
+ intel_wakeref_wait_for_idle(&engine->wakeref);
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+
if (!engine->release)
continue;
@@ -432,9 +456,9 @@ int intel_engines_init_mmio(struct intel_gt *gt)
unsigned int i;
int err;
- WARN_ON(engine_mask == 0);
- WARN_ON(engine_mask &
- GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+ drm_WARN_ON(&i915->drm, engine_mask == 0);
+ drm_WARN_ON(&i915->drm, engine_mask &
+ GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_probe_failure(i915))
return -ENODEV;
@@ -455,7 +479,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != engine_mask))
+ if (drm_WARN_ON(&i915->drm, mask != engine_mask))
device_info->engine_mask = mask;
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
@@ -510,7 +534,6 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
{
unsigned int flags;
- flags = PIN_GLOBAL;
if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
@@ -523,11 +546,11 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
* above the mappable region (even though we never
* actually map it).
*/
- flags |= PIN_MAPPABLE;
+ flags = PIN_MAPPABLE;
else
- flags |= PIN_HIGH;
+ flags = PIN_HIGH;
- return i915_vma_pin(vma, 0, 0, flags);
+ return i915_ggtt_pin(vma, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
@@ -546,7 +569,8 @@ static int init_status_page(struct intel_engine_cs *engine)
*/
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate status page\n");
+ drm_err(&engine->i915->drm,
+ "Failed to allocate status page\n");
return PTR_ERR(obj);
}
@@ -614,15 +638,15 @@ static int engine_setup_common(struct intel_engine_cs *engine)
struct measure_breadcrumb {
struct i915_request rq;
- struct intel_timeline timeline;
struct intel_ring ring;
u32 cs[1024];
};
-static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
+static int measure_breadcrumb_dw(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
struct measure_breadcrumb *frame;
- int dw = -ENOMEM;
+ int dw;
GEM_BUG_ON(!engine->gt->scratch);
@@ -630,39 +654,27 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
if (!frame)
return -ENOMEM;
- if (intel_timeline_init(&frame->timeline,
- engine->gt,
- engine->status_page.vma))
- goto out_frame;
-
- mutex_lock(&frame->timeline.mutex);
+ frame->rq.i915 = engine->i915;
+ frame->rq.engine = engine;
+ frame->rq.context = ce;
+ rcu_assign_pointer(frame->rq.timeline, ce->timeline);
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
intel_ring_update_space(&frame->ring);
-
- frame->rq.i915 = engine->i915;
- frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
- rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
-
- dw = intel_timeline_pin(&frame->timeline);
- if (dw < 0)
- goto out_timeline;
+ mutex_lock(&ce->timeline->mutex);
spin_lock_irq(&engine->active.lock);
+
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+
spin_unlock_irq(&engine->active.lock);
+ mutex_unlock(&ce->timeline->mutex);
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
- intel_timeline_unpin(&frame->timeline);
-
-out_timeline:
- mutex_unlock(&frame->timeline.mutex);
- intel_timeline_fini(&frame->timeline);
-out_frame:
kfree(frame);
return dw;
}
@@ -737,12 +749,6 @@ static int engine_init_common(struct intel_engine_cs *engine)
engine->set_default_submission(engine);
- ret = measure_breadcrumb_dw(engine);
- if (ret < 0)
- return ret;
-
- engine->emit_fini_breadcrumb_dw = ret;
-
/*
* We may need to do things with the shrinker which
* require us to immediately switch back to the default
@@ -755,9 +761,18 @@ static int engine_init_common(struct intel_engine_cs *engine)
if (IS_ERR(ce))
return PTR_ERR(ce);
+ ret = measure_breadcrumb_dw(ce);
+ if (ret < 0)
+ goto err_context;
+
+ engine->emit_fini_breadcrumb_dw = ret;
engine->kernel_context = ce;
return 0;
+
+err_context:
+ intel_context_put(ce);
+ return ret;
}
int intel_engines_init(struct intel_gt *gt)
@@ -824,6 +839,20 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_wa_list_free(&engine->whitelist);
}
+/**
+ * intel_engine_resume - re-initializes the HW state of the engine
+ * @engine: Engine to resume.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+int intel_engine_resume(struct intel_engine_cs *engine)
+{
+ intel_engine_apply_workarounds(engine);
+ intel_engine_apply_whitelist(engine);
+
+ return engine->resume(engine);
+}
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -982,6 +1011,12 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+ if (INTEL_GEN(i915) >= 12) {
+ instdone->slice_common_extra[0] =
+ intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
+ instdone->slice_common_extra[1] =
+ intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
+ }
for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(engine, slice, subslice,
@@ -1260,6 +1295,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+ if (HAS_EXECLISTS(dev_priv)) {
+ drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+ drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+ }
drm_printf(m, "\tRING_START: 0x%08x\n",
ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
@@ -1276,8 +1317,14 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
if (INTEL_GEN(dev_priv) >= 6) {
- drm_printf(m, "\tRING_IMR: %08x\n",
+ drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
+ drm_printf(m, "\tRING_ESR: 0x%08x\n",
+ ENGINE_READ(engine, RING_ESR));
+ drm_printf(m, "\tRING_EMR: 0x%08x\n",
+ ENGINE_READ(engine, RING_EMR));
+ drm_printf(m, "\tRING_EIR: 0x%08x\n",
+ ENGINE_READ(engine, RING_EIR));
}
addr = intel_engine_get_active_head(engine);
@@ -1342,25 +1389,27 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
execlists_active_lock_bh(execlists);
rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
- char hdr[80];
+ char hdr[160];
int len;
- len = snprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ",
- (int)(port - execlists->active));
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d]: ",
+ (int)(port - execlists->active));
if (!i915_request_signaled(rq)) {
struct intel_timeline *tl = get_timeline(rq);
- len += snprintf(hdr + len, sizeof(hdr) - len,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq));
+ len += scnprintf(hdr + len, sizeof(hdr) - len,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
if (tl)
intel_timeline_put(tl);
}
- snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
@@ -1657,6 +1706,23 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* we only care about the snapshot of this moment.
*/
lockdep_assert_held(&engine->active.lock);
+
+ rcu_read_lock();
+ request = execlists_active(&engine->execlists);
+ if (request) {
+ struct intel_timeline *tl = request->context->timeline;
+
+ list_for_each_entry_from_reverse(request, &tl->requests, link) {
+ if (i915_request_completed(request))
+ break;
+
+ active = request;
+ }
+ }
+ rcu_read_unlock();
+ if (active)
+ return active;
+
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 6c6fd185457c..dd825718e4e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -180,7 +180,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- int err = 0;
+ int err;
if (!intel_engine_has_preemption(engine))
return -ENODEV;
@@ -188,8 +188,10 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
if (!intel_engine_pm_get_if_awake(engine))
return 0;
- if (mutex_lock_interruptible(&ce->timeline->mutex))
+ if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = -EINTR;
goto out_rpm;
+ }
intel_context_enter(ce);
rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
@@ -204,6 +206,8 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
__i915_request_commit(rq);
__i915_request_queue(rq, &attr);
+ GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+ err = 0;
out_unlock:
mutex_unlock(&ce->timeline->mutex);
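The intel_engine_pulse() fix above closes a path where 'err' could be returned without ever being assigned: a failed mutex_lock_interruptible() now maps explicitly to -EINTR. The minimal shape of that pattern, as a kernel-style sketch (assumes <linux/mutex.h>):

    /* Illustrative: propagate -EINTR instead of leaking an
     * uninitialised error code when the lock wait is interrupted. */
    static int do_under_lock(struct mutex *lock)
    {
            int err;

            if (mutex_lock_interruptible(lock))
                    return -EINTR; /* a signal interrupted the wait */

            err = 0; /* ... locked work would go here ... */
            mutex_unlock(lock);
            return err;
    }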
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ea90ab3e396e..b6cf284e3a2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -112,7 +112,7 @@ __queue_and_release_pm(struct i915_request *rq,
{
struct intel_gt_timelines *timelines = &engine->gt->timelines;
- ENGINE_TRACE(engine, "\n");
+ ENGINE_TRACE(engine, "parking\n");
/*
* We have to serialise all potential retirement paths with our
@@ -249,7 +249,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (!switch_to_kernel_context(engine))
return -EBUSY;
- ENGINE_TRACE(engine, "\n");
+ ENGINE_TRACE(engine, "parked\n");
call_idle_barriers(engine); /* cleanup after wedging */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 92be41a6903c..0be674ae1cf6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -75,6 +75,7 @@ struct intel_instdone {
u32 instdone;
/* The following exist only in the RCS engine */
u32 slice_common;
+ u32 slice_common_extra[2];
u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
@@ -126,7 +127,6 @@ DECLARE_EWMA(_engine_latency, 6, 4)
struct st_preempt_hang {
struct completion completion;
unsigned int count;
- bool inject_hang;
};
/**
@@ -157,6 +157,30 @@ struct intel_engine_execlists {
struct i915_priolist default_priolist;
/**
+ * @ccid: identifier for contexts submitted to this engine
+ */
+ u32 ccid;
+
+ /**
+ * @yield: CCID at the time of the last semaphore-wait interrupt.
+ *
+ * Instead of leaving a semaphore busy-spinning on an engine, we would
+ * like to switch to another ready context, i.e. yielding the semaphore
+ * timeslice.
+ */
+ u32 yield;
+
+ /**
+ * @error_interrupt: CS Master EIR
+ *
+ * The CS generates an interrupt when it detects an error. We capture
+ * the first error interrupt, record the EIR and schedule the tasklet.
+ * In the tasklet, we process the pending CS events to ensure we have
+ * the guilty request, and then reset the engine.
+ */
+ u32 error_interrupt;
+
+ /**
* @no_priolist: priority lists disabled
*/
bool no_priolist;
@@ -285,8 +309,7 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
- unsigned int context_tag;
-#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
+ unsigned long context_tag;
struct rb_node uabi_node;
@@ -473,10 +496,11 @@ struct intel_engine_cs {
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
-#define I915_ENGINE_IS_VIRTUAL BIT(5)
-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
+#define I915_ENGINE_HAS_TIMESLICES BIT(4)
+#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
+#define I915_ENGINE_IS_VIRTUAL BIT(6)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
unsigned int flags;
/*
@@ -537,6 +561,7 @@ struct intel_engine_cs {
struct {
unsigned long heartbeat_interval_ms;
+ unsigned long max_busywait_duration_ns;
unsigned long preempt_timeout_ms;
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
@@ -574,6 +599,15 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine)
}
static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return false;
+
+ return engine->flags & I915_ENGINE_HAS_TIMESLICES;
+}
+
+static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 9e7f12bef828..848decee9066 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -278,7 +278,8 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
}
}
- if (WARN(errors, "Invalid UABI engine mapping found"))
+ if (drm_WARN(&i915->drm, errors,
+ "Invalid UABI engine mapping found"))
i915->uabi_engines = RB_ROOT;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 531d501be01f..4c5a209cb669 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -8,6 +8,8 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
+#include <drm/i915_drm.h>
+
#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -104,27 +106,17 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
}
-static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- /*
- * Don't bother messing with faults pre GEN6 as we have little
- * documentation supporting that it's a good idea.
- */
- if (INTEL_GEN(i915) < 6)
- return;
+ struct i915_vma *vma;
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+ i915_vma_wait_for_bind(vma);
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
ggtt->invalidate(ggtt);
-}
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
-{
- ggtt_suspend_mappings(&i915->ggtt);
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -167,6 +159,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_gtt_chipset_flush();
}
+static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ return addr | _PAGE_PRESENT;
+}
+
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@@ -182,7 +181,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
ggtt->invalidate(ggtt);
}
@@ -192,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level,
u32 flags)
{
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct sgt_iter sgt_iter;
- gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+ gen8_pte_t __iomem *gte;
+ gen8_pte_t __iomem *end;
+ struct sgt_iter iter;
dma_addr_t addr;
/*
@@ -203,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* not to allow the user to override access to a read only page.
*/
- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
- gen8_set_pte(gtt_entries++, pte_encode | addr);
+ gte = (gen8_pte_t __iomem *)ggtt->gsm;
+ gte += vma->node.start / I915_GTT_PAGE_SIZE;
+ end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma->pages)
+ gen8_set_pte(gte++, pte_encode | addr);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0].encode);
/*
* We want to flush the TLBs only after we're certain all the PTE
@@ -242,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+ gen6_pte_t __iomem *gte;
+ gen6_pte_t __iomem *end;
struct sgt_iter iter;
dma_addr_t addr;
+ gte = (gen6_pte_t __iomem *)ggtt->gsm;
+ gte += vma->node.start / I915_GTT_PAGE_SIZE;
+ end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
for_each_sgt_daddr(addr, iter, vma->pages)
- iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+ iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ iowrite32(vm->scratch[0].encode, gte++);
/*
* We want to flush the TLBs only after we're certain all the PTE
@@ -350,31 +366,6 @@ static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
-struct clear_range {
- struct i915_address_space *vm;
- u64 start;
- u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
- struct clear_range *arg = _arg;
-
- gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
- u64 start,
- u64 length)
-{
- struct clear_range arg = { vm, start, length };
-
- stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@@ -462,7 +453,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
u64 size;
int ret;
- if (!USES_GUC(ggtt->vm.i915))
+ if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
return 0;
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
@@ -472,7 +463,8 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
PIN_NOEVICT);
if (ret)
- DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Failed to reserve top of GGTT for GuC\n");
return ret;
}
@@ -544,8 +536,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg_kms(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
@@ -879,8 +872,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->vm.clear_range != nop_clear_range)
- ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ ggtt->vm.bind_async_flags =
+ I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
ggtt->invalidate = gen8_ggtt_invalidate;
@@ -890,7 +883,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
- ggtt->vm.pte_encode = gen8_pte_encode;
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
setup_private_pat(ggtt->vm.gt->uncore);
@@ -1180,7 +1173,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
-static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
struct i915_vma *vma;
bool flush = false;
@@ -1188,8 +1181,6 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
intel_gt_check_and_clear_faults(ggtt->vm.gt);
- mutex_lock(&ggtt->vm.mutex);
-
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
@@ -1216,19 +1207,10 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
atomic_set(&ggtt->vm.open, open);
ggtt->invalidate(ggtt);
- mutex_unlock(&ggtt->vm.mutex);
-
if (flush)
wbinvd_on_all_cpus();
-}
-
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
-{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
- ggtt_restore_mappings(ggtt);
- if (INTEL_GEN(i915) >= 8)
+ if (INTEL_GEN(ggtt->vm.i915) >= 8)
setup_private_pat(ggtt->vm.gt->uncore);
}
@@ -1267,6 +1249,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
unsigned int size = intel_rotation_info_size(rot_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
@@ -1296,8 +1279,9 @@ err_sg_alloc:
kfree(st);
err_st_alloc:
- DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+ drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width,
+ rot_info->plane[0].height, size);
return ERR_PTR(ret);
}
@@ -1349,6 +1333,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
struct drm_i915_gem_object *obj)
{
unsigned int size = intel_remapped_info_size(rem_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
@@ -1380,8 +1365,9 @@ err_sg_alloc:
kfree(st);
err_st_alloc:
- DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+ drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width,
+ rem_info->plane[0].height, size);
return ERR_PTR(ret);
}
@@ -1479,8 +1465,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
if (IS_ERR(vma->pages)) {
ret = PTR_ERR(vma->pages);
vma->pages = NULL;
- DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
+ drm_err(&vma->vm->i915->drm,
+ "Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
}
return ret;
}
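
Both GGTT insert_entries paths above now share one shape: derive the PTE window from the node, write one entry per backing page, then point the allocated-but-unused tail at scratch. A minimal userspace sketch of that shape, using plain uint64_t entries and a made-up scratch encoding in place of the driver's gen8_pte_t/iowrite machinery:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static void insert_entries(uint64_t *gsm, uint64_t node_start, uint64_t node_size,
			   const uint64_t *addrs, unsigned int naddrs,
			   uint64_t scratch_encode)
{
	uint64_t *gte = gsm + node_start / PAGE_SIZE;
	uint64_t *end = gte + node_size / PAGE_SIZE;
	unsigned int i;

	/* One PTE per backing page, as the for_each_sgt_daddr() walk does */
	for (i = 0; i < naddrs; i++)
		*gte++ = addrs[i] | 1; /* low bit stands in for pte_encode() */
	assert(gte <= end);

	/* Point the allocated but "unused" tail at scratch so stray accesses
	 * beyond the object hit a known page rather than stale PTEs */
	while (gte < end)
		*gte++ = scratch_encode;
}

int main(void)
{
	uint64_t gsm[8] = { 0 };
	const uint64_t addrs[2] = { 0x10000, 0x11000 };
	int i;

	/* The node spans four pages but only two are backed */
	insert_entries(gsm, 0, 4 * PAGE_SIZE, addrs, 2, 0xdead000);
	for (i = 0; i < 8; i++)
		printf("pte[%d] = %#llx\n", i, (unsigned long long)gsm[i]);
	return 0;
}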
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 51b8718513bc..f04214a54f75 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -292,10 +292,21 @@
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
-#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
-#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
-#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define STATE_BASE_ADDRESS \
+ ((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
+#define BASE_ADDRESS_MODIFY REG_BIT(0)
+#define PIPELINE_SELECT \
+ ((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
+#define PIPELINE_SELECT_MEDIA REG_BIT(0)
+#define GFX_OP_3DSTATE_VF_STATISTICS \
+ ((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
+#define MEDIA_VFE_STATE \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
+#define MEDIA_OBJECT \
+ ((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
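
The reworked opcode macros above all pack the same header fields: command type in bits 31:29, sub-type in 28:27, opcode in 26:24 and sub-opcode in 23:16. A small illustrative helper that reproduces two of them (the helper and its parameter names are not part of the header):

#include <stdint.h>
#include <stdio.h>

/* Pack a GFXPIPE-style command header; the layout mirrors the macros
 * above, but this helper itself is illustrative, not from the header. */
static uint32_t gfxpipe_cmd(uint32_t type, uint32_t subtype,
			    uint32_t opcode, uint32_t subopcode)
{
	return (type << 29) | (subtype << 27) | (opcode << 24) | (subopcode << 16);
}

int main(void)
{
	/* Reproduces STATE_BASE_ADDRESS from the hunk above */
	printf("STATE_BASE_ADDRESS = %#010x\n", gfxpipe_cmd(0x3, 0x0, 0x1, 0x1));
	/* Reproduces MEDIA_INTERFACE_DESCRIPTOR_LOAD */
	printf("MEDIA_INTERFACE_DESCRIPTOR_LOAD = %#010x\n",
	       gfxpipe_cmd(0x3, 0x2, 0x0, 0x2));
	return 0;
}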
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index da2b6e2ae692..d09f7596cb98 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -198,16 +198,16 @@ static void gen6_check_faults(struct intel_gt *gt)
for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ?
- "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ drm_dbg(&engine->i915->drm, "Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
}
@@ -239,18 +239,17 @@ static void gen8_check_faults(struct intel_gt *gt)
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr),
- lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
@@ -345,7 +344,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
goto err_unref;
}
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ ret = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (ret)
goto err_unref;
@@ -455,6 +454,11 @@ err_rq:
if (!rq)
continue;
+ if (rq->fence.error) {
+ err = -EIO;
+ goto out;
+ }
+
GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
state = rq->context->state;
if (!state)
@@ -538,6 +542,10 @@ static int __engines_verify_workarounds(struct intel_gt *gt)
err = -EIO;
}
+ /* Flush and restore the kernel context for safety */
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
+ err = -EIO;
+
return err;
}
@@ -584,7 +592,9 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_engines;
- intel_uc_init(&gt->uc);
+ err = intel_uc_init(&gt->uc);
+ if (err)
+ goto err_engines;
err = intel_gt_resume(gt);
if (err)
@@ -634,6 +644,13 @@ void intel_gt_driver_remove(struct intel_gt *gt)
void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_rps_driver_unregister(&gt->rps);
+
+ /*
+ * Upon unregistering the device to prevent any new users, cancel
+ * all in-flight requests so that we can quickly unbind the active
+ * resources.
+ */
+ intel_gt_set_wedged(gt);
}
void intel_gt_driver_release(struct intel_gt *gt)
@@ -650,6 +667,9 @@ void intel_gt_driver_release(struct intel_gt *gt)
void intel_gt_driver_late_release(struct intel_gt *gt)
{
+ /* We need to wait for inflight RCU frees to release their grip */
+ rcu_barrier();
+
intel_uc_driver_late_release(&gt->uc);
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
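
gen6_check_faults() and gen8_check_faults() both decode a packed fault register before logging: an address under PAGE_MASK, a GGTT/PPGTT select bit, and source-ID/type fields. A hedged decoder sketch that mimics the log format above; the bit positions here are placeholders, the real ones come from the RING_FAULT_* macros in i915_reg.h:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout; the real positions come from the
 * RING_FAULT_* macros in i915_reg.h, not from this sketch. */
#define FAULT_VALID	(1u << 0)
#define FAULT_TYPE(x)	(((x) >> 1) & 0x3)
#define FAULT_SRCID(x)	(((x) >> 3) & 0xff)
#define FAULT_GTTSEL	(1u << 11)
#define FAULT_PAGE_MASK	(~0xfffu)

static void decode_fault(uint32_t fault)
{
	if (!(fault & FAULT_VALID))
		return;
	printf("Unexpected fault\n"
	       "\tAddr: 0x%08x\n"
	       "\tAddress space: %s\n"
	       "\tSource ID: %u\n"
	       "\tType: %u\n",
	       fault & FAULT_PAGE_MASK,
	       fault & FAULT_GTTSEL ? "GGTT" : "PPGTT",
	       FAULT_SRCID(fault),
	       FAULT_TYPE(fault));
}

int main(void)
{
	decode_fault(0x12345801);
	return 0;
}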
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 1dac441cb8f4..4fac043750aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -14,7 +14,7 @@ struct drm_i915_private;
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
- GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
+ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
##__VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f796bdf1ed30..0cc7dd54f4f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -24,6 +24,30 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
bool tasklet = false;
+ if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+ u32 eir;
+
+ eir = ENGINE_READ(engine, RING_EIR);
+ ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+ /* Disable the error interrupt until after the reset */
+ if (likely(eir)) {
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, eir);
+ WRITE_ONCE(engine->execlists.error_interrupt, eir);
+ tasklet = true;
+ }
+ }
+
+ if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+ WRITE_ONCE(engine->execlists.yield,
+ ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+ ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+ engine->execlists.yield);
+ if (del_timer(&engine->execlists.timer))
+ tasklet = true;
+ }
+
if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
tasklet = true;
@@ -210,7 +234,11 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
- const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+ const u32 irqs =
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
struct intel_uncore *uncore = gt->uncore;
const u32 dmask = irqs << 16 | irqs;
const u32 smask = irqs << 16;
@@ -279,66 +307,56 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ GT_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(gt->i915))
gen7_parity_error_irq_handler(gt, gt_iir);
}
-void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
void __iomem * const regs = gt->uncore->regs;
+ u32 iir;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
- if (likely(gt_iir[0]))
- raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
- if (likely(gt_iir[1]))
- raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
- if (likely(gt_iir[2]))
- raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
- if (likely(gt_iir[3]))
- raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
- }
-}
-
-void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
-{
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
- gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
- gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
+ iir >> GEN8_RCS_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
+ iir >> GEN8_BCS_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(0), iir);
+ }
}
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
- gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
- gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ iir >> GEN8_VCS0_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ iir >> GEN8_VCS1_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(1), iir);
+ }
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
- gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ iir >> GEN8_VECS_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(3), iir);
+ }
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
- guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(iir)) {
+ gen6_rps_irq_handler(&gt->rps, iir);
+ guc_irq_handler(&gt->uc.guc, iir >> 16);
+ raw_reg_write(regs, GEN8_GT_IIR(2), iir);
+ }
}
}
@@ -354,25 +372,19 @@ void gen8_gt_irq_reset(struct intel_gt *gt)
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
- struct intel_uncore *uncore = gt->uncore;
-
/* These are interrupts we'll toggle with the ring mask register */
- u32 gt_interrupts[] = {
- (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
+ const u32 irqs =
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
+ const u32 gt_interrupts[] = {
+ irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
+ irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
0,
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ irqs << GEN8_VECS_IRQ_SHIFT,
};
+ struct intel_uncore *uncore = gt->uncore;
gt->pm_ier = 0x0;
gt->pm_imr = ~gt->pm_ier;
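
The gen8 path folds the old ack/handle split into one pass per IIR bank: read the bank, dispatch while the bits are live, then write the same value back to ack. A userspace sketch of that read/dispatch/ack shape, with an array standing in for the GEN8_GT_IIR() registers:

#include <stdint.h>
#include <stdio.h>

/* Fake IIR banks; the driver uses raw_reg_read()/raw_reg_write()
 * on GEN8_GT_IIR(0..3) instead. */
static uint32_t iir_bank[4] = { 0x0101, 0, 0, 0 };

static uint32_t reg_read(int bank) { return iir_bank[bank]; }
static void reg_write(int bank, uint32_t v) { iir_bank[bank] &= ~v; } /* ack */

static void cs_irq(const char *engine, uint32_t bits)
{
	if (bits & 0xff)
		printf("%s: iir %#x\n", engine, bits & 0xff);
}

static void gt_irq_handler(uint32_t master_ctl)
{
	uint32_t iir;

	if (master_ctl & 0x3) { /* rcs/bcs pending in bank 0 */
		iir = reg_read(0);
		if (iir) {
			/* dispatch while the bits are live, then ack */
			cs_irq("rcs0", iir >> 0);
			cs_irq("bcs0", iir >> 8);
			reg_write(0, iir);
		}
	}
	/* banks 1..3 follow the same read/dispatch/ack shape */
}

int main(void)
{
	gt_irq_handler(0x3);
	printf("bank0 after ack: %#x\n", iir_bank[0]);
	return 0;
}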
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
index 8f37593712c9..886c5cf408a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -36,9 +36,8 @@ void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
-void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl);
void gen8_gt_irq_reset(struct intel_gt *gt);
-void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
void gen8_gt_irq_postinstall(struct intel_gt *gt);
#endif /* INTEL_GT_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index d1c2f034296a..8b653c0f5e5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -216,7 +216,7 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_get(engine);
engine->serial++; /* kernel context lost */
- err = engine->resume(engine);
+ err = intel_engine_resume(engine);
intel_engine_pm_put(engine);
if (err) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 16acdc5d6734..2a72cce63fd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -171,7 +171,9 @@ void __i915_vm_close(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
- mutex_lock(&vm->mutex);
+ if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
+ return;
+
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -186,6 +188,7 @@ void __i915_vm_close(struct i915_address_space *vm)
i915_gem_object_put(obj);
}
GEM_BUG_ON(!list_empty(&vm->bound_list));
+
mutex_unlock(&vm->mutex);
}
@@ -299,6 +302,25 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}
+static void poison_scratch_page(struct page *page, unsigned long size)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ do {
+ void *vaddr;
+
+ vaddr = kmap(page);
+ memset(vaddr, POISON_FREE, PAGE_SIZE);
+ kunmap(page);
+
+ page = pfn_to_page(page_to_pfn(page) + 1);
+ size -= PAGE_SIZE;
+ } while (size);
+}
+
int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
unsigned long size;
@@ -331,6 +353,17 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
goto skip;
+ /*
+ * Use a non-zero scratch page for debugging.
+ *
+ * We want a value that should be reasonably obvious
+ * to spot in the error state, while also causing a GPU hang
+ * if executed. We prefer using a clear page in production, so
+ * should it ever be accidentally used, the effect should be
+ * fairly benign.
+ */
+ poison_scratch_page(page, size);
+
addr = dma_map_page_attrs(vm->dma,
page, 0, size,
PCI_DMA_BIDIRECTIONAL,
@@ -448,36 +481,12 @@ void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
HSW_GTT_CACHE_EN,
can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
- WARN_ON_ONCE(can_use_gtt_cache &&
- intel_uncore_read(uncore,
- HSW_GTT_CACHE_EN) == 0);
+ drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
}
}
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
-
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC;
- break;
- default:
- pte |= PPAT_CACHED;
- break;
- }
-
- return pte;
-}
-
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
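
__i915_vm_close() above now pairs the final open-count decrement with taking the mutex via atomic_dec_and_mutex_lock(), so exactly one closer tears the VM down and does so under the lock. A hedged userspace equivalent of that primitive built on C11 atomics and pthreads:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int open_count = 2;
static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Userspace stand-in for atomic_dec_and_mutex_lock(): returns true with
 * the mutex held iff this caller dropped the count to zero. */
static bool dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: somebody else still holds a reference */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return false;
	}

	/* Possibly the last reference: decide under the lock */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return true; /* we closed it; caller unlocks after teardown */
	pthread_mutex_unlock(lock);
	return false;
}

static void vm_close(const char *who)
{
	if (!dec_and_mutex_lock(&open_count, &vm_mutex)) {
		printf("%s: vm still open\n", who);
		return;
	}
	printf("%s: last close, tearing down under the mutex\n", who);
	pthread_mutex_unlock(&vm_mutex);
}

int main(void)
{
	vm_close("a"); /* open_count 2 -> 1, no teardown */
	vm_close("b"); /* open_count 1 -> 0, teardown */
	return 0;
}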
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 7da7681c20b1..b3116fe8d180 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -429,8 +429,7 @@ static inline void
i915_vm_close(struct i915_address_space *vm)
{
GEM_BUG_ON(!atomic_read(&vm->open));
- if (atomic_dec_and_test(&vm->open))
- __i915_vm_close(vm);
+ __i915_vm_close(vm);
i915_vm_put(vm);
}
@@ -512,12 +511,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
-
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags);
+void i915_ggtt_suspend(struct i915_ggtt *gtt);
+void i915_ggtt_resume(struct i915_ggtt *ggtt);
int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index ceb785b75c25..e3f637b3650e 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -50,6 +50,9 @@ static bool get_ia_constants(struct intel_llc *llc,
struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
struct intel_rps *rps = &llc_to_gt(llc)->rps;
+ if (!HAS_LLC(i915) || IS_DGFX(i915))
+ return false;
+
if (rps->max_freq <= rps->min_freq)
return false;
@@ -147,8 +150,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
void intel_llc_enable(struct intel_llc *llc)
{
- if (HAS_LLC(llc_to_gt(llc)->i915))
- gen6_update_ring_freq(llc);
+ gen6_update_ring_freq(llc);
}
void intel_llc_disable(struct intel_llc *llc)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 31455eceeb0c..2dfaddb8811e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -176,8 +176,6 @@
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-#define WA_TAIL_DWORDS 2
-#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
struct virtual_engine {
struct intel_engine_cs base;
@@ -247,7 +245,7 @@ static void mark_eio(struct i915_request *rq)
GEM_BUG_ON(i915_request_signaled(rq));
- dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -295,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority;
+ return READ_ONCE(rq->sched.attr.priority);
}
static int effective_prio(const struct i915_request *rq)
@@ -458,10 +456,10 @@ assert_priority_queue(const struct i915_request *prev,
* engine info, SW context ID and SW counter need to form a unique number
* (Context ID) per lrc.
*/
-static u64
+static u32
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
- u64 desc;
+ u32 desc;
desc = INTEL_LEGACY_32B_CONTEXT;
if (i915_vm_is_4lvl(ce->vm))
@@ -472,21 +470,7 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
if (IS_GEN(engine->i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
- desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
- /*
- * The following 32bits are copied into the OA reports (dword 2).
- * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
- * anything below.
- */
- if (INTEL_GEN(engine->i915) >= 11) {
- desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
- /* bits 48-53 */
-
- desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
- /* bits 61-63 */
- }
-
- return desc;
+ return i915_ggtt_offset(ce->state) | desc;
}
static inline unsigned int dword_in_page(void *addr)
@@ -1006,7 +990,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
i915_request_cancel_breadcrumb(rq);
spin_unlock(&rq->lock);
}
- rq->engine = owner;
+ WRITE_ONCE(rq->engine, owner);
owner->submit_request(rq);
active = NULL;
}
@@ -1194,7 +1178,49 @@ static void reset_active(struct i915_request *rq,
__execlists_update_reg_state(ce, engine, head);
/* We've switched away, so this should be a no-op, but intent matters */
- ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+}
+
+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16], which is recorded before the context
+ * switch (and so excludes the cost of context switches), or the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
+{
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ ce->runtime.num_underflow += dt < 0;
+ ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
+}
+
+static void intel_context_update_runtime(struct intel_context *ce)
+{
+ u32 old;
+ s32 dt;
+
+ if (intel_context_is_barrier(ce))
+ return;
+
+ old = ce->runtime.last;
+ ce->runtime.last = intel_context_get_runtime(ce);
+ dt = ce->runtime.last - old;
+
+ if (unlikely(dt <= 0)) {
+ CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+ old, ce->runtime.last, dt);
+ st_update_runtime_underflow(ce, dt);
+ return;
+ }
+
+ ewma_runtime_add(&ce->runtime.avg, dt);
+ ce->runtime.total += dt;
}
static inline struct intel_engine_cs *
@@ -1213,16 +1239,21 @@ __execlists_schedule_in(struct i915_request *rq)
if (ce->tag) {
/* Use a fixed tag for OA and friends */
- ce->lrc_desc |= (u64)ce->tag << 32;
+ GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+ ce->lrc.ccid = ce->tag;
} else {
/* We don't need a strict matching tag, just different values */
- ce->lrc_desc &= ~GENMASK_ULL(47, 37);
- ce->lrc_desc |=
- (u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
- GEN11_SW_CTX_ID_SHIFT;
- BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+ unsigned int tag = ffs(engine->context_tag);
+
+ GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
+ clear_bit(tag - 1, &engine->context_tag);
+ ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
}
+ ce->lrc.ccid |= engine->execlists.ccid;
+
__intel_gt_pm_get(engine->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -1262,7 +1293,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
static inline void
__execlists_schedule_out(struct i915_request *rq,
- struct intel_engine_cs * const engine)
+ struct intel_engine_cs * const engine,
+ unsigned int ccid)
{
struct intel_context * const ce = rq->context;
@@ -1276,10 +1308,19 @@ __execlists_schedule_out(struct i915_request *rq,
* If we have just completed this context, the engine may now be
* idle and we want to re-enter powersaving.
*/
- if (list_is_last(&rq->link, &ce->timeline->requests) &&
+ if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
i915_request_completed(rq))
intel_engine_add_retire(engine, ce->timeline);
+ ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+ ccid &= GEN12_MAX_CONTEXT_HW_ID;
+ if (ccid < BITS_PER_LONG) {
+ GEM_BUG_ON(ccid == 0);
+ GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+ set_bit(ccid - 1, &engine->context_tag);
+ }
+
+ intel_context_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
intel_gt_pm_put_async(engine->gt);
@@ -1304,15 +1345,17 @@ execlists_schedule_out(struct i915_request *rq)
{
struct intel_context * const ce = rq->context;
struct intel_engine_cs *cur, *old;
+ u32 ccid;
trace_i915_request_out(rq);
+ ccid = rq->context->lrc.ccid;
old = READ_ONCE(ce->inflight);
do
cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
while (!try_cmpxchg(&ce->inflight, &old, cur));
if (!cur)
- __execlists_schedule_out(rq, old);
+ __execlists_schedule_out(rq, old, ccid);
i915_request_put(rq);
}
@@ -1320,7 +1363,7 @@ execlists_schedule_out(struct i915_request *rq)
static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
- u64 desc = ce->lrc_desc;
+ u64 desc = ce->lrc.desc;
u32 tail, prev;
/*
@@ -1359,7 +1402,7 @@ static u64 execlists_update_context(struct i915_request *rq)
*/
wmb();
- ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+ ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
return desc;
}
@@ -1395,15 +1438,26 @@ trace_ports(const struct intel_engine_execlists *execlists,
ports[1] ? ports[1]->fence.seqno : 0);
}
+static inline bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
static __maybe_unused bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
const char *msg)
{
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
+ bool sentinel = false;
trace_ports(execlists, msg, execlists->pending);
+ /* We may be messing around with the lists during reset, lalala */
+ if (reset_in_progress(execlists))
+ return true;
+
if (!execlists->pending[0]) {
GEM_TRACE_ERR("Nothing pending for promotion!\n");
return false;
@@ -1430,6 +1484,26 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
ce = rq->context;
+ /*
+ * Sentinels are supposed to be lonely so they flush the
+ * current execution off the HW. Check that they are the
+ * only request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ sentinel = i915_request_has_sentinel(rq);
+ if (sentinel && port != execlists->pending) {
+ GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
continue;
@@ -1525,6 +1599,11 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
@@ -1542,7 +1621,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
- if (unlikely((prev->fence.flags ^ next->fence.flags) &
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
(BIT(I915_FENCE_FLAG_NOPREEMPT) |
BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
@@ -1550,6 +1629,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (!can_merge_ctx(prev->context, next->context))
return false;
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
return true;
}
@@ -1585,7 +1665,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
}
static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
- struct intel_engine_cs *engine)
+ struct i915_request *rq)
{
struct intel_engine_cs *old = ve->siblings[0];
@@ -1593,9 +1673,19 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_lock(&old->breadcrumbs.irq_lock);
if (!list_empty(&ve->context.signal_link)) {
- list_move_tail(&ve->context.signal_link,
- &engine->breadcrumbs.signalers);
- intel_engine_signal_breadcrumbs(engine);
+ list_del_init(&ve->context.signal_link);
+
+ /*
+ * We cannot acquire the new engine->breadcrumbs.irq_lock
+ * (as we are holding a breadcrumbs.irq_lock already),
+ * so attach this request to the signaler on submission.
+ * The queued irq_work will occur when we finally drop
+ * the engine->active.lock after dequeue.
+ */
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
+
+ /* Also transfer the pending irq_work for the old breadcrumb. */
+ intel_engine_signal_breadcrumbs(rq->engine);
}
spin_unlock(&old->breadcrumbs.irq_lock);
}
@@ -1605,6 +1695,11 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
&(rq__)->sched.waiters_list, \
wait_link)
+#define for_each_signaler(p__, rq__) \
+ list_for_each_entry_rcu(p__, \
+ &(rq__)->sched.signalers_list, \
+ signal_link)
+
static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
LIST_HEAD(list);
@@ -1626,6 +1721,9 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
/* Leave semaphores spinning on the other engines */
if (w->engine != rq->engine)
continue;
@@ -1661,7 +1759,8 @@ static void defer_active(struct intel_engine_cs *engine)
}
static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
int hint;
@@ -1675,6 +1774,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
return hint >= effective_prio(rq);
}
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ /*
+ * Once bitten, forever smitten!
+ *
+ * If the active context ever busy-waited on a semaphore,
+ * it will be treated as a hog until the end of its timeslice (i.e.
+ * until it is scheduled out and replaced by a new submission,
+ * possibly even its own lite-restore). The HW only sends an interrupt
+ * on the first miss, and we do know if that semaphore has been
+ * signaled, or even if it is now stuck on another semaphore. Play
+ * safe, yield if it might be stuck -- it will be given a fresh
+ * timeslice in the near future.
+ */
+ return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool
+timeslice_expired(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
static int
switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
{
@@ -1690,15 +1815,15 @@ timeslice(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.timeslice_duration_ms);
}
-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
{
- const struct i915_request *rq = *engine->execlists.active;
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ const struct i915_request *rq = *execlists->active;
if (!rq || i915_request_completed(rq))
return 0;
- if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
return 0;
return timeslice(engine);
@@ -1715,8 +1840,11 @@ static void set_timeslice(struct intel_engine_cs *engine)
static void start_timeslice(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
+ int prio = queue_prio(execlists);
- execlists->switch_priority_hint = execlists->queue_priority_hint;
+ WRITE_ONCE(execlists->switch_priority_hint, prio);
+ if (prio == INT_MIN)
+ return;
if (timer_pending(&execlists->timer))
return;
@@ -1849,13 +1977,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last = NULL;
} else if (need_timeslice(engine, last) &&
- timer_expired(&engine->execlists.timer)) {
+ timeslice_expired(execlists, last)) {
ENGINE_TRACE(engine,
- "expired last=%llx:%lld, prio=%d, hint=%d\n",
+ "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
last->fence.context,
last->fence.seqno,
last->sched.attr.priority,
- execlists->queue_priority_hint);
+ execlists->queue_priority_hint,
+ yesno(timeslice_yield(execlists, last)));
ring_set_paused(engine, 1);
defer_active(engine);
@@ -1938,13 +2067,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
"",
yesno(engine != ve->siblings[0]));
- ve->request = NULL;
- ve->base.execlists.queue_priority_hint = INT_MIN;
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint,
+ INT_MIN);
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- rq->engine = engine;
+ WRITE_ONCE(rq->engine, engine);
if (engine != ve->siblings[0]) {
u32 *regs = ve->context.lrc_reg_state;
@@ -1957,7 +2087,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
engine);
if (!list_empty(&ve->context.signals))
- virtual_xfer_breadcrumbs(ve, engine);
+ virtual_xfer_breadcrumbs(ve, rq);
/*
* Move the bound engine to the top of the list
@@ -2064,6 +2194,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(last &&
!can_merge_ctx(last->context,
rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
submit = true;
last = rq;
@@ -2112,6 +2245,7 @@ done:
}
clear_ports(port + 1, last_port - port);
+ WRITE_ONCE(execlists->yield, -1);
execlists_submit_ports(engine);
set_preempt_timeout(engine, *active);
} else {
@@ -2134,6 +2268,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
execlists_schedule_out(*port);
clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+ smp_wmb(); /* complete the seqlock for execlists_active() */
WRITE_ONCE(execlists->active, execlists->inflight);
}
@@ -2144,12 +2279,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
clflush((void *)last);
}
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
/*
* Starting with Gen12, the status has a new format:
*
@@ -2240,7 +2369,6 @@ static void process_csb(struct intel_engine_cs *engine)
*/
head = execlists->csb_head;
tail = READ_ONCE(*execlists->csb_write);
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
if (unlikely(head == tail))
return;
@@ -2254,6 +2382,7 @@ static void process_csb(struct intel_engine_cs *engine)
*/
rmb();
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
do {
bool promote;
@@ -2288,11 +2417,13 @@ static void process_csb(struct intel_engine_cs *engine)
if (promote) {
struct i915_request * const *old = execlists->active;
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+
+ ring_set_paused(engine, 0);
+
/* Point active to the new ELSP; prevent overwriting */
WRITE_ONCE(execlists->active, execlists->pending);
-
- if (!inject_preempt_hang(execlists))
- ring_set_paused(engine, 0);
+ smp_wmb(); /* notify execlists_active() */
/* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", old);
@@ -2300,12 +2431,12 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_schedule_out(*old++);
/* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- WRITE_ONCE(execlists->active,
- memcpy(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists) *
- sizeof(*execlists->pending)));
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
WRITE_ONCE(execlists->pending[0], NULL);
} else {
@@ -2320,8 +2451,37 @@ static void process_csb(struct intel_engine_cs *engine)
* coherent (visible from the CPU) before the
* user interrupt and CSB is processed.
*/
- GEM_BUG_ON(!i915_request_completed(*execlists->active) &&
- !reset_in_progress(execlists));
+ if (GEM_SHOW_DEBUG() &&
+ !i915_request_completed(*execlists->active) &&
+ !reset_in_progress(execlists)) {
+ struct i915_request *rq __maybe_unused =
+ *execlists->active;
+ const u32 *regs __maybe_unused =
+ rq->context->lrc_reg_state;
+
+ ENGINE_TRACE(engine,
+ "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+ ENGINE_READ(engine, RING_START),
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_MI_MODE));
+ ENGINE_TRACE(engine,
+ "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->head, rq->tail,
+ rq->fence.context,
+ lower_32_bits(rq->fence.seqno),
+ hwsp_seqno(rq));
+ ENGINE_TRACE(engine,
+ "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+ regs[CTX_RING_START],
+ regs[CTX_RING_HEAD],
+ regs[CTX_RING_TAIL]);
+
+ GEM_BUG_ON("context completed before request");
+ }
+
execlists_schedule_out(*execlists->active++);
GEM_BUG_ON(execlists->active - execlists->inflight >
@@ -2349,7 +2509,7 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->active.lock);
- if (!engine->execlists.pending[0]) {
+ if (!READ_ONCE(engine->execlists.pending[0])) {
rcu_read_lock(); /* protect peeking at execlists->active */
execlists_dequeue(engine);
rcu_read_unlock();
@@ -2366,12 +2526,12 @@ static void __execlists_hold(struct i915_request *rq)
if (i915_request_is_active(rq))
__i915_request_unsubmit(rq);
- RQ_TRACE(rq, "on hold\n");
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
list_move_tail(&rq->sched.link, &rq->engine->active.hold);
i915_request_set_hold(rq);
+ RQ_TRACE(rq, "on hold\n");
- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
@@ -2385,7 +2545,7 @@ static void __execlists_hold(struct i915_request *rq)
if (i915_request_completed(w))
continue;
- if (i915_request_on_hold(rq))
+ if (i915_request_on_hold(w))
continue;
list_move_tail(&w->sched.link, &list);
@@ -2443,6 +2603,7 @@ static bool execlists_hold(struct intel_engine_cs *engine,
GEM_BUG_ON(i915_request_on_hold(rq));
GEM_BUG_ON(rq->engine != engine);
__execlists_hold(rq);
+ GEM_BUG_ON(list_empty(&engine->active.hold));
unlock:
spin_unlock_irq(&engine->active.lock);
@@ -2452,23 +2613,27 @@ unlock:
static bool hold_request(const struct i915_request *rq)
{
struct i915_dependency *p;
+ bool result = false;
/*
* If one of our ancestors is on hold, we must also be on hold,
* otherwise we will bypass it and execute before it.
*/
- list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+ rcu_read_lock();
+ for_each_signaler(p, rq) {
const struct i915_request *s =
container_of(p->signaler, typeof(*s), sched);
if (s->engine != rq->engine)
continue;
- if (i915_request_on_hold(s))
- return true;
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
}
+ rcu_read_unlock();
- return false;
+ return result;
}
static void __execlists_unhold(struct i915_request *rq)
@@ -2478,6 +2643,8 @@ static void __execlists_unhold(struct i915_request *rq)
do {
struct i915_dependency *p;
+ RQ_TRACE(rq, "hold release\n");
+
GEM_BUG_ON(!i915_request_on_hold(rq));
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
@@ -2486,21 +2653,24 @@ static void __execlists_unhold(struct i915_request *rq)
i915_sched_lookup_priolist(rq->engine,
rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
- RQ_TRACE(rq, "hold release\n");
/* Also release any children on this engine that are ready */
- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
if (w->engine != rq->engine)
continue;
- if (!i915_request_on_hold(rq))
+ if (!i915_request_on_hold(w))
continue;
/* Check that no other parents are also on hold */
- if (hold_request(rq))
+ if (hold_request(w))
continue;
list_move_tail(&w->sched.link, &list);
@@ -2615,13 +2785,13 @@ static bool execlists_capture(struct intel_engine_cs *engine)
if (!cap)
return true;
+ spin_lock_irq(&engine->active.lock);
cap->rq = execlists_active(&engine->execlists);
- GEM_BUG_ON(!cap->rq);
-
- rcu_read_lock();
- cap->rq = active_request(cap->rq->context->timeline, cap->rq);
- cap->rq = i915_request_get_rcu(cap->rq);
- rcu_read_unlock();
+ if (cap->rq) {
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ }
+ spin_unlock_irq(&engine->active.lock);
if (!cap->rq)
goto err_free;
@@ -2660,27 +2830,25 @@ err_free:
return false;
}
-static noinline void preempt_reset(struct intel_engine_cs *engine)
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
{
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- if (i915_modparams.reset < 3)
+ if (!intel_has_reset_engine(engine->gt))
return;
if (test_and_set_bit(bit, lock))
return;
+ ENGINE_TRACE(engine, "reset for %s\n", msg);
+
/* Mark this tasklet as disabled to avoid waiting for it to complete */
tasklet_disable_nosync(&engine->execlists.tasklet);
- ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
- READ_ONCE(engine->props.preempt_timeout_ms),
- jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
-
ring_set_paused(engine, 1); /* Freeze the current request in place */
if (execlists_capture(engine))
- intel_engine_reset(engine, "preemption time out");
+ intel_engine_reset(engine, msg);
else
ring_set_paused(engine, 0);
@@ -2711,6 +2879,13 @@ static void execlists_submission_tasklet(unsigned long data)
bool timeout = preempt_timeout(engine);
process_csb(engine);
+
+ if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ engine->execlists.error_interrupt = 0;
+ if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */
+ execlists_reset(engine, "CS error");
+ }
+
if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
unsigned long flags;
@@ -2719,8 +2894,8 @@ static void execlists_submission_tasklet(unsigned long data)
spin_unlock_irqrestore(&engine->active.lock, flags);
/* Recheck after serialising with direct-submission */
- if (timeout && preempt_timeout(engine))
- preempt_reset(engine);
+ if (unlikely(timeout && preempt_timeout(engine)))
+ execlists_reset(engine, "preemption time out");
}
}
@@ -2793,6 +2968,7 @@ static void execlists_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->active.lock, flags);
if (unlikely(ancestor_on_hold(engine, request))) {
+ RQ_TRACE(request, "ancestor on hold\n");
list_add_tail(&request->sched.link, &engine->active.hold);
i915_request_set_hold(request);
} else {
@@ -2874,6 +3050,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD] = head;
regs[CTX_RING_TAIL] = ring->tail;
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
/* RPCS */
if (engine->class == RENDER_CLASS) {
@@ -2899,7 +3076,7 @@ __execlists_context_pin(struct intel_context *ce,
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
- ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
+ ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(ce, engine, ce->ring->tail);
@@ -2921,22 +3098,6 @@ static void execlists_context_reset(struct intel_context *ce)
CE_TRACE(ce, "reset\n");
GEM_BUG_ON(!intel_context_is_pinned(ce));
- /*
- * Because we emit WA_TAIL_DWORDS there may be a disparity
- * between our bookkeeping in ce->ring->head and ce->ring->tail and
- * that stored in context. As we only write new commands from
- * ce->ring->tail onwards, everything before that is junk. If the GPU
- * starts reading from its RING_HEAD from the context, it may try to
- * execute that junk and die.
- *
- * The contexts that are stilled pinned on resume belong to the
- * kernel, and are local to each engine. All other contexts will
- * have their head/tail sanitized upon pinning before use, so they
- * will never see garbage,
- *
- * So to avoid that we reset the context images upon resume. For
- * simplicity, we just zero everything out.
- */
intel_ring_reset(ce->ring, ce->ring->emit);
/* Scrub away the garbage */
@@ -2944,7 +3105,7 @@ static void execlists_context_reset(struct intel_context *ce)
ce, ce->engine, ce->ring, true);
__execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
- ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
}
static const struct intel_context_ops execlists_context_ops = {
@@ -2964,7 +3125,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
- GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -3257,7 +3419,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
goto err;
}
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (err)
goto err;
@@ -3346,6 +3508,49 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+ u32 status;
+
+ engine->execlists.error_interrupt = 0;
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+ status = ENGINE_READ(engine, RING_ESR);
+ if (unlikely(status)) {
+ dev_err(engine->i915->drm.dev,
+ "engine '%s' resumed still in error: %08x\n",
+ engine->name, status);
+ __intel_gt_reset(engine->gt, engine->mask);
+ }
+
+ /*
+ * On current gen8+, we have 2 signals to play with
+ *
+ * - I915_ERROR_INSTRUCTION (bit 0)
+ *
+ * Generate an error if the command parser encounters an invalid
+ * instruction
+ *
+ * This is a fatal error.
+ *
+ * - CP_PRIV (bit 2)
+ *
+ * Generate an error on privilege violation (where the CP replaces
+ * the instruction with a no-op). This also fires for writes into
+ * read-only scratch pages.
+ *
+ * This is a non-fatal error; parsing continues.
+ *
+ * * there are a few others defined for odd HW that we do not use
+ *
+ * Since CP_PRIV fires for cases where we have chosen to ignore the
+ * error (as the HW is validating and suppressing the mistakes), we
+ * only unmask the instruction error bit.
+ */
+ ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
static void enable_execlists(struct intel_engine_cs *engine)
{
u32 mode;
@@ -3367,7 +3572,9 @@ static void enable_execlists(struct intel_engine_cs *engine)
i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
- engine->context_tag = 0;
+ enable_error_interrupt(engine);
+
+ engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
}
static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -3384,9 +3591,6 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int execlists_resume(struct intel_engine_cs *engine)
{
- intel_engine_apply_workarounds(engine);
- intel_engine_apply_whitelist(engine);
-
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -3517,9 +3721,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!rq)
goto unwind;
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
ce = rq->context;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
@@ -3529,8 +3730,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
goto out_replay;
}
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
/* Context has requests still in-flight; it should not be idle! */
GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
rq = active_request(ce->timeline, rq);
head = intel_ring_wrap(ce->ring, rq->head);
GEM_BUG_ON(head == ce->ring->tail);
@@ -3581,7 +3786,7 @@ out_replay:
head, ce->ring->tail);
__execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine, head);
- ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
+ ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
unwind:
/* Push back any incomplete requests for replay after the reset. */
@@ -3604,7 +3809,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
static void nop_submission_tasklet(unsigned long data)
{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
/* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
}
static void execlists_reset_cancel(struct intel_engine_cs *engine)
@@ -4194,8 +4402,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
if (!intel_vgpu_active(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
}
if (INTEL_GEN(engine->i915) >= 12)
@@ -4273,6 +4484,8 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4340,6 +4553,11 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
else
execlists->csb_size = GEN11_CSB_ENTRIES;
+ if (INTEL_GEN(engine->i915) >= 11) {
+ execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ }
+
reset_csb_pointers(engine);
/* Finally, take ownership and responsibility for cleanup! */
@@ -4514,8 +4732,13 @@ populate_lr_context(struct intel_context *ce,
inhibit = false;
}
- /* The second page of the context object contains some fields which must
- * be set up prior to the first execution. */
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /*
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
+ */
execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
ce, engine, ring, inhibit);
@@ -4553,8 +4776,17 @@ static int __execlists_context_alloc(struct intel_context *ce,
if (!ce->timeline) {
struct intel_timeline *tl;
+ struct i915_vma *hwsp;
+
+ /*
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
+ */
+ hwsp = NULL;
+ if (unlikely(intel_context_is_barrier(ce)))
+ hwsp = engine->status_page.vma;
- tl = intel_timeline_create(engine->gt, NULL);
+ tl = intel_timeline_create(engine->gt, hwsp);
if (IS_ERR(tl)) {
ret = PTR_ERR(tl);
goto error_deref_obj;
@@ -4723,7 +4955,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
mask = rq->execution_mask;
if (unlikely(!mask)) {
/* Invalid selection, submit to a random engine in error */
- i915_request_skip(rq, -ENODEV);
+ i915_request_set_error_once(rq, -ENODEV);
mask = ve->siblings[0]->mask;
}
@@ -4737,7 +4969,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
static void virtual_submission_tasklet(unsigned long data)
{
struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = ve->base.execlists.queue_priority_hint;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
@@ -5133,11 +5365,15 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tE ");
}
- last = NULL;
- count = 0;
+ if (execlists->switch_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tSwitch priority hint: %d\n",
+ READ_ONCE(execlists->switch_priority_hint));
if (execlists->queue_priority_hint != INT_MIN)
drm_printf(m, "\t\tQueue priority hint: %d\n",
- execlists->queue_priority_hint);
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
int i;
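
The execlists CCID handling above replaces a rolling counter with a free bitmap in engine->context_tag: schedule-in claims the lowest free bit with ffs()/clear_bit(), schedule-out returns it with set_bit(). A small userspace sketch of that allocator (using __builtin_ffsl in place of the kernel's ffs):

#include <stdio.h>

static unsigned long context_tag;

static void tags_init(void)
{
	/* All tags free except the top bit, mirroring GENMASK(BITS_PER_LONG - 2, 0) */
	context_tag = ~0UL >> 1;
}

static unsigned int tag_get(void)
{
	int tag = __builtin_ffsl((long)context_tag); /* 1-based lowest free tag */

	if (!tag)
		return 0; /* pool exhausted */
	context_tag &= ~(1UL << (tag - 1)); /* claim it, as schedule-in does */
	return (unsigned int)tag;
}

static void tag_put(unsigned int tag)
{
	context_tag |= 1UL << (tag - 1); /* release it, as schedule-out does */
}

int main(void)
{
	unsigned int a, b;

	tags_init();
	a = tag_get();
	b = tag_get();
	printf("got tags %u and %u\n", a, b); /* 1 and 2 */
	tag_put(a);
	printf("reused tag %u\n", tag_get()); /* 1 again */
	return 0;
}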
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 08a3be65f700..d39b72590e40 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -17,6 +17,7 @@
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
#define CTX_BB_PER_CTX_PTR (0x18 + 1)
+#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
#define CTX_PDP3_LDW (0x26 + 1)
#define CTX_PDP2_UDW (0x28 + 1)
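
Each CTX_* constant above indexes the value slot of a register:value pair in the context image, hence the `+ 1`. A tiny sketch of pulling the new CTX_TIMESTAMP slot out of a context image, as intel_context_get_runtime() in the lrc diff does; the image contents here are fabricated:

#include <stdint.h>
#include <stdio.h>

#define CTX_TIMESTAMP (0x22 + 1) /* value slot of the reg:value pair */

int main(void)
{
	uint32_t lrc_reg_state[0x60] = { 0 };

	/* Pretend the HW saved a runtime of 1234 ticks at context switch */
	lrc_reg_state[CTX_TIMESTAMP] = 1234;

	/* intel_context_get_runtime() in the lrc diff reads this slot */
	printf("context runtime: %u ticks\n", lrc_reg_state[CTX_TIMESTAMP]);
	return 0;
}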
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index eeef90b55c64..632e08a4592b 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -280,9 +280,32 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
GEN11_MOCS_ENTRIES
};
-static bool get_mocs_settings(const struct drm_i915_private *i915,
- struct drm_i915_mocs_table *table)
+enum {
+ HAS_GLOBAL_MOCS = BIT(0),
+ HAS_ENGINE_MOCS = BIT(1),
+ HAS_RENDER_L3CC = BIT(2),
+};
+
+static bool has_l3cc(const struct drm_i915_private *i915)
{
+ return true;
+}
+
+static bool has_global_mocs(const struct drm_i915_private *i915)
+{
+ return HAS_GLOBAL_MOCS_REGISTERS(i915);
+}
+
+static bool has_mocs(const struct drm_i915_private *i915)
+{
+ return !IS_DGFX(i915);
+}
+
+static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+ struct drm_i915_mocs_table *table)
+{
+ unsigned int flags;
+
if (INTEL_GEN(i915) >= 12) {
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
@@ -300,13 +323,13 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
} else {
- WARN_ONCE(INTEL_GEN(i915) >= 9,
- "Platform that should have a MOCS table does not.\n");
- return false;
+ drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9,
+ "Platform that should have a MOCS table does not.\n");
+ return 0;
}
if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
- return false;
+ return 0;
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (IS_GEN(i915, 9)) {
@@ -315,10 +338,20 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
for (i = 0; i < table->size; i++)
if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
(L3_ESC(1) | L3_SCC(0x7))))
- return false;
+ return 0;
}
- return true;
+ flags = 0;
+ if (has_mocs(i915)) {
+ if (has_global_mocs(i915))
+ flags |= HAS_GLOBAL_MOCS;
+ else
+ flags |= HAS_ENGINE_MOCS;
+ }
+ if (has_l3cc(i915))
+ flags |= HAS_RENDER_L3CC;
+
+ return flags;
}
/*
@@ -411,18 +444,20 @@ static void init_l3cc_table(struct intel_engine_cs *engine,
void intel_mocs_init_engine(struct intel_engine_cs *engine)
{
struct drm_i915_mocs_table table;
+ unsigned int flags;
/* Called under a blanket forcewake */
assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
- if (!get_mocs_settings(engine->i915, &table))
+ flags = get_mocs_settings(engine->i915, &table);
+ if (!flags)
return;
/* Platforms with global MOCS do not need per-engine initialization. */
- if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ if (flags & HAS_ENGINE_MOCS)
init_mocs_table(engine, &table);
- if (engine->class == RENDER_CLASS)
+ if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
init_l3cc_table(engine, &table);
}
@@ -431,26 +466,17 @@ static u32 global_mocs_offset(void)
return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
}
-static void init_global_mocs(struct intel_gt *gt)
+void intel_mocs_init(struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
+ unsigned int flags;
/*
* LLC and eDRAM control values are not applicable to dgfx
*/
- if (IS_DGFX(gt->i915))
- return;
-
- if (!get_mocs_settings(gt->i915, &table))
- return;
-
- __init_mocs_table(gt->uncore, &table, global_mocs_offset());
-}
-
-void intel_mocs_init(struct intel_gt *gt)
-{
- if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
- init_global_mocs(gt);
+ flags = get_mocs_settings(gt->i915, &table);
+ if (flags & HAS_GLOBAL_MOCS)
+ __init_mocs_table(gt->uncore, &table, global_mocs_offset());
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
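
Editor's note: the get_mocs_settings() rework above replaces a boolean result with a bitmask so callers can tell apart which kinds of MOCS programming a platform needs. A hedged sketch of the same flags-query pattern, with hypothetical names standing in for the driver's types:

#include <stdio.h>

enum {
	HAS_GLOBAL_MOCS = 1u << 0,	/* one register set for the whole GT */
	HAS_ENGINE_MOCS = 1u << 1,	/* per-engine register ranges */
	HAS_RENDER_L3CC = 1u << 2,	/* render-only L3 cacheability controls */
};

/* Hypothetical platform descriptor; stands in for drm_i915_private. */
struct platform { int gen; int is_dgfx; };

static unsigned int query_mocs_flags(const struct platform *p)
{
	unsigned int flags = 0;

	if (!p->is_dgfx)	/* discrete parts skip LLC/eDRAM controls */
		flags |= p->gen >= 12 ? HAS_GLOBAL_MOCS : HAS_ENGINE_MOCS;
	flags |= HAS_RENDER_L3CC;	/* always present in this sketch */

	return flags;
}

int main(void)
{
	const struct platform tgl = { .gen = 12, .is_dgfx = 0 };
	unsigned int flags = query_mocs_flags(&tgl);

	if (flags & HAS_GLOBAL_MOCS)
		puts("program global MOCS once");
	if (flags & HAS_RENDER_L3CC)
		puts("program L3CC on the render engine");
	return 0;
}
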
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9e303c29d6e3..3847ee44b181 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
@@ -226,10 +227,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
set(uncore, GEN6_RC_SLEEP, 0);
set(uncore, GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(i915))
- set(uncore, GEN6_RC6_THRESHOLD, 125000);
- else
- set(uncore, GEN6_RC6_THRESHOLD, 50000);
+ set(uncore, GEN6_RC6_THRESHOLD, 50000);
set(uncore, GEN6_RC6p_THRESHOLD, 150000);
set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
@@ -299,7 +297,6 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
pctx = i915_gem_object_create_stolen_for_preallocated(i915,
pcbr_offset,
- I915_GTT_OFFSET_NONE,
pctx_size);
if (IS_ERR(pctx))
return PTR_ERR(pctx);
@@ -323,10 +320,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
return PTR_ERR(pctx);
}
- GEM_BUG_ON(range_overflows_t(u64,
- i915->dsm.start,
- pctx->stolen->start,
- U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
pctx_paddr = i915->dsm.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
@@ -542,6 +539,8 @@ void intel_rc6_init(struct intel_rc6 *rc6)
void intel_rc6_sanitize(struct intel_rc6 *rc6)
{
+ memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency));
+
if (rc6->enabled) { /* unbalanced suspend/resume */
rpm_get(rc6);
rc6->enabled = false;
@@ -604,6 +603,7 @@ void intel_rc6_unpark(struct intel_rc6 *rc6)
void intel_rc6_park(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ unsigned int target;
if (!rc6->enabled)
return;
@@ -618,7 +618,14 @@ void intel_rc6_park(struct intel_rc6 *rc6)
/* Turn off the HW timers and go directly to rc6 */
set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
- set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ target = 0x6; /* deepest rc6 */
+ else if (HAS_RC6p(rc6_to_i915(rc6)))
+ target = 0x5; /* deep rc6 */
+ else
+ target = 0x4; /* normal rc6 */
+ set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
}
void intel_rc6_disable(struct intel_rc6 *rc6)
@@ -713,7 +720,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
*/
i = (i915_mmio_reg_offset(reg) -
i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
- if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+ if (drm_WARN_ON_ONCE(&i915->drm, i >= ARRAY_SIZE(rc6->cur_residency)))
return 0;
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
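
Editor's note: intel_rc6_sanitize() above now clears prev_hw_residency because after a reset the hardware residency counters restart from zero; keeping the old snapshots would make the next delta computation go backwards. A hedged sketch of the underlying bookkeeping, accumulating a wrapping 32-bit hardware counter into a 64-bit total (names hypothetical):

#include <stdint.h>

/* Hypothetical accumulator for a wrapping 32-bit hardware counter. */
struct residency {
	uint32_t prev_hw;	/* last raw snapshot */
	uint64_t total;		/* monotonically accumulated value */
};

static uint64_t residency_update(struct residency *r, uint32_t hw)
{
	/* Unsigned subtraction handles a single 2^32 wrap correctly. */
	r->total += (uint32_t)(hw - r->prev_hw);
	r->prev_hw = hw;
	return r->total;
}

/* After a reset the counters restart from zero, so forget the old
 * snapshot (not the accumulated total).
 */
static void residency_sanitize(struct residency *r)
{
	r->prev_hw = 0;
}
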
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index beee0cf89bce..80db3c9d785e 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -48,8 +48,10 @@ static void engine_skip_context(struct i915_request *rq)
lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
- if (rq->context == hung_ctx)
- i915_request_skip(rq, -EIO);
+ if (rq->context == hung_ctx) {
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -72,9 +74,10 @@ static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
if (score) {
atomic_add(score, &file_priv->ban_score);
- DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
- ctx->name, score,
- atomic_read(&file_priv->ban_score));
+ drm_dbg(&ctx->i915->drm,
+ "client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
}
}
@@ -85,19 +88,18 @@ static bool mark_guilty(struct i915_request *rq)
bool banned;
int i;
+ if (intel_context_is_closed(rq->context)) {
+ intel_context_set_banned(rq->context);
+ return true;
+ }
+
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
ctx = NULL;
rcu_read_unlock();
if (!ctx)
- return false;
-
- if (i915_gem_context_is_closed(ctx)) {
- intel_context_set_banned(rq->context);
- banned = true;
- goto out;
- }
+ return intel_context_is_banned(rq->context);
atomic_inc(&ctx->guilty_count);
@@ -122,8 +124,8 @@ static bool mark_guilty(struct i915_request *rq)
if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
banned = true;
if (banned) {
- DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
- ctx->name, atomic_read(&ctx->guilty_count));
+ drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count));
intel_context_set_banned(rq->context);
}
@@ -153,11 +155,12 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
- i915_request_skip(rq, -EIO);
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
if (mark_guilty(rq))
engine_skip_context(rq);
} else {
- dma_fence_set_error(&rq->fence, -EAGAIN);
+ i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
}
rcu_read_unlock();
@@ -226,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
goto out;
}
@@ -234,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
goto out;
}
@@ -260,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
goto out;
}
@@ -271,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
goto out;
}
@@ -300,8 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
500, 0,
NULL);
if (err)
- DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
+ drm_dbg(&gt->i915->drm,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
return err;
}
@@ -401,7 +405,8 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return 0;
if (ret) {
- DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ drm_dbg(&engine->i915->drm,
+ "Wait for SFC forced lock ack failed\n");
return ret;
}
@@ -515,9 +520,10 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
- DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
- engine->name, request,
- intel_uncore_read_fw(uncore, reg));
+ drm_err(&engine->i915->drm,
+ "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+ engine->name, request,
+ intel_uncore_read_fw(uncore, reg));
return ret;
}
@@ -781,7 +787,7 @@ static void nop_submit_request(struct i915_request *request)
unsigned long flags;
RQ_TRACE(request, "-EIO\n");
- dma_fence_set_error(&request->fence, -EIO);
+ i915_request_set_error_once(request, -EIO);
spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
@@ -800,13 +806,6 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (test_bit(I915_WEDGED, &gt->reset.flags))
return;
- if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- for_each_engine(engine, gt, id)
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
-
GT_TRACE(gt, "start\n");
/*
@@ -845,10 +844,30 @@ void intel_gt_set_wedged(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
+ return;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
mutex_lock(&gt->reset.mutex);
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- __intel_gt_set_wedged(gt);
+
+ if (GEM_SHOW_DEBUG()) {
+ struct drm_printer p = drm_debug_printer(__func__);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_is_idle(engine))
+ continue;
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+ }
+
+ __intel_gt_set_wedged(gt);
+
mutex_unlock(&gt->reset.mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
@@ -969,7 +988,7 @@ static int resume(struct intel_gt *gt)
int ret;
for_each_engine(engine, gt, id) {
- ret = engine->resume(engine);
+ ret = intel_engine_resume(engine);
if (ret)
return ret;
}
@@ -1022,7 +1041,7 @@ void intel_gt_reset(struct intel_gt *gt,
if (i915_modparams.reset)
dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
- DRM_DEBUG_DRIVER("GPU reset disabled\n");
+ drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
goto error;
}
@@ -1049,8 +1068,9 @@ void intel_gt_reset(struct intel_gt *gt,
*/
ret = intel_gt_init_hw(gt);
if (ret) {
- DRM_ERROR("Failed to initialise HW following reset (%d)\n",
- ret);
+ drm_err(&gt->i915->drm,
+ "Failed to initialise HW following reset (%d)\n",
+ ret);
goto taint;
}
@@ -1126,9 +1146,8 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- uses_guc ? "GuC " : "",
- engine->name, ret);
+ drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
+ uses_guc ? "GuC " : "", engine->name, ret);
goto out;
}
@@ -1144,7 +1163,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
* have been reset to their default values. Follow the init_ring
* process to program RING_MODE, HWSP and re-enable submission.
*/
- ret = engine->resume(engine);
+ ret = intel_engine_resume(engine);
out:
intel_engine_cancel_stop_cs(engine);
@@ -1165,7 +1184,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
- DRM_DEBUG_DRIVER("resetting chip\n");
+ drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
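
Editor's note: several call sites above switch from dma_fence_set_error() to i915_request_set_error_once(), which must tolerate concurrent callers racing to mark the same request. A hedged sketch of the idea using a compare-and-swap loop; C11 atomics and hypothetical names stand in for the driver's real helper:

#include <stdatomic.h>

/* Hypothetical request with an atomically updated error code. */
struct request { atomic_int error; };

/* Record the first error only; later callers must not overwrite it. */
static void request_set_error_once(struct request *rq, int error)
{
	int old = atomic_load_explicit(&rq->error, memory_order_relaxed);

	do {
		if (old)	/* an error is already recorded; keep it */
			return;
	} while (!atomic_compare_exchange_weak(&rq->error, &old, error));
}
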
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 6ff803f397c4..8cda1b7e17ba 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -31,17 +31,15 @@ int intel_ring_pin(struct intel_ring *ring)
if (atomic_fetch_inc(&ring->pin_count))
return 0;
- flags = PIN_GLOBAL;
-
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+ flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
- ret = i915_vma_pin(vma, 0, 0, flags);
+ ret = i915_ggtt_pin(vma, 0, flags);
if (unlikely(ret))
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index bc44fe8e5ffa..fdc3f10e12aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -29,11 +29,10 @@
#include <linux/log2.h>
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gen6_ppgtt.h"
+#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
@@ -568,7 +567,8 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
return;
/* ring should be idle before issuing a sync flush */
- WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
ENGINE_WRITE(engine, RING_INSTPM,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -626,6 +626,27 @@ static bool stop_ring(struct intel_engine_cs *engine)
return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
+static struct i915_address_space *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+ return vm;
+}
+
+static void set_pp_dir(struct intel_engine_cs *engine)
+{
+ struct i915_address_space *vm = vm_alias(engine->gt->vm);
+
+ if (vm) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+ ENGINE_WRITE(engine, RING_PP_DIR_BASE,
+ px_base(ppgtt->pd)->ggtt_offset << 10);
+ }
+}
+
static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -684,6 +705,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
intel_ring_update_space(ring);
+ set_pp_dir(engine);
+
/* First wake the ring up to an empty/idle ring */
ENGINE_WRITE(engine, RING_HEAD, ring->head);
ENGINE_WRITE(engine, RING_TAIL, ring->head);
@@ -857,43 +880,6 @@ static int rcs_resume(struct intel_engine_cs *engine)
intel_uncore_write(uncore, ECOSKPD,
_MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
- /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (IS_GEN_RANGE(i915, 4, 6))
- intel_uncore_write(uncore, MI_MODE,
- _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
-
- /* We need to disable the AsyncFlip performance optimisations in order
- * to use MI_WAIT_FOR_EVENT within the CS. It should already be
- * programmed to '1' on all products.
- *
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
- */
- if (IS_GEN_RANGE(i915, 6, 7))
- intel_uncore_write(uncore, MI_MODE,
- _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
- /* Required for the hardware to program scanline values for waiting */
- /* WaEnableFlushTlbInvalidationMode:snb */
- if (IS_GEN(i915, 6))
- intel_uncore_write(uncore, GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
-
- /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN(i915, 7))
- intel_uncore_write(uncore, GFX_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
- _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-
- if (IS_GEN(i915, 6)) {
- /* From the Sandybridge PRM, volume 1 part 3, page 24:
- * "If this bit is set, STCunit will have LRA as replacement
- * policy. [...] This bit must be reset. LRA replacement
- * policy is not supported."
- */
- intel_uncore_write(uncore, CACHE_MODE_0,
- _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
- }
-
if (IS_GEN_RANGE(i915, 6, 7))
intel_uncore_write(uncore, INSTPM,
_MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
@@ -910,9 +896,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
@@ -1197,23 +1181,12 @@ static void ring_context_destroy(struct kref *ref)
intel_context_free(ce);
}
-static struct i915_address_space *vm_alias(struct intel_context *ce)
-{
- struct i915_address_space *vm;
-
- vm = ce->vm;
- if (i915_is_ggtt(vm))
- vm = &i915_vm_to_ggtt(vm)->alias->vm;
-
- return vm;
-}
-
static int __context_pin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
int err = 0;
- vm = vm_alias(ce);
+ vm = vm_alias(ce->vm);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
@@ -1224,7 +1197,7 @@ static void __context_unpin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
- vm = vm_alias(ce);
+ vm = vm_alias(ce->vm);
if (vm)
gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
@@ -1384,7 +1357,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
@@ -1459,7 +1434,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->context->state) | flags;
+ *cs++ = i915_ggtt_offset(ce->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1574,21 +1549,64 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
+static int clear_residuals(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int ret;
+
+ ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
+ if (ret)
+ return ret;
+
+ if (engine->kernel_context->state) {
+ ret = mi_set_context(rq,
+ engine->kernel_context,
+ MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
+ if (ret)
+ return ret;
+ }
+
+ ret = engine->emit_bb_start(rq,
+ engine->wa_ctx.vma->node.start, 0,
+ 0);
+ if (ret)
+ return ret;
+
+ ret = engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /* Always invalidate before the next switch_mm() */
+ return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
static int switch_context(struct i915_request *rq)
{
+ struct intel_engine_cs *engine = rq->engine;
struct intel_context *ce = rq->context;
+ void **residuals = NULL;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- ret = switch_mm(rq, vm_alias(ce));
+ if (engine->wa_ctx.vma && ce != engine->kernel_context) {
+ if (engine->wa_ctx.vma->private != ce) {
+ ret = clear_residuals(rq);
+ if (ret)
+ return ret;
+
+ residuals = &engine->wa_ctx.vma->private;
+ }
+ }
+
+ ret = switch_mm(rq, vm_alias(ce->vm));
if (ret)
return ret;
if (ce->state) {
u32 flags;
- GEM_BUG_ON(rq->engine->id != RCS0);
+ GEM_BUG_ON(engine->id != RCS0);
/* For resource streamer on HSW+ and power context elsewhere */
BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
@@ -1600,7 +1618,7 @@ static int switch_context(struct i915_request *rq)
else
flags |= MI_RESTORE_INHIBIT;
- ret = mi_set_context(rq, flags);
+ ret = mi_set_context(rq, ce, flags);
if (ret)
return ret;
}
@@ -1609,6 +1627,20 @@ static int switch_context(struct i915_request *rq)
if (ret)
return ret;
+ /*
+ * Now past the point of no return, this request _will_ be emitted.
+ *
+ * Or at least this preamble will be emitted; the request may be
+ * interrupted prior to submitting the user payload. If so, we
+ * still submit the "empty" request in order to preserve global
+ * state tracking such as this, our tracking of the current
+ * dirty context.
+ */
+ if (residuals) {
+ intel_context_put(*residuals);
+ *residuals = intel_context_get(ce);
+ }
+
return 0;
}
@@ -1662,7 +1694,8 @@ static void gen6_bsd_submit_request(struct i915_request *request)
GEN6_BSD_SLEEP_INDICATOR,
0,
1000, 0, NULL))
- DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+ drm_err(&uncore->i915->drm,
+ "timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
i9xx_submit_request(request);
@@ -1787,11 +1820,16 @@ static void ring_release(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
+ if (engine->wa_ctx.vma) {
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+ }
+
intel_ring_unpin(engine->legacy.ring);
intel_ring_put(engine->legacy.ring);
@@ -1939,6 +1977,64 @@ static void setup_vecs(struct intel_engine_cs *engine)
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}
+static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ return gen7_setup_clear_gpr_bb(engine, vma);
+}
+
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size;
+ int err;
+
+ size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+ if (size <= 0)
+ return size;
+
+ size = ALIGN(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ goto err_private;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_unpin;
+
+ err = gen7_ctx_switch_bb_setup(engine, vma);
+ if (err)
+ goto err_unpin;
+
+ engine->wa_ctx.vma = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err_private:
+ intel_context_put(vma->private);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
struct intel_timeline *timeline;
@@ -1992,11 +2088,19 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+ err = gen7_ctx_switch_bb_init(engine);
+ if (err)
+ goto err_ring_unpin;
+ }
+
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
return 0;
+err_ring_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err_timeline_unpin:
@@ -2007,3 +2111,7 @@ err:
intel_engine_cleanup_common(engine);
return err;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring_submission.c"
+#endif
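
Editor's note: the new switch_context() path remembers which context last dirtied the Haswell workaround batch by parking a reference in engine->wa_ctx.vma->private and swapping it only once the request is past the point of no return. Reduced to its core, that hand-off is a refcounted pointer swap; a hedged sketch with hypothetical helpers standing in for intel_context_get()/intel_context_put():

#include <stdlib.h>

/* Hypothetical refcounted context. */
struct ctx {
	int refcount;
};

static struct ctx *ctx_get(struct ctx *c)
{
	c->refcount++;
	return c;
}

static void ctx_put(struct ctx *c)
{
	if (c && !--c->refcount)
		free(c);
}

/* 'owner' tracks whichever context last dirtied the shared state:
 * drop the previous holder, then pin the new one.
 */
static void mark_dirty(struct ctx **owner, struct ctx *ce)
{
	ctx_put(*owner);
	*owner = ctx_get(ce);
}
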
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index d2a3d935d186..19542fd9e207 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
@@ -55,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= rps->pm_events;
+ mask &= READ_ONCE(rps->pm_events);
return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -68,22 +70,25 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
rps_reset_ei(rps);
if (IS_VALLEYVIEW(gt->i915))
/* WaGsvRC0ResidencyMethod:vlv */
- rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ events = GEN6_PM_RP_UP_EI_EXPIRED;
else
- rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
+ events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+ WRITE_ONCE(rps->pm_events, events);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
- set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq));
+ intel_uncore_write(gt->uncore,
+ GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}
static void gen6_rps_reset_interrupts(struct intel_rps *rps)
@@ -115,9 +120,10 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- rps->pm_events = 0;
+ WRITE_ONCE(rps->pm_events, 0);
- set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
+ intel_uncore_write(gt->uncore,
+ GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
@@ -642,7 +648,7 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
mutex_lock(&rps->power.mutex);
if (interactive) {
- if (!rps->power.interactive++ && rps->active)
+ if (!rps->power.interactive++ && READ_ONCE(rps->active))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
@@ -719,11 +725,15 @@ void intel_rps_unpark(struct intel_rps *rps)
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);
- rps->active = true;
+
+ WRITE_ONCE(rps->active, true);
+
freq = max(rps->cur_freq, rps->efficient_freq),
freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
intel_rps_set(rps, freq);
+
rps->last_adj = 0;
+
mutex_unlock(&rps->lock);
if (INTEL_GEN(rps_to_i915(rps)) >= 6)
@@ -743,7 +753,7 @@ void intel_rps_park(struct intel_rps *rps)
if (INTEL_GEN(i915) >= 6)
rps_disable_interrupts(rps);
- rps->active = false;
+ WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;
@@ -763,14 +773,27 @@ void intel_rps_park(struct intel_rps *rps)
intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
rps_set(rps, rps->idle_freq, false);
intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+
+ /*
+ * Since we will try and restart from the previously requested
+ * frequency on unparking, treat this idle point as a downclock
+ * interrupt and reduce the frequency for resume. If we park/unpark
+ * more frequently than the rps worker can run, we will not respond
+ * to any EI and never see a change in frequency.
+ *
+ * (Note we accommodate Cherryview's limitation of only using an
+ * even bin by applying it to all.)
+ */
+ rps->cur_freq =
+ max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
}
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &rq->engine->gt->rps;
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;
- if (i915_request_signaled(rq) || !rps->active)
+ if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
return;
/* Serializes with i915_request_retire() */
@@ -1026,7 +1049,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+ drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+ "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -1123,7 +1147,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+ drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+ "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -1191,11 +1216,11 @@ void intel_rps_enable(struct intel_rps *rps)
if (!rps->enabled)
return;
- WARN_ON(rps->max_freq < rps->min_freq);
- WARN_ON(rps->idle_freq > rps->max_freq);
+ drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
+ drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
- WARN_ON(rps->efficient_freq < rps->min_freq);
- WARN_ON(rps->efficient_freq > rps->max_freq);
+ drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
+ drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
}
static void gen6_rps_disable(struct intel_rps *rps)
@@ -1390,9 +1415,9 @@ static void chv_rps_init(struct intel_rps *rps)
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
- rps->min_freq) & 1,
- "Odd GPU freq values\n");
+ drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
+ rps->rp1_freq | rps->min_freq) & 1,
+ "Odd GPU freq values\n");
}
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
@@ -1451,12 +1476,12 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;
spin_lock_irq(&gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
- if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ if (!pm_iir && !client_boost)
goto out;
mutex_lock(&rps->lock);
@@ -1552,11 +1577,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
- if (pm_iir & rps->pm_events) {
+ events = pm_iir & READ_ONCE(rps->pm_events);
+ if (events) {
spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
- rps->pm_iir |= pm_iir & rps->pm_events;
+
+ gen6_gt_pm_mask_irq(gt, events);
+ rps->pm_iir |= events;
+
schedule_work(&rps->work);
spin_unlock(&gt->irq_lock);
}
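
Editor's note: intel_rps_park() above downclocks on idle by stepping cur_freq to the next lower even bin, clamped at min_freq; the even rounding covers Cherryview's even-bin-only restriction. The arithmetic, worked through in a hedged sketch (for this power-of-two step, round_down(x, 2) is x & ~1):

/* Sketch of: max_t(int, round_down(cur - 1, 2), min) */
static int park_freq(int cur, int min)
{
	int next = (cur - 1) & ~1;	/* round_down(cur - 1, 2) */

	return next > min ? next : min;
}

/*
 * Worked examples:
 *   cur = 11, min = 4  ->  round_down(10, 2) = 10
 *   cur = 10, min = 4  ->  round_down(9, 2)  =  8
 *   cur =  5, min = 4  ->  round_down(4, 2)  =  4 (already at the floor)
 */
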
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index d8d9f1179c2b..08b56d7ab4f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -312,7 +312,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
- err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH);
if (err)
return err;
@@ -410,6 +410,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
void *vaddr;
int err;
+ might_lock(&tl->gt->ggtt->vm.mutex);
+
/*
* If there is an outstanding GPU reference to this cacheline,
* such as it being sampled by a HW semaphore on another timeline,
@@ -435,7 +437,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
goto err_rollback;
}
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (err) {
__idle_hwsp_free(vma->private, cacheline);
goto err_rollback;
@@ -519,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from,
rcu_read_lock();
cl = rcu_dereference(from->hwsp_cacheline);
+ if (i915_request_completed(from)) /* confirm cacheline is valid */
+ goto unlock;
if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
goto unlock; /* seqno wrapped and completed! */
if (unlikely(i915_request_completed(from)))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 6c2f8462e0f3..5176ad1a3976 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -116,17 +116,17 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
} else {
wa_ = &wal->list[mid];
- if ((wa->mask & ~wa_->mask) == 0) {
- DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+ if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
+ DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
i915_mmio_reg_offset(wa_->reg),
- wa_->mask, wa_->val);
+ wa_->clr, wa_->set);
- wa_->val &= ~wa->mask;
+ wa_->set &= ~wa->clr;
}
wal->wa_count++;
- wa_->val |= wa->val;
- wa_->mask |= wa->mask;
+ wa_->set |= wa->set;
+ wa_->clr |= wa->clr;
wa_->read |= wa->read;
return;
}
@@ -147,13 +147,13 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
}
}
-static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val, u32 read_mask)
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 clear, u32 set, u32 read_mask)
{
struct i915_wa wa = {
.reg = reg,
- .mask = mask,
- .val = val,
+ .clr = clear,
+ .set = set,
.read = read_mask,
};
@@ -161,38 +161,43 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
}
static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val)
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
- wa_add(wal, reg, mask, val, mask);
+ wa_add(wal, reg, clear, set, clear);
}
static void
-wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
+ wa_write_masked_or(wal, reg, ~0, set);
}
static void
-wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, ~0, val);
+ wa_write_masked_or(wal, reg, set, set);
}
static void
-wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_write_masked_or(wal, reg, val, val);
+ wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
+}
+
+static void
+wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}
#define WA_SET_BIT_MASKED(addr, mask) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
+ wa_masked_en(wal, (addr), (mask))
#define WA_CLR_BIT_MASKED(addr, mask) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
+ wa_masked_dis(wal, (addr), (mask))
#define WA_SET_FIELD_MASKED(addr, mask, value) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
+ wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
@@ -570,12 +575,29 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* allow headerless messages for preemptible GPGPU context */
WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+
+ /* Wa_1604278689:icl,ehl */
+ wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
+ wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
+
+ /* Wa_1406306137:icl,ehl */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- /* Wa_1409142259:tgl */
+ /*
+ * Wa_1409142259:tgl
+ * Wa_1409347922:tgl
+ * Wa_1409252684:tgl
+ * Wa_1409217633:tgl
+ * Wa_1409207793:tgl
+ * Wa_1409178076:tgl
+ * Wa_1408979724:tgl
+ */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
@@ -588,6 +610,11 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128, 0);
+
+ /* WaDisableGPGPUMidThreadPreemption:tgl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
static void
@@ -657,7 +684,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
*cs++ = i915_mmio_reg_offset(wa->reg);
- *cs++ = wa->val;
+ *cs++ = wa->set;
}
*cs++ = MI_NOOP;
@@ -822,7 +849,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
- WARN_ON(!subslice);
+ drm_WARN_ON(&i915->drm, !subslice);
}
subslice--;
@@ -893,11 +920,6 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SLICE_UNIT_LEVEL_CLKGATE,
MSCUNIT_CLKGATE_DIS);
- /* Wa_1406680159:icl */
- wa_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE,
- GWUNIT_CLKGATE_DIS);
-
/* Wa_1406838659:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
@@ -926,7 +948,7 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
- /* Wa_1409180338:tgl */
+ /* Wa_1607087056:tgl also known as BUG:1409180338 */
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
@@ -986,11 +1008,10 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
- if ((cur ^ wa->val) & wa->read) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ if ((cur ^ wa->set) & wa->read) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
name, from, i915_mmio_reg_offset(wa->reg),
- cur, cur & wa->read,
- wa->val, wa->mask);
+ cur, cur & wa->read, wa->set);
return false;
}
@@ -1015,7 +1036,10 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
+ if (wa->clr)
+ intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set);
+ else
+ intel_uncore_write_fw(uncore, wa->reg, wa->set);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
wa_verify(wa,
intel_uncore_read_fw(uncore, wa->reg),
@@ -1239,6 +1263,7 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ * Wa_1408556865:tgl
*
* This covers 4 registers which are next to one another:
* - PS_INVOCATION_COUNT
@@ -1249,6 +1274,12 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
+
+ /* Wa_1808121037:tgl */
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
+
+ /* Wa_1806527549:tgl */
+ whitelist_reg(w, HIZ_CHICKEN);
break;
default:
break;
@@ -1315,19 +1346,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
- /* Wa_1606700617:tgl */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
-
- /* Wa_1607138336:tgl */
+ /*
+ * Wa_1607138336:tgl
+ * Wa_1607063988:tgl
+ */
wa_write_or(wal,
GEN9_CTX_PREEMPT_REG,
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- /* Wa_1607030317:tgl */
- /* Wa_1607186500:tgl */
- /* Wa_1607297627:tgl */
+ /*
+ * Wa_1607030317:tgl
+ * Wa_1607186500:tgl
+ * Wa_1607297627:tgl there are 3 entries for this WA on BSpec; 2
+ * of them say it is fixed on B0, the other one says it is
+ * permanent
+ */
wa_masked_en(wal,
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
@@ -1340,6 +1373,35 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
+
+ /* Wa_1407928979:tgl */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+ /* Wa_1408615072:tgl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
+ }
+
+ if (IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
+
+ /* Wa_1409804808:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_PUSH_CONST_DEREF_HOLD_DIS);
+
+ /* Wa_1606700617:tgl */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN(i915, 11)) {
@@ -1405,10 +1467,38 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_SCRATCH2,
GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
0);
+
+ /* WaEnable32PlaneMode:icl */
+ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+ GEN11_ENABLE_32_PLANE_MODE);
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ PSDUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl,ehl */
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
+
+ /*
+ * Wa_1408767742:icl[a2..forever],ehl[all]
+ * Wa_1605460711:icl[a0..c0]
+ */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
}
- if (IS_GEN_RANGE(i915, 9, 11)) {
- /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
+ if (IS_GEN_RANGE(i915, 9, 12)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1452,6 +1542,52 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
}
+
+ if (IS_GEN(i915, 7))
+ /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
+ wa_masked_en(wal,
+ GFX_MODE_GEN7,
+ GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+
+ if (IS_GEN_RANGE(i915, 6, 7))
+ /*
+ * We need to disable the AsyncFlip performance optimisations in
+ * order to use MI_WAIT_FOR_EVENT within the CS. It should
+ * already be programmed to '1' on all products.
+ *
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+ */
+ wa_masked_en(wal,
+ MI_MODE,
+ ASYNC_FLIP_PERF_DISABLE);
+
+ if (IS_GEN(i915, 6)) {
+ /*
+ * Required for the hardware to program scanline values for
+ * waiting
+ * WaEnableFlushTlbInvalidationMode:snb
+ */
+ wa_masked_en(wal,
+ GFX_MODE,
+ GFX_TLB_INVALIDATE_EXPLICIT);
+
+ /*
+ * From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ wa_masked_dis(wal,
+ CACHE_MODE_0,
+ CM0_STC_EVICT_DISABLE_LRA_SNB);
+ }
+
+ if (IS_GEN_RANGE(i915, 4, 6))
+ /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+ wa_add(wal, MI_MODE,
+ 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
+ /* XXX bit doesn't stick on Broadwater */
+ IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
}
static void
@@ -1470,7 +1606,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+ if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
return;
if (engine->class == RENDER_CLASS)
@@ -1483,7 +1619,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
- if (INTEL_GEN(engine->i915) < 8)
+ if (INTEL_GEN(engine->i915) < 4)
return;
wa_init_start(wal, "engine", engine->name);
@@ -1626,6 +1762,16 @@ static int engine_wa_list_verify(struct intel_context *ce,
goto err_vma;
}
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err) {
+ i915_request_add(rq);
+ goto err_vma;
+ }
+
err = wa_list_srm(rq, wal, vma);
if (err)
goto err_vma;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index e27ab1b710b3..d166a7145720 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -13,8 +13,8 @@
struct i915_wa {
i915_reg_t reg;
- u32 mask;
- u32 val;
+ u32 clr;
+ u32 set;
u32 read;
};
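
Editor's note: the i915_wa rename from mask/val to clr/set above makes the apply step explicit: a non-zero clr requests a read-modify-write, while a zero clr means a plain write (masked registers encode their own write-enable bits in the upper half of set). A hedged sketch of both halves; the MASKED_BIT_* encoding shown follows the i915 convention, but the helper itself is hypothetical:

#include <stdint.h>

/* Masked registers: the upper 16 bits select which of the lower
 * 16 bits the hardware actually updates.
 */
#define MASKED_BIT_ENABLE(a)	(((uint32_t)(a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((uint32_t)(a) << 16)

/* Mirror of wa_list_apply()'s two cases. */
static uint32_t apply_wa(uint32_t cur, uint32_t clr, uint32_t set)
{
	if (clr)			/* read-modify-write */
		return (cur & ~clr) | set;

	return set;			/* plain write, e.g. masked register */
}
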
diff --git a/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
new file mode 100644
index 000000000000..610ca7687735
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC
+ */
+
+static const u32 ivb_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x0000002c,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffffc,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffff8,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index f2806381733f..4a53ded7c2dd 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -65,6 +65,9 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
return NULL;
}
i915_active_init(&ring->vma->active, NULL, NULL);
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
+ ring->vma->node.size = sz;
intel_ring_update_space(ring);
@@ -241,9 +244,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 43d4d589749f..697114dd1f47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -142,6 +142,24 @@ out:
return err;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
@@ -152,9 +170,11 @@ static int live_idle_flush(void *arg)
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err)
break;
}
@@ -172,9 +192,11 @@ static int live_idle_pulse(void *arg)
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_pulse);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err && err != -ENODEV)
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 3e5e6c86e843..2b2efff6e19d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -268,7 +268,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
@@ -1640,7 +1640,7 @@ static int igt_reset_engines_atomic(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
igt_global_reset_lock(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index fd3770e48ac7..a912159693fd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -18,10 +18,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
- if (!get_ia_constants(llc, &consts)) {
- err = -ENODEV;
+ if (!get_ia_constants(llc, &consts))
goto out_rpm;
- }
for (gpu_freq = consts.min_gpu_freq;
gpu_freq <= consts.max_gpu_freq;
@@ -71,10 +69,5 @@ out_rpm:
int st_llc_verify(struct intel_llc *llc)
{
- int err = 0;
-
- if (HAS_LLC(llc_to_gt(llc)->i915))
- err = gen6_verify_ring_freq(llc);
-
- return err;
+ return gen6_verify_ring_freq(llc);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index b292f8cbd0bf..f95ae15ce865 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -68,6 +68,71 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
engine->props.heartbeat_interval_ms = saved;
}
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_is_active(rq))
+ return 0;
+
+ if (i915_request_started(rq)) /* that was quick! */
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIME;
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@@ -285,6 +350,84 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
+static int live_pin_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * We have to be careful not to trust intel_ring too much; for example,
+ * ring->head is updated upon retire, which is out of sync with pinning
+ * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+ * or else we risk writing an older, stale value.
+ *
+ * To simulate this, let's apply a bit of deliberate sabotage.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct intel_ring *ring;
+ struct igt_live_test t;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ break;
+ }
+
+ /* Keep the context awake while we play games */
+ err = i915_active_acquire(&ce->active);
+ if (err) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ break;
+ }
+ ring = ce->ring;
+
+ /* Poison the ring, and offset the next request from HEAD */
+ memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+ ring->emit = ring->size / 2;
+ ring->tail = ring->emit;
+ GEM_BUG_ON(ring->head);
+
+ intel_context_unpin(ce);
+
+ /* Submit a simple nop request */
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+ rq = intel_context_create_request(ce);
+ i915_active_release(&ce->active); /* e.g. async retire */
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+ GEM_BUG_ON(!rq->head);
+ i915_request_add(rq);
+
+ /* Expect not to hang! */
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
static int live_hold_reset(void *arg)
{
struct intel_gt *gt = arg;
@@ -386,6 +529,152 @@ out:
return err;
}
+static const char *error_repr(int err)
+{
+ return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+ static const struct error_phase {
+ enum { GOOD = 0, BAD = -EIO } error[2];
+ } phases[] = {
+ { { BAD, GOOD } },
+ { { BAD, BAD } },
+ { { BAD, GOOD } },
+ { { GOOD, GOOD } }, /* sentinel */
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+ * of invalid commands in user batches that will cause a GPU hang.
+ * This is a faster mechanism than using hangcheck/heartbeats, but
+ * only detects problems the HW knows about -- it will not warn when
+ * we kill the HW!
+ *
+ * To verify our detection and reset, we throw some invalid commands
+ * at the HW and wait for the interrupt.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ const struct error_phase *p;
+ unsigned long heartbeat;
+ int err = 0;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ for (p = phases; p->error[0] != GOOD; p++) {
+ struct i915_request *client[ARRAY_SIZE(phases->error)];
+ u32 *cs;
+ int i;
+
+ memset(client, 0, sizeof(client));
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ if (p->error[i]) {
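+ /* Garbage opcodes: the CS should flag these with an error interrupt */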
+ *cs++ = 0xdeadbeef;
+ *cs++ = 0xdeadbeef;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ client[i] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ err = wait_for_submit(engine, client[0], HZ / 2);
+ if (err) {
+ pr_err("%s: first request did not start within time!\n",
+ engine->name);
+ err = -ETIME;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+ pr_debug("%s: %s request incomplete!\n",
+ engine->name,
+ error_repr(p->error[i]));
+
+ if (!i915_request_started(client[i])) {
+ pr_debug("%s: %s request not stated!\n",
+ engine->name,
+ error_repr(p->error[i]));
+ err = -ETIME;
+ goto out;
+ }
+
+ /* Kick the tasklet to process the error */
+ intel_engine_flush_submission(engine);
+ if (client[i]->fence.error != p->error[i]) {
+ pr_err("%s: %s request completed with wrong error code: %d\n",
+ engine->name,
+ error_repr(p->error[i]),
+ client[i]->fence.error);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ if (client[i])
+ i915_request_put(client[i]);
+ if (err) {
+ pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+ engine->name, p - phases,
+ p->error[0], p->error[1]);
+ break;
+ }
+ }
+
+ engine_heartbeat_enable(engine, heartbeat);
+ if (err) {
+ intel_gt_set_wedged(gt);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -580,6 +869,10 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
for_each_prime_number_from(count, 1, 16) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -614,33 +907,229 @@ err_obj:
return err;
}
-static struct i915_request *nop_request(struct intel_engine_cs *engine)
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+ struct i915_request *wait,
+ void *slot, int idx)
{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
struct i915_request *rq;
+ u32 *cs;
+ int err;
- rq = intel_engine_create_kernel_request(engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
+ if (wait) {
+ err = i915_request_await_dma_fence(rq, &wait->fence);
+ if (err)
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
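+ /* Spin on the semaphore in the HWSP until it counts up to our index */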
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = idx;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = idx + 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
i915_request_get(rq);
i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
return rq;
}
-static int wait_for_submit(struct intel_engine_cs *engine,
- struct i915_request *rq,
- unsigned long timeout)
+static int live_timeslice_rewind(void *arg)
{
- timeout += jiffies;
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
- if (i915_request_is_active(rq))
- return 0;
- } while (time_before(jiffies, timeout));
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- return -ETIME;
+ /*
+ * The usual presumption on timeslice expiration is that we replace
+ * the active context with another. However, given a chain of
+ * dependencies we may end up with replacing the context with itself,
+ * but only a few of those requests, forcing us to rewind the
+ * RING_TAIL of the original request.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ enum { A1, A2, B1 };
+ enum { X = 1, Z, Y };
+ struct i915_request *rq[3] = {};
+ struct intel_context *ce;
+ unsigned long heartbeat;
+ unsigned long timeslice;
+ int i, err = 0;
+ u32 *slot;
+
+ if (!intel_engine_has_timeslices(engine))
+ continue;
+
+ /*
+ * A:rq1 -- semaphore wait, timestamp X
+ * A:rq2 -- write timestamp Y
+ *
+ * B:rq1 [await A:rq1] -- write timestamp Z
+ *
+ * Force timeslice, release semaphore.
+ *
+ * Expect execution/evaluation order XZY
+ */
+
+ engine_heartbeat_disable(engine, &heartbeat);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
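+ /* Borrow a few dwords high up in the HWSP as semaphore/timestamp slots */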
+ slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[0] = create_rewinder(ce, NULL, slot, X);
+ if (IS_ERR(rq[0])) {
+ intel_context_put(ce);
+ goto err;
+ }
+
+ rq[1] = create_rewinder(ce, NULL, slot, Y);
+ intel_context_put(ce);
+ if (IS_ERR(rq[1]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[1], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit first context\n",
+ engine->name);
+ goto err;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[2] = create_rewinder(ce, rq[0], slot, Z);
+ intel_context_put(ce);
+ if (IS_ERR(rq[2]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[2], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit second context\n",
+ engine->name);
+ goto err;
+ }
+ GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
+
+ /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_flush_submission(engine);
+ }
+ /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+ GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+ /* Release the hounds! */
+ slot[0] = 1;
+ wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+ for (i = 1; i <= 3; i++) {
+ unsigned long timeout = jiffies + HZ / 2;
+
+ while (!READ_ONCE(slot[i]) &&
+ time_before(jiffies, timeout))
+ ;
+
+ if (!time_before(jiffies, timeout)) {
+ pr_err("%s: rq[%d] timed out\n",
+ engine->name, i - 1);
+ err = -ETIME;
+ goto err;
+ }
+
+ pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+ }
+
+ /* XZY: XZ < XY */
+ if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+ pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+ engine->name,
+ slot[Z] - slot[X],
+ slot[Y] - slot[X]);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(&slot[0], -1, 4);
+ wmb();
+
+ engine->props.timeslice_duration_ms = timeslice;
+ engine_heartbeat_enable(engine, heartbeat);
+ for (i = 0; i < 3; i++)
+ i915_request_put(rq[i]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
}
static long timeslice_threshold(const struct intel_engine_cs *engine)
@@ -688,6 +1177,10 @@ static int live_timeslice_queue(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
@@ -774,6 +1267,7 @@ err_heartbeat:
break;
}
+err_pin:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
@@ -832,6 +1326,10 @@ static int live_busywait_preempt(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_vma;
+
for_each_engine(engine, gt, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
@@ -1352,14 +1850,9 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -1417,10 +1910,9 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
igt_spinner_end(&arg->a.spin);
- if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != 0) {
pr_err("Normal inflight0 request did not complete\n");
@@ -1500,10 +1992,9 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != -EIO) {
pr_err("Cancelled inflight0 request did not report -EIO\n");
@@ -1561,14 +2052,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -1656,7 +2142,7 @@ static int live_suppress_self_preempt(void *arg)
if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0; /* presume black box */
if (intel_vgpu_active(gt->i915))
@@ -2279,117 +2765,6 @@ static int live_preempt_gang(void *arg)
return 0;
}
-static int live_preempt_hang(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- GEM_TRACE("lo spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- init_completion(&engine->execlists.preempt_hang.completion);
- engine->execlists.preempt_hang.inject_hang = true;
-
- i915_request_add(rq);
-
- if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
- HZ / 10)) {
- pr_err("Preemption did not occur within timeout!");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- intel_engine_reset(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-
- engine->execlists.preempt_hang.inject_hang = false;
-
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- GEM_TRACE("hi spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-}
-
static int live_preempt_timeout(void *arg)
{
struct intel_gt *gt = arg;
@@ -2882,7 +3257,7 @@ static int live_virtual_engine(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for_each_engine(engine, gt, id) {
@@ -3015,7 +3390,7 @@ static int live_virtual_mask(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3055,6 +3430,10 @@ static int preserved_virtual_engine(struct intel_gt *gt,
if (IS_ERR(scratch))
return PTR_ERR(scratch);
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto out_scratch;
+
ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
@@ -3153,7 +3532,7 @@ static int live_virtual_preserved(void *arg)
* are preserved.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
/* As we use CS_GPR we cannot run before they existed on all engines. */
@@ -3243,15 +3622,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
rq[0] = ERR_PTR(-ENOMEM);
for_each_engine(master, gt, id) {
struct i915_sw_fence fence = {};
+ struct intel_context *ce;
if (master->class == class)
continue;
+ ce = intel_context_create(master);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
- rq[0] = igt_spinner_create_request(&spin,
- master->kernel_context,
- MI_NOOP);
+ rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
goto out;
@@ -3377,7 +3762,7 @@ static int live_virtual_bond(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3538,7 +3923,7 @@ static int live_virtual_reset(void *arg)
* forgotten.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
if (!intel_has_reset_engine(gt))
@@ -3571,8 +3956,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
+ SUBTEST(live_pin_rewind),
SUBTEST(live_hold_reset),
+ SUBTEST(live_error_interrupt),
SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_rewind),
SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
@@ -3583,7 +3971,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_gang),
- SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
@@ -3631,6 +4018,62 @@ static void hexdump(const void *buf, size_t len)
}
}
+static int emit_semaphore_signal(struct intel_context *ce, void *slot)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
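+ /* Signal at maximum priority so we preempt the semaphore waiter */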
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+ return 0;
+}
+
+static int context_flush(struct intel_context *ce, long timeout)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+ int err = 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
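+ /*
+ * Queue a kernel request behind everything already submitted on
+ * this context's timeline, so that by the time it completes the
+ * context has been switched out and its image saved to memory.
+ */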
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ rmb(); /* We know the request is written, make sure all state is too! */
+ return err;
+}
+
static int live_lrc_layout(void *arg)
{
struct intel_gt *gt = arg;
@@ -3797,6 +4240,11 @@ static int live_lrc_fixed(void *arg)
CTX_BB_STATE - 1,
"BB_STATE"
},
+ {
+ i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
+ CTX_TIMESTAMP - 1,
+ "RING_CTX_TIMESTAMP"
+ },
{ },
}, *t;
u32 *hw;
@@ -3880,8 +4328,16 @@ static int __live_lrc_state(struct intel_engine_cs *engine,
*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
*cs++ = 0;
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+
i915_request_get(rq);
i915_request_add(rq);
+ if (err)
+ goto err_rq;
intel_engine_flush_submission(engine);
expected[RING_TAIL_IDX] = ce->ring->tail;
@@ -3947,13 +4403,13 @@ static int live_lrc_state(void *arg)
return err;
}
-static int gpr_make_dirty(struct intel_engine_cs *engine)
+static int gpr_make_dirty(struct intel_context *ce)
{
struct i915_request *rq;
u32 *cs;
int n;
- rq = intel_engine_create_kernel_request(engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -3965,20 +4421,79 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
for (n = 0; n < NUM_GPR_DW; n++) {
- *cs++ = CS_GPR(engine, n);
+ *cs++ = CS_GPR(ce->engine, n);
*cs++ = STACK_MAGIC;
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_add(rq);
return 0;
}
-static int __live_gpr_clear(struct intel_engine_cs *engine,
- struct i915_vma *scratch)
+static struct i915_request *
+__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return ERR_CAST(cs);
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
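+ /*
+ * Hold the GPR reads behind the semaphore until the test releases
+ * us, either directly from the CPU or via a preempting request.
+ */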
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(ce->engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ rq = ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static int __live_lrc_gpr(struct intel_engine_cs *engine,
+ struct i915_vma *scratch,
+ bool preempt)
+{
+ u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
struct intel_context *ce;
struct i915_request *rq;
u32 *cs;
@@ -3988,7 +4503,7 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
return 0; /* GPR only on rcs0 for gen8 */
- err = gpr_make_dirty(engine);
+ err = gpr_make_dirty(engine->kernel_context);
if (err)
return err;
@@ -3996,28 +4511,28 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
if (IS_ERR(ce))
return PTR_ERR(ce);
- rq = intel_context_create_request(ce);
+ rq = __gpr_read(ce, scratch, slot);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_put;
}
- cs = intel_ring_begin(rq, 4 * NUM_GPR_DW);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(rq);
- goto err_put;
- }
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
- for (n = 0; n < NUM_GPR_DW; n++) {
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = CS_GPR(engine, n);
- *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
- *cs++ = 0;
- }
+ if (preempt) {
+ err = gpr_make_dirty(engine->kernel_context);
+ if (err)
+ goto err_rq;
- i915_request_get(rq);
- i915_request_add(rq);
+ err = emit_semaphore_signal(engine->kernel_context, slot);
+ if (err)
+ goto err_rq;
+ } else {
+ slot[0] = 1;
+ wmb();
+ }
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
@@ -4044,13 +4559,15 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
i915_gem_object_unpin_map(scratch->obj);
err_rq:
+ memset32(&slot[0], -1, 4);
+ wmb();
i915_request_put(rq);
err_put:
intel_context_put(ce);
return err;
}
-static int live_gpr_clear(void *arg)
+static int live_lrc_gpr(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@@ -4068,7 +4585,971 @@ static int live_gpr_clear(void *arg)
return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_gpr_clear(engine, scratch);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ err = __live_lrc_gpr(engine, scratch, false);
+ if (err)
+ goto err;
+
+ err = __live_lrc_gpr(engine, scratch, true);
+ if (err)
+ goto err;
+
+err:
+ engine_heartbeat_enable(engine, heartbeat);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static struct i915_request *
+create_timestamp(struct intel_context *ce, void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+struct lrc_timestamp {
+ struct intel_engine_cs *engine;
+ struct intel_context *ce[2];
+ u32 poison;
+};
+
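+/*
+ * Wrap-safe comparison: the u32 delta is interpreted as signed, so the
+ * timestamp still counts as advanced across the 32-bit wraparound.
+ */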
+static bool timestamp_advanced(u32 start, u32 end)
+{
+ return (s32)(end - start) > 0;
+}
+
+static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
+{
+ u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
+ struct i915_request *rq;
+ u32 timestamp;
+ int err = 0;
+
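+ /*
+ * Poison CTX_TIMESTAMP in the context image; once restored and
+ * running, the timestamp sampled by the request must still read
+ * as having advanced beyond the poison.
+ */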
+ arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
+ rq = create_timestamp(arg->ce[0], slot, 1);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = wait_for_submit(rq->engine, rq, HZ / 2);
+ if (err)
+ goto err;
+
+ if (preempt) {
+ arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
+ err = emit_semaphore_signal(arg->ce[1], slot);
+ if (err)
+ goto err;
+ } else {
+ slot[0] = 1;
+ wmb();
+ }
+
+ /* And wait for switch to kernel (to save our context to memory) */
+ err = context_flush(arg->ce[0], HZ / 2);
+ if (err)
+ goto err;
+
+ if (!timestamp_advanced(arg->poison, slot[1])) {
+ pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
+ arg->engine->name, preempt ? "preempt" : "simple",
+ arg->poison, slot[1]);
+ err = -EINVAL;
+ }
+
+ timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
+ if (!timestamp_advanced(slot[1], timestamp)) {
+ pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
+ arg->engine->name, preempt ? "preempt" : "simple",
+ slot[1], timestamp);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(slot, -1, 4);
+ i915_request_put(rq);
+ return err;
+}
+
+static int live_lrc_timestamp(void *arg)
+{
+ struct lrc_timestamp data = {};
+ struct intel_gt *gt = arg;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ 0,
+ S32_MAX,
+ (u32)S32_MAX + 1,
+ U32_MAX,
+ };
+
+ /*
+ * We want to verify that the timestamp is saved and restored across
+ * context switches and is monotonic.
+ *
+ * So we do this with a little bit of LRC poisoning to check various
+ * boundary conditions, and see what happens if we preempt the context
+ * with a second request (carrying more poison into the timestamp).
+ */
+
+ for_each_engine(data.engine, gt, id) {
+ unsigned long heartbeat;
+ int i, err = 0;
+
+ engine_heartbeat_disable(data.engine, &heartbeat);
+
+ for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(data.engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err;
+ }
+
+ data.ce[i] = tmp;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ data.poison = poison[i];
+
+ err = __lrc_timestamp(&data, false);
+ if (err)
+ break;
+
+ err = __lrc_timestamp(&data, true);
+ if (err)
+ break;
+ }
+
+err:
+ engine_heartbeat_enable(data.engine, heartbeat);
+ for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+ if (!data.ce[i])
+ break;
+
+ intel_context_unpin(data.ce[i]);
+ intel_context_put(data.ce[i]);
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_user_vma(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_vma *
+store_context(struct intel_context *ce, struct i915_vma *scratch)
+{
+ struct i915_vma *batch;
+ u32 dw, x, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ x = 0;
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
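+ /*
+ * Walk the MI_LOAD_REGISTER_IMM (opcode 0x22) packets within the
+ * default context image and emit an SRM for each register offset
+ * they program.
+ */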
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = hw[dw];
+ *cs++ = lower_32_bits(scratch->node.start + x);
+ *cs++ = upper_32_bits(scratch->node.start + x);
+
+ dw += 2;
+ x += 4;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int move_to_active(struct i915_request *rq,
+ struct i915_vma *vma,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, flags);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static struct i915_request *
+record_registers(struct intel_context *ce,
+ struct i915_vma *before,
+ struct i915_vma *after,
+ u32 *sema)
+{
+ struct i915_vma *b_before, *b_after;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ b_before = store_context(ce, before);
+ if (IS_ERR(b_before))
+ return ERR_CAST(b_before);
+
+ b_after = store_context(ce, after);
+ if (IS_ERR(b_after)) {
+ rq = ERR_CAST(b_after);
+ goto err_before;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto err_after;
+
+ err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_before, 0);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_after, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_before->node.start);
+ *cs++ = upper_32_bits(b_before->node.start);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_after->node.start);
+ *cs++ = upper_32_bits(b_after->node.start);
+
+ intel_ring_advance(rq, cs);
+
+ WRITE_ONCE(*sema, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+err_after:
+ i915_vma_put(b_after);
+err_before:
+ i915_vma_put(b_before);
+ return rq;
+
+err_rq:
+ i915_request_add(rq);
+ rq = ERR_PTR(err);
+ goto err_after;
+}
+
+static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
+{
+ struct i915_vma *batch;
+ u32 dw, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
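+ /*
+ * Mirror the LRI layout of the default context image, but load
+ * every register with the poison value instead.
+ */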
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ *cs++ = MI_LOAD_REGISTER_IMM(len);
+ while (len--) {
+ *cs++ = hw[dw];
+ *cs++ = poison;
+ dw += 2;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ u32 *cs;
+ int err;
+
+ batch = load_context(ce, poison);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = move_to_active(rq, batch, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(batch->node.start);
+ *cs++ = upper_32_bits(batch->node.start);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+err_rq:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_put(batch);
+ return err;
+}
+
+static bool is_moving(u32 a, u32 b)
+{
+ return a != b;
+}
+
+static int compare_isolation(struct intel_engine_cs *engine,
+ struct i915_vma *ref[2],
+ struct i915_vma *result[2],
+ struct intel_context *ce,
+ u32 poison)
+{
+ u32 x, dw, *hw, *lrc;
+ u32 *A[2], *B[2];
+ int err = 0;
+
+ A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+ if (IS_ERR(A[0]))
+ return PTR_ERR(A[0]);
+
+ A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+ if (IS_ERR(A[1])) {
+ err = PTR_ERR(A[1]);
+ goto err_A0;
+ }
+
+ B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+ if (IS_ERR(B[0])) {
+ err = PTR_ERR(B[0]);
+ goto err_A1;
+ }
+
+ B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+ if (IS_ERR(B[1])) {
+ err = PTR_ERR(B[1]);
+ goto err_B0;
+ }
+
+ lrc = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
+ goto err_B1;
+ }
+ lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*lrc);
+
+ x = 0;
+ dw = 0;
+ hw = engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
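+ /*
+ * A[] holds the reference captures (no interference) and B[] the
+ * captures taken around the poisoning context. Any register that
+ * does not change on its own but differs between the two runs has
+ * leaked state across the context boundary.
+ */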
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ if (!is_moving(A[0][x], A[1][x]) &&
+ (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
+ switch (hw[dw] & 4095) {
+ case 0x30: /* RING_HEAD */
+ case 0x34: /* RING_TAIL */
+ break;
+
+ default:
+ pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
+ engine->name, dw,
+ hw[dw], hw[dw + 1],
+ A[0][x], B[0][x], B[1][x],
+ poison, lrc[dw + 1]);
+ err = -EINVAL;
+ break;
+ }
+ }
+ dw += 2;
+ x++;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ i915_gem_object_unpin_map(ce->state->obj);
+err_B1:
+ i915_gem_object_unpin_map(result[1]->obj);
+err_B0:
+ i915_gem_object_unpin_map(result[0]->obj);
+err_A1:
+ i915_gem_object_unpin_map(ref[1]->obj);
+err_A0:
+ i915_gem_object_unpin_map(ref[0]->obj);
+ return err;
+}
+
+static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
+{
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
+ struct i915_vma *ref[2], *result[2];
+ struct intel_context *A, *B;
+ struct i915_request *rq;
+ int err;
+
+ A = intel_context_create(engine);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ B = intel_context_create(engine);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto err_A;
+ }
+
+ ref[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[0])) {
+ err = PTR_ERR(ref[0]);
+ goto err_B;
+ }
+
+ ref[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[1])) {
+ err = PTR_ERR(ref[1]);
+ goto err_ref0;
+ }
+
+ rq = record_registers(A, ref[0], ref[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ref1;
+ }
+
+ WRITE_ONCE(*sema, 1);
+ wmb();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ref1;
+ }
+ i915_request_put(rq);
+
+ result[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[0])) {
+ err = PTR_ERR(result[0]);
+ goto err_ref1;
+ }
+
+ result[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[1])) {
+ err = PTR_ERR(result[1]);
+ goto err_result0;
+ }
+
+ rq = record_registers(A, result[0], result[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_result1;
+ }
+
+ err = poison_registers(B, poison, sema);
+ if (err) {
+ WRITE_ONCE(*sema, -1);
+ i915_request_put(rq);
+ goto err_result1;
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_result1;
+ }
+ i915_request_put(rq);
+
+ err = compare_isolation(engine, ref, result, A, poison);
+
+err_result1:
+ i915_vma_put(result[1]);
+err_result0:
+ i915_vma_put(result[0]);
+err_ref1:
+ i915_vma_put(ref[1]);
+err_ref0:
+ i915_vma_put(ref[0]);
+err_B:
+ intel_context_put(B);
+err_A:
+ intel_context_put(A);
+ return err;
+}
+
+static bool skip_isolation(const struct intel_engine_cs *engine)
+{
+ if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
+ return true;
+
+ if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
+ return true;
+
+ return false;
+}
+
+static int live_lrc_isolation(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ STACK_MAGIC,
+ 0x3a3a3a3a,
+ 0x5c5c5c5c,
+ 0xffffffff,
+ 0xffff0000,
+ };
+
+ /*
+ * Our goal is to try and verify that per-context state cannot be
+ * tampered with by another non-privileged client.
+ *
+ * We take the list of context registers from the LRI in the default
+ * context image and attempt to modify that list from a remote context.
+ */
+
+ for_each_engine(engine, gt, id) {
+ int err = 0;
+ int i;
+
+ /* Just don't even ask */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
+ skip_isolation(engine))
+ continue;
+
+ intel_engine_pm_get(engine);
+ if (engine->pinned_default_state) {
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ err = __lrc_isolation(engine, poison[i]);
+ if (err)
+ break;
+
+ err = __lrc_isolation(engine, ~poison[i]);
+ if (err)
+ break;
+ }
+ }
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void garbage_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ tasklet_disable(&engine->execlists.tasklet);
+
+ if (!rq->fence.error)
+ intel_engine_reset(engine, NULL);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static struct i915_request *garbage(struct intel_context *ce,
+ struct rnd_state *prng)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (err)
+ return ERR_PTR(err);
+
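+ /*
+ * Scribble random bytes over the register state portion of the
+ * context image, everything beyond the per-process HWSP.
+ */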
+ prandom_bytes_state(prng,
+ ce->lrc_reg_state,
+ ce->engine->context_size -
+ LRC_STATE_PN * PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+
+err_unpin:
+ intel_context_unpin(ce);
+ return ERR_PTR(err);
+}
+
+static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+ struct intel_context *ce;
+ struct i915_request *hang;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ hang = garbage(ce, prng);
+ if (IS_ERR(hang)) {
+ err = PTR_ERR(hang);
+ goto err_ce;
+ }
+
+ if (wait_for_submit(engine, hang, HZ / 2)) {
+ i915_request_put(hang);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ intel_context_set_banned(ce);
+ garbage_reset(engine, hang);
+
+ intel_engine_flush_submission(engine);
+ if (!hang->fence.error) {
+ i915_request_put(hang);
+ pr_err("%s: corrupted context was not reset\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_ce;
+ }
+
+ if (i915_request_wait(hang, 0, HZ / 2) < 0) {
+ pr_err("%s: corrupted context did not recover\n",
+ engine->name);
+ i915_request_put(hang);
+ err = -EIO;
+ goto err_ce;
+ }
+ i915_request_put(hang);
+
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_garbage(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Verify that we can recover if one context state is completely
+ * corrupted.
+ */
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ I915_RND_STATE(prng);
+ int err = 0, i;
+
+ if (!intel_has_reset_engine(engine->gt))
+ continue;
+
+ intel_engine_pm_get(engine);
+ for (i = 0; i < 3; i++) {
+ err = __lrc_garbage(engine, &prng);
+ if (err)
+ break;
+ }
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ ce->runtime.num_underflow = 0;
+ ce->runtime.max_underflow = 0;
+
+ do {
+ unsigned int loop = 1024;
+
+ while (loop) {
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_rq;
+ }
+
+ if (--loop == 0)
+ i915_request_get(rq);
+
+ i915_request_add(rq);
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+
+ i915_request_put(rq);
+ } while (1);
+
+ err = i915_request_wait(rq, 0, HZ / 5);
+ if (err < 0) {
+ pr_err("%s: request not completed!\n", engine->name);
+ goto err_wait;
+ }
+
+ igt_flush_test(engine->i915);
+
+ pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
+ engine->name,
+ intel_context_get_total_runtime_ns(ce),
+ intel_context_get_avg_runtime_ns(ce));
+
+ err = 0;
+ if (ce->runtime.num_underflow) {
+ pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
+ engine->name,
+ ce->runtime.num_underflow,
+ ce->runtime.max_underflow);
+ GEM_TRACE_DUMP();
+ err = -EOVERFLOW;
+ }
+
+err_wait:
+ i915_request_put(rq);
+err_rq:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_pphwsp_runtime(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that the cumulative context runtime, as stored in pphwsp[16],
+ * is monotonic.
+ */
+
+ for_each_engine(engine, gt, id) {
+ err = __live_pphwsp_runtime(engine);
if (err)
break;
}
@@ -4076,7 +5557,6 @@ static int live_gpr_clear(void *arg)
if (igt_flush_test(gt->i915))
err = -EIO;
- i915_vma_unpin_and_release(&scratch, 0);
return err;
}
@@ -4086,7 +5566,11 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_layout),
SUBTEST(live_lrc_fixed),
SUBTEST(live_lrc_state),
- SUBTEST(live_gpr_clear),
+ SUBTEST(live_lrc_gpr),
+ SUBTEST(live_lrc_isolation),
+ SUBTEST(live_lrc_timestamp),
+ SUBTEST(live_lrc_garbage),
+ SUBTEST(live_pphwsp_runtime),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index de1f83100fb6..8831ffee2061 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -12,7 +12,8 @@
#include "selftests/igt_spinner.h"
struct live_mocs {
- struct drm_i915_mocs_table table;
+ struct drm_i915_mocs_table mocs;
+ struct drm_i915_mocs_table l3cc;
struct i915_vma *scratch;
void *vaddr;
};
@@ -70,11 +71,22 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
+ struct drm_i915_mocs_table table;
+ unsigned int flags;
int err;
- if (!get_mocs_settings(gt->i915, &arg->table))
+ memset(arg, 0, sizeof(*arg));
+
+ flags = get_mocs_settings(gt->i915, &table);
+ if (!flags)
return -EINVAL;
+ if (flags & HAS_RENDER_L3CC)
+ arg->l3cc = table;
+
+ if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
+ arg->mocs = table;
+
arg->scratch = create_scratch(gt);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
@@ -223,9 +235,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Read the mocs tables back using SRM */
offset = i915_ggtt_offset(vma);
if (!err)
- err = read_mocs_table(rq, &arg->table, &offset);
+ err = read_mocs_table(rq, &arg->mocs, &offset);
if (!err && ce->engine->class == RENDER_CLASS)
- err = read_l3cc_table(rq, &arg->table, &offset);
+ err = read_l3cc_table(rq, &arg->l3cc, &offset);
offset -= i915_ggtt_offset(vma);
GEM_BUG_ON(offset > PAGE_SIZE);
@@ -236,9 +248,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Compare the results against the expected tables */
vaddr = arg->vaddr;
if (!err)
- err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+ err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
if (!err && ce->engine->class == RENDER_CLASS)
- err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+ err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 8cc55a0e9e06..95b165faeba7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -12,6 +12,21 @@
#include "selftests/i915_random.h"
+static u64 rc6_residency(struct intel_rc6 *rc6)
+{
+ u64 result;
+
+ /* XXX VLV_GT_MEDIA_RC6? */
+
+ result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ if (HAS_RC6p(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p);
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp);
+
+ return result;
+}
+
int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
@@ -38,9 +53,9 @@ int live_rc6_manual(void *arg)
__intel_rc6_disable(rc6);
msleep(1); /* wakeup is not immediate, takes about 100us on icl */
- res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[0] = rc6_residency(rc6);
msleep(250);
- res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
(res[1] - res[0]) >> 10);
@@ -51,14 +66,15 @@ int live_rc6_manual(void *arg)
/* Manually enter RC6 */
intel_rc6_park(rc6);
- res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[0] = rc6_residency(rc6);
msleep(100);
- res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
- pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
+ pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
- intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL),
+ res[0]);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 6ad6aca315f6..35406ecdf0b2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -115,7 +115,7 @@ static int igt_atomic_engine_reset(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
intel_gt_pm_get(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
new file mode 100644
index 000000000000..9995faadd7e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static struct i915_vma *create_wally(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ err = i915_vma_sync(vma);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(cs);
+ }
+
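+ /*
+ * Emit a store of STACK_MAGIC to a marker dword near the end of
+ * this batch (byte offset 4000), using whichever MI_STORE_DWORD
+ * flavour this generation understands.
+ */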
+ if (INTEL_GEN(engine->i915) >= 6) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = 0;
+ } else if (INTEL_GEN(engine->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ }
+ *cs++ = vma->node.start + 4000;
+ *cs++ = STACK_MAGIC;
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ vma = ERR_CAST(vma->private);
+ i915_gem_object_put(obj);
+ }
+
+ return vma;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int new_context_sync(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = context_sync(ce);
+ intel_context_put(ce);
+
+ return err;
+}
+
+static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
+{
+ int pass;
+ int err;
+
+ for (pass = 0; pass < 2; pass++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(engine->kernel_context);
+ if (err || READ_ONCE(*result)) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb emitted for the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+
+ err = context_sync(engine->kernel_context);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
+{
+ struct i915_vma *bb;
+ u32 *result;
+ int err;
+
+ bb = create_wally(engine);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ intel_context_put(bb->private);
+ i915_vma_unpin_and_release(&bb, 0);
+ return PTR_ERR(result);
+ }
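+ /* Point at the marker dword (byte offset 4000) written by the batch */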
+ result += 1000;
+
+ engine->wa_ctx.vma = bb;
+
+ err = mixed_contexts_sync(engine, result);
+ if (err)
+ goto out;
+
+ err = double_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+ err = kernel_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+out:
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_ctx_switch_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Exercise the inter-context wa batch.
+ *
+ * Between each user context we run a wa batch, and since it may
+ * have implications for user visible state, we have to check that
+ * we do actually execute it.
+ *
+ * The trick we use is to replace the normal wa batch with a custom
+ * one that writes to a marker within it, and we can then look for
+ * that marker to confirm if the batch was run when we expect it,
+ * and, equally important, that it wasn't run when we don't!
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_vma *saved_wa;
+ int err;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (IS_GEN_RANGE(gt->i915, 4, 5))
+ continue; /* MI_STORE_DWORD is privileged! */
+
+ saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+
+ intel_engine_pm_get(engine);
+ err = __live_ctx_switch_wa(engine);
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ engine->wa_ctx.vma = saved_wa;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_ctx_switch_wa),
+ };
+
+ if (HAS_EXECLISTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index e2d78cc22fb4..c2578a0f2f14 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,6 +6,8 @@
#include <linux/prime_numbers.h>
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
@@ -604,7 +606,6 @@ static int live_hwsp_alternate(void *arg)
tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
- intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
@@ -750,6 +751,189 @@ out_free:
return err;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
+static int live_hwsp_rollover_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Run the host for long enough, and even the kernel context will
+ * see a seqno rollover.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct intel_timeline *tl = ce->timeline;
+ struct i915_request *rq[3] = {};
+ unsigned long heartbeat;
+ int i;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+ if (intel_gt_wait_for_idle(gt, HZ / 2)) {
+ err = -EIO;
+ goto out;
+ }
+
+ GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
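+ /*
+ * Wind the seqno back so that the next few requests straddle the
+ * u32 wraparound.
+ */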
+ tl->seqno = 0;
+ timeline_rollback(tl);
+ timeline_rollback(tl);
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ struct i915_request *this;
+
+ this = i915_request_create(ce);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out;
+ }
+
+ pr_debug("%s: create fence.seqnp:%d\n",
+ engine->name,
+ lower_32_bits(this->fence.seqno));
+
+ GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
+ }
+
+ /* We expected a wrap! */
+ GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline wrap timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ if (!i915_request_completed(rq[i])) {
+ pr_err("Pre-wrap request not completed!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(rq); i++)
+ i915_request_put(rq[i]);
+ engine_heartbeat_enable(engine, heartbeat);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
+static int live_hwsp_rollover_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Simulate a long running user context, and force the seqno wrap
+ * on the user's timeline.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq[3] = {};
+ struct intel_timeline *tl;
+ struct intel_context *ce;
+ int i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_alloc_state(ce);
+ if (err)
+ goto out;
+
+ tl = ce->timeline;
+ if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ goto out;
+
+ timeline_rollback(tl);
+ timeline_rollback(tl);
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ struct i915_request *this;
+
+ this = intel_context_create_request(ce);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out;
+ }
+
+ pr_debug("%s: create fence.seqnp:%d\n",
+ engine->name,
+ lower_32_bits(this->fence.seqno));
+
+ GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
+ }
+
+ /* We expected a wrap! */
+ GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline wrap timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ if (!i915_request_completed(rq[i])) {
+ pr_err("Pre-wrap request not completed!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(rq); i++)
+ i915_request_put(rq[i]);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
static int live_hwsp_recycle(void *arg)
{
struct intel_gt *gt = arg;
@@ -827,6 +1011,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_engine),
SUBTEST(live_hwsp_alternate),
SUBTEST(live_hwsp_wrap),
+ SUBTEST(live_hwsp_rollover_kernel),
+ SUBTEST(live_hwsp_rollover_user),
};
if (intel_gt_is_wedged(&i915->gt))
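
The two rollover selftests above rewind a timeline's 32-bit seqno to just below the wrap point, then submit three requests and expect the third to come out numerically smaller than the first. A minimal standalone sketch of that arithmetic (plain C, not driver code; seqno_after() mirrors the signed-difference idiom used by the driver's i915_seqno_passed()):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is after b", using signed distance on u32 seqnos. */
static int seqno_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t seqno = UINT32_MAX - 1;	/* timeline rolled back near the wrap */
	uint32_t rq0 = ++seqno;			/* 0xffffffff */
	uint32_t rq1 = ++seqno;			/* 0x00000000: wrapped */
	uint32_t rq2 = ++seqno;			/* 0x00000001 */

	printf("rq0=%#x rq1=%#x rq2=%#x\n", rq0, rq1, rq2);
	/* A naive comparison misorders requests across the wrap... */
	printf("rq2 > rq0: %d\n", rq2 > rq0);				/* 0 */
	/* ...while the signed-difference test still orders them correctly. */
	printf("seqno_after(rq2, rq0): %d\n", seqno_after(rq2, rq0));	/* 1 */
	return 0;
}
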
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index ac1921854cbf..5ed323254ee1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -583,6 +583,15 @@ static int check_dirty_whitelist(struct intel_context *ce)
if (err)
goto err_request;
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(scratch, rq,
+ EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+ if (err)
+ goto err_request;
+
err = engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
0);
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
new file mode 100644
index 000000000000..8f9b2f33dbaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
+#include "sysfs_engines.h"
+
+struct kobj_engine {
+ struct kobject base;
+ struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static struct kobj_attribute name_attr =
+__ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static struct kobj_attribute class_attr =
+__ATTR(class, 0444, class_show, NULL);
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static struct kobj_attribute inst_attr =
+__ATTR(instance, 0444, inst_show, NULL);
+
+static ssize_t
+mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+}
+
+static struct kobj_attribute mmio_attr =
+__ATTR(mmio_base, 0444, mmio_show, NULL);
+
+static const char * const vcs_caps[] = {
+ [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static const char * const vecs_caps[] = {
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static ssize_t repr_trim(char *buf, ssize_t len)
+{
+ /* Trim off the trailing space and replace with a newline */
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len > 0)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+__caps_show(struct intel_engine_cs *engine,
+ u32 caps, char *buf, bool show_unknown)
+{
+ const char * const *repr;
+ int count, n;
+ ssize_t len;
+
+ BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ repr = vcs_caps;
+ count = ARRAY_SIZE(vcs_caps);
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ repr = vecs_caps;
+ count = ARRAY_SIZE(vecs_caps);
+ break;
+
+ default:
+ repr = NULL;
+ count = 0;
+ break;
+ }
+ GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));
+
+ len = 0;
+ for_each_set_bit(n,
+ (unsigned long *)&caps,
+ show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
+ if (n >= count || !repr[n]) {
+ if (GEM_WARN_ON(show_unknown))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "[%x] ", n);
+ } else {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s ", repr[n]);
+ }
+ if (GEM_WARN_ON(len >= PAGE_SIZE))
+ break;
+ }
+ return repr_trim(buf, len);
+}
+
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return __caps_show(engine, engine->uabi_capabilities, buf, true);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static ssize_t
+all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return __caps_show(kobj_to_engine(kobj), -1, buf, false);
+}
+
+static struct kobj_attribute all_caps_attr =
+__ATTR(known_capabilities, 0444, all_caps_show, NULL);
+
+static ssize_t
+max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * When waiting for a request, if it is currently being executed
+ * on the GPU, we busywait for a short while before sleeping. The
+ * premise is that most requests are short, and if it is already
+ * executing then there is a good chance that it will complete
+ * before we can set up the interrupt handler and go to sleep.
+ * We try to offset the cost of going to sleep by first spinning
+ * on the request -- if it completed in less time than it would take
+ * to go to sleep, process the interrupt and return to the client,
+ * then we have saved the client some latency, albeit at the cost
+ * of spinning on an expensive CPU core.
+ *
+ * While we try to avoid waiting at all for a request that is unlikely
+ * to complete, deciding how long it is worth spinning for is an
+ * arbitrary decision: trading off power vs latency.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_nsecs(2))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+
+ return count;
+}
+
+static ssize_t
+max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_attr =
+__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
+
+static ssize_t
+timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Execlists uses a scheduling quantum (a timeslice) to alternate
+ * execution between ready-to-run contexts of equal priority. This
+ * ensures that all users (though only if they are of equal importance)
+ * have the opportunity to run and prevents livelocks where contexts
+ * may have implicit ordering due to userspace semaphores.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+
+ if (execlists_active(&engine->execlists))
+ set_timer_ms(&engine->execlists.timer, duration);
+
+ return count;
+}
+
+static ssize_t
+timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
+
+static ssize_t
+stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Allowing ourselves to sleep before a GPU reset after disabling
+ * submission, even for a few milliseconds, gives an innocent context
+ * the opportunity to clear the GPU before the reset occurs. However,
+ * how long to sleep depends on the typical non-preemptible duration
+ * (a similar problem to determining the ideal preempt-reset timeout
+ * or even the heartbeat interval).
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+ return count;
+}
+
+static ssize_t
+stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_attr =
+__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long timeout;
+ int err;
+
+ /*
+ * After initialising a preemption request, we give the current
+ * resident a small amount of time to vacate the GPU. The preemption
+ * request is for a higher priority context and should be immediate to
+ * maintain high quality of service (and avoid priority inversion).
+ * However, the preemption granularity of the GPU can be quite coarse
+ * and so we need a compromise.
+ */
+
+ err = kstrtoull(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ set_timer_ms(&engine->execlists.preempt, timeout);
+
+ return count;
+}
+
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t
+heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long delay;
+ int err;
+
+ /*
+ * We monitor the health of the system via periodic heartbeat pulses.
+ * The pulses also provide the opportunity to perform garbage
+ * collection. However, we interpret an incomplete pulse (a missed
+ * heartbeat) as an indication that the system is no longer responsive,
+ * i.e. hung, and perform an engine or full GPU reset. Given that the
+ * preemption granularity can be very coarse on a system, the optimal
+ * value for any workload is unknowable!
+ */
+
+ err = kstrtoull(buf, 0, &delay);
+ if (err)
+ return err;
+
+ if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ err = intel_engine_set_heartbeat(engine, delay);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t
+heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+ .release = kobj_engine_release,
+ .sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return NULL;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = engine;
+
+ if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+ kobject_put(&ke->base);
+ return NULL;
+ }
+
+ /* xfer ownership to sysfs tree */
+ return &ke->base;
+}
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+ static const struct attribute *files[] = {
+ &name_attr.attr,
+ &class_attr.attr,
+ &inst_attr.attr,
+ &mmio_attr.attr,
+ &caps_attr.attr,
+ &all_caps_attr.attr,
+ &max_spin_attr.attr,
+ &stop_timeout_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_attr.attr,
+#endif
+ NULL
+ };
+
+ struct device *kdev = i915->drm.primary->kdev;
+ struct intel_engine_cs *engine;
+ struct kobject *dir;
+
+ dir = kobject_create_and_add("engine", &kdev->kobj);
+ if (!dir)
+ return;
+
+ for_each_uabi_engine(engine, i915) {
+ struct kobject *kobj;
+
+ kobj = kobj_engine(dir, engine);
+ if (!kobj)
+ goto err_engine;
+
+ if (sysfs_create_files(kobj, files))
+ goto err_object;
+
+ if (intel_engine_has_timeslices(engine) &&
+ sysfs_create_file(kobj, &timeslice_duration_attr.attr))
+ goto err_engine;
+
+ if (intel_engine_has_preempt_reset(engine) &&
+ sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+ goto err_engine;
+
+ if (0) {
+err_object:
+ kobject_put(kobj);
+err_engine:
+ dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.h b/drivers/gpu/drm/i915/gt/sysfs_engines.h
new file mode 100644
index 000000000000..9546fffe03a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
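
Each writable attribute in sysfs_engines.c above follows the same store pattern: recover the engine via container_of(), parse with kstrtoull(), reject values beyond what the jiffies-based timers can represent, and publish with WRITE_ONCE() since readers take no lock. A minimal sketch of that pattern outside the driver (all demo_* names are hypothetical):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

struct demo_obj {
	struct kobject base;
	unsigned long timeout_ms;	/* the tunable being exposed */
};

static struct demo_obj *to_demo(struct kobject *kobj)
{
	return container_of(kobj, struct demo_obj, base);
}

static ssize_t timeout_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", READ_ONCE(to_demo(kobj)->timeout_ms));
}

static ssize_t timeout_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned long long v;
	int err;

	err = kstrtoull(buf, 0, &v);
	if (err)
		return err;

	if (v > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	/* Readers are lockless; publish the new value atomically. */
	WRITE_ONCE(to_demo(kobj)->timeout_ms, v);
	return count;
}

static struct kobj_attribute timeout_attr =
__ATTR(timeout_ms, 0644, timeout_show, timeout_store);

/* A release is mandatory for dynamically allocated kobjects. */
static void demo_release(struct kobject *kobj)
{
	kfree(to_demo(kobj));
}

static struct kobj_type demo_ktype = {
	.release = demo_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

As with kobj_engine() above, such an object would be kobject_init()-ed with this ktype and kobject_add()-ed under a parent directory, at which point the sysfs tree owns the reference.
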
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5d00a3b2d914..819f09ef51fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -207,7 +207,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (!intel_guc_is_submission_supported(guc))
+ if (!intel_guc_submission_is_used(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -217,7 +217,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (intel_guc_is_submission_supported(guc)) {
+ if (intel_guc_submission_is_used(guc)) {
u32 ctxnum, base;
base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -333,7 +333,7 @@ int intel_guc_init(struct intel_guc *guc)
ret = intel_uc_fw_init(&guc->fw);
if (ret)
- goto err_fetch;
+ goto out;
ret = intel_guc_log_create(&guc->log);
if (ret)
@@ -348,7 +348,7 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_ads;
- if (intel_guc_is_submission_supported(guc)) {
+ if (intel_guc_submission_is_used(guc)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -364,6 +364,8 @@ int intel_guc_init(struct intel_guc *guc)
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(gt->ggtt);
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
return 0;
err_ct:
@@ -374,9 +376,8 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_fw:
intel_uc_fw_fini(&guc->fw);
-err_fetch:
- intel_uc_fw_cleanup_fetch(&guc->fw);
- DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
+out:
+ i915_probe_error(gt->i915, "failed with %d\n", ret);
return ret;
}
@@ -384,12 +385,12 @@ void intel_guc_fini(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- if (!intel_uc_fw_is_available(&guc->fw))
+ if (!intel_uc_fw_is_loadable(&guc->fw))
return;
i915_ggtt_disable_guc(gt->ggtt);
- if (intel_guc_is_submission_supported(guc))
+ if (intel_guc_submission_is_used(guc))
intel_guc_submission_fini(guc);
intel_guc_ct_fini(&guc->ct);
@@ -397,9 +398,6 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
- intel_uc_fw_cleanup_fetch(&guc->fw);
-
- intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}
/*
@@ -544,7 +542,7 @@ int intel_guc_suspend(struct intel_guc *guc)
* If GuC communication is enabled but submission is not supported,
* we do not need to suspend the GuC.
*/
- if (!intel_guc_submission_is_enabled(guc))
+ if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
return 0;
/*
@@ -609,7 +607,7 @@ int intel_guc_resume(struct intel_guc *guc)
* we do not need to resume the GuC but we do need to enable the
* GuC communication on resume (above).
*/
- if (!intel_guc_submission_is_enabled(guc))
+ if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
return 0;
return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -678,8 +676,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(vma))
goto err;
- flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- ret = i915_vma_pin(vma, 0, 0, flags);
+ flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+ ret = i915_ggtt_pin(vma, 0, flags);
if (ret) {
vma = ERR_PTR(ret);
goto err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 910d49590068..4594ccbeaa34 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -39,7 +39,7 @@ struct intel_guc {
void (*disable)(struct intel_guc *guc);
} interrupts;
- bool submission_supported;
+ bool submission_selected;
struct i915_vma *ads_vma;
struct __guc_ads_blob *ads_blob;
@@ -143,29 +143,36 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc)
return intel_uc_fw_is_supported(&guc->fw);
}
-static inline bool intel_guc_is_enabled(struct intel_guc *guc)
+static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
return intel_uc_fw_is_enabled(&guc->fw);
}
-static inline bool intel_guc_is_running(struct intel_guc *guc)
+static inline bool intel_guc_is_used(struct intel_guc *guc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&guc->fw);
+}
+
+static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
return intel_uc_fw_is_running(&guc->fw);
}
+static inline bool intel_guc_is_ready(struct intel_guc *guc)
+{
+ return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
+}
+
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
intel_uc_fw_sanitize(&guc->fw);
+ intel_guc_ct_sanitize(&guc->ct);
guc->mmio_msg = 0;
return 0;
}
-static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
-{
- return guc->submission_supported;
-}
-
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
spin_lock_irq(&guc->irq_lock);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index c6f971a049f9..11742fca0e9e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -5,11 +5,15 @@
#include "i915_drv.h"
#include "intel_guc_ct.h"
+#include "gt/intel_gt.h"
+#define CT_ERROR(_ct, _fmt, ...) \
+ DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
-#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__)
+#define CT_DEBUG(_ct, _fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
-#define CT_DEBUG_DRIVER(...) do { } while (0)
+#define CT_DEBUG(...) do { } while (0)
#endif
struct ct_request {
@@ -48,6 +52,21 @@ static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
return container_of(ct, struct intel_guc, ct);
}
+static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
+{
+ return guc_to_gt(ct_to_guc(ct));
+}
+
+static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
+{
+ return ct_to_gt(ct)->i915;
+}
+
+static inline struct device *ct_to_dev(struct intel_guc_ct *ct)
+{
+ return ct_to_i915(ct)->drm.dev;
+}
+
static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
switch (type) {
@@ -63,7 +82,6 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
u32 cmds_addr, u32 size)
{
- CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
memset(desc, 0, sizeof(*desc));
desc->addr = cmds_addr;
desc->size = size;
@@ -72,8 +90,6 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
- CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
- desc, desc->head, desc->tail);
desc->head = 0;
desc->tail = 0;
desc->is_in_error = 0;
@@ -89,31 +105,40 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
sizeof(struct guc_ct_buffer_desc),
type
};
- int err;
/* Can't use generic send(), CT registration must go over MMIO */
- err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
- if (err)
- DRM_ERROR("CT: register %s buffer failed; err=%d\n",
- guc_ct_buffer_type_to_str(type), err);
+ return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+}
+
+static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
+{
+ int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type);
+
+ if (unlikely(err))
+ CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
-static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
- u32 type)
+static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
CTB_OWNER_HOST,
type
};
- int err;
/* Can't use generic send(), CT deregistration must go over MMIO */
- err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
- if (err)
- DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
- guc_ct_buffer_type_to_str(type), err);
+ return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+}
+
+static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
+{
+ int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
+
+ if (unlikely(err))
+ CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
@@ -157,13 +182,12 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
*/
err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
- if (err) {
- DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
+ if (unlikely(err)) {
+ CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err);
return err;
}
- CT_DEBUG_DRIVER("CT: vma base=%#x\n",
- intel_guc_ggtt_offset(guc, ct->vma));
+ CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma));
/* store pointers to desc and cmds */
for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
@@ -197,7 +221,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- u32 base;
+ u32 base, cmds, size;
int err;
int i;
@@ -212,23 +236,23 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
*/
for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- guc_ct_buffer_desc_init(ct->ctbs[i].desc,
- base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
- PAGE_SIZE/4);
+ cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
+ size = PAGE_SIZE / 4;
+ CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
+ guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
}
- /* register buffers, starting wirh RECV buffer
- * descriptors are in first half of the blob
+ /*
+ * Register both CT buffers starting with RECV buffer.
+ * Descriptors are in first half of the blob.
*/
- err = guc_action_register_ct_buffer(guc,
- base + PAGE_SIZE/4 * CTB_RECV,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
goto err_out;
- err = guc_action_register_ct_buffer(guc,
- base + PAGE_SIZE/4 * CTB_SEND,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
+ INTEL_GUC_CT_BUFFER_TYPE_SEND);
if (unlikely(err))
goto err_deregister;
@@ -237,10 +261,9 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
return 0;
err_deregister:
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
- DRM_ERROR("CT: can't open channel; err=%d\n", err);
+ CT_ERROR(ct, "Failed to open open CT channel (err=%d)\n", err);
return err;
}
@@ -256,18 +279,16 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
ct->enabled = false;
- if (intel_guc_is_running(guc)) {
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ if (intel_guc_is_fw_running(guc)) {
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
}
}
static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
/* For now it's trivial */
- return ++ct->requests.next_fence;
+ return ++ct->requests.last_fence;
}
/**
@@ -288,25 +309,33 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
* ^-----------------len-------------------^
*/
-static int ctb_write(struct intel_guc_ct_buffer *ctb,
- const u32 *action,
- u32 len /* in dwords */,
- u32 fence,
- bool want_response)
+static int ct_write(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len /* in dwords */,
+ u32 fence,
+ bool want_response)
{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
struct guc_ct_buffer_desc *desc = ctb->desc;
- u32 head = desc->head / 4; /* in dwords */
- u32 tail = desc->tail / 4; /* in dwords */
- u32 size = desc->size / 4; /* in dwords */
- u32 used; /* in dwords */
+ u32 head = desc->head;
+ u32 tail = desc->tail;
+ u32 size = desc->size;
+ u32 used;
u32 header;
u32 *cmds = ctb->cmds;
unsigned int i;
- GEM_BUG_ON(desc->size % 4);
- GEM_BUG_ON(desc->head % 4);
- GEM_BUG_ON(desc->tail % 4);
- GEM_BUG_ON(tail >= size);
+ if (unlikely(desc->is_in_error))
+ return -EPIPE;
+
+ if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+ (tail | head) >= size))
+ goto corrupted;
+
+ /* later calculations will be done in dwords */
+ head /= 4;
+ tail /= 4;
+ size /= 4;
/*
* tail == head condition indicates empty. GuC FW does not support
@@ -332,9 +361,8 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
(want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
(action[0] << GUC_CT_MSG_ACTION_SHIFT);
- CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
- 4, &header, 4, &fence,
- 4 * (len - 1), &action[1]);
+ CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
+ 4, &header, 4, &fence, 4 * (len - 1), &action[1]);
cmds[tail] = header;
tail = (tail + 1) % size;
@@ -346,12 +374,17 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
cmds[tail] = action[i];
tail = (tail + 1) % size;
}
+ GEM_BUG_ON(tail > size);
/* now update desc tail (back in bytes) */
desc->tail = tail * 4;
- GEM_BUG_ON(desc->tail > desc->size);
-
return 0;
+
+corrupted:
+ CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
+ desc->addr, desc->head, desc->tail, desc->size);
+ desc->is_in_error = 1;
+ return -EPIPE;
}
/**
@@ -469,7 +502,7 @@ static int ct_send(struct intel_guc_ct *ct,
list_add_tail(&request.link, &ct->requests.pending);
spin_unlock_irqrestore(&ct->requests.lock, flags);
- err = ctb_write(ctb, action, len, fence, !!response_buf);
+ err = ct_write(ct, action, len, fence, !!response_buf);
if (unlikely(err))
goto unlink;
@@ -526,11 +559,11 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
- DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
- action[0], ret, status);
+ CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
+ action[0], ret, status);
} else if (unlikely(ret)) {
- CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
- action[0], ret, ret);
+ CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
+ action[0], ret, ret);
}
mutex_unlock(&guc->send_mutex);
@@ -552,22 +585,29 @@ static inline bool ct_header_is_response(u32 header)
return !!(header & GUC_CT_MSG_IS_RESPONSE);
}
-static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
+static int ct_read(struct intel_guc_ct *ct, u32 *data)
{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
struct guc_ct_buffer_desc *desc = ctb->desc;
- u32 head = desc->head / 4; /* in dwords */
- u32 tail = desc->tail / 4; /* in dwords */
- u32 size = desc->size / 4; /* in dwords */
+ u32 head = desc->head;
+ u32 tail = desc->tail;
+ u32 size = desc->size;
u32 *cmds = ctb->cmds;
- s32 available; /* in dwords */
+ s32 available;
unsigned int len;
unsigned int i;
- GEM_BUG_ON(desc->size % 4);
- GEM_BUG_ON(desc->head % 4);
- GEM_BUG_ON(desc->tail % 4);
- GEM_BUG_ON(tail >= size);
- GEM_BUG_ON(head >= size);
+ if (unlikely(desc->is_in_error))
+ return -EPIPE;
+
+ if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+ (tail | head) >= size))
+ goto corrupted;
+
+ /* later calculations will be done in dwords */
+ head /= 4;
+ tail /= 4;
+ size /= 4;
/* tail == head condition indicates empty */
available = tail - head;
@@ -577,7 +617,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
/* beware of buffer wrap case */
if (unlikely(available < 0))
available += size;
- CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
+ CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
GEM_BUG_ON(available < 0);
data[0] = cmds[head];
@@ -586,23 +626,29 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
/* message len with header */
len = ct_header_get_len(data[0]) + 1;
if (unlikely(len > (u32)available)) {
- DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
- 4, data,
- 4 * (head + available - 1 > size ?
- size - head : available - 1), &cmds[head],
- 4 * (head + available - 1 > size ?
- available - 1 - size + head : 0), &cmds[0]);
- return -EPROTO;
+ CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
+ 4, data,
+ 4 * (head + available - 1 > size ?
+ size - head : available - 1), &cmds[head],
+ 4 * (head + available - 1 > size ?
+ available - 1 - size + head : 0), &cmds[0]);
+ goto corrupted;
}
for (i = 1; i < len; i++) {
data[i] = cmds[head];
head = (head + 1) % size;
}
- CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);
+ CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
desc->head = head * 4;
return 0;
+
+corrupted:
+ CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
+ desc->addr, desc->head, desc->tail, desc->size);
+ desc->is_in_error = 1;
+ return -EPIPE;
}
/**
@@ -627,7 +673,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
u32 header = msg[0];
u32 len = ct_header_get_len(header);
- u32 msglen = len + 1; /* total message length including header */
+ u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
u32 fence;
u32 status;
u32 datalen;
@@ -639,7 +685,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
/* Response payload shall at least include fence and status */
if (unlikely(len < 2)) {
- DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
return -EPROTO;
}
@@ -649,22 +695,22 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
/* Format of the status follows RESPONSE message */
if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
- DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
return -EPROTO;
}
- CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
+ CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
spin_lock(&ct->requests.lock);
list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
- CT_DEBUG_DRIVER("CT: request %u awaits response\n",
- req->fence);
+ CT_DEBUG(ct, "request %u awaits response\n",
+ req->fence);
continue;
}
if (unlikely(datalen > req->response_len)) {
- DRM_ERROR("CT: response %u too long %*ph\n",
- req->fence, 4 * msglen, msg);
+ CT_ERROR(ct, "Response for %u is too long %*ph\n",
+ req->fence, msgsize, msg);
datalen = 0;
}
if (datalen)
@@ -677,7 +723,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
spin_unlock(&ct->requests.lock);
if (!found)
- DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
return 0;
}
@@ -687,7 +733,7 @@ static void ct_process_request(struct intel_guc_ct *ct,
struct intel_guc *guc = ct_to_guc(ct);
int ret;
- CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
+ CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
@@ -698,8 +744,8 @@ static void ct_process_request(struct intel_guc_ct *ct,
default:
fail_unexpected:
- DRM_ERROR("CT: unexpected request %x %*ph\n",
- action, 4 * len, payload);
+ CT_ERROR(ct, "Unexpected request %x %*ph\n",
+ action, 4 * len, payload);
break;
}
}
@@ -767,18 +813,18 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
u32 header = msg[0];
u32 len = ct_header_get_len(header);
- u32 msglen = len + 1; /* total message length including header */
+ u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
struct ct_incoming_request *request;
unsigned long flags;
GEM_BUG_ON(ct_header_is_response(header));
- request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
+ request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
if (unlikely(!request)) {
- DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
return 0; /* XXX: -ENOMEM ? */
}
- memcpy(request->msg, msg, 4 * msglen);
+ memcpy(request->msg, msg, msgsize);
spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->requests.incoming);
@@ -794,7 +840,6 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
*/
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
- struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
@@ -804,7 +849,7 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
}
do {
- err = ctb_read(ctb, msg);
+ err = ct_read(ct, msg);
if (err)
break;
@@ -813,10 +858,4 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
else
err = ct_handle_request(ct, msg);
} while (!err);
-
- if (GEM_WARN_ON(err == -EPROTO)) {
- DRM_ERROR("CT: corrupted message detected!\n");
- ctb->desc->is_in_error = 1;
- }
}
-
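
The ct_write()/ct_read() rework above stops trusting the shared descriptor: head, tail and size (all in bytes) must be dword-aligned and in bounds before any ring arithmetic happens in dwords, and a bad descriptor is latched via is_in_error so subsequent calls fail fast with -EPIPE. A standalone sketch of the write side under those rules (plain C, illustrative names; the driver folds the bounds test into a single (tail | head) >= size check):

#include <stdbool.h>
#include <stdint.h>

struct demo_desc {
	uint32_t head;		/* in bytes */
	uint32_t tail;		/* in bytes */
	uint32_t size;		/* in bytes */
	uint32_t in_error;
};

static bool desc_is_sane(const struct demo_desc *d)
{
	if ((d->head | d->tail | d->size) & 3)	/* must be dword aligned */
		return false;
	return d->head < d->size && d->tail < d->size;
}

/* Append len dwords from data; 0 on success, -1 on corruption or no space. */
static int demo_write(struct demo_desc *d, uint32_t *cmds,
		      const uint32_t *data, unsigned int len)
{
	uint32_t head, tail, size, used;
	unsigned int i;

	if (d->in_error || !desc_is_sane(d)) {
		d->in_error = 1;	/* latch: all later calls fail fast */
		return -1;
	}

	/* All ring arithmetic is done in dwords. */
	head = d->head / 4;
	tail = d->tail / 4;
	size = d->size / 4;

	/* tail == head means empty; one slot is kept free to disambiguate. */
	used = tail - head;
	if (tail < head)
		used += size;
	if (used + len + 1 >= size)
		return -1;	/* no space: a transient, not a corruption */

	for (i = 0; i < len; i++) {
		cmds[tail] = data[i];
		tail = (tail + 1) % size;
	}

	d->tail = tail * 4;	/* publish back in bytes */
	return 0;
}
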
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 3e7fe237cfa5..494a51a5200f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -49,7 +49,7 @@ struct intel_guc_ct {
struct intel_guc_ct_buffer ctbs[2];
struct {
- u32 next_fence; /* fence to be used with next request to send */
+ u32 last_fence; /* last fence used to send request */
spinlock_t lock; /* protects pending requests list */
struct list_head pending; /* requests waiting for response */
@@ -65,6 +65,11 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
+static inline void intel_guc_ct_sanitize(struct intel_guc_ct *ct)
+{
+ ct->enabled = false;
+}
+
static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
{
return ct->enabled;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9e42324fdecd..aa6d56e25a10 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -217,7 +217,7 @@ static void guc_wq_item_append(struct intel_guc *guc,
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
+ u32 ctx_desc = rq->context->lrc.ccid;
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
guc_wq_item_append(guc, engine->guc_id, ctx_desc,
@@ -456,9 +456,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -660,12 +658,9 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_proc_desc_fini(guc);
}
-static bool __guc_submission_support(struct intel_guc *guc)
+static bool __guc_submission_selected(struct intel_guc *guc)
{
- /* XXX: GuC submission is unavailable for now */
- return false;
-
- if (!intel_guc_is_supported(guc))
+ if (!intel_guc_submission_is_supported(guc))
return false;
return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
@@ -673,7 +668,7 @@ static bool __guc_submission_support(struct intel_guc *guc)
void intel_guc_submission_init_early(struct intel_guc *guc)
{
- guc->submission_supported = __guc_submission_support(guc);
+ guc->submission_selected = __guc_submission_selected(guc);
}
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index e402a2932592..4cf9d3e50263 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -8,7 +8,8 @@
#include <linux/types.h>
-struct intel_guc;
+#include "intel_guc.h"
+
struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
@@ -20,4 +21,20 @@ int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
+static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
+{
+ /* XXX: GuC submission is unavailable for now */
+ return false;
+}
+
+static inline bool intel_guc_submission_is_wanted(struct intel_guc *guc)
+{
+ return guc->submission_selected;
+}
+
+static inline bool intel_guc_submission_is_used(struct intel_guc *guc)
+{
+ return intel_guc_is_used(guc) && intel_guc_submission_is_wanted(guc);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 32a069841c14..a74b65694512 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -121,19 +121,20 @@ int intel_huc_init(struct intel_huc *huc)
if (err)
goto out_fini;
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
return 0;
out_fini:
intel_uc_fw_fini(&huc->fw);
out:
- intel_uc_fw_cleanup_fetch(&huc->fw);
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err);
+ i915_probe_error(i915, "failed with %d\n", err);
return err;
}
void intel_huc_fini(struct intel_huc *huc)
{
- if (!intel_uc_fw_is_available(&huc->fw))
+ if (!intel_uc_fw_is_loadable(&huc->fw))
return;
intel_huc_rsa_data_destroy(huc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 644c059fe01d..a40b9cfc6c22 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -41,11 +41,17 @@ static inline bool intel_huc_is_supported(struct intel_huc *huc)
return intel_uc_fw_is_supported(&huc->fw);
}
-static inline bool intel_huc_is_enabled(struct intel_huc *huc)
+static inline bool intel_huc_is_wanted(struct intel_huc *huc)
{
return intel_uc_fw_is_enabled(&huc->fw);
}
+static inline bool intel_huc_is_used(struct intel_huc *huc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&huc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&huc->fw);
+}
+
static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
{
return intel_uc_fw_is_running(&huc->fw);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index eee193bf2cc4..9cdf4cbe691c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -20,7 +20,7 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
struct drm_i915_private *i915 = gt->i915;
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
- intel_uc_uses_guc(uc),
+ intel_uc_wants_guc(uc),
INTEL_INFO(i915)->platform, INTEL_REVID(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 64934a876a50..a4cbe06e06bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -48,17 +48,17 @@ static void __confirm_options(struct intel_uc *uc)
DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
"enable_guc=%d (guc:%s submission:%s huc:%s)\n",
i915_modparams.enable_guc,
- yesno(intel_uc_uses_guc(uc)),
- yesno(intel_uc_uses_guc_submission(uc)),
- yesno(intel_uc_uses_huc(uc)));
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_wants_huc(uc)));
if (i915_modparams.enable_guc == -1)
return;
if (i915_modparams.enable_guc == 0) {
- GEM_BUG_ON(intel_uc_uses_guc(uc));
- GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
- GEM_BUG_ON(intel_uc_uses_huc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_wants_huc(uc));
return;
}
@@ -93,7 +93,7 @@ void intel_uc_init_early(struct intel_uc *uc)
__confirm_options(uc);
- if (intel_uc_uses_guc(uc))
+ if (intel_uc_wants_guc(uc))
uc->ops = &uc_ops_on;
else
uc->ops = &uc_ops_off;
@@ -257,13 +257,13 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
{
int err;
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
err = intel_uc_fw_fetch(&uc->guc.fw);
if (err)
return;
- if (intel_uc_uses_huc(uc))
+ if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
}
@@ -273,25 +273,38 @@ static void __uc_cleanup_firmwares(struct intel_uc *uc)
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
-static void __uc_init(struct intel_uc *uc)
+static int __uc_init(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret;
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
+
+ if (!intel_uc_uses_guc(uc))
+ return 0;
+
+ if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
+ return -ENOMEM;
/* XXX: GuC submission is unavailable for now */
- GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
ret = intel_guc_init(guc);
- if (ret) {
- intel_uc_fw_cleanup_fetch(&huc->fw);
- return;
+ if (ret)
+ return ret;
+
+ if (intel_uc_uses_huc(uc)) {
+ ret = intel_huc_init(huc);
+ if (ret)
+ goto out_guc;
}
- if (intel_uc_uses_huc(uc))
- intel_huc_init(huc);
+ return 0;
+
+out_guc:
+ intel_guc_fini(guc);
+ return ret;
}
static void __uc_fini(struct intel_uc *uc)
@@ -402,12 +415,12 @@ static int __uc_init_hw(struct intel_uc *uc)
int ret, attempts;
GEM_BUG_ON(!intel_uc_supports_guc(uc));
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
- if (!intel_uc_fw_is_available(&guc->fw)) {
+ if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
- intel_uc_supports_guc_submission(uc) ?
+ intel_uc_wants_guc_submission(uc) ?
intel_uc_fw_status_to_error(guc->fw.status) : 0;
goto err_out;
}
@@ -459,14 +472,14 @@ static int __uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_communication;
- if (intel_uc_supports_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found,
"submission",
- enableddisabled(intel_uc_supports_guc_submission(uc)));
+ enableddisabled(intel_uc_uses_guc_submission(uc)));
if (intel_uc_uses_huc(uc)) {
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
@@ -505,10 +518,10 @@ static void __uc_fini_hw(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_fw_running(guc))
return;
- if (intel_uc_supports_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_disable(guc);
if (guc_communication_enabled(guc))
@@ -527,7 +540,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
guc_disable_communication(guc);
@@ -539,7 +552,7 @@ void intel_uc_runtime_suspend(struct intel_uc *uc)
struct intel_guc *guc = &uc->guc;
int err;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
err = intel_guc_suspend(guc);
@@ -554,7 +567,7 @@ void intel_uc_suspend(struct intel_uc *uc)
struct intel_guc *guc = &uc->guc;
intel_wakeref_t wakeref;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
@@ -566,7 +579,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
struct intel_guc *guc = &uc->guc;
int err;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_fw_running(guc))
return 0;
/* Make sure we enable communication if and only if it's disabled */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 49c913524686..5ae7b50b7dc1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -7,6 +7,7 @@
#define _INTEL_UC_H_
#include "intel_guc.h"
+#include "intel_guc_submission.h"
#include "intel_huc.h"
#include "i915_params.h"
@@ -16,7 +17,7 @@ struct intel_uc_ops {
int (*sanitize)(struct intel_uc *uc);
void (*init_fw)(struct intel_uc *uc);
void (*fini_fw)(struct intel_uc *uc);
- void (*init)(struct intel_uc *uc);
+ int (*init)(struct intel_uc *uc);
void (*fini)(struct intel_uc *uc);
int (*init_hw)(struct intel_uc *uc);
void (*fini_hw)(struct intel_uc *uc);
@@ -40,35 +41,44 @@ void intel_uc_runtime_suspend(struct intel_uc *uc);
int intel_uc_resume(struct intel_uc *uc);
int intel_uc_runtime_resume(struct intel_uc *uc);
-static inline bool intel_uc_supports_guc(struct intel_uc *uc)
-{
- return intel_guc_is_supported(&uc->guc);
-}
-
-static inline bool intel_uc_uses_guc(struct intel_uc *uc)
-{
- return intel_guc_is_enabled(&uc->guc);
-}
+/*
+ * We need to know as early as possible if we're going to use GuC or not to
+ * take the correct setup paths. Additionally, once we've started loading the
+ * GuC, it is unsafe to keep executing without it because some parts of the HW,
+ * a subset of which is not cleaned on GT reset, will start expecting the GuC FW
+ * to be running.
+ * To solve both these requirements, we commit to using the microcontrollers if
+ * the relevant modparam is set and the blobs are found on the system. At this
+ * stage, the only thing that can stop us from attempting to load the blobs on
+ * the HW and use them is a fundamental issue (e.g. no memory for our
+ * structures); if we hit such a problem during driver load we're broken even
+ * without GuC, so there is no point in trying to fall back.
+ *
+ * Given the above, we can be in one of 4 states, with the last one implying
+ * we're committed to using the microcontroller:
+ * - Not supported: not available in HW and/or firmware not defined.
+ * - Supported: available in HW and firmware defined.
+ * - Wanted: supported + enabled in modparam.
+ * - In use: wanted + firmware found on the system and successfully fetched.
+ */
-static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc)
-{
- return intel_guc_is_submission_supported(&uc->guc);
+#define __uc_state_checker(x, func, state, required) \
+static inline bool intel_uc_##state##_##func(struct intel_uc *uc) \
+{ \
+ return intel_##func##_is_##required(&uc->x); \
}
-static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
-{
- return intel_guc_is_submission_supported(&uc->guc);
-}
+#define uc_state_checkers(x, func) \
+__uc_state_checker(x, func, supports, supported) \
+__uc_state_checker(x, func, wants, wanted) \
+__uc_state_checker(x, func, uses, used)
-static inline bool intel_uc_supports_huc(struct intel_uc *uc)
-{
- return intel_uc_supports_guc(uc);
-}
+uc_state_checkers(guc, guc);
+uc_state_checkers(huc, huc);
+uc_state_checkers(guc, guc_submission);
-static inline bool intel_uc_uses_huc(struct intel_uc *uc)
-{
- return intel_huc_is_enabled(&uc->huc);
-}
+#undef uc_state_checkers
+#undef __uc_state_checker
#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
@@ -80,7 +90,7 @@ static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
intel_uc_ops_function(sanitize, sanitize, int, 0);
intel_uc_ops_function(fetch_firmwares, init_fw, void, );
intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
-intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(init, init, int, 0);
intel_uc_ops_function(fini, fini, void, );
intel_uc_ops_function(init_hw, init_hw, int, 0);
intel_uc_ops_function(fini_hw, fini_hw, void, );
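
For reference, the uc_state_checkers(guc, guc) invocation above expands to three one-line wrappers; the huc and guc_submission invocations follow the same shape:

static inline bool intel_uc_supports_guc(struct intel_uc *uc)
{
	return intel_guc_is_supported(&uc->guc);
}

static inline bool intel_uc_wants_guc(struct intel_uc *uc)
{
	return intel_guc_is_wanted(&uc->guc);
}

static inline bool intel_uc_uses_guc(struct intel_uc *uc)
{
	return intel_guc_is_used(&uc->guc);
}
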
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 8ee0a0c7f447..18c755203688 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -43,7 +43,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* features.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
@@ -279,7 +279,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
err = i915_inject_probe_error(i915, -ENXIO);
if (err)
- return err;
+ goto fail;
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
@@ -501,7 +501,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
if (err)
return err;
- if (!intel_uc_fw_is_available(uc_fw))
+ if (!intel_uc_fw_is_loadable(uc_fw))
return -ENOEXEC;
/* Call custom loader */
@@ -544,7 +544,10 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
- intel_uc_fw_cleanup_fetch(uc_fw);
+ if (i915_gem_object_has_pinned_pages(uc_fw->obj))
+ i915_gem_object_unpin_pages(uc_fw->obj);
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 1f30543d0d2d..888ff0de0244 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -29,8 +29,11 @@ struct intel_gt;
* | | SELECTED |
* +------------+- / | \ -+
* | | MISSING <--/ | \--> ERROR |
- * | fetch | | |
- * | | /------> AVAILABLE <---<-----------\ |
+ * | fetch | V |
+ * | | AVAILABLE |
+ * +------------+- | -+
+ * | init | V |
+ * | | /------> LOADABLE <----<-----------\ |
* +------------+- \ / \ \ \ -+
* | | FAIL <--< \--> TRANSFERRED \ |
* | upload | \ / \ / |
@@ -46,6 +49,7 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
@@ -115,6 +119,8 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
return "ERROR";
case INTEL_UC_FIRMWARE_AVAILABLE:
return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_LOADABLE:
+ return "LOADABLE";
case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
case INTEL_UC_FIRMWARE_TRANSFERRED:
@@ -143,6 +149,7 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
case INTEL_UC_FIRMWARE_SELECTED:
return -ESTALE;
case INTEL_UC_FIRMWARE_AVAILABLE:
+ case INTEL_UC_FIRMWARE_LOADABLE:
case INTEL_UC_FIRMWARE_TRANSFERRED:
case INTEL_UC_FIRMWARE_RUNNING:
return 0;
@@ -184,6 +191,11 @@ static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
}
+static inline bool intel_uc_fw_is_loadable(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE;
+}
+
static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
{
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
@@ -202,7 +214,7 @@ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
if (intel_uc_fw_is_loaded(uc_fw))
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOADABLE);
}
static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
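
The predicates above rely on the status enum being ordered along the fetch/init/load/run pipeline, so each check is a single >= comparison that automatically covers every later stage; that is also why sanitize can simply drop a loaded firmware back to LOADABLE. A simplified sketch of the idiom, with the MISSING/ERROR/FAIL states omitted:

enum demo_fw_status {
	DEMO_FW_SELECTED,
	DEMO_FW_AVAILABLE,	/* blob found and copied into memory */
	DEMO_FW_LOADABLE,	/* all fw-required objects are ready */
	DEMO_FW_TRANSFERRED,	/* dma xfer done */
	DEMO_FW_RUNNING,	/* init/auth done */
};

static inline bool demo_fw_is_available(enum demo_fw_status s)
{
	return s >= DEMO_FW_AVAILABLE;	/* also true for LOADABLE..RUNNING */
}

static inline bool demo_fw_is_loadable(enum demo_fw_status s)
{
	return s >= DEMO_FW_LOADABLE;
}

static inline void demo_fw_sanitize(enum demo_fw_status *s)
{
	/* A loaded fw must be re-uploaded after reset, but stays loadable. */
	if (*s >= DEMO_FW_TRANSFERRED)
		*s = DEMO_FW_LOADABLE;
}
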
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 771420453f82..8b13f091cee2 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,7 +41,7 @@
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->ggtt.vm.mutex);
- mmio_hw_access_pre(dev_priv);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+ mutex_lock(&gt->ggtt->vm.mutex);
+ mmio_hw_access_pre(gt);
+ ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(dev_priv);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mmio_hw_access_post(gt);
+ mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
int ret;
ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
return ret;
}
static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gt *gt = gvt->gt;
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
}
/**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(uncore->rpm);
- if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
- if (WARN_ON(!reg))
+ if (drm_WARN_ON(&i915->drm, !reg))
return;
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
- I915_WRITE(fence_reg_lo, 0);
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write(uncore, fence_reg_lo, 0);
+ intel_uncore_posting_read(uncore, fence_reg_lo);
- I915_WRITE(fence_reg_hi, upper_32_bits(value));
- I915_WRITE(fence_reg_lo, lower_32_bits(value));
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+ intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+ intel_uncore_posting_read(uncore, fence_reg_lo);
}
static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
u32 i;
- if (WARN_ON(!vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
return;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
int i;
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(&dev_priv->ggtt);
+ reg = i915_reserve_fence(gvt->gt->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
+
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host, if fail */
@@ -220,8 +224,8 @@ out_free_fence:
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+ intel_runtime_pm_put_unchecked(uncore->rpm);
return -ENOSPC;
}
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
*/
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
- _clear_vgpu_fence(vgpu);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+ _clear_vgpu_fence(vgpu);
}
/**
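The aperture_gm.c conversions above replace unchecked runtime-PM puts with wakeref cookies taken from uncore->rpm, and the reset path adopts the with_intel_runtime_pm() scope helper so the get/put pair cannot be left unbalanced. A hedged sketch of how a for-loop based scope macro of that shape can be built in plain C (a simplification for illustration, not the i915 implementation; rpm_get()/rpm_put() are stand-ins assumed to behave like their intel_runtime_pm_* counterparts, with the get always returning a nonzero cookie):

typedef unsigned long wakeref_t;

wakeref_t rpm_get(void *rpm);		/* returns a nonzero tracking cookie */
void rpm_put(void *rpm, wakeref_t wf);	/* releases exactly that cookie */

/* Runs the body once with the reference held, then releases it. */
#define with_rpm(rpm, wf) \
	for ((wf) = rpm_get(rpm); (wf); rpm_put((rpm), (wf)), (wf) = 0)

static void reset_fences_example(void *rpm)
{
	wakeref_t wakeref;

	with_rpm(rpm, wakeref) {
		/* Device is guaranteed awake in this block; the cookie makes
		 * a leaked reference attributable, unlike the old
		 * intel_runtime_pm_put_unchecked() style being removed. */
	}
}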
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 19cf1bbe059d..072725a448db 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -106,10 +106,13 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 4))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+ if (drm_WARN_ON(&i915->drm,
+ offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -297,34 +300,36 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ret;
- if (WARN_ON(bytes > 4))
+ if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+ if (drm_WARN_ON(&i915->drm,
+ offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
/* First check if it's PCI_COMMAND */
if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
- if (WARN_ON(bytes > 2))
+ if (drm_WARN_ON(&i915->drm, bytes > 2))
return -EINVAL;
return emulate_pci_command_write(vgpu, offset, p_data, bytes);
}
switch (rounddown(offset, 4)) {
case PCI_ROM_ADDRESS:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
case INTEL_GVT_PCI_SWSCI:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
if (ret)
@@ -332,7 +337,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
break;
case INTEL_GVT_PCI_OPREGION:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_opregion_base_write_handler(vgpu,
*(u32 *)p_data);
@@ -391,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
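Every WARN_ON() in cfg_space.c above becomes drm_WARN_ON(&i915->drm, ...), so the warning is attributed to the device that tripped it -- useful once several GPUs share one kernel log. A toy version of that device-scoped pattern, using GNU statement expressions as kernel C does (this only shows the shape; the struct and macro names are made up for the sketch):

#include <stdio.h>

struct toy_device { const char *name; };

/* Evaluates to the condition, logging with device context when true. */
#define dev_WARN_ON(dev, cond)						\
	({								\
		int __ret = !!(cond);					\
		if (__ret)						\
			fprintf(stderr, "%s: WARN at %s:%d: %s\n",	\
				(dev)->name, __FILE__, __LINE__, #cond);\
		__ret;							\
	})

static int emulate_cfg_read(struct toy_device *dev, unsigned int bytes)
{
	if (dev_WARN_ON(dev, bytes > 4))	/* warns *and* gates, as above */
		return -22;			/* -EINVAL */
	return 0;
}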
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21a176cd8acc..a3cc080a46c6 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -164,6 +164,7 @@ struct decode_info {
#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
+#define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03)
#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
@@ -462,7 +463,7 @@ enum {
struct parser_exec_state {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
int buf_type;
@@ -635,39 +636,42 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
},
};
-static inline u32 get_opcode(u32 cmd, int ring_id)
+static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
return cmd >> (32 - d_info->op_len);
}
-static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
- unsigned int opcode, int ring_id)
+static inline const struct cmd_info *
+find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
+ const struct intel_engine_cs *engine)
{
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
+ if (opcode == e->info->opcode &&
+ e->info->rings & engine->mask)
return e->info;
}
return NULL;
}
-static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
- u32 cmd, int ring_id)
+static inline const struct cmd_info *
+get_cmd_info(struct intel_gvt *gvt, u32 cmd,
+ const struct intel_engine_cs *engine)
{
u32 opcode;
- opcode = get_opcode(cmd, ring_id);
+ opcode = get_opcode(cmd, engine);
if (opcode == INVALID_OP)
return NULL;
- return find_cmd_entry(gvt, opcode, ring_id);
+ return find_cmd_entry(gvt, opcode, engine);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
@@ -675,12 +679,12 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
-static inline void print_opcode(u32 cmd, int ring_id)
+static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
int i;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -709,10 +713,11 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
- " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
- s->ring_id, s->ring_start, s->ring_start + s->ring_size,
- s->ring_head, s->ring_tail);
+ gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n",
+ s->vgpu->id, s->engine->name,
+ s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
@@ -729,7 +734,7 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
- print_opcode(cmd_val(s, 0), s->ring_id);
+ print_opcode(cmd_val(s, 0), s->engine);
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
@@ -840,7 +845,6 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
unsigned int data;
u32 ring_base;
u32 nopid;
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (!strcmp(cmd, "lri"))
data = cmd_val(s, index + 1);
@@ -850,7 +854,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
return -EINVAL;
}
- ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+ ring_base = s->engine->mmio_base;
nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -926,9 +930,9 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* update reg values in it into vregs, so LRIs in workload with
* inhibit context will restore with correct values
*/
- if (IS_GEN(gvt->dev_priv, 9) &&
- intel_gvt_mmio_is_in_ctx(gvt, offset) &&
- !strncmp(cmd, "lri", 3)) {
+ if (IS_GEN(s->engine->i915, 9) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
@@ -964,23 +968,10 @@ static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- struct intel_gvt *gvt = s->vgpu->gvt;
- u32 valid_len = CMD_LEN(1);
-
- /*
- * Official intel docs are somewhat sloppy , check the definition of
- * MI_LOAD_REGISTER_IMM.
- */
- #define MAX_VALID_LEN 127
- if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
- gvt_err("len is not valid: len=%u valid_len=%u\n",
- cmd_len, valid_len);
- return -EFAULT;
- }
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
- if (s->ring_id == BCS0 &&
+ if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
+ if (s->engine->id == BCS0 &&
cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
@@ -1001,9 +992,9 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= ((cmd_reg_inhibit(s, i) ||
- (cmd_reg_inhibit(s, i + 1)))) ?
+ (cmd_reg_inhibit(s, i + 1)))) ?
-EBADRQC : 0;
if (ret)
break;
@@ -1029,7 +1020,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
- if (IS_BROADWELL(gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
@@ -1141,7 +1132,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 2), gma);
val = cmd_val(s, 1) & (~(1 << 21));
@@ -1155,15 +1146,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
return ret;
if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
- set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
+ s->workload->pending_events);
return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
- set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
+ s->workload->pending_events);
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
@@ -1213,7 +1204,7 @@ struct plane_code_mapping {
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1230,7 +1221,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
dword2 = cmd_val(s, 2);
v = (dword0 & GENMASK(21, 19)) >> 19;
- if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
return -EBADRQC;
info->pipe = gen8_plane_code[v].pipe;
@@ -1250,7 +1241,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->stride_reg = SPRSTRIDE(info->pipe);
info->surf_reg = SPRSURF(info->pipe);
} else {
- WARN_ON(1);
+ drm_WARN_ON(&dev_priv->drm, 1);
return -EBADRQC;
}
return 0;
@@ -1259,7 +1250,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1318,13 +1309,12 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 stride, tile;
if (!info->async_flip)
return 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1347,7 +1337,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1378,11 +1368,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
static int decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1667,7 +1655,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 1), gma);
val = cmd_val(s, 0) & (~(1 << 21));
@@ -1676,8 +1664,8 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
- set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
+ s->workload->pending_events);
return ret;
}
@@ -1725,12 +1713,18 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
/* Decide privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
- !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+ if (cmd_val(s, 0) & BIT(8) &&
+ !(s->vgpu->scan_nonprivbb & s->engine->mask))
return 0;
+
return 1;
}
+static const char *repr_addr_type(unsigned int type)
+{
+ return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
+}
+
static int find_bb_size(struct parser_exec_state *s,
unsigned long *bb_size,
unsigned long *bb_end_cmd_offset)
@@ -1753,24 +1747,24 @@ static int find_bb_size(struct parser_exec_state *s,
return -EFAULT;
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
do {
if (copy_gma_to_hva(s->vgpu, mm,
- gma, gma + 4, &cmd) < 0)
+ gma, gma + 4, &cmd) < 0)
return -EFAULT;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1799,12 +1793,12 @@ static int audit_bb_end(struct parser_exec_state *s, void *va)
u32 cmd = *(u32 *)va;
const struct cmd_info *info;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1857,7 +1851,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (bb->ppgtt)
start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+ bb->obj = i915_gem_object_create_shmem(s->engine->i915,
round_up(bb_size + start_offset,
PAGE_SIZE));
if (IS_ERR(bb->obj)) {
@@ -2480,6 +2474,9 @@ static const struct cmd_info cmd_info[] = {
{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(1), 8, NULL},
+ {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS,
+ F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL},
+
{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
@@ -2666,25 +2663,25 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (cmd == MI_NOOP)
info = &cmd_info[mi_noop_index];
else
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
s->info = info;
- trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+ trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
ret = gvt_check_valid_cmd_length(cmd_length(s),
- info->valid_len);
+ info->valid_len);
if (ret)
return ret;
}
@@ -2781,7 +2778,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = workload->rb_start;
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
@@ -2790,8 +2787,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.workload = workload;
s.is_ctx_wa = false;
- if ((bypass_scan_mask & (1 << workload->ring_id)) ||
- gma_head == gma_tail)
+ if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
return 0;
ret = ip_gma_set(&s, gma_head);
@@ -2830,7 +2826,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
@@ -2855,7 +2851,6 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
- int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2868,21 +2863,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
void *p;
/* realloc the new ring buffer if needed */
- p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
- GFP_KERNEL);
+ p = krealloc(s->ring_scan_buffer[workload->engine->id],
+ workload->rb_len, GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- s->ring_scan_buffer[ring_id] = p;
- s->ring_scan_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[workload->engine->id] = p;
+ s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
}
- shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
@@ -2940,7 +2935,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int ret = 0;
void *map;
- obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create_shmem(workload->engine->i915,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -3029,30 +3024,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, unsigned long rings)
-{
- const struct cmd_info *info = NULL;
- unsigned int ring;
-
- for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
- info = find_cmd_entry(gvt, opcode, ring);
- if (info)
- break;
- }
- return info;
-}
-
static int init_cmd_table(struct intel_gvt *gvt)
{
+ unsigned int gen_type = intel_gvt_get_device_type(gvt);
int i;
- struct cmd_entry *e;
- const struct cmd_info *info;
- unsigned int gen_type;
-
- gen_type = intel_gvt_get_device_type(gvt);
for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ struct cmd_entry *e;
+
if (!(cmd_info[i].devices & gen_type))
continue;
@@ -3061,23 +3040,16 @@ static int init_cmd_table(struct intel_gvt *gvt)
return -ENOMEM;
e->info = &cmd_info[i];
- info = find_cmd_entry_any_ring(gvt,
- e->info->opcode, e->info->rings);
- if (info) {
- gvt_err("%s %s duplicated\n", e->info->name,
- info->name);
- kfree(e);
- return -EEXIST;
- }
if (cmd_info[i].opcode == OP_MI_NOOP)
mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
- e->info->name, e->info->opcode, e->info->flag,
- e->info->devices, e->info->rings);
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
}
+
return 0;
}
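The whole of cmd_parser.c above swaps an int ring_id for a const struct intel_engine_cs *engine, so the parser reaches the id, name, mask, mmio_base and owning i915 through one pointer instead of re-deriving them from dev_priv->engine[ring_id]; error messages also improve from "ring %d" to "ring %s". A pared-down sketch of the before/after shape (only the fields the diff actually uses; the 0x94 NOPID register offset is illustrative):

struct engine {
	int id;			/* still available where an index is needed */
	unsigned int mask;	/* BIT(id): bitmask tests without shifting  */
	const char *name;	/* "rcs0" etc., human-readable in logs      */
	unsigned int mmio_base;
};

struct parser_state {
	const struct engine *engine;	/* was: int ring_id */
};

static unsigned int ring_nopid_offset(const struct parser_state *s)
{
	/* was: dev_priv->engine[s->ring_id]->mmio_base + ... */
	return s->engine->mmio_base + 0x94;
}

static int engine_in_mask(const struct parser_state *s, unsigned int scan_mask)
{
	/* was: scan_mask & (1 << s->ring_id) */
	return !!(scan_mask & s->engine->mask);
}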
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 285f6011a537..ec47d4114554 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv,
static inline int mmio_diff_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
struct mmio_diff_param *param = data;
struct diff_mmio *node;
u32 preg, vreg;
- preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
+ preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
@@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->dev_priv);
+ mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->dev_priv);
+ mmio_hw_access_post(gvt->gt);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
@@ -128,6 +127,7 @@ static int
vgpu_scan_nonprivbb_get(void *data, u64 *val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
*val = vgpu->scan_nonprivbb;
return 0;
}
@@ -142,42 +142,7 @@ static int
vgpu_scan_nonprivbb_set(void *data, u64 val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- enum intel_engine_id id;
- char buf[128], *s;
- int len;
-
- val &= (1 << I915_NUM_ENGINES) - 1;
-
- if (vgpu->scan_nonprivbb == val)
- return 0;
-
- if (!val)
- goto done;
-
- len = sprintf(buf,
- "gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:",
- vgpu->id);
-
- s = buf + len;
-
- for (id = 0; id < I915_NUM_ENGINES; id++) {
- struct intel_engine_cs *engine;
-
- engine = dev_priv->engine[id];
- if (engine && (val & (1 << id))) {
- len = snprintf(s, 4, "%d, ", engine->id);
- s += len;
- } else
- val &= ~(1 << id);
- }
-
- if (val)
- sprintf(s, "low performance expected.");
-
- pr_warn("%s\n", buf);
-done:
vgpu->scan_nonprivbb = val;
return 0;
}
@@ -220,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->dev_priv->drm.primary;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a62bdf9be682..a1696e9ce4b6 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
@@ -69,9 +69,10 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+ if (drm_WARN_ON(&dev_priv->drm,
+ pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
@@ -168,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -207,20 +208,47 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
- vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
- LCPLL_PLL_ENABLE |
- LCPLL_PLL_LOCK;
- vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
-
+ /*
+ * Only 1 PIPE enabled in current vGPU display and PIPE_A is
+ * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A,
+ * TRANSCODER_A can be enabled. PORT_x depends on the input of
+ * setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x
+ * so we fixed to DPLL0 here.
+ * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode
+ */
+ vgpu_vreg_t(vgpu, DPLL_CTRL1) =
+ DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0);
+ vgpu_vreg_t(vgpu, DPLL_CTRL1) |=
+ DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0);
+ vgpu_vreg_t(vgpu, LCPLL1_CTL) =
+ LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK;
+ vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0);
+ /*
+ * Golden M/N are calculated based on:
+ * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
+ * DP link clk 1620 MHz and non-constant_n.
+ * TODO: calculate DP link symbol clk and stream clk m/n.
+ */
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -235,12 +263,18 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -255,12 +289,18 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -319,9 +359,10 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
- if (WARN_ON(resolution >= GVT_EDID_NUM))
+ if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
return -EINVAL;
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
@@ -389,7 +430,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -421,7 +462,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
int pipe;
mutex_lock(&vgpu->vgpu_lock);
- for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ for_each_pipe(vgpu->gvt->gt->i915, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
mutex_unlock(&vgpu->vgpu_lock);
}
@@ -454,11 +495,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
*/
void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* TODO: add more platforms support */
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv)) {
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;
@@ -484,7 +525,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
*/
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv))
@@ -506,7 +547,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
*/
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
intel_vgpu_init_i2c_edid(vgpu);
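The display.c hunks above repeat one idiom on the virtual registers: read-modify-write by clearing a whole field with &= ~MASK and then OR-ing in the new selection (DPLL clock source, DDI mode, port), so stale field bits can never survive a reconfiguration. The generic pattern, as a short sketch with placeholder mask/shift values:

#include <stdint.h>

/* Clear-then-set field update, as applied to TRANS_DDI_FUNC_CTL and
 * DPLL_CTRL2 in the hunks above. */
static inline uint32_t reg_update_field(uint32_t reg, uint32_t mask,
					unsigned int shift, uint32_t val)
{
	reg &= ~mask;			/* e.g. ~TRANS_DDI_PORT_MASK */
	reg |= (val << shift) & mask;	/* e.g. PORT_B << its shift  */
	return reg;
}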
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index ae139f0877ae..37fc460414a8 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -67,11 +67,11 @@ static int vgpu_gem_get_pages(
u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
- if (WARN_ON(!fb_info))
+ if (drm_WARN_ON(&dev_priv->drm, !fb_info))
return -ENODEV;
vgpu = fb_info->obj->vgpu;
- if (WARN_ON(!vgpu))
+ if (drm_WARN_ON(&dev_priv->drm, !vgpu))
return -ENODEV;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct vfio_device_gfx_plane_info *gfx_plane_info = args;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct intel_vgpu_fb_info fb_info;
@@ -523,7 +523,7 @@ out:
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 1fe6124918f1..190651df5db1 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -135,7 +135,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -147,13 +147,13 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- if (IS_BROXTON(dev_priv))
+ if (IS_BROXTON(i915))
port = bxt_get_port_from_gmbus0(pin_select);
- else if (IS_COFFEELAKE(dev_priv))
+ else if (IS_COFFEELAKE(i915))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
- if (WARN_ON(port < 0))
+ if (drm_WARN_ON(&i915->drm, port < 0))
return 0;
vgpu->display.i2c_edid.state = I2C_GMBUS;
@@ -276,7 +276,9 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- WARN_ON(1);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ drm_WARN_ON(&i915->drm, 1);
return 0;
}
@@ -371,7 +373,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
@@ -399,7 +403,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
@@ -473,6 +479,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
unsigned int offset,
void *p_data)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int msg_length, ret_msg_size;
int msg, addr, ctrl, op;
@@ -532,9 +539,9 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
* support the gfx driver to do EDID access.
*/
} else {
- if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+ if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ))
return;
- if (WARN_ON(msg_length != 4))
+ if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
if (i2c_edid->edid_available && i2c_edid->slave_selected) {
unsigned char val = edid_get_byte(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index d6e7a1189bad..dd25c3024370 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -39,8 +39,7 @@
#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0
-#define execlist_ring_mmio(gvt, ring_id, offset) \
- (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
@@ -54,12 +53,12 @@ static int context_switch_events[] = {
[VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(unsigned int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
{
- if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
- return context_switch_events[ring_id];
+ return context_switch_events[engine->id];
}
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
struct intel_vgpu *vgpu = execlist->vgpu;
struct execlist_status_format status;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt,
- ring_id, _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
}
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
- struct execlist_context_status_format *status,
- bool trigger_interrupt_later)
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
unsigned long hwsp_gpa;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
- ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_BUF);
+ ctx_status_ptr_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
@@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
/* Update the CSB and CSB write pointer in HWSP */
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- vgpu->hws_pga[ring_id]);
+ vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
- write_pointer * 8,
- status, 8);
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+ status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa +
- intel_hws_csb_write_index(dev_priv) * 4,
- &write_pointer, 4);
+ hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+ &write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
- vgpu->id, write_pointer, offset, status->ldw, status->udw);
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
if (trigger_interrupt_later)
return;
intel_vgpu_trigger_virtual_event(vgpu,
- ring_id_to_context_switch_event(execlist->ring_id));
+ to_context_switch_event(execlist->engine));
}
static int emulate_execlist_ctx_schedule_out(
@@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
struct execlist_status_format status;
status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
int ret;
if (!workload->emulate_schedule_in)
@@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+ ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+ ctx);
if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
return ret;
@@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist =
+ &s->execlist[workload->engine->id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
bool lite_restore = false;
int ret = 0;
- gvt_dbg_el("complete workload %p status %d\n", workload,
- workload->status);
+ gvt_dbg_el("complete workload %p status %d\n",
+ workload, workload->status);
- if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
+ if (workload->status || vgpu->resetting_eng & workload->engine->mask)
goto out;
- if (!list_empty(workload_q_head(vgpu, ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, workload->engine))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
@@ -436,14 +429,15 @@ out:
return ret;
}
-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
- struct execlist_ctx_descriptor_format *desc,
- bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
- workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+ workload = intel_vgpu_create_workload(vgpu, engine, desc);
if (IS_ERR(workload))
return PTR_ERR(workload);
@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->emulate_schedule_in = emulate_schedule_in;
if (emulate_schedule_in)
- workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+ workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
- emulate_schedule_in);
+ emulate_schedule_in);
intel_vgpu_queue_workload(workload);
return 0;
}
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i]->valid)
continue;
- ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+ ret = submit_context(vgpu, engine, desc[i], i == 0);
if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
@@ -504,22 +499,22 @@ inv_desc:
return -EINVAL;
}
-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
memset(execlist, 0, sizeof(*execlist));
execlist->vgpu = vgpu;
- execlist->ring_id = ring_id;
+ execlist->engine = engine;
execlist->slot[0].index = 0;
execlist->slot[1].index = 1;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
+ ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
@@ -529,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
@@ -544,12 +539,12 @@ static void clean_execlist(struct intel_vgpu *vgpu,
static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
- init_vgpu_execlist(vgpu, engine->id);
+ init_vgpu_execlist(vgpu, engine);
}
static int init_execlist(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5c0c1fd30c83..d62cd14605a3 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -170,16 +170,17 @@ struct intel_vgpu_execlist {
struct intel_vgpu_execlist_slot *running_slot;
struct intel_vgpu_execlist_slot *pending_slot;
struct execlist_ctx_descriptor_format *running_context;
- int ring_id;
struct intel_vgpu *vgpu;
struct intel_vgpu_elsp_dwords elsp_dwords;
+ const struct intel_engine_cs *engine;
};
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask);
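With the engine pointer now stored in struct intel_vgpu_execlist, the old three-argument execlist_ring_mmio(gvt, ring_id, offset) lookup collapses to a base-plus-offset computation, and the status-buffer and status-pointer registers at 0x370 and 0x3A0 (the offsets defined at the top of the execlist.c diff) address per engine directly. A tiny sketch of the resulting addressing:

struct engine { unsigned int mmio_base; };

#define EL_OFFSET_STATUS_BUF	0x370	/* offsets visible in the hunk */
#define EL_OFFSET_STATUS_PTR	0x3A0

/* was: gvt->dev_priv->engine[ring_id]->mmio_base + (offset) */
#define execlist_reg(e, offset)	((e)->mmio_base + (offset))

static unsigned int status_ptr_reg(const struct engine *e)
{
	return execlist_reg(e, EL_OFFSET_STATUS_PTR);
}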
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 8bb292b01271..0889ad8291b0 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
@@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu)
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, fmt;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
@@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode)
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index b0c1fda32977..990a181094e3 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = {
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
-
- *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+ *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
_MMIO(offset));
return 0;
}
@@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h;
void *firmware;
void *p;
@@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private);
@@ -153,8 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h;
unsigned long id, crc32_start;
const void *mem;
@@ -208,8 +205,7 @@ invalid_firmware:
int intel_gvt_load_firmware(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h;
const struct firmware *fw;
@@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
gvt_dbg_core("request hw state firmware %s...\n", path);
- ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+ ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
kfree(path);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4a4828074cb7..2a4b23f8aa74 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -71,8 +71,10 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
- if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
- "invalid guest gmadr %llx\n", g_addr))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
+ "invalid guest gmadr %llx\n", g_addr))
return -EACCES;
if (vgpu_gmadr_is_aperture(vgpu, g_addr))
@@ -87,8 +89,10 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
- if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
- "invalid host gmadr %llx\n", h_addr))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+ "invalid host gmadr %llx\n", h_addr))
return -EACCES;
if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
@@ -275,24 +279,23 @@ static inline int get_pse_type(int type)
return gtt_type_table[type].pse_entry_type;
}
-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{
- void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
return readq(addr);
}
-static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(dev_priv);
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_pre(gt);
+ intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ mmio_hw_access_post(gt);
}
-static void write_pte64(struct drm_i915_private *dev_priv,
- unsigned long index, u64 pte)
+static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{
- void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
writeq(pte, addr);
}
@@ -315,7 +318,7 @@ static inline int gtt_get_entry64(void *pt,
if (WARN_ON(ret))
return ret;
} else if (!pt) {
- e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+ e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
} else {
e->val64 = *((u64 *)pt + index);
}
@@ -340,7 +343,7 @@ static inline int gtt_set_entry64(void *pt,
if (WARN_ON(ret))
return ret;
} else if (!pt) {
- write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+ write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
} else {
*((u64 *)pt + index) = e->val64;
}
@@ -734,7 +737,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
- struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
@@ -819,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
- struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
dma_addr_t daddr;
int ret;
@@ -940,6 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *e)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
enum intel_gvt_gtt_type cur_pt_type;
@@ -952,7 +956,9 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
if (!gtt_type_is_pt(cur_pt_type) ||
!gtt_type_is_pt(cur_pt_type + 1)) {
- WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+ drm_WARN(&i915->drm, 1,
+ "Invalid page table type, cur_pt_type is: %d\n",
+ cur_pt_type);
return -EINVAL;
}
@@ -1044,7 +1050,7 @@ fail:
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
@@ -1153,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
unsigned long pfn;
- if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+ if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
@@ -2314,7 +2320,7 @@ out:
ggtt_invalidate_pte(vgpu, &e);
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
- ggtt_invalidate(gvt->dev_priv);
+ ggtt_invalidate(gvt->gt);
return 0;
}
@@ -2347,16 +2353,18 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
enum intel_gvt_gtt_type type)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
- if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ if (drm_WARN_ON(&i915->drm,
+ type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
@@ -2410,7 +2418,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
int i;
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2682,7 +2690,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page;
- struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2731,7 +2739,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
- struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
@@ -2779,7 +2787,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
struct intel_gvt_gtt_entry old_entry;
@@ -2809,7 +2816,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
}
- ggtt_invalidate(dev_priv);
+ ggtt_invalidate(gvt->gt);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 8f37eefa0a02..9e1787867894 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -35,6 +35,7 @@
#include <linux/kthread.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
@@ -49,15 +50,15 @@ static const char * const supported_hypervisors[] = {
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
const char *name)
{
+ const char *driver_name =
+ dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
int i;
- struct intel_vgpu_type *t;
- const char *driver_name = dev_driver_string(
- &gvt->dev_priv->drm.pdev->dev);
+ name += strlen(driver_name) + 1;
for (i = 0; i < gvt->num_types; i++) {
- t = &gvt->types[i];
- if (!strncmp(t->name, name + strlen(driver_name) + 1,
- sizeof(t->name)))
+ struct intel_vgpu_type *t = &gvt->types[i];
+
+ if (!strncmp(t->name, name, sizeof(t->name)))
return t;
}
@@ -120,10 +121,8 @@ static struct attribute_group *gvt_vgpu_type_groups[] = {
[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
-static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
- struct attribute_group ***intel_vgpu_type_groups)
+static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
- *type_attrs = gvt_type_attrs;
*intel_vgpu_type_groups = gvt_vgpu_type_groups;
return true;
}
@@ -191,7 +190,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -257,17 +256,17 @@ static int init_service_thread(struct intel_gvt *gvt)
/**
* intel_gvt_clean_device - clean a GVT device
- * @dev_priv: i915 private
+ * @i915: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
*
*/
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+void intel_gvt_clean_device(struct drm_i915_private *i915)
{
- struct intel_gvt *gvt = to_gvt(dev_priv);
+ struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
- if (WARN_ON(!gvt))
+ if (drm_WARN_ON(&i915->drm, !gvt))
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
@@ -285,13 +284,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- kfree(dev_priv->gvt);
- dev_priv->gvt = NULL;
+ kfree(i915->gvt);
}
/**
* intel_gvt_init_device - initialize a GVT device
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
*
* This function is called at the initialization stage, to initialize
* necessary GVT components.
@@ -300,13 +298,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
* Zero on success, negative error code if failed.
*
*/
-int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+int intel_gvt_init_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt;
struct intel_vgpu *vgpu;
int ret;
- if (WARN_ON(dev_priv->gvt))
+ if (drm_WARN_ON(&i915->drm, i915->gvt))
return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
@@ -319,7 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
- gvt->dev_priv = dev_priv;
+ gvt->gt = &i915->gt;
+ i915->gvt = gvt;
init_device_info(gvt);
@@ -378,8 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
- dev_priv->gvt = gvt;
- intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+ intel_gvt_host.dev = &i915->drm.pdev->dev;
intel_gvt_host.initialized = true;
return 0;
@@ -404,6 +402,7 @@ out_clean_mmio_info:
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
+ i915->gvt = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b47c6acaf9c0..58c2c7932e3f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -196,41 +196,21 @@ struct intel_vgpu {
struct dentry *debugfs;
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
- struct {
- struct mdev_device *mdev;
- struct vfio_region *region;
- int num_regions;
- struct eventfd_ctx *intx_trigger;
- struct eventfd_ctx *msi_trigger;
-
- /*
- * Two caches are used to avoid mapping duplicated pages (eg.
- * scratch pages). This help to reduce dma setup overhead.
- */
- struct rb_root gfn_cache;
- struct rb_root dma_addr_cache;
- unsigned long nr_cache_entries;
- struct mutex cache_lock;
-
- struct notifier_block iommu_notifier;
- struct notifier_block group_notifier;
- struct kvm *kvm;
- struct work_struct release_work;
- atomic_t released;
- struct vfio_device *vfio_device;
- } vdev;
-#endif
+ /* Hypervisor-specific device state. */
+ void *vdev;
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
- struct completion vblank_done;
-
u32 scan_nonprivbb;
};
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+ return vgpu->vdev;
+}
+
/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
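
The opaque vdev pointer above replaces the #ifdef'd KVMGT-only struct: each hypervisor backend now keeps its own private state and casts it back through intel_vgpu_vdev(). A sketch of the pattern for a hypothetical second backend (names illustrative; KVMGT's real version appears later in this patch):

struct example_vdev {
	struct intel_vgpu *vgpu;
	/* backend-private fields would live here */
};

static inline struct example_vdev *example_vdev(struct intel_vgpu *vgpu)
{
	return intel_vgpu_vdev(vgpu);
}
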
@@ -306,7 +286,7 @@ struct intel_gvt {
/* scheduler scope lock, protect gvt and vgpu schedule related data */
struct mutex sched_lock;
- struct drm_i915_private *dev_priv;
+ struct intel_gt *gt;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
@@ -376,14 +356,15 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4
+#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)
+
/* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end
+#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
-#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
-#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+#define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
+#define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
@@ -394,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
+#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
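
A worked example of the gvt_ggtt_sz() arithmetic above, assuming a 4 GiB GGTT address space and 4 KiB pages:

/*
 *   entries     = (4ULL << 30) >> PAGE_SHIFT = 1,048,576 PTEs
 *   gvt_ggtt_sz = entries << 3               = 8 MiB of PTE storage,
 * i.e. one 64-bit PTE per 4 KiB page of GM space.
 */
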
@@ -570,8 +551,7 @@ struct intel_gvt_ops {
void (*vgpu_deactivate)(struct intel_vgpu *);
struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
const char *name);
- bool (*get_gvt_attrs)(struct attribute ***type_attrs,
- struct attribute_group ***intel_vgpu_type_groups);
+ bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
@@ -586,14 +566,14 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct intel_gt *gt)
{
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}
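
The pre/post pair above brackets raw MMIO with a runtime-PM wakeref so the device cannot suspend mid-access. A minimal usage sketch (the register choice is illustrative, not taken from this patch):

static u32 example_read_fence_lo(struct intel_gvt *gvt, int i)
{
	u32 val;

	mmio_hw_access_pre(gvt->gt);	/* take a runtime-PM wakeref */
	val = intel_uncore_read(gvt->gt->uncore, FENCE_REG_GEN6_LO(i));
	mmio_hw_access_post(gvt->gt);	/* and drop it again */

	return val;
}
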
/**
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 6d28d72e6c7e..2faf50e1b051 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -49,15 +49,17 @@
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
- if (IS_BROADWELL(gvt->dev_priv))
+ struct drm_i915_private *i915 = gvt->gt->i915;
+
+ if (IS_BROADWELL(i915))
return D_BDW;
- else if (IS_SKYLAKE(gvt->dev_priv))
+ else if (IS_SKYLAKE(i915))
return D_SKL;
- else if (IS_KABYLAKE(gvt->dev_priv))
+ else if (IS_KABYLAKE(i915))
return D_KBL;
- else if (IS_BROXTON(gvt->dev_priv))
+ else if (IS_BROXTON(i915))
return D_BXT;
- else if (IS_COFFEELAKE(gvt->dev_priv))
+ else if (IS_COFFEELAKE(i915))
return D_CFL;
return 0;
@@ -142,25 +144,25 @@ static int new_mmio_info(struct intel_gvt *gvt,
}
/**
- * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
- * Ring ID on success, negative error code if failed.
+ * The engine containing the offset within its mmio page.
*/
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int offset)
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
- enum intel_engine_id id;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
offset &= ~GENMASK(11, 0);
- for_each_engine(engine, gvt->dev_priv, id) {
+ for_each_engine(engine, gvt->gt, id)
if (engine->mmio_base == offset)
- return id;
- }
- return -ENODEV;
+ return engine;
+
+ return NULL;
}
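
Masking off the low 12 bits maps any register inside an engine's 4 KiB mmio page to that engine; for example:

/*
 *   offset 0x2348 (PS_INVOCATION_COUNT) & ~GENMASK(11, 0) = 0x2000,
 *   which equals the render engine's mmio_base, so the render engine
 *   is returned; an offset matching no engine page yields NULL.
 */
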
#define offset_to_fence_num(offset) \
@@ -217,7 +219,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
{
u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
- if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+ if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
else if (!ips)
@@ -253,7 +255,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
int ret;
@@ -262,10 +264,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(dev_priv);
+ mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_post(gvt->gt);
return 0;
}
@@ -283,7 +285,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
+ if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -345,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
}
- engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
+ engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -460,11 +462,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-/* ascendingly sorted */
+/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
+ _MMIO(0xd80),
GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
- PS_INVOCATION_COUNT,//_MMIO(0x2348)
+ CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
+ PS_INVOCATION_COUNT, //_MMIO(0x2348)
+ PS_DEPTH_COUNT, //_MMIO(0x2350)
GEN8_CS_CHICKEN1,//_MMIO(0x2580)
_MMIO(0x2690),
_MMIO(0x2694),
@@ -489,10 +494,11 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0xe18c),
_MMIO(0xe48c),
_MMIO(0xe5f4),
+ _MMIO(0x64844),
};
/* a simple bsearch */
-static inline bool in_whitelist(unsigned int reg)
+static inline bool in_whitelist(u32 reg)
{
int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
i915_reg_t *array = force_nonpriv_white_list;
@@ -514,26 +520,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
- u32 ring_base;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ret = -EINVAL;
-
- if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
- gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
- vgpu->id, ring_id, offset, bytes);
- return ret;
- }
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
- ring_base = dev_priv->engine[ring_id]->mmio_base;
+ if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, offset, bytes);
+ return -EINVAL;
+ }
- if (in_whitelist(reg_nonpriv) ||
- reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
- ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
- bytes);
- } else
+ if (!in_whitelist(reg_nonpriv) &&
+ reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
- vgpu->id, *(u32 *)p_data, offset);
+ vgpu->id, reg_nonpriv, offset);
+ } else
+ intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
return 0;
}
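
The "simple bsearch" referenced above relies on the whitelist staying sorted, which the new entries preserve. A self-contained sketch of the same search over raw offsets:

static bool example_in_sorted(const u32 *arr, int n, u32 reg)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (arr[mid] == reg)
			return true;
		if (arr[mid] < reg)
			lo = mid + 1;	/* search upper half */
		else
			hi = mid - 1;	/* search lower half */
	}
	return false;
}
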
@@ -756,7 +757,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 pipe = DSPSURF_TO_PIPE(offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
@@ -797,7 +798,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
@@ -821,7 +822,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
unsigned int reg)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum intel_gvt_event_type event;
if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
@@ -836,7 +837,7 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
event = AUX_CHANNEL_D;
else {
- WARN_ON(true);
+ drm_WARN_ON(&dev_priv->drm, true);
return -EINVAL;
}
@@ -924,11 +925,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
+ if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
- } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+ } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
/* write to the data registers */
return 0;
@@ -1244,8 +1245,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+ struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
char *env[3] = {NULL, NULL, NULL};
char vmid_str[20];
char display_ready_str[20];
@@ -1306,13 +1306,15 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int pf_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 val = *(u32 *)p_data;
if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
- WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
- vgpu->id);
+ drm_WARN_ONCE(&i915->drm, true,
+			      "VM(%d): guest is trying to scale a plane\n",
+ vgpu->id);
return 0;
}
@@ -1360,13 +1362,15 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 mode;
write_vreg(vgpu, offset, p_data, bytes);
mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
- WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+ drm_WARN_ONCE(&i915->drm, 1,
+ "VM(%d): iGVT-g doesn't support GuC\n",
vgpu->id);
return 0;
}
@@ -1377,10 +1381,12 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 trtte = *(u32 *)p_data;
if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
- WARN(1, "VM(%d): Use physical address for TRTT!\n",
+ drm_WARN(&i915->drm, 1,
+ "VM(%d): Use physical address for TRTT!\n",
vgpu->id);
return -EINVAL;
}
@@ -1427,9 +1433,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+ IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+ IS_COFFEELAKE(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1439,7 +1445,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
- } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1452,9 +1458,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
}
break;
case SKL_PCODE_CDCLK_CONTROL:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+ IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+ IS_COFFEELAKE(vgpu->gvt->gt->i915))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -1478,24 +1484,26 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = *(u32 *)p_data;
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value);
return -EINVAL;
}
+
/*
* Need to emulate all the HWSP register write to ensure host can
* update the VM CSB status correctly. Here listed registers can
* support BDW, SKL or other platforms with same HWSP registers.
*/
- if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+ if (unlikely(!engine)) {
gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
offset);
return -EINVAL;
}
- vgpu->hws_pga[ring_id] = value;
+ vgpu->hws_pga[engine->id] = value;
gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
vgpu->id, value, offset);
@@ -1507,7 +1515,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{
u32 v = *(u32 *)p_data;
- if (IS_BROXTON(vgpu->gvt->dev_priv))
+ if (IS_BROXTON(vgpu->gvt->gt->i915))
v &= (1 << 31) | (1 << 29);
else
v &= (1 << 31) | (1 << 29) | (1 << 9) |
@@ -1654,26 +1662,24 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- int ring_id;
- u32 ring_base;
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(gvt, offset);
- ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
/**
* Read HW reg in following case
* a. the offset isn't a ring mmio
* b. the offset's ring is running on hw.
* c. the offset is ring time stamp mmio
*/
- if (ring_id >= 0)
- ring_base = dev_priv->engine[ring_id]->mmio_base;
-
- if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
- offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
- offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
+
+ if (!engine ||
+ vgpu == gvt->scheduler.engine_owner[engine->id] ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
+ mmio_hw_access_pre(gvt->gt);
+ vgpu_vreg(vgpu, offset) =
+ intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
+ mmio_hw_access_post(gvt->gt);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -1682,22 +1688,23 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret = 0;
- if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
+ if (drm_WARN_ON(&i915->drm, !engine))
return -EINVAL;
- execlist = &vgpu->submission.execlist[ring_id];
+ execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
- ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ ret = intel_vgpu_submit_execlist(vgpu, engine);
if(ret)
- gvt_vgpu_err("fail submit workload on ring %d\n",
- ring_id);
+			gvt_vgpu_err("failed to submit workload on ring %s\n",
+ engine->name);
}
++execlist->elsp_dwords.index;
@@ -1709,12 +1716,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data = *(u32 *)p_data;
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
bool enable_execlist;
int ret;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915))
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
@@ -1723,7 +1731,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
data & _MASKED_BIT_ENABLE(2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -1743,16 +1751,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
- gvt_dbg_core("EXECLIST %s on ring %d\n",
- (enable_execlist ? "enabling" : "disabling"),
- ring_id);
+ gvt_dbg_core("EXECLIST %s on ring %s\n",
+ (enable_execlist ? "enabling" : "disabling"),
+ engine->name);
if (!enable_execlist)
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- BIT(ring_id),
- INTEL_VGPU_EXECLIST_SUBMISSION);
+ engine->mask,
+ INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -1876,7 +1884,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
@@ -2415,9 +2423,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
MMIO_D(SPLL_CTL, D_ALL);
MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
@@ -2693,7 +2701,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2882,7 +2890,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
@@ -2902,7 +2910,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
- MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+ MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -3131,7 +3139,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
@@ -3367,7 +3375,7 @@ static struct gvt_mmio_block mmio_blocks[] = {
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
int ret;
@@ -3379,20 +3387,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret)
goto err;
- if (IS_BROADWELL(dev_priv)) {
+ if (IS_BROADWELL(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_COFFEELAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_BROXTON(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
@@ -3541,13 +3549,14 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info;
struct gvt_mmio_block *mmio_block;
gvt_mmio_func func;
int ret;
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 11accd3e1023..540017fed908 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -245,6 +245,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
u32 ier = *(u32 *)p_data;
@@ -255,7 +256,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
if (info->has_upstream_irq)
@@ -282,6 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
@@ -289,7 +291,7 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
(vgpu_vreg(vgpu, reg) ^ iir));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
vgpu_vreg(vgpu, reg) &= ~iir;
@@ -319,6 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = {
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
struct intel_gvt_irq_map *map = irq->irq_map;
struct intel_gvt_irq_info *up_irq_info = NULL;
@@ -340,7 +343,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
if (!up_irq_info)
up_irq_info = irq->info[map->up_irq_group];
else
- WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+ drm_WARN_ON(&i915->drm, up_irq_info !=
+ irq->info[map->up_irq_group]);
bit = map->up_irq_bit;
@@ -350,7 +354,7 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- if (WARN_ON(!up_irq_info))
+ if (drm_WARN_ON(&i915->drm, !up_irq_info))
return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
@@ -536,7 +540,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
+ if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
@@ -568,7 +572,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
- if (IS_BROADWELL(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->gt->i915)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
@@ -581,7 +585,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
+ } else if (INTEL_GEN(gvt->gt->i915) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -618,13 +622,14 @@ static struct intel_gvt_irq_ops gen8_irq_ops = {
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq *irq = &gvt->irq;
gvt_event_virt_handler_t handler;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
handler = get_event_virt_handler(irq, event);
- WARN_ON(!handler);
+ drm_WARN_ON(&i915->drm, !handler);
handler(irq, event, vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 3259a1fa69e1..eee530453aa6 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -108,6 +108,37 @@ struct gvt_dma {
struct kref ref;
};
+struct kvmgt_vdev {
+ struct intel_vgpu *vgpu;
+ struct mdev_device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+
+ /*
+	 * Two caches are used to avoid mapping duplicated pages (e.g.
+	 * scratch pages). This helps to reduce DMA setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
+ struct mutex cache_lock;
+
+ struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
+ struct vfio_device *vfio_device;
+ struct vfio_group *vfio_group;
+};
+
+static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
+{
+ return intel_vgpu_vdev(vgpu);
+}
+
static inline bool handle_valid(unsigned long handle)
{
return !!(handle & ~0xff);
@@ -120,6 +151,8 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
int total_pages;
int npage;
int ret;
@@ -129,8 +162,8 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
- WARN_ON(ret != 1);
+ ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
+ drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -138,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long base_pfn = 0;
int total_pages;
int npage;
@@ -152,8 +186,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
cur_gfn, ret);
@@ -187,7 +221,7 @@ err:
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct page *page = NULL;
int ret;
@@ -210,7 +244,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);
@@ -219,7 +253,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
- struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -237,7 +271,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -258,6 +292,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
@@ -270,7 +305,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
- link = &vgpu->vdev.gfn_cache.rb_node;
+ link = &vdev->gfn_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -281,11 +316,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->gfn_node, parent, link);
- rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
+ rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
/* dma_addr_cache maps dma addr to struct gvt_dma. */
parent = NULL;
- link = &vgpu->vdev.dma_addr_cache.rb_node;
+ link = &vdev->dma_addr_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -296,46 +331,51 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->dma_addr_node, parent, link);
- rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
- vgpu->vdev.nr_cache_entries++;
+ vdev->nr_cache_entries++;
return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
- rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
- rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ rb_erase(&entry->gfn_node, &vdev->gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
kfree(entry);
- vgpu->vdev.nr_cache_entries--;
+ vdev->nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
for (;;) {
- mutex_lock(&vgpu->vdev.cache_lock);
- node = rb_first(&vgpu->vdev.gfn_cache);
+ mutex_lock(&vdev->cache_lock);
+ node = rb_first(&vdev->gfn_cache);
if (!node) {
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
{
- vgpu->vdev.gfn_cache = RB_ROOT;
- vgpu->vdev.dma_addr_cache = RB_ROOT;
- vgpu->vdev.nr_cache_entries = 0;
- mutex_init(&vgpu->vdev.cache_lock);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ vdev->gfn_cache = RB_ROOT;
+ vdev->dma_addr_cache = RB_ROOT;
+ vdev->nr_cache_entries = 0;
+ mutex_init(&vdev->cache_lock);
}
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
@@ -409,16 +449,18 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
- void *base = vgpu->vdev.region[i].data;
+ void *base = vdev->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- if (pos >= vgpu->vdev.region[i].size || iswrite) {
+
+ if (pos >= vdev->region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
- count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ count = min(count, (size_t)(vdev->region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
@@ -512,7 +554,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
struct vfio_edid_region *region =
- (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+ (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos < region->vfio_edid_regs.edid_offset) {
@@ -544,32 +586,34 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct vfio_region *region;
- region = krealloc(vgpu->vdev.region,
- (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
- vgpu->vdev.region = region;
- vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
- vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
- vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
- vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
- vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
- vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
- vgpu->vdev.num_regions++;
+ vdev->region = region;
+ vdev->region[vdev->num_regions].type = type;
+ vdev->region[vdev->num_regions].subtype = subtype;
+ vdev->region[vdev->num_regions].ops = ops;
+ vdev->region[vdev->num_regions].size = size;
+ vdev->region[vdev->num_regions].flags = flags;
+ vdev->region[vdev->num_regions].data = data;
+ vdev->num_regions++;
return 0;
}
static int kvmgt_get_vfio_device(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- vgpu->vdev.vfio_device = vfio_device_get_from_dev(
- mdev_dev(vgpu->vdev.mdev));
- if (!vgpu->vdev.vfio_device) {
+ vdev->vfio_device = vfio_device_get_from_dev(
+ mdev_dev(vdev->mdev));
+ if (!vdev->vfio_device) {
gvt_vgpu_err("failed to get vfio device\n");
return -ENODEV;
}
@@ -637,10 +681,12 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
static void kvmgt_put_vfio_device(void *vgpu)
{
- if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+ struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
+
+ if (WARN_ON(!vdev->vfio_device))
return;
- vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+ vfio_device_put(vdev->vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
@@ -669,9 +715,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
goto out;
}
- INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+ INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
- vgpu->vdev.mdev = mdev;
+ kvmgt_vdev(vgpu)->mdev = mdev;
mdev_set_drvdata(mdev, vgpu);
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
@@ -696,9 +742,10 @@ static int intel_vgpu_remove(struct mdev_device *mdev)
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.iommu_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ iommu_notifier);
+ struct intel_vgpu *vgpu = vdev->vgpu;
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -708,7 +755,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
iov_pfn = unmap->iova >> PAGE_SHIFT;
end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
- mutex_lock(&vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
for (; iov_pfn < end_iov_pfn; iov_pfn++) {
entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
if (!entry)
@@ -718,7 +765,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
return NOTIFY_OK;
@@ -727,16 +774,16 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
static int intel_vgpu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.group_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ group_notifier);
/* the only action we care about */
if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vgpu->vdev.kvm = data;
+ vdev->kvm = data;
if (!data)
- schedule_work(&vgpu->vdev.release_work);
+ schedule_work(&vdev->release_work);
}
return NOTIFY_OK;
@@ -745,15 +792,17 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
static int intel_vgpu_open(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long events;
int ret;
+ struct vfio_group *vfio_group;
- vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+ vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
@@ -762,13 +811,21 @@ static int intel_vgpu_open(struct mdev_device *mdev)
events = VFIO_GROUP_NOTIFY_SET_KVM;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
goto undo_iommu;
}
+ vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+ if (IS_ERR_OR_NULL(vfio_group)) {
+ ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
+ gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
+ goto undo_register;
+ }
+ vdev->vfio_group = vfio_group;
+
/* Take a module reference as mdev core doesn't take
* a reference for vendor driver.
*/
@@ -781,51 +838,60 @@ static int intel_vgpu_open(struct mdev_device *mdev)
intel_gvt_ops->vgpu_activate(vgpu);
- atomic_set(&vgpu->vdev.released, 0);
+ atomic_set(&vdev->released, 0);
return ret;
undo_group:
+ vfio_group_put_external_user(vdev->vfio_group);
+ vdev->vfio_group = NULL;
+
+undo_register:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
undo_iommu:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
out:
return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct eventfd_ctx *trigger;
- trigger = vgpu->vdev.msi_trigger;
+ trigger = vdev->msi_trigger;
if (trigger) {
eventfd_ctx_put(trigger);
- vgpu->vdev.msi_trigger = NULL;
+ vdev->msi_trigger = NULL;
}
}
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct kvmgt_guest_info *info;
int ret;
if (!handle_valid(vgpu->handle))
return;
- if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+ if (atomic_cmpxchg(&vdev->released, 0, 1))
return;
intel_gvt_ops->vgpu_release(vgpu);
- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
- WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
+ &vdev->iommu_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
- WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
+ &vdev->group_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for group failed: %d\n", ret);
/* dereference module reference taken at open */
module_put(THIS_MODULE);
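
The open/release pair above now acquires three resources (iommu notifier, group notifier, group reference) and unwinds them in reverse on failure. The skeleton of that kernel goto-ladder idiom, with hypothetical stub steps standing in for the real calls:

static int acquire_a(void) { return 0; }	/* stand-ins for the    */
static int acquire_b(void) { return 0; }	/* notifier/group steps */
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int example_open(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;
	ret = acquire_b();
	if (ret)
		goto undo_a;
	ret = acquire_c();
	if (ret)
		goto undo_b;
	return 0;		/* success: a, b and c all held */

undo_b:
	release_b();
undo_a:
	release_a();
out:
	return ret;
}
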
@@ -834,8 +900,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
kvmgt_guest_exit(info);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
+ vfio_group_put_external_user(vdev->vfio_group);
- vgpu->vdev.kvm = NULL;
+ vdev->kvm = NULL;
vgpu->handle = 0;
}
@@ -848,10 +915,10 @@ static void intel_vgpu_release(struct mdev_device *mdev)
static void intel_vgpu_release_work(struct work_struct *work)
{
- struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
- vdev.release_work);
+ struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
+ release_work);
- __intel_vgpu_release(vgpu);
+ __intel_vgpu_release(vdev->vgpu);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -913,7 +980,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return -EINVAL;
}
- aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+ aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
ALIGN_DOWN(off, PAGE_SIZE),
count + offset_in_page(off));
if (!aperture_va)
@@ -933,12 +1000,13 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -967,11 +1035,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_ROM_REGION_INDEX:
break;
default:
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
index -= VFIO_PCI_NUM_REGIONS;
- return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ return vdev->region[index].ops->rw(vgpu, buf, count,
ppos, is_write);
}
@@ -1224,7 +1292,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
- vgpu->vdev.msi_trigger = trigger;
+ kvmgt_vdev(vgpu)->msi_trigger = trigger;
} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
@@ -1276,6 +1344,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long minsz;
gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1294,7 +1363,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions;
+ vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1385,22 +1454,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
.header.version = 1 };
if (info.index >= VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions)
+ vdev->num_regions)
return -EINVAL;
info.index =
array_index_nospec(info.index,
VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions);
+ vdev->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset =
VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->vdev.region[i].size;
- info.flags = vgpu->vdev.region[i].flags;
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
- cap_type.type = vgpu->vdev.region[i].type;
- cap_type.subtype = vgpu->vdev.region[i].subtype;
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
ret = vfio_info_add_capability(&caps,
&cap_type.header,
@@ -1597,12 +1666,10 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- struct attribute **kvm_type_attrs;
struct attribute_group **kvm_vgpu_type_groups;
intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
- &kvm_vgpu_type_groups))
+ if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
return -EFAULT;
intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
@@ -1742,13 +1809,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct kvm *kvm;
vgpu = mdev_get_drvdata(mdev);
if (handle_valid(vgpu->handle))
return -EEXIST;
- kvm = vgpu->vdev.kvm;
+ vdev = kvmgt_vdev(vgpu);
+ kvm = vdev->kvm;
if (!kvm || kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH;
@@ -1769,8 +1838,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- init_completion(&vgpu->vblank_done);
-
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1778,7 +1845,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
info->debugfs_cache_entries = debugfs_create_ulong(
"kvmgt_nr_cache_entries",
0444, vgpu->debugfs,
- &vgpu->vdev.nr_cache_entries);
+ &vdev->nr_cache_entries);
return 0;
}
@@ -1795,9 +1862,17 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
return true;
}
-static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
{
- /* nothing to do here */
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
+
+ if (!vgpu->vdev)
+ return -ENOMEM;
+
+ kvmgt_vdev(vgpu)->vgpu = vgpu;
+
return 0;
}
@@ -1805,29 +1880,34 @@ static void kvmgt_detach_vgpu(void *p_vgpu)
{
int i;
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- if (!vgpu->vdev.region)
+ if (!vdev->region)
return;
- for (i = 0; i < vgpu->vdev.num_regions; i++)
- if (vgpu->vdev.region[i].ops->release)
- vgpu->vdev.region[i].ops->release(vgpu,
- &vgpu->vdev.region[i]);
- vgpu->vdev.num_regions = 0;
- kfree(vgpu->vdev.region);
- vgpu->vdev.region = NULL;
+ for (i = 0; i < vdev->num_regions; i++)
+ if (vdev->region[i].ops->release)
+ vdev->region[i].ops->release(vgpu,
+ &vdev->region[i]);
+ vdev->num_regions = 0;
+ kfree(vdev->region);
+ vdev->region = NULL;
+
+ kfree(vdev);
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
+ vdev = kvmgt_vdev(vgpu);
/*
* When the guest is powered off, msi_trigger is set to NULL, but vgpu's
@@ -1838,10 +1918,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
* enabled by guest. So if msi_trigger is NULL, success is still
* returned and no interrupt is injected into the guest.
*/
- if (vgpu->vdev.msi_trigger == NULL)
+ if (vdev->msi_trigger == NULL)
return 0;
- if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ if (eventfd_signal(vdev->msi_trigger, 1) == 1)
return 0;
return -EFAULT;
@@ -1867,26 +1947,26 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
- struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret;
if (!handle_valid(handle))
return -EINVAL;
- info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
- entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+ entry = __gvt_cache_find_gfn(vgpu, gfn);
if (!entry) {
ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else if (entry->size != size) {
@@ -1898,7 +1978,7 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else {
@@ -1906,19 +1986,20 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
*dma_addr = entry->dma_addr;
}
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return 0;
err_unmap:
gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return ret;
}
static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
struct kvmgt_guest_info *info;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret = 0;
@@ -1926,14 +2007,15 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
return -ENODEV;
info = (struct kvmgt_guest_info *)handle;
+ vdev = kvmgt_vdev(info->vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
if (entry)
kref_get(&entry->ref);
else
ret = -ENOMEM;
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return ret;
}
@@ -1949,52 +2031,35 @@ static void __gvt_dma_release(struct kref *ref)
static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
- struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
if (!handle_valid(handle))
return;
- info = (struct kvmgt_guest_info *)handle;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
- entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ mutex_lock(&vdev->cache_lock);
+ entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_put(&entry->ref, __gvt_dma_release);
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
void *buf, unsigned long len, bool write)
{
struct kvmgt_guest_info *info;
- struct kvm *kvm;
- int idx, ret;
- bool kthread = current->mm == NULL;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
-
- if (kthread) {
- if (!mmget_not_zero(kvm->mm))
- return -EFAULT;
- use_mm(kvm->mm);
- }
- idx = srcu_read_lock(&kvm->srcu);
- ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
- kvm_read_guest(kvm, gpa, buf, len);
- srcu_read_unlock(&kvm->srcu, idx);
-
- if (kthread) {
- unuse_mm(kvm->mm);
- mmput(kvm->mm);
- }
-
- return ret;
+ return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
+ gpa, buf, len, write);
}
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
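Annotation: the kvmgt.c hunks above move the release work item out of struct intel_vgpu and into the new kvmgt_vdev wrapper, so the work handler now recovers its container via container_of(). A minimal userspace sketch of that pattern follows; the types and names are illustrative stand-ins, not the kernel's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };	/* stand-in, not the kernel's */

struct kvmgt_vdev_demo {
	int id;
	struct work_struct release_work;
};

static void release_handler(struct work_struct *work)
{
	/* recover the wrapper from the embedded member */
	struct kvmgt_vdev_demo *vdev =
		container_of(work, struct kvmgt_vdev_demo, release_work);

	printf("releasing vdev %d\n", vdev->id);
}

int main(void)
{
	struct kvmgt_vdev_demo vdev = { .id = 7 };

	release_handler(&vdev.release_work);
	return 0;
}

The same offsetof() arithmetic is what lets a work_struct be embedded in any wrapper while the callback still reaches the wrapper's other fields.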
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index a55178884d67..291993615af9 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -103,6 +103,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -114,15 +115,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+ !IS_ALIGNED(offset, 8)))
goto err;
- if (WARN_ON(bytes != 4 && bytes != 8))
+ if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
goto err;
- if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm,
+ !reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
@@ -132,16 +135,16 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
goto out;
}
- if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
goto out;
}
- if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm, !reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, bytes)))
goto err;
}
@@ -175,6 +178,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -187,15 +191,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+ !IS_ALIGNED(offset, 8)))
goto err;
- if (WARN_ON(bytes != 4 && bytes != 8))
+ if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
goto err;
- if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm,
+ !reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
@@ -205,7 +211,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
goto out;
}
- if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
goto out;
}
@@ -245,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
- if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ if (IS_BROXTON(vgpu->gvt->gt->i915)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
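Annotation: the mmio.c hunks replace bare WARN_ON() with drm_WARN_ON(&i915->drm, ...), so each backtrace is tagged with the device that tripped it, which matters on multi-GPU systems. A hedged sketch of the idea (a demo macro in the same spirit, not the DRM implementation):

#include <stdio.h>

struct demo_dev { const char *name; };

/* GCC statement-expression macro, loosely modelled on drm_WARN_ON() */
#define dev_warn_on(dev, cond)						\
	({								\
		int __ret = !!(cond);					\
		if (__ret)						\
			fprintf(stderr, "%s: WARN: %s\n",		\
				(dev)->name, #cond);			\
		__ret;							\
	})

int main(void)
{
	struct demo_dev dev = { .name = "i915-demo" };
	int bytes = 16;

	if (dev_warn_on(&dev, bytes > 8))
		return 1;
	return 0;
}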
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 2e68f4b02c94..cc4812648bf4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -69,8 +69,8 @@ struct intel_gvt_mmio_info {
struct hlist_node node;
};
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int reg);
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index aaf15916d29a..2ccaf78f96e8 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = {
[VECS0] = 0xcb00,
};
-static void load_render_mocs(struct drm_i915_private *dev_priv)
+static void load_render_mocs(const struct intel_engine_cs *engine)
{
- struct intel_gvt *gvt = dev_priv->gvt;
- i915_reg_t offset;
+ struct intel_gvt *gvt = engine->i915->gvt;
+ struct intel_uncore *uncore = engine->uncore;
u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
+ i915_reg_t offset;
int ring_id, i;
/* Platform doesn't have mocs mmios. */
@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
return;
for (ring_id = 0; ring_id < cnt; ring_id++) {
- if (!HAS_ENGINE(dev_priv, ring_id))
+ if (!HAS_ENGINE(engine->i915, ring_id))
continue;
+
offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
- I915_READ_FW(offset);
+ intel_uncore_read_fw(uncore, offset);
offset.reg += 4;
}
}
@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
gen9_render_mocs.l3cc_table[i] =
- I915_READ_FW(offset);
+ intel_uncore_read_fw(uncore, offset);
offset.reg += 4;
}
gen9_render_mocs.initialized = true;
@@ -214,13 +216,11 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
*cs++ = MI_LOAD_REGISTER_IMM(count);
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->ring_id != ring_id ||
- !mmio->in_context)
+ if (mmio->id != ring_id || !mmio->in_context)
continue;
*cs++ = i915_mmio_reg_offset(mmio->reg);
- *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
- (mmio->mask << 16);
+ *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, ring_id);
}
@@ -344,10 +344,10 @@ static u32 gen8_tlb_mmio_offset_list[] = {
[VECS0] = 0x4270,
};
-static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
@@ -357,13 +357,13 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
if (!regs)
return;
- if (WARN_ON(ring_id >= cnt))
+ if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
return;
- if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
+ if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
return;
- reg = _MMIO(regs[ring_id]);
+ reg = _MMIO(regs[engine->id]);
/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
* we need to put a forcewake when invalidating RCS TLB caches,
@@ -372,30 +372,27 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
+ if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw);
intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
- gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
+ gvt_vgpu_err("timeout in invalidate ring %s tlb\n",
+ engine->name);
else
vgpu_vreg_t(vgpu, reg) = 0;
intel_uncore_forcewake_put(uncore, fw);
- gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+ gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
- int ring_id)
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
- i915_reg_t offset, l3_offset;
- u32 old_v, new_v;
-
u32 regs[] = {
[RCS0] = 0xc800,
[VCS0] = 0xc900,
@@ -403,36 +400,38 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
[BCS0] = 0xcc00,
[VECS0] = 0xcb00,
};
+ struct intel_uncore *uncore = engine->uncore;
+ i915_reg_t offset, l3_offset;
+ u32 old_v, new_v;
int i;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
return;
- if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
+ if (engine->id == RCS0 && IS_GEN(engine->i915, 9))
return;
if (!pre && !gen9_render_mocs.initialized)
- load_render_mocs(dev_priv);
+ load_render_mocs(engine);
- offset.reg = regs[ring_id];
+ offset.reg = regs[engine->id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
if (pre)
old_v = vgpu_vreg_t(pre, offset);
else
- old_v = gen9_render_mocs.control_table[ring_id][i];
+ old_v = gen9_render_mocs.control_table[engine->id][i];
if (next)
new_v = vgpu_vreg_t(next, offset);
else
- new_v = gen9_render_mocs.control_table[ring_id][i];
+ new_v = gen9_render_mocs.control_table[engine->id][i];
if (old_v != new_v)
- I915_WRITE_FW(offset, new_v);
+ intel_uncore_write_fw(uncore, offset, new_v);
offset.reg += 4;
}
- if (ring_id == RCS0) {
+ if (engine->id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -445,7 +444,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
new_v = gen9_render_mocs.l3cc_table[i];
if (old_v != new_v)
- I915_WRITE_FW(l3_offset, new_v);
+ intel_uncore_write_fw(uncore, l3_offset, new_v);
l3_offset.reg += 4;
}
@@ -467,38 +466,40 @@ bool is_inhibit_context(struct intel_context *ce)
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next,
- int ring_id)
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
+ struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s;
struct engine_mmio *mmio;
u32 old_v, new_v;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (INTEL_GEN(dev_priv) >= 9)
- switch_mocs(pre, next, ring_id);
+ if (INTEL_GEN(engine->i915) >= 9)
+ switch_mocs(pre, next, engine);
- for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+ for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->ring_id != ring_id)
+ if (mmio->id != engine->id)
continue;
/*
* No need to do save or restore of the mmio which is in context
* state image on gen9, it's initialized by lri command and
* save or restore with context together.
*/
- if (IS_GEN(dev_priv, 9) && mmio->in_context)
+ if (IS_GEN(engine->i915, 9) && mmio->in_context)
continue;
// save
if (pre) {
- vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+ vgpu_vreg_t(pre, mmio->reg) =
+ intel_uncore_read_fw(uncore, mmio->reg);
if (mmio->mask)
vgpu_vreg_t(pre, mmio->reg) &=
- ~(mmio->mask << 16);
+ ~(mmio->mask << 16);
old_v = vgpu_vreg_t(pre, mmio->reg);
- } else
- old_v = mmio->value = I915_READ_FW(mmio->reg);
+ } else {
+ old_v = mmio->value =
+ intel_uncore_read_fw(uncore, mmio->reg);
+ }
// restore
if (next) {
@@ -509,12 +510,12 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow[ring_id]))
+ !is_inhibit_context(s->shadow[engine->id]))
continue;
if (mmio->mask)
new_v = vgpu_vreg_t(next, mmio->reg) |
- (mmio->mask << 16);
+ (mmio->mask << 16);
else
new_v = vgpu_vreg_t(next, mmio->reg);
} else {
@@ -526,7 +527,7 @@ static void switch_mmio(struct intel_vgpu *pre,
new_v = mmio->value;
}
- I915_WRITE_FW(mmio->reg, new_v);
+ intel_uncore_write_fw(uncore, mmio->reg, new_v);
trace_render_mmio(pre ? pre->id : 0,
next ? next->id : 0,
@@ -536,39 +537,37 @@ static void switch_mmio(struct intel_vgpu *pre,
}
if (next)
- handle_tlb_pending_event(next, ring_id);
+ handle_tlb_pending_event(next, engine);
}
/**
* intel_gvt_switch_render_mmio - switch mmio context of specific engine
* @pre: the last vGPU that owned the engine
* @next: the vGPU to switch to
- * @ring_id: specify the engine
+ * @engine: the engine
*
* If pre is NULL, it indicates that the host owns the engine. If next is
* NULL, it indicates that we are switching to a host workload.
*/
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id)
+ struct intel_vgpu *next,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
-
- if (WARN_ON(!pre && !next))
+ if (WARN(!pre && !next, "switch ring %s from host to HOST\n",
+ engine->name))
return;
- gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+ gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-
/**
* We are using the raw mmio access wrappers to improve the
* performance of batch mmio read/write, so we need to
* handle forcewake manually.
*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ switch_mmio(pre, next, engine);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
/**
@@ -580,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->dev_priv) >= 9) {
+ if (INTEL_GEN(gvt->gt->i915) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
@@ -595,7 +594,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->in_context) {
- gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
}
}
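Annotation: mmio_context.c swaps I915_READ_FW/I915_WRITE_FW for intel_uncore_read_fw/intel_uncore_write_fw on the engine's uncore. The _fw accessors deliberately skip per-access forcewake handling, which is why intel_gvt_switch_mmio() brackets the whole batch with a single forcewake get/put pair. A compile-able sketch of that discipline, with all names as stand-ins:

#include <assert.h>
#include <stdio.h>

struct demo_uncore { int fw_count; };

static void forcewake_get(struct demo_uncore *u) { u->fw_count++; }
static void forcewake_put(struct demo_uncore *u) { u->fw_count--; }

static unsigned int read_fw(struct demo_uncore *u, unsigned int reg)
{
	assert(u->fw_count > 0);	/* caller must hold forcewake */
	return reg ^ 0xdeadbeef;	/* fake register value */
}

int main(void)
{
	struct demo_uncore uncore = { 0 };
	unsigned int reg;

	/* one wakeref around the whole batch, as in switch_mmio() */
	forcewake_get(&uncore);
	for (reg = 0xc800; reg < 0xc800 + 16; reg += 4)
		printf("0x%x = 0x%x\n", reg, read_fw(&uncore, reg));
	forcewake_put(&uncore);
	return 0;
}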
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index f7eaa442403f..970704b18f23 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -37,7 +37,7 @@
#define __GVT_RENDER_H__
struct engine_mmio {
- int ring_id;
+ enum intel_engine_id id;
i915_reg_t reg;
u32 mask;
bool in_context;
@@ -45,7 +45,8 @@ struct engine_mmio {
};
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id);
+ struct intel_vgpu *next,
+ const struct intel_engine_cs *engine);
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 2369d4a9af94..036b74fe9298 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -39,8 +39,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
enum intel_engine_id i;
struct intel_engine_cs *engine;
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- if (!list_empty(workload_q_head(vgpu, i)))
+ for_each_engine(engine, vgpu->gvt->gt, i) {
+ if (!list_empty(workload_q_head(vgpu, engine)))
return true;
}
@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = true;
/* still have uncompleted workload? */
- for_each_engine(engine, gvt->dev_priv, i) {
- if (scheduler->current_workload[i])
+ for_each_engine(engine, gvt->gt, i) {
+ if (scheduler->current_workload[engine->id])
return;
}
@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = false;
/* wake up workload dispatch thread */
- for_each_engine(engine, gvt->dev_priv, i)
- wake_up(&scheduler->waitq[i]);
+ for_each_engine(engine, gvt->gt, i)
+ wake_up(&scheduler->waitq[engine->id]);
}
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
@@ -444,9 +444,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
- int ring_id;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (!vgpu_data->active)
return;
@@ -467,10 +468,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
- for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
- if (scheduler->engine_owner[ring_id] == vgpu) {
- intel_gvt_switch_mmio(vgpu, NULL, ring_id);
- scheduler->engine_owner[ring_id] = NULL;
+ for_each_engine(engine, vgpu->gvt->gt, id) {
+ if (scheduler->engine_owner[engine->id] == vgpu) {
+ intel_gvt_switch_mmio(vgpu, NULL, engine);
+ scheduler->engine_owner[engine->id] = NULL;
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
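Annotation: sched_policy.c stops indexing ring ids 0..I915_NUM_ENGINES and instead iterates only the engines that actually exist on the gt, so holes in the engine table are skipped. A stand-in sketch of such a sparse iterator (demo types, kernel for_each_if style):

#include <stdio.h>

#define NUM_ENGINES 4

struct demo_engine { int id; const char *name; };
struct demo_gt { struct demo_engine *engine[NUM_ENGINES]; };

/* walk only the engines that exist; body binds to the else branch */
#define for_each_engine(e, gt, i)					\
	for ((i) = 0; (i) < NUM_ENGINES; (i)++)				\
		if (((e) = (gt)->engine[(i)]) == NULL) {} else

int main(void)
{
	struct demo_engine rcs0 = { 0, "rcs0" }, vecs0 = { 3, "vecs0" };
	struct demo_gt gt = { { &rcs0, NULL, NULL, &vecs0 } };
	struct demo_engine *engine;
	int i;

	for_each_engine(engine, &gt, i)
		printf("checking owner of %s\n", engine->name);
	return 0;
}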
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 685d1e04a5ff..e92ed96c9b23 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save)
{
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0;
@@ -98,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS0)
+ if (workload->engine->id != RCS0)
return;
if (save) {
@@ -128,7 +128,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -154,7 +153,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS0) {
+ if (workload->engine->id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -175,14 +174,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
return 0;
- gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring %s workload lrca %x",
+ workload->engine->name,
+ workload->ctx_desc.lrca);
+ context_page_num = workload->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
+ if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -210,38 +209,43 @@ static inline bool is_gvt_request(struct i915_request *rq)
return intel_context_force_single_submission(rq->context);
}
-static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+static void save_ring_hw_state(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+ struct intel_uncore *uncore = engine->uncore;
i915_reg_t reg;
- reg = RING_INSTDONE(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD_UDW(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_INSTDONE(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD_UDW(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
}
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct i915_request *req = data;
+ struct i915_request *rq = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
- shadow_ctx_notifier_block[req->engine->id]);
+ shadow_ctx_notifier_block[rq->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- enum intel_engine_id ring_id = req->engine->id;
+ enum intel_engine_id ring_id = rq->engine->id;
struct intel_vgpu_workload *workload;
unsigned long flags;
- if (!is_gvt_request(req)) {
+ if (!is_gvt_request(rq)) {
spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- NULL, ring_id);
+ NULL, rq->engine);
scheduler->engine_owner[ring_id] = NULL;
}
spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
@@ -259,7 +263,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- workload->vgpu, ring_id);
+ workload->vgpu, rq->engine);
scheduler->engine_owner[ring_id] = workload->vgpu;
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
@@ -268,11 +272,11 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
atomic_set(&workload->shadow_ctx_active, 0);
break;
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
break;
default:
WARN_ON(1);
@@ -286,17 +290,17 @@ static void
shadow_context_descriptor_update(struct intel_context *ce,
struct intel_vgpu_workload *workload)
{
- u64 desc = ce->lrc_desc;
+ u64 desc = ce->lrc.desc;
/*
* Update bits 0-11 of the context descriptor which includes flags
* like GEN8_CTX_* cached in desc_template
*/
- desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
- desc |= workload->ctx_desc.addressing_mode <<
+ desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+ desc |= (u64)workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- ce->lrc_desc = desc;
+ ce->lrc.desc = desc;
}
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
@@ -375,7 +379,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
struct i915_page_directory * const pd =
i915_pd_entry(ppgtt->pd, i);
-
+ /* Skip for now: the current i915 ppgtt alloc won't allocate a
+ top-level pdp for non-4-level tables, which doesn't impact the
+ shadow ppgtt. */
+ if (!pd)
+ break;
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
@@ -391,7 +399,7 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
if (workload->req)
return 0;
- rq = i915_request_create(s->shadow[workload->ring_id]);
+ rq = i915_request_create(s->shadow[workload->engine->id]);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
return PTR_ERR(rq);
@@ -420,15 +428,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(s->shadow[workload->ring_id],
+ if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(s->shadow[workload->engine->id],
workload);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
return ret;
- if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
+ if (workload->engine->id == RCS0 &&
+ workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -436,6 +445,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
workload->shadow = true;
return 0;
+
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
return ret;
@@ -567,12 +577,8 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base;
-
- ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
- vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+ vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
+ workload->rb_start;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -608,7 +614,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -625,7 +630,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
update_shadow_pdps(workload);
- set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
+ set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
@@ -677,11 +682,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct i915_request *rq;
- int ring_id = workload->ring_id;
int ret;
- gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
- ring_id, workload);
+ gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
+ workload->engine->name, workload);
mutex_lock(&vgpu->vgpu_lock);
@@ -710,8 +714,8 @@ out:
}
if (!IS_ERR_OR_NULL(workload->req)) {
- gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
- ring_id, workload->req);
+ gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
+ workload->engine->name, workload->req);
i915_request_add(workload->req);
workload->dispatched = true;
}
@@ -722,8 +726,8 @@ err_req:
return ret;
}
-static struct intel_vgpu_workload *pick_next_workload(
- struct intel_gvt *gvt, int ring_id)
+static struct intel_vgpu_workload *
+pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
@@ -735,27 +739,27 @@ static struct intel_vgpu_workload *pick_next_workload(
* bail out
*/
if (!scheduler->current_vgpu) {
- gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
goto out;
}
if (scheduler->need_reschedule) {
- gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
goto out;
}
if (!scheduler->current_vgpu->active ||
- list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+ list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out;
/*
* still have current workload, maybe the workload dispatcher
* failed to submit it for some reason, resubmit it.
*/
- if (scheduler->current_workload[ring_id]) {
- workload = scheduler->current_workload[ring_id];
- gvt_dbg_sched("ring id %d still have current workload %p\n",
- ring_id, workload);
+ if (scheduler->current_workload[engine->id]) {
+ workload = scheduler->current_workload[engine->id];
+ gvt_dbg_sched("ring %s still have current workload %p\n",
+ engine->name, workload);
goto out;
}
@@ -765,13 +769,14 @@ static struct intel_vgpu_workload *pick_next_workload(
* will wait the current workload is finished when trying to
* schedule out a vgpu.
*/
- scheduler->current_workload[ring_id] = container_of(
- workload_q_head(scheduler->current_vgpu, ring_id)->next,
- struct intel_vgpu_workload, list);
+ scheduler->current_workload[engine->id] =
+ list_first_entry(workload_q_head(scheduler->current_vgpu,
+ engine),
+ struct intel_vgpu_workload, list);
- workload = scheduler->current_workload[ring_id];
+ workload = scheduler->current_workload[engine->id];
- gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+ gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
@@ -783,14 +788,12 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 ring_base;
u32 head, tail;
u16 wrap_count;
@@ -811,14 +814,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
- ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ ring_base = rq->engine->mmio_base;
vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
+ if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -869,7 +872,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
@@ -966,54 +969,47 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
mutex_unlock(&vgpu->vgpu_lock);
}
-struct workload_thread_param {
- struct intel_gvt *gvt;
- int ring_id;
-};
-
-static int workload_thread(void *priv)
+static int workload_thread(void *arg)
{
- struct workload_thread_param *p = (struct workload_thread_param *)priv;
- struct intel_gvt *gvt = p->gvt;
- int ring_id = p->ring_id;
+ struct intel_engine_cs *engine = arg;
+ const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+ struct intel_gvt *gvt = engine->i915->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
-
- kfree(p);
- gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+ gvt_dbg_core("workload thread for ring %s started\n", engine->name);
while (!kthread_should_stop()) {
- add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ intel_wakeref_t wakeref;
+
+ add_wait_queue(&scheduler->waitq[engine->id], &wait);
do {
- workload = pick_next_workload(gvt, ring_id);
+ workload = pick_next_workload(gvt, engine);
if (workload)
break;
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
} while (!kthread_should_stop());
- remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+ remove_wait_queue(&scheduler->waitq[engine->id], &wait);
if (!workload)
break;
- gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
- workload->ring_id, workload,
- workload->vgpu->id);
+ gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
+ engine->name, workload,
+ workload->vgpu->id);
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(engine->uncore->rpm);
- gvt_dbg_sched("ring id %d will dispatch workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s will dispatch workload %p\n",
+ engine->name, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore,
+ FORCEWAKE_ALL);
/*
* Update the vReg of the vGPU which submitted this
* workload. The vGPU may use these registers for checking
@@ -1030,21 +1026,21 @@ static int workload_thread(void *priv)
goto complete;
}
- gvt_dbg_sched("ring id %d wait workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s wait workload %p\n",
+ engine->name, workload);
i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
- workload, workload->status);
+ workload, workload->status);
- complete_current_workload(gvt, ring_id);
+ complete_current_workload(gvt, engine->id);
if (need_force_wake)
- intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore,
+ FORCEWAKE_ALL);
- intel_runtime_pm_put_unchecked(rpm);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1073,7 +1069,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
gvt_dbg_core("clean workload scheduler\n");
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
atomic_notifier_chain_unregister(
&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
@@ -1084,7 +1080,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct workload_thread_param *param = NULL;
struct intel_engine_cs *engine;
enum intel_engine_id i;
int ret;
@@ -1093,20 +1088,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
init_waitqueue_head(&scheduler->workload_complete_wq);
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
init_waitqueue_head(&scheduler->waitq[i]);
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto err;
- }
-
- param->gvt = gvt;
- param->ring_id = i;
-
- scheduler->thread[i] = kthread_run(workload_thread, param,
- "gvt workload %d", i);
+ scheduler->thread[i] = kthread_run(workload_thread, engine,
+ "gvt:%s", engine->name);
if (IS_ERR(scheduler->thread[i])) {
gvt_err("fail to create workload thread\n");
ret = PTR_ERR(scheduler->thread[i]);
@@ -1118,11 +1104,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
atomic_notifier_chain_register(&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
}
+
return 0;
+
err:
intel_gvt_clean_workload_scheduler(gvt);
- kfree(param);
- param = NULL;
return ret;
}
@@ -1160,7 +1146,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, id)
+ for_each_engine(engine, vgpu->gvt->gt, id)
intel_context_unpin(s->shadow[id]);
kmem_cache_destroy(s->workloads);
@@ -1217,7 +1203,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_ppgtt *ppgtt;
@@ -1230,7 +1216,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
i915_context_ppgtt_root_save(s, ppgtt);
- for_each_engine(engine, i915, i) {
+ for_each_engine(engine, vgpu->gvt->gt, i) {
struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]);
@@ -1246,7 +1232,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
ce->vm = i915_vm_get(&ppgtt->vm);
intel_context_set_single_submission(ce);
- if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+ /* Max ring buffer size */
+ if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
const unsigned int ring_size = 512 * SZ_4K;
ce->ring = __intel_context_ring_size(ring_size);
@@ -1282,7 +1269,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
out_shadow_ctx:
i915_context_ppgtt_root_restore(s, ppgtt);
- for_each_engine(engine, i915, i) {
+ for_each_engine(engine, vgpu->gvt->gt, i) {
if (IS_ERR(s->shadow[i]))
break;
@@ -1309,6 +1296,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask,
unsigned int interface)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission;
const struct intel_vgpu_submission_ops *ops[] = {
[INTEL_VGPU_EXECLIST_SUBMISSION] =
@@ -1316,10 +1304,11 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
};
int ret;
- if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
return -EINVAL;
- if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+ if (drm_WARN_ON(&i915->drm,
+ interface == 0 && engine_mask != ALL_ENGINES))
return -EINVAL;
if (s->active)
@@ -1441,7 +1430,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
- * @ring_id: ring index
+ * @engine: the engine
* @desc: a guest context descriptor
*
* This function is called when creating a vGPU workload.
@@ -1452,14 +1441,14 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
*
*/
struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct list_head *q = workload_q_head(vgpu, engine);
struct intel_vgpu_workload *last_workload = NULL;
struct intel_vgpu_workload *workload = NULL;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
u32 guest_head;
@@ -1486,10 +1475,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
list_for_each_entry_reverse(last_workload, q, list) {
if (same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n",
- ring_id);
+ gvt_dbg_el("ring %s cur workload == last\n",
+ engine->name);
gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
+ last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
@@ -1499,7 +1488,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
}
}
- gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+ gvt_dbg_el("ring %s begin a new workload\n", engine->name);
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1519,7 +1508,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (IS_ERR(workload))
return workload;
- workload->ring_id = ring_id;
+ workload->engine = engine;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
@@ -1528,7 +1517,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS0) {
+ if (engine->id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1566,8 +1555,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
}
}
- gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
- workload, ring_id, head, tail, start, ctl);
+ gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
+ workload, engine->name, head, tail, start, ctl);
ret = prepare_mm(workload);
if (ret) {
@@ -1578,10 +1567,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
/* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow.
*/
- if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(&dev_priv->runtime_pm);
- ret = intel_gvt_scan_and_shadow_workload(workload);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ if (list_empty(q)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
+ ret = intel_gvt_scan_and_shadow_workload(workload);
}
if (ret) {
@@ -1601,7 +1591,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
list_add_tail(&workload->list,
- workload_q_head(workload->vgpu, workload->ring_id));
+ workload_q_head(workload->vgpu, workload->engine));
intel_gvt_kick_schedule(workload->vgpu->gvt);
- wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+ wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
}
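Annotation: scheduler.c no longer kzalloc()s a {gvt, ring_id} parameter per workload thread; kthread_run() is handed the engine itself and the thread derives gvt, uncore and runtime pm from it, removing an allocation and its error path. A userspace sketch with pthreads standing in for kthreads (struct layout is illustrative):

#include <pthread.h>
#include <stdio.h>

struct engine {
	int id;
	const char *name;
};

static void *workload_thread(void *arg)
{
	struct engine *engine = arg;	/* no wrapper struct to free */

	printf("workload thread for ring %s (id %d) started\n",
	       engine->name, engine->id);
	return NULL;
}

int main(void)
{
	struct engine engines[] = { { 0, "rcs0" }, { 1, "vcs0" } };
	pthread_t tid[2];
	int i;

	for (i = 0; i < 2; i++)
		pthread_create(&tid[i], NULL, workload_thread, &engines[i]);
	for (i = 0; i < 2; i++)
		pthread_join(tid[i], NULL);
	return 0;
}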
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index c50d14a9ce85..bf7fc0ca4cb1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -79,7 +79,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
@@ -129,8 +129,8 @@ struct intel_vgpu_shadow_bb {
bool ppgtt;
};
-#define workload_q_head(vgpu, ring_id) \
- (&(vgpu->submission.workload_q_head[ring_id]))
+#define workload_q_head(vgpu, e) \
+ (&(vgpu)->submission.workload_q_head[(e)->id])
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
@@ -155,7 +155,8 @@ extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;
struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc);
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
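Annotation: besides switching the index from ring_id to (e)->id, the reworked workload_q_head() fully parenthesizes its macro arguments, the usual guard against operator-precedence surprises during expansion. A tiny compile-able illustration with demo types only:

#include <stdio.h>

struct demo_submission { int workload_q_head[2]; };
struct demo_vgpu { struct demo_submission submission; };
struct demo_engine { int id; };

/* fully parenthesized, as in the reworked header */
#define workload_q_head(vgpu, e) \
	(&(vgpu)->submission.workload_q_head[(e)->id])

int main(void)
{
	struct demo_vgpu v = { .submission = { { 10, 20 } } };
	struct demo_engine e = { .id = 1 };

	printf("%d\n", *workload_q_head(&v, &e));	/* prints 20 */
	return 0;
}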
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 345c2aa3b491..1d5ff88078bd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -37,6 +37,7 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* setup the ballooning information */
vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
@@ -69,7 +70,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
- WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+ drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
#define VGPU_MAX_WEIGHT 16
@@ -148,12 +149,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN(gvt->dev_priv, 8))
+ if (IS_GEN(gvt->gt->i915, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
- vgpu_types[i].name);
- else if (IS_GEN(gvt->dev_priv, 9))
+ vgpu_types[i].name);
+ else if (IS_GEN(gvt->gt->i915, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
- vgpu_types[i].name);
+ vgpu_types[i].name);
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
@@ -271,8 +272,9 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
- WARN(vgpu->active, "vGPU is still active!\n");
+ drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
/*
* remove idr first so later clean can judge if need to stop
@@ -432,9 +434,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- /*TODO: add more platforms support */
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
- ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index b0a499753526..c4048628188a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,6 +7,7 @@
#include <linux/debugobjects.h>
#include "gt/intel_context.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"
@@ -390,13 +391,23 @@ out:
return err;
}
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
+ struct dma_fence *prev;
+
/* We expect the caller to manage the exclusive timeline ordering */
GEM_BUG_ON(i915_active_is_idle(ref));
- if (!__i915_active_fence_set(&ref->excl, f))
+ rcu_read_lock();
+ prev = __i915_active_fence_set(&ref->excl, f);
+ if (prev)
+ prev = dma_fence_get_rcu(prev);
+ else
atomic_inc(&ref->count);
+ rcu_read_unlock();
+
+ return prev;
}
bool i915_active_acquire_if_busy(struct i915_active *ref)
@@ -442,6 +453,9 @@ static void enable_signaling(struct i915_active_fence *active)
{
struct dma_fence *fence;
+ if (unlikely(is_barrier(active)))
+ return;
+
fence = i915_active_fence_get(active);
if (!fence)
return;
@@ -450,26 +464,49 @@ static void enable_signaling(struct i915_active_fence *active)
dma_fence_put(fence);
}
-int i915_active_wait(struct i915_active *ref)
+static int flush_barrier(struct active_node *it)
{
- struct active_node *it, *n;
- int err = 0;
+ struct intel_engine_cs *engine;
- might_sleep();
+ if (likely(!is_barrier(&it->base)))
+ return 0;
- if (!i915_active_acquire_if_busy(ref))
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
return 0;
- /* Flush lazy signals */
+ return intel_engine_flush_barriers(engine);
+}
+
+static int flush_lazy_signals(struct i915_active *ref)
+{
+ struct active_node *it, *n;
+ int err = 0;
+
enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- if (is_barrier(&it->base)) /* unconnected idle barrier */
- continue;
+ err = flush_barrier(it); /* unconnected idle barrier? */
+ if (err)
+ break;
enable_signaling(&it->base);
}
- /* Any fence added after the wait begins will not be auto-signaled */
+ return err;
+}
+
+int i915_active_wait(struct i915_active *ref)
+{
+ int err;
+
+ might_sleep();
+
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+
+ /* Any fence added after the wait begins will not be auto-signaled */
+ err = flush_lazy_signals(ref);
i915_active_release(ref);
if (err)
return err;
@@ -481,25 +518,81 @@ int i915_active_wait(struct i915_active *ref)
return 0;
}
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int __await_active(struct i915_active_fence *active,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
+{
+ struct dma_fence *fence;
+
+ if (is_barrier(active)) /* XXX flush the barrier? */
+ return 0;
+
+ fence = i915_active_fence_get(active);
+ if (fence) {
+ int err;
+
+ err = fn(arg, fence);
+ dma_fence_put(fence);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int await_active(struct i915_active *ref,
+ unsigned int flags,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
{
int err = 0;
+ /* We must always wait for the exclusive fence! */
if (rcu_access_pointer(ref->excl.fence)) {
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&ref->excl.fence);
- rcu_read_unlock();
- if (fence) {
- err = i915_request_await_dma_fence(rq, fence);
- dma_fence_put(fence);
+ err = __await_active(&ref->excl, fn, arg);
+ if (err)
+ return err;
+ }
+
+ if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ err = __await_active(&it->base, fn, arg);
+ if (err)
+ break;
}
+ i915_active_release(ref);
+ if (err)
+ return err;
}
- /* In the future we may choose to await on all fences */
+ return 0;
+}
- return err;
+static int rq_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_request_await_dma_fence(arg, fence);
+}
+
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, rq_await_fence, rq);
+}
+
+static int sw_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_sw_fence_await_dma_fence(arg, fence, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+}
+
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, sw_await_fence, fence);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
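
The refactor above is a classic callback hoist: the old i915_request_await_active() body becomes a generic await_active() walker, and the request and sw-fence entry points shrink to one-line adapters (rq_await_fence, sw_await_fence). A self-contained sketch of the shape, using toy fence and waiter types rather than the DRM structures:

#include <stdio.h>

struct fence { int seqno; };

/* One traversal, parameterised by how the caller awaits each fence. */
static int for_each_fence(struct fence *fences, int n,
			  int (*fn)(void *arg, struct fence *f), void *arg)
{
	for (int i = 0; i < n; i++) {
		int err = fn(arg, &fences[i]);
		if (err < 0)
			return err;	/* first failure wins */
	}
	return 0;
}

/* Adapter 1: a "request" awaits each fence. */
static int rq_await(void *arg, struct fence *f)
{
	printf("request %s waits on seqno %d\n", (const char *)arg, f->seqno);
	return 0;
}

/* Adapter 2: a "software fence" awaits each fence. */
static int sw_await(void *arg, struct fence *f)
{
	printf("sw-fence %s waits on seqno %d\n", (const char *)arg, f->seqno);
	return 0;
}

int main(void)
{
	struct fence fences[] = { { 1 }, { 2 }, { 3 } };

	for_each_fence(fences, 3, rq_await, "rq0");
	for_each_fence(fences, 3, sw_await, "sw0");
	return 0;
}
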
@@ -623,6 +716,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* We can then use the preallocated nodes in
* i915_active_acquire_barrier()
*/
+ GEM_BUG_ON(!mask);
for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
struct llist_node *prev = first;
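
GEM_BUG_ON(!mask) documents a precondition that would otherwise fail silently: for_each_engine_masked() over an empty mask iterates zero times, so no barrier nodes would be preallocated and the caller would never learn why. A tiny model of that failure mode, assuming nothing beyond a loop over set bits:

#include <assert.h>
#include <stdio.h>

static void preallocate_for_mask(unsigned int mask)
{
	assert(mask);	/* an empty mask would be a silent no-op */

	for (unsigned int m = mask; m; m &= m - 1)	/* clear lowest bit */
		printf("preallocate barrier for engine %d\n",
		       __builtin_ctz(m));
}

int main(void)
{
	preallocate_for_mask(0xb);	/* engines 0, 1 and 3 */
	return 0;
}
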
@@ -812,7 +906,6 @@ __i915_active_fence_set(struct i915_active_fence *active,
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
}
- GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
list_add_tail(&active->cb.node, &fence->cb_list);
spin_unlock_irqrestore(fence->lock, flags);
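
Removing the GEM_BUG_ON on active->fence reads like the usual fate of an assertion on state that another thread may legally rewrite between the two lock sections: it asserted an ordering, not an invariant. A minimal pthread sketch of why such a check can trip spuriously; the variable names are illustrative only:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic long active_fence = 1;

static void *writer(void *arg)
{
	(void)arg;
	atomic_store(&active_fence, 2);	/* a concurrent, legal update */
	return NULL;
}

int main(void)
{
	pthread_t t;
	long seen;

	pthread_create(&t, NULL, writer, NULL);
	seen = atomic_load(&active_fence);
	pthread_join(t, NULL);

	/*
	 * Asserting seen == active_fence here would be bogus: both
	 * interleavings are valid, so the "invariant" can fail at random.
	 */
	printf("seen=%ld now=%ld\n", seen, atomic_load(&active_fence));
	return 0;
}
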
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 51e1e854ca55..b3282ae7913c 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -173,7 +173,8 @@ i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
}
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
static inline bool i915_active_has_exclusive(struct i915_active *ref)
{
@@ -182,7 +183,13 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
int i915_active_wait(struct i915_active *ref);
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags);
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
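
The new flags argument defaults to awaiting only the exclusive fence; I915_ACTIVE_AWAIT_ALL opts in to also walking the shared fences tracked in ref->tree, matching the await_active() body earlier in this patch. A toy model of the flag's effect, with stand-in types rather than the i915 structures:

#include <stdio.h>

#define AWAIT_ALL	(1u << 0)

struct active { int excl; int shared[2]; };

static void await_active(const struct active *ref, unsigned int flags)
{
	printf("await exclusive fence %d\n", ref->excl);	/* always */

	if (flags & AWAIT_ALL)
		for (int i = 0; i < 2; i++)
			printf("await shared fence %d\n", ref->shared[i]);
}

int main(void)
{
	struct active ref = { .excl = 7, .shared = { 8, 9 } };

	await_active(&ref, 0);		/* exclusive only */
	await_active(&ref, AWAIT_ALL);	/* exclusive + shared */
	return 0;
}
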
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 66883af64ca1..20babbdb297d 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -312,7 +312,8 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
return block;
out_free:
- __i915_buddy_free(mm, block);
+ if (i != order)
+ __i915_buddy_free(mm, block);
return ERR_PTR(err);
}
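
A guard like this one usually encodes "only undo what you actually did": if the allocation failed before any split was made, the candidate block still sits where the allocator found it and must not be handed to __i915_buddy_free() a second time. A toy error path in that spirit; this is not a faithful model of the buddy internals, just the only-free-on-progress shape:

#include <stdbool.h>
#include <stdio.h>

static bool split(int level)
{
	return level > 3;	/* pretend splits below order 4 fail */
}

static int alloc(int order, int target)
{
	int i = order;

	while (i != target) {
		if (!split(i))
			goto out_free;
		i--;
	}
	printf("allocated block of order %d\n", target);
	return 0;

out_free:
	/* Free only what was actually carved off: if the first split
	 * failed (i == order), the block was never touched. */
	if (i != order)
		printf("free partially split block at order %d\n", i);
	else
		printf("first split failed, nothing to free\n");
	return -1;
}

int main(void)
{
	alloc(6, 4);	/* succeeds */
	alloc(3, 1);	/* first split fails */
	return 0;
}
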
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a0e437aa65b7..189b573d02be 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -803,10 +803,11 @@ static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous) {
- DRM_ERROR("CMD: %s [%d] command table not sorted: "
- "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
- engine->name, engine->id,
- i, j, curr, previous);
+ drm_err(&engine->i915->drm,
+ "CMD: %s [%d] command table not sorted: "
+ "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+ engine->name, engine->id,
+ i, j, curr, previous);
ret = false;
}
@@ -829,10 +830,11 @@ static bool check_sorted(const struct intel_engine_cs *engine,
u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
if (curr < previous) {
- DRM_ERROR("CMD: %s [%d] register table not sorted: "
- "entry=%d reg=0x%08X prev=0x%08X\n",
- engine->name, engine->id,
- i, curr, previous);
+ drm_err(&engine->i915->drm,
+ "CMD: %s [%d] register table not sorted: "
+ "entry=%d reg=0x%08X prev=0x%08X\n",
+ engine->name, engine->id,
+ i, curr, previous);
ret = false;
}
@@ -1010,18 +1012,21 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
}
if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
- DRM_ERROR("%s: command descriptions are not sorted\n",
- engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: command descriptions are not sorted\n",
+ engine->name);
return;
}
if (!validate_regs_sorted(engine)) {
- DRM_ERROR("%s: registers are not sorted\n", engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: registers are not sorted\n", engine->name);
return;
}
ret = init_hash_table(engine, cmd_tables, cmd_table_count);
if (ret) {
- DRM_ERROR("%s: initialised failed!\n", engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: initialised failed!\n", engine->name);
fini_hash_table(engine);
return;
}
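
The DRM_ERROR() to drm_err() conversions through this file swap a global, device-less log macro for one that tags the owning drm_device, which is what keeps the messages attributable on multi-GPU systems. A rough userspace imitation of the difference; the macro shape here is illustrative, not the real DRM definition:

#include <stdio.h>

struct drm_device { const char *name; };

/* Device-qualified error logging, loosely in the drm_err() style. */
#define drm_err(drm, fmt, ...) \
	fprintf(stderr, "%s: *ERROR* " fmt, (drm)->name, ##__VA_ARGS__)

int main(void)
{
	struct drm_device card0 = { "card0" };
	struct drm_device card1 = { "card1" };

	drm_err(&card0, "command table not sorted: entry=%d\n", 3);
	drm_err(&card1, "registers are not sorted\n");
	return 0;
}
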
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d5a9b8a964c2..6ca797128aa1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,14 +30,6 @@
#include <linux/sort.h>
#include <drm/drm_debugfs.h>
-#include <drm/drm_fourcc.h>
-
-#include "display/intel_display_types.h"
-#include "display/intel_dp.h"
-#include "display/intel_fbc.h"
-#include "display/intel_hdcp.h"
-#include "display/intel_hdmi.h"
-#include "display/intel_psr.h"
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
@@ -48,9 +40,9 @@
#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
+#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_trace.h"
-#include "intel_csr.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -127,8 +119,8 @@ stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
}
}
-static void
-describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+void
+i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
@@ -673,7 +665,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
if (!vma)
seq_puts(m, "unused");
else
- describe_obj(m, vma->obj);
+ i915_debugfs_describe_obj(m, vma->obj);
seq_putc(m, '\n');
}
rcu_read_unlock();
@@ -1004,367 +996,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static int ilk_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- u32 rgvmodectl, rstdbyctl;
- u16 crstandvid;
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
- rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
- crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
-
- seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
- seq_printf(m, "Boost freq: %d\n",
- (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
- MEMMODE_BOOST_FREQ_SHIFT);
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
- seq_printf(m, "SW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_SWMODE_EN));
- seq_printf(m, "Gated voltage change: %s\n",
- yesno(rgvmodectl & MEMMODE_RCLK_GATE));
- seq_printf(m, "Starting frequency: P%d\n",
- (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
- seq_printf(m, "Max P-state: P%d\n",
- (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
- seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
- seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
- seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
- seq_printf(m, "Render standby enabled: %s\n",
- yesno(!(rstdbyctl & RCX_SW_EXIT)));
- seq_puts(m, "Current RS state: ");
- switch (rstdbyctl & RSX_STATUS_MASK) {
- case RSX_STATUS_ON:
- seq_puts(m, "on\n");
- break;
- case RSX_STATUS_RC1:
- seq_puts(m, "RC1\n");
- break;
- case RSX_STATUS_RC1E:
- seq_puts(m, "RC1E\n");
- break;
- case RSX_STATUS_RS1:
- seq_puts(m, "RS1\n");
- break;
- case RSX_STATUS_RS2:
- seq_puts(m, "RS2 (RC6)\n");
- break;
- case RSX_STATUS_RS3:
- seq_puts(m, "RC3 (RC6+)\n");
- break;
- default:
- seq_puts(m, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int i915_forcewake_domains(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- struct intel_uncore_forcewake_domain *fw_domain;
- unsigned int tmp;
-
- seq_printf(m, "user.bypass_count = %u\n",
- uncore->user_forcewake_count);
-
- for_each_fw_domain(fw_domain, uncore, tmp)
- seq_printf(m, "%s.wake_count = %u\n",
- intel_uncore_forcewake_domain_to_str(fw_domain->id),
- READ_ONCE(fw_domain->wake_count));
-
- return 0;
-}
-
-static void print_rc6_res(struct seq_file *m,
- const char *title,
- const i915_reg_t reg)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- seq_printf(m, "%s %u (%llu us)\n", title,
- intel_uncore_read(&i915->uncore, reg),
- intel_rc6_residency_us(&i915->gt.rc6, reg));
-}
-
-static int vlv_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rcctl1, pw_status;
-
- pw_status = I915_READ(VLV_GTLC_PW_STATUS);
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
-
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_EI_MODE(1))));
- seq_printf(m, "Render Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
-
- print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
- print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int gen6_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 gt_core_status, rcctl1, rc6vids = 0;
- u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
-
- gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
-
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
- if (INTEL_GEN(dev_priv) >= 9) {
- gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
- gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
- }
-
- if (INTEL_GEN(dev_priv) <= 7)
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
-
- seq_printf(m, "RC1e Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
- seq_printf(m, "Media Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
- }
- seq_printf(m, "Deep RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
- seq_printf(m, "Deepest RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_puts(m, "Current RC state: ");
- switch (gt_core_status & GEN6_RCn_MASK) {
- case GEN6_RC0:
- if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_puts(m, "Core Power Down\n");
- else
- seq_puts(m, "on\n");
- break;
- case GEN6_RC3:
- seq_puts(m, "RC3\n");
- break;
- case GEN6_RC6:
- seq_puts(m, "RC6\n");
- break;
- case GEN6_RC7:
- seq_puts(m, "RC7\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
-
- seq_printf(m, "Core Power Down: %s\n",
- yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
- }
-
- /* Not exactly sure what this is */
- print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
- GEN6_GT_GFX_RC6_LOCKED);
- print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
- print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
- print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
-
- if (INTEL_GEN(dev_priv) <= 7) {
- seq_printf(m, "RC6 voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
- seq_printf(m, "RC6+ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
- seq_printf(m, "RC6++ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
- }
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int i915_drpc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- int err = -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = vlv_drpc_info(m);
- else if (INTEL_GEN(dev_priv) >= 6)
- err = gen6_drpc_info(m);
- else
- err = ilk_drpc_info(m);
- }
-
- return err;
-}
-
-static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
-
- seq_printf(m, "FB tracking busy bits: 0x%08x\n",
- dev_priv->fb_tracking.busy_bits);
-
- seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- dev_priv->fb_tracking.flip_bits);
-
- return 0;
-}
-
-static int i915_fbc_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_fbc *fbc = &dev_priv->fbc;
- intel_wakeref_t wakeref;
-
- if (!HAS_FBC(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&fbc->lock);
-
- if (intel_fbc_is_active(dev_priv))
- seq_puts(m, "FBC enabled\n");
- else
- seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
-
- if (intel_fbc_is_active(dev_priv)) {
- u32 mask;
-
- if (INTEL_GEN(dev_priv) >= 8)
- mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
- else if (INTEL_GEN(dev_priv) >= 7)
- mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
- else if (INTEL_GEN(dev_priv) >= 5)
- mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
- else if (IS_G4X(dev_priv))
- mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
- else
- mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
- FBC_STAT_COMPRESSED);
-
- seq_printf(m, "Compressing: %s\n", yesno(mask));
- }
-
- mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_fbc_false_color_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
- return -ENODEV;
-
- *val = dev_priv->fbc.false_color;
-
- return 0;
-}
-
-static int i915_fbc_false_color_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- u32 reg;
-
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
- return -ENODEV;
-
- mutex_lock(&dev_priv->fbc.lock);
-
- reg = I915_READ(ILK_DPFC_CONTROL);
- dev_priv->fbc.false_color = val;
-
- I915_WRITE(ILK_DPFC_CONTROL, val ?
- (reg | FBC_CTL_FALSE_COLOR) :
- (reg & ~FBC_CTL_FALSE_COLOR));
-
- mutex_unlock(&dev_priv->fbc.lock);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
- i915_fbc_false_color_get, i915_fbc_false_color_set,
- "%llu\n");
-
-static int i915_ips_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!HAS_IPS(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(i915_modparams.enable_ips));
-
- if (INTEL_GEN(dev_priv) >= 8) {
- seq_puts(m, "Currently: unknown\n");
- } else {
- if (I915_READ(IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "Currently: enabled\n");
- else
- seq_puts(m, "Currently: disabled\n");
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_sr_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- bool sr_enabled = false;
-
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
- if (INTEL_GEN(dev_priv) >= 9)
- /* no global SR status; inspect per-plane WM */;
- else if (HAS_PCH_SPLIT(dev_priv))
- sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
- else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
- IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- else if (IS_I915GM(dev_priv))
- sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
- else if (IS_PINEVIEW(dev_priv))
- sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
- seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
-
- return 0;
-}
-
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1406,70 +1037,6 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
-static int i915_opregion(struct seq_file *m, void *unused)
-{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
-
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
-
- return 0;
-}
-
-static int i915_vbt(struct seq_file *m, void *unused)
-{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
-
- if (opregion->vbt)
- seq_write(m, opregion->vbt, opregion->vbt_size);
-
- return 0;
-}
-
-static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_framebuffer *fbdev_fb = NULL;
- struct drm_framebuffer *drm_fb;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
- fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
-
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fbdev_fb->base.width,
- fbdev_fb->base.height,
- fbdev_fb->base.format->depth,
- fbdev_fb->base.format->cpp[0] * 8,
- fbdev_fb->base.modifier,
- drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, intel_fb_obj(&fbdev_fb->base));
- seq_putc(m, '\n');
- }
-#endif
-
- mutex_lock(&dev->mode_config.fb_lock);
- drm_for_each_fb(drm_fb, dev) {
- struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
- if (fb == fbdev_fb)
- continue;
-
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fb->base.width,
- fb->base.height,
- fb->base.format->depth,
- fb->base.format->cpp[0] * 8,
- fb->base.modifier,
- drm_framebuffer_read_refcount(&fb->base));
- describe_obj(m, intel_fb_obj(&fb->base));
- seq_putc(m, '\n');
- }
- mutex_unlock(&dev->mode_config.fb_lock);
-
- return 0;
-}
-
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
@@ -1515,7 +1082,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (intel_context_pin_if_active(ce)) {
seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
- describe_obj(m, ce->state->obj);
+ i915_debugfs_describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
intel_context_unpin(ce);
@@ -1752,10 +1319,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
return "";
}
-static void i915_guc_log_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
+static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
{
- struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
if (!intel_guc_log_relay_created(log)) {
@@ -1779,11 +1344,12 @@ static void i915_guc_log_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- i915_guc_log_info(m, dev_priv);
+ i915_guc_log_info(m, &uc->guc.log);
/* Add more as required ... */
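
USES_GUC(i915) becoming intel_uc_uses_guc(uc) is a small ownership cleanup: the question is now asked of the uc substructure that actually holds the answer, so a helper such as i915_guc_log_info() can take just the piece it needs. A minimal model of the move, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct uc { bool guc_enabled; };
struct device { struct uc uc; /* ... the rest of the device ... */ };

/* The accessor targets the owning substructure, not the whole device. */
static bool uc_uses_guc(const struct uc *uc)
{
	return uc->guc_enabled;
}

int main(void)
{
	struct device dev = { .uc = { .guc_enabled = true } };

	printf("GuC in use: %d\n", uc_uses_guc(&dev.uc));
	return 0;
}
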
@@ -1793,11 +1359,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->gt.uc.guc;
- struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
+ struct intel_uc *uc = &dev_priv->gt.uc;
+ struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
int index;
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!intel_uc_uses_guc_submission(uc))
return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
@@ -1884,11 +1450,12 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
static int i915_guc_log_level_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
+ *val = intel_guc_log_get_level(&uc->guc.log);
return 0;
}
@@ -1896,11 +1463,12 @@ static int i915_guc_log_level_get(void *data, u64 *val)
static int i915_guc_log_level_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
+ return intel_guc_log_set_level(&uc->guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -1913,7 +1481,7 @@ static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
struct intel_guc *guc = &i915->gt.uc.guc;
struct intel_guc_log *log = &guc->log;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return -ENODEV;
file->private_data = log;
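
Gating the relay open on intel_guc_is_ready() rather than intel_guc_is_running() tightens the check from "firmware running" to "running and actually usable", so the log relay cannot attach to a half-initialised GuC. A sketch of that kind of two-stage readiness predicate; the state fields are illustrative, not the real intel_guc layout:

#include <stdbool.h>
#include <stdio.h>

struct guc { bool fw_running; bool comms_enabled; };

static bool guc_is_running(const struct guc *g)
{
	return g->fw_running;
}

static bool guc_is_ready(const struct guc *g)
{
	/* running is necessary but not sufficient */
	return g->fw_running && g->comms_enabled;
}

int main(void)
{
	struct guc g = { .fw_running = true, .comms_enabled = false };

	printf("running=%d ready=%d\n", guc_is_running(&g), guc_is_ready(&g));
	return 0;
}
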
@@ -1963,253 +1531,6 @@ static const struct file_operations i915_guc_log_relay_fops = {
.release = i915_guc_log_relay_release,
};
-static int i915_psr_sink_status_show(struct seq_file *m, void *data)
-{
- u8 val;
- static const char * const sink_status[] = {
- "inactive",
- "transition to active, capture and display",
- "active, display from RFB",
- "active, capture and display on sink device timings",
- "transition to inactive, capture and display, timing re-sync",
- "reserved",
- "reserved",
- "sink internal error",
- };
- struct drm_connector *connector = m->private;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- int ret;
-
- if (!CAN_PSR(dev_priv)) {
- seq_puts(m, "PSR Unsupported\n");
- return -ENODEV;
- }
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
-
- if (ret == 1) {
- const char *str = "unknown";
-
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- str = sink_status[val];
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
- } else {
- return ret;
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
-
-static void
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
-{
- u32 val, status_val;
- const char *status = "unknown";
-
- if (dev_priv->psr.psr2_enabled) {
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
- val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
- status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
- EDP_PSR2_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- } else {
- static const char * const live_status[] = {
- "IDLE",
- "SRDONACK",
- "SRDENT",
- "BUFOFF",
- "BUFON",
- "AUXACK",
- "SRDOFFACK",
- "SRDENT_ON",
- };
- val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
- status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
- EDP_PSR_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- }
-
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
-}
-
-static int i915_edp_psr_status(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_psr *psr = &dev_priv->psr;
- intel_wakeref_t wakeref;
- const char *status;
- bool enabled;
- u32 val;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
- if (psr->dp)
- seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
- seq_puts(m, "\n");
-
- if (!psr->sink_support)
- return 0;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&psr->lock);
-
- if (psr->enabled)
- status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
- else
- status = "disabled";
- seq_printf(m, "PSR mode: %s\n", status);
-
- if (!psr->enabled) {
- seq_printf(m, "PSR sink not reliable: %s\n",
- yesno(psr->sink_not_reliable));
-
- goto unlock;
- }
-
- if (psr->psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- enabled = val & EDP_PSR2_ENABLE;
- } else {
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- enabled = val & EDP_PSR_ENABLE;
- }
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
- enableddisabled(enabled), val);
- psr_source_status(dev_priv, m);
- seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
- psr->busy_frontbuffer_bits);
-
- /*
- * SKL+ Perf counter is reset to 0 everytime DC state is entered
- */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
- val &= EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance counter: %u\n", val);
- }
-
- if (psr->debug & I915_PSR_DEBUG_IRQ) {
- seq_printf(m, "Last attempted entry at: %lld\n",
- psr->last_entry_attempt);
- seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
- }
-
- if (psr->psr2_enabled) {
- u32 su_frames_val[3];
- int frame;
-
- /*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
- */
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
- frame));
- su_frames_val[frame / 3] = val;
- }
-
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
-
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
-
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
- }
- }
-
-unlock:
- mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int
-i915_edp_psr_debug_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
- int ret;
-
- if (!CAN_PSR(dev_priv))
- return -ENODEV;
-
- DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- ret = intel_psr_debug_set(dev_priv, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return ret;
-}
-
-static int
-i915_edp_psr_debug_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (!CAN_PSR(dev_priv))
- return -ENODEV;
-
- *val = READ_ONCE(dev_priv->psr.debug);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
- i915_edp_psr_debug_get, i915_edp_psr_debug_set,
- "%llu\n");
-
-static int i915_energy_uJ(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- unsigned long long power;
- intel_wakeref_t wakeref;
- u32 units;
-
- if (INTEL_GEN(dev_priv) < 6)
- return -ENODEV;
-
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
- return -ENODEV;
-
- units = (power & 0x1f00) >> 8;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- power = I915_READ(MCH_SECP_NRG_STTS);
-
- power = (1000000 * power) >> units; /* convert to uJ */
- seq_printf(m, "%llu", power);
-
- return 0;
-}
-
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2243,452 +1564,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_power_domain_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int i;
-
- mutex_lock(&power_domains->lock);
-
- seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
- for (i = 0; i < power_domains->power_well_count; i++) {
- struct i915_power_well *power_well;
- enum intel_display_power_domain power_domain;
-
- power_well = &power_domains->power_wells[i];
- seq_printf(m, "%-25s %d\n", power_well->desc->name,
- power_well->count);
-
- for_each_power_domain(power_domain, power_well->desc->domains)
- seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(power_domain),
- power_domains->domain_use_count[power_domain]);
- }
-
- mutex_unlock(&power_domains->lock);
-
- return 0;
-}
-
-static int i915_dmc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct intel_csr *csr;
- i915_reg_t dc5_reg, dc6_reg = {};
-
- if (!HAS_CSR(dev_priv))
- return -ENODEV;
-
- csr = &dev_priv->csr;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
- seq_printf(m, "path: %s\n", csr->fw_path);
-
- if (!csr->dmc_payload)
- goto out;
-
- seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
-
- if (INTEL_GEN(dev_priv) >= 12) {
- dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
- dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
- /*
- * NOTE: DMC_DEBUG3 is a general purpose reg.
- * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
- * reg for DC3CO debugging and validation,
- * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
- */
- seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
- } else {
- dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
- SKL_CSR_DC3_DC5_COUNT;
- if (!IS_GEN9_LP(dev_priv))
- dc6_reg = SKL_CSR_DC5_DC6_COUNT;
- }
-
- seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
- if (dc6_reg.reg)
- seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
-
-out:
- seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
- seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
- seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static void intel_seq_print_mode(struct seq_file *m, int tabs,
- const struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < tabs; i++)
- seq_putc(m, '\t');
-
- seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-}
-
-static void intel_encoder_info(struct seq_file *m,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_connector_list_iter conn_iter;
- struct drm_connector *connector;
-
- seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
- encoder->base.base.id, encoder->base.name);
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- const struct drm_connector_state *conn_state =
- connector->state;
-
- if (conn_state->best_encoder != &encoder->base)
- continue;
-
- seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- }
- drm_connector_list_iter_end(&conn_iter);
-}
-
-static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
-{
- const struct drm_display_mode *mode = panel->fixed_mode;
-
- seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-}
-
-static void intel_hdcp_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- bool hdcp_cap, hdcp2_cap;
-
- hdcp_cap = intel_hdcp_capable(intel_connector);
- hdcp2_cap = intel_hdcp2_capable(intel_connector);
-
- if (hdcp_cap)
- seq_puts(m, "HDCP1.4 ");
- if (hdcp2_cap)
- seq_puts(m, "HDCP2.2 ");
-
- if (!hdcp_cap && !hdcp2_cap)
- seq_puts(m, "None");
-
- seq_puts(m, "\n");
-}
-
-static void intel_dp_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
-
- seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
- seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
- if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
- intel_panel_info(m, &intel_connector->panel);
-
- drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
- &intel_dp->aux);
- if (intel_connector->hdcp.shim) {
- seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
- }
-}
-
-static void intel_dp_mst_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(intel_encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
- intel_connector->port);
-
- seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
-}
-
-static void intel_hdmi_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
-
- seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
- if (intel_connector->hdcp.shim) {
- seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
- }
-}
-
-static void intel_lvds_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- intel_panel_info(m, &intel_connector->panel);
-}
-
-static void intel_connector_info(struct seq_file *m,
- struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_connector_state *conn_state = connector->state;
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
- const struct drm_display_mode *mode;
-
- seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
- connector->base.id, connector->name,
- drm_get_connector_status_name(connector->status));
-
- if (connector->status == connector_status_disconnected)
- return;
-
- seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
- connector->display_info.width_mm,
- connector->display_info.height_mm);
- seq_printf(m, "\tsubpixel order: %s\n",
- drm_get_subpixel_order_name(connector->display_info.subpixel_order));
- seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
-
- if (!encoder)
- return;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (encoder->type == INTEL_OUTPUT_DP_MST)
- intel_dp_mst_info(m, intel_connector);
- else
- intel_dp_info(m, intel_connector);
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- if (encoder->type == INTEL_OUTPUT_LVDS)
- intel_lvds_info(m, intel_connector);
- break;
- case DRM_MODE_CONNECTOR_HDMIA:
- if (encoder->type == INTEL_OUTPUT_HDMI ||
- encoder->type == INTEL_OUTPUT_DDI)
- intel_hdmi_info(m, intel_connector);
- break;
- default:
- break;
- }
-
- seq_printf(m, "\tmodes:\n");
- list_for_each_entry(mode, &connector->modes, head)
- intel_seq_print_mode(m, 2, mode);
-}
-
-static const char *plane_type(enum drm_plane_type type)
-{
- switch (type) {
- case DRM_PLANE_TYPE_OVERLAY:
- return "OVL";
- case DRM_PLANE_TYPE_PRIMARY:
- return "PRI";
- case DRM_PLANE_TYPE_CURSOR:
- return "CUR";
- /*
- * Deliberately omitting default: to generate compiler warnings
- * when a new drm_plane_type gets added.
- */
- }
-
- return "unknown";
-}
-
-static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
-{
- /*
- * According to doc only one DRM_MODE_ROTATE_ is allowed but this
- * will print them all to visualize if the values are misused
- */
- snprintf(buf, bufsize,
- "%s%s%s%s%s%s(0x%08x)",
- (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
- (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
- (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
- (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
- (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
- (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
- rotation);
-}
-
-static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
-{
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- const struct drm_framebuffer *fb = plane_state->uapi.fb;
- struct drm_format_name_buf format_name;
- struct drm_rect src, dst;
- char rot_str[48];
-
- src = drm_plane_state_src(&plane_state->uapi);
- dst = drm_plane_state_dest(&plane_state->uapi);
-
- if (fb)
- drm_get_format_name(fb->format->format, &format_name);
-
- plane_rotation(rot_str, sizeof(rot_str),
- plane_state->uapi.rotation);
-
- seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
- fb ? fb->width : 0, fb ? fb->height : 0,
- DRM_RECT_FP_ARG(&src),
- DRM_RECT_ARG(&dst),
- rot_str);
-}
-
-static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
-{
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_format_name_buf format_name;
- char rot_str[48];
-
- if (!fb)
- return;
-
- drm_get_format_name(fb->format->format, &format_name);
-
- plane_rotation(rot_str, sizeof(rot_str),
- plane_state->hw.rotation);
-
- seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb->base.id, format_name.str,
- fb->width, fb->height,
- yesno(plane_state->uapi.visible),
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst),
- rot_str);
-}
-
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
- plane->base.base.id, plane->base.name,
- plane_type(plane->base.type));
- intel_plane_uapi_info(m, plane);
- intel_plane_hw_info(m, plane);
- }
-}
-
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int num_scalers = crtc->num_scalers;
- int i;
-
- /* Not all platformas have a scaler */
- if (num_scalers) {
- seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
- num_scalers,
- crtc_state->scaler_state.scaler_users,
- crtc_state->scaler_state.scaler_id);
-
- for (i = 0; i < num_scalers; i++) {
- const struct intel_scaler *sc =
- &crtc_state->scaler_state.scalers[i];
-
- seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
- i, yesno(sc->in_use), sc->mode);
- }
- seq_puts(m, "\n");
- } else {
- seq_puts(m, "\tNo scalers available on this platform\n");
- }
-}
-
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_encoder *encoder;
-
- seq_printf(m, "[CRTC:%d:%s]:\n",
- crtc->base.base.id, crtc->base.name);
-
- seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
- yesno(crtc_state->uapi.enable),
- yesno(crtc_state->uapi.active),
- DRM_MODE_ARG(&crtc_state->uapi.mode));
-
- if (crtc_state->hw.enable) {
- seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
- yesno(crtc_state->hw.active),
- DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
-
- seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
- crtc_state->pipe_src_w, crtc_state->pipe_src_h,
- yesno(crtc_state->dither), crtc_state->pipe_bpp);
-
- intel_scaler_info(m, crtc);
- }
-
- for_each_intel_encoder_mask(&dev_priv->drm, encoder,
- crtc_state->uapi.encoder_mask)
- intel_encoder_info(m, crtc, encoder);
-
- intel_plane_info(m, crtc);
-
- seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
- yesno(!crtc->cpu_fifo_underrun_disabled),
- yesno(!crtc->pch_fifo_underrun_disabled));
-}
-
-static int i915_display_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- drm_modeset_lock_all(dev);
-
- seq_printf(m, "CRTC info\n");
- seq_printf(m, "---------\n");
- for_each_intel_crtc(dev, crtc)
- intel_crtc_info(m, crtc);
-
- seq_printf(m, "\n");
- seq_printf(m, "Connector info\n");
- seq_printf(m, "--------------\n");
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter)
- intel_connector_info(m, connector);
- drm_connector_list_iter_end(&conn_iter);
-
- drm_modeset_unlock_all(dev);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2733,55 +1608,6 @@ static int i915_shrinker_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_shared_dplls_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int i;
-
- drm_modeset_lock_all(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
- pll->info->id);
- seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
- pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
- seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n",
- pll->state.hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
- seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
- seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
- seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
- pll->state.hw_state.mg_refclkin_ctl);
- seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_coreclkctl1);
- seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_hsclkctl);
- seq_printf(m, " mg_pll_div0: 0x%08x\n",
- pll->state.hw_state.mg_pll_div0);
- seq_printf(m, " mg_pll_div1: 0x%08x\n",
- pll->state.hw_state.mg_pll_div1);
- seq_printf(m, " mg_pll_lf: 0x%08x\n",
- pll->state.hw_state.mg_pll_lf);
- seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
- pll->state.hw_state.mg_pll_frac_lock);
- seq_printf(m, " mg_pll_ssc: 0x%08x\n",
- pll->state.hw_state.mg_pll_ssc);
- seq_printf(m, " mg_pll_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_bias);
- seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_tdc_coldst_bias);
- }
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -2802,7 +1628,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
for (wa = wal->list; count--; wa++)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
i915_mmio_reg_offset(wa->reg),
- wa->val, wa->mask);
+ wa->set, wa->clr);
seq_printf(m, "\n");
}
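
The val/mask to set/clr rename follows the workaround bookkeeping rework: an entry is read as "clear these bits, then set those", i.e. new = (old & ~clr) | set, which is less ambiguous than a value/mask pair. A small self-check of that read-modify-write shape; everything beyond the set/clr fields is a stand-in:

#include <stdint.h>
#include <stdio.h>

struct wa { uint32_t set, clr; };

static uint32_t wa_apply(uint32_t reg, const struct wa *wa)
{
	return (reg & ~wa->clr) | wa->set;	/* clear, then set */
}

int main(void)
{
	const struct wa wa = { .set = 0x0004, .clr = 0x000c };
	uint32_t reg = 0xffff;

	printf("0x%08x -> 0x%08x\n", reg, wa_apply(reg, &wa));
	return 0;
}
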
@@ -2810,646 +1636,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ipc_status_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Isochronous Priority Control: %s\n",
- yesno(dev_priv->ipc_enabled));
- return 0;
-}
-
-static int i915_ipc_status_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (!HAS_IPC(dev_priv))
- return -ENODEV;
-
- return single_open(file, i915_ipc_status_show, dev_priv);
-}
-
-static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- intel_wakeref_t wakeref;
- bool enable;
- int ret;
-
- ret = kstrtobool_from_user(ubuf, len, &enable);
- if (ret < 0)
- return ret;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (!dev_priv->ipc_enabled && enable)
- DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
- dev_priv->ipc_enabled = enable;
- intel_enable_ipc(dev_priv);
- }
-
- return len;
-}
-
-static const struct file_operations i915_ipc_status_fops = {
- .owner = THIS_MODULE,
- .open = i915_ipc_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_ipc_status_write
-};
-
-static int i915_ddb_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_entry *entry;
- struct intel_crtc *crtc;
-
- if (INTEL_GEN(dev_priv) < 9)
- return -ENODEV;
-
- drm_modeset_lock_all(dev);
-
- seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
-
- seq_printf(m, "Pipe %c\n", pipe_name(pipe));
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
- entry->start, entry->end,
- skl_ddb_entry_size(entry));
- }
-
- entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
- seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
- entry->end, skl_ddb_entry_size(entry));
- }
-
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-
-static void drrs_status_per_crtc(struct seq_file *m,
- struct drm_device *dev,
- struct intel_crtc *intel_crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_drrs *drrs = &dev_priv->drrs;
- int vrefresh = 0;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->state->crtc != &intel_crtc->base)
- continue;
-
- seq_printf(m, "%s:\n", connector->name);
- }
- drm_connector_list_iter_end(&conn_iter);
-
- if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
- seq_puts(m, "\tVBT: DRRS_type: Static");
- else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
- seq_puts(m, "\tVBT: DRRS_type: Seamless");
- else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
- seq_puts(m, "\tVBT: DRRS_type: None");
- else
- seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
-
- seq_puts(m, "\n\n");
-
- if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
- struct intel_panel *panel;
-
- mutex_lock(&drrs->mutex);
- /* DRRS Supported */
- seq_puts(m, "\tDRRS Supported: Yes\n");
-
- /* disable_drrs() will make drrs->dp NULL */
- if (!drrs->dp) {
- seq_puts(m, "Idleness DRRS: Disabled\n");
- if (dev_priv->psr.enabled)
- seq_puts(m,
- "\tAs PSR is enabled, DRRS is not enabled\n");
- mutex_unlock(&drrs->mutex);
- return;
- }
-
- panel = &drrs->dp->attached_connector->panel;
- seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
- drrs->busy_frontbuffer_bits);
-
- seq_puts(m, "\n\t\t");
- if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
- seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
- vrefresh = panel->fixed_mode->vrefresh;
- } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
- seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
- vrefresh = panel->downclock_mode->vrefresh;
- } else {
- seq_printf(m, "DRRS_State: Unknown(%d)\n",
- drrs->refresh_rate_type);
- mutex_unlock(&drrs->mutex);
- return;
- }
- seq_printf(m, "\t\tVrefresh: %d", vrefresh);
-
- seq_puts(m, "\n\t\t");
- mutex_unlock(&drrs->mutex);
- } else {
- /* DRRS not supported. Print the VBT parameter*/
- seq_puts(m, "\tDRRS Supported : No");
- }
- seq_puts(m, "\n");
-}
-
-static int i915_drrs_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *intel_crtc;
- int active_crtc_cnt = 0;
-
- drm_modeset_lock_all(dev);
- for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.state->active) {
- active_crtc_cnt++;
- seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
-
- drrs_status_per_crtc(m, dev, intel_crtc);
- }
- }
- drm_modeset_unlock_all(dev);
-
- if (!active_crtc_cnt)
- seq_puts(m, "No active crtc found\n");
-
- return 0;
-}
-
-static int i915_dp_mst_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- intel_encoder = intel_attached_encoder(to_intel_connector(connector));
- if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- intel_dig_port = enc_to_dig_port(intel_encoder);
- if (!intel_dig_port->dp.can_mst)
- continue;
-
- seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-
-static ssize_t i915_displayport_test_active_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- char *input_buffer;
- int status = 0;
- struct drm_device *dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
- int val = 0;
-
- dev = ((struct seq_file *)file->private_data)->private;
-
- if (len == 0)
- return 0;
-
- input_buffer = memdup_user_nul(ubuf, len);
- if (IS_ERR(input_buffer))
- return PTR_ERR(input_buffer);
-
- DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- status = kstrtoint(input_buffer, 10, &val);
- if (status < 0)
- break;
- DRM_DEBUG_DRIVER("Got %d for test active\n", val);
- /* To prevent erroneous activation of the compliance
- * testing code, only accept an actual value of 1 here
- */
- if (val == 1)
- intel_dp->compliance.test_active = true;
- else
- intel_dp->compliance.test_active = false;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- kfree(input_buffer);
- if (status < 0)
- return status;
-
- *offp += len;
- return len;
-}
-
-static int i915_displayport_test_active_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->compliance.test_active)
- seq_puts(m, "1");
- else
- seq_puts(m, "0");
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-
-static int i915_displayport_test_active_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, i915_displayport_test_active_show,
- inode->i_private);
-}
-
-static const struct file_operations i915_displayport_test_active_fops = {
- .owner = THIS_MODULE,
- .open = i915_displayport_test_active_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_displayport_test_active_write
-};
-
-static int i915_displayport_test_data_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_EDID_READ)
- seq_printf(m, "%lx",
- intel_dp->compliance.test_data.edid);
- else if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_VIDEO_PATTERN) {
- seq_printf(m, "hdisplay: %d\n",
- intel_dp->compliance.test_data.hdisplay);
- seq_printf(m, "vdisplay: %d\n",
- intel_dp->compliance.test_data.vdisplay);
- seq_printf(m, "bpc: %u\n",
- intel_dp->compliance.test_data.bpc);
- }
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
-
-static int i915_displayport_test_type_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- seq_printf(m, "%02lx", intel_dp->compliance.test_type);
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-
-static void wm_latency_show(struct seq_file *m, const u16 wm[8])
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- int level;
- int num_levels;
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- drm_modeset_lock_all(dev);
-
- for (level = 0; level < num_levels; level++) {
- unsigned int latency = wm[level];
-
- /*
- * - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9/vlv/chv
- */
- if (INTEL_GEN(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level], latency / 10, latency % 10);
- }
-
- drm_modeset_unlock_all(dev);
-}
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.pri_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.spr_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.cur_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- return -ENODEV;
-
- return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, u16 wm[8])
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- u16 new[8] = { 0 };
- int num_levels;
- int level;
- int ret;
- char tmp[32];
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
- &new[0], &new[1], &new[2], &new[3],
- &new[4], &new[5], &new[6], &new[7]);
- if (ret != num_levels)
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
-
- for (level = 0; level < num_levels; level++)
- wm[level] = new[level];
-
- drm_modeset_unlock_all(dev);
-
- return len;
-}
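wm_latency_write() is the standard debugfs write pattern: bound the user length by a stack buffer, copy_from_user(), NUL-terminate, parse, and report the whole write as consumed. A minimal sketch of the same shape (illustrative only, hypothetical names):

    static ssize_t example_write(struct file *file, const char __user *ubuf,
    			     size_t len, loff_t *offp)
    {
    	char tmp[32];
    	unsigned int val;

    	if (len >= sizeof(tmp))		/* leave room for the terminator */
    		return -EINVAL;
    	if (copy_from_user(tmp, ubuf, len))
    		return -EFAULT;
    	tmp[len] = '\0';		/* user data is not NUL-terminated */

    	if (kstrtouint(tmp, 10, &val))
    		return -EINVAL;
    	/* ... apply val under the appropriate lock ... */
    	return len;
    }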
-
-
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.pri_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.spr_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.cur_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = pri_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = spr_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = cur_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cur_wm_latency_write
-};
-
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -3641,7 +1827,8 @@ i915_cache_sharing_set(void *data, u64 val)
if (val > 3)
return -EINVAL;
- DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Manually setting uncore sharing to %llu\n", val);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 snpcr;
@@ -3947,292 +2134,6 @@ static const struct file_operations i915_forcewake_fops = {
.release = i915_forcewake_release,
};
-static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
-
-	/* Synchronize with everything first, in case there's been an HPD
-	 * storm that the kernel hasn't finished handling yet.
- */
- intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->hotplug.hotplug_work);
-
- seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
- seq_printf(m, "Detected: %s\n",
- yesno(delayed_work_pending(&hotplug->reenable_work)));
-
- return 0;
-}
-
-static ssize_t i915_hpd_storm_ctl_write(struct file *file,
- const char __user *ubuf, size_t len,
- loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
- unsigned int new_threshold;
- int i;
- char *newline;
- char tmp[16];
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- /* Strip newline, if any */
- newline = strchr(tmp, '\n');
- if (newline)
- *newline = '\0';
-
- if (strcmp(tmp, "reset") == 0)
- new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
- else if (kstrtouint(tmp, 10, &new_threshold) != 0)
- return -EINVAL;
-
- if (new_threshold > 0)
- DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
- new_threshold);
- else
- DRM_DEBUG_KMS("Disabling HPD storm detection\n");
-
- spin_lock_irq(&dev_priv->irq_lock);
- hotplug->hpd_storm_threshold = new_threshold;
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
- for_each_hpd_pin(i)
- hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
-
- return len;
-}
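Typical usage, assuming the usual debugfs mount point and DRM minor path: `echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl` sets the threshold, `echo 0` disables storm detection, and `echo reset` restores HPD_STORM_DEFAULT_THRESHOLD.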
-
-static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
-}
-
-static const struct file_operations i915_hpd_storm_ctl_fops = {
- .owner = THIS_MODULE,
- .open = i915_hpd_storm_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_hpd_storm_ctl_write
-};
-
-static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Enabled: %s\n",
- yesno(dev_priv->hotplug.hpd_short_storm_enabled));
-
- return 0;
-}
-
-static int
-i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_hpd_short_storm_ctl_show,
- inode->i_private);
-}
-
-static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
- char *newline;
- char tmp[16];
- int i;
- bool new_state;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- /* Strip newline, if any */
- newline = strchr(tmp, '\n');
- if (newline)
- *newline = '\0';
-
- /* Reset to the "default" state for this system */
- if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
- else if (kstrtobool(tmp, &new_state) != 0)
- return -EINVAL;
-
- DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
- new_state ? "En" : "Dis");
-
- spin_lock_irq(&dev_priv->irq_lock);
- hotplug->hpd_short_storm_enabled = new_state;
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
- for_each_hpd_pin(i)
- hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
-
- return len;
-}
-
-static const struct file_operations i915_hpd_short_storm_ctl_fops = {
- .owner = THIS_MODULE,
- .open = i915_hpd_short_storm_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_hpd_short_storm_ctl_write,
-};
-
-static int i915_drrs_ctl_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- if (INTEL_GEN(dev_priv) < 7)
- return -ENODEV;
-
- for_each_intel_crtc(dev, crtc) {
- struct drm_connector_list_iter conn_iter;
- struct intel_crtc_state *crtc_state;
- struct drm_connector *connector;
- struct drm_crtc_commit *commit;
- int ret;
-
- ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- if (!crtc_state->hw.active ||
- !crtc_state->has_drrs)
- goto out;
-
- commit = crtc_state->uapi.commit;
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (ret)
- goto out;
- }
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp;
-
- if (!(crtc_state->uapi.connector_mask &
- drm_connector_mask(connector)))
- continue;
-
- encoder = intel_attached_encoder(to_intel_connector(connector));
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
- val ? "en" : "dis", val);
-
- intel_dp = enc_to_intel_dp(encoder);
- if (val)
- intel_edp_drrs_enable(intel_dp,
- crtc_state);
- else
- intel_edp_drrs_disable(intel_dp,
- crtc_state);
- }
- drm_connector_list_iter_end(&conn_iter);
-
-out:
- drm_modeset_unlock(&crtc->base.mutex);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
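DEFINE_SIMPLE_ATTRIBUTE() (from <linux/fs.h>) wires a u64 getter/setter pair into a file_operations via the simple_attr helpers; passing NULL for the getter, as here, makes the file effectively write-only. The general shape (sketch, hypothetical names):

    static int foo_get(void *data, u64 *val)
    {
    	*val = 0;	/* read current state out of 'data' */
    	return 0;
    }

    static int foo_set(void *data, u64 val)
    {
    	/* validate and apply 'val' */
    	return 0;
    }

    DEFINE_SIMPLE_ATTRIBUTE(foo_fops, foo_get, foo_set, "%llu\n");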
-
-static ssize_t
-i915_fifo_underrun_reset_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct drm_i915_private *dev_priv = filp->private_data;
- struct intel_crtc *intel_crtc;
- struct drm_device *dev = &dev_priv->drm;
- int ret;
- bool reset;
-
- ret = kstrtobool_from_user(ubuf, cnt, &reset);
- if (ret)
- return ret;
-
- if (!reset)
- return cnt;
-
- for_each_intel_crtc(dev, intel_crtc) {
- struct drm_crtc_commit *commit;
- struct intel_crtc_state *crtc_state;
-
- ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(intel_crtc->base.state);
- commit = crtc_state->uapi.commit;
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (!ret)
- ret = wait_for_completion_interruptible(&commit->flip_done);
- }
-
- if (!ret && crtc_state->hw.active) {
- DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
- pipe_name(intel_crtc->pipe));
-
- intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
- }
-
- drm_modeset_unlock(&intel_crtc->base.mutex);
-
- if (ret)
- return ret;
- }
-
- ret = intel_fbc_reset_underrun(dev_priv);
- if (ret)
- return ret;
-
- return cnt;
-}
-
-static const struct file_operations i915_fifo_underrun_reset_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = i915_fifo_underrun_reset_write,
- .llseek = default_llseek,
-};
-
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4245,34 +2146,16 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_drpc_info", i915_drpc_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
- {"i915_fbc_status", i915_fbc_status, 0},
- {"i915_ips_status", i915_ips_status, 0},
- {"i915_sr_status", i915_sr_status, 0},
- {"i915_opregion", i915_opregion, 0},
- {"i915_vbt", i915_vbt, 0},
- {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
- {"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_llc", i915_llc, 0},
- {"i915_edp_psr_status", i915_edp_psr_status, 0},
- {"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
- {"i915_power_domain_info", i915_power_domain_info, 0},
- {"i915_dmc_info", i915_dmc_info, 0},
- {"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
{"i915_rcs_topology", i915_rcs_topology, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
- {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
- {"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
- {"i915_ddb_info", i915_ddb_info, 0},
{"i915_sseu_status", i915_sseu_status, 0},
- {"i915_drrs_status", i915_drrs_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -4289,21 +2172,8 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
- {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
- {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
- {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
- {"i915_fbc_false_color", &i915_fbc_false_color_fops},
- {"i915_dp_test_data", &i915_displayport_test_data_fops},
- {"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_level", &i915_guc_log_level_fops},
{"i915_guc_log_relay", &i915_guc_log_relay_fops},
- {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
- {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
- {"i915_ipc_status", &i915_ipc_status_fops},
- {"i915_drrs_ctl", &i915_drrs_ctl_fops},
- {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -4311,9 +2181,10 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
struct drm_minor *minor = dev_priv->drm.primary;
int i;
+ i915_debugfs_params(dev_priv);
+
debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
to_i915(minor->dev), &i915_forcewake_fops);
-
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
debugfs_create_file(i915_debugfs_files[i].name,
S_IRUGO | S_IWUSR,
@@ -4326,254 +2197,3 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
}
-
-struct dpcd_block {
- /* DPCD dump start address. */
- unsigned int offset;
- /* DPCD dump end address, inclusive. If unset, .size will be used. */
- unsigned int end;
-	/* DPCD dump size. Used if .end is unset; if both are unset, defaults to 1. */
- size_t size;
- /* Only valid for eDP. */
- bool edp;
-};
-
-static const struct dpcd_block i915_dpcd_debug[] = {
- { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
- { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
- { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
- { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
- { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
- { .offset = DP_SET_POWER },
- { .offset = DP_EDP_DPCD_REV },
- { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
- { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
- { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
-};
-
-static int i915_dpcd_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- u8 buf[16];
- ssize_t err;
- int i;
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
- const struct dpcd_block *b = &i915_dpcd_debug[i];
- size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
-
- if (b->edp &&
- connector->connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
-		/* low tech for now: skip any block larger than the stack buffer */
- if (WARN_ON(size > sizeof(buf)))
- continue;
-
- err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
- if (err < 0)
- seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
- else
- seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
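The per-block dump size in i915_dpcd_show() is `b->end ? b->end - b->offset + 1 : (b->size ?: 1)`: an inclusive .end takes precedence, then an explicit .size, then a single byte. So { .offset = DP_SET_POWER } reads exactly one register, while { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE } reads the whole receiver capability block.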
-
-static int i915_panel_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->panel_power_up_delay);
- seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->panel_power_down_delay);
- seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->backlight_on_delay);
- seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->backlight_off_delay);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_panel);
-
-static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
-	/* Bail out early if the connector does not support HDCP */
- if (!intel_connector->hdcp.shim)
- return -EINVAL;
-
- seq_printf(m, "%s:%d HDCP version: ", connector->name,
- connector->base.id);
- intel_hdcp_info(m, intel_connector);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-
-static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
- struct drm_crtc *crtc;
- struct intel_dp *intel_dp;
- struct drm_modeset_acquire_ctx ctx;
- struct intel_crtc_state *crtc_state = NULL;
- int ret = 0;
- bool try_again = false;
-
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
- do {
- try_again = false;
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
- &ctx);
- if (ret) {
- if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
- try_again = true;
- continue;
- }
- break;
- }
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
- ret = -ENODEV;
- break;
- }
- ret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret) {
- try_again = true;
- continue;
- }
- break;
- } else if (ret) {
- break;
- }
- intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- crtc_state = to_intel_crtc_state(crtc->state);
- seq_printf(m, "DSC_Enabled: %s\n",
- yesno(crtc_state->dsc.compression_enable));
- seq_printf(m, "DSC_Sink_Support: %s\n",
- yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
- seq_printf(m, "Force_DSC_Enable: %s\n",
- yesno(intel_dp->force_dsc_en));
- if (!intel_dp_is_edp(intel_dp))
- seq_printf(m, "FEC_Sink_Support: %s\n",
- yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
- } while (try_again);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
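The try_again loop above is the standard drm_modeset_acquire_ctx deadlock-backoff dance: on -EDEADLK, drop every lock held in the context, block until the contended lock is free, and retry. Stripped to its canonical shape (sketch):

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);
    retry:
    	ret = drm_modeset_lock(&lock, &ctx);
    	if (ret == -EDEADLK) {
    		if (!drm_modeset_backoff(&ctx))
    			goto retry;
    	}
    	/* ... take more locks, do the work ... */
    	drm_modeset_drop_locks(&ctx);
    	drm_modeset_acquire_fini(&ctx);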
-
-static ssize_t i915_dsc_fec_support_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- bool dsc_enable = false;
- int ret;
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (len == 0)
- return 0;
-
- DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
- len);
-
- ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
- if (ret < 0)
- return ret;
-
- DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
- (dsc_enable) ? "true" : "false");
- intel_dp->force_dsc_en = dsc_enable;
-
- *offp += len;
- return len;
-}
-
-static int i915_dsc_fec_support_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, i915_dsc_fec_support_show,
- inode->i_private);
-}
-
-static const struct file_operations i915_dsc_fec_support_fops = {
- .owner = THIS_MODULE,
- .open = i915_dsc_fec_support_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_dsc_fec_support_write
-};
-
-/**
- * i915_debugfs_connector_add - add i915 specific connector debugfs files
- * @connector: pointer to a registered drm_connector
- *
- * Cleanup will be done by drm_connector_unregister() through a call to
- * drm_debugfs_connector_remove().
- *
- * Returns 0 on success, negative error codes on error.
- */
-int i915_debugfs_connector_add(struct drm_connector *connector)
-{
- struct dentry *root = connector->debugfs_entry;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
-	/* The connector must have been registered beforehand. */
- if (!root)
- return -ENODEV;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_dpcd", S_IRUGO, root,
- connector, &i915_dpcd_fops);
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- debugfs_create_file("i915_panel_timings", S_IRUGO, root,
- connector, &i915_panel_fops);
- debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
- connector, &i915_psr_sink_status_fops);
- }
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
- connector, &i915_hdcp_sink_capability_fops);
- }
-
- if (INTEL_GEN(dev_priv) >= 10 &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP))
- debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
- connector, &i915_dsc_fec_support_fops);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
index c0cd22eb916d..6da39c76ab5e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.h
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -6,15 +6,17 @@
#ifndef __I915_DEBUGFS_H__
#define __I915_DEBUGFS_H__
-struct drm_i915_private;
struct drm_connector;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct seq_file;
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
-int i915_debugfs_connector_add(struct drm_connector *connector);
+void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
+static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
#endif
#endif /* __I915_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
new file mode 100644
index 000000000000..62b2c5f0495d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include "i915_debugfs_params.h"
+#include "i915_drv.h"
+#include "i915_params.h"
+
+/* int param */
+static int i915_param_int_show(struct seq_file *m, void *data)
+{
+ int *value = m->private;
+
+ seq_printf(m, "%d\n", *value);
+
+ return 0;
+}
+
+static int i915_param_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_int_show, inode->i_private);
+}
+
+static ssize_t i915_param_int_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int *value = m->private;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
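The trailing `return ret ?: len;` is the GNU ?: idiom: return the parse error if there was one, otherwise report the whole write as consumed. The kstrtobool_from_user() fallback means an int param accepts both numeric and boolean spellings, e.g. writing "y" stores 1 and "off" stores 0.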
+
+static const struct file_operations i915_param_int_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_int_open,
+ .read = seq_read,
+ .write = i915_param_int_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_int_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_int_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* unsigned int param */
+static int i915_param_uint_show(struct seq_file *m, void *data)
+{
+ unsigned int *value = m->private;
+
+ seq_printf(m, "%u\n", *value);
+
+ return 0;
+}
+
+static int i915_param_uint_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_uint_show, inode->i_private);
+}
+
+static ssize_t i915_param_uint_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ unsigned int *value = m->private;
+ int ret;
+
+ ret = kstrtouint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations i915_param_uint_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_uint_open,
+ .read = seq_read,
+ .write = i915_param_uint_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_uint_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_uint_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* char * param */
+static int i915_param_charp_show(struct seq_file *m, void *data)
+{
+ const char **s = m->private;
+
+ seq_printf(m, "%s\n", *s);
+
+ return 0;
+}
+
+static int i915_param_charp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_charp_show, inode->i_private);
+}
+
+static ssize_t i915_param_charp_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ char **s = m->private;
+ char *new, *old;
+
+	/* FIXME: remove locking once these params are no longer the module params */
+ kernel_param_lock(THIS_MODULE);
+
+ old = *s;
+ new = strndup_user(ubuf, PAGE_SIZE);
+ if (IS_ERR(new)) {
+ len = PTR_ERR(new);
+ goto out;
+ }
+
+ *s = new;
+
+ kfree(old);
+out:
+ kernel_param_unlock(THIS_MODULE);
+
+ return len;
+}
+
+static const struct file_operations i915_param_charp_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_charp_open,
+ .read = seq_read,
+ .write = i915_param_charp_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_charp_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_charp_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+#define RO(mode) (((mode) & 0222) == 0)
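RO() masks the write permission bits (0222): RO(0400) and RO(0444) are true, RO(0600) and RO(0644) are false, so only writable params get the fops variant that exposes a .write hook.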
+
+static struct dentry *
+i915_debugfs_create_int(const char *name, umode_t mode,
+ struct dentry *parent, int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &i915_param_int_fops_ro :
+ &i915_param_int_fops);
+}
+
+static struct dentry *
+i915_debugfs_create_uint(const char *name, umode_t mode,
+ struct dentry *parent, unsigned int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &i915_param_uint_fops_ro :
+ &i915_param_uint_fops);
+}
+
+static struct dentry *
+i915_debugfs_create_charp(const char *name, umode_t mode,
+ struct dentry *parent, char **value)
+{
+ return debugfs_create_file(name, mode, parent, value,
+ RO(mode) ? &i915_param_charp_fops_ro :
+ &i915_param_charp_fops);
+}
+
+static __always_inline void
+_i915_param_create_file(struct dentry *parent, const char *name,
+ const char *type, int mode, void *value)
+{
+ if (!mode)
+ return;
+
+ if (!__builtin_strcmp(type, "bool"))
+ debugfs_create_bool(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "int"))
+ i915_debugfs_create_int(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ i915_debugfs_create_uint(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "unsigned long"))
+ debugfs_create_ulong(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "char *"))
+ i915_debugfs_create_charp(name, mode, parent, value);
+ else
+ WARN(1, "no debugfs fops defined for param type %s (i915.%s)\n",
+ type, name);
+}
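Because _i915_param_create_file() is __always_inline and `type` is always a string literal (stringified by the REGISTER macro below), each __builtin_strcmp() folds to a compile-time constant, so the whole if/else chain collapses to the one matching create call per parameter.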
+
+/* add a subdirectory with files for each i915 param */
+struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ struct i915_params *params = &i915_modparams;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("i915_params", minor->debugfs_root);
+ if (IS_ERR(dir))
+ return dir;
+
+ /*
+	 * Note: files for params needing special handling could be created
+	 * here. Set the mode in params to 0 to skip the generic file
+	 * creation, or just let it fail silently with -EEXIST.
+ */
+
+#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, #T, mode, &params->x);
+ I915_PARAMS_FOR_EACH(REGISTER);
+#undef REGISTER
+
+ return dir;
+}
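I915_PARAMS_FOR_EACH() is a classic X-macro: the caller supplies a per-entry macro receiving (type, name, default, mode, ...), and a single list drives every expansion. A self-contained illustration of the pattern (hypothetical param list, not the actual i915 one):

    #include <stdio.h>

    #define MY_PARAMS_FOR_EACH(param)			\
    	param(int, verbosity, 0, 0600)			\
    	param(unsigned, timeout_ms, 1000, 0644)

    /* one expansion declares the struct members ... */
    #define MEMBER(T, name, def, mode) T name;
    struct my_params { MY_PARAMS_FOR_EACH(MEMBER) };
    #undef MEMBER

    /* ... another walks the same list to register (here: print) each one */
    #define SHOW(T, name, def, mode) \
    	printf("%-10s %-8s default=%d mode=%o\n", #name, #T, (int)(def), (unsigned)(mode));

    int main(void)
    {
    	MY_PARAMS_FOR_EACH(SHOW);
    	return 0;
    }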
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.h b/drivers/gpu/drm/i915/i915_debugfs_params.h
new file mode 100644
index 000000000000..66567076546b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_DEBUGFS_PARAMS__
+#define __I915_DEBUGFS_PARAMS__
+
+struct dentry;
+struct drm_i915_private;
+
+struct dentry *i915_debugfs_params(struct drm_i915_private *i915);
+
+#endif /* __I915_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8410330ce4f0..81a4621853db 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,12 +44,13 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
+#include "display/intel_csr.h"
+#include "display/intel_display_debugfs.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
@@ -69,6 +70,7 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
@@ -78,74 +80,14 @@
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_csr.h"
+#include "intel_dram.h"
+#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
+#include "vlv_suspend.h"
static struct drm_driver driver;
-struct vlv_s0ix_state {
- /* GAM */
- u32 wr_watermark;
- u32 gfx_prio_ctrl;
- u32 arb_mode;
- u32 gfx_pend_tlb0;
- u32 gfx_pend_tlb1;
- u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
- u32 media_max_req_count;
- u32 gfx_max_req_count;
- u32 render_hwsp;
- u32 ecochk;
- u32 bsd_hwsp;
- u32 blt_hwsp;
- u32 tlb_rd_addr;
-
- /* MBC */
- u32 g3dctl;
- u32 gsckgctl;
- u32 mbctl;
-
- /* GCP */
- u32 ucgctl1;
- u32 ucgctl3;
- u32 rcgctl1;
- u32 rcgctl2;
- u32 rstctl;
- u32 misccpctl;
-
- /* GPM */
- u32 gfxpause;
- u32 rpdeuhwtc;
- u32 rpdeuc;
- u32 ecobus;
- u32 pwrdwnupctl;
- u32 rp_down_timeout;
- u32 rp_deucsw;
- u32 rcubmabdtmr;
- u32 rcedata;
- u32 spare2gh;
-
- /* Display 1 CZ domain */
- u32 gt_imr;
- u32 gt_ier;
- u32 pm_imr;
- u32 pm_ier;
- u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
- /* GT SA CZ domain */
- u32 tilectl;
- u32 gt_fifoctl;
- u32 gtlc_wake_ctrl;
- u32 gtlc_survive;
- u32 pmwgicz;
-
- /* Display 2 CZ domain */
- u32 gu_ctl0;
- u32 gu_ctl1;
- u32 pcbr;
- u32 clock_gate_dis2;
-};
-
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
@@ -153,7 +95,7 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
dev_priv->bridge_dev =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
- DRM_ERROR("bridge device not found\n");
+ drm_err(&dev_priv->drm, "bridge device not found\n");
return -1;
}
return 0;
@@ -190,7 +132,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
return ret;
}
@@ -273,7 +215,8 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+/* part #1: call before irq install */
+static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
{
int ret;
@@ -293,25 +236,32 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
if (ret)
goto out;
- intel_register_dsm_handler();
+ intel_power_domains_init_hw(i915, false);
+
+ intel_csr_ucode_init(i915);
- ret = i915_switcheroo_register(i915);
+ ret = intel_modeset_init_noirq(i915);
if (ret)
goto cleanup_vga_client;
- intel_power_domains_init_hw(i915, false);
+ return 0;
- intel_csr_ucode_init(i915);
+cleanup_vga_client:
+ intel_vga_unregister(i915);
+out:
+ return ret;
+}
- ret = intel_irq_install(i915);
- if (ret)
- goto cleanup_csr;
+/* part #2: call after irq install */
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+{
+ int ret;
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
ret = intel_modeset_init(i915);
if (ret)
- goto cleanup_irq;
+ goto out;
ret = i915_gem_init(i915);
if (ret)
@@ -340,29 +290,27 @@ cleanup_gem:
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
cleanup_modeset:
+ /* FIXME */
intel_modeset_driver_remove(i915);
-cleanup_irq:
intel_irq_uninstall(i915);
-cleanup_csr:
- intel_csr_ucode_fini(i915);
- intel_power_domains_driver_remove(i915);
- i915_switcheroo_unregister(i915);
-cleanup_vga_client:
- intel_vga_unregister(i915);
+ intel_modeset_driver_remove_noirq(i915);
out:
return ret;
}
+/* part #1: call before irq uninstall */
static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
intel_modeset_driver_remove(i915);
+}
- intel_irq_uninstall(i915);
+/* part #2: call after irq uninstall */
+static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
+{
+ intel_modeset_driver_remove_noirq(i915);
intel_bios_driver_remove(i915);
- i915_switcheroo_unregister(i915);
-
intel_vga_unregister(i915);
intel_csr_ucode_fini(i915);
@@ -412,7 +360,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
out_free_wq:
destroy_workqueue(dev_priv->wq);
out_err:
- DRM_ERROR("Failed to allocate workqueues.\n");
+ drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
return -ENOMEM;
}
@@ -441,37 +389,15 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+ pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
if (pre) {
- DRM_ERROR("This is a pre-production stepping. "
+ drm_err(&dev_priv->drm, "This is a pre-production stepping. "
"It may not be fully functional.\n");
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
}
}
-static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
-{
- if (!IS_VALLEYVIEW(i915))
- return 0;
-
- /* we write all the values in the struct, so no need to zero it out */
- i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
- GFP_KERNEL);
- if (!i915->vlv_s0ix_state)
- return -ENOMEM;
-
- return 0;
-}
-
-static void vlv_free_s0ix_state(struct drm_i915_private *i915)
-{
- if (!i915->vlv_s0ix_state)
- return;
-
- kfree(i915->vlv_s0ix_state);
- i915->vlv_s0ix_state = NULL;
-}
-
static void sanitize_gpu(struct drm_i915_private *i915)
{
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -505,8 +431,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->backlight_lock);
mutex_init(&dev_priv->sb_lock);
- pm_qos_add_request(&dev_priv->sb_qos,
- PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
@@ -520,7 +445,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = vlv_alloc_s0ix_state(dev_priv);
+ ret = vlv_suspend_init(dev_priv);
if (ret < 0)
goto err_workqueues;
@@ -542,7 +467,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- intel_display_crc_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -551,7 +475,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
- vlv_free_s0ix_state(dev_priv);
+ vlv_suspend_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -568,10 +492,10 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
- vlv_free_s0ix_state(dev_priv);
+ vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
- pm_qos_remove_request(&dev_priv->sb_qos);
+ cpu_latency_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
}
@@ -641,487 +565,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
-#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-
-static const char *intel_dram_type_str(enum intel_dram_type type)
-{
- static const char * const str[] = {
- DRAM_TYPE_STR(UNKNOWN),
- DRAM_TYPE_STR(DDR3),
- DRAM_TYPE_STR(DDR4),
- DRAM_TYPE_STR(LPDDR3),
- DRAM_TYPE_STR(LPDDR4),
- };
-
- if (type >= ARRAY_SIZE(str))
- type = INTEL_DRAM_UNKNOWN;
-
- return str[type];
-}
-
-#undef DRAM_TYPE_STR
-
-static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
-{
- return dimm->ranks * 64 / (dimm->width ?: 1);
-}
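intel_dimm_num_devices() assumes a 64-bit-wide channel: ranks * 64 / width. A dual-rank DIMM built from x8 devices therefore has 2 * 64 / 8 = 16 DRAM devices; skl_is_16gb_dimm() below inverts this to get Gb per device (8 * total GB / devices).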
-
-/* Returns total GB for the whole DIMM */
-static int skl_get_dimm_size(u16 val)
-{
- return val & SKL_DRAM_SIZE_MASK;
-}
-
-static int skl_get_dimm_width(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & SKL_DRAM_WIDTH_MASK) {
- case SKL_DRAM_WIDTH_X8:
- case SKL_DRAM_WIDTH_X16:
- case SKL_DRAM_WIDTH_X32:
- val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int skl_get_dimm_ranks(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-/* Returns total GB for the whole DIMM */
-static int cnl_get_dimm_size(u16 val)
-{
- return (val & CNL_DRAM_SIZE_MASK) / 2;
-}
-
-static int cnl_get_dimm_width(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & CNL_DRAM_WIDTH_MASK) {
- case CNL_DRAM_WIDTH_X8:
- case CNL_DRAM_WIDTH_X16:
- case CNL_DRAM_WIDTH_X32:
- val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int cnl_get_dimm_ranks(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-static bool
-skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
-{
- /* Convert total GB to Gb per DRAM device */
- return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
-}
-
-static void
-skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
- struct dram_dimm_info *dimm,
- int channel, char dimm_name, u16 val)
-{
- if (INTEL_GEN(dev_priv) >= 10) {
- dimm->size = cnl_get_dimm_size(val);
- dimm->width = cnl_get_dimm_width(val);
- dimm->ranks = cnl_get_dimm_ranks(val);
- } else {
- dimm->size = skl_get_dimm_size(val);
- dimm->width = skl_get_dimm_width(val);
- dimm->ranks = skl_get_dimm_ranks(val);
- }
-
- DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
- channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
- yesno(skl_is_16gb_dimm(dimm)));
-}
-
-static int
-skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
- struct dram_channel_info *ch,
- int channel, u32 val)
-{
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
- channel, 'L', val & 0xffff);
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
- channel, 'S', val >> 16);
-
- if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
- DRM_DEBUG_KMS("CH%u not populated\n", channel);
- return -EINVAL;
- }
-
- if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
- ch->ranks = 2;
- else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
- ch->ranks = 2;
- else
- ch->ranks = 1;
-
- ch->is_16gb_dimm =
- skl_is_16gb_dimm(&ch->dimm_l) ||
- skl_is_16gb_dimm(&ch->dimm_s);
-
- DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
- channel, ch->ranks, yesno(ch->is_16gb_dimm));
-
- return 0;
-}
-
-static bool
-intel_is_dram_symmetric(const struct dram_channel_info *ch0,
- const struct dram_channel_info *ch1)
-{
- return !memcmp(ch0, ch1, sizeof(*ch0)) &&
- (ch0->dimm_s.size == 0 ||
- !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
-}
-
-static int
-skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- struct dram_channel_info ch0 = {}, ch1 = {};
- u32 val;
- int ret;
-
- val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- if (dram_info->num_channels == 0) {
- DRM_INFO("Number of memory channels is zero\n");
- return -EINVAL;
- }
-
- /*
-	 * If any channel is a single-rank channel, the worst-case output
-	 * will be the same as for single-rank memory, so treat the whole
-	 * configuration as single-rank memory.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
- DRM_INFO("couldn't get memory rank information\n");
- return -EINVAL;
- }
-
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
-
- dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
-
- DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
- yesno(dram_info->symmetric_memory));
- return 0;
-}
-
-static enum intel_dram_type
-skl_get_dram_type(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
-
- switch (val & SKL_DRAM_DDR_TYPE_MASK) {
- case SKL_DRAM_DDR_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case SKL_DRAM_DDR_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case SKL_DRAM_DDR_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case SKL_DRAM_DDR_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static int
-skl_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 mem_freq_khz, val;
- int ret;
-
- dram_info->type = skl_get_dram_type(dev_priv);
- DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
-
- ret = skl_dram_get_channels_info(dev_priv);
- if (ret)
- return ret;
-
- val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
- mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
- SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
- DRM_INFO("Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-/* Returns Gb per DRAM device */
-static int bxt_get_dimm_size(u32 val)
-{
- switch (val & BXT_DRAM_SIZE_MASK) {
- case BXT_DRAM_SIZE_4GBIT:
- return 4;
- case BXT_DRAM_SIZE_6GBIT:
- return 6;
- case BXT_DRAM_SIZE_8GBIT:
- return 8;
- case BXT_DRAM_SIZE_12GBIT:
- return 12;
- case BXT_DRAM_SIZE_16GBIT:
- return 16;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int bxt_get_dimm_width(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
-
- return 8 << val;
-}
-
-static int bxt_get_dimm_ranks(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- switch (val & BXT_DRAM_RANK_MASK) {
- case BXT_DRAM_RANK_SINGLE:
- return 1;
- case BXT_DRAM_RANK_DUAL:
- return 2;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static enum intel_dram_type bxt_get_dimm_type(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return INTEL_DRAM_UNKNOWN;
-
- switch (val & BXT_DRAM_TYPE_MASK) {
- case BXT_DRAM_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case BXT_DRAM_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case BXT_DRAM_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case BXT_DRAM_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
- u32 val)
-{
- dimm->width = bxt_get_dimm_width(val);
- dimm->ranks = bxt_get_dimm_ranks(val);
-
- /*
- * Size in register is Gb per DRAM device. Convert to total
- * GB to match the way we report this for non-LP platforms.
- */
- dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
-}
-
-static int
-bxt_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 dram_channels;
- u32 mem_freq_khz, val;
- u8 num_active_channels;
- int i;
-
- val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
- mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
- BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
- num_active_channels = hweight32(dram_channels);
-
-	/* Each active bit represents a 4-byte-wide channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
- DRM_INFO("Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- /*
-	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
- */
- for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
- struct dram_dimm_info dimm;
- enum intel_dram_type type;
-
- val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
- if (val == 0xFFFFFFFF)
- continue;
-
- dram_info->num_channels++;
-
- bxt_get_dimm_info(&dimm, val);
- type = bxt_get_dimm_type(val);
-
- WARN_ON(type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != type);
-
- DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
- i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
-
- /*
-		 * If any channel is a single-rank channel, the
-		 * worst-case output will be the same as for single-rank
-		 * memory, so treat it as single-rank memory.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
-
- if (type != INTEL_DRAM_UNKNOWN)
- dram_info->type = type;
- }
-
- if (dram_info->type == INTEL_DRAM_UNKNOWN ||
- dram_info->ranks == 0) {
- DRM_INFO("couldn't get memory information\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-static void
-intel_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- int ret;
-
- /*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
- */
- dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
-
- if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
- return;
-
- if (IS_GEN9_LP(dev_priv))
- ret = bxt_get_dram_info(dev_priv);
- else
- ret = skl_get_dram_info(dev_priv);
- if (ret)
- return;
-
- DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps,
- dram_info->num_channels);
-
- DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
-}
-
-static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
-{
- static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- static const u8 sets[4] = { 1, 1, 2, 2 };
-
- return EDRAM_NUM_BANKS(cap) *
- ways[EDRAM_WAYS_IDX(cap)] *
- sets[EDRAM_SETS_IDX(cap)];
-}
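gen9_edram_size_mb() is banks × ways × sets, in MB. With hypothetical capability fields decoding to 4 banks, 16 ways and 2 sets, that gives 4 × 16 × 2 = 128MB.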
-
-static void edram_detect(struct drm_i915_private *dev_priv)
-{
- u32 edram_cap = 0;
-
- if (!(IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv) ||
- INTEL_GEN(dev_priv) >= 9))
- return;
-
- edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
-
- /* NB: We can't write IDICR yet because we don't have gt funcs set up */
-
- if (!(edram_cap & EDRAM_ENABLED))
- return;
-
- /*
-	 * The capability bits needed for the size calculation are not present
-	 * on pre-gen9 hardware, so always return 128MB.
- */
- if (INTEL_GEN(dev_priv) < 9)
- dev_priv->edram_size_mb = 128;
- else
- dev_priv->edram_size_mb =
- gen9_edram_size_mb(dev_priv, edram_cap);
-
- dev_info(dev_priv->drm.dev,
- "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
-}
-
/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
@@ -1165,7 +608,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
/* needs to be done before ggtt probe */
- edram_detect(dev_priv);
+ intel_dram_edram_detect(dev_priv);
i915_perf_init(dev_priv);
@@ -1189,7 +632,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
- DRM_ERROR("failed to enable GGTT\n");
+ drm_err(&dev_priv->drm, "failed to enable GGTT\n");
goto err_mem_regions;
}
@@ -1205,7 +648,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
- DRM_ERROR("failed to set DMA mask\n");
+ drm_err(&dev_priv->drm, "failed to set DMA mask\n");
goto err_mem_regions;
}
@@ -1223,14 +666,13 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- DRM_ERROR("failed to set DMA mask\n");
+ drm_err(&dev_priv->drm, "failed to set DMA mask\n");
goto err_mem_regions;
}
}
- pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_gt_init_workarounds(dev_priv);
@@ -1255,7 +697,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
- DRM_DEBUG_DRIVER("can't enable MSI");
+ drm_dbg(&dev_priv->drm, "can't enable MSI");
}
ret = intel_gvt_init(dev_priv);
@@ -1267,7 +709,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* Fill the dram structure to get the system raw bandwidth and
* dram info. This will be used for memory latency calculation.
*/
- intel_get_dram_info(dev_priv);
+ intel_dram_detect(dev_priv);
intel_bw_init_hw(dev_priv);
@@ -1276,7 +718,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- pm_qos_remove_request(&dev_priv->pm_qos);
+ cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -1299,7 +741,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- pm_qos_remove_request(&dev_priv->pm_qos);
+ cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
@@ -1316,22 +758,19 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
- /*
- * Notify a valid surface after modesetting,
- * when running inside a VM.
- */
- if (intel_vgpu_active(dev_priv))
- I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+ intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
+ intel_display_debugfs_register(dev_priv);
i915_setup_sysfs(dev_priv);
/* Depends on sysfs having been initialized */
i915_perf_register(dev_priv);
} else
- DRM_ERROR("Failed to register driver for userspace access!\n");
+ drm_err(&dev_priv->drm,
+ "Failed to register driver for userspace access!\n");
if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
/* Must be done after probing outputs */
@@ -1361,6 +800,11 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_power_domains_enable(dev_priv);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
+
+ intel_register_dsm_handler();
+
+ if (i915_switcheroo_register(dev_priv))
+ drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}
/**
@@ -1369,6 +813,10 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ i915_switcheroo_unregister(dev_priv);
+
+ intel_unregister_dsm_handler();
+
intel_runtime_pm_disable(&dev_priv->runtime_pm);
intel_power_domains_disable(dev_priv);
@@ -1413,11 +861,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- DRM_INFO("DRM_I915_DEBUG enabled\n");
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
- DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
+ drm_info(&dev_priv->drm,
+ "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
static struct drm_i915_private *
@@ -1439,8 +888,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return ERR_PTR(err);
}
- i915->drm.dev_private = i915;
-
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
@@ -1480,16 +927,16 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
int ret;
- dev_priv = i915_driver_create(pdev, ent);
- if (IS_ERR(dev_priv))
- return PTR_ERR(dev_priv);
+ i915 = i915_driver_create(pdev, ent);
+ if (IS_ERR(i915))
+ return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
- dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+ i915->drm.driver_features &= ~DRIVER_ATOMIC;
/*
* Check if we support fake LMEM -- for now we only unleash this for
@@ -1497,13 +944,13 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
- if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
+ if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
i915_modparams.fake_lmem_start) {
- mkwrite_device_info(dev_priv)->memory_regions =
+ mkwrite_device_info(i915)->memory_regions =
REGION_SMEM | REGION_LMEM | REGION_STOLEN;
- mkwrite_device_info(dev_priv)->is_dgfx = true;
- GEM_BUG_ON(!HAS_LMEM(dev_priv));
- GEM_BUG_ON(!IS_DGFX(dev_priv));
+ mkwrite_device_info(i915)->is_dgfx = true;
+ GEM_BUG_ON(!HAS_LMEM(i915));
+ GEM_BUG_ON(!IS_DGFX(i915));
}
}
#endif
@@ -1512,48 +959,60 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_fini;
- ret = i915_driver_early_probe(dev_priv);
+ ret = i915_driver_early_probe(i915);
if (ret < 0)
goto out_pci_disable;
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_detect_vgpu(dev_priv);
+ intel_vgpu_detect(i915);
- ret = i915_driver_mmio_probe(dev_priv);
+ ret = i915_driver_mmio_probe(i915);
if (ret < 0)
goto out_runtime_pm_put;
- ret = i915_driver_hw_probe(dev_priv);
+ ret = i915_driver_hw_probe(i915);
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_driver_modeset_probe(dev_priv);
+ ret = i915_driver_modeset_probe_noirq(i915);
if (ret < 0)
goto out_cleanup_hw;
- i915_driver_register(dev_priv);
+ ret = intel_irq_install(i915);
+ if (ret)
+ goto out_cleanup_modeset;
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ ret = i915_driver_modeset_probe(i915);
+ if (ret < 0)
+ goto out_cleanup_irq;
+
+ i915_driver_register(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_welcome_messages(dev_priv);
+ i915_welcome_messages(i915);
return 0;
+out_cleanup_irq:
+ intel_irq_uninstall(i915);
+out_cleanup_modeset:
+ /* FIXME */
out_cleanup_hw:
- i915_driver_hw_remove(dev_priv);
- intel_memory_regions_driver_release(dev_priv);
- i915_ggtt_driver_release(dev_priv);
+ i915_driver_hw_remove(i915);
+ intel_memory_regions_driver_release(i915);
+ i915_ggtt_driver_release(i915);
out_cleanup_mmio:
- i915_driver_mmio_release(dev_priv);
+ i915_driver_mmio_release(i915);
out_runtime_pm_put:
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- i915_driver_late_release(dev_priv);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ i915_driver_late_release(i915);
out_pci_disable:
pci_disable_device(pdev);
out_fini:
- i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
- i915_driver_destroy(dev_priv);
+ i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
+ i915_driver_destroy(i915);
return ret;
}
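/*
 * Editor's sketch of the goto-unwind ladder the hunk above extends with
 * out_cleanup_irq/out_cleanup_modeset: each probe step that succeeds gains
 * a matching label further down, so a failure at step N unwinds exactly
 * steps N-1..1 in reverse order. step_a/step_b and the undo helper are
 * hypothetical stand-ins.
 */
static int example_probe(void)
{
        int ret;

        ret = step_a();
        if (ret)
                return ret;

        ret = step_b();
        if (ret)
                goto undo_a;    /* unwind only what already succeeded */

        return 0;

undo_a:
        undo_step_a();
        return ret;
}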
@@ -1563,13 +1022,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_unregister(i915);
- /*
- * After unregistering the device to prevent any new users, cancel
- * all in-flight requests so that we can quickly unbind the active
- * resources.
- */
- intel_gt_set_wedged(&i915->gt);
-
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
@@ -1581,6 +1033,10 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_modeset_remove(i915);
+ intel_irq_uninstall(i915);
+
+ i915_driver_modeset_remove_noirq(i915);
+
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
@@ -1667,10 +1123,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
drm_modeset_unlock_all(dev);
}
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume);
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
-
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
@@ -1722,7 +1174,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_hw(dev_priv);
- i915_gem_suspend_gtt_mappings(dev_priv);
+ i915_ggtt_suspend(&dev_priv->ggtt);
i915_save_state(dev_priv);
@@ -1757,7 +1209,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
disable_rpm_wakeref_asserts(rpm);
@@ -1770,11 +1222,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_display_power_suspend_late(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_suspend_complete(dev_priv);
-
+ ret = vlv_suspend_complete(dev_priv);
if (ret) {
- DRM_ERROR("Suspend complete failed: %d\n", ret);
+ drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
intel_power_domains_resume(dev_priv);
goto out;
@@ -1808,8 +1258,8 @@ int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
- if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
- state.event != PM_EVENT_FREEZE))
+ if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
+ state.event != PM_EVENT_FREEZE))
return -EINVAL;
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1833,9 +1283,9 @@ static int i915_drm_resume(struct drm_device *dev)
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
- DRM_ERROR("failed to re-enable GGTT\n");
+ drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
- i915_gem_restore_gtt_mappings(dev_priv);
+ i915_ggtt_resume(&dev_priv->ggtt);
i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1922,7 +1372,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
*/
ret = pci_set_power_state(pdev, PCI_D0);
if (ret) {
- DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "failed to set PCI D0 power state (%d)\n", ret);
return ret;
}
@@ -1946,11 +1397,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_resume_prepare(dev_priv, false);
+ ret = vlv_resume_prepare(dev_priv, false);
if (ret)
- DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Resume prepare failed: %d, continuing anyway\n", ret);
intel_uncore_resume_early(&dev_priv->uncore);
@@ -2117,391 +1567,16 @@ static int i915_pm_restore(struct device *kdev)
return i915_pm_resume(kdev);
}
-/*
- * Save all Gunit registers that may be lost after a D3 and a subsequent
- * S0i[R123] transition. The list of registers needing a save/restore is
- * defined in the VLV2_S0IXRegs document. This document marks all Gunit
- * registers in the following way:
- * - Driver: saved/restored by the driver
- * - Punit : saved/restored by the Punit firmware
- * - No, w/o marking: no need to save/restore, since the register is R/O or
- *                     used internally by the HW in a way that doesn't depend on
- * keeping the content across a suspend/resume.
- * - Debug : used for debugging
- *
- * We save/restore all registers marked with 'Driver', with the following
- * exceptions:
- * - Registers out of use, including also registers marked with 'Debug'.
- * These have no effect on the driver's operation, so we don't save/restore
- * them to reduce the overhead.
- * - Registers that are fully setup by an initialization function called from
- * the resume path. For example many clock gating and RPS/RC6 registers.
- * - Registers that provide the right functionality with their reset defaults.
- *
- * TODO: Except for registers that, based on the above 3 criteria, can be safely
- * ignored, we save/restore all others, practically treating the HW context as
- * a black-box for the driver. Further investigation is needed to reduce the
- * saved/restored registers even further, by following the same 3 criteria.
- */
-static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
- int i;
-
- if (!s)
- return;
-
- /* GAM 0x4000-0x4770 */
- s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
- s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
- s->arb_mode = I915_READ(ARB_MODE);
- s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
- s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
-
- for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
- s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
-
- s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
- s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
-
- s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
- s->ecochk = I915_READ(GAM_ECOCHK);
- s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
- s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
-
- s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
-
- /* MBC 0x9024-0x91D0, 0x8500 */
- s->g3dctl = I915_READ(VLV_G3DCTL);
- s->gsckgctl = I915_READ(VLV_GSCKGCTL);
- s->mbctl = I915_READ(GEN6_MBCTL);
-
- /* GCP 0x9400-0x9424, 0x8100-0x810C */
- s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
- s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
- s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
- s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
- s->rstctl = I915_READ(GEN6_RSTCTL);
- s->misccpctl = I915_READ(GEN7_MISCCPCTL);
-
- /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
- s->gfxpause = I915_READ(GEN6_GFXPAUSE);
- s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
- s->rpdeuc = I915_READ(GEN6_RPDEUC);
- s->ecobus = I915_READ(ECOBUS);
- s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
- s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
- s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
- s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
- s->rcedata = I915_READ(VLV_RCEDATA);
- s->spare2gh = I915_READ(VLV_SPAREG2H);
-
- /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
- s->gt_imr = I915_READ(GTIMR);
- s->gt_ier = I915_READ(GTIER);
- s->pm_imr = I915_READ(GEN6_PMIMR);
- s->pm_ier = I915_READ(GEN6_PMIER);
-
- for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
- s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
-
- /* GT SA CZ domain, 0x100000-0x138124 */
- s->tilectl = I915_READ(TILECTL);
- s->gt_fifoctl = I915_READ(GTFIFOCTL);
- s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
- s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- s->pmwgicz = I915_READ(VLV_PMWGICZ);
-
- /* Gunit-Display CZ domain, 0x182028-0x1821CF */
- s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
- s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
- s->pcbr = I915_READ(VLV_PCBR);
- s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
-
- /*
- * Not saving any of:
- * DFT, 0x9800-0x9EC0
- * SARB, 0xB000-0xB1FC
- * GAC, 0x5208-0x524C, 0x14000-0x14C000
- * PCI CFG
- */
-}
-
-static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
- u32 val;
- int i;
-
- if (!s)
- return;
-
- /* GAM 0x4000-0x4770 */
- I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
- I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
- I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
- I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
- I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
-
- for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
- I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
-
- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
- I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
-
- I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
- I915_WRITE(GAM_ECOCHK, s->ecochk);
- I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
- I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
-
- I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
-
- /* MBC 0x9024-0x91D0, 0x8500 */
- I915_WRITE(VLV_G3DCTL, s->g3dctl);
- I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
- I915_WRITE(GEN6_MBCTL, s->mbctl);
-
- /* GCP 0x9400-0x9424, 0x8100-0x810C */
- I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
- I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
- I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
- I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
- I915_WRITE(GEN6_RSTCTL, s->rstctl);
- I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
-
- /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
- I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
- I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
- I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
- I915_WRITE(ECOBUS, s->ecobus);
- I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
- I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
- I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
- I915_WRITE(VLV_RCEDATA, s->rcedata);
- I915_WRITE(VLV_SPAREG2H, s->spare2gh);
-
- /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
- I915_WRITE(GTIMR, s->gt_imr);
- I915_WRITE(GTIER, s->gt_ier);
- I915_WRITE(GEN6_PMIMR, s->pm_imr);
- I915_WRITE(GEN6_PMIER, s->pm_ier);
-
- for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
- I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
-
- /* GT SA CZ domain, 0x100000-0x138124 */
- I915_WRITE(TILECTL, s->tilectl);
- I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
- /*
- * Preserve the GT allow wake and GFX force clock bits; they are not
- * to be restored, as they are used to control the s0ix suspend/resume
- * sequence by the caller.
- */
- val = I915_READ(VLV_GTLC_WAKE_CTRL);
- val &= VLV_GTLC_ALLOWWAKEREQ;
- val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
- I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
-
- val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- val &= VLV_GFX_CLK_FORCE_ON_BIT;
- val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
- I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
-
- I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
-
- /* Gunit-Display CZ domain, 0x182028-0x1821CF */
- I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
- I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
- I915_WRITE(VLV_PCBR, s->pcbr);
- I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
-}
-
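/*
 * Editor's sketch of the masked restore performed above for
 * VLV_GTLC_WAKE_CTRL and the survivability register: keep the live
 * control bit(s) named by keep_mask, take everything else from the
 * saved snapshot. Generic illustration only.
 */
static u32 example_masked_restore(u32 live, u32 saved, u32 keep_mask)
{
        return (live & keep_mask) | (saved & ~keep_mask);
}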
-static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
- u32 mask, u32 val)
-{
- i915_reg_t reg = VLV_GTLC_PW_STATUS;
- u32 reg_value;
- int ret;
-
- /* The HW does not like us polling for PW_STATUS frequently, so
- * use the sleeping loop rather than risk the busy spin within
- * intel_wait_for_register().
- *
- * Transitioning between RC6 states should be at most 2ms (see
- * valleyview_enable_rps) so use a 3ms timeout.
- */
- ret = wait_for(((reg_value =
- intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
- == val, 3);
-
- /* just trace the final value */
- trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
-
- return ret;
-}
-
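/*
 * Editor's sketch of the sleeping poll described in the comment above:
 * the hardware dislikes tight PW_STATUS polling, so sleep between reads
 * instead of busy-spinning. read_status() and the 3ms budget are
 * illustrative assumptions, not i915 API.
 */
static int example_sleeping_poll(u32 mask, u32 val)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(3);

        for (;;) {
                if ((read_status() & mask) == val)
                        return 0;
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                usleep_range(10, 50);   /* sleep, don't busy-spin */
        }
}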
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
-{
- u32 val;
- int err;
-
- val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
- if (force_on)
- val |= VLV_GFX_CLK_FORCE_ON_BIT;
- I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
-
- if (!force_on)
- return 0;
-
- err = intel_wait_for_register(&dev_priv->uncore,
- VLV_GTLC_SURVIVABILITY_REG,
- VLV_GFX_CLK_STATUS_BIT,
- VLV_GFX_CLK_STATUS_BIT,
- 20);
- if (err)
- DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
- I915_READ(VLV_GTLC_SURVIVABILITY_REG));
-
- return err;
-}
-
-static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
-{
- u32 mask;
- u32 val;
- int err;
-
- val = I915_READ(VLV_GTLC_WAKE_CTRL);
- val &= ~VLV_GTLC_ALLOWWAKEREQ;
- if (allow)
- val |= VLV_GTLC_ALLOWWAKEREQ;
- I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
- POSTING_READ(VLV_GTLC_WAKE_CTRL);
-
- mask = VLV_GTLC_ALLOWWAKEACK;
- val = allow ? mask : 0;
-
- err = vlv_wait_for_pw_status(dev_priv, mask, val);
- if (err)
- DRM_ERROR("timeout disabling GT waking\n");
-
- return err;
-}
-
-static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
- bool wait_for_on)
-{
- u32 mask;
- u32 val;
-
- mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
- val = wait_for_on ? mask : 0;
-
- /*
- * RC6 transitioning can be delayed up to 2 msec (see
- * valleyview_enable_rps), use 3 msec for safety.
- *
- * This can fail to turn off RC6 if the GPU is stuck after a failed
- * reset and we are trying to force the machine to sleep.
- */
- if (vlv_wait_for_pw_status(dev_priv, mask, val))
- DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
- onoff(wait_for_on));
-}
-
-static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
-{
- if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
- return;
-
- DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
- I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
-}
-
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
-{
- u32 mask;
- int err;
-
- /*
- * Bspec defines the following GT well-on flags as debug only, so
- * don't treat them as hard failures.
- */
- vlv_wait_for_gt_wells(dev_priv, false);
-
- mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
- WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
-
- vlv_check_no_gt_access(dev_priv);
-
- err = vlv_force_gfx_clock(dev_priv, true);
- if (err)
- goto err1;
-
- err = vlv_allow_gt_wake(dev_priv, false);
- if (err)
- goto err2;
-
- vlv_save_gunit_s0ix_state(dev_priv);
-
- err = vlv_force_gfx_clock(dev_priv, false);
- if (err)
- goto err2;
-
- return 0;
-
-err2:
- /* For safety always re-enable waking and disable gfx clock forcing */
- vlv_allow_gt_wake(dev_priv, true);
-err1:
- vlv_force_gfx_clock(dev_priv, false);
-
- return err;
-}
-
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- int err;
- int ret;
-
- /*
- * If any of the steps fail, just try to continue; that's the best we
- * can do at this point. Return the first error code (which will also
- * leave RPM permanently disabled).
- */
- ret = vlv_force_gfx_clock(dev_priv, true);
-
- vlv_restore_gunit_s0ix_state(dev_priv);
-
- err = vlv_allow_gt_wake(dev_priv, true);
- if (!ret)
- ret = err;
-
- err = vlv_force_gfx_clock(dev_priv, false);
- if (!ret)
- ret = err;
-
- vlv_check_no_gt_access(dev_priv);
-
- if (rpm_resume)
- intel_init_clock_gating(dev_priv);
-
- return ret;
-}
-
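/*
 * Editor's sketch of the "continue on error, report the first failure"
 * policy vlv_resume_prepare() documents above: every step runs
 * regardless, but only the earliest error code is returned.
 * step_a/step_b/step_c are hypothetical.
 */
static int example_best_effort_resume(void)
{
        int ret, err;

        ret = step_a();         /* first error, if any, wins */

        err = step_b();
        if (!ret)
                ret = err;

        err = step_c();
        if (!ret)
                ret = err;

        return ret;
}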
static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- DRM_DEBUG_KMS("Suspending device\n");
+ drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
disable_rpm_wakeref_asserts(rpm);
@@ -2519,11 +1594,10 @@ static int intel_runtime_suspend(struct device *kdev)
intel_display_power_suspend(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_suspend_complete(dev_priv);
-
+ ret = vlv_suspend_complete(dev_priv);
if (ret) {
- DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Runtime suspend failed, disabling it (%d)\n", ret);
intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2541,7 +1615,8 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_driver_release(rpm);
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
- DRM_ERROR("Unclaimed access detected prior to suspending\n");
+ drm_err(&dev_priv->drm,
+ "Unclaimed access detected prior to suspending\n");
rpm->suspended = true;
@@ -2573,7 +1648,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
- DRM_DEBUG_KMS("Device suspended\n");
+ drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
return 0;
}
@@ -2581,25 +1656,25 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- DRM_DEBUG_KMS("Resuming device\n");
+ drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
- WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
+ drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
disable_rpm_wakeref_asserts(rpm);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
- DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
+ drm_dbg(&dev_priv->drm,
+ "Unclaimed access during suspend, bios?\n");
intel_display_power_resume(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_resume_prepare(dev_priv, true);
+ ret = vlv_resume_prepare(dev_priv, true);
intel_uncore_runtime_resume(&dev_priv->uncore);
@@ -2625,9 +1700,10 @@ static int intel_runtime_resume(struct device *kdev)
enable_rpm_wakeref_asserts(rpm);
if (ret)
- DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Runtime resume failed, disabling it (%d)\n", ret);
else
- DRM_DEBUG_KMS("Device resumed\n");
+ drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
return ret;
}
@@ -2675,12 +1751,12 @@ const struct dev_pm_ops i915_pm_ops = {
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .release = drm_release,
+ .release = drm_release_noglobal,
.unlocked_ioctl = drm_ioctl,
.mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
- .compat_ioctl = i915_compat_ioctl,
+ .compat_ioctl = i915_ioc32_compat_ioctl,
.llseek = noop_llseek,
};
@@ -2772,9 +1848,6 @@ static struct drm_driver driver = {
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = i915_get_crtc_scanoutpos,
-
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 810e3ccd56ec..62b901ffabf9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,6 @@
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
-#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -70,6 +69,7 @@
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
+#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
@@ -104,18 +104,23 @@
#include "intel_region_lmem.h"
-#include "intel_gvt.h"
-
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200114"
-#define DRIVER_TIMESTAMP 1579001978
+#define DRIVER_DATE "20200313"
+#define DRIVER_TIMESTAMP 1584144591
struct drm_i915_gem_object;
+/*
+ * The code assumes that the hpd_pins below have consecutive values,
+ * starting with HPD_PORT_A; the HPD pin associated with any port can be
+ * retrieved by adding the corresponding port (or phy) enum value to
+ * HPD_PORT_A in most cases. For example:
+ * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
+ */
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
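/*
 * Editor's illustration of the consecutive-value assumption documented
 * above: with pins laid out contiguously from HPD_PORT_A, the pin for a
 * phy is plain enum arithmetic. Hypothetical helper, mirroring
 * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A.
 */
static enum hpd_pin example_hpd_pin_for_phy(enum phy phy)
{
        return HPD_PORT_A + phy - PHY_A;
}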
@@ -203,9 +208,7 @@ struct drm_i915_file_private {
} mm;
struct xarray context_xa;
-
- struct idr vm_idr;
- struct mutex vm_idr_lock; /* guards vm_idr */
+ struct xarray vm_xa;
unsigned int bsd_engine;
@@ -255,18 +258,19 @@ struct sdvo_device_mapping {
struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
-struct intel_crtc_state;
+struct intel_cdclk_config;
+struct intel_cdclk_state;
+struct intel_cdclk_vals;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
-struct intel_cdclk_state;
struct drm_i915_display_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state);
+ struct intel_cdclk_config *cdclk_config);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
@@ -280,7 +284,7 @@ struct drm_i915_display_funcs {
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
- int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
u8 (*calc_voltage_level)(int cdclk);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -504,7 +508,7 @@ struct i915_psr {
u16 su_x_granularity;
bool dc3co_enabled;
u32 dc3co_exit_delay;
- struct delayed_work idle_work;
+ struct delayed_work dc3co_work;
bool force_mode_changed;
};
@@ -732,19 +736,10 @@ enum intel_ddb_partitioning {
INTEL_DDB_PART_5_6, /* IVB+ */
};
-struct intel_wm_level {
- bool enable;
- u32 pri_val;
- u32 spr_val;
- u32 cur_val;
- u32 fbc_val;
-};
-
struct ilk_wm_values {
u32 wm_pipe[3];
u32 wm_lp[3];
u32 wm_lp_spr[3];
- u32 wm_linetime[3];
bool enable_fbc_wm;
enum intel_ddb_partitioning partitioning;
};
@@ -799,65 +794,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-struct skl_ddb_allocation {
- u8 enabled_slices; /* GEN11 has configurable 2 slices */
-};
-
-struct skl_ddb_values {
- unsigned dirty_pipes;
- struct skl_ddb_allocation ddb;
-};
-
-struct skl_wm_level {
- u16 min_ddb_alloc;
- u16 plane_res_b;
- u8 plane_res_l;
- bool plane_en;
- bool ignore_lines;
-};
-
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
- bool x_tiled, y_tiled;
- bool rc_surface;
- bool is_planar;
- u32 width;
- u8 cpp;
- u32 plane_pixel_rate;
- u32 y_min_scanlines;
- u32 plane_bytes_per_line;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t y_tile_minimum;
- u32 linetime_us;
- u32 dbuf_block_size;
-};
-
-enum intel_pipe_crc_source {
- INTEL_PIPE_CRC_SOURCE_NONE,
- INTEL_PIPE_CRC_SOURCE_PLANE1,
- INTEL_PIPE_CRC_SOURCE_PLANE2,
- INTEL_PIPE_CRC_SOURCE_PLANE3,
- INTEL_PIPE_CRC_SOURCE_PLANE4,
- INTEL_PIPE_CRC_SOURCE_PLANE5,
- INTEL_PIPE_CRC_SOURCE_PLANE6,
- INTEL_PIPE_CRC_SOURCE_PLANE7,
- INTEL_PIPE_CRC_SOURCE_PIPE,
- /* TV/DP on pre-gen5/vlv can't use the pipe source. */
- INTEL_PIPE_CRC_SOURCE_TV,
- INTEL_PIPE_CRC_SOURCE_DP_B,
- INTEL_PIPE_CRC_SOURCE_DP_C,
- INTEL_PIPE_CRC_SOURCE_DP_D,
- INTEL_PIPE_CRC_SOURCE_AUTO,
- INTEL_PIPE_CRC_SOURCE_MAX,
-};
-
-#define INTEL_PIPE_CRC_ENTRIES_NR 128
-struct intel_pipe_crc {
- spinlock_t lock;
- int skipped;
- enum intel_pipe_crc_source source;
-};
-
struct i915_frontbuffer_tracking {
spinlock_t lock;
@@ -875,14 +811,7 @@ struct i915_virtual_gpu {
u32 caps;
};
-/* used in computing the new watermarks state */
-struct intel_wm_config {
- unsigned int num_pipes_active;
- bool sprites_enabled;
- bool sprites_scaled;
-};
-
-struct intel_cdclk_state {
+struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
};
@@ -1002,33 +931,18 @@ struct drm_i915_private {
unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
- unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int fdi_pll_freq;
unsigned int czclk_freq;
- /*
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
- */
struct {
- /*
- * The current logical cdclk state.
- * See intel_atomic_state.cdclk.logical
- */
- struct intel_cdclk_state logical;
- /*
- * The current actual cdclk state.
- * See intel_atomic_state.cdclk.actual
- */
- struct intel_cdclk_state actual;
- /* The current hardware cdclk state */
- struct intel_cdclk_state hw;
+ /* The current hardware cdclk configuration */
+ struct intel_cdclk_config hw;
/* cdclk, divider, and ratio table from bspec */
const struct intel_cdclk_vals *table;
- int force_min_cdclk;
+ struct intel_global_obj obj;
} cdclk;
/**
@@ -1068,31 +982,32 @@ struct drm_i915_private {
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
-#ifdef CONFIG_DEBUG_FS
- struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
-#endif
+ /**
+ * dpll and cdclk state is protected by connection_mutex.
+ * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
+ * Must be global rather than per dpll, because on some platforms plls
+ * share registers.
+ */
+ struct {
+ struct mutex lock;
- /* dpll and cdclk state is protected by connection_mutex */
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- const struct intel_dpll_mgr *dpll_mgr;
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
- /*
- * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
- * Must be global rather than per dpll, because on some platforms
- * plls share registers.
- */
- struct mutex dpll_lock;
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+ } dpll;
+
+ struct list_head global_obj_list;
/*
- * For reading active_pipes, min_cdclk, min_voltage_level holding
- * any crtc lock is sufficient, for writing must hold all of them.
+ * For reading active_pipes holding any crtc lock is
+ * sufficient, for writing must hold all of them.
*/
u8 active_pipes;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@@ -1105,8 +1020,6 @@ struct drm_i915_private {
struct work_struct free_work;
} atomic_helper;
- u16 orig_clock;
-
bool mchbar_need_disable;
struct intel_l3_parity l3_parity;
@@ -1191,7 +1104,6 @@ struct drm_i915_private {
/* current hardware state */
union {
struct ilk_wm_values hw;
- struct skl_ddb_values skl_hw;
struct vlv_wm_values vlv;
struct g4x_wm_values g4x;
};
@@ -1213,6 +1125,8 @@ struct drm_i915_private {
bool distrust_bios_wm;
} wm;
+ u8 enabled_dbuf_slices_mask; /* GEN11 has 2 configurable slices */
+
struct dram_info {
bool valid;
bool is_16gb_dimm;
@@ -1236,7 +1150,7 @@ struct drm_i915_private {
u8 num_planes;
} max_bw[6];
- struct drm_private_obj bw_obj;
+ struct intel_global_obj bw_obj;
struct intel_runtime_pm runtime_pm;
@@ -1300,16 +1214,6 @@ struct drm_i915_private {
*/
};
-struct dram_dimm_info {
- u8 size, width, ranks;
-};
-
-struct dram_channel_info {
- struct dram_dimm_info dimm_l, dimm_s;
- u8 ranks;
- bool is_16gb_dimm;
-};
-
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return container_of(dev, struct drm_i915_private, drm);
@@ -1580,6 +1484,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GLK_REVID_A0 0x0
#define GLK_REVID_A1 0x1
+#define GLK_REVID_A2 0x2
+#define GLK_REVID_B0 0x3
#define IS_GLK_REVID(dev_priv, since, until) \
(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
@@ -1601,6 +1507,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_ICELAKE(p) && IS_REVID(p, since, until))
#define TGL_REVID_A0 0x0
+#define TGL_REVID_B0 0x1
+#define TGL_REVID_C0 0x2
#define IS_TGL_REVID(p, since, until) \
(IS_TIGERLAKE(p) && IS_REVID(p, since, until))
@@ -1718,10 +1626,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* Having GuC is not the same as using GuC */
-#define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc)
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
-
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
@@ -1767,11 +1671,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
/* i915_drv.c */
-#ifdef CONFIG_COMPAT
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
-#define i915_compat_ioctl NULL
-#endif
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -1780,18 +1679,6 @@ void i915_driver_remove(struct drm_i915_private *i915);
int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gvt;
-}
-
-static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.active;
-}
-
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1856,12 +1743,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __must_check
-i915_mutex_lock_interruptible(struct drm_device *dev)
-{
- return mutex_lock_interruptible(&dev->struct_mutex);
-}
-
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2010,20 +1891,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-/* register wait wrappers for display regs */
-#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
- intel_wait_for_register(&(dev_priv_)->uncore, \
- (reg_), (mask_), (value_), (timeout_))
-
-#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({ \
- u32 mask__ = (mask_); \
- intel_de_wait_for_register((dev_priv_), (reg_), \
- mask__, mask__, (timeout_)); \
-})
-
-#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
- intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))
-
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2046,10 +1913,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
-{
- return intel_guc_is_submission_supported(guc) &&
- intel_guc_is_running(guc);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5f6e63952821..ca5420012a22 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_vma_manager.h>
-#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
@@ -941,9 +940,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if (i915_gem_object_never_bind_ggtt(obj))
- return ERR_PTR(-ENODEV);
-
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/*
@@ -1009,6 +1005,12 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
if (ret)
return ERR_PTR(ret);
+ ret = i915_vma_wait_for_bind(vma);
+ if (ret) {
+ i915_vma_unpin(vma);
+ return ERR_PTR(ret);
+ }
+
return vma;
}
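/*
 * Editor's sketch of the error path added above: once the vma is pinned,
 * any later failure must unpin before converting the error into an
 * ERR_PTR, or the pin count leaks. acquire()/wait_ready() are
 * hypothetical stand-ins.
 */
static struct i915_vma *example_pin_and_wait(struct i915_vma *vma)
{
        int ret;

        ret = acquire(vma);             /* takes a pin on success */
        if (ret)
                return ERR_PTR(ret);

        ret = wait_ready(vma);
        if (ret) {
                i915_vma_unpin(vma);    /* drop the pin we took */
                return ERR_PTR(ret);
        }

        return vma;
}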
@@ -1153,7 +1155,7 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
- i915_gem_restore_gtt_mappings(dev_priv);
+ i915_ggtt_resume(&dev_priv->ggtt);
i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
}
@@ -1201,7 +1203,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
+ drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
@@ -1229,7 +1231,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
- WARN_ON(dev_priv->mm.shrink_count);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -1269,7 +1271,8 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+ drm_WARN_ON(&i915->drm,
+ i915_gem_object_set_to_cpu_domain(obj, true));
i915_gem_object_unlock(obj);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0697bedebeef..02ad1acd117c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,8 +26,6 @@
*
*/
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"
@@ -130,6 +128,13 @@ search_again:
active = NULL;
INIT_LIST_HEAD(&eviction_list);
list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
+ if (vma == active) { /* now seen this vma twice */
+ if (flags & PIN_NONBLOCK)
+ break;
+
+ active = ERR_PTR(-EAGAIN);
+ }
+
/*
* We keep this list in a rough least-recently scanned order
* of active elements (inactive elements are cheap to reap).
@@ -145,21 +150,12 @@ search_again:
* To notice when we complete one full cycle, we record the
* first active element seen, before moving it to the tail.
*/
- if (i915_vma_is_active(vma)) {
- if (vma == active) {
- if (flags & PIN_NONBLOCK)
- break;
-
- active = ERR_PTR(-EAGAIN);
- }
-
- if (active != ERR_PTR(-EAGAIN)) {
- if (!active)
- active = vma;
+ if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+ if (!active)
+ active = vma;
- list_move_tail(&vma->vm_link, &vm->bound_list);
- continue;
- }
+ list_move_tail(&vma->vm_link, &vm->bound_list);
+ continue;
}
if (mark_free(&scan, vma, flags, &eviction_list))
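/*
 * Editor's sketch of the cycle detection the hunk above hoists to the
 * top of the loop: the first active vma encountered becomes a sentinel;
 * because active vmas are rotated to the list tail, meeting the sentinel
 * again means one full pass completed without freeing enough space.
 * Schematic only -- types and helpers as in i915, error/PIN_NONBLOCK
 * handling omitted.
 */
list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
        if (vma == active)      /* sentinel met twice: full cycle */
                break;
        if (i915_vma_is_active(vma)) {
                if (!active)
                        active = vma;   /* remember the sentinel */
                list_move_tail(&vma->vm_link, &vm->bound_list);
                continue;
        }
        /* inactive: candidate for eviction */
}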
@@ -292,7 +288,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
- /* If we are using coloring to insert guard pages between
+ /*
+ * If we are using coloring to insert guard pages between
* different cache domains within the address space, we have
* to check whether the objects on either side of our range
* abut and conflict. If they are in conflict, then we evict
@@ -309,22 +306,18 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
}
}
- if (flags & PIN_NONBLOCK &&
- (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
+ if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
break;
}
- /* Overlap of objects in the same batch? */
- if (i915_vma_is_pinned(vma)) {
+ if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
ret = -ENOSPC;
- if (vma->exec_flags &&
- *vma->exec_flags & EXEC_OBJECT_PINNED)
- ret = -EINVAL;
break;
}
- /* Never show fear in the face of dragons!
+ /*
+ * Never show fear in the face of dragons!
*
* We cannot directly remove this node from within this
* iterator and as with i915_gem_evict_something() we employ
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d9c34a23cd67..d152b648c73c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,10 +21,9 @@
* IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
@@ -237,11 +236,12 @@ static int fence_update(struct i915_fence_reg *fence,
if (!i915_vma_is_map_and_fenceable(vma))
return -EINVAL;
- if (WARN(!i915_gem_object_get_stride(vma->obj) ||
- !i915_gem_object_get_tiling(vma->obj),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- i915_gem_object_get_stride(vma->obj),
- i915_gem_object_get_tiling(vma->obj)))
+ if (drm_WARN(&uncore->i915->drm,
+ !i915_gem_object_get_stride(vma->obj) ||
+ !i915_gem_object_get_tiling(vma->obj),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ i915_gem_object_get_stride(vma->obj),
+ i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
ret = i915_vma_sync(vma);
@@ -713,7 +713,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
}
if (dcc == 0xffffffff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
+ drm_err(&i915->drm, "Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e039eb56900f..cb43381b0d37 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -15,8 +15,6 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
@@ -63,7 +61,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
/* XXX This does not prevent more requests being submitted! */
if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
-MAX_SCHEDULE_TIMEOUT)) {
- DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
+ drm_err(&dev_priv->drm,
+ "Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9e401a5fcae8..5c8e51d2ba5b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -37,6 +37,7 @@
#include <drm/drm_print.h>
#include "display/intel_atomic.h"
+#include "display/intel_csr.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -47,7 +48,6 @@
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
-#include "intel_csr.h"
#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
@@ -450,6 +450,14 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
+
+ if (INTEL_GEN(m->i915) < 12)
+ return;
+
+ err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
+ ee->instdone.slice_common_extra[0]);
+ err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
+ ee->instdone.slice_common_extra[1]);
}
static void error_print_request(struct drm_i915_error_state_buf *m,
@@ -473,9 +481,13 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+ const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+
+ err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
- ctx->guilty, ctx->active);
+ ctx->guilty, ctx->active,
+ ctx->total_runtime * period,
+ mul_u32_u32(ctx->avg_runtime, period));
}
static struct i915_vma_coredump *
@@ -515,6 +527,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
(u32)(ee->acthd>>32), (u32)ee->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
+ err_printf(m, " ESR: 0x%08x\n", ee->esr);
error_print_instdone(m, ee);
@@ -1102,6 +1115,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
}
if (INTEL_GEN(i915) >= 4) {
+ ee->esr = ENGINE_READ(engine, RING_ESR);
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
@@ -1193,8 +1207,6 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
static void record_request(const struct i915_request *request,
struct i915_request_coredump *erq)
{
- const struct i915_gem_context *ctx;
-
erq->flags = request->fence.flags;
erq->context = request->fence.context;
erq->seqno = request->fence.seqno;
@@ -1205,9 +1217,13 @@ static void record_request(const struct i915_request *request,
erq->pid = 0;
rcu_read_lock();
- ctx = rcu_dereference(request->context->gem_context);
- if (ctx)
- erq->pid = pid_nr(ctx->pid);
+ if (!intel_context_is_closed(request->context)) {
+ const struct i915_gem_context *ctx;
+
+ ctx = rcu_dereference(request->context->gem_context);
+ if (ctx)
+ erq->pid = pid_nr(ctx->pid);
+ }
rcu_read_unlock();
}
@@ -1228,7 +1244,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
{
struct i915_gem_context *ctx;
struct task_struct *task;
- bool capture;
+ bool simulated;
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
@@ -1236,7 +1252,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
ctx = NULL;
rcu_read_unlock();
if (!ctx)
- return false;
+ return true;
rcu_read_lock();
task = pid_task(ctx->pid, PIDTYPE_PID);
@@ -1250,10 +1266,13 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
- capture = i915_gem_context_no_error_capture(ctx);
+ e->total_runtime = rq->context->runtime.total;
+ e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
+
+ simulated = i915_gem_context_no_error_capture(ctx);
i915_gem_context_put(ctx);
- return capture;
+ return simulated;
}
struct intel_engine_capture_vma {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index e4a6afed3bbf..0d1f6c8ff355 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -75,6 +75,7 @@ struct intel_engine_coredump {
u32 hws;
u32 ipeir;
u32 ipehr;
+ u32 esr;
u32 bbstate;
u32 instpm;
u32 instps;
@@ -87,6 +88,10 @@ struct intel_engine_coredump {
struct i915_gem_context_coredump {
char comm[TASK_COMM_LEN];
+
+ u64 total_runtime;
+ u32 avg_runtime;
+
pid_t pid;
int active;
int guilty;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index c1007245f46d..8e45ca3d2ede 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,9 +28,10 @@
*/
#include <linux/compat.h>
-#include <drm/i915_drm.h>
#include <drm/drm_ioctl.h>
+
#include "i915_drv.h"
+#include "i915_ioc32.h"
struct drm_i915_getparam32 {
s32 param;
@@ -67,7 +68,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
};
/**
- * i915_compat_ioctl - handle the mistakes of the past
+ * i915_ioc32_compat_ioctl - handle the mistakes of the past
* @filp: the file pointer
* @cmd: the ioctl command (and encoded flags)
* @arg: the ioctl argument (from userspace)
@@ -75,7 +76,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*/
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.h b/drivers/gpu/drm/i915/i915_ioc32.h
new file mode 100644
index 000000000000..40dcd55ca213
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_IOC32_H__
+#define __I915_IOC32_H__
+
+#ifdef CONFIG_COMPAT
+struct file;
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#else
+#define i915_ioc32_compat_ioctl NULL
+#endif
+
+#endif /* __I915_IOC32_H__ */
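/*
 * Editor's note: because the header above defines i915_ioc32_compat_ioctl
 * as NULL when CONFIG_COMPAT is off, callers can assign it
 * unconditionally -- exactly what the i915_driver_fops change earlier in
 * this patch does. Hypothetical minimal example:
 */
static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
        .compat_ioctl = i915_ioc32_compat_ioctl, /* NULL without COMPAT */
};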
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afc6aad9bf8c..8a2b83807ffc 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,7 +34,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
-#include <drm/i915_drm.h>
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
@@ -79,7 +78,7 @@ static const u32 hpd_ibx[HPD_NUM_PINS] = {
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
- [HPD_PORT_D] = SDE_PORTD_HOTPLUG
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};
static const u32 hpd_cpt[HPD_NUM_PINS] = {
@@ -87,7 +86,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
- [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};
static const u32 hpd_spt[HPD_NUM_PINS] = {
@@ -95,7 +94,7 @@ static const u32 hpd_spt[HPD_NUM_PINS] = {
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
- [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+ [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
@@ -104,7 +103,7 @@ static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
@@ -113,7 +112,7 @@ static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
@@ -122,21 +121,21 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
- [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
+ [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
};
static const u32 hpd_gen11[HPD_NUM_PINS] = {
[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
- [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+ [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
};
static const u32 hpd_gen12[HPD_NUM_PINS] = {
@@ -145,7 +144,7 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = {
[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
- [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
+ [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
};
static const u32 hpd_icp[HPD_NUM_PINS] = {
@@ -169,6 +168,14 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
+static void
+intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_handle_vblank(&crtc->base);
+}
+
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier)
{
@@ -208,8 +215,9 @@ static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
if (val == 0)
return;
- WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(reg), val);
+ drm_WARN(&uncore->i915->drm, 1,
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(reg), val);
intel_uncore_write(uncore, reg, 0xffffffff);
intel_uncore_posting_read(uncore, reg);
intel_uncore_write(uncore, reg, 0xffffffff);
@@ -223,8 +231,9 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
if (val == 0)
return;
- WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(GEN2_IIR), val);
+ drm_WARN(&uncore->i915->drm, 1,
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(GEN2_IIR), val);
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
intel_uncore_posting_read16(uncore, GEN2_IIR);
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
@@ -262,7 +271,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
u32 val;
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(bits & ~mask);
+ drm_WARN_ON(&dev_priv->drm, bits & ~mask);
val = I915_READ(PORT_HOTPLUG_EN);
val &= ~mask;
@@ -305,9 +314,9 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->irq_mask;
@@ -336,9 +345,9 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
old_val = I915_READ(GEN8_DE_PORT_IMR);
@@ -369,9 +378,9 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->de_irq_mask[pipe];
@@ -399,11 +408,11 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
I915_WRITE(SDEIMR, sdeimr);
@@ -425,13 +434,15 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
* On pipe A we don't support the PSR interrupt yet,
* on pipe B and C the same bit MBZ.
*/
- if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ status_mask & PIPE_A_PSR_STATUS_VLV))
return 0;
/*
* On pipe B and C we don't support the PSR interrupt yet, on pipe
* A the same bit is for perf counters which we don't use either.
*/
- if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ status_mask & PIPE_B_PSR_STATUS_VLV))
return 0;
enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
@@ -443,10 +454,11 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
out:
- WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
- pipe_name(pipe), enable_mask, status_mask);
+ drm_WARN_ONCE(&dev_priv->drm,
+ enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask);
return enable_mask;
}
@@ -457,12 +469,12 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: status_mask=0x%x\n",
- pipe_name(pipe), status_mask);
+ drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
@@ -480,12 +492,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: status_mask=0x%x\n",
- pipe_name(pipe), status_mask);
+ drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
return;
@@ -624,9 +636,9 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc)
* register.
*/
do {
- high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
- low = I915_READ_FW(low_frame);
- high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
+ high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = intel_de_read_fw(dev_priv, low_frame);
+ high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -683,15 +695,17 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
* pipe frame time stamp. The time stamp value
* is sampled at every start of vertical blank.
*/
- scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+ scan_prev_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
/*
* The TIMESTAMP_CTR register has the current
* time stamp value.
*/
- scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
+ scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
- scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+ scan_post_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
@@ -702,7 +716,10 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
return scanline;
}
-/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
+/*
+ * intel_de_read_fw(), only for fast reads of the display block; no need for
+ * forcewake etc.
+ */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -726,9 +743,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vtotal /= 2;
if (IS_GEN(dev_priv, 2))
- position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
- position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
/*
* On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -747,7 +764,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
for (i = 0; i < 100; i++) {
udelay(1);
- temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+ temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
if (temp != position) {
position = temp;
break;
@@ -762,13 +779,15 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *dev = _crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
@@ -777,9 +796,10 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
- if (WARN_ON(!mode->crtc_clock)) {
- DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
- "pipe %c\n", pipe_name(pipe));
+ if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
+ drm_dbg(&dev_priv->drm,
+ "trying to get scanoutpos for disabled "
+ "pipe %c\n", pipe_name(pipe));
return false;
}
@@ -818,7 +838,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
* We can split this into vertical and horizontal
* scanout position.
*/
- position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+ position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
/* convert to pixel counts */
vbl_start *= htotal;
@@ -879,6 +899,14 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
return true;
}
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ i915_get_crtc_scanoutpos);
+}
+
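[Note: a hook with this signature is normally installed through struct drm_crtc_funcs (drm/drm_crtc.h), which gained per-CRTC vblank callbacks in this kernel era. A hedged sketch of the wiring — the initializer below is illustrative, not lifted from the patch:

        /* Illustrative only: how the new timestamp hook is typically plugged in. */
        static const struct drm_crtc_funcs example_crtc_funcs = {
                .get_vblank_counter   = i915_get_vblank_counter,
                .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
                /* ...remaining callbacks elided... */
        };
]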
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -918,7 +946,7 @@ static void ivb_parity_work(struct work_struct *work)
mutex_lock(&dev_priv->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
- if (WARN_ON(!dev_priv->l3_parity.which_slice))
+ if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
goto out;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -929,7 +957,8 @@ static void ivb_parity_work(struct work_struct *work)
i915_reg_t reg;
slice--;
- if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ slice >= NUM_L3_SLICES(dev_priv)))
break;
dev_priv->l3_parity.which_slice &= ~(1<<slice);
@@ -966,7 +995,7 @@ static void ivb_parity_work(struct work_struct *work)
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
out:
- WARN_ON(dev_priv->l3_parity.which_slice);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
spin_lock_irq(&gt->irq_lock);
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&gt->irq_lock);
@@ -1165,8 +1194,9 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
*long_mask |= BIT(pin);
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+ drm_dbg(&dev_priv->drm,
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
@@ -1187,8 +1217,8 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
trace_intel_pipe_crc(crtc, crcs);
@@ -1351,7 +1381,7 @@ static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1369,7 +1399,7 @@ static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1393,7 +1423,7 @@ static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1419,7 +1449,7 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1463,9 +1493,9 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
}
- WARN_ONCE(1,
- "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- I915_READ(PORT_HOTPLUG_STAT));
+ drm_WARN_ONCE(&dev_priv->drm, 1,
+ "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+ I915_READ(PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -1603,7 +1633,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 master_ctl, iir;
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
- u32 gt_iir[4];
u32 ier = 0;
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
@@ -1631,7 +1660,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
- gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1655,8 +1684,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
-
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1710,8 +1737,8 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
- port_name(port));
+ drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ port_name(port));
}
if (pch_iir & SDE_AUX_MASK)
@@ -1721,25 +1748,27 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- DRM_ERROR("PCH poison interrupt\n");
+ drm_err(&dev_priv->drm, "PCH poison interrupt\n");
- if (pch_iir & SDE_FDI_MASK)
+ if (pch_iir & SDE_FDI_MASK) {
for_each_pipe(dev_priv, pipe)
- DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
- pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+ drm_dbg(&dev_priv->drm,
+ "PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
@@ -1754,7 +1783,7 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- DRM_ERROR("Poison interrupt\n");
+ drm_err(&dev_priv->drm, "Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
@@ -1777,7 +1806,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- DRM_ERROR("PCH poison interrupt\n");
+ drm_err(&dev_priv->drm, "PCH poison interrupt\n");
for_each_pipe(dev_priv, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
@@ -1796,8 +1825,8 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
- port_name(port));
+ drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ port_name(port));
}
if (pch_iir & SDE_AUX_MASK_CPT)
@@ -1807,16 +1836,17 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+ drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+ drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
- if (pch_iir & SDE_FDI_MASK_CPT)
+ if (pch_iir & SDE_FDI_MASK_CPT) {
for_each_pipe(dev_priv, pipe)
- DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
- pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & SDE_ERROR_CPT)
cpt_serr_int_handler(dev_priv);
@@ -1844,8 +1874,9 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
pins = hpd_icp;
} else {
- WARN(!HAS_PCH_ICP(dev_priv),
- "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
+ drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
+ "Unrecognized PCH type 0x%x\n",
+ INTEL_PCH_TYPE(dev_priv));
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
@@ -1952,11 +1983,11 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(dev_priv);
if (de_iir & DE_POISON)
- DRM_ERROR("Poison interrupt\n");
+ drm_err(&dev_priv->drm, "Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2009,7 +2040,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
}
/* check event from PCH */
@@ -2153,7 +2184,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (pin_mask)
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
else
- DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+ drm_err(&dev_priv->drm,
+ "Unexpected DE HPD interrupt 0x%08x\n", iir);
}
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
@@ -2226,7 +2258,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (!found)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
+ drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}
static irqreturn_t
@@ -2243,7 +2275,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
- DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE MISC)!\n");
}
}
@@ -2254,7 +2287,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
gen11_hpd_irq_handler(dev_priv, iir);
} else {
- DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied, (DE HPD)!\n");
}
}
@@ -2294,10 +2328,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (!found)
- DRM_ERROR("Unexpected DE Port interrupt\n");
+ drm_err(&dev_priv->drm,
+ "Unexpected DE Port interrupt\n");
}
else
- DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(dev_priv, pipe) {
@@ -2308,7 +2344,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE PIPE)!\n");
continue;
}
@@ -2316,7 +2353,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
@@ -2326,9 +2363,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
- pipe_name(pipe),
- fault_errors);
+ drm_err(&dev_priv->drm,
+ "Fault errors on pipe %c: 0x%08x\n",
+ pipe_name(pipe),
+ fault_errors);
}
if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
@@ -2354,7 +2392,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+ drm_dbg(&dev_priv->drm,
+ "The master control interrupt lied (SDE)!\n");
}
}
@@ -2384,7 +2423,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = arg;
void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
- u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -2395,8 +2433,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
- /* Find, clear, then process each source of interrupt */
- gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
+ /* Find, queue (onto bottom-halves), then clear each source */
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -2407,8 +2445,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
-
return IRQ_HANDLED;
}
@@ -2491,7 +2527,7 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
return IRQ_NONE;
}
- /* Find, clear, then process each source of interrupt. */
+ /* Find, queue (onto bottom-halves), then clear each source */
gen11_gt_irq_handler(gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
@@ -2686,7 +2722,7 @@ static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
if (HAS_PCH_NOP(dev_priv))
return;
- WARN_ON(I915_READ(SDEIER) != 0);
+ drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
}
@@ -2733,7 +2769,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- WARN_ON(dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
@@ -3163,8 +3199,9 @@ static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE;
- DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
- hotplug, enabled_irqs);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invert bit setting: hp_ctl:%x hp_port:%x\n",
+ hotplug, enabled_irqs);
hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
/*
@@ -3321,9 +3358,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+ u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+ GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
- u32 de_port_masked = GEN8_AUX_CHANNEL_A;
+ u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
enum pipe pipe;
@@ -3331,21 +3369,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) <= 10)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (INTEL_GEN(dev_priv) >= 9) {
- de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
- de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
- GEN9_AUX_CHANNEL_D;
- if (IS_GEN9_LP(dev_priv))
- de_port_masked |= BXT_DE_PORT_GMBUS;
- } else {
- de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- }
-
- if (INTEL_GEN(dev_priv) >= 11)
- de_port_masked |= ICL_AUX_CHANNEL_E;
-
- if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
- de_port_masked |= CNL_AUX_CHANNEL_F;
+ if (IS_GEN9_LP(dev_priv))
+ de_port_masked |= BXT_DE_PORT_GMBUS;
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
GEN8_PIPE_FIFO_UNDERRUN;
@@ -3418,7 +3443,7 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
u32 mask = SDE_GMBUS_ICP;
- WARN_ON(I915_READ(SDEIER) != 0);
+ drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
@@ -3547,7 +3572,8 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
if (eir_stuck)
- DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+ drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
+ eir_stuck);
}
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
@@ -3584,7 +3610,8 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
if (eir_stuck)
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+ drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
+ eir_stuck);
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 812c47a9c2d6..25f25cd95818 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -101,10 +101,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq);
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index fdd550405fd3..7b3b83bd5ab8 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -35,7 +35,6 @@
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
-#ifdef CONFIG_AS_MOVNTDQA
static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
kernel_fpu_begin();
@@ -93,10 +92,6 @@ static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
kernel_fpu_end();
}
-#else
-static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
-static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
-#endif
/**
* i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1dd1f3652795..add00ec1f787 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,7 +35,7 @@
MODULE_PARM_DESC(name, desc)
struct i915_params i915_modparams __read_mostly = {
-#define MEMBER(T, member, value) .member = (value),
+#define MEMBER(T, member, value, ...) .member = (value),
I915_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};
@@ -92,9 +92,6 @@ i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe the driver for specified devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
-i915_param_named_unsafe(alpha_support, bool, 0400,
- "Deprecated. See i915.force_probe.");
-
i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
@@ -106,10 +103,6 @@ i915_param_named(fastboot, int, 0600,
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
-i915_param_named_unsafe(prefault_disable, bool, 0600,
- "Disable page prefaulting for pread/pwrite/reloc (default:false). "
- "For developers only.");
-
i915_param_named_unsafe(load_detect_test, bool, 0600,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
@@ -172,7 +165,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, int, 0600,
"Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)");
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 31b88f297fbc..45323732f099 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -36,49 +36,49 @@ struct drm_printer;
/*
* Invoke param, a function-like macro, for each i915 param, with arguments:
*
- * param(type, name, value)
+ * param(type, name, value, mode)
*
- * type: parameter type, one of {bool, int, unsigned int, char *}
+ * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
* name: name of the parameter
* value: initial/default value of the parameter
+ * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create
+ * debugfs file
*/
#define I915_PARAMS_FOR_EACH(param) \
- param(char *, vbt_firmware, NULL) \
- param(int, modeset, -1) \
- param(int, lvds_channel_mode, 0) \
- param(int, panel_use_ssc, -1) \
- param(int, vbt_sdvo_panel_type, -1) \
- param(int, enable_dc, -1) \
- param(int, enable_fbc, -1) \
- param(int, enable_psr, -1) \
- param(int, disable_power_well, -1) \
- param(int, enable_ips, 1) \
- param(int, invert_brightness, 0) \
- param(int, enable_guc, 0) \
- param(int, guc_log_level, -1) \
- param(char *, guc_firmware_path, NULL) \
- param(char *, huc_firmware_path, NULL) \
- param(char *, dmc_firmware_path, NULL) \
- param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
- param(int, edp_vswing, 0) \
- param(int, reset, 3) \
- param(unsigned int, inject_probe_failure, 0) \
- param(int, fastboot, -1) \
- param(int, enable_dpcd_backlight, 0) \
- param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
- param(unsigned long, fake_lmem_start, 0) \
+ param(char *, vbt_firmware, NULL, 0400) \
+ param(int, modeset, -1, 0400) \
+ param(int, lvds_channel_mode, 0, 0400) \
+ param(int, panel_use_ssc, -1, 0600) \
+ param(int, vbt_sdvo_panel_type, -1, 0400) \
+ param(int, enable_dc, -1, 0400) \
+ param(int, enable_fbc, -1, 0600) \
+ param(int, enable_psr, -1, 0600) \
+ param(int, disable_power_well, -1, 0400) \
+ param(int, enable_ips, 1, 0600) \
+ param(int, invert_brightness, 0, 0600) \
+ param(int, enable_guc, 0, 0400) \
+ param(int, guc_log_level, -1, 0400) \
+ param(char *, guc_firmware_path, NULL, 0400) \
+ param(char *, huc_firmware_path, NULL, 0400) \
+ param(char *, dmc_firmware_path, NULL, 0400) \
+ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
+ param(int, edp_vswing, 0, 0400) \
+ param(unsigned int, reset, 3, 0600) \
+ param(unsigned int, inject_probe_failure, 0, 0600) \
+ param(int, fastboot, -1, 0600) \
+ param(int, enable_dpcd_backlight, -1, 0600) \
+ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
+ param(unsigned long, fake_lmem_start, 0, 0400) \
/* leave bools at the end to not create holes */ \
- param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
- param(bool, enable_hangcheck, true) \
- param(bool, prefault_disable, false) \
- param(bool, load_detect_test, false) \
- param(bool, force_reset_modeset_test, false) \
- param(bool, error_capture, true) \
- param(bool, disable_display, false) \
- param(bool, verbose_state_checks, true) \
- param(bool, nuclear_pageflip, false) \
- param(bool, enable_dp_mst, true) \
- param(bool, enable_gvt, false)
+ param(bool, enable_hangcheck, true, 0600) \
+ param(bool, load_detect_test, false, 0600) \
+ param(bool, force_reset_modeset_test, false, 0600) \
+ param(bool, error_capture, true, 0600) \
+ param(bool, disable_display, false, 0400) \
+ param(bool, verbose_state_checks, true, 0) \
+ param(bool, nuclear_pageflip, false, 0400) \
+ param(bool, enable_dp_mst, true, 0600) \
+ param(bool, enable_gvt, false, 0400)
#define MEMBER(T, member, ...) T member;
struct i915_params {
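[Note: the param table is a classic X-macro: one authoritative list expanded several ways, with `...` in each consumer so a new column (here the debugfs mode) can be added without touching expansions that ignore it. A small standalone C sketch of the shape, reusing two parameter names for flavour only:

        #include <stdio.h>

        /* One authoritative list; mode is the new fourth column. */
        #define PARAMS_FOR_EACH(param) \
                param(int,          fastboot, -1, 0600) \
                param(unsigned int, reset,     3, 0600)

        /* Struct members: variadic, so the extra mode column is ignored. */
        #define MEMBER(T, member, value, ...) T member;
        struct params { PARAMS_FOR_EACH(MEMBER) };
        #undef MEMBER

        /* Default values: again ignores trailing columns. */
        #define INIT(T, member, value, ...) .member = (value),
        static struct params defaults = { PARAMS_FOR_EACH(INIT) };
        #undef INIT

        int main(void)
        {
                printf("fastboot=%d reset=%u\n", defaults.fastboot, defaults.reset);
                return 0;
        }
]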
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f631f6d21127..2c80a0194c80 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
+#include <drm/i915_pciids.h>
#include "display/intel_fbdev.h"
@@ -615,7 +616,8 @@ static const struct intel_device_info chv_info = {
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
.display.has_ipc = 1, \
- .ddb_size = 896
+ .ddb_size = 896, \
+ .num_supported_dbuf_slices = 1
#define SKL_PLATFORM \
GEN9_FEATURES, \
@@ -650,6 +652,7 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
+ .num_supported_dbuf_slices = 1, \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
@@ -774,6 +777,7 @@ static const struct intel_device_info cnl_info = {
}, \
GEN(11), \
.ddb_size = 2048, \
+ .num_supported_dbuf_slices = 2, \
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
@@ -819,11 +823,9 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .require_force_probe = 1,
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
- .has_rps = false, /* XXX disabled for debugging */
};
#define GEN12_DGFX_FEATURES \
@@ -928,13 +930,6 @@ static bool force_probe(u16 device_id, const char *devices)
char *s, *p, *tok;
bool ret;
- /* FIXME: transitional */
- if (i915_modparams.alpha_support) {
- DRM_INFO("i915.alpha_support is deprecated, use i915.force_probe=%04x instead\n",
- device_id);
- return true;
- }
-
if (!devices || !*devices)
return false;
@@ -968,7 +963,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (intel_info->require_force_probe &&
!force_probe(pdev->device, i915_modparams.force_probe)) {
- DRM_INFO("Your graphics device %04x is not properly supported by the driver in this\n"
+ dev_info(&pdev->dev,
+ "Your graphics device %04x is not properly supported by the driver in this\n"
"kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
"module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
"or (recommended) check for kernel updates.\n",
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3b6b913bd27a..cf2c01f17da8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -555,8 +555,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aging_tail = hw_tail;
stream->oa_buffer.aging_timestamp = now;
} else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
- hw_tail);
+ drm_err(&stream->perf->i915->drm,
+ "Ignoring spurious out of range OA buffer tail pointer = %x\n",
+ hw_tail);
}
}
@@ -686,7 +687,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
u32 taken;
int ret = 0;
- if (WARN_ON(!stream->enabled))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
return -EIO;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -718,10 +719,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* only be incremented by multiples of the report size (notably also
* all a power of two).
*/
- if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
- tail > OA_BUFFER_SIZE || tail % report_size,
- "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
- head, tail))
+ if (drm_WARN_ONCE(&uncore->i915->drm,
+ head > OA_BUFFER_SIZE || head % report_size ||
+ tail > OA_BUFFER_SIZE || tail % report_size,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
return -EIO;
@@ -742,8 +744,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* here would imply a driver bug that would result
* in an overrun.
*/
- if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ if (drm_WARN_ON(&uncore->i915->drm,
+ (OA_BUFFER_SIZE - head) < report_size)) {
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -896,7 +900,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
i915_reg_t oastatus_reg;
int ret;
- if (WARN_ON(!stream->oa_buffer.vaddr))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
@@ -986,7 +990,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
u32 taken;
int ret = 0;
- if (WARN_ON(!stream->enabled))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
return -EIO;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -1015,10 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* only be incremented by multiples of the report size (notably also
* all a power of two).
*/
- if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
- tail > OA_BUFFER_SIZE || tail % report_size,
- "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
- head, tail))
+ if (drm_WARN_ONCE(&uncore->i915->drm,
+ head > OA_BUFFER_SIZE || head % report_size ||
+ tail > OA_BUFFER_SIZE || tail % report_size,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
return -EIO;
@@ -1036,8 +1041,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* here would imply a driver bug that would result
* in an overrun.
*/
- if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ if (drm_WARN_ON(&uncore->i915->drm,
+ (OA_BUFFER_SIZE - head) < report_size)) {
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -1110,7 +1117,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
u32 oastatus1;
int ret;
- if (WARN_ON(!stream->oa_buffer.vaddr))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
@@ -1303,8 +1310,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* dropped by GuC. They won't be part of the context
* ID in the OA reports, so squash those lower bits.
*/
- stream->specific_ctx_id =
- lower_32_bits(ce->lrc_desc) >> 12;
+ stream->specific_ctx_id = ce->lrc.lrca >> 12;
/*
* GuC uses the top bit to signal proxy submission, so
@@ -1319,7 +1325,12 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 12: {
stream->specific_ctx_id_mask =
((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
- stream->specific_ctx_id = stream->specific_ctx_id_mask;
+ /*
+ * Pick an unused context id
+ * 0 - BITS_PER_LONG are used by other contexts
+ * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
+ */
+ stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
break;
}
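[Note: the id chosen above only matters through the mask — report filtering compares each OA report's context field against specific_ctx_id under specific_ctx_id_mask, so any value no live context can take works. A hedged sketch of that comparison; the helper name is invented:

        #include <stdbool.h>
        #include <stdint.h>

        /* Illustrative helper: does an OA report belong to our context? */
        static bool oa_report_ctx_matches(uint32_t report_ctx_id,
                                          uint32_t specific_ctx_id,
                                          uint32_t specific_ctx_id_mask)
        {
                return (report_ctx_id & specific_ctx_id_mask) ==
                       (specific_ctx_id & specific_ctx_id_mask);
        }
]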
@@ -1327,11 +1338,12 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
MISSING_CASE(INTEL_GEN(ce->engine->i915));
}
- ce->tag = stream->specific_ctx_id_mask;
+ ce->tag = stream->specific_ctx_id;
- DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
- stream->specific_ctx_id,
- stream->specific_ctx_id_mask);
+ drm_dbg(&stream->perf->i915->drm,
+ "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ stream->specific_ctx_id,
+ stream->specific_ctx_id_mask);
return 0;
}
@@ -1391,8 +1403,10 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
+ *
+ * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
*/
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -1575,11 +1589,12 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *i915 = stream->perf->i915;
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
int ret;
- if (WARN_ON(stream->oa_buffer.vma))
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
return -ENODEV;
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
@@ -1587,7 +1602,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
- DRM_ERROR("Failed to allocate OA buffer\n");
+ drm_err(&i915->drm, "Failed to allocate OA buffer\n");
return PTR_ERR(bo);
}
@@ -1669,7 +1684,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
bo = i915_gem_object_create_internal(i915, 4096);
if (IS_ERR(bo)) {
- DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+ drm_err(&i915->drm,
+ "Failed to allocate NOA wait batchbuffer\n");
return PTR_ERR(bo);
}
@@ -2184,7 +2200,9 @@ static int gen8_modify_self(struct intel_context *ce,
struct i915_request *rq;
int err;
+ intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2653,7 +2671,8 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen8_oa_disable(struct i915_perf_stream *stream)
@@ -2664,7 +2683,8 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen12_oa_disable(struct i915_perf_stream *stream)
@@ -2676,7 +2696,16 @@ static void gen12_oa_disable(struct i915_perf_stream *stream)
GEN12_OAG_OACONTROL,
GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
+
+ intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
+ if (intel_wait_for_register(uncore,
+ GEN12_OA_TLB_INV_CR,
+ 1, 0,
+ 50))
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA tlb invalidate timed out\n");
}
/**
@@ -2740,6 +2769,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props)
{
+ struct drm_i915_private *i915 = stream->perf->i915;
struct i915_perf *perf = stream->perf;
int format_size;
int ret;
@@ -2796,7 +2826,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->sample_size += format_size;
stream->oa_buffer.format_size = format_size;
- if (WARN_ON(stream->oa_buffer.format_size == 0))
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
return -EINVAL;
stream->hold_preemption = props->hold_preemption;
@@ -2849,7 +2879,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
- perf->exclusive_stream = stream;
+ WRITE_ONCE(perf->exclusive_stream, stream);
ret = i915_perf_stream_enable_sync(stream);
if (ret) {
@@ -2869,7 +2899,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -2895,12 +2925,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
{
struct i915_perf_stream *stream;
- /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
-
if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.exclusive_stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+ stream = READ_ONCE(engine->i915->perf.exclusive_stream);
/*
* For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
* is already doing that, so nothing to be done for gen12 here.
@@ -2910,49 +2939,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
}
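[Note: the READ_ONCE() here pairs with the WRITE_ONCE() stores in stream init/destroy: the pointer itself is published and snapshotted with single-copy atomicity, while ordering against the OA register state still comes from lrc_configure_all_contexts(), as the comments say. A userspace sketch of the same relaxed publish/snapshot shape, names illustrative:

        #include <stdatomic.h>
        #include <stddef.h>

        struct stream;
        static _Atomic(struct stream *) exclusive_stream;

        /* Store side: relaxed, like WRITE_ONCE(); ordering is external. */
        static void set_exclusive(struct stream *s)
        {
                atomic_store_explicit(&exclusive_stream, s, memory_order_relaxed);
        }

        /* Load side: snapshot once, never reload the pointer mid-use. */
        static struct stream *get_exclusive(void)
        {
                return atomic_load_explicit(&exclusive_stream, memory_order_relaxed);
        }
]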
/**
- * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
- * @stream: An i915 perf stream
- * @file: An i915 perf stream file
- * @buf: destination buffer given by userspace
- * @count: the number of bytes userspace wants to read
- * @ppos: (inout) file seek position (unused)
- *
- * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
- * ensure that if we've successfully copied any data then reporting that takes
- * precedence over any internal error status, so the data isn't lost.
- *
- * For example ret will be -ENOSPC whenever there is more buffered data than
- * can be copied to userspace, but that's only interesting if we weren't able
- * to copy some data because it implies the userspace buffer is too small to
- * receive a single record (and we never split records).
- *
- * Another case with ret == -EFAULT is more of a grey area since it would seem
- * like bad form for userspace to ask us to overrun its buffer, but the user
- * knows best:
- *
- * http://yarchive.net/comp/linux/partial_reads_writes.html
- *
- * Returns: The number of bytes copied or a negative error code on failure.
- */
-static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
- struct file *file,
- char __user *buf,
- size_t count,
- loff_t *ppos)
-{
- /* Note we keep the offset (aka bytes read) separate from any
- * error status so that the final check for whether we return
- * the bytes read with a higher precedence than any error (see
- * comment below) doesn't need to be handled/duplicated in
- * stream->ops->read() implementations.
- */
- size_t offset = 0;
- int ret = stream->ops->read(stream, buf, count, &offset);
-
- return offset ?: (ret ?: -EAGAIN);
-}
-
-/**
* i915_perf_read - handles read() FOP for i915 perf stream FDs
* @file: An i915 perf stream file
* @buf: destination buffer given by userspace
@@ -2977,7 +2963,8 @@ static ssize_t i915_perf_read(struct file *file,
{
struct i915_perf_stream *stream = file->private_data;
struct i915_perf *perf = stream->perf;
- ssize_t ret;
+ size_t offset = 0;
+ int ret;
/* To ensure it's handled consistently we simply treat all reads of a
* disabled stream as an error. In particular it might otherwise lead
@@ -3000,13 +2987,12 @@ static ssize_t i915_perf_read(struct file *file,
return ret;
mutex_lock(&perf->lock);
- ret = i915_perf_read_locked(stream, file,
- buf, count, ppos);
+ ret = stream->ops->read(stream, buf, count, &offset);
mutex_unlock(&perf->lock);
- } while (ret == -EAGAIN);
+ } while (!offset && !ret);
} else {
mutex_lock(&perf->lock);
- ret = i915_perf_read_locked(stream, file, buf, count, ppos);
+ ret = stream->ops->read(stream, buf, count, &offset);
mutex_unlock(&perf->lock);
}
@@ -3017,15 +3003,15 @@ static ssize_t i915_perf_read(struct file *file,
* and read() returning -EAGAIN. Clearing the oa.pollin state here
* effectively ensures we back off until the next hrtimer callback
* before reporting another EPOLLIN event.
+ * The exception to this is if ops->read() returned -ENOSPC which means
+ * that more OA data is available than could fit in the user provided
+ * buffer. In this case we want the next poll() call to not block.
*/
- if (ret >= 0 || ret == -EAGAIN) {
- /* Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
- */
+ if (ret != -ENOSPC)
stream->pollin = false;
- }
- return ret;
+ /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
+ return offset ?: (ret ?: -EAGAIN);
}
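[Note: the closing expression encodes a fixed precedence — bytes copied win over any error, and "no data, no error" degrades to -EAGAIN — while only -ENOSPC (more data buffered than the user buffer holds) leaves pollin set so the next poll() returns immediately. The precedence itself as a tiny standalone sketch; `?:` is the GNU elvis operator already used above:

        #include <assert.h>
        #include <errno.h>
        #include <sys/types.h>

        static ssize_t normalize_read(ssize_t offset, int ret)
        {
                return offset ?: (ret ?: -EAGAIN);
        }

        int main(void)
        {
                assert(normalize_read(64, -ENOSPC) == 64);     /* data beats error */
                assert(normalize_read(0, -EFAULT) == -EFAULT);
                assert(normalize_read(0, 0) == -EAGAIN);       /* nothing yet */
                return 0;
        }
]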
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index aa729d04abe2..2c062534eac1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -448,7 +448,7 @@ static void engine_event_destroy(struct perf_event *event)
engine = intel_engine_lookup_user(i915,
engine_event_class(event),
engine_event_instance(event));
- if (WARN_ON_ONCE(!engine))
+ if (drm_WARN_ON_ONCE(&i915->drm, !engine))
return;
if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
@@ -584,7 +584,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
engine_event_class(event),
engine_event_instance(event));
- if (WARN_ON_ONCE(!engine)) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
/* Do nothing */
} else if (sample == I915_SAMPLE_BUSY &&
intel_engine_supports_stats(engine)) {
@@ -1188,7 +1188,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
if (!pmu->base.event_init)
return;
- WARN_ON(pmu->enable);
+ drm_WARN_ON(&i915->drm, pmu->enable);
hrtimer_cancel(&pmu->timer);
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index f1d6cad0d7d5..941f0c14037c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -10,7 +10,7 @@
#include <linux/hrtimer.h>
#include <linux/perf_event.h>
#include <linux/spinlock_types.h>
-#include <drm/i915_drm.h>
+#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3575fd30756b..6e12000c4b6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -34,8 +34,8 @@
* Follow the style described here for new macros, and while changing existing
* macros. Do **not** mass change existing definitions just to update the style.
*
- * Layout
- * ~~~~~~
+ * File Layout
+ * ~~~~~~~~~~~
*
* Keep helper macros near the top. For example, _PIPE() and friends.
*
@@ -693,6 +693,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6 << 3)
#define OABUFFER_SIZE_16M (7 << 3)
+#define GEN12_OA_TLB_INV_CR _MMIO(0xceec)
+
/* Gen12 OAR unit */
#define GEN12_OAR_OACONTROL _MMIO(0x2960)
#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
@@ -2626,6 +2628,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IPEIR_I965 _MMIO(0x2064)
#define IPEHR_I965 _MMIO(0x2068)
#define GEN7_SC_INSTDONE _MMIO(0x7100)
+#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
+#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
@@ -2639,6 +2643,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
#define RING_IPEIR(base) _MMIO((base) + 0x64)
#define RING_IPEHR(base) _MMIO((base) + 0x68)
+#define RING_EIR(base) _MMIO((base) + 0xb0)
+#define RING_EMR(base) _MMIO((base) + 0xb4)
+#define RING_ESR(base) _MMIO((base) + 0xb8)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
@@ -2860,6 +2867,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
#define MBUS_ABOX_CTL _MMIO(0x45038)
+#define MBUS_ABOX1_CTL _MMIO(0x45048)
+#define MBUS_ABOX2_CTL _MMIO(0x4504C)
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -3085,10 +3094,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) /* bdw+ */
#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
-#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
+#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
#define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2)
#define GT_RENDER_DEBUG_INTERRUPT (1 << 1)
#define GT_RENDER_USER_INTERRUPT (1 << 0)
@@ -3160,6 +3170,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
+#define GEN12_FF_TESSELATION_DOP_GATE_DISABLE BIT(19)
#define GEN7_FF_TS_SCHED_HS1 (0x5 << 16)
#define GEN7_FF_TS_SCHED_HS0 (0x3 << 16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16)
@@ -3277,6 +3288,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE _MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE (1 << 31)
@@ -3743,8 +3755,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
-#define MCH_SECP_NRG_STTS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c)
-
/* Clocking configuration register */
#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -4124,6 +4134,9 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
+#define TGL_VRH_GATING_DIS REG_BIT(31)
+
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
@@ -4851,16 +4864,6 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON REG_BIT(31)
-
-#define _PP_CONTROL_1 0xc7204
-#define _PP_CONTROL_2 0xc7304
-#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
- _PP_CONTROL_2)
-#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
-#define VDD_OVERRIDE_FORCE REG_BIT(3)
-#define BACKLIGHT_ENABLE REG_BIT(2)
-#define PWR_DOWN_ON_RESET REG_BIT(1)
-#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -4921,6 +4924,7 @@ enum {
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
+#define PFIT_PIPE(pipe) ((pipe) << 29)
#define VERT_INTERP_DISABLE (0 << 10)
#define VERT_INTERP_BILINEAR (1 << 10)
#define VERT_INTERP_MASK (3 << 10)
@@ -5870,7 +5874,6 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
@@ -5879,6 +5882,7 @@ enum {
#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
#define PIPEMISC_DITHER_8_BPC (0 << 5)
#define PIPEMISC_DITHER_10_BPC (1 << 5)
@@ -7745,9 +7749,9 @@ enum {
#define DISP_ARB_CTL2 _MMIO(0x45004)
#define DISP_DATA_PARTITION_5_6 (1 << 6)
#define DISP_IPC_ENABLE (1 << 3)
-#define DBUF_CTL _MMIO(0x45008)
-#define DBUF_CTL_S1 _MMIO(0x45008)
-#define DBUF_CTL_S2 _MMIO(0x44FE8)
+#define _DBUF_CTL_S1 0x45008
+#define _DBUF_CTL_S2 0x44FE8
+#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
#define DBUF_POWER_REQUEST (1 << 31)
#define DBUF_POWER_STATE (1 << 30)
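[Note: _PICK_EVEN() linearly interpolates between two anchor offsets, so it also handles the negative stride here (slice 1 lives below slice 0 in the address map). A standalone check of the arithmetic; the macro body is reproduced from its usual kernel definition, so verify against your tree:

        #include <assert.h>

        /* Same arithmetic as the kernel's _PICK_EVEN(__index, __a, __b). */
        #define PICK_EVEN(i, a, b) ((a) + (i) * ((b) - (a)))

        int main(void)
        {
                assert(PICK_EVEN(0, 0x45008, 0x44FE8) == 0x45008); /* DBUF_CTL_S(0) */
                assert(PICK_EVEN(1, 0x45008, 0x44FE8) == 0x44FE8); /* DBUF_CTL_S(1) */
                return 0;
        }
]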
#define GEN7_MSG_CTL _MMIO(0x45010)
@@ -7767,6 +7771,7 @@ enum {
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
+#define CNL_DELAY_PMRSP (1 << 22)
#define MASK_WAKEMEM (1 << 13)
#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
@@ -8988,6 +8993,8 @@ enum {
#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
#define GEN7_PCODE_TIMEOUT 0x2
#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
+#define GEN11_PCODE_LOCKED 0x6
#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
@@ -9136,12 +9143,19 @@ enum {
#define THROTTLE_12_5 (7 << 2)
#define DISABLE_EARLY_EOT (1 << 1)
-#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
+#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1 << 0)
#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -9242,6 +9256,10 @@ enum {
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
@@ -10533,13 +10551,13 @@ enum skl_power_gate {
#define D_COMP_COMP_DISABLE (1 << 0)
/* Pipe WM_LINETIME - watermark line time */
-#define _PIPE_WM_LINETIME_A 0x45270
-#define _PIPE_WM_LINETIME_B 0x45274
-#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
-#define PIPE_WM_LINETIME_MASK (0x1ff)
-#define PIPE_WM_LINETIME_TIME(x) ((x))
-#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff << 16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x) << 16)
+#define _WM_LINETIME_A 0x45270
+#define _WM_LINETIME_B 0x45274
+#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
+#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
+#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
+#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
+#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
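[Note: the conversion above moves the linetime fields to the mask-first REG_GENMASK()/REG_FIELD_PREP() style. Their arithmetic in a standalone sketch — the macro bodies below are simplified equivalents (GCC/Clang builtin), not the kernel's exact definitions:

        #include <assert.h>
        #include <stdint.h>

        #define GENMASK32(h, l)       (((~0u) >> (31 - (h))) & ((~0u) << (l)))
        #define FIELD_PREP32(mask, v) (((v) << __builtin_ctz(mask)) & (mask))

        int main(void)
        {
                assert(GENMASK32(8, 0) == 0x1ff);                        /* HSW_LINETIME_MASK */
                assert(FIELD_PREP32(GENMASK32(24, 16), 0x1ff) == 0x1ff0000); /* IPS field */
                return 0;
        }
]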
/* SFUSE_STRAP */
#define SFUSE_STRAP _MMIO(0xc2014)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a18b2a244706..e2b78db685ea 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -51,7 +51,6 @@ struct execute_cb {
static struct i915_global_request {
struct i915_global base;
struct kmem_cache *slab_requests;
- struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_execute_cbs;
} global;
@@ -203,6 +202,19 @@ static void free_capture_list(struct i915_request *request)
}
}
+static void __i915_request_fill(struct i915_request *rq, u8 val)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset(vaddr + head, val, rq->ring->size - head);
+ head = 0;
+ }
+ memset(vaddr + head, val, rq->postfix - head);
+}
+
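[Note: __i915_request_fill() paints the request's payload span in the ring, splitting the memset when [infix, postfix) wraps past the end of the buffer. The same wraparound split in a standalone sketch:

        #include <stdint.h>
        #include <string.h>

        /* Fill [head, tail) in a circular buffer of 'size' bytes with 'val'. */
        static void ring_fill(uint8_t *ring, size_t size,
                              size_t head, size_t tail, uint8_t val)
        {
                if (tail < head) {              /* span wraps past the end */
                        memset(ring + head, val, size - head);
                        head = 0;
                }
                memset(ring + head, val, tail - head);
        }
]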
static void remove_from_engine(struct i915_request *rq)
{
struct intel_engine_cs *engine, *locked;
@@ -247,6 +259,9 @@ bool i915_request_retire(struct i915_request *rq)
*/
GEM_BUG_ON(!list_is_first(&rq->link,
&i915_request_timeline(rq)->requests));
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ /* Poison before we release our space in the ring */
+ __i915_request_fill(rq, POISON_FREE);
rq->ring->head = rq->postfix;
/*
@@ -275,7 +290,7 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
- list_del_rcu(&rq->link);
+ __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
@@ -348,6 +363,50 @@ __await_execution(struct i915_request *rq,
return 0;
}
+static bool fatal_error(int error)
+{
+ switch (error) {
+ case 0: /* not an error! */
+ case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
+ case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+ return false;
+ default:
+ return true;
+ }
+}
+
+void __i915_request_skip(struct i915_request *rq)
+{
+ GEM_BUG_ON(!fatal_error(rq->fence.error));
+
+ if (rq->infix == rq->postfix)
+ return;
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ __i915_request_fill(rq, 0);
+ rq->infix = rq->postfix;
+}
+
+void i915_request_set_error_once(struct i915_request *rq, int error)
+{
+ int old;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+
+ if (i915_request_signaled(rq))
+ return;
+
+ old = READ_ONCE(rq->fence.error);
+ do {
+ if (fatal_error(old))
+ return;
+ } while (!try_cmpxchg(&rq->fence.error, &old, error));
+}
+
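[Note: i915_request_set_error_once() is a lock-free "first fatal error wins" update — the cmpxchg retries on races, but once a fatal error has been observed the new one is dropped. A C11-atomics sketch of the same loop, with fatal_error() mirrored from above; atomic_compare_exchange_weak reloads 'old' on failure just like try_cmpxchg():

        #include <errno.h>
        #include <stdatomic.h>
        #include <stdbool.h>

        static bool is_fatal(int error)
        {
                switch (error) {
                case 0:                 /* not an error */
                case -EAGAIN:           /* transient: reset victim */
                case -ETIMEDOUT:        /* transient: fence timeout */
                        return false;
                default:
                        return true;
                }
        }

        static void set_error_once(_Atomic int *fence_error, int error)
        {
                int old = atomic_load_explicit(fence_error, memory_order_relaxed);

                do {
                        if (is_fatal(old))
                                return; /* the first fatal error sticks */
                } while (!atomic_compare_exchange_weak(fence_error, &old, error));
        }
]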
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -377,8 +436,10 @@ bool __i915_request_submit(struct i915_request *request)
if (i915_request_completed(request))
goto xfer;
- if (intel_context_is_banned(request->context))
- i915_request_skip(request, -EIO);
+ if (unlikely(intel_context_is_banned(request->context)))
+ i915_request_set_error_once(request, -EIO);
+ if (unlikely(fatal_error(request->fence.error)))
+ __i915_request_skip(request);
/*
* Are we using semaphores when the gpu is already saturated?
@@ -504,7 +565,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
trace_i915_request_submit(request);
if (unlikely(fence->error))
- i915_request_skip(request, fence->error);
+ i915_request_set_error_once(request, fence->error);
/*
* We need to serialize use of the submit_request() callback
@@ -688,6 +749,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
+ GEM_BUG_ON(i915_request_completed(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -857,7 +919,7 @@ already_busywaiting(struct i915_request *rq)
*
* See the are-we-too-late? check in __i915_request_submit().
*/
- return rq->sched.semaphores | rq->engine->saturated;
+ return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
static int
@@ -915,8 +977,16 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
+ const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+
+ if (!intel_context_use_semaphores(to->context))
+ goto await_fence;
+
+ if (!rcu_access_pointer(from->hwsp_cacheline))
+ goto await_fence;
+
/* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
+ if (already_busywaiting(to) & mask)
goto await_fence;
if (i915_request_await_start(to, from) < 0)
@@ -929,7 +999,7 @@ emit_semaphore_wait(struct i915_request *to,
if (__emit_semaphore_wait(to, from, from->fence.seqno))
goto await_fence;
- to->sched.semaphores |= from->engine->mask;
+ to->sched.semaphores |= mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
@@ -947,11 +1017,15 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
GEM_BUG_ON(to == from);
GEM_BUG_ON(to->timeline == from->timeline);
- if (i915_request_completed(from))
+ if (i915_request_completed(from)) {
+ i915_sw_fence_set_error_once(&to->submit, from->fence.error);
return 0;
+ }
if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
+ ret = i915_sched_node_add_dependency(&to->sched,
+ &from->sched,
+ I915_DEPENDENCY_EXTERNAL);
if (ret < 0)
return ret;
}
@@ -960,12 +1034,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- else if (intel_context_use_semaphores(to->context))
- ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
else
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
if (ret < 0)
return ret;
@@ -1064,6 +1134,8 @@ __i915_request_await_execution(struct i915_request *to,
{
int err;
+ GEM_BUG_ON(intel_context_is_barrier(from->context));
+
/* Submit both requests at the same time */
err = __await_execution(to, from, hook, I915_FENCE_GFP);
if (err)
@@ -1074,17 +1146,50 @@ __i915_request_await_execution(struct i915_request *to,
&from->fence))
return 0;
- /* Ensure both start together [after all semaphores in signal] */
- if (intel_engine_has_semaphores(to->engine))
- err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
- else
- err = i915_request_await_start(to, from);
+ /*
+ * Wait until the start of this request.
+ *
+ * The execution cb fires when we submit the request to HW. But in
+ * many cases this may be long before the request itself is ready to
+ * run (consider that we submit 2 requests for the same context, where
+ * the request of interest is behind an indefinite spinner). So we hook
+ * up to both to reduce our queues and keep the execution lag minimised
+ * in the worst case, though we hope that the await_start is elided.
+ */
+ err = i915_request_await_start(to, from);
if (err < 0)
return err;
+ /*
+ * Ensure both start together [after all semaphores in signal]
+ *
+ * Now that we are queued to the HW at roughly the same time (thanks
+ * to the execute cb) and are ready to run at roughly the same time
+ * (thanks to the await start), our signaler may still be indefinitely
+ * delayed by waiting on a semaphore from a remote engine. If our
+ * signaler depends on a semaphore, so indirectly do we, and we do not
+ * want to start our payload until our signaler also starts theirs.
+ * So we wait.
+ *
+ * However, there is also a second condition for which we need to wait
+ * for the precise start of the signaler. Consider that the signaler
+ * was submitted in a chain of requests following another context
+ * (with just an ordinary intra-engine fence dependency between the
+ * two). In this case the signaler is queued to HW, but not for
+ * immediate execution, and so we must wait until it reaches the
+ * active slot.
+ */
+ if (intel_engine_has_semaphores(to->engine)) {
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ if (err < 0)
+ return err;
+ }
+
/* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
- err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+ err = i915_sched_node_add_dependency(&to->sched,
+ &from->sched,
+ I915_DEPENDENCY_WEAK);
if (err < 0)
return err;
}
@@ -1202,31 +1307,6 @@ i915_request_await_object(struct i915_request *to,
return ret;
}
-void i915_request_skip(struct i915_request *rq, int error)
-{
- void *vaddr = rq->ring->vaddr;
- u32 head;
-
- GEM_BUG_ON(!IS_ERR_VALUE((long)error));
- dma_fence_set_error(&rq->fence, error);
-
- if (rq->infix == rq->postfix)
- return;
-
- /*
- * As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- head = rq->infix;
- if (rq->postfix < head) {
- memset(vaddr + head, 0, rq->ring->size - head);
- head = 0;
- }
- memset(vaddr + head, 0, rq->postfix - head);
- rq->infix = rq->postfix;
-}
-
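i915_request_skip() is dropped here in favour of __i915_request_skip() above, which delegates the zeroing to __i915_request_fill(). The wrap handling that lived in the removed body is the classic circular-buffer span clear; as a standalone sketch with hypothetical names (offsets are bytes into a ring of the given size):

#include <string.h>

/* Zero the span [head, tail) of a ring buffer, handling wrap-around. */
static void ring_clear(void *vaddr, unsigned int size,
		       unsigned int head, unsigned int tail)
{
	if (tail < head) {		/* the span wraps past the end */
		memset((char *)vaddr + head, 0, size - head);
		head = 0;
	}
	memset((char *)vaddr + head, 0, tail - head);
}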
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
@@ -1256,7 +1336,17 @@ __i915_request_add_to_timeline(struct i915_request *rq)
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
if (prev && !i915_request_completed(prev)) {
- if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ /*
+ * The requests are supposed to be kept in order. However,
+ * we need to be wary in case the timeline->last_request
+ * is used as a barrier for external modification to this
+ * context.
+ */
+ GEM_BUG_ON(prev->context == rq->context &&
+ i915_seqno_passed(prev->fence.seqno,
+ rq->fence.seqno));
+
+ if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
&prev->submit,
&rq->submitq);
@@ -1340,39 +1430,23 @@ void i915_request_add(struct i915_request *rq)
{
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_sched_attr attr = {};
- struct i915_request *prev;
+ struct i915_gem_context *ctx;
lockdep_assert_held(&tl->mutex);
lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
+ __i915_request_commit(rq);
- prev = __i915_request_commit(rq);
-
- if (rcu_access_pointer(rq->context->gem_context))
- attr = i915_request_gem_context(rq)->sched;
+ /* XXX placeholder for selftests */
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ attr = ctx->sched;
+ rcu_read_unlock();
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
if (list_empty(&rq->sched.signalers_list))
attr.priority |= I915_PRIORITY_WAIT;
@@ -1380,32 +1454,10 @@ void i915_request_add(struct i915_request *rq)
__i915_request_queue(rq, &attr);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
- /*
- * In typical scenarios, we do not expect the previous request on
- * the timeline to be still tracked by timeline->last_request if it
- * has been completed. If the completed request is still here, that
- * implies that request retirement is a long way behind submission,
- * suggesting that we haven't been retiring frequently enough from
- * the combination of retire-before-alloc, waiters and the background
- * retirement worker. So if the last request on this timeline was
- * already completed, do a catch up pass, flushing the retirement queue
- * up to this client. Since we have now moved the heaviest operations
- * during retirement onto secondary workers, such as freeing objects
- * or contexts, retiring a bunch of requests is mostly list management
- * (and cache misses), and so we should not be overly penalizing this
- * client by performing excess work, though we may still be performing
- * work on behalf of others -- but instead we should benefit from
- * improved resource management. (Well, that's the theory at least.)
- */
- if (prev &&
- i915_request_completed(prev) &&
- rcu_access_pointer(prev->timeline) == tl)
- i915_request_retire_upto(prev);
-
mutex_unlock(&tl->mutex);
}
-static unsigned long local_clock_us(unsigned int *cpu)
+static unsigned long local_clock_ns(unsigned int *cpu)
{
unsigned long t;
@@ -1422,7 +1474,7 @@ static unsigned long local_clock_us(unsigned int *cpu)
* stop busywaiting, see busywait_stop().
*/
*cpu = get_cpu();
- t = local_clock() >> 10;
+ t = local_clock();
put_cpu();
return t;
@@ -1432,15 +1484,15 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
unsigned int this_cpu;
- if (time_after(local_clock_us(&this_cpu), timeout))
+ if (time_after(local_clock_ns(&this_cpu), timeout))
return true;
return this_cpu != cpu;
}
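
Together, local_clock_ns() and busywait_stop() bound a spin by wall time on one CPU and abort if the thread migrates, since a per-CPU clock read from another CPU is meaningless. A userspace approximation of that loop, assuming clock_gettime() and sched_getcpu() as stand-ins for the kernel primitives:

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <time.h>

static unsigned long now_ns(int *cpu)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	*cpu = sched_getcpu();
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

/* Spin until done() succeeds, the budget expires, or we change CPU. */
static bool spin_until(bool (*done)(void), unsigned long budget_ns)
{
	int cpu, this_cpu;
	unsigned long deadline = now_ns(&cpu) + budget_ns;

	for (;;) {
		if (done())
			return true;
		if (now_ns(&this_cpu) > deadline || this_cpu != cpu)
			return false;
	}
}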
-static bool __i915_spin_request(const struct i915_request * const rq,
- int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq, int state)
{
+ unsigned long timeout_ns;
unsigned int cpu;
/*
@@ -1468,7 +1520,8 @@ static bool __i915_spin_request(const struct i915_request * const rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- timeout_us += local_clock_us(&cpu);
+ timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
+ timeout_ns += local_clock_ns(&cpu);
do {
if (i915_request_completed(rq))
return true;
@@ -1476,7 +1529,7 @@ static bool __i915_spin_request(const struct i915_request * const rq,
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout_us, cpu))
+ if (busywait_stop(timeout_ns, cpu))
break;
cpu_relax();
@@ -1562,8 +1615,8 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
- __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
+ if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+ __i915_spin_request(rq, state)) {
dma_fence_signal(&rq->fence);
goto out;
}
@@ -1598,6 +1651,8 @@ long i915_request_wait(struct i915_request *rq,
break;
}
+ intel_engine_flush_submission(rq->engine);
+
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
@@ -1608,7 +1663,6 @@ long i915_request_wait(struct i915_request *rq,
break;
}
- intel_engine_flush_submission(rq->engine);
timeout = io_schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
@@ -1628,14 +1682,12 @@ out:
static void i915_global_request_shrink(void)
{
- kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_execute_cbs);
kmem_cache_shrink(global.slab_requests);
}
static void i915_global_request_exit(void)
{
- kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_execute_cbs);
kmem_cache_destroy(global.slab_requests);
}
@@ -1665,17 +1717,9 @@ int __init i915_global_request_init(void)
if (!global.slab_execute_cbs)
goto err_requests;
- global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!global.slab_dependencies)
- goto err_execute_cbs;
-
i915_global_register(&global.base);
return 0;
-err_execute_cbs:
- kmem_cache_destroy(global.slab_execute_cbs);
err_requests:
kmem_cache_destroy(global.slab_requests);
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index fccc339949ec..3c552bfea67a 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -305,6 +305,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void __i915_request_skip(struct i915_request *rq);
+
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
@@ -354,8 +357,6 @@ void i915_request_add(struct i915_request *rq);
bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
-void i915_request_skip(struct i915_request *request, int error);
-
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
@@ -397,7 +398,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
- return READ_ONCE(*rq->hwsp_seqno);
+ const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
+
+ return READ_ONCE(*hwsp);
}
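
The rewritten __hwsp_seqno() makes both loads explicit: one READ_ONCE() snapshots the pointer, which i915_request_mark_complete() may retarget concurrently (see the matching WRITE_ONCE() below), and a second reads the seqno it points at. A C11 model of the idiom, with illustrative names:

#include <stdatomic.h>
#include <stdint.h>

struct req {
	_Atomic(const uint32_t *) hwsp_seqno; /* may be retargeted at runtime */
};

static uint32_t hwsp_seqno(struct req *rq)
{
	/* Snapshot the pointer once so both loads agree on one target. */
	const uint32_t *hwsp =
		atomic_load_explicit(&rq->hwsp_seqno, memory_order_relaxed);

	return *(volatile const uint32_t *)hwsp; /* READ_ONCE() equivalent */
}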
/**
@@ -481,7 +484,7 @@ static inline bool i915_request_is_running(const struct i915_request *rq)
}
/**
- * i915_request_is_running - check if the request is ready for execution
+ * i915_request_is_ready - check if the request is ready for execution
* @rq: the request
*
* Upon construction, the request is instructed to wait upon various
@@ -511,7 +514,8 @@ static inline bool i915_request_completed(const struct i915_request *rq)
static inline void i915_request_mark_complete(struct i915_request *rq)
{
- rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
+ WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
+ (u32 *)&rq->fence.seqno);
}
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 34b654b4e58a..f0a9e8958ca0 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
+ engine->execlists.queue_priority_hint = prio;
+
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
@@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
if (inflight->context == rq->context)
goto unlock;
- engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -227,10 +228,10 @@ unlock:
static void __i915_schedule(struct i915_sched_node *node,
const struct i915_sched_attr *attr)
{
+ const int prio = max(attr->priority, node->attr.priority);
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
- const int prio = attr->priority;
struct sched_cache cache;
LIST_HEAD(dfs);
@@ -238,9 +239,6 @@ static void __i915_schedule(struct i915_sched_node *node,
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (prio <= READ_ONCE(node->attr.priority))
- return;
-
if (node_signaled(node))
return;
@@ -324,7 +322,7 @@ static void __i915_schedule(struct i915_sched_node *node,
GEM_BUG_ON(node_to_request(node)->engine != engine);
- node->attr.priority = prio;
+ WRITE_ONCE(node->attr.priority, prio);
/*
* Once the request is ready, it will be placed into the
@@ -363,6 +361,9 @@ static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
struct i915_sched_attr attr = node->attr;
+ if (attr.priority & bump)
+ return;
+
attr.priority |= bump;
__i915_schedule(node, &attr);
}
@@ -433,7 +434,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
/* All set, now publish. Beware the lockless walkers. */
- list_add(&dep->signal_link, &node->signalers_list);
+ list_add_rcu(&dep->signal_link, &node->signalers_list);
list_add_rcu(&dep->wait_link, &signal->waiters_list);
/*
@@ -455,7 +456,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
}
int i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal)
+ struct i915_sched_node *signal,
+ unsigned long flags)
{
struct i915_dependency *dep;
@@ -464,8 +466,7 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
- I915_DEPENDENCY_EXTERNAL |
- I915_DEPENDENCY_ALLOC))
+ flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
return 0;
@@ -486,7 +487,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
GEM_BUG_ON(!list_empty(&dep->dfs_link));
- list_del(&dep->wait_link);
+ list_del_rcu(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
@@ -497,7 +498,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
GEM_BUG_ON(dep->signaler != node);
GEM_BUG_ON(!list_empty(&dep->dfs_link));
- list_del(&dep->signal_link);
+ list_del_rcu(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
@@ -526,7 +527,8 @@ static struct i915_global_scheduler global = { {
int __init i915_global_scheduler_init(void)
{
global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN);
+ SLAB_HWCACHE_ALIGN |
+ SLAB_TYPESAFE_BY_RCU);
if (!global.slab_dependencies)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index d1dc4efef77b..6f0bf00fc569 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -34,7 +34,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
unsigned long flags);
int i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal);
+ struct i915_sched_node *signal,
+ unsigned long flags);
void i915_sched_node_fini(struct i915_sched_node *node);
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index d18e70550054..7186875088a0 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -78,6 +78,7 @@ struct i915_dependency {
unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
#define I915_DEPENDENCY_EXTERNAL BIT(1)
+#define I915_DEPENDENCY_WEAK BIT(2)
};
#endif /* _I915_SCHEDULER_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8812cdd9007f..ed2be3489f8e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,8 +24,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 51ba97daf2a0..a3d38e089b6e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -211,10 +211,21 @@ void i915_sw_fence_complete(struct i915_sw_fence *fence)
__i915_sw_fence_complete(fence, NULL);
}
-void i915_sw_fence_await(struct i915_sw_fence *fence)
+bool i915_sw_fence_await(struct i915_sw_fence *fence)
{
- debug_fence_assert(fence);
- WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+ int pending;
+
+ /*
+ * It is only safe to add a new await to the fence while it has
+ * not yet been signaled (i.e. there are still existing signalers).
+ */
+ pending = atomic_read(&fence->pending);
+ do {
+ if (pending < 1)
+ return false;
+ } while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));
+
+ return true;
}
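
i915_sw_fence_await() now increments pending only while it is still positive, turning "too late to await" into an explicit failure instead of a WARN. The same increment-unless-signaled shape in portable C11, illustrative names only:

#include <stdatomic.h>
#include <stdbool.h>

/* Take another await on the fence only while signalers still exist. */
static bool fence_await(_Atomic int *pending)
{
	int old = atomic_load_explicit(pending, memory_order_relaxed);

	do {
		if (old < 1)
			return false;	/* already signaled; cannot await */
	} while (!atomic_compare_exchange_weak(pending, &old, old + 1));

	return true;
}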
void __i915_sw_fence_init(struct i915_sw_fence *fence,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 19e806ce43bc..30a863353ee6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -91,7 +91,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
unsigned long timeout,
gfp_t gfp);
-void i915_sw_fence_await(struct i915_sw_fence *fence);
+bool i915_sw_fence_await(struct i915_sw_fence *fence);
void i915_sw_fence_complete(struct i915_sw_fence *fence);
static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 39c79e1c5b52..ed69b5d4a375 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -43,7 +43,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && i915->drm.open_count == 0;
+ return i915 && atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 0cef3130db05..45d32ef42787 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
+#include "gt/sysfs_engines.h"
#include "i915_drv.h"
#include "i915_sysfs.h"
@@ -525,7 +526,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- DRM_DEBUG_DRIVER("Resetting error state\n");
+ drm_dbg(&dev_priv->drm, "Resetting error state\n");
i915_reset_error_state(dev_priv);
return count;
@@ -564,31 +565,36 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
ret = sysfs_merge_group(&kdev->kobj,
&rc6_attr_group);
if (ret)
- DRM_ERROR("RC6 residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "RC6 residency sysfs setup failed\n");
}
if (HAS_RC6p(dev_priv)) {
ret = sysfs_merge_group(&kdev->kobj,
&rc6p_attr_group);
if (ret)
- DRM_ERROR("RC6p residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "RC6p residency sysfs setup failed\n");
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = sysfs_merge_group(&kdev->kobj,
&media_rc6_attr_group);
if (ret)
- DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "Media RC6 residency sysfs setup failed\n");
}
#endif
if (HAS_L3_DPF(dev_priv)) {
ret = device_create_bin_file(kdev, &dpf_attrs);
if (ret)
- DRM_ERROR("l3 parity sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "l3 parity sysfs setup failed\n");
if (NUM_L3_SLICES(dev_priv) > 1) {
ret = device_create_bin_file(kdev,
&dpf_attrs_1);
if (ret)
- DRM_ERROR("l3 parity slice 1 setup failed\n");
+ drm_err(&dev_priv->drm,
+ "l3 parity slice 1 setup failed\n");
}
}
@@ -598,9 +604,11 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 6)
ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
if (ret)
- DRM_ERROR("RPS sysfs setup failed\n");
+ drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
i915_setup_error_capture(kdev);
+
+ intel_engines_add_sysfs(dev_priv);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 233a97a2c276..bc854ad60954 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -339,6 +339,68 @@ TRACE_EVENT(intel_disable_plane,
__entry->frame, __entry->scanline)
);
+/* fbc */
+
+TRACE_EVENT(intel_fbc_activate,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_deactivate,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_nuke,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
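The three fbc events above share an identical layout and assignment; they are kept separate in this patch, but if deduplication were ever wanted the standard tracepoint event-class macros could express the same thing, roughly (sketch only, not part of this change):

DECLARE_EVENT_CLASS(intel_fbc_crtc,
	TP_PROTO(struct intel_crtc *crtc),
	TP_ARGS(crtc),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
		__entry->scanline = intel_get_crtc_scanline(crtc);
	),

	TP_printk("pipe %c, frame=%u, scanline=%u",
		  pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
);

DEFINE_EVENT(intel_fbc_crtc, intel_fbc_activate,
	TP_PROTO(struct intel_crtc *crtc),
	TP_ARGS(crtc)
);

/* ...and likewise DEFINE_EVENT() for intel_fbc_deactivate and intel_fbc_nuke. */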
/* pipe updates */
TRACE_EVENT(intel_pipe_update_start,
@@ -738,7 +800,7 @@ TRACE_EVENT(i915_request_in,
__field(u16, instance)
__field(u32, seqno)
__field(u32, port)
- __field(u32, prio)
+ __field(s32, prio)
),
TP_fast_assign(
@@ -751,7 +813,7 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->ctx, __entry->seqno,
__entry->prio, __entry->port)
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 632d6953c78d..029854ae65fc 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,7 +8,6 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
void
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index d34141f7dcd8..03a73d2bd50d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -34,6 +34,8 @@
struct drm_i915_private;
struct timer_list;
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
#if 0
@@ -100,12 +102,24 @@ bool i915_error_injected(void);
typeof(max) max__ = (max); \
(void)(&start__ == &size__); \
(void)(&start__ == &max__); \
- start__ > max__ || size__ > max__ - start__; \
+ start__ >= max__ || size__ > max__ - start__; \
})
#define range_overflows_t(type, start, size, max) \
range_overflows((type)(start), (type)(size), (type)(max))
+#define range_overflows_end(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_end_t(type, start, size, max) \
+ range_overflows_end((type)(start), (type)(size), (type)(max))
+
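The fixed range_overflows() now rejects start == max outright (max is an exclusive bound on start), while the new range_overflows_end() keeps the old behaviour where a range may end exactly at max. A self-contained check of the distinction, with the macros' type-matching asserts elided for brevity:

#include <assert.h>

#define range_overflows(start, size, max) \
	((start) >= (max) || (size) > (max) - (start))
#define range_overflows_end(start, size, max) \
	((start) > (max) || (size) > (max) - (start))

int main(void)
{
	assert( range_overflows(8u, 0u, 8u));	  /* start must lie below max */
	assert(!range_overflows_end(8u, 0u, 8u)); /* may end exactly at max */
	assert(!range_overflows(4u, 4u, 8u));	  /* [4, 8) fits either way */
	assert( range_overflows(4u, 5u, 8u));	  /* 4 + 5 walks past max */
	return 0;
}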
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
@@ -246,6 +260,12 @@ static inline void __list_del_many(struct list_head *head,
WRITE_ONCE(head->next, first);
}
+static inline int list_is_last_rcu(const struct list_head *list,
+ const struct list_head *head)
+{
+ return READ_ONCE(list->next) == head;
+}
+
/*
* Wait until the work is finally complete, even if it tries to postpone
* by requeueing itself. Note, that if the worker never cancels itself,
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 968be26735c5..70fca72f5162 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,6 +21,8 @@
* SOFTWARE.
*/
+#include "i915_drv.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
@@ -51,13 +53,13 @@
*/
/**
- * i915_detect_vgpu - detect virtual GPU
+ * intel_vgpu_detect - detect virtual GPU
* @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_detect_vgpu(struct drm_i915_private *dev_priv)
+void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
@@ -77,7 +79,8 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
if (!shared_area) {
- DRM_ERROR("failed to map MMIO bar to check for VGT\n");
+ drm_err(&dev_priv->drm,
+ "failed to map MMIO bar to check for VGT\n");
return;
}
@@ -87,7 +90,7 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
version_major = readw(shared_area + vgtif_offset(version_major));
if (version_major < VGT_VERSION_MAJOR) {
- DRM_INFO("VGT interface version mismatch!\n");
+ drm_info(&dev_priv->drm, "VGT interface version mismatch!\n");
goto out;
}
@@ -95,17 +98,42 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
dev_priv->vgpu.active = true;
mutex_init(&dev_priv->vgpu.lock);
- DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+ drm_info(&dev_priv->drm, "Virtual GPU for Intel GVT-g detected.\n");
out:
pci_iounmap(pdev, shared_area);
}
+void intel_vgpu_register(struct drm_i915_private *i915)
+{
+ /*
+ * Notify a valid surface after modesetting, when running inside a VM.
+ */
+ if (intel_vgpu_active(i915))
+ intel_uncore_write(&i915->uncore, vgtif_reg(display_ready),
+ VGT_DRV_DISPLAY_READY);
+}
+
+bool intel_vgpu_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.active;
+}
+
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
struct _balloon_info_ {
/*
* There are up to 2 regions per mappable/unmappable graphic
@@ -120,13 +148,15 @@ static struct _balloon_info_ bl_info;
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
if (!drm_mm_node_allocated(node))
return;
- DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
- node->start,
- node->start + node->size,
- node->size / 1024);
+ drm_dbg(&dev_priv->drm,
+ "deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
+ node->start,
+ node->start + node->size,
+ node->size / 1024);
ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
@@ -141,12 +171,13 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
*/
void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
int i;
if (!intel_vgpu_active(ggtt->vm.i915))
return;
- DRM_DEBUG("VGT deballoon.\n");
+ drm_dbg(&dev_priv->drm, "VGT deballoon.\n");
for (i = 0; i < 4; i++)
vgt_deballoon_space(ggtt, &bl_info.space[i]);
@@ -156,13 +187,15 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node,
unsigned long start, unsigned long end)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
unsigned long size = end - start;
int ret;
if (start >= end)
return -EINVAL;
- DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
+ drm_info(&dev_priv->drm,
+ "balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
@@ -219,7 +252,8 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
*/
int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
@@ -241,16 +275,18 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
mappable_end = mappable_base + mappable_size;
unmappable_end = unmappable_base + unmappable_size;
- DRM_INFO("VGT ballooning configuration:\n");
- DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
+ drm_info(&dev_priv->drm, "VGT ballooning configuration:\n");
+ drm_info(&dev_priv->drm,
+ "Mappable graphic memory: base 0x%lx size %ldKiB\n",
mappable_base, mappable_size / 1024);
- DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
+ drm_info(&dev_priv->drm,
+ "Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
if (mappable_end > ggtt->mappable_end ||
unmappable_base < ggtt->mappable_end ||
unmappable_end > ggtt_end) {
- DRM_ERROR("Invalid ballooning configuration!\n");
+ drm_err(&dev_priv->drm, "Invalid ballooning configuration!\n");
return -EINVAL;
}
@@ -287,7 +323,7 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
goto err_below_mappable;
}
- DRM_INFO("VGT balloon successfully\n");
+ drm_info(&dev_priv->drm, "VGT balloon successfully\n");
return 0;
err_below_mappable:
@@ -297,6 +333,6 @@ err_upon_unmappable:
err_upon_mappable:
vgt_deballoon_space(ggtt, &bl_info.space[2]);
err:
- DRM_ERROR("VGT balloon fail\n");
+ drm_err(&dev_priv->drm, "VGT balloon fail\n");
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 8b3663dad193..ffbb77d08048 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,24 +24,17 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
-#include "i915_drv.h"
-#include "i915_pvinfo.h"
+#include <linux/types.h>
-void i915_detect_vgpu(struct drm_i915_private *dev_priv);
+struct drm_i915_private;
+struct i915_ggtt;
-bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
-
-static inline bool
-intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
-}
-
-static inline bool
-intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
-}
+void intel_vgpu_detect(struct drm_i915_private *i915);
+bool intel_vgpu_active(struct drm_i915_private *i915);
+void intel_vgpu_register(struct drm_i915_private *i915);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915);
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915);
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915);
int intel_vgt_balloon(struct i915_ggtt *ggtt);
void intel_vgt_deballoon(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4ff380770b32..2cd7a7e87c0a 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
+ spin_lock(&obj->vma.lock);
+
if (i915_is_ggtt(vm)) {
if (unlikely(overflows_type(vma->size, u32)))
- goto err_vma;
+ goto err_unlock;
vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj));
if (unlikely(vma->fence_size < vma->size || /* overflow */
vma->fence_size > vm->total))
- goto err_vma;
+ goto err_unlock;
GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
@@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj,
__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
- spin_lock(&obj->vma.lock);
-
rb = NULL;
p = &obj->vma.tree.rb_node;
while (*p) {
@@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj,
return vma;
+err_unlock:
+ spin_unlock(&obj->vma.lock);
err_vma:
i915_vma_free(vma);
return ERR_PTR(-E2BIG);
@@ -294,6 +296,7 @@ struct i915_vma_work {
struct dma_fence_work base;
struct i915_vma *vma;
struct drm_i915_gem_object *pinned;
+ struct i915_sw_dma_fence_cb cb;
enum i915_cache_level cache_level;
unsigned int flags;
};
@@ -339,6 +342,25 @@ struct i915_vma_work *i915_vma_work(void)
return vw;
}
+int i915_vma_wait_for_bind(struct i915_vma *vma)
+{
+ int err = 0;
+
+ if (rcu_access_pointer(vma->active.excl.fence)) {
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(fence);
+ }
+ }
+
+ return err;
+}
+
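i915_vma_wait_for_bind() lets callers that need the PTEs visible synchronise with an asynchronous bind. A hedged usage sketch, mirroring the i915_ggtt_pin() change further below: pin first, then wait, unwinding the pin if the wait fails:

	err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
	if (err)
		return err;

	/* The bind may still be running asynchronously; wait it out. */
	err = i915_vma_wait_for_bind(vma);
	if (err) {
		i915_vma_unpin(vma);
		return err;
	}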
/**
* i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
* @vma: VMA to map
@@ -386,6 +408,8 @@ int i915_vma_bind(struct i915_vma *vma,
trace_i915_vma_bind(vma, bind_flags);
if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ struct dma_fence *prev;
+
work->vma = vma;
work->cache_level = cache_level;
work->flags = bind_flags | I915_VMA_ALLOC;
@@ -399,8 +423,14 @@ int i915_vma_bind(struct i915_vma *vma,
* part of the obj->resv->excl_fence as it only affects
* execution and not content or object's backing store lifetime.
*/
- GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
- i915_active_set_exclusive(&vma->active, &work->base.dma);
+ prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
+ if (prev) {
+ __i915_sw_fence_await_dma_fence(&work->base.chain,
+ prev,
+ &work->cb);
+ dma_fence_put(prev);
+ }
+
work->base.dma.error = 0; /* enable the queue_work() */
if (vma->obj) {
@@ -408,7 +438,6 @@ int i915_vma_bind(struct i915_vma *vma,
work->pinned = vma->obj;
}
} else {
- GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
@@ -614,7 +643,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
- GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -892,6 +920,11 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (err)
goto err_fence;
+ if (unlikely(i915_vma_is_closed(vma))) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
bound = atomic_read(&vma->flags);
if (unlikely(bound & I915_VMA_ERROR)) {
err = -ENOMEM;
@@ -977,8 +1010,14 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
do {
err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
- if (err != -ENOSPC)
+ if (err != -ENOSPC) {
+ if (!err) {
+ err = i915_vma_wait_for_bind(vma);
+ if (err)
+ i915_vma_unpin(vma);
+ }
return err;
+ }
/* Unlike i915_vma_pin, we don't take no for an answer! */
flush_idle_contexts(vm->gt);
@@ -1060,6 +1099,7 @@ void i915_vma_release(struct kref *ref)
void i915_vma_parked(struct intel_gt *gt)
{
struct i915_vma *vma, *next;
+ LIST_HEAD(closed);
spin_lock_irq(&gt->closed_lock);
list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
@@ -1071,28 +1111,26 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (i915_vm_tryopen(vm)) {
- list_del_init(&vma->closed_link);
- } else {
+ if (!i915_vm_tryopen(vm)) {
i915_gem_object_put(obj);
- obj = NULL;
+ continue;
}
- spin_unlock_irq(&gt->closed_lock);
+ list_move(&vma->closed_link, &closed);
+ }
+ spin_unlock_irq(&gt->closed_lock);
- if (obj) {
- __i915_vma_put(vma);
- i915_gem_object_put(obj);
- }
+ /* As the GT is held idle, no vma can be reopened as we destroy them */
+ list_for_each_entry_safe(vma, next, &closed, closed_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
- i915_vm_close(vm);
+ INIT_LIST_HEAD(&vma->closed_link);
+ __i915_vma_put(vma);
- /* Restart after dropping lock */
- spin_lock_irq(&gt->closed_lock);
- next = list_first_entry(&gt->closed_vma,
- typeof(*next), closed_link);
+ i915_gem_object_put(obj);
+ i915_vm_close(vm);
}
- spin_unlock_irq(&gt->closed_lock);
}
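
The reworked i915_vma_parked() is a two-phase teardown: splice the candidates onto a private list while holding the spinlock, then run the heavyweight puts outside it, avoiding the old drop-lock-and-restart dance. A minimal userspace sketch of that shape, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;

/* Detach everything under the lock, then tear down outside it, so the */
/* per-node teardown may sleep or take other locks without inversion.  */
static void drain(void)
{
	struct node *list, *next;

	pthread_mutex_lock(&lock);
	list = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	for (; list; list = next) {
		next = list->next;
		free(list);	/* stands in for the object/vm puts */
	}
}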
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -1136,7 +1174,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active);
+ err = i915_request_await_active(rq, &vma->active, 0);
if (err)
return err;
@@ -1190,18 +1228,6 @@ int __i915_vma_unbind(struct i915_vma *vma)
lockdep_assert_held(&vma->vm->mutex);
- /*
- * First wait upon any activity as retiring the request may
- * have side-effects such as unpinning or even unbinding this vma.
- *
- * XXX Actually waiting under the vm->mutex is a hinderance and
- * should be pipelined wherever possible. In cases where that is
- * unavoidable, we should lift the wait to before the mutex.
- */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
@@ -1228,9 +1254,15 @@ int __i915_vma_unbind(struct i915_vma *vma)
* before the unbind, other due to non-strict nature of those
* indirect writes they may end up referencing the GGTT PTE
* after the unbind.
+ *
+ * Note that we may be concurrently poking at the GGTT_WRITE
+ * bit from set-domain, as we mark all GGTT vma associated
+ * with an object. We know this is for another vma, as we
+ * are currently unbinding this one -- so if this vma will be
+ * reused, it will be refaulted and have its dirty bit set
+ * before the next write.
*/
i915_vma_flush_writes(vma);
- GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
ret = i915_vma_revoke_fence(vma);
@@ -1250,7 +1282,8 @@ int __i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
vma->ops->unbind_vma(vma);
}
- atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
+ &vma->flags);
i915_vma_detach(vma);
vma_unbind_pages(vma);
@@ -1268,20 +1301,30 @@ int i915_vma_unbind(struct i915_vma *vma)
if (!drm_mm_node_allocated(&vma->node))
return 0;
+ /* Optimistic wait before taking the mutex */
+ err = i915_vma_sync(vma);
+ if (err)
+ goto out_rpm;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
/* XXX not always required: nop_clear_range */
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
err = mutex_lock_interruptible(&vm->mutex);
if (err)
- return err;
+ goto out_rpm;
err = __i915_vma_unbind(vma);
mutex_unlock(&vm->mutex);
+out_rpm:
if (wakeref)
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 02b31a62951e..e1ced1df13e1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -375,6 +375,8 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
+int i915_vma_wait_for_bind(struct i915_vma *vma);
+
static inline int i915_vma_sync(struct i915_vma *vma)
{
/* Wait for the asynchronous bindings and pending GPU reads */
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index e0942efd5236..63831cdb7402 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -273,21 +273,10 @@ struct i915_vma {
struct rb_node obj_node;
struct hlist_node obj_hash;
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
-
/** This vma's place in the eviction list */
struct list_head evict_link;
struct list_head closed_link;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- unsigned int *exec_flags;
- struct hlist_node exec_node;
- u32 exec_handle;
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 6670a0763be2..d7fe12734db8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -23,7 +23,9 @@
*/
#include <drm/drm_print.h>
+#include <drm/i915_pciids.h>
+#include "display/intel_cdclk.h"
#include "intel_device_info.h"
#include "i915_drv.h"
@@ -132,6 +134,7 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
{
sseu_dump(&info->sseu, p);
+ drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
drm_printf(p, "CS timestamp frequency: %u kHz\n",
info->cs_timestamp_frequency_khz);
}
@@ -743,7 +746,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 * hclks." (through the “Clocking Configuration”
 * (“CLKCFG”) MCHBAR register)
*/
- return dev_priv->rawclk_freq / 16;
+ return RUNTIME_INFO(dev_priv)->rawclk_freq / 16;
} else if (INTEL_GEN(dev_priv) <= 8) {
/* PRMs say:
*
@@ -974,10 +977,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
(HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
- DRM_INFO("Display fused off, disabling\n");
+ drm_info(&dev_priv->drm,
+ "Display fused off, disabling\n");
info->pipe_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
- DRM_INFO("PipeC fused off\n");
+ drm_info(&dev_priv->drm, "PipeC fused off\n");
info->pipe_mask &= ~BIT(PIPE_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
@@ -1000,8 +1004,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
* in the mask.
*/
if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
- DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
- enabled_mask);
+ drm_err(&dev_priv->drm,
+ "invalid pipe fuse configuration: enabled_mask=0x%x\n",
+ enabled_mask);
else
info->pipe_mask = enabled_mask;
@@ -1036,12 +1041,26 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
gen12_sseu_info_init(dev_priv);
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
- DRM_INFO("Disabling ppGTT for VT-d support\n");
+ drm_info(&dev_priv->drm,
+ "Disabling ppGTT for VT-d support\n");
info->ppgtt_type = INTEL_PPGTT_NONE;
}
+ runtime->rawclk_freq = intel_read_rawclk(dev_priv);
+ drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
+
/* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+ runtime->cs_timestamp_frequency_khz =
+ read_timestamp_frequency(dev_priv);
+ if (runtime->cs_timestamp_frequency_khz) {
+ runtime->cs_timestamp_period_ns =
+ div_u64(1e6, runtime->cs_timestamp_frequency_khz);
+ drm_dbg(&dev_priv->drm,
+ "CS timestamp wraparound in %lldms\n",
+ div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
+ S32_MAX),
+ USEC_PER_SEC));
+ }
}
void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -1084,7 +1103,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (!(BIT(i) & vdbox_mask)) {
info->engine_mask &= ~BIT(_VCS(i));
- DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
continue;
}
@@ -1096,8 +1115,8 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
- DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(dev_priv));
+ drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(dev_priv));
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
for (i = 0; i < I915_MAX_VECS; i++) {
@@ -1108,10 +1127,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (!(BIT(i) & vebox_mask)) {
info->engine_mask &= ~BIT(_VECS(i));
- DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
}
}
- DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(dev_priv));
+ drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(dev_priv));
GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 2725cb7fc169..1ecb9df2de91 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -180,6 +180,7 @@ struct intel_device_info {
} display;
u16 ddb_size; /* in blocks */
+ u8 num_supported_dbuf_slices; /* number of DBuf slices */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -215,7 +216,10 @@ struct intel_runtime_info {
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
+ u32 rawclk_freq;
+
u32 cs_timestamp_frequency_khz;
+ u32 cs_timestamp_period_ns;
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
new file mode 100644
index 000000000000..6b922efb1d7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_dram.h"
+
+struct dram_dimm_info {
+ u8 size, width, ranks;
+};
+
+struct dram_channel_info {
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
+ bool is_16gb_dimm;
+};
+
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
+{
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
+
+ return str[type];
+}
+
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
+{
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
+
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return val & SKL_DRAM_SIZE_MASK;
+}
+
+static int skl_get_dimm_width(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+ return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & CNL_DRAM_WIDTH_MASK) {
+ case CNL_DRAM_WIDTH_X8:
+ case CNL_DRAM_WIDTH_X16:
+ case CNL_DRAM_WIDTH_X32:
+ val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total GB to Gb per DRAM device */
+ return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
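Worked numbers for the two helpers above, as a self-contained check (the ternary mirrors the kernel's `?: 1` divide-by-zero guard; the values are illustrative):

#include <assert.h>

struct dimm { unsigned char size, width, ranks; };	/* size in GB */

static int num_devices(const struct dimm *d)
{
	return d->ranks * 64 / (d->width ? d->width : 1);
}

static int is_16gb_dimm(const struct dimm *d)
{
	int n = num_devices(d);

	return 8 * d->size / (n ? n : 1) == 16;	/* Gb per DRAM device */
}

int main(void)
{
	/* 16 GB dual-rank x8: 2 * 64 / 8 = 16 devices of 8 Gb each */
	struct dimm a = { .size = 16, .width = 8, .ranks = 2 };
	/* 16 GB single-rank x8: 8 devices of 16 Gb -> triggers the w/a */
	struct dimm b = { .size = 16, .width = 8, .ranks = 1 };

	assert(num_devices(&a) == 16 && !is_16gb_dimm(&a));
	assert(num_devices(&b) == 8 && is_16gb_dimm(&b));
	return 0;
}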
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (INTEL_GEN(i915) >= 10) {
+ dimm->size = cnl_get_dimm_size(val);
+ dimm->width = cnl_get_dimm_width(val);
+ dimm->ranks = cnl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *i915,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(i915, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(i915, &ch->dimm_s,
+ channel, 'S', val >> 16);
+
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
+ return -EINVAL;
+ }
+
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
+ else
+ ch->ranks = 1;
+
+ ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
+
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, yesno(ch->is_16gb_dimm));
+
+ return 0;
+}
+
+static bool
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
+{
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
+ int ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ if (dram_info->num_channels == 0) {
+ drm_info(&i915->drm, "Number of memory channels is zero\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If either channel is a single rank channel, the worst case
+ * output will be the same as for single rank memory, so consider
+ * the system as single rank memory.
+ */
+ if (ch0.ranks == 1 || ch1.ranks == 1)
+ dram_info->ranks = 1;
+ else
+ dram_info->ranks = max(ch0.ranks, ch1.ranks);
+
+ if (dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
+
+ drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
+ yesno(dram_info->symmetric_memory));
+
+ return 0;
+}
+
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 mem_freq_khz, val;
+ int ret;
+
+ dram_info->type = skl_get_dram_type(i915);
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
+ ret = skl_dram_get_channels_info(i915);
+ if (ret)
+ return ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
+ SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_info->bandwidth_kbps = dram_info->num_channels *
+ mem_freq_khz * 8;
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+ return 0;
+}
+
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * GB to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
+
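The conversion above goes the other way from the SKL path: the register reports Gb per device, so total GB = Gb-per-device * devices / 8. Illustrative arithmetic with assumed values:

#include <assert.h>

int main(void)
{
	int gbit = 8, width = 16, ranks = 2;	/* hypothetical DIMM */
	int devices = ranks * 64 / width;	/* 8 devices on the bus */
	int total_gb = gbit * devices / 8;	/* Gb -> GB */

	assert(devices == 8 && total_gb == 8);
	return 0;
}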
+static int bxt_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 dram_channels;
+ u32 mem_freq_khz, val;
+ u8 num_active_channels;
+ int i;
+
+ val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
+ mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
+ BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
+ num_active_channels = hweight32(dram_channels);
+
+ /* Each active bit represents a 4-byte channel */
+ dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+ */
+ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
+
+ val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
+ if (val == 0xFFFFFFFF)
+ continue;
+
+ dram_info->num_channels++;
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
+
+ /*
+ * If any channel is single rank, the worst case output
+ * is the same as for single rank memory, so treat the
+ * whole system as single rank.
+ */
+ if (dram_info->ranks == 0)
+ dram_info->ranks = dimm.ranks;
+ else if (dimm.ranks == 1)
+ dram_info->ranks = 1;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
+ }
+
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory information\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+
+ return 0;
+}
+
+void intel_dram_detect(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ int ret;
+
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+
+ if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
+ return;
+
+ if (IS_GEN9_LP(i915))
+ ret = bxt_get_dram_info(i915);
+ else
+ ret = skl_get_dram_info(i915);
+ if (ret)
+ return;
+
+ drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
+ dram_info->bandwidth_kbps, dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
+ dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
+{
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
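A standalone sketch of the capability decode feeding gen9_edram_size_mb(); the bitfield positions below are placeholders, since the real EDRAM_NUM_BANKS/EDRAM_WAYS_IDX/EDRAM_SETS_IDX macros live in the register headers:

#include <stdint.h>

static unsigned int example_edram_size_mb(uint32_t cap)
{
	static const uint8_t ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const uint8_t sets[4] = { 1, 1, 2, 2 };
	unsigned int banks = (cap >> 1) & 0xf;		/* placeholder field */
	unsigned int ways_idx = (cap >> 5) & 0x7;	/* placeholder field */
	unsigned int sets_idx = (cap >> 8) & 0x3;	/* placeholder field */

	/* size in MB: banks * ways * sets, as in the function above */
	return banks * ways[ways_idx] * sets[sets_idx];
}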
+
+void intel_dram_edram_detect(struct drm_i915_private *i915)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
+ return;
+
+ edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * The capability bits needed for the size calculation are not
+ * present before gen9, so always report 128MB there.
+ */
+ if (INTEL_GEN(i915) < 9)
+ i915->edram_size_mb = 128;
+ else
+ i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
+
+ dev_info(i915->drm.dev,
+ "Found %uMB of eDRAM\n", i915->edram_size_mb);
+}
diff --git a/drivers/gpu/drm/i915/intel_dram.h b/drivers/gpu/drm/i915/intel_dram.h
new file mode 100644
index 000000000000..4ba13c13162c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DRAM_H__
+#define __INTEL_DRAM_H__
+
+struct drm_i915_private;
+
+void intel_dram_edram_detect(struct drm_i915_private *i915);
+void intel_dram_detect(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DRAM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 2b6c016387c2..21b91313cc5d 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -22,6 +22,7 @@
*/
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gvt.h"
/**
@@ -67,12 +68,13 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
return;
if (intel_vgpu_active(dev_priv)) {
- DRM_INFO("GVT-g is disabled for guest\n");
+ drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
goto bail;
}
if (!is_supported_device(dev_priv)) {
- DRM_INFO("Unsupported device. GVT-g is disabled\n");
+ drm_info(&dev_priv->drm,
+ "Unsupported device. GVT-g is disabled\n");
goto bail;
}
@@ -99,18 +101,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return -ENODEV;
if (!i915_modparams.enable_gvt) {
- DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
+ drm_dbg(&dev_priv->drm,
+ "GVT-g is disabled by kernel params\n");
return 0;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
- DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
+ if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) {
+ drm_err(&dev_priv->drm,
+ "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
ret = intel_gvt_init_device(dev_priv);
if (ret) {
- DRM_DEBUG_DRIVER("Fail to init GVT device\n");
+ drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
goto bail;
}
@@ -121,6 +125,11 @@ bail:
return 0;
}
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gvt;
+}
+
/**
* intel_gvt_driver_remove - cleanup GVT components when i915 driver is
* unbinding
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d0d038b3cd79..6b5e9d88646d 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -265,7 +265,9 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
if (IS_ERR(mem)) {
err = PTR_ERR(mem);
- DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
+ drm_err(&i915->drm,
+ "Failed to setup region(%d) type=%d\n",
+ err, type);
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 4ed60e1f01db..20ab9a5023b5 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -13,91 +13,106 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 5));
+ drm_WARN_ON(&dev_priv->drm, !IS_GEN(dev_priv, 5));
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_WPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
/* KBP is SPT compatible */
return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm,
"Found Cannon Lake LP PCH (CNP-LP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- WARN_ON(!IS_TIGERLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
return PCH_JSP;
default:
return PCH_NONE;
@@ -188,7 +203,8 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
pch_type = intel_pch_type(dev_priv, id);
/* Sanity check virtual PCH id */
- if (WARN_ON(id && pch_type == PCH_NONE))
+ if (drm_WARN_ON(&dev_priv->drm,
+ id && pch_type == PCH_NONE))
id = 0;
dev_priv->pch_type = pch_type;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index bd2d30ecc030..a52986a9e7a6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -40,12 +40,36 @@
#include "gt/intel_llc.h"
#include "i915_drv.h"
+#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -128,16 +152,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
-
- /* WaDDIIOTimeout:glk */
- if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
- u32 val = I915_READ(CHICKEN_MISC_2);
- val &= ~(GLK_CL0_PWR_DOWN |
- GLK_CL1_PWR_DOWN |
- GLK_CL2_PWR_DOWN);
- I915_WRITE(CHICKEN_MISC_2, val);
- }
-
}
static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -469,9 +483,9 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
+ u32 dsparb, dsparb2, dsparb3;
switch (pipe) {
- u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
@@ -1969,6 +1983,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
+ u32 dsparb, dsparb2, dsparb3;
if (!crtc_state->fifo_changed)
return;
@@ -1977,8 +1992,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
- WARN_ON(fifo_size != 511);
+ drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1994,7 +2009,6 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
spin_lock(&uncore->lock);
switch (crtc->pipe) {
- u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = intel_uncore_read_fw(uncore, DSPARB);
dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
@@ -2776,7 +2790,7 @@ static bool ilk_validate_wm_level(int level,
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
- const struct intel_crtc *intel_crtc,
+ const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *pristate,
@@ -2810,34 +2824,6 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static u32
-hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- u32 linetime, ips_linetime;
-
- if (!crtc_state->hw.active)
- return 0;
- if (WARN_ON(adjusted_mode->crtc_clock == 0))
- return 0;
- if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
- return 0;
-
- /* The WM are computed with base on how long it takes to fill a single
- * row at the given clock rate, multiplied by 8.
- * */
- linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- adjusted_mode->crtc_clock);
- ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- intel_state->cdclk.logical.cdclk);
-
- return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
- PIPE_WM_LINETIME_TIME(linetime);
-}
-
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[8])
{
@@ -3135,7 +3121,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_pipe_wm *pipe_wm;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
@@ -3175,12 +3161,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
-
if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
@@ -3189,7 +3172,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -3417,7 +3400,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* level is disabled. Doing otherwise could cause underruns.
*/
if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
- WARN_ON(wm_lp != 1);
+ drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
results->wm_lp_spr[wm_lp - 1] = r->spr_val;
@@ -3426,14 +3409,12 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
/* LP0 register values */
for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
- const struct intel_wm_level *r =
- &intel_crtc->wm.active.ilk.wm[0];
+ const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
+ const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (WARN_ON(!r->enable))
+ if (drm_WARN_ON(&dev_priv->drm, !r->enable))
continue;
- results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
-
results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
@@ -3472,7 +3453,6 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
-#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
@@ -3487,12 +3467,6 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
int wm_lp;
for_each_pipe(dev_priv, pipe) {
- if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
- dirty |= WM_DIRTY_LINETIME(pipe);
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3584,13 +3558,6 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_PIPE(PIPE_C))
I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_A))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_B))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_C))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
-
if (dirty & WM_DIRTY_DDB) {
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = I915_READ(WM_MISC);
@@ -3644,26 +3611,18 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
{
- u8 enabled_slices;
-
- /* Slice 1 will always be enabled */
- enabled_slices = 1;
-
- /* Gen prior to GEN11 have only one DBuf slice */
- if (INTEL_GEN(dev_priv) < 11)
- return enabled_slices;
+ int i;
+ int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ u8 enabled_slices_mask = 0;
- /*
- * FIXME: for now we'll only ever use 1 slice; pretend that we have
- * only that 1 slice enabled until we have a proper way for on-demand
- * toggling of the second slice.
- */
- if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
- enabled_slices++;
+ for (i = 0; i < max_slices; i++) {
+ if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+ enabled_slices_mask |= BIT(i);
+ }
- return enabled_slices;
+ return enabled_slices_mask;
}
/*
@@ -3864,47 +3823,46 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
return true;
}
-static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- const u64 total_data_rate,
- const int num_active,
- struct skl_ddb_allocation *ddb)
+/*
+ * Calculate the initial DBuf slice offset from the slice size and
+ * mask (e.g. if the slice size is 1024 and only the second slice is
+ * enabled, the offset is 1024).
+ */
+static unsigned int
+icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
+ u32 slice_size,
+ u32 ddb_size)
+{
+ unsigned int offset = 0;
+
+ if (!dbuf_slice_mask)
+ return 0;
+
+ offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+
+ WARN_ON(offset >= ddb_size);
+ return offset;
+}
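A compilable illustration of the helper above, using the C library's ffs() in place of the kernel's: with only the second slice enabled (mask 0x2) and a 1024-block slice, the allocation starts at block 1024:

#include <strings.h>

static unsigned int example_first_slice_offset(unsigned int dbuf_slice_mask,
					       unsigned int slice_size)
{
	if (!dbuf_slice_mask)
		return 0;

	/* index of the lowest enabled slice, times the slice size */
	return (ffs(dbuf_slice_mask) - 1) * slice_size;
}

/* example_first_slice_offset(0x2, 1024) == 1024 */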
+
+static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
{
- const struct drm_display_mode *adjusted_mode;
- u64 total_data_bw;
u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- WARN_ON(ddb_size == 0);
+ drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
- adjusted_mode = &crtc_state->hw.adjusted_mode;
- total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
-
- /*
- * 12GB/s is maximum BW supported by single DBuf slice.
- *
- * FIXME dbuf slice code is broken:
- * - must wait for planes to stop using the slice before powering it off
- * - plane straddling both slices is illegal in multi-pipe scenarios
- * - should validate we stay within the hw bandwidth limits
- */
- if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
- ddb->enabled_slices = 2;
- } else {
- ddb->enabled_slices = 1;
- ddb_size /= 2;
- }
-
return ddb_size;
}
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+ u8 active_pipes);
+
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
- struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
@@ -3912,12 +3870,19 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
const struct intel_crtc *crtc;
- u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+ u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
u16 ddb_size;
+ u32 ddb_range_size;
u32 i;
-
- if (WARN_ON(!state) || !crtc_state->hw.active) {
+ u32 dbuf_slice_mask;
+ u32 active_pipes;
+ u32 offset;
+ u32 slice_size;
+ u32 total_slice_mask;
+ u32 start, end;
+
+ if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
alloc->start = 0;
alloc->end = 0;
*num_active = hweight8(dev_priv->active_pipes);
@@ -3925,12 +3890,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
}
if (intel_state->active_pipe_changes)
- *num_active = hweight8(intel_state->active_pipes);
+ active_pipes = intel_state->active_pipes;
else
- *num_active = hweight8(dev_priv->active_pipes);
+ active_pipes = dev_priv->active_pipes;
+
+ *num_active = hweight8(active_pipes);
- ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
- *num_active, ddb);
+ ddb_size = intel_get_ddb_size(dev_priv);
+
+ slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
/*
* If the state doesn't change the active CRTC's or there is no
@@ -3950,30 +3918,95 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
}
/*
+ * Get allowed DBuf slices for correspondent pipe and platform.
+ */
+ dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
+
+ DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
+ dbuf_slice_mask,
+ pipe_name(for_pipe), active_pipes);
+
+ /*
+ * Figure out at which DBuf slice we start, i.e. if we start at
+ * DBuf S2 and the slice size is 1024, the offset is 1024.
+ */
+ offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
+ slice_size, ddb_size);
+
+ /*
+ * Figure out the total size of the allowed DBuf slices, which is
+ * the number of slices allowed for this pipe multiplied by the
+ * slice size. Within this range, DDB entries are still allocated
+ * in proportion to display width.
+ */
+ ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
+
+ /*
* Watermark/ddb requirement highly depends upon width of the
* framebuffer, So instead of allocating DDB equally among pipes
* distribute DDB based on resolution/width of the display.
*/
+ total_slice_mask = dbuf_slice_mask;
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
int hdisplay, vdisplay;
+ u32 pipe_dbuf_slice_mask;
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
+ active_pipes);
+
+ /*
+ * According to BSpec, a pipe can share one DBuf slice with other
+ * pipes or use multiple slices; in both cases we account for
+ * other pipes only if they have exactly the same mask.
+ * However, we still need to track how many slices should be
+ * enabled in total.
+ */
+ total_slice_mask |= pipe_dbuf_slice_mask;
- if (!crtc_state->hw.enable)
+ /*
+ * Do not account for pipes using other slice sets. Luckily, as of
+ * the current BSpec, slice sets do not partially intersect (pipes
+ * share either the exact same slice or the same slice set, with no
+ * partial intersection), so a simple equality check is enough
+ * for now.
+ */
+ if (dbuf_slice_mask != pipe_dbuf_slice_mask)
continue;
drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
- total_width += hdisplay;
+
+ total_width_in_range += hdisplay;
if (pipe < for_pipe)
- width_before_pipe += hdisplay;
+ width_before_pipe_in_range += hdisplay;
else if (pipe == for_pipe)
pipe_width = hdisplay;
}
- alloc->start = ddb_size * width_before_pipe / total_width;
- alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
+
+ start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
+ end = ddb_range_size *
+ (width_before_pipe_in_range + pipe_width) / total_width_in_range;
+
+ alloc->start = offset + start;
+ alloc->end = offset + end;
+
+ DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
+ alloc->start, alloc->end);
+ DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
+ intel_state->enabled_dbuf_slices_mask,
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
}
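To make the width-proportional split above concrete, a small sketch with made-up numbers: two active pipes of 1920 and 1280 pixels sharing a 2048-block slice range that starts at offset 0:

static void example_ddb_split(void)
{
	unsigned int ddb_range_size = 2048, offset = 0;
	unsigned int total_width = 1920 + 1280;	/* both pipes in range */
	unsigned int width_before = 1920;	/* pipes before this one */
	unsigned int pipe_width = 1280;		/* this pipe */

	unsigned int start = offset +
		ddb_range_size * width_before / total_width;
	unsigned int end = offset +
		ddb_range_size * (width_before + pipe_width) / total_width;

	/* this pipe gets blocks 1228..2048 of the range */
	(void)start;
	(void)end;
}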
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4002,7 +4035,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0);
- WARN_ON(ret);
+ drm_WARN_ON(&dev_priv->drm, ret);
for (level = 0; level <= max_level; level++) {
skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
@@ -4091,10 +4124,10 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
intel_display_power_put(dev_priv, power_domain, wakeref);
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
{
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ dev_priv->enabled_dbuf_slices_mask =
+ intel_enabled_dbuf_slices_mask(dev_priv);
}
/*
@@ -4144,6 +4177,254 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
return mul_fixed16(downscale_w, downscale_h);
}
+struct dbuf_slice_conf_entry {
+ u8 active_pipes;
+ u8 dbuf_mask[I915_MAX_PIPES];
+};
+
+/*
+ * Table taken from Bspec 12716.
+ * Pipes have some preferred DBuf slice affinity, plus there are
+ * hardcoded requirements on how those should be distributed in
+ * multipipe scenarios. With more DBuf slices the algorithm would
+ * only get messier and less readable, so we use a table almost
+ * verbatim from BSpec itself - that way it is at least easier to
+ * compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+/*
+ * Table taken from Bspec 49255.
+ * Pipes have some preferred DBuf slice affinity, plus there are
+ * hardcoded requirements on how those should be distributed in
+ * multipipe scenarios. With more DBuf slices the algorithm would
+ * only get messier and less readable, so we use a table almost
+ * verbatim from BSpec itself - that way it is at least easier to
+ * compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ /* the table is terminated by an all-zero sentinel entry */
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].dbuf_mask[pipe];
+ }
+ return 0;
+}
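A self-contained model of the table lookup above, with local stand-ins for the kernel enums; on the ICL table, pipes A+B active resolves pipe A to slice S1 and pipe B to slice S2:

#define EX_BIT(n)	(1u << (n))
enum { EX_PIPE_A, EX_PIPE_B, EX_MAX_PIPES = 4 };
enum { EX_DBUF_S1, EX_DBUF_S2 };

struct ex_entry {
	unsigned char active_pipes;
	unsigned char dbuf_mask[EX_MAX_PIPES];
};

static const struct ex_entry ex_table[] = {
	{
		.active_pipes = EX_BIT(EX_PIPE_A) | EX_BIT(EX_PIPE_B),
		.dbuf_mask = {
			[EX_PIPE_A] = EX_BIT(EX_DBUF_S1),
			[EX_PIPE_B] = EX_BIT(EX_DBUF_S2),
		},
	},
	{}	/* all-zero sentinel terminates the walk */
};

static unsigned char ex_lookup(int pipe, unsigned char active_pipes)
{
	int i;

	for (i = 0; ex_table[i].active_pipes != 0; i++) {
		if (ex_table[i].active_pipes == active_pipes)
			return ex_table[i].dbuf_mask[pipe];
	}
	return 0;
}

/* ex_lookup(EX_PIPE_B, EX_BIT(EX_PIPE_A) | EX_BIT(EX_PIPE_B)) == 0x2 */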
+
+/*
+ * This function finds the entry with the same enabled pipe configuration
+ * and returns the corresponding DBuf slice mask as stated in BSpec for
+ * the particular platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+ /*
+ * FIXME: For ICL this is still somewhat unclear: a previous BSpec
+ * revision required calculating a "pipe ratio" as an additional
+ * constraint on top of the table, to determine whether one or two
+ * slices can be used for single pipe configurations.
+ * However, based on recent info, the constraint is not a "pipe
+ * ratio" but rather a ratio between pixel_rate and cdclk with
+ * additional constants, so for now we use only the table until
+ * this is clarified. This is also why skl_compute_dbuf_slices()
+ * still takes crtc_state - we will need it once those additional
+ * constraints pop up.
+ */
+ return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+ return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+ u8 active_pipes)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (IS_GEN(dev_priv, 12))
+ return tgl_compute_dbuf_slices(pipe, active_pipes);
+ else if (IS_GEN(dev_priv, 11))
+ return icl_compute_dbuf_slices(pipe, active_pipes);
+ /*
+ * For anything else just return one slice for now.
+ * Should be extended for other platforms.
+ */
+ return BIT(DBUF_S1);
+}
+
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
@@ -4195,14 +4476,10 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4230,9 +4507,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!crtc_state->uapi.state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4271,13 +4545,10 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
}
static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
- struct skl_ddb_allocation *ddb /* out */)
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
@@ -4294,9 +4565,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (WARN_ON(!state))
- return 0;
-
if (!crtc_state->hw.active) {
alloc->start = alloc->end = 0;
return 0;
@@ -4314,7 +4582,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
- ddb, alloc, &num_active);
+ alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -4335,13 +4603,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
*/
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
- WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
+ drm_WARN_ON(&dev_priv->drm,
+ wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
}
@@ -4371,7 +4640,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* watermark level, plus an extra share of the leftover blocks
* proportional to its relative data rate.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
u64 rate;
@@ -4406,11 +4675,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
alloc_size -= extra;
total_data_rate -= rate;
}
- WARN_ON(alloc_size != 0 || total_data_rate != 0);
+ drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
@@ -4420,7 +4689,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
continue;
/* Gen11+ uses a separate plane for UV watermarks */
- WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
/* Leave disabled planes at (0,0) */
if (total[plane_id]) {
@@ -4443,7 +4713,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* that aren't actually possible.
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4480,7 +4750,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* Go back and disable the transition watermark if it turns out we
* don't have enough DDB blocks for it.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4722,7 +4992,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
+ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;
@@ -4844,45 +5114,36 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
}
}
-static u32
-skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- uint_fixed_16_16_t linetime_us;
- u32 linetime_wm;
-
- linetime_us = intel_get_linetime_us(crtc_state);
- linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
-
- /* Display WA #1135: BXT:ALL GLK:ALL */
- if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
- linetime_wm /= 2;
-
- return linetime_wm;
-}
-
static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- u16 trans_min, trans_y_tile_min;
- const u16 trans_amount = 10; /* This is configurable amount */
+ u16 trans_min, trans_amount, trans_y_tile_min;
u16 wm0_sel_res_b, trans_offset_b, res_blocks;
- /* Transition WM are not recommended by HW team for GEN9 */
- if (INTEL_GEN(dev_priv) <= 9)
- return;
-
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
return;
- trans_min = 14;
+ /*
+ * WaDisableTWM:skl,kbl,cfl,bxt
+ * Transition WM are not recommended by HW team for GEN9
+ */
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
+ return;
+
if (INTEL_GEN(dev_priv) >= 11)
trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ trans_amount = 0;
+ else
+ trans_amount = 10; /* This is a configurable amount */
trans_offset_b = trans_min + trans_amount;
@@ -4909,7 +5170,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
/* WA BUG:1938466 add one block for non y-tile planes */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
res_blocks += 1;
-
}
/*
@@ -5049,8 +5309,6 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
return ret;
}
- pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
-
return 0;
}
@@ -5059,9 +5317,10 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
+ intel_de_write_fw(dev_priv, reg,
+ (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE_FW(reg, 0);
+ intel_de_write_fw(dev_priv, reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5077,7 +5336,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_b;
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
- I915_WRITE_FW(reg, val);
+ intel_de_write_fw(dev_priv, reg, val);
}
void skl_write_plane_wm(struct intel_plane *plane,
@@ -5153,31 +5412,18 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
- if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
- !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
return false;
}
return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
-static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
- const struct skl_pipe_wm *wm1,
- const struct skl_pipe_wm *wm2)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (!skl_plane_wm_equals(dev_priv,
- &wm1->planes[plane_id],
- &wm2->planes[plane_id]))
- return false;
- }
-
- return wm1->linetime == wm2->linetime;
-}
-
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
const struct skl_ddb_entry *b)
{
@@ -5231,18 +5477,17 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
- const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+ state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
+ ret = skl_allocate_pipe_ddb(new_crtc_state);
if (ret)
return ret;
@@ -5439,8 +5684,6 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
-
ret = intel_add_all_pipes(state);
if (ret)
return ret;
@@ -5515,12 +5758,8 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
- struct skl_ddb_values *results = &state->wm_results;
int ret, i;
- /* Clear all dirty flags */
- results->dirty_pipes = 0;
-
ret = skl_ddb_add_affected_pipes(state);
if (ret)
return ret;
@@ -5528,68 +5767,36 @@ skl_compute_wm(struct intel_atomic_state *state)
/*
* Calculate WM's for all pipes that are part of this transaction.
* Note that skl_ddb_add_affected_pipes may have added more CRTC's that
- * weren't otherwise being modified (and set bits in dirty_pipes) if
- * pipe allocations had to change.
+ * weren't otherwise being modified if pipe allocations had to change.
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
-
- ret = skl_wm_add_affected_planes(state, crtc);
- if (ret)
- return ret;
-
- if (!skl_pipe_wm_equals(crtc,
- &old_crtc_state->wm.skl.optimal,
- &new_crtc_state->wm.skl.optimal))
- results->dirty_pipes |= BIT(crtc->pipe);
}
ret = skl_compute_ddb(state);
if (ret)
return ret;
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
skl_print_wm_changes(state);
return 0;
}
-static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- enum pipe pipe = crtc->pipe;
-
- if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
- return;
-
- I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-}
-
-static void skl_initial_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_ddb_values *results = &state->wm_results;
-
- if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
- return;
-
- mutex_lock(&dev_priv->wm.wm_mutex);
-
- if (crtc_state->uapi.active_changed)
- skl_atomic_update_crtc_wm(state, crtc);
-
- mutex_unlock(&dev_priv->wm.wm_mutex);
-}
-
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
@@ -5712,25 +5919,18 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
if (!crtc->active)
return;
-
- out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
- struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- skl_ddb_get_hw_state(dev_priv, ddb);
+ skl_ddb_get_hw_state(dev_priv);
for_each_intel_crtc(&dev_priv->drm, crtc) {
crtc_state = to_intel_crtc_state(crtc->base.state);
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
-
- if (crtc->active)
- hw->dirty_pipes |= BIT(crtc->pipe);
}
if (dev_priv->active_pipes) {
@@ -5754,8 +5954,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
memset(active, 0, sizeof(*active));
@@ -5774,7 +5972,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
- active->linetime = hw->wm_linetime[pipe];
} else {
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -6629,20 +6826,9 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
- /* WaEnable32PlaneMode:icl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
-
- /*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
- 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /* Wa_1407352427:icl,ehl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, PSDUNIT_CLKGATE_DIS);
+ /* Wa_14010594013:icl,ehl */
+ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ 0, CNL_DELAY_PMRSP);
}
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6650,10 +6836,6 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
- /* Wa_1408615072:tgl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, VSUNIT_CLKGATE_DIS_TGL);
-
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
if (HAS_ENGINE(dev_priv, _VCS(i)))
@@ -6663,6 +6845,11 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(POWERGATE_ENABLE,
I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
+
+ /* Wa_1409825376:tgl (pre-prod) */
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ TGL_VRH_GATING_DIS);
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7248,8 +7435,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
- dev_priv->display.initial_watermarks = skl_initial_wm;
- dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index c06c6a846d9a..d60a85421c5a 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -17,7 +17,6 @@ struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
-struct skl_ddb_allocation;
struct skl_ddb_entry;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -33,11 +32,11 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index cbfb7171d62d..3f13baaef058 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -60,7 +60,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
* to the Valleyview P-unit and not all sideband communications.
*/
if (IS_VALLEYVIEW(i915)) {
- pm_qos_update_request(&i915->sb_qos, 0);
+ cpu_latency_qos_update_request(&i915->sb_qos, 0);
on_each_cpu(ping, NULL, 1);
}
}
@@ -68,7 +68,8 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
static void __vlv_punit_put(struct drm_i915_private *i915)
{
if (IS_VALLEYVIEW(i915))
- pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&i915->sb_qos,
+ PM_QOS_DEFAULT_VALUE);
iosf_mbi_punit_release();
}
@@ -241,8 +242,9 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
* FIXME: There might be some registers where all 1's is a valid value,
* so ideally we should check the register offset instead...
*/
- WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
- pipe_name(pipe), reg, val);
+ drm_WARN(&i915->drm, val == 0xffffffff,
+ "DPIO read pipe %c reg 0x%x == 0x%x\n",
+ pipe_name(pipe), reg, val);
return val;
}
@@ -365,6 +367,10 @@ static inline int gen7_check_mailbox_status(u32 mbox)
return -ETIMEDOUT;
case GEN7_PCODE_ILLEGAL_DATA:
return -EINVAL;
+ case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
+ return -ENXIO;
+ case GEN11_PCODE_LOCKED:
+ return -EBUSY;
case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
return -EOVERFLOW;
default:
@@ -525,7 +531,7 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
*/
drm_dbg_kms(&i915->drm,
"PCODE timeout, retrying with preemption disabled\n");
- WARN_ON_ONCE(timeout_base_ms > 3);
+ drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
preempt_enable();
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 5f2cf6f43b8b..abb18b90d7c3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -324,8 +324,9 @@ static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
- "GT thread status wait timed out\n");
+ drm_WARN_ONCE(&uncore->i915->drm,
+ wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
+ "GT thread status wait timed out\n");
}
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
@@ -441,7 +442,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
cond_resched();
}
- WARN_ON(active_domains);
+ drm_WARN_ON(&uncore->i915->drm, active_domains);
fw = uncore->fw_domains_active;
if (fw)
@@ -757,9 +758,9 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore)
if (!uncore->funcs.force_wake_get)
return;
- WARN(uncore->fw_domains_active,
- "Expected all fw_domains to be inactive, but %08x are still on\n",
- uncore->fw_domains_active);
+ drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
+ "Expected all fw_domains to be inactive, but %08x are still on\n",
+ uncore->fw_domains_active);
}
void assert_forcewakes_active(struct intel_uncore *uncore,
@@ -779,9 +780,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
assert_rpm_wakelock_held(uncore->rpm);
fw_domains &= uncore->fw_domains;
- WARN(fw_domains & ~uncore->fw_domains_active,
- "Expected %08x fw_domains to be active, but %08x are off\n",
- fw_domains, fw_domains & ~uncore->fw_domains_active);
+ drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
+ "Expected %08x fw_domains to be active, but %08x are off\n",
+ fw_domains, fw_domains & ~uncore->fw_domains_active);
/*
* Check that the caller has an explicit wakeref and we don't mistake
@@ -794,9 +795,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
if (uncore->fw_domains_timer & domain->mask)
expect++; /* pending automatic release */
- if (WARN(actual < expect,
- "Expected domain %d to be held awake by caller, count=%d\n",
- domain->id, actual))
+ if (drm_WARN(&uncore->i915->drm, actual < expect,
+ "Expected domain %d to be held awake by caller, count=%d\n",
+ domain->id, actual))
break;
}
@@ -866,9 +867,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
if (entry->domains == FORCEWAKE_ALL)
return uncore->fw_domains;
- WARN(entry->domains & ~uncore->fw_domains,
- "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
- entry->domains & ~uncore->fw_domains, offset);
+ drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
+ "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
+ entry->domains & ~uncore->fw_domains, offset);
return entry->domains;
}
@@ -1158,10 +1159,11 @@ __unclaimed_reg_debug(struct intel_uncore *uncore,
const bool read,
const bool before)
{
- if (WARN(check_for_unclaimed_mmio(uncore) && !before,
- "Unclaimed %s register 0x%x\n",
- read ? "read from" : "write to",
- i915_mmio_reg_offset(reg)))
+ if (drm_WARN(&uncore->i915->drm,
+ check_for_unclaimed_mmio(uncore) && !before,
+ "Unclaimed %s register 0x%x\n",
+ read ? "read from" : "write to",
+ i915_mmio_reg_offset(reg)))
/* Only report the first N failures */
i915_modparams.mmio_debug--;
}
@@ -1436,8 +1438,8 @@ static int __fw_domain_init(struct intel_uncore *uncore,
if (!d)
return -ENOMEM;
- WARN_ON(!i915_mmio_reg_valid(reg_set));
- WARN_ON(!i915_mmio_reg_valid(reg_ack));
+ drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
+ drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
d->uncore = uncore;
d->wake_count = 0;
@@ -1482,8 +1484,8 @@ static void fw_domain_fini(struct intel_uncore *uncore,
return;
uncore->fw_domains &= ~BIT(domain_id);
- WARN_ON(d->wake_count);
- WARN_ON(hrtimer_cancel(&d->timer));
+ drm_WARN_ON(&uncore->i915->drm, d->wake_count);
+ drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
kfree(d);
}
@@ -1613,7 +1615,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
#undef fw_domain_init
/* All future platforms are expected to require complex power gating */
- WARN_ON(!ret && uncore->fw_domains == 0);
+ drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
out:
if (ret)
@@ -2108,7 +2110,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
{
enum forcewake_domains fw_domains = 0;
- WARN_ON(!op);
+ drm_WARN_ON(&uncore->i915->drm, !op);
if (!intel_uncore_has_forcewake(uncore))
return 0;
@@ -2119,7 +2121,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
if (op & FW_REG_WRITE)
fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
- WARN_ON(fw_domains & ~uncore->fw_domains);
+ drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
return fw_domains;
}
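
Several of the assertions converted in this file are bitmask accounting: fw_domains & ~uncore->fw_domains_active isolates the domains the caller expected to be awake but which are actually off. The idiom in isolation, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int expected = 0x7;	/* domains the caller needs awake */
	unsigned int active = 0x5;	/* domains actually powered */
	unsigned int missing = expected & ~active;

	if (missing)
		printf("Expected %08x fw_domains to be active, but %08x are off\n",
		       expected, missing);
	return 0;
}
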
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index ef572a0c2566..68bbb1580162 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -201,11 +201,57 @@ static int live_active_retire(void *arg)
return err;
}
+static int live_active_barrier(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_active *active;
+ int err = 0;
+
+ /* Check that we get a callback when requests retire upon waiting */
+
+ active = __live_alloc(i915);
+ if (!active)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&active->base);
+ if (err)
+ goto out;
+
+ for_each_uabi_engine(engine, i915) {
+ err = i915_active_acquire_preallocate_barrier(&active->base,
+ engine);
+ if (err)
+ break;
+
+ i915_active_acquire_barrier(&active->base);
+ }
+
+ i915_active_release(&active->base);
+
+ if (err == 0)
+ err = i915_active_wait(&active->base);
+
+ if (err == 0 && !READ_ONCE(active->retired)) {
+ pr_err("i915_active not retired after flushing barriers!\n");
+ err = -EINVAL;
+ }
+
+out:
+ __live_put(active);
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ return err;
+}
+
int i915_active_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_active_wait),
SUBTEST(live_active_retire),
+ SUBTEST(live_active_barrier),
};
if (intel_gt_is_wedged(&i915->gt))
@@ -265,28 +311,40 @@ static void spin_unlock_wait(spinlock_t *lock)
spin_unlock_irq(lock);
}
+static void active_flush(struct i915_active *ref,
+ struct i915_active_fence *active)
+{
+ struct dma_fence *fence;
+
+ fence = xchg(__active_fence_slot(active), NULL);
+ if (!fence)
+ return;
+
+ spin_lock_irq(fence->lock);
+ __list_del_entry(&active->cb.node);
+ spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+ atomic_dec(&ref->count);
+
+ GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
void i915_active_unlock_wait(struct i915_active *ref)
{
if (i915_active_acquire_if_busy(ref)) {
struct active_node *it, *n;
+ /* Wait for all active callbacks */
rcu_read_lock();
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- struct dma_fence *f;
-
- /* Wait for all active callbacks */
- f = rcu_dereference(it->base.fence);
- if (f)
- spin_unlock_wait(f->lock);
- }
+ active_flush(ref, &ref->excl);
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+ active_flush(ref, &it->base);
rcu_read_unlock();
i915_active_release(ref);
}
/* And wait for the retire callback */
- spin_lock_irq(&ref->tree_lock);
- spin_unlock_irq(&ref->tree_lock);
+ spin_unlock_wait(&ref->tree_lock);
/* ... which may have been on a thread instead */
flush_work(&ref->work);
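
The new active_flush() claims the fence with xchg(): whoever swaps the slot to NULL wins and is the only party that unlinks the callback and drops the reference count; latecomers see NULL and back off. A userspace sketch of the claim-by-exchange idiom using C11 atomics (all names are illustrative, not the i915 API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int signaled;
};

struct slot {
	_Atomic(struct fence *) fence;
};

/* Whoever exchanges the pointer out wins; everyone else sees NULL
 * and knows the callback has already been flushed. */
static void flush_slot(struct slot *s)
{
	struct fence *f = atomic_exchange(&s->fence, NULL);

	if (!f)
		return;		/* already flushed by another party */

	/* In i915 this is where fence->lock is taken to unlink the
	 * callback node and the ref count is dropped; here we just
	 * consume the fence. */
	printf("flushed fence, signaled=%d\n", f->signaled);
	free(f);
}

int main(void)
{
	struct slot s;
	struct fence *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	f->signaled = 1;
	atomic_init(&s.fence, f);

	flush_slot(&s);
	flush_slot(&s);		/* second call is a harmless no-op */
	return 0;
}
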
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 1b856bae67b5..939a6caebb03 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -298,10 +298,12 @@ static void igt_mm_config(u64 *size, u64 *chunk_size)
static int igt_buddy_alloc_smoke(void *arg)
{
struct i915_buddy_mm mm;
- int max_order;
+ IGT_TIMEOUT(end_time);
+ I915_RND_STATE(prng);
u64 chunk_size;
u64 mm_size;
- int err;
+ int *order;
+ int err, i;
igt_mm_config(&mm_size, &chunk_size);
@@ -313,10 +315,16 @@ static int igt_buddy_alloc_smoke(void *arg)
return err;
}
- for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ order = i915_random_order(mm.max_order + 1, &prng);
+ if (!order)
+ goto out_fini;
+
+ for (i = 0; i <= mm.max_order; ++i) {
struct i915_buddy_block *block;
- int order;
+ int max_order = order[i];
+ bool timeout = false;
LIST_HEAD(blocks);
+ int order;
u64 total;
err = igt_check_mm(&mm);
@@ -360,6 +368,11 @@ retry:
}
total += i915_buddy_block_size(&mm, block);
+
+ if (__igt_timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
} while (total < mm.size);
if (!err)
@@ -373,7 +386,7 @@ retry:
pr_err("post-mm check failed\n");
}
- if (err)
+ if (err || timeout)
break;
cond_resched();
@@ -382,6 +395,8 @@ retry:
if (err == -ENOMEM)
err = 0;
+ kfree(order);
+out_fini:
i915_buddy_fini(&mm);
return err;
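
The reworked igt_buddy_alloc_smoke() walks the orders in a random permutation (i915_random_order()) and stops once the IGT_TIMEOUT budget expires, rather than sweeping every order to completion. A sketch of that shuffle-plus-deadline pattern, assuming a Fisher-Yates shuffle as the userspace analogue of the i915 helper:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000LL;
}

/* Fisher-Yates shuffle, a userspace analogue of i915_random_order(). */
static void shuffle(int *v, int n)
{
	int i;

	for (i = n - 1; i > 0; i--) {
		int j = rand() % (i + 1);
		int tmp = v[i];

		v[i] = v[j];
		v[j] = tmp;
	}
}

int main(void)
{
	enum { MAX_ORDER = 12, BUDGET_MS = 50 };
	int order[MAX_ORDER + 1];
	long long deadline = now_ms() + BUDGET_MS;
	int i;

	for (i = 0; i <= MAX_ORDER; i++)
		order[i] = i;
	shuffle(order, MAX_ORDER + 1);

	for (i = 0; i <= MAX_ORDER; i++) {
		printf("exercising order %d\n", order[i]);
		if (now_ms() > deadline) {
			puts("time budget exhausted, stopping early");
			break;
		}
	}
	return 0;
}
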
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 78f36faf2bbe..623759b73bb4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -98,7 +98,7 @@ static void pm_suspend(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_suspend_gtt_mappings(i915);
+ i915_ggtt_suspend(&i915->ggtt);
i915_gem_suspend_late(i915);
}
}
@@ -108,7 +108,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_suspend_gtt_mappings(i915);
+ i915_ggtt_suspend(&i915->ggtt);
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
@@ -124,7 +124,7 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_restore_gtt_mappings(i915);
+ i915_ggtt_resume(&i915->ggtt);
i915_gem_restore_fences(&i915->ggtt);
i915_gem_resume(i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 34138c7bdd15..0a953bfc0585 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -43,6 +43,7 @@ selftest(reset, intel_reset_live_selftests)
selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
+selftest(ring_submission, intel_ring_submission_live_selftests)
selftest(perf, i915_perf_live_selftests)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index 5a577a1332f5..3bf7f53e9924 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -17,3 +17,4 @@
*/
selftest(engine_cs, intel_engine_cs_perf_selftests)
selftest(blt, i915_gem_object_blt_perf_selftests)
+selftest(region, intel_memory_region_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 58b5f40a07dd..af89c7fc8f59 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -173,7 +173,7 @@ static int igt_vma_create(void *arg)
}
nc = 0;
- for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) {
+ for_each_prime_number(num_ctx, 2 * BITS_PER_LONG) {
for (; nc < num_ctx; nc++) {
ctx = mock_context(i915, "mock");
if (!ctx)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e8a58fe49c39..9ad4ab088466 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -183,7 +183,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 3ef3620e0da5..2a1d4ba1f9f3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -4,6 +4,7 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/sort.h>
#include "../i915_selftest.h"
@@ -19,6 +20,7 @@
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "i915_memcpy.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
@@ -572,6 +574,195 @@ out_put:
return err;
}
+static const char *repr_type(u32 type)
+{
+ switch (type) {
+ case I915_MAP_WB:
+ return "WB";
+ case I915_MAP_WC:
+ return "WC";
+ }
+
+ return "";
+}
+
+static struct drm_i915_gem_object *
+create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
+ void **out_addr)
+{
+ struct drm_i915_gem_object *obj;
+ void *addr;
+
+ obj = i915_gem_object_create_region(mr, size, 0);
+ if (IS_ERR(obj))
+ return obj;
+
+ addr = i915_gem_object_pin_map(obj, type);
+ if (IS_ERR(addr)) {
+ i915_gem_object_put(obj);
+ if (PTR_ERR(addr) == -ENXIO)
+ return ERR_PTR(-ENODEV);
+ return addr;
+ }
+
+ *out_addr = addr;
+ return obj;
+}
+
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static void igt_memcpy_long(void *dst, const void *src, size_t size)
+{
+ unsigned long *tmp = dst;
+ const unsigned long *s = src;
+
+ size = size / sizeof(unsigned long);
+ while (size--)
+ *tmp++ = *s++;
+}
+
+static inline void igt_memcpy(void *dst, const void *src, size_t size)
+{
+ memcpy(dst, src, size);
+}
+
+static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
+{
+ i915_memcpy_from_wc(dst, src, size);
+}
+
+static int _perf_memcpy(struct intel_memory_region *src_mr,
+ struct intel_memory_region *dst_mr,
+ u64 size, u32 src_type, u32 dst_type)
+{
+ struct drm_i915_private *i915 = src_mr->i915;
+ const struct {
+ const char *name;
+ void (*copy)(void *dst, const void *src, size_t size);
+ bool skip;
+ } tests[] = {
+ {
+ "memcpy",
+ igt_memcpy,
+ },
+ {
+ "memcpy_long",
+ igt_memcpy_long,
+ },
+ {
+ "memcpy_from_wc",
+ igt_memcpy_from_wc,
+ !i915_has_memcpy_from_wc(),
+ },
+ };
+ struct drm_i915_gem_object *src, *dst;
+ void *src_addr, *dst_addr;
+ int ret = 0;
+ int i;
+
+ src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto out;
+ }
+
+ dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto out_unpin_src;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+ ktime_t t[5];
+ int pass;
+
+ if (tests[i].skip)
+ continue;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ tests[i].copy(dst_addr, src_addr, size);
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
+ __func__,
+ src_mr->name,
+ repr_type(src_type),
+ dst_mr->name,
+ repr_type(dst_type),
+ tests[i].name,
+ size >> 10,
+ div64_u64(mul_u32_u32(4 * size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+
+ cond_resched();
+ }
+
+ i915_gem_object_unpin_map(dst);
+ i915_gem_object_put(dst);
+out_unpin_src:
+ i915_gem_object_unpin_map(src);
+ i915_gem_object_put(src);
+
+ i915_gem_drain_freed_objects(i915);
+out:
+ if (ret == -ENODEV)
+ ret = 0;
+
+ return ret;
+}
+
+static int perf_memcpy(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const u32 types[] = {
+ I915_MAP_WB,
+ I915_MAP_WC,
+ };
+ static const u32 sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_4M,
+ };
+ struct intel_memory_region *src_mr, *dst_mr;
+ int src_id, dst_id;
+ int i, j, k;
+ int ret;
+
+ for_each_memory_region(src_mr, i915, src_id) {
+ for_each_memory_region(dst_mr, i915, dst_id) {
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ for (j = 0; j < ARRAY_SIZE(types); ++j) {
+ for (k = 0; k < ARRAY_SIZE(types); ++k) {
+ ret = _perf_memcpy(src_mr,
+ dst_mr,
+ sizes[i],
+ types[j],
+ types[k]);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
int intel_memory_region_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -619,3 +810,15 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
return i915_live_subtests(tests, i915);
}
+
+int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_memcpy),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
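
The pr_info() throughput line in _perf_memcpy() condenses five timed passes: the samples are sorted, the fastest and slowest discarded, and the middle three combined with 1-2-1 weights, so t[1] + 2*t[2] + t[3] amounts to four passes' worth of time. That explains the mul_u32_u32(4 * size, 10^9) numerator, and the >> 20 shift converts bytes/s to MiB/s. The same arithmetic worked in userspace (sample values made up for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u64(const void *a, const void *b)
{
	uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;

	return (x > y) - (x < y);
}

int main(void)
{
	/* Five per-pass copy times in ns, e.g. for a 4 MiB copy. */
	uint64_t t[5] = { 1200000, 1100000, 1050000, 1300000, 1000000 };
	uint64_t size = 4ull << 20;	/* bytes copied per pass */
	uint64_t time_ns, mib_s;

	qsort(t, 5, sizeof(t[0]), cmp_u64);

	/* Weighted middle three == roughly four passes of time. */
	time_ns = t[1] + 2 * t[2] + t[3];
	mib_s = (4 * size * 1000000000ull / time_ns) >> 20;

	printf("%llu MiB/s\n", (unsigned long long)mib_s);
	return 0;
}
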
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 3b8986983afc..754d0eb6beaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -144,7 +144,6 @@ struct drm_i915_private *mock_gem_device(void)
goto put_device;
}
i915->drm.pdev = pdev;
- i915->drm.dev_private = i915;
intel_runtime_pm_init_early(&i915->runtime_pm);
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
new file mode 100644
index 000000000000..23adb64d640a
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_trace.h"
+#include "i915_utils.h"
+#include "intel_pm.h"
+#include "vlv_suspend.h"
+
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 pcbr;
+ u32 clock_gate_dis2;
+};
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ * used internally by the HW in a way that doesn't depend on
+ * keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ * These have no effect on the driver's operation, so we don't save/restore
+ * them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ * the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that, based on the above 3 criteria, can be
+ * safely ignored, we save/restore all others, practically treating the HW
+ * context as a black box for the driver. Further investigation is needed to
+ * reduce the saved/restored registers even further, by following the same 3
+ * criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+ struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+ struct intel_uncore *uncore = &i915->uncore;
+ int i;
+
+ if (!s)
+ return;
+
+ /* GAM 0x4000-0x4770 */
+ s->wr_watermark = intel_uncore_read(uncore, GEN7_WR_WATERMARK);
+ s->gfx_prio_ctrl = intel_uncore_read(uncore, GEN7_GFX_PRIO_CTRL);
+ s->arb_mode = intel_uncore_read(uncore, ARB_MODE);
+ s->gfx_pend_tlb0 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB0);
+ s->gfx_pend_tlb1 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ s->lra_limits[i] = intel_uncore_read(uncore, GEN7_LRA_LIMITS(i));
+
+ s->media_max_req_count = intel_uncore_read(uncore, GEN7_MEDIA_MAX_REQ_COUNT);
+ s->gfx_max_req_count = intel_uncore_read(uncore, GEN7_GFX_MAX_REQ_COUNT);
+
+ s->render_hwsp = intel_uncore_read(uncore, RENDER_HWS_PGA_GEN7);
+ s->ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ s->bsd_hwsp = intel_uncore_read(uncore, BSD_HWS_PGA_GEN7);
+ s->blt_hwsp = intel_uncore_read(uncore, BLT_HWS_PGA_GEN7);
+
+ s->tlb_rd_addr = intel_uncore_read(uncore, GEN7_TLB_RD_ADDR);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ s->g3dctl = intel_uncore_read(uncore, VLV_G3DCTL);
+ s->gsckgctl = intel_uncore_read(uncore, VLV_GSCKGCTL);
+ s->mbctl = intel_uncore_read(uncore, GEN6_MBCTL);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ s->ucgctl1 = intel_uncore_read(uncore, GEN6_UCGCTL1);
+ s->ucgctl3 = intel_uncore_read(uncore, GEN6_UCGCTL3);
+ s->rcgctl1 = intel_uncore_read(uncore, GEN6_RCGCTL1);
+ s->rcgctl2 = intel_uncore_read(uncore, GEN6_RCGCTL2);
+ s->rstctl = intel_uncore_read(uncore, GEN6_RSTCTL);
+ s->misccpctl = intel_uncore_read(uncore, GEN7_MISCCPCTL);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ s->gfxpause = intel_uncore_read(uncore, GEN6_GFXPAUSE);
+ s->rpdeuhwtc = intel_uncore_read(uncore, GEN6_RPDEUHWTC);
+ s->rpdeuc = intel_uncore_read(uncore, GEN6_RPDEUC);
+ s->ecobus = intel_uncore_read(uncore, ECOBUS);
+ s->pwrdwnupctl = intel_uncore_read(uncore, VLV_PWRDWNUPCTL);
+ s->rp_down_timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_TIMEOUT);
+ s->rp_deucsw = intel_uncore_read(uncore, GEN6_RPDEUCSW);
+ s->rcubmabdtmr = intel_uncore_read(uncore, GEN6_RCUBMABDTMR);
+ s->rcedata = intel_uncore_read(uncore, VLV_RCEDATA);
+ s->spare2gh = intel_uncore_read(uncore, VLV_SPAREG2H);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ s->gt_imr = intel_uncore_read(uncore, GTIMR);
+ s->gt_ier = intel_uncore_read(uncore, GTIER);
+ s->pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+ s->pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ s->gt_scratch[i] = intel_uncore_read(uncore, GEN7_GT_SCRATCH(i));
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ s->tilectl = intel_uncore_read(uncore, TILECTL);
+ s->gt_fifoctl = intel_uncore_read(uncore, GTFIFOCTL);
+ s->gtlc_wake_ctrl = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ s->gtlc_survive = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ s->pmwgicz = intel_uncore_read(uncore, VLV_PMWGICZ);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ s->gu_ctl0 = intel_uncore_read(uncore, VLV_GU_CTL0);
+ s->gu_ctl1 = intel_uncore_read(uncore, VLV_GU_CTL1);
+ s->pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ s->clock_gate_dis2 = intel_uncore_read(uncore, VLV_GUNIT_CLOCK_GATE2);
+
+ /*
+ * Not saving any of:
+ * DFT, 0x9800-0x9EC0
+ * SARB, 0xB000-0xB1FC
+ * GAC, 0x5208-0x524C, 0x14000-0x14C000
+ * PCI CFG
+ */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+ struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+ int i;
+
+ if (!s)
+ return;
+
+ /* GAM 0x4000-0x4770 */
+ intel_uncore_write(uncore, GEN7_WR_WATERMARK, s->wr_watermark);
+ intel_uncore_write(uncore, GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
+ intel_uncore_write(uncore, ARB_MODE, s->arb_mode | (0xffff << 16));
+ intel_uncore_write(uncore, GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+ intel_uncore_write(uncore, GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ intel_uncore_write(uncore, GEN7_LRA_LIMITS(i), s->lra_limits[i]);
+
+ intel_uncore_write(uncore, GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+ intel_uncore_write(uncore, GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+ intel_uncore_write(uncore, RENDER_HWS_PGA_GEN7, s->render_hwsp);
+ intel_uncore_write(uncore, GAM_ECOCHK, s->ecochk);
+ intel_uncore_write(uncore, BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+ intel_uncore_write(uncore, BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+ intel_uncore_write(uncore, GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ intel_uncore_write(uncore, VLV_G3DCTL, s->g3dctl);
+ intel_uncore_write(uncore, VLV_GSCKGCTL, s->gsckgctl);
+ intel_uncore_write(uncore, GEN6_MBCTL, s->mbctl);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ intel_uncore_write(uncore, GEN6_UCGCTL1, s->ucgctl1);
+ intel_uncore_write(uncore, GEN6_UCGCTL3, s->ucgctl3);
+ intel_uncore_write(uncore, GEN6_RCGCTL1, s->rcgctl1);
+ intel_uncore_write(uncore, GEN6_RCGCTL2, s->rcgctl2);
+ intel_uncore_write(uncore, GEN6_RSTCTL, s->rstctl);
+ intel_uncore_write(uncore, GEN7_MISCCPCTL, s->misccpctl);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ intel_uncore_write(uncore, GEN6_GFXPAUSE, s->gfxpause);
+ intel_uncore_write(uncore, GEN6_RPDEUHWTC, s->rpdeuhwtc);
+ intel_uncore_write(uncore, GEN6_RPDEUC, s->rpdeuc);
+ intel_uncore_write(uncore, ECOBUS, s->ecobus);
+ intel_uncore_write(uncore, VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+ intel_uncore_write(uncore, GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+ intel_uncore_write(uncore, GEN6_RPDEUCSW, s->rp_deucsw);
+ intel_uncore_write(uncore, GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+ intel_uncore_write(uncore, VLV_RCEDATA, s->rcedata);
+ intel_uncore_write(uncore, VLV_SPAREG2H, s->spare2gh);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ intel_uncore_write(uncore, GTIMR, s->gt_imr);
+ intel_uncore_write(uncore, GTIER, s->gt_ier);
+ intel_uncore_write(uncore, GEN6_PMIMR, s->pm_imr);
+ intel_uncore_write(uncore, GEN6_PMIER, s->pm_ier);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ intel_uncore_write(uncore, GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ intel_uncore_write(uncore, TILECTL, s->tilectl);
+ intel_uncore_write(uncore, GTFIFOCTL, s->gt_fifoctl);
+ /*
+ * Preserve the GT allow wake and GFX force clock bits; they are not
+ * restored here, as they are used to control the s0ix suspend/resume
+ * sequence by the caller.
+ */
+ val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ val &= VLV_GTLC_ALLOWWAKEREQ;
+ val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+ intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+
+ val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ val &= VLV_GFX_CLK_FORCE_ON_BIT;
+ val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+ intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+ intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ intel_uncore_write(uncore, VLV_GU_CTL0, s->gu_ctl0);
+ intel_uncore_write(uncore, VLV_GU_CTL1, s->gu_ctl1);
+ intel_uncore_write(uncore, VLV_PCBR, s->pcbr);
+ intel_uncore_write(uncore, VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
+}
+
+static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
+ u32 mask, u32 val)
+{
+ i915_reg_t reg = VLV_GTLC_PW_STATUS;
+ u32 reg_value;
+ int ret;
+
+ /* The HW does not like us polling for PW_STATUS frequently, so
+ * use the sleeping loop rather than risk the busy spin within
+ * intel_wait_for_register().
+ *
+ * Transitioning between RC6 states should be at most 2ms (see
+ * valleyview_enable_rps) so use a 3ms timeout.
+ */
+ ret = wait_for(((reg_value =
+ intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
+ == val, 3);
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
+ return ret;
+}
+
+static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+ int err;
+
+ val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+ if (force_on)
+ val |= VLV_GFX_CLK_FORCE_ON_BIT;
+ intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+ if (!force_on)
+ return 0;
+
+ err = intel_wait_for_register(uncore,
+ VLV_GTLC_SURVIVABILITY_REG,
+ VLV_GFX_CLK_STATUS_BIT,
+ VLV_GFX_CLK_STATUS_BIT,
+ 20);
+ if (err)
+ drm_err(&i915->drm,
+ "timeout waiting for GFX clock force-on (%08x)\n",
+ intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG));
+
+ return err;
+}
+
+static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 mask;
+ u32 val;
+ int err;
+
+ val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ val &= ~VLV_GTLC_ALLOWWAKEREQ;
+ if (allow)
+ val |= VLV_GTLC_ALLOWWAKEREQ;
+ intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+ intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);
+
+ mask = VLV_GTLC_ALLOWWAKEACK;
+ val = allow ? mask : 0;
+
+ err = vlv_wait_for_pw_status(i915, mask, val);
+ if (err)
+ drm_err(&i915->drm, "timeout disabling GT waking\n");
+
+ return err;
+}
+
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+ bool wait_for_on)
+{
+ u32 mask;
+ u32 val;
+
+ mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+ val = wait_for_on ? mask : 0;
+
+ /*
+ * RC6 transitioning can be delayed up to 2 msec (see
+ * valleyview_enable_rps); use 3 msec for safety.
+ *
+ * This can fail to turn off RC6 if the GPU is stuck after a failed
+ * reset and we are trying to force the machine to sleep.
+ */
+ if (vlv_wait_for_pw_status(dev_priv, mask, val))
+ drm_dbg(&dev_priv->drm,
+ "timeout waiting for GT wells to go %s\n",
+ onoff(wait_for_on));
+}
+
+static void vlv_check_no_gt_access(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+
+ if (!(intel_uncore_read(uncore, VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+ return;
+
+ drm_dbg(&i915->drm, "GT register access while GT waking disabled\n");
+ intel_uncore_write(uncore, VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
+int vlv_suspend_complete(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+ int err;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ return 0;
+
+ /*
+ * Bspec defines the following GT well-on flags as debug only, so
+ * don't treat them as hard failures.
+ */
+ vlv_wait_for_gt_wells(dev_priv, false);
+
+ mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_uncore_read(&dev_priv->uncore, VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+ vlv_check_no_gt_access(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, true);
+ if (err)
+ goto err1;
+
+ err = vlv_allow_gt_wake(dev_priv, false);
+ if (err)
+ goto err2;
+
+ vlv_save_gunit_s0ix_state(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ /* For safety always re-enable waking and disable gfx clock forcing */
+ vlv_allow_gt_wake(dev_priv, true);
+err1:
+ vlv_force_gfx_clock(dev_priv, false);
+
+ return err;
+}
+
+int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume)
+{
+ int err;
+ int ret;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ return 0;
+
+ /*
+ * If any of the steps fail, just try to continue; that's the best we
+ * can do at this point. Return the first error code (which will also
+ * leave RPM permanently disabled).
+ */
+ ret = vlv_force_gfx_clock(dev_priv, true);
+
+ vlv_restore_gunit_s0ix_state(dev_priv);
+
+ err = vlv_allow_gt_wake(dev_priv, true);
+ if (!ret)
+ ret = err;
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (!ret)
+ ret = err;
+
+ vlv_check_no_gt_access(dev_priv);
+
+ if (rpm_resume)
+ intel_init_clock_gating(dev_priv);
+
+ return ret;
+}
+
+int vlv_suspend_init(struct drm_i915_private *i915)
+{
+ if (!IS_VALLEYVIEW(i915))
+ return 0;
+
+ /* we write all the values in the struct, so no need to zero it out */
+ i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
+ GFP_KERNEL);
+ if (!i915->vlv_s0ix_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void vlv_suspend_cleanup(struct drm_i915_private *i915)
+{
+ if (!i915->vlv_s0ix_state)
+ return;
+
+ kfree(i915->vlv_s0ix_state);
+ i915->vlv_s0ix_state = NULL;
+}
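
vlv_suspend_complete() unwinds with the usual kernel goto idiom: each hardware-mutating step gets a label, and a failure falls through the labels in reverse so earlier steps are undone: waking is re-enabled and the forced clock dropped. A generic sketch of that error-unwind shape (the step functions are illustrative stand-ins, not the driver's):

#include <stdio.h>

/* Illustrative stand-ins for the two reversible hardware steps. */
static int force_clock(int on)
{
	printf("force_clock(%d)\n", on);
	return 0;
}

static int allow_wake(int on)
{
	printf("allow_wake(%d)\n", on);
	return on ? 0 : -1;	/* disabling waking fails in this sketch */
}

static int suspend_complete(void)
{
	int err;

	err = force_clock(1);
	if (err)
		goto err_clock;

	err = allow_wake(0);
	if (err)
		goto err_wake;

	/* ... save state, drop the forced clock, done ... */
	return force_clock(0);

err_wake:
	allow_wake(1);		/* undo: re-enable waking */
err_clock:
	force_clock(0);		/* undo: drop clock forcing */
	return err;
}

int main(void)
{
	return suspend_complete() ? 1 : 0;
}
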
diff --git a/drivers/gpu/drm/i915/vlv_suspend.h b/drivers/gpu/drm/i915/vlv_suspend.h
new file mode 100644
index 000000000000..895091cb1f62
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __VLV_SUSPEND_H__
+#define __VLV_SUSPEND_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int vlv_suspend_init(struct drm_i915_private *i915);
+void vlv_suspend_cleanup(struct drm_i915_private *i915);
+int vlv_suspend_complete(struct drm_i915_private *i915);
+int vlv_resume_prepare(struct drm_i915_private *i915, bool rpm_resume);
+
+#endif /* __VLV_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8cb2665b2c74..4da22a94790c 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -446,7 +446,7 @@ static int imx_ldb_register(struct drm_device *drm,
if (imx_ldb_ch->bridge) {
ret = drm_bridge_attach(&imx_ldb_ch->encoder,
- imx_ldb_ch->bridge, NULL);
+ imx_ldb_ch->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 28826c0aa24a..6776ebb3246d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -359,7 +359,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!state->crtc)
+ if (WARN_ON(!state->crtc))
return -EINVAL;
crtc_state =
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 3dca424059f7..08fafa4bf8c2 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -24,6 +24,7 @@
struct imx_parallel_display {
struct drm_connector connector;
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct device *dev;
void *edid;
int edid_len;
@@ -31,7 +32,7 @@ struct imx_parallel_display {
u32 bus_flags;
struct drm_display_mode mode;
struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
};
static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
@@ -44,6 +45,11 @@ static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
return container_of(e, struct imx_parallel_display, encoder);
}
+static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
+{
+ return container_of(b, struct imx_parallel_display, bridge);
+}
+
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
@@ -89,37 +95,148 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
return &imxpd->encoder;
}
-static void imx_pd_encoder_enable(struct drm_encoder *encoder)
+static void imx_pd_bridge_enable(struct drm_bridge *bridge)
{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
drm_panel_prepare(imxpd->panel);
drm_panel_enable(imxpd->panel);
}
-static void imx_pd_encoder_disable(struct drm_encoder *encoder)
+static void imx_pd_bridge_disable(struct drm_bridge *bridge)
{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
drm_panel_disable(imxpd->panel);
drm_panel_unprepare(imxpd->panel);
}
-static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static const u32 imx_pd_bus_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_GBR888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
+ MEDIA_BUS_FMT_RGB565_1X16,
+};
+
+static u32 *
+imx_pd_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
{
- struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *output_fmts;
- if (!imxpd->bus_format && di->num_bus_formats) {
- imx_crtc_state->bus_flags = di->bus_flags;
- imx_crtc_state->bus_format = di->bus_formats[0];
- } else {
- imx_crtc_state->bus_flags = imxpd->bus_flags;
- imx_crtc_state->bus_format = imxpd->bus_format;
+ if (!imxpd->bus_format && !di->num_bus_formats) {
+ *num_output_fmts = ARRAY_SIZE(imx_pd_bus_fmts);
+ return kmemdup(imx_pd_bus_fmts, sizeof(imx_pd_bus_fmts),
+ GFP_KERNEL);
+ }
+
+ *num_output_fmts = 1;
+ output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ if (!imxpd->bus_format && di->num_bus_formats)
+ output_fmts[0] = di->bus_formats[0];
+ else
+ output_fmts[0] = imxpd->bus_format;
+
+ return output_fmts;
+}
+
+static bool imx_pd_format_supported(u32 output_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx_pd_bus_fmts); i++) {
+ if (imx_pd_bus_fmts[i] == output_fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx_pd_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *input_fmts;
+
+ /*
+ * If the next bridge does not support bus format negotiation, let's
+ * use the static bus format definition (imxpd->bus_format) if one is
+ * specified, or RGB888 when it's not.
+ */
+ if (output_fmt == MEDIA_BUS_FMT_FIXED)
+ output_fmt = imxpd->bus_format ? : MEDIA_BUS_FMT_RGB888_1X24;
+
+ /* Now make sure the requested output format is supported. */
+ if ((imxpd->bus_format && imxpd->bus_format != output_fmt) ||
+ !imx_pd_format_supported(output_fmt)) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+
+static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ struct drm_bridge_state *next_bridge_state = NULL;
+ struct drm_bridge *next_bridge;
+ u32 bus_flags, bus_fmt;
+
+ next_bridge = drm_bridge_get_next_bridge(bridge);
+ if (next_bridge)
+ next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ next_bridge);
+
+ if (next_bridge_state)
+ bus_flags = next_bridge_state->input_bus_cfg.flags;
+ else if (!imxpd->bus_format && di->num_bus_formats)
+ bus_flags = di->bus_flags;
+ else
+ bus_flags = imxpd->bus_flags;
+
+ bus_fmt = bridge_state->input_bus_cfg.format;
+ if (!imx_pd_format_supported(bus_fmt))
+ return -EINVAL;
+
+ if (bus_flags &
+ ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) {
+ dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags);
+ return -EINVAL;
}
+
+ bridge_state->output_bus_cfg.flags = bus_flags;
+ bridge_state->input_bus_cfg.flags = bus_flags;
+ imx_crtc_state->bus_flags = bus_flags;
+ imx_crtc_state->bus_format = bridge_state->input_bus_cfg.format;
imx_crtc_state->di_hsync_pin = 2;
imx_crtc_state->di_vsync_pin = 3;
@@ -143,10 +260,15 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
.destroy = imx_drm_encoder_destroy,
};
-static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
- .enable = imx_pd_encoder_enable,
- .disable = imx_pd_encoder_disable,
- .atomic_check = imx_pd_encoder_atomic_check,
+static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
+ .enable = imx_pd_bridge_enable,
+ .disable = imx_pd_bridge_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_check = imx_pd_bridge_atomic_check,
+ .atomic_get_input_bus_fmts = imx_pd_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
};
static int imx_pd_register(struct drm_device *drm,
@@ -166,11 +288,13 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
- if (!imxpd->bridge) {
+ imxpd->bridge.funcs = &imx_pd_bridge_funcs;
+ drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+
+ if (!imxpd->next_bridge) {
drm_connector_helper_add(&imxpd->connector,
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
@@ -181,8 +305,9 @@ static int imx_pd_register(struct drm_device *drm,
if (imxpd->panel)
drm_panel_attach(imxpd->panel, &imxpd->connector);
- if (imxpd->bridge) {
- ret = drm_bridge_attach(encoder, imxpd->bridge, NULL);
+ if (imxpd->next_bridge) {
+ ret = drm_bridge_attach(encoder, imxpd->next_bridge,
+ &imxpd->bridge, 0);
if (ret < 0) {
dev_err(imxpd->dev, "failed to attach bridge: %d\n",
ret);
@@ -227,7 +352,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->bus_format = bus_format;
/* port@1 is the output port */
- ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
+ &imxpd->next_bridge);
if (ret && ret != -ENODEV)
return ret;
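
The two new callbacks implement drm_bridge bus-format negotiation: atomic_get_output_bus_fmts() advertises what the bridge can emit (the whole imx_pd_bus_fmts table, or the single configured/display-provided format), and atomic_get_input_bus_fmts() answers which inputs can produce a given output; for this pass-through bridge, input equals output. A toy model of that handshake (simplified, not the drm_bridge API itself; the media-bus codes are assumptions for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Assumed media-bus codes, for illustration only. */
static const unsigned int supported[] = { 0x100a /* RGB888 */, 0x1009 /* RGB666 */ };
#define N_SUPPORTED (sizeof(supported) / sizeof(supported[0]))

/* "What can I output?": returns a malloc'd list the caller frees. */
static unsigned int *get_output_fmts(unsigned int *n)
{
	unsigned int *out = malloc(sizeof(supported));

	if (!out)
		return NULL;
	memcpy(out, supported, sizeof(supported));
	*n = N_SUPPORTED;
	return out;
}

/* "To emit output_fmt, what inputs do I take?": pass-through. */
static unsigned int *get_input_fmts(unsigned int output_fmt, unsigned int *n)
{
	unsigned int *in;
	size_t i;

	*n = 0;
	for (i = 0; i < N_SUPPORTED; i++) {
		if (supported[i] == output_fmt)
			break;
	}
	if (i == N_SUPPORTED)
		return NULL;	/* unsupported: negotiation fails */

	in = malloc(sizeof(*in));
	if (!in)
		return NULL;
	in[0] = output_fmt;	/* input must match output */
	*n = 1;
	return in;
}

int main(void)
{
	unsigned int n, i;
	unsigned int *fmts = get_output_fmts(&n);

	for (i = 0; fmts && i < n; i++) {
		unsigned int m, *in = get_input_fmts(fmts[i], &m);

		printf("output %#x <- input %#x\n", fmts[i], in ? in[0] : 0);
		free(in);
	}
	free(fmts);
	return 0;
}
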
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 6d47ef7b148c..548cc25ea4ab 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -328,8 +328,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (!drm_atomic_crtc_needs_modeset(state))
return 0;
- if (state->mode.hdisplay > priv->soc_info->max_height ||
- state->mode.vdisplay > priv->soc_info->max_width)
+ if (state->mode.hdisplay > priv->soc_info->max_width ||
+ state->mode.vdisplay > priv->soc_info->max_height)
return -EINVAL;
rate = clk_round_rate(priv->pix_clk,
@@ -474,7 +474,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
{
- struct ingenic_drm *priv = arg;
+ struct ingenic_drm *priv = drm_device_get_priv(arg);
unsigned int state;
regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
@@ -737,7 +737,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return ret;
}
- ret = drm_bridge_attach(&priv->encoder, bridge, NULL);
+ ret = drm_bridge_attach(&priv->encoder, bridge, NULL, 0);
if (ret) {
dev_err(dev, "Unable to attach bridge");
return ret;
@@ -843,6 +843,7 @@ static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
static struct platform_driver ingenic_drm_driver = {
.driver = {
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 124efe4fa97b..2daac64d8955 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -15,10 +15,14 @@
#include "lima_vm.h"
int lima_sched_timeout_ms;
+uint lima_heap_init_nr_pages = 8;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
+MODULE_PARM_DESC(heap_init_nr_pages, "initial number of heap buffer pages");
+module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
+
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
@@ -68,7 +72,7 @@ static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_
if (args->pad)
return -EINVAL;
- if (args->flags)
+ if (args->flags & ~(LIMA_BO_FLAG_HEAP))
return -EINVAL;
if (args->size == 0)
@@ -241,6 +245,12 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
+/**
+ * Changelog:
+ *
+ * - 1.1.0 - add heap buffer support
+ */
+
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
@@ -250,9 +260,9 @@ static struct drm_driver lima_drm_driver = {
.fops = &lima_drm_driver_fops,
.name = "lima",
.desc = "lima DRM",
- .date = "20190217",
+ .date = "20191231",
.major = 1,
- .minor = 0,
+ .minor = 1,
.patchlevel = 0,
.gem_create_object = lima_gem_create_object,
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index 69c7344715c9..f492ecc6a5d9 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -9,6 +9,7 @@
#include "lima_ctx.h"
extern int lima_sched_timeout_ms;
+extern uint lima_heap_init_nr_pages;
struct lima_vm;
struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index d0059d8c97d8..5404e0d668db 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -4,6 +4,8 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-mapping.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -15,6 +17,83 @@
#include "lima_gem.h"
#include "lima_vm.h"
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+{
+ struct page **pages;
+ struct address_space *mapping = bo->base.base.filp->f_mapping;
+ struct device *dev = bo->base.base.dev->dev;
+ size_t old_size = bo->heap_size;
+ size_t new_size = bo->heap_size ? bo->heap_size * 2 :
+ (lima_heap_init_nr_pages << PAGE_SHIFT);
+ struct sg_table sgt;
+ int i, ret;
+
+ if (bo->heap_size >= bo->base.base.size)
+ return -ENOSPC;
+
+ new_size = min(new_size, bo->base.base.size);
+
+ mutex_lock(&bo->base.pages_lock);
+
+ if (bo->base.pages) {
+ pages = bo->base.pages;
+ } else {
+ pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+ sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
+ if (!pages) {
+ mutex_unlock(&bo->base.pages_lock);
+ return -ENOMEM;
+ }
+
+ bo->base.pages = pages;
+ bo->base.pages_use_count = 1;
+
+ mapping_set_unevictable(mapping);
+ }
+
+ for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
+ struct page *page = shmem_read_mapping_page(mapping, i);
+
+ if (IS_ERR(page)) {
+ mutex_unlock(&bo->base.pages_lock);
+ return PTR_ERR(page);
+ }
+ pages[i] = page;
+ }
+
+ mutex_unlock(&bo->base.pages_lock);
+
+ ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
+ new_size, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (bo->base.sgt) {
+ dma_unmap_sg(dev, bo->base.sgt->sgl,
+ bo->base.sgt->nents, DMA_BIDIRECTIONAL);
+ sg_free_table(bo->base.sgt);
+ } else {
+ bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+ if (!bo->base.sgt) {
+ sg_free_table(&sgt);
+ return -ENOMEM;
+ }
+ }
+
+ dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
+
+ *bo->base.sgt = sgt;
+
+ if (vm) {
+ ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+ }
+
+ bo->heap_size = new_size;
+ return 0;
+}
+
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
@@ -22,7 +101,8 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
gfp_t mask;
struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
- struct sg_table *sgt;
+ struct lima_bo *bo;
+ bool is_heap = flags & LIMA_BO_FLAG_HEAP;
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
@@ -36,10 +116,18 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
mask |= __GFP_DMA32;
mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- sgt = drm_gem_shmem_get_pages_sgt(obj);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto out;
+ if (is_heap) {
+ bo = to_lima_bo(obj);
+ err = lima_heap_alloc(bo, NULL);
+ if (err)
+ goto out;
+ } else {
+ struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
+
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
}
err = drm_gem_handle_create(file, obj, handle);
@@ -79,17 +167,47 @@ static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *f
lima_vm_bo_del(vm, bo);
}
+static int lima_gem_pin(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return -EINVAL;
+
+ return drm_gem_shmem_pin(obj);
+}
+
+static void *lima_gem_vmap(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_shmem_vmap(obj);
+}
+
+static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return -EINVAL;
+
+ return drm_gem_shmem_mmap(obj, vma);
+}
+
static const struct drm_gem_object_funcs lima_gem_funcs = {
.free = lima_gem_free_object,
.open = lima_gem_object_open,
.close = lima_gem_object_close,
.print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
+ .pin = lima_gem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
+ .vmap = lima_gem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .mmap = lima_gem_mmap,
};
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
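
lima_heap_alloc() grows the heap lazily: start at lima_heap_init_nr_pages pages, double on each call, clamp to the backing GEM object's size via min(), and return -ENOSPC once the object is exhausted. The growth arithmetic in isolation (a sketch, not the driver code):

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned int heap_init_nr_pages = 8;	/* module parameter default */

/* Returns the new heap size, or -ENOSPC when the object is full. */
static long grow_heap(unsigned long cur_size, unsigned long obj_size)
{
	unsigned long new_size;

	if (cur_size >= obj_size)
		return -ENOSPC;

	new_size = cur_size ? cur_size * 2 :
			      (unsigned long)heap_init_nr_pages << PAGE_SHIFT;
	if (new_size > obj_size)
		new_size = obj_size;

	return (long)new_size;
}

int main(void)
{
	unsigned long obj_size = 1 << 20;	/* 1 MiB backing object */
	long sz = 0;

	while ((sz = grow_heap(sz, obj_size)) > 0)
		printf("heap grown to %ld bytes\n", sz);
	puts("backing object exhausted (-ENOSPC)");
	return 0;
}
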
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 1800feb3e47f..ccea06142f4b 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -7,12 +7,15 @@
#include <drm/drm_gem_shmem_helper.h>
struct lima_submit;
+struct lima_vm;
struct lima_bo {
struct drm_gem_shmem_object base;
struct mutex lock;
struct list_head va;
+
+ size_t heap_size;
};
static inline struct lima_bo *
@@ -31,6 +34,7 @@ static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
return bo->base.base.resv;
}
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm);
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index ccf49faedebf..d8841c870d90 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -11,6 +11,8 @@
#include "lima_device.h"
#include "lima_gp.h"
#include "lima_regs.h"
+#include "lima_gem.h"
+#include "lima_vm.h"
#define gp_write(reg, data) writel(data, ip->iomem + reg)
#define gp_read(reg) readl(ip->iomem + reg)
@@ -20,6 +22,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
struct lima_ip *ip = data;
struct lima_device *dev = ip->dev;
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ struct lima_sched_task *task = pipe->current_task;
u32 state = gp_read(LIMA_GP_INT_STAT);
u32 status = gp_read(LIMA_GP_STATUS);
bool done = false;
@@ -29,8 +32,16 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
return IRQ_NONE;
if (state & LIMA_GP_IRQ_MASK_ERROR) {
- dev_err(dev->dev, "gp error irq state=%x status=%x\n",
- state, status);
+ if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
+ dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
+ status);
+ } else {
+ dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+ state, status);
+ if (task)
+ task->recoverable = false;
+ }
/* mask all interrupts before hard reset */
gp_write(LIMA_GP_INT_MASK, 0);
@@ -43,6 +54,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
LIMA_GP_STATUS_PLBU_ACTIVE);
done = valid && !active;
+ pipe->error = false;
}
gp_write(LIMA_GP_INT_CLEAR, state);
@@ -121,6 +133,22 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
u32 cmd = 0;
int i;
+ /* update real heap buffer size for GP */
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+
+ if (bo->heap_size &&
+ lima_vm_get_va(task->vm, bo) ==
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2]) {
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] +
+ bo->heap_size;
+ task->recoverable = true;
+ task->heap = bo;
+ break;
+ }
+ }
+
if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
f[LIMA_GP_VSCL_END_ADDR >> 2])
cmd |= LIMA_GP_CMD_START_VS;
@@ -184,6 +212,36 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+ struct lima_sched_task *task = pipe->current_task;
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ size_t fail_size =
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] -
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2];
+
+ if (fail_size == task->heap->heap_size) {
+ int ret;
+
+ ret = lima_heap_alloc(task->heap, task->vm);
+ if (ret < 0)
+ return ret;
+ }
+
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+ /* Resume from where we stopped, i.e. new start is old end */
+ gp_write(LIMA_GP_PLBU_ALLOC_START_ADDR,
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
+ gp_write(LIMA_GP_PLBU_ALLOC_END_ADDR,
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+ return 0;
+}
+
static void lima_gp_print_version(struct lima_ip *ip)
{
u32 version, major, minor;
@@ -270,6 +328,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
pipe->task_fini = lima_gp_task_fini;
pipe->task_error = lima_gp_task_error;
pipe->task_mmu_error = lima_gp_task_mmu_error;
+ pipe->task_recover = lima_gp_task_recover;
return 0;
}
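
lima_gp_task_recover() restarts the tiler exactly where it ran out of memory: the new PLBU allocation window begins at the old end address and its end moves out to cover the freshly grown heap, after which LIMA_GP_CMD_UPDATE_PLBU_ALLOC tells the GP to continue. The window arithmetic as a sketch (register writes replaced by a struct; semantics as described in the hunk above):

#include <stdint.h>
#include <stdio.h>

struct plbu_window {
	uint32_t start;
	uint32_t end;
};

/* Resume from where we stopped: the new start is the old end, and
 * the end moves out to heap base + the freshly grown heap size. */
static void plbu_resume(struct plbu_window *w, uint32_t heap_base,
			uint32_t new_heap_size)
{
	w->start = w->end;
	w->end = heap_base + new_heap_size;
}

int main(void)
{
	/* Heap mapped at 0x10000, initially 0x8000 bytes. */
	struct plbu_window w = { .start = 0x10000, .end = 0x18000 };

	plbu_resume(&w, 0x10000, 0x10000);	/* heap doubled */
	printf("PLBU window now [%#x, %#x)\n", w.start, w.end);
	return 0;
}
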
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 97ec09dee572..f79d2af427e7 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -99,6 +99,11 @@ void lima_mmu_fini(struct lima_ip *ip)
}
+void lima_mmu_flush_tlb(struct lima_ip *ip)
+{
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+}
+
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
{
struct lima_device *dev = ip->dev;
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
index 8c78319bcc8e..4f8ccbebcba1 100644
--- a/drivers/gpu/drm/lima/lima_mmu.h
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -10,6 +10,7 @@ struct lima_vm;
int lima_mmu_init(struct lima_ip *ip);
void lima_mmu_fini(struct lima_ip *ip);
+void lima_mmu_flush_tlb(struct lima_ip *ip);
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
void lima_mmu_page_fault_resume(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
index ace8ecefbe90..0124c90e0153 100644
--- a/drivers/gpu/drm/lima/lima_regs.h
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -239,6 +239,7 @@
#define LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
#define LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
#define LIMA_MMU_STATUS_BUS_ID(x) ((x >> 6) & 0x1F)
+#define LIMA_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
#define LIMA_MMU_COMMAND 0x0008
#define LIMA_MMU_COMMAND_ENABLE_PAGING 0x00
#define LIMA_MMU_COMMAND_DISABLE_PAGING 0x01
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index b561dd05bd62..3886999b4533 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -313,6 +313,26 @@ static const struct drm_sched_backend_ops lima_sched_ops = {
.free_job = lima_sched_free_job,
};
+static void lima_sched_recover_work(struct work_struct *work)
+{
+ struct lima_sched_pipe *pipe =
+ container_of(work, struct lima_sched_pipe, recover_work);
+ int i;
+
+ for (i = 0; i < pipe->num_l2_cache; i++)
+ lima_l2_cache_flush(pipe->l2_cache[i]);
+
+ if (pipe->bcast_mmu) {
+ lima_mmu_flush_tlb(pipe->bcast_mmu);
+ } else {
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_flush_tlb(pipe->mmu[i]);
+ }
+
+ if (pipe->task_recover(pipe))
+ drm_sched_fault(&pipe->base);
+}
+
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
@@ -321,6 +341,8 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
+ INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
+
return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
msecs_to_jiffies(timeout), name);
}
@@ -332,11 +354,14 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
- if (pipe->error)
- drm_sched_fault(&pipe->base);
- else {
- struct lima_sched_task *task = pipe->current_task;
-
+ struct lima_sched_task *task = pipe->current_task;
+
+ if (pipe->error) {
+ if (task && task->recoverable)
+ schedule_work(&pipe->recover_work);
+ else
+ drm_sched_fault(&pipe->base);
+ } else {
pipe->task_fini(pipe);
dma_fence_signal(task->fence);
}
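
lima_sched_pipe_task_done() now distinguishes recoverable errors: instead of faulting the scheduler straight from the completion path, it schedules recover_work, which runs in process context where the cache/TLB flushes and heap growth are free to sleep. A minimal pthread sketch of the defer-to-worker pattern (illustrative, not the kernel workqueue API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool pending;

/* Runs in "process context": free to sleep, allocate, flush. */
static void *recover_worker(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&lock);
	while (!pending)
		pthread_cond_wait(&cond, &lock);
	pending = false;
	pthread_mutex_unlock(&lock);

	puts("recovering: flush caches and TLBs, grow heap, resume task");
	return NULL;
}

/* Called from the completion path: just schedule the work. */
static void task_done(bool error, bool recoverable)
{
	if (!error)
		return;

	if (recoverable) {
		pthread_mutex_lock(&lock);
		pending = true;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	} else {
		puts("unrecoverable: fault the scheduler");
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, recover_worker, NULL);
	task_done(true, true);
	pthread_join(t, NULL);
	return 0;
}
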
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 1d814fecbcc0..d64393fb50a9 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -20,6 +20,9 @@ struct lima_sched_task {
struct lima_bo **bos;
int num_bos;
+ bool recoverable;
+ struct lima_bo *heap;
+
/* pipe fence */
struct dma_fence *fence;
};
@@ -68,6 +71,9 @@ struct lima_sched_pipe {
void (*task_fini)(struct lima_sched_pipe *pipe);
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+ int (*task_recover)(struct lima_sched_pipe *pipe);
+
+ struct work_struct recover_work;
};
int lima_sched_task_init(struct lima_sched_task *task,
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 840e2350d872..5b92fb82674a 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -155,6 +155,7 @@ err_out0:
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
struct lima_bo_va *bo_va;
+ u32 size;
mutex_lock(&bo->lock);
@@ -166,8 +167,9 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
+ size = bo->heap_size ? bo->heap_size : bo_va->node.size;
lima_vm_unmap_range(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ bo_va->node.start + size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -277,3 +279,45 @@ void lima_vm_print(struct lima_vm *vm)
}
}
}
+
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
+{
+ struct lima_bo_va *bo_va;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
+ u32 base;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (!bo_va) {
+ err = -ENOENT;
+ goto err_out0;
+ }
+
+ mutex_lock(&vm->lock);
+
+ base = bo_va->node.start + (pageoff << PAGE_SHIFT);
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
+ bo->base.sgt->nents, pageoff) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ base + offset);
+ if (err)
+ goto err_out1;
+
+ offset += PAGE_SIZE;
+ }
+
+ mutex_unlock(&vm->lock);
+
+ mutex_unlock(&bo->lock);
+ return 0;
+
+err_out1:
+ if (offset)
+ lima_vm_unmap_range(vm, base, base + offset - 1);
+ mutex_unlock(&vm->lock);
+err_out0:
+ mutex_unlock(&bo->lock);
+ return err;
+}
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index e0bdedcf14dd..22aeec77d84d 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -58,5 +58,6 @@ static inline void lima_vm_put(struct lima_vm *vm)
}
void lima_vm_print(struct lima_vm *vm);
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff);
#endif
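lima_vm_map_bo() requires that the BO already owns a VA node in this VM (it returns -ENOENT otherwise) and unmaps any partially mapped range on failure. A hedged usage sketch, with a hypothetical caller mapping only the pages appended by a heap grow:

/* Illustrative only: map the pages added past old_num_pages. */
static int example_map_grown_pages(struct lima_vm *vm, struct lima_bo *bo,
				   int old_num_pages)
{
	/* pageoff selects both the sg start and the VA offset */
	return lima_vm_map_bo(vm, bo, old_num_pages);
}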
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9008ddcfc528..f28cb7a576ba 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -20,11 +20,11 @@
* input formats including most variants of RGB and YUV.
*
* The hardware has four display pipes, and the layout is a little
- * bit like this:
+ * bit like this::
*
- * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
- * External 0..5 0..3 A,B, 3 x DSI bridge
- * source 0..9 C0,C1 2 x DPI
+ * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
+ * External 0..5 0..3 A,B, 3 x DSI bridge
+ * source 0..9 C0,C1 2 x DPI
*
* FIFOs A and B are for LCD and HDMI while FIFO CO/C1 are for
* panels with embedded buffer.
@@ -43,6 +43,7 @@
* to change as we exploit more of the hardware capabilities.
*
* TODO:
+ *
* - Enable damaged rectangles using drm_plane_enable_fb_damage_clips()
* so we can selectively just transmit the damaged area to a
* command-only display.
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index bb6528b01cd0..7af5ebb0c436 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -986,7 +986,8 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
clk_disable_unprepare(d->lp_clk);
}
-static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
+static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
struct drm_device *drm = bridge->dev;
@@ -998,7 +999,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
+ ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
if (ret) {
dev_err(d->dev, "failed to attach the DSI bridge\n");
return ret;
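This is the pattern of the updated drm_bridge attach contract used throughout this series: the .attach callback now receives enum drm_bridge_attach_flags and forwards it when chaining. A condensed, illustrative sketch (next_bridge is a stand-in for whatever downstream bridge the driver found; the real callback does not take it as a parameter):

static int example_bridge_attach(struct drm_bridge *bridge,
				 struct drm_bridge *next_bridge,
				 enum drm_bridge_attach_flags flags)
{
	/* a bridge that still creates its own connector must reject this */
	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return -EINVAL;

	/* otherwise forward the flags down the chain */
	return drm_bridge_attach(bridge->encoder, next_bridge, bridge, flags);
}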
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 01fa8b8d763d..4f0ce4cd5b8c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -607,7 +607,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
/* Currently DPI0 is fixed to be driven by OVL1 */
dpi->encoder.possible_crtcs = BIT(1);
- ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL);
+ ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "Failed to attach bridge: %d\n", ret);
goto err_cleanup;
@@ -664,6 +664,16 @@ static unsigned int mt2701_calculate_factor(int clock)
return 1;
}
+static unsigned int mt8183_calculate_factor(int clock)
+{
+ if (clock <= 27000)
+ return 8;
+ else if (clock <= 167000)
+ return 4;
+ else
+ return 2;
+}
+
static const struct mtk_dpi_conf mt8173_conf = {
.cal_factor = mt8173_calculate_factor,
.reg_h_fre_con = 0xe0,
@@ -675,6 +685,11 @@ static const struct mtk_dpi_conf mt2701_conf = {
.edge_sel_en = true,
};
+static const struct mtk_dpi_conf mt8183_conf = {
+ .cal_factor = mt8183_calculate_factor,
+ .reg_h_fre_con = 0xe0,
+};
+
static int mtk_dpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -770,6 +785,9 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8173-dpi",
.data = &mt8173_conf,
},
+ { .compatible = "mediatek,mt8183-dpi",
+ .data = &mt8183_conf,
+ },
{ },
};
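As a worked example of the bucket selection above: a 1080p60 pixel clock of 148500 kHz falls into the <= 167000 bucket, so the factor is 4 (presumably the multiplier applied to the pixel clock when programming the upstream PLL, as with the mt8173 and mt2701 variants):

	/* illustrative only */
	unsigned int factor = mt8183_calculate_factor(148500);	/* returns 4 */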
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5fa1073cf26b..0ede69830a9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -904,7 +904,7 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
/* If there's a bridge, attach to it and let it create the connector */
if (dsi->bridge) {
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
goto err_encoder_cleanup;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5e4a4dbda443..ff43a3d80410 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -169,6 +170,9 @@ struct mtk_hdmi {
bool audio_enable;
bool powered;
bool enabled;
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ struct mutex update_plugged_status_lock;
};
static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
@@ -1194,13 +1198,26 @@ static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
}
+static enum drm_connector_status
+mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
+{
+ bool connected;
+
+ mutex_lock(&hdmi->update_plugged_status_lock);
+ connected = mtk_cec_hpd_high(hdmi->cec_dev);
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, connected);
+ mutex_unlock(&hdmi->update_plugged_status_lock);
+
+ return connected ?
+ connector_status_connected : connector_status_disconnected;
+}
+
static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
bool force)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
-
- return mtk_cec_hpd_high(hdmi->cec_dev) ?
- connector_status_connected : connector_status_disconnected;
+ return mtk_hdmi_update_plugged_status(hdmi);
}
static void hdmi_conn_destroy(struct drm_connector *conn)
@@ -1297,11 +1314,17 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
* Bridge callbacks
*/
-static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
+static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
&mtk_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
@@ -1326,7 +1349,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
if (hdmi->next_bridge) {
ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
- bridge);
+ bridge, flags);
if (ret) {
dev_err(hdmi->dev,
"Failed to attach external bridge: %d\n", ret);
@@ -1651,20 +1674,39 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
return 0;
}
+static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_hdmi *hdmi = data;
+
+ mutex_lock(&hdmi->update_plugged_status_lock);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ mutex_unlock(&hdmi->update_plugged_status_lock);
+
+ mtk_hdmi_update_plugged_status(hdmi);
+
+ return 0;
+}
+
static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.hw_params = mtk_hdmi_audio_hw_params,
.audio_startup = mtk_hdmi_audio_startup,
.audio_shutdown = mtk_hdmi_audio_shutdown,
.digital_mute = mtk_hdmi_audio_digital_mute,
.get_eld = mtk_hdmi_audio_get_eld,
+ .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
};
-static void mtk_hdmi_register_audio_driver(struct device *dev)
+static int mtk_hdmi_register_audio_driver(struct device *dev)
{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_hdmi_audio_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
+ .data = hdmi,
};
struct platform_device *pdev;
@@ -1672,9 +1714,10 @@ static void mtk_hdmi_register_audio_driver(struct device *dev)
PLATFORM_DEVID_AUTO, &codec_data,
sizeof(codec_data));
if (IS_ERR(pdev))
- return;
+ return PTR_ERR(pdev);
DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
+ return 0;
}
static int mtk_drm_hdmi_probe(struct platform_device *pdev)
@@ -1700,6 +1743,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
return ret;
}
+ mutex_init(&hdmi->update_plugged_status_lock);
platform_set_drvdata(pdev, hdmi);
ret = mtk_hdmi_output_init(hdmi);
@@ -1708,7 +1752,11 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
return ret;
}
- mtk_hdmi_register_audio_driver(dev);
+ ret = mtk_hdmi_register_audio_driver(dev);
+ if (ret) {
+ dev_err(dev, "Failed to register audio driver: %d\n", ret);
+ return ret;
+ }
hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
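The new plumbing follows the hdmi-codec contract: the audio side registers a hdmi_codec_plugged_cb through .hook_plugged_cb, and the HDMI driver then calls it, under update_plugged_status_lock, on every detect. A hedged sketch of a consumer-side callback (the function name is hypothetical):

static void example_plugged_cb(struct device *codec_dev, bool plugged)
{
	/* invoked by mtk_hdmi_update_plugged_status() on each HPD check */
	dev_info(codec_dev, "HDMI sink %s\n",
		 plugged ? "connected" : "disconnected");
}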
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index b5f5eb7b4bb9..8c2e1b47e81a 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -412,9 +412,7 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev)
if (priv->afbcd.ops)
priv->afbcd.ops->init(priv);
- drm_mode_config_helper_resume(priv->drm);
-
- return 0;
+ return drm_mode_config_helper_resume(priv->drm);
}
static int compare_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 3bb7ffe5fc39..64cb6ba4bc42 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -16,6 +16,7 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
@@ -135,6 +136,7 @@ struct meson_dw_hdmi_data {
struct meson_dw_hdmi {
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
@@ -148,9 +150,12 @@ struct meson_dw_hdmi {
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
+ unsigned long output_bus_fmt;
};
#define encoder_to_meson_dw_hdmi(x) \
container_of(x, struct meson_dw_hdmi, encoder)
+#define bridge_to_meson_dw_hdmi(x) \
+ container_of(x, struct meson_dw_hdmi, bridge)
static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
const char *compat)
@@ -297,6 +302,10 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
struct meson_drm *priv = dw_hdmi->priv;
unsigned int pixel_clock = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ pixel_clock /= 2;
+
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
if (pixel_clock >= 371250) {
@@ -368,29 +377,40 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
}
static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+ unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
vclk_freq = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
if (!vic) {
- meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
- vclk_freq, vclk_freq, false);
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
+ vclk_freq, vclk_freq, vclk_freq, false);
return;
}
+ /* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
- if (meson_venc_hdmi_venc_repeat(vic))
+ /* VENC double pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
@@ -398,11 +418,11 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- DRM_DEBUG_DRIVER("vclk:%d venc=%d hdmi=%d enci=%d\n",
- vclk_freq, venc_freq, hdmi_freq,
+ DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+ phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
- meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, vclk_freq,
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
}
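A worked example of the derivation above, assuming a 3840x2160@60 mode (mode->clock = 594000) negotiated as YUV420: vclk_freq is halved to 297000 kHz, phy_freq becomes 297000 * 10 = 2970000 kHz, the YUV420 VENC doubling raises venc_freq to 594000 kHz, and the final vclk is max(594000, 297000) = 594000 kHz, which is exactly the combination matched by the MESON_VCLK_HDMI_594000_YUV420 entry added to meson_vclk.c below.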
@@ -437,8 +457,9 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* Enable normal output to PHY */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
- /* TMDS pattern setup (TOFIX Handle the YUV420 case) */
- if (mode->clock > 340000) {
+ /* TMDS pattern setup */
+ if (mode->clock > 340000 &&
+ dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
@@ -613,6 +634,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct meson_drm *priv = connector->dev->dev_private;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+ unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
@@ -621,9 +644,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
- /* If sink max TMDS clock, we reject the mode */
+ /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
if (connector->display_info.max_tmds_clock &&
- mode->clock > connector->display_info.max_tmds_clock)
+ mode->clock > connector->display_info.max_tmds_clock &&
+ !drm_mode_is_420_only(&connector->display_info, mode) &&
+ !drm_mode_is_420_also(&connector->display_info, mode))
return MODE_BAD;
/* Check against non-VIC supported modes */
@@ -639,6 +664,15 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
vclk_freq = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (drm_mode_is_420_only(&connector->display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(&connector->display_info, mode)))
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
/* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
@@ -646,8 +680,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
- /* VENC double pixels for 1080i and 720p modes */
- if (meson_venc_hdmi_venc_repeat(vic))
+ /* VENC double pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ drm_mode_is_420_only(&connector->display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(&connector->display_info, mode)))
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
@@ -655,14 +692,19 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
- vclk_freq, venc_freq, hdmi_freq);
+ dev_dbg(connector->dev->dev, "%s: phy:%d vclk=%d venc=%d hdmi=%d\n",
+ __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
- return meson_vclk_vic_supported_freq(vclk_freq);
+ return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
}
/* Encoder */
+static const u32 meson_dw_hdmi_out_bus_fmts[] = {
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+};
+
static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -672,16 +714,54 @@ static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
.destroy = meson_venc_hdmi_encoder_destroy,
};
-static int meson_venc_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+static u32 *
+meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts = NULL;
+ int i;
+
+ *num_input_fmts = 0;
+
+ for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) {
+ if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) {
+ *num_input_fmts = 1;
+ input_fmts = kcalloc(*num_input_fmts,
+ sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+
+ break;
+ }
+ }
+
+ return input_fmts;
+}
+
+static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
+
+ dw_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
+
+ DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt);
+
return 0;
}
-static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("\n");
@@ -693,9 +773,9 @@ static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
}
-static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
@@ -706,32 +786,47 @@ static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
}
-static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+ unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
+ bool yuv420_mode = false;
DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
+ ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
+ yuv420_mode = true;
+ }
+
/* VENC + VENC-DVI Mode setup */
- meson_venc_hdmi_mode_set(priv, vic, mode);
+ meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
/* VCLK Set clock */
dw_hdmi_set_vclk(dw_hdmi, mode);
- /* Setup YUV444 to HDMI-TX, no 10bit diphering */
- writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ /* Setup YUV420 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(2 | (2 << 2),
+ priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ else
+ /* Setup YUV444 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
}
-static const struct drm_encoder_helper_funcs
- meson_venc_hdmi_encoder_helper_funcs = {
- .atomic_check = meson_venc_hdmi_encoder_atomic_check,
- .disable = meson_venc_hdmi_encoder_disable,
- .enable = meson_venc_hdmi_encoder_enable,
- .mode_set = meson_venc_hdmi_encoder_mode_set,
+static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_check = meson_venc_hdmi_encoder_atomic_check,
+ .enable = meson_venc_hdmi_encoder_enable,
+ .disable = meson_venc_hdmi_encoder_disable,
+ .mode_set = meson_venc_hdmi_encoder_mode_set,
};
/* DW HDMI Regmap */
@@ -852,6 +947,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
struct dw_hdmi_plat_data *dw_plat_data;
+ struct drm_bridge *next_bridge;
struct drm_encoder *encoder;
struct resource *res;
int irq;
@@ -938,10 +1034,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(dw_plat_data->regm);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Failed to get hdmi top irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, dw_hdmi_top_irq,
dw_hdmi_top_thread_irq, IRQF_SHARED,
@@ -953,8 +1047,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
/* Encoder */
- drm_encoder_helper_add(encoder, &meson_venc_hdmi_encoder_helper_funcs);
-
ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, "meson_hdmi");
if (ret) {
@@ -962,6 +1054,9 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return ret;
}
+ meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs;
+ drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0);
+
encoder->possible_crtcs = BIT(0);
DRM_DEBUG_DRIVER("encoder initialized\n");
@@ -974,8 +1069,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
dw_plat_data->phy_name = "meson_dw_hdmi_phy";
dw_plat_data->phy_data = meson_dw_hdmi;
- dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ dw_plat_data->ycbcr_420_allowed = true;
if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
@@ -984,11 +1079,16 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
platform_set_drvdata(pdev, meson_dw_hdmi);
- meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
- &meson_dw_hdmi->dw_plat_data);
+ meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev,
+ &meson_dw_hdmi->dw_plat_data);
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
+ next_bridge = of_drm_find_bridge(pdev->dev.of_node);
+ if (next_bridge)
+ drm_bridge_attach(encoder, next_bridge,
+ &meson_dw_hdmi->bridge, 0);
+
DRM_DEBUG_DRIVER("HDMI controller initialized\n");
return 0;
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f690793ae2d5..fdf26dac9fa8 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -354,12 +354,17 @@ enum {
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
MESON_VCLK_HDMI_297000,
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
- MESON_VCLK_HDMI_594000
+ MESON_VCLK_HDMI_594000,
+/* 2970 /1 /1 /1 /5 /1 => /1 /2 */
+ MESON_VCLK_HDMI_594000_YUV420,
};
struct meson_vclk_params {
+ unsigned int pll_freq;
+ unsigned int phy_freq;
+ unsigned int vclk_freq;
+ unsigned int venc_freq;
unsigned int pixel_freq;
- unsigned int pll_base_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
@@ -367,8 +372,11 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
+ .pll_freq = 4320000,
+ .phy_freq = 270000,
+ .vclk_freq = 54000,
+ .venc_freq = 54000,
.pixel_freq = 54000,
- .pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -376,8 +384,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
- .pixel_freq = 54000,
- .pll_base_freq = 4320000,
+ .pll_freq = 4320000,
+ .phy_freq = 270000,
+ .vclk_freq = 54000,
+ .venc_freq = 54000,
+ .pixel_freq = 27000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -385,8 +396,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
- .pixel_freq = 148500,
- .pll_base_freq = 2970000,
+ .pll_freq = 2970000,
+ .phy_freq = 742500,
+ .vclk_freq = 148500,
+ .venc_freq = 148500,
+ .pixel_freq = 74250,
.pll_od1 = 4,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -394,8 +408,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
+ .pll_freq = 2970000,
+ .phy_freq = 742500,
+ .vclk_freq = 74250,
+ .venc_freq = 74250,
.pixel_freq = 74250,
- .pll_base_freq = 2970000,
.pll_od1 = 2,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -403,8 +420,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
+ .pll_freq = 2970000,
+ .phy_freq = 1485000,
+ .vclk_freq = 148500,
+ .venc_freq = 148500,
.pixel_freq = 148500,
- .pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -412,8 +432,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
+ .pll_freq = 5940000,
+ .phy_freq = 2970000,
+ .venc_freq = 297000,
+ .vclk_freq = 297000,
.pixel_freq = 297000,
- .pll_base_freq = 5940000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -421,14 +444,29 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
+ .pll_freq = 5940000,
+ .phy_freq = 5940000,
+ .venc_freq = 594000,
+ .vclk_freq = 594000,
.pixel_freq = 594000,
- .pll_base_freq = 5940000,
.pll_od1 = 1,
.pll_od2 = 1,
.pll_od3 = 2,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ [MESON_VCLK_HDMI_594000_YUV420] = {
+ .pll_freq = 5940000,
+ .phy_freq = 2970000,
+ .venc_freq = 594000,
+ .vclk_freq = 594000,
+ .pixel_freq = 297000,
+ .pll_od1 = 2,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
{ /* sentinel */ },
};
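Reading the new fields against the divider comments in the enum above, phy_freq appears to be the PLL output after OD1 and OD2 (the tap that feeds the PHY), while vclk_freq divides further by OD3, vid_pll_div and vclk_div. For the new MESON_VCLK_HDMI_594000_YUV420 entry: 5940000 / 2 / 1 = 2970000 kHz at the PHY, then / 1 / 5 / 1 = 594000 kHz for the vclk, with the pixel path halved to 297000 kHz.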
@@ -701,6 +739,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
unsigned int od, m, frac, od1, od2, od3;
if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+ /* OD2 goes to the PHY, and needs to be *10, so keep OD3=1 */
od3 = 1;
if (od < 4) {
od1 = 2;
@@ -723,21 +762,28 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq)
+meson_vclk_vic_supported_freq(unsigned int phy_freq,
+ unsigned int vclk_freq)
{
int i;
- DRM_DEBUG_DRIVER("freq = %d\n", freq);
+ DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
+ phy_freq, vclk_freq);
for (i = 0 ; params[i].pixel_freq ; ++i) {
DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
i, params[i].pixel_freq,
FREQ_1000_1001(params[i].pixel_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ i, params[i].phy_freq,
+ FREQ_1000_1001(params[i].phy_freq/10)*10);
/* Match strict frequency */
- if (freq == params[i].pixel_freq)
+ if (phy_freq == params[i].phy_freq &&
+ vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (freq == FREQ_1000_1001(params[i].pixel_freq))
+ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+ vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
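Assuming FREQ_1000_1001() is the usual freq * 1000 / 1001 integer macro, the /10 and *10 around the PHY comparison keep the pair an exact 10:1 ratio for the 59.94 Hz variants: FREQ_1000_1001(594000) = 593406 kHz for the vclk, and FREQ_1000_1001(5940000 / 10) * 10 = 5934060 kHz for the PHY, whereas a plain FREQ_1000_1001(5940000) would truncate to 5934065 and never equal ten times the vclk variant.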
@@ -965,8 +1011,9 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci)
+ unsigned int phy_freq, unsigned int vclk_freq,
+ unsigned int venc_freq, unsigned int dac_freq,
+ bool hdmi_use_enci)
{
bool vic_alternate_clock = false;
unsigned int freq;
@@ -986,7 +1033,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
* - venc_div = 1
* - encp encoder
*/
- meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+ meson_vclk_set(priv, phy_freq, 0, 0, 0,
VID_PLL_DIV_5, 2, 1, 1, false, false);
return;
}
@@ -1008,9 +1055,11 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
- if (vclk_freq == params[freq].pixel_freq ||
- vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
- if (vclk_freq != params[freq].pixel_freq)
+ if ((phy_freq == params[freq].phy_freq ||
+ phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+ (vclk_freq == params[freq].vclk_freq ||
+ vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
vic_alternate_clock = false;
@@ -1039,7 +1088,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- meson_vclk_set(priv, params[freq].pll_base_freq,
+ meson_vclk_set(priv, params[freq].pll_freq,
params[freq].pll_od1, params[freq].pll_od2,
params[freq].pll_od3, params[freq].vid_pll_div,
params[freq].vclk_div, hdmi_tx_div, venc_div,
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index b62125540aef..aed0ab2efa71 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -25,10 +25,11 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq);
+meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci);
+ unsigned int phy_freq, unsigned int vclk_freq,
+ unsigned int venc_freq, unsigned int dac_freq,
+ bool hdmi_use_enci);
#endif /* __MESON_VCLK_H */
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 4efd7864d5bf..f93c725b6f02 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -946,7 +946,9 @@ bool meson_venc_hdmi_venc_repeat(int vic)
EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode)
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
+ const struct drm_display_mode *mode)
{
union meson_hdmi_venc_mode *vmode = NULL;
union meson_hdmi_venc_mode vmode_dmt;
@@ -1528,14 +1530,14 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
reg |= VPU_HDMI_INV_VSYNC;
- /* Output data format: CbYCr */
- reg |= VPU_HDMI_OUTPUT_CBYCR;
+ /* Output data format */
+ reg |= ycrcb_map;
/*
* Write rate to the async FIFO between VENC and HDMI.
* One write every 2 wr_clk.
*/
- if (venc_repeat)
+ if (venc_repeat || yuv420_mode)
reg |= VPU_HDMI_WR_RATE(2);
/*
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 576768bdd08d..9138255ffc9e 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -60,7 +60,9 @@ extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode);
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
+ const struct drm_display_mode *mode);
unsigned int meson_venci_get_field(struct meson_drm *priv);
void meson_venc_enable_vsync(struct meson_drm *priv);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 1bd6b6d15ffb..541f9eb2a135 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -213,8 +213,10 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
meson_venci_cvbs_mode_set(priv, meson_mode->enci);
/* Setup 27MHz vclk2 for ENCI and VDAC */
- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
- MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ true);
}
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index aa32aad222c2..9691252d6233 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -95,7 +95,6 @@
#define MATROX_DPMS_CLEARED (-1)
#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
-#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
#define to_mga_connector(x) container_of(x, struct mga_connector, base)
struct mga_crtc {
@@ -110,12 +109,6 @@ struct mga_mode_info {
struct mga_crtc *crtc;
};
-struct mga_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-
struct mga_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -185,6 +178,8 @@ struct mga_device {
/* SE model number stored in reg 0x1e24 */
u32 unique_rev_id;
+
+ struct drm_encoder encoder;
};
static inline enum mga_type
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 62a8e9ccb16d..d90e83959fca 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mgag200_drv.h"
@@ -1449,76 +1450,6 @@ static void mga_crtc_init(struct mga_device *mdev)
drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
}
-/*
- * The encoder comes after the CRTC in the output pipeline, but before
- * the connector. It's responsible for ensuring that the digital
- * stream is appropriately converted into the output format. Setup is
- * very simple in this case - all we have to do is inform qemu of the
- * colour depth in order to ensure that it displays appropriately
- */
-
-/*
- * These functions are analagous to those in the CRTC code, but are intended
- * to handle any encoder-specific limitations
- */
-static void mga_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
-{
- return;
-}
-
-static void mga_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mga_encoder);
-}
-
-static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
- .dpms = mga_encoder_dpms,
- .mode_set = mga_encoder_mode_set,
- .prepare = mga_encoder_prepare,
- .commit = mga_encoder_commit,
-};
-
-static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
- .destroy = mga_encoder_destroy,
-};
-
-static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- struct mga_encoder *mga_encoder;
-
- mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
- if (!mga_encoder)
- return NULL;
-
- encoder = &mga_encoder->base;
- encoder->possible_crtcs = 0x1;
-
- drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
-
- return encoder;
-}
-
-
static int mga_vga_get_modes(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1686,8 +1617,9 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
int mgag200_modeset_init(struct mga_device *mdev)
{
- struct drm_encoder *encoder;
+ struct drm_encoder *encoder = &mdev->encoder;
struct drm_connector *connector;
+ int ret;
mdev->mode_info.mode_config_initialized = true;
@@ -1698,11 +1630,15 @@ int mgag200_modeset_init(struct mga_device *mdev)
mga_crtc_init(mdev);
- encoder = mga_encoder_init(mdev->dev);
- if (!encoder) {
- DRM_ERROR("mga_encoder_init failed\n");
- return -1;
+ ret = drm_simple_encoder_init(mdev->dev, encoder,
+ DRM_MODE_ENCODER_DAC);
+ if (ret) {
+ drm_err(mdev->dev,
+ "drm_simple_encoder_init() failed, error %d\n",
+ ret);
+ return ret;
}
+ encoder->possible_crtcs = 0x1;
connector = mga_vga_init(mdev->dev);
if (!connector) {
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7d9e63e20ded..724024a2243a 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = {
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
- u32 bin, val;
+ u32 val;
+
+ /*
+ * If the OPP table specifies an opp-supported-hw property, then we have
+ * to set something with dev_pm_opp_set_supported_hw() or the table
+ * doesn't get populated. So pick an arbitrary value that should ensure
+ * the default frequencies are selected but not conflict with any
+ * actual bins.
+ */
+ val = 0x80;
cell = nvmem_cell_get(dev, "speed_bin");
- /* If a nvmem cell isn't defined, nothing to do */
- if (IS_ERR(cell))
- return;
+ if (!IS_ERR(cell)) {
+ void *buf = nvmem_cell_read(cell, NULL);
+
+ if (!IS_ERR(buf)) {
+ u8 bin = *((u8 *) buf);
- bin = *((u32 *) nvmem_cell_read(cell, NULL));
- nvmem_cell_put(cell);
+ val = (1 << bin);
+ kfree(buf);
+ }
- val = (1 << bin);
+ nvmem_cell_put(cell);
+ }
dev_pm_opp_set_supported_hw(dev, &val, 1);
}
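For context on the fallback value: dev_pm_opp_set_supported_hw() keeps an OPP table entry enabled only when its opp-supported-hw mask ANDs non-zero against the value passed in, so 1 << bin selects the rows fused for that speed bin, while the arbitrary 0x80 can only match rows that deliberately set bit 7. An illustrative sketch (dev stands in for the GPU device):

	u8 bin = 3;		/* as read from the "speed_bin" nvmem cell */
	u32 val = 1 << bin;	/* 0x08: enable OPPs tagged for bin 3 */

	dev_pm_opp_set_supported_hw(dev, &val, 1);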
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 748cd379065f..c4e71abbdd53 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
@@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
- int count, i;
- u64 iova;
-
if (IS_ERR_OR_NULL(bo))
return;
- count = bo->size >> PAGE_SHIFT;
- iova = bo->iova;
-
- for (i = 0; i < count; i++, iova += PAGE_SIZE) {
- iommu_unmap(gmu->domain, iova, PAGE_SIZE);
- __free_pages(bo->pages[i], 0);
- }
-
- kfree(bo->pages);
+ dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
kfree(bo);
}
@@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
size_t size)
{
struct a6xx_gmu_bo *bo;
- int ret, count, i;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
@@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
bo->size = PAGE_ALIGN(size);
- count = bo->size >> PAGE_SHIFT;
+ bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
- bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
- if (!bo->pages) {
+ if (!bo->virt) {
kfree(bo);
return ERR_PTR(-ENOMEM);
}
- for (i = 0; i < count; i++) {
- bo->pages[i] = alloc_page(GFP_KERNEL);
- if (!bo->pages[i])
- goto err;
- }
-
- bo->iova = gmu->uncached_iova_base;
-
- for (i = 0; i < count; i++) {
- ret = iommu_map(gmu->domain,
- bo->iova + (PAGE_SIZE * i),
- page_to_phys(bo->pages[i]), PAGE_SIZE,
- IOMMU_READ | IOMMU_WRITE);
-
- if (ret) {
- DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
-
- for (i = i - 1 ; i >= 0; i--)
- iommu_unmap(gmu->domain,
- bo->iova + (PAGE_SIZE * i),
- PAGE_SIZE);
-
- goto err;
- }
- }
-
- bo->virt = vmap(bo->pages, count, VM_IOREMAP,
- pgprot_writecombine(PAGE_KERNEL));
- if (!bo->virt)
- goto err;
-
- /* Align future IOVA addresses on 1MB boundaries */
- gmu->uncached_iova_base += ALIGN(size, SZ_1M);
-
return bo;
-
-err:
- for (i = 0; i < count; i++) {
- if (bo->pages[i])
- __free_pages(bo->pages[i], 0);
- }
-
- kfree(bo->pages);
- kfree(bo);
-
- return ERR_PTR(-ENOMEM);
-}
-
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
-{
- int ret;
-
- /*
- * The GMU address space is hardcoded to treat the range
- * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
- * between the GMU and the CPU will live in this space
- */
- gmu->uncached_iova_base = 0x60000000;
-
-
- gmu->domain = iommu_domain_alloc(&platform_bus_type);
- if (!gmu->domain)
- return -ENODEV;
-
- ret = iommu_attach_device(gmu->domain, gmu->dev);
-
- if (ret) {
- iommu_domain_free(gmu->domain);
- gmu->domain = NULL;
- }
-
- return ret;
}
/* Return the 'arc-level' for the given frequency */
@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_memory_free(gmu, gmu->hfi);
- iommu_detach_device(gmu->domain, gmu->dev);
-
- iommu_domain_free(gmu->domain);
-
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ /* Pass force_dma false to require the DT to set the dma region */
+ ret = of_dma_configure(gmu->dev, node, false);
+ if (ret)
+ return ret;
+
+ /* Set the mask after the of_dma_configure() */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
+ if (ret)
+ return ret;
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
- /* Set up the IOMMU context bank */
- ret = a6xx_gmu_memory_probe(gmu);
- if (ret)
- goto err_put_device;
-
/* Allocate memory for the HFI queues */
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->hfi))
@@ -1375,11 +1291,6 @@ err_mmio:
err_memory:
a6xx_gmu_memory_free(gmu, gmu->hfi);
- if (gmu->domain) {
- iommu_detach_device(gmu->domain, gmu->dev);
-
- iommu_domain_free(gmu->domain);
- }
ret = -ENODEV;
err_put_device:
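The conversion replaces the hand-rolled alloc_page()/iommu_map()/vmap() chain with the write-combined DMA API, which hands back a CPU pointer and a device address in one call. A minimal sketch of the pairing, assuming a suitably configured dev and size:

	void *virt;
	dma_addr_t iova;

	virt = dma_alloc_wc(dev, size, &iova, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	/* ... the CPU uses virt, the GMU is handed iova ... */
	dma_free_wc(dev, size, virt, iova);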
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 2af91ed7ed0c..4af65a36d5ca 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -12,8 +12,7 @@
struct a6xx_gmu_bo {
void *virt;
size_t size;
- u64 iova;
- struct page **pages;
+ dma_addr_t iova;
};
/*
@@ -49,9 +48,6 @@ struct a6xx_gmu {
int hfi_irq;
int gmu_irq;
- struct iommu_domain *domain;
- u64 uncached_iova_base;
-
struct device *gxpd;
int idle_level;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e67c20c415af..24c974c293e5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers {
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
- "CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 7fd29829b2fa..1d5c43c22269 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
return NULL;
for (i = 0; i < l; i++)
- buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
ascii85_encode(src[i], out));
return buf;
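The snprintf() to scnprintf() switch matters once the buffer fills: snprintf() returns the length it would have written, so the buf + buf_itr iterator can run past the end, while scnprintf() returns the bytes actually stored. An illustrative example:

	char buf[16];

	/* returns 20 even though only 15 chars plus the NUL fit */
	int n1 = snprintf(buf, sizeof(buf), "%s", "abcdefghijklmnopqrst");
	/* returns 15, so buf + n2 stays inside the buffer */
	int n2 = scnprintf(buf, sizeof(buf), "%s", "abcdefghijklmnopqrst");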
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index bf513411b243..17448505a9b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1272,6 +1272,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.atomic_destroy_state = dpu_crtc_destroy_state,
.late_register = dpu_crtc_late_register,
.early_unregister = dpu_crtc_early_unregister,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f8ac3bf60fd6..a1b79ee2bd9d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -164,7 +164,6 @@ enum dpu_enc_rc_states {
* clks and resources after IDLE_TIMEOUT time.
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
- * @mode_set_complete: flag to indicate modeset completion
* @idle_timeout: idle timeout duration in milliseconds
*/
struct dpu_encoder_virt {
@@ -202,7 +201,6 @@ struct dpu_encoder_virt {
struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work;
struct msm_display_topology topology;
- bool mode_set_complete;
u32 idle_timeout;
};
@@ -461,7 +459,7 @@ void dpu_encoder_helper_split_config(
struct msm_display_info *disp_info;
if (!phys_enc->hw_mdptop || !phys_enc->parent) {
- DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
@@ -512,7 +510,6 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
if (cur_mode->vdisplay == adj_mode->vdisplay &&
cur_mode->hdisplay == adj_mode->hdisplay &&
drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
- adj_mode->private = cur_mode->private;
adj_mode->private_flags |= cur_mode->private_flags;
}
}
@@ -563,12 +560,13 @@ static int dpu_encoder_virt_atomic_check(
const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
+ struct dpu_global_state *global_state;
int i = 0;
int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) {
DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
- drm_enc != 0, crtc_state != 0, conn_state != 0);
+ drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
return -EINVAL;
}
@@ -579,6 +577,7 @@ static int dpu_encoder_virt_atomic_check(
dpu_kms = to_dpu_kms(priv->kms);
mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_atomic_check(DRMID(drm_enc));
/*
@@ -610,17 +609,15 @@ static int dpu_encoder_virt_atomic_check(
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
- /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ /* Reserve dynamic resources now. */
if (!ret) {
/*
* Avoid reserving resources when mode set is pending. Topology
* info may not be available to complete reservation.
*/
- if (drm_atomic_crtc_needs_modeset(crtc_state)
- && dpu_enc->mode_set_complete) {
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
- topology, true);
- dpu_enc->mode_set_complete = false;
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ drm_enc, crtc_state, topology);
}
}
@@ -957,12 +954,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_connector *conn = NULL, *conn_iter;
struct drm_crtc *drm_crtc;
struct dpu_crtc_state *cstate;
- struct dpu_rm_hw_iter hw_iter;
+ struct dpu_global_state *global_state;
struct msm_display_topology topology;
- struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
- struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
- int num_lm = 0, num_ctl = 0;
- int i, j, ret;
+ struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_pp;
+ int i, j;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
@@ -976,6 +974,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_kms = to_dpu_kms(priv->kms);
connector_list = &dpu_kms->dev->mode_config.connector_list;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ if (IS_ERR_OR_NULL(global_state)) {
+ DPU_ERROR("Failed to get global state");
+ return;
+ }
+
trace_dpu_enc_mode_set(DRMID(drm_enc));
list_for_each_entry(conn_iter, connector_list, head)
@@ -996,77 +1000,57 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
- /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
- topology, false);
- if (ret) {
- DPU_ERROR_ENC(dpu_enc,
- "failed to reserve hw resources, %d\n", ret);
- return;
- }
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
- }
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
- num_ctl++;
- }
+ /* Query resource that have been reserved in atomic check step. */
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
- num_lm++;
- }
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
+ : NULL;
cstate = to_dpu_crtc_state(drm_crtc->state);
for (i = 0; i < num_lm; i++) {
int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
- cstate->mixers[i].hw_lm = hw_lm[i];
- cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
}
cstate->num_mixers = num_lm;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ int num_blk;
+ struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i);
- goto error;
+ return;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
- goto error;
+ return;
}
phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = hw_ctl[i];
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
- DPU_HW_BLK_INTF);
- for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
+ global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
+ hw_blk, ARRAY_SIZE(hw_blk));
+ for (j = 0; j < num_blk; j++) {
struct dpu_hw_intf *hw_intf;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
-
- hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ hw_intf = to_dpu_hw_intf(hw_blk[j]);
if (hw_intf->idx == phys->intf_idx)
phys->hw_intf = hw_intf;
}
@@ -1074,18 +1058,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (!phys->hw_intf) {
DPU_ERROR_ENC(dpu_enc,
"no intf block assigned at idx: %d\n", i);
- goto error;
+ return;
}
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
-
- dpu_enc->mode_set_complete = true;
-
-error:
- dpu_rm_release(&dpu_kms->rm, drm_enc);
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
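Condensing the new resource-manager flow shown above (illustrative, using only identifiers introduced by this patch): reservations are now made against the global private-object state during atomic check, then queried back at mode-set time instead of being re-reserved:

	/* atomic check path */
	global_state = dpu_kms_get_global_state(crtc_state->state);
	ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc,
			     crtc_state, topology);

	/* mode set path */
	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl,
			ARRAY_SIZE(hw_ctl));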
@@ -1182,6 +1161,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
+ struct dpu_global_state *global_state;
int i = 0;
if (!drm_enc) {
@@ -1200,6 +1180,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_disable(DRMID(drm_enc));
@@ -1229,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- dpu_rm_release(&dpu_kms->rm, drm_enc);
+ dpu_rm_release(global_state, drm_enc);
mutex_unlock(&dpu_enc->enc_lock);
}
@@ -1965,7 +1946,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
PTR_ERR(enc));
- return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ return enc == NULL ? -EINVAL : PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -1978,7 +1959,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
PTR_ERR(enc));
- return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ return enc == NULL ? -EINVAL : PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -2009,7 +1990,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc) {
- DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
+ DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 39e1e280ba44..8493d68ad841 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
- DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
return;
}
@@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
u32 flush_mask = 0;
if (!phys_enc->hw_pp) {
- DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index c71c18de5966..b5a49050d131 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
- DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
return;
}
@@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 85468981632d..0ead64d3f63d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -90,6 +90,16 @@ struct dpu_hw_intf {
};
/**
+ * to_dpu_hw_intf - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
* @idx: interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3d6f46b1db30..d73cb73e938b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -97,6 +97,16 @@ struct dpu_hw_pingpong {
};
/**
+ * to_dpu_hw_pingpong - convert base struct dpu_hw_blk object to its container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index cb08fafb1dc1..ce19f1d39367 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
struct dpu_debugfs_regset32 *regset = s->private;
struct dpu_kms *dpu_kms = regset->dpu_kms;
- struct drm_device *dev;
- struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
if (!dpu_kms->mmio)
return 0;
- dev = dpu_kms->dev;
- priv = dev->dev_private;
base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */
@@ -228,6 +224,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
}
#endif
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct dpu_global_state *
+dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
+{
+ return to_dpu_global_state(dpu_kms->global_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state and returns
+ * a newly duplicated private object state.
+ */
+struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_private_state *priv_state;
+ int ret;
+
+ ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv_state = drm_atomic_get_private_obj_state(s,
+ &dpu_kms->global_state);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_dpu_global_state(priv_state);
+}
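
A minimal sketch of a caller in the atomic path (function name hypothetical): because dpu_kms_get_global_state() takes the dedicated modeset lock and duplicates the private object state, the returned pointer may be modified freely for the current transaction, whereas dpu_kms_get_existing_global_state() above only reads whatever state is currently in operation.

    static int example_reserve(struct drm_atomic_state *state,
                               struct drm_encoder *enc)
    {
            struct dpu_global_state *global_state;

            global_state = dpu_kms_get_global_state(state);
            if (IS_ERR(global_state))
                    return PTR_ERR(global_state);

            /* ... record reservations in the duplicated state ... */
            return 0;
    }
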
+
+static struct drm_private_state *
+dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dpu_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dpu_global_state *dpu_state = to_dpu_global_state(state);
+
+ kfree(dpu_state);
+}
+
+static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
+ .atomic_duplicate_state = dpu_kms_global_duplicate_state,
+ .atomic_destroy_state = dpu_kms_global_destroy_state,
+};
+
+static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct dpu_global_state *state;
+
+ drm_modeset_lock_init(&dpu_kms->global_state_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
+ &state->base,
+ &dpu_kms_global_state_funcs);
+ return 0;
+}
+
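
One detail worth noting: drm_atomic_private_obj_init() hands the initial state over to the atomic framework, which from then on runs the duplicate/destroy hooks above on every transaction. A teardown counterpart is not shown in this hunk; a minimal sketch of what it would look like (name hypothetical):

    static void dpu_kms_global_obj_fini(struct dpu_kms *dpu_kms)
    {
            /* frees the current state via dpu_kms_global_destroy_state() */
            drm_atomic_private_obj_fini(&dpu_kms->global_state);
    }
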
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
return dpu_crtc_vblank(crtc, true);
@@ -267,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
- struct dpu_kms *dpu_kms;
- struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
@@ -276,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
if (!kms)
return;
- dpu_kms = to_dpu_kms(kms);
- dev = dpu_kms->dev;
/* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
@@ -552,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
- struct drm_device *dev;
int i;
- dev = dpu_kms->dev;
-
if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
@@ -760,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
struct drm_device *dev;
- struct msm_drm_private *priv;
int i, rc = -EINVAL;
if (!kms) {
@@ -770,7 +837,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
- priv = dev->dev_private;
+
+ rc = dpu_kms_global_obj_init(dpu_kms);
+ if (rc)
+ return rc;
atomic_set(&dpu_kms->bandwidth_ref, 0);
@@ -1018,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
- struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
- ddev = dpu_kms->dev;
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index c6169e7df19d..211f5de99a44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -111,6 +111,13 @@ struct dpu_kms {
struct dpu_core_perf perf;
+ /*
+	 * Global private object state. Do not access directly; use
+	 * dpu_kms_get_global_state().
+ */
+ struct drm_modeset_lock global_state_lock;
+ struct drm_private_obj global_state;
+
struct dpu_rm rm;
bool rm_init;
@@ -139,6 +146,25 @@ struct vsync_info {
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+struct dpu_global_state {
+ struct drm_private_state base;
+
+ uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_enc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
+ uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
+};
+
+struct dpu_global_state
+ *dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
+struct dpu_global_state
+ *__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
+
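
The mapping arrays use a fixed offset between the hardware enum and the array index, with 0 meaning "unreserved". For illustration (encoder id and PINGPONG_1 assumed):

    /* record that encoder 42 owns PINGPONG_1 */
    global_state->pingpong_to_enc_id[PINGPONG_1 - PINGPONG_0] = 42;
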
/**
* Debugfs functions - extra helper functions for debugfs support
*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 23f5b1433b35..9b62451b01ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -12,8 +12,12 @@
#include "dpu_encoder.h"
#include "dpu_trace.h"
-#define RESERVED_BY_OTHER(h, r) \
- ((h)->enc_id && (h)->enc_id != r)
+
+static inline bool reserved_by_other(uint32_t *res_map, int idx,
+ uint32_t enc_id)
+{
+ return res_map[idx] && res_map[idx] != enc_id;
+}
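
The semantics rely on 0 never being used as an encoder id (the release path below resets slots to 0): a slot only counts as taken when it holds some other encoder's id, so re-checking a block an encoder already owns is not a conflict.

    /* reserved_by_other() truth table (values illustrative):
     *   res_map[idx] == 0       -> false  (free)
     *   res_map[idx] == enc_id  -> false  (already ours)
     *   res_map[idx] == other   -> true   (held by another encoder)
     */
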
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
@@ -25,171 +29,43 @@ struct dpu_rm_requirements {
struct dpu_encoder_hw_resources hw_res;
};
-
-/**
- * struct dpu_rm_hw_blk - hardware block tracking list member
- * @list: List head for list of all hardware blocks tracking items
- * @id: Hardware ID number, within it's own space, ie. LM_X
- * @enc_id: Encoder id to which this blk is binded
- * @hw: Pointer to the hardware register access object for this block
- */
-struct dpu_rm_hw_blk {
- struct list_head list;
- uint32_t id;
- uint32_t enc_id;
- struct dpu_hw_blk *hw;
-};
-
-void dpu_rm_init_hw_iter(
- struct dpu_rm_hw_iter *iter,
- uint32_t enc_id,
- enum dpu_hw_blk_type type)
-{
- memset(iter, 0, sizeof(*iter));
- iter->enc_id = enc_id;
- iter->type = type;
-}
-
-static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+int dpu_rm_destroy(struct dpu_rm *rm)
{
- struct list_head *blk_list;
-
- if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
- DPU_ERROR("invalid rm\n");
- return false;
- }
+ int i;
- i->hw = NULL;
- blk_list = &rm->hw_blks[i->type];
+ for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
+ struct dpu_hw_pingpong *hw;
- if (i->blk && (&i->blk->list == blk_list)) {
- DPU_DEBUG("attempt resume iteration past last\n");
- return false;
- }
-
- i->blk = list_prepare_entry(i->blk, blk_list, list);
-
- list_for_each_entry_continue(i->blk, blk_list, list) {
- if (i->enc_id == i->blk->enc_id) {
- i->hw = i->blk->hw;
- DPU_DEBUG("found type %d id %d for enc %d\n",
- i->type, i->blk->id, i->enc_id);
- return true;
+ if (rm->pingpong_blks[i]) {
+ hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
+ dpu_hw_pingpong_destroy(hw);
}
}
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
+ struct dpu_hw_mixer *hw;
- DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
-
- return false;
-}
-
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
-{
- bool ret;
-
- mutex_lock(&rm->rm_lock);
- ret = _dpu_rm_get_hw_locked(rm, i);
- mutex_unlock(&rm->rm_lock);
-
- return ret;
-}
-
-static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
-{
- switch (type) {
- case DPU_HW_BLK_LM:
- dpu_hw_lm_destroy(hw);
- break;
- case DPU_HW_BLK_CTL:
- dpu_hw_ctl_destroy(hw);
- break;
- case DPU_HW_BLK_PINGPONG:
- dpu_hw_pingpong_destroy(hw);
- break;
- case DPU_HW_BLK_INTF:
- dpu_hw_intf_destroy(hw);
- break;
- case DPU_HW_BLK_SSPP:
- /* SSPPs are not managed by the resource manager */
- case DPU_HW_BLK_TOP:
- /* Top is a singleton, not managed in hw_blks list */
- case DPU_HW_BLK_MAX:
- default:
- DPU_ERROR("unsupported block type %d\n", type);
- break;
- }
-}
-
-int dpu_rm_destroy(struct dpu_rm *rm)
-{
- struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
- enum dpu_hw_blk_type type;
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
- list) {
- list_del(&hw_cur->list);
- _dpu_rm_hw_destroy(type, hw_cur->hw);
- kfree(hw_cur);
+ if (rm->mixer_blks[i]) {
+ hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
+ dpu_hw_lm_destroy(hw);
}
}
+ for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
+ struct dpu_hw_ctl *hw;
- mutex_destroy(&rm->rm_lock);
-
- return 0;
-}
-
-static int _dpu_rm_hw_blk_create(
- struct dpu_rm *rm,
- const struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- enum dpu_hw_blk_type type,
- uint32_t id,
- const void *hw_catalog_info)
-{
- struct dpu_rm_hw_blk *blk;
- void *hw;
-
- switch (type) {
- case DPU_HW_BLK_LM:
- hw = dpu_hw_lm_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_CTL:
- hw = dpu_hw_ctl_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_PINGPONG:
- hw = dpu_hw_pingpong_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_INTF:
- hw = dpu_hw_intf_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_SSPP:
- /* SSPPs are not managed by the resource manager */
- case DPU_HW_BLK_TOP:
- /* Top is a singleton, not managed in hw_blks list */
- case DPU_HW_BLK_MAX:
- default:
- DPU_ERROR("unsupported block type %d\n", type);
- return -EINVAL;
- }
-
- if (IS_ERR_OR_NULL(hw)) {
- DPU_ERROR("failed hw object creation: type %d, err %ld\n",
- type, PTR_ERR(hw));
- return -EFAULT;
+ if (rm->ctl_blks[i]) {
+ hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
+ dpu_hw_ctl_destroy(hw);
+ }
}
+ for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
+ struct dpu_hw_intf *hw;
- blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (!blk) {
- _dpu_rm_hw_destroy(type, hw);
- return -ENOMEM;
+ if (rm->intf_blks[i]) {
+ hw = to_dpu_hw_intf(rm->intf_blks[i]);
+ dpu_hw_intf_destroy(hw);
+ }
}
- blk->id = id;
- blk->hw = hw;
- blk->enc_id = 0;
- list_add_tail(&blk->list, &rm->hw_blks[type]);
-
return 0;
}
@@ -198,7 +74,6 @@ int dpu_rm_init(struct dpu_rm *rm,
void __iomem *mmio)
{
int rc, i;
- enum dpu_hw_blk_type type;
if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n");
@@ -208,13 +83,9 @@ int dpu_rm_init(struct dpu_rm *rm,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
- mutex_init(&rm->rm_lock);
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++)
- INIT_LIST_HEAD(&rm->hw_blks[type]);
-
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
if (lm->pingpong == PINGPONG_MAX) {
@@ -222,12 +93,17 @@ int dpu_rm_init(struct dpu_rm *rm,
continue;
}
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
- cat->mixer[i].id, &cat->mixer[i]);
- if (rc) {
- DPU_ERROR("failed: lm hw not available\n");
+ if (lm->id < LM_0 || lm->id >= LM_MAX) {
+ DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
+ continue;
+ }
+ hw = dpu_hw_lm_init(lm->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed lm object creation: err %d\n", rc);
goto fail;
}
+ rm->mixer_blks[lm->id - LM_0] = &hw->base;
if (!rm->lm_max_width) {
rm->lm_max_width = lm->sblk->maxwidth;
@@ -243,35 +119,59 @@ int dpu_rm_init(struct dpu_rm *rm,
}
for (i = 0; i < cat->pingpong_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
- cat->pingpong[i].id, &cat->pingpong[i]);
- if (rc) {
- DPU_ERROR("failed: pp hw not available\n");
+ struct dpu_hw_pingpong *hw;
+ const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
+
+ if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
+ DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
+ continue;
+ }
+ hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed pingpong object creation: err %d\n",
+ rc);
goto fail;
}
+ rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
}
for (i = 0; i < cat->intf_count; i++) {
- if (cat->intf[i].type == INTF_NONE) {
+ struct dpu_hw_intf *hw;
+ const struct dpu_intf_cfg *intf = &cat->intf[i];
+
+ if (intf->type == INTF_NONE) {
DPU_DEBUG("skip intf %d with type none\n", i);
continue;
}
-
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
- cat->intf[i].id, &cat->intf[i]);
- if (rc) {
- DPU_ERROR("failed: intf hw not available\n");
+ if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
+ DPU_ERROR("skip intf %d with invalid id\n", intf->id);
+ continue;
+ }
+ hw = dpu_hw_intf_init(intf->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed intf object creation: err %d\n", rc);
goto fail;
}
+ rm->intf_blks[intf->id - INTF_0] = &hw->base;
}
for (i = 0; i < cat->ctl_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
- cat->ctl[i].id, &cat->ctl[i]);
- if (rc) {
- DPU_ERROR("failed: ctl hw not available\n");
+ struct dpu_hw_ctl *hw;
+ const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
+
+ if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
+ DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
+ continue;
+ }
+ hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed ctl object creation: err %d\n", rc);
goto fail;
}
+ rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
}
return 0;
@@ -279,7 +179,7 @@ int dpu_rm_init(struct dpu_rm *rm,
fail:
dpu_rm_destroy(rm);
- return rc;
+ return rc ? rc : -EFAULT;
}
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
@@ -288,85 +188,81 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
}
/**
+ * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
+ * @rm: dpu resource manager handle
+ * @primary_idx: index of primary mixer in rm->mixer_blks[]
+ * @peer_idx: index of other mixer in rm->mixer_blks[]
+ * @Return: true if rm->mixer_blks[peer_idx] is a peer of
+ * rm->mixer_blks[primary_idx]
+ */
+static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
+ int peer_idx)
+{
+ const struct dpu_lm_cfg *prim_lm_cfg;
+ const struct dpu_lm_cfg *peer_cfg;
+
+ prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
+ peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
+
+ if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ return true;
+}
+
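
What the catalog is assumed to encode for this check: lm_pair_mask carries one bit per peer mixer id, so only hardware-supported pairings pass, e.g.:

    /* hypothetical catalog entries for a dual-LM pipe */
    cfg[LM_0].lm_pair_mask = BIT(LM_1);
    cfg[LM_1].lm_pair_mask = BIT(LM_0);
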
+/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
 * @enc_id: encoder id requesting the allocation
- * @reqs: proposed use case requirements
- * @lm: proposed layer mixer, function checks if lm, and all other hardwired
- * blocks connected to the lm (pp) is available and appropriate
- * @pp: output parameter, pingpong block attached to the layer mixer.
- * NULL if pp was not available, or not matching requirements.
- * @primary_lm: if non-null, this function check if lm is compatible primary_lm
- * as well as satisfying all other requirements
+ * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
+ * if the lm, and all other hardwired blocks connected to the lm (pp), are
+ * available and appropriate
+ * @pp_idx: output parameter, index of pingpong block attached to the layer
+ * mixer in rm->pingpong_blks[].
* @Return: true if lm matches all requirements, false otherwise
*/
-static bool _dpu_rm_check_lm_and_get_connected_blks(
- struct dpu_rm *rm,
- uint32_t enc_id,
- struct dpu_rm_requirements *reqs,
- struct dpu_rm_hw_blk *lm,
- struct dpu_rm_hw_blk **pp,
- struct dpu_rm_hw_blk *primary_lm)
+static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id, int lm_idx, int *pp_idx)
{
- const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
- struct dpu_rm_hw_iter iter;
-
- *pp = NULL;
-
- DPU_DEBUG("check lm %d pp %d\n",
- lm_cfg->id, lm_cfg->pingpong);
-
- /* Check if this layer mixer is a peer of the proposed primary LM */
- if (primary_lm) {
- const struct dpu_lm_cfg *prim_lm_cfg =
- to_dpu_hw_mixer(primary_lm->hw)->cap;
-
- if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
- DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
- prim_lm_cfg->id);
- return false;
- }
- }
+ const struct dpu_lm_cfg *lm_cfg;
+ int idx;
/* Already reserved? */
- if (RESERVED_BY_OTHER(lm, enc_id)) {
- DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false;
}
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- if (iter.blk->id == lm_cfg->pingpong) {
- *pp = iter.blk;
- break;
- }
- }
-
- if (!*pp) {
+ lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
+ idx = lm_cfg->pingpong - PINGPONG_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
return false;
}
- if (RESERVED_BY_OTHER(*pp, enc_id)) {
- DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
- (*pp)->id);
+ if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
+ lm_cfg->pingpong);
return false;
}
-
+ *pp_idx = idx;
return true;
}
-static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
struct dpu_rm_requirements *reqs)
{
- struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
- struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
- struct dpu_rm_hw_iter iter_i, iter_j;
- int lm_count = 0;
- int i, rc = 0;
+ int lm_idx[MAX_BLOCKS];
+ int pp_idx[MAX_BLOCKS];
+ int i, j, lm_count = 0;
if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
@@ -374,36 +270,40 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
}
/* Find a primary mixer */
- dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology.num_lm &&
- _dpu_rm_get_hw_locked(rm, &iter_i)) {
- memset(&lm, 0, sizeof(lm));
- memset(&pp, 0, sizeof(pp));
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; i++) {
+ if (!rm->mixer_blks[i])
+ continue;
lm_count = 0;
- lm[lm_count] = iter_i.blk;
+ lm_idx[lm_count] = i;
- if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, enc_id, reqs, lm[lm_count],
- &pp[lm_count], NULL))
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
+ enc_id, i, &pp_idx[lm_count])) {
continue;
+ }
++lm_count;
/* Valid primary mixer found, find matching peers */
- dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+ for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; j++) {
+ if (!rm->mixer_blks[j])
+ continue;
- while (lm_count != reqs->topology.num_lm &&
- _dpu_rm_get_hw_locked(rm, &iter_j)) {
- if (iter_i.blk == iter_j.blk)
+ if (!_dpu_rm_check_lm_peer(rm, i, j)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
+ LM_0 + i);
continue;
+ }
- if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, enc_id, reqs, iter_j.blk,
- &pp[lm_count], iter_i.blk))
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
+ global_state, enc_id, j,
+ &pp_idx[lm_count])) {
continue;
+ }
- lm[lm_count] = iter_j.blk;
+ lm_idx[lm_count] = j;
++lm_count;
}
}
@@ -413,65 +313,65 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
return -ENAVAIL;
}
- for (i = 0; i < ARRAY_SIZE(lm); i++) {
- if (!lm[i])
- break;
+ for (i = 0; i < lm_count; i++) {
+ global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
+ global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
- lm[i]->enc_id = enc_id;
- pp[i]->enc_id = enc_id;
-
- trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ pp_idx[i] + PINGPONG_0);
}
- return rc;
+ return 0;
}
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
const struct msm_display_topology *top)
{
- struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
- struct dpu_rm_hw_iter iter;
- int i = 0, num_ctls = 0;
- bool needs_split_display = false;
-
- memset(&ctls, 0, sizeof(ctls));
+ int ctl_idx[MAX_BLOCKS];
+ int i = 0, j, num_ctls;
+ bool needs_split_display;
/* each hw_intf needs its own hw_ctrl to program its control path */
num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
- unsigned long features = ctl->caps->features;
+ for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
+ const struct dpu_hw_ctl *ctl;
+ unsigned long features;
bool has_split_display;
- if (RESERVED_BY_OTHER(iter.blk, enc_id))
+ if (!rm->ctl_blks[j])
+ continue;
+ if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
continue;
+ ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
+ features = ctl->caps->features;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
- DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+ DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);
if (needs_split_display != has_split_display)
continue;
- ctls[i] = iter.blk;
- DPU_DEBUG("ctl %d match\n", iter.blk->id);
+ ctl_idx[i] = j;
+ DPU_DEBUG("ctl %d match\n", j + CTL_0);
if (++i == num_ctls)
break;
}
if (i != num_ctls)
return -ENAVAIL;
- for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
- ctls[i]->enc_id = enc_id;
- trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
+ for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
+ global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
+ trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
}
return 0;
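
A concrete matching example (topology assumed): a dual-DSI panel driven as a single display reports num_intf == 2 and needs_split_display == true, so only CTL blocks advertising DPU_CTL_SPLIT_DISPLAY in caps->features are considered, and unless two such CTLs are free the function fails with -ENAVAIL.
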
@@ -479,40 +379,34 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
- uint32_t id,
- enum dpu_hw_blk_type type)
+ uint32_t id)
{
- struct dpu_rm_hw_iter iter;
- int ret = 0;
-
- /* Find the block entry in the rm, and note the reservation */
- dpu_rm_init_hw_iter(&iter, 0, type);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- if (iter.blk->id != id)
- continue;
+ int idx = id - INTF_0;
- if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
- DPU_ERROR("type %d id %d already reserved\n", type, id);
- return -ENAVAIL;
- }
-
- iter.blk->enc_id = enc_id;
- trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
- break;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
+ DPU_ERROR("invalid intf id: %d", id);
+ return -EINVAL;
}
- /* Shouldn't happen since intfs are fixed at probe */
- if (!iter.hw) {
- DPU_ERROR("couldn't find type %d id %d\n", type, id);
+ if (!rm->intf_blks[idx]) {
+ DPU_ERROR("couldn't find intf id %d\n", id);
return -EINVAL;
}
- return ret;
+ if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
+ DPU_ERROR("intf id %d already reserved\n", id);
+ return -ENAVAIL;
+ }
+
+ global_state->intf_to_enc_id[idx] = enc_id;
+ return 0;
}
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
struct dpu_encoder_hw_resources *hw_res)
{
@@ -523,8 +417,7 @@ static int _dpu_rm_reserve_intf_related_hw(
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
id = i + INTF_0;
- ret = _dpu_rm_reserve_intf(rm, enc_id, id,
- DPU_HW_BLK_INTF);
+ ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
if (ret)
return ret;
}
@@ -534,25 +427,27 @@ static int _dpu_rm_reserve_intf_related_hw(
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
struct dpu_rm_requirements *reqs)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
+ ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
+ ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
+ ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
+ &reqs->hw_res);
if (ret)
return ret;
@@ -560,9 +455,7 @@ static int _dpu_rm_make_reservation(
}
static int _dpu_rm_populate_requirements(
- struct dpu_rm *rm,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
@@ -577,37 +470,36 @@ static int _dpu_rm_populate_requirements(
return 0;
}
-static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
+static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
+ uint32_t enc_id)
{
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->enc_id == enc_id) {
- blk->enc_id = 0;
- DPU_DEBUG("rel enc %d %d %d\n", enc_id,
- type, blk->id);
- }
- }
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ if (res_mapping[i] == enc_id)
+ res_mapping[i] = 0;
}
}
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc)
{
- mutex_lock(&rm->rm_lock);
-
- _dpu_rm_release_reservation(rm, enc->base.id);
-
- mutex_unlock(&rm->rm_lock);
+ _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
+ ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
+ ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
+ ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->intf_to_enc_id,
+ ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
}
int dpu_rm_reserve(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology,
- bool test_only)
+ struct msm_display_topology topology)
{
struct dpu_rm_requirements reqs;
int ret;
@@ -616,31 +508,75 @@ int dpu_rm_reserve(
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
- DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
- enc->base.id, crtc_state->crtc->base.id, test_only);
+ if (IS_ERR(global_state)) {
+ DPU_ERROR("failed to global state\n");
+ return PTR_ERR(global_state);
+ }
- mutex_lock(&rm->rm_lock);
+ DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
+ enc->base.id, crtc_state->crtc->base.id);
- ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
- topology);
+ ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
- goto end;
+ return ret;
}
- ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
- if (ret) {
+ ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+ if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_reservation(rm, enc->base.id);
- } else if (test_only) {
- /* test_only: test the reservation and then undo */
- DPU_DEBUG("test_only: discard test [enc: %d]\n",
- enc->base.id);
- _dpu_rm_release_reservation(rm, enc->base.id);
- }
-end:
- mutex_unlock(&rm->rm_lock);
+
return ret;
}
+
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
+{
+ struct dpu_hw_blk **hw_blks;
+ uint32_t *hw_to_enc_id;
+ int i, num_blks, max_blks;
+
+ switch (type) {
+ case DPU_HW_BLK_PINGPONG:
+ hw_blks = rm->pingpong_blks;
+ hw_to_enc_id = global_state->pingpong_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->pingpong_blks);
+ break;
+ case DPU_HW_BLK_LM:
+ hw_blks = rm->mixer_blks;
+ hw_to_enc_id = global_state->mixer_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->mixer_blks);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw_blks = rm->ctl_blks;
+ hw_to_enc_id = global_state->ctl_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->ctl_blks);
+ break;
+ case DPU_HW_BLK_INTF:
+ hw_blks = rm->intf_blks;
+ hw_to_enc_id = global_state->intf_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->intf_blks);
+ break;
+ default:
+ DPU_ERROR("blk type %d not managed by rm\n", type);
+ return 0;
+ }
+
+ num_blks = 0;
+ for (i = 0; i < max_blks; i++) {
+ if (hw_to_enc_id[i] != enc_id)
+ continue;
+
+ if (num_blks == blks_size) {
+ DPU_ERROR("More than %d resources assigned to enc %d\n",
+ blks_size, enc_id);
+ break;
+ }
+ blks[num_blks++] = hw_blks[i];
+ }
+
+ return num_blks;
+}
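
A sketch of the consumer side (function name hypothetical, MAX_BLOCKS as used earlier in this file): an encoder fetches every block of one type that the reservation maps assign to it, then converts each generic pointer back to its wrapper.

    static void example_program_ctls(struct dpu_rm *rm,
                                     struct dpu_global_state *global_state,
                                     struct drm_encoder *drm_enc)
    {
            struct dpu_hw_blk *hw_ctls[MAX_BLOCKS];
            int i, num_ctls;

            num_ctls = dpu_rm_get_assigned_resources(rm, global_state,
                            drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctls,
                            ARRAY_SIZE(hw_ctls));
            for (i = 0; i < num_ctls; i++) {
                    struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(hw_ctls[i]);
                    /* ... program the control path via ctl->ops ... */
            }
    }
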
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 9c580a017094..6d2b04f306f0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -11,37 +11,24 @@
#include "msm_kms.h"
#include "dpu_hw_top.h"
+struct dpu_global_state;
+
/**
* struct dpu_rm - DPU dynamic hardware resource manager
- * @hw_blks: array of lists of hardware resources present in the system, one
- * list per type of hardware block
+ * @pingpong_blks: array of pingpong hardware resources
+ * @mixer_blks: array of layer mixer hardware resources
+ * @ctl_blks: array of ctl hardware resources
+ * @intf_blks: array of intf hardware resources
* @lm_max_width: cached layer mixer maximum width
* @rm_lock: resource manager mutex
*/
struct dpu_rm {
- struct list_head hw_blks[DPU_HW_BLK_MAX];
- uint32_t lm_max_width;
- struct mutex rm_lock;
-};
+ struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
+ struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
+ struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
+ struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
-/**
- * struct dpu_rm_hw_blk - resource manager internal structure
- * forward declaration for single iterator definition without void pointer
- */
-struct dpu_rm_hw_blk;
-
-/**
- * struct dpu_rm_hw_iter - iterator for use with dpu_rm
- * @hw: dpu_hw object requested, or NULL on failure
- * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-struct dpu_rm_hw_iter {
- void *hw;
- struct dpu_rm_hw_blk *blk;
- uint32_t enc_id;
- enum dpu_hw_blk_type type;
+ uint32_t lm_max_width;
};
/**
@@ -74,14 +61,13 @@ int dpu_rm_destroy(struct dpu_rm *rm);
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
* @topology: Pointer to topology info for the display
- * @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology,
- bool test_only);
+ struct msm_display_topology topology);
/**
 * dpu_rm_release - Given the encoder for the display chain, release any
@@ -90,31 +76,14 @@ int dpu_rm_reserve(struct dpu_rm *rm,
* @enc: DRM Encoder handle
* @Return: 0 on Success otherwise -ERROR
*/
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc);
/**
- * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
- * using dpu_rm_get_hw
- * @iter: iter object to initialize
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-void dpu_rm_init_hw_iter(
- struct dpu_rm_hw_iter *iter,
- uint32_t enc_id,
- enum dpu_hw_blk_type type);
-/**
- * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
- * Meant to do a single pass through the hardware list to iteratively
- * retrieve hardware blocks of a given type for a given encoder.
- * Initialize an iterator object.
- * Set hw block type of interest. Set encoder id of interest, 0 for any.
- * Function returns first hw of type for that encoder.
- * Subsequent calls will return the next reserved hw of that type in-order.
- * Iterator HW pointer will be null on failure to find hw.
- * @rm: DPU Resource Manager handle
- * @iter: iterator object
- * @Return: true on match found, false on no match found
+ * Get hw resources of the given type that are assigned to this encoder.
*/
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 93ab36bd8df3..5e8c3f3e6625 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -24,7 +24,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
int rc;
if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
- DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
@@ -106,7 +106,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
u32 val;
if (!vbif || !vbif->cap) {
- DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
@@ -164,7 +164,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
if (!vbif || !mdp) {
DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
- vbif != 0, mdp != 0);
+ vbif != NULL, mdp != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index f34dca5d4532..c9239b07fe4f 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -481,6 +481,8 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index e1cc541e0ef2..998bef1190a3 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -405,6 +405,83 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pipe = crtc->index;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return false;
+ }
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+ * the end of VFP. Translate the porch values relative to the line
+ * counter positions.
+ */
+
+ vactive_start = vsw + vbp + 1;
+
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = mdp5_encoder_get_linecount(encoder);
+
+ if (line < vactive_start)
+ line -= vactive_start;
+ else if (line > vactive_end)
+ line = line - vfp_end - vactive_start;
+ else
+ line -= vactive_start;
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
+
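
A worked example with assumed CEA 1080p-like timings (vsync width 5, vertical back porch 36, vdisplay 1080, vtotal 1125):

    vactive_start = 5 + 36 + 1 = 42
    vactive_end   = 42 + 1080  = 1122

so a raw line count of 42 reports vpos 0 (first active line), while a count of 1124, inside the front porch, reports 1124 - 1125 - 42 = -43, i.e. 43 lines before the next active region starts.
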
+static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder)
+ return 0;
+
+ return mdp5_encoder_get_framecount(encoder);
+}
+
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1054,6 +1131,10 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.cursor_set = mdp5_crtc_cursor_set,
.cursor_move = mdp5_crtc_cursor_move,
.atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -1063,6 +1144,7 @@ static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.atomic_flush = mdp5_crtc_atomic_flush,
.atomic_enable = mdp5_crtc_atomic_enable,
.atomic_disable = mdp5_crtc_atomic_disable,
+ .get_scanout_position = mdp5_crtc_get_scanout_position,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index e43ecd4be10a..47b989834af1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -583,98 +583,6 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
return 0;
}
-static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- drm_for_each_encoder(encoder, dev)
- if (encoder->crtc == crtc)
- return encoder;
-
- return NULL;
-}
-
-static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
-
- crtc = priv->crtcs[pipe];
- if (!crtc) {
- DRM_ERROR("Invalid crtc %d\n", pipe);
- return false;
- }
-
- encoder = get_encoder_from_crtc(crtc);
- if (!encoder) {
- DRM_ERROR("no encoder found for crtc %d\n", pipe);
- return false;
- }
-
- vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
- vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
-
- /*
- * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
- * the end of VFP. Translate the porch values relative to the line
- * counter positions.
- */
-
- vactive_start = vsw + vbp + 1;
-
- vactive_end = vactive_start + mode->crtc_vdisplay;
-
- /* last scan line before VSYNC */
- vfp_end = mode->crtc_vtotal;
-
- if (stime)
- *stime = ktime_get();
-
- line = mdp5_encoder_get_linecount(encoder);
-
- if (line < vactive_start) {
- line -= vactive_start;
- } else if (line > vactive_end) {
- line = line - vfp_end - vactive_start;
- } else {
- line -= vactive_start;
- }
-
- *vpos = line;
- *hpos = 0;
-
- if (etime)
- *etime = ktime_get();
-
- return true;
-}
-
-static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
-
- if (pipe >= priv->num_crtcs)
- return 0;
-
- crtc = priv->crtcs[pipe];
- if (!crtc)
- return 0;
-
- encoder = get_encoder_from_crtc(crtc);
- if (!encoder)
- return 0;
-
- return mdp5_encoder_get_framecount(encoder);
-}
-
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -725,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
if (config->platform.iommu) {
iommu_dev = &pdev->dev;
- if (!iommu_dev->iommu_fwspec)
+ if (!dev_iommu_fwspec_get(iommu_dev))
iommu_dev = iommu_dev->parent;
aspace = msm_gem_address_space_create(iommu_dev,
@@ -762,9 +670,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.max_width = 0xffff;
dev->mode_config.max_height = 0xffff;
- dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
- dev->driver->get_scanout_position = mdp5_get_scanoutpos;
- dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4864b9558f65..4b363bd7ddff 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -689,7 +689,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto fail;
@@ -718,7 +718,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
encoder = msm_dsi->encoder;
/* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge);
+ drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
/*
* we need the drm_connector created by the external bridge
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index ad4e963ccd9b..106a67473af5 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,10 +178,6 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- ret = drm_bridge_attach(encoder, edp->bridge, NULL);
- if (ret)
- goto fail;
-
priv->bridges[priv->num_bridges++] = edp->bridge;
priv->connectors[priv->num_connectors++] = edp->connector;
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index b65b5cc2dba2..c69a37e0c708 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -97,7 +97,7 @@ struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
bridge = &edp_bridge->base;
bridge->funcs = &edp_bridge_funcs;
- ret = drm_bridge_attach(edp->encoder, bridge, NULL);
+ ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1a9b6289637d..737453b6e596 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,10 +327,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
- if (ret)
- goto fail;
-
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index ba81338a9bf8..6e380db9287b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -287,7 +287,7 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
- ret = drm_bridge_attach(hdmi->encoder, bridge, NULL);
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e4b750b0c2d3..29295dee2a2e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
+ if (!dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_msm_uninit;
+ }
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
@@ -668,8 +670,10 @@ static void msm_irq_uninstall(struct drm_device *dev)
kms->funcs->irq_uninstall(kms);
}
-static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
@@ -678,8 +682,10 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
return vblank_ctrl_queue_work(priv, pipe, true);
}
-static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
@@ -1004,8 +1010,6 @@ static struct drm_driver msm_driver = {
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
.irq_uninstall = msm_irq_uninstall,
- .enable_vblank = msm_enable_vblank,
- .disable_vblank = msm_disable_vblank,
.gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 740bf7c70d8f..194d900a460e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -232,6 +232,9 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
+int msm_crtc_enable_vblank(struct drm_crtc *crtc);
+void msm_crtc_disable_vblank(struct drm_crtc *crtc);
+
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, int npages);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index db48867df47d..47235f8c5922 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -160,16 +160,12 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
+ ret = drm_fb_helper_init(dev, helper);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret)
- goto fini;
-
/* the fw fb could be anywhere in memory */
drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9e0953c2b7ce..30584eaf8cc8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -157,7 +157,17 @@ struct msm_gem_submit {
uint32_t handle;
};
uint64_t iova;
- } bos[0];
+ } bos[];
};
+/* helper to determine if a buffer in submit should be dumped, used for both
+ * devcoredump and debugfs cmdstream dumping:
+ */
+static inline bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+ extern bool rd_full;
+ return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
#endif /* __MSM_GEM_H__ */
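
Usage sketch, matching the call sites this helper now serves (argument layout of snapshot_buf() assumed from msm_rd.c): both the devcoredump and the $debugfs rd paths walk submit->bos and skip buffers the user did not flag with MSM_SUBMIT_BO_DUMP, unless the rd_full module parameter forces a full snapshot.

    for (i = 0; i < submit->nr_bos; i++)
            if (should_dump(submit, i))
                    snapshot_buf(rd, submit, i, 0, 0);
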
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 18f3a5c53ffb..615c5cda5389 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -355,16 +355,34 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
state->cmd = kstrdup(cmd, GFP_KERNEL);
if (submit) {
- int i;
-
- state->bos = kcalloc(submit->nr_cmds,
+ int i, nr = 0;
+
+ /* count # of buffers to dump: */
+ for (i = 0; i < submit->nr_bos; i++)
+ if (should_dump(submit, i))
+ nr++;
+ /* always dump cmd bo's, but don't double count them: */
+ for (i = 0; i < submit->nr_cmds; i++)
+ if (!should_dump(submit, submit->cmd[i].idx))
+ nr++;
+
+ state->bos = kcalloc(nr,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (should_dump(submit, i)) {
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova, submit->bos[i].flags);
+ }
+ }
+
for (i = 0; state->bos && i < submit->nr_cmds; i++) {
int idx = submit->cmd[i].idx;
- msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
- submit->bos[idx].iova, submit->bos[idx].flags);
+ if (!should_dump(submit, submit->cmd[i].idx)) {
+ msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+ submit->bos[idx].iova, submit->bos[idx].flags);
+ }
}
}
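
Counting sketch with assumed numbers: for a submit with five buffers, two of them flagged MSM_SUBMIT_BO_DUMP, and two cmd buffers of which one is among the flagged, nr = 2 + 1 = 3. The second capture loop then only adds the cmd buffer the first loop skipped, so no buffer object is captured twice.
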
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index af7ceb246c7c..732f65df5c4f 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -43,7 +43,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
-static bool rd_full = false;
+bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
@@ -336,12 +336,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
msm_gem_put_vaddr(&obj->base);
}
-static bool
-should_dump(struct msm_gem_submit *submit, int idx)
-{
- return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
-}
-
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 37c50ea8f847..1f08de4241e0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1248,6 +1248,9 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.page_flip = nv04_crtc_page_flip,
.destroy = nv_crtc_destroy,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
@@ -1258,6 +1261,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.disable = nv_crtc_disable,
+ .get_scanout_position = nouveau_display_scanoutpos,
};
static const uint32_t modeset_formats[] = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index e8eef88a8382..ffdd447d8706 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -35,7 +35,8 @@
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
-#include <subdev/timer.h>
+
+#include <nvif/timer.h>
int nv04_dac_output_offset(struct drm_encoder *encoder)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 3fdfafa8b0ad..b674d68ef28a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -26,6 +26,7 @@
#include "hw.h"
#include <subdev/bios/pll.h>
+#include <nvif/timer.h>
#define CHIPSET_NFORCE 0x01a0
#define CHIPSET_NFORCE2 0x01f0
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 00a85f1e1a4a..ee782151d332 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -23,6 +23,7 @@
#include <nvif/cl507c.h>
#include <nvif/event.h>
+#include <nvif/timer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e7fcfa6e6467..c5152c39c684 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -23,6 +23,7 @@
#include "head.h"
#include <nvif/cl507d.h>
+#include <nvif/timer.h>
#include "nouveau_bo.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 3b36dc8d36b2..c03cb987856b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -24,6 +24,8 @@
#include <nouveau_bo.h>
+#include <nvif/timer.h>
+
void
corec37d_wndw_owner(struct nv50_core *core)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 397143b639c6..8c5cf096f69b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -24,21 +24,36 @@
#include "head.h"
#include <nvif/cl507a.h>
+#include <nvif/timer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+bool
+curs507a_space(struct nv50_wndw *wndw)
+{
+ nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2,
+ if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
+ return true;
+ );
+ WARN_ON(1);
+ return false;
+}
+
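
Usage note (behaviour per the nvif timer helpers quoted further below): nvif_msec() keeps re-evaluating its body against GPU time until the 2ms budget expires, and the embedded return exits early as soon as the read of what is presumably the cursor channel's free-slot count reports at least four entries; on timeout the callers below warn once and simply drop the register write.
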
static void
curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
+ if (curs507a_space(wndw))
+ nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
}
static void
curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
- asyw->point.x);
+ if (curs507a_space(wndw)) {
+ nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
+ asyw->point.x);
+ }
}
const struct nv50_wimm_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
index 23fb29d41efe..96dff4f09f57 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
@@ -25,14 +25,17 @@
static void
cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
+ if (curs507a_space(wndw))
+ nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
}
static void
cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
- asyw->point.x);
+ if (curs507a_space(wndw)) {
+ nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
+ asyw->point.x);
+ }
}
static const struct nv50_wimm_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a3dc2ba19fb2..6be9df1820c5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -45,6 +45,7 @@
#include <nvif/cl5070.h>
#include <nvif/cl507d.h>
#include <nvif/event.h>
+#include <nvif/timer.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
@@ -1256,30 +1257,6 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
}
}
-static void
-nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nv50_mstc *mstc = nv50_mstc(connector);
-
- drm_connector_unregister(&mstc->connector);
-
- drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
-
- drm_connector_put(&mstc->connector);
-}
-
-static void
-nv50_mstm_register_connector(struct drm_connector *connector)
-{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
-
- drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
-
- drm_connector_register(connector);
-}
-
static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, const char *path)
@@ -1298,8 +1275,6 @@ nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
.add_connector = nv50_mstm_add_connector,
- .register_connector = nv50_mstm_register_connector,
- .destroy_connector = nv50_mstm_destroy_connector,
};
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d9d64602947d..8f6455697ba7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_vblank.h>
#include "nouveau_connector.h"
void
nv50_head_flush_clr(struct nv50_head *head,
@@ -413,6 +414,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
static const struct drm_crtc_helper_funcs
nv50_head_help = {
.atomic_check = nv50_head_atomic_check,
+ .get_scanout_position = nouveau_display_scanoutpos,
};
static void
@@ -481,6 +483,9 @@ nv50_head_func = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
.atomic_destroy_state = nv50_head_atomic_destroy_state,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
struct nv50_head *
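
These two hunks are nouveau's side of the tree-wide move of vblank and
scanout-position handling from per-device struct drm_driver hooks to
per-CRTC ones; the matching removals from driver_stub appear in the
nouveau_drm.c diff below. The helpers change signature from a
(struct drm_device *, unsigned int pipe) pair to a CRTC pointer, as the
nouveau_display.h hunk further down spells out:

	int  nouveau_display_vblank_enable(struct drm_crtc *crtc);
	void nouveau_display_vblank_disable(struct drm_crtc *crtc);
	bool nouveau_display_scanoutpos(struct drm_crtc *crtc,
					bool in_vblank_irq, int *vpos, int *hpos,
					ktime_t *stime, ktime_t *etime,
					const struct drm_display_mode *mode);
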
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index 2e68fc736fe1..4f7ce57f2036 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -24,6 +24,8 @@
#include <nouveau_bo.h>
+#include <nvif/timer.h>
+
static void
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index caf397475918..a7412b9d3a98 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -97,6 +97,7 @@ struct nv50_wimm_func {
};
extern const struct nv50_wimm_func curs507a;
+bool curs507a_space(struct nv50_wndw *);
int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 25d969dcf67d..c2a572c67a76 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -23,27 +23,6 @@ int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
void nvif_device_fini(struct nvif_device *);
u64 nvif_device_time(struct nvif_device *);
-/* Delay based on GPU time (ie. PTIMER).
- *
- * Will return -ETIMEDOUT unless the loop was terminated with 'break',
- * where it will return the number of nanoseconds taken instead.
- */
-#define nvif_nsec(d,n,cond...) ({ \
- struct nvif_device *_device = (d); \
- u64 _nsecs = (n), _time0 = nvif_device_time(_device); \
- s64 _taken = 0; \
- \
- do { \
- cond \
- } while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\
- \
- if (_taken >= _nsecs) \
- _taken = -ETIMEDOUT; \
- _taken; \
-})
-#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
-#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
-
/*XXX*/
#include <subdev/bios.h>
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/timer.h b/drivers/gpu/drm/nouveau/include/nvif/timer.h
new file mode 100644
index 000000000000..57587a985c4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/timer.h
@@ -0,0 +1,35 @@
+#ifndef __NVIF_TIMER_H__
+#define __NVIF_TIMER_H__
+#include <nvif/os.h>
+
+struct nvif_timer_wait {
+ struct nvif_device *device;
+ u64 limit;
+ u64 time0;
+ u64 time1;
+ int reads;
+};
+
+void nvif_timer_wait_init(struct nvif_device *, u64 nsec,
+ struct nvif_timer_wait *);
+s64 nvif_timer_wait_test(struct nvif_timer_wait *);
+
+/* Delay based on GPU time (i.e. PTIMER).
+ *
+ * Returns -ETIMEDOUT unless the loop was terminated with 'break',
+ * in which case it returns the number of nanoseconds taken instead.
+ */
+#define nvif_nsec(d,n,cond...) ({ \
+ struct nvif_timer_wait _wait; \
+ s64 _taken = 0; \
+ \
+ nvif_timer_wait_init((d), (n), &_wait); \
+ do { \
+ cond \
+ } while ((_taken = nvif_timer_wait_test(&_wait)) >= 0); \
+ \
+ _taken; \
+})
+#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
+#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
+#endif
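
Relative to the version deleted from nvif/device.h above, the macro body no
longer measures elapsed time inline; it delegates to nvif_timer_wait_init()
and nvif_timer_wait_test() so that the stuck-timer detection implemented in
nvif/timer.c (below) covers every poll site. Expanded by hand, a call such
as nvif_msec(d, 2, ...) behaves roughly like this sketch:

	struct nvif_timer_wait wait;
	s64 taken = 0;

	nvif_timer_wait_init(d, 2 * 1000 * 1000, &wait);	/* 2ms in ns */
	do {
		/* caller's statements; 'break' leaves taken at the last
		 * elapsed-time reading, >= 0 */
	} while ((taken = nvif_timer_wait_test(&wait)) >= 0);
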
diff --git a/drivers/gpu/drm/nouveau/include/nvif/user.h b/drivers/gpu/drm/nouveau/include/nvif/user.h
index 03c11826b693..6825574d93c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/user.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/user.h
@@ -10,6 +10,7 @@ struct nvif_user {
struct nvif_user_func {
void (*doorbell)(struct nvif_user *, u32 token);
+ u64 (*time)(struct nvif_user *);
};
int nvif_user_init(struct nvif_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1b62ccc57aef..c40f127de3d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -647,13 +647,6 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
}
static int
-nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- /* We'll do this from user space. */
- return 0;
-}
-
-static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -1501,8 +1494,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
ret = nvif_object_map_handle(&mem->mem.object,
&args, argc,
&handle, &length);
- if (ret != 1)
- return ret ? ret : -EINVAL;
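+ /* nvif_object_map_handle() returns 1 when it hands back a valid
+ * mapping handle: 0 (no mapping) is treated as a driver bug, and
+ * -ENOSPC is turned into -EAGAIN, presumably so the fault path can
+ * retry after making room.
+ */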
+ if (ret != 1) {
+ if (WARN_ON(ret == 0))
+ return -EINVAL;
+ if (ret == -ENOSPC)
+ return -EAGAIN;
+ return ret;
+ }
reg->bus.base = 0;
reg->bus.offset = handle;
@@ -1697,7 +1695,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
- .invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 7dfbbbc1beea..15a3d40edf02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -222,22 +222,18 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
{
struct nouveau_drm *drm = nouveau_drm(minor->dev);
struct dentry *dentry;
- int i, ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
- dentry = debugfs_create_file(nouveau_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
- minor->debugfs_root, minor->dev,
- nouveau_debugfs_files[i].fops);
- if (!dentry)
- return -ENOMEM;
+ debugfs_create_file(nouveau_debugfs_files[i].name,
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root, minor->dev,
+ nouveau_debugfs_files[i].fops);
}
- ret = drm_debugfs_create_files(nouveau_debugfs_list,
- NOUVEAU_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- if (ret)
- return ret;
+ drm_debugfs_create_files(nouveau_debugfs_list,
+ NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
/* Set the size of the vbios since we know it, and it's confusing to
* userspace if it wants to seek() but the file has a length of 0
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 53f9bceaf17a..700817dc4fa0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -54,15 +54,10 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
}
int
-nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_enable(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- crtc = drm_crtc_from_index(dev, pipe);
- if (!crtc)
- return -EINVAL;
-
nv_crtc = nouveau_crtc(crtc);
nvif_notify_get(&nv_crtc->vblank);
@@ -70,15 +65,10 @@ nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
}
void
-nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_disable(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- crtc = drm_crtc_from_index(dev, pipe);
- if (!crtc)
- return;
-
nv_crtc = nouveau_crtc(crtc);
nvif_notify_put(&nv_crtc->vblank);
}
@@ -136,21 +126,13 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
}
bool
-nouveau_display_scanoutpos(struct drm_device *dev, unsigned int pipe,
+nouveau_display_scanoutpos(struct drm_crtc *crtc,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (nouveau_crtc(crtc)->index == pipe) {
- return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
- stime, etime);
- }
- }
-
- return false;
+ return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+ stime, etime);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 6e8e66882e45..de004018ab5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -61,11 +61,12 @@ int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
-int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
-void nouveau_display_vblank_disable(struct drm_device *, unsigned int);
-bool nouveau_display_scanoutpos(struct drm_device *, unsigned int,
- bool, int *, int *, ktime_t *,
- ktime_t *, const struct drm_display_mode *);
+int nouveau_display_vblank_enable(struct drm_crtc *crtc);
+void nouveau_display_vblank_disable(struct drm_crtc *crtc);
+bool nouveau_display_scanoutpos(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 0ad5d87b5a8e..ad89e09a0be3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -28,6 +28,7 @@
#include <nvif/class.h>
#include <nvif/object.h>
+#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
@@ -176,6 +177,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
.end = vmf->address + PAGE_SIZE,
.src = &src,
.dst = &dst,
+ .src_owner = drm->dev,
};
/*
@@ -526,6 +528,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
drm->dmem->pagemap.res = *res;
drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
+ drm->dmem->pagemap.owner = drm->dev;
if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
goto out_free;
@@ -669,12 +672,6 @@ out:
return ret;
}
-static inline bool
-nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
-{
- return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
-}
-
void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
struct hmm_range *range)
@@ -690,18 +687,12 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
if (page == NULL)
continue;
- if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
+ if (!is_device_private_page(page))
continue;
- }
-
- if (!nouveau_dmem_page(drm, page)) {
- WARN(1, "Some unknown device memory !\n");
- range->pfns[i] = 0;
- continue;
- }
addr = nouveau_dmem_page_addr(page);
range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
+ range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index b65ae817eabf..ca4087f5a15b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -618,6 +618,64 @@ nouveau_drm_device_fini(struct drm_device *dev)
kfree(drm);
}
+/*
+ * On some Intel PCIe bridge controllers, doing a
+ * D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs not to reappear.
+ * Skipping the intermediate D3hot step seems to make it work again. This is
+ * probably caused by not meeting the expectations the involved AML code has
+ * when the GPU is put into the D3hot state before invoking it.
+ *
+ * This leads to various manifestations of this issue:
+ * - AML code execution to power on the GPU hits an infinite loop (as the
+ * code waits on device memory to change).
+ * - kernel crashes, as all PCI reads return -1, which most code isn't able
+ * to handle well enough.
+ *
+ * In all cases dmesg will contain at least one line like this:
+ * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3'
+ * followed by a lot of nouveau timeouts.
+ *
+ * Deeper down, the \_SB.PCI0.PEG0.PG00._OFF code writes bit 0x80 to the
+ * undocumented PCI config space register 0x248 of the Intel PCIe bridge
+ * controller (0x1901) in order to change the state of the PCIe link between
+ * the PCIe port and the GPU. There are alternative code paths using other
+ * registers, which seem to work fine (executed on pre-Windows 8 systems):
+ * - 0xbc bit 0x20 (publicly available documentation claims 'reserved')
+ * - 0xb0 bit 0x10 (link disable)
+ * Changing the conditions inside the firmware by poking into the relevant
+ * addresses does resolve the issue, but the memory in question seemed to be
+ * ACPI-private and not device-accessible memory at all, so there is no
+ * portable way of changing the conditions.
+ * On an XPS 9560 that means bits [0,3] of \CPEX need to be cleared.
+ *
+ * The only systems where this behavior can be seen are hybrid graphics laptops
+ * with a secondary Nvidia Maxwell, Pascal or Turing GPU. It's unclear whether
+ * this issue occurs only in combination with the listed Intel PCIe bridge
+ * controllers and the mentioned GPUs, or with other devices as well.
+ *
+ * Documentation on the PCIe bridge controller can be found in the
+ * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2",
+ * Section "12 PCI Express* Controller (x16) Registers".
+ */
+
+static void quirk_broken_nv_runpm(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
+
+ if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (bridge->device) {
+ case 0x1901:
+ drm->old_pm_cap = pdev->pm_cap;
+ pdev->pm_cap = 0;
+ NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
+ break;
+ }
+}
+
static int nouveau_drm_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
@@ -699,6 +757,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm_dev_init;
+ quirk_broken_nv_runpm(pdev);
return 0;
fail_drm_dev_init:
@@ -734,7 +793,11 @@ static void
nouveau_drm_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ /* revert our workaround */
+ if (drm->old_pm_cap)
+ pdev->pm_cap = drm->old_pm_cap;
nouveau_drm_device_remove(dev);
pci_disable_device(pdev);
}
@@ -1120,11 +1183,6 @@ driver_stub = {
.debugfs_init = nouveau_drm_debugfs_init,
#endif
- .enable_vblank = nouveau_display_vblank_enable,
- .disable_vblank = nouveau_display_vblank_disable,
- .get_scanout_position = nouveau_display_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
.ioctls = nouveau_ioctls,
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
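
The quirk above works by hiding the GPU's PCI power-management capability
from the core: with pdev->pm_cap forced to 0, the PCI core's D-state helpers
decline to move the device out of D0, so the problematic D3hot transition
never happens, and the saved old_pm_cap lets nouveau_drm_remove() restore
normal behaviour. A hypothetical illustration of the early-out this relies
on (a sketch, not a quote of the PCI core):

	/* inside a pci_set_power_state()-style helper */
	if (!dev->pm_cap)
		return state == PCI_D0 ? 0 : -EIO;	/* no PM cap: stay in D0 */
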
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index c2c332fbde97..2a6519737800 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -140,6 +140,8 @@ struct nouveau_drm {
struct list_head clients;
+ u8 old_pm_cap;
+
struct {
struct agp_bridge_data *bridge;
u32 base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0c5cdda3c336..24d543a01f43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -558,14 +558,10 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
- ret = drm_fb_helper_init(dev, &fbcon->helper, 4);
+ ret = drm_fb_helper_init(dev, &fbcon->helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&fbcon->helper);
- if (ret)
- goto fini;
-
if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index df9bf1fd1bc0..645fedd77e21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -171,6 +171,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
mm = get_task_mm(current);
down_read(&mm->mmap_sem);
+ if (!cli->svm.svmm) {
+ up_read(&mm->mmap_sem);
+ return -EINVAL;
+ }
+
for (addr = args->va_start, end = args->va_start + size; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;
@@ -179,6 +184,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
if (!vma)
break;
+ addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
@@ -367,7 +373,6 @@ static const u64
nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
[HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
[HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
- [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
};
static const u64
@@ -541,7 +546,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
range.default_flags = 0;
range.pfn_flags_mask = -1UL;
down_read(&mm->mmap_sem);
- ret = hmm_range_fault(&range, 0);
+ ret = hmm_range_fault(&range);
up_read(&mm->mmap_sem);
if (ret <= 0) {
if (ret == 0 || ret == -EBUSY)
@@ -657,9 +662,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
if (start < svmm->unmanaged.limit)
limit = min_t(u64, limit, svmm->unmanaged.start);
- else
- if (limit > svmm->unmanaged.start)
- start = max_t(u64, start, svmm->unmanaged.limit);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
mm = svmm->notifier.mm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index d865d8aeac3c..c85dd8afa3c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index 50d583d63807..f194d354c1f5 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -8,6 +8,7 @@ nvif-y += nvif/fifo.o
nvif-y += nvif/mem.o
nvif-y += nvif/mmu.o
nvif-y += nvif/notify.o
+nvif-y += nvif/timer.o
nvif-y += nvif/vmm.o
# Usermode classes
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 1ec101ba3b42..0e92db44bbc8 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -27,11 +27,15 @@
u64
nvif_device_time(struct nvif_device *device)
{
- struct nv_device_time_v0 args = {};
- int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
- &args, sizeof(args));
- WARN_ON_ONCE(ret != 0);
- return args.time;
+ if (!device->user.func) {
+ struct nv_device_time_v0 args = {};
+ int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
+ &args, sizeof(args));
+ WARN_ON_ONCE(ret != 0);
+ return args.time;
+ }
+
+ return device->user.func->time(&device->user);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvif/timer.c b/drivers/gpu/drm/nouveau/nvif/timer.c
new file mode 100644
index 000000000000..602c1a258d10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/timer.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/timer.h>
+#include <nvif/device.h>
+
+s64
+nvif_timer_wait_test(struct nvif_timer_wait *wait)
+{
+ u64 time = nvif_device_time(wait->device);
+
+ if (wait->reads == 0) {
+ wait->time0 = time;
+ wait->time1 = time;
+ }
+
+ if (wait->time1 == time) {
+ if (WARN_ON(wait->reads++ == 16))
+ return -ETIMEDOUT;
+ } else {
+ wait->time1 = time;
+ wait->reads = 1;
+ }
+
+ if (wait->time1 - wait->time0 > wait->limit)
+ return -ETIMEDOUT;
+
+ return wait->time1 - wait->time0;
+}
+
+void
+nvif_timer_wait_init(struct nvif_device *device, u64 nsec,
+ struct nvif_timer_wait *wait)
+{
+ wait->device = device;
+ wait->limit = nsec;
+ wait->reads = 0;
+}
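
nvif_timer_wait_test() adds one behaviour the old open-coded macro lacked:
if sixteen consecutive reads of the GPU timer return the same value, it
WARNs and bails out with -ETIMEDOUT. A frozen PTIMER is exactly what a poll
site sees when the GPU has dropped off the bus (compare the runpm quirk
above, where all PCI reads return -1), and without this check such a loop
would spin forever, since the measured elapsed time never advances.
Hypothetical trace: time0 = time1 = 1000 on the first call; every later call
reads 1000 again, 'reads' climbs to 16, and the WARN fires even though
time1 - time0 == 0 is still within the limit.
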
diff --git a/drivers/gpu/drm/nouveau/nvif/userc361.c b/drivers/gpu/drm/nouveau/nvif/userc361.c
index 19f9958e7e01..1116f871b272 100644
--- a/drivers/gpu/drm/nouveau/nvif/userc361.c
+++ b/drivers/gpu/drm/nouveau/nvif/userc361.c
@@ -21,6 +21,19 @@
*/
#include <nvif/user.h>
+static u64
+nvif_userc361_time(struct nvif_user *user)
+{
+ u32 hi, lo;
+
+ do {
+ hi = nvif_rd32(&user->object, 0x084);
+ lo = nvif_rd32(&user->object, 0x080);
+ } while (hi != nvif_rd32(&user->object, 0x084));
+
+ return ((u64)hi << 32 | lo);
+}
+
static void
nvif_userc361_doorbell(struct nvif_user *user, u32 token)
{
@@ -30,4 +43,5 @@ nvif_userc361_doorbell(struct nvif_user *user, u32 token)
const struct nvif_user_func
nvif_userc361 = {
.doorbell = nvif_userc361_doorbell,
+ .time = nvif_userc361_time,
};
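
nvif_userc361_time() is the standard race-free read of a 64-bit counter
exposed as two 32-bit registers (low word at 0x080, high word at 0x084):
read high, then low, then re-read high and retry if the high word moved,
which guarantees the low word was sampled within a single high-word period.
The same shape in isolation, with placeholder accessors:

	static u64 read_split_counter(void)
	{
		u32 hi, lo;

		do {
			hi = read_hi();		/* placeholder */
			lo = read_lo();		/* placeholder */
		} while (hi != read_hi());	/* hi changed mid-read: retry */

		return ((u64)hi << 32) | lo;
	}
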
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index dd8f85b8b3a7..f2f5636efac4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1981,8 +1981,34 @@ gf100_gr_init_(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
struct nvkm_subdev *subdev = &base->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ bool reset = device->chipset == 0x137 || device->chipset == 0x138;
u32 ret;
+ /* On certain GP107/GP108 boards, we trigger a weird issue where
+ * GR will stop responding to PRI accesses after we've asked the
+ * SEC2 RTOS to boot the GR falcons. This happens far more
+ * frequently when cold-booting a board (i.e. returning from D3).
+ *
+ * The root cause for this is not known and has proven difficult
+ * to isolate, with many avenues being dead-ends.
+ *
+ * A workaround was discovered by Karol, whereby putting GR into
+ * reset for an extended period right before initialisation
+ * prevents the problem from occurring.
+ *
+ * XXX: As RM does not require any such workaround, this is more
+ * of a hack than a true fix.
+ */
+ reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset);
+ if (reset) {
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+ nvkm_rd32(device, 0x000200);
+ msleep(50);
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+ nvkm_rd32(device, 0x000200);
+ }
+
nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
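
Two details of the hunk above are worth calling out: the NvGrResetWar config
option lets the reset be forced on or off from the module's cfgopt string on
chipsets other than 0x137/0x138, and each nvkm_mask() of the master enable
register 0x000200 is followed by an nvkm_rd32() of the same register, the
usual idiom for flushing a posted MMIO write so that the msleep(50) really
measures time spent with GR held in reset.
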
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
index 232a9d7c51e5..e770c9497871 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -25,6 +25,9 @@
MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
static const struct nvkm_sec2_fwif
gp108_sec2_fwif[] = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index b6ebd95c9ba1..a8295653ceab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -56,6 +56,22 @@ tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
return 0;
}
+MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin");
+
static const struct nvkm_sec2_fwif
tu102_sec2_fwif[] = {
{ 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 9b91da09dc5f..8d9812a51ef6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name)
else
return ERR_PTR(-ENODEV);
+ if (!pdev->rom || pdev->romlen == 0)
+ return ERR_PTR(-ENODEV);
+
if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ priv->size = pdev->romlen;
if (ret = -ENODEV,
- (priv->rom = pci_platform_rom(pdev, &priv->size)))
+ (priv->rom = ioremap(pdev->rom, pdev->romlen)))
return priv;
kfree(priv);
}
@@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(ret);
}
+static void
+platform_fini(void *data)
+{
+ struct priv *priv = data;
+
+ iounmap(priv->rom);
+ kfree(priv);
+}
+
const struct nvbios_source
nvbios_platform = {
.name = "PLATFORM",
.init = platform_init,
- .fini = (void(*)(void *))kfree,
+ .fini = platform_fini,
.read = pcirom_read,
.rw = true,
};
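
platform_init() now validates and maps the ROM BAR itself, via
ioremap(pdev->rom, pdev->romlen), rather than going through the
pci_platform_rom() helper being removed from the PCI core, so teardown can
no longer be a bare kfree() cast: platform_fini() pairs the iounmap() with
the kfree(). The resulting lifetime pairing (a summary of the two paths
above, not new code):

	init:	priv->rom  = ioremap(pdev->rom, pdev->romlen);
		priv->size = pdev->romlen;
	fini:	iounmap(priv->rom);
		kfree(priv);
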
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index b562a8cd61bf..f2be594c7eff 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,28 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "OMAPDRM External Display Device Drivers"
-config DRM_OMAP_ENCODER_OPA362
- tristate "OPA362 external analog amplifier"
- help
- Driver for OPA362 external analog TV amplifier controlled
- through a GPIO.
-
-config DRM_OMAP_ENCODER_TPD12S015
- tristate "TPD12S015 HDMI ESD protection and level shifter"
- help
- Driver for TPD12S015, which offers HDMI ESD protection and level
- shifting.
-
-config DRM_OMAP_CONNECTOR_HDMI
- tristate "HDMI Connector"
- help
- Driver for a generic HDMI connector.
-
-config DRM_OMAP_CONNECTOR_ANALOG_TV
- tristate "Analog TV Connector"
- help
- Driver for a generic analog TV connector.
-
config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index cb76859dc574..488ddf153613 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,6 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
deleted file mode 100644
index 0d20fab605d7..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Analog TV Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct device *dev;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tvc_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void tvc_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static const struct omap_dss_device_ops tvc_ops = {
- .connect = tvc_connect,
- .disconnect = tvc_disconnect,
-};
-
-static int tvc_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->dev = &pdev->dev;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tvc_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tvc_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tvc_of_match[] = {
- { .compatible = "omapdss,svideo-connector", },
- { .compatible = "omapdss,composite-video-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tvc_of_match);
-
-static struct platform_driver tvc_connector_driver = {
- .probe = tvc_probe,
- .remove = __exit_p(tvc_remove),
- .driver = {
- .name = "connector-analog-tv",
- .of_match_table = tvc_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tvc_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Analog TV Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
deleted file mode 100644
index f5d69d810bb8..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HDMI Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- struct mutex hpd_lock;
-
- struct device *dev;
-
- struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int hdmic_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void hdmic_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static bool hdmic_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops hdmic_ops = {
- .connect = hdmic_connect,
- .disconnect = hdmic_disconnect,
-
- .detect = hdmic_detect,
- .register_hpd_cb = hdmic_register_hpd_cb,
- .unregister_hpd_cb = hdmic_unregister_hpd_cb,
-};
-
-static irqreturn_t hdmic_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (hdmic_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int hdmic_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->dev = &pdev->dev;
-
- mutex_init(&ddata->hpd_lock);
-
- /* HPD GPIO */
- gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse HPD gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->hpd_gpio = gpio;
-
- if (ddata->hpd_gpio) {
- r = devm_request_threaded_irq(&pdev->dev,
- gpiod_to_irq(ddata->hpd_gpio),
- NULL, hdmic_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- "hdmic hpd", ddata);
- if (r)
- return r;
- }
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &hdmic_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = ddata->hpd_gpio
- ? OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD
- : 0;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit hdmic_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id hdmic_of_match[] = {
- { .compatible = "omapdss,hdmi-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, hdmic_of_match);
-
-static struct platform_driver hdmi_connector_driver = {
- .probe = hdmic_probe,
- .remove = __exit_p(hdmic_remove),
- .driver = {
- .name = "connector-hdmi",
- .of_match_table = hdmic_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(hdmi_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("HDMI Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
deleted file mode 100644
index b992387ed674..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * OPA362 analog video amplifier with output/power control
- *
- * Copyright (C) 2014 Golden Delicious Computers
- * Author: H. Nikolaus Schaller <hns@goldelico.com>
- *
- * based on encoder-tfp410
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *enable_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int opa362_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void opa362_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static void opa362_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-}
-
-static void opa362_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-}
-
-static const struct omap_dss_device_ops opa362_ops = {
- .connect = opa362_connect,
- .disconnect = opa362_disconnect,
- .enable = opa362_enable,
- .disable = opa362_disable,
-};
-
-static int opa362_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
-
- dev_dbg(&pdev->dev, "probe\n");
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->enable_gpio = gpio;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &opa362_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit opa362_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- opa362_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id opa362_of_match[] = {
- { .compatible = "omapdss,ti,opa362", },
- {},
-};
-MODULE_DEVICE_TABLE(of, opa362_of_match);
-
-static struct platform_driver opa362_driver = {
- .probe = opa362_probe,
- .remove = __exit_p(opa362_remove),
- .driver = {
- .name = "amplifier-opa362",
- .of_match_table = opa362_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(opa362_driver);
-
-MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
-MODULE_DESCRIPTION("OPA362 analog video amplifier with output/power control");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
deleted file mode 100644
index 089105c5aa0a..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * TPD12S015 HDMI ESD protection & level shifter chip driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/mutex.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- struct mutex hpd_lock;
-
- struct gpio_desc *ct_cp_hpd_gpio;
- struct gpio_desc *ls_oe_gpio;
- struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tpd_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
-
- /* DC-DC converter needs at max 300us to get to 90% of 5V */
- udelay(300);
-
- return 0;
-}
-
-static void tpd_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
-
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static bool tpd_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void tpd_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops tpd_ops = {
- .connect = tpd_connect,
- .disconnect = tpd_disconnect,
- .detect = tpd_detect,
- .register_hpd_cb = tpd_register_hpd_cb,
- .unregister_hpd_cb = tpd_unregister_hpd_cb,
-};
-
-static irqreturn_t tpd_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (tpd_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int tpd_probe(struct platform_device *pdev)
-{
- struct omap_dss_device *dssdev;
- struct panel_drv_data *ddata;
- int r;
- struct gpio_desc *gpio;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->ct_cp_hpd_gpio = gpio;
-
- gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->ls_oe_gpio = gpio;
-
- gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
- GPIOD_IN);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->hpd_gpio = gpio;
-
- mutex_init(&ddata->hpd_lock);
-
- r = devm_request_threaded_irq(&pdev->dev, gpiod_to_irq(ddata->hpd_gpio),
- NULL, tpd_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "tpd12s015 hpd", ddata);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tpd_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_HPD;
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tpd_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tpd_of_match[] = {
- { .compatible = "omapdss,ti,tpd12s015", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tpd_of_match);
-
-static struct platform_driver tpd_driver = {
- .probe = tpd_probe,
- .remove = __exit_p(tpd_remove),
- .driver = {
- .name = "tpd12s015",
- .of_match_table = tpd_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tpd_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("TPD12S015 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 564e3e1a1891..3484b5d4a91c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -678,7 +678,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- ddata->enabled = 1;
+ ddata->enabled = true;
if (!ddata->intro_printed) {
dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
@@ -729,7 +729,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
if (ddata->vpnl)
regulator_disable(ddata->vpnl);
- ddata->enabled = 0;
+ ddata->enabled = false;
}
static int dsicm_panel_reset(struct panel_drv_data *ddata)
@@ -1265,7 +1265,7 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->display = true;
dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
+ dssdev->of_port = 0;
dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 5950c3f52c2e..f967e6948f2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o dss-of.o output.o
+omapdss-base-y := base.o display.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index a1970b9db6ab..c7650a7c155d 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -149,8 +149,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
goto done;
}
- if (dssdev->id &&
- (dssdev->next || dssdev->bridge || dssdev->panel))
+ if (dssdev->id && (dssdev->next || dssdev->bridge))
goto done;
}
@@ -185,11 +184,10 @@ int omapdss_device_connect(struct dss_device *dss,
if (!dst) {
/*
* The destination is NULL when the source is connected to a
- * bridge or panel instead of a DSS device. Stop here, we will
- * attach the bridge or panel later when we will have a DRM
- * encoder.
+ * bridge instead of a DSS device. Stop here, we will attach
+ * the bridge later when we will have a DRM encoder.
*/
- return src && (src->bridge || src->panel) ? 0 : -EINVAL;
+ return src && src->bridge ? 0 : -EINVAL;
}
if (omapdss_device_is_connected(dst))
@@ -197,10 +195,12 @@ int omapdss_device_connect(struct dss_device *dss,
dst->dss = dss;
- ret = dst->ops->connect(src, dst);
- if (ret < 0) {
- dst->dss = NULL;
- return ret;
+ if (dst->ops && dst->ops->connect) {
+ ret = dst->ops->connect(src, dst);
+ if (ret < 0) {
+ dst->dss = NULL;
+ return ret;
+ }
}
return 0;
@@ -217,7 +217,7 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
dst ? dev_name(dst->dev) : "NULL");
if (!dst) {
- WARN_ON(!src->bridge && !src->panel);
+ WARN_ON(!src->bridge);
return;
}
@@ -228,29 +228,18 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
- dst->ops->disconnect(src, dst);
+ if (dst->ops && dst->ops->disconnect)
+ dst->ops->disconnect(src, dst);
dst->dss = NULL;
}
EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- omapdss_device_pre_enable(dssdev->next);
-
- if (dssdev->ops->pre_enable)
- dssdev->ops->pre_enable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_pre_enable);
-
void omapdss_device_enable(struct omap_dss_device *dssdev)
{
if (!dssdev)
return;
- if (dssdev->ops->enable)
+ if (dssdev->ops && dssdev->ops->enable)
dssdev->ops->enable(dssdev);
omapdss_device_enable(dssdev->next);
@@ -266,25 +255,11 @@ void omapdss_device_disable(struct omap_dss_device *dssdev)
omapdss_device_disable(dssdev->next);
- if (dssdev->ops->disable)
+ if (dssdev->ops && dssdev->ops->disable)
dssdev->ops->disable(dssdev);
}
EXPORT_SYMBOL_GPL(omapdss_device_disable);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- if (dssdev->ops->post_disable)
- dssdev->ops->post_disable(dssdev);
-
- omapdss_device_post_disable(dssdev->next);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_post_disable);
-
/* -----------------------------------------------------------------------------
* Components Handling
*/
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8a3f61f5825f..3b82158b1bfd 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -40,15 +40,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL_GPL(omapdss_display_init);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
-{
- while (output->next)
- output = output->next;
-
- return omapdss_device_get(output);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get);
-
int omapdss_display_get_modes(struct drm_connector *connector,
const struct videomode *vm)
{
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 95147437b990..5110acb0c6c1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -9,20 +9,22 @@
#define DSS_SUBSYS_NAME "DPI"
-#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
-#include <linux/of.h>
-#include <linux/clk.h>
#include <linux/sys_soc.h>
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
#include "dss.h"
+#include "omapdss.h"
struct dpi_data {
struct platform_device *pdev;
@@ -34,19 +36,19 @@ struct dpi_data {
enum dss_clk_source clk_src;
struct dss_pll *pll;
- struct mutex lock;
-
struct dss_lcd_mgr_config mgr_config;
unsigned long pixelclock;
int data_lines;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
-{
- return container_of(dssdev, struct dpi_data, output);
-}
+#define drm_bridge_to_dpi(bridge) container_of(bridge, struct dpi_data, bridge)
+
+/* -----------------------------------------------------------------------------
+ * Clock Handling and PLL
+ */
static enum dss_clk_source dpi_get_clk_src_dra7xx(struct dpi_data *dpi,
enum omap_channel channel)
@@ -283,9 +285,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
-static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
- unsigned long pck_req, unsigned long *fck, int *lck_div,
- int *pck_div)
+static int dpi_set_pll_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -299,19 +299,15 @@ static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
if (r)
return r;
- dss_select_lcd_clk_source(dpi->dss, channel, dpi->clk_src);
+ dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
+ dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
- *lck_div = ctx.dispc_cinfo.lck_div;
- *pck_div = ctx.dispc_cinfo.pck_div;
-
return 0;
}
-static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
- unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -327,29 +323,19 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.fck;
- *lck_div = ctx.dispc_cinfo.lck_div;
- *pck_div = ctx.dispc_cinfo.pck_div;
-
return 0;
}
static int dpi_set_mode(struct dpi_data *dpi)
{
- int lck_div = 0, pck_div = 0;
- unsigned long fck = 0;
- int r = 0;
+ int r;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
- dpi->pixelclock, &fck, &lck_div, &pck_div);
+ r = dpi_set_pll_clk(dpi, dpi->pixelclock);
else
- r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck,
- &lck_div, &pck_div);
- if (r)
- return r;
+ r = dpi_set_dispc_clk(dpi, dpi->pixelclock);
- return 0;
+ return r;
}
static void dpi_config_lcd_manager(struct dpi_data *dpi)
@@ -366,25 +352,149 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
-static void dpi_display_enable(struct omap_dss_device *dssdev)
+static int dpi_clock_update(struct dpi_data *dpi, unsigned long *clock)
+{
+ int lck_div, pck_div;
+ unsigned long fck;
+ struct dpi_clk_calc_ctx ctx;
+
+ if (dpi->pll) {
+ if (!dpi_pll_clk_calc(dpi, *clock, &ctx))
+ return -EINVAL;
+
+ fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
+ } else {
+ if (!dpi_dss_clk_calc(dpi, *clock, &ctx))
+ return -EINVAL;
+
+ fck = ctx.fck;
+ }
+
+ lck_div = ctx.dispc_cinfo.lck_div;
+ pck_div = ctx.dispc_cinfo.pck_div;
+
+ *clock = fck / lck_div / pck_div;
+
+ return 0;
+}
+
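+/*
+ * Worked example with made-up numbers: if the calculator settles on
+ * fck = 153.6 MHz with lck_div = 1 and pck_div = 2, dpi_clock_update()
+ * rewrites *clock to 153600000 / 1 / 2 = 76800000; dpi_bridge_mode_fixup()
+ * below then stores this back as adjusted_mode->clock = 76800 (kHz).
+ */
+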
+static int dpi_verify_pll(struct dss_pll *pll)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- struct omap_dss_device *out = &dpi->output;
int r;
- mutex_lock(&dpi->lock);
+ /* do initial setup with the PLL to see if it is operational */
+
+ r = dss_pll_enable(pll);
+ if (r)
+ return r;
+
+ dss_pll_disable(pll);
+
+ return 0;
+}
+
+static void dpi_init_pll(struct dpi_data *dpi)
+{
+ struct dss_pll *pll;
+
+ if (dpi->pll)
+ return;
+
+ dpi->clk_src = dpi_get_clk_src(dpi);
+
+ pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
+ if (!pll)
+ return;
+
+ if (dpi_verify_pll(pll)) {
+ DSSWARN("PLL not operational\n");
+ return;
+ }
+
+ dpi->pll = pll;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dpi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ dpi_init_pll(dpi);
+
+ return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+dpi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ unsigned long clock = mode->clock * 1000;
+ int ret;
+
+ if (mode->hdisplay % 8 != 0)
+ return MODE_BAD_WIDTH;
+
+ if (mode->clock == 0)
+ return MODE_NOCLOCK;
+
+ ret = dpi_clock_update(dpi, &clock);
+ if (ret < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool dpi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ unsigned long clock = mode->clock * 1000;
+ int ret;
+
+ ret = dpi_clock_update(dpi, &clock);
+ if (ret < 0)
+ return false;
+
+ adjusted_mode->clock = clock / 1000;
+
+ return true;
+}
+
+static void dpi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+ dpi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void dpi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ int r;
if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
- goto err_reg_enable;
+ return;
}
r = dispc_runtime_get(dpi->dss->dispc);
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel);
+ r = dss_dpi_select_source(dpi->dss, dpi->id, dpi->output.dispc_channel);
if (r)
goto err_src_sel;
@@ -406,8 +516,6 @@ static void dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_mgr_enable;
- mutex_unlock(&dpi->lock);
-
return;
err_mgr_enable:
@@ -420,15 +528,11 @@ err_src_sel:
err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
-err_reg_enable:
- mutex_unlock(&dpi->lock);
}
-static void dpi_display_disable(struct omap_dss_device *dssdev)
+static void dpi_bridge_disable(struct drm_bridge *bridge)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- mutex_lock(&dpi->lock);
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
dss_mgr_disable(&dpi->output);
@@ -442,99 +546,34 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
-
- mutex_unlock(&dpi->lock);
}
-static void dpi_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- DSSDBG("dpi_set_timings\n");
-
- mutex_lock(&dpi->lock);
-
- dpi->pixelclock = mode->clock * 1000;
-
- mutex_unlock(&dpi->lock);
-}
+static const struct drm_bridge_funcs dpi_bridge_funcs = {
+ .attach = dpi_bridge_attach,
+ .mode_valid = dpi_bridge_mode_valid,
+ .mode_fixup = dpi_bridge_mode_fixup,
+ .mode_set = dpi_bridge_mode_set,
+ .enable = dpi_bridge_enable,
+ .disable = dpi_bridge_disable,
+};
-static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
+static void dpi_bridge_init(struct dpi_data *dpi)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- int lck_div, pck_div;
- unsigned long fck;
- unsigned long pck;
- struct dpi_clk_calc_ctx ctx;
- bool ok;
+ dpi->bridge.funcs = &dpi_bridge_funcs;
+ dpi->bridge.of_node = dpi->pdev->dev.of_node;
+ dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
- if (mode->hdisplay % 8 != 0)
- return -EINVAL;
-
- if (mode->clock == 0)
- return -EINVAL;
-
- if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx);
- if (!ok)
- return -EINVAL;
-
- fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
- } else {
- ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx);
- if (!ok)
- return -EINVAL;
-
- fck = ctx.fck;
- }
-
- lck_div = ctx.dispc_cinfo.lck_div;
- pck_div = ctx.dispc_cinfo.pck_div;
-
- pck = fck / lck_div / pck_div;
-
- mode->clock = pck / 1000;
-
- return 0;
+ drm_bridge_add(&dpi->bridge);
}
-static int dpi_verify_pll(struct dss_pll *pll)
+static void dpi_bridge_cleanup(struct dpi_data *dpi)
{
- int r;
-
- /* do initial setup with the PLL to see if it is operational */
-
- r = dss_pll_enable(pll);
- if (r)
- return r;
-
- dss_pll_disable(pll);
-
- return 0;
+ drm_bridge_remove(&dpi->bridge);
}
-static void dpi_init_pll(struct dpi_data *dpi)
-{
- struct dss_pll *pll;
-
- if (dpi->pll)
- return;
-
- dpi->clk_src = dpi_get_clk_src(dpi);
-
- pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
- if (!pll)
- return;
-
- if (dpi_verify_pll(pll)) {
- DSSWARN("PLL not operational\n");
- return;
- }
-
- dpi->pll = pll;
-}
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
/*
* Return a hardcoded channel for the DPI output. This should work for
@@ -572,39 +611,14 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi)
}
}
-static int dpi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
-
- dpi_init_pll(dpi);
-
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void dpi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops dpi_ops = {
- .connect = dpi_connect,
- .disconnect = dpi_disconnect,
-
- .enable = dpi_display_enable,
- .disable = dpi_display_disable,
-
- .check_timings = dpi_check_timings,
- .set_timings = dpi_set_timings,
-};
-
static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
struct omap_dss_device *out = &dpi->output;
u32 port_num = 0;
int r;
+ dpi_bridge_init(dpi);
+
of_property_read_u32(port, "reg", &port_num);
dpi->id = port_num <= 2 ? port_num : 0;
@@ -625,13 +639,14 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->id = OMAP_DSS_OUTPUT_DPI;
out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
- out->of_ports = BIT(port_num);
- out->ops = &dpi_ops;
+ out->of_port = port_num;
out->owner = THIS_MODULE;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &dpi->bridge);
+ if (r < 0) {
+ dpi_bridge_cleanup(dpi);
return r;
+ }
omapdss_device_register(out);
@@ -645,8 +660,14 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ dpi_bridge_cleanup(dpi);
}
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
+
static const struct soc_device_attribute dpi_soc_devices[] = {
{ .machine = "OMAP3[456]*" },
{ .machine = "[AD]M37*" },
@@ -706,8 +727,6 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
dpi->dss = dss;
port->data = dpi;
- mutex_init(&dpi->lock);
-
r = dpi_init_regulator(dpi);
if (r)
return r;
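The clock handling that dpi_set_pll_clk() and dpi_set_dispc_clk() used to duplicate is now centralised in dpi_clock_update(), which both mode_valid and mode_fixup call: the requested rate is rounded to what the functional clock and the two DISPC dividers can actually produce, and the result is written back through the *clock pointer. A minimal standalone sketch of that arithmetic, with made-up clock and divider values (the driver derives the real ones via dpi_pll_clk_calc()/dpi_dss_clk_calc()):

#include <stdio.h>

int main(void)
{
	/* Hypothetical values for illustration only. */
	unsigned long fck = 432000000;		/* functional clock, Hz */
	int lck_div = 1, pck_div = 6;		/* DISPC logic/pixel dividers */
	unsigned long requested = 71100000;	/* requested pixel clock, Hz */

	/* Same expression as dpi_clock_update(): the achievable rate. */
	unsigned long pck = fck / lck_div / pck_div;

	printf("requested %lu Hz, achievable %lu Hz\n", requested, pck);

	/* drm_display_mode::clock is in kHz, hence the /1000 when
	 * mode_fixup writes the result into adjusted_mode->clock. */
	printf("adjusted_mode->clock = %lu kHz\n", pck / 1000);
	return 0;
}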
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index da16ea095f13..79ddfbfd1b58 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5116,12 +5116,12 @@ static int dsi_init_output(struct dsi_data *dsi)
out->dispc_channel = dsi_get_channel(dsi);
out->ops = &dsi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
+ out->of_port = 0;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
| DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_device_init_output(out);
+ r = omapdss_device_init_output(out, NULL);
if (r < 0)
return r;
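The of_ports bitmask is replaced throughout the series by a plain of_port index, since each output only ever set a single bit. The two encodings are mechanically equivalent: the old code stored BIT(n) and output.c recovered n with ffs(out->of_ports) - 1. A small standalone check of that identity:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int port;

	for (port = 0; port < 3; port++) {
		unsigned int of_ports = BIT(port);	/* old bitmask field */

		/* ffs() returns the 1-based index of the lowest set bit,
		 * so ffs(BIT(n)) - 1 == n, the new of_port value. */
		printf("BIT(%u) = 0x%x -> ffs() - 1 = %d\n",
		       port, of_ports, ffs((int)of_ports) - 1);
	}
	return 0;
}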
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
deleted file mode 100644
index b7981f3b80ad..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-
-#include "omapdss.h"
-
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
-{
- struct device_node *remote_node;
- struct omap_dss_device *dssdev;
-
- remote_node = of_graph_get_remote_node(node, port, 0);
- if (!remote_node)
- return NULL;
-
- dssdev = omapdss_find_device_by_node(remote_node);
- of_node_put(remote_node);
-
- return dssdev ? dssdev : ERR_PTR(-EPROBE_DEFER);
-}
-EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 225ec808b01a..4d5739fa4a5d 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1151,46 +1151,38 @@ static const struct dss_features dra7xx_dss_feats = {
.has_lcd_clk_src = true,
};
-static int dss_init_ports(struct dss_device *dss)
+static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
unsigned int i;
- int r;
- for (i = 0; i < dss->feat->num_ports; i++) {
+ for (i = 0; i < num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- r = dpi_init_port(dss, pdev, port, dss->feat->model);
- if (r)
- return r;
+ dpi_uninit_port(port);
break;
-
case OMAP_DISPLAY_TYPE_SDI:
- r = sdi_init_port(dss, pdev, port);
- if (r)
- return r;
+ sdi_uninit_port(port);
break;
-
default:
break;
}
}
-
- return 0;
}
-static void dss_uninit_ports(struct dss_device *dss)
+static int dss_init_ports(struct dss_device *dss)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
- int i;
+ unsigned int i;
+ int r;
for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
@@ -1199,15 +1191,32 @@ static void dss_uninit_ports(struct dss_device *dss)
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_uninit_port(port);
+ r = dpi_init_port(dss, pdev, port, dss->feat->model);
+ if (r)
+ goto error;
break;
+
case OMAP_DISPLAY_TYPE_SDI:
- sdi_uninit_port(port);
+ r = sdi_init_port(dss, pdev, port);
+ if (r)
+ goto error;
break;
+
default:
break;
}
}
+
+ return 0;
+
+error:
+ __dss_uninit_ports(dss, i);
+ return r;
+}
+
+static void dss_uninit_ports(struct dss_device *dss)
+{
+ __dss_uninit_ports(dss, dss->feat->num_ports);
}
static int dss_video_pll_probe(struct dss_device *dss)
@@ -1339,9 +1348,15 @@ static int dss_component_compare(struct device *dev, void *data)
return dev == child;
}
+struct dss_component_match_data {
+ struct device *dev;
+ struct component_match **match;
+};
+
static int dss_add_child_component(struct device *dev, void *data)
{
- struct component_match **match = data;
+ struct dss_component_match_data *cmatch = data;
+ struct component_match **match = cmatch->match;
/*
* HACK
@@ -1352,7 +1367,17 @@ static int dss_add_child_component(struct device *dev, void *data)
if (strstr(dev_name(dev), "rfbi"))
return 0;
- component_match_add(dev->parent, match, dss_component_compare, dev);
+ /*
+ * Handle possible interconnect target modules defined within the DSS.
+ * The DSS components can be children of an interconnect target module
+ * after the device tree has been updated for the module data.
+ * See also omapdss_boot_init() for compatible fixup.
+ */
+ if (strstr(dev_name(dev), "target-module"))
+ return device_for_each_child(dev, cmatch,
+ dss_add_child_component);
+
+ component_match_add(cmatch->dev, match, dss_component_compare, dev);
return 0;
}
@@ -1395,6 +1420,7 @@ static int dss_probe_hardware(struct dss_device *dss)
static int dss_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
+ struct dss_component_match_data cmatch;
struct component_match *match = NULL;
struct resource *dss_mem;
struct dss_device *dss;
@@ -1472,7 +1498,9 @@ static int dss_probe(struct platform_device *pdev)
omapdss_gather_components(&pdev->dev);
- device_for_each_child(&pdev->dev, &match, dss_add_child_component);
+ cmatch.dev = &pdev->dev;
+ cmatch.match = &match;
+ device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
if (r)
@@ -1543,7 +1571,8 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
for_each_dss_output(dssdev) {
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
+ dssdev->ops && dssdev->ops->disable)
dssdev->ops->disable(dssdev);
}
}
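dss_init_ports() previously returned on the first failure without tearing down the ports already brought up; the rework factors the teardown loop into __dss_uninit_ports() and passes the loop index at the point of failure, so only ports 0..i-1 are unwound. The pattern in isolation, with stub init/uninit functions and a forced failure on port 2:

#include <stdio.h>

#define NUM_PORTS 4

/* Stand-ins for dpi_init_port()/sdi_init_port() and their uninit
 * counterparts; port 2 is made to fail to show the rollback. */
static int init_port(unsigned int i)
{
	if (i == 2)
		return -1;
	printf("init port %u\n", i);
	return 0;
}

static void uninit_port(unsigned int i)
{
	printf("uninit port %u\n", i);
}

/* Equivalent of __dss_uninit_ports(): tear down the first num_ports. */
static void uninit_ports(unsigned int num_ports)
{
	unsigned int i;

	for (i = 0; i < num_ports; i++)
		uninit_port(i);
}

int main(void)
{
	unsigned int i;
	int r = 0;

	for (i = 0; i < NUM_PORTS; i++) {
		r = init_port(i);
		if (r)
			goto error;
	}
	return 0;

error:
	/* Only ports 0..i-1 were initialised, so only they are undone,
	 * exactly as dss_init_ports() now does on failure. */
	uninit_ports(i);
	return r;
}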
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index c867552c925c..3a40833d3368 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -14,6 +14,7 @@
#include <linux/hdmi.h>
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
+#include <drm/drm_bridge.h>
#include "omapdss.h"
#include "dss.h"
@@ -364,6 +365,7 @@ struct omap_hdmi {
bool core_enabled;
struct omap_dss_device output;
+ struct drm_bridge bridge;
struct platform_device *audio_pdev;
void (*audio_abort_cb)(struct device *dev);
@@ -378,6 +380,6 @@ struct omap_hdmi {
bool display_enabled;
};
-#define dssdev_to_hdmi(dssdev) container_of(dssdev, struct omap_hdmi, output)
+#define drm_bridge_to_hdmi(b) container_of(b, struct omap_hdmi, bridge)
#endif
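drm_bridge_to_hdmi() follows the usual kernel idiom: struct drm_bridge is embedded by value in struct omap_hdmi, and container_of() walks back from the member pointer the bridge callbacks receive to the enclosing driver structure. A standalone model of the idiom (the real macro lives in <linux/kernel.h>):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_bridge {		/* simplified stand-in */
	int dummy;
};

struct omap_hdmi {
	int id;
	struct drm_bridge bridge;	/* embedded by value */
};

#define drm_bridge_to_hdmi(b) container_of(b, struct omap_hdmi, bridge)

int main(void)
{
	struct omap_hdmi hdmi = { .id = 4 };
	/* Bridge callbacks only receive a pointer to the member... */
	struct drm_bridge *b = &hdmi.bridge;

	/* ...and container_of() walks back to the enclosing structure. */
	printf("recovered id = %d\n", drm_bridge_to_hdmi(b)->id);
	return 0;
}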
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 0f557fad4513..2578c95570f6 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -28,6 +28,9 @@
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
#include "omapdss.h"
#include "hdmi4_core.h"
#include "hdmi4_cec.h"
@@ -237,20 +240,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- mutex_lock(&hdmi->lock);
-
- drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
- dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
- mutex_unlock(&hdmi->lock);
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -272,57 +261,139 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
return 0;
}
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
+static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- int r;
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi4_audio_start(&hd->core, &hd->wp);
+}
- mutex_lock(&hdmi->lock);
+static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+{
+ hdmi4_audio_stop(&hd->core, &hd->wp);
+ hdmi_wp_audio_enable(&hd->wp, false);
+}
- r = hdmi_runtime_get(hdmi);
- BUG_ON(r);
+int hdmi4_core_enable(struct hdmi_core_data *core)
+{
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+ int r = 0;
- r = hdmi4_read_edid(&hdmi->core, buf, len);
+ DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ r = hdmi_power_on_core(hdmi);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
- hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
+ return 0;
+err0:
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_start_audio_stream(struct omap_hdmi *hd)
+void hdmi4_core_disable(struct hdmi_core_data *core)
{
- hdmi_wp_audio_enable(&hd->wp, true);
- hdmi4_audio_start(&hd->core, &hd->wp);
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+
+ DSSDBG("Enter omapdss_hdmi4_core_disable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ hdmi_power_off_core(hdmi);
+
+ mutex_unlock(&hdmi->lock);
}
-static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- hdmi4_audio_stop(&hd->core, &hd->wp);
- hdmi_wp_audio_enable(&hd->wp, false);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ bridge, flags);
}
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi4_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ mutex_lock(&hdmi->lock);
+
+ drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+ dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+ mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi4_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
unsigned long flags;
- int r;
+ int ret;
+
+ /*
+ * None of these should fail, as the bridge can't be enabled without a
+ * valid CRTC to connector path with fully populated new states.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+ ? HDMI_HDMI : HDMI_DVI;
- DSSDBG("ENTER hdmi_display_enable\n");
+ if (connector->display_info.is_hdmi) {
+ const struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi;
+
+ mode = &crtc_state->adjusted_mode;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ mode);
+ if (ret == 0)
+ hdmi->cfg.infoframe = avi;
+ }
mutex_lock(&hdmi->lock);
- r = hdmi_power_on_full(hdmi);
- if (r) {
+ ret = hdmi_power_on_full(hdmi);
+ if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
- r = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
- &hdmi->audio_config,
- hdmi->cfg.vm.pixelclock);
- if (r) {
- DSSERR("Error restoring audio configuration: %d", r);
+ ret = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
+ if (ret) {
+ DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
@@ -338,13 +409,12 @@ done:
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi4_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
- DSSDBG("Enter hdmi_display_disable\n");
-
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
@@ -357,58 +427,21 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi->lock);
}
-int hdmi4_core_enable(struct hdmi_core_data *core)
+static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
{
- struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
-
- mutex_lock(&hdmi->lock);
-
- r = hdmi_power_on_core(hdmi);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
- }
-
- mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
-}
-
-void hdmi4_core_disable(struct hdmi_core_data *core)
-{
- struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
-
- DSSDBG("Enter omapdss_hdmi4_core_disable\n");
-
- mutex_lock(&hdmi->lock);
-
- hdmi_power_off_core(hdmi);
-
- mutex_unlock(&hdmi->lock);
-}
-
-static int hdmi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
-static void hdmi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
+ if (status == connector_status_disconnected)
+ hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
+static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct edid *edid = NULL;
+ unsigned int cec_addr;
bool need_enable;
int r;
@@ -417,63 +450,65 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
if (need_enable) {
r = hdmi4_core_enable(&hdmi->core);
if (r)
- return r;
+ return NULL;
}
- r = read_edid(hdmi, edid, len);
- if (r >= 256)
- hdmi4_cec_set_phys_addr(&hdmi->core,
- cec_get_edid_phys_addr(edid, r, NULL));
- else
- hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
- if (need_enable)
- hdmi4_core_disable(&hdmi->core);
+ mutex_lock(&hdmi->lock);
+ r = hdmi_runtime_get(hdmi);
+ BUG_ON(r);
- return r;
-}
+ r = hdmi4_core_ddc_init(&hdmi->core);
+ if (r)
+ goto done;
-static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core);
- hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
-}
+done:
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ if (edid && edid->extensions) {
+ unsigned int len = (edid->extensions + 1) * EDID_LENGTH;
- hdmi->cfg.infoframe = *avi;
- return 0;
-}
+ cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL);
+ } else {
+ cec_addr = CEC_PHYS_ADDR_INVALID;
+ }
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ hdmi4_cec_set_phys_addr(&hdmi->core, cec_addr);
- hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
- return 0;
-}
+ if (need_enable)
+ hdmi4_core_disable(&hdmi->core);
-static const struct omap_dss_device_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
+ return edid;
+}
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
+static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
+ .attach = hdmi4_bridge_attach,
+ .mode_set = hdmi4_bridge_mode_set,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = hdmi4_bridge_enable,
+ .atomic_disable = hdmi4_bridge_disable,
+ .hpd_notify = hdmi4_bridge_hpd_notify,
+ .get_edid = hdmi4_bridge_get_edid,
+};
- .set_timings = hdmi_display_set_timings,
+static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
+{
+ hdmi->bridge.funcs = &hdmi4_bridge_funcs;
+ hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- .read_edid = hdmi_read_edid,
+ drm_bridge_add(&hdmi->bridge);
+}
- .hdmi = {
- .lost_hotplug = hdmi_lost_hotplug,
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
- },
-};
+static void hdmi4_bridge_cleanup(struct omap_hdmi *hdmi)
+{
+ drm_bridge_remove(&hdmi->bridge);
+}
/* -----------------------------------------------------------------------------
* Audio Callbacks
@@ -666,19 +701,21 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
struct omap_dss_device *out = &hdmi->output;
int r;
+ hdmi4_bridge_init(hdmi);
+
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &hdmi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
- out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+ out->of_port = 0;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &hdmi->bridge);
+ if (r < 0) {
+ hdmi4_bridge_cleanup(hdmi);
return r;
+ }
omapdss_device_register(out);
@@ -691,6 +728,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ hdmi4_bridge_cleanup(hdmi);
}
static int hdmi4_probe_of(struct omap_hdmi *hdmi)
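Every output converted in this series (dpi, sdi, hdmi4, hdmi5) uses the same attach shape: the bridge insists on DRM_BRIDGE_ATTACH_NO_CONNECTOR, since connector creation is handled centrally rather than by each bridge, and then chains itself to the downstream bridge. A generic sketch of the pattern; "example_encoder" and "bridge_to_example()" are hypothetical names:

static int example_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct example_encoder *enc = bridge_to_example(bridge);	/* hypothetical */

	/* Connector creation is not the bridge's job in this design. */
	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	/* Chain to the downstream bridge, propagating the flags. */
	return drm_bridge_attach(bridge->encoder, enc->output.next_bridge,
				 bridge, flags);
}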
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ea5d5c228534..751985a2679a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -32,7 +32,7 @@ static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
return core->base + HDMI_CORE_AV;
}
-static int hdmi_core_ddc_init(struct hdmi_core_data *core)
+int hdmi4_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
@@ -74,13 +74,11 @@ static int hdmi_core_ddc_init(struct hdmi_core_data *core)
return 0;
}
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
- u8 *pedid, int ext)
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u32 i;
- char checksum;
- u32 offset = 0;
/* HDMI_CORE_DDC_STATUS_IN_PROG */
if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
@@ -89,24 +87,21 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
return -ETIMEDOUT;
}
- if (ext % 2 != 0)
- offset = 0x80;
-
/* Load Segment Address Register */
- REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, block / 2, 7, 0);
/* Load Slave Address Register */
REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
/* Load Offset Address Register */
- REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, block % 2 ? 0x80 : 0, 7, 0);
/* Load Byte Count */
- REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, len, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
/* Set DDC_CMD */
- if (ext)
+ if (block)
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
else
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
@@ -122,7 +117,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
return -EIO;
}
- for (i = 0; i < 0x80; ++i) {
+ for (i = 0; i < len; ++i) {
int t;
/* IN_PROG */
@@ -141,48 +136,12 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
udelay(1);
}
- pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
- }
-
- checksum = 0;
- for (i = 0; i < 0x80; ++i)
- checksum += pedid[i];
-
- if (checksum != 0) {
- DSSERR("E-EDID checksum failed!!\n");
- return -EIO;
+ buf[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
}
return 0;
}
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
- int r, l;
-
- if (len < 128)
- return -EINVAL;
-
- r = hdmi_core_ddc_init(core);
- if (r)
- return r;
-
- r = hdmi_core_ddc_edid(core, edid, 0);
- if (r)
- return r;
-
- l = 128;
-
- if (len >= 128 * 2 && edid[0x7e] > 0) {
- r = hdmi_core_ddc_edid(core, edid + 0x80, 1);
- if (r)
- return r;
- l += 128;
- }
-
- return l;
-}
-
static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
{
DSSDBG("Enter hdmi_core_init\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index 11c4b7ba1eee..dc64ae2aa300 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -249,7 +249,9 @@ struct hdmi_core_packet_enable_repeat {
u32 generic_pkt_repeat;
};
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+int hdmi4_core_ddc_init(struct hdmi_core_data *core);
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+
void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index d9463b332554..4d4c1fabd0a1 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -31,6 +31,9 @@
#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
@@ -236,20 +239,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- mutex_lock(&hdmi->lock);
-
- drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
- dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
- mutex_unlock(&hdmi->lock);
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -271,66 +260,138 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
return 0;
}
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
+static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- int r;
- int idlemode;
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi_wp_audio_core_req_enable(&hd->wp, true);
+}
- mutex_lock(&hdmi->lock);
+static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+{
+ hdmi_wp_audio_core_req_enable(&hd->wp, false);
+ hdmi_wp_audio_enable(&hd->wp, false);
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+}
- r = hdmi_runtime_get(hdmi);
- BUG_ON(r);
+static int hdmi_core_enable(struct omap_hdmi *hdmi)
+{
+ int r = 0;
- idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
- /* No-idle mode */
- REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ DSSDBG("ENTER omapdss_hdmi_core_enable\n");
- r = hdmi5_read_edid(&hdmi->core, buf, len);
+ mutex_lock(&hdmi->lock);
- REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
+ r = hdmi_power_on_core(hdmi);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
- hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
+ return 0;
+err0:
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_start_audio_stream(struct omap_hdmi *hd)
+static void hdmi_core_disable(struct omap_hdmi *hdmi)
{
- REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
- hdmi_wp_audio_enable(&hd->wp, true);
- hdmi_wp_audio_core_req_enable(&hd->wp, true);
+ DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ hdmi_power_off_core(hdmi);
+
+ mutex_unlock(&hdmi->lock);
}
-static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- hdmi_wp_audio_core_req_enable(&hd->wp, false);
- hdmi_wp_audio_enable(&hd->wp, false);
- REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ bridge, flags);
}
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi5_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- unsigned long flags;
- int r;
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ mutex_lock(&hdmi->lock);
- DSSDBG("ENTER hdmi_display_enable\n");
+ drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+ dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+ mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi5_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * None of these should fail, as the bridge can't be enabled without a
+ * valid CRTC to connector path with fully populated new states.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+ ? HDMI_HDMI : HDMI_DVI;
+
+ if (connector->display_info.is_hdmi) {
+ const struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi;
+
+ mode = &crtc_state->adjusted_mode;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ mode);
+ if (ret == 0)
+ hdmi->cfg.infoframe = avi;
+ }
mutex_lock(&hdmi->lock);
- r = hdmi_power_on_full(hdmi);
- if (r) {
+ ret = hdmi_power_on_full(hdmi);
+ if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
- r = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
- &hdmi->audio_config,
- hdmi->cfg.vm.pixelclock);
- if (r) {
- DSSERR("Error restoring audio configuration: %d", r);
+ ret = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
+ if (ret) {
+ DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
@@ -346,13 +407,12 @@ done:
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi5_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
- DSSDBG("Enter hdmi_display_disable\n");
-
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
@@ -365,109 +425,74 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi->lock);
}
-static int hdmi_core_enable(struct omap_hdmi *hdmi)
+static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct edid *edid;
+ bool need_enable;
+ int idlemode;
+ int r;
- mutex_lock(&hdmi->lock);
+ need_enable = hdmi->core_enabled == false;
- r = hdmi_power_on_core(hdmi);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
+ if (need_enable) {
+ r = hdmi_core_enable(hdmi);
+ if (r)
+ return NULL;
}
- mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
-}
-
-static void hdmi_core_disable(struct omap_hdmi *hdmi)
-{
- DSSDBG("Enter omapdss_hdmi_core_disable\n");
-
mutex_lock(&hdmi->lock);
+ r = hdmi_runtime_get(hdmi);
+ BUG_ON(r);
- hdmi_power_off_core(hdmi);
-
- mutex_unlock(&hdmi->lock);
-}
-
-static int hdmi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
+ idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ /* No-idle mode */
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
-static void hdmi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
+ hdmi5_core_ddc_init(&hdmi->core);
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- bool need_enable;
- int r;
+ edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core);
- need_enable = hdmi->core_enabled == false;
+ hdmi5_core_ddc_uninit(&hdmi->core);
- if (need_enable) {
- r = hdmi_core_enable(hdmi);
- if (r)
- return r;
- }
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
- r = read_edid(hdmi, edid, len);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
if (need_enable)
hdmi_core_disable(hdmi);
- return r;
+ return (struct edid *)edid;
}
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
+static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
+ .attach = hdmi5_bridge_attach,
+ .mode_set = hdmi5_bridge_mode_set,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = hdmi5_bridge_enable,
+ .atomic_disable = hdmi5_bridge_disable,
+ .get_edid = hdmi5_bridge_get_edid,
+};
+
+static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ hdmi->bridge.funcs = &hdmi5_bridge_funcs;
+ hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- hdmi->cfg.infoframe = *avi;
- return 0;
+ drm_bridge_add(&hdmi->bridge);
}
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
+static void hdmi5_bridge_cleanup(struct omap_hdmi *hdmi)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
- return 0;
+ drm_bridge_remove(&hdmi->bridge);
}
-static const struct omap_dss_device_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
-
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
-
- .set_timings = hdmi_display_set_timings,
-
- .read_edid = hdmi_read_edid,
-
- .hdmi = {
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
- },
-};
-
/* -----------------------------------------------------------------------------
* Audio Callbacks
*/
@@ -650,19 +675,21 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
struct omap_dss_device *out = &hdmi->output;
int r;
+ hdmi5_bridge_init(hdmi);
+
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &hdmi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
- out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+ out->of_port = 0;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &hdmi->bridge);
+ if (r < 0) {
+ hdmi5_bridge_cleanup(hdmi);
return r;
+ }
omapdss_device_register(out);
@@ -675,6 +702,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ hdmi5_bridge_cleanup(hdmi);
}
static int hdmi5_probe_of(struct omap_hdmi *hdmi)
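With both HDMI drivers routed through drm_do_get_edid(), the hand-rolled checksum loops in the core files become redundant, as the DRM helper validates every block itself. The invariant being checked is simple: each 128-byte EDID block ends with a checksum byte chosen so the whole block sums to zero modulo 256. A standalone illustration:

#include <stdio.h>

#define EDID_LENGTH 128

/* Validate one EDID block: all 128 bytes must sum to 0 mod 256. */
static int edid_block_valid(const unsigned char *block)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH; i++)
		sum += block[i];

	return sum == 0;
}

int main(void)
{
	unsigned char block[EDID_LENGTH] = { 0x00, 0xff, 0xff, 0xff,
					     0xff, 0xff, 0xff, 0x00 };
	unsigned char sum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH - 1; i++)
		sum += block[i];
	block[EDID_LENGTH - 1] = (unsigned char)-sum;	/* fix up checksum */

	printf("block valid: %d\n", edid_block_valid(block));
	return 0;
}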
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index ff4d35c8771f..7dd587035160 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,7 +23,7 @@
#include "hdmi5_core.h"
-static void hdmi_core_ddc_init(struct hdmi_core_data *core)
+void hdmi5_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
@@ -102,7 +102,7 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x0, 2, 2);
}
-static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
@@ -112,14 +112,14 @@ static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2);
}
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u8 cur_addr;
- char checksum = 0;
const int retries = 1000;
- u8 seg_ptr = ext / 2;
- u8 edidbase = ((ext % 2) * 0x80);
+ u8 seg_ptr = block / 2;
+ u8 edidbase = ((block % 2) * EDID_LENGTH);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGPTR, seg_ptr, 7, 0);
@@ -127,7 +127,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
* TODO: We use polling here, although we probably should use proper
* interrupts.
*/
- for (cur_addr = 0; cur_addr < 128; ++cur_addr) {
+ for (cur_addr = 0; cur_addr < len; ++cur_addr) {
int i;
/* clear ERROR and DONE */
@@ -164,45 +164,13 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
return -EIO;
}
- pedid[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
- checksum += pedid[cur_addr];
+ buf[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
}
return 0;
}
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
- int r, n, i;
- int max_ext_blocks = (len / 128) - 1;
-
- if (len < 128)
- return -EINVAL;
-
- hdmi_core_ddc_init(core);
-
- r = hdmi_core_ddc_edid(core, edid, 0);
- if (r)
- goto out;
-
- n = edid[0x7e];
-
- if (n > max_ext_blocks)
- n = max_ext_blocks;
-
- for (i = 1; i <= n; i++) {
- r = hdmi_core_ddc_edid(core, edid + i * EDID_LENGTH, i);
- if (r)
- goto out;
- }
-
-out:
- hdmi_core_ddc_uninit(core);
-
- return r ? r : len;
-}
-
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
{
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
index f10b8a283011..65eadefdb3f9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
@@ -281,7 +281,10 @@ struct csc_table {
u16 c1, c2, c3, c4;
};
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+void hdmi5_core_ddc_init(struct hdmi_core_data *core);
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core);
+
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 31502857f013..72a7da7bfff1 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -174,34 +174,38 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
};
static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
- { .compatible = "composite-video-connector" },
- { .compatible = "hdmi-connector" },
{ .compatible = "panel-dsi-cm" },
- { .compatible = "svideo-connector" },
- { .compatible = "ti,opa362" },
- { .compatible = "ti,tpd12s015" },
{},
};
+static void __init omapdss_find_children(struct device_node *np)
+{
+ struct device_node *child;
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_find_property(child, "compatible", NULL))
+ continue;
+
+ omapdss_walk_device(child, true);
+
+ if (of_device_is_compatible(child, "ti,sysc"))
+ omapdss_find_children(child);
+ }
+}
+
static int __init omapdss_boot_init(void)
{
- struct device_node *dss, *child;
+ struct device_node *dss;
INIT_LIST_HEAD(&dss_conv_list);
dss = of_find_matching_node(NULL, omapdss_of_match);
if (dss == NULL || !of_device_is_available(dss))
- return 0;
+ goto put_node;
omapdss_walk_device(dss, true);
-
- for_each_available_child_of_node(dss, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
- omapdss_walk_device(child, true);
- }
+ omapdss_find_children(dss);
while (!list_empty(&dss_conv_list)) {
struct dss_conv_node *n;
@@ -217,6 +221,8 @@ static int __init omapdss_boot_init(void)
kfree(n);
}
+put_node:
+ of_node_put(dss);
return 0;
}
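Besides recursing into ti,sysc interconnect target modules, the reworked omapdss_boot_init() fixes a reference leak: of_find_matching_node() returns its node with an elevated refcount, and the old early return 0 never dropped it. Both exit paths now funnel through the put_node label. The general shape, as a sketch with a hypothetical match table:

/* Illustrative shape only; "example_of_match" is a hypothetical table. */
static int __init example_init(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, example_of_match);	/* takes a ref */
	if (!np || !of_device_is_available(np))
		goto put_node;

	/* ... walk np and its children ... */

put_node:
	of_node_put(np);	/* of_node_put(NULL) is a harmless no-op */
	return 0;
}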
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 79f6b195c7cf..ab19d4af8de7 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -285,13 +285,6 @@ struct omap_dss_writeback_info {
u8 pre_mult_alpha;
};
-struct omapdss_hdmi_ops {
- void (*lost_hotplug)(struct omap_dss_device *dssdev);
- int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
- int (*set_infoframe)(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi);
-};
-
struct omapdss_dsi_ops {
void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
bool enter_ulps);
@@ -349,46 +342,23 @@ struct omap_dss_device_ops {
void (*disconnect)(struct omap_dss_device *dssdev,
struct omap_dss_device *dst);
- void (*pre_enable)(struct omap_dss_device *dssdev);
void (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev);
- void (*post_disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
struct drm_display_mode *mode);
- void (*set_timings)(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode);
-
- bool (*detect)(struct omap_dss_device *dssdev);
-
- void (*register_hpd_cb)(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data);
- void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
-
- int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
int (*get_modes)(struct omap_dss_device *dssdev,
struct drm_connector *connector);
- union {
- const struct omapdss_hdmi_ops hdmi;
- const struct omapdss_dsi_ops dsi;
- };
+ const struct omapdss_dsi_ops dsi;
};
/**
* enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
- * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
- * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
* @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
*/
enum omap_dss_device_ops_flag {
- OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
- OMAP_DSS_DEVICE_OP_HPD = BIT(1),
- OMAP_DSS_DEVICE_OP_EDID = BIT(2),
OMAP_DSS_DEVICE_OP_MODES = BIT(3),
};
@@ -400,6 +370,7 @@ struct omap_dss_device {
struct dss_device *dss;
struct omap_dss_device *next;
struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
struct drm_panel *panel;
struct list_head list;
@@ -436,8 +407,8 @@ struct omap_dss_device {
/* output instance */
enum omap_dss_output_id id;
- /* bitmask of port numbers in DT */
- unsigned int of_ports;
+ /* port number in DT */
+ unsigned int of_port;
};
struct omap_dss_driver {
@@ -461,7 +432,6 @@ static inline bool omapdss_is_initialized(void)
}
void omapdss_display_init(struct omap_dss_device *dssdev);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
int omapdss_display_get_modes(struct drm_connector *connector,
const struct videomode *vm);
@@ -475,10 +445,8 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev);
void omapdss_device_enable(struct omap_dss_device *dssdev);
void omapdss_device_disable(struct omap_dss_device *dssdev);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
@@ -487,7 +455,8 @@ int omap_dss_get_num_overlays(void);
#define for_each_dss_output(d) \
while ((d = omapdss_device_next_output(d)) != NULL)
struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from);
-int omapdss_device_init_output(struct omap_dss_device *out);
+int omapdss_device_init_output(struct omap_dss_device *out,
+ struct drm_bridge *local_bridge);
void omapdss_device_cleanup_output(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
@@ -502,9 +471,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
}
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port);
-
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
DSS_WB_LCD2_MGR = 1,
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 0693d34fca1b..ce21c798cca6 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,7 +4,6 @@
* Author: Archit Taneja <archit@ti.com>
*/
-#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,12 +17,14 @@
#include "dss.h"
#include "omapdss.h"
-int omapdss_device_init_output(struct omap_dss_device *out)
+int omapdss_device_init_output(struct omap_dss_device *out,
+ struct drm_bridge *local_bridge)
{
struct device_node *remote_node;
+ int ret;
remote_node = of_graph_get_remote_node(out->dev->of_node,
- ffs(out->of_ports) - 1, 0);
+ out->of_port, 0);
if (!remote_node) {
dev_dbg(out->dev, "failed to find video sink\n");
return 0;
@@ -39,17 +40,55 @@ int omapdss_device_init_output(struct omap_dss_device *out)
if (out->next && out->type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
- omapdss_device_put(out->next);
- out->next = NULL;
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
- return out->next || out->bridge || out->panel ? 0 : -EPROBE_DEFER;
+ if (out->panel) {
+ struct drm_bridge *bridge;
+
+ bridge = drm_panel_bridge_add(out->panel);
+ if (IS_ERR(bridge)) {
+ dev_err(out->dev,
+ "unable to create panel bridge (%ld)\n",
+ PTR_ERR(bridge));
+ ret = PTR_ERR(bridge);
+ goto error;
+ }
+
+ out->bridge = bridge;
+ }
+
+ if (local_bridge) {
+ if (!out->bridge) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ out->next_bridge = out->bridge;
+ out->bridge = local_bridge;
+ }
+
+ if (!out->next && !out->bridge) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ omapdss_device_cleanup_output(out);
+ out->next = NULL;
+ return ret;
}
EXPORT_SYMBOL(omapdss_device_init_output);
void omapdss_device_cleanup_output(struct omap_dss_device *out)
{
+ if (out->bridge && out->panel)
+ drm_panel_bridge_remove(out->next_bridge ?
+ out->next_bridge : out->bridge);
+
if (out->next)
omapdss_device_put(out->next);
}
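The new panel handling in omapdss_device_init_output() relies on the kernel's error-pointer convention: drm_panel_bridge_add() returns either a valid bridge or an errno encoded in the pointer itself, which IS_ERR()/PTR_ERR() decode. A standalone re-implementation of those helpers, mirroring the call site above (the real ones live in <linux/err.h>; fake_panel_bridge_add() is a hypothetical stand-in):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ENOMEM 12

/* Userspace re-implementations of the <linux/err.h> helpers. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* The top 4095 address values are reserved for encoded errnos. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for drm_panel_bridge_add(). */
static void *fake_panel_bridge_add(int fail)
{
	static int bridge;	/* placeholder object */

	return fail ? ERR_PTR(-ENOMEM) : (void *)&bridge;
}

int main(void)
{
	void *bridge = fake_panel_bridge_add(1);

	if (IS_ERR(bridge))
		printf("unable to create panel bridge (%ld)\n",
		       PTR_ERR(bridge));
	return 0;
}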
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 3b447c01fa2a..417a8740ad0a 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -6,17 +6,19 @@
#define DSS_SUBSYS_NAME "SDI"
-#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/regulator/consumer.h>
#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/string.h>
-#include <linux/of.h>
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
#include "dss.h"
+#include "omapdss.h"
struct sdi_device {
struct platform_device *pdev;
@@ -30,9 +32,11 @@ struct sdi_device {
int datapairs;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-#define dssdev_to_sdi(dssdev) container_of(dssdev, struct sdi_device, output)
+#define drm_bridge_to_sdi(bridge) \
+ container_of(bridge, struct sdi_device, bridge)
struct sdi_clk_calc_ctx {
struct sdi_device *sdi;
@@ -118,9 +122,82 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
-static void sdi_display_enable(struct omap_dss_device *dssdev)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int sdi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+sdi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ unsigned long pixelclock = mode->clock * 1000;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ int ret;
+
+ if (pixelclock == 0)
+ return MODE_NOCLOCK;
+
+ ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+ if (ret < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool sdi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ unsigned long pixelclock = mode->clock * 1000;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ unsigned long pck;
+ int ret;
+
+ ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+ if (ret < 0)
+ return false;
+
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
+
+ if (pck != pixelclock)
+ dev_dbg(&sdi->pdev->dev,
+ "pixel clock adjusted from %lu Hz to %lu Hz\n",
+ pixelclock, pck);
+
+ adjusted_mode->clock = pck / 1000;
+
+ return true;
+}
+
+static void sdi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+ sdi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void sdi_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int r;
@@ -181,9 +258,10 @@ err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_display_disable(struct omap_dss_device *dssdev)
+static void sdi_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
dss_mgr_disable(&sdi->output);
@@ -194,86 +272,56 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
-
- sdi->pixelclock = mode->clock * 1000;
-}
+static const struct drm_bridge_funcs sdi_bridge_funcs = {
+ .attach = sdi_bridge_attach,
+ .mode_valid = sdi_bridge_mode_valid,
+ .mode_fixup = sdi_bridge_mode_fixup,
+ .mode_set = sdi_bridge_mode_set,
+ .atomic_enable = sdi_bridge_enable,
+ .atomic_disable = sdi_bridge_disable,
+};
-static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
+static void sdi_bridge_init(struct sdi_device *sdi)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- struct dispc_clock_info dispc_cinfo;
- unsigned long pixelclock = mode->clock * 1000;
- unsigned long fck;
- unsigned long pck;
- int r;
+ sdi->bridge.funcs = &sdi_bridge_funcs;
+ sdi->bridge.of_node = sdi->pdev->dev.of_node;
+ sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- if (pixelclock == 0)
- return -EINVAL;
-
- r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
- if (r)
- return r;
-
- pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
-
- if (pck != pixelclock) {
- DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
- pixelclock, pck);
-
- mode->clock = pck / 1000;
- }
-
- return 0;
+ drm_bridge_add(&sdi->bridge);
}
-static int sdi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void sdi_bridge_cleanup(struct sdi_device *sdi)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ drm_bridge_remove(&sdi->bridge);
}
-static void sdi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops sdi_ops = {
- .connect = sdi_connect,
- .disconnect = sdi_disconnect,
-
- .enable = sdi_display_enable,
- .disable = sdi_display_disable,
-
- .check_timings = sdi_check_timings,
- .set_timings = sdi_set_timings,
-};
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
static int sdi_init_output(struct sdi_device *sdi)
{
struct omap_dss_device *out = &sdi->output;
int r;
+ sdi_bridge_init(sdi);
+
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
out->type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
- out->of_ports = BIT(1);
- out->ops = &sdi_ops;
+ out->of_port = 1;
out->owner = THIS_MODULE;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
| DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &sdi->bridge);
+ if (r < 0) {
+ sdi_bridge_cleanup(sdi);
return r;
+ }
omapdss_device_register(out);
@@ -284,6 +332,8 @@ static void sdi_uninit_output(struct sdi_device *sdi)
{
omapdss_device_unregister(&sdi->output);
omapdss_device_cleanup_output(&sdi->output);
+
+ sdi_bridge_cleanup(sdi);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
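The sdi conversion above replaces the omap_dss_device_ops vtable with drm_bridge_funcs, embedding a struct drm_bridge directly in the driver-private structure and recovering the container with container_of(). A minimal self-contained sketch of that embedding pattern follows; struct my_encoder and its fields are illustrative names, not taken from the patch:

#include <linux/kernel.h>
#include <drm/drm_bridge.h>

struct my_encoder {
	struct drm_bridge bridge;	/* embedded by value, not a pointer */
	unsigned long pixelclock;
};

static inline struct my_encoder *bridge_to_my_encoder(struct drm_bridge *b)
{
	/* Valid only for bridges known to be embedded in struct my_encoder;
	 * this is the same trick drm_bridge_to_sdi() uses above. */
	return container_of(b, struct my_encoder, bridge);
}

Because the bridge is embedded rather than allocated separately, its lifetime is tied to the enclosing structure, which is why the cleanup paths above call drm_bridge_remove() before the containing device goes away.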
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 596a297d5813..766553bb2f87 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -26,6 +25,8 @@
#include <linux/component.h>
#include <linux/sys_soc.h>
+#include <drm/drm_bridge.h>
+
#include "omapdss.h"
#include "dss.h"
@@ -289,7 +290,6 @@ static const struct drm_display_mode omap_dss_ntsc_mode = {
struct venc_device {
struct platform_device *pdev;
void __iomem *base;
- struct mutex venc_lock;
struct regulator *vdda_dac_reg;
struct dss_device *dss;
@@ -303,9 +303,10 @@ struct venc_device {
bool requires_tv_dac_clk;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-#define dssdev_to_venc(dssdev) container_of(dssdev, struct venc_device, output)
+#define drm_bridge_to_venc(b) container_of(b, struct venc_device, bridge)
static inline void venc_write_reg(struct venc_device *venc, int idx, u32 val)
{
@@ -477,56 +478,6 @@ static void venc_power_off(struct venc_device *venc)
venc_runtime_put(venc);
}
-static void venc_display_enable(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- DSSDBG("venc_display_enable\n");
-
- mutex_lock(&venc->venc_lock);
-
- venc_power_on(venc);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static void venc_display_disable(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- DSSDBG("venc_display_disable\n");
-
- mutex_lock(&venc->venc_lock);
-
- venc_power_off(venc);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- static const struct drm_display_mode *modes[] = {
- &omap_dss_pal_mode,
- &omap_dss_ntsc_mode,
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(modes); ++i) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, modes[i]);
- if (!mode)
- return i;
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return ARRAY_SIZE(modes);
-}
-
static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
{
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
@@ -545,57 +496,6 @@ static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mod
return VENC_MODE_UNKNOWN;
}
-static void venc_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
- enum venc_videomode venc_mode = venc_get_videomode(mode);
-
- DSSDBG("venc_set_timings\n");
-
- mutex_lock(&venc->venc_lock);
-
- switch (venc_mode) {
- default:
- WARN_ON_ONCE(1);
- /* Fall-through */
- case VENC_MODE_PAL:
- venc->config = &venc_config_pal_trm;
- break;
-
- case VENC_MODE_NTSC:
- venc->config = &venc_config_ntsc_trm;
- break;
- }
-
- dispc_set_tv_pclk(venc->dss->dispc, 13500000);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
-{
- DSSDBG("venc_check_timings\n");
-
- switch (venc_get_videomode(mode)) {
- case VENC_MODE_PAL:
- drm_mode_copy(mode, &omap_dss_pal_mode);
- break;
-
- case VENC_MODE_NTSC:
- drm_mode_copy(mode, &omap_dss_ntsc_mode);
- break;
-
- default:
- return -EINVAL;
- }
-
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- drm_mode_set_name(mode);
- return 0;
-}
-
static int venc_dump_regs(struct seq_file *s, void *p)
{
struct venc_device *venc = s->private;
@@ -673,31 +573,149 @@ static int venc_get_clocks(struct venc_device *venc)
return 0;
}
-static int venc_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int venc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+venc_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ switch (venc_get_videomode(mode)) {
+ case VENC_MODE_PAL:
+ case VENC_MODE_NTSC:
+ return MODE_OK;
+
+ default:
+ return MODE_BAD;
+ }
+}
+
+static bool venc_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *venc_mode;
+
+ switch (venc_get_videomode(adjusted_mode)) {
+ case VENC_MODE_PAL:
+ venc_mode = &omap_dss_pal_mode;
+ break;
+
+ case VENC_MODE_NTSC:
+ venc_mode = &omap_dss_ntsc_mode;
+ break;
+
+ default:
+ return false;
+ }
+
+ drm_mode_copy(adjusted_mode, venc_mode);
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_name(adjusted_mode);
+
+ return true;
+}
+
+static void venc_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+ enum venc_videomode venc_mode = venc_get_videomode(adjusted_mode);
+
+ switch (venc_mode) {
+ default:
+ WARN_ON_ONCE(1);
+ /* Fall-through */
+ case VENC_MODE_PAL:
+ venc->config = &venc_config_pal_trm;
+ break;
+
+ case VENC_MODE_NTSC:
+ venc->config = &venc_config_ntsc_trm;
+ break;
+ }
+
+ dispc_set_tv_pclk(venc->dss->dispc, 13500000);
+}
+
+static void venc_bridge_enable(struct drm_bridge *bridge)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ venc_power_on(venc);
}
-static void venc_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void venc_bridge_disable(struct drm_bridge *bridge)
{
- omapdss_device_disconnect(dst, dst->next);
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ venc_power_off(venc);
}
-static const struct omap_dss_device_ops venc_ops = {
- .connect = venc_connect,
- .disconnect = venc_disconnect,
+static int venc_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ static const struct drm_display_mode *modes[] = {
+ &omap_dss_pal_mode,
+ &omap_dss_ntsc_mode,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, modes[i]);
+ if (!mode)
+ return i;
- .enable = venc_display_enable,
- .disable = venc_display_disable,
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
- .check_timings = venc_check_timings,
- .set_timings = venc_set_timings,
+ return ARRAY_SIZE(modes);
+}
- .get_modes = venc_get_modes,
+static const struct drm_bridge_funcs venc_bridge_funcs = {
+ .attach = venc_bridge_attach,
+ .mode_valid = venc_bridge_mode_valid,
+ .mode_fixup = venc_bridge_mode_fixup,
+ .mode_set = venc_bridge_mode_set,
+ .enable = venc_bridge_enable,
+ .disable = venc_bridge_disable,
+ .get_modes = venc_bridge_get_modes,
};
+static void venc_bridge_init(struct venc_device *venc)
+{
+ venc->bridge.funcs = &venc_bridge_funcs;
+ venc->bridge.of_node = venc->pdev->dev.of_node;
+ venc->bridge.ops = DRM_BRIDGE_OP_MODES;
+ venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
+ venc->bridge.interlace_allowed = true;
+
+ drm_bridge_add(&venc->bridge);
+}
+
+static void venc_bridge_cleanup(struct venc_device *venc)
+{
+ drm_bridge_remove(&venc->bridge);
+}
+
/* -----------------------------------------------------------------------------
* Component Bind & Unbind
*/
@@ -747,19 +765,22 @@ static int venc_init_output(struct venc_device *venc)
struct omap_dss_device *out = &venc->output;
int r;
+ venc_bridge_init(venc);
+
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &venc_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
+ out->of_port = 0;
out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &venc->bridge);
+ if (r < 0) {
+ venc_bridge_cleanup(venc);
return r;
+ }
omapdss_device_register(out);
@@ -770,6 +791,8 @@ static void venc_uninit_output(struct venc_device *venc)
{
omapdss_device_unregister(&venc->output);
omapdss_device_cleanup_output(&venc->output);
+
+ venc_bridge_cleanup(venc);
}
static int venc_probe_of(struct venc_device *venc)
@@ -839,8 +862,6 @@ static int venc_probe(struct platform_device *pdev)
if (soc_device_match(venc_soc_devices))
venc->requires_tv_dac_clk = true;
- mutex_init(&venc->venc_lock);
-
venc->config = &venc_config_pal_trm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
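venc_bridge_attach() above illustrates the contract every converted omapdrm encoder follows: the bridge refuses the legacy attach model (where the bridge would create its own connector) and chains the next bridge to the same encoder. A condensed sketch of that contract, assuming the same illustrative my_encoder structure with a next_bridge pointer:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <drm/drm_bridge.h>

struct my_encoder {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;	/* downstream bridge, if any */
};

static int my_bridge_attach(struct drm_bridge *bridge,
			    enum drm_bridge_attach_flags flags)
{
	struct my_encoder *enc = container_of(bridge, struct my_encoder,
					      bridge);

	/* The connector is created by the display driver core, never by
	 * the bridge itself, so legacy attachment must be rejected. */
	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	/* Chain the downstream bridge to the same encoder. */
	return drm_bridge_attach(bridge->encoder, enc->next_bridge,
				 bridge, flags);
}

The consumer side of this contract is visible further down, where omap_drv.c passes DRM_BRIDGE_ATTACH_NO_CONNECTOR and then builds the connector with drm_bridge_connector_init().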
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 94cded387174..528764566b17 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -6,7 +6,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "omap_drv.h"
@@ -20,124 +19,12 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *output;
- struct omap_dss_device *hpd;
- bool hdmi_mode;
};
-static void omap_connector_hpd_notify(struct drm_connector *connector,
- enum drm_connector_status status)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
-
- if (status != connector_status_disconnected)
- return;
-
- /*
-	 * Notify all devices in the pipeline of disconnection. This is required
- * to let the HDMI encoders reset their internal state related to
- * connection status, such as the CEC address.
- */
- for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
- if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
- dssdev->ops->hdmi.lost_hotplug(dssdev);
- }
-}
-
-static void omap_connector_hpd_cb(void *cb_data,
- enum drm_connector_status status)
-{
- struct omap_connector *omap_connector = cb_data;
- struct drm_connector *connector = &omap_connector->base;
- struct drm_device *dev = connector->dev;
- enum drm_connector_status old_status;
-
- mutex_lock(&dev->mode_config.mutex);
- old_status = connector->status;
- connector->status = status;
- mutex_unlock(&dev->mode_config.mutex);
-
- if (old_status == status)
- return;
-
- omap_connector_hpd_notify(connector, status);
-
- drm_kms_helper_hotplug_event(dev);
-}
-
-void omap_connector_enable_hpd(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- if (hpd)
- hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb,
- omap_connector);
-}
-
-void omap_connector_disable_hpd(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- if (hpd)
- hpd->ops->unregister_hpd_cb(hpd);
-}
-
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- return omap_connector->hdmi_mode;
-}
-
-static struct omap_dss_device *
-omap_connector_find_device(struct drm_connector *connector,
- enum omap_dss_device_ops_flag op)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = NULL;
- struct omap_dss_device *d;
-
- for (d = omap_connector->output; d; d = d->next) {
- if (d->ops_flags & op)
- dssdev = d;
- }
-
- return dssdev;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
- struct omap_dss_device *dssdev;
- enum drm_connector_status status;
-
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_DETECT);
-
- if (dssdev) {
- status = dssdev->ops->detect(dssdev)
- ? connector_status_connected
- : connector_status_disconnected;
-
- omap_connector_hpd_notify(connector, status);
- } else {
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DPI:
- case DRM_MODE_CONNECTOR_LVDS:
- case DRM_MODE_CONNECTOR_DSI:
- status = connector_status_connected;
- break;
- default:
- status = connector_status_unknown;
- break;
- }
- }
-
- VERB("%s: %d (force=%d)", connector->name, status, force);
-
- return status;
+ return connector_status_connected;
}
static void omap_connector_destroy(struct drm_connector *connector)
@@ -146,14 +33,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
DBG("%s", connector->name);
- if (omap_connector->hpd) {
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- hpd->ops->unregister_hpd_cb(hpd);
- omapdss_device_put(hpd);
- omap_connector->hpd = NULL;
- }
-
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -162,81 +41,27 @@ static void omap_connector_destroy(struct drm_connector *connector)
kfree(omap_connector);
}
-#define MAX_EDID 512
-
-static int omap_connector_get_modes_edid(struct drm_connector *connector,
- struct omap_dss_device *dssdev)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- enum drm_connector_status status;
- void *edid;
- int n;
-
- status = omap_connector_detect(connector, false);
- if (status != connector_status_connected)
- goto no_edid;
-
- edid = kzalloc(MAX_EDID, GFP_KERNEL);
- if (!edid)
- goto no_edid;
-
- if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 ||
- !drm_edid_is_valid(edid)) {
- kfree(edid);
- goto no_edid;
- }
-
- drm_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
-
- omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid);
-
- kfree(edid);
- return n;
-
-no_edid:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static int omap_connector_get_modes(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *d;
DBG("%s", connector->name);
/*
- * If display exposes EDID, then we parse that in the normal way to
- * build table of supported modes.
+ * If the display pipeline reports modes (e.g. with a fixed resolution
+ * panel or an analog TV output), query it.
*/
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_EDID);
- if (dssdev)
- return omap_connector_get_modes_edid(connector, dssdev);
+ for (d = omap_connector->output; d; d = d->next) {
+ if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
+ dssdev = d;
+ }
- /*
- * Otherwise if the display pipeline reports modes (e.g. with a fixed
- * resolution panel or an analog TV output), query it.
- */
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_MODES);
if (dssdev)
return dssdev->ops->get_modes(dssdev, connector);
- /*
- * Otherwise if the display pipeline uses a drm_panel, we delegate the
- * operation to the panel API.
- */
- if (omap_connector->output->panel)
- return drm_panel_get_modes(omap_connector->output->panel,
- connector);
-
- /*
- * We can't retrieve modes, which can happen for instance for a DVI or
- * VGA output with the DDC bus unconnected. The KMS core will add the
- * default modes.
- */
+ /* We can't retrieve modes. The KMS core will add the default modes. */
return 0;
}
@@ -249,7 +74,7 @@ enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
drm_mode_copy(adjusted_mode, mode);
for (; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
+ if (!dssdev->ops || !dssdev->ops->check_timings)
continue;
ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
@@ -298,35 +123,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
-static int omap_connector_get_type(struct omap_dss_device *output)
-{
- struct omap_dss_device *display;
- enum omap_display_type type;
-
- display = omapdss_display_get(output);
- type = display->type;
- omapdss_device_put(display);
-
- switch (type) {
- case OMAP_DISPLAY_TYPE_HDMI:
- return DRM_MODE_CONNECTOR_HDMIA;
- case OMAP_DISPLAY_TYPE_DVI:
- return DRM_MODE_CONNECTOR_DVID;
- case OMAP_DISPLAY_TYPE_DSI:
- return DRM_MODE_CONNECTOR_DSI;
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- return DRM_MODE_CONNECTOR_DPI;
- case OMAP_DISPLAY_TYPE_VENC:
- /* TODO: This could also be composite */
- return DRM_MODE_CONNECTOR_SVIDEO;
- case OMAP_DISPLAY_TYPE_SDI:
- return DRM_MODE_CONNECTOR_LVDS;
- default:
- return DRM_MODE_CONNECTOR_Unknown;
- }
-}
-
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
@@ -334,7 +130,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
- struct omap_dss_device *dssdev;
DBG("%s", output->name);
@@ -349,27 +144,9 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- omap_connector_get_type(output));
+ DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
- /*
- * Initialize connector status handling. First try to find a device that
- * supports hot-plug reporting. If it fails, fall back to a device that
- * support polling. If that fails too, we don't support hot-plug
- * detection at all.
- */
- dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD);
- if (dssdev) {
- omap_connector->hpd = omapdss_device_get(dssdev);
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- } else {
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_DETECT);
- if (dssdev)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- }
-
return connector;
fail:
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 13607bda33d8..0ecd4f1655b7 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -21,9 +21,6 @@ struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
struct drm_encoder *encoder);
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void omap_connector_enable_hpd(struct drm_connector *connector);
-void omap_connector_disable_hpd(struct drm_connector *connector);
enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 3c5ddbf30e97..fce7e944a280 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -831,7 +831,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma
* tables so lets use that. Size of HW gamma table can be
* extracted with dispc_mgr_gamma_size(). If it returns 0
- * gamma table is not supprted.
+ * gamma table is not supported.
*/
if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 252f5ebb1acc..42ec51bb7b1b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -82,12 +82,11 @@ static const u32 reg[][4] = {
static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
{
- struct dma_device *dma_dev = dmm->wa_dma_chan->device;
struct dma_async_tx_descriptor *tx;
enum dma_status status;
dma_cookie_t cookie;
- tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+ tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
if (!tx) {
dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -99,7 +98,6 @@ static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
return -EIO;
}
- dma_async_issue_pending(dmm->wa_dma_chan);
status = dma_sync_wait(dmm->wa_dma_chan, cookie);
if (status != DMA_COMPLETE)
dev_err(dmm->dev, "i878 wa DMA copy failure\n");
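The dmm_tiler hunk above swaps a raw device_prep_dma_memcpy() call for the dmaengine_prep_dma_memcpy() wrapper and drops the explicit dma_async_issue_pending(); dropping it is safe because dma_sync_wait() issues pending work itself before polling. A sketch of the resulting canonical prep/submit/wait sequence; dma_copy_word is an illustrative name and the channel is assumed to be already requested:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int dma_copy_word(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Prepare a 4-byte device-to-device copy, no descriptor flags. */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, 4, 0);
	if (!tx)
		return -EIO;

	cookie = dmaengine_submit(tx);	/* wraps tx->tx_submit(tx) */
	if (dma_submit_error(cookie))
		return -EIO;

	/* dma_sync_wait() calls dma_async_issue_pending() internally,
	 * which is why the explicit call could be removed above. */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}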
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d2750f60f519..cdafd7ef1c32 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
@@ -134,9 +135,6 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
- if (pipe->output->panel)
- drm_panel_detach(pipe->output->panel);
-
omapdss_device_disconnect(NULL, pipe->output);
omapdss_device_put(pipe->output);
@@ -209,11 +207,12 @@ static int omap_display_id(struct omap_dss_device *output)
struct device_node *node = NULL;
if (output->next) {
- struct omap_dss_device *display;
+ struct omap_dss_device *display = output;
+
+ while (display->next)
+ display = display->next;
- display = omapdss_display_get(output);
node = display->dev->of_node;
- omapdss_device_put(display);
} else if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
@@ -221,8 +220,6 @@ static int omap_display_id(struct omap_dss_device *output)
bridge = drm_bridge_get_next_bridge(bridge);
node = bridge->of_node;
- } else if (output->panel) {
- node = output->panel->dev->of_node;
}
return node ? of_alias_get_id(node, "display") : -ENODEV;
@@ -297,9 +294,14 @@ static int omap_modeset_init(struct drm_device *dev)
if (pipe->output->bridge) {
ret = drm_bridge_attach(pipe->encoder,
- pipe->output->bridge, NULL);
- if (ret < 0)
+ pipe->output->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "unable to attach bridge %pOF\n",
+ pipe->output->bridge->of_node);
return ret;
+ }
}
id = omap_display_id(pipe->output);
@@ -330,20 +332,28 @@ static int omap_modeset_init(struct drm_device *dev)
struct drm_encoder *encoder = pipe->encoder;
struct drm_crtc *crtc;
- if (!pipe->output->bridge) {
+ if (pipe->output->next) {
pipe->connector = omap_connector_init(dev, pipe->output,
encoder);
if (!pipe->connector)
return -ENOMEM;
+ } else {
+ pipe->connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(pipe->connector)) {
+ dev_err(priv->dev,
+ "unable to create bridge connector for %s\n",
+ pipe->output->name);
+ return PTR_ERR(pipe->connector);
+ }
+ }
- drm_connector_attach_encoder(pipe->connector, encoder);
+ drm_connector_attach_encoder(pipe->connector, encoder);
- if (pipe->output->panel) {
- ret = drm_panel_attach(pipe->output->panel,
- pipe->connector);
- if (ret < 0)
- return ret;
- }
+ if (pipe->output->panel) {
+ ret = drm_panel_attach(pipe->output->panel,
+ pipe->connector);
+ if (ret < 0)
+ return ret;
}
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
@@ -382,6 +392,23 @@ static int omap_modeset_init(struct drm_device *dev)
return 0;
}
+static void omap_modeset_fini(struct drm_device *ddev)
+{
+ struct omap_drm_private *priv = ddev->dev_private;
+ unsigned int i;
+
+ omap_drm_irq_uninstall(ddev);
+
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+
+ if (pipe->output->panel)
+ drm_panel_detach(pipe->output->panel);
+ }
+
+ drm_mode_config_cleanup(ddev);
+}
+
/*
* Enable the HPD in external components if supported
*/
@@ -391,8 +418,13 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
- if (priv->pipes[i].connector)
- omap_connector_enable_hpd(priv->pipes[i].connector);
+ struct drm_connector *connector = priv->pipes[i].connector;
+
+ if (!connector)
+ continue;
+
+ if (priv->pipes[i].output->bridge)
+ drm_bridge_connector_enable_hpd(connector);
}
}
@@ -405,8 +437,13 @@ static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
- if (priv->pipes[i].connector)
- omap_connector_disable_hpd(priv->pipes[i].connector);
+ struct drm_connector *connector = priv->pipes[i].connector;
+
+ if (!connector)
+ continue;
+
+ if (priv->pipes[i].output->bridge)
+ drm_bridge_connector_disable_hpd(connector);
}
}
@@ -629,8 +666,7 @@ err_cleanup_helpers:
omap_fbdev_fini(ddev);
err_cleanup_modeset:
- drm_mode_config_cleanup(ddev);
- omap_drm_irq_uninstall(ddev);
+ omap_modeset_fini(ddev);
err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
@@ -655,9 +691,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_atomic_helper_shutdown(ddev);
- drm_mode_config_cleanup(ddev);
-
- omap_drm_irq_uninstall(ddev);
+ omap_modeset_fini(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
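The omap_drv.c changes above move connector creation out of the driver: the encoder attaches its bridge chain with DRM_BRIDGE_ATTACH_NO_CONNECTOR and then asks the drm_bridge_connector helper to instantiate one connector covering the whole chain. A minimal sketch of that sequence; attach_chain and first_bridge are illustrative names, first_bridge is assumed to be the bridge closest to the encoder, and error unwinding is trimmed:

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>

static int attach_chain(struct drm_device *dev, struct drm_encoder *encoder,
			struct drm_bridge *first_bridge)
{
	struct drm_connector *connector;
	int ret;

	/* Attach the chain; no bridge may create its own connector. */
	ret = drm_bridge_attach(encoder, first_bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret < 0)
		return ret;

	/* One connector for the whole chain, built by the helper. */
	connector = drm_bridge_connector_init(dev, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}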
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4f2165a37795..ae4b867a67a3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -10,7 +10,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
-#include <drm/drm_panel.h>
#include "omap_drv.h"
@@ -70,30 +69,6 @@ static void omap_encoder_update_videomode_flags(struct videomode *vm,
}
}
-static void omap_encoder_hdmi_mode_set(struct drm_connector *connector,
- struct drm_encoder *encoder,
- struct drm_display_mode *adjusted_mode)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- bool hdmi_mode;
-
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
-
- if (dssdev->ops->hdmi.set_hdmi_mode)
- dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
- if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
- struct hdmi_avi_infoframe avi;
- int r;
-
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
- adjusted_mode);
- if (r == 0)
- dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
- }
-}
-
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -138,17 +113,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
bus_flags = connector->display_info.bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
- /* Set timings for all devices in the display pipeline. */
+ /* Set timings for the dss manager. */
dss_mgr_set_timings(output, &vm);
-
- for (dssdev = output; dssdev; dssdev = dssdev->next) {
- if (dssdev->ops->set_timings)
- dssdev->ops->set_timings(dssdev, adjusted_mode);
- }
-
- /* Set the HDMI mode and HDMI infoframe if applicable. */
- if (output->type == OMAP_DISPLAY_TYPE_HDMI)
- omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
@@ -159,33 +125,12 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
- /* Disable the panel if present. */
- if (dssdev->panel) {
- drm_panel_disable(dssdev->panel);
- drm_panel_unprepare(dssdev->panel);
- }
-
/*
* Disable the chain of external devices, starting at the one at the
- * internal encoder's output.
+ * internal encoder's output. This is used for DSI outputs only, as
+ * dssdev->next is NULL for all other outputs.
*/
omapdss_device_disable(dssdev->next);
-
- /*
- * Disable the internal encoder. This will disable the DSS output. The
- * DSI is treated as an exception as DSI pipelines still use the legacy
- * flow where the pipeline output controls the encoder.
- */
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
- dssdev->ops->disable(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- }
-
- /*
- * Perform the post-disable operations on the chain of external devices
- * to complete the display pipeline disable.
- */
- omapdss_device_post_disable(dssdev->next);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
@@ -196,30 +141,12 @@ static void omap_encoder_enable(struct drm_encoder *encoder)
dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
- /* Prepare the chain of external devices for pipeline enable. */
- omapdss_device_pre_enable(dssdev->next);
-
- /*
- * Enable the internal encoder. This will enable the DSS output. The
- * DSI is treated as an exception as DSI pipelines still use the legacy
- * flow where the pipeline output controls the encoder.
- */
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
- dssdev->ops->enable(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- }
-
/*
* Enable the chain of external devices, starting at the one at the
- * internal encoder's output.
+ * internal encoder's output. This is used for DSI outputs only, as
+ * dssdev->next is NULL for all other outputs.
*/
omapdss_device_enable(dssdev->next);
-
- /* Enable the panel if present. */
- if (dssdev->panel) {
- drm_panel_prepare(dssdev->panel);
- drm_panel_enable(dssdev->panel);
- }
}
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b06e5cbfd03a..09a84919ef73 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -242,14 +242,10 @@ void omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_pipes);
+ ret = drm_fb_helper_init(dev, helper);
if (ret)
goto fail;
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret)
- goto fini;
-
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
goto fini;
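The omap_fbdev.c hunk tracks a drm_fb_helper API change: drm_fb_helper_init() no longer takes a connector count, and drm_fb_helper_single_add_all_connectors() is gone because the helper now picks up connectors by itself. A sketch of the minimal bring-up sequence after that change; my_fbdev_init is an illustrative name and error unwinding via drm_fb_helper_fini() is omitted:

#include <drm/drm_fb_helper.h>

static int my_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper,
			 const struct drm_fb_helper_funcs *funcs)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, funcs);

	/* Connector-count argument dropped; connectors tracked internally. */
	ret = drm_fb_helper_init(dev, helper);
	if (ret)
		return ret;

	return drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);
}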
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index ae44ac2ec106..a1723c1b5fbf 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -29,6 +29,15 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_BOE_TV101WUM_NL6
+ tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the BOE TV101WUM and
+	  AUO KD101N80 45NA WUXGA DSI video mode panels.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -50,6 +59,25 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_ELIDA_KD35T133
+ tristate "Elida KD35T133 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Elida
+ KD35T133 controller for 320x480 LCD panels with MIPI-DSI
+ system interfaces.
+
+config DRM_PANEL_FEIXIN_K101_IM2BA02
+ tristate "Feixin K101 IM2BA02 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Feixin K101 IM2BA02
+ 4-lane 800x1280 MIPI DSI panel.
+
config DRM_PANEL_FEIYANG_FY07024DI26A30D
tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"
depends on OF
@@ -149,6 +177,16 @@ config DRM_PANEL_NEC_NL8048HL11
panel (found on the Zoom2/3/3630 SDP boards). To compile this driver
as a module, choose M here.
+config DRM_PANEL_NOVATEK_NT35510
+ tristate "Novatek NT35510 RGB panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT35510 display controller, such as some
+ Hydis panels.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -275,6 +313,12 @@ config DRM_PANEL_SAMSUNG_S6E63M0
Say Y here if you want to enable support for Samsung S6E63M0
AMOLED LCD panel.
+config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
+ tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
+ depends on OF
+ select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 7c4d3c581fd4..96a883cd6630 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
+obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
@@ -13,6 +16,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
@@ -28,6 +32,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
new file mode 100644
index 000000000000..48a164257d18
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned int bpc;
+
+ /**
+ * @width_mm: width of the panel's active display area
+ * @height_mm: height of the panel's active display area
+ */
+ struct {
+ unsigned int width_mm;
+ unsigned int height_mm;
+ } size;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ const struct panel_init_cmd *init_cmds;
+ unsigned int lanes;
+ bool discharge_on_disable;
+};
+
+struct boe_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ const struct panel_desc *desc;
+
+ struct regulator *pp1800;
+ struct regulator *avee;
+ struct regulator *avdd;
+ struct gpio_desc *enable_gpio;
+
+ bool prepared;
+};
+
+enum dsi_cmd_type {
+ INIT_DCS_CMD,
+ DELAY_CMD,
+};
+
+struct panel_init_cmd {
+ enum dsi_cmd_type type;
+ size_t len;
+ const char *data;
+};
+
+#define _INIT_DCS_CMD(...) { \
+ .type = INIT_DCS_CMD, \
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+#define _INIT_DELAY_CMD(...) { \
+ .type = DELAY_CMD,\
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+static const struct panel_init_cmd boe_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0xB0, 0x05),
+ _INIT_DCS_CMD(0xB1, 0xE5),
+ _INIT_DCS_CMD(0xB3, 0x52),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB3, 0x88),
+ _INIT_DCS_CMD(0xB0, 0x04),
+ _INIT_DCS_CMD(0xB8, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB6, 0x03),
+ _INIT_DCS_CMD(0xBA, 0x8B),
+ _INIT_DCS_CMD(0xBF, 0x1A),
+ _INIT_DCS_CMD(0xC0, 0x0F),
+ _INIT_DCS_CMD(0xC2, 0x0C),
+ _INIT_DCS_CMD(0xC3, 0x02),
+ _INIT_DCS_CMD(0xC4, 0x0C),
+ _INIT_DCS_CMD(0xC5, 0x02),
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xE0, 0x26),
+ _INIT_DCS_CMD(0xE1, 0x26),
+ _INIT_DCS_CMD(0xDC, 0x00),
+ _INIT_DCS_CMD(0xDD, 0x00),
+ _INIT_DCS_CMD(0xCC, 0x26),
+ _INIT_DCS_CMD(0xCD, 0x26),
+ _INIT_DCS_CMD(0xC8, 0x00),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xD2, 0x03),
+ _INIT_DCS_CMD(0xD3, 0x03),
+ _INIT_DCS_CMD(0xE6, 0x04),
+ _INIT_DCS_CMD(0xE7, 0x04),
+ _INIT_DCS_CMD(0xC4, 0x09),
+ _INIT_DCS_CMD(0xC5, 0x09),
+ _INIT_DCS_CMD(0xD8, 0x0A),
+ _INIT_DCS_CMD(0xD9, 0x0A),
+ _INIT_DCS_CMD(0xC2, 0x0B),
+ _INIT_DCS_CMD(0xC3, 0x0B),
+ _INIT_DCS_CMD(0xD6, 0x0C),
+ _INIT_DCS_CMD(0xD7, 0x0C),
+ _INIT_DCS_CMD(0xC0, 0x05),
+ _INIT_DCS_CMD(0xC1, 0x05),
+ _INIT_DCS_CMD(0xD4, 0x06),
+ _INIT_DCS_CMD(0xD5, 0x06),
+ _INIT_DCS_CMD(0xCA, 0x07),
+ _INIT_DCS_CMD(0xCB, 0x07),
+ _INIT_DCS_CMD(0xDE, 0x08),
+ _INIT_DCS_CMD(0xDF, 0x08),
+ _INIT_DCS_CMD(0xB0, 0x02),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xC1, 0x0D),
+ _INIT_DCS_CMD(0xC2, 0x17),
+ _INIT_DCS_CMD(0xC3, 0x26),
+ _INIT_DCS_CMD(0xC4, 0x31),
+ _INIT_DCS_CMD(0xC5, 0x1C),
+ _INIT_DCS_CMD(0xC6, 0x2C),
+ _INIT_DCS_CMD(0xC7, 0x33),
+ _INIT_DCS_CMD(0xC8, 0x31),
+ _INIT_DCS_CMD(0xC9, 0x37),
+ _INIT_DCS_CMD(0xCA, 0x37),
+ _INIT_DCS_CMD(0xCB, 0x37),
+ _INIT_DCS_CMD(0xCC, 0x39),
+ _INIT_DCS_CMD(0xCD, 0x2E),
+ _INIT_DCS_CMD(0xCE, 0x2F),
+ _INIT_DCS_CMD(0xCF, 0x2F),
+ _INIT_DCS_CMD(0xD0, 0x07),
+ _INIT_DCS_CMD(0xD2, 0x00),
+ _INIT_DCS_CMD(0xD3, 0x0D),
+ _INIT_DCS_CMD(0xD4, 0x17),
+ _INIT_DCS_CMD(0xD5, 0x26),
+ _INIT_DCS_CMD(0xD6, 0x31),
+ _INIT_DCS_CMD(0xD7, 0x3F),
+ _INIT_DCS_CMD(0xD8, 0x3F),
+ _INIT_DCS_CMD(0xD9, 0x3F),
+ _INIT_DCS_CMD(0xDA, 0x3F),
+ _INIT_DCS_CMD(0xDB, 0x37),
+ _INIT_DCS_CMD(0xDC, 0x37),
+ _INIT_DCS_CMD(0xDD, 0x37),
+ _INIT_DCS_CMD(0xDE, 0x39),
+ _INIT_DCS_CMD(0xDF, 0x2E),
+ _INIT_DCS_CMD(0xE0, 0x2F),
+ _INIT_DCS_CMD(0xE1, 0x2F),
+ _INIT_DCS_CMD(0xE2, 0x07),
+ _INIT_DCS_CMD(0xB0, 0x03),
+ _INIT_DCS_CMD(0xC8, 0x0B),
+ _INIT_DCS_CMD(0xC9, 0x07),
+ _INIT_DCS_CMD(0xC3, 0x00),
+ _INIT_DCS_CMD(0xE7, 0x00),
+ _INIT_DCS_CMD(0xC5, 0x2A),
+ _INIT_DCS_CMD(0xDE, 0x2A),
+ _INIT_DCS_CMD(0xCA, 0x43),
+ _INIT_DCS_CMD(0xC9, 0x07),
+ _INIT_DCS_CMD(0xE4, 0xC0),
+ _INIT_DCS_CMD(0xE5, 0x0D),
+ _INIT_DCS_CMD(0xCB, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x06),
+ _INIT_DCS_CMD(0xB8, 0xA5),
+ _INIT_DCS_CMD(0xC0, 0xA5),
+ _INIT_DCS_CMD(0xC7, 0x0F),
+ _INIT_DCS_CMD(0xD5, 0x32),
+ _INIT_DCS_CMD(0xB8, 0x00),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xBC, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x07),
+ _INIT_DCS_CMD(0xB1, 0x00),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x0F),
+ _INIT_DCS_CMD(0xB4, 0x25),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4E),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x97),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x22),
+ _INIT_DCS_CMD(0xBB, 0xA4),
+ _INIT_DCS_CMD(0xBC, 0x2B),
+ _INIT_DCS_CMD(0xBD, 0x2F),
+ _INIT_DCS_CMD(0xBE, 0xA9),
+ _INIT_DCS_CMD(0xBF, 0x25),
+ _INIT_DCS_CMD(0xC0, 0x61),
+ _INIT_DCS_CMD(0xC1, 0x97),
+ _INIT_DCS_CMD(0xC2, 0xB2),
+ _INIT_DCS_CMD(0xC3, 0xCD),
+ _INIT_DCS_CMD(0xC4, 0xD9),
+ _INIT_DCS_CMD(0xC5, 0xE7),
+ _INIT_DCS_CMD(0xC6, 0xF4),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x08),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x05),
+ _INIT_DCS_CMD(0xB3, 0x11),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x98),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x23),
+ _INIT_DCS_CMD(0xBB, 0xA6),
+ _INIT_DCS_CMD(0xBC, 0x2C),
+ _INIT_DCS_CMD(0xBD, 0x30),
+ _INIT_DCS_CMD(0xBE, 0xAA),
+ _INIT_DCS_CMD(0xBF, 0x26),
+ _INIT_DCS_CMD(0xC0, 0x62),
+ _INIT_DCS_CMD(0xC1, 0x9B),
+ _INIT_DCS_CMD(0xC2, 0xB5),
+ _INIT_DCS_CMD(0xC3, 0xCF),
+ _INIT_DCS_CMD(0xC4, 0xDB),
+ _INIT_DCS_CMD(0xC5, 0xE8),
+ _INIT_DCS_CMD(0xC6, 0xF5),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x09),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x16),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x3B),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x73),
+ _INIT_DCS_CMD(0xB8, 0x99),
+ _INIT_DCS_CMD(0xB9, 0xE0),
+ _INIT_DCS_CMD(0xBA, 0x26),
+ _INIT_DCS_CMD(0xBB, 0xAD),
+ _INIT_DCS_CMD(0xBC, 0x36),
+ _INIT_DCS_CMD(0xBD, 0x3A),
+ _INIT_DCS_CMD(0xBE, 0xAE),
+ _INIT_DCS_CMD(0xBF, 0x2A),
+ _INIT_DCS_CMD(0xC0, 0x66),
+ _INIT_DCS_CMD(0xC1, 0x9E),
+ _INIT_DCS_CMD(0xC2, 0xB8),
+ _INIT_DCS_CMD(0xC3, 0xD1),
+ _INIT_DCS_CMD(0xC4, 0xDD),
+ _INIT_DCS_CMD(0xC5, 0xE9),
+ _INIT_DCS_CMD(0xC6, 0xF6),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0A),
+ _INIT_DCS_CMD(0xB1, 0x00),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x0F),
+ _INIT_DCS_CMD(0xB4, 0x25),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4E),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x97),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x22),
+ _INIT_DCS_CMD(0xBB, 0xA4),
+ _INIT_DCS_CMD(0xBC, 0x2B),
+ _INIT_DCS_CMD(0xBD, 0x2F),
+ _INIT_DCS_CMD(0xBE, 0xA9),
+ _INIT_DCS_CMD(0xBF, 0x25),
+ _INIT_DCS_CMD(0xC0, 0x61),
+ _INIT_DCS_CMD(0xC1, 0x97),
+ _INIT_DCS_CMD(0xC2, 0xB2),
+ _INIT_DCS_CMD(0xC3, 0xCD),
+ _INIT_DCS_CMD(0xC4, 0xD9),
+ _INIT_DCS_CMD(0xC5, 0xE7),
+ _INIT_DCS_CMD(0xC6, 0xF4),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0B),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x05),
+ _INIT_DCS_CMD(0xB3, 0x11),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x98),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x23),
+ _INIT_DCS_CMD(0xBB, 0xA6),
+ _INIT_DCS_CMD(0xBC, 0x2C),
+ _INIT_DCS_CMD(0xBD, 0x30),
+ _INIT_DCS_CMD(0xBE, 0xAA),
+ _INIT_DCS_CMD(0xBF, 0x26),
+ _INIT_DCS_CMD(0xC0, 0x62),
+ _INIT_DCS_CMD(0xC1, 0x9B),
+ _INIT_DCS_CMD(0xC2, 0xB5),
+ _INIT_DCS_CMD(0xC3, 0xCF),
+ _INIT_DCS_CMD(0xC4, 0xDB),
+ _INIT_DCS_CMD(0xC5, 0xE8),
+ _INIT_DCS_CMD(0xC6, 0xF5),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0C),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x16),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x3B),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x73),
+ _INIT_DCS_CMD(0xB8, 0x99),
+ _INIT_DCS_CMD(0xB9, 0xE0),
+ _INIT_DCS_CMD(0xBA, 0x26),
+ _INIT_DCS_CMD(0xBB, 0xAD),
+ _INIT_DCS_CMD(0xBC, 0x36),
+ _INIT_DCS_CMD(0xBD, 0x3A),
+ _INIT_DCS_CMD(0xBE, 0xAE),
+ _INIT_DCS_CMD(0xBF, 0x2A),
+ _INIT_DCS_CMD(0xC0, 0x66),
+ _INIT_DCS_CMD(0xC1, 0x9E),
+ _INIT_DCS_CMD(0xC2, 0xB8),
+ _INIT_DCS_CMD(0xC3, 0xD1),
+ _INIT_DCS_CMD(0xC4, 0xDD),
+ _INIT_DCS_CMD(0xC5, 0xE9),
+ _INIT_DCS_CMD(0xC6, 0xF6),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB3, 0x08),
+ _INIT_DCS_CMD(0xB0, 0x04),
+ _INIT_DCS_CMD(0xB8, 0x68),
+ _INIT_DELAY_CMD(150),
+ {},
+};
+
+static const struct panel_init_cmd auo_kd101n80_45na_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(120),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(120),
+ {},
+};
+
+static const struct panel_init_cmd auo_b101uan08_3_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xC0, 0x48),
+ _INIT_DCS_CMD(0xC1, 0x48),
+ _INIT_DCS_CMD(0xC2, 0x47),
+ _INIT_DCS_CMD(0xC3, 0x47),
+ _INIT_DCS_CMD(0xC4, 0x46),
+ _INIT_DCS_CMD(0xC5, 0x46),
+ _INIT_DCS_CMD(0xC6, 0x45),
+ _INIT_DCS_CMD(0xC7, 0x45),
+ _INIT_DCS_CMD(0xC8, 0x64),
+ _INIT_DCS_CMD(0xC9, 0x64),
+ _INIT_DCS_CMD(0xCA, 0x4F),
+ _INIT_DCS_CMD(0xCB, 0x4F),
+ _INIT_DCS_CMD(0xCC, 0x40),
+ _INIT_DCS_CMD(0xCD, 0x40),
+ _INIT_DCS_CMD(0xCE, 0x66),
+ _INIT_DCS_CMD(0xCF, 0x66),
+ _INIT_DCS_CMD(0xD0, 0x4F),
+ _INIT_DCS_CMD(0xD1, 0x4F),
+ _INIT_DCS_CMD(0xD2, 0x41),
+ _INIT_DCS_CMD(0xD3, 0x41),
+ _INIT_DCS_CMD(0xD4, 0x48),
+ _INIT_DCS_CMD(0xD5, 0x48),
+ _INIT_DCS_CMD(0xD6, 0x47),
+ _INIT_DCS_CMD(0xD7, 0x47),
+ _INIT_DCS_CMD(0xD8, 0x46),
+ _INIT_DCS_CMD(0xD9, 0x46),
+ _INIT_DCS_CMD(0xDA, 0x45),
+ _INIT_DCS_CMD(0xDB, 0x45),
+ _INIT_DCS_CMD(0xDC, 0x64),
+ _INIT_DCS_CMD(0xDD, 0x64),
+ _INIT_DCS_CMD(0xDE, 0x4F),
+ _INIT_DCS_CMD(0xDF, 0x4F),
+ _INIT_DCS_CMD(0xE0, 0x40),
+ _INIT_DCS_CMD(0xE1, 0x40),
+ _INIT_DCS_CMD(0xE2, 0x66),
+ _INIT_DCS_CMD(0xE3, 0x66),
+ _INIT_DCS_CMD(0xE4, 0x4F),
+ _INIT_DCS_CMD(0xE5, 0x4F),
+ _INIT_DCS_CMD(0xE6, 0x41),
+ _INIT_DCS_CMD(0xE7, 0x41),
+ _INIT_DELAY_CMD(150),
+ {},
+};
+
+static inline struct boe_panel *to_boe_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_panel, base);
+}
+
+static int boe_panel_init_dcs_cmd(struct boe_panel *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ struct drm_panel *panel = &boe->base;
+ int i, err = 0;
+
+ if (boe->desc->init_cmds) {
+ const struct panel_init_cmd *init_cmds = boe->desc->init_cmds;
+
+ for (i = 0; init_cmds[i].len != 0; i++) {
+ const struct panel_init_cmd *cmd = &init_cmds[i];
+
+ switch (cmd->type) {
+ case DELAY_CMD:
+ msleep(cmd->data[0]);
+ err = 0;
+ break;
+
+ case INIT_DCS_CMD:
+ err = mipi_dsi_dcs_write(dsi, cmd->data[0],
+ cmd->len <= 1 ? NULL :
+ &cmd->data[1],
+ cmd->len - 1);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to write command %u\n", i);
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ int ret;
+
+ if (!boe->prepared)
+ return 0;
+
+ ret = boe_panel_enter_sleep_mode(boe);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+ return ret;
+ }
+
+ msleep(150);
+
+ if (boe->desc->discharge_on_disable) {
+ regulator_disable(boe->avee);
+ regulator_disable(boe->avdd);
+ usleep_range(5000, 7000);
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ } else {
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(500, 1000);
+ regulator_disable(boe->avee);
+ regulator_disable(boe->avdd);
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ }
+
+ boe->prepared = false;
+
+ return 0;
+}
+
+static int boe_panel_prepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ int ret;
+
+ if (boe->prepared)
+ return 0;
+
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(1000, 1500);
+
+ ret = regulator_enable(boe->pp1800);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 5000);
+
+ ret = regulator_enable(boe->avdd);
+ if (ret < 0)
+ goto poweroff1v8;
+ ret = regulator_enable(boe->avee);
+ if (ret < 0)
+ goto poweroffavdd;
+
+ usleep_range(5000, 10000);
+
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(6000, 10000);
+
+ ret = boe_panel_init_dcs_cmd(boe);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to init panel: %d\n", ret);
+ goto poweroff;
+ }
+
+ boe->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(boe->avee);
+poweroffavdd:
+ regulator_disable(boe->avdd);
+poweroff1v8:
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ gpiod_set_value(boe->enable_gpio, 0);
+
+ return ret;
+}
+
+static int boe_panel_enable(struct drm_panel *panel)
+{
+ msleep(130);
+ return 0;
+}
+
+static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
+ .clock = 159425,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 100,
+ .hsync_end = 1200 + 100 + 40,
+ .htotal = 1200 + 100 + 40 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc boe_tv101wum_nl6_desc = {
+ .modes = &boe_tv101wum_nl6_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+ .discharge_on_disable = false,
+};
+
+static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
+ .clock = 157000,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 36,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 16,
+ .vsync_end = 1920 + 16 + 4,
+ .vtotal = 1920 + 16 + 4 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_kd101n80_45na_desc = {
+ .modes = &auo_kd101n80_45na_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_kd101n80_45na_init_cmd,
+ .discharge_on_disable = true,
+};
+
+static const struct drm_display_mode boe_tv101wum_n53_default_mode = {
+ .clock = 159916,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 60,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 20,
+ .vsync_end = 1920 + 20 + 4,
+ .vtotal = 1920 + 20 + 4 + 10,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv101wum_n53_desc = {
+ .modes = &boe_tv101wum_n53_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+};
+
+static const struct drm_display_mode auo_b101uan08_3_default_mode = {
+ .clock = 159667,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 4,
+ .htotal = 1200 + 60 + 4 + 80,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 34,
+ .vsync_end = 1920 + 34 + 2,
+ .vtotal = 1920 + 34 + 2 + 24,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc auo_b101uan08_3_desc = {
+ .modes = &auo_b101uan08_3_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_b101uan08_3_init_cmd,
+};
+
+static int boe_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ const struct drm_display_mode *m = boe->desc->modes;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ return -ENOMEM;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = boe->desc->size.width_mm;
+ connector->display_info.height_mm = boe->desc->size.height_mm;
+ connector->display_info.bpc = boe->desc->bpc;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs boe_panel_funcs = {
+ .unprepare = boe_panel_unprepare,
+ .prepare = boe_panel_prepare,
+ .enable = boe_panel_enable,
+ .get_modes = boe_panel_get_modes,
+};
+
+static int boe_panel_add(struct boe_panel *boe)
+{
+ struct device *dev = &boe->dsi->dev;
+ int err;
+
+ boe->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(boe->avdd))
+ return PTR_ERR(boe->avdd);
+
+ boe->avee = devm_regulator_get(dev, "avee");
+ if (IS_ERR(boe->avee))
+ return PTR_ERR(boe->avee);
+
+ boe->pp1800 = devm_regulator_get(dev, "pp1800");
+ if (IS_ERR(boe->pp1800))
+ return PTR_ERR(boe->pp1800);
+
+ boe->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(boe->enable_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(boe->enable_gpio));
+ return PTR_ERR(boe->enable_gpio);
+ }
+
+ gpiod_set_value(boe->enable_gpio, 0);
+
+ drm_panel_init(&boe->base, dev, &boe_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&boe->base);
+ if (err)
+ return err;
+
+ boe->base.funcs = &boe_panel_funcs;
+ boe->base.dev = &boe->dsi->dev;
+
+ return drm_panel_add(&boe->base);
+}
+
+static int boe_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe;
+ int ret;
+ const struct panel_desc *desc;
+
+ boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
+ if (!boe)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->lanes = desc->lanes;
+ dsi->format = desc->format;
+ dsi->mode_flags = desc->mode_flags;
+ boe->desc = desc;
+ boe->dsi = dsi;
+ ret = boe_panel_add(boe);
+ if (ret < 0)
+ return ret;
+
+ mipi_dsi_set_drvdata(dsi, boe);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ drm_panel_remove(&boe->base);
+
+ return ret;
+}
+
+static void boe_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&boe->base);
+ drm_panel_unprepare(&boe->base);
+}
+
+static int boe_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ boe_panel_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ if (boe->base.dev)
+ drm_panel_remove(&boe->base);
+
+ return 0;
+}
+
+static const struct of_device_id boe_of_match[] = {
+ { .compatible = "boe,tv101wum-nl6",
+ .data = &boe_tv101wum_nl6_desc
+ },
+ { .compatible = "auo,kd101n80-45na",
+ .data = &auo_kd101n80_45na_desc
+ },
+ { .compatible = "boe,tv101wum-n53",
+ .data = &boe_tv101wum_n53_desc
+ },
+ { .compatible = "auo,b101uan08.3",
+ .data = &auo_b101uan08_3_desc
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_of_match);
+
+static struct mipi_dsi_driver boe_panel_driver = {
+ .driver = {
+ .name = "panel-boe-tv101wum-nl6",
+ .of_match_table = boe_of_match,
+ },
+ .probe = boe_panel_probe,
+ .remove = boe_panel_remove,
+ .shutdown = boe_panel_shutdown,
+};
+module_mipi_dsi_driver(boe_panel_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>");
+MODULE_DESCRIPTION("BOE tv101wum-nl6 1200x1920 video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
new file mode 100644
index 000000000000..711ded453c44
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Elida kd35t133 5.5" MIPI-DSI panel driver
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on
+ *
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* Manufacturer specific commands sent via DSI */
+#define KD35T133_CMD_INTERFACEMODECTRL 0xb0
+#define KD35T133_CMD_FRAMERATECTRL 0xb1
+#define KD35T133_CMD_DISPLAYINVERSIONCTRL 0xb4
+#define KD35T133_CMD_DISPLAYFUNCTIONCTRL 0xb6
+#define KD35T133_CMD_POWERCONTROL1 0xc0
+#define KD35T133_CMD_POWERCONTROL2 0xc1
+#define KD35T133_CMD_VCOMCONTROL 0xc5
+#define KD35T133_CMD_POSITIVEGAMMA 0xe0
+#define KD35T133_CMD_NEGATIVEGAMMA 0xe1
+#define KD35T133_CMD_SETIMAGEFUNCTION 0xe9
+#define KD35T133_CMD_ADJUSTCONTROL3 0xf7
+
+struct kd35t133 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vdd;
+ struct regulator *iovcc;
+ bool prepared;
+};
+
+static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
+{
+ return container_of(panel, struct kd35t133, panel);
+}
+
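+/*
+ * Purely illustrative expansion (not code the driver runs): a call such
+ * as dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41) becomes a
+ * mipi_dsi_dcs_write(dsi, 0xc1, d, 1) with d[] = { 0x41 }, and returns
+ * from the enclosing function on error - so the macro may only be used
+ * inside functions returning int.
+ */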
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int kd35t133_init_sequence(struct kd35t133 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+
+ /*
+ * Init sequence was supplied by the panel vendor with minimal
+ * documentation.
+ */
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
+ 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
+ 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
+ 0x20, 0x02);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
+ 0xa9, 0x51, 0x2c, 0x82);
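+ /*
+ * MIPI_DCS_ENTER_INVERT_MODE carries no parameter bytes, so it is
+ * sent directly rather than through the macro above; its return
+ * value is not checked.
+ */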
+ mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int kd35t133_unprepare(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vdd);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int kd35t133_prepare(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vdd);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vdd supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ msleep(20);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ msleep(20);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(250);
+
+ ret = kd35t133_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vdd:
+ regulator_disable(ctx->vdd);
+ return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 320,
+ .hsync_start = 320 + 130,
+ .hsync_end = 320 + 130 + 4,
+ .htotal = 320 + 130 + 4 + 130,
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 1,
+ .vtotal = 480 + 2 + 1 + 2,
+ .vrefresh = 60,
+ .clock = 17000,
+ .width_mm = 42,
+ .height_mm = 82,
+};
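+
+/*
+ * Illustrative cross-check: 17 MHz / (584 * 485) ~= 60.02 Hz, matching
+ * the advertised 60 Hz refresh rate (htotal = 584, vtotal = 485).
+ */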
+
+static int kd35t133_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs kd35t133_funcs = {
+ .unprepare = kd35t133_unprepare,
+ .prepare = kd35t133_prepare,
+ .get_modes = kd35t133_get_modes,
+};
+
+static int kd35t133_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct kd35t133 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(ctx->vdd)) {
+ ret = PTR_ERR(ctx->vdd);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vdd regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 1;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+}
+
+static int kd35t133_remove(struct mipi_dsi_device *dsi)
+{
+ struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ kd35t133_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id kd35t133_of_match[] = {
+ { .compatible = "elida,kd35t133" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kd35t133_of_match);
+
+static struct mipi_dsi_driver kd35t133_driver = {
+ .driver = {
+ .name = "panel-elida-kd35t133",
+ .of_match_table = kd35t133_of_match,
+ },
+ .probe = kd35t133_probe,
+ .remove = kd35t133_remove,
+ .shutdown = kd35t133_shutdown,
+};
+module_mipi_dsi_driver(kd35t133_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Elida kd35t133 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
new file mode 100644
index 000000000000..fddbfddf6566
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2020 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define K101_IM2BA02_INIT_CMD_LEN 2
+
+static const char * const regulator_names[] = {
+ "dvdd",
+ "avdd",
+ "cvdd"
+};
+
+struct k101_im2ba02 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+ struct gpio_desc *reset;
+};
+
+static inline struct k101_im2ba02 *panel_to_k101_im2ba02(struct drm_panel *panel)
+{
+ return container_of(panel, struct k101_im2ba02, panel);
+}
+
+struct k101_im2ba02_init_cmd {
+ u8 data[K101_IM2BA02_INIT_CMD_LEN];
+};
+
+static const struct k101_im2ba02_init_cmd k101_im2ba02_init_cmds[] = {
+ /* Switch to page 0 */
+ { .data = { 0xE0, 0x00 } },
+
+ /* Seems to be some password */
+ { .data = { 0xE1, 0x93 } },
+ { .data = { 0xE2, 0x65 } },
+ { .data = { 0xE3, 0xF8 } },
+
+ /* Lane number, 0x02 - 3 lanes, 0x03 - 4 lanes */
+ { .data = { 0x80, 0x03 } },
+
+ /* Sequence control */
+ { .data = { 0x70, 0x02 } },
+ { .data = { 0x71, 0x23 } },
+ { .data = { 0x72, 0x06 } },
+
+ /* Switch to page 1 */
+ { .data = { 0xE0, 0x01 } },
+
+ /* Set VCOM */
+ { .data = { 0x00, 0x00 } },
+ { .data = { 0x01, 0x66 } },
+ /* Set VCOM_Reverse */
+ { .data = { 0x03, 0x00 } },
+ { .data = { 0x04, 0x25 } },
+
+ /* Set Gamma Power, VG[MS][PN] */
+ { .data = { 0x17, 0x00 } },
+ { .data = { 0x18, 0x6D } },
+ { .data = { 0x19, 0x00 } },
+ { .data = { 0x1A, 0x00 } },
+ { .data = { 0x1B, 0xBF } }, /* VGMN = -4.5V */
+ { .data = { 0x1C, 0x00 } },
+
+ /* Set Gate Power */
+ { .data = { 0x1F, 0x3E } }, /* VGH_R = 15V */
+ { .data = { 0x20, 0x28 } }, /* VGL_R = -11V */
+ { .data = { 0x21, 0x28 } }, /* VGL_R2 = -11V */
+ { .data = { 0x22, 0x0E } }, /* PA[6:4] = 0, PA[0] = 0 */
+
+ /* Set Panel */
+ { .data = { 0x37, 0x09 } }, /* SS = 1, BGR = 1 */
+
+ /* Set RGBCYC */
+ { .data = { 0x38, 0x04 } }, /* JDT = 100 column inversion */
+ { .data = { 0x39, 0x08 } }, /* RGB_N_EQ1 */
+ { .data = { 0x3A, 0x12 } }, /* RGB_N_EQ2 */
+ { .data = { 0x3C, 0x78 } }, /* set EQ3 for TE_H */
+ { .data = { 0x3D, 0xFF } }, /* set CHGEN_ON */
+ { .data = { 0x3E, 0xFF } }, /* set CHGEN_OFF */
+ { .data = { 0x3F, 0x7F } }, /* set CHGEN_OFF2 */
+
+ /* Set TCON parameter */
+ { .data = { 0x40, 0x06 } }, /* RSO = 800 points */
+ { .data = { 0x41, 0xA0 } }, /* LN = 1280 lines */
+
+ /* Set power voltage */
+ { .data = { 0x55, 0x0F } }, /* DCDCM */
+ { .data = { 0x56, 0x01 } },
+ { .data = { 0x57, 0x69 } },
+ { .data = { 0x58, 0x0A } },
+ { .data = { 0x59, 0x0A } },
+ { .data = { 0x5A, 0x45 } },
+ { .data = { 0x5B, 0x15 } },
+
+ /* Set gamma */
+ { .data = { 0x5D, 0x7C } },
+ { .data = { 0x5E, 0x65 } },
+ { .data = { 0x5F, 0x55 } },
+ { .data = { 0x60, 0x49 } },
+ { .data = { 0x61, 0x44 } },
+ { .data = { 0x62, 0x35 } },
+ { .data = { 0x63, 0x3A } },
+ { .data = { 0x64, 0x23 } },
+ { .data = { 0x65, 0x3D } },
+ { .data = { 0x66, 0x3C } },
+ { .data = { 0x67, 0x3D } },
+ { .data = { 0x68, 0x5D } },
+ { .data = { 0x69, 0x4D } },
+ { .data = { 0x6A, 0x56 } },
+ { .data = { 0x6B, 0x48 } },
+ { .data = { 0x6C, 0x45 } },
+ { .data = { 0x6D, 0x38 } },
+ { .data = { 0x6E, 0x25 } },
+ { .data = { 0x6F, 0x00 } },
+ { .data = { 0x70, 0x7C } },
+ { .data = { 0x71, 0x65 } },
+ { .data = { 0x72, 0x55 } },
+ { .data = { 0x73, 0x49 } },
+ { .data = { 0x74, 0x44 } },
+ { .data = { 0x75, 0x35 } },
+ { .data = { 0x76, 0x3A } },
+ { .data = { 0x77, 0x23 } },
+ { .data = { 0x78, 0x3D } },
+ { .data = { 0x79, 0x3C } },
+ { .data = { 0x7A, 0x3D } },
+ { .data = { 0x7B, 0x5D } },
+ { .data = { 0x7C, 0x4D } },
+ { .data = { 0x7D, 0x56 } },
+ { .data = { 0x7E, 0x48 } },
+ { .data = { 0x7F, 0x45 } },
+ { .data = { 0x80, 0x38 } },
+ { .data = { 0x81, 0x25 } },
+ { .data = { 0x82, 0x00 } },
+
+ /* Switch to page 2, for GIP */
+ { .data = { 0xE0, 0x02 } },
+
+ { .data = { 0x00, 0x1E } },
+ { .data = { 0x01, 0x1E } },
+ { .data = { 0x02, 0x41 } },
+ { .data = { 0x03, 0x41 } },
+ { .data = { 0x04, 0x43 } },
+ { .data = { 0x05, 0x43 } },
+ { .data = { 0x06, 0x1F } },
+ { .data = { 0x07, 0x1F } },
+ { .data = { 0x08, 0x1F } },
+ { .data = { 0x09, 0x1F } },
+ { .data = { 0x0A, 0x1E } },
+ { .data = { 0x0B, 0x1E } },
+ { .data = { 0x0C, 0x1F } },
+ { .data = { 0x0D, 0x47 } },
+ { .data = { 0x0E, 0x47 } },
+ { .data = { 0x0F, 0x45 } },
+ { .data = { 0x10, 0x45 } },
+ { .data = { 0x11, 0x4B } },
+ { .data = { 0x12, 0x4B } },
+ { .data = { 0x13, 0x49 } },
+ { .data = { 0x14, 0x49 } },
+ { .data = { 0x15, 0x1F } },
+
+ { .data = { 0x16, 0x1E } },
+ { .data = { 0x17, 0x1E } },
+ { .data = { 0x18, 0x40 } },
+ { .data = { 0x19, 0x40 } },
+ { .data = { 0x1A, 0x42 } },
+ { .data = { 0x1B, 0x42 } },
+ { .data = { 0x1C, 0x1F } },
+ { .data = { 0x1D, 0x1F } },
+ { .data = { 0x1E, 0x1F } },
+ { .data = { 0x1F, 0x1F } },
+ { .data = { 0x20, 0x1E } },
+ { .data = { 0x21, 0x1E } },
+ { .data = { 0x22, 0x1F } },
+ { .data = { 0x23, 0x46 } },
+ { .data = { 0x24, 0x46 } },
+ { .data = { 0x25, 0x44 } },
+ { .data = { 0x26, 0x44 } },
+ { .data = { 0x27, 0x4A } },
+ { .data = { 0x28, 0x4A } },
+ { .data = { 0x29, 0x48 } },
+ { .data = { 0x2A, 0x48 } },
+ { .data = { 0x2B, 0x1F } },
+
+ { .data = { 0x2C, 0x1F } },
+ { .data = { 0x2D, 0x1F } },
+ { .data = { 0x2E, 0x42 } },
+ { .data = { 0x2F, 0x42 } },
+ { .data = { 0x30, 0x40 } },
+ { .data = { 0x31, 0x40 } },
+ { .data = { 0x32, 0x1E } },
+ { .data = { 0x33, 0x1E } },
+ { .data = { 0x34, 0x1F } },
+ { .data = { 0x35, 0x1F } },
+ { .data = { 0x36, 0x1E } },
+ { .data = { 0x37, 0x1E } },
+ { .data = { 0x38, 0x1F } },
+ { .data = { 0x39, 0x48 } },
+ { .data = { 0x3A, 0x48 } },
+ { .data = { 0x3B, 0x4A } },
+ { .data = { 0x3C, 0x4A } },
+ { .data = { 0x3D, 0x44 } },
+ { .data = { 0x3E, 0x44 } },
+ { .data = { 0x3F, 0x46 } },
+ { .data = { 0x40, 0x46 } },
+ { .data = { 0x41, 0x1F } },
+
+ { .data = { 0x42, 0x1F } },
+ { .data = { 0x43, 0x1F } },
+ { .data = { 0x44, 0x43 } },
+ { .data = { 0x45, 0x43 } },
+ { .data = { 0x46, 0x41 } },
+ { .data = { 0x47, 0x41 } },
+ { .data = { 0x48, 0x1E } },
+ { .data = { 0x49, 0x1E } },
+ { .data = { 0x4A, 0x1E } },
+ { .data = { 0x4B, 0x1F } },
+ { .data = { 0x4C, 0x1E } },
+ { .data = { 0x4D, 0x1E } },
+ { .data = { 0x4E, 0x1F } },
+ { .data = { 0x4F, 0x49 } },
+ { .data = { 0x50, 0x49 } },
+ { .data = { 0x51, 0x4B } },
+ { .data = { 0x52, 0x4B } },
+ { .data = { 0x53, 0x45 } },
+ { .data = { 0x54, 0x45 } },
+ { .data = { 0x55, 0x47 } },
+ { .data = { 0x56, 0x47 } },
+ { .data = { 0x57, 0x1F } },
+
+ { .data = { 0x58, 0x10 } },
+ { .data = { 0x59, 0x00 } },
+ { .data = { 0x5A, 0x00 } },
+ { .data = { 0x5B, 0x30 } },
+ { .data = { 0x5C, 0x02 } },
+ { .data = { 0x5D, 0x40 } },
+ { .data = { 0x5E, 0x01 } },
+ { .data = { 0x5F, 0x02 } },
+ { .data = { 0x60, 0x30 } },
+ { .data = { 0x61, 0x01 } },
+ { .data = { 0x62, 0x02 } },
+ { .data = { 0x63, 0x6A } },
+ { .data = { 0x64, 0x6A } },
+ { .data = { 0x65, 0x05 } },
+ { .data = { 0x66, 0x12 } },
+ { .data = { 0x67, 0x74 } },
+ { .data = { 0x68, 0x04 } },
+ { .data = { 0x69, 0x6A } },
+ { .data = { 0x6A, 0x6A } },
+ { .data = { 0x6B, 0x08 } },
+
+ { .data = { 0x6C, 0x00 } },
+ { .data = { 0x6D, 0x04 } },
+ { .data = { 0x6E, 0x04 } },
+ { .data = { 0x6F, 0x88 } },
+ { .data = { 0x70, 0x00 } },
+ { .data = { 0x71, 0x00 } },
+ { .data = { 0x72, 0x06 } },
+ { .data = { 0x73, 0x7B } },
+ { .data = { 0x74, 0x00 } },
+ { .data = { 0x75, 0x07 } },
+ { .data = { 0x76, 0x00 } },
+ { .data = { 0x77, 0x5D } },
+ { .data = { 0x78, 0x17 } },
+ { .data = { 0x79, 0x1F } },
+ { .data = { 0x7A, 0x00 } },
+ { .data = { 0x7B, 0x00 } },
+ { .data = { 0x7C, 0x00 } },
+ { .data = { 0x7D, 0x03 } },
+ { .data = { 0x7E, 0x7B } },
+
+ { .data = { 0xE0, 0x04 } },
+ { .data = { 0x2B, 0x2B } },
+ { .data = { 0x2E, 0x44 } },
+
+ { .data = { 0xE0, 0x01 } },
+ { .data = { 0x0E, 0x01 } },
+
+ { .data = { 0xE0, 0x03 } },
+ { .data = { 0x98, 0x2F } },
+
+ { .data = { 0xE0, 0x00 } },
+ { .data = { 0xE6, 0x02 } },
+ { .data = { 0xE7, 0x02 } },
+
+ { .data = { 0x11, 0x00 } },
+};
+
+static const struct k101_im2ba02_init_cmd timed_cmds[] = {
+ { .data = { 0x29, 0x00 } },
+ { .data = { 0x35, 0x00 } },
+};
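+
+/*
+ * Of these, only timed_cmds[1] (0x35, set tear on) is written directly,
+ * from k101_im2ba02_enable(); display on (0x29) is issued through
+ * mipi_dsi_dcs_set_display_on() instead.
+ */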
+
+static int k101_im2ba02_prepare(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ unsigned int i;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+
+ msleep(30);
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(50);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(50);
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(200);
+
+ for (i = 0; i < ARRAY_SIZE(k101_im2ba02_init_cmds); i++) {
+ const struct k101_im2ba02_init_cmd *cmd = &k101_im2ba02_init_cmds[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+ if (ret < 0)
+ goto powerdown;
+ }
+
+ return 0;
+
+powerdown:
+ gpiod_set_value(ctx->reset, 0);
+ msleep(50);
+
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ return ret;
+}
+
+static int k101_im2ba02_enable(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ const struct k101_im2ba02_init_cmd *cmd = &timed_cmds[1];
+ int ret;
+
+ msleep(150);
+
+ ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
+ if (ret < 0)
+ return ret;
+
+ msleep(50);
+
+ return mipi_dsi_dcs_write_buffer(ctx->dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+}
+
+static int k101_im2ba02_disable(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int k101_im2ba02_unprepare(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+
+ msleep(200);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(20);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static const struct drm_display_mode k101_im2ba02_default_mode = {
+ .clock = 70000,
+ .vrefresh = 60,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 20,
+ .hsync_end = 800 + 20 + 20,
+ .htotal = 800 + 20 + 20 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 4,
+ .vtotal = 1280 + 16 + 4 + 4,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .width_mm = 136,
+ .height_mm = 217,
+};
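+
+/*
+ * Illustrative cross-check: 70 MHz / (860 * 1304) ~= 62.4 Hz, slightly
+ * above the nominal 60 Hz in .vrefresh (htotal = 860, vtotal = 1304).
+ */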
+
+static int k101_im2ba02_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &k101_im2ba02_default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
+ k101_im2ba02_default_mode.hdisplay,
+ k101_im2ba02_default_mode.vdisplay,
+ k101_im2ba02_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs k101_im2ba02_funcs = {
+ .disable = k101_im2ba02_disable,
+ .unprepare = k101_im2ba02_unprepare,
+ .prepare = k101_im2ba02_prepare,
+ .enable = k101_im2ba02_enable,
+ .get_modes = k101_im2ba02_get_modes,
+};
+
+static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct k101_im2ba02 *ctx;
+ unsigned int i;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get regulators\n");
+ return ret;
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id k101_im2ba02_of_match[] = {
+ { .compatible = "feixin,k101-im2ba02", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, k101_im2ba02_of_match);
+
+static struct mipi_dsi_driver k101_im2ba02_driver = {
+ .probe = k101_im2ba02_dsi_probe,
+ .remove = k101_im2ba02_dsi_remove,
+ .driver = {
+ .name = "feixin-k101-im2ba02",
+ .of_match_table = k101_im2ba02_of_match,
+ },
+};
+module_mipi_dsi_driver(k101_im2ba02_driver);
+
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_DESCRIPTION("Feixin K101 IM2BA02 MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index f394d53a7da4..09935520e606 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -540,7 +540,7 @@ static int ili9322_enable(struct drm_panel *panel)
/* Serial RGB modes */
static const struct drm_display_mode srgb_320x240_mode = {
- .clock = 2453500,
+ .clock = 24535,
.hdisplay = 320,
.hsync_start = 320 + 359,
.hsync_end = 320 + 359 + 1,
@@ -554,7 +554,7 @@ static const struct drm_display_mode srgb_320x240_mode = {
};
static const struct drm_display_mode srgb_360x240_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 360,
.hsync_start = 360 + 35,
.hsync_end = 360 + 35 + 1,
@@ -569,7 +569,7 @@ static const struct drm_display_mode srgb_360x240_mode = {
/* This is the only mode listed for parallel RGB in the datasheet */
static const struct drm_display_mode prgb_320x240_mode = {
- .clock = 6400000,
+ .clock = 64000,
.hdisplay = 320,
.hsync_start = 320 + 38,
.hsync_end = 320 + 38 + 1,
@@ -584,7 +584,7 @@ static const struct drm_display_mode prgb_320x240_mode = {
/* YUV modes */
static const struct drm_display_mode yuv_640x320_mode = {
- .clock = 2454000,
+ .clock = 24540,
.hdisplay = 640,
.hsync_start = 640 + 252,
.hsync_end = 640 + 252 + 1,
@@ -598,7 +598,7 @@ static const struct drm_display_mode yuv_640x320_mode = {
};
static const struct drm_display_mode yuv_720x360_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 720,
.hsync_start = 720 + 252,
.hsync_end = 720 + 252 + 1,
@@ -613,7 +613,7 @@ static const struct drm_display_mode yuv_720x360_mode = {
/* BT.656 VGA mode, 640x480 */
static const struct drm_display_mode itu_r_bt_656_640_mode = {
- .clock = 2454000,
+ .clock = 24540,
.hdisplay = 640,
.hsync_start = 640 + 3,
.hsync_end = 640 + 3 + 1,
@@ -628,7 +628,7 @@ static const struct drm_display_mode itu_r_bt_656_640_mode = {
/* BT.656 D1 mode 720x480 */
static const struct drm_display_mode itu_r_bt_656_720_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 720,
.hsync_start = 720 + 3,
.hsync_end = 720 + 3 + 1,
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index b262b53dbd85..5907f2503755 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -197,7 +197,7 @@ static int lg4573_enable(struct drm_panel *panel)
}
static const struct drm_display_mode default_mode = {
- .clock = 27000,
+ .clock = 28341,
.hdisplay = 480,
.hsync_start = 480 + 10,
.hsync_end = 480 + 10 + 59,
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
new file mode 100644
index 000000000000..4a8fa908a2cf
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Novatek NT35510 panel driver
+ * Copyright (C) 2020 Linus Walleij <linus.walleij@linaro.org>
+ * Based on code by Robert Teather (C) 2012 Samsung
+ *
+ * This display driver (and I refer to the physical component NT35510,
+ * not this Linux kernel software driver) can handle:
+ * 480x864, 480x854, 480x800, 480x720 and 480x640 pixel displays.
+ * It has 480x864x24bit SRAM embedded for storing a frame.
+ * When powered on the display is by default in 480x800 mode.
+ *
+ * The actual panels using this component have different names, but
+ * the code needed to set up and configure the panel will be similar,
+ * so they should all use the NT35510 driver with appropriate configuration
+ * per-panel, e.g. for physical size.
+ *
+ * This driver is for the DSI interface to panels using the NT35510.
+ *
+ * The NT35510 can also use an RGB (DPI) interface combined with an
+ * I2C or SPI interface for setting up the NT35510. If this is needed
+ * this panel driver should be refactored to also support that use
+ * case.
+ */
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
+#define MCS_CMD_READ_ID1 0xDA
+#define MCS_CMD_READ_ID2 0xDB
+#define MCS_CMD_READ_ID3 0xDC
+#define MCS_CMD_MTP_READ_SETTING 0xF8 /* Uncertain about name */
+#define MCS_CMD_MTP_READ_PARAM 0xFF /* Uncertain about name */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 0.
+ */
+#define NT35510_P0_DOPCTR 0xB1
+#define NT35510_P0_SDHDTCTR 0xB6
+#define NT35510_P0_GSEQCTR 0xB7
+#define NT35510_P0_SDEQCTR 0xB8
+#define NT35510_P0_SDVPCTR 0xBA
+#define NT35510_P0_DPFRCTR1 0xBD
+#define NT35510_P0_DPFRCTR2 0xBE
+#define NT35510_P0_DPFRCTR3 0xBF
+#define NT35510_P0_DPMCTR12 0xCC
+
+#define NT35510_P0_DOPCTR_LEN 2
+#define NT35510_P0_GSEQCTR_LEN 2
+#define NT35510_P0_SDEQCTR_LEN 4
+#define NT35510_P0_SDVPCTR_LEN 1
+#define NT35510_P0_DPFRCTR1_LEN 5
+#define NT35510_P0_DPFRCTR2_LEN 5
+#define NT35510_P0_DPFRCTR3_LEN 5
+#define NT35510_P0_DPMCTR12_LEN 3
+
+#define NT35510_DOPCTR_0_RAMKP BIT(7) /* Contents kept in sleep */
+#define NT35510_DOPCTR_0_DSITE BIT(6) /* Enable TE signal */
+#define NT35510_DOPCTR_0_DSIG BIT(5) /* Enable generic read/write */
+#define NT35510_DOPCTR_0_DSIM BIT(4) /* Enable video mode on DSI */
+#define NT35510_DOPCTR_0_EOTP BIT(3) /* Support EoTP */
+#define NT35510_DOPCTR_0_N565 BIT(2) /* RGB or BGR pixel format */
+#define NT35510_DOPCTR_1_TW_PWR_SEL BIT(4) /* TE power selector */
+#define NT35510_DOPCTR_1_CRGB BIT(3) /* RGB or BGR byte order */
+#define NT35510_DOPCTR_1_CTB BIT(2) /* Vertical scanning direction */
+#define NT35510_DOPCTR_1_CRL BIT(1) /* Source driver data shift */
+#define NT35510_P0_SDVPCTR_PRG BIT(2) /* 0 = normal operation, 1 = VGLO */
+#define NT35510_P0_SDVPCTR_AVDD 0 /* source driver output = AVDD */
+#define NT35510_P0_SDVPCTR_OFFCOL 1 /* source driver output = off color */
+#define NT35510_P0_SDVPCTR_AVSS 2 /* source driver output = AVSS */
+#define NT35510_P0_SDVPCTR_HI_Z 3 /* source driver output = High impedance */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 1.
+ */
+#define NT35510_P1_SETAVDD 0xB0
+#define NT35510_P1_SETAVEE 0xB1
+#define NT35510_P1_SETVCL 0xB2
+#define NT35510_P1_SETVGH 0xB3
+#define NT35510_P1_SETVRGH 0xB4
+#define NT35510_P1_SETVGL 0xB5
+#define NT35510_P1_BT1CTR 0xB6
+#define NT35510_P1_BT2CTR 0xB7
+#define NT35510_P1_BT3CTR 0xB8
+#define NT35510_P1_BT4CTR 0xB9 /* VGH boosting times/freq */
+#define NT35510_P1_BT5CTR 0xBA
+#define NT35510_P1_PFMCTR 0xBB
+#define NT35510_P1_SETVGP 0xBC
+#define NT35510_P1_SETVGN 0xBD
+#define NT35510_P1_SETVCMOFF 0xBE
+#define NT35510_P1_VGHCTR 0xBF /* VGH output ctrl */
+#define NT35510_P1_SET_GAMMA_RED_POS 0xD1
+#define NT35510_P1_SET_GAMMA_GREEN_POS 0xD2
+#define NT35510_P1_SET_GAMMA_BLUE_POS 0xD3
+#define NT35510_P1_SET_GAMMA_RED_NEG 0xD4
+#define NT35510_P1_SET_GAMMA_GREEN_NEG 0xD5
+#define NT35510_P1_SET_GAMMA_BLUE_NEG 0xD6
+
+/* AVDD and AVEE setting 3 bytes */
+#define NT35510_P1_AVDD_LEN 3
+#define NT35510_P1_AVEE_LEN 3
+#define NT35510_P1_VGH_LEN 3
+#define NT35510_P1_VGL_LEN 3
+#define NT35510_P1_VGP_LEN 3
+#define NT35510_P1_VGN_LEN 3
+/* BT1CTR through BT5CTR setting 3 bytes */
+#define NT35510_P1_BT1CTR_LEN 3
+#define NT35510_P1_BT2CTR_LEN 3
+#define NT35510_P1_BT4CTR_LEN 3
+#define NT35510_P1_BT5CTR_LEN 3
+/* 52 bytes per gamma curve: 26 points x 2 bytes; positive and negative curves per color */
+#define NT35510_P1_GAMMA_LEN 52
+
+/**
+ * struct nt35510_config - the display-specific NT35510 configuration
+ *
+ * Some of the settings provide an array of bytes, A, B C which mean:
+ * A = normal / idle off mode
+ * B = idle on mode
+ * C = partial / idle off mode
+ *
+ * Gamma correction arrays are 10bit numbers, where two consecutive
+ * bytes make up one point on the gamma correction curve. The points are
+ * not linearly placed along the X axis: we get points 0, 1, 3, 5,
+ * 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160, 192, 208, 224, 232,
+ * 240, 244, 248, 250, 252, 254, 255. The voltage tuples form
+ * V0, V1, V3 ... V255, with 0x0000 being the lowest voltage and
+ * 0x03FF being the highest voltage.
+ *
+ * Each value must be strictly higher than the previous value, forming
+ * a rising curve like this:
+ *
+ * ^
+ * | V255
+ * | V254
+ * | ....
+ * | V5
+ * | V3
+ * | V1
+ * | V0
+ * +------------------------------------------->
+ *
+ * The details about all settings can be found in the NT35510 Application
+ * Note.
+ */
+struct nt35510_config {
+ /**
+ * @width_mm: physical panel width [mm]
+ */
+ u32 width_mm;
+ /**
+ * @height_mm: physical panel height [mm]
+ */
+ u32 height_mm;
+ /**
+ * @mode: the display mode. This is only relevant outside the panel
+ * in video mode; in command mode it configures the internal
+ * timing in the display controller.
+ */
+ const struct drm_display_mode mode;
+ /**
+ * @avdd: setting for AVDD, ranging from 0x00 = 6.5V to 0x14 = 4.5V
+ * in 0.1V steps; the default is 0x05, which means 6.0V
+ */
+ u8 avdd[NT35510_P1_AVDD_LEN];
+ /**
+ * @bt1ctr: setting for boost power control for the AVDD step-up
+ * circuit (1)
+ * bits 0..2 in the lower nibble control PCK, the booster clock
+ * frequency for the step-up circuit:
+ * 0 = Hsync/32
+ * 1 = Hsync/16
+ * 2 = Hsync/8
+ * 3 = Hsync/4
+ * 4 = Hsync/2
+ * 5 = Hsync
+ * 6 = Hsync x 2
+ * 7 = Hsync x 4
+ * bits 4..6 in the upper nibble control BTP, the boosting
+ * amplification for the step-up circuit:
+ * 0 = Disable
+ * 1 = 1.5 x VDDB
+ * 2 = 1.66 x VDDB
+ * 3 = 2 x VDDB
+ * 4 = 2.5 x VDDB
+ * 5 = 3 x VDDB
+ * The defaults are 4 and 4 yielding 0x44
+ */
+ u8 bt1ctr[NT35510_P1_BT1CTR_LEN];
+ /**
+ * @avee: setting for AVEE, ranging from 0x00 = -6.5V to 0x14 = -4.5V
+ * in 0.1V steps; the default is 0x05, which means -6.0V
+ */
+ u8 avee[NT35510_P1_AVEE_LEN];
+ /**
+ * @bt2ctr: setting for boost power control for the AVEE step-up
+ * circuit (2)
+ * bits 0..2 in the lower nibble control NCK, the booster clock
+ * frequency; the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble control BTN, the boosting
+ * amplification for the step-up circuit.
+ * 0 = Disable
+ * 1 = -1.5 x VDDB
+ * 2 = -2 x VDDB
+ * 3 = -2.5 x VDDB
+ * 4 = -3 x VDDB
+ * The defaults are 4 and 3 yielding 0x34
+ */
+ u8 bt2ctr[NT35510_P1_BT2CTR_LEN];
+ /**
+ * @vgh: setting for VGH, ranging from 0x00 = 7.0V to 0x0B = 18.0V
+ * in 1V steps; the default is 0x08, which means 15V
+ */
+ u8 vgh[NT35510_P1_VGH_LEN];
+ /**
+ * @bt4ctr: setting for boost power control for the VGH step-up
+ * circuit (4)
+ * bits 0..2 in the lower nibble control HCK, the booster clock
+ * frequency; the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble control BTH, the boosting
+ * amplification for the step-up circuit.
+ * 0 = AVDD + VDDB
+ * 1 = AVDD - AVEE
+ * 2 = AVDD - AVEE + VDDB
+ * 3 = AVDD x 2 - AVEE
+ * The defaults are 4 and 3 yielding 0x34
+ */
+ u8 bt4ctr[NT35510_P1_BT4CTR_LEN];
+ /**
+ * @vgl: setting for VGL, ranging from 0x00 = -2V to 0x0f = -15V in
+ * 1V steps; the default is 0x08, which means -10V
+ */
+ u8 vgl[NT35510_P1_VGL_LEN];
+ /**
+ * @bt5ctr: setting for boost power control for the VGL step-up
+ * circuit (5)
+ * bits 0..2 in the lower nibble control LCK, the booster clock
+ * frequency; the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble control BTL, the boosting
+ * amplification for the step-up circuit.
+ * 0 = AVEE + VCL
+ * 1 = AVEE - AVDD
+ * 2 = AVEE + VCL - AVDD
+ * 3 = AVEE x 2 - AVDD
+ * The defaults are 3 and 2 yielding 0x32
+ */
+ u8 bt5ctr[NT35510_P1_BT5CTR_LEN];
+ /**
+ * @vgp: setting for VGP, the positive gamma divider voltages
+ * VGMP the high voltage and VGSP the low voltage.
+ * The first byte contains bit 8 of VGMP and VGSP in bits 4 and 0
+ * The second byte contains bit 0..7 of VGMP
+ * The third byte contains bit 0..7 of VGSP
+ * VGMP 0x00 = 3.0V .. 0x108 = 6.3V in steps of 12.5mV
+ * VGSP 0x00 = 0V .. 0x111 = 3.7V in steps of 12.5mV
+ */
+ u8 vgp[NT35510_P1_VGP_LEN];
+ /**
+ * @vgn: setting for VGN, the negative gamma divider voltages,
+ * same layout of bytes as @vgp.
+ */
+ u8 vgn[NT35510_P1_VGN_LEN];
+ /**
+ * @sdeqctr: Source driver control settings, first byte is
+ * 0 for mode 1 and 1 for mode 2. Mode 1 uses two steps and
+ * mode 2 uses three steps meaning EQS3 is not used in mode
+ * 1. Mode 2 is default. The last three parameters are EQS1, EQS2
+ * and EQS3, setting the rise time for each equalizer step:
+ * 0x00 = 0.0 us to 0x0f = 7.5 us in steps of 0.5us. The default
+ * is 0x07 = 3.5 us.
+ */
+ u8 sdeqctr[NT35510_P0_SDEQCTR_LEN];
+ /**
+ * @sdvpctr: power/voltage behaviour during vertical porch time
+ */
+ u8 sdvpctr;
+ /**
+ * @t1: the number of pixel clocks on one scanline; the scanline
+ * takes the value + 1 clock ticks, so the range 0x100..0x3FF
+ * gives 257..1024 ticks.
+ */
+ u16 t1;
+ /**
+ * @vbp: vertical back porch toward the PANEL. Note: not toward
+ * the DSI host; these are separate interfaces, in from the DSI host
+ * and out to the panel.
+ */
+ u8 vbp;
+ /**
+ * @vfp: vertical front porch toward the PANEL.
+ */
+ u8 vfp;
+ /**
+ * @psel: pixel clock divisor: 0 = 1, 1 = 2, 2 = 4, 3 = 8.
+ */
+ u8 psel;
+ /**
+ * @dpmctr12: Display timing control 12
+ * Byte 1 bit 4 selects LVGL voltage level: 0 = VGLX, 1 = VGL_REG
+ * Byte 1 bit 1 selects gate signal mode: 0 = non-overlap, 1 = overlap
+ * Byte 1 bit 0 selects output signal control R/L swap, 0 = normal
+ * 1 = swap all O->E, L->R
+ * Byte 2 is CLW delay clock for CK O/E and CKB O/E signals:
+ * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+ * Byte 3 is FTI_H0 delay time for STP O/E signals:
+ * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+ */
+ u8 dpmctr12[NT35510_P0_DPMCTR12_LEN];
+ /**
+ * @gamma_corr_pos_r: Red gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_r[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_pos_g: Green gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_g[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_pos_b: Blue gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_b[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_r: Red gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_r[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_g: Green gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_g[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_b: Blue gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_b[NT35510_P1_GAMMA_LEN];
+};
+
+/**
+ * struct nt35510 - state container for the NT35510 panel
+ */
+struct nt35510 {
+ /**
+ * @dev: the container device
+ */
+ struct device *dev;
+ /**
+ * @conf: the specific panel configuration, as the NT35510
+ * can be combined with many physical panels, they can have
+ * different physical dimensions and gamma correction etc,
+ * so this is stored in the config.
+ */
+ const struct nt35510_config *conf;
+ /**
+ * @panel: the DRM panel object for the instance
+ */
+ struct drm_panel panel;
+ /**
+ * @supplies: regulators supplying the panel
+ */
+ struct regulator_bulk_data supplies[2];
+ /**
+ * @reset_gpio: the reset line
+ */
+ struct gpio_desc *reset_gpio;
+};
+
+/* Manufacturer command has strictly this byte sequence */
+static const u8 nt35510_mauc_select_page_0[] = { 0x55, 0xAA, 0x52, 0x08, 0x00 };
+static const u8 nt35510_mauc_select_page_1[] = { 0x55, 0xAA, 0x52, 0x08, 0x01 };
+static const u8 nt35510_vgh_on[] = { 0x01 };
+
+static inline struct nt35510 *panel_to_nt35510(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt35510, panel);
+}
+
+#define NT35510_ROTATE_0_SETTING 0x02
+#define NT35510_ROTATE_180_SETTING 0x00
+
+static int nt35510_send_long(struct nt35510 *nt, struct mipi_dsi_device *dsi,
+ u8 cmd, u8 cmdlen, const u8 *seq)
+{
+ const u8 *seqp = seq;
+ int cmdwritten = 0;
+ int chunk = cmdlen;
+ int ret;
+
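+ /*
+ * The sequence is sent in chunks of at most 15 bytes: the first
+ * chunk as a DCS write carrying the command byte, the remainder
+ * as generic writes continuing the payload. That the NT35510
+ * accepts generic writes as a continuation, and the 15 byte
+ * limit itself, are assumptions this code makes; neither is
+ * documented here.
+ */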
+ if (chunk > 15)
+ chunk = 15;
+ ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev,
+ "error sending DCS command seq cmd %02x\n",
+ cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+
+ while (cmdwritten < cmdlen) {
+ chunk = cmdlen - cmdwritten;
+ if (chunk > 15)
+ chunk = 15;
+ ret = mipi_dsi_generic_write(dsi, seqp, chunk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev,
+ "error sending generic write seq %02x\n",
+ cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+ }
+ DRM_DEV_DEBUG(nt->dev, "sent command %02x %02x bytes\n",
+ cmd, cmdlen);
+ return 0;
+}
+
+static int nt35510_read_id(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 id1, id2, id3;
+ int ret;
+
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID1, &id1, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID1\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID2, &id2, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID2\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID3, &id3, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID3\n");
+ return ret;
+ }
+
+ /*
+ * Multi-Time Programmable (?) memory contains manufacturer
+ * ID (e.g. Hydis 0x55), driver ID (e.g. NT35510 0xc0) and
+ * version.
+ */
+ DRM_DEV_INFO(nt->dev,
+ "MTP ID manufacturer: %02x version: %02x driver: %02x\n",
+ id1, id2, id3);
+
+ return 0;
+}
+
+/**
+ * nt35510_setup_power() - set up power config in page 1
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_power(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVDD,
+ NT35510_P1_AVDD_LEN,
+ nt->conf->avdd);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT1CTR,
+ NT35510_P1_BT1CTR_LEN,
+ nt->conf->bt1ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVEE,
+ NT35510_P1_AVEE_LEN,
+ nt->conf->avee);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT2CTR,
+ NT35510_P1_BT2CTR_LEN,
+ nt->conf->bt2ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGH,
+ NT35510_P1_VGH_LEN,
+ nt->conf->vgh);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT4CTR,
+ NT35510_P1_BT4CTR_LEN,
+ nt->conf->bt4ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_VGHCTR,
+ ARRAY_SIZE(nt35510_vgh_on),
+ nt35510_vgh_on);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGL,
+ NT35510_P1_VGL_LEN,
+ nt->conf->vgl);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT5CTR,
+ NT35510_P1_BT5CTR_LEN,
+ nt->conf->bt5ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGP,
+ NT35510_P1_VGP_LEN,
+ nt->conf->vgp);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGN,
+ NT35510_P1_VGN_LEN,
+ nt->conf->vgn);
+ if (ret)
+ return ret;
+
+ /* Typically 10 ms */
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+/**
+ * nt35510_setup_display() - set up display config in page 0
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_display(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ const struct nt35510_config *conf = nt->conf;
+ u8 dopctr[NT35510_P0_DOPCTR_LEN];
+ u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
+ u8 dpfrctr[NT35510_P0_DPFRCTR1_LEN];
+ /* FIXME: set up any rotation (assume none for now) */
+ u8 addr_mode = NT35510_ROTATE_0_SETTING;
+ u8 val;
+ int ret;
+
+ /* Enable TE, EoTP and RGB pixel format */
+ dopctr[0] = NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
+ NT35510_DOPCTR_0_N565;
+ dopctr[1] = NT35510_DOPCTR_1_CTB;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DOPCTR,
+ NT35510_P0_DOPCTR_LEN,
+ dopctr);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &addr_mode,
+ sizeof(addr_mode));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Source data hold time, default 0x05 = 2.5us
+ * 0x00..0x3F = 0 .. 31.5us in steps of 0.5us
+ * 0x0A = 5us
+ */
+ val = 0x0A;
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &val,
+ sizeof(val));
+ if (ret < 0)
+ return ret;
+
+ /* EQ control for gate signals, 0x00 = 0 us */
+ gseqctr[0] = 0x00;
+ gseqctr[1] = 0x00;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_GSEQCTR,
+ NT35510_P0_GSEQCTR_LEN,
+ gseqctr);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_SDEQCTR,
+ NT35510_P0_SDEQCTR_LEN,
+ conf->sdeqctr);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDVPCTR,
+ &conf->sdvpctr, 1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Display timing control for active and idle off mode:
+ * the first byte contains the two high bits of T1A and the
+ * second byte the low 8 bits; the valid range is 0x100 (256)
+ * to 0x3ff (1023), representing 257..1024 (value + 1) pixel
+ * clock ticks for one scanline. At a 20MHz pixel clock this
+ * covers the range of 12.85us .. 51.20us in steps of 0.05us;
+ * the default is 0x184 (388), representing 389 ticks (19.45us
+ * per scanline). The third byte is VBPDA, vertical back porch
+ * display active, and the fourth is VFPDA, vertical front porch
+ * display active, both given as a number of scanlines in the
+ * range 0x02..0xff for 2..255 scanlines. The fifth byte is 2
+ * bits selecting PSEL for active and idle off mode: the 20MHz
+ * clock is divided by 1, 2, 4 or 8 for the values 0..3. This
+ * needs to be adjusted to get the right frame rate.
+ */
+ dpfrctr[0] = (conf->t1 >> 8) & 0xFF;
+ dpfrctr[1] = conf->t1 & 0xFF;
+ /* Vertical back porch */
+ dpfrctr[2] = conf->vbp;
+ /* Vertical front porch */
+ dpfrctr[3] = conf->vfp;
+ dpfrctr[4] = conf->psel;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR1,
+ NT35510_P0_DPFRCTR1_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+ /* For idle and partial idle off mode we decrease front porch by one */
+ dpfrctr[3]--;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR2,
+ NT35510_P0_DPFRCTR2_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR3,
+ NT35510_P0_DPFRCTR3_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+
+ /* Enable TE on vblank */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ /* Turn on the pads? */
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPMCTR12,
+ NT35510_P0_DPMCTR12_LEN,
+ conf->dpmctr12);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_set_brightness(struct backlight_device *bl)
+{
+ struct nt35510 *nt = bl_get_data(bl);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 brightness = bl->props.brightness;
+ int ret;
+
+ DRM_DEV_DEBUG(nt->dev, "set brightness %d\n", brightness);
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &brightness,
+ sizeof(brightness));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct backlight_ops nt35510_bl_ops = {
+ .update_status = nt35510_set_brightness,
+};
+
+/*
+ * This power-on sequence enables the regulators, cycles the RESET line
+ * and then configures the display through the manufacturer command pages.
+ */
+static int nt35510_power_on(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(nt->supplies), nt->supplies);
+ if (ret < 0) {
+ dev_err(nt->dev, "unable to enable regulators\n");
+ return ret;
+ }
+
+ /* Toggle RESET in accordance with datasheet page 370 */
+ if (nt->reset_gpio) {
+ gpiod_set_value(nt->reset_gpio, 1);
+ /* Active min 10 us according to datasheet, let's say 20 */
+ usleep_range(20, 1000);
+ gpiod_set_value(nt->reset_gpio, 0);
+ /*
+ * 5 ms during sleep mode, 120 ms during sleep out mode
+ * according to datasheet, let's use 120-140 ms.
+ */
+ usleep_range(120000, 140000);
+ }
+
+ ret = nt35510_read_id(nt);
+ if (ret)
+ return ret;
+
+ /* Set up stuff in manufacturer control, page 1 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+ ARRAY_SIZE(nt35510_mauc_select_page_1),
+ nt35510_mauc_select_page_1);
+ if (ret)
+ return ret;
+
+ ret = nt35510_setup_power(nt);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_b);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_b);
+ if (ret)
+ return ret;
+
+ /* Set up stuff in manufacturer control, page 0 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+ ARRAY_SIZE(nt35510_mauc_select_page_0),
+ nt35510_mauc_select_page_0);
+ if (ret)
+ return ret;
+
+ ret = nt35510_setup_display(nt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_power_off(struct nt35510 *nt)
+{
+ int ret;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(nt->supplies), nt->supplies);
+ if (ret)
+ return ret;
+
+ if (nt->reset_gpio)
+ gpiod_set_value(nt->reset_gpio, 1);
+
+ return 0;
+}
+
+static int nt35510_unprepare(struct drm_panel *panel)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to turn display off (%d)\n",
+ ret);
+ return ret;
+ }
+ usleep_range(10000, 20000);
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to enter sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Wait 4 frames; the vendor driver waits 5 ms here */
+ usleep_range(5000, 10000);
+
+ ret = nt35510_power_off(nt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_prepare(struct drm_panel *panel)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = nt35510_power_on(nt);
+ if (ret)
+ return ret;
+
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to exit sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Up to 120 ms */
+ usleep_range(120000, 150000);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to turn display on (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Some 10 ms */
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int nt35510_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+
+ info = &connector->display_info;
+ info->width_mm = nt->conf->width_mm;
+ info->height_mm = nt->conf->height_mm;
+ mode = drm_mode_duplicate(connector->dev, &nt->conf->mode);
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ mode->width_mm = nt->conf->width_mm;
+ mode->height_mm = nt->conf->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs nt35510_drm_funcs = {
+ .unprepare = nt35510_unprepare,
+ .prepare = nt35510_prepare,
+ .get_modes = nt35510_get_modes,
+};
+
+static int nt35510_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct nt35510 *nt;
+ int ret;
+
+ nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, nt);
+ nt->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ /*
+ * Datasheet suggests max HS rate for NT35510 is 250 MHz
+ * (period time 4ns, see figure 7.6.4 page 365) and max LP rate is
+ * 20 MHz (period time 50ns, see figure 7.6.6. page 366).
+ * However, the frequencies used below appear in source code for
+ * the Hydis HVA40WV1 panel, and setting the LP frequency to the
+ * datasheet value makes the panel not work.
+ *
+ * TODO: if other panels prove to be closer to the datasheet,
+ * maybe make this a per-panel config in struct nt35510_config?
+ */
+ dsi->hs_rate = 349440000;
+ dsi->lp_rate = 9600000;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ /*
+ * Every new incarnation of this display must have a unique
+ * configuration entry in this driver.
+ */
+ nt->conf = of_device_get_match_data(dev);
+ if (!nt->conf) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ nt->supplies[0].supply = "vdd"; /* 2.3-4.8 V */
+ nt->supplies[1].supply = "vddi"; /* 1.65-3.3V */
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
+ nt->supplies);
+ if (ret < 0)
+ return ret;
+ ret = regulator_set_voltage(nt->supplies[0].consumer,
+ 2300000, 4800000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(nt->supplies[1].consumer,
+ 1650000, 3300000);
+ if (ret)
+ return ret;
+
+ nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(nt->reset_gpio)) {
+ dev_err(dev, "error getting RESET GPIO\n");
+ return PTR_ERR(nt->reset_gpio);
+ }
+
+ drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ /*
+ * First, try to locate an external backlight (such as on GPIO);
+ * if none is found, fall back to the internal backlight control.
+ */
+ ret = drm_panel_of_backlight(&nt->panel);
+ if (ret) {
+ dev_err(dev, "error getting external backlight %d\n", ret);
+ return ret;
+ }
+ if (!nt->panel.backlight) {
+ struct backlight_device *bl;
+
+ bl = devm_backlight_device_register(dev, "nt35510", dev, nt,
+ &nt35510_bl_ops, NULL);
+ if (IS_ERR(bl)) {
+ DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+ return PTR_ERR(bl);
+ }
+ bl->props.max_brightness = 255;
+ bl->props.brightness = 255;
+ bl->props.power = FB_BLANK_POWERDOWN;
+ nt->panel.backlight = bl;
+ }
+
+ ret = drm_panel_add(&nt->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ drm_panel_remove(&nt->panel);
+
+ return ret;
+}
+
+static int nt35510_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt35510 *nt = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ mipi_dsi_detach(dsi);
+ /* Power off */
+ ret = nt35510_power_off(nt);
+ drm_panel_remove(&nt->panel);
+
+ return ret;
+}
+
+/*
+ * These gamma correction values are 10bit tuples, so only bits 0 and 1 are
+ * ever used in the first byte. They form a positive and a negative gamma
+ * correction curve for each color; values must be strictly higher for each
+ * step on the curve. As can be seen, these default curves go from 0x0001
+ * to 0x03FE.
+ */
+#define NT35510_GAMMA_POS_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+ 0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+ 0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+ 0x83, 0x02, 0x78, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+ 0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+ 0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
+
+#define NT35510_GAMMA_NEG_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+ 0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+ 0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+ 0x43, 0x02, 0x50, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+ 0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+ 0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
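+
+/*
+ * Worked example (illustration only): each point is two consecutive
+ * bytes, high bits first, so the curves above start at
+ * (0x00 << 8) | 0x01 = 0x001 and end at (0x03 << 8) | 0xFE = 0x3FE.
+ */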
+
+/*
+ * The Hydis HVA40WV1 panel
+ */
+static const struct nt35510_config nt35510_hydis_hva40wv1 = {
+ .width_mm = 52,
+ .height_mm = 86,
+ /*
+ * As the Hydis panel is used in command mode, the porches etc
+ * are settings programmed internally into the NT35510 controller
+ * and generated toward the physical display. As the panel is not
+ * used in video mode, these are not really exposed to the DSI
+ * host.
+ *
+ * Display frame rate control:
+ * Frame rate = (20 MHz / 1) / (389 * (7 + 50 + 800)) ~= 60 Hz
+ */
+ .mode = {
+ /* The internal pixel clock of the NT35510 is 20 MHz */
+ .clock = 20000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2, /* HFP = 2 */
+ .hsync_end = 480 + 2 + 0, /* HSync = 0 */
+ .htotal = 480 + 2 + 0 + 5, /* HBP = 5 */
+ .vdisplay = 800,
+ .vsync_start = 800 + 2, /* VFP = 2 */
+ .vsync_end = 800 + 2 + 0, /* VSync = 0 */
+ .vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
+ .vrefresh = 60, /* Calculated */
+ .flags = 0,
+ },
+ /* 0x09: AVDD = 5.6V */
+ .avdd = { 0x09, 0x09, 0x09 },
+ /* 0x34: PCK = Hsync/2, BTP = 2 x VDDB */
+ .bt1ctr = { 0x34, 0x34, 0x34 },
+ /* 0x09: AVEE = -5.6V */
+ .avee = { 0x09, 0x09, 0x09 },
+ /* 0x24: NCK = Hsync/2, BTN = -2 x VDDB */
+ .bt2ctr = { 0x24, 0x24, 0x24 },
+ /* 0x05 = 12V */
+ .vgh = { 0x05, 0x05, 0x05 },
+ /* 0x24: HCK = Hsync/2, VGH = AVDD - AVEE + VDDB */
+ .bt4ctr = { 0x24, 0x24, 0x24 },
+ /* 0x0B = -13V */
+ .vgl = { 0x0B, 0x0B, 0x0B },
+ /* 0x24: LCK = Hsync/2, VGL = AVEE + VCL - AVDD */
+ .bt5ctr = { 0x24, 0x24, 0x24 },
+ /* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
+ .vgp = { 0x00, 0xA3, 0x00 },
+	/* VGMN: 0x0A3 = 5.0375V, VGSN = 0V (negative gamma counterpart) */
+ .vgn = { 0x00, 0xA3, 0x00 },
+ /* SDEQCTR: source driver EQ mode 2, 2.5 us rise time on each step */
+ .sdeqctr = { 0x01, 0x05, 0x05, 0x05 },
+ /* SDVPCTR: Normal operation off color during v porch */
+ .sdvpctr = 0x01,
+ /* T1: number of pixel clocks on one scanline: 0x184 = 389 clocks */
+ .t1 = 0x0184,
+ /* VBP: vertical back porch toward the panel */
+ .vbp = 7,
+ /* VFP: vertical front porch toward the panel */
+ .vfp = 50,
+ /* PSEL: divide pixel clock 20MHz with 1 (no clock downscaling) */
+ .psel = 0,
+ /* DPTMCTR12: 0x03: LVGL = VGLX, overlap mode, swap R->L O->E */
+ .dpmctr12 = { 0x03, 0x00, 0x00, },
+ /* Default gamma correction values */
+ .gamma_corr_pos_r = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_pos_g = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_pos_b = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_neg_r = { NT35510_GAMMA_NEG_DEFAULT },
+ .gamma_corr_neg_g = { NT35510_GAMMA_NEG_DEFAULT },
+ .gamma_corr_neg_b = { NT35510_GAMMA_NEG_DEFAULT },
+};
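The frame-rate comment in the config above can be sanity-checked with a standalone calculation (editor's sketch, values taken from the fields of nt35510_hydis_hva40wv1):

#include <stdio.h>

int main(void)
{
	/* PSEL = 0 (pixel clock divided by 1), T1 = 0x184 = 389 pixel
	 * clocks per line, VBP + VFP + vdisplay = 7 + 50 + 800 = 857
	 * lines per frame. */
	double fps = 20000000.0 / (389.0 * (7 + 50 + 800));

	printf("frame rate ~= %.2f Hz\n", fps); /* prints ~59.99 */
	return 0;
}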
+
+static const struct of_device_id nt35510_of_match[] = {
+ {
+ .compatible = "hydis,hva40wv1",
+ .data = &nt35510_hydis_hva40wv1,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nt35510_of_match);
+
+static struct mipi_dsi_driver nt35510_driver = {
+ .probe = nt35510_probe,
+ .remove = nt35510_remove,
+ .driver = {
+ .name = "panel-novatek-nt35510",
+ .of_match_table = nt35510_of_match,
+ },
+};
+module_mipi_dsi_driver(nt35510_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("NT35510-based panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3c52f15f7a1c..9bb2e8c7934a 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -373,6 +373,12 @@ static const struct of_device_id ld9040_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ld9040_of_match);
+static const struct spi_device_id ld9040_ids[] = {
+ { "ld9040", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, ld9040_ids);
+
static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
new file mode 100644
index 000000000000..9d843fcc3a22
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019, Michael Srba
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct s6e88a0_ams452ef01 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct s6e88a0_ams452ef01 *
+to_s6e88a0_ams452ef01(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e88a0_ams452ef01, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
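For reference, a call such as dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a) in the functions below expands to a static const u8 d[] = { 0xf0, 0x5a, 0x5a } handed to mipi_dsi_dcs_write_buffer(), with an early return of the error code on failure; this is also why the macro can only be used inside functions that return int.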
+static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+}
+
+static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+ dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+	// set default brightness/gamma
+ dsi_dcs_write_seq(dsi, 0xca,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
+ 0x80, 0x80, 0x80, // V203 R,G,B
+ 0x80, 0x80, 0x80, // V151 R,G,B
+ 0x80, 0x80, 0x80, // V87 R,G,B
+ 0x80, 0x80, 0x80, // V51 R,G,B
+ 0x80, 0x80, 0x80, // V35 R,G,B
+ 0x80, 0x80, 0x80, // V23 R,G,B
+ 0x80, 0x80, 0x80, // V11 R,G,B
+ 0x6b, 0x68, 0x71, // V3 R,G,B
+ 0x00, 0x00, 0x00); // V1 R,G,B
+ // set default Amoled Off Ratio
+ dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+ dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
+ dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
+ dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(35);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ s6e88a0_ams452ef01_reset(ctx);
+
+ ret = s6e88a0_ams452ef01_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = s6e88a0_ams452ef01_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode s6e88a0_ams452ef01_mode = {
+ .clock = (540 + 88 + 4 + 20) * (960 + 14 + 2 + 8) * 60 / 1000,
+ .hdisplay = 540,
+ .hsync_start = 540 + 88,
+ .hsync_end = 540 + 88 + 4,
+ .htotal = 540 + 88 + 4 + 20,
+ .vdisplay = 960,
+ .vsync_start = 960 + 14,
+ .vsync_end = 960 + 14 + 2,
+ .vtotal = 960 + 14 + 2 + 8,
+ .vrefresh = 60,
+ .width_mm = 56,
+ .height_mm = 100,
+};
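For reference, the pixel-clock expression above evaluates to 652 * 984 * 60 / 1000 = 38494 kHz, i.e. roughly 38.5 MHz for a 60 Hz refresh over the 652 x 984 total raster.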
+
+static int s6e88a0_ams452ef01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &s6e88a0_ams452ef01_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e88a0_ams452ef01_panel_funcs = {
+ .unprepare = s6e88a0_ams452ef01_unprepare,
+ .prepare = s6e88a0_ams452ef01_prepare,
+ .get_modes = s6e88a0_ams452ef01_get_modes,
+};
+
+static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e88a0_ams452ef01 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+ return ret;
+ }
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+ drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add panel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e88a0_ams452ef01 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id s6e88a0_ams452ef01_of_match[] = {
+ { .compatible = "samsung,s6e88a0-ams452ef01" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6e88a0_ams452ef01_of_match);
+
+static struct mipi_dsi_driver s6e88a0_ams452ef01_driver = {
+ .probe = s6e88a0_ams452ef01_probe,
+ .remove = s6e88a0_ams452ef01_remove,
+ .driver = {
+ .name = "panel-s6e88a0-ams452ef01",
+ .of_match_table = s6e88a0_ams452ef01_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e88a0_ams452ef01_driver);
+
+MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
+MODULE_DESCRIPTION("MIPI-DSI based Panel Driver for AMS452EF01 AMOLED LCD with a S6E88A0 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index e14c14ac62b5..3ad828eaefe1 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -351,6 +351,54 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
+static struct panel_desc panel_dpi;
+
+static int panel_dpi_probe(struct device *dev,
+ struct panel_simple *panel)
+{
+ struct display_timing *timing;
+ const struct device_node *np;
+ struct panel_desc *desc;
+ unsigned int bus_flags;
+ struct videomode vm;
+ int ret;
+
+ np = dev->of_node;
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
+ if (!timing)
+ return -ENOMEM;
+
+ ret = of_get_display_timing(np, "panel-timing", timing);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: no panel-timing node found for \"panel-dpi\" binding\n",
+ np);
+ return ret;
+ }
+
+ desc->timings = timing;
+ desc->num_timings = 1;
+
+ of_property_read_u32(np, "width-mm", &desc->size.width);
+ of_property_read_u32(np, "height-mm", &desc->size.height);
+
+ /* Extract bus_flags from display_timing */
+ bus_flags = 0;
+ vm.flags = timing->flags;
+ drm_bus_flags_from_videomode(&vm, &bus_flags);
+ desc->bus_flags = bus_flags;
+
+ /* We do not know the connector for the DT node, so guess it */
+ desc->connector_type = DRM_MODE_CONNECTOR_DPI;
+
+ panel->desc = desc;
+
+ return 0;
+}
+
#define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
(to_check->field.typ >= bounds->field.min && \
to_check->field.typ <= bounds->field.max)
@@ -437,8 +485,15 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -EPROBE_DEFER;
}
- if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
- panel_simple_parse_panel_timing_node(dev, panel, &dt);
+ if (desc == &panel_dpi) {
+ /* Handle the generic panel-dpi binding */
+ err = panel_dpi_probe(dev, panel);
+ if (err)
+ goto free_ddc;
+ } else {
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+ }
drm_panel_init(&panel->base, dev, &panel_simple_funcs,
desc->connector_type);
@@ -1301,6 +1356,37 @@ static const struct panel_desc edt_et035012dm6 = {
.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
+static const struct drm_display_mode edt_etm043080dh6gp_mode = {
+ .clock = 10870,
+ .hdisplay = 480,
+ .hsync_start = 480 + 8,
+ .hsync_end = 480 + 8 + 4,
+ .htotal = 480 + 8 + 4 + 41,
+
+ /*
+	 * IWG22M: Y resolution changed because the "dc_linuxfb" module
+	 * crashes during fb_align otherwise
+ */
+
+ .vdisplay = 288,
+ .vsync_start = 288 + 2,
+ .vsync_end = 288 + 2 + 4,
+ .vtotal = 288 + 2 + 4 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc edt_etm043080dh6gp = {
+ .modes = &edt_etm043080dh6gp_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 100,
+ .height = 65,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode edt_etm0430g0dh6_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -1440,6 +1526,33 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode frida_frd350h54004_mode = {
+ .clock = 6000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 44,
+ .hsync_end = 320 + 44 + 16,
+ .htotal = 320 + 44 + 16 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 2,
+ .vsync_end = 240 + 2 + 6,
+ .vtotal = 240 + 2 + 6 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc frida_frd350h54004 = {
+ .modes = &frida_frd350h54004_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 77,
+ .height = 64,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode friendlyarm_hd702e_mode = {
.clock = 67185,
.hdisplay = 800,
@@ -2080,6 +2193,64 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct display_timing logictechno_lt161010_2nh_timing = {
+ .pixelclock = { 26400000, 33300000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 20, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 10, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt161010_2nh = {
+ .timings = &logictechno_lt161010_2nh_timing,
+ .num_timings = 1,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
+static const struct display_timing logictechno_lt170410_2whc_timing = {
+ .pixelclock = { 68900000, 71100000, 73400000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 23, 60, 71 },
+ .hback_porch = { 23, 60, 71 },
+ .hsync_len = { 15, 40, 47 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 7, 10 },
+ .vback_porch = { 5, 7, 10 },
+ .vsync_len = { 6, 9, 12 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt170410_2whc = {
+ .timings = &logictechno_lt170410_2whc_timing,
+ .num_timings = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.clock = 30400,
.hdisplay = 800,
@@ -2095,7 +2266,7 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
};
static const struct drm_display_mode logicpd_type_28_mode = {
- .clock = 9000,
+ .clock = 9107,
.hdisplay = 480,
.hsync_start = 480 + 3,
.hsync_end = 480 + 3 + 42,
@@ -2224,6 +2395,51 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
+ {
+ .clock = 138500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }, {
+ .clock = 110920,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .vrefresh = 48,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }
+};
+
+static const struct panel_desc neweast_wjfh116008a = {
+ .modes = neweast_wjfh116008a_modes,
+ .num_modes = 2,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 150,
+ },
+ .delay = {
+ .prepare = 110,
+ .enable = 20,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2390,15 +2606,15 @@ static const struct panel_desc ontat_yx700wv03 = {
};
static const struct drm_display_mode ortustech_com37h3m_mode = {
- .clock = 22153,
+ .clock = 22230,
.hdisplay = 480,
- .hsync_start = 480 + 8,
- .hsync_end = 480 + 8 + 10,
- .htotal = 480 + 8 + 10 + 10,
+ .hsync_start = 480 + 40,
+ .hsync_end = 480 + 40 + 10,
+ .htotal = 480 + 40 + 10 + 40,
.vdisplay = 640,
.vsync_start = 640 + 4,
- .vsync_end = 640 + 4 + 3,
- .vtotal = 640 + 4 + 3 + 4,
+ .vsync_end = 640 + 4 + 2,
+ .vtotal = 640 + 4 + 2 + 4,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2439,6 +2655,7 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
@@ -2464,7 +2681,8 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
@@ -2546,6 +2764,35 @@ static const struct panel_desc rocktech_rk070er9427 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = {
+ .clock = 71100,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 2,
+ .vsync_end = 800 + 2 + 5,
+ .vtotal = 800 + 2 + 5 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc rocktech_rk101ii01d_ct = {
+ .modes = &rocktech_rk101ii01d_ct_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
.clock = 271560,
.hdisplay = 2560,
@@ -2768,30 +3015,6 @@ static const struct panel_desc sharp_lq123p1jx31 = {
},
};
-static const struct drm_display_mode sharp_lq150x1lg11_mode = {
- .clock = 71100,
- .hdisplay = 1024,
- .hsync_start = 1024 + 168,
- .hsync_end = 1024 + 168 + 64,
- .htotal = 1024 + 168 + 64 + 88,
- .vdisplay = 768,
- .vsync_start = 768 + 37,
- .vsync_end = 768 + 37 + 2,
- .vtotal = 768 + 37 + 2 + 8,
- .vrefresh = 60,
-};
-
-static const struct panel_desc sharp_lq150x1lg11 = {
- .modes = &sharp_lq150x1lg11_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 304,
- .height = 228,
- },
- .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
-};
-
static const struct display_timing sharp_ls020b1dd01d_timing = {
.pixelclock = { 2000000, 4200000, 5000000 },
.hactive = { 240, 240, 240 },
@@ -3023,7 +3246,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
.width = 194,
.height = 116,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -3286,6 +3509,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,et035012dm6",
.data = &edt_et035012dm6,
}, {
+ .compatible = "edt,etm043080dh6gp",
+ .data = &edt_etm043080dh6gp,
+ }, {
.compatible = "edt,etm0430g0dh6",
.data = &edt_etm0430g0dh6,
}, {
@@ -3310,6 +3536,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
+ .compatible = "frida,frd350h54004",
+ .data = &frida_frd350h54004,
+ }, {
.compatible = "friendlyarm,hd702e",
.data = &friendlyarm_hd702e,
}, {
@@ -3388,6 +3617,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "logicpd,type28",
.data = &logicpd_type_28,
}, {
+ .compatible = "logictechno,lt161010-2nhc",
+ .data = &logictechno_lt161010_2nh,
+ }, {
+ .compatible = "logictechno,lt161010-2nhr",
+ .data = &logictechno_lt161010_2nh,
+ }, {
+ .compatible = "logictechno,lt170410-2whc",
+ .data = &logictechno_lt170410_2whc,
+ }, {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
@@ -3400,6 +3638,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
+ .compatible = "neweast,wjfh116008a",
+ .data = &neweast_wjfh116008a,
+ }, {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
@@ -3439,6 +3680,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk070er9427",
.data = &rocktech_rk070er9427,
}, {
+ .compatible = "rocktech,rk101ii01d-ct",
+ .data = &rocktech_rk101ii01d_ct,
+ }, {
.compatible = "samsung,lsn122dl01-c01",
.data = &samsung_lsn122dl01_c01,
}, {
@@ -3466,9 +3710,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq123p1jx31",
.data = &sharp_lq123p1jx31,
}, {
- .compatible = "sharp,lq150x1lg11",
- .data = &sharp_lq150x1lg11,
- }, {
.compatible = "sharp,ls020b1dd01d",
.data = &sharp_ls020b1dd01d,
}, {
@@ -3526,6 +3767,10 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
}, {
+ /* Must be the last entry */
+ .compatible = "panel-dpi",
+ .data = &panel_dpi,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
index de0abf76ae6f..c91e55b2d7a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -48,7 +48,7 @@ struct acx424akp {
};
static const struct drm_display_mode sony_acx424akp_vid_mode = {
- .clock = 330000,
+ .clock = 27234,
.hdisplay = 480,
.hsync_start = 480 + 15,
.hsync_end = 480 + 15 + 0,
@@ -68,7 +68,7 @@ static const struct drm_display_mode sony_acx424akp_vid_mode = {
* command mode using the maximum HS frequency.
*/
static const struct drm_display_mode sony_acx424akp_cmd_mode = {
- .clock = 420160,
+ .clock = 35478,
.hdisplay = 480,
.hsync_start = 480 + 154,
.hsync_end = 480 + 154 + 16,
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index cf29405a2dbe..aeca15dfeb3c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,7 +86,12 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
+/*
+ * noinline_for_stack so we don't get multiple copies of tx_buf
+ * on the stack in case of gcc-plugin-structleak
+ */
+static int noinline_for_stack
+jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf = JBT_COMMAND | reg;
@@ -105,8 +110,9 @@ static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
return ret;
}
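As the comment above notes, noinline_for_stack keeps each helper's tx_buf in its own stack frame: with the structleak GCC plugin forcing initialization of by-reference stack variables, inlining all of these helpers into one caller would otherwise accumulate several such buffers in a single frame.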
-static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
- u8 reg, u8 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_1(struct td028ttec1_panel *lcd,
+ u8 reg, u8 data, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf[2];
@@ -128,8 +134,9 @@ static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
return ret;
}
-static int jbt_reg_write_2(struct td028ttec1_panel *lcd,
- u8 reg, u16 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_2(struct td028ttec1_panel *lcd,
+ u8 reg, u16 data, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf[3];
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 238fb6d54df4..8136babd3ba9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
@@ -87,18 +88,27 @@ static void panfrost_clk_fini(struct panfrost_device *pfdev)
static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
- int ret;
+ int ret, i;
- pfdev->regulator = devm_regulator_get(pfdev->dev, "mali");
- if (IS_ERR(pfdev->regulator)) {
- ret = PTR_ERR(pfdev->regulator);
- dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+ if (WARN(pfdev->comp->num_supplies > ARRAY_SIZE(pfdev->regulators),
+ "Too many supplies in compatible structure.\n"))
+ return -EINVAL;
+
+ for (i = 0; i < pfdev->comp->num_supplies; i++)
+ pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
+
+ ret = devm_regulator_bulk_get(pfdev->dev,
+ pfdev->comp->num_supplies,
+ pfdev->regulators);
+ if (ret < 0) {
+ dev_err(pfdev->dev, "failed to get regulators: %d\n", ret);
return ret;
}
- ret = regulator_enable(pfdev->regulator);
+ ret = regulator_bulk_enable(pfdev->comp->num_supplies,
+ pfdev->regulators);
if (ret < 0) {
- dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+ dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
@@ -107,7 +117,81 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
- regulator_disable(pfdev->regulator);
+ regulator_bulk_disable(pfdev->comp->num_supplies,
+ pfdev->regulators);
+}
+
+static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
+ if (!pfdev->pm_domain_devs[i])
+ break;
+
+ if (pfdev->pm_domain_links[i])
+ device_link_del(pfdev->pm_domain_links[i]);
+
+ dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
+ }
+}
+
+static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
+{
+ int err;
+ int i, num_domains;
+
+ num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+ "power-domains",
+ "#power-domain-cells");
+
+ /*
+	 * A single domain is handled by the core, and if only a single
+	 * power domain is requested, the property is optional.
+ */
+ if (num_domains < 2 && pfdev->comp->num_pm_domains < 2)
+ return 0;
+
+ if (num_domains != pfdev->comp->num_pm_domains) {
+ dev_err(pfdev->dev,
+ "Incorrect number of power domains: %d provided, %d needed\n",
+ num_domains, pfdev->comp->num_pm_domains);
+ return -EINVAL;
+ }
+
+ if (WARN(num_domains > ARRAY_SIZE(pfdev->pm_domain_devs),
+ "Too many supplies in compatible structure.\n"))
+ return -EINVAL;
+
+ for (i = 0; i < num_domains; i++) {
+ pfdev->pm_domain_devs[i] =
+ dev_pm_domain_attach_by_name(pfdev->dev,
+ pfdev->comp->pm_domain_names[i]);
+ if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
+ err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
+ pfdev->pm_domain_devs[i] = NULL;
+ dev_err(pfdev->dev,
+ "failed to get pm-domain %s(%d): %d\n",
+ pfdev->comp->pm_domain_names[i], i, err);
+ goto err;
+ }
+
+ pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
+ pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+ if (!pfdev->pm_domain_links[i]) {
+ dev_err(pfdev->pm_domain_devs[i],
+ "adding device link failed!\n");
+ err = -ENODEV;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ panfrost_pm_domain_fini(pfdev);
+ return err;
}
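A side note on the device links created above: DL_FLAG_PM_RUNTIME ties each power domain's runtime-PM state to the GPU device, so runtime-resuming the GPU also powers up every attached domain, and DL_FLAG_RPM_ACTIVE marks the supplier as already active when the link is created.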
int panfrost_device_init(struct panfrost_device *pfdev)
@@ -140,37 +224,43 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto err_out1;
}
+ err = panfrost_pm_domain_init(pfdev);
+ if (err)
+ goto err_out2;
+
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
if (IS_ERR(pfdev->iomem)) {
dev_err(pfdev->dev, "failed to ioremap iomem\n");
err = PTR_ERR(pfdev->iomem);
- goto err_out2;
+ goto err_out3;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto err_out2;
+ goto err_out3;
err = panfrost_mmu_init(pfdev);
if (err)
- goto err_out3;
+ goto err_out4;
err = panfrost_job_init(pfdev);
if (err)
- goto err_out4;
+ goto err_out5;
err = panfrost_perfcnt_init(pfdev);
if (err)
- goto err_out5;
+ goto err_out6;
return 0;
-err_out5:
+err_out6:
panfrost_job_fini(pfdev);
-err_out4:
+err_out5:
panfrost_mmu_fini(pfdev);
-err_out3:
+err_out4:
panfrost_gpu_fini(pfdev);
+err_out3:
+ panfrost_pm_domain_fini(pfdev);
err_out2:
panfrost_reset_fini(pfdev);
err_out1:
@@ -186,6 +276,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
+ panfrost_pm_domain_fini(pfdev);
panfrost_reset_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 06713811b92c..c30c719a8059 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
+#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
@@ -19,6 +20,8 @@ struct panfrost_job;
struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
+#define MAX_REGULATORS 2
+#define MAX_PM_DOMAINS 3
struct panfrost_features {
u16 id;
@@ -51,6 +54,23 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
+/*
+ * Features that cannot be automatically detected and need matching using the
+ * compatible string, typically SoC-specific.
+ */
+struct panfrost_compatible {
+ /* Supplies count and names. */
+ int num_supplies;
+ const char * const *supply_names;
+ /*
+	 * Number of power domains required; note that values 0 and 1 are
+	 * handled identically, as only values > 1 need special handling.
+ */
+ int num_pm_domains;
+ /* Only required if num_pm_domains > 1. */
+ const char * const *pm_domain_names;
+};
+
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -59,10 +79,14 @@ struct panfrost_device {
void __iomem *iomem;
struct clk *clock;
struct clk *bus_clock;
- struct regulator *regulator;
+ struct regulator_bulk_data regulators[MAX_REGULATORS];
struct reset_control *rstc;
+ /* pm_domains for devices with more than one. */
+ struct device *pm_domain_devs[MAX_PM_DOMAINS];
+ struct device_link *pm_domain_links[MAX_PM_DOMAINS];
struct panfrost_features features;
+ const struct panfrost_compatible *comp;
spinlock_t as_lock;
unsigned long as_in_use_mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index b7a618db3ee2..882fecc33fdb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -584,6 +584,10 @@ static int panfrost_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pfdev);
+ pfdev->comp = of_device_get_match_data(&pdev->dev);
+ if (!pfdev->comp)
+ return -ENODEV;
+
/* Allocate and initialze the DRM device. */
ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
@@ -655,16 +659,24 @@ static int panfrost_remove(struct platform_device *pdev)
return 0;
}
+static const char * const default_supplies[] = { "mali" };
+static const struct panfrost_compatible default_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies),
+ .supply_names = default_supplies,
+ .num_pm_domains = 1, /* optional */
+ .pm_domain_names = NULL,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "arm,mali-t604" },
- { .compatible = "arm,mali-t624" },
- { .compatible = "arm,mali-t628" },
- { .compatible = "arm,mali-t720" },
- { .compatible = "arm,mali-t760" },
- { .compatible = "arm,mali-t820" },
- { .compatible = "arm,mali-t830" },
- { .compatible = "arm,mali-t860" },
- { .compatible = "arm,mali-t880" },
+ { .compatible = "arm,mali-t604", .data = &default_data, },
+ { .compatible = "arm,mali-t624", .data = &default_data, },
+ { .compatible = "arm,mali-t628", .data = &default_data, },
+ { .compatible = "arm,mali-t720", .data = &default_data, },
+ { .compatible = "arm,mali-t760", .data = &default_data, },
+ { .compatible = "arm,mali-t820", .data = &default_data, },
+ { .compatible = "arm,mali-t830", .data = &default_data, },
+ { .compatible = "arm,mali-t860", .data = &default_data, },
+ { .compatible = "arm,mali-t880", .data = &default_data, },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 8822ec13a0d6..f2c1ddc41a9b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -308,28 +308,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == pfdev->features.l2_present, 100, 1000);
-
- gpu_write(pfdev, STACK_PWRON_LO, pfdev->features.stack_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + STACK_READY_LO,
- val, val == pfdev->features.stack_present, 100, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu L2");
gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == pfdev->features.shader_present, 100, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
val, val == pfdev->features.tiler_present, 100, 1000);
-
if (ret)
- dev_err(pfdev->dev, "error powering up gpu");
+ dev_err(pfdev->dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
gpu_write(pfdev, TILER_PWROFF_LO, 0);
gpu_write(pfdev, SHADER_PWROFF_LO, 0);
- gpu_write(pfdev, STACK_PWROFF_LO, 0);
gpu_write(pfdev, L2_PWROFF_LO, 0);
}
@@ -351,7 +349,7 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
return -ENODEV;
err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
- IRQF_SHARED, "gpu", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request gpu irq");
return err;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9a1a72a748e7..7914b1570841 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -508,7 +508,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
return -ENODEV;
ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
- IRQF_SHARED, "job", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
if (ret) {
dev_err(pfdev->dev, "failed to request job irq");
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 5d75f8cf6477..ed28aeba6d59 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -640,9 +640,11 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
if (irq <= 0)
return -ENODEV;
- err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ err = devm_request_threaded_irq(pfdev->dev, irq,
+ panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
- IRQF_SHARED, "mmu", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-mmu",
+ pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request mmu irq");
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 09aeaffb7660..4f325c410b5d 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -19,6 +19,7 @@ static struct regmap *versatile_syscon_map;
* We detect the different syscon types from the compatible strings.
*/
enum versatile_clcd {
+ INTEGRATOR_IMPD1,
INTEGRATOR_CLCD_CM,
VERSATILE_CLCD,
REALVIEW_CLCD_EB,
@@ -65,6 +66,14 @@ static const struct of_device_id versatile_clcd_of_match[] = {
{},
};
+static const struct of_device_id impd1_clcd_of_match[] = {
+ {
+ .compatible = "arm,im-pd1-syscon",
+ .data = (void *)INTEGRATOR_IMPD1,
+ },
+ {},
+};
+
/*
* Core module CLCD control on the Integrator/CP, bits
* 8 thru 19 of the CM_CONTROL register controls a bunch
@@ -125,6 +134,36 @@ static void pl111_integrator_enable(struct drm_device *drm, u32 format)
val);
}
+#define IMPD1_CTRL_OFFSET 0x18
+#define IMPD1_CTRL_DISP_LCD (0 << 0)
+#define IMPD1_CTRL_DISP_VGA (1 << 0)
+#define IMPD1_CTRL_DISP_LCD1 (2 << 0)
+#define IMPD1_CTRL_DISP_ENABLE (1 << 2)
+#define IMPD1_CTRL_DISP_MASK (7 << 0)
+
+static void pl111_impd1_enable(struct drm_device *drm, u32 format)
+{
+ u32 val;
+
+ dev_info(drm->dev, "enable IM-PD1 CLCD connectors\n");
+ val = IMPD1_CTRL_DISP_VGA | IMPD1_CTRL_DISP_ENABLE;
+
+ regmap_update_bits(versatile_syscon_map,
+ IMPD1_CTRL_OFFSET,
+ IMPD1_CTRL_DISP_MASK,
+ val);
+}
+
+static void pl111_impd1_disable(struct drm_device *drm)
+{
+ dev_info(drm->dev, "disable IM-PD1 CLCD connectors\n");
+
+ regmap_update_bits(versatile_syscon_map,
+ IMPD1_CTRL_OFFSET,
+ IMPD1_CTRL_DISP_MASK,
+ 0);
+}
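Worked out, pl111_impd1_enable() writes IMPD1_CTRL_DISP_VGA | IMPD1_CTRL_DISP_ENABLE = 0x5 into bits [2:0] of the register at offset 0x18, while the mask of 0x7 passed to regmap_update_bits() leaves all other bits untouched; the disable path clears the same three bits.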
+
/*
* This configuration register in the Versatile and RealView
* family is uniformly present but appears more and more
@@ -271,6 +310,20 @@ static const struct pl111_variant_data pl110_integrator = {
};
/*
+ * The IM-PD1 variant is a PL110 with a number of broken or not yet
+ * implemented features
+ */
+static const struct pl111_variant_data pl110_impd1 = {
+ .name = "PL110 IM-PD1",
+ .is_pl110 = true,
+ .broken_clockdivider = true,
+ .broken_vblank = true,
+ .formats = pl110_integrator_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
+ .fb_bpp = 16,
+};
+
+/*
* This is the in-between PL110 variant found in the ARM Versatile,
* supporting RGB565/BGR565
*/
@@ -322,8 +375,21 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
/* Non-ARM reference designs, just bail out */
return 0;
}
+
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ /*
+	 * On the Integrator, check if we should use the IM-PD1 instead;
+	 * if we find it, it takes precedence. This applies to the
+	 * Integrator/AP, which only has this option for PL110 graphics.
+ */
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
+ &clcd_id);
+ if (np)
+ versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ }
+
/* Versatile Express special handling */
if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
struct platform_device *pdev;
@@ -367,6 +433,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
priv->variant_display_enable = pl111_integrator_enable;
dev_info(dev, "set up callbacks for Integrator PL110\n");
break;
+ case INTEGRATOR_IMPD1:
+ versatile_syscon_map = map;
+ priv->variant = &pl110_impd1;
+ priv->variant_display_enable = pl111_impd1_enable;
+ priv->variant_display_disable = pl111_impd1_disable;
+ dev_info(dev, "set up callbacks for IM-PD1 PL110\n");
+ break;
case VERSATILE_CLCD:
versatile_syscon_map = map;
/* This can do RGB565 with external PLD */
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index ef09dc6bc635..05863b253d68 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -36,7 +36,7 @@ static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
struct ring {
struct qxl_ring_header header;
- uint8_t elements[0];
+ uint8_t elements[];
};
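The change from elements[0] to elements[] adopts the C99 flexible array member, which lets the compiler and bounds checkers see the trailing payload properly. A minimal userspace illustration of the same allocation pattern (editor's sketch, names made up):

#include <stdlib.h>
#include <string.h>

struct blob {
	size_t len;
	unsigned char data[];	/* C99 flexible array member */
};

static struct blob *blob_alloc(size_t len)
{
	/* One allocation covers the header plus the trailing payload */
	struct blob *b = malloc(sizeof(*b) + len);

	if (b) {
		b->len = len;
		memset(b->data, 0, len);
	}
	return b;
}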
struct qxl_ring {
@@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
return ret;
ret = qxl_release_reserve_list(release, true);
- if (ret)
+ if (ret) {
+ qxl_release_free(qdev, release);
return ret;
-
+ }
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
/* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
return 0;
}
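Note the recurring reordering in the qxl hunks here and below: qxl_release_fence_buffer_objects() is now called before the command is pushed to the ring, so the release is fenced before the host can process, and potentially complete, the command.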
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 16d73b22f3f5..91f398d51cfa 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -31,7 +31,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
+#include <drm/drm_simple_kms_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -372,19 +372,6 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_pending_vblank_event *event;
- unsigned long flags;
-
- if (crtc->state && crtc->state->event) {
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
qxl_crtc_update_monitors_config(crtc, "flush");
}
@@ -523,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
return ret;
@@ -665,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
if (old_cursor_bo != NULL)
qxl_bo_unpin(old_cursor_bo);
@@ -713,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
}
static void qxl_update_dumb_head(struct qxl_device *qdev,
@@ -1021,9 +1008,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
return &qxl_output->enc;
}
-static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
-};
-
static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
.get_modes = qxl_conn_get_modes,
.mode_valid = qxl_conn_mode_valid,
@@ -1073,15 +1057,6 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static void qxl_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs qxl_enc_funcs = {
- .destroy = qxl_enc_destroy,
-};
-
static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
{
if (qdev->hotplug_mode_update_property)
@@ -1100,6 +1075,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
+ int ret;
qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
if (!qxl_output)
@@ -1112,15 +1088,19 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_connector_init(dev, &qxl_output->base,
&qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
- drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ ret = drm_simple_encoder_init(dev, &qxl_output->enc,
+ DRM_MODE_ENCODER_VIRTUAL);
+ if (ret) {
+ drm_err(dev, "drm_simple_encoder_init() failed, error %d\n",
+ ret);
+ goto err_drm_connector_cleanup;
+ }
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder->possible_crtcs = 1 << num_output;
drm_connector_attach_encoder(&qxl_output->base,
&qxl_output->enc);
- drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
drm_object_attach_property(&connector->base,
@@ -1130,6 +1110,11 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
return 0;
+
+err_drm_connector_cleanup:
+ drm_connector_cleanup(&qxl_output->base);
+ kfree(qxl_output);
+ return ret;
}
static struct drm_framebuffer *
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 5bebf1ea1c5d..3599db096973 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
goto out_release_backoff;
rects = drawable_set_clipping(qdev, num_clips, clips_bo);
- if (!rects)
+ if (!rects) {
+ ret = -EINVAL;
goto out_release_backoff;
-
+ }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
}
qxl_bo_kunmap(clips_bo);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_release_backoff:
if (ret)
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1d601f57a6ba..4fda3f9b29f4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -34,6 +34,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_modeset_helper.h>
@@ -132,21 +133,30 @@ free_dev:
return ret;
}
+static void qxl_drm_release(struct drm_device *dev)
+{
+ struct qxl_device *qdev = dev->dev_private;
+
+ /*
+	 * TODO: the qxl_device_fini() call should be in qxl_pci_remove();
+	 * reordering the qxl_modeset_fini() + qxl_device_fini() calls is
+	 * non-trivial though.
+ */
+ qxl_modeset_fini(qdev);
+ qxl_device_fini(qdev);
+ dev->dev_private = NULL;
+ kfree(qdev);
+}
+
static void
qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct qxl_device *qdev = dev->dev_private;
drm_dev_unregister(dev);
-
- qxl_modeset_fini(qdev);
- qxl_device_fini(qdev);
+ drm_atomic_helper_shutdown(dev);
if (is_vga(pdev))
vga_put(pdev, VGA_RSRC_LEGACY_IO);
-
- dev->dev_private = NULL;
- kfree(qdev);
drm_dev_put(dev);
}
@@ -279,6 +289,8 @@ static struct drm_driver qxl_driver = {
.major = 0,
.minor = 1,
.patchlevel = 0,
+
+ .release = qxl_drm_release,
};
static int __init qxl_init(void)
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 43688ecdd8a0..60ab7151b84d 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
break;
default:
DRM_ERROR("unsupported image bit depth\n");
- return -EINVAL; /* TODO: cleanup */
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+ return -EINVAL;
}
image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
image->u.bitmap.x = width;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 8117a45b3610..72f3f1bbb40c 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
apply_surf_reloc(qdev, &reloc_info[i]);
}
+ qxl_release_fence_buffer_objects(release);
ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
- if (ret)
- qxl_release_backoff_reserve_list(release);
- else
- qxl_release_fence_buffer_objects(release);
out_free_bos:
out_free_release:
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index bfc1631093e9..70b20ee4741a 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -299,12 +299,12 @@ void qxl_device_fini(struct qxl_device *qdev)
{
qxl_bo_unref(&qdev->current_release_bo[0]);
qxl_bo_unref(&qdev->current_release_bo[1]);
+ qxl_gem_fini(qdev);
+ qxl_bo_fini(qdev);
flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
- qxl_gem_fini(qdev);
- qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
iounmap(qdev->ram_header);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 16a5e903533d..62a5e424971b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,11 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -256,7 +251,6 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
- .invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
index 403eb3a5891f..9c1a94153983 100644
--- a/drivers/gpu/drm/radeon/.gitignore
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mkregtable
*_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index be583695427a..91811757104c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2231,6 +2231,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.disable = atombios_crtc_disable,
+ .get_scanout_position = radeon_get_crtc_scanout_position,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c42f73fad3e3..bb29cf02974d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -108,25 +108,33 @@ static bool radeon_read_bios(struct radeon_device *rdev)
static bool radeon_read_platform_bios(struct radeon_device *rdev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = rdev->pdev->rom;
+ size_t romlen = rdev->pdev->romlen;
+ void __iomem *bios;
rdev->bios = NULL;
- bios = pci_platform_rom(rdev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ rdev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!rdev->bios)
return false;
- }
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
- if (rdev->bios == NULL) {
- return false;
- }
+
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
+
+ memcpy_fromio(rdev->bios, bios, romlen);
+ iounmap(bios);
+
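+	/* 0x55 0xaa is the standard PCI expansion ROM signature */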
+ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa)
+ goto free_bios;
return true;
+free_bios:
+ kfree(rdev->bios);
+ return false;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a522e092038b..266e3cbbd09b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1263,7 +1263,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d07c7db0c815..35db79a168bf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -45,6 +45,10 @@
#include "atom.h"
#include "radeon.h"
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc);
+int radeon_enable_vblank_kms(struct drm_crtc *crtc);
+void radeon_disable_vblank_kms(struct drm_crtc *crtc);
+
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -460,7 +464,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(!ASIC_IS_AVIVO(rdev) ||
((int) (work->target_vblank -
- dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
+ crtc->funcs->get_vblank_counter(crtc)) > 0)))
usleep_range(1000, 2000);
/* We borrow the event spin lock for protecting flip_status */
@@ -576,7 +580,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
}
work->base = base;
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- dev->driver->get_vblank_counter(dev, work->crtc_id);
+ crtc->funcs->get_vblank_counter(crtc);
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -668,6 +672,10 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = {
.set_config = radeon_crtc_set_config,
.destroy = radeon_crtc_destroy,
.page_flip_target = radeon_crtc_page_flip_target,
+ .get_vblank_counter = radeon_get_vblank_counter_kms,
+ .enable_vblank = radeon_enable_vblank_kms,
+ .disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -1973,3 +1981,16 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
return ret;
}
+
+bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 28eef9282874..008308780443 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -301,35 +301,8 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
return connector;
}
-static void radeon_dp_register_mst_connector(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_fb_add_connector(rdev, connector);
-
- drm_connector_register(connector);
-}
-
-static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct radeon_device *rdev = dev->dev_private;
-
- drm_connector_unregister(connector);
- radeon_fb_remove_connector(rdev, connector);
- drm_connector_cleanup(connector);
-
- kfree(connector);
- DRM_DEBUG_KMS("\n");
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
- .register_connector = radeon_dp_register_mst_connector,
- .destroy_connector = radeon_dp_destroy_mst_connector,
};
static struct
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8696af1ee14d..59f8186a2415 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -120,9 +120,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
bool fbcon, bool freeze);
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -602,16 +599,6 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
-static bool
-radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_GEM | DRIVER_RENDER,
@@ -620,11 +607,6 @@ static struct drm_driver kms_driver = {
.postclose = radeon_driver_postclose_kms,
.lastclose = radeon_driver_lastclose_kms,
.unload = radeon_driver_unload_kms,
- .get_vblank_counter = radeon_get_vblank_counter_kms,
- .enable_vblank = radeon_enable_vblank_kms,
- .disable_vblank = radeon_disable_vblank_kms,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = radeon_get_crtc_scanout_position,
.irq_preinstall = radeon_driver_irq_preinstall_kms,
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_uninstall = radeon_driver_irq_uninstall_kms,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ec0b7d6c994d..cf3156a65fc1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -354,15 +354,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
&radeon_fb_helper_funcs);
- ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
- RADEONFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
- if (ret)
- goto fini;
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(rdev->ddev);
@@ -404,15 +399,3 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
return true;
return false;
}
-
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
-
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
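The fbdev changes follow another helper API cleanup: drm_fb_helper_init() lost its connector-count argument and the core now tracks connectors itself, so the add_one_connector()/remove_one_connector() plumbing can be deleted. The resulting init sequence, condensed from the hunk above:

	drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper, &radeon_fb_helper_funcs);

	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper);	/* no connector limit */
	if (ret)
		goto free;

	/* connectors are picked up automatically during initial config */
	ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);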
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dd2f19b8022b..58176db85952 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -745,14 +745,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/**
* radeon_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
int vpos, hpos, stat;
u32 count;
struct radeon_device *rdev = dev->dev_private;
@@ -814,25 +815,26 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* radeon_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
* @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
int r;
- if (crtc < 0 || crtc >= rdev->num_crtc) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.crtc_vblank_int[crtc] = true;
+ rdev->irq.crtc_vblank_int[pipe] = true;
r = radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
return r;
@@ -841,23 +843,24 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
/**
* radeon_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
* @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
- if (crtc < 0 || crtc >= rdev->num_crtc) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
return;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.crtc_vblank_int[crtc] = false;
+ rdev->irq.crtc_vblank_int[pipe] = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index a1985a552794..8817fd033cd0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1111,7 +1111,8 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
- .disable = radeon_crtc_disable
+ .disable = radeon_crtc_disable,
+ .get_scanout_position = radeon_get_crtc_scanout_position,
};
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 96565171d13e..c7f223743d46 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -880,6 +880,12 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+extern bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
@@ -980,9 +986,6 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
-
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b3380ffab4c2..5d50c9edbe80 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -66,11 +66,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
-static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -774,7 +769,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
- .invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 05e8b4d0af3f..2cb85dbe728f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2979,7 +2979,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0xC3) ||
(rdev->pdev->device == 0x6664) ||
(rdev->pdev->device == 0x6665) ||
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 3cd83a030a04..c07c6a88aff0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -121,7 +121,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
* Attach the bridge to the encoder. The bridge will create the
* connector.
*/
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
drm_encoder_cleanup(encoder);
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 8ffa4fbbdeb3..ab0d49618cf9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -23,6 +23,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "rcar_lvds.h"
@@ -590,8 +591,9 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_crtc *crtc;
@@ -603,7 +605,7 @@ static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+ struct drm_bridge_state *old_bridge_state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -618,7 +620,8 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
/* Disable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
- lvds->companion->funcs->atomic_disable(lvds->companion, state);
+ lvds->companion->funcs->atomic_disable(lvds->companion,
+ old_bridge_state);
clk_disable_unprepare(lvds->clocks.mod);
}
@@ -641,7 +644,8 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
return true;
}
-static int rcar_lvds_attach(struct drm_bridge *bridge)
+static int rcar_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
@@ -651,7 +655,12 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
- bridge);
+ bridge, flags);
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
/* Otherwise if we have a panel, create a connector. */
if (!lvds->panel)
@@ -682,6 +691,9 @@ static void rcar_lvds_detach(struct drm_bridge *bridge)
static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
.attach = rcar_lvds_attach,
.detach = rcar_lvds_detach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = rcar_lvds_atomic_enable,
.atomic_disable = rcar_lvds_atomic_disable,
.mode_fixup = rcar_lvds_mode_fixup,
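rcar_lvds illustrates the bridge API changes threaded through this series: attach() gains a flags argument that must be forwarded down the chain, and the atomic hooks now take the old struct drm_bridge_state rather than a raw atomic state. A minimal sketch of a stateless bridge on the new API (the example_* names and next_bridge are placeholders):

	static int example_bridge_attach(struct drm_bridge *bridge,
					 enum drm_bridge_attach_flags flags)
	{
		/* forward the flags when chaining to the next bridge */
		return drm_bridge_attach(bridge->encoder, next_bridge, bridge, flags);
	}

	static void example_bridge_atomic_enable(struct drm_bridge *bridge,
						 struct drm_bridge_state *old_bridge_state)
	{
		/* the full atomic state remains reachable via the bridge state */
		struct drm_atomic_state *state = old_bridge_state->base.state;
		struct drm_connector *connector =
			drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);

		/* ... program the hardware for 'connector' ... */
		(void)connector;
	}

	static const struct drm_bridge_funcs example_bridge_funcs = {
		.attach			= example_bridge_attach,
		.atomic_enable		= example_bridge_atomic_enable,
		/* bridges without private state can use the stock helpers */
		.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
		.atomic_reset		= drm_atomic_helper_bridge_reset,
	};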
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index f38f5e113c6b..ce98c08aa8b4 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -325,15 +325,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
void *data)
{
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
- const struct rockchip_dp_chip_data *dp_data;
struct drm_device *drm_dev = data;
int ret;
- dp_data = of_device_get_match_data(dev);
- if (!dp_data)
- return -ENODEV;
-
- dp->data = dp_data;
dp->drm_dev = drm_dev;
ret = rockchip_dp_drm_create_encoder(dp);
@@ -344,16 +338,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.encoder = &dp->encoder;
- dp->plat_data.dev_type = dp->data->chip_type;
- dp->plat_data.power_on_start = rockchip_dp_poweron_start;
- dp->plat_data.power_off = rockchip_dp_powerdown;
- dp->plat_data.get_modes = rockchip_dp_get_modes;
-
- dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
- if (IS_ERR(dp->adp)) {
- ret = PTR_ERR(dp->adp);
+ ret = analogix_dp_bind(dp->adp, drm_dev);
+ if (ret)
goto err_cleanup_encoder;
- }
return 0;
err_cleanup_encoder:
@@ -368,8 +355,6 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
analogix_dp_unbind(dp->adp);
dp->encoder.funcs->destroy(&dp->encoder);
-
- dp->adp = ERR_PTR(-ENODEV);
}
static const struct component_ops rockchip_dp_component_ops = {
@@ -380,10 +365,15 @@ static const struct component_ops rockchip_dp_component_ops = {
static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct rockchip_dp_chip_data *dp_data;
struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
int ret;
+ dp_data = of_device_get_match_data(dev);
+ if (!dp_data)
+ return -ENODEV;
+
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
if (ret < 0)
return ret;
@@ -394,7 +384,12 @@ static int rockchip_dp_probe(struct platform_device *pdev)
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
+ dp->data = dp_data;
dp->plat_data.panel = panel;
+ dp->plat_data.dev_type = dp->data->chip_type;
+ dp->plat_data.power_on_start = rockchip_dp_poweron_start;
+ dp->plat_data.power_off = rockchip_dp_powerdown;
+ dp->plat_data.get_modes = rockchip_dp_get_modes;
ret = rockchip_dp_of_probe(dp);
if (ret < 0)
@@ -402,12 +397,19 @@ static int rockchip_dp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dp);
+ dp->adp = analogix_dp_probe(dev, &dp->plat_data);
+ if (IS_ERR(dp->adp))
+ return PTR_ERR(dp->adp);
+
return component_add(dev, &rockchip_dp_component_ops);
}
static int rockchip_dp_remove(struct platform_device *pdev)
{
+ struct rockchip_dp_device *dp = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &rockchip_dp_component_ops);
+ analogix_dp_remove(dp->adp);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 521fe42ac5e2..2fdc455c4ad7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -124,7 +124,7 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize drm fb helper - %d.\n",
@@ -132,13 +132,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
return ret;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to add connectors - %d.\n", ret);
- goto err_drm_fb_helper_fini;
- }
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 7582d0e6a60a..0d1884684dcb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -6,6 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d04b3492bdac..cecb2cc781f5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -724,7 +724,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
DRM_PLANE_HELPER_NO_SCALING;
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 0b3d18c457b2..cc672620d6e0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -328,7 +328,7 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
{
int act_height;
- act_height = (src_h + vskiplines - 1) / vskiplines;
+ act_height = DIV_ROUND_UP(src_h, vskiplines);
if (act_height == dst_h)
return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
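The vskip computation now uses the kernel's DIV_ROUND_UP() instead of the open-coded form; for positive integers the two are identical, since the macro is effectively:

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* e.g. src_h = 10, vskiplines = 4: (10 + 4 - 1) / 4 = 3 scan lines kept */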
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f25a36743cbd..449a62908d21 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -646,7 +646,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index ae730275a34f..90784781e515 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -98,7 +98,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
if (of_property_read_u32(endpoint, "reg", &endpoint_id))
endpoint_id = 0;
- if (rockchip_drm_endpoint_is_subdriver(endpoint) > 0)
+ /* if subdriver (> 0) or error case (< 0), ignore entry */
+ if (rockchip_drm_endpoint_is_subdriver(endpoint) != 0)
continue;
child_count++;
@@ -144,7 +145,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->bridge = bridge;
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index d79086498aff..877ce9b127f1 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -59,6 +59,33 @@ TRACE_EVENT(drm_sched_job,
__entry->job_count, __entry->hw_job_count)
);
+TRACE_EVENT(drm_run_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+ TP_ARGS(sched_job, entity),
+ TP_STRUCT__entry(
+ __field(struct drm_sched_entity *, entity)
+ __field(struct dma_fence *, fence)
+ __field(const char *, name)
+ __field(uint64_t, id)
+ __field(u32, job_count)
+ __field(int, hw_job_count)
+ ),
+
+ TP_fast_assign(
+ __entry->entity = entity;
+ __entry->id = sched_job->id;
+ __entry->fence = &sched_job->s_fence->finished;
+ __entry->name = sched_job->sched->name;
+ __entry->job_count = spsc_queue_count(&entity->job_queue);
+ __entry->hw_job_count = atomic_read(
+ &sched_job->sched->hw_rq_count);
+ ),
+ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->id,
+ __entry->fence, __entry->name,
+ __entry->job_count, __entry->hw_job_count)
+);
+
TRACE_EVENT(drm_sched_process_job,
TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
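drm_run_job complements drm_sched_job: the latter fires when a job is pushed to an entity queue, the new event when the scheduler thread actually hands the job to the backend (see the drm_sched_main() hunk further below), so the gap between the two events is the job's queue latency. Emitting it is a single call at dispatch time:

	trace_drm_run_job(sched_job, entity);
	fence = sched->ops->run_job(sched_job);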
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 63bccd201b97..c803e14eed91 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -84,6 +84,24 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
+ * drm_sched_entity_modify_sched - Modify sched of an entity
+ * @entity: scheduler entity to modify
+ * @sched_list: the list of new drm scheds which will replace the
+ * existing entity->sched_list
+ * @num_sched_list: number of drm sched in sched_list
+ */
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ WARN_ON(!num_sched_list || !sched_list);
+
+ entity->sched_list = sched_list;
+ entity->num_sched_list = num_sched_list;
+}
+EXPORT_SYMBOL(drm_sched_entity_modify_sched);
+
+/**
* drm_sched_entity_is_idle - Check if entity is idle
*
* @entity: scheduler entity
@@ -120,38 +138,6 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
}
/**
- * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
- *
- * @entity: scheduler entity
- *
- * Return the pointer to the rq with least load.
- */
-static struct drm_sched_rq *
-drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
-{
- struct drm_sched_rq *rq = NULL;
- unsigned int min_score = UINT_MAX, num_score;
- int i;
-
- for (i = 0; i < entity->num_sched_list; ++i) {
- struct drm_gpu_scheduler *sched = entity->sched_list[i];
-
- if (!entity->sched_list[i]->ready) {
- DRM_WARN("sched%s is not ready, skipping", sched->name);
- continue;
- }
-
- num_score = atomic_read(&sched->score);
- if (num_score < min_score) {
- min_score = num_score;
- rq = &entity->sched_list[i]->sched_rq[entity->priority];
- }
- }
-
- return rq;
-}
-
-/**
* drm_sched_entity_flush - Flush a context entity
*
* @entity: scheduler entity
@@ -461,6 +447,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
+ struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
@@ -471,7 +458,8 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
return;
spin_lock(&entity->rq_lock);
- rq = drm_sched_entity_get_free_sched(entity);
+ sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
+ rq = sched ? &sched->sched_rq[entity->priority] : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
@@ -498,7 +486,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->score);
+ atomic_inc(&entity->rq->sched->num_jobs);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 60c4c6a1aac6..2f319102ae9f 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,7 +92,6 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -111,7 +110,6 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -222,8 +220,7 @@ EXPORT_SYMBOL(drm_sched_fault);
*
* Suspend the delayed work timeout for the scheduler. This is done by
* modifying the delayed work timeout to an arbitrary large value,
- * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
- * called from an IRQ context.
+ * MAX_SCHEDULE_TIMEOUT in this case.
*
* Returns the timeout remaining
*
@@ -252,46 +249,41 @@ EXPORT_SYMBOL(drm_sched_suspend_timeout);
* @sched: scheduler instance for which to resume the timeout
* @remaining: remaining timeout
*
- * Resume the delayed work timeout for the scheduler. Note that
- * this function can be called from an IRQ context.
+ * Resume the delayed work timeout for the scheduler.
*/
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
unsigned long remaining)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
if (list_empty(&sched->ring_mirror_list))
cancel_delayed_work(&sched->work_tdr);
else
mod_delayed_work(system_wq, &sched->work_tdr, remaining);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
@@ -302,7 +294,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
* is parked at which point it's safe.
*/
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
job->sched->ops->timedout_job(job);
@@ -315,12 +307,12 @@ static void drm_sched_job_timedout(struct work_struct *work)
sched->free_guilty = false;
}
} else {
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
/**
@@ -383,7 +375,6 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job, *tmp;
- unsigned long flags;
kthread_park(sched->thread);
@@ -417,9 +408,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* remove job from ring_mirror_list.
* Locking here is for concurrent resume timeout
*/
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_del_init(&s_job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
/*
* Wait for job's HW fence callback to finish using s_job
@@ -462,7 +453,6 @@ EXPORT_SYMBOL(drm_sched_stop);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
struct drm_sched_job *s_job, *tmp;
- unsigned long flags;
int r;
/*
@@ -491,9 +481,9 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
}
if (full_recovery) {
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
kthread_unpark(sched->thread);
@@ -657,7 +647,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
+ atomic_dec(&sched->num_jobs);
trace_drm_sched_process_job(s_fence);
@@ -679,7 +669,6 @@ static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
- unsigned long flags;
/*
* Don't destroy jobs while the timeout worker is running OR thread
@@ -687,10 +676,10 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
*/
if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr)) ||
- __kthread_should_park(sched->thread))
+ kthread_should_park())
return NULL;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
@@ -704,12 +693,48 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
drm_sched_start_timeout(sched);
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
return job;
}
/**
+ * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
+ * @sched_list: list of drm_gpu_schedulers
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list
+ *
+ * Returns a pointer to the sched with the least load, or NULL if none of the
+ * drm_gpu_schedulers are ready.
+ */
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ struct drm_gpu_scheduler *sched, *picked_sched = NULL;
+ int i;
+ unsigned int min_jobs = UINT_MAX, num_jobs;
+
+ for (i = 0; i < num_sched_list; ++i) {
+ sched = sched_list[i];
+
+ if (!sched->ready) {
+ DRM_WARN("scheduler %s is not ready, skipping",
+ sched->name);
+ continue;
+ }
+
+ num_jobs = atomic_read(&sched->num_jobs);
+ if (num_jobs < min_jobs) {
+ min_jobs = num_jobs;
+ picked_sched = sched;
+ }
+ }
+
+ return picked_sched;
+}
+EXPORT_SYMBOL(drm_sched_pick_best);
+
+/**
* drm_sched_blocked - check if the scheduler is blocked
*
* @sched: scheduler instance
@@ -775,6 +800,7 @@ static int drm_sched_main(void *param)
atomic_inc(&sched->hw_rq_count);
drm_sched_job_begin(sched_job);
+ trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
drm_sched_fence_scheduled(s_fence);
@@ -834,7 +860,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->score, 0);
+ atomic_set(&sched->num_jobs, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
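drm_sched_pick_best() is the load-balancing primitive that replaces drm_sched_entity_get_free_sched(): it scans a scheduler array, skips schedulers that are not ready, and returns the one with the fewest queued jobs (tracked in sched->num_jobs), or NULL. Typical use, as in drm_sched_entity_select_rq() above:

	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;

Together with drm_sched_entity_modify_sched(), a driver can swap an entity's candidate scheduler list at runtime and let the next job submitted to an idle queue rebalance onto the least-loaded engine.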
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index dc64fbfc4e61..49e6cb8f5836 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -279,12 +279,13 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
return 0;
}
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int sti_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
- struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
@@ -297,8 +298,10 @@ int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
+static void sti_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *drm_dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
@@ -330,6 +333,8 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.late_register = sti_crtc_late_register,
+ .enable_vblank = sti_crtc_enable_vblank,
+ .disable_vblank = sti_crtc_disable_vblank,
};
bool sti_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
index df489ab14e2b..1132b4586712 100644
--- a/drivers/gpu/drm/sti/sti_crtc.h
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -15,8 +15,6 @@ struct sti_mixer;
int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor);
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void sti_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
int sti_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data);
bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index a39fc36f815b..50870d8cbb76 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -21,7 +21,6 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_plane.h"
@@ -146,9 +145,6 @@ static struct drm_driver sti_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.fops = &sti_driver_fops,
- .enable_vblank = sti_crtc_enable_vblank,
- .disable_vblank = sti_crtc_disable_vblank,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index b2778ec1cdd7..3d04bfca21a0 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -467,7 +467,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
bridge->of_node = dvo->dev.of_node;
drm_bridge_add(bridge);
- err = drm_bridge_attach(encoder, bridge, NULL);
+ err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err) {
DRM_ERROR("Failed to attach bridge\n");
return err;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 2bb32009d117..f3f28d79b0e4 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -701,7 +701,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hda;
bridge->funcs = &sti_hda_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL);
+ drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 64ed102033c8..18eaf786ffa4 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1281,7 +1281,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hdmi;
bridge->funcs = &sti_hdmi_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL);
+ drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 5a9f9aca8bc2..ea9fcbdc68b3 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -72,8 +72,6 @@ static struct drm_driver drv_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
- .get_scanout_position = ltdc_crtc_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
};
static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 4b165635b2d4..2e1f2664495d 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -377,7 +377,9 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
- DRM_ERROR("Unable to get pll reference clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Unable to get pll reference clock: %d\n",
+ ret);
goto err_clk_get;
}
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index c2815e8ae1da..df585fe64f61 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -636,38 +636,13 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
- .mode_valid = ltdc_crtc_mode_valid,
- .mode_fixup = ltdc_crtc_mode_fixup,
- .mode_set_nofb = ltdc_crtc_mode_set_nofb,
- .atomic_flush = ltdc_crtc_atomic_flush,
- .atomic_enable = ltdc_crtc_atomic_enable,
- .atomic_disable = ltdc_crtc_atomic_disable,
-};
-
-static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
- DRM_DEBUG_DRIVER("\n");
- reg_set(ldev->regs, LTDC_IER, IER_LIE);
-
- return 0;
-}
-
-static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
- DRM_DEBUG_DRIVER("\n");
- reg_clear(ldev->regs, LTDC_IER, IER_LIE);
-}
-
-bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool ltdc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *ddev = crtc->dev;
struct ltdc_device *ldev = ddev->dev_private;
int line, vactive_start, vactive_end, vtotal;
@@ -710,6 +685,39 @@ bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
return true;
}
+static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
+ .mode_valid = ltdc_crtc_mode_valid,
+ .mode_fixup = ltdc_crtc_mode_fixup,
+ .mode_set_nofb = ltdc_crtc_mode_set_nofb,
+ .atomic_flush = ltdc_crtc_atomic_flush,
+ .atomic_enable = ltdc_crtc_atomic_enable,
+ .atomic_disable = ltdc_crtc_atomic_disable,
+ .get_scanout_position = ltdc_crtc_get_scanout_position,
+};
+
+static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_crtc_state *state = crtc->state;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (state->enable)
+ reg_set(ldev->regs, LTDC_IER, IER_LIE);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+
+ DRM_DEBUG_DRIVER("\n");
+ reg_clear(ldev->regs, LTDC_IER, IER_LIE);
+}
+
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
@@ -719,6 +727,7 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
};
@@ -1100,7 +1109,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
drm_encoder_cleanup(encoder);
return -EINVAL;
@@ -1146,12 +1155,14 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.pad_max_freq_hz = 90000000;
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
+ ldev->caps.nb_irq = 2;
break;
case HWVER_20101:
ldev->caps.reg_ofs = REG_OFS_4;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.non_alpha_only_l1 = false;
ldev->caps.pad_max_freq_hz = 150000000;
+ ldev->caps.nb_irq = 4;
break;
default:
return -ENODEV;
@@ -1251,13 +1262,21 @@ int ltdc_load(struct drm_device *ddev)
reg_clear(ldev->regs, LTDC_IER,
IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
- for (i = 0; i < MAX_IRQ; i++) {
+ ret = ltdc_get_caps(ddev);
+ if (ret) {
+ DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
+ ldev->caps.hw_version);
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+
+ for (i = 0; i < ldev->caps.nb_irq; i++) {
irq = platform_get_irq(pdev, i);
- if (irq == -EPROBE_DEFER)
+ if (irq < 0) {
+ ret = irq;
goto err;
-
- if (irq < 0)
- continue;
+ }
ret = devm_request_threaded_irq(dev, irq, ltdc_irq,
ltdc_irq_thread, IRQF_ONESHOT,
@@ -1268,16 +1287,6 @@ int ltdc_load(struct drm_device *ddev)
}
}
-
- ret = ltdc_get_caps(ddev);
- if (ret) {
- DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
- ldev->caps.hw_version);
- goto err;
- }
-
- DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
-
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
if (panel[i]) {
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index a1ad0ae3b006..f153b908c70e 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -19,6 +19,7 @@ struct ltdc_caps {
const u32 *pix_fmt_hw; /* supported pixel formats */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
int pad_max_freq_hz; /* max frequency supported by pad */
+ int nb_irq; /* number of hardware interrupts */
};
#define LTDC_MAX_LAYER 4
@@ -39,11 +40,6 @@ struct ltdc_device {
struct drm_atomic_state *suspend_state;
};
-bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
void ltdc_suspend(struct drm_device *ddev);
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 65b7a8739666..26e5c7ceb8ff 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -156,7 +156,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index b27f16af50f5..3b23d5be3cf3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -253,7 +253,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
if (rgb->bridge) {
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c81cdce6ed55..624437b27cdc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -114,46 +114,71 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
}
}
+static void sun4i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN4I_TCON0_LVDS_ANA0_CK_EN |
+ SUN4I_TCON0_LVDS_ANA0_REG_V |
+ SUN4I_TCON0_LVDS_ANA0_REG_C |
+ SUN4I_TCON0_LVDS_ANA0_EN_MB |
+ SUN4I_TCON0_LVDS_ANA0_PD |
+ SUN4I_TCON0_LVDS_ANA0_DCHS);
+
+ udelay(2); /* delay at least 1200 ns */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+ SUN4I_TCON0_LVDS_ANA1_INIT,
+ SUN4I_TCON0_LVDS_ANA1_INIT);
+ udelay(1); /* delay at least 120 ns */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+ SUN4I_TCON0_LVDS_ANA1_UPDATE,
+ SUN4I_TCON0_LVDS_ANA1_UPDATE);
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN4I_TCON0_LVDS_ANA0_EN_MB,
+ SUN4I_TCON0_LVDS_ANA0_EN_MB);
+}
+
+static void sun6i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ u8 val;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_C(2) |
+ SUN6I_TCON0_LVDS_ANA0_V(3) |
+ SUN6I_TCON0_LVDS_ANA0_PD(2) |
+ SUN6I_TCON0_LVDS_ANA0_EN_LDO);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
+
+ if (sun4i_tcon_get_pixel_depth(encoder) == 18)
+ val = 7;
+ else
+ val = 0xf;
+
+ regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+}
+
static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
bool enabled)
{
if (enabled) {
- u8 val;
-
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN,
SUN4I_TCON0_LVDS_IF_EN);
-
- /*
- * As their name suggest, these values only apply to the A31
- * and later SoCs. We'll have to rework this when merging
- * support for the older SoCs.
- */
- regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_C(2) |
- SUN6I_TCON0_LVDS_ANA0_V(3) |
- SUN6I_TCON0_LVDS_ANA0_PD(2) |
- SUN6I_TCON0_LVDS_ANA0_EN_LDO);
- udelay(2);
-
- regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_MB,
- SUN6I_TCON0_LVDS_ANA0_EN_MB);
- udelay(2);
-
- regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
-
- if (sun4i_tcon_get_pixel_depth(encoder) == 18)
- val = 7;
- else
- val = 0xf;
-
- regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
- SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+ if (tcon->quirks->setup_lvds_phy)
+ tcon->quirks->setup_lvds_phy(tcon, encoder);
} else {
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN, 0);
@@ -1453,6 +1478,16 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.dclk_min_div = 1,
};
+static const struct sun4i_tcon_quirks sun7i_a20_tcon0_quirks = {
+ .supports_lvds = true,
+ .has_channel_0 = true,
+ .has_channel_1 = true,
+ .dclk_min_div = 4,
+ /* Same display pipeline structure as A10 */
+ .set_mux = sun4i_a10_tcon_set_mux,
+ .setup_lvds_phy = sun4i_tcon_setup_lvds_phy,
+};
+
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
@@ -1465,12 +1500,15 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
.dclk_min_div = 1,
+ .setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
+ .supports_lvds = true,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
.dclk_min_div = 1,
+ .setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1505,6 +1543,8 @@ const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon0", .data = &sun7i_a20_tcon0_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon1", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index a62ec826ae71..cfbf4e6c1679 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -193,6 +193,13 @@
#define SUN4I_TCON_MUX_CTRL_REG 0x200
#define SUN4I_TCON0_LVDS_ANA0_REG 0x220
+#define SUN4I_TCON0_LVDS_ANA0_DCHS BIT(16)
+#define SUN4I_TCON0_LVDS_ANA0_PD (BIT(20) | BIT(21))
+#define SUN4I_TCON0_LVDS_ANA0_EN_MB BIT(22)
+#define SUN4I_TCON0_LVDS_ANA0_REG_C (BIT(24) | BIT(25))
+#define SUN4I_TCON0_LVDS_ANA0_REG_V (BIT(26) | BIT(27))
+#define SUN4I_TCON0_LVDS_ANA0_CK_EN (BIT(29) | BIT(28))
+
#define SUN6I_TCON0_LVDS_ANA0_EN_MB BIT(31)
#define SUN6I_TCON0_LVDS_ANA0_EN_LDO BIT(30)
#define SUN6I_TCON0_LVDS_ANA0_EN_DRVC BIT(24)
@@ -201,6 +208,10 @@
#define SUN6I_TCON0_LVDS_ANA0_V(x) (((x) & 3) << 8)
#define SUN6I_TCON0_LVDS_ANA0_PD(x) (((x) & 3) << 4)
+#define SUN4I_TCON0_LVDS_ANA1_REG 0x224
+#define SUN4I_TCON0_LVDS_ANA1_INIT (0x1f << 26 | 0x1f << 10)
+#define SUN4I_TCON0_LVDS_ANA1_UPDATE (0x1f << 16 | 0x1f << 00)
+
#define SUN4I_TCON1_FILL_CTL_REG 0x300
#define SUN4I_TCON1_FILL_BEG0_REG 0x304
#define SUN4I_TCON1_FILL_END0_REG 0x308
@@ -228,6 +239,9 @@ struct sun4i_tcon_quirks {
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
+ /* handler for LVDS setup routine */
+ void (*setup_lvds_phy)(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder);
};
struct sun4i_tcon {
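The LVDS rework turns the SoC-specific PHY bring-up into a quirk callback, so each compatible string selects its power-up sequence through the quirks structure instead of hard-coding the A31 sequence for every SoC. The pattern in miniature (field values are illustrative; see the quirk tables above for the real ones):

	static const struct sun4i_tcon_quirks example_quirks = {
		.supports_lvds	= true,
		.has_channel_0	= true,
		/* A31-class PHY; A20-class boards use sun4i_tcon_setup_lvds_phy */
		.setup_lvds_phy	= sun6i_tcon_setup_lvds_phy,
	};

	/* at LVDS enable time: */
	if (tcon->quirks->setup_lvds_phy)
		tcon->quirks->setup_lvds_phy(tcon, encoder);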
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a75fcb113172..3eb89f1eb0e1 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -14,7 +14,6 @@
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -27,7 +26,6 @@
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
-#include "sun4i_drv.h"
#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
@@ -719,13 +717,34 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
struct mipi_dsi_device *device = dsi->device;
- union phy_configure_opts opts = { 0 };
+ union phy_configure_opts opts = { };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;
+ int err;
DRM_DEBUG_DRIVER("Enabling DSI output\n");
- pm_runtime_get_sync(dsi->dev);
+ err = regulator_enable(dsi->regulator);
+ if (err)
+ dev_warn(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+
+ reset_control_deassert(dsi->reset);
+ clk_prepare_enable(dsi->mod_clk);
+
+ /*
+ * Enable the DSI block.
+ */
+ regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
+
+ regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+ SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
+
+ regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
+ regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
+
+ sun6i_dsi_inst_init(dsi, dsi->device);
+
+ regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
delay = sun6i_dsi_get_video_start_delay(dsi, mode);
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
@@ -749,7 +768,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
phy_configure(dsi->dphy, &opts);
phy_power_on(dsi->dphy);
- if (!IS_ERR(dsi->panel))
+ if (dsi->panel)
drm_panel_prepare(dsi->panel);
/*
@@ -764,7 +783,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
* ordering on the panels I've tested it with, so I guess this
* will do for now, until that IP is better understood.
*/
- if (!IS_ERR(dsi->panel))
+ if (dsi->panel)
drm_panel_enable(dsi->panel);
sun6i_dsi_start(dsi, DSI_START_HSC);
@@ -780,7 +799,7 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling DSI output\n");
- if (!IS_ERR(dsi->panel)) {
+ if (dsi->panel) {
drm_panel_disable(dsi->panel);
drm_panel_unprepare(dsi->panel);
}
@@ -788,7 +807,9 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
phy_power_off(dsi->dphy);
phy_exit(dsi->dphy);
- pm_runtime_put(dsi->dev);
+ clk_disable_unprepare(dsi->mod_clk);
+ reset_control_assert(dsi->reset);
+ regulator_disable(dsi->regulator);
}
static int sun6i_dsi_get_modes(struct drm_connector *connector)
@@ -805,7 +826,10 @@ static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
static enum drm_connector_status
sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
{
- return connector_status_connected;
+ struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
+
+ return dsi->panel ? connector_status_connected :
+ connector_status_disconnected;
}
static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
@@ -942,11 +966,18 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+ struct drm_panel *panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+ if (!dsi->drm || !dsi->drm->registered)
+ return -EPROBE_DEFER;
+
+ dsi->panel = panel;
dsi->device = device;
- dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (IS_ERR(dsi->panel))
- return PTR_ERR(dsi->panel);
+
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ drm_kms_helper_hotplug_event(dsi->drm);
dev_info(host->dev, "Attached device %s\n", device->name);
@@ -957,10 +988,14 @@ static int sun6i_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+ struct drm_panel *panel = dsi->panel;
dsi->panel = NULL;
dsi->device = NULL;
+ drm_panel_detach(panel);
+ drm_kms_helper_hotplug_event(dsi->drm);
+
return 0;
}
@@ -1022,15 +1057,9 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
- struct sun4i_drv *drv = drm->dev_private;
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
int ret;
- if (!dsi->panel)
- return -EPROBE_DEFER;
-
- dsi->drv = drv;
-
drm_encoder_helper_add(&dsi->encoder,
&sun6i_dsi_enc_helper_funcs);
ret = drm_encoder_init(drm,
@@ -1056,7 +1085,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
}
drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
- drm_panel_attach(dsi->panel, &dsi->connector);
+
+ dsi->drm = drm;
return 0;
@@ -1070,7 +1100,7 @@ static void sun6i_dsi_unbind(struct device *dev, struct device *master,
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
- drm_panel_detach(dsi->panel);
+ dsi->drm = NULL;
}
static const struct component_ops sun6i_dsi_ops = {
@@ -1157,12 +1187,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
goto err_unprotect_clk;
}
- pm_runtime_enable(dev);
-
ret = mipi_dsi_host_register(&dsi->host);
if (ret) {
dev_err(dev, "Couldn't register MIPI-DSI host\n");
- goto err_pm_disable;
+ goto err_unprotect_clk;
}
ret = component_add(&pdev->dev, &sun6i_dsi_ops);
@@ -1175,8 +1203,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
err_remove_dsi_host:
mipi_dsi_host_unregister(&dsi->host);
-err_pm_disable:
- pm_runtime_disable(dev);
err_unprotect_clk:
clk_rate_exclusive_put(dsi->mod_clk);
err_attach_clk:
@@ -1192,7 +1218,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
component_del(&pdev->dev, &sun6i_dsi_ops);
mipi_dsi_host_unregister(&dsi->host);
- pm_runtime_disable(dev);
clk_rate_exclusive_put(dsi->mod_clk);
if (!IS_ERR(dsi->bus_clk))
@@ -1201,59 +1226,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
-{
- struct sun6i_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- err = regulator_enable(dsi->regulator);
- if (err) {
- dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
- return err;
- }
-
- reset_control_deassert(dsi->reset);
- clk_prepare_enable(dsi->mod_clk);
-
- /*
- * Enable the DSI block.
- *
- * Some part of it can only be done once we get a number of
- * lanes, see sun6i_dsi_inst_init
- */
- regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
-
- regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
- SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
-
- regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
- regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
-
- if (dsi->device)
- sun6i_dsi_inst_init(dsi, dsi->device);
-
- regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
-
- return 0;
-}
-
-static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
-{
- struct sun6i_dsi *dsi = dev_get_drvdata(dev);
-
- clk_disable_unprepare(dsi->mod_clk);
- reset_control_assert(dsi->reset);
- regulator_disable(dsi->regulator);
-
- return 0;
-}
-
-static const struct dev_pm_ops sun6i_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(sun6i_dsi_runtime_suspend,
- sun6i_dsi_runtime_resume,
- NULL)
-};
-
static const struct of_device_id sun6i_dsi_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
{ .compatible = "allwinner,sun50i-a64-mipi-dsi" },
@@ -1267,7 +1239,6 @@ static struct platform_driver sun6i_dsi_platform_driver = {
.driver = {
.name = "sun6i-mipi-dsi",
.of_match_table = sun6i_dsi_of_table,
- .pm = &sun6i_dsi_pm_ops,
},
};
module_platform_driver(sun6i_dsi_platform_driver);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 3f4846f581ef..c863900ae3b4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -28,8 +28,8 @@ struct sun6i_dsi {
struct phy *dphy;
struct device *dev;
- struct sun4i_drv *drv;
struct mipi_dsi_device *device;
+ struct drm_device *drm;
struct drm_panel *panel;
};
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 7c70fd31a4c2..1a7b08f35776 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2503,7 +2503,6 @@ static int tegra_dc_couple(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -2560,8 +2559,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
tegra_powergate_power_off(dc->powergate);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dc->regs))
return PTR_ERR(dc->regs);
@@ -2573,7 +2571,13 @@ static int tegra_dc_probe(struct platform_device *pdev)
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV) {
- dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dc->dev, "failed to probe RGB output: %d\n",
+ err);
return err;
}
@@ -2588,10 +2592,16 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto disable_pm;
}
return 0;
+
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+ tegra_dc_rgb_remove(dc);
+
+ return err;
}
static int tegra_dc_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index bd268028fb3d..583cd6e0ae27 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1039,6 +1039,7 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
+ struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
struct iommu_domain *domain;
/*
@@ -1076,7 +1077,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
* sufficient and whether or not the host1x is attached to an IOMMU
* doesn't matter.
*/
- if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
+ if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
return true;
return domain != NULL;
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 84f0e01e3428..b8a328f53862 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -314,19 +314,13 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
struct drm_device *drm = fbdev->base.dev;
int err;
- err = drm_fb_helper_init(drm, &fbdev->base, max_connectors);
+ err = drm_fb_helper_init(drm, &fbdev->base);
if (err < 0) {
dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
err);
return err;
}
- err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
- if (err < 0) {
- dev_err(drm->dev, "failed to add connectors: %d\n", err);
- goto fini;
- }
-
err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
if (err < 0) {
dev_err(drm->dev, "failed to set initial configuration: %d\n",
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6f117628f257..38252c0f068d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1648,6 +1648,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
static int tegra_hdmi_probe(struct platform_device *pdev)
{
+ const char *level = KERN_ERR;
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
@@ -1686,21 +1687,36 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
}
hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
- if (IS_ERR(hdmi->hdmi)) {
- dev_err(&pdev->dev, "failed to get HDMI regulator\n");
- return PTR_ERR(hdmi->hdmi);
+ err = PTR_ERR_OR_ZERO(hdmi->hdmi);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get HDMI regulator: %d\n", err);
+ return err;
}
hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
- if (IS_ERR(hdmi->pll)) {
- dev_err(&pdev->dev, "failed to get PLL regulator\n");
- return PTR_ERR(hdmi->pll);
+ err = PTR_ERR_OR_ZERO(hdmi->pll);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get PLL regulator: %d\n", err);
+ return err;
}
hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(hdmi->vdd)) {
- dev_err(&pdev->dev, "failed to get VDD regulator\n");
- return PTR_ERR(hdmi->vdd);
+ err = PTR_ERR_OR_ZERO(hdmi->vdd);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get VDD regulator: %d\n", err);
+ return err;
}
hdmi->output.dev = &pdev->dev;
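[Editor's note: the three regulator lookups above repeat the same pattern: the probe failure message is demoted to KERN_DEBUG when the error is -EPROBE_DEFER, since deferred probing is a normal, transient condition that should not land in the log as an error. A minimal sketch of the pattern factored into a local helper; probe_err() is a hypothetical name, not an existing kernel API.]

static int probe_err(struct device *dev, int err, const char *what)
{
	const char *level = KERN_ERR;

	/* Deferred probe is routine; log it quietly. */
	if (err == -EPROBE_DEFER)
		level = KERN_DEBUG;

	dev_printk(level, dev, "failed to get %s: %d\n", what, err);

	return err;
}

With such a helper each lookup collapses to, e.g., err = PTR_ERR_OR_ZERO(hdmi->vdd); if (err) return probe_err(&pdev->dev, err, "VDD regulator");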
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
new file mode 100644
index 000000000000..f790a5215302
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -0,0 +1,14 @@
+config DRM_TIDSS
+ tristate "DRM Support for TI Keystone"
+ depends on DRM && OF
+ depends on ARM || ARM64 || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+	  The TI Keystone family SoCs introduced a new generation of
+	  the Display SubSystem. There are currently three Keystone
+	  family SoCs released with DSS, each with a somewhat different
+	  version of it. The SoCs are 66AK2Gx, AM65x, and J721E. Set
+	  this to Y or M to add display support for TI Keystone family
+	  platforms.
diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile
new file mode 100644
index 000000000000..312645271014
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+tidss-y := tidss_crtc.o \
+ tidss_drv.o \
+ tidss_encoder.o \
+ tidss_kms.o \
+ tidss_irq.o \
+ tidss_plane.o \
+ tidss_scale_coefs.o \
+ tidss_dispc.o
+
+obj-$(CONFIG_DRM_TIDSS) += tidss.o
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
new file mode 100644
index 000000000000..3221a707e073
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* Page flip and frame done IRQs */
+
+static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
+{
+ struct drm_device *ddev = tcrtc->crtc.dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+ bool busy;
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+
+ /*
+ * New settings are taken into use at VFP, and GO bit is cleared at
+ * the same time. This happens before the vertical blank interrupt.
+ * So there is a small chance that the driver sets GO bit after VFP, but
+ * before vblank, and we have to check for that case here.
+ */
+ busy = dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport);
+ if (busy) {
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+ return;
+ }
+
+ event = tcrtc->event;
+ tcrtc->event = NULL;
+
+ if (!event) {
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+ return;
+ }
+
+ drm_crtc_send_vblank_event(&tcrtc->crtc, event);
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&tcrtc->crtc);
+}
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ drm_crtc_handle_vblank(crtc);
+
+ tidss_crtc_finish_page_flip(tcrtc);
+}
+
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ complete(&tcrtc->framedone_completion);
+}
+
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ dev_err_ratelimited(crtc->dev->dev, "CRTC%u SYNC LOST: (irq %llx)\n",
+ tcrtc->hw_videoport, irqstatus);
+}
+
+/* drm_crtc_helper_funcs */
+
+static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct dispc_device *dispc = tidss->dispc;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ const struct drm_display_mode *mode;
+ enum drm_mode_status ok;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->enable)
+ return 0;
+
+ mode = &state->adjusted_mode;
+
+ ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
+ if (ok != MODE_OK) {
+ dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n",
+ __func__, mode->hdisplay, mode->vdisplay, mode->clock);
+ return -EINVAL;
+ }
+
+ return dispc_vp_bus_check(dispc, hw_videoport, state);
+}
+
+/*
+ * This needs all affected planes to be present in the atomic
+ * state. The untouched planes are added to the state in
+ * tidss_atomic_check().
+ */
+static void tidss_crtc_position_planes(struct tidss_device *tidss,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state,
+ bool newmodeset)
+{
+ struct drm_atomic_state *ostate = old_state->state;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_crtc_state *cstate = crtc->state;
+ int layer;
+
+ if (!newmodeset && !cstate->zpos_changed &&
+ !to_tidss_crtc_state(cstate)->plane_pos_changed)
+ return;
+
+ for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+ struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ bool layer_active = false;
+ int i;
+
+ for_each_new_plane_in_state(ostate, plane, pstate, i) {
+ if (pstate->crtc != crtc || !pstate->visible)
+ continue;
+
+ if (pstate->normalized_zpos == layer) {
+ layer_active = true;
+ break;
+ }
+ }
+
+ if (layer_active) {
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dispc_ovr_set_plane(tidss->dispc, tplane->hw_plane_id,
+ tcrtc->hw_videoport,
+ pstate->crtc_x, pstate->crtc_y,
+ layer);
+ }
+ dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
+ layer_active);
+ }
+}
+
+static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+
+ dev_dbg(ddev->dev,
+ "%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
+ crtc->name, crtc->state->enable,
+ drm_atomic_crtc_needs_modeset(crtc->state), crtc->state->event);
+
+ /* There is nothing to do if CRTC is not going to be enabled. */
+ if (!crtc->state->enable)
+ return;
+
+ /*
+ * Flush CRTC changes with the GO bit only if a new modeset is
+ * not coming, so the CRTC stays enabled throughout the commit.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc->state))
+ return;
+
+ /* If the GO bit is stuck we better quit here. */
+ if (WARN_ON(dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport)))
+ return;
+
+ /* We should have an event if the CRTC is enabled throughout this commit. */
+ if (WARN_ON(!crtc->state->event))
+ return;
+
+ /* Write vp properties to HW if needed. */
+ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, false);
+
+ /* Update plane positions if needed. */
+ tidss_crtc_position_planes(tidss, crtc, old_crtc_state, false);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ dispc_vp_go(tidss->dispc, tcrtc->hw_videoport);
+
+ WARN_ON(tcrtc->event);
+
+ tcrtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ unsigned long flags;
+ int r;
+
+ dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+ tidss_runtime_get(tidss);
+
+ r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport,
+ mode->clock * 1000);
+ if (r != 0)
+ return;
+
+ r = dispc_vp_enable_clk(tidss->dispc, tcrtc->hw_videoport);
+ if (r != 0)
+ return;
+
+ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, true);
+ tidss_crtc_position_planes(tidss, crtc, old_state, true);
+
+ /* Turn vertical blanking interrupt reporting on. */
+ drm_crtc_vblank_on(crtc);
+
+ dispc_vp_prepare(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+ dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+
+ dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+ reinit_completion(&tcrtc->framedone_completion);
+
+ dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
+
+ if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
+ msecs_to_jiffies(500)))
+ dev_err(tidss->dev, "Timeout waiting for framedone on crtc %d",
+ tcrtc->hw_videoport);
+
+ dispc_vp_unprepare(tidss->dispc, tcrtc->hw_videoport);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+ drm_crtc_vblank_off(crtc);
+
+ dispc_vp_disable_clk(tidss->dispc, tcrtc->hw_videoport);
+
+ tidss_runtime_put(tidss);
+}
+
+static
+enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
+}
+
+static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
+ .atomic_check = tidss_crtc_atomic_check,
+ .atomic_flush = tidss_crtc_atomic_flush,
+ .atomic_enable = tidss_crtc_atomic_enable,
+ .atomic_disable = tidss_crtc_atomic_disable,
+
+ .mode_valid = tidss_crtc_mode_valid,
+};
+
+/* drm_crtc_funcs */
+
+static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_runtime_get(tidss);
+
+ tidss_irq_enable_vblank(crtc);
+
+ return 0;
+}
+
+static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_irq_disable_vblank(crtc);
+
+ tidss_runtime_put(tidss);
+}
+
+static void tidss_crtc_reset(struct drm_crtc *crtc)
+{
+ struct tidss_crtc_state *tcrtc;
+
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(crtc->state);
+
+ tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
+ if (!tcrtc) {
+ crtc->state = NULL;
+ return;
+ }
+
+ crtc->state = &tcrtc->base;
+ crtc->state->crtc = crtc;
+}
+
+static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct tidss_crtc_state *state, *current_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ current_state = to_tidss_crtc_state(crtc->state);
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ state->plane_pos_changed = false;
+
+ state->bus_format = current_state->bus_format;
+ state->bus_flags = current_state->bus_flags;
+
+ return &state->base;
+}
+
+static void tidss_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(tcrtc);
+}
+
+static const struct drm_crtc_funcs tidss_crtc_funcs = {
+ .reset = tidss_crtc_reset,
+ .destroy = tidss_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = tidss_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = tidss_crtc_enable_vblank,
+ .disable_vblank = tidss_crtc_disable_vblank,
+};
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+ u32 hw_videoport,
+ struct drm_plane *primary)
+{
+ struct tidss_crtc *tcrtc;
+ struct drm_crtc *crtc;
+ unsigned int gamma_lut_size = 0;
+ bool has_ctm = tidss->feat->vp_feat.color.has_ctm;
+ int ret;
+
+ tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
+ if (!tcrtc)
+ return ERR_PTR(-ENOMEM);
+
+ tcrtc->hw_videoport = hw_videoport;
+ init_completion(&tcrtc->framedone_completion);
+
+ crtc = &tcrtc->crtc;
+
+ ret = drm_crtc_init_with_planes(&tidss->ddev, crtc, primary,
+ NULL, &tidss_crtc_funcs, NULL);
+ if (ret < 0) {
+ kfree(tcrtc);
+ return ERR_PTR(ret);
+ }
+
+ drm_crtc_helper_add(crtc, &tidss_crtc_helper_funcs);
+
+ /*
+ * The dispc gamma functions adapt to whatever size we ask
+ * for, no matter what the HW supports. The X server assumes
+ * 256-element gamma tables, so let's use that.
+ */
+ if (tidss->feat->vp_feat.color.gamma_size)
+ gamma_lut_size = 256;
+
+ drm_crtc_enable_color_mgmt(crtc, 0, has_ctm, gamma_lut_size);
+ if (gamma_lut_size)
+ drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+
+ return tcrtc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.h b/drivers/gpu/drm/tidss/tidss_crtc.h
new file mode 100644
index 000000000000..09e773666228
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_CRTC_H__
+#define __TIDSS_CRTC_H__
+
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <drm/drm_crtc.h>
+
+#define to_tidss_crtc(c) container_of((c), struct tidss_crtc, crtc)
+
+struct tidss_device;
+
+struct tidss_crtc {
+ struct drm_crtc crtc;
+
+ u32 hw_videoport;
+
+ struct drm_pending_vblank_event *event;
+
+ struct completion framedone_completion;
+};
+
+#define to_tidss_crtc_state(x) container_of(x, struct tidss_crtc_state, base)
+
+struct tidss_crtc_state {
+ /* Must be first. */
+ struct drm_crtc_state base;
+
+ bool plane_pos_changed;
+
+ u32 bus_format;
+ u32 bus_flags;
+};
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc);
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc);
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus);
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+ u32 hw_videoport,
+ struct drm_plane *primary);
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
new file mode 100644
index 000000000000..29f42768e294
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_panel.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+#include "tidss_dispc_regs.h"
+#include "tidss_scale_coefs.h"
+
+static const u16 tidss_k2g_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x00,
+ [DSS_SYSCONFIG_OFF] = 0x04,
+ [DSS_SYSSTATUS_OFF] = 0x08,
+ [DISPC_IRQ_EOI_OFF] = 0x20,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x24,
+ [DISPC_IRQSTATUS_OFF] = 0x28,
+ [DISPC_IRQENABLE_SET_OFF] = 0x2c,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x30,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x40,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0x44,
+
+ [DISPC_DBG_CONTROL_OFF] = 0x4c,
+ [DISPC_DBG_STATUS_OFF] = 0x50,
+
+ [DISPC_CLKGATING_DISABLE_OFF] = 0x54,
+};
+
+const struct dispc_features dispc_k2g_feats = {
+ .min_pclk_khz = 4375,
+
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 150000,
+ },
+
+ /*
+ * XXX According to the TRM, an RGB input buffer width of up to 2560
+ * should work with 3 taps, but in practice it only works up to 1280.
+ */
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 1280,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 2560,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_K2G,
+
+ .common = "common",
+
+ .common_regs = tidss_k2g_common_regs,
+
+ .num_vps = 1,
+ .vp_name = { "vp1" },
+ .ovr_name = { "ovr1" },
+ .vpclk_name = { "vp1" },
+ .vp_bus_type = { DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 1,
+ .vid_name = { "vid1" },
+ .vid_lite = { false },
+ .vid_order = { 0 },
+};
+
+static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x4,
+ [DSS_SYSCONFIG_OFF] = 0x8,
+ [DSS_SYSSTATUS_OFF] = 0x20,
+ [DISPC_IRQ_EOI_OFF] = 0x24,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x28,
+ [DISPC_IRQSTATUS_OFF] = 0x2c,
+ [DISPC_IRQENABLE_SET_OFF] = 0x30,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x40,
+ [DISPC_VID_IRQENABLE_OFF] = 0x44,
+ [DISPC_VID_IRQSTATUS_OFF] = 0x58,
+ [DISPC_VP_IRQENABLE_OFF] = 0x70,
+ [DISPC_VP_IRQSTATUS_OFF] = 0x7c,
+
+ [WB_IRQENABLE_OFF] = 0x88,
+ [WB_IRQSTATUS_OFF] = 0x8c,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x90,
+ [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x94,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0x98,
+ [DSS_CBA_CFG_OFF] = 0x9c,
+ [DISPC_DBG_CONTROL_OFF] = 0xa0,
+ [DISPC_DBG_STATUS_OFF] = 0xa4,
+ [DISPC_CLKGATING_DISABLE_OFF] = 0xa8,
+ [DISPC_SECURE_DISABLE_OFF] = 0xac,
+};
+
+const struct dispc_features dispc_am65x_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ [DISPC_VP_OLDI] = 165000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM65X,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+ .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+
+ .errata = {
+ .i2000 = true,
+ },
+};
+
+static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x4,
+ [DSS_SYSCONFIG_OFF] = 0x8,
+ [DSS_SYSSTATUS_OFF] = 0x20,
+ [DISPC_IRQ_EOI_OFF] = 0x80,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x28,
+ [DISPC_IRQSTATUS_OFF] = 0x2c,
+ [DISPC_IRQENABLE_SET_OFF] = 0x30,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x34,
+ [DISPC_VID_IRQENABLE_OFF] = 0x38,
+ [DISPC_VID_IRQSTATUS_OFF] = 0x48,
+ [DISPC_VP_IRQENABLE_OFF] = 0x58,
+ [DISPC_VP_IRQSTATUS_OFF] = 0x68,
+
+ [WB_IRQENABLE_OFF] = 0x78,
+ [WB_IRQSTATUS_OFF] = 0x7c,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x98,
+ [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x9c,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0xa0,
+ [DSS_CBA_CFG_OFF] = 0xa4,
+ [DISPC_DBG_CONTROL_OFF] = 0xa8,
+ [DISPC_DBG_STATUS_OFF] = 0xac,
+ [DISPC_CLKGATING_DISABLE_OFF] = 0xb0,
+ [DISPC_SECURE_DISABLE_OFF] = 0x90,
+
+ [FBDC_REVISION_1_OFF] = 0xb8,
+ [FBDC_REVISION_2_OFF] = 0xbc,
+ [FBDC_REVISION_3_OFF] = 0xc0,
+ [FBDC_REVISION_4_OFF] = 0xc4,
+ [FBDC_REVISION_5_OFF] = 0xc8,
+ [FBDC_REVISION_6_OFF] = 0xcc,
+ [FBDC_COMMON_CONTROL_OFF] = 0xd0,
+ [FBDC_CONSTANT_COLOR_0_OFF] = 0xd4,
+ [FBDC_CONSTANT_COLOR_1_OFF] = 0xd8,
+ [DISPC_CONNECTIONS_OFF] = 0xe4,
+ [DISPC_MSS_VP1_OFF] = 0xe8,
+ [DISPC_MSS_VP3_OFF] = 0xec,
+};
+
+const struct dispc_features dispc_j721e_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 170000,
+ [DISPC_VP_INTERNAL] = 600000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 2048,
+ .in_width_max_3tap_rgb = 4096,
+ .in_width_max_5tap_yuv = 4096,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_J721E,
+
+ .common = "common_m",
+ .common_regs = tidss_j721e_common_regs,
+
+ .num_vps = 4,
+ .vp_name = { "vp1", "vp2", "vp3", "vp4" },
+ .ovr_name = { "ovr1", "ovr2", "ovr3", "ovr4" },
+ .vpclk_name = { "vp1", "vp2", "vp3", "vp4" },
+ /* Currently hard coded VP routing (see dispc_initial_config()) */
+ .vp_bus_type = { DISPC_VP_INTERNAL, DISPC_VP_DPI,
+ DISPC_VP_INTERNAL, DISPC_VP_DPI, },
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 1024,
+ .gamma_type = TIDSS_GAMMA_10BIT,
+ },
+ },
+ .num_planes = 4,
+ .vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
+ .vid_lite = { 0, 1, 0, 1, },
+ .vid_order = { 1, 3, 0, 2 },
+};
+
+static const u16 *dispc_common_regmap;
+
+struct dss_vp_data {
+ u32 *gamma_table;
+};
+
+struct dispc_device {
+ struct tidss_device *tidss;
+ struct device *dev;
+
+ void __iomem *base_common;
+ void __iomem *base_vid[TIDSS_MAX_PLANES];
+ void __iomem *base_ovr[TIDSS_MAX_PORTS];
+ void __iomem *base_vp[TIDSS_MAX_PORTS];
+
+ struct regmap *oldi_io_ctrl;
+
+ struct clk *vp_clk[TIDSS_MAX_PORTS];
+
+ const struct dispc_features *feat;
+
+ struct clk *fclk;
+
+ bool is_enabled;
+
+ struct dss_vp_data vp_data[TIDSS_MAX_PORTS];
+
+ u32 *fourccs;
+ u32 num_fourccs;
+
+ u32 memory_bandwidth_limit;
+};
+
+static void dispc_write(struct dispc_device *dispc, u16 reg, u32 val)
+{
+ iowrite32(val, dispc->base_common + reg);
+}
+
+static u32 dispc_read(struct dispc_device *dispc, u16 reg)
+{
+ return ioread32(dispc->base_common + reg);
+}
+
+static
+void dispc_vid_write(struct dispc_device *dispc, u32 hw_plane, u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_vid[hw_plane];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_vid_read(struct dispc_device *dispc, u32 hw_plane, u16 reg)
+{
+ void __iomem *base = dispc->base_vid[hw_plane];
+
+ return ioread32(base + reg);
+}
+
+static void dispc_ovr_write(struct dispc_device *dispc, u32 hw_videoport,
+ u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_ovr[hw_videoport];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_ovr_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+ void __iomem *base = dispc->base_ovr[hw_videoport];
+
+ return ioread32(base + reg);
+}
+
+static void dispc_vp_write(struct dispc_device *dispc, u32 hw_videoport,
+ u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_vp[hw_videoport];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+ void __iomem *base = dispc->base_vp[hw_videoport];
+
+ return ioread32(base + reg);
+}
+
+/*
+ * TRM gives bitfields as start:end, where start is the higher bit
+ * number. For example 7:0
+ */
+
+static u32 FLD_MASK(u32 start, u32 end)
+{
+ return ((1 << (start - end + 1)) - 1) << end;
+}
+
+static u32 FLD_VAL(u32 val, u32 start, u32 end)
+{
+ return (val << end) & FLD_MASK(start, end);
+}
+
+static u32 FLD_GET(u32 val, u32 start, u32 end)
+{
+ return (val & FLD_MASK(start, end)) >> end;
+}
+
+static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end)
+{
+ return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end);
+}
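[Editor's note: these helpers mirror the TRM's start:end bitfield notation, where start is the higher bit. A few worked values, written as a hypothetical self-test; WARN_ON is the standard kernel macro, the function itself is illustrative only.]

static void fld_helpers_example(void)
{
	WARN_ON(FLD_MASK(7, 0) != 0x000000ff);		/* 8-bit field at bit 0 */
	WARN_ON(FLD_MASK(10, 8) != 0x00000700);		/* 3-bit field at bit 8 */
	WARN_ON(FLD_VAL(3, 10, 8) != 0x00000300);	/* value 3 placed into 10:8 */
	WARN_ON(FLD_GET(0x00000300, 10, 8) != 3);	/* and extracted again */
	/* Modify only bits 10:8, leaving all other bits untouched. */
	WARN_ON(FLD_MOD(0xffffffff, 0, 10, 8) != 0xfffff8ff);
}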
+
+static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end)
+{
+ return FLD_GET(dispc_read(dispc, idx), start, end);
+}
+
+static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val,
+ u32 start, u32 end)
+{
+ dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val,
+ start, end));
+}
+
+static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end);
+}
+
+static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+ u32 val, u32 start, u32 end)
+{
+ dispc_vid_write(dispc, hw_plane, idx,
+ FLD_MOD(dispc_vid_read(dispc, hw_plane, idx),
+ val, start, end));
+}
+
+static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end);
+}
+
+static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val,
+ u32 start, u32 end)
+{
+ dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx),
+ val, start, end));
+}
+
+__maybe_unused
+static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end);
+}
+
+static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx,
+ u32 val, u32 start, u32 end)
+{
+ dispc_ovr_write(dispc, ovr, idx,
+ FLD_MOD(dispc_ovr_read(dispc, ovr, idx),
+ val, start, end));
+}
+
+static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport)
+{
+ dispc_irq_t vp_stat = 0;
+
+ if (stat & BIT(0))
+ vp_stat |= DSS_IRQ_VP_FRAME_DONE(hw_videoport);
+ if (stat & BIT(1))
+ vp_stat |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport);
+ if (stat & BIT(2))
+ vp_stat |= DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+ if (stat & BIT(4))
+ vp_stat |= DSS_IRQ_VP_SYNC_LOST(hw_videoport);
+
+ return vp_stat;
+}
+
+static u32 dispc_vp_irq_to_raw(dispc_irq_t vpstat, u32 hw_videoport)
+{
+ u32 stat = 0;
+
+ if (vpstat & DSS_IRQ_VP_FRAME_DONE(hw_videoport))
+ stat |= BIT(0);
+ if (vpstat & DSS_IRQ_VP_VSYNC_EVEN(hw_videoport))
+ stat |= BIT(1);
+ if (vpstat & DSS_IRQ_VP_VSYNC_ODD(hw_videoport))
+ stat |= BIT(2);
+ if (vpstat & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+ stat |= BIT(4);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_vid_irq_from_raw(u32 stat, u32 hw_plane)
+{
+ dispc_irq_t vid_stat = 0;
+
+ if (stat & BIT(0))
+ vid_stat |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane);
+
+ return vid_stat;
+}
+
+static u32 dispc_vid_irq_to_raw(dispc_irq_t vidstat, u32 hw_plane)
+{
+ u32 stat = 0;
+
+ if (vidstat & DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane))
+ stat |= BIT(0);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS);
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS);
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE);
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_set_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqenable(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE);
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_set_irqenable(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE, stat);
+}
+
+static void dispc_k2g_clear_irqstatus(struct dispc_device *dispc,
+ dispc_irq_t mask)
+{
+ dispc_k2g_vp_write_irqstatus(dispc, 0, mask);
+ dispc_k2g_vid_write_irqstatus(dispc, 0, mask);
+}
+
+static
+dispc_irq_t dispc_k2g_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ dispc_irq_t stat = 0;
+
+ /* always clear the top level irqstatus */
+ dispc_write(dispc, DISPC_IRQSTATUS,
+ dispc_read(dispc, DISPC_IRQSTATUS));
+
+ stat |= dispc_k2g_vp_read_irqstatus(dispc, 0);
+ stat |= dispc_k2g_vid_read_irqstatus(dispc, 0);
+
+ dispc_k2g_clear_irqstatus(dispc, stat);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_k2g_read_irqenable(struct dispc_device *dispc)
+{
+ dispc_irq_t stat = 0;
+
+ stat |= dispc_k2g_vp_read_irqenable(dispc, 0);
+ stat |= dispc_k2g_vid_read_irqenable(dispc, 0);
+
+ return stat;
+}
+
+static
+void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+ dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
+
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
+
+ dispc_k2g_vp_set_irqenable(dispc, 0, mask);
+ dispc_k2g_vid_set_irqenable(dispc, 0, mask);
+
+ dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+
+ /* flush posted write */
+ dispc_k2g_read_irqenable(dispc);
+}
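[Editor's note: the expression (mask ^ old_mask) & mask selects the bits set in the new mask but clear in the old one, i.e. the interrupts being newly enabled, whose stale status bits must be cleared before they are unmasked. A small worked case with hypothetical masks:]

/*
 * old_mask = 0b0011  (bits 0 and 1 already enabled)
 * mask     = 0b0110  (bits 1 and 2 requested)
 *
 * mask ^ old_mask          = 0b0101
 * (mask ^ old_mask) & mask = 0b0100  -> only bit 2 is newly enabled
 */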
+
+static dispc_irq_t dispc_k3_vp_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_read(dispc, DISPC_VP_IRQSTATUS(hw_videoport));
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_write(dispc, DISPC_VP_IRQSTATUS(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_read(dispc, DISPC_VP_IRQENABLE(hw_videoport));
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_write(dispc, DISPC_VP_IRQENABLE(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+}
+
+static
+void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
+{
+ unsigned int i;
+ u32 top_clear = 0;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i) {
+ if (clearmask & DSS_IRQ_VP_MASK(i)) {
+ dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
+ top_clear |= BIT(i);
+ }
+ }
+ for (i = 0; i < dispc->feat->num_planes; ++i) {
+ if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+ dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
+ top_clear |= BIT(4 + i);
+ }
+ }
+ if (dispc->feat->subrev == DISPC_K2G)
+ return;
+
+ dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+
+ /* Flush posted writes */
+ dispc_read(dispc, DISPC_IRQSTATUS);
+}
+
+static
+dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ dispc_irq_t status = 0;
+ unsigned int i;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i)
+ status |= dispc_k3_vp_read_irqstatus(dispc, i);
+
+ for (i = 0; i < dispc->feat->num_planes; ++i)
+ status |= dispc_k3_vid_read_irqstatus(dispc, i);
+
+ dispc_k3_clear_irqstatus(dispc, status);
+
+ return status;
+}
+
+static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
+{
+ dispc_irq_t enable = 0;
+ unsigned int i;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i)
+ enable |= dispc_k3_vp_read_irqenable(dispc, i);
+
+ for (i = 0; i < dispc->feat->num_planes; ++i)
+ enable |= dispc_k3_vid_read_irqenable(dispc, i);
+
+ return enable;
+}
+
+static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+ dispc_irq_t mask)
+{
+ unsigned int i;
+ u32 main_enable = 0, main_disable = 0;
+ dispc_irq_t old_mask;
+
+ old_mask = dispc_k3_read_irqenable(dispc);
+
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
+
+ for (i = 0; i < dispc->feat->num_vps; ++i) {
+ dispc_k3_vp_set_irqenable(dispc, i, mask);
+ if (mask & DSS_IRQ_VP_MASK(i))
+ main_enable |= BIT(i); /* VP IRQ */
+ else
+ main_disable |= BIT(i); /* VP IRQ */
+ }
+
+ for (i = 0; i < dispc->feat->num_planes; ++i) {
+ dispc_k3_vid_set_irqenable(dispc, i, mask);
+ if (mask & DSS_IRQ_PLANE_MASK(i))
+ main_enable |= BIT(i + 4); /* VID IRQ */
+ else
+ main_disable |= BIT(i + 4); /* VID IRQ */
+ }
+
+ if (main_enable)
+ dispc_write(dispc, DISPC_IRQENABLE_SET, main_enable);
+
+ if (main_disable)
+ dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+
+ /* Flush posted writes */
+ dispc_read(dispc, DISPC_IRQENABLE_SET);
+}
+
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ return dispc_k2g_read_and_clear_irqstatus(dispc);
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ return dispc_k3_read_and_clear_irqstatus(dispc);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_set_irqenable(dispc, mask);
+ break;
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ dispc_k3_set_irqenable(dispc, mask);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+
+struct dispc_bus_format {
+ u32 bus_fmt;
+ u32 data_width;
+ bool is_oldi_fmt;
+ enum dispc_oldi_mode_reg_val oldi_mode_reg_val;
+};
+
+static const struct dispc_bus_format dispc_bus_formats[] = {
+ { MEDIA_BUS_FMT_RGB444_1X12, 12, false, 0 },
+ { MEDIA_BUS_FMT_RGB565_1X16, 16, false, 0 },
+ { MEDIA_BUS_FMT_RGB666_1X18, 18, false, 0 },
+ { MEDIA_BUS_FMT_RGB888_1X24, 24, false, 0 },
+ { MEDIA_BUS_FMT_RGB101010_1X30, 30, false, 0 },
+ { MEDIA_BUS_FMT_RGB121212_1X36, 36, false, 0 },
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, true, SPWG_18 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, true, SPWG_24 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, true, JEIDA_24 },
+};
+
+static const
+struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc,
+ u32 hw_videoport,
+ u32 bus_fmt, u32 bus_flags)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_bus_formats); ++i) {
+ if (dispc_bus_formats[i].bus_fmt == bus_fmt)
+ return &dispc_bus_formats[i];
+ }
+
+ return NULL;
+}
+
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ const struct dispc_bus_format *fmt;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+ if (!fmt) {
+ dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n",
+ __func__, tstate->bus_format);
+ return -EINVAL;
+ }
+
+ if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
+ fmt->is_oldi_fmt) {
+ dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
+ __func__, dispc->feat->vp_name[hw_videoport]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
+{
+ u32 val = power ? 0 : OLDI_PWRDN_TX;
+
+ if (WARN_ON(!dispc->oldi_io_ctrl))
+ return;
+
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+}
+
+static void dispc_set_num_datalines(struct dispc_device *dispc,
+ u32 hw_videoport, int num_lines)
+{
+ int v;
+
+ switch (num_lines) {
+ case 12:
+ v = 0; break;
+ case 16:
+ v = 1; break;
+ case 18:
+ v = 2; break;
+ case 24:
+ v = 3; break;
+ case 30:
+ v = 4; break;
+ case 36:
+ v = 5; break;
+ default:
+ WARN_ON(1);
+ v = 3;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
+}
+
+static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_bus_format *fmt)
+{
+ u32 oldi_cfg = 0;
+ u32 oldi_reset_bit = BIT(5 + hw_videoport);
+ int count = 0;
+
+ /*
+ * For the moment DUALMODESYNC, MASTERSLAVE, MODE, and SRC
+ * bits of DISPC_VP_DSS_OLDI_CFG are set statically to 0.
+ */
+
+ if (fmt->data_width == 24)
+ oldi_cfg |= BIT(8); /* MSB */
+ else if (fmt->data_width != 18)
+ dev_warn(dispc->dev, "%s: %d port width not supported\n",
+ __func__, fmt->data_width);
+
+ oldi_cfg |= BIT(7); /* DEPOL */
+
+ oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
+
+ oldi_cfg |= BIT(12); /* SOFTRST */
+
+ oldi_cfg |= BIT(0); /* ENABLE */
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
+
+ while (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)) &&
+ count < 10000)
+ count++;
+
+ if (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)))
+ dev_warn(dispc->dev, "%s: timeout waiting OLDI reset done\n",
+ __func__);
+}
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ const struct dispc_bus_format *fmt;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+
+ if (WARN_ON(!fmt))
+ return;
+
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ dispc_oldi_tx_power(dispc, true);
+
+ dispc_enable_oldi(dispc, hw_videoport, fmt);
+ }
+}
+
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct drm_display_mode *mode = &state->adjusted_mode;
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ bool align, onoff, rf, ieo, ipc, ihs, ivs;
+ const struct dispc_bus_format *fmt;
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+
+ if (WARN_ON(!fmt))
+ return;
+
+ dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width);
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H,
+ FLD_VAL(hsw - 1, 7, 0) |
+ FLD_VAL(hfp - 1, 19, 8) |
+ FLD_VAL(hbp - 1, 31, 20));
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V,
+ FLD_VAL(vsw - 1, 7, 0) |
+ FLD_VAL(vfp, 19, 8) |
+ FLD_VAL(vbp, 31, 20));
+
+ ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+
+ ihs = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+ ieo = !!(tstate->bus_flags & DRM_BUS_FLAG_DE_LOW);
+
+ ipc = !!(tstate->bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE);
+
+ /* always use the 'rf' setting */
+ onoff = true;
+
+ rf = !!(tstate->bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE);
+
+ /* always use aligned syncs */
+ align = true;
+
+ /* always use DE_HIGH for OLDI */
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI)
+ ieo = false;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
+ FLD_VAL(align, 18, 18) |
+ FLD_VAL(onoff, 17, 17) |
+ FLD_VAL(rf, 16, 16) |
+ FLD_VAL(ieo, 15, 15) |
+ FLD_VAL(ipc, 14, 14) |
+ FLD_VAL(ihs, 13, 13) |
+ FLD_VAL(ivs, 12, 12));
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN,
+ FLD_VAL(mode->hdisplay - 1, 11, 0) |
+ FLD_VAL(mode->vdisplay - 1, 27, 16));
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0);
+}
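[Editor's note: the porches and sync widths are derived from the standard drm_display_mode fields. A worked example, assuming CEA-861 1920x1080@60 timings:]

/*
 * hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200
 *
 * hfp = 2008 - 1920 = 88
 * hsw = 2052 - 2008 = 44
 * hbp = 2200 - 2052 = 148
 *
 * DISPC_VP_TIMING_H then holds FLD_VAL(44 - 1, 7, 0) |
 * FLD_VAL(88 - 1, 19, 8) | FLD_VAL(148 - 1, 31, 20).
 */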
+
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
+{
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
+{
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
+
+ dispc_oldi_tx_power(dispc, false);
+ }
+}
+
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport)
+{
+ return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5);
+}
+
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport)
+{
+ WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5));
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5);
+}
+
+enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN };
+
+static u16 c8_to_c12(u8 c8, enum c8_to_c12_mode mode)
+{
+ u16 c12;
+
+ c12 = c8 << 4;
+
+ switch (mode) {
+ case C8_TO_C12_REPLICATE:
+ /* Copy the 4 MSBs of c8 to the 4 LSBs for a full-scale c12 */
+ c12 |= c8 >> 4;
+ break;
+ case C8_TO_C12_MAX:
+ c12 |= 0xF;
+ break;
+ default:
+ case C8_TO_C12_MIN:
+ break;
+ }
+
+ return c12;
+}
+
+static u64 argb8888_to_argb12121212(u32 argb8888, enum c8_to_c12_mode m)
+{
+ u8 a, r, g, b;
+ u64 v;
+
+ a = (argb8888 >> 24) & 0xff;
+ r = (argb8888 >> 16) & 0xff;
+ g = (argb8888 >> 8) & 0xff;
+ b = (argb8888 >> 0) & 0xff;
+
+ v = ((u64)c8_to_c12(a, m) << 36) | ((u64)c8_to_c12(r, m) << 24) |
+ ((u64)c8_to_c12(g, m) << 12) | (u64)c8_to_c12(b, m);
+
+ return v;
+}
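[Editor's note: replicating the top bits into the new low bits maps 0x00 to 0x000 and 0xff to full-scale 0xfff, which a plain left shift would not (0xff << 4 is only 0xff0). Some values that follow directly from the code above:]

/*
 * c8_to_c12(0xff, C8_TO_C12_REPLICATE) == 0xfff
 * c8_to_c12(0x80, C8_TO_C12_REPLICATE) == 0x808
 * c8_to_c12(0x80, C8_TO_C12_MAX)       == 0x80f
 * c8_to_c12(0x80, C8_TO_C12_MIN)       == 0x800
 */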
+
+static void dispc_vp_set_default_color(struct dispc_device *dispc,
+ u32 hw_videoport, u32 default_color)
+{
+ u64 v;
+
+ v = argb8888_to_argb12121212(default_color, C8_TO_C12_REPLICATE);
+
+ dispc_ovr_write(dispc, hw_videoport,
+ DISPC_OVR_DEFAULT_COLOR, v & 0xffffffff);
+ dispc_ovr_write(dispc, hw_videoport,
+ DISPC_OVR_DEFAULT_COLOR2, (v >> 32) & 0xffff);
+}
+
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_display_mode *mode)
+{
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
+ enum dispc_vp_bus_type bus_type;
+ int max_pclk;
+
+ bus_type = dispc->feat->vp_bus_type[hw_videoport];
+
+ max_pclk = dispc->feat->max_pclk_khz[bus_type];
+
+ if (WARN_ON(max_pclk == 0))
+ return MODE_BAD;
+
+ if (mode->clock < dispc->feat->min_pclk_khz)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > max_pclk)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 4096)
+ return MODE_BAD;
+
+ if (mode->vdisplay > 4096)
+ return MODE_BAD;
+
+ /* TODO: add interlace support */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /*
+ * Enforce that the output width is divisible by 2. Strictly
+ * speaking this is only needed in the following cases:
+ * - YUV output selected (BT656, BT1120)
+ * - Dithering enabled
+ * - TDM with TDMCycleFormat == 3
+ * But for simplicity we always enforce it.
+ */
+ if ((mode->hdisplay % 2) != 0)
+ return MODE_BAD_HVALUE;
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ if (hsw < 1 || hsw > 256 ||
+ hfp < 1 || hfp > 4096 ||
+ hbp < 1 || hbp > 4096)
+ return MODE_BAD_HVALUE;
+
+ if (vsw < 1 || vsw > 256 ||
+ vfp > 4095 || vbp > 4095)
+ return MODE_BAD_VVALUE;
+
+ if (dispc->memory_bandwidth_limit) {
+ const unsigned int bpp = 4;
+ u64 bandwidth;
+
+ bandwidth = 1000 * mode->clock;
+ bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp;
+ bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal);
+
+ if (dispc->memory_bandwidth_limit < bandwidth)
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
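[Editor's note: the bandwidth estimate is the pixel clock in Hz scaled by the active fraction of the frame and by bytes per pixel, which reduces to hdisplay * vdisplay * bpp * refresh. A worked example, assuming CEA 1080p60 timings:]

/*
 * mode->clock = 148500 (kHz), htotal = 2200, vtotal = 1125
 *
 * bandwidth = 148500000 * 1920 * 1080 * 4 / (2200 * 1125)
 *           = 497664000 bytes/s (~498 MB/s)
 *           = 1920 * 1080 * 4 * 60, since clock / (htotal * vtotal) = 60 Hz
 */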
+
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+ int ret = clk_prepare_enable(dispc->vp_clk[hw_videoport]);
+
+ if (ret)
+ dev_err(dispc->dev, "%s: enabling clk failed: %d\n", __func__,
+ ret);
+
+ return ret;
+}
+
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+ clk_disable_unprepare(dispc->vp_clk[hw_videoport]);
+}
+
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate resulting from calculating the clock divider value.
+ */
+static
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
+{
+ int r = rate / 100, rr = real_rate / 100;
+
+ return (unsigned int)(abs(((rr - r) * 100) / r));
+}
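[Editor's note: dividing both rates by 100 first presumably keeps the (rr - r) * 100 product within integer range for large pixel clocks. For a requested 148500000 Hz that the clock framework rounds to 154000000 Hz:]

/*
 * r  = 148500000 / 100 = 1485000
 * rr = 154000000 / 100 = 1540000
 *
 * abs((rr - r) * 100 / r) = 5500000 / 1485000 = 3  (~3.7%, truncated)
 */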
+
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long rate)
+{
+ int r;
+ unsigned long new_rate;
+
+ r = clk_set_rate(dispc->vp_clk[hw_videoport], rate);
+ if (r) {
+ dev_err(dispc->dev, "vp%d: failed to set clk rate to %lu\n",
+ hw_videoport, rate);
+ return r;
+ }
+
+ new_rate = clk_get_rate(dispc->vp_clk[hw_videoport]);
+
+ if (dispc_pclk_diff(rate, new_rate) > 5)
+ dev_warn(dispc->dev,
+ "vp%d: Clock rate %lu differs over 5%% from requested %lu\n",
+ hw_videoport, new_rate, rate);
+
+ dev_dbg(dispc->dev, "vp%d: new rate %lu Hz (requested %lu Hz)\n",
+ hw_videoport, clk_get_rate(dispc->vp_clk[hw_videoport]), rate);
+
+ return 0;
+}
+
+/* OVR */
+static void dispc_k2g_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ /* On k2g there is only one plane and no need for ovr */
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_POSITION,
+ x | (y << 16));
+}
+
+static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ hw_plane, 4, 1);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ x, 17, 6);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ y, 30, 19);
+}
+
+static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ hw_plane, 4, 1);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+ x, 13, 0);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+ y, 29, 16);
+}
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+ u32 hw_videoport, u32 x, u32 y, u32 layer)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ case DISPC_AM65X:
+ dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ case DISPC_J721E:
+ dispc_j721e_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+ u32 hw_videoport, u32 layer, bool enable)
+{
+ if (dispc->feat->subrev == DISPC_K2G)
+ return;
+
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ !!enable, 0, 0);
+}
+
+/* CSC */
+enum csc_ctm {
+ CSC_RR, CSC_RG, CSC_RB,
+ CSC_GR, CSC_GG, CSC_GB,
+ CSC_BR, CSC_BG, CSC_BB,
+};
+
+enum csc_yuv2rgb {
+ CSC_RY, CSC_RCB, CSC_RCR,
+ CSC_GY, CSC_GCB, CSC_GCR,
+ CSC_BY, CSC_BCB, CSC_BCR,
+};
+
+enum csc_rgb2yuv {
+ CSC_YR, CSC_YG, CSC_YB,
+ CSC_CBR, CSC_CBG, CSC_CBB,
+ CSC_CRR, CSC_CRG, CSC_CRB,
+};
+
+struct dispc_csc_coef {
+ void (*to_regval)(const struct dispc_csc_coef *csc, u32 *regval);
+ int m[9];
+ int preoffset[3];
+ int postoffset[3];
+ enum { CLIP_LIMITED_RANGE = 0, CLIP_FULL_RANGE = 1, } cliping;
+ const char *name;
+};
+
+#define DISPC_CSC_REGVAL_LEN 8
+
+static
+void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19))
+ regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]);
+ regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]);
+ regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]);
+#undef OVAL
+}
+
+#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16))
+static
+void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_RY], csc->m[CSC_RCR]);
+ regval[1] = CVAL(csc->m[CSC_RCB], csc->m[CSC_GY]);
+ regval[2] = CVAL(csc->m[CSC_GCR], csc->m[CSC_GCB]);
+ regval[3] = CVAL(csc->m[CSC_BY], csc->m[CSC_BCR]);
+ regval[4] = CVAL(csc->m[CSC_BCB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+__maybe_unused static
+void dispc_csc_rgb2yuv_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_YR], csc->m[CSC_YG]);
+ regval[1] = CVAL(csc->m[CSC_YB], csc->m[CSC_CRR]);
+ regval[2] = CVAL(csc->m[CSC_CRG], csc->m[CSC_CRB]);
+ regval[3] = CVAL(csc->m[CSC_CBR], csc->m[CSC_CBG]);
+ regval[4] = CVAL(csc->m[CSC_CBB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+static void dispc_csc_cpr_regval(const struct dispc_csc_coef *csc,
+ u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_RR], csc->m[CSC_RG]);
+ regval[1] = CVAL(csc->m[CSC_RB], csc->m[CSC_GR]);
+ regval[2] = CVAL(csc->m[CSC_GG], csc->m[CSC_GB]);
+ regval[3] = CVAL(csc->m[CSC_BR], csc->m[CSC_BG]);
+ regval[4] = CVAL(csc->m[CSC_BB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vid_csc_coef_reg[] = {
+ DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+ DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+ DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+ DISPC_VID_CSC_COEF(6), /* K2G has no post offset support */
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ if (regval[7] != 0)
+ dev_warn(dispc->dev, "%s: No post offset support for %s\n",
+ __func__, csc->name);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+ dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k3_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vid_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+ DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+ DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+ DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+ DISPC_VID_CSC_COEF(6), DISPC_VID_CSC_COEF7,
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+ dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+ regval[i]);
+}
+
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_full = {
+ dispc_csc_yuv2rgb_regval,
+ { 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
+ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+ 256, 452, 0, }, /* by, bcb, bcr |1.000 1.772 0.000|*/
+ { 0, -2048, -2048, }, /* full range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.601 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_lim = {
+ dispc_csc_yuv2rgb_regval,
+ { 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
+ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+ 298, 516, 0, }, /* by, bcb, bcr |1.164 2.017 0.000|*/
+ { -256, -2048, -2048, }, /* limited range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.601 Limited",
+};
+
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_full = {
+ dispc_csc_yuv2rgb_regval,
+ { 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
+ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+ 256, 475, 0, }, /* by, bcb, bcr |1.000 1.856 0.000|*/
+ { 0, -2048, -2048, }, /* full range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.709 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_lim = {
+ dispc_csc_yuv2rgb_regval,
+ { 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
+ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+ 298, 541, 0, }, /* by, bcb, bcr |1.164 2.112 0.000|*/
+ { -256, -2048, -2048, }, /* limited range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.709 Limited",
+};
+
+static const struct {
+ enum drm_color_encoding encoding;
+ enum drm_color_range range;
+ const struct dispc_csc_coef *csc;
+} dispc_csc_table[] = {
+ { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
+ &csc_yuv2rgb_bt601_full, },
+ { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
+ &csc_yuv2rgb_bt601_lim, },
+ { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_FULL_RANGE,
+ &csc_yuv2rgb_bt709_full, },
+ { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE,
+ &csc_yuv2rgb_bt709_lim, },
+};
+
+static const
+struct dispc_csc_coef *dispc_find_csc(enum drm_color_encoding encoding,
+ enum drm_color_range range)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_csc_table); i++) {
+ if (dispc_csc_table[i].encoding == encoding &&
+ dispc_csc_table[i].range == range) {
+ return dispc_csc_table[i].csc;
+ }
+ }
+ return NULL;
+}
+
+static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state)
+{
+ const struct dispc_csc_coef *coef;
+
+ coef = dispc_find_csc(state->color_encoding, state->color_range);
+ if (!coef) {
+ dev_err(dispc->dev, "%s: CSC (%u,%u) not found\n",
+ __func__, state->color_encoding, state->color_range);
+ return;
+ }
+
+ if (dispc->feat->subrev == DISPC_K2G)
+ dispc_k2g_vid_write_csc(dispc, hw_plane, coef);
+ else
+ dispc_k3_vid_write_csc(dispc, hw_plane, coef);
+}
+
+static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
+ bool enable)
+{
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
+}
+
+/* SCALER */
+
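+/*
+ * FIR increments are unsigned fixed point with 21 fractional bits:
+ * 0x200000 is a 1:1 ratio, larger values downscale, smaller values
+ * upscale.
+ */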
+static u32 dispc_calc_fir_inc(u32 in, u32 out)
+{
+ return (u32)div_u64(0x200000ull * in, out);
+}
+
+enum dispc_vid_fir_coef_set {
+ DISPC_VID_FIR_COEF_HORIZ,
+ DISPC_VID_FIR_COEF_HORIZ_UV,
+ DISPC_VID_FIR_COEF_VERT,
+ DISPC_VID_FIR_COEF_VERT_UV,
+};
+
+static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
+ u32 hw_plane,
+ enum dispc_vid_fir_coef_set coef_set,
+ const struct tidss_scale_coefs *coefs)
+{
+ static const u16 c0_regs[] = {
+ [DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H0,
+ [DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H0_C,
+ [DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V0,
+ [DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V0_C,
+ };
+
+ static const u16 c12_regs[] = {
+ [DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H12,
+ [DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H12_C,
+ [DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V12,
+ [DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V12_C,
+ };
+
+ const u16 c0_base = c0_regs[coef_set];
+ const u16 c12_base = c12_regs[coef_set];
+ int phase;
+
+ if (!coefs) {
+ dev_err(dispc->dev, "%s: No coefficients given.\n", __func__);
+ return;
+ }
+
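+ /*
+ * The hardware takes 9 center-tap (c0) coefficients and 16 packed
+ * c1/c2 pairs per filter; c1 goes to bits [19:10] and c2 to bits
+ * [29:20] of each c12 register.
+ */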
+ for (phase = 0; phase <= 8; ++phase) {
+ u16 reg = c0_base + phase * 4;
+ u16 c0 = coefs->c0[phase];
+
+ dispc_vid_write(dispc, hw_plane, reg, c0);
+ }
+
+ for (phase = 0; phase <= 15; ++phase) {
+ u16 reg = c12_base + phase * 4;
+ s16 c1, c2;
+ u32 c12;
+
+ c1 = coefs->c1[phase];
+ c2 = coefs->c2[phase];
+ c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20);
+
+ dispc_vid_write(dispc, hw_plane, reg, c12);
+ }
+}
+
+static bool dispc_fourcc_is_yuv(u32 fourcc)
+{
+ switch (fourcc) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct dispc_scaling_params {
+ int xinc, yinc;
+ u32 in_w, in_h, in_w_uv, in_h_uv;
+ u32 fir_xinc, fir_yinc, fir_xinc_uv, fir_yinc_uv;
+ bool scale_x, scale_y;
+ const struct tidss_scale_coefs *xcoef, *ycoef, *xcoef_uv, *ycoef_uv;
+ bool five_taps;
+};
+
+static int dispc_vid_calc_scaling(struct dispc_device *dispc,
+ const struct drm_plane_state *state,
+ struct dispc_scaling_params *sp,
+ bool lite_plane)
+{
+ const struct dispc_features_scaling *f = &dispc->feat->scaling;
+ u32 fourcc = state->fb->format->format;
+ u32 in_width_max_5tap = f->in_width_max_5tap_rgb;
+ u32 in_width_max_3tap = f->in_width_max_3tap_rgb;
+ u32 downscale_limit;
+ u32 in_width_max;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->xinc = 1;
+ sp->yinc = 1;
+ sp->in_w = state->src_w >> 16;
+ sp->in_w_uv = sp->in_w;
+ sp->in_h = state->src_h >> 16;
+ sp->in_h_uv = sp->in_h;
+
+ sp->scale_x = sp->in_w != state->crtc_w;
+ sp->scale_y = sp->in_h != state->crtc_h;
+
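+ /*
+ * The UV plane of a YUV buffer is horizontally subsampled, and for
+ * NV12 also vertically, so the UV plane always needs scaling.
+ */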
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ in_width_max_5tap = f->in_width_max_5tap_yuv;
+ in_width_max_3tap = f->in_width_max_3tap_yuv;
+
+ sp->in_w_uv >>= 1;
+ sp->scale_x = true;
+
+ if (fourcc == DRM_FORMAT_NV12) {
+ sp->in_h_uv >>= 1;
+ sp->scale_y = true;
+ }
+ }
+
+ /* Skip the rest if no scaling is used */
+ if ((!sp->scale_x && !sp->scale_y) || lite_plane)
+ return 0;
+
+ if (sp->in_w > in_width_max_5tap) {
+ sp->five_taps = false;
+ in_width_max = in_width_max_3tap;
+ downscale_limit = f->downscale_limit_3tap;
+ } else {
+ sp->five_taps = true;
+ in_width_max = in_width_max_5tap;
+ downscale_limit = f->downscale_limit_5tap;
+ }
+
+ if (sp->scale_x) {
+ sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+
+ if (sp->fir_xinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+ dev_dbg(dispc->dev,
+ "%s: X-scaling factor %u/%u > %u\n",
+ __func__, state->crtc_w, state->src_w >> 16,
+ f->upscale_limit);
+ return -EINVAL;
+ }
+
+ if (sp->fir_xinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+ sp->xinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_w,
+ state->crtc_w),
+ downscale_limit);
+
+ if (sp->xinc > f->xinc_max) {
+ dev_dbg(dispc->dev,
+ "%s: X-scaling factor %u/%u < 1/%u\n",
+ __func__, state->crtc_w,
+ state->src_w >> 16,
+ downscale_limit * f->xinc_max);
+ return -EINVAL;
+ }
+
+ sp->in_w = (state->src_w >> 16) / sp->xinc;
+ }
+
+ while (sp->in_w > in_width_max) {
+ sp->xinc++;
+ sp->in_w = (state->src_w >> 16) / sp->xinc;
+ }
+
+ if (sp->xinc > f->xinc_max) {
+ dev_dbg(dispc->dev,
+ "%s: Too wide input buffer %u > %u\n", __func__,
+ state->src_w >> 16, in_width_max * f->xinc_max);
+ return -EINVAL;
+ }
+
+ /*
+ * We need even line length for YUV formats. Decimation
+ * can lead to odd length, so we need to make it even
+ * again.
+ */
+ if (dispc_fourcc_is_yuv(fourcc))
+ sp->in_w &= ~1;
+
+ sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+ }
+
+ if (sp->scale_y) {
+ sp->fir_yinc = dispc_calc_fir_inc(sp->in_h, state->crtc_h);
+
+ if (sp->fir_yinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+ dev_dbg(dispc->dev,
+ "%s: Y-scaling factor %u/%u > %u\n",
+ __func__, state->crtc_h, state->src_h >> 16,
+ f->upscale_limit);
+ return -EINVAL;
+ }
+
+ if (sp->fir_yinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+ sp->yinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_h,
+ state->crtc_h),
+ downscale_limit);
+
+ sp->in_h /= sp->yinc;
+ sp->fir_yinc = dispc_calc_fir_inc(sp->in_h,
+ state->crtc_h);
+ }
+ }
+
+ dev_dbg(dispc->dev,
+ "%s: %ux%u decim %ux%u -> %ux%u firinc %u.%03ux%u.%03u taps %u -> %ux%u\n",
+ __func__, state->src_w >> 16, state->src_h >> 16,
+ sp->xinc, sp->yinc, sp->in_w, sp->in_h,
+ sp->fir_xinc / 0x200000u,
+ ((sp->fir_xinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+ sp->fir_yinc / 0x200000u,
+ ((sp->fir_yinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+ sp->five_taps ? 5 : 3,
+ state->crtc_w, state->crtc_h);
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (sp->scale_x) {
+ sp->in_w_uv /= sp->xinc;
+ sp->fir_xinc_uv = dispc_calc_fir_inc(sp->in_w_uv,
+ state->crtc_w);
+ sp->xcoef_uv = tidss_get_scale_coefs(dispc->dev,
+ sp->fir_xinc_uv,
+ true);
+ }
+ if (sp->scale_y) {
+ sp->in_h_uv /= sp->yinc;
+ sp->fir_yinc_uv = dispc_calc_fir_inc(sp->in_h_uv,
+ state->crtc_h);
+ sp->ycoef_uv = tidss_get_scale_coefs(dispc->dev,
+ sp->fir_yinc_uv,
+ sp->five_taps);
+ }
+ }
+
+ if (sp->scale_x)
+ sp->xcoef = tidss_get_scale_coefs(dispc->dev, sp->fir_xinc,
+ true);
+
+ if (sp->scale_y)
+ sp->ycoef = tidss_get_scale_coefs(dispc->dev, sp->fir_yinc,
+ sp->five_taps);
+
+ return 0;
+}
+
+static void dispc_vid_set_scaling(struct dispc_device *dispc,
+ u32 hw_plane,
+ struct dispc_scaling_params *sp,
+ u32 fourcc)
+{
+ /* HORIZONTAL RESIZE ENABLE */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->scale_x, 7, 7);
+
+ /* VERTICAL RESIZE ENABLE */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->scale_y, 8, 8);
+
+ /* Skip the rest if no scaling is used */
+ if (!sp->scale_x && !sp->scale_y)
+ return;
+
+ /* VERTICAL 5-TAPS */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->five_taps, 21, 21);
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (sp->scale_x) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH2,
+ sp->fir_xinc_uv);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_HORIZ_UV,
+ sp->xcoef_uv);
+ }
+ if (sp->scale_y) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV2,
+ sp->fir_yinc_uv);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_VERT_UV,
+ sp->ycoef_uv);
+ }
+ }
+
+ if (sp->scale_x) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH, sp->fir_xinc);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_HORIZ,
+ sp->xcoef);
+ }
+
+ if (sp->scale_y) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV, sp->fir_yinc);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_VERT, sp->ycoef);
+ }
+}
+
+/* OTHER */
+
+static const struct {
+ u32 fourcc;
+ u8 dss_code;
+} dispc_color_formats[] = {
+ { DRM_FORMAT_ARGB4444, 0x0, },
+ { DRM_FORMAT_ABGR4444, 0x1, },
+ { DRM_FORMAT_RGBA4444, 0x2, },
+
+ { DRM_FORMAT_RGB565, 0x3, },
+ { DRM_FORMAT_BGR565, 0x4, },
+
+ { DRM_FORMAT_ARGB1555, 0x5, },
+ { DRM_FORMAT_ABGR1555, 0x6, },
+
+ { DRM_FORMAT_ARGB8888, 0x7, },
+ { DRM_FORMAT_ABGR8888, 0x8, },
+ { DRM_FORMAT_RGBA8888, 0x9, },
+ { DRM_FORMAT_BGRA8888, 0xa, },
+
+ { DRM_FORMAT_RGB888, 0xb, },
+ { DRM_FORMAT_BGR888, 0xc, },
+
+ { DRM_FORMAT_ARGB2101010, 0xe, },
+ { DRM_FORMAT_ABGR2101010, 0xf, },
+
+ { DRM_FORMAT_XRGB4444, 0x20, },
+ { DRM_FORMAT_XBGR4444, 0x21, },
+ { DRM_FORMAT_RGBX4444, 0x22, },
+
+ { DRM_FORMAT_ARGB1555, 0x25, },
+ { DRM_FORMAT_ABGR1555, 0x26, },
+
+ { DRM_FORMAT_XRGB8888, 0x27, },
+ { DRM_FORMAT_XBGR8888, 0x28, },
+ { DRM_FORMAT_RGBX8888, 0x29, },
+ { DRM_FORMAT_BGRX8888, 0x2a, },
+
+ { DRM_FORMAT_XRGB2101010, 0x2e, },
+ { DRM_FORMAT_XBGR2101010, 0x2f, },
+
+ { DRM_FORMAT_YUYV, 0x3e, },
+ { DRM_FORMAT_UYVY, 0x3f, },
+
+ { DRM_FORMAT_NV12, 0x3d, },
+};
+
+static void dispc_plane_set_pixel_format(struct dispc_device *dispc,
+ u32 hw_plane, u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (dispc_color_formats[i].fourcc == fourcc) {
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ dispc_color_formats[i].dss_code,
+ 6, 1);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len)
+{
+ WARN_ON(!dispc->fourccs);
+
+ *len = dispc->num_fourccs;
+
+ return dispc->fourccs;
+}
+
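+/*
+ * Convert a pixel step to the DSS increment encoding: 1 means
+ * contiguous, a forward skip adds (pixels - 1) * ps bytes and a
+ * backward skip subtracts (|pixels| + 1) * ps bytes, where ps is the
+ * pixel size in bytes.
+ */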
+static s32 pixinc(int pixels, u8 ps)
+{
+ if (pixels == 1)
+ return 1;
+ else if (pixels > 1)
+ return 1 + (pixels - 1) * ps;
+ else if (pixels < 0)
+ return 1 - (-pixels + 1) * ps;
+
+ WARN_ON(1);
+ return 0;
+}
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
+{
+ bool lite = dispc->feat->vid_lite[hw_plane];
+ u32 fourcc = state->fb->format->format;
+ bool need_scaling = state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h;
+ struct dispc_scaling_params scaling;
+ int ret;
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (!dispc_find_csc(state->color_encoding,
+ state->color_range)) {
+ dev_dbg(dispc->dev,
+ "%s: Unsupported CSC (%u,%u) for HW plane %u\n",
+ __func__, state->color_encoding,
+ state->color_range, hw_plane);
+ return -EINVAL;
+ }
+ }
+
+ if (need_scaling) {
+ if (lite) {
+ dev_dbg(dispc->dev,
+ "%s: Lite plane %u can't scale %ux%u!=%ux%u\n",
+ __func__, hw_plane,
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_w, state->crtc_h);
+ return -EINVAL;
+ }
+ ret = dispc_vid_calc_scaling(dispc, state, &scaling, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static
+dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 x = state->src_x >> 16;
+ u32 y = state->src_y >> 16;
+
+ gem = drm_fb_cma_get_gem_obj(state->fb, 0);
+
+ return gem->paddr + fb->offsets[0] + x * fb->format->cpp[0] +
+ y * fb->pitches[0];
+}
+
+static
+dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 x = state->src_x >> 16;
+ u32 y = state->src_y >> 16;
+
+ if (WARN_ON(state->fb->format->num_planes != 2))
+ return 0;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+
+ return gem->paddr + fb->offsets[1] +
+ (x * fb->format->cpp[1] / fb->format->hsub) +
+ (y * fb->pitches[1] / fb->format->vsub);
+}
+
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
+{
+ bool lite = dispc->feat->vid_lite[hw_plane];
+ u32 fourcc = state->fb->format->format;
+ u16 cpp = state->fb->format->cpp[0];
+ u32 fb_width = state->fb->pitches[0] / cpp;
+ dma_addr_t paddr = dispc_plane_state_paddr(state);
+ struct dispc_scaling_params scale;
+
+ dispc_vid_calc_scaling(dispc, state, &scale, lite);
+
+ dispc_plane_set_pixel_format(dispc, hw_plane, fourcc);
+
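+ /*
+ * DMA addresses can be up to 48 bits wide: the low 32 bits go into
+ * BA_x and the upper bits into BA_EXT_x. The same address is
+ * written to both the _0 and _1 registers.
+ */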
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, paddr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)paddr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, paddr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)paddr >> 32);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
+ (scale.in_w - 1) | ((scale.in_h - 1) << 16));
+
+ /* For YUV422 formats we use the macropixel size for the pixel inc */
+ if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+ pixinc(scale.xinc, cpp * 2));
+ else
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+ pixinc(scale.xinc, cpp));
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC,
+ pixinc(1 + (scale.yinc * fb_width -
+ scale.xinc * scale.in_w),
+ cpp));
+
+ if (state->fb->format->num_planes == 2) {
+ u16 cpp_uv = state->fb->format->cpp[1];
+ u32 fb_width_uv = state->fb->pitches[1] / cpp_uv;
+ dma_addr_t p_uv_addr = dispc_plane_state_p_uv_addr(state);
+
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_0, p_uv_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_EXT_0, (u64)p_uv_addr >> 32);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_1, p_uv_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_EXT_1, (u64)p_uv_addr >> 32);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC_UV,
+ pixinc(1 + (scale.yinc * fb_width_uv -
+ scale.xinc * scale.in_w_uv),
+ cpp_uv));
+ }
+
+ if (!lite) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE,
+ (state->crtc_w - 1) |
+ ((state->crtc_h - 1) << 16));
+
+ dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc);
+ }
+
+ /* enable YUV->RGB color conversion */
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ dispc_vid_csc_setup(dispc, hw_plane, state);
+ dispc_vid_csc_enable(dispc, hw_plane, true);
+ } else {
+ dispc_vid_csc_enable(dispc, hw_plane, false);
+ }
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA,
+ 0xFF & (state->alpha >> 8));
+
+ if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+ 28, 28);
+ else
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+ 28, 28);
+
+ return 0;
+}
+
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
+{
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
+
+ return 0;
+}
+
+static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
+{
+ return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
+}
+
+static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc,
+ u32 hw_plane, u32 low, u32 high)
+{
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD,
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_vid_set_buf_threshold(struct dispc_device *dispc,
+ u32 hw_plane, u32 low, u32 high)
+{
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_k2g_plane_init(struct dispc_device *dispc)
+{
+ unsigned int hw_plane;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ /* MFLAG_CTRL = ENABLED */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ /* MFLAG_START = MFLAGNORMALSTARTMODE */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+ for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+ u32 thr_low, thr_high;
+ u32 mflag_low, mflag_high;
+ u32 preload;
+
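+ /*
+ * Buffer thresholds: high at FIFO size - 1, low at half the
+ * FIFO. The MFLAG watermarks at 1/3 and 2/3 of the FIFO
+ * control when the memory-priority flag is asserted, and the
+ * preload matches the low threshold.
+ */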
+ thr_high = size - 1;
+ thr_low = size / 2;
+
+ mflag_high = size * 2 / 3;
+ mflag_low = size / 3;
+
+ preload = thr_low;
+
+ dev_dbg(dispc->dev,
+ "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+ dispc->feat->vid_name[hw_plane],
+ size,
+ thr_high, thr_low,
+ mflag_high, mflag_low,
+ preload);
+
+ dispc_vid_set_buf_threshold(dispc, hw_plane,
+ thr_low, thr_high);
+ dispc_vid_set_mflag_threshold(dispc, hw_plane,
+ mflag_low, mflag_high);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+ /*
+ * Prefetch up to the FIFO high threshold to minimize the
+ * possibility of underflows. Note that this means the PRELOAD
+ * register is ignored.
+ */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+ 19, 19);
+ }
+}
+
+static void dispc_k3_plane_init(struct dispc_device *dispc)
+{
+ unsigned int hw_plane;
+ u32 cba_lo_pri = 1;
+ u32 cba_hi_pri = 0;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
+
+ /* MFLAG_CTRL = ENABLED */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ /* MFLAG_START = MFLAGNORMALSTARTMODE */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+ for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+ u32 thr_low, thr_high;
+ u32 mflag_low, mflag_high;
+ u32 preload;
+
+ thr_high = size - 1;
+ thr_low = size / 2;
+
+ mflag_high = size * 2 / 3;
+ mflag_low = size / 3;
+
+ preload = thr_low;
+
+ dev_dbg(dispc->dev,
+ "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+ dispc->feat->vid_name[hw_plane],
+ size,
+ thr_high, thr_low,
+ mflag_high, mflag_low,
+ preload);
+
+ dispc_vid_set_buf_threshold(dispc, hw_plane,
+ thr_low, thr_high);
+ dispc_vid_set_mflag_threshold(dispc, hw_plane,
+ mflag_low, mflag_high);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+ /* Prefetch up to the PRELOAD value */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+ 19, 19);
+ }
+}
+
+static void dispc_plane_init(struct dispc_device *dispc)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_plane_init(dispc);
+ break;
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ dispc_k3_plane_init(dispc);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void dispc_vp_init(struct dispc_device *dispc)
+{
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ /* Enable the gamma shadow bit-field for all VPs */
+ for (i = 0; i < dispc->feat->num_vps; i++)
+ VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
+}
+
+static void dispc_initial_config(struct dispc_device *dispc)
+{
+ dispc_plane_init(dispc);
+ dispc_vp_init(dispc);
+
+ /* Note: Hardcoded DPI routing on J721E for now */
+ if (dispc->feat->subrev == DISPC_J721E) {
+ dispc_write(dispc, DISPC_CONNECTIONS,
+ FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */
+ FLD_VAL(8, 7, 4) /* VP3 to DPI1 */
+ );
+ }
+}
+
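+/*
+ * Gamma table formats differ per subrevision: K2G and AM65x use 8-bit
+ * entries with the table index in bits [31:24], while J721E uses a
+ * 10-bit table where bit 31 is set on the first entry written.
+ */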
+static void dispc_k2g_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ v |= i << 24;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_GAMMA_TABLE,
+ v);
+ }
+}
+
+static void dispc_am65x_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ v |= i << 24;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+ }
+}
+
+static void dispc_j721e_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_10BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ if (i == 0)
+ v |= 1 << 31;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+ }
+}
+
+static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ case DISPC_AM65X:
+ dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ case DISPC_J721E:
+ dispc_j721e_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static const struct drm_color_lut dispc_vp_gamma_default_lut[] = {
+ { .red = 0, .green = 0, .blue = 0, },
+ { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
+};
+
+static void dispc_vp_set_gamma(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_color_lut *lut,
+ unsigned int length)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ u32 hwbits;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d, lut len %u, hw len %u\n",
+ __func__, hw_videoport, length, hwlen);
+
+ if (dispc->feat->vp_feat.color.gamma_type == TIDSS_GAMMA_10BIT)
+ hwbits = 10;
+ else
+ hwbits = 8;
+
+ if (!lut || length < 2) {
+ lut = dispc_vp_gamma_default_lut;
+ length = ARRAY_SIZE(dispc_vp_gamma_default_lut);
+ }
+
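+ /*
+ * Piecewise-linearly interpolate the user LUT onto the hwlen-entry
+ * hardware table, packing R, G and B into a single register value
+ * at the hardware bit width.
+ */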
+ for (i = 0; i < length - 1; ++i) {
+ unsigned int first = i * (hwlen - 1) / (length - 1);
+ unsigned int last = (i + 1) * (hwlen - 1) / (length - 1);
+ unsigned int w = last - first;
+ u16 r, g, b;
+ unsigned int j;
+
+ if (w == 0)
+ continue;
+
+ for (j = 0; j <= w; j++) {
+ r = (lut[i].red * (w - j) + lut[i + 1].red * j) / w;
+ g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w;
+ b = (lut[i].blue * (w - j) + lut[i + 1].blue * j) / w;
+
+ r >>= 16 - hwbits;
+ g >>= 16 - hwbits;
+ b >>= 16 - hwbits;
+
+ table[first + j] = (r << (hwbits * 2)) |
+ (g << hwbits) | b;
+ }
+ }
+
+ dispc_vp_write_gamma_table(dispc, hw_videoport);
+}
+
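+/*
+ * Convert a DRM S31.32 fixed-point CTM coefficient to the hardware's
+ * signed s2.8 format by dropping 24 fractional bits and clamping the
+ * magnitude (to 0x200 for negative and 0x1FF for positive values).
+ */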
+static s16 dispc_S31_32_to_s2_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+ s16 ret;
+
+ if (cbits & sign_bit)
+ ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x200);
+ else
+ ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1FF);
+
+ return ret;
+}
+
+static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm,
+ struct dispc_csc_coef *cpr)
+{
+ memset(cpr, 0, sizeof(*cpr));
+
+ cpr->to_regval = dispc_csc_cpr_regval;
+ cpr->m[CSC_RR] = dispc_S31_32_to_s2_8(ctm->matrix[0]);
+ cpr->m[CSC_RG] = dispc_S31_32_to_s2_8(ctm->matrix[1]);
+ cpr->m[CSC_RB] = dispc_S31_32_to_s2_8(ctm->matrix[2]);
+ cpr->m[CSC_GR] = dispc_S31_32_to_s2_8(ctm->matrix[3]);
+ cpr->m[CSC_GG] = dispc_S31_32_to_s2_8(ctm->matrix[4]);
+ cpr->m[CSC_GB] = dispc_S31_32_to_s2_8(ctm->matrix[5]);
+ cpr->m[CSC_BR] = dispc_S31_32_to_s2_8(ctm->matrix[6]);
+ cpr->m[CSC_BG] = dispc_S31_32_to_s2_8(ctm->matrix[7]);
+ cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]);
+}
+
+#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \
+ FLD_VAL(xB, 31, 22))
+
+static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc,
+ u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_BB], csc->m[CSC_BG], csc->m[CSC_BR]);
+ regval[1] = CVAL(csc->m[CSC_GB], csc->m[CSC_GG], csc->m[CSC_GR]);
+ regval[2] = CVAL(csc->m[CSC_RB], csc->m[CSC_RG], csc->m[CSC_RR]);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vp_cpr_coef_reg[] = {
+ DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+ /* The K2G CPR coefficients are packed into three registers. */
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ dispc_k2g_vp_csc_cpr_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vp_cpr_coef_reg); i++)
+ dispc_vp_write(dispc, hw_videoport, dispc_vp_cpr_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+ struct drm_color_ctm *ctm)
+{
+ u32 cprenable = 0;
+
+ if (ctm) {
+ struct dispc_csc_coef cpr;
+
+ dispc_k2g_cpr_from_ctm(ctm, &cpr);
+ dispc_k2g_vp_write_csc(dispc, hw_videoport, &cpr);
+ cprenable = 1;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+ cprenable, 15, 15);
+}
+
+static s16 dispc_S31_32_to_s3_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+ s16 ret;
+
+ if (cbits & sign_bit)
+ ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x400);
+ else
+ ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x3FF);
+
+ return ret;
+}
+
+static void dispc_csc_from_ctm(const struct drm_color_ctm *ctm,
+ struct dispc_csc_coef *cpr)
+{
+ memset(cpr, 0, sizeof(*cpr));
+
+ cpr->to_regval = dispc_csc_cpr_regval;
+ cpr->m[CSC_RR] = dispc_S31_32_to_s3_8(ctm->matrix[0]);
+ cpr->m[CSC_RG] = dispc_S31_32_to_s3_8(ctm->matrix[1]);
+ cpr->m[CSC_RB] = dispc_S31_32_to_s3_8(ctm->matrix[2]);
+ cpr->m[CSC_GR] = dispc_S31_32_to_s3_8(ctm->matrix[3]);
+ cpr->m[CSC_GG] = dispc_S31_32_to_s3_8(ctm->matrix[4]);
+ cpr->m[CSC_GB] = dispc_S31_32_to_s3_8(ctm->matrix[5]);
+ cpr->m[CSC_BR] = dispc_S31_32_to_s3_8(ctm->matrix[6]);
+ cpr->m[CSC_BG] = dispc_S31_32_to_s3_8(ctm->matrix[7]);
+ cpr->m[CSC_BB] = dispc_S31_32_to_s3_8(ctm->matrix[8]);
+}
+
+static void dispc_k3_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vp_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+ DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+ DISPC_VP_CSC_COEF3, DISPC_VP_CSC_COEF4, DISPC_VP_CSC_COEF5,
+ DISPC_VP_CSC_COEF6, DISPC_VP_CSC_COEF7,
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(regval); i++)
+ dispc_vp_write(dispc, hw_videoport, dispc_vp_csc_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+ struct drm_color_ctm *ctm)
+{
+ u32 colorconvenable = 0;
+
+ if (ctm) {
+ struct dispc_csc_coef csc;
+
+ dispc_csc_from_ctm(ctm, &csc);
+ dispc_k3_vp_write_csc(dispc, hw_videoport, &csc);
+ colorconvenable = 1;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+ colorconvenable, 24, 24);
+}
+
+static void dispc_vp_set_color_mgmt(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_crtc_state *state,
+ bool newmodeset)
+{
+ struct drm_color_lut *lut = NULL;
+ struct drm_color_ctm *ctm = NULL;
+ unsigned int length = 0;
+
+ if (!(state->color_mgmt_changed || newmodeset))
+ return;
+
+ if (state->gamma_lut) {
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ length = state->gamma_lut->length / sizeof(*lut);
+ }
+
+ dispc_vp_set_gamma(dispc, hw_videoport, lut, length);
+
+ if (state->ctm)
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (dispc->feat->subrev == DISPC_K2G)
+ dispc_k2g_vp_set_ctm(dispc, hw_videoport, ctm);
+ else
+ dispc_k3_vp_set_ctm(dispc, hw_videoport, ctm);
+}
+
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state, bool newmodeset)
+{
+ dispc_vp_set_default_color(dispc, hw_videoport, 0);
+ dispc_vp_set_color_mgmt(dispc, hw_videoport, state, newmodeset);
+}
+
+int dispc_runtime_suspend(struct dispc_device *dispc)
+{
+ dev_dbg(dispc->dev, "suspend\n");
+
+ dispc->is_enabled = false;
+
+ clk_disable_unprepare(dispc->fclk);
+
+ return 0;
+}
+
+int dispc_runtime_resume(struct dispc_device *dispc)
+{
+ dev_dbg(dispc->dev, "resume\n");
+
+ clk_prepare_enable(dispc->fclk);
+
+ if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+ dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
+
+ dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
+ dispc_read(dispc, DSS_REVISION));
+
+ dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
+ REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
+ REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+
+ if (dispc->feat->subrev == DISPC_AM65X)
+ dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
+ REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
+ REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+
+ dev_dbg(dispc->dev, "DISPC IDLE %d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+
+ dispc_initial_config(dispc);
+
+ dispc->is_enabled = true;
+
+ tidss_irq_resume(dispc->tidss);
+
+ return 0;
+}
+
+void dispc_remove(struct tidss_device *tidss)
+{
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ tidss->dispc = NULL;
+}
+
+static int dispc_iomap_resource(struct platform_device *pdev, const char *name,
+ void __iomem **base)
+{
+ struct resource *res;
+ void __iomem *b;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot get mem resource '%s'\n", name);
+ return -EINVAL;
+ }
+
+ b = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(b)) {
+ dev_err(&pdev->dev, "cannot ioremap resource '%s'\n", name);
+ return PTR_ERR(b);
+ }
+
+ *base = b;
+
+ return 0;
+}
+
+static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
+ struct dispc_device *dispc)
+{
+ dispc->oldi_io_ctrl =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "ti,am65x-oldi-io-ctrl");
+ if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) {
+ dispc->oldi_io_ctrl = NULL;
+ } else if (IS_ERR(dispc->oldi_io_ctrl)) {
+ dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n",
+ __func__, PTR_ERR(dispc->oldi_io_ctrl));
+ return PTR_ERR(dispc->oldi_io_ctrl);
+ }
+ return 0;
+}
+
+int dispc_init(struct tidss_device *tidss)
+{
+ struct device *dev = tidss->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dispc_device *dispc;
+ const struct dispc_features *feat;
+ unsigned int i, num_fourccs;
+ int r = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ feat = tidss->feat;
+
+ if (feat->subrev != DISPC_K2G) {
+ r = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (r)
+ dev_warn(dev, "cannot set DMA masks to 48-bit\n");
+ }
+
+ dispc = devm_kzalloc(dev, sizeof(*dispc), GFP_KERNEL);
+ if (!dispc)
+ return -ENOMEM;
+
+ dispc->fourccs = devm_kcalloc(dev, ARRAY_SIZE(dispc_color_formats),
+ sizeof(*dispc->fourccs), GFP_KERNEL);
+ if (!dispc->fourccs)
+ return -ENOMEM;
+
+ num_fourccs = 0;
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (feat->errata.i2000 &&
+ dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
+ continue;
+ dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
+ }
+ dispc->num_fourccs = num_fourccs;
+ dispc->tidss = tidss;
+ dispc->dev = dev;
+ dispc->feat = feat;
+
+ dispc_common_regmap = dispc->feat->common_regs;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->common,
+ &dispc->base_common);
+ if (r)
+ return r;
+
+ for (i = 0; i < dispc->feat->num_planes; i++) {
+ r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+ &dispc->base_vid[i]);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < dispc->feat->num_vps; i++) {
+ u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
+ u32 *gamma_table;
+ struct clk *clk;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->ovr_name[i],
+ &dispc->base_ovr[i]);
+ if (r)
+ return r;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->vp_name[i],
+ &dispc->base_vp[i]);
+ if (r)
+ return r;
+
+ clk = devm_clk_get(dev, dispc->feat->vpclk_name[i]);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "%s: Failed to get clk %s:%ld\n", __func__,
+ dispc->feat->vpclk_name[i], PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ dispc->vp_clk[i] = clk;
+
+ gamma_table = devm_kmalloc_array(dev, gamma_size,
+ sizeof(*gamma_table),
+ GFP_KERNEL);
+ if (!gamma_table)
+ return -ENOMEM;
+ dispc->vp_data[i].gamma_table = gamma_table;
+ }
+
+ if (feat->subrev == DISPC_AM65X) {
+ r = dispc_init_am65x_oldi_io_ctrl(dev, dispc);
+ if (r)
+ return r;
+ }
+
+ dispc->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(dispc->fclk)) {
+ dev_err(dev, "%s: Failed to get fclk: %ld\n",
+ __func__, PTR_ERR(dispc->fclk));
+ return PTR_ERR(dispc->fclk);
+ }
+ dev_dbg(dev, "DSS fclk %lu Hz\n", clk_get_rate(dispc->fclk));
+
+ of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
+ &dispc->memory_bandwidth_limit);
+
+ tidss->dispc = dispc;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
new file mode 100644
index 000000000000..a4a68249e44b
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_H__
+#define __TIDSS_DISPC_H__
+
+#include "tidss_drv.h"
+
+struct dispc_device;
+
+struct drm_crtc_state;
+
+enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT };
+
+struct tidss_vp_feat {
+ struct tidss_vp_color_feat {
+ u32 gamma_size;
+ enum tidss_gamma_type gamma_type;
+ bool has_ctm;
+ } color;
+};
+
+struct tidss_plane_feat {
+ struct tidss_plane_color_feat {
+ u32 encodings;
+ u32 ranges;
+ enum drm_color_encoding default_encoding;
+ enum drm_color_range default_range;
+ } color;
+ struct tidss_plane_blend_feat {
+ bool global_alpha;
+ } blend;
+};
+
+struct dispc_features_scaling {
+ u32 in_width_max_5tap_rgb;
+ u32 in_width_max_3tap_rgb;
+ u32 in_width_max_5tap_yuv;
+ u32 in_width_max_3tap_yuv;
+ u32 upscale_limit;
+ u32 downscale_limit_5tap;
+ u32 downscale_limit_3tap;
+ u32 xinc_max;
+};
+
+struct dispc_errata {
+ bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
+};
+
+enum dispc_vp_bus_type {
+ DISPC_VP_DPI, /* DPI output */
+ DISPC_VP_OLDI, /* OLDI (LVDS) output */
+ DISPC_VP_INTERNAL, /* SoC internal routing */
+ DISPC_VP_MAX_BUS_TYPE,
+};
+
+enum dispc_dss_subrevision {
+ DISPC_K2G,
+ DISPC_AM65X,
+ DISPC_J721E,
+};
+
+struct dispc_features {
+ int min_pclk_khz;
+ int max_pclk_khz[DISPC_VP_MAX_BUS_TYPE];
+
+ struct dispc_features_scaling scaling;
+
+ enum dispc_dss_subrevision subrev;
+
+ const char *common;
+ const u16 *common_regs;
+ u32 num_vps;
+ const char *vp_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+ const char *ovr_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+ const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
+ const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
+ struct tidss_vp_feat vp_feat;
+ u32 num_planes;
+ const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
+ bool vid_lite[TIDSS_MAX_PLANES];
+ u32 vid_order[TIDSS_MAX_PLANES];
+
+ struct dispc_errata errata;
+};
+
+extern const struct dispc_features dispc_k2g_feats;
+extern const struct dispc_features dispc_am65x_feats;
+extern const struct dispc_features dispc_j721e_feats;
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+ u32 hw_videoport, u32 x, u32 y, u32 layer);
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+ u32 hw_videoport, u32 layer, bool enable);
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport);
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_display_mode *mode);
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long rate);
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state, bool newmodeset);
+
+int dispc_runtime_suspend(struct dispc_device *dispc);
+int dispc_runtime_resume(struct dispc_device *dispc);
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len);
+
+int dispc_init(struct tidss_device *tidss);
+void dispc_remove(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
new file mode 100644
index 000000000000..88a83a41b6e3
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_REGS_H
+#define __TIDSS_DISPC_REGS_H
+
+enum dispc_common_regs {
+ NOT_APPLICABLE_OFF = 0,
+ DSS_REVISION_OFF,
+ DSS_SYSCONFIG_OFF,
+ DSS_SYSSTATUS_OFF,
+ DISPC_IRQ_EOI_OFF,
+ DISPC_IRQSTATUS_RAW_OFF,
+ DISPC_IRQSTATUS_OFF,
+ DISPC_IRQENABLE_SET_OFF,
+ DISPC_IRQENABLE_CLR_OFF,
+ DISPC_VID_IRQENABLE_OFF,
+ DISPC_VID_IRQSTATUS_OFF,
+ DISPC_VP_IRQENABLE_OFF,
+ DISPC_VP_IRQSTATUS_OFF,
+ WB_IRQENABLE_OFF,
+ WB_IRQSTATUS_OFF,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF,
+ DISPC_GLOBAL_OUTPUT_ENABLE_OFF,
+ DISPC_GLOBAL_BUFFER_OFF,
+ DSS_CBA_CFG_OFF,
+ DISPC_DBG_CONTROL_OFF,
+ DISPC_DBG_STATUS_OFF,
+ DISPC_CLKGATING_DISABLE_OFF,
+ DISPC_SECURE_DISABLE_OFF,
+ FBDC_REVISION_1_OFF,
+ FBDC_REVISION_2_OFF,
+ FBDC_REVISION_3_OFF,
+ FBDC_REVISION_4_OFF,
+ FBDC_REVISION_5_OFF,
+ FBDC_REVISION_6_OFF,
+ FBDC_COMMON_CONTROL_OFF,
+ FBDC_CONSTANT_COLOR_0_OFF,
+ FBDC_CONSTANT_COLOR_1_OFF,
+ DISPC_CONNECTIONS_OFF,
+ DISPC_MSS_VP1_OFF,
+ DISPC_MSS_VP3_OFF,
+ DISPC_COMMON_REG_TABLE_LEN,
+};
+
+/*
+ * dispc_common_regmap must be defined as a const u16 * pointing to a
+ * valid DSS common register map for the platform before the macros
+ * below can be used.
+ */
+
+#define REG(r) (dispc_common_regmap[r ## _OFF])
+
+#define DSS_REVISION REG(DSS_REVISION)
+#define DSS_SYSCONFIG REG(DSS_SYSCONFIG)
+#define DSS_SYSSTATUS REG(DSS_SYSSTATUS)
+#define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI)
+#define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW)
+#define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS)
+#define DISPC_IRQENABLE_SET REG(DISPC_IRQENABLE_SET)
+#define DISPC_IRQENABLE_CLR REG(DISPC_IRQENABLE_CLR)
+#define DISPC_VID_IRQENABLE(n) (REG(DISPC_VID_IRQENABLE) + (n) * 4)
+#define DISPC_VID_IRQSTATUS(n) (REG(DISPC_VID_IRQSTATUS) + (n) * 4)
+#define DISPC_VP_IRQENABLE(n) (REG(DISPC_VP_IRQENABLE) + (n) * 4)
+#define DISPC_VP_IRQSTATUS(n) (REG(DISPC_VP_IRQSTATUS) + (n) * 4)
+#define WB_IRQENABLE REG(WB_IRQENABLE)
+#define WB_IRQSTATUS REG(WB_IRQSTATUS)
+
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE)
+#define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE)
+#define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER)
+#define DSS_CBA_CFG REG(DSS_CBA_CFG)
+#define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL)
+#define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS)
+#define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE)
+#define DISPC_SECURE_DISABLE REG(DISPC_SECURE_DISABLE)
+
+#define FBDC_REVISION_1 REG(FBDC_REVISION_1)
+#define FBDC_REVISION_2 REG(FBDC_REVISION_2)
+#define FBDC_REVISION_3 REG(FBDC_REVISION_3)
+#define FBDC_REVISION_4 REG(FBDC_REVISION_4)
+#define FBDC_REVISION_5 REG(FBDC_REVISION_5)
+#define FBDC_REVISION_6 REG(FBDC_REVISION_6)
+#define FBDC_COMMON_CONTROL REG(FBDC_COMMON_CONTROL)
+#define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0)
+#define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1)
+#define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS)
+#define DISPC_MSS_VP1 REG(DISPC_MSS_VP1)
+#define DISPC_MSS_VP3 REG(DISPC_MSS_VP3)
+
+/* VID */
+
+#define DISPC_VID_ACCUH_0 0x0
+#define DISPC_VID_ACCUH_1 0x4
+#define DISPC_VID_ACCUH2_0 0x8
+#define DISPC_VID_ACCUH2_1 0xc
+#define DISPC_VID_ACCUV_0 0x10
+#define DISPC_VID_ACCUV_1 0x14
+#define DISPC_VID_ACCUV2_0 0x18
+#define DISPC_VID_ACCUV2_1 0x1c
+#define DISPC_VID_ATTRIBUTES 0x20
+#define DISPC_VID_ATTRIBUTES2 0x24
+#define DISPC_VID_BA_0 0x28
+#define DISPC_VID_BA_1 0x2c
+#define DISPC_VID_BA_UV_0 0x30
+#define DISPC_VID_BA_UV_1 0x34
+#define DISPC_VID_BUF_SIZE_STATUS 0x38
+#define DISPC_VID_BUF_THRESHOLD 0x3c
+#define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4)
+
+#define DISPC_VID_FIRH 0x5c
+#define DISPC_VID_FIRH2 0x60
+#define DISPC_VID_FIRV 0x64
+#define DISPC_VID_FIRV2 0x68
+
+#define DISPC_VID_FIR_COEFS_H0 0x6c
+#define DISPC_VID_FIR_COEF_H0(phase) (0x6c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H0_C 0x90
+#define DISPC_VID_FIR_COEF_H0_C(phase) (0x90 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_H12 0xb4
+#define DISPC_VID_FIR_COEF_H12(phase) (0xb4 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H12_C 0xf4
+#define DISPC_VID_FIR_COEF_H12_C(phase) (0xf4 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V0 0x134
+#define DISPC_VID_FIR_COEF_V0(phase) (0x134 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V0_C 0x158
+#define DISPC_VID_FIR_COEF_V0_C(phase) (0x158 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V12 0x17c
+#define DISPC_VID_FIR_COEF_V12(phase) (0x17c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V12_C 0x1bc
+#define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4)
+
+#define DISPC_VID_GLOBAL_ALPHA 0x1fc
+#define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */
+#define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */
+#define DISPC_VID_MFLAG_THRESHOLD 0x208
+#define DISPC_VID_PICTURE_SIZE 0x20c
+#define DISPC_VID_PIXEL_INC 0x210
+#define DISPC_VID_K2G_POSITION 0x214 /* K2G */
+#define DISPC_VID_PRELOAD 0x218
+#define DISPC_VID_ROW_INC 0x21c
+#define DISPC_VID_SIZE 0x220
+#define DISPC_VID_BA_EXT_0 0x22c
+#define DISPC_VID_BA_EXT_1 0x230
+#define DISPC_VID_BA_UV_EXT_0 0x234
+#define DISPC_VID_BA_UV_EXT_1 0x238
+#define DISPC_VID_CSC_COEF7 0x23c
+#define DISPC_VID_ROW_INC_UV 0x248
+#define DISPC_VID_CLUT 0x260
+#define DISPC_VID_SAFETY_ATTRIBUTES 0x2a0
+#define DISPC_VID_SAFETY_CAPT_SIGNATURE 0x2a4
+#define DISPC_VID_SAFETY_POSITION 0x2a8
+#define DISPC_VID_SAFETY_REF_SIGNATURE 0x2ac
+#define DISPC_VID_SAFETY_SIZE 0x2b0
+#define DISPC_VID_SAFETY_LFSR_SEED 0x2b4
+#define DISPC_VID_LUMAKEY 0x2b8
+#define DISPC_VID_DMA_BUFSIZE 0x2bc /* J721E */
+
+/* OVR */
+
+#define DISPC_OVR_CONFIG 0x0
+#define DISPC_OVR_VIRTVP 0x4 /* J721E */
+#define DISPC_OVR_DEFAULT_COLOR 0x8
+#define DISPC_OVR_DEFAULT_COLOR2 0xc
+#define DISPC_OVR_TRANS_COLOR_MAX 0x10
+#define DISPC_OVR_TRANS_COLOR_MAX2 0x14
+#define DISPC_OVR_TRANS_COLOR_MIN 0x18
+#define DISPC_OVR_TRANS_COLOR_MIN2 0x1c
+#define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4)
+#define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */
+
+/* VP */
+
+#define DISPC_VP_CONFIG 0x0
+#define DISPC_VP_CONTROL 0x4
+#define DISPC_VP_CSC_COEF0 0x8
+#define DISPC_VP_CSC_COEF1 0xc
+#define DISPC_VP_CSC_COEF2 0x10
+#define DISPC_VP_DATA_CYCLE_0 0x14
+#define DISPC_VP_DATA_CYCLE_1 0x18
+#define DISPC_VP_DATA_CYCLE_2 0x1c
+#define DISPC_VP_K2G_GAMMA_TABLE 0x20 /* K2G */
+#define DISPC_VP_K2G_IRQENABLE 0x3c /* K2G */
+#define DISPC_VP_K2G_IRQSTATUS 0x40 /* K2G */
+#define DISPC_VP_LINE_NUMBER 0x44
+#define DISPC_VP_POL_FREQ 0x4c
+#define DISPC_VP_SIZE_SCREEN 0x50
+#define DISPC_VP_TIMING_H 0x54
+#define DISPC_VP_TIMING_V 0x58
+#define DISPC_VP_CSC_COEF3 0x5c
+#define DISPC_VP_CSC_COEF4 0x60
+#define DISPC_VP_CSC_COEF5 0x64
+#define DISPC_VP_CSC_COEF6 0x68
+#define DISPC_VP_CSC_COEF7 0x6c
+#define DISPC_VP_SAFETY_ATTRIBUTES_0 0x70
+#define DISPC_VP_SAFETY_ATTRIBUTES_1 0x74
+#define DISPC_VP_SAFETY_ATTRIBUTES_2 0x78
+#define DISPC_VP_SAFETY_ATTRIBUTES_3 0x7c
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_0 0x90
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_1 0x94
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_2 0x98
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_3 0x9c
+#define DISPC_VP_SAFETY_POSITION_0 0xb0
+#define DISPC_VP_SAFETY_POSITION_1 0xb4
+#define DISPC_VP_SAFETY_POSITION_2 0xb8
+#define DISPC_VP_SAFETY_POSITION_3 0xbc
+#define DISPC_VP_SAFETY_REF_SIGNATURE_0 0xd0
+#define DISPC_VP_SAFETY_REF_SIGNATURE_1 0xd4
+#define DISPC_VP_SAFETY_REF_SIGNATURE_2 0xd8
+#define DISPC_VP_SAFETY_REF_SIGNATURE_3 0xdc
+#define DISPC_VP_SAFETY_SIZE_0 0xf0
+#define DISPC_VP_SAFETY_SIZE_1 0xf4
+#define DISPC_VP_SAFETY_SIZE_2 0xf8
+#define DISPC_VP_SAFETY_SIZE_3 0xfc
+#define DISPC_VP_SAFETY_LFSR_SEED 0x110
+#define DISPC_VP_GAMMA_TABLE 0x120
+#define DISPC_VP_DSS_OLDI_CFG 0x160
+#define DISPC_VP_DSS_OLDI_STATUS 0x164
+#define DISPC_VP_DSS_OLDI_LB 0x168
+#define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE 0x170 /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE_STATUS 0x174 /* J721E */
+
+/*
+ * OLDI IO_CTRL register offsets. On AM654 the registers live in
+ * CTRL_MMR0; the syscon regmap should map the 0x14-byte range from
+ * CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL.
+ */
+#define OLDI_DAT0_IO_CTRL 0x00
+#define OLDI_DAT1_IO_CTRL 0x04
+#define OLDI_DAT2_IO_CTRL 0x08
+#define OLDI_DAT3_IO_CTRL 0x0C
+#define OLDI_CLK_IO_CTRL 0x10
+
+#define OLDI_PWRDN_TX BIT(8)
+
+#endif /* __TIDSS_DISPC_REGS_H */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
new file mode 100644
index 000000000000..d95e4be2c7b9
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/console.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
+
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_kms.h"
+#include "tidss_irq.h"
+
+/* Power management */
+
+int tidss_runtime_get(struct tidss_device *tidss)
+{
+ int r;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ r = pm_runtime_get_sync(tidss->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+void tidss_runtime_put(struct tidss_device *tidss)
+{
+ int r;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ r = pm_runtime_put_sync(tidss->dev);
+ WARN_ON(r < 0);
+}
+
+static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return dispc_runtime_suspend(tidss->dispc);
+}
+
+static int __maybe_unused tidss_pm_runtime_resume(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+ int r;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ r = dispc_runtime_resume(tidss->dispc);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int __maybe_unused tidss_suspend(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return drm_mode_config_helper_suspend(&tidss->ddev);
+}
+
+static int __maybe_unused tidss_resume(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return drm_mode_config_helper_resume(&tidss->ddev);
+}
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops tidss_pm_ops = {
+ .runtime_suspend = tidss_pm_runtime_suspend,
+ .runtime_resume = tidss_pm_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
+};
+
+#endif /* CONFIG_PM */
+
+/* DRM device Information */
+
+static void tidss_release(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ drm_kms_helper_poll_fini(ddev);
+
+ tidss_modeset_cleanup(tidss);
+
+ drm_dev_fini(ddev);
+
+ kfree(tidss);
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
+
+static struct drm_driver tidss_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &tidss_fops,
+ .release = tidss_release,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .name = "tidss",
+ .desc = "TI Keystone DSS",
+ .date = "20180215",
+ .major = 1,
+ .minor = 0,
+
+ .irq_preinstall = tidss_irq_preinstall,
+ .irq_postinstall = tidss_irq_postinstall,
+ .irq_handler = tidss_irq_handler,
+ .irq_uninstall = tidss_irq_uninstall,
+};
+
+static int tidss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tidss_device *tidss;
+ struct drm_device *ddev;
+ int ret;
+ int irq;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* Can't use devm_* since drm_device's lifetime may exceed dev's */
+ tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
+ if (!tidss)
+ return -ENOMEM;
+
+ ddev = &tidss->ddev;
+
+ ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
+ if (ret) {
+ kfree(ddev);
+ return ret;
+ }
+
+ tidss->dev = dev;
+ tidss->feat = of_device_get_match_data(dev);
+
+ platform_set_drvdata(pdev, tidss);
+
+ ddev->dev_private = tidss;
+
+ ret = dispc_init(tidss);
+ if (ret) {
+ dev_err(dev, "failed to initialize dispc: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+#ifndef CONFIG_PM
+ /* If we don't have PM, we need to call resume manually */
+ dispc_runtime_resume(tidss->dispc);
+#endif
+
+ ret = tidss_modeset_init(tidss);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to init DRM/KMS (%d)\n", ret);
+ goto err_runtime_suspend;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_runtime_suspend;
+ }
+
+ ret = drm_irq_install(ddev, irq);
+ if (ret) {
+ dev_err(dev, "drm_irq_install failed: %d\n", ret);
+ goto err_runtime_suspend;
+ }
+
+ drm_kms_helper_poll_init(ddev);
+
+ drm_mode_config_reset(ddev);
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret) {
+ dev_err(dev, "failed to register DRM device\n");
+ goto err_irq_uninstall;
+ }
+
+ drm_fbdev_generic_setup(ddev, 32);
+
+ dev_dbg(dev, "%s done\n", __func__);
+
+ return 0;
+
+err_irq_uninstall:
+ drm_irq_uninstall(ddev);
+
+err_runtime_suspend:
+#ifndef CONFIG_PM
+ dispc_runtime_suspend(tidss->dispc);
+#endif
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int tidss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tidss_device *tidss = platform_get_drvdata(pdev);
+ struct drm_device *ddev = &tidss->ddev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ drm_dev_unregister(ddev);
+
+ drm_atomic_helper_shutdown(ddev);
+
+ drm_irq_uninstall(ddev);
+
+#ifndef CONFIG_PM
+ /* If we don't have PM, we need to call suspend manually */
+ dispc_runtime_suspend(tidss->dispc);
+#endif
+ pm_runtime_disable(dev);
+
+ /* The devm-allocated dispc goes away with the device, so just mark it NULL */
+ dispc_remove(tidss);
+
+ dev_dbg(dev, "%s done\n", __func__);
+
+ return 0;
+}
+
+static void tidss_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
+static const struct of_device_id tidss_of_table[] = {
+ { .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
+ { .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
+ { .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, tidss_of_table);
+
+static struct platform_driver tidss_platform_driver = {
+ .probe = tidss_probe,
+ .remove = tidss_remove,
+ .shutdown = tidss_shutdown,
+ .driver = {
+ .name = "tidss",
+#ifdef CONFIG_PM
+ .pm = &tidss_pm_ops,
+#endif
+ .of_match_table = tidss_of_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(tidss_platform_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("TI Keystone DSS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
new file mode 100644
index 000000000000..e2aa6436ad18
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_DRV_H__
+#define __TIDSS_DRV_H__
+
+#include <linux/spinlock.h>
+
+#define TIDSS_MAX_PORTS 4
+#define TIDSS_MAX_PLANES 4
+
+typedef u32 dispc_irq_t;
+
+struct tidss_device {
+ struct drm_device ddev; /* DRM device for DSS */
+ struct device *dev; /* Underlying DSS device */
+
+ const struct dispc_features *feat;
+ struct dispc_device *dispc;
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[TIDSS_MAX_PORTS];
+
+ unsigned int num_planes;
+ struct drm_plane *planes[TIDSS_MAX_PLANES];
+
+ spinlock_t wait_lock; /* protects irq_mask */
+ dispc_irq_t irq_mask; /* enabled irqs */
+
+ struct drm_atomic_state *saved_state;
+};
+
+int tidss_runtime_get(struct tidss_device *tidss);
+void tidss_runtime_put(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
new file mode 100644
index 000000000000..30bf2a65949c
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/export.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include "tidss_crtc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+
+static int tidss_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct tidss_crtc_state *tcrtc_state = to_tidss_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_bridge *bridge;
+ bool bus_flags_set = false;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ /*
+ * Take the bus_flags from the first bridge that defines
+ * bridge timings, or from the connector's display_info if no
+ * bridge defines the timings.
+ */
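+ /*
+ * For example, a panel wrapped in a panel-bridge has no bridge
+ * timings, so in that case the flags come from display_info below.
+ */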
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->timings)
+ continue;
+
+ tcrtc_state->bus_flags = bridge->timings->input_bus_flags;
+ bus_flags_set = true;
+ break;
+ }
+
+ if (!di->bus_formats || di->num_bus_formats == 0) {
+ dev_err(ddev->dev, "%s: No bus_formats in connected display\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ // XXX any cleaner way to set bus format and flags?
+ tcrtc_state->bus_format = di->bus_formats[0];
+ if (!bus_flags_set)
+ tcrtc_state->bus_flags = di->bus_flags;
+
+ return 0;
+}
+
+static void tidss_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .atomic_check = tidss_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = tidss_encoder_destroy,
+};
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+ u32 encoder_type, u32 possible_crtcs)
+{
+ struct drm_encoder *enc;
+ int ret;
+
+ enc = kzalloc(sizeof(*enc), GFP_KERNEL);
+ if (!enc)
+ return ERR_PTR(-ENOMEM);
+
+ enc->possible_crtcs = possible_crtcs;
+
+ ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
+ encoder_type, NULL);
+ if (ret < 0) {
+ kfree(enc);
+ return ERR_PTR(ret);
+ }
+
+ drm_encoder_helper_add(enc, &encoder_helper_funcs);
+
+ dev_dbg(tidss->dev, "Encoder create done\n");
+
+ return enc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.h b/drivers/gpu/drm/tidss/tidss_encoder.h
new file mode 100644
index 000000000000..06854d66e7e6
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_ENCODER_H__
+#define __TIDSS_ENCODER_H__
+
+#include <drm/drm_encoder.h>
+
+struct tidss_device;
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+ u32 encoder_type, u32 possible_crtcs);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
new file mode 100644
index 000000000000..612c046738e5
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_print.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* call with wait_lock and dispc runtime held */
+static void tidss_irq_update(struct tidss_device *tidss)
+{
+ assert_spin_locked(&tidss->wait_lock);
+
+ dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
+}
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+irqreturn_t tidss_irq_handler(int irq, void *arg)
+{
+ struct drm_device *ddev = (struct drm_device *)arg;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned int id;
+ dispc_irq_t irqstatus;
+
+ if (WARN_ON(!ddev->irq_enabled))
+ return IRQ_NONE;
+
+ irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+
+ for (id = 0; id < tidss->num_crtcs; id++) {
+ struct drm_crtc *crtc = tidss->crtcs[id];
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+
+ if (irqstatus & (DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport)))
+ tidss_crtc_vblank_irq(crtc);
+
+ if (irqstatus & (DSS_IRQ_VP_FRAME_DONE(hw_videoport)))
+ tidss_crtc_framedone_irq(crtc);
+
+ if (irqstatus & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+ tidss_crtc_error_irq(crtc, irqstatus);
+ }
+
+ if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
+ dev_err_ratelimited(tidss->dev, "OCP error\n");
+
+ return IRQ_HANDLED;
+}
+
+void tidss_irq_resume(struct tidss_device *tidss)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_preinstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ spin_lock_init(&tidss->wait_lock);
+
+ tidss_runtime_get(tidss);
+
+ dispc_set_irqenable(tidss->dispc, 0);
+ dispc_read_and_clear_irqstatus(tidss->dispc);
+
+ tidss_runtime_put(tidss);
+}
+
+int tidss_irq_postinstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+ unsigned int i;
+
+ tidss_runtime_get(tidss);
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+
+ tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
+
+ for (i = 0; i < tidss->num_crtcs; ++i) {
+ struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
+
+ tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
+
+ tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
+ }
+
+ tidss_irq_update(tidss);
+
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+
+ tidss_runtime_put(tidss);
+
+ return 0;
+}
+
+void tidss_irq_uninstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ tidss_runtime_get(tidss);
+ dispc_set_irqenable(tidss->dispc, 0);
+ tidss_runtime_put(tidss);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_irq.h b/drivers/gpu/drm/tidss/tidss_irq.h
new file mode 100644
index 000000000000..aa92db403cca
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_IRQ_H__
+#define __TIDSS_IRQ_H__
+
+#include <linux/types.h>
+
+#include "tidss_drv.h"
+
+/*
+ * The IRQ statuses from the various DISPC IRQ registers are packed into
+ * a single value, where the bits are defined as follows:
+ *
+ * bit group |dev|wb |mrg0|mrg1|mrg2|mrg3|plane0-3| <unused> |
+ * bit use |D |fou|FEOL|FEOL|FEOL|FEOL| UUUU | |
+ * bit number|0 |1-3|4-7 |8-11| 12-19 | 20-23 | 24-31 |
+ *
+ * device bits: D = OCP error
+ * WB bits: f = frame done wb, o = wb buffer overflow,
+ * u = wb buffer uncomplete
+ * vp bits: F = frame done, E = vsync even, O = vsync odd, L = sync lost
+ * plane bits: U = fifo underflow
+ */
+
+#define DSS_IRQ_DEVICE_OCP_ERR BIT(0)
+
+#define DSS_IRQ_DEVICE_FRAMEDONEWB BIT(1)
+#define DSS_IRQ_DEVICE_WBBUFFEROVERFLOW BIT(2)
+#define DSS_IRQ_DEVICE_WBUNCOMPLETEERROR BIT(3)
+#define DSS_IRQ_DEVICE_WB_MASK GENMASK(3, 1)
+
+#define DSS_IRQ_VP_BIT_N(ch, bit) (4 + 4 * (ch) + (bit))
+#define DSS_IRQ_PLANE_BIT_N(plane, bit) \
+ (DSS_IRQ_VP_BIT_N(TIDSS_MAX_PORTS, 0) + 1 * (plane) + (bit))
+
+#define DSS_IRQ_VP_BIT(ch, bit) BIT(DSS_IRQ_VP_BIT_N((ch), (bit)))
+#define DSS_IRQ_PLANE_BIT(plane, bit) \
+ BIT(DSS_IRQ_PLANE_BIT_N((plane), (bit)))
+
+static inline dispc_irq_t DSS_IRQ_VP_MASK(u32 ch)
+{
+ return GENMASK(DSS_IRQ_VP_BIT_N((ch), 3), DSS_IRQ_VP_BIT_N((ch), 0));
+}
+
+static inline dispc_irq_t DSS_IRQ_PLANE_MASK(u32 plane)
+{
+ return GENMASK(DSS_IRQ_PLANE_BIT_N((plane), 0),
+ DSS_IRQ_PLANE_BIT_N((plane), 0));
+}
+
+#define DSS_IRQ_VP_FRAME_DONE(ch) DSS_IRQ_VP_BIT((ch), 0)
+#define DSS_IRQ_VP_VSYNC_EVEN(ch) DSS_IRQ_VP_BIT((ch), 1)
+#define DSS_IRQ_VP_VSYNC_ODD(ch) DSS_IRQ_VP_BIT((ch), 2)
+#define DSS_IRQ_VP_SYNC_LOST(ch) DSS_IRQ_VP_BIT((ch), 3)
+
+#define DSS_IRQ_PLANE_FIFO_UNDERFLOW(plane) DSS_IRQ_PLANE_BIT((plane), 0)
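+
+/*
+ * Worked example of the packing above: DSS_IRQ_VP_VSYNC_EVEN(1) is
+ * DSS_IRQ_VP_BIT(1, 1) = BIT(4 + 4 * 1 + 1) = BIT(9), i.e. the E bit of
+ * the mrg1 group (bits 8-11) in the table above.
+ */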
+
+struct drm_crtc;
+struct drm_device;
+
+struct tidss_device;
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc);
+void tidss_irq_disable_vblank(struct drm_crtc *crtc);
+
+void tidss_irq_preinstall(struct drm_device *ddev);
+int tidss_irq_postinstall(struct drm_device *ddev);
+void tidss_irq_uninstall(struct drm_device *ddev);
+irqreturn_t tidss_irq_handler(int irq, void *arg);
+
+void tidss_irq_resume(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
new file mode 100644
index 000000000000..7d419960b030
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+#include "tidss_kms.h"
+#include "tidss_plane.h"
+
+static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *ddev = old_state->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_runtime_get(tidss);
+
+ drm_atomic_helper_commit_modeset_disables(ddev, old_state);
+ drm_atomic_helper_commit_planes(ddev, old_state, 0);
+ drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_wait_for_flip_done(ddev, old_state);
+
+ drm_atomic_helper_cleanup_planes(ddev, old_state);
+
+ tidss_runtime_put(tidss);
+}
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+ .atomic_commit_tail = tidss_atomic_commit_tail,
+};
+
+static int tidss_atomic_check(struct drm_device *ddev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *opstate;
+ struct drm_plane_state *npstate;
+ struct drm_plane *plane;
+ struct drm_crtc_state *cstate;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ ret = drm_atomic_helper_check(ddev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * Add all active planes on a CRTC to the atomic state, if
+ * x/y/z position or activity of any plane on that CRTC
+ * changes. This is needed for updating the plane positions in
+ * tidss_crtc_position_planes() which is called from
+ * crtc_atomic_enable() and crtc_atomic_flush(). We have an
+ * extra flag to mark x,y-position changes and together
+ * with zpos_changed the condition recognizes all the above
+ * cases.
+ */
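+ /*
+ * For example, moving one plane on a CRTC pulls that CRTC's other
+ * active planes into the state as well, so
+ * tidss_crtc_position_planes() sees the complete set.
+ */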
+ for_each_oldnew_plane_in_state(state, plane, opstate, npstate, i) {
+ if (!npstate->crtc || !npstate->visible)
+ continue;
+
+ if (!opstate->crtc || opstate->crtc_x != npstate->crtc_x ||
+ opstate->crtc_y != npstate->crtc_y) {
+ cstate = drm_atomic_get_crtc_state(state,
+ npstate->crtc);
+ if (IS_ERR(cstate))
+ return PTR_ERR(cstate);
+ to_tidss_crtc_state(cstate)->plane_pos_changed = true;
+ }
+ }
+
+ for_each_new_crtc_in_state(state, crtc, cstate, i) {
+ if (to_tidss_crtc_state(cstate)->plane_pos_changed ||
+ cstate->zpos_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = tidss_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int tidss_dispc_modeset_init(struct tidss_device *tidss)
+{
+ struct device *dev = tidss->dev;
+ unsigned int fourccs_len;
+ const u32 *fourccs = dispc_plane_formats(tidss->dispc, &fourccs_len);
+ unsigned int i;
+
+ struct pipe {
+ u32 hw_videoport;
+ struct drm_bridge *bridge;
+ u32 enc_type;
+ };
+
+ const struct dispc_features *feat = tidss->feat;
+ u32 max_vps = feat->num_vps;
+ u32 max_planes = feat->num_planes;
+
+ struct pipe pipes[TIDSS_MAX_PORTS];
+ u32 num_pipes = 0;
+ u32 crtc_mask;
+
+ /* first find all the connected panels & bridges */
+
+ for (i = 0; i < max_vps; i++) {
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ u32 enc_type = DRM_MODE_ENCODER_NONE;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, i, 0,
+ &panel, &bridge);
+ if (ret == -ENODEV) {
+ dev_dbg(dev, "no panel/bridge for port %d\n", i);
+ continue;
+ } else if (ret) {
+ dev_dbg(dev, "port %d probe returned %d\n", i, ret);
+ return ret;
+ }
+
+ if (panel) {
+ u32 conn_type;
+
+ dev_dbg(dev, "Setting up panel for port %d\n", i);
+
+ switch (feat->vp_bus_type[i]) {
+ case DISPC_VP_OLDI:
+ enc_type = DRM_MODE_ENCODER_LVDS;
+ conn_type = DRM_MODE_CONNECTOR_LVDS;
+ break;
+ case DISPC_VP_DPI:
+ enc_type = DRM_MODE_ENCODER_DPI;
+ conn_type = DRM_MODE_CONNECTOR_LVDS;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (panel->connector_type != conn_type) {
+ dev_err(dev,
+ "%s: Panel %s has incompatible connector type for vp%d (%d != %d)\n",
+ __func__, dev_name(panel->dev), i,
+ panel->connector_type, conn_type);
+ return -EINVAL;
+ }
+
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(bridge)) {
+ dev_err(dev,
+ "failed to set up panel bridge for port %d\n",
+ i);
+ return PTR_ERR(bridge);
+ }
+ }
+
+ pipes[num_pipes].hw_videoport = i;
+ pipes[num_pipes].bridge = bridge;
+ pipes[num_pipes].enc_type = enc_type;
+ num_pipes++;
+ }
+
+ /* all planes can be on any crtc */
+ crtc_mask = (1 << num_pipes) - 1;
+
+ /* then create a plane, a crtc and an encoder for each panel/bridge */
+
+ for (i = 0; i < num_pipes; ++i) {
+ struct tidss_plane *tplane;
+ struct tidss_crtc *tcrtc;
+ struct drm_encoder *enc;
+ u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+ int ret;
+
+ tplane = tidss_plane_create(tidss, hw_plane_id,
+ DRM_PLANE_TYPE_PRIMARY, crtc_mask,
+ fourccs, fourccs_len);
+ if (IS_ERR(tplane)) {
+ dev_err(tidss->dev, "plane create failed\n");
+ return PTR_ERR(tplane);
+ }
+
+ tidss->planes[tidss->num_planes++] = &tplane->plane;
+
+ tcrtc = tidss_crtc_create(tidss, pipes[i].hw_videoport,
+ &tplane->plane);
+ if (IS_ERR(tcrtc)) {
+ dev_err(tidss->dev, "crtc create failed\n");
+ return PTR_ERR(tcrtc);
+ }
+
+ tidss->crtcs[tidss->num_crtcs++] = &tcrtc->crtc;
+
+ enc = tidss_encoder_create(tidss, pipes[i].enc_type,
+ 1 << tcrtc->crtc.index);
+ if (IS_ERR(enc)) {
+ dev_err(tidss->dev, "encoder create failed\n");
+ return PTR_ERR(enc);
+ }
+
+ ret = drm_bridge_attach(enc, pipes[i].bridge, NULL, 0);
+ if (ret) {
+ dev_err(tidss->dev, "bridge attach failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* create overlay planes of the leftover planes */
+
+ while (tidss->num_planes < max_planes) {
+ struct tidss_plane *tplane;
+ u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+
+ tplane = tidss_plane_create(tidss, hw_plane_id,
+ DRM_PLANE_TYPE_OVERLAY, crtc_mask,
+ fourccs, fourccs_len);
+
+ if (IS_ERR(tplane)) {
+ dev_err(tidss->dev, "plane create failed\n");
+ return PTR_ERR(tplane);
+ }
+
+ tidss->planes[tidss->num_planes++] = &tplane->plane;
+ }
+
+ return 0;
+}
+
+int tidss_modeset_init(struct tidss_device *tidss)
+{
+ struct drm_device *ddev = &tidss->ddev;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ drm_mode_config_init(ddev);
+
+ ddev->mode_config.min_width = 8;
+ ddev->mode_config.min_height = 8;
+ ddev->mode_config.max_width = 8096;
+ ddev->mode_config.max_height = 8096;
+ ddev->mode_config.normalize_zpos = true;
+ ddev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+ ret = tidss_dispc_modeset_init(tidss);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ ret = drm_vblank_init(ddev, tidss->num_crtcs);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ /* Start with vertical blanking interrupt reporting disabled. */
+ for (i = 0; i < tidss->num_crtcs; ++i)
+ drm_crtc_vblank_reset(tidss->crtcs[i]);
+
+ drm_mode_config_reset(ddev);
+
+ dev_dbg(tidss->dev, "%s done\n", __func__);
+
+ return 0;
+
+err_mode_config_cleanup:
+ drm_mode_config_cleanup(ddev);
+ return ret;
+}
+
+void tidss_modeset_cleanup(struct tidss_device *tidss)
+{
+ struct drm_device *ddev = &tidss->ddev;
+
+ drm_mode_config_cleanup(ddev);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h
new file mode 100644
index 000000000000..dda5625d0128
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_KMS_H__
+#define __TIDSS_KMS_H__
+
+struct tidss_device;
+
+int tidss_modeset_init(struct tidss_device *tidss);
+void tidss_modeset_cleanup(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
new file mode 100644
index 000000000000..798488948fc5
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_plane.h"
+
+/* drm_plane_helper_funcs */
+
+static int tidss_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+ const struct drm_format_info *finfo;
+ struct drm_crtc_state *crtc_state;
+ u32 hw_plane = tplane->hw_plane_id;
+ u32 hw_videoport;
+ int ret;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->crtc) {
+ /*
+ * The visible field is not reset by the DRM core but only
+ * updated by drm_plane_helper_check_state(), so set it manually.
+ */
+ state->visible = false;
+ return 0;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0,
+ INT_MAX, true, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The HW can only start drawing at a subpixel boundary (the first
+ * two checks below). At the end of a row the HW can only jump an
+ * integer number of subpixels forward to the beginning of the next
+ * row, so the picture must have an integer subpixel width (the
+ * third check). However, after reaching the end of the drawn
+ * picture the drawing starts again at the absolute memory address
+ * of the picture's top left corner, so there is no need to check
+ * for odd height.
+ */
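+ /*
+ * For example, with NV12 (hsub = vsub = 2) an odd src_x, src_y or
+ * src_w in whole pixels fails the checks below.
+ */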
+
+ finfo = drm_format_info(state->fb->format->format);
+
+ if ((state->src_x >> 16) % finfo->hsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: x-position %u not divisible subpixel size %u\n",
+ __func__, (state->src_x >> 16), finfo->hsub);
+ return -EINVAL;
+ }
+
+ if ((state->src_y >> 16) % finfo->vsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: y-position %u not divisible subpixel size %u\n",
+ __func__, (state->src_y >> 16), finfo->vsub);
+ return -EINVAL;
+ }
+
+ if ((state->src_w >> 16) % finfo->hsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: src width %u not divisible by subpixel size %u\n",
+ __func__, (state->src_w >> 16), finfo->hsub);
+ return -EINVAL;
+ }
+
+ if (!state->visible)
+ return 0;
+
+ hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+ ret = dispc_plane_check(tidss->dispc, hw_plane, state, hw_videoport);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void tidss_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ u32 hw_videoport;
+ int ret;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->visible) {
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+ return;
+ }
+
+ hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+ ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
+ state, hw_videoport);
+
+ if (ret) {
+ dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
+ __func__, tplane->hw_plane_id);
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+ return;
+ }
+
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
+}
+
+static void tidss_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+}
+
+static void drm_plane_destroy(struct drm_plane *plane)
+{
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ drm_plane_cleanup(plane);
+ kfree(tplane);
+}
+
+static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+ .atomic_check = tidss_plane_atomic_check,
+ .atomic_update = tidss_plane_atomic_update,
+ .atomic_disable = tidss_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs tidss_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = drm_atomic_helper_plane_reset,
+ .destroy = drm_plane_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ u32 hw_plane_id, u32 plane_type,
+ u32 crtc_mask, const u32 *formats,
+ u32 num_formats)
+{
+ struct tidss_plane *tplane;
+ enum drm_plane_type type;
+ u32 possible_crtcs;
+ u32 num_planes = tidss->feat->num_planes;
+ u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709));
+ u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE));
+ u32 default_encoding = DRM_COLOR_YCBCR_BT601;
+ u32 default_range = DRM_COLOR_YCBCR_FULL_RANGE;
+ u32 blend_modes = (BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ int ret;
+
+ tplane = kzalloc(sizeof(*tplane), GFP_KERNEL);
+ if (!tplane)
+ return ERR_PTR(-ENOMEM);
+
+ tplane->hw_plane_id = hw_plane_id;
+
+ possible_crtcs = crtc_mask;
+ type = plane_type;
+
+ ret = drm_universal_plane_init(&tidss->ddev, &tplane->plane,
+ possible_crtcs,
+ &tidss_plane_funcs,
+ formats, num_formats,
+ NULL, type, NULL);
+ if (ret < 0)
+ goto err;
+
+ drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
+
+ drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
+ num_planes - 1);
+
+ ret = drm_plane_create_color_properties(&tplane->plane,
+ color_encodings,
+ color_ranges,
+ default_encoding,
+ default_range);
+ if (ret)
+ goto err;
+
+ ret = drm_plane_create_alpha_property(&tplane->plane);
+ if (ret)
+ goto err;
+
+ ret = drm_plane_create_blend_mode_property(&tplane->plane, blend_modes);
+ if (ret)
+ goto err;
+
+ return tplane;
+
+err:
+ kfree(tplane);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
new file mode 100644
index 000000000000..80ff1c5a2535
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_PLANE_H__
+#define __TIDSS_PLANE_H__
+
+#define to_tidss_plane(p) container_of((p), struct tidss_plane, plane)
+
+struct tidss_device;
+
+struct tidss_plane {
+ struct drm_plane plane;
+
+ u32 hw_plane_id;
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ u32 hw_plane_id, u32 plane_type,
+ u32 crtc_mask, const u32 *formats,
+ u32 num_formats);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.c b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
new file mode 100644
index 000000000000..5ec68389cc68
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include "tidss_scale_coefs.h"
+
+/*
+ * These are interpolated with a custom python script from DSS5
+ * (drivers/gpu/drm/omapdrm/dss/dispc_coef.c) coefficients.
+ */
+static const struct tidss_scale_coefs coef5_m32 = {
+ .c2 = { 28, 34, 40, 46, 52, 58, 64, 70, 0, 2, 4, 8, 12, 16, 20, 24, },
+ .c1 = { 132, 138, 144, 150, 156, 162, 168, 174, 76, 84, 92, 98, 104, 110, 116, 124, },
+ .c0 = { 192, 192, 192, 190, 188, 186, 184, 182, 180, },
+};
+
+static const struct tidss_scale_coefs coef5_m26 = {
+ .c2 = { 24, 28, 32, 38, 44, 50, 56, 64, 0, 2, 4, 6, 8, 12, 16, 20, },
+ .c1 = { 132, 138, 144, 152, 160, 166, 172, 178, 72, 80, 88, 94, 100, 108, 116, 124, },
+ .c0 = { 200, 202, 204, 202, 200, 196, 192, 188, 184, },
+};
+
+static const struct tidss_scale_coefs coef5_m22 = {
+ .c2 = { 16, 20, 24, 30, 36, 42, 48, 56, 0, 0, 0, 2, 4, 8, 12, 14, },
+ .c1 = { 132, 140, 148, 156, 164, 172, 180, 186, 64, 72, 80, 88, 96, 104, 112, 122, },
+ .c0 = { 216, 216, 216, 214, 212, 208, 204, 198, 192, },
+};
+
+static const struct tidss_scale_coefs coef5_m19 = {
+ .c2 = { 12, 14, 16, 22, 28, 34, 40, 48, 0, 0, 0, 2, 4, 4, 4, 8, },
+ .c1 = { 128, 140, 152, 160, 168, 176, 184, 192, 56, 64, 72, 82, 92, 100, 108, 118, },
+ .c0 = { 232, 232, 232, 226, 220, 218, 216, 208, 200, },
+};
+
+static const struct tidss_scale_coefs coef5_m16 = {
+ .c2 = { 0, 2, 4, 8, 12, 18, 24, 32, 0, 0, 0, -2, -4, -4, -4, -2, },
+ .c1 = { 124, 138, 152, 164, 176, 186, 196, 206, 40, 48, 56, 68, 80, 90, 100, 112, },
+ .c0 = { 264, 262, 260, 254, 248, 242, 236, 226, 216, },
+};
+
+static const struct tidss_scale_coefs coef5_m14 = {
+ .c2 = { -8, -6, -4, -2, 0, 6, 12, 18, 0, -2, -4, -6, -8, -8, -8, -8, },
+ .c1 = { 120, 134, 148, 164, 180, 194, 208, 220, 24, 32, 40, 52, 64, 78, 92, 106, },
+ .c0 = { 288, 286, 284, 280, 276, 266, 256, 244, 232, },
+};
+
+static const struct tidss_scale_coefs coef5_m13 = {
+ .c2 = { -12, -12, -12, -10, -8, -4, 0, 6, 0, -2, -4, -6, -8, -10, -12, -12, },
+ .c1 = { 112, 130, 148, 164, 180, 196, 212, 228, 12, 22, 32, 44, 56, 70, 84, 98, },
+ .c0 = { 312, 308, 304, 298, 292, 282, 272, 258, 244, },
+};
+
+static const struct tidss_scale_coefs coef5_m12 = {
+ .c2 = { -16, -18, -20, -18, -16, -14, -12, -6, 0, -2, -4, -6, -8, -10, -12, -14, },
+ .c1 = { 104, 124, 144, 164, 184, 202, 220, 238, 0, 10, 20, 30, 40, 56, 72, 88, },
+ .c0 = { 336, 332, 328, 320, 312, 300, 288, 272, 256, },
+};
+
+static const struct tidss_scale_coefs coef5_m11 = {
+ .c2 = { -20, -22, -24, -24, -24, -24, -24, -20, 0, -2, -4, -6, -8, -10, -12, -16, },
+ .c1 = { 92, 114, 136, 158, 180, 204, 228, 250, -16, -8, 0, 12, 24, 38, 52, 72, },
+ .c0 = { 368, 364, 360, 350, 340, 326, 312, 292, 272, },
+};
+
+static const struct tidss_scale_coefs coef5_m10 = {
+ .c2 = { -16, -20, -24, -28, -32, -34, -36, -34, 0, 0, 0, -2, -4, -8, -12, -14, },
+ .c1 = { 72, 96, 120, 148, 176, 204, 232, 260, -32, -26, -20, -10, 0, 16, 32, 52, },
+ .c0 = { 400, 398, 396, 384, 372, 354, 336, 312, 288, },
+};
+
+static const struct tidss_scale_coefs coef5_m9 = {
+ .c2 = { -12, -18, -24, -28, -32, -38, -44, -46, 0, 2, 4, 2, 0, -2, -4, -8, },
+ .c1 = { 40, 68, 96, 128, 160, 196, 232, 268, -48, -46, -44, -36, -28, -14, 0, 20, },
+ .c0 = { 456, 450, 444, 428, 412, 388, 364, 334, 304, },
+};
+
+static const struct tidss_scale_coefs coef5_m8 = {
+ .c2 = { 0, -4, -8, -16, -24, -32, -40, -48, 0, 2, 4, 6, 8, 6, 4, 2, },
+ .c1 = { 0, 28, 56, 94, 132, 176, 220, 266, -56, -60, -64, -62, -60, -50, -40, -20, },
+ .c0 = { 512, 506, 500, 478, 456, 424, 392, 352, 312, },
+};
+
+static const struct tidss_scale_coefs coef3_m32 = {
+ .c1 = { 108, 92, 76, 62, 48, 36, 24, 140, 256, 236, 216, 198, 180, 162, 144, 126, },
+ .c0 = { 296, 294, 292, 288, 284, 278, 272, 136, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m26 = {
+ .c1 = { 104, 90, 76, 60, 44, 32, 20, 138, 256, 236, 216, 198, 180, 160, 140, 122, },
+ .c0 = { 304, 300, 296, 292, 288, 282, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m22 = {
+ .c1 = { 100, 84, 68, 54, 40, 30, 20, 138, 256, 236, 216, 196, 176, 156, 136, 118, },
+ .c0 = { 312, 310, 308, 302, 296, 286, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m19 = {
+ .c1 = { 96, 80, 64, 50, 36, 26, 16, 136, 256, 236, 216, 194, 172, 152, 132, 114, },
+ .c0 = { 320, 318, 316, 310, 304, 292, 280, 140, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m16 = {
+ .c1 = { 88, 72, 56, 44, 32, 22, 12, 134, 256, 234, 212, 190, 168, 148, 128, 108, },
+ .c0 = { 336, 332, 328, 320, 312, 300, 288, 144, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m14 = {
+ .c1 = { 80, 64, 48, 36, 24, 16, 8, 132, 256, 232, 208, 186, 164, 142, 120, 100, },
+ .c0 = { 352, 348, 344, 334, 324, 310, 296, 148, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m13 = {
+ .c1 = { 72, 56, 40, 30, 20, 12, 4, 130, 256, 232, 208, 184, 160, 136, 112, 92, },
+ .c0 = { 368, 364, 360, 346, 332, 316, 300, 150, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m12 = {
+ .c1 = { 64, 50, 36, 26, 16, 10, 4, 130, 256, 230, 204, 178, 152, 128, 104, 84, },
+ .c0 = { 384, 378, 372, 358, 344, 324, 304, 152, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m11 = {
+ .c1 = { 56, 40, 24, 16, 8, 4, 0, 128, 256, 228, 200, 172, 144, 120, 96, 76, },
+ .c0 = { 400, 396, 392, 376, 360, 336, 312, 156, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m10 = {
+ .c1 = { 40, 26, 12, 6, 0, -2, -4, 126, 256, 226, 196, 166, 136, 110, 84, 62, },
+ .c0 = { 432, 424, 416, 396, 376, 348, 320, 160, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m9 = {
+ .c1 = { 24, 12, 0, -4, -8, -8, -8, 124, 256, 222, 188, 154, 120, 92, 64, 44, },
+ .c0 = { 464, 456, 448, 424, 400, 366, 332, 166, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m8 = {
+ .c1 = { 0, -8, -16, -16, -16, -12, -8, 124, 256, 214, 172, 134, 96, 66, 36, 18, },
+ .c0 = { 512, 502, 492, 462, 432, 390, 348, 174, 256, },
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+ u32 firinc,
+ bool five_taps)
+{
+ int i;
+ int inc;
+ static const struct {
+ int mmin;
+ int mmax;
+ const struct tidss_scale_coefs *coef3;
+ const struct tidss_scale_coefs *coef5;
+ const char *name;
+ } coefs[] = {
+ { 27, 32, &coef3_m32, &coef5_m32, "M32" },
+ { 23, 26, &coef3_m26, &coef5_m26, "M26" },
+ { 20, 22, &coef3_m22, &coef5_m22, "M22" },
+ { 17, 19, &coef3_m19, &coef5_m19, "M19" },
+ { 15, 16, &coef3_m16, &coef5_m16, "M16" },
+ { 14, 14, &coef3_m14, &coef5_m14, "M14" },
+ { 13, 13, &coef3_m13, &coef5_m13, "M13" },
+ { 12, 12, &coef3_m12, &coef5_m12, "M12" },
+ { 11, 11, &coef3_m11, &coef5_m11, "M11" },
+ { 10, 10, &coef3_m10, &coef5_m10, "M10" },
+ { 9, 9, &coef3_m9, &coef5_m9, "M9" },
+ { 4, 8, &coef3_m8, &coef5_m8, "M8" },
+ /*
+ * When upscaling more than two times, blockiness and outlines
+ * around the image are observed when M8 tables are used. M11,
+ * M16 and M19 tables are used to prevent this.
+ */
+ { 3, 3, &coef3_m11, &coef5_m11, "M11" },
+ { 2, 2, &coef3_m16, &coef5_m16, "M16" },
+ { 0, 1, &coef3_m19, &coef5_m19, "M19" },
+ };
+
+ /*
+ * firinc is the result of 0x200000 * in_size / out_size. Dividing it
+ * by 0x40000 scales it down to 8 * in_size / out_size, so after the
+ * division the actual scaling factor is 8/inc.
+ */
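+ /*
+ * For example, a 2x downscale (in_size = 2 * out_size) gives
+ * firinc = 0x400000, so inc = 16 and the M16 tables are used.
+ */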
+ inc = firinc / 0x40000;
+ for (i = 0; i < ARRAY_SIZE(coefs); ++i) {
+ if (inc >= coefs[i].mmin && inc <= coefs[i].mmax) {
+ if (five_taps)
+ return coefs[i].coef5;
+ else
+ return coefs[i].coef3;
+ }
+ }
+
+ dev_err(dev, "%s: Coefficients not found for firinc 0x%08x, inc %d\n",
+ __func__, firinc, inc);
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
new file mode 100644
index 000000000000..64b5af5b5361
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_COEF_H__
+#define __TIDSS_DISPC_COEF_H__
+
+#include <linux/types.h>
+
+struct tidss_scale_coefs {
+ s16 c2[16];
+ s16 c1[16];
+ u16 c0[9];
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+ u32 firinc,
+ bool five_taps);
+
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 51d034e095f4..28b7f703236e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -95,7 +95,7 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
priv->external_encoder->possible_crtcs = BIT(0);
- ret = drm_bridge_attach(priv->external_encoder, bridge, NULL);
+ ret = drm_bridge_attach(priv->external_encoder, bridge, NULL, 0);
if (ret) {
dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index a46ac284dd5e..4160e74e4751 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -47,6 +47,20 @@ config TINYDRM_ILI9341
If M is selected the module will be called ili9341.
+config TINYDRM_ILI9486
+ tristate "DRM support for ILI9486 display panels"
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the following Ilitek ILI9486 panels:
+ * PISCREEN 3.5" 320x480 TFT (Ozzmaker 3.5")
+ * RPILCD 3.5" 320x480 TFT (Waveshare 3.5")
+
+ If M is selected the module will be called ili9486.
+
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
@@ -85,14 +99,16 @@ config TINYDRM_ST7586
If M is selected the module will be called st7586.
config TINYDRM_ST7735R
- tristate "DRM support for Sitronix ST7735R display panels"
+ tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
- DRM driver Sitronix ST7735R with one of the following LCDs:
- * JD-T18003-T01 1.8" 128x160 TFT
+ DRM driver for Sitronix ST7715R/ST7735R with one of the following
+ LCDs:
+ * Jianda JD-T18003-T01 1.8" 128x160 TFT
+ * Okaya RH128128T 1.44" 128x128 TFT
If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 896cf31132d3..c96ceee71453 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
+obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 94fb1f593564..a48173441ae0 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -22,7 +22,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
static bool eco_mode;
module_param(eco_mode, bool, 0644);
@@ -610,18 +609,10 @@ static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index c66acc566c2b..802fb8dde1b6 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -26,7 +26,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#define ILI9225_DRIVER_READ_CODE 0x00
#define ILI9225_DRIVER_OUTPUT_CONTROL 0x01
@@ -165,18 +164,10 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
ili9225_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
new file mode 100644
index 000000000000..532560aebb1e
--- /dev/null
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9486 panels
+ *
+ * Copyright 2020 Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modeset_helper.h>
+
+#define ILI9486_ITFCTR1 0xb0
+#define ILI9486_PWCTRL1 0xc2
+#define ILI9486_VMCTRL1 0xc5
+#define ILI9486_PGAMCTRL 0xe0
+#define ILI9486_NGAMCTRL 0xe1
+#define ILI9486_DGAMCTRL 0xe2
+#define ILI9486_MADCTL_BGR BIT(3)
+#define ILI9486_MADCTL_MV BIT(5)
+#define ILI9486_MADCTL_MX BIT(6)
+#define ILI9486_MADCTL_MY BIT(7)
+
+/*
+ * The PiScreen/waveshare rpi-lcd-35 has a SPI to 16-bit parallel bus converter
+ * in front of the display controller. This means that 8-bit values have to be
+ * transferred as 16-bit.
+ */
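+/*
+ * For example, the one-byte command 0x2c (memory write) is padded to the
+ * 16-bit word 0x002c and sent big-endian as the two bytes 0x00, 0x2c.
+ */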
+static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ size_t num)
+{
+ struct spi_device *spi = mipi->spi;
+ void *data = par;
+ u32 speed_hz;
+ int i, ret;
+ __be16 *buf;
+
+ buf = kmalloc(32 * sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /*
+ * The displays are Raspberry Pi HATs and connected to the 8-bit-only
+ * SPI controller, so the 16-bit command and parameter words need byte
+ * swapping before being transferred as 8-bit on the big-endian SPI bus.
+ * Pixel data bytes have already been swapped before this function is
+ * called.
+ */
+ buf[0] = cpu_to_be16(*cmd);
+ gpiod_set_value_cansleep(mipi->dc, 0);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 2);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, 2);
+ if (ret || !num)
+ goto free;
+
+ /* 8-bit configuration data, not 16-bit pixel data */
+ if (num <= 32) {
+ for (i = 0; i < num; i++)
+ buf[i] = cpu_to_be16(par[i]);
+ num *= 2;
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ data = buf;
+ }
+
+ gpiod_set_value_cansleep(mipi->dc, 1);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
+ free:
+ kfree(buf);
+
+ return ret;
+}
+
+static void waveshare_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ u8 addr_mode;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
+ if (ret < 0)
+ goto out_exit;
+ if (ret == 1)
+ goto out_enable;
+
+ mipi_dbi_command(dbi, ILI9486_ITFCTR1);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(250);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+
+ mipi_dbi_command(dbi, ILI9486_PWCTRL1, 0x44);
+
+ mipi_dbi_command(dbi, ILI9486_VMCTRL1, 0x00, 0x00, 0x00, 0x00);
+
+ mipi_dbi_command(dbi, ILI9486_PGAMCTRL,
+ 0x0F, 0x1F, 0x1C, 0x0C, 0x0F, 0x08, 0x48, 0x98,
+ 0x37, 0x0A, 0x13, 0x04, 0x11, 0x0D, 0x0);
+ mipi_dbi_command(dbi, ILI9486_NGAMCTRL,
+ 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+ mipi_dbi_command(dbi, ILI9486_DGAMCTRL,
+ 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+ out_enable:
+ switch (dbidev->rotation) {
+ case 90:
+ addr_mode = ILI9486_MADCTL_MY;
+ break;
+ case 180:
+ addr_mode = ILI9486_MADCTL_MV;
+ break;
+ case 270:
+ addr_mode = ILI9486_MADCTL_MX;
+ break;
+ default:
+ addr_mode = ILI9486_MADCTL_MV | ILI9486_MADCTL_MY |
+ ILI9486_MADCTL_MX;
+ break;
+ }
+ addr_mode |= ILI9486_MADCTL_BGR;
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
+ out_exit:
+ drm_dev_exit(idx);
+}
+
+static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
+ .enable = waveshare_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = mipi_dbi_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode waveshare_mode = {
+ DRM_SIMPLE_MODE(480, 320, 73, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
+
+static struct drm_driver ili9486_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &ili9486_fops,
+ .release = mipi_dbi_release,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "ili9486",
+ .desc = "Ilitek ILI9486",
+ .date = "20200118",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9486_of_match[] = {
+ { .compatible = "waveshare,rpi-lcd-35" },
+ { .compatible = "ozzmaker,piscreen" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ili9486_of_match);
+
+static const struct spi_device_id ili9486_id[] = {
+ { "ili9486", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ili9486_id);
+
+static int ili9486_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
+ struct drm_device *drm;
+ struct mipi_dbi *dbi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
+ return -ENOMEM;
+
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
+ if (ret) {
+ kfree(dbidev);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(dbi->reset);
+ }
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
+ if (ret)
+ return ret;
+
+ dbi->command = waveshare_command;
+ dbi->read_commands = NULL;
+
+ ret = mipi_dbi_dev_init(dbidev, &waveshare_pipe_funcs,
+ &waveshare_mode, rotation);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
+}
+
+static int ili9486_remove(struct spi_device *spi)
+{
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
+
+static void ili9486_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
+
+static struct spi_driver ili9486_spi_driver = {
+ .driver = {
+ .name = "ili9486",
+ .of_match_table = ili9486_of_match,
+ },
+ .id_table = ili9486_id,
+ .probe = ili9486_probe,
+ .remove = ili9486_remove,
+ .shutdown = ili9486_shutdown,
+};
+module_spi_driver(ili9486_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9486 DRM driver");
+MODULE_AUTHOR("Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 76d179200775..f5ebcaf7ee3a 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -17,7 +17,7 @@
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/sched/clock.h>
#include <linux/spi/spi.h>
#include <linux/thermal.h>
@@ -33,13 +33,13 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#define REPAPER_RID_G2_COG_ID 0x12
enum repaper_model {
+ /* 0 is reserved to avoid clashing with NULL */
E1144CS021 = 1,
E1190CS021,
E2200CS021,
@@ -856,18 +856,10 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
repaper_fb_dirty(state->fb);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
@@ -995,21 +987,21 @@ static int repaper_probe(struct spi_device *spi)
{
const struct drm_display_mode *mode;
const struct spi_device_id *spi_id;
- const struct of_device_id *match;
struct device *dev = &spi->dev;
enum repaper_model model;
const char *thermal_zone;
struct repaper_epd *epd;
size_t line_buffer_size;
struct drm_device *drm;
+ const void *match;
int ret;
- match = of_match_device(repaper_of_match, dev);
+ match = device_get_match_data(dev);
if (match) {
- model = (enum repaper_model)match->data;
+ model = (enum repaper_model)match;
} else {
spi_id = spi_get_device_id(spi);
- model = spi_id->driver_data;
+ model = (enum repaper_model)spi_id->driver_data;
}
/* The SPI device is used to allocate dma memory */
@@ -1197,7 +1189,6 @@ static void repaper_shutdown(struct spi_device *spi)
static struct spi_driver repaper_spi_driver = {
.driver = {
.name = "repaper",
- .owner = THIS_MODULE,
.of_match_table = repaper_of_match,
},
.id_table = repaper_id,
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 060cc756194f..9ef559dd3191 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -23,7 +23,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
/* controller-specific commands */
#define ST7586_DISP_MODE_GRAY 0x38
@@ -159,18 +158,10 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
st7586_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 3f4487c71684..3cd9b8d9888d 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * DRM driver for Sitronix ST7735R panels
+ * DRM driver for display panels connected to a Sitronix ST7715R or ST7735R
+ * display controller in SPI mode.
*
* Copyright 2017 David Lechner <david@lechnology.com>
+ * Copyright (C) 2019 Glider bvba
*/
#include <linux/backlight.h>
@@ -37,12 +39,28 @@
#define ST7735R_MY BIT(7)
#define ST7735R_MX BIT(6)
#define ST7735R_MV BIT(5)
+#define ST7735R_RGB BIT(3)
+
+struct st7735r_cfg {
+ const struct drm_display_mode mode;
+ unsigned int left_offset;
+ unsigned int top_offset;
+ unsigned int write_only:1;
+ unsigned int rgb:1; /* RGB (vs. BGR) */
+};
+
+struct st7735r_priv {
+ struct mipi_dbi_dev dbidev; /* Must be first for .release() */
+ const struct st7735r_cfg *cfg;
+};
-static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static void st7735r_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct st7735r_priv *priv = container_of(dbidev, struct st7735r_priv,
+ dbidev);
struct mipi_dbi *dbi = &dbidev->dbi;
int ret, idx;
u8 addr_mode;
@@ -87,6 +105,10 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
addr_mode = ST7735R_MY | ST7735R_MV;
break;
}
+
+ if (priv->cfg->rgb)
+ addr_mode |= ST7735R_RGB;
+
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
@@ -109,15 +131,24 @@ out_exit:
drm_dev_exit(idx);
}
-static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
- .enable = jd_t18003_t01_pipe_enable,
+static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
+ .enable = st7735r_pipe_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
-static const struct drm_display_mode jd_t18003_t01_mode = {
- DRM_SIMPLE_MODE(128, 160, 28, 35),
+static const struct st7735r_cfg jd_t18003_t01_cfg = {
+ .mode = { DRM_SIMPLE_MODE(128, 160, 28, 35) },
+ /* Cannot read from Adafruit 1.8" display via SPI */
+ .write_only = true,
+};
+
+static const struct st7735r_cfg rh128128t_cfg = {
+ .mode = { DRM_SIMPLE_MODE(128, 128, 25, 26) },
+ .left_offset = 2,
+ .top_offset = 3,
+ .rgb = true,
};
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -136,13 +167,14 @@ static struct drm_driver st7735r_driver = {
};
static const struct of_device_id st7735r_of_match[] = {
- { .compatible = "jianda,jd-t18003-t01" },
+ { .compatible = "jianda,jd-t18003-t01", .data = &jd_t18003_t01_cfg },
+ { .compatible = "okaya,rh128128t", .data = &rh128128t_cfg },
{ },
};
MODULE_DEVICE_TABLE(of, st7735r_of_match);
static const struct spi_device_id st7735r_id[] = {
- { "jd-t18003-t01", 0 },
+ { "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg },
{ },
};
MODULE_DEVICE_TABLE(spi, st7735r_id);
@@ -150,17 +182,26 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ const struct st7735r_cfg *cfg;
struct mipi_dbi_dev *dbidev;
+ struct st7735r_priv *priv;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
+ cfg = device_get_match_data(&spi->dev);
+ if (!cfg)
+ cfg = (void *)spi_get_device_id(spi)->driver_data;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ dbidev = &priv->dbidev;
+ priv->cfg = cfg;
+
dbi = &dbidev->dbi;
drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
@@ -193,10 +234,14 @@ static int st7735r_probe(struct spi_device *spi)
if (ret)
return ret;
- /* Cannot read from Adafruit 1.8" display via SPI */
- dbi->read_commands = NULL;
+ if (cfg->write_only)
+ dbi->read_commands = NULL;
+
+ dbidev->left_offset = cfg->left_offset;
+ dbidev->top_offset = cfg->top_offset;
- ret = mipi_dbi_dev_init(dbidev, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &st7735r_pipe_funcs, &cfg->mode,
+ rotation);
if (ret)
return ret;
@@ -231,7 +276,6 @@ static void st7735r_shutdown(struct spi_device *spi)
static struct spi_driver st7735r_spi_driver = {
.driver = {
.name = "st7735r",
- .owner = THIS_MODULE,
.of_match_table = st7735r_of_match,
},
.id_table = st7735r_id,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5df596fb0280..9e07c3f75156 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -145,34 +145,12 @@ static inline uint32_t ttm_bo_type_flags(unsigned type)
return 1 << (type);
}
-static void ttm_bo_release_list(struct kref *list_kref)
-{
- struct ttm_buffer_object *bo =
- container_of(list_kref, struct ttm_buffer_object, list_kref);
- size_t acc_size = bo->acc_size;
-
- BUG_ON(kref_read(&bo->list_kref));
- BUG_ON(kref_read(&bo->kref));
- BUG_ON(bo->mem.mm_node != NULL);
- BUG_ON(!list_empty(&bo->lru));
- BUG_ON(!list_empty(&bo->ddestroy));
- ttm_tt_destroy(bo->ttm);
- atomic_dec(&ttm_bo_glob.bo_count);
- dma_fence_put(bo->moving);
- if (!ttm_bo_uses_embedded_gem_object(bo))
- dma_resv_fini(&bo->base._resv);
- bo->destroy(bo);
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
-}
-
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- dma_resv_assert_held(bo->base.resv);
-
if (!list_empty(&bo->lru))
return;
@@ -181,21 +159,14 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
man = &bdev->man[mem->mem_type];
list_add_tail(&bo->lru, &man->lru[bo->priority]);
- kref_get(&bo->list_kref);
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
- kref_get(&bo->list_kref);
}
}
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -203,12 +174,10 @@ static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
if (!list_empty(&bo->lru)) {
list_del_init(&bo->lru);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
@@ -372,14 +341,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
moved:
- if (bo->evicted) {
- if (bdev->driver->invalidate_caches) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
- }
- bo->evicted = false;
- }
+ bo->evicted = false;
if (bo->mem.mm_node)
bo->offset = (bo->mem.start << PAGE_SHIFT) +
@@ -428,92 +390,49 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
BUG_ON(!dma_resv_trylock(&bo->base._resv));
r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
+ dma_resv_unlock(&bo->base._resv);
if (r)
- dma_resv_unlock(&bo->base._resv);
+ return r;
+
+ if (bo->type != ttm_bo_type_sg) {
+ /* This works because the BO is about to be destroyed and nobody
+ * references it anymore. The only tricky case is the trylock on
+ * the resv object while holding the lru_lock.
+ */
+ spin_lock(&ttm_bo_glob.lru_lock);
+ bo->base.resv = &bo->base._resv;
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ }
return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
+ struct dma_resv *resv = &bo->base._resv;
struct dma_resv_list *fobj;
struct dma_fence *fence;
int i;
- fobj = dma_resv_get_list(&bo->base._resv);
- fence = dma_resv_get_excl(&bo->base._resv);
+ rcu_read_lock();
+ fobj = rcu_dereference(resv->fence);
+ fence = rcu_dereference(resv->fence_excl);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(bo->base.resv));
+ fence = rcu_dereference(fobj->shared[i]);
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
}
-}
-
-static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret;
-
- ret = ttm_bo_individualize_resv(bo);
- if (ret) {
- /* Last resort, if we fail to allocate memory for the
- * fences block for the BO to become idle
- */
- dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
- 30 * HZ);
- spin_lock(&ttm_bo_glob.lru_lock);
- goto error;
- }
-
- spin_lock(&ttm_bo_glob.lru_lock);
- ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
- if (!ret) {
- if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
- ttm_bo_del_from_lru(bo);
- spin_unlock(&ttm_bo_glob.lru_lock);
- if (bo->base.resv != &bo->base._resv)
- dma_resv_unlock(&bo->base._resv);
-
- ttm_bo_cleanup_memtype_use(bo);
- dma_resv_unlock(bo->base.resv);
- return;
- }
-
- ttm_bo_flush_all_fences(bo);
-
- /*
- * Make NO_EVICT bos immediately available to
- * shrinkers, now that they are queued for
- * destruction.
- */
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_move_to_lru_tail(bo, NULL);
- }
-
- dma_resv_unlock(bo->base.resv);
- }
- if (bo->base.resv != &bo->base._resv)
- dma_resv_unlock(&bo->base._resv);
-
-error:
- kref_get(&bo->list_kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&ttm_bo_glob.lru_lock);
-
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
+ rcu_read_unlock();
}
/**
* function ttm_bo_cleanup_refs
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, do nothing.
+ * If bo idle, remove from lru lists, and unref.
+ * If not idle, block if possible.
*
* Must be called with lru_lock and reservation held, this function
* will drop the lru lock and optionally the reservation lock before returning.
@@ -527,14 +446,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct dma_resv *resv;
+ struct dma_resv *resv = &bo->base._resv;
int ret;
- if (unlikely(list_empty(&bo->ddestroy)))
- resv = bo->base.resv;
- else
- resv = &bo->base._resv;
-
if (dma_resv_test_signaled_rcu(resv, true))
ret = 0;
else
@@ -547,9 +461,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&ttm_bo_glob.lru_lock);
- lret = dma_resv_wait_timeout_rcu(resv, true,
- interruptible,
- 30 * HZ);
+ lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+ 30 * HZ);
if (lret < 0)
return lret;
@@ -581,14 +494,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
-
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+
return 0;
}
@@ -610,8 +523,9 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
ddestroy);
- kref_get(&bo->list_kref);
list_move_tail(&bo->ddestroy, &removed);
+ if (!ttm_bo_get_unless_zero(bo))
+ continue;
if (remove_all || bo->base.resv != &bo->base._resv) {
spin_unlock(&glob->lru_lock);
@@ -626,7 +540,7 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
spin_unlock(&glob->lru_lock);
}
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
spin_lock(&glob->lru_lock);
}
list_splice_tail(&removed, &bdev->ddestroy);
@@ -652,16 +566,69 @@ static void ttm_bo_release(struct kref *kref)
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ size_t acc_size = bo->acc_size;
+ int ret;
- if (bo->bdev->driver->release_notify)
- bo->bdev->driver->release_notify(bo);
+ if (!bo->deleted) {
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort, if we fail to allocate memory for the
+ * fences, block for the BO to become idle
+ */
+ dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
+ 30 * HZ);
+ }
- drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
- ttm_mem_io_lock(man, false);
- ttm_mem_io_free_vm(bo);
- ttm_mem_io_unlock(man);
- ttm_bo_cleanup_refs_or_queue(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ if (bo->bdev->driver->release_notify)
+ bo->bdev->driver->release_notify(bo);
+
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
+ }
+
+ if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+ /* The BO is not idle, resurrect it for delayed destroy */
+ ttm_bo_flush_all_fences(bo);
+ bo->deleted = true;
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+
+ /*
+ * Make NO_EVICT bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ */
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+ bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ }
+
+ kref_init(&bo->kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ return;
+ }
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ list_del(&bo->ddestroy);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+
+ ttm_bo_cleanup_memtype_use(bo);
+
+ BUG_ON(bo->mem.mm_node != NULL);
+ atomic_dec(&ttm_bo_glob.bo_count);
+ dma_fence_put(bo->moving);
+ if (!ttm_bo_uses_embedded_gem_object(bo))
+ dma_resv_fini(&bo->base._resv);
+ bo->destroy(bo);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
void ttm_bo_put(struct ttm_buffer_object *bo)
@@ -764,8 +731,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
- if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
- || !list_empty(&bo->ddestroy))
+ if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
ret = true;
*locked = false;
if (busy)
@@ -846,6 +812,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
dma_resv_unlock(bo->base.resv);
continue;
}
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
break;
}
@@ -857,21 +828,19 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (!bo) {
- if (busy_bo)
- kref_get(&busy_bo->list_kref);
+ if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
+ busy_bo = NULL;
spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
- kref_put(&busy_bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(busy_bo);
return ret;
}
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
ctx->no_wait_gpu, locked);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -881,7 +850,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (locked)
ttm_bo_unreserve(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -1226,6 +1195,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
+
+ /*
+ * Remove the backing store if no placement is given.
+ */
+ if (!placement->num_placement && !placement->num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
+
/*
* Check whether we need to move buffer.
*/
@@ -1293,7 +1274,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
- kref_init(&bo->list_kref);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1813,11 +1793,18 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
- NULL)) {
- ret = 0;
- break;
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ NULL))
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ continue;
}
+
+ ret = 0;
+ break;
}
if (!ret)
break;
@@ -1828,11 +1815,9 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
return ret;
}
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -1886,7 +1871,7 @@ out:
*/
if (locked)
dma_resv_unlock(bo->base.resv);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);
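
The ttm_bo.c rework above collapses the separate list_kref into the main kref: a buffer whose fences are still busy at release time is "resurrected" by re-initializing its kref and parking it on the ddestroy list, and lockless LRU walkers now take their references with ttm_bo_get_unless_zero(). A reduced sketch of that lifetime pattern, assuming hypothetical obj_is_idle()/obj_free() helpers and a standalone ddestroy_list:

#include <linux/kref.h>
#include <linux/list.h>

struct obj {
	struct kref kref;
	struct list_head ddestroy;
	bool deleted;
};

static LIST_HEAD(ddestroy_list);	/* stand-in for bdev->ddestroy */
static bool obj_is_idle(struct obj *o);	/* assumed: all fences signaled? */
static void obj_free(struct obj *o);	/* assumed final teardown */

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, kref);

	if (!o->deleted && !obj_is_idle(o)) {
		/* resurrect: the delayed-destroy worker owns a fresh ref */
		o->deleted = true;
		kref_init(&o->kref);
		list_add_tail(&o->ddestroy, &ddestroy_list);
		return;
	}
	obj_free(o);
}

/* lockless list walkers must use the _unless_zero form, since the
 * refcount may already have dropped to zero under them */
static bool obj_get_unless_zero(struct obj *o)
{
	return kref_get_unless_zero(&o->kref);
}
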
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 953c82a4f573..52d2b71f1588 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -507,11 +507,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
- kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- if (bo->base.resv == &bo->base._resv)
+ if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 389128b8c4dd..0ad30b112982 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -59,9 +59,10 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* If possible, avoid waiting for GPU with mmap_sem
- * held.
+ * held. We only do this if the fault allows retry and this
+ * is the first attempt.
*/
- if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault_flag_allow_retry_first(vmf->flags)) {
ret = VM_FAULT_RETRY;
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
@@ -135,7 +136,12 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
* for the buffer to become unreserved.
*/
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
- if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ /*
+ * If the fault allows retry and this is the first
+ * fault attempt, we try to release the mmap_sem
+ * before waiting
+ */
+ if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
@@ -156,6 +162,89 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
+ * @vmf: Fault data
+ * @bo: The buffer object
+ * @page_offset: Page offset from bo start
+ * @fault_page_size: The size of the fault in pages.
+ * @pgprot: The page protections.
+ * Checks whether it is possible to insert a PUD or PMD pfn and, if so,
+ * performs the insertion.
+ *
+ * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
+ * a huge fault was not possible or an insertion error occurred.
+ */
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+ struct ttm_buffer_object *bo,
+ pgoff_t page_offset,
+ pgoff_t fault_page_size,
+ pgprot_t pgprot)
+{
+ pgoff_t i;
+ vm_fault_t ret;
+ unsigned long pfn;
+ pfn_t pfnt;
+ struct ttm_tt *ttm = bo->ttm;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+ /* Fault should not cross bo boundary. */
+ page_offset &= ~(fault_page_size - 1);
+ if (page_offset + fault_page_size > bo->num_pages)
+ goto out_fallback;
+
+ if (bo->mem.bus.is_iomem)
+ pfn = ttm_bo_io_mem_pfn(bo, page_offset);
+ else
+ pfn = page_to_pfn(ttm->pages[page_offset]);
+
+ /* pfn must be fault_page_size aligned. */
+ if ((pfn & (fault_page_size - 1)) != 0)
+ goto out_fallback;
+
+ /* Check that memory is contiguous. */
+ if (!bo->mem.bus.is_iomem) {
+ for (i = 1; i < fault_page_size; ++i) {
+ if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
+ goto out_fallback;
+ }
+ } else if (bo->bdev->driver->io_mem_pfn) {
+ for (i = 1; i < fault_page_size; ++i) {
+ if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
+ goto out_fallback;
+ }
+ }
+
+ pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
+ if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
+ ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
+ ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
+#endif
+ else
+ WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
+
+ if (ret != VM_FAULT_NOPAGE)
+ goto out_fallback;
+
+ return VM_FAULT_NOPAGE;
+out_fallback:
+ count_vm_event(THP_FAULT_FALLBACK);
+ return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+ struct ttm_buffer_object *bo,
+ pgoff_t page_offset,
+ pgoff_t fault_page_size,
+ pgprot_t pgprot)
+{
+ return VM_FAULT_FALLBACK;
+}
+#endif
+
/**
* ttm_bo_vm_fault_reserved - TTM fault helper
* @vmf: The struct vm_fault given as argument to the fault callback
@@ -163,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve);
* @num_prefault: Maximum number of prefault pages. The caller may want to
* specify this based on madvise settings and the size of the GPU object
* backed by the memory.
+ * @fault_page_size: The size of the fault in pages.
*
* This function inserts one or more page table entries pointing to the
* memory backing the buffer object, and then returns a return code
@@ -176,7 +266,8 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve);
*/
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
- pgoff_t num_prefault)
+ pgoff_t num_prefault,
+ pgoff_t fault_page_size)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -268,6 +359,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
prot = pgprot_decrypted(prot);
}
+ /* We don't prefault on huge faults. Yet. */
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
+ ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+ fault_page_size, prot);
+ goto out_io_unlock;
+ }
+
/*
* Speculatively prefault a number of pages. Only error on
* first page.
@@ -334,7 +432,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
return ret;
prot = vma->vm_page_prot;
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
@@ -445,7 +543,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
+ .access = ttm_bo_vm_access,
};
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
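
With the new fault_page_size parameter, a driver that wants transparent-hugepage mappings can route its vm_ops->huge_fault handler through ttm_bo_vm_fault_reserved(); anything the helper cannot satisfy falls back to 4K faults via ttm_bo_vm_insert_huge()'s VM_FAULT_FALLBACK. A sketch of such a handler, mirroring the shape of ttm_bo_vm_fault() above (my_huge_fault is an assumed name, not an API in this series):

static vm_fault_t my_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	pgoff_t fault_page_size;
	vm_fault_t ret;

	switch (pe_size) {
	case PE_SIZE_PMD:
		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
		break;
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	case PE_SIZE_PUD:
		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
		break;
#endif
	default:
		return VM_FAULT_FALLBACK;	/* PTE faults use ->fault */
	}

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* num_prefault is 1: huge faults are not prefaulted (yet) */
	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       1, fault_page_size);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}
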
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bf876faea592..faefaaef7909 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -604,7 +604,7 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
p = pool->name;
for (i = 0; i < ARRAY_SIZE(t); i++) {
if (type & t[i]) {
- p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
"%s", n[i]);
}
}
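
The snprintf()-to-scnprintf() switch above matters whenever the return value is used to advance a write pointer: snprintf() returns the length the string would have had, so on truncation p can be pushed past the end of pool->name, whereas scnprintf() returns the number of bytes actually written. A minimal illustration of the accumulation idiom:

	char buf[8];
	char *p = buf;

	/* scnprintf() can never advance p past buf + sizeof(buf) - 1;
	 * with snprintf(), the second call below could be handed a
	 * bogus remaining size computed from an out-of-bounds p. */
	p += scnprintf(p, sizeof(buf) - (p - buf), "%s", "cached");
	p += scnprintf(p, sizeof(buf) - (p - buf), "%s", "dma32");
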
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index e9671d38b4a0..0afdfb0d1fe1 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -109,7 +109,6 @@ static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
};
static const struct drm_connector_funcs udl_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 22af17959053..d59ebac70b15 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -375,8 +375,6 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
- crtc_state->no_vblank = true;
-
buf = (char *)udl->mode_buf;
/* This first section has to do with setting the base address on the
@@ -428,14 +426,6 @@ udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
}
-static int
-udl_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
-{
- return 0;
-}
-
static void
udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
@@ -457,7 +447,6 @@ struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
.mode_valid = udl_simple_display_pipe_mode_valid,
.enable = udl_simple_display_pipe_enable,
.disable = udl_simple_display_pipe_disable,
- .check = udl_simple_display_pipe_check,
.update = udl_simple_display_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9a35c555ec52..ac2603334587 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -254,27 +254,42 @@ struct v3d_csd_job {
};
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define wait_for(COND, MS) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
break; \
} \
- msleep(1); \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
/* nsecs_to_jiffies64() does not guard against overflow */
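
The rewritten macro (the same shape i915 uses) polls with usleep_range() and doubles the wait between Wmin and Wmax microseconds instead of a fixed msleep(1), and the might_sleep() documents that wait_for() is now only valid in sleepable context. A hypothetical caller (regs, MY_STATUS and MY_IDLE_BIT are assumptions, not registers from this driver):

	int err;

	/* poll a status register for up to 10 ms, backing off from
	 * 10 us towards 1 ms between reads */
	err = wait_for(readl(regs + MY_STATUS) & MY_IDLE_BIT, 10);
	if (err == -ETIMEDOUT)
		DRM_ERROR("timed out waiting for idle\n");
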
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 8512d970a09f..ac8f75db2ecd 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -41,6 +41,10 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
return -ENODEV;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb");
+ if (ret)
+ return ret;
+
vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
if (!vbox)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 19612132c8a3..0883a435e62b 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -18,7 +18,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "hgsmi_channels.h"
#include "vbox_drv.h"
@@ -226,17 +225,6 @@ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_pending_vblank_event *event;
- unsigned long flags;
-
- if (crtc->state && crtc->state->event) {
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
}
static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
@@ -838,6 +826,7 @@ static int vbox_connector_init(struct drm_device *dev,
static const struct drm_mode_config_funcs vbox_mode_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index 0592004f71aa..a5de40fe1a76 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -138,7 +138,7 @@ struct vbva_buffer {
u32 data_len;
/* variable size for the rest of the vbva_buffer area in VRAM. */
- u8 data[0];
+ u8 data[];
} __packed;
#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
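
The u8 data[0] to u8 data[] change above converts a GNU zero-length array into a C99 flexible array member; sizeof(struct vbva_buffer) still excludes the trailing array, and allocations of such structs are normally sized with struct_size() from <linux/overflow.h>. An illustrative allocation only (a sketch; real VBVA buffers live in VRAM rather than kmalloc memory):

#include <linux/overflow.h>
#include <linux/slab.h>

static struct vbva_buffer *vbva_alloc(u32 len)
{
	struct vbva_buffer *vbva;

	/* struct_size() computes sizeof(*vbva) + len bytes for the
	 * flexible array, with overflow checking */
	vbva = kzalloc(struct_size(vbva, data, len), GFP_KERNEL);
	return vbva;
}
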
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index b00e20f5ce05..1208258ad3b2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -84,13 +84,14 @@ static const struct debugfs_reg32 crtc_regs[] = {
VC4_REG32(PV_HACT_ACT),
};
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 val;
int fifo_lines;
@@ -1030,6 +1031,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -1039,6 +1041,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.atomic_flush = vc4_crtc_atomic_flush,
.atomic_enable = vc4_crtc_atomic_enable,
.atomic_disable = vc4_crtc_atomic_disable,
+ .get_scanout_position = vc4_crtc_get_scanout_position,
};
static const struct vc4_crtc_data pv0_data = {
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index c586325de2a5..6dfede03396e 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -252,7 +252,7 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DPI);
- return drm_bridge_attach(dpi->encoder, bridge, NULL);
+ return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5e6fb6c2307f..76f93b662766 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -190,9 +190,6 @@ static struct drm_driver vc4_drm_driver = {
.irq_postinstall = vc4_irq_postinstall,
.irq_uninstall = vc4_irq_uninstall,
- .get_scanout_position = vc4_crtc_get_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 6627b20c99e9..139d25a8328e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -65,7 +65,7 @@ struct vc4_perfmon {
* Note that counter values can't be reset, but you can fake a reset by
* destroying the perfmon and creating a new one.
*/
- u64 counters[0];
+ u64 counters[];
};
struct vc4_dev {
@@ -677,32 +677,41 @@ struct vc4_validated_shader_info {
};
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define _wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
break; \
} \
- if (W && drm_can_sleep()) { \
- msleep(W); \
- } else { \
- cpu_relax(); \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
} \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
@@ -743,10 +752,6 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index fd8a2eb60505..d99b1d526651 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1619,7 +1619,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
- ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+ ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "bridge attach failed: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index cea18dc15f77..340719238753 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -681,11 +681,23 @@ static enum drm_mode_status
vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc,
const struct drm_display_mode *mode)
{
- /* HSM clock must be 108% of the pixel clock. Additionally,
- * the AXI clock needs to be at least 25% of pixel clock, but
- * HSM ends up being the limiting factor.
+ /*
+ * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
+ * be faster than pixel clock, infinitesimally faster, tested in
+ * simulation. Otherwise, exact value is unimportant for HDMI
+ * operation." This conflicts with bcm2835's vc4 documentation, which
+ * states HSM's clock has to be at least 108% of the pixel clock.
+ *
+ * Real life tests reveal that vc4's firmware statement holds up, and
+ * users are able to use pixel clocks closer to HSM's, namely for
+ * 1920x1200@60Hz. So it was decided to leave a 1% margin between
+ * both clocks, which for RPi0-3 implies a maximum pixel clock of
+ * 162MHz.
+ *
+ * Additionally, the AXI clock needs to be at least 25% of
+ * pixel clock, but HSM ends up being the limiting factor.
*/
- if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100))
+ if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100))
return MODE_CLOCK_HIGH;
return MODE_OK;
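
To make the new limit concrete: mode->clock is in kHz, so the check rejects anything above HSM_CLOCK_FREQ / (1000 * 101 / 100) = HSM_CLOCK_FREQ / 1010. Assuming the RPi0-3 value of HSM_CLOCK_FREQ = 163682864 Hz, that works out to 163682864 / 1010 ≈ 162062 kHz, the 162 MHz ceiling the comment mentions; the old 108% rule allowed only 163682864 / 1080 ≈ 151558 kHz, about 151.5 MHz.
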
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4934127f0d76..91e408f7a56e 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -139,7 +139,7 @@ static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
static bool plane_enabled(struct drm_plane_state *state)
{
- return state->fb && state->crtc;
+ return state->fb && !WARN_ON(!state->crtc);
}
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5156e6b279db..e27120d512b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,6 +47,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
+ virtio_add_bool(m, "indirect", vgdev->has_indirect);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 0966208ec30d..2b7e6ae65546 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,7 +30,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "virtgpu_drv.h"
@@ -91,6 +90,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
crtc->mode.hdisplay,
crtc->mode.vdisplay, 0, 0);
+ virtio_gpu_notify(vgdev);
}
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -109,6 +109,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+ virtio_gpu_notify(vgdev);
output->enabled = false;
}
@@ -121,13 +122,6 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- unsigned long flags;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (crtc->state->event)
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -332,6 +326,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
+ drm_atomic_helper_fake_vblank(state);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(dev, state);
@@ -375,6 +370,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
- drm_atomic_helper_shutdown(vgdev->ddev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 8cf27af3ad53..ab4bed78e656 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -135,7 +136,8 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
- drm_dev_unregister(dev);
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
virtio_gpu_deinit(dev);
drm_dev_put(dev);
}
@@ -214,4 +216,6 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
+
+ .release = virtio_gpu_release,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168e..7879ff58236f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -32,6 +32,7 @@
#include <linux/virtio_gpu.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
@@ -68,15 +69,21 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
-
- struct sg_table *pages;
- uint32_t mapped;
bool dumb;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base)
+struct virtio_gpu_object_shmem {
+ struct virtio_gpu_object base;
+ struct sg_table *pages;
+ uint32_t mapped;
+};
+
+#define to_virtio_gpu_shmem(virtio_gpu_object) \
+ container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+
struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
@@ -114,6 +121,7 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
virtio_gpu_resp_cb resp_cb;
+ void *resp_cb_data;
struct virtio_gpu_object_array *objs;
struct list_head list;
@@ -175,10 +183,8 @@ struct virtio_gpu_device {
struct virtio_gpu_queue ctrlq;
struct virtio_gpu_queue cursorq;
struct kmem_cache *vbufs;
- bool vqs_ready;
- bool disable_notify;
- bool pending_notify;
+ atomic_t pending_commands;
struct ida resource_ida;
@@ -193,6 +199,7 @@ struct virtio_gpu_device {
bool has_virgl_3d;
bool has_edid;
+ bool has_indirect;
struct work_struct config_changed_work;
@@ -207,15 +214,19 @@ struct virtio_gpu_device {
struct virtio_gpu_fpriv {
uint32_t ctx_id;
+ bool context_created;
+ struct mutex context_lock;
};
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
+void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
@@ -262,7 +273,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id);
+ struct virtio_gpu_object *bo);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -279,9 +290,8 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj);
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -332,8 +342,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
/* virtio_gpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
@@ -355,12 +364,16 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
/* virtio_gpu_object */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+
/* virtgpu_prime.c */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 0a2b62279647..f0d5a8974677 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -39,6 +39,9 @@ int virtio_gpu_gem_create(struct drm_file *file,
int ret;
u32 handle;
+ if (vgdev->has_virgl_3d)
+ virtio_gpu_create_context(dev, file);
+
ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
if (ret < 0)
return ret;
@@ -123,6 +126,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
objs);
+ virtio_gpu_notify(vgdev);
return 0;
}
@@ -143,6 +147,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
objs);
+ virtio_gpu_notify(vgdev);
}
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 205ec4abae2b..512daff92038 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -27,19 +27,40 @@
#include <linux/file.h>
#include <linux/sync_file.h>
+#include <linux/uaccess.h>
#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ char dbgname[TASK_COMM_LEN];
+
+ mutex_lock(&vfpriv->context_lock);
+ if (vfpriv->context_created)
+ goto out_unlock;
+
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ strlen(dbgname), dbgname);
+ virtio_gpu_notify(vgdev);
+ vfpriv->context_created = true;
+
+out_unlock:
+ mutex_unlock(&vfpriv->context_lock);
+}
+
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+ return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
virtio_gpu_map->handle,
&virtio_gpu_map->offset);
}
@@ -51,11 +72,11 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
* VIRTIO_GPUReleaseInfo struct (first XXX bytes)
*/
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *drm_file)
+ struct drm_file *file)
{
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_fence *out_fence;
int ret;
uint32_t *bo_handles = NULL;
@@ -74,6 +95,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
exbuf->fence_fd = -1;
+ virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
@@ -116,7 +138,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+ buflist = virtio_gpu_array_from_handles(file, bo_handles,
exbuf->num_bo_handles);
if (!buflist) {
ret = -ENOENT;
@@ -126,22 +148,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
bo_handles = NULL;
}
- if (buflist) {
- ret = virtio_gpu_array_lock_resv(buflist);
- if (ret)
- goto out_unused_fd;
- }
-
buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
- goto out_unresv;
+ goto out_unused_fd;
+ }
+
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ goto out_memdup;
}
out_fence = virtio_gpu_fence_alloc(vgdev);
if(!out_fence) {
ret = -ENOMEM;
- goto out_memdup;
+ goto out_unresv;
}
if (out_fence_fd >= 0) {
@@ -158,13 +180,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, buflist, out_fence);
+ virtio_gpu_notify(vgdev);
return 0;
-out_memdup:
- kvfree(buf);
out_unresv:
if (buflist)
virtio_gpu_array_unlock_resv(buflist);
+out_memdup:
+ kvfree(buf);
out_unused_fd:
kvfree(bo_handles);
if (buflist)
@@ -177,7 +200,7 @@ out_unused_fd:
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_getparam *param = data;
@@ -200,7 +223,7 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
}
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
@@ -211,7 +234,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle = 0;
struct virtio_gpu_object_params params = { 0 };
- if (vgdev->has_virgl_3d == false) {
+ if (vgdev->has_virgl_3d) {
+ virtio_gpu_create_context(dev, file);
+ params.virgl = true;
+ params.target = rc->target;
+ params.bind = rc->bind;
+ params.depth = rc->depth;
+ params.array_size = rc->array_size;
+ params.last_level = rc->last_level;
+ params.nr_samples = rc->nr_samples;
+ params.flags = rc->flags;
+ } else {
if (rc->depth > 1)
return -EINVAL;
if (rc->nr_samples > 1)
@@ -228,16 +261,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
params.width = rc->width;
params.height = rc->height;
params.size = rc->size;
- if (vgdev->has_virgl_3d) {
- params.virgl = true;
- params.target = rc->target;
- params.bind = rc->bind;
- params.depth = rc->depth;
- params.array_size = rc->array_size;
- params.last_level = rc->last_level;
- params.nr_samples = rc->nr_samples;
- params.flags = rc->flags;
- }
/* allocate a single page size object */
if (params.size == 0)
params.size = PAGE_SIZE;
@@ -251,7 +274,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
return ret;
obj = &qobj->base.base;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ ret = drm_gem_handle_create(file, obj, &handle);
if (ret) {
drm_gem_object_release(obj);
return ret;
@@ -264,13 +287,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_virtgpu_resource_info *ri = data;
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
- gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
+ gobj = drm_gem_object_lookup(file, ri->bo_handle);
if (gobj == NULL)
return -ENOENT;
@@ -297,6 +320,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
+ virtio_gpu_create_context(dev, file);
objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
if (objs == NULL)
return -ENOENT;
@@ -314,6 +338,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
(vgdev, vfpriv->ctx_id, offset, args->level,
&args->box, objs, fence);
dma_fence_put(&fence->f);
+ virtio_gpu_notify(vgdev);
return 0;
err_unlock:
@@ -344,6 +369,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->box.w, args->box.h, args->box.x, args->box.y,
objs, NULL);
} else {
+ virtio_gpu_create_context(dev, file);
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -359,6 +385,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &args->box, objs, fence);
dma_fence_put(&fence->f);
}
+ virtio_gpu_notify(vgdev);
return 0;
err_unlock:
@@ -445,6 +472,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
/* not in cache - need to talk to hw */
virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
&cache_ent);
+ virtio_gpu_notify(vgdev);
copy_exit:
ret = wait_event_timeout(vgdev->resp_wq,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 2f5773e43557..0a5c8cf409fb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -25,6 +25,7 @@
#include <linux/virtio.h>
#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
#include <drm/drm_file.h>
@@ -44,6 +45,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
@@ -51,25 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
events_clear, &events_clear);
}
-static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
- uint32_t nlen, const char *name)
-{
- int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-
- if (handle < 0)
- return handle;
- handle += 1;
- virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
- return handle;
-}
-
-static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
- uint32_t ctx_id)
-{
- virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
- ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
-}
-
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
void (*work_func)(struct work_struct *work))
{
@@ -92,6 +75,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
}
for (i = 0; i < num_capsets; i++) {
virtio_gpu_cmd_get_capset_info(vgdev, i);
+ virtio_gpu_notify(vgdev);
ret = wait_event_timeout(vgdev->resp_wq,
vgdev->capsets[i].id > 0, 5 * HZ);
if (ret == 0) {
@@ -159,6 +143,9 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+ vgdev->has_indirect = true;
+ }
DRM_INFO("features: %cvirgl %cedid\n",
vgdev->has_virgl_3d ? '+' : '-',
@@ -196,13 +183,13 @@ int virtio_gpu_init(struct drm_device *dev)
virtio_gpu_modeset_init(vgdev);
virtio_device_ready(vgdev->vdev);
- vgdev->vqs_ready = true;
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
return 0;
@@ -231,12 +218,16 @@ void virtio_gpu_deinit(struct drm_device *dev)
struct virtio_gpu_device *vgdev = dev->dev_private;
flush_work(&vgdev->obj_free_work);
- vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
vgdev->vdev->config->reset(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
+}
+
+void virtio_gpu_release(struct drm_device *dev)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
virtio_gpu_modeset_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
@@ -249,8 +240,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv;
- int id;
- char dbgname[TASK_COMM_LEN];
+ int handle;
/* can't create contexts without 3d renderer */
if (!vgdev->has_virgl_3d)
@@ -261,14 +251,15 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
if (!vfpriv)
return -ENOMEM;
- get_task_comm(dbgname, current);
- id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
- if (id < 0) {
+ mutex_init(&vfpriv->context_lock);
+
+ handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
+ if (handle < 0) {
kfree(vfpriv);
- return id;
+ return handle;
}
- vfpriv->ctx_id = id;
+ vfpriv->ctx_id = handle + 1;
file->driver_priv = vfpriv;
return 0;
}
@@ -276,14 +267,18 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_fpriv *vfpriv;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
if (!vgdev->has_virgl_3d)
return;
- vfpriv = file->driver_priv;
+ if (vfpriv->context_created) {
+ virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
+ virtio_gpu_notify(vgdev);
+ }
- virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+ ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
+ mutex_destroy(&vfpriv->context_lock);
kfree(vfpriv);
file->driver_priv = NULL;
}
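
Two details of the virtgpu_kms.c rework are worth spelling out: the host context is no longer created at open() but lazily on first 3D use (see virtio_gpu_create_context() in virtgpu_ioctl.c above), and the ID arithmetic survives the refactor: ida_alloc() hands out IDs starting at 0, but context ID 0 is effectively reserved, hence the handle + 1 on allocation and ctx_id - 1 on free. A sketch of the pairing (my_open/my_close are assumed names):

static int my_open(struct virtio_gpu_device *vgdev,
		   struct virtio_gpu_fpriv *vfpriv)
{
	int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);

	if (handle < 0)
		return handle;
	vfpriv->ctx_id = handle + 1;	/* 0 is the reserved context */
	return 0;
}

static void my_close(struct virtio_gpu_device *vgdev,
		     struct virtio_gpu_fpriv *vfpriv)
{
	/* undo the +1 offset before returning the ID to the ida */
	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
}
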
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 3af7ec80c7da..d9039bb7c5e3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
@@ -61,21 +62,46 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
}
}
-static void virtio_gpu_free_object(struct drm_gem_object *obj)
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- if (bo->pages)
- virtio_gpu_object_detach(vgdev, bo);
- if (bo->created)
- virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+ if (virtio_gpu_is_shmem(bo)) {
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+ if (shmem->pages) {
+ if (shmem->mapped) {
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl, shmem->mapped,
+ DMA_TO_DEVICE);
+ shmem->mapped = 0;
+ }
+
+ sg_free_table(shmem->pages);
+ shmem->pages = NULL;
+ drm_gem_shmem_unpin(&bo->base.base);
+ }
+
+ drm_gem_shmem_free_object(&bo->base.base);
+ }
+}
+
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- drm_gem_shmem_free_object(obj);
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ /* completion handler calls virtio_gpu_cleanup_object() */
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
}
-static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
@@ -86,21 +112,75 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = &drm_gem_shmem_mmap,
+ .mmap = drm_gem_shmem_mmap,
};
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+ return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
+
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size)
{
- struct virtio_gpu_object *bo;
+ struct virtio_gpu_object_shmem *shmem;
+ struct drm_gem_shmem_object *dshmem;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
return NULL;
- bo->base.base.funcs = &virtio_gpu_gem_funcs;
- bo->base.map_cached = true;
- return &bo->base.base;
+ dshmem = &shmem->base.base;
+ dshmem->base.funcs = &virtio_gpu_shmem_funcs;
+ dshmem->map_cached = true;
+ return &dshmem->base;
+}
+
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents)
+{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+ struct scatterlist *sg;
+ int si, ret;
+
+ ret = drm_gem_shmem_pin(&bo->base.base);
+ if (ret < 0)
+ return -EINVAL;
+
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+ if (!shmem->pages) {
+ drm_gem_shmem_unpin(&bo->base.base);
+ return -EINVAL;
+ }
+
+ if (use_dma_api) {
+ shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl,
+ shmem->pages->nents,
+ DMA_TO_DEVICE);
+ *nents = shmem->mapped;
+ } else {
+ *nents = shmem->pages->nents;
+ }
+
+ *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ DRM_ERROR("failed to allocate ent list\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(shmem->pages->sgl, sg, *nents, si) {
+ (*ents)[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
+ (*ents)[si].length = cpu_to_le32(sg->length);
+ (*ents)[si].padding = 0;
+ }
+ return 0;
}
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
@@ -111,6 +191,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
+ struct virtio_gpu_mem_entry *ents;
+ unsigned int nents;
int ret;
*bo_ptr = NULL;
@@ -147,12 +229,19 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
objs, fence);
}
- ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
+ return ret;
+ }
+
+ ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
if (ret != 0) {
virtio_gpu_free_object(&shmem_obj->base);
return ret;
}
+ virtio_gpu_notify(vgdev);
*bo_ptr = bo;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index d1c3f5fbfee4..52d24179bcec 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -148,14 +148,13 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_w >> 16,
plane->state->src_h >> 16,
0, 0);
+ virtio_gpu_notify(vgdev);
return;
}
if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
return;
- virtio_gpu_disable_notify(vgdev);
-
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
if (bo->dumb)
virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
@@ -186,8 +185,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1);
-
- virtio_gpu_enable_notify(vgdev);
+ virtio_gpu_notify(vgdev);
}
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -266,6 +264,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
plane->state->crtc_w,
plane->state->crtc_h,
0, 0, objs, vgfb->fence);
+ virtio_gpu_notify(vgdev);
dma_fence_wait(&vgfb->fence->f, true);
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5914e79d3429..73854915ec34 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -95,7 +95,8 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
if (!vbuf)
return ERR_PTR(-ENOMEM);
- BUG_ON(size > MAX_INLINE_CMD_SIZE);
+ BUG_ON(size > MAX_INLINE_CMD_SIZE ||
+ size < sizeof(struct virtio_gpu_ctrl_hdr));
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
vbuf->size = size;
@@ -109,21 +110,14 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
return vbuf;
}
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer **vbuffer_p,
- int size)
+static struct virtio_gpu_ctrl_hdr *
+virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_vbuffer *vbuf;
-
- vbuf = virtio_gpu_get_vbuf(vgdev, size,
- sizeof(struct virtio_gpu_ctrl_hdr),
- NULL, NULL);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
- *vbuffer_p = vbuf;
- return vbuf->buf;
+ /* this assumes a vbuf contains a command that starts with a
+ * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
+ * virtqueues.
+ */
+ return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
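
The cast in virtio_gpu_vbuf_ctrl_hdr() is sound because every virtio-gpu
request struct leads with the common header. For reference, the layouts from
include/uapi/linux/virtio_gpu.h, reproduced here for illustration:

struct virtio_gpu_ctrl_hdr {
	__le32 type;
	__le32 flags;
	__le64 fence_id;
	__le32 ctx_id;
	__le32 padding;
};

struct virtio_gpu_resource_unref {
	struct virtio_gpu_ctrl_hdr hdr;   /* always the first member */
	__le32 resource_id;
	__le32 padding;
};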
static struct virtio_gpu_update_cursor*
@@ -161,6 +155,25 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_command *)vbuf->buf;
}
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size,
+ virtio_gpu_resp_cb cb)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
static void free_vbuf(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -209,12 +222,12 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
- if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+ if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
- cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
- DRM_ERROR("response 0x%x (command 0x%x)\n",
- le32_to_cpu(resp->type),
- le32_to_cpu(cmd->type));
+ cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
+ DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
+ le32_to_cpu(resp->type),
+ le32_to_cpu(cmd->type));
} else
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
@@ -307,109 +320,107 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
-static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct scatterlist *vout)
- __releases(&vgdev->ctrlq.qlock)
- __acquires(&vgdev->ctrlq.qlock)
+static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_fence *fence,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vresp;
- int outcnt = 0, incnt = 0;
- bool notify = false;
- int ret;
+ int ret, idx;
- if (!vgdev->vqs_ready)
- return notify;
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ free_vbuf(vgdev, vbuf);
+ return;
+ }
- sg_init_one(&vcmd, vbuf->buf, vbuf->size);
- sgs[outcnt + incnt] = &vcmd;
- outcnt++;
+ if (vgdev->has_indirect)
+ elemcnt = 1;
- if (vout) {
- sgs[outcnt + incnt] = vout;
- outcnt++;
+again:
+ spin_lock(&vgdev->ctrlq.qlock);
+
+ if (vq->num_free < elemcnt) {
+ spin_unlock(&vgdev->ctrlq.qlock);
+ virtio_gpu_notify(vgdev);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+ goto again;
}
- if (vbuf->resp_size) {
- sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
- sgs[outcnt + incnt] = &vresp;
- incnt++;
+ /* now that the position of the vbuf in the virtqueue is known, we can
+ * finally set the fence id
+ */
+ if (fence) {
+ virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ fence);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
}
-retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
- if (ret == -ENOSPC) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
- spin_lock(&vgdev->ctrlq.qlock);
- goto retry;
- } else {
- trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ WARN_ON(ret);
- notify = virtqueue_kick_prepare(vq);
- }
- return notify;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+
+ atomic_inc(&vgdev->pending_commands);
+
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ drm_dev_exit(idx);
}
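
The elemcnt handling encodes the virtio descriptor budget: without indirect
descriptors every scatterlist element costs one ring slot, while
VIRTIO_RING_F_INDIRECT_DESC packs the whole request into a single slot. A
sketch of the accounting the caller performs (see
virtio_gpu_queue_fenced_ctrl_buffer() below):

int elemcnt = 0;

elemcnt += 1;          /* vcmd: command header and body            */
elemcnt += sg_ents;    /* vout: one slot per payload sg entry      */
elemcnt += 1;          /* vresp: response buffer, when present     */
if (vgdev->has_indirect)
	elemcnt = 1;   /* indirect: one slot for the whole request */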
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
struct virtio_gpu_fence *fence)
{
- struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *vout = NULL, sg;
+ struct scatterlist *sgs[3], vcmd, vout, vresp;
struct sg_table *sgt = NULL;
- bool notify;
- int outcnt = 0;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vout */
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
+ int sg_ents;
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
- &outcnt);
- if (!sgt)
+ &sg_ents);
+ if (!sgt) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
return;
- vout = sgt->sgl;
+ }
+
+ elemcnt += sg_ents;
+ sgs[outcnt] = sgt->sgl;
} else {
- sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
- vout = &sg;
- outcnt = 1;
+ sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+ elemcnt++;
+ sgs[outcnt] = &vout;
}
+ outcnt++;
}
-again:
- spin_lock(&vgdev->ctrlq.qlock);
-
- /*
- * Make sure we have enouth space in the virtqueue. If not
- * wait here until we have.
- *
- * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
- * to wait for free space, which can result in fence ids being
- * submitted out-of-order.
- */
- if (vq->num_free < 2 + outcnt) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
- goto again;
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
}
- if (hdr && fence) {
- virtio_gpu_fence_emit(vgdev, hdr, fence);
- if (vbuf->objs) {
- virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
- virtio_gpu_array_unlock_resv(vbuf->objs);
- }
- }
- notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
- spin_unlock(&vgdev->ctrlq.qlock);
- if (notify) {
- if (vgdev->disable_notify)
- vgdev->pending_notify = true;
- else
- virtqueue_notify(vgdev->ctrlq.vq);
- }
+ virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+ incnt);
if (sgt) {
sg_free_table(sgt);
@@ -417,25 +428,26 @@ again:
}
}
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
- vgdev->disable_notify = true;
-}
-
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
-{
- vgdev->disable_notify = false;
+ bool notify;
- if (!vgdev->pending_notify)
+ if (!atomic_read(&vgdev->pending_commands))
return;
- vgdev->pending_notify = false;
- virtqueue_notify(vgdev->ctrlq.vq);
+
+ spin_lock(&vgdev->ctrlq.qlock);
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
}
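
With notification now edge-triggered off pending_commands, callers can batch
freely: each virtio_gpu_cmd_*() helper only enqueues, and a single
virtio_gpu_notify() kicks the ring. A hedged sketch of the idiom, with
arguments elided (compare the plane-update hunks earlier in this patch):

/* queue several commands without kicking the host */
virtio_gpu_cmd_transfer_to_host_2d(vgdev, bo, /* offset, rect, objs, fence */ ...);
virtio_gpu_cmd_resource_flush(vgdev, /* handle, rect */ ...);

/* one hypercall/vmexit for the whole batch */
virtio_gpu_notify(vgdev);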
static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -443,12 +455,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
+ int idx, ret, outcnt;
bool notify;
- int ret;
- int outcnt;
- if (!vgdev->vqs_ready)
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ free_vbuf(vgdev, vbuf);
return;
+ }
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -464,7 +477,7 @@ retry:
goto retry;
} else {
trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ virtio_gpu_vbuf_ctrl_hdr(vbuf));
notify = virtqueue_kick_prepare(vq);
}
@@ -473,6 +486,8 @@ retry:
if (notify)
virtqueue_notify(vq);
+
+ drm_dev_exit(idx);
}
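
Both queue paths replace the old vqs_ready flag with drm_dev_enter() and
drm_dev_exit(), the standard DRM guard against use after device unplug:

int idx;

if (!drm_dev_enter(ddev, &idx))
	return;		/* device unplugged: bail without touching it */

/* ... safe to access device state and the virtqueues here ... */

drm_dev_exit(idx);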
/* just create gem objects for userspace and long lived objects,
@@ -499,39 +514,36 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p->width = cpu_to_le32(params->width);
cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
bo->created = true;
}
-void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_resource_unref *cmd_p;
- struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_object *bo;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
- memset(cmd_p, 0, sizeof(*cmd_p));
+ bo = vbuf->resp_cb_data;
+ vbuf->resp_cb_data = NULL;
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
- cmd_p->resource_id = cpu_to_le32(resource_id);
-
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_cleanup_object(bo);
}
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- struct virtio_gpu_fence *fence)
+void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
{
- struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_resource_unref *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+ virtio_gpu_cmd_unref_cb);
memset(cmd_p, 0, sizeof(*cmd_p));
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ vbuf->resp_cb_data = bo;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -588,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -606,7 +619,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p->r.x = cpu_to_le32(x);
cmd_p->r.y = cpu_to_le32(y);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void
@@ -629,7 +642,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
vbuf->data_buf = ents;
vbuf->data_size = sizeof(*ents) * nents;
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -939,7 +952,6 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -988,7 +1000,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
cmd_p->flags = cpu_to_le32(params->flags);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
bo->created = true;
}
@@ -1003,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1021,7 +1035,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
@@ -1047,7 +1061,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
@@ -1070,94 +1084,19 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->size = cpu_to_le32(data_size);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence)
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents)
{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- struct virtio_gpu_mem_entry *ents;
- struct scatterlist *sg;
- int si, nents, ret;
-
- if (WARN_ON_ONCE(!obj->created))
- return -EINVAL;
- if (WARN_ON_ONCE(obj->pages))
- return -EINVAL;
-
- ret = drm_gem_shmem_pin(&obj->base.base);
- if (ret < 0)
- return -EINVAL;
-
- obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
- if (obj->pages == NULL) {
- drm_gem_shmem_unpin(&obj->base.base);
- return -EINVAL;
- }
-
- if (use_dma_api) {
- obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->pages->nents,
- DMA_TO_DEVICE);
- nents = obj->mapped;
- } else {
- nents = obj->pages->nents;
- }
-
- /* gets freed when the ring has consumed it */
- ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
- GFP_KERNEL);
- if (!ents) {
- DRM_ERROR("failed to allocate ent list\n");
- return -ENOMEM;
- }
-
- for_each_sg(obj->pages->sgl, sg, nents, si) {
- ents[si].addr = cpu_to_le64(use_dma_api
- ? sg_dma_address(sg)
- : sg_phys(sg));
- ents[si].length = cpu_to_le32(sg->length);
- ents[si].padding = 0;
- }
-
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
- ents, nents,
- fence);
+ ents, nents, NULL);
return 0;
}
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj)
-{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
- if (WARN_ON_ONCE(!obj->pages))
- return;
-
- if (use_dma_api && obj->mapped) {
- struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
- /* detach backing and wait for the host process it ... */
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
- dma_fence_wait(&fence->f, true);
- dma_fence_put(&fence->f);
-
- /* ... then tear down iommu mappings */
- dma_unmap_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->mapped,
- DMA_TO_DEVICE);
- obj->mapped = 0;
- } else {
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
- }
-
- sg_free_table(obj->pages);
- obj->pages = NULL;
-
- drm_gem_shmem_unpin(&obj->base.base);
-}
-
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output)
{
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 74f703b8d22a..ac85e17428f8 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -76,10 +76,12 @@ static void vkms_disable_vblank(struct drm_crtc *crtc)
hrtimer_cancel(&out->vblank_hrtimer);
}
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq)
+static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -154,6 +156,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
.atomic_destroy_state = vkms_atomic_crtc_destroy_state,
.enable_vblank = vkms_enable_vblank,
.disable_vblank = vkms_disable_vblank,
+ .get_vblank_timestamp = vkms_get_vblank_timestamp,
.get_crc_sources = vkms_get_crc_sources,
.set_crc_source = vkms_set_crc_source,
.verify_crc_source = vkms_verify_crc_source,
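
The vkms hunks here and below move get_vblank_timestamp from the device-wide
struct drm_driver into the per-CRTC struct drm_crtc_funcs. Inside the hook,
the old (dev, pipe) parameters are recovered locally from the crtc argument,
as the replacement above shows:

struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;	/* or drm_crtc_index(crtc) */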
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 25bd7519295f..860de052e820 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -103,7 +103,6 @@ static struct drm_driver vkms_driver = {
.dumb_create = vkms_dumb_create,
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
- .get_vblank_timestamp = vkms_get_vblank_timestamp,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = vkms_prime_import_sg_table,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 7d52e24564db..eda04ffba7b1 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -111,10 +111,6 @@ struct vkms_gem_object {
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor);
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq);
-
int vkms_output_init(struct vkms_device *vkmsdev, int index);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 5fc8f85aaf3d..6d31265a2ab7 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -117,7 +117,7 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
bool can_position = false;
int ret;
- if (!state->fb | !state->crtc)
+ if (!state->fb || WARN_ON(!state->crtc))
return 0;
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c877a21a0739..31f85f09f1fc 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
- vmwgfx_validation.o vmwgfx_page_dirty.o \
+ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
ttm_object.o ttm_lock.o
+vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 9cbba0e8ce6a..799bc0963f7a 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2020 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -104,12 +104,12 @@ typedef enum {
SVGA_3D_CMD_DEAD1 = 1083,
SVGA_3D_CMD_DEAD2 = 1084,
- SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
- SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
- SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1087,
- SVGA_3D_CMD_LOGICOPS_COLORFILL = 1088,
- SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1089,
- SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1090,
+ SVGA_3D_CMD_DEAD12 = 1085,
+ SVGA_3D_CMD_DEAD13 = 1086,
+ SVGA_3D_CMD_DEAD14 = 1087,
+ SVGA_3D_CMD_DEAD15 = 1088,
+ SVGA_3D_CMD_DEAD16 = 1089,
+ SVGA_3D_CMD_DEAD17 = 1090,
SVGA_3D_CMD_SET_OTABLE_BASE = 1091,
SVGA_3D_CMD_READBACK_OTABLE = 1092,
@@ -261,30 +261,23 @@ typedef enum {
SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221,
SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
-
- /*
- * Reserve some IDs to be used for the SM5 shader types.
- */
- SVGA_3D_CMD_DX_RESERVED1 = 1223,
- SVGA_3D_CMD_DX_RESERVED2 = 1224,
- SVGA_3D_CMD_DX_RESERVED3 = 1225,
+ SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET = 1223,
+ SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET = 1224,
+ SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET = 1225,
SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER = 1226,
SVGA_3D_CMD_DX_MAX = 1227,
SVGA_3D_CMD_SCREEN_COPY = 1227,
- /*
- * Reserve some IDs to be used for video.
- */
- SVGA_3D_CMD_VIDEO_RESERVED1 = 1228,
- SVGA_3D_CMD_VIDEO_RESERVED2 = 1229,
- SVGA_3D_CMD_VIDEO_RESERVED3 = 1230,
- SVGA_3D_CMD_VIDEO_RESERVED4 = 1231,
- SVGA_3D_CMD_VIDEO_RESERVED5 = 1232,
- SVGA_3D_CMD_VIDEO_RESERVED6 = 1233,
- SVGA_3D_CMD_VIDEO_RESERVED7 = 1234,
- SVGA_3D_CMD_VIDEO_RESERVED8 = 1235,
+ SVGA_3D_CMD_RESERVED1 = 1228,
+ SVGA_3D_CMD_RESERVED2 = 1229,
+ SVGA_3D_CMD_RESERVED3 = 1230,
+ SVGA_3D_CMD_RESERVED4 = 1231,
+ SVGA_3D_CMD_RESERVED5 = 1232,
+ SVGA_3D_CMD_RESERVED6 = 1233,
+ SVGA_3D_CMD_RESERVED7 = 1234,
+ SVGA_3D_CMD_RESERVED8 = 1235,
SVGA_3D_CMD_GROW_OTABLE = 1236,
SVGA_3D_CMD_DX_GROW_COTABLE = 1237,
@@ -298,7 +291,46 @@ typedef enum {
SVGA_3D_CMD_DX_PRED_CONVERT = 1243,
SVGA_3D_CMD_WHOLE_SURFACE_COPY = 1244,
- SVGA_3D_CMD_MAX = 1245,
+ SVGA_3D_CMD_DX_DEFINE_UA_VIEW = 1245,
+ SVGA_3D_CMD_DX_DESTROY_UA_VIEW = 1246,
+ SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT = 1247,
+ SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT = 1248,
+ SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT = 1249,
+ SVGA_3D_CMD_DX_SET_UA_VIEWS = 1250,
+
+ SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT = 1251,
+ SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT = 1252,
+ SVGA_3D_CMD_DX_DISPATCH = 1253,
+ SVGA_3D_CMD_DX_DISPATCH_INDIRECT = 1254,
+
+ SVGA_3D_CMD_WRITE_ZERO_SURFACE = 1255,
+ SVGA_3D_CMD_HINT_ZERO_SURFACE = 1256,
+ SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER = 1257,
+ SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT = 1258,
+
+ SVGA_3D_CMD_LOGICOPS_BITBLT = 1259,
+ SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1260,
+ SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1261,
+ SVGA_3D_CMD_LOGICOPS_COLORFILL = 1262,
+ SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1263,
+ SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1264,
+
+ SVGA_3D_CMD_RESERVED2_1 = 1265,
+
+ SVGA_3D_CMD_RESERVED2_2 = 1266,
+ SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 = 1267,
+ SVGA_3D_CMD_DX_SET_CS_UA_VIEWS = 1268,
+ SVGA_3D_CMD_DX_SET_MIN_LOD = 1269,
+ SVGA_3D_CMD_RESERVED2_3 = 1270,
+ SVGA_3D_CMD_RESERVED2_4 = 1271,
+ SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 = 1272,
+ SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB = 1273,
+ SVGA_3D_CMD_DX_SET_SHADER_IFACE = 1274,
+ SVGA_3D_CMD_DX_BIND_STREAMOUTPUT = 1275,
+ SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS = 1276,
+ SVGA_3D_CMD_DX_BIND_SHADER_IFACE = 1277,
+
+ SVGA_3D_CMD_MAX = 1278,
SVGA_3D_CMD_FUTURE_MAX = 3000
} SVGAFifo3dCmdId;
@@ -334,6 +366,7 @@ struct {
uint32 sid;
SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
+
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
* structures must have the same value of numMipLevels field.
@@ -341,6 +374,7 @@ struct {
* numMipLevels set to 0.
*/
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
*
@@ -360,6 +394,7 @@ struct {
uint32 sid;
SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
+
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
* structures must have the same value of numMipLevels field.
@@ -369,6 +404,7 @@ struct {
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
+
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
*
@@ -517,6 +553,18 @@ typedef
struct {
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dest;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBltNonMSToMS;
+/* SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
SVGA3dBox boxSrc;
SVGA3dBox boxDest;
SVGA3dStretchBltMode mode;
@@ -555,6 +603,7 @@ struct {
SVGAGuestImage guest;
SVGA3dSurfaceImageId host;
SVGA3dTransferType transfer;
+
/*
* Followed by variable number of SVGA3dCopyBox structures. For consistency
* in all clipping logic and coordinate translation, we define the
@@ -789,7 +838,7 @@ struct {
uint32 indexBufferSid; /* Valid index buffer sid. */
uint32 indexBufferOffset; /* Byte offset into the vertex buffer, almost */
- /* always 0 for DX9 guests, non-zero for OpenGL */
+ /* always 0 for pre-SM guests, non-zero for OpenGL */
/* guests. We can't represent non-multiple of */
/* stride offsets in D3D9Renderer... */
uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */
@@ -1228,6 +1277,7 @@ struct SVGA3dCmdLogicOpsBitBlt {
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dst;
SVGA3dLogicOp logicOp;
+ SVGA3dLogicOpRop3 logicOpRop3;
/* Followed by variable number of SVGA3dCopyBox structures */
}
#include "vmware_pack_end.h"
@@ -1247,7 +1297,8 @@ struct SVGA3dCmdLogicOpsTransBlt {
uint32 color;
uint32 flags;
SVGA3dBox srcBox;
- SVGA3dBox dstBox;
+ SVGA3dSignedBox dstBox;
+ SVGA3dBox clipBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
@@ -1266,7 +1317,8 @@ struct SVGA3dCmdLogicOpsStretchBlt {
uint16 mode;
uint16 flags;
SVGA3dBox srcBox;
- SVGA3dBox dstBox;
+ SVGA3dSignedBox dstBox;
+ SVGA3dBox clipBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
@@ -1283,6 +1335,7 @@ struct SVGA3dCmdLogicOpsColorFill {
SVGA3dSurfaceImageId dst;
uint32 color;
SVGA3dLogicOp logicOp;
+ SVGA3dLogicOpRop3 logicOpRop3;
/* Followed by variable number of SVGA3dRect structures. */
}
#include "vmware_pack_end.h"
@@ -1302,7 +1355,8 @@ struct SVGA3dCmdLogicOpsAlphaBlend {
uint32 alphaVal;
uint32 flags;
SVGA3dBox srcBox;
- SVGA3dBox dstBox;
+ SVGA3dSignedBox dstBox;
+ SVGA3dBox clipBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
@@ -1365,8 +1419,9 @@ struct {
SVGA3dSurface2Flags surface2Flags;
uint8 multisamplePattern;
uint8 qualityLevel;
- uint8 pad0[2];
- uint32 pad1[3];
+ uint16 bufferByteStride;
+ float minLOD;
+ uint32 pad0[2];
}
#include "vmware_pack_end.h"
SVGAOTableSurfaceEntry;
@@ -1543,7 +1598,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
SVGAOTableType type;
- PPN baseAddress;
+ PPN32 baseAddress;
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
@@ -1599,7 +1654,7 @@ typedef
struct SVGA3dCmdDefineGBMob {
SVGAMobId mobid;
SVGAMobFormat ptDepth;
- PPN base;
+ PPN32 base;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
@@ -1618,7 +1673,6 @@ struct SVGA3dCmdDestroyGBMob {
#include "vmware_pack_end.h"
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
-
/*
* Define a memory object (Mob) in the OTable with a PPN64 base.
*/
@@ -1719,6 +1773,27 @@ struct SVGA3dCmdDefineGBSurface_v3 {
SVGA3dCmdDefineGBSurface_v3; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
/*
+ * Defines a guest-backed surface, adding buffer byte stride.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v4 {
+ uint32 sid;
+ SVGA3dSurfaceAllFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dMSPattern multisamplePattern;
+ SVGA3dMSQualityLevel qualityLevel;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+ uint32 bufferByteStride;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v4; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 */
+
+/*
* Destroy a guest-backed surface.
*/
@@ -2181,4 +2256,20 @@ SVGA3dCmdScreenCopy; /* SVGA_3D_CMD_SCREEN_COPY */
#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWriteZeroSurface; /* SVGA_3D_CMD_WRITE_ZERO_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHintZeroSurface; /* SVGA_3D_CMD_HINT_ZERO_SURFACE */
+
#endif /* _SVGA3D_CMD_H_ */
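
Several LOGICOPS blit commands above switch their destination from SVGA3dBox
to SVGA3dSignedBox and gain an explicit clipBox, so a blit may target
coordinates that start off-surface while the clip box bounds the pixels
actually written. A hedged sketch of the signed-box layout, assumed by
analogy with SVGA3dBox in svga3d_types.h (it is not defined in this header):

typedef struct SVGA3dSignedBox {
	int32 x, y, z;	/* origin: may be negative for these blits */
	int32 w, h, d;	/* extent */
} SVGA3dSignedBox;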
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index f256560049bf..617b468c626c 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2019 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -39,6 +39,8 @@
#include "includeCheck.h"
+#include "svga3d_types.h"
+
/*
* 3D Hardware Version
*
@@ -69,381 +71,408 @@ typedef enum {
* DevCap indexes.
*/
-typedef enum {
- SVGA3D_DEVCAP_INVALID = ((uint32)-1),
- SVGA3D_DEVCAP_3D = 0,
- SVGA3D_DEVCAP_MAX_LIGHTS = 1,
-
- /*
- * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
- * fixed-function texture units available. Each of these units
- * work in both FFP and Shader modes, and they support texture
- * transforms and texture coordinates. The host may have additional
- * texture image units that are only usable with shaders.
- */
- SVGA3D_DEVCAP_MAX_TEXTURES = 2,
- SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
- SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
- SVGA3D_DEVCAP_VERTEX_SHADER = 5,
- SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
- SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
- SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
- SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
- SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
- SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
- SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12,
- SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13,
- SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14,
- SVGA3D_DEVCAP_QUERY_TYPES = 15,
- SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
- SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
- SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
- SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
- SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
- SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
- SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
- SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
- SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
- SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
- SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
- SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
- SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
- SVGA3D_DEVCAP_TEXTURE_OPS = 31,
- SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
- SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
- SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
- SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
- SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
- SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
- SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
- SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
- SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
- SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
- SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
- SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
- SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
- SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
- SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
- SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
- SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
- SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
- SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
- SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
- SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
- SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
- SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
- SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
-
- /*
- * There is a hole in our devcap definitions for
- * historical reasons.
- *
- * Define a constant just for completeness.
- */
- SVGA3D_DEVCAP_MISSING62 = 62,
-
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
-
- /*
- * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
- * render targets. This does not include the depth or stencil targets.
- */
- SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
-
- SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
- SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
- SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
- SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
- SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
- SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
- SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
- SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
- SVGA3D_DEVCAP_SUPERSAMPLE = 73,
- SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
- SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
- SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
-
- /*
- * This is the maximum number of SVGA context IDs that the guest
- * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
- */
- SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
-
- /*
- * This is the maximum number of SVGA surface IDs that the guest
- * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
- */
- SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
-
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
-
- SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82,
- SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83,
-
- /*
- * Deprecated.
- */
- SVGA3D_DEVCAP_DEAD1 = 84,
-
- /*
- * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
- * ored together, one for every type of video decoding supported.
- */
- SVGA3D_DEVCAP_VIDEO_DECODE = 85,
-
- /*
- * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
- * ored together, one for every type of video processing supported.
- */
- SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
-
- SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
- SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
- SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
- SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
-
- SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
-
- /*
- * Does the host support the SVGA logic ops commands?
- */
- SVGA3D_DEVCAP_LOGICOPS = 92,
-
- /*
- * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
- */
- SVGA3D_DEVCAP_TS_COLOR_KEY = 93, /* boolean */
-
- /*
- * Deprecated.
- */
- SVGA3D_DEVCAP_DEAD2 = 94,
-
- /*
- * Does the device support DXContexts?
- */
- SVGA3D_DEVCAP_DXCONTEXT = 95,
-
- /*
- * What is the maximum size of a texture array?
- *
- * (Even if this cap is zero, cubemaps are still allowed.)
- */
- SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
-
- /*
- * What is the maximum number of vertex buffers or vertex input registers
- * that can be expected to work correctly with a DXContext?
- *
- * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
- * anything in excess of this cap is not guaranteed to render correctly.
- *
- * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
- * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
- * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1,
- * but only the registers up to this cap value are guaranteed to render
- * correctly.
- *
- * If guest-drivers are able to expose a lower-limit, it's recommended
- * that they clamp to this value. Otherwise, the host will make a
- * best-effort on case-by-case basis if guests exceed this.
- */
- SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
-
- /*
- * What is the maximum number of constant buffers that can be expected to
- * work correctly with a DX context?
- *
- * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
- * anything in excess of this cap is not guaranteed to render correctly.
- *
- * If guest-drivers are able to expose a lower-limit, it's recommended
- * that they clamp to this value. Otherwise, the host will make a
- * best-effort on case-by-case basis if guests exceed this.
- */
- SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
-
- /*
- * Does the device support provoking vertex control?
- *
- * If this cap is present, the provokingVertexLast field in the
- * rasterizer state is enabled. (Guests can then set it to FALSE,
- * meaning that the first vertex is the provoking vertex, or TRUE,
- * meaning that the last verteix is the provoking vertex.)
- *
- * If this cap is FALSE, then guests should set the provokingVertexLast
- * to FALSE, otherwise rendering behavior is undefined.
- */
- SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
-
- SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100,
- SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101,
- SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102,
- SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103,
- SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104,
- SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105,
- SVGA3D_DEVCAP_DXFMT_Z_D32 = 106,
- SVGA3D_DEVCAP_DXFMT_Z_D16 = 107,
- SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108,
- SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109,
- SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110,
- SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111,
- SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112,
- SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113,
- SVGA3D_DEVCAP_DXFMT_DXT1 = 114,
- SVGA3D_DEVCAP_DXFMT_DXT2 = 115,
- SVGA3D_DEVCAP_DXFMT_DXT3 = 116,
- SVGA3D_DEVCAP_DXFMT_DXT4 = 117,
- SVGA3D_DEVCAP_DXFMT_DXT5 = 118,
- SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
- SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
- SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
- SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 = 122,
- SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
- SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
- SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
- SVGA3D_DEVCAP_DXFMT_V8U8 = 126,
- SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127,
- SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128,
- SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129,
- SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130,
- SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131,
- SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132,
- SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133,
- SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134,
- SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135,
- SVGA3D_DEVCAP_DXFMT_BUFFER = 136,
- SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137,
- SVGA3D_DEVCAP_DXFMT_V16U16 = 138,
- SVGA3D_DEVCAP_DXFMT_G16R16 = 139,
- SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140,
- SVGA3D_DEVCAP_DXFMT_UYVY = 141,
- SVGA3D_DEVCAP_DXFMT_YUY2 = 142,
- SVGA3D_DEVCAP_DXFMT_NV12 = 143,
- SVGA3D_DEVCAP_DXFMT_AYUV = 144,
- SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145,
- SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146,
- SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147,
- SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148,
- SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149,
- SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150,
- SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155,
- SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156,
- SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157,
- SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
- SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
- SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
- SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 = 161,
- SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT = 162,
- SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
- SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
- SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170,
- SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171,
- SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172,
- SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173,
- SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174,
- SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175,
- SVGA3D_DEVCAP_DXFMT_R32_UINT = 176,
- SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
- SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
- SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
- SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 = 180,
- SVGA3D_DEVCAP_DXFMT_X24_G8_UINT = 181,
- SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
- SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
- SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
- SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185,
- SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186,
- SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187,
- SVGA3D_DEVCAP_DXFMT_R16_UINT = 188,
- SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189,
- SVGA3D_DEVCAP_DXFMT_R16_SINT = 190,
- SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191,
- SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192,
- SVGA3D_DEVCAP_DXFMT_R8_UINT = 193,
- SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194,
- SVGA3D_DEVCAP_DXFMT_R8_SINT = 195,
- SVGA3D_DEVCAP_DXFMT_P8 = 196,
- SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197,
- SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198,
- SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199,
- SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200,
- SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201,
- SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202,
- SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203,
- SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204,
- SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205,
- SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206,
- SVGA3D_DEVCAP_DXFMT_ATI1 = 207,
- SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208,
- SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209,
- SVGA3D_DEVCAP_DXFMT_ATI2 = 210,
- SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211,
- SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212,
- SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213,
- SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214,
- SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215,
- SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216,
- SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217,
- SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218,
- SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219,
- SVGA3D_DEVCAP_DXFMT_YV12 = 220,
- SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223,
- SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224,
- SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226,
- SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227,
- SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228,
- SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229,
- SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230,
- SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231,
- SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232,
- SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233,
- SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234,
- SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235,
- SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236,
- SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237,
- SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238,
- SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239,
- SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240,
- SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241,
- SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
- SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
-
- /*
- * Advertises shaderModel 4.1 support, independent blend-states,
- * cube-map arrays, and a higher vertex input registers limit.
- *
- * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
- */
- SVGA3D_DEVCAP_SM41 = 244,
-
- SVGA3D_DEVCAP_MULTISAMPLE_2X = 245,
- SVGA3D_DEVCAP_MULTISAMPLE_4X = 246,
-
- SVGA3D_DEVCAP_MAX /* This must be the last index. */
-} SVGA3dDevCapIndex;
+typedef uint32 SVGA3dDevCapIndex;
+
+#define SVGA3D_DEVCAP_INVALID ((uint32)-1)
+#define SVGA3D_DEVCAP_3D 0
+#define SVGA3D_DEVCAP_MAX_LIGHTS 1
+
+/*
+ * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ * fixed-function texture units available. Each of these units
+ * works in both FFP and Shader modes and supports texture
+ * transforms and texture coordinates. The host may have additional
+ * texture image units that are only usable with shaders.
+ */
+#define SVGA3D_DEVCAP_MAX_TEXTURES 2
+#define SVGA3D_DEVCAP_MAX_CLIP_PLANES 3
+#define SVGA3D_DEVCAP_VERTEX_SHADER_VERSION 4
+#define SVGA3D_DEVCAP_VERTEX_SHADER 5
+#define SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION 6
+#define SVGA3D_DEVCAP_FRAGMENT_SHADER 7
+#define SVGA3D_DEVCAP_MAX_RENDER_TARGETS 8
+#define SVGA3D_DEVCAP_S23E8_TEXTURES 9
+#define SVGA3D_DEVCAP_S10E5_TEXTURES 10
+#define SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND 11
+#define SVGA3D_DEVCAP_D16_BUFFER_FORMAT 12
+#define SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT 13
+#define SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT 14
+#define SVGA3D_DEVCAP_QUERY_TYPES 15
+#define SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING 16
+#define SVGA3D_DEVCAP_MAX_POINT_SIZE 17
+#define SVGA3D_DEVCAP_MAX_SHADER_TEXTURES 18
+#define SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH 19
+#define SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT 20
+#define SVGA3D_DEVCAP_MAX_VOLUME_EXTENT 21
+#define SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT 22
+#define SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO 23
+#define SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY 24
+#define SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT 25
+#define SVGA3D_DEVCAP_MAX_VERTEX_INDEX 26
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS 27
+#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS 28
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS 29
+#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS 30
+#define SVGA3D_DEVCAP_TEXTURE_OPS 31
+#define SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 32
+#define SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 33
+#define SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 34
+#define SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 35
+#define SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 36
+#define SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 37
+#define SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 38
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 39
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 40
+#define SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 41
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 42
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D16 43
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 44
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 45
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT1 46
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT2 47
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT3 48
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT4 49
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT5 50
+#define SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 51
+#define SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 52
+#define SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 53
+#define SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 54
+#define SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 55
+#define SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 56
+#define SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 57
+#define SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 58
+#define SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 59
+#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 60
+#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 61
+
+/*
+ * There is a hole in our devcap definitions for
+ * historical reasons.
+ *
+ * Define a constant just for completeness.
+ */
+#define SVGA3D_DEVCAP_MISSING62 62
+
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES 63
+
+/*
+ * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+ * render targets. This does not include the depth or stencil targets.
+ */
+#define SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS 64
+
+#define SVGA3D_DEVCAP_SURFACEFMT_V16U16 65
+#define SVGA3D_DEVCAP_SURFACEFMT_G16R16 66
+#define SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 67
+#define SVGA3D_DEVCAP_SURFACEFMT_UYVY 68
+#define SVGA3D_DEVCAP_SURFACEFMT_YUY2 69
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD4 70
+#define SVGA3D_DEVCAP_DEAD5 71
+#define SVGA3D_DEVCAP_DEAD7 72
+#define SVGA3D_DEVCAP_DEAD6 73
+
+#define SVGA3D_DEVCAP_AUTOGENMIPMAPS 74
+#define SVGA3D_DEVCAP_SURFACEFMT_NV12 75
+#define SVGA3D_DEVCAP_DEAD10 76
+
+/*
+ * This is the maximum number of SVGA context IDs that the guest
+ * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+ */
+#define SVGA3D_DEVCAP_MAX_CONTEXT_IDS 77
+
+/*
+ * This is the maximum number of SVGA surface IDs that the guest
+ * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+ */
+#define SVGA3D_DEVCAP_MAX_SURFACE_IDS 78
+
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 79
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 80
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT 81
+
+#define SVGA3D_DEVCAP_SURFACEFMT_ATI1 82
+#define SVGA3D_DEVCAP_SURFACEFMT_ATI2 83
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD1 84
+#define SVGA3D_DEVCAP_DEAD8 85
+#define SVGA3D_DEVCAP_DEAD9 86
+
+#define SVGA3D_DEVCAP_LINE_AA 87 /* boolean */
+#define SVGA3D_DEVCAP_LINE_STIPPLE 88 /* boolean */
+#define SVGA3D_DEVCAP_MAX_LINE_WIDTH 89 /* float */
+#define SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH 90 /* float */
+
+#define SVGA3D_DEVCAP_SURFACEFMT_YV12 91
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD3 92
+
+/*
+ * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+ */
+#define SVGA3D_DEVCAP_TS_COLOR_KEY 93 /* boolean */
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD2 94
+
+/*
+ * Does the device support DXContexts?
+ */
+#define SVGA3D_DEVCAP_DXCONTEXT 95
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD11 96
+
+/*
+ * What is the maximum number of vertex buffers or vertex input registers
+ * that can be expected to work correctly with a DXContext?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+ * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or up to
+ * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1
+ * cap, but only the registers up to this cap value are guaranteed to
+ * render correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it's recommended
+ * that they clamp to this value. Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed this.
+ */
+#define SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS 97
+
+/*
+ * What is the maximum number of constant buffers that can be expected to
+ * work correctly with a DX context?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it's recommended
+ * that they clamp to this value. Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed this.
+ */
+#define SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS 98
+
+/*
+ * Does the device support provoking vertex control?
+ *
+ * If this cap is present, the provokingVertexLast field in the
+ * rasterizer state is enabled. (Guests can then set it to FALSE,
+ * meaning that the first vertex is the provoking vertex, or TRUE,
+ * meaning that the last vertex is the provoking vertex.)
+ *
+ * If this cap is FALSE, then guests should set provokingVertexLast
+ * to FALSE; otherwise rendering behavior is undefined.
+ */
+#define SVGA3D_DEVCAP_DX_PROVOKING_VERTEX 99
+
+#define SVGA3D_DEVCAP_DXFMT_X8R8G8B8 100
+#define SVGA3D_DEVCAP_DXFMT_A8R8G8B8 101
+#define SVGA3D_DEVCAP_DXFMT_R5G6B5 102
+#define SVGA3D_DEVCAP_DXFMT_X1R5G5B5 103
+#define SVGA3D_DEVCAP_DXFMT_A1R5G5B5 104
+#define SVGA3D_DEVCAP_DXFMT_A4R4G4B4 105
+#define SVGA3D_DEVCAP_DXFMT_Z_D32 106
+#define SVGA3D_DEVCAP_DXFMT_Z_D16 107
+#define SVGA3D_DEVCAP_DXFMT_Z_D24S8 108
+#define SVGA3D_DEVCAP_DXFMT_Z_D15S1 109
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8 110
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 111
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE16 112
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 113
+#define SVGA3D_DEVCAP_DXFMT_DXT1 114
+#define SVGA3D_DEVCAP_DXFMT_DXT2 115
+#define SVGA3D_DEVCAP_DXFMT_DXT3 116
+#define SVGA3D_DEVCAP_DXFMT_DXT4 117
+#define SVGA3D_DEVCAP_DXFMT_DXT5 118
+#define SVGA3D_DEVCAP_DXFMT_BUMPU8V8 119
+#define SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 120
+#define SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 121
+#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 122
+#define SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 123
+#define SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 124
+#define SVGA3D_DEVCAP_DXFMT_A2R10G10B10 125
+#define SVGA3D_DEVCAP_DXFMT_V8U8 126
+#define SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 127
+#define SVGA3D_DEVCAP_DXFMT_CxV8U8 128
+#define SVGA3D_DEVCAP_DXFMT_X8L8V8U8 129
+#define SVGA3D_DEVCAP_DXFMT_A2W10V10U10 130
+#define SVGA3D_DEVCAP_DXFMT_ALPHA8 131
+#define SVGA3D_DEVCAP_DXFMT_R_S10E5 132
+#define SVGA3D_DEVCAP_DXFMT_R_S23E8 133
+#define SVGA3D_DEVCAP_DXFMT_RG_S10E5 134
+#define SVGA3D_DEVCAP_DXFMT_RG_S23E8 135
+#define SVGA3D_DEVCAP_DXFMT_BUFFER 136
+#define SVGA3D_DEVCAP_DXFMT_Z_D24X8 137
+#define SVGA3D_DEVCAP_DXFMT_V16U16 138
+#define SVGA3D_DEVCAP_DXFMT_G16R16 139
+#define SVGA3D_DEVCAP_DXFMT_A16B16G16R16 140
+#define SVGA3D_DEVCAP_DXFMT_UYVY 141
+#define SVGA3D_DEVCAP_DXFMT_YUY2 142
+#define SVGA3D_DEVCAP_DXFMT_NV12 143
+#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD2 144
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS 145
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT 146
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT 147
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS 148
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT 149
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT 150
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT 151
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS 152
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT 153
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM 154
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT 155
+#define SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS 156
+#define SVGA3D_DEVCAP_DXFMT_R32G32_UINT 157
+#define SVGA3D_DEVCAP_DXFMT_R32G32_SINT 158
+#define SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS 159
+#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT 160
+#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 161
+#define SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT 162
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS 163
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT 164
+#define SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT 165
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS 166
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM 167
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB 168
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT 169
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT 170
+#define SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS 171
+#define SVGA3D_DEVCAP_DXFMT_R16G16_UINT 172
+#define SVGA3D_DEVCAP_DXFMT_R16G16_SINT 173
+#define SVGA3D_DEVCAP_DXFMT_R32_TYPELESS 174
+#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT 175
+#define SVGA3D_DEVCAP_DXFMT_R32_UINT 176
+#define SVGA3D_DEVCAP_DXFMT_R32_SINT 177
+#define SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS 178
+#define SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT 179
+#define SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 180
+#define SVGA3D_DEVCAP_DXFMT_X24_G8_UINT 181
+#define SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS 182
+#define SVGA3D_DEVCAP_DXFMT_R8G8_UNORM 183
+#define SVGA3D_DEVCAP_DXFMT_R8G8_UINT 184
+#define SVGA3D_DEVCAP_DXFMT_R8G8_SINT 185
+#define SVGA3D_DEVCAP_DXFMT_R16_TYPELESS 186
+#define SVGA3D_DEVCAP_DXFMT_R16_UNORM 187
+#define SVGA3D_DEVCAP_DXFMT_R16_UINT 188
+#define SVGA3D_DEVCAP_DXFMT_R16_SNORM 189
+#define SVGA3D_DEVCAP_DXFMT_R16_SINT 190
+#define SVGA3D_DEVCAP_DXFMT_R8_TYPELESS 191
+#define SVGA3D_DEVCAP_DXFMT_R8_UNORM 192
+#define SVGA3D_DEVCAP_DXFMT_R8_UINT 193
+#define SVGA3D_DEVCAP_DXFMT_R8_SNORM 194
+#define SVGA3D_DEVCAP_DXFMT_R8_SINT 195
+#define SVGA3D_DEVCAP_DXFMT_P8 196
+#define SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP 197
+#define SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM 198
+#define SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM 199
+#define SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS 200
+#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB 201
+#define SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS 202
+#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB 203
+#define SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS 204
+#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB 205
+#define SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS 206
+#define SVGA3D_DEVCAP_DXFMT_ATI1 207
+#define SVGA3D_DEVCAP_DXFMT_BC4_SNORM 208
+#define SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS 209
+#define SVGA3D_DEVCAP_DXFMT_ATI2 210
+#define SVGA3D_DEVCAP_DXFMT_BC5_SNORM 211
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM 212
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS 213
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB 214
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS 215
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB 216
+#define SVGA3D_DEVCAP_DXFMT_Z_DF16 217
+#define SVGA3D_DEVCAP_DXFMT_Z_DF24 218
+#define SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT 219
+#define SVGA3D_DEVCAP_DXFMT_YV12 220
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT 221
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT 222
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM 223
+#define SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT 224
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM 225
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM 226
+#define SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT 227
+#define SVGA3D_DEVCAP_DXFMT_R16G16_UNORM 228
+#define SVGA3D_DEVCAP_DXFMT_R16G16_SNORM 229
+#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT 230
+#define SVGA3D_DEVCAP_DXFMT_R8G8_SNORM 231
+#define SVGA3D_DEVCAP_DXFMT_R16_FLOAT 232
+#define SVGA3D_DEVCAP_DXFMT_D16_UNORM 233
+#define SVGA3D_DEVCAP_DXFMT_A8_UNORM 234
+#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM 235
+#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM 236
+#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM 237
+#define SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM 238
+#define SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM 239
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM 240
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM 241
+#define SVGA3D_DEVCAP_DXFMT_BC4_UNORM 242
+#define SVGA3D_DEVCAP_DXFMT_BC5_UNORM 243
+
+/*
+ * Advertises shaderModel 4.1 support, independent blend states,
+ * cube-map arrays, and a higher limit on vertex input registers.
+ *
+ * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+ */
+#define SVGA3D_DEVCAP_SM41 244
+#define SVGA3D_DEVCAP_MULTISAMPLE_2X 245
+#define SVGA3D_DEVCAP_MULTISAMPLE_4X 246
+
+/*
+ * Indicates that the device has rendering support for
+ * the full multisample quality. If this cap is not present,
+ * the host may or may not support full quality rendering.
+ *
+ * See also SVGA_REG_MS_HINT_RESOLVED.
+ */
+#define SVGA3D_DEVCAP_MS_FULL_QUALITY 247
+
+/*
+ * Advertises support for the SVGA3D LogicOps commands.
+ */
+#define SVGA3D_DEVCAP_LOGICOPS 248
+
+/*
+ * Advertises support for using logicOps in the DXBlendStates.
+ */
+#define SVGA3D_DEVCAP_LOGIC_BLENDOPS 249
+
+/*
+ * Note that the DXFMT range is now non-contiguous.
+ */
+#define SVGA3D_DEVCAP_RESERVED_1 250
+#define SVGA3D_DEVCAP_DXFMT_BC6H_TYPELESS 251
+#define SVGA3D_DEVCAP_DXFMT_BC6H_UF16 252
+#define SVGA3D_DEVCAP_DXFMT_BC6H_SF16 253
+#define SVGA3D_DEVCAP_DXFMT_BC7_TYPELESS 254
+#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM 255
+#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM_SRGB 256
+#define SVGA3D_DEVCAP_RESERVED_2 257
+
+#define SVGA3D_DEVCAP_SM5 258
+#define SVGA3D_DEVCAP_MULTISAMPLE_8X 259
+
+/* This must be the last index. */
+#define SVGA3D_DEVCAP_MAX 260
/*
* Bit definitions for DXFMT devcaps
@@ -472,10 +501,10 @@ typedef enum {
#define SVGA3D_DXFMT_MAX (1 << 10)
typedef union {
- Bool b;
+ SVGA3dBool b;
uint32 u;
- int32 i;
- float f;
+ int32 i;
+ float f;
} SVGA3dDevCapResult;
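A hedged usage sketch for the union above: a devcap query returns a raw
32-bit value whose interpretation depends on the cap index, so drivers read
it back through the matching union member. svga_query_devcap() is a
hypothetical helper wrapping the SVGA_REG_DEV_CAP write/read sequence.

static float query_devcap_float(uint32 cap_index)
{
	SVGA3dDevCapResult result;

	result.u = svga_query_devcap(cap_index);
	return result.f; /* reinterpret the 32-bit payload as float */
}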
#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 7a49c94df221..f703ac2b1768 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc.
+ * Copyright 2012-2019 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -118,12 +118,14 @@ typedef uint8 SVGA3dMultisampleRastEnable;
#define SVGA3D_DX_MAX_SRVIEWS 128
#define SVGA3D_DX_MAX_CONSTBUFFERS 16
#define SVGA3D_DX_MAX_SAMPLERS 16
+#define SVGA3D_DX_MAX_CLASS_INSTANCES 253
#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))
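For reference, the binding-size expression above works out to
4096 * 4 * 4 = 65536 bytes (64 KiB), i.e. room for 4096 four-component
32-bit constant registers, assuming the conventional vec4 constant layout.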
typedef uint32 SVGA3dShaderResourceViewId;
typedef uint32 SVGA3dRenderTargetViewId;
typedef uint32 SVGA3dDepthStencilViewId;
+typedef uint32 SVGA3dUAViewId;
typedef uint32 SVGA3dShaderId;
typedef uint32 SVGA3dElementLayoutId;
@@ -145,6 +147,17 @@ typedef union {
float value[4];
} SVGA3dRGBAFloat;
+typedef union {
+ struct {
+ uint32 r;
+ uint32 g;
+ uint32 b;
+ uint32 a;
+ };
+
+ uint32 value[4];
+} SVGA3dRGBAUint32;
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -249,6 +262,39 @@ struct SVGA3dCmdDXSetShader {
#include "vmware_pack_end.h"
SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
+typedef union {
+ struct {
+ uint32 cbOffset : 12;
+ uint32 cbId : 4;
+ uint32 baseSamp : 4;
+ uint32 baseTex : 7;
+ uint32 reserved : 5;
+ };
+ uint32 value;
+} SVGA3dIfaceData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderIface {
+ SVGA3dShaderType type;
+ uint32 numClassInstances;
+ uint32 index;
+ uint32 iface;
+ SVGA3dIfaceData data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderIface; /* SVGA_3D_CMD_DX_SET_SHADER_IFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShaderIface {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShaderIface; /* SVGA_3D_CMD_DX_BIND_SHADER_IFACE */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSamplers {
@@ -306,6 +352,26 @@ SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstancedIndirect {
+ SVGA3dSurfaceId argsBufferSid;
+ uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstancedIndirect;
+/* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstancedIndirect {
+ SVGA3dSurfaceId argsBufferSid;
+ uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstancedIndirect;
+/* SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDrawAuto {
uint32 pad0;
}
@@ -314,6 +380,27 @@ SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDispatch {
+ uint32 threadGroupCountX;
+ uint32 threadGroupCountY;
+ uint32 threadGroupCountZ;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDispatch;
+/* SVGA_3D_CMD_DX_DISPATCH */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDispatchIndirect {
+ SVGA3dSurfaceId argsBufferSid;
+ uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDispatchIndirect;
+/* SVGA_3D_CMD_DX_DISPATCH_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetInputLayout {
SVGA3dElementLayoutId elementLayoutId;
}
@@ -525,7 +612,7 @@ struct MKS3dDXSOState {
uint32 offset; /* Starting offset */
uint32 intOffset; /* Internal offset */
uint32 vertexCount; /* vertices written */
- uint32 sizeInBytes; /* max bytes to write */
+ uint32 dead;
}
#include "vmware_pack_end.h"
SVGA3dDXSOState;
@@ -786,6 +873,31 @@ struct SVGA3dCmdDXTransferFromBuffer {
SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
+#define SVGA3D_TRANSFER_TO_BUFFER_READBACK (1 << 0)
+#define SVGA3D_TRANSFER_TO_BUFFER_FLAGS_MASK (1 << 0)
+typedef uint32 SVGA3dTransferToBufferFlags;
+
+/*
+ * Raw byte-wise transfer of the requested box from another surface
+ * into a buffer surface. Supported if SVGA_CAP_DX2 is set. This
+ * command does not take a context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferToBuffer {
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dBox srcBox;
+ SVGA3dSurfaceId destSid;
+ uint32 destOffset;
+ uint32 destPitch;
+ uint32 destSlicePitch;
+ SVGA3dTransferToBufferFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferToBuffer; /* SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER */
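A hedged sketch of the destination addressing implied by the destOffset,
destPitch and destSlicePitch fields above. This is one plausible reading
(the usual pitch/slice-pitch scheme); the device documentation, not this
header, defines the authoritative layout.

static uint32 dest_byte_offset(const SVGA3dCmdDXTransferToBuffer *cmd,
			       uint32 x, uint32 y, uint32 z,
			       uint32 bytes_per_block)
{
	return cmd->destOffset + z * cmd->destSlicePitch +
	       y * cmd->destPitch + x * bytes_per_block;
}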
+
+
/*
 * Raw byte-wise transfer of the requested box from a buffer surface
 * into another surface. Supported if SVGA3D_DEVCAP_DXCONTEXT is set.
@@ -905,6 +1017,20 @@ typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetHSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetDSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetCSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET */
+
+
+#define SVGA3D_BUFFEREX_SRV_RAW (1 << 0)
+#define SVGA3D_BUFFEREX_SRV_FLAGS_MAX (1 << 1)
+#define SVGA3D_BUFFEREX_SRV_FLAGS_MASK (SVGA3D_BUFFEREX_SRV_FLAGS_MAX - 1)
+typedef uint32 SVGA3dBufferExFlags;
typedef
#include "vmware_pack_begin.h"
@@ -925,7 +1051,7 @@ struct {
struct {
uint32 firstElement;
uint32 numElements;
- uint32 flags;
+ SVGA3dBufferExFlags flags;
uint32 pad0;
} bufferex;
};
@@ -1072,6 +1198,32 @@ struct SVGA3dCmdDXDefineDepthStencilView {
SVGA3dCmdDXDefineDepthStencilView;
/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
+/*
+ * Version 2 needed in order to start validating and using the flags
+ * field. Unfortunately the device wasn't validating or using the
+ * flags field and the driver wasn't initializing it in shipped code,
+ * so a new version of the command is needed to allow that code to
+ * continue to work.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView_v2 {
+ SVGA3dDepthStencilViewId depthStencilViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ SVGA3DCreateDSViewFlags flags;
+ uint8 pad0;
+ uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView_v2;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyDepthStencilView {
@@ -1081,6 +1233,138 @@ struct SVGA3dCmdDXDestroyDepthStencilView {
SVGA3dCmdDXDestroyDepthStencilView;
/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
+
+#define SVGA3D_UABUFFER_RAW (1 << 0)
+#define SVGA3D_UABUFFER_APPEND (1 << 1)
+#define SVGA3D_UABUFFER_COUNTER (1 << 2)
+typedef uint32 SVGA3dUABufferFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ union {
+ struct {
+ uint32 firstElement;
+ uint32 numElements;
+ SVGA3dUABufferFlags flags;
+ uint32 padding0;
+ uint32 padding1;
+ } buffer;
+ struct {
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ uint32 padding0;
+ uint32 padding1;
+ } tex; /* 1d, 2d */
+ struct {
+ uint32 mipSlice;
+ uint32 firstW;
+ uint32 wSize;
+ uint32 padding0;
+ uint32 padding1;
+ } tex3D;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dUAViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ SVGA3dUAViewDesc desc;
+ uint32 structureCount;
+ uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXUAViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineUAView {
+ SVGA3dUAViewId uaViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+
+ SVGA3dUAViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineUAView;
+/* SVGA_3D_CMD_DX_DEFINE_UA_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyUAView {
+ SVGA3dUAViewId uaViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyUAView;
+/* SVGA_3D_CMD_DX_DESTROY_UA_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearUAViewUint {
+ SVGA3dUAViewId uaViewId;
+ SVGA3dRGBAUint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearUAViewUint;
+/* SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearUAViewFloat {
+ SVGA3dUAViewId uaViewId;
+ SVGA3dRGBAFloat value;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearUAViewFloat;
+/* SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCopyStructureCount {
+ SVGA3dUAViewId srcUAViewId;
+ SVGA3dSurfaceId destSid;
+ uint32 destByteOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCopyStructureCount;
+/* SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStructureCount {
+ SVGA3dUAViewId uaViewId;
+ uint32 structureCount;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStructureCount;
+/* SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetUAViews {
+ uint32 uavSpliceIndex;
+   /* Followed by a variable number of SVGA3dUAViewId values. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetUAViews; /* SVGA_3D_CMD_DX_SET_UA_VIEWS */
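A hedged sketch of how a driver might encode this variable-length command,
assuming the usual SVGA3dCmdHeader { id, size } framing used for SVGA 3D
commands; buf must be large enough for the header, the fixed body and
num_views trailing view ids.

static void encode_set_ua_views(void *buf, uint32 splice_index,
				const SVGA3dUAViewId *ids, uint32 num_views)
{
	SVGA3dCmdHeader *header = buf;
	SVGA3dCmdDXSetUAViews *body = (SVGA3dCmdDXSetUAViews *)(header + 1);
	SVGA3dUAViewId *view_ids = (SVGA3dUAViewId *)(body + 1);
	uint32 i;

	header->id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
	header->size = sizeof(*body) + num_views * sizeof(*view_ids);
	body->uavSpliceIndex = splice_index;
	for (i = 0; i < num_views; i++)
		view_ids[i] = ids[i];
}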
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCSUAViews {
+ uint32 startIndex;
+   /* Followed by a variable number of SVGA3dUAViewId values. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCSUAViews; /* SVGA_3D_CMD_DX_SET_CS_UA_VIEWS */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dInputElementDesc {
@@ -1099,7 +1383,7 @@ typedef
struct {
uint32 elid;
uint32 numDescs;
- SVGA3dInputElementDesc desc[32];
+ SVGA3dInputElementDesc descs[32];
uint32 pad[62];
}
#include "vmware_pack_end.h"
@@ -1261,7 +1545,8 @@ struct {
uint8 lineStippleEnable;
uint8 lineStippleFactor;
uint16 lineStipplePattern;
- uint32 forcedSampleCount;
+ uint8 forcedSampleCount;
+ uint8 mustBeZero[3];
}
#include "vmware_pack_end.h"
SVGACOTableDXRasterizerStateEntry;
@@ -1352,6 +1637,71 @@ struct SVGA3dCmdDXDestroySamplerState {
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
+
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_UNDEFINED 0
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_POSITION 1
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_CLIP_DISTANCE 2
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_CULL_DISTANCE 3
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_RENDER_TARGET_ARRAY_INDEX 4
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_VIEWPORT_ARRAY_INDEX 5
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_VERTEX_ID 6
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_PRIMITIVE_ID 7
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_INSTANCE_ID 8
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_IS_FRONT_FACE 9
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_SAMPLE_INDEX 10
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_0_EDGE_TESSFACTOR 11
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_0_EDGE_TESSFACTOR 12
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_1_EDGE_TESSFACTOR 13
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_1_EDGE_TESSFACTOR 14
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_INSIDE_TESSFACTOR 15
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_INSIDE_TESSFACTOR 16
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_U_EQ_0_EDGE_TESSFACTOR 17
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_V_EQ_0_EDGE_TESSFACTOR 18
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_W_EQ_0_EDGE_TESSFACTOR 19
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_INSIDE_TESSFACTOR 20
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DETAIL_TESSFACTOR 21
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DENSITY_TESSFACTOR 22
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_MAX 23
+typedef uint32 SVGA3dDXSignatureSemanticName;
+
+#define SVGADX_SIGNATURE_REGISTER_COMPONENT_UNKNOWN 0
+typedef uint32 SVGA3dDXSignatureRegisterComponentType;
+
+#define SVGADX_SIGNATURE_MIN_PRECISION_DEFAULT 0
+typedef uint32 SVGA3dDXSignatureMinPrecision;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSignatureEntry {
+ uint32 registerIndex;
+ SVGA3dDXSignatureSemanticName semanticName;
+ uint32 mask; /* Lower 4 bits represent X, Y, Z, W channels */
+ SVGA3dDXSignatureRegisterComponentType componentType;
+ SVGA3dDXSignatureMinPrecision minPrecision;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXShaderSignatureEntry;
+
+#define SVGADX_SIGNATURE_HEADER_VERSION_0 0x08a92d12
+
+/*
+ * The SVGA3dDXSignatureHeader structure is added after the shader
+ * body in the mob that is bound to the shader. It is followed by the
+ * specified number of SVGA3dDXSignatureEntry structures for each of
+ * the three types of signatures in the order (input, output, patch
+ * constants).
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSignatureHeader {
+ uint32 headerVersion;
+ uint32 numInputSignatures;
+ uint32 numOutputSignatures;
+ uint32 numPatchConstantSignatures;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXShaderSignatureHeader;
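A hedged sketch of walking the layout described above: the header sits just
past the shader body in the mob, and the three entry tables follow it back
to back in (input, output, patch-constant) order.

static void locate_signatures(const SVGA3dDXShaderSignatureHeader *hdr,
			      const SVGA3dDXShaderSignatureEntry **in,
			      const SVGA3dDXShaderSignatureEntry **out,
			      const SVGA3dDXShaderSignatureEntry **patch)
{
	const SVGA3dDXShaderSignatureEntry *first =
		(const SVGA3dDXShaderSignatureEntry *)(hdr + 1);

	*in = first;
	*out = *in + hdr->numInputSignatures;
	*patch = *out + hdr->numOutputSignatures;
}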
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineShader {
@@ -1415,7 +1765,8 @@ SVGA3dCmdDXCondBindAllShader; /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
/*
* The maximum number of streamout decl's in each streamout entry.
*/
-#define SVGA3D_MAX_STREAMOUT_DECLS 64
+#define SVGA3D_MAX_DX10_STREAMOUT_DECLS 64
+#define SVGA3D_MAX_STREAMOUT_DECLS 512
typedef
#include "vmware_pack_begin.h"
@@ -1434,10 +1785,16 @@ typedef
#include "vmware_pack_begin.h"
struct SVGAOTableStreamOutputEntry {
uint32 numOutputStreamEntries;
- SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+ SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS];
uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
uint32 rasterizedStream;
- uint32 pad[250];
+ uint32 numOutputStreamStrides;
+ uint32 mobid;
+ uint32 offsetInBytes;
+ uint8 usesMob;
+ uint8 pad0;
+ uint16 pad1;
+ uint32 pad2[246];
}
#include "vmware_pack_end.h"
SVGACOTableDXStreamOutputEntry;
@@ -1447,13 +1804,47 @@ typedef
struct SVGA3dCmdDXDefineStreamOutput {
SVGA3dStreamOutputId soid;
uint32 numOutputStreamEntries;
- SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+ SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS];
uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
uint32 rasterizedStream;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
+/*
+ * Version 2 needed in order to start validating and using the
+ * rasterizedStream field. Unfortunately the device wasn't validating
+ * or using this field and the driver wasn't initializing it in shipped
+ * code, so a new version of the command is needed to allow that code
+ * to continue to work. This version also adds the new
+ * numOutputStreamStrides field.
+ */
+
+#define SVGA3D_DX_SO_NO_RASTERIZED_STREAM 0xFFFFFFFF
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutputWithMob {
+ SVGA3dStreamOutputId soid;
+ uint32 numOutputStreamEntries;
+ uint32 numOutputStreamStrides;
+ uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+ uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutputWithMob;
+/* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindStreamOutput {
+ SVGA3dStreamOutputId soid;
+ uint32 mobid;
+ uint32 offsetInBytes;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindStreamOutput; /* SVGA_3D_CMD_DX_BIND_STREAMOUTPUT */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyStreamOutput {
@@ -1472,6 +1863,15 @@ SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetMinLOD {
+ SVGA3dSurfaceId sid;
+ float minLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetMinLOD; /* SVGA_3D_CMD_DX_SET_MIN_LOD */
+
+typedef
+#include "vmware_pack_begin.h"
struct {
uint64 value;
uint32 mobId;
@@ -1581,33 +1981,38 @@ struct SVGADXContextMobFormat {
uint32 rasterizerStateId;
uint32 depthStencilViewId;
uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
- uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
} renderState;
+ uint32 pad0[8];
+
struct {
uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
uint32 soid;
} streamOut;
- uint32 pad0[11];
+
+ uint32 pad1[10];
+
+ uint32 uavSpliceIndex;
uint8 numViewports;
uint8 numScissorRects;
- uint16 pad1[1];
+ uint16 pad2[1];
- uint32 pad2[3];
+ uint32 pad3[3];
SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
- uint32 pad3[32];
+ uint32 pad4[32];
SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
- uint32 pad4[64];
+ uint32 pad5[64];
struct {
uint32 queryID;
uint32 value;
} predication;
- uint32 pad5[2];
+ SVGAMobId shaderIfaceMobid;
+ uint32 shaderIfaceOffset;
struct {
uint32 shaderId;
SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
@@ -1619,11 +2024,38 @@ struct SVGADXContextMobFormat {
SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
- uint32 pad7[380];
+
+ uint32 pad7[64];
+
+ uint32 uaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS];
+ uint32 csuaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS];
+
+ uint32 pad8[188];
}
#include "vmware_pack_end.h"
SVGADXContextMobFormat;
+/*
+ * There is conflicting documentation on max class instances (253 vs 256). The
+ * lower value is the one used throughout the device, but since the mob
+ * format is harder to grow later if needed, conservatively use the higher
+ * one here.
+ */
+#define SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED 256
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXShaderIfaceMobFormat {
+ struct {
+ uint32 numClassInstances;
+ uint32 iface[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED];
+ SVGA3dIfaceData data[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED];
+ } shaderIfaceState[SVGA3D_NUM_SHADERTYPE];
+
+ uint32 pad0[1018];
+}
+#include "vmware_pack_end.h"
+SVGADXShaderIfaceMobFormat;
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXTempSetContext {
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index b22a67f15660..f4375a41b3aa 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc.
+ * Copyright 2007-2019 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -40,11 +40,25 @@
#include "includeCheck.h"
#define SVGA3D_NUM_CLIPPLANES 6
+#define SVGA3D_MAX_CONTEXT_IDS 256
+#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+
+/*
+ * While there are separate bind-points for RenderTargetViews and
+ * UnorderedAccessViews in a DXContext, there is in fact one shared
+ * semantic space that the guest driver can use on any given draw call.
+ * So there are really only 8 slots that can be split between them, with the
+ * spliceIndex controlling where the UAVs sit in the collapsed array.
+ */
#define SVGA3D_MAX_RENDER_TARGETS 8
#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS)
#define SVGA3D_MAX_UAVIEWS 8
-#define SVGA3D_MAX_CONTEXT_IDS 256
-#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+#define SVGA3D_DX11_1_MAX_UAVIEWS 64
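A hedged sketch of the shared RTV/UAV slot space described above: UAV i
occupies slot (uavSpliceIndex + i) in the collapsed 8-slot array, so a
binding only fits if the splice index plus the UAV count stays within
SVGA3D_MAX_UAVIEWS. This mirrors the comment; the device defines the
authoritative behavior.

static inline bool uav_binding_fits(uint32 splice_index, uint32 num_uavs)
{
	return splice_index + num_uavs <= SVGA3D_MAX_UAVIEWS;
}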
+
+/*
+ * Maximum canonical size of a surface in host-backed mode (pre-GBObjects).
+ */
+#define SVGA3D_HB_MAX_SURFACE_SIZE MBYTES_2_BYTES(128)
/*
* Maximum ID a shader can be assigned on a given context.
@@ -59,6 +73,8 @@
#define SVGA3D_NUM_TEXTURE_UNITS 32
#define SVGA3D_NUM_LIGHTS 8
+#define SVGA3D_MAX_VIDEOPROCESSOR_SAMPLERS 32
+
/*
* Maximum size in dwords of shader text the SVGA device will allow.
* Currently 8 MB.
@@ -67,6 +83,11 @@
#define SVGA3D_MAX_SHADER_MEMORY (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
sizeof(uint32))
+/*
+ * The maximum value of threadGroupCount in each dimension.
+ */
+#define SVGA3D_MAX_SHADER_THREAD_GROUPS 65535
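A hedged validation sketch using the limit above; SVGA3dCmdDXDispatch is
the dispatch command body defined in svga3d_dx.h.

static inline bool dispatch_counts_valid(const SVGA3dCmdDXDispatch *cmd)
{
	return cmd->threadGroupCountX <= SVGA3D_MAX_SHADER_THREAD_GROUPS &&
	       cmd->threadGroupCountY <= SVGA3D_MAX_SHADER_THREAD_GROUPS &&
	       cmd->threadGroupCountZ <= SVGA3D_MAX_SHADER_THREAD_GROUPS;
}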
+
#define SVGA3D_MAX_CLIP_PLANES 6
/*
@@ -85,7 +106,9 @@
/*
* Maximum number of array indexes in a GB surface (with DX enabled).
*/
-#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+#define SVGA3D_SM4_MAX_SURFACE_ARRAYSIZE 512
+#define SVGA3D_SM5_MAX_SURFACE_ARRAYSIZE 2048
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE SVGA3D_SM5_MAX_SURFACE_ARRAYSIZE
/*
* The maximum number of vertex arrays we're guaranteed to support in
@@ -99,4 +122,9 @@
*/
#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+/*
+ * The maximum number of samples that can be contained in a surface.
+ */
+#define SVGA3D_MAX_SAMPLES 8
+
#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 61414f105c67..4db25bd9fa22 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -131,6 +131,8 @@ enum svga3d_block_desc {
SVGA3DBLOCKDESC_BC3 = 1 << 26,
SVGA3DBLOCKDESC_BC4 = 1 << 27,
SVGA3DBLOCKDESC_BC5 = 1 << 28,
+ SVGA3DBLOCKDESC_BC6H = 1 << 29,
+ SVGA3DBLOCKDESC_BC7 = 1 << 30,
SVGA3DBLOCKDESC_A_UINT = SVGA3DBLOCKDESC_ALPHA |
SVGA3DBLOCKDESC_UINT |
@@ -290,6 +292,18 @@ enum svga3d_block_desc {
SVGA3DBLOCKDESC_COMP_UNORM,
SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
SVGA3DBLOCKDESC_COMP_SNORM,
+ SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS = SVGA3DBLOCKDESC_BC6H |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC6H_COMP_UF16 = SVGA3DBLOCKDESC_BC6H |
+ SVGA3DBLOCKDESC_COMPRESSED,
+ SVGA3DBLOCKDESC_BC6H_COMP_SF16 = SVGA3DBLOCKDESC_BC6H |
+ SVGA3DBLOCKDESC_COMPRESSED,
+ SVGA3DBLOCKDESC_BC7_COMP_TYPELESS = SVGA3DBLOCKDESC_BC7 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC7_COMP_UNORM = SVGA3DBLOCKDESC_BC7 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC7_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
SVGA3DBLOCKDESC_PLANAR_YUV |
@@ -494,7 +508,7 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
- {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
+ {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 3, 3,
{{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
@@ -604,7 +618,7 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+ {SVGA3D_FORMAT_DEAD2, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 4, 4,
{{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
@@ -1103,6 +1117,46 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{4, 4, 1}, 16, 16,
{{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_B4G4R4A4_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+ {1, 1, 1}, 2, 2,
+ {{4}, {4}, {4}, {4}},
+ {{0}, {4}, {8}, {12}}},
+
+ {SVGA3D_BC6H_TYPELESS, SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC6H_UF16, SVGA3DBLOCKDESC_BC6H_COMP_UF16,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC6H_SF16, SVGA3DBLOCKDESC_BC6H_COMP_SF16,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC7_TYPELESS, SVGA3DBLOCKDESC_BC7_COMP_TYPELESS,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC7_UNORM, SVGA3DBLOCKDESC_BC7_COMP_UNORM,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC7_UNORM_SRGB, SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+ {1, 1, 1}, 4, 4,
+ {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
};
static inline u32 clamped_umul32(u32 a, u32 b)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 308370665a8e..77e338a65791 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -116,6 +116,19 @@ SVGA3dBox;
typedef
#include "vmware_pack_begin.h"
struct {
+ int32 x;
+ int32 y;
+ int32 z;
+ int32 w;
+ int32 h;
+ int32 d;
+}
+#include "vmware_pack_end.h"
+SVGA3dSignedBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
uint32 x;
uint32 y;
uint32 z;
@@ -198,8 +211,7 @@ typedef enum SVGA3dSurfaceFormat {
/* Planar video formats */
SVGA3D_NV12 = 44,
- /* Video format with alpha */
- SVGA3D_AYUV = 45,
+ SVGA3D_FORMAT_DEAD2 = 45,
SVGA3D_R32G32B32A32_TYPELESS = 46,
SVGA3D_R32G32B32A32_UINT = 47,
@@ -305,6 +317,18 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_B8G8R8X8_UNORM = 142,
SVGA3D_BC4_UNORM = 143,
SVGA3D_BC5_UNORM = 144,
+ SVGA3D_B4G4R4A4_UNORM = 145,
+
+ /* DX11 compressed formats */
+ SVGA3D_BC6H_TYPELESS = 146,
+ SVGA3D_BC6H_UF16 = 147,
+ SVGA3D_BC6H_SF16 = 148,
+ SVGA3D_BC7_TYPELESS = 149,
+ SVGA3D_BC7_UNORM = 150,
+ SVGA3D_BC7_UNORM_SRGB = 151,
+
+ /* Video format with alpha */
+ SVGA3D_AYUV = 152,
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
@@ -326,10 +350,10 @@ typedef enum SVGA3dSurfaceFormat {
#define SVGA3D_SURFACE_HINT_RENDERTARGET (CONST64U(1) << 6)
#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL (CONST64U(1) << 7)
#define SVGA3D_SURFACE_HINT_WRITEONLY (CONST64U(1) << 8)
-#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_DEAD2 (CONST64U(1) << 9)
#define SVGA3D_SURFACE_AUTOGENMIPMAPS (CONST64U(1) << 10)
-#define SVGA3D_SURFACE_DECODE_RENDERTARGET (CONST64U(1) << 11)
+#define SVGA3D_SURFACE_DEAD1 (CONST64U(1) << 11)
/*
 * Is this surface using a base-level pitch for its mob backing?
@@ -387,7 +411,7 @@ typedef enum SVGA3dSurfaceFormat {
 * Setting this flag allows this surface to be used with the
* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
* buffer surfaces, and no bind flags are allowed to be set on surfaces
- * with this flag.
+ * with this flag except SVGA3D_SURFACE_TRANSFER_TO_BUFFER.
*/
#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
@@ -402,7 +426,31 @@ typedef enum SVGA3dSurfaceFormat {
*/
#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
-#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 33)
+/*
+ * Specifies that the surface is allowed to be bound to a UAView.
+ */
+#define SVGA3D_SURFACE_BIND_UAVIEW (CONST64U(1) << 33)
+
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER command. It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag except SVGA3D_SURFACE_TRANSFER_FROM_BUFFER.
+ */
+#define SVGA3D_SURFACE_TRANSFER_TO_BUFFER (CONST64U(1) << 34)
+
+#define SVGA3D_SURFACE_BIND_LOGICOPS (CONST64U(1) << 35)
+
+/*
+ * Optional flags for use with SVGA3D_SURFACE_BIND_UAVIEW
+ */
+#define SVGA3D_SURFACE_BIND_RAW_VIEWS (CONST64U(1) << 36)
+#define SVGA3D_SURFACE_BUFFER_STRUCTURED (CONST64U(1) << 37)
+
+#define SVGA3D_SURFACE_DRAWINDIRECT_ARGS (CONST64U(1) << 38)
+#define SVGA3D_SURFACE_RESOURCE_CLAMP (CONST64U(1) << 39)
+
+#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 40)
/*
* Surface flags types:
@@ -428,17 +476,25 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
- SVGA3D_SURFACE_MULTISAMPLE \
+ SVGA3D_SURFACE_RESERVED1 | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK \
( SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_RESERVED1 | \
SVGA3D_SURFACE_MULTISAMPLE \
)
#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
@@ -448,7 +504,14 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
- SVGA3D_SURFACE_MULTISAMPLE \
+ SVGA3D_SURFACE_RESERVED1 | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK \
@@ -456,6 +519,7 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_RESERVED1 | \
SVGA3D_SURFACE_MULTISAMPLE \
)
@@ -474,7 +538,14 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
- SVGA3D_SURFACE_MULTISAMPLE \
+ SVGA3D_SURFACE_RESERVED1 | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK \
@@ -482,10 +553,11 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
+ SVGA3D_SURFACE_DEAD2 | \
SVGA3D_SURFACE_ARRAY | \
SVGA3D_SURFACE_MULTISAMPLE | \
- SVGA3D_SURFACE_MOB_PITCH \
+ SVGA3D_SURFACE_MOB_PITCH | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK \
@@ -494,14 +566,23 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
SVGA3D_SURFACE_SCREENTARGET | \
- SVGA3D_SURFACE_MOB_PITCH \
+ SVGA3D_SURFACE_MOB_PITCH | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_RESERVED1 | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS \
)
-#define SVGA3D_SURFACE_DX_ONLY_MASK \
- ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
- SVGA3D_SURFACE_STAGING_UPLOAD | \
- SVGA3D_SURFACE_STAGING_DOWNLOAD | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+#define SVGA3D_SURFACE_DX_ONLY_MASK \
+ ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER \
)
#define SVGA3D_SURFACE_STAGING_MASK \
@@ -516,9 +597,135 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
SVGA3D_SURFACE_BIND_RENDER_TARGET | \
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
- SVGA3D_SURFACE_BIND_STREAM_OUTPUT \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS \
+ )
+
+#define SVGA3D_SURFACE_VADECODE_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_HINT_STATIC | \
+ SVGA3D_SURFACE_HINT_DYNAMIC | \
+ SVGA3D_SURFACE_HINT_INDEXBUFFER | \
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER | \
+ SVGA3D_SURFACE_HINT_TEXTURE | \
+ SVGA3D_SURFACE_HINT_RENDERTARGET | \
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \
+ SVGA3D_SURFACE_HINT_WRITEONLY | \
+ SVGA3D_SURFACE_DEAD2 | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_HINT_RT_LOCKABLE | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_RENDER_TARGET | \
+ SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_INACTIVE | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
+ )
+
+#define SVGA3D_SURFACE_VAPROCESSFRAME_OUTPUT_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_HINT_INDEXBUFFER | \
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER | \
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \
+ SVGA3D_SURFACE_DEAD2 | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_INACTIVE | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_VADECODE | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
+ )
+
+#define SVGA3D_SURFACE_VAPROCESSFRAME_INPUT_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_HINT_INDEXBUFFER | \
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER | \
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \
+ SVGA3D_SURFACE_DEAD2 | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
+ )
+
+#define SVGA3D_SURFACE_LOGICOPS_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_DEAD2 | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_VADECODE | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
+#define SVGA3D_BUFFER_STRUCTURED_STRIDE_MAX 2048
+
+
+/*
+ * These are really the D3DFORMAT_OP defines from the WDK. We need
+ * them so that we can query the host for the supported surface
+ * operations (when we're using the D3D backend, in particular),
+ * and so we can send those operations to the guest.
+ */
typedef enum {
SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
@@ -1338,7 +1545,40 @@ typedef enum {
SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8,
SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9,
SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10,
- SVGA3D_PRIMITIVE_MAX
+ SVGA3D_PRIMITIVE_DX10_MAX = 11,
+ SVGA3D_PRIMITIVE_1_CONTROL_POINT_PATCH = 11,
+ SVGA3D_PRIMITIVE_2_CONTROL_POINT_PATCH = 12,
+ SVGA3D_PRIMITIVE_3_CONTROL_POINT_PATCH = 13,
+ SVGA3D_PRIMITIVE_4_CONTROL_POINT_PATCH = 14,
+ SVGA3D_PRIMITIVE_5_CONTROL_POINT_PATCH = 15,
+ SVGA3D_PRIMITIVE_6_CONTROL_POINT_PATCH = 16,
+ SVGA3D_PRIMITIVE_7_CONTROL_POINT_PATCH = 17,
+ SVGA3D_PRIMITIVE_8_CONTROL_POINT_PATCH = 18,
+ SVGA3D_PRIMITIVE_9_CONTROL_POINT_PATCH = 19,
+ SVGA3D_PRIMITIVE_10_CONTROL_POINT_PATCH = 20,
+ SVGA3D_PRIMITIVE_11_CONTROL_POINT_PATCH = 21,
+ SVGA3D_PRIMITIVE_12_CONTROL_POINT_PATCH = 22,
+ SVGA3D_PRIMITIVE_13_CONTROL_POINT_PATCH = 23,
+ SVGA3D_PRIMITIVE_14_CONTROL_POINT_PATCH = 24,
+ SVGA3D_PRIMITIVE_15_CONTROL_POINT_PATCH = 25,
+ SVGA3D_PRIMITIVE_16_CONTROL_POINT_PATCH = 26,
+ SVGA3D_PRIMITIVE_17_CONTROL_POINT_PATCH = 27,
+ SVGA3D_PRIMITIVE_18_CONTROL_POINT_PATCH = 28,
+ SVGA3D_PRIMITIVE_19_CONTROL_POINT_PATCH = 29,
+ SVGA3D_PRIMITIVE_20_CONTROL_POINT_PATCH = 30,
+ SVGA3D_PRIMITIVE_21_CONTROL_POINT_PATCH = 31,
+ SVGA3D_PRIMITIVE_22_CONTROL_POINT_PATCH = 32,
+ SVGA3D_PRIMITIVE_23_CONTROL_POINT_PATCH = 33,
+ SVGA3D_PRIMITIVE_24_CONTROL_POINT_PATCH = 34,
+ SVGA3D_PRIMITIVE_25_CONTROL_POINT_PATCH = 35,
+ SVGA3D_PRIMITIVE_26_CONTROL_POINT_PATCH = 36,
+ SVGA3D_PRIMITIVE_27_CONTROL_POINT_PATCH = 37,
+ SVGA3D_PRIMITIVE_28_CONTROL_POINT_PATCH = 38,
+ SVGA3D_PRIMITIVE_29_CONTROL_POINT_PATCH = 39,
+ SVGA3D_PRIMITIVE_30_CONTROL_POINT_PATCH = 40,
+ SVGA3D_PRIMITIVE_31_CONTROL_POINT_PATCH = 41,
+ SVGA3D_PRIMITIVE_32_CONTROL_POINT_PATCH = 42,
+ SVGA3D_PRIMITIVE_MAX = 43
} SVGA3dPrimitiveType;
typedef enum {
@@ -1442,16 +1682,15 @@ typedef enum {
SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5,
SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6,
SVGA3D_QUERYTYPE_OCCLUSION64 = 7,
- SVGA3D_QUERYTYPE_EVENT = 8,
- SVGA3D_QUERYTYPE_DX10_MAX = 9,
- SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 9,
- SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 10,
- SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 11,
- SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 12,
- SVGA3D_QUERYTYPE_SOP_STREAM0 = 13,
- SVGA3D_QUERYTYPE_SOP_STREAM1 = 14,
- SVGA3D_QUERYTYPE_SOP_STREAM2 = 15,
- SVGA3D_QUERYTYPE_SOP_STREAM3 = 16,
+ SVGA3D_QUERYTYPE_DX10_MAX = 8,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 8,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 9,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 10,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 11,
+ SVGA3D_QUERYTYPE_SOP_STREAM0 = 12,
+ SVGA3D_QUERYTYPE_SOP_STREAM1 = 13,
+ SVGA3D_QUERYTYPE_SOP_STREAM2 = 14,
+ SVGA3D_QUERYTYPE_SOP_STREAM3 = 15,
SVGA3D_QUERYTYPE_MAX
} SVGA3dQueryType;
@@ -1584,28 +1823,33 @@ typedef enum {
SVGA3D_READ_HOST_VRAM = 2,
} SVGA3dTransferType;
-typedef enum {
- SVGA3D_LOGICOP_INVALID = 0,
- SVGA3D_LOGICOP_MIN = 1,
- SVGA3D_LOGICOP_COPY = 1,
- SVGA3D_LOGICOP_NOT = 2,
- SVGA3D_LOGICOP_AND = 3,
- SVGA3D_LOGICOP_OR = 4,
- SVGA3D_LOGICOP_XOR = 5,
- SVGA3D_LOGICOP_NXOR = 6,
- SVGA3D_LOGICOP_ROP3MIN = 30, /* 7-29 are reserved for future logic ops. */
- SVGA3D_LOGICOP_ROP3MAX = (SVGA3D_LOGICOP_ROP3MIN + 255),
- SVGA3D_LOGICOP_MAX = (SVGA3D_LOGICOP_ROP3MAX + 1),
-} SVGA3dLogicOp;
+#define SVGA3D_LOGICOP_INVALID 0
+#define SVGA3D_LOGICOP_MIN 1
+#define SVGA3D_LOGICOP_COPY 1
+#define SVGA3D_LOGICOP_NOT 2
+#define SVGA3D_LOGICOP_AND 3
+#define SVGA3D_LOGICOP_OR 4
+#define SVGA3D_LOGICOP_XOR 5
+#define SVGA3D_LOGICOP_NXOR 6
+#define SVGA3D_LOGICOP_ROP3 7
+#define SVGA3D_LOGICOP_MAX 8
+
+typedef uint16 SVGA3dLogicOp;
+
+#define SVGA3D_LOGICOP_ROP3_INVALID ((uint16) -1)
+#define SVGA3D_LOGICOP_ROP3_MIN 0
+#define SVGA3D_LOGICOP_ROP3_MAX 256
+
+typedef uint16 SVGA3dLogicOpRop3;
typedef
#include "vmware_pack_begin.h"
struct {
union {
struct {
- uint16 function; /* SVGA3dFogFunction */
- uint8 type; /* SVGA3dFogType */
- uint8 base; /* SVGA3dFogBase */
+ uint16 function; // SVGA3dFogFunction
+ uint8 type; // SVGA3dFogType
+ uint8 base; // SVGA3dFogBase
};
uint32 uintValue;
};
@@ -1742,4 +1986,15 @@ typedef enum SVGA3dMSQualityLevel {
SVGA3D_MS_QUALITY_MAX = 2,
} SVGA3dMSQualityLevel;
+/*
+ * Screen Target Update Flags
+ */
+
+typedef enum SVGA3dFrameUpdateType {
+ SVGA3D_FRAME_END = 0,
+ SVGA3D_FRAME_PARTIAL = 1,
+ SVGA3D_FRAME_UNKNOWN = 2,
+ SVGA3D_FRAME_MAX = 3,
+} SVGA3dFrameUpdateType;
+
#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 056f54b35d73..19fb9e3299e7 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -70,8 +70,7 @@ typedef uint32 SVGAMobId;
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
- * cursor bypass mode. This is still supported, but no new guest
- * drivers should use it.
+ * cursor bypass mode.
*/
#define SVGA_CURSOR_ON_HIDE 0x0
#define SVGA_CURSOR_ON_SHOW 0x1
@@ -137,6 +136,17 @@ typedef uint32 SVGAMobId;
#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */
/*
+ * The byte-size is the size of the actual cursor data,
+ * possibly after expanding it to the current bit depth.
+ *
+ * 40K is sufficient memory for two 32-bit planes for a 64 x 64 cursor.
+ *
+ * The dimension limit is a bound on the maximum width or height.
+ */
+#define SVGA_MAX_CURSOR_CMD_BYTES (40 * 1024)
+#define SVGA_MAX_CURSOR_CMD_DIMENSION 1024
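As a quick check on the 40K figure: a 64 x 64 cursor at 32 bpp needs
64 * 64 * 4 = 16384 bytes per plane, so two planes take 32768 bytes, which
leaves roughly 8K of headroom for command headers and padding within
SVGA_MAX_CURSOR_CMD_BYTES.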
+
+/*
* Registers
*/
@@ -169,7 +179,7 @@ enum {
SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
SVGA_REG_GUEST_ID = 23, /* (Deprecated) */
- SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
+ SVGA_REG_DEAD = 24, /* Drivers should never write this. */
SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
@@ -208,7 +218,13 @@ enum {
SVGA_REG_MAX_PRIMARY_MEM = 50,
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
- SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Sugested limit on mob mem */
+ /*
+ * Legacy version of SVGA_REG_GBOBJECT_MEM_SIZE_KB for drivers that
+ * don't know how to convert to a 64-bit byte value without overflowing.
+ * (See SVGA_REG_GBOBJECT_MEM_SIZE_KB).
+ */
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51,
+
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
SVGA_REG_CMD_PREPEND_HIGH = 54,
@@ -218,7 +234,59 @@ enum {
SVGA_REG_BLANK_SCREEN_TARGETS = 58,
SVGA_REG_CAP2 = 59,
SVGA_REG_DEVEL_CAP = 60,
- SVGA_REG_TOP = 61, /* Must be 1 more than the last register */
+
+ /*
+ * Allow the guest to hint to the device which driver is running.
+ *
+ * This should not generally change device behavior, but might be
+ * convenient to work around specific bugs in guest drivers.
+ *
+ * Drivers should first write their id value into SVGA_REG_GUEST_DRIVER_ID,
+ * and then fill out all of the version registers that they have defined.
+ *
+ * After the driver has written all of the registers, it should
+ * then write the value SVGA_REG_GUEST_DRIVER_ID_SUBMIT to the
+ * SVGA_REG_GUEST_DRIVER_ID register, to signal that it has finished.
+ *
+ * The SVGA_REG_GUEST_DRIVER_ID values are defined below by the
+ * SVGARegGuestDriverId enum.
+ *
+ * The SVGA_REG_GUEST_DRIVER_VERSION fields are driver-specific,
+ * but ideally should encode a monotonically increasing number that allows
+ * the device to perform inequality checks against ranges of driver versions.
+ */
+ SVGA_REG_GUEST_DRIVER_ID = 61,
+ SVGA_REG_GUEST_DRIVER_VERSION1 = 62,
+ SVGA_REG_GUEST_DRIVER_VERSION2 = 63,
+ SVGA_REG_GUEST_DRIVER_VERSION3 = 64,
+ SVGA_REG_CURSOR_MOBID = 65,
+ SVGA_REG_CURSOR_MAX_BYTE_SIZE = 66,
+ SVGA_REG_CURSOR_MAX_DIMENSION = 67,
+
+ SVGA_REG_FIFO_CAPS = 68,
+ SVGA_REG_FENCE = 69,
+
+ SVGA_REG_RESERVED1 = 70,
+ SVGA_REG_RESERVED2 = 71,
+ SVGA_REG_RESERVED3 = 72,
+ SVGA_REG_RESERVED4 = 73,
+ SVGA_REG_RESERVED5 = 74,
+ SVGA_REG_SCREENDMA = 75,
+
+ /*
+ * The maximum amount of guest-backed object memory that the device can
+ * have resident at a time. Guest drivers should keep their working set
+ * size below this limit for best performance.
+ *
+ * Note that this value is in kilobytes, and not bytes, because the actual
+ * number of bytes might be larger than can fit in a 32-bit register.
+ *
+ * PLEASE USE A 64-BIT VALUE WHEN CONVERTING THIS INTO BYTES.
+ * (See SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB).
+ */
+ SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76,
+
+ SVGA_REG_TOP = 77, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -229,6 +297,20 @@ enum {
the use of the current SVGA driver. */
};
+
+/*
+ * Values for SVGA_REG_GUEST_DRIVER_ID.
+ */
+typedef enum SVGARegGuestDriverId {
+ SVGA_REG_GUEST_DRIVER_ID_UNKNOWN = 0,
+ SVGA_REG_GUEST_DRIVER_ID_WDDM = 1,
+ SVGA_REG_GUEST_DRIVER_ID_LINUX = 2,
+ SVGA_REG_GUEST_DRIVER_ID_MAX,
+
+ SVGA_REG_GUEST_DRIVER_ID_SUBMIT = MAX_UINT32,
+} SVGARegGuestDriverId;
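A hedged sketch of the registration sequence described above. The
vmw_write_reg() helper is hypothetical and stands in for whatever
register-write primitive the driver uses; the three version values are
driver-defined.

static void svga_register_linux_driver(uint32 major, uint32 minor,
				       uint32 patchlevel)
{
	vmw_write_reg(SVGA_REG_GUEST_DRIVER_ID, SVGA_REG_GUEST_DRIVER_ID_LINUX);
	vmw_write_reg(SVGA_REG_GUEST_DRIVER_VERSION1, major);
	vmw_write_reg(SVGA_REG_GUEST_DRIVER_VERSION2, minor);
	vmw_write_reg(SVGA_REG_GUEST_DRIVER_VERSION3, patchlevel);
	/* Signal that all version registers have been filled in. */
	vmw_write_reg(SVGA_REG_GUEST_DRIVER_ID, SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
}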
+
+
/*
* Guest memory regions (GMRs):
*
@@ -416,7 +498,6 @@ typedef enum {
SVGA_CB_CONTEXT_0 = 0x0,
SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
SVGA_CB_CONTEXT_MAX = 0x2,
- SVGA_CB_CONTEXT_HP_MAX = 0x2,
} SVGACBContext;
@@ -733,9 +814,6 @@ SVGASignedPoint;
* and must not be reused. Those capabilities will never be reported
* by new versions of the SVGA device.
*
- * XXX: Add longer descriptions for each capability, including a list
- * of the new features that each capability provides.
- *
* SVGA_CAP_IRQMASK --
* Provides device interrupts. Adds device register SVGA_REG_IRQMASK
* to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
@@ -842,17 +920,51 @@ SVGASignedPoint;
* Allow the IntraSurfaceCopy command.
*
* SVGA_CAP2_DX2 --
- * Allow the DefineGBSurface_v3, WholeSurfaceCopy.
+ * Allow the DefineGBSurface_v3, WholeSurfaceCopy, WriteZeroSurface, and
+ * HintZeroSurface commands, and the SVGA_REG_GUEST_DRIVER_ID register.
+ *
+ * SVGA_CAP2_GB_MEMSIZE_2 --
+ * Allow the SVGA_REG_GBOBJECT_MEM_SIZE_KB register.
+ *
+ * SVGA_CAP2_SCREENDMA_REG --
+ * Allow the SVGA_REG_SCREENDMA register.
+ *
+ * SVGA_CAP2_OTABLE_PTDEPTH_2 --
+ * Allow 2 level page tables for OTable commands.
+ *
+ * SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT --
+ * Allow a stretch blt from a non-multisampled surface to a multisampled
+ * surface.
+ *
+ * SVGA_CAP2_CURSOR_MOB --
+ * Allow the SVGA_REG_CURSOR_MOBID register.
+ *
+ * SVGA_CAP2_MSHINT --
+ * Allow the SVGA_REG_MSHINT register.
+ *
+ * SVGA_CAP2_DX3 --
+ * Allows the DefineGBSurface_v4 command.
+ * Allows the DXDefineDepthStencilView_v2, DXDefineStreamOutputWithMob,
+ * and DXBindStreamOutput commands if 3D is also available.
+ * Allows the DXPredStagingCopy and DXStagingCopy commands if SM41
+ * is also available.
*
* SVGA_CAP2_RESERVED --
* Reserve the last bit for extending the SVGA capabilities to some
* future mechanisms.
*/
-#define SVGA_CAP2_NONE 0x00000000
-#define SVGA_CAP2_GROW_OTABLE 0x00000001
-#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
-#define SVGA_CAP2_DX2 0x00000004
-#define SVGA_CAP2_RESERVED 0x80000000
+#define SVGA_CAP2_NONE 0x00000000
+#define SVGA_CAP2_GROW_OTABLE 0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
+#define SVGA_CAP2_DX2 0x00000004
+#define SVGA_CAP2_GB_MEMSIZE_2 0x00000008
+#define SVGA_CAP2_SCREENDMA_REG 0x00000010
+#define SVGA_CAP2_OTABLE_PTDEPTH_2 0x00000020
+#define SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT 0x00000040
+#define SVGA_CAP2_CURSOR_MOB 0x00000080
+#define SVGA_CAP2_MSHINT 0x00000100
+#define SVGA_CAP2_DX3 0x00000400
+#define SVGA_CAP2_RESERVED 0x80000000
/*
@@ -875,7 +987,9 @@ typedef enum {
SVGABackdoorCapFifoCaps = 1,
SVGABackdoorCap3dHWVersion = 2,
SVGABackdoorCapDeviceCaps2 = 3,
- SVGABackdoorCapMax = 4,
+ SVGABackdoorCapDevelCaps = 4,
+ SVGABackdoorDevelRenderer = 5,
+ SVGABackdoorCapMax = 6,
} SVGABackdoorCapType;
@@ -1055,103 +1169,80 @@ enum {
/*
* FIFO Synchronization Registers
*
- * This explains the relationship between the various FIFO
- * sync-related registers in IOSpace and in FIFO space.
- *
* SVGA_REG_SYNC --
*
- * The SYNC register can be used in two different ways by the guest:
- *
- * 1. If the guest wishes to fully sync (drain) the FIFO,
- * it will write once to SYNC then poll on the BUSY
- * register. The FIFO is sync'ed once BUSY is zero.
- *
- * 2. If the guest wants to asynchronously wake up the host,
- * it will write once to SYNC without polling on BUSY.
- * Ideally it will do this after some new commands have
- * been placed in the FIFO, and after reading a zero
- * from SVGA_FIFO_BUSY.
- *
- * (1) is the original behaviour that SYNC was designed to
- * support. Originally, a write to SYNC would implicitly
- * trigger a read from BUSY. This causes us to synchronously
- * process the FIFO.
- *
- * This behaviour has since been changed so that writing SYNC
- * will *not* implicitly cause a read from BUSY. Instead, it
- * makes a channel call which asynchronously wakes up the MKS
- * thread.
- *
- * New guests can use this new behaviour to implement (2)
- * efficiently. This lets guests get the host's attention
- * without waiting for the MKS to poll, which gives us much
- * better CPU utilization on SMP hosts and on UP hosts while
- * we're blocked on the host GPU.
- *
- * Old guests shouldn't notice the behaviour change. SYNC was
- * never guaranteed to process the entire FIFO, since it was
- * bounded to a particular number of CPU cycles. Old guests will
- * still loop on the BUSY register until the FIFO is empty.
- *
- * Writing to SYNC currently has the following side-effects:
- *
- * - Sets SVGA_REG_BUSY to TRUE (in the monitor)
- * - Asynchronously wakes up the MKS thread for FIFO processing
- * - The value written to SYNC is recorded as a "reason", for
- * stats purposes.
- *
- * If SVGA_FIFO_BUSY is available, drivers are advised to only
- * write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
- * SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
- * eventually set SVGA_FIFO_BUSY on its own, but this approach
- * lets the driver avoid sending multiple asynchronous wakeup
- * messages to the MKS thread.
+ * The SYNC register can be used by the guest driver to signal to the
+ * device that the guest driver is waiting for previously submitted
+ * commands to complete.
+ *
+ * When the guest driver writes to the SYNC register, the device sets
+ * the BUSY register to TRUE, and starts processing the submitted commands
+ * (if it was not already doing so). When all previously submitted
+ * commands are finished and the device is idle again, it sets the BUSY
+ * register back to FALSE. (If the guest driver submits new commands
+ * after writing the SYNC register, the new commands are not guaranteed
+ * to have been processed.)
+ *
+ * When guest drivers are submitting commands using the FIFO, the device
+ * periodically polls to check for new FIFO commands when idle, which may
+ * introduce a delay in command processing. If the guest driver wants
+ * the commands to be processed quickly (which it typically does), it
+ * should write SYNC after each batch of commands is committed to the
+ * FIFO to immediately wake up the device. For even better performance,
+ * the guest can use the SVGA_FIFO_BUSY register to avoid these extra
+ * SYNC writes if the device is already active, using the technique known
+ * as "Ringing the Doorbell" (described below). (Note that command
+ * buffer submission implicitly wakes up the device, and so doesn't
+ * suffer from this problem.)
+ *
+ * The SYNC register can also be used in combination with BUSY to
+ * synchronously ensure that all SVGA commands are processed (with both
+ * the FIFO and command-buffers). To do this, the guest driver should
+ * write to SYNC, and then loop reading BUSY until BUSY returns FALSE.
+ * This technique is known as a "Legacy Sync".
*
* SVGA_REG_BUSY --
*
* This register is set to TRUE when SVGA_REG_SYNC is written,
- * and it reads as FALSE when the FIFO has been completely
- * drained.
- *
- * Every read from this register causes us to synchronously
- * process FIFO commands. There is no guarantee as to how many
- * commands each read will process.
+ * and is set back to FALSE when the device has finished processing
+ * all commands and is idle again.
*
- * CPU time spent processing FIFO commands will be billed to
- * the guest.
+ * Every read from the BUSY register blocks for an undefined amount
+ * of time, normally until the device finishes some interesting unit
+ * of work or becomes idle.
*
- * New drivers should avoid using this register unless they
- * need to guarantee that the FIFO is completely drained. It
- * is overkill for performing a sync-to-fence. Older drivers
- * will use this register for any type of synchronization.
+ * Guest drivers can also do a partial Legacy Sync to check for some
+ * particular condition, for instance by stopping early when a fence
+ * passes before BUSY has been set back to FALSE. This is particularly
+ * useful if the guest driver knows that it is blocked waiting on the
+ * device, because it will yield CPU time back to the host.
*
* SVGA_FIFO_BUSY --
*
- * This register is a fast way for the guest driver to check
- * whether the FIFO is already being processed. It reads and
- * writes at normal RAM speeds, with no monitor intervention.
- *
- * If this register reads as TRUE, the host is guaranteeing that
- * any new commands written into the FIFO will be noticed before
- * the MKS goes back to sleep.
+ * The SVGA_FIFO_BUSY register is a fast way for the guest driver to check
+ * whether the device is actively processing FIFO commands before writing
+ * the more expensive SYNC register.
*
- * If this register reads as FALSE, no such guarantee can be
- * made.
+ * If this register reads as TRUE, the device is actively processing
+ * FIFO commands.
*
- * The guest should use this register to quickly determine
- * whether or not it needs to wake up the host. If the guest
- * just wrote a command or group of commands that it would like
- * the host to begin processing, it should:
+ * If this register reads as FALSE, the device may not be actively
+ * processing commands, and the guest driver should try
+ * "Ringing the Doorbell".
*
- * 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
- * action is necessary.
+ * To Ring the Doorbell, the guest should:
*
- * 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
- * code that we've already sent a SYNC to the host and we
- * don't need to send a duplicate.
+ * 1. Have already written its batch of commands into the FIFO.
+ * 2. Check if the SVGA_FIFO_BUSY register is available by reading
+ * SVGA_FIFO_MIN.
+ * 3. Read SVGA_FIFO_BUSY. If it reads as TRUE, the device is actively
+ * processing FIFO commands, and no further action is necessary.
+ * 4. If SVGA_FIFO_BUSY was FALSE, write TRUE to SVGA_REG_SYNC.
*
- * 3. Write a reason to SVGA_REG_SYNC. This will send an
- * asynchronous wakeup to the MKS thread.
+ * For maximum performance, this procedure should be followed after
+ * every meaningful batch of commands has been written into the FIFO.
+ * (Normally when the underlying application signals it has finished a
+ * meaningful work unit by calling Flush.)
*/
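
A minimal sketch of the two techniques documented above, not part of the
patch itself. The reg_read()/reg_write() and fifo_read()/fifo_write()
helpers are hypothetical stand-ins for register and FIFO accessors; in
this driver the doorbell is implemented by vmw_fifo_ping_host(), declared
in the vmwgfx_drv.h hunk further down.

    /* Legacy Sync: write SYNC, then poll BUSY until the device is idle. */
    static void svga_legacy_sync(void)
    {
            reg_write(SVGA_REG_SYNC, 1);
            while (reg_read(SVGA_REG_BUSY))
                    ; /* each read may block while the device makes progress */
    }

    /*
     * Ring the Doorbell: wake the device only when it is not already
     * processing FIFO commands, avoiding redundant SYNC writes.
     */
    static void svga_ring_doorbell(void)
    {
            /* SVGA_FIFO_BUSY is only usable if FIFO_MIN reaches past it. */
            if (fifo_read(SVGA_FIFO_MIN) >= (SVGA_FIFO_BUSY + 1) * sizeof(uint32) &&
                !fifo_read(SVGA_FIFO_BUSY)) {
                    fifo_write(SVGA_FIFO_BUSY, 1); /* suppress duplicate wakeups */
                    reg_write(SVGA_REG_SYNC, 1);   /* value written is a "reason" */
            }
    }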
@@ -1164,9 +1255,6 @@ enum {
* Video -- SVGA Video overlay units are supported
* Escape -- Escape command is supported
*
- * XXX: Add longer descriptions for each capability, including a list
- * of the new features that each capability provides.
- *
* SVGA_FIFO_CAP_SCREEN_OBJECT --
*
* Provides dynamic multi-screen rendering, for improved Unity and
@@ -1279,6 +1367,15 @@ enum {
/*
+ * ScreenDMA Register Values
+ */
+
+#define SVGA_SCREENDMA_REG_UNDEFINED 0
+#define SVGA_SCREENDMA_REG_NOT_PRESENT 1
+#define SVGA_SCREENDMA_REG_PRESENT 2
+#define SVGA_SCREENDMA_REG_MAX 3
+
+/*
* Video overlay support
*/
@@ -1665,6 +1762,80 @@ SVGAFifoCmdDefineAlphaCursor;
/*
+ * Provide a new large cursor image, as an AND/XOR mask.
+ *
+ * Should only be used for CursorMob functionality
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ uint32 andMaskDepth;
+ uint32 xorMaskDepth;
+ /*
+ * Followed by scanline data for AND mask, then XOR mask.
+ * Each scanline is padded to a 32-bit boundary.
+ */
+}
+#include "vmware_pack_end.h"
+SVGAGBColorCursorHeader;
+
+
+/*
+ * Provide a new large cursor image, in 32-bit BGRA format.
+ *
+ * Should only be used for CursorMob functionality
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ /* Followed by scanline data */
+}
+#include "vmware_pack_end.h"
+SVGAGBAlphaCursorHeader;
+
+/*
+ * Define the SVGA guest backed cursor types
+ */
+
+typedef enum {
+ SVGA_COLOR_CURSOR = 0,
+ SVGA_ALPHA_CURSOR = 1,
+} SVGAGBCursorType;
+
+/*
+ * Provide a new large cursor image.
+ *
+ * Should only be used for CursorMob functionality
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAGBCursorType type;
+ union {
+ SVGAGBColorCursorHeader colorHeader;
+ SVGAGBAlphaCursorHeader alphaHeader;
+ } header;
+ uint32 sizeInBytes;
+ /*
+ * Followed by the cursor data
+ */
+}
+#include "vmware_pack_end.h"
+SVGAGBCursorHeader;
+
+
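
A minimal sketch of the payload sizing implied by the cursor headers
above, assuming the stated padding rule (AND/XOR scanlines padded to a
32-bit boundary; alpha cursors are plain 32-bit BGRA pixels). The helper
names are hypothetical, not part of the patch.

    static inline uint32 cursor_scanline_bytes(uint32 width, uint32 bpp)
    {
            /* Round each scanline up to a whole number of 32-bit words. */
            return ((width * bpp + 31) / 32) * 4;
    }

    /* Bytes of image data following an SVGAGBColorCursorHeader. */
    static uint32 color_cursor_data_bytes(const SVGAGBColorCursorHeader *h)
    {
            return h->height * (cursor_scanline_bytes(h->width, h->andMaskDepth) +
                                cursor_scanline_bytes(h->width, h->xorMaskDepth));
    }

    /* Bytes of image data following an SVGAGBAlphaCursorHeader. */
    static uint32 alpha_cursor_data_bytes(const SVGAGBAlphaCursorHeader *h)
    {
            return h->width * h->height * 4; /* 32-bit BGRA */
    }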
+/*
* SVGA_CMD_UPDATE_VERBOSE --
*
* Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
@@ -2061,9 +2232,12 @@ SVGAFifoCmdRemapGMR2;
#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024)
#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024)
#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_MAX (2 * 1024 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_2GB (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_3GB (3 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_4GB (4 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_8GB (8 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
@@ -2086,4 +2260,6 @@ SVGAFifoCmdRemapGMR2;
#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024)
#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024)
+#define SVGA_PCI_REGS_PAGES (1)
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
index 350bbc6fab02..beddccee40f6 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -37,6 +37,7 @@ typedef s8 int8;
typedef uint64 PA;
typedef uint32 PPN;
+typedef uint32 PPN32;
typedef uint64 PPN64;
typedef bool Bool;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 66e14e38d5e8..f41550797970 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -57,9 +57,11 @@
#define VMW_BINDING_RT_BIT 0
#define VMW_BINDING_PS_BIT 1
-#define VMW_BINDING_SO_BIT 2
+#define VMW_BINDING_SO_T_BIT 2
#define VMW_BINDING_VB_BIT 3
-#define VMW_BINDING_NUM_BITS 4
+#define VMW_BINDING_UAV_BIT 4
+#define VMW_BINDING_CS_UAV_BIT 5
+#define VMW_BINDING_NUM_BITS 6
#define VMW_BINDING_PS_SR_BIT 0
@@ -75,6 +77,8 @@
* @vertex_buffers: Vertex buffer bindings.
* @index_buffer: Index buffer binding.
* @per_shader: Per shader-type bindings.
+ * @ua_views: UAV bindings.
+ * @so_state: StreamOutput bindings.
* @dirty: Bitmap tracking per binding-type changes that have not yet
* been emitted to the device.
* @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
@@ -95,10 +99,12 @@ struct vmw_ctx_binding_state {
struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
struct vmw_ctx_bindinfo_view ds_view;
- struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+ struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
struct vmw_ctx_bindinfo_ib index_buffer;
- struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+ struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
+ struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
+ struct vmw_ctx_bindinfo_so so_state;
unsigned long dirty;
DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
@@ -115,12 +121,16 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+
static void vmw_binding_build_asserts(void) __attribute__ ((unused));
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
@@ -151,6 +161,9 @@ static const size_t vmw_binding_shader_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
offsetof(struct vmw_ctx_binding_state, render_targets),
@@ -162,6 +175,9 @@ static const size_t vmw_binding_cb_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
offsetof(struct vmw_ctx_binding_state, ds_view),
@@ -170,8 +186,11 @@ static const size_t vmw_binding_sr_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
};
-static const size_t vmw_binding_so_offsets[] = {
+static const size_t vmw_binding_so_target_offsets[] = {
offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
@@ -180,6 +199,15 @@ static const size_t vmw_binding_vb_offsets[] = {
static const size_t vmw_binding_ib_offsets[] = {
offsetof(struct vmw_ctx_binding_state, index_buffer),
};
+static const size_t vmw_binding_uav_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
+};
+static const size_t vmw_binding_cs_uav_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
+};
+static const size_t vmw_binding_so_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, so_state),
+};
static const struct vmw_binding_info vmw_binding_infos[] = {
[vmw_ctx_binding_shader] = {
@@ -214,10 +242,10 @@ static const struct vmw_binding_info vmw_binding_infos[] = {
.size = sizeof(struct vmw_ctx_bindinfo_view),
.offsets = vmw_binding_dx_ds_offsets,
.scrub_func = vmw_binding_scrub_dx_rt},
- [vmw_ctx_binding_so] = {
- .size = sizeof(struct vmw_ctx_bindinfo_so),
- .offsets = vmw_binding_so_offsets,
- .scrub_func = vmw_binding_scrub_so},
+ [vmw_ctx_binding_so_target] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_so_target),
+ .offsets = vmw_binding_so_target_offsets,
+ .scrub_func = vmw_binding_scrub_so_target},
[vmw_ctx_binding_vb] = {
.size = sizeof(struct vmw_ctx_bindinfo_vb),
.offsets = vmw_binding_vb_offsets,
@@ -226,6 +254,18 @@ static const struct vmw_binding_info vmw_binding_infos[] = {
.size = sizeof(struct vmw_ctx_bindinfo_ib),
.offsets = vmw_binding_ib_offsets,
.scrub_func = vmw_binding_scrub_ib},
+ [vmw_ctx_binding_uav] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_uav_offsets,
+ .scrub_func = vmw_binding_scrub_uav},
+ [vmw_ctx_binding_cs_uav] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_cs_uav_offsets,
+ .scrub_func = vmw_binding_scrub_cs_uav},
+ [vmw_ctx_binding_so] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_so),
+ .offsets = vmw_binding_so_offsets,
+ .scrub_func = vmw_binding_scrub_so},
};
/**
@@ -312,6 +352,18 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
}
/**
+ * vmw_binding_add_uav_index - Add UAV index for tracking.
+ * @cbs: Pointer to the context binding state tracker.
+ * @slot: UAV type to which this index is bound.
+ * @index: The splice index to track.
+ */
+void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
+ uint32 index)
+{
+ cbs->ua_views[slot].index = index;
+}
+
+/**
* vmw_binding_transfer: Transfer a context binding tracking entry.
*
* @cbs: Pointer to the persistent context binding state tracker.
@@ -450,6 +502,10 @@ void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
vmw_binding_transfer(to, from, entry);
vmw_binding_drop(entry);
}
+
+ /* Also transfer uav splice indices */
+ to->ua_views[0].index = from->ua_views[0].index;
+ to->ua_views[1].index = from->ua_views[1].index;
}
/**
@@ -828,8 +884,8 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi,
u32 max_num)
{
- const struct vmw_ctx_bindinfo_so *biso =
- container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+ const struct vmw_ctx_bindinfo_so_target *biso =
+ container_of(bi, struct vmw_ctx_bindinfo_so_target, bi);
unsigned long i;
SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
@@ -854,11 +910,11 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
}
/**
- * vmw_binding_emit_set_so - Issue delayed streamout binding commands
+ * vmw_emit_set_so_target - Issue delayed streamout binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*/
-static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
{
const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
struct {
@@ -1005,6 +1061,66 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
return 0;
}
+static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetUAViews body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+ /* Splice index is specified by user-space */
+ cmd->body.uavSpliceIndex = cbs->ua_views[0].index;
+
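+ /* The view IDs are a variable-length payload that follows the
+ * fixed-size body; &cmd[1] is the first byte after the body. */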
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+}
+
+static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetCSUAViews body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+ /* Start index is specified by user-space */
+ cmd->body.startIndex = cbs->ua_views[1].index;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+}
+
/**
* vmw_binding_emit_dirty - Issue delayed binding commands
*
@@ -1030,12 +1146,18 @@ static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
case VMW_BINDING_PS_BIT:
ret = vmw_binding_emit_dirty_ps(cbs);
break;
- case VMW_BINDING_SO_BIT:
- ret = vmw_emit_set_so(cbs);
+ case VMW_BINDING_SO_T_BIT:
+ ret = vmw_emit_set_so_target(cbs);
break;
case VMW_BINDING_VB_BIT:
ret = vmw_emit_set_vb(cbs);
break;
+ case VMW_BINDING_UAV_BIT:
+ ret = vmw_emit_set_uav(cbs);
+ break;
+ case VMW_BINDING_CS_UAV_BIT:
+ ret = vmw_emit_set_cs_uav(cbs);
+ break;
default:
BUG();
}
@@ -1089,18 +1211,18 @@ static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
}
/**
- * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding
* scrub from a context
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
-static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_binding_state *cbs =
vmw_context_binding_state(bi->ctx);
- __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+ __set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);
return 0;
}
@@ -1162,6 +1284,49 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
return 0;
}
+static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
+
+ __set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
+ return 0;
+}
+
+static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
+
+ __set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Scrub a streamoutput binding from context.
+ * @bi: Single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_so *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetStreamOutput body;
+ } *cmd;
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
/**
* vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
* memory accounting.
@@ -1248,8 +1413,8 @@ void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
* Each time a resource is put on the validation list as the result of a
* context binding referencing it, we need to determine whether that resource
* will be dirtied (written to by the GPU) as a result of the corresponding
- * GPU operation. Currently rendertarget-, depth-stencil-, and
- * stream-output-target bindings are capable of dirtying its resource.
+ * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target
+ * and unordered access view bindings are capable of dirtying their resources.
*
* Return: Whether the binding type dirties the resource its binding points to.
*/
@@ -1259,11 +1424,13 @@ u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
[vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
[vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
[vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
- [vmw_ctx_binding_so] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
};
/* Review this function as new bindings are added. */
- BUILD_BUG_ON(vmw_ctx_binding_max != 11);
+ BUILD_BUG_ON(vmw_ctx_binding_max != 14);
return is_binding_dirtying[binding_type];
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index cd9805c045cb..dcb71fd0bb3b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -33,6 +33,8 @@
#define VMW_MAX_VIEW_BINDINGS 128
+#define VMW_MAX_UAV_BIND_TYPE 2
+
struct vmw_private;
struct vmw_ctx_binding_state;
@@ -48,9 +50,12 @@ enum vmw_ctx_binding_type {
vmw_ctx_binding_dx_rt,
vmw_ctx_binding_sr,
vmw_ctx_binding_ds,
- vmw_ctx_binding_so,
+ vmw_ctx_binding_so_target,
vmw_ctx_binding_vb,
vmw_ctx_binding_ib,
+ vmw_ctx_binding_uav,
+ vmw_ctx_binding_cs_uav,
+ vmw_ctx_binding_so,
vmw_ctx_binding_max
};
@@ -128,14 +133,14 @@ struct vmw_ctx_bindinfo_view {
};
/**
- * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ * struct vmw_ctx_bindinfo_so_target - StreamOutput binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @offset: Device data used to reconstruct binding command.
* @size: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
-struct vmw_ctx_bindinfo_so {
+struct vmw_ctx_bindinfo_so_target {
struct vmw_ctx_bindinfo bi;
uint32 offset;
uint32 size;
@@ -189,9 +194,31 @@ struct vmw_dx_shader_bindings {
unsigned long dirty;
};
+/**
+ * struct vmw_ctx_bindinfo_uav - UAV context binding state.
+ * @views: UAV view bindings.
+ * @index: The device splice index set by user-space.
+ */
+struct vmw_ctx_bindinfo_uav {
+ struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS];
+ uint32 index;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - Stream output binding metadata.
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+ struct vmw_ctx_bindinfo bi;
+ uint32 slot;
+};
+
extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *ci,
u32 shader_slot, u32 slot);
+extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
+ uint32 slot, uint32 splice_index);
extern void
vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
struct vmw_ctx_binding_state *from);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 065015d2a8f6..3b41cf63110a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1241,7 +1241,8 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
* actually call into the already enabled manager, when
* binding the MOB.
*/
- if (!(dev_priv->capabilities & SVGA_CAP_DX))
+ if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
+ !dev_priv->has_mob)
return -ENOMEM;
ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index a56c9d802382..61c246335e66 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -36,7 +36,7 @@ struct vmw_user_context {
struct vmw_resource res;
struct vmw_ctx_binding_state *cbs;
struct vmw_cmdbuf_res_manager *man;
- struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+ struct vmw_resource *cotables[SVGA_COTABLE_MAX];
spinlock_t cotable_lock;
struct vmw_buffer_object *dx_query_mob;
};
@@ -116,12 +116,15 @@ static const struct vmw_res_func vmw_dx_context_func = {
* Context management:
*/
-static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
+ struct vmw_user_context *uctx)
{
struct vmw_resource *res;
int i;
+ u32 cotable_max = has_sm5_context(dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
- for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ for (i = 0; i < cotable_max; ++i) {
spin_lock(&uctx->cotable_lock);
res = uctx->cotables[i];
uctx->cotables[i] = NULL;
@@ -155,7 +158,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
- vmw_context_cotables_unref(uctx);
+ vmw_context_cotables_unref(dev_priv, uctx);
return;
}
@@ -208,7 +211,9 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
spin_lock_init(&uctx->cotable_lock);
if (dx) {
- for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ u32 cotable_max = has_sm5_context(dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
+ for (i = 0; i < cotable_max; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
if (IS_ERR(uctx->cotables[i])) {
@@ -222,7 +227,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
return 0;
out_cotables:
- vmw_context_cotables_unref(uctx);
+ vmw_context_cotables_unref(dev_priv, uctx);
out_err:
if (res_free)
res_free(res);
@@ -545,10 +550,12 @@ void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
{
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
+ u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
int i;
vmw_binding_state_scrub(uctx->cbs);
- for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ for (i = 0; i < cotable_max; ++i) {
struct vmw_resource *res;
/* Avoid racing with ongoing cotable destruction. */
@@ -731,7 +738,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
};
int ret;
- if (!dev_priv->has_dx && dx) {
+ if (!has_sm4_context(dev_priv) && dx) {
VMW_DEBUG_USER("DX contexts not supported by device.\n");
return -EINVAL;
}
@@ -839,7 +846,10 @@ struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
SVGACOTableType cotable_type)
{
- if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+ u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
+
+ if (cotable_type >= cotable_max)
return ERR_PTR(-EINVAL);
return container_of(ctx, struct vmw_user_context, res)->
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 3ca5cf375b01..65e8e7a97724 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -80,9 +80,10 @@ static const struct vmw_cotable_info co_info[] = {
{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
- {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+ {1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
{1, sizeof(SVGACOTableDXQueryEntry), NULL},
- {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+ {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
+ {1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};
/*
@@ -102,6 +103,7 @@ const SVGACOTableType vmw_cotable_scrub_order[] = {
SVGA_COTABLE_SAMPLER,
SVGA_COTABLE_STREAMOUTPUT,
SVGA_COTABLE_DXQUERY,
+ SVGA_COTABLE_UAVIEW,
};
static int vmw_cotable_bind(struct vmw_resource *res,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 827458f49112..c2247a893ed4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/mem_encrypt.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
@@ -289,6 +290,8 @@ static void vmw_print_capabilities2(uint32_t capabilities2)
DRM_INFO(" Grow oTable.\n");
if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
DRM_INFO(" IntraSurface copy.\n");
+ if (capabilities2 & SVGA_CAP2_DX3)
+ DRM_INFO(" DX3.\n");
}
static void vmw_print_capabilities(uint32_t capabilities)
@@ -448,7 +451,7 @@ static int vmw_request_device(struct vmw_private *dev_priv)
dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
if (IS_ERR(dev_priv->cman)) {
dev_priv->cman = NULL;
- dev_priv->has_dx = false;
+ dev_priv->sm_type = VMW_SM_LEGACY;
}
ret = vmw_request_device_late(dev_priv);
@@ -575,6 +578,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+ /* TTM currently doesn't fully support SEV encryption. */
+ if (mem_encrypt_active())
+ return -EINVAL;
+
if (vmw_force_coherent)
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
@@ -682,8 +689,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
- DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+ DRM_INFO("Restricting capabilities since DMA not available.\n");
refuse_dma = true;
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ DRM_INFO("Disabling 3D acceleration.\n");
}
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
@@ -711,9 +720,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->max_mob_pages = 0;
dev_priv->max_mob_size = 0;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
- uint64_t mem_size =
- vmw_read(dev_priv,
- SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ uint64_t mem_size;
+
+ if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
+ mem_size = vmw_read(dev_priv,
+ SVGA_REG_GBOBJECT_MEM_SIZE_KB);
+ else
+ mem_size =
+ vmw_read(dev_priv,
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
/*
* Workaround for low memory 2D VMs to compensate for the
@@ -866,7 +881,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->has_gmr = false;
}
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true;
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
VMW_PL_MOB) != 0) {
@@ -876,14 +891,32 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
}
- if (dev_priv->has_mob) {
+ if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
- dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+ dev_priv->sm_type = VMW_SM_4;
spin_unlock(&dev_priv->cap_lock);
}
vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
+
+ /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
+ if (has_sm4_context(dev_priv) &&
+ (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
+
+ if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+ dev_priv->sm_type = VMW_SM_4_1;
+
+ if (has_sm4_1_context(dev_priv) &&
+ (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
+ if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+ dev_priv->sm_type = VMW_SM_5;
+ }
+ }
+
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
@@ -893,23 +926,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_no_fifo;
- if (dev_priv->has_dx) {
- /*
- * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
- * support
- */
- if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
- vmw_write(dev_priv, SVGA_REG_DEV_CAP,
- SVGA3D_DEVCAP_SM41);
- dev_priv->has_sm4_1 = vmw_read(dev_priv,
- SVGA_REG_DEV_CAP);
- }
- }
-
- DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
? "yes." : "no.");
- DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
+ if (dev_priv->sm_type == VMW_SM_5)
+ DRM_INFO("SM5 support available.\n");
+ if (dev_priv->sm_type == VMW_SM_4_1)
+ DRM_INFO("SM4_1 support available.\n");
+ if (dev_priv->sm_type == VMW_SM_4)
+ DRM_INFO("SM4 support available.\n");
snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
VMWGFX_REPO, VMWGFX_GIT_VERSION);
@@ -1223,6 +1247,18 @@ static void vmw_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static unsigned long
+vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+
+ return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
+ &dev_priv->vma_manager);
+}
+
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr)
{
@@ -1394,14 +1430,12 @@ static const struct file_operations vmwgfx_driver_fops = {
.compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
+ .get_unmapped_area = vmw_get_unmapped_area,
};
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
- .get_vblank_counter = vmw_get_vblank_counter,
- .enable_vblank = vmw_enable_vblank,
- .disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 86b69397d166..3596f3923ea3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -58,7 +58,7 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20200114"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 17
+#define VMWGFX_DRIVER_MINOR 18
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -202,6 +202,7 @@ enum vmw_res_type {
vmw_res_dx_context,
vmw_res_cotable,
vmw_res_view,
+ vmw_res_streamoutput,
vmw_res_max
};
@@ -210,7 +211,8 @@ enum vmw_res_type {
*/
enum vmw_cmdbuf_res_type {
vmw_cmdbuf_res_shader,
- vmw_cmdbuf_res_view
+ vmw_cmdbuf_res_view,
+ vmw_cmdbuf_res_streamoutput
};
struct vmw_cmdbuf_res_manager;
@@ -223,24 +225,58 @@ struct vmw_cursor_snooper {
struct vmw_framebuffer;
struct vmw_surface_offset;
-struct vmw_surface {
- struct vmw_resource res;
- SVGA3dSurfaceAllFlags flags;
- uint32_t format;
- uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+/**
+ * struct vmw_surface_metadata - Metadata describing a surface.
+ *
+ * @flags: Device flags.
+ * @format: Surface SVGA3D_x format.
+ * @mip_levels: Mip level for each face. For GB surfaces only the first index is used.
+ * @multisample_count: Sample count.
+ * @multisample_pattern: Sample patterns.
+ * @quality_level: Quality level.
+ * @autogen_filter: Filter for automatically generated mipmaps.
+ * @array_size: Number of array elements for a 1D/2D texture. For a
+ *              cubemap texture this is the number of faces times the
+ *              number of array elements. This should be 0 for pre-SM4
+ *              devices.
+ * @buffer_byte_stride: Buffer byte stride.
+ * @num_sizes: Size of @sizes. For a GB surface this should always be 1.
+ * @base_size: Surface dimension.
+ * @sizes: Array representing mip sizes. Legacy only.
+ * @scanout: Whether this surface will be used for scanout.
+ *
+ * This tracks metadata for both legacy and guest-backed surfaces.
+ */
+struct vmw_surface_metadata {
+ u64 flags;
+ u32 format;
+ u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ u32 multisample_count;
+ u32 multisample_pattern;
+ u32 quality_level;
+ u32 autogen_filter;
+ u32 array_size;
+ u32 num_sizes;
+ u32 buffer_byte_stride;
struct drm_vmw_size base_size;
struct drm_vmw_size *sizes;
- uint32_t num_sizes;
bool scanout;
- uint32_t array_size;
- /* TODO so far just a extra pointer */
+};
+
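
A hypothetical sketch, not part of the patch, of how the kerneldoc rules
above translate into metadata for a guest-backed cubemap array:
array_size is the number of faces times the array element count,
num_sizes is always 1 for GB surfaces, and only mip_levels[0] is used.
The chosen format is just an example.

    static void example_cubemap_metadata(struct vmw_surface_metadata *md,
                                         u32 edge, u32 layers)
    {
            memset(md, 0, sizeof(*md));
            md->format = SVGA3D_R8G8B8A8_UNORM; /* example format */
            md->mip_levels[0] = 1;       /* GB: only the first index is used */
            md->array_size = 6 * layers; /* faces * number of array elements */
            md->num_sizes = 1;           /* always 1 for a GB surface */
            md->base_size.width = edge;
            md->base_size.height = edge;
            md->base_size.depth = 1;
    }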
+/**
+ * struct vmw_surface - Resource structure for a surface.
+ *
+ * @res: The base resource for this surface.
+ * @metadata: Metadata for this surface resource.
+ * @snooper: Cursor data. Legacy surface only.
+ * @offsets: Legacy surface only.
+ * @view_list: List of views bound to this surface.
+ */
+struct vmw_surface {
+ struct vmw_resource res;
+ struct vmw_surface_metadata metadata;
struct vmw_cursor_snooper snooper;
struct vmw_surface_offset *offsets;
- SVGA3dTextureFilter autogen_filter;
- uint32_t multisample_count;
struct list_head view_list;
- SVGA3dMSPattern multisample_pattern;
- SVGA3dMSQualityLevel quality_level;
};
struct vmw_marker_queue {
@@ -441,6 +477,22 @@ enum {
VMW_IRQTHREAD_MAX
};
+/**
+ * enum vmw_sm_type - Graphics context capability supported by device.
+ * @VMW_SM_LEGACY: Pre-DX context.
+ * @VMW_SM_4: Context support up to SM4.
+ * @VMW_SM_4_1: Context support up to SM4_1.
+ * @VMW_SM_5: Context support up to SM5.
+ * @VMW_SM_MAX: Should be the last entry.
+ */
+enum vmw_sm_type {
+ VMW_SM_LEGACY = 0,
+ VMW_SM_4,
+ VMW_SM_4_1,
+ VMW_SM_5,
+ VMW_SM_MAX
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
@@ -475,22 +527,9 @@ struct vmw_private {
bool has_mob;
spinlock_t hw_lock;
spinlock_t cap_lock;
- bool has_dx;
bool assume_16bpp;
- bool has_sm4_1;
-
- /*
- * VGA registers.
- */
-
- struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
- uint32_t vga_width;
- uint32_t vga_height;
- uint32_t vga_bpp;
- uint32_t vga_bpl;
- uint32_t vga_pitchlock;
- uint32_t num_displays;
+ enum vmw_sm_type sm_type;
/*
* Framebuffer info.
@@ -661,6 +700,39 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
return val;
}
+/**
+ * has_sm4_context - Whether the device supports SM4 contexts.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM4 contexts, false otherwise.
+ */
+static inline bool has_sm4_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_4);
+}
+
+/**
+ * has_sm4_1_context - Whether the device supports SM4_1 contexts.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM4_1 contexts, false otherwise.
+ */
+static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_4_1);
+}
+
+/**
+ * has_sm5_context - Whether the device supports SM5 contexts.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM5 contexts, false otherwise.
+ */
+static inline bool has_sm5_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_5);
+}
+
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
@@ -778,7 +850,7 @@ extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
- bool interuptable,
+ bool interruptible,
void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
@@ -900,7 +972,6 @@ extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
-extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
@@ -929,6 +1000,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
size_t gran);
+
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
@@ -947,7 +1019,6 @@ extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
@@ -1085,8 +1156,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv);
int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
-int vmw_kms_save_vga(struct vmw_private *vmw_priv);
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
@@ -1100,9 +1169,9 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
+int vmw_enable_vblank(struct drm_crtc *crtc);
+void vmw_disable_vblank(struct drm_crtc *crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -1139,7 +1208,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
@@ -1186,10 +1254,6 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
extern const struct vmw_user_resource_conv *user_context_converter;
-extern int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
@@ -1219,7 +1283,6 @@ vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
extern const struct vmw_user_resource_conv *user_surface_converter;
-extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -1230,11 +1293,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
SVGA3dSurfaceAllFlags svga3d_flags,
@@ -1254,6 +1312,11 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file_priv);
+int vmw_gb_surface_define(struct vmw_private *dev_priv,
+ uint32_t user_accounting_size,
+ const struct vmw_surface_metadata *req,
+ struct vmw_surface **srf_out);
+
/*
* Shader management - vmwgfx_shader.c
*/
@@ -1287,6 +1350,24 @@ vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type);
/*
+ * Streamoutput management
+ */
+struct vmw_resource *
+vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key);
+int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ SVGA3dStreamOutputId user_key,
+ struct list_head *list);
+void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
+int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
+ SVGA3dStreamOutputId user_key,
+ struct list_head *list);
+void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback);
+
+/*
* Command buffer managed resources - vmwgfx_cmdbuf_res.c
*/
@@ -1430,6 +1511,17 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size);
+#endif
+
+/* Transparent hugepage support - vmwgfx_thp.c */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern const struct ttm_mem_type_manager_func vmw_thp_func;
+#else
+#define vmw_thp_func ttm_bo_manager_func
+#endif
/**
* VMW_DEBUG_KMS - Debug output for kernel mode-setting
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 73489a45decb..367d5b87ee6a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -459,10 +459,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
int ret = 0;
struct vmw_resource *res;
u32 i;
+ u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
/* Add all cotables to the validation list. */
- if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
- for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ if (has_sm4_context(dev_priv) &&
+ vmw_res_type(ctx) == vmw_res_dx_context) {
+ for (i = 0; i < cotable_max; ++i) {
res = vmw_context_cotable(ctx, i);
if (IS_ERR(res))
continue;
@@ -489,7 +492,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
break;
}
- if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+ if (has_sm4_context(dev_priv) &&
+ vmw_res_type(ctx) == vmw_res_dx_context) {
struct vmw_buffer_object *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
@@ -2116,6 +2120,9 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
+ SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
+ SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
+
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_cb binding;
@@ -2139,7 +2146,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
binding.size = cmd->body.sizeInBytes;
binding.slot = cmd->body.slot;
- if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+ if (binding.shader_slot >= max_shader_num ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
(unsigned int) cmd->body.type,
@@ -2167,12 +2174,15 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
container_of(header, typeof(*cmd), header);
+ SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+ SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
+
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
- cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ cmd->body.type >= max_allowed) {
VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
@@ -2196,6 +2206,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
+ SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+ SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
@@ -2206,7 +2218,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
- if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+ if (cmd->body.type >= max_allowed ||
cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
@@ -2467,7 +2479,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
- struct vmw_ctx_bindinfo_so binding;
+ struct vmw_ctx_bindinfo_so_target binding;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
@@ -2497,7 +2509,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
- binding.bi.bt = vmw_ctx_binding_so,
+ binding.bi.bt = vmw_ctx_binding_so_target,
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
@@ -2804,6 +2816,352 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
&cmd->body.surface.sid, NULL);
}
+static int vmw_cmd_sm5(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
+}
+
+static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
+}
+
+static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearUAViewUint body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
+ cmd->body.uaViewId);
+
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearUAViewFloat body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
+ cmd->body.uaViewId);
+
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetUAViews body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dUAViewId);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ VMW_DEBUG_USER("Invalid UAV binding.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
+ vmw_ctx_binding_uav, 0, (void *)&cmd[1],
+ num_uav, 0);
+ if (ret)
+ return ret;
+
+ vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
+ cmd->body.uavSpliceIndex);
+
+ return ret;
+}
+
+static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetCSUAViews body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dUAViewId);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ VMW_DEBUG_USER("Invalid UAV binding.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
+ vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
+ num_uav, 0);
+ if (ret)
+ return ret;
+
+ vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
+ cmd->body.startIndex);
+
+ return ret;
+}
+
+static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineStreamOutputWithMob body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
+ ret = vmw_cotable_notify(res, cmd->body.soid);
+ if (ret)
+ return ret;
+
+ return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
+ cmd->body.soid,
+ &sw_context->staged_cmd_res);
+}
+
+static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When the device does not support SM5, the streamoutput-with-mob
+ * commands are not available to user-space. Simply return in this case.
+ */
+ if (!has_sm5_context(dev_priv))
+ return 0;
+
+ /*
+ * On an SM5-capable device, if the lookup fails then user-space probably
+ * used the old streamoutput define command. Return without an error.
+ */
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+ if (IS_ERR(res))
+ return 0;
+
+ return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
+ &sw_context->staged_cmd_res);
+}
+
+static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Cound not find streamoutput to bind.\n");
+ return PTR_ERR(res);
+ }
+
+ vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
+
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ return ret;
+ }
+
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+ &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+}
+
+static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct vmw_ctx_bindinfo_so binding;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->body.soid == SVGA3D_INVALID_ID)
+ return 0;
+
+ /*
+ * When the device does not support SM5, the streamoutput-with-mob
+ * commands are not available to user-space. Simply return in this case.
+ */
+ if (!has_sm5_context(dev_priv))
+ return 0;
+
+ /*
+ * On an SM5-capable device, if the lookup fails then user-space probably
+ * used the old streamoutput define command. Return without an error.
+ */
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+ if (IS_ERR(res))
+ return 0;
+
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ return ret;
+ }
+
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
+ binding.bi.bt = vmw_ctx_binding_so;
+	binding.slot = 0; /* Only one streamoutput is set on a context at a time. */
+
+ vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
+ binding.slot);
+
+ return ret;
+}
+
+static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_draw_indexed_instanced_indirect_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDrawIndexedInstancedIndirect body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
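+	/* The indirect argument buffer is only read by the device, hence VMW_RES_DIRTY_NONE. */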
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.argsBufferSid, NULL);
+}
+
+static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_draw_instanced_indirect_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDrawInstancedIndirect body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.argsBufferSid, NULL);
+}
+
+static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dispatch_indirect_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDispatchIndirect body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.argsBufferSid, NULL);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -2922,18 +3280,12 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
- false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
@@ -3141,9 +3493,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
&vmw_cmd_dx_so_define, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
- &vmw_cmd_dx_cid_check, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
- true, false, true),
+ &vmw_cmd_dx_destroy_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
+ &vmw_cmd_dx_set_streamoutput, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
&vmw_cmd_dx_set_so_targets, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
@@ -3159,6 +3511,37 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
true, false, true),
+
+ /*
+ * SM5 commands
+ */
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
+ &vmw_cmd_clear_uav_float, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
+ false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
+ true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
+ &vmw_cmd_indexed_instanced_indirect, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
+ &vmw_cmd_instanced_indirect, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
+ &vmw_cmd_dispatch_indirect, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
+ false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
+ &vmw_cmd_sm5_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
+ &vmw_cmd_dx_define_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
+ &vmw_cmd_dx_bind_streamoutput, true, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 178a6cd1a06f..0f8d29397157 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -515,7 +515,7 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
struct vmw_fence_manager *fman = fman_from_fence(fence);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
- return 1;
+ return true;
vmw_fences_update(fman);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e5252ef3812f..6941689085ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -169,10 +169,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
u32 *fifo_mem = dev_priv->mmio_virt;
- preempt_disable();
if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
- preempt_enable();
}
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a15375eb476e..f681b7b4df1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,10 +114,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
(dev_priv->active_display_unit == vmw_du_screen_target);
break;
case DRM_VMW_PARAM_DX:
- param->value = dev_priv->has_dx;
+ param->value = has_sm4_context(dev_priv);
break;
case DRM_VMW_PARAM_SM4_1:
- param->value = dev_priv->has_sm4_1;
+ param->value = has_sm4_1_context(dev_priv);
+ break;
+ case DRM_VMW_PARAM_SM5:
+ param->value = has_sm5_context(dev_priv);
break;
default:
return -EINVAL;
@@ -126,14 +129,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
+static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value)
{
/*
 * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
* to check the sample count supported by virtual device. Since there
* never was support for multisample count for backing MOB return 0.
+ *
+	 * The MULTISAMPLE_MASKABLESAMPLES devcap has been marked as deprecated
+	 * by the virtual device.
*/
- if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+ if (cap == SVGA3D_DEVCAP_DEAD5)
return 0;
return fmt_value;
@@ -164,7 +170,7 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i;
- compat_cap->pairs[i][1] = vmw_mask_multisample
+ compat_cap->pairs[i][1] = vmw_mask_legacy_multisample
(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
}
spin_unlock(&dev_priv->cap_lock);
@@ -220,7 +226,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
spin_lock(&dev_priv->cap_lock);
for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
- *bounce32++ = vmw_mask_multisample
+ *bounce32++ = vmw_mask_legacy_multisample
(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
}
spin_unlock(&dev_priv->cap_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f47d5710cc95..04d66592f605 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -905,14 +905,14 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
*/
/* Surface must be marked as a scanout. */
- if (unlikely(!surface->scanout))
+ if (unlikely(!surface->metadata.scanout))
return -EINVAL;
- if (unlikely(surface->mip_levels[0] != 1 ||
- surface->num_sizes != 1 ||
- surface->base_size.width < mode_cmd->width ||
- surface->base_size.height < mode_cmd->height ||
- surface->base_size.depth != 1)) {
+ if (unlikely(surface->metadata.mip_levels[0] != 1 ||
+ surface->metadata.num_sizes != 1 ||
+ surface->metadata.base_size.width < mode_cmd->width ||
+ surface->metadata.base_size.height < mode_cmd->height ||
+ surface->metadata.base_size.depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
@@ -941,7 +941,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* For DX, surface format validation is done when surface->scanout
* is set.
*/
- if (!dev_priv->has_dx && format != surface->format) {
+ if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
@@ -1144,8 +1144,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
struct vmw_buffer_object *bo_mob,
struct vmw_surface **srf_out)
{
+ struct vmw_surface_metadata metadata = {0};
uint32_t format;
- struct drm_vmw_size content_base_size = {0};
struct vmw_resource *res;
unsigned int bytes_pp;
struct drm_format_name_buf format_name;
@@ -1175,22 +1175,15 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
return -EINVAL;
}
- content_base_size.width = mode_cmd->pitches[0] / bytes_pp;
- content_base_size.height = mode_cmd->height;
- content_base_size.depth = 1;
-
- ret = vmw_surface_gb_priv_define(dev,
- 0, /* kernel visible only */
- 0, /* flags */
- format,
- true, /* can be a scanout buffer */
- 1, /* num of mip levels */
- 0,
- 0,
- content_base_size,
- SVGA3D_MS_PATTERN_NONE,
- SVGA3D_MS_QUALITY_NONE,
- srf_out);
+ metadata.format = format;
+ metadata.mip_levels[0] = 1;
+ metadata.num_sizes = 1;
+ metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
+ metadata.base_size.height = mode_cmd->height;
+ metadata.base_size.depth = 1;
+ metadata.scanout = true;
+
+ ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
@@ -1897,87 +1890,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
-{
- struct vmw_vga_topology_state *save;
- uint32_t i;
-
- vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
- vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
- if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
- vmw_priv->vga_pitchlock =
- vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
- else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
-
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
- return 0;
-
- vmw_priv->num_displays = vmw_read(vmw_priv,
- SVGA_REG_NUM_GUEST_DISPLAYS);
-
- if (vmw_priv->num_displays == 0)
- vmw_priv->num_displays = 1;
-
- for (i = 0; i < vmw_priv->num_displays; ++i) {
- save = &vmw_priv->vga_save[i];
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
- save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
- save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
- save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
- save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
- save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- if (i == 0 && vmw_priv->num_displays == 1 &&
- save->width == 0 && save->height == 0) {
-
- /*
- * It should be fairly safe to assume that these
- * values are uninitialized.
- */
-
- save->width = vmw_priv->vga_width - save->pos_x;
- save->height = vmw_priv->vga_height - save->pos_y;
- }
- }
-
- return 0;
-}
-
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
-{
- struct vmw_vga_topology_state *save;
- uint32_t i;
-
- vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
- if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
- vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
- vmw_priv->vga_pitchlock);
- else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_mmio_write(vmw_priv->vga_pitchlock,
- vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
-
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
- return 0;
-
- for (i = 0; i < vmw_priv->num_displays; ++i) {
- save = &vmw_priv->vga_save[i];
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
-
- return 0;
-}
-
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
@@ -1991,7 +1903,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
/**
 * Function called by DRM code with vbl_lock held.
*/
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
{
return 0;
}
@@ -1999,7 +1911,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
/**
 * Function called by DRM code with vbl_lock held.
*/
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int vmw_enable_vblank(struct drm_crtc *crtc)
{
return -EINVAL;
}
@@ -2007,7 +1919,7 @@ int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
/**
 * Function called by DRM code with vbl_lock held.
*/
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void vmw_disable_vblank(struct drm_crtc *crtc)
{
}
@@ -2597,7 +2509,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
int increment)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+ struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBImage body;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5702219ec38f..16dafff5cab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -236,6 +236,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 0a6bbac00896..e8eb42933ca2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,7 +320,7 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
struct vmw_otable **otables = &dev_priv->otable_batch.otables;
int ret;
- if (dev_priv->has_dx) {
+ if (has_sm4_context(dev_priv)) {
*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
if (!(*otables))
return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index fdb52f6d29fb..cd7ed1650d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -354,37 +354,6 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
}
/**
- * Stop all streams.
- *
- * Used by the fb code when starting.
- *
- * Takes the overlay lock.
- */
-int vmw_overlay_stop_all(struct vmw_private *dev_priv)
-{
- struct vmw_overlay *overlay = dev_priv->overlay_priv;
- int i, ret;
-
- if (!overlay)
- return 0;
-
- mutex_lock(&overlay->mutex);
-
- for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
- struct vmw_stream *stream = &overlay->stream[i];
- if (!stream->buf)
- continue;
-
- ret = vmw_overlay_stop(dev_priv, i, false, false);
- WARN_ON(ret != 0);
- }
-
- mutex_unlock(&overlay->mutex);
-
- return 0;
-}
-
-/**
* Try to resume all paused streams.
*
* Used by the kms code after moving a new scanout buffer to vram.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index f07aa857587c..d4d66532f9c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -69,7 +69,7 @@ struct vmw_bo_dirty {
unsigned int ref_count;
unsigned long bitmap_size;
size_t size;
- unsigned long bitmap[0];
+ unsigned long bitmap[];
};
/**
@@ -473,11 +473,11 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
* a lot of unnecessary write faults.
*/
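+	/* Write-protect (drop VM_SHARED) so writes fault again and can be dirty-tracked. */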
if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
- prot = vma->vm_page_prot;
+ prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
else
prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
@@ -486,3 +486,75 @@ out_unlock:
return ret;
}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
+ pgprot_t prot;
+ vm_fault_t ret;
+ pgoff_t fault_page_size;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
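+	/* VM_MAYWRITE without VM_SHARED identifies a private (COW) mapping. */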
+ bool is_cow_mapping =
+ (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+
+ switch (pe_size) {
+ case PE_SIZE_PMD:
+ fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+ break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ case PE_SIZE_PUD:
+ fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ return VM_FAULT_FALLBACK;
+ }
+
+ /* Always do write dirty-tracking and COW on PTE level. */
+ if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
+ return VM_FAULT_FALLBACK;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ if (vbo->dirty) {
+ pgoff_t allowed_prefault;
+ unsigned long page_offset;
+
+ page_offset = vmf->pgoff -
+ drm_vma_node_start(&bo->base.vma_node);
+ if (page_offset >= bo->num_pages ||
+ vmw_resources_clean(vbo, page_offset,
+ page_offset + PAGE_SIZE,
+ &allowed_prefault)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ /*
+ * Write protect, so we get a new fault on write, and can
+ * split.
+ */
+ prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
+ } else {
+ prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index e5a283263211..32a22e4eddb1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -319,6 +319,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 63807361e16f..3f97b61dd5d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -319,7 +319,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
static const size_t vmw_view_define_sizes[] = {
[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
- [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+ [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView),
+ [vmw_view_ua] = sizeof(SVGA3dCmdDXDefineUAView)
};
struct vmw_private *dev_priv = ctx->dev_priv;
@@ -499,8 +500,8 @@ struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
* Each time a resource is put on the validation list as the result of a
* view pointing to it, we need to determine whether that resource will
* be dirtied (written to by the GPU) as a result of the corresponding
- * GPU operation. Currently only rendertarget- and depth-stencil views are
- * capable of dirtying its resource.
+ * GPU operation. Currently only rendertarget-, depth-stencil, and unordered
+ * access views are capable of dirtying their resource.
*
* Return: Whether the view type of @res dirties the resource it points to.
*/
@@ -509,10 +510,11 @@ u32 vmw_view_dirtying(struct vmw_resource *res)
static u32 view_is_dirtying[vmw_view_max] = {
[vmw_view_rt] = VMW_RES_DIRTY_SET,
[vmw_view_ds] = VMW_RES_DIRTY_SET,
+ [vmw_view_ua] = VMW_RES_DIRTY_SET,
};
/* Update this function as we add more view types */
- BUILD_BUG_ON(vmw_view_max != 3);
+ BUILD_BUG_ON(vmw_view_max != 4);
return view_is_dirtying[vmw_view(res)->view_type];
}
@@ -520,12 +522,14 @@ const u32 vmw_view_destroy_cmds[] = {
[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+ [vmw_view_ua] = SVGA_3D_CMD_DX_DESTROY_UA_VIEW,
};
const SVGACOTableType vmw_view_cotables[] = {
[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+ [vmw_view_ua] = SVGA_COTABLE_UAVIEW,
};
const SVGACOTableType vmw_so_cotables[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index 12565047bc55..f48b84bfeeac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -30,6 +30,7 @@ enum vmw_view_type {
vmw_view_sr,
vmw_view_rt,
vmw_view_ds,
+ vmw_view_ua,
vmw_view_max,
};
@@ -61,6 +62,7 @@ union vmw_view_destroy {
struct SVGA3dCmdDXDestroyRenderTargetView rtv;
struct SVGA3dCmdDXDestroyShaderResourceView srv;
struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+ struct SVGA3dCmdDXDestroyUAView uav;
u32 view_id;
};
@@ -87,6 +89,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
{
u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
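+	/*
+	 * The UA view commands do not follow the contiguous define/destroy
+	 * pairing assumed by the arithmetic above, so map them explicitly first.
+	 */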
+ if (id == SVGA_3D_CMD_DX_DEFINE_UA_VIEW ||
+ id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW)
+ return vmw_view_ua;
+
if (tmp > (u32)vmw_view_max)
return vmw_view_max;
@@ -123,6 +129,7 @@ static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
return vmw_so_ss;
case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+ case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB:
case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
return vmw_so_so;
default:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 41a96fb49835..9ffa9c75a5da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -590,7 +590,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
return;
/* Assume we are blitting from Guest (bo) to Host (display_srf) */
- dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base;
dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -916,6 +916,9 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
@@ -1038,7 +1041,6 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
enum stdu_content_type new_content_type;
struct vmw_framebuffer_surface *new_vfbs;
- struct drm_crtc *crtc = new_state->crtc;
uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h;
int ret;
@@ -1055,8 +1057,9 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vfb = vmw_framebuffer_to_vfb(new_fb);
new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
- if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
- new_vfbs->surface->base_size.height == vdisplay)
+ if (new_vfbs &&
+ new_vfbs->surface->metadata.base_size.width == hdisplay &&
+ new_vfbs->surface->metadata.base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY;
else if (vfb->bo)
new_content_type = SEPARATE_BO;
@@ -1064,12 +1067,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
new_content_type = SEPARATE_SURFACE;
if (new_content_type != SAME_AS_DISPLAY) {
- struct vmw_surface content_srf;
- struct drm_vmw_size display_base_size = {0};
+ struct vmw_surface_metadata metadata = {0};
- display_base_size.width = hdisplay;
- display_base_size.height = vdisplay;
- display_base_size.depth = 1;
+ metadata.base_size.width = hdisplay;
+ metadata.base_size.height = vdisplay;
+ metadata.base_size.depth = 1;
/*
* If content buffer is a buffer object, then we have to
@@ -1079,15 +1081,15 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
switch (new_fb->format->cpp[0]*8) {
case 32:
- content_srf.format = SVGA3D_X8R8G8B8;
+ metadata.format = SVGA3D_X8R8G8B8;
break;
case 16:
- content_srf.format = SVGA3D_R5G6B5;
+ metadata.format = SVGA3D_R5G6B5;
break;
case 8:
- content_srf.format = SVGA3D_P8;
+ metadata.format = SVGA3D_P8;
break;
default:
@@ -1095,22 +1097,20 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
return -EINVAL;
}
- content_srf.flags = 0;
- content_srf.mip_levels[0] = 1;
- content_srf.multisample_count = 0;
- content_srf.multisample_pattern =
- SVGA3D_MS_PATTERN_NONE;
- content_srf.quality_level = SVGA3D_MS_QUALITY_NONE;
+ metadata.mip_levels[0] = 1;
+ metadata.num_sizes = 1;
+ metadata.scanout = true;
} else {
- content_srf = *new_vfbs->surface;
+ metadata = new_vfbs->surface->metadata;
}
if (vps->surf) {
- struct drm_vmw_size cur_base_size = vps->surf->base_size;
+ struct drm_vmw_size cur_base_size =
+ vps->surf->metadata.base_size;
- if (cur_base_size.width != display_base_size.width ||
- cur_base_size.height != display_base_size.height ||
- vps->surf->format != content_srf.format) {
+ if (cur_base_size.width != metadata.base_size.width ||
+ cur_base_size.height != metadata.base_size.height ||
+ vps->surf->metadata.format != metadata.format) {
WARN_ON(vps->pinned != 0);
vmw_surface_unreference(&vps->surf);
}
@@ -1118,20 +1118,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
}
if (!vps->surf) {
- ret = vmw_surface_gb_priv_define
- (crtc->dev,
- /* Kernel visible only */
- 0,
- content_srf.flags,
- content_srf.format,
- true, /* a scanout buffer */
- content_srf.mip_levels[0],
- content_srf.multisample_count,
- 0,
- display_base_size,
- content_srf.multisample_pattern,
- content_srf.quality_level,
- &vps->surf);
+ ret = vmw_gb_surface_define(dev_priv, 0, &metadata,
+ &vps->surf);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
return ret;
@@ -1308,7 +1296,7 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
diff.cpp = stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base;
- dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
src_bo = &vfbbo->buffer->base;
@@ -1885,7 +1873,7 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
/* Do nothing if Screen Target support is turned off */
- if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+ if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
return -ENOSYS;
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
new file mode 100644
index 000000000000..193192456663
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
+
+/**
+ * struct vmw_dx_streamoutput - Streamoutput resource metadata.
+ * @res: Base resource struct.
+ * @ctx: Non-refcounted context to which @res belongs.
+ * @cotable: Refcounted cotable holding this Streamoutput.
+ * @cotable_head: List head for cotable-so_res list.
+ * @id: User-space provided identifier.
+ * @size: User-space provided mob size.
+ * @committed: Whether the streamoutput is actually created or pending creation.
+ */
+struct vmw_dx_streamoutput {
+ struct vmw_resource res;
+ struct vmw_resource *ctx;
+ struct vmw_resource *cotable;
+ struct list_head cotable_head;
+ u32 id;
+ u32 size;
+ bool committed;
+};
+
+static int vmw_dx_streamoutput_create(struct vmw_resource *res);
+static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
+ struct ttm_validate_buffer *val_buf);
+static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
+
+static size_t vmw_streamoutput_size;
+
+static const struct vmw_res_func vmw_dx_streamoutput_func = {
+ .res_type = vmw_res_streamoutput,
+ .needs_backup = true,
+ .may_evict = false,
+ .type_name = "DX streamoutput",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_streamoutput_create,
+ .destroy = NULL, /* Command buffer managed resource. */
+ .bind = vmw_dx_streamoutput_bind,
+ .unbind = vmw_dx_streamoutput_unbind,
+ .commit_notify = vmw_dx_streamoutput_commit_notify,
+};
+
+static inline struct vmw_dx_streamoutput *
+vmw_res_to_dx_streamoutput(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_dx_streamoutput, res);
+}
+
+/**
+ * vmw_dx_streamoutput_unscrub - Reattach the MOB to the streamoutput.
+ * @res: The streamoutput resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
+{
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd;
+
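+	/* Nothing to do if already on the cotable list (bound) or never committed. */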
+	if (!list_empty(&so->cotable_head) || !so->committed)
+ return 0;
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = so->id;
+ cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.offsetInBytes = res->backup_offset;
+ cmd->body.sizeInBytes = so->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ vmw_cotable_add_resource(so->cotable, &so->cotable_head);
+
+ return 0;
+}
+
+static int vmw_dx_streamoutput_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ int ret = 0;
+
+ WARN_ON_ONCE(!so->committed);
+
+ if (vmw_resource_mob_attached(res)) {
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+
+ res->id = so->id;
+
+ return ret;
+}
+
+static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ int ret;
+
+ if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
+ return -EINVAL;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ return ret;
+}
+
+/**
+ * vmw_dx_streamoutput_scrub - Unbind the MOB from the streamoutput.
+ * @res: The streamoutput resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd;
+
+ if (list_empty(&so->cotable_head))
+ return 0;
+
+ WARN_ON_ONCE(!so->committed);
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
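+	/* Binding MOB id SVGA3D_INVALID_ID detaches the backing MOB. */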
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = res->id;
+ cmd->body.mobid = SVGA3D_INVALID_ID;
+ cmd->body.offsetInBytes = 0;
+ cmd->body.sizeInBytes = so->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ res->id = -1;
+ list_del_init(&so->cotable_head);
+
+ return 0;
+}
+
+static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_fence_obj *fence;
+ int ret;
+
+ if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
+ return -EINVAL;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_scrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ if (ret)
+ return ret;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_bo_fence_single(val_buf->bo, fence);
+
+ if (fence != NULL)
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
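+	/* A staged define is committed on VMW_CMDBUF_RES_ADD and reverted otherwise. */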
+ if (state == VMW_CMDBUF_RES_ADD) {
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_cotable_add_resource(so->cotable, &so->cotable_head);
+ so->committed = true;
+ res->id = so->id;
+ mutex_unlock(&dev_priv->binding_mutex);
+ } else {
+ mutex_lock(&dev_priv->binding_mutex);
+ list_del_init(&so->cotable_head);
+ so->committed = false;
+ res->id = -1;
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+}
+
+/**
+ * vmw_dx_streamoutput_lookup - Do a streamoutput resource lookup by user key.
+ * @man: Command buffer managed resource manager for current context.
+ * @user_key: User-space identifier for lookup.
+ *
+ * Return: Valid refcounted vmw_resource on success, error pointer on failure.
+ */
+struct vmw_resource *
+vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key)
+{
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_streamoutput,
+ user_key);
+}
+
+static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+ vmw_resource_unreference(&so->cotable);
+ kfree(so);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
+}
+
+static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
+{
+ /* Destroyed by user-space cmd buf or as part of context takedown. */
+ res->id = -1;
+}
+
+/**
+ * vmw_dx_streamoutput_add - Add a streamoutput as a cmd buf managed resource.
+ * @man: Command buffer managed resource manager for current context.
+ * @ctx: Pointer to context resource.
+ * @user_key: The identifier for this streamoutput.
+ * @list: The list of staged command buffer managed resources.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx, u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_dx_streamoutput *so;
+ struct vmw_resource *res;
+ struct vmw_private *dev_priv = ctx->dev_priv;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
+ int ret;
+
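+	/* Compute the TTM accounting size for this object type once. */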
+ if (!vmw_streamoutput_size)
+ vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_streamoutput_size, &ttm_opt_ctx);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for streamout.\n");
+ return ret;
+ }
+
+ so = kmalloc(sizeof(*so), GFP_KERNEL);
+ if (!so) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_streamoutput_size);
+ return -ENOMEM;
+ }
+
+ res = &so->res;
+ so->ctx = ctx;
+ so->cotable = vmw_resource_reference
+ (vmw_context_cotable(ctx, SVGA_COTABLE_STREAMOUTPUT));
+ so->id = user_key;
+ so->committed = false;
+ INIT_LIST_HEAD(&so->cotable_head);
+ ret = vmw_resource_init(dev_priv, res, true,
+ vmw_dx_streamoutput_res_free,
+ &vmw_dx_streamoutput_func);
+ if (ret)
+ goto out_resource_init;
+
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_streamoutput, user_key,
+ res, list);
+ if (ret)
+ goto out_resource_init;
+
+ res->id = so->id;
+ res->hw_destroy = vmw_dx_streamoutput_hw_destroy;
+
+out_resource_init:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_dx_streamoutput_set_size - Set the streamoutput mob size in the res struct.
+ * @res: The streamoutput resource whose size is to be set.
+ * @size: The size provided by user-space.
+ */
+void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size)
+{
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+ so->size = size;
+}
+
+/**
+ * vmw_dx_streamoutput_remove - Stage streamoutput for removal.
+ * @man: Command buffer managed resource manager for current context.
+ * @user_key: The identifier for this streamoutput.
+ * @list: The list of staged command buffer managed resources.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_resource *r;
+
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_streamoutput,
+ (u32)user_key, list, &r);
+}
+
+/**
+ * vmw_dx_streamoutput_cotable_list_scrub - cotable unbind_func callback.
+ * @dev_priv: Device private.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ */
+void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback)
+{
+ struct vmw_dx_streamoutput *entry, *next;
+
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
+
+ list_for_each_entry_safe(entry, next, list, cotable_head) {
+ WARN_ON(vmw_dx_streamoutput_scrub(&entry->res));
+ if (!readback)
+			entry->committed = false;
+ }
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3ce630aa4fde..126f93c0b0b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -79,7 +79,7 @@ struct vmw_surface_dirty {
struct svga3dsurface_cache cache;
size_t size;
u32 num_subres;
- SVGA3dBox boxes[0];
+ SVGA3dBox boxes[];
};
static void vmw_user_surface_free(struct vmw_resource *res);
@@ -199,7 +199,7 @@ struct vmw_surface_destroy {
*/
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
- return srf->num_sizes * sizeof(struct vmw_surface_dma);
+ return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}
@@ -213,7 +213,7 @@ static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
*/
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
- return sizeof(struct vmw_surface_define) + srf->num_sizes *
+ return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
sizeof(SVGA3dSize);
}
@@ -262,7 +262,8 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
uint32_t cmd_len;
int i;
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+ cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
+ sizeof(SVGA3dSize);
cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
cmd->header.size = cmd_len;
@@ -272,16 +273,16 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
* since driver internally stores as 64 bit.
* For legacy surface define only 32 bit flag is supported.
*/
- cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
- cmd->body.format = srf->format;
+ cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
+ cmd->body.format = srf->metadata.format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+ cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];
cmd += 1;
cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
+ src_size = srf->metadata.sizes;
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
cmd_size->width = src_size->width;
cmd_size->height = src_size->height;
cmd_size->depth = src_size->depth;
@@ -305,15 +306,15 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
uint32_t i;
struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
const struct svga3d_surface_desc *desc =
- svga3dsurface_get_desc(srf->format);
+ svga3dsurface_get_desc(srf->metadata.format);
- for (i = 0; i < srf->num_sizes; ++i) {
+ for (i = 0; i < srf->metadata.num_sizes; ++i) {
SVGA3dCmdHeader *header = &cmd->header;
SVGA3dCmdSurfaceDMA *body = &cmd->body;
SVGA3dCopyBox *cb = &cmd->cb;
SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
- const struct drm_vmw_size *cur_size = &srf->sizes[i];
+ const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];
header->id = SVGA_3D_CMD_SURFACE_DMA;
header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
@@ -669,7 +670,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
- kfree(srf->sizes);
+ kfree(srf->metadata.sizes);
kfree(srf->snooper.image);
ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
@@ -728,6 +729,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
+ struct vmw_surface_metadata *metadata;
struct vmw_resource *res;
struct vmw_resource *tmp;
union drm_vmw_surface_create_arg *arg =
@@ -793,43 +795,45 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf = &user_srf->srf;
+ metadata = &srf->metadata;
res = &srf->res;
/* Driver internally stores as 64-bit flags */
- srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
- srf->format = req->format;
- srf->scanout = req->scanout;
+ metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
+ metadata->format = req->format;
+ metadata->scanout = req->scanout;
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = num_sizes;
+ memcpy(metadata->mip_levels, req->mip_levels,
+ sizeof(metadata->mip_levels));
+ metadata->num_sizes = num_sizes;
user_srf->size = size;
- srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
- req->size_addr,
- sizeof(*srf->sizes) * srf->num_sizes);
- if (IS_ERR(srf->sizes)) {
- ret = PTR_ERR(srf->sizes);
+ metadata->sizes =
+ memdup_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+ sizeof(*metadata->sizes) * metadata->num_sizes);
+ if (IS_ERR(metadata->sizes)) {
+ ret = PTR_ERR(metadata->sizes);
goto out_no_sizes;
}
- srf->offsets = kmalloc_array(srf->num_sizes,
- sizeof(*srf->offsets),
+ srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
GFP_KERNEL);
if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
}
- srf->base_size = *srf->sizes;
- srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->multisample_count = 0;
- srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
- srf->quality_level = SVGA3D_MS_QUALITY_NONE;
+ metadata->base_size = *srf->metadata.sizes;
+ metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ metadata->multisample_count = 0;
+ metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ metadata->quality_level = SVGA3D_MS_QUALITY_NONE;
cur_bo_offset = 0;
cur_offset = srf->offsets;
- cur_size = srf->sizes;
+ cur_size = metadata->sizes;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- for (j = 0; j < srf->mip_levels[i]; ++j) {
+ for (j = 0; j < metadata->mip_levels[i]; ++j) {
uint32_t stride = svga3dsurface_calculate_pitch
(desc, cur_size);
@@ -843,11 +847,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->backup_size = cur_bo_offset;
- if (srf->scanout &&
- srf->num_sizes == 1 &&
- srf->sizes[0].width == 64 &&
- srf->sizes[0].height == 64 &&
- srf->format == SVGA3D_A8R8G8B8) {
+ if (metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == 64 &&
+ metadata->sizes[0].height == 64 &&
+ metadata->format == SVGA3D_A8R8G8B8) {
srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
if (!srf->snooper.image) {
@@ -911,7 +915,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
out_no_copy:
kfree(srf->offsets);
out_no_offsets:
- kfree(srf->sizes);
+ kfree(metadata->sizes);
out_no_sizes:
ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
@@ -1031,18 +1035,19 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
srf = &user_srf->srf;
/* Downcast of flags when sending back to user space */
- rep->flags = (uint32_t)srf->flags;
- rep->format = srf->format;
- memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ rep->flags = (uint32_t)srf->metadata.flags;
+ rep->format = srf->metadata.format;
+ memcpy(rep->mip_levels, srf->metadata.mip_levels,
+ sizeof(srf->metadata.mip_levels));
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
rep->size_addr;
if (user_sizes)
- ret = copy_to_user(user_sizes, &srf->base_size,
- sizeof(srf->base_size));
+ ret = copy_to_user(user_sizes, &srf->metadata.base_size,
+ sizeof(srf->metadata.base_size));
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
- srf->num_sizes);
+ srf->metadata.num_sizes);
ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT;
}
@@ -1062,6 +1067,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_surface_metadata *metadata = &srf->metadata;
uint32_t cmd_len, cmd_id, submit_len;
int ret;
struct {
@@ -1076,6 +1082,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v3 body;
} *cmd3;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface_v4 body;
+ } *cmd4;
if (likely(res->id != -1))
return 0;
@@ -1092,12 +1102,16 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
+ cmd_len = sizeof(cmd4->body);
+ submit_len = sizeof(*cmd4);
+ } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
cmd_len = sizeof(cmd3->body);
submit_len = sizeof(*cmd3);
- } else if (srf->array_size > 0) {
- /* has_dx checked on creation time. */
+ } else if (metadata->array_size > 0) {
+ /* VMW_SM_4 support verified at creation time. */
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
cmd_len = sizeof(cmd2->body);
submit_len = sizeof(*cmd2);
@@ -1110,51 +1124,68 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
+ cmd4 = (typeof(cmd4))cmd;
if (unlikely(!cmd)) {
ret = -ENOMEM;
goto out_no_fifo;
}
- if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
+ cmd4->header.id = cmd_id;
+ cmd4->header.size = cmd_len;
+ cmd4->body.sid = srf->res.id;
+ cmd4->body.surfaceFlags = metadata->flags;
+ cmd4->body.format = metadata->format;
+ cmd4->body.numMipLevels = metadata->mip_levels[0];
+ cmd4->body.multisampleCount = metadata->multisample_count;
+ cmd4->body.multisamplePattern = metadata->multisample_pattern;
+ cmd4->body.qualityLevel = metadata->quality_level;
+ cmd4->body.autogenFilter = metadata->autogen_filter;
+ cmd4->body.size.width = metadata->base_size.width;
+ cmd4->body.size.height = metadata->base_size.height;
+ cmd4->body.size.depth = metadata->base_size.depth;
+ cmd4->body.arraySize = metadata->array_size;
+ cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
+ } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
cmd3->header.id = cmd_id;
cmd3->header.size = cmd_len;
cmd3->body.sid = srf->res.id;
- cmd3->body.surfaceFlags = srf->flags;
- cmd3->body.format = srf->format;
- cmd3->body.numMipLevels = srf->mip_levels[0];
- cmd3->body.multisampleCount = srf->multisample_count;
- cmd3->body.multisamplePattern = srf->multisample_pattern;
- cmd3->body.qualityLevel = srf->quality_level;
- cmd3->body.autogenFilter = srf->autogen_filter;
- cmd3->body.size.width = srf->base_size.width;
- cmd3->body.size.height = srf->base_size.height;
- cmd3->body.size.depth = srf->base_size.depth;
- cmd3->body.arraySize = srf->array_size;
- } else if (srf->array_size > 0) {
+ cmd3->body.surfaceFlags = metadata->flags;
+ cmd3->body.format = metadata->format;
+ cmd3->body.numMipLevels = metadata->mip_levels[0];
+ cmd3->body.multisampleCount = metadata->multisample_count;
+ cmd3->body.multisamplePattern = metadata->multisample_pattern;
+ cmd3->body.qualityLevel = metadata->quality_level;
+ cmd3->body.autogenFilter = metadata->autogen_filter;
+ cmd3->body.size.width = metadata->base_size.width;
+ cmd3->body.size.height = metadata->base_size.height;
+ cmd3->body.size.depth = metadata->base_size.depth;
+ cmd3->body.arraySize = metadata->array_size;
+ } else if (metadata->array_size > 0) {
cmd2->header.id = cmd_id;
cmd2->header.size = cmd_len;
cmd2->body.sid = srf->res.id;
- cmd2->body.surfaceFlags = srf->flags;
- cmd2->body.format = srf->format;
- cmd2->body.numMipLevels = srf->mip_levels[0];
- cmd2->body.multisampleCount = srf->multisample_count;
- cmd2->body.autogenFilter = srf->autogen_filter;
- cmd2->body.size.width = srf->base_size.width;
- cmd2->body.size.height = srf->base_size.height;
- cmd2->body.size.depth = srf->base_size.depth;
- cmd2->body.arraySize = srf->array_size;
+ cmd2->body.surfaceFlags = metadata->flags;
+ cmd2->body.format = metadata->format;
+ cmd2->body.numMipLevels = metadata->mip_levels[0];
+ cmd2->body.multisampleCount = metadata->multisample_count;
+ cmd2->body.autogenFilter = metadata->autogen_filter;
+ cmd2->body.size.width = metadata->base_size.width;
+ cmd2->body.size.height = metadata->base_size.height;
+ cmd2->body.size.depth = metadata->base_size.depth;
+ cmd2->body.arraySize = metadata->array_size;
} else {
cmd->header.id = cmd_id;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = srf->format;
- cmd->body.numMipLevels = srf->mip_levels[0];
- cmd->body.multisampleCount = srf->multisample_count;
- cmd->body.autogenFilter = srf->autogen_filter;
- cmd->body.size.width = srf->base_size.width;
- cmd->body.size.height = srf->base_size.height;
- cmd->body.size.depth = srf->base_size.depth;
+ cmd->body.surfaceFlags = metadata->flags;
+ cmd->body.format = metadata->format;
+ cmd->body.numMipLevels = metadata->mip_levels[0];
+ cmd->body.multisampleCount = metadata->multisample_count;
+ cmd->body.autogenFilter = metadata->autogen_filter;
+ cmd->body.size.width = metadata->base_size.width;
+ cmd->body.size.height = metadata->base_size.height;
+ cmd->body.size.depth = metadata->base_size.depth;
}
vmw_fifo_commit(dev_priv, submit_len);
@@ -1314,7 +1345,6 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
return 0;
}
-
/**
* vmw_gb_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
@@ -1336,6 +1366,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
req_ext.svga3d_flags_upper_32_bits = 0;
req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
+ req_ext.buffer_byte_stride = 0;
req_ext.must_be_zero = 0;
return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
@@ -1371,171 +1402,6 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_surface_gb_priv_define - Define a private GB surface
- *
- * @dev: Pointer to a struct drm_device
- * @user_accounting_size: Used to track user-space memory usage, set
- * to 0 for kernel mode only memory
- * @svga3d_flags: SVGA3d surface flags for the device
- * @format: requested surface format
- * @for_scanout: true if inteded to be used for scanout buffer
- * @num_mip_levels: number of MIP levels
- * @multisample_count:
- * @array_size: Surface array size.
- * @size: width, heigh, depth of the surface requested
- * @multisample_pattern: Multisampling pattern when msaa is supported
- * @quality_level: Precision settings
- * @user_srf_out: allocated user_srf. Set to NULL on failure.
- *
- * GB surfaces allocated by this function will not have a user mode handle, and
- * thus will only be visible to vmwgfx. For optimization reasons the
- * surface may later be given a user mode handle by another function to make
- * it available to user mode drivers.
- */
-int vmw_surface_gb_priv_define(struct drm_device *dev,
- uint32_t user_accounting_size,
- SVGA3dSurfaceAllFlags svga3d_flags,
- SVGA3dSurfaceFormat format,
- bool for_scanout,
- uint32_t num_mip_levels,
- uint32_t multisample_count,
- uint32_t array_size,
- struct drm_vmw_size size,
- SVGA3dMSPattern multisample_pattern,
- SVGA3dMSQualityLevel quality_level,
- struct vmw_surface **srf_out)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
- struct vmw_surface *srf;
- int ret;
- u32 num_layers = 1;
- u32 sample_count = 1;
-
- *srf_out = NULL;
-
- if (for_scanout) {
- if (!svga3dsurface_is_screen_target_format(format)) {
- VMW_DEBUG_USER("Invalid Screen Target surface format.");
- return -EINVAL;
- }
-
- if (size.width > dev_priv->texture_max_width ||
- size.height > dev_priv->texture_max_height) {
- VMW_DEBUG_USER("%ux%u\n, exceeds max surface size %ux%u",
- size.width, size.height,
- dev_priv->texture_max_width,
- dev_priv->texture_max_height);
- return -EINVAL;
- }
- } else {
- const struct svga3d_surface_desc *desc;
-
- desc = svga3dsurface_get_desc(format);
- if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- VMW_DEBUG_USER("Invalid surface format.\n");
- return -EINVAL;
- }
- }
-
- /* array_size must be null for non-GL3 host. */
- if (array_size > 0 && !dev_priv->has_dx) {
- VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
- return -EINVAL;
- }
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- user_accounting_size, &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
-
- user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(!user_srf)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
-
- *srf_out = &user_srf->srf;
- user_srf->size = user_accounting_size;
- user_srf->prime.base.shareable = false;
- user_srf->prime.base.tfile = NULL;
-
- srf = &user_srf->srf;
- srf->flags = svga3d_flags;
- srf->format = format;
- srf->scanout = for_scanout;
- srf->mip_levels[0] = num_mip_levels;
- srf->num_sizes = 1;
- srf->sizes = NULL;
- srf->offsets = NULL;
- srf->base_size = size;
- srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->array_size = array_size;
- srf->multisample_count = multisample_count;
- srf->multisample_pattern = multisample_pattern;
- srf->quality_level = quality_level;
-
- if (array_size)
- num_layers = array_size;
- else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
- num_layers = SVGA3D_MAX_SURFACE_FACES;
-
- if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
- sample_count = srf->multisample_count;
-
- srf->res.backup_size =
- svga3dsurface_get_serialized_size_extended(srf->format,
- srf->base_size,
- srf->mip_levels[0],
- num_layers,
- sample_count);
-
- if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
- srf->res.backup_size += sizeof(SVGA3dDXSOState);
-
- /*
- * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
- * size greater than STDU max width/height. This is really a workaround
- * to support creation of big framebuffer requested by some user-space
- * for whole topology. That big framebuffer won't really be used for
- * binding with screen target as during prepare_fb a separate surface is
- * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
- */
- if (dev_priv->active_display_unit == vmw_du_screen_target &&
- for_scanout && size.width <= dev_priv->stdu_max_width &&
- size.height <= dev_priv->stdu_max_height)
- srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
-
- /*
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
-
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
* vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
* the user surface define functionality.
*
@@ -1588,43 +1454,60 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
struct drm_vmw_gb_surface_create_rep *rep,
struct drm_file *file_priv)
{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
+ struct vmw_surface_metadata metadata = {0};
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret;
+ int ret = 0;
uint32_t size;
uint32_t backup_handle = 0;
SVGA3dSurfaceAllFlags svga3d_flags_64 =
SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
req->base.svga3d_flags);
- if (!dev_priv->has_sm4_1) {
- /*
- * If SM4_1 is not support then cannot send 64-bit flag to
- * device.
- */
+ /* array_size must be zero for a non-GL3 host. */
+ if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
+ VMW_DEBUG_USER("SM4 surface not supported.\n");
+ return -EINVAL;
+ }
+
+ if (!has_sm4_1_context(dev_priv)) {
if (req->svga3d_flags_upper_32_bits != 0)
- return -EINVAL;
+ ret = -EINVAL;
if (req->base.multisample_count != 0)
- return -EINVAL;
+ ret = -EINVAL;
if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
- return -EINVAL;
+ ret = -EINVAL;
if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
- return -EINVAL;
+ ret = -EINVAL;
+
+ if (ret) {
+ VMW_DEBUG_USER("SM4.1 surface not supported.\n");
+ return ret;
+ }
+ }
+
+ if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
+ VMW_DEBUG_USER("SM5 surface not supported.\n");
+ return -EINVAL;
}
if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
- req->base.multisample_count == 0)
+ req->base.multisample_count == 0) {
+ VMW_DEBUG_USER("Invalid sample count.\n");
return -EINVAL;
+ }
- if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
+ VMW_DEBUG_USER("Invalid mip level.\n");
return -EINVAL;
+ }
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
@@ -1632,22 +1515,25 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
size = vmw_user_surface_size;
+ metadata.flags = svga3d_flags_64;
+ metadata.format = req->base.format;
+ metadata.mip_levels[0] = req->base.mip_levels;
+ metadata.multisample_count = req->base.multisample_count;
+ metadata.multisample_pattern = req->multisample_pattern;
+ metadata.quality_level = req->quality_level;
+ metadata.array_size = req->base.array_size;
+ metadata.buffer_byte_stride = req->buffer_byte_stride;
+ metadata.num_sizes = 1;
+ metadata.base_size = req->base.base_size;
+ metadata.scanout = req->base.drm_surface_flags &
+ drm_vmw_surface_flag_scanout;
+
/* Define a surface based on the parameters. */
- ret = vmw_surface_gb_priv_define(dev,
- size,
- svga3d_flags_64,
- req->base.format,
- req->base.drm_surface_flags &
- drm_vmw_surface_flag_scanout,
- req->base.mip_levels,
- req->base.multisample_count,
- req->base.array_size,
- req->base.base_size,
- req->multisample_pattern,
- req->quality_level,
- &srf);
- if (unlikely(ret != 0))
+ ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
+ if (ret != 0) {
+ VMW_DEBUG_USER("Failed to define surface.\n");
return ret;
+ }
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (drm_is_primary_client(file_priv))
@@ -1762,9 +1648,10 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
+ struct vmw_surface_metadata *metadata;
struct ttm_base_object *base;
uint32_t backup_handle;
- int ret = -EINVAL;
+ int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base);
@@ -1777,6 +1664,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
+ metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
@@ -1790,15 +1678,15 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
goto out_bad_resource;
}
- rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
- rep->creq.base.format = srf->format;
- rep->creq.base.mip_levels = srf->mip_levels[0];
+ rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
+ rep->creq.base.format = metadata->format;
+ rep->creq.base.mip_levels = metadata->mip_levels[0];
rep->creq.base.drm_surface_flags = 0;
- rep->creq.base.multisample_count = srf->multisample_count;
- rep->creq.base.autogen_filter = srf->autogen_filter;
- rep->creq.base.array_size = srf->array_size;
+ rep->creq.base.multisample_count = metadata->multisample_count;
+ rep->creq.base.autogen_filter = metadata->autogen_filter;
+ rep->creq.base.array_size = metadata->array_size;
rep->creq.base.buffer_handle = backup_handle;
- rep->creq.base.base_size = srf->base_size;
+ rep->creq.base.base_size = metadata->base_size;
rep->crep.handle = user_srf->prime.base.handle;
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
@@ -1808,9 +1696,9 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
- SVGA3D_FLAGS_UPPER_32(srf->flags);
- rep->creq.multisample_pattern = srf->multisample_pattern;
- rep->creq.quality_level = srf->quality_level;
+ SVGA3D_FLAGS_UPPER_32(metadata->flags);
+ rep->creq.multisample_pattern = metadata->multisample_pattern;
+ rep->creq.quality_level = metadata->quality_level;
rep->creq.must_be_zero = 0;
out_bad_resource:
@@ -1968,7 +1856,7 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
start >= res->backup_offset + res->backup_size))
return;
- if (srf->format == SVGA3D_BUFFER)
+ if (srf->metadata.format == SVGA3D_BUFFER)
vmw_surface_buf_dirty_range_add(res, start, end);
else
vmw_surface_tex_dirty_range_add(res, start, end);
@@ -2058,6 +1946,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
struct vmw_surface *srf = vmw_res_to_srf(res);
+ const struct vmw_surface_metadata *metadata = &srf->metadata;
struct vmw_surface_dirty *dirty;
u32 num_layers = 1;
u32 num_mip;
@@ -2070,12 +1959,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
};
int ret;
- if (srf->array_size)
- num_layers = srf->array_size;
- else if (srf->flags & SVGA3D_SURFACE_CUBEMAP)
+ if (metadata->array_size)
+ num_layers = metadata->array_size;
+ else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
num_layers *= SVGA3D_MAX_SURFACE_FACES;
- num_mip = srf->mip_levels[0];
+ num_mip = metadata->mip_levels[0];
if (!num_mip)
num_mip = 1;
@@ -2096,9 +1985,10 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
goto out_no_dirty;
}
- num_samples = max_t(u32, 1, srf->multisample_count);
- ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip,
- num_layers, num_samples, &dirty->cache);
+ num_samples = max_t(u32, 1, metadata->multisample_count);
+ ret = svga3dsurface_setup_cache(&metadata->base_size, metadata->format,
+ num_mip, num_layers, num_samples,
+ &dirty->cache);
if (ret)
goto out_no_cache;
@@ -2153,3 +2043,147 @@ static int vmw_surface_clean(struct vmw_resource *res)
return 0;
}
+
+/**
+ * vmw_gb_surface_define - Define a private GB surface
+ *
+ * @dev_priv: Pointer to a device private.
+ * @user_accounting_size: Used to track user-space memory usage; set
+ * to 0 for kernel-mode-only memory.
+ * @req: Metadata describing the surface to create.
+ * @srf_out: Pointer to the allocated surface. Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx. For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_gb_surface_define(struct vmw_private *dev_priv,
+ uint32_t user_accounting_size,
+ const struct vmw_surface_metadata *req,
+ struct vmw_surface **srf_out)
+{
+ struct vmw_surface_metadata *metadata;
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
+ u32 sample_count = 1;
+ u32 num_layers = 1;
+ int ret;
+
+ *srf_out = NULL;
+
+ if (req->scanout) {
+ if (!svga3dsurface_is_screen_target_format(req->format)) {
+ VMW_DEBUG_USER("Invalid Screen Target surface format.\n");
+ return -EINVAL;
+ }
+
+ if (req->base_size.width > dev_priv->texture_max_width ||
+ req->base_size.height > dev_priv->texture_max_height) {
+ VMW_DEBUG_USER("%ux%u, exceeds max surface size %ux%u\n",
+ req->base_size.width,
+ req->base_size.height,
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ return -EINVAL;
+ }
+ } else {
+ const struct svga3d_surface_desc *desc =
+ svga3dsurface_get_desc(req->format);
+
+ if (desc->block_desc == SVGA3DBLOCKDESC_NONE) {
+ VMW_DEBUG_USER("Invalid surface format.\n");
+ return -EINVAL;
+ }
+ }
+
+ if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
+ return -EINVAL;
+
+ if (req->num_sizes != 1)
+ return -EINVAL;
+
+ if (req->sizes != NULL)
+ return -EINVAL;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ user_accounting_size, &ctx);
+ if (ret != 0) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(!user_srf)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ *srf_out = &user_srf->srf;
+ user_srf->size = user_accounting_size;
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
+
+ srf = &user_srf->srf;
+ srf->metadata = *req;
+ srf->offsets = NULL;
+
+ metadata = &srf->metadata;
+
+ if (metadata->array_size)
+ num_layers = req->array_size;
+ else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
+ num_layers = SVGA3D_MAX_SURFACE_FACES;
+
+ if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
+ sample_count = metadata->multisample_count;
+
+ srf->res.backup_size =
+ svga3dsurface_get_serialized_size_extended(metadata->format,
+ metadata->base_size,
+ metadata->mip_levels[0],
+ num_layers,
+ sample_count);
+
+ if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+ srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+ /*
+ * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
+ * size greater than STDU max width/height. This is really a workaround
+ * to support creation of big framebuffer requested by some user-space
+ * for whole topology. That big framebuffer won't really be used for
+ * binding with screen target as during prepare_fb a separate surface is
+ * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
+ */
+ if (dev_priv->active_display_unit == vmw_du_screen_target &&
+ metadata->scanout &&
+ metadata->base_size.width <= dev_priv->stdu_max_width &&
+ metadata->base_size.height <= dev_priv->stdu_max_height)
+ metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+ /*
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
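For reference, a minimal sketch of how a kernel-internal caller would now drive the consolidated helper, assuming the vmw_surface_metadata layout introduced above. The function name and the format/flag choices below are illustrative, not taken from this patch:

static int example_define_scanout_surface(struct vmw_private *dev_priv,
					  u32 width, u32 height,
					  struct vmw_surface **srf_out)
{
	/*
	 * Zero-init gives autogen_filter = SVGA3D_TEX_FILTER_NONE and
	 * sizes = NULL, as required by the checks in
	 * vmw_gb_surface_define().
	 */
	struct vmw_surface_metadata metadata = {0};

	metadata.format = SVGA3D_X8R8G8B8;	/* illustrative format */
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;			/* must be exactly 1 */
	metadata.base_size.width = width;
	metadata.base_size.height = height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	/* Accounting size 0 marks this as kernel-mode-only memory. */
	return vmw_gb_surface_define(dev_priv, 0, &metadata, srf_out);
}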
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
new file mode 100644
index 000000000000..b7c816ba7166
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Huge page-table-entry support for IO memory.
+ *
+ * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
+ */
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+
+/**
+ * struct vmw_thp_manager - Range manager implementing huge page alignment
+ *
+ * @mm: The underlying range manager. Protected by @lock.
+ * @lock: Manager lock.
+ */
+struct vmw_thp_manager {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long align_pages,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+ unsigned long lpfn,
+ enum drm_mm_insert_mode mode)
+{
+ if (align_pages >= mem->page_alignment &&
+ (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
+ return drm_mm_insert_node_in_range(mm, node,
+ mem->num_pages,
+ align_pages, 0,
+ place->fpfn, lpfn, mode);
+ }
+
+ return -ENOSPC;
+}
+
+static int vmw_thp_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+ struct drm_mm_node *node;
+ unsigned long align_pages;
+ unsigned long lpfn;
+ enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ mode = DRM_MM_INSERT_BEST;
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mode = DRM_MM_INSERT_HIGH;
+
+ spin_lock(&rman->lock);
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
+ align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
+ if (mem->num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(mm, node, align_pages,
+ place, mem, lpfn, mode);
+ if (!ret)
+ goto found_unlock;
+ }
+ }
+
+ align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
+ if (mem->num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
+ lpfn, mode);
+ if (!ret)
+ goto found_unlock;
+ }
+
+ ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ mem->page_alignment, 0,
+ place->fpfn, lpfn, mode);
+found_unlock:
+ spin_unlock(&rman->lock);
+
+ if (unlikely(ret)) {
+ kfree(node);
+ } else {
+ mem->mm_node = node;
+ mem->start = node->start;
+ }
+
+ return 0;
+}
+
+static void vmw_thp_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+
+ if (mem->mm_node) {
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(mem->mm_node);
+ spin_unlock(&rman->lock);
+
+ kfree(mem->mm_node);
+ mem->mm_node = NULL;
+ }
+}
+
+static int vmw_thp_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct vmw_thp_manager *rman;
+
+ rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+ if (!rman)
+ return -ENOMEM;
+
+ drm_mm_init(&rman->mm, 0, p_size);
+ spin_lock_init(&rman->lock);
+ man->priv = rman;
+ return 0;
+}
+
+static int vmw_thp_takedown(struct ttm_mem_type_manager *man)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+
+ spin_lock(&rman->lock);
+ if (drm_mm_clean(mm)) {
+ drm_mm_takedown(mm);
+ spin_unlock(&rman->lock);
+ kfree(rman);
+ man->priv = NULL;
+ return 0;
+ }
+ spin_unlock(&rman->lock);
+ return -EBUSY;
+}
+
+static void vmw_thp_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+
+ spin_lock(&rman->lock);
+ drm_mm_print(&rman->mm, printer);
+ spin_unlock(&rman->lock);
+}
+
+const struct ttm_mem_type_manager_func vmw_thp_func = {
+ .init = vmw_thp_init,
+ .takedown = vmw_thp_takedown,
+ .get_node = vmw_thp_get_node,
+ .put_node = vmw_thp_put_node,
+ .debug = vmw_thp_debug
+};
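The allocation policy in vmw_thp_get_node() above reads as a three-step cascade. A distilled sketch, assuming x86-64 huge-page geometry (PUD = 1 GiB, PMD = 2 MiB); thp_pick_alignment() is illustrative and not part of the driver:

#include <linux/sizes.h>

static unsigned long thp_pick_alignment(unsigned long num_pages,
					unsigned long req_align)
{
	const unsigned long pud_pages = SZ_1G >> PAGE_SHIFT;
	const unsigned long pmd_pages = SZ_2M >> PAGE_SHIFT;

	/*
	 * Try the largest huge-page alignment the BO can actually fill,
	 * provided it is compatible with the caller's own alignment.
	 */
	if (num_pages >= pud_pages && pud_pages >= req_align &&
	    (!req_align || pud_pages % req_align == 0))
		return pud_pages;

	/* Fall back to PMD alignment under the same compatibility rule. */
	if (num_pages >= pmd_pages && pmd_pages >= req_align &&
	    (!req_align || pmd_pages % req_align == 0))
		return pmd_pages;

	/* Finally, just honor whatever the caller requested. */
	return req_align;
}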
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index d8ea3dd10af0..bf0bc4697959 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -736,11 +736,6 @@ out_no_init:
return NULL;
}
-static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -754,7 +749,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
+ man->func = &vmw_thp_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
@@ -866,7 +861,6 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
- .invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index aa7e50f63b94..3c03b1746661 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -34,7 +34,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
.page_mkwrite = vmw_bo_vm_mkwrite,
.fault = vmw_bo_vm_fault,
.open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close
+ .close = ttm_bo_vm_close,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ .huge_fault = vmw_bo_vm_huge_fault,
+#endif
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 4be49c1aef51..374142018171 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -401,7 +401,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
obj = xen_drm_front_gem_create(dev, args->size);
if (IS_ERR_OR_NULL(obj)) {
- ret = PTR_ERR(obj);
+ ret = PTR_ERR_OR_ZERO(obj);
goto fail;
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 4f34c5208180..78096bbcd226 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -220,6 +220,24 @@ static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
return false;
}
+static int display_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ /*
+ * Xen doesn't initialize vblanking via drm_vblank_init(), so
+ * DRM helpers assume that it doesn't handle vblanking and start
+ * sending out fake VBLANK events automatically.
+ *
+ * As xen contains its own logic for sending out VBLANK events
+ * in send_pending_event(), disable no_vblank (i.e., the xen
+ * driver has vblanking support).
+ */
+ crtc_state->no_vblank = false;
+
+ return 0;
+}
+
static void display_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
{
@@ -284,6 +302,7 @@ static const struct drm_simple_display_pipe_funcs display_funcs = {
.enable = display_enable,
.disable = display_disable,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .check = display_check,
.update = display_update,
};
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 086c50fac689..c8f7b21fa09e 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -54,7 +54,7 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane,
int min_scale = FRAC_16_16(1, 8);
int max_scale = FRAC_16_16(8, 1);
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
@@ -281,7 +281,7 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state;
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 388bcc2889aa..d24344e91922 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -192,17 +192,55 @@ static void host1x_setup_sid_table(struct host1x *host)
}
}
+static bool host1x_wants_iommu(struct host1x *host1x)
+{
+ /*
+ * If we support addressing a maximum of 32 bits of physical memory
+ * and if the host1x firewall is enabled, there's no need to enable
+ * IOMMU support. This can happen for example on Tegra20, Tegra30
+ * and Tegra114.
+ *
+ * Tegra124 and later can address up to 34 bits of physical memory and
+ * many platforms come equipped with more than 2 GiB of system memory,
+ * which requires crossing the 4 GiB boundary. But there's a catch: on
+ * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
+ * only address up to 32 bits of memory in GATHER opcodes, which means
+ * that command buffers need to either be in the first 2 GiB of system
+ * memory (which could quickly lead to memory exhaustion), or command
+ * buffers need to be treated differently from other buffers (which is
+ * not possible with the current ABI).
+ *
+ * A third option is to use the IOMMU in these cases to make sure all
+ * buffers will be mapped into a 32-bit IOVA space that host1x can
+ * address. This allows all of the system memory to be used and works
+ * within the limitations of the host1x on these SoCs.
+ *
+ * In summary, default to enable IOMMU on Tegra124 and later. For any
+ * of the earlier SoCs, only use the IOMMU for additional safety when
+ * the host1x firewall is disabled.
+ */
+ if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ return false;
+ }
+
+ return true;
+}
+
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
int err;
/*
- * If the host1x firewall is enabled, there's no need to enable IOMMU
- * support. Similarly, if host1x is already attached to an IOMMU (via
- * the DMA API), don't try to attach again.
+ * We may not always want to enable IOMMU support (for example if the
+ * host1x firewall is already enabled and we don't support addressing
+ * more than 32 bits of physical memory), so check for that first.
+ *
+ * Similarly, if host1x is already attached to an IOMMU (via the DMA
+ * API), don't try to attach again.
*/
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
+ if (!host1x_wants_iommu(host) || domain)
return domain;
host->group = iommu_group_get(host->dev);
@@ -502,6 +540,19 @@ static void __exit tegra_host1x_exit(void)
}
module_exit(tegra_host1x_exit);
+/**
+ * host1x_get_dma_mask() - query the supported DMA mask for host1x
+ * @host1x: host1x instance
+ *
+ * Note that this returns the supported DMA mask for host1x, which can be
+ * different from the applicable DMA mask under certain circumstances.
+ */
+u64 host1x_get_dma_mask(struct host1x *host1x)
+{
+ return host1x->info->dma_mask;
+}
+EXPORT_SYMBOL(host1x_get_dma_mask);
+
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
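A sketch of the intended consumer side of host1x_get_dma_mask(), assuming a host1x client driver's probe path; the drm/tegra driver is the expected user of this export, but that call site is not part of this hunk and example_client_probe() is hypothetical:

#include <linux/host1x.h>
#include <linux/dma-mapping.h>

static int example_client_probe(struct host1x_device *dev)
{
	/* host1x stores itself as drvdata of the parent device. */
	struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
	u64 mask = host1x_get_dma_mask(host1x);

	/* Never advertise more DMA addressing than host1x supports. */
	return dma_coerce_mask_and_coherent(&dev->dev, mask);
}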
diff --git a/drivers/gpu/trace/Kconfig b/drivers/gpu/trace/Kconfig
new file mode 100644
index 000000000000..c24e9edd022e
--- /dev/null
+++ b/drivers/gpu/trace/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config TRACE_GPU_MEM
+ bool
diff --git a/drivers/gpu/trace/Makefile b/drivers/gpu/trace/Makefile
new file mode 100644
index 000000000000..b70fbdc5847f
--- /dev/null
+++ b/drivers/gpu/trace/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_TRACE_GPU_MEM) += trace_gpu_mem.o
diff --git a/drivers/gpu/trace/trace_gpu_mem.c b/drivers/gpu/trace/trace_gpu_mem.c
new file mode 100644
index 000000000000..01e855897b6d
--- /dev/null
+++ b/drivers/gpu/trace/trace_gpu_mem.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPU memory trace points
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu_mem.h>
+
+EXPORT_TRACEPOINT_SYMBOL(gpu_mem_total);
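A minimal sketch of a driver emitting this tracepoint, assuming the (gpu_id, pid, size) prototype declared in <trace/events/gpu_mem.h>; example_report_usage() is hypothetical:

#include <trace/events/gpu_mem.h>

/*
 * Report a process's GPU memory total; a pid of 0 is typically used
 * to report the global total for the GPU.
 */
static void example_report_usage(u32 gpu_id, u32 pid, u64 total_bytes)
{
	trace_gpu_mem_total(gpu_id, pid, total_bytes);
}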
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 494a39e74939..34f07371716d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -362,6 +362,13 @@ config HID_GFRM
---help---
Support for Google Fiber TV Box remote controls
+config HID_GLORIOUS
+ tristate "Glorious PC Gaming Race mice"
+ depends on HID
+ help
+ Support for Glorious PC Gaming Race mice such as
+ the Glorious Model O, O- and D.
+
config HID_HOLTEK
tristate "Holtek HID devices"
depends on USB_HID
@@ -1039,7 +1046,7 @@ config HID_U2FZERO
U2F Zero only supports blinking its LED, so this driver doesn't
allow setting the brightness to anything but 1, which will
- trigger a single blink and immediately reset to back 0.
+ trigger a single blink and immediately reset back to 0.
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
@@ -1145,6 +1152,17 @@ config HID_ALPS
Say Y here if you have a Alps touchpads over i2c-hid or usbhid
and want support for its special functionalities.
+config HID_MCP2221
+ tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support"
+ depends on USB_HID && I2C
+ depends on GPIOLIB
+ ---help---
+ Provides I2C and SMBUS host adapter functionality over USB-HID
+ through the MCP2221 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-mcp2221.ko.
+
endmenu
endif # HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index bfefa365b1ce..d8ea4b8c95af 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_HID_ELO) += hid-elo.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
+obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o
obj-$(CONFIG_HID_GOOGLE_HAMMER) += hid-google-hammer.o
obj-$(CONFIG_HID_GT683R) += hid-gt683r.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
@@ -70,6 +71,7 @@ obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MACALLY) += hid-macally.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MALTRON) += hid-maltron.o
+obj-$(CONFIG_HID_MCP2221) += hid-mcp2221.o
obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 28588f74425e..6f1fe7248d81 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -801,6 +801,7 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
break;
case HID_DEVICE_ID_ALPS_U1_DUAL:
case HID_DEVICE_ID_ALPS_U1:
+ case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY:
data->dev_type = U1;
break;
default:
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index bf8d4afe0d6a..8deded185725 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -283,11 +283,9 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
int ret;
struct appleir *appleir;
- appleir = kzalloc(sizeof(struct appleir), GFP_KERNEL);
- if (!appleir) {
- ret = -ENOMEM;
- goto allocfail;
- }
+ appleir = devm_kzalloc(&hid->dev, sizeof(struct appleir), GFP_KERNEL);
+ if (!appleir)
+ return -ENOMEM;
appleir->hid = hid;
@@ -313,8 +311,7 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
return 0;
fail:
- kfree(appleir);
-allocfail:
+ devm_kfree(&hid->dev, appleir);
return ret;
}
@@ -323,7 +320,6 @@ static void appleir_remove(struct hid_device *hid)
struct appleir *appleir = hid_get_drvdata(hid);
hid_hw_stop(hid);
del_timer_sync(&appleir->key_up_timer);
- kfree(appleir);
}
static const struct hid_device_id appleir_devices[] = {
diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c
new file mode 100644
index 000000000000..558eb08c19ef
--- /dev/null
+++ b/drivers/hid/hid-glorious.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * USB HID driver for Glorious PC Gaming Race
+ * Glorious Model O, O- and D mice.
+ *
+ * Copyright (c) 2020 Samuel ÄŒavoj <sammko@sammserver.com>
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Samuel ÄŒavoj <sammko@sammserver.com>");
+MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");
+
+/*
+ * Glorious Model O and O- specify the const flag in the consumer input
+ * report descriptor, which leads to inputs being ignored. Fix this
+ * by patching the descriptor.
+ */
+static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize == 213 &&
+ rdesc[84] == 129 && rdesc[112] == 129 && rdesc[140] == 129 &&
+ rdesc[85] == 3 && rdesc[113] == 3 && rdesc[141] == 3) {
+ hid_info(hdev, "patching Glorious Model O consumer control report descriptor\n");
+ rdesc[85] = rdesc[113] = rdesc[141] =
+ HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE;
+ }
+ return rdesc;
+}
+
+static void glorious_update_name(struct hid_device *hdev)
+{
+ const char *model = "Device";
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_GLORIOUS_MODEL_O:
+ model = "Model O"; break;
+ case USB_DEVICE_ID_GLORIOUS_MODEL_D:
+ model = "Model D"; break;
+ }
+
+ snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model);
+}
+
+static int glorious_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ glorious_update_name(hdev);
+
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static const struct hid_device_id glorious_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
+ USB_DEVICE_ID_GLORIOUS_MODEL_O) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
+ USB_DEVICE_ID_GLORIOUS_MODEL_D) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, glorious_devices);
+
+static struct hid_driver glorious_driver = {
+ .name = "glorious",
+ .id_table = glorious_devices,
+ .probe = glorious_probe,
+ .report_fixup = glorious_report_fixup
+};
+
+module_hid_driver(glorious_driver);
+
+MODULE_LICENSE("GPL");
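Spelled out, the byte patch in glorious_report_fixup() flips the HID main-item flags of the three consumer-control Input items, using the flag values from include/linux/hid.h:

/*
 *   before: 0x03 = HID_MAIN_ITEM_CONSTANT | HID_MAIN_ITEM_VARIABLE
 *           (constant fields, so hid-input drops the events)
 *   after:  0x06 = HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE
 *           (ordinary variable, relative data fields)
 */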
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9f2213426556..1c71a1aa76b2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -79,10 +79,10 @@
#define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
#define HID_DEVICE_ID_ALPS_U1 0x1215
+#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E
#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
#define HID_DEVICE_ID_ALPS_1222 0x1222
-
#define USB_VENDOR_ID_AMI 0x046b
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
@@ -385,6 +385,7 @@
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002
#define USB_VENDOR_ID_ELAN 0x04f3
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
@@ -464,6 +465,10 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+#define USB_VENDOR_ID_GLORIOUS 0x258a
+#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
+#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
+
#define I2C_VENDOR_ID_GOODIX 0x27c6
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
@@ -755,6 +760,7 @@
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219
#define USB_DEVICE_ID_LOGITECH_G15_LCD 0xc222
+#define USB_DEVICE_ID_LOGITECH_G11 0xc225
#define USB_DEVICE_ID_LOGITECH_G15_V2_LCD 0xc227
#define USB_DEVICE_ID_LOGITECH_G510 0xc22d
#define USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO 0xc22e
@@ -821,6 +827,7 @@
#define USB_DEVICE_ID_PICK16F1454 0x0042
#define USB_DEVICE_ID_PICK16F1454_V2 0xf2f7
#define USB_DEVICE_ID_LUXAFOR 0xf372
+#define USB_DEVICE_ID_MCP2221 0x00dd
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
@@ -1092,6 +1099,9 @@
#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
+#define I2C_VENDOR_ID_SYNAPTICS 0x06cb
+#define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393 0x7a13
+
#define USB_VENDOR_ID_SYNAPTICS 0x06cb
#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
#define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002
@@ -1106,6 +1116,7 @@
#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index 8a9268a5c66a..ef0cbcd7540d 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -803,8 +803,10 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
if (ret < 0) {
- hid_err(hdev, "Error disabling keyboard emulation for the G-keys\n");
- goto error_hw_stop;
+ hid_err(hdev, "Error %d disabling keyboard emulation for the G-keys, falling back to generic hid-input driver\n",
+ ret);
+ hid_set_drvdata(hdev, NULL);
+ return 0;
}
/* Get initial brightness levels */
@@ -870,6 +872,10 @@ error_hw_stop:
}
static const struct hid_device_id lg_g15_devices[] = {
+ /* The G11 is a G15 without the LCD, treat it as a G15 */
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G11),
+ .driver_data = LG_G15 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_G15_LCD),
.driver_data = LG_G15 },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index bb50d6e7745b..ed9b1c1f460d 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -16,11 +16,11 @@
#include <asm/unaligned.h>
#include "hid-ids.h"
-#define DJ_MAX_PAIRED_DEVICES 6
+#define DJ_MAX_PAIRED_DEVICES 7
#define DJ_MAX_NUMBER_NOTIFS 8
#define DJ_RECEIVER_INDEX 0
#define DJ_DEVICE_INDEX_MIN 1
-#define DJ_DEVICE_INDEX_MAX 6
+#define DJ_DEVICE_INDEX_MAX 7
#define DJREPORT_SHORT_LENGTH 15
#define DJREPORT_LONG_LENGTH 32
@@ -980,6 +980,11 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
break;
}
+ /* custom receiver device (e.g. powerplay) */
+ if (hidpp_report->device_index == 7) {
+ workitem.reports_supported |= HIDPP;
+ }
+
if (workitem.type == WORKITEM_TYPE_EMPTY) {
hid_warn(hdev,
"unusable device of type %s (0x%02x) connected on slot %d",
@@ -1368,6 +1373,8 @@ static int logi_dj_ll_parse(struct hid_device *hid)
}
if (djdev->reports_supported & HIDPP) {
+ dbg_hid("%s: sending a HID++ descriptor, reports_supported: %llx\n",
+ __func__, djdev->reports_supported);
rdcat(rdesc, &rsize, hidpp_descriptor,
sizeof(hidpp_descriptor));
}
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
new file mode 100644
index 000000000000..d958475f8c81
--- /dev/null
+++ b/drivers/hid/hid-mcp2221.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MCP2221A - Microchip USB to I2C Host Protocol Bridge
+ *
+ * Copyright (c) 2020, Rishi Gupta <gupt21@gmail.com>
+ *
+ * Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20005565B.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hid.h>
+#include <linux/hidraw.h>
+#include <linux/i2c.h>
+#include "hid-ids.h"
+
+/* Commands codes in a raw output report */
+enum {
+ MCP2221_I2C_WR_DATA = 0x90,
+ MCP2221_I2C_WR_NO_STOP = 0x94,
+ MCP2221_I2C_RD_DATA = 0x91,
+ MCP2221_I2C_RD_RPT_START = 0x93,
+ MCP2221_I2C_GET_DATA = 0x40,
+ MCP2221_I2C_PARAM_OR_STATUS = 0x10,
+ MCP2221_I2C_SET_SPEED = 0x20,
+ MCP2221_I2C_CANCEL = 0x10,
+};
+
+/* Response codes in a raw input report */
+enum {
+ MCP2221_SUCCESS = 0x00,
+ MCP2221_I2C_ENG_BUSY = 0x01,
+ MCP2221_I2C_START_TOUT = 0x12,
+ MCP2221_I2C_STOP_TOUT = 0x62,
+ MCP2221_I2C_WRADDRL_TOUT = 0x23,
+ MCP2221_I2C_WRDATA_TOUT = 0x44,
+ MCP2221_I2C_WRADDRL_NACK = 0x25,
+ MCP2221_I2C_MASK_ADDR_NACK = 0x40,
+ MCP2221_I2C_WRADDRL_SEND = 0x21,
+ MCP2221_I2C_ADDR_NACK = 0x25,
+ MCP2221_I2C_READ_COMPL = 0x55,
+};
+
+/*
+ * There is no way to match a response to a particular command, so the
+ * next command is sent only after the response to the previous one has
+ * been received. The mutex lock exists mainly for this purpose.
+ */
+struct mcp2221 {
+ struct hid_device *hdev;
+ struct i2c_adapter adapter;
+ struct mutex lock;
+ struct completion wait_in_report;
+ u8 *rxbuf;
+ u8 txbuf[64];
+ int rxbuf_idx;
+ int status;
+ u8 cur_i2c_clk_div;
+};
+
+/*
+ * The default i2c bus clock frequency is 400 kHz. Modify this if you
+ * want some other frequency (min 50 kHz, max 400 kHz).
+ */
+static uint i2c_clk_freq = 400;
+
+/* Synchronously send output report to the device */
+static int mcp_send_report(struct mcp2221 *mcp,
+ u8 *out_report, size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(out_report, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* mcp2221 uses interrupt endpoint for out reports */
+ ret = hid_hw_output_report(mcp->hdev, buf, len);
+ kfree(buf);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/*
+ * Send o/p report to the device and wait for i/p report to be
+ * received from the device. If the device does not respond,
+ * we timeout.
+ */
+static int mcp_send_data_req_status(struct mcp2221 *mcp,
+ u8 *out_report, int len)
+{
+ int ret;
+ unsigned long t;
+
+ reinit_completion(&mcp->wait_in_report);
+
+ ret = mcp_send_report(mcp, out_report, len);
+ if (ret)
+ return ret;
+
+ t = wait_for_completion_timeout(&mcp->wait_in_report,
+ msecs_to_jiffies(4000));
+ if (!t)
+ return -ETIMEDOUT;
+
+ return mcp->status;
+}
+
+/* Check pass/fail for actual communication with i2c slave */
+static int mcp_chk_last_cmd_status(struct mcp2221 *mcp)
+{
+ memset(mcp->txbuf, 0, 8);
+ mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS;
+
+ return mcp_send_data_req_status(mcp, mcp->txbuf, 8);
+}
+
+/* Cancel the last command, releasing the i2c bus in case it is occupied */
+static int mcp_cancel_last_cmd(struct mcp2221 *mcp)
+{
+ memset(mcp->txbuf, 0, 8);
+ mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS;
+ mcp->txbuf[2] = MCP2221_I2C_CANCEL;
+
+ return mcp_send_data_req_status(mcp, mcp->txbuf, 8);
+}
+
+static int mcp_set_i2c_speed(struct mcp2221 *mcp)
+{
+ int ret;
+
+ memset(mcp->txbuf, 0, 8);
+ mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS;
+ mcp->txbuf[3] = MCP2221_I2C_SET_SPEED;
+ mcp->txbuf[4] = mcp->cur_i2c_clk_div;
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 8);
+ if (ret) {
+ /* Small delay is needed here */
+ usleep_range(980, 1000);
+ mcp_cancel_last_cmd(mcp);
+ }
+
+ return 0;
+}
+
+/*
+ * An output report can carry a minimum of 1 and a maximum of 60 user
+ * data bytes. If there are more than 60 data bytes, we send them in
+ * chunks of 60 bytes; the last chunk may contain 60 bytes or fewer.
+ * The total number of bytes is announced in the very first report to
+ * the mcp2221; from that point on, the device first collects all the
+ * data from the host and then sends it to the i2c slave device.
+ */
+static int mcp_i2c_write(struct mcp2221 *mcp,
+ struct i2c_msg *msg, int type, u8 last_status)
+{
+ int ret, len, idx, sent;
+
+ idx = 0;
+ sent = 0;
+ if (msg->len < 60)
+ len = msg->len;
+ else
+ len = 60;
+
+ do {
+ mcp->txbuf[0] = type;
+ mcp->txbuf[1] = msg->len & 0xff;
+ mcp->txbuf[2] = msg->len >> 8;
+ mcp->txbuf[3] = (u8)(msg->addr << 1);
+
+ memcpy(&mcp->txbuf[4], &msg->buf[idx], len);
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, len + 4);
+ if (ret)
+ return ret;
+
+ usleep_range(980, 1000);
+
+ if (last_status) {
+ ret = mcp_chk_last_cmd_status(mcp);
+ if (ret)
+ return ret;
+ }
+
+ sent = sent + len;
+ if (sent >= msg->len)
+ break;
+
+ idx = idx + len;
+ if ((msg->len - sent) < 60)
+ len = msg->len - sent;
+ else
+ len = 60;
+
+ /*
+ * Testing shows delay is needed between successive writes
+ * otherwise next write fails on first-try from i2c core.
+ * This value is obtained through automated stress testing.
+ */
+ usleep_range(980, 1000);
+ } while (len > 0);
+
+ return ret;
+}
+
+/*
+ * The device reads all data (0 - 65535 bytes) from the i2c slave and
+ * buffers it internally. The data is then read back from the device
+ * to the host in chunks of up to 60 bytes using input reports.
+ */
+static int mcp_i2c_smbus_read(struct mcp2221 *mcp,
+ struct i2c_msg *msg, int type, u16 smbus_addr,
+ u8 smbus_len, u8 *smbus_buf)
+{
+ int ret;
+ u16 total_len;
+
+ mcp->txbuf[0] = type;
+ if (msg) {
+ mcp->txbuf[1] = msg->len & 0xff;
+ mcp->txbuf[2] = msg->len >> 8;
+ mcp->txbuf[3] = (u8)(msg->addr << 1);
+ total_len = msg->len;
+ mcp->rxbuf = msg->buf;
+ } else {
+ mcp->txbuf[1] = smbus_len;
+ mcp->txbuf[2] = 0;
+ mcp->txbuf[3] = (u8)(smbus_addr << 1);
+ total_len = smbus_len;
+ mcp->rxbuf = smbus_buf;
+ }
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 4);
+ if (ret)
+ return ret;
+
+ mcp->rxbuf_idx = 0;
+
+ do {
+ memset(mcp->txbuf, 0, 4);
+ mcp->txbuf[0] = MCP2221_I2C_GET_DATA;
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ if (ret)
+ return ret;
+
+ ret = mcp_chk_last_cmd_status(mcp);
+ if (ret)
+ return ret;
+
+ usleep_range(980, 1000);
+ } while (mcp->rxbuf_idx < total_len);
+
+ return ret;
+}
+
+static int mcp_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msgs[], int num)
+{
+ int ret;
+ struct mcp2221 *mcp = i2c_get_adapdata(adapter);
+
+ hid_hw_power(mcp->hdev, PM_HINT_FULLON);
+
+ mutex_lock(&mcp->lock);
+
+ /* Setting speed before every transaction is required for mcp2221 */
+ ret = mcp_set_i2c_speed(mcp);
+ if (ret)
+ goto exit;
+
+ if (num == 1) {
+ if (msgs->flags & I2C_M_RD) {
+ ret = mcp_i2c_smbus_read(mcp, msgs, MCP2221_I2C_RD_DATA,
+ 0, 0, NULL);
+ } else {
+ ret = mcp_i2c_write(mcp, msgs, MCP2221_I2C_WR_DATA, 1);
+ }
+ if (ret)
+ goto exit;
+ ret = num;
+ } else if (num == 2) {
+ /* Combined write-then-read: send the register address, then read its contents */
+ if (msgs[0].addr == msgs[1].addr &&
+ !(msgs[0].flags & I2C_M_RD) &&
+ (msgs[1].flags & I2C_M_RD)) {
+
+ ret = mcp_i2c_write(mcp, &msgs[0],
+ MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, &msgs[1],
+ MCP2221_I2C_RD_RPT_START,
+ 0, 0, NULL);
+ if (ret)
+ goto exit;
+ ret = num;
+ } else {
+ dev_err(&adapter->dev,
+ "unsupported multi-msg i2c transaction\n");
+ ret = -EOPNOTSUPP;
+ }
+ } else {
+ dev_err(&adapter->dev,
+ "unsupported multi-msg i2c transaction\n");
+ ret = -EOPNOTSUPP;
+ }
+
+exit:
+ hid_hw_power(mcp->hdev, PM_HINT_NORMAL);
+ mutex_unlock(&mcp->lock);
+ return ret;
+}
+
+static int mcp_smbus_write(struct mcp2221 *mcp, u16 addr,
+ u8 command, u8 *buf, u8 len, int type,
+ u8 last_status)
+{
+ int data_len, ret;
+
+ mcp->txbuf[0] = type;
+ mcp->txbuf[1] = len + 1; /* 1 is due to command byte itself */
+ mcp->txbuf[2] = 0;
+ mcp->txbuf[3] = (u8)(addr << 1);
+ mcp->txbuf[4] = command;
+
+ switch (len) {
+ case 0:
+ data_len = 5;
+ break;
+ case 1:
+ mcp->txbuf[5] = buf[0];
+ data_len = 6;
+ break;
+ case 2:
+ mcp->txbuf[5] = buf[0];
+ mcp->txbuf[6] = buf[1];
+ data_len = 7;
+ break;
+ default:
+ memcpy(&mcp->txbuf[5], buf, len);
+ data_len = len + 5;
+ }
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, data_len);
+ if (ret)
+ return ret;
+
+ if (last_status) {
+ usleep_range(980, 1000);
+
+ ret = mcp_chk_last_cmd_status(mcp);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int mcp_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size,
+ union i2c_smbus_data *data)
+{
+ int ret;
+ struct mcp2221 *mcp = i2c_get_adapdata(adapter);
+
+ hid_hw_power(mcp->hdev, PM_HINT_FULLON);
+
+ mutex_lock(&mcp->lock);
+
+ ret = mcp_set_i2c_speed(mcp);
+ if (ret)
+ goto exit;
+
+ switch (size) {
+
+ case I2C_SMBUS_QUICK:
+ if (read_write == I2C_SMBUS_READ)
+ ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_DATA,
+ addr, 0, &data->byte);
+ else
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_DATA, 1);
+ break;
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_READ)
+ ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_DATA,
+ addr, 1, &data->byte);
+ else
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_DATA, 1);
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, NULL,
+ MCP2221_I2C_RD_RPT_START,
+ addr, 1, &data->byte);
+ } else {
+ ret = mcp_smbus_write(mcp, addr, command, &data->byte,
+ 1, MCP2221_I2C_WR_DATA, 1);
+ }
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, NULL,
+ MCP2221_I2C_RD_RPT_START,
+ addr, 2, (u8 *)&data->word);
+ } else {
+ ret = mcp_smbus_write(mcp, addr, command,
+ (u8 *)&data->word, 2,
+ MCP2221_I2C_WR_DATA, 1);
+ }
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_NO_STOP, 1);
+ if (ret)
+ goto exit;
+
+ mcp->rxbuf_idx = 0;
+ mcp->rxbuf = data->block;
+ mcp->txbuf[0] = MCP2221_I2C_GET_DATA;
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ if (ret)
+ goto exit;
+ } else {
+ if (!data->block[0]) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = mcp_smbus_write(mcp, addr, command, data->block,
+ data->block[0] + 1,
+ MCP2221_I2C_WR_DATA, 1);
+ }
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_NO_STOP, 1);
+ if (ret)
+ goto exit;
+
+ mcp->rxbuf_idx = 0;
+ mcp->rxbuf = data->block;
+ mcp->txbuf[0] = MCP2221_I2C_GET_DATA;
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ if (ret)
+ goto exit;
+ } else {
+ if (!data->block[0]) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = mcp_smbus_write(mcp, addr, command,
+ &data->block[1], data->block[0],
+ MCP2221_I2C_WR_DATA, 1);
+ }
+ break;
+ case I2C_SMBUS_PROC_CALL:
+ ret = mcp_smbus_write(mcp, addr, command,
+ (u8 *)&data->word,
+ 2, MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, NULL,
+ MCP2221_I2C_RD_RPT_START,
+ addr, 2, (u8 *)&data->word);
+ break;
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ ret = mcp_smbus_write(mcp, addr, command, data->block,
+ data->block[0] + 1,
+ MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, NULL,
+ MCP2221_I2C_RD_RPT_START,
+ addr, I2C_SMBUS_BLOCK_MAX,
+ data->block);
+ break;
+ default:
+ dev_err(&mcp->adapter.dev,
+ "unsupported smbus transaction size:%d\n", size);
+ ret = -EOPNOTSUPP;
+ }
+
+exit:
+ hid_hw_power(mcp->hdev, PM_HINT_NORMAL);
+ mutex_unlock(&mcp->lock);
+ return ret;
+}
+
+static u32 mcp_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_PEC);
+}
+
+static const struct i2c_algorithm mcp_i2c_algo = {
+ .master_xfer = mcp_i2c_xfer,
+ .smbus_xfer = mcp_smbus_xfer,
+ .functionality = mcp_i2c_func,
+};
+
+/* Gives current state of i2c engine inside mcp2221 */
+static int mcp_get_i2c_eng_state(struct mcp2221 *mcp,
+ u8 *data, u8 idx)
+{
+ int ret;
+
+ switch (data[idx]) {
+ case MCP2221_I2C_WRADDRL_NACK:
+ case MCP2221_I2C_WRADDRL_SEND:
+ ret = -ENXIO;
+ break;
+ case MCP2221_I2C_START_TOUT:
+ case MCP2221_I2C_STOP_TOUT:
+ case MCP2221_I2C_WRADDRL_TOUT:
+ case MCP2221_I2C_WRDATA_TOUT:
+ ret = -ETIMEDOUT;
+ break;
+ case MCP2221_I2C_ENG_BUSY:
+ ret = -EAGAIN;
+ break;
+ case MCP2221_SUCCESS:
+ ret = 0x00;
+ break;
+ default:
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*
+ * The MCP2221 uses an interrupt endpoint for input reports. This
+ * function is called by the HID layer when it receives an input
+ * report from the mcp2221, which is actually a response to the
+ * previously sent command.
+ *
+ * MCP2221A firmware-specific return codes are parsed and 0 or an
+ * appropriate negative error code is returned. A delayed response
+ * results in a timeout error; stray responses result in -EIO.
+ */
+static int mcp2221_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ u8 *buf;
+ struct mcp2221 *mcp = hid_get_drvdata(hdev);
+
+ switch (data[0]) {
+
+ case MCP2221_I2C_WR_DATA:
+ case MCP2221_I2C_WR_NO_STOP:
+ case MCP2221_I2C_RD_DATA:
+ case MCP2221_I2C_RD_RPT_START:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ mcp->status = 0;
+ break;
+ default:
+ mcp->status = mcp_get_i2c_eng_state(mcp, data, 2);
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ case MCP2221_I2C_PARAM_OR_STATUS:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if ((mcp->txbuf[3] == MCP2221_I2C_SET_SPEED) &&
+ (data[3] != MCP2221_I2C_SET_SPEED)) {
+ mcp->status = -EAGAIN;
+ break;
+ }
+ if (data[20] & MCP2221_I2C_MASK_ADDR_NACK) {
+ mcp->status = -ENXIO;
+ break;
+ }
+ mcp->status = mcp_get_i2c_eng_state(mcp, data, 8);
+ break;
+ default:
+ mcp->status = -EIO;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ case MCP2221_I2C_GET_DATA:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if (data[2] == MCP2221_I2C_ADDR_NACK) {
+ mcp->status = -ENXIO;
+ break;
+ }
+ if (!mcp_get_i2c_eng_state(mcp, data, 2)
+ && (data[3] == 0)) {
+ mcp->status = 0;
+ break;
+ }
+ if (data[3] == 127) {
+ mcp->status = -EIO;
+ break;
+ }
+ if (data[2] == MCP2221_I2C_READ_COMPL) {
+ buf = mcp->rxbuf;
+ memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]);
+ mcp->rxbuf_idx = mcp->rxbuf_idx + data[3];
+ mcp->status = 0;
+ break;
+ }
+ mcp->status = -EIO;
+ break;
+ default:
+ mcp->status = -EIO;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ default:
+ mcp->status = -EIO;
+ complete(&mcp->wait_in_report);
+ }
+
+ return 1;
+}
+
+static int mcp2221_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct mcp2221 *mcp;
+
+ mcp = devm_kzalloc(&hdev->dev, sizeof(*mcp), GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "can't parse reports\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "can't start hardware\n");
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "can't open device\n");
+ goto err_hstop;
+ }
+
+ mutex_init(&mcp->lock);
+ init_completion(&mcp->wait_in_report);
+ hid_set_drvdata(hdev, mcp);
+ mcp->hdev = hdev;
+
+ /* Set the I2C bus clock divider */
+ if (i2c_clk_freq > 400)
+ i2c_clk_freq = 400;
+ if (i2c_clk_freq < 50)
+ i2c_clk_freq = 50;
+ mcp->cur_i2c_clk_div = (12000000 / (i2c_clk_freq * 1000)) - 3;
+
+ mcp->adapter.owner = THIS_MODULE;
+ mcp->adapter.class = I2C_CLASS_HWMON;
+ mcp->adapter.algo = &mcp_i2c_algo;
+ mcp->adapter.retries = 1;
+ mcp->adapter.dev.parent = &hdev->dev;
+ snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
+ "MCP2221 usb-i2c bridge on hidraw%d",
+ ((struct hidraw *)hdev->hidraw)->minor);
+
+ ret = i2c_add_adapter(&mcp->adapter);
+ if (ret) {
+ hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret);
+ goto err_i2c;
+ }
+ i2c_set_adapdata(&mcp->adapter, mcp);
+
+ return 0;
+
+err_i2c:
+ hid_hw_close(mcp->hdev);
+err_hstop:
+ hid_hw_stop(mcp->hdev);
+ return ret;
+}
+
+static void mcp2221_remove(struct hid_device *hdev)
+{
+ struct mcp2221 *mcp = hid_get_drvdata(hdev);
+
+ i2c_del_adapter(&mcp->adapter);
+ hid_hw_close(mcp->hdev);
+ hid_hw_stop(mcp->hdev);
+}
+
+static const struct hid_device_id mcp2221_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_MCP2221) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mcp2221_devices);
+
+static struct hid_driver mcp2221_driver = {
+ .name = "mcp2221",
+ .id_table = mcp2221_devices,
+ .probe = mcp2221_probe,
+ .remove = mcp2221_remove,
+ .raw_event = mcp2221_raw_event,
+};
+
+/* Register with HID core */
+module_hid_driver(mcp2221_driver);
+
+MODULE_AUTHOR("Rishi Gupta <gupt21@gmail.com>");
+MODULE_DESCRIPTION("MCP2221 Microchip HID USB to I2C master bridge");
+MODULE_LICENSE("GPL v2");
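As a sanity check on the clock-divider formula in mcp2221_probe(), a small standalone sketch; the 12 MHz reference and the "- 3" offset mirror the driver's computation above rather than being taken from the datasheet:

#include <assert.h>

static unsigned int mcp2221_clk_div(unsigned int khz)
{
	return (12000000 / (khz * 1000)) - 3;
}

int main(void)
{
	assert(mcp2221_clk_div(400) == 27);	/* fastest supported */
	assert(mcp2221_clk_div(50) == 237);	/* slowest; still fits in u8 */
	return 0;
}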
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e2ce790ff4a4..241740222199 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1920,6 +1920,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_EGALAX_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ { .driver_data = MT_CLS_EGALAX,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 3735546bb524..e4cb543de0cd 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -163,6 +163,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
@@ -398,9 +399,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
#endif
-#if IS_ENABLED(CONFIG_HID_ITE)
- { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
-#endif
#if IS_ENABLED(CONFIG_HID_ICADE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
#endif
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9ce22acdfaca..8cffa84c9650 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -217,7 +217,6 @@ static int rmi_hid_read_block(struct rmi_transport_dev *xport, u16 addr,
ret = rmi_write_report(hdev, data->writeReport,
data->output_report_size);
if (ret != data->output_report_size) {
- clear_bit(RMI_READ_REQUEST_PENDING, &data->flags);
dev_err(&hdev->dev,
"failed to write request output report (%d)\n",
ret);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 009000c5d55c..294c84e136d7 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -177,6 +177,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_BOGUS_IRQ },
{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
I2C_HID_QUIRK_RESET_ON_RESUME },
+ { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
+ I2C_HID_QUIRK_RESET_ON_RESUME },
{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
I2C_HID_QUIRK_BAD_INPUT_SIZE },
{ 0, 0 }
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.h b/drivers/hid/intel-ish-hid/ishtp/hbm.h
index bb85985b1620..7c445b203f2a 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.h
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.h
@@ -82,7 +82,7 @@ struct ishtp_msg_hdr {
struct ishtp_bus_message {
uint8_t hbm_cmd;
- uint8_t data[0];
+ uint8_t data[];
} __packed;
/**
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 39e0e6c73adf..1cc6364aa957 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -214,7 +214,7 @@ struct ishtp_device {
const struct ishtp_hw_ops *ops;
size_t mtu;
uint32_t ishtp_msg_hdr;
- char hw[0] __aligned(sizeof(void *));
+ char hw[] __aligned(sizeof(void *));
};
static inline unsigned long ishtp_secs_to_jiffies(unsigned long sec)
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index c7bc9db5b192..17a638f15082 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid)
struct usbhid_device *usbhid = hid->driver_data;
int res;
+ mutex_lock(&usbhid->mutex);
+
set_bit(HID_OPENED, &usbhid->iofl);
- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return 0;
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
+ res = 0;
+ goto Done;
+ }
res = usb_autopm_get_interface(usbhid->intf);
/* the device must be awake to reliably request remote wakeup */
if (res < 0) {
clear_bit(HID_OPENED, &usbhid->iofl);
- return -EIO;
+ res = -EIO;
+ goto Done;
}
usbhid->intf->needs_remote_wakeup = 1;
@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid)
msleep(50);
clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+
+ Done:
+ mutex_unlock(&usbhid->mutex);
return res;
}
@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
+ mutex_lock(&usbhid->mutex);
+
/*
* Make sure we don't restart data acquisition due to
* a resumption we no longer care about by avoiding racing
@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid)
clear_bit(HID_IN_POLLING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return;
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
+ hid_cancel_delayed_stuff(usbhid);
+ usb_kill_urb(usbhid->urbin);
+ usbhid->intf->needs_remote_wakeup = 0;
+ }
- hid_cancel_delayed_stuff(usbhid);
- usb_kill_urb(usbhid->urbin);
- usbhid->intf->needs_remote_wakeup = 0;
+ mutex_unlock(&usbhid->mutex);
}
/*
@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid)
unsigned int n, insize = 0;
int ret;
+ mutex_lock(&usbhid->mutex);
+
clear_bit(HID_DISCONNECTED, &usbhid->iofl);
usbhid->bufsize = HID_MIN_BUFFER_SIZE;
@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid)
usbhid_set_leds(hid);
device_set_wakeup_enable(&dev->dev, 1);
}
+
+ mutex_unlock(&usbhid->mutex);
return 0;
fail:
@@ -1187,6 +1202,7 @@ fail:
usbhid->urbout = NULL;
usbhid->urbctrl = NULL;
hid_free_buffers(dev, hid);
+ mutex_unlock(&usbhid->mutex);
return ret;
}
@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid)
usbhid->intf->needs_remote_wakeup = 0;
}
+ mutex_lock(&usbhid->mutex);
+
clear_bit(HID_STARTED, &usbhid->iofl);
spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
set_bit(HID_DISCONNECTED, &usbhid->iofl);
@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid)
usbhid->urbout = NULL;
hid_free_buffers(hid_to_usb_dev(hid), hid);
+
+ mutex_unlock(&usbhid->mutex);
}
static int usbhid_power(struct hid_device *hid, int lvl)
@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
INIT_WORK(&usbhid->reset_work, hid_reset);
timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
spin_lock_init(&usbhid->lock);
+ mutex_init(&usbhid->mutex);
ret = hid_add_device(hid);
if (ret) {
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 8620408bd7af..75fe85d3d27a 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -80,6 +80,7 @@ struct usbhid_device {
dma_addr_t outbuf_dma; /* Output buffer dma */
unsigned long last_out; /* record of last output for timeouts */
+ struct mutex mutex; /* start/stop/open/close */
spinlock_t lock; /* fifo spinlock */
unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
struct timer_list io_retry; /* Retry timer */
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5ded94b7bf68..cd71e7133944 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
data[0] = field->report->id;
ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
data, n, WAC_CMD_RETRIES);
- if (ret == n) {
+ if (ret == n && features->type == HID_GENERIC) {
ret = hid_report_raw_event(hdev,
HID_FEATURE_REPORT, data, n, 0);
+ } else if (ret == 2 && features->type != HID_GENERIC) {
+ features->touch_max = data[1];
} else {
features->touch_max = 16;
hid_warn(hdev, "wacom_feature_mapping: "
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index d99a9d407671..1c96809b51c9 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
{
struct input_dev *pad_input = wacom->pad_input;
unsigned char *data = wacom->data;
+ int nbuttons = wacom->features.numbered_buttons;
- int buttons = data[282] | ((data[281] & 0x40) << 2);
+ int expresskeys = data[282];
+ int center = (data[281] & 0x40) >> 6;
int ring = data[285] & 0x7F;
bool ringstatus = data[285] & 0x80;
- bool prox = buttons || ringstatus;
+ bool prox = expresskeys || center || ringstatus;
/* Fix touchring data: userspace expects 0 at left and increasing clockwise */
ring = 71 - ring;
@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
if (ring > 71)
ring -= 72;
- wacom_report_numbered_buttons(pad_input, 9, buttons);
+ wacom_report_numbered_buttons(pad_input, nbuttons,
+ expresskeys | (center << (nbuttons - 1)));
input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
case HID_DG_TIPSWITCH:
hid_data->last_slot_field = equivalent_usage;
break;
+ case HID_DG_CONTACTCOUNT:
+ hid_data->cc_report = report->id;
+ hid_data->cc_index = i;
+ hid_data->cc_value_index = j;
+ break;
}
}
}
+
+ if (hid_data->cc_report != 0 &&
+ hid_data->cc_index >= 0) {
+ struct hid_field *field = report->field[hid_data->cc_index];
+ int value = field->value[hid_data->cc_value_index];
+ if (value)
+ hid_data->num_expected = value;
+	} else {
+ hid_data->num_expected = wacom_wac->features.touch_max;
+ }
}
static void wacom_wac_finger_report(struct hid_device *hdev,
@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
- struct hid_data *hid_data = &wacom_wac->hid_data;
/* If more packets of data are expected, give us a chance to
* process them rather than immediately syncing a partial
@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
input_sync(input);
wacom_wac->hid_data.num_received = 0;
- hid_data->num_expected = 0;
/* keep touch state for pen event */
wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev,
}
}
-static void wacom_set_num_expected(struct hid_device *hdev,
- struct hid_report *report,
- int collection_index,
- struct hid_field *field,
- int field_index)
-{
- struct wacom *wacom = hid_get_drvdata(hdev);
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct hid_data *hid_data = &wacom_wac->hid_data;
- unsigned int original_collection_level =
- hdev->collection[collection_index].level;
- bool end_collection = false;
- int i;
-
- if (hid_data->num_expected)
- return;
-
- // find the contact count value for this segment
- for (i = field_index; i < report->maxfield && !end_collection; i++) {
- struct hid_field *field = report->field[i];
- unsigned int field_level =
- hdev->collection[field->usage[0].collection_index].level;
- unsigned int j;
-
- if (field_level != original_collection_level)
- continue;
-
- for (j = 0; j < field->maxusage; j++) {
- struct hid_usage *usage = &field->usage[j];
-
- if (usage->collection_index != collection_index) {
- end_collection = true;
- break;
- }
- if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) {
- hid_data->cc_report = report->id;
- hid_data->cc_index = i;
- hid_data->cc_value_index = j;
-
- if (hid_data->cc_report != 0 &&
- hid_data->cc_index >= 0) {
-
- struct hid_field *field =
- report->field[hid_data->cc_index];
- int value =
- field->value[hid_data->cc_value_index];
-
- if (value)
- hid_data->num_expected = value;
- }
- }
- }
- }
-
- if (hid_data->cc_report == 0 || hid_data->cc_index < 0)
- hid_data->num_expected = wacom_wac->features.touch_max;
-}
-
static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report,
int collection_index, struct hid_field *field,
int field_index)
{
struct wacom *wacom = hid_get_drvdata(hdev);
- if (WACOM_FINGER_FIELD(field))
- wacom_set_num_expected(hdev, report, collection_index, field,
- field_index);
wacom_report_events(hdev, report, collection_index, field_index);
/*
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 9eec970cdfa5..89869c66fb9d 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -965,14 +965,13 @@ static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
if (old_state != hi->iface_state) {
if (hi->iface_state == CS_STATE_CONFIGURED) {
- pm_qos_add_request(&hi->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY,
+ cpu_latency_qos_add_request(&hi->pm_qos_req,
CS_QOS_LATENCY_FOR_DATA_USEC);
local_bh_disable();
cs_hsi_read_on_data(hi);
local_bh_enable();
} else if (old_state == CS_STATE_CONFIGURED) {
- pm_qos_remove_request(&hi->pm_qos_req);
+ cpu_latency_qos_remove_request(&hi->pm_qos_req);
}
}
return r;
@@ -1075,8 +1074,8 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi)
WARN_ON(!cs_state_idle(hi->control_state));
WARN_ON(!cs_state_idle(hi->data_state));
- if (pm_qos_request_active(&hi->pm_qos_req))
- pm_qos_remove_request(&hi->pm_qos_req);
+ if (cpu_latency_qos_request_active(&hi->pm_qos_req))
+ cpu_latency_qos_remove_request(&hi->pm_qos_req);
spin_lock_bh(&hi->lock);
cs_hsi_free_data(hi);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0370364169c4..501c43c5851d 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -839,6 +839,9 @@ void vmbus_initiate_unload(bool crash)
{
struct vmbus_channel_message_header hdr;
+ if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
+ return;
+
/* Pre-Win2012R2 hosts don't support reconnect */
if (vmbus_proto_version < VERSION_WIN8_1)
return;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 6098e0cbdb4b..533c8b82b344 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -184,11 +184,7 @@ void hv_synic_enable_regs(unsigned int cpu)
shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
shared_sint.masked = false;
- if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
- shared_sint.auto_eoi = false;
- else
- shared_sint.auto_eoi = true;
-
+ shared_sint.auto_eoi = hv_recommend_using_aeoi();
hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index a02ce43d778d..32e3bc0aa665 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -533,7 +533,6 @@ struct hv_dynmem_device {
* State to synchronize hot-add.
*/
struct completion ol_waitevent;
- bool ha_waiting;
/*
* This thread handles hot-add
* requests from the host as well as notifying
@@ -634,10 +633,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case MEM_ONLINE:
case MEM_CANCEL_ONLINE:
- if (dm_device.ha_waiting) {
- dm_device.ha_waiting = false;
- complete(&dm_device.ol_waitevent);
- }
+ complete(&dm_device.ol_waitevent);
break;
case MEM_OFFLINE:
@@ -726,8 +722,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
has->covered_end_pfn += processed_pfn;
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
- init_completion(&dm_device.ol_waitevent);
- dm_device.ha_waiting = !memhp_auto_online;
+ reinit_completion(&dm_device.ol_waitevent);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
@@ -753,15 +748,14 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
}
/*
- * Wait for the memory block to be onlined when memory onlining
- * is done outside of kernel (memhp_auto_online). Since the hot
- * add has succeeded, it is ok to proceed even if the pages in
- * the hot added region have not been "onlined" within the
- * allowed time.
+ * Wait for memory to get onlined. If the kernel onlined the
+ * memory when adding it, this will return directly. Otherwise,
+ * it will wait for user space to online the memory. This helps
+	 * to avoid adding memory faster than it is getting onlined. As
+	 * the add succeeded, it is ok to proceed even if the memory was
+	 * not onlined in time.
*/
- if (dm_device.ha_waiting)
- wait_for_completion_timeout(&dm_device.ol_waitevent,
- 5*HZ);
+ wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
post_status(&dm_device);
}
}
@@ -1706,6 +1700,7 @@ static int balloon_probe(struct hv_device *dev,
#ifdef CONFIG_MEMORY_HOTPLUG
set_online_page_callback(&hv_online_page);
+ init_completion(&dm_device.ol_waitevent);
register_memory_notifier(&hv_memory_nb);
#endif
diff --git a/drivers/hv/hv_debugfs.c b/drivers/hv/hv_debugfs.c
index 8a2878573582..ccf752b6659a 100644
--- a/drivers/hv/hv_debugfs.c
+++ b/drivers/hv/hv_debugfs.c
@@ -11,7 +11,7 @@
#include "hyperv_vmbus.h"
-struct dentry *hv_debug_root;
+static struct dentry *hv_debug_root;
static int hv_debugfs_delay_get(void *data, u64 *val)
{
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index e70783e33680..f9d14db980cb 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -286,8 +286,8 @@ TRACE_EVENT(vmbus_send_tl_connect_request,
__field(int, ret)
),
TP_fast_assign(
- memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
- memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+ export_guid(__entry->guest_id, &msg->guest_endpoint_id);
+ export_guid(__entry->host_id, &msg->host_service_id);
__entry->ret = ret;
),
TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index f5fa3b3c9baf..70b30e223a57 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -292,7 +292,7 @@ struct vmbus_msginfo {
struct list_head msglist_entry;
/* The message itself */
- unsigned char msg[0];
+ unsigned char msg[];
};
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 029378c27421..e06c6b9555cf 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -31,6 +31,7 @@
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
+#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@@ -48,14 +49,35 @@ static int hyperv_cpuhp_online;
static void *hv_panic_page;
+/*
+ * Boolean to control whether to report panic messages over Hyper-V.
+ *
+ * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
+ */
+static int sysctl_record_panic_msg = 1;
+
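+/*
+ * Returns true when the register-based hyperv_report_panic() path
+ * should be used, i.e. when the kmsg-dump path will not report the
+ * panic message instead (message recording disabled, or no dump page
+ * allocated).
+ */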
+static int hyperv_report_reg(void)
+{
+ return !sysctl_record_panic_msg || !hv_panic_page;
+}
+
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
void *args)
{
struct pt_regs *regs;
- regs = current_pt_regs();
+ vmbus_initiate_unload(true);
- hyperv_report_panic(regs, val);
+ /*
+ * Hyper-V should be notified only once about a panic. If we will be
+ * doing hyperv_report_panic_msg() later with kmsg data, don't do
+ * the notification here.
+ */
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
+ && hyperv_report_reg()) {
+ regs = current_pt_regs();
+ hyperv_report_panic(regs, val, false);
+ }
return NOTIFY_DONE;
}
@@ -65,7 +87,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
- hyperv_report_panic(regs, val);
+ /*
+ * Hyper-V should be notified only once about a panic. If we will be
+ * doing hyperv_report_panic_msg() later with kmsg data, don't do
+ * the notification here.
+ */
+ if (hyperv_report_reg())
+ hyperv_report_panic(regs, val, true);
return NOTIFY_DONE;
}
@@ -950,6 +978,9 @@ static int vmbus_resume(struct device *child_device)
return drv->resume(dev);
}
+#else
+#define vmbus_suspend NULL
+#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */
/*
@@ -969,11 +1000,22 @@ static void vmbus_device_release(struct device *device)
}
/*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
+ * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
+ *
+ * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
+ * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
+ * is no way to wake up a Generation-2 VM.
+ *
+ * The other 4 ops are for hibernation.
*/
+
static const struct dev_pm_ops vmbus_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
+ .suspend_noirq = NULL,
+ .resume_noirq = NULL,
+ .freeze_noirq = vmbus_suspend,
+ .thaw_noirq = vmbus_resume,
+ .poweroff_noirq = vmbus_suspend,
+ .restore_noirq = vmbus_resume,
};
/* The one and only one */
@@ -1253,13 +1295,6 @@ static void vmbus_isr(void)
}
/*
- * Boolean to control whether to report panic messages over Hyper-V.
- *
- * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
- */
-static int sysctl_record_panic_msg = 1;
-
-/*
* Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
* buffer and call into Hyper-V to transfer the data.
*/
@@ -1382,19 +1417,29 @@ static int vmbus_bus_init(void)
hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
if (hv_panic_page) {
ret = kmsg_dump_register(&hv_kmsg_dumper);
- if (ret)
+ if (ret) {
pr_err("Hyper-V: kmsg dump register "
"error 0x%x\n", ret);
+ hv_free_hyperv_page(
+ (unsigned long)hv_panic_page);
+ hv_panic_page = NULL;
+ }
} else
pr_err("Hyper-V: panic message page memory "
"allocation failed");
}
register_die_notifier(&hyperv_die_block);
- atomic_notifier_chain_register(&panic_notifier_list,
- &hyperv_panic_block);
}
+ /*
+ * Always register the panic notifier because we need to unload
+ * the VMbus channel connection to prevent any VMbus
+ * activity after the VM panics.
+ */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &hyperv_panic_block);
+
vmbus_request_offers();
return 0;
@@ -1407,7 +1452,6 @@ err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
- hv_free_hyperv_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
return ret;
@@ -2204,8 +2248,6 @@ static int vmbus_bus_suspend(struct device *dev)
vmbus_initiate_unload(false);
- vmbus_connection.conn_state = DISCONNECTED;
-
/* Reset the event for the next resume. */
reinit_completion(&vmbus_connection.ready_for_resume_event);
@@ -2253,6 +2295,9 @@ static int vmbus_bus_resume(struct device *dev)
return 0;
}
+#else
+#define vmbus_bus_suspend NULL
+#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
@@ -2263,16 +2308,24 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
/*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
- * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
- * pci "noirq" restore callback runs before "non-noirq" callbacks (see
+ * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
+ * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
+ * the resume path, the pci "noirq" restore op runs before the "non-noirq" ops (see
* resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
* dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
- * resume callback must also run via the "noirq" callbacks.
+ * resume callback must also run via the "noirq" ops.
+ *
+ * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
+ * earlier in this file before vmbus_pm.
*/
+
static const struct dev_pm_ops vmbus_bus_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
+ .suspend_noirq = NULL,
+ .resume_noirq = NULL,
+ .freeze_noirq = vmbus_bus_suspend,
+ .thaw_noirq = vmbus_bus_resume,
+ .poweroff_noirq = vmbus_bus_suspend,
+	.restore_noirq = vmbus_bus_resume,
};
static struct acpi_driver vmbus_acpi_driver = {
@@ -2289,7 +2342,6 @@ static void hv_kexec_handler(void)
{
hv_stimer_global_cleanup();
vmbus_initiate_unload(false);
- vmbus_connection.conn_state = DISCONNECTED;
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
@@ -2306,7 +2358,6 @@ static void hv_crash_handler(struct pt_regs *regs)
* doing the cleanup for current CPU only. This should be sufficient
* for kdump.
*/
- vmbus_connection.conn_state = DISCONNECTED;
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 47ac20aee06f..4c62f900bf7e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -280,6 +280,15 @@ config SENSORS_ASC7621
This driver can also be built as a module. If so, the module
will be called asc7621.
+config SENSORS_AXI_FAN_CONTROL
+ tristate "Analog Devices FAN Control HDL Core driver"
+ help
+ If you say yes here you get support for the Analog Devices
+	  AXI HDL fan monitoring and control core.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called axi-fan-control.
+
config SENSORS_K8TEMP
tristate "AMD Athlon64/FX or Opteron temperature sensor"
depends on X86 && PCI
@@ -403,7 +412,7 @@ config SENSORS_DRIVETEMP
hard disk drives.
This driver can also be built as a module. If so, the module
- will be called satatemp.
+ will be called drivetemp.
config SENSORS_DS620
tristate "Dallas Semiconductor DS620"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 613f50987965..b0b9c8e57176 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_SENSORS_AS370) += as370-hwmon.o
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
+obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 01c2eeb02aa9..054080443b47 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -19,6 +19,7 @@
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/jiffies.h>
+#include <linux/of.h>
#include <linux/util_macros.h>
/* Indexes for the sysfs hooks */
@@ -193,6 +194,7 @@ struct adt7475_data {
unsigned long measure_updated;
bool valid;
+ u8 config2;
u8 config4;
u8 config5;
u8 has_voltage;
@@ -1458,6 +1460,85 @@ static int adt7475_update_limits(struct i2c_client *client)
return 0;
}
+static int set_property_bit(const struct i2c_client *client,
+			    const char *property, u8 *config, u8 bit_index)
+{
+ u32 prop_value = 0;
+ int ret = of_property_read_u32(client->dev.of_node, property,
+ &prop_value);
+
+ if (!ret) {
+ if (prop_value)
+ *config |= (1 << bit_index);
+ else
+ *config &= ~(1 << bit_index);
+ }
+
+ return ret;
+}
+
+static int load_attenuators(const struct i2c_client *client, int chip,
+ struct adt7475_data *data)
+{
+ int ret;
+
+ if (chip == adt7476 || chip == adt7490) {
+ set_property_bit(client, "adi,bypass-attenuator-in0",
+ &data->config4, 4);
+ set_property_bit(client, "adi,bypass-attenuator-in1",
+ &data->config4, 5);
+ set_property_bit(client, "adi,bypass-attenuator-in3",
+ &data->config4, 6);
+ set_property_bit(client, "adi,bypass-attenuator-in4",
+ &data->config4, 7);
+
+ ret = i2c_smbus_write_byte_data(client, REG_CONFIG4,
+ data->config4);
+ if (ret < 0)
+ return ret;
+ } else if (chip == adt7473 || chip == adt7475) {
+ set_property_bit(client, "adi,bypass-attenuator-in1",
+ &data->config2, 5);
+
+ ret = i2c_smbus_write_byte_data(client, REG_CONFIG2,
+ data->config2);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
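+
+/*
+ * Hypothetical device tree fragment exercising the properties read
+ * above (node name, address and values are illustrative only):
+ *
+ *	hwmon@2e {
+ *		compatible = "adi,adt7476";
+ *		reg = <0x2e>;
+ *		adi,bypass-attenuator-in0 = <1>;
+ *		adi,bypass-attenuator-in1 = <0>;
+ *	};
+ */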
+
+static int adt7475_set_pwm_polarity(struct i2c_client *client)
+{
+ u32 states[ADT7475_PWM_COUNT];
+ int ret, i;
+ u8 val;
+
+ ret = of_property_read_u32_array(client->dev.of_node,
+ "adi,pwm-active-state", states,
+ ARRAY_SIZE(states));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ ret = adt7475_read(PWM_CONFIG_REG(i));
+ if (ret < 0)
+ return ret;
+ val = ret;
+ if (states[i])
+ val &= ~BIT(4);
+ else
+ val |= BIT(4);
+
+ ret = i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(i), val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
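+
+/*
+ * Purely illustrative matching property: one cell per PWM channel,
+ * nonzero for active high, zero for active low, e.g.
+ *
+ *	adi,pwm-active-state = <1 0 1>;
+ */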
+
static int adt7475_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1472,7 +1553,7 @@ static int adt7475_probe(struct i2c_client *client,
struct adt7475_data *data;
struct device *hwmon_dev;
int i, ret = 0, revision, group_num = 0;
- u8 config2, config3;
+ u8 config3;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
@@ -1546,8 +1627,12 @@ static int adt7475_probe(struct i2c_client *client,
}
/* Voltage attenuators can be bypassed, globally or individually */
- config2 = adt7475_read(REG_CONFIG2);
- if (config2 & CONFIG2_ATTN) {
+ data->config2 = adt7475_read(REG_CONFIG2);
+ ret = load_attenuators(client, chip, data);
+ if (ret)
+ dev_warn(&client->dev, "Error configuring attenuator bypass\n");
+
+ if (data->config2 & CONFIG2_ATTN) {
data->bypass_attn = (0x3 << 3) | 0x3;
} else {
data->bypass_attn = ((data->config4 & CONFIG4_ATTN_IN10) >> 4) |
@@ -1562,6 +1647,10 @@ static int adt7475_probe(struct i2c_client *client,
for (i = 0; i < ADT7475_PWM_COUNT; i++)
adt7475_read_pwm(client, i);
+ ret = adt7475_set_pwm_polarity(client);
+ if (ret && ret != -EINVAL)
+ dev_warn(&client->dev, "Error configuring pwm polarity\n");
+
/* Start monitoring */
switch (chip) {
case adt7475:
diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
new file mode 100644
index 000000000000..38d9cdb3db1a
--- /dev/null
+++ b/drivers/hwmon/axi-fan-control.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Fan Control HDL CORE driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/fpga/adi-axi-common.h>
+#include <linux/hwmon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
+/* register map */
+#define ADI_REG_RSTN 0x0080
+#define ADI_REG_PWM_WIDTH 0x0084
+#define ADI_REG_TACH_PERIOD 0x0088
+#define ADI_REG_TACH_TOLERANCE 0x008c
+#define ADI_REG_PWM_PERIOD 0x00c0
+#define ADI_REG_TACH_MEASUR 0x00c4
+#define ADI_REG_TEMPERATURE 0x00c8
+
+#define ADI_REG_IRQ_MASK 0x0040
+#define ADI_REG_IRQ_PENDING 0x0044
+#define ADI_REG_IRQ_SRC 0x0048
+
+/* IRQ sources */
+#define ADI_IRQ_SRC_PWM_CHANGED BIT(0)
+#define ADI_IRQ_SRC_TACH_ERR BIT(1)
+#define ADI_IRQ_SRC_TEMP_INCREASE BIT(2)
+#define ADI_IRQ_SRC_NEW_MEASUR BIT(3)
+#define ADI_IRQ_SRC_MASK GENMASK(3, 0)
+#define ADI_IRQ_MASK_OUT_ALL 0xFFFFFFFFU
+
+#define SYSFS_PWM_MAX 255
+
+struct axi_fan_control_data {
+ void __iomem *base;
+ struct device *hdev;
+ unsigned long clk_rate;
+ int irq;
+ /* pulses per revolution */
+ u32 ppr;
+ bool hw_pwm_req;
+ bool update_tacho_params;
+ u8 fan_fault;
+};
+
+static inline void axi_iowrite(const u32 val, const u32 reg,
+ const struct axi_fan_control_data *ctl)
+{
+ iowrite32(val, ctl->base + reg);
+}
+
+static inline u32 axi_ioread(const u32 reg,
+ const struct axi_fan_control_data *ctl)
+{
+ return ioread32(ctl->base + reg);
+}
+
+static long axi_fan_control_get_pwm_duty(const struct axi_fan_control_data *ctl)
+{
+ u32 pwm_width = axi_ioread(ADI_REG_PWM_WIDTH, ctl);
+ u32 pwm_period = axi_ioread(ADI_REG_PWM_PERIOD, ctl);
+ /*
+ * PWM_PERIOD is a RO register set by the core. It should never be 0.
+ * For now we are trusting the HW...
+ */
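+	/*
+	 * Worked example with illustrative register values: pwm_width =
+	 * 3000 and pwm_period = 4095 give
+	 * DIV_ROUND_CLOSEST(3000 * 255, 4095) = 187 on the 0..255 scale.
+	 */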
+ return DIV_ROUND_CLOSEST(pwm_width * SYSFS_PWM_MAX, pwm_period);
+}
+
+static int axi_fan_control_set_pwm_duty(const long val,
+ struct axi_fan_control_data *ctl)
+{
+ u32 pwm_period = axi_ioread(ADI_REG_PWM_PERIOD, ctl);
+ u32 new_width;
+ long __val = clamp_val(val, 0, SYSFS_PWM_MAX);
+
+ new_width = DIV_ROUND_CLOSEST(__val * pwm_period, SYSFS_PWM_MAX);
+
+ axi_iowrite(new_width, ADI_REG_PWM_WIDTH, ctl);
+
+ return 0;
+}
+
+static long axi_fan_control_get_fan_rpm(const struct axi_fan_control_data *ctl)
+{
+ const u32 tach = axi_ioread(ADI_REG_TACH_MEASUR, ctl);
+
+ if (tach == 0)
+		/* should we return an error, -EAGAIN maybe? */
+ return 0;
+ /*
+	 * The tacho period in seconds is:
+	 * TACH = 60 / (ppr * rpm), where rpm is revolutions per minute
+	 * and ppr is pulses per revolution.
+	 * Multiplying that period by the input clock rate gives the
+	 * number of clocks in one period, which is what the core
+	 * measures; from it we can derive the RPM value.
+ */
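+	/*
+	 * Worked example with illustrative numbers: clk_rate = 100 MHz,
+	 * ppr = 2 and tach = 1500000 clocks give
+	 * 60 * 100000000 / (2 * 1500000) = 2000 RPM.
+	 */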
+ return DIV_ROUND_CLOSEST(60 * ctl->clk_rate, ctl->ppr * tach);
+}
+
+static int axi_fan_control_read_temp(struct device *dev, u32 attr, long *val)
+{
+ struct axi_fan_control_data *ctl = dev_get_drvdata(dev);
+ long raw_temp;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ raw_temp = axi_ioread(ADI_REG_TEMPERATURE, ctl);
+ /*
+ * The formula for the temperature is:
+ * T = (ADC * 501.3743 / 2^bits) - 273.6777
+ * It's multiplied by 1000 to have millidegrees as
+ * specified by the hwmon sysfs interface.
+ */
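+		/*
+		 * Worked example with an illustrative reading: raw_temp =
+		 * 0xA000 (40960) gives (40960 * 501374) >> 16 = 313358,
+		 * and 313358 - 273677 = 39681 millidegrees (~39.7 degC).
+		 */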
+ *val = ((raw_temp * 501374) >> 16) - 273677;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int axi_fan_control_read_fan(struct device *dev, u32 attr, long *val)
+{
+ struct axi_fan_control_data *ctl = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_fan_fault:
+ *val = ctl->fan_fault;
+ /* clear it now */
+ ctl->fan_fault = 0;
+ return 0;
+ case hwmon_fan_input:
+ *val = axi_fan_control_get_fan_rpm(ctl);
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int axi_fan_control_read_pwm(struct device *dev, u32 attr, long *val)
+{
+ struct axi_fan_control_data *ctl = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = axi_fan_control_get_pwm_duty(ctl);
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int axi_fan_control_write_pwm(struct device *dev, u32 attr, long val)
+{
+ struct axi_fan_control_data *ctl = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return axi_fan_control_set_pwm_duty(val, ctl);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int axi_fan_control_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_fan:
+ *str = "FAN";
+ return 0;
+ case hwmon_temp:
+ *str = "SYSMON4";
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int axi_fan_control_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_fan:
+ return axi_fan_control_read_fan(dev, attr, val);
+ case hwmon_pwm:
+ return axi_fan_control_read_pwm(dev, attr, val);
+ case hwmon_temp:
+ return axi_fan_control_read_temp(dev, attr, val);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int axi_fan_control_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return axi_fan_control_write_pwm(dev, attr, val);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static umode_t axi_fan_control_fan_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ case hwmon_fan_label:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static umode_t axi_fan_control_pwm_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t axi_fan_control_temp_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static umode_t axi_fan_control_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ return axi_fan_control_fan_is_visible(attr);
+ case hwmon_pwm:
+ return axi_fan_control_pwm_is_visible(attr);
+ case hwmon_temp:
+ return axi_fan_control_temp_is_visible(attr);
+ default:
+ return 0;
+ }
+}
+
+/*
+ * This core has two ways of changing the PWM duty cycle: a request
+ * from userspace (a write to pwm1_input) or the core itself. When the
+ * change is made by the core, it uses predefined parameters to
+ * evaluate the tach signal, and in that case we cannot set them. When
+ * the request comes from the user, with some arbitrary value the core
+ * does not know about, we have to provide the tach parameters so that
+ * the core can evaluate the signal. In the IRQ handler we distinguish
+ * the two cases via the ADI_IRQ_SRC_TEMP_INCREASE interrupt, which
+ * tells us that the core requested a new duty cycle. After that there
+ * is a 5s delay during which the core waits for the fan speed to
+ * stabilize, followed by an ADI_IRQ_SRC_PWM_CHANGED irq, where we
+ * decide, based on the %ctl->hw_pwm_req flag, whether the next tach
+ * measurement cycle (already corresponding to the new duty cycle)
+ * needs new tach parameters.
+ */
+static irqreturn_t axi_fan_control_irq_handler(int irq, void *data)
+{
+ struct axi_fan_control_data *ctl = (struct axi_fan_control_data *)data;
+ u32 irq_pending = axi_ioread(ADI_REG_IRQ_PENDING, ctl);
+ u32 clear_mask;
+
+ if (irq_pending & ADI_IRQ_SRC_NEW_MEASUR) {
+ if (ctl->update_tacho_params) {
+ u32 new_tach = axi_ioread(ADI_REG_TACH_MEASUR, ctl);
+
+ /* get 25% tolerance */
+ u32 tach_tol = DIV_ROUND_CLOSEST(new_tach * 25, 100);
+ /* set new tacho parameters */
+ axi_iowrite(new_tach, ADI_REG_TACH_PERIOD, ctl);
+ axi_iowrite(tach_tol, ADI_REG_TACH_TOLERANCE, ctl);
+ ctl->update_tacho_params = false;
+ }
+ }
+
+ if (irq_pending & ADI_IRQ_SRC_PWM_CHANGED) {
+ /*
+ * if the pwm changes on behalf of software,
+ * we need to provide new tacho parameters to the core.
+ * Wait for the next measurement for that...
+ */
+ if (!ctl->hw_pwm_req) {
+ ctl->update_tacho_params = true;
+ } else {
+ ctl->hw_pwm_req = false;
+ sysfs_notify(&ctl->hdev->kobj, NULL, "pwm1");
+ }
+ }
+
+ if (irq_pending & ADI_IRQ_SRC_TEMP_INCREASE)
+ /* hardware requested a new pwm */
+ ctl->hw_pwm_req = true;
+
+ if (irq_pending & ADI_IRQ_SRC_TACH_ERR)
+ ctl->fan_fault = 1;
+
+ /* clear all interrupts */
+ clear_mask = irq_pending & ADI_IRQ_SRC_MASK;
+ axi_iowrite(clear_mask, ADI_REG_IRQ_PENDING, ctl);
+
+ return IRQ_HANDLED;
+}
+
+static int axi_fan_control_init(struct axi_fan_control_data *ctl,
+ const struct device_node *np)
+{
+ int ret;
+
+ /* get fan pulses per revolution */
+ ret = of_property_read_u32(np, "pulses-per-revolution", &ctl->ppr);
+ if (ret)
+ return ret;
+
+ /* 1, 2 and 4 are the typical and accepted values */
+ if (ctl->ppr != 1 && ctl->ppr != 2 && ctl->ppr != 4)
+ return -EINVAL;
+	/*
+	 * Unmask the four IRQ sources handled by this driver; all
+	 * other interrupts stay masked.
+	 */
+ axi_iowrite(ADI_IRQ_MASK_OUT_ALL &
+ ~(ADI_IRQ_SRC_NEW_MEASUR | ADI_IRQ_SRC_TACH_ERR |
+ ADI_IRQ_SRC_PWM_CHANGED | ADI_IRQ_SRC_TEMP_INCREASE),
+ ADI_REG_IRQ_MASK, ctl);
+
+ /* bring the device out of reset */
+ axi_iowrite(0x01, ADI_REG_RSTN, ctl);
+
+ return ret;
+}
+
+static const struct hwmon_channel_info *axi_fan_control_info[] = {
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops axi_fan_control_hwmon_ops = {
+ .is_visible = axi_fan_control_is_visible,
+ .read = axi_fan_control_read,
+ .write = axi_fan_control_write,
+ .read_string = axi_fan_control_read_labels,
+};
+
+static const struct hwmon_chip_info axi_chip_info = {
+ .ops = &axi_fan_control_hwmon_ops,
+ .info = axi_fan_control_info,
+};
+
+static const u32 version_1_0_0 = ADI_AXI_PCORE_VER(1, 0, 'a');
+
+static const struct of_device_id axi_fan_control_of_match[] = {
+ { .compatible = "adi,axi-fan-control-1.00.a",
+ .data = (void *)&version_1_0_0},
+ {},
+};
+MODULE_DEVICE_TABLE(of, axi_fan_control_of_match);
+
+static int axi_fan_control_probe(struct platform_device *pdev)
+{
+ struct axi_fan_control_data *ctl;
+ struct clk *clk;
+ const struct of_device_id *id;
+ const char *name = "axi_fan_control";
+ u32 version;
+ int ret;
+
+ id = of_match_node(axi_fan_control_of_match, pdev->dev.of_node);
+ if (!id)
+ return -EINVAL;
+
+ ctl = devm_kzalloc(&pdev->dev, sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ ctl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctl->base))
+ return PTR_ERR(ctl->base);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "clk_get failed with %ld\n", PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ ctl->clk_rate = clk_get_rate(clk);
+ if (!ctl->clk_rate)
+ return -EINVAL;
+
+ version = axi_ioread(ADI_AXI_REG_VERSION, ctl);
+ if (ADI_AXI_PCORE_VER_MAJOR(version) !=
+ ADI_AXI_PCORE_VER_MAJOR((*(u32 *)id->data))) {
+ dev_err(&pdev->dev, "Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
+ ADI_AXI_PCORE_VER_MAJOR((*(u32 *)id->data)),
+ ADI_AXI_PCORE_VER_MINOR((*(u32 *)id->data)),
+ ADI_AXI_PCORE_VER_PATCH((*(u32 *)id->data)),
+ ADI_AXI_PCORE_VER_MAJOR(version),
+ ADI_AXI_PCORE_VER_MINOR(version),
+ ADI_AXI_PCORE_VER_PATCH(version));
+ return -ENODEV;
+ }
+
+ ctl->irq = platform_get_irq(pdev, 0);
+ if (ctl->irq < 0)
+ return ctl->irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, ctl->irq, NULL,
+ axi_fan_control_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+					name, ctl);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request an irq, %d", ret);
+ return ret;
+ }
+
+ ret = axi_fan_control_init(ctl, pdev->dev.of_node);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device\n");
+ return ret;
+ }
+
+ ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
+ name,
+ ctl,
+ &axi_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(ctl->hdev);
+}
+
+static struct platform_driver axi_fan_control_driver = {
+ .driver = {
+ .name = "axi_fan_control_driver",
+ .of_match_table = axi_fan_control_of_match,
+ },
+ .probe = axi_fan_control_probe,
+};
+module_platform_driver(axi_fan_control_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices Fan Control HDL CORE driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index d855c78fb8be..bb9211215a68 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -709,7 +709,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
return 0;
}
static const struct x86_cpu_id __initconst coretemp_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
+ X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_DTHERM, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 53b517dbe7e6..4af2fc309c28 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev,
int channel = to_sensor_dev_attr(devattr)->index;
int ret;
- mutex_lock(&hwmon->hwmon_lock);
+ mutex_lock(&hwmon->da9052->auxadc_lock);
ret = __da9052_read_tsi(dev, channel);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->da9052->auxadc_lock);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index d4c83009d625..ab719d372b0d 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -7,7 +7,7 @@
* Hwmon integration:
* Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
* Copyright (C) 2013, 2014 Guenter Roeck <linux@roeck-us.net>
- * Copyright (C) 2014, 2015 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2014, 2015 Pali Rohár <pali@kernel.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -86,7 +86,7 @@ static unsigned int auto_fan;
#define I8K_HWMON_HAVE_FAN3 (1 << 12)
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Dell laptop SMM BIOS hwmon driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("i8k");
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 370d0c74eb01..0d4f3d97ffc6 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -264,12 +264,18 @@ static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
return err;
switch (attr) {
case hwmon_temp_input:
+ if (!temp_is_valid(buf[SCT_STATUS_TEMP]))
+ return -ENODATA;
*val = temp_from_sct(buf[SCT_STATUS_TEMP]);
break;
case hwmon_temp_lowest:
+ if (!temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]))
+ return -ENODATA;
*val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
break;
case hwmon_temp_highest:
+ if (!temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]))
+ return -ENODATA;
*val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
break;
default:
@@ -340,7 +346,7 @@ static int drivetemp_identify_sata(struct drivetemp_data *st)
st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
if (!have_sct_data_table)
- goto skip_sct;
+ goto skip_sct_data;
/* Request and read temperature history table */
memset(buf, '\0', sizeof(st->smartdata));
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index d05ab713566d..a4ec85207782 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -219,7 +219,7 @@ struct aem_read_sensor_req {
struct aem_read_sensor_resp {
struct aem_iana_id id;
- u8 bytes[0];
+ u8 bytes[];
} __packed;
/* Data structures to talk to the IPMI layer */
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 0e525cfbdfc5..a750647e66a4 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -186,7 +186,7 @@ static void make_sensor_label(struct device_node *np,
u32 id;
size_t n;
- n = snprintf(sdata->label, sizeof(sdata->label), "%s", label);
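+	/*
+	 * scnprintf() returns the number of bytes actually written to
+	 * the buffer, unlike snprintf(), which returns the length the
+	 * full string would have had; the "sdata->label + n" offsets
+	 * below therefore stay inside the buffer even on truncation.
+	 */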
+ n = scnprintf(sdata->label, sizeof(sdata->label), "%s", label);
/*
* Core temp pretty print
@@ -199,11 +199,11 @@ static void make_sensor_label(struct device_node *np,
* The digital thermal sensors are associated
* with a core.
*/
- n += snprintf(sdata->label + n,
+ n += scnprintf(sdata->label + n,
sizeof(sdata->label) - n, " %d",
cpuid);
else
- n += snprintf(sdata->label + n,
+ n += scnprintf(sdata->label + n,
sizeof(sdata->label) - n, " phy%d", id);
}
@@ -211,7 +211,7 @@ static void make_sensor_label(struct device_node *np,
* Membuffer pretty print
*/
if (!of_property_read_u32(np, "ibm,chip-id", &id))
- n += snprintf(sdata->label + n, sizeof(sdata->label) - n,
+ n += scnprintf(sdata->label + n, sizeof(sdata->label) - n,
" %d", id & 0xffff);
}
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index f2d81b0558e5..e3f1ebee7130 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -506,7 +506,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
data->config = config;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42",
data, &jc42_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index e39354ffe973..9915578533bb 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -96,13 +96,20 @@ struct k10temp_data {
void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
int temp_offset;
u32 temp_adjust_mask;
- bool show_tdie;
- u32 show_tccd;
+ u32 show_temp;
u32 svi_addr[2];
+ bool is_zen;
bool show_current;
int cfactor[2];
};
+#define TCTL_BIT 0
+#define TDIE_BIT 1
+#define TCCD_BIT(x) ((x) + 2)
+
+#define HAVE_TEMP(d, channel) ((d)->show_temp & BIT(channel))
+#define HAVE_TDIE(d) HAVE_TEMP(d, TDIE_BIT)
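+
+/*
+ * Temperature channel layout, matching k10temp_temp_label below:
+ * channel 0 = Tctl, channel 1 = Tdie, channels 2..9 = Tccd1..Tccd8,
+ * hence TCCD_BIT(x) = (x) + 2.
+ */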
+
struct tctl_offset {
u8 model;
char const *id;
@@ -179,9 +186,9 @@ static long get_raw_temp(struct k10temp_data *data)
return temp;
}
-const char *k10temp_temp_label[] = {
- "Tdie",
+static const char *k10temp_temp_label[] = {
"Tctl",
+ "Tdie",
"Tccd1",
"Tccd2",
"Tccd3",
@@ -192,12 +199,12 @@ const char *k10temp_temp_label[] = {
"Tccd8",
};
-const char *k10temp_in_label[] = {
+static const char *k10temp_in_label[] = {
"Vcore",
"Vsoc",
};
-const char *k10temp_curr_label[] = {
+static const char *k10temp_curr_label[] = {
"Icore",
"Isoc",
};
@@ -269,13 +276,13 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
switch (attr) {
case hwmon_temp_input:
switch (channel) {
- case 0: /* Tdie */
- *val = get_raw_temp(data) - data->temp_offset;
+ case 0: /* Tctl */
+ *val = get_raw_temp(data);
if (*val < 0)
*val = 0;
break;
- case 1: /* Tctl */
- *val = get_raw_temp(data);
+ case 1: /* Tdie */
+ *val = get_raw_temp(data) - data->temp_offset;
if (*val < 0)
*val = 0;
break;
@@ -333,23 +340,11 @@ static umode_t k10temp_is_visible(const void *_data,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- switch (channel) {
- case 0: /* Tdie, or Tctl if we don't show it */
- break;
- case 1: /* Tctl */
- if (!data->show_tdie)
- return 0;
- break;
- case 2 ... 9: /* Tccd{1-8} */
- if (!(data->show_tccd & BIT(channel - 2)))
- return 0;
- break;
- default:
+ if (!HAVE_TEMP(data, channel))
return 0;
- }
break;
case hwmon_temp_max:
- if (channel || data->show_tdie)
+ if (channel || data->is_zen)
return 0;
break;
case hwmon_temp_crit:
@@ -368,20 +363,9 @@ static umode_t k10temp_is_visible(const void *_data,
return 0;
break;
case hwmon_temp_label:
- /* No labels if we don't show the die temperature */
- if (!data->show_tdie)
- return 0;
- switch (channel) {
- case 0: /* Tdie */
- case 1: /* Tctl */
- break;
- case 2 ... 9: /* Tccd{1-8} */
- if (!(data->show_tccd & BIT(channel - 2)))
- return 0;
- break;
- default:
+ /* Show temperature labels only on Zen CPUs */
+ if (!data->is_zen || !HAVE_TEMP(data, channel))
return 0;
- }
break;
default:
return 0;
@@ -480,7 +464,7 @@ static void k10temp_init_debugfs(struct k10temp_data *data)
char name[32];
/* Only show debugfs data for Family 17h/18h CPUs */
- if (!data->show_tdie)
+ if (!data->is_zen)
return;
scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));
@@ -546,7 +530,7 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev,
amd_smn_read(amd_pci_dev_to_node_id(pdev),
F17H_M70H_CCD_TEMP(i), &regval);
if (regval & F17H_M70H_CCD_TEMP_VALID)
- data->show_tccd |= BIT(i);
+ data->show_temp |= BIT(TCCD_BIT(i));
}
}
@@ -573,6 +557,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
data->pdev = pdev;
+ data->show_temp |= BIT(TCTL_BIT); /* Always show Tctl */
if (boot_cpu_data.x86 == 0x15 &&
((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
@@ -582,7 +567,8 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
data->read_tempreg = read_tempreg_nb_f17;
- data->show_tdie = true;
+ data->show_temp |= BIT(TDIE_BIT); /* show Tdie */
+ data->is_zen = true;
switch (boot_cpu_data.x86_model) {
case 0x1: /* Zen */
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 1eeb9d7de2a0..733c48bf6c98 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -262,10 +262,20 @@ static int lm73_detect(struct i2c_client *new_client,
return 0;
}
+static const struct of_device_id lm73_of_match[] = {
+ {
+ .compatible = "ti,lm73",
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, lm73_of_match);
+
static struct i2c_driver lm73_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm73",
+ .of_match_table = lm73_of_match,
},
.probe = lm73_probe,
.id_table = lm73_ids,
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 281c81edabc6..a7eb10d2a053 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -7,6 +7,11 @@
*
* Copyright (c) 2019 Advantech
* Author: Amy.Shih <amy.shih@advantech.com.tw>
+ *
+ * Supports the following chips:
+ *
+ * Chip #vin #fan #pwm #temp #dts chip ID
+ * nct7904d 20 12 4 5 8 0xc5
*/
#include <linux/module.h>
@@ -36,6 +41,7 @@
#define FANCTL_MAX 4 /* Counted from 1 */
#define TCPU_MAX 8 /* Counted from 1 */
#define TEMP_MAX 4 /* Counted from 1 */
+#define SMI_STS_MAX 10 /* Counted from 1 */
#define VT_ADC_CTRL0_REG 0x20 /* Bank 0 */
#define VT_ADC_CTRL1_REG 0x21 /* Bank 0 */
@@ -356,6 +362,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret, temp;
unsigned int reg1, reg2, reg3;
+ s8 temps;
switch (attr) {
case hwmon_temp_input:
@@ -461,7 +468,8 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
if (ret < 0)
return ret;
- *val = ret * 1000;
+ temps = ret;
+ *val = temps * 1000;
return 0;
}
@@ -820,6 +828,10 @@ static const struct hwmon_channel_info *nct7904_info[] = {
HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
@@ -853,6 +865,18 @@ static const struct hwmon_channel_info *nct7904_info[] = {
HWMON_T_CRIT_HYST,
HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
HWMON_T_CRIT_HYST),
NULL
};
@@ -988,6 +1012,13 @@ static int nct7904_probe(struct i2c_client *client,
data->fan_mode[i] = ret;
}
+ /* Read all of SMI status register to clear alarms */
+ for (i = 0; i < SMI_STS_MAX; i++) {
+ ret = nct7904_read_reg(data, BANK_0, SMI_STS1_REG + i);
+ if (ret < 0)
+ return ret;
+ }
+
hwmon_dev =
devm_hwmon_device_register_with_info(dev, client->name, data,
&nct7904_chip_info, NULL);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index a9ea06204767..de12a565006d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -92,10 +92,10 @@ config SENSORS_IRPS5401
be called irps5401.
config SENSORS_ISL68137
- tristate "Intersil ISL68137"
+ tristate "Renesas Digital Multiphase Voltage Regulators"
help
- If you say yes here you get hardware monitoring support for Intersil
- ISL68137.
+ If you say yes here you get hardware monitoring support for Renesas
+ digital multiphase voltage regulators.
This driver can also be built as a module. If so, the module will
be called isl68137.
@@ -113,8 +113,8 @@ config SENSORS_LTC2978
tristate "Linear Technologies LTC2978 and compatibles"
help
If you say yes here you get hardware monitoring support for Linear
- Technology LTC2974, LTC2975, LTC2977, LTC2978, LTC2980, LTC3880,
- LTC3883, LTC3886, LTC3887, LTCM2987, LTM4675, and LTM4676.
+ Technology LTC2972, LTC2974, LTC2975, LTC2977, LTC2978, LTC2979,
+ LTC2980, and LTM2987.
This driver can also be built as a module. If so, the module will
be called ltc2978.
@@ -123,9 +123,10 @@ config SENSORS_LTC2978_REGULATOR
bool "Regulator support for LTC2978 and compatibles"
depends on SENSORS_LTC2978 && REGULATOR
help
- If you say yes here you get regulator support for Linear
- Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, LTM4676
- and LTM4686.
+ If you say yes here you get regulator support for Linear Technology
+ LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7880,
+ LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680, LTM4686,
+ and LTM4700.
config SENSORS_LTC3815
tristate "Linear Technologies LTC3815"
@@ -209,10 +210,10 @@ config SENSORS_TPS40422
be called tps40422.
config SENSORS_TPS53679
- tristate "TI TPS53679, TPS53688"
+ tristate "TI TPS53647, TPS53667, TPS53679, TPS53681, TPS53688"
help
If you say yes here you get hardware monitoring support for TI
- TPS53679, TPS53688
+ TPS53647, TPS53667, TPS53679, TPS53681, and TPS53688.
This driver can also be built as a module. If so, the module will
be called tps53679.
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 5caa37fbfc18..e25f541227da 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -226,7 +226,8 @@ static int adm1275_write_pmon_config(const struct adm1275_data *data,
return ret;
}
-static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
+static int adm1275_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
const struct adm1275_data *data = to_adm1275_data(info);
@@ -239,58 +240,68 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_IOUT_UC_FAULT_LIMIT:
if (!data->have_uc_fault)
return -ENXIO;
- ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
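+		/*
+		 * The added phase argument of 0xff requests a read that
+		 * is not phase-specific (assumed here to be the pmbus
+		 * core's "no phase" convention).
+		 */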
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1275_IOUT_WARN2_LIMIT);
break;
case PMBUS_IOUT_OC_FAULT_LIMIT:
if (!data->have_oc_fault)
return -ENXIO;
- ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1275_IOUT_WARN2_LIMIT);
break;
case PMBUS_VOUT_OV_WARN_LIMIT:
if (data->have_vout)
return -ENODATA;
- ret = pmbus_read_word_data(client, 0,
+ ret = pmbus_read_word_data(client, 0, 0xff,
ADM1075_VAUX_OV_WARN_LIMIT);
break;
case PMBUS_VOUT_UV_WARN_LIMIT:
if (data->have_vout)
return -ENODATA;
- ret = pmbus_read_word_data(client, 0,
+ ret = pmbus_read_word_data(client, 0, 0xff,
ADM1075_VAUX_UV_WARN_LIMIT);
break;
case PMBUS_READ_VOUT:
if (data->have_vout)
return -ENODATA;
- ret = pmbus_read_word_data(client, 0, ADM1075_READ_VAUX);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1075_READ_VAUX);
break;
case PMBUS_VIRT_READ_IOUT_MIN:
if (!data->have_iout_min)
return -ENXIO;
- ret = pmbus_read_word_data(client, 0, ADM1293_IOUT_MIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1293_IOUT_MIN);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
- ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1275_PEAK_IOUT);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VOUT);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1275_PEAK_VOUT);
break;
case PMBUS_VIRT_READ_VIN_MAX:
- ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1275_PEAK_VIN);
break;
case PMBUS_VIRT_READ_PIN_MIN:
if (!data->have_pin_min)
return -ENXIO;
- ret = pmbus_read_word_data(client, 0, ADM1293_PIN_MIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1293_PIN_MIN);
break;
case PMBUS_VIRT_READ_PIN_MAX:
if (!data->have_pin_max)
return -ENXIO;
- ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1276_PEAK_PIN);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
if (!data->have_temp_max)
return -ENXIO;
- ret = pmbus_read_word_data(client, 0, ADM1278_PEAK_TEMP);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ ADM1278_PEAK_TEMP);
break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
case PMBUS_VIRT_RESET_VOUT_HISTORY:
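The adm1275 hunk above shows the mechanical change that recurs through the rest of this series: pmbus_read_word_data() gains a phase argument, and callers that are not phase aware pass 0xff, the sentinel for "no specific phase / all phases". A minimal sketch of a driver callback using the new signature; the chip name and MYCHIP_MFR_VIN_PEAK register are hypothetical, not taken from any driver in this patch:

#include <linux/errno.h>
#include <linux/i2c.h>
#include "pmbus.h"

#define MYCHIP_MFR_VIN_PEAK	0xd1	/* hypothetical vendor register */

/* Map a virtual register onto a vendor one, forwarding the phase */
static int mychip_read_word_data(struct i2c_client *client, int page,
				 int phase, int reg)
{
	switch (reg) {
	case PMBUS_VIRT_READ_VIN_MAX:
		return pmbus_read_word_data(client, page, phase,
					    MYCHIP_MFR_VIN_PEAK);
	default:
		return -ENODATA;	/* let pmbus_core handle the rest */
	}
}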
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index 3795fe55b84f..7d300f2f338d 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -33,9 +33,12 @@
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
+#define CFFPS_CCIN_REVISION GENMASK(7, 0)
+#define CFFPS_CCIN_REVISION_LEGACY 0xde
#define CFFPS_CCIN_VERSION GENMASK(15, 8)
#define CFFPS_CCIN_VERSION_1 0x2b
#define CFFPS_CCIN_VERSION_2 0x2e
+#define CFFPS_CCIN_VERSION_3 0x51
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
@@ -148,7 +151,7 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
struct ibm_cffps *psu = to_psu(idxp, idx);
char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
- pmbus_set_page(psu->client, 0);
+ pmbus_set_page(psu->client, 0, 0xff);
switch (idx) {
case CFFPS_DEBUGFS_INPUT_HISTORY:
@@ -247,7 +250,7 @@ static ssize_t ibm_cffps_debugfs_write(struct file *file,
switch (idx) {
case CFFPS_DEBUGFS_ON_OFF_CONFIG:
- pmbus_set_page(psu->client, 0);
+ pmbus_set_page(psu->client, 0, 0xff);
rc = simple_write_to_buffer(&data, 1, ppos, buf, count);
if (rc <= 0)
@@ -325,13 +328,13 @@ static int ibm_cffps_read_byte_data(struct i2c_client *client, int page,
}
static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
- int reg)
+ int phase, int reg)
{
int rc, mfr;
switch (reg) {
case PMBUS_STATUS_WORD:
- rc = pmbus_read_word_data(client, page, reg);
+ rc = pmbus_read_word_data(client, page, phase, reg);
if (rc < 0)
return rc;
@@ -348,7 +351,8 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
rc |= PB_STATUS_OFF;
break;
case PMBUS_VIRT_READ_VMON:
- rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD);
+ rc = pmbus_read_word_data(client, page, phase,
+ CFFPS_12VCS_VOUT_CMD);
break;
default:
rc = -ENODATA;
@@ -379,7 +383,7 @@ static int ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
dev_dbg(&psu->client->dev, "LED brightness set: %d. Command: %d.\n",
brightness, next_led_state);
- pmbus_set_page(psu->client, 0);
+ pmbus_set_page(psu->client, 0, 0xff);
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
next_led_state);
@@ -401,7 +405,7 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
dev_dbg(&psu->client->dev, "LED blink set.\n");
- pmbus_set_page(psu->client, 0);
+ pmbus_set_page(psu->client, 0, 0xff);
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
CFFPS_LED_BLINK);
@@ -485,11 +489,14 @@ static int ibm_cffps_probe(struct i2c_client *client,
vs = (enum versions)id->driver_data;
if (vs == cffps_unknown) {
+ u16 ccin_revision = 0;
u16 ccin_version = CFFPS_CCIN_VERSION_1;
int ccin = i2c_smbus_read_word_swapped(client, CFFPS_CCIN_CMD);
- if (ccin > 0)
+ if (ccin > 0) {
+ ccin_revision = FIELD_GET(CFFPS_CCIN_REVISION, ccin);
ccin_version = FIELD_GET(CFFPS_CCIN_VERSION, ccin);
+ }
switch (ccin_version) {
default:
@@ -499,6 +506,12 @@ static int ibm_cffps_probe(struct i2c_client *client,
case CFFPS_CCIN_VERSION_2:
vs = cffps2;
break;
+ case CFFPS_CCIN_VERSION_3:
+ if (ccin_revision == CFFPS_CCIN_REVISION_LEGACY)
+ vs = cffps1;
+ else
+ vs = cffps2;
+ break;
}
/* Set the client name to include the version number. */
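The new CFFPS_CCIN_REVISION mask complements CFFPS_CCIN_VERSION: the 16-bit CCIN word read in probe carries the version in its high byte and the revision in its low byte, and version-3 units with the legacy revision 0xde are still driven as cffps1. A small sketch of the decode using the same bitfield helpers (the function name is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Split a CCIN word the way the probe code above does */
static void ccin_decode(u16 ccin, u8 *version, u8 *revision)
{
	*version = FIELD_GET(GENMASK(15, 8), ccin);
	*revision = FIELD_GET(GENMASK(7, 0), ccin);
}

For example, a CCIN of 0x51de decodes to version 0x51 (CFFPS_CCIN_VERSION_3) and revision 0xde (CFFPS_CCIN_REVISION_LEGACY), which the switch in probe maps to cffps1.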
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index 0d878bcd6d26..3eea3e006a96 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -21,37 +21,42 @@
#define IR35221_MFR_IOUT_VALLEY 0xcb
#define IR35221_MFR_TEMP_VALLEY 0xcc
-static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
+static int ir35221_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
int ret;
switch (reg) {
case PMBUS_VIRT_READ_VIN_MAX:
- ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ IR35221_MFR_VIN_PEAK);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ IR35221_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
- ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ IR35221_MFR_IOUT_PEAK);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
- ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ IR35221_MFR_TEMP_PEAK);
break;
case PMBUS_VIRT_READ_VIN_MIN:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
IR35221_MFR_VIN_VALLEY);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
IR35221_MFR_VOUT_VALLEY);
break;
case PMBUS_VIRT_READ_IOUT_MIN:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
IR35221_MFR_IOUT_VALLEY);
break;
case PMBUS_VIRT_READ_TEMP_MIN:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
IR35221_MFR_TEMP_VALLEY);
break;
default:
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 515596c92fe1..0c622711ef7e 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Hardware monitoring driver for Intersil ISL68137
+ * Hardware monitoring driver for Renesas Digital Multiphase Voltage Regulators
*
* Copyright (c) 2017 Google Inc
+ * Copyright (c) 2020 Renesas Electronics America
*
*/
@@ -14,9 +15,61 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysfs.h>
+
#include "pmbus.h"
#define ISL68137_VOUT_AVS 0x30
+#define RAA_DMPVR2_READ_VMON 0xc8
+
+enum chips {
+ isl68137,
+ isl68220,
+ isl68221,
+ isl68222,
+ isl68223,
+ isl68224,
+ isl68225,
+ isl68226,
+ isl68227,
+ isl68229,
+ isl68233,
+ isl68239,
+ isl69222,
+ isl69223,
+ isl69224,
+ isl69225,
+ isl69227,
+ isl69228,
+ isl69234,
+ isl69236,
+ isl69239,
+ isl69242,
+ isl69243,
+ isl69247,
+ isl69248,
+ isl69254,
+ isl69255,
+ isl69256,
+ isl69259,
+ isl69260,
+ isl69268,
+ isl69269,
+ isl69298,
+ raa228000,
+ raa228004,
+ raa228006,
+ raa228228,
+ raa229001,
+ raa229004,
+};
+
+enum variants {
+ raa_dmpvr1_2rail,
+ raa_dmpvr2_1rail,
+ raa_dmpvr2_2rail,
+ raa_dmpvr2_3rail,
+ raa_dmpvr2_hv,
+};
static ssize_t isl68137_avs_enable_show_page(struct i2c_client *client,
int page,
@@ -49,7 +102,8 @@ static ssize_t isl68137_avs_enable_store_page(struct i2c_client *client,
* enabling AVS control is the workaround.
*/
if (op_val == ISL68137_VOUT_AVS) {
- rc = pmbus_read_word_data(client, page, PMBUS_VOUT_COMMAND);
+ rc = pmbus_read_word_data(client, page, 0xff,
+ PMBUS_VOUT_COMMAND);
if (rc < 0)
return rc;
@@ -98,13 +152,31 @@ static const struct attribute_group enable_group = {
.attrs = enable_attrs,
};
-static const struct attribute_group *attribute_groups[] = {
+static const struct attribute_group *isl68137_attribute_groups[] = {
&enable_group,
NULL,
};
-static struct pmbus_driver_info isl68137_info = {
- .pages = 2,
+static int raa_dmpvr2_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ ret = pmbus_read_word_data(client, page, phase,
+ RAA_DMPVR2_READ_VMON);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static struct pmbus_driver_info raa_dmpvr_info = {
+ .pages = 3,
.format[PSC_VOLTAGE_IN] = direct,
.format[PSC_VOLTAGE_OUT] = direct,
.format[PSC_CURRENT_IN] = direct,
@@ -113,7 +185,7 @@ static struct pmbus_driver_info isl68137_info = {
.format[PSC_TEMPERATURE] = direct,
.m[PSC_VOLTAGE_IN] = 1,
.b[PSC_VOLTAGE_IN] = 0,
- .R[PSC_VOLTAGE_IN] = 3,
+ .R[PSC_VOLTAGE_IN] = 2,
.m[PSC_VOLTAGE_OUT] = 1,
.b[PSC_VOLTAGE_OUT] = 0,
.R[PSC_VOLTAGE_OUT] = 3,
@@ -133,24 +205,112 @@ static struct pmbus_driver_info isl68137_info = {
| PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2
| PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP
| PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
- | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
- .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
- | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
- .groups = attribute_groups,
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_VMON,
+ .func[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT
+ | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
+ .func[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT
+ | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
};
static int isl68137_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- return pmbus_do_probe(client, id, &isl68137_info);
+ struct pmbus_driver_info *info;
+
+ info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ memcpy(info, &raa_dmpvr_info, sizeof(*info));
+
+ switch (id->driver_data) {
+ case raa_dmpvr1_2rail:
+ info->pages = 2;
+ info->R[PSC_VOLTAGE_IN] = 3;
+ info->func[0] &= ~PMBUS_HAVE_VMON;
+ info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT;
+ info->groups = isl68137_attribute_groups;
+ break;
+ case raa_dmpvr2_1rail:
+ info->pages = 1;
+ info->read_word_data = raa_dmpvr2_read_word_data;
+ break;
+ case raa_dmpvr2_2rail:
+ info->pages = 2;
+ info->read_word_data = raa_dmpvr2_read_word_data;
+ break;
+ case raa_dmpvr2_3rail:
+ info->read_word_data = raa_dmpvr2_read_word_data;
+ break;
+ case raa_dmpvr2_hv:
+ info->pages = 1;
+ info->R[PSC_VOLTAGE_IN] = 1;
+ info->m[PSC_VOLTAGE_OUT] = 2;
+ info->R[PSC_VOLTAGE_OUT] = 2;
+ info->m[PSC_CURRENT_IN] = 2;
+ info->m[PSC_POWER] = 2;
+ info->R[PSC_POWER] = -1;
+ info->read_word_data = raa_dmpvr2_read_word_data;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return pmbus_do_probe(client, id, info);
}
-static const struct i2c_device_id isl68137_id[] = {
- {"isl68137", 0},
+static const struct i2c_device_id raa_dmpvr_id[] = {
+ {"isl68137", raa_dmpvr1_2rail},
+ {"isl68220", raa_dmpvr2_2rail},
+ {"isl68221", raa_dmpvr2_3rail},
+ {"isl68222", raa_dmpvr2_2rail},
+ {"isl68223", raa_dmpvr2_2rail},
+ {"isl68224", raa_dmpvr2_3rail},
+ {"isl68225", raa_dmpvr2_2rail},
+ {"isl68226", raa_dmpvr2_3rail},
+ {"isl68227", raa_dmpvr2_1rail},
+ {"isl68229", raa_dmpvr2_3rail},
+ {"isl68233", raa_dmpvr2_2rail},
+ {"isl68239", raa_dmpvr2_3rail},
+
+ {"isl69222", raa_dmpvr2_2rail},
+ {"isl69223", raa_dmpvr2_3rail},
+ {"isl69224", raa_dmpvr2_2rail},
+ {"isl69225", raa_dmpvr2_2rail},
+ {"isl69227", raa_dmpvr2_3rail},
+ {"isl69228", raa_dmpvr2_3rail},
+ {"isl69234", raa_dmpvr2_2rail},
+ {"isl69236", raa_dmpvr2_2rail},
+ {"isl69239", raa_dmpvr2_3rail},
+ {"isl69242", raa_dmpvr2_2rail},
+ {"isl69243", raa_dmpvr2_1rail},
+ {"isl69247", raa_dmpvr2_2rail},
+ {"isl69248", raa_dmpvr2_2rail},
+ {"isl69254", raa_dmpvr2_2rail},
+ {"isl69255", raa_dmpvr2_2rail},
+ {"isl69256", raa_dmpvr2_2rail},
+ {"isl69259", raa_dmpvr2_2rail},
+ {"isl69260", raa_dmpvr2_2rail},
+ {"isl69268", raa_dmpvr2_2rail},
+ {"isl69269", raa_dmpvr2_3rail},
+ {"isl69298", raa_dmpvr2_2rail},
+
+ {"raa228000", raa_dmpvr2_hv},
+ {"raa228004", raa_dmpvr2_hv},
+ {"raa228006", raa_dmpvr2_hv},
+ {"raa228228", raa_dmpvr2_2rail},
+ {"raa229001", raa_dmpvr2_2rail},
+ {"raa229004", raa_dmpvr2_2rail},
{}
};
-MODULE_DEVICE_TABLE(i2c, isl68137_id);
+MODULE_DEVICE_TABLE(i2c, raa_dmpvr_id);
/* This is the driver that will be inserted */
static struct i2c_driver isl68137_driver = {
@@ -159,11 +319,11 @@ static struct i2c_driver isl68137_driver = {
},
.probe = isl68137_probe,
.remove = pmbus_do_remove,
- .id_table = isl68137_id,
+ .id_table = raa_dmpvr_id,
};
module_i2c_driver(isl68137_driver);
MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
-MODULE_DESCRIPTION("PMBus driver for Intersil ISL68137");
+MODULE_DESCRIPTION("PMBus driver for Renesas digital multiphase voltage regulators");
MODULE_LICENSE("GPL");
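Every sensor class in raa_dmpvr_info uses the PMBus "direct" data format, in which the reported word Y encodes the real value X as Y = (m*X + b) * 10^R. The per-variant m/b/R overrides in isl68137_probe() therefore mostly change units; R[PSC_VOLTAGE_IN] = 2 with m = 1 and b = 0 means VIN is reported in steps of 10 mV. A standalone sketch of the decode, with an assumed raw reading (this mirrors what pmbus_core does internally, it is not driver code):

#include <stdio.h>

/* PMBus "direct" format: Y = (m*X + b) * 10^R, so X = (Y * 10^-R - b) / m */
static double pmbus_direct_to_real(int y, int m, int b, int r)
{
	double val = y;

	for (; r > 0; r--)
		val /= 10.0;
	for (; r < 0; r++)
		val *= 10.0;
	return (val - b) / m;
}

int main(void)
{
	/* m = 1, b = 0, R = 2 as above; raw reading 1205 -> 12.05 V */
	printf("%.2f V\n", pmbus_direct_to_real(1205, 1, 0, 2));
	return 0;
}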
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 05fce86f1f81..9e4cf0800186 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -211,7 +211,8 @@ struct lm25066_data {
#define to_lm25066_data(x) container_of(x, struct lm25066_data, info)
-static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
+static int lm25066_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
const struct lm25066_data *data = to_lm25066_data(info);
@@ -219,7 +220,7 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_VIRT_READ_VMON:
- ret = pmbus_read_word_data(client, 0, LM25066_READ_VAUX);
+ ret = pmbus_read_word_data(client, 0, 0xff, LM25066_READ_VAUX);
if (ret < 0)
break;
/* Adjust returned value to match VIN coefficients */
@@ -244,33 +245,40 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
}
break;
case PMBUS_READ_IIN:
- ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_IIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ LM25066_MFR_READ_IIN);
break;
case PMBUS_READ_PIN:
- ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_PIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ LM25066_MFR_READ_PIN);
break;
case PMBUS_IIN_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, 0,
+ ret = pmbus_read_word_data(client, 0, 0xff,
LM25066_MFR_IIN_OC_WARN_LIMIT);
break;
case PMBUS_PIN_OP_WARN_LIMIT:
- ret = pmbus_read_word_data(client, 0,
+ ret = pmbus_read_word_data(client, 0, 0xff,
LM25066_MFR_PIN_OP_WARN_LIMIT);
break;
case PMBUS_VIRT_READ_VIN_AVG:
- ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ LM25066_READ_AVG_VIN);
break;
case PMBUS_VIRT_READ_VOUT_AVG:
- ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VOUT);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ LM25066_READ_AVG_VOUT);
break;
case PMBUS_VIRT_READ_IIN_AVG:
- ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_IIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ LM25066_READ_AVG_IIN);
break;
case PMBUS_VIRT_READ_PIN_AVG:
- ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_PIN);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ LM25066_READ_AVG_PIN);
break;
case PMBUS_VIRT_READ_PIN_MAX:
- ret = pmbus_read_word_data(client, 0, LM25066_READ_PIN_PEAK);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ LM25066_READ_PIN_PEAK);
break;
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = 0;
@@ -288,13 +296,14 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
+static int lm25056_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
int ret;
switch (reg) {
case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
- ret = pmbus_read_word_data(client, 0,
+ ret = pmbus_read_word_data(client, 0, 0xff,
LM25056_VAUX_UV_WARN_LIMIT);
if (ret < 0)
break;
@@ -302,7 +311,7 @@ static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
- ret = pmbus_read_word_data(client, 0,
+ ret = pmbus_read_word_data(client, 0, 0xff,
LM25056_VAUX_OV_WARN_LIMIT);
if (ret < 0)
break;
@@ -310,7 +319,7 @@ static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
default:
- ret = lm25066_read_word_data(client, page, reg);
+ ret = lm25066_read_word_data(client, page, phase, reg);
break;
}
return ret;
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index a91ed01abb68..7b0e6b37e247 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -19,8 +19,15 @@
#include <linux/regulator/driver.h>
#include "pmbus.h"
-enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
- ltc3883, ltc3886, ltc3887, ltm2987, ltm4675, ltm4676, ltm4686 };
+enum chips {
+ /* Managers */
+ ltc2972, ltc2974, ltc2975, ltc2977, ltc2978, ltc2979, ltc2980,
+ /* Controllers */
+ ltc3880, ltc3882, ltc3883, ltc3884, ltc3886, ltc3887, ltc3889, ltc7880,
+ /* Modules */
+ ltm2987, ltm4664, ltm4675, ltm4676, ltm4677, ltm4678, ltm4680, ltm4686,
+ ltm4700,
+};
/* Common for all chips */
#define LTC2978_MFR_VOUT_PEAK 0xdd
@@ -43,9 +50,10 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
#define LTC3880_MFR_CLEAR_PEAKS 0xe3
#define LTC3880_MFR_TEMPERATURE2_PEAK 0xf4
-/* LTC3883 and LTC3886 only */
+/* LTC3883, LTC3884, LTC3886, LTC3889 and LTC7880 only */
#define LTC3883_MFR_IIN_PEAK 0xe1
+
/* LTC2975 only */
#define LTC2975_MFR_IIN_PEAK 0xc4
#define LTC2975_MFR_IIN_MIN 0xc5
@@ -54,27 +62,41 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
#define LTC2978_ID_MASK 0xfff0
+#define LTC2972_ID 0x0310
#define LTC2974_ID 0x0210
#define LTC2975_ID 0x0220
#define LTC2977_ID 0x0130
#define LTC2978_ID_REV1 0x0110 /* Early revision */
#define LTC2978_ID_REV2 0x0120
+#define LTC2979_ID_A 0x8060
+#define LTC2979_ID_B 0x8070
#define LTC2980_ID_A 0x8030 /* A/B for two die IDs */
#define LTC2980_ID_B 0x8040
#define LTC3880_ID 0x4020
#define LTC3882_ID 0x4200
#define LTC3882_ID_D1 0x4240 /* Dash 1 */
#define LTC3883_ID 0x4300
+#define LTC3884_ID 0x4C00
#define LTC3886_ID 0x4600
#define LTC3887_ID 0x4700
#define LTM2987_ID_A 0x8010 /* A/B for two die IDs */
#define LTM2987_ID_B 0x8020
+#define LTC3889_ID 0x4900
+#define LTC7880_ID 0x49E0
+#define LTM4664_ID 0x4120
#define LTM4675_ID 0x47a0
#define LTM4676_ID_REV1 0x4400
#define LTM4676_ID_REV2 0x4480
#define LTM4676A_ID 0x47e0
+#define LTM4677_ID_REV1 0x47B0
+#define LTM4677_ID_REV2 0x47D0
+#define LTM4678_ID_REV1 0x4100
+#define LTM4678_ID_REV2 0x4110
+#define LTM4680_ID 0x4140
#define LTM4686_ID 0x4770
+#define LTM4700_ID 0x4130
+#define LTC2972_NUM_PAGES 2
#define LTC2974_NUM_PAGES 4
#define LTC2978_NUM_PAGES 8
#define LTC3880_NUM_PAGES 2
@@ -151,7 +173,8 @@ static int ltc_wait_ready(struct i2c_client *client)
return -ETIMEDOUT;
}
-static int ltc_read_word_data(struct i2c_client *client, int page, int reg)
+static int ltc_read_word_data(struct i2c_client *client, int page, int phase,
+ int reg)
{
int ret;
@@ -159,7 +182,7 @@ static int ltc_read_word_data(struct i2c_client *client, int page, int reg)
if (ret < 0)
return ret;
- return pmbus_read_word_data(client, page, reg);
+ return pmbus_read_word_data(client, page, 0xff, reg);
}
static int ltc_read_byte_data(struct i2c_client *client, int page, int reg)
@@ -202,7 +225,7 @@ static int ltc_get_max(struct ltc2978_data *data, struct i2c_client *client,
{
int ret;
- ret = ltc_read_word_data(client, page, reg);
+ ret = ltc_read_word_data(client, page, 0xff, reg);
if (ret >= 0) {
if (lin11_to_val(ret) > lin11_to_val(*pmax))
*pmax = ret;
@@ -216,7 +239,7 @@ static int ltc_get_min(struct ltc2978_data *data, struct i2c_client *client,
{
int ret;
- ret = ltc_read_word_data(client, page, reg);
+ ret = ltc_read_word_data(client, page, 0xff, reg);
if (ret >= 0) {
if (lin11_to_val(ret) < lin11_to_val(*pmin))
*pmin = ret;
@@ -238,7 +261,8 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
&data->vin_max);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
- ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK);
+ ret = ltc_read_word_data(client, page, 0xff,
+ LTC2978_MFR_VOUT_PEAK);
if (ret >= 0) {
/*
* VOUT is 16 bit unsigned with fixed exponent,
@@ -269,7 +293,8 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
return ret;
}
-static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg)
+static int ltc2978_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct ltc2978_data *data = to_ltc2978_data(info);
@@ -281,7 +306,8 @@ static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg)
&data->vin_min);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
- ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_MIN);
+ ret = ltc_read_word_data(client, page, phase,
+ LTC2978_MFR_VOUT_MIN);
if (ret >= 0) {
/*
* VOUT_MIN is known to not be supported on some lots
@@ -314,7 +340,8 @@ static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int ltc2974_read_word_data(struct i2c_client *client, int page, int reg)
+static int ltc2974_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct ltc2978_data *data = to_ltc2978_data(info);
@@ -333,13 +360,14 @@ static int ltc2974_read_word_data(struct i2c_client *client, int page, int reg)
ret = 0;
break;
default:
- ret = ltc2978_read_word_data(client, page, reg);
+ ret = ltc2978_read_word_data(client, page, phase, reg);
break;
}
return ret;
}
-static int ltc2975_read_word_data(struct i2c_client *client, int page, int reg)
+static int ltc2975_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct ltc2978_data *data = to_ltc2978_data(info);
@@ -367,13 +395,14 @@ static int ltc2975_read_word_data(struct i2c_client *client, int page, int reg)
ret = 0;
break;
default:
- ret = ltc2978_read_word_data(client, page, reg);
+ ret = ltc2978_read_word_data(client, page, phase, reg);
break;
}
return ret;
}
-static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
+static int ltc3880_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct ltc2978_data *data = to_ltc2978_data(info);
@@ -405,7 +434,8 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int ltc3883_read_word_data(struct i2c_client *client, int page, int reg)
+static int ltc3883_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct ltc2978_data *data = to_ltc2978_data(info);
@@ -420,7 +450,7 @@ static int ltc3883_read_word_data(struct i2c_client *client, int page, int reg)
ret = 0;
break;
default:
- ret = ltc3880_read_word_data(client, page, reg);
+ ret = ltc3880_read_word_data(client, page, phase, reg);
break;
}
return ret;
@@ -492,20 +522,30 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
}
static const struct i2c_device_id ltc2978_id[] = {
+ {"ltc2972", ltc2972},
{"ltc2974", ltc2974},
{"ltc2975", ltc2975},
{"ltc2977", ltc2977},
{"ltc2978", ltc2978},
+ {"ltc2979", ltc2979},
{"ltc2980", ltc2980},
{"ltc3880", ltc3880},
{"ltc3882", ltc3882},
{"ltc3883", ltc3883},
+ {"ltc3884", ltc3884},
{"ltc3886", ltc3886},
{"ltc3887", ltc3887},
+ {"ltc3889", ltc3889},
+ {"ltc7880", ltc7880},
{"ltm2987", ltm2987},
+ {"ltm4664", ltm4664},
{"ltm4675", ltm4675},
{"ltm4676", ltm4676},
+ {"ltm4677", ltm4677},
+ {"ltm4678", ltm4678},
+ {"ltm4680", ltm4680},
{"ltm4686", ltm4686},
+ {"ltm4700", ltm4700},
{}
};
MODULE_DEVICE_TABLE(i2c, ltc2978_id);
@@ -555,7 +595,9 @@ static int ltc2978_get_id(struct i2c_client *client)
chip_id &= LTC2978_ID_MASK;
- if (chip_id == LTC2974_ID)
+ if (chip_id == LTC2972_ID)
+ return ltc2972;
+ else if (chip_id == LTC2974_ID)
return ltc2974;
else if (chip_id == LTC2975_ID)
return ltc2975;
@@ -563,6 +605,8 @@ static int ltc2978_get_id(struct i2c_client *client)
return ltc2977;
else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2)
return ltc2978;
+ else if (chip_id == LTC2979_ID_A || chip_id == LTC2979_ID_B)
+ return ltc2979;
else if (chip_id == LTC2980_ID_A || chip_id == LTC2980_ID_B)
return ltc2980;
else if (chip_id == LTC3880_ID)
@@ -571,19 +615,35 @@ static int ltc2978_get_id(struct i2c_client *client)
return ltc3882;
else if (chip_id == LTC3883_ID)
return ltc3883;
+ else if (chip_id == LTC3884_ID)
+ return ltc3884;
else if (chip_id == LTC3886_ID)
return ltc3886;
else if (chip_id == LTC3887_ID)
return ltc3887;
+ else if (chip_id == LTC3889_ID)
+ return ltc3889;
+ else if (chip_id == LTC7880_ID)
+ return ltc7880;
else if (chip_id == LTM2987_ID_A || chip_id == LTM2987_ID_B)
return ltm2987;
+ else if (chip_id == LTM4664_ID)
+ return ltm4664;
else if (chip_id == LTM4675_ID)
return ltm4675;
else if (chip_id == LTM4676_ID_REV1 || chip_id == LTM4676_ID_REV2 ||
chip_id == LTM4676A_ID)
return ltm4676;
+ else if (chip_id == LTM4677_ID_REV1 || chip_id == LTM4677_ID_REV2)
+ return ltm4677;
+ else if (chip_id == LTM4678_ID_REV1 || chip_id == LTM4678_ID_REV2)
+ return ltm4678;
+ else if (chip_id == LTM4680_ID)
+ return ltm4680;
else if (chip_id == LTM4686_ID)
return ltm4686;
+ else if (chip_id == LTM4700_ID)
+ return ltm4700;
dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
return -ENODEV;
@@ -637,6 +697,19 @@ static int ltc2978_probe(struct i2c_client *client,
data->temp2_max = 0x7c00;
switch (data->id) {
+ case ltc2972:
+ info->read_word_data = ltc2975_read_word_data;
+ info->pages = LTC2972_NUM_PAGES;
+ info->func[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP2;
+ for (i = 0; i < info->pages; i++) {
+ info->func[i] |= PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
+ }
+ break;
case ltc2974:
info->read_word_data = ltc2974_read_word_data;
info->pages = LTC2974_NUM_PAGES;
@@ -662,8 +735,10 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
}
break;
+
case ltc2977:
case ltc2978:
+ case ltc2979:
case ltc2980:
case ltm2987:
info->read_word_data = ltc2978_read_word_data;
@@ -680,6 +755,7 @@ static int ltc2978_probe(struct i2c_client *client,
case ltc3887:
case ltm4675:
case ltm4676:
+ case ltm4677:
case ltm4686:
data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
info->read_word_data = ltc3880_read_word_data;
@@ -721,7 +797,14 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
| PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
break;
+ case ltc3884:
case ltc3886:
+ case ltc3889:
+ case ltc7880:
+ case ltm4664:
+ case ltm4678:
+ case ltm4680:
+ case ltm4700:
data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
info->read_word_data = ltc3883_read_word_data;
info->pages = LTC3880_NUM_PAGES;
@@ -752,22 +835,33 @@ static int ltc2978_probe(struct i2c_client *client,
return pmbus_do_probe(client, id, info);
}
+
#ifdef CONFIG_OF
static const struct of_device_id ltc2978_of_match[] = {
+ { .compatible = "lltc,ltc2972" },
{ .compatible = "lltc,ltc2974" },
{ .compatible = "lltc,ltc2975" },
{ .compatible = "lltc,ltc2977" },
{ .compatible = "lltc,ltc2978" },
+ { .compatible = "lltc,ltc2979" },
{ .compatible = "lltc,ltc2980" },
{ .compatible = "lltc,ltc3880" },
{ .compatible = "lltc,ltc3882" },
{ .compatible = "lltc,ltc3883" },
+ { .compatible = "lltc,ltc3884" },
{ .compatible = "lltc,ltc3886" },
{ .compatible = "lltc,ltc3887" },
+ { .compatible = "lltc,ltc3889" },
+ { .compatible = "lltc,ltc7880" },
{ .compatible = "lltc,ltm2987" },
+ { .compatible = "lltc,ltm4664" },
{ .compatible = "lltc,ltm4675" },
{ .compatible = "lltc,ltm4676" },
+ { .compatible = "lltc,ltm4677" },
+ { .compatible = "lltc,ltm4678" },
+ { .compatible = "lltc,ltm4680" },
{ .compatible = "lltc,ltm4686" },
+ { .compatible = "lltc,ltm4700" },
{ }
};
MODULE_DEVICE_TABLE(of, ltc2978_of_match);
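ltc2978_get_id() above grows one if/else arm per new chip, with some parts (LTC2979, LTC2980, LTM2987) owning two die IDs after masking with LTC2978_ID_MASK. Since the mapping is pure data, the same lookup can be sketched table-driven; this is only an illustration of the ID scheme, not how the driver is actually structured:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct ltc_id_entry {
	u16 id;
	int chip;	/* enum chips value */
};

static const struct ltc_id_entry ltc_id_table[] = {
	{ 0x0310, ltc2972 },
	{ 0x8060, ltc2979 },	/* die A */
	{ 0x8070, ltc2979 },	/* die B */
	{ 0x4130, ltm4700 },
	/* ... remaining IDs as in the defines above ... */
};

static int ltc_lookup_id(u16 chip_id)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ltc_id_table); i++)
		if (ltc_id_table[i].id == chip_id)
			return ltc_id_table[i].chip;
	return -ENODEV;
}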
diff --git a/drivers/hwmon/pmbus/ltc3815.c b/drivers/hwmon/pmbus/ltc3815.c
index b83a18a58364..3036263e0a66 100644
--- a/drivers/hwmon/pmbus/ltc3815.c
+++ b/drivers/hwmon/pmbus/ltc3815.c
@@ -55,7 +55,7 @@ static int ltc3815_write_byte(struct i2c_client *client, int page, u8 reg)
* LTC3815 does not support the CLEAR_FAULTS command.
* Emulate it by clearing the status register.
*/
- ret = pmbus_read_word_data(client, 0, PMBUS_STATUS_WORD);
+ ret = pmbus_read_word_data(client, 0, 0xff, PMBUS_STATUS_WORD);
if (ret > 0) {
pmbus_write_word_data(client, 0, PMBUS_STATUS_WORD,
ret);
@@ -69,25 +69,31 @@ static int ltc3815_write_byte(struct i2c_client *client, int page, u8 reg)
return ret;
}
-static int ltc3815_read_word_data(struct i2c_client *client, int page, int reg)
+static int ltc3815_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
int ret;
switch (reg) {
case PMBUS_VIRT_READ_VIN_MAX:
- ret = pmbus_read_word_data(client, page, LTC3815_MFR_VIN_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ LTC3815_MFR_VIN_PEAK);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, page, LTC3815_MFR_VOUT_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ LTC3815_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
- ret = pmbus_read_word_data(client, page, LTC3815_MFR_TEMP_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ LTC3815_MFR_TEMP_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
- ret = pmbus_read_word_data(client, page, LTC3815_MFR_IOUT_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ LTC3815_MFR_IOUT_PEAK);
break;
case PMBUS_VIRT_READ_IIN_MAX:
- ret = pmbus_read_word_data(client, page, LTC3815_MFR_IIN_PEAK);
+ ret = pmbus_read_word_data(client, page, phase,
+ LTC3815_MFR_IIN_PEAK);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
case PMBUS_VIRT_RESET_VIN_HISTORY:
diff --git a/drivers/hwmon/pmbus/max16064.c b/drivers/hwmon/pmbus/max16064.c
index b3e7b8d2e69d..288e93f74c28 100644
--- a/drivers/hwmon/pmbus/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -15,17 +15,18 @@
#define MAX16064_MFR_VOUT_PEAK 0xd4
#define MAX16064_MFR_TEMPERATURE_PEAK 0xd6
-static int max16064_read_word_data(struct i2c_client *client, int page, int reg)
+static int max16064_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
int ret;
switch (reg) {
case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX16064_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX16064_MFR_TEMPERATURE_PEAK);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
index 294e2212f61e..c0bb05487e0e 100644
--- a/drivers/hwmon/pmbus/max20730.c
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -85,7 +85,8 @@ static u32 max_current[][5] = {
[max20743] = { 18900, 24100, 29200, 34100 },
};
-static int max20730_read_word_data(struct i2c_client *client, int page, int reg)
+static int max20730_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
const struct max20730_data *data = to_max20730_data(info);
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 254b0f98c755..d9aa5c873d21 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -72,7 +72,7 @@ static int max31785_read_long_data(struct i2c_client *client, int page,
cmdbuf[0] = reg;
- rc = pmbus_set_page(client, page);
+ rc = pmbus_set_page(client, page, 0xff);
if (rc < 0)
return rc;
@@ -110,7 +110,7 @@ static int max31785_get_pwm_mode(struct i2c_client *client, int page)
if (config < 0)
return config;
- command = pmbus_read_word_data(client, page, PMBUS_FAN_COMMAND_1);
+ command = pmbus_read_word_data(client, page, 0xff, PMBUS_FAN_COMMAND_1);
if (command < 0)
return command;
@@ -126,7 +126,7 @@ static int max31785_get_pwm_mode(struct i2c_client *client, int page)
}
static int max31785_read_word_data(struct i2c_client *client, int page,
- int reg)
+ int phase, int reg)
{
u32 val;
int rv;
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 5c63a6600729..18b4e071067f 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -41,7 +41,8 @@ struct max34440_data {
#define to_max34440_data(x) container_of(x, struct max34440_data, info)
-static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
+static int max34440_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
int ret;
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
@@ -49,44 +50,44 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_VIRT_READ_VOUT_MIN:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX34440_MFR_VOUT_MIN);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
if (data->id != max34446 && data->id != max34451)
return -ENXIO;
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_IOUT_AVG);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX34440_MFR_IOUT_PEAK);
break;
case PMBUS_VIRT_READ_POUT_AVG:
if (data->id != max34446)
return -ENXIO;
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_POUT_AVG);
break;
case PMBUS_VIRT_READ_POUT_MAX:
if (data->id != max34446)
return -ENXIO;
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_POUT_PEAK);
break;
case PMBUS_VIRT_READ_TEMP_AVG:
if (data->id != max34446 && data->id != max34460 &&
data->id != max34461)
return -ENXIO;
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_TEMPERATURE_AVG);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
- ret = pmbus_read_word_data(client, page,
+ ret = pmbus_read_word_data(client, page, phase,
MAX34440_MFR_TEMPERATURE_PEAK);
break;
case PMBUS_VIRT_RESET_POUT_HISTORY:
@@ -159,14 +160,14 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
int mfg_status;
if (page >= 0) {
- ret = pmbus_set_page(client, page);
+ ret = pmbus_set_page(client, page, 0xff);
if (ret < 0)
return ret;
}
switch (reg) {
case PMBUS_STATUS_IOUT:
- mfg_status = pmbus_read_word_data(client, 0,
+ mfg_status = pmbus_read_word_data(client, 0, 0xff,
PMBUS_STATUS_MFR_SPECIFIC);
if (mfg_status < 0)
return mfg_status;
@@ -176,7 +177,7 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
ret |= PB_IOUT_OC_FAULT;
break;
case PMBUS_STATUS_TEMPERATURE:
- mfg_status = pmbus_read_word_data(client, 0,
+ mfg_status = pmbus_read_word_data(client, 0, 0xff,
PMBUS_STATUS_MFR_SPECIFIC);
if (mfg_status < 0)
return mfg_status;
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index bc5f4cb6450e..643ccfc05106 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -28,7 +28,8 @@
#define MAX8688_STATUS_OT_FAULT BIT(13)
#define MAX8688_STATUS_OT_WARNING BIT(14)
-static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
+static int max8688_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
int ret;
@@ -37,13 +38,15 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, 0, MAX8688_MFR_VOUT_PEAK);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ MAX8688_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
- ret = pmbus_read_word_data(client, 0, MAX8688_MFR_IOUT_PEAK);
+ ret = pmbus_read_word_data(client, 0, 0xff,
+ MAX8688_MFR_IOUT_PEAK);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
- ret = pmbus_read_word_data(client, 0,
+ ret = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFR_TEMPERATURE_PEAK);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
@@ -94,7 +97,7 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_STATUS_VOUT:
- mfg_status = pmbus_read_word_data(client, 0,
+ mfg_status = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFG_STATUS);
if (mfg_status < 0)
return mfg_status;
@@ -108,7 +111,7 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
ret |= PB_VOLTAGE_OV_FAULT;
break;
case PMBUS_STATUS_IOUT:
- mfg_status = pmbus_read_word_data(client, 0,
+ mfg_status = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFG_STATUS);
if (mfg_status < 0)
return mfg_status;
@@ -120,7 +123,7 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
ret |= PB_IOUT_OC_FAULT;
break;
case PMBUS_STATUS_TEMPERATURE:
- mfg_status = pmbus_read_word_data(client, 0,
+ mfg_status = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFG_STATUS);
if (mfg_status < 0)
return mfg_status;
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 51e8312b6c2d..6d384e8ee1db 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -102,10 +102,10 @@ static int pmbus_identify(struct i2c_client *client,
int page;
for (page = 1; page < PMBUS_PAGES; page++) {
- if (pmbus_set_page(client, page) < 0)
+ if (pmbus_set_page(client, page, 0xff) < 0)
break;
}
- pmbus_set_page(client, 0);
+ pmbus_set_page(client, 0, 0xff);
info->pages = page;
} else {
info->pages = 1;
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 13b34bd67f23..18e06fc6c53f 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -119,6 +119,9 @@ enum pmbus_regs {
PMBUS_MFR_DATE = 0x9D,
PMBUS_MFR_SERIAL = 0x9E,
+ PMBUS_IC_DEVICE_ID = 0xAD,
+ PMBUS_IC_DEVICE_REV = 0xAE,
+
/*
* Virtual registers.
* Useful to support attributes which are not supported by standard PMBus
@@ -359,6 +362,7 @@ enum pmbus_sensor_classes {
};
#define PMBUS_PAGES 32 /* Per PMBus specification */
+#define PMBUS_PHASES 8 /* Maximum number of phases per page */
/* Functionality bit mask */
#define PMBUS_HAVE_VIN BIT(0)
@@ -385,13 +389,15 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_PWM34 BIT(21)
#define PMBUS_HAVE_SAMPLES BIT(22)
-#define PMBUS_PAGE_VIRTUAL BIT(31)
+#define PMBUS_PHASE_VIRTUAL BIT(30) /* Phases on this page are virtual */
+#define PMBUS_PAGE_VIRTUAL BIT(31) /* Page is virtual */
enum pmbus_data_format { linear = 0, direct, vid };
enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
struct pmbus_driver_info {
int pages; /* Total number of pages */
+ u8 phases[PMBUS_PAGES]; /* Number of phases per page */
enum pmbus_data_format format[PSC_NUM_CLASSES];
enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */
/*
@@ -403,6 +409,7 @@ struct pmbus_driver_info {
int R[PSC_NUM_CLASSES]; /* exponent */
u32 func[PMBUS_PAGES]; /* Functionality, per page */
+ u32 pfunc[PMBUS_PHASES];/* Functionality, per phase */
/*
* The following functions map manufacturing specific register values
* to PMBus standard register values. Specify only if mapping is
@@ -415,7 +422,8 @@ struct pmbus_driver_info {
* the standard register.
*/
int (*read_byte_data)(struct i2c_client *client, int page, int reg);
- int (*read_word_data)(struct i2c_client *client, int page, int reg);
+ int (*read_word_data)(struct i2c_client *client, int page, int phase,
+ int reg);
int (*write_word_data)(struct i2c_client *client, int page, int reg,
u16 word);
int (*write_byte)(struct i2c_client *client, int page, u8 value);
@@ -454,9 +462,11 @@ extern const struct regulator_ops pmbus_regulator_ops;
/* Function declarations */
void pmbus_clear_cache(struct i2c_client *client);
-int pmbus_set_page(struct i2c_client *client, int page);
-int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg);
-int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word);
+int pmbus_set_page(struct i2c_client *client, int page, int phase);
+int pmbus_read_word_data(struct i2c_client *client, int page, int phase,
+ u8 reg);
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
+ u16 word);
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg,
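Putting the new pmbus.h fields together: a multiphase driver sets phases[page] to the number of phases behind a page and pfunc[phase] to the functionality exposed per phase, next to the usual per-page func[]. A hedged sketch of a single-page, two-phase configuration; the chip name and callback are hypothetical (the real tps53679.c conversion appears further below):

#include "pmbus.h"

static int mychip_read_word_data(struct i2c_client *client, int page,
				 int phase, int reg);	/* as sketched earlier */

/* One page exposing two phases, each contributing a per-phase IOUT */
static struct pmbus_driver_info mychip_info = {
	.pages = 1,
	.phases[0] = 2,
	.format[PSC_CURRENT_OUT] = linear,
	.func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
	.pfunc[0] = PMBUS_HAVE_IOUT,
	.pfunc[1] = PMBUS_HAVE_IOUT,
	.read_word_data = mychip_read_word_data,
};

With this, pmbus_core registers the page-level current plus one input per phase; phase sensors get a ".<phase>" label suffix and, per the core changes below, no limit or alarm attributes.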
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index d9c17feb7b4a..8d321bf7d15b 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -49,6 +49,7 @@ struct pmbus_sensor {
char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
struct device_attribute attribute;
u8 page; /* page number */
+ u8 phase; /* phase number, 0xff for all phases */
u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
@@ -109,6 +110,7 @@ struct pmbus_data {
int (*read_status)(struct i2c_client *client, int page);
u8 currpage;
+ u8 currphase; /* current phase, 0xff for all */
};
struct pmbus_debugfs_entry {
@@ -146,15 +148,16 @@ void pmbus_clear_cache(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_clear_cache);
-int pmbus_set_page(struct i2c_client *client, int page)
+int pmbus_set_page(struct i2c_client *client, int page, int phase)
{
struct pmbus_data *data = i2c_get_clientdata(client);
int rv;
- if (page < 0 || page == data->currpage)
+ if (page < 0)
return 0;
- if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL)) {
+ if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL) &&
+ data->info->pages > 1 && page != data->currpage) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
if (rv < 0)
return rv;
@@ -166,9 +169,17 @@ int pmbus_set_page(struct i2c_client *client, int page)
if (rv != page)
return -EIO;
}
-
data->currpage = page;
+ if (data->info->phases[page] && data->currphase != phase &&
+ !(data->info->func[page] & PMBUS_PHASE_VIRTUAL)) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
+ phase);
+ if (rv)
+ return rv;
+ }
+ data->currphase = phase;
+
return 0;
}
EXPORT_SYMBOL_GPL(pmbus_set_page);
@@ -177,7 +188,7 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
int rv;
- rv = pmbus_set_page(client, page);
+ rv = pmbus_set_page(client, page, 0xff);
if (rv < 0)
return rv;
@@ -208,7 +219,7 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
{
int rv;
- rv = pmbus_set_page(client, page);
+ rv = pmbus_set_page(client, page, 0xff);
if (rv < 0)
return rv;
@@ -286,11 +297,11 @@ int pmbus_update_fan(struct i2c_client *client, int page, int id,
}
EXPORT_SYMBOL_GPL(pmbus_update_fan);
-int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
+int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
{
int rv;
- rv = pmbus_set_page(client, page);
+ rv = pmbus_set_page(client, page, phase);
if (rv < 0)
return rv;
@@ -320,14 +331,15 @@ static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
* _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
*/
-static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
+static int _pmbus_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
int status;
if (info->read_word_data) {
- status = info->read_word_data(client, page, reg);
+ status = info->read_word_data(client, page, phase, reg);
if (status != -ENODATA)
return status;
}
@@ -335,14 +347,20 @@ static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
if (reg >= PMBUS_VIRT_BASE)
return pmbus_read_virt_reg(client, page, reg);
- return pmbus_read_word_data(client, page, reg);
+ return pmbus_read_word_data(client, page, phase, reg);
+}
+
+/* Same as above, but without phase parameter, for use in check functions */
+static int __pmbus_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ return _pmbus_read_word_data(client, page, 0xff, reg);
}
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
- rv = pmbus_set_page(client, page);
+ rv = pmbus_set_page(client, page, 0xff);
if (rv < 0)
return rv;
@@ -354,7 +372,7 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
{
int rv;
- rv = pmbus_set_page(client, page);
+ rv = pmbus_set_page(client, page, 0xff);
if (rv < 0)
return rv;
@@ -440,7 +458,7 @@ static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
have_rpm = !!(config & pmbus_fan_rpm_mask[id]);
if (want_rpm == have_rpm)
- return pmbus_read_word_data(client, page,
+ return pmbus_read_word_data(client, page, 0xff,
pmbus_fan_command_registers[id]);
/* Can't sensibly map between RPM and PWM, just return zero */
@@ -530,7 +548,7 @@ EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
{
- return pmbus_check_register(client, _pmbus_read_word_data, page, reg);
+ return pmbus_check_register(client, __pmbus_read_word_data, page, reg);
}
EXPORT_SYMBOL_GPL(pmbus_check_word_register);
@@ -595,6 +613,7 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
sensor->data
= _pmbus_read_word_data(client,
sensor->page,
+ sensor->phase,
sensor->reg);
}
pmbus_clear_faults(client);
@@ -1076,7 +1095,8 @@ static int pmbus_add_boolean(struct pmbus_data *data,
static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
const char *name, const char *type,
- int seq, int page, int reg,
+ int seq, int page, int phase,
+ int reg,
enum pmbus_sensor_classes class,
bool update, bool readonly,
bool convert)
@@ -1100,6 +1120,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
readonly = true;
sensor->page = page;
+ sensor->phase = phase;
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
@@ -1119,7 +1140,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
static int pmbus_add_label(struct pmbus_data *data,
const char *name, int seq,
- const char *lstring, int index)
+ const char *lstring, int index, int phase)
{
struct pmbus_label *label;
struct device_attribute *a;
@@ -1131,11 +1152,21 @@ static int pmbus_add_label(struct pmbus_data *data,
a = &label->attribute;
snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
- if (!index)
- strncpy(label->label, lstring, sizeof(label->label) - 1);
- else
- snprintf(label->label, sizeof(label->label), "%s%d", lstring,
- index);
+ if (!index) {
+ if (phase == 0xff)
+ strncpy(label->label, lstring,
+ sizeof(label->label) - 1);
+ else
+ snprintf(label->label, sizeof(label->label), "%s.%d",
+ lstring, phase);
+ } else {
+ if (phase == 0xff)
+ snprintf(label->label, sizeof(label->label), "%s%d",
+ lstring, index);
+ else
+ snprintf(label->label, sizeof(label->label), "%s%d.%d",
+ lstring, index, phase);
+ }
pmbus_dev_attr_init(a, label->name, 0444, pmbus_show_label, NULL);
return pmbus_add_attribute(data, &a->attr);
@@ -1200,7 +1231,7 @@ static int pmbus_add_limit_attrs(struct i2c_client *client,
for (i = 0; i < nlimit; i++) {
if (pmbus_check_word_register(client, page, l->reg)) {
curr = pmbus_add_sensor(data, name, l->attr, index,
- page, l->reg, attr->class,
+ page, 0xff, l->reg, attr->class,
attr->update || l->update,
false, true);
if (!curr)
@@ -1227,7 +1258,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
struct pmbus_data *data,
const struct pmbus_driver_info *info,
const char *name,
- int index, int page,
+ int index, int page, int phase,
const struct pmbus_sensor_attr *attr,
bool paged)
{
@@ -1237,15 +1268,16 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
if (attr->label) {
ret = pmbus_add_label(data, name, index, attr->label,
- paged ? page + 1 : 0);
+ paged ? page + 1 : 0, phase);
if (ret)
return ret;
}
- base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
- attr->class, true, true, true);
+ base = pmbus_add_sensor(data, name, "input", index, page, phase,
+ attr->reg, attr->class, true, true, true);
if (!base)
return -ENOMEM;
- if (attr->sfunc) {
+ /* No limit or alarm attributes for phase-specific sensors */
+ if (attr->sfunc && phase == 0xff) {
ret = pmbus_add_limit_attrs(client, data, info, name,
index, page, base, attr);
if (ret < 0)
@@ -1315,10 +1347,25 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
continue;
ret = pmbus_add_sensor_attrs_one(client, data, info,
name, index, page,
- attrs, paged);
+ 0xff, attrs, paged);
if (ret)
return ret;
index++;
+ if (info->phases[page]) {
+ int phase;
+
+ for (phase = 0; phase < info->phases[page];
+ phase++) {
+ if (!(info->pfunc[phase] & attrs->func))
+ continue;
+ ret = pmbus_add_sensor_attrs_one(client,
+ data, info, name, index, page,
+ phase, attrs, paged);
+ if (ret)
+ return ret;
+ index++;
+ }
+ }
}
attrs++;
}
@@ -1822,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
struct pmbus_sensor *sensor;
sensor = pmbus_add_sensor(data, "fan", "target", index, page,
- PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
+ PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN,
false, false, true);
if (!sensor)
@@ -1833,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
return 0;
sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
- PMBUS_VIRT_PWM_1 + id, PSC_PWM,
+ PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM,
false, false, true);
if (!sensor)
return -ENOMEM;
sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
- PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
+ PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM,
true, false, false);
if (!sensor)
@@ -1882,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
continue;
if (pmbus_add_sensor(data, "fan", "input", index,
- page, pmbus_fan_registers[f],
+ page, pmbus_fan_registers[f], 0xff,
PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
@@ -1964,7 +2011,7 @@ static ssize_t pmbus_show_samples(struct device *dev,
struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_samples_reg *reg = to_samples_reg(devattr);
- val = _pmbus_read_word_data(client, reg->page, reg->attr->reg);
+ val = _pmbus_read_word_data(client, reg->page, 0xff, reg->attr->reg);
if (val < 0)
return val;
@@ -2120,7 +2167,7 @@ static int pmbus_read_status_byte(struct i2c_client *client, int page)
static int pmbus_read_status_word(struct i2c_client *client, int page)
{
- return _pmbus_read_word_data(client, page, PMBUS_STATUS_WORD);
+ return _pmbus_read_word_data(client, page, 0xff, PMBUS_STATUS_WORD);
}
static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
@@ -2482,6 +2529,8 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
if (pdata)
data->flags = pdata->flags;
data->info = info;
+ data->currpage = 0xff;
+ data->currphase = 0xfe;
ret = pmbus_init_common(client, data, info);
if (ret < 0)
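Two details of the core changes above are easy to miss. First, currpage and currphase are initialized to 0xff and 0xfe so that neither can match the first real request, forcing the initial pmbus_set_page() to actually program the PAGE (and, where applicable, PHASE) register instead of trusting stale cached state. Second, pmbus_add_label() now appends the phase number to the label. A userspace sketch mirroring the label logic, with illustrative values:

#include <stdio.h>

/* Mirror of the pmbus_add_label() format strings; 0xff = not phase specific */
static void make_label(char *buf, size_t len, const char *lstring,
		       int index, int phase)
{
	if (!index) {
		if (phase == 0xff)
			snprintf(buf, len, "%s", lstring);
		else
			snprintf(buf, len, "%s.%d", lstring, phase);
	} else {
		if (phase == 0xff)
			snprintf(buf, len, "%s%d", lstring, index);
		else
			snprintf(buf, len, "%s%d.%d", lstring, index, phase);
	}
}

int main(void)
{
	char buf[32];

	make_label(buf, sizeof(buf), "iout", 1, 0xff);
	printf("%s\n", buf);	/* "iout1"   - whole rail */
	make_label(buf, sizeof(buf), "iout", 1, 0);
	printf("%s\n", buf);	/* "iout1.0" - phase 0 of that rail */
	return 0;
}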
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 9c22e9013dd7..157c99ffb52b 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -6,13 +6,21 @@
* Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
*/
+#include <linux/bits.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include "pmbus.h"
+enum chips {
+ tps53647, tps53667, tps53679, tps53681, tps53688
+};
+
+#define TPS53647_PAGE_NUM 1
+
#define TPS53679_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */
#define TPS53679_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */
#define TPS53679_PROT_VR13_10MV 0x04 /* VR13.0 mode, 10-mV DAC */
@@ -20,13 +28,19 @@
#define TPS53679_PROT_VR13_5MV 0x07 /* VR13.0 mode, 5-mV DAC */
#define TPS53679_PAGE_NUM 2
-static int tps53679_identify(struct i2c_client *client,
- struct pmbus_driver_info *info)
+#define TPS53681_DEVICE_ID 0x81
+
+#define TPS53681_PMBUS_REVISION 0x33
+
+#define TPS53681_MFR_SPECIFIC_20 0xe4 /* Number of phases, per page */
+
+static int tps53679_identify_mode(struct i2c_client *client,
+ struct pmbus_driver_info *info)
{
u8 vout_params;
int i, ret;
- for (i = 0; i < TPS53679_PAGE_NUM; i++) {
+ for (i = 0; i < info->pages; i++) {
/* Read the register with VOUT scaling value.*/
ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
if (ret < 0)
@@ -52,48 +66,180 @@ static int tps53679_identify(struct i2c_client *client,
return 0;
}
+static int tps53679_identify_phases(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+
+ /* On TPS53681, only channel A provides per-phase output current */
+ ret = pmbus_read_byte_data(client, 0, TPS53681_MFR_SPECIFIC_20);
+ if (ret < 0)
+ return ret;
+ info->phases[0] = (ret & 0x07) + 1;
+
+ return 0;
+}
+
+static int tps53679_identify_chip(struct i2c_client *client,
+ u8 revision, u16 id)
+{
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ ret = pmbus_read_byte_data(client, 0, PMBUS_REVISION);
+ if (ret < 0)
+ return ret;
+ if (ret != revision) {
+ dev_err(&client->dev, "Unexpected PMBus revision 0x%x\n", ret);
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf);
+ if (ret < 0)
+ return ret;
+ if (ret != 1 || buf[0] != id) {
+ dev_err(&client->dev, "Unexpected device ID 0x%x\n", buf[0]);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/*
+ * Common identification function for chips with multi-phase support.
+ * Since those chips have special configuration registers, we want to have
+ * some level of reassurance that we are really talking with the chip
+ * being probed. Check PMBus revision and chip ID.
+ */
+static int tps53679_identify_multiphase(struct i2c_client *client,
+ struct pmbus_driver_info *info,
+ int pmbus_rev, int device_id)
+{
+ int ret;
+
+ ret = tps53679_identify_chip(client, pmbus_rev, device_id);
+ if (ret < 0)
+ return ret;
+
+ ret = tps53679_identify_mode(client, info);
+ if (ret < 0)
+ return ret;
+
+ return tps53679_identify_phases(client, info);
+}
+
+static int tps53679_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ return tps53679_identify_mode(client, info);
+}
+
+static int tps53681_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ return tps53679_identify_multiphase(client, info,
+ TPS53681_PMBUS_REVISION,
+ TPS53681_DEVICE_ID);
+}
+
+static int tps53681_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ /*
+ * For reading the total output current (READ_IOUT) for all phases,
+ * the chip datasheet is a bit vague. It says "PHASE must be set to
+ * FFh to access all phases simultaneously. PHASE may also be set to
+ * 80h readack (!) the total phase current".
+ * Experiments show that the command does _not_ report the total
+ * current for all phases if the phase is set to 0xff. Instead, it
+ * appears to report the current of one of the phases. Override phase
+ * parameter with 0x80 when reading the total output current on page 0.
+ */
+ if (reg == PMBUS_READ_IOUT && page == 0 && phase == 0xff)
+ return pmbus_read_word_data(client, page, 0x80, reg);
+ return -ENODATA;
+}
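The -ENODATA return here is the pmbus convention for "no override": it tells the core to fall back to the generic word read. A simplified sketch of that convention, under an assumption about how the core consumes the hook (pmbus_core_read_word is a hypothetical name, not the real core function):

/*
 * Hypothetical core-side sketch: a chip hook returning -ENODATA makes
 * the core fall back to a plain pmbus_read_word_data() call.
 */
static int pmbus_core_read_word(struct i2c_client *client, int page,
				int phase, int reg)
{
	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
	int ret = -ENODATA;

	if (info->read_word_data)
		ret = info->read_word_data(client, page, phase, reg);
	if (ret == -ENODATA)
		ret = pmbus_read_word_data(client, page, phase, reg);
	return ret;
}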
+
static struct pmbus_driver_info tps53679_info = {
- .pages = TPS53679_PAGE_NUM,
.format[PSC_VOLTAGE_IN] = linear,
.format[PSC_VOLTAGE_OUT] = vid,
.format[PSC_TEMPERATURE] = linear,
.format[PSC_CURRENT_OUT] = linear,
.format[PSC_POWER] = linear,
- .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
PMBUS_HAVE_POUT,
- .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
PMBUS_HAVE_POUT,
- .identify = tps53679_identify,
+ .pfunc[0] = PMBUS_HAVE_IOUT,
+ .pfunc[1] = PMBUS_HAVE_IOUT,
+ .pfunc[2] = PMBUS_HAVE_IOUT,
+ .pfunc[3] = PMBUS_HAVE_IOUT,
+ .pfunc[4] = PMBUS_HAVE_IOUT,
+ .pfunc[5] = PMBUS_HAVE_IOUT,
};
static int tps53679_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct pmbus_driver_info *info;
+ enum chips chip_id;
+
+ if (dev->of_node)
+ chip_id = (enum chips)of_device_get_match_data(dev);
+ else
+ chip_id = id->driver_data;
- info = devm_kmemdup(&client->dev, &tps53679_info, sizeof(*info),
- GFP_KERNEL);
+ info = devm_kmemdup(dev, &tps53679_info, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ switch (chip_id) {
+ case tps53647:
+ case tps53667:
+ info->pages = TPS53647_PAGE_NUM;
+ info->identify = tps53679_identify;
+ break;
+ case tps53679:
+ case tps53688:
+ info->pages = TPS53679_PAGE_NUM;
+ info->identify = tps53679_identify;
+ break;
+ case tps53681:
+ info->pages = TPS53679_PAGE_NUM;
+ info->phases[0] = 6;
+ info->identify = tps53681_identify;
+ info->read_word_data = tps53681_read_word_data;
+ break;
+ default:
+ return -ENODEV;
+ }
+
return pmbus_do_probe(client, id, info);
}
static const struct i2c_device_id tps53679_id[] = {
- {"tps53679", 0},
- {"tps53688", 0},
+ {"tps53647", tps53647},
+ {"tps53667", tps53667},
+ {"tps53679", tps53679},
+ {"tps53681", tps53681},
+ {"tps53688", tps53688},
{}
};
MODULE_DEVICE_TABLE(i2c, tps53679_id);
static const struct of_device_id __maybe_unused tps53679_of_match[] = {
- {.compatible = "ti,tps53679"},
- {.compatible = "ti,tps53688"},
+ {.compatible = "ti,tps53647", .data = (void *)tps53647},
+ {.compatible = "ti,tps53667", .data = (void *)tps53667},
+ {.compatible = "ti,tps53679", .data = (void *)tps53679},
+ {.compatible = "ti,tps53681", .data = (void *)tps53681},
+ {.compatible = "ti,tps53688", .data = (void *)tps53688},
{}
};
MODULE_DEVICE_TABLE(of, tps53679_of_match);
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 23ea3415f166..81f4c4f166cd 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -370,7 +370,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
#ifdef CONFIG_DEBUG_FS
static int ucd9000_get_mfr_status(struct i2c_client *client, u8 *buffer)
{
- int ret = pmbus_set_page(client, 0);
+ int ret = pmbus_set_page(client, 0, 0xff);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index 660556b89e9f..d5103fc9e269 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -18,7 +18,8 @@
#define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */
#define XDPE122_PAGE_NUM 2
-static int xdpe122_read_word_data(struct i2c_client *client, int page, int reg)
+static int xdpe122_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
long val;
@@ -29,7 +30,7 @@ static int xdpe122_read_word_data(struct i2c_client *client, int page, int reg)
switch (reg) {
case PMBUS_VOUT_OV_FAULT_LIMIT:
case PMBUS_VOUT_UV_FAULT_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
+ ret = pmbus_read_word_data(client, page, phase, reg);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 190b898e404a..3a827d0a881d 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -125,7 +125,8 @@ static inline void zl6100_wait(const struct zl6100_data *data)
}
}
-static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
+static int zl6100_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct zl6100_data *data = to_zl6100_data(info);
@@ -167,7 +168,7 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
}
zl6100_wait(data);
- ret = pmbus_read_word_data(client, page, vreg);
+ ret = pmbus_read_word_data(client, page, phase, vreg);
data->access = ktime_get();
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 8264e849e588..e5d18dac8ee7 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -270,10 +270,10 @@ static int via_cputemp_down_prep(unsigned int cpu)
}
static const struct x86_cpu_id __initconst cputemp_ids[] = {
- { X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */
- { X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */
- { X86_VENDOR_CENTAUR, 6, 0xf, }, /* Nano */
- { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, },
+ X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);
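The X86_MATCH_VENDOR_FAM_MODEL() entries replace the open-coded struct initializers above. As a rough sketch (simplified; the real macro in <asm/cpu_device_id.h> goes through X86_MATCH_VENDOR_FAM_MODEL_FEATURE), one entry is equivalent to:

/* Illustrative expansion only - c7_a_entry is a hypothetical name */
static const struct x86_cpu_id c7_a_entry = {
	.vendor		= X86_VENDOR_CENTAUR,
	.family		= 6,
	.model		= X86_CENTAUR_FAM6_C7_A,
	.feature	= X86_FEATURE_ANY,
	.driver_data	= 0,		/* the trailing NULL argument */
};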
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 37740e992cfa..826a1054100d 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -9,7 +9,7 @@ menuconfig HWSPINLOCK
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
depends on HWSPINLOCK
- depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3
+ depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3 || COMPILE_TEST
help
Say y here to support the OMAP Hardware Spinlock device (first
introduced in OMAP4).
@@ -19,7 +19,7 @@ config HWSPINLOCK_OMAP
config HWSPINLOCK_QCOM
tristate "Qualcomm Hardware Spinlock device"
depends on HWSPINLOCK
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
select MFD_SYSCON
help
Say y here to support the Qualcomm Hardware Mutex functionality, which
@@ -31,7 +31,7 @@ config HWSPINLOCK_QCOM
config HWSPINLOCK_SIRF
tristate "SIRF Hardware Spinlock device"
depends on HWSPINLOCK
- depends on ARCH_SIRF
+ depends on ARCH_SIRF || COMPILE_TEST
help
Say y here to support the SIRF Hardware Spinlock device, which
provides a synchronisation mechanism for the various processors
@@ -42,7 +42,7 @@ config HWSPINLOCK_SIRF
config HWSPINLOCK_SPRD
tristate "SPRD Hardware Spinlock device"
- depends on ARCH_SPRD
+ depends on ARCH_SPRD || COMPILE_TEST
depends on HWSPINLOCK
help
Say y here to support the SPRD Hardware Spinlock device.
@@ -51,7 +51,7 @@ config HWSPINLOCK_SPRD
config HWSPINLOCK_STM32
tristate "STM32 Hardware Spinlock device"
- depends on MACH_STM32MP157
+ depends on MACH_STM32MP157 || COMPILE_TEST
depends on HWSPINLOCK
help
Say y here to support the STM32 Hardware Spinlock device.
@@ -61,7 +61,7 @@ config HWSPINLOCK_STM32
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
depends on HWSPINLOCK
- depends on ARCH_U8500
+ depends on ARCH_U8500 || COMPILE_TEST
help
Say y here to support the STE Hardware Semaphore functionality, which
provides a synchronisation mechanism for the various processors on the
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
index 9eb6bd020dc7..29892767bb7a 100644
--- a/drivers/hwspinlock/hwspinlock_internal.h
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -56,7 +56,7 @@ struct hwspinlock_device {
const struct hwspinlock_ops *ops;
int base_id;
int num_locks;
- struct hwspinlock lock[0];
+ struct hwspinlock lock[];
};
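With lock[0] replaced by a C99 flexible array member, allocation sites size the trailing array with struct_size() from <linux/overflow.h>. A minimal sketch, assuming a caller with a num_locks count (bank is an illustrative name):

	struct hwspinlock_device *bank;

	/* one allocation covers the header plus num_locks trailing locks */
	bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;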
static inline int hwlock_to_id(struct hwspinlock *hwlock)
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 6ff30e25af55..83e841be1081 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -110,4 +110,25 @@ config CORESIGHT_CPU_DEBUG
properly, please refer to Documentation/trace/coresight-cpu-debug.rst
for detailed description and the example for usage.
+config CORESIGHT_CTI
+ bool "CoreSight Cross Trigger Interface (CTI) driver"
+ depends on ARM || ARM64
+ help
+ This driver provides support for CoreSight CTI and CTM components.
+ These provide hardware triggering events between CoreSight trace
+ source and sink components. These can be used to halt trace or
+ inject events into the trace stream. CTI also provides a software
+ control to trigger the same halt events. This can provide fast trace
+ halt compared to disabling sources and sinks normally in driver
+ software.
+
+config CORESIGHT_CTI_INTEGRATION_REGS
+ bool "Access CTI CoreSight Integration Registers"
+ depends on CORESIGHT_CTI
+ help
+ This option adds support for the CoreSight integration registers on
+ this device. The integration registers allow the exploration of the
+	  CTI trigger connections between this and other devices. These
+ registers are not used in normal operation and can leave devices in
+ an inconsistent state.
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 3c0ac421e211..0e3e72f0f510 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -17,3 +17,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
+obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o \
+ coresight-cti-platform.o \
+ coresight-cti-sysfs.o
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
new file mode 100644
index 000000000000..2fdaeec80ee5
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linaro Limited. All rights reserved.
+ */
+
+#include <dt-bindings/arm/coresight-cti-dt.h>
+#include <linux/of.h>
+
+#include "coresight-cti.h"
+
+/* Number of CTI signals in the v8 architecturally defined connection */
+#define NR_V8PE_IN_SIGS 2
+#define NR_V8PE_OUT_SIGS 3
+#define NR_V8ETM_INOUT_SIGS 4
+
+/* CTI device tree trigger connection node keyword */
+#define CTI_DT_CONNS "trig-conns"
+
+/* CTI device tree connection property keywords */
+#define CTI_DT_V8ARCH_COMPAT "arm,coresight-cti-v8-arch"
+#define CTI_DT_CSDEV_ASSOC "arm,cs-dev-assoc"
+#define CTI_DT_TRIGIN_SIGS "arm,trig-in-sigs"
+#define CTI_DT_TRIGOUT_SIGS "arm,trig-out-sigs"
+#define CTI_DT_TRIGIN_TYPES "arm,trig-in-types"
+#define CTI_DT_TRIGOUT_TYPES "arm,trig-out-types"
+#define CTI_DT_FILTER_OUT_SIGS "arm,trig-filters"
+#define CTI_DT_CONN_NAME "arm,trig-conn-name"
+#define CTI_DT_CTM_ID "arm,cti-ctm-id"
+
+#ifdef CONFIG_OF
+/*
+ * CTI can be bound to a CPU, or a system device.
+ * CPU can be declared at the device top level or in a connections node
+ * so we need to check relative to the node, not the device.
+ */
+static int of_cti_get_cpu_at_node(const struct device_node *node)
+{
+ int cpu;
+ struct device_node *dn;
+
+ if (node == NULL)
+ return -1;
+
+ dn = of_parse_phandle(node, "cpu", 0);
+ /* CTI affinity defaults to no cpu */
+ if (!dn)
+ return -1;
+ cpu = of_cpu_node_to_id(dn);
+ of_node_put(dn);
+
+	/* No affinity if no cpu node is found */
+ return (cpu < 0) ? -1 : cpu;
+}
+
+#else
+static int of_cti_get_cpu_at_node(const struct device_node *node)
+{
+ return -1;
+}
+
+#endif
+
+/*
+ * CTI can be bound to a CPU, or a system device.
+ * CPU can be declared at the device top level or in a connections node
+ * so we need to check relative to the node, not the device.
+ */
+static int cti_plat_get_cpu_at_node(struct fwnode_handle *fwnode)
+{
+ if (is_of_node(fwnode))
+ return of_cti_get_cpu_at_node(to_of_node(fwnode));
+ return -1;
+}
+
+const char *cti_plat_get_node_name(struct fwnode_handle *fwnode)
+{
+ if (is_of_node(fwnode))
+ return of_node_full_name(to_of_node(fwnode));
+ return "unknown";
+}
+
+/*
+ * Extract a name from the fwnode.
+ * If the device associated with the node is a coresight_device, then return
+ * that name and the coresight_device pointer, otherwise return the node name.
+ */
+static const char *
+cti_plat_get_csdev_or_node_name(struct fwnode_handle *fwnode,
+ struct coresight_device **csdev)
+{
+ const char *name = NULL;
+ *csdev = coresight_find_csdev_by_fwnode(fwnode);
+ if (*csdev)
+ name = dev_name(&(*csdev)->dev);
+ else
+ name = cti_plat_get_node_name(fwnode);
+ return name;
+}
+
+static bool cti_plat_node_name_eq(struct fwnode_handle *fwnode,
+ const char *name)
+{
+ if (is_of_node(fwnode))
+ return of_node_name_eq(to_of_node(fwnode), name);
+ return false;
+}
+
+static int cti_plat_create_v8_etm_connection(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ int ret = -ENOMEM, i;
+ struct fwnode_handle *root_fwnode, *cs_fwnode;
+ const char *assoc_name = NULL;
+ struct coresight_device *csdev;
+ struct cti_trig_con *tc = NULL;
+
+ root_fwnode = dev_fwnode(dev);
+ if (IS_ERR_OR_NULL(root_fwnode))
+ return -EINVAL;
+
+ /* Can optionally have an etm node - return if not */
+ cs_fwnode = fwnode_find_reference(root_fwnode, CTI_DT_CSDEV_ASSOC, 0);
+ if (IS_ERR(cs_fwnode))
+ return 0;
+
+ /* allocate memory */
+ tc = cti_allocate_trig_con(dev, NR_V8ETM_INOUT_SIGS,
+ NR_V8ETM_INOUT_SIGS);
+ if (!tc)
+ goto create_v8_etm_out;
+
+ /* build connection data */
+ tc->con_in->used_mask = 0xF0; /* sigs <4,5,6,7> */
+ tc->con_out->used_mask = 0xF0; /* sigs <4,5,6,7> */
+
+ /*
+ * The EXTOUT type signals from the ETM are connected to a set of input
+ * triggers on the CTI, the EXTIN being connected to output triggers.
+ */
+ for (i = 0; i < NR_V8ETM_INOUT_SIGS; i++) {
+ tc->con_in->sig_types[i] = ETM_EXTOUT;
+ tc->con_out->sig_types[i] = ETM_EXTIN;
+ }
+
+ /*
+ * We look to see if the ETM coresight device associated with this
+ * handle has been registered with the system - i.e. probed before
+ * this CTI. If so csdev will be non NULL and we can use the device
+ * name and pass the csdev to the connection entry function where
+ * the association will be recorded.
+ * If not, then simply record the name in the connection data, the
+ * probing of the ETM will call into the CTI driver API to update the
+ * association then.
+ */
+ assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode, &csdev);
+ ret = cti_add_connection_entry(dev, drvdata, tc, csdev, assoc_name);
+
+create_v8_etm_out:
+ fwnode_handle_put(cs_fwnode);
+ return ret;
+}
+
+/*
+ * Create an architecturally defined v8 connection
+ * must have a cpu, can have an ETM.
+ */
+static int cti_plat_create_v8_connections(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ struct cti_device *cti_dev = &drvdata->ctidev;
+ struct cti_trig_con *tc = NULL;
+ int cpuid = 0;
+ char cpu_name_str[16];
+ int ret = -ENOMEM;
+
+ /* Must have a cpu node */
+ cpuid = cti_plat_get_cpu_at_node(dev_fwnode(dev));
+ if (cpuid < 0) {
+ dev_warn(dev,
+ "ARM v8 architectural CTI connection: missing cpu\n");
+ return -EINVAL;
+ }
+ cti_dev->cpu = cpuid;
+
+ /* Allocate the v8 cpu connection memory */
+ tc = cti_allocate_trig_con(dev, NR_V8PE_IN_SIGS, NR_V8PE_OUT_SIGS);
+ if (!tc)
+ goto of_create_v8_out;
+
+ /* Set the v8 PE CTI connection data */
+ tc->con_in->used_mask = 0x3; /* sigs <0 1> */
+ tc->con_in->sig_types[0] = PE_DBGTRIGGER;
+ tc->con_in->sig_types[1] = PE_PMUIRQ;
+	tc->con_out->used_mask = 0x7; /* sigs <0 1 2> */
+ tc->con_out->sig_types[0] = PE_EDBGREQ;
+ tc->con_out->sig_types[1] = PE_DBGRESTART;
+ tc->con_out->sig_types[2] = PE_CTIIRQ;
+ scnprintf(cpu_name_str, sizeof(cpu_name_str), "cpu%d", cpuid);
+
+ ret = cti_add_connection_entry(dev, drvdata, tc, NULL, cpu_name_str);
+ if (ret)
+ goto of_create_v8_out;
+
+ /* Create the v8 ETM associated connection */
+ ret = cti_plat_create_v8_etm_connection(dev, drvdata);
+ if (ret)
+ goto of_create_v8_out;
+
+ /* filter pe_edbgreq - PE trigout sig <0> */
+ drvdata->config.trig_out_filter |= 0x1;
+
+of_create_v8_out:
+ return ret;
+}
+
+static int cti_plat_check_v8_arch_compatible(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (is_of_node(fwnode))
+ return of_device_is_compatible(to_of_node(fwnode),
+ CTI_DT_V8ARCH_COMPAT);
+ return 0;
+}
+
+static int cti_plat_count_sig_elements(const struct fwnode_handle *fwnode,
+ const char *name)
+{
+ int nr_elem = fwnode_property_count_u32(fwnode, name);
+
+ return (nr_elem < 0 ? 0 : nr_elem);
+}
+
+static int cti_plat_read_trig_group(struct cti_trig_grp *tgrp,
+ const struct fwnode_handle *fwnode,
+ const char *grp_name)
+{
+ int idx, err = 0;
+ u32 *values;
+
+ if (!tgrp->nr_sigs)
+ return 0;
+
+ values = kcalloc(tgrp->nr_sigs, sizeof(u32), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ err = fwnode_property_read_u32_array(fwnode, grp_name,
+ values, tgrp->nr_sigs);
+
+ if (!err) {
+ /* set the signal usage mask */
+ for (idx = 0; idx < tgrp->nr_sigs; idx++)
+ tgrp->used_mask |= BIT(values[idx]);
+ }
+
+ kfree(values);
+ return err;
+}
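For instance, a property listing signal numbers <4 5 6 7> (illustrative values) produces used_mask = BIT(4) | BIT(5) | BIT(6) | BIT(7) = 0xF0, the same mask hard-coded for the v8 ETM connection above.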
+
+static int cti_plat_read_trig_types(struct cti_trig_grp *tgrp,
+ const struct fwnode_handle *fwnode,
+ const char *type_name)
+{
+ int items, err = 0, nr_sigs;
+ u32 *values = NULL, i;
+
+ /* allocate an array according to number of signals in connection */
+ nr_sigs = tgrp->nr_sigs;
+ if (!nr_sigs)
+ return 0;
+
+ /* see if any types have been included in the device description */
+ items = cti_plat_count_sig_elements(fwnode, type_name);
+ if (items > nr_sigs)
+ return -EINVAL;
+
+ /* need an array to store the values iff there are any */
+ if (items) {
+ values = kcalloc(items, sizeof(u32), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ err = fwnode_property_read_u32_array(fwnode, type_name,
+ values, items);
+ if (err)
+ goto read_trig_types_out;
+ }
+
+ /*
+ * Match type id to signal index, 1st type to 1st index etc.
+ * If fewer types than signals default remainder to GEN_IO.
+ */
+ for (i = 0; i < nr_sigs; i++) {
+ if (i < items) {
+ tgrp->sig_types[i] =
+ values[i] < CTI_TRIG_MAX ? values[i] : GEN_IO;
+ } else {
+ tgrp->sig_types[i] = GEN_IO;
+ }
+ }
+
+read_trig_types_out:
+ kfree(values);
+ return err;
+}
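So a connection declaring three input signals but only two type values gets the declared types on signals 0 and 1, while signal 2 defaults to GEN_IO.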
+
+static int cti_plat_process_filter_sigs(struct cti_drvdata *drvdata,
+ const struct fwnode_handle *fwnode)
+{
+ struct cti_trig_grp *tg = NULL;
+ int err = 0, nr_filter_sigs;
+
+ nr_filter_sigs = cti_plat_count_sig_elements(fwnode,
+ CTI_DT_FILTER_OUT_SIGS);
+ if (nr_filter_sigs == 0)
+ return 0;
+
+ if (nr_filter_sigs > drvdata->config.nr_trig_max)
+ return -EINVAL;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return -ENOMEM;
+
+ err = cti_plat_read_trig_group(tg, fwnode, CTI_DT_FILTER_OUT_SIGS);
+ if (!err)
+ drvdata->config.trig_out_filter |= tg->used_mask;
+
+ kfree(tg);
+ return err;
+}
+
+static int cti_plat_create_connection(struct device *dev,
+ struct cti_drvdata *drvdata,
+ struct fwnode_handle *fwnode)
+{
+ struct cti_trig_con *tc = NULL;
+ int cpuid = -1, err = 0;
+ struct fwnode_handle *cs_fwnode = NULL;
+ struct coresight_device *csdev = NULL;
+ const char *assoc_name = "unknown";
+ char cpu_name_str[16];
+ int nr_sigs_in, nr_sigs_out;
+
+ /* look to see how many in and out signals we have */
+ nr_sigs_in = cti_plat_count_sig_elements(fwnode, CTI_DT_TRIGIN_SIGS);
+ nr_sigs_out = cti_plat_count_sig_elements(fwnode, CTI_DT_TRIGOUT_SIGS);
+
+ if ((nr_sigs_in > drvdata->config.nr_trig_max) ||
+ (nr_sigs_out > drvdata->config.nr_trig_max))
+ return -EINVAL;
+
+ tc = cti_allocate_trig_con(dev, nr_sigs_in, nr_sigs_out);
+ if (!tc)
+ return -ENOMEM;
+
+ /* look for the signals properties. */
+ err = cti_plat_read_trig_group(tc->con_in, fwnode,
+ CTI_DT_TRIGIN_SIGS);
+ if (err)
+ goto create_con_err;
+
+ err = cti_plat_read_trig_types(tc->con_in, fwnode,
+ CTI_DT_TRIGIN_TYPES);
+ if (err)
+ goto create_con_err;
+
+ err = cti_plat_read_trig_group(tc->con_out, fwnode,
+ CTI_DT_TRIGOUT_SIGS);
+ if (err)
+ goto create_con_err;
+
+ err = cti_plat_read_trig_types(tc->con_out, fwnode,
+ CTI_DT_TRIGOUT_TYPES);
+ if (err)
+ goto create_con_err;
+
+ err = cti_plat_process_filter_sigs(drvdata, fwnode);
+ if (err)
+ goto create_con_err;
+
+	/* read the connection name if set - may be overridden later */
+ fwnode_property_read_string(fwnode, CTI_DT_CONN_NAME, &assoc_name);
+
+ /* associated cpu ? */
+ cpuid = cti_plat_get_cpu_at_node(fwnode);
+ if (cpuid >= 0) {
+ drvdata->ctidev.cpu = cpuid;
+ scnprintf(cpu_name_str, sizeof(cpu_name_str), "cpu%d", cpuid);
+ assoc_name = cpu_name_str;
+ } else {
+ /* associated device ? */
+ cs_fwnode = fwnode_find_reference(fwnode,
+ CTI_DT_CSDEV_ASSOC, 0);
+ if (!IS_ERR(cs_fwnode)) {
+ assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
+ &csdev);
+ fwnode_handle_put(cs_fwnode);
+ }
+ }
+ /* set up a connection */
+ err = cti_add_connection_entry(dev, drvdata, tc, csdev, assoc_name);
+
+create_con_err:
+ return err;
+}
+
+static int cti_plat_create_impdef_connections(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ int rc = 0;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_handle *child = NULL;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
+ fwnode_for_each_child_node(fwnode, child) {
+ if (cti_plat_node_name_eq(child, CTI_DT_CONNS))
+ rc = cti_plat_create_connection(dev, drvdata,
+ child);
+ if (rc != 0)
+ break;
+ }
+ fwnode_handle_put(child);
+
+ return rc;
+}
+
+/* get the hardware configuration & connection data. */
+int cti_plat_get_hw_data(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ int rc = 0;
+ struct cti_device *cti_dev = &drvdata->ctidev;
+
+ /* get any CTM ID - defaults to 0 */
+ device_property_read_u32(dev, CTI_DT_CTM_ID, &cti_dev->ctm_id);
+
+ /* check for a v8 architectural CTI device */
+ if (cti_plat_check_v8_arch_compatible(dev))
+ rc = cti_plat_create_v8_connections(dev, drvdata);
+ else
+ rc = cti_plat_create_impdef_connections(dev, drvdata);
+ if (rc)
+ return rc;
+
+ /* if no connections, just add a single default based on max IN-OUT */
+ if (cti_dev->nr_trig_con == 0)
+ rc = cti_add_default_connection(dev, drvdata);
+ return rc;
+}
+
+struct coresight_platform_data *
+coresight_cti_get_platform_data(struct device *dev)
+{
+ int ret = -ENOENT;
+ struct coresight_platform_data *pdata = NULL;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (IS_ERR_OR_NULL(fwnode))
+ goto error;
+
+ /*
+	 * Allocate platform data but leave it zero-initialized. CTI does not
+	 * use the same connection infrastructure as trace path components but an
+ * empty struct enables us to use the standard coresight component
+ * registration code.
+ */
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* get some CTI specifics */
+ ret = cti_plat_get_hw_data(dev, drvdata);
+
+ if (!ret)
+ return pdata;
+error:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
new file mode 100644
index 000000000000..1f8fb7c15e80
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#include <linux/coresight.h>
+
+#include "coresight-cti.h"
+
+/*
+ * Declare the number of static declared attribute groups
+ * Value includes groups + NULL value at end of table.
+ */
+#define CORESIGHT_CTI_STATIC_GROUPS_MAX 5
+
+/*
+ * List of trigger signal type names. Match the constants declared in
+ * include/dt-bindings/arm/coresight-cti-dt.h
+ */
+static const char * const sig_type_names[] = {
+ "genio", /* GEN_IO */
+ "intreq", /* GEN_INTREQ */
+ "intack", /* GEN_INTACK */
+ "haltreq", /* GEN_HALTREQ */
+ "restartreq", /* GEN_RESTARTREQ */
+ "pe_edbgreq", /* PE_EDBGREQ */
+ "pe_dbgrestart",/* PE_DBGRESTART */
+ "pe_ctiirq", /* PE_CTIIRQ */
+ "pe_pmuirq", /* PE_PMUIRQ */
+ "pe_dbgtrigger",/* PE_DBGTRIGGER */
+ "etm_extout", /* ETM_EXTOUT */
+ "etm_extin", /* ETM_EXTIN */
+ "snk_full", /* SNK_FULL */
+ "snk_acqcomp", /* SNK_ACQCOMP */
+ "snk_flushcomp",/* SNK_FLUSHCOMP */
+ "snk_flushin", /* SNK_FLUSHIN */
+ "snk_trigin", /* SNK_TRIGIN */
+ "stm_asyncout", /* STM_ASYNCOUT */
+ "stm_tout_spte",/* STM_TOUT_SPTE */
+ "stm_tout_sw", /* STM_TOUT_SW */
+ "stm_tout_hete",/* STM_TOUT_HETE */
+ "stm_hwevent", /* STM_HWEVENT */
+ "ela_tstart", /* ELA_TSTART */
+ "ela_tstop", /* ELA_TSTOP */
+ "ela_dbgreq", /* ELA_DBGREQ */
+};
+
+/* Show function pointer used in the connections' dynamically declared attributes */
+typedef ssize_t (*p_show_fn)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+
+/* Connection attribute types */
+enum cti_conn_attr_type {
+ CTI_CON_ATTR_NAME,
+ CTI_CON_ATTR_TRIGIN_SIG,
+ CTI_CON_ATTR_TRIGOUT_SIG,
+ CTI_CON_ATTR_TRIGIN_TYPES,
+ CTI_CON_ATTR_TRIGOUT_TYPES,
+ CTI_CON_ATTR_MAX,
+};
+
+/* Names for the connection attributes */
+static const char * const con_attr_names[CTI_CON_ATTR_MAX] = {
+ "name",
+ "in_signals",
+ "out_signals",
+ "in_types",
+ "out_types",
+};
+
+/* basic attributes */
+static ssize_t enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int enable_req;
+ bool enabled, powered;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ enable_req = atomic_read(&drvdata->config.enable_req_count);
+ spin_lock(&drvdata->spinlock);
+ powered = drvdata->config.hw_powered;
+ enabled = drvdata->config.hw_enabled;
+ spin_unlock(&drvdata->spinlock);
+
+ if (powered)
+ return sprintf(buf, "%d\n", enabled);
+ else
+ return sprintf(buf, "%d\n", !!enable_req);
+}
+
+static ssize_t enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val)
+ ret = cti_enable(drvdata->csdev);
+ else
+ ret = cti_disable(drvdata->csdev);
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR_RW(enable);
+
+static ssize_t powered_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ bool powered;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ powered = drvdata->config.hw_powered;
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%d\n", powered);
+}
+static DEVICE_ATTR_RO(powered);
+
+static ssize_t ctmid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", drvdata->ctidev.ctm_id);
+}
+static DEVICE_ATTR_RO(ctmid);
+
+static ssize_t nr_trigger_cons_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", drvdata->ctidev.nr_trig_con);
+}
+static DEVICE_ATTR_RO(nr_trigger_cons);
+
+/* attribute and group sysfs tables. */
+static struct attribute *coresight_cti_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_powered.attr,
+ &dev_attr_ctmid.attr,
+ &dev_attr_nr_trigger_cons.attr,
+ NULL,
+};
+
+/* register based attributes */
+
+/* macro to access RO registers with power check only (no enable check). */
+#define coresight_cti_reg(name, offset) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ u32 val = 0; \
+ pm_runtime_get_sync(dev->parent); \
+ spin_lock(&drvdata->spinlock); \
+ if (drvdata->config.hw_powered) \
+ val = readl_relaxed(drvdata->base + offset); \
+ spin_unlock(&drvdata->spinlock); \
+ pm_runtime_put_sync(dev->parent); \
+ return sprintf(buf, "0x%x\n", val); \
+} \
+static DEVICE_ATTR_RO(name)
+
+/* coresight management registers */
+coresight_cti_reg(devaff0, CTIDEVAFF0);
+coresight_cti_reg(devaff1, CTIDEVAFF1);
+coresight_cti_reg(authstatus, CORESIGHT_AUTHSTATUS);
+coresight_cti_reg(devarch, CORESIGHT_DEVARCH);
+coresight_cti_reg(devid, CORESIGHT_DEVID);
+coresight_cti_reg(devtype, CORESIGHT_DEVTYPE);
+coresight_cti_reg(pidr0, CORESIGHT_PERIPHIDR0);
+coresight_cti_reg(pidr1, CORESIGHT_PERIPHIDR1);
+coresight_cti_reg(pidr2, CORESIGHT_PERIPHIDR2);
+coresight_cti_reg(pidr3, CORESIGHT_PERIPHIDR3);
+coresight_cti_reg(pidr4, CORESIGHT_PERIPHIDR4);
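For illustration, the first instantiation above, coresight_cti_reg(devaff0, CTIDEVAFF0), expands to approximately:

static ssize_t devaff0_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
	u32 val = 0;

	pm_runtime_get_sync(dev->parent);
	spin_lock(&drvdata->spinlock);
	if (drvdata->config.hw_powered)
		val = readl_relaxed(drvdata->base + CTIDEVAFF0);
	spin_unlock(&drvdata->spinlock);
	pm_runtime_put_sync(dev->parent);
	return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(devaff0);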
+
+static struct attribute *coresight_cti_mgmt_attrs[] = {
+ &dev_attr_devaff0.attr,
+ &dev_attr_devaff1.attr,
+ &dev_attr_authstatus.attr,
+ &dev_attr_devarch.attr,
+ &dev_attr_devid.attr,
+ &dev_attr_devtype.attr,
+ &dev_attr_pidr0.attr,
+ &dev_attr_pidr1.attr,
+ &dev_attr_pidr2.attr,
+ &dev_attr_pidr3.attr,
+ &dev_attr_pidr4.attr,
+ NULL,
+};
+
+/* CTI low level programming registers */
+
+/*
+ * Show a simple 32 bit value if enabled and powered.
+ * If inaccessible and pcached_val is not NULL, show the cached value.
+ */
+static ssize_t cti_reg32_show(struct device *dev, char *buf,
+ u32 *pcached_val, int reg_offset)
+{
+ u32 val = 0;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ if ((reg_offset >= 0) && cti_active(config)) {
+ CS_UNLOCK(drvdata->base);
+ val = readl_relaxed(drvdata->base + reg_offset);
+ if (pcached_val)
+ *pcached_val = val;
+ CS_LOCK(drvdata->base);
+ } else if (pcached_val) {
+ val = *pcached_val;
+ }
+ spin_unlock(&drvdata->spinlock);
+ return sprintf(buf, "%#x\n", val);
+}
+
+/*
+ * Store a simple 32 bit value.
+ * If pcached_val is not NULL then also copy the value there; if
+ * reg_offset >= 0, write through to the hardware when enabled.
+ */
+static ssize_t cti_reg32_store(struct device *dev, const char *buf,
+ size_t size, u32 *pcached_val, int reg_offset)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /* local store */
+ if (pcached_val)
+ *pcached_val = (u32)val;
+
+ /* write through if offset and enabled */
+ if ((reg_offset >= 0) && cti_active(config))
+ cti_write_single_reg(drvdata, reg_offset, val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
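Note the pattern these two helpers establish: every programmed value is cached in struct cti_config and only written through to the hardware while the CTI is powered and enabled; cti_write_all_hw_regs() later replays the cached state whenever the device is (re-)enabled.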
+
+/* Standard macro for simple rw cti config registers */
+#define cti_config_reg32_rw(name, cfgname, offset) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ return cti_reg32_show(dev, buf, \
+ &drvdata->config.cfgname, offset); \
+} \
+ \
+static ssize_t name##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ return cti_reg32_store(dev, buf, size, \
+ &drvdata->config.cfgname, offset); \
+} \
+static DEVICE_ATTR_RW(name)
+
+static ssize_t inout_sel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = (u32)drvdata->config.ctiinout_sel;
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t inout_sel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+ if (val > (CTIINOUTEN_MAX - 1))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.ctiinout_sel = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(inout_sel);
+
+static ssize_t inen_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ int index;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ index = drvdata->config.ctiinout_sel;
+ val = drvdata->config.ctiinen[index];
+ spin_unlock(&drvdata->spinlock);
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t inen_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ int index;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ index = config->ctiinout_sel;
+ config->ctiinen[index] = val;
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIINEN(index), val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(inen);
+
+static ssize_t outen_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ int index;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ index = drvdata->config.ctiinout_sel;
+ val = drvdata->config.ctiouten[index];
+ spin_unlock(&drvdata->spinlock);
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t outen_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ int index;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ index = config->ctiinout_sel;
+ config->ctiouten[index] = val;
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIOUTEN(index), val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(outen);
+
+static ssize_t intack_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ cti_write_intack(dev, val);
+ return size;
+}
+static DEVICE_ATTR_WO(intack);
+
+cti_config_reg32_rw(gate, ctigate, CTIGATE);
+cti_config_reg32_rw(asicctl, asicctl, ASICCTL);
+cti_config_reg32_rw(appset, ctiappset, CTIAPPSET);
+
+static ssize_t appclear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+
+	/* a 1'b1 in appclr clears down the same bit in appset */
+ config->ctiappset &= ~val;
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIAPPCLEAR, val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_WO(appclear);
+
+static ssize_t apppulse_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIAPPPULSE, val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_WO(apppulse);
+
+coresight_cti_reg(triginstatus, CTITRIGINSTATUS);
+coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS);
+coresight_cti_reg(chinstatus, CTICHINSTATUS);
+coresight_cti_reg(choutstatus, CTICHOUTSTATUS);
+
+/*
+ * Define CONFIG_CORESIGHT_CTI_INTEGRATION_REGS to enable access to the
+ * integration control registers. Normally only used to investigate connection
+ * data.
+ */
+#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
+
+/* macro to access RW registers with power check only (no enable check). */
+#define coresight_cti_reg_rw(name, offset) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ u32 val = 0; \
+ pm_runtime_get_sync(dev->parent); \
+ spin_lock(&drvdata->spinlock); \
+ if (drvdata->config.hw_powered) \
+ val = readl_relaxed(drvdata->base + offset); \
+ spin_unlock(&drvdata->spinlock); \
+ pm_runtime_put_sync(dev->parent); \
+ return sprintf(buf, "0x%x\n", val); \
+} \
+ \
+static ssize_t name##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ unsigned long val = 0; \
+ if (kstrtoul(buf, 0, &val)) \
+ return -EINVAL; \
+ \
+ pm_runtime_get_sync(dev->parent); \
+ spin_lock(&drvdata->spinlock); \
+ if (drvdata->config.hw_powered) \
+ cti_write_single_reg(drvdata, offset, val); \
+ spin_unlock(&drvdata->spinlock); \
+ pm_runtime_put_sync(dev->parent); \
+ return size; \
+} \
+static DEVICE_ATTR_RW(name)
+
+/* macro to access WO registers with power check only (no enable check). */
+#define coresight_cti_reg_wo(name, offset) \
+static ssize_t name##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ unsigned long val = 0; \
+ if (kstrtoul(buf, 0, &val)) \
+ return -EINVAL; \
+ \
+ pm_runtime_get_sync(dev->parent); \
+ spin_lock(&drvdata->spinlock); \
+ if (drvdata->config.hw_powered) \
+ cti_write_single_reg(drvdata, offset, val); \
+ spin_unlock(&drvdata->spinlock); \
+ pm_runtime_put_sync(dev->parent); \
+ return size; \
+} \
+static DEVICE_ATTR_WO(name)
+
+coresight_cti_reg_rw(itchout, ITCHOUT);
+coresight_cti_reg_rw(ittrigout, ITTRIGOUT);
+coresight_cti_reg_rw(itctrl, CORESIGHT_ITCTRL);
+coresight_cti_reg_wo(itchinack, ITCHINACK);
+coresight_cti_reg_wo(ittriginack, ITTRIGINACK);
+coresight_cti_reg(ittrigin, ITTRIGIN);
+coresight_cti_reg(itchin, ITCHIN);
+coresight_cti_reg(itchoutack, ITCHOUTACK);
+coresight_cti_reg(ittrigoutack, ITTRIGOUTACK);
+
+#endif /* CORESIGHT_CTI_INTEGRATION_REGS */
+
+static struct attribute *coresight_cti_regs_attrs[] = {
+ &dev_attr_inout_sel.attr,
+ &dev_attr_inen.attr,
+ &dev_attr_outen.attr,
+ &dev_attr_gate.attr,
+ &dev_attr_asicctl.attr,
+ &dev_attr_intack.attr,
+ &dev_attr_appset.attr,
+ &dev_attr_appclear.attr,
+ &dev_attr_apppulse.attr,
+ &dev_attr_triginstatus.attr,
+ &dev_attr_trigoutstatus.attr,
+ &dev_attr_chinstatus.attr,
+ &dev_attr_choutstatus.attr,
+#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
+ &dev_attr_itctrl.attr,
+ &dev_attr_ittrigin.attr,
+ &dev_attr_itchin.attr,
+ &dev_attr_ittrigout.attr,
+ &dev_attr_itchout.attr,
+ &dev_attr_itchoutack.attr,
+ &dev_attr_ittrigoutack.attr,
+ &dev_attr_ittriginack.attr,
+ &dev_attr_itchinack.attr,
+#endif
+ NULL,
+};
+
+/* CTI channel x-trigger programming */
+static int
+cti_trig_op_parse(struct device *dev, enum cti_chan_op op,
+ enum cti_trig_dir dir, const char *buf, size_t size)
+{
+ u32 chan_idx;
+ u32 trig_idx;
+ int items, err = -EINVAL;
+
+ /* extract chan idx and trigger idx */
+ items = sscanf(buf, "%d %d", &chan_idx, &trig_idx);
+ if (items == 2) {
+ err = cti_channel_trig_op(dev, op, dir, chan_idx, trig_idx);
+ if (!err)
+ err = size;
+ }
+ return err;
+}
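Usage sketch (the sysfs path is illustrative): writing a "<channel> <trigger>" pair such as "0 2" to channels/trigin_attach attaches trigger input 2 to CTM channel 0 on this CTI.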
+
+static ssize_t trigin_attach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return cti_trig_op_parse(dev, CTI_CHAN_ATTACH, CTI_TRIG_IN,
+ buf, size);
+}
+static DEVICE_ATTR_WO(trigin_attach);
+
+static ssize_t trigin_detach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return cti_trig_op_parse(dev, CTI_CHAN_DETACH, CTI_TRIG_IN,
+ buf, size);
+}
+static DEVICE_ATTR_WO(trigin_detach);
+
+static ssize_t trigout_attach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return cti_trig_op_parse(dev, CTI_CHAN_ATTACH, CTI_TRIG_OUT,
+ buf, size);
+}
+static DEVICE_ATTR_WO(trigout_attach);
+
+static ssize_t trigout_detach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return cti_trig_op_parse(dev, CTI_CHAN_DETACH, CTI_TRIG_OUT,
+ buf, size);
+}
+static DEVICE_ATTR_WO(trigout_detach);
+
+static ssize_t chan_gate_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = 0, channel = 0;
+
+ if (kstrtoint(buf, 0, &channel))
+ return -EINVAL;
+
+ err = cti_channel_gate_op(dev, CTI_GATE_CHAN_ENABLE, channel);
+ return err ? err : size;
+}
+
+static ssize_t chan_gate_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ unsigned long ctigate_bitmask = cfg->ctigate;
+ int size = 0;
+
+ if (cfg->ctigate == 0)
+ size = sprintf(buf, "\n");
+ else
+ size = bitmap_print_to_pagebuf(true, buf, &ctigate_bitmask,
+ cfg->nr_ctm_channels);
+ return size;
+}
+static DEVICE_ATTR_RW(chan_gate_enable);
+
+static ssize_t chan_gate_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = 0, channel = 0;
+
+ if (kstrtoint(buf, 0, &channel))
+ return -EINVAL;
+
+ err = cti_channel_gate_op(dev, CTI_GATE_CHAN_DISABLE, channel);
+ return err ? err : size;
+}
+static DEVICE_ATTR_WO(chan_gate_disable);
+
+static int
+chan_op_parse(struct device *dev, enum cti_chan_set_op op, const char *buf)
+{
+ int err = 0, channel = 0;
+
+ if (kstrtoint(buf, 0, &channel))
+ return -EINVAL;
+
+ err = cti_channel_setop(dev, op, channel);
+ return err;
+
+}
+
+static ssize_t chan_set_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = chan_op_parse(dev, CTI_CHAN_SET, buf);
+
+ return err ? err : size;
+}
+static DEVICE_ATTR_WO(chan_set);
+
+static ssize_t chan_clear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = chan_op_parse(dev, CTI_CHAN_CLR, buf);
+
+ return err ? err : size;
+}
+static DEVICE_ATTR_WO(chan_clear);
+
+static ssize_t chan_pulse_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = chan_op_parse(dev, CTI_CHAN_PULSE, buf);
+
+ return err ? err : size;
+}
+static DEVICE_ATTR_WO(chan_pulse);
+
+static ssize_t trig_filter_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->config.trig_filter_enable;
+ spin_unlock(&drvdata->spinlock);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t trig_filter_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.trig_filter_enable = !!val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(trig_filter_enable);
+
+static ssize_t trigout_filtered_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ int size = 0, nr_trig_max = cfg->nr_trig_max;
+ unsigned long mask = cfg->trig_out_filter;
+
+ if (mask)
+ size = bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
+ return size;
+}
+static DEVICE_ATTR_RO(trigout_filtered);
+
+/* clear all xtrigger / channel programming */
+static ssize_t chan_xtrigs_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int i;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+
+ /* clear the CTI trigger / channel programming registers */
+ for (i = 0; i < config->nr_trig_max; i++) {
+ config->ctiinen[i] = 0;
+ config->ctiouten[i] = 0;
+ }
+
+ /* clear the other regs */
+ config->ctigate = GENMASK(config->nr_ctm_channels - 1, 0);
+ config->asicctl = 0;
+ config->ctiappset = 0;
+ config->ctiinout_sel = 0;
+ config->xtrig_rchan_sel = 0;
+
+ /* if enabled then write through */
+ if (cti_active(config))
+ cti_write_all_hw_regs(drvdata);
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_WO(chan_xtrigs_reset);
+
+/*
+ * Write to select a channel to view, read to display the
+ * cross triggers for the selected channel.
+ */
+static ssize_t chan_xtrigs_sel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+ if (val > (drvdata->config.nr_ctm_channels - 1))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.xtrig_rchan_sel = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+
+static ssize_t chan_xtrigs_sel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->config.xtrig_rchan_sel;
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%ld\n", val);
+}
+static DEVICE_ATTR_RW(chan_xtrigs_sel);
+
+static ssize_t chan_xtrigs_in_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ int used = 0, reg_idx;
+ int nr_trig_max = drvdata->config.nr_trig_max;
+ u32 chan_mask = BIT(cfg->xtrig_rchan_sel);
+
+ for (reg_idx = 0; reg_idx < nr_trig_max; reg_idx++) {
+ if (chan_mask & cfg->ctiinen[reg_idx])
+ used += sprintf(buf + used, "%d ", reg_idx);
+ }
+
+ used += sprintf(buf + used, "\n");
+ return used;
+}
+static DEVICE_ATTR_RO(chan_xtrigs_in);
+
+static ssize_t chan_xtrigs_out_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ int used = 0, reg_idx;
+ int nr_trig_max = drvdata->config.nr_trig_max;
+ u32 chan_mask = BIT(cfg->xtrig_rchan_sel);
+
+ for (reg_idx = 0; reg_idx < nr_trig_max; reg_idx++) {
+ if (chan_mask & cfg->ctiouten[reg_idx])
+ used += sprintf(buf + used, "%d ", reg_idx);
+ }
+
+ used += sprintf(buf + used, "\n");
+ return used;
+}
+static DEVICE_ATTR_RO(chan_xtrigs_out);
+
+static ssize_t print_chan_list(struct device *dev,
+ char *buf, bool inuse)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+ int size, i;
+ unsigned long inuse_bits = 0, chan_mask;
+
+ /* scan regs to get bitmap of channels in use. */
+ spin_lock(&drvdata->spinlock);
+ for (i = 0; i < config->nr_trig_max; i++) {
+ inuse_bits |= config->ctiinen[i];
+ inuse_bits |= config->ctiouten[i];
+ }
+ spin_unlock(&drvdata->spinlock);
+
+	/* invert bits if printing free channels */
+ if (!inuse)
+ inuse_bits = ~inuse_bits;
+
+ /* list of channels, or 'none' */
+ chan_mask = GENMASK(config->nr_ctm_channels - 1, 0);
+ if (inuse_bits & chan_mask)
+ size = bitmap_print_to_pagebuf(true, buf, &inuse_bits,
+ config->nr_ctm_channels);
+ else
+ size = sprintf(buf, "\n");
+ return size;
+}
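As an illustrative example: with ctiinen[0] = 0x1 and ctiouten[1] = 0x2, inuse_bits becomes 0x3, so chan_inuse prints "0-1" and chan_free lists the remaining channels up to nr_ctm_channels.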
+
+static ssize_t chan_inuse_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return print_chan_list(dev, buf, true);
+}
+static DEVICE_ATTR_RO(chan_inuse);
+
+static ssize_t chan_free_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return print_chan_list(dev, buf, false);
+}
+static DEVICE_ATTR_RO(chan_free);
+
+static struct attribute *coresight_cti_channel_attrs[] = {
+ &dev_attr_trigin_attach.attr,
+ &dev_attr_trigin_detach.attr,
+ &dev_attr_trigout_attach.attr,
+ &dev_attr_trigout_detach.attr,
+ &dev_attr_trig_filter_enable.attr,
+ &dev_attr_trigout_filtered.attr,
+ &dev_attr_chan_gate_enable.attr,
+ &dev_attr_chan_gate_disable.attr,
+ &dev_attr_chan_set.attr,
+ &dev_attr_chan_clear.attr,
+ &dev_attr_chan_pulse.attr,
+ &dev_attr_chan_inuse.attr,
+ &dev_attr_chan_free.attr,
+ &dev_attr_chan_xtrigs_sel.attr,
+ &dev_attr_chan_xtrigs_in.attr,
+ &dev_attr_chan_xtrigs_out.attr,
+ &dev_attr_chan_xtrigs_reset.attr,
+ NULL,
+};
+
+/* Create the connection trigger groups and attrs dynamically */
+/*
+ * Each connection has a dynamic group triggers<N> containing the name and
+ * trigin/out signal and type attributes, while each device has a static
+ * nr_trigger_cons attribute giving the number of groups. E.g. in sysfs:
+ * /cti_<name>/triggers0
+ * /cti_<name>/triggers1
+ * /cti_<name>/nr_trigger_cons
+ * where nr_trigger_cons = 2
+ */
+static ssize_t con_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+
+ return sprintf(buf, "%s\n", con->con_dev_name);
+}
+
+static ssize_t trigin_sig_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ unsigned long mask = con->con_in->used_mask;
+
+ return bitmap_print_to_pagebuf(true, buf, &mask, cfg->nr_trig_max);
+}
+
+static ssize_t trigout_sig_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ unsigned long mask = con->con_out->used_mask;
+
+ return bitmap_print_to_pagebuf(true, buf, &mask, cfg->nr_trig_max);
+}
+
+/* convert a sig type id to a name */
+static const char *
+cti_sig_type_name(struct cti_trig_con *con, int used_count, bool in)
+{
+ int idx = 0;
+ struct cti_trig_grp *grp = in ? con->con_in : con->con_out;
+
+ if (used_count < grp->nr_sigs)
+ idx = grp->sig_types[used_count];
+ return sig_type_names[idx];
+}
+
+static ssize_t trigin_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+ int sig_idx, used = 0;
+ const char *name;
+
+ for (sig_idx = 0; sig_idx < con->con_in->nr_sigs; sig_idx++) {
+ name = cti_sig_type_name(con, sig_idx, true);
+ used += sprintf(buf + used, "%s ", name);
+ }
+ used += sprintf(buf + used, "\n");
+ return used;
+}
+
+static ssize_t trigout_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+ int sig_idx, used = 0;
+ const char *name;
+
+ for (sig_idx = 0; sig_idx < con->con_out->nr_sigs; sig_idx++) {
+ name = cti_sig_type_name(con, sig_idx, false);
+ used += sprintf(buf + used, "%s ", name);
+ }
+ used += sprintf(buf + used, "\n");
+ return used;
+}
+
+/*
+ * Array of show function names declared above to allow selection
+ * for the connection attributes
+ */
+static p_show_fn show_fns[CTI_CON_ATTR_MAX] = {
+ con_name_show,
+ trigin_sig_show,
+ trigout_sig_show,
+ trigin_type_show,
+ trigout_type_show,
+};
+
+static int cti_create_con_sysfs_attr(struct device *dev,
+ struct cti_trig_con *con,
+ enum cti_conn_attr_type attr_type,
+ int attr_idx)
+{
+	struct dev_ext_attribute *eattr = NULL;
+	char *name = NULL;
+
+ eattr = devm_kzalloc(dev, sizeof(struct dev_ext_attribute),
+ GFP_KERNEL);
+ if (eattr) {
+ name = devm_kstrdup(dev, con_attr_names[attr_type],
+ GFP_KERNEL);
+ if (name) {
+ /* fill out the underlying attribute struct */
+ eattr->attr.attr.name = name;
+ eattr->attr.attr.mode = 0444;
+
+ /* now the device_attribute struct */
+ eattr->attr.show = show_fns[attr_type];
+ } else {
+ return -ENOMEM;
+ }
+ } else {
+ return -ENOMEM;
+ }
+ eattr->var = con;
+ con->con_attrs[attr_idx] = &eattr->attr.attr;
+ return 0;
+}
+
+static struct attribute_group *
+cti_create_con_sysfs_group(struct device *dev, struct cti_device *ctidev,
+ int con_idx, struct cti_trig_con *tc)
+{
+ struct attribute_group *group = NULL;
+ int grp_idx;
+
+ group = devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
+ if (!group)
+ return NULL;
+
+ group->name = devm_kasprintf(dev, GFP_KERNEL, "triggers%d", con_idx);
+ if (!group->name)
+ return NULL;
+
+ grp_idx = con_idx + CORESIGHT_CTI_STATIC_GROUPS_MAX - 1;
+ ctidev->con_groups[grp_idx] = group;
+ tc->attr_group = group;
+ return group;
+}
+
+/* create a triggers connection group and the attributes for that group */
+static int cti_create_con_attr_set(struct device *dev, int con_idx,
+ struct cti_device *ctidev,
+ struct cti_trig_con *tc)
+{
+ struct attribute_group *attr_group = NULL;
+ int attr_idx = 0;
+ int err = -ENOMEM;
+
+ attr_group = cti_create_con_sysfs_group(dev, ctidev, con_idx, tc);
+ if (!attr_group)
+ return -ENOMEM;
+
+ /* allocate NULL terminated array of attributes */
+ tc->con_attrs = devm_kcalloc(dev, CTI_CON_ATTR_MAX + 1,
+ sizeof(struct attribute *), GFP_KERNEL);
+ if (!tc->con_attrs)
+ return -ENOMEM;
+
+ err = cti_create_con_sysfs_attr(dev, tc, CTI_CON_ATTR_NAME,
+ attr_idx++);
+ if (err)
+ return err;
+
+ if (tc->con_in->nr_sigs > 0) {
+ err = cti_create_con_sysfs_attr(dev, tc,
+ CTI_CON_ATTR_TRIGIN_SIG,
+ attr_idx++);
+ if (err)
+ return err;
+
+ err = cti_create_con_sysfs_attr(dev, tc,
+ CTI_CON_ATTR_TRIGIN_TYPES,
+ attr_idx++);
+ if (err)
+ return err;
+ }
+
+ if (tc->con_out->nr_sigs > 0) {
+ err = cti_create_con_sysfs_attr(dev, tc,
+ CTI_CON_ATTR_TRIGOUT_SIG,
+ attr_idx++);
+ if (err)
+ return err;
+
+ err = cti_create_con_sysfs_attr(dev, tc,
+ CTI_CON_ATTR_TRIGOUT_TYPES,
+ attr_idx++);
+ if (err)
+ return err;
+ }
+ attr_group->attrs = tc->con_attrs;
+ return 0;
+}
+
+/* create the array of group pointers for the CTI sysfs groups */
+int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
+{
+ int nr_groups;
+
+ /* nr groups = dynamic + static + NULL terminator */
+ nr_groups = ctidev->nr_trig_con + CORESIGHT_CTI_STATIC_GROUPS_MAX;
+ ctidev->con_groups = devm_kcalloc(dev, nr_groups,
+ sizeof(struct attribute_group *),
+ GFP_KERNEL);
+ if (!ctidev->con_groups)
+ return -ENOMEM;
+ return 0;
+}
+
+int cti_create_cons_sysfs(struct device *dev, struct cti_drvdata *drvdata)
+{
+ struct cti_device *ctidev = &drvdata->ctidev;
+ int err = 0, con_idx = 0, i;
+ struct cti_trig_con *tc = NULL;
+
+ err = cti_create_cons_groups(dev, ctidev);
+ if (err)
+ return err;
+
+ /* populate first locations with the static set of groups */
+ for (i = 0; i < (CORESIGHT_CTI_STATIC_GROUPS_MAX - 1); i++)
+ ctidev->con_groups[i] = coresight_cti_groups[i];
+
+ /* add dynamic set for each connection */
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ err = cti_create_con_attr_set(dev, con_idx++, ctidev, tc);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/* attribute and group sysfs tables. */
+static const struct attribute_group coresight_cti_group = {
+ .attrs = coresight_cti_attrs,
+};
+
+static const struct attribute_group coresight_cti_mgmt_group = {
+ .attrs = coresight_cti_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group coresight_cti_regs_group = {
+ .attrs = coresight_cti_regs_attrs,
+ .name = "regs",
+};
+
+static const struct attribute_group coresight_cti_channels_group = {
+ .attrs = coresight_cti_channel_attrs,
+ .name = "channels",
+};
+
+const struct attribute_group *
+coresight_cti_groups[CORESIGHT_CTI_STATIC_GROUPS_MAX] = {
+ &coresight_cti_group,
+ &coresight_cti_mgmt_group,
+ &coresight_cti_regs_group,
+ &coresight_cti_channels_group,
+ NULL,
+};
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
new file mode 100644
index 000000000000..aa6e0249bd70
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#include <linux/property.h>
+#include "coresight-cti.h"
+
+/*
+ * CTI devices can be associated with a PE, or be connected to CoreSight
+ * hardware. We keep a list of all CTIs, whether CPU-bound or not.
+ *
+ * We assume that the non-CPU CTIs are always powered, as we do with sinks
+ * etc.
+ *
+ * We leave the client to figure out if all the CTIs are interconnected via
+ * the same CTM; in general this is the case but it does not have to be.
+ */
+
+/* net of CTI devices connected via CTM */
+LIST_HEAD(ect_net);
+
+/* protect the list */
+static DEFINE_MUTEX(ect_mutex);
+
+#define csdev_to_cti_drvdata(csdev) \
+ dev_get_drvdata(csdev->dev.parent)
+
+/*
+ * CTI naming. CTI bound to cores will have the name cti_cpu<N> where
+ * N is the CPU ID. System CTIs will have the name cti_sys<I> where I
+ * is an index allocated by order of discovery.
+ *
+ * CTI device name list - for CTI not bound to cores.
+ */
+DEFINE_CORESIGHT_DEVLIST(cti_sys_devs, "cti_sys");
+
+/* write set of regs to hardware - call with spinlock claimed */
+void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ int i;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* disable CTI before writing registers */
+ writel_relaxed(0, drvdata->base + CTICONTROL);
+
+ /* write the CTI trigger registers */
+ for (i = 0; i < config->nr_trig_max; i++) {
+ writel_relaxed(config->ctiinen[i], drvdata->base + CTIINEN(i));
+ writel_relaxed(config->ctiouten[i],
+ drvdata->base + CTIOUTEN(i));
+ }
+
+ /* other regs */
+ writel_relaxed(config->ctigate, drvdata->base + CTIGATE);
+ writel_relaxed(config->asicctl, drvdata->base + ASICCTL);
+ writel_relaxed(config->ctiappset, drvdata->base + CTIAPPSET);
+
+ /* re-enable CTI */
+ writel_relaxed(1, drvdata->base + CTICONTROL);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void cti_enable_hw_smp_call(void *info)
+{
+ struct cti_drvdata *drvdata = info;
+
+ cti_write_all_hw_regs(drvdata);
+}
+
+/* write regs to hardware and enable */
+static int cti_enable_hw(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ struct device *dev = &drvdata->csdev->dev;
+ int rc = 0;
+
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+
+ /* no need to do anything if enabled or unpowered */
+ if (config->hw_enabled || !config->hw_powered)
+ goto cti_state_unchanged;
+
+ /* claim the device */
+ rc = coresight_claim_device(drvdata->base);
+ if (rc)
+ goto cti_err_not_enabled;
+
+ if (drvdata->ctidev.cpu >= 0) {
+ rc = smp_call_function_single(drvdata->ctidev.cpu,
+ cti_enable_hw_smp_call,
+ drvdata, 1);
+ if (rc)
+ goto cti_err_not_enabled;
+ } else {
+ cti_write_all_hw_regs(drvdata);
+ }
+
+ config->hw_enabled = true;
+ atomic_inc(&drvdata->config.enable_req_count);
+ spin_unlock(&drvdata->spinlock);
+ return rc;
+
+cti_state_unchanged:
+ atomic_inc(&drvdata->config.enable_req_count);
+
+ /* cannot enable due to error */
+cti_err_not_enabled:
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put(dev->parent);
+ return rc;
+}
+
+/* disable hardware */
+static int cti_disable_hw(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ struct device *dev = &drvdata->csdev->dev;
+
+ spin_lock(&drvdata->spinlock);
+
+ /* check refcount - disable on 0 */
+ if (atomic_dec_return(&drvdata->config.enable_req_count) > 0)
+ goto cti_not_disabled;
+
+ /* no need to do anything if disabled or cpu unpowered */
+ if (!config->hw_enabled || !config->hw_powered)
+ goto cti_not_disabled;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* disable CTI */
+ writel_relaxed(0, drvdata->base + CTICONTROL);
+ config->hw_enabled = false;
+
+ coresight_disclaim_device_unlocked(drvdata->base);
+ CS_LOCK(drvdata->base);
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put(dev->parent);
+ return 0;
+
+ /* not disabled this call */
+cti_not_disabled:
+ spin_unlock(&drvdata->spinlock);
+ return 0;
+}
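
cti_enable_hw() and cti_disable_hw() are reference counted: every enable request bumps enable_req_count even when the register writes are skipped, and the hardware is only switched off once the count returns to zero, so a CTI shared by several associated devices stays up until the last of them releases it. A minimal sketch of that counting behaviour (plain C; locking and the powered checks omitted):

#include <stdio.h>

static int enable_req_count;
static int hw_enabled;

static void cti_like_enable(void)
{
	enable_req_count++;
	hw_enabled = 1;
}

static void cti_like_disable(void)
{
	if (--enable_req_count > 0)
		return;			/* another device still needs the CTI */
	hw_enabled = 0;
}

int main(void)
{
	cti_like_enable();		/* e.g. a source enables its CTI */
	cti_like_enable();		/* e.g. a sink shares the same CTI */
	cti_like_disable();
	printf("one disable: hw_enabled=%d, refs=%d\n",
	       hw_enabled, enable_req_count);
	cti_like_disable();
	printf("both disabled: hw_enabled=%d\n", hw_enabled);
	return 0;
}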
+
+void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value)
+{
+ CS_UNLOCK(drvdata->base);
+ writel_relaxed(value, drvdata->base + offset);
+ CS_LOCK(drvdata->base);
+}
+
+void cti_write_intack(struct device *dev, u32 ackval)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ /* write if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIINTACK, ackval);
+ spin_unlock(&drvdata->spinlock);
+}
+
+/*
+ * Look at the HW DEVID register for some of the HW settings.
+ * DEVID[15:8] - max number of in / out triggers.
+ */
+#define CTI_DEVID_MAXTRIGS(devid_val) ((int) BMVAL(devid_val, 8, 15))
+
+/* DEVID[19:16] - number of CTM channels */
+#define CTI_DEVID_CTMCHANNELS(devid_val) ((int) BMVAL(devid_val, 16, 19))
+
+static void cti_set_default_config(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ u32 devid;
+
+ devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+ config->nr_trig_max = CTI_DEVID_MAXTRIGS(devid);
+
+ /*
+ * no current hardware should exceed this, but protect the driver
+ * in case of fault / out of spec hw
+ */
+ if (config->nr_trig_max > CTIINOUTEN_MAX) {
+ dev_warn_once(dev,
+ "Limiting HW MaxTrig value(%d) to driver max(%d)\n",
+ config->nr_trig_max, CTIINOUTEN_MAX);
+ config->nr_trig_max = CTIINOUTEN_MAX;
+ }
+
+ config->nr_ctm_channels = CTI_DEVID_CTMCHANNELS(devid);
+
+ /* Most regs default to 0 as zalloc'ed, except... */
+ config->trig_filter_enable = true;
+ config->ctigate = GENMASK(config->nr_ctm_channels - 1, 0);
+ atomic_set(&config->enable_req_count, 0);
+}
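
The DEVID decode and the default gate value can be checked numerically; a runnable sketch assuming a hypothetical DEVID of 0x00040800 (8 triggers per direction, 4 CTM channels, so the reset gate passes all four channels):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t devid = 0x00040800;	/* hypothetical DEVID value */
	int nr_trig_max = (devid >> 8) & 0xff;		/* DEVID[15:8] */
	int nr_ctm_channels = (devid >> 16) & 0xf;	/* DEVID[19:16] */
	uint32_t ctigate = (1u << nr_ctm_channels) - 1;	/* GENMASK(n - 1, 0) */

	printf("triggers=%d channels=%d default gate=0x%x\n",
	       nr_trig_max, nr_ctm_channels, ctigate);	/* 8, 4, 0xf */
	return 0;
}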
+
+/*
+ * Add a connection entry to the list of connections for this
+ * CTI device.
+ */
+int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata,
+ struct cti_trig_con *tc,
+ struct coresight_device *csdev,
+ const char *assoc_dev_name)
+{
+ struct cti_device *cti_dev = &drvdata->ctidev;
+
+ tc->con_dev = csdev;
+ /*
+ * Prefer actual associated CS device dev name to supplied value -
+ * which is likely to be node name / other conn name.
+ */
+ if (csdev)
+ tc->con_dev_name = dev_name(&csdev->dev);
+ else if (assoc_dev_name != NULL) {
+ tc->con_dev_name = devm_kstrdup(dev,
+ assoc_dev_name, GFP_KERNEL);
+ if (!tc->con_dev_name)
+ return -ENOMEM;
+ }
+ list_add_tail(&tc->node, &cti_dev->trig_cons);
+ cti_dev->nr_trig_con++;
+
+ /* add connection usage bit info to overall info */
+ drvdata->config.trig_in_use |= tc->con_in->used_mask;
+ drvdata->config.trig_out_use |= tc->con_out->used_mask;
+
+ return 0;
+}
+
+/* create a trigger connection with appropriately sized signal groups */
+struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs,
+ int out_sigs)
+{
+ struct cti_trig_con *tc = NULL;
+ struct cti_trig_grp *in = NULL, *out = NULL;
+
+ tc = devm_kzalloc(dev, sizeof(struct cti_trig_con), GFP_KERNEL);
+ if (!tc)
+ return tc;
+
+ in = devm_kzalloc(dev,
+ offsetof(struct cti_trig_grp, sig_types[in_sigs]),
+ GFP_KERNEL);
+ if (!in)
+ return NULL;
+
+ out = devm_kzalloc(dev,
+ offsetof(struct cti_trig_grp, sig_types[out_sigs]),
+ GFP_KERNEL);
+ if (!out)
+ return NULL;
+
+ tc->con_in = in;
+ tc->con_out = out;
+ tc->con_in->nr_sigs = in_sigs;
+ tc->con_out->nr_sigs = out_sigs;
+ return tc;
+}
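
The offsetof(..., sig_types[n]) sizing used above is the standard flexible-array-member idiom: one allocation covering the fixed header plus exactly n trailing elements. A runnable userspace equivalent (struct name shortened for illustration):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct trig_grp {			/* shape of struct cti_trig_grp */
	int nr_sigs;
	unsigned int used_mask;
	int sig_types[];		/* flexible array member */
};

int main(void)
{
	int in_sigs = 8;
	size_t sz = offsetof(struct trig_grp, sig_types[in_sigs]);
	struct trig_grp *g = calloc(1, sz);	/* header + 8 trailing ints */

	if (!g)
		return 1;
	g->nr_sigs = in_sigs;
	printf("%zu bytes for %d signal types\n", sz, g->nr_sigs);
	free(g);
	return 0;
}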
+
+/*
+ * Add a default connection if nothing else is specified.
+ * single connection based on max in/out info, no assoc device
+ */
+int cti_add_default_connection(struct device *dev, struct cti_drvdata *drvdata)
+{
+ int ret = 0;
+ int n_trigs = drvdata->config.nr_trig_max;
+ u32 n_trig_mask = GENMASK(n_trigs - 1, 0);
+ struct cti_trig_con *tc = NULL;
+
+ /*
+ * Assume max trigs for in and out,
+ * all used, default sig types allocated
+ */
+ tc = cti_allocate_trig_con(dev, n_trigs, n_trigs);
+ if (!tc)
+ return -ENOMEM;
+
+ tc->con_in->used_mask = n_trig_mask;
+ tc->con_out->used_mask = n_trig_mask;
+ ret = cti_add_connection_entry(dev, drvdata, tc, NULL, "default");
+ return ret;
+}
+
+/** cti channel api **/
+/* attach/detach channel from trigger - write through if enabled. */
+int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
+ enum cti_trig_dir direction, u32 channel_idx,
+ u32 trigger_idx)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+ u32 trig_bitmask;
+ u32 chan_bitmask;
+ u32 reg_value;
+ int reg_offset;
+
+ /* ensure indexes in range */
+ if ((channel_idx >= config->nr_ctm_channels) ||
+ (trigger_idx >= config->nr_trig_max))
+ return -EINVAL;
+
+ trig_bitmask = BIT(trigger_idx);
+
+ /* ensure the trigger is registered and not filtered out */
+ if (direction == CTI_TRIG_IN) {
+ if (!(trig_bitmask & config->trig_in_use))
+ return -EINVAL;
+ } else {
+ if (!(trig_bitmask & config->trig_out_use))
+ return -EINVAL;
+
+ if ((config->trig_filter_enable) &&
+ (config->trig_out_filter & trig_bitmask))
+ return -EINVAL;
+ }
+
+ /* update the local register values */
+ chan_bitmask = BIT(channel_idx);
+ reg_offset = (direction == CTI_TRIG_IN ? CTIINEN(trigger_idx) :
+ CTIOUTEN(trigger_idx));
+
+ spin_lock(&drvdata->spinlock);
+
+ /* read - modify write - the trigger / channel enable value */
+ reg_value = direction == CTI_TRIG_IN ? config->ctiinen[trigger_idx] :
+ config->ctiouten[trigger_idx];
+ if (op == CTI_CHAN_ATTACH)
+ reg_value |= chan_bitmask;
+ else
+ reg_value &= ~chan_bitmask;
+
+ /* write local copy */
+ if (direction == CTI_TRIG_IN)
+ config->ctiinen[trigger_idx] = reg_value;
+ else
+ config->ctiouten[trigger_idx] = reg_value;
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, reg_offset, reg_value);
+ spin_unlock(&drvdata->spinlock);
+ return 0;
+}
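
For the output direction the function rejects both unregistered triggers and triggers blocked by the filter (typically dbgreq / restart on a core CTI) before the read-modify-write on the shadow register. A compact model of those checks with hypothetical mask values:

#include <stdio.h>
#include <stdint.h>

/* mirrors the CTI_TRIG_OUT checks and the read-modify-write above */
static int attach_out(uint32_t *ctiouten, uint32_t trig_out_use,
		      uint32_t trig_out_filter, int filter_on,
		      int trig, int chan)
{
	uint32_t trig_bit = 1u << trig;

	if (!(trig_bit & trig_out_use))
		return -1;			/* trigger not registered */
	if (filter_on && (trig_out_filter & trig_bit))
		return -1;			/* blocked by the out filter */
	ctiouten[trig] |= 1u << chan;		/* CTI_CHAN_ATTACH */
	return 0;
}

int main(void)
{
	uint32_t ctiouten[8] = { 0 };

	/* hypothetical masks: triggers 0-7 in use, trigger 0 filtered */
	if (attach_out(ctiouten, 0xff, 0x1, 1, 0, 2))
		printf("trigger 0 refused by the filter\n");
	if (!attach_out(ctiouten, 0xff, 0x1, 1, 1, 2))
		printf("CTIOUTEN(1) = 0x%x\n", ctiouten[1]);
	return 0;
}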
+
+int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op,
+ u32 channel_idx)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+ u32 chan_bitmask;
+ u32 reg_value;
+ int err = 0;
+
+ if (channel_idx >= config->nr_ctm_channels)
+ return -EINVAL;
+
+ chan_bitmask = BIT(channel_idx);
+
+ spin_lock(&drvdata->spinlock);
+ reg_value = config->ctigate;
+ switch (op) {
+ case CTI_GATE_CHAN_ENABLE:
+ reg_value |= chan_bitmask;
+ break;
+
+ case CTI_GATE_CHAN_DISABLE:
+ reg_value &= ~chan_bitmask;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if (err == 0) {
+ config->ctigate = reg_value;
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIGATE, reg_value);
+ }
+ spin_unlock(&drvdata->spinlock);
+ return err;
+}
+
+int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
+ u32 channel_idx)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+ u32 chan_bitmask;
+ u32 reg_value;
+ u32 reg_offset;
+ int err = 0;
+
+ if (channel_idx >= config->nr_ctm_channels)
+ return -EINVAL;
+
+ chan_bitmask = BIT(channel_idx);
+
+ spin_lock(&drvdata->spinlock);
+ reg_value = config->ctiappset;
+ switch (op) {
+ case CTI_CHAN_SET:
+ config->ctiappset |= chan_bitmask;
+ reg_value = config->ctiappset;
+ reg_offset = CTIAPPSET;
+ break;
+
+ case CTI_CHAN_CLR:
+ config->ctiappset &= ~chan_bitmask;
+ reg_value = chan_bitmask;
+ reg_offset = CTIAPPCLEAR;
+ break;
+
+ case CTI_CHAN_PULSE:
+ config->ctiappset &= ~chan_bitmask;
+ reg_value = chan_bitmask;
+ reg_offset = CTIAPPPULSE;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if ((err == 0) && cti_active(config))
+ cti_write_single_reg(drvdata, reg_offset, reg_value);
+ spin_unlock(&drvdata->spinlock);
+
+ return err;
+}
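
Note the asymmetry in what gets written: CTI_CHAN_SET writes the whole accumulated ctiappset value to CTIAPPSET, while CLR and PULSE write only the single channel bit, since CTIAPPCLEAR and CTIAPPPULSE act on the bits that are written. A small model of the shadow-state bookkeeping:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ctiappset = 0;		/* shadow of the APP channel set */
	uint32_t chan_bit = 1u << 1;	/* channel 1 */

	/* CTI_CHAN_SET: accumulate, then write the whole set */
	ctiappset |= chan_bit;
	printf("CTIAPPSET   <- 0x%x\n", ctiappset);

	/* CTI_CHAN_CLR: drop from the shadow, write only the bit */
	ctiappset &= ~chan_bit;
	printf("CTIAPPCLEAR <- 0x%x\n", chan_bit);
	return 0;
}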
+
+/*
+ * Look for a matching connection device name in the list of connections.
+ * If found then swap in the csdev name, set trig con association pointer
+ * and return true.
+ */
+static bool
+cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
+ struct coresight_device *csdev)
+{
+ struct cti_trig_con *tc;
+
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev_name) {
+ if (!strcmp(node_name, tc->con_dev_name)) {
+ /* match: so swap in csdev name & dev */
+ tc->con_dev_name = dev_name(&csdev->dev);
+ tc->con_dev = csdev;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/*
+ * Search the cti list to add an associated CTI into the supplied CS device
+ * This will set the association if the CTI is declared before the CS device.
+ * (called from coresight_register() with coresight_mutex locked).
+ */
+void cti_add_assoc_to_csdev(struct coresight_device *csdev)
+{
+ struct cti_drvdata *ect_item;
+ struct cti_device *ctidev;
+ const char *node_name = NULL;
+
+ /* protect the list */
+ mutex_lock(&ect_mutex);
+
+ /* exit if current is an ECT device. */
+ if ((csdev->type == CORESIGHT_DEV_TYPE_ECT) || list_empty(&ect_net))
+ goto cti_add_done;
+
+ /* if we didn't find the csdev previously we used the fwnode name */
+ node_name = cti_plat_get_node_name(dev_fwnode(csdev->dev.parent));
+ if (!node_name)
+ goto cti_add_done;
+
+ /* for each CTI in list... */
+ list_for_each_entry(ect_item, &ect_net, node) {
+ ctidev = &ect_item->ctidev;
+ if (cti_match_fixup_csdev(ctidev, node_name, csdev)) {
+ /*
+ * if we found a matching csdev then update the ECT
+ * association pointer for the device with this CTI.
+ */
+ csdev->ect_dev = ect_item->csdev;
+ break;
+ }
+ }
+cti_add_done:
+ mutex_unlock(&ect_mutex);
+}
+EXPORT_SYMBOL_GPL(cti_add_assoc_to_csdev);
+
+/*
+ * Removing the associated devices is easier.
+ * A CTI will not have a value for csdev->ect_dev.
+ */
+void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
+{
+ struct cti_drvdata *ctidrv;
+ struct cti_trig_con *tc;
+ struct cti_device *ctidev;
+
+ mutex_lock(&ect_mutex);
+ if (csdev->ect_dev) {
+ ctidrv = csdev_to_cti_drvdata(csdev->ect_dev);
+ ctidev = &ctidrv->ctidev;
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev == csdev->ect_dev) {
+ tc->con_dev = NULL;
+ break;
+ }
+ }
+ csdev->ect_dev = NULL;
+ }
+ mutex_unlock(&ect_mutex);
+}
+EXPORT_SYMBOL_GPL(cti_remove_assoc_from_csdev);
+
+/*
+ * Update the cross references where the associated device was found
+ * while we were building the connection info. This will occur if the
+ * assoc device was registered before the CTI.
+ */
+static void cti_update_conn_xrefs(struct cti_drvdata *drvdata)
+{
+ struct cti_trig_con *tc;
+ struct cti_device *ctidev = &drvdata->ctidev;
+
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev)
+ /* set tc->con_dev->ect_dev */
+ coresight_set_assoc_ectdev_mutex(tc->con_dev,
+ drvdata->csdev);
+ }
+}
+
+static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
+{
+ struct cti_trig_con *tc;
+ struct cti_device *ctidev = &drvdata->ctidev;
+
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev) {
+ coresight_set_assoc_ectdev_mutex(tc->con_dev,
+ NULL);
+ }
+ }
+}
+
+/** cti ect operations **/
+int cti_enable(struct coresight_device *csdev)
+{
+ struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
+
+ return cti_enable_hw(drvdata);
+}
+
+int cti_disable(struct coresight_device *csdev)
+{
+ struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
+
+ return cti_disable_hw(drvdata);
+}
+
+const struct coresight_ops_ect cti_ops_ect = {
+ .enable = cti_enable,
+ .disable = cti_disable,
+};
+
+const struct coresight_ops cti_ops = {
+ .ect_ops = &cti_ops_ect,
+};
+
+/*
+ * Free up CTI-specific resources.
+ * Called by dev->release; needs to call down to the underlying csdev release.
+ */
+static void cti_device_release(struct device *dev)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_drvdata *ect_item, *ect_tmp;
+
+ mutex_lock(&ect_mutex);
+ cti_remove_conn_xrefs(drvdata);
+
+ /* remove from the list */
+ list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) {
+ if (ect_item == drvdata) {
+ list_del(&ect_item->node);
+ break;
+ }
+ }
+ mutex_unlock(&ect_mutex);
+
+ if (drvdata->csdev_release)
+ drvdata->csdev_release(dev);
+}
+
+static int cti_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret = 0;
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct cti_drvdata *drvdata = NULL;
+ struct coresight_desc cti_desc;
+ struct coresight_platform_data *pdata = NULL;
+ struct resource *res = &adev->res;
+
+ /* driver data */
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ dev_info(dev, "%s, mem err\n", __func__);
+ goto err_out;
+ }
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ dev_err(dev, "%s, remap err\n", __func__);
+ goto err_out;
+ }
+ drvdata->base = base;
+
+ dev_set_drvdata(dev, drvdata);
+
+ /* default CTI device info */
+ drvdata->ctidev.cpu = -1;
+ drvdata->ctidev.nr_trig_con = 0;
+ drvdata->ctidev.ctm_id = 0;
+ INIT_LIST_HEAD(&drvdata->ctidev.trig_cons);
+
+ spin_lock_init(&drvdata->spinlock);
+
+ /* initialise CTI driver config values */
+ cti_set_default_config(dev, drvdata);
+
+ pdata = coresight_cti_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ dev_err(dev, "coresight_cti_get_platform_data err\n");
+ ret = PTR_ERR(pdata);
+ goto err_out;
+ }
+
+ /* default to powered - could change on PM notifications */
+ drvdata->config.hw_powered = true;
+
+ /* set up device name - depends on whether it is CPU-bound or a system CTI */
+ if (drvdata->ctidev.cpu >= 0)
+ cti_desc.name = devm_kasprintf(dev, GFP_KERNEL, "cti_cpu%d",
+ drvdata->ctidev.cpu);
+ else
+ cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev);
+ if (!cti_desc.name) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* create dynamic attributes for connections */
+ ret = cti_create_cons_sysfs(dev, drvdata);
+ if (ret) {
+ dev_err(dev, "%s: create dynamic sysfs entries failed\n",
+ cti_desc.name);
+ goto err_out;
+ }
+
+ /* set up coresight component description */
+ cti_desc.pdata = pdata;
+ cti_desc.type = CORESIGHT_DEV_TYPE_ECT;
+ cti_desc.subtype.ect_subtype = CORESIGHT_DEV_SUBTYPE_ECT_CTI;
+ cti_desc.ops = &cti_ops;
+ cti_desc.groups = drvdata->ctidev.con_groups;
+ cti_desc.dev = dev;
+ drvdata->csdev = coresight_register(&cti_desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto err_out;
+ }
+
+ /* add to list of CTI devices */
+ mutex_lock(&ect_mutex);
+ list_add(&drvdata->node, &ect_net);
+ /* set any cross references */
+ cti_update_conn_xrefs(drvdata);
+ mutex_unlock(&ect_mutex);
+
+ /* set up release chain */
+ drvdata->csdev_release = drvdata->csdev->dev.release;
+ drvdata->csdev->dev.release = cti_device_release;
+
+ /* all done - dec pm refcount */
+ pm_runtime_put(&adev->dev);
+ dev_info(&drvdata->csdev->dev, "CTI initialized\n");
+ return 0;
+
+err_out:
+ return ret;
+}
+
+static struct amba_cs_uci_id uci_id_cti[] = {
+ {
+ /* CTI UCI data */
+ .devarch = 0x47701a14, /* CTI v2 */
+ .devarch_mask = 0xfff0ffff,
+ .devtype = 0x00000014, /* maj(0x4-debug) min(0x1-ECT) */
+ }
+};
+
+static const struct amba_id cti_ids[] = {
+ CS_AMBA_ID(0x000bb906), /* Coresight CTI (SoC 400), C-A72, C-A57 */
+ CS_AMBA_ID(0x000bb922), /* CTI - C-A8 */
+ CS_AMBA_ID(0x000bb9a8), /* CTI - C-A53 */
+ CS_AMBA_ID(0x000bb9aa), /* CTI - C-A73 */
+ CS_AMBA_UCI_ID(0x000bb9da, uci_id_cti), /* CTI - C-A35 */
+ CS_AMBA_UCI_ID(0x000bb9ed, uci_id_cti), /* Coresight CTI (SoC 600) */
+ { 0, 0},
+};
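
The UCI entries match DEVARCH with the revision field masked off, so any revision of a CTI v2 part pairs with the same table entry; a runnable check using a hypothetical revision-1 DEVARCH value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t expected = 0x47701a14;	/* CTI v2 devarch from uci_id_cti */
	uint32_t mask = 0xfff0ffff;	/* clears the revision nibble */
	uint32_t devarch = 0x47711a14;	/* hypothetical revision-1 part */

	printf("match: %s\n", (devarch & mask) == expected ? "yes" : "no");
	return 0;
}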
+
+static struct amba_driver cti_driver = {
+ .drv = {
+ .name = "coresight-cti",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ },
+ .probe = cti_probe,
+ .id_table = cti_ids,
+};
+builtin_amba_driver(cti_driver);
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
new file mode 100644
index 000000000000..004df3ab9dd0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#ifndef _CORESIGHT_CORESIGHT_CTI_H
+#define _CORESIGHT_CORESIGHT_CTI_H
+
+#include <asm/local.h>
+#include <linux/spinlock.h>
+#include "coresight-priv.h"
+
+/*
+ * Device registers
+ * 0x000 - 0x144: CTI programming and status
+ * 0xEDC - 0xEF8: CTI integration test.
+ * 0xF00 - 0xFFC: Coresight management registers.
+ */
+/* CTI programming registers */
+#define CTICONTROL 0x000
+#define CTIINTACK 0x010
+#define CTIAPPSET 0x014
+#define CTIAPPCLEAR 0x018
+#define CTIAPPPULSE 0x01C
+#define CTIINEN(n) (0x020 + (4 * n))
+#define CTIOUTEN(n) (0x0A0 + (4 * n))
+#define CTITRIGINSTATUS 0x130
+#define CTITRIGOUTSTATUS 0x134
+#define CTICHINSTATUS 0x138
+#define CTICHOUTSTATUS 0x13C
+#define CTIGATE 0x140
+#define ASICCTL 0x144
+/* Integration test registers */
+#define ITCHINACK 0xEDC /* WO CTI CSSoc 400 only */
+#define ITTRIGINACK 0xEE0 /* WO CTI CSSoc 400 only */
+#define ITCHOUT 0xEE4 /* WO RW-600 */
+#define ITTRIGOUT 0xEE8 /* WO RW-600 */
+#define ITCHOUTACK 0xEEC /* RO CTI CSSoc 400 only */
+#define ITTRIGOUTACK 0xEF0 /* RO CTI CSSoc 400 only */
+#define ITCHIN 0xEF4 /* RO */
+#define ITTRIGIN 0xEF8 /* RO */
+/* management registers */
+#define CTIDEVAFF0 0xFA8
+#define CTIDEVAFF1 0xFAC
+
+/*
+ * CTI CSSoc 600 has a max of 32 trigger signals per direction.
+ * CTI CSSoc 400 has 8 IO triggers - other CTIs can be implementation defined.
+ * Max of in and out defined in the DEVID register.
+ * - pick up actual number used from .dts parameters if present.
+ */
+#define CTIINOUTEN_MAX 32
+
+/**
+ * struct cti_trig_grp - group of related trigger signals
+ *
+ * @nr_sigs: number of signals in the group.
+ * @used_mask: bitmask representing the signal indexes in the group.
+ * @sig_types: array of types for the signals, length nr_sigs.
+ */
+struct cti_trig_grp {
+ int nr_sigs;
+ u32 used_mask;
+ int sig_types[];
+};
+
+/**
+ * struct cti_trig_con - trigger connection between a CTI and another device;
+ * lists the input and output trigger signals for that (CoreSight) device.
+ *
+ * @con_in: connected CTIIN signals for the device.
+ * @con_out: connected CTIOUT signals for the device.
+ * @con_dev: coresight device connected to the CTI, NULL if not CS device
+ * @con_dev_name: name of connected device (CS or CPU)
+ * @node: entry node in list of connections.
+ * @con_attrs: Dynamic sysfs attributes specific to this connection.
+ * @attr_group: Dynamic attribute group created for this connection.
+ */
+struct cti_trig_con {
+ struct cti_trig_grp *con_in;
+ struct cti_trig_grp *con_out;
+ struct coresight_device *con_dev;
+ const char *con_dev_name;
+ struct list_head node;
+ struct attribute **con_attrs;
+ struct attribute_group *attr_group;
+};
+
+/**
+ * struct cti_device - description of CTI device properties.
+ *
+ * @nr_trig_con: Number of external devices connected to this device.
+ * @ctm_id: which CTM this device is connected to (by default it is
+ * assumed there is a single CTM per SoC, ID 0).
+ * @trig_cons: list of connections to this device.
+ * @cpu: CPU ID if associated with CPU, -1 otherwise.
+ * @con_groups: combined static and dynamic sysfs groups for trigger
+ * connections.
+ */
+struct cti_device {
+ int nr_trig_con;
+ u32 ctm_id;
+ struct list_head trig_cons;
+ int cpu;
+ const struct attribute_group **con_groups;
+};
+
+/**
+ * struct cti_config - configuration of the CTI device hardware
+ *
+ * @nr_trig_max: Max number of trigger signals implemented on device.
+ * (max of trig_in or trig_out) - from ID register.
+ * @nr_ctm_channels: number of available CTM channels - from ID register.
+ * @enable_req_count: number of outstanding enable requests; the CTI stays
+ * enabled while this is non-zero.
+ * @hw_enabled: true if hw is currently enabled.
+ * @hw_powered: true if associated cpu powered on, or no cpu.
+ * @trig_in_use: bitfield of in triggers registered as in use.
+ * @trig_out_use: bitfield of out triggers registered as in use.
+ * @trig_out_filter: bitfield of out triggers that are blocked if filter
+ * enabled. Typically this would be dbgreq / restart on
+ * a core CTI.
+ * @trig_filter_enable: true if filtering is enabled.
+ * @xtrig_rchan_sel: channel selection for xtrigger connection show.
+ * @ctiappset: CTI Software application channel set.
+ * @ctiinout_sel: register selector for INEN and OUTEN regs.
+ * @ctiinen: enable input trigger to a channel.
+ * @ctiouten: enable output trigger from a channel.
+ * @ctigate: gate channel output from CTI to CTM.
+ * @asicctl: asic control register.
+ */
+struct cti_config {
+ /* hardware description */
+ int nr_ctm_channels;
+ int nr_trig_max;
+
+ /* cti enable control */
+ atomic_t enable_req_count;
+ bool hw_enabled;
+ bool hw_powered;
+
+ /* registered triggers and filtering */
+ u32 trig_in_use;
+ u32 trig_out_use;
+ u32 trig_out_filter;
+ bool trig_filter_enable;
+ u8 xtrig_rchan_sel;
+
+ /* cti cross trig programmable regs */
+ u32 ctiappset;
+ u8 ctiinout_sel;
+ u32 ctiinen[CTIINOUTEN_MAX];
+ u32 ctiouten[CTIINOUTEN_MAX];
+ u32 ctigate;
+ u32 asicctl;
+};
+
+/**
+ * struct cti_drvdata - specifics for the CTI device
+ * @base: Memory-mapped base address for this component.
+ * @csdev: Standard CoreSight device information.
+ * @ctidev: Extra information needed by the CTI/CTM framework.
+ * @spinlock: Control data access to one at a time.
+ * @config: Configuration data for this CTI device.
+ * @node: List entry of this device in the list of CTI devices.
+ * @csdev_release: release function for underlying coresight_device.
+ */
+struct cti_drvdata {
+ void __iomem *base;
+ struct coresight_device *csdev;
+ struct cti_device ctidev;
+ spinlock_t spinlock;
+ struct cti_config config;
+ struct list_head node;
+ void (*csdev_release)(struct device *dev);
+};
+
+/*
+ * Channel operation types.
+ */
+enum cti_chan_op {
+ CTI_CHAN_ATTACH,
+ CTI_CHAN_DETACH,
+};
+
+enum cti_trig_dir {
+ CTI_TRIG_IN,
+ CTI_TRIG_OUT,
+};
+
+enum cti_chan_gate_op {
+ CTI_GATE_CHAN_ENABLE,
+ CTI_GATE_CHAN_DISABLE,
+};
+
+enum cti_chan_set_op {
+ CTI_CHAN_SET,
+ CTI_CHAN_CLR,
+ CTI_CHAN_PULSE,
+};
+
+/* private cti driver fns & vars */
+extern const struct attribute_group *coresight_cti_groups[];
+int cti_add_default_connection(struct device *dev,
+ struct cti_drvdata *drvdata);
+int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata,
+ struct cti_trig_con *tc,
+ struct coresight_device *csdev,
+ const char *assoc_dev_name);
+struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs,
+ int out_sigs);
+int cti_enable(struct coresight_device *csdev);
+int cti_disable(struct coresight_device *csdev);
+void cti_write_all_hw_regs(struct cti_drvdata *drvdata);
+void cti_write_intack(struct device *dev, u32 ackval);
+void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value);
+int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
+ enum cti_trig_dir direction, u32 channel_idx,
+ u32 trigger_idx);
+int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op,
+ u32 channel_idx);
+int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
+ u32 channel_idx);
+int cti_create_cons_sysfs(struct device *dev, struct cti_drvdata *drvdata);
+struct coresight_platform_data *
+coresight_cti_get_platform_data(struct device *dev);
+const char *cti_plat_get_node_name(struct fwnode_handle *fwnode);
+
+/* cti powered and enabled */
+static inline bool cti_active(struct cti_config *cfg)
+{
+ return cfg->hw_powered && cfg->hw_enabled;
+}
+
+#endif /* _CORESIGHT_CORESIGHT_CTI_H */
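
For orientation, a caller (such as one of the sysfs store handlers) might drive the channel API declared above as follows; the function name is hypothetical, dev is assumed to be the CTI's coresight device, and this is a kernel-context sketch rather than a standalone program:

#include "coresight-cti.h"

/*
 * Hypothetical caller: route output trigger 1 onto channel 0, then pulse
 * the channel via the software APP registers. "dev" is assumed to be the
 * CTI's coresight device, as in the sysfs handlers.
 */
static int example_xtrig_pulse(struct device *dev)
{
	int err;

	err = cti_channel_trig_op(dev, CTI_CHAN_ATTACH, CTI_TRIG_OUT, 0, 1);
	if (err)
		return err;
	return cti_channel_setop(dev, CTI_CHAN_PULSE, 0);
}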
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 3c5bee429105..43418a2126ff 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -57,6 +57,26 @@ coresight_find_device_by_fwnode(struct fwnode_handle *fwnode)
return bus_find_device_by_fwnode(&amba_bustype, fwnode);
}
+/*
+ * Find a registered coresight device from a device fwnode.
+ * The node info is associated with the AMBA parent, but the
+ * csdev keeps a copy, so iterate over the coresight bus to
+ * find the device.
+ */
+struct coresight_device *
+coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode)
+{
+ struct device *dev;
+ struct coresight_device *csdev = NULL;
+
+ dev = bus_find_device_by_fwnode(&coresight_bustype, r_fwnode);
+ if (dev) {
+ csdev = to_coresight_device(dev);
+ put_device(dev);
+ }
+ return csdev;
+}
+
#ifdef CONFIG_OF
static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
{
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 82e563cdc879..890f9a5c97c6 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -22,6 +22,7 @@
#define CORESIGHT_CLAIMCLR 0xfa4
#define CORESIGHT_LAR 0xfb0
#define CORESIGHT_LSR 0xfb4
+#define CORESIGHT_DEVARCH 0xfbc
#define CORESIGHT_AUTHSTATUS 0xfb8
#define CORESIGHT_DEVID 0xfc8
#define CORESIGHT_DEVTYPE 0xfcc
@@ -161,6 +162,16 @@ static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
#endif
+#ifdef CONFIG_CORESIGHT_CTI
+extern void cti_add_assoc_to_csdev(struct coresight_device *csdev);
+extern void cti_remove_assoc_from_csdev(struct coresight_device *csdev);
+
+#else
+static inline void cti_add_assoc_to_csdev(struct coresight_device *csdev) {}
+static inline void
+cti_remove_assoc_from_csdev(struct coresight_device *csdev) {}
+#endif
+
/*
* Macros and inline functions to handle CoreSight UCI data and driver
* private data in AMBA ID table entries, and extract data values.
@@ -201,5 +212,9 @@ static inline void *coresight_get_uci_data(const struct amba_id *id)
}
void coresight_release_platform_data(struct coresight_platform_data *pdata);
+struct coresight_device *
+coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
+void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
+ struct coresight_device *ect_csdev);
#endif
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index ef20f74c85fa..c71553c09f8e 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -216,6 +216,44 @@ void coresight_disclaim_device(void __iomem *base)
CS_LOCK(base);
}
+/* enable or disable an associated CTI device of the supplied CS device */
+static int
+coresight_control_assoc_ectdev(struct coresight_device *csdev, bool enable)
+{
+ int ect_ret = 0;
+ struct coresight_device *ect_csdev = csdev->ect_dev;
+
+ if (!ect_csdev)
+ return 0;
+
+ if (enable) {
+ if (ect_ops(ect_csdev)->enable)
+ ect_ret = ect_ops(ect_csdev)->enable(ect_csdev);
+ } else {
+ if (ect_ops(ect_csdev)->disable)
+ ect_ret = ect_ops(ect_csdev)->disable(ect_csdev);
+ }
+
+ /* output warning if ECT enable is preventing trace operation */
+ if (ect_ret)
+ dev_info(&csdev->dev, "Associated ECT device (%s) %s failed\n",
+ dev_name(&ect_csdev->dev),
+ enable ? "enable" : "disable");
+ return ect_ret;
+}
+
+/*
+ * Set the associated ect / cti device while holding the coresight_mutex
+ * to avoid a race with coresight_enable that may try to use this value.
+ */
+void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
+ struct coresight_device *ect_csdev)
+{
+ mutex_lock(&coresight_mutex);
+ csdev->ect_dev = ect_csdev;
+ mutex_unlock(&coresight_mutex);
+}
+
static int coresight_enable_sink(struct coresight_device *csdev,
u32 mode, void *data)
{
@@ -228,9 +266,14 @@ static int coresight_enable_sink(struct coresight_device *csdev,
if (!sink_ops(csdev)->enable)
return -EINVAL;
- ret = sink_ops(csdev)->enable(csdev, mode, data);
+ ret = coresight_control_assoc_ectdev(csdev, true);
if (ret)
return ret;
+ ret = sink_ops(csdev)->enable(csdev, mode, data);
+ if (ret) {
+ coresight_control_assoc_ectdev(csdev, false);
+ return ret;
+ }
csdev->enable = true;
return 0;
@@ -246,6 +289,7 @@ static void coresight_disable_sink(struct coresight_device *csdev)
ret = sink_ops(csdev)->disable(csdev);
if (ret)
return;
+ coresight_control_assoc_ectdev(csdev, false);
csdev->enable = false;
}
@@ -269,8 +313,15 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0)
return outport;
- if (link_ops(csdev)->enable)
- ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (link_ops(csdev)->enable) {
+ ret = coresight_control_assoc_ectdev(csdev, true);
+ if (!ret) {
+ ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (ret)
+ coresight_control_assoc_ectdev(csdev, false);
+ }
+ }
+
if (!ret)
csdev->enable = true;
@@ -300,8 +351,10 @@ static void coresight_disable_link(struct coresight_device *csdev,
nr_conns = 1;
}
- if (link_ops(csdev)->disable)
+ if (link_ops(csdev)->disable) {
link_ops(csdev)->disable(csdev, inport, outport);
+ coresight_control_assoc_ectdev(csdev, false);
+ }
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->refcnt[i]) != 0)
@@ -322,9 +375,14 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
if (!csdev->enable) {
if (source_ops(csdev)->enable) {
- ret = source_ops(csdev)->enable(csdev, NULL, mode);
+ ret = coresight_control_assoc_ectdev(csdev, true);
if (ret)
return ret;
+ ret = source_ops(csdev)->enable(csdev, NULL, mode);
+ if (ret) {
+ coresight_control_assoc_ectdev(csdev, false);
+ return ret;
+ }
}
csdev->enable = true;
}
@@ -347,6 +405,7 @@ static bool coresight_disable_source(struct coresight_device *csdev)
if (atomic_dec_return(csdev->refcnt) == 0) {
if (source_ops(csdev)->disable)
source_ops(csdev)->disable(csdev, NULL);
+ coresight_control_assoc_ectdev(csdev, false);
csdev->enable = false;
}
return !csdev->enable;
@@ -955,12 +1014,16 @@ static struct device_type coresight_dev_type[] = {
{
.name = "helper",
},
+ {
+ .name = "ect",
+ },
};
static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
+ cti_remove_assoc_from_csdev(csdev);
fwnode_handle_put(csdev->dev.fwnode);
kfree(csdev->refcnt);
kfree(csdev);
@@ -1027,17 +1090,11 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_connection *conn = &csdev->pdata->conns[i];
- struct device *dev = NULL;
- dev = bus_find_device_by_fwnode(&coresight_bustype, conn->child_fwnode);
- if (dev) {
- conn->child_dev = to_coresight_device(dev);
- /* and put reference from 'bus_find_device()' */
- put_device(dev);
- } else {
+ conn->child_dev =
+ coresight_find_csdev_by_fwnode(conn->child_fwnode);
+ if (!conn->child_dev)
csdev->orphan = true;
- conn->child_dev = NULL;
- }
}
}
@@ -1249,6 +1306,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
coresight_fixup_device_conns(csdev);
coresight_fixup_orphan_conns(csdev);
+ cti_add_assoc_to_csdev(csdev);
mutex_unlock(&coresight_mutex);
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 6f4f5486fe6d..5fe694708b7a 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -47,11 +47,13 @@ struct intel_th_output {
/**
* struct intel_th_drvdata - describes hardware capabilities and quirks
* @tscu_enable: device needs SW to enable time stamping unit
+ * @multi_is_broken: device's multiblock mode is broken
* @has_mintctl: device has interrupt control (MINTCTL) register
* @host_mode_only: device can only operate in 'host debugger' mode
*/
struct intel_th_drvdata {
unsigned int tscu_enable : 1,
+ multi_is_broken : 1,
has_mintctl : 1,
host_mode_only : 1;
};
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 255f8f41c8ff..3a77551fb4fc 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -138,6 +138,7 @@ struct msc {
struct list_head win_list;
struct sg_table single_sgt;
struct msc_window *cur_win;
+ struct msc_window *switch_on_unlock;
unsigned long nr_pages;
unsigned long single_sz;
unsigned int single_wrap : 1;
@@ -154,10 +155,13 @@ struct msc {
struct list_head iter_list;
+ bool stop_on_full;
+
/* config */
unsigned int enabled : 1,
wrap : 1,
- do_irq : 1;
+ do_irq : 1,
+ multi_is_broken : 1;
unsigned int mode;
unsigned int burst_len;
unsigned int index;
@@ -1665,7 +1669,7 @@ static int intel_th_msc_init(struct msc *msc)
{
atomic_set(&msc->user_count, -1);
- msc->mode = MSC_MODE_MULTI;
+ msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
mutex_init(&msc->buf_mutex);
INIT_LIST_HEAD(&msc->win_list);
INIT_LIST_HEAD(&msc->iter_list);
@@ -1717,6 +1721,10 @@ void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
return;
msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
+ if (msc->switch_on_unlock == win) {
+ msc->switch_on_unlock = NULL;
+ msc_win_switch(msc);
+ }
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
@@ -1757,7 +1765,11 @@ static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
/* next window: if READY, proceed, if LOCKED, stop the trace */
if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
- schedule_work(&msc->work);
+ if (msc->stop_on_full)
+ schedule_work(&msc->work);
+ else
+ msc->switch_on_unlock = next_win;
+
return IRQ_HANDLED;
}
@@ -1877,6 +1889,9 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
return -EINVAL;
found:
+ if (i == MSC_MODE_MULTI && msc->multi_is_broken)
+ return -EOPNOTSUPP;
+
mutex_lock(&msc->buf_mutex);
ret = 0;
@@ -2047,11 +2062,36 @@ win_switch_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_WO(win_switch);
+static ssize_t stop_on_full_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", msc->stop_on_full);
+}
+
+static ssize_t stop_on_full_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = kstrtobool(buf, &msc->stop_on_full);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(stop_on_full);
+
static struct attribute *msc_output_attrs[] = {
&dev_attr_wrap.attr,
&dev_attr_mode.attr,
&dev_attr_nr_pages.attr,
&dev_attr_win_switch.attr,
+ &dev_attr_stop_on_full.attr,
NULL,
};
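
With stop_on_full clear (the default) a locked next window now defers the window switch until that window is unlocked; writing 1 restores the previous stop-the-trace behaviour. A minimal userspace sketch of flipping the attribute (the sysfs path is hypothetical and varies per system):

#include <stdio.h>

int main(void)
{
	/* hypothetical path; the device name varies per system */
	FILE *f = fopen("/sys/bus/intel_th/devices/0-msc0/stop_on_full", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* stop the trace instead of deferring */
	fclose(f);
	return 0;
}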
@@ -2083,6 +2123,9 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
if (!res)
msc->do_irq = 1;
+ if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
+ msc->multi_is_broken = 1;
+
msc->index = thdev->id;
msc->thdev = thdev;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 86aa6a46bcba..7ccac74553a6 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -120,6 +120,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
+static const struct intel_th_drvdata intel_th_1x_multi_is_broken = {
+ .multi_is_broken = 1,
+};
+
static const struct intel_th_drvdata intel_th_2x = {
.tscu_enable = 1,
.has_mintctl = 1,
@@ -152,7 +156,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
{
/* Kaby Lake PCH-H */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
- .driver_data = (kernel_ulong_t)0,
+ .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken,
},
{
/* Denverton */
@@ -207,7 +211,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
{
/* Comet Lake PCH-V */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
- .driver_data = (kernel_ulong_t)&intel_th_2x,
+ .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken,
},
{
/* Ice Lake NNPI */
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 5ac93f41bfec..7f10312d1b88 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -459,17 +459,17 @@ static int pca_init(struct i2c_adapter *adap)
/* To avoid integer overflow, use clock/100 for calculations */
clock = pca_clock(pca_data) / 100;
- if (pca_data->i2c_clock > 1000000) {
+ if (pca_data->i2c_clock > I2C_MAX_FAST_MODE_PLUS_FREQ) {
mode = I2C_PCA_MODE_TURBO;
min_tlow = 14;
min_thi = 5;
raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */
- } else if (pca_data->i2c_clock > 400000) {
+ } else if (pca_data->i2c_clock > I2C_MAX_FAST_MODE_FREQ) {
mode = I2C_PCA_MODE_FASTP;
min_tlow = 17;
min_thi = 9;
raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */
- } else if (pca_data->i2c_clock > 100000) {
+ } else if (pca_data->i2c_clock > I2C_MAX_STANDARD_MODE_FREQ) {
mode = I2C_PCA_MODE_FAST;
min_tlow = 44;
min_thi = 20;
@@ -542,7 +542,7 @@ int i2c_pca_add_numbered_bus(struct i2c_adapter *adap)
EXPORT_SYMBOL(i2c_pca_add_numbered_bus);
MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, "
- "Wolfram Sang <w.sang@pengutronix.de>");
+ "Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("I2C-Bus PCA9564/PCA9665 algorithm");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 1de23b4f3809..16ddc26c00e6 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -70,6 +70,7 @@
* @isr_mask: cached copy of local ISR enables.
* @isr_status: cached copy of local ISR status.
* @lock: spinlock for IRQ synchronization.
+ * @isr_mutex: mutex for IRQ thread.
*/
struct altr_i2c_dev {
void __iomem *base;
@@ -86,6 +87,7 @@ struct altr_i2c_dev {
u32 isr_mask;
u32 isr_status;
spinlock_t lock; /* IRQ synchronization */
+ struct mutex isr_mutex;
};
static void
@@ -147,7 +149,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
(ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_TCT_SHFT);
u32 t_high, t_low;
- if (idev->bus_clk_rate <= 100000) {
+ if (idev->bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ) {
tmp &= ~ALTR_I2C_CTRL_BSPEED;
/* Standard mode SCL 50/50 */
t_high = divisor * 1 / 2;
@@ -245,10 +247,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
struct altr_i2c_dev *idev = _dev;
u32 status = idev->isr_status;
+ mutex_lock(&idev->isr_mutex);
if (!idev->msg) {
dev_warn(idev->dev, "unexpected interrupt\n");
altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
- return IRQ_HANDLED;
+ goto out;
}
read = (idev->msg->flags & I2C_M_RD) != 0;
@@ -301,6 +304,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
complete(&idev->msg_complete);
dev_dbg(idev->dev, "Message Complete\n");
}
+out:
+ mutex_unlock(&idev->isr_mutex);
return IRQ_HANDLED;
}
@@ -312,6 +317,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
u32 value;
u8 addr = i2c_8bit_addr_from_msg(msg);
+ mutex_lock(&idev->isr_mutex);
idev->msg = msg;
idev->msg_len = msg->len;
idev->buf = msg->buf;
@@ -336,6 +342,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
altr_i2c_int_enable(idev, imask, true);
altr_i2c_fill_tx_fifo(idev);
}
+ mutex_unlock(&idev->isr_mutex);
time_left = wait_for_completion_timeout(&idev->msg_complete,
ALTR_I2C_XFER_TIMEOUT);
@@ -384,7 +391,6 @@ static int altr_i2c_probe(struct platform_device *pdev)
struct altr_i2c_dev *idev = NULL;
struct resource *res;
int irq, ret;
- u32 val;
idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
if (!idev)
@@ -410,23 +416,24 @@ static int altr_i2c_probe(struct platform_device *pdev)
idev->dev = &pdev->dev;
init_completion(&idev->msg_complete);
spin_lock_init(&idev->lock);
+ mutex_init(&idev->isr_mutex);
- val = device_property_read_u32(idev->dev, "fifo-size",
+ ret = device_property_read_u32(idev->dev, "fifo-size",
&idev->fifo_size);
- if (val) {
+ if (ret) {
dev_err(&pdev->dev, "FIFO size set to default of %d\n",
ALTR_I2C_DFLT_FIFO_SZ);
idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
}
- val = device_property_read_u32(idev->dev, "clock-frequency",
+ ret = device_property_read_u32(idev->dev, "clock-frequency",
&idev->bus_clk_rate);
- if (val) {
+ if (ret) {
dev_err(&pdev->dev, "Default to 100kHz\n");
- idev->bus_clk_rate = 100000; /* default clock rate */
+ idev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */
}
- if (idev->bus_clk_rate > 400000) {
+ if (idev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ) {
dev_err(&pdev->dev, "invalid clock-frequency %d\n",
idev->bus_clk_rate);
return -EINVAL;
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index 5e4800d72e00..cd3fd5ee5f65 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
if (!privdata)
return -ENOMEM;
+ privdata->pci_dev = pci_dev;
rc = amd_mp2_pci_init(privdata, pci_dev);
if (rc)
return rc;
mutex_init(&privdata->c2p_lock);
- privdata->pci_dev = pci_dev;
pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
pm_runtime_use_autosuspend(&pci_dev->dev);
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index f5b3f00c6559..17df9e8845b6 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -201,32 +201,37 @@ static int i2c_amd_resume(struct amd_i2c_common *i2c_common)
}
#endif
+static const u32 supported_speeds[] = {
+ I2C_MAX_HIGH_SPEED_MODE_FREQ,
+ I2C_MAX_TURBO_MODE_FREQ,
+ I2C_MAX_FAST_MODE_PLUS_FREQ,
+ I2C_MAX_FAST_MODE_FREQ,
+ I2C_MAX_STANDARD_MODE_FREQ,
+};
+
static enum speed_enum i2c_amd_get_bus_speed(struct platform_device *pdev)
{
u32 acpi_speed;
int i;
- static const u32 supported_speeds[] = {
- 0, 100000, 400000, 1000000, 1400000, 3400000
- };
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
/* round down to the lowest standard speed */
- for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
- if (acpi_speed < supported_speeds[i])
+ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed >= supported_speeds[i])
break;
}
- acpi_speed = supported_speeds[i - 1];
+ acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0;
switch (acpi_speed) {
- case 100000:
+ case I2C_MAX_STANDARD_MODE_FREQ:
return speed100k;
- case 400000:
+ case I2C_MAX_FAST_MODE_FREQ:
return speed400k;
- case 1000000:
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
return speed1000k;
- case 1400000:
+ case I2C_MAX_TURBO_MODE_FREQ:
return speed1400k;
- case 3400000:
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
return speed3400k;
default:
return speed400k;
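
The reworked lookup walks the table from fastest to slowest and picks the first supported speed not above the ACPI-reported value, falling back to 0 (and thus the speed400k default in the switch) when the reported speed is below standard mode. A runnable model of the rounding:

#include <stdio.h>

static const unsigned int speeds[] = {
	3400000, 1400000, 1000000, 400000, 100000,	/* fast -> slow */
};

static unsigned int round_down_speed(unsigned int acpi_speed)
{
	unsigned int i;

	for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
		if (acpi_speed >= speeds[i])
			return speeds[i];
	return 0;	/* below standard mode: caller falls back to 400k */
}

int main(void)
{
	printf("%u\n", round_down_speed(3000000));	/* 1400000 */
	printf("%u\n", round_down_speed(50000));	/* 0 */
	return 0;
}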
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index a7be6f24450b..f51702d86a90 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -603,6 +603,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
/* Ack all interrupts except for Rx done */
writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
bus->base + ASPEED_I2C_INTR_STS_REG);
+ readl(bus->base + ASPEED_I2C_INTR_STS_REG);
irq_remaining = irq_received;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
@@ -645,9 +646,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
irq_received, irq_handled);
/* Ack Rx done */
- if (irq_received & ASPEED_I2CD_INTR_RX_DONE)
+ if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
writel(ASPEED_I2CD_INTR_RX_DONE,
bus->base + ASPEED_I2C_INTR_STS_REG);
+ readl(bus->base + ASPEED_I2C_INTR_STS_REG);
+ }
spin_unlock(&bus->lock);
return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
}
@@ -997,7 +1000,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"Could not read bus-frequency property\n");
- bus->bus_frequency = 100000;
+ bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
}
match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index 7a862e00b475..37b96ac9dfae 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -18,11 +18,13 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>
@@ -478,6 +480,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
unsigned long time_left;
bool has_unre_flag = dev->pdata->has_unre_flag;
bool has_alt_cmd = dev->pdata->has_alt_cmd;
+ struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
/*
* WARNING: the TXCOMP bit in the Status Register is NOT a clear on
@@ -637,6 +640,13 @@ error:
at91_twi_write(dev, AT91_TWI_CR,
AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
}
+
+ if (rinfo->get_sda && !(rinfo->get_sda(&dev->adapter))) {
+ dev_dbg(dev->dev,
+ "SDA is down; clear bus using gpio\n");
+ i2c_recover_bus(&dev->adapter);
+ }
+
return ret;
}
@@ -806,6 +816,84 @@ error:
return ret;
}
+static void at91_prepare_twi_recovery(struct i2c_adapter *adap)
+{
+ struct at91_twi_dev *dev = i2c_get_adapdata(adap);
+
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
+}
+
+static void at91_unprepare_twi_recovery(struct i2c_adapter *adap)
+{
+ struct at91_twi_dev *dev = i2c_get_adapdata(adap);
+
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
+}
+
+static int at91_init_twi_recovery_info(struct platform_device *pdev,
+ struct at91_twi_dev *dev)
+{
+ struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
+
+ dev->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!dev->pinctrl || IS_ERR(dev->pinctrl)) {
+ dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
+ return PTR_ERR(dev->pinctrl);
+ }
+
+ dev->pinctrl_pins_default = pinctrl_lookup_state(dev->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl,
+ "gpio");
+ if (IS_ERR(dev->pinctrl_pins_default) ||
+ IS_ERR(dev->pinctrl_pins_gpio)) {
+ dev_info(&pdev->dev, "pinctrl states incomplete for recovery\n");
+ return -EINVAL;
+ }
+
+ /*
+ * pins will be taken as GPIO, so we might as well inform pinctrl about
+ * this and move the state to GPIO
+ */
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
+
+ rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
+ if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
+ GPIOD_OUT_HIGH_OPEN_DRAIN);
+ if (PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(rinfo->sda_gpiod) ||
+ IS_ERR(rinfo->scl_gpiod)) {
+ dev_info(&pdev->dev, "recovery information incomplete\n");
+ if (!IS_ERR(rinfo->sda_gpiod)) {
+ gpiod_put(rinfo->sda_gpiod);
+ rinfo->sda_gpiod = NULL;
+ }
+ if (!IS_ERR(rinfo->scl_gpiod)) {
+ gpiod_put(rinfo->scl_gpiod);
+ rinfo->scl_gpiod = NULL;
+ }
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
+ return -EINVAL;
+ }
+
+ /* change the state of the pins back to their default state */
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
+
+ dev_info(&pdev->dev, "using scl, sda for recovery\n");
+
+ rinfo->prepare_recovery = at91_prepare_twi_recovery;
+ rinfo->unprepare_recovery = at91_unprepare_twi_recovery;
+ rinfo->recover_bus = i2c_generic_scl_recovery;
+ dev->adapter.bus_recovery_info = rinfo;
+
+ return 0;
+}
+
int at91_twi_probe_master(struct platform_device *pdev,
u32 phy_addr, struct at91_twi_dev *dev)
{
@@ -838,6 +926,10 @@ int at91_twi_probe_master(struct platform_device *pdev,
"i2c-analog-filter");
at91_calc_twi_clock(dev);
+ rc = at91_init_twi_recovery_info(pdev, dev);
+ if (rc == -EPROBE_DEFER)
+ return rc;
+
dev->adapter.algo = &at91_twi_algorithm;
dev->adapter.quirks = &at91_twi_quirks;
diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h
index 977a67bc0f88..f57a6cab96b4 100644
--- a/drivers/i2c/busses/i2c-at91.h
+++ b/drivers/i2c/busses/i2c-at91.h
@@ -151,6 +151,10 @@ struct at91_twi_dev {
u32 fifo_size;
struct at91_twi_dma dma;
bool slave_detected;
+ struct i2c_bus_recovery_info rinfo;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_pins_default;
+ struct pinctrl_state *pinctrl_pins_gpio;
#ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL
unsigned smr;
struct i2c_client *slave;
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 0214daa913ff..be3681d08a8d 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -199,7 +199,7 @@ static int axxia_i2c_init(struct axxia_i2c_dev *idev)
/* Enable Master Mode */
writel(0x1, idev->base + GLOBAL_CONTROL);
- if (idev->bus_clk_rate <= 100000) {
+ if (idev->bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ) {
/* Standard mode SCL 50/50, tSU:DAT = 250 ns */
t_high = divisor * 1 / 2;
t_low = divisor * 1 / 2;
@@ -765,7 +765,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
of_property_read_u32(np, "clock-frequency", &idev->bus_clk_rate);
if (idev->bus_clk_rate == 0)
- idev->bus_clk_rate = 100000; /* default clock rate */
+ idev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */
ret = clk_prepare_enable(idev->i2c_clk);
if (ret) {
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 30efb7913b2e..d091a12596ad 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -360,6 +360,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
i2c_slave_event(iproc_i2c->slave,
I2C_SLAVE_WRITE_RECEIVED, &value);
+ if (rx_status == I2C_SLAVE_RX_END)
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_STOP, &value);
}
} else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
/* Master read other than start */
@@ -858,25 +861,25 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
if (ret < 0) {
dev_info(iproc_i2c->device,
"unable to interpret clock-frequency DT property\n");
- bus_speed = 100000;
+ bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
}
- if (bus_speed < 100000) {
+ if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) {
dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
bus_speed);
dev_err(iproc_i2c->device,
"valid speeds are 100khz and 400khz\n");
return -EINVAL;
- } else if (bus_speed < 400000) {
- bus_speed = 100000;
+ } else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) {
+ bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
} else {
- bus_speed = 400000;
+ bus_speed = I2C_MAX_FAST_MODE_FREQ;
}
iproc_i2c->bus_speed = bus_speed;
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
val &= ~BIT(TIM_CFG_MODE_400_SHIFT);
- val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
+ val |= (bus_speed == I2C_MAX_FAST_MODE_FREQ) << TIM_CFG_MODE_400_SHIFT;
iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
@@ -1029,7 +1032,7 @@ static int bcm_iproc_i2c_resume(struct device *dev)
/* configure to the desired bus speed */
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
val &= ~BIT(TIM_CFG_MODE_400_SHIFT);
- val |= (iproc_i2c->bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
+ val |= (iproc_i2c->bus_speed == I2C_MAX_FAST_MODE_FREQ) << TIM_CFG_MODE_400_SHIFT;
iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
bcm_iproc_i2c_enable_disable(iproc_i2c, true);
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 4e489a9d16fb..572aebbb254e 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -722,16 +722,16 @@ static int bcm_kona_i2c_assign_bus_speed(struct bcm_kona_i2c_dev *dev)
}
switch (bus_speed) {
- case 100000:
+ case I2C_MAX_STANDARD_MODE_FREQ:
dev->std_cfg = &std_cfg_table[BCM_SPD_100K];
break;
- case 400000:
+ case I2C_MAX_FAST_MODE_FREQ:
dev->std_cfg = &std_cfg_table[BCM_SPD_400K];
break;
- case 1000000:
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
dev->std_cfg = &std_cfg_table[BCM_SPD_1MHZ];
break;
- case 3400000:
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
/* Send mastercode at 100k */
dev->std_cfg = &std_cfg_table[BCM_SPD_100K];
dev->hs_cfg = &hs_cfg_table[BCM_SPD_3P4MHZ];
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 5ab901ad615d..d9b86fcc3825 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -439,7 +439,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_warn(&pdev->dev,
"Could not read clock-frequency property\n");
- bus_clk_rate = 100000;
+ bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ;
}
ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 506991596b68..169a2836922d 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -580,6 +580,31 @@ static void brcmstb_i2c_set_bsc_reg_defaults(struct brcmstb_i2c_dev *dev)
brcmstb_i2c_set_bus_speed(dev);
}
+#define AUTOI2C_CTRL0 0x26c
+#define AUTOI2C_CTRL0_RELEASE_BSC BIT(1)
+
+static int bcm2711_release_bsc(struct brcmstb_i2c_dev *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->device);
+ struct resource *iomem;
+ void __iomem *autoi2c;
+
+ /* Map hardware registers */
+ iomem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "auto-i2c");
+ autoi2c = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(autoi2c))
+ return PTR_ERR(autoi2c);
+
+ writel(AUTOI2C_CTRL0_RELEASE_BSC, autoi2c + AUTOI2C_CTRL0);
+ devm_iounmap(&pdev->dev, autoi2c);
+
+ /* We need to reset the controller after the release */
+ dev->bsc_regmap->iic_enable = 0;
+ bsc_writel(dev, dev->bsc_regmap->iic_enable, iic_enable);
+
+ return 0;
+}
+
static int brcmstb_i2c_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -609,6 +634,13 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
goto probe_errorout;
}
+ if (of_device_is_compatible(dev->device->of_node,
+ "brcm,bcm2711-hdmi-i2c")) {
+ rc = bcm2711_release_bsc(dev);
+ if (rc)
+ goto probe_errorout;
+ }
+
rc = of_property_read_string(dev->device->of_node, "interrupt-names",
&int_name);
if (rc < 0)
@@ -705,6 +737,7 @@ static SIMPLE_DEV_PM_OPS(brcmstb_i2c_pm, brcmstb_i2c_suspend,
static const struct of_device_id brcmstb_i2c_of_match[] = {
{.compatible = "brcm,brcmstb-i2c"},
{.compatible = "brcm,brcmper-i2c"},
+ {.compatible = "brcm,bcm2711-hdmi-i2c"},
{},
};
MODULE_DEVICE_TABLE(of, brcmstb_i2c_of_match);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 1105aee6634a..89d58f7d2a25 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -104,9 +104,6 @@
#define DRIVER_NAME "cdns-i2c"
-#define CDNS_I2C_SPEED_MAX 400000
-#define CDNS_I2C_SPEED_DEFAULT 100000
-
#define CDNS_I2C_DIVA_MAX 4
#define CDNS_I2C_DIVB_MAX 64
@@ -949,8 +946,8 @@ static int cdns_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&id->i2c_clk);
- if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX))
- id->i2c_clk = CDNS_I2C_SPEED_DEFAULT;
+ if (ret || (id->i2c_clk > I2C_MAX_FAST_MODE_FREQ))
+ id->i2c_clk = I2C_MAX_STANDARD_MODE_FREQ;
cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS,
CDNS_I2C_CR_OFFSET);
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index 33da07d64494..c6a7a00e1d52 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Intel BayTrail PMIC I2C bus semaphore implementaion
+ * Intel BayTrail PMIC I2C bus semaphore implementation
* Copyright (c) 2014, Intel Corporation.
*/
#include <linux/device.h>
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 2de7452fcd6d..c70c6fc09ee3 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -102,7 +102,7 @@ int i2c_dw_set_reg_access(struct dw_i2c_dev *dev)
i2c_dw_release_lock(dev);
if (reg == swab32(DW_IC_COMP_TYPE_VALUE)) {
- /* Configure register endianess access */
+ /* Configure register endianness access */
dev->flags |= ACCESS_SWAP;
} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
/* Configure register access mode 16bit */
@@ -190,10 +190,10 @@ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
/*
* Workaround for avoiding TX arbitration lost in case I2C
- * slave pulls SDA down "too quickly" after falling egde of
+ * slave pulls SDA down "too quickly" after falling edge of
* SCL by enabling non-zero SDA RX hold. Specification says it
* extends incoming SDA low to high transition while SCL is
- * high but it apprears to help also above issue.
+ * high but it appears to help also above issue.
*/
if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
@@ -344,6 +344,28 @@ int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
return -EIO;
}
+void i2c_dw_set_fifo_size(struct dw_i2c_dev *dev)
+{
+ u32 param, tx_fifo_depth, rx_fifo_depth;
+
+ /*
+ * Try to detect the FIFO depth if not set by interface driver,
+ * the depth could be from 2 to 256 from HW spec.
+ */
+ param = dw_readl(dev, DW_IC_COMP_PARAM_1);
+ tx_fifo_depth = ((param >> 16) & 0xff) + 1;
+ rx_fifo_depth = ((param >> 8) & 0xff) + 1;
+ if (!dev->tx_fifo_depth) {
+ dev->tx_fifo_depth = tx_fifo_depth;
+ dev->rx_fifo_depth = rx_fifo_depth;
+ } else if (tx_fifo_depth >= 2) {
+ dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
+ tx_fifo_depth);
+ dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
+ rx_fifo_depth);
+ }
+}
+
u32 i2c_dw_func(struct i2c_adapter *adap)
{
struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
@@ -356,7 +378,7 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
/* Disable controller */
__i2c_dw_disable(dev);
- /* Disable all interupts */
+ /* Disable all interrupts */
dw_writel(dev, 0, DW_IC_INTR_MASK);
dw_readl(dev, DW_IC_CLR_INTR);
}
@@ -366,11 +388,5 @@ void i2c_dw_disable_int(struct dw_i2c_dev *dev)
dw_writel(dev, 0, DW_IC_INTR_MASK);
}
-u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
-{
- return dw_readl(dev, DW_IC_COMP_PARAM_1);
-}
-EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
-
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
MODULE_LICENSE("GPL");
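i2c_dw_set_fifo_size() above consolidates the FIFO probing that previously lived in the platform driver (its old copy is removed further down, so both master and slave probe paths can share it). The field names follow the DesignWare databook: DW_IC_COMP_PARAM_1 carries TX_BUFFER_DEPTH in bits 23:16 and RX_BUFFER_DEPTH in bits 15:8, each stored as depth minus one. A standalone sketch of the decode:

/* Sketch: decode FIFO depths from a DW_IC_COMP_PARAM_1 value.
 * E.g. 0x3f in bits 23:16 decodes to a 64-entry TX FIFO.
 */
static void decode_fifo_depths(u32 param, u32 *tx, u32 *rx)
{
	*tx = ((param >> 16) & 0xff) + 1;	/* TX_BUFFER_DEPTH + 1 */
	*rx = ((param >> 8) & 0xff) + 1;	/* RX_BUFFER_DEPTH + 1 */
}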
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 67edbbde1070..b220ad64c38d 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -297,6 +297,7 @@ int i2c_dw_acquire_lock(struct dw_i2c_dev *dev);
void i2c_dw_release_lock(struct dw_i2c_dev *dev);
int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev);
int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev);
+void i2c_dw_set_fifo_size(struct dw_i2c_dev *dev);
u32 i2c_dw_func(struct i2c_adapter *adap);
void i2c_dw_disable(struct dw_i2c_dev *dev);
void i2c_dw_disable_int(struct dw_i2c_dev *dev);
@@ -313,7 +314,6 @@ static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev)
void __i2c_dw_disable(struct dw_i2c_dev *dev);
-extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
extern int i2c_dw_probe(struct dw_i2c_dev *dev);
#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_SLAVE)
extern int i2c_dw_probe_slave(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e8b328242256..3a58eef20936 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -521,7 +521,7 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
/*
* The IC_INTR_STAT register just indicates "enabled" interrupts.
- * Ths unmasked raw version of interrupt status bits are available
+ * The unmasked raw version of interrupt status bits is available
* in the IC_RAW_INTR_STAT register.
*
* That is,
@@ -698,6 +698,8 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
if (ret)
return ret;
+ i2c_dw_set_fifo_size(dev);
+
ret = dev->init(dev);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 05b35ac33ce3..7a0b65b5b5b5 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -109,7 +109,7 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
/*
- * On Intel Merrifield the user visible i2c busses are enumerated
+ * On Intel Merrifield the user visible i2c buses are enumerated
* [1..7]. So, we add 1 to shift the default range. Besides that the
* first PCI slot provides 4 functions, that's why we have to add 0 to
* the first slot and 4 to the next one.
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 3b7d58c2fe85..5536673060cc 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -99,16 +99,16 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
switch (t->bus_freq_hz) {
- case 100000:
+ case I2C_MAX_STANDARD_MODE_FREQ:
dev->sda_hold_time = ss_ht;
break;
- case 1000000:
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
dev->sda_hold_time = fp_ht;
break;
- case 3400000:
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
dev->sda_hold_time = hs_ht;
break;
- case 400000:
+ case I2C_MAX_FAST_MODE_FREQ:
default:
dev->sda_hold_time = fs_ht;
break;
@@ -198,10 +198,10 @@ static void i2c_dw_configure_master(struct dw_i2c_dev *dev)
dev->mode = DW_IC_MASTER;
switch (t->bus_freq_hz) {
- case 100000:
+ case I2C_MAX_STANDARD_MODE_FREQ:
dev->master_cfg |= DW_IC_CON_SPEED_STD;
break;
- case 3400000:
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
break;
default:
@@ -219,28 +219,6 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
dev->mode = DW_IC_SLAVE;
}
-static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev)
-{
- u32 param, tx_fifo_depth, rx_fifo_depth;
-
- /*
- * Try to detect the FIFO depth if not set by interface driver,
- * the depth could be from 2 to 256 from HW spec.
- */
- param = i2c_dw_read_comp_param(dev);
- tx_fifo_depth = ((param >> 16) & 0xff) + 1;
- rx_fifo_depth = ((param >> 8) & 0xff) + 1;
- if (!dev->tx_fifo_depth) {
- dev->tx_fifo_depth = tx_fifo_depth;
- dev->rx_fifo_depth = rx_fifo_depth;
- } else if (tx_fifo_depth >= 2) {
- dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
- tx_fifo_depth);
- dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
- rx_fifo_depth);
- }
-}
-
static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev)
{
pm_runtime_disable(dev->dev);
@@ -249,6 +227,13 @@ static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev)
pm_runtime_put_noidle(dev->dev);
}
+static const u32 supported_speeds[] = {
+ I2C_MAX_HIGH_SPEED_MODE_FREQ,
+ I2C_MAX_FAST_MODE_PLUS_FREQ,
+ I2C_MAX_FAST_MODE_FREQ,
+ I2C_MAX_STANDARD_MODE_FREQ,
+};
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -258,9 +243,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
u32 acpi_speed;
struct resource *mem;
int i, irq, ret;
- static const int supported_speeds[] = {
- 0, 100000, 400000, 1000000, 3400000
- };
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -296,11 +278,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
* Some DSDTs use a non-standard speed, round down to the lowest
* standard speed.
*/
- for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
- if (acpi_speed < supported_speeds[i])
+ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed >= supported_speeds[i])
break;
}
- acpi_speed = supported_speeds[i - 1];
+ acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0;
/*
* Find bus speed from the "clock-frequency" device property, ACPI
@@ -311,7 +293,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
else if (acpi_speed || t->bus_freq_hz)
t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed);
else
- t->bus_freq_hz = 400000;
+ t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
dev->flags |= (uintptr_t)device_get_match_data(&pdev->dev);
@@ -325,8 +307,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
* Only standard mode at 100kHz, fast mode at 400kHz,
* fast mode plus at 1MHz and high speed mode at 3.4MHz are supported.
*/
- if (t->bus_freq_hz != 100000 && t->bus_freq_hz != 400000 &&
- t->bus_freq_hz != 1000000 && t->bus_freq_hz != 3400000) {
+ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (t->bus_freq_hz == supported_speeds[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(supported_speeds)) {
dev_err(&pdev->dev,
"%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
t->bus_freq_hz);
@@ -362,8 +347,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000);
}
- dw_i2c_set_fifo_size(dev);
-
adap = &dev->adapter;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
@@ -371,10 +354,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
adap->nr = -1;
- dev_pm_set_driver_flags(&pdev->dev,
- DPM_FLAG_SMART_PREPARE |
- DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED);
+ if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_LEAVE_SUSPENDED);
+ } else {
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+ }
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));
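The reworked probe logic walks supported_speeds, now ordered fastest-first, and rounds a non-standard ACPI-reported speed down to the nearest supported one, or to 0 when it is below even standard mode. The same logic as a standalone sketch, with a few worked values:

static const u32 speeds[] = { 3400000, 1000000, 400000, 100000 };

/* Round hz down to the nearest supported bus speed, 0 if below 100 kHz. */
static u32 round_down_to_supported(u32 hz)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(speeds); i++)
		if (hz >= speeds[i])
			return speeds[i];

	return 0;
}

/* round_down_to_supported(390000)  -> 100000
 * round_down_to_supported(1200000) -> 1000000
 * round_down_to_supported(50000)   -> 0
 */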
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index f5f001738df5..f5ecf76c0d02 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -107,7 +107,7 @@ static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev)
/*
* The IC_INTR_STAT register just indicates "enabled" interrupts.
- * Ths unmasked raw version of interrupt status bits are available
+ * The unmasked raw version of interrupt status bits is available
* in the IC_RAW_INTR_STAT register.
*
* That is,
@@ -260,6 +260,8 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
if (ret)
return ret;
+ i2c_dw_set_fifo_size(dev);
+
ret = dev->init(dev);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 3adf72540db1..056a5c4f0833 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -18,7 +18,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
-#define DEFAULT_FREQ 100000
#define TIMEOUT_MS 100
#define II_CONTROL 0x0
@@ -300,7 +299,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&i2c->frequency))
- i2c->frequency = DEFAULT_FREQ;
+ i2c->frequency = I2C_MAX_STANDARD_MODE_FREQ;
i2c->dev = &pdev->dev;
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index 382f105e0fe3..b48b7888936f 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -64,8 +64,6 @@
#define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */
#define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
-#define U2C_I2C_FREQ_FAST 400000
-#define U2C_I2C_FREQ_STD 100000
#define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10))
#define DIOLAN_USB_TIMEOUT 100 /* in ms */
@@ -87,7 +85,7 @@ struct i2c_diolan_u2c {
int ocount; /* Number of enqueued messages */
};
-static uint frequency = U2C_I2C_FREQ_STD; /* I2C clock frequency in Hz */
+static uint frequency = I2C_MAX_STANDARD_MODE_FREQ; /* I2C clock frequency in Hz */
module_param(frequency, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
@@ -299,12 +297,12 @@ static int diolan_init(struct i2c_diolan_u2c *dev)
{
int speed, ret;
- if (frequency >= 200000) {
+ if (frequency >= 2 * I2C_MAX_STANDARD_MODE_FREQ) {
speed = U2C_I2C_SPEED_FAST;
- frequency = U2C_I2C_FREQ_FAST;
- } else if (frequency >= 100000 || frequency == 0) {
+ frequency = I2C_MAX_FAST_MODE_FREQ;
+ } else if (frequency >= I2C_MAX_STANDARD_MODE_FREQ || frequency == 0) {
speed = U2C_I2C_SPEED_STD;
- frequency = U2C_I2C_FREQ_STD;
+ frequency = I2C_MAX_STANDARD_MODE_FREQ;
} else {
speed = U2C_I2C_SPEED(frequency);
if (speed > U2C_I2C_SPEED_2KHZ)
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index a8c6323e7f44..18cca8f56da8 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -388,7 +388,7 @@ static int efm32_i2c_probe(struct platform_device *pdev)
if (!ret) {
dev_dbg(&pdev->dev, "using frequency %u\n", frequency);
} else {
- frequency = 100000;
+ frequency = I2C_MAX_STANDARD_MODE_FREQ;
dev_info(&pdev->dev, "defaulting to 100 kHz\n");
}
ddata->frequency = frequency;
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index e7514c16b756..527030953ba1 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -164,13 +164,6 @@
#define HSI2C_MASTER_ID(x) ((x & 0xff) << 24)
#define MASTER_ID(x) ((x & 0x7) + 0x08)
-/*
- * Controller operating frequency, timing values for operation
- * are calculated against this frequency
- */
-#define HSI2C_HS_TX_CLOCK 1000000
-#define HSI2C_FS_TX_CLOCK 100000
-
#define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(100))
enum i2c_type_exynos {
@@ -264,6 +257,9 @@ static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c)
* exynos5_i2c_set_timing: updates the registers with appropriate
* timing values calculated
*
+ * Timing values for operation are calculated against either 100kHz
+ * or 1MHz controller operating frequency.
+ *
* Returns 0 on success, -EINVAL if the cycle length cannot
* be calculated.
*/
@@ -281,7 +277,7 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
unsigned int t_ftl_cycle;
unsigned int clkin = clk_get_rate(i2c->clk);
unsigned int op_clk = hs_timings ? i2c->op_clock :
- (i2c->op_clock >= HSI2C_HS_TX_CLOCK) ? HSI2C_FS_TX_CLOCK :
+ (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ) ? I2C_MAX_STANDARD_MODE_FREQ :
i2c->op_clock;
int div, clk_cycle, temp;
@@ -353,7 +349,7 @@ static int exynos5_hsi2c_clock_setup(struct exynos5_i2c *i2c)
/* always set Fast Speed timings */
int ret = exynos5_i2c_set_timing(i2c, false);
- if (ret < 0 || i2c->op_clock < HSI2C_HS_TX_CLOCK)
+ if (ret < 0 || i2c->op_clock < I2C_MAX_FAST_MODE_PLUS_FREQ)
return ret;
return exynos5_i2c_set_timing(i2c, true);
@@ -376,7 +372,7 @@ static void exynos5_i2c_init(struct exynos5_i2c *i2c)
i2c->regs + HSI2C_CTL);
writel(HSI2C_TRAILING_COUNT, i2c->regs + HSI2C_TRAILIG_CTL);
- if (i2c->op_clock >= HSI2C_HS_TX_CLOCK) {
+ if (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ) {
writel(HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr)),
i2c->regs + HSI2C_ADDR);
i2c_conf |= HSI2C_HS_MODE;
@@ -748,7 +744,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
if (of_property_read_u32(np, "clock-frequency", &i2c->op_clock))
- i2c->op_clock = HSI2C_FS_TX_CLOCK;
+ i2c->op_clock = I2C_MAX_STANDARD_MODE_FREQ;
strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 224f830f77f9..6610304b6dc6 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -68,8 +68,6 @@
#define I2C_ARBITRATE_INTR BIT(1)
#define I2C_OVER_INTR BIT(0)
-#define HIX5I2C_MAX_FREQ 400000 /* 400k */
-
enum hix5hd2_i2c_state {
HIX5I2C_STAT_RW_ERR = -1,
HIX5I2C_STAT_INIT,
@@ -400,12 +398,12 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-frequency", &freq)) {
/* use 100k as default value */
- priv->freq = 100000;
+ priv->freq = I2C_MAX_STANDARD_MODE_FREQ;
} else {
- if (freq > HIX5I2C_MAX_FREQ) {
- priv->freq = HIX5I2C_MAX_FREQ;
+ if (freq > I2C_MAX_FAST_MODE_FREQ) {
+ priv->freq = I2C_MAX_FAST_MODE_FREQ;
dev_warn(priv->dev, "use max freq %d instead\n",
- HIX5I2C_MAX_FREQ);
+ I2C_MAX_FAST_MODE_FREQ);
} else {
priv->freq = freq;
}
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 20a4fbc53007..422097a31c95 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -304,7 +304,7 @@ static struct img_i2c_timings timings[] = {
/* Standard mode */
{
.name = "standard",
- .max_bitrate = 100000,
+ .max_bitrate = I2C_MAX_STANDARD_MODE_FREQ,
.tckh = 4000,
.tckl = 4700,
.tsdh = 4700,
@@ -316,7 +316,7 @@ static struct img_i2c_timings timings[] = {
/* Fast mode */
{
.name = "fast",
- .max_bitrate = 400000,
+ .max_bitrate = I2C_MAX_FAST_MODE_FREQ,
.tckh = 600,
.tckl = 1300,
.tsdh = 600,
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index c92b56485fa6..94743ba581fe 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -75,12 +75,6 @@
#define I2C_CLK_RATIO 2
#define CHUNK_DATA 256
-#define LPI2C_DEFAULT_RATE 100000
-#define STARDARD_MAX_BITRATE 400000
-#define FAST_MAX_BITRATE 1000000
-#define FAST_PLUS_MAX_BITRATE 3400000
-#define HIGHSPEED_MAX_BITRATE 5000000
-
#define I2C_PM_TIMEOUT 10 /* ms */
enum lpi2c_imx_mode {
@@ -152,13 +146,13 @@ static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
unsigned int bitrate = lpi2c_imx->bitrate;
enum lpi2c_imx_mode mode;
- if (bitrate < STARDARD_MAX_BITRATE)
+ if (bitrate < I2C_MAX_FAST_MODE_FREQ)
mode = STANDARD;
- else if (bitrate < FAST_MAX_BITRATE)
+ else if (bitrate < I2C_MAX_FAST_MODE_PLUS_FREQ)
mode = FAST;
- else if (bitrate < FAST_PLUS_MAX_BITRATE)
+ else if (bitrate < I2C_MAX_HIGH_SPEED_MODE_FREQ)
mode = FAST_PLUS;
- else if (bitrate < HIGHSPEED_MAX_BITRATE)
+ else if (bitrate < I2C_MAX_ULTRA_FAST_MODE_FREQ)
mode = HS;
else
mode = ULTRA_FAST;
@@ -578,7 +572,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &lpi2c_imx->bitrate);
if (ret)
- lpi2c_imx->bitrate = LPI2C_DEFAULT_RATE;
+ lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
pdev->name, lpi2c_imx);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index a3b61336fe55..0ab5381aa012 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -49,9 +50,6 @@
/* This will be the driver name the kernel reports */
#define DRIVER_NAME "imx-i2c"
-/* Default value */
-#define IMX_I2C_BIT_RATE 100000 /* 100kHz */
-
/*
* Enable DMA if transfer byte size is bigger than this threshold.
* As the hardware requires, it must be bigger than 4 bytes.
@@ -414,7 +412,7 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
dma->chan_using = NULL;
}
-static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
+static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
{
unsigned long orig_jiffies = jiffies;
unsigned int temp;
@@ -444,15 +442,37 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
"<%s> I2C bus is busy\n", __func__);
return -ETIMEDOUT;
}
- schedule();
+ if (atomic)
+ udelay(100);
+ else
+ schedule();
}
return 0;
}
-static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
+static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
{
- wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
+ if (atomic) {
+ void __iomem *addr = i2c_imx->base + (IMX_I2C_I2SR << i2c_imx->hwdata->regshift);
+ unsigned int regval;
+
+ /*
+ * The formula for the poll timeout is documented in the RM
+ * Rev.5 on page 1878:
+ * T_min = 10/F_scl
+ * Set the value hard as it is done for the non-atomic use-case.
+ * Use 10 kHz for the calculation since this is the minimum
+ * allowed SMBus frequency. Also add an offset of 100us since it
+ * turned out that the I2SR_IIF bit isn't set correctly within
+ * the minimum timeout in polling mode.
+ */
+ readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100);
+ i2c_imx->i2csr = regval;
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ } else {
+ wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
+ }
if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
@@ -530,7 +550,7 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
+static int i2c_imx_start(struct imx_i2c_struct *i2c_imx, bool atomic)
{
unsigned int temp = 0;
int result;
@@ -543,23 +563,29 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode, i2c_imx, IMX_I2C_I2CR);
/* Wait controller to be stable */
- usleep_range(50, 150);
+ if (atomic)
+ udelay(50);
+ else
+ usleep_range(50, 150);
/* Start I2C transaction */
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_MSTA;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- result = i2c_imx_bus_busy(i2c_imx, 1);
+ result = i2c_imx_bus_busy(i2c_imx, 1, atomic);
if (result)
return result;
temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
+ if (atomic)
+ temp &= ~I2CR_IIEN; /* Disable interrupt */
+
temp &= ~I2CR_DMAEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
return result;
}
-static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
+static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic)
{
unsigned int temp = 0;
@@ -581,7 +607,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
}
if (!i2c_imx->stopped)
- i2c_imx_bus_busy(i2c_imx, 0);
+ i2c_imx_bus_busy(i2c_imx, 0, atomic);
/* Disable I2C controller */
temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
@@ -662,7 +688,7 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
/* The last data byte must be transferred by the CPU. */
imx_i2c_write_reg(msgs->buf[msgs->len-1],
i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, false);
if (result)
return result;
@@ -721,7 +747,7 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
/* read n byte data */
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, false);
if (result)
return result;
@@ -734,7 +760,7 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0);
+ i2c_imx_bus_busy(i2c_imx, 0, false);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -752,7 +778,8 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
return 0;
}
-static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
+static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
+ bool atomic)
{
int i, result;
@@ -761,7 +788,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
/* write slave address */
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, atomic);
if (result)
return result;
result = i2c_imx_acked(i2c_imx);
@@ -775,7 +802,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
"<%s> write byte: B%d=0x%X\n",
__func__, i, msgs->buf[i]);
imx_i2c_write_reg(msgs->buf[i], i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, atomic);
if (result)
return result;
result = i2c_imx_acked(i2c_imx);
@@ -785,7 +812,8 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
return 0;
}
-static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bool is_lastmsg)
+static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
+ bool is_lastmsg, bool atomic)
{
int i, result;
unsigned int temp;
@@ -798,7 +826,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
/* write slave address */
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, atomic);
if (result)
return result;
result = i2c_imx_acked(i2c_imx);
@@ -831,7 +859,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
for (i = 0; i < msgs->len; i++) {
u8 len = 0;
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, atomic);
if (result)
return result;
/*
@@ -859,7 +887,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0);
+ i2c_imx_bus_busy(i2c_imx, 0, atomic);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -890,8 +918,8 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
return 0;
}
-static int i2c_imx_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs, int num)
+static int i2c_imx_xfer_common(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num, bool atomic)
{
unsigned int i, temp;
int result;
@@ -900,16 +928,16 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
- result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
- if (result < 0)
- goto out;
-
/* Start I2C transfer */
- result = i2c_imx_start(i2c_imx);
+ result = i2c_imx_start(i2c_imx, atomic);
if (result) {
- if (i2c_imx->adapter.bus_recovery_info) {
+ /*
+ * Bus recovery uses gpiod_get_value_cansleep() which is not
+ * allowed within atomic context.
+ */
+ if (!atomic && i2c_imx->adapter.bus_recovery_info) {
i2c_recover_bus(&i2c_imx->adapter);
- result = i2c_imx_start(i2c_imx);
+ result = i2c_imx_start(i2c_imx, atomic);
}
}
@@ -927,7 +955,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_RSTA;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- result = i2c_imx_bus_busy(i2c_imx, 1);
+ result = i2c_imx_bus_busy(i2c_imx, 1, atomic);
if (result)
goto fail0;
}
@@ -951,13 +979,14 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
(temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
(temp & I2SR_RXAK ? 1 : 0));
#endif
- if (msgs[i].flags & I2C_M_RD)
- result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg);
- else {
- if (i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD)
+ if (msgs[i].flags & I2C_M_RD) {
+ result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic);
+ } else {
+ if (!atomic &&
+ i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD)
result = i2c_imx_dma_write(i2c_imx, &msgs[i]);
else
- result = i2c_imx_write(i2c_imx, &msgs[i]);
+ result = i2c_imx_write(i2c_imx, &msgs[i], atomic);
}
if (result)
goto fail0;
@@ -965,18 +994,49 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
fail0:
/* Stop I2C transfer */
- i2c_imx_stop(i2c_imx);
-
- pm_runtime_mark_last_busy(i2c_imx->adapter.dev.parent);
- pm_runtime_put_autosuspend(i2c_imx->adapter.dev.parent);
+ i2c_imx_stop(i2c_imx, atomic);
-out:
dev_dbg(&i2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
(result < 0) ? "error" : "success msg",
(result < 0) ? result : num);
return (result < 0) ? result : num;
}
+static int i2c_imx_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
+ int result;
+
+ result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
+ if (result < 0)
+ return result;
+
+ result = i2c_imx_xfer_common(adapter, msgs, num, false);
+
+ pm_runtime_mark_last_busy(i2c_imx->adapter.dev.parent);
+ pm_runtime_put_autosuspend(i2c_imx->adapter.dev.parent);
+
+ return result;
+}
+
+static int i2c_imx_xfer_atomic(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
+ int result;
+
+ result = clk_enable(i2c_imx->clk);
+ if (result)
+ return result;
+
+ result = i2c_imx_xfer_common(adapter, msgs, num, true);
+
+ clk_disable(i2c_imx->clk);
+
+ return result;
+}
+
static void i2c_imx_prepare_recovery(struct i2c_adapter *adap)
{
struct imx_i2c_struct *i2c_imx;
@@ -1049,8 +1109,9 @@ static u32 i2c_imx_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm i2c_imx_algo = {
- .master_xfer = i2c_imx_xfer,
- .functionality = i2c_imx_func,
+ .master_xfer = i2c_imx_xfer,
+ .master_xfer_atomic = i2c_imx_xfer_atomic,
+ .functionality = i2c_imx_func,
};
static int i2c_imx_probe(struct platform_device *pdev)
@@ -1066,10 +1127,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "<%s>\n", __func__);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "can't get irq number\n");
+ if (irq < 0)
return irq;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -1139,7 +1198,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
goto rpm_disable;
/* Set up clock divider */
- i2c_imx->bitrate = IMX_I2C_BIT_RATE;
+ i2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
ret = of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &i2c_imx->bitrate);
if (ret < 0 && pdata && pdata->bitrate)
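The atomic-transfer support added to this driver plugs into the core's master_xfer_atomic hook, which the I2C core uses when a transfer is issued with interrupts disabled, typically a final PMIC write during shutdown or reboot. In that context the driver must not sleep, hence udelay() and readb_poll_timeout_atomic() replace schedule() and wait_event_timeout(). A minimal sketch of such a non-sleeping completion wait (register layout and bit name are illustrative):

#include <linux/iopoll.h>

#define DEMO_SR_DONE	BIT(1)	/* illustrative "transfer done" bit */

/* Busy-poll a status register for completion; safe in atomic context.
 * 5 us poll interval, ~1.1 ms total, mirroring the margins chosen above.
 */
static int demo_wait_done_atomic(void __iomem *status_reg)
{
	u8 sr;

	return readb_poll_timeout_atomic(status_reg, sr,
					 sr & DEMO_SR_DONE, 5, 1100);
}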
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index deea18b14add..13b0c12e2dba 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -396,7 +396,7 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&bus_clk_rate);
if (ret)
- bus_clk_rate = 100000; /* 100 kHz default clock rate */
+ bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ;
clkrate = clk_get_rate(i2c->clk);
if (clkrate == 0) {
@@ -407,9 +407,9 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
/* Setup I2C dividers to generate clock with proper duty cycle */
clkrate = clkrate / bus_clk_rate;
- if (bus_clk_rate <= 100000)
+ if (bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ)
scl_high = (clkrate * I2C_STD_MODE_DUTY) / 100;
- else if (bus_clk_rate <= 400000)
+ else if (bus_clk_rate <= I2C_MAX_FAST_MODE_FREQ)
scl_high = (clkrate * I2C_FAST_MODE_DUTY) / 100;
else
scl_high = (clkrate * I2C_FAST_MODE_PLUS_DUTY) / 100;
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 2152ec5f535c..0ca6c38a15eb 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -56,9 +56,6 @@
#define I2C_DMA_4G_MODE 0x0001
#define I2C_DEFAULT_CLK_DIV 5
-#define I2C_DEFAULT_SPEED 100000 /* hz */
-#define MAX_FS_MODE_SPEED 400000
-#define MAX_HS_MODE_SPEED 3400000
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
#define MAX_HS_STEP_CNT_DIV 8
@@ -450,10 +447,10 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
unsigned int best_mul;
unsigned int cnt_mul;
- if (target_speed > MAX_HS_MODE_SPEED)
- target_speed = MAX_HS_MODE_SPEED;
+ if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
+ target_speed = I2C_MAX_FAST_MODE_PLUS_FREQ;
- if (target_speed > MAX_FS_MODE_SPEED)
+ if (target_speed > I2C_MAX_FAST_MODE_FREQ)
max_step_cnt = MAX_HS_STEP_CNT_DIV;
else
max_step_cnt = MAX_STEP_CNT_DIV;
@@ -514,9 +511,9 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
clk_src = parent_clk / i2c->clk_src_div;
target_speed = i2c->speed_hz;
- if (target_speed > MAX_FS_MODE_SPEED) {
+ if (target_speed > I2C_MAX_FAST_MODE_FREQ) {
/* Set master code speed register */
- ret = mtk_i2c_calculate_speed(i2c, clk_src, MAX_FS_MODE_SPEED,
+ ret = mtk_i2c_calculate_speed(i2c, clk_src, I2C_MAX_FAST_MODE_FREQ,
&l_step_cnt, &l_sample_cnt);
if (ret < 0)
return ret;
@@ -581,7 +578,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
- if ((i2c->speed_hz > MAX_FS_MODE_SPEED) || (left_num >= 1))
+ if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
control_reg |= I2C_CONTROL_RS;
if (i2c->op == I2C_MASTER_WRRD)
@@ -590,7 +587,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
/* set start condition */
- if (i2c->speed_hz <= I2C_DEFAULT_SPEED)
+ if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
mtk_i2c_writew(i2c, I2C_ST_START_CON, OFFSET_EXT_CONF);
else
mtk_i2c_writew(i2c, I2C_FS_START_CON, OFFSET_EXT_CONF);
@@ -798,7 +795,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
}
}
- if (i2c->auto_restart && num >= 2 && i2c->speed_hz > MAX_FS_MODE_SPEED)
+ if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
/* ignore the first restart irq after the master code,
* otherwise the first transfer will be discarded.
*/
@@ -893,7 +890,7 @@ static int mtk_i2c_parse_dt(struct device_node *np, struct mtk_i2c *i2c)
ret = of_property_read_u32(np, "clock-frequency", &i2c->speed_hz);
if (ret < 0)
- i2c->speed_hz = I2C_DEFAULT_SPEED;
+ i2c->speed_hz = I2C_MAX_STANDARD_MODE_FREQ;
ret = of_property_read_u32(np, "clock-div", &i2c->clk_src_div);
if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 62df8379bc89..45fe4a7fe0c0 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -300,7 +300,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&i2c->bus_freq))
- i2c->bus_freq = 100000;
+ i2c->bus_freq = I2C_MAX_STANDARD_MODE_FREQ;
if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index febb7c7ea72b..9b8f1d8552ea 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -810,7 +810,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
tclk = clk_get_rate(drv_data->clk);
if (of_property_read_u32(np, "clock-frequency", &bus_freq))
- bus_freq = 100000; /* 100kHz by default */
+ bus_freq = I2C_MAX_STANDARD_MODE_FREQ; /* 100kHz by default */
if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") ||
of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
@@ -846,14 +846,14 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
drv_data->offload_enabled = true;
/* The delay is only needed in standard mode (100kHz) */
- if (bus_freq <= 100000)
+ if (bus_freq <= I2C_MAX_STANDARD_MODE_FREQ)
drv_data->errata_delay = true;
}
if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
drv_data->offload_enabled = false;
/* The delay is only needed in standard mode (100kHz) */
- if (bus_freq <= 100000)
+ if (bus_freq <= I2C_MAX_STANDARD_MODE_FREQ)
drv_data->errata_delay = true;
}
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 89224913f578..9587347447f0 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -731,7 +731,7 @@ static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, uint32_t speed)
* This is compensated for by subtracting the respective constants
* from the values written to the timing registers.
*/
- if (speed > 100000) {
+ if (speed > I2C_MAX_STANDARD_MODE_FREQ) {
/* fast mode */
low_count = DIV_ROUND_CLOSEST(divider * 13, (13 + 6));
high_count = DIV_ROUND_CLOSEST(divider * 6, (13 + 6));
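A quick worked example of the 13:6 fast-mode duty split above, ignoring the constant-offset compensation mentioned in the comment and assuming a 24 MHz timing source with a 400 kHz target: divider = 24000000 / 400000 = 60, so low_count = DIV_ROUND_CLOSEST(60 * 13, 19) = 41 and high_count = DIV_ROUND_CLOSEST(60 * 6, 19) = 19, i.e. a 41:19 ≈ 13:6 low/high split of each SCL cycle.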
@@ -769,7 +769,7 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
ret = of_property_read_u32(node, "clock-frequency", &speed);
if (ret) {
dev_warn(dev, "No I2C speed selected, using 100kHz\n");
- speed = 100000;
+ speed = I2C_MAX_STANDARD_MODE_FREQ;
}
mxs_i2c_derive_timing(i2c, speed);
@@ -836,10 +836,10 @@ static int mxs_i2c_probe(struct platform_device *pdev)
}
/* Setup the DMA */
- i2c->dmach = dma_request_slave_channel(dev, "rx-tx");
- if (!i2c->dmach) {
+ i2c->dmach = dma_request_chan(dev, "rx-tx");
+ if (IS_ERR(i2c->dmach)) {
dev_err(dev, "Failed to request dma\n");
- return -ENODEV;
+ return PTR_ERR(i2c->dmach);
}
platform_set_drvdata(pdev, i2c);
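The DMA change above is part of a tree-wide move from dma_request_slave_channel(), which returns NULL on failure and so discards the error code, to dma_request_chan(), which returns an ERR_PTR and therefore lets -EPROBE_DEFER propagate when the DMA controller has not probed yet. The resulting idiom, sketched as a helper:

#include <linux/dmaengine.h>

static int demo_request_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* propagates -EPROBE_DEFER */

	*out = chan;
	return 0;
}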
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 01a7d72e5511..e1e8d4ef9aa7 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -396,7 +396,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* 2 whereas it is 3 for fast and fastplus mode of
* operation. TODO - high speed support.
*/
- div = (dev->clk_freq > 100000) ? 3 : 2;
+ div = (dev->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
/*
* generate the mask for baud rate counters. The controller
@@ -420,7 +420,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
if (dev->sm > I2C_FREQ_MODE_FAST) {
dev_err(&dev->adev->dev,
"do not support this mode defaulting to std. mode\n");
- brcr2 = i2c_clk/(100000 * 2) & 0xffff;
+ brcr2 = i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2) & 0xffff;
writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
writel(I2C_FREQ_MODE_STANDARD << 4,
dev->virtbase + I2C_CR);
@@ -949,10 +949,10 @@ static void nmk_i2c_of_probe(struct device_node *np,
{
/* Default to 100 kHz if no frequency is given in the node */
if (of_property_read_u32(np, "clock-frequency", &nmk->clk_freq))
- nmk->clk_freq = 100000;
+ nmk->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
/* This driver only supports 'standard' and 'fast' modes of operation. */
- if (nmk->clk_freq <= 100000)
+ if (nmk->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
nmk->sm = I2C_FREQ_MODE_STANDARD;
else
nmk->sm = I2C_FREQ_MODE_FAST;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 2dfea357b131..71b4637c86b7 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1355,7 +1355,6 @@ omap_i2c_probe(struct platform_device *pdev)
{
struct omap_i2c_dev *omap;
struct i2c_adapter *adap;
- struct resource *mem;
const struct omap_i2c_bus_platform_data *pdata =
dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
@@ -1375,14 +1374,13 @@ omap_i2c_probe(struct platform_device *pdev)
if (!omap)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- omap->base = devm_ioremap_resource(&pdev->dev, mem);
+ omap->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(omap->base))
return PTR_ERR(omap->base);
match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
if (match) {
- u32 freq = 100000; /* default to 100000 Hz */
+ u32 freq = I2C_MAX_STANDARD_MODE_FREQ;
pdata = match->data;
omap->flags = pdata->flags;
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
index b6b5a495118b..3ab8be62c581 100644
--- a/drivers/i2c/busses/i2c-owl.c
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -87,9 +87,6 @@
#define OWL_I2C_MAX_RETRIES 50
-#define OWL_I2C_DEF_SPEED_HZ 100000
-#define OWL_I2C_MAX_SPEED_HZ 400000
-
struct owl_i2c_dev {
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -419,11 +416,11 @@ static int owl_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency",
&i2c_dev->bus_freq))
- i2c_dev->bus_freq = OWL_I2C_DEF_SPEED_HZ;
+ i2c_dev->bus_freq = I2C_MAX_STANDARD_MODE_FREQ;
/* We support only frequencies of 100k and 400k for now */
- if (i2c_dev->bus_freq != OWL_I2C_DEF_SPEED_HZ &&
- i2c_dev->bus_freq != OWL_I2C_MAX_SPEED_HZ) {
+ if (i2c_dev->bus_freq != I2C_MAX_STANDARD_MODE_FREQ &&
+ i2c_dev->bus_freq != I2C_MAX_FAST_MODE_FREQ) {
dev_err(dev, "invalid clock-frequency %d\n", i2c_dev->bus_freq);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 81eb441b2387..a535889acca6 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -333,13 +333,17 @@ static void i2c_parport_attach(struct parport *port)
/* Setup SMBus alert if supported */
if (adapter_parm[type].smbus_alert) {
- adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
- &adapter->alert_data);
- if (adapter->ara)
+ struct i2c_client *ara;
+
+ ara = i2c_new_smbus_alert_device(&adapter->adapter,
+ &adapter->alert_data);
+ if (!IS_ERR(ara)) {
+ adapter->ara = ara;
parport_enable_irq(port);
- else
+ } else {
dev_warn(&adapter->pdev->dev,
"Failed to register ARA client\n");
+ }
}
/* Add the new adapter to the list */
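i2c_new_smbus_alert_device() supersedes i2c_setup_smbus_alert() and, like the other i2c_new_*_device() helpers, reports failure via ERR_PTR rather than NULL, so callers test with IS_ERR() as above. A minimal sketch, assuming an already-initialised i2c_smbus_alert_setup:

#include <linux/i2c-smbus.h>

static struct i2c_client *demo_setup_alert(struct i2c_adapter *adap,
					   struct i2c_smbus_alert_setup *setup)
{
	struct i2c_client *ara;

	ara = i2c_new_smbus_alert_device(adap, setup);
	if (IS_ERR(ara))
		return NULL;	/* caller treats SMBus alert as unavailable */

	return ara;
}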
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 973e5339033c..d565714c1f13 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -279,14 +279,13 @@ static bool i2c_powermac_get_type(struct i2c_adapter *adap,
{
char tmp[16];
- /* Note: we to _NOT_ want the standard
- * i2c drivers to match with any of our powermac stuff
- * unless they have been specifically modified to handle
- * it on a case by case basis. For example, for thermal
- * control, things like lm75 etc... shall match with their
- * corresponding windfarm drivers, _NOT_ the generic ones,
- * so we force a prefix of AAPL, onto the modalias to
- * make that happen
+ /*
+ * Note: we do _NOT_ want the standard i2c drivers to match with any of
+ * our powermac stuff unless they have been specifically modified to
+ * handle it on a case by case basis. For example, for thermal control,
+ * things like lm75 etc... shall match with their corresponding
+ * windfarm drivers, _NOT_ the generic ones, so we force a prefix of
+ * 'MAC', onto the modalias to make that happen
*/
/* First try proper modalias */
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 17abf60c94ae..18d1e4fd4cf3 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
@@ -502,45 +501,40 @@ static int geni_i2c_probe(struct platform_device *pdev)
struct resource *res;
u32 proto, tx_depth;
int ret;
+ struct device *dev = &pdev->dev;
- gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
+ gi2c = devm_kzalloc(dev, sizeof(*gi2c), GFP_KERNEL);
if (!gi2c)
return -ENOMEM;
- gi2c->se.dev = &pdev->dev;
- gi2c->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ gi2c->se.dev = dev;
+ gi2c->se.wrapper = dev_get_drvdata(dev->parent);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gi2c->se.base = devm_ioremap_resource(&pdev->dev, res);
+ gi2c->se.base = devm_ioremap_resource(dev, res);
if (IS_ERR(gi2c->se.base))
return PTR_ERR(gi2c->se.base);
- gi2c->se.clk = devm_clk_get(&pdev->dev, "se");
- if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(&pdev->dev)) {
- ret = PTR_ERR(gi2c->se.clk);
- dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
- return ret;
- }
+ gi2c->se.clk = devm_clk_get(dev, "se");
+ if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(dev))
+ return PTR_ERR(gi2c->se.clk);
- ret = device_property_read_u32(&pdev->dev, "clock-frequency",
- &gi2c->clk_freq_out);
+ ret = device_property_read_u32(dev, "clock-frequency",
+ &gi2c->clk_freq_out);
if (ret) {
- dev_info(&pdev->dev,
- "Bus frequency not specified, default to 100kHz.\n");
+ dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
gi2c->clk_freq_out = KHZ(100);
}
- if (has_acpi_companion(&pdev->dev))
- ACPI_COMPANION_SET(&gi2c->adap.dev, ACPI_COMPANION(&pdev->dev));
+ if (has_acpi_companion(dev))
+ ACPI_COMPANION_SET(&gi2c->adap.dev, ACPI_COMPANION(dev));
gi2c->irq = platform_get_irq(pdev, 0);
- if (gi2c->irq < 0) {
- dev_err(&pdev->dev, "IRQ error for i2c-geni\n");
+ if (gi2c->irq < 0)
return gi2c->irq;
- }
ret = geni_i2c_clk_map_idx(gi2c);
if (ret) {
- dev_err(&pdev->dev, "Invalid clk frequency %d Hz: %d\n",
+ dev_err(dev, "Invalid clk frequency %d Hz: %d\n",
gi2c->clk_freq_out, ret);
return ret;
}
@@ -549,29 +543,29 @@ static int geni_i2c_probe(struct platform_device *pdev)
init_completion(&gi2c->done);
spin_lock_init(&gi2c->lock);
platform_set_drvdata(pdev, gi2c);
- ret = devm_request_irq(&pdev->dev, gi2c->irq, geni_i2c_irq,
- IRQF_TRIGGER_HIGH, "i2c_geni", gi2c);
+ ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0,
+ dev_name(dev), gi2c);
if (ret) {
- dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+ dev_err(dev, "Request_irq failed:%d: err:%d\n",
gi2c->irq, ret);
return ret;
}
/* Disable the interrupt so that the system can enter low-power mode */
disable_irq(gi2c->irq);
i2c_set_adapdata(&gi2c->adap, gi2c);
- gi2c->adap.dev.parent = &pdev->dev;
- gi2c->adap.dev.of_node = pdev->dev.of_node;
+ gi2c->adap.dev.parent = dev;
+ gi2c->adap.dev.of_node = dev->of_node;
strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
- dev_err(&pdev->dev, "Error turning on resources %d\n", ret);
+ dev_err(dev, "Error turning on resources %d\n", ret);
return ret;
}
proto = geni_se_read_proto(&gi2c->se);
tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
if (proto != GENI_SE_I2C) {
- dev_err(&pdev->dev, "Invalid proto %d\n", proto);
+ dev_err(dev, "Invalid proto %d\n", proto);
geni_se_resources_off(&gi2c->se);
return -ENXIO;
}
@@ -581,11 +575,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
true, true, true);
ret = geni_se_resources_off(&gi2c->se);
if (ret) {
- dev_err(&pdev->dev, "Error turning off resources %d\n", ret);
+ dev_err(dev, "Error turning off resources %d\n", ret);
return ret;
}
- dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
+ dev_dbg(dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
gi2c->suspended = 1;
pm_runtime_set_suspended(gi2c->se.dev);
@@ -595,12 +589,12 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&gi2c->adap);
if (ret) {
- dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+ dev_err(dev, "Error adding i2c adapter %d\n", ret);
pm_runtime_disable(gi2c->se.dev);
return ret;
}
- dev_dbg(&pdev->dev, "Geni-I2C adaptor successfully added\n");
+ dev_dbg(dev, "Geni-I2C adaptor successfully added\n");
return 0;
}
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 2d7dabe12723..748872a9b0fc 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -136,13 +136,8 @@
*/
#define TOUT_MIN 2
-/* I2C Frequency Modes */
-#define I2C_STANDARD_FREQ 100000
-#define I2C_FAST_MODE_FREQ 400000
-#define I2C_FAST_MODE_PLUS_FREQ 1000000
-
/* Default values. Use these if FW query fails */
-#define DEFAULT_CLK_FREQ I2C_STANDARD_FREQ
+#define DEFAULT_CLK_FREQ I2C_MAX_STANDARD_MODE_FREQ
#define DEFAULT_SRC_CLK 20000000
/*
@@ -1756,7 +1751,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
nodma:
/* We support frequencies up to FAST Mode Plus (1MHz) */
- if (!clk_freq || clk_freq > I2C_FAST_MODE_PLUS_FREQ) {
+ if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) {
dev_err(qup->dev, "clock frequency not supported %d\n",
clk_freq);
return -EINVAL;
@@ -1861,7 +1856,7 @@ nodma:
qup->in_fifo_sz = qup->in_blk_sz * (2 << size);
hs_div = 3;
- if (clk_freq <= I2C_STANDARD_FREQ) {
+ if (clk_freq <= I2C_MAX_STANDARD_MODE_FREQ) {
fs_div = ((src_clk_freq / clk_freq) / 2) - 3;
qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff);
} else {
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 879f0e61a496..3b5397aa4ca6 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -235,17 +235,20 @@ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
return i2c_recover_bus(&priv->adap);
}
-static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timings *t)
+static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
{
u32 scgd, cdf, round, ick, sum, scl, cdf_width;
unsigned long rate;
struct device *dev = rcar_i2c_priv_to_dev(priv);
+ struct i2c_timings t = {
+ .bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ,
+ .scl_fall_ns = 35,
+ .scl_rise_ns = 200,
+ .scl_int_delay_ns = 50,
+ };
/* Fall back to previously used values if not supplied */
- t->bus_freq_hz = t->bus_freq_hz ?: 100000;
- t->scl_fall_ns = t->scl_fall_ns ?: 35;
- t->scl_rise_ns = t->scl_rise_ns ?: 200;
- t->scl_int_delay_ns = t->scl_int_delay_ns ?: 50;
+ i2c_parse_fw_timings(dev, &t, false);
switch (priv->devtype) {
case I2C_RCAR_GEN1:
@@ -291,7 +294,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timin
* = F[sum * ick / 1000000000]
* = F[(ick / 1000000) * sum / 1000]
*/
- sum = t->scl_fall_ns + t->scl_rise_ns + t->scl_int_delay_ns;
+ sum = t.scl_fall_ns + t.scl_rise_ns + t.scl_int_delay_ns;
round = (ick + 500000) / 1000000 * sum;
round = (round + 500) / 1000;
@@ -309,7 +312,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timin
*/
for (scgd = 0; scgd < 0x40; scgd++) {
scl = ick / (20 + (scgd * 8) + round);
- if (scl <= t->bus_freq_hz)
+ if (scl <= t.bus_freq_hz)
goto scgd_find;
}
dev_err(dev, "it is impossible to calculate best SCL\n");
@@ -317,7 +320,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timin
scgd_find:
dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
- scl, t->bus_freq_hz, rate, round, cdf, scgd);
+ scl, t.bus_freq_hz, rate, round, cdf, scgd);
/* keep icccr value */
priv->icccr = scgd << cdf_width | cdf;
@@ -920,7 +923,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
struct rcar_i2c_priv *priv;
struct i2c_adapter *adap;
struct device *dev = &pdev->dev;
- struct i2c_timings i2c_t;
int ret;
/* Otherwise logic will break because some bytes must always use PIO */
@@ -957,8 +959,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, priv);
strlcpy(adap->name, pdev->name, sizeof(adap->name));
- i2c_parse_fw_timings(dev, &i2c_t, false);
-
/* Init DMA */
sg_init_table(&priv->sg, 1);
priv->dma_direction = DMA_NONE;
@@ -967,7 +967,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
/* Activate device for clock calculation */
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ret = rcar_i2c_clock_calculate(priv, &i2c_t);
+ ret = rcar_i2c_clock_calculate(priv);
if (ret < 0)
goto out_pm_put;
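The rcar rework leans on the semantics of i2c_parse_fw_timings(): with use_defaults == false, fields already set in the passed-in struct i2c_timings are kept whenever the corresponding firmware property is absent, so a driver can seed its own fall-back values first and let firmware override only what it actually specifies. The pattern as a sketch:

static void demo_get_timings(struct device *dev, struct i2c_timings *t)
{
	/* Driver defaults; firmware properties override them if present */
	*t = (struct i2c_timings) {
		.bus_freq_hz	  = I2C_MAX_STANDARD_MODE_FREQ,
		.scl_fall_ns	  = 35,
		.scl_rise_ns	  = 200,
		.scl_int_delay_ns = 50,
	};

	/* false: keep the seeded values for properties firmware omits */
	i2c_parse_fw_timings(dev, t, false);
}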
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 800414886f6b..4eccc0f69861 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -287,10 +287,10 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
pm_runtime_get_sync(riic->adapter.dev.parent);
- if (t->bus_freq_hz > 400000) {
+ if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) {
dev_err(&riic->adapter.dev,
- "unsupported bus speed (%dHz). 400000 max\n",
- t->bus_freq_hz);
+ "unsupported bus speed (%dHz). %d max\n",
+ t->bus_freq_hz, I2C_MAX_FAST_MODE_FREQ);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 1a33007b03e9..73272d4296bb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -539,9 +539,9 @@ out:
*/
static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
{
- if (speed <= 100000)
+ if (speed <= I2C_MAX_STANDARD_MODE_FREQ)
return &standard_mode_spec;
- else if (speed <= 400000)
+ else if (speed <= I2C_MAX_FAST_MODE_FREQ)
return &fast_mode_spec;
else
return &fast_mode_plus_spec;
@@ -578,8 +578,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
int ret = 0;
/* Only support standard-mode and fast-mode */
- if (WARN_ON(t->bus_freq_hz > 400000))
- t->bus_freq_hz = 400000;
+ if (WARN_ON(t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ))
+ t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
/* prevent scl_rate_khz from becoming 0 */
if (WARN_ON(t->bus_freq_hz < 1000))
@@ -758,8 +758,8 @@ static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate,
int ret = 0;
/* Support standard-mode, fast-mode and fast-mode plus */
- if (WARN_ON(t->bus_freq_hz > 1000000))
- t->bus_freq_hz = 1000000;
+ if (WARN_ON(t->bus_freq_hz > I2C_MAX_FAST_MODE_PLUS_FREQ))
+ t->bus_freq_hz = I2C_MAX_FAST_MODE_PLUS_FREQ;
/* prevent scl_rate_khz from becoming 0 */
if (WARN_ON(t->bus_freq_hz < 1000))
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index c98ef4c4a0c9..5a5638e1daa1 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -835,11 +835,11 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
int freq;
i2c->clkrate = clkin;
- clkin /= 1000; /* clkin now in KHz */
+ clkin /= 1000; /* clkin now in KHz */
dev_dbg(i2c->dev, "pdata desired frequency %lu\n", pdata->frequency);
- target_frequency = pdata->frequency ? pdata->frequency : 100000;
+ target_frequency = pdata->frequency ?: I2C_MAX_STANDARD_MODE_FREQ;
target_frequency /= 1000; /* Target frequency now in KHz */
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 82b3b795e0bd..d83ca4028fa0 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -145,9 +145,6 @@ struct sh_mobile_dt_config {
#define IIC_FLAG_HAS_ICIC67 (1 << 0)
-#define STANDARD_MODE 100000
-#define FAST_MODE 400000
-
/* Register offsets */
#define ICDR 0x00
#define ICCR 0x04
@@ -270,11 +267,11 @@ static int sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
i2c_clk_khz = clk_get_rate(pd->clk) / 1000 / pd->clks_per_count;
- if (pd->bus_speed == STANDARD_MODE) {
+ if (pd->bus_speed == I2C_MAX_STANDARD_MODE_FREQ) {
tLOW = 47; /* tLOW = 4.7 us */
tHIGH = 40; /* tHD;STA = tHIGH = 4.0 us */
tf = 3; /* tf = 0.3 us */
- } else if (pd->bus_speed == FAST_MODE) {
+ } else if (pd->bus_speed == I2C_MAX_FAST_MODE_FREQ) {
tLOW = 13; /* tLOW = 1.3 us */
tHIGH = 6; /* tHD;STA = tHIGH = 0.6 us */
tf = 3; /* tf = 0.3 us */
@@ -851,7 +848,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
return PTR_ERR(pd->reg);
ret = of_property_read_u32(dev->dev.of_node, "clock-frequency", &bus_speed);
- pd->bus_speed = (ret || !bus_speed) ? STANDARD_MODE : bus_speed;
+ pd->bus_speed = (ret || !bus_speed) ? I2C_MAX_STANDARD_MODE_FREQ : bus_speed;
pd->clks_per_count = 1;
/* Newer variants come with two new bits in ICIC */
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index fb7a046b3226..a459e00c6851 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -62,7 +62,6 @@
#define SIRFSOC_I2C_STOP BIT(6)
#define SIRFSOC_I2C_START BIT(7)
-#define SIRFSOC_I2C_DEFAULT_SPEED 100000
#define SIRFSOC_I2C_ERR_NOACK 1
#define SIRFSOC_I2C_ERR_TIMEOUT 2
@@ -353,7 +352,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
err = of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &bitrate);
if (err < 0)
- bitrate = SIRFSOC_I2C_DEFAULT_SPEED;
+ bitrate = I2C_MAX_STANDARD_MODE_FREQ;
/*
* Due to some hardware design issues, we need to tune the formula.
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index b432e7580458..123a42bfe3b1 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -337,9 +337,9 @@ static void sprd_i2c_set_clk(struct sprd_i2c *i2c_dev, u32 freq)
writel(div1, i2c_dev->base + ADDR_DVD1);
/* Start hold timing = hold time(us) * source clock */
- if (freq == 400000)
+ if (freq == I2C_MAX_FAST_MODE_FREQ)
writel((6 * apb_clk) / 10000000, i2c_dev->base + ADDR_STA0_DVD);
- else if (freq == 100000)
+ else if (freq == I2C_MAX_STANDARD_MODE_FREQ)
writel((4 * apb_clk) / 1000000, i2c_dev->base + ADDR_STA0_DVD);
}
@@ -502,7 +502,7 @@ static int sprd_i2c_probe(struct platform_device *pdev)
snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name),
"%s", "sprd-i2c");
- i2c_dev->bus_freq = 100000;
+ i2c_dev->bus_freq = I2C_MAX_STANDARD_MODE_FREQ;
i2c_dev->adap.owner = THIS_MODULE;
i2c_dev->dev = dev;
i2c_dev->adap.retries = 3;
@@ -516,7 +516,8 @@ static int sprd_i2c_probe(struct platform_device *pdev)
i2c_dev->bus_freq = prop;
/* We only support 100 kHz and 400 kHz now; anything else is an error. */
- if (i2c_dev->bus_freq != 100000 && i2c_dev->bus_freq != 400000)
+ if (i2c_dev->bus_freq != I2C_MAX_STANDARD_MODE_FREQ &&
+ i2c_dev->bus_freq != I2C_MAX_FAST_MODE_FREQ)
return -EINVAL;
ret = sprd_i2c_clk_init(i2c_dev);
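
The STA0_DVD divisors in the sprd hunks encode the I2C start-hold time (tHD;STA) in APB clock ticks: the specification's minimum is 0.6 us in fast mode and 4.0 us in standard mode, hence 6 * clk / 10^7 and 4 * clk / 10^6. A sketch of the arithmetic (the helper name is invented for illustration):

	/* ticks = t_hold * f_apb; 0.6 us = 6/10^7 s, 4.0 us = 4/10^6 s */
	static u32 start_hold_ticks(u32 apb_clk, u32 bus_freq)
	{
		if (bus_freq == I2C_MAX_FAST_MODE_FREQ)
			return (6 * apb_clk) / 10000000;	/* 0.6 us */
		return (4 * apb_clk) / 1000000;			/* 4.0 us */
	}
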
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index f7f7b5b64720..faa81a95551f 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -213,7 +213,7 @@ static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask)
*/
static struct st_i2c_timings i2c_timings[] = {
[I2C_MODE_STANDARD] = {
- .rate = 100000,
+ .rate = I2C_MAX_STANDARD_MODE_FREQ,
.rep_start_hold = 4400,
.rep_start_setup = 5170,
.start_hold = 4400,
@@ -222,7 +222,7 @@ static struct st_i2c_timings i2c_timings[] = {
.bus_free_time = 5170,
},
[I2C_MODE_FAST] = {
- .rate = 400000,
+ .rate = I2C_MAX_FAST_MODE_FREQ,
.rep_start_hold = 660,
.rep_start_setup = 660,
.start_hold = 660,
@@ -836,7 +836,7 @@ static int st_i2c_probe(struct platform_device *pdev)
i2c_dev->mode = I2C_MODE_STANDARD;
ret = of_property_read_u32(np, "clock-frequency", &clk_rate);
- if ((!ret) && (clk_rate == 400000))
+ if (!ret && (clk_rate == I2C_MAX_FAST_MODE_FREQ))
i2c_dev->mode = I2C_MODE_FAST;
i2c_dev->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index ba600d77a3f8..d6a69dfcac3f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -232,10 +232,10 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev)
* In standard mode:
* t_scl_high = t_scl_low = CCR * I2C parent clk period
* So to reach 100 kHz, we have:
- * CCR = I2C parent rate / 100 kHz >> 1
+ * CCR = I2C parent rate / (100 kHz * 2)
*
* For example with parent rate = 2 MHz:
- * CCR = 2000000 / (100000 << 1) = 10
+ * CCR = 2000000 / (100000 * 2) = 10
* t_scl_high = t_scl_low = 10 * (1 / 2000000) = 5000 ns
* t_scl_high + t_scl_low = 10000 ns so 100 kHz is reached
*
@@ -243,7 +243,7 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev)
 * parent rate is not higher than 46 MHz. As a result val
* is at most 8 bits wide and so fits into the CCR bits [11:0].
*/
- val = i2c_dev->parent_rate / (100000 << 1);
+ val = i2c_dev->parent_rate / (I2C_MAX_STANDARD_MODE_FREQ * 2);
} else {
/*
* In fast mode, we compute CCR with duty = 0 as with low
@@ -263,7 +263,7 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev)
 * parent rate is not higher than 46 MHz. As a result val
* is at most 6 bits wide and so fits into the CCR bits [11:0].
*/
- val = DIV_ROUND_UP(i2c_dev->parent_rate, 400000 * 3);
+ val = DIV_ROUND_UP(i2c_dev->parent_rate, I2C_MAX_FAST_MODE_FREQ * 3);
/* Select Fast mode */
ccr |= STM32F4_I2C_CCR_FS;
@@ -807,7 +807,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
ret = of_property_read_u32(np, "clock-frequency", &clk_rate);
- if (!ret && clk_rate >= 400000)
+ if (!ret && clk_rate >= I2C_MAX_FAST_MODE_FREQ)
i2c_dev->speed = STM32_I2C_SPEED_FAST;
i2c_dev->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 5c3e8ac6ad92..330ffed011e0 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -49,6 +50,7 @@
/* STM32F7 I2C control 1 */
#define STM32F7_I2C_CR1_PECEN BIT(23)
+#define STM32F7_I2C_CR1_WUPEN BIT(18)
#define STM32F7_I2C_CR1_SBC BIT(16)
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
#define STM32F7_I2C_CR1_TXDMAEN BIT(14)
@@ -174,7 +176,6 @@
* @cr2: Control register 2
* @oar1: Own address 1 register
* @oar2: Own address 2 register
- * @pecr: PEC register
* @tmgr: Timing register
*/
struct stm32f7_i2c_regs {
@@ -182,7 +183,6 @@ struct stm32f7_i2c_regs {
u32 cr2;
u32 oar1;
u32 oar2;
- u32 pecr;
u32 tmgr;
};
@@ -221,6 +221,7 @@ struct stm32f7_i2c_spec {
* @fall_time: Fall time (ns)
* @dnf: Digital filter coefficient (0-16)
* @analog_filter: Analog filter delay (On/Off)
+ * @fmp_clr_offset: Fast Mode Plus clear register offset from set register
*/
struct stm32f7_i2c_setup {
enum stm32_i2c_speed speed;
@@ -230,6 +231,7 @@ struct stm32f7_i2c_setup {
u32 fall_time;
u8 dnf;
bool analog_filter;
+ u32 fmp_clr_offset;
};
/**
@@ -301,6 +303,10 @@ struct stm32f7_i2c_msg {
* @dma: dma data
* @use_dma: boolean to know if dma is used in the current transfer
* @regmap: holds SYSCFG phandle for Fast Mode Plus bits
+ * @fmp_sreg: register address for setting Fast Mode Plus bits
+ * @fmp_creg: register address for clearing Fast Mode Plus bits
+ * @fmp_mask: mask for Fast Mode Plus bits in set register
+ * @wakeup_src: boolean to know if the device is a wakeup source
*/
struct stm32f7_i2c_dev {
struct i2c_adapter adap;
@@ -323,6 +329,10 @@ struct stm32f7_i2c_dev {
struct stm32_i2c_dma *dma;
bool use_dma;
struct regmap *regmap;
+ u32 fmp_sreg;
+ u32 fmp_creg;
+ u32 fmp_mask;
+ bool wakeup_src;
};
/*
@@ -334,9 +344,9 @@ struct stm32f7_i2c_dev {
*/
static struct stm32f7_i2c_spec i2c_specs[] = {
[STM32_I2C_SPEED_STANDARD] = {
- .rate = 100000,
- .rate_min = 80000,
- .rate_max = 100000,
+ .rate = I2C_MAX_STANDARD_MODE_FREQ,
+ .rate_min = I2C_MAX_STANDARD_MODE_FREQ * 8 / 10, /* 80% */
+ .rate_max = I2C_MAX_STANDARD_MODE_FREQ,
.fall_max = 300,
.rise_max = 1000,
.hddat_min = 0,
@@ -346,9 +356,9 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
.h_min = 4000,
},
[STM32_I2C_SPEED_FAST] = {
- .rate = 400000,
- .rate_min = 320000,
- .rate_max = 400000,
+ .rate = I2C_MAX_FAST_MODE_FREQ,
+ .rate_min = I2C_MAX_FAST_MODE_FREQ * 8 / 10, /* 80% */
+ .rate_max = I2C_MAX_FAST_MODE_FREQ,
.fall_max = 300,
.rise_max = 300,
.hddat_min = 0,
@@ -358,9 +368,9 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
.h_min = 600,
},
[STM32_I2C_SPEED_FAST_PLUS] = {
- .rate = 1000000,
- .rate_min = 800000,
- .rate_max = 1000000,
+ .rate = I2C_MAX_FAST_MODE_PLUS_FREQ,
+ .rate_min = I2C_MAX_FAST_MODE_PLUS_FREQ * 8 / 10, /* 80% */
+ .rate_max = I2C_MAX_FAST_MODE_PLUS_FREQ,
.fall_max = 100,
.rise_max = 120,
.hddat_min = 0,
@@ -378,6 +388,14 @@ static const struct stm32f7_i2c_setup stm32f7_setup = {
.analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
};
+static const struct stm32f7_i2c_setup stm32mp15_setup = {
+ .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
+ .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
+ .dnf = STM32F7_I2C_DNF_DEFAULT,
+ .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
+ .fmp_clr_offset = 0x40,
+};
+
static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask)
{
writel_relaxed(readl_relaxed(reg) | mask, reg);
@@ -592,8 +610,25 @@ exit:
static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
struct stm32f7_i2c_setup *setup)
{
+ struct i2c_timings timings, *t = &timings;
int ret = 0;
+ t->bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ t->scl_rise_ns = i2c_dev->setup.rise_time;
+ t->scl_fall_ns = i2c_dev->setup.fall_time;
+
+ i2c_parse_fw_timings(i2c_dev->dev, t, false);
+
+ if (t->bus_freq_hz >= I2C_MAX_FAST_MODE_PLUS_FREQ)
+ i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS;
+ else if (t->bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ)
+ i2c_dev->speed = STM32_I2C_SPEED_FAST;
+ else
+ i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
+
+ i2c_dev->setup.rise_time = t->scl_rise_ns;
+ i2c_dev->setup.fall_time = t->scl_fall_ns;
+
setup->speed = i2c_dev->speed;
setup->speed_freq = i2c_specs[setup->speed].rate;
setup->clock_src = clk_get_rate(i2c_dev->clk);
@@ -1691,6 +1726,24 @@ pm_free:
return ret;
}
+static void stm32f7_i2c_enable_wakeup(struct stm32f7_i2c_dev *i2c_dev,
+ bool enable)
+{
+ void __iomem *base = i2c_dev->base;
+ u32 mask = STM32F7_I2C_CR1_WUPEN;
+
+ if (!i2c_dev->wakeup_src)
+ return;
+
+ if (enable) {
+ device_set_wakeup_enable(i2c_dev->dev, true);
+ stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
+ } else {
+ device_set_wakeup_enable(i2c_dev->dev, false);
+ stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1, mask);
+ }
+}
+
static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
{
struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(slave->adapter);
@@ -1717,6 +1770,9 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
if (ret < 0)
return ret;
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev))
+ stm32f7_i2c_enable_wakeup(i2c_dev, true);
+
if (id == 0) {
/* Configure Own Address 1 */
oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1);
@@ -1758,6 +1814,9 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
ret = 0;
pm_free:
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev))
+ stm32f7_i2c_enable_wakeup(i2c_dev, false);
+
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -1791,8 +1850,10 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
i2c_dev->slave[id] = NULL;
- if (!(stm32f7_i2c_is_slave_registered(i2c_dev)))
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev)) {
stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK);
+ stm32f7_i2c_enable_wakeup(i2c_dev, false);
+ }
pm_runtime_mark_last_busy(i2c_dev->dev);
pm_runtime_put_autosuspend(i2c_dev->dev);
@@ -1800,28 +1861,51 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
return 0;
}
+static int stm32f7_i2c_write_fm_plus_bits(struct stm32f7_i2c_dev *i2c_dev,
+ bool enable)
+{
+ int ret;
+
+ if (i2c_dev->speed != STM32_I2C_SPEED_FAST_PLUS ||
+ IS_ERR_OR_NULL(i2c_dev->regmap))
+ /* Optional */
+ return 0;
+
+ if (i2c_dev->fmp_sreg == i2c_dev->fmp_creg)
+ ret = regmap_update_bits(i2c_dev->regmap,
+ i2c_dev->fmp_sreg,
+ i2c_dev->fmp_mask,
+ enable ? i2c_dev->fmp_mask : 0);
+ else
+ ret = regmap_write(i2c_dev->regmap,
+ enable ? i2c_dev->fmp_sreg :
+ i2c_dev->fmp_creg,
+ i2c_dev->fmp_mask);
+
+ return ret;
+}
+
static int stm32f7_i2c_setup_fm_plus_bits(struct platform_device *pdev,
struct stm32f7_i2c_dev *i2c_dev)
{
struct device_node *np = pdev->dev.of_node;
int ret;
- u32 reg, mask;
i2c_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg-fmp");
- if (IS_ERR(i2c_dev->regmap)) {
+ if (IS_ERR(i2c_dev->regmap))
/* Optional */
return 0;
- }
- ret = of_property_read_u32_index(np, "st,syscfg-fmp", 1, &reg);
+ ret = of_property_read_u32_index(np, "st,syscfg-fmp", 1,
+ &i2c_dev->fmp_sreg);
if (ret)
return ret;
- ret = of_property_read_u32_index(np, "st,syscfg-fmp", 2, &mask);
- if (ret)
- return ret;
+ i2c_dev->fmp_creg = i2c_dev->fmp_sreg +
+ i2c_dev->setup.fmp_clr_offset;
- return regmap_update_bits(i2c_dev->regmap, reg, mask, mask);
+ return of_property_read_u32_index(np, "st,syscfg-fmp", 2,
+ &i2c_dev->fmp_mask);
}
static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
@@ -1847,7 +1931,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
struct stm32f7_i2c_dev *i2c_dev;
const struct stm32f7_i2c_setup *setup;
struct resource *res;
- u32 clk_rate, rise_time, fall_time;
struct i2c_adapter *adap;
struct reset_control *rst;
dma_addr_t phy_addr;
@@ -1879,6 +1962,9 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
return irq_error ? : -ENOENT;
}
+ i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
+ "wakeup-source");
+
i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c_dev->clk)) {
dev_err(&pdev->dev, "Error: Missing controller clock\n");
@@ -1891,20 +1977,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
return ret;
}
- i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
- ret = device_property_read_u32(&pdev->dev, "clock-frequency",
- &clk_rate);
- if (!ret && clk_rate >= 1000000) {
- i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS;
- ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
- if (ret)
- goto clk_free;
- } else if (!ret && clk_rate >= 400000) {
- i2c_dev->speed = STM32_I2C_SPEED_FAST;
- } else if (!ret && clk_rate >= 100000) {
- i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
- }
-
rst = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(rst)) {
dev_err(&pdev->dev, "Error: Missing controller reset\n");
@@ -1944,20 +2016,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
i2c_dev->setup = *setup;
- ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
- &rise_time);
- if (!ret)
- i2c_dev->setup.rise_time = rise_time;
-
- ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
- &fall_time);
- if (!ret)
- i2c_dev->setup.fall_time = fall_time;
-
ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
if (ret)
goto clk_free;
+ if (i2c_dev->speed == STM32_I2C_SPEED_FAST_PLUS) {
+ ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
+ if (ret)
+ goto clk_free;
+ ret = stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
+ if (ret)
+ goto clk_free;
+ }
+
adap = &i2c_dev->adap;
i2c_set_adapdata(adap, i2c_dev);
snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)",
@@ -1982,7 +2053,17 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"Failed to request dma error %i\n", ret);
- goto clk_free;
+ goto fmp_clear;
+ }
+
+ if (i2c_dev->wakeup_src) {
+ device_set_wakeup_capable(i2c_dev->dev, true);
+
+ ret = dev_pm_set_wake_irq(i2c_dev->dev, irq_event);
+ if (ret) {
+ dev_err(i2c_dev->dev, "Failed to set wake up irq\n");
+ goto clr_wakeup_capable;
+ }
}
platform_set_drvdata(pdev, i2c_dev);
@@ -2014,11 +2095,21 @@ pm_disable:
pm_runtime_set_suspended(i2c_dev->dev);
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
+ if (i2c_dev->wakeup_src)
+ dev_pm_clear_wake_irq(i2c_dev->dev);
+
+clr_wakeup_capable:
+ if (i2c_dev->wakeup_src)
+ device_set_wakeup_capable(i2c_dev->dev, false);
+
if (i2c_dev->dma) {
stm32_i2c_dma_free(i2c_dev->dma);
i2c_dev->dma = NULL;
}
+fmp_clear:
+ stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
+
clk_free:
clk_disable_unprepare(i2c_dev->clk);
@@ -2032,6 +2123,15 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c_dev->adap);
pm_runtime_get_sync(i2c_dev->dev);
+ if (i2c_dev->wakeup_src) {
+ dev_pm_clear_wake_irq(i2c_dev->dev);
+ /*
+		 * Enforce that wakeup is disabled and that the device
+		 * is marked as non-wakeup-capable
+ */
+ device_init_wakeup(i2c_dev->dev, false);
+ }
+
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);
pm_runtime_set_suspended(i2c_dev->dev);
@@ -2042,6 +2142,8 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
i2c_dev->dma = NULL;
}
+ stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
+
clk_disable_unprepare(i2c_dev->clk);
return 0;
@@ -2073,8 +2175,8 @@ static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused
-stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
+#ifdef CONFIG_PM_SLEEP
+static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
{
int ret;
struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
@@ -2087,16 +2189,15 @@ stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
backup_regs->cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2);
backup_regs->oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1);
backup_regs->oar2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR2);
- backup_regs->pecr = readl_relaxed(i2c_dev->base + STM32F7_I2C_PECR);
backup_regs->tmgr = readl_relaxed(i2c_dev->base + STM32F7_I2C_TIMINGR);
+ stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
pm_runtime_put_sync(i2c_dev->dev);
return ret;
}
-static int __maybe_unused
-stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
+static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
{
u32 cr1;
int ret;
@@ -2120,48 +2221,55 @@ stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
writel_relaxed(backup_regs->cr2, i2c_dev->base + STM32F7_I2C_CR2);
writel_relaxed(backup_regs->oar1, i2c_dev->base + STM32F7_I2C_OAR1);
writel_relaxed(backup_regs->oar2, i2c_dev->base + STM32F7_I2C_OAR2);
- writel_relaxed(backup_regs->pecr, i2c_dev->base + STM32F7_I2C_PECR);
+ stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
pm_runtime_put_sync(i2c_dev->dev);
return ret;
}
-static int __maybe_unused stm32f7_i2c_suspend(struct device *dev)
+static int stm32f7_i2c_suspend(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
i2c_mark_adapter_suspended(&i2c_dev->adap);
- ret = stm32f7_i2c_regs_backup(i2c_dev);
- if (ret < 0) {
- i2c_mark_adapter_resumed(&i2c_dev->adap);
- return ret;
- }
- pinctrl_pm_select_sleep_state(dev);
- pm_runtime_force_suspend(dev);
+ if (!device_may_wakeup(dev) && !dev->power.wakeup_path) {
+ ret = stm32f7_i2c_regs_backup(i2c_dev);
+ if (ret < 0) {
+ i2c_mark_adapter_resumed(&i2c_dev->adap);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
+ pm_runtime_force_suspend(dev);
+ }
return 0;
}
-static int __maybe_unused stm32f7_i2c_resume(struct device *dev)
+static int stm32f7_i2c_resume(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
- ret = pm_runtime_force_resume(dev);
- if (ret < 0)
- return ret;
- pinctrl_pm_select_default_state(dev);
+ if (!device_may_wakeup(dev) && !dev->power.wakeup_path) {
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+ pinctrl_pm_select_default_state(dev);
+
+ ret = stm32f7_i2c_regs_restore(i2c_dev);
+ if (ret < 0)
+ return ret;
+ }
- ret = stm32f7_i2c_regs_restore(i2c_dev);
- if (ret < 0)
- return ret;
i2c_mark_adapter_resumed(&i2c_dev->adap);
return 0;
}
+#endif
static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend,
@@ -2171,6 +2279,7 @@ static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
static const struct of_device_id stm32f7_i2c_match[] = {
{ .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup},
+ { .compatible = "st,stm32mp15-i2c", .data = &stm32mp15_setup},
{},
};
MODULE_DEVICE_TABLE(of, stm32f7_i2c_match);
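
The wakeup plumbing added to i2c-stm32f7 above follows the standard kernel pattern: probe reads the "wakeup-source" DT property, marks the device wakeup-capable, and dedicates the event interrupt as the wake IRQ; system suspend then leaves the controller powered whenever it may wake the system. A condensed sketch of the pattern (identifiers shortened for illustration):

	/* probe */
	priv->wakeup_src = of_property_read_bool(np, "wakeup-source");
	if (priv->wakeup_src) {
		device_set_wakeup_capable(dev, true);
		ret = dev_pm_set_wake_irq(dev, irq_event);
		if (ret)
			goto err_wakeup;
	}

	/* system suspend: only power down when nothing needs the wake path */
	if (!device_may_wakeup(dev) && !dev->power.wakeup_path) {
		/* back up registers, select sleep pinctrl, force runtime suspend */
	}
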
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 42e0a53e7fa4..ba6b60caa45e 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -132,7 +132,7 @@ enum stu300_error {
#define NUM_ADDR_RESEND_ATTEMPTS 12
/* I2C clock speed, in Hz (0-400 kHz) */
-static unsigned int scl_frequency = 100000;
+static unsigned int scl_frequency = I2C_MAX_STANDARD_MODE_FREQ;
module_param(scl_frequency, uint, 0644);
/**
@@ -497,7 +497,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
dev_dbg(&dev->pdev->dev, "Clock rate %lu Hz, I2C bus speed %d Hz "
"virtbase %p\n", clkrate, dev->speed, dev->virtbase);
- if (dev->speed > 100000)
+ if (dev->speed > I2C_MAX_STANDARD_MODE_FREQ)
/* Fast Mode I2C */
val = ((clkrate/dev->speed) - 9)/3 + 1;
else
@@ -518,7 +518,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
return -EINVAL;
}
- if (dev->speed > 100000) {
+ if (dev->speed > I2C_MAX_STANDARD_MODE_FREQ) {
/* CC6..CC0 */
stu300_wr8((val & I2C_CCR_CC_MASK) | I2C_CCR_FMSM,
dev->virtbase + I2C_CCR);
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index 7c07ce116e38..e5293f0b3318 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -186,7 +186,7 @@ static int p2wi_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device_node *childnp;
unsigned long parent_clk_freq;
- u32 clk_freq = 100000;
+ u32 clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
struct resource *r;
struct p2wi *p2wi;
u32 slave_addr;
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 86026798b4f7..9099d0a67ace 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -67,10 +67,10 @@
/* STANDARD MODE frequency */
#define SYNQUACER_I2C_CLK_MASTER_STD(rate) \
- DIV_ROUND_UP(DIV_ROUND_UP((rate), 100000) - 2, 2)
+ DIV_ROUND_UP(DIV_ROUND_UP((rate), I2C_MAX_STANDARD_MODE_FREQ) - 2, 2)
/* FAST MODE frequency */
#define SYNQUACER_I2C_CLK_MASTER_FAST(rate) \
- DIV_ROUND_UP((DIV_ROUND_UP((rate), 400000) - 2) * 2, 3)
+ DIV_ROUND_UP((DIV_ROUND_UP((rate), I2C_MAX_FAST_MODE_FREQ) - 2) * 2, 3)
/* (clkrate <= 18000000) */
/* calculate the value of CS bits in CCR register on standard mode */
@@ -602,7 +602,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
i2c->adapter.nr = pdev->id;
init_completion(&i2c->completion);
- if (bus_speed < 400000)
+ if (bus_speed < I2C_MAX_FAST_MODE_FREQ)
i2c->speed_khz = SYNQUACER_I2C_SPEED_SM;
else
i2c->speed_khz = SYNQUACER_I2C_SPEED_FM;
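
A quick sanity check of the synquacer divisor macros with a hypothetical 20 MHz peripheral clock (not a value from the patch):

	/*
	 * SYNQUACER_I2C_CLK_MASTER_STD(20000000)
	 *   = DIV_ROUND_UP(DIV_ROUND_UP(20000000, 100000) - 2, 2)
	 *   = DIV_ROUND_UP(200 - 2, 2)                   = 99
	 * SYNQUACER_I2C_CLK_MASTER_FAST(20000000)
	 *   = DIV_ROUND_UP((DIV_ROUND_UP(20000000, 400000) - 2) * 2, 3)
	 *   = DIV_ROUND_UP((50 - 2) * 2, 3) = 96 / 3     = 32
	 */
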
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index cbc2ad49043e..4c4d17ddc96b 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -123,10 +123,6 @@
#define I2C_THIGH_SHIFT 8
#define I2C_INTERFACE_TIMING_1 0x98
-#define I2C_STANDARD_MODE 100000
-#define I2C_FAST_MODE 400000
-#define I2C_FAST_PLUS_MODE 1000000
-
/* Packet header size in bytes */
#define I2C_PACKET_HEADER_SIZE 12
@@ -737,8 +733,8 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT;
i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR);
- if (i2c_dev->bus_clk_rate > I2C_STANDARD_MODE &&
- i2c_dev->bus_clk_rate <= I2C_FAST_PLUS_MODE) {
+ if (i2c_dev->bus_clk_rate > I2C_MAX_STANDARD_MODE_FREQ &&
+ i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_PLUS_FREQ) {
tlow = i2c_dev->hw->tlow_fast_fastplus_mode;
thigh = i2c_dev->hw->thigh_fast_fastplus_mode;
tsu_thd = i2c_dev->hw->setup_hold_time_fast_fast_plus_mode;
@@ -1341,7 +1337,7 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
ret = of_property_read_u32(np, "clock-frequency",
&i2c_dev->bus_clk_rate);
if (ret)
- i2c_dev->bus_clk_rate = 100000; /* default clock rate */
+ i2c_dev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */
multi_mode = of_property_read_bool(np, "multi-master");
i2c_dev->is_multimaster_mode = multi_mode;
@@ -1640,12 +1636,12 @@ static int tegra_i2c_probe(struct platform_device *pdev)
}
}
- if (i2c_dev->bus_clk_rate > I2C_FAST_MODE &&
- i2c_dev->bus_clk_rate <= I2C_FAST_PLUS_MODE)
+ if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ &&
+ i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_PLUS_FREQ)
i2c_dev->clk_divisor_non_hs_mode =
i2c_dev->hw->clk_divisor_fast_plus_mode;
- else if (i2c_dev->bus_clk_rate > I2C_STANDARD_MODE &&
- i2c_dev->bus_clk_rate <= I2C_FAST_MODE)
+ else if (i2c_dev->bus_clk_rate > I2C_MAX_STANDARD_MODE_FREQ &&
+ i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_FREQ)
i2c_dev->clk_divisor_non_hs_mode =
i2c_dev->hw->clk_divisor_fast_mode;
else
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 19f8eec38717..12c90aa0900e 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -118,6 +118,8 @@ static void thunder_i2c_clock_disable(struct device *dev, struct clk *clk)
static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
struct device_node *node)
{
+ struct i2c_client *ara;
+
if (!node)
return -EINVAL;
@@ -125,9 +127,12 @@ static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
if (!i2c->alert_data.irq)
return -EINVAL;
- i2c->ara = i2c_setup_smbus_alert(&i2c->adap, &i2c->alert_data);
- if (!i2c->ara)
- return -ENODEV;
+ ara = i2c_new_smbus_alert_device(&i2c->adap, &i2c->alert_data);
+ if (IS_ERR(ara))
+ return PTR_ERR(ara);
+
+ i2c->ara = ara;
+
return 0;
}
@@ -178,7 +183,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
thunder_i2c_clock_enable(dev, i2c);
ret = device_property_read_u32(dev, "clock-frequency", &i2c->twsi_freq);
if (ret)
- i2c->twsi_freq = 100000;
+ i2c->twsi_freq = I2C_MAX_STANDARD_MODE_FREQ;
init_waitqueue_head(&i2c->queue);
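
The thunderx hunk is the template for every i2c_setup_smbus_alert() caller converted in this series: i2c_new_smbus_alert_device() reports failure through an ERR_PTR-encoded pointer rather than NULL, so callers can propagate the real errno (including -EPROBE_DEFER) instead of flattening everything to -ENODEV. The generic caller-side pattern:

	struct i2c_client *ara;

	ara = i2c_new_smbus_alert_device(adap, &alert_data);
	if (IS_ERR(ara))
		return PTR_ERR(ara);	/* keep the precise error code */

	priv->ara = ara;
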
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index 4241aac79e7e..2b258d54d68c 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -73,8 +73,6 @@
#define UNIPHIER_FI2C_BYTE_WISE BIT(3)
#define UNIPHIER_FI2C_DEFER_STOP_COMP BIT(4)
-#define UNIPHIER_FI2C_DEFAULT_SPEED 100000
-#define UNIPHIER_FI2C_MAX_SPEED 400000
#define UNIPHIER_FI2C_FIFO_SIZE 8
struct uniphier_fi2c_priv {
@@ -537,9 +535,9 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
}
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
- bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
+ bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > UNIPHIER_FI2C_MAX_SPEED) {
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 0270090c0360..668b1fa2b0ef 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -35,9 +35,6 @@
#define UNIPHIER_I2C_NOISE 0x1c /* noise filter control */
#define UNIPHIER_I2C_SETUP 0x20 /* setup time control */
-#define UNIPHIER_I2C_DEFAULT_SPEED 100000
-#define UNIPHIER_I2C_MAX_SPEED 400000
-
struct uniphier_i2c_priv {
struct completion comp;
struct i2c_adapter adap;
@@ -333,9 +330,9 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
}
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
- bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
+ bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > UNIPHIER_I2C_MAX_SPEED) {
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 524017f7034e..88f5aafdce5b 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -399,7 +399,7 @@ static int wmt_i2c_probe(struct platform_device *pdev)
i2c_dev->mode = I2C_MODE_STANDARD;
err = of_property_read_u32(np, "clock-frequency", &clk_rate);
- if ((!err) && (clk_rate == 400000))
+ if (!err && (clk_rate == I2C_MAX_FAST_MODE_FREQ))
i2c_dev->mode = I2C_MODE_FAST;
i2c_dev->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 8a873975cf12..391c878a7cdc 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -71,8 +71,6 @@
#define XLP9XX_I2C_SLAVEADDR_ADDR_SHIFT 1
#define XLP9XX_I2C_IP_CLK_FREQ 133000000UL
-#define XLP9XX_I2C_DEFAULT_FREQ 100000
-#define XLP9XX_I2C_HIGH_FREQ 400000
#define XLP9XX_I2C_FIFO_SIZE 0x80U
#define XLP9XX_I2C_TIMEOUT_MS 1000
#define XLP9XX_I2C_BUSY_TIMEOUT 50
@@ -476,12 +474,12 @@ static int xlp9xx_i2c_get_frequency(struct platform_device *pdev,
err = device_property_read_u32(&pdev->dev, "clock-frequency", &freq);
if (err) {
- freq = XLP9XX_I2C_DEFAULT_FREQ;
+ freq = I2C_MAX_STANDARD_MODE_FREQ;
dev_dbg(&pdev->dev, "using default frequency %u\n", freq);
- } else if (freq == 0 || freq > XLP9XX_I2C_HIGH_FREQ) {
+ } else if (freq == 0 || freq > I2C_MAX_FAST_MODE_FREQ) {
dev_warn(&pdev->dev, "invalid frequency %u, using default\n",
freq);
- freq = XLP9XX_I2C_DEFAULT_FREQ;
+ freq = I2C_MAX_STANDARD_MODE_FREQ;
}
priv->clk_hz = freq;
@@ -491,12 +489,16 @@ static int xlp9xx_i2c_get_frequency(struct platform_device *pdev,
static int xlp9xx_i2c_smbus_setup(struct xlp9xx_i2c_dev *priv,
struct platform_device *pdev)
{
+ struct i2c_client *ara;
+
if (!priv->alert_data.irq)
return -EINVAL;
- priv->ara = i2c_setup_smbus_alert(&priv->adapter, &priv->alert_data);
- if (!priv->ara)
- return -ENODEV;
+ ara = i2c_new_smbus_alert_device(&priv->adapter, &priv->alert_data);
+ if (IS_ERR(ara))
+ return PTR_ERR(ara);
+
+ priv->ara = ara;
return 0;
}
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 34cd4b308540..282f161a8b08 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -404,7 +404,7 @@ static int xlr_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&busfreq))
- busfreq = 100000;
+ busfreq = I2C_MAX_STANDARD_MODE_FREQ;
clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(clk)) {
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 8b0ff780919b..c8f42f2037cb 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -318,7 +318,7 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
lookup->min_speed = lookup->speed;
if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
- lookup->force_speed = 400000;
+ lookup->force_speed = I2C_MAX_FAST_MODE_FREQ;
return AE_OK;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index cefad0881942..1f1442dfcad7 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -7,7 +7,7 @@
* Mux support by Rodolfo Giometti <giometti@enneenne.com> and
* Michael Lawnick <michael.lawnick.ext@nsn.com>
*
- * Copyright (C) 2013-2017 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013-2017 Wolfram Sang <wsa@kernel.org>
*/
#define pr_fmt(fmt) "i2c-core: " fmt
@@ -338,8 +338,10 @@ static int i2c_device_probe(struct device *dev)
} else if (ACPI_COMPANION(dev)) {
irq = i2c_acpi_get_irq(client);
}
- if (irq == -EPROBE_DEFER)
- return irq;
+ if (irq == -EPROBE_DEFER) {
+ status = irq;
+ goto put_sync_adapter;
+ }
if (irq < 0)
irq = 0;
@@ -353,15 +355,19 @@ static int i2c_device_probe(struct device *dev)
*/
if (!driver->id_table &&
!i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
- !i2c_of_match_device(dev->driver->of_match_table, client))
- return -ENODEV;
+ !i2c_of_match_device(dev->driver->of_match_table, client)) {
+ status = -ENODEV;
+ goto put_sync_adapter;
+ }
if (client->flags & I2C_CLIENT_WAKE) {
int wakeirq;
wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
- if (wakeirq == -EPROBE_DEFER)
- return wakeirq;
+ if (wakeirq == -EPROBE_DEFER) {
+ status = wakeirq;
+ goto put_sync_adapter;
+ }
device_init_wakeup(&client->dev, true);
@@ -408,6 +414,10 @@ err_detach_pm_domain:
err_clear_wakeup_irq:
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
+put_sync_adapter:
+ if (client->flags & I2C_CLIENT_HOST_NOTIFY)
+ pm_runtime_put_sync(&client->adapter->dev);
+
return status;
}
@@ -1593,32 +1603,30 @@ EXPORT_SYMBOL(i2c_del_adapter);
* @dev: The device to scan for I2C timing properties
* @t: the i2c_timings struct to be filled with values
* @use_defaults: bool to use sane defaults derived from the I2C specification
- * when properties are not found, otherwise use 0
+ * when properties are not found, otherwise don't update
*
* Scan the device for the generic I2C properties describing timing parameters
* for the signal and fill the given struct with the results. If a property was
* not found and use_defaults was true, then maximum timings are assumed which
* are derived from the I2C specification. If use_defaults is not used, the
- * results will be 0, so drivers can apply their own defaults later. The latter
- * is mainly intended for avoiding regressions of existing drivers which want
- * to switch to this function. New drivers almost always should use the defaults.
+ * results will be as before, so drivers can apply their own defaults before
+ * calling this helper. The latter is mainly intended for avoiding regressions
+ * of existing drivers which want to switch to this function. New drivers
+ * almost always should use the defaults.
*/
-
void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults)
{
int ret;
- memset(t, 0, sizeof(*t));
-
ret = device_property_read_u32(dev, "clock-frequency", &t->bus_freq_hz);
if (ret && use_defaults)
- t->bus_freq_hz = 100000;
+ t->bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
ret = device_property_read_u32(dev, "i2c-scl-rising-time-ns", &t->scl_rise_ns);
if (ret && use_defaults) {
- if (t->bus_freq_hz <= 100000)
+ if (t->bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ)
t->scl_rise_ns = 1000;
- else if (t->bus_freq_hz <= 400000)
+ else if (t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ)
t->scl_rise_ns = 300;
else
t->scl_rise_ns = 120;
@@ -1626,25 +1634,31 @@ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_de
ret = device_property_read_u32(dev, "i2c-scl-falling-time-ns", &t->scl_fall_ns);
if (ret && use_defaults) {
- if (t->bus_freq_hz <= 400000)
+ if (t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ)
t->scl_fall_ns = 300;
else
t->scl_fall_ns = 120;
}
- device_property_read_u32(dev, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns);
+ ret = device_property_read_u32(dev, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns);
+ if (ret && use_defaults)
+ t->scl_int_delay_ns = 0;
ret = device_property_read_u32(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns);
if (ret && use_defaults)
t->sda_fall_ns = t->scl_fall_ns;
- device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
+ ret = device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
+ if (ret && use_defaults)
+ t->sda_hold_ns = 0;
- device_property_read_u32(dev, "i2c-digital-filter-width-ns",
- &t->digital_filter_width_ns);
+ ret = device_property_read_u32(dev, "i2c-digital-filter-width-ns", &t->digital_filter_width_ns);
+ if (ret && use_defaults)
+ t->digital_filter_width_ns = 0;
- device_property_read_u32(dev, "i2c-analog-filter-cutoff-frequency",
- &t->analog_filter_cutoff_freq_hz);
+ ret = device_property_read_u32(dev, "i2c-analog-filter-cutoff-frequency", &t->analog_filter_cutoff_freq_hz);
+ if (ret && use_defaults)
+ t->analog_filter_cutoff_freq_hz = 0;
}
EXPORT_SYMBOL_GPL(i2c_parse_fw_timings);
@@ -2269,19 +2283,6 @@ i2c_new_scanned_device(struct i2c_adapter *adap,
}
EXPORT_SYMBOL_GPL(i2c_new_scanned_device);
-struct i2c_client *
-i2c_new_probed_device(struct i2c_adapter *adap,
- struct i2c_board_info *info,
- unsigned short const *addr_list,
- int (*probe)(struct i2c_adapter *adap, unsigned short addr))
-{
- struct i2c_client *client;
-
- client = i2c_new_scanned_device(adap, info, addr_list, probe);
- return IS_ERR(client) ? NULL : client;
-}
-EXPORT_SYMBOL_GPL(i2c_new_probed_device);
-
struct i2c_adapter *i2c_get_adapter(int nr)
{
struct i2c_adapter *adapter;
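
With the memset() removed, i2c_parse_fw_timings(dev, t, false) now leaves caller-preloaded fields intact instead of zeroing everything first, which is exactly what the new stm32f7 timing setup relies on. A minimal sketch of the new calling convention (the rise-time value is hypothetical):

	struct i2c_timings t = {
		.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ,	/* driver default */
		.scl_rise_ns = 720,				/* driver default */
	};

	/*
	 * use_defaults == false: fields keep the values above unless the
	 * firmware provides "clock-frequency", "i2c-scl-rising-time-ns", etc.
	 */
	i2c_parse_fw_timings(dev, &t, false);
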
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 6787c1f71483..3ed74aa4b44b 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -5,7 +5,7 @@
* Copyright (C) 2008 Jochen Friedrich <jochen@scram.de>
* based on a previous patch from Jon Smirl <jonsmirl@gmail.com>
*
- * Copyright (C) 2013, 2018 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013, 2018 Wolfram Sang <wsa@kernel.org>
*/
#include <dt-bindings/i2c/i2c.h>
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 3ac426a8ab5a..b34d2ff06931 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -666,7 +666,7 @@ s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
/**
- * i2c_setup_smbus_alert - Setup SMBus alert support
+ * i2c_new_smbus_alert_device - get ara client for SMBus alert support
* @adapter: the target adapter
* @setup: setup data for the SMBus alert handler
* Context: can sleep
@@ -676,31 +676,25 @@ EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
* Handling can be done either through our IRQ handler, or by the
* adapter (from its handler, periodic polling, or whatever).
*
- * NOTE that if we manage the IRQ, we *MUST* know if it's level or
- * edge triggered in order to hand it to the workqueue correctly.
- * If triggering the alert seems to wedge the system, you probably
- * should have said it's level triggered.
- *
* This returns the ara client, which should be saved for later use with
- * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
- * to indicate an error.
+ * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or an
+ * ERR_PTR() to indicate an error.
*/
-struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
- struct i2c_smbus_alert_setup *setup)
+struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup)
{
struct i2c_board_info ara_board_info = {
I2C_BOARD_INFO("smbus_alert", 0x0c),
.platform_data = setup,
};
- return i2c_new_device(adapter, &ara_board_info);
+ return i2c_new_client_device(adapter, &ara_board_info);
}
-EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
+EXPORT_SYMBOL_GPL(i2c_new_smbus_alert_device);
#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter)
{
- struct i2c_client *client;
int irq;
irq = of_property_match_string(adapter->dev.of_node, "interrupt-names",
@@ -710,11 +704,7 @@ int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter)
else if (irq < 0)
return irq;
- client = i2c_setup_smbus_alert(adapter, NULL);
- if (!client)
- return -ENODEV;
-
- return 0;
+ return PTR_ERR_OR_ZERO(i2c_new_smbus_alert_device(adapter, NULL));
}
EXPORT_SYMBOL_GPL(of_i2c_setup_smbus_alert);
#endif
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 2ea4585d18c5..da020acc9bbd 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -15,6 +15,7 @@
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
#include <linux/cdev.h>
+#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/i2c-dev.h>
@@ -27,7 +28,6 @@
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/compat.h>
/*
* An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
@@ -40,7 +40,7 @@
struct i2c_dev {
struct list_head list;
struct i2c_adapter *adap;
- struct device *dev;
+ struct device dev;
struct cdev cdev;
};
@@ -84,12 +84,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
return i2c_dev;
}
-static void put_i2c_dev(struct i2c_dev *i2c_dev)
+static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev)
{
spin_lock(&i2c_dev_list_lock);
list_del(&i2c_dev->list);
spin_unlock(&i2c_dev_list_lock);
- kfree(i2c_dev);
+ if (del_cdev)
+ cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev);
+ put_device(&i2c_dev->dev);
}
static ssize_t name_show(struct device *dev,
@@ -628,6 +630,14 @@ static const struct file_operations i2cdev_fops = {
static struct class *i2c_dev_class;
+static void i2cdev_dev_release(struct device *dev)
+{
+ struct i2c_dev *i2c_dev;
+
+ i2c_dev = container_of(dev, struct i2c_dev, dev);
+ kfree(i2c_dev);
+}
+
static int i2cdev_attach_adapter(struct device *dev, void *dummy)
{
struct i2c_adapter *adap;
@@ -644,27 +654,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
cdev_init(&i2c_dev->cdev, &i2cdev_fops);
i2c_dev->cdev.owner = THIS_MODULE;
- res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);
- if (res)
- goto error_cdev;
-
- /* register this i2c device with the driver core */
- i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
- MKDEV(I2C_MAJOR, adap->nr), NULL,
- "i2c-%d", adap->nr);
- if (IS_ERR(i2c_dev->dev)) {
- res = PTR_ERR(i2c_dev->dev);
- goto error;
+
+ device_initialize(&i2c_dev->dev);
+ i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr);
+ i2c_dev->dev.class = i2c_dev_class;
+ i2c_dev->dev.parent = &adap->dev;
+ i2c_dev->dev.release = i2cdev_dev_release;
+ dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
+ if (res) {
+ put_i2c_dev(i2c_dev, false);
+ return res;
}
pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
adap->name, adap->nr);
return 0;
-error:
- cdev_del(&i2c_dev->cdev);
-error_cdev:
- put_i2c_dev(i2c_dev);
- return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
@@ -680,9 +686,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
- cdev_del(&i2c_dev->cdev);
- put_i2c_dev(i2c_dev);
- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
+ put_i2c_dev(i2c_dev, true);
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
return 0;
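
The i2c-dev rework above replaces the cdev_add()/device_create() pair with the embedded-struct-device plus cdev_device_add() pattern, tying the cdev's lifetime to the device refcount and closing the race with open file descriptors at unregister time. The skeleton, with hypothetical foo names:

	static void foo_release(struct device *dev)
	{
		kfree(container_of(dev, struct foo, dev));	/* last put frees */
	}

	/* registration */
	device_initialize(&foo->dev);
	foo->dev.devt = MKDEV(major, minor);
	foo->dev.release = foo_release;
	dev_set_name(&foo->dev, "foo%d", minor);
	cdev_init(&foo->cdev, &foo_fops);
	err = cdev_device_add(&foo->cdev, &foo->dev);	/* adds cdev + device */
	if (err)
		put_device(&foo->dev);			/* release() does the kfree() */

	/* teardown */
	cdev_device_del(&foo->cdev, &foo->dev);
	put_device(&foo->dev);
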
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index db9763cb4dae..cb415b10642f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -96,7 +96,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
+ eeprom = dev_get_drvdata(kobj_to_dev(kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
memcpy(buf, &eeprom->buffer[off], count);
@@ -111,7 +111,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
+ eeprom = dev_get_drvdata(kobj_to_dev(kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
memcpy(&eeprom->buffer[off], buf, count);
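
kobj_to_dev(), used above instead of the open-coded container_of(), is a typed helper from include/linux/device.h:

	static inline struct device *kobj_to_dev(struct kobject *kobj)
	{
		return container_of(kobj, struct device, kobj);
	}
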
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 7e2f5d0eacdb..809bcf8387d0 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -184,7 +184,7 @@ static struct i2c_driver smbalert_driver = {
* corresponding I2C device driver's alert function.
*
* It is assumed that ara is a valid i2c client previously returned by
- * i2c_setup_smbus_alert().
+ * i2c_new_smbus_alert_device().
*/
int i2c_handle_smbus_alert(struct i2c_client *ara)
{
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 0e16490eb3a1..5365199a31f4 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -272,6 +272,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
err_rollback_available:
device_remove_file(&pdev->dev, &dev_attr_available_masters);
err_rollback:
+ i2c_demux_deactivate_master(priv);
for (j = 0; j < i; j++) {
of_node_put(priv->chan[j].parent_np);
of_changeset_destroy(&priv->chan[j].chgset);
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index 9e2e1406f85e..bb8e60dff988 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -213,40 +213,34 @@ i3c_device_match_id(struct i3c_device *i3cdev,
{
struct i3c_device_info devinfo;
const struct i3c_device_id *id;
+ u16 manuf, part, ext_info;
+ bool rndpid;
i3c_device_get_info(i3cdev, &devinfo);
- /*
- * The lower 32bits of the provisional ID is just filled with a random
- * value, try to match using DCR info.
- */
- if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
- u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
- u16 part = I3C_PID_PART_ID(devinfo.pid);
- u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
-
- /* First try to match by manufacturer/part ID. */
- for (id = id_table; id->match_flags != 0; id++) {
- if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
- I3C_MATCH_MANUF_AND_PART)
- continue;
-
- if (manuf != id->manuf_id || part != id->part_id)
- continue;
-
- if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
- ext_info != id->extra_info)
- continue;
-
- return id;
- }
- }
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
+ rndpid = I3C_PID_RND_LOWER_32BITS(devinfo.pid);
- /* Fallback to DCR match. */
for (id = id_table; id->match_flags != 0; id++) {
if ((id->match_flags & I3C_MATCH_DCR) &&
- id->dcr == devinfo.dcr)
- return id;
+ id->dcr != devinfo.dcr)
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_MANUF) &&
+ id->manuf_id != manuf)
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_PART) &&
+ (rndpid || id->part_id != part))
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
+ (rndpid || id->extra_info != ext_info))
+ continue;
+
+ return id;
}
return NULL;
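
The flattened loop treats every I3C_MATCH_* flag independently, so a single table entry may now match on DCR alone, manufacturer alone, or any combination, with part/extra-info checks skipped when the PID's lower 32 bits are random. A hypothetical driver ID table exercising the usual helper macros from include/linux/i3c/device.h (IDs invented for illustration):

	static const struct i3c_device_id foo_i3c_ids[] = {
		I3C_DEVICE(0x011b, 0x0010, NULL),	/* manufacturer + part */
		I3C_CLASS(0x07, NULL),			/* any device with this DCR */
		{ /* sentinel */ },
	};
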
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 7f8f896fa0c3..d79cd6d54b3a 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -241,12 +241,34 @@ out:
}
static DEVICE_ATTR_RO(hdrcap);
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i3c_device *i3c = dev_to_i3cdev(dev);
+ struct i3c_device_info devinfo;
+ u16 manuf, part, ext;
+
+ i3c_device_get_info(i3c, &devinfo);
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+ return sprintf(buf, "i3c:dcr%02Xmanuf%04X", devinfo.dcr,
+ manuf);
+
+ return sprintf(buf, "i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
+ devinfo.dcr, manuf, part, ext);
+}
+static DEVICE_ATTR_RO(modalias);
+
static struct attribute *i3c_device_attrs[] = {
&dev_attr_bcr.attr,
&dev_attr_dcr.attr,
&dev_attr_pid.attr,
&dev_attr_dynamic_address.attr,
&dev_attr_hdrcap.attr,
+ &dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(i3c_device);
@@ -267,7 +289,7 @@ static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
devinfo.dcr, manuf);
return add_uevent_var(env,
- "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04xext%04x",
+ "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
devinfo.dcr, manuf, part, ext);
}
@@ -1953,7 +1975,7 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
* DEFSLVS command.
*/
if (boardinfo->base.flags & I2C_CLIENT_TEN) {
- dev_err(&master->dev, "I2C device with 10 bit address not supported.");
+ dev_err(dev, "I2C device with 10 bit address not supported.");
return -ENOTSUPP;
}
@@ -2138,7 +2160,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
* correctly even if one or more i2c devices are not registered.
*/
i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
- i2cdev->dev = i2c_new_device(adap, &i2cdev->boardinfo->base);
+ i2cdev->dev = i2c_new_client_device(adap, &i2cdev->boardinfo->base);
return 0;
}
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index bd26c3b9634e..5c5306cd50ec 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -221,7 +221,7 @@ struct dw_i3c_xfer {
struct completion comp;
int ret;
unsigned int ncmds;
- struct dw_i3c_cmd cmds[0];
+ struct dw_i3c_cmd cmds[];
};
struct dw_i3c_master {
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 54712793709e..3fee8bd7fe20 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -388,7 +388,7 @@ struct cdns_i3c_xfer {
struct completion comp;
int ret;
unsigned int ncmds;
- struct cdns_i3c_cmd cmds[0];
+ struct cdns_i3c_cmd cmds[];
};
struct cdns_i3c_data {
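
The cmds[0] -> cmds[] conversions in the two I3C masters switch zero-length arrays to C99 flexible array members; allocations for such structures typically go through struct_size() from <linux/overflow.h>, which checks the size computation for overflow. A sketch under that assumption:

	struct dw_i3c_xfer *xfer;

	/* header plus ncmds trailing dw_i3c_cmd entries, overflow-checked */
	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;
	xfer->ncmds = ncmds;
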
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 1c227ea8ecd3..593f149c2910 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -662,23 +662,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
CD-ROM on hda. This option changes this to more natural hda for
hard disk and hdc for CD-ROM.
-config BLK_DEV_IDE_AU1XXX
- bool "IDE for AMD Alchemy Au1200"
- depends on MIPS_ALCHEMY
- select IDE_XFER_MODE
-choice
- prompt "IDE Mode for AMD Alchemy Au1200"
- default BLK_DEV_IDE_AU1XXX_PIO_DBDMA
- depends on BLK_DEV_IDE_AU1XXX
-
-config BLK_DEV_IDE_AU1XXX_PIO_DBDMA
- bool "PIO+DbDMA IDE for AMD Alchemy Au1200"
-
-config BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
- bool "MDMA2+DbDMA IDE for AMD Alchemy Au1200"
- depends on BLK_DEV_IDE_AU1XXX
-endchoice
-
config BLK_DEV_IDE_TX4938
tristate "TX4938 internal IDE support"
depends on SOC_TX4938
@@ -859,8 +842,7 @@ config BLK_DEV_UMC8672
endif
config BLK_DEV_IDEDMA
- def_bool BLK_DEV_IDEDMA_SFF || \
- BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+ def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_ICS
select IDE_XFER_MODE
endif # IDE
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index d4f4409cfb8b..2605b3cdaf47 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -107,7 +107,5 @@ obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
-obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
-
obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o
obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
deleted file mode 100644
index 4d181a918d72..000000000000
--- a/drivers/ide/au1xxx-ide.c
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
- *
- * Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
- * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
- * Interface and Linux Device Driver" Application Note.
- */
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/scatterlist.h>
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/au1xxx_dbdma.h>
-#include <asm/mach-au1x00/au1xxx_ide.h>
-
-#define DRV_NAME "au1200-ide"
-#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"
-
-#ifndef IDE_REG_SHIFT
-#define IDE_REG_SHIFT 5
-#endif
-
-/* enable the burstmode in the dbdma */
-#define IDE_AU1XXX_BURSTMODE 1
-
-static _auide_hwif auide_hwif;
-
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
-
-static inline void auide_insw(unsigned long port, void *addr, u32 count)
-{
- _auide_hwif *ahwif = &auide_hwif;
- chan_tab_t *ctp;
- au1x_ddma_desc_t *dp;
-
- if (!au1xxx_dbdma_put_dest(ahwif->rx_chan, virt_to_phys(addr),
- count << 1, DDMA_FLAGS_NOIE)) {
- printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
- return;
- }
- ctp = *((chan_tab_t **)ahwif->rx_chan);
- dp = ctp->cur_ptr;
- while (dp->dscr_cmd0 & DSCR_CMD0_V)
- ;
- ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
-}
-
-static inline void auide_outsw(unsigned long port, void *addr, u32 count)
-{
- _auide_hwif *ahwif = &auide_hwif;
- chan_tab_t *ctp;
- au1x_ddma_desc_t *dp;
-
- if (!au1xxx_dbdma_put_source(ahwif->tx_chan, virt_to_phys(addr),
- count << 1, DDMA_FLAGS_NOIE)) {
- printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
- return;
- }
- ctp = *((chan_tab_t **)ahwif->tx_chan);
- dp = ctp->cur_ptr;
- while (dp->dscr_cmd0 & DSCR_CMD0_V)
- ;
- ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
-}
-
-static void au1xxx_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- auide_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static void au1xxx_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- auide_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-#endif
-
-static void au1xxx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
-
- switch (drive->pio_mode - XFER_PIO_0) {
- case 0:
- mem_sttime = SBC_IDE_TIMING(PIO0);
-
- /* set configuration for RCS2# */
- mem_stcfg |= TS_MASK;
- mem_stcfg &= ~TCSOE_MASK;
- mem_stcfg &= ~TOECS_MASK;
- mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
- break;
-
- case 1:
- mem_sttime = SBC_IDE_TIMING(PIO1);
-
- /* set configuration for RCS2# */
- mem_stcfg |= TS_MASK;
- mem_stcfg &= ~TCSOE_MASK;
- mem_stcfg &= ~TOECS_MASK;
- mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
- break;
-
- case 2:
- mem_sttime = SBC_IDE_TIMING(PIO2);
-
- /* set configuration for RCS2# */
- mem_stcfg &= ~TS_MASK;
- mem_stcfg &= ~TCSOE_MASK;
- mem_stcfg &= ~TOECS_MASK;
- mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
- break;
-
- case 3:
- mem_sttime = SBC_IDE_TIMING(PIO3);
-
- /* set configuration for RCS2# */
- mem_stcfg &= ~TS_MASK;
- mem_stcfg &= ~TCSOE_MASK;
- mem_stcfg &= ~TOECS_MASK;
- mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
-
- break;
-
- case 4:
- mem_sttime = SBC_IDE_TIMING(PIO4);
-
- /* set configuration for RCS2# */
- mem_stcfg &= ~TS_MASK;
- mem_stcfg &= ~TCSOE_MASK;
- mem_stcfg &= ~TOECS_MASK;
- mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
- break;
- }
-
- au_writel(mem_sttime,MEM_STTIME2);
- au_writel(mem_stcfg,MEM_STCFG2);
-}
-
-static void auide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
-
- switch (drive->dma_mode) {
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
- case XFER_MW_DMA_2:
- mem_sttime = SBC_IDE_TIMING(MDMA2);
-
- /* set configuration for RCS2# */
- mem_stcfg &= ~TS_MASK;
- mem_stcfg &= ~TCSOE_MASK;
- mem_stcfg &= ~TOECS_MASK;
- mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
-
- break;
- case XFER_MW_DMA_1:
- mem_sttime = SBC_IDE_TIMING(MDMA1);
-
- /* set configuration for RCS2# */
- mem_stcfg &= ~TS_MASK;
- mem_stcfg &= ~TCSOE_MASK;
- mem_stcfg &= ~TOECS_MASK;
- mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
-
- break;
- case XFER_MW_DMA_0:
- mem_sttime = SBC_IDE_TIMING(MDMA0);
-
- /* set configuration for RCS2# */
- mem_stcfg |= TS_MASK;
- mem_stcfg &= ~TCSOE_MASK;
- mem_stcfg &= ~TOECS_MASK;
- mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
-
- break;
-#endif
- }
-
- au_writel(mem_sttime,MEM_STTIME2);
- au_writel(mem_stcfg,MEM_STCFG2);
-}
-
-/*
- * Multi-Word DMA + DbDMA functions
- */
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-static int auide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- _auide_hwif *ahwif = &auide_hwif;
- struct scatterlist *sg;
- int i = cmd->sg_nents, count = 0;
- int iswrite = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
-
- /* Save for interrupt context */
- ahwif->drive = drive;
-
- /* fill the descriptors */
- sg = hwif->sg_table;
- while (i && sg_dma_len(sg)) {
- u32 cur_addr;
- u32 cur_len;
-
- cur_addr = sg_dma_address(sg);
- cur_len = sg_dma_len(sg);
-
- while (cur_len) {
- u32 flags = DDMA_FLAGS_NOIE;
- unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
-
- if (++count >= PRD_ENTRIES) {
- printk(KERN_WARNING "%s: DMA table too small\n",
- drive->name);
- return 0;
- }
-
- /* Lets enable intr for the last descriptor only */
- if (1==i)
- flags = DDMA_FLAGS_IE;
- else
- flags = DDMA_FLAGS_NOIE;
-
- if (iswrite) {
- if (!au1xxx_dbdma_put_source(ahwif->tx_chan,
- sg_phys(sg), tc, flags)) {
- printk(KERN_ERR "%s failed %d\n",
- __func__, __LINE__);
- }
- } else {
- if (!au1xxx_dbdma_put_dest(ahwif->rx_chan,
- sg_phys(sg), tc, flags)) {
- printk(KERN_ERR "%s failed %d\n",
- __func__, __LINE__);
- }
- }
-
- cur_addr += tc;
- cur_len -= tc;
- }
- sg = sg_next(sg);
- i--;
- }
-
- if (count)
- return 1;
-
- return 0; /* revert to PIO for this request */
-}
-
-static int auide_dma_end(ide_drive_t *drive)
-{
- return 0;
-}
-
-static void auide_dma_start(ide_drive_t *drive )
-{
-}
-
-
-static int auide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- if (auide_build_dmatable(drive, cmd) == 0)
- return 1;
-
- return 0;
-}
-
-static int auide_dma_test_irq(ide_drive_t *drive)
-{
- /* If dbdma didn't execute the STOP command yet, the
- * active bit is still set
- */
- drive->waiting_for_dma++;
- if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
- printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
- drive->name);
- return 1;
- }
- udelay(10);
- return 0;
-}
-
-static void auide_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static void auide_ddma_tx_callback(int irq, void *param)
-{
-}
-
-static void auide_ddma_rx_callback(int irq, void *param)
-{
-}
-#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
-
-static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize,
- u32 devwidth, u32 flags, u32 regbase)
-{
- dev->dev_id = dev_id;
- dev->dev_physaddr = CPHYSADDR(regbase);
- dev->dev_intlevel = 0;
- dev->dev_intpolarity = 0;
- dev->dev_tsize = tsize;
- dev->dev_devwidth = devwidth;
- dev->dev_flags = flags;
-}
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-static const struct ide_dma_ops au1xxx_dma_ops = {
- .dma_host_set = auide_dma_host_set,
- .dma_setup = auide_dma_setup,
- .dma_start = auide_dma_start,
- .dma_end = auide_dma_end,
- .dma_test_irq = auide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
-};
-
-static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- _auide_hwif *auide = &auide_hwif;
- dbdev_tab_t source_dev_tab, target_dev_tab;
- u32 dev_id, tsize, devwidth, flags;
-
- dev_id = hwif->ddma_id;
-
- tsize = 8; /* 1 */
- devwidth = 32; /* 16 */
-
-#ifdef IDE_AU1XXX_BURSTMODE
- flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
-#else
- flags = DEV_FLAGS_SYNC;
-#endif
-
- /* setup dev_tab for tx channel */
- auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
- DEV_FLAGS_OUT | flags, auide->regbase);
- auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
-
- auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
- DEV_FLAGS_IN | flags, auide->regbase);
- auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
-
- /* We also need to add a target device for the DMA */
- auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS, tsize,
- devwidth, DEV_FLAGS_ANYUSE, auide->regbase);
- auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);
-
- /* Get a channel for TX */
- auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
- auide->tx_dev_id,
- auide_ddma_tx_callback,
- (void*)auide);
-
- /* Get a channel for RX */
- auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
- auide->target_dev_id,
- auide_ddma_rx_callback,
- (void*)auide);
-
- auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
- NUM_DESCRIPTORS);
- auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
- NUM_DESCRIPTORS);
-
- /* FIXME: check return value */
- (void)ide_allocate_dma_engine(hwif);
-
- au1xxx_dbdma_start( auide->tx_chan );
- au1xxx_dbdma_start( auide->rx_chan );
-
- return 0;
-}
-#else
-static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- _auide_hwif *auide = &auide_hwif;
- dbdev_tab_t source_dev_tab;
- int flags;
-
-#ifdef IDE_AU1XXX_BURSTMODE
- flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
-#else
- flags = DEV_FLAGS_SYNC;
-#endif
-
- /* setup dev_tab for tx channel */
- auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32,
- DEV_FLAGS_OUT | flags, auide->regbase);
- auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
-
- auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32,
- DEV_FLAGS_IN | flags, auide->regbase);
- auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
-
- /* Get a channel for TX */
- auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
- auide->tx_dev_id,
- NULL,
- (void*)auide);
-
- /* Get a channel for RX */
- auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
- DSCR_CMD0_ALWAYS,
- NULL,
- (void*)auide);
-
- auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
- NUM_DESCRIPTORS);
- auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
- NUM_DESCRIPTORS);
-
- au1xxx_dbdma_start( auide->tx_chan );
- au1xxx_dbdma_start( auide->rx_chan );
-
- return 0;
-}
-#endif
-
-static void auide_setup_ports(struct ide_hw *hw, _auide_hwif *ahwif)
-{
- int i;
- unsigned long *ata_regs = hw->io_ports_array;
-
- /* FIXME? */
- for (i = 0; i < 8; i++)
- *ata_regs++ = ahwif->regbase + (i << IDE_REG_SHIFT);
-
- /* set the Alternative Status register */
- *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
-}
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
-static const struct ide_tp_ops au1xxx_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = au1xxx_input_data,
- .output_data = au1xxx_output_data,
-};
-#endif
-
-static const struct ide_port_ops au1xxx_port_ops = {
- .set_pio_mode = au1xxx_set_pio_mode,
- .set_dma_mode = auide_set_dma_mode,
-};
-
-static const struct ide_port_info au1xxx_port_info = {
- .init_dma = auide_ddma_init,
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
- .tp_ops = &au1xxx_tp_ops,
-#endif
- .port_ops = &au1xxx_port_ops,
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
- .dma_ops = &au1xxx_dma_ops,
-#endif
- .host_flags = IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_NO_IO_32BIT |
- IDE_HFLAG_UNMASK_IRQS,
- .pio_mask = ATA_PIO4,
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
- .mwdma_mask = ATA_MWDMA2,
-#endif
- .chipset = ide_au1xxx,
-};
-
-static int au_ide_probe(struct platform_device *dev)
-{
- _auide_hwif *ahwif = &auide_hwif;
- struct resource *res;
- struct ide_host *host;
- int ret = 0;
- struct ide_hw hw, *hws[] = { &hw };
-
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
- char *mode = "MWDMA2";
-#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
- char *mode = "PIO+DDMA(offload)";
-#endif
-
- memset(&auide_hwif, 0, sizeof(_auide_hwif));
- ahwif->irq = platform_get_irq(dev, 0);
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-
- if (res == NULL) {
- pr_debug("%s %d: no base address\n", DRV_NAME, dev->id);
- ret = -ENODEV;
- goto out;
- }
- if (ahwif->irq < 0) {
- pr_debug("%s %d: no IRQ\n", DRV_NAME, dev->id);
- ret = -ENODEV;
- goto out;
- }
-
- if (!request_mem_region(res->start, resource_size(res), dev->name)) {
- pr_debug("%s: request_mem_region failed\n", DRV_NAME);
- ret = -EBUSY;
- goto out;
- }
-
- ahwif->regbase = (u32)ioremap(res->start, resource_size(res));
- if (ahwif->regbase == 0) {
- ret = -ENOMEM;
- goto out;
- }
-
- res = platform_get_resource(dev, IORESOURCE_DMA, 0);
- if (!res) {
- pr_debug("%s: no DDMA ID resource\n", DRV_NAME);
- ret = -ENODEV;
- goto out;
- }
- ahwif->ddma_id = res->start;
-
- memset(&hw, 0, sizeof(hw));
- auide_setup_ports(&hw, ahwif);
- hw.irq = ahwif->irq;
- hw.dev = &dev->dev;
-
- ret = ide_host_add(&au1xxx_port_info, hws, 1, &host);
- if (ret)
- goto out;
-
- auide_hwif.hwif = host->ports[0];
-
- platform_set_drvdata(dev, host);
-
- printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
-
- out:
- return ret;
-}
-
-static int au_ide_remove(struct platform_device *dev)
-{
- struct resource *res;
- struct ide_host *host = platform_get_drvdata(dev);
- _auide_hwif *ahwif = &auide_hwif;
-
- ide_host_remove(host);
-
- iounmap((void *)ahwif->regbase);
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- return 0;
-}
-
-static struct platform_driver au1200_ide_driver = {
- .driver = {
- .name = "au1200-ide",
- },
- .probe = au_ide_probe,
- .remove = au_ide_remove,
-};
-
-module_platform_driver(au1200_ide_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AU1200 IDE driver");
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index acf874800ca4..b0411a1827a3 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -89,8 +89,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
static int __init ide_scan_pcibus(void)
{
struct pci_dev *dev = NULL;
- struct pci_driver *d;
- struct list_head *l, *n;
+ struct pci_driver *d, *tmp;
pre_init = 0;
for_each_pci_dev(dev)
@@ -101,9 +100,8 @@ static int __init ide_scan_pcibus(void)
* are post init.
*/
- list_for_each_safe(l, n, &ide_pci_drivers) {
- list_del(l);
- d = list_entry(l, struct pci_driver, node);
+ list_for_each_entry_safe(d, tmp, &ide_pci_drivers, node) {
+ list_del(&d->node);
if (__pci_register_driver(d, d->driver.owner,
d->driver.mod_name))
printk(KERN_ERR "%s: failed to register %s driver\n",
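
Editor's aside: the ide-scan-pci.c hunk above swaps an open-coded list_entry() walk for list_for_each_entry_safe(). As a minimal, self-contained sketch of the idiom (the struct and function names here are illustrative, not from the patch):

/* The "_safe" variant caches the next entry before the body runs,
 * so list_del() on the current entry cannot break the traversal. */
struct item {
	int val;
	struct list_head node;
};

static void drain_items(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);
		kfree(it);
	}
}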
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d55606608ac8..f4495841bf68 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -2,8 +2,9 @@
/*
* intel_idle.c - native hardware idle loop for modern Intel processors
*
- * Copyright (c) 2013, Intel Corporation.
+ * Copyright (c) 2013 - 2020, Intel Corporation.
* Len Brown <len.brown@intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
/*
@@ -25,11 +26,6 @@
/*
* Known limitations
*
- * The driver currently initializes for_each_online_cpu() upon modprobe.
- * It it unaware of subsequent processors hot-added to the system.
- * This means that if you boot with maxcpus=n and later online
- * processors above n, those processors will use C1 only.
- *
 * ACPI has a .suspend hack to turn off deep C-states during suspend
* to avoid complications with the lapic timer workaround.
* Have not seen issues with suspend, but may need same workaround here.
@@ -55,7 +51,7 @@
#include <asm/mwait.h>
#include <asm/msr.h>
-#define INTEL_IDLE_VERSION "0.4.1"
+#define INTEL_IDLE_VERSION "0.5.1"
static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle",
@@ -65,11 +61,12 @@ static struct cpuidle_driver intel_idle_driver = {
static int max_cstate = CPUIDLE_STATE_MAX - 1;
static unsigned int disabled_states_mask;
-static unsigned int mwait_substates;
+static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
+
+static unsigned long auto_demotion_disable_flags;
+static bool disable_promotion_to_c1e;
-#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
-/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
-static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
+static bool lapic_timer_always_reliable;
struct idle_cpu {
struct cpuidle_state *state_table;
@@ -84,13 +81,10 @@ struct idle_cpu {
bool use_acpi;
};
-static const struct idle_cpu *icpu;
-static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
-static int intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index);
-static void intel_idle_s2idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index);
-static struct cpuidle_state *cpuidle_state_table;
+static const struct idle_cpu *icpu __initdata;
+static struct cpuidle_state *cpuidle_state_table __initdata;
+
+static unsigned int mwait_substates __initdata;
/*
* Enable this state by default even if the ACPI _CST does not list it.
@@ -103,7 +97,7 @@ static struct cpuidle_state *cpuidle_state_table;
* If this flag is set, SW flushes the TLB, so even if the
* HW doesn't do the flushing, this flag is safe to use.
*/
-#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
+#define CPUIDLE_FLAG_TLB_FLUSHED BIT(16)
/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
@@ -115,12 +109,87 @@ static struct cpuidle_state *cpuidle_state_table;
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
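
Editor's aside: a worked example of the two macros above, using constants from asm/mwait.h rather than this hunk. Skylake's C6 state is declared with MWAIT2flg(0x20), so its .flags carry 0x20000000, and flg2MWAIT() recovers the 0x20 hint at enter time. The hint itself splits into two nibbles:

/* Illustrative decode; MWAIT_SUBSTATE_SIZE == 4 and
 * MWAIT_CSTATE_MASK == 0xf in asm/mwait.h. */
unsigned long eax = 0x20;			/* C6 hint */
unsigned int cstate   = (eax >> 4) & 0xf;	/* == 2 */
unsigned int substate = eax & 0xf;		/* == 0 */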
+/**
+ * intel_idle - Ask the processor to enter the given idle state.
+ * @dev: cpuidle device of the target CPU.
+ * @drv: cpuidle driver (assumed to point to intel_idle_driver).
+ * @index: Target idle state index.
+ *
+ * Use the MWAIT instruction to notify the processor that the CPU represented by
+ * @dev is idle and it can try to enter the idle state corresponding to @index.
+ *
+ * If the local APIC timer is not known to be reliable in the target idle state,
+ * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
+ *
+ * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to
+ * flushing user TLBs.
+ *
+ * Must be called under local_irq_disable().
+ */
+static __cpuidle int intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+ unsigned long ecx = 1; /* break on interrupt flag */
+ bool uninitialized_var(tick);
+ int cpu = smp_processor_id();
+
+ /*
+ * leave_mm() to avoid costly and often unnecessary wakeups
+ * for flushing the user TLBs associated with the active mm.
+ */
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(cpu);
+
+ if (!static_cpu_has(X86_FEATURE_ARAT) && !lapic_timer_always_reliable) {
+ /*
+ * Switch over to one-shot tick broadcast if the target C-state
+ * is deeper than C1.
+ */
+ if ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) {
+ tick = true;
+ tick_broadcast_enter();
+ } else {
+ tick = false;
+ }
+ }
+
+ mwait_idle_with_hints(eax, ecx);
+
+ if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
+ tick_broadcast_exit();
+
+ return index;
+}
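
Editor's aside: to make the broadcast condition in the new intel_idle() concrete: for C1 (hint 0x00) the test (eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK yields 0 and the local tick keeps running, while for C6 (hint 0x20) it yields 2, so the CPU enters one-shot tick broadcast before MWAIT and leaves it on wakeup. C1 is exempt because the LAPIC timer is assumed to keep ticking there even on CPUs without the ARAT feature.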
+
+/**
+ * intel_idle_s2idle - Ask the processor to enter the given idle state.
+ * @dev: cpuidle device of the target CPU.
+ * @drv: cpuidle driver (assumed to point to intel_idle_driver).
+ * @index: Target idle state index.
+ *
+ * Use the MWAIT instruction to notify the processor that the CPU represented by
+ * @dev is idle and it can try to enter the idle state corresponding to @index.
+ *
+ * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
+ * scheduler tick and suspended scheduler clock on the target CPU.
+ */
+static __cpuidle void intel_idle_s2idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ unsigned long eax = flg2MWAIT(drv->states[index].flags);
+ unsigned long ecx = 1; /* break on interrupt flag */
+
+ mwait_idle_with_hints(eax, ecx);
+}
+
/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -157,7 +226,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state snb_cstates[] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -202,7 +271,7 @@ static struct cpuidle_state snb_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state byt_cstates[] = {
+static struct cpuidle_state byt_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -247,7 +316,7 @@ static struct cpuidle_state byt_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state cht_cstates[] = {
+static struct cpuidle_state cht_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -292,7 +361,7 @@ static struct cpuidle_state cht_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state ivb_cstates[] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -337,7 +406,7 @@ static struct cpuidle_state ivb_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state ivt_cstates[] = {
+static struct cpuidle_state ivt_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -374,7 +443,7 @@ static struct cpuidle_state ivt_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state ivt_cstates_4s[] = {
+static struct cpuidle_state ivt_cstates_4s[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -411,7 +480,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.enter = NULL }
};
-static struct cpuidle_state ivt_cstates_8s[] = {
+static struct cpuidle_state ivt_cstates_8s[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -448,7 +517,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.enter = NULL }
};
-static struct cpuidle_state hsw_cstates[] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -516,7 +585,7 @@ static struct cpuidle_state hsw_cstates[] = {
{
.enter = NULL }
};
-static struct cpuidle_state bdw_cstates[] = {
+static struct cpuidle_state bdw_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -585,7 +654,7 @@ static struct cpuidle_state bdw_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state skl_cstates[] = {
+static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -654,7 +723,7 @@ static struct cpuidle_state skl_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state skx_cstates[] = {
+static struct cpuidle_state skx_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -683,7 +752,7 @@ static struct cpuidle_state skx_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state atom_cstates[] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E",
.desc = "MWAIT 0x00",
@@ -719,7 +788,7 @@ static struct cpuidle_state atom_cstates[] = {
{
.enter = NULL }
};
-static struct cpuidle_state tangier_cstates[] = {
+static struct cpuidle_state tangier_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -763,7 +832,7 @@ static struct cpuidle_state tangier_cstates[] = {
{
.enter = NULL }
};
-static struct cpuidle_state avn_cstates[] = {
+static struct cpuidle_state avn_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -783,7 +852,7 @@ static struct cpuidle_state avn_cstates[] = {
{
.enter = NULL }
};
-static struct cpuidle_state knl_cstates[] = {
+static struct cpuidle_state knl_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -804,7 +873,7 @@ static struct cpuidle_state knl_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state bxt_cstates[] = {
+static struct cpuidle_state bxt_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -865,7 +934,7 @@ static struct cpuidle_state bxt_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state dnv_cstates[] = {
+static struct cpuidle_state dnv_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -894,225 +963,164 @@ static struct cpuidle_state dnv_cstates[] = {
.enter = NULL }
};
-/**
- * intel_idle
- * @dev: cpuidle_device
- * @drv: cpuidle driver
- * @index: index of cpuidle state
- *
- * Must be called under local_irq_disable().
- */
-static __cpuidle int intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- unsigned long ecx = 1; /* break on interrupt flag */
- struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
- unsigned int cstate;
- bool uninitialized_var(tick);
- int cpu = smp_processor_id();
-
- /*
- * leave_mm() to avoid costly and often unnecessary wakeups
- * for flushing the user TLB's associated with the active mm.
- */
- if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
- leave_mm(cpu);
-
- if (!static_cpu_has(X86_FEATURE_ARAT)) {
- cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) &
- MWAIT_CSTATE_MASK) + 1;
- tick = false;
- if (!(lapic_timer_reliable_states & (1 << (cstate)))) {
- tick = true;
- tick_broadcast_enter();
- }
- }
-
- mwait_idle_with_hints(eax, ecx);
-
- if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
- tick_broadcast_exit();
-
- return index;
-}
-
-/**
- * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle
- * @dev: cpuidle_device
- * @drv: cpuidle driver
- * @index: state index
- */
-static void intel_idle_s2idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- unsigned long ecx = 1; /* break on interrupt flag */
- unsigned long eax = flg2MWAIT(drv->states[index].flags);
-
- mwait_idle_with_hints(eax, ecx);
-}
-
-static const struct idle_cpu idle_cpu_nehalem = {
+static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_nhx = {
+static const struct idle_cpu idle_cpu_nhx __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_atom = {
+static const struct idle_cpu idle_cpu_atom __initconst = {
.state_table = atom_cstates,
};
-static const struct idle_cpu idle_cpu_tangier = {
+static const struct idle_cpu idle_cpu_tangier __initconst = {
.state_table = tangier_cstates,
};
-static const struct idle_cpu idle_cpu_lincroft = {
+static const struct idle_cpu idle_cpu_lincroft __initconst = {
.state_table = atom_cstates,
.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};
-static const struct idle_cpu idle_cpu_snb = {
+static const struct idle_cpu idle_cpu_snb __initconst = {
.state_table = snb_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_snx = {
+static const struct idle_cpu idle_cpu_snx __initconst = {
.state_table = snb_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_byt = {
+static const struct idle_cpu idle_cpu_byt __initconst = {
.state_table = byt_cstates,
.disable_promotion_to_c1e = true,
.byt_auto_demotion_disable_flag = true,
};
-static const struct idle_cpu idle_cpu_cht = {
+static const struct idle_cpu idle_cpu_cht __initconst = {
.state_table = cht_cstates,
.disable_promotion_to_c1e = true,
.byt_auto_demotion_disable_flag = true,
};
-static const struct idle_cpu idle_cpu_ivb = {
+static const struct idle_cpu idle_cpu_ivb __initconst = {
.state_table = ivb_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_ivt = {
+static const struct idle_cpu idle_cpu_ivt __initconst = {
.state_table = ivt_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_hsw = {
+static const struct idle_cpu idle_cpu_hsw __initconst = {
.state_table = hsw_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_hsx = {
+static const struct idle_cpu idle_cpu_hsx __initconst = {
.state_table = hsw_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_bdw = {
+static const struct idle_cpu idle_cpu_bdw __initconst = {
.state_table = bdw_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_bdx = {
+static const struct idle_cpu idle_cpu_bdx __initconst = {
.state_table = bdw_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_skl = {
+static const struct idle_cpu idle_cpu_skl __initconst = {
.state_table = skl_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_skx = {
+static const struct idle_cpu idle_cpu_skx __initconst = {
.state_table = skx_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_avn = {
+static const struct idle_cpu idle_cpu_avn __initconst = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_knl = {
+static const struct idle_cpu idle_cpu_knl __initconst = {
.state_table = knl_cstates,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_bxt = {
+static const struct idle_cpu idle_cpu_bxt __initconst = {
.state_table = bxt_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_dnv = {
+static const struct idle_cpu idle_cpu_dnv __initconst = {
.state_table = dnv_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx),
- INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem),
- INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem),
- INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem),
- INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx),
- INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx),
- INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom),
- INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft),
- INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx),
- INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb),
- INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx),
- INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom),
- INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt),
- INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier),
- INTEL_CPU_FAM6(ATOM_AIRMONT, idle_cpu_cht),
- INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb),
- INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt),
- INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw),
- INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx),
- INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw),
- INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw),
- INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn),
- INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx),
- INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx),
- INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl),
- INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl),
- INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl),
- INTEL_CPU_FAM6(KABYLAKE, idle_cpu_skl),
- INTEL_CPU_FAM6(SKYLAKE_X, idle_cpu_skx),
- INTEL_CPU_FAM6(XEON_PHI_KNL, idle_cpu_knl),
- INTEL_CPU_FAM6(XEON_PHI_KNM, idle_cpu_knl),
- INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt),
- INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
- INTEL_CPU_FAM6(ATOM_GOLDMONT_D, idle_cpu_dnv),
- INTEL_CPU_FAM6(ATOM_TREMONT_D, idle_cpu_dnv),
+ X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
+ X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
+ X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_G, &idle_cpu_nehalem),
+ X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &idle_cpu_nehalem),
+ X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &idle_cpu_nhx),
+ X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &idle_cpu_nhx),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL, &idle_cpu_atom),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL_MID, &idle_cpu_lincroft),
+ X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &idle_cpu_nhx),
+ X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &idle_cpu_snb),
+ X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &idle_cpu_snx),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL, &idle_cpu_atom),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &idle_cpu_byt),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &idle_cpu_tangier),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &idle_cpu_cht),
+ X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &idle_cpu_ivb),
+ X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &idle_cpu_ivt),
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &idle_cpu_hsw),
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &idle_cpu_hsx),
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &idle_cpu_hsw),
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &idle_cpu_hsw),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &idle_cpu_avn),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &idle_cpu_bdw),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &idle_cpu_bdw),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &idle_cpu_bdx),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &idle_cpu_bdx),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &idle_cpu_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &idle_cpu_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &idle_cpu_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx),
+ X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
+ X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_dnv),
{}
};
-#define INTEL_CPU_FAM6_MWAIT \
- { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 }
-
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
- INTEL_CPU_FAM6_MWAIT,
+ X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
{}
};
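
Editor's aside: the converted match tables are consumed through x86_match_cpu(); a condensed sketch of the lookup as intel_idle_init() performs it further down (error handling trimmed, and the MWAIT-feature check omitted):

const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids);

if (!id) {
	/* unknown model: accept any Intel CPU advertising MWAIT */
	id = x86_match_cpu(intel_mwait_ids);
	if (!id)
		return -ENODEV;
}
/* the per-model data rides along in .driver_data */
icpu = (const struct idle_cpu *)id->driver_data;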
@@ -1273,11 +1281,11 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
-/*
- * ivt_idle_state_table_update(void)
+/**
+ * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
*
- * Tune IVT multi-socket targets
- * Assumption: num_sockets == (max_package_num + 1)
+ * Tune IVT multi-socket targets.
+ * Assumption: num_sockets == (max_package_num + 1).
*/
static void __init ivt_idle_state_table_update(void)
{
@@ -1323,11 +1331,11 @@ static unsigned long long __init irtl_2_usec(unsigned long long irtl)
return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
}
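
Editor's aside: irtl_2_usec() above keeps the low 10 bits of the MSR as the time value and scales by a unit size selected by higher MSR bits (the unit table is outside this hunk). As a worked example, assuming a 1024 ns unit: a time field of 100 converts to div_u64(100 * 1024, NSEC_PER_USEC), i.e. ~102 usec.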
-/*
- * bxt_idle_state_table_update(void)
+/**
+ * bxt_idle_state_table_update - Fix up the Broxton idle states table.
*
- * On BXT, we trust the IRTL to show the definitive maximum latency
- * We use the same value for target_residency.
+ * On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the
+ * definitive maximum latency and use the same value for target_residency.
*/
static void __init bxt_idle_state_table_update(void)
{
@@ -1370,11 +1378,11 @@ static void __init bxt_idle_state_table_update(void)
}
}
-/*
- * sklh_idle_state_table_update(void)
+
+/**
+ * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
*
- * On SKL-H (model 0x5e) disable C8 and C9 if:
- * C10 is enabled and SGX disabled
+ * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
*/
static void __init sklh_idle_state_table_update(void)
{
@@ -1485,9 +1493,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
}
}
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
+/**
+ * intel_idle_cpuidle_driver_init - Create the list of available idle states.
+ * @drv: cpuidle driver structure to initialize.
*/
static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
{
@@ -1509,7 +1517,7 @@ static void auto_demotion_disable(void)
unsigned long long msr_bits;
rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
- msr_bits &= ~(icpu->auto_demotion_disable_flags);
+ msr_bits &= ~auto_demotion_disable_flags;
wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
@@ -1522,10 +1530,12 @@ static void c1e_promotion_disable(void)
wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
}
-/*
- * intel_idle_cpu_init()
- * allocate, initialize, register cpuidle_devices
- * @cpu: cpu/core to initialize
+/**
+ * intel_idle_cpu_init - Register the target CPU with the cpuidle core.
+ * @cpu: CPU to initialize.
+ *
+ * Register a cpuidle device object for @cpu and update its MSRs in accordance
+ * with the processor model flags.
*/
static int intel_idle_cpu_init(unsigned int cpu)
{
@@ -1539,13 +1549,10 @@ static int intel_idle_cpu_init(unsigned int cpu)
return -EIO;
}
- if (!icpu)
- return 0;
-
- if (icpu->auto_demotion_disable_flags)
+ if (auto_demotion_disable_flags)
auto_demotion_disable();
- if (icpu->disable_promotion_to_c1e)
+ if (disable_promotion_to_c1e)
c1e_promotion_disable();
return 0;
@@ -1555,7 +1562,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
{
struct cpuidle_device *dev;
- if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+ if (!lapic_timer_always_reliable)
tick_broadcast_enable();
/*
@@ -1623,6 +1630,8 @@ static int __init intel_idle_init(void)
icpu = (const struct idle_cpu *)id->driver_data;
if (icpu) {
cpuidle_state_table = icpu->state_table;
+ auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
+ disable_promotion_to_c1e = icpu->disable_promotion_to_c1e;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {
@@ -1647,15 +1656,15 @@ static int __init intel_idle_init(void)
}
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
- lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ lapic_timer_always_reliable = true;
retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
intel_idle_cpu_online, NULL);
if (retval < 0)
goto hp_setup_fail;
- pr_debug("lapic_timer_reliable_states 0x%x\n",
- lapic_timer_reliable_states);
+ pr_debug("Local APIC timer is reliable in %s\n",
+ lapic_timer_always_reliable ? "all C-states" : "C1");
return 0;
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 5bd51853b15e..d5c073a8aa3e 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -88,6 +88,7 @@ source "drivers/iio/orientation/Kconfig"
if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
endif #IIO_TRIGGER
+source "drivers/iio/position/Kconfig"
source "drivers/iio/potentiometer/Kconfig"
source "drivers/iio/potentiostat/Kconfig"
source "drivers/iio/pressure/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index bff682ad1cfb..1712011c0f4a 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -31,6 +31,7 @@ obj-y += light/
obj-y += magnetometer/
obj-y += multiplexer/
obj-y += orientation/
+obj-y += position/
obj-y += potentiometer/
obj-y += potentiostat/
obj-y += pressure/
diff --git a/drivers/iio/TODO b/drivers/iio/TODO
new file mode 100644
index 000000000000..7d7326b7085a
--- /dev/null
+++ b/drivers/iio/TODO
@@ -0,0 +1,19 @@
+2020-02-29
+
+Documentation
+ - Binding docs for devices that are obviously used via device
+tree
+ - Yaml conversions for abandoned drivers
+ - ABI Documentation
+ - Audit drivers/iio/staging/Documentation
+
+- Replace iio_dev->mlock by either a local lock or use
+iio_claim_direct. (Requires analysis of the purpose of the lock.)
+
+- Convert drivers from device-tree-centric code to more generic
+property handlers.
+
+- Refactor old platform_data constructs out of drivers and convert
+them to state structs using property handlers and readers.
+
+Mailing list: linux-iio@vger.kernel.org
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index 0f0f27a8184e..4154e7396bbe 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -246,6 +246,7 @@ static const struct adis_data adis16201_data = {
.diag_stat_reg = ADIS16201_DIAG_STAT_REG,
.self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
+ .self_test_reg = ADIS16201_MSC_CTRL_REG,
.self_test_no_autoclear = true,
.timeouts = &adis16201_timeouts,
diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c
index c6dbd2424e10..31d45e7c5485 100644
--- a/drivers/iio/accel/adis16209.c
+++ b/drivers/iio/accel/adis16209.c
@@ -256,6 +256,7 @@ static const struct adis_data adis16209_data = {
.diag_stat_reg = ADIS16209_STAT_REG,
.self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
+ .self_test_reg = ADIS16209_MSC_CTRL_REG,
.self_test_no_autoclear = true,
.timeouts = &adis16209_timeouts,
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 68e847c6255e..2532b9ad3384 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -170,7 +170,8 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
+ cros_ec_sensors_capture, NULL);
if (ret)
return ret;
@@ -190,11 +191,6 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
state->sign[CROS_EC_SENSOR_Z] = -1;
}
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 66d768d971e1..6e429072e44a 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -980,7 +980,7 @@ static int sca3000_read_data(struct sca3000_state *st,
st->tx[0] = SCA3000_READ_REG(reg_address_high);
ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
if (ret) {
- dev_err(get_device(&st->us->dev), "problem reading register");
+ dev_err(&st->us->dev, "problem reading register\n");
return ret;
}
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 849cf74153c4..6b283be26ebc 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -147,12 +147,9 @@ static int st_accel_i2c_probe(struct i2c_client *client)
const struct st_sensor_settings *settings;
struct st_sensor_data *adata;
struct iio_dev *indio_dev;
- const char *match;
int ret;
- match = device_get_match_data(&client->dev);
- if (match)
- strlcpy(client->name, match, sizeof(client->name));
+ st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
settings = st_accel_get_settings(client->name);
if (!settings) {
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 82e33082958c..12bb8b7ca1ff 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -39,6 +39,18 @@ config AD7124
To compile this driver as a module, choose M here: the module will be
called ad7124.
+config AD7192
+ tristate "Analog Devices AD7190 AD7192 AD7193 AD7195 ADC driver"
+ depends on SPI
+ select AD_SIGMA_DELTA
+ help
+ Say yes here to build support for Analog Devices AD7190,
+ AD7192, AD7193 or AD7195 SPI analog-to-digital converters (ADCs).
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7192.
+
config AD7266
tristate "Analog Devices AD7265/AD7266 ADC driver"
depends on SPI_MASTER
@@ -783,6 +795,16 @@ config RCAR_GYRO_ADC
To compile this driver as a module, choose M here: the
module will be called rcar-gyroadc.
+config RN5T618_ADC
+ tristate "ADC for the RN5T618/RC5T619 family of chips"
+ depends on MFD_RN5T618
+ help
+ Say yes here to build support for the integrated ADC inside the
+ RN5T618/619 series PMICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rn5t618-adc.
+
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 919228900df9..637807861112 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o
obj-$(CONFIG_AD7124) += ad7124.o
+obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7291) += ad7291.o
obj-$(CONFIG_AD7292) += ad7292.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
+obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index d9915dc71d1e..a3c0647a5391 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -70,6 +70,11 @@
/* AD7124_FILTER_X */
#define AD7124_FILTER_FS_MSK GENMASK(10, 0)
#define AD7124_FILTER_FS(x) FIELD_PREP(AD7124_FILTER_FS_MSK, x)
+#define AD7124_FILTER_TYPE_MSK GENMASK(23, 21)
+#define AD7124_FILTER_TYPE_SEL(x) FIELD_PREP(AD7124_FILTER_TYPE_MSK, x)
+
+#define AD7124_SINC3_FILTER 2
+#define AD7124_SINC4_FILTER 0
enum ad7124_ids {
ID_AD7124_4,
@@ -93,6 +98,14 @@ static const unsigned int ad7124_gain[8] = {
1, 2, 4, 8, 16, 32, 64, 128
};
+static const unsigned int ad7124_reg_size[] = {
+ 1, 2, 3, 3, 2, 1, 3, 3, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3
+};
+
static const int ad7124_master_clk_freq_hz[3] = {
[AD7124_LOW_POWER] = 76800,
[AD7124_MID_POWER] = 153600,
@@ -119,6 +132,7 @@ struct ad7124_channel_config {
unsigned int vref_mv;
unsigned int pga_bits;
unsigned int odr;
+ unsigned int filter_type;
};
struct ad7124_state {
@@ -138,7 +152,8 @@ static const struct iio_chan_spec ad7124_channel_template = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -281,6 +296,58 @@ static int ad7124_set_channel_gain(struct ad7124_state *st,
return 0;
}
+static int ad7124_get_3db_filter_freq(struct ad7124_state *st,
+ unsigned int channel)
+{
+ unsigned int fadc;
+
+ fadc = st->channel_config[channel].odr;
+
+ switch (st->channel_config[channel].filter_type) {
+ case AD7124_SINC3_FILTER:
+ return DIV_ROUND_CLOSEST(fadc * 230, 1000);
+ case AD7124_SINC4_FILTER:
+ return DIV_ROUND_CLOSEST(fadc * 262, 1000);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7124_set_3db_filter_freq(struct ad7124_state *st,
+ unsigned int channel,
+ unsigned int freq)
+{
+ unsigned int sinc4_3db_odr;
+ unsigned int sinc3_3db_odr;
+ unsigned int new_filter;
+ unsigned int new_odr;
+
+ sinc4_3db_odr = DIV_ROUND_CLOSEST(freq * 1000, 230);
+ sinc3_3db_odr = DIV_ROUND_CLOSEST(freq * 1000, 262);
+
+ if (sinc4_3db_odr > sinc3_3db_odr) {
+ new_filter = AD7124_SINC3_FILTER;
+ new_odr = sinc4_3db_odr;
+ } else {
+ new_filter = AD7124_SINC4_FILTER;
+ new_odr = sinc3_3db_odr;
+ }
+
+ if (st->channel_config[channel].filter_type != new_filter) {
+ int ret;
+
+ st->channel_config[channel].filter_type = new_filter;
+ ret = ad7124_spi_write_mask(st, AD7124_FILTER(channel),
+ AD7124_FILTER_TYPE_MSK,
+ AD7124_FILTER_TYPE_SEL(new_filter),
+ 3);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ad7124_set_channel_odr(st, channel, new_odr);
+}
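
Editor's aside: a worked example of the getter above: at an ODR of 1000 Hz, sinc3 reports DIV_ROUND_CLOSEST(1000 * 230, 1000) = 230 Hz and sinc4 reports 262 Hz, i.e. the -3 dB corner sits at roughly 0.23x or 0.262x the output data rate depending on the filter type.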
+
static int ad7124_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long info)
@@ -323,6 +390,9 @@ static int ad7124_read_raw(struct iio_dev *indio_dev,
*val = st->channel_config[chan->address].odr;
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *val = ad7124_get_3db_filter_freq(st, chan->scan_index);
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
@@ -355,11 +425,37 @@ static int ad7124_write_raw(struct iio_dev *indio_dev,
gain = DIV_ROUND_CLOSEST(res, val2);
return ad7124_set_channel_gain(st, chan->address, gain);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val2 != 0)
+ return -EINVAL;
+
+ return ad7124_set_3db_filter_freq(st, chan->address, val);
default:
return -EINVAL;
}
}
+static int ad7124_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (reg >= ARRAY_SIZE(ad7124_reg_size))
+ return -EINVAL;
+
+ if (readval)
+ ret = ad_sd_read_reg(&st->sd, reg, ad7124_reg_size[reg],
+ readval);
+ else
+ ret = ad_sd_write_reg(&st->sd, reg, ad7124_reg_size[reg],
+ writeval);
+
+ return ret;
+}
+
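Editor's aside: the hook registered above surfaces through IIO's standard debugfs file. Assuming the usual debugfs mount point, reading register 0x19 (an arbitrary example, not a value from this patch) means writing "0x19" to /sys/kernel/debug/iio/iio:device0/direct_reg_access and then reading the same file back, while a write passes "<reg> <value>" in one go.
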
static IIO_CONST_ATTR(in_voltage_scale_available,
"0.000001164 0.000002328 0.000004656 0.000009313 0.000018626 0.000037252 0.000074505 0.000149011 0.000298023");
@@ -375,6 +471,7 @@ static const struct attribute_group ad7124_attrs_group = {
static const struct iio_info ad7124_info = {
.read_raw = ad7124_read_raw,
.write_raw = ad7124_write_raw,
+ .debugfs_reg_access = &ad7124_reg_access,
.validate_trigger = ad_sd_validate_trigger,
.attrs = &ad7124_attrs_group,
};
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index bf3e2a9cc07f..08ba1a8f05eb 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/delay.h>
+#include <linux/of_device.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -124,10 +125,10 @@
#define AD7193_CH_AINCOM 0x600 /* AINCOM - AINCOM */
/* ID Register Bit Designations (AD7192_REG_ID) */
-#define ID_AD7190 0x4
-#define ID_AD7192 0x0
-#define ID_AD7193 0x2
-#define ID_AD7195 0x6
+#define CHIPID_AD7190 0x4
+#define CHIPID_AD7192 0x0
+#define CHIPID_AD7193 0x2
+#define CHIPID_AD7195 0x6
#define AD7192_ID_MASK 0x0F
/* GPOCON Register Bit Designations (AD7192_REG_GPOCON) */
@@ -156,11 +157,24 @@
*/
enum {
- AD7192_SYSCALIB_ZERO_SCALE,
- AD7192_SYSCALIB_FULL_SCALE,
+ AD7192_SYSCALIB_ZERO_SCALE,
+ AD7192_SYSCALIB_FULL_SCALE,
+};
+
+enum {
+ ID_AD7190,
+ ID_AD7192,
+ ID_AD7193,
+ ID_AD7195,
+};
+
+struct ad7192_chip_info {
+ unsigned int chip_id;
+ const char *name;
};
struct ad7192_state {
+ const struct ad7192_chip_info *chip_info;
struct regulator *avdd;
struct regulator *dvdd;
struct clk *mclk;
@@ -171,7 +185,6 @@ struct ad7192_state {
u32 conf;
u32 scale_avail[8][2];
u8 gpocon;
- u8 devid;
u8 clock_sel;
struct mutex lock; /* protect sensor state */
u8 syscalib_mode[8];
@@ -347,7 +360,7 @@ static int ad7192_setup(struct ad7192_state *st, struct device_node *np)
id &= AD7192_ID_MASK;
- if (id != st->devid)
+ if (id != st->chip_info->chip_id)
dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n",
id);
@@ -362,7 +375,7 @@ static int ad7192_setup(struct ad7192_state *st, struct device_node *np)
st->mode |= AD7192_MODE_REJ60;
refin2_en = of_property_read_bool(np, "adi,refin2-pins-enable");
- if (refin2_en && st->devid != ID_AD7195)
+ if (refin2_en && st->chip_info->chip_id != CHIPID_AD7195)
st->conf |= AD7192_CONF_REFSEL;
st->conf &= ~AD7192_CONF_CHOP;
@@ -786,76 +799,124 @@ static const struct iio_info ad7195_info = {
.validate_trigger = ad_sd_validate_trigger,
};
+#define __AD719x_CHANNEL(_si, _channel1, _channel2, _address, _extend_name, \
+ _type, _mask_type_av, _ext_info) \
+ { \
+ .type = (_type), \
+ .differential = ((_channel2) == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = (_mask_type_av), \
+ .ext_info = (_ext_info), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 24, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD719x_DIFF_CHANNEL(_si, _channel1, _channel2, _address) \
+ __AD719x_CHANNEL(_si, _channel1, _channel2, _address, NULL, \
+ IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE), \
+ ad7192_calibsys_ext_info)
+
+#define AD719x_CHANNEL(_si, _channel1, _address) \
+ __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
+
+#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \
+ __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
+
+#define AD719x_TEMP_CHANNEL(_si, _address) \
+ __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL)
+
static const struct iio_chan_spec ad7192_channels[] = {
- AD_SD_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M, 24, 32, 0),
- AD_SD_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M, 24, 32, 0),
- AD_SD_TEMP_CHANNEL(2, AD7192_CH_TEMP, 24, 32, 0),
- AD_SD_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M, 24, 32, 0),
- AD_SD_CHANNEL(4, 1, AD7192_CH_AIN1, 24, 32, 0),
- AD_SD_CHANNEL(5, 2, AD7192_CH_AIN2, 24, 32, 0),
- AD_SD_CHANNEL(6, 3, AD7192_CH_AIN3, 24, 32, 0),
- AD_SD_CHANNEL(7, 4, AD7192_CH_AIN4, 24, 32, 0),
+ AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M),
+ AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M),
+ AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP),
+ AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M),
+ AD719x_CHANNEL(4, 1, AD7192_CH_AIN1),
+ AD719x_CHANNEL(5, 2, AD7192_CH_AIN2),
+ AD719x_CHANNEL(6, 3, AD7192_CH_AIN3),
+ AD719x_CHANNEL(7, 4, AD7192_CH_AIN4),
IIO_CHAN_SOFT_TIMESTAMP(8),
};
static const struct iio_chan_spec ad7193_channels[] = {
- AD_SD_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M, 24, 32, 0),
- AD_SD_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M, 24, 32, 0),
- AD_SD_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M, 24, 32, 0),
- AD_SD_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M, 24, 32, 0),
- AD_SD_TEMP_CHANNEL(4, AD7193_CH_TEMP, 24, 32, 0),
- AD_SD_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M, 24, 32, 0),
- AD_SD_CHANNEL(6, 1, AD7193_CH_AIN1, 24, 32, 0),
- AD_SD_CHANNEL(7, 2, AD7193_CH_AIN2, 24, 32, 0),
- AD_SD_CHANNEL(8, 3, AD7193_CH_AIN3, 24, 32, 0),
- AD_SD_CHANNEL(9, 4, AD7193_CH_AIN4, 24, 32, 0),
- AD_SD_CHANNEL(10, 5, AD7193_CH_AIN5, 24, 32, 0),
- AD_SD_CHANNEL(11, 6, AD7193_CH_AIN6, 24, 32, 0),
- AD_SD_CHANNEL(12, 7, AD7193_CH_AIN7, 24, 32, 0),
- AD_SD_CHANNEL(13, 8, AD7193_CH_AIN8, 24, 32, 0),
+ AD719x_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M),
+ AD719x_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M),
+ AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
+ AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
+ AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP),
+ AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M),
+ AD719x_CHANNEL(6, 1, AD7193_CH_AIN1),
+ AD719x_CHANNEL(7, 2, AD7193_CH_AIN2),
+ AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),
+ AD719x_CHANNEL(9, 4, AD7193_CH_AIN4),
+ AD719x_CHANNEL(10, 5, AD7193_CH_AIN5),
+ AD719x_CHANNEL(11, 6, AD7193_CH_AIN6),
+ AD719x_CHANNEL(12, 7, AD7193_CH_AIN7),
+ AD719x_CHANNEL(13, 8, AD7193_CH_AIN8),
IIO_CHAN_SOFT_TIMESTAMP(14),
};
+static const struct ad7192_chip_info ad7192_chip_info_tbl[] = {
+ [ID_AD7190] = {
+ .chip_id = CHIPID_AD7190,
+ .name = "ad7190",
+ },
+ [ID_AD7192] = {
+ .chip_id = CHIPID_AD7192,
+ .name = "ad7192",
+ },
+ [ID_AD7193] = {
+ .chip_id = CHIPID_AD7193,
+ .name = "ad7193",
+ },
+ [ID_AD7195] = {
+ .chip_id = CHIPID_AD7195,
+ .name = "ad7195",
+ },
+};
+
static int ad7192_channels_config(struct iio_dev *indio_dev)
{
struct ad7192_state *st = iio_priv(indio_dev);
- const struct iio_chan_spec *channels;
- struct iio_chan_spec *chan;
- int i;
- switch (st->devid) {
- case ID_AD7193:
- channels = ad7193_channels;
+ switch (st->chip_info->chip_id) {
+ case CHIPID_AD7193:
+ indio_dev->channels = ad7193_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7193_channels);
break;
default:
- channels = ad7192_channels;
+ indio_dev->channels = ad7192_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
break;
}
- chan = devm_kcalloc(indio_dev->dev.parent, indio_dev->num_channels,
- sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
-
- indio_dev->channels = chan;
-
- for (i = 0; i < indio_dev->num_channels; i++) {
- *chan = channels[i];
- chan->info_mask_shared_by_all |=
- BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY);
- if (chan->type != IIO_TEMP) {
- chan->info_mask_shared_by_type_available |=
- BIT(IIO_CHAN_INFO_SCALE);
- chan->ext_info = ad7192_calibsys_ext_info;
- }
- chan++;
- }
-
return 0;
}
+static const struct of_device_id ad7192_of_match[] = {
+ { .compatible = "adi,ad7190", .data = &ad7192_chip_info_tbl[ID_AD7190] },
+ { .compatible = "adi,ad7192", .data = &ad7192_chip_info_tbl[ID_AD7192] },
+ { .compatible = "adi,ad7193", .data = &ad7192_chip_info_tbl[ID_AD7193] },
+ { .compatible = "adi,ad7195", .data = &ad7192_chip_info_tbl[ID_AD7195] },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad7192_of_match);
+
static int ad7192_probe(struct spi_device *spi)
{
struct ad7192_state *st;
@@ -899,22 +960,25 @@ static int ad7192_probe(struct spi_device *spi)
voltage_uv = regulator_get_voltage(st->avdd);
- if (voltage_uv)
+ if (voltage_uv > 0) {
st->int_vref_mv = voltage_uv / 1000;
- else
+ } else {
+ ret = voltage_uv;
dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
+ goto error_disable_avdd;
+ }
spi_set_drvdata(spi, indio_dev);
- st->devid = spi_get_device_id(spi)->driver_data;
+ st->chip_info = of_device_get_match_data(&spi->dev);
indio_dev->dev.parent = &spi->dev;
- indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = ad7192_channels_config(indio_dev);
if (ret < 0)
goto error_disable_dvdd;
- if (st->devid == ID_AD7195)
+ if (st->chip_info->chip_id == CHIPID_AD7195)
indio_dev->info = &ad7195_info;
else
indio_dev->info = &ad7192_info;
@@ -986,26 +1050,6 @@ static int ad7192_remove(struct spi_device *spi)
return 0;
}
-static const struct spi_device_id ad7192_id[] = {
- {"ad7190", ID_AD7190},
- {"ad7192", ID_AD7192},
- {"ad7193", ID_AD7193},
- {"ad7195", ID_AD7195},
- {}
-};
-
-MODULE_DEVICE_TABLE(spi, ad7192_id);
-
-static const struct of_device_id ad7192_of_match[] = {
- { .compatible = "adi,ad7190" },
- { .compatible = "adi,ad7192" },
- { .compatible = "adi,ad7193" },
- { .compatible = "adi,ad7195" },
- {}
-};
-
-MODULE_DEVICE_TABLE(of, ad7192_of_match);
-
static struct spi_driver ad7192_driver = {
.driver = {
.name = "ad7192",
@@ -1013,7 +1057,6 @@ static struct spi_driver ad7192_driver = {
},
.probe = ad7192_probe,
.remove = ad7192_remove,
- .id_table = ad7192_id,
};
module_spi_driver(ad7192_driver);
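
Editor's aside: with the spi_device_id table gone, the probe resolves the chip variant purely through the OF match data. A condensed sketch of the pattern (the NULL check is added here for illustration; the hunk itself relies on the OF core having matched):

static int example_probe(struct spi_device *spi)
{
	const struct ad7192_chip_info *info;

	/* .data of the matching ad7192_of_match[] entry */
	info = of_device_get_match_data(&spi->dev);
	if (!info)
		return -ENODEV;

	dev_info(&spi->dev, "probing %s\n", info->name);
	return 0;
}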
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index a6798f7dfdb8..6595fd196288 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -122,7 +122,10 @@ static int ad7292_single_conversion(struct ad7292_state *st,
{
.tx_buf = &st->d8,
.len = 4,
- .delay_usecs = 6,
+ .delay = {
+ .value = 6,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
}, {
.rx_buf = &st->d16,
.len = 2,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index b747db97f78a..e5691e330323 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -542,7 +542,7 @@ static const struct iio_info ad7797_info = {
.read_raw = &ad7793_read_raw,
.write_raw = &ad7793_write_raw,
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
- .attrs = &ad7793_attribute_group,
+ .attrs = &ad7797_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 2df7d057b249..22131a677445 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -836,8 +836,10 @@ static int exynos_adc_probe(struct platform_device *pdev)
info->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(info->vdd)) {
- dev_err(&pdev->dev, "failed getting regulator, err = %ld\n",
- PTR_ERR(info->vdd));
+ if (PTR_ERR(info->vdd) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed getting regulator, err = %ld\n",
+ PTR_ERR(info->vdd));
return PTR_ERR(info->vdd);
}
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 3b6f3b9a6c5b..0c5d7aaf6826 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -71,7 +71,10 @@ static int max1118_read(struct spi_device *spi, int channel)
*/
{
.len = 0,
- .delay_usecs = 1, /* > CNVST Low Time 100 ns */
+ .delay = { /* > CNVST Low Time 100 ns */
+ .value = 1,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
.cs_change = 1,
},
/*
@@ -81,7 +84,10 @@ static int max1118_read(struct spi_device *spi, int channel)
*/
{
.len = 0,
- .delay_usecs = 8,
+ .delay = {
+ .value = 8,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
},
{
.rx_buf = &adc->data,
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 465c7625a55a..2c0eb5de110c 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -421,7 +421,8 @@ static int mcp320x_probe(struct spi_device *spi)
adc->transfer[1].len++;
/* conversions are started by asserting CS pin for 8 usec */
- adc->start_conv_transfer.delay_usecs = 8;
+ adc->start_conv_transfer.delay.value = 8;
+ adc->start_conv_transfer.delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_init_with_transfers(&adc->start_conv_msg,
&adc->start_conv_transfer, 1);
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index a6170a37ebe8..83bad2d5575d 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -14,6 +14,7 @@
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
+#include <linux/reset.h>
struct npcm_adc {
bool int_status;
@@ -23,13 +24,9 @@ struct npcm_adc {
struct clk *adc_clk;
wait_queue_head_t wq;
struct regulator *vref;
- struct regmap *rst_regmap;
+ struct reset_control *reset;
};
-/* NPCM7xx reset module */
-#define NPCM7XX_IPSRST1_OFFSET 0x020
-#define NPCM7XX_IPSRST1_ADC_RST BIT(27)
-
/* ADC registers */
#define NPCM_ADCCON 0x00
#define NPCM_ADCDATA 0x04
@@ -106,13 +103,11 @@ static int npcm_adc_read(struct npcm_adc *info, int *val, u8 channel)
msecs_to_jiffies(10));
if (ret == 0) {
regtemp = ioread32(info->regs + NPCM_ADCCON);
- if ((regtemp & NPCM_ADCCON_ADC_CONV) && info->rst_regmap) {
+ if (regtemp & NPCM_ADCCON_ADC_CONV) {
/* if conversion failed - reset ADC module */
- regmap_write(info->rst_regmap, NPCM7XX_IPSRST1_OFFSET,
- NPCM7XX_IPSRST1_ADC_RST);
+ reset_control_assert(info->reset);
msleep(100);
- regmap_write(info->rst_regmap, NPCM7XX_IPSRST1_OFFSET,
- 0x0);
+ reset_control_deassert(info->reset);
msleep(100);
/* Enable ADC and start conversion module */
@@ -186,7 +181,6 @@ static int npcm_adc_probe(struct platform_device *pdev)
struct npcm_adc *info;
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
if (!indio_dev)
@@ -199,6 +193,10 @@ static int npcm_adc_probe(struct platform_device *pdev)
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
+ info->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(info->reset))
+ return PTR_ERR(info->reset);
+
info->adc_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->adc_clk)) {
dev_warn(&pdev->dev, "ADC clock failed: can't read clk\n");
@@ -211,16 +209,6 @@ static int npcm_adc_probe(struct platform_device *pdev)
div = div >> NPCM_ADCCON_DIV_SHIFT;
info->adc_sample_hz = clk_get_rate(info->adc_clk) / ((div + 1) * 2);
- if (of_device_is_compatible(np, "nuvoton,npcm750-adc")) {
- info->rst_regmap = syscon_regmap_lookup_by_compatible
- ("nuvoton,npcm750-rst");
- if (IS_ERR(info->rst_regmap)) {
- dev_err(&pdev->dev, "Failed to find nuvoton,npcm750-rst\n");
- ret = PTR_ERR(info->rst_regmap);
- goto err_disable_clk;
- }
- }
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
ret = -EINVAL;
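
The npcm_adc change swaps open-coded syscon register writes for the generic reset controller API, so the reset line is described by a "resets" property in the device tree rather than hard-coded register offsets. A minimal consumer-side sketch, assuming a device node with a valid "resets" phandle:

	#include <linux/reset.h>

	static int example_reset_cycle(struct device *dev)
	{
		struct reset_control *rst;

		/* Grab the first "resets" entry for this device. */
		rst = devm_reset_control_get(dev, NULL);
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		reset_control_assert(rst);	/* put the block into reset */
		reset_control_deassert(rst);	/* and release it again */
		return 0;
	}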
diff --git a/drivers/iio/adc/rn5t618-adc.c b/drivers/iio/adc/rn5t618-adc.c
new file mode 100644
index 000000000000..f21027e4e26a
--- /dev/null
+++ b/drivers/iio/adc/rn5t618-adc.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADC driver for the RICOH RN5T618 power management chip family
+ *
+ * Copyright (C) 2019 Andreas Kemnade
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mfd/rn5t618.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/slab.h>
+
+#define RN5T618_ADC_CONVERSION_TIMEOUT (msecs_to_jiffies(500))
+#define RN5T618_REFERENCE_VOLT 2500
+
+/* mask for selecting channels for single conversion */
+#define RN5T618_ADCCNT3_CHANNEL_MASK 0x7
+/* average 4-time conversion mode */
+#define RN5T618_ADCCNT3_AVG BIT(3)
+/* set for starting a single conversion, gets cleared by hw when done */
+#define RN5T618_ADCCNT3_GODONE BIT(4)
+/* automatic conversion, period is in ADCCNT2, selected channels are
+ * in ADCCNT1
+ */
+#define RN5T618_ADCCNT3_AUTO BIT(5)
+#define RN5T618_ADCEND_IRQ BIT(0)
+
+struct rn5t618_adc_data {
+ struct device *dev;
+ struct rn5t618 *rn5t618;
+ struct completion conv_completion;
+ int irq;
+};
+
+struct rn5t618_channel_ratios {
+ u16 numerator;
+ u16 denominator;
+};
+
+enum rn5t618_channels {
+ LIMMON = 0,
+ VBAT,
+ VADP,
+ VUSB,
+ VSYS,
+ VTHM,
+ AIN1,
+ AIN0
+};
+
+static const struct rn5t618_channel_ratios rn5t618_ratios[8] = {
+ [LIMMON] = {50, 32}, /* measured across 20mOhm, amplified by 32 */
+ [VBAT] = {2, 1},
+ [VADP] = {3, 1},
+ [VUSB] = {3, 1},
+ [VSYS] = {3, 1},
+ [VTHM] = {1, 1},
+ [AIN1] = {1, 1},
+ [AIN0] = {1, 1},
+};
+
+static int rn5t618_read_adc_reg(struct rn5t618 *rn5t618, int reg, u16 *val)
+{
+ u8 data[2];
+ int ret;
+
+ ret = regmap_bulk_read(rn5t618->regmap, reg, data, sizeof(data));
+ if (ret < 0)
+ return ret;
+
+ *val = (data[0] << 4) | (data[1] & 0xF);
+
+ return 0;
+}
+
+static irqreturn_t rn5t618_adc_irq(int irq, void *data)
+{
+ struct rn5t618_adc_data *adc = data;
+ unsigned int r = 0;
+ int ret;
+
+ /* clear low & high threshold irqs */
+ regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC1, 0);
+ regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC2, 0);
+
+ ret = regmap_read(adc->rn5t618->regmap, RN5T618_IR_ADC3, &r);
+ if (ret < 0)
+ dev_err(adc->dev, "failed to read IRQ status: %d\n", ret);
+
+ regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC3, 0);
+
+ if (r & RN5T618_ADCEND_IRQ)
+ complete(&adc->conv_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int rn5t618_adc_read(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct rn5t618_adc_data *adc = iio_priv(iio_dev);
+ u16 raw;
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_SCALE) {
+ *val = RN5T618_REFERENCE_VOLT *
+ rn5t618_ratios[chan->channel].numerator;
+ *val2 = rn5t618_ratios[chan->channel].denominator * 4095;
+
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ /* select channel */
+ ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3,
+ RN5T618_ADCCNT3_CHANNEL_MASK,
+ chan->channel);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(adc->rn5t618->regmap, RN5T618_EN_ADCIR3,
+ RN5T618_ADCEND_IRQ);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3,
+ RN5T618_ADCCNT3_AVG,
+ mask == IIO_CHAN_INFO_AVERAGE_RAW ?
+ RN5T618_ADCCNT3_AVG : 0);
+ if (ret < 0)
+ return ret;
+
+ init_completion(&adc->conv_completion);
+ /* single conversion */
+ ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3,
+ RN5T618_ADCCNT3_GODONE,
+ RN5T618_ADCCNT3_GODONE);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&adc->conv_completion,
+ RN5T618_ADC_CONVERSION_TIMEOUT);
+ if (ret == 0) {
+ dev_warn(adc->dev, "timeout waiting for adc result\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = rn5t618_read_adc_reg(adc->rn5t618,
+ RN5T618_ILIMDATAH + 2 * chan->channel,
+ &raw);
+ if (ret < 0)
+ return ret;
+
+ *val = raw;
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info rn5t618_adc_iio_info = {
+ .read_raw = &rn5t618_adc_read,
+};
+
+#define RN5T618_ADC_CHANNEL(_channel, _type, _name) { \
+ .type = _type, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = _name, \
+ .indexed = 1, \
+}
+
+static const struct iio_chan_spec rn5t618_adc_iio_channels[] = {
+ RN5T618_ADC_CHANNEL(LIMMON, IIO_CURRENT, "LIMMON"),
+ RN5T618_ADC_CHANNEL(VBAT, IIO_VOLTAGE, "VBAT"),
+ RN5T618_ADC_CHANNEL(VADP, IIO_VOLTAGE, "VADP"),
+ RN5T618_ADC_CHANNEL(VUSB, IIO_VOLTAGE, "VUSB"),
+ RN5T618_ADC_CHANNEL(VSYS, IIO_VOLTAGE, "VSYS"),
+ RN5T618_ADC_CHANNEL(VTHM, IIO_VOLTAGE, "VTHM"),
+ RN5T618_ADC_CHANNEL(AIN1, IIO_VOLTAGE, "AIN1"),
+ RN5T618_ADC_CHANNEL(AIN0, IIO_VOLTAGE, "AIN0")
+};
+
+static int rn5t618_adc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct iio_dev *iio_dev;
+ struct rn5t618_adc_data *adc;
+ struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
+
+ iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+ if (!iio_dev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(iio_dev);
+ adc->dev = &pdev->dev;
+ adc->rn5t618 = rn5t618;
+
+ if (rn5t618->irq_data)
+ adc->irq = regmap_irq_get_virq(rn5t618->irq_data,
+ RN5T618_IRQ_ADC);
+
+ if (adc->irq <= 0) {
+ dev_err(&pdev->dev, "get virq failed\n");
+ return -EINVAL;
+ }
+
+ init_completion(&adc->conv_completion);
+
+ iio_dev->name = dev_name(&pdev->dev);
+ iio_dev->dev.parent = &pdev->dev;
+ iio_dev->info = &rn5t618_adc_iio_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->channels = rn5t618_adc_iio_channels;
+ iio_dev->num_channels = ARRAY_SIZE(rn5t618_adc_iio_channels);
+
+ /* stop any auto-conversion */
+ ret = regmap_write(rn5t618->regmap, RN5T618_ADCCNT3, 0);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, iio_dev);
+
+ ret = devm_request_threaded_irq(adc->dev, adc->irq, NULL,
+ rn5t618_adc_irq,
+ IRQF_ONESHOT, dev_name(adc->dev),
+ adc);
+ if (ret < 0) {
+ dev_err(adc->dev, "request irq %d failed: %d\n", adc->irq, ret);
+ return ret;
+ }
+
+ return devm_iio_device_register(adc->dev, iio_dev);
+}
+
+static struct platform_driver rn5t618_adc_driver = {
+ .driver = {
+ .name = "rn5t618-adc",
+ },
+ .probe = rn5t618_adc_probe,
+};
+
+module_platform_driver(rn5t618_adc_driver);
+MODULE_ALIAS("platform:rn5t618-adc");
+MODULE_DESCRIPTION("RICOH RN5T618 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 80c3f963527b..dfc3a306c667 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1418,8 +1418,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
static void stm32_adc_dma_buffer_done(void *data)
{
struct iio_dev *indio_dev = data;
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ int residue = stm32_adc_dma_residue(adc);
+
+ /*
+ * In DMA mode the trigger services of IIO are not used
+ * (e.g. no call to iio_trigger_poll).
+ * Calling the irq handler associated with the hardware trigger is not
+ * relevant as the conversions have already been done. Data
+ * transfers are performed directly in the DMA callback instead.
+ * This implementation avoids calling the trigger irq handler, which
+ * may sleep, from an atomic context (DMA irq handler context).
+ */
+ dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
+
- iio_trigger_poll_chained(indio_dev->trig);
+ while (residue >= indio_dev->scan_bytes) {
+ u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
+
+ iio_push_to_buffers(indio_dev, buffer);
+
+ residue -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->rx_buf_sz)
+ adc->bufi = 0;
+ }
}
static int stm32_adc_dma_start(struct iio_dev *indio_dev)
@@ -1790,18 +1812,18 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
return 0;
}
-static int stm32_adc_dma_request(struct iio_dev *indio_dev)
+static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev)
{
struct stm32_adc *adc = iio_priv(indio_dev);
struct dma_slave_config config;
int ret;
- adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+ adc->dma_chan = dma_request_chan(dev, "rx");
if (IS_ERR(adc->dma_chan)) {
ret = PTR_ERR(adc->dma_chan);
if (ret != -ENODEV) {
if (ret != -EPROBE_DEFER)
- dev_err(&indio_dev->dev,
+ dev_err(dev,
"DMA channel request failed with %d\n",
ret);
return ret;
@@ -1845,6 +1867,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
+ irqreturn_t (*handler)(int irq, void *p) = NULL;
struct stm32_adc *adc;
int ret;
@@ -1907,13 +1930,15 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = stm32_adc_dma_request(indio_dev);
+ ret = stm32_adc_dma_request(dev, indio_dev);
if (ret < 0)
return ret;
+ if (!adc->dma_chan)
+ handler = &stm32_adc_trigger_handler;
+
ret = iio_triggered_buffer_setup(indio_dev,
- &iio_pollfunc_store_time,
- &stm32_adc_trigger_handler,
+ &iio_pollfunc_store_time, handler,
&stm32_adc_buffer_setup_ops);
if (ret) {
dev_err(&pdev->dev, "buffer setup failed\n");
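
In DMA mode the completion callback above drains whole scans from the ring buffer: stm32_adc_dma_residue() reports how many bytes are pending, one scan of scan_bytes is pushed per loop iteration, and bufi wraps at rx_buf_sz. A hedged illustration with hypothetical sizes:

	/*
	 * Hypothetical: rx_buf_sz = 64, scan_bytes = 8, bufi = 56 and
	 * residue = 16 -> two scans are pushed and bufi wraps 56 -> 0 -> 8.
	 */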
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 76a60d93fe23..506bf519f64c 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -62,7 +62,7 @@ enum sd_converter_type {
struct stm32_dfsdm_dev_data {
int type;
- int (*init)(struct iio_dev *indio_dev);
+ int (*init)(struct device *dev, struct iio_dev *indio_dev);
unsigned int num_channels;
const struct regmap_config *regmap_cfg;
};
@@ -1365,11 +1365,12 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
}
}
-static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+static int stm32_dfsdm_dma_request(struct device *dev,
+ struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+ adc->dma_chan = dma_request_chan(dev, "rx");
if (IS_ERR(adc->dma_chan)) {
int ret = PTR_ERR(adc->dma_chan);
@@ -1425,7 +1426,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
&adc->dfsdm->ch_list[ch->channel]);
}
-static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1452,10 +1453,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
indio_dev->num_channels = 1;
indio_dev->channels = ch;
- return stm32_dfsdm_dma_request(indio_dev);
+ return stm32_dfsdm_dma_request(dev, indio_dev);
}
-static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1499,17 +1500,17 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
init_completion(&adc->completion);
/* Optionally request DMA */
- ret = stm32_dfsdm_dma_request(indio_dev);
+ ret = stm32_dfsdm_dma_request(dev, indio_dev);
if (ret) {
if (ret != -ENODEV) {
if (ret != -EPROBE_DEFER)
- dev_err(&indio_dev->dev,
+ dev_err(dev,
"DMA channel request failed with %d\n",
ret);
return ret;
}
- dev_dbg(&indio_dev->dev, "No DMA support\n");
+ dev_dbg(dev, "No DMA support\n");
return 0;
}
@@ -1622,7 +1623,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
adc->dev_data = dev_data;
- ret = dev_data->init(iio);
+ ret = dev_data->init(dev, iio);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
index 9a460807d46d..8a8792010c20 100644
--- a/drivers/iio/adc/ti-ads8344.c
+++ b/drivers/iio/adc/ti-ads8344.c
@@ -29,19 +29,20 @@ struct ads8344 {
struct mutex lock;
u8 tx_buf ____cacheline_aligned;
- u16 rx_buf;
+ u8 rx_buf[3];
};
-#define ADS8344_VOLTAGE_CHANNEL(chan, si) \
+#define ADS8344_VOLTAGE_CHANNEL(chan, addr) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = addr, \
}
-#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \
+#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, addr) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -50,6 +51,7 @@ struct ads8344 {
.differential = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = addr, \
}
static const struct iio_chan_spec ads8344_channels[] = {
@@ -89,11 +91,11 @@ static int ads8344_adc_conversion(struct ads8344 *adc, int channel,
udelay(9);
- ret = spi_read(spi, &adc->rx_buf, 2);
+ ret = spi_read(spi, adc->rx_buf, sizeof(adc->rx_buf));
if (ret)
return ret;
- return adc->rx_buf;
+ return adc->rx_buf[0] << 9 | adc->rx_buf[1] << 1 | adc->rx_buf[2] >> 7;
}
static int ads8344_read_raw(struct iio_dev *iio,
@@ -105,7 +107,7 @@ static int ads8344_read_raw(struct iio_dev *iio,
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&adc->lock);
- *value = ads8344_adc_conversion(adc, channel->scan_index,
+ *value = ads8344_adc_conversion(adc, channel->address,
channel->differential);
mutex_unlock(&adc->lock);
if (*value < 0)
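
The ADS8344 shifts its 16-bit conversion result out one bit-time late, so the sample straddles three received bytes; the rework reassembles it with shifts instead of reading a misaligned u16. A worked example with hypothetical byte values:

	/*
	 * Hypothetical rx_buf = {0x12, 0x34, 0x80}:
	 * (0x12 << 9) | (0x34 << 1) | (0x80 >> 7)
	 * = 0x2400 | 0x68 | 0x1 = 0x2469.
	 */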
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 4965246808bd..77620359b54c 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -189,7 +189,8 @@ static int tlc4541_probe(struct spi_device *spi)
/* Setup default message */
st->scan_single_xfer[0].rx_buf = &st->rx_buf[0];
st->scan_single_xfer[0].len = 3;
- st->scan_single_xfer[1].delay_usecs = 3;
+ st->scan_single_xfer[1].delay.value = 3;
+ st->scan_single_xfer[1].delay.unit = SPI_DELAY_UNIT_USECS;
st->scan_single_xfer[2].rx_buf = &st->rx_buf[0];
st->scan_single_xfer[2].len = 2;
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index ec227b358cd6..6fd06e4eff73 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -102,6 +102,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_FLAGS_BUFFERED BIT(0)
+/*
+ * The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it
+ * does not have a hardware FIFO, which means an interrupt is generated for
+ * each conversion sequence. At a 1MSPS sample rate the CPU in the ZYNQ7000 is
+ * so overloaded by the interrupts that it soft-locks up. For this reason the
+ * driver limits the maximum samplerate to 150kSPS. At this rate the CPU is
+ * fairly busy, but still responsive.
+ */
+#define XADC_MAX_SAMPLERATE 150000
+
static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
uint32_t val)
{
@@ -674,7 +684,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
spin_lock_irqsave(&xadc->lock, flags);
xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
- xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
+ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS);
if (state)
val |= XADC_AXI_INT_EOS;
else
@@ -722,13 +732,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{
uint16_t val;
+ /* Powerdown the ADC-B when it is not needed. */
switch (seq_mode) {
case XADC_CONF1_SEQ_SIMULTANEOUS:
case XADC_CONF1_SEQ_INDEPENDENT:
- val = XADC_CONF2_PD_ADC_B;
+ val = 0;
break;
default:
- val = 0;
+ val = XADC_CONF2_PD_ADC_B;
break;
}
@@ -797,6 +808,16 @@ static int xadc_preenable(struct iio_dev *indio_dev)
if (ret)
goto err;
+ /*
+ * In simultaneous mode the upper and lower aux channels are sampled at
+ * the same time. In this mode the upper 8 bits in the sequencer
+ * register are don't care and the lower 8 bits control two channels
+ * each. As such we must set the bit if either the channel in the lower
+ * group or the upper group is enabled.
+ */
+ if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS)
+ scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000;
+
ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
if (ret)
goto err;
@@ -823,11 +844,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = {
.postdisable = &xadc_postdisable,
};
+static int xadc_read_samplerate(struct xadc *xadc)
+{
+ unsigned int div;
+ uint16_t val16;
+ int ret;
+
+ ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
+ if (ret)
+ return ret;
+
+ div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
+ if (div < 2)
+ div = 2;
+
+ return xadc_get_dclk_rate(xadc) / div / 26;
+}
+
static int xadc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct xadc *xadc = iio_priv(indio_dev);
- unsigned int div;
uint16_t val16;
int ret;
@@ -880,41 +917,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val = -((273150 << 12) / 503975);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
- if (ret)
+ ret = xadc_read_samplerate(xadc);
+ if (ret < 0)
return ret;
- div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
- if (div < 2)
- div = 2;
-
- *val = xadc_get_dclk_rate(xadc) / div / 26;
-
+ *val = ret;
return IIO_VAL_INT;
default:
return -EINVAL;
}
}
-static int xadc_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val, int val2, long info)
+static int xadc_write_samplerate(struct xadc *xadc, int val)
{
- struct xadc *xadc = iio_priv(indio_dev);
unsigned long clk_rate = xadc_get_dclk_rate(xadc);
unsigned int div;
if (!clk_rate)
return -EINVAL;
- if (info != IIO_CHAN_INFO_SAMP_FREQ)
- return -EINVAL;
-
if (val <= 0)
return -EINVAL;
/* Max. 150 kSPS */
- if (val > 150000)
- val = 150000;
+ if (val > XADC_MAX_SAMPLERATE)
+ val = XADC_MAX_SAMPLERATE;
val *= 26;
@@ -927,7 +954,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
* limit.
*/
div = clk_rate / val;
- if (clk_rate / div / 26 > 150000)
+ if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
div++;
if (div < 2)
div = 2;
@@ -938,6 +965,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
div << XADC_CONF2_DIV_OFFSET);
}
+static int xadc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ if (info != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ return xadc_write_samplerate(xadc, val);
+}
+
static const struct iio_event_spec xadc_temp_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -1223,6 +1261,21 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
goto err_free_samplerate_trigger;
+ /*
+ * Make sure not to exceed the maximum samplerate since otherwise the
+ * resulting interrupt storm will soft-lock the system.
+ */
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+ ret = xadc_read_samplerate(xadc);
+ if (ret < 0)
+ goto err_free_samplerate_trigger;
+ if (ret > XADC_MAX_SAMPLERATE) {
+ ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
+ if (ret < 0)
+ goto err_free_samplerate_trigger;
+ }
+ }
+
ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev);
if (ret)
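
The XADC sample rate is dclk / (div * 26), so both the read and write paths reduce to divider arithmetic, and the probe-time clamp above simply rewrites the divider whenever the configured rate exceeds XADC_MAX_SAMPLERATE. A hedged worked example with a hypothetical 104 MHz dclk:

	/*
	 * Requesting 150 kSPS with a hypothetical dclk of 104 MHz:
	 * div = 104000000 / (150000 * 26) = 26, but 104000000 / 26 / 26
	 * ~= 153.8 kSPS exceeds the limit, so div is bumped to 27,
	 * giving 104000000 / 27 / 26 ~= 148.1 kSPS.
	 */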
diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig
index da7f126d197b..9b02c9a2bc8a 100644
--- a/drivers/iio/amplifiers/Kconfig
+++ b/drivers/iio/amplifiers/Kconfig
@@ -22,4 +22,14 @@ config AD8366
To compile this driver as a module, choose M here: the
module will be called ad8366.
+config HMC425
+ tristate "Analog Devices HMC425A and similar GPIO Gain Amplifiers"
+ depends on GPIOLIB
+ help
+ Say yes here to build support for Analog Devices HMC425A and similar
+ gain amplifiers or step attenuators.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hmc425a.
+
endmenu
diff --git a/drivers/iio/amplifiers/Makefile b/drivers/iio/amplifiers/Makefile
index 9abef2ebe9bc..cb551d82f56b 100644
--- a/drivers/iio/amplifiers/Makefile
+++ b/drivers/iio/amplifiers/Makefile
@@ -5,3 +5,4 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD8366) += ad8366.o
+obj-$(CONFIG_HMC425) += hmc425a.o
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index 0176d3d8cc9c..62167b87caea 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -5,6 +5,7 @@
* AD8366 Dual-Digital Variable Gain Amplifier (VGA)
* ADA4961 BiCMOS RF Digital Gain Amplifier (DGA)
* ADL5240 Digitally controlled variable gain amplifier (VGA)
+ * HMC1119 0.25 dB LSB, 7-Bit, Silicon Digital Attenuator
*
* Copyright 2012-2019 Analog Devices Inc.
*/
@@ -27,6 +28,7 @@ enum ad8366_type {
ID_AD8366,
ID_ADA4961,
ID_ADL5240,
+ ID_HMC1119,
};
struct ad8366_info {
@@ -62,6 +64,10 @@ static struct ad8366_info ad8366_infos[] = {
.gain_min = -11500,
.gain_max = 20000,
},
+ [ID_HMC1119] = {
+ .gain_min = -31750,
+ .gain_max = 0,
+ },
};
static int ad8366_write(struct iio_dev *indio_dev,
@@ -84,6 +90,9 @@ static int ad8366_write(struct iio_dev *indio_dev,
case ID_ADL5240:
st->data[0] = (ch_a & 0x3F);
break;
+ case ID_HMC1119:
+ st->data[0] = ch_a;
+ break;
}
ret = spi_write(st->spi, st->data, indio_dev->num_channels);
@@ -118,6 +127,9 @@ static int ad8366_read_raw(struct iio_dev *indio_dev,
case ID_ADL5240:
gain = 20000 - 31500 + code * 500;
break;
+ case ID_HMC1119:
+ gain = -1 * code * 250;
+ break;
}
/* Values in dB */
@@ -164,6 +176,9 @@ static int ad8366_write_raw(struct iio_dev *indio_dev,
case ID_ADL5240:
code = ((gain - 500 - 20000) / 500) & 0x3F;
break;
+ case ID_HMC1119:
+ code = (abs(gain) / 250) & 0x7F;
+ break;
}
mutex_lock(&st->lock);
@@ -180,9 +195,22 @@ static int ad8366_write_raw(struct iio_dev *indio_dev,
return ret;
}
+static int ad8366_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ return IIO_VAL_INT_PLUS_MICRO_DB;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct iio_info ad8366_info = {
.read_raw = &ad8366_read_raw,
.write_raw = &ad8366_write_raw,
+ .write_raw_get_fmt = &ad8366_write_raw_get_fmt,
};
#define AD8366_CHAN(_channel) { \
@@ -233,6 +261,7 @@ static int ad8366_probe(struct spi_device *spi)
break;
case ID_ADA4961:
case ID_ADL5240:
+ case ID_HMC1119:
st->reset_gpio = devm_gpiod_get(&spi->dev, "reset",
GPIOD_OUT_HIGH);
indio_dev->channels = ada4961_channels;
@@ -285,6 +314,7 @@ static const struct spi_device_id ad8366_id[] = {
{"ad8366", ID_AD8366},
{"ada4961", ID_ADA4961},
{"adl5240", ID_ADL5240},
+ {"hmc1119", ID_HMC1119},
{}
};
MODULE_DEVICE_TABLE(spi, ad8366_id);
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
new file mode 100644
index 000000000000..d9e6e9678ffc
--- /dev/null
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HMC425A and similar Gain Amplifiers
+ *
+ * Copyright 2020 Analog Devices Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sysfs.h>
+
+enum hmc425a_type {
+ ID_HMC425A,
+};
+
+struct hmc425a_chip_info {
+ const char *name;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ unsigned int num_gpios;
+ int gain_min;
+ int gain_max;
+ int default_gain;
+};
+
+struct hmc425a_state {
+ struct regulator *reg;
+ struct mutex lock; /* protect sensor state */
+ struct hmc425a_chip_info *chip_info;
+ struct gpio_descs *gpios;
+ enum hmc425a_type type;
+ u32 gain;
+};
+
+static int hmc425a_write(struct iio_dev *indio_dev, u32 value)
+{
+ struct hmc425a_state *st = iio_priv(indio_dev);
+ DECLARE_BITMAP(values, BITS_PER_TYPE(value));
+
+ values[0] = value;
+
+ gpiod_set_array_value_cansleep(st->gpios->ndescs, st->gpios->desc,
+ NULL, values);
+ return 0;
+}
+
+static int hmc425a_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long m)
+{
+ struct hmc425a_state *st = iio_priv(indio_dev);
+ int code, gain = 0;
+ int ret;
+
+ mutex_lock(&st->lock);
+ switch (m) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ code = st->gain;
+
+ switch (st->type) {
+ case ID_HMC425A:
+ gain = ~code * -500;
+ break;
+ }
+
+ *val = gain / 1000;
+ *val2 = (gain % 1000) * 1000;
+
+ ret = IIO_VAL_INT_PLUS_MICRO_DB;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&st->lock);
+
+ return ret;
+};
+
+static int hmc425a_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct hmc425a_state *st = iio_priv(indio_dev);
+ struct hmc425a_chip_info *inf = st->chip_info;
+ int code = 0, gain;
+ int ret;
+
+ if (val < 0)
+ gain = (val * 1000) - (val2 / 1000);
+ else
+ gain = (val * 1000) + (val2 / 1000);
+
+ if (gain > inf->gain_max || gain < inf->gain_min)
+ return -EINVAL;
+
+ switch (st->type) {
+ case ID_HMC425A:
+ code = ~((abs(gain) / 500) & 0x3F);
+ break;
+ }
+
+ mutex_lock(&st->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ st->gain = code;
+
+ ret = hmc425a_write(indio_dev, st->gain);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int hmc425a_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ return IIO_VAL_INT_PLUS_MICRO_DB;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info hmc425a_info = {
+ .read_raw = &hmc425a_read_raw,
+ .write_raw = &hmc425a_write_raw,
+ .write_raw_get_fmt = &hmc425a_write_raw_get_fmt,
+};
+
+#define HMC425A_CHAN(_channel) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+}
+
+static const struct iio_chan_spec hmc425a_channels[] = {
+ HMC425A_CHAN(0),
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id hmc425a_of_match[] = {
+ { .compatible = "adi,hmc425a", .data = (void *)ID_HMC425A },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hmc425a_of_match);
+
+static void hmc425a_reg_disable(void *data)
+{
+ struct hmc425a_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
+ [ID_HMC425A] = {
+ .name = "hmc425a",
+ .channels = hmc425a_channels,
+ .num_channels = ARRAY_SIZE(hmc425a_channels),
+ .num_gpios = 6,
+ .gain_min = -31500,
+ .gain_max = 0,
+ .default_gain = -0x40, /* set default gain to -31.5 dB */
+ },
+};
+
+static int hmc425a_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct hmc425a_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->type = (enum hmc425a_type)of_device_get_match_data(&pdev->dev);
+
+ st->chip_info = &hmc425a_chip_info_tbl[st->type];
+ indio_dev->num_channels = st->chip_info->num_channels;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->name = st->chip_info->name;
+ st->gain = st->chip_info->default_gain;
+
+ st->gpios = devm_gpiod_get_array(&pdev->dev, "ctrl", GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpios)) {
+ ret = PTR_ERR(st->gpios);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get gpios\n");
+ return ret;
+ }
+
+ if (st->gpios->ndescs != st->chip_info->num_gpios) {
+ dev_err(&pdev->dev, "%d GPIOs needed to operate\n",
+ st->chip_info->num_gpios);
+ return -ENODEV;
+ }
+
+ /* regulator core appends "-supply" when looking up the DT property */
+ st->reg = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, hmc425a_reg_disable, st);
+ if (ret)
+ return ret;
+
+ mutex_init(&st->lock);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &hmc425a_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static struct platform_driver hmc425a_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = hmc425a_of_match,
+ },
+ .probe = hmc425a_probe,
+};
+module_platform_driver(hmc425a_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices HMC425A and similar GPIO control Gain Amplifiers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 2f0a6fed2589..7b199ce16ecf 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -48,6 +48,11 @@
#define ATLAS_REG_EC_CALIB_STATUS_LOW BIT(2)
#define ATLAS_REG_EC_CALIB_STATUS_HIGH BIT(3)
+#define ATLAS_REG_DO_CALIB_STATUS 0x09
+#define ATLAS_REG_DO_CALIB_STATUS_MASK 0x03
+#define ATLAS_REG_DO_CALIB_STATUS_PRESSURE BIT(0)
+#define ATLAS_REG_DO_CALIB_STATUS_DO BIT(1)
+
#define ATLAS_REG_PH_TEMP_DATA 0x0e
#define ATLAS_REG_PH_DATA 0x16
@@ -60,14 +65,19 @@
#define ATLAS_REG_ORP_CALIB_STATUS 0x0d
#define ATLAS_REG_ORP_DATA 0x0e
+#define ATLAS_REG_DO_TEMP_DATA 0x12
+#define ATLAS_REG_DO_DATA 0x22
+
#define ATLAS_PH_INT_TIME_IN_MS 450
#define ATLAS_EC_INT_TIME_IN_MS 650
#define ATLAS_ORP_INT_TIME_IN_MS 450
+#define ATLAS_DO_INT_TIME_IN_MS 450
enum {
ATLAS_PH_SM,
ATLAS_EC_SM,
ATLAS_ORP_SM,
+ ATLAS_DO_SM,
};
struct atlas_data {
@@ -76,6 +86,7 @@ struct atlas_data {
struct atlas_device *chip;
struct regmap *regmap;
struct irq_work work;
+ unsigned int interrupt_enabled;
__be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
};
@@ -121,7 +132,7 @@ static const struct iio_chan_spec atlas_ph_channels[] = {
},
};
-#define ATLAS_EC_CHANNEL(_idx, _addr) \
+#define ATLAS_CONCENTRATION_CHANNEL(_idx, _addr) \
{\
.type = IIO_CONCENTRATION, \
.indexed = 1, \
@@ -152,8 +163,8 @@ static const struct iio_chan_spec atlas_ec_channels[] = {
.endianness = IIO_BE,
},
},
- ATLAS_EC_CHANNEL(0, ATLAS_REG_TDS_DATA),
- ATLAS_EC_CHANNEL(1, ATLAS_REG_PSS_DATA),
+ ATLAS_CONCENTRATION_CHANNEL(0, ATLAS_REG_TDS_DATA),
+ ATLAS_CONCENTRATION_CHANNEL(1, ATLAS_REG_PSS_DATA),
IIO_CHAN_SOFT_TIMESTAMP(3),
{
.type = IIO_TEMP,
@@ -182,6 +193,31 @@ static const struct iio_chan_spec atlas_orp_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(1),
};
+static const struct iio_chan_spec atlas_do_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .address = ATLAS_REG_DO_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+ {
+ .type = IIO_TEMP,
+ .address = ATLAS_REG_DO_TEMP_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .output = 1,
+ .scan_index = -1
+ },
+};
+
static int atlas_check_ph_calibration(struct atlas_data *data)
{
struct device *dev = &data->client->dev;
@@ -262,7 +298,31 @@ static int atlas_check_orp_calibration(struct atlas_data *data)
dev_warn(dev, "device has not been calibrated\n");
return 0;
-};
+}
+
+static int atlas_check_do_calibration(struct atlas_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ATLAS_REG_DO_CALIB_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & ATLAS_REG_DO_CALIB_STATUS_MASK)) {
+ dev_warn(dev, "device has not been calibrated\n");
+ return 0;
+ }
+
+ if (!(val & ATLAS_REG_DO_CALIB_STATUS_PRESSURE))
+ dev_warn(dev, "device missing atmospheric pressure calibration\n");
+
+ if (!(val & ATLAS_REG_DO_CALIB_STATUS_DO))
+ dev_warn(dev, "device missing dissolved oxygen calibration\n");
+
+ return 0;
+}
struct atlas_device {
const struct iio_chan_spec *channels;
@@ -295,6 +355,13 @@ static struct atlas_device atlas_devices[] = {
.calibration = &atlas_check_orp_calibration,
.delay = ATLAS_ORP_INT_TIME_IN_MS,
},
+ [ATLAS_DO_SM] = {
+ .channels = atlas_do_channels,
+ .num_channels = 3,
+ .data_reg = ATLAS_REG_DO_DATA,
+ .calibration = &atlas_check_do_calibration,
+ .delay = ATLAS_DO_INT_TIME_IN_MS,
+ },
};
static int atlas_set_powermode(struct atlas_data *data, int on)
@@ -304,6 +371,9 @@ static int atlas_set_powermode(struct atlas_data *data, int on)
static int atlas_set_interrupt(struct atlas_data *data, bool state)
{
+ if (!data->interrupt_enabled)
+ return 0;
+
return regmap_update_bits(data->regmap, ATLAS_REG_INT_CONTROL,
ATLAS_REG_INT_CONTROL_EN,
state ? ATLAS_REG_INT_CONTROL_EN : 0);
@@ -507,6 +577,7 @@ static const struct i2c_device_id atlas_id[] = {
{ "atlas-ph-sm", ATLAS_PH_SM},
{ "atlas-ec-sm", ATLAS_EC_SM},
{ "atlas-orp-sm", ATLAS_ORP_SM},
+ { "atlas-do-sm", ATLAS_DO_SM},
{}
};
MODULE_DEVICE_TABLE(i2c, atlas_id);
@@ -515,6 +586,7 @@ static const struct of_device_id atlas_dt_ids[] = {
{ .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, },
{ .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
{ .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, },
+ { .compatible = "atlas,do-sm", .data = (void *)ATLAS_DO_SM, },
{ }
};
MODULE_DEVICE_TABLE(of, atlas_dt_ids);
@@ -572,11 +644,6 @@ static int atlas_probe(struct i2c_client *client,
if (ret)
return ret;
- if (client->irq <= 0) {
- dev_err(&client->dev, "no valid irq defined\n");
- return -EINVAL;
- }
-
ret = chip->calibration(data);
if (ret)
return ret;
@@ -596,16 +663,20 @@ static int atlas_probe(struct i2c_client *client,
init_irq_work(&data->work, atlas_work_handler);
- /* interrupt pin toggles on new conversion */
- ret = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, atlas_interrupt_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "atlas_irq",
- indio_dev);
- if (ret) {
- dev_err(&client->dev, "request irq (%d) failed\n", client->irq);
- goto unregister_buffer;
+ if (client->irq > 0) {
+ /* interrupt pin toggles on new conversion */
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, atlas_interrupt_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "atlas_irq",
+ indio_dev);
+
+ if (ret)
+ dev_warn(&client->dev,
+ "request irq (%d) failed\n", client->irq);
+ else
+ data->interrupt_enabled = 1;
}
ret = atlas_set_powermode(data, 1);
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
index 1dcc2a16ab2d..af801e203623 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
@@ -97,7 +97,7 @@ static int cros_ec_lid_angle_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, false);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, false, NULL, NULL);
if (ret)
return ret;
@@ -127,7 +127,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_lid_angle_ids);
static struct platform_driver cros_ec_lid_angle_platform_driver = {
.driver = {
.name = DRV_NAME,
- .pm = &cros_ec_sensors_pm_ops,
},
.probe = cros_ec_lid_angle_probe,
.id_table = cros_ec_lid_angle_ids,
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 576e45faafaf..a66941fdb385 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -230,10 +230,14 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
+ cros_ec_sensors_capture,
+ cros_ec_sensors_push_data);
if (ret)
return ret;
+ iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
+
indio_dev->info = &ec_sensors_info;
state = iio_priv(indio_dev);
for (channel = state->channels, i = CROS_EC_SENSOR_X;
@@ -245,7 +249,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
BIT(IIO_CHAN_INFO_CALIBSCALE);
channel->info_mask_shared_by_all =
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_FREQUENCY) |
BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->info_mask_shared_by_all_available =
BIT(IIO_CHAN_INFO_SAMP_FREQ);
@@ -292,11 +295,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
else
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
@@ -317,7 +315,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
static struct platform_driver cros_ec_sensors_platform_driver = {
.driver = {
.name = "cros-ec-sensors",
- .pm = &cros_ec_sensors_pm_ops,
},
.probe = cros_ec_sensors_probe,
.id_table = cros_ec_sensors_ids,
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index d3a3626c7cd8..c831915ca7e5 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -11,7 +11,9 @@
#include <linux/iio/common/cros_ec_sensors_core.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,6 +22,12 @@
#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
+/*
+ * Hard coded for the first EC device to support the sensor FIFO: it has a
+ * 2048 byte FIFO and triggers an interrupt when the FIFO is 2/3 full.
+ */
+#define CROS_EC_FIFO_SIZE (2048 * 2 / 3)
+
static char *cros_ec_loc[] = {
[MOTIONSENSE_LOC_BASE] = "base",
[MOTIONSENSE_LOC_LID] = "lid",
@@ -53,8 +61,15 @@ static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev,
static void get_default_min_max_freq(enum motionsensor_type type,
u32 *min_freq,
- u32 *max_freq)
+ u32 *max_freq,
+ u32 *max_fifo_events)
{
+ /*
+ * We don't know the fifo size; default to the size previously used
+ * by older hardware.
+ */
+ *max_fifo_events = CROS_EC_FIFO_SIZE;
+
switch (type) {
case MOTIONSENSE_TYPE_ACCEL:
case MOTIONSENSE_TYPE_GYRO:
@@ -82,9 +97,155 @@ static void get_default_min_max_freq(enum motionsensor_type type,
}
}
+static int cros_ec_sensor_set_ec_rate(struct cros_ec_sensors_core_state *st,
+ int rate)
+{
+ int ret;
+
+ if (rate > U16_MAX)
+ rate = U16_MAX;
+
+ mutex_lock(&st->cmd_lock);
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data = rate;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ mutex_unlock(&st->cmd_lock);
+ return ret;
+}
+
+static ssize_t cros_ec_sensor_set_report_latency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int integer, fract, ret;
+ int latency;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+ if (ret)
+ return ret;
+
+ /* EC rate is in ms. */
+ latency = integer * 1000 + fract / 1000;
+ ret = cros_ec_sensor_set_ec_rate(st, latency);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static ssize_t cros_ec_sensor_get_report_latency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int latency, ret;
+
+ mutex_lock(&st->cmd_lock);
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data = EC_MOTION_SENSE_NO_VALUE;
+
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ latency = st->resp->ec_rate.ret;
+ mutex_unlock(&st->cmd_lock);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d.%06u\n",
+ latency / 1000,
+ (latency % 1000) * 1000);
+}
+
+static IIO_DEVICE_ATTR(hwfifo_timeout, 0644,
+ cros_ec_sensor_get_report_latency,
+ cros_ec_sensor_set_report_latency, 0);
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->fifo_max_event_count);
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
+
+const struct attribute *cros_ec_sensor_fifo_attributes[] = {
+ &iio_dev_attr_hwfifo_timeout.dev_attr.attr,
+ &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(cros_ec_sensor_fifo_attributes);
+
+int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ s16 *data,
+ s64 timestamp)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ s16 *out;
+ s64 delta;
+ unsigned int i;
+
+ /*
+ * Ignore samples if the buffer is not enabled: this can happen when
+ * the ODR is already set but the buffer has not been enabled yet.
+ */
+ if (!iio_buffer_enabled(indio_dev))
+ return 0;
+
+ out = (s16 *)st->samples;
+ for_each_set_bit(i,
+ indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ *out = data[i];
+ out++;
+ }
+
+ if (iio_device_get_clock(indio_dev) != CLOCK_BOOTTIME)
+ delta = iio_get_time_ns(indio_dev) - cros_ec_get_time_ns();
+ else
+ delta = 0;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
+ timestamp + delta);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data);
+
+static void cros_ec_sensors_core_clean(void *arg)
+{
+ struct platform_device *pdev = (struct platform_device *)arg;
+ struct cros_ec_sensorhub *sensor_hub =
+ dev_get_drvdata(pdev->dev.parent);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ u8 sensor_num = st->param.info.sensor_num;
+
+ cros_ec_sensorhub_unregister_push_data(sensor_hub, sensor_num);
+}
+
+/**
+ * cros_ec_sensors_core_init() - basic initialization of the core structure
+ * @pdev: platform device created for the sensors
+ * @indio_dev: iio device structure of the device
+ * @physical_device: true if the device refers to a physical device
+ * @trigger_capture: function pointer to call when the buffer is triggered,
+ * for backward compatibility.
+ * @push_data: function to call when cros_ec_sensorhub receives
+ * a sample for that sensor.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int cros_ec_sensors_core_init(struct platform_device *pdev,
struct iio_dev *indio_dev,
- bool physical_device)
+ bool physical_device,
+ cros_ec_sensors_capture_t trigger_capture,
+ cros_ec_sensorhub_push_data_cb_t push_data)
{
struct device *dev = &pdev->dev;
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
@@ -92,6 +253,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
struct cros_ec_dev *ec = sensor_hub->ec;
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
u32 ver_mask;
+ int frequencies[ARRAY_SIZE(state->frequencies) / 2] = { 0 };
int ret, i;
platform_set_drvdata(pdev, indio_dev);
@@ -123,8 +285,6 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
indio_dev->name = pdev->name;
if (physical_device) {
- indio_dev->modes = INDIO_DIRECT_MODE;
-
state->param.cmd = MOTIONSENSE_CMD_INFO;
state->param.info.sensor_num = sensor_platform->sensor_num;
ret = cros_ec_motion_send_host_cmd(state, 0);
@@ -142,16 +302,63 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
state->calib[i].scale = MOTION_SENSE_DEFAULT_SCALE;
/* 0 is a correct value used to stop the device */
- state->frequencies[0] = 0;
if (state->msg->version < 3) {
get_default_min_max_freq(state->resp->info.type,
- &state->frequencies[1],
- &state->frequencies[2]);
+ &frequencies[1],
+ &frequencies[2],
+ &state->fifo_max_event_count);
} else {
- state->frequencies[1] =
- state->resp->info_3.min_frequency;
- state->frequencies[2] =
- state->resp->info_3.max_frequency;
+ frequencies[1] = state->resp->info_3.min_frequency;
+ frequencies[2] = state->resp->info_3.max_frequency;
+ state->fifo_max_event_count =
+ state->resp->info_3.fifo_max_event_count;
+ }
+ for (i = 0; i < ARRAY_SIZE(frequencies); i++) {
+ state->frequencies[2 * i] = frequencies[i] / 1000;
+ state->frequencies[2 * i + 1] =
+ (frequencies[i] % 1000) * 1000;
+ }
+
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ /*
+ * Create a software buffer, fed by the EC FIFO.
+ * We cannot use a trigger here, as events are generated
+ * as soon as sample_frequency is set.
+ */
+ struct iio_buffer *buffer;
+
+ buffer = devm_iio_kfifo_allocate(dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+ indio_dev->modes = INDIO_BUFFER_SOFTWARE;
+
+ ret = cros_ec_sensorhub_register_push_data(
+ sensor_hub, sensor_platform->sensor_num,
+ indio_dev, push_data);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(
+ dev, cros_ec_sensors_core_clean, pdev);
+ if (ret)
+ return ret;
+
+ /* Timestamps coming from the FIFO are in ns since boot. */
+ ret = iio_device_set_clock(indio_dev, CLOCK_BOOTTIME);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * The only way to get samples into the buffer is to set up a
+ * software trigger (systrig, hrtimer).
+ */
+ ret = devm_iio_triggered_buffer_setup(
+ dev, indio_dev, NULL, trigger_capture,
+ NULL);
+ if (ret)
+ return ret;
}
}
@@ -159,6 +366,16 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
+/**
+ * cros_ec_motion_send_host_cmd() - send motion sense host command
+ * @state: pointer to state information for device
+ * @opt_length: optional length to reduce the response size, useful on the data
+ * path. Otherwise, the maximal allowed response size is used
+ *
+ * When called, the sub-command is assumed to be set in param->cmd.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
u16 opt_length)
{
@@ -421,6 +638,14 @@ int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc);
+/**
+ * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data)
{
@@ -445,6 +670,18 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd);
+/**
+ * cros_ec_sensors_capture() - the trigger handler function
+ * @irq: the interrupt number.
+ * @p: a pointer to the poll function.
+ *
+ * On a trigger event occurring, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
irqreturn_t cros_ec_sensors_capture(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -480,26 +717,24 @@ done:
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_capture);
+/**
+ * cros_ec_sensors_core_read() - function to request a value from the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: will contain one element making up the returned value
+ * @val2: will contain another element making up the returned value
+ * @mask: specifies which values to be requested
+ *
+ * Return: the type of value returned by the device
+ */
int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- int ret;
+ int ret, frequency;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data =
- EC_MOTION_SENSE_NO_VALUE;
-
- ret = cros_ec_motion_send_host_cmd(st, 0);
- if (ret)
- break;
-
- *val = st->resp->ec_rate.ret;
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_FREQUENCY:
st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
st->param.sensor_odr.data =
EC_MOTION_SENSE_NO_VALUE;
@@ -508,8 +743,10 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
if (ret)
break;
- *val = st->resp->sensor_odr.ret;
- ret = IIO_VAL_INT;
+ frequency = st->resp->sensor_odr.ret;
+ *val = frequency / 1000;
+ *val2 = (frequency % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret = -EINVAL;
@@ -520,6 +757,17 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
+/**
+ * cros_ec_sensors_core_read_avail() - get available values
+ * @indio_dev: pointer to state information for device
+ * @chan: channel specification structure table
+ * @vals: list of available values
+ * @type: type of data returned
+ * @length: number of data returned in the array
+ * @mask: specifies which values to be requested
+ *
+ * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST
+ */
int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals,
@@ -533,7 +781,7 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SAMP_FREQ:
*length = ARRAY_SIZE(state->frequencies);
*vals = (const int *)&state->frequencies;
- *type = IIO_VAL_INT;
+ *type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
}
@@ -541,31 +789,33 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read_avail);
+/**
+ * cros_ec_sensors_core_write() - function to write a value to the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: first part of value to write
+ * @val2: second part of value to write
+ * @mask: specifies which values to write
+ *
+ * Return: the type of value returned by the device
+ */
int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- int ret;
+ int ret, frequency;
switch (mask) {
- case IIO_CHAN_INFO_FREQUENCY:
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ frequency = val * 1000 + val2 / 1000;
st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
- st->param.sensor_odr.data = val;
+ st->param.sensor_odr.data = frequency;
/* Always roundup, so caller gets at least what it asks for. */
st->param.sensor_odr.roundup = 1;
ret = cros_ec_motion_send_host_cmd(st, 0);
break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data = val;
-
- ret = cros_ec_motion_send_host_cmd(st, 0);
- if (ret)
- break;
- st->curr_sampl_freq = val;
- break;
default:
ret = -EINVAL;
break;
@@ -574,52 +824,5 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
-static int __maybe_unused cros_ec_sensors_prepare(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
-
- if (st->curr_sampl_freq == 0)
- return 0;
-
- /*
- * If the sensors are sampled at high frequency, we will not be able to
- * sleep. Set sampling to a long period if necessary.
- */
- if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) {
- mutex_lock(&st->cmd_lock);
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data = CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY;
- cros_ec_motion_send_host_cmd(st, 0);
- mutex_unlock(&st->cmd_lock);
- }
- return 0;
-}
-
-static void __maybe_unused cros_ec_sensors_complete(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
-
- if (st->curr_sampl_freq == 0)
- return;
-
- if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) {
- mutex_lock(&st->cmd_lock);
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data = st->curr_sampl_freq;
- cros_ec_motion_send_host_cmd(st, 0);
- mutex_unlock(&st->cmd_lock);
- }
-}
-
-const struct dev_pm_ops cros_ec_sensors_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .prepare = cros_ec_sensors_prepare,
- .complete = cros_ec_sensors_complete
-#endif
-};
-EXPORT_SYMBOL_GPL(cros_ec_sensors_pm_ops);
-
MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index e051edbc43c1..13bdfbbf5f71 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -79,7 +79,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
- if (!sdata->sensor_settings->odr.addr)
+ if (!sdata->sensor_settings->odr.mask)
return 0;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
@@ -328,6 +328,8 @@ static struct st_sensors_platform_data *st_sensors_dev_probe(struct device *dev,
return NULL;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
if (!device_property_read_u32(dev, "st,drdy-int-pin", &val) && (val <= 2))
pdata->drdy_int_pin = (u8) val;
else
@@ -371,6 +373,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
/* If OF/DT pdata exists, it will take precedence of anything else */
of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
+ if (IS_ERR(of_pdata))
+ return PTR_ERR(of_pdata);
if (of_pdata)
pdata = of_pdata;
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 979070196da9..93744011b63f 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -121,26 +121,6 @@ config AD5624R_SPI
Say yes here to build support for Analog Devices AD5624R, AD5644R and
AD5664R converters (DAC). This driver uses the common SPI interface.
-config LTC1660
- tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver"
- depends on SPI
- help
- Say yes here to build support for Linear Technology
- LTC1660 and LTC1665 Digital to Analog Converters.
-
- To compile this driver as a module, choose M here: the
- module will be called ltc1660.
-
-config LTC2632
- tristate "Linear Technology LTC2632-12/10/8 DAC spi driver"
- depends on SPI
- help
- Say yes here to build support for Linear Technology
- LTC2632-12, LTC2632-10, LTC2632-8 converters (DAC).
-
- To compile this driver as a module, choose M here: the
- module will be called ltc2632.
-
config AD5686
tristate
@@ -208,6 +188,16 @@ config AD5764
To compile this driver as a module, choose M here: the
module will be called ad5764.
+config AD5770R
+ tristate "Analog Devices AD5770R IDAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5770R Digital to
+ Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5770r.
+
config AD5791
tristate "Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC SPI driver"
depends on SPI
@@ -229,16 +219,6 @@ config AD7303
To compile this driver as module choose M here: the module will be called
ad7303.
-config CIO_DAC
- tristate "Measurement Computing CIO-DAC IIO driver"
- depends on X86 && (ISA_BUS || PC104)
- select ISA_BUS_API
- help
- Say yes here to build support for the Measurement Computing CIO-DAC
- analog output device family (CIO-DAC16, CIO-DAC08, PC104-DAC06). The
- base port addresses for the devices may be configured via the base
- array module parameter.
-
config AD8801
tristate "Analog Devices AD8801/AD8803 DAC driver"
depends on SPI_MASTER
@@ -249,6 +229,16 @@ config AD8801
To compile this driver as a module choose M here: the module will be called
ad8801.
+config CIO_DAC
+ tristate "Measurement Computing CIO-DAC IIO driver"
+ depends on X86 && (ISA_BUS || PC104)
+ select ISA_BUS_API
+ help
+ Say yes here to build support for the Measurement Computing CIO-DAC
+ analog output device family (CIO-DAC16, CIO-DAC08, PC104-DAC06). The
+ base port addresses for the devices may be configured via the base
+ array module parameter.
+
config DPOT_DAC
tristate "DAC emulation using a DPOT"
depends on OF
@@ -278,6 +268,27 @@ config LPC18XX_DAC
To compile this driver as a module, choose M here: the module will be
called lpc18xx_dac.
+config LTC1660
+ tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Linear Technology
+ LTC1660 and LTC1665 Digital to Analog Converters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ltc1660.
+
+config LTC2632
+	tristate "Linear Technology LTC2632-12/10/8 and LTC2636-12/10/8 DAC SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Linear Technology
+ LTC2632-12, LTC2632-10, LTC2632-8, LTC2636-12, LTC2636-10 and
+ LTC2636-8 converters (DAC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ltc2632.
+
config M62332
tristate "Mitsubishi M62332 DAC driver"
depends on I2C
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 1369fa1d2f0e..2fc481167724 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_AD5755) += ad5755.o
obj-$(CONFIG_AD5755) += ad5758.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
+obj-$(CONFIG_AD5770R) += ad5770r.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD5686_SPI) += ad5686-spi.o
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index b9175fb4c8ab..388ddd14bfd0 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -631,10 +631,9 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
}
}
- if (i == ARRAY_SIZE(ad5755_dcdc_freq_table)) {
+ if (i == ARRAY_SIZE(ad5755_dcdc_freq_table))
dev_err(dev,
- "adi,dc-dc-freq out of range selecting 410kHz");
- }
+ "adi,dc-dc-freq out of range selecting 410kHz\n");
}
pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V;
@@ -645,17 +644,16 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
break;
}
}
- if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table)) {
+ if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table))
dev_err(dev,
- "adi,dc-dc-maxv out of range selecting 23V");
- }
+ "adi,dc-dc-maxv out of range selecting 23V\n");
}
devnr = 0;
for_each_child_of_node(np, pp) {
if (devnr >= AD5755_NUM_CHANNELS) {
dev_err(dev,
- "There is to many channels defined in DT\n");
+ "There are too many channels defined in DT\n");
goto error_out;
}
@@ -681,11 +679,10 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
break;
}
}
- if (i == ARRAY_SIZE(ad5755_slew_rate_table)) {
+ if (i == ARRAY_SIZE(ad5755_slew_rate_table))
dev_err(dev,
- "channel %d slew rate out of range selecting 64kHz",
+ "channel %d slew rate out of range selecting 64kHz\n",
devnr);
- }
pdata->dac[devnr].slew.step_size = AD5755_SLEW_STEP_SIZE_1;
for (i = 0; i < ARRAY_SIZE(ad5755_slew_step_table); i++) {
@@ -695,11 +692,10 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
break;
}
}
- if (i == ARRAY_SIZE(ad5755_slew_step_table)) {
+ if (i == ARRAY_SIZE(ad5755_slew_step_table))
dev_err(dev,
- "channel %d slew step size out of range selecting 1 LSB",
+ "channel %d slew step size out of range selecting 1 LSB\n",
devnr);
- }
} else {
pdata->dac[devnr].slew.enable = false;
pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c
new file mode 100644
index 000000000000..2d7623b9b2c0
--- /dev/null
+++ b/drivers/iio/dac/ad5770r.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AD5770R Digital to analog converters driver
+ *
+ * Copyright 2018 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#define ADI_SPI_IF_CONFIG_A 0x00
+#define ADI_SPI_IF_CONFIG_B 0x01
+#define ADI_SPI_IF_DEVICE_CONFIG 0x02
+#define ADI_SPI_IF_CHIP_TYPE 0x03
+#define ADI_SPI_IF_PRODUCT_ID_L 0x04
+#define ADI_SPI_IF_PRODUCT_ID_H 0x05
+#define ADI_SPI_IF_CHIP_GRADE 0x06
+#define ADI_SPI_IF_SCRATCH_PAD		0x0A
+#define ADI_SPI_IF_SPI_REVISION 0x0B
+#define ADI_SPI_IF_SPI_VENDOR_L 0x0C
+#define ADI_SPI_IF_SPI_VENDOR_H 0x0D
+#define ADI_SPI_IF_SPI_STREAM_MODE 0x0E
+#define ADI_SPI_IF_CONFIG_C 0x10
+#define ADI_SPI_IF_STATUS_A 0x11
+
+/* ADI_SPI_IF_CONFIG_A */
+#define ADI_SPI_IF_SW_RESET_MSK (BIT(0) | BIT(7))
+#define ADI_SPI_IF_SW_RESET_SEL(x) ((x) & ADI_SPI_IF_SW_RESET_MSK)
+#define ADI_SPI_IF_ADDR_ASC_MSK (BIT(2) | BIT(5))
+#define ADI_SPI_IF_ADDR_ASC_SEL(x) (((x) << 2) & ADI_SPI_IF_ADDR_ASC_MSK)
+
+/* ADI_SPI_IF_CONFIG_B */
+#define ADI_SPI_IF_SINGLE_INS_MSK BIT(7)
+#define ADI_SPI_IF_SINGLE_INS_SEL(x) FIELD_PREP(ADI_SPI_IF_SINGLE_INS_MSK, x)
+#define ADI_SPI_IF_SHORT_INS_MSK BIT(7)
+#define ADI_SPI_IF_SHORT_INS_SEL(x)	FIELD_PREP(ADI_SPI_IF_SHORT_INS_MSK, x)
+
+/* ADI_SPI_IF_CONFIG_C */
+#define ADI_SPI_IF_STRICT_REG_MSK BIT(5)
+#define ADI_SPI_IF_STRICT_REG_GET(x) FIELD_GET(ADI_SPI_IF_STRICT_REG_MSK, x)
+
+/* AD5770R configuration registers */
+#define AD5770R_CHANNEL_CONFIG 0x14
+#define AD5770R_OUTPUT_RANGE(ch) (0x15 + (ch))
+#define AD5770R_FILTER_RESISTOR(ch) (0x1D + (ch))
+#define AD5770R_REFERENCE 0x1B
+#define AD5770R_DAC_LSB(ch) (0x26 + 2 * (ch))
+#define AD5770R_DAC_MSB(ch) (0x27 + 2 * (ch))
+#define AD5770R_CH_SELECT 0x34
+#define AD5770R_CH_ENABLE 0x44
+
+/* AD5770R_CHANNEL_CONFIG */
+#define AD5770R_CFG_CH0_SINK_EN(x) (((x) & 0x1) << 7)
+#define AD5770R_CFG_SHUTDOWN_B(x, ch) (((x) & 0x1) << (ch))
+
+/* AD5770R_OUTPUT_RANGE */
+#define AD5770R_RANGE_OUTPUT_SCALING(x) (((x) & GENMASK(5, 0)) << 2)
+#define AD5770R_RANGE_MODE(x) ((x) & GENMASK(1, 0))
+
+/* AD5770R_REFERENCE */
+#define AD5770R_REF_RESISTOR_SEL(x) (((x) & 0x1) << 2)
+#define AD5770R_REF_SEL(x) ((x) & GENMASK(1, 0))
+
+/* AD5770R_CH_ENABLE */
+#define AD5770R_CH_SET(x, ch) (((x) & 0x1) << (ch))
+
+#define AD5770R_MAX_CHANNELS 6
+#define AD5770R_MAX_CH_MODES 14
+#define AD5770R_LOW_VREF_mV 1250
+#define AD5770R_HIGH_VREF_mV 2500
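
To make the field macros concrete, here is a minimal sketch (hypothetical values only) of how an OUTPUT_RANGE byte is composed from the helpers above: a 6-bit scale code lands in bits [7:2] and a 2-bit range mode in bits [1:0].

	u8 scale = 0x20;	/* hypothetical 6-bit scale code */
	u8 mode = 0x1;		/* AD5770R_CH_HIGH_RANGE, see the enums below */
	unsigned int regval;

	regval = AD5770R_RANGE_OUTPUT_SCALING(scale) |
		 AD5770R_RANGE_MODE(mode);
	/* regval == (0x20 << 2) | 0x1 == 0x81 */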
+
+enum ad5770r_ch0_modes {
+ AD5770R_CH0_0_300 = 0,
+ AD5770R_CH0_NEG_60_0,
+ AD5770R_CH0_NEG_60_300
+};
+
+enum ad5770r_ch1_modes {
+ AD5770R_CH1_0_140_LOW_HEAD = 1,
+ AD5770R_CH1_0_140_LOW_NOISE,
+ AD5770R_CH1_0_250
+};
+
+enum ad5770r_ch2_5_modes {
+ AD5770R_CH_LOW_RANGE = 0,
+ AD5770R_CH_HIGH_RANGE
+};
+
+enum ad5770r_ref_v {
+ AD5770R_EXT_2_5_V = 0,
+ AD5770R_INT_1_25_V_OUT_ON,
+ AD5770R_EXT_1_25_V,
+ AD5770R_INT_1_25_V_OUT_OFF
+};
+
+enum ad5770r_output_filter_resistor {
+ AD5770R_FILTER_60_OHM = 0x0,
+ AD5770R_FILTER_5_6_KOHM = 0x5,
+ AD5770R_FILTER_11_2_KOHM,
+ AD5770R_FILTER_22_2_KOHM,
+ AD5770R_FILTER_44_4_KOHM,
+ AD5770R_FILTER_104_KOHM,
+};
+
+struct ad5770r_out_range {
+ u8 out_scale;
+ u8 out_range_mode;
+};
+
+/**
+ * struct ad5770r_state - driver instance specific data
+ * @spi: spi_device
+ * @regmap: regmap
+ * @vref_reg: fixed regulator for reference configuration
+ * @gpio_reset: gpio descriptor
+ * @output_mode:	array containing the output range of each channel
+ * @vref: reference value
+ * @ch_pwr_down: powerdown flags
+ * @internal_ref: internal reference flag
+ * @external_res: external 2.5k resistor flag
+ * @transf_buf:	cache-aligned buffer for SPI read/write
+ */
+struct ad5770r_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct regulator *vref_reg;
+ struct gpio_desc *gpio_reset;
+ struct ad5770r_out_range output_mode[AD5770R_MAX_CHANNELS];
+ int vref;
+ bool ch_pwr_down[AD5770R_MAX_CHANNELS];
+ bool internal_ref;
+ bool external_res;
+ u8 transf_buf[2] ____cacheline_aligned;
+};
+
+static const struct regmap_config ad5770r_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = BIT(7),
+};
+
+struct ad5770r_output_modes {
+ unsigned int ch;
+ u8 mode;
+ int min;
+ int max;
+};
+
+static struct ad5770r_output_modes ad5770r_rng_tbl[] = {
+ { 0, AD5770R_CH0_0_300, 0, 300 },
+ { 0, AD5770R_CH0_NEG_60_0, -60, 0 },
+ { 0, AD5770R_CH0_NEG_60_300, -60, 300 },
+ { 1, AD5770R_CH1_0_140_LOW_HEAD, 0, 140 },
+ { 1, AD5770R_CH1_0_140_LOW_NOISE, 0, 140 },
+ { 1, AD5770R_CH1_0_250, 0, 250 },
+ { 2, AD5770R_CH_LOW_RANGE, 0, 55 },
+ { 2, AD5770R_CH_HIGH_RANGE, 0, 150 },
+ { 3, AD5770R_CH_LOW_RANGE, 0, 45 },
+ { 3, AD5770R_CH_HIGH_RANGE, 0, 100 },
+ { 4, AD5770R_CH_LOW_RANGE, 0, 45 },
+ { 4, AD5770R_CH_HIGH_RANGE, 0, 100 },
+ { 5, AD5770R_CH_LOW_RANGE, 0, 45 },
+ { 5, AD5770R_CH_HIGH_RANGE, 0, 100 },
+};
+
+static const unsigned int ad5770r_filter_freqs[] = {
+ 153, 357, 715, 1400, 2800, 262000,
+};
+
+static const unsigned int ad5770r_filter_reg_vals[] = {
+ AD5770R_FILTER_104_KOHM,
+ AD5770R_FILTER_44_4_KOHM,
+ AD5770R_FILTER_22_2_KOHM,
+ AD5770R_FILTER_11_2_KOHM,
+ AD5770R_FILTER_5_6_KOHM,
+ AD5770R_FILTER_60_OHM
+};
+
+static int ad5770r_set_output_mode(struct ad5770r_state *st,
+ const struct ad5770r_out_range *out_mode,
+ int channel)
+{
+ unsigned int regval;
+
+ regval = AD5770R_RANGE_OUTPUT_SCALING(out_mode->out_scale) |
+ AD5770R_RANGE_MODE(out_mode->out_range_mode);
+
+ return regmap_write(st->regmap,
+ AD5770R_OUTPUT_RANGE(channel), regval);
+}
+
+static int ad5770r_set_reference(struct ad5770r_state *st)
+{
+ unsigned int regval;
+
+ regval = AD5770R_REF_RESISTOR_SEL(st->external_res);
+
+ if (st->internal_ref) {
+ regval |= AD5770R_REF_SEL(AD5770R_INT_1_25_V_OUT_OFF);
+ } else {
+ switch (st->vref) {
+ case AD5770R_LOW_VREF_mV:
+ regval |= AD5770R_REF_SEL(AD5770R_EXT_1_25_V);
+ break;
+ case AD5770R_HIGH_VREF_mV:
+ regval |= AD5770R_REF_SEL(AD5770R_EXT_2_5_V);
+ break;
+ default:
+ regval = AD5770R_REF_SEL(AD5770R_INT_1_25_V_OUT_OFF);
+ break;
+ }
+ }
+
+ return regmap_write(st->regmap, AD5770R_REFERENCE, regval);
+}
+
+static int ad5770r_soft_reset(struct ad5770r_state *st)
+{
+ return regmap_write(st->regmap, ADI_SPI_IF_CONFIG_A,
+ ADI_SPI_IF_SW_RESET_SEL(1));
+}
+
+static int ad5770r_reset(struct ad5770r_state *st)
+{
+ /* Perform software reset if no GPIO provided */
+ if (!st->gpio_reset)
+ return ad5770r_soft_reset(st);
+
+ gpiod_set_value_cansleep(st->gpio_reset, 0);
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(st->gpio_reset, 1);
+
+ /* data must not be written during reset timeframe */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static int ad5770r_get_range(struct ad5770r_state *st,
+ int ch, int *min, int *max)
+{
+ int i;
+ u8 tbl_ch, tbl_mode, out_range;
+
+ out_range = st->output_mode[ch].out_range_mode;
+
+ for (i = 0; i < AD5770R_MAX_CH_MODES; i++) {
+ tbl_ch = ad5770r_rng_tbl[i].ch;
+ tbl_mode = ad5770r_rng_tbl[i].mode;
+ if (tbl_ch == ch && tbl_mode == out_range) {
+ *min = ad5770r_rng_tbl[i].min;
+ *max = ad5770r_rng_tbl[i].max;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ad5770r_get_filter_freq(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *freq)
+{
+ struct ad5770r_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int regval, i;
+
+ ret = regmap_read(st->regmap,
+ AD5770R_FILTER_RESISTOR(chan->channel), &regval);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad5770r_filter_reg_vals); i++)
+ if (regval == ad5770r_filter_reg_vals[i])
+ break;
+ if (i == ARRAY_SIZE(ad5770r_filter_reg_vals))
+ return -EINVAL;
+
+ *freq = ad5770r_filter_freqs[i];
+
+ return IIO_VAL_INT;
+}
+
+static int ad5770r_set_filter_freq(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int freq)
+{
+ struct ad5770r_state *st = iio_priv(indio_dev);
+ unsigned int regval, i;
+
+ for (i = 0; i < ARRAY_SIZE(ad5770r_filter_freqs); i++)
+ if (ad5770r_filter_freqs[i] >= freq)
+ break;
+ if (i == ARRAY_SIZE(ad5770r_filter_freqs))
+ return -EINVAL;
+
+ regval = ad5770r_filter_reg_vals[i];
+
+ return regmap_write(st->regmap, AD5770R_FILTER_RESISTOR(chan->channel),
+ regval);
+}
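
The two parallel tables above map a requested 3 dB corner frequency to a filter resistor code: the search takes the first table frequency greater than or equal to the request, and anything above the largest entry returns -EINVAL. A usage sketch with a hypothetical request:

	/*
	 * Sketch: a request for a 1 kHz corner stops at the 1400 Hz entry,
	 * so AD5770R_FILTER_11_2_KOHM gets programmed.
	 */
	ret = ad5770r_set_filter_freq(indio_dev, &ad5770r_channels[2], 1000);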
+
+static int ad5770r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad5770r_state *st = iio_priv(indio_dev);
+ int max, min, ret;
+ u16 buf16;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_bulk_read(st->regmap,
+ chan->address,
+ st->transf_buf, 2);
+		if (ret)
+			return ret;
+
+ buf16 = st->transf_buf[0] + (st->transf_buf[1] << 8);
+ *val = buf16 >> 2;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = ad5770r_get_range(st, chan->channel, &min, &max);
+ if (ret < 0)
+ return ret;
+ *val = max - min;
+		/*
+		 * There is no sign bit (negative current is mapped from 0):
+		 * (sourced/sunk) current = raw * scale + offset,
+		 * where the offset can be negative in the case of CH0.
+		 */
+ *val2 = 14;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return ad5770r_get_filter_freq(indio_dev, chan, val);
+ case IIO_CHAN_INFO_OFFSET:
+ ret = ad5770r_get_range(st, chan->channel, &min, &max);
+ if (ret < 0)
+ return ret;
+ *val = min;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad5770r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad5770r_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ st->transf_buf[0] = ((u16)val >> 6);
+ st->transf_buf[1] = (val & GENMASK(5, 0)) << 2;
+ return regmap_bulk_write(st->regmap, chan->address,
+ st->transf_buf, 2);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return ad5770r_set_filter_freq(indio_dev, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
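
The RAW write packs the 14-bit DAC code left-justified across two bytes; a worked example with a full-scale code:

	/*
	 * Sketch: val = 0x1FFF (14-bit full scale)
	 * transf_buf[0] = 0x1FFF >> 6          = 0x7F  (upper 8 bits)
	 * transf_buf[1] = (0x1FFF & 0x3F) << 2 = 0xFC  (lower 6 bits,
	 *                                               two unused LSBs)
	 */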
+
+static int ad5770r_read_freq_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *type = IIO_VAL_INT;
+		*vals = (const int *)ad5770r_filter_freqs;
+ *length = ARRAY_SIZE(ad5770r_filter_freqs);
+ return IIO_AVAIL_LIST;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5770r_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad5770r_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+ else
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static const struct iio_info ad5770r_info = {
+ .read_raw = ad5770r_read_raw,
+ .write_raw = ad5770r_write_raw,
+ .read_avail = ad5770r_read_freq_avail,
+	.debugfs_reg_access = ad5770r_reg_access,
+};
+
+static int ad5770r_store_output_range(struct ad5770r_state *st,
+ int min, int max, int index)
+{
+ int i;
+
+ for (i = 0; i < AD5770R_MAX_CH_MODES; i++) {
+ if (ad5770r_rng_tbl[i].ch != index)
+ continue;
+ if (ad5770r_rng_tbl[i].min != min ||
+ ad5770r_rng_tbl[i].max != max)
+ continue;
+ st->output_mode[index].out_range_mode = ad5770r_rng_tbl[i].mode;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t ad5770r_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad5770r_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->ch_pwr_down[chan->channel]);
+}
+
+static ssize_t ad5770r_write_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad5770r_state *st = iio_priv(indio_dev);
+ unsigned int regval;
+ unsigned int mask;
+ bool readin;
+ int ret;
+
+ ret = kstrtobool(buf, &readin);
+ if (ret)
+ return ret;
+
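+	/* sysfs "1" means powered down; the chip registers use enable bits */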
+ readin = !readin;
+
+ regval = AD5770R_CFG_SHUTDOWN_B(readin, chan->channel);
+ if (chan->channel == 0 &&
+ st->output_mode[0].out_range_mode > AD5770R_CH0_0_300) {
+ regval |= AD5770R_CFG_CH0_SINK_EN(readin);
+		mask = BIT(chan->channel) | BIT(7);
+ } else {
+ mask = BIT(chan->channel);
+ }
+ ret = regmap_update_bits(st->regmap, AD5770R_CHANNEL_CONFIG, mask,
+ regval);
+ if (ret)
+ return ret;
+
+ regval = AD5770R_CH_SET(readin, chan->channel);
+ ret = regmap_update_bits(st->regmap, AD5770R_CH_ENABLE,
+ BIT(chan->channel), regval);
+ if (ret)
+ return ret;
+
+ st->ch_pwr_down[chan->channel] = !readin;
+
+ return len;
+}
+
+static const struct iio_chan_spec_ext_info ad5770r_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5770r_read_dac_powerdown,
+ .write = ad5770r_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ { }
+};
+
+#define AD5770R_IDAC_CHANNEL(index, reg) { \
+ .type = IIO_CURRENT, \
+ .address = reg, \
+ .indexed = 1, \
+ .channel = index, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .ext_info = ad5770r_ext_info, \
+}
+
+static const struct iio_chan_spec ad5770r_channels[] = {
+ AD5770R_IDAC_CHANNEL(0, AD5770R_DAC_MSB(0)),
+ AD5770R_IDAC_CHANNEL(1, AD5770R_DAC_MSB(1)),
+ AD5770R_IDAC_CHANNEL(2, AD5770R_DAC_MSB(2)),
+ AD5770R_IDAC_CHANNEL(3, AD5770R_DAC_MSB(3)),
+ AD5770R_IDAC_CHANNEL(4, AD5770R_DAC_MSB(4)),
+ AD5770R_IDAC_CHANNEL(5, AD5770R_DAC_MSB(5)),
+};
+
+static int ad5770r_channel_config(struct ad5770r_state *st)
+{
+ int ret, tmp[2], min, max;
+ unsigned int num;
+ struct fwnode_handle *child;
+
+ num = device_get_child_node_count(&st->spi->dev);
+ if (num != AD5770R_MAX_CHANNELS)
+ return -EINVAL;
+
+ device_for_each_child_node(&st->spi->dev, child) {
+ ret = fwnode_property_read_u32(child, "num", &num);
+ if (ret)
+ return ret;
+ if (num >= AD5770R_MAX_CHANNELS)
+ return -EINVAL;
+
+ ret = fwnode_property_read_u32_array(child,
+ "adi,range-microamp",
+ tmp, 2);
+ if (ret)
+ return ret;
+
+ min = tmp[0] / 1000;
+ max = tmp[1] / 1000;
+ ret = ad5770r_store_output_range(st, min, max, num);
+ if (ret)
+ return ret;
+ }
+
+	return 0;
+}
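
The loop above expects one child node per channel carrying a "num" index and an "adi,range-microamp" <min max> pair in microamps. A devicetree fragment of the shape it parses might look like this (a sketch inferred from the parsing code, not the authoritative binding):

	channel@0 {
		num = <0>;
		adi,range-microamp = <0 300000>;	/* 0 mA to 300 mA */
	};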
+
+static int ad5770r_init(struct ad5770r_state *st)
+{
+ int ret, i;
+
+ st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->gpio_reset))
+ return PTR_ERR(st->gpio_reset);
+
+ /* Perform a reset */
+ ret = ad5770r_reset(st);
+ if (ret)
+ return ret;
+
+ /* Set output range */
+ ret = ad5770r_channel_config(st);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < AD5770R_MAX_CHANNELS; i++) {
+ ret = ad5770r_set_output_mode(st, &st->output_mode[i], i);
+ if (ret)
+ return ret;
+ }
+
+ st->external_res = fwnode_property_read_bool(st->spi->dev.fwnode,
+ "adi,external-resistor");
+
+ ret = ad5770r_set_reference(st);
+ if (ret)
+ return ret;
+
+ /* Set outputs off */
+ ret = regmap_write(st->regmap, AD5770R_CHANNEL_CONFIG, 0x00);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD5770R_CH_ENABLE, 0x00);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < AD5770R_MAX_CHANNELS; i++)
+ st->ch_pwr_down[i] = true;
+
+	return 0;
+}
+
+static void ad5770r_disable_regulator(void *data)
+{
+ struct ad5770r_state *st = data;
+
+ regulator_disable(st->vref_reg);
+}
+
+static int ad5770r_probe(struct spi_device *spi)
+{
+ struct ad5770r_state *st;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ regmap = devm_regmap_init_spi(spi, &ad5770r_spi_regmap_config);
+ if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "Error initializing SPI regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+ st->regmap = regmap;
+
+ st->vref_reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (!IS_ERR(st->vref_reg)) {
+ ret = regulator_enable(st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to enable vref regulators: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ ad5770r_disable_regulator,
+ st);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_get_voltage(st->vref_reg);
+ if (ret < 0)
+ return ret;
+
+ st->vref = ret / 1000;
+ } else {
+ if (PTR_ERR(st->vref_reg) == -ENODEV) {
+ st->vref = AD5770R_LOW_VREF_mV;
+ st->internal_ref = true;
+ } else {
+ return PTR_ERR(st->vref_reg);
+ }
+ }
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5770r_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad5770r_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad5770r_channels);
+
+ ret = ad5770r_init(st);
+ if (ret < 0) {
+ dev_err(&spi->dev, "AD5770R init failed\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&st->spi->dev, indio_dev);
+}
+
+static const struct of_device_id ad5770r_of_id[] = {
+ { .compatible = "adi,ad5770r", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad5770r_of_id);
+
+static const struct spi_device_id ad5770r_id[] = {
+ { "ad5770r", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, ad5770r_id);
+
+static struct spi_driver ad5770r_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ad5770r_of_id,
+ },
+ .probe = ad5770r_probe,
+ .id_table = ad5770r_id,
+};
+
+module_spi_driver(ad5770r_driver);
+
+MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5770R IDAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index 643d1ce956ce..7adc91056aa1 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -12,11 +12,6 @@
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
-#define LTC2632_DAC_CHANNELS 2
-
-#define LTC2632_ADDR_DAC0 0x0
-#define LTC2632_ADDR_DAC1 0x1
-
#define LTC2632_CMD_WRITE_INPUT_N 0x0
#define LTC2632_CMD_UPDATE_DAC_N 0x1
#define LTC2632_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
@@ -33,6 +28,7 @@
*/
struct ltc2632_chip_info {
const struct iio_chan_spec *channels;
+ const size_t num_channels;
const int vref_mv;
};
@@ -57,6 +53,12 @@ enum ltc2632_supported_device_ids {
ID_LTC2632H12,
ID_LTC2632H10,
ID_LTC2632H8,
+ ID_LTC2636L12,
+ ID_LTC2636L10,
+ ID_LTC2636L8,
+ ID_LTC2636H12,
+ ID_LTC2636H10,
+ ID_LTC2636H8,
};
static int ltc2632_spi_write(struct spi_device *spi,
@@ -190,39 +192,77 @@ static const struct iio_chan_spec_ext_info ltc2632_ext_info[] = {
const struct iio_chan_spec _name ## _channels[] = { \
LTC2632_CHANNEL(0, _bits), \
LTC2632_CHANNEL(1, _bits), \
+ LTC2632_CHANNEL(2, _bits), \
+ LTC2632_CHANNEL(3, _bits), \
+ LTC2632_CHANNEL(4, _bits), \
+ LTC2632_CHANNEL(5, _bits), \
+ LTC2632_CHANNEL(6, _bits), \
+ LTC2632_CHANNEL(7, _bits), \
}
-static DECLARE_LTC2632_CHANNELS(ltc2632l12, 12);
-static DECLARE_LTC2632_CHANNELS(ltc2632l10, 10);
-static DECLARE_LTC2632_CHANNELS(ltc2632l8, 8);
-
-static DECLARE_LTC2632_CHANNELS(ltc2632h12, 12);
-static DECLARE_LTC2632_CHANNELS(ltc2632h10, 10);
-static DECLARE_LTC2632_CHANNELS(ltc2632h8, 8);
+static DECLARE_LTC2632_CHANNELS(ltc2632x12, 12);
+static DECLARE_LTC2632_CHANNELS(ltc2632x10, 10);
+static DECLARE_LTC2632_CHANNELS(ltc2632x8, 8);
static const struct ltc2632_chip_info ltc2632_chip_info_tbl[] = {
[ID_LTC2632L12] = {
- .channels = ltc2632l12_channels,
+ .channels = ltc2632x12_channels,
+ .num_channels = 2,
.vref_mv = 2500,
},
[ID_LTC2632L10] = {
- .channels = ltc2632l10_channels,
+ .channels = ltc2632x10_channels,
+ .num_channels = 2,
.vref_mv = 2500,
},
[ID_LTC2632L8] = {
- .channels = ltc2632l8_channels,
+ .channels = ltc2632x8_channels,
+ .num_channels = 2,
.vref_mv = 2500,
},
[ID_LTC2632H12] = {
- .channels = ltc2632h12_channels,
+ .channels = ltc2632x12_channels,
+ .num_channels = 2,
.vref_mv = 4096,
},
[ID_LTC2632H10] = {
- .channels = ltc2632h10_channels,
+ .channels = ltc2632x10_channels,
+ .num_channels = 2,
.vref_mv = 4096,
},
[ID_LTC2632H8] = {
- .channels = ltc2632h8_channels,
+ .channels = ltc2632x8_channels,
+ .num_channels = 2,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2636L12] = {
+ .channels = ltc2632x12_channels,
+ .num_channels = 8,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2636L10] = {
+ .channels = ltc2632x10_channels,
+ .num_channels = 8,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2636L8] = {
+ .channels = ltc2632x8_channels,
+ .num_channels = 8,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2636H12] = {
+ .channels = ltc2632x12_channels,
+ .num_channels = 8,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2636H10] = {
+ .channels = ltc2632x10_channels,
+ .num_channels = 8,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2636H8] = {
+ .channels = ltc2632x8_channels,
+ .num_channels = 8,
.vref_mv = 4096,
},
};
@@ -291,7 +331,7 @@ static int ltc2632_probe(struct spi_device *spi)
indio_dev->info = &ltc2632_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = chip_info->channels;
- indio_dev->num_channels = LTC2632_DAC_CHANNELS;
+ indio_dev->num_channels = chip_info->num_channels;
return iio_device_register(indio_dev);
}
@@ -316,6 +356,12 @@ static const struct spi_device_id ltc2632_id[] = {
{ "ltc2632-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H12] },
{ "ltc2632-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H10] },
{ "ltc2632-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H8] },
+ { "ltc2636-l12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L12] },
+ { "ltc2636-l10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L10] },
+ { "ltc2636-l8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L8] },
+ { "ltc2636-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636H12] },
+ { "ltc2636-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636H10] },
+ { "ltc2636-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636H8] },
{}
};
MODULE_DEVICE_TABLE(spi, ltc2632_id);
@@ -339,6 +385,24 @@ static const struct of_device_id ltc2632_of_match[] = {
}, {
.compatible = "lltc,ltc2632-h8",
.data = &ltc2632_chip_info_tbl[ID_LTC2632H8]
+ }, {
+ .compatible = "lltc,ltc2636-l12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2636L12]
+ }, {
+ .compatible = "lltc,ltc2636-l10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2636L10]
+ }, {
+ .compatible = "lltc,ltc2636-l8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2636L8]
+ }, {
+ .compatible = "lltc,ltc2636-h12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2636H12]
+ }, {
+ .compatible = "lltc,ltc2636-h10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2636H10]
+ }, {
+ .compatible = "lltc,ltc2636-h8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2636H8]
},
{}
};
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 71f8a5c471c4..7f1e9317c3f3 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -223,6 +223,7 @@ static int vf610_dac_probe(struct platform_device *pdev)
return 0;
error_iio_device_register:
+ vf610_dac_exit(info);
clk_disable_unprepare(info->clk);
return ret;
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index d5e03a406d4a..a4c967a5fc5c 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -59,7 +59,7 @@
struct adis16136_chip_info {
unsigned int precision;
unsigned int fullscale;
- const struct adis_timeout *timeouts;
+ const struct adis_data adis_data;
};
struct adis16136 {
@@ -466,22 +466,22 @@ static const char * const adis16136_status_error_msgs[] = {
[ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL] = "Flash checksum error",
};
-static const struct adis_data adis16136_data = {
- .diag_stat_reg = ADIS16136_REG_DIAG_STAT,
- .glob_cmd_reg = ADIS16136_REG_GLOB_CMD,
- .msc_ctrl_reg = ADIS16136_REG_MSC_CTRL,
-
- .self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST,
-
- .read_delay = 10,
- .write_delay = 10,
-
- .status_error_msgs = adis16136_status_error_msgs,
- .status_error_mask = BIT(ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL) |
- BIT(ADIS16136_DIAG_STAT_SPI_FAIL) |
- BIT(ADIS16136_DIAG_STAT_SELF_TEST_FAIL) |
- BIT(ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL),
-};
+#define ADIS16136_DATA(_timeouts) \
+{ \
+ .diag_stat_reg = ADIS16136_REG_DIAG_STAT, \
+ .glob_cmd_reg = ADIS16136_REG_GLOB_CMD, \
+ .msc_ctrl_reg = ADIS16136_REG_MSC_CTRL, \
+ .self_test_reg = ADIS16136_REG_MSC_CTRL, \
+ .self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST, \
+ .read_delay = 10, \
+ .write_delay = 10, \
+ .status_error_msgs = adis16136_status_error_msgs, \
+ .status_error_mask = BIT(ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL) | \
+ BIT(ADIS16136_DIAG_STAT_SPI_FAIL) | \
+ BIT(ADIS16136_DIAG_STAT_SELF_TEST_FAIL) | \
+ BIT(ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL), \
+ .timeouts = (_timeouts), \
+}
enum adis16136_id {
ID_ADIS16133,
@@ -506,41 +506,25 @@ static const struct adis16136_chip_info adis16136_chip_info[] = {
[ID_ADIS16133] = {
.precision = IIO_DEGREE_TO_RAD(1200),
.fullscale = 24000,
- .timeouts = &adis16133_timeouts,
+ .adis_data = ADIS16136_DATA(&adis16133_timeouts),
},
[ID_ADIS16135] = {
.precision = IIO_DEGREE_TO_RAD(300),
.fullscale = 24000,
- .timeouts = &adis16133_timeouts,
+ .adis_data = ADIS16136_DATA(&adis16133_timeouts),
},
[ID_ADIS16136] = {
.precision = IIO_DEGREE_TO_RAD(450),
.fullscale = 24623,
- .timeouts = &adis16136_timeouts,
+ .adis_data = ADIS16136_DATA(&adis16136_timeouts),
},
[ID_ADIS16137] = {
.precision = IIO_DEGREE_TO_RAD(1000),
.fullscale = 24609,
- .timeouts = &adis16136_timeouts,
+ .adis_data = ADIS16136_DATA(&adis16136_timeouts),
},
};
-static struct adis_data *adis16136_adis_data_alloc(struct adis16136 *st,
- struct device *dev)
-{
- struct adis_data *data;
-
- data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- memcpy(data, &adis16136_data, sizeof(*data));
-
- data->timeouts = st->chip_info->timeouts;
-
- return data;
-}
-
static int adis16136_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -565,9 +549,7 @@ static int adis16136_probe(struct spi_device *spi)
indio_dev->info = &adis16136_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- adis16136_data = adis16136_adis_data_alloc(adis16136, &spi->dev);
- if (IS_ERR(adis16136_data))
- return PTR_ERR(adis16136_data);
+ adis16136_data = &adis16136->chip_info->adis_data;
ret = adis_init(&adis16136->adis, indio_dev, spi, adis16136_data);
if (ret)
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index be09b3e5910c..9823573e811a 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -346,6 +346,7 @@ static const struct adis_data adis16260_data = {
.diag_stat_reg = ADIS16260_DIAG_STAT,
.self_test_mask = ADIS16260_MSC_CTRL_MEM_TEST,
+ .self_test_reg = ADIS16260_MSC_CTRL,
.timeouts = &adis16260_timeouts,
.status_error_msgs = adis1620_status_error_msgs,
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 022bb54fb748..a8afd01de4f3 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -7,6 +7,7 @@
*/
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -346,8 +347,8 @@ static int adis_self_test(struct adis *adis)
int ret;
const struct adis_timeout *timeouts = adis->data->timeouts;
- ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
- adis->data->self_test_mask);
+ ret = __adis_write_reg_16(adis, adis->data->self_test_reg,
+ adis->data->self_test_mask);
if (ret) {
dev_err(&adis->spi->dev, "Failed to initiate self test: %d\n",
ret);
@@ -359,42 +360,71 @@ static int adis_self_test(struct adis *adis)
ret = __adis_check_status(adis);
if (adis->data->self_test_no_autoclear)
- __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
+ __adis_write_reg_16(adis, adis->data->self_test_reg, 0x00);
return ret;
}
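
adis_self_test() now writes the mask to the new self_test_reg field instead of assuming msc_ctrl_reg, so devices that trigger self-test through another register (e.g. GLOB_CMD) can describe it directly. A driver-side sketch with hypothetical register names:

	static const struct adis_data my_chip_data = {
		.msc_ctrl_reg = MY_CHIP_MSC_CTRL,	/* hypothetical */
		.self_test_reg = MY_CHIP_GLOB_CMD,	/* hypothetical */
		.self_test_mask = BIT(2),
		/* ... */
	};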
/**
- * adis_inital_startup() - Performs device self-test
+ * __adis_initial_startup() - Device initial setup
* @adis: The adis device
*
+ * The function performs a hardware reset via a reset pin that should be
+ * specified via GPIOLIB. If no pin is configured, a software reset will be
+ * performed. The RST pin for the ADIS devices should be configured as
+ * ACTIVE_LOW.
+ *
+ * After the self-test operation is performed, the function will also check
+ * that the product ID is as expected. This assumes that drivers providing
+ * 'prod_id_reg' will also provide the 'prod_id'.
+ *
* Returns 0 if the device is operational, a negative error code otherwise.
*
* This function should be called early on in the device initialization sequence
* to ensure that the device is in a sane and known state and that it is usable.
*/
-int adis_initial_startup(struct adis *adis)
+int __adis_initial_startup(struct adis *adis)
{
+ const struct adis_timeout *timeouts = adis->data->timeouts;
+ struct gpio_desc *gpio;
+ uint16_t prod_id;
int ret;
- mutex_lock(&adis->state_lock);
+	/* use the optional reset pin for a hard reset, if one is provided */
+ gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ gpiod_set_value_cansleep(gpio, 1);
+ msleep(10);
+ /* bring device out of reset */
+ gpiod_set_value_cansleep(gpio, 0);
+ msleep(timeouts->reset_ms);
+ } else {
+ ret = __adis_reset(adis);
+ if (ret)
+ return ret;
+ }
ret = adis_self_test(adis);
- if (ret) {
- dev_err(&adis->spi->dev, "Self-test failed, trying reset.\n");
- __adis_reset(adis);
- ret = adis_self_test(adis);
- if (ret) {
- dev_err(&adis->spi->dev, "Second self-test failed, giving up.\n");
- goto out_unlock;
- }
- }
+ if (ret)
+ return ret;
-out_unlock:
- mutex_unlock(&adis->state_lock);
- return ret;
+ if (!adis->data->prod_id_reg)
+ return 0;
+
+ ret = adis_read_reg_16(adis, adis->data->prod_id_reg, &prod_id);
+ if (ret)
+ return ret;
+
+ if (prod_id != adis->data->prod_id)
+ dev_warn(&adis->spi->dev,
+			"Device ID(%u) and product ID(%u) do not match.\n",
+ adis->data->prod_id, prod_id);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(adis_initial_startup);
+EXPORT_SYMBOL_GPL(__adis_initial_startup);
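
__adis_initial_startup() is the unlocked variant; callers that do not already hold the state lock would go through a locked wrapper, presumably provided in the adis header along these lines (a sketch, assuming the state_lock convention used elsewhere in this file):

	static inline int adis_initial_startup(struct adis *adis)
	{
		int ret;

		mutex_lock(&adis->state_lock);
		ret = __adis_initial_startup(adis);
		mutex_unlock(&adis->state_lock);

		return ret;
	}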
/**
* adis_single_conversion() - Performs a single sample conversion
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index cfb1c19eb930..05e70c1c4835 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -156,7 +156,7 @@ struct adis16400_state;
struct adis16400_chip_info {
const struct iio_chan_spec *channels;
- const struct adis_timeout *timeouts;
+ const struct adis_data adis_data;
const int num_channels;
const long flags;
unsigned int gyro_scale_micro;
@@ -930,12 +930,64 @@ static const struct iio_chan_spec adis16334_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
+static const char * const adis16400_status_error_msgs[] = {
+ [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_ALARM2] = "Alarm 2 active",
+ [ADIS16400_DIAG_STAT_ALARM1] = "Alarm 1 active",
+ [ADIS16400_DIAG_STAT_FLASH_CHK] = "Flash checksum error",
+ [ADIS16400_DIAG_STAT_SELF_TEST] = "Self test error",
+ [ADIS16400_DIAG_STAT_OVERFLOW] = "Sensor overrange",
+ [ADIS16400_DIAG_STAT_SPI_FAIL] = "SPI failure",
+ [ADIS16400_DIAG_STAT_FLASH_UPT] = "Flash update failed",
+ [ADIS16400_DIAG_STAT_POWER_HIGH] = "Power supply above 5.25V",
+ [ADIS16400_DIAG_STAT_POWER_LOW] = "Power supply below 4.75V",
+};
+
+#define ADIS16400_DATA(_timeouts) \
+{ \
+ .msc_ctrl_reg = ADIS16400_MSC_CTRL, \
+ .glob_cmd_reg = ADIS16400_GLOB_CMD, \
+ .diag_stat_reg = ADIS16400_DIAG_STAT, \
+ .read_delay = 50, \
+ .write_delay = 50, \
+ .self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST, \
+ .self_test_reg = ADIS16400_MSC_CTRL, \
+ .status_error_msgs = adis16400_status_error_msgs, \
+ .status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) | \
+ BIT(ADIS16400_DIAG_STAT_YACCL_FAIL) | \
+ BIT(ADIS16400_DIAG_STAT_XACCL_FAIL) | \
+ BIT(ADIS16400_DIAG_STAT_XGYRO_FAIL) | \
+ BIT(ADIS16400_DIAG_STAT_YGYRO_FAIL) | \
+ BIT(ADIS16400_DIAG_STAT_ZGYRO_FAIL) | \
+ BIT(ADIS16400_DIAG_STAT_ALARM2) | \
+ BIT(ADIS16400_DIAG_STAT_ALARM1) | \
+ BIT(ADIS16400_DIAG_STAT_FLASH_CHK) | \
+ BIT(ADIS16400_DIAG_STAT_SELF_TEST) | \
+ BIT(ADIS16400_DIAG_STAT_OVERFLOW) | \
+ BIT(ADIS16400_DIAG_STAT_SPI_FAIL) | \
+ BIT(ADIS16400_DIAG_STAT_FLASH_UPT) | \
+ BIT(ADIS16400_DIAG_STAT_POWER_HIGH) | \
+ BIT(ADIS16400_DIAG_STAT_POWER_LOW), \
+ .timeouts = (_timeouts), \
+}
+
static const struct adis_timeout adis16300_timeouts = {
.reset_ms = ADIS16400_STARTUP_DELAY,
.sw_reset_ms = ADIS16400_STARTUP_DELAY,
.self_test_ms = ADIS16400_STARTUP_DELAY,
};
+static const struct adis_timeout adis16334_timeouts = {
+ .reset_ms = 60,
+ .sw_reset_ms = 60,
+ .self_test_ms = 14,
+};
+
static const struct adis_timeout adis16362_timeouts = {
.reset_ms = 130,
.sw_reset_ms = 130,
@@ -972,7 +1024,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .timeouts = &adis16300_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16300_timeouts),
},
[ADIS16334] = {
.channels = adis16334_channels,
@@ -985,6 +1037,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 67850, /* 25 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
+ .adis_data = ADIS16400_DATA(&adis16334_timeouts),
},
[ADIS16350] = {
.channels = adis16350_channels,
@@ -996,7 +1049,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .timeouts = &adis16300_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16300_timeouts),
},
[ADIS16360] = {
.channels = adis16350_channels,
@@ -1009,7 +1062,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .timeouts = &adis16300_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16300_timeouts),
},
[ADIS16362] = {
.channels = adis16350_channels,
@@ -1022,7 +1075,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .timeouts = &adis16362_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16362_timeouts),
},
[ADIS16364] = {
.channels = adis16350_channels,
@@ -1035,7 +1088,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .timeouts = &adis16362_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16362_timeouts),
},
[ADIS16367] = {
.channels = adis16350_channels,
@@ -1048,7 +1101,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .timeouts = &adis16300_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16300_timeouts),
},
[ADIS16400] = {
.channels = adis16400_channels,
@@ -1060,7 +1113,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .timeouts = &adis16400_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16400_timeouts),
},
[ADIS16445] = {
.channels = adis16445_channels,
@@ -1074,7 +1127,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
- .timeouts = &adis16445_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16445_timeouts),
},
[ADIS16448] = {
.channels = adis16448_channels,
@@ -1088,7 +1141,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
- .timeouts = &adis16448_timeouts,
+ .adis_data = ADIS16400_DATA(&adis16448_timeouts),
}
};
@@ -1099,52 +1152,6 @@ static const struct iio_info adis16400_info = {
.debugfs_reg_access = adis_debugfs_reg_access,
};
-static const char * const adis16400_status_error_msgs[] = {
- [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
- [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
- [ADIS16400_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
- [ADIS16400_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
- [ADIS16400_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
- [ADIS16400_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
- [ADIS16400_DIAG_STAT_ALARM2] = "Alarm 2 active",
- [ADIS16400_DIAG_STAT_ALARM1] = "Alarm 1 active",
- [ADIS16400_DIAG_STAT_FLASH_CHK] = "Flash checksum error",
- [ADIS16400_DIAG_STAT_SELF_TEST] = "Self test error",
- [ADIS16400_DIAG_STAT_OVERFLOW] = "Sensor overrange",
- [ADIS16400_DIAG_STAT_SPI_FAIL] = "SPI failure",
- [ADIS16400_DIAG_STAT_FLASH_UPT] = "Flash update failed",
- [ADIS16400_DIAG_STAT_POWER_HIGH] = "Power supply above 5.25V",
- [ADIS16400_DIAG_STAT_POWER_LOW] = "Power supply below 4.75V",
-};
-
-static const struct adis_data adis16400_data = {
- .msc_ctrl_reg = ADIS16400_MSC_CTRL,
- .glob_cmd_reg = ADIS16400_GLOB_CMD,
- .diag_stat_reg = ADIS16400_DIAG_STAT,
-
- .read_delay = 50,
- .write_delay = 50,
-
- .self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST,
-
- .status_error_msgs = adis16400_status_error_msgs,
- .status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) |
- BIT(ADIS16400_DIAG_STAT_YACCL_FAIL) |
- BIT(ADIS16400_DIAG_STAT_XACCL_FAIL) |
- BIT(ADIS16400_DIAG_STAT_XGYRO_FAIL) |
- BIT(ADIS16400_DIAG_STAT_YGYRO_FAIL) |
- BIT(ADIS16400_DIAG_STAT_ZGYRO_FAIL) |
- BIT(ADIS16400_DIAG_STAT_ALARM2) |
- BIT(ADIS16400_DIAG_STAT_ALARM1) |
- BIT(ADIS16400_DIAG_STAT_FLASH_CHK) |
- BIT(ADIS16400_DIAG_STAT_SELF_TEST) |
- BIT(ADIS16400_DIAG_STAT_OVERFLOW) |
- BIT(ADIS16400_DIAG_STAT_SPI_FAIL) |
- BIT(ADIS16400_DIAG_STAT_FLASH_UPT) |
- BIT(ADIS16400_DIAG_STAT_POWER_HIGH) |
- BIT(ADIS16400_DIAG_STAT_POWER_LOW),
-};
-
static void adis16400_setup_chan_mask(struct adis16400_state *st)
{
const struct adis16400_chip_info *chip_info = st->variant;
@@ -1158,23 +1165,6 @@ static void adis16400_setup_chan_mask(struct adis16400_state *st)
st->avail_scan_mask[0] |= BIT(ch->scan_index);
}
}
-
-static struct adis_data *adis16400_adis_data_alloc(struct adis16400_state *st,
- struct device *dev)
-{
- struct adis_data *data;
-
- data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- memcpy(data, &adis16400_data, sizeof(*data));
-
- data->timeouts = st->variant->timeouts;
-
- return data;
-}
-
static int adis16400_probe(struct spi_device *spi)
{
struct adis16400_state *st;
@@ -1207,9 +1197,7 @@ static int adis16400_probe(struct spi_device *spi)
st->adis.burst->extra_len = sizeof(u16);
}
- adis16400_data = adis16400_adis_data_alloc(st, &spi->dev);
- if (IS_ERR(adis16400_data))
- return PTR_ERR(adis16400_data);
+ adis16400_data = &st->variant->adis_data;
ret = adis_init(&st->adis, indio_dev, spi, adis16400_data);
if (ret)
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 9539cfe4a259..0027683d0256 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -333,40 +333,6 @@ static int adis16460_enable_irq(struct adis *adis, bool enable)
return 0;
}
-static int adis16460_initial_setup(struct iio_dev *indio_dev)
-{
- struct adis16460 *st = iio_priv(indio_dev);
- uint16_t prod_id;
- unsigned int device_id;
- int ret;
-
- adis_reset(&st->adis);
- msleep(222);
-
- ret = adis_write_reg_16(&st->adis, ADIS16460_REG_GLOB_CMD, BIT(1));
- if (ret)
- return ret;
- msleep(75);
-
- ret = adis_check_status(&st->adis);
- if (ret)
- return ret;
-
- ret = adis_read_reg_16(&st->adis, ADIS16460_REG_PROD_ID, &prod_id);
- if (ret)
- return ret;
-
- ret = sscanf(indio_dev->name, "adis%u\n", &device_id);
- if (ret != 1)
- return -EINVAL;
-
- if (prod_id != device_id)
- dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
- device_id, prod_id);
-
- return 0;
-}
-
#define ADIS16460_DIAG_STAT_IN_CLK_OOS 7
#define ADIS16460_DIAG_STAT_FLASH_MEM 6
#define ADIS16460_DIAG_STAT_SELF_TEST 5
@@ -392,6 +358,10 @@ static const struct adis_timeout adis16460_timeouts = {
static const struct adis_data adis16460_data = {
.diag_stat_reg = ADIS16460_REG_DIAG_STAT,
.glob_cmd_reg = ADIS16460_REG_GLOB_CMD,
+ .prod_id_reg = ADIS16460_REG_PROD_ID,
+ .prod_id = 16460,
+ .self_test_mask = BIT(2),
+ .self_test_reg = ADIS16460_REG_GLOB_CMD,
.has_paging = false,
.read_delay = 5,
.write_delay = 5,
@@ -439,7 +409,7 @@ static int adis16460_probe(struct spi_device *spi)
adis16460_enable_irq(&st->adis, 0);
- ret = adis16460_initial_setup(indio_dev);
+ ret = __adis_initial_startup(&st->adis);
if (ret)
goto error_cleanup_buffer;
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index dac87f1001fd..cfae0e4476e7 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -138,7 +138,7 @@ struct adis16480_chip_info {
unsigned int max_dec_rate;
const unsigned int *filter_freqs;
bool has_pps_clk_mode;
- const struct adis_timeout *timeouts;
+ const struct adis_data adis_data;
};
enum adis16480_int_pin {
@@ -796,6 +796,58 @@ enum adis16480_variant {
ADIS16497_3,
};
+#define ADIS16480_DIAG_STAT_XGYRO_FAIL 0
+#define ADIS16480_DIAG_STAT_YGYRO_FAIL 1
+#define ADIS16480_DIAG_STAT_ZGYRO_FAIL 2
+#define ADIS16480_DIAG_STAT_XACCL_FAIL 3
+#define ADIS16480_DIAG_STAT_YACCL_FAIL 4
+#define ADIS16480_DIAG_STAT_ZACCL_FAIL 5
+#define ADIS16480_DIAG_STAT_XMAGN_FAIL 8
+#define ADIS16480_DIAG_STAT_YMAGN_FAIL 9
+#define ADIS16480_DIAG_STAT_ZMAGN_FAIL 10
+#define ADIS16480_DIAG_STAT_BARO_FAIL 11
+
+static const char * const adis16480_status_error_msgs[] = {
+ [ADIS16480_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_XMAGN_FAIL] = "X-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_YMAGN_FAIL] = "Y-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_ZMAGN_FAIL] = "Z-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_BARO_FAIL] = "Barometer self-test failure",
+};
+
+static int adis16480_enable_irq(struct adis *adis, bool enable);
+
+#define ADIS16480_DATA(_prod_id, _timeouts) \
+{ \
+ .diag_stat_reg = ADIS16480_REG_DIAG_STS, \
+ .glob_cmd_reg = ADIS16480_REG_GLOB_CMD, \
+ .prod_id_reg = ADIS16480_REG_PROD_ID, \
+ .prod_id = (_prod_id), \
+ .has_paging = true, \
+ .read_delay = 5, \
+ .write_delay = 5, \
+ .self_test_mask = BIT(1), \
+ .self_test_reg = ADIS16480_REG_GLOB_CMD, \
+ .status_error_msgs = adis16480_status_error_msgs, \
+ .status_error_mask = BIT(ADIS16480_DIAG_STAT_XGYRO_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_YGYRO_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_ZGYRO_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_XACCL_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_YACCL_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_ZACCL_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_XMAGN_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_YMAGN_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_ZMAGN_FAIL) | \
+ BIT(ADIS16480_DIAG_STAT_BARO_FAIL), \
+ .enable_irq = adis16480_enable_irq, \
+ .timeouts = (_timeouts), \
+}
+
static const struct adis_timeout adis16485_timeouts = {
.reset_ms = 560,
.sw_reset_ms = 120,
@@ -838,7 +890,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
- .timeouts = &adis16485_timeouts,
+ .adis_data = ADIS16480_DATA(16375, &adis16485_timeouts),
},
[ADIS16480] = {
.channels = adis16480_channels,
@@ -851,7 +903,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
- .timeouts = &adis16480_timeouts,
+ .adis_data = ADIS16480_DATA(16480, &adis16480_timeouts),
},
[ADIS16485] = {
.channels = adis16485_channels,
@@ -864,7 +916,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
- .timeouts = &adis16485_timeouts,
+ .adis_data = ADIS16480_DATA(16485, &adis16485_timeouts),
},
[ADIS16488] = {
.channels = adis16480_channels,
@@ -877,7 +929,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
- .timeouts = &adis16485_timeouts,
+ .adis_data = ADIS16480_DATA(16488, &adis16485_timeouts),
},
[ADIS16490] = {
.channels = adis16485_channels,
@@ -891,7 +943,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .timeouts = &adis16495_timeouts,
+ .adis_data = ADIS16480_DATA(16490, &adis16495_timeouts),
},
[ADIS16495_1] = {
.channels = adis16485_channels,
@@ -905,7 +957,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .timeouts = &adis16495_1_timeouts,
+ .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts),
},
[ADIS16495_2] = {
.channels = adis16485_channels,
@@ -919,7 +971,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .timeouts = &adis16495_1_timeouts,
+ .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts),
},
[ADIS16495_3] = {
.channels = adis16485_channels,
@@ -933,7 +985,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .timeouts = &adis16495_1_timeouts,
+ .adis_data = ADIS16480_DATA(16495, &adis16495_1_timeouts),
},
[ADIS16497_1] = {
.channels = adis16485_channels,
@@ -947,7 +999,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .timeouts = &adis16495_1_timeouts,
+ .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts),
},
[ADIS16497_2] = {
.channels = adis16485_channels,
@@ -961,7 +1013,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .timeouts = &adis16495_1_timeouts,
+ .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts),
},
[ADIS16497_3] = {
.channels = adis16485_channels,
@@ -975,7 +1027,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
- .timeouts = &adis16495_1_timeouts,
+ .adis_data = ADIS16480_DATA(16497, &adis16495_1_timeouts),
},
};
@@ -1014,87 +1066,6 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
}
-static int adis16480_initial_setup(struct iio_dev *indio_dev)
-{
- struct adis16480 *st = iio_priv(indio_dev);
- uint16_t prod_id;
- unsigned int device_id;
- int ret;
-
- adis_reset(&st->adis);
- msleep(70);
-
- ret = adis_write_reg_16(&st->adis, ADIS16480_REG_GLOB_CMD, BIT(1));
- if (ret)
- return ret;
- msleep(30);
-
- ret = adis_check_status(&st->adis);
- if (ret)
- return ret;
-
- ret = adis_read_reg_16(&st->adis, ADIS16480_REG_PROD_ID, &prod_id);
- if (ret)
- return ret;
-
- ret = sscanf(indio_dev->name, "adis%u\n", &device_id);
- if (ret != 1)
- return -EINVAL;
-
- if (prod_id != device_id)
- dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
- device_id, prod_id);
-
- return 0;
-}
-
-#define ADIS16480_DIAG_STAT_XGYRO_FAIL 0
-#define ADIS16480_DIAG_STAT_YGYRO_FAIL 1
-#define ADIS16480_DIAG_STAT_ZGYRO_FAIL 2
-#define ADIS16480_DIAG_STAT_XACCL_FAIL 3
-#define ADIS16480_DIAG_STAT_YACCL_FAIL 4
-#define ADIS16480_DIAG_STAT_ZACCL_FAIL 5
-#define ADIS16480_DIAG_STAT_XMAGN_FAIL 8
-#define ADIS16480_DIAG_STAT_YMAGN_FAIL 9
-#define ADIS16480_DIAG_STAT_ZMAGN_FAIL 10
-#define ADIS16480_DIAG_STAT_BARO_FAIL 11
-
-static const char * const adis16480_status_error_msgs[] = {
- [ADIS16480_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
- [ADIS16480_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
- [ADIS16480_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
- [ADIS16480_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
- [ADIS16480_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
- [ADIS16480_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
- [ADIS16480_DIAG_STAT_XMAGN_FAIL] = "X-axis magnetometer self-test failure",
- [ADIS16480_DIAG_STAT_YMAGN_FAIL] = "Y-axis magnetometer self-test failure",
- [ADIS16480_DIAG_STAT_ZMAGN_FAIL] = "Z-axis magnetometer self-test failure",
- [ADIS16480_DIAG_STAT_BARO_FAIL] = "Barometer self-test failure",
-};
-
-static const struct adis_data adis16480_data = {
- .diag_stat_reg = ADIS16480_REG_DIAG_STS,
- .glob_cmd_reg = ADIS16480_REG_GLOB_CMD,
- .has_paging = true,
-
- .read_delay = 5,
- .write_delay = 5,
-
- .status_error_msgs = adis16480_status_error_msgs,
- .status_error_mask = BIT(ADIS16480_DIAG_STAT_XGYRO_FAIL) |
- BIT(ADIS16480_DIAG_STAT_YGYRO_FAIL) |
- BIT(ADIS16480_DIAG_STAT_ZGYRO_FAIL) |
- BIT(ADIS16480_DIAG_STAT_XACCL_FAIL) |
- BIT(ADIS16480_DIAG_STAT_YACCL_FAIL) |
- BIT(ADIS16480_DIAG_STAT_ZACCL_FAIL) |
- BIT(ADIS16480_DIAG_STAT_XMAGN_FAIL) |
- BIT(ADIS16480_DIAG_STAT_YMAGN_FAIL) |
- BIT(ADIS16480_DIAG_STAT_ZMAGN_FAIL) |
- BIT(ADIS16480_DIAG_STAT_BARO_FAIL),
-
- .enable_irq = adis16480_enable_irq,
-};
-
static int adis16480_config_irq_pin(struct device_node *of_node,
struct adis16480 *st)
{
@@ -1245,22 +1216,6 @@ static int adis16480_get_ext_clocks(struct adis16480 *st)
return 0;
}
-static struct adis_data *adis16480_adis_data_alloc(struct adis16480 *st,
- struct device *dev)
-{
- struct adis_data *data;
-
- data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- memcpy(data, &adis16480_data, sizeof(*data));
-
- data->timeouts = st->chip_info->timeouts;
-
- return data;
-}
-
static int adis16480_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -1285,26 +1240,28 @@ static int adis16480_probe(struct spi_device *spi)
indio_dev->info = &adis16480_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- adis16480_data = adis16480_adis_data_alloc(st, &spi->dev);
- if (IS_ERR(adis16480_data))
- return PTR_ERR(adis16480_data);
+ adis16480_data = &st->chip_info->adis_data;
ret = adis_init(&st->adis, indio_dev, spi, adis16480_data);
if (ret)
return ret;
- ret = adis16480_config_irq_pin(spi->dev.of_node, st);
+ ret = __adis_initial_startup(&st->adis);
if (ret)
return ret;
+ ret = adis16480_config_irq_pin(spi->dev.of_node, st);
+ if (ret)
+ goto error_stop_device;
+
ret = adis16480_get_ext_clocks(st);
if (ret)
- return ret;
+ goto error_stop_device;
if (!IS_ERR_OR_NULL(st->ext_clk)) {
ret = adis16480_ext_clk_config(st, spi->dev.of_node, true);
if (ret)
- return ret;
+ goto error_stop_device;
st->clk_freq = clk_get_rate(st->ext_clk);
st->clk_freq *= 1000; /* micro */
@@ -1316,24 +1273,20 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
goto error_clk_disable_unprepare;
- ret = adis16480_initial_setup(indio_dev);
- if (ret)
- goto error_cleanup_buffer;
-
ret = iio_device_register(indio_dev);
if (ret)
- goto error_stop_device;
+ goto error_cleanup_buffer;
adis16480_debugfs_init(indio_dev);
return 0;
-error_stop_device:
- adis16480_stop_device(indio_dev);
error_cleanup_buffer:
adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
error_clk_disable_unprepare:
clk_disable_unprepare(st->ext_clk);
+error_stop_device:
+ adis16480_stop_device(indio_dev);
return ret;
}
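
The reshuffled error labels above follow the usual kernel unwind convention: resources are released in the reverse order of their acquisition, so the step performed first (starting the device) is undone last. A minimal sketch of the pattern, with all helper names hypothetical:

    static int example_probe(struct device *dev)
    {
        int ret;

        ret = example_start_device(dev);        /* first init step */
        if (ret)
            return ret;

        ret = example_clk_enable(dev);
        if (ret)
            goto err_stop_device;

        ret = example_register(dev);
        if (ret)
            goto err_clk_disable;

        return 0;

    err_clk_disable:
        example_clk_disable(dev);
    err_stop_device:
        example_stop_device(dev);               /* undo the first step last */
        return ret;
    }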
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 3f4dd5c00b03..04e5e2a0fd6b 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -97,7 +97,8 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
if (j != scan_count)
adis->xfer[j].cs_change = 1;
adis->xfer[j].len = 2;
- adis->xfer[j].delay_usecs = adis->data->read_delay;
+ adis->xfer[j].delay.value = adis->data->read_delay;
+ adis->xfer[j].delay.unit = SPI_DELAY_UNIT_USECS;
if (j < scan_count)
adis->xfer[j].tx_buf = &tx[j];
if (j >= 1)
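
The adis_buffer.c hunk above migrates from the deprecated spi_transfer.delay_usecs field to the struct spi_delay embedded in every transfer, which carries an explicit unit. A minimal sketch of the replacement API; the helper name is hypothetical:

    #include <linux/spi/spi.h>

    static void example_fill_xfer(struct spi_transfer *xfer, void *rx,
                                  u16 delay_us)
    {
        xfer->rx_buf = rx;
        xfer->len = 2;
        xfer->cs_change = 1;
        /* per-transfer delay, replacing the removed delay_usecs field */
        xfer->delay.value = delay_us;
        xfer->delay.unit = SPI_DELAY_UNIT_USECS;
    }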
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 017bc0fcc365..7137ea6f25db 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -15,9 +15,9 @@ config INV_MPU6050_I2C
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6050/6500/6515,
- MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
- over I2C.
+ This driver supports the Invensense MPU6050/9150,
+ MPU6500/6515/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 and
+ IAM20680 motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -27,8 +27,8 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6000/6500/6515,
- MPU9250/9255 and ICM20608/20602 motion tracking devices
- over SPI.
+ This driver supports the Invensense MPU6000,
+ MPU6500/6515/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 and
+ IAM20680 motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 5096fc49012d..0b8d2f7a0165 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -16,6 +16,8 @@
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include "inv_mpu_iio.h"
#include "inv_mpu_magn.h"
@@ -99,9 +101,31 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = {
};
static const struct inv_mpu6050_chip_config chip_config_6050 = {
+ .clk = INV_CLK_INTERNAL,
.fsr = INV_MPU6050_FSR_2000DPS,
.lpf = INV_MPU6050_FILTER_20HZ,
- .divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
+ .divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(50),
+ .gyro_en = true,
+ .accl_en = true,
+ .temp_en = true,
+ .magn_en = false,
+ .gyro_fifo_enable = false,
+ .accl_fifo_enable = false,
+ .temp_fifo_enable = false,
+ .magn_fifo_enable = false,
+ .accl_fs = INV_MPU6050_FS_02G,
+ .user_ctrl = 0,
+};
+
+static const struct inv_mpu6050_chip_config chip_config_6500 = {
+ .clk = INV_CLK_PLL,
+ .fsr = INV_MPU6050_FSR_2000DPS,
+ .lpf = INV_MPU6050_FILTER_20HZ,
+ .divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(50),
+ .gyro_en = true,
+ .accl_en = true,
+ .temp_en = true,
+ .magn_en = false,
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
.temp_fifo_enable = false,
@@ -124,7 +148,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.whoami = INV_MPU6500_WHOAMI_VALUE,
.name = "MPU6500",
.reg = &reg_set_6500,
- .config = &chip_config_6050,
+ .config = &chip_config_6500,
.fifo_size = 512,
.temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
@@ -132,7 +156,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.whoami = INV_MPU6515_WHOAMI_VALUE,
.name = "MPU6515",
.reg = &reg_set_6500,
- .config = &chip_config_6050,
+ .config = &chip_config_6500,
.fifo_size = 512,
.temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
@@ -156,7 +180,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.whoami = INV_MPU9250_WHOAMI_VALUE,
.name = "MPU9250",
.reg = &reg_set_6500,
- .config = &chip_config_6050,
+ .config = &chip_config_6500,
.fifo_size = 512,
.temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
@@ -164,7 +188,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
.whoami = INV_MPU9255_WHOAMI_VALUE,
.name = "MPU9255",
.reg = &reg_set_6500,
- .config = &chip_config_6050,
+ .config = &chip_config_6500,
.fifo_size = 512,
.temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
@@ -172,104 +196,242 @@ static const struct inv_mpu6050_hw hw_info[] = {
.whoami = INV_ICM20608_WHOAMI_VALUE,
.name = "ICM20608",
.reg = &reg_set_6500,
- .config = &chip_config_6050,
+ .config = &chip_config_6500,
.fifo_size = 512,
.temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
},
{
+ .whoami = INV_ICM20609_WHOAMI_VALUE,
+ .name = "ICM20609",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 4 * 1024,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ },
+ {
+ .whoami = INV_ICM20689_WHOAMI_VALUE,
+ .name = "ICM20689",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 4 * 1024,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ },
+ {
.whoami = INV_ICM20602_WHOAMI_VALUE,
.name = "ICM20602",
.reg = &reg_set_icm20602,
- .config = &chip_config_6050,
+ .config = &chip_config_6500,
.fifo_size = 1008,
.temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
},
+ {
+ .whoami = INV_ICM20690_WHOAMI_VALUE,
+ .name = "ICM20690",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 1024,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ },
+ {
+ .whoami = INV_IAM20680_WHOAMI_VALUE,
+ .name = "IAM20680",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 512,
+ .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
+ },
};
-int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
+static int inv_mpu6050_pwr_mgmt_1_write(struct inv_mpu6050_state *st, bool sleep,
+ int clock, int temp_dis)
{
- unsigned int d, mgmt_1;
- int result;
- /*
- * switch clock needs to be careful. Only when gyro is on, can
- * clock source be switched to gyro. Otherwise, it must be set to
- * internal clock
- */
- if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) {
- result = regmap_read(st->map, st->reg->pwr_mgmt_1, &mgmt_1);
- if (result)
- return result;
+ u8 val;
+
+ if (clock < 0)
+ clock = st->chip_config.clk;
+ if (temp_dis < 0)
+ temp_dis = !st->chip_config.temp_en;
+
+ val = clock & INV_MPU6050_BIT_CLK_MASK;
+ if (temp_dis)
+ val |= INV_MPU6050_BIT_TEMP_DIS;
+ if (sleep)
+ val |= INV_MPU6050_BIT_SLEEP;
- mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK;
+ dev_dbg(regmap_get_device(st->map), "pwr_mgmt_1: 0x%x\n", val);
+ return regmap_write(st->map, st->reg->pwr_mgmt_1, val);
+}
+
+static int inv_mpu6050_clock_switch(struct inv_mpu6050_state *st,
+ unsigned int clock)
+{
+ int ret;
+
+ switch (st->chip_type) {
+ case INV_MPU6050:
+ case INV_MPU6000:
+ case INV_MPU9150:
+ /* old chips: switch clock manually */
+ ret = inv_mpu6050_pwr_mgmt_1_write(st, false, clock, -1);
+ if (ret)
+ return ret;
+ st->chip_config.clk = clock;
+ break;
+ default:
+ /* automatic clock switching, nothing to do */
+ break;
}
- if ((mask == INV_MPU6050_BIT_PWR_GYRO_STBY) && (!en)) {
- /*
- * turning off gyro requires switch to internal clock first.
- * Then turn off gyro engine
- */
- mgmt_1 |= INV_CLK_INTERNAL;
- result = regmap_write(st->map, st->reg->pwr_mgmt_1, mgmt_1);
- if (result)
- return result;
+ return 0;
+}
+
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en,
+ unsigned int mask)
+{
+ unsigned int sleep;
+ u8 pwr_mgmt2, user_ctrl;
+ int ret;
+
+ /* filter out requests that are already satisfied */
+ if (mask & INV_MPU6050_SENSOR_ACCL && en == st->chip_config.accl_en)
+ mask &= ~INV_MPU6050_SENSOR_ACCL;
+ if (mask & INV_MPU6050_SENSOR_GYRO && en == st->chip_config.gyro_en)
+ mask &= ~INV_MPU6050_SENSOR_GYRO;
+ if (mask & INV_MPU6050_SENSOR_TEMP && en == st->chip_config.temp_en)
+ mask &= ~INV_MPU6050_SENSOR_TEMP;
+ if (mask & INV_MPU6050_SENSOR_MAGN && en == st->chip_config.magn_en)
+ mask &= ~INV_MPU6050_SENSOR_MAGN;
+ if (mask == 0)
+ return 0;
+
+ /* turn on/off temperature sensor */
+ if (mask & INV_MPU6050_SENSOR_TEMP) {
+ ret = inv_mpu6050_pwr_mgmt_1_write(st, false, -1, !en);
+ if (ret)
+ return ret;
+ st->chip_config.temp_en = en;
}
- result = regmap_read(st->map, st->reg->pwr_mgmt_2, &d);
- if (result)
- return result;
- if (en)
- d &= ~mask;
- else
- d |= mask;
- result = regmap_write(st->map, st->reg->pwr_mgmt_2, d);
- if (result)
- return result;
+ /* update user_ctrl for driving magnetometer */
+ if (mask & INV_MPU6050_SENSOR_MAGN) {
+ user_ctrl = st->chip_config.user_ctrl;
+ if (en)
+ user_ctrl |= INV_MPU6050_BIT_I2C_MST_EN;
+ else
+ user_ctrl &= ~INV_MPU6050_BIT_I2C_MST_EN;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ return ret;
+ st->chip_config.user_ctrl = user_ctrl;
+ st->chip_config.magn_en = en;
+ }
- if (en) {
- /* Wait for output to stabilize */
- msleep(INV_MPU6050_TEMP_UP_TIME);
- if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) {
- /* switch internal clock to PLL */
- mgmt_1 |= INV_CLK_PLL;
- result = regmap_write(st->map,
- st->reg->pwr_mgmt_1, mgmt_1);
- if (result)
- return result;
+ /* manage accel & gyro engines */
+ if (mask & (INV_MPU6050_SENSOR_ACCL | INV_MPU6050_SENSOR_GYRO)) {
+ /* compute the current power management 2 register value */
+ pwr_mgmt2 = 0;
+ if (!st->chip_config.accl_en)
+ pwr_mgmt2 |= INV_MPU6050_BIT_PWR_ACCL_STBY;
+ if (!st->chip_config.gyro_en)
+ pwr_mgmt2 |= INV_MPU6050_BIT_PWR_GYRO_STBY;
+
+ /* update to new requested value */
+ if (mask & INV_MPU6050_SENSOR_ACCL) {
+ if (en)
+ pwr_mgmt2 &= ~INV_MPU6050_BIT_PWR_ACCL_STBY;
+ else
+ pwr_mgmt2 |= INV_MPU6050_BIT_PWR_ACCL_STBY;
+ }
+ if (mask & INV_MPU6050_SENSOR_GYRO) {
+ if (en)
+ pwr_mgmt2 &= ~INV_MPU6050_BIT_PWR_GYRO_STBY;
+ else
+ pwr_mgmt2 |= INV_MPU6050_BIT_PWR_GYRO_STBY;
+ }
+
+ /* switch clock to internal when turning gyro off */
+ if (mask & INV_MPU6050_SENSOR_GYRO && !en) {
+ ret = inv_mpu6050_clock_switch(st, INV_CLK_INTERNAL);
+ if (ret)
+ return ret;
+ }
+
+ /* update sensors engine */
+ dev_dbg(regmap_get_device(st->map), "pwr_mgmt_2: 0x%x\n",
+ pwr_mgmt2);
+ ret = regmap_write(st->map, st->reg->pwr_mgmt_2, pwr_mgmt2);
+ if (ret)
+ return ret;
+ if (mask & INV_MPU6050_SENSOR_ACCL)
+ st->chip_config.accl_en = en;
+ if (mask & INV_MPU6050_SENSOR_GYRO)
+ st->chip_config.gyro_en = en;
+
+ /* compute the time required for the sensors to stabilize */
+ sleep = 0;
+ if (en) {
+ if (mask & INV_MPU6050_SENSOR_ACCL) {
+ if (sleep < INV_MPU6050_ACCEL_UP_TIME)
+ sleep = INV_MPU6050_ACCEL_UP_TIME;
+ }
+ if (mask & INV_MPU6050_SENSOR_GYRO) {
+ if (sleep < INV_MPU6050_GYRO_UP_TIME)
+ sleep = INV_MPU6050_GYRO_UP_TIME;
+ }
+ } else {
+ if (mask & INV_MPU6050_SENSOR_GYRO) {
+ if (sleep < INV_MPU6050_GYRO_DOWN_TIME)
+ sleep = INV_MPU6050_GYRO_DOWN_TIME;
+ }
+ }
+ if (sleep)
+ msleep(sleep);
+
+ /* switch clock to PLL when turning gyro on */
+ if (mask & INV_MPU6050_SENSOR_GYRO && en) {
+ ret = inv_mpu6050_clock_switch(st, INV_CLK_PLL);
+ if (ret)
+ return ret;
}
}
return 0;
}
-int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
+static int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st,
+ bool power_on)
{
int result;
- if (power_on) {
- if (!st->powerup_count) {
- result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
- if (result)
- return result;
- usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
- INV_MPU6050_REG_UP_TIME_MAX);
- }
- st->powerup_count++;
- } else {
- if (st->powerup_count == 1) {
- result = regmap_write(st->map, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_SLEEP);
- if (result)
- return result;
- }
- st->powerup_count--;
- }
+ result = inv_mpu6050_pwr_mgmt_1_write(st, !power_on, -1, -1);
+ if (result)
+ return result;
- dev_dbg(regmap_get_device(st->map), "set power %d, count=%u\n",
- power_on, st->powerup_count);
+ if (power_on)
+ usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
+ INV_MPU6050_REG_UP_TIME_MAX);
return 0;
}
-EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
+
+static int inv_mpu6050_set_gyro_fsr(struct inv_mpu6050_state *st,
+ enum inv_mpu6050_fsr_e val)
+{
+ unsigned int gyro_shift;
+ u8 data;
+
+ switch (st->chip_type) {
+ case INV_ICM20690:
+ gyro_shift = INV_ICM20690_GYRO_CONFIG_FSR_SHIFT;
+ break;
+ default:
+ gyro_shift = INV_MPU6050_GYRO_CONFIG_FSR_SHIFT;
+ break;
+ }
+
+ data = val << gyro_shift;
+ return regmap_write(st->map, st->reg->gyro_config, data);
+}
/**
* inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
@@ -286,20 +448,23 @@ static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
if (result)
return result;
+ /* set accel lpf */
switch (st->chip_type) {
case INV_MPU6050:
case INV_MPU6000:
case INV_MPU9150:
/* old chips, nothing to do */
- result = 0;
+ return 0;
+ case INV_ICM20689:
+ case INV_ICM20690:
+ /* set FIFO size to maximum value */
+ val |= INV_ICM20689_BITS_FIFO_SIZE_MAX;
break;
default:
- /* set accel lpf */
- result = regmap_write(st->map, st->reg->accel_lpf, val);
break;
}
- return result;
+ return regmap_write(st->map, st->reg->accel_lpf, val);
}
/**
@@ -317,35 +482,28 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
u8 d;
struct inv_mpu6050_state *st = iio_priv(indio_dev);
- result = inv_mpu6050_set_power_itg(st, true);
+ result = inv_mpu6050_set_gyro_fsr(st, st->chip_config.fsr);
if (result)
return result;
- d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
- result = regmap_write(st->map, st->reg->gyro_config, d);
- if (result)
- goto error_power_off;
- result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
+ result = inv_mpu6050_set_lpf_regs(st, st->chip_config.lpf);
if (result)
- goto error_power_off;
+ return result;
- d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE);
+ d = st->chip_config.divider;
result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
- goto error_power_off;
+ return result;
- d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+ d = (st->chip_config.accl_fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
result = regmap_write(st->map, st->reg->accl_config, d);
if (result)
- goto error_power_off;
+ return result;
result = regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask);
if (result)
return result;
- memcpy(&st->chip_config, hw_info[st->chip_type].config,
- sizeof(struct inv_mpu6050_chip_config));
-
/*
* Internal chip period is 1ms (1kHz).
 * Let's use the theoretical value at the beginning, before measuring
@@ -356,13 +514,9 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
/* magn chip init, noop if not present in the chip */
result = inv_mpu_magn_probe(st);
if (result)
- goto error_power_off;
-
- return inv_mpu6050_set_power_itg(st, false);
+ return result;
-error_power_off:
- inv_mpu6050_set_power_itg(st, false);
- return result;
+ return 0;
}
static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg,
@@ -399,45 +553,85 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
int *val)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct device *pdev = regmap_get_device(st->map);
+ unsigned int freq_hz, period_us, min_sleep_us, max_sleep_us;
int result;
int ret;
- result = inv_mpu6050_set_power_itg(st, true);
- if (result)
+ /* compute sample period */
+ freq_hz = INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
+ period_us = 1000000 / freq_hz;
+
+ result = pm_runtime_get_sync(pdev);
+ if (result < 0) {
+ pm_runtime_put_noidle(pdev);
return result;
+ }
switch (chan->type) {
case IIO_ANGL_VEL:
- result = inv_mpu6050_switch_engine(st, true,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
- if (result)
- goto error_power_off;
+ if (!st->chip_config.gyro_en) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_SENSOR_GYRO);
+ if (result)
+ goto error_power_off;
+ /* need to wait 2 periods to have first valid sample */
+ min_sleep_us = 2 * period_us;
+ max_sleep_us = 2 * (period_us + period_us / 2);
+ usleep_range(min_sleep_us, max_sleep_us);
+ }
ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
chan->channel2, val);
- result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
- if (result)
- goto error_power_off;
break;
case IIO_ACCEL:
- result = inv_mpu6050_switch_engine(st, true,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
- if (result)
- goto error_power_off;
+ if (!st->chip_config.accl_en) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_SENSOR_ACCL);
+ if (result)
+ goto error_power_off;
+ /* wait 1 period for first sample availability */
+ min_sleep_us = period_us;
+ max_sleep_us = period_us + period_us / 2;
+ usleep_range(min_sleep_us, max_sleep_us);
+ }
ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
chan->channel2, val);
- result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
- if (result)
- goto error_power_off;
break;
case IIO_TEMP:
- /* wait for stablization */
- msleep(INV_MPU6050_SENSOR_UP_TIME);
+ /* temperature sensor works only with accel and/or gyro */
+ if (!st->chip_config.accl_en && !st->chip_config.gyro_en) {
+ result = -EBUSY;
+ goto error_power_off;
+ }
+ if (!st->chip_config.temp_en) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_SENSOR_TEMP);
+ if (result)
+ goto error_power_off;
+ /* wait 1 period for first sample availability */
+ min_sleep_us = period_us;
+ max_sleep_us = period_us + period_us / 2;
+ usleep_range(min_sleep_us, max_sleep_us);
+ }
ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
IIO_MOD_X, val);
break;
case IIO_MAGN:
+ if (!st->chip_config.magn_en) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_SENSOR_MAGN);
+ if (result)
+ goto error_power_off;
+ /* frequency is limited for magnetometer */
+ if (freq_hz > INV_MPU_MAGN_FREQ_HZ_MAX) {
+ freq_hz = INV_MPU_MAGN_FREQ_HZ_MAX;
+ period_us = 1000000 / freq_hz;
+ }
+ /* need to wait 2 periods to have first valid sample */
+ min_sleep_us = 2 * period_us;
+ max_sleep_us = 2 * (period_us + period_us / 2);
+ usleep_range(min_sleep_us, max_sleep_us);
+ }
ret = inv_mpu_magn_read(st, chan->channel2, val);
break;
default:
@@ -445,14 +639,13 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
break;
}
- result = inv_mpu6050_set_power_itg(st, false);
- if (result)
- goto error_power_off;
+ pm_runtime_mark_last_busy(pdev);
+ pm_runtime_put_autosuspend(pdev);
return ret;
error_power_off:
- inv_mpu6050_set_power_itg(st, false);
+ pm_runtime_put_autosuspend(pdev);
return result;
}
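
The read path above trades manual power sequencing for runtime-PM reference counting. A minimal sketch of the get/put pattern it uses, assuming a hypothetical my_read_hw() register accessor:

    #include <linux/pm_runtime.h>

    static int example_read(struct device *dev, int *val)
    {
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume device if suspended */
        if (ret < 0) {
            pm_runtime_put_noidle(dev);         /* balance the reference on error */
            return ret;
        }

        ret = my_read_hw(dev, val);             /* hypothetical register access */

        pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev);        /* suspend after the idle delay */
        return ret;
    }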
@@ -533,12 +726,10 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
{
int result, i;
- u8 d;
for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
if (gyro_scale_6050[i] == val) {
- d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
- result = regmap_write(st->map, st->reg->gyro_config, d);
+ result = inv_mpu6050_set_gyro_fsr(st, i);
if (result)
return result;
@@ -593,6 +784,7 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct device *pdev = regmap_get_device(st->map);
int result;
/*
@@ -604,9 +796,11 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
return result;
mutex_lock(&st->lock);
- result = inv_mpu6050_set_power_itg(st, true);
- if (result)
+ result = pm_runtime_get_sync(pdev);
+ if (result < 0) {
+ pm_runtime_put_noidle(pdev);
goto error_write_raw_unlock;
+ }
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -644,7 +838,8 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
break;
}
- result |= inv_mpu6050_set_power_itg(st, false);
+ pm_runtime_mark_last_busy(pdev);
+ pm_runtime_put_autosuspend(pdev);
error_write_raw_unlock:
mutex_unlock(&st->lock);
iio_device_release_direct_mode(indio_dev);
@@ -655,30 +850,32 @@ error_write_raw_unlock:
/**
 * inv_mpu6050_set_lpf() - set low pass filter based on fifo rate.
*
- * Based on the Nyquist principle, the sampling rate must
- * exceed twice of the bandwidth of the signal, or there
- * would be alising. This function basically search for the
- * correct low pass parameters based on the fifo rate, e.g,
- * sampling frequency.
+ * Based on the Nyquist principle, the bandwidth of the low
+ * pass filter must not exceed the signal sampling rate divided
+ * by 2, or there would be aliasing.
+ * This function basically searches for the correct low-pass
+ * parameters based on the fifo rate, i.e., the sampling frequency.
*
 * lpf is set automatically when setting the sampling rate to avoid aliasing.
*/
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{
- static const int hz[] = {188, 98, 42, 20, 10, 5};
+ static const int hz[] = {400, 200, 90, 40, 20, 10};
static const int d[] = {
- INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
- INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_200HZ, INV_MPU6050_FILTER_100HZ,
+ INV_MPU6050_FILTER_45HZ, INV_MPU6050_FILTER_20HZ,
INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ
};
- int i, h, result;
+ int i, result;
u8 data;
- h = (rate >> 1);
- i = 0;
- while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
- i++;
- data = d[i];
+ data = INV_MPU6050_FILTER_5HZ;
+ for (i = 0; i < ARRAY_SIZE(hz); ++i) {
+ if (rate >= hz[i]) {
+ data = d[i];
+ break;
+ }
+ }
result = inv_mpu6050_set_lpf_regs(st, data);
if (result)
return result;
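
The rewritten loop above scans the table from the widest bandwidth down and picks the first filter whose listed rate does not exceed the requested sampling rate, falling back to the narrowest (5 Hz) setting. A standalone sketch of the same search, with illustrative tables and filter codes:

    #include <linux/kernel.h>

    static int example_pick_lpf(int rate_hz)
    {
        static const int hz[] = {400, 200, 90, 40, 20, 10};
        static const int setting[] = {1, 2, 3, 4, 5, 6};    /* illustrative codes */
        int i;

        for (i = 0; i < ARRAY_SIZE(hz); i++) {
            if (rate_hz >= hz[i])
                return setting[i];      /* first entry not above the rate */
        }
        return 6;                       /* narrowest filter for very low rates */
    }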
@@ -699,6 +896,7 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
int result;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct device *pdev = regmap_get_device(st->map);
if (kstrtoint(buf, 10, &fifo_rate))
return -EINVAL;
@@ -706,10 +904,6 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
return -EINVAL;
- result = iio_device_claim_direct_mode(indio_dev);
- if (result)
- return result;
-
/* compute the chip sample rate divider */
d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(fifo_rate);
/* compute back the fifo rate to handle truncation cases */
@@ -720,9 +914,11 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
result = 0;
goto fifo_rate_fail_unlock;
}
- result = inv_mpu6050_set_power_itg(st, true);
- if (result)
+ result = pm_runtime_get_sync(pdev);
+ if (result < 0) {
+ pm_runtime_put_noidle(pdev);
goto fifo_rate_fail_unlock;
+ }
result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
@@ -738,11 +934,11 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
goto fifo_rate_fail_power_off;
+ pm_runtime_mark_last_busy(pdev);
fifo_rate_fail_power_off:
- result |= inv_mpu6050_set_power_itg(st, false);
+ pm_runtime_put_autosuspend(pdev);
fifo_rate_fail_unlock:
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
if (result)
return result;
@@ -1066,11 +1262,13 @@ static const struct iio_info mpu_info = {
static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
{
int result;
- unsigned int regval;
+ unsigned int regval, mask;
int i;
st->hw = &hw_info[st->chip_type];
st->reg = hw_info[st->chip_type].reg;
+ memcpy(&st->chip_config, hw_info[st->chip_type].config,
+ sizeof(st->chip_config));
/* check chip self-identification */
result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, &regval);
@@ -1102,6 +1300,24 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
if (result)
return result;
msleep(INV_MPU6050_POWER_UP_TIME);
+ switch (st->chip_type) {
+ case INV_MPU6000:
+ case INV_MPU6500:
+ case INV_MPU6515:
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /* reset signal path (required for spi connection) */
+ regval = INV_MPU6050_BIT_TEMP_RST | INV_MPU6050_BIT_ACCEL_RST |
+ INV_MPU6050_BIT_GYRO_RST;
+ result = regmap_write(st->map, INV_MPU6050_REG_SIGNAL_PATH_RESET,
+ regval);
+ if (result)
+ return result;
+ msleep(INV_MPU6050_POWER_UP_TIME);
+ break;
+ default:
+ break;
+ }
/*
* Turn power on. After reset, the sleep bit could be on
@@ -1112,17 +1328,13 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
result = inv_mpu6050_set_power_itg(st, true);
if (result)
return result;
-
- result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
- if (result)
- goto error_power_off;
- result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
+ mask = INV_MPU6050_SENSOR_ACCL | INV_MPU6050_SENSOR_GYRO |
+ INV_MPU6050_SENSOR_TEMP | INV_MPU6050_SENSOR_MAGN;
+ result = inv_mpu6050_switch_engine(st, false, mask);
if (result)
goto error_power_off;
- return inv_mpu6050_set_power_itg(st, false);
+ return 0;
error_power_off:
inv_mpu6050_set_power_itg(st, false);
@@ -1139,7 +1351,7 @@ static int inv_mpu_core_enable_regulator_vddio(struct inv_mpu6050_state *st)
"Failed to enable vddio regulator: %d\n", result);
} else {
/* Give the device a little bit of time to start up. */
- usleep_range(35000, 70000);
+ usleep_range(3000, 5000);
}
return result;
@@ -1170,6 +1382,14 @@ static void inv_mpu_core_disable_regulator_action(void *_data)
inv_mpu_core_disable_regulator_vddio(st);
}
+static void inv_mpu_pm_disable(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_put_sync_suspend(dev);
+ pm_runtime_disable(dev);
+}
+
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type)
{
@@ -1194,7 +1414,6 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
st = iio_priv(indio_dev);
mutex_init(&st->lock);
st->chip_type = chip_type;
- st->powerup_count = 0;
st->irq = irq;
st->map = regmap;
@@ -1259,6 +1478,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
dev_err(dev, "Failed to enable vdd regulator: %d\n", result);
return result;
}
+ msleep(INV_MPU6050_POWER_UP_TIME);
result = inv_mpu_core_enable_regulator_vddio(st);
if (result) {
@@ -1287,7 +1507,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
result = inv_mpu6050_init_config(indio_dev);
if (result) {
dev_err(dev, "Could not initialize device.\n");
- return result;
+ goto error_power_off;
}
dev_set_drvdata(dev, indio_dev);
@@ -1299,8 +1519,24 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->name = dev_name(dev);
/* requires parent device set in indio_dev */
- if (inv_mpu_bus_setup)
- inv_mpu_bus_setup(indio_dev);
+ if (inv_mpu_bus_setup) {
+ result = inv_mpu_bus_setup(indio_dev);
+ if (result)
+ goto error_power_off;
+ }
+
+ /* chip init is done, turning on runtime power management */
+ result = pm_runtime_set_active(dev);
+ if (result)
+ goto error_power_off;
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, INV_MPU6050_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+ result = devm_add_action_or_reset(dev, inv_mpu_pm_disable, dev);
+ if (result)
+ return result;
switch (chip_type) {
case INV_MPU9150:
@@ -1359,14 +1595,17 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
}
return 0;
+
+error_power_off:
+ inv_mpu6050_set_power_itg(st, false);
+ return result;
}
EXPORT_SYMBOL_GPL(inv_mpu_core_probe);
-#ifdef CONFIG_PM_SLEEP
-
-static int inv_mpu_resume(struct device *dev)
+static int __maybe_unused inv_mpu_resume(struct device *dev)
{
- struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
int result;
mutex_lock(&st->lock);
@@ -1375,27 +1614,110 @@ static int inv_mpu_resume(struct device *dev)
goto out_unlock;
result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ goto out_unlock;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ result = inv_mpu6050_switch_engine(st, true, st->suspended_sensors);
+ if (result)
+ goto out_unlock;
+
+ if (iio_buffer_enabled(indio_dev))
+ result = inv_mpu6050_prepare_fifo(st, true);
+
out_unlock:
mutex_unlock(&st->lock);
return result;
}
-static int inv_mpu_suspend(struct device *dev)
+static int __maybe_unused inv_mpu_suspend(struct device *dev)
{
- struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
int result;
mutex_lock(&st->lock);
+
+ st->suspended_sensors = 0;
+ if (pm_runtime_suspended(dev)) {
+ result = 0;
+ goto out_unlock;
+ }
+
+ if (iio_buffer_enabled(indio_dev)) {
+ result = inv_mpu6050_prepare_fifo(st, false);
+ if (result)
+ goto out_unlock;
+ }
+
+ if (st->chip_config.accl_en)
+ st->suspended_sensors |= INV_MPU6050_SENSOR_ACCL;
+ if (st->chip_config.gyro_en)
+ st->suspended_sensors |= INV_MPU6050_SENSOR_GYRO;
+ if (st->chip_config.temp_en)
+ st->suspended_sensors |= INV_MPU6050_SENSOR_TEMP;
+ if (st->chip_config.magn_en)
+ st->suspended_sensors |= INV_MPU6050_SENSOR_MAGN;
+ result = inv_mpu6050_switch_engine(st, false, st->suspended_sensors);
+ if (result)
+ goto out_unlock;
+
result = inv_mpu6050_set_power_itg(st, false);
+ if (result)
+ goto out_unlock;
+
inv_mpu_core_disable_regulator_vddio(st);
+out_unlock:
mutex_unlock(&st->lock);
return result;
}
-#endif /* CONFIG_PM_SLEEP */
-SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
+static int __maybe_unused inv_mpu_runtime_suspend(struct device *dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+ unsigned int sensors;
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ sensors = INV_MPU6050_SENSOR_ACCL | INV_MPU6050_SENSOR_GYRO |
+ INV_MPU6050_SENSOR_TEMP | INV_MPU6050_SENSOR_MAGN;
+ ret = inv_mpu6050_switch_engine(st, false, sensors);
+ if (ret)
+ goto out_unlock;
+
+ ret = inv_mpu6050_set_power_itg(st, false);
+ if (ret)
+ goto out_unlock;
+
+ inv_mpu_core_disable_regulator_vddio(st);
+
+out_unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int __maybe_unused inv_mpu_runtime_resume(struct device *dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+ int ret;
+
+ ret = inv_mpu_core_enable_regulator_vddio(st);
+ if (ret)
+ return ret;
+
+ return inv_mpu6050_set_power_itg(st, true);
+}
+
+const struct dev_pm_ops inv_mpu_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(inv_mpu_suspend, inv_mpu_resume)
+ SET_RUNTIME_PM_OPS(inv_mpu_runtime_suspend, inv_mpu_runtime_resume, NULL)
+};
EXPORT_SYMBOL_GPL(inv_mpu_pmops);
MODULE_AUTHOR("Invensense Corporation");
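
The probe changes above hand power management over to runtime PM once chip init is complete, pairing system-sleep and runtime callbacks in a single dev_pm_ops. A minimal sketch of the bring-up sequence, assuming the device is already powered; the 2000 ms delay mirrors INV_MPU6050_SUSPEND_DELAY_MS:

    #include <linux/pm_runtime.h>

    static int example_enable_runtime_pm(struct device *dev)
    {
        int ret;

        ret = pm_runtime_set_active(dev);       /* mark as powered-on */
        if (ret)
            return ret;
        pm_runtime_get_noresume(dev);           /* hold a reference while finishing init */
        pm_runtime_enable(dev);
        pm_runtime_set_autosuspend_delay(dev, 2000);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_put(dev);                    /* drop the reference, arm autosuspend */
        return 0;
    }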
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index f47a28b4be23..6993d3b87bb0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -10,6 +10,7 @@
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/property.h>
#include "inv_mpu_iio.h"
static const struct regmap_config inv_mpu_regmap_config = {
@@ -19,62 +20,19 @@ static const struct regmap_config inv_mpu_regmap_config = {
static int inv_mpu6050_select_bypass(struct i2c_mux_core *muxc, u32 chan_id)
{
- struct iio_dev *indio_dev = i2c_mux_priv(muxc);
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&st->lock);
-
- ret = inv_mpu6050_set_power_itg(st, true);
- if (ret)
- goto error_unlock;
-
- ret = regmap_write(st->map, st->reg->int_pin_cfg,
- st->irq_mask | INV_MPU6050_BIT_BYPASS_EN);
-
-error_unlock:
- mutex_unlock(&st->lock);
-
- return ret;
-}
-
-static int inv_mpu6050_deselect_bypass(struct i2c_mux_core *muxc, u32 chan_id)
-{
- struct iio_dev *indio_dev = i2c_mux_priv(muxc);
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->lock);
-
- /* It doesn't really matter if any of the calls fail */
- regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask);
- inv_mpu6050_set_power_itg(st, false);
-
- mutex_unlock(&st->lock);
-
return 0;
}
-static const char *inv_mpu_match_acpi_device(struct device *dev,
- enum inv_devices *chip_id)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return NULL;
-
- *chip_id = (int)id->driver_data;
-
- return dev_name(dev);
-}
-
static bool inv_mpu_i2c_aux_bus(struct device *dev)
{
struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
switch (st->chip_type) {
case INV_ICM20608:
+ case INV_ICM20609:
+ case INV_ICM20689:
case INV_ICM20602:
+ case INV_IAM20680:
/* no i2c auxiliary bus on the chip */
return false;
case INV_MPU9150:
@@ -89,19 +47,20 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
}
}
-/*
- * MPU9xxx magnetometer support requires to disable i2c auxiliary bus support.
- * To ensure backward compatibility with existing setups, do not disable
- * i2c auxiliary bus if it used.
- * Check for i2c-gate node in devicetree and set magnetometer disabled.
- * Only MPU6500 is supported by ACPI, no need to check.
- */
-static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
+static int inv_mpu_i2c_aux_setup(struct iio_dev *indio_dev)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
struct device *dev = indio_dev->dev.parent;
struct device_node *mux_node;
+ int ret;
+ /*
+	 * MPU9xxx magnetometer support requires disabling the i2c auxiliary bus.
+	 * To ensure backward compatibility with existing setups, do not disable
+	 * the i2c auxiliary bus if it is used.
+	 * Check for an i2c-gate node in the devicetree and, if present, mark the
+	 * magnetometer disabled.
+ * Only MPU6500 is supported by ACPI, no need to check.
+ */
switch (st->chip_type) {
case INV_MPU9150:
case INV_MPU9250:
@@ -117,6 +76,14 @@ static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
break;
}
+ /* enable i2c bypass when using i2c auxiliary bus */
+ if (inv_mpu_i2c_aux_bus(dev)) {
+ ret = regmap_write(st->map, st->reg->int_pin_cfg,
+ st->irq_mask | INV_MPU6050_BIT_BYPASS_EN);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -130,6 +97,7 @@ static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
static int inv_mpu_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const void *match;
struct inv_mpu6050_state *st;
int result;
enum inv_devices chip_type;
@@ -140,18 +108,14 @@ static int inv_mpu_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_I2C_BLOCK))
return -EOPNOTSUPP;
- if (client->dev.of_node) {
- chip_type = (enum inv_devices)
- of_device_get_match_data(&client->dev);
+ match = device_get_match_data(&client->dev);
+ if (match) {
+ chip_type = (enum inv_devices)match;
name = client->name;
} else if (id) {
chip_type = (enum inv_devices)
id->driver_data;
name = id->name;
- } else if (ACPI_HANDLE(&client->dev)) {
- name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
- if (!name)
- return -ENODEV;
} else {
return -ENOSYS;
}
@@ -164,7 +128,7 @@ static int inv_mpu_probe(struct i2c_client *client,
}
result = inv_mpu_core_probe(regmap, client->irq, name,
- inv_mpu_magn_disable, chip_type);
+ inv_mpu_i2c_aux_setup, chip_type);
if (result < 0)
return result;
@@ -173,8 +137,7 @@ static int inv_mpu_probe(struct i2c_client *client,
/* declare i2c auxiliary bus */
st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
- inv_mpu6050_select_bypass,
- inv_mpu6050_deselect_bypass);
+ inv_mpu6050_select_bypass, NULL);
if (!st->muxc)
return -ENOMEM;
st->muxc->priv = dev_get_drvdata(&client->dev);
@@ -218,7 +181,11 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
+ {"icm20609", INV_ICM20609},
+ {"icm20689", INV_ICM20689},
{"icm20602", INV_ICM20602},
+ {"icm20690", INV_ICM20690},
+ {"iam20680", INV_IAM20680},
{}
};
@@ -254,9 +221,25 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_ICM20608
},
{
+ .compatible = "invensense,icm20609",
+ .data = (void *)INV_ICM20609
+ },
+ {
+ .compatible = "invensense,icm20689",
+ .data = (void *)INV_ICM20689
+ },
+ {
.compatible = "invensense,icm20602",
.data = (void *)INV_ICM20602
},
+ {
+ .compatible = "invensense,icm20690",
+ .data = (void *)INV_ICM20690
+ },
+ {
+ .compatible = "invensense,iam20680",
+ .data = (void *)INV_IAM20680
+ },
{ }
};
MODULE_DEVICE_TABLE(of, inv_of_match);
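
The i2c probe rework above replaces separate OF and ACPI lookups with device_get_match_data(), which consults whichever firmware match table applies to the device. A minimal sketch of the lookup; the pointer-to-integer cast mirrors how the driver stores chip IDs in its match tables:

    #include <linux/property.h>

    static int example_get_chip_type(struct device *dev, unsigned long *chip)
    {
        const void *match = device_get_match_data(dev);

        if (!match)
            return -ENODEV;                 /* no OF/ACPI match data */
        *chip = (unsigned long)match;
        return 0;
    }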
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 6158fca7f70e..cd38b3fccc7b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -75,15 +75,30 @@ enum inv_devices {
INV_MPU9250,
INV_MPU9255,
INV_ICM20608,
+ INV_ICM20609,
+ INV_ICM20689,
INV_ICM20602,
+ INV_ICM20690,
+ INV_IAM20680,
INV_NUM_PARTS
};
+/* chip sensors mask: accelerometer, gyroscope, temperature, magnetometer */
+#define INV_MPU6050_SENSOR_ACCL BIT(0)
+#define INV_MPU6050_SENSOR_GYRO BIT(1)
+#define INV_MPU6050_SENSOR_TEMP BIT(2)
+#define INV_MPU6050_SENSOR_MAGN BIT(3)
+
/**
* struct inv_mpu6050_chip_config - Cached chip configuration data.
+ * @clk: selected chip clock
* @fsr: Full scale range.
* @lpf: Digital low pass filter frequency.
* @accl_fs: accel full scale range.
+ * @accl_en: accel engine enabled
+ * @gyro_en: gyro engine enabled
+ * @temp_en: temperature sensor enabled
+ * @magn_en: magn engine (i2c master) enabled
* @accl_fifo_enable: enable accel data output
* @gyro_fifo_enable: enable gyro data output
* @temp_fifo_enable: enable temp data output
@@ -91,9 +106,14 @@ enum inv_devices {
* @divider: chip sample rate divider (sample rate divider - 1)
*/
struct inv_mpu6050_chip_config {
+ unsigned int clk:3;
unsigned int fsr:2;
unsigned int lpf:3;
unsigned int accl_fs:2;
+ unsigned int accl_en:1;
+ unsigned int gyro_en:1;
+ unsigned int temp_en:1;
+ unsigned int magn_en:1;
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
unsigned int temp_fifo_enable:1;
@@ -144,6 +164,7 @@ struct inv_mpu6050_hw {
* @magn_disabled: magnetometer disabled for backward compatibility reason.
* @magn_raw_to_gauss: coefficient to convert mag raw value to Gauss.
* @magn_orient: magnetometer sensor chip orientation if available.
+ * @suspended_sensors: mask of sensors turned off for suspend
*/
struct inv_mpu6050_state {
struct mutex lock;
@@ -154,7 +175,6 @@ struct inv_mpu6050_state {
enum inv_devices chip_type;
struct i2c_mux_core *muxc;
struct i2c_client *mux_client;
- unsigned int powerup_count;
struct inv_mpu6050_platform_data plat_data;
struct iio_mount_matrix orientation;
struct regmap *map;
@@ -169,6 +189,7 @@ struct inv_mpu6050_state {
bool magn_disabled;
s32 magn_raw_to_gauss[3];
struct iio_mount_matrix magn_orient;
+ unsigned int suspended_sensors;
};
/*register and associated bit definition*/
@@ -241,7 +262,13 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_I2C_SLV3_DLY_EN 0x08
#define INV_MPU6050_BIT_DELAY_ES_SHADOW 0x80
+#define INV_MPU6050_REG_SIGNAL_PATH_RESET 0x68
+#define INV_MPU6050_BIT_TEMP_RST BIT(0)
+#define INV_MPU6050_BIT_ACCEL_RST BIT(1)
+#define INV_MPU6050_BIT_GYRO_RST BIT(2)
+
#define INV_MPU6050_REG_USER_CTRL 0x6A
+#define INV_MPU6050_BIT_SIG_COND_RST 0x01
#define INV_MPU6050_BIT_FIFO_RST 0x04
#define INV_MPU6050_BIT_DMP_RST 0x08
#define INV_MPU6050_BIT_I2C_MST_EN 0x20
@@ -252,6 +279,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
#define INV_MPU6050_BIT_H_RESET 0x80
#define INV_MPU6050_BIT_SLEEP 0x40
+#define INV_MPU6050_BIT_TEMP_DIS 0x08
#define INV_MPU6050_BIT_CLK_MASK 0x7
#define INV_MPU6050_REG_PWR_MGMT_2 0x6C
@@ -276,12 +304,16 @@ struct inv_mpu6050_state {
/* mpu6500 registers */
#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
+#define INV_ICM20689_BITS_FIFO_SIZE_MAX 0xC0
#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
/* delay time in milliseconds */
#define INV_MPU6050_POWER_UP_TIME 100
#define INV_MPU6050_TEMP_UP_TIME 100
-#define INV_MPU6050_SENSOR_UP_TIME 30
+#define INV_MPU6050_ACCEL_UP_TIME 20
+#define INV_MPU6050_GYRO_UP_TIME 35
+#define INV_MPU6050_GYRO_DOWN_TIME 150
+#define INV_MPU6050_SUSPEND_DELAY_MS 2000
/* delay time in microseconds */
#define INV_MPU6050_REG_UP_TIME_MIN 5000
@@ -293,6 +325,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_MAX_ACCL_FS_PARAM 3
#define INV_MPU6050_THREE_AXIS 3
#define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3
+#define INV_ICM20690_GYRO_CONFIG_FSR_SHIFT 2
#define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3
#define INV_MPU6500_TEMP_OFFSET 7011
@@ -315,7 +348,6 @@ struct inv_mpu6050_state {
#define INV_MPU6050_TS_PERIOD_JITTER 4
/* init parameters */
-#define INV_MPU6050_INIT_FIFO_RATE 50
#define INV_MPU6050_MAX_FIFO_RATE 1000
#define INV_MPU6050_MIN_FIFO_RATE 4
@@ -340,7 +372,11 @@ struct inv_mpu6050_state {
#define INV_MPU9255_WHOAMI_VALUE 0x73
#define INV_MPU6515_WHOAMI_VALUE 0x74
#define INV_ICM20608_WHOAMI_VALUE 0xAF
+#define INV_ICM20609_WHOAMI_VALUE 0xA6
+#define INV_ICM20689_WHOAMI_VALUE 0x98
#define INV_ICM20602_WHOAMI_VALUE 0x12
+#define INV_ICM20690_WHOAMI_VALUE 0x20
+#define INV_IAM20680_WHOAMI_VALUE 0xA9
/* scan element definition for generic MPU6xxx devices */
enum inv_mpu6050_scan {
@@ -360,14 +396,14 @@ enum inv_mpu6050_scan {
};
enum inv_mpu6050_filter_e {
- INV_MPU6050_FILTER_256HZ_NOLPF2 = 0,
- INV_MPU6050_FILTER_188HZ,
- INV_MPU6050_FILTER_98HZ,
- INV_MPU6050_FILTER_42HZ,
+ INV_MPU6050_FILTER_NOLPF2 = 0,
+ INV_MPU6050_FILTER_200HZ,
+ INV_MPU6050_FILTER_100HZ,
+ INV_MPU6050_FILTER_45HZ,
INV_MPU6050_FILTER_20HZ,
INV_MPU6050_FILTER_10HZ,
INV_MPU6050_FILTER_5HZ,
- INV_MPU6050_FILTER_2100HZ_NOLPF,
+ INV_MPU6050_FILTER_NOLPF,
NUM_MPU6050_FILTER
};
@@ -401,10 +437,10 @@ enum inv_mpu6050_clock_sel_e {
irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type);
-int inv_reset_fifo(struct iio_dev *indio_dev);
-int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
+int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable);
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en,
+ unsigned int mask);
int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
-int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
int inv_mpu_acpi_create_mux_client(struct i2c_client *client);
void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
index 4f192352521e..f282e9cc34c5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
@@ -44,9 +44,6 @@
#define INV_MPU_MAGN_REG_ASAY 0x11
#define INV_MPU_MAGN_REG_ASAZ 0x12
-/* Magnetometer maximum frequency */
-#define INV_MPU_MAGN_FREQ_HZ_MAX 50
-
static bool inv_magn_supported(const struct inv_mpu6050_state *st)
{
switch (st->chip_type) {
@@ -316,59 +313,32 @@ int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
*
* Returns 0 on success, a negative error code otherwise
*/
-int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val)
+int inv_mpu_magn_read(struct inv_mpu6050_state *st, int axis, int *val)
{
- unsigned int user_ctrl, status;
- __be16 data[3];
+ unsigned int status;
+ __be16 data;
uint8_t addr;
- uint8_t d;
- unsigned int period_ms;
int ret;
/* quit if chip is not supported */
if (!inv_magn_supported(st))
return -ENODEV;
- /* Mag data: X - Y - Z */
+ /* Mag data: XH,XL,YH,YL,ZH,ZL */
switch (axis) {
case IIO_MOD_X:
addr = 0;
break;
case IIO_MOD_Y:
- addr = 1;
+ addr = 2;
break;
case IIO_MOD_Z:
- addr = 2;
+ addr = 4;
break;
default:
return -EINVAL;
}
-
- /* set sample rate to max mag freq */
- d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU_MAGN_FREQ_HZ_MAX);
- ret = regmap_write(st->map, st->reg->sample_rate_div, d);
- if (ret)
- return ret;
-
- /* start i2c master, wait for xfer, stop */
- user_ctrl = st->chip_config.user_ctrl | INV_MPU6050_BIT_I2C_MST_EN;
- ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
- if (ret)
- return ret;
-
- /* need to wait 2 periods + half-period margin */
- period_ms = 1000 / INV_MPU_MAGN_FREQ_HZ_MAX;
- msleep(period_ms * 2 + period_ms / 2);
- user_ctrl = st->chip_config.user_ctrl;
- ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
- if (ret)
- return ret;
-
- /* restore sample rate */
- d = st->chip_config.divider;
- ret = regmap_write(st->map, st->reg->sample_rate_div, d);
- if (ret)
- return ret;
+ addr += INV_MPU6050_REG_EXT_SENS_DATA;
/* check i2c status and read raw data */
ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
@@ -379,12 +349,11 @@ int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val)
status & INV_MPU6050_BIT_I2C_SLV1_NACK)
return -EIO;
- ret = regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA,
- data, sizeof(data));
+ ret = regmap_bulk_read(st->map, addr, &data, sizeof(data));
if (ret)
return ret;
- *val = (int16_t)be16_to_cpu(data[addr]);
+ *val = (int16_t)be16_to_cpu(data);
return IIO_VAL_INT;
}
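
The magnetometer read above now fetches only the two bytes belonging to the requested axis; the chip exposes each sample as a big-endian 16-bit word starting at the external sensor data registers. A minimal sketch of that access, with an illustrative base register argument:

    #include <linux/regmap.h>
    #include <asm/byteorder.h>

    static int example_read_axis(struct regmap *map, unsigned int base,
                                 unsigned int axis, int *val)
    {
        __be16 data;
        int ret;

        /* each axis occupies two consecutive registers: high byte, then low */
        ret = regmap_bulk_read(map, base + 2 * axis, &data, sizeof(data));
        if (ret)
            return ret;

        *val = (int16_t)be16_to_cpu(data);  /* convert and sign-extend */
        return 0;
    }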
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
index b41bd0578478..185c000c697c 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
@@ -10,6 +10,9 @@
#include "inv_mpu_iio.h"
+/* Magnetometer maximum frequency */
+#define INV_MPU_MAGN_FREQ_HZ_MAX 50
+
int inv_mpu_magn_probe(struct inv_mpu6050_state *st);
/**
@@ -31,6 +34,6 @@ int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate);
int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st);
-int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val);
+int inv_mpu_magn_read(struct inv_mpu6050_state *st, int axis, int *val);
#endif /* INV_MPU_MAGN_H_ */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index f9fdf4302a91..9511e4715e2c 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -90,63 +90,14 @@ static s64 inv_mpu6050_get_timestamp(struct inv_mpu6050_state *st)
return ts;
}
-int inv_reset_fifo(struct iio_dev *indio_dev)
+static int inv_reset_fifo(struct iio_dev *indio_dev)
{
int result;
- u8 d;
struct inv_mpu6050_state *st = iio_priv(indio_dev);
- /* reset it timestamp validation */
- st->it_timestamp = 0;
-
- /* disable interrupt */
- result = regmap_write(st->map, st->reg->int_enable, 0);
- if (result) {
- dev_err(regmap_get_device(st->map), "int_enable failed %d\n",
- result);
- return result;
- }
- /* disable the sensor output to FIFO */
- result = regmap_write(st->map, st->reg->fifo_en, 0);
- if (result)
- goto reset_fifo_fail;
- /* disable fifo reading */
- result = regmap_write(st->map, st->reg->user_ctrl,
- st->chip_config.user_ctrl);
- if (result)
- goto reset_fifo_fail;
-
- /* reset FIFO*/
- d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
- result = regmap_write(st->map, st->reg->user_ctrl, d);
- if (result)
- goto reset_fifo_fail;
-
- /* enable interrupt */
- if (st->chip_config.accl_fifo_enable ||
- st->chip_config.gyro_fifo_enable ||
- st->chip_config.magn_fifo_enable) {
- result = regmap_write(st->map, st->reg->int_enable,
- INV_MPU6050_BIT_DATA_RDY_EN);
- if (result)
- return result;
- }
- /* enable FIFO reading */
- d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_EN;
- result = regmap_write(st->map, st->reg->user_ctrl, d);
- if (result)
- goto reset_fifo_fail;
- /* enable sensor output to FIFO */
- d = 0;
- if (st->chip_config.gyro_fifo_enable)
- d |= INV_MPU6050_BITS_GYRO_OUT;
- if (st->chip_config.accl_fifo_enable)
- d |= INV_MPU6050_BIT_ACCEL_OUT;
- if (st->chip_config.temp_fifo_enable)
- d |= INV_MPU6050_BIT_TEMP_OUT;
- if (st->chip_config.magn_fifo_enable)
- d |= INV_MPU6050_BIT_SLAVE_0;
- result = regmap_write(st->map, st->reg->fifo_en, d);
+ /* disable the fifo, then re-enable it */
+ inv_mpu6050_prepare_fifo(st, false);
+ result = inv_mpu6050_prepare_fifo(st, true);
if (result)
goto reset_fifo_fail;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index ec102d5a5c77..673b198e6368 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -4,6 +4,8 @@
*/
#include <linux/module.h>
#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
@@ -19,10 +21,6 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
int ret = 0;
- ret = inv_mpu6050_set_power_itg(st, true);
- if (ret)
- return ret;
-
if (st->reg->i2c_if) {
ret = regmap_write(st->map, st->reg->i2c_if,
INV_ICM20602_BIT_I2C_IF_DIS);
@@ -31,27 +29,24 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
ret = regmap_write(st->map, st->reg->user_ctrl,
st->chip_config.user_ctrl);
}
- if (ret) {
- inv_mpu6050_set_power_itg(st, false);
- return ret;
- }
- return inv_mpu6050_set_power_itg(st, false);
+ return ret;
}
static int inv_mpu_probe(struct spi_device *spi)
{
+ const void *match;
struct regmap *regmap;
const struct spi_device_id *spi_id;
- const struct acpi_device_id *acpi_id;
const char *name = NULL;
enum inv_devices chip_type;
if ((spi_id = spi_get_device_id(spi))) {
chip_type = (enum inv_devices)spi_id->driver_data;
name = spi_id->name;
- } else if ((acpi_id = acpi_match_device(spi->dev.driver->acpi_match_table, &spi->dev))) {
- chip_type = (enum inv_devices)acpi_id->driver_data;
+ } else if ((match = device_get_match_data(&spi->dev))) {
+ chip_type = (enum inv_devices)match;
+ name = dev_name(&spi->dev);
} else {
return -ENODEV;
}
@@ -74,15 +69,69 @@ static int inv_mpu_probe(struct spi_device *spi)
static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
{"mpu6500", INV_MPU6500},
+ {"mpu6515", INV_MPU6515},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
+ {"icm20609", INV_ICM20609},
+ {"icm20689", INV_ICM20689},
{"icm20602", INV_ICM20602},
+ {"icm20690", INV_ICM20690},
+ {"iam20680", INV_IAM20680},
{}
};
MODULE_DEVICE_TABLE(spi, inv_mpu_id);
+static const struct of_device_id inv_of_match[] = {
+ {
+ .compatible = "invensense,mpu6000",
+ .data = (void *)INV_MPU6000
+ },
+ {
+ .compatible = "invensense,mpu6500",
+ .data = (void *)INV_MPU6500
+ },
+ {
+ .compatible = "invensense,mpu6515",
+ .data = (void *)INV_MPU6515
+ },
+ {
+ .compatible = "invensense,mpu9250",
+ .data = (void *)INV_MPU9250
+ },
+ {
+ .compatible = "invensense,mpu9255",
+ .data = (void *)INV_MPU9255
+ },
+ {
+ .compatible = "invensense,icm20608",
+ .data = (void *)INV_ICM20608
+ },
+ {
+ .compatible = "invensense,icm20609",
+ .data = (void *)INV_ICM20609
+ },
+ {
+ .compatible = "invensense,icm20689",
+ .data = (void *)INV_ICM20689
+ },
+ {
+ .compatible = "invensense,icm20602",
+ .data = (void *)INV_ICM20602
+ },
+ {
+ .compatible = "invensense,icm20690",
+ .data = (void *)INV_ICM20690
+ },
+ {
+ .compatible = "invensense,iam20680",
+ .data = (void *)INV_IAM20680
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, inv_of_match);
+
static const struct acpi_device_id inv_acpi_match[] = {
{"INVN6000", INV_MPU6000},
{ },
@@ -93,6 +142,7 @@ static struct spi_driver inv_mpu_driver = {
.probe = inv_mpu_probe,
.id_table = inv_mpu_id,
.driver = {
+ .of_match_table = inv_of_match,
.acpi_match_table = ACPI_PTR(inv_acpi_match),
.name = "inv-mpu6000-spi",
.pm = &inv_mpu_pmops,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 5199fe790c30..f7b5a70be30f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -3,11 +3,13 @@
* Copyright (C) 2012 Invensense, Inc.
*/
+#include <linux/pm_runtime.h>
#include "inv_mpu_iio.h"
-static void inv_scan_query_mpu6050(struct iio_dev *indio_dev)
+static unsigned int inv_scan_query_mpu6050(struct iio_dev *indio_dev)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ unsigned int mask;
st->chip_config.gyro_fifo_enable =
test_bit(INV_MPU6050_SCAN_GYRO_X,
@@ -27,17 +29,28 @@ static void inv_scan_query_mpu6050(struct iio_dev *indio_dev)
st->chip_config.temp_fifo_enable =
test_bit(INV_MPU6050_SCAN_TEMP, indio_dev->active_scan_mask);
+
+ mask = 0;
+ if (st->chip_config.gyro_fifo_enable)
+ mask |= INV_MPU6050_SENSOR_GYRO;
+ if (st->chip_config.accl_fifo_enable)
+ mask |= INV_MPU6050_SENSOR_ACCL;
+ if (st->chip_config.temp_fifo_enable)
+ mask |= INV_MPU6050_SENSOR_TEMP;
+
+ return mask;
}
-static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
+static unsigned int inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ unsigned int mask;
- inv_scan_query_mpu6050(indio_dev);
+ mask = inv_scan_query_mpu6050(indio_dev);
/* no magnetometer if i2c auxiliary bus is used */
if (st->magn_disabled)
- return;
+ return mask;
st->chip_config.magn_fifo_enable =
test_bit(INV_MPU9X50_SCAN_MAGN_X,
@@ -46,9 +59,13 @@ static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
indio_dev->active_scan_mask) ||
test_bit(INV_MPU9X50_SCAN_MAGN_Z,
indio_dev->active_scan_mask);
+ if (st->chip_config.magn_fifo_enable)
+ mask |= INV_MPU6050_SENSOR_MAGN;
+
+ return mask;
}
-static void inv_scan_query(struct iio_dev *indio_dev)
+static unsigned int inv_scan_query(struct iio_dev *indio_dev)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
@@ -84,6 +101,54 @@ static unsigned int inv_compute_skip_samples(const struct inv_mpu6050_state *st)
return skip_samples;
}
+int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable)
+{
+ uint8_t d;
+ int ret;
+
+ if (enable) {
+ st->it_timestamp = 0;
+ /* reset FIFO */
+ d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
+ ret = regmap_write(st->map, st->reg->user_ctrl, d);
+ if (ret)
+ return ret;
+ /* enable sensor output to FIFO */
+ d = 0;
+ if (st->chip_config.gyro_fifo_enable)
+ d |= INV_MPU6050_BITS_GYRO_OUT;
+ if (st->chip_config.accl_fifo_enable)
+ d |= INV_MPU6050_BIT_ACCEL_OUT;
+ if (st->chip_config.temp_fifo_enable)
+ d |= INV_MPU6050_BIT_TEMP_OUT;
+ if (st->chip_config.magn_fifo_enable)
+ d |= INV_MPU6050_BIT_SLAVE_0;
+ ret = regmap_write(st->map, st->reg->fifo_en, d);
+ if (ret)
+ return ret;
+ /* enable FIFO reading */
+ d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_EN;
+ ret = regmap_write(st->map, st->reg->user_ctrl, d);
+ if (ret)
+ return ret;
+ /* enable interrupt */
+ ret = regmap_write(st->map, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
+ } else {
+ ret = regmap_write(st->map, st->reg->int_enable, 0);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, st->reg->fifo_en, 0);
+ if (ret)
+ return ret;
+ /* restore user_ctrl for disabling FIFO reading */
+ ret = regmap_write(st->map, st->reg->user_ctrl,
+ st->chip_config.user_ctrl);
+ }
+
+ return ret;
+}
+
/**
* inv_mpu6050_set_enable() - enable chip functions.
* @indio_dev: Device driver instance.
@@ -92,84 +157,43 @@ static unsigned int inv_compute_skip_samples(const struct inv_mpu6050_state *st)
static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
- uint8_t d;
+ struct device *pdev = regmap_get_device(st->map);
+ unsigned int scan;
int result;
if (enable) {
- result = inv_mpu6050_set_power_itg(st, true);
- if (result)
+ scan = inv_scan_query(indio_dev);
+ result = pm_runtime_get_sync(pdev);
+ if (result < 0) {
+ pm_runtime_put_noidle(pdev);
return result;
- inv_scan_query(indio_dev);
- if (st->chip_config.gyro_fifo_enable) {
- result = inv_mpu6050_switch_engine(st, true,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
- if (result)
- goto error_power_off;
- }
- if (st->chip_config.accl_fifo_enable) {
- result = inv_mpu6050_switch_engine(st, true,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
- if (result)
- goto error_gyro_off;
}
- if (st->chip_config.magn_fifo_enable) {
- d = st->chip_config.user_ctrl |
- INV_MPU6050_BIT_I2C_MST_EN;
- result = regmap_write(st->map, st->reg->user_ctrl, d);
- if (result)
- goto error_accl_off;
- st->chip_config.user_ctrl = d;
- }
- st->skip_samples = inv_compute_skip_samples(st);
- result = inv_reset_fifo(indio_dev);
+ /*
+	 * If autosuspend didn't trigger, first turn off the sensors
+	 * that are not required.
+ */
+ result = inv_mpu6050_switch_engine(st, false, ~scan);
if (result)
- goto error_magn_off;
- } else {
- result = regmap_write(st->map, st->reg->fifo_en, 0);
- if (result)
- goto error_magn_off;
-
- result = regmap_write(st->map, st->reg->int_enable, 0);
- if (result)
- goto error_magn_off;
-
- d = st->chip_config.user_ctrl & ~INV_MPU6050_BIT_I2C_MST_EN;
- result = regmap_write(st->map, st->reg->user_ctrl, d);
- if (result)
- goto error_magn_off;
- st->chip_config.user_ctrl = d;
-
- result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
+ goto error_power_off;
+ result = inv_mpu6050_switch_engine(st, true, scan);
if (result)
- goto error_accl_off;
-
- result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
+ goto error_power_off;
+ st->skip_samples = inv_compute_skip_samples(st);
+ result = inv_mpu6050_prepare_fifo(st, true);
if (result)
- goto error_gyro_off;
-
- result = inv_mpu6050_set_power_itg(st, false);
+ goto error_power_off;
+ } else {
+ result = inv_mpu6050_prepare_fifo(st, false);
if (result)
goto error_power_off;
+ pm_runtime_mark_last_busy(pdev);
+ pm_runtime_put_autosuspend(pdev);
}
return 0;
-error_magn_off:
- /* always restore user_ctrl to disable fifo properly */
- st->chip_config.user_ctrl &= ~INV_MPU6050_BIT_I2C_MST_EN;
- regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl);
-error_accl_off:
- if (st->chip_config.accl_fifo_enable)
- inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
-error_gyro_off:
- if (st->chip_config.gyro_fifo_enable)
- inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
error_power_off:
- inv_mpu6050_set_power_itg(st, false);
+ pm_runtime_put_autosuspend(pdev);
return result;
}
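
The rewritten enable path replaces manual power sequencing with runtime PM. Note the error handling around pm_runtime_get_sync(): the call raises the usage count even when the resume fails, so the failure branch must drop the reference with pm_runtime_put_noidle(). A generic sketch of that pattern, assuming only <linux/pm_runtime.h>:

    #include <linux/pm_runtime.h>

    /* Generic runtime-PM sketch: a failed resume still raises the
     * usage count, so drop it with put_noidle() before bailing out. */
    static int example_runtime_get(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            return 0;
    }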
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 9c3486a8134f..41cb20cb3809 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -230,8 +230,8 @@ enum st_lsm6dsx_ext_sensor_id {
* @i2c_addr: I2c slave address list.
* @wai: Wai address info.
* @id: external sensor id.
- * @odr: Output data rate of the sensor [Hz].
- * @gain: Configured sensor sensitivity.
+ * @odr_table: Output data rate table of the sensor [Hz].
+ * @fs_table: Sensor sensitivity table depending on full scale.
* @temp_comp: Temperature compensation register info (addr + mask).
* @pwr_table: Power on register info (addr + mask).
* @off_canc: Offset cancellation register info (addr + mask).
@@ -337,6 +337,7 @@ enum st_lsm6dsx_fifo_mode {
* @gain: Configured sensor sensitivity.
* @odr: Output data rate of the sensor [Hz].
* @watermark: Sensor watermark level.
+ * @decimator: Sensor decimation factor.
* @sip: Number of samples in a given pattern.
* @ts_ref: Sensor timestamp reference for hw one.
* @ext_info: Sensor settings if it is connected to i2c controller
@@ -350,11 +351,13 @@ struct st_lsm6dsx_sensor {
u32 odr;
u16 watermark;
+ u8 decimator;
u8 sip;
s64 ts_ref;
struct {
const struct st_lsm6dsx_ext_dev_settings *settings;
+ u32 slv_odr;
u8 addr;
} ext_info;
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index bb899345f2bb..afd00daeefb2 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -93,6 +93,7 @@ st_lsm6dsx_get_decimator_val(struct st_lsm6dsx_sensor *sensor, u32 max_odr)
break;
}
+ sensor->decimator = decimator;
return i == max_size ? 0 : st_lsm6dsx_decimator_table[i].val;
}
@@ -337,7 +338,7 @@ static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 addr,
int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
{
struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor, *ext_sensor = NULL;
- int err, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
+ int err, sip, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
@@ -399,19 +400,20 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
acc_sip = acc_sensor->sip;
ts_sip = hw->ts_sip;
offset = 0;
+ sip = 0;
while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) {
- if (gyro_sip > 0) {
+ if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
memcpy(gyro_buff, &hw->buff[offset],
ST_LSM6DSX_SAMPLE_SIZE);
offset += ST_LSM6DSX_SAMPLE_SIZE;
}
- if (acc_sip > 0) {
+ if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
memcpy(acc_buff, &hw->buff[offset],
ST_LSM6DSX_SAMPLE_SIZE);
offset += ST_LSM6DSX_SAMPLE_SIZE;
}
- if (ext_sip > 0) {
+ if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
memcpy(ext_buff, &hw->buff[offset],
ST_LSM6DSX_SAMPLE_SIZE);
offset += ST_LSM6DSX_SAMPLE_SIZE;
@@ -441,18 +443,25 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
offset += ST_LSM6DSX_SAMPLE_SIZE;
}
- if (gyro_sip-- > 0)
+ if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_GYRO],
gyro_buff, gyro_sensor->ts_ref + ts);
- if (acc_sip-- > 0)
+ gyro_sip--;
+ }
+ if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_ACC],
acc_buff, acc_sensor->ts_ref + ts);
- if (ext_sip-- > 0)
+ acc_sip--;
+ }
+ if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_EXT0],
ext_buff, ext_sensor->ts_ref + ts);
+ ext_sip--;
+ }
+ sip++;
}
}
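
With the per-sensor decimators above, a sensor contributes a sample only on pattern slots where sip % decimator == 0, so slower sensors are skipped on the intermediate slots while the faster ones keep draining their sip counters. A standalone, userspace-style illustration of the slot test (a sketch, not kernel code):

    #include <stdio.h>

    /* With a decimation factor of 3, only every third pattern slot
     * carries a sample for that sensor. */
    int main(void)
    {
            int sip, decimator = 3;

            for (sip = 0; sip < 9; sip++)
                    printf("slot %d: %s\n", sip,
                           (sip % decimator) ? "skipped" : "sampled");

            return 0;
    }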
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 84d219ae6aee..4426524b59f2 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2036,11 +2036,21 @@ static int st_lsm6dsx_init_hw_timer(struct st_lsm6dsx_hw *hw)
return 0;
}
-static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
+static int st_lsm6dsx_reset_device(struct st_lsm6dsx_hw *hw)
{
const struct st_lsm6dsx_reg *reg;
int err;
+ /*
+ * flush hw FIFO before device reset in order to avoid
+ * possible races on interrupt line 1. If the first interrupt
+ * line is asserted during hw reset the device will work in
+ * I3C-only mode (if it is supported)
+ */
+ err = st_lsm6dsx_flush_fifo(hw);
+ if (err < 0 && err != -ENOTSUPP)
+ return err;
+
/* device sw reset */
reg = &hw->settings->reset;
err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
@@ -2059,6 +2069,18 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
msleep(50);
+ return 0;
+}
+
+static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
+{
+ const struct st_lsm6dsx_reg *reg;
+ int err;
+
+ err = st_lsm6dsx_reset_device(hw);
+ if (err < 0)
+ return err;
+
/* enable Block Data Update */
reg = &hw->settings->bdu;
err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index eea555617d4a..1cf98195f84d 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -421,7 +421,8 @@ int st_lsm6dsx_shub_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable)
settings = sensor->ext_info.settings;
if (enable) {
- err = st_lsm6dsx_shub_set_odr(sensor, sensor->odr);
+ err = st_lsm6dsx_shub_set_odr(sensor,
+ sensor->ext_info.slv_odr);
if (err < 0)
return err;
} else {
@@ -459,14 +460,15 @@ st_lsm6dsx_shub_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- delay = 1000000000 / sensor->odr;
+ delay = 1000000000 / sensor->ext_info.slv_odr;
usleep_range(delay, 2 * delay);
len = min_t(int, sizeof(data), ch->scan_type.realbits >> 3);
err = st_lsm6dsx_shub_read(sensor, ch->address, data, len);
+ if (err < 0)
+ return err;
- st_lsm6dsx_shub_set_enable(sensor, false);
-
+ err = st_lsm6dsx_shub_set_enable(sensor, false);
if (err < 0)
return err;
@@ -499,8 +501,8 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
iio_device_release_direct_mode(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = sensor->odr / 1000;
- *val2 = (sensor->odr % 1000) * 1000;
+ *val = sensor->ext_info.slv_odr / 1000;
+ *val2 = (sensor->ext_info.slv_odr % 1000) * 1000;
ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SCALE:
@@ -534,8 +536,22 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
val = val * 1000 + val2 / 1000;
err = st_lsm6dsx_shub_get_odr_val(sensor, val, &data);
- if (!err)
- sensor->odr = val;
+ if (!err) {
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ struct st_lsm6dsx_sensor *ref_sensor;
+ u8 odr_val;
+ int odr;
+
+ ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+ odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val);
+ if (odr < 0) {
+ err = odr;
+ goto release;
+ }
+
+ sensor->ext_info.slv_odr = val;
+ sensor->odr = odr;
+ }
break;
}
default:
@@ -543,6 +559,7 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
break;
}
+release:
iio_device_release_direct_mode(iio_dev);
return err;
@@ -612,6 +629,7 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
const struct st_lsm6dsx_ext_dev_settings *info,
u8 i2c_addr, const char *name)
{
+ enum st_lsm6dsx_sensor_id ref_id = ST_LSM6DSX_ID_ACC;
struct iio_chan_spec *ext_channels;
struct st_lsm6dsx_sensor *sensor;
struct iio_dev *iio_dev;
@@ -627,7 +645,8 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor = iio_priv(iio_dev);
sensor->id = id;
sensor->hw = hw;
- sensor->odr = info->odr_table.odr_avl[0].milli_hz;
+ sensor->odr = hw->settings->odr_table[ref_id].odr_avl[0].milli_hz;
+ sensor->ext_info.slv_odr = info->odr_table.odr_avl[0].milli_hz;
sensor->gain = info->fs_table.fs_avl[0].gain;
sensor->ext_info.settings = info;
sensor->ext_info.addr = i2c_addr;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 65ff0d067018..24f7bbff4938 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -189,7 +189,12 @@ ssize_t iio_read_const_attr(struct device *dev,
}
EXPORT_SYMBOL(iio_read_const_attr);
-static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
+/**
+ * iio_device_set_clock() - Set current timestamping clock for the device
+ * @indio_dev: IIO device structure containing the device
+ * @clock_id: timestamping clock posix identifier to set.
+ */
+int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
int ret;
const struct iio_event_interface *ev_int = indio_dev->event_interface;
@@ -207,6 +212,7 @@ static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
return 0;
}
+EXPORT_SYMBOL(iio_device_set_clock);
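
Exporting iio_device_set_clock() lets drivers choose a default timestamping clock at probe time instead of relying on userspace to configure it through sysfs. A hedged sketch, assuming the declaration is made visible via <linux/iio/iio.h> (example_set_clock() is hypothetical):

    /* Pick a monotonic timestamping clock at probe time. */
    static int example_set_clock(struct iio_dev *indio_dev)
    {
            return iio_device_set_clock(indio_dev, CLOCK_MONOTONIC);
    }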
/**
* iio_get_time_ns() - utility function to get a time stamp for events etc
@@ -301,11 +307,14 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct iio_dev *indio_dev = file->private_data;
- char buf[20];
unsigned val = 0;
- ssize_t len;
int ret;
+ if (*ppos > 0)
+ return simple_read_from_buffer(userbuf, count, ppos,
+ indio_dev->read_buf,
+ indio_dev->read_buf_len);
+
ret = indio_dev->info->debugfs_reg_access(indio_dev,
indio_dev->cached_reg_addr,
0, &val);
@@ -314,9 +323,13 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
return ret;
}
- len = snprintf(buf, sizeof(buf), "0x%X\n", val);
+ indio_dev->read_buf_len = snprintf(indio_dev->read_buf,
+ sizeof(indio_dev->read_buf),
+ "0x%X\n", val);
- return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+ return simple_read_from_buffer(userbuf, count, ppos,
+ indio_dev->read_buf,
+ indio_dev->read_buf_len);
}
static ssize_t iio_debugfs_write_reg(struct file *file,
@@ -769,17 +782,18 @@ static ssize_t iio_read_channel_info_avail(struct device *dev,
}
/**
- * iio_str_to_fixpoint() - Parse a fixed-point number from a string
+ * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
* @str: The string to parse
* @fract_mult: Multiplier for the first decimal place, should be a power of 10
* @integer: The integer part of the number
* @fract: The fractional part of the number
+ * @scale_db: True if this should parse as dB
*
* Returns 0 on success, or a negative error code if the string could not be
* parsed.
*/
-int iio_str_to_fixpoint(const char *str, int fract_mult,
- int *integer, int *fract)
+static int __iio_str_to_fixpoint(const char *str, int fract_mult,
+ int *integer, int *fract, bool scale_db)
{
int i = 0, f = 0;
bool integer_part = true, negative = false;
@@ -810,6 +824,14 @@ int iio_str_to_fixpoint(const char *str, int fract_mult,
break;
else
return -EINVAL;
+ } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
+ /* Ignore the dB suffix */
+ str += sizeof(" dB") - 1;
+ continue;
+ } else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
+ /* Ignore the dB suffix */
+ str += sizeof("dB") - 1;
+ continue;
} else if (*str == '.' && integer_part) {
integer_part = false;
} else {
@@ -830,6 +852,22 @@ int iio_str_to_fixpoint(const char *str, int fract_mult,
return 0;
}
+
+/**
+ * iio_str_to_fixpoint() - Parse a fixed-point number from a string
+ * @str: The string to parse
+ * @fract_mult: Multiplier for the first decimal place, should be a power of 10
+ * @integer: The integer part of the number
+ * @fract: The fractional part of the number
+ *
+ * Returns 0 on success, or a negative error code if the string could not be
+ * parsed.
+ */
+int iio_str_to_fixpoint(const char *str, int fract_mult,
+ int *integer, int *fract)
+{
+ return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
+}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
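
The split above keeps the exported iio_str_to_fixpoint() behaviour unchanged while letting the sysfs write path accept a "dB" or " dB" suffix when the channel reports IIO_VAL_INT_PLUS_MICRO_DB. As a worked example of the fixed-point split itself: with fract_mult = 100000 (the first decimal place worth 100000 micro units), the string "3.01" parses to integer = 3 and fract = 10000, i.e. 0.01 expressed in micro units:

    /* Sketch only; example_parse() is hypothetical. */
    static int example_parse(void)
    {
            int integer, fract;

            return iio_str_to_fixpoint("3.01", 100000, &integer, &fract);
    }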
static ssize_t iio_write_channel_info(struct device *dev,
@@ -842,6 +880,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
int ret, fract_mult = 100000;
int integer, fract = 0;
bool is_char = false;
+ bool scale_db = false;
/* Assumes decimal - precision based on number of digits */
if (!indio_dev->info->write_raw)
@@ -853,6 +892,9 @@ static ssize_t iio_write_channel_info(struct device *dev,
case IIO_VAL_INT:
fract_mult = 0;
break;
+ case IIO_VAL_INT_PLUS_MICRO_DB:
+ scale_db = true;
+ /* fall through */
case IIO_VAL_INT_PLUS_MICRO:
fract_mult = 100000;
break;
@@ -873,7 +915,8 @@ static ssize_t iio_write_channel_info(struct device *dev,
return -EINVAL;
integer = ch;
} else {
- ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
+ ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
+ scale_db);
if (ret)
return ret;
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 9968f982fbc7..b27719cefcf9 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -43,6 +43,16 @@ config ADUX1020
To compile this driver as a module, choose M here: the
module will be called adux1020.
+config AL3010
+ tristate "AL3010 ambient light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Dyna Image AL3010
+ ambient light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called al3010.
+
config AL3320A
tristate "AL3320A ambient light sensor"
depends on I2C
@@ -159,6 +169,17 @@ config IIO_CROS_EC_LIGHT_PROX
To compile this driver as a module, choose M here:
the module will be called cros_ec_light_prox.
+config GP2AP002
+ tristate "Sharp GP2AP002 Proximity/ALS sensor"
+ depends on I2C
+ select REGMAP
+ help
+ Say Y here if you have a Sharp GP2AP002 proximity/ALS combo-chip
+ hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gp2ap002.
+
config GP2AP020A00F
tristate "Sharp GP2AP020A00F Proximity/ALS sensor"
depends on I2C
@@ -173,6 +194,16 @@ config GP2AP020A00F
To compile this driver as a module, choose M here: the
module will be called gp2ap020a00f.
+config IQS621_ALS
+ tristate "Azoteq IQS621/622 ambient light sensors"
+ depends on MFD_IQS62X || COMPILE_TEST
+ help
+ Say Y here if you want to build support for the Azoteq IQS621
+ and IQS622 ambient light sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iqs621-als.
+
config SENSORS_ISL29018
tristate "Intersil 29018 light and proximity sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c98d1cefb861..d1c8aa30b9a8 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_ACPI_ALS) += acpi-als.o
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_ADUX1020) += adux1020.o
+obj-$(CONFIG_AL3010) += al3010.o
obj-$(CONFIG_AL3320A) += al3320a.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9960) += apds9960.o
@@ -18,9 +19,11 @@ obj-$(CONFIG_CM3323) += cm3323.o
obj-$(CONFIG_CM3605) += cm3605.o
obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_IIO_CROS_EC_LIGHT_PROX) += cros_ec_light_prox.o
+obj-$(CONFIG_GP2AP002) += gp2ap002.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
+obj-$(CONFIG_IQS621_ALS) += iqs621-als.o
obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
obj-$(CONFIG_ISL29125) += isl29125.o
diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c
new file mode 100644
index 000000000000..b1ed7658cc46
--- /dev/null
+++ b/drivers/iio/light/al3010.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AL3010 - Dyna Image Ambient Light Sensor
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2016, Dyna-Image Corp.
+ * Copyright (c) 2020, David Heidelberg, Michał Mirosław, Dmitry Osipenko
+ *
+ * IIO driver for AL3010 (7-bit I2C slave address 0x1C).
+ *
+ * TODO: interrupt support, thresholds
+ * Once the driver gains support for interrupt handling, the interrupt
+ * will need to be disabled before turning the sensor OFF in order to
+ * avoid potential races with the interrupt handler.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define AL3010_DRV_NAME "al3010"
+
+#define AL3010_REG_SYSTEM 0x00
+#define AL3010_REG_DATA_LOW 0x0c
+#define AL3010_REG_CONFIG 0x10
+
+#define AL3010_CONFIG_DISABLE 0x00
+#define AL3010_CONFIG_ENABLE 0x01
+
+#define AL3010_GAIN_MASK	GENMASK(6, 4)
+
+#define AL3010_SCALE_AVAILABLE "1.1872 0.2968 0.0742 0.0186"
+
+enum al3xxxx_range {
+ AL3XXX_RANGE_1, /* 77806 lx */
+ AL3XXX_RANGE_2, /* 19542 lx */
+ AL3XXX_RANGE_3, /* 4863 lx */
+ AL3XXX_RANGE_4 /* 1216 lx */
+};
+
+static const int al3010_scales[][2] = {
+ {0, 1187200}, {0, 296800}, {0, 74200}, {0, 18600}
+};
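
Each al3010_scales[] entry is the lux value of a single ADC count for the corresponding gain range; multiplying by the 16-bit full scale approximates the range limits quoted in the al3xxxx_range comments above. A userspace-style arithmetic check (a sketch, not driver code):

    #include <stdio.h>

    /* 65535 counts times the per-count scale approximates the quoted
     * full-scale range, e.g. 65535 * 1.1872 ~= 77803 lx for range 1. */
    int main(void)
    {
            static const double scales[] = { 1.1872, 0.2968, 0.0742, 0.0186 };
            int i;

            for (i = 0; i < 4; i++)
                    printf("range %d: ~%.0f lx\n", i + 1, 65535 * scales[i]);

            return 0;
    }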
+
+struct al3010_data {
+ struct i2c_client *client;
+};
+
+static const struct iio_chan_spec al3010_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }
+};
+
+static IIO_CONST_ATTR(in_illuminance_scale_available, AL3010_SCALE_AVAILABLE);
+
+static struct attribute *al3010_attributes[] = {
+ &iio_const_attr_in_illuminance_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group al3010_attribute_group = {
+ .attrs = al3010_attributes,
+};
+
+static int al3010_set_pwr(struct i2c_client *client, bool pwr)
+{
+	u8 val = pwr ? AL3010_CONFIG_ENABLE : AL3010_CONFIG_DISABLE;
+
+	return i2c_smbus_write_byte_data(client, AL3010_REG_SYSTEM, val);
+}
+
+static void al3010_set_pwr_off(void *_data)
+{
+ struct al3010_data *data = _data;
+
+ al3010_set_pwr(data->client, false);
+}
+
+static int al3010_init(struct al3010_data *data)
+{
+ int ret;
+
+ ret = al3010_set_pwr(data->client, true);
+
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, AL3010_REG_CONFIG,
+ FIELD_PREP(AL3010_GAIN_MASK,
+ AL3XXX_RANGE_3));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int al3010_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct al3010_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * ALS ADC value is stored in two adjacent registers:
+ * - low byte of output is stored at AL3010_REG_DATA_LOW
+ * - high byte of output is stored at AL3010_REG_DATA_LOW + 1
+ */
+ ret = i2c_smbus_read_word_data(data->client,
+ AL3010_REG_DATA_LOW);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = i2c_smbus_read_byte_data(data->client,
+ AL3010_REG_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(AL3010_GAIN_MASK, ret);
+ *val = al3010_scales[ret][0];
+ *val2 = al3010_scales[ret][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+static int al3010_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct al3010_data *data = iio_priv(indio_dev);
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ for (i = 0; i < ARRAY_SIZE(al3010_scales); i++) {
+ if (val != al3010_scales[i][0] ||
+ val2 != al3010_scales[i][1])
+ continue;
+
+ return i2c_smbus_write_byte_data(data->client,
+ AL3010_REG_CONFIG,
+ FIELD_PREP(AL3010_GAIN_MASK, i));
+ }
+ break;
+ }
+ return -EINVAL;
+}
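
The driver manipulates the gain bits with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which derive the shift from the GENMASK()-defined mask instead of open-coding it. A small sketch of the round trip (the example_* helpers are hypothetical):

    #include <linux/bitfield.h>

    /* With AL3010_GAIN_MASK = GENMASK(6, 4): packing range index 2
     * yields 0x20, and unpacking 0x20 yields 2 again. */
    static u8 example_pack_gain(unsigned int range)
    {
            return FIELD_PREP(AL3010_GAIN_MASK, range);
    }

    static unsigned int example_unpack_gain(u8 reg)
    {
            return FIELD_GET(AL3010_GAIN_MASK, reg);
    }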
+
+static const struct iio_info al3010_info = {
+ .read_raw = al3010_read_raw,
+ .write_raw = al3010_write_raw,
+ .attrs = &al3010_attribute_group,
+};
+
+static int al3010_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct al3010_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &al3010_info;
+ indio_dev->name = AL3010_DRV_NAME;
+ indio_dev->channels = al3010_channels;
+ indio_dev->num_channels = ARRAY_SIZE(al3010_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = al3010_init(data);
+ if (ret < 0) {
+ dev_err(&client->dev, "al3010 chip init failed\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&client->dev,
+ al3010_set_pwr_off,
+ data);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int __maybe_unused al3010_suspend(struct device *dev)
+{
+ return al3010_set_pwr(to_i2c_client(dev), false);
+}
+
+static int __maybe_unused al3010_resume(struct device *dev)
+{
+ return al3010_set_pwr(to_i2c_client(dev), true);
+}
+
+static SIMPLE_DEV_PM_OPS(al3010_pm_ops, al3010_suspend, al3010_resume);
+
+static const struct i2c_device_id al3010_id[] = {
+ {"al3010", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, al3010_id);
+
+static const struct of_device_id al3010_of_match[] = {
+ { .compatible = "dynaimage,al3010", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, al3010_of_match);
+
+static struct i2c_driver al3010_driver = {
+ .driver = {
+ .name = AL3010_DRV_NAME,
+ .of_match_table = al3010_of_match,
+ .pm = &al3010_pm_ops,
+ },
+ .probe = al3010_probe,
+ .id_table = al3010_id,
+};
+module_i2c_driver(al3010_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@nxp.com>");
+MODULE_AUTHOR("David Heidelberg <david@ixit.cz>");
+MODULE_DESCRIPTION("AL3010 Ambient Light Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index a21aa99e74e4..20ed0a73c390 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -7,11 +7,15 @@
* IIO driver for AL3320A (7-bit I2C slave address 0x1C).
*
* TODO: interrupt support, thresholds
+ * Once the driver gains support for interrupt handling, the interrupt
+ * will need to be disabled before turning the sensor OFF in order to
+ * avoid potential races with the interrupt handler.
*/
-#include <linux/module.h>
-#include <linux/init.h>
+#include <linux/bitfield.h>
#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -36,8 +40,7 @@
#define AL3320A_CONFIG_DISABLE 0x00
#define AL3320A_CONFIG_ENABLE 0x01
-#define AL3320A_GAIN_SHIFT 1
-#define AL3320A_GAIN_MASK (BIT(2) | BIT(1))
+#define AL3320A_GAIN_MASK GENMASK(2, 1)
/* chip params default values */
#define AL3320A_DEFAULT_MEAN_TIME 4
@@ -79,18 +82,31 @@ static const struct attribute_group al3320a_attribute_group = {
.attrs = al3320a_attributes,
};
+static int al3320a_set_pwr(struct i2c_client *client, bool pwr)
+{
+	u8 val = pwr ? AL3320A_CONFIG_ENABLE : AL3320A_CONFIG_DISABLE;
+
+	return i2c_smbus_write_byte_data(client, AL3320A_REG_CONFIG, val);
+}
+
+static void al3320a_set_pwr_off(void *_data)
+{
+ struct al3320a_data *data = _data;
+
+ al3320a_set_pwr(data->client, false);
+}
+
static int al3320a_init(struct al3320a_data *data)
{
int ret;
- /* power on */
- ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_CONFIG,
- AL3320A_CONFIG_ENABLE);
+ ret = al3320a_set_pwr(data->client, true);
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_CONFIG_RANGE,
- AL3320A_RANGE_3 << AL3320A_GAIN_SHIFT);
+ FIELD_PREP(AL3320A_GAIN_MASK,
+ AL3320A_RANGE_3));
if (ret < 0)
return ret;
@@ -133,7 +149,7 @@ static int al3320a_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- ret = (ret & AL3320A_GAIN_MASK) >> AL3320A_GAIN_SHIFT;
+ ret = FIELD_GET(AL3320A_GAIN_MASK, ret);
*val = al3320a_scales[ret][0];
*val2 = al3320a_scales[ret][1];
@@ -152,11 +168,13 @@ static int al3320a_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
for (i = 0; i < ARRAY_SIZE(al3320a_scales); i++) {
- if (val == al3320a_scales[i][0] &&
- val2 == al3320a_scales[i][1])
- return i2c_smbus_write_byte_data(data->client,
+ if (val != al3320a_scales[i][0] ||
+ val2 != al3320a_scales[i][1])
+ continue;
+
+ return i2c_smbus_write_byte_data(data->client,
AL3320A_REG_CONFIG_RANGE,
- i << AL3320A_GAIN_SHIFT);
+ FIELD_PREP(AL3320A_GAIN_MASK, i));
}
break;
}
@@ -196,27 +214,47 @@ static int al3320a_probe(struct i2c_client *client,
dev_err(&client->dev, "al3320a chip init failed\n");
return ret;
}
+
+ ret = devm_add_action_or_reset(&client->dev,
+ al3320a_set_pwr_off,
+ data);
+ if (ret < 0)
+ return ret;
+
return devm_iio_device_register(&client->dev, indio_dev);
}
-static int al3320a_remove(struct i2c_client *client)
+static int __maybe_unused al3320a_suspend(struct device *dev)
+{
+ return al3320a_set_pwr(to_i2c_client(dev), false);
+}
+
+static int __maybe_unused al3320a_resume(struct device *dev)
{
- return i2c_smbus_write_byte_data(client, AL3320A_REG_CONFIG,
- AL3320A_CONFIG_DISABLE);
+ return al3320a_set_pwr(to_i2c_client(dev), true);
}
+static SIMPLE_DEV_PM_OPS(al3320a_pm_ops, al3320a_suspend, al3320a_resume);
+
static const struct i2c_device_id al3320a_id[] = {
{"al3320a", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, al3320a_id);
+static const struct of_device_id al3320a_of_match[] = {
+ { .compatible = "dynaimage,al3320a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, al3320a_of_match);
+
static struct i2c_driver al3320a_driver = {
.driver = {
.name = AL3320A_DRV_NAME,
+ .of_match_table = al3320a_of_match,
+ .pm = &al3320a_pm_ops,
},
.probe = al3320a_probe,
- .remove = al3320a_remove,
.id_table = al3320a_id,
};
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index 7a838e2956f4..2198b50909ed 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -177,10 +177,14 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
+ cros_ec_sensors_capture,
+ cros_ec_sensors_push_data);
if (ret)
return ret;
+ iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
+
indio_dev->info = &cros_ec_light_prox_info;
state = iio_priv(indio_dev);
state->core.type = state->core.resp->info.type;
@@ -189,8 +193,7 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
/* Common part */
channel->info_mask_shared_by_all =
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_FREQUENCY);
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->info_mask_shared_by_all_available =
BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
@@ -236,11 +239,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
@@ -258,7 +256,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids);
static struct platform_driver cros_ec_light_prox_platform_driver = {
.driver = {
.name = "cros-ec-light-prox",
- .pm = &cros_ec_sensors_pm_ops,
},
.probe = cros_ec_light_prox_probe,
.id_table = cros_ec_light_prox_ids,
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
new file mode 100644
index 000000000000..b7ef16b28280
--- /dev/null
+++ b/drivers/iio/light/gp2ap002.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * These are the two Sharp GP2AP002 variants supported by this driver:
+ * GP2AP002A00F Ambient Light and Proximity Sensor
+ * GP2AP002S00F Proximity Sensor
+ *
+ * Copyright (C) 2020 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based partly on the code in Sony Ericsson's GP2AP00200F driver by
+ * Courtney Cavin and Oskar Andero in drivers/input/misc/gp2ap002a00f.c
+ * Based partly on a Samsung misc driver submitted by
+ * Donggeun Kim & Minkyu Kang in 2011:
+ * https://lore.kernel.org/lkml/1315556546-7445-1-git-send-email-dg77.kim@samsung.com/
+ * Based partly on a submission by
+ * Jonathan Bakker and Paweł Chmiel in January 2019:
+ * https://lore.kernel.org/linux-input/20190125175045.22576-1-pawel.mikolaj.chmiel@gmail.com/
+ * Based partly on code from the Samsung GT-S7710 by <mjchen@sta.samsung.com>
+ * Based partly on the code in LG Electronics GP2AP00200F driver by
+ * Kenobi Lee <sungyoung.lee@lge.com> and EunYoung Cho <ey.cho@lge.com>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/consumer.h> /* To get our ADC channel */
+#include <linux/iio/types.h> /* To deal with our ADC channel */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/bits.h>
+#include <linux/math64.h>
+#include <linux/pm.h>
+
+#define GP2AP002_PROX_CHANNEL 0
+#define GP2AP002_ALS_CHANNEL 1
+
+/* ------------------------------------------------------------------------ */
+/* ADDRESS SYMBOL DATA Init R/W */
+/* D7 D6 D5 D4 D3 D2 D1 D0 */
+/* ------------------------------------------------------------------------ */
+/* 0 PROX X X X X X X X VO H'00 R */
+/* 1 GAIN X X X X LED0 X X X H'00 W */
+/* 2 HYS HYSD HYSC1 HYSC0 X HYSF3 HYSF2 HYSF1 HYSF0 H'00 W */
+/* 3 CYCLE X X CYCL2 CYCL1 CYCL0 OSC2 X X H'00 W */
+/* 4 OPMOD X X X ASD X X VCON SSD H'00 W */
+/* 6 CON X X X OCON1 OCON0 X X X H'00 W */
+/* ------------------------------------------------------------------------ */
+/* VO :Proximity sensing result(0: no detection, 1: detection) */
+/* LED0 :Select switch for LED driver's on-resistance(0:2x higher, 1:normal)*/
+/* HYSD/HYSF :Adjusts the receiver sensitivity */
+/* OSC :Select switch for internal clock frequency hopping(0:effective) */
+/* CYCL :Determine the detection cycle(typically 8ms, up to 128x) */
+/* SSD :Software Shutdown function(0:shutdown, 1:operating) */
+/* VCON :VOUT output method control(0:normal, 1:interrupt) */
+/* ASD :Select switch for analog sleep function(0:ineffective, 1:effective)*/
+/* OCON :Select switch for enabling/disabling VOUT (00:enable, 11:disable) */
+
+#define GP2AP002_PROX 0x00
+#define GP2AP002_GAIN 0x01
+#define GP2AP002_HYS 0x02
+#define GP2AP002_CYCLE 0x03
+#define GP2AP002_OPMOD 0x04
+#define GP2AP002_CON 0x06
+
+#define GP2AP002_PROX_VO_DETECT BIT(0)
+
+/* Setting this bit to 0 means 2x higher LED resistance */
+#define GP2AP002_GAIN_LED_NORMAL BIT(3)
+
+/*
+ * These bits adjust the proximity sensitivity, determining characteristics
+ * of the detection distance and its hysteresis.
+ */
+#define GP2AP002_HYS_HYSD_SHIFT 7
+#define GP2AP002_HYS_HYSD_MASK BIT(7)
+#define GP2AP002_HYS_HYSC_SHIFT 5
+#define GP2AP002_HYS_HYSC_MASK GENMASK(6, 5)
+#define GP2AP002_HYS_HYSF_SHIFT 0
+#define GP2AP002_HYS_HYSF_MASK GENMASK(3, 0)
+#define GP2AP002_HYS_MASK (GP2AP002_HYS_HYSD_MASK | \
+ GP2AP002_HYS_HYSC_MASK | \
+ GP2AP002_HYS_HYSF_MASK)
+
+/*
+ * These values determine the detection cycle response time
+ * 0: 8ms, 1: 16ms, 2: 32ms, 3: 64ms, 4: 128ms,
+ * 5: 256ms, 6: 512ms, 7: 1024ms
+ */
+#define GP2AP002_CYCLE_CYCL_SHIFT 3
+#define GP2AP002_CYCLE_CYCL_MASK GENMASK(5, 3)
+
+/*
+ * Select switch for internal clock frequency hopping
+ * 0: effective,
+ * 1: ineffective
+ */
+#define GP2AP002_CYCLE_OSC_EFFECTIVE 0
+#define GP2AP002_CYCLE_OSC_INEFFECTIVE BIT(2)
+#define GP2AP002_CYCLE_OSC_MASK BIT(2)
+
+/* Analog sleep effective */
+#define GP2AP002_OPMOD_ASD BIT(4)
+/* Enable chip */
+#define GP2AP002_OPMOD_SSD_OPERATING BIT(0)
+/* IRQ mode */
+#define GP2AP002_OPMOD_VCON_IRQ BIT(1)
+#define GP2AP002_OPMOD_MASK (BIT(0) | BIT(1) | BIT(4))
+
+/*
+ * Select switch for enabling/disabling Vout pin
+ * 0: enable
+ * 2: force to go Low
+ * 3: force to go High
+ */
+#define GP2AP002_CON_OCON_SHIFT 3
+#define GP2AP002_CON_OCON_ENABLE (0x0 << GP2AP002_CON_OCON_SHIFT)
+#define GP2AP002_CON_OCON_LOW (0x2 << GP2AP002_CON_OCON_SHIFT)
+#define GP2AP002_CON_OCON_HIGH (0x3 << GP2AP002_CON_OCON_SHIFT)
+#define GP2AP002_CON_OCON_MASK (0x3 << GP2AP002_CON_OCON_SHIFT)
+
+/**
+ * struct gp2ap002 - GP2AP002 state
+ * @map: regmap pointer for the i2c regmap
+ * @dev: pointer to parent device
+ * @vdd: regulator controlling VDD
+ * @vio: regulator controlling VIO
+ * @alsout: IIO ADC channel to convert the ALSOUT signal
+ * @hys_far: hysteresis control from device tree
+ * @hys_close: hysteresis control from device tree
+ * @is_gp2ap002s00f: this is the GP2AP002S00F variant of the chip
+ * @irq: the IRQ line used by this device
+ * @enabled: we cannot read the status of the hardware so we need to
+ * keep track of whether the event is enabled using this state variable
+ */
+struct gp2ap002 {
+ struct regmap *map;
+ struct device *dev;
+ struct regulator *vdd;
+ struct regulator *vio;
+ struct iio_channel *alsout;
+ u8 hys_far;
+ u8 hys_close;
+ bool is_gp2ap002s00f;
+ int irq;
+ bool enabled;
+};
+
+static irqreturn_t gp2ap002_prox_irq(int irq, void *d)
+{
+ struct iio_dev *indio_dev = d;
+ struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
+ u64 ev;
+ int val;
+ int ret;
+
+ ret = regmap_read(gp2ap002->map, GP2AP002_PROX, &val);
+ if (ret) {
+ dev_err(gp2ap002->dev, "error reading proximity\n");
+ goto err_retrig;
+ }
+
+ if (val & GP2AP002_PROX_VO_DETECT) {
+ /* Close */
+ dev_dbg(gp2ap002->dev, "close\n");
+ ret = regmap_write(gp2ap002->map, GP2AP002_HYS,
+ gp2ap002->hys_far);
+ if (ret)
+ dev_err(gp2ap002->dev,
+ "error setting up proximity hysteresis\n");
+ ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, GP2AP002_PROX_CHANNEL,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING);
+ } else {
+ /* Far */
+ dev_dbg(gp2ap002->dev, "far\n");
+ ret = regmap_write(gp2ap002->map, GP2AP002_HYS,
+ gp2ap002->hys_close);
+ if (ret)
+ dev_err(gp2ap002->dev,
+ "error setting up proximity hysteresis\n");
+ ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, GP2AP002_PROX_CHANNEL,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING);
+ }
+ iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev));
+
+ /*
+ * After changing hysteresis, we need to wait for one detection
+ * cycle to see if anything changed, or we will just trigger the
+	 * previous interrupt again. The detection cycle depends on the CYCLE
+	 * register; we hard-code ~8 ms in probe(), so wait somewhat longer
+	 * than that, 20-30 ms.
+ */
+ usleep_range(20000, 30000);
+
+err_retrig:
+ ret = regmap_write(gp2ap002->map, GP2AP002_CON,
+ GP2AP002_CON_OCON_ENABLE);
+ if (ret)
+ dev_err(gp2ap002->dev, "error setting up VOUT control\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This array maps current and lux.
+ *
+ * Ambient light sensing range is 3 to 55000 lux.
+ *
+ * This mapping is based on the following formula.
+ * illuminance = 10 ^ (current[mA] / 10)
+ *
+ * When the ADC measures 0, return 0 lux.
+ */
+static const u16 gp2ap002_illuminance_table[] = {
+ 0, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 16, 20, 25, 32, 40, 50, 63, 79,
+ 100, 126, 158, 200, 251, 316, 398, 501, 631, 794, 1000, 1259, 1585,
+ 1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000, 12589, 15849, 19953,
+ 25119, 31623, 39811, 50119,
+};
+
+static int gp2ap002_get_lux(struct gp2ap002 *gp2ap002)
+{
+ int ret, res;
+ u16 lux;
+
+ ret = iio_read_channel_processed(gp2ap002->alsout, &res);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(gp2ap002->dev, "read %d mA from ADC\n", res);
+
+ /* ensure we don't under/overflow */
+ res = clamp(res, 0, (int)ARRAY_SIZE(gp2ap002_illuminance_table) - 1);
+ lux = gp2ap002_illuminance_table[res];
+
+ return (int)lux;
+}
+
+static int gp2ap002_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = gp2ap002_get_lux(gp2ap002);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int gp2ap002_init(struct gp2ap002 *gp2ap002)
+{
+ int ret;
+
+ /* Set up the IR LED resistance */
+ ret = regmap_write(gp2ap002->map, GP2AP002_GAIN,
+ GP2AP002_GAIN_LED_NORMAL);
+ if (ret) {
+ dev_err(gp2ap002->dev, "error setting up LED gain\n");
+ return ret;
+ }
+ ret = regmap_write(gp2ap002->map, GP2AP002_HYS, gp2ap002->hys_far);
+ if (ret) {
+ dev_err(gp2ap002->dev,
+ "error setting up proximity hysteresis\n");
+ return ret;
+ }
+
+ /* Disable internal frequency hopping */
+ ret = regmap_write(gp2ap002->map, GP2AP002_CYCLE,
+ GP2AP002_CYCLE_OSC_INEFFECTIVE);
+ if (ret) {
+ dev_err(gp2ap002->dev,
+ "error setting up internal frequency hopping\n");
+ return ret;
+ }
+
+ /* Enable chip and IRQ, disable analog sleep */
+ ret = regmap_write(gp2ap002->map, GP2AP002_OPMOD,
+ GP2AP002_OPMOD_SSD_OPERATING |
+ GP2AP002_OPMOD_VCON_IRQ);
+ if (ret) {
+ dev_err(gp2ap002->dev, "error setting up operation mode\n");
+ return ret;
+ }
+
+ /* Interrupt on VOUT enabled */
+ ret = regmap_write(gp2ap002->map, GP2AP002_CON,
+ GP2AP002_CON_OCON_ENABLE);
+ if (ret)
+ dev_err(gp2ap002->dev, "error setting up VOUT control\n");
+
+ return ret;
+}
+
+static int gp2ap002_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
+
+ /*
+ * We just keep track of this internally, as it is not possible to
+ * query the hardware.
+ */
+ return gp2ap002->enabled;
+}
+
+static int gp2ap002_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
+
+ if (state) {
+ /*
+		 * This will bring the regulators up (unless they are already
+		 * on) and reinitialize the sensor by using runtime PM
+		 * callbacks.
+ */
+ pm_runtime_get_sync(gp2ap002->dev);
+ gp2ap002->enabled = true;
+ } else {
+ pm_runtime_mark_last_busy(gp2ap002->dev);
+ pm_runtime_put_autosuspend(gp2ap002->dev);
+ gp2ap002->enabled = false;
+ }
+
+ return 0;
+}
+
+static const struct iio_info gp2ap002_info = {
+ .read_raw = gp2ap002_read_raw,
+ .read_event_config = gp2ap002_read_event_config,
+ .write_event_config = gp2ap002_write_event_config,
+};
+
+static const struct iio_event_spec gp2ap002_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec gp2ap002_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .event_spec = gp2ap002_events,
+ .num_event_specs = ARRAY_SIZE(gp2ap002_events),
+ },
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .channel = GP2AP002_ALS_CHANNEL,
+ },
+};
+
+/*
+ * We need a special regmap because this hardware expects single-byte
+ * register writes, but reads on some variants return a 16-bit word of
+ * which the lower 8 bits must be discarded, so we selectively combine
+ * i2c_smbus_read_word_data() with i2c_smbus_write_byte_data() like
+ * this.
+ */
+static int gp2ap002_regmap_i2c_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_smbus_read_word_data(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = (ret >> 8) & 0xFF;
+
+ return 0;
+}
+
+static int gp2ap002_regmap_i2c_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static struct regmap_bus gp2ap002_regmap_bus = {
+ .reg_read = gp2ap002_regmap_i2c_read,
+ .reg_write = gp2ap002_regmap_i2c_write,
+};
+
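The custom regmap bus exists because reads must go through i2c_smbus_read_word_data() while writes stay single-byte; only the high byte of the returned word carries the register value. A standalone sketch of the extraction done in gp2ap002_regmap_i2c_read():

    /* Keep only the high byte of the 16-bit SMBus word,
     * e.g. 0xAB00 -> 0xAB. */
    static unsigned int example_extract_reg(int word)
    {
            return (word >> 8) & 0xFF;
    }
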
+static int gp2ap002_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct gp2ap002 *gp2ap002;
+ struct iio_dev *indio_dev;
+ struct device *dev = &client->dev;
+ enum iio_chan_type ch_type;
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = GP2AP002_CON,
+ };
+ struct regmap *regmap;
+ int num_chan;
+ const char *compat;
+ u8 val;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*gp2ap002));
+ if (!indio_dev)
+ return -ENOMEM;
+ i2c_set_clientdata(client, indio_dev);
+
+ gp2ap002 = iio_priv(indio_dev);
+ gp2ap002->dev = dev;
+
+ /*
+	 * Checking the device compatible like this makes it possible to use
+ * ACPI PRP0001 for registering the sensor using device tree
+ * properties.
+ */
+ ret = device_property_read_string(dev, "compatible", &compat);
+ if (ret) {
+ dev_err(dev, "cannot check compatible\n");
+ return ret;
+ }
+ gp2ap002->is_gp2ap002s00f = !strcmp(compat, "sharp,gp2ap002s00f");
+
+ regmap = devm_regmap_init(dev, &gp2ap002_regmap_bus, dev, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+ gp2ap002->map = regmap;
+
+ /*
+ * The hysteresis settings are coded into the device tree as values
+ * to be written into the hysteresis register. The datasheet defines
+ * modes "A", "B1" and "B2" with fixed values to be use but vendor
+ * code trees for actual devices are tweaking these values and refer to
+ * modes named things like "B1.5". To be able to support any devices,
+ * we allow passing an arbitrary hysteresis setting for "near" and
+ * "far".
+ */
+
+ /* Check the device tree for the IR LED hysteresis */
+ ret = device_property_read_u8(dev, "sharp,proximity-far-hysteresis",
+ &val);
+ if (ret) {
+ dev_err(dev, "failed to obtain proximity far setting\n");
+ return ret;
+ }
+ dev_dbg(dev, "proximity far setting %02x\n", val);
+ gp2ap002->hys_far = val;
+
+ ret = device_property_read_u8(dev, "sharp,proximity-close-hysteresis",
+ &val);
+ if (ret) {
+ dev_err(dev, "failed to obtain proximity close setting\n");
+ return ret;
+ }
+ dev_dbg(dev, "proximity close setting %02x\n", val);
+ gp2ap002->hys_close = val;
+
+ /* The GP2AP002A00F has a light sensor too */
+ if (!gp2ap002->is_gp2ap002s00f) {
+ gp2ap002->alsout = devm_iio_channel_get(dev, "alsout");
+ if (IS_ERR(gp2ap002->alsout)) {
+ if (PTR_ERR(gp2ap002->alsout) == -ENODEV) {
+ dev_err(dev, "no ADC, deferring...\n");
+ return -EPROBE_DEFER;
+ }
+ dev_err(dev, "failed to get ALSOUT ADC channel\n");
+ return PTR_ERR(gp2ap002->alsout);
+ }
+ ret = iio_get_channel_type(gp2ap002->alsout, &ch_type);
+ if (ret < 0)
+ return ret;
+ if (ch_type != IIO_CURRENT) {
+ dev_err(dev,
+ "wrong type of IIO channel specified for ALSOUT\n");
+ return -EINVAL;
+ }
+ }
+
+ gp2ap002->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(gp2ap002->vdd)) {
+ dev_err(dev, "failed to get VDD regulator\n");
+ return PTR_ERR(gp2ap002->vdd);
+ }
+ gp2ap002->vio = devm_regulator_get(dev, "vio");
+ if (IS_ERR(gp2ap002->vio)) {
+ dev_err(dev, "failed to get VIO regulator\n");
+ return PTR_ERR(gp2ap002->vio);
+ }
+
+ /* Operating voltage 2.4V .. 3.6V according to datasheet */
+ ret = regulator_set_voltage(gp2ap002->vdd, 2400000, 3600000);
+ if (ret) {
+ dev_err(dev, "failed to sett VDD voltage\n");
+ return ret;
+ }
+
+ /* VIO should be between 1.65V and VDD */
+ ret = regulator_get_voltage(gp2ap002->vdd);
+ if (ret < 0) {
+ dev_err(dev, "failed to get VDD voltage\n");
+ return ret;
+ }
+ ret = regulator_set_voltage(gp2ap002->vio, 1650000, ret);
+ if (ret) {
+ dev_err(dev, "failed to set VIO voltage\n");
+ return ret;
+ }
+
+ ret = regulator_enable(gp2ap002->vdd);
+ if (ret) {
+ dev_err(dev, "failed to enable VDD regulator\n");
+ return ret;
+ }
+ ret = regulator_enable(gp2ap002->vio);
+ if (ret) {
+ dev_err(dev, "failed to enable VIO regulator\n");
+ goto out_disable_vdd;
+ }
+
+ msleep(20);
+
+ /*
+ * Initialize the device and signal to runtime PM that now we are
+	 * definitely up and using power.
+ */
+ ret = gp2ap002_init(gp2ap002);
+ if (ret) {
+ dev_err(dev, "initialization failed\n");
+ goto out_disable_vio;
+ }
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ gp2ap002->enabled = false;
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ gp2ap002_prox_irq, IRQF_ONESHOT,
+ "gp2ap002", indio_dev);
+ if (ret) {
+ dev_err(dev, "unable to request IRQ\n");
+ goto out_disable_vio;
+ }
+ gp2ap002->irq = client->irq;
+
+ /*
+ * As the device takes 20 ms + regulator delay to come up with a fresh
+ * measurement after power-on, do not shut it down unnecessarily.
+	 * Set the autosuspend delay to one second.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &gp2ap002_info;
+ indio_dev->name = "gp2ap002";
+ indio_dev->channels = gp2ap002_channels;
+ /* Skip light channel for the proximity-only sensor */
+ num_chan = ARRAY_SIZE(gp2ap002_channels);
+ if (gp2ap002->is_gp2ap002s00f)
+ num_chan--;
+ indio_dev->num_channels = num_chan;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto out_disable_pm;
+ dev_dbg(dev, "Sharp GP2AP002 probed successfully\n");
+
+ return 0;
+
+out_disable_pm:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+out_disable_vio:
+ regulator_disable(gp2ap002->vio);
+out_disable_vdd:
+ regulator_disable(gp2ap002->vdd);
+ return ret;
+}
+
+static int gp2ap002_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
+ struct device *dev = &client->dev;
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ iio_device_unregister(indio_dev);
+ regulator_disable(gp2ap002->vio);
+ regulator_disable(gp2ap002->vdd);
+
+ return 0;
+}
+
+static int __maybe_unused gp2ap002_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
+ int ret;
+
+ /* Deactivate the IRQ */
+ disable_irq(gp2ap002->irq);
+
+ /* Disable chip and IRQ, everything off */
+ ret = regmap_write(gp2ap002->map, GP2AP002_OPMOD, 0x00);
+ if (ret) {
+ dev_err(gp2ap002->dev, "error setting up operation mode\n");
+ return ret;
+ }
+ /*
+	 * As these regulators may be shared, the chip is at least in
+	 * sleep mode even if the regulators aren't really turned off.
+ */
+ regulator_disable(gp2ap002->vio);
+ regulator_disable(gp2ap002->vdd);
+
+ return 0;
+}
+
+static int __maybe_unused gp2ap002_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(gp2ap002->vdd);
+ if (ret) {
+ dev_err(dev, "failed to enable VDD regulator in resume path\n");
+ return ret;
+ }
+ ret = regulator_enable(gp2ap002->vio);
+ if (ret) {
+ dev_err(dev, "failed to enable VIO regulator in resume path\n");
+ return ret;
+ }
+
+ msleep(20);
+
+ ret = gp2ap002_init(gp2ap002);
+ if (ret) {
+ dev_err(dev, "re-initialization failed\n");
+ return ret;
+ }
+
+ /* Re-activate the IRQ */
+ enable_irq(gp2ap002->irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops gp2ap002_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(gp2ap002_runtime_suspend,
+ gp2ap002_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id gp2ap002_id_table[] = {
+ { "gp2ap002", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, gp2ap002_id_table);
+
+static const struct of_device_id gp2ap002_of_match[] = {
+ { .compatible = "sharp,gp2ap002a00f" },
+ { .compatible = "sharp,gp2ap002s00f" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gp2ap002_of_match);
+
+static struct i2c_driver gp2ap002_driver = {
+ .driver = {
+ .name = "gp2ap002",
+ .of_match_table = gp2ap002_of_match,
+ .pm = &gp2ap002_dev_pm_ops,
+ },
+ .probe = gp2ap002_probe,
+ .remove = gp2ap002_remove,
+ .id_table = gp2ap002_id_table,
+};
+module_i2c_driver(gp2ap002_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("GP2AP002 ambient light and proximity sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 4d70c5bf35da..7fbbce0d4bc7 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1390,6 +1390,12 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev)
mutex_lock(&data->lock);
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0) {
+ mutex_unlock(&data->lock);
+ return err;
+ }
+
/*
* Enable triggers according to the scan_mask. Enabling either
* LIGHT_CLEAR or LIGHT_IR scan mode results in enabling ALS
@@ -1420,14 +1426,12 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev)
goto error_unlock;
data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (!data->buffer) {
+ if (!data->buffer)
err = -ENOMEM;
- goto error_unlock;
- }
-
- err = iio_triggered_buffer_postenable(indio_dev);
error_unlock:
+ if (err < 0)
+ iio_triggered_buffer_predisable(indio_dev);
mutex_unlock(&data->lock);
return err;
@@ -1436,14 +1440,10 @@ error_unlock:
static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev)
{
struct gp2ap020a00f_data *data = iio_priv(indio_dev);
- int i, err;
+ int i, err = 0;
mutex_lock(&data->lock);
- err = iio_triggered_buffer_predisable(indio_dev);
- if (err < 0)
- goto error_unlock;
-
for_each_set_bit(i, indio_dev->active_scan_mask,
indio_dev->masklength) {
switch (i) {
@@ -1465,7 +1465,8 @@ static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev)
if (err == 0)
kfree(data->buffer);
-error_unlock:
+ iio_triggered_buffer_predisable(indio_dev);
+
mutex_unlock(&data->lock);
return err;
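
The net effect of the two hunks above is that the driver's own enable/disable work is now bracketed inside the triggered-buffer core calls: postenable attaches the core machinery first and unwinds it if the driver-specific setup fails, while predisable performs the driver teardown first and detaches the core last. A hedged sketch of the resulting ordering, with the driver-specific steps reduced to a placeholder:

    static int example_postenable(struct iio_dev *indio_dev)
    {
            int err;

            err = iio_triggered_buffer_postenable(indio_dev);
            if (err < 0)
                    return err;

            err = 0; /* driver-specific enable work goes here */
            if (err < 0)
                    iio_triggered_buffer_predisable(indio_dev);

            return err;
    }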
diff --git a/drivers/iio/light/iqs621-als.c b/drivers/iio/light/iqs621-als.c
new file mode 100644
index 000000000000..b2988a782bd0
--- /dev/null
+++ b/drivers/iio/light/iqs621-als.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS621/622 Ambient Light Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/device.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IQS621_ALS_FLAGS_LIGHT BIT(7)
+#define IQS621_ALS_FLAGS_RANGE GENMASK(3, 0)
+
+#define IQS621_ALS_UI_OUT 0x17
+
+#define IQS621_ALS_THRESH_DARK 0x80
+#define IQS621_ALS_THRESH_LIGHT 0x81
+
+#define IQS622_IR_RANGE 0x15
+#define IQS622_IR_FLAGS 0x16
+#define IQS622_IR_FLAGS_TOUCH BIT(1)
+#define IQS622_IR_FLAGS_PROX BIT(0)
+
+#define IQS622_IR_UI_OUT 0x17
+
+#define IQS622_IR_THRESH_PROX 0x91
+#define IQS622_IR_THRESH_TOUCH 0x92
+
+struct iqs621_als_private {
+ struct iqs62x_core *iqs62x;
+ struct notifier_block notifier;
+ struct mutex lock;
+ bool light_en;
+ bool range_en;
+ bool prox_en;
+ u8 als_flags;
+ u8 ir_flags_mask;
+ u8 ir_flags;
+ u8 thresh_light;
+ u8 thresh_dark;
+ u8 thresh_prox;
+};
+
+static int iqs621_als_init(struct iqs621_als_private *iqs621_als)
+{
+ struct iqs62x_core *iqs62x = iqs621_als->iqs62x;
+ unsigned int event_mask = 0;
+ int ret;
+
+ switch (iqs621_als->ir_flags_mask) {
+ case IQS622_IR_FLAGS_TOUCH:
+ ret = regmap_write(iqs62x->regmap, IQS622_IR_THRESH_TOUCH,
+ iqs621_als->thresh_prox);
+ break;
+
+ case IQS622_IR_FLAGS_PROX:
+ ret = regmap_write(iqs62x->regmap, IQS622_IR_THRESH_PROX,
+ iqs621_als->thresh_prox);
+ break;
+
+ default:
+ ret = regmap_write(iqs62x->regmap, IQS621_ALS_THRESH_LIGHT,
+ iqs621_als->thresh_light);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(iqs62x->regmap, IQS621_ALS_THRESH_DARK,
+ iqs621_als->thresh_dark);
+ }
+
+ if (ret)
+ return ret;
+
+ if (iqs621_als->light_en || iqs621_als->range_en)
+ event_mask |= iqs62x->dev_desc->als_mask;
+
+ if (iqs621_als->prox_en)
+ event_mask |= iqs62x->dev_desc->ir_mask;
+
+ return regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ event_mask, 0);
+}
+
+static int iqs621_als_notifier(struct notifier_block *notifier,
+ unsigned long event_flags, void *context)
+{
+ struct iqs62x_event_data *event_data = context;
+ struct iqs621_als_private *iqs621_als;
+ struct iio_dev *indio_dev;
+ bool light_new, light_old;
+ bool prox_new, prox_old;
+ u8 range_new, range_old;
+ s64 timestamp;
+ int ret;
+
+ iqs621_als = container_of(notifier, struct iqs621_als_private,
+ notifier);
+ indio_dev = iio_priv_to_dev(iqs621_als);
+ timestamp = iio_get_time_ns(indio_dev);
+
+ mutex_lock(&iqs621_als->lock);
+
+ if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) {
+ ret = iqs621_als_init(iqs621_als);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to re-initialize device: %d\n", ret);
+ ret = NOTIFY_BAD;
+ } else {
+ ret = NOTIFY_OK;
+ }
+
+ goto err_mutex;
+ }
+
+ if (!iqs621_als->light_en && !iqs621_als->range_en &&
+ !iqs621_als->prox_en) {
+ ret = NOTIFY_DONE;
+ goto err_mutex;
+ }
+
+ /* IQS621 only */
+ light_new = event_data->als_flags & IQS621_ALS_FLAGS_LIGHT;
+ light_old = iqs621_als->als_flags & IQS621_ALS_FLAGS_LIGHT;
+
+ if (iqs621_als->light_en && light_new && !light_old)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ else if (iqs621_als->light_en && !light_new && light_old)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ /* IQS621 and IQS622 */
+ range_new = event_data->als_flags & IQS621_ALS_FLAGS_RANGE;
+ range_old = iqs621_als->als_flags & IQS621_ALS_FLAGS_RANGE;
+
+ if (iqs621_als->range_en && (range_new > range_old))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ else if (iqs621_als->range_en && (range_new < range_old))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ /* IQS622 only */
+ prox_new = event_data->ir_flags & iqs621_als->ir_flags_mask;
+ prox_old = iqs621_als->ir_flags & iqs621_als->ir_flags_mask;
+
+ if (iqs621_als->prox_en && prox_new && !prox_old)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ else if (iqs621_als->prox_en && !prox_new && prox_old)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ iqs621_als->als_flags = event_data->als_flags;
+ iqs621_als->ir_flags = event_data->ir_flags;
+ ret = NOTIFY_OK;
+
+err_mutex:
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
+
+static void iqs621_als_notifier_unregister(void *context)
+{
+ struct iqs621_als_private *iqs621_als = context;
+ struct iio_dev *indio_dev = iio_priv_to_dev(iqs621_als);
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(&iqs621_als->iqs62x->nh,
+ &iqs621_als->notifier);
+ if (ret)
+ dev_err(indio_dev->dev.parent,
+ "Failed to unregister notifier: %d\n", ret);
+}
+
+static int iqs621_als_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs621_als->iqs62x;
+ int ret;
+ __le16 val_buf;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ ret = regmap_read(iqs62x->regmap, chan->address, val);
+ if (ret)
+ return ret;
+
+ *val &= IQS621_ALS_FLAGS_RANGE;
+ return IIO_VAL_INT;
+
+ case IIO_PROXIMITY:
+ case IIO_LIGHT:
+ ret = regmap_raw_read(iqs62x->regmap, chan->address, &val_buf,
+ sizeof(val_buf));
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(val_buf);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iqs621_als_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&iqs621_als->lock);
+
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = iqs621_als->light_en;
+ break;
+
+ case IIO_INTENSITY:
+ ret = iqs621_als->range_en;
+ break;
+
+ case IIO_PROXIMITY:
+ ret = iqs621_als->prox_en;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
+
+static int iqs621_als_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs621_als->iqs62x;
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&iqs621_als->lock);
+
+ ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->als_flags, &val);
+ if (ret)
+ goto err_mutex;
+ iqs621_als->als_flags = val;
+
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ iqs62x->dev_desc->als_mask,
+ iqs621_als->range_en || state ? 0 :
+ 0xFF);
+ if (!ret)
+ iqs621_als->light_en = state;
+ break;
+
+ case IIO_INTENSITY:
+ ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ iqs62x->dev_desc->als_mask,
+ iqs621_als->light_en || state ? 0 :
+ 0xFF);
+ if (!ret)
+ iqs621_als->range_en = state;
+ break;
+
+ case IIO_PROXIMITY:
+ ret = regmap_read(iqs62x->regmap, IQS622_IR_FLAGS, &val);
+ if (ret)
+ goto err_mutex;
+ iqs621_als->ir_flags = val;
+
+ ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ iqs62x->dev_desc->ir_mask,
+ state ? 0 : 0xFF);
+ if (!ret)
+ iqs621_als->prox_en = state;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+err_mutex:
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
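/*
 * Editorial note, not part of this patch: the IQS621/622 expose a single
 * event-mask bit shared by the light and range consumers, so the bit may
 * only be set (0xFF, events masked) once *both* users are disabled.  A
 * minimal sketch of that rule with a hypothetical helper name:
 */
static inline unsigned int iqs621_als_mask_val(bool light_en, bool range_en)
{
	/* 0 = events unmasked, 0xFF = events masked */
	return (light_en || range_en) ? 0 : 0xFF;
}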
+
+static int iqs621_als_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ int ret = IIO_VAL_INT;
+
+ mutex_lock(&iqs621_als->lock);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = iqs621_als->thresh_light * 16;
+ break;
+
+ case IIO_EV_DIR_FALLING:
+ *val = iqs621_als->thresh_dark * 4;
+ break;
+
+ case IIO_EV_DIR_EITHER:
+ if (iqs621_als->ir_flags_mask == IQS622_IR_FLAGS_TOUCH)
+ *val = iqs621_als->thresh_prox * 4;
+ else
+ *val = iqs621_als->thresh_prox;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
+
+static int iqs621_als_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs621_als->iqs62x;
+ unsigned int thresh_reg, thresh_val;
+ u8 ir_flags_mask, *thresh_cache;
+ int ret = -EINVAL;
+
+ mutex_lock(&iqs621_als->lock);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ thresh_reg = IQS621_ALS_THRESH_LIGHT;
+ thresh_val = val / 16;
+
+ thresh_cache = &iqs621_als->thresh_light;
+ ir_flags_mask = 0;
+ break;
+
+ case IIO_EV_DIR_FALLING:
+ thresh_reg = IQS621_ALS_THRESH_DARK;
+ thresh_val = val / 4;
+
+ thresh_cache = &iqs621_als->thresh_dark;
+ ir_flags_mask = 0;
+ break;
+
+ case IIO_EV_DIR_EITHER:
+ /*
+ * The IQS622 supports two detection thresholds, both measured
+ * in the same arbitrary units reported by read_raw: proximity
+ * (0 through 255 in steps of 1), and touch (0 through 1020 in
+ * steps of 4).
+ *
+ * Based on the single detection threshold chosen by the user,
+ * select the hardware threshold that gives the best trade-off
+ * between range and resolution.
+ *
+ * By default, the close-range (but coarse) touch threshold is
+ * chosen during probe.
+ */
+ switch (val) {
+ case 0 ... 255:
+ thresh_reg = IQS622_IR_THRESH_PROX;
+ thresh_val = val;
+
+ ir_flags_mask = IQS622_IR_FLAGS_PROX;
+ break;
+
+ case 256 ... 1020:
+ thresh_reg = IQS622_IR_THRESH_TOUCH;
+ thresh_val = val / 4;
+
+ ir_flags_mask = IQS622_IR_FLAGS_TOUCH;
+ break;
+
+ default:
+ goto err_mutex;
+ }
+
+ thresh_cache = &iqs621_als->thresh_prox;
+ break;
+
+ default:
+ goto err_mutex;
+ }
+
+ if (thresh_val > 0xFF)
+ goto err_mutex;
+
+ ret = regmap_write(iqs62x->regmap, thresh_reg, thresh_val);
+ if (ret)
+ goto err_mutex;
+
+ *thresh_cache = thresh_val;
+ iqs621_als->ir_flags_mask = ir_flags_mask;
+
+err_mutex:
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
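/*
 * Editorial example, not part of this patch: the unified threshold maps
 * onto the two hardware registers as described above -- writing 200 lands
 * in the fine proximity register unchanged, while writing 500 lands in the
 * coarse touch register as 500 / 4 = 125.  A stand-alone sketch with
 * hypothetical names:
 */
static int iqs622_pick_thresh(int val, bool *use_touch, unsigned int *reg_val)
{
	if (val < 0 || val > 1020)
		return -EINVAL;		/* outside both ranges */

	*use_touch = val > 255;
	*reg_val = *use_touch ? val / 4 : val;

	return 0;
}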
+
+static const struct iio_info iqs621_als_info = {
+ .read_raw = &iqs621_als_read_raw,
+ .read_event_config = iqs621_als_read_event_config,
+ .write_event_config = iqs621_als_write_event_config,
+ .read_event_value = iqs621_als_read_event_value,
+ .write_event_value = iqs621_als_write_event_value,
+};
+
+static const struct iio_event_spec iqs621_als_range_events[] = {
+ {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_event_spec iqs621_als_light_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_chan_spec iqs621_als_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .address = IQS621_ALS_FLAGS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = iqs621_als_range_events,
+ .num_event_specs = ARRAY_SIZE(iqs621_als_range_events),
+ },
+ {
+ .type = IIO_LIGHT,
+ .address = IQS621_ALS_UI_OUT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .event_spec = iqs621_als_light_events,
+ .num_event_specs = ARRAY_SIZE(iqs621_als_light_events),
+ },
+};
+
+static const struct iio_event_spec iqs622_als_prox_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_chan_spec iqs622_als_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .address = IQS622_ALS_FLAGS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = iqs621_als_range_events,
+ .num_event_specs = ARRAY_SIZE(iqs621_als_range_events),
+ .modified = true,
+ },
+ {
+ .type = IIO_INTENSITY,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .address = IQS622_IR_RANGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .modified = true,
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .address = IQS622_IR_UI_OUT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = iqs622_als_prox_events,
+ .num_event_specs = ARRAY_SIZE(iqs622_als_prox_events),
+ },
+};
+
+static int iqs621_als_probe(struct platform_device *pdev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct iqs621_als_private *iqs621_als;
+ struct iio_dev *indio_dev;
+ unsigned int val;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*iqs621_als));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ iqs621_als = iio_priv(indio_dev);
+ iqs621_als->iqs62x = iqs62x;
+
+ if (iqs62x->dev_desc->prod_num == IQS622_PROD_NUM) {
+ ret = regmap_read(iqs62x->regmap, IQS622_IR_THRESH_TOUCH,
+ &val);
+ if (ret)
+ return ret;
+ iqs621_als->thresh_prox = val;
+ iqs621_als->ir_flags_mask = IQS622_IR_FLAGS_TOUCH;
+
+ indio_dev->channels = iqs622_als_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iqs622_als_channels);
+ } else {
+ ret = regmap_read(iqs62x->regmap, IQS621_ALS_THRESH_LIGHT,
+ &val);
+ if (ret)
+ return ret;
+ iqs621_als->thresh_light = val;
+
+ ret = regmap_read(iqs62x->regmap, IQS621_ALS_THRESH_DARK,
+ &val);
+ if (ret)
+ return ret;
+ iqs621_als->thresh_dark = val;
+
+ indio_dev->channels = iqs621_als_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iqs621_als_channels);
+ }
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = iqs62x->dev_desc->dev_name;
+ indio_dev->info = &iqs621_als_info;
+
+ mutex_init(&iqs621_als->lock);
+
+ iqs621_als->notifier.notifier_call = iqs621_als_notifier;
+ ret = blocking_notifier_chain_register(&iqs621_als->iqs62x->nh,
+ &iqs621_als->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ iqs621_als_notifier_unregister,
+ iqs621_als);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static struct platform_driver iqs621_als_platform_driver = {
+ .driver = {
+ .name = "iqs621-als",
+ },
+ .probe = iqs621_als_probe,
+};
+module_platform_driver(iqs621_als_platform_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS621/622 Ambient Light Sensors");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iqs621-als");
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
index 015a21f0c2ef..9174ab928880 100644
--- a/drivers/iio/light/si1133.c
+++ b/drivers/iio/light/si1133.c
@@ -102,6 +102,9 @@
#define SI1133_INPUT_FRACTION_LOW 15
#define SI1133_LUX_OUTPUT_FRACTION 12
#define SI1133_LUX_BUFFER_SIZE 9
+#define SI1133_MEASURE_BUFFER_SIZE 3
+
+#define SI1133_SIGN_BIT_INDEX 23
static const int si1133_scale_available[] = {
1, 2, 4, 8, 16, 32, 64, 128};
@@ -234,13 +237,13 @@ static const struct si1133_lux_coeff lux_coeff = {
}
};
-static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag,
+static int si1133_calculate_polynomial_inner(s32 input, u8 fraction, u16 mag,
s8 shift)
{
return ((input << fraction) / mag) << shift;
}
-static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order,
+static int si1133_calculate_output(s32 x, s32 y, u8 x_order, u8 y_order,
u8 input_fraction, s8 sign,
const struct si1133_coeff *coeffs)
{
@@ -276,7 +279,7 @@ static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order,
* The algorithm is from:
* https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716
*/
-static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff,
+static int si1133_calc_polynomial(s32 x, s32 y, u8 input_fraction, u8 num_coeff,
const struct si1133_coeff *coeffs)
{
u8 x_order, y_order;
@@ -614,7 +617,7 @@ static int si1133_measure(struct si1133_data *data,
{
int err;
- __be16 resp;
+ u8 buffer[SI1133_MEASURE_BUFFER_SIZE];
err = si1133_set_adcmux(data, 0, chan->channel);
if (err)
@@ -625,12 +628,13 @@ static int si1133_measure(struct si1133_data *data,
if (err)
return err;
- err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp),
- (u8 *)&resp);
+ err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(buffer),
+ buffer);
if (err)
return err;
- *val = be16_to_cpu(resp);
+ *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
+ SI1133_SIGN_BIT_INDEX);
return err;
}
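/*
 * Editorial note, not part of this patch: the three HOSTOUT bytes form a
 * 24-bit two's-complement sample, so bit 23 (SI1133_SIGN_BIT_INDEX) is the
 * sign bit.  sign_extend32(v, 23) is equivalent to the open-coded shift
 * below; e.g. the bytes 0xFF 0xFF 0xFE decode to -2 rather than 16777214.
 */
static inline s32 si1133_sign_extend24(u32 v)
{
	return (s32)(v << 8) >> 8;	/* move bit 23 up to the sign position */
}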
@@ -704,9 +708,9 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
{
int err;
int lux;
- u32 high_vis;
- u32 low_vis;
- u32 ir;
+ s32 high_vis;
+ s32 low_vis;
+ s32 ir;
u8 buffer[SI1133_LUX_BUFFER_SIZE];
/* Activate lux channels */
@@ -719,9 +723,16 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
if (err)
return err;
- high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2];
- low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5];
- ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8];
+ high_vis =
+ sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
+ SI1133_SIGN_BIT_INDEX);
+
+ low_vis =
+ sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5],
+ SI1133_SIGN_BIT_INDEX);
+
+ ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8],
+ SI1133_SIGN_BIT_INDEX);
if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD)
lux = si1133_calc_polynomial(high_vis, ir,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index e5b00a6611ac..ec803c1e81df 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -57,6 +58,8 @@
#define VCNL4000_AL_OD BIT(4) /* start on-demand ALS measurement */
#define VCNL4000_PS_OD BIT(3) /* start on-demand proximity measurement */
+#define VCNL4000_SLEEP_DELAY_MS 2000 /* before we enter pm_runtime_suspend */
+
enum vcnl4000_device_ids {
VCNL4000,
VCNL4010,
@@ -87,6 +90,7 @@ struct vcnl4000_chip_spec {
int (*init)(struct vcnl4000_data *data);
int (*measure_light)(struct vcnl4000_data *data, int *val);
int (*measure_proximity)(struct vcnl4000_data *data, int *val);
+ int (*set_power_state)(struct vcnl4000_data *data, bool on);
};
static const struct i2c_device_id vcnl4000_id[] = {
@@ -99,6 +103,12 @@ static const struct i2c_device_id vcnl4000_id[] = {
};
MODULE_DEVICE_TABLE(i2c, vcnl4000_id);
+static int vcnl4000_set_power_state(struct vcnl4000_data *data, bool on)
+{
+ /* no suspend op */
+ return 0;
+}
+
static int vcnl4000_init(struct vcnl4000_data *data)
{
int ret, prod_id;
@@ -127,9 +137,31 @@ static int vcnl4000_init(struct vcnl4000_data *data)
data->al_scale = 250000;
mutex_init(&data->vcnl4000_lock);
- return 0;
+ return data->chip_spec->set_power_state(data, true);
};
+static int vcnl4200_set_power_state(struct vcnl4000_data *data, bool on)
+{
+ u16 val = on ? 0 /* power on */ : 1 /* shut down */;
+ int ret;
+
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF, val);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, val);
+ if (ret < 0)
+ return ret;
+
+ if (on) {
+ /* Wait at least one integration cycle before fetching data */
+ data->vcnl4200_al.last_measurement = ktime_get();
+ data->vcnl4200_ps.last_measurement = ktime_get();
+ }
+
+ return 0;
+}
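/*
 * Editorial sketch, not part of this patch: a consumer of last_measurement
 * can derive the remaining settling time before the first valid sample
 * along these lines (hypothetical helper, period supplied by the caller):
 */
static void vcnl4200_wait_one_cycle(ktime_t last_measurement, s64 period_us)
{
	s64 elapsed_us = ktime_us_delta(ktime_get(), last_measurement);

	if (elapsed_us >= 0 && elapsed_us < period_us)
		usleep_range(period_us - elapsed_us,
			     period_us - elapsed_us + 1000);
}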
+
static int vcnl4200_init(struct vcnl4000_data *data)
{
int ret, id;
@@ -155,14 +187,6 @@ static int vcnl4200_init(struct vcnl4000_data *data)
data->rev = (ret >> 8) & 0xf;
- /* Set defaults and enable both channels */
- ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF, 0);
- if (ret < 0)
- return ret;
- ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, 0);
- if (ret < 0)
- return ret;
-
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
switch (id) {
@@ -181,11 +205,13 @@ static int vcnl4200_init(struct vcnl4000_data *data)
data->al_scale = 120000;
break;
}
- data->vcnl4200_al.last_measurement = ktime_set(0, 0);
- data->vcnl4200_ps.last_measurement = ktime_set(0, 0);
mutex_init(&data->vcnl4200_al.lock);
mutex_init(&data->vcnl4200_ps.lock);
+ ret = data->chip_spec->set_power_state(data, true);
+ if (ret < 0)
+ return ret;
+
return 0;
};
@@ -292,24 +318,28 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
.init = vcnl4000_init,
.measure_light = vcnl4000_measure_light,
.measure_proximity = vcnl4000_measure_proximity,
+ .set_power_state = vcnl4000_set_power_state,
},
[VCNL4010] = {
.prod = "VCNL4010/4020",
.init = vcnl4000_init,
.measure_light = vcnl4000_measure_light,
.measure_proximity = vcnl4000_measure_proximity,
+ .set_power_state = vcnl4000_set_power_state,
},
[VCNL4040] = {
.prod = "VCNL4040",
.init = vcnl4200_init,
.measure_light = vcnl4200_measure_light,
.measure_proximity = vcnl4200_measure_proximity,
+ .set_power_state = vcnl4200_set_power_state,
},
[VCNL4200] = {
.prod = "VCNL4200",
.init = vcnl4200_init,
.measure_light = vcnl4200_measure_light,
.measure_proximity = vcnl4200_measure_proximity,
+ .set_power_state = vcnl4200_set_power_state,
},
};
@@ -324,6 +354,23 @@ static const struct iio_chan_spec vcnl4000_channels[] = {
}
};
+static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ if (on) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ pm_runtime_put_noidle(dev);
+ } else {
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
+}
+
static int vcnl4000_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -333,20 +380,26 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = vcnl4000_set_pm_runtime_state(data, true);
+ if (ret < 0)
+ return ret;
+
switch (chan->type) {
case IIO_LIGHT:
ret = data->chip_spec->measure_light(data, val);
- if (ret < 0)
- return ret;
- return IIO_VAL_INT;
+ if (!ret)
+ ret = IIO_VAL_INT;
+ break;
case IIO_PROXIMITY:
ret = data->chip_spec->measure_proximity(data, val);
- if (ret < 0)
- return ret;
- return IIO_VAL_INT;
+ if (!ret)
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+ vcnl4000_set_pm_runtime_state(data, false);
+ return ret;
case IIO_CHAN_INFO_SCALE:
if (chan->type != IIO_LIGHT)
return -EINVAL;
@@ -394,7 +447,22 @@ static int vcnl4000_probe(struct i2c_client *client,
indio_dev->name = VCNL4000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- return devm_iio_device_register(&client->dev, indio_dev);
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret < 0)
+ goto fail_poweroff;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto fail_poweroff;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, VCNL4000_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ return 0;
+fail_poweroff:
+ data->chip_spec->set_power_state(data, false);
+ return ret;
}
static const struct of_device_id vcnl_4000_of_match[] = {
@@ -422,13 +490,51 @@ static const struct of_device_id vcnl_4000_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vcnl_4000_of_match);
+static int vcnl4000_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
+ iio_device_unregister(indio_dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ return data->chip_spec->set_power_state(data, false);
+}
+
+static int __maybe_unused vcnl4000_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ return data->chip_spec->set_power_state(data, false);
+}
+
+static int __maybe_unused vcnl4000_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ return data->chip_spec->set_power_state(data, true);
+}
+
+static const struct dev_pm_ops vcnl4000_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(vcnl4000_runtime_suspend,
+ vcnl4000_runtime_resume, NULL)
+};
+
static struct i2c_driver vcnl4000_driver = {
.driver = {
.name = VCNL4000_DRV_NAME,
+ .pm = &vcnl4000_pm_ops,
.of_match_table = vcnl_4000_of_match,
},
.probe = vcnl4000_probe,
.id_table = vcnl4000_id,
+ .remove = vcnl4000_remove,
};
module_i2c_driver(vcnl4000_driver);
diff --git a/drivers/iio/position/Kconfig b/drivers/iio/position/Kconfig
new file mode 100644
index 000000000000..eda67f008c5b
--- /dev/null
+++ b/drivers/iio/position/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Linear and angular position sensors
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Linear and angular position sensors"
+
+config IQS624_POS
+ tristate "Azoteq IQS624/625 angular position sensors"
+ depends on MFD_IQS62X || COMPILE_TEST
+ help
+ Say Y here if you want to build support for the Azoteq IQS624
+ and IQS625 angular position sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iqs624-pos.
+
+endmenu
diff --git a/drivers/iio/position/Makefile b/drivers/iio/position/Makefile
new file mode 100644
index 000000000000..3cbe7a734352
--- /dev/null
+++ b/drivers/iio/position/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for IIO linear and angular position sensors
+#
+
+# When adding new entries keep the list in alphabetical order
+
+obj-$(CONFIG_IQS624_POS) += iqs624-pos.o
diff --git a/drivers/iio/position/iqs624-pos.c b/drivers/iio/position/iqs624-pos.c
new file mode 100644
index 000000000000..77096c31c2ba
--- /dev/null
+++ b/drivers/iio/position/iqs624-pos.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS624/625 Angular Position Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/device.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IQS624_POS_DEG_OUT 0x16
+
+#define IQS624_POS_SCALE1 (314159 / 180)
+#define IQS624_POS_SCALE2 100000
+
+struct iqs624_pos_private {
+ struct iqs62x_core *iqs62x;
+ struct notifier_block notifier;
+ struct mutex lock;
+ bool angle_en;
+ u16 angle;
+};
+
+static int iqs624_pos_angle_en(struct iqs62x_core *iqs62x, bool angle_en)
+{
+ unsigned int event_mask = IQS624_HALL_UI_WHL_EVENT;
+
+ /*
+ * The IQS625 reports angular position in the form of coarse intervals,
+ * so only interval change events are unmasked. Conversely, the IQS624
+ * reports angular position down to one degree of resolution, so wheel
+ * movement events are unmasked instead.
+ */
+ if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM)
+ event_mask = IQS624_HALL_UI_INT_EVENT;
+
+ return regmap_update_bits(iqs62x->regmap, IQS624_HALL_UI, event_mask,
+ angle_en ? 0 : 0xFF);
+}
+
+static int iqs624_pos_notifier(struct notifier_block *notifier,
+ unsigned long event_flags, void *context)
+{
+ struct iqs62x_event_data *event_data = context;
+ struct iqs624_pos_private *iqs624_pos;
+ struct iqs62x_core *iqs62x;
+ struct iio_dev *indio_dev;
+ u16 angle = event_data->ui_data;
+ s64 timestamp;
+ int ret;
+
+ iqs624_pos = container_of(notifier, struct iqs624_pos_private,
+ notifier);
+ indio_dev = iio_priv_to_dev(iqs624_pos);
+ timestamp = iio_get_time_ns(indio_dev);
+
+ iqs62x = iqs624_pos->iqs62x;
+ if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM)
+ angle = event_data->interval;
+
+ mutex_lock(&iqs624_pos->lock);
+
+ if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) {
+ ret = iqs624_pos_angle_en(iqs62x, iqs624_pos->angle_en);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to re-initialize device: %d\n", ret);
+ ret = NOTIFY_BAD;
+ } else {
+ ret = NOTIFY_OK;
+ }
+ } else if (iqs624_pos->angle_en && (angle != iqs624_pos->angle)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ANGL, 0,
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_NONE),
+ timestamp);
+
+ iqs624_pos->angle = angle;
+ ret = NOTIFY_OK;
+ } else {
+ ret = NOTIFY_DONE;
+ }
+
+ mutex_unlock(&iqs624_pos->lock);
+
+ return ret;
+}
+
+static void iqs624_pos_notifier_unregister(void *context)
+{
+ struct iqs624_pos_private *iqs624_pos = context;
+ struct iio_dev *indio_dev = iio_priv_to_dev(iqs624_pos);
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(&iqs624_pos->iqs62x->nh,
+ &iqs624_pos->notifier);
+ if (ret)
+ dev_err(indio_dev->dev.parent,
+ "Failed to unregister notifier: %d\n", ret);
+}
+
+static int iqs624_pos_angle_get(struct iqs62x_core *iqs62x, unsigned int *val)
+{
+ int ret;
+ __le16 val_buf;
+
+ if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM)
+ return regmap_read(iqs62x->regmap, iqs62x->dev_desc->interval,
+ val);
+
+ ret = regmap_raw_read(iqs62x->regmap, IQS624_POS_DEG_OUT, &val_buf,
+ sizeof(val_buf));
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(val_buf);
+
+ return 0;
+}
+
+static int iqs624_pos_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs624_pos->iqs62x;
+ unsigned int scale = 1;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iqs624_pos_angle_get(iqs62x, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM) {
+ ret = regmap_read(iqs62x->regmap, IQS624_INTERVAL_DIV,
+ &scale);
+ if (ret)
+ return ret;
+ }
+
+ *val = scale * IQS624_POS_SCALE1;
+ *val2 = IQS624_POS_SCALE2;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+}
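/*
 * Editorial note, not part of this patch: with IIO_VAL_FRACTIONAL the
 * reported scale is val / val2.  For the IQS624 (scale = 1) that is
 * 314159 / 180 = 1745 in integer division, hence 1745 / 100000 = 0.01745,
 * approximately pi / 180: user space multiplies the raw angle in degrees
 * by the scale to obtain radians.  For the IQS625 the same figure is
 * further multiplied by the interval divider read from IQS624_INTERVAL_DIV.
 */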
+
+static int iqs624_pos_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&iqs624_pos->lock);
+ ret = iqs624_pos->angle_en;
+ mutex_unlock(&iqs624_pos->lock);
+
+ return ret;
+}
+
+static int iqs624_pos_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs624_pos->iqs62x;
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&iqs624_pos->lock);
+
+ ret = iqs624_pos_angle_get(iqs62x, &val);
+ if (ret)
+ goto err_mutex;
+
+ ret = iqs624_pos_angle_en(iqs62x, state);
+ if (ret)
+ goto err_mutex;
+
+ iqs624_pos->angle = val;
+ iqs624_pos->angle_en = state;
+
+err_mutex:
+ mutex_unlock(&iqs624_pos->lock);
+
+ return ret;
+}
+
+static const struct iio_info iqs624_pos_info = {
+ .read_raw = &iqs624_pos_read_raw,
+ .read_event_config = iqs624_pos_read_event_config,
+ .write_event_config = iqs624_pos_write_event_config,
+};
+
+static const struct iio_event_spec iqs624_pos_events[] = {
+ {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_NONE,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec iqs624_pos_channels[] = {
+ {
+ .type = IIO_ANGL,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = iqs624_pos_events,
+ .num_event_specs = ARRAY_SIZE(iqs624_pos_events),
+ },
+};
+
+static int iqs624_pos_probe(struct platform_device *pdev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct iqs624_pos_private *iqs624_pos;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*iqs624_pos));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ iqs624_pos = iio_priv(indio_dev);
+ iqs624_pos->iqs62x = iqs62x;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->channels = iqs624_pos_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iqs624_pos_channels);
+ indio_dev->name = iqs62x->dev_desc->dev_name;
+ indio_dev->info = &iqs624_pos_info;
+
+ mutex_init(&iqs624_pos->lock);
+
+ iqs624_pos->notifier.notifier_call = iqs624_pos_notifier;
+ ret = blocking_notifier_chain_register(&iqs624_pos->iqs62x->nh,
+ &iqs624_pos->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ iqs624_pos_notifier_unregister,
+ iqs624_pos);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static struct platform_driver iqs624_pos_platform_driver = {
+ .driver = {
+ .name = "iqs624-pos",
+ },
+ .probe = iqs624_pos_probe,
+};
+module_platform_driver(iqs624_pos_platform_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS624/625 Angular Position Sensors");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iqs624-pos");
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index a0e5f530faa9..2cb11da18e0f 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -275,11 +275,20 @@ static int lmp91000_buffer_cb(const void *val, void *private)
static const struct iio_trigger_ops lmp91000_trigger_ops = {
};
-static int lmp91000_buffer_preenable(struct iio_dev *indio_dev)
+static int lmp91000_buffer_postenable(struct iio_dev *indio_dev)
{
struct lmp91000_data *data = iio_priv(indio_dev);
+ int err;
- return iio_channel_start_all_cb(data->cb_buffer);
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err)
+ return err;
+
+ err = iio_channel_start_all_cb(data->cb_buffer);
+ if (err)
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return err;
}
static int lmp91000_buffer_predisable(struct iio_dev *indio_dev)
@@ -288,12 +297,11 @@ static int lmp91000_buffer_predisable(struct iio_dev *indio_dev)
iio_channel_stop_all_cb(data->cb_buffer);
- return 0;
+ return iio_triggered_buffer_predisable(indio_dev);
}
static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = {
- .preenable = lmp91000_buffer_preenable,
- .postenable = iio_triggered_buffer_postenable,
+ .postenable = lmp91000_buffer_postenable,
.predisable = lmp91000_buffer_predisable,
};
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 9c2d9bf8f100..689b978db4f9 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -101,6 +101,17 @@ config HP03
To compile this driver as a module, choose M here: the module
will be called hp03.
+config ICP10100
+ tristate "InvenSense ICP-101xx pressure and temperature sensor"
+ depends on I2C
+ select CRC8
+ help
+ Say yes here to build support for the InvenSense ICP-101xx
+ barometric pressure and temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called icp10100.
+
config MPL115
tristate
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 5a79192d8cb5..083ae87ff48f 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_DPS310) += dps310.o
obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_HP03) += hp03.o
+obj-$(CONFIG_ICP10100) += icp10100.o
obj-$(CONFIG_MPL115) += mpl115.o
obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index b521bebd551c..c079b8960082 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -134,10 +134,14 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
+ cros_ec_sensors_capture,
+ cros_ec_sensors_push_data);
if (ret)
return ret;
+ iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
+
indio_dev->info = &cros_ec_baro_info;
state = iio_priv(indio_dev);
state->core.type = state->core.resp->info.type;
@@ -147,8 +151,7 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
channel->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
channel->info_mask_shared_by_all =
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_FREQUENCY);
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->info_mask_shared_by_all_available =
BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
@@ -182,11 +185,6 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
new file mode 100644
index 000000000000..06cb5b63a189
--- /dev/null
+++ b/drivers/iio/pressure/icp10100.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 InvenSense, Inc.
+ *
+ * Driver for InvenSense ICP-101xx barometric pressure and temperature sensor.
+ *
+ * Datasheet:
+ * http://www.invensense.com/wp-content/uploads/2018/01/DS-000186-ICP-101xx-v1.2.pdf
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/crc8.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+
+#define ICP10100_ID_REG_GET(_reg) ((_reg) & 0x003F)
+#define ICP10100_ID_REG 0x08
+#define ICP10100_RESPONSE_WORD_LENGTH 3
+#define ICP10100_CRC8_WORD_LENGTH 2
+#define ICP10100_CRC8_POLYNOMIAL 0x31
+#define ICP10100_CRC8_INIT 0xFF
+
+enum icp10100_mode {
+ ICP10100_MODE_LP, /* Low power mode: 1x sampling */
+ ICP10100_MODE_N, /* Normal mode: 2x sampling */
+ ICP10100_MODE_LN, /* Low noise mode: 4x sampling */
+ ICP10100_MODE_ULN, /* Ultra low noise mode: 8x sampling */
+ ICP10100_MODE_NB,
+};
+
+struct icp10100_state {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct regulator *vdd;
+ enum icp10100_mode mode;
+ int16_t cal[4];
+};
+
+struct icp10100_command {
+ __be16 cmd;
+ unsigned long wait_us;
+ unsigned long wait_max_us;
+ size_t response_word_nb;
+};
+
+static const struct icp10100_command icp10100_cmd_soft_reset = {
+ .cmd = cpu_to_be16(0x805D),
+ .wait_us = 170,
+ .wait_max_us = 200,
+ .response_word_nb = 0,
+};
+
+static const struct icp10100_command icp10100_cmd_read_id = {
+ .cmd = cpu_to_be16(0xEFC8),
+ .wait_us = 0,
+ .response_word_nb = 1,
+};
+
+static const struct icp10100_command icp10100_cmd_read_otp = {
+ .cmd = cpu_to_be16(0xC7F7),
+ .wait_us = 0,
+ .response_word_nb = 1,
+};
+
+static const struct icp10100_command icp10100_cmd_measure[] = {
+ [ICP10100_MODE_LP] = {
+ .cmd = cpu_to_be16(0x401A),
+ .wait_us = 1800,
+ .wait_max_us = 2000,
+ .response_word_nb = 3,
+ },
+ [ICP10100_MODE_N] = {
+ .cmd = cpu_to_be16(0x48A3),
+ .wait_us = 6300,
+ .wait_max_us = 6500,
+ .response_word_nb = 3,
+ },
+ [ICP10100_MODE_LN] = {
+ .cmd = cpu_to_be16(0x5059),
+ .wait_us = 23800,
+ .wait_max_us = 24000,
+ .response_word_nb = 3,
+ },
+ [ICP10100_MODE_ULN] = {
+ .cmd = cpu_to_be16(0x58E0),
+ .wait_us = 94500,
+ .wait_max_us = 94700,
+ .response_word_nb = 3,
+ },
+};
+
+static const uint8_t icp10100_switch_mode_otp[] =
+ {0xC5, 0x95, 0x00, 0x66, 0x9C};
+
+DECLARE_CRC8_TABLE(icp10100_crc8_table);
+
+static inline int icp10100_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ int ret;
+
+ ret = i2c_transfer(adap, msgs, num);
+ if (ret < 0)
+ return ret;
+
+ if (ret != num)
+ return -EIO;
+
+ return 0;
+}
+
+static int icp10100_send_cmd(struct icp10100_state *st,
+ const struct icp10100_command *cmd,
+ __be16 *buf, size_t buf_len)
+{
+ size_t size = cmd->response_word_nb * ICP10100_RESPONSE_WORD_LENGTH;
+ uint8_t data[16];
+ uint8_t *ptr;
+ uint8_t *buf_ptr = (uint8_t *)buf;
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = st->client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = (uint8_t *)&cmd->cmd,
+ }, {
+ .addr = st->client->addr,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = data,
+ },
+ };
+ uint8_t crc;
+ unsigned int i;
+ int ret;
+
+ if (size > sizeof(data))
+ return -EINVAL;
+
+ if (cmd->response_word_nb > 0 &&
+ (buf == NULL || buf_len < (cmd->response_word_nb * 2)))
+ return -EINVAL;
+
+ dev_dbg(&st->client->dev, "sending cmd %#x\n", be16_to_cpu(cmd->cmd));
+
+ if (cmd->response_word_nb > 0 && cmd->wait_us == 0) {
+ /* direct command-response without waiting */
+ ret = icp10100_i2c_xfer(st->client->adapter, msgs,
+ ARRAY_SIZE(msgs));
+ if (ret)
+ return ret;
+ } else {
+ /* transfer command write */
+ ret = icp10100_i2c_xfer(st->client->adapter, &msgs[0], 1);
+ if (ret)
+ return ret;
+ if (cmd->wait_us > 0)
+ usleep_range(cmd->wait_us, cmd->wait_max_us);
+ /* transfer response read if needed */
+ if (cmd->response_word_nb > 0) {
+ ret = icp10100_i2c_xfer(st->client->adapter, &msgs[1], 1);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+ }
+
+ /* process read words with crc checking */
+ for (i = 0; i < cmd->response_word_nb; ++i) {
+ ptr = &data[i * ICP10100_RESPONSE_WORD_LENGTH];
+ crc = crc8(icp10100_crc8_table, ptr, ICP10100_CRC8_WORD_LENGTH,
+ ICP10100_CRC8_INIT);
+ if (crc != ptr[ICP10100_CRC8_WORD_LENGTH]) {
+ dev_err(&st->client->dev, "crc error recv=%#x calc=%#x\n",
+ ptr[ICP10100_CRC8_WORD_LENGTH], crc);
+ return -EIO;
+ }
+ *buf_ptr++ = ptr[0];
+ *buf_ptr++ = ptr[1];
+ }
+
+ return 0;
+}
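/*
 * Editorial example, not part of this patch: each response word is two
 * data bytes followed by one CRC-8 byte (polynomial 0x31, init 0xFF), so
 * the check performed above reduces to this stand-alone sketch:
 */
static int icp10100_check_word(const u8 word[ICP10100_RESPONSE_WORD_LENGTH])
{
	u8 crc = crc8(icp10100_crc8_table, word, ICP10100_CRC8_WORD_LENGTH,
		      ICP10100_CRC8_INIT);

	return crc == word[ICP10100_CRC8_WORD_LENGTH] ? 0 : -EIO;
}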
+
+static int icp10100_read_cal_otp(struct icp10100_state *st)
+{
+ __be16 val;
+ int i;
+ int ret;
+
+ /* switch into OTP read mode */
+ ret = i2c_master_send(st->client, icp10100_switch_mode_otp,
+ ARRAY_SIZE(icp10100_switch_mode_otp));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(icp10100_switch_mode_otp))
+ return -EIO;
+
+ /* read 4 calibration values */
+ for (i = 0; i < 4; ++i) {
+ ret = icp10100_send_cmd(st, &icp10100_cmd_read_otp,
+ &val, sizeof(val));
+ if (ret)
+ return ret;
+ st->cal[i] = be16_to_cpu(val);
+ dev_dbg(&st->client->dev, "cal[%d] = %d\n", i, st->cal[i]);
+ }
+
+ return 0;
+}
+
+static int icp10100_init_chip(struct icp10100_state *st)
+{
+ __be16 val;
+ uint16_t id;
+ int ret;
+
+ /* read and check id */
+ ret = icp10100_send_cmd(st, &icp10100_cmd_read_id, &val, sizeof(val));
+ if (ret)
+ return ret;
+ id = ICP10100_ID_REG_GET(be16_to_cpu(val));
+ if (id != ICP10100_ID_REG) {
+ dev_err(&st->client->dev, "invalid id %#x\n", id);
+ return -ENODEV;
+ }
+
+ /* read calibration data from OTP */
+ ret = icp10100_read_cal_otp(st);
+ if (ret)
+ return ret;
+
+ /* reset chip */
+ return icp10100_send_cmd(st, &icp10100_cmd_soft_reset, NULL, 0);
+}
+
+static int icp10100_get_measures(struct icp10100_state *st,
+ uint32_t *pressure, uint16_t *temperature)
+{
+ const struct icp10100_command *cmd;
+ __be16 measures[3];
+ int ret;
+
+ pm_runtime_get_sync(&st->client->dev);
+
+ mutex_lock(&st->lock);
+ cmd = &icp10100_cmd_measure[st->mode];
+ ret = icp10100_send_cmd(st, cmd, measures, sizeof(measures));
+ mutex_unlock(&st->lock);
+ if (ret)
+ goto error_measure;
+
+ *pressure = (be16_to_cpu(measures[0]) << 8) |
+ (be16_to_cpu(measures[1]) >> 8);
+ *temperature = be16_to_cpu(measures[2]);
+
+ pm_runtime_mark_last_busy(&st->client->dev);
+error_measure:
+ pm_runtime_put_autosuspend(&st->client->dev);
+ return ret;
+}
+
+static uint32_t icp10100_get_pressure(struct icp10100_state *st,
+ uint32_t raw_pressure, uint16_t raw_temp)
+{
+ static const int32_t p_calib[] = {45000, 80000, 105000};
+ static const int32_t lut_lower = 3670016;
+ static const int32_t lut_upper = 12058624;
+ static const int32_t inv_quadr_factor = 16777216;
+ static const int32_t offset_factor = 2048;
+ int64_t val1, val2;
+ int32_t p_lut[3];
+ int32_t t, t_square;
+ int64_t a, b, c;
+ uint32_t pressure_mPa;
+
+ dev_dbg(&st->client->dev, "raw: pressure = %u, temp = %u\n",
+ raw_pressure, raw_temp);
+
+ /* compute p_lut values */
+ t = (int32_t)raw_temp - 32768;
+ t_square = t * t;
+ val1 = (int64_t)st->cal[0] * (int64_t)t_square;
+ p_lut[0] = lut_lower + (int32_t)div_s64(val1, inv_quadr_factor);
+ val1 = (int64_t)st->cal[1] * (int64_t)t_square;
+ p_lut[1] = offset_factor * st->cal[3] +
+ (int32_t)div_s64(val1, inv_quadr_factor);
+ val1 = (int64_t)st->cal[2] * (int64_t)t_square;
+ p_lut[2] = lut_upper + (int32_t)div_s64(val1, inv_quadr_factor);
+ dev_dbg(&st->client->dev, "p_lut = [%d, %d, %d]\n",
+ p_lut[0], p_lut[1], p_lut[2]);
+
+ /* compute a, b, c factors */
+ val1 = (int64_t)p_lut[0] * (int64_t)p_lut[1] *
+ (int64_t)(p_calib[0] - p_calib[1]) +
+ (int64_t)p_lut[1] * (int64_t)p_lut[2] *
+ (int64_t)(p_calib[1] - p_calib[2]) +
+ (int64_t)p_lut[2] * (int64_t)p_lut[0] *
+ (int64_t)(p_calib[2] - p_calib[0]);
+ val2 = (int64_t)p_lut[2] * (int64_t)(p_calib[0] - p_calib[1]) +
+ (int64_t)p_lut[0] * (int64_t)(p_calib[1] - p_calib[2]) +
+ (int64_t)p_lut[1] * (int64_t)(p_calib[2] - p_calib[0]);
+ c = div64_s64(val1, val2);
+ dev_dbg(&st->client->dev, "val1 = %lld, val2 = %lld, c = %lld\n",
+ val1, val2, c);
+ val1 = (int64_t)p_calib[0] * (int64_t)p_lut[0] -
+ (int64_t)p_calib[1] * (int64_t)p_lut[1] -
+ (int64_t)(p_calib[1] - p_calib[0]) * c;
+ val2 = (int64_t)p_lut[0] - (int64_t)p_lut[1];
+ a = div64_s64(val1, val2);
+ dev_dbg(&st->client->dev, "val1 = %lld, val2 = %lld, a = %lld\n",
+ val1, val2, a);
+ b = ((int64_t)p_calib[0] - a) * ((int64_t)p_lut[0] + c);
+ dev_dbg(&st->client->dev, "b = %lld\n", b);
+
+ /*
+ * pressure_Pa = a + (b / (c + raw_pressure))
+ * pressure_mPa = 1000 * pressure_Pa
+ */
+ pressure_mPa = 1000LL * a + div64_s64(1000LL * b, c + raw_pressure);
+
+ return pressure_mPa;
+}
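/*
 * Editorial note, not part of this patch: the three calibration pairs
 * (p_lut[i], p_calib[i]) pin down the inverse-linear model
 *
 *	p(raw) = a + b / (c + raw)
 *
 * whose coefficients the fixed-point code above solves as
 *
 *	c = (L0*L1*(P0 - P1) + L1*L2*(P1 - P2) + L2*L0*(P2 - P0)) /
 *	    (L2*(P0 - P1) + L0*(P1 - P2) + L1*(P2 - P0))
 *	a = (P0*L0 - P1*L1 - (P1 - P0)*c) / (L0 - L1)
 *	b = (P0 - a) * (L0 + c)
 *
 * with Li = p_lut[i] and Pi = p_calib[i], matching the val1/val2 steps.
 */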
+
+static int icp10100_read_raw_measures(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct icp10100_state *st = iio_priv(indio_dev);
+ uint32_t raw_pressure;
+ uint16_t raw_temp;
+ uint32_t pressure_mPa;
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = icp10100_get_measures(st, &raw_pressure, &raw_temp);
+ if (ret)
+ goto error_release;
+
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ pressure_mPa = icp10100_get_pressure(st, raw_pressure,
+ raw_temp);
+ /* mPa to kPa */
+ *val = pressure_mPa / 1000000;
+ *val2 = pressure_mPa % 1000000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_TEMP:
+ *val = raw_temp;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+error_release:
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+}
+
+static int icp10100_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct icp10100_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ return icp10100_read_raw_measures(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* 1000 * 175°C / 65536 in m°C */
+ *val = 2;
+ *val2 = 670288;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* 1000 * -45°C in m°C */
+ *val = -45000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ mutex_lock(&st->lock);
+ *val = 1 << st->mode;
+ mutex_unlock(&st->lock);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
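/*
 * Editorial note, not part of this patch: per the ICP-101xx datasheet the
 * temperature is T = -45 + 175 * raw / 65536 degrees Celsius, which is
 * where the reported scale of 175000 / 65536 = 2.670288 m°C per LSB comes
 * from; e.g. raw = 24576 gives -45 + 175 * 24576 / 65536 = 20.625 °C.
 */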
+
+static int icp10100_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ static const int oversamplings[] = {1, 2, 4, 8};
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = oversamplings;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(oversamplings);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int icp10100_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct icp10100_state *st = iio_priv(indio_dev);
+ unsigned int mode;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ /* oversampling is always positive and a power of 2 */
+ if (val <= 0 || !is_power_of_2(val))
+ return -EINVAL;
+ mode = ilog2(val);
+ if (mode >= ICP10100_MODE_NB)
+ return -EINVAL;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+ st->mode = mode;
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int icp10100_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info icp10100_info = {
+ .read_raw = icp10100_read_raw,
+ .read_avail = icp10100_read_avail,
+ .write_raw = icp10100_write_raw,
+ .write_raw_get_fmt = icp10100_write_raw_get_fmt,
+};
+
+static const struct iio_chan_spec icp10100_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ }, {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+};
+
+static int icp10100_enable_regulator(struct icp10100_state *st)
+{
+ int ret;
+
+ ret = regulator_enable(st->vdd);
+ if (ret)
+ return ret;
+ msleep(100);
+
+ return 0;
+}
+
+static void icp10100_disable_regulator_action(void *data)
+{
+ struct icp10100_state *st = data;
+ int ret;
+
+ ret = regulator_disable(st->vdd);
+ if (ret)
+ dev_err(&st->client->dev, "error %d disabling vdd\n", ret);
+}
+
+static void icp10100_pm_disable(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_put_sync_suspend(dev);
+ pm_runtime_disable(dev);
+}
+
+static int icp10100_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct icp10100_state *st;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "plain i2c transactions not supported\n");
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = client->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = icp10100_channels;
+ indio_dev->num_channels = ARRAY_SIZE(icp10100_channels);
+ indio_dev->info = &icp10100_info;
+
+ st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+ st->client = client;
+ st->mode = ICP10100_MODE_N;
+
+ st->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(st->vdd))
+ return PTR_ERR(st->vdd);
+
+ ret = icp10100_enable_regulator(st);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev,
+ icp10100_disable_regulator_action, st);
+ if (ret)
+ return ret;
+
+ /* has to be done before the first i2c communication */
+ crc8_populate_msb(icp10100_crc8_table, ICP10100_CRC8_POLYNOMIAL);
+
+ ret = icp10100_init_chip(st);
+ if (ret) {
+ dev_err(&client->dev, "init chip error %d\n", ret);
+ return ret;
+ }
+
+ /* enable runtime pm with autosuspend delay of 2s */
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, 2000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put(&client->dev);
+ ret = devm_add_action_or_reset(&client->dev, icp10100_pm_disable,
+ &client->dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int __maybe_unused icp10100_suspend(struct device *dev)
+{
+ struct icp10100_state *st = iio_priv(dev_get_drvdata(dev));
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = regulator_disable(st->vdd);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __maybe_unused icp10100_resume(struct device *dev)
+{
+ struct icp10100_state *st = iio_priv(dev_get_drvdata(dev));
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ ret = icp10100_enable_regulator(st);
+ if (ret)
+ goto out_unlock;
+
+ /* reset chip */
+ ret = icp10100_send_cmd(st, &icp10100_cmd_soft_reset, NULL, 0);
+
+out_unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static UNIVERSAL_DEV_PM_OPS(icp10100_pm, icp10100_suspend, icp10100_resume,
+ NULL);
+
+static const struct of_device_id icp10100_of_match[] = {
+ {
+ .compatible = "invensense,icp10100",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, icp10100_of_match);
+
+static const struct i2c_device_id icp10100_id[] = {
+ { "icp10100", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, icp10100_id);
+
+static struct i2c_driver icp10100_driver = {
+ .driver = {
+ .name = "icp10100",
+ .pm = &icp10100_pm,
+ .of_match_table = of_match_ptr(icp10100_of_match),
+ },
+ .probe = icp10100_probe,
+ .id_table = icp10100_id,
+};
+module_i2c_driver(icp10100_driver);
+
+MODULE_AUTHOR("InvenSense, Inc.");
+MODULE_DESCRIPTION("InvenSense icp10100 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 01eb8cc63076..568b76e06385 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -45,6 +45,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -56,6 +57,7 @@ struct srf04_data {
struct device *dev;
struct gpio_desc *gpiod_trig;
struct gpio_desc *gpiod_echo;
+ struct gpio_desc *gpiod_power;
struct mutex lock;
int irqnr;
ktime_t ts_rising;
@@ -63,6 +65,7 @@ struct srf04_data {
struct completion rising;
struct completion falling;
const struct srf04_cfg *cfg;
+ int startup_time_ms;
};
static const struct srf04_cfg srf04_cfg = {
@@ -97,6 +100,9 @@ static int srf04_read(struct srf04_data *data)
u64 dt_ns;
u32 time_ns, distance_mm;
+ if (data->gpiod_power)
+ pm_runtime_get_sync(data->dev);
+
/*
* just one read-echo-cycle can take place at a time
* ==> lock against concurrent reading calls
@@ -110,6 +116,11 @@ static int srf04_read(struct srf04_data *data)
udelay(data->cfg->trigger_pulse_us);
gpiod_set_value(data->gpiod_trig, 0);
+ if (data->gpiod_power) {
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+ }
+
/* it should not take more than 20 ms until echo is rising */
ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
if (ret < 0) {
@@ -268,6 +279,22 @@ static int srf04_probe(struct platform_device *pdev)
return PTR_ERR(data->gpiod_echo);
}
+ data->gpiod_power = devm_gpiod_get_optional(dev, "power",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpiod_power)) {
+ dev_err(dev, "failed to get power-gpios: err=%ld\n",
+ PTR_ERR(data->gpiod_power));
+ return PTR_ERR(data->gpiod_power);
+ }
+ if (data->gpiod_power) {
+ if (of_property_read_u32(dev->of_node, "startup-time-ms",
+ &data->startup_time_ms))
+ data->startup_time_ms = 100;
+ dev_dbg(dev, "using power gpio: startup-time-ms=%d\n",
+ data->startup_time_ms);
+ }
+
if (gpiod_cansleep(data->gpiod_echo)) {
dev_err(data->dev, "cansleep-GPIOs not supported\n");
return -ENODEV;
@@ -296,14 +323,81 @@ static int srf04_probe(struct platform_device *pdev)
indio_dev->channels = srf04_chan_spec;
indio_dev->num_channels = ARRAY_SIZE(srf04_chan_spec);
- return devm_iio_device_register(dev, indio_dev);
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(data->dev, "iio_device_register: %d\n", ret);
+ return ret;
+ }
+
+ if (data->gpiod_power) {
+ pm_runtime_set_autosuspend_delay(data->dev, 1000);
+ pm_runtime_use_autosuspend(data->dev);
+
+ ret = pm_runtime_set_active(data->dev);
+ if (ret) {
+ dev_err(data->dev, "pm_runtime_set_active: %d\n", ret);
+ iio_device_unregister(indio_dev);
+ return ret;
+ }
+
+ pm_runtime_enable(data->dev);
+ pm_runtime_idle(data->dev);
+ }
+
+ return ret;
}
+static int srf04_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct srf04_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (data->gpiod_power) {
+ pm_runtime_disable(data->dev);
+ pm_runtime_set_suspended(data->dev);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused srf04_pm_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct srf04_data *data = iio_priv(indio_dev);
+
+ gpiod_set_value(data->gpiod_power, 0);
+
+ return 0;
+}
+
+static int __maybe_unused srf04_pm_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct srf04_data *data = iio_priv(indio_dev);
+
+ gpiod_set_value(data->gpiod_power, 1);
+ msleep(data->startup_time_ms);
+
+ return 0;
+}
+
+static const struct dev_pm_ops srf04_pm_ops = {
+ SET_RUNTIME_PM_OPS(srf04_pm_runtime_suspend,
+ srf04_pm_runtime_resume, NULL)
+};
+
static struct platform_driver srf04_driver = {
.probe = srf04_probe,
+ .remove = srf04_remove,
.driver = {
.name = "srf04-gpio",
.of_match_table = of_srf04_match,
+ .pm = &srf04_pm_ops,
},
};
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index e1ccb4003015..f1f2a1499c9e 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -4,6 +4,16 @@
#
menu "Temperature sensors"
+config IQS620AT_TEMP
+ tristate "Azoteq IQS620AT temperature sensor"
+ depends on MFD_IQS62X || COMPILE_TEST
+ help
+ Say Y here if you want to build support for the Azoteq IQS620AT
+ temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iqs620at-temp.
+
config LTC2983
tristate "Analog Devices Multi-Sensor Digital Temperature Measurement System"
depends on SPI
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index d6b850b0cf63..90c113115422 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -3,6 +3,7 @@
# Makefile for industrial I/O temperature drivers
#
+obj-$(CONFIG_IQS620AT_TEMP) += iqs620at-temp.o
obj-$(CONFIG_LTC2983) += ltc2983.o
obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
diff --git a/drivers/iio/temperature/iqs620at-temp.c b/drivers/iio/temperature/iqs620at-temp.c
new file mode 100644
index 000000000000..3fd52b3eb030
--- /dev/null
+++ b/drivers/iio/temperature/iqs620at-temp.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS620AT Temperature Sensor
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IQS620_TEMP_UI_OUT 0x1A
+
+#define IQS620_TEMP_SCALE 1000
+#define IQS620_TEMP_OFFSET (-100)
+
+static int iqs620_temp_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct iqs62x_core *iqs62x = iio_device_get_drvdata(indio_dev);
+ int ret;
+ __le16 val_buf;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_raw_read(iqs62x->regmap, IQS620_TEMP_UI_OUT,
+ &val_buf, sizeof(val_buf));
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(val_buf);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = IQS620_TEMP_SCALE;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OFFSET:
+ *val = IQS620_TEMP_OFFSET;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
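/*
 * Editorial note, not part of this patch: user space combines these values
 * as (raw + offset) * scale, so a raw reading of 125 corresponds to
 * (125 - 100) * 1000 = 25000 m°C, i.e. 25 °C.
 */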
+
+static const struct iio_info iqs620_temp_info = {
+ .read_raw = &iqs620_temp_read_raw,
+};
+
+static const struct iio_chan_spec iqs620_temp_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ },
+};
+
+static int iqs620_temp_probe(struct platform_device *pdev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
+ if (!indio_dev)
+ return -ENOMEM;
+
+ iio_device_set_drvdata(indio_dev, iqs62x);
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->channels = iqs620_temp_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iqs620_temp_channels);
+ indio_dev->name = iqs62x->dev_desc->dev_name;
+ indio_dev->info = &iqs620_temp_info;
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static struct platform_driver iqs620_temp_platform_driver = {
+ .driver = {
+ .name = "iqs620at-temp",
+ },
+ .probe = iqs620_temp_probe,
+};
+module_platform_driver(iqs620_temp_platform_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS620AT Temperature Sensor");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iqs620at-temp");
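With RAW, SCALE and OFFSET all in info_mask_separate, the IIO core exposes this channel as in_temp_raw, in_temp_scale and in_temp_offset in sysfs, and consumers compute (raw + offset) * scale to obtain milli-degrees Celsius (here scale = 1000, offset = -100, and all three attributes read back as plain integers). A minimal userspace sketch, assuming the device enumerates as iio:device0 (the index depends on probe order):

#include <stdio.h>
#include <stdlib.h>

static long read_iio_attr(const char *name)
{
	char path[128];
	long val = 0;
	FILE *f;

	/* iio:device0 is an assumption; the index depends on probe order */
	snprintf(path, sizeof(path),
		 "/sys/bus/iio/devices/iio:device0/%s", name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	if (fscanf(f, "%ld", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	long raw = read_iio_attr("in_temp_raw");
	long offset = read_iio_attr("in_temp_offset");
	long scale = read_iio_attr("in_temp_scale");

	/* IIO convention: processed value = (raw + offset) * scale */
	printf("temperature: %ld milli-deg C\n", (raw + offset) * scale);
	return 0;
}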
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 2f82e8c32186..7d8962d6566a 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -75,14 +75,27 @@ static const void *stm32h7_valids_table[][MAX_VALIDS] = {
{ }, /* timer 17 */
};
+struct stm32_timer_trigger_regs {
+ u32 cr1;
+ u32 cr2;
+ u32 psc;
+ u32 arr;
+ u32 cnt;
+ u32 smcr;
+};
+
struct stm32_timer_trigger {
struct device *dev;
struct regmap *regmap;
struct clk *clk;
+ bool enabled;
u32 max_arr;
const void *triggers;
const void *valids;
bool has_trgo2;
+ struct mutex lock; /* concurrent sysfs configuration */
+ struct list_head tr_list;
+ struct stm32_timer_trigger_regs bak;
};
struct stm32_timer_trigger_cfg {
@@ -106,7 +119,7 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
{
unsigned long long prd, div;
int prescaler = 0;
- u32 ccer, cr1;
+ u32 ccer;
/* Period and prescaler values depends of clock rate */
div = (unsigned long long)clk_get_rate(priv->clk);
@@ -136,9 +149,11 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
if (ccer & TIM_CCER_CCXE)
return -EBUSY;
- regmap_read(priv->regmap, TIM_CR1, &cr1);
- if (!(cr1 & TIM_CR1_CEN))
+ mutex_lock(&priv->lock);
+ if (!priv->enabled) {
+ priv->enabled = true;
clk_enable(priv->clk);
+ }
regmap_write(priv->regmap, TIM_PSC, prescaler);
regmap_write(priv->regmap, TIM_ARR, prd - 1);
@@ -157,6 +172,7 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
/* Enable controller */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN);
+ mutex_unlock(&priv->lock);
return 0;
}
@@ -164,16 +180,13 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
static void stm32_timer_stop(struct stm32_timer_trigger *priv,
struct iio_trigger *trig)
{
- u32 ccer, cr1;
+ u32 ccer;
regmap_read(priv->regmap, TIM_CCER, &ccer);
if (ccer & TIM_CCER_CCXE)
return;
- regmap_read(priv->regmap, TIM_CR1, &cr1);
- if (cr1 & TIM_CR1_CEN)
- clk_disable(priv->clk);
-
+ mutex_lock(&priv->lock);
/* Stop timer */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
@@ -188,6 +201,12 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv,
/* Make sure that registers are updated */
regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
+
+ if (priv->enabled) {
+ priv->enabled = false;
+ clk_disable(priv->clk);
+ }
+ mutex_unlock(&priv->lock);
}
static ssize_t stm32_tt_store_frequency(struct device *dev,
@@ -302,8 +321,15 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
for (i = 0; i <= master_mode_max; i++) {
if (!strncmp(master_mode_table[i], buf,
strlen(master_mode_table[i]))) {
+ mutex_lock(&priv->lock);
+ if (!priv->enabled) {
+ /* Clock should be enabled first */
+ priv->enabled = true;
+ clk_enable(priv->clk);
+ }
regmap_update_bits(priv->regmap, TIM_CR2, mask,
i << shift);
+ mutex_unlock(&priv->lock);
return len;
}
}
@@ -361,11 +387,21 @@ static const struct attribute_group *stm32_trigger_attr_groups[] = {
static const struct iio_trigger_ops timer_trigger_ops = {
};
-static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
+static void stm32_unregister_iio_triggers(struct stm32_timer_trigger *priv)
+{
+ struct iio_trigger *tr;
+
+ list_for_each_entry(tr, &priv->tr_list, alloc_list)
+ iio_trigger_unregister(tr);
+}
+
+static int stm32_register_iio_triggers(struct stm32_timer_trigger *priv)
{
int ret;
const char * const *cur = priv->triggers;
+ INIT_LIST_HEAD(&priv->tr_list);
+
while (cur && *cur) {
struct iio_trigger *trig;
bool cur_is_trgo = stm32_timer_is_trgo_name(*cur);
@@ -392,9 +428,13 @@ static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
iio_trigger_set_drvdata(trig, priv);
- ret = devm_iio_trigger_register(priv->dev, trig);
- if (ret)
+ ret = iio_trigger_register(trig);
+ if (ret) {
+ stm32_unregister_iio_triggers(priv);
return ret;
+ }
+
+ list_add_tail(&trig->alloc_list, &priv->tr_list);
cur++;
}
@@ -441,7 +481,6 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
- u32 dat;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -452,19 +491,23 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
case IIO_CHAN_INFO_ENABLE:
+ mutex_lock(&priv->lock);
if (val) {
- regmap_read(priv->regmap, TIM_CR1, &dat);
- if (!(dat & TIM_CR1_CEN))
+ if (!priv->enabled) {
+ priv->enabled = true;
clk_enable(priv->clk);
+ }
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
} else {
- regmap_read(priv->regmap, TIM_CR1, &dat);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
0);
- if (dat & TIM_CR1_CEN)
+ if (priv->enabled) {
+ priv->enabled = false;
clk_disable(priv->clk);
+ }
}
+ mutex_unlock(&priv->lock);
return 0;
}
@@ -560,7 +603,6 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
int sms = stm32_enable_mode2sms(mode);
- u32 val;
if (sms < 0)
return sms;
@@ -568,11 +610,12 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
* Triggered mode sets CEN bit automatically by hardware. So, first
* enable counter clock, so it can use it. Keeps it in sync with CEN.
*/
- if (sms == 6) {
- regmap_read(priv->regmap, TIM_CR1, &val);
- if (!(val & TIM_CR1_CEN))
- clk_enable(priv->clk);
+ mutex_lock(&priv->lock);
+ if (sms == 6 && !priv->enabled) {
+ clk_enable(priv->clk);
+ priv->enabled = true;
}
+ mutex_unlock(&priv->lock);
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
@@ -756,8 +799,9 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
priv->triggers = triggers_table[index];
priv->valids = cfg->valids_table[index];
stm32_timer_detect_trgo2(priv);
+ mutex_init(&priv->lock);
- ret = stm32_setup_iio_triggers(priv);
+ ret = stm32_register_iio_triggers(priv);
if (ret)
return ret;
@@ -766,6 +810,77 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
return 0;
}
+static int stm32_timer_trigger_remove(struct platform_device *pdev)
+{
+ struct stm32_timer_trigger *priv = platform_get_drvdata(pdev);
+ u32 val;
+
+ /* Unregister triggers first, so everything can be safely turned off */
+ stm32_unregister_iio_triggers(priv);
+
+ /* Check if nobody else uses the timer, then disable it */
+ regmap_read(priv->regmap, TIM_CCER, &val);
+ if (!(val & TIM_CCER_CCXE))
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+
+ if (priv->enabled)
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_timer_trigger_suspend(struct device *dev)
+{
+ struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
+
+ /* Only take care of an enabled timer: don't disturb other MFD children */
+ if (priv->enabled) {
+ /* Backup registers that may get lost in low power mode */
+ regmap_read(priv->regmap, TIM_CR1, &priv->bak.cr1);
+ regmap_read(priv->regmap, TIM_CR2, &priv->bak.cr2);
+ regmap_read(priv->regmap, TIM_PSC, &priv->bak.psc);
+ regmap_read(priv->regmap, TIM_ARR, &priv->bak.arr);
+ regmap_read(priv->regmap, TIM_CNT, &priv->bak.cnt);
+ regmap_read(priv->regmap, TIM_SMCR, &priv->bak.smcr);
+
+ /* Disable the timer */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ clk_disable(priv->clk);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused stm32_timer_trigger_resume(struct device *dev)
+{
+ struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
+ int ret;
+
+ if (priv->enabled) {
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* restore master/slave modes */
+ regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr);
+ regmap_write(priv->regmap, TIM_CR2, priv->bak.cr2);
+
+ /* restore sampling_frequency (trgo / trgo2 triggers) */
+ regmap_write(priv->regmap, TIM_PSC, priv->bak.psc);
+ regmap_write(priv->regmap, TIM_ARR, priv->bak.arr);
+ regmap_write(priv->regmap, TIM_CNT, priv->bak.cnt);
+
+ /* Also re-enables the timer */
+ regmap_write(priv->regmap, TIM_CR1, priv->bak.cr1);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_timer_trigger_pm_ops,
+ stm32_timer_trigger_suspend,
+ stm32_timer_trigger_resume);
+
static const struct stm32_timer_trigger_cfg stm32_timer_trg_cfg = {
.valids_table = valids_table,
.num_valids_table = ARRAY_SIZE(valids_table),
@@ -790,9 +905,11 @@ MODULE_DEVICE_TABLE(of, stm32_trig_of_match);
static struct platform_driver stm32_timer_trigger_driver = {
.probe = stm32_timer_trigger_probe,
+ .remove = stm32_timer_trigger_remove,
.driver = {
.name = "stm32-timer-trigger",
.of_match_table = stm32_trig_of_match,
+ .pm = &stm32_timer_trigger_pm_ops,
},
};
module_platform_driver(stm32_timer_trigger_driver);
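The recurring change in this patch — replacing reads of the TIM_CR1_CEN bit with a driver-tracked enabled flag under a new mutex — keeps clk_enable()/clk_disable() calls balanced even when the hardware toggles CEN by itself (triggered mode sets it, for instance). A condensed sketch of the pattern with hypothetical names, not this driver's actual code:

#include <linux/clk.h>
#include <linux/mutex.h>

struct foo_timer {
	struct mutex lock;	/* serializes enabled + clk operations */
	bool enabled;		/* software view; CEN may change on its own */
	struct clk *clk;
};

static void foo_timer_start(struct foo_timer *p)
{
	mutex_lock(&p->lock);
	if (!p->enabled) {
		p->enabled = true;
		clk_enable(p->clk);	/* exactly one enable */
	}
	/* ... program prescaler/ARR and set CEN here ... */
	mutex_unlock(&p->lock);
}

static void foo_timer_stop(struct foo_timer *p)
{
	mutex_lock(&p->lock);
	/* ... clear CEN and quiesce the hardware here ... */
	if (p->enabled) {
		p->enabled = false;
		clk_disable(p->clk);	/* exactly one disable */
	}
	mutex_unlock(&p->lock);
}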
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 17bfedd24cc3..a670209bbce6 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -46,7 +46,7 @@
struct ib_pkey_cache {
int table_len;
- u16 table[0];
+ u16 table[];
};
struct ib_update_work {
@@ -973,6 +973,23 @@ done:
EXPORT_SYMBOL(rdma_query_gid);
/**
+ * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
+ * @attr: Pointer to the GID attribute
+ *
+ * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
+ * to the SGID attr. Callers are required to already be holding the reference
+ * to an existing GID entry.
+ *
+ * Returns the HW GID context
+ *
+ */
+void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
+{
+ return container_of(attr, struct ib_gid_table_entry, attr)->context;
+}
+EXPORT_SYMBOL(rdma_read_gid_hw_context);
+
+/**
* rdma_find_gid - Returns SGID attributes if the matching GID is found.
* @device: The device to query.
* @gid: The GID value to search for.
@@ -1536,8 +1553,11 @@ int ib_cache_setup_one(struct ib_device *device)
if (err)
return err;
- rdma_for_each_port (device, p)
- ib_cache_update(device, p, true);
+ rdma_for_each_port (device, p) {
+ err = ib_cache_update(device, p, true);
+ if (err)
+ return err;
+ }
return 0;
}
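A likely consumer of the rdma_read_gid_hw_context() export added above is a provider driver retrieving the per-GID cookie it stored from its add_gid() callback. A hypothetical sketch, assuming the companion header change declares the function in <rdma/ib_cache.h> alongside rdma_query_gid():

#include <rdma/ib_cache.h>

/* Hypothetical per-GID context a provider stored via add_gid() */
struct fake_gid_ctx {
	u32 hw_index;
};

static u32 fake_gid_hw_index(const struct ib_gid_attr *attr)
{
	/* Caller must already hold a reference on @attr */
	struct fake_gid_ctx *ctx = rdma_read_gid_hw_context(attr);

	return ctx ? ctx->hw_index : 0;
}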
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 15e99a888427..17f14e0eafe4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -80,8 +80,19 @@ const char *__attribute_const__ ibcm_reject_msg(int reason)
}
EXPORT_SYMBOL(ibcm_reject_msg);
+struct cm_id_private;
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
+static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ struct ib_cm_sidr_rep_param *param);
+static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
+ const void *private_data, u8 private_data_len);
+static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
+ void *private_data, u8 private_data_len);
+static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len);
static struct ib_client cm_client = {
.name = "cm",
@@ -197,7 +208,7 @@ struct cm_device {
struct ib_device *ib_device;
u8 ack_delay;
int going_down;
- struct cm_port *port[0];
+ struct cm_port *port[];
};
struct cm_av {
@@ -216,7 +227,7 @@ struct cm_work {
__be32 local_id; /* Established / timewait */
__be32 remote_id;
struct ib_cm_event cm_event;
- struct sa_path_rec path[0];
+ struct sa_path_rec path[];
};
struct cm_timewait_info {
@@ -261,7 +272,6 @@ struct cm_id_private {
__be16 pkey;
u8 private_data_len;
u8 max_cm_retries;
- u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
u8 retry_count;
@@ -572,18 +582,6 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
return 0;
}
-static int cm_alloc_id(struct cm_id_private *cm_id_priv)
-{
- int err;
- u32 id;
-
- err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
- xa_limit_32b, &cm.local_id_next, GFP_KERNEL);
-
- cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return err;
-}
-
static u32 cm_local_id(__be32 local_id)
{
return (__force u32) (local_id ^ cm.random_id_operand);
@@ -633,22 +631,44 @@ static int be64_gt(__be64 a, __be64 b)
return (__force u64) a > (__force u64) b;
}
-static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
+/*
+ * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
+ * if the new ID was inserted, NULL if it could not be inserted due to a
+ * collision, or the existing cm_id_priv ready for shared usage.
+ */
+static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
+ ib_cm_handler shared_handler)
{
struct rb_node **link = &cm.listen_service_table.rb_node;
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
__be64 service_id = cm_id_priv->id.service_id;
__be64 service_mask = cm_id_priv->id.service_mask;
+ unsigned long flags;
+ spin_lock_irqsave(&cm.lock, flags);
while (*link) {
parent = *link;
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
service_node);
if ((cur_cm_id_priv->id.service_mask & service_id) ==
(service_mask & cur_cm_id_priv->id.service_id) &&
- (cm_id_priv->id.device == cur_cm_id_priv->id.device))
+ (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
+ /*
+ * Sharing an ib_cm_id with different handlers is not
+ * supported
+ */
+ if (cur_cm_id_priv->id.cm_handler != shared_handler ||
+ cur_cm_id_priv->id.context ||
+ WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return NULL;
+ }
+ refcount_inc(&cur_cm_id_priv->refcount);
+ cur_cm_id_priv->listen_sharecount++;
+ spin_unlock_irqrestore(&cm.lock, flags);
return cur_cm_id_priv;
+ }
if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
link = &(*link)->rb_left;
@@ -661,9 +681,11 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
else
link = &(*link)->rb_right;
}
+ cm_id_priv->listen_sharecount++;
rb_link_node(&cm_id_priv->service_node, parent, link);
rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
- return NULL;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return cm_id_priv;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
@@ -810,21 +832,12 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
return NULL;
}
-static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
- enum ib_cm_sidr_status status)
-{
- struct ib_cm_sidr_rep_param param;
-
- memset(&param, 0, sizeof param);
- param.status = status;
- ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
-}
-
-struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
- ib_cm_handler cm_handler,
- void *context)
+static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ void *context)
{
struct cm_id_private *cm_id_priv;
+ u32 id;
int ret;
cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
@@ -836,10 +849,9 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
cm_id_priv->id.cm_handler = cm_handler;
cm_id_priv->id.context = context;
cm_id_priv->id.remote_cm_qpn = 1;
- ret = cm_alloc_id(cm_id_priv);
- if (ret)
- goto error;
+ RB_CLEAR_NODE(&cm_id_priv->service_node);
+ RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
@@ -847,11 +859,42 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
refcount_set(&cm_id_priv->refcount, 1);
- return &cm_id_priv->id;
+
+ ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
+ &cm.local_id_next, GFP_KERNEL);
+ if (ret < 0)
+ goto error;
+ cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
+
+ return cm_id_priv;
error:
kfree(cm_id_priv);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Make the ID visible to the MAD handlers and other threads that use the
+ * xarray.
+ */
+static void cm_finalize_id(struct cm_id_private *cm_id_priv)
+{
+ xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+ cm_id_priv, GFP_KERNEL);
+}
+
+struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ void *context)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
+ if (IS_ERR(cm_id_priv))
+ return ERR_CAST(cm_id_priv);
+
+ cm_finalize_id(cm_id_priv);
+ return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);
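The cm_alloc_id_priv()/cm_finalize_id() split above is a two-phase xarray publication: xa_alloc_cyclic_irq() with a NULL entry reserves an ID while lookups still miss, and xa_store_irq() later publishes the fully initialized pointer. The idiom in isolation, with hypothetical names:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(obj_table);
static u32 obj_next;

struct obj {
	u32 id;
	/* ... fields that must be valid before lookup may see us ... */
};

static struct obj *obj_create(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);
	int ret;

	if (!o)
		return ERR_PTR(-ENOMEM);

	/* Phase 1: reserve an ID, but keep the slot NULL (invisible) */
	ret = xa_alloc_cyclic_irq(&obj_table, &o->id, NULL, xa_limit_32b,
				  &obj_next, GFP_KERNEL);
	if (ret < 0) {
		kfree(o);
		return ERR_PTR(ret);
	}

	/* ... finish initialization; lookups still return NULL ... */

	/* Phase 2: publish; from here concurrent readers can find it */
	xa_store_irq(&obj_table, o->id, o, GFP_KERNEL);
	return o;
}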
@@ -932,6 +975,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
unsigned long flags;
struct cm_device *cm_dev;
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
if (!cm_dev)
return;
@@ -963,6 +1008,8 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_id_priv->id.state = IB_CM_IDLE;
if (cm_id_priv->timewait_info) {
spin_lock_irqsave(&cm.lock, flags);
@@ -979,54 +1026,51 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
struct cm_work *work;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-retest:
spin_lock_irq(&cm_id_priv->lock);
+retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
- spin_unlock_irq(&cm_id_priv->lock);
-
- spin_lock_irq(&cm.lock);
+ spin_lock(&cm.lock);
if (--cm_id_priv->listen_sharecount > 0) {
/* The id is still shared. */
+ WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
+ spin_unlock(&cm.lock);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_deref_id(cm_id_priv);
- spin_unlock_irq(&cm.lock);
return;
}
+ cm_id->state = IB_CM_IDLE;
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
- spin_unlock_irq(&cm.lock);
+ RB_CLEAR_NODE(&cm_id_priv->service_node);
+ spin_unlock(&cm.lock);
break;
case IB_CM_SIDR_REQ_SENT:
cm_id->state = IB_CM_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irq(&cm_id_priv->lock);
break;
case IB_CM_SIDR_REQ_RCVD:
- spin_unlock_irq(&cm_id_priv->lock);
- cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
- spin_lock_irq(&cm.lock);
- if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
- rb_erase(&cm_id_priv->sidr_id_node,
- &cm.remote_sidr_table);
- spin_unlock_irq(&cm.lock);
+ cm_send_sidr_rep_locked(cm_id_priv,
+ &(struct ib_cm_sidr_rep_param){
+ .status = IB_SIDR_REJECT });
+ /* cm_send_sidr_rep_locked will not move to IDLE if it fails */
+ cm_id->state = IB_CM_IDLE;
break;
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
- &cm_id_priv->id.device->node_guid,
- sizeof cm_id_priv->id.device->node_guid,
- NULL, 0);
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
+ &cm_id_priv->id.device->node_guid,
+ sizeof(cm_id_priv->id.device->node_guid),
+ NULL, 0);
break;
case IB_CM_REQ_RCVD:
if (err == -ENOMEM) {
/* Do not reject to allow future retries. */
cm_reset_to_idle(cm_id_priv);
- spin_unlock_irq(&cm_id_priv->lock);
} else {
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
- NULL, 0, NULL, 0);
+ cm_send_rej_locked(cm_id_priv,
+ IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+ NULL, 0);
}
break;
case IB_CM_REP_SENT:
@@ -1036,38 +1080,56 @@ retest:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
- NULL, 0, NULL, 0);
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, NULL, 0);
break;
case IB_CM_ESTABLISHED:
- spin_unlock_irq(&cm_id_priv->lock);
- if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
+ if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
+ cm_id->state = IB_CM_IDLE;
break;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ }
+ cm_send_dreq_locked(cm_id_priv, NULL, 0);
goto retest;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
- spin_unlock_irq(&cm_id_priv->lock);
- break;
+ goto retest;
case IB_CM_DREQ_RCVD:
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_drep(cm_id, NULL, 0);
+ cm_send_drep_locked(cm_id_priv, NULL, 0);
+ WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
+ goto retest;
+ case IB_CM_TIMEWAIT:
+ /*
+ * The cm_acquire_id in cm_timewait_handler will stop working
+ * once we do cm_free_id() below, so just move to idle here for
+ * consistency.
+ */
+ cm_id->state = IB_CM_IDLE;
break;
- default:
- spin_unlock_irq(&cm_id_priv->lock);
+ case IB_CM_IDLE:
break;
}
+ WARN_ON(cm_id->state != IB_CM_IDLE);
- spin_lock_irq(&cm.lock);
+ spin_lock(&cm.lock);
+ /* Required for cleanup paths related to cm_req_handler() */
+ if (cm_id_priv->timewait_info) {
+ cm_cleanup_timewait(cm_id_priv->timewait_info);
+ kfree(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
+ }
if (!list_empty(&cm_id_priv->altr_list) &&
(!cm_id_priv->altr_send_port_not_ready))
list_del(&cm_id_priv->altr_list);
if (!list_empty(&cm_id_priv->prim_list) &&
(!cm_id_priv->prim_send_port_not_ready))
list_del(&cm_id_priv->prim_list);
- spin_unlock_irq(&cm.lock);
+ WARN_ON(cm_id_priv->listen_sharecount);
+ WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
+ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
+ rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ spin_unlock(&cm.lock);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
@@ -1087,8 +1149,27 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id)
}
EXPORT_SYMBOL(ib_destroy_cm_id);
+static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
+ __be64 service_mask)
+{
+ service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
+ service_id &= service_mask;
+ if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
+ (service_id != IB_CM_ASSIGN_SERVICE_ID))
+ return -EINVAL;
+
+ if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
+ cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ } else {
+ cm_id_priv->id.service_id = service_id;
+ cm_id_priv->id.service_mask = service_mask;
+ }
+ return 0;
+}
+
/**
- * __ib_cm_listen - Initiates listening on the specified service ID for
+ * ib_cm_listen - Initiates listening on the specified service ID for
* connection and service ID resolution requests.
* @cm_id: Connection identifier associated with the listen request.
* @service_id: Service identifier matched against incoming connection
@@ -1100,51 +1181,33 @@ EXPORT_SYMBOL(ib_destroy_cm_id);
* exactly. This parameter is ignored if %service_id is set to
* IB_CM_ASSIGN_SERVICE_ID.
*/
-static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
- __be64 service_mask)
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
- struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
- int ret = 0;
-
- service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
- service_id &= service_mask;
- if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
- (service_id != IB_CM_ASSIGN_SERVICE_ID))
- return -EINVAL;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- if (cm_id->state != IB_CM_IDLE)
- return -EINVAL;
-
- cm_id->state = IB_CM_LISTEN;
- ++cm_id_priv->listen_sharecount;
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
- if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
- cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
- cm_id->service_mask = ~cpu_to_be64(0);
- } else {
- cm_id->service_id = service_id;
- cm_id->service_mask = service_mask;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->id.state != IB_CM_IDLE) {
+ ret = -EINVAL;
+ goto out;
}
- cur_cm_id_priv = cm_insert_listen(cm_id_priv);
- if (cur_cm_id_priv) {
- cm_id->state = IB_CM_IDLE;
- --cm_id_priv->listen_sharecount;
+ ret = cm_init_listen(cm_id_priv, service_id, service_mask);
+ if (ret)
+ goto out;
+
+ if (!cm_insert_listen(cm_id_priv, NULL)) {
ret = -EBUSY;
+ goto out;
}
- return ret;
-}
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cm.lock, flags);
- ret = __ib_cm_listen(cm_id, service_id, service_mask);
- spin_unlock_irqrestore(&cm.lock, flags);
+ cm_id_priv->id.state = IB_CM_LISTEN;
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
@@ -1169,51 +1232,38 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
ib_cm_handler cm_handler,
__be64 service_id)
{
+ struct cm_id_private *listen_id_priv;
struct cm_id_private *cm_id_priv;
- struct ib_cm_id *cm_id;
- unsigned long flags;
int err = 0;
/* Create an ID in advance, since the creation may sleep */
- cm_id = ib_create_cm_id(device, cm_handler, NULL);
- if (IS_ERR(cm_id))
- return cm_id;
-
- spin_lock_irqsave(&cm.lock, flags);
+ cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
+ if (IS_ERR(cm_id_priv))
+ return ERR_CAST(cm_id_priv);
- if (service_id == IB_CM_ASSIGN_SERVICE_ID)
- goto new_id;
+ err = cm_init_listen(cm_id_priv, service_id, 0);
+ if (err)
+ return ERR_PTR(err);
- /* Find an existing ID */
- cm_id_priv = cm_find_listen(device, service_id);
- if (cm_id_priv) {
- if (cm_id->cm_handler != cm_handler || cm_id->context) {
- /* Sharing an ib_cm_id with different handlers is not
- * supported */
- spin_unlock_irqrestore(&cm.lock, flags);
- ib_destroy_cm_id(cm_id);
+ spin_lock_irq(&cm_id_priv->lock);
+ listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
+ if (listen_id_priv != cm_id_priv) {
+ spin_unlock_irq(&cm_id_priv->lock);
+ ib_destroy_cm_id(&cm_id_priv->id);
+ if (!listen_id_priv)
return ERR_PTR(-EINVAL);
- }
- refcount_inc(&cm_id_priv->refcount);
- ++cm_id_priv->listen_sharecount;
- spin_unlock_irqrestore(&cm.lock, flags);
-
- ib_destroy_cm_id(cm_id);
- cm_id = &cm_id_priv->id;
- return cm_id;
+ return &listen_id_priv->id;
}
+ cm_id_priv->id.state = IB_CM_LISTEN;
+ spin_unlock_irq(&cm_id_priv->lock);
-new_id:
- /* Use newly created ID */
- err = __ib_cm_listen(cm_id, service_id, 0);
-
- spin_unlock_irqrestore(&cm.lock, flags);
+ /*
+ * A listen ID does not need to be in the xarray since it does not
+ * receive mads, is not placed in the remote_id or remote_qpn rbtree,
+ * and does not enter timewait.
+ */
- if (err) {
- ib_destroy_cm_id(cm_id);
- return ERR_PTR(err);
- }
- return cm_id;
+ return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
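After the rework, two callers passing the same device, handler and service ID share one underlying cm_id_priv with listen_sharecount elevated, and each share is balanced by ib_destroy_cm_id(). A hypothetical caller:

#include <rdma/ib_cm.h>

/* Hypothetical: multiple ULP instances share one listen on svc_id */
static struct ib_cm_id *start_listen(struct ib_device *dev,
				     ib_cm_handler handler, __be64 svc_id)
{
	struct ib_cm_id *id;

	id = ib_cm_insert_listen(dev, handler, svc_id);
	if (IS_ERR(id))
		return id;	/* -EINVAL on handler/context mismatch */
	/*
	 * On success this may be a brand-new listen or a shared one;
	 * either way a matching ib_destroy_cm_id() drops our share.
	 */
	return id;
}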
@@ -1381,10 +1431,6 @@ static void cm_format_req(struct cm_req_msg *req_msg,
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
- /* peer-to-peer not supported */
- if (param->peer_to_peer)
- return -EINVAL;
-
if (!param->primary_path)
return -EINVAL;
@@ -1419,7 +1465,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
/* Verify that we're not in timewait. */
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_IDLE) {
+ if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = -EINVAL;
goto out;
@@ -1437,12 +1483,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
param->ppath_sgid_attr, &cm_id_priv->av,
cm_id_priv);
if (ret)
- goto error1;
+ goto out;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path, NULL,
&cm_id_priv->alt_av, cm_id_priv);
if (ret)
- goto error1;
+ goto out;
}
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
@@ -1460,7 +1506,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
if (ret)
- goto error1;
+ goto out;
req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
@@ -1483,7 +1529,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
return 0;
error2: cm_free_msg(cm_id_priv->msg);
-error1: kfree(cm_id_priv->timewait_info);
out: return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
@@ -1783,17 +1828,17 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
static void cm_format_rej(struct cm_rej_msg *rej_msg,
struct cm_id_private *cm_id_priv,
- enum ib_cm_rej_reason reason,
- void *ari,
- u8 ari_length,
- const void *private_data,
- u8 private_data_len)
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len, enum ib_cm_state state)
{
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
- switch(cm_id_priv->id.state) {
+ switch (state) {
case IB_CM_REQ_RCVD:
IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
@@ -1838,8 +1883,12 @@ static void cm_dup_req_handler(struct cm_work *work,
counter[CM_REQ_COUNTER]);
/* Quick state check to discard duplicate REQs. */
- if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
+ spin_lock_irq(&cm_id_priv->lock);
+ if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
+ spin_unlock_irq(&cm_id_priv->lock);
return;
+ }
+ spin_unlock_irq(&cm_id_priv->lock);
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
@@ -1854,8 +1903,9 @@ static void cm_dup_req_handler(struct cm_work *work,
cm_id_priv->private_data_len);
break;
case IB_CM_TIMEWAIT:
- cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
- IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
+ IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
+ IB_CM_TIMEWAIT);
break;
default:
goto unlock;
@@ -1924,14 +1974,10 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
- goto out;
+ return NULL;
}
refcount_inc(&listen_cm_id_priv->refcount);
- refcount_inc(&cm_id_priv->refcount);
- cm_id_priv->id.state = IB_CM_REQ_RCVD;
- atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
-out:
return listen_cm_id_priv;
}
@@ -1973,7 +2019,6 @@ static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
static int cm_req_handler(struct cm_work *work)
{
- struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_req_msg *req_msg;
const struct ib_global_route *grh;
@@ -1982,13 +2027,33 @@ static int cm_req_handler(struct cm_work *work)
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
- if (IS_ERR(cm_id))
- return PTR_ERR(cm_id);
+ cm_id_priv =
+ cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
+ if (IS_ERR(cm_id_priv))
+ return PTR_ERR(cm_id_priv);
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
cm_id_priv->id.remote_id =
cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
+ cm_id_priv->id.service_id =
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ cm_id_priv->tid = req_msg->hdr.tid;
+ cm_id_priv->timeout_ms = cm_convert_to_ms(
+ IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
+ cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
+ cm_id_priv->remote_qpn =
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+ cm_id_priv->initiator_depth =
+ IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
+ cm_id_priv->responder_resources =
+ IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
+ cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
+ cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+ cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
+ cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
+ cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
+ cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
+
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
@@ -2000,27 +2065,26 @@ static int cm_req_handler(struct cm_work *work)
ret = PTR_ERR(cm_id_priv->timewait_info);
goto destroy;
}
- cm_id_priv->timewait_info->work.remote_id =
- cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
+ cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
cm_id_priv->timewait_info->remote_ca_guid =
cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
- cm_id_priv->timewait_info->remote_qpn =
- cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+ cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
+
+ /*
+ * Note that the ID pointer is not in the xarray at this point,
+ * so this set is only visible to the local thread.
+ */
+ cm_id_priv->id.state = IB_CM_REQ_RCVD;
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
- be32_to_cpu(cm_id->local_id));
+ be32_to_cpu(cm_id_priv->id.local_id));
+ cm_id_priv->id.state = IB_CM_IDLE;
ret = -EINVAL;
- goto free_timeinfo;
+ goto destroy;
}
- cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
- cm_id_priv->id.context = listen_cm_id_priv->id.context;
- cm_id_priv->id.service_id =
- cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
-
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
memset(&work->path[0], 0, sizeof(work->path[0]));
@@ -2058,10 +2122,10 @@ static int cm_req_handler(struct cm_work *work)
work->port->port_num, 0,
&work->path[0].sgid);
if (err)
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
NULL, 0, NULL, 0);
else
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
&work->path[0].sgid,
sizeof(work->path[0].sgid),
NULL, 0);
@@ -2071,41 +2135,40 @@ static int cm_req_handler(struct cm_work *work)
ret = cm_init_av_by_path(&work->path[1], NULL,
&cm_id_priv->alt_av, cm_id_priv);
if (ret) {
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+ ib_send_cm_rej(&cm_id_priv->id,
+ IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
sizeof(work->path[0].sgid), NULL, 0);
goto rejected;
}
}
- cm_id_priv->tid = req_msg->hdr.tid;
- cm_id_priv->timeout_ms = cm_convert_to_ms(
- IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
- cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
- cm_id_priv->remote_qpn =
- cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
- cm_id_priv->initiator_depth =
- IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
- cm_id_priv->responder_resources =
- IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
- cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
- cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
- cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
- cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
- cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
- cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+ cm_id_priv->id.context = listen_cm_id_priv->id.context;
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
+
+ /* Now MAD handlers can see the new ID */
+ spin_lock_irq(&cm_id_priv->lock);
+ cm_finalize_id(cm_id_priv);
+
+ /* Refcount belongs to the event, pairs with cm_process_work() */
+ refcount_inc(&cm_id_priv->refcount);
+ atomic_inc(&cm_id_priv->work_count);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
+ /*
+ * Since this ID was just created and was not made visible to other MAD
+ * handlers until the cm_finalize_id() above, we know that the
+ * cm_process_work() will deliver the event and the listen_cm_id
+ * embedded in the event can be derefed here.
+ */
cm_deref_id(listen_cm_id_priv);
return 0;
rejected:
- refcount_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
-free_timeinfo:
- kfree(cm_id_priv->timewait_info);
destroy:
- ib_destroy_cm_id(cm_id);
+ ib_destroy_cm_id(&cm_id_priv->id);
return ret;
}
@@ -2189,6 +2252,9 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
+ WARN_ONCE(param->qp_num & 0xFF000000,
+ "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
+ param->qp_num);
cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -2359,13 +2425,13 @@ static int cm_rep_handler(struct cm_work *work)
case IB_CM_MRA_REQ_RCVD:
break;
default:
- spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
pr_debug(
"%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
__func__, cm_id_priv->id.state,
IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
+ spin_unlock_irq(&cm_id_priv->lock);
goto error;
}
@@ -2434,8 +2500,6 @@ static int cm_rep_handler(struct cm_work *work)
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->alt_av.timeout - 1);
- /* todo: handle peer_to_peer */
-
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
@@ -2546,35 +2610,32 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
private_data_len);
}
-int ib_send_cm_dreq(struct ib_cm_id *cm_id,
- const void *private_data,
- u8 private_data_len)
+static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
+ const void *private_data, u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
- unsigned long flags;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_ESTABLISHED) {
+ if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
- be32_to_cpu(cm_id->local_id), cm_id->state);
- ret = -EINVAL;
- goto out;
+ be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
+ return -EINVAL;
}
- if (cm_id->lap_state == IB_CM_LAP_SENT ||
- cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
+ cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret) {
cm_enter_timewait(cm_id_priv);
- goto out;
+ return ret;
}
cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
@@ -2585,14 +2646,26 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_enter_timewait(cm_id_priv);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
- cm_id->state = IB_CM_DREQ_SENT;
+ cm_id_priv->id.state = IB_CM_DREQ_SENT;
cm_id_priv->msg = msg;
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return 0;
+}
+
+int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
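ib_send_cm_dreq() now follows the locked-helper split used throughout this patch: the exported wrapper only takes the spinlock, while the _locked worker asserts it via lockdep_assert_held() so internal paths such as cm_destroy_id() can call it with the lock already held. The skeleton of the idiom, with hypothetical names:

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct widget {
	spinlock_t lock;
	int state;
};

static int widget_frob_locked(struct widget *w)
{
	lockdep_assert_held(&w->lock);	/* catches callers without the lock */

	if (w->state != 1)		/* 1 == READY, hypothetical */
		return -EINVAL;
	w->state = 2;			/* 2 == BUSY, hypothetical */
	return 0;
}

int widget_frob(struct widget *w)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&w->lock, flags);
	ret = widget_frob_locked(w);
	spin_unlock_irqrestore(&w->lock, flags);
	return ret;
}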
@@ -2613,51 +2686,60 @@ static void cm_format_drep(struct cm_drep_msg *drep_msg,
private_data_len);
}
-int ib_send_cm_drep(struct ib_cm_id *cm_id,
- const void *private_data,
- u8 private_data_len)
+static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
+ void *private_data, u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
- unsigned long flags;
- void *data;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
return -EINVAL;
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_DREQ_RCVD) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
- pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
- __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
+ if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
+ pr_debug(
+ "%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
+ kfree(private_data);
return -EINVAL;
}
- cm_set_private_data(cm_id_priv, data, private_data_len);
+ cm_set_private_data(cm_id_priv, private_data, private_data_len);
cm_enter_timewait(cm_id_priv);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
- goto out;
+ return ret;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
+ return 0;
+}
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ void *data;
+ int ret;
+
+ data = cm_copy_private_data(private_data, private_data_len);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
@@ -2816,65 +2898,75 @@ out:
return -EINVAL;
}
-int ib_send_cm_rej(struct ib_cm_id *cm_id,
- enum ib_cm_rej_reason reason,
- void *ari,
- u8 ari_length,
- const void *private_data,
- u8 private_data_len)
+static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
+ enum ib_cm_state state = cm_id_priv->id.state;
struct ib_mad_send_buf *msg;
- unsigned long flags;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
(ari && ari_length > IB_CM_REJ_ARI_LENGTH))
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch (cm_id->state) {
+ switch (state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (!ret)
- cm_format_rej((struct cm_rej_msg *) msg->mad,
- cm_id_priv, reason, ari, ari_length,
- private_data, private_data_len);
-
cm_reset_to_idle(cm_id_priv);
+ ret = cm_alloc_msg(cm_id_priv, &msg);
+ if (ret)
+ return ret;
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
+ ari, ari_length, private_data, private_data_len,
+ state);
break;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (!ret)
- cm_format_rej((struct cm_rej_msg *) msg->mad,
- cm_id_priv, reason, ari, ari_length,
- private_data, private_data_len);
-
cm_enter_timewait(cm_id_priv);
+ ret = cm_alloc_msg(cm_id_priv, &msg);
+ if (ret)
+ return ret;
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
+ ari, ari_length, private_data, private_data_len,
+ state);
break;
default:
pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
- be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
- ret = -EINVAL;
- goto out;
+ be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
+ return -EINVAL;
}
- if (ret)
- goto out;
-
ret = ib_post_send_mad(msg, NULL);
- if (ret)
+ if (ret) {
cm_free_msg(msg);
+ return ret;
+ }
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return 0;
+}
+
+int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
+ void *ari, u8 ari_length, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
+ private_data, private_data_len);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
@@ -2972,10 +3064,10 @@ static int cm_rej_handler(struct cm_work *work)
}
/* fall through */
default:
- spin_unlock_irq(&cm_id_priv->lock);
pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
__func__, be32_to_cpu(cm_id_priv->id.local_id),
cm_id_priv->id.state);
+ spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto out;
}
@@ -3502,20 +3594,27 @@ static void cm_format_sidr_req_event(struct cm_work *work,
static int cm_sidr_req_handler(struct cm_work *work)
{
- struct ib_cm_id *cm_id;
- struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
+ struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
int ret;
- cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
- if (IS_ERR(cm_id))
- return PTR_ERR(cm_id);
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ cm_id_priv =
+ cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
+ if (IS_ERR(cm_id_priv))
+ return PTR_ERR(cm_id_priv);
/* Record SGID/SLID and request ID for lookup. */
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
+
+ cm_id_priv->id.remote_id =
+ cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
+ cm_id_priv->id.service_id =
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ cm_id_priv->tid = sidr_req_msg->hdr.tid;
+
wc = work->mad_recv_wc->wc;
cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
@@ -3525,41 +3624,46 @@ static int cm_sidr_req_handler(struct cm_work *work)
if (ret)
goto out;
- cm_id_priv->id.remote_id =
- cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
- cm_id_priv->tid = sidr_req_msg->hdr.tid;
- atomic_inc(&cm_id_priv->work_count);
-
spin_lock_irq(&cm.lock);
- cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
- if (cur_cm_id_priv) {
+ listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+ if (listen_cm_id_priv) {
spin_unlock_irq(&cm.lock);
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
- cur_cm_id_priv = cm_find_listen(
- cm_id->device,
- cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)));
- if (!cur_cm_id_priv) {
+ listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
+ cm_id_priv->id.service_id);
+ if (!listen_cm_id_priv) {
spin_unlock_irq(&cm.lock);
- cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
+ ib_send_cm_sidr_rep(&cm_id_priv->id,
+ &(struct ib_cm_sidr_rep_param){
+ .status = IB_SIDR_UNSUPPORTED });
goto out; /* No match. */
}
- refcount_inc(&cur_cm_id_priv->refcount);
- refcount_inc(&cm_id_priv->refcount);
+ refcount_inc(&listen_cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
- cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
- cm_id_priv->id.context = cur_cm_id_priv->id.context;
- cm_id_priv->id.service_id =
- cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+ cm_id_priv->id.context = listen_cm_id_priv->id.context;
- cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
- cm_process_work(cm_id_priv, work);
- cm_deref_id(cur_cm_id_priv);
+ /*
+ * A SIDR ID does not need to be in the xarray since it does not receive
+ * mads, is not placed in the remote_id or remote_qpn rbtree, and does
+ * not enter timewait.
+ */
+
+ cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
+ cm_free_work(work);
+ /*
+ * A pointer to the listen_cm_id is held in the event, so this deref
+ * must be after the event is delivered above.
+ */
+ cm_deref_id(listen_cm_id_priv);
+ if (ret)
+ cm_destroy_id(&cm_id_priv->id, ret);
return 0;
out:
ib_destroy_cm_id(&cm_id_priv->id);
@@ -3589,50 +3693,52 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
param->private_data, param->private_data_len);
}
-int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
- struct ib_cm_sidr_rep_param *param)
+static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ struct ib_cm_sidr_rep_param *param)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
- unsigned long flags;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
(param->private_data &&
param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
- ret = -EINVAL;
- goto error;
- }
+ if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
+ return -EINVAL;
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
- goto error;
+ return ret;
cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
param);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
- cm_id->state = IB_CM_IDLE;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-
- spin_lock_irqsave(&cm.lock, flags);
+ cm_id_priv->id.state = IB_CM_IDLE;
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
- spin_unlock_irqrestore(&cm.lock, flags);
return 0;
+}
-error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
+ struct ib_cm_sidr_rep_param *param)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_sidr_rep_locked(cm_id_priv, param);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2dec3a02ab9f..26e6f7df247b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -199,7 +199,7 @@ struct cma_device {
struct list_head list;
struct ib_device *device;
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
struct list_head id_list;
enum ib_gid_type *default_gid_type;
u8 *default_roce_tos;
@@ -247,9 +247,15 @@ enum {
CMA_OPTION_AFONLY,
};
-void cma_ref_dev(struct cma_device *cma_dev)
+void cma_dev_get(struct cma_device *cma_dev)
{
- atomic_inc(&cma_dev->refcount);
+ refcount_inc(&cma_dev->refcount);
+}
+
+void cma_dev_put(struct cma_device *cma_dev)
+{
+ if (refcount_dec_and_test(&cma_dev->refcount))
+ complete(&cma_dev->comp);
}
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
@@ -267,7 +273,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
}
if (found_cma_dev)
- cma_ref_dev(found_cma_dev);
+ cma_dev_get(found_cma_dev);
mutex_unlock(&lock);
return found_cma_dev;
}
@@ -463,7 +469,7 @@ static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev)
{
- cma_ref_dev(cma_dev);
+ cma_dev_get(cma_dev);
id_priv->cma_dev = cma_dev;
id_priv->id.device = cma_dev->device;
id_priv->id.route.addr.dev_addr.transport =
@@ -484,12 +490,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
rdma_start_port(cma_dev->device)];
}
-void cma_deref_dev(struct cma_device *cma_dev)
-{
- if (atomic_dec_and_test(&cma_dev->refcount))
- complete(&cma_dev->comp);
-}
-
static inline void release_mc(struct kref *kref)
{
struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
@@ -502,7 +502,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
{
mutex_lock(&lock);
list_del(&id_priv->list);
- cma_deref_dev(id_priv->cma_dev);
+ cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
mutex_unlock(&lock);
}
@@ -728,8 +728,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
+ unsigned int port;
union ib_gid gid;
- u8 port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
@@ -753,7 +753,7 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
}
list_for_each_entry(cma_dev, &dev_list, list) {
- for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+ rdma_for_each_port (cma_dev->device, port) {
if (listen_id_priv->cma_dev == cma_dev &&
listen_id_priv->id.port_num == port)
continue;
@@ -786,8 +786,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
struct cma_device *cma_dev, *cur_dev;
struct sockaddr_ib *addr;
union ib_gid gid, sgid, *dgid;
+ unsigned int p;
u16 pkey, index;
- u8 p;
enum ib_port_state port_state;
int i;
@@ -798,7 +798,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
- for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+ rdma_for_each_port (cur_dev->device, p) {
if (!rdma_cap_af_ib(cur_dev->device, p))
continue;
@@ -840,9 +840,14 @@ found:
return 0;
}
-static void cma_deref_id(struct rdma_id_private *id_priv)
+static void cma_id_get(struct rdma_id_private *id_priv)
+{
+ refcount_inc(&id_priv->refcount);
+}
+
+static void cma_id_put(struct rdma_id_private *id_priv)
{
- if (atomic_dec_and_test(&id_priv->refcount))
+ if (refcount_dec_and_test(&id_priv->refcount))
complete(&id_priv->comp);
}
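cma_id_get()/cma_id_put() pair a refcount_t with a completion so the destroy path can wait out the last concurrent user, the same shape as the cma_dev_get()/cma_dev_put() helpers above. A standalone sketch of the idiom, with hypothetical names:

#include <linux/completion.h>
#include <linux/refcount.h>

struct thing {
	refcount_t refcount;
	struct completion comp;
};

static void thing_init(struct thing *t)
{
	refcount_set(&t->refcount, 1);	/* creator's reference */
	init_completion(&t->comp);
}

static void thing_get(struct thing *t)
{
	refcount_inc(&t->refcount);
}

static void thing_put(struct thing *t)
{
	if (refcount_dec_and_test(&t->refcount))
		complete(&t->comp);
}

static void thing_destroy(struct thing *t)
{
	thing_put(t);			/* drop creator's reference */
	wait_for_completion(&t->comp);	/* wait for concurrent users */
	/* now safe to free t */
}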
@@ -870,7 +875,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
init_completion(&id_priv->comp);
- atomic_set(&id_priv->refcount, 1);
+ refcount_set(&id_priv->refcount, 1);
mutex_init(&id_priv->handler_mutex);
INIT_LIST_HEAD(&id_priv->listen_list);
INIT_LIST_HEAD(&id_priv->mc_list);
@@ -1846,11 +1851,11 @@ void rdma_destroy_id(struct rdma_cm_id *id)
}
cma_release_port(id_priv);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
wait_for_completion(&id_priv->comp);
if (id_priv->internal_id)
- cma_deref_id(id_priv->id.context);
+ cma_id_put(id_priv->id.context);
kfree(id_priv->id.route.path_rec);
@@ -2187,7 +2192,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
* Protect against the user destroying conn_id from another thread
* until we're done accessing it.
*/
- atomic_inc(&conn_id->refcount);
+ cma_id_get(conn_id);
ret = cma_cm_event_handler(conn_id, &event);
if (ret)
goto err3;
@@ -2204,13 +2209,13 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
- cma_deref_id(conn_id);
+ cma_id_put(conn_id);
if (net_dev)
dev_put(net_dev);
return 0;
err3:
- cma_deref_id(conn_id);
+ cma_id_put(conn_id);
/* Destroy the CM ID by returning a non-zero value. */
conn_id->cm_id.ib = NULL;
err2:
@@ -2391,7 +2396,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
* Protect against the user destroying conn_id from another thread
* until we're done accessing it.
*/
- atomic_inc(&conn_id->refcount);
+ cma_id_get(conn_id);
ret = cma_cm_event_handler(conn_id, &event);
if (ret) {
/* User wants to destroy the CM ID */
@@ -2399,13 +2404,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
- cma_deref_id(conn_id);
+ cma_id_put(conn_id);
rdma_destroy_id(&conn_id->id);
return ret;
}
mutex_unlock(&conn_id->handler_mutex);
- cma_deref_id(conn_id);
+ cma_id_put(conn_id);
out:
mutex_unlock(&listen_id->handler_mutex);
@@ -2492,7 +2497,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
_cma_attach_to_dev(dev_id_priv, cma_dev);
list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
dev_id_priv->internal_id = 1;
dev_id_priv->afonly = id_priv->afonly;
dev_id_priv->tos_set = id_priv->tos_set;
@@ -2647,7 +2652,7 @@ static void cma_work_handler(struct work_struct *_work)
}
out:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
if (destroy)
rdma_destroy_id(&id_priv->id);
kfree(work);
@@ -2671,7 +2676,7 @@ static void cma_ndev_work_handler(struct work_struct *_work)
out:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
if (destroy)
rdma_destroy_id(&id_priv->id);
kfree(work);
@@ -2687,14 +2692,19 @@ static void cma_init_resolve_route_work(struct cma_work *work,
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
}
-static void cma_init_resolve_addr_work(struct cma_work *work,
- struct rdma_id_private *id_priv)
+static void enqueue_resolve_addr_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
{
+ /* Balances with cma_id_put() in cma_work_handler */
+ cma_id_get(id_priv);
+
work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler);
work->old_state = RDMA_CM_ADDR_QUERY;
work->new_state = RDMA_CM_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+
+ queue_work(cma_wq, &work->work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
@@ -2968,6 +2978,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
+ route->num_paths = 0;
err1:
kfree(work);
return ret;
@@ -2982,7 +2993,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
return -EINVAL;
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms);
else if (rdma_protocol_roce(id->device, id->port_num))
@@ -2998,7 +3009,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
@@ -3025,9 +3036,9 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
struct cma_device *cma_dev, *cur_dev;
union ib_gid gid;
enum ib_port_state port_state;
+ unsigned int p;
u16 pkey;
int ret;
- u8 p;
cma_dev = NULL;
mutex_lock(&lock);
@@ -3039,7 +3050,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
if (!cma_dev)
cma_dev = cur_dev;
- for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+ rdma_for_each_port (cur_dev->device, p) {
if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
port_state == IB_PORT_ACTIVE) {
cma_dev = cur_dev;
@@ -3148,9 +3159,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- atomic_inc(&id_priv->refcount);
- cma_init_resolve_addr_work(work, id_priv);
- queue_work(cma_wq, &work->work);
+ enqueue_resolve_addr_work(work, id_priv);
return 0;
err:
kfree(work);
@@ -3175,9 +3184,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
- atomic_inc(&id_priv->refcount);
- cma_init_resolve_addr_work(work, id_priv);
- queue_work(cma_wq, &work->work);
+ enqueue_resolve_addr_work(work, id_priv);
return 0;
err:
kfree(work);
@@ -4588,7 +4595,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
INIT_WORK(&work->work, cma_ndev_work_handler);
work->id = id_priv;
work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
queue_work(cma_wq, &work->work);
}
@@ -4663,7 +4670,7 @@ static void cma_add_one(struct ib_device *device)
}
init_completion(&cma_dev->comp);
- atomic_set(&cma_dev->refcount, 1);
+ refcount_set(&cma_dev->refcount, 1);
INIT_LIST_HEAD(&cma_dev->id_list);
ib_set_client_data(device, &cma_client, cma_dev);
@@ -4722,11 +4729,11 @@ static void cma_process_remove(struct cma_device *cma_dev)
list_del(&id_priv->listen_list);
list_del_init(&id_priv->list);
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
mutex_unlock(&lock);
ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
if (ret)
rdma_destroy_id(&id_priv->id);
@@ -4734,7 +4741,7 @@ static void cma_process_remove(struct cma_device *cma_dev)
}
mutex_unlock(&lock);
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
wait_for_completion(&cma_dev->comp);
}
@@ -4790,6 +4797,19 @@ static int __init cma_init(void)
{
int ret;
+ /*
+ * There is a rare lock ordering dependency in cma_netdev_callback()
+ * that only happens when bonding is enabled. Teach lockdep that rtnl
+ * must never be nested under lock so it can find these without having
+ * to test with bonding.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ rtnl_lock();
+ mutex_lock(&lock);
+ mutex_unlock(&lock);
+ rtnl_unlock();
+ }
+
cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
if (!cma_wq)
return -ENOMEM;
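
The IS_ENABLED(CONFIG_LOCKDEP) block above is a lockdep priming trick: acquiring the two locks once at init time, in the only legal order, records the dependency so that any later rtnl-under-lock nesting is flagged even on systems that never configure bonding. Stripped of the cma specifics, the pattern looks like this (a sketch with generic mutexes):

#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(outer);		/* must always be taken first */
static DEFINE_MUTEX(inner);		/* and this one only inside it */

static int __init prime_lock_order(void)
{
	/*
	 * Two uncontended lock/unlock pairs at init; from here on lockdep
	 * reports any code path that takes inner before outer.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		mutex_lock(&outer);
		mutex_lock(&inner);
		mutex_unlock(&inner);
		mutex_unlock(&outer);
	}
	return 0;
}
module_init(prime_lock_order);

Because IS_ENABLED() is a compile-time constant, the block is optimized away entirely when lockdep is not built in.
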
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 8b0b5ae22e4c..c672a4978bfd 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -94,7 +94,7 @@ static int cma_configfs_params_get(struct config_item *item,
static void cma_configfs_params_put(struct cma_device *cma_dev)
{
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
}
static ssize_t default_roce_mode_show(struct config_item *item,
@@ -312,12 +312,12 @@ static struct config_group *make_cma_dev(struct config_group *group,
configfs_add_default_group(&cma_dev_group->ports_group,
&cma_dev_group->device_group);
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
return &cma_dev_group->device_group;
fail:
if (cma_dev)
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
kfree(cma_dev_group);
return ERR_PTR(err);
}
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index ca7307277518..5edcf44a9307 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -66,7 +66,7 @@ struct rdma_id_private {
struct mutex qp_mutex;
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
struct mutex handler_mutex;
int backlog;
@@ -111,8 +111,8 @@ static inline void cma_configfs_exit(void)
}
#endif
-void cma_ref_dev(struct cma_device *dev);
-void cma_deref_dev(struct cma_device *dev);
+void cma_dev_get(struct cma_device *dev);
+void cma_dev_put(struct cma_device *dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
void *cookie);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 956b3a7dfed7..403d8673a2f9 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -79,13 +79,13 @@ struct ib_mad_private {
struct ib_mad_private_header header;
size_t mad_size;
struct ib_grh grh;
- u8 mad[0];
+ u8 mad[];
} __packed;
struct ib_rmpp_segment {
struct list_head list;
u32 num;
- u8 data[0];
+ u8 data[];
};
struct ib_mad_agent_private {
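
The [0] -> [] conversions in this header (and in multicast.c and sa_query.c below) replace GNU zero-length arrays with C99 flexible array members, which the compiler and fortify-source checks can reason about. Allocation is the same either way; a sketch of the idiom with struct_size() (illustrative struct, not one of the kernel's):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct msg {
	size_t len;
	u8 data[];	/* flexible array member: must be last; excluded from sizeof */
};

static struct msg *msg_alloc(size_t payload)
{
	/* struct_size() adds sizeof(*m) and payload with overflow checking */
	struct msg *m = kzalloc(struct_size(m, data, payload), GFP_KERNEL);

	if (m)
		m->len = payload;
	return m;
}
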
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index cd338ddc4a39..9c2d8b7f1af9 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -71,7 +71,7 @@ struct mcast_device {
struct ib_event_handler event_handler;
int start_port;
int end_port;
- struct mcast_port port[0];
+ struct mcast_port port[];
};
enum mcast_state {
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9eec26d10d7b..e16105be2eb2 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1292,11 +1292,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
ret = fill_func(msg, has_cap_net_admin, res, port);
-
- rdma_restrack_put(res);
if (ret)
goto err_free;
+ rdma_restrack_put(res);
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 5128cb16bb48..e0a5e897e4b1 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -153,9 +153,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
uobj->context = NULL;
/*
- * For DESTROY the usecnt is held write locked, the caller is expected
- * to put it unlock and put the object when done with it. Only DESTROY
- * can remove the IDR handle.
+ * For DESTROY the usecnt is not changed, the caller is expected to
+ * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
+ * handle.
*/
if (reason != RDMA_REMOVE_DESTROY)
atomic_set(&uobj->usecnt, 0);
@@ -187,7 +187,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
/*
* This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
* sequence. It should only be used from command callbacks. On success the
- * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
+ * caller must pair this with uobj_put_destroy(). This
* version requires the caller to have already obtained an
* LOOKUP_DESTROY uobject kref.
*/
@@ -198,6 +198,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
down_read(&ufile->hw_destroy_rwsem);
+ /*
+ * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
+ * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
+ * This is because any other concurrent thread can still see the object
+ * in the xarray due to RCU. Leaving it locked ensures nothing else will
+ * touch it.
+ */
ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
if (ret)
goto out_unlock;
@@ -216,7 +223,7 @@ out_unlock:
/*
* uobj_get_destroy destroys the HW object and returns a handle to the uobj
* with a NULL object pointer. The caller must pair this with
- * uverbs_put_destroy.
+ * uobj_put_destroy().
*/
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
u32 id, struct uverbs_attr_bundle *attrs)
@@ -250,8 +257,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
uobj = __uobj_get_destroy(obj, id, attrs);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
-
- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ uobj_put_destroy(uobj);
return 0;
}
@@ -360,7 +366,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
* uverbs_uobject_fd_release(), and the caller is expected to ensure
* that release is never done while a call to lookup is possible.
*/
- if (f->f_op != fd_type->fops) {
+ if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
fput(f);
return ERR_PTR(-EBADF);
}
@@ -459,7 +465,8 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
struct ib_uobject *uobj;
struct file *filp;
- if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
+ if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
+ fd_type->fops->release != &uverbs_async_event_release))
return ERR_PTR(-EINVAL);
new_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -474,16 +481,15 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
fd_type->flags);
if (IS_ERR(filp)) {
+ uverbs_uobject_put(uobj);
uobj = ERR_CAST(filp);
- goto err_uobj;
+ goto err_fd;
}
uobj->object = filp;
uobj->id = new_fd;
return uobj;
-err_uobj:
- uverbs_uobject_put(uobj);
err_fd:
put_unused_fd(new_fd);
return uobj;
@@ -679,7 +685,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
enum rdma_lookup_mode mode)
{
assert_uverbs_usecnt(uobj, mode);
- uobj->uapi_object->type_class->lookup_put(uobj, mode);
/*
* In order to unlock an object, either decrease its usecnt for
* read access or zero it in case of exclusive access. See
@@ -696,6 +701,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
break;
}
+ uobj->uapi_object->type_class->lookup_put(uobj, mode);
/* Pairs with the kref obtained by type->lookup_get */
uverbs_uobject_put(uobj);
}
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 06e5b6787443..557efbf29197 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -391,13 +391,13 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return -EINVAL;
}
- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
if (!ret)
return -ENOMEM;
sg_cnt = ret;
if (prot_sg_cnt) {
- ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
+ ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir);
if (!ret) {
ret = -ENOMEM;
goto out_unmap_sg;
@@ -466,9 +466,9 @@ out_free_ctx:
kfree(ctx->reg);
out_unmap_prot_sg:
if (prot_sg_cnt)
- ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -628,9 +628,9 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
kfree(ctx->reg);
- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
if (prot_sg_cnt)
- ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 30d4c126a2db..74e0058fcf9e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -101,7 +101,7 @@ struct ib_sa_port {
struct ib_sa_device {
int start_port, end_port;
struct ib_event_handler event_handler;
- struct ib_sa_port port[0];
+ struct ib_sa_port port[];
};
struct ib_sa_query {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 0274e9b704be..16b6cf57fa85 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -85,12 +85,13 @@ struct ucma_file {
struct ucma_context {
u32 id;
struct completion comp;
- atomic_t ref;
+ refcount_t ref;
int events_reported;
int backlog;
struct ucma_file *file;
struct rdma_cm_id *cm_id;
+ struct mutex mutex;
u64 uid;
struct list_head list;
@@ -152,7 +153,7 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
if (ctx->closing)
ctx = ERR_PTR(-EIO);
else
- atomic_inc(&ctx->ref);
+ refcount_inc(&ctx->ref);
}
xa_unlock(&ctx_table);
return ctx;
@@ -160,7 +161,7 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
static void ucma_put_ctx(struct ucma_context *ctx)
{
- if (atomic_dec_and_test(&ctx->ref))
+ if (refcount_dec_and_test(&ctx->ref))
complete(&ctx->comp);
}
@@ -212,10 +213,11 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return NULL;
INIT_WORK(&ctx->close_work, ucma_close_id);
- atomic_set(&ctx->ref, 1);
+ refcount_set(&ctx->ref, 1);
init_completion(&ctx->comp);
INIT_LIST_HEAD(&ctx->mc_list);
ctx->file = file;
+ mutex_init(&ctx->mutex);
if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
goto error;
@@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
}
events_reported = ctx->events_reported;
+ mutex_destroy(&ctx->mutex);
kfree(ctx);
return events_reported;
}
@@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ mutex_unlock(&ctx->mutex);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
memset(&resp, 0, sizeof resp);
addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
@@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
ucma_copy_iw_route(&resp, &ctx->cm_id->route);
out:
+ mutex_unlock(&ctx->mutex);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
switch (cmd.option) {
case RDMA_USER_CM_QUERY_ADDR:
ret = ucma_query_addr(ctx, response, out_len);
@@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file,
ret = -ENOSYS;
break;
}
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -1045,7 +1063,7 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
dst->retry_count = src->retry_count;
dst->rnr_retry_count = src->rnr_retry_count;
dst->srq = src->srq;
- dst->qp_num = src->qp_num;
+ dst->qp_num = src->qp_num & 0xFFFFFF;
dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}
@@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
return PTR_ERR(ctx);
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ mutex_lock(&ctx->mutex);
ret = rdma_connect(ctx->cm_id, &conn_param);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
cmd.backlog : max_backlog;
+ mutex_lock(&ctx->mutex);
ret = rdma_listen(ctx->cm_id, ctx->backlog);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
mutex_lock(&file->mut);
+ mutex_lock(&ctx->mutex);
ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+ mutex_unlock(&ctx->mutex);
if (!ret)
ctx->uid = cmd.uid;
mutex_unlock(&file->mut);
- } else
+ } else {
+ mutex_lock(&ctx->mutex);
ret = __rdma_accept(ctx->cm_id, NULL, NULL);
-
+ mutex_unlock(&ctx->mutex);
+ }
ucma_put_ctx(ctx);
return ret;
}
@@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_disconnect(ctx->cm_id);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
+ mutex_lock(&ctx->mutex);
ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+ mutex_unlock(&ctx->mutex);
if (ret)
goto out;
@@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
struct sa_path_rec opa;
sa_convert_path_ib_to_opa(&opa, &sa_path);
+ mutex_lock(&ctx->mutex);
ret = rdma_set_ib_path(ctx->cm_id, &opa);
+ mutex_unlock(&ctx->mutex);
} else {
+ mutex_lock(&ctx->mutex);
ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
+ mutex_unlock(&ctx->mutex);
}
if (ret)
return ret;
@@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
switch (level) {
case RDMA_OPTION_ID:
+ mutex_lock(&ctx->mutex);
ret = ucma_set_option_id(ctx, optname, optval, optlen);
+ mutex_unlock(&ctx->mutex);
break;
case RDMA_OPTION_IB:
ret = ucma_set_option_ib(ctx, optname, optval, optlen);
@@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
if (ctx->cm_id->device)
ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
+ mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
+ mutex_unlock(&ctx->mutex);
if (ret)
goto err2;
@@ -1502,7 +1544,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
mc = ERR_PTR(-ENOENT);
else if (mc->ctx->file != file)
mc = ERR_PTR(-EINVAL);
- else if (!atomic_inc_not_zero(&mc->ctx->ref))
+ else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
else
__xa_erase(&multicast_table, mc->id);
@@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
goto out;
}
+ mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
+ mutex_unlock(&mc->ctx->mutex);
+
mutex_lock(&mc->ctx->file->mut);
ucma_cleanup_mc_events(mc);
list_del(&mc->list);
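
All of the ucma handlers above now bracket their rdma_*() call with the new per-context mutex, so two system calls racing on the same ucma_context can no longer drive the cm_id state machine concurrently. Reduced to its shape, a handler now reads as follows (ucma_do_disconnect is a hypothetical name; error handling trimmed):

static ssize_t ucma_do_disconnect(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;
	int ret;

	ctx = ucma_get_ctx(file, id);	/* takes a ctx reference under xa_lock */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);	/* serialize rdma_* calls on this cm_id */
	ret = rdma_disconnect(ctx->cm_id);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);		/* the final put completes ctx->comp */
	return ret;
}
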
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 06b6125b5ae1..82455a1392f1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -197,6 +197,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
unsigned long lock_limit;
unsigned long new_pinned;
unsigned long cur_base;
+ unsigned long dma_attr = 0;
struct mm_struct *mm;
unsigned long npages;
int ret;
@@ -278,10 +279,12 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg(device,
- umem->sg_head.sgl,
- umem->sg_nents,
- DMA_BIDIRECTIONAL);
+ if (access & IB_ACCESS_RELAXED_ORDERING)
+ dma_attr |= DMA_ATTR_WEAK_ORDERING;
+
+ umem->nmap =
+ ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
+ DMA_BIDIRECTIONAL, dma_attr);
if (!umem->nmap) {
ret = -ENOMEM;
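
With ib_dma_map_sg_attrs(), ib_umem_get() can translate the caller's IB_ACCESS_RELAXED_ORDERING flag into DMA_ATTR_WEAK_ORDERING, allowing PCIe relaxed-ordered transactions where the platform supports them; dma_attr == 0 degrades to a plain strictly ordered mapping. One detail worth spelling out: the unmap must pass the same attributes. A sketch of a symmetric pair, assuming the device/umem/access variables of the surrounding code:

	unsigned long dma_attr = 0;
	int nmap;

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	nmap = ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
				   DMA_BIDIRECTIONAL, dma_attr);
	if (!nmap)
		return -ENOMEM;

	/* ... device DMA ... */

	/* tear down with the attributes the mapping was created with */
	ib_dma_unmap_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
			      DMA_BIDIRECTIONAL, dma_attr);
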
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 7df71983212d..3d189c7ee59e 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -219,6 +219,7 @@ void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+int uverbs_async_event_release(struct inode *inode, struct file *filp);
int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
@@ -227,6 +228,9 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
struct ib_ucq_object *uobj);
void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
void ib_uverbs_release_file(struct kref *ref);
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+ __u64 element, __u64 event,
+ struct list_head *obj_list, u32 *counter);
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 2d4083bf4a04..1bab8de14757 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -346,7 +346,7 @@ const struct file_operations uverbs_async_event_fops = {
.owner = THIS_MODULE,
.read = ib_uverbs_async_event_read,
.poll = ib_uverbs_async_event_poll,
- .release = uverbs_uobject_fd_release,
+ .release = uverbs_async_event_release,
.fasync = ib_uverbs_async_event_fasync,
.llseek = no_llseek,
};
@@ -386,10 +386,9 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
-static void
-ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
- __u64 element, __u64 event, struct list_head *obj_list,
- u32 *counter)
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+ __u64 element, __u64 event,
+ struct list_head *obj_list, u32 *counter)
{
struct ib_uverbs_event *entry;
unsigned long flags;
@@ -820,6 +819,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
ret = mmget_not_zero(mm);
if (!ret) {
list_del_init(&priv->list);
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
mm = NULL;
continue;
}
@@ -1183,9 +1186,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
*/
mutex_unlock(&uverbs_dev->lists_mutex);
- ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
- IB_EVENT_DEVICE_FATAL, NULL, NULL);
-
uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
kref_put(&file->ref, ib_uverbs_release_file);
diff --git a/drivers/infiniband/core/uverbs_std_types_async_fd.c b/drivers/infiniband/core/uverbs_std_types_async_fd.c
index 82ec0806b34b..61899eaf1f91 100644
--- a/drivers/infiniband/core/uverbs_std_types_async_fd.c
+++ b/drivers/infiniband/core/uverbs_std_types_async_fd.c
@@ -26,10 +26,38 @@ static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
container_of(uobj, struct ib_uverbs_async_event_file, uobj);
ib_unregister_event_handler(&event_file->event_handler);
- ib_uverbs_free_event_queue(&event_file->ev_queue);
+
+ if (why == RDMA_REMOVE_DRIVER_REMOVE)
+ ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL,
+ NULL, NULL);
return 0;
}
+int uverbs_async_event_release(struct inode *inode, struct file *filp)
+{
+ struct ib_uverbs_async_event_file *event_file;
+ struct ib_uobject *uobj = filp->private_data;
+ int ret;
+
+ if (!uobj)
+ return uverbs_uobject_fd_release(inode, filp);
+
+ event_file =
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+ /*
+ * The async event FD has to deliver IB_EVENT_DEVICE_FATAL even after
+ * disassociation, so cleaning the event list must only happen after
+ * release. The user knows it has reached the end of the event stream
+ * when it sees IB_EVENT_DEVICE_FATAL.
+ */
+ uverbs_uobject_get(uobj);
+ ret = uverbs_uobject_fd_release(inode, filp);
+ ib_uverbs_free_event_queue(&event_file->ev_queue);
+ uverbs_uobject_put(uobj);
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_ASYNC_EVENT_ALLOC,
UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e62c9dfc7837..56a71337112c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -54,8 +54,6 @@
#include "core_priv.h"
#include <trace/events/rdma_core.h>
-#include <trace/events/rdma_core.h>
-
static int ib_resolve_eth_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr);
@@ -1127,8 +1125,7 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
EXPORT_SYMBOL(ib_open_qp);
static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+ struct ib_qp_init_attr *qp_init_attr)
{
struct ib_qp *real_qp = qp;
@@ -1150,9 +1147,18 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
return qp;
}
-struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+/**
+ * ib_create_qp - Creates a kernel QP associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ * QP. If QP creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created QP.
+ *
+ * NOTE: for user qp use ib_create_qp_user with valid udata!
+ */
+struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr)
{
struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
struct ib_qp *qp;
@@ -1187,7 +1193,7 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
struct ib_qp *xrc_qp =
- create_xrc_qp_user(qp, qp_init_attr, udata);
+ create_xrc_qp_user(qp, qp_init_attr);
if (IS_ERR(xrc_qp)) {
ret = PTR_ERR(xrc_qp);
@@ -1243,7 +1249,7 @@ err:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_create_qp_user);
+EXPORT_SYMBOL(ib_create_qp);
static const struct {
int valid;
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 725b2350e349..a300588634c5 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -89,6 +89,15 @@
#define BNXT_RE_DEFAULT_ACK_DELAY 16
+struct bnxt_re_ring_attr {
+ dma_addr_t *dma_arr;
+ int pages;
+ int type;
+ u32 depth;
+ u32 lrid; /* Logical ring id */
+ u8 mode;
+};
+
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
@@ -104,6 +113,14 @@ struct bnxt_re_sqp_entries {
struct bnxt_re_qp *qp1_qp;
};
+#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024
+struct bnxt_re_gsi_context {
+ struct bnxt_re_qp *gsi_qp;
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_sqp_entries *sqp_tbl;
+};
+
#define BNXT_RE_MIN_MSIX 2
#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
@@ -115,7 +132,6 @@ struct bnxt_re_dev {
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
-#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
#define BNXT_RE_FLAG_GOT_MSIX 2
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
@@ -125,7 +141,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
- struct bnxt_qplib_chip_ctx chip_ctx;
+ struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
int num_msix;
@@ -160,15 +176,11 @@ struct bnxt_re_dev {
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
- atomic_t sched_count;
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
/* QP for handling QP1 packets */
- u32 sqp_id;
- struct bnxt_re_qp *qp1_sqp;
- struct bnxt_re_ah *sqp_ah;
- struct bnxt_re_sqp_entries sqp_tbl[1024];
+ struct bnxt_re_gsi_context gsi_ctx;
atomic_t nq_alloc_cnt;
u32 is_virtfn;
u32 num_vfs;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 52b6a4d85460..95f6d493d1b9 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -312,9 +312,9 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
*/
if (ctx->idx == 0 &&
rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
- ctx->refcnt == 1 && rdev->qp1_sqp) {
- dev_dbg(rdev_to_dev(rdev),
- "Trying to delete GID0 while QP1 is alive\n");
+ ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
+ ibdev_dbg(&rdev->ibdev,
+ "Trying to delete GID0 while QP1 is alive\n");
return -EFAULT;
}
ctx->refcnt--;
@@ -322,8 +322,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
vlan_id, true);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to remove GID: %#x", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to remove GID: %#x", rc);
} else {
ctx_tbl = sgid_tbl->ctx;
ctx_tbl[ctx->idx] = NULL;
@@ -360,7 +360,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
}
if (rc < 0) {
- dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
+ ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
return rc;
}
@@ -423,12 +423,12 @@ static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
wqe.bind.r_key = fence->bind_rkey;
fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
- dev_dbg(rdev_to_dev(qp->rdev),
- "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+ ibdev_dbg(&qp->rdev->ibdev,
+ "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
wqe.bind.r_key, qp->qplib_qp.id, pd);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
if (rc) {
- dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
+ ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
return rc;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
@@ -479,7 +479,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
+ ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
rc = -EIO;
fence->dma_addr = 0;
goto fail;
@@ -499,7 +499,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
+ ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
goto fail;
}
@@ -511,7 +511,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
+ ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
goto fail;
}
mr->ib_mr.rkey = mr->qplib_mr.rkey;
@@ -519,8 +519,8 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
/* Create a fence MW only for kernel consumers */
mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
if (IS_ERR(mw)) {
- dev_err(rdev_to_dev(rdev),
- "Failed to create fence-MW for PD: %p\n", pd);
+ ibdev_err(&rdev->ibdev,
+ "Failed to create fence-MW for PD: %p\n", pd);
rc = PTR_ERR(mw);
goto fail;
}
@@ -558,7 +558,7 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
pd->rdev = rdev;
if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
+ ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
rc = -ENOMEM;
goto fail;
}
@@ -585,16 +585,16 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to copy user response\n");
+ ibdev_err(&rdev->ibdev,
+ "Failed to copy user response\n");
goto dbfail;
}
}
if (!udata)
if (bnxt_re_create_fence_mr(pd))
- dev_warn(rdev_to_dev(rdev),
- "Failed to create Fence-MR\n");
+ ibdev_warn(&rdev->ibdev,
+ "Failed to create Fence-MR\n");
return 0;
dbfail:
bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -639,12 +639,13 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
struct bnxt_re_dev *rdev = pd->rdev;
const struct ib_gid_attr *sgid_attr;
+ struct bnxt_re_gid_ctx *ctx;
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
u8 nw_type;
int rc;
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
- dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
+ ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
return -EINVAL;
}
@@ -654,19 +655,18 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
/* Supply the configuration for the HW */
memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
sizeof(union ib_gid));
- /*
- * If RoCE V2 is enabled, stack will have two entries for
- * each GID entry. Avoiding this duplicate entry in HW. Dividing
- * the GID index by 2 for RoCE V2
+ sgid_attr = grh->sgid_attr;
+ /* Get the HW context of the GID. The reference to the
+ * GID table entry is already taken by the caller.
*/
- ah->qplib_ah.sgid_index = grh->sgid_index / 2;
+ ctx = rdma_read_gid_hw_context(sgid_attr);
+ ah->qplib_ah.sgid_index = ctx->idx;
ah->qplib_ah.host_sgid_index = grh->sgid_index;
ah->qplib_ah.traffic_class = grh->traffic_class;
ah->qplib_ah.flow_label = grh->flow_label;
ah->qplib_ah.hop_limit = grh->hop_limit;
ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
- sgid_attr = grh->sgid_attr;
/* Get network header type for this GID */
nw_type = rdma_gid_attr_network_type(sgid_attr);
ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
@@ -675,7 +675,7 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
!(flags & RDMA_CREATE_AH_SLEEPABLE));
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
+ ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
return rc;
}
@@ -742,6 +742,49 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
+static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+{
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_dev *rdev;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+
+ /* remove from active qp list */
+ mutex_lock(&rdev->qp_lock);
+ list_del(&gsi_sqp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
+ bnxt_qplib_destroy_ah(&rdev->qplib_res,
+ &gsi_sah->qplib_ah,
+ true);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
+ goto fail;
+ }
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+
+ kfree(rdev->gsi_ctx.sqp_tbl);
+ kfree(gsi_sah);
+ kfree(gsi_sqp);
+ rdev->gsi_ctx.gsi_sqp = NULL;
+ rdev->gsi_ctx.gsi_sah = NULL;
+ rdev->gsi_ctx.sqp_tbl = NULL;
+
+ return 0;
+fail:
+ return rc;
+}
+
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
@@ -750,10 +793,16 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
+ mutex_lock(&rdev->qp_lock);
+ list_del(&qp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
+
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
+ ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
return rc;
}
@@ -765,40 +814,19 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
- bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah,
- false);
-
- bnxt_qplib_clean_qp(&qp->qplib_qp);
- rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to destroy Shadow QP");
- return rc;
- }
- bnxt_qplib_free_qp_res(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- mutex_lock(&rdev->qp_lock);
- list_del(&rdev->qp1_sqp->list);
- atomic_dec(&rdev->qp_count);
- mutex_unlock(&rdev->qp_lock);
-
- kfree(rdev->sqp_ah);
- kfree(rdev->qp1_sqp);
- rdev->qp1_sqp = NULL;
- rdev->sqp_ah = NULL;
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
+ rc = bnxt_re_destroy_gsi_sqp(qp);
+ if (rc)
+ goto sh_fail;
}
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
- mutex_lock(&rdev->qp_lock);
- list_del(&qp->list);
- atomic_dec(&rdev->qp_count);
- mutex_unlock(&rdev->qp_lock);
kfree(qp);
return 0;
+sh_fail:
+ return rc;
}
static u8 __from_ib_qp_type(enum ib_qp_type type)
@@ -831,7 +859,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
/* Consider mapping PSN search memory only for RC QPs. */
if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
- psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
bytes += (qplib_qp->sq.max_wqe * psn_sz);
@@ -843,9 +871,11 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
return PTR_ERR(umem);
qp->sumem = umem;
- qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
+ qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl;
qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
qplib_qp->sq.sg_info.nmap = umem->nmap;
+ qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
+ qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
qplib_qp->qp_handle = ureq.qp_handle;
if (!qp->qplib_qp.srq) {
@@ -856,9 +886,11 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
- qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
+ qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl;
qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
qplib_qp->rq.sg_info.nmap = umem->nmap;
+ qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
+ qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
}
qplib_qp->dpi = &cntx->dpi;
@@ -906,8 +938,8 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate HW AH for Shadow QP");
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate HW AH for Shadow QP");
goto fail;
}
@@ -948,6 +980,8 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;
+ qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
+ qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq;
@@ -956,6 +990,8 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.rq.q_full_delta = 1;
+ qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
+ qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
qp->qplib_qp.mtu = qp1_qp->mtu;
@@ -967,8 +1003,6 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
if (rc)
goto fail;
- rdev->sqp_id = qp->qplib_qp.id;
-
spin_lock_init(&qp->sq_lock);
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
@@ -981,205 +1015,378 @@ fail:
return NULL;
}
-struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
{
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_qp *qp;
- struct bnxt_re_cq *cq;
- struct bnxt_re_srq *srq;
- int rc, entries;
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
- if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
- (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
- (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
- (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
- (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
- return ERR_PTR(-EINVAL);
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ if (init_attr->srq) {
+ struct bnxt_re_srq *srq;
- qp->rdev = rdev;
- ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
- qp->qplib_qp.pd = &pd->qplib_pd;
- qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
- qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
+ srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
+ if (!srq) {
+ ibdev_err(&rdev->ibdev, "SRQ not found");
+ return -EINVAL;
+ }
+ qplqp->srq = &srq->qplib_srq;
+ qplqp->rq.max_wqe = 0;
+ } else {
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty.
+ */
+ entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
+ qplqp->rq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
- if (qp_init_attr->qp_type == IB_QPT_GSI &&
- bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
- qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
- if (qp->qplib_qp.type == IB_QPT_MAX) {
- dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
- qp->qplib_qp.type);
- rc = -EINVAL;
- goto fail;
+ qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
+ init_attr->cap.max_recv_wr;
+ qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ }
+ qplqp->rq.sg_info.pgsize = PAGE_SIZE;
+ qplqp->rq.sg_info.pgshft = PAGE_SHIFT;
+
+ return 0;
+}
+
+static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ qplqp->rq.max_sge = 6;
+}
+
+static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ qplqp->sq.max_sge = init_attr->cap.max_send_sge;
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ /*
+ * Change the SQ depth if the user has requested a minimum using
+ * configfs. Only supported for kernel consumers.
+ */
+ entries = init_attr->cap.max_send_wr;
+ /* Allocate 128 + 1 more than what's provided */
+ entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ /*
+ * Reserving one slot for the phantom WQE. The application can
+ * post one extra entry in this case, but allowing it avoids an
+ * unexpected queue-full condition.
+ */
+ qplqp->sq.q_full_delta -= 1;
+ qplqp->sq.sg_info.pgsize = PAGE_SIZE;
+ qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
+}
+
+static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
+ qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
+ init_attr->cap.max_send_wr;
+ qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+}
+
+static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ int qptype;
+
+ chip_ctx = rdev->chip_ctx;
+
+ qptype = __from_ib_qp_type(init_attr->qp_type);
+ if (qptype == IB_QPT_MAX) {
+ ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
+ qptype = -EOPNOTSUPP;
+ goto out;
}
- qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
- qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
- IB_SIGNAL_ALL_WR) ? true : false);
+ if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
+ init_attr->qp_type == IB_QPT_GSI)
+ qptype = CMDQ_CREATE_QP_TYPE_GSI;
+out:
+ return qptype;
+}
- qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
+static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int rc = 0, qptype;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ /* Setup misc params */
+ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
+ qplqp->pd = &pd->qplib_pd;
+ qplqp->qp_handle = (u64)qplqp;
+ qplqp->max_inline_data = init_attr->cap.max_inline_data;
+ qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
+ true : false);
+ qptype = bnxt_re_init_qp_type(rdev, init_attr);
+ if (qptype < 0) {
+ rc = qptype;
+ goto out;
+ }
+ qplqp->type = (u8)qptype;
+
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
+ qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
+ }
+ qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
+ if (init_attr->create_flags)
+ ibdev_dbg(&rdev->ibdev,
+ "QP create flags 0x%x not supported",
+ init_attr->create_flags);
- if (qp_init_attr->send_cq) {
- cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
- ib_cq);
+ /* Setup CQs */
+ if (init_attr->send_cq) {
+ cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
if (!cq) {
- dev_err(rdev_to_dev(rdev), "Send CQ not found");
+ ibdev_err(&rdev->ibdev, "Send CQ not found");
rc = -EINVAL;
- goto fail;
+ goto out;
}
- qp->qplib_qp.scq = &cq->qplib_cq;
+ qplqp->scq = &cq->qplib_cq;
qp->scq = cq;
}
- if (qp_init_attr->recv_cq) {
- cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
- ib_cq);
+ if (init_attr->recv_cq) {
+ cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
if (!cq) {
- dev_err(rdev_to_dev(rdev), "Receive CQ not found");
+ ibdev_err(&rdev->ibdev, "Receive CQ not found");
rc = -EINVAL;
- goto fail;
+ goto out;
}
- qp->qplib_qp.rcq = &cq->qplib_cq;
+ qplqp->rcq = &cq->qplib_cq;
qp->rcq = cq;
}
- if (qp_init_attr->srq) {
- srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
- ib_srq);
- if (!srq) {
- dev_err(rdev_to_dev(rdev), "SRQ not found");
- rc = -EINVAL;
- goto fail;
- }
- qp->qplib_qp.srq = &srq->qplib_srq;
- qp->qplib_qp.rq.max_wqe = 0;
- } else {
- /* Allocate 1 more than what's provided so posting max doesn't
- * mean empty
- */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
- qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
+ /* Setup RQ/SRQ */
+ rc = bnxt_re_init_rq_attr(qp, init_attr);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_rq_attr(qp);
- qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
- qp_init_attr->cap.max_recv_wr;
+ /* Setup SQ */
+ bnxt_re_init_sq_attr(qp, init_attr, udata);
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
- qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
+ if (udata) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+out:
+ return rc;
+}
+
+static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
+ struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_sqp_entries *sqp_tbl = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_qp *sqp;
+ struct bnxt_re_ah *sah;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ /* Create a shadow QP to handle the QP1 traffic */
+ sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
+ GFP_KERNEL);
+ if (!sqp_tbl)
+ return -ENOMEM;
+ rdev->gsi_ctx.sqp_tbl = sqp_tbl;
+
+ sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
+ if (!sqp) {
+ rc = -ENODEV;
+ ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
+ goto out;
+ }
+ rdev->gsi_ctx.gsi_sqp = sqp;
+
+ sqp->rcq = qp->rcq;
+ sqp->scq = qp->scq;
+ sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
+ &qp->qplib_qp);
+ if (!sah) {
+ bnxt_qplib_destroy_qp(&rdev->qplib_res,
+ &sqp->qplib_qp);
+ rc = -ENODEV;
+ ibdev_err(&rdev->ibdev,
+ "Failed to create AH entry for ShadowQP");
+ goto out;
}
+ rdev->gsi_ctx.gsi_sah = sah;
- qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ return 0;
+out:
+ kfree(sqp_tbl);
+ return rc;
+}
- if (qp_init_attr->qp_type == IB_QPT_GSI &&
- !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
- /* Allocate 1 more than what's provided */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
- qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
- qp_init_attr->cap.max_send_wr;
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
- qp->qplib_qp.sq.max_sge++;
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
-
- qp->qplib_qp.rq_hdr_buf_size =
- BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
-
- qp->qplib_qp.sq_hdr_buf_size =
- BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
- rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
- goto fail;
- }
- /* Create a shadow QP to handle the QP1 traffic */
- rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
- &qp->qplib_qp);
- if (!rdev->qp1_sqp) {
- rc = -EINVAL;
- dev_err(rdev_to_dev(rdev),
- "Failed to create Shadow QP for QP1");
- goto qp_destroy;
- }
- rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
- &qp->qplib_qp);
- if (!rdev->sqp_ah) {
- bnxt_qplib_destroy_qp(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- rc = -EINVAL;
- dev_err(rdev_to_dev(rdev),
- "Failed to create AH entry for ShadowQP");
- goto qp_destroy;
- }
+static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_qp *qplqp;
+ int rc = 0;
- } else {
- /* Allocate 128 + 1 more than what's provided */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
- /*
- * Reserving one slot for Phantom WQE. Application can
- * post one extra entry in this case. But allowing this to avoid
- * unexpected Queue full condition
- */
+ qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
+ qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
- qp->qplib_qp.sq.q_full_delta -= 1;
+ rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
+ goto out;
+ }
- qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
- qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
- if (udata) {
- rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
- if (rc)
- goto fail;
- } else {
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
- }
+ rc = bnxt_re_create_shadow_gsi(qp, pd);
+out:
+ return rc;
+}
+
+static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_qplib_dev_attr *dev_attr)
+{
+ bool rc = true;
+
+ if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
+ ibdev_err(&rdev->ibdev,
+ "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
+ init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_inline_data,
+ dev_attr->max_inline_data);
+ rc = false;
+ }
+ return rc;
+}
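+/*
+ * Illustrative only (values are made up): with dev_attr->max_qp_wqes ==
+ * 0x1000, a request of cap.max_send_wr == 0x2000 makes the check above
+ * return false, and bnxt_re_create_qp() below bails out with -EINVAL
+ * before any hardware resources are touched.
+ */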
+
+struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_qp *qp;
+ int rc;
+
+ rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
+ if (!rc) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ qp->rdev = rdev;
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
+ if (rc)
+ goto fail;
+
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+ !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
+ rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
+ if (rc == -ENODEV)
+ goto qp_destroy;
+ if (rc)
+ goto fail;
+ } else {
rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
+ ibdev_err(&rdev->ibdev, "Failed to create HW QP");
goto free_umem;
}
+ if (udata) {
+ struct bnxt_re_qp_resp resp;
+
+ resp.qpid = qp->qplib_qp.id;
+ resp.rsvd = 0;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
+ goto qp_destroy;
+ }
+ }
}
qp->ib_qp.qp_num = qp->qplib_qp.id;
+ if (qp_init_attr->qp_type == IB_QPT_GSI)
+ rdev->gsi_ctx.gsi_qp = qp;
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
-
- if (udata) {
- struct bnxt_re_qp_resp resp;
-
- resp.qpid = qp->ib_qp.qp_num;
- resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
- goto qp_destroy;
- }
- }
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
list_add_tail(&qp->list, &rdev->qp_list);
- atomic_inc(&rdev->qp_count);
mutex_unlock(&rdev->qp_lock);
+ atomic_inc(&rdev->qp_count);
return &qp->ib_qp;
qp_destroy:
@@ -1189,6 +1396,7 @@ free_umem:
ib_umem_release(qp->sumem);
fail:
kfree(qp);
+exit:
return ERR_PTR(rc);
}
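/*
 * The unwind labels above form the usual kernel goto ladder: "exit"
 * skips the kfree() when allocation itself failed, "fail" frees the QP
 * struct, "free_umem" additionally releases the user memory maps, and
 * "qp_destroy" tears down a hardware QP that was already created.
 */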
@@ -1311,9 +1519,11 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
return PTR_ERR(umem);
srq->umem = umem;
- qplib_srq->sg_info.sglist = umem->sg_head.sgl;
+ qplib_srq->sg_info.sghead = umem->sg_head.sgl;
qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
qplib_srq->sg_info.nmap = umem->nmap;
+ qplib_srq->sg_info.pgsize = PAGE_SIZE;
+ qplib_srq->sg_info.pgshft = PAGE_SHIFT;
qplib_srq->srq_handle = ureq.srq_handle;
qplib_srq->dpi = &cntx->dpi;
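/*
 * sg_info.pgsize/pgshft are new fields; setting them here (and for the
 * CQ further down) presumably lets the qplib layer size its page tables
 * from explicit values instead of hard-coding PAGE_SIZE, e.g. 4 KiB
 * pages where PAGE_SHIFT == 12.
 */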
@@ -1334,7 +1544,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
int rc, entries;
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
- dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
+		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
rc = -EINVAL;
goto exit;
}
@@ -1369,7 +1579,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
+ ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
goto fail;
}
@@ -1379,7 +1589,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
resp.srqid = srq->qplib_srq.id;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
- dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
+ ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
bnxt_qplib_destroy_srq(&rdev->qplib_res,
&srq->qplib_srq);
goto fail;
@@ -1418,7 +1628,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
srq->qplib_srq.threshold = srq_attr->srq_limit;
rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
+ ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
return rc;
}
/* On success, update the shadow */
@@ -1426,8 +1636,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
	/* No need to build and send response back to udata */
break;
default:
- dev_err(rdev_to_dev(rdev),
- "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ ibdev_err(&rdev->ibdev,
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
return 0;
@@ -1445,7 +1655,7 @@ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
tsrq.qplib_srq.id = srq->qplib_srq.id;
rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
+ ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
return rc;
}
srq_attr->max_wr = srq->qplib_srq.max_wqe;
@@ -1487,7 +1697,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
{
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
+ struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
int rc = 0;
if (qp_attr_mask & IB_QP_STATE) {
@@ -1511,8 +1721,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc)
- dev_err(rdev_to_dev(rdev),
- "Failed to modify Shadow QP for QP1");
+ ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
return rc;
}
@@ -1533,15 +1742,15 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
new_qp_state = qp_attr->qp_state;
if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
ib_qp->qp_type, qp_attr_mask)) {
- dev_err(rdev_to_dev(rdev),
- "Invalid attribute mask: %#x specified ",
- qp_attr_mask);
- dev_err(rdev_to_dev(rdev),
- "for qpn: %#x type: %#x",
- ib_qp->qp_num, ib_qp->qp_type);
- dev_err(rdev_to_dev(rdev),
- "curr_qp_state=0x%x, new_qp_state=0x%x\n",
- curr_qp_state, new_qp_state);
+ ibdev_err(&rdev->ibdev,
+ "Invalid attribute mask: %#x specified ",
+ qp_attr_mask);
+ ibdev_err(&rdev->ibdev,
+ "for qpn: %#x type: %#x",
+ ib_qp->qp_num, ib_qp->qp_type);
+ ibdev_err(&rdev->ibdev,
+ "curr_qp_state=0x%x, new_qp_state=0x%x\n",
+ curr_qp_state, new_qp_state);
return -EINVAL;
}
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
@@ -1549,18 +1758,16 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (!qp->sumem &&
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
- dev_dbg(rdev_to_dev(rdev),
- "Move QP = %p to flush list\n",
- qp);
+ ibdev_dbg(&rdev->ibdev,
+ "Move QP = %p to flush list\n", qp);
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
}
if (!qp->sumem &&
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
- dev_dbg(rdev_to_dev(rdev),
- "Move QP = %p out of flush list\n",
- qp);
+ ibdev_dbg(&rdev->ibdev,
+ "Move QP = %p out of flush list\n", qp);
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_clean_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
@@ -1593,6 +1800,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
const struct ib_global_route *grh =
rdma_ah_read_grh(&qp_attr->ah_attr);
const struct ib_gid_attr *sgid_attr;
+ struct bnxt_re_gid_ctx *ctx;
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
@@ -1604,11 +1812,12 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
sizeof(qp->qplib_qp.ah.dgid.data));
qp->qplib_qp.ah.flow_label = grh->flow_label;
- /* If RoCE V2 is enabled, stack will have two entries for
- * each GID entry. Avoiding this duplicte entry in HW. Dividing
- * the GID index by 2 for RoCE V2
+ sgid_attr = grh->sgid_attr;
+ /* Get the HW context of the GID. The reference
+	 * to the GID table entry is already taken by the caller.
*/
- qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
+ ctx = rdma_read_gid_hw_context(sgid_attr);
+ qp->qplib_qp.ah.sgid_index = ctx->idx;
qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
qp->qplib_qp.ah.hop_limit = grh->hop_limit;
qp->qplib_qp.ah.traffic_class = grh->traffic_class;
@@ -1616,7 +1825,6 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
ether_addr_copy(qp->qplib_qp.ah.dmac,
qp_attr->ah_attr.roce.dmac);
- sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
&qp->qplib_qp.smac[0]);
if (rc)
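/*
 * The old code halved grh->sgid_index to skip the duplicate table
 * entries the stack creates when RoCE v2 is enabled; the HW index is
 * now read back from the per-GID driver context stored at add_gid time
 * via rdma_read_gid_hw_context(), so no arithmetic on the index is
 * needed.
 */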
@@ -1690,10 +1898,10 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
if (qp_attr->max_dest_rd_atomic >
dev_attr->max_qp_init_rd_atom) {
- dev_err(rdev_to_dev(rdev),
- "max_dest_rd_atomic requested%d is > dev_max%d",
- qp_attr->max_dest_rd_atomic,
- dev_attr->max_qp_init_rd_atom);
+ ibdev_err(&rdev->ibdev,
+				  "max_dest_rd_atomic requested %d is > dev_max %d",
+ qp_attr->max_dest_rd_atomic,
+ dev_attr->max_qp_init_rd_atom);
return -EINVAL;
}
@@ -1714,8 +1922,8 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
(qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
(qp_attr->cap.max_inline_data >=
dev_attr->max_inline_data)) {
- dev_err(rdev_to_dev(rdev),
- "Create QP failed - max exceeded");
+ ibdev_err(&rdev->ibdev,
+ "Create QP failed - max exceeded");
return -EINVAL;
}
entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
@@ -1748,10 +1956,10 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
+ ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
return rc;
}
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
return rc;
}
@@ -1773,7 +1981,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
+ ibdev_err(&rdev->ibdev, "Failed to query HW QP");
goto out;
}
qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
@@ -1978,7 +2186,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
wqe->num_sge++;
} else {
- dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
+ ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
rc = -ENOMEM;
}
return rc;
@@ -1995,9 +2203,12 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
+ struct bnxt_re_sqp_entries *sqp_entry;
struct bnxt_qplib_sge ref, sge;
+ struct bnxt_re_dev *rdev;
u32 rq_prod_index;
- struct bnxt_re_sqp_entries *sqp_entry;
+
+ rdev = qp->rdev;
rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
@@ -2012,7 +2223,7 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
ref.lkey = wqe->sg_list[0].lkey;
ref.size = wqe->sg_list[0].size;
- sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
/* SGE 1 */
wqe->sg_list[0].addr = sge.addr;
@@ -2164,7 +2375,7 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
wqe->frmr.page_list = mr->pages;
wqe->frmr.page_list_len = mr->npages;
- wqe->frmr.levels = qplib_frpl->hwq.level + 1;
+ wqe->frmr.levels = qplib_frpl->hwq.level;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
/* Need unconditional fence for reg_mr
@@ -2211,8 +2422,8 @@ static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
if ((sge_len + wqe->inline_len) >
BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
- dev_err(rdev_to_dev(rdev),
- "Inline data size requested > supported value");
+ ibdev_err(&rdev->ibdev,
+ "Inline data size requested > supported value");
return -EINVAL;
}
sge_len = wr->sg_list[i].length;
@@ -2259,21 +2470,18 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
const struct ib_send_wr *wr)
{
- struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0;
unsigned long flags;
spin_lock_irqsave(&qp->sq_lock, flags);
- memset(&wqe, 0, sizeof(wqe));
while (wr) {
- /* House keeping */
- memset(&wqe, 0, sizeof(wqe));
+ struct bnxt_qplib_swqe wqe = {};
/* Common */
wqe.num_sge = wr->num_sge;
if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
- dev_err(rdev_to_dev(rdev),
- "Limit exceeded for Send SGEs");
+ ibdev_err(&rdev->ibdev,
+ "Limit exceeded for Send SGEs");
rc = -EINVAL;
goto bad;
}
@@ -2292,9 +2500,9 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Post send failed opcode = %#x rc = %d",
- wr->opcode, rc);
+ ibdev_err(&rdev->ibdev,
+ "Post send failed opcode = %#x rc = %d",
+ wr->opcode, rc);
break;
}
wr = wr->next;
@@ -2321,8 +2529,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
/* Common */
wqe.num_sge = wr->num_sge;
if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
- dev_err(rdev_to_dev(qp->rdev),
- "Limit exceeded for Send SGEs");
+ ibdev_err(&qp->rdev->ibdev,
+ "Limit exceeded for Send SGEs");
rc = -EINVAL;
goto bad;
}
@@ -2367,8 +2575,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = bnxt_re_build_atomic_wqe(wr, &wqe);
break;
case IB_WR_RDMA_READ_WITH_INV:
- dev_err(rdev_to_dev(qp->rdev),
- "RDMA Read with Invalidate is not supported");
+ ibdev_err(&qp->rdev->ibdev,
+ "RDMA Read with Invalidate is not supported");
rc = -EINVAL;
goto bad;
case IB_WR_LOCAL_INV:
@@ -2379,8 +2587,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
break;
default:
/* Unsupported WRs */
- dev_err(rdev_to_dev(qp->rdev),
- "WR (%#x) is not supported", wr->opcode);
+ ibdev_err(&qp->rdev->ibdev,
+ "WR (%#x) is not supported", wr->opcode);
rc = -EINVAL;
goto bad;
}
@@ -2388,9 +2596,9 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
if (rc) {
- dev_err(rdev_to_dev(qp->rdev),
- "post_send failed op:%#x qps = %#x rc = %d\n",
- wr->opcode, qp->qplib_qp.state, rc);
+ ibdev_err(&qp->rdev->ibdev,
+ "post_send failed op:%#x qps = %#x rc = %d\n",
+ wr->opcode, qp->qplib_qp.state, rc);
*bad_wr = wr;
break;
}
@@ -2418,8 +2626,8 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
/* Common */
wqe.num_sge = wr->num_sge;
if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
- dev_err(rdev_to_dev(rdev),
- "Limit exceeded for Receive SGEs");
+ ibdev_err(&rdev->ibdev,
+ "Limit exceeded for Receive SGEs");
rc = -EINVAL;
break;
}
@@ -2455,8 +2663,8 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
/* Common */
wqe.num_sge = wr->num_sge;
if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
- dev_err(rdev_to_dev(qp->rdev),
- "Limit exceeded for Receive SGEs");
+ ibdev_err(&qp->rdev->ibdev,
+ "Limit exceeded for Receive SGEs");
rc = -EINVAL;
*bad_wr = wr;
break;
@@ -2527,7 +2735,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
/* Validate CQ fields */
if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
- dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
+		ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
return -EINVAL;
}
@@ -2538,6 +2746,8 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;
+ cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
+ cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
if (udata) {
struct bnxt_re_cq_req req;
struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
@@ -2554,7 +2764,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = PTR_ERR(cq->umem);
goto fail;
}
- cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
+ cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl;
cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
cq->qplib_cq.dpi = &uctx->dpi;
@@ -2581,7 +2791,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
+ ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
goto fail;
}
@@ -2601,7 +2811,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
resp.rsvd = 0;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
+ ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
goto c2fail;
}
@@ -2832,12 +3042,13 @@ static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
return rc;
}
-static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
+static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
struct bnxt_qplib_cqe *cqe)
{
- struct bnxt_re_dev *rdev = qp1_qp->rdev;
+ struct bnxt_re_dev *rdev = gsi_qp->rdev;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
+ struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
struct ib_send_wr *swr;
struct ib_ud_wr udwr;
struct ib_recv_wr rwr;
@@ -2860,26 +3071,26 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
swr = &udwr.wr;
tbl_idx = cqe->wr_id;
- rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
- (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
- rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
+ rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
+ (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
+ rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
tbl_idx);
/* Shadow QP header buffer */
- shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
+	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
tbl_idx);
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
/* Store this cqe */
memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
- sqp_entry->qp1_qp = qp1_qp;
+ sqp_entry->qp1_qp = gsi_qp;
/* Find packet type from the cqe */
pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
cqe->raweth_qp1_flags2);
if (pkt_type < 0) {
- dev_err(rdev_to_dev(rdev), "Invalid packet\n");
+ ibdev_err(&rdev->ibdev, "Invalid packet\n");
return -EINVAL;
}
@@ -2926,10 +3137,10 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
rwr.wr_id = tbl_idx;
rwr.next = NULL;
- rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
+ rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to post Rx buffers to shadow QP");
+ ibdev_err(&rdev->ibdev,
+ "Failed to post Rx buffers to shadow QP");
return -ENOMEM;
}
@@ -2938,13 +3149,13 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
swr->wr_id = tbl_idx;
swr->opcode = IB_WR_SEND;
swr->next = NULL;
-
- udwr.ah = &rdev->sqp_ah->ib_ah;
- udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
- udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+ udwr.ah = &gsi_sah->ib_ah;
+ udwr.remote_qpn = gsi_sqp->qplib_qp.id;
+ udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
/* post data received in the send queue */
- rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
+ rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
return 0;
}
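/*
 * Relay performed above, using the names from this function: the raw
 * completion from the real GSI QP is parked in gsi_ctx.sqp_tbl[tbl_idx],
 * a replacement receive buffer is posted to the shadow QP, and the
 * decapsulated payload is sent to the shadow QP through the shadow AH
 * so that it later completes as an ordinary UD receive.
 */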
@@ -2998,12 +3209,12 @@ static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
-static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
+static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
- struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_re_qp *qp1_qp = NULL;
+ struct bnxt_re_dev *rdev = gsi_sqp->rdev;
+ struct bnxt_re_qp *gsi_qp = NULL;
struct bnxt_qplib_cqe *orig_cqe = NULL;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
int nw_type;
@@ -3013,13 +3224,13 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
tbl_idx = cqe->wr_id;
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
- qp1_qp = sqp_entry->qp1_qp;
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+ gsi_qp = sqp_entry->qp1_qp;
orig_cqe = &sqp_entry->cqe;
wc->wr_id = sqp_entry->wrid;
wc->byte_len = orig_cqe->length;
- wc->qp = &qp1_qp->ib_qp;
+ wc->qp = &gsi_qp->ib_qp;
wc->ex.imm_data = orig_cqe->immdata;
wc->src_qp = orig_cqe->src_qp;
@@ -3084,11 +3295,11 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
rc = bnxt_re_bind_fence_mw(lib_qp);
if (!rc) {
lib_qp->sq.phantom_wqe_cnt++;
- dev_dbg(&lib_qp->sq.hwq.pdev->dev,
- "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
- lib_qp->id, lib_qp->sq.hwq.prod,
- HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
- lib_qp->sq.phantom_wqe_cnt);
+ ibdev_dbg(&qp->rdev->ibdev,
+ "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
+ lib_qp->id, lib_qp->sq.hwq.prod,
+ HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
+ lib_qp->sq.phantom_wqe_cnt);
}
spin_unlock_irqrestore(&qp->sq_lock, flags);
@@ -3098,7 +3309,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- struct bnxt_re_qp *qp;
+ struct bnxt_re_qp *qp, *sh_qp;
struct bnxt_qplib_cqe *cqe;
int i, ncqe, budget;
struct bnxt_qplib_q *sq;
@@ -3111,7 +3322,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
budget = min_t(u32, num_entries, cq->max_cql);
num_entries = budget;
if (!cq->cql) {
- dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
+ ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
goto exit;
}
cqe = &cq->cql[0];
@@ -3124,8 +3335,8 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
qp = container_of(lib_qp,
struct bnxt_re_qp, qplib_qp);
if (send_phantom_wqe(qp) == -ENOMEM)
- dev_err(rdev_to_dev(cq->rdev),
- "Phantom failed! Scheduled to send again\n");
+ ibdev_err(&cq->rdev->ibdev,
+ "Phantom failed! Scheduled to send again\n");
else
sq->send_phantom = false;
}
@@ -3149,8 +3360,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
(unsigned long)(cqe->qp_handle),
struct bnxt_re_qp, qplib_qp);
if (!qp) {
- dev_err(rdev_to_dev(cq->rdev),
- "POLL CQ : bad QP handle");
+ ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle");
continue;
}
wc->qp = &qp->ib_qp;
@@ -3162,8 +3372,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
switch (cqe->opcode) {
case CQ_BASE_CQE_TYPE_REQ:
- if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
- qp->rdev->qp1_sqp->qplib_qp.id) {
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
*/
@@ -3189,7 +3400,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
* stored in the table
*/
tbl_idx = cqe->wr_id;
- sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
+ sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
wc->wr_id = sqp_entry->wrid;
bnxt_re_process_res_rawqp1_wc(wc, cqe);
break;
@@ -3197,8 +3408,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
bnxt_re_process_res_rc_wc(wc, cqe);
break;
case CQ_BASE_CQE_TYPE_RES_UD:
- if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
- qp->rdev->qp1_sqp->qplib_qp.id) {
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
*/
@@ -3213,9 +3425,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
bnxt_re_process_res_ud_wc(qp, wc, cqe);
break;
default:
- dev_err(rdev_to_dev(cq->rdev),
- "POLL CQ : type 0x%x not handled",
- cqe->opcode);
+ ibdev_err(&cq->rdev->ibdev,
+ "POLL CQ : type 0x%x not handled",
+ cqe->opcode);
continue;
}
wc++;
@@ -3308,7 +3520,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+ ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
return rc;
}
@@ -3355,7 +3567,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
int rc;
if (type != IB_MR_TYPE_MEM_REG) {
- dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
+ ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
return ERR_PTR(-EINVAL);
}
if (max_num_sg > MAX_PBL_LVL_1_PGS)
@@ -3385,8 +3597,8 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl, max_num_sg);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate HW FR page list");
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate HW FR page list");
goto fail_mr;
}
@@ -3421,7 +3633,7 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
+ ibdev_err(&rdev->ibdev, "Allocate MW failed!");
goto fail;
}
mw->ib_mw.rkey = mw->qplib_mw.rkey;
@@ -3442,7 +3654,7 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
+ ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
return rc;
}
@@ -3494,8 +3706,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
int umem_pgs, page_shift, rc;
if (length > BNXT_RE_MAX_MR_SIZE) {
- dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
- length, BNXT_RE_MAX_MR_SIZE);
+ ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
+ length, BNXT_RE_MAX_MR_SIZE);
return ERR_PTR(-ENOMEM);
}
@@ -3510,7 +3722,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR");
goto free_mr;
}
/* The fixed portion of the rkey is the same as the lkey */
@@ -3518,7 +3730,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
if (IS_ERR(umem)) {
- dev_err(rdev_to_dev(rdev), "Failed to get umem");
+ ibdev_err(&rdev->ibdev, "Failed to get umem");
rc = -EFAULT;
goto free_mrw;
}
@@ -3527,7 +3739,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->qplib_mr.va = virt_addr;
umem_pgs = ib_umem_page_count(umem);
if (!umem_pgs) {
- dev_err(rdev_to_dev(rdev), "umem is invalid!");
+ ibdev_err(&rdev->ibdev, "umem is invalid!");
rc = -EINVAL;
goto free_umem;
}
@@ -3544,15 +3756,15 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
virt_addr));
if (!bnxt_re_page_size_ok(page_shift)) {
- dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
+ ibdev_err(&rdev->ibdev, "umem page size unsupported!");
rc = -EFAULT;
goto fail;
}
if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
length > BNXT_RE_MAX_MR_SIZE_LOW) {
- dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
- length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
+ ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu",
+ length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
rc = -EINVAL;
goto fail;
}
@@ -3562,7 +3774,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
umem_pgs, false, 1 << page_shift);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to register user MR");
+ ibdev_err(&rdev->ibdev, "Failed to register user MR");
goto fail;
}
@@ -3595,12 +3807,11 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
u32 chip_met_rev_num = 0;
int rc;
- dev_dbg(rdev_to_dev(rdev), "ABI version requested %u",
- ibdev->ops.uverbs_abi_ver);
+ ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
- dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
- BNXT_RE_ABI_VERSION);
+		ibdev_dbg(ibdev, "requested ABI version is different from the device's %d",
+			  BNXT_RE_ABI_VERSION);
return -EPERM;
}
@@ -3614,10 +3825,10 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
spin_lock_init(&uctx->sh_lock);
resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
- chip_met_rev_num = rdev->chip_ctx.chip_num;
- chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) <<
+ chip_met_rev_num = rdev->chip_ctx->chip_num;
+ chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
- chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) <<
+ chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
resp.chip_id0 = chip_met_rev_num;
/* Future extension of chip info */
@@ -3632,7 +3843,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to copy user context");
+ ibdev_err(ibdev, "Failed to copy user context");
rc = -EFAULT;
goto cfail;
}
@@ -3682,15 +3893,14 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
PAGE_SIZE, vma->vm_page_prot)) {
- dev_err(rdev_to_dev(rdev), "Failed to map DPI");
+ ibdev_err(&rdev->ibdev, "Failed to map DPI");
return -EAGAIN;
}
} else {
pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
if (remap_pfn_range(vma, vma->vm_start,
pfn, PAGE_SIZE, vma->vm_page_prot)) {
- dev_err(rdev_to_dev(rdev),
- "Failed to map shared page");
+ ibdev_err(&rdev->ibdev, "Failed to map shared page");
return -EAGAIN;
}
}
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 793c97251588..b12fbc857f94 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -78,26 +78,43 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
+static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
+static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
+static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+
+ if (!rdev->chip_ctx)
+ return;
+ chip_ctx = rdev->chip_ctx;
+ rdev->chip_ctx = NULL;
rdev->rcfw.res = NULL;
rdev->qplib_res.cctx = NULL;
+ rdev->qplib_res.pdev = NULL;
+ rdev->qplib_res.netdev = NULL;
+ kfree(chip_ctx);
}
static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
struct bnxt *bp;
en_dev = rdev->en_dev;
bp = netdev_priv(en_dev->net);
- rdev->chip_ctx.chip_num = bp->chip_num;
+ chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
+ if (!chip_ctx)
+ return -ENOMEM;
+ chip_ctx->chip_num = bp->chip_num;
+
+ rdev->chip_ctx = chip_ctx;
	/* rest of the members to follow eventually */
- rdev->qplib_res.cctx = &rdev->chip_ctx;
+ rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
return 0;
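/*
 * Making chip_ctx a kzalloc'd pointer instead of an embedded struct
 * lets bnxt_re_destroy_chip_ctx() above NULL rdev->chip_ctx and free it
 * independently of the rdev lifetime, after clearing every cached
 * reference (rcfw.res, qplib_res.cctx/pdev/netdev).
 */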
@@ -136,9 +153,9 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
attr->max_srq);
ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
- if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
+ rdev->qplib_ctx.tqm_ctx.qcount[i] =
rdev->dev_attr.tqm_alloc_reqs[i];
}
@@ -185,7 +202,7 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
bnxt_re_limit_pf_res(rdev);
- num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
if (num_vfs)
bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
@@ -208,7 +225,7 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
return;
rdev->num_vfs = num_vfs;
- if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) {
+ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
&rdev->qplib_ctx);
@@ -221,8 +238,10 @@ static void bnxt_re_shutdown(void *p)
if (!rdev)
return;
-
- bnxt_re_ib_unreg(rdev);
+ ASSERT_RTNL();
+ /* Release the MSIx vectors before queuing unregister */
+ bnxt_re_stop_irq(rdev);
+ ib_unregister_device_queued(&rdev->ibdev);
}
static void bnxt_re_stop_irq(void *handle)
@@ -254,7 +273,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
* to f/w will timeout and that will set the
* timeout bit.
*/
- dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+ ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
return;
}
@@ -271,8 +290,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
msix_ent[indx].vector, false);
if (rc)
- dev_warn(rdev_to_dev(rdev),
- "Failed to reinit NQ index %d\n", indx - 1);
+ ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
+ indx - 1);
}
}
@@ -358,9 +377,9 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
goto done;
}
if (num_msix_got != num_msix_want) {
- dev_warn(rdev_to_dev(rdev),
- "Requested %d MSI-X vectors, got %d\n",
- num_msix_want, num_msix_got);
+ ibdev_warn(&rdev->ibdev,
+ "Requested %d MSI-X vectors, got %d\n",
+ num_msix_want, num_msix_got);
}
rdev->num_msix = num_msix_got;
done:
@@ -407,14 +426,14 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc)
- dev_err(rdev_to_dev(rdev),
- "Failed to free HW ring:%d :%#x", req.ring_id, rc);
+ ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
+ req.ring_id, rc);
return rc;
}
-static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
- int pages, int type, u32 ring_mask,
- u32 map_index, u16 *fw_ring_id)
+static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ring_attr *ring_attr,
+ u16 *fw_ring_id)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_alloc_input req = {0};
@@ -428,18 +447,18 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
req.enables = 0;
- req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
- if (pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
+ if (ring_attr->pages > 1) {
/* Page size is in log2 units */
req.page_size = BNXT_PAGE_SHIFT;
req.page_tbl_depth = 1;
}
req.fbo = 0;
/* Association of ring index with doorbell index and MSIX number */
- req.logical_id = cpu_to_le16(map_index);
- req.length = cpu_to_le32(ring_mask + 1);
- req.ring_type = type;
- req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req.logical_id = cpu_to_le16(ring_attr->lrid);
+ req.length = cpu_to_le32(ring_attr->depth + 1);
+ req.ring_type = ring_attr->type;
+ req.int_mode = ring_attr->mode;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
@@ -468,8 +487,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc)
- dev_err(rdev_to_dev(rdev),
- "Failed to free HW stats context %#x", rc);
+ ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
+ rc);
return rc;
}
@@ -524,17 +543,12 @@ static bool is_bnxt_re_dev(struct net_device *netdev)
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
- struct bnxt_re_dev *rdev;
+ struct ib_device *ibdev =
+ ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
+ if (!ibdev)
+ return NULL;
- rcu_read_lock();
- list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
- if (rdev->netdev == netdev) {
- rcu_read_unlock();
- return rdev;
- }
- }
- rcu_read_unlock();
- return NULL;
+ return container_of(ibdev, struct bnxt_re_dev, ibdev);
}
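/*
 * ib_device_get_by_netdev() returns the device with a reference held;
 * bnxt_re_netdev_event() below is therefore responsible for dropping it
 * with ib_device_put() (the new "release" flag) once the event has been
 * handled or deferred.
 */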
static void bnxt_re_dev_unprobe(struct net_device *netdev,
@@ -608,11 +622,6 @@ static const struct attribute_group bnxt_re_dev_attr_group = {
.attrs = bnxt_re_attributes,
};
-static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
-{
- ib_unregister_device(&rdev->ibdev);
-}
-
static const struct ib_device_ops bnxt_re_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_BNXT_RE,
@@ -627,6 +636,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.create_cq = bnxt_re_create_cq,
.create_qp = bnxt_re_create_qp,
.create_srq = bnxt_re_create_srq,
+ .dealloc_driver = bnxt_re_dealloc_driver,
.dealloc_pd = bnxt_re_dealloc_pd,
.dealloc_ucontext = bnxt_re_dealloc_ucontext,
.del_gid = bnxt_re_del_gid,
@@ -723,15 +733,11 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
dev_put(rdev->netdev);
rdev->netdev = NULL;
-
mutex_lock(&bnxt_re_dev_lock);
list_del_rcu(&rdev->list);
mutex_unlock(&bnxt_re_dev_lock);
synchronize_rcu();
-
- ib_dealloc_device(&rdev->ibdev);
- /* rdev is gone */
}
static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
@@ -742,8 +748,8 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
/* Allocate bnxt_re_dev instance here */
rdev = ib_alloc_device(bnxt_re_dev, ibdev);
if (!rdev) {
- dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
- ROCE_DRV_MODULE_NAME);
+ ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
+ ROCE_DRV_MODULE_NAME);
return NULL;
}
/* Default values */
@@ -872,8 +878,8 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
int rc = 0;
if (!srq) {
- dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
- ROCE_DRV_MODULE_NAME);
+ ibdev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
+ ROCE_DRV_MODULE_NAME);
rc = -EINVAL;
goto done;
}
@@ -900,8 +906,8 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
qplib_cq);
if (!cq) {
- dev_err(NULL, "%s: CQ is NULL, CQN not handled",
- ROCE_DRV_MODULE_NAME);
+ ibdev_err(NULL, "%s: CQ is NULL, CQN not handled",
+ ROCE_DRV_MODULE_NAME);
return -EINVAL;
}
if (cq->ib_cq.comp_handler) {
@@ -916,7 +922,7 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
- return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
BNXT_RE_GEN_P5_PF_NQ_DB) :
rdev->msix_entries[indx].db_offset;
@@ -948,8 +954,8 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
db_offt, &bnxt_re_cqn_handler,
&bnxt_re_srqn_handler);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to enable NQ with rc = 0x%x", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to enable NQ with rc = 0x%x", rc);
goto fail;
}
num_vec_enabled++;
@@ -967,10 +973,10 @@ static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
int i;
for (i = 0; i < rdev->num_msix - 1; i++) {
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
- rdev->nq[i].res = NULL;
bnxt_qplib_free_nq(&rdev->nq[i]);
+ rdev->nq[i].res = NULL;
}
}
@@ -991,10 +997,10 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
+ struct bnxt_re_ring_attr rattr = {};
+ struct bnxt_qplib_ctx *qplib_ctx;
int num_vec_created = 0;
- dma_addr_t *pg_map;
int rc = 0, i;
- int pages;
u8 type;
/* Configure and allocate resources for qplib */
@@ -1015,27 +1021,31 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc)
goto dealloc_res;
+ qplib_ctx = &rdev->qplib_ctx;
for (i = 0; i < rdev->num_msix - 1; i++) {
- rdev->nq[i].res = &rdev->qplib_res;
- rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
- BNXT_RE_MAX_SRQC_COUNT + 2;
- rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
+ struct bnxt_qplib_nq *nq;
+
+ nq = &rdev->nq[i];
+ nq->hwq.max_elements = (qplib_ctx->cq_count +
+ qplib_ctx->srqc_count + 2);
+ rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
- i, rc);
+ ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
+ i, rc);
goto free_nq;
}
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
- pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
- pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
- rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
- BNXT_QPLIB_NQE_MAX_CNT - 1,
- rdev->msix_entries[i + 1].ring_idx,
- &rdev->nq[i].ring_id);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+ rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
+ rattr.type = type;
+ rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
+ rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+ rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate NQ fw id with rc = 0x%x",
- rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate NQ fw id with rc = 0x%x",
+ rc);
bnxt_qplib_free_nq(&rdev->nq[i]);
goto free_nq;
}
@@ -1043,8 +1053,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
}
return 0;
free_nq:
- for (i = num_vec_created; i >= 0; i--) {
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ for (i = num_vec_created - 1; i >= 0; i--) {
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
bnxt_qplib_free_nq(&rdev->nq[i]);
}
@@ -1109,10 +1119,10 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
return rc;
if (resp.queue_cfg_info) {
- dev_warn(rdev_to_dev(rdev),
- "Asymmetric cos queue configuration detected");
- dev_warn(rdev_to_dev(rdev),
- " on device, QoS may not be fully functional\n");
+		ibdev_warn(&rdev->ibdev,
+			   "Asymmetric cos queue configuration detected on device, QoS may not be fully functional\n");
}
qcfgmap = &resp.pri0_cos_queue_id;
tmp_map = (u8 *)cid_map;
@@ -1125,7 +1135,8 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp)
{
- return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
+ return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
+ (qp == rdev->gsi_ctx.gsi_sqp);
}
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
@@ -1160,12 +1171,13 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
u16 gid_idx, index;
int rc = 0;
- if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
+ if (!ib_device_try_get(&rdev->ibdev))
return 0;
if (!sgid_tbl) {
- dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
- return -EINVAL;
+ ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
+ rc = -EINVAL;
+ goto out;
}
for (index = 0; index < sgid_tbl->active; index++) {
@@ -1185,7 +1197,8 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
rdev->qplib_res.netdev->dev_addr);
}
-
+out:
+ ib_device_put(&rdev->ibdev);
return rc;
}
@@ -1241,7 +1254,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
/* Get cosq id for this priority */
rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
if (rc) {
- dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map);
+ ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
return rc;
}
/* Parse CoS IDs for app priority */
@@ -1250,8 +1263,8 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
/* Config BONO. */
rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
if (rc) {
- dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n",
- rdev->cosq[0], rdev->cosq[1]);
+ ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
+ rdev->cosq[0], rdev->cosq[1]);
return rc;
}
@@ -1286,8 +1299,8 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to query HW version, rc = 0x%x", rc);
+ ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
+ rc);
return;
}
rdev->qplib_ctx.hwrm_intf_ver =
@@ -1297,15 +1310,35 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
le16_to_cpu(resp.hwrm_intf_patch);
}
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
+static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
+{
+ int rc = 0;
+ u32 event;
+
+ /* Register ib dev */
+ rc = bnxt_re_register_ib(rdev);
+ if (rc) {
+ pr_err("Failed to register with IB: %#x\n", rc);
+ return rc;
+ }
+ dev_info(rdev_to_dev(rdev), "Device registered successfully");
+ ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+ &rdev->active_width);
+ set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
+
+ event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);
+
+ return rc;
+}
+
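+/*
+ * bnxt_re_ib_init() is the tail end of the old bnxt_re_ib_reg(): IB
+ * registration now runs from the deferred NETDEV_REGISTER work after
+ * bnxt_re_dev_init() has brought up rings and firmware channels, and
+ * the initial port event is derived from the current netif carrier
+ * state rather than being assumed ACTIVE.
+ */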
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
u8 type;
int rc;
- if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
- /* Cleanup ib dev */
- bnxt_re_unregister_ib(rdev);
- }
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -1318,28 +1351,28 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
if (rc)
- dev_warn(rdev_to_dev(rdev),
- "Failed to deinitialize RCFW: %#x", rc);
+ ibdev_warn(&rdev->ibdev,
+ "Failed to deinitialize RCFW: %#x", rc);
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
- bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
+ bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
rc = bnxt_re_free_msix(rdev);
if (rc)
- dev_warn(rdev_to_dev(rdev),
- "Failed to free MSI-X vectors: %#x", rc);
+ ibdev_warn(&rdev->ibdev,
+ "Failed to free MSI-X vectors: %#x", rc);
}
bnxt_re_destroy_chip_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
rc = bnxt_re_unregister_netdev(rdev);
if (rc)
- dev_warn(rdev_to_dev(rdev),
- "Failed to unregister with netdev: %#x", rc);
+ ibdev_warn(&rdev->ibdev,
+ "Failed to unregister with netdev: %#x", rc);
}
}
@@ -1353,31 +1386,29 @@ static void bnxt_re_worker(struct work_struct *work)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
-static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
+static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
{
- dma_addr_t *pg_map;
- u32 db_offt, ridx;
- int pages, vid;
- bool locked;
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_re_ring_attr rattr;
+ u32 db_offt;
+ int vid;
u8 type;
int rc;
- /* Acquire rtnl lock through out this function */
- rtnl_lock();
- locked = true;
-
/* Registered a new RoCE device instance to netdev */
+ memset(&rattr, 0, sizeof(rattr));
rc = bnxt_re_register_netdev(rdev);
if (rc) {
rtnl_unlock();
- pr_err("Failed to register with netedev: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+			  "Failed to register with netdev: %#x\n", rc);
return -EINVAL;
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
rc = bnxt_re_setup_chip_ctx(rdev);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to get chip context\n");
+ ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
return -EINVAL;
}
@@ -1386,7 +1417,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
rc = bnxt_re_request_msix(rdev);
if (rc) {
- pr_err("Failed to get MSI-X vectors: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to get MSI-X vectors: %#x\n", rc);
rc = -EINVAL;
goto fail;
}
@@ -1397,31 +1429,36 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
- rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
+ rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
&rdev->qplib_ctx,
BNXT_RE_MAX_QPC_COUNT);
if (rc) {
- pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate RCFW Channel: %#x\n", rc);
goto fail;
}
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
- pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
- pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
- ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
- rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
- BNXT_QPLIB_CREQE_MAX_CNT - 1,
- ridx, &rdev->rcfw.creq_ring_id);
+
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ creq = &rdev->rcfw.creq;
+ rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+ rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
+ rattr.type = type;
+ rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
+ rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
if (rc) {
- pr_err("Failed to allocate CREQ: %#x\n", rc);
+ ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
goto free_rcfw;
}
db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
- rc = bnxt_qplib_enable_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
+ rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
vid, db_offt, rdev->is_virtfn,
&bnxt_re_aeq_handler);
if (rc) {
- pr_err("Failed to enable RCFW channel: %#x\n", rc);
+ ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
+ rc);
goto free_ring;
}
@@ -1432,24 +1469,27 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
bnxt_re_set_resource_limits(rdev);
- rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
- bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
+ rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
+ bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
if (rc) {
- pr_err("Failed to allocate QPLIB context: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate QPLIB context: %#x\n", rc);
goto disable_rcfw;
}
rc = bnxt_re_net_stats_ctx_alloc(rdev,
rdev->qplib_ctx.stats.dma_map,
&rdev->qplib_ctx.stats.fw_id);
if (rc) {
- pr_err("Failed to allocate stats context: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate stats context: %#x\n", rc);
goto free_ctx;
}
rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
rdev->is_virtfn);
if (rc) {
- pr_err("Failed to initialize RCFW: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to initialize RCFW: %#x\n", rc);
goto free_sctx;
}
set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);
@@ -1457,13 +1497,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Resources based on the 'new' device caps */
rc = bnxt_re_alloc_res(rdev);
if (rc) {
- pr_err("Failed to allocate resources: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate resources: %#x\n", rc);
goto fail;
}
set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
rc = bnxt_re_init_res(rdev);
if (rc) {
- pr_err("Failed to initialize resources: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to initialize resources: %#x\n", rc);
goto fail;
}
@@ -1472,46 +1514,28 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
if (!rdev->is_virtfn) {
rc = bnxt_re_setup_qos(rdev);
if (rc)
- pr_info("RoCE priority not yet configured\n");
+ ibdev_info(&rdev->ibdev,
+ "RoCE priority not yet configured\n");
INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
- rtnl_unlock();
- locked = false;
-
- /* Register ib dev */
- rc = bnxt_re_register_ib(rdev);
- if (rc) {
- pr_err("Failed to register with IB: %#x\n", rc);
- goto fail;
- }
- set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
- dev_info(rdev_to_dev(rdev), "Device registered successfully");
- ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
- &rdev->active_width);
- set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
-
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
- bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
+ bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
- if (!locked)
- rtnl_lock();
- bnxt_re_ib_unreg(rdev);
- rtnl_unlock();
+ bnxt_re_dev_uninit(rdev);
return rc;
}
@@ -1538,7 +1562,8 @@ static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
en_dev = bnxt_re_dev_probe(netdev);
if (IS_ERR(en_dev)) {
if (en_dev != ERR_PTR(-ENODEV))
- pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
+ ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
+ ROCE_DRV_MODULE_NAME);
rc = PTR_ERR(en_dev);
goto exit;
}
@@ -1552,9 +1577,47 @@ exit:
return rc;
}
-static void bnxt_re_remove_one(struct bnxt_re_dev *rdev)
+static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
{
+ bnxt_re_dev_uninit(rdev);
pci_dev_put(rdev->en_dev->pdev);
+ bnxt_re_dev_unreg(rdev);
+}
+
+static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
+ struct net_device *netdev)
+{
+ int rc;
+
+ rc = bnxt_re_dev_reg(rdev, netdev);
+ if (rc == -ENODEV)
+ return rc;
+ if (rc) {
+ pr_err("Failed to register with the device %s: %#x\n",
+ netdev->name, rc);
+ return rc;
+ }
+
+ pci_dev_get((*rdev)->en_dev->pdev);
+ rc = bnxt_re_dev_init(*rdev);
+ if (rc) {
+ pci_dev_put((*rdev)->en_dev->pdev);
+ bnxt_re_dev_unreg(*rdev);
+ }
+
+ return rc;
+}
+
+static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
+{
+ struct bnxt_re_dev *rdev =
+ container_of(ib_dev, struct bnxt_re_dev, ibdev);
+
+ dev_info(rdev_to_dev(rdev), "Unregistering Device");
+
+ rtnl_lock();
+ bnxt_re_remove_device(rdev);
+ rtnl_unlock();
}
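/*
 * Wiring .dealloc_driver into bnxt_re_dev_ops hands teardown to the
 * ib_core unregistration machinery (ib_unregister_device_queued() and
 * ib_unregister_driver()), which is what allows the driver to drop its
 * own IBDEV_REGISTERED flag and sched_count bookkeeping.
 */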
/* Handle all deferred netevents tasks */
@@ -1567,21 +1630,23 @@ static void bnxt_re_task(struct work_struct *work)
re_work = container_of(work, struct bnxt_re_work, work);
rdev = re_work->rdev;
- if (re_work->event != NETDEV_REGISTER &&
- !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
- return;
-
- switch (re_work->event) {
- case NETDEV_REGISTER:
- rc = bnxt_re_ib_reg(rdev);
+ if (re_work->event == NETDEV_REGISTER) {
+ rc = bnxt_re_ib_init(rdev);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to register with IB: %#x", rc);
- bnxt_re_remove_one(rdev);
- bnxt_re_dev_unreg(rdev);
+ ibdev_err(&rdev->ibdev,
+ "Failed to register with IB: %#x", rc);
+ rtnl_lock();
+ bnxt_re_remove_device(rdev);
+ rtnl_unlock();
goto exit;
}
- break;
+ goto exit;
+ }
+
+ if (!ib_device_try_get(&rdev->ibdev))
+ goto exit;
+
+ switch (re_work->event) {
case NETDEV_UP:
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
IB_EVENT_PORT_ACTIVE);
@@ -1601,17 +1666,12 @@ static void bnxt_re_task(struct work_struct *work)
default:
break;
}
- smp_mb__before_atomic();
- atomic_dec(&rdev->sched_count);
+ ib_device_put(&rdev->ibdev);
exit:
+ put_device(&rdev->ibdev.dev);
kfree(re_work);
}
-static void bnxt_re_init_one(struct bnxt_re_dev *rdev)
-{
- pci_dev_get(rdev->en_dev->pdev);
-}
-
/*
* "Notifier chain callback can be invoked for the same chain from
* different CPUs at the same time".
@@ -1634,6 +1694,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
struct bnxt_re_dev *rdev;
int rc = 0;
bool sch_work = false;
+ bool release = true;
real_dev = rdma_vlan_dev_real_dev(netdev);
if (!real_dev)
@@ -1641,7 +1702,8 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
rdev = bnxt_re_from_netdev(real_dev);
if (!rdev && event != NETDEV_REGISTER)
- goto exit;
+ return NOTIFY_OK;
+
if (real_dev != netdev)
goto exit;
@@ -1649,27 +1711,14 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
case NETDEV_REGISTER:
if (rdev)
break;
- rc = bnxt_re_dev_reg(&rdev, real_dev);
- if (rc == -ENODEV)
- break;
- if (rc) {
- pr_err("Failed to register with the device %s: %#x\n",
- real_dev->name, rc);
- break;
- }
- bnxt_re_init_one(rdev);
- sch_work = true;
+ rc = bnxt_re_add_device(&rdev, real_dev);
+ if (!rc)
+ sch_work = true;
+ release = false;
break;
case NETDEV_UNREGISTER:
- /* netdev notifier will call NETDEV_UNREGISTER again later since
- * we are still holding the reference to the netdev
- */
- if (atomic_read(&rdev->sched_count) > 0)
- goto exit;
- bnxt_re_ib_unreg(rdev);
- bnxt_re_remove_one(rdev);
- bnxt_re_dev_unreg(rdev);
+ ib_unregister_device_queued(&rdev->ibdev);
break;
default:
@@ -1680,17 +1729,19 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
/* Allocate for the deferred task */
re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
if (re_work) {
+ get_device(&rdev->ibdev.dev);
re_work->rdev = rdev;
re_work->event = event;
re_work->vlan_dev = (real_dev == netdev ?
NULL : netdev);
INIT_WORK(&re_work->work, bnxt_re_task);
- atomic_inc(&rdev->sched_count);
queue_work(bnxt_re_wq, &re_work->work);
}
}
exit:
+ if (rdev && release)
+ ib_device_put(&rdev->ibdev);
return NOTIFY_DONE;
}
@@ -1726,36 +1777,21 @@ err_netdev:
static void __exit bnxt_re_mod_exit(void)
{
- struct bnxt_re_dev *rdev, *next;
- LIST_HEAD(to_be_deleted);
+ struct bnxt_re_dev *rdev;
- mutex_lock(&bnxt_re_dev_lock);
- /* Free all adapter allocated resources */
- if (!list_empty(&bnxt_re_dev_list))
- list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
- mutex_unlock(&bnxt_re_dev_lock);
- /*
- * Cleanup the devices in reverse order so that the VF device
- * cleanup is done before PF cleanup
- */
- list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
- dev_info(rdev_to_dev(rdev), "Unregistering Device");
- /*
- * Flush out any scheduled tasks before destroying the
- * resources
- */
- flush_workqueue(bnxt_re_wq);
- bnxt_re_dev_stop(rdev);
- /* Acquire the rtnl_lock as the L2 resources are freed here */
- rtnl_lock();
- bnxt_re_ib_unreg(rdev);
- rtnl_unlock();
- bnxt_re_remove_one(rdev);
- bnxt_re_dev_unreg(rdev);
- }
unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
if (bnxt_re_wq)
destroy_workqueue(bnxt_re_wq);
+ list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
+ /* VF devices must be removed before the PF device. Queue the
+ * VF unregistrations first so that the VFs are torn down
+ * ahead of the PF when ib_unregister_driver() runs.
+ */
+ if (rdev->is_virtfn)
+ ib_unregister_device(&rdev->ibdev);
+ }
+ ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}
module_init(bnxt_re_mod_init);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 020f70e6865e..899a5d2c100e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -43,6 +43,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
@@ -53,9 +54,7 @@
#include "qplib_sp.h"
#include "qplib_fp.h"
-static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
-static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
@@ -233,6 +232,70 @@ fail:
return rc;
}
+static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_hwq *hwq = &nq->hwq;
+ struct nq_base *nqe, **nq_ptr;
+ int budget = nq->budget;
+ u32 sw_cons, raw_cons;
+ uintptr_t q_handle;
+ u16 type;
+
+ spin_lock_bh(&hwq->lock);
+ /* Service the NQ until empty */
+ raw_cons = hwq->cons;
+ while (budget--) {
+ sw_cons = HWQ_CMP(raw_cons, hwq);
+ nq_ptr = (struct nq_base **)hwq->pbl_ptr;
+ nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
+ if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
+ break;
+
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+
+ type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
+ switch (type) {
+ case NQ_BASE_TYPE_CQ_NOTIFICATION:
+ {
+ struct nq_cn *nqcne = (struct nq_cn *)nqe;
+
+ q_handle = le32_to_cpu(nqcne->cq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
+ << 32;
+ if ((unsigned long)cq == q_handle) {
+ nqcne->cq_handle_low = 0;
+ nqcne->cq_handle_high = 0;
+ cq->cnq_events++;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ raw_cons++;
+ }
+ spin_unlock_bh(&hwq->lock);
+}
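
The NQE_CMP_VALID() test above implements the usual phase-bit protocol: hardware flips each entry's valid bit on every pass through the ring, so the consumer accepts an entry only when that bit matches the phase implied by the raw consumer index. A minimal userspace model of the check, assuming a power-of-two ring and using illustrative names rather than the driver's macros:

#include <stdbool.h>
#include <stdint.h>

/* An entry is fresh only if its V bit equals the expected phase,
 * which toggles once per pass through a power-of-two ring. */
static bool ring_entry_valid(uint16_t entry_flags, uint32_t raw_cons,
			     uint32_t ring_size)
{
	bool v_bit = entry_flags & 0x1;		/* entry's valid bit */
	bool expect = !(raw_cons & ring_size);	/* flips each wrap */

	return v_bit == expect;
}
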
+
+/* Wait until all NQEs for this CQ have been received, cleaning out any
+ * NQEs still associated with this CQ along the way.
+ */
+static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
+{
+ u32 retry_cnt = 100;
+
+ while (retry_cnt--) {
+ if (cnq_events == cq->cnq_events)
+ return;
+ usleep_range(50, 100);
+ clean_nq(cq->nq, cq);
+ }
+}
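
__wait_for_all_nqes() is a bounded sleep-poll: it gives the NQ up to 100 rounds of 50-100us (hence the new <linux/delay.h> include) for the event count to catch up, scavenging stale entries itself between sleeps. The general shape, sketched with a hypothetical condition callback:

#include <linux/delay.h>
#include <linux/types.h>

/* Poll cond(arg) for up to 'tries' rounds, sleeping 50-100us between
 * attempts; returns true once the condition holds. Illustrative only. */
static bool poll_bounded(bool (*cond)(void *arg), void *arg, u32 tries)
{
	while (tries--) {
		if (cond(arg))
			return true;
		usleep_range(50, 100);
	}
	return false;
}
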
+
static void bnxt_qplib_service_nq(unsigned long data)
{
struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
@@ -241,12 +304,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct bnxt_qplib_cq *cq;
int num_cqne_processed = 0;
int num_srqne_processed = 0;
- u32 sw_cons, raw_cons;
- u16 type;
int budget = nq->budget;
+ u32 sw_cons, raw_cons;
uintptr_t q_handle;
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
+ u16 type;
+ spin_lock_bh(&hwq->lock);
/* Service the NQ until empty */
raw_cons = hwq->cons;
while (budget--) {
@@ -272,7 +335,10 @@ static void bnxt_qplib_service_nq(unsigned long data)
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
<< 32;
cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
- bnxt_qplib_arm_cq_enable(cq);
+ if (!cq)
+ break;
+ bnxt_qplib_armen_db(&cq->dbinfo,
+ DBC_DBC_TYPE_CQ_ARMENA);
spin_lock_bh(&cq->compl_lock);
atomic_set(&cq->arm_state, 0);
if (!nq->cqn_handler(nq, (cq)))
@@ -280,19 +346,22 @@ static void bnxt_qplib_service_nq(unsigned long data)
else
dev_warn(&nq->pdev->dev,
"cqn - type 0x%x not handled\n", type);
+ cq->cnq_events++;
spin_unlock_bh(&cq->compl_lock);
break;
}
case NQ_BASE_TYPE_SRQ_EVENT:
{
+ struct bnxt_qplib_srq *srq;
struct nq_srq_event *nqsrqe =
(struct nq_srq_event *)nqe;
q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
<< 32;
- bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
- DBC_DBC_TYPE_SRQ_ARMENA);
+ srq = (struct bnxt_qplib_srq *)q_handle;
+ bnxt_qplib_armen_db(&srq->dbinfo,
+ DBC_DBC_TYPE_SRQ_ARMENA);
if (!nq->srqn_handler(nq,
(struct bnxt_qplib_srq *)q_handle,
nqsrqe->event))
@@ -314,10 +383,9 @@ static void bnxt_qplib_service_nq(unsigned long data)
}
if (hwq->cons != raw_cons) {
hwq->cons = raw_cons;
- bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
- hwq->max_elements, nq->ring_id,
- gen_p5);
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
}
+ spin_unlock_bh(&hwq->lock);
}
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
@@ -333,25 +401,23 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
/* Fan out to CPU affinitized kthreads? */
- tasklet_schedule(&nq->worker);
+ tasklet_schedule(&nq->nq_tasklet);
return IRQ_HANDLED;
}
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
- tasklet_disable(&nq->worker);
+ tasklet_disable(&nq->nq_tasklet);
/* Mask h/w interrupt */
- bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons,
- nq->hwq.max_elements, nq->ring_id, gen_p5);
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
/* Sync with last running IRQ handler */
- synchronize_irq(nq->vector);
+ synchronize_irq(nq->msix_vec);
if (kill)
- tasklet_kill(&nq->worker);
+ tasklet_kill(&nq->nq_tasklet);
if (nq->requested) {
- irq_set_affinity_hint(nq->vector, NULL);
- free_irq(nq->vector, nq);
+ irq_set_affinity_hint(nq->msix_vec, NULL);
+ free_irq(nq->msix_vec, nq);
nq->requested = false;
}
}
@@ -364,89 +430,108 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
}
/* Make sure the HW is stopped! */
- if (nq->requested)
- bnxt_qplib_nq_stop_irq(nq, true);
+ bnxt_qplib_nq_stop_irq(nq, true);
- if (nq->bar_reg_iomem)
- iounmap(nq->bar_reg_iomem);
- nq->bar_reg_iomem = NULL;
+ if (nq->nq_db.reg.bar_reg) {
+ iounmap(nq->nq_db.reg.bar_reg);
+ nq->nq_db.reg.bar_reg = NULL;
+ }
nq->cqn_handler = NULL;
nq->srqn_handler = NULL;
- nq->vector = 0;
+ nq->msix_vec = 0;
}
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
int msix_vector, bool need_init)
{
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
int rc;
if (nq->requested)
return -EFAULT;
- nq->vector = msix_vector;
+ nq->msix_vec = msix_vector;
if (need_init)
- tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+ tasklet_init(&nq->nq_tasklet, bnxt_qplib_service_nq,
(unsigned long)nq);
else
- tasklet_enable(&nq->worker);
+ tasklet_enable(&nq->nq_tasklet);
snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+ rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
if (rc)
return rc;
cpumask_clear(&nq->mask);
cpumask_set_cpu(nq_indx, &nq->mask);
- rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+ rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
if (rc) {
dev_warn(&nq->pdev->dev,
"set affinity failed; vector: %d nq_idx: %d\n",
- nq->vector, nq_indx);
+ nq->msix_vec, nq_indx);
}
nq->requested = true;
- bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons,
- nq->hwq.max_elements, nq->ring_id, gen_p5);
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
+
+ return rc;
+}
+
+static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
+{
+ resource_size_t reg_base;
+ struct bnxt_qplib_nq_db *nq_db;
+ struct pci_dev *pdev;
+ int rc = 0;
+
+ pdev = nq->pdev;
+ nq_db = &nq->nq_db;
+
+ nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
+ nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
+ if (!nq_db->reg.bar_base) {
+ dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resource start is 0!",
+ nq_db->reg.bar_id);
+ rc = -ENOMEM;
+ goto fail;
+ }
+ reg_base = nq_db->reg.bar_base + reg_offt;
+ /* Unconditionally map 8 bytes to support 57500 series */
+ nq_db->reg.len = 8;
+ nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
+ if (!nq_db->reg.bar_reg) {
+ dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
+ nq_db->reg.bar_id);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ nq_db->dbinfo.db = nq_db->reg.bar_reg;
+ nq_db->dbinfo.hwq = &nq->hwq;
+ nq_db->dbinfo.xid = nq->ring_id;
+fail:
return rc;
}
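
bnxt_qplib_map_nq_db() follows the stock recipe for exposing a single doorbell register: locate the BAR, ioremap just the few bytes needed, and record the kernel virtual address for later writeq()/writel(). A condensed sketch of that recipe, with error reporting trimmed and illustrative names:

#include <linux/io.h>
#include <linux/pci.h>

/* Map a small doorbell window at 'offset' inside BAR 'bar'. */
static void __iomem *map_db_window(struct pci_dev *pdev, int bar,
				   unsigned long offset, size_t len)
{
	resource_size_t base = pci_resource_start(pdev, bar);

	if (!base)
		return NULL;			/* BAR not assigned */
	return ioremap(base + offset, len);	/* iounmap() to undo */
}
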
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
- int (*cqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *),
- int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_srq *,
- u8 event))
+ cqn_handler_t cqn_handler,
+ srqn_handler_t srqn_handler)
{
- resource_size_t nq_base;
int rc = -1;
- if (cqn_handler)
- nq->cqn_handler = cqn_handler;
-
- if (srqn_handler)
- nq->srqn_handler = srqn_handler;
+ nq->pdev = pdev;
+ nq->cqn_handler = cqn_handler;
+ nq->srqn_handler = srqn_handler;
/* Have a task to schedule CQ notifiers in post send case */
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
if (!nq->cqn_wq)
return -ENOMEM;
- nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
- nq->bar_reg_off = bar_reg_offset;
- nq_base = pci_resource_start(pdev, nq->bar_reg);
- if (!nq_base) {
- rc = -ENOMEM;
- goto fail;
- }
- /* Unconditionally map 8 bytes to support 57500 series */
- nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8);
- if (!nq->bar_reg_iomem) {
- rc = -ENOMEM;
+ rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
+ if (rc)
goto fail;
- }
rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
if (rc) {
@@ -464,49 +549,38 @@ fail:
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
if (nq->hwq.max_elements) {
- bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
+ bnxt_qplib_free_hwq(nq->res, &nq->hwq);
nq->hwq.max_elements = 0;
}
}
-int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
- u8 hwq_type;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
- nq->pdev = pdev;
+ nq->pdev = res->pdev;
+ nq->res = res;
if (!nq->hwq.max_elements ||
nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
- hwq_type = bnxt_qplib_get_hwq_type(nq->res);
- if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL,
- &nq->hwq.max_elements,
- BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
- PAGE_SIZE, hwq_type))
- return -ENOMEM;
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.depth = nq->hwq.max_elements;
+ hwq_attr.stride = sizeof(struct nq_base);
+ hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
+ if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
+ dev_err(&nq->pdev->dev, "FP NQ allocation failed");
+ return -ENOMEM;
+ }
nq->budget = 8;
return 0;
}
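
The allocation rework replaces long positional argument lists with a zero-initialized bnxt_qplib_hwq_attr that callers fill field by field; anything left unset (aux_stride, aux_depth) defaults to zero. The same attribute-struct idiom in miniature, with hypothetical names:

#include <stdlib.h>

struct queue_attr {
	size_t depth;		/* number of elements */
	size_t stride;		/* bytes per element */
	size_t aux_depth;	/* optional; defaults to 0 */
	size_t aux_stride;	/* optional; defaults to 0 */
};

/* Toy allocator taking an attribute struct instead of six
 * positional parameters; names are illustrative. */
static void *queue_alloc(const struct queue_attr *attr)
{
	return calloc(attr->depth, attr->stride);
}

static void *example_alloc(void)
{
	struct queue_attr attr = {};	/* unset fields stay zero */

	attr.depth = 1024;
	attr.stride = 16;
	return queue_alloc(&attr);
}
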
/* SRQ */
-static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
-{
- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- void __iomem *db;
- u32 sw_prod;
- u64 val = 0;
-
- /* Ring DB */
- sw_prod = (arm_type == DBC_DBC_TYPE_SRQ_ARM) ?
- srq->threshold : HWQ_CMP(srq_hwq->prod, srq_hwq);
- db = (arm_type == DBC_DBC_TYPE_SRQ_ARMENA) ? srq->dbr_base :
- srq->dpi->dbr;
- val = ((srq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
- val <<= 32;
- val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
- writeq(val, db);
-}
-
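
The deleted helper above shows the 64-bit doorbell layout that the new bnxt_qplib_*_db() helpers centralize: queue id and record type packed into the high 32 bits, the software index into the low 32. A runnable userspace model of that packing, with illustrative shift/mask values standing in for the DBC_DBC_* constants:

#include <stdint.h>
#include <stdio.h>

#define XID_SHIFT	0		/* illustrative, not DBC_DBC_XID_SFT */
#define XID_MASK	0xfffffU	/* illustrative 20-bit id field */
#define IDX_MASK	0xffffffU	/* illustrative 24-bit index field */
#define TYPE_SQ		0x40000000U	/* illustrative type code */

static uint64_t pack_doorbell(uint32_t xid, uint32_t type, uint32_t index)
{
	uint64_t val = ((xid << XID_SHIFT) & XID_MASK) | type;

	val <<= 32;			/* id + type in the high word */
	val |= index & IDX_MASK;	/* ring index in the low word */
	return val;
}

int main(void)
{
	printf("db word: %#llx\n",
	       (unsigned long long)pack_doorbell(0x12, TYPE_SQ, 7));
	return 0;
}
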
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
@@ -526,24 +600,26 @@ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
kfree(srq->swq);
if (rc)
return;
- bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ bnxt_qplib_free_hwq(res, &srq->hwq);
}
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_create_srq req;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
struct creq_create_srq_resp resp;
+ struct cmdq_create_srq req;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
int rc, idx;
- srq->hwq.max_elements = srq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, &srq->sg_info,
- &srq->hwq.max_elements,
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &srq->sg_info;
+ hwq_attr.depth = srq->max_wqe;
+ hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
goto exit;
@@ -595,14 +671,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
srq->swq[srq->last_idx].next_idx = -1;
srq->id = le32_to_cpu(resp.xid);
- srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
+ srq->dbinfo.hwq = &srq->hwq;
+ srq->dbinfo.xid = srq->id;
+ srq->dbinfo.db = srq->dpi->dbr;
+ srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
if (srq->threshold)
- bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARMENA);
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
srq->arm_req = false;
return 0;
fail:
- bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ bnxt_qplib_free_hwq(res, &srq->hwq);
kfree(srq->swq);
exit:
return rc;
@@ -621,7 +700,7 @@ int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
srq_hwq->max_elements - sw_cons + sw_prod;
if (count > srq->threshold) {
srq->arm_req = false;
- bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
+ bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
} else {
/* Deferred arming */
srq->arm_req = true;
@@ -709,10 +788,10 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
srq_hwq->max_elements - sw_cons + sw_prod;
spin_unlock(&srq_hwq->lock);
/* Ring DB */
- bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ);
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
if (srq->arm_req == true && count > srq->threshold) {
srq->arm_req = false;
- bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
+ bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
}
done:
return rc;
@@ -721,15 +800,16 @@ done:
/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_create_qp1 req;
- struct creq_create_qp1_resp resp;
- struct bnxt_qplib_pbl *pbl;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
- int rc;
+ struct creq_create_qp1_resp resp;
+ struct cmdq_create_qp1 req;
+ struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
u32 qp_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
@@ -739,11 +819,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.qp_handle = cpu_to_le64(qp->qp_handle);
/* SQ */
- sq->hwq.max_elements = sq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL,
- &sq->hwq.max_elements,
- BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sg_info;
+ hwq_attr.depth = sq->max_wqe;
+ hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
goto exit;
@@ -778,11 +859,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (rq->max_wqe) {
- rq->hwq.max_elements = qp->rq.max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL,
- &rq->hwq.max_elements,
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.depth = qp->rq.max_wqe;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto fail_sq;
@@ -840,6 +922,15 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ qp->cctx = res->cctx;
+ sq->dbinfo.hwq = &sq->hwq;
+ sq->dbinfo.xid = qp->id;
+ sq->dbinfo.db = qp->dpi->dbr;
+ if (rq->max_wqe) {
+ rq->dbinfo.hwq = &rq->hwq;
+ rq->dbinfo.xid = qp->id;
+ rq->dbinfo.db = qp->dpi->dbr;
+ }
rcfw->qp_tbl[qp->id].qp_id = qp->id;
rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
@@ -848,10 +939,10 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
fail:
bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
- bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
+ bnxt_qplib_free_hwq(res, &rq->hwq);
kfree(rq->swq);
fail_sq:
- bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
+ bnxt_qplib_free_hwq(res, &sq->hwq);
kfree(sq->swq);
exit:
return rc;
@@ -860,7 +951,9 @@ exit:
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
unsigned long int psn_search, poff = 0;
+ struct bnxt_qplib_sg_info sginfo = {};
struct sq_psn_search **psn_search_ptr;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
@@ -887,12 +980,15 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
}
- sq->hwq.max_elements = sq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info,
- &sq->hwq.max_elements,
- BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
- psn_sz,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sg_info;
+ hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.depth = sq->max_wqe;
+ hwq_attr.aux_stride = psn_sz;
+ hwq_attr.aux_depth = hwq_attr.depth;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
goto exit;
@@ -956,12 +1052,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (rq->max_wqe) {
- rq->hwq.max_elements = rq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq,
- &rq->sg_info,
- &rq->hwq.max_elements,
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.depth = rq->max_wqe;
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto fail_sq;
@@ -1029,10 +1127,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req_size = xrrq->max_elements *
BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
req_size &= ~(PAGE_SIZE - 1);
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
- &xrrq->max_elements,
- BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
- 0, req_size, HWQ_TYPE_CTX);
+ sginfo.pgsize = req_size;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.depth = xrrq->max_elements;
+ hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
if (rc)
goto fail_buf_free;
pbl = &xrrq->pbl[PBL_LVL_0];
@@ -1044,11 +1149,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req_size = xrrq->max_elements *
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
req_size &= ~(PAGE_SIZE - 1);
-
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
- &xrrq->max_elements,
- BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
- 0, req_size, HWQ_TYPE_CTX);
+ sginfo.pgsize = req_size;
+ hwq_attr.depth = xrrq->max_elements;
+ hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
if (rc)
goto fail_orrq;
@@ -1064,9 +1168,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
- qp->cctx = res->cctx;
INIT_LIST_HEAD(&qp->sq_flush);
INIT_LIST_HEAD(&qp->rq_flush);
+ qp->cctx = res->cctx;
+ sq->dbinfo.hwq = &sq->hwq;
+ sq->dbinfo.xid = qp->id;
+ sq->dbinfo.db = qp->dpi->dbr;
+ if (rq->max_wqe) {
+ rq->dbinfo.hwq = &rq->hwq;
+ rq->dbinfo.xid = qp->id;
+ rq->dbinfo.db = qp->dpi->dbr;
+ }
rcfw->qp_tbl[qp->id].qp_id = qp->id;
rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
@@ -1074,17 +1186,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
fail:
if (qp->irrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
+ bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
if (qp->orrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
+ bnxt_qplib_free_hwq(res, &qp->orrq);
fail_buf_free:
bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
- bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
+ bnxt_qplib_free_hwq(res, &rq->hwq);
kfree(rq->swq);
fail_sq:
- bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
+ bnxt_qplib_free_hwq(res, &sq->hwq);
kfree(sq->swq);
exit:
return rc;
@@ -1440,16 +1552,16 @@ void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp)
{
bnxt_qplib_free_qp_hdr_buf(res, qp);
- bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
+ bnxt_qplib_free_hwq(res, &qp->sq.hwq);
kfree(qp->sq.swq);
- bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
+ bnxt_qplib_free_hwq(res, &qp->rq.hwq);
kfree(qp->rq.swq);
if (qp->irrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
+ bnxt_qplib_free_hwq(res, &qp->irrq);
if (qp->orrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
+ bnxt_qplib_free_hwq(res, &qp->orrq);
}
@@ -1506,16 +1618,8 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *sq = &qp->sq;
- u32 sw_prod;
- u64 val = 0;
- val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
- DBC_DBC_TYPE_SQ);
- val <<= 32;
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
- val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
- /* Flush all the WQE writes to HW */
- writeq(val, qp->dpi->dbr);
+ bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
@@ -1807,16 +1911,8 @@ done:
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *rq = &qp->rq;
- u32 sw_prod;
- u64 val = 0;
- val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
- DBC_DBC_TYPE_RQ);
- val <<= 32;
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
- val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
- /* Flush the writes to HW Rx WQE before the ringing Rx DB */
- writeq(val, qp->dpi->dbr);
+ bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
@@ -1896,48 +1992,22 @@ done:
}
/* CQ */
-
-/* Spinlock must be held */
-static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
-{
- u64 val = 0;
-
- val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
- DBC_DBC_TYPE_CQ_ARMENA;
- val <<= 32;
- /* Flush memory writes before enabling the CQ */
- writeq(val, cq->dbr_base);
-}
-
-static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
-{
- struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
- u32 sw_cons;
- u64 val = 0;
-
- /* Ring DB */
- val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
- val <<= 32;
- sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
- val |= (sw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
- /* flush memory writes before arming the CQ */
- writeq(val, cq->dpi->dbr);
-}
-
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_create_cq req;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
struct creq_create_cq_resp resp;
+ struct cmdq_create_cq req;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
int rc;
- cq->hwq.max_elements = cq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info,
- &cq->hwq.max_elements,
- BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.depth = cq->max_wqe;
+ hwq_attr.stride = sizeof(struct cq_base);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ hwq_attr.sginfo = &cq->sg_info;
+ rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
if (rc)
goto exit;
@@ -1976,7 +2046,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
goto fail;
cq->id = le32_to_cpu(resp.xid);
- cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
init_waitqueue_head(&cq->waitq);
INIT_LIST_HEAD(&cq->sqf_head);
@@ -1984,11 +2053,17 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
spin_lock_init(&cq->compl_lock);
spin_lock_init(&cq->flush_lock);
- bnxt_qplib_arm_cq_enable(cq);
+ cq->dbinfo.hwq = &cq->hwq;
+ cq->dbinfo.xid = cq->id;
+ cq->dbinfo.db = cq->dpi->dbr;
+ cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
+
+ bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
+
return 0;
fail:
- bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
+ bnxt_qplib_free_hwq(res, &cq->hwq);
exit:
return rc;
}
@@ -1998,6 +2073,7 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_cq req;
struct creq_destroy_cq_resp resp;
+ u16 total_cnq_events;
u16 cmd_flags = 0;
int rc;
@@ -2008,7 +2084,9 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
(void *)&resp, NULL, 0);
if (rc)
return rc;
- bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
+ total_cnq_events = le16_to_cpu(resp.total_cnq_events);
+ __wait_for_all_nqes(cq, total_cnq_events);
+ bnxt_qplib_free_hwq(res, &cq->hwq);
return 0;
}
@@ -2141,8 +2219,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
sq->send_phantom = true;
/* TODO: Only ARM if the previous SQE is ARMALL */
- bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
-
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
rc = -EAGAIN;
goto out;
}
@@ -2426,7 +2503,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
}
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
- cqe->length = (u32)le16_to_cpu(hwcqe->length);
+ cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
cqe->flags = le16_to_cpu(hwcqe->flags);
@@ -2812,7 +2889,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
}
if (cq->hwq.cons != raw_cons) {
cq->hwq.cons = raw_cons;
- bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
}
exit:
return num_cqes - budget;
@@ -2821,7 +2898,7 @@ exit:
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
if (arm_type)
- bnxt_qplib_arm_cq(cq, arm_type);
+ bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
/* Using cq->arm_state variable to track whether to issue cq handler */
atomic_set(&cq->arm_state, 1);
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 99e0a13cbefa..7edb70b6bb16 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -42,7 +42,7 @@
struct bnxt_qplib_srq {
struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi;
- void __iomem *dbr_base;
+ struct bnxt_qplib_db_info dbinfo;
u64 srq_handle;
u32 id;
u32 max_wqe;
@@ -236,6 +236,7 @@ struct bnxt_qplib_swqe {
struct bnxt_qplib_q {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
u16 q_full_delta;
@@ -370,7 +371,7 @@ struct bnxt_qplib_cqe {
#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
struct bnxt_qplib_cq {
struct bnxt_qplib_dpi *dpi;
- void __iomem *dbr_base;
+ struct bnxt_qplib_db_info dbinfo;
u32 max_wqe;
u32 id;
u16 count;
@@ -401,6 +402,7 @@ struct bnxt_qplib_cq {
* of the same QP while manipulating the flush list.
*/
spinlock_t flush_lock; /* QP flush management */
+ u16 cnq_events;
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
@@ -433,66 +435,32 @@ struct bnxt_qplib_cq {
NQ_DB_IDX_VALID | \
NQ_DB_IRQ_DIS)
-static inline void bnxt_qplib_ring_nq_db64(void __iomem *db, u32 index,
- u32 xid, bool arm)
-{
- u64 val;
-
- val = xid & DBC_DBC_XID_MASK;
- val |= DBC_DBC_PATH_ROCE;
- val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
- val <<= 32;
- val |= index & DBC_DBC_INDEX_MASK;
- writeq(val, db);
-}
-
-static inline void bnxt_qplib_ring_nq_db_rearm(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_nq_db64(db, index, xid, true);
- else
- writel(NQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK), db);
-}
+struct bnxt_qplib_nq_db {
+ struct bnxt_qplib_reg_desc reg;
+ struct bnxt_qplib_db_info dbinfo;
+};
-static inline void bnxt_qplib_ring_nq_db(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_nq_db64(db, index, xid, false);
- else
- writel(NQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK), db);
-}
+typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *cq);
+typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *srq, u8 event);
struct bnxt_qplib_nq {
- struct pci_dev *pdev;
- struct bnxt_qplib_res *res;
-
- int vector;
- cpumask_t mask;
- int budget;
- bool requested;
- struct tasklet_struct worker;
- struct bnxt_qplib_hwq hwq;
-
- u16 bar_reg;
- u32 bar_reg_off;
- u16 ring_id;
- void __iomem *bar_reg_iomem;
-
- int (*cqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *cq);
- int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_srq *srq,
- u8 event);
- struct workqueue_struct *cqn_wq;
- char name[32];
+ struct pci_dev *pdev;
+ struct bnxt_qplib_res *res;
+ char name[32];
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_nq_db nq_db;
+ u16 ring_id;
+ int msix_vec;
+ cpumask_t mask;
+ struct tasklet_struct nq_tasklet;
+ bool requested;
+ int budget;
+
+ cqn_handler_t cqn_handler;
+ srqn_handler_t srqn_handler;
+ struct workqueue_struct *cqn_wq;
};
struct bnxt_qplib_nq_work {
@@ -507,11 +475,8 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
- int (*cqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *cq),
- int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_srq *srq,
- u8 event));
+ cqn_handler_t cqn_handler,
+ srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
@@ -550,7 +515,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
-int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
unsigned long *flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 1291b12287a5..f01e864bb611 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -55,12 +55,14 @@ static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
u16 cbit;
int rc;
+ cmdq = &rcfw->cmdq;
cbit = cookie % rcfw->cmdq_depth;
- rc = wait_event_timeout(rcfw->waitq,
- !test_bit(cbit, rcfw->cmdq_bitmap),
+ rc = wait_event_timeout(cmdq->waitq,
+ !test_bit(cbit, cmdq->cmdq_bitmap),
msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
return rc ? 0 : -ETIMEDOUT;
};
@@ -68,15 +70,17 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
u16 cbit;
+ cmdq = &rcfw->cmdq;
cbit = cookie % rcfw->cmdq_depth;
- if (!test_bit(cbit, rcfw->cmdq_bitmap))
+ if (!test_bit(cbit, cmdq->cmdq_bitmap))
goto done;
do {
mdelay(1); /* 1m sec */
bnxt_qplib_service_creq((unsigned long)rcfw);
- } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
+ } while (test_bit(cbit, cmdq->cmdq_bitmap) && --count);
done:
return count ? 0 : -ETIMEDOUT;
};
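
Both wait helpers above key off the same bookkeeping: the 16-bit cookie assigned at submit time maps to one bit (cookie % queue depth) in cmdq_bitmap, set when the command is posted and cleared by the CREQ completion path, so waiting for completion is just waiting for that bit to drop. A hedged kernel-style sketch of the submit side, with illustrative names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Reserve the completion bit for 'cookie', then wait up to 'ms'
 * for the completion path to clear it. Illustrative only. */
static int wait_cmd_done(wait_queue_head_t *waitq, unsigned long *bitmap,
			 u16 cookie, u32 depth, u32 ms)
{
	u16 cbit = cookie % depth;

	set_bit(cbit, bitmap);		/* cleared by the completion irq */
	/* ... post the command, ring the doorbell ... */
	if (!wait_event_timeout(*waitq, !test_bit(cbit, bitmap),
				msecs_to_jiffies(ms)))
		return -ETIMEDOUT;
	return 0;
}
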
@@ -84,56 +88,60 @@ done:
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
struct creq_base *resp, void *sb, u8 is_block)
{
- struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
- struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_cmdqe *cmdqe, **hwq_ptr;
+ struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
+ struct bnxt_qplib_crsqe *crsqe;
u32 cmdq_depth = rcfw->cmdq_depth;
- struct bnxt_qplib_crsq *crsqe;
u32 sw_prod, cmdq_prod;
+ struct pci_dev *pdev;
unsigned long flags;
u32 size, opcode;
u16 cookie, cbit;
u8 *preq;
+ pdev = rcfw->pdev;
+
opcode = req->opcode;
- if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
+ if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
- dev_err(&rcfw->pdev->dev,
+ dev_err(&pdev->dev,
"RCFW not initialized, reject opcode 0x%x\n", opcode);
return -EINVAL;
}
- if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
+ if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
- dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n");
+ dev_err(&pdev->dev, "RCFW already initialized!\n");
return -EINVAL;
}
- if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
+ if (test_bit(FIRMWARE_TIMED_OUT, &cmdq->flags))
return -ETIMEDOUT;
/* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe
*/
- spin_lock_irqsave(&cmdq->lock, flags);
- if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
- dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ spin_lock_irqsave(&hwq->lock, flags);
+ if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {
+ dev_err(&pdev->dev, "RCFW: CMDQ is full!\n");
+ spin_unlock_irqrestore(&hwq->lock, flags);
return -EAGAIN;
}
- cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
+ cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
cbit = cookie % rcfw->cmdq_depth;
if (is_block)
cookie |= RCFW_CMD_IS_BLOCKING;
- set_bit(cbit, rcfw->cmdq_bitmap);
+ set_bit(cbit, cmdq->cmdq_bitmap);
req->cookie = cpu_to_le16(cookie);
crsqe = &rcfw->crsqe_tbl[cbit];
if (crsqe->resp) {
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ spin_unlock_irqrestore(&hwq->lock, flags);
return -EBUSY;
}
@@ -155,15 +163,15 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
BNXT_QPLIB_CMDQE_UNITS;
}
- cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
+ hwq_ptr = (struct bnxt_qplib_cmdqe **)hwq->pbl_ptr;
preq = (u8 *)req;
do {
/* Locate the next cmdq slot */
- sw_prod = HWQ_CMP(cmdq->prod, cmdq);
- cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
+ sw_prod = HWQ_CMP(hwq->prod, hwq);
+ cmdqe = &hwq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
[get_cmdq_idx(sw_prod, cmdq_depth)];
if (!cmdqe) {
- dev_err(&rcfw->pdev->dev,
+ dev_err(&pdev->dev,
"RCFW request failed with no cmdqe!\n");
goto done;
}
@@ -172,31 +180,27 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
preq += min_t(u32, size, sizeof(*cmdqe));
size -= min_t(u32, size, sizeof(*cmdqe));
- cmdq->prod++;
- rcfw->seq_num++;
+ hwq->prod++;
} while (size > 0);
+ cmdq->seq_num++;
- rcfw->seq_num++;
-
- cmdq_prod = cmdq->prod;
- if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
+ cmdq_prod = hwq->prod;
+ if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
/* The very first doorbell write
* is required to set this flag
* which prompts the FW to reset
* its internal pointers
*/
cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
- clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
+ clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
}
/* ring CMDQ DB */
wmb();
- writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
- rcfw->cmdq_bar_reg_prod_off);
- writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
- rcfw->cmdq_bar_reg_trig_off);
+ writel(cmdq_prod, cmdq->cmdq_mbox.prod);
+ writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
done:
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ spin_unlock_irqrestore(&hwq->lock, flags);
/* Return the CREQ response pointer */
return 0;
}
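
The doorbell sequence in __send_message() above preserves two orderings: a wmb() makes the CMDQE stores visible before the producer index is written, and the very first ring OR-s a flag bit into the producer value so the firmware resets its internal pointers. The ordering skeleton, sketched with an illustrative bit position:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define FIRST_RING_FLAG	31	/* illustrative bit position */

static void ring_cmdq(void __iomem *prod_reg, u32 prod, bool first)
{
	if (first)
		prod |= BIT(FIRST_RING_FLAG);	/* tell FW to reset */
	wmb();		/* commit queue entries before the doorbell */
	writel(prod, prod_reg);
}
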
@@ -236,7 +240,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/* timed out */
dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
- set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
+ set_bit(FIRMWARE_TIMED_OUT, &rcfw->cmdq.flags);
return rc;
}
@@ -253,6 +257,8 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_func_event *func_event)
{
+ int rc;
+
switch (func_event->event) {
case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
break;
@@ -286,37 +292,41 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
default:
return -EINVAL;
}
- return 0;
+
+ rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL);
+ return rc;
}
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event)
{
- struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
struct creq_qp_error_notification *err_event;
- struct bnxt_qplib_crsq *crsqe;
- unsigned long flags;
+ struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
+ struct bnxt_qplib_crsqe *crsqe;
struct bnxt_qplib_qp *qp;
u16 cbit, blocked = 0;
- u16 cookie;
+ struct pci_dev *pdev;
+ unsigned long flags;
__le16 mcookie;
+ u16 cookie;
+ int rc = 0;
u32 qp_id;
+ pdev = rcfw->pdev;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
qp_id = le32_to_cpu(err_event->xid);
qp = rcfw->qp_tbl[qp_id].qp_handle;
- dev_dbg(&rcfw->pdev->dev,
- "Received QP error notification\n");
- dev_dbg(&rcfw->pdev->dev,
+ dev_dbg(&pdev->dev, "Received QP error notification\n");
+ dev_dbg(&pdev->dev,
"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
if (!qp)
break;
bnxt_qplib_mark_qp_error(qp);
- rcfw->aeq_handler(rcfw, qp_event, qp);
+ rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
break;
default:
/*
@@ -328,7 +338,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
*
*/
- spin_lock_irqsave_nested(&cmdq->lock, flags,
+ spin_lock_irqsave_nested(&hwq->lock, flags,
SINGLE_DEPTH_NESTING);
cookie = le16_to_cpu(qp_event->cookie);
mcookie = qp_event->cookie;
@@ -342,44 +352,44 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
crsqe->resp = NULL;
} else {
if (crsqe->resp && crsqe->resp->cookie)
- dev_err(&rcfw->pdev->dev,
+ dev_err(&pdev->dev,
"CMD %s cookie sent=%#x, recd=%#x\n",
crsqe->resp ? "mismatch" : "collision",
crsqe->resp ? crsqe->resp->cookie : 0,
mcookie);
}
- if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
- dev_warn(&rcfw->pdev->dev,
+ if (!test_and_clear_bit(cbit, rcfw->cmdq.cmdq_bitmap))
+ dev_warn(&pdev->dev,
"CMD bit %d was not requested\n", cbit);
- cmdq->cons += crsqe->req_size;
+ hwq->cons += crsqe->req_size;
crsqe->req_size = 0;
if (!blocked)
- wake_up(&rcfw->waitq);
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ wake_up(&rcfw->cmdq.waitq);
+ spin_unlock_irqrestore(&hwq->lock, flags);
}
- return 0;
+ return rc;
}
/* SP - CREQ Completion handlers */
static void bnxt_qplib_service_creq(unsigned long data)
{
struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
- struct bnxt_qplib_hwq *creq = &rcfw->creq;
+ struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
- struct creq_base *creqe, **creq_ptr;
+ struct bnxt_qplib_hwq *hwq = &creq->hwq;
+ struct creq_base *creqe, **hwq_ptr;
u32 sw_cons, raw_cons;
unsigned long flags;
/* Service the CREQ until budget is over */
- spin_lock_irqsave(&creq->lock, flags);
- raw_cons = creq->cons;
+ spin_lock_irqsave(&hwq->lock, flags);
+ raw_cons = hwq->cons;
while (budget > 0) {
- sw_cons = HWQ_CMP(raw_cons, creq);
- creq_ptr = (struct creq_base **)creq->pbl_ptr;
- creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
- if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
+ sw_cons = HWQ_CMP(raw_cons, hwq);
+ hwq_ptr = (struct creq_base **)hwq->pbl_ptr;
+ creqe = &hwq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
+ if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
break;
/* The valid test of the entry must be done first before
* reading any further.
@@ -391,12 +401,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
case CREQ_BASE_TYPE_QP_EVENT:
bnxt_qplib_process_qp_event
(rcfw, (struct creq_qp_event *)creqe);
- rcfw->creq_qp_event_processed++;
+ creq->stats.creq_qp_event_processed++;
break;
case CREQ_BASE_TYPE_FUNC_EVENT:
if (!bnxt_qplib_process_func_event
(rcfw, (struct creq_func_event *)creqe))
- rcfw->creq_func_event_processed++;
+ creq->stats.creq_func_event_processed++;
else
dev_warn(&rcfw->pdev->dev,
"aeqe:%#x Not handled\n", type);
@@ -412,28 +422,30 @@ static void bnxt_qplib_service_creq(unsigned long data)
budget--;
}
- if (creq->cons != raw_cons) {
- creq->cons = raw_cons;
- bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
- raw_cons, creq->max_elements,
- rcfw->creq_ring_id, gen_p5);
+ if (hwq->cons != raw_cons) {
+ hwq->cons = raw_cons;
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
+ rcfw->res->cctx, true);
}
- spin_unlock_irqrestore(&creq->lock, flags);
+ spin_unlock_irqrestore(&hwq->lock, flags);
}
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_rcfw *rcfw = dev_instance;
- struct bnxt_qplib_hwq *creq = &rcfw->creq;
+ struct bnxt_qplib_creq_ctx *creq;
struct creq_base **creq_ptr;
+ struct bnxt_qplib_hwq *hwq;
u32 sw_cons;
+ creq = &rcfw->creq;
+ hwq = &creq->hwq;
/* Prefetch the CREQ element */
- sw_cons = HWQ_CMP(creq->cons, creq);
- creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
+ sw_cons = HWQ_CMP(hwq->cons, hwq);
+ creq_ptr = (struct creq_base **)creq->hwq.pbl_ptr;
prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
- tasklet_schedule(&rcfw->worker);
+ tasklet_schedule(&creq->creq_tasklet);
return IRQ_HANDLED;
}
@@ -452,7 +464,7 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
if (rc)
return rc;
- clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
+ clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
return 0;
}
@@ -520,9 +532,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
level = ctx->tim_tbl.level;
req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
__get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
- level = ctx->tqm_pde_level;
- req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);
+ level = ctx->tqm_ctx.pde.level;
+ req.tqm_pg_size_tqm_lvl =
+ (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
+ __get_pbl_pg_idx(&ctx->tqm_ctx.pde.pbl[level]);
req.qpc_page_dir =
cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
@@ -535,7 +548,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.tim_page_dir =
cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.tqm_page_dir =
- cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);
+ cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]);
req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
@@ -555,33 +568,46 @@ skip_ctx_setup:
NULL, 0);
if (rc)
return rc;
- set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
+ set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
return 0;
}
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
+ kfree(rcfw->cmdq.cmdq_bitmap);
kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
- bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
- bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
+ bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
+ bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
rcfw->pdev = NULL;
}
-int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx,
int qp_tbl_sz)
{
- u8 hwq_type;
-
- rcfw->pdev = pdev;
- rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
- hwq_type = bnxt_qplib_get_hwq_type(rcfw->res);
- if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL,
- &rcfw->creq.max_elements,
- BNXT_QPLIB_CREQE_UNITS,
- 0, PAGE_SIZE, hwq_type)) {
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ u32 bmap_size = 0;
+
+ rcfw->pdev = res->pdev;
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ rcfw->res = res;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = rcfw->res;
+ hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;
+ hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;
+ hwq_attr.type = bnxt_qplib_get_hwq_type(res);
+
+ if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
dev_err(&rcfw->pdev->dev,
"HW channel CREQ allocation failed\n");
goto fail;
@@ -591,23 +617,28 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
else
rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192;
- rcfw->cmdq.max_elements = rcfw->cmdq_depth;
- if (bnxt_qplib_alloc_init_hwq
- (rcfw->pdev, &rcfw->cmdq, NULL,
- &rcfw->cmdq.max_elements,
- BNXT_QPLIB_CMDQE_UNITS, 0,
- bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth),
- HWQ_TYPE_CTX)) {
+ sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
+ hwq_attr.depth = rcfw->cmdq_depth;
+ hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
dev_err(&rcfw->pdev->dev,
"HW channel CMDQ allocation failed\n");
goto fail;
}
- rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
+ rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
if (!rcfw->crsqe_tbl)
goto fail;
+ bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
+ cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
+ if (!cmdq->cmdq_bitmap)
+ goto fail;
+
+ cmdq->bmap_size = bmap_size;
+
rcfw->qp_tbl_size = qp_tbl_sz;
rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
GFP_KERNEL);
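
The bitmap sizing above is the standard idiom: BITS_TO_LONGS() rounds the bit count up to whole unsigned longs, and that word count times sizeof(unsigned long) is the allocation size. Checking the arithmetic in plain C, with local stand-ins for the kernel macros:

#include <stddef.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int depth = 8192;	/* e.g. the 8192-entry CMDQ case */
	size_t bmap_size = BITS_TO_LONGS(depth) * sizeof(unsigned long);

	/* 8192 bits -> 128 longs -> 1024 bytes on a 64-bit build */
	printf("%zu bytes\n", bmap_size);
	return 0;
}
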
@@ -623,137 +654,199 @@ fail:
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
+ struct bnxt_qplib_creq_ctx *creq;
- tasklet_disable(&rcfw->worker);
+ creq = &rcfw->creq;
+ tasklet_disable(&creq->creq_tasklet);
/* Mask h/w interrupts */
- bnxt_qplib_ring_creq_db(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
- rcfw->creq.max_elements, rcfw->creq_ring_id,
- gen_p5);
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
/* Sync with last running IRQ-handler */
- synchronize_irq(rcfw->vector);
+ synchronize_irq(creq->msix_vec);
if (kill)
- tasklet_kill(&rcfw->worker);
+ tasklet_kill(&creq->creq_tasklet);
- if (rcfw->requested) {
- free_irq(rcfw->vector, rcfw);
- rcfw->requested = false;
+ if (creq->requested) {
+ free_irq(creq->msix_vec, rcfw);
+ creq->requested = false;
}
}
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
unsigned long indx;
+ creq = &rcfw->creq;
+ cmdq = &rcfw->cmdq;
+ /* Make sure the HW channel is stopped! */
bnxt_qplib_rcfw_stop_irq(rcfw, true);
- iounmap(rcfw->cmdq_bar_reg_iomem);
- iounmap(rcfw->creq_bar_reg_iomem);
+ iounmap(cmdq->cmdq_mbox.reg.bar_reg);
+ iounmap(creq->creq_db.reg.bar_reg);
- indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
- if (indx != rcfw->bmap_size)
+ indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size);
+ if (indx != cmdq->bmap_size)
dev_err(&rcfw->pdev->dev,
"disabling RCFW with pending cmd-bit %lx\n", indx);
- kfree(rcfw->cmdq_bitmap);
- rcfw->bmap_size = 0;
- rcfw->cmdq_bar_reg_iomem = NULL;
- rcfw->creq_bar_reg_iomem = NULL;
- rcfw->aeq_handler = NULL;
- rcfw->vector = 0;
+ cmdq->cmdq_mbox.reg.bar_reg = NULL;
+ creq->creq_db.reg.bar_reg = NULL;
+ creq->aeq_handler = NULL;
+ creq->msix_vec = 0;
}
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
bool need_init)
{
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
+ struct bnxt_qplib_creq_ctx *creq;
int rc;
- if (rcfw->requested)
+ creq = &rcfw->creq;
+
+ if (creq->requested)
return -EFAULT;
- rcfw->vector = msix_vector;
+ creq->msix_vec = msix_vector;
if (need_init)
- tasklet_init(&rcfw->worker,
+ tasklet_init(&creq->creq_tasklet,
bnxt_qplib_service_creq, (unsigned long)rcfw);
else
- tasklet_enable(&rcfw->worker);
- rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+ tasklet_enable(&creq->creq_tasklet);
+ rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
"bnxt_qplib_creq", rcfw);
if (rc)
return rc;
- rcfw->requested = true;
- bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
- rcfw->creq.cons, rcfw->creq.max_elements,
- rcfw->creq_ring_id, gen_p5);
+ creq->requested = true;
+
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, true);
return 0;
}
-int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
- struct bnxt_qplib_rcfw *rcfw,
- int msix_vector,
- int cp_bar_reg_off, int virt_fn,
- int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- void *, void *))
+static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw, bool is_vf)
{
- resource_size_t res_base;
- struct cmdq_init init;
- u16 bmap_size;
- int rc;
+ struct bnxt_qplib_cmdq_mbox *mbox;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
+ u16 prod_offt;
+ int rc = 0;
- /* General */
- rcfw->seq_num = 0;
- set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
- bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
- rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
- if (!rcfw->cmdq_bitmap)
- return -ENOMEM;
- rcfw->bmap_size = bmap_size;
+ pdev = rcfw->pdev;
+ mbox = &rcfw->cmdq.cmdq_mbox;
- /* CMDQ */
- rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
- res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
- if (!res_base)
+ mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;
+ mbox->reg.len = RCFW_COMM_SIZE;
+ mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
+ if (!mbox->reg.bar_base) {
+ dev_err(&pdev->dev,
+ "QPLIB: CMDQ BAR region %d resc start is 0!\n",
+ mbox->reg.bar_id);
return -ENOMEM;
+ }
- rcfw->cmdq_bar_reg_iomem = ioremap(res_base +
- RCFW_COMM_BASE_OFFSET,
- RCFW_COMM_SIZE);
- if (!rcfw->cmdq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n",
- rcfw->cmdq_bar_reg);
+ bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;
+ mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
+ if (!mbox->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "QPLIB: CMDQ BAR region %d mapping failed\n",
+ mbox->reg.bar_id);
return -ENOMEM;
}
- rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
- RCFW_PF_COMM_PROD_OFFSET;
+ prod_offt = is_vf ? RCFW_VF_COMM_PROD_OFFSET :
+ RCFW_PF_COMM_PROD_OFFSET;
+ mbox->prod = (void __iomem *)(mbox->reg.bar_reg + prod_offt);
+ mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET);
+ return rc;
+}
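
bnxt_qplib_map_cmdq_mbox() maps the mailbox once and then derives the producer and trigger registers as fixed offsets inside that mapping, choosing the VF or PF producer offset up front. Deriving sub-registers from one __iomem base looks like this in general (a hedged sketch with illustrative offsets):

#include <linux/io.h>
#include <linux/types.h>

struct mbox_regs {
	void __iomem *base;	/* whole mailbox window */
	void __iomem *prod;	/* producer index register */
	void __iomem *db;	/* trigger/doorbell register */
};

static void mbox_setup(struct mbox_regs *m, void __iomem *base, bool is_vf)
{
	m->base = base;
	m->prod = base + (is_vf ? 0xc : 0x8);	/* illustrative offsets */
	m->db = base + 0x100;			/* illustrative offset */
}
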
- rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
+static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
+{
+ struct bnxt_qplib_creq_db *creq_db;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
- /* CREQ */
- rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
- res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
- if (!res_base)
- dev_err(&rcfw->pdev->dev,
- "CREQ BAR region %d resc start is 0!\n",
- rcfw->creq_bar_reg);
+ pdev = rcfw->pdev;
+ creq_db = &rcfw->creq.creq_db;
+
+ creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
+ creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
+ if (!creq_db->reg.bar_base)
+ dev_err(&pdev->dev,
+ "QPLIB: CREQ BAR region %d resource start is 0!",
+ creq_db->reg.bar_id);
+
+ bar_reg = creq_db->reg.bar_base + reg_offt;
/* Unconditionally map 8 bytes to support 57500 series */
- rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off,
- 8);
- if (!rcfw->creq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
- rcfw->creq_bar_reg);
- iounmap(rcfw->cmdq_bar_reg_iomem);
- rcfw->cmdq_bar_reg_iomem = NULL;
+ creq_db->reg.len = 8;
+ creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
+ if (!creq_db->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "QPLIB: CREQ BAR region %d mapping failed",
+ creq_db->reg.bar_id);
return -ENOMEM;
}
- rcfw->creq_qp_event_processed = 0;
- rcfw->creq_func_event_processed = 0;
+ creq_db->dbinfo.db = creq_db->reg.bar_reg;
+ creq_db->dbinfo.hwq = &rcfw->creq.hwq;
+ creq_db->dbinfo.xid = rcfw->creq.ring_id;
+ return 0;
+}
- if (aeq_handler)
- rcfw->aeq_handler = aeq_handler;
- init_waitqueue_head(&rcfw->waitq);
+static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_cmdq_mbox *mbox;
+ struct cmdq_init init = {0};
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ mbox = &cmdq->cmdq_mbox;
+
+ init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
+ init.cmdq_size_cmdq_lvl =
+ cpu_to_le16(((rcfw->cmdq_depth <<
+ CMDQ_INIT_CMDQ_SIZE_SFT) &
+ CMDQ_INIT_CMDQ_SIZE_MASK) |
+ ((cmdq->hwq.level <<
+ CMDQ_INIT_CMDQ_LVL_SFT) &
+ CMDQ_INIT_CMDQ_LVL_MASK));
+ init.creq_ring_id = cpu_to_le16(creq->ring_id);
+ /* Write to the Bono mailbox register */
+ __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
+}
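
bnxt_qplib_start_rcfw() packs the queue depth and PBL level into one little-endian 16-bit field with the usual shift-and-mask dance before copying the init block to the mailbox via __iowrite32_copy(). The packing, modeled in userspace with illustrative shift/mask values rather than the CMDQ_INIT_* constants:

#include <stdint.h>
#include <stdio.h>

#define SIZE_SFT	0	/* illustrative */
#define SIZE_MASK	0x00ffU
#define LVL_SFT		8
#define LVL_MASK	0x0300U

static uint16_t pack_size_lvl(uint16_t depth_log2, uint16_t level)
{
	return ((depth_log2 << SIZE_SFT) & SIZE_MASK) |
	       ((level << LVL_SFT) & LVL_MASK);
}

int main(void)
{
	printf("%#x\n", pack_size_lvl(13, 2));	/* 8192 entries, level 2 */
	return 0;
}
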
+
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
+ int msix_vector,
+ int cp_bar_reg_off, int virt_fn,
+ aeq_handler_t aeq_handler)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ int rc;
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+
+ /* Clear to defaults */
+
+ cmdq->seq_num = 0;
+ set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
+ init_waitqueue_head(&cmdq->waitq);
+
+ creq->stats.creq_qp_event_processed = 0;
+ creq->stats.creq_func_event_processed = 0;
+ creq->aeq_handler = aeq_handler;
+
+ rc = bnxt_qplib_map_cmdq_mbox(rcfw, virt_fn);
+ if (rc)
+ return rc;
+
+ rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
+ if (rc)
+ return rc;
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
if (rc) {
@@ -763,16 +856,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
return rc;
}
- init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
- init.cmdq_size_cmdq_lvl = cpu_to_le16(
- ((rcfw->cmdq_depth << CMDQ_INIT_CMDQ_SIZE_SFT) &
- CMDQ_INIT_CMDQ_SIZE_MASK) |
- ((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
- CMDQ_INIT_CMDQ_LVL_MASK));
- init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);
+ bnxt_qplib_start_rcfw(rcfw);
- /* Write to the Bono mailbox register */
- __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
return 0;
}
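The packing in bnxt_qplib_start_rcfw() above folds the queue depth and the PBL indirection level into one 16-bit field with shift/mask pairs before converting to little-endian with cpu_to_le16(). A minimal userspace sketch of the same packing, using illustrative shift/mask values that stand in for the real roce_hsi.h constants (assumptions, not the firmware's actual layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real constants live in roce_hsi.h. */
#define CMDQ_SIZE_SFT 0
#define CMDQ_SIZE_MASK 0x0fffU
#define CMDQ_LVL_SFT 12
#define CMDQ_LVL_MASK 0x3000U

static uint16_t pack_size_lvl(uint16_t depth, uint16_t level)
{
	/* Same pattern as bnxt_qplib_start_rcfw(): shift each field into
	 * place, then mask so neighbouring fields cannot be corrupted.
	 */
	return ((depth << CMDQ_SIZE_SFT) & CMDQ_SIZE_MASK) |
	       ((level << CMDQ_LVL_SFT) & CMDQ_LVL_MASK);
}

int main(void)
{
	/* depth 256 at PBL level 1 -> 0x1100 */
	printf("0x%04x\n", pack_size_lvl(256, 1));
	return 0;
}

The mask step matters: a depth larger than the field width would otherwise silently overwrite the level bits.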
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index dfeadc192e17..411fce3493b6 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -206,8 +206,9 @@ static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons,
#define CREQ_ENTRY_POLL_BUDGET 0x100
/* HWQ */
+typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *);
-struct bnxt_qplib_crsq {
+struct bnxt_qplib_crsqe {
struct creq_qp_event *resp;
u32 req_size;
};
@@ -225,41 +226,53 @@ struct bnxt_qplib_qp_node {
#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
+#define FIRMWARE_INITIALIZED_FLAG (0)
+#define FIRMWARE_FIRST_FLAG (31)
+#define FIRMWARE_TIMED_OUT (3)
+struct bnxt_qplib_cmdq_mbox {
+ struct bnxt_qplib_reg_desc reg;
+ void __iomem *prod;
+ void __iomem *db;
+};
+
+struct bnxt_qplib_cmdq_ctx {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_cmdq_mbox cmdq_mbox;
+ wait_queue_head_t waitq;
+ unsigned long flags;
+ unsigned long *cmdq_bitmap;
+ u32 bmap_size;
+ u32 seq_num;
+};
+
+struct bnxt_qplib_creq_db {
+ struct bnxt_qplib_reg_desc reg;
+ struct bnxt_qplib_db_info dbinfo;
+};
+
+struct bnxt_qplib_creq_stat {
+ u64 creq_qp_event_processed;
+ u64 creq_func_event_processed;
+};
+
+struct bnxt_qplib_creq_ctx {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_creq_db creq_db;
+ struct bnxt_qplib_creq_stat stats;
+ struct tasklet_struct creq_tasklet;
+ aeq_handler_t aeq_handler;
+ u16 ring_id;
+ int msix_vec;
+ bool requested; /* irq handler installed */
+};
+
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
struct bnxt_qplib_res *res;
- int vector;
- struct tasklet_struct worker;
- bool requested;
- unsigned long *cmdq_bitmap;
- u32 bmap_size;
- unsigned long flags;
-#define FIRMWARE_INITIALIZED_FLAG 0
-#define FIRMWARE_FIRST_FLAG 31
-#define FIRMWARE_TIMED_OUT 3
- wait_queue_head_t waitq;
- int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- void *, void *);
- u32 seq_num;
-
- /* Bar region info */
- void __iomem *cmdq_bar_reg_iomem;
- u16 cmdq_bar_reg;
- u16 cmdq_bar_reg_prod_off;
- u16 cmdq_bar_reg_trig_off;
- u16 creq_ring_id;
- u16 creq_bar_reg;
- void __iomem *creq_bar_reg_iomem;
-
- /* Cmd-Resp and Async Event notification queue */
- struct bnxt_qplib_hwq creq;
- u64 creq_qp_event_processed;
- u64 creq_func_event_processed;
-
- /* Actual Cmd and Resp Queues */
- struct bnxt_qplib_hwq cmdq;
- struct bnxt_qplib_crsq *crsqe_tbl;
+ struct bnxt_qplib_cmdq_ctx cmdq;
+ struct bnxt_qplib_creq_ctx creq;
+ struct bnxt_qplib_crsqe *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
u64 oos_prev;
@@ -268,7 +281,7 @@ struct bnxt_qplib_rcfw {
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
-int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx,
int qp_tbl_sz);
@@ -276,12 +289,10 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
bool need_init);
-int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
- struct bnxt_qplib_rcfw *rcfw,
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
- int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- void *aeqe, void *obj));
+ aeq_handler_t aeq_handler);
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
struct bnxt_qplib_rcfw *rcfw,
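The new bnxt_qplib_cmdq_ctx keeps the FIRMWARE_* state as bit numbers inside a single unsigned long flags word, manipulated with set_bit()/test_bit() as seen in bnxt_qplib_enable_rcfw_channel(). A hedged userspace sketch of that idiom, with plain (non-atomic) bit arithmetic standing in for the kernel's atomic bitops:

#include <stdio.h>

/* Bit numbers, mirroring the FIRMWARE_* defines above. */
#define FW_INITIALIZED 0
#define FW_TIMED_OUT 3
#define FW_FIRST 31

/* Non-atomic stand-ins for the kernel's set_bit()/test_bit(). */
static void set_bit_ul(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_bit_ul(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

int main(void)
{
	unsigned long flags = 0;

	set_bit_ul(FW_FIRST, &flags);
	printf("first=%d timed_out=%d\n",
	       test_bit_ul(FW_FIRST, &flags),
	       test_bit_ul(FW_TIMED_OUT, &flags));
	return 0;
}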
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 60ea1b924b67..cab1adf1fed9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -44,6 +44,7 @@
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -55,9 +56,10 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats);
/* PBL */
-static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
+static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
bool is_umem)
{
+ struct pci_dev *pdev = res->pdev;
int i;
if (!is_umem) {
@@ -74,35 +76,56 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
pbl->pg_arr[i] = NULL;
}
}
- kfree(pbl->pg_arr);
+ vfree(pbl->pg_arr);
pbl->pg_arr = NULL;
- kfree(pbl->pg_map_arr);
+ vfree(pbl->pg_map_arr);
pbl->pg_map_arr = NULL;
pbl->pg_count = 0;
pbl->pg_size = 0;
}
-static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
- struct scatterlist *sghead, u32 pages,
- u32 nmaps, u32 pg_size)
+static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
{
+ struct scatterlist *sghead = sginfo->sghead;
struct sg_dma_page_iter sg_iter;
+ int i = 0;
+
+ for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
+ pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+ pbl->pg_arr[i] = NULL;
+ pbl->pg_count++;
+ i++;
+ }
+}
+
+static int __alloc_pbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
+{
+ struct pci_dev *pdev = res->pdev;
+ struct scatterlist *sghead;
bool is_umem = false;
+ u32 pages;
int i;
+ if (sginfo->nopte)
+ return 0;
+ pages = sginfo->npages;
+ sghead = sginfo->sghead;
/* page ptr arrays */
- pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+ pbl->pg_arr = vmalloc(pages * sizeof(void *));
if (!pbl->pg_arr)
return -ENOMEM;
- pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
+ pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
if (!pbl->pg_map_arr) {
- kfree(pbl->pg_arr);
+ vfree(pbl->pg_arr);
pbl->pg_arr = NULL;
return -ENOMEM;
}
pbl->pg_count = 0;
- pbl->pg_size = pg_size;
+ pbl->pg_size = sginfo->pgsize;
if (!sghead) {
for (i = 0; i < pages; i++) {
@@ -115,25 +138,19 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
pbl->pg_count++;
}
} else {
- i = 0;
is_umem = true;
- for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) {
- pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
- pbl->pg_arr[i] = NULL;
- pbl->pg_count++;
- i++;
- }
+ bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
}
return 0;
-
fail:
- __free_pbl(pdev, pbl, is_umem);
+ __free_pbl(res, pbl, is_umem);
return -ENOMEM;
}
/* HWQ */
-void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_hwq *hwq)
{
int i;
@@ -144,9 +161,9 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
for (i = 0; i < hwq->level + 1; i++) {
if (i == hwq->level)
- __free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
+ __free_pbl(res, &hwq->pbl[i], hwq->is_user);
else
- __free_pbl(pdev, &hwq->pbl[i], false);
+ __free_pbl(res, &hwq->pbl[i], false);
}
hwq->level = PBL_LVL_MAX;
@@ -158,79 +175,113 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
}
/* All HWQs are power of 2 in size */
-int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
- struct bnxt_qplib_sg_info *sg_info,
- u32 *elements, u32 element_size, u32 aux,
- u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
+
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_hwq_attr *hwq_attr)
{
- u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0;
+ u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
+ struct bnxt_qplib_sg_info sginfo = {};
+ u32 depth, stride, npbl, npde;
dma_addr_t *src_phys_ptr, **dst_virt_ptr;
struct scatterlist *sghead = NULL;
- int i, rc;
-
+ struct bnxt_qplib_res *res;
+ struct pci_dev *pdev;
+ int i, rc, lvl;
+
+ res = hwq_attr->res;
+ pdev = res->pdev;
+ sghead = hwq_attr->sginfo->sghead;
+ pg_size = hwq_attr->sginfo->pgsize;
hwq->level = PBL_LVL_MAX;
- slots = roundup_pow_of_two(*elements);
- if (aux) {
- aux_size = roundup_pow_of_two(aux);
- aux_pages = (slots * aux_size) / pg_size;
- if ((slots * aux_size) % pg_size)
+ depth = roundup_pow_of_two(hwq_attr->depth);
+ stride = roundup_pow_of_two(hwq_attr->stride);
+ if (hwq_attr->aux_depth) {
+ aux_slots = hwq_attr->aux_depth;
+ aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
+ aux_pages = (aux_slots * aux_size) / pg_size;
+ if ((aux_slots * aux_size) % pg_size)
aux_pages++;
}
- size = roundup_pow_of_two(element_size);
-
- if (sg_info)
- sghead = sg_info->sglist;
if (!sghead) {
hwq->is_user = false;
- pages = (slots * size) / pg_size + aux_pages;
- if ((slots * size) % pg_size)
- pages++;
- if (!pages)
+ npages = (depth * stride) / pg_size + aux_pages;
+ if ((depth * stride) % pg_size)
+ npages++;
+ if (!npages)
return -EINVAL;
- maps = 0;
+ hwq_attr->sginfo->npages = npages;
} else {
hwq->is_user = true;
- pages = sg_info->npages;
- maps = sg_info->nmap;
+ npages = hwq_attr->sginfo->npages;
+ npages = (npages * PAGE_SIZE) /
+ BIT_ULL(hwq_attr->sginfo->pgshft);
+ if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
+ BIT_ULL(hwq_attr->sginfo->pgshft))
+ if (!npages)
+ npages++;
}
- /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
- if (sghead && (pages == MAX_PBL_LVL_0_PGS))
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
- pages, maps, pg_size);
- else
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL,
- 1, 0, pg_size);
- if (rc)
- goto fail;
-
- hwq->level = PBL_LVL_0;
+ if (npages == MAX_PBL_LVL_0_PGS) {
+ /* This request is Level 0, map PTE */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_0;
+ }
- if (pages > MAX_PBL_LVL_0_PGS) {
- if (pages > MAX_PBL_LVL_1_PGS) {
+ if (npages > MAX_PBL_LVL_0_PGS) {
+ if (npages > MAX_PBL_LVL_1_PGS) {
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+ 0 : PTU_PTE_VALID;
/* 2 levels of indirection */
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
- MAX_PBL_LVL_1_PGS_FOR_LVL_2,
- 0, pg_size);
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ npde = npbl >> MAX_PDL_LVL_SHIFT;
+ if (npbl % BIT(MAX_PDL_LVL_SHIFT))
+ npde++;
+ /* Alloc PDE pages */
+ sginfo.pgsize = npde * pg_size;
+ sginfo.npages = 1;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
+
+ /* Alloc PBL pages */
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
if (rc)
goto fail;
- /* Fill in lvl0 PBL */
+ /* Fill PDL with PBL page pointers */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
- dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
- src_phys_ptr[i] | PTU_PDE_VALID;
- hwq->level = PBL_LVL_1;
-
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
- pages, maps, pg_size);
+ if (hwq_attr->type == HWQ_TYPE_MR) {
+ /* For MR it is expected that we supply only 1 contiguous
+ * page i.e. only 1 entry in the PDL that will contain
+ * all the PBLs for the user supplied memory region
+ */
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+ i++)
+ dst_virt_ptr[0][i] = src_phys_ptr[i] |
+ flag;
+ } else {
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+ i++)
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] |
+ PTU_PDE_VALID;
+ }
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
+ hwq_attr->sginfo);
if (rc)
goto fail;
-
- /* Fill in lvl1 PBL */
+ hwq->level = PBL_LVL_2;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBLs with PTE pointers */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
@@ -238,7 +289,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] | PTU_PTE_VALID;
}
- if (hwq_type == HWQ_TYPE_QUEUE) {
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
/* Find the last pg of the size */
i = hwq->pbl[PBL_LVL_2].pg_count;
dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
@@ -248,25 +299,36 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
[PTR_IDX(i - 2)] |=
PTU_PTE_NEXT_TO_LAST;
}
- hwq->level = PBL_LVL_2;
- } else {
- u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
- PTU_PTE_VALID;
+ } else { /* pages < 512: npbl = 1, npde = 0 */
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+ 0 : PTU_PTE_VALID;
/* 1 level of indirection */
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
- pages, maps, pg_size);
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ /* Alloc PBL page */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
if (rc)
goto fail;
- /* Fill in lvl0 PBL */
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
+ hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_1;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBL with PTE pointers */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] | flag;
- }
- if (hwq_type == HWQ_TYPE_QUEUE) {
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
/* Find the last pg of the size */
i = hwq->pbl[PBL_LVL_1].pg_count;
dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
@@ -276,42 +338,141 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
[PTR_IDX(i - 2)] |=
PTU_PTE_NEXT_TO_LAST;
}
- hwq->level = PBL_LVL_1;
}
}
- hwq->pdev = pdev;
- spin_lock_init(&hwq->lock);
+done:
hwq->prod = 0;
hwq->cons = 0;
- *elements = hwq->max_elements = slots;
- hwq->element_size = size;
-
+ hwq->pdev = pdev;
+ hwq->depth = hwq_attr->depth;
+ hwq->max_elements = depth;
+ hwq->element_size = stride;
/* For direct access to the elements */
- hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
- hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;
+ lvl = hwq->level;
+ if (hwq_attr->sginfo->nopte && hwq->level)
+ lvl = hwq->level - 1;
+ hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
+ hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
+ spin_lock_init(&hwq->lock);
return 0;
-
fail:
- bnxt_qplib_free_hwq(pdev, hwq);
+ bnxt_qplib_free_hwq(res, hwq);
return -ENOMEM;
}
/* Context Tables */
-void bnxt_qplib_free_ctx(struct pci_dev *pdev,
+void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx)
{
int i;
- bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
- bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
- bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
- bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
- bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
- bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
- bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
+ bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
+ /* restore original pde level before destroy */
+ ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
+ bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
+ bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
+}
+
+static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_tqm_ctx *tqmctx;
+ int rc = 0;
+ int i;
+
+ tqmctx = &ctx->tqm_ctx;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = res;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ hwq_attr.depth = 512;
+ hwq_attr.stride = sizeof(u64);
+ /* Alloc pde buffer */
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
+ if (rc)
+ goto out;
+ /* Save original pde level */
+ tqmctx->pde_level = tqmctx->pde.level;
+
+ hwq_attr.stride = 1;
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
+ if (!tqmctx->qcount[i])
+ continue;
+ hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
+ if (rc)
+ goto out;
+ }
+out:
+ return rc;
+}
+
+static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
+{
+ struct bnxt_qplib_hwq *tbl;
+ dma_addr_t *dma_ptr;
+ __le64 **pbl_ptr, *ptr;
+ int i, j, k;
+ int fnz_idx = -1;
+ int pg_count;
+
+ pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;
+
+ for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
+ i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
+ tbl = &ctx->qtbl[i];
+ if (!tbl->max_elements)
+ continue;
+ if (fnz_idx == -1)
+ fnz_idx = i; /* first non-zero index */
+ switch (tbl->level) {
+ case PBL_LVL_2:
+ pg_count = tbl->pbl[PBL_LVL_1].pg_count;
+ for (k = 0; k < pg_count; k++) {
+ ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
+ dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
+ *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
+ }
+ break;
+ case PBL_LVL_1:
+ case PBL_LVL_0:
+ default:
+ ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
+ *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
+ PTU_PTE_VALID);
+ break;
+ }
+ }
+ if (fnz_idx == -1)
+ fnz_idx = 0;
+ /* update pde level as per page table programming */
+ ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
+ ctx->qtbl[fnz_idx].level + 1;
+}
+
+static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
+{
+ int rc = 0;
+
+ rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
+ if (rc)
+ goto fail;
+
+ bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
+fail:
+ return rc;
}
/*
@@ -335,120 +496,72 @@ void bnxt_qplib_free_ctx(struct pci_dev *pdev,
* Returns:
* 0 if success, else -ERRORS
*/
-int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
+int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx,
bool virt_fn, bool is_p5)
{
- int i, j, k, rc = 0;
- int fnz_idx = -1;
- __le64 **pbl_ptr;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ int rc = 0;
if (virt_fn || is_p5)
goto stats_alloc;
/* QPC Tables */
- ctx->qpc_tbl.max_elements = ctx->qpc_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL,
- &ctx->qpc_tbl.max_elements,
- BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo = &sginfo;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = ctx->qpc_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
if (rc)
goto fail;
/* MRW Tables */
- ctx->mrw_tbl.max_elements = ctx->mrw_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL,
- &ctx->mrw_tbl.max_elements,
- BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ hwq_attr.depth = ctx->mrw_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
if (rc)
goto fail;
/* SRQ Tables */
- ctx->srqc_tbl.max_elements = ctx->srqc_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL,
- &ctx->srqc_tbl.max_elements,
- BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ hwq_attr.depth = ctx->srqc_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
if (rc)
goto fail;
/* CQ Tables */
- ctx->cq_tbl.max_elements = ctx->cq_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL,
- &ctx->cq_tbl.max_elements,
- BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ hwq_attr.depth = ctx->cq_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
if (rc)
goto fail;
/* TQM Buffer */
- ctx->tqm_pde.max_elements = 512;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL,
- &ctx->tqm_pde.max_elements, sizeof(u64),
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
+ rc = bnxt_qplib_setup_tqm_rings(res, ctx);
if (rc)
goto fail;
-
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
- if (!ctx->tqm_count[i])
- continue;
- ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
- ctx->tqm_count[i];
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL,
- &ctx->tqm_tbl[i].max_elements, 1,
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
- if (rc)
- goto fail;
- }
- pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
- for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
- i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
- if (!ctx->tqm_tbl[i].max_elements)
- continue;
- if (fnz_idx == -1)
- fnz_idx = i;
- switch (ctx->tqm_tbl[i].level) {
- case PBL_LVL_2:
- for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
- k++)
- pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
- cpu_to_le64(
- ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
- | PTU_PTE_VALID);
- break;
- case PBL_LVL_1:
- case PBL_LVL_0:
- default:
- pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
- ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
- PTU_PTE_VALID);
- break;
- }
- }
- if (fnz_idx == -1)
- fnz_idx = 0;
- ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
- PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;
-
/* TIM Buffer */
ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL,
- &ctx->tim_tbl.max_elements, 1,
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
+ hwq_attr.depth = ctx->qpc_count * 16;
+ hwq_attr.stride = 1;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
if (rc)
goto fail;
-
stats_alloc:
/* Stats */
- rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
if (rc)
goto fail;
return 0;
fail:
- bnxt_qplib_free_ctx(pdev, ctx);
+ bnxt_qplib_free_ctx(res, ctx);
return rc;
}
@@ -808,9 +921,6 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
-
- res->netdev = NULL;
- res->pdev = NULL;
}
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
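In the two-level path of bnxt_qplib_alloc_init_hwq() above, the PBL and PDE page counts are derived by right-shifting the PTE page count by MAX_PBL_LVL_1_PGS_SHIFT (512 64-bit pointers fit in a 4K page, hence a shift of 9) and rounding up on any remainder. A small worked sketch of that sizing arithmetic:

#include <stdio.h>

#define PGS_SHIFT 9 /* 512 u64 pointers per 4K page */

/* Round-up division by 512, as done for npbl and npde in
 * bnxt_qplib_alloc_init_hwq().
 */
static unsigned int level_pages(unsigned int entries)
{
	unsigned int pages = entries >> PGS_SHIFT;

	if (entries % (1U << PGS_SHIFT))
		pages++;
	return pages;
}

int main(void)
{
	unsigned int npages = 1500; /* PTE pages needed by the queue */
	unsigned int npbl = level_pages(npages); /* -> 3 PBL pages */
	unsigned int npde = level_pages(npbl); /* -> 1 PDE page */

	printf("npages=%u npbl=%u npde=%u\n", npages, npbl, npde);
	return 0;
}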
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index aaa76d792185..95b645dbbc2d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -55,7 +55,8 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
enum bnxt_qplib_hwq_type {
HWQ_TYPE_CTX,
HWQ_TYPE_QUEUE,
- HWQ_TYPE_L2_CMPL
+ HWQ_TYPE_L2_CMPL,
+ HWQ_TYPE_MR
};
#define MAX_PBL_LVL_0_PGS 1
@@ -63,6 +64,7 @@ enum bnxt_qplib_hwq_type {
#define MAX_PBL_LVL_1_PGS_SHIFT 9
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
#define MAX_PBL_LVL_2_PGS (256 * 512)
+#define MAX_PDL_LVL_SHIFT 9
enum bnxt_qplib_pbl_lvl {
PBL_LVL_0,
@@ -78,6 +80,13 @@ enum bnxt_qplib_pbl_lvl {
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
+struct bnxt_qplib_reg_desc {
+ u8 bar_id;
+ resource_size_t bar_base;
+ void __iomem *bar_reg;
+ size_t len;
+};
+
struct bnxt_qplib_pbl {
u32 pg_count;
u32 pg_size;
@@ -85,17 +94,37 @@ struct bnxt_qplib_pbl {
dma_addr_t *pg_map_arr;
};
+struct bnxt_qplib_sg_info {
+ struct scatterlist *sghead;
+ u32 nmap;
+ u32 npages;
+ u32 pgshft;
+ u32 pgsize;
+ bool nopte;
+};
+
+struct bnxt_qplib_hwq_attr {
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_sg_info *sginfo;
+ enum bnxt_qplib_hwq_type type;
+ u32 depth;
+ u32 stride;
+ u32 aux_stride;
+ u32 aux_depth;
+};
+
struct bnxt_qplib_hwq {
struct pci_dev *pdev;
/* lock to protect qplib_hwq */
spinlock_t lock;
- struct bnxt_qplib_pbl pbl[PBL_LVL_MAX];
+ struct bnxt_qplib_pbl pbl[PBL_LVL_MAX + 1];
enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */
/* ptr for easy access to the PBL entries */
void **pbl_ptr;
/* ptr for easy access to the dma_addr */
dma_addr_t *pbl_dma_ptr;
u32 max_elements;
+ u32 depth;
u16 element_size; /* Size of each entry */
u32 prod; /* raw */
@@ -104,6 +133,13 @@ struct bnxt_qplib_hwq {
u8 is_user;
};
+struct bnxt_qplib_db_info {
+ void __iomem *db;
+ void __iomem *priv_db;
+ struct bnxt_qplib_hwq *hwq;
+ u32 xid;
+};
+
/* Tables */
struct bnxt_qplib_pd_tbl {
unsigned long *tbl;
@@ -159,6 +195,15 @@ struct bnxt_qplib_vf_res {
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128
+#define MAX_TQM_ALLOC_REQ 48
+#define MAX_TQM_ALLOC_BLK_SIZE 8
+struct bnxt_qplib_tqm_ctx {
+ struct bnxt_qplib_hwq pde;
+ u8 pde_level; /* Original level */
+ struct bnxt_qplib_hwq qtbl[MAX_TQM_ALLOC_REQ];
+ u8 qcount[MAX_TQM_ALLOC_REQ];
+};
+
struct bnxt_qplib_ctx {
u32 qpc_count;
struct bnxt_qplib_hwq qpc_tbl;
@@ -169,12 +214,7 @@ struct bnxt_qplib_ctx {
u32 cq_count;
struct bnxt_qplib_hwq cq_tbl;
struct bnxt_qplib_hwq tim_tbl;
-#define MAX_TQM_ALLOC_REQ 48
-#define MAX_TQM_ALLOC_BLK_SIZE 8
- u8 tqm_count[MAX_TQM_ALLOC_REQ];
- struct bnxt_qplib_hwq tqm_pde;
- u32 tqm_pde_level;
- struct bnxt_qplib_hwq tqm_tbl[MAX_TQM_ALLOC_REQ];
+ struct bnxt_qplib_tqm_ctx tqm_ctx;
struct bnxt_qplib_stats stats;
struct bnxt_qplib_vf_res vf_res;
u64 hwrm_intf_ver;
@@ -223,11 +263,6 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}
-struct bnxt_qplib_sg_info {
- struct scatterlist *sglist;
- u32 nmap;
- u32 npages;
-};
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
@@ -235,11 +270,10 @@ struct bnxt_qplib_sg_info {
struct bnxt_qplib_pd;
struct bnxt_qplib_dev_attr;
-void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq);
-int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
- struct bnxt_qplib_sg_info *sg_info, u32 *elements,
- u32 elements_per_page, u32 aux, u32 pg_size,
- enum bnxt_qplib_hwq_type hwq_type);
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_hwq *hwq);
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_hwq_attr *hwq_attr);
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
struct bnxt_qplib_pd *pd);
@@ -258,9 +292,80 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
struct net_device *netdev,
struct bnxt_qplib_dev_attr *dev_attr);
-void bnxt_qplib_free_ctx(struct pci_dev *pdev,
+void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
-int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
+int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx,
bool virt_fn, bool is_p5);
+
+static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
+ bool arm)
+{
+ u32 key;
+
+ key = info->hwq->cons & (info->hwq->max_elements - 1);
+ key |= (CMPL_DOORBELL_IDX_VALID |
+ (CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
+ if (!arm)
+ key |= CMPL_DOORBELL_MASK;
+ writel(key, info->db);
+}
+
+static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
+ key <<= 32;
+ key |= (info->hwq->cons & (info->hwq->max_elements - 1)) &
+ DBC_DBC_INDEX_MASK;
+ writeq(key, info->db);
+}
+
+static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
+ key <<= 32;
+ key |= (info->hwq->prod & (info->hwq->max_elements - 1)) &
+ DBC_DBC_INDEX_MASK;
+ writeq(key, info->db);
+}
+
+static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
+ key <<= 32;
+ writeq(key, info->priv_db);
+}
+
+static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info,
+ u32 th)
+{
+ u64 key = 0;
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | th;
+ key <<= 32;
+ key |= th & DBC_DBC_INDEX_MASK;
+ writeq(key, info->priv_db);
+}
+
+static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
+ struct bnxt_qplib_chip_ctx *cctx,
+ bool arm)
+{
+ u32 type;
+
+ type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
+ if (bnxt_qplib_is_chip_gen_p5(cctx))
+ bnxt_qplib_ring_db(info, type);
+ else
+ bnxt_qplib_ring_db32(info, arm);
+}
#endif /* __BNXT_QPLIB_RES_H__ */
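The gen-P5 doorbell helpers above all build a 64-bit key the same way: queue id, path and doorbell type in the upper 32 bits, the ring index in the lower 32, written with a single writeq(). A sketch of just the key composition, using illustrative DBC_* values in place of the real roce_hsi.h constants (assumptions, not the hardware layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the roce_hsi.h DBC_* constants. */
#define DBC_XID_MASK 0xfffffU
#define DBC_PATH_ROCE 0x1000000U
#define DBC_INDEX_MASK 0xffffffU

static uint64_t build_db_key(uint32_t xid, uint32_t type,
			     uint32_t cons, uint32_t max_elements)
{
	uint64_t key;

	/* Upper half: queue id, path and doorbell type. */
	key = (xid & DBC_XID_MASK) | DBC_PATH_ROCE | type;
	key <<= 32;
	/* Lower half: current index, wrapped to the power-of-two ring. */
	key |= (cons & (max_elements - 1)) & DBC_INDEX_MASK;
	return key;
}

int main(void)
{
	printf("key=%#llx\n",
	       (unsigned long long)build_db_key(0x42, 0x2000000U, 513, 512));
	return 0;
}

Packing both halves into one 64-bit store keeps the doorbell update atomic from the device's point of view.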
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 40296b97d21e..66954ff6a2f2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -585,7 +585,7 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
/* Free the qplib's MRW memory */
if (mrw->hwq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
+ bnxt_qplib_free_hwq(res, &mrw->hwq);
return 0;
}
@@ -646,7 +646,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
if (mrw->hwq.max_elements) {
mrw->va = 0;
mrw->total_size = 0;
- bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
+ bnxt_qplib_free_hwq(res, &mrw->hwq);
}
return 0;
@@ -656,10 +656,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_register_mr req;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
struct creq_register_mr_resp resp;
- u16 cmd_flags = 0, level;
+ struct cmdq_register_mr req;
int pg_ptrs, pages, i, rc;
+ u16 cmd_flags = 0, level;
dma_addr_t **pbl_ptr;
u32 pg_size;
@@ -674,20 +676,23 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
if (pages > MAX_PBL_LVL_1_PGS) {
dev_err(&res->pdev->dev,
- "SP: Reg MR pages requested (0x%x) exceeded max (0x%x)\n",
+ "SP: Reg MR: pages requested (0x%x) exceeded max (0x%x)\n",
pages, MAX_PBL_LVL_1_PGS);
return -ENOMEM;
}
/* Free the hwq if it already exists, must be a rereg */
if (mr->hwq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
-
- mr->hwq.max_elements = pages;
+ bnxt_qplib_free_hwq(res, &mr->hwq);
/* Use system PAGE_SIZE */
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL,
- &mr->hwq.max_elements,
- PAGE_SIZE, 0, PAGE_SIZE,
- HWQ_TYPE_CTX);
+ hwq_attr.res = res;
+ hwq_attr.depth = pages;
+ hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.type = HWQ_TYPE_MR;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.sginfo->npages = pages;
+ hwq_attr.sginfo->pgsize = PAGE_SIZE;
+ hwq_attr.sginfo->pgshft = PAGE_SHIFT;
+ rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
if (rc) {
dev_err(&res->pdev->dev,
"SP: Reg MR memory allocation failed\n");
@@ -734,7 +739,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
fail:
if (mr->hwq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
+ bnxt_qplib_free_hwq(res, &mr->hwq);
return rc;
}
@@ -742,6 +747,8 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl,
int max_pg_ptrs)
{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
int pg_ptrs, pages, rc;
/* Re-calculate the max to fit the HWQ allocation model */
@@ -753,10 +760,15 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
if (pages > MAX_PBL_LVL_1_PGS)
return -ENOMEM;
- frpl->hwq.max_elements = pages;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL,
- &frpl->hwq.max_elements, PAGE_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.nopte = true;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = pg_ptrs;
+ hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
if (!rc)
frpl->max_pg_ptrs = pg_ptrs;
@@ -766,7 +778,7 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl)
{
- bnxt_qplib_free_hwq(res->pdev, &frpl->hwq);
+ bnxt_qplib_free_hwq(res, &frpl->hwq);
return 0;
}
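The qplib_sp.c conversions above all follow the central refactor of this series: the long bnxt_qplib_alloc_init_hwq() parameter list is replaced by a bnxt_qplib_hwq_attr struct filled with designated initializers. A minimal sketch of that API pattern, with hypothetical names rather than the driver's:

#include <stdio.h>

/* Hypothetical attribute struct, in the style of bnxt_qplib_hwq_attr. */
struct queue_attr {
	unsigned int depth;
	unsigned int stride;
	unsigned int type;
};

static int queue_alloc(const struct queue_attr *attr)
{
	/* New fields can be added to queue_attr without touching every
	 * caller; zero-initialized fields keep the old behaviour.
	 */
	printf("depth=%u stride=%u type=%u\n",
	       attr->depth, attr->stride, attr->type);
	return 0;
}

int main(void)
{
	struct queue_attr attr = { .depth = 512, .stride = 8 };

	return queue_alloc(&attr);
}

The `= {}` initialization used throughout the patch is what makes this safe: unset attributes default to zero instead of stack garbage.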
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d69dece3b1d5..30e08bcc9afb 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
srqidx = ABORT_RSS_SRQIDX_G(
be32_to_cpu(req->srqidx_status));
if (srqidx) {
- complete_cached_srq_buffers(ep,
- req->srqidx_status);
+ complete_cached_srq_buffers(ep, srqidx);
} else {
/* Hold ep ref until finish_peer_abort() */
c4iw_get_ep(&ep->com);
@@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
- ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
- TCB_RQ_START_S);
+ ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
+ TCB_RQ_START_S);
cleanup:
pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7d06b0f8d49a..e8e11bd95e42 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -707,7 +707,7 @@ struct mpa_message {
u8 flags;
u8 revision;
__be16 private_data_size;
- u8 private_data[0];
+ u8 private_data[];
};
struct mpa_v2_conn_params {
@@ -719,7 +719,7 @@ struct terminate_message {
u8 layer_etype;
u8 ecode;
__be16 hdrct_rsvd;
- u8 len_hdrs[0];
+ u8 len_hdrs[];
};
#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
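The [0] -> [] changes in iw_cxgb4.h (and in t4fw_ri_api.h below) replace the old GNU zero-length-array idiom with C99 flexible array members, which compilers and bounds checkers understand. A brief sketch of how such a trailing array is allocated and used:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same shape as struct mpa_message's trailing private_data[]. */
struct message {
	unsigned short len;
	unsigned char data[]; /* C99 flexible array member */
};

int main(void)
{
	const char payload[] = "hello";
	/* One allocation covers the header plus the payload. */
	struct message *m = malloc(sizeof(*m) + sizeof(payload));

	if (!m)
		return 1;
	m->len = sizeof(payload);
	memcpy(m->data, payload, m->len);
	printf("%s (%u bytes)\n", (char *)m->data, (unsigned)m->len);
	free(m);
	return 0;
}

sizeof(struct message) does not include the flexible member, so the header-plus-payload allocation above is exact.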
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 89ac2f9ae6dd..ac48012c992f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2127,7 +2127,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
pr_debug("ib_pd %p\n", pd);
if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
php = to_c4iw_pd(pd);
rhp = php->rhp;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index cbdb300a4794..a2f5e29ef226 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -123,7 +123,7 @@ struct fw_ri_dsgl {
__be32 len0;
__be64 addr0;
#ifndef C99_NOT_SUPPORTED
- struct fw_ri_dsge_pair sge[0];
+ struct fw_ri_dsge_pair sge[];
#endif
};
@@ -139,7 +139,7 @@ struct fw_ri_isgl {
__be16 nsge;
__be32 r2;
#ifndef C99_NOT_SUPPORTED
- struct fw_ri_sge sge[0];
+ struct fw_ri_sge sge[];
#endif
};
@@ -149,7 +149,7 @@ struct fw_ri_immd {
__be16 r2;
__be32 immdlen;
#ifndef C99_NOT_SUPPORTED
- __u8 data[0];
+ __u8 data[];
#endif
};
@@ -321,7 +321,7 @@ struct fw_ri_res_wr {
__be32 len16_pkd;
__u64 cookie;
#ifndef C99_NOT_SUPPORTED
- struct fw_ri_res res[0];
+ struct fw_ri_res res[];
#endif
};
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 74b787a90660..96b104ab5415 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_CMDS_H_
@@ -801,21 +801,16 @@ struct efa_admin_mmio_req_read_less_resp {
/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
-#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_SHIFT 1
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
-#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT 7
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
-#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT 2
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
/* create_cq_cmd */
-#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
-#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_SHIFT 6
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h
index c8e0c8b905be..29d53ed63b3e 100644
--- a/drivers/infiniband/hw/efa/efa_admin_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_H_
@@ -121,9 +121,7 @@ struct efa_admin_aenq_entry {
/* aq_common_desc */
#define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
-#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
-#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
/* acq_common_desc */
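The admin-queue code in efa_com.c below recovers a completion context from a command id with `ctx_id = cmd_id & (aq->depth - 1)`; this works because the queue depth is a power of two, so the mask is equivalent to a modulo. A quick sketch:

#include <stdio.h>

int main(void)
{
	unsigned int depth = 32; /* must be a power of two */
	unsigned int cmd_id;

	/* cmd_id grows monotonically; the mask folds it back into the
	 * ring, exactly like cmd_id & (aq->depth - 1) in efa_com.c.
	 */
	for (cmd_id = 30; cmd_id < 35; cmd_id++)
		printf("cmd_id=%u -> ctx_id=%u\n",
		       cmd_id, cmd_id & (depth - 1));
	return 0;
}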
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 0778f4f7dccd..7fce69f5568f 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -16,26 +16,13 @@
#define EFA_ASYNC_QUEUE_DEPTH 16
#define EFA_ADMIN_QUEUE_DEPTH 32
-#define MIN_EFA_VER\
- ((EFA_ADMIN_API_VERSION_MAJOR << EFA_REGS_VERSION_MAJOR_VERSION_SHIFT) | \
- (EFA_ADMIN_API_VERSION_MINOR & EFA_REGS_VERSION_MINOR_VERSION_MASK))
-
#define EFA_CTRL_MAJOR 0
#define EFA_CTRL_MINOR 0
#define EFA_CTRL_SUB_MINOR 1
-#define MIN_EFA_CTRL_VER \
- (((EFA_CTRL_MAJOR) << \
- (EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
- ((EFA_CTRL_MINOR) << \
- (EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
- (EFA_CTRL_SUB_MINOR))
-
#define EFA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
#define EFA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
-#define EFA_REGS_ADMIN_INTR_MASK 1
-
enum efa_cmd_status {
EFA_CMD_SUBMITTED,
EFA_CMD_COMPLETED,
@@ -84,7 +71,7 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
struct efa_admin_mmio_req_read_less_resp *read_resp;
unsigned long exp_time;
- u32 mmio_read_reg;
+ u32 mmio_read_reg = 0;
u32 err;
read_resp = mmio_read->read_resp;
@@ -94,10 +81,9 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
/* trash DMA req_id to identify when hardware is done */
read_resp->req_id = mmio_read->seq_num + 0x9aL;
- mmio_read_reg = (offset << EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
- EFA_REGS_MMIO_REG_READ_REG_OFF_MASK;
- mmio_read_reg |= mmio_read->seq_num &
- EFA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+ EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset);
+ EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
+ mmio_read->seq_num);
writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);
@@ -137,9 +123,9 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev)
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_com_admin_sq *sq = &aq->sq;
u16 size = aq->depth * sizeof(*sq->entries);
+ u32 aq_caps = 0;
u32 addr_high;
u32 addr_low;
- u32 aq_caps;
sq->entries =
dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
@@ -160,10 +146,9 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev)
writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);
- aq_caps = aq->depth & EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
- aq_caps |= (sizeof(struct efa_admin_aq_entry) <<
- EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
- EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+ EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
+ EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_aq_entry));
writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);
@@ -175,9 +160,9 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev)
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_com_admin_cq *cq = &aq->cq;
u16 size = aq->depth * sizeof(*cq->entries);
+ u32 acq_caps = 0;
u32 addr_high;
u32 addr_low;
- u32 acq_caps;
cq->entries =
dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
@@ -195,13 +180,11 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev)
writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);
- acq_caps = aq->depth & EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
- acq_caps |= (sizeof(struct efa_admin_acq_entry) <<
- EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
- EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
- acq_caps |= (aq->msix_vector_idx <<
- EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT) &
- EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK;
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_acq_entry));
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR,
+ aq->msix_vector_idx);
writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);
@@ -212,7 +195,8 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
struct efa_aenq_handlers *aenq_handlers)
{
struct efa_com_aenq *aenq = &edev->aenq;
- u32 addr_low, addr_high, aenq_caps;
+ u32 addr_low, addr_high;
+ u32 aenq_caps = 0;
u16 size;
if (!aenq_handlers) {
@@ -237,13 +221,11 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);
- aenq_caps = aenq->depth & EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
- aenq_caps |= (sizeof(struct efa_admin_aenq_entry) <<
- EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
- EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
- aenq_caps |= (aenq->msix_vector_idx
- << EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT) &
- EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK;
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth);
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_aenq_entry));
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR,
+ aenq->msix_vector_idx);
writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);
/*
@@ -280,8 +262,8 @@ static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
struct efa_comp_ctx *comp_ctx)
{
- u16 cmd_id = comp_ctx->user_cqe->acq_common_descriptor.command &
- EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+ u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command,
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
u16 ctx_id = cmd_id & (aq->depth - 1);
ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
@@ -335,8 +317,8 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
cmd->aq_common_descriptor.command_id = cmd_id;
- cmd->aq_common_descriptor.flags |= aq->sq.phase &
- EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+ EFA_SET(&cmd->aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);
comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
if (!comp_ctx) {
@@ -427,8 +409,8 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
struct efa_comp_ctx *comp_ctx;
u16 cmd_id;
- cmd_id = cqe->acq_common_descriptor.command &
- EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+ cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
if (!comp_ctx) {
@@ -705,7 +687,7 @@ void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
u32 mask_value = 0;
if (polling)
- mask_value = EFA_REGS_ADMIN_INTR_MASK;
+ EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1);
writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
if (polling)
@@ -743,7 +725,7 @@ int efa_com_admin_init(struct efa_com_dev *edev,
int err;
dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
- if (!(dev_sts & EFA_REGS_DEV_STS_READY_MASK)) {
+ if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
ibdev_err(edev->efa_dev,
"Device isn't ready, abort com init %#x\n", dev_sts);
return -ENODEV;
@@ -778,8 +760,7 @@ int efa_com_admin_init(struct efa_com_dev *edev,
goto err_destroy_cq;
cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
- timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
- EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
if (timeout)
/* the resolution of timeout reg is 100ms */
aq->completion_timeout = timeout * 100000;
@@ -940,7 +921,9 @@ void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
int efa_com_validate_version(struct efa_com_dev *edev)
{
+ u32 min_ctrl_ver = 0;
u32 ctrl_ver_masked;
+ u32 min_ver = 0;
u32 ctrl_ver;
u32 ver;
@@ -953,33 +936,42 @@ int efa_com_validate_version(struct efa_com_dev *edev)
EFA_REGS_CONTROLLER_VERSION_OFF);
ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
- (ver & EFA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- EFA_REGS_VERSION_MAJOR_VERSION_SHIFT,
- ver & EFA_REGS_VERSION_MINOR_VERSION_MASK);
-
- if (ver < MIN_EFA_VER) {
+ EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION),
+ EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION));
+
+ EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
+ EFA_ADMIN_API_VERSION_MAJOR);
+ EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
+ EFA_ADMIN_API_VERSION_MINOR);
+ if (ver < min_ver) {
ibdev_err(edev->efa_dev,
"EFA version is lower than the minimal version the driver supports\n");
return -EOPNOTSUPP;
}
- ibdev_dbg(edev->efa_dev,
- "efa controller version: %d.%d.%d implementation version %d\n",
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
- EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
- EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
- EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+ ibdev_dbg(
+ edev->efa_dev,
+ "efa controller version: %d.%d.%d implementation version %d\n",
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION),
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION),
+ EFA_GET(&ctrl_ver,
+ EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION),
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID));
ctrl_ver_masked =
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
-
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) |
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) |
+ EFA_GET(&ctrl_ver,
+ EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION);
+
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
+ EFA_CTRL_MAJOR);
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
+ EFA_CTRL_MINOR);
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
+ EFA_CTRL_SUB_MINOR);
/* Validate the ctrl version without the implementation ID */
- if (ctrl_ver_masked < MIN_EFA_CTRL_VER) {
+ if (ctrl_ver_masked < min_ctrl_ver) {
ibdev_err(edev->efa_dev,
"EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
return -EOPNOTSUPP;
@@ -1002,8 +994,7 @@ int efa_com_get_dma_width(struct efa_com_dev *edev)
u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
int width;
- width = (caps & EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
- EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+ width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH);
ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);
@@ -1017,16 +1008,14 @@ int efa_com_get_dma_width(struct efa_com_dev *edev)
return width;
}
-static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout,
- u16 exp_state)
+static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on)
{
u32 val, i;
for (i = 0; i < timeout; i++) {
val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
- if ((val & EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
- exp_state)
+ if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on)
return 0;
ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
@@ -1046,36 +1035,34 @@ static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout,
int efa_com_dev_reset(struct efa_com_dev *edev,
enum efa_regs_reset_reason_types reset_reason)
{
- u32 stat, timeout, cap, reset_val;
+ u32 stat, timeout, cap;
+ u32 reset_val = 0;
int err;
stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
- if (!(stat & EFA_REGS_DEV_STS_READY_MASK)) {
+ if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) {
ibdev_err(edev->efa_dev,
"Device isn't ready, can't reset device\n");
return -EINVAL;
}
- timeout = (cap & EFA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
- EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT);
if (!timeout) {
ibdev_err(edev->efa_dev, "Invalid timeout value\n");
return -EINVAL;
}
/* start reset */
- reset_val = EFA_REGS_DEV_CTL_DEV_RESET_MASK;
- reset_val |= (reset_reason << EFA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
- EFA_REGS_DEV_CTL_RESET_REASON_MASK;
+ EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1);
+ EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason);
writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
/* reset clears the mmio readless address, restore it */
efa_com_mmio_reg_read_resp_addr_init(edev);
- err = wait_for_reset_state(edev, timeout,
- EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ err = wait_for_reset_state(edev, timeout, 1);
if (err) {
ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
return err;
@@ -1089,8 +1076,7 @@ int efa_com_dev_reset(struct efa_com_dev *edev,
return err;
}
- timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
- EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
if (timeout)
/* the resolution of timeout reg is 100ms */
edev->aq.completion_timeout = timeout * 100000;
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index e20bd84a1014..eea5574a62e8 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -161,8 +161,9 @@ int efa_com_create_cq(struct efa_com_dev *edev,
int err;
create_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_CQ;
- create_cmd.cq_caps_2 = (params->entry_size_in_bytes / 4) &
- EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ EFA_SET(&create_cmd.cq_caps_2,
+ EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS,
+ params->entry_size_in_bytes / 4);
create_cmd.cq_depth = params->cq_depth;
create_cmd.num_sub_cqs = params->num_sub_cqs;
create_cmd.uar = params->uarn;
@@ -227,8 +228,8 @@ int efa_com_register_mr(struct efa_com_dev *edev,
mr_cmd.aq_common_desc.opcode = EFA_ADMIN_REG_MR;
mr_cmd.pd = params->pd;
mr_cmd.mr_length = params->mr_length_in_bytes;
- mr_cmd.flags |= params->page_shift &
- EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
+ EFA_SET(&mr_cmd.flags, EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT,
+ params->page_shift);
mr_cmd.iova = params->iova;
mr_cmd.permissions = params->permissions;
@@ -242,11 +243,11 @@ int efa_com_register_mr(struct efa_com_dev *edev,
params->pbl.pbl.address.mem_addr_low;
mr_cmd.pbl.pbl.address.mem_addr_high =
params->pbl.pbl.address.mem_addr_high;
- mr_cmd.aq_common_desc.flags |=
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+ EFA_SET(&mr_cmd.aq_common_desc.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
if (params->indirect)
- mr_cmd.aq_common_desc.flags |=
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ EFA_SET(&mr_cmd.aq_common_desc.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
}
err = efa_com_cmd_exec(aq,
@@ -386,9 +387,8 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
get_cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_FEATURE;
if (control_buff_size)
- get_cmd.aq_common_descriptor.flags =
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
-
+ EFA_SET(&get_cmd.aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&get_cmd.control_buffer.address.mem_addr_high,
@@ -538,8 +538,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
set_cmd->aq_common_descriptor.opcode = EFA_ADMIN_SET_FEATURE;
if (control_buff_size) {
- set_cmd->aq_common_descriptor.flags =
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ set_cmd->aq_common_descriptor.flags = 0;
+ EFA_SET(&set_cmd->aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&set_cmd->control_buffer.address.mem_addr_high,
&set_cmd->control_buffer.address.mem_addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_common_defs.h b/drivers/infiniband/hw/efa/efa_common_defs.h
index c559ec08898e..90af1c82c9c6 100644
--- a/drivers/infiniband/hw/efa/efa_common_defs.h
+++ b/drivers/infiniband/hw/efa/efa_common_defs.h
@@ -1,14 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COMMON_H_
#define _EFA_COMMON_H_
+#include <linux/bitfield.h>
+
#define EFA_COMMON_SPEC_VERSION_MAJOR 2
#define EFA_COMMON_SPEC_VERSION_MINOR 0
+#define EFA_GET(ptr, mask) FIELD_GET(mask##_MASK, *(ptr))
+
+#define EFA_SET(ptr, mask, value) \
+ ({ \
+ typeof(ptr) _ptr = ptr; \
+ *_ptr = (*_ptr & ~(mask##_MASK)) | \
+ FIELD_PREP(mask##_MASK, value); \
+ })
+
struct efa_common_mem_addr {
u32 mem_addr_low;
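EFA_GET()/EFA_SET() above wrap the kernel's FIELD_GET()/FIELD_PREP(), which derive the shift position from the mask itself; that is why all the *_SHIFT defines can be deleted across this series, as efa_regs_defs.h below shows. A userspace sketch of the same read-modify-write, with the bitfield helpers re-implemented for illustration (constant masks only, not the real linux/bitfield.h implementation):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for linux/bitfield.h (constant masks only). */
#define FIELD_SHIFT(mask) (__builtin_ctz(mask))
#define FIELD_PREP(mask, val) (((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg) (((reg) & (mask)) >> FIELD_SHIFT(mask))

#define CAPS_ADMIN_CMD_TO_MASK 0xf0000U /* from efa_regs_defs.h */

int main(void)
{
	uint32_t caps = 0;

	/* Equivalent of EFA_SET(&caps, EFA_REGS_CAPS_ADMIN_CMD_TO, 3): */
	caps = (caps & ~CAPS_ADMIN_CMD_TO_MASK) |
	       FIELD_PREP(CAPS_ADMIN_CMD_TO_MASK, 3);
	/* Equivalent of EFA_GET(&caps, EFA_REGS_CAPS_ADMIN_CMD_TO): */
	printf("timeout=%u caps=%#x\n",
	       (unsigned)FIELD_GET(CAPS_ADMIN_CMD_TO_MASK, caps), caps);
	return 0;
}

Keeping only the mask removes the risk of a mask and its shift drifting out of sync, which is the stated motivation for the EFA_SET/EFA_GET conversion.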
diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h
index bb9cad3d6a15..4017982fe13b 100644
--- a/drivers/infiniband/hw/efa/efa_regs_defs.h
+++ b/drivers/infiniband/hw/efa/efa_regs_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_REGS_H_
@@ -45,69 +45,52 @@ enum efa_regs_reset_reason_types {
/* version register */
#define EFA_REGS_VERSION_MINOR_VERSION_MASK 0xff
-#define EFA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
#define EFA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
/* controller_version register */
#define EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
-#define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
#define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
-#define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
#define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
-#define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
#define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
/* caps register */
#define EFA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
-#define EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
#define EFA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
-#define EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
-#define EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
#define EFA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
#define EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
-#define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
#define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
/* acq_caps register */
#define EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
-#define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
#define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xff0000
-#define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT 24
#define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK 0xff000000
/* aenq_caps register */
#define EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
-#define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
#define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xff0000
-#define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT 24
#define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK 0xff000000
+/* intr_mask register */
+#define EFA_REGS_INTR_MASK_EN_MASK 0x1
+
/* dev_ctl register */
#define EFA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
-#define EFA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
#define EFA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
-#define EFA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
#define EFA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
#define EFA_REGS_DEV_STS_READY_MASK 0x1
-#define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
#define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
-#define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
#define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
-#define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
#define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
-#define EFA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
#define EFA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
-#define EFA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
#define EFA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
/* mmio_reg_read register */
#define EFA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
-#define EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
#define EFA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
#endif /* _EFA_REGS_H_ */
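Each deleted *_SHIFT define was recoverable from its companion mask, so the
removals above lose nothing: <linux/bitfield.h> infers the shift at compile
time. For example (hypothetical caps value):

	u32 caps = 0x4000;
	u32 width = FIELD_GET(EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK, caps);
	/* width == 0x40: the mask 0xff00 implies the former shift of 8 */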
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index ec5545870554..5c57098a4aee 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -144,9 +144,6 @@ static inline bool is_rdma_read_cap(struct efa_dev *dev)
return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
}
-#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
- sizeof_field(typeof(x), fld) <= (sz))
-
#define is_reserved_cleared(reserved) \
!memchr_inv(reserved, 0, sizeof(reserved))
@@ -169,6 +166,14 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
return addr;
}
+static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
+ dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
+ free_pages_exact(cpu_addr, size);
+}
+
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata)
@@ -402,6 +407,9 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
int err;
ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
+
+ efa_qp_user_mmap_entries_remove(qp);
+
err = efa_destroy_qp_handle(dev, qp->qp_handle);
if (err)
return err;
@@ -411,11 +419,10 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
"qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
qp->rq_cpu_addr, qp->rq_size,
&qp->rq_dma_addr);
- dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
- DMA_TO_DEVICE);
+ efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
+ qp->rq_size, DMA_TO_DEVICE);
}
- efa_qp_user_mmap_entries_remove(qp);
kfree(qp);
return 0;
}
@@ -599,7 +606,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
if (err)
goto err_out;
- if (!field_avail(cmd, driver_qp_type, udata->inlen)) {
+ if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, no input udata\n");
err = -EINVAL;
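The field_avail() -> offsetofend() conversion is an identity rewrite:
offsetofend(T, f) equals offsetof(T, f) + sizeof_field(T, f), so the
negated availability test becomes "the field ends past the user buffer".
A sketch under an assumed layout (struct and sizes hypothetical):

	struct cmd_sketch { u32 flags; u32 driver_qp_type; };

	/* offsetofend(struct cmd_sketch, driver_qp_type) == 8, so a
	 * udata->inlen of 4 fails the check, exactly as !field_avail()
	 * (offsetof 4 + sizeof_field 4 > 4) did before. */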
@@ -720,13 +727,9 @@ err_remove_mmap_entries:
err_destroy_qp:
efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
- if (qp->rq_size) {
- dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
- DMA_TO_DEVICE);
-
- if (!qp->rq_mmap_entry)
- free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
- }
+ if (qp->rq_size)
+ efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
+ qp->rq_size, DMA_TO_DEVICE);
err_free_qp:
kfree(qp);
err_out:
@@ -845,10 +848,10 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
"Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
- efa_destroy_cq_idx(dev, cq->cq_idx);
- dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
rdma_user_mmap_entry_remove(cq->mmap_entry);
+ efa_destroy_cq_idx(dev, cq->cq_idx);
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
}
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
@@ -890,7 +893,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_out;
}
- if (!field_avail(cmd, num_sub_cqs, udata->inlen)) {
+ if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
ibdev_dbg(ibdev,
"Incompatible ABI params, no input udata\n");
err = -EINVAL;
@@ -985,10 +988,8 @@ err_remove_mmap:
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
- dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
- if (!cq->mmap_entry)
- free_pages_exact(cq->cpu_addr, cq->size);
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
err_out:
atomic64_inc(&dev->stats.sw_stats.create_cq_err);
@@ -1550,10 +1551,6 @@ void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
- /* DMA mapping is already gone, now free the pages */
- if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
- free_pages_exact(phys_to_virt(entry->address),
- entry->rdma_entry.npages * PAGE_SIZE);
kfree(entry);
}
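With efa_free_mapped() introduced above, the create/destroy paths now free
the DMA pages themselves and efa_mmap_free() only releases the entry, so
page ownership no longer depends on whether an mmap entry was created. The
intended pairing, sketched with placeholder variables:

	cpu_addr = efa_zalloc_mapped(dev, &dma_addr, size, DMA_TO_DEVICE);
	...
	efa_free_mapped(dev, cpu_addr, dma_addr, size, DMA_TO_DEVICE);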
diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
index d106d23016ba..c22ab7b5163b 100644
--- a/drivers/infiniband/hw/hfi1/efivar.c
+++ b/drivers/infiniband/hw/hfi1/efivar.c
@@ -78,7 +78,7 @@ static int read_efi_var(const char *name, unsigned long *size,
*size = 0;
*return_data = NULL;
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
return -EOPNOTSUPP;
uni_name = kcalloc(strlen(name) + 1, sizeof(efi_char16_t), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 986c12153e62..0dfbcfb048ca 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -222,11 +222,11 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
while (bit < bitsize) {
zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
if (zero - 1 != bit)
- size += snprintf(data + size,
+ size += scnprintf(data + size,
datalen - size - 1,
"0x%lx-0x%lx,", bit, zero - 1);
else
- size += snprintf(data + size,
+ size += scnprintf(data + size,
datalen - size - 1, "0x%lx,",
bit);
bit = find_next_bit(fault->opcodes, bitsize, zero);
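The snprintf() -> scnprintf() switch matters because the running total
feeds back into the size argument: snprintf() returns the length that
would have been written given unlimited space, so "size" can overrun
"datalen", while scnprintf() returns what was actually stored.
Illustration (buffer size chosen for the example):

	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "0x%lx,", 0xdeadbeefUL);  /* n == 11 */
	n = scnprintf(buf, sizeof(buf), "0x%lx,", 0xdeadbeefUL); /* n == 7 */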
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 259115886d35..e7fdd70c6e78 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -209,7 +209,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd->mm = current->mm;
mmgrab(fd->mm);
fd->dd = dd;
- kobject_get(&fd->dd->kobj);
fp->private_data = fd;
return 0;
nomem:
@@ -713,7 +712,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
deallocate_ctxt(uctxt);
done:
mmdrop(fdata->mm);
- kobject_put(&dd->kobj);
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
@@ -1696,7 +1694,7 @@ static int user_add(struct hfi1_devdata *dd)
snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
&dd->user_cdev, &dd->user_device,
- true, &dd->kobj);
+ true, &dd->verbs_dev.rdi.ibdev.dev.kobj);
if (ret)
user_remove(dd);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index cae12f416ca0..b06c2594105a 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1413,8 +1413,6 @@ struct hfi1_devdata {
bool aspm_enabled; /* ASPM state: enabled/disabled */
struct rhashtable *sdma_rht;
- struct kobject kobj;
-
/* vnic data */
struct hfi1_vnic_data vnic;
/* Lock to protect IRQ SRC register access */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index e3acda7a0800..3759d9233a1c 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1198,13 +1198,13 @@ static void finalize_asic_data(struct hfi1_devdata *dd,
}
/**
- * hfi1_clean_devdata - cleans up per-unit data structure
+ * hfi1_free_devdata - cleans up and frees per-unit data structure
* @dd: pointer to a valid devdata structure
*
- * It cleans up all data structures set up by
+ * It cleans up and frees all data structures set up
* by hfi1_alloc_devdata().
*/
-static void hfi1_clean_devdata(struct hfi1_devdata *dd)
+void hfi1_free_devdata(struct hfi1_devdata *dd)
{
struct hfi1_asic_data *ad;
unsigned long flags;
@@ -1231,23 +1231,6 @@ static void hfi1_clean_devdata(struct hfi1_devdata *dd)
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
-static void __hfi1_free_devdata(struct kobject *kobj)
-{
- struct hfi1_devdata *dd =
- container_of(kobj, struct hfi1_devdata, kobj);
-
- hfi1_clean_devdata(dd);
-}
-
-static struct kobj_type hfi1_devdata_type = {
- .release = __hfi1_free_devdata,
-};
-
-void hfi1_free_devdata(struct hfi1_devdata *dd)
-{
- kobject_put(&dd->kobj);
-}
-
/**
* hfi1_alloc_devdata - Allocate our primary per-unit data structure.
* @pdev: Valid PCI device
@@ -1333,11 +1316,10 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
- kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
bail:
- hfi1_clean_devdata(dd);
+ hfi1_free_devdata(dd);
return ERR_PTR(ret);
}
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index a51bcd2b4391..7073f237a949 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -2381,7 +2381,7 @@ struct opa_port_status_rsp {
__be64 port_vl_rcv_bubble;
__be64 port_vl_mark_fecn;
__be64 port_vl_xmit_discards;
- } vls[0]; /* real array size defined by # bits set in vl_select_mask */
+ } vls[]; /* real array size defined by # bits set in vl_select_mask */
};
enum counter_selects {
@@ -2423,7 +2423,7 @@ struct opa_aggregate {
__be16 attr_id;
__be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
__be32 attr_mod;
- u8 data[0];
+ u8 data[];
};
#define MSK_LLI 0x000000f0
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 2f48e6953629..889e63d3f2cc 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -165,7 +165,7 @@ struct opa_mad_notice_attr {
} __packed ntc_2048;
};
- u8 class_data[0];
+ u8 class_data[];
};
#define IB_VLARB_LOWPRI_0_31 1
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index c9a58b642bdd..0102262343c0 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -243,7 +243,7 @@ struct sc_config_sizes {
*/
struct pio_map_elem {
u32 mask;
- struct send_context *ksc[0];
+ struct send_context *ksc[];
};
/*
@@ -263,7 +263,7 @@ struct pio_vl_map {
u32 mask;
u8 actual_vls;
u8 vls;
- struct pio_map_elem *map[0];
+ struct pio_map_elem *map[];
};
int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index a51525647ac8..c93ea021cf49 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -833,7 +833,7 @@ struct sdma_engine *sdma_select_engine_sc(
struct sdma_rht_map_elem {
u32 mask;
u8 ctr;
- struct sdma_engine *sde[0];
+ struct sdma_engine *sde[];
};
struct sdma_rht_node {
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 1e2e40f79cb2..7a851191f987 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -1002,7 +1002,7 @@ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
*/
struct sdma_map_elem {
u32 mask;
- struct sdma_engine *sde[0];
+ struct sdma_engine *sde[];
};
/**
@@ -1024,7 +1024,7 @@ struct sdma_vl_map {
u32 mask;
u8 actual_vls;
u8 vls;
- struct sdma_map_elem *map[0];
+ struct sdma_map_elem *map[];
};
int sdma_map_init(
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 90f62c4bddba..074ec71772d2 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sc2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ /*
+ * Based on the documentation for kobject_init_and_add(), the
+		 * caller must call kobject_put() even if this call fails.
+ */
+ goto bail_sc2vl;
}
kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
@@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sl2sc sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sc2vl;
+ goto bail_sl2sc;
}
kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
@@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping vl2mtu sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl2sc;
+ goto bail_vl2mtu;
}
kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
@@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_vl2mtu;
+ goto bail_cc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -742,7 +746,6 @@ bail_sl2sc:
kobject_put(&ppd->sl2sc_kobj);
bail_sc2vl:
kobject_put(&ppd->sc2vl_kobj);
-bail:
return ret;
}
@@ -853,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
return 0;
bail:
- for (i = 0; i < dd->num_sdma; i++)
- kobject_del(&dd->per_sdma[i].kobj);
+ /*
+ * The function kobject_put() will call kobject_del() if the kobject
+ * has been added successfully. The sysfs files created under the
+ * kobject directory will also be removed during the process.
+ */
+ for (; i >= 0; i--)
+ kobject_put(&dd->per_sdma[i].kobj);
return ret;
}
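The reversed loop is the standard kobject unwind: kobject_init_and_add()
always takes a reference, even on failure, and kobject_put() both drops it
and (for successfully added kobjects) performs the kobject_del() and sysfs
cleanup. Generic sketch of the pattern (names hypothetical):

	ret = kobject_init_and_add(&obj->kobj, &obj_ktype, parent, "obj%d", i);
	if (ret) {
		kobject_put(&obj->kobj);	/* releases via the ktype */
		return ret;
	}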
@@ -867,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
int i;
+ /* Unwind operations in hfi1_verbs_register_sysfs() */
+ for (i = 0; i < dd->num_sdma; i++)
+ kobject_put(&dd->per_sdma[i].kobj);
+
for (i = 0; i < dd->num_pports; i++) {
ppd = &dd->pport[i];
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 6257eee083a1..332abb446861 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -73,7 +73,7 @@ struct tid_rb_node {
dma_addr_t dma_addr;
bool freed;
unsigned int npages;
- struct page *pages[0];
+ struct page *pages[];
};
static inline int num_user_pages(unsigned long addr,
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 13e4203497b3..a92346e88628 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
pq->state = SDMA_PKT_Q_ACTIVE;
- /* Send the first N packets in the request to buy us some time */
- ret = user_sdma_send_pkts(req, pcount);
- if (unlikely(ret < 0 && ret != -EBUSY))
- goto free_req;
/*
* This is a somewhat blocking send implementation.
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 5ffe4c996ed3..5bfb52ffd590 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -257,8 +257,8 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
return ret;
}
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(*resp))) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
&hr_cq->db);
if (ret) {
@@ -321,8 +321,8 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
udata, struct hns_roce_ucontext, ibucontext);
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(*resp)))
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags))
hns_roce_db_unmap_user(context, &hr_cq->db);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index a7c4ff975c28..f6b3cf6b95d6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -641,6 +641,19 @@ struct hns_roce_rinl_buf {
u32 wqe_cnt;
};
+enum {
+ HNS_ROCE_FLUSH_FLAG = 0,
+};
+
+struct hns_roce_work {
+ struct hns_roce_dev *hr_dev;
+ struct work_struct work;
+ u32 qpn;
+ u32 cqn;
+ int event_type;
+ int sub_type;
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
@@ -656,11 +669,6 @@ struct hns_roce_qp {
struct ib_umem *umem;
struct hns_roce_mtt mtt;
struct hns_roce_mtr mtr;
-
- /* this define must less than HNS_ROCE_MAX_BT_REGION */
-#define HNS_ROCE_WQE_REGION_MAX 3
- struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX];
- int region_cnt;
int wqe_bt_pg_shift;
u32 buff_size;
@@ -684,6 +692,9 @@ struct hns_roce_qp {
struct hns_roce_sge sge;
u32 next_sge;
+	/* 0: flush needed, 1: flush already scheduled */
+ unsigned long flush_flag;
+ struct hns_roce_work flush_work;
struct hns_roce_rinl_buf rq_inl_buf;
struct list_head node; /* all qps are on a list */
struct list_head rq_node; /* all recv qps are on a list */
@@ -762,14 +773,8 @@ struct hns_roce_eq {
int eqe_ba_pg_sz;
int eqe_buf_pg_sz;
int hop_num;
- u64 *bt_l0; /* Base address table for L0 */
- u64 **bt_l1; /* Base address table for L1 */
- u64 **buf;
- dma_addr_t l0_dma;
- dma_addr_t *l1_dma;
- dma_addr_t *buf_dma;
- u32 l0_last_num; /* L0 last chunk num */
- u32 l1_last_num; /* L1 last chunk num */
+ struct hns_roce_mtr mtr;
+ struct hns_roce_buf buf;
int eq_max_cnt;
int eq_period;
int shift;
@@ -881,7 +886,7 @@ struct hns_roce_caps {
u32 cqc_timer_ba_pg_sz;
u32 cqc_timer_buf_pg_sz;
u32 cqc_timer_hop_num;
- u32 cqe_ba_pg_sz;
+ u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
u32 srqwqe_ba_pg_sz;
@@ -906,15 +911,6 @@ struct hns_roce_caps {
u16 default_ceq_arm_st;
};
-struct hns_roce_work {
- struct hns_roce_dev *hr_dev;
- struct work_struct work;
- u32 qpn;
- u32 cqn;
- int event_type;
- int sub_type;
-};
-
struct hns_roce_dfx_hw {
int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
int *buffer);
@@ -1237,9 +1233,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
-void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
-void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
-void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
@@ -1248,9 +1245,8 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
- int cnt);
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index e82215774032..263338b90d7a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -39,6 +39,16 @@
#define DMA_ADDR_T_SHIFT 12
#define BT_BA_SHIFT 32
+#define HEM_INDEX_BUF BIT(0)
+#define HEM_INDEX_L0 BIT(1)
+#define HEM_INDEX_L1 BIT(2)
+struct hns_roce_hem_index {
+ u64 buf;
+ u64 l0;
+ u64 l1;
+	u32 inited; /* indicates which indexes are available */
+};
+
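The inited bitmask records exactly which levels alloc_mhop_hem() set up,
so free_mhop_hem() can tear down only those. Bookkeeping sketch for a
fully built three-hop entry (illustrative):

	struct hns_roce_hem_index idx = {};

	idx.inited |= HEM_INDEX_L0;	/* L0 BA chunk allocated */
	idx.inited |= HEM_INDEX_L1;	/* L1 BA chunk allocated */
	idx.inited |= HEM_INDEX_BUF;	/* buffer chunk allocated */

	if (idx.inited & HEM_INDEX_L1)
		pr_debug("bt_l1[%llu] may be freed\n", idx.l1);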
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
int hop_num = 0;
@@ -84,25 +94,27 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
return hop_num ? true : false;
}
-static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
- u32 bt_chunk_num, u64 hem_max_num)
+static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
+ u32 bt_chunk_num, u64 hem_max_num)
{
+ u64 start_idx = round_down(hem_idx, bt_chunk_num);
u64 check_max_num = start_idx + bt_chunk_num;
u64 i;
for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
- if (hem[i])
+ if (i != hem_idx && hem[i])
return false;
return true;
}
-static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
+static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
{
+ u64 start_idx = round_down(ba_idx, bt_chunk_num);
int i;
for (i = 0; i < bt_chunk_num; i++)
- if (bt[start_idx + i])
+ if (i != ba_idx && bt[start_idx + i])
return false;
return true;
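Both helpers now take the element's own index and derive the chunk base
with round_down(), skipping that element in the scan; previously the entry
still being freed could make the "chunk is empty" test fail. Numeric
sketch (chunk size assumed):

	u32 bt_chunk_num = 256;
	u64 start_idx = round_down(300, bt_chunk_num);	/* == 256 */
	/* slots 256..511 are checked, slot 300 itself is skipped */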
@@ -434,178 +446,235 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return ret;
}
-static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long obj)
+static int calc_hem_config(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_hem_mhop mhop;
- struct hns_roce_hem_iter iter;
- u32 buf_chunk_size;
- u32 bt_chunk_size;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long mhop_obj = obj;
+ u32 l0_idx, l1_idx, l2_idx;
u32 chunk_ba_num;
- u32 hop_num;
- u32 size;
u32 bt_num;
- u64 hem_idx;
- u64 bt_l1_idx = 0;
- u64 bt_l0_idx = 0;
- u64 bt_ba;
- unsigned long mhop_obj = obj;
- int bt_l1_allocated = 0;
- int bt_l0_allocated = 0;
- int step_idx;
int ret;
- ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
if (ret)
return ret;
- buf_chunk_size = mhop.buf_chunk_size;
- bt_chunk_size = mhop.bt_chunk_size;
- hop_num = mhop.hop_num;
- chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
-
- bt_num = hns_roce_get_bt_num(table->type, hop_num);
+ l0_idx = mhop->l0_idx;
+ l1_idx = mhop->l1_idx;
+ l2_idx = mhop->l2_idx;
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
switch (bt_num) {
case 3:
- hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
- mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
- bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
- bt_l0_idx = mhop.l0_idx;
+ index->l1 = l0_idx * chunk_ba_num + l1_idx;
+ index->l0 = l0_idx;
+ index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
+ l1_idx * chunk_ba_num + l2_idx;
break;
case 2:
- hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
- bt_l0_idx = mhop.l0_idx;
+ index->l0 = l0_idx;
+ index->buf = l0_idx * chunk_ba_num + l1_idx;
break;
case 1:
- hem_idx = mhop.l0_idx;
+ index->buf = l0_idx;
break;
default:
- dev_err(dev, "Table %d not support hop_num = %d!\n",
- table->type, hop_num);
+ ibdev_err(ibdev, "Table %d not support mhop.hop_num = %d!\n",
+ table->type, mhop->hop_num);
return -EINVAL;
}
- if (unlikely(hem_idx >= table->num_hem)) {
- dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n",
- table->type, hem_idx, table->num_hem);
+ if (unlikely(index->buf >= table->num_hem)) {
+ ibdev_err(ibdev, "Table %d exceed hem limt idx %llu,max %lu!\n",
+ table->type, index->buf, table->num_hem);
return -EINVAL;
}
- mutex_lock(&table->mutex);
+ return 0;
+}
- if (table->hem[hem_idx]) {
- ++table->hem[hem_idx]->refcount;
- goto out;
+static void free_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ u32 bt_size = mhop->bt_chunk_size;
+ struct device *dev = hr_dev->dev;
+
+ if (index->inited & HEM_INDEX_BUF) {
+ hns_roce_free_hem(hr_dev, table->hem[index->buf]);
+ table->hem[index->buf] = NULL;
+ }
+
+ if (index->inited & HEM_INDEX_L1) {
+ dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
+ table->bt_l1_dma_addr[index->l1]);
+ table->bt_l1[index->l1] = NULL;
}
+ if (index->inited & HEM_INDEX_L0) {
+ dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
+ table->bt_l0_dma_addr[index->l0]);
+ table->bt_l0[index->l0] = NULL;
+ }
+}
+
+static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ u32 bt_size = mhop->bt_chunk_size;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_hem_iter iter;
+ gfp_t flag;
+ u64 bt_ba;
+ u32 size;
+ int ret;
+
/* alloc L1 BA's chunk */
- if ((check_whether_bt_num_3(table->type, hop_num) ||
- check_whether_bt_num_2(table->type, hop_num)) &&
- !table->bt_l0[bt_l0_idx]) {
- table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
- &(table->bt_l0_dma_addr[bt_l0_idx]),
+ if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
+ check_whether_bt_num_2(table->type, mhop->hop_num)) &&
+ !table->bt_l0[index->l0]) {
+ table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
+ &table->bt_l0_dma_addr[index->l0],
GFP_KERNEL);
- if (!table->bt_l0[bt_l0_idx]) {
+ if (!table->bt_l0[index->l0]) {
ret = -ENOMEM;
goto out;
}
- bt_l0_allocated = 1;
-
- /* set base address to hardware */
- if (table->type < HEM_TYPE_MTT) {
- step_idx = 0;
- if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
- ret = -ENODEV;
- dev_err(dev, "set HEM base address to HW failed!\n");
- goto err_dma_alloc_l1;
- }
- }
+ index->inited |= HEM_INDEX_L0;
}
/* alloc L2 BA's chunk */
- if (check_whether_bt_num_3(table->type, hop_num) &&
- !table->bt_l1[bt_l1_idx]) {
- table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
- &(table->bt_l1_dma_addr[bt_l1_idx]),
+ if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
+ !table->bt_l1[index->l1]) {
+ table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
+ &table->bt_l1_dma_addr[index->l1],
GFP_KERNEL);
- if (!table->bt_l1[bt_l1_idx]) {
+ if (!table->bt_l1[index->l1]) {
ret = -ENOMEM;
- goto err_dma_alloc_l1;
- }
- bt_l1_allocated = 1;
- *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
- table->bt_l1_dma_addr[bt_l1_idx];
-
- /* set base address to hardware */
- step_idx = 1;
- if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
- ret = -ENODEV;
- dev_err(dev, "set HEM base address to HW failed!\n");
- goto err_alloc_hem_buf;
+ goto err_alloc_hem;
}
+ index->inited |= HEM_INDEX_L1;
+ *(table->bt_l0[index->l0] + mhop->l1_idx) =
+ table->bt_l1_dma_addr[index->l1];
}
/*
* alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
* alloc bt space chunk for MTT/CQE.
*/
- size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
- table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
- size >> PAGE_SHIFT,
- size,
- (table->lowmem ? GFP_KERNEL :
- GFP_HIGHUSER) | __GFP_NOWARN);
- if (!table->hem[hem_idx]) {
+ size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
+ flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
+ table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
+ size, flag);
+ if (!table->hem[index->buf]) {
ret = -ENOMEM;
- goto err_alloc_hem_buf;
+ goto err_alloc_hem;
}
- hns_roce_hem_first(table->hem[hem_idx], &iter);
+ index->inited |= HEM_INDEX_BUF;
+ hns_roce_hem_first(table->hem[index->buf], &iter);
bt_ba = hns_roce_hem_addr(&iter);
-
if (table->type < HEM_TYPE_MTT) {
- if (hop_num == 2) {
- *(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
- step_idx = 2;
- } else if (hop_num == 1) {
- *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
- step_idx = 1;
- } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
- step_idx = 0;
- } else {
- ret = -EINVAL;
- goto err_dma_alloc_l1;
+ if (mhop->hop_num == 2)
+ *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
+ else if (mhop->hop_num == 1)
+ *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+ } else if (mhop->hop_num == 2) {
+ *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+ }
+
+ return 0;
+err_alloc_hem:
+ free_mhop_hem(hr_dev, table, mhop, index);
+out:
+ return ret;
+}
+
+static int set_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int step_idx;
+ int ret = 0;
+
+ if (index->inited & HEM_INDEX_L0) {
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM step 0 failed!\n");
+ goto out;
}
+ }
- /* set HEM base address to hardware */
- if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
- ret = -ENODEV;
- dev_err(dev, "set HEM base address to HW failed!\n");
- goto err_alloc_hem_buf;
+ if (index->inited & HEM_INDEX_L1) {
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM step 1 failed!\n");
+ goto out;
}
- } else if (hop_num == 2) {
- *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
}
- ++table->hem[hem_idx]->refcount;
- goto out;
+ if (index->inited & HEM_INDEX_BUF) {
+ if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
+ step_idx = 0;
+ else
+ step_idx = mhop->hop_num;
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+ if (ret)
+ ibdev_err(ibdev, "set HEM step last failed!\n");
+ }
+out:
+ return ret;
+}
-err_alloc_hem_buf:
- if (bt_l1_allocated) {
- dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
- table->bt_l1_dma_addr[bt_l1_idx]);
- table->bt_l1[bt_l1_idx] = NULL;
+static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_hem_index index = {};
+ struct hns_roce_hem_mhop mhop = {};
+ int ret;
+
+ ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "calc hem config failed!\n");
+ return ret;
+ }
+
+ mutex_lock(&table->mutex);
+ if (table->hem[index.buf]) {
+ ++table->hem[index.buf]->refcount;
+ goto out;
}
-err_dma_alloc_l1:
- if (bt_l0_allocated) {
- dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
- table->bt_l0_dma_addr[bt_l0_idx]);
- table->bt_l0[bt_l0_idx] = NULL;
+ ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "alloc mhop hem failed!\n");
+ goto out;
}
+ /* set HEM base address to hardware */
+ if (table->type < HEM_TYPE_MTT) {
+ ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM address to HW failed!\n");
+ goto err_alloc;
+ }
+ }
+
+ ++table->hem[index.buf]->refcount;
+ goto out;
+
+err_alloc:
+ free_mhop_hem(hr_dev, table, &mhop, &index);
out:
mutex_unlock(&table->mutex);
return ret;
@@ -656,116 +725,75 @@ out:
return ret;
}
+static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 hop_num = mhop->hop_num;
+ u32 chunk_ba_num;
+ int step_idx;
+
+ index->inited = HEM_INDEX_BUF;
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ if (check_whether_bt_num_2(table->type, hop_num)) {
+ if (hns_roce_check_hem_null(table->hem, index->buf,
+ chunk_ba_num, table->num_hem))
+ index->inited |= HEM_INDEX_L0;
+ } else if (check_whether_bt_num_3(table->type, hop_num)) {
+ if (hns_roce_check_hem_null(table->hem, index->buf,
+ chunk_ba_num, table->num_hem)) {
+ index->inited |= HEM_INDEX_L1;
+ if (hns_roce_check_bt_null(table->bt_l1, index->l1,
+ chunk_ba_num))
+ index->inited |= HEM_INDEX_L0;
+ }
+ }
+
+ if (table->type < HEM_TYPE_MTT) {
+ if (hop_num == HNS_ROCE_HOP_NUM_0)
+ step_idx = 0;
+ else
+ step_idx = hop_num;
+
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
+ ibdev_warn(ibdev, "Clear hop%d HEM failed.\n", hop_num);
+
+ if (index->inited & HEM_INDEX_L1)
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+ ibdev_warn(ibdev, "Clear HEM step 1 failed.\n");
+
+ if (index->inited & HEM_INDEX_L0)
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+ ibdev_warn(ibdev, "Clear HEM step 0 failed.\n");
+ }
+}
+
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj,
int check_refcount)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_hem_mhop mhop;
- unsigned long mhop_obj = obj;
- u32 bt_chunk_size;
- u32 chunk_ba_num;
- u32 hop_num;
- u32 start_idx;
- u32 bt_num;
- u64 hem_idx;
- u64 bt_l1_idx = 0;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_hem_index index = {};
+ struct hns_roce_hem_mhop mhop = {};
int ret;
- ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
- if (ret)
- return;
-
- bt_chunk_size = mhop.bt_chunk_size;
- hop_num = mhop.hop_num;
- chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
-
- bt_num = hns_roce_get_bt_num(table->type, hop_num);
- switch (bt_num) {
- case 3:
- hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
- mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
- bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
- break;
- case 2:
- hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
- break;
- case 1:
- hem_idx = mhop.l0_idx;
- break;
- default:
- dev_err(dev, "Table %d not support hop_num = %d!\n",
- table->type, hop_num);
+ ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "calc hem config failed!\n");
return;
}
mutex_lock(&table->mutex);
-
- if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
+ if (check_refcount && (--table->hem[index.buf]->refcount > 0)) {
mutex_unlock(&table->mutex);
return;
}
- if (table->type < HEM_TYPE_MTT && hop_num == 1) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
- dev_warn(dev, "Clear HEM base address failed.\n");
- } else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
- dev_warn(dev, "Clear HEM base address failed.\n");
- } else if (table->type < HEM_TYPE_MTT &&
- hop_num == HNS_ROCE_HOP_NUM_0) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
- dev_warn(dev, "Clear HEM base address failed.\n");
- }
-
- /*
- * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
- * free bt space chunk for MTT/CQE.
- */
- hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
- table->hem[hem_idx] = NULL;
-
- if (check_whether_bt_num_2(table->type, hop_num)) {
- start_idx = mhop.l0_idx * chunk_ba_num;
- if (hns_roce_check_hem_null(table->hem, start_idx,
- chunk_ba_num, table->num_hem)) {
- if (table->type < HEM_TYPE_MTT &&
- hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
- dev_warn(dev, "Clear HEM base address failed.\n");
-
- dma_free_coherent(dev, bt_chunk_size,
- table->bt_l0[mhop.l0_idx],
- table->bt_l0_dma_addr[mhop.l0_idx]);
- table->bt_l0[mhop.l0_idx] = NULL;
- }
- } else if (check_whether_bt_num_3(table->type, hop_num)) {
- start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
- mhop.l1_idx * chunk_ba_num;
- if (hns_roce_check_hem_null(table->hem, start_idx,
- chunk_ba_num, table->num_hem)) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
- dev_warn(dev, "Clear HEM base address failed.\n");
-
- dma_free_coherent(dev, bt_chunk_size,
- table->bt_l1[bt_l1_idx],
- table->bt_l1_dma_addr[bt_l1_idx]);
- table->bt_l1[bt_l1_idx] = NULL;
-
- start_idx = mhop.l0_idx * chunk_ba_num;
- if (hns_roce_check_bt_null(table->bt_l1, start_idx,
- chunk_ba_num)) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj,
- 0))
- dev_warn(dev, "Clear HEM base address failed.\n");
-
- dma_free_coherent(dev, bt_chunk_size,
- table->bt_l0[mhop.l0_idx],
- table->bt_l0_dma_addr[mhop.l0_idx]);
- table->bt_l0[mhop.l0_idx] = NULL;
- }
- }
- }
+ clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ free_mhop_hem(hr_dev, table, &mhop, &index);
mutex_unlock(&table->mutex);
}
@@ -1383,6 +1411,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
void *cpu_base;
u64 phy_base;
int ret = 0;
+ int ba_num;
int offset;
int total;
int step;
@@ -1393,12 +1422,16 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
if (root_hem)
return 0;
+ ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
+ if (ba_num < 1)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&temp_root);
- total = r->offset;
+ offset = r->offset;
/* indicate to last region */
r = &regions[region_cnt - 1];
- root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1,
- unit, true, 0);
+ root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+ ba_num, true, 0);
if (!root_hem)
return -ENOMEM;
list_add(&root_hem->list, &temp_root);
@@ -1410,7 +1443,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
INIT_LIST_HEAD(&temp_list[i]);
total = 0;
- for (i = 0; i < region_cnt && total < unit; i++) {
+ for (i = 0; i < region_cnt && total < ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1443,7 +1476,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
/* if exist mid bt, link L1 to L0 */
list_for_each_entry_safe(hem, temp_hem,
&hem_list->mid_bt[i][1], list) {
- offset = hem->start / step * BA_BYTE_LEN;
+ offset = (hem->start - r->offset) / step *
+ BA_BYTE_LEN;
hem_list_link_bt(hr_dev, cpu_base + offset,
hem->dma_addr);
total++;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index c6e66586e533..5ff028d77be3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -69,7 +69,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
struct hns_roce_wqe_data_seg *dseg = NULL;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_sq_db sq_db;
+ struct hns_roce_sq_db sq_db = {};
int ps_opcode = 0, i = 0;
unsigned long flags = 0;
void *wqe = NULL;
@@ -106,7 +106,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
goto out;
}
- wqe = get_send_wqe(qp, wqe_idx);
+ wqe = hns_roce_get_send_wqe(qp, wqe_idx);
qp->sq.wrid[wqe_idx] = wr->wr_id;
/* Corresponding to the RC and RD type wqe process separately */
@@ -318,8 +318,6 @@ out:
/* Memory barrier */
wmb();
- sq_db.u32_4 = 0;
- sq_db.u32_8 = 0;
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
SQ_DOORBELL_U32_4_SQ_HEAD_S,
(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
@@ -351,7 +349,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_rq_db rq_db;
+ struct hns_roce_rq_db rq_db = {};
__le32 doorbell[2] = {0};
unsigned long flags = 0;
unsigned int wqe_idx;
@@ -380,7 +378,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
goto out;
}
- ctrl = get_recv_wqe(hr_qp, wqe_idx);
+ ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
roce_set_field(ctrl->rwqe_byte_12,
RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
@@ -418,9 +416,6 @@ out:
ROCEE_QP1C_CFG3_0_REG +
QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
} else {
- rq_db.u32_4 = 0;
- rq_db.u32_8 = 0;
-
roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
RQ_DOORBELL_U32_4_RQ_HEAD_S,
hr_qp->rq.head);
@@ -2289,9 +2284,10 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
if (is_send) {
		/* SQ corresponds to CQE */
- sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
+ sq_wqe = hns_roce_get_send_wqe(*cur_qp,
+ roce_get_field(cqe->cqe_byte_4,
CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S)&
+ CQE_BYTE_4_WQE_INDEX_S) &
((*cur_qp)->sq.wqe_cnt-1));
switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
case HNS_ROCE_WQE_OPCODE_SEND:
@@ -3623,26 +3619,11 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (send_cq && send_cq != recv_cq)
__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
}
- hns_roce_unlock_cqs(send_cq, recv_cq);
-
hns_roce_qp_remove(hr_dev, hr_qp);
- hns_roce_qp_free(hr_dev, hr_qp);
-
- /* RC QP, release QPN */
- if (hr_qp->ibqp.qp_type == IB_QPT_RC)
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-
- hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
-
- ib_umem_release(hr_qp->umem);
- if (!udata) {
- kfree(hr_qp->sq.wrid);
- kfree(hr_qp->rq.wrid);
+ hns_roce_unlock_cqs(send_cq, recv_cq);
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- }
+ hns_roce_qp_destroy(hr_dev, hr_qp, udata);
- kfree(hr_qp);
return 0;
}
@@ -3954,10 +3935,8 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
eq->cons_index++;
aeqes_found = 1;
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
- dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1)
eq->cons_index = 0;
- }
}
set_eq_cons_index_v1(eq, 0);
@@ -4007,11 +3986,8 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
ceqes_found = 1;
if (eq->cons_index >
- EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) {
- dev_warn(&eq->hr_dev->pdev->dev,
- "cons_index overflow, set back to 0.\n");
+ EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
eq->cons_index = 0;
- }
}
set_eq_cons_index_v1(eq, 0);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 12c4cd8e9378..c3316672b70e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -56,11 +56,45 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+/*
+ * mapped-value = 1 + real-value
+ * The hns hardware opcode values start from 0. To distinguish initialized
+ * from uninitialized entries in the map, 1 is added to the actual value
+ * when defining the mapping, so an entry is known to be valid when its
+ * mapped value is greater than 0.
+ */
+#define HR_OPC_MAP(ib_key, hr_key) \
+ [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
+
+static const u32 hns_roce_op_code[] = {
+ HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
+ HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
+ HR_OPC_MAP(SEND, SEND),
+ HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
+ HR_OPC_MAP(RDMA_READ, RDMA_READ),
+ HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
+ HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
+ HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
+ HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
+ HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
+ HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
+ HR_OPC_MAP(REG_MR, FAST_REG_PMR),
+};
+
+static u32 to_hr_opcode(u32 ib_opcode)
+{
+ if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
+ return HNS_ROCE_V2_WQE_OP_MASK;
+
+ return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
+ HNS_ROCE_V2_WQE_OP_MASK;
+}
+
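Because hns_roce_op_code[] uses designated initializers, every IB opcode
without an HR_OPC_MAP() entry stays zero, and storing 1 + value makes a
zero slot unambiguously mean "unmapped". Lookup sketch:

	u32 mapped = hns_roce_op_code[IB_WR_SEND]; /* 1 + HNS_ROCE_V2_WQE_OP_SEND */
	u32 hw_op = mapped ? mapped - 1 : HNS_ROCE_V2_WQE_OP_MASK;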
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
- struct hns_roce_wqe_frmr_seg *fseg,
- const struct ib_reg_wr *wr)
+ void *wqe, const struct ib_reg_wr *wr)
{
struct hns_roce_mr *mr = to_hr_mr(wr->mr);
+ struct hns_roce_wqe_frmr_seg *fseg = wqe;
/* use ib_access_flags */
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
@@ -92,16 +126,26 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
-static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
- const struct ib_atomic_wr *wr)
+static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ int valid_num_sge)
{
- if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
- aseg->cmp_data = cpu_to_le64(wr->compare_add);
+ struct hns_roce_wqe_atomic_seg *aseg;
+
+ set_data_seg_v2(wqe, wr->sg_list);
+ aseg = wqe + sizeof(struct hns_roce_v2_wqe_data_seg);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
+ aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
} else {
- aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
+ aseg->fetchadd_swap_data =
+ cpu_to_le64(atomic_wr(wr)->compare_add);
aseg->cmp_data = 0;
}
+
+ roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
@@ -127,7 +171,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
* should calculate how many sges in the first page and the second
* page.
*/
- dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
+ dseg = hns_roce_get_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
(uintptr_t)dseg) /
sizeof(struct hns_roce_v2_wqe_data_seg);
@@ -137,7 +181,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
set_data_seg_v2(dseg++, sg + i);
(*sge_ind)++;
}
- dseg = get_send_extend_sge(qp,
+ dseg = hns_roce_get_extend_sge(qp,
(*sge_ind) & (qp->sge.sge_cnt - 1));
for (i = 0; i < se_sge_num; i++) {
set_data_seg_v2(dseg++, sg + fi_sge_num + i);
@@ -154,11 +198,11 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
- int valid_num_sge,
- const struct ib_send_wr **bad_wr)
+ int valid_num_sge)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
int j = 0;
int i;
@@ -166,15 +210,14 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
if (le32_to_cpu(rc_sq_wqe->msg_len) >
hr_dev->caps.max_sq_inline) {
- *bad_wr = wr;
- dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
- rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
+ ibdev_err(ibdev, "inline len(1-%d)=%d, illegal",
+ rc_sq_wqe->msg_len,
+ hr_dev->caps.max_sq_inline);
return -EINVAL;
}
if (wr->opcode == IB_WR_RDMA_READ) {
- *bad_wr = wr;
- dev_err(hr_dev->dev, "Not support inline data!\n");
+ ibdev_err(ibdev, "Not support inline data!\n");
return -EINVAL;
}
@@ -220,62 +263,287 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
return 0;
}
-static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state);
-
static int check_send_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct ib_qp *ibqp = &hr_qp->ibqp;
- struct device *dev = hr_dev->dev;
if (unlikely(ibqp->qp_type != IB_QPT_RC &&
ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_UD)) {
- dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
+ ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
+ ibqp->qp_type);
return -EOPNOTSUPP;
} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
hr_qp->state == IB_QPS_INIT ||
hr_qp->state == IB_QPS_RTR)) {
- dev_err(dev, "Post WQE fail, QP state %d!\n", hr_qp->state);
+ ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
+ hr_qp->state);
return -EINVAL;
} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
- dev_err(dev, "Post WQE fail, dev state %d!\n", hr_dev->state);
+ ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
+ hr_dev->state);
return -EIO;
}
return 0;
}
+static inline int calc_wr_sge_num(const struct ib_send_wr *wr, u32 *sge_len)
+{
+ int valid_num = 0;
+ u32 len = 0;
+ int i;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ len += wr->sg_list[i].length;
+ valid_num++;
+ }
+ }
+
+ *sge_len = len;
+ return valid_num;
+}
+
+static inline int set_ud_wqe(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ void *wqe, unsigned int *sge_idx,
+ unsigned int owner_bit)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
+ unsigned int curr_idx = *sge_idx;
+ int valid_num_sge;
+ u32 msg_len = 0;
+ bool loopback;
+ u8 *smac;
+
+ valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+ memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
+
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
+ V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
+ V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
+ V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
+ V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
+ roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, ah->av.mac[4]);
+ roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]);
+
+ /* MAC loopback */
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
+ loopback = ether_addr_equal_unaligned(ah->av.mac, smac) ? 1 : 0;
+
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
+
+ roce_set_field(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
+
+ ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ud_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
+ break;
+ default:
+ ud_sq_wqe->immtdata = 0;
+ break;
+ }
+
+ /* Set sig attr */
+ roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ /* Set se attr */
+ roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
+ owner_bit);
+
+ roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
+ V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);
+
+ roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
+
+ roce_set_field(ud_sq_wqe->byte_20,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ curr_idx & (qp->sge.sge_cnt - 1));
+
+ roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
+ ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey);
+ roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);
+
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
+ roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
+ roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
+ V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
+ roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port);
+
+ roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
+ ah->av.vlan_en ? 1 : 0);
+ roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index);
+
+ memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2);
+
+ set_extend_sge(qp, wr, &curr_idx, valid_num_sge);
+
+ *sge_idx = curr_idx;
+
+ return 0;
+}
+
+static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ void *wqe, unsigned int *sge_idx,
+ unsigned int owner_bit)
+{
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+ unsigned int curr_idx = *sge_idx;
+ int valid_num_sge;
+ u32 msg_len = 0;
+ int ret = 0;
+
+ valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+ memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+
+ rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
+ break;
+ case IB_WR_SEND_WITH_INV:
+ rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ default:
+ rc_sq_wqe->immtdata = 0;
+ break;
+ }
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
+ owner_bit);
+
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
+ rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ case IB_WR_REG_MR:
+ set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr));
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
+ break;
+ default:
+ break;
+ }
+
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ to_hr_opcode(wr->opcode));
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ set_atomic_seg(wr, wqe, rc_sq_wqe, valid_num_sge);
+ else if (wr->opcode != IB_WR_REG_MR)
+ ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
+ wqe, &curr_idx, valid_num_sge);
+
+ *sge_idx = curr_idx;
+
+ return ret;
+}
+
+static inline void update_sq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *qp)
+{
+ /*
+ * Hip08 hardware cannot flush the WQEs in SQ if the QP state
+ * gets into errored mode. Hence, as a workaround to this
+ * hardware limitation, driver needs to assist in flushing. But
+ * the flushing operation uses mailbox to convey the QP state to
+ * the hardware and which can sleep due to the mutex protection
+ * around the mailbox calls. Hence, use the deferred flush for
+ * now.
+ */
+ if (qp->state == IB_QPS_ERR) {
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+ } else {
+ struct hns_roce_v2_db sq_db = {};
+
+ roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+ V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
+ roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+ V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
+ V2_DB_PARAMETER_IDX_S,
+ qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
+ V2_DB_PARAMETER_SL_S, qp->sl);
+
+ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
+ }
+}
+
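A sketch of the deferred-flush shape the comment above describes; the
handler name is assumed here (the work item is the flush_work member added
to struct hns_roce_qp in hns_roce_device.h):

	static void flush_work_handle(struct work_struct *work)	/* assumed */
	{
		struct hns_roce_work *fw =
			container_of(work, struct hns_roce_work, work);

		/* process context: the sleeping, mailbox-based modify-QP
		 * that actually flushes the WQEs is safe to issue here */
	}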
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
- struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
- struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
- struct hns_roce_wqe_frmr_seg *fseg;
- struct device *dev = hr_dev->dev;
- struct hns_roce_v2_db sq_db;
- struct ib_qp_attr attr;
+ unsigned long flags = 0;
unsigned int owner_bit;
unsigned int sge_idx;
unsigned int wqe_idx;
- unsigned long flags;
- int valid_num_sge;
void *wqe = NULL;
- bool loopback;
- int attr_mask;
- u32 tmp_len;
- u32 hr_op;
- u8 *smac;
int nreq;
int ret = 0;
- int i;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -298,327 +566,37 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
- dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, qp->sq.max_gs);
+ ibdev_err(ibdev, "num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, qp->sq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
- wqe = get_send_wqe(qp, wqe_idx);
+ wqe = hns_roce_get_send_wqe(qp, wqe_idx);
qp->sq.wrid[wqe_idx] = wr->wr_id;
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
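The owner bit computed here flips on every full pass over the ring: dividing the absolute WQE index by the queue depth (a power of two) and inverting the low bit yields 1 on the first pass, 0 on the second, and so on, which is how hardware tells fresh WQEs from stale ones. A standalone sketch keeping only the bit that matters, not part of this patch:

#include <stdint.h>
#include <assert.h>

/* ilog2() stand-in for a power-of-two queue depth. */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int owner_bit(uint32_t idx, uint32_t wqe_cnt)
{
	return ~((idx >> ilog2_u32(wqe_cnt)) & 0x1) & 0x1;
}

int main(void)
{
	assert(owner_bit(0, 8) == 1);	/* first pass over the ring */
	assert(owner_bit(8, 8) == 0);	/* second pass */
	assert(owner_bit(16, 8) == 1);	/* third pass */
	return 0;
}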
- valid_num_sge = 0;
- tmp_len = 0;
-
- for (i = 0; i < wr->num_sge; i++) {
- if (likely(wr->sg_list[i].length)) {
- tmp_len += wr->sg_list[i].length;
- valid_num_sge++;
- }
- }
/* WQEs are built separately according to the QP type */
- if (ibqp->qp_type == IB_QPT_GSI) {
- ud_sq_wqe = wqe;
- memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
-
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
- V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
- V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
- V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
- V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
- roce_set_field(ud_sq_wqe->byte_48,
- V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
- V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
- ah->av.mac[4]);
- roce_set_field(ud_sq_wqe->byte_48,
- V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
- V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
- ah->av.mac[5]);
-
- /* MAC loopback */
- smac = (u8 *)hr_dev->dev_addr[qp->port];
- loopback = ether_addr_equal_unaligned(ah->av.mac,
- smac) ? 1 : 0;
-
- roce_set_bit(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
-
- roce_set_field(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
- V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
-
- ud_sq_wqe->msg_len =
- cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
-
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ud_sq_wqe->immtdata =
- cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
- break;
- default:
- ud_sq_wqe->immtdata = 0;
- break;
- }
+ if (ibqp->qp_type == IB_QPT_GSI)
+ ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ else if (ibqp->qp_type == IB_QPT_RC)
+ ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
- /* Set sig attr */
- roce_set_bit(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
-
- /* Set se attr */
- roce_set_bit(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
-
- roce_set_bit(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
-
- roce_set_field(ud_sq_wqe->byte_16,
- V2_UD_SEND_WQE_BYTE_16_PD_M,
- V2_UD_SEND_WQE_BYTE_16_PD_S,
- to_hr_pd(ibqp->pd)->pdn);
-
- roce_set_field(ud_sq_wqe->byte_16,
- V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
- valid_num_sge);
-
- roce_set_field(ud_sq_wqe->byte_20,
- V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
- V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_idx & (qp->sge.sge_cnt - 1));
-
- roce_set_field(ud_sq_wqe->byte_24,
- V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
- V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
- ud_sq_wqe->qkey =
- cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
- qp->qkey : ud_wr(wr)->remote_qkey);
- roce_set_field(ud_sq_wqe->byte_32,
- V2_UD_SEND_WQE_BYTE_32_DQPN_M,
- V2_UD_SEND_WQE_BYTE_32_DQPN_S,
- ud_wr(wr)->remote_qpn);
-
- roce_set_field(ud_sq_wqe->byte_36,
- V2_UD_SEND_WQE_BYTE_36_VLAN_M,
- V2_UD_SEND_WQE_BYTE_36_VLAN_S,
- ah->av.vlan_id);
- roce_set_field(ud_sq_wqe->byte_36,
- V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
- V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
- ah->av.hop_limit);
- roce_set_field(ud_sq_wqe->byte_36,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
- ah->av.tclass);
- roce_set_field(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
- V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
- ah->av.flowlabel);
- roce_set_field(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_SL_M,
- V2_UD_SEND_WQE_BYTE_40_SL_S,
- ah->av.sl);
- roce_set_field(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_PORTN_M,
- V2_UD_SEND_WQE_BYTE_40_PORTN_S,
- qp->port);
-
- roce_set_bit(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
- ah->av.vlan_en ? 1 : 0);
- roce_set_field(ud_sq_wqe->byte_48,
- V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
- V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
- hns_get_gid_index(hr_dev, qp->phy_port,
- ah->av.gid_index));
-
- memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
- GID_LEN_V2);
-
- set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
- } else if (ibqp->qp_type == IB_QPT_RC) {
- rc_sq_wqe = wqe;
- memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
-
- rc_sq_wqe->msg_len =
- cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
-
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- rc_sq_wqe->immtdata =
- cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
- break;
- case IB_WR_SEND_WITH_INV:
- rc_sq_wqe->inv_key =
- cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- default:
- rc_sq_wqe->immtdata = 0;
- break;
- }
-
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_FENCE_S,
- (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
-
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
-
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
-
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
-
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
- rc_sq_wqe->rkey =
- cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE:
- hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
- rc_sq_wqe->rkey =
- cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
- rc_sq_wqe->rkey =
- cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_SEND:
- hr_op = HNS_ROCE_V2_WQE_OP_SEND;
- break;
- case IB_WR_SEND_WITH_INV:
- hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
- break;
- case IB_WR_SEND_WITH_IMM:
- hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
- break;
- case IB_WR_LOCAL_INV:
- hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
- rc_sq_wqe->inv_key =
- cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- case IB_WR_REG_MR:
- hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
- fseg = wqe;
- set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
- rc_sq_wqe->rkey =
- cpu_to_le32(atomic_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(atomic_wr(wr)->remote_addr);
- break;
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
- rc_sq_wqe->rkey =
- cpu_to_le32(atomic_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(atomic_wr(wr)->remote_addr);
- break;
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- hr_op =
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
- break;
- case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- hr_op =
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
- break;
- default:
- hr_op = HNS_ROCE_V2_WQE_OP_MASK;
- break;
- }
-
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
-
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
- struct hns_roce_v2_wqe_data_seg *dseg;
-
- dseg = wqe;
- set_data_seg_v2(dseg, wr->sg_list);
- wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
- set_atomic_seg(wqe, atomic_wr(wr));
- roce_set_field(rc_sq_wqe->byte_16,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
- valid_num_sge);
- } else if (wr->opcode != IB_WR_REG_MR) {
- ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
- wqe, &sge_idx,
- valid_num_sge, bad_wr);
- if (ret)
- goto out;
- }
- } else {
- dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
- spin_unlock_irqrestore(&qp->sq.lock, flags);
+ if (ret) {
*bad_wr = wr;
- return -EOPNOTSUPP;
+ goto out;
}
}
out:
if (likely(nreq)) {
qp->sq.head += nreq;
+ qp->next_sge = sge_idx;
/* Memory barrier */
wmb();
-
- sq_db.byte_4 = 0;
- sq_db.parameter = 0;
-
- roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
- V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
- roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
- V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
- V2_DB_PARAMETER_IDX_S,
- qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
- V2_DB_PARAMETER_SL_S, qp->sl);
-
- hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
-
- qp->next_sge = sge_idx;
-
- if (qp->state == IB_QPS_ERR) {
- attr_mask = IB_QP_STATE;
- attr.qp_state = IB_QPS_ERR;
-
- ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
- qp->state, IB_QPS_ERR);
- if (ret) {
- spin_unlock_irqrestore(&qp->sq.lock, flags);
- *bad_wr = wr;
- return ret;
- }
- }
+ update_sq_db(hr_dev, qp);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -643,13 +621,11 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_rinl_sge *sge_list;
- struct device *dev = hr_dev->dev;
- struct ib_qp_attr attr;
unsigned long flags;
void *wqe = NULL;
- int attr_mask;
u32 wqe_idx;
int nreq;
int ret;
@@ -675,14 +651,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
- dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, hr_qp->rq.max_gs);
+ ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
+ wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
- wqe = get_recv_wqe(hr_qp, wqe_idx);
+ wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
for (i = 0; i < wr->num_sge; i++) {
if (!wr->sg_list[i].length)
@@ -717,20 +693,21 @@ out:
/* Memory barrier */
wmb();
- *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
-
+ /*
+ * Hip08 hardware cannot flush the WQEs in an RQ once the QP
+ * enters the error state, so the driver has to assist in
+ * flushing as a workaround for this hardware limitation. The
+ * flush operation uses a mailbox to convey the QP state to the
+ * hardware, and the mailbox calls can sleep because of the
+ * mutex protecting them. Hence, defer the flush to a workqueue
+ * for now.
+ */
if (hr_qp->state == IB_QPS_ERR) {
- attr_mask = IB_QP_STATE;
- attr.qp_state = IB_QPS_ERR;
-
- ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
- attr_mask, hr_qp->state,
- IB_QPS_ERR);
- if (ret) {
- spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
- *bad_wr = wr;
- return ret;
- }
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
+ &hr_qp->flush_flag))
+ init_flush_work(hr_dev, hr_qp);
+ } else {
+ *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
}
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
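The test_and_set_bit() guard above ensures the flush work is queued at most once per error episode: only the caller that observes HNS_ROCE_FLUSH_FLAG clear schedules init_flush_work(), and later error paths skip the re-queue. A userspace sketch of the same idiom, not part of this patch (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag flush_pending = ATOMIC_FLAG_INIT;

static void queue_flush_work(void)
{
	printf("flush work queued once\n");
}

/* Every error path calls this; only the first call after the flag
 * was cleared actually queues the work. */
static void on_error_path(void)
{
	if (!atomic_flag_test_and_set(&flush_pending))
		queue_flush_work();
}

int main(void)
{
	on_error_path();
	on_error_path();	/* prints nothing the second time */
	return 0;
}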
@@ -1448,82 +1425,63 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
-
- if (i == 0) {
- roce_set_field(req_a->vf_qpc_bt_idx_num,
- VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
- VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_qpc_bt_idx_num,
- VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
- VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
- HNS_ROCE_VF_QPC_BT_NUM);
-
- roce_set_field(req_a->vf_srqc_bt_idx_num,
- VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
- VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_srqc_bt_idx_num,
- VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
- VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
- HNS_ROCE_VF_SRQC_BT_NUM);
-
- roce_set_field(req_a->vf_cqc_bt_idx_num,
- VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
- VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_cqc_bt_idx_num,
- VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
- VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
- HNS_ROCE_VF_CQC_BT_NUM);
-
- roce_set_field(req_a->vf_mpt_bt_idx_num,
- VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
- VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
- roce_set_field(req_a->vf_mpt_bt_idx_num,
- VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
- VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
- HNS_ROCE_VF_MPT_BT_NUM);
-
- roce_set_field(req_a->vf_eqc_bt_idx_num,
- VF_RES_A_DATA_5_VF_EQC_IDX_M,
- VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
- roce_set_field(req_a->vf_eqc_bt_idx_num,
- VF_RES_A_DATA_5_VF_EQC_NUM_M,
- VF_RES_A_DATA_5_VF_EQC_NUM_S,
- HNS_ROCE_VF_EQC_NUM);
- } else {
- roce_set_field(req_b->vf_smac_idx_num,
- VF_RES_B_DATA_1_VF_SMAC_IDX_M,
- VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
- roce_set_field(req_b->vf_smac_idx_num,
- VF_RES_B_DATA_1_VF_SMAC_NUM_M,
- VF_RES_B_DATA_1_VF_SMAC_NUM_S,
- HNS_ROCE_VF_SMAC_NUM);
-
- roce_set_field(req_b->vf_sgid_idx_num,
- VF_RES_B_DATA_2_VF_SGID_IDX_M,
- VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
- roce_set_field(req_b->vf_sgid_idx_num,
- VF_RES_B_DATA_2_VF_SGID_NUM_M,
- VF_RES_B_DATA_2_VF_SGID_NUM_S,
- HNS_ROCE_VF_SGID_NUM);
-
- roce_set_field(req_b->vf_qid_idx_sl_num,
- VF_RES_B_DATA_3_VF_QID_IDX_M,
- VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
- roce_set_field(req_b->vf_qid_idx_sl_num,
- VF_RES_B_DATA_3_VF_SL_NUM_M,
- VF_RES_B_DATA_3_VF_SL_NUM_S,
- HNS_ROCE_VF_SL_NUM);
-
- roce_set_field(req_b->vf_sccc_idx_num,
- VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
- VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
- roce_set_field(req_b->vf_sccc_idx_num,
- VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
- VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
- HNS_ROCE_VF_SCCC_BT_NUM);
- }
}
+ roce_set_field(req_a->vf_qpc_bt_idx_num,
+ VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
+ VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_qpc_bt_idx_num,
+ VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
+ VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);
+
+ roce_set_field(req_a->vf_srqc_bt_idx_num,
+ VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
+ VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_srqc_bt_idx_num,
+ VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
+ VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
+ HNS_ROCE_VF_SRQC_BT_NUM);
+
+ roce_set_field(req_a->vf_cqc_bt_idx_num,
+ VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
+ VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_cqc_bt_idx_num,
+ VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
+ VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);
+
+ roce_set_field(req_a->vf_mpt_bt_idx_num,
+ VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
+ VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_mpt_bt_idx_num,
+ VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
+ VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);
+
+ roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
+ VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
+ roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
+ VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);
+
+ roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
+ VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
+ roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
+ VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);
+
+ roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
+ VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
+ roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
+ VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);
+
+ roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
+ VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
+ roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
+ VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);
+
+ roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
+ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
+ roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
+ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
+ HNS_ROCE_VF_SCCC_BT_NUM);
+
return hns_roce_cmq_send(hr_dev, desc, 2);
}
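The roce_set_field() calls above all follow one mask/shift idiom: clear the destination bits, then OR in the shifted value. A standalone sketch of that helper, not part of this patch (the real macro operates on __le32 descriptor words):

#include <stdint.h>
#include <assert.h>

/* Pack val into the bits selected by mask, where shift is the bit
 * position of the field's least significant bit. */
static void set_field(uint32_t *dst, uint32_t mask, int shift,
		      uint32_t val)
{
	*dst = (*dst & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	set_field(&reg, 0xf00, 8, 0x5);	/* 4-bit field at bits 8..11 */
	assert(reg == 0xfffff5ff);
	return 0;
}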
@@ -1691,7 +1649,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
@@ -1939,7 +1897,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
&caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
caps->sccc_hop_num = ctx_hop_num;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
@@ -1999,7 +1957,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
- if (hr_dev->pci_dev->revision == 0x21) {
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
ret = hns_roce_query_pf_timer_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
@@ -2007,16 +1965,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret);
return ret;
}
- }
-
- ret = hns_roce_alloc_vf_resource(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
- ret);
- return ret;
- }
- if (hr_dev->pci_dev->revision == 0x21) {
ret = hns_roce_set_vf_switch_param(hr_dev, 0);
if (ret) {
dev_err(hr_dev->dev,
@@ -2046,6 +1995,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
if (ret)
set_default_caps(hr_dev);
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
@@ -2298,7 +2254,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
- if (hr_dev->pci_dev->revision == 0x21)
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B)
hns_roce_function_clear(hr_dev);
hns_roce_free_link_table(hr_dev, &priv->tpq);
@@ -2461,7 +2417,9 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
if (ret)
- dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to configure sgid table, ret = %d!\n",
+ ret);
return ret;
}
@@ -2757,7 +2715,7 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
- *hr_cq->set_ci_db = cons_index & 0xffffff;
+ *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
}
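Replacing the bare 0xffffff with V2_CQ_DB_PARAMETER_CONS_IDX_M makes the width of the doorbell's consumer-index field explicit. A short sketch of the wrap behaviour, not part of this patch and assuming the macro keeps the same 24-bit value as the literal it replaces:

#include <stdint.h>
#include <assert.h>

#define CONS_IDX_MASK 0xffffffu	/* assumed 24-bit field width */

int main(void)
{
	uint32_t ci = 0x01000002;	/* consumer index past 2^24 */

	assert((ci & CONS_IDX_MASK) == 0x2);
	return 0;
}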
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -2942,7 +2900,7 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
- wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
+ wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
data_len = wc->byte_len;
for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
@@ -3013,13 +2971,11 @@ out:
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
struct hns_roce_srq *srq = NULL;
- struct hns_roce_dev *hr_dev;
struct hns_roce_v2_cqe *cqe;
struct hns_roce_qp *hr_qp;
struct hns_roce_wq *wq;
- struct ib_qp_attr attr;
- int attr_mask;
int is_send;
u16 wqe_ctr;
u32 opcode;
@@ -3043,16 +2999,17 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
V2_CQE_BYTE_16_LCL_QPN_S);
if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
- hr_dev = to_hr_dev(hr_cq->ib_cq.device);
hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (unlikely(!hr_qp)) {
- dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
- hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
+ ibdev_err(&hr_dev->ib_dev,
+ "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
return -EINVAL;
}
*cur_qp = hr_qp;
}
+ hr_qp = *cur_qp;
wc->qp = &(*cur_qp)->ibqp;
wc->vendor_err = 0;
@@ -3137,14 +3094,24 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
break;
}
- /* flush cqe if wc status is error, excluding flush error */
- if ((wc->status != IB_WC_SUCCESS) &&
- (wc->status != IB_WC_WR_FLUSH_ERR)) {
- attr_mask = IB_QP_STATE;
- attr.qp_state = IB_QPS_ERR;
- return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
- &attr, attr_mask,
- (*cur_qp)->state, IB_QPS_ERR);
+ /*
+ * Hip08 hardware cannot flush the WQEs in the SQ/RQ once the QP
+ * enters the error state, so the driver has to assist in flushing
+ * as a workaround for this hardware limitation. The flush operation
+ * uses a mailbox to convey the QP state to the hardware, and the
+ * mailbox calls can sleep because of the mutex protecting them.
+ * Hence, defer the flush to a workqueue for now and trigger it once
+ * a WC error is detected.
+ */
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR) {
+ ibdev_err(&hr_dev->ib_dev, "error cqe status is: 0x%x\n",
+ status & HNS_ROCE_V2_CQE_STATUS_MASK);
+
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
+ init_flush_work(hr_dev, hr_qp);
+
+ return 0;
}
if (wc->status == IB_WC_WR_FLUSH_ERR)
@@ -3262,14 +3229,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->port_num = roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
wc->pkey_index = 0;
- memcpy(wc->smac, cqe->smac, 4);
- wc->smac[4] = roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_SMAC_4_M,
- V2_CQE_BYTE_28_SMAC_4_S);
- wc->smac[5] = roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_SMAC_5_M,
- V2_CQE_BYTE_28_SMAC_5_S);
- wc->wc_flags |= IB_WC_WITH_SMAC;
+
if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_VID_M,
@@ -3567,14 +3527,9 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
-
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
@@ -3582,9 +3537,6 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
hr_qp->ibqp.srq) ? 0 :
ilog2((unsigned int)hr_qp->rq.wqe_cnt));
-
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
@@ -3604,280 +3556,53 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
*/
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, 0);
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
- V2_QPC_BYTE_4_SQPN_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
- roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
- V2_QPC_BYTE_20_RQWS_S, 0);
set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
/* When there is no VLAN, the field needs to be set to 0xFFF */
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, 0);
-
- /*
- * Set some fields in context to zero, Because the default values
- * of all fields in context are zero, we need not set them to 0 again.
- * but we should set the relevant fields of context mask to 0.
- */
- roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
- roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
- roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
- roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
- roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
- V2_QPC_BYTE_60_TEMPID_S, 0);
-
- roce_set_field(qpc_mask->byte_60_qpst_tempid,
- V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_tempid,
- V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
- roce_set_bit(qpc_mask->byte_60_qpst_tempid,
- V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
- roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
- roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
-
- if (hr_qp->rdb_en) {
+ if (hr_qp->rdb_en)
roce_set_bit(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
- roce_set_bit(qpc_mask->byte_68_rq_db,
- V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
- }
roce_set_field(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
((u32)hr_qp->rdb.dma) >> 1);
- roce_set_field(qpc_mask->byte_68_rq_db,
- V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
- V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
- qpc_mask->rq_db_record_addr = 0;
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
- roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, 0);
if (ibqp->srq) {
roce_set_field(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
to_hr_srq(ibqp->srq)->srqn);
- roce_set_field(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQ_EN_S, 1);
- roce_set_bit(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQ_EN_S, 0);
}
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
-
- roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
- V2_QPC_BYTE_92_SRQ_INFO_S, 0);
-
- roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
- V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
-
- roce_set_field(qpc_mask->byte_104_rq_sge,
- V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
- V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
-
- roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
- roce_set_field(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
- V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
- roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
-
- qpc_mask->rq_rnr_timer = 0;
- qpc_mask->rx_msg_len = 0;
- qpc_mask->rx_rkey_pkt_info = 0;
- qpc_mask->rx_va = 0;
-
- roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
- V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
- roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
- V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
-
- roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
- 0);
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
- V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
- V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
-
- roce_set_field(qpc_mask->byte_144_raq,
- V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
- V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
- roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
- V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
- roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
-
- roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
- V2_QPC_BYTE_148_RQ_MSN_S, 0);
- roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
- V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
-
- roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
- V2_QPC_BYTE_152_RAQ_PSN_S, 0);
- roce_set_field(qpc_mask->byte_152_raq,
- V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
- V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
-
- roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
- V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
-
- roce_set_field(qpc_mask->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
- roce_set_field(qpc_mask->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
-
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
- V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
-
roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
- roce_set_field(qpc_mask->byte_172_sq_psn,
- V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
- V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
-
- roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
- 0);
roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
- roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
-
- roce_set_field(qpc_mask->byte_176_msg_pktn,
- V2_QPC_BYTE_176_MSG_USE_PKTN_M,
- V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
- roce_set_field(qpc_mask->byte_176_msg_pktn,
- V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
- V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
-
- roce_set_field(qpc_mask->byte_184_irrl_idx,
- V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
- V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
-
- qpc_mask->cur_sge_offset = 0;
-
- roce_set_field(qpc_mask->byte_192_ext_sge,
- V2_QPC_BYTE_192_CUR_SGE_IDX_M,
- V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
- roce_set_field(qpc_mask->byte_192_ext_sge,
- V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
- V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
-
- roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
- V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
-
- roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
- V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
- roce_set_field(qpc_mask->byte_200_sq_max,
- V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
- V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
-
- roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
- roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
-
- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
- V2_QPC_BYTE_212_CHECK_FLG_S, 0);
-
- qpc_mask->sq_timer = 0;
-
- roce_set_field(qpc_mask->byte_220_retry_psn_msn,
- V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
- V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
- roce_set_field(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
- V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
-
- roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
- 0);
- roce_set_bit(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
- roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
- 0);
-
- qpc_mask->irrl_cur_sge_offset = 0;
-
- roce_set_field(qpc_mask->byte_240_irrl_tail,
- V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
- V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
- roce_set_field(qpc_mask->byte_240_irrl_tail,
- V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
- V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
- roce_set_field(qpc_mask->byte_240_irrl_tail,
- V2_QPC_BYTE_240_RX_ACK_MSN_M,
- V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
-
- roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
- V2_QPC_BYTE_248_IRRL_PSN_S, 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
- 0);
- roce_set_field(qpc_mask->byte_248_ack_psn,
- V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
- V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
- 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn,
- V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
- 0);
hr_qp->access_flags = attr->qp_access_flags;
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
- roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, 0);
-
- roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
- V2_QPC_BYTE_252_ERR_TYPE_S, 0);
-
- roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
- V2_QPC_BYTE_256_RQ_CQE_IDX_M,
- V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
- roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
- V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
- V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
}
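All the dropped roce_set_field()/roce_set_bit() calls on qpc_mask did the same thing: clear a field's mask bits to tell hardware the corresponding context bits are valid, as the deleted comment spells out. They become redundant once the mask for this transition is cleared wholesale before this function runs, which this patch presumably arranges elsewhere. A standalone sketch of the context/mask convention, not part of this patch:

#include <stdint.h>
#include <string.h>
#include <assert.h>

struct qpc { uint32_t word; };

/* Hardware applies only the context bits whose mask bits are 0, so
 * writing a field means updating the context and clearing the mask. */
static void write_field(struct qpc *ctx, struct qpc *mask,
			uint32_t field, uint32_t val)
{
	ctx->word = (ctx->word & ~field) | (val & field);
	mask->word &= ~field;
}

int main(void)
{
	struct qpc ctx, mask;

	memset(&ctx, 0, sizeof(ctx));
	memset(&mask, 0xff, sizeof(mask));	/* all fields ignored */
	write_field(&ctx, &mask, 0x00ff, 0x12);
	assert(ctx.word == 0x12 && mask.word == 0xffffff00);
	return 0;
}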
static void modify_qp_init_to_init(struct ib_qp *ibqp,
@@ -3987,21 +3712,22 @@ static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, int mtt_cnt,
u32 page_size)
{
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
if (hr_qp->rq.wqe_cnt < 1)
return true;
if (mtt_cnt < 1) {
- dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
- hr_qp->qpn);
+ ibdev_err(ibdev, "failed to find RQWQE buf ba of QP(0x%lx)\n",
+ hr_qp->qpn);
return false;
}
if (mtt_cnt < MTT_MIN_COUNT &&
(hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
- dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
- hr_qp->qpn);
+ ibdev_err(ibdev,
+ "failed to find next RQWQE buf ba of QP(0x%lx)\n",
+ hr_qp->qpn);
return false;
}
@@ -4016,7 +3742,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle_3;
dma_addr_t dma_handle_2;
@@ -4043,7 +3769,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
hr_qp->qpn, &dma_handle_2);
if (!mtts_2) {
- dev_err(dev, "qp irrl_table find failed\n");
+ ibdev_err(ibdev, "failed to find QP irrl_table\n");
return -EINVAL;
}
@@ -4051,12 +3777,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
hr_qp->qpn, &dma_handle_3);
if (!mtts_3) {
- dev_err(dev, "qp trrl_table find failed\n");
+ ibdev_err(ibdev, "failed to find QP trrl_table\n");
return -EINVAL;
}
if (attr_mask & IB_QP_ALT_PATH) {
- dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
+ ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error\n",
+ attr_mask);
return -EINVAL;
}
@@ -4201,7 +3928,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
/* mtu * (2^LP_PKTN_INI) should not be bigger than the maximum message length of 64KB */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
- V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
+ V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
@@ -4259,7 +3986,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
u64 sge_cur_blk = 0;
u64 sq_cur_blk = 0;
u32 page_size;
@@ -4268,7 +3995,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
/* Search qp buf's mtts */
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
if (count < 1) {
- dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
+ ibdev_err(ibdev, "failed to find buf pa of QP(0x%lx)\n",
+ hr_qp->qpn);
return -EINVAL;
}
@@ -4278,16 +4006,15 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
hr_qp->sge.offset / page_size,
&sge_cur_blk, 1, NULL);
if (count < 1) {
- dev_err(dev, "qp(0x%lx) sge pa find failed\n",
- hr_qp->qpn);
+ ibdev_err(ibdev, "failed to find sge pa of QP(0x%lx)\n",
+ hr_qp->qpn);
return -EINVAL;
}
}
/* Not support alternate path and path migration */
- if ((attr_mask & IB_QP_ALT_PATH) ||
- (attr_mask & IB_QP_PATH_MIG_STATE)) {
- dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
+ if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
+ ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
return -EINVAL;
}
@@ -4405,6 +4132,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
const struct ib_gid_attr *gid_attr = NULL;
int is_roce_protocol;
u16 vlan_id = 0xffff;
@@ -4446,13 +4174,13 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
- dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
- grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
+ ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
+ grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
return -EINVAL;
}
if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
- dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
+ ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
return -EINVAL;
}
@@ -4475,7 +4203,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
- if (hr_dev->pci_dev->revision == 0x21 && is_udp)
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B && is_udp)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
else
@@ -4530,7 +4258,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
/* Nothing */
;
} else {
- dev_err(hr_dev->dev, "Illegal state for QP!\n");
+ ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
ret = -EINVAL;
goto out;
}
@@ -4565,8 +4293,8 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
0);
} else {
- dev_warn(hr_dev->dev,
- "Local ACK timeout shall be 0 to 30.\n");
+ ibdev_warn(&hr_dev->ib_dev,
+ "Local ACK timeout shall be 0 to 30.\n");
}
}
@@ -4734,7 +4462,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context ctx[2];
struct hns_roce_v2_qp_context *context = ctx;
struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long sq_flag = 0;
+ unsigned long rq_flag = 0;
int ret;
/*
@@ -4752,6 +4482,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* When QP state is err, SQ and RQ WQE should be flushed */
if (new_state == IB_QPS_ERR) {
+ spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ hr_qp->state = IB_QPS_ERR;
roce_set_field(context->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
@@ -4759,8 +4491,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+ spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
if (!ibqp->srq) {
+ spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
@@ -4768,6 +4502,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+ spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
}
}
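Snapshotting the producer indexes under sq.lock/rq.lock pairs them with post_send()/post_recv(), which advance head under the same locks, so the index written into the QPC can never run ahead of a half-posted WQE. A userspace sketch of the pattern, not part of this patch (a pthread mutex standing in for the kernel spinlock):

#include <pthread.h>
#include <stdint.h>

struct wq {
	pthread_mutex_t lock;
	uint32_t head;
};

/* Read the producer index under the writers' lock so the snapshot
 * is consistent with any concurrent post operation. */
static uint32_t snapshot_head(struct wq *q)
{
	uint32_t head;

	pthread_mutex_lock(&q->lock);
	head = q->head;
	pthread_mutex_unlock(&q->lock);
	return head;
}

int main(void)
{
	struct wq q = { PTHREAD_MUTEX_INITIALIZER, 3 };

	return snapshot_head(&q) == 3 ? 0 : 1;
}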
@@ -4791,7 +4526,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* SW pass context to HW */
ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
if (ret) {
- dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
+ ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
goto out;
}
@@ -4848,10 +4583,8 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
- if (ret) {
- dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
+ if (ret)
goto out;
- }
memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
@@ -4867,7 +4600,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_qp_context context = {};
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
int tmp_qp_state;
int state;
int ret;
@@ -4885,7 +4618,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
if (ret) {
- dev_err(dev, "query qpc error\n");
+ ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
ret = -EINVAL;
goto out;
}
@@ -4894,7 +4627,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
- dev_err(dev, "Illegal ib_qp_state\n");
+ ibdev_err(ibdev, "Illegal ib_qp_state\n");
ret = -EINVAL;
goto out;
}
@@ -4992,8 +4725,8 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_udata *udata)
{
- struct hns_roce_cq *send_cq, *recv_cq;
struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_cq *send_cq, *recv_cq;
unsigned long flags;
int ret = 0;
@@ -5002,7 +4735,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
if (ret)
- ibdev_err(ibdev, "modify QP to Reset failed.\n");
+ ibdev_err(ibdev,
+ "failed to modify QP to RST, ret = %d\n",
+ ret);
}
send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
@@ -5011,10 +4746,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
hns_roce_lock_cqs(send_cq, recv_cq);
- list_del(&hr_qp->node);
- list_del(&hr_qp->sq_node);
- list_del(&hr_qp->rq_node);
-
if (!udata) {
if (recv_cq)
__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
@@ -5032,43 +4763,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
- hns_roce_qp_free(hr_dev, hr_qp);
-
- /* Not special_QP, free their QPN */
- if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
- (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
- (hr_qp->ibqp.qp_type == IB_QPT_UD))
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-
- if (udata) {
- struct hns_roce_ucontext *context =
- rdma_udata_to_drv_context(
- udata,
- struct hns_roce_ucontext,
- ibucontext);
-
- if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
- hns_roce_db_unmap_user(context, &hr_qp->sdb);
-
- if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
- hns_roce_db_unmap_user(context, &hr_qp->rdb);
- } else {
- kfree(hr_qp->sq.wrid);
- kfree(hr_qp->rq.wrid);
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- if (hr_qp->rq.wqe_cnt)
- hns_roce_free_db(hr_dev, &hr_qp->rdb);
- }
- ib_umem_release(hr_qp->umem);
-
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hr_qp->rq.wqe_cnt) {
- kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
- kfree(hr_qp->rq_inl_buf.wqe_list);
- }
-
return ret;
}
@@ -5080,17 +4774,19 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
if (ret)
- ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to destroy QP 0x%06lx, ret = %d\n",
hr_qp->qpn, ret);
- kfree(hr_qp);
+ hns_roce_qp_destroy(hr_dev, hr_qp, udata);
return 0;
}
static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp)
+ struct hns_roce_qp *hr_qp)
{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_sccc_clr_done *resp;
struct hns_roce_sccc_clr *clr;
struct hns_roce_cmq_desc desc;
@@ -5102,7 +4798,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
- dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
+ ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
goto out;
}
@@ -5112,7 +4808,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
clr->qpn = cpu_to_le32(hr_qp->qpn);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
- dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
+ ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
goto out;
}
@@ -5123,7 +4819,8 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
HNS_ROCE_OPC_QUERY_SCCC, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
- dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
+ ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
+ ret);
goto out;
}
@@ -5133,7 +4830,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
msleep(20);
}
- dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
+ ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
ret = -ETIMEDOUT;
out:
@@ -5177,99 +4874,65 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
- dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when modifying CQ, ret = %d\n",
+ ret);
return ret;
}
-static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
-{
- struct hns_roce_qp *hr_qp;
- struct ib_qp_attr attr;
- int attr_mask;
- int ret;
-
- hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
- if (!hr_qp) {
- dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
- return;
- }
-
- if (hr_qp->ibqp.uobject) {
- if (hr_qp->sdb_en == 1) {
- hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
- if (hr_qp->rdb_en == 1)
- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
- } else {
- dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
- return;
- }
- }
-
- attr_mask = IB_QP_STATE;
- attr.qp_state = IB_QPS_ERR;
- ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
- hr_qp->state, IB_QPS_ERR);
- if (ret)
- dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
- qpn);
-}
-
static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
container_of(work, struct hns_roce_work, work);
- struct device *dev = irq_work->hr_dev->dev;
+ struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
u32 qpn = irq_work->qpn;
u32 cqn = irq_work->cqn;
switch (irq_work->event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_info(dev, "Path migrated succeeded.\n");
+ ibdev_info(ibdev, "Path migrated succeeded.\n");
break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "Path migration failed.\n");
+ ibdev_warn(ibdev, "Path migration failed.\n");
break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "Send queue drained.\n");
+ ibdev_warn(ibdev, "Send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
- qpn, irq_work->sub_type);
- hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
+ qpn, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_err(dev, "Invalid request local work queue 0x%x error.\n",
- qpn);
- hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
+ qpn);
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
- qpn, irq_work->sub_type);
- hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
+ qpn, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- dev_warn(dev, "SRQ limit reach.\n");
+ ibdev_warn(ibdev, "SRQ limit reach.\n");
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
- dev_warn(dev, "SRQ last wqe reach.\n");
+ ibdev_warn(ibdev, "SRQ last wqe reach.\n");
break;
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- dev_err(dev, "SRQ catas error.\n");
+ ibdev_err(ibdev, "SRQ catas error.\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_err(dev, "CQ 0x%x access err.\n", cqn);
+ ibdev_err(ibdev, "CQ 0x%x access err.\n", cqn);
break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ ibdev_warn(ibdev, "CQ 0x%x overflow\n", cqn);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- dev_warn(dev, "DB overflow.\n");
+ ibdev_warn(ibdev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
- dev_warn(dev, "Function level reset.\n");
+ ibdev_warn(ibdev, "Function level reset.\n");
break;
default:
break;
@@ -5326,44 +4989,24 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}
-static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+static inline void *get_eqe_buf(struct hns_roce_eq *eq, unsigned long offset)
{
u32 buf_chk_sz;
- unsigned long off;
buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
- off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
-
- return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
- off % buf_chk_sz);
-}
-
-static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
-{
- u32 buf_chk_sz;
- unsigned long off;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
-
- off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
-
- if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
- return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
- off % buf_chk_sz);
+ if (eq->buf.nbufs == 1)
+ return eq->buf.direct.buf + offset % buf_chk_sz;
else
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
+ return eq->buf.page_list[offset / buf_chk_sz].buf +
+ offset % buf_chk_sz;
}
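get_eqe_buf() folds the old single-hop and multi-hop lookups into one offset calculation: a single direct buffer is indexed modulo the chunk size, while a multi-page buffer first selects the page with offset / chunk. A standalone sketch of the addressing, not part of this patch (struct names are illustrative):

#include <stddef.h>

struct page_buf { char *buf; };

struct eq_buf {
	int nbufs;
	char *direct;
	struct page_buf *pages;
};

/* One direct buffer, or a page list indexed by the chunk-sized
 * quotient of the offset. */
static void *eqe_at(struct eq_buf *b, size_t off, size_t chunk)
{
	if (b->nbufs == 1)
		return b->direct + off % chunk;
	return b->pages[off / chunk].buf + off % chunk;
}

int main(void)
{
	char direct[4096];
	struct eq_buf b = { 1, direct, NULL };

	return eqe_at(&b, 100, sizeof(direct)) == direct + 100 ? 0 : 1;
}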
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_aeqe *aeqe;
- if (!eq->hop_num)
- aeqe = get_aeqe_v2(eq, eq->cons_index);
- else
- aeqe = mhop_get_aeqe(eq, eq->cons_index);
-
+ aeqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE);
return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
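The return expression implements the usual owner-bit handshake: hardware toggles the bit it writes on each pass over the ring, and software XORs it with the phase derived from its consumer index, where (cons_index & entries) flips once per wrap because entries is a power of two. A standalone sketch, not part of this patch and assuming hardware writes owner = 1 on the first pass:

#include <stdint.h>
#include <assert.h>

/* An entry is fresh when its owner bit differs from the software
 * phase bit for the current pass. */
static int eqe_is_new(int owner, uint32_t ci, uint32_t entries)
{
	return owner ^ !!(ci & entries);
}

int main(void)
{
	/* First pass (phase 0): owner = 1 marks a fresh entry. */
	assert(eqe_is_new(1, 0, 16));
	assert(!eqe_is_new(0, 0, 16));
	/* Second pass (phase 1): owner = 0 marks a fresh entry. */
	assert(eqe_is_new(0, 16, 16));
	return 0;
}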
@@ -5456,44 +5099,12 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
return aeqe_found;
}
-static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
-{
- u32 buf_chk_sz;
- unsigned long off;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
- off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
-
- return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
- off % buf_chk_sz);
-}
-
-static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
-{
- u32 buf_chk_sz;
- unsigned long off;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
-
- off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
-
- if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
- return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
- off % buf_chk_sz);
- else
- return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
- buf_chk_sz]) + off % buf_chk_sz);
-}
-
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_ceqe *ceqe;
- if (!eq->hop_num)
- ceqe = get_ceqe_v2(eq, eq->cons_index);
- else
- ceqe = mhop_get_ceqe(eq, eq->cons_index);
-
+ ceqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE);
return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
@@ -5501,7 +5112,6 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
struct hns_roce_eq *eq)
{
- struct device *dev = hr_dev->dev;
struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
int ceqe_found = 0;
u32 cqn;
@@ -5520,10 +5130,8 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
ceqe_found = 1;
- if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
- dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
eq->cons_index = 0;
- }
ceqe = next_ceqe_sw_v2(eq);
}
@@ -5653,90 +5261,11 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
}
-static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = hr_dev->dev;
- u64 idx;
- u64 size;
- u32 buf_chk_sz;
- u32 bt_chk_sz;
- u32 mhop_num;
- int eqe_alloc;
- int i = 0;
- int j = 0;
-
- mhop_num = hr_dev->caps.eqe_hop_num;
- buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
- bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0) {
- dma_free_coherent(dev, (unsigned int)(eq->entries *
- eq->eqe_size), eq->bt_l0, eq->l0_dma);
- return;
- }
-
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
- if (mhop_num == 1) {
- for (i = 0; i < eq->l0_last_num; i++) {
- if (i == eq->l0_last_num - 1) {
- eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
- size = (eq->entries - eqe_alloc) * eq->eqe_size;
- dma_free_coherent(dev, size, eq->buf[i],
- eq->buf_dma[i]);
- break;
- }
- dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
- eq->buf_dma[i]);
- }
- } else if (mhop_num == 2) {
- for (i = 0; i < eq->l0_last_num; i++) {
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
- eq->l1_dma[i]);
-
- for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
- idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
- if ((i == eq->l0_last_num - 1)
- && j == eq->l1_last_num - 1) {
- eqe_alloc = (buf_chk_sz / eq->eqe_size)
- * idx;
- size = (eq->entries - eqe_alloc)
- * eq->eqe_size;
- dma_free_coherent(dev, size,
- eq->buf[idx],
- eq->buf_dma[idx]);
- break;
- }
- dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
- eq->buf_dma[idx]);
- }
- }
- }
- kfree(eq->buf_dma);
- kfree(eq->buf);
- kfree(eq->l1_dma);
- kfree(eq->bt_l1);
- eq->buf_dma = NULL;
- eq->buf = NULL;
- eq->l1_dma = NULL;
- eq->bt_l1 = NULL;
-}
-
-static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
- u32 buf_chk_sz;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
-
- if (hr_dev->caps.eqe_hop_num) {
- hns_roce_mhop_free_eq(hr_dev, eq);
- return;
- }
-
- dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
- eq->buf_list->map);
- kfree(eq->buf_list);
+ if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
+ hns_roce_buf_free(hr_dev, eq->buf.size, &eq->buf);
}
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
@@ -5744,6 +5273,8 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
void *mb_buf)
{
struct hns_roce_eq_context *eqc;
+ u64 ba[MTT_MIN_COUNT] = { 0 };
+ int count;
eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context));
@@ -5759,10 +5290,23 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
eq->shift = ilog2((unsigned int)eq->entries);
- if (!eq->hop_num)
- eq->eqe_ba = eq->buf_list->map;
- else
- eq->eqe_ba = eq->l0_dma;
+ /* if not multi-hop, the eqe buffer uses only one chunk */
+ if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) {
+ eq->eqe_ba = eq->buf.direct.map;
+ eq->cur_eqe_ba = eq->eqe_ba;
+ if (eq->buf.npages > 1)
+ eq->nxt_eqe_ba = eq->eqe_ba + (1 << eq->eqe_buf_pg_sz);
+ else
+ eq->nxt_eqe_ba = eq->eqe_ba;
+ } else {
+ count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, ba,
+ MTT_MIN_COUNT, &eq->eqe_ba);
+ eq->cur_eqe_ba = ba[0];
+ if (count > 1)
+ eq->nxt_eqe_ba = ba[1];
+ else
+ eq->nxt_eqe_ba = ba[0];
+ }
/* set eqc state */
roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
@@ -5860,220 +5404,97 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
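
Both branches of the new BA setup leave cur_eqe_ba and nxt_eqe_ba pointing at the first two pages of the queue, with next aliasing current when only a single page exists. The fallback pattern, sketched with a hypothetical helper (not a driver API):

#include <linux/types.h>

/* Pick the first two page addresses, aliasing when only one exists. */
static void pick_prefetch_addrs(dma_addr_t *pages, int npages,
                                dma_addr_t *cur, dma_addr_t *nxt)
{
        *cur = pages[0];
        *nxt = (npages > 1) ? pages[1] : pages[0];
}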
-static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
+ u32 page_shift)
{
- struct device *dev = hr_dev->dev;
- int eq_alloc_done = 0;
- int eq_buf_cnt = 0;
- int eqe_alloc;
- u32 buf_chk_sz;
- u32 bt_chk_sz;
- u32 mhop_num;
- u64 size;
- u64 idx;
+ struct hns_roce_buf_region region = {};
+ dma_addr_t *buf_list = NULL;
int ba_num;
- int bt_num;
- int record_i;
- int record_j;
- int i = 0;
- int j = 0;
-
- mhop_num = hr_dev->caps.eqe_hop_num;
- buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
- bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+ int ret;
ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
- buf_chk_sz);
- bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0) {
- if (eq->entries > buf_chk_sz / eq->eqe_size) {
- dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
- eq->entries);
- return -EINVAL;
- }
- eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
- &(eq->l0_dma), GFP_KERNEL);
- if (!eq->bt_l0)
- return -ENOMEM;
+ 1 << page_shift);
+ hns_roce_init_buf_region(&region, hr_dev->caps.eqe_hop_num, 0, ba_num);
- eq->cur_eqe_ba = eq->l0_dma;
- eq->nxt_eqe_ba = 0;
+ /* alloc a tmp list for storing eq buf addresses */
+ ret = hns_roce_alloc_buf_list(&region, &buf_list, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "alloc eq buf_list error\n");
+ return ret;
+ }
- return 0;
+ ba_num = hns_roce_get_kmem_bufs(hr_dev, buf_list, region.count,
+ region.offset, &eq->buf);
+ if (ba_num != region.count) {
+ dev_err(hr_dev->dev, "get eqe buf err, expect %d, ret %d.\n",
+ region.count, ba_num);
+ ret = -ENOBUFS;
+ goto done;
}
- eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
- if (!eq->buf_dma)
- return -ENOMEM;
- eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
- if (!eq->buf)
- goto err_kcalloc_buf;
-
- if (mhop_num == 2) {
- eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
- if (!eq->l1_dma)
- goto err_kcalloc_l1_dma;
-
- eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
- if (!eq->bt_l1)
- goto err_kcalloc_bt_l1;
- }
-
- /* alloc L0 BT */
- eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
- if (!eq->bt_l0)
- goto err_dma_alloc_l0;
-
- if (mhop_num == 1) {
- if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
- dev_err(dev, "ba_num %d is too large for 1 hop\n",
- ba_num);
-
- /* alloc buf */
- for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
- if (eq_buf_cnt + 1 < ba_num) {
- size = buf_chk_sz;
- } else {
- eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
- size = (eq->entries - eqe_alloc) * eq->eqe_size;
- }
- eq->buf[i] = dma_alloc_coherent(dev, size,
- &(eq->buf_dma[i]),
- GFP_KERNEL);
- if (!eq->buf[i])
- goto err_dma_alloc_buf;
+ hns_roce_mtr_init(&eq->mtr, PAGE_SHIFT + hr_dev->caps.eqe_ba_pg_sz,
+ page_shift);
+ ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, &region, 1);
+ if (ret)
+ dev_err(hr_dev->dev, "mtr attach error for eqe\n");
- *(eq->bt_l0 + i) = eq->buf_dma[i];
+ goto done;
- eq_buf_cnt++;
- if (eq_buf_cnt >= ba_num)
- break;
- }
- eq->cur_eqe_ba = eq->buf_dma[0];
- if (ba_num > 1)
- eq->nxt_eqe_ba = eq->buf_dma[1];
-
- } else if (mhop_num == 2) {
- /* alloc L1 BT and buf */
- for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
- eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
- &(eq->l1_dma[i]),
- GFP_KERNEL);
- if (!eq->bt_l1[i])
- goto err_dma_alloc_l1;
- *(eq->bt_l0 + i) = eq->l1_dma[i];
-
- for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
- idx = i * bt_chk_sz / BA_BYTE_LEN + j;
- if (eq_buf_cnt + 1 < ba_num) {
- size = buf_chk_sz;
- } else {
- eqe_alloc = (buf_chk_sz / eq->eqe_size)
- * idx;
- size = (eq->entries - eqe_alloc)
- * eq->eqe_size;
- }
- eq->buf[idx] = dma_alloc_coherent(dev, size,
- &(eq->buf_dma[idx]),
- GFP_KERNEL);
- if (!eq->buf[idx])
- goto err_dma_alloc_buf;
-
- *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
-
- eq_buf_cnt++;
- if (eq_buf_cnt >= ba_num) {
- eq_alloc_done = 1;
- break;
- }
- }
+ hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
+done:
+ hns_roce_free_buf_list(&buf_list, 1);
- if (eq_alloc_done)
- break;
- }
- eq->cur_eqe_ba = eq->buf_dma[0];
- if (ba_num > 1)
- eq->nxt_eqe_ba = eq->buf_dma[1];
- }
+ return ret;
+}
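
map_eq_buf() follows a gather-attach-free pattern: the per-page DMA addresses are collected into a temporary list, handed to the MTR in one attach call, and the list is freed either way because the MTR keeps its own copy of the translation. The shape of that flow, as comments over the calls visible above:

/* Gather-attach-free flow in map_eq_buf():
 *   1. hns_roce_alloc_buf_list()  - temporary DMA address array
 *   2. hns_roce_get_kmem_bufs()   - fill it from the allocated pages
 *   3. hns_roce_mtr_attach()      - hand translations to the MTR
 *   4. hns_roce_free_buf_list()   - drop the temp array either way
 */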
- eq->l0_last_num = i + 1;
- if (mhop_num == 2)
- eq->l1_last_num = j + 1;
+static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ struct hns_roce_buf *buf = &eq->buf;
+ bool is_mhop = false;
+ u32 page_shift;
+ u32 mhop_num;
+ u32 max_size;
+ int ret;
- return 0;
+ page_shift = PAGE_SHIFT + hr_dev->caps.eqe_buf_pg_sz;
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ if (!mhop_num) {
+ max_size = 1 << page_shift;
+ buf->size = max_size;
+ } else if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ max_size = eq->entries * eq->eqe_size;
+ buf->size = max_size;
+ } else {
+ max_size = 1 << page_shift;
+ buf->size = PAGE_ALIGN(eq->entries * eq->eqe_size);
+ is_mhop = true;
+ }
-err_dma_alloc_l1:
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
- eq->bt_l0 = NULL;
- eq->l0_dma = 0;
- for (i -= 1; i >= 0; i--) {
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
- eq->l1_dma[i]);
-
- for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
- idx = i * bt_chk_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
- eq->buf_dma[idx]);
- }
+ ret = hns_roce_buf_alloc(hr_dev, buf->size, max_size, buf, page_shift);
+ if (ret) {
+ dev_err(hr_dev->dev, "alloc eq buf error\n");
+ return ret;
}
- goto err_dma_alloc_l0;
-
-err_dma_alloc_buf:
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
- eq->bt_l0 = NULL;
- eq->l0_dma = 0;
-
- if (mhop_num == 1)
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
- eq->buf_dma[i]);
- else if (mhop_num == 2) {
- record_i = i;
- record_j = j;
- for (; i >= 0; i--) {
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
- eq->l1_dma[i]);
-
- for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
- if (i == record_i && j >= record_j)
- break;
-
- idx = i * bt_chk_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, buf_chk_sz,
- eq->buf[idx],
- eq->buf_dma[idx]);
- }
+
+ if (is_mhop) {
+ ret = map_eq_buf(hr_dev, eq, page_shift);
+ if (ret) {
+ dev_err(hr_dev->dev, "map roce buf error\n");
+ goto err_alloc;
}
}
-err_dma_alloc_l0:
- kfree(eq->bt_l1);
- eq->bt_l1 = NULL;
-
-err_kcalloc_bt_l1:
- kfree(eq->l1_dma);
- eq->l1_dma = NULL;
-
-err_kcalloc_l1_dma:
- kfree(eq->buf);
- eq->buf = NULL;
-
-err_kcalloc_buf:
- kfree(eq->buf_dma);
- eq->buf_dma = NULL;
-
- return -ENOMEM;
+ return 0;
+err_alloc:
+ hns_roce_buf_free(hr_dev, buf->size, buf);
+ return ret;
}
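
A worked sizing example may help; the numbers below are assumptions (4 KiB pages, an eqe_size of 32 bytes, 4096 entries), not values taken from the driver:

/* Worked example under the assumptions above:
 *   !mhop (hop_num == 0): buf->size = 1 << page_shift = one 4 KiB page,
 *                         so the whole queue must fit in a single page;
 *   HNS_ROCE_HOP_NUM_0:   buf->size = 4096 * 32 = 128 KiB, contiguous;
 *   multi-hop:            buf->size = PAGE_ALIGN(4096 * 32) = 128 KiB,
 *                         split across 32 discontiguous pages and then
 *                         mapped through map_eq_buf().
 */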
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
struct hns_roce_eq *eq,
unsigned int eq_cmd)
{
- struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
- u32 buf_chk_sz = 0;
int ret;
/* Allocate mailbox memory */
@@ -6081,38 +5502,17 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- if (!hr_dev->caps.eqe_hop_num) {
- buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
-
- eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
- GFP_KERNEL);
- if (!eq->buf_list) {
- ret = -ENOMEM;
- goto free_cmd_mbox;
- }
-
- eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
- &(eq->buf_list->map),
- GFP_KERNEL);
- if (!eq->buf_list->buf) {
- ret = -ENOMEM;
- goto err_alloc_buf;
- }
-
- } else {
- ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
- if (ret) {
- ret = -ENOMEM;
- goto free_cmd_mbox;
- }
+ ret = alloc_eq_buf(hr_dev, eq);
+ if (ret) {
+ ret = -ENOMEM;
+ goto free_cmd_mbox;
}
-
hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
- dev_err(dev, "[mailbox cmd] create eqc failed.\n");
+ dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
goto err_cmd_mbox;
}
@@ -6121,16 +5521,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
return 0;
err_cmd_mbox:
- if (!hr_dev->caps.eqe_hop_num)
- dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
- eq->buf_list->map);
- else {
- hns_roce_mhop_free_eq(hr_dev, eq);
- goto free_cmd_mbox;
- }
-
-err_alloc_buf:
- kfree(eq->buf_list);
+ free_eq_buf(hr_dev, eq);
free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -6292,8 +5683,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
goto err_request_irq_fail;
}
- hr_dev->irq_workq =
- create_singlethread_workqueue("hns_roce_irq_workqueue");
+ hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
if (!hr_dev->irq_workq) {
dev_err(dev, "Create irq workqueue failed!\n");
ret = -ENOMEM;
@@ -6310,7 +5700,7 @@ err_request_irq_fail:
err_create_eq_fail:
for (i -= 1; i >= 0; i--)
- hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+ free_eq_buf(hr_dev, &eq_table->eq[i]);
kfree(eq_table->eq);
return ret;
@@ -6332,7 +5722,7 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < eq_num; i++) {
hns_roce_v2_destroy_eqc(hr_dev, i);
- hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+ free_eq_buf(hr_dev, &eq_table->eq[i]);
}
kfree(eq_table->eq);
@@ -6472,8 +5862,9 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(hr_dev->dev,
- "MODIFY SRQ Failed to cmd mailbox.\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when modifying SRQ, ret = %d\n",
+ ret);
return ret;
}
}
@@ -6499,7 +5890,9 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
HNS_ROCE_CMD_QUERY_SRQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
- dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying SRQ, ret = %d\n",
+ ret);
goto out;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 2a117ff6a6be..82dd9f6f4845 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -50,15 +50,14 @@
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ 0x100000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
-#define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
+#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
-#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
-#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
-#define HNS_ROCE_V2_MAX_SRQ_SGE_NUM 0x100
+#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
+#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
@@ -163,7 +162,7 @@ enum {
#define GID_LEN_V2 16
-#define HNS_ROCE_V2_CQE_QPN_MASK 0x3ffff
+#define HNS_ROCE_V2_CQE_QPN_MASK 0xfffff
enum {
HNS_ROCE_V2_WQE_OP_SEND = 0x0,
@@ -460,8 +459,8 @@ enum hns_roce_v2_qp_state {
HNS_ROCE_QP_ST_INIT,
HNS_ROCE_QP_ST_RTR,
HNS_ROCE_QP_ST_RTS,
- HNS_ROCE_QP_ST_SQER,
HNS_ROCE_QP_ST_SQD,
+ HNS_ROCE_QP_ST_SQER,
HNS_ROCE_QP_ST_ERR,
HNS_ROCE_QP_ST_SQ_DRAINING,
HNS_ROCE_QP_NUM_ST
@@ -1056,11 +1055,6 @@ struct hns_roce_v2_mpt_entry {
#define V2_DB_PARAMETER_SL_S 16
#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
-struct hns_roce_v2_cq_db {
- __le32 byte_4;
- __le32 parameter;
-};
-
#define V2_CQ_DB_BYTE_4_TAG_S 0
#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index b9898e71655a..176f34692f88 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -243,7 +243,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
/* Allocate MTT entry */
ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
mtt->mtt_type);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 780c780fdb22..b10c50b8736e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -60,14 +60,12 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ib_dev = ibpd->device;
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = hr_dev->dev;
struct hns_roce_pd *pd = to_hr_pd(ibpd);
int ret;
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
if (ret) {
- dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
+ ibdev_err(ib_dev, "failed to alloc pd, ret = %d\n", ret);
return ret;
}
@@ -76,7 +74,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
- dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+ ibdev_err(ib_dev, "failed to copy to udata\n");
return -EFAULT;
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 3257ad11be48..6317901c4b4f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -43,6 +43,45 @@
#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
+static void flush_work_handle(struct work_struct *work)
+{
+ struct hns_roce_work *flush_work = container_of(work,
+ struct hns_roce_work, work);
+ struct hns_roce_qp *hr_qp = container_of(flush_work,
+ struct hns_roce_qp, flush_work);
+ struct device *dev = flush_work->hr_dev->dev;
+ struct ib_qp_attr attr;
+ int attr_mask;
+ int ret;
+
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+
+ if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
+ ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
+ if (ret)
+ dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
+ ret);
+ }
+
+ /*
+ * make sure we signal the QP destroy path that the flush has
+ * completed, so it can safely proceed and destroy the QP
+ */
+ if (atomic_dec_and_test(&hr_qp->refcount))
+ complete(&hr_qp->free);
+}
+
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_work *flush_work = &hr_qp->flush_work;
+
+ flush_work->hr_dev = hr_dev;
+ INIT_WORK(&flush_work->work, flush_work_handle);
+ atomic_inc(&hr_qp->refcount);
+ queue_work(hr_dev->irq_workq, &flush_work->work);
+}
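
init_flush_work() takes an extra reference before queueing so the QP cannot be freed while the flush is in flight; the handler and the destroy path each drop a reference, and whichever reaches zero signals the completion. The handshake, condensed into a sketch around a hypothetical object:

#include <linux/atomic.h>
#include <linux/completion.h>

struct obj { atomic_t refcount; struct completion free; }; /* hypothetical */

static void put_obj(struct obj *o)
{
        /* the last reference holder signals whoever waits in destroy */
        if (atomic_dec_and_test(&o->refcount))
                complete(&o->free);
}

static void destroy_obj(struct obj *o)
{
        put_obj(o);                    /* drop the creation reference */
        wait_for_completion(&o->free); /* wait out any queued work */
        /* only now is it safe to free o */
}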
+
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
struct device *dev = hr_dev->dev;
@@ -59,6 +98,15 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
return;
}
+ if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
+ (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
+ qp->state = IB_QPS_ERR;
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+ }
+
qp->event(qp, (enum hns_roce_event)event_type);
if (atomic_dec_and_test(&qp->refcount))
@@ -108,15 +156,34 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
}
}
-static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
- int align, unsigned long *base)
+static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
- struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ unsigned long num = 0;
+ int ret;
- return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
- base) ?
- -ENOMEM :
- 0;
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ /* when hw version is v1, the sqpn is statically assigned per port */
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
+ else
+ num = 1;
+
+ hr_qp->doorbell_qpn = 1;
+ } else {
+ ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
+ 1, 1, &num);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
+ return -ENOMEM;
+ }
+
+ hr_qp->doorbell_qpn = (u32)num;
+ }
+
+ hr_qp->qpn = num;
+
+ return 0;
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -139,50 +206,75 @@ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
}
}
-static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
- struct hns_roce_qp *hr_qp)
+static void add_qp_to_list(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_cq *send_cq, struct ib_cq *recv_cq)
+{
+ struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
+ unsigned long flags;
+
+ hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
+ hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
+
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+ hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
+
+ list_add_tail(&hr_qp->node, &hr_dev->qp_list);
+ if (hr_send_cq)
+ list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
+ if (hr_recv_cq)
+ list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
+
+ hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
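
add_qp_to_list() nests the two CQ locks inside qp_list_lock, so hns_roce_lock_cqs() (not shown in this hunk) has to take them in a stable order to stay deadlock-free. A common pattern for that, ordering by address, sketched here without claiming it is the driver's exact implementation:

#include <linux/spinlock.h>

/* Lock two spinlocks in a stable (address) order; sketch only. */
static void lock_two(spinlock_t *a, spinlock_t *b)
{
        if (a == b) {
                spin_lock(a);
        } else if (a < b) {
                spin_lock(a);
                spin_lock(b);
        } else {
                spin_lock(b);
                spin_lock(a);
        }
}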
+
+static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr)
{
struct xarray *xa = &hr_dev->qp_table_xa;
int ret;
- if (!qpn)
+ if (!hr_qp->qpn)
return -EINVAL;
- hr_qp->qpn = qpn;
- atomic_set(&hr_qp->refcount, 1);
- init_completion(&hr_qp->free);
-
- ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1),
- hr_qp, GFP_KERNEL));
+ ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
if (ret)
- dev_err(hr_dev->dev, "QPC xa_store failed\n");
+ dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
+ else
+ /* add QP to the device's QP list for software completion handling */
+ add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
+ init_attr->recv_cq);
return ret;
}
-static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
- struct hns_roce_qp *hr_qp)
+static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
struct device *dev = hr_dev->dev;
int ret;
- if (!qpn)
+ if (!hr_qp->qpn)
return -EINVAL;
- hr_qp->qpn = qpn;
+ /* In v1 engine, GSI QP context is saved in the RoCE hw's registers */
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
+ hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ return 0;
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "QPC table get failed\n");
+ dev_err(dev, "Failed to get QPC table\n");
goto err_out;
}
/* Alloc memory for IRRL */
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "IRRL table get failed\n");
+ dev_err(dev, "Failed to get IRRL table\n");
goto err_put_qp;
}
@@ -191,7 +283,7 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "TRRL table get failed\n");
+ dev_err(dev, "Failed to get TRRL table\n");
goto err_put_irrl;
}
}
@@ -201,22 +293,13 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "SCC CTX table get failed\n");
+ dev_err(dev, "Failed to get SCC CTX table\n");
goto err_put_trrl;
}
}
- ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
- if (ret)
- goto err_put_sccc;
-
return 0;
-err_put_sccc:
- if (hr_dev->caps.sccc_entry_sz)
- hns_roce_table_put(hr_dev, &qp_table->sccc_table,
- hr_qp->qpn);
-
err_put_trrl:
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
@@ -236,88 +319,84 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
struct xarray *xa = &hr_dev->qp_table_xa;
unsigned long flags;
+ list_del(&hr_qp->node);
+ list_del(&hr_qp->sq_node);
+ list_del(&hr_qp->rq_node);
+
xa_lock_irqsave(xa, flags);
__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
xa_unlock_irqrestore(xa, flags);
}
-void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- if (atomic_dec_and_test(&hr_qp->refcount))
- complete(&hr_qp->free);
- wait_for_completion(&hr_qp->free);
+ /* In v1 engine, GSI QP context is saved in the RoCE hw's registers */
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
+ hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ return;
- if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
- if (hr_dev->caps.trrl_entry_sz)
- hns_roce_table_put(hr_dev, &qp_table->trrl_table,
- hr_qp->qpn);
- hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
- }
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}
-void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
- int cnt)
+static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- if (base_qpn < hr_dev->caps.reserved_qps)
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ return;
+
+ if (hr_qp->qpn < hr_dev->caps.reserved_qps)
return;
- hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
+ hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}
-static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
+static int set_rq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, bool is_user, int has_rq,
struct hns_roce_qp *hr_qp)
{
- struct device *dev = hr_dev->dev;
u32 max_cnt;
- /* Check the validity of QP support capacity */
- if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
- cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
- dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
- cap->max_recv_wr, cap->max_recv_sge);
- return -EINVAL;
- }
-
/* If srq exist, set zero for relative number of rq */
if (!has_rq) {
hr_qp->rq.wqe_cnt = 0;
hr_qp->rq.max_gs = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
- } else {
- if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
- dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
- return -EINVAL;
- }
- if (hr_dev->caps.min_wqes)
- max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
- else
- max_cnt = cap->max_recv_wr;
+ return 0;
+ }
- hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
+ /* Check the validity of QP support capacity */
+ if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
+ cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
+ ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
+ cap->max_recv_wr, cap->max_recv_sge);
+ return -EINVAL;
+ }
- if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
- dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
- return -EINVAL;
- }
+ max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
- max_cnt = max(1U, cap->max_recv_sge);
- hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
- if (hr_dev->caps.max_rq_sg <= 2)
- hr_qp->rq.wqe_shift =
- ilog2(hr_dev->caps.max_rq_desc_sz);
- else
- hr_qp->rq.wqe_shift =
- ilog2(hr_dev->caps.max_rq_desc_sz
- * hr_qp->rq.max_gs);
+ hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
+ if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
+ ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
+ cap->max_recv_wr);
+ return -EINVAL;
}
+ max_cnt = max(1U, cap->max_recv_sge);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
+
+ if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+ else
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);
+
cap->max_recv_wr = hr_qp->rq.wqe_cnt;
cap->max_recv_sge = hr_qp->rq.max_gs;
@@ -334,12 +413,12 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
/* Sanity check SQ size before proceeding */
if (ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
- ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
+ ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
return -EINVAL;
}
if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
- ibdev_err(&hr_dev->ib_dev, "SQ sge error! max_send_sge=%d\n",
+ ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
cap->max_send_sge);
return -EINVAL;
}
@@ -347,10 +426,9 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
- struct ib_qp_cap *cap,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_ib_create_qp *ucmd)
+static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
{
u32 ex_sge_num;
u32 page_size;
@@ -363,27 +441,28 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
- ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
+ ibdev_err(&hr_dev->ib_dev, "Failed to check user SQ size limit\n");
return ret;
}
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->caps.max_sq_sg <= 2)
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
else
hr_qp->sq.max_gs = max_cnt;
- if (hr_qp->sq.max_gs > 2)
+ if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
- if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
+ if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE &&
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- dev_err(hr_dev->dev,
- "The extended sge cnt error! sge_cnt=%d\n",
- hr_qp->sge.sge_cnt);
+ ibdev_err(&hr_dev->ib_dev,
+ "Failed to check extended SGE size limit %d\n",
+ hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
@@ -392,7 +471,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
ex_sge_num = hr_qp->sge.sge_cnt;
/* Get buf size, SQ and RQ are aligned to page_size */
- if (hr_dev->caps.max_sq_sg <= 2) {
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
round_up((hr_qp->sq.wqe_cnt <<
@@ -492,30 +571,6 @@ static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
return region_cnt;
}
-static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
- struct hns_roce_buf_region *regions,
- int region_cnt)
-{
- int bt_pg_shift;
- int ba_num;
- int ret;
-
- bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz;
-
- /* all root ba entries must in one bt page */
- do {
- ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN;
- ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt,
- ba_num);
- if (ret <= ba_num)
- break;
-
- bt_pg_shift++;
- } while (ret > ba_num);
-
- return bt_pg_shift - PAGE_SHIFT;
-}
-
static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
@@ -528,13 +583,15 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
}
/* ud sqwqe's sge use extend sge */
- if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
+ hr_qp->ibqp.qp_type == IB_QPT_GSI) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
hr_qp->sq.max_gs);
hr_qp->sge.sge_shift = 4;
}
- if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+ if (hr_qp->sq.max_gs > 2 &&
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
@@ -545,46 +602,43 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
- struct ib_qp_cap *cap,
- struct hns_roce_qp *hr_qp)
+static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
- struct device *dev = hr_dev->dev;
u32 page_size;
u32 max_cnt;
int size;
int ret;
- if (cap->max_send_wr > hr_dev->caps.max_wqes ||
+ if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
- dev_err(dev, "SQ WR or sge or inline data error!\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "SQ WR or sge or inline data error!\n");
return -EINVAL;
}
hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
- if (hr_dev->caps.min_wqes)
- max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
- else
- max_cnt = cap->max_send_wr;
+ max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
- dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "while setting kernel sq size, sq.wqe_cnt too large\n");
return -EINVAL;
}
/* Get data_seg numbers */
max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->caps.max_sq_sg <= 2)
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
else
hr_qp->sq.max_gs = max_cnt;
ret = set_extend_sge_param(hr_dev, hr_qp);
if (ret) {
- dev_err(dev, "set extend sge parameters fail\n");
+ ibdev_err(&hr_dev->ib_dev, "set extend sge parameters fail\n");
return ret;
}
@@ -593,7 +647,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.offset = 0;
size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
- if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+ if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
(u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
@@ -677,362 +731,449 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
kfree(hr_qp->rq_inl_buf.wqe_list);
}
-static void add_qp_to_list(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct ib_cq *send_cq, struct ib_cq *recv_cq)
-{
- struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
- unsigned long flags;
-
- hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
- hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
-
- spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
- hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
-
- list_add_tail(&hr_qp->node, &hr_dev->qp_list);
- if (hr_send_cq)
- list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
- if (hr_recv_cq)
- list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
-
- hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
- spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
-}
-
-static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
- struct ib_pd *ib_pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, unsigned long sqpn,
- struct hns_roce_qp *hr_qp)
+static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ u32 page_shift, bool is_user)
{
- dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
- struct device *dev = hr_dev->dev;
- struct hns_roce_ib_create_qp ucmd;
- struct hns_roce_ib_create_qp_resp resp = {};
- struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
+/* WQE buffer includes 3 parts: SQ, extend SGE and RQ. */
+#define HNS_ROCE_WQE_REGION_MAX 3
+ struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {};
+ dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_region *r;
- unsigned long qpn = 0;
- u32 page_shift;
+ int region_count;
int buf_count;
int ret;
int i;
- mutex_init(&hr_qp->mutex);
- spin_lock_init(&hr_qp->sq.lock);
- spin_lock_init(&hr_qp->rq.lock);
-
- hr_qp->state = IB_QPS_RESET;
-
- hr_qp->ibqp.qp_type = init_attr->qp_type;
-
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
- hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
- else
- hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+ region_count = split_wqe_buf_region(hr_dev, hr_qp, regions,
+ ARRAY_SIZE(regions), page_shift);
- ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
- hns_roce_qp_has_rq(init_attr), hr_qp);
+ /* alloc a tmp list to store WQE buffer addresses */
+ ret = hns_roce_alloc_buf_list(regions, buf_list, region_count);
if (ret) {
- dev_err(dev, "hns_roce_set_rq_size failed\n");
- goto err_out;
+ ibdev_err(ibdev, "Failed to alloc WQE buffer list\n");
+ return ret;
}
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hns_roce_qp_has_rq(init_attr)) {
- ret = alloc_rq_inline_buf(hr_qp, init_attr);
- if (ret) {
- dev_err(dev, "allocate receive inline buffer failed\n");
- goto err_out;
+ for (i = 0; i < region_count; i++) {
+ r = &regions[i];
+ if (is_user)
+ buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i],
+ r->count, r->offset, hr_qp->umem,
+ page_shift);
+ else
+ buf_count = hns_roce_get_kmem_bufs(hr_dev, buf_list[i],
+ r->count, r->offset, &hr_qp->hr_buf);
+
+ if (buf_count != r->count) {
+ ibdev_err(ibdev, "Failed to get %s WQE buf, expect %d, got %d.\n",
+ is_user ? "user" : "kernel",
+ r->count, buf_count);
+ ret = -ENOBUFS;
+ goto done;
}
}
- page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
- if (udata) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- dev_err(dev, "ib_copy_from_udata error for create qp\n");
- ret = -EFAULT;
- goto err_alloc_rq_inline_buf;
- }
+ hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;
+ hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
+ page_shift);
+ ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions,
+ region_count);
+ if (ret)
+ ibdev_err(ibdev, "Failed to attach WQE's mtr\n");
+
+ goto done;
- ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
- &ucmd);
+ hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+done:
+ hns_roce_free_buf_list(buf_list, region_count);
+
+ return ret;
+}
+
+static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, unsigned long addr)
+{
+ u32 page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ bool is_rq_buf_inline;
+ int ret;
+
+ is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+ hns_roce_qp_has_rq(init_attr);
+ if (is_rq_buf_inline) {
+ ret = alloc_rq_inline_buf(hr_qp, init_attr);
if (ret) {
- dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
- goto err_alloc_rq_inline_buf;
+ ibdev_err(ibdev, "Failed to alloc inline RQ buffer\n");
+ return ret;
}
+ }
- hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
- hr_qp->buff_size, 0);
+ if (udata) {
+ hr_qp->umem = ib_umem_get(ibdev, addr, hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
- dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
- goto err_alloc_rq_inline_buf;
- }
- hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
- hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
- page_shift);
- ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
- hr_qp->region_cnt);
- if (ret) {
- dev_err(dev, "alloc buf_list error for create qp\n");
- goto err_alloc_list;
+ goto err_inline;
}
+ } else {
+ ret = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
+ (1 << page_shift) * 2,
+ &hr_qp->hr_buf, page_shift);
+ if (ret)
+ goto err_inline;
+ }
- for (i = 0; i < hr_qp->region_cnt; i++) {
- r = &hr_qp->regions[i];
- buf_count = hns_roce_get_umem_bufs(hr_dev,
- buf_list[i], r->count, r->offset,
- hr_qp->umem, page_shift);
- if (buf_count != r->count) {
- dev_err(dev,
- "get umem buf err, expect %d,ret %d.\n",
- r->count, buf_count);
- ret = -ENOBUFS;
- goto err_get_bufs;
- }
- }
+ ret = map_wqe_buf(hr_dev, hr_qp, page_shift, udata);
+ if (ret)
+ goto err_alloc;
+
+ return 0;
+
+err_inline:
+ if (is_rq_buf_inline)
+ free_rq_inline_buf(hr_qp);
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
- (udata->inlen >= sizeof(ucmd)) &&
- (udata->outlen >= sizeof(resp)) &&
- hns_roce_qp_has_sq(init_attr)) {
- ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr,
+err_alloc:
+ if (udata) {
+ ib_umem_release(hr_qp->umem);
+ hr_qp->umem = NULL;
+ } else {
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+ }
+
+ ibdev_err(ibdev, "Failed to alloc WQE buffer, ret %d.\n", ret);
+
+ return ret;
+}
+
+static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+ if (hr_qp->umem) {
+ ib_umem_release(hr_qp->umem);
+ hr_qp->umem = NULL;
+ }
+
+ if (hr_qp->hr_buf.nbufs > 0)
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+ hr_qp->rq.wqe_cnt)
+ free_rq_inline_buf(hr_qp);
+}
+
+static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
+ hns_roce_qp_has_sq(init_attr) &&
+ udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
+}
+
+static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
+ hns_roce_qp_has_rq(init_attr));
+}
+
+static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ hns_roce_qp_has_rq(init_attr));
+}
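
All three helpers gate optional doorbell features on struct coverage rather than exact size: offsetofend() accepts any user binary whose udata buffer reaches the field in question, which keeps older, shorter ABI structs working. The idiom, reduced to a hypothetical struct:

#include <linux/stddef.h>
#include <linux/types.h>

struct my_resp {                 /* hypothetical ABI struct */
        __u32 base;
        __u32 cap_flags;         /* field added in a later revision */
};

static inline bool resp_covers_cap_flags(size_t outlen)
{
        /* true only if userspace's buffer is long enough to reach it */
        return outlen >= offsetofend(struct my_resp, cap_flags);
}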
+
+static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp *ucmd,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ if (udata) {
+ if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+ ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
&hr_qp->sdb);
if (ret) {
- dev_err(dev, "sq record doorbell map failed!\n");
- goto err_get_bufs;
+ ibdev_err(ibdev,
+ "Failed to map user SQ doorbell\n");
+ goto err_out;
}
-
- /* indicate kernel supports sq record db */
- resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
hr_qp->sdb_en = 1;
+ resp->cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
}
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(resp)) &&
- hns_roce_qp_has_rq(init_attr)) {
- ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr,
+ if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
+ ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
&hr_qp->rdb);
if (ret) {
- dev_err(dev, "rq record doorbell map failed!\n");
- goto err_sq_dbmap;
+ ibdev_err(ibdev,
+ "Failed to map user RQ doorbell\n");
+ goto err_sdb;
}
-
- /* indicate kernel supports rq record db */
- resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
hr_qp->rdb_en = 1;
+ resp->cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
}
} else {
- if (init_attr->create_flags &
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- dev_err(dev, "init_attr->create_flags error!\n");
- ret = -EINVAL;
- goto err_alloc_rq_inline_buf;
- }
-
- if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
- dev_err(dev, "init_attr->create_flags error!\n");
- ret = -EINVAL;
- goto err_alloc_rq_inline_buf;
- }
-
- /* Set SQ size */
- ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
- hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
- goto err_alloc_rq_inline_buf;
- }
-
/* QP doorbell register address */
hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- hns_roce_qp_has_rq(init_attr)) {
+ if (kernel_qp_has_rdb(hr_dev, init_attr)) {
ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
if (ret) {
- dev_err(dev, "rq record doorbell alloc failed!\n");
- goto err_alloc_rq_inline_buf;
+ ibdev_err(ibdev,
+ "Failed to alloc kernel RQ doorbell\n");
+ goto err_out;
}
*hr_qp->rdb.db_record = 0;
hr_qp->rdb_en = 1;
}
+ }
- /* Allocate QP buf */
- if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
- (1 << page_shift) * 2,
- &hr_qp->hr_buf, page_shift)) {
- dev_err(dev, "hns_roce_buf_alloc error!\n");
+ return 0;
+err_sdb:
+ if (udata && hr_qp->sdb_en)
+ hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+err_out:
+ return ret;
+}
+
+static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
+
+ if (udata) {
+ if (hr_qp->rdb_en)
+ hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
+ if (hr_qp->sdb_en)
+ hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+ } else {
+ if (hr_qp->rdb_en)
+ hns_roce_free_db(hr_dev, &hr_qp->rdb);
+ }
+}
+
+static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 *sq_wrid = NULL;
+ u64 *rq_wrid = NULL;
+ int ret;
+
+ sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(sq_wrid)) {
+ ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
+ return -ENOMEM;
+ }
+
+ if (hr_qp->rq.wqe_cnt) {
+ rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(rq_wrid)) {
+ ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
ret = -ENOMEM;
- goto err_db;
- }
- hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
- hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
- page_shift);
- ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
- hr_qp->region_cnt);
- if (ret) {
- dev_err(dev, "alloc buf_list error for create qp!\n");
- goto err_alloc_list;
+ goto err_sq;
}
+ }
- for (i = 0; i < hr_qp->region_cnt; i++) {
- r = &hr_qp->regions[i];
- buf_count = hns_roce_get_kmem_bufs(hr_dev,
- buf_list[i], r->count, r->offset,
- &hr_qp->hr_buf);
- if (buf_count != r->count) {
- dev_err(dev,
- "get kmem buf err, expect %d,ret %d.\n",
- r->count, buf_count);
- ret = -ENOBUFS;
- goto err_get_bufs;
- }
+ hr_qp->sq.wrid = sq_wrid;
+ hr_qp->rq.wrid = rq_wrid;
+ return 0;
+err_sq:
+ kfree(sq_wrid);
+
+ return ret;
+}
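
The ZERO_OR_NULL_PTR() tests are deliberate: kcalloc() with a zero count returns the non-NULL ZERO_SIZE_PTR cookie, which a plain NULL check would mistake for a usable allocation.

/* Why ZERO_OR_NULL_PTR() instead of !ptr:
 *   kcalloc(0, size, GFP_KERNEL) == ZERO_SIZE_PTR (non-NULL, not usable)
 * kfree() accepts NULL and ZERO_SIZE_PTR alike, which is why the error
 * path and free_kernel_wrid() can free unconditionally. */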
+
+static void free_kernel_wrid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ kfree(hr_qp->rq.wrid);
+ kfree(hr_qp->sq.wrid);
+}
+
+static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ hr_qp->ibqp.qp_type = init_attr->qp_type;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
+ else
+ hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+
+ ret = set_rq_size(hr_dev, &init_attr->cap, udata,
+ hns_roce_qp_has_rq(init_attr), hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to set user RQ size\n");
+ return ret;
+ }
+
+ if (udata) {
+ if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) {
+ ibdev_err(ibdev, "Failed to copy QP ucmd\n");
+ return -EFAULT;
}
- hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
- GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) {
- ret = -ENOMEM;
- goto err_get_bufs;
+ ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
+ if (ret)
+ ibdev_err(ibdev, "Failed to set user SQ size\n");
+ } else {
+ if (init_attr->create_flags &
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
+ ibdev_err(ibdev, "Failed to check multicast loopback\n");
+ return -EINVAL;
}
- if (hr_qp->rq.wqe_cnt) {
- hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
- GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) {
- ret = -ENOMEM;
- goto err_sq_wrid;
- }
+ if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
+ return -EINVAL;
}
+
+ ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
+ if (ret)
+ ibdev_err(ibdev, "Failed to set kernel SQ size\n");
}
- if (sqpn) {
- qpn = sqpn;
- } else {
- /* Get QPN */
- ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
+ return ret;
+}
+
+static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_ib_create_qp_resp resp = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_qp ucmd;
+ int ret;
+
+ mutex_init(&hr_qp->mutex);
+ spin_lock_init(&hr_qp->sq.lock);
+ spin_lock_init(&hr_qp->rq.lock);
+
+ hr_qp->state = IB_QPS_RESET;
+ hr_qp->flush_flag = 0;
+
+ ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to set QP param\n");
+ return ret;
+ }
+
+ if (!udata) {
+ ret = alloc_kernel_wrid(hr_dev, hr_qp);
if (ret) {
- dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
- goto err_wrid;
+ ibdev_err(ibdev, "Failed to alloc wrid\n");
+ return ret;
}
}
- hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
- hr_qp->region_cnt);
- hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
- page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list,
- hr_qp->regions, hr_qp->region_cnt);
+ ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
if (ret) {
- dev_err(dev, "mtr attach error for create qp\n");
- goto err_mtr;
+ ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
+ goto err_wrid;
}
- if (init_attr->qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- /* In v1 engine, GSI QP context in RoCE engine's register */
- ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_qp_alloc failed!\n");
- goto err_qpn;
- }
- } else {
- ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_qp_alloc failed!\n");
- goto err_qpn;
- }
+ ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc QP buffer\n");
+ goto err_db;
}
- if (sqpn)
- hr_qp->doorbell_qpn = 1;
- else
- hr_qp->doorbell_qpn = (u32)hr_qp->qpn;
+ ret = alloc_qpn(hr_dev, hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc QPN\n");
+ goto err_buf;
+ }
+
+ ret = alloc_qpc(hr_dev, hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc QP context\n");
+ goto err_qpn;
+ }
+
+ ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to store QP\n");
+ goto err_qpc;
+ }
if (udata) {
ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)));
- if (ret)
- goto err_qp;
+ if (ret) {
+ ibdev_err(ibdev, "Failed to copy QP resp\n");
+ goto err_store;
+ }
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
if (ret)
- goto err_qp;
+ goto err_store;
}
+ hr_qp->ibqp.qp_num = hr_qp->qpn;
hr_qp->event = hns_roce_ib_qp_event;
-
- add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq);
-
- hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
+ atomic_set(&hr_qp->refcount, 1);
+ init_completion(&hr_qp->free);
return 0;
-err_qp:
- if (init_attr->qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- hns_roce_qp_remove(hr_dev, hr_qp);
- else
- hns_roce_qp_free(hr_dev, hr_qp);
-
+err_store:
+ hns_roce_qp_remove(hr_dev, hr_qp);
+err_qpc:
+ free_qpc(hr_dev, hr_qp);
err_qpn:
- if (!sqpn)
- hns_roce_release_range_qp(hr_dev, qpn, 1);
-
-err_mtr:
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-
+ free_qpn(hr_dev, hr_qp);
+err_buf:
+ free_qp_buf(hr_dev, hr_qp);
+err_db:
+ free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
- if (udata) {
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(resp)) &&
- hns_roce_qp_has_rq(init_attr))
- hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
- } else {
- if (hr_qp->rq.wqe_cnt)
- kfree(hr_qp->rq.wrid);
- }
-
-err_sq_dbmap:
- if (udata)
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
- (udata->inlen >= sizeof(ucmd)) &&
- (udata->outlen >= sizeof(resp)) &&
- hns_roce_qp_has_sq(init_attr))
- hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
-
-err_sq_wrid:
- if (!udata)
- kfree(hr_qp->sq.wrid);
-
-err_get_bufs:
- hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
-
-err_alloc_list:
- if (!hr_qp->umem)
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- ib_umem_release(hr_qp->umem);
+ free_kernel_wrid(hr_dev, hr_qp);
+ return ret;
+}
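
The rewritten create path pairs every alloc_*() step with a free_*() counterpart and unwinds through the goto ladder in exact reverse order of construction, the usual kernel idiom for multi-step setup. A generic skeleton of the shape (step_*()/undo_*() are placeholders, not driver functions):

static int step_a(void), step_b(void);
static void undo_step_a(void);

static int setup(void)
{
        int ret;

        ret = step_a();
        if (ret)
                return ret;             /* nothing to unwind yet */
        ret = step_b();
        if (ret)
                goto undo_a;            /* unwind in reverse order */
        return 0;

undo_a:
        undo_step_a();
        return ret;
}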
-err_db:
- if (!udata && hns_roce_qp_has_rq(init_attr) &&
- (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
- hns_roce_free_db(hr_dev, &hr_qp->rdb);
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata)
+{
+ if (atomic_dec_and_test(&hr_qp->refcount))
+ complete(&hr_qp->free);
+ wait_for_completion(&hr_qp->free);
-err_alloc_rq_inline_buf:
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hns_roce_qp_has_rq(init_attr))
- free_rq_inline_buf(hr_qp);
+ free_qpc(hr_dev, hr_qp);
+ free_qpn(hr_dev, hr_qp);
+ free_qp_buf(hr_dev, hr_qp);
+ free_kernel_wrid(hr_dev, hr_qp);
+ free_qp_db(hr_dev, hr_qp, udata);
-err_out:
- return ret;
+ kfree(hr_qp);
}
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
@@ -1050,7 +1191,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
if (!hr_qp)
return ERR_PTR(-ENOMEM);
- ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
+ ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
hr_qp);
if (ret) {
ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
@@ -1059,8 +1200,6 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(ret);
}
- hr_qp->ibqp.qp_num = hr_qp->qpn;
-
break;
}
case IB_QPT_GSI: {
@@ -1077,15 +1216,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- /* when hw version is v1, the sqpn is allocated */
- if (hr_dev->caps.max_sq_sg <= 2)
- hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
- else
- hr_qp->ibqp.qp_num = 1;
-
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
- hr_qp->ibqp.qp_num, hr_qp);
+ hr_qp);
if (ret) {
ibdev_err(ibdev, "Create GSI QP failed!\n");
kfree(hr_qp);
@@ -1097,7 +1229,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
default:{
ibdev_err(ibdev, "not support QP type %d\n",
init_attr->qp_type);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
}
@@ -1230,11 +1362,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- if (hr_dev->caps.min_wqes) {
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
ret = -EPERM;
ibdev_err(&hr_dev->ib_dev,
- "cur_state=%d new_state=%d\n", cur_state,
- new_state);
+ "RST2RST state is not supported\n");
} else {
ret = 0;
}
@@ -1306,17 +1437,17 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}
-void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
-void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
-void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
(n << hr_qp->sge.sge_shift));
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index c6d5f06f9cde..5b3dd1a337d4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -381,7 +381,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
srq->max_gs = init_attr->attr.max_sge;
- srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
+ srq_desc_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+ HNS_ROCE_SGE_SIZE * srq->max_gs));
srq->wqe_shift = ilog2(srq_desc_size);
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 8feec35f95a7..3c62c9327a9c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -67,7 +67,7 @@
#include "i40iw_user.h"
#include "i40iw_puda.h"
-#define I40IW_FW_VERSION 2
+#define I40IW_FW_VER_DEFAULT 2
#define I40IW_HW_VERSION 2
#define I40IW_ARP_ADD 1
@@ -326,6 +326,26 @@ struct i40iw_handler {
};
/**
+ * i40iw_fw_major_ver - get firmware major version
+ * @dev: iwarp device
+ **/
+static inline u64 i40iw_fw_major_ver(struct i40iw_sc_dev *dev)
+{
+ return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
+ I40IW_FW_VER_MAJOR);
+}
+
+/**
+ * i40iw_fw_minor_ver - get firmware minor version
+ * @dev: iwarp device
+ **/
+static inline u64 i40iw_fw_minor_ver(struct i40iw_sc_dev *dev)
+{
+ return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
+ I40IW_FW_VER_MINOR);
+}
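
RS_64() is i40iw's shift-and-mask field accessor, built from per-field _MASK/_SHIFT constants, so the two helpers above simply carve the major and minor numbers out of the cached 64-bit FW_INFO word. The extraction style, sketched with made-up field constants:

#include <linux/types.h>

/* Hypothetical field layout, for illustration only. */
#define FW_VER_MAJOR_SHIFT 16
#define FW_VER_MAJOR_MASK  (0xffffULL << FW_VER_MAJOR_SHIFT)

static inline u64 fw_major(u64 fw_info)
{
        /* mask out the field, then shift it down to bit 0 */
        return (fw_info & FW_VER_MAJOR_MASK) >> FW_VER_MAJOR_SHIFT;
}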
+
+/**
* to_iwdev - get device
* @ibdev: ib device
**/
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index bb78d3280acc..fa7a5ff498c7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
struct rtable *rt;
struct neighbour *neigh;
int rc = arpindex;
- struct net_device *netdev = iwdev->netdev;
__be32 dst_ipaddr = htonl(dst_ip);
__be32 src_ipaddr = htonl(src_ip);
@@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
return rc;
}
- if (netif_is_bond_slave(netdev))
- netdev = netdev_master_upper_dev_get(netdev);
-
neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock();
@@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
{
struct neighbour *neigh;
int rc = arpindex;
- struct net_device *netdev = iwdev->netdev;
struct dst_entry *dst;
struct sockaddr_in6 dst_addr;
struct sockaddr_in6 src_addr;
@@ -2086,9 +2081,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
return rc;
}
- if (netif_is_bond_slave(netdev))
- netdev = netdev_master_upper_dev_get(netdev);
-
neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
rcu_read_lock();
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 66dc1ba03389..6e43e4d730f4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -85,7 +85,7 @@ struct ietf_mpa_v1 {
u8 flags;
u8 rev;
__be16 priv_data_len;
- u8 priv_data[0];
+ u8 priv_data[];
};
#define ietf_mpa_req_resp_frame ietf_mpa_frame
@@ -101,7 +101,7 @@ struct ietf_mpa_v2 {
u8 rev;
__be16 priv_data_len;
struct ietf_rtr_msg rtr_msg;
- u8 priv_data[0];
+ u8 priv_data[];
};
struct i40iw_cm_node;
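The [0]-length trailing arrays above become C99 flexible array members: same variable-length-tail layout, but the compiler and fortify checks now know the array has no fixed bound. A hedged sketch of the allocation pattern such structs use (names hypothetical):

#include <stdlib.h>
#include <string.h>

struct mpa_frame {
	unsigned short priv_data_len;
	unsigned char priv_data[];	/* C99 flexible array member */
};

static struct mpa_frame *mpa_frame_alloc(const void *priv, unsigned short len)
{
	/* sizeof(struct mpa_frame) does not include the flexible tail */
	struct mpa_frame *f = malloc(sizeof(*f) + len);

	if (!f)
		return NULL;
	f->priv_data_len = len;
	memcpy(f->priv_data, priv, len);
	return f;
}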
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 4d841a3c68f3..688f19667221 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1022,6 +1022,95 @@ static enum i40iw_status_code i40iw_sc_commit_fpm_values(
}
/**
+ * i40iw_sc_query_rdma_features_done - poll cqp for query features done
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code
+i40iw_sc_query_rdma_features_done(struct i40iw_sc_cqp *cqp)
+{
+ return i40iw_sc_poll_for_cqp_op_done(
+ cqp, I40IW_CQP_OP_QUERY_RDMA_FEATURES, NULL);
+}
+
+/**
+ * i40iw_sc_query_rdma_features - query rdma features
+ * @cqp: struct for cqp hw
+ * @feat_mem: holds PA for HW to use
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code
+i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
+ struct i40iw_dma_mem *feat_mem, u64 scratch)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 32, feat_mem->pa);
+
+ header = LS_64(I40IW_CQP_OP_QUERY_RDMA_FEATURES, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | feat_mem->size;
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY RDMA FEATURES WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * i40iw_get_rdma_features - get RDMA features
+ * @dev: sc device struct
+ */
+enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev)
+{
+ enum i40iw_status_code ret_code;
+ struct i40iw_dma_mem feat_buf;
+ u64 temp;
+ u16 byte_idx, feat_type, feat_cnt;
+
+ ret_code = i40iw_allocate_dma_mem(dev->hw, &feat_buf,
+ I40IW_FEATURE_BUF_SIZE,
+ I40IW_FEATURE_BUF_ALIGNMENT);
+
+ if (ret_code)
+ return I40IW_ERR_NO_MEMORY;
+
+ ret_code = i40iw_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
+ if (!ret_code)
+ ret_code = i40iw_sc_query_rdma_features_done(dev->cqp);
+
+ if (ret_code)
+ goto exit;
+
+ get_64bit_val(feat_buf.va, 0, &temp);
+ feat_cnt = RS_64(temp, I40IW_FEATURE_CNT);
+ if (feat_cnt < I40IW_MAX_FEATURES) {
+ ret_code = I40IW_ERR_INVALID_FEAT_CNT;
+ goto exit;
+ } else if (feat_cnt > I40IW_MAX_FEATURES) {
+ i40iw_debug(dev, I40IW_DEBUG_CQP,
+ "features buf size insufficient\n");
+ }
+
+ for (byte_idx = 0, feat_type = 0; feat_type < I40IW_MAX_FEATURES;
+ feat_type++, byte_idx += 8) {
+ get_64bit_val((u64 *)feat_buf.va, byte_idx, &temp);
+ dev->feature_info[feat_type] = RS_64(temp, I40IW_FEATURE_INFO);
+ }
+exit:
+ i40iw_free_dma_mem(dev->hw, &feat_buf);
+
+ return ret_code;
+}
+
+/**
* i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
* @cqp: struct for cqp hw
*/
@@ -4265,6 +4354,13 @@ static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
true,
I40IW_CQP_WAIT_EVENT);
break;
+ case OP_QUERY_RDMA_FEATURES:
+ values_mem.pa = pcmdinfo->in.u.query_rdma_features.cap_pa;
+ values_mem.va = pcmdinfo->in.u.query_rdma_features.cap_va;
+ status = i40iw_sc_query_rdma_features(
+ pcmdinfo->in.u.query_rdma_features.cqp, &values_mem,
+ pcmdinfo->in.u.query_rdma_features.scratch);
+ break;
default:
status = I40IW_NOT_SUPPORTED;
break;
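i40iw_get_rdma_features() above treats the feature buffer as an array of 64-bit words: word 0 carries the entry count in bits 32..47 (I40IW_FEATURE_CNT), and each word's low 16 bits carry that entry's info field (I40IW_FEATURE_INFO). A host-side sketch of that parse, under the same RS_64 shift/mask assumption as before:

#include <stdint.h>

#define FEATURE_CNT(w)  (((w) >> 32) & 0xffffULL)
#define FEATURE_INFO(w) ((w) & 0xffffULL)

static int parse_features(const uint64_t *buf, uint64_t *info, unsigned int max)
{
	unsigned int i, cnt = FEATURE_CNT(buf[0]);

	if (cnt < max)
		return -1;	/* firmware reported fewer entries than expected */
	for (i = 0; i < max; i++)
		info[i] = FEATURE_INFO(buf[i]);	/* 8-byte stride, as in the loop above */
	return 0;
}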
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 6ddaeec87d2f..e8367d67575d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -403,7 +403,7 @@
#define I40IW_CQP_OP_MANAGE_ARP 0x0f
#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10
#define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11
-#define I40IW_CQP_OP_MANAGE_PE_TEAM 0x12
+#define I40IW_CQP_OP_QUERY_RDMA_FEATURES 0x12
#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13
#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14
#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
@@ -431,6 +431,24 @@
#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b
#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d
+#define I40IW_FEATURE_BUF_SIZE (8 * I40IW_MAX_FEATURES)
+
+#define I40IW_FW_VER_MINOR_SHIFT 0
+#define I40IW_FW_VER_MINOR_MASK \
+ (0xffffULL << I40IW_FW_VER_MINOR_SHIFT)
+
+#define I40IW_FW_VER_MAJOR_SHIFT 16
+#define I40IW_FW_VER_MAJOR_MASK \
+ (0xffffULL << I40IW_FW_VER_MAJOR_SHIFT)
+
+#define I40IW_FEATURE_INFO_SHIFT 0
+#define I40IW_FEATURE_INFO_MASK \
+ (0xffffULL << I40IW_FEATURE_INFO_SHIFT)
+
+#define I40IW_FEATURE_CNT_SHIFT 32
+#define I40IW_FEATURE_CNT_MASK \
+ (0xffffULL << I40IW_FEATURE_CNT_SHIFT)
+
#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16
#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT)
@@ -1529,7 +1547,8 @@ enum i40iw_alignment {
I40IW_AEQ_ALIGNMENT = 0x100,
I40IW_CEQ_ALIGNMENT = 0x100,
I40IW_CQ0_ALIGNMENT = 0x100,
- I40IW_SD_BUF_ALIGNMENT = 0x80
+ I40IW_SD_BUF_ALIGNMENT = 0x80,
+ I40IW_FEATURE_BUF_ALIGNMENT = 0x8
};
#define I40IW_WQE_SIZE_64 64
@@ -1732,6 +1751,7 @@ enum i40iw_alignment {
#define OP_REQUESTED_COMMANDS 31
#define OP_COMPLETED_COMMANDS 32
#define OP_GEN_AE 33
-#define OP_SIZE_CQP_STAT_ARRAY 34
+#define OP_QUERY_RDMA_FEATURES 34
+#define OP_SIZE_CQP_STAT_ARRAY 35
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 55a1fbf0e670..ae8b97c30665 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
int arp_index;
arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
- if (arp_index == -1)
+ if (arp_index < 0)
return;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
if (!cqp_request)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 238614370927..9c96ece5e7f3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1212,22 +1212,19 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
struct net_device *dev;
struct in_device *idev;
- bool got_lock = true;
u32 ip_addr;
- if (!rtnl_trylock())
- got_lock = false;
-
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
(rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
- (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ (dev == iwdev->netdev)) && (READ_ONCE(dev->flags) & IFF_UP)) {
const struct in_ifaddr *ifa;
- idev = in_dev_get(dev);
+ idev = __in_dev_get_rcu(dev);
if (!idev)
continue;
- in_dev_for_each_ifa_rtnl(ifa, idev) {
+ in_dev_for_each_ifa_rcu(ifa, idev) {
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
"IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
@@ -1239,12 +1236,9 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
true,
I40IW_ARP_ADD);
}
-
- in_dev_put(idev);
}
}
- if (got_lock)
- rtnl_unlock();
+ rcu_read_unlock();
}
/**
@@ -1689,6 +1683,12 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_setup_ceqs(iwdev, ldev);
if (status)
break;
+
+ status = i40iw_get_rdma_features(dev);
+ if (status)
+ dev->feature_info[I40IW_FEATURE_FW_INFO] =
+ I40IW_FW_VER_DEFAULT;
+
iwdev->init_state = CEQ_CREATED;
status = i40iw_initialize_hw_resources(iwdev);
if (status)
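The address scan in this file now walks the netdev list under RCU instead of a best-effort rtnl_trylock(), so the walk can no longer be skipped when the lock is contended; READ_ONCE(dev->flags) and the _rcu ifaddr iterator match that locking model. A minimal sketch of the pattern (handle_addr() is a hypothetical callback):

#include <linux/netdevice.h>
#include <linux/inetdevice.h>

static void for_each_up_ipv4_addr(void (*handle_addr)(__be32 addr))
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		struct in_device *idev;
		const struct in_ifaddr *ifa;

		if (!(READ_ONCE(dev->flags) & IFF_UP))
			continue;
		idev = __in_dev_get_rcu(dev);	/* no refcount needed under RCU */
		if (!idev)
			continue;
		in_dev_for_each_ifa_rcu(ifa, idev)
			handle_addr(ifa->ifa_address);
	}
	rcu_read_unlock();
}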
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index 11d3a2a72100..4c429567bbb4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -105,6 +105,7 @@ enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *
bool poll_registers);
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);
+enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev);
void free_sd_mem(struct i40iw_sc_dev *dev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
index f7013f11d808..d1c5855bd8c3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -95,7 +95,8 @@ enum i40iw_status_code {
I40IW_ERR_INVALID_MAC_ADDR = -65,
I40IW_ERR_BAD_STAG = -66,
I40IW_ERR_CQ_COMPL_ERROR = -67,
- I40IW_ERR_QUEUE_DESTROYED = -68
+ I40IW_ERR_QUEUE_DESTROYED = -68,
+ I40IW_ERR_INVALID_FEAT_CNT = -69
};
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index adc8d2ec523d..54c323c40d96 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -234,6 +234,11 @@ enum i40iw_hw_stats_index_64b {
I40IW_HW_STAT_INDEX_MAX_64
};
+enum i40iw_feature_type {
+ I40IW_FEATURE_FW_INFO = 0,
+ I40IW_MAX_FEATURES
+};
+
struct i40iw_dev_hw_stats_offsets {
u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
@@ -501,6 +506,7 @@ struct i40iw_sc_dev {
const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
struct i40iw_hmc_fpm_misc hmc_fpm_misc;
+ u64 feature_info[I40IW_MAX_FEATURES];
u32 debug_mask;
u8 hmc_fn_id;
bool is_pf;
@@ -1340,6 +1346,12 @@ struct cqp_info {
struct i40iw_sc_qp *qp;
u64 scratch;
} suspend_resume;
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ void *cap_va;
+ u64 cap_pa;
+ u64 scratch;
+ } query_rdma_features;
} u;
};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index c335de91508f..1b6fb1380961 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -64,7 +64,8 @@ static int i40iw_query_device(struct ib_device *ibdev,
return -EINVAL;
memset(props, 0, sizeof(*props));
ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
- props->fw_ver = I40IW_FW_VERSION;
+ props->fw_ver = i40iw_fw_major_ver(&iwdev->sc_dev) << 32 |
+ i40iw_fw_minor_ver(&iwdev->sc_dev);
props->device_cap_flags = iwdev->device_cap_flags;
props->vendor_id = iwdev->ldev->pcidev->vendor;
props->vendor_part_id = iwdev->ldev->pcidev->device;
@@ -617,7 +618,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
if (init_attr->qp_type != IB_QPT_RC) {
- err_code = -EINVAL;
+ err_code = -EOPNOTSUPP;
goto error;
}
if (iwdev->push_mode)
@@ -2534,10 +2535,11 @@ static const char * const i40iw_hw_stat_names[] = {
static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
{
- u32 firmware_version = I40IW_FW_VERSION;
+ struct i40iw_device *iwdev = to_iwdev(dev);
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
- (firmware_version & 0x000000ff));
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%llu.%llu",
+ i40iw_fw_major_ver(&iwdev->sc_dev),
+ i40iw_fw_minor_ver(&iwdev->sc_dev));
}
/**
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 2f5d9b181848..275722cec8c6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -434,9 +434,6 @@ int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
return real_index;
}
-#define field_avail(type, fld, sz) (offsetof(type, fld) + \
- sizeof(((type *)0)->fld) <= (sz))
-
static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
@@ -447,7 +444,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
int err;
int have_ib_ports;
struct mlx4_uverbs_ex_query_device cmd;
- struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
+ struct mlx4_uverbs_ex_query_device_resp resp = {};
struct mlx4_clock_params clock_params;
if (uhw->inlen) {
@@ -602,7 +599,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx4_wqe_data_seg);
}
- if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+ if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
if (props->rss_caps.supported_qpts) {
resp.rss_caps.rx_hash_function =
MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
@@ -626,7 +623,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(resp.rss_caps);
}
- if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+ if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
if (dev->dev->caps.max_gso_sz &&
((mlx4_ib_port_link_layer(ibdev, 1) ==
IB_LINK_LAYER_ETHERNET) ||
@@ -1502,8 +1499,9 @@ static int __mlx4_ib_create_default_rules(
int i;
for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+ union ib_flow_spec ib_spec = {};
int ret;
- union ib_flow_spec ib_spec;
+
switch (pdefault_rules->rules_create_list[i]) {
case 0:
/* no rule */
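The open-coded field_avail() macro removed above gives way to the generic offsetofend(), which reads as "does the user buffer extend past the end of this member". A sketch of the equivalence, using the kernel's definition of offsetofend() and an illustrative response struct:

#include <stddef.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct query_resp {
	unsigned int comp_mask;
	unsigned int rss_caps;	/* layout illustrative only */
	unsigned int tso_caps;
};

static int can_fill_rss_caps(size_t outlen)
{
	/* identical test to the removed field_avail(), spelled generically */
	return offsetofend(struct query_resp, rss_caps) <= outlen;
}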
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 26425dd2d960..cf51e3cbd969 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1636,7 +1636,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
}
default:
/* Don't support raw QPs */
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
return &qp->ibqp;
@@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
int send_size;
int header_size;
int spc;
+ int err;
int i;
if (wr->wr.opcode != IB_WR_SEND)
@@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
sqp->ud_header.lrh.virtual_lane = 0;
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
- ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ if (err)
+ return err;
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
}
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+ &pkey);
else
- ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+ &pkey);
+ if (err)
+ return err;
+
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index d0a043ccbe58..2a334800f109 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -8,3 +8,4 @@ mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += qos.o
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 8ba439fabf7f..de4da92b81a6 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -47,6 +47,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
"rp_byte_reset",
"rp_threshold",
"rp_ai_rate",
+ "rp_max_rate",
"rp_hai_rate",
"rp_min_dec_fac",
"rp_min_rate",
@@ -56,6 +57,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
"rp_rate_reduce_monitor_period",
"rp_initial_alpha_value",
"rp_gd",
+ "np_min_time_between_cnps",
"np_cnp_dscp",
"np_cnp_prio_mode",
"np_cnp_prio",
@@ -66,6 +68,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
#define MLX5_IB_RP_TIME_RESET_ATTR BIT(3)
#define MLX5_IB_RP_BYTE_RESET_ATTR BIT(4)
#define MLX5_IB_RP_THRESHOLD_ATTR BIT(5)
+#define MLX5_IB_RP_MAX_RATE_ATTR BIT(6)
#define MLX5_IB_RP_AI_RATE_ATTR BIT(7)
#define MLX5_IB_RP_HAI_RATE_ATTR BIT(8)
#define MLX5_IB_RP_MIN_DEC_FAC_ATTR BIT(9)
@@ -77,6 +80,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
#define MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR BIT(15)
#define MLX5_IB_RP_GD_ATTR BIT(16)
+#define MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR BIT(2)
#define MLX5_IB_NP_CNP_DSCP_ATTR BIT(3)
#define MLX5_IB_NP_CNP_PRIO_MODE_ATTR BIT(4)
@@ -111,6 +115,9 @@ static u32 mlx5_get_cc_param_val(void *field, int offset)
case MLX5_IB_DBG_CC_RP_AI_RATE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_ai_rate);
+ case MLX5_IB_DBG_CC_RP_MAX_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_max_rate);
case MLX5_IB_DBG_CC_RP_HAI_RATE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_hai_rate);
@@ -138,6 +145,9 @@ static u32 mlx5_get_cc_param_val(void *field, int offset)
case MLX5_IB_DBG_CC_RP_GD:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_gd);
+ case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ min_time_between_cnps);
case MLX5_IB_DBG_CC_NP_CNP_DSCP:
return MLX5_GET(cong_control_r_roce_ecn_np, field,
cnp_dscp);
@@ -186,6 +196,11 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_ai_rate, var);
break;
+ case MLX5_IB_DBG_CC_RP_MAX_RATE:
+ *attr_mask |= MLX5_IB_RP_MAX_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_max_rate, var);
+ break;
case MLX5_IB_DBG_CC_RP_HAI_RATE:
*attr_mask |= MLX5_IB_RP_HAI_RATE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
@@ -231,6 +246,11 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_gd, var);
break;
+ case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS:
+ *attr_mask |= MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field,
+ min_time_between_cnps, var);
+ break;
case MLX5_IB_DBG_CC_NP_CNP_DSCP:
*attr_mask |= MLX5_IB_NP_CNP_DSCP_ATTR;
MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_dscp, var);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 3dec3de903b7..146ba2966744 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -715,17 +715,19 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- ucmdlen = udata->inlen < sizeof(ucmd) ?
- (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
+ if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
+ return -EINVAL;
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
- if (ucmdlen == sizeof(ucmd) &&
- (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
+ if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
+ MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
return -EINVAL;
- if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
+ if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
+ ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
*cqe_size = ucmd.cqe_size;
@@ -762,7 +764,14 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- *index = context->bfregi.sys_pages[0];
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
+ *index = ucmd.uar_page_index;
+ } else if (context->bfregi.lib_uar_dyn) {
+ err = -EINVAL;
+ goto err_cqb;
+ } else {
+ *index = context->bfregi.sys_pages[0];
+ }
if (ucmd.cqe_comp_en == 1) {
int mini_cqe_format;
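create_cq_user() above now clamps the copy length to min(inlen, sizeof(ucmd)) and rejects any command shorter than the pre-flags layout, rather than guessing between exactly two sizes; it also insists reserved fields are zero so they can be assigned meaning later. A small sketch of that validation shape (struct hypothetical):

#include <stddef.h>
#include <string.h>

struct create_cmd {
	unsigned int cqe_size;
	unsigned int reserved0;
	unsigned long long flags;	/* newest member; older users omit it */
};

static int check_cmd(const void *udata, size_t inlen, struct create_cmd *cmd)
{
	size_t len = inlen < sizeof(*cmd) ? inlen : sizeof(*cmd);

	/* must cover everything up to the optional trailing members */
	if (len < offsetof(struct create_cmd, flags))
		return -1;
	memset(cmd, 0, sizeof(*cmd));	/* unsupplied tail reads as zero */
	memcpy(cmd, udata, len);
	if (cmd->reserved0)		/* reserved fields must be zero */
		return -1;
	return 0;
}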
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index dbee17d22d50..862b7bf3e646 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -35,6 +35,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
*namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ffa7c2100edb..6679756506e6 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -39,9 +39,6 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
-#if defined(CONFIG_X86)
-#include <asm/memtype.h>
-#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
@@ -898,7 +895,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->raw_packet_caps |=
IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
- if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
if (max_tso) {
resp.tso_caps.max_tso = 1 << max_tso;
@@ -908,7 +905,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
resp.rss_caps.rx_hash_function =
MLX5_RX_HASH_FUNC_TOEPLITZ;
resp.rss_caps.rx_hash_fields_mask =
@@ -928,9 +925,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.rss_caps);
}
} else {
- if (field_avail(typeof(resp), tso_caps, uhw_outlen))
+ if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
resp.response_length += sizeof(resp.tso_caps);
- if (field_avail(typeof(resp), rss_caps, uhw_outlen))
+ if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
resp.response_length += sizeof(resp.rss_caps);
}
@@ -1072,7 +1069,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MAX_CQ_PERIOD;
}
- if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
resp.response_length += sizeof(resp.cqe_comp_caps);
if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
@@ -1090,7 +1087,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
+ if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
@@ -1108,8 +1105,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.packet_pacing_caps);
}
- if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
- uhw_outlen)) {
+ if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
+ uhw_outlen) {
if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
resp.mlx5_ib_support_multi_pkt_send_wqes =
MLX5_IB_ALLOW_MPW;
@@ -1122,7 +1119,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
- if (field_avail(typeof(resp), flags, uhw_outlen)) {
+ if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
resp.response_length += sizeof(resp.flags);
if (MLX5_CAP_GEN(mdev, cqe_compression_128))
@@ -1138,7 +1135,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
}
- if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
resp.response_length += sizeof(resp.sw_parsing_caps);
if (MLX5_CAP_ETH(mdev, swp)) {
resp.sw_parsing_caps.sw_parsing_offloads |=
@@ -1158,7 +1155,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
+ if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
@@ -1181,7 +1178,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
resp.response_length += sizeof(resp.tunnel_offloads_caps);
if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
resp.tunnel_offloads_caps |=
@@ -1192,12 +1189,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_GRE;
- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_GRE)
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_UDP)
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
@@ -1791,6 +1786,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
max_cqe_version);
u32 dump_fill_mkey;
bool lib_uar_4k;
+ bool lib_uar_dyn;
if (!dev->ib_active)
return -EAGAIN;
@@ -1849,8 +1845,14 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
}
lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
+ lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
bfregi = &context->bfregi;
+ if (lib_uar_dyn) {
+ bfregi->lib_uar_dyn = lib_uar_dyn;
+ goto uar_done;
+ }
+
/* updates req->total_num_bfregs */
err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
@@ -1877,6 +1879,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
if (err)
goto out_sys_pages;
+uar_done:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
err = mlx5_ib_devx_create(dev, true);
if (err < 0)
@@ -1898,19 +1901,19 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
- resp.tot_bfregs = req.total_num_bfregs;
+ resp.tot_bfregs = lib_uar_dyn ? 0 : req.total_num_bfregs;
resp.num_ports = dev->num_ports;
- if (field_avail(typeof(resp), cqe_version, udata->outlen))
+ if (offsetofend(typeof(resp), cqe_version) <= udata->outlen)
resp.response_length += sizeof(resp.cqe_version);
- if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
+ if (offsetofend(typeof(resp), cmds_supp_uhw) <= udata->outlen) {
resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
resp.response_length += sizeof(resp.cmds_supp_uhw);
}
- if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
+ if (offsetofend(typeof(resp), eth_min_inline) <= udata->outlen) {
if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
resp.eth_min_inline++;
@@ -1918,7 +1921,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
resp.response_length += sizeof(resp.eth_min_inline);
}
- if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
+ if (offsetofend(typeof(resp), clock_info_versions) <= udata->outlen) {
if (mdev->clock_info)
resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
resp.response_length += sizeof(resp.clock_info_versions);
@@ -1930,7 +1933,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
* pretend we don't support reading the HCA's core clock. This is also
* forced by mmap function.
*/
- if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ if (offsetofend(typeof(resp), hca_core_clock_offset) <= udata->outlen) {
if (PAGE_SIZE <= 4096) {
resp.comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
@@ -1940,18 +1943,18 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
resp.response_length += sizeof(resp.hca_core_clock_offset);
}
- if (field_avail(typeof(resp), log_uar_size, udata->outlen))
+ if (offsetofend(typeof(resp), log_uar_size) <= udata->outlen)
resp.response_length += sizeof(resp.log_uar_size);
- if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
+ if (offsetofend(typeof(resp), num_uars_per_page) <= udata->outlen)
resp.response_length += sizeof(resp.num_uars_per_page);
- if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
+ if (offsetofend(typeof(resp), num_dyn_bfregs) <= udata->outlen) {
resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
resp.response_length += sizeof(resp.num_dyn_bfregs);
}
- if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
+ if (offsetofend(typeof(resp), dump_fill_mkey) <= udata->outlen) {
if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
resp.dump_fill_mkey = dump_fill_mkey;
resp.comp_mask |=
@@ -2026,6 +2029,17 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
+static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
+ int uar_idx)
+{
+ unsigned int fw_uars_per_page;
+
+ fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_UARS_IN_PAGE : 1;
+
+ return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
+}
+
static int get_command(unsigned long offset)
{
return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
@@ -2110,6 +2124,11 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
mutex_unlock(&var_table->bitmap_lock);
kfree(mentry);
break;
+ case MLX5_IB_MMAP_TYPE_UAR_WC:
+ case MLX5_IB_MMAP_TYPE_UAR_NC:
+ mlx5_cmd_free_uar(dev->mdev, mentry->page_idx);
+ kfree(mentry);
+ break;
default:
WARN_ON(true);
}
@@ -2130,6 +2149,9 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
bfregi->num_static_sys_pages;
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
@@ -2147,14 +2169,6 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_ALLOC_WC:
-/* Some architectures don't support WC memory */
-#if defined(CONFIG_X86)
- if (!pat_enabled())
- return -EPERM;
-#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
- return -EPERM;
-#endif
- /* fall through */
case MLX5_IB_MMAP_REGULAR_PAGE:
/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
prot = pgprot_writecombine(vma->vm_page_prot);
@@ -2269,7 +2283,8 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
mentry = to_mmmap(entry);
pfn = (mentry->address >> PAGE_SHIFT);
- if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR)
+ if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
+ mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
prot = pgprot_noncached(vma->vm_page_prot);
else
prot = pgprot_writecombine(vma->vm_page_prot);
@@ -2300,9 +2315,12 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
command = get_command(vma->vm_pgoff);
switch (command) {
case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
+ if (!dev->wc_support)
+ return -EPERM;
+ fallthrough;
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
- case MLX5_IB_MMAP_ALLOC_WC:
return uar_mmap(dev, command, vma, context);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
@@ -3570,7 +3588,8 @@ static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
@@ -4045,6 +4064,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
log_max_ft_size));
priority = fs_matcher->priority;
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ log_max_ft_size));
+ priority = fs_matcher->priority;
}
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
@@ -4061,6 +4085,8 @@ _get_flow_table(struct mlx5_ib_dev *dev,
prio = &dev->flow_db->fdb;
else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
prio = &dev->flow_db->rdma_rx[priority];
+ else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX)
+ prio = &dev->flow_db->rdma_tx[priority];
if (!prio)
return ERR_PTR(-EINVAL);
@@ -6089,9 +6115,9 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-static int var_obj_cleanup(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
+static int mmap_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct mlx5_user_mmap_entry *obj = uobject->object;
@@ -6099,6 +6125,16 @@ static int var_obj_cleanup(struct ib_uobject *uobject,
return 0;
}
+static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
+ struct mlx5_user_mmap_entry *entry,
+ size_t length)
+{
+ return rdma_user_mmap_entry_insert_range(
+ &c->ibucontext, &entry->rdma_entry, length,
+ (MLX5_IB_MMAP_OFFSET_START << 16),
+ ((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
+}
+
static struct mlx5_user_mmap_entry *
alloc_var_entry(struct mlx5_ib_ucontext *c)
{
@@ -6129,10 +6165,8 @@ alloc_var_entry(struct mlx5_ib_ucontext *c)
entry->page_idx = page_idx;
entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
- err = rdma_user_mmap_entry_insert_range(
- &c->ibucontext, &entry->rdma_entry, var_table->stride_size,
- MLX5_IB_MMAP_OFFSET_START << 16,
- (MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1);
+ err = mlx5_rdma_user_mmap_entry_insert(c, entry,
+ var_table->stride_size);
if (err)
goto err_insert;
@@ -6216,7 +6250,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UA_MANDATORY));
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
- UVERBS_TYPE_ALLOC_IDR(var_obj_cleanup),
+ UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
&UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
&UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
@@ -6228,6 +6262,134 @@ static bool var_is_supported(struct ib_device *device)
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
}
+static struct mlx5_user_mmap_entry *
+alloc_uar_entry(struct mlx5_ib_ucontext *c,
+ enum mlx5_ib_uapi_uar_alloc_type alloc_type)
+{
+ struct mlx5_user_mmap_entry *entry;
+ struct mlx5_ib_dev *dev;
+ u32 uar_index;
+ int err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ dev = to_mdev(c->ibucontext.device);
+ err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ if (err)
+ goto end;
+
+ entry->page_idx = uar_index;
+ entry->address = uar_index2paddress(dev, uar_index);
+ if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
+ else
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
+
+ err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
+ if (err)
+ goto err_insert;
+
+ return entry;
+
+err_insert:
+ mlx5_cmd_free_uar(dev->mdev, uar_index);
+end:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
+ enum mlx5_ib_uapi_uar_alloc_type alloc_type;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_user_mmap_entry *entry;
+ u64 mmap_offset;
+ u32 length;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ err = uverbs_get_const(&alloc_type, attrs,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
+ if (err)
+ return err;
+
+ if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
+ alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
+ return -EOPNOTSUPP;
+
+ if (!to_mdev(c->ibucontext.device)->wc_support &&
+ alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
+ return -EOPNOTSUPP;
+
+ entry = alloc_uar_entry(c, alloc_type);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ mmap_offset = mlx5_entry_to_mmap_offset(entry);
+ length = entry->rdma_entry.npages * PAGE_SIZE;
+ uobj->object = entry;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
+ if (err)
+ goto err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+ &entry->page_idx, sizeof(entry->page_idx));
+ if (err)
+ goto err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ &length, sizeof(length));
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ rdma_user_mmap_entry_remove(&entry->rdma_entry);
+ return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_UAR_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_UAR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
+ enum mlx5_ib_uapi_uar_alloc_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_UAR_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_UAR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
+ UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
+
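The new MLX5_IB_METHOD_UAR_OBJ_ALLOC returns an mmap offset, a length and a page id; the expectation is that userspace feeds the offset and length straight into mmap() on the verbs device fd to map the UAR page (write-combining for BF, non-cached otherwise). A hypothetical userspace follow-up, assuming the alloc ioctl has already been issued and filled 'out':

#include <stdint.h>
#include <sys/mman.h>

struct uar_alloc_out {
	uint64_t mmap_offset;	/* MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET */
	uint32_t mmap_length;	/* MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH */
	uint32_t page_id;	/* MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID */
};

static void *map_uar(int cmd_fd, const struct uar_alloc_out *out)
{
	/* cmd_fd is the open verbs device fd */
	return mmap(NULL, out->mmap_length, PROT_WRITE,
		    MAP_SHARED, cmd_fd, out->mmap_offset);
}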
ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_dm,
UVERBS_OBJECT_DM,
@@ -6252,12 +6414,14 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_action),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
{}
};
@@ -6391,6 +6555,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->reset_flow_resource_lock);
xa_init(&dev->odp_mkeys);
xa_init(&dev->sig_mrs);
+ atomic_set(&dev->mkey_var, 0);
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
@@ -6546,7 +6711,8 @@ static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
doorbell_bar_offset);
bar_size = (1ULL << log_doorbell_bar_size) * 4096;
var_table->stride_size = 1ULL << log_doorbell_stride;
- var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size);
+ var_table->num_var_hw_entries = div_u64(bar_size,
+ var_table->stride_size);
mutex_init(&var_table->bitmap_lock);
var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
GFP_KERNEL);
@@ -7078,6 +7244,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
mlx5_ib_stage_counters_init,
mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_UAR,
mlx5_ib_stage_uar_init,
mlx5_ib_stage_uar_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b90a3649e7d1..c19ec9fd8a63 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -316,7 +316,7 @@ int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
if (!dev->mdev->roce.roce_en &&
port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
if (mlx5_core_is_pf(dev->mdev))
- dev->wc_support = true;
+ dev->wc_support = arch_can_pci_mmap_wc();
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f3bdbd5e5096..a4e522385de0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -64,8 +64,6 @@
dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
__LINE__, current->pid, ##arg)
-#define field_avail(type, fld, sz) (offsetof(type, fld) + \
- sizeof(((type *)0)->fld) <= (sz))
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
@@ -126,11 +124,27 @@ enum {
enum mlx5_ib_mmap_type {
MLX5_IB_MMAP_TYPE_MEMIC = 1,
MLX5_IB_MMAP_TYPE_VAR = 2,
+ MLX5_IB_MMAP_TYPE_UAR_WC = 3,
+ MLX5_IB_MMAP_TYPE_UAR_NC = 4,
};
-#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \
- (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
-#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+struct mlx5_bfreg_info {
+ u32 *sys_pages;
+ int num_low_latency_bfregs;
+ unsigned int *count;
+
+ /*
+ * protect bfreg allocation data structs
+ */
+ struct mutex lock;
+ u32 ver;
+ u8 lib_uar_4k : 1;
+ u8 lib_uar_dyn : 1;
+ u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
+};
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
@@ -203,6 +217,11 @@ struct mlx5_ib_flow_matcher {
u8 match_criteria_enable;
};
+struct mlx5_ib_pp {
+ u16 index;
+ struct mlx5_core_dev *mdev;
+};
+
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
@@ -210,6 +229,7 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
struct mlx5_ib_flow_prio fdb;
struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
@@ -618,8 +638,8 @@ struct mlx5_ib_mr {
struct ib_umem *umem;
struct mlx5_shared_mr_info *smr_info;
struct list_head list;
- int order;
- bool allocated_from_cache;
+ unsigned int order;
+ struct mlx5_cache_ent *cache_ent;
int npages;
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
@@ -701,22 +721,34 @@ struct mlx5_cache_ent {
u32 access_mode;
u32 page;
- u32 size;
- u32 cur;
+ u8 disabled:1;
+ u8 fill_to_high_water:1;
+
+ /*
+	 * - available_mrs is the length of the list head, i.e. the number of
+	 *   MRs available for immediate allocation.
+	 * - total_mrs is available_mrs plus all in-use MRs that could be
+	 *   returned to the cache.
+	 * - limit is the low water mark for available_mrs; 2 * limit is the
+	 *   upper water mark.
+	 * - pending is the number of MRs currently being created.
+ */
+ u32 total_mrs;
+ u32 available_mrs;
+ u32 limit;
+ u32 pending;
+
+ /* Statistics */
u32 miss;
- u32 limit;
struct mlx5_ib_dev *dev;
struct work_struct work;
struct delayed_work dwork;
- int pending;
- struct completion compl;
};
struct mlx5_mr_cache {
struct workqueue_struct *wq;
struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
- int stopped;
struct dentry *root;
unsigned long last_add;
};
@@ -794,6 +826,7 @@ enum mlx5_ib_dbg_cc_types {
MLX5_IB_DBG_CC_RP_BYTE_RESET,
MLX5_IB_DBG_CC_RP_THRESHOLD,
MLX5_IB_DBG_CC_RP_AI_RATE,
+ MLX5_IB_DBG_CC_RP_MAX_RATE,
MLX5_IB_DBG_CC_RP_HAI_RATE,
MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
MLX5_IB_DBG_CC_RP_MIN_RATE,
@@ -803,6 +836,7 @@ enum mlx5_ib_dbg_cc_types {
MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
MLX5_IB_DBG_CC_RP_GD,
+ MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
MLX5_IB_DBG_CC_NP_CNP_DSCP,
MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
MLX5_IB_DBG_CC_NP_CNP_PRIO,
@@ -986,14 +1020,16 @@ struct mlx5_ib_dev {
*/
struct mutex cap_mask_mutex;
u8 ib_active:1;
- u8 fill_delay:1;
u8 is_rep:1;
u8 lag_active:1;
u8 wc_support:1;
+ u8 fill_delay;
struct umr_common umrc;
/* sync used page count stats
*/
struct mlx5_ib_resources devr;
+
+ atomic_t mkey_var;
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
@@ -1263,7 +1299,8 @@ int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
-struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ unsigned int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
@@ -1383,6 +1420,7 @@ int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
+extern const struct uapi_definition mlx5_ib_qos_defs[];
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
@@ -1472,12 +1510,11 @@ static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
{
u8 cqe_version = ucontext->cqe_version;
- if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
- !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+ (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
return 0;
- if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
- !!cqe_version))
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
return -EINVAL;
return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
@@ -1490,12 +1527,11 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
{
u8 cqe_version = ucontext->cqe_version;
- if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
- !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+ (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
return 0;
- if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
- !!cqe_version))
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
return -EINVAL;
return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
@@ -1534,7 +1570,9 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
return false;
- if (access_flags & IB_ACCESS_RELAXED_ORDERING)
+ if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
+ (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
return false;
return true;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 6fa0a83c19de..44683073be0c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -47,9 +47,46 @@ enum {
#define MLX5_UMR_ALIGN 2048
+static void
+create_mkey_callback(int status, struct mlx5_async_work *context);
+
+static void
+assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
+ u32 *in)
+{
+ u8 key = atomic_inc_return(&dev->mkey_var);
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, mkey_7_0, key);
+ mkey->key = key;
+}
+
+static int
+mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen)
+{
+ assign_mkey_variant(dev, mkey, in);
+ return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
+}
+
+static int
+mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *in, int inlen, u32 *out, int outlen,
+ struct mlx5_async_work *context)
+{
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ assign_mkey_variant(dev, mkey, in);
+ return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
+ create_mkey_callback, context);
+}
+
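assign_mkey_variant() above stamps an 8-bit rolling counter into mkey_7_0 before the command is issued; when firmware returns the mkey index, create_mkey_callback() ORs in mlx5_idx_to_mkey(index). A sketch of the resulting key layout, assuming the driver's mlx5_idx_to_mkey(idx) == (idx << 8):

#include <stdint.h>

static uint32_t compose_mkey(uint32_t fw_index, uint8_t variant)
{
	/* low byte: rolling per-device variant; upper bytes: firmware index */
	return (fw_index << 8) | variant;
}

/* The variant makes a reused index yield a distinct key, which helps catch
 * stale-mkey use after an MR is recycled through the cache. */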
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
@@ -63,67 +100,73 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-static int order2idx(struct mlx5_ib_dev *dev, int order)
-{
- struct mlx5_mr_cache *cache = &dev->cache;
-
- if (order < cache->ent[0].order)
- return 0;
- else
- return order - cache->ent[0].order;
-}
-
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
-static void reg_mr_callback(int status, struct mlx5_async_work *context)
+static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
struct mlx5_ib_mr *mr =
container_of(context, struct mlx5_ib_mr, cb_work);
struct mlx5_ib_dev *dev = mr->dev;
- struct mlx5_mr_cache *cache = &dev->cache;
- int c = order2idx(dev, mr->order);
- struct mlx5_cache_ent *ent = &cache->ent[c];
- u8 key;
+ struct mlx5_cache_ent *ent = mr->cache_ent;
unsigned long flags;
- spin_lock_irqsave(&ent->lock, flags);
- ent->pending--;
- spin_unlock_irqrestore(&ent->lock, flags);
if (status) {
mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
kfree(mr);
- dev->fill_delay = 1;
+ spin_lock_irqsave(&ent->lock, flags);
+ ent->pending--;
+ WRITE_ONCE(dev->fill_delay, 1);
+ spin_unlock_irqrestore(&ent->lock, flags);
mod_timer(&dev->delay_timer, jiffies + HZ);
return;
}
mr->mmkey.type = MLX5_MKEY_MR;
- spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
- key = dev->mdev->priv.mkey_key++;
- spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
- mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;
+ mr->mmkey.key |= mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, mr->out, mkey_index));
- cache->last_add = jiffies;
+ WRITE_ONCE(dev->cache.last_add, jiffies);
spin_lock_irqsave(&ent->lock, flags);
list_add_tail(&mr->list, &ent->head);
- ent->cur++;
- ent->size++;
+ ent->available_mrs++;
+ ent->total_mrs++;
+ /* If we are doing fill_to_high_water then keep going. */
+ queue_adjust_cache_locked(ent);
+ ent->pending--;
spin_unlock_irqrestore(&ent->lock, flags);
+}
- if (!completion_done(&ent->compl))
- complete(&ent->compl);
+static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
+{
+ struct mlx5_ib_mr *mr;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return NULL;
+ mr->order = ent->order;
+ mr->cache_ent = ent;
+ mr->dev = ent->dev;
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
+
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+ MLX5_SET(mkc, mkc, log_page_size, ent->page);
+ return mr;
}
-static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
+/* Asynchronously schedule new MRs to be populated in the cache. */
+static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent = &cache->ent[c];
- int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
@@ -136,42 +179,29 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
for (i = 0; i < num; i++) {
- if (ent->pending >= MAX_PENDING_REG_MR) {
- err = -EAGAIN;
- break;
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ mr = alloc_cache_mr(ent, mkc);
if (!mr) {
err = -ENOMEM;
break;
}
- mr->order = ent->order;
- mr->allocated_from_cache = true;
- mr->dev = dev;
-
- MLX5_SET(mkc, mkc, free, 1);
- MLX5_SET(mkc, mkc, umr_en, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
- MLX5_SET(mkc, mkc, access_mode_4_2,
- (ent->access_mode >> 2) & 0x7);
-
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
- MLX5_SET(mkc, mkc, log_page_size, ent->page);
-
spin_lock_irq(&ent->lock);
+ if (ent->pending >= MAX_PENDING_REG_MR) {
+ err = -EAGAIN;
+ spin_unlock_irq(&ent->lock);
+ kfree(mr);
+ break;
+ }
ent->pending++;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
- &dev->async_ctx, in, inlen,
- mr->out, sizeof(mr->out),
- reg_mr_callback, &mr->cb_work);
+ err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
+ &ent->dev->async_ctx, in, inlen,
+ mr->out, sizeof(mr->out),
+ &mr->cb_work);
if (err) {
spin_lock_irq(&ent->lock);
ent->pending--;
spin_unlock_irq(&ent->lock);
- mlx5_ib_warn(dev, "create mkey failed %d\n", err);
+ mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
kfree(mr);
break;
}
@@ -181,70 +211,128 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
return err;
}
-static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
+/* Synchronously create a MR in the cache */
+static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_ib_mr *tmp_mr;
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mr *mr;
- LIST_HEAD(del_list);
- int i;
+ void *mkc;
+ u32 *in;
+ int err;
- for (i = 0; i < num; i++) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
- break;
- }
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_move(&mr->list, &del_list);
- ent->cur--;
- ent->size--;
- spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
- }
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
- list_del(&mr->list);
- kfree(mr);
+ mr = alloc_cache_mr(ent, mkc);
+ if (!mr) {
+ err = -ENOMEM;
+ goto free_in;
}
+
+ err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
+ if (err)
+ goto free_mr;
+
+ mr->mmkey.type = MLX5_MKEY_MR;
+ WRITE_ONCE(ent->dev->cache.last_add, jiffies);
+ spin_lock_irq(&ent->lock);
+ ent->total_mrs++;
+ spin_unlock_irq(&ent->lock);
+ kfree(in);
+ return mr;
+free_mr:
+ kfree(mr);
+free_in:
+ kfree(in);
+ return ERR_PTR(err);
}
-static ssize_t size_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
+{
+ struct mlx5_ib_mr *mr;
+
+ lockdep_assert_held(&ent->lock);
+ if (list_empty(&ent->head))
+ return;
+ mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+ list_del(&mr->list);
+ ent->available_mrs--;
+ ent->total_mrs--;
+ spin_unlock_irq(&ent->lock);
+ mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
+ kfree(mr);
+ spin_lock_irq(&ent->lock);
+}
+
+static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
+ bool limit_fill)
{
- struct mlx5_cache_ent *ent = filp->private_data;
- struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20] = {0};
- u32 var;
int err;
- int c;
- count = min(count, sizeof(lbuf) - 1);
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
+ lockdep_assert_held(&ent->lock);
- c = order2idx(dev, ent->order);
+ while (true) {
+ if (limit_fill)
+ target = ent->limit * 2;
+ if (target == ent->available_mrs + ent->pending)
+ return 0;
+ if (target > ent->available_mrs + ent->pending) {
+ u32 todo = target - (ent->available_mrs + ent->pending);
- if (sscanf(lbuf, "%u", &var) != 1)
- return -EINVAL;
+ spin_unlock_irq(&ent->lock);
+ err = add_keys(ent, todo);
+ if (err == -EAGAIN)
+ usleep_range(3000, 5000);
+ spin_lock_irq(&ent->lock);
+ if (err) {
+ if (err != -EAGAIN)
+ return err;
+ } else
+ return 0;
+ } else {
+ remove_cache_mr_locked(ent);
+ }
+ }
+}
- if (var < ent->limit)
- return -EINVAL;
+static ssize_t size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_cache_ent *ent = filp->private_data;
+ u32 target;
+ int err;
- if (var > ent->size) {
- do {
- err = add_keys(dev, c, var - ent->size);
- if (err && err != -EAGAIN)
- return err;
+ err = kstrtou32_from_user(buf, count, 0, &target);
+ if (err)
+ return err;
- usleep_range(3000, 5000);
- } while (err);
- } else if (var < ent->size) {
- remove_keys(dev, c, ent->size - var);
+ /*
+	 * Target is the new value of total_mrs the user requests; however, we
+ * cannot free MRs that are in use. Compute the target value for
+ * available_mrs.
+ */
+ spin_lock_irq(&ent->lock);
+ if (target < ent->total_mrs - ent->available_mrs) {
+ err = -EINVAL;
+ goto err_unlock;
}
+ target = target - (ent->total_mrs - ent->available_mrs);
+ if (target < ent->limit || target > ent->limit*2) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ err = resize_available_mrs(ent, target, false);
+ if (err)
+ goto err_unlock;
+ spin_unlock_irq(&ent->lock);
return count;
+
+err_unlock:
+ spin_unlock_irq(&ent->lock);
+ return err;
}
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
@@ -254,7 +342,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
+ err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
if (err < 0)
return err;
@@ -272,32 +360,23 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_cache_ent *ent = filp->private_data;
- struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20] = {0};
u32 var;
int err;
- int c;
-
- count = min(count, sizeof(lbuf) - 1);
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
- c = order2idx(dev, ent->order);
-
- if (sscanf(lbuf, "%u", &var) != 1)
- return -EINVAL;
-
- if (var > ent->size)
- return -EINVAL;
+ err = kstrtou32_from_user(buf, count, 0, &var);
+ if (err)
+ return err;
+ /*
+ * Upon set, we immediately fill the cache to the high water mark
+ * implied by the limit.
+ */
+ spin_lock_irq(&ent->lock);
ent->limit = var;
-
- if (ent->cur < ent->limit) {
- err = add_keys(dev, c, 2 * ent->limit - ent->cur);
- if (err)
- return err;
- }
-
+ err = resize_available_mrs(ent, 0, true);
+ spin_unlock_irq(&ent->lock);
+ if (err)
+ return err;
return count;
}
@@ -322,68 +401,119 @@ static const struct file_operations limit_fops = {
.read = limit_read,
};
-static int someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mr_cache *cache)
{
- int i;
+ unsigned int i;
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
- if (cache->ent[i].cur < cache->ent[i].limit)
- return 1;
+ struct mlx5_cache_ent *ent = &cache->ent[i];
+ bool ret;
+
+ spin_lock_irq(&ent->lock);
+ ret = ent->available_mrs < ent->limit;
+ spin_unlock_irq(&ent->lock);
+ if (ret)
+ return true;
}
+ return false;
+}
- return 0;
+/*
+ * Check if the bucket is outside the high/low water marks and schedule an
+ * async update. The cache refill has hysteresis: once the low water mark is
+ * hit, the cache is refilled up to the high water mark.
+ */
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
+{
+ lockdep_assert_held(&ent->lock);
+
+ if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
+ return;
+ if (ent->available_mrs < ent->limit) {
+ ent->fill_to_high_water = true;
+ queue_work(ent->dev->cache.wq, &ent->work);
+ } else if (ent->fill_to_high_water &&
+ ent->available_mrs + ent->pending < 2 * ent->limit) {
+ /*
+ * Once we start populating due to hitting a low water mark
+ * continue until we pass the high water mark.
+ */
+ queue_work(ent->dev->cache.wq, &ent->work);
+ } else if (ent->available_mrs == 2 * ent->limit) {
+ ent->fill_to_high_water = false;
+ } else if (ent->available_mrs > 2 * ent->limit) {
+ /* Queue deletion of excess entries */
+ ent->fill_to_high_water = false;
+ if (ent->pending)
+ queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
+ msecs_to_jiffies(1000));
+ else
+ queue_work(ent->dev->cache.wq, &ent->work);
+ }
}
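
Read as a state machine, queue_adjust_cache_locked() starts filling below limit, keeps filling up to 2 * limit once fill_to_high_water is set, and shrinks above 2 * limit. A small user-space sketch under those assumptions (no locking, work queues, or pending accounting beyond a plain argument):

#include <stdio.h>

enum cache_action { CACHE_NONE, CACHE_FILL, CACHE_SHRINK };

/* One decision step of the watermark hysteresis. */
static enum cache_action adjust(unsigned int avail, unsigned int pending,
				unsigned int limit, int *fill)
{
	if (avail < limit) {
		*fill = 1;			/* low water mark hit */
		return CACHE_FILL;
	}
	if (*fill && avail + pending < 2 * limit)
		return CACHE_FILL;		/* keep going to the high mark */
	if (avail >= 2 * limit)
		*fill = 0;
	return avail > 2 * limit ? CACHE_SHRINK : CACHE_NONE;
}

int main(void)
{
	int fill = 0;
	unsigned int avail;

	for (avail = 9; avail <= 21; avail += 6)	/* limit = 10 */
		printf("avail=%u -> %d\n", avail, adjust(avail, 0, 10, &fill));
	return 0;	/* prints FILL, FILL, SHRINK (1, 1, 2) */
}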
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
struct mlx5_mr_cache *cache = &dev->cache;
- int i = order2idx(dev, ent->order);
int err;
- if (cache->stopped)
- return;
+ spin_lock_irq(&ent->lock);
+ if (ent->disabled)
+ goto out;
- ent = &dev->cache.ent[i];
- if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
- err = add_keys(dev, i, 1);
- if (ent->cur < 2 * ent->limit) {
- if (err == -EAGAIN) {
- mlx5_ib_dbg(dev, "returned eagain, order %d\n",
- i + 2);
- queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(3));
- } else if (err) {
- mlx5_ib_warn(dev, "command failed order %d, err %d\n",
- i + 2, err);
+ if (ent->fill_to_high_water &&
+ ent->available_mrs + ent->pending < 2 * ent->limit &&
+ !READ_ONCE(dev->fill_delay)) {
+ spin_unlock_irq(&ent->lock);
+ err = add_keys(ent, 1);
+ spin_lock_irq(&ent->lock);
+ if (ent->disabled)
+ goto out;
+ if (err) {
+ /*
+ * EAGAIN only happens if pending is positive, so we
+ * will be rescheduled from reg_mr_callback(). The only
+ * failure path here is ENOMEM.
+ */
+ if (err != -EAGAIN) {
+ mlx5_ib_warn(
+ dev,
+ "command failed order %d, err %d\n",
+ ent->order, err);
queue_delayed_work(cache->wq, &ent->dwork,
msecs_to_jiffies(1000));
- } else {
- queue_work(cache->wq, &ent->work);
}
}
- } else if (ent->cur > 2 * ent->limit) {
+ } else if (ent->available_mrs > 2 * ent->limit) {
+ bool need_delay;
+
/*
- * The remove_keys() logic is performed as garbage collection
- * task. Such task is intended to be run when no other active
- * processes are running.
+ * The remove_cache_mr_locked() logic is performed as a garbage
+ * collection task. Such a task is intended to run when no other
+ * active processes are running.
*
* The need_resched() will return TRUE if there are user tasks
* to be activated in near future.
*
- * In such case, we don't execute remove_keys() and postpone
- * the garbage collection work to try to run in next cycle,
- * in order to free CPU resources to other tasks.
+ * In such a case, we don't execute remove_cache_mr_locked() and
+ * postpone the garbage collection work to try to run in the next
+ * cycle, in order to free CPU resources to other tasks.
*/
- if (!need_resched() && !someone_adding(cache) &&
- time_after(jiffies, cache->last_add + 300 * HZ)) {
- remove_keys(dev, i, 1);
- if (ent->cur > ent->limit)
- queue_work(cache->wq, &ent->work);
- } else {
+ spin_unlock_irq(&ent->lock);
+ need_delay = need_resched() || someone_adding(cache) ||
+ !time_after(jiffies,
+ READ_ONCE(cache->last_add) + 300 * HZ);
+ spin_lock_irq(&ent->lock);
+ if (ent->disabled)
+ goto out;
+ if (need_delay)
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
- }
+ remove_cache_mr_locked(ent);
+ queue_adjust_cache_locked(ent);
}
+out:
+ spin_unlock_irq(&ent->lock);
}
static void delayed_cache_work_func(struct work_struct *work)
@@ -402,117 +532,95 @@ static void cache_work_func(struct work_struct *work)
__cache_work_func(ent);
}
-struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
+/* Allocate a special entry from the cache */
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ unsigned int entry)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
- int err;
- if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
+ if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
+ entry >= ARRAY_SIZE(cache->ent)))
return ERR_PTR(-EINVAL);
- }
ent = &cache->ent[entry];
- while (1) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
-
- err = add_keys(dev, entry, 1);
- if (err && err != -EAGAIN)
- return ERR_PTR(err);
-
- wait_for_completion(&ent->compl);
- } else {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
- list);
- list_del(&mr->list);
- ent->cur--;
- spin_unlock_irq(&ent->lock);
- if (ent->cur < ent->limit)
- queue_work(cache->wq, &ent->work);
+ spin_lock_irq(&ent->lock);
+ if (list_empty(&ent->head)) {
+ spin_unlock_irq(&ent->lock);
+ mr = create_cache_mr(ent);
+ if (IS_ERR(mr))
return mr;
- }
+ } else {
+ mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+ list_del(&mr->list);
+ ent->available_mrs--;
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->lock);
}
+ return mr;
}
-static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
+/* Return an MR already available in the cache */
+static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_ib_dev *dev = req_ent->dev;
struct mlx5_ib_mr *mr = NULL;
- struct mlx5_cache_ent *ent;
- int last_umr_cache_entry;
- int c;
- int i;
-
- c = order2idx(dev, order);
- last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
- if (c < 0 || c > last_umr_cache_entry) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
- return NULL;
- }
-
- for (i = c; i <= last_umr_cache_entry; i++) {
- ent = &cache->ent[i];
+ struct mlx5_cache_ent *ent = req_ent;
- mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
+ /* Try larger MR pools from the cache to satisfy the allocation */
+ for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
+ mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
+ ent - dev->cache.ent);
spin_lock_irq(&ent->lock);
if (!list_empty(&ent->head)) {
mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
list);
list_del(&mr->list);
- ent->cur--;
+ ent->available_mrs--;
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
- if (ent->cur < ent->limit)
- queue_work(cache->wq, &ent->work);
break;
}
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
-
- queue_work(cache->wq, &ent->work);
}
if (!mr)
- cache->ent[c].miss++;
+ req_ent->miss++;
return mr;
}
+static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_cache_ent *ent = mr->cache_ent;
+
+ mr->cache_ent = NULL;
+ spin_lock_irq(&ent->lock);
+ ent->total_mrs--;
+ spin_unlock_irq(&ent->lock);
+}
+
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent;
- int shrink = 0;
- int c;
+ struct mlx5_cache_ent *ent = mr->cache_ent;
- if (!mr->allocated_from_cache)
+ if (!ent)
return;
- c = order2idx(dev, mr->order);
- WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
-
if (mlx5_mr_cache_invalidate(mr)) {
- mr->allocated_from_cache = false;
+ detach_mr_from_cache(mr);
destroy_mkey(dev, mr);
- ent = &cache->ent[c];
- if (ent->cur < ent->limit)
- queue_work(cache->wq, &ent->work);
return;
}
- ent = &cache->ent[c];
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
- ent->cur++;
- if (ent->cur > 2 * ent->limit)
- shrink = 1;
+ ent->available_mrs++;
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
-
- if (shrink)
- queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
@@ -532,8 +640,8 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_move(&mr->list, &del_list);
- ent->cur--;
- ent->size--;
+ ent->available_mrs--;
+ ent->total_mrs--;
spin_unlock_irq(&ent->lock);
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
@@ -571,7 +679,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
dir = debugfs_create_dir(ent->name, cache->root);
debugfs_create_file("size", 0600, dir, ent, &size_fops);
debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
- debugfs_create_u32("cur", 0400, dir, &ent->cur);
+ debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
debugfs_create_u32("miss", 0600, dir, &ent->miss);
}
}
@@ -580,7 +688,7 @@ static void delay_time_func(struct timer_list *t)
{
struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
- dev->fill_delay = 0;
+ WRITE_ONCE(dev->fill_delay, 0);
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
@@ -606,7 +714,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->dev = dev;
ent->limit = 0;
- init_completion(&ent->compl);
INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
@@ -628,7 +735,9 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
ent->limit = 0;
- queue_work(cache->wq, &ent->work);
+ spin_lock_irq(&ent->lock);
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->lock);
}
mlx5_mr_cache_debugfs_init(dev);
@@ -638,13 +747,20 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
- int i;
+ unsigned int i;
if (!dev->cache.wq)
return 0;
- dev->cache.stopped = 1;
- flush_workqueue(dev->cache.wq);
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ struct mlx5_cache_ent *ent = &dev->cache.ent[i];
+
+ spin_lock_irq(&ent->lock);
+ ent->disabled = true;
+ spin_unlock_irq(&ent->lock);
+ cancel_work_sync(&ent->work);
+ cancel_delayed_work_sync(&ent->dwork);
+ }
mlx5_mr_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
@@ -685,7 +801,6 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
@@ -707,7 +822,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
MLX5_SET(mkc, mkc, length64, 1);
set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
- err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
goto err_in;
@@ -840,31 +955,37 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
return err;
}
-static struct mlx5_ib_mr *alloc_mr_from_cache(
- struct ib_pd *pd, struct ib_umem *umem,
- u64 virt_addr, u64 len, int npages,
- int page_shift, int order, int access_flags)
+static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
+ unsigned int order)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+
+ if (order < cache->ent[0].order)
+ return &cache->ent[0];
+ order = order - cache->ent[0].order;
+ if (order > MR_CACHE_LAST_STD_ENTRY)
+ return NULL;
+ return &cache->ent[order];
+}
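
mr_cache_ent_from_order() clamps small orders to the first entry and indexes relative to it. A sketch with made-up constants (the real first order and entry count come from the device profile, not these values):

#include <stdio.h>

#define FIRST_ENT_ORDER	2	/* hypothetical order of cache->ent[0] */
#define LAST_STD_ENTRY	15	/* hypothetical MR_CACHE_LAST_STD_ENTRY */

/* Returns the standard-entry index for an order, or -1 when the
 * request is too large and the caller must fail with -E2BIG. */
static int ent_index_from_order(unsigned int order)
{
	if (order < FIRST_ENT_ORDER)
		return 0;	/* round small orders up to the first entry */
	order -= FIRST_ENT_ORDER;
	return order > LAST_STD_ENTRY ? -1 : (int)order;
}

int main(void)
{
	printf("%d %d %d\n", ent_index_from_order(1),
	       ent_index_from_order(5), ent_index_from_order(40));
	/* prints: 0 3 -1 */
	return 0;
}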
+
+static struct mlx5_ib_mr *
+alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
+ u64 len, int npages, int page_shift, unsigned int order,
+ int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order);
struct mlx5_ib_mr *mr;
- int err = 0;
- int i;
- for (i = 0; i < 1; i++) {
- mr = alloc_cached_mr(dev, order);
- if (mr)
- break;
-
- err = add_keys(dev, order2idx(dev, order), 1);
- if (err && err != -EAGAIN) {
- mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
- break;
- }
+ if (!ent)
+ return ERR_PTR(-E2BIG);
+ mr = get_cache_mr(ent);
+ if (!mr) {
+ mr = create_cache_mr(ent);
+ if (IS_ERR(mr))
+ return mr;
}
- if (!mr)
- return ERR_PTR(-EAGAIN);
-
mr->ibmr.pd = pd;
mr->umem = umem;
mr->access_flags = access_flags;
@@ -1097,7 +1218,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
get_octo_len(virt_addr, length, page_shift));
}
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
goto err_2;
@@ -1137,7 +1258,6 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
@@ -1160,7 +1280,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
MLX5_SET64(mkc, mkc, len, length);
set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
- err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
goto err_in;
@@ -1319,6 +1439,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (is_odp_mr(mr)) {
to_ib_umem_odp(mr->umem)->private = mr;
+ init_waitqueue_head(&mr->q_deferred_work);
atomic_set(&mr->num_deferred_work, 0);
err = xa_err(xa_store(&dev->odp_mkeys,
mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
@@ -1439,10 +1560,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
/*
* UMR can't be used - MKey needs to be replaced.
*/
- if (mr->allocated_from_cache)
- err = mlx5_mr_cache_invalidate(mr);
- else
- err = destroy_mkey(dev, mr);
+ if (mr->cache_ent)
+ detach_mr_from_cache(mr);
+ err = destroy_mkey(dev, mr);
if (err)
goto err;
@@ -1454,8 +1574,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mr = to_mmr(ib_mr);
goto err;
}
-
- mr->allocated_from_cache = false;
} else {
/*
* Send a UMR WQE
@@ -1542,8 +1660,6 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int allocated_from_cache = mr->allocated_from_cache;
-
if (mr->sig) {
if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_memory.psv_idx))
@@ -1558,7 +1674,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig = NULL;
}
- if (!allocated_from_cache) {
+ if (!mr->cache_ent) {
destroy_mkey(dev, mr);
mlx5_free_priv_descs(mr);
}
@@ -1575,7 +1691,7 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
else
clean_mr(dev, mr);
- if (mr->allocated_from_cache)
+ if (mr->cache_ent)
mlx5_mr_cache_free(dev, mr);
else
kfree(mr);
@@ -1638,7 +1754,7 @@ static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
goto err_free_descs;
@@ -1905,7 +2021,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
if (err)
goto free;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index bf50cd91f472..3de7606d4a1a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -197,7 +197,7 @@ static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
odp->private = NULL;
mutex_unlock(&odp->umem_mutex);
- if (!mr->allocated_from_cache) {
+ if (!mr->cache_ent) {
mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey);
WARN_ON(mr->descs);
}
diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c
new file mode 100644
index 000000000000..cac878a70edb
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qos.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static bool pp_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return (MLX5_CAP_GEN(dev->mdev, qos) &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing) &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_pp *pp_entry;
+ void *in_ctx;
+ u16 uid;
+ int inlen;
+ u32 flags;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ /* The allocated entry can be used only by a DEVX context */
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ dev = to_mdev(c->ibucontext.device);
+ pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL);
+ if (!pp_entry)
+ return -ENOMEM;
+
+ in_ctx = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
+ memcpy(rl_raw, in_ctx, inlen);
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX);
+ if (err)
+ goto err;
+
+ uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ?
+ c->devx_uid : MLX5_SHARED_RESOURCE_UID;
+
+ err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid,
+ (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX),
+ &pp_entry->index);
+ if (err)
+ goto err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ &pp_entry->index, sizeof(pp_entry->index));
+ if (err)
+ goto clean;
+
+ pp_entry->mdev = dev->mdev;
+ uobj->object = pp_entry;
+ return 0;
+
+clean:
+ mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index);
+err:
+ kfree(pp_entry);
+ return err;
+}
+
+static int pp_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_pp *pp_entry = uobject->object;
+
+ mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index);
+ kfree(pp_entry);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_PP_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_PP,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
+ UVERBS_ATTR_SIZE(1,
+ MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ enum mlx5_ib_uapi_pp_alloc_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_PP_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_PP,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP,
+ UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY));
+
+const struct uapi_definition mlx5_ib_qos_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_PP,
+ UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)),
+ {},
+};
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8fe149e808af..2210759843ba 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -697,6 +697,9 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev,
{
int bfregn = -ENOMEM;
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
mutex_lock(&bfregi->lock);
if (bfregi->ver >= 2) {
bfregn = alloc_high_class_bfreg(dev, bfregi);
@@ -768,6 +771,9 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
u32 index_of_sys_page;
u32 offset;
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;
@@ -919,6 +925,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
void *qpc;
int err;
u16 uid;
+ u32 uar_flags;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (err) {
@@ -928,24 +935,29 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
ibucontext);
- if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
+ uar_flags = ucmd.flags & (MLX5_QP_FLAG_UAR_PAGE_INDEX |
+ MLX5_QP_FLAG_BFREG_INDEX);
+ switch (uar_flags) {
+ case MLX5_QP_FLAG_UAR_PAGE_INDEX:
+ uar_index = ucmd.bfreg_index;
+ bfregn = MLX5_IB_INVALID_BFREG;
+ break;
+ case MLX5_QP_FLAG_BFREG_INDEX:
uar_index = bfregn_to_uar_index(dev, &context->bfregi,
ucmd.bfreg_index, true);
if (uar_index < 0)
return uar_index;
-
bfregn = MLX5_IB_INVALID_BFREG;
- } else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
- /*
- * TBD: should come from the verbs when we have the API
- */
- /* In CROSS_CHANNEL CQ and QP must use the same UAR */
- bfregn = MLX5_CROSS_CHANNEL_BFREG;
- }
- else {
+ break;
+ case 0:
+ if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ return -EINVAL;
bfregn = alloc_bfreg(dev, &context->bfregi);
if (bfregn < 0)
return bfregn;
+ break;
+ default:
+ return -EINVAL;
}
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
@@ -2100,6 +2112,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_UAR_PAGE_INDEX |
MLX5_QP_FLAG_TYPE_DCI |
MLX5_QP_FLAG_TYPE_DCT))
return -EINVAL;
@@ -2789,7 +2802,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
mlx5_ib_dbg(dev, "unsupported qp type %d\n",
init_attr->qp_type);
/* Don't support raw QPs */
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
@@ -5545,7 +5558,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
rdma_ah_set_static_rate(ah_attr,
path->static_rate ? path->static_rate - 5 : 0);
- if (path->grh_mlid & (1 << 7)) {
+
+ if (path->grh_mlid & (1 << 7) ||
+ ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
rdma_ah_set_grh(ah_attr, NULL,
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 78a48aea3faf..fa808582b08b 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -58,7 +58,7 @@ struct mthca_user_db_table {
u64 uvirt;
struct scatterlist mem;
int refcount;
- } page[0];
+ } page[];
};
static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index da9b8f9b884f..f9a2e65e2ff5 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -68,7 +68,7 @@ struct mthca_icm_table {
int lowmem;
int coherent;
struct mutex mutex;
- struct mthca_icm *icm[0];
+ struct mthca_icm *icm[];
};
struct mthca_icm_iter {
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index ac19d57803b5..69a3e4f62fb1 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -561,7 +561,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
}
default:
/* Don't support raw QPs */
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
if (err) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index d47ea675734b..10e343894595 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1111,7 +1111,7 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
(attrs->qp_type != IB_QPT_UD)) {
pr_err("%s(%d) unsupported qp type=0x%x requested\n",
__func__, dev->id, attrs->qp_type);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
/* Skip the check for QP1 to support CM size of 128 */
if ((attrs->qp_type != IB_QPT_GSI) &&
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 484b555150e0..a5bd3adaf90a 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1186,7 +1186,7 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: unsupported qp type=0x%x requested\n",
attrs->qp_type);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (attrs->cap.max_send_wr > qattr->max_sqe) {
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 568b21eb6ea1..021df0654ba7 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping linkcontrol sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ goto bail_link;
}
kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
@@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping sl2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_link;
+ goto bail_sl;
}
kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
@@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping diag_counters sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl;
+ goto bail_diagc;
}
kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
@@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_diagc;
+ goto bail_cc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
&cc_table_bin_attr);
kobject_put(&ppd->pport_cc_kobj);
}
+ kobject_put(&ppd->diagc_kobj);
kobject_put(&ppd->sl2vl_kobj);
kobject_put(&ppd->pport_kobj);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 5ef93f8f17a1..7508abb6a0fa 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -39,7 +39,6 @@
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
-#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
@@ -1503,7 +1502,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
unsigned i, ctxt;
int ret;
- get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 8bf414b47b96..dc0e81f3b6f4 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -177,7 +177,6 @@ struct qib_ibdev {
struct timer_list mem_timer;
struct qib_pio_header *pio_hdrs;
dma_addr_t pio_hdrs_phys;
- u32 qp_rnd; /* random bytes for hash */
u32 n_piowait;
u32 n_txwait;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 556b8e44a51c..71f82339446c 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -504,7 +504,7 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n",
dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
trans_spec = cmd.spec;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 70be49b1ca05..7ec8991ace67 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -77,7 +77,7 @@ struct usnic_uiom_reg {
struct usnic_uiom_chunk {
struct list_head list;
int nents;
- struct scatterlist page_list[0];
+ struct scatterlist page_list[];
};
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index e580ae9cc55a..780fd2dfc07e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
ret = -ENOMEM;
- goto err_free_device;
+ goto err_disable_pdev;
}
ret = pci_request_regions(pdev, DRV_NAME);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 9de1281f9a3b..afcc2abcf55c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -217,7 +217,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
init_attr->qp_type != IB_QPT_GSI) {
dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
init_attr->qp_type);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
if (is_srq && !dev->dsr->caps.max_srq) {
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 5724cbbe38b1..04d2e72017fe 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
*/
if (udata && udata->outlen >= sizeof(__u64)) {
cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
- if (!cq->ip) {
- err = -ENOMEM;
+ if (IS_ERR(cq->ip)) {
+ err = PTR_ERR(cq->ip);
goto bail_wc;
}
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index 652f4a7efc1b..37853aa3bcf7 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -154,7 +154,7 @@ done:
* @udata: user data (must be valid!)
* @obj: opaque pointer to a cq, wq etc
*
- * Return: rvt_mmap struct on success
+ * Return: rvt_mmap struct on success, ERR_PTR on failure
*/
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
struct ib_udata *udata, void *obj)
@@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
if (!ip)
- return ip;
+ return ERR_PTR(-ENOMEM);
size = PAGE_ALIGN(size);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 7858d499db03..500a7ee04c44 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1220,7 +1220,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
default:
/* Don't support raw QPs */
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
init_attr->cap.max_inline_data = 0;
@@ -1244,8 +1244,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->ip = rvt_create_mmap_info(rdi, s, udata,
qp->r_rq.wq);
- if (!qp->ip) {
- ret = ERR_PTR(-ENOMEM);
+ if (IS_ERR(qp->ip)) {
+ ret = ERR_CAST(qp->ip);
goto bail_qpn;
}
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 24fef021d51d..f547c115af03 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
- if (!srq->ip) {
- ret = -ENOMEM;
+ if (IS_ERR(srq->ip)) {
+ ret = PTR_ERR(srq->ip);
goto bail_wq;
}
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 986265ad6e79..72b031ab7092 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -284,12 +284,6 @@ static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
&gid->global.interface_id);
}
-static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
- *ibucontext)
-{
- return container_of(ibucontext, struct rvt_ucontext, ibucontext);
-}
-
/**
* rvt_alloc_ucontext - Allocate a user context
* @uctx: Verbs context
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 0946a301a5c5..4afdd2e20883 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -103,6 +103,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
+ rxe->ndev->dev_addr);
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 48f48122ddcb..6a413d73b95d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
- return NULL;
+ return ERR_PTR(-ENOMEM);
size = PAGE_ALIGN(size);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index ec21f616ac98..6c11c3aeeca6 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -590,15 +590,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
int err;
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
- int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
+ int max_rd_atomic = attr->max_rd_atomic ?
+ roundup_pow_of_two(attr->max_rd_atomic) : 0;
qp->attr.max_rd_atomic = max_rd_atomic;
atomic_set(&qp->req.rd_atomic, max_rd_atomic);
}
if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- int max_dest_rd_atomic =
- __roundup_pow_of_two(attr->max_dest_rd_atomic);
+ int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
+ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
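
The point of the "attr->max_rd_atomic ? roundup_pow_of_two(...) : 0" guard is that the kernel helper is only defined for inputs >= 1; a request of zero must simply disable RD atomic rather than be rounded. A user-space sketch of the guarded call (the helper below is a stand-in, not the kernel implementation):

#include <stdio.h>

/* Stand-in: smallest power of two >= x, valid only for x >= 1. */
static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int req[] = { 0, 1, 3, 8 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int v = req[i] ? roundup_pow_of_two(req[i]) : 0;

		printf("requested %u -> %u\n", req[i], v); /* 0 1 4 8 */
	}
	return 0;
}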
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index ff92704de32f..245040c3a35d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
if (outbuf) {
ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
- if (!ip)
+ if (IS_ERR(ip)) {
+ err = PTR_ERR(ip);
goto err1;
+ }
- err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
- if (err)
+ if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
+ err = -EFAULT;
goto err2;
+ }
spin_lock_bh(&rxe->pending_lock);
list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
@@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
err2:
kfree(ip);
err1:
- return -EINVAL;
+ return err;
}
inline void rxe_queue_reset(struct rxe_queue *q)
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index acd0a925481c..8ef17d617022 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -63,7 +63,7 @@ struct rxe_queue_buf {
__u32 pad_2[31];
__u32 consumer_index;
__u32 pad_3[31];
- __u8 data[0];
+ __u8 data[];
};
struct rxe_queue {
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index c5651a96b196..559e5fd3bad8 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1769,14 +1769,23 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
return 0;
}
-static int siw_listen_address(struct iw_cm_id *id, int backlog,
- struct sockaddr *laddr, int addr_family)
+/*
+ * siw_create_listen - Create resources for a listener's IWCM ID @id
+ *
+ * Starts listening on the socket address id->local_addr.
+ */
+int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct socket *s;
struct siw_cep *cep = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
+ int addr_family = id->local_addr.ss_family;
int rv = 0, s_val;
+ if (addr_family != AF_INET && addr_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
return rv;
@@ -1791,9 +1800,25 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
siw_dbg(id->device, "setsockopt error: %d\n", rv);
goto error;
}
- rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
- sizeof(struct sockaddr_in) :
- sizeof(struct sockaddr_in6));
+ if (addr_family == AF_INET) {
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ } else {
+ struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv6_addr_any(&laddr->sin6_addr))
+ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in6));
+ }
if (rv) {
siw_dbg(id->device, "socket bind error: %d\n", rv);
goto error;
@@ -1852,7 +1877,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
- siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
+ siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
return 0;
@@ -1910,106 +1935,6 @@ static void siw_drop_listeners(struct iw_cm_id *id)
}
}
-/*
- * siw_create_listen - Create resources for a listener's IWCM ID @id
- *
- * Listens on the socket address id->local_addr.
- *
- * If the listener's @id provides a specific local IP address, at most one
- * listening socket is created and associated with @id.
- *
- * If the listener's @id provides the wildcard (zero) local IP address,
- * a separate listen is performed for each local IP address of the device
- * by creating a listening socket and binding to that local IP address.
- *
- */
-int siw_create_listen(struct iw_cm_id *id, int backlog)
-{
- struct net_device *dev = to_siw_dev(id->device)->netdev;
- int rv = 0, listeners = 0;
-
- siw_dbg(id->device, "backlog %d\n", backlog);
-
- /*
- * For each attached address of the interface, create a
- * listening socket, if id->local_addr is the wildcard
- * IP address or matches the IP address.
- */
- if (id->local_addr.ss_family == AF_INET) {
- struct in_device *in_dev = in_dev_get(dev);
- struct sockaddr_in s_laddr;
- const struct in_ifaddr *ifa;
-
- if (!in_dev) {
- rv = -ENODEV;
- goto out;
- }
- memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
-
- siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
-
- rtnl_lock();
- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
- if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) ||
- s_laddr.sin_addr.s_addr == ifa->ifa_address) {
- s_laddr.sin_addr.s_addr = ifa->ifa_address;
-
- rv = siw_listen_address(id, backlog,
- (struct sockaddr *)&s_laddr,
- AF_INET);
- if (!rv)
- listeners++;
- }
- }
- rtnl_unlock();
- in_dev_put(in_dev);
- } else if (id->local_addr.ss_family == AF_INET6) {
- struct inet6_dev *in6_dev = in6_dev_get(dev);
- struct inet6_ifaddr *ifp;
- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr);
-
- if (!in6_dev) {
- rv = -ENODEV;
- goto out;
- }
- siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
-
- rtnl_lock();
- list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
- if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
- continue;
- if (ipv6_addr_any(&s_laddr->sin6_addr) ||
- ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
- struct sockaddr_in6 bind_addr = {
- .sin6_family = AF_INET6,
- .sin6_port = s_laddr->sin6_port,
- .sin6_flowinfo = 0,
- .sin6_addr = ifp->addr,
- .sin6_scope_id = dev->ifindex };
-
- rv = siw_listen_address(id, backlog,
- (struct sockaddr *)&bind_addr,
- AF_INET6);
- if (!rv)
- listeners++;
- }
- }
- rtnl_unlock();
- in6_dev_put(in6_dev);
- } else {
- rv = -EAFNOSUPPORT;
- }
-out:
- if (listeners)
- rv = 0;
- else if (!rv)
- rv = -EINVAL;
-
- siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
-
- return rv;
-}
-
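
The removed code created one listening socket per interface address; the rewrite above binds a single socket and, for wildcard addresses, scopes it to the port's netdev via sk_bound_dev_if. A rough user-space analogue of that scoping uses SO_BINDTODEVICE, sketched below (the device name is a placeholder and binding to a device needs CAP_NET_RAW):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* One wildcard listener, limited to a single netdev so listeners on
 * other interfaces do not capture the connection. */
static int listen_on_dev(const char *dev, unsigned short port)
{
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_port = htons(port),
				 .sin_addr.s_addr = INADDR_ANY };
	int s = socket(AF_INET, SOCK_STREAM, 0);

	if (s < 0)
		return -1;
	if (setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev)) ||
	    bind(s, (struct sockaddr *)&a, sizeof(a)) ||
	    listen(s, 128))
		return -1;
	return s;
}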
int siw_destroy_listen(struct iw_cm_id *id)
{
if (!id->provider_data) {
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 9ccce2909ac4..650520244ed0 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -332,7 +332,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
struct siw_srq *srq;
struct siw_wqe *wqe = NULL;
bool srq_event = false;
- unsigned long flags;
+ unsigned long uninitialized_var(flags);
srq = qp->srq;
if (srq) {
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index ae92c8080967..9f53aa4feb87 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
{
struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
struct siw_device *sdev = to_siw_dev(pd->device);
- struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
+ struct siw_mem *mem;
int rv = 0;
siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
- if (unlikely(!mem || !base_mr)) {
+ if (unlikely(!base_mr)) {
pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
return -EINVAL;
}
+
if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
- rv = -EINVAL;
- goto out;
+ return -EINVAL;
}
+
+ mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
+ if (unlikely(!mem)) {
+ pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
+ return -EINVAL;
+ }
+
if (unlikely(mem->pd != pd)) {
pr_warn("siw: fastreg: PD mismatch\n");
rv = -EINVAL;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 07e30138aaa1..aeb842bc7a1e 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -165,15 +165,16 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
struct ib_port_attr *attr)
{
struct siw_device *sdev = to_siw_dev(base_dev);
+ int rv;
memset(attr, 0, sizeof(*attr));
- attr->active_mtu = attr->max_mtu;
- attr->active_speed = 2;
- attr->active_width = 2;
+ rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
+ &attr->active_width);
attr->gid_tbl_len = 1;
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
attr->pkey_tbl_len = 1;
@@ -192,7 +193,7 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
* attr->subnet_timeout = 0;
* attr->init_type_repy = 0;
*/
- return 0;
+ return rv;
}
int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
@@ -322,7 +323,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
- rv = -EINVAL;
+ rv = -EOPNOTSUPP;
goto err_out;
}
if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 2aa3457a30ce..9a3379c49541 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -377,8 +377,12 @@ struct ipoib_dev_priv {
struct ipoib_rx_buf *rx_ring;
struct ipoib_tx_buf *tx_ring;
+ /* cyclic ring variables for managing tx_ring, for UD only */
unsigned int tx_head;
unsigned int tx_tail;
+ /* cyclic ring variables for counting overall outstanding send WRs */
+ unsigned int global_tx_head;
+ unsigned int global_tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
struct ib_ud_wr tx_wr;
struct ib_wc send_wc[MAX_SEND_CQE];
@@ -838,6 +842,4 @@ extern int ipoib_debug_level;
#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
-extern const char ipoib_driver_version[];
-
#endif /* _IPOIB_H */
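
The new global_tx_head/global_tx_tail pair mirrors tx_head/tx_tail but counts outstanding send WRs across the UD QP and all connected-mode TX rings, and queue stop/wake decisions now compare their difference against ipoib_sendq_size. The difference of free-running unsigned counters stays correct across wraparound, as this small sketch shows (values hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int head = 4294967294u;	/* about to wrap */
	unsigned int tail = 4294967291u;
	unsigned int sendq_size = 64;

	head += 5;	/* wraps past UINT_MAX; well-defined for unsigned */
	printf("outstanding=%u full=%d\n", head - tail,
	       head - tail == sendq_size - 1);	/* outstanding=8 full=0 */
	return 0;
}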
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c59e00a0881f..9bf0fa30df28 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
return;
}
- if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
netif_stop_queue(dev);
@@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
} else {
netif_trans_update(dev);
++tx->tx_head;
- ++priv->tx_head;
+ ++priv->global_tx_head;
}
}
@@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
netif_tx_lock(dev);
++tx->tx_tail;
- ++priv->tx_tail;
+ ++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
- (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
@@ -1232,8 +1234,9 @@ timeout:
dev_kfree_skb_any(tx_req->skb);
netif_tx_lock_bh(p->dev);
++p->tx_tail;
- ++priv->tx_tail;
- if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
+ ++priv->global_tx_tail;
+ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 63e4f9d15fd9..67a21fdf5367 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -68,9 +68,6 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->version, ipoib_driver_version,
- sizeof(drvinfo->version));
-
strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
}
@@ -213,6 +210,8 @@ static int ipoib_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops ipoib_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_link_ksettings = ipoib_get_link_ksettings,
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index c332b4761816..da3c5315bbb5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
+ ++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
- ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
@@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
else
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
/* increase the tx_head after send success, but use it for queue state */
- if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
netif_stop_queue(dev);
}
@@ -662,6 +665,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
rc = priv->tx_head;
++priv->tx_head;
+ ++priv->global_tx_head;
}
return rc;
}
@@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
+ ++priv->global_tx_tail;
}
for (i = 0; i < ipoib_recvq_size; ++i) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 4a0d3a9e72e1..ceec24d45185 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -52,10 +52,6 @@
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
-#define DRV_VERSION "1.0.0"
-
-const char ipoib_driver_version[] = DRV_VERSION;
-
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
@@ -1188,9 +1184,11 @@ static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
jiffies_to_msecs(jiffies - dev_trans_start(dev)));
- ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
- netif_queue_stopped(dev),
- priv->tx_head, priv->tx_tail);
+ ipoib_warn(priv,
+ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+ priv->global_tx_head, priv->global_tx_tail);
+
/* XXX reset QP, etc. */
}
@@ -1705,7 +1703,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
goto out_rx_ring_cleanup;
}
- /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
+ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
if (ipoib_transport_dev_init(dev, priv->ca)) {
pr_warn("%s: ipoib_transport_dev_init failed\n",
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 7a8f24de3631..999ef7cdd05e 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -292,12 +292,27 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
{
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+ struct iser_fr_desc *desc;
+ struct ib_mr_status mr_status;
- if (!reg->mem_h)
+ desc = reg->mem_h;
+ if (!desc)
return;
- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
- reg->mem_h);
+ /*
+ * The signature MR cannot be invalidated and reused without checking.
+ * libiscsi calls the check_protection transport handler only if a
+ * SCSI response is received, and the signature MR is not checked when
+ * the task completes for some other reason, such as a timeout or
+ * error handling. That's why we must check the signature MR here
+ * before returning it to the free pool.
+ */
+ if (unlikely(desc->sig_protected)) {
+ desc->sig_protected = false;
+ ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
+ &mr_status);
+ }
+ device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc);
reg->mem_h = NULL;
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
index 4480092c68e0..d324312a373c 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -239,7 +239,7 @@ struct opa_veswport_mactable_entry {
* @offset: mac table starting offset
* @num_entries: Number of entries to get or set
* @mac_tbl_digest: mac table digest
- * @tbl_entries[]: Array of table entries
+ * @tbl_entries: Array of table entries
*
* The EM sends down this structure in a MAD indicating
* the starting offset in the forwarding table that this
@@ -258,7 +258,7 @@ struct opa_veswport_mactable {
__be16 offset;
__be16 num_entries;
__be32 mac_tbl_digest;
- struct opa_veswport_mactable_entry tbl_entries[0];
+ struct opa_veswport_mactable_entry tbl_entries[];
} __packed;
/**
@@ -440,7 +440,7 @@ struct opa_veswport_iface_macs {
__be16 num_macs_in_msg;
__be16 tot_macs_in_lst;
__be16 gen_count;
- struct opa_vnic_iface_mac_entry entry[0];
+ struct opa_vnic_iface_mac_entry entry[];
} __packed;
/**
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
index 8ad7da989a0e..42d557dff19d 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -125,8 +125,6 @@ static void vnic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, opa_vnic_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index 6dbc08e1a6a6..dd942dd642bd 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -292,7 +292,6 @@ struct opa_vnic_mac_tbl_node {
hlist_for_each_entry(obj, &name[bkt], member)
extern char opa_vnic_driver_name[];
-extern const char opa_vnic_driver_version[];
struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
u8 port_num, u8 vport_num);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index be5befd92d16..6e8d650c17c7 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -59,9 +59,7 @@
#include "opa_vnic_internal.h"
-#define DRV_VERSION "1.0"
char opa_vnic_driver_name[] = "opa_vnic";
-const char opa_vnic_driver_version[] = DRV_VERSION;
/*
* The trap service level is kept in bits 3 to 7 in the trap_sl_rsvd
@@ -1041,9 +1039,6 @@ static int __init opa_vnic_init(void)
{
int rc;
- pr_info("OPA Virtual Network Driver - v%s\n",
- opa_vnic_driver_version);
-
rc = ib_register_client(&opa_vnic_client);
if (rc)
pr_err("VNIC driver register failed %d\n", rc);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 5359ece561ca..6fabcc2faf1f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -309,7 +309,7 @@ struct srp_fr_pool {
int max_page_list_len;
spinlock_t lock;
struct list_head free_list;
- struct srp_fr_desc desc[0];
+ struct srp_fr_desc desc[];
};
/**
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index a81e14148407..f699538bdac4 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -16,7 +16,7 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
if (dev->absinfo && test_bit(src, dev->absbit)) {
dev->absinfo[dst] = dev->absinfo[src];
dev->absinfo[dst].fuzz = 0;
- dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst);
+ __set_bit(dst, dev->absbit);
}
}
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index a7bc576eb342..434d265fa2e8 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -247,7 +247,7 @@ static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char
db9_saturn_write_sub(port, type, 3, powered, 0);
return data[0] = 0xe3;
}
- /* fall through */
+ fallthrough;
default:
return data[0];
}
@@ -267,14 +267,14 @@ static int db9_saturn_report(unsigned char id, unsigned char data[60], struct in
switch (data[j]) {
case 0x16: /* multi controller (analog 4 axis) */
input_report_abs(dev, db9_abs[5], data[j + 6]);
- /* fall through */
+ fallthrough;
case 0x15: /* mission stick (analog 3 axis) */
input_report_abs(dev, db9_abs[3], data[j + 4]);
input_report_abs(dev, db9_abs[4], data[j + 5]);
- /* fall through */
+ fallthrough;
case 0x13: /* racing controller (analog 1 axis) */
input_report_abs(dev, db9_abs[2], data[j + 3]);
- /* fall through */
+ fallthrough;
case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */
case 0x02: /* digital pad (digital 2 axis + buttons) */
input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
@@ -368,7 +368,7 @@ static void db9_timer(struct timer_list *t)
input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1);
- /* fall through */
+ fallthrough;
case DB9_MULTI_0802:
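
These /* fall through */ conversions, here and throughout the rest of this series, switch to the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which expands to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough is satisfied by an annotation the compiler actually checks instead of by comment parsing. A minimal standalone sketch (GCC 7+ or Clang 10+):

#include <stdio.h>

/* Local stand-in for the kernel's definition so the sketch builds
 * outside the tree. */
#define fallthrough     __attribute__((__fallthrough__))

static void report(int fingers)
{
        switch (fingers) {
        case 2:
                puts("two contacts");
                fallthrough;    /* intentional: compiler-verified */
        case 1:
                puts("at least one contact");
                break;
        default:
                puts("no contact");
        }
}

int main(void)
{
        report(2);
        return 0;
}
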
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index e0a362be5812..88df68cc4ac6 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -485,7 +485,7 @@ static void gc_multi_process_packet(struct gc *gc)
switch (pad->type) {
case GC_MULTI2:
input_report_key(dev, BTN_THUMB, s & data[5]);
- /* fall through */
+ fallthrough;
case GC_MULTI:
input_report_abs(dev, ABS_X,
@@ -638,7 +638,7 @@ static void gc_psx_report_one(struct gc_pad *pad, unsigned char psx_type,
input_report_key(dev, BTN_THUMBL, ~data[0] & 0x04);
input_report_key(dev, BTN_THUMBR, ~data[0] & 0x02);
- /* fall through */
+ fallthrough;
case GC_PSX_NEGCON:
case GC_PSX_ANALOG:
@@ -872,7 +872,8 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
case GC_SNES:
for (i = 4; i < 8; i++)
input_set_capability(input_dev, EV_KEY, gc_snes_btn[i]);
- /* fall through */
+ fallthrough;
+
case GC_NES:
for (i = 0; i < 4; i++)
input_set_capability(input_dev, EV_KEY, gc_snes_btn[i]);
@@ -880,7 +881,8 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
case GC_MULTI2:
input_set_capability(input_dev, EV_KEY, BTN_THUMB);
- /* fall through */
+ fallthrough;
+
case GC_MULTI:
input_set_capability(input_dev, EV_KEY, BTN_TRIGGER);
/* fall through */
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 1777e68c9f02..fac91ea14f17 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -656,16 +656,19 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
switch (i * m) {
case 60:
- sw->number++; /* fall through */
+ sw->number++;
+ fallthrough;
case 45: /* Ambiguous packet length */
if (j <= 40) { /* ID length less or eq 40 -> FSP */
case 43:
sw->type = SW_ID_FSP;
break;
}
- sw->number++; /* fall through */
+ sw->number++;
+ fallthrough;
case 30:
- sw->number++; /* fall through */
+ sw->number++;
+ fallthrough;
case 15:
sw->type = SW_ID_GP;
break;
@@ -681,9 +684,11 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
sw->type = SW_ID_PP;
break;
case 66:
- sw->bits = 3; /* fall through */
+ sw->bits = 3;
+ fallthrough;
case 198:
- sw->length = 22; /* fall through */
+ sw->length = 22;
+ fallthrough;
case 64:
sw->type = SW_ID_3DP;
if (j == 160)
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index cf7cbcd0c29d..429411c6c0a8 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -146,7 +146,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
break;
}
spaceball->escape = 0;
- /* fall through */
+ fallthrough;
case 'M':
case 'Q':
case 'S':
@@ -154,7 +154,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
spaceball->escape = 0;
data &= 0x1f;
}
- /* fall through */
+ fallthrough;
default:
if (spaceball->escape)
spaceball->escape = 0;
@@ -220,13 +220,13 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv)
input_dev->keybit[BIT_WORD(BTN_A)] |= BIT_MASK(BTN_A) |
BIT_MASK(BTN_B) | BIT_MASK(BTN_C) |
BIT_MASK(BTN_MODE);
- /* fall through */
+ fallthrough;
default:
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_2) |
BIT_MASK(BTN_3) | BIT_MASK(BTN_4) |
BIT_MASK(BTN_5) | BIT_MASK(BTN_6) |
BIT_MASK(BTN_7) | BIT_MASK(BTN_8);
- /* fall through */
+ fallthrough;
case SPACEBALL_3003C:
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_1) |
BIT_MASK(BTN_8);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f6c356e579c5..793ecbbda32c 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -663,6 +663,16 @@ config KEYBOARD_IPAQ_MICRO
To compile this driver as a module, choose M here: the
module will be called ipaq-micro-keys.
+config KEYBOARD_IQS62X
+ tristate "Azoteq IQS620A/621/622/624/625 keys and switches"
+ depends on MFD_IQS62X
+ help
+ Say Y here to enable key and switch support for the Azoteq IQS620A,
+ IQS621, IQS622, IQS624 and IQS625 multi-function sensors.
+
+ To compile this driver as a module, choose M here: the module will
+ be called iqs62x-keys.
+
config KEYBOARD_OMAP
tristate "TI OMAP keypad support"
depends on ARCH_OMAP1
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index f5b17524adf2..1d689fdd5c00 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IPAQ_MICRO) += ipaq-micro-keys.o
+obj-$(CONFIG_KEYBOARD_IQS62X) += iqs62x-keys.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
obj-$(CONFIG_KEYBOARD_IMX_SC_KEY) += imx_sc_key.o
obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index e7d58e7f0257..eb0e9cd66bcb 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -1016,7 +1016,7 @@ static int adp5589_probe(struct i2c_client *client,
switch (id->driver_data) {
case ADP5585_02:
kpad->support_row5 = true;
- /* fall through */
+ fallthrough;
case ADP5585_01:
kpad->is_adp5585 = true;
kpad->var = &const_adp5585;
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6ec28265771d..edc613efc158 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1241,7 +1241,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
case SERIO_8042_XL:
atkbd->translated = true;
- /* Fall through */
+ fallthrough;
case SERIO_8042:
if (serio->write)
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 53c9ff338dea..f2d4e4daa818 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -574,7 +574,6 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
IRQ_TYPE_EDGE_RISING : IRQ_TYPE_EDGE_FALLING;
break;
case EV_ACT_ANY:
- /* fall through */
default:
/*
* For other cases, we are OK letting suspend/resume
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
new file mode 100644
index 000000000000..93446b21f98f
--- /dev/null
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS620A/621/622/624/625 Keys and Switches
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum {
+ IQS62X_SW_HALL_N,
+ IQS62X_SW_HALL_S,
+};
+
+static const char * const iqs62x_switch_names[] = {
+ [IQS62X_SW_HALL_N] = "hall-switch-north",
+ [IQS62X_SW_HALL_S] = "hall-switch-south",
+};
+
+struct iqs62x_switch_desc {
+ enum iqs62x_event_flag flag;
+ unsigned int code;
+ bool enabled;
+};
+
+struct iqs62x_keys_private {
+ struct iqs62x_core *iqs62x;
+ struct input_dev *input;
+ struct notifier_block notifier;
+ struct iqs62x_switch_desc switches[ARRAY_SIZE(iqs62x_switch_names)];
+ unsigned int keycode[IQS62X_NUM_KEYS];
+ unsigned int keycodemax;
+ u8 interval;
+};
+
+static int iqs62x_keys_parse_prop(struct platform_device *pdev,
+ struct iqs62x_keys_private *iqs62x_keys)
+{
+ struct fwnode_handle *child;
+ unsigned int val;
+ int ret, i;
+
+ ret = device_property_count_u32(&pdev->dev, "linux,keycodes");
+ if (ret > IQS62X_NUM_KEYS) {
+ dev_err(&pdev->dev, "Too many keycodes present\n");
+ return -EINVAL;
+ } else if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to count keycodes: %d\n", ret);
+ return ret;
+ }
+ iqs62x_keys->keycodemax = ret;
+
+ ret = device_property_read_u32_array(&pdev->dev, "linux,keycodes",
+ iqs62x_keys->keycode,
+ iqs62x_keys->keycodemax);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read keycodes: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) {
+ child = device_get_named_child_node(&pdev->dev,
+ iqs62x_switch_names[i]);
+ if (!child)
+ continue;
+
+ ret = fwnode_property_read_u32(child, "linux,code", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read switch code: %d\n",
+ ret);
+ return ret;
+ }
+ iqs62x_keys->switches[i].code = val;
+ iqs62x_keys->switches[i].enabled = true;
+
+ if (fwnode_property_present(child, "azoteq,use-prox"))
+ iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
+ IQS62X_EVENT_HALL_N_P :
+ IQS62X_EVENT_HALL_S_P);
+ else
+ iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
+ IQS62X_EVENT_HALL_N_T :
+ IQS62X_EVENT_HALL_S_T);
+ }
+
+ return 0;
+}
+
+static int iqs62x_keys_init(struct iqs62x_keys_private *iqs62x_keys)
+{
+ struct iqs62x_core *iqs62x = iqs62x_keys->iqs62x;
+ enum iqs62x_event_flag flag;
+ unsigned int event_reg, val;
+ unsigned int event_mask = 0;
+ int ret, i;
+
+ switch (iqs62x->dev_desc->prod_num) {
+ case IQS620_PROD_NUM:
+ case IQS621_PROD_NUM:
+ case IQS622_PROD_NUM:
+ event_reg = IQS620_GLBL_EVENT_MASK;
+
+ /*
+ * Discrete button, hysteresis and SAR UI flags represent keys
+ * and are unmasked if mapped to a valid keycode.
+ */
+ for (i = 0; i < iqs62x_keys->keycodemax; i++) {
+ if (iqs62x_keys->keycode[i] == KEY_RESERVED)
+ continue;
+
+ if (iqs62x_events[i].reg == IQS62X_EVENT_PROX)
+ event_mask |= iqs62x->dev_desc->prox_mask;
+ else if (iqs62x_events[i].reg == IQS62X_EVENT_HYST)
+ event_mask |= (iqs62x->dev_desc->hyst_mask |
+ iqs62x->dev_desc->sar_mask);
+ }
+
+ ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->hall_flags,
+ &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Hall UI flags represent switches and are unmasked if their
+ * corresponding child nodes are present.
+ */
+ for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) {
+ if (!(iqs62x_keys->switches[i].enabled))
+ continue;
+
+ flag = iqs62x_keys->switches[i].flag;
+
+ if (iqs62x_events[flag].reg != IQS62X_EVENT_HALL)
+ continue;
+
+ event_mask |= iqs62x->dev_desc->hall_mask;
+
+ input_report_switch(iqs62x_keys->input,
+ iqs62x_keys->switches[i].code,
+ (val & iqs62x_events[flag].mask) ==
+ iqs62x_events[flag].val);
+ }
+
+ input_sync(iqs62x_keys->input);
+ break;
+
+ case IQS624_PROD_NUM:
+ event_reg = IQS624_HALL_UI;
+
+ /*
+ * Interval change events represent keys and are unmasked if
+ * either wheel movement flag is mapped to a valid keycode.
+ */
+ if (iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_UP] != KEY_RESERVED)
+ event_mask |= IQS624_HALL_UI_INT_EVENT;
+
+ if (iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_DN] != KEY_RESERVED)
+ event_mask |= IQS624_HALL_UI_INT_EVENT;
+
+ ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->interval,
+ &val);
+ if (ret)
+ return ret;
+
+ iqs62x_keys->interval = val;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return regmap_update_bits(iqs62x->regmap, event_reg, event_mask, 0);
+}
+
+static int iqs62x_keys_notifier(struct notifier_block *notifier,
+ unsigned long event_flags, void *context)
+{
+ struct iqs62x_event_data *event_data = context;
+ struct iqs62x_keys_private *iqs62x_keys;
+ int ret, i;
+
+ iqs62x_keys = container_of(notifier, struct iqs62x_keys_private,
+ notifier);
+
+ if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) {
+ ret = iqs62x_keys_init(iqs62x_keys);
+ if (ret) {
+ dev_err(iqs62x_keys->input->dev.parent,
+ "Failed to re-initialize device: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_OK;
+ }
+
+ for (i = 0; i < iqs62x_keys->keycodemax; i++) {
+ if (iqs62x_events[i].reg == IQS62X_EVENT_WHEEL &&
+ event_data->interval == iqs62x_keys->interval)
+ continue;
+
+ input_report_key(iqs62x_keys->input, iqs62x_keys->keycode[i],
+ event_flags & BIT(i));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++)
+ if (iqs62x_keys->switches[i].enabled)
+ input_report_switch(iqs62x_keys->input,
+ iqs62x_keys->switches[i].code,
+ event_flags &
+ BIT(iqs62x_keys->switches[i].flag));
+
+ input_sync(iqs62x_keys->input);
+
+ if (event_data->interval == iqs62x_keys->interval)
+ return NOTIFY_OK;
+
+ /*
+ * Each frame contains at most one wheel event (up or down), in which
+ * case a complementary release cycle is emulated.
+ */
+ if (event_flags & BIT(IQS62X_EVENT_WHEEL_UP)) {
+ input_report_key(iqs62x_keys->input,
+ iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_UP],
+ 0);
+ input_sync(iqs62x_keys->input);
+ } else if (event_flags & BIT(IQS62X_EVENT_WHEEL_DN)) {
+ input_report_key(iqs62x_keys->input,
+ iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_DN],
+ 0);
+ input_sync(iqs62x_keys->input);
+ }
+
+ iqs62x_keys->interval = event_data->interval;
+
+ return NOTIFY_OK;
+}
+
+static int iqs62x_keys_probe(struct platform_device *pdev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct iqs62x_keys_private *iqs62x_keys;
+ struct input_dev *input;
+ int ret, i;
+
+ iqs62x_keys = devm_kzalloc(&pdev->dev, sizeof(*iqs62x_keys),
+ GFP_KERNEL);
+ if (!iqs62x_keys)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, iqs62x_keys);
+
+ ret = iqs62x_keys_parse_prop(pdev, iqs62x_keys);
+ if (ret)
+ return ret;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->keycodemax = iqs62x_keys->keycodemax;
+ input->keycode = iqs62x_keys->keycode;
+ input->keycodesize = sizeof(*iqs62x_keys->keycode);
+
+ input->name = iqs62x->dev_desc->dev_name;
+ input->id.bustype = BUS_I2C;
+
+ for (i = 0; i < iqs62x_keys->keycodemax; i++)
+ if (iqs62x_keys->keycode[i] != KEY_RESERVED)
+ input_set_capability(input, EV_KEY,
+ iqs62x_keys->keycode[i]);
+
+ for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++)
+ if (iqs62x_keys->switches[i].enabled)
+ input_set_capability(input, EV_SW,
+ iqs62x_keys->switches[i].code);
+
+ iqs62x_keys->iqs62x = iqs62x;
+ iqs62x_keys->input = input;
+
+ ret = iqs62x_keys_init(iqs62x_keys);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device: %d\n", ret);
+ return ret;
+ }
+
+ ret = input_register_device(iqs62x_keys->input);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register device: %d\n", ret);
+ return ret;
+ }
+
+ iqs62x_keys->notifier.notifier_call = iqs62x_keys_notifier;
+ ret = blocking_notifier_chain_register(&iqs62x_keys->iqs62x->nh,
+ &iqs62x_keys->notifier);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret);
+
+ return ret;
+}
+
+static int iqs62x_keys_remove(struct platform_device *pdev)
+{
+ struct iqs62x_keys_private *iqs62x_keys = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(&iqs62x_keys->iqs62x->nh,
+ &iqs62x_keys->notifier);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to unregister notifier: %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver iqs62x_keys_platform_driver = {
+ .driver = {
+ .name = "iqs62x-keys",
+ },
+ .probe = iqs62x_keys_probe,
+ .remove = iqs62x_keys_remove,
+};
+module_platform_driver(iqs62x_keys_platform_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS620A/621/622/624/625 Keys and Switches");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iqs62x-keys");
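
The new driver above receives device events through the parent MFD core's blocking notifier chain (blocking_notifier_chain_register() in probe). Below is a minimal userspace model of that publish/subscribe pattern; the names mirror <linux/notifier.h>, but the real kernel chains also handle locking and callback priorities:

#include <stdio.h>

#define NOTIFY_OK       0
#define NOTIFY_BAD      1

struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb,
                             unsigned long event, void *data);
        struct notifier_block *next;
};

static void chain_register(struct notifier_block **head,
                           struct notifier_block *nb)
{
        nb->next = *head;
        *head = nb;
}

/* Walk the chain until a callback vetoes the event, as
 * blocking_notifier_call_chain() does in the kernel. */
static int chain_call(struct notifier_block *head,
                      unsigned long event, void *data)
{
        for (; head; head = head->next)
                if (head->notifier_call(head, event, data) == NOTIFY_BAD)
                        return NOTIFY_BAD;
        return NOTIFY_OK;
}

static int keys_notifier(struct notifier_block *nb,
                         unsigned long event_flags, void *data)
{
        printf("key event flags: %#lx\n", event_flags);
        return NOTIFY_OK;
}

int main(void)
{
        struct notifier_block *chain = NULL;
        struct notifier_block keys_nb = { .notifier_call = keys_notifier };

        chain_register(&chain, &keys_nb);
        chain_call(chain, 1UL << 3, NULL);      /* publish one event */
        return 0;
}
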
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 305f0160506a..8a36d78fed63 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -68,7 +68,7 @@ static int ati_remote2_get_channel_mask(char *buffer,
{
pr_debug("%s()\n", __func__);
- return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
+ return sprintf(buffer, "0x%04x\n", *(unsigned int *)kp->arg);
}
static int ati_remote2_set_mode_mask(const char *val,
@@ -84,7 +84,7 @@ static int ati_remote2_get_mode_mask(char *buffer,
{
pr_debug("%s()\n", __func__);
- return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg);
+ return sprintf(buffer, "0x%02x\n", *(unsigned int *)kp->arg);
}
static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
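
The ati_remote2 changes above (and the matching psmouse-base change further down) append a newline to custom module-parameter getters, so reads of /sys/module/.../parameters/* come back newline-terminated like the output of the stock param_get_*() helpers. A kernel-style sketch of the pattern; my_mask, my_ops and my_get_mask() are hypothetical names, not code from this patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int my_mask = 0xf;

static int my_get_mask(char *buffer, const struct kernel_param *kp)
{
        /* sysfs reads expect newline-terminated values */
        return sprintf(buffer, "0x%04x\n", *(unsigned int *)kp->arg);
}

static const struct kernel_param_ops my_ops = {
        .set = param_set_uint,
        .get = my_get_mask,
};

module_param_cb(mask, &my_ops, &my_mask, 0644);
MODULE_LICENSE("GPL");
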
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index c09b9628ad34..e413801f0491 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -663,12 +663,8 @@ static const struct usb_device_id cm109_usb_table[] = {
static void cm109_usb_cleanup(struct cm109_dev *dev)
{
kfree(dev->ctl_req);
- if (dev->ctl_data)
- usb_free_coherent(dev->udev, USB_PKT_LEN,
- dev->ctl_data, dev->ctl_dma);
- if (dev->irq_data)
- usb_free_coherent(dev->udev, USB_PKT_LEN,
- dev->irq_data, dev->irq_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN, dev->ctl_data, dev->ctl_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN, dev->irq_data, dev->irq_dma);
usb_free_urb(dev->urb_irq); /* parameter validation in core/urb */
usb_free_urb(dev->urb_ctl); /* parameter validation in core/urb */
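
The cm109 cleanup above can drop its if-checks because usb_free_coherent(), like kfree(), is safe to call with a NULL address and simply returns. C's free() gives the same guarantee, as in this trivial standalone illustration:

#include <stdlib.h>

struct dev_buffers {
        void *ctl_data;
        void *irq_data;
};

static void buffers_cleanup(struct dev_buffers *b)
{
        /* free(NULL) is defined as a no-op, so no guards are needed --
         * the same contract usb_free_coherent() and kfree() provide. */
        free(b->ctl_data);
        free(b->irq_data);
}

int main(void)
{
        struct dev_buffers b = { .ctl_data = malloc(16), .irq_data = NULL };

        buffers_cleanup(&b);
        return 0;
}
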
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index d8dbfc030d0f..08b9b5cdb943 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -335,7 +335,7 @@ static int ims_pcu_setup_gamepad(struct ims_pcu *pcu)
err_free_mem:
input_free_device(input);
kfree(gamepad);
- return -ENOMEM;
+ return error;
}
static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu)
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
index 6699eb160a0f..a348247d3d38 100644
--- a/drivers/input/misc/iqs269a.c
+++ b/drivers/input/misc/iqs269a.c
@@ -575,8 +575,7 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269,
case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5:
engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
-
- /* fall through */
+ fallthrough;
case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY:
engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
@@ -731,14 +730,12 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269,
iqs269->switches[i].code = val;
iqs269->switches[i].enabled = true;
}
-
- /* fall through */
+ fallthrough;
case IQS269_CHx_HALL_INACTIVE:
if (iqs269->hall_enable)
break;
-
- /* fall through */
+ fallthrough;
default:
iqs269->keycode[i * IQS269_NUM_CH + reg] = val;
@@ -1143,14 +1140,12 @@ static int iqs269_input_init(struct iqs269_private *iqs269)
sw_code,
state & BIT(j));
}
-
- /* fall through */
+ fallthrough;
case IQS269_CHx_HALL_INACTIVE:
if (iqs269->hall_enable)
continue;
-
- /* fall through */
+ fallthrough;
default:
if (keycode != KEY_RESERVED)
@@ -1273,14 +1268,12 @@ static int iqs269_report(struct iqs269_private *iqs269)
input_report_switch(iqs269->keypad,
sw_code,
state & BIT(j));
-
- /* fall through */
+ fallthrough;
case IQS269_CHx_HALL_INACTIVE:
if (iqs269->hall_enable)
continue;
-
- /* fall through */
+ fallthrough;
default:
input_report_key(iqs269->keypad, keycode,
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 8ceaf7db2882..81e777a04b88 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -190,7 +190,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Failed to request direction pwm: %d", err);
- /* Fall through */
+ fallthrough;
case -EPROBE_DEFER:
return err;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index a1bba722b234..4ff5cd2a6d8d 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -124,7 +124,7 @@ static void xenkbd_handle_mt_event(struct xenkbd_info *info,
switch (mtouch->event_type) {
case XENKBD_MT_EV_DOWN:
input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, true);
- /* fall through */
+ fallthrough;
case XENKBD_MT_EV_MOTION:
input_report_abs(info->mtouch, ABS_MT_POSITION_X,
@@ -524,7 +524,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 34700eda0429..b067bfd2699c 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1929,7 +1929,7 @@ static int alps_monitor_mode(struct psmouse *psmouse, bool enable)
static int alps_absolute_mode_v6(struct psmouse *psmouse)
{
u16 reg_val = 0x181;
- int ret = -1;
+ int ret;
/* enter monitor mode, to write the register */
if (alps_monitor_mode(psmouse, true))
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 3f06e8a495d8..bfa26651c0be 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -458,7 +458,7 @@ static int atp_status_check(struct urb *urb)
dev->info->datalen, dev->urb->actual_length);
dev->overflow_warned = true;
}
- /* fall through */
+ fallthrough;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 00e395dfc3d5..a0361f9325f8 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -1067,7 +1067,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
return error;
}
- /* Fall through */
+ fallthrough;
case CYAPA_STATE_BL_IDLE:
/* Try to get firmware version in bootloader mode. */
cyapa_gen3_bl_query_data(cyapa);
@@ -1078,7 +1078,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
return error;
}
- /* Fall through */
+ fallthrough;
case CYAPA_STATE_OP:
/*
* Reading query data before going back to the full mode
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 7f012bfa2658..bb3a63d1268d 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -2554,7 +2554,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
}
cyapa->state = CYAPA_STATE_GEN5_APP;
- /* fall through */
+ fallthrough;
case CYAPA_STATE_GEN5_APP:
/*
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index c1b524ab4623..7eba66fbef58 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -680,7 +680,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa)
}
cyapa->state = CYAPA_STATE_GEN6_APP;
- /* fall through */
+ fallthrough;
case CYAPA_STATE_GEN6_APP:
/*
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index a9074ac9364f..c75b00c45d75 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -26,6 +26,8 @@
#define ETP_CALIBRATE_MAX_LEN 3
+#define ETP_FEATURE_REPORT_MK BIT(0)
+
/* IAP Firmware handling */
#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
@@ -33,6 +35,8 @@
#define ETP_FW_IAP_PAGE_ERR (1 << 5)
#define ETP_FW_IAP_INTF_ERR (1 << 4)
#define ETP_FW_PAGE_SIZE 64
+#define ETP_FW_PAGE_SIZE_128 128
+#define ETP_FW_PAGE_SIZE_512 512
#define ETP_FW_SIGNATURE_SIZE 6
struct i2c_client;
@@ -55,8 +59,9 @@ struct elan_transport_ops {
int (*get_baseline_data)(struct i2c_client *client,
bool max_baseline, u8 *value);
- int (*get_version)(struct i2c_client *client, bool iap, u8 *version);
- int (*get_sm_version)(struct i2c_client *client,
+ int (*get_version)(struct i2c_client *client, u8 pattern, bool iap,
+ u8 *version);
+ int (*get_sm_version)(struct i2c_client *client, u8 pattern,
u16 *ic_type, u8 *version, u8 *clickpad);
int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
int (*get_product_id)(struct i2c_client *client, u16 *id);
@@ -72,13 +77,18 @@ struct elan_transport_ops {
int (*iap_get_mode)(struct i2c_client *client, enum tp_mode *mode);
int (*iap_reset)(struct i2c_client *client);
- int (*prepare_fw_update)(struct i2c_client *client);
- int (*write_fw_block)(struct i2c_client *client,
+ int (*prepare_fw_update)(struct i2c_client *client, u16 ic_type,
+ u8 iap_version);
+ int (*write_fw_block)(struct i2c_client *client, u16 fw_page_size,
const u8 *page, u16 checksum, int idx);
int (*finish_fw_update)(struct i2c_client *client,
struct completion *reset_done);
- int (*get_report)(struct i2c_client *client, u8 *report);
+ int (*get_report_features)(struct i2c_client *client, u8 pattern,
+ unsigned int *features,
+ unsigned int *report_len);
+ int (*get_report)(struct i2c_client *client, u8 *report,
+ unsigned int report_len);
int (*get_pressure_adjustment)(struct i2c_client *client,
int *adjustment);
int (*get_pattern)(struct i2c_client *client, u8 *pattern);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 6291fb5fa015..c599e21a8478 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -50,12 +50,14 @@
#define ETP_MAX_FINGERS 5
#define ETP_FINGER_DATA_LEN 5
#define ETP_REPORT_ID 0x5D
+#define ETP_REPORT_ID2 0x60 /* High precision report */
#define ETP_TP_REPORT_ID 0x5E
#define ETP_REPORT_ID_OFFSET 2
#define ETP_TOUCH_INFO_OFFSET 3
#define ETP_FINGER_DATA_OFFSET 4
#define ETP_HOVER_INFO_OFFSET 30
-#define ETP_MAX_REPORT_LEN 34
+#define ETP_MK_DATA_OFFSET 33 /* For high precision reports */
+#define ETP_MAX_REPORT_LEN 39
/* The main device structure */
struct elan_tp_data {
@@ -85,11 +87,14 @@ struct elan_tp_data {
u8 sm_version;
u8 iap_version;
u16 fw_checksum;
+ unsigned int report_features;
+ unsigned int report_len;
int pressure_adjustment;
u8 mode;
u16 ic_type;
u16 fw_validpage_count;
- u16 fw_signature_address;
+ u16 fw_page_size;
+ u32 fw_signature_address;
bool irq_wake;
@@ -100,8 +105,8 @@ struct elan_tp_data {
bool middle_button;
};
-static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count,
- u16 *signature_address)
+static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
+ u32 *signature_address, u16 *page_size)
{
switch (ic_type) {
case 0x00:
@@ -126,16 +131,37 @@ static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count,
case 0x10:
*validpage_count = 1024;
break;
+ case 0x11:
+ *validpage_count = 1280;
+ break;
+ case 0x13:
+ *validpage_count = 2048;
+ break;
+ case 0x14:
+ case 0x15:
+ *validpage_count = 1024;
+ break;
default:
/* unknown IC type: clear values */
*validpage_count = 0;
*signature_address = 0;
+ *page_size = 0;
return -ENXIO;
}
*signature_address =
(*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
+ if ((ic_type == 0x14 || ic_type == 0x15) && iap_version >= 2) {
+ *validpage_count /= 8;
+ *page_size = ETP_FW_PAGE_SIZE_512;
+ } else if (ic_type >= 0x0D && iap_version >= 1) {
+ *validpage_count /= 2;
+ *page_size = ETP_FW_PAGE_SIZE_128;
+ } else {
+ *page_size = ETP_FW_PAGE_SIZE;
+ }
+
return 0;
}
@@ -215,8 +241,13 @@ static int elan_query_product(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_sm_version(data->client, &data->ic_type,
- &data->sm_version, &data->clickpad);
+ error = data->ops->get_pattern(data->client, &data->pattern);
+ if (error)
+ return error;
+
+ error = data->ops->get_sm_version(data->client, data->pattern,
+ &data->ic_type, &data->sm_version,
+ &data->clickpad);
if (error)
return error;
@@ -312,9 +343,9 @@ static int elan_initialize(struct elan_tp_data *data)
static int elan_query_device_info(struct elan_tp_data *data)
{
int error;
- u16 ic_type;
- error = data->ops->get_version(data->client, false, &data->fw_version);
+ error = data->ops->get_version(data->client, data->pattern, false,
+ &data->fw_version);
if (error)
return error;
@@ -323,7 +354,8 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_version(data->client, true, &data->iap_version);
+ error = data->ops->get_version(data->client, data->pattern,
+ true, &data->iap_version);
if (error)
return error;
@@ -332,17 +364,16 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_pattern(data->client, &data->pattern);
+ error = data->ops->get_report_features(data->client, data->pattern,
+ &data->report_features,
+ &data->report_len);
if (error)
return error;
- if (data->pattern == 0x01)
- ic_type = data->ic_type;
- else
- ic_type = data->iap_version;
-
- error = elan_get_fwinfo(ic_type, &data->fw_validpage_count,
- &data->fw_signature_address);
+ error = elan_get_fwinfo(data->ic_type, data->iap_version,
+ &data->fw_validpage_count,
+ &data->fw_signature_address,
+ &data->fw_page_size);
if (error)
dev_warn(&data->client->dev,
"unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
@@ -351,16 +382,21 @@ static int elan_query_device_info(struct elan_tp_data *data)
return 0;
}
-static unsigned int elan_convert_resolution(u8 val)
+static unsigned int elan_convert_resolution(u8 val, u8 pattern)
{
/*
- * (value from firmware) * 10 + 790 = dpi
- *
+ * pattern <= 0x01:
+ * (value from firmware) * 10 + 790 = dpi
+ * else
+ * ((value from firmware) + 3) * 100 = dpi
+ */
+ int res = pattern <= 0x01 ?
+ (int)(char)val * 10 + 790 : ((int)(char)val + 3) * 100;
+ /*
* We also have to convert dpi to dots/mm (*10/254 to avoid floating
* point).
*/
-
- return ((int)(char)val * 10 + 790) * 10 / 254;
+ return res * 10 / 254;
}
static int elan_query_device_parameters(struct elan_tp_data *data)
@@ -409,8 +445,8 @@ static int elan_query_device_parameters(struct elan_tp_data *data)
if (error)
return error;
- data->x_res = elan_convert_resolution(hw_x_res);
- data->y_res = elan_convert_resolution(hw_y_res);
+ data->x_res = elan_convert_resolution(hw_x_res, data->pattern);
+ data->y_res = elan_convert_resolution(hw_y_res, data->pattern);
} else {
data->x_res = (data->max_x + 1) / x_mm;
data->y_res = (data->max_y + 1) / y_mm;
@@ -430,14 +466,14 @@ static int elan_query_device_parameters(struct elan_tp_data *data)
* IAP firmware updater related routines
**********************************************************
*/
-static int elan_write_fw_block(struct elan_tp_data *data,
+static int elan_write_fw_block(struct elan_tp_data *data, u16 page_size,
const u8 *page, u16 checksum, int idx)
{
int retry = ETP_RETRY_COUNT;
int error;
do {
- error = data->ops->write_fw_block(data->client,
+ error = data->ops->write_fw_block(data->client, page_size,
page, checksum, idx);
if (!error)
return 0;
@@ -460,21 +496,23 @@ static int __elan_update_firmware(struct elan_tp_data *data,
u16 boot_page_count;
u16 sw_checksum = 0, fw_checksum = 0;
- error = data->ops->prepare_fw_update(client);
+ error = data->ops->prepare_fw_update(client, data->ic_type,
+ data->iap_version);
if (error)
return error;
iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
- boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
+ boot_page_count = (iap_start_addr * 2) / data->fw_page_size;
for (i = boot_page_count; i < data->fw_validpage_count; i++) {
u16 checksum = 0;
- const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
+ const u8 *page = &fw->data[i * data->fw_page_size];
- for (j = 0; j < ETP_FW_PAGE_SIZE; j += 2)
+ for (j = 0; j < data->fw_page_size; j += 2)
checksum += ((page[j + 1] << 8) | page[j]);
- error = elan_write_fw_block(data, page, checksum, i);
+ error = elan_write_fw_block(data, data->fw_page_size,
+ page, checksum, i);
if (error) {
dev_err(dev, "write page %d fail: %d\n", i, error);
return error;
@@ -886,24 +924,22 @@ static const struct attribute_group *elan_sysfs_groups[] = {
* Elan isr functions
******************************************************************
*/
-static void elan_report_contact(struct elan_tp_data *data,
- int contact_num, bool contact_valid,
- u8 *finger_data)
+static void elan_report_contact(struct elan_tp_data *data, int contact_num,
+ bool contact_valid, bool high_precision,
+ u8 *packet, u8 *finger_data)
{
struct input_dev *input = data->input;
unsigned int pos_x, pos_y;
- unsigned int pressure, mk_x, mk_y;
- unsigned int area_x, area_y, major, minor;
- unsigned int scaled_pressure;
+ unsigned int pressure, scaled_pressure;
if (contact_valid) {
- pos_x = ((finger_data[0] & 0xf0) << 4) |
- finger_data[1];
- pos_y = ((finger_data[0] & 0x0f) << 8) |
- finger_data[2];
- mk_x = (finger_data[3] & 0x0f);
- mk_y = (finger_data[3] >> 4);
- pressure = finger_data[4];
+ if (high_precision) {
+ pos_x = get_unaligned_be16(&finger_data[0]);
+ pos_y = get_unaligned_be16(&finger_data[2]);
+ } else {
+ pos_x = ((finger_data[0] & 0xf0) << 4) | finger_data[1];
+ pos_y = ((finger_data[0] & 0x0f) << 8) | finger_data[2];
+ }
if (pos_x > data->max_x || pos_y > data->max_y) {
dev_dbg(input->dev.parent,
@@ -913,18 +949,8 @@ static void elan_report_contact(struct elan_tp_data *data,
return;
}
- /*
- * To avoid treating large finger as palm, let's reduce the
- * width x and y per trace.
- */
- area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
- area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
-
- major = max(area_x, area_y);
- minor = min(area_x, area_y);
-
+ pressure = finger_data[4];
scaled_pressure = pressure + data->pressure_adjustment;
-
if (scaled_pressure > ETP_MAX_PRESSURE)
scaled_pressure = ETP_MAX_PRESSURE;
@@ -933,16 +959,37 @@ static void elan_report_contact(struct elan_tp_data *data,
input_report_abs(input, ABS_MT_POSITION_X, pos_x);
input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
- input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
- input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
- input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
+
+ if (data->report_features & ETP_FEATURE_REPORT_MK) {
+ unsigned int mk_x, mk_y, area_x, area_y;
+ u8 mk_data = high_precision ?
+ packet[ETP_MK_DATA_OFFSET + contact_num] :
+ finger_data[3];
+
+ mk_x = mk_data & 0x0f;
+ mk_y = mk_data >> 4;
+
+ /*
+ * To avoid treating large finger as palm, let's reduce
+ * the width x and y per trace.
+ */
+ area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
+ area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
+
+ input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ max(area_x, area_y));
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ min(area_x, area_y));
+ }
} else {
input_mt_slot(input, contact_num);
input_mt_report_slot_inactive(input);
}
}
-static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
+static void elan_report_absolute(struct elan_tp_data *data, u8 *packet,
+ bool high_precision)
{
struct input_dev *input = data->input;
u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET];
@@ -953,11 +1000,12 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
pm_wakeup_event(&data->client->dev, 0);
- hover_event = hover_info & 0x40;
- for (i = 0; i < ETP_MAX_FINGERS; i++) {
- contact_valid = tp_info & (1U << (3 + i));
- elan_report_contact(data, i, contact_valid, finger_data);
+ hover_event = hover_info & BIT(6);
+ for (i = 0; i < ETP_MAX_FINGERS; i++) {
+ contact_valid = tp_info & BIT(3 + i);
+ elan_report_contact(data, i, contact_valid, high_precision,
+ packet, finger_data);
if (contact_valid)
finger_data += ETP_FINGER_DATA_LEN;
}
@@ -1015,13 +1063,16 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
goto out;
}
- error = data->ops->get_report(data->client, report);
+ error = data->ops->get_report(data->client, report, data->report_len);
if (error)
goto out;
switch (report[ETP_REPORT_ID_OFFSET]) {
case ETP_REPORT_ID:
- elan_report_absolute(data, report);
+ elan_report_absolute(data, report, false);
+ break;
+ case ETP_REPORT_ID2:
+ elan_report_absolute(data, report, true);
break;
case ETP_TP_REPORT_ID:
elan_report_trackpoint(data, report);
@@ -1112,7 +1163,9 @@ static int elan_setup_input_device(struct elan_tp_data *data)
input_abs_set_res(input, ABS_X, data->x_res);
input_abs_set_res(input, ABS_Y, data->y_res);
input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
- input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+ if (data->report_features & ETP_FEATURE_REPORT_MK)
+ input_set_abs_params(input, ABS_TOOL_WIDTH,
+ 0, ETP_FINGER_WIDTH, 0, 0);
input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
/* And MT parameters */
@@ -1122,10 +1175,12 @@ static int elan_setup_input_device(struct elan_tp_data *data)
input_abs_set_res(input, ABS_MT_POSITION_Y, data->y_res);
input_set_abs_params(input, ABS_MT_PRESSURE, 0,
ETP_MAX_PRESSURE, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
- ETP_FINGER_WIDTH * max_width, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
- ETP_FINGER_WIDTH * min_width, 0, 0);
+ if (data->report_features & ETP_FEATURE_REPORT_MK) {
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+ 0, ETP_FINGER_WIDTH * max_width, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR,
+ 0, ETP_FINGER_WIDTH * min_width, 0, 0);
+ }
data->input = input;
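
The reworked elan_convert_resolution() above picks between the two firmware dpi encodings by pattern and then converts dpi to the dots/mm units the input core expects. A standalone restatement with one worked value; convert_resolution() is a renamed sketch, not the driver function itself:

#include <stdio.h>

/* pattern <= 0x01: dpi = val * 10 + 790
 * otherwise:       dpi = (val + 3) * 100
 * then dpi -> dots/mm via * 10 / 254 (1 inch = 25.4 mm, integer math). */
static unsigned int convert_resolution(unsigned char val, unsigned char pattern)
{
        int dpi = pattern <= 0x01 ? (int)(signed char)val * 10 + 790
                                  : ((int)(signed char)val + 3) * 100;

        return dpi * 10 / 254;
}

int main(void)
{
        /* old pattern, val = 1: 800 dpi ~= 31 dots/mm */
        printf("%u\n", convert_resolution(1, 0));
        return 0;
}
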
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 058b35b1f9a9..5a496d4ffa49 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/unaligned.h>
@@ -43,6 +44,8 @@
#define ETP_I2C_RESOLUTION_CMD 0x0108
#define ETP_I2C_PRESSURE_CMD 0x010A
#define ETP_I2C_IAP_VERSION_CMD 0x0110
+#define ETP_I2C_IC_TYPE_P0_CMD 0x0110
+#define ETP_I2C_IAP_VERSION_P0_CMD 0x0111
#define ETP_I2C_SET_CMD 0x0300
#define ETP_I2C_POWER_CMD 0x0307
#define ETP_I2C_FW_CHECKSUM_CMD 0x030F
@@ -53,8 +56,12 @@
#define ETP_I2C_CALIBRATE_CMD 0x0316
#define ETP_I2C_MAX_BASELINE_CMD 0x0317
#define ETP_I2C_MIN_BASELINE_CMD 0x0318
+#define ETP_I2C_IAP_TYPE_REG 0x0040
+#define ETP_I2C_IAP_TYPE_CMD 0x0304
#define ETP_I2C_REPORT_LEN 34
+#define ETP_I2C_REPORT_LEN_ID2 39
+#define ETP_I2C_REPORT_MAX_LEN 39
#define ETP_I2C_DESC_LENGTH 30
#define ETP_I2C_REPORT_DESC_LENGTH 158
#define ETP_I2C_INF_LENGTH 2
@@ -249,56 +256,52 @@ static int elan_i2c_get_pattern(struct i2c_client *client, u8 *pattern)
dev_err(&client->dev, "failed to get pattern: %d\n", error);
return error;
}
- *pattern = val[1];
+
+ /*
+ * Not all firmware versions implement the "get pattern" command.
+ * When this command is not implemented, the device will respond
+ * with 0xFF 0xFF, which we will treat as "old" pattern 0.
+ */
+ *pattern = val[0] == 0xFF && val[1] == 0xFF ? 0 : val[1];
return 0;
}
static int elan_i2c_get_version(struct i2c_client *client,
- bool iap, u8 *version)
+ u8 pattern, bool iap, u8 *version)
{
int error;
- u8 pattern_ver;
+ u16 cmd;
u8 val[3];
- error = elan_i2c_get_pattern(client, &pattern_ver);
- if (error) {
- dev_err(&client->dev, "failed to get pattern version\n");
- return error;
- }
+ if (!iap)
+ cmd = ETP_I2C_FW_VERSION_CMD;
+ else if (pattern == 0)
+ cmd = ETP_I2C_IAP_VERSION_P0_CMD;
+ else
+ cmd = ETP_I2C_IAP_VERSION_CMD;
- error = elan_i2c_read_cmd(client,
- iap ? ETP_I2C_IAP_VERSION_CMD :
- ETP_I2C_FW_VERSION_CMD,
- val);
+ error = elan_i2c_read_cmd(client, cmd, val);
if (error) {
dev_err(&client->dev, "failed to get %s version: %d\n",
iap ? "IAP" : "FW", error);
return error;
}
- if (pattern_ver == 0x01)
+ if (pattern >= 0x01)
*version = iap ? val[1] : val[0];
else
*version = val[0];
return 0;
}
-static int elan_i2c_get_sm_version(struct i2c_client *client,
- u16 *ic_type, u8 *version,
- u8 *clickpad)
+static int elan_i2c_get_sm_version(struct i2c_client *client, u8 pattern,
+ u16 *ic_type, u8 *version, u8 *clickpad)
{
int error;
- u8 pattern_ver;
u8 val[3];
- error = elan_i2c_get_pattern(client, &pattern_ver);
- if (error) {
- dev_err(&client->dev, "failed to get pattern version\n");
- return error;
- }
-
- if (pattern_ver == 0x01) {
+ if (pattern >= 0x01) {
error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_CMD, val);
if (error) {
dev_err(&client->dev, "failed to get ic type: %d\n",
@@ -324,7 +327,14 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
return error;
}
*version = val[0];
- *ic_type = val[1];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_P0_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get ic type: %d\n",
+ error);
+ return error;
+ }
+ *ic_type = val[0];
error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD,
val);
@@ -386,7 +396,7 @@ static int elan_i2c_get_max(struct i2c_client *client,
return error;
}
- *max_x = le16_to_cpup((__le16 *)val) & 0x0fff;
+ *max_x = le16_to_cpup((__le16 *)val);
error = elan_i2c_read_cmd(client, ETP_I2C_MAX_Y_AXIS_CMD, val);
if (error) {
@@ -394,7 +404,7 @@ static int elan_i2c_get_max(struct i2c_client *client,
return error;
}
- *max_y = le16_to_cpup((__le16 *)val) & 0x0fff;
+ *max_y = le16_to_cpup((__le16 *)val);
return 0;
}
@@ -507,7 +517,43 @@ static int elan_i2c_set_flash_key(struct i2c_client *client)
return 0;
}
-static int elan_i2c_prepare_fw_update(struct i2c_client *client)
+static int elan_read_write_iap_type(struct i2c_client *client)
+{
+ int error;
+ u16 constant;
+ u8 val[3];
+ int retry = 3;
+
+ do {
+ error = elan_i2c_write_cmd(client, ETP_I2C_IAP_TYPE_CMD,
+ ETP_I2C_IAP_TYPE_REG);
+ if (error) {
+ dev_err(&client->dev,
+ "cannot write iap type: %d\n", error);
+ return error;
+ }
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_TYPE_CMD, val);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read iap type register: %d\n",
+ error);
+ return error;
+ }
+ constant = le16_to_cpup((__le16 *)val);
+ dev_dbg(&client->dev, "iap type reg: 0x%04x\n", constant);
+
+ if (constant == ETP_I2C_IAP_TYPE_REG)
+ return 0;
+
+ } while (--retry > 0);
+
+ dev_err(&client->dev, "cannot set iap type\n");
+ return -EIO;
+}
+
+static int elan_i2c_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+ u8 iap_version)
{
struct device *dev = &client->dev;
int error;
@@ -547,6 +593,12 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client)
return -EIO;
}
+ if (ic_type >= 0x0D && iap_version >= 1) {
+ error = elan_read_write_iap_type(client);
+ if (error)
+ return error;
+ }
+
/* Set flash key again */
error = elan_i2c_set_flash_key(client);
if (error)
@@ -572,57 +624,64 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client)
return 0;
}
-static int elan_i2c_write_fw_block(struct i2c_client *client,
+static int elan_i2c_write_fw_block(struct i2c_client *client, u16 fw_page_size,
const u8 *page, u16 checksum, int idx)
{
struct device *dev = &client->dev;
- u8 page_store[ETP_FW_PAGE_SIZE + 4];
+ u8 *page_store;
u8 val[3];
u16 result;
int ret, error;
+ page_store = kmalloc(fw_page_size + 4, GFP_KERNEL);
+ if (!page_store)
+ return -ENOMEM;
+
page_store[0] = ETP_I2C_IAP_REG_L;
page_store[1] = ETP_I2C_IAP_REG_H;
- memcpy(&page_store[2], page, ETP_FW_PAGE_SIZE);
+ memcpy(&page_store[2], page, fw_page_size);
/* record checksum in the last two bytes */
- put_unaligned_le16(checksum, &page_store[ETP_FW_PAGE_SIZE + 2]);
+ put_unaligned_le16(checksum, &page_store[fw_page_size + 2]);
- ret = i2c_master_send(client, page_store, sizeof(page_store));
- if (ret != sizeof(page_store)) {
+ ret = i2c_master_send(client, page_store, fw_page_size + 4);
+ if (ret != fw_page_size + 4) {
error = ret < 0 ? ret : -EIO;
dev_err(dev, "Failed to write page %d: %d\n", idx, error);
- return error;
+ goto exit;
}
/* Wait for F/W to update one page ROM data. */
- msleep(35);
+ msleep(fw_page_size == ETP_FW_PAGE_SIZE_512 ? 50 : 35);
error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
if (error) {
dev_err(dev, "Failed to read IAP write result: %d\n", error);
- return error;
+ goto exit;
}
result = le16_to_cpup((__le16 *)val);
if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
dev_err(dev, "IAP reports failed write: %04hx\n",
result);
- return -EIO;
+ error = -EIO;
+ goto exit;
}
- return 0;
+exit:
+ kfree(page_store);
+ return error;
}
static int elan_i2c_finish_fw_update(struct i2c_client *client,
struct completion *completion)
{
struct device *dev = &client->dev;
- int error;
+ int error = 0;
int len;
- u8 buffer[ETP_I2C_REPORT_LEN];
+ u8 buffer[ETP_I2C_REPORT_MAX_LEN];
- len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
- if (len != ETP_I2C_REPORT_LEN) {
+ len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_MAX_LEN);
+ if (len <= 0) {
error = len < 0 ? len : -EIO;
dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
error, len);
@@ -656,20 +715,31 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
return 0;
}
-static int elan_i2c_get_report(struct i2c_client *client, u8 *report)
+static int elan_i2c_get_report_features(struct i2c_client *client, u8 pattern,
+ unsigned int *features,
+ unsigned int *report_len)
+{
+ *features = ETP_FEATURE_REPORT_MK;
+ *report_len = pattern <= 0x01 ?
+ ETP_I2C_REPORT_LEN : ETP_I2C_REPORT_LEN_ID2;
+ return 0;
+}
+
+static int elan_i2c_get_report(struct i2c_client *client,
+ u8 *report, unsigned int report_len)
{
int len;
- len = i2c_master_recv(client, report, ETP_I2C_REPORT_LEN);
+ len = i2c_master_recv(client, report, report_len);
if (len < 0) {
dev_err(&client->dev, "failed to read report data: %d\n", len);
return len;
}
- if (len != ETP_I2C_REPORT_LEN) {
+ if (len != report_len) {
dev_err(&client->dev,
"wrong report length (%d vs %d expected)\n",
- len, ETP_I2C_REPORT_LEN);
+ len, report_len);
return -EIO;
}
@@ -706,5 +776,6 @@ const struct elan_transport_ops elan_i2c_ops = {
.get_pattern = elan_i2c_get_pattern,
+ .get_report_features = elan_i2c_get_report_features,
.get_report = elan_i2c_get_report,
};
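
The firmware-update path in elan_i2c_core.c sums each page as little-endian 16-bit words, and elan_i2c_write_fw_block() above appends that checksum in the transfer's last two bytes. A standalone sketch of the page checksum, assuming an even page size (64, 128 or 512 bytes here); the helper name is hypothetical:

#include <stdint.h>
#include <stddef.h>

/* Sum of the page taken as little-endian u16 words, truncated to
 * 16 bits -- the same value __elan_update_firmware() computes before
 * calling write_fw_block(). */
static uint16_t elan_page_checksum(const uint8_t *page, size_t page_size)
{
        uint16_t sum = 0;
        size_t i;

        for (i = 0; i < page_size; i += 2)
                sum += (uint16_t)((page[i + 1] << 8) | page[i]);

        return sum;
}
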
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index 8c3185d54c73..8ff823751f3b 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -147,7 +147,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
}
static int elan_smbus_get_version(struct i2c_client *client,
- bool iap, u8 *version)
+ u8 pattern, bool iap, u8 *version)
{
int error;
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
@@ -166,9 +166,8 @@ static int elan_smbus_get_version(struct i2c_client *client,
return 0;
}
-static int elan_smbus_get_sm_version(struct i2c_client *client,
- u16 *ic_type, u8 *version,
- u8 *clickpad)
+static int elan_smbus_get_sm_version(struct i2c_client *client, u8 pattern,
+ u16 *ic_type, u8 *version, u8 *clickpad)
{
int error;
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
@@ -340,7 +339,8 @@ static int elan_smbus_set_flash_key(struct i2c_client *client)
return 0;
}
-static int elan_smbus_prepare_fw_update(struct i2c_client *client)
+static int elan_smbus_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+ u8 iap_version)
{
struct device *dev = &client->dev;
int len;
@@ -414,7 +414,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
}
-static int elan_smbus_write_fw_block(struct i2c_client *client,
+static int elan_smbus_write_fw_block(struct i2c_client *client, u16 fw_page_size,
const u8 *page, u16 checksum, int idx)
{
struct device *dev = &client->dev;
@@ -429,7 +429,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
*/
error = i2c_smbus_write_block_data(client,
ETP_SMBUS_WRITE_FW_BLOCK,
- ETP_FW_PAGE_SIZE / 2,
+ fw_page_size / 2,
page);
if (error) {
dev_err(dev, "Failed to write page %d (part %d): %d\n",
@@ -439,8 +439,8 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
error = i2c_smbus_write_block_data(client,
ETP_SMBUS_WRITE_FW_BLOCK,
- ETP_FW_PAGE_SIZE / 2,
- page + ETP_FW_PAGE_SIZE / 2);
+ fw_page_size / 2,
+ page + fw_page_size / 2);
if (error) {
dev_err(dev, "Failed to write page %d (part %d): %d\n",
idx, 2, error);
@@ -469,7 +469,21 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
return 0;
}
-static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
+static int elan_smbus_get_report_features(struct i2c_client *client, u8 pattern,
+ unsigned int *features,
+ unsigned int *report_len)
+{
+ /*
+ * SMBus controllers with pattern 2 lack area info, as newer
+ * high-precision packets use that space for coordinates.
+ */
+ *features = pattern <= 0x01 ? ETP_FEATURE_REPORT_MK : 0;
+ *report_len = ETP_SMBUS_REPORT_LEN;
+ return 0;
+}
+
+static int elan_smbus_get_report(struct i2c_client *client,
+ u8 *report, unsigned int report_len)
{
int len;
@@ -534,6 +548,7 @@ const struct elan_transport_ops elan_smbus_ops = {
.write_fw_block = elan_smbus_write_fw_block,
.finish_fw_update = elan_smbus_finish_fw_update,
+ .get_report_features = elan_smbus_get_report_features,
.get_report = elan_smbus_get_report,
.get_pattern = elan_smbus_get_pattern,
};
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2d8434b7b623..90f8765f9efc 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -383,7 +383,7 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
*/
if (packet[3] & 0x80)
fingers = 4;
- /* fall through */
+ fallthrough;
case 1:
/*
* byte 1: . . . . x11 x10 x9 x8
@@ -1146,7 +1146,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
case 2:
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
- /* fall through */
+ fallthrough;
case 3:
if (info->hw_version == 3)
elantech_set_buttonpad_prop(psmouse);
@@ -1877,12 +1877,10 @@ static bool elantech_use_host_notify(struct psmouse *psmouse,
/* expected case */
break;
case ETP_BUS_SMB_ALERT_ONLY:
- /* fall-through */
case ETP_BUS_PS2_SMB_ALERT:
psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
break;
case ETP_BUS_SMB_HST_NTFY_ONLY:
- /* fall-through */
case ETP_BUS_PS2_SMB_HST_NTFY:
return true;
default:
@@ -1897,7 +1895,7 @@ static bool elantech_use_host_notify(struct psmouse *psmouse,
int elantech_init_smbus(struct psmouse *psmouse)
{
struct elantech_device_info info;
- int error = -EINVAL;
+ int error;
psmouse_reset(psmouse);
@@ -2015,7 +2013,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
int elantech_init_ps2(struct psmouse *psmouse)
{
struct elantech_device_info info;
- int error = -EINVAL;
+ int error;
psmouse_reset(psmouse);
@@ -2036,7 +2034,7 @@ int elantech_init_ps2(struct psmouse *psmouse)
int elantech_init(struct psmouse *psmouse)
{
struct elantech_device_info info;
- int error = -EINVAL;
+ int error;
psmouse_reset(psmouse);
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 72a083f3fc4a..4dc441309aac 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -238,7 +238,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse,
/* we're not spewing, but this packet might be the start */
priv->spew_flag = MAYBE_SPEWING;
- /* fall-through */
+ fallthrough;
case MAYBE_SPEWING:
priv->spew_count++;
@@ -249,7 +249,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse,
/* excessive spew detected, request recalibration */
priv->spew_flag = SPEW_DETECTED;
- /* fall-through */
+ fallthrough;
case SPEW_DETECTED:
/* only recalibrate when the overall delta to the cursor
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index 0b75248c8380..c112980c2341 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -105,7 +105,7 @@ static void navpoint_packet(struct navpoint *navpoint)
case 0x19: /* Module 0, Hello packet */
if ((navpoint->data[1] & 0xf0) == 0x10)
break;
- /* FALLTHROUGH */
+ fallthrough;
default:
dev_warn(navpoint->dev,
"spurious packet: data=0x%02x,0x%02x,...\n",
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 527ae0b9a191..0b4a3039f312 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
{
int type = *((unsigned int *)kp->arg);
- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
+ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
}
static int __init psmouse_init(void)
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index e99d9bf1a267..2716d2ba386a 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
fsp_reg_write_enable(psmouse, false);
- return count;
+ return retval;
}
PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg);
@@ -794,7 +794,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
/* on-pad click, filter it if necessary */
if ((ad->flags & FSPDRV_FLAG_EN_OPC) != FSPDRV_FLAG_EN_OPC)
packet[0] &= ~FSP_PB0_LBTN;
- /* fall through */
+ fallthrough;
case FSP_PKT_TYPE_NORMAL:
/* normal packet */
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index ea9242d53899..caa79c177c55 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -128,7 +128,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
case SERIO_MS:
sermouse->type = SERIO_MP;
- /* fall through */
+ fallthrough;
case SERIO_MP:
if ((data >> 2) & 3) break; /* M++ Wireless Extension packet. */
@@ -139,7 +139,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
case SERIO_MZP:
case SERIO_MZPP:
input_report_key(dev, BTN_SIDE, (data >> 5) & 1);
- /* fall through */
+ fallthrough;
case SERIO_MZ:
input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 0dddf273afd9..d3eda48032e3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -562,7 +562,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
str = last_str;
break;
}
- /* fall through - report timeout */
+ fallthrough; /* report timeout */
case 0xfc:
case 0xfd:
case 0xfe: dfl = SERIO_TIMEOUT; data = 0xfe; break;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index a8c94a940a79..8a16e41f7b7f 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -418,7 +418,7 @@ bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data)
ps2dev->nak = 0;
break;
}
- /* Fall through */
+ fallthrough;
default:
/*
* Do not signal errors if we get unexpected reply while
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 530fd15eaeca..25bf8be6e711 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -247,7 +247,7 @@ void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *k
case KE_SW:
value = ke->sw.value;
- /* fall through */
+ fallthrough;
case KE_VSW:
input_report_switch(dev, ke->sw.code, value);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 96d65575f75a..44bb1f69b4b2 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -676,8 +676,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
/* Mask out the Y tilt value used for pressure */
device->buffer[7] = (u8)((device->buffer[7]) & 0x7F);
+ fallthrough;
- /* Fall thru */
case 4:
/* Tilt */
input_report_abs(inputdev, ABS_TILT_X,
@@ -685,8 +685,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
input_report_abs(inputdev, ABS_TILT_Y,
sign_extend32(device->buffer[7], 6));
+ fallthrough;
- /* Fall thru */
case 2:
case 3:
/* Convert buttons, only 5 bits possible */
@@ -695,8 +695,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
/* We don't apply any meaning to the bitmask,
just report */
input_event(inputdev, EV_MSC, MSC_SERIAL, val);
+ fallthrough;
- /* Fall thru */
case 1:
/* All reports have X and Y coords in the same place */
val = get_unaligned_le16(&device->buffer[1]);
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index 38f087404f7a..749edbdb7ffa 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -146,7 +146,7 @@ static void pegasus_parse_packet(struct pegasus *pegasus)
/* xy data */
case BATTERY_LOW:
dev_warn_once(&dev->dev, "Pen battery low\n");
- /* fall through */
+ fallthrough;
case BATTERY_NO_REPORT:
case BATTERY_GOOD:
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index a2189739e30f..6b71b0aff115 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -20,6 +20,7 @@
#include <linux/i2c.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -129,6 +130,7 @@ struct t9_range {
/* MXT_SPT_COMMSCONFIG_T18 */
#define MXT_COMMS_CTRL 0
#define MXT_COMMS_CMD 1
+#define MXT_COMMS_RETRIGEN BIT(6)
/* MXT_DEBUG_DIAGNOSTIC_T37 */
#define MXT_DIAGNOSTIC_PAGEUP 0x01
@@ -308,6 +310,7 @@ struct mxt_data {
struct t7_config t7_cfg;
struct mxt_dbg dbg;
struct gpio_desc *reset_gpio;
+ bool use_retrigen_workaround;
/* Cached parameters from object table */
u16 T5_address;
@@ -318,6 +321,7 @@ struct mxt_data {
u16 T71_address;
u8 T9_reportid_min;
u8 T9_reportid_max;
+ u16 T18_address;
u8 T19_reportid;
u16 T44_address;
u8 T100_reportid_min;
@@ -1190,9 +1194,11 @@ static int mxt_acquire_irq(struct mxt_data *data)
enable_irq(data->irq);
- error = mxt_process_messages_until_invalid(data);
- if (error)
- return error;
+ if (data->use_retrigen_workaround) {
+ error = mxt_process_messages_until_invalid(data);
+ if (error)
+ return error;
+ }
return 0;
}
@@ -1282,6 +1288,38 @@ static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
return crc;
}
+static int mxt_check_retrigen(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+ int val;
+ struct irq_data *irqd;
+
+ data->use_retrigen_workaround = false;
+
+ irqd = irq_get_irq_data(data->irq);
+ if (!irqd)
+ return -EINVAL;
+
+ if (irqd_is_level_type(irqd))
+ return 0;
+
+ if (data->T18_address) {
+ error = __mxt_read_reg(client,
+ data->T18_address + MXT_COMMS_CTRL,
+ 1, &val);
+ if (error)
+ return error;
+
+ if (val & MXT_COMMS_RETRIGEN)
+ return 0;
+ }
+
+ dev_warn(&client->dev, "Enabling RETRIGEN workaround\n");
+ data->use_retrigen_workaround = true;
+ return 0;
+}
+
static int mxt_prepare_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
{
struct device *dev = &data->client->dev;
@@ -1561,6 +1599,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE);
+ ret = mxt_check_retrigen(data);
+ if (ret)
+ goto release_mem;
+
ret = mxt_soft_reset(data);
if (ret)
goto release_mem;
@@ -1604,6 +1646,7 @@ static void mxt_free_object_table(struct mxt_data *data)
data->T71_address = 0;
data->T9_reportid_min = 0;
data->T9_reportid_max = 0;
+ data->T18_address = 0;
data->T19_reportid = 0;
data->T44_address = 0;
data->T100_reportid_min = 0;
@@ -1678,6 +1721,9 @@ static int mxt_parse_object_table(struct mxt_data *data,
object->num_report_ids - 1;
data->num_touchids = object->num_report_ids;
break;
+ case MXT_SPT_COMMSCONFIG_T18:
+ data->T18_address = object->start_address;
+ break;
case MXT_SPT_MESSAGECOUNT_T44:
data->T44_address = object->start_address;
break;
@@ -2141,6 +2187,10 @@ static int mxt_initialize(struct mxt_data *data)
if (error)
return error;
+ error = mxt_check_retrigen(data);
+ if (error)
+ return error;
+
error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
&client->dev, GFP_KERNEL, data,
mxt_config_cb);
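With an edge-triggered interrupt and no RETRIGEN support, a new CHG assertion that arrives while the line is still low produces no fresh edge, so the hunk above drains any queued messages after re-enabling the IRQ. A minimal, standalone sketch of that drain logic follows; read_one_report() and the queue-depth limit are hypothetical stand-ins for the driver's T5 message read (mxt_process_messages_until_invalid() itself is not shown in this hunk):

    /* Hedged, standalone sketch of the drain loop the RETRIGEN workaround
     * depends on: keep reading until the controller reports an invalid
     * (0xff) report id, bounded by an assumed queue-depth limit. */
    #include <stdio.h>

    #define MXT_RPTID_NOMSG	0xff	/* invalid report id: queue empty */

    /* read_one_report() is a hypothetical stand-in for the driver's T5
     * message read; here it just replays a canned message queue. */
    static int read_one_report(void)
    {
    	static const int queue[] = { 0x01, 0x02, MXT_RPTID_NOMSG };
    	static unsigned int pos;

    	return queue[pos == 2 ? 2 : pos++];
    }

    int main(void)
    {
    	int limit = 32;		/* assumed upper bound on queued messages */

    	while (limit--) {
    		int id = read_one_report();

    		if (id == MXT_RPTID_NOMSG) {
    			puts("queue drained, edge IRQ can fire again");
    			return 0;
    		}
    		printf("handled report id %#x\n", id);
    	}
    	return 1;		/* could not drain */
    }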
diff --git a/drivers/input/touchscreen/chipone_icn8505.c b/drivers/input/touchscreen/chipone_icn8505.c
index c768186ce856..f9ca5502ac8c 100644
--- a/drivers/input/touchscreen/chipone_icn8505.c
+++ b/drivers/input/touchscreen/chipone_icn8505.c
@@ -288,7 +288,7 @@ static int icn8505_upload_fw(struct icn8505_data *icn8505)
* we may need it at resume. Having loaded it once will make the
* firmware class code cache it at suspend/resume.
*/
- error = request_firmware(&fw, icn8505->firmware_name, dev);
+ error = firmware_request_platform(&fw, icn8505->firmware_name, dev);
if (error) {
dev_err(dev, "Firmware request error %d\n", error);
return error;
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3a4f18d3450d..6ff81d48da86 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -288,7 +288,7 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
return edt_ft5x06_ts_readwrite(tsdata->client, 4,
wrbuf, 0, NULL);
- /* fallthrough */
+
case EDT_M09:
case EDT_M12:
case EV_FT:
@@ -330,7 +330,6 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
}
break;
- /* fallthrough */
case EDT_M09:
case EDT_M12:
case EV_FT:
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 5477a5718202..b0bd5bb079be 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -955,7 +955,7 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
break;
ts->state = ELAN_STATE_NORMAL;
- /* fall through */
+ fallthrough;
case ELAN_STATE_NORMAL:
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index d6772a2c2d09..e0bacd34866a 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -348,7 +348,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
case 1: /* 6-byte protocol */
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0);
- /* fall through */
+ fallthrough;
case 2: /* 4-byte protocol */
input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0);
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index e007e2e8f626..a6597f026980 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -8,7 +8,9 @@
*/
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
@@ -16,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/sizes.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
@@ -23,15 +26,59 @@
#define EXC3000_SLOTS_PER_FRAME 5
#define EXC3000_LEN_FRAME 66
#define EXC3000_LEN_POINT 10
-#define EXC3000_MT_EVENT 6
+
+#define EXC3000_LEN_MODEL_NAME 16
+#define EXC3000_LEN_FW_VERSION 16
+
+#define EXC3000_MT1_EVENT 0x06
+#define EXC3000_MT2_EVENT 0x18
+
#define EXC3000_TIMEOUT_MS 100
+#define EXC3000_RESET_MS 10
+#define EXC3000_READY_MS 100
+
+static const struct i2c_device_id exc3000_id[];
+
+struct eeti_dev_info {
+ const char *name;
+ int max_xy;
+};
+
+enum eeti_dev_id {
+ EETI_EXC3000,
+ EETI_EXC80H60,
+ EETI_EXC80H84,
+};
+
+static struct eeti_dev_info exc3000_info[] = {
+ [EETI_EXC3000] = {
+ .name = "EETI EXC3000 Touch Screen",
+ .max_xy = SZ_4K - 1,
+ },
+ [EETI_EXC80H60] = {
+ .name = "EETI EXC80H60 Touch Screen",
+ .max_xy = SZ_16K - 1,
+ },
+ [EETI_EXC80H84] = {
+ .name = "EETI EXC80H84 Touch Screen",
+ .max_xy = SZ_16K - 1,
+ },
+};
+
struct exc3000_data {
struct i2c_client *client;
+ const struct eeti_dev_info *info;
struct input_dev *input;
struct touchscreen_properties prop;
+ struct gpio_desc *reset;
struct timer_list timer;
u8 buf[2 * EXC3000_LEN_FRAME];
+ struct completion wait_event;
+ struct mutex query_lock;
+ int query_result;
+ char model[EXC3000_LEN_MODEL_NAME];
+ char fw_version[EXC3000_LEN_FW_VERSION];
};
static void exc3000_report_slots(struct input_dev *input,
@@ -58,10 +105,15 @@ static void exc3000_timer(struct timer_list *t)
input_sync(data->input);
}
-static int exc3000_read_frame(struct i2c_client *client, u8 *buf)
+static int exc3000_read_frame(struct exc3000_data *data, u8 *buf)
{
+ struct i2c_client *client = data->client;
+ u8 expected_event = EXC3000_MT1_EVENT;
int ret;
+ if (data->info->max_xy == SZ_16K - 1)
+ expected_event = EXC3000_MT2_EVENT;
+
ret = i2c_master_send(client, "'", 2);
if (ret < 0)
return ret;
@@ -76,19 +128,21 @@ static int exc3000_read_frame(struct i2c_client *client, u8 *buf)
if (ret != EXC3000_LEN_FRAME)
return -EIO;
- if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME ||
- buf[2] != EXC3000_MT_EVENT)
+ if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME)
+ return -EINVAL;
+
+ if (buf[2] != expected_event)
return -EINVAL;
return 0;
}
-static int exc3000_read_data(struct i2c_client *client,
+static int exc3000_read_data(struct exc3000_data *data,
u8 *buf, int *n_slots)
{
int error;
- error = exc3000_read_frame(client, buf);
+ error = exc3000_read_frame(data, buf);
if (error)
return error;
@@ -98,7 +152,7 @@ static int exc3000_read_data(struct i2c_client *client,
if (*n_slots > EXC3000_SLOTS_PER_FRAME) {
/* Read 2nd frame to get the rest of the contacts. */
- error = exc3000_read_frame(client, buf + EXC3000_LEN_FRAME);
+ error = exc3000_read_frame(data, buf + EXC3000_LEN_FRAME);
if (error)
return error;
@@ -110,6 +164,28 @@ static int exc3000_read_data(struct i2c_client *client,
return 0;
}
+static int exc3000_query_interrupt(struct exc3000_data *data)
+{
+ u8 *buf = data->buf;
+ int error;
+
+ error = i2c_master_recv(data->client, buf, EXC3000_LEN_FRAME);
+ if (error < 0)
+ return error;
+
+ if (buf[0] != 'B')
+ return -EPROTO;
+
+ if (buf[4] == 'E')
+ strlcpy(data->model, buf + 5, sizeof(data->model));
+ else if (buf[4] == 'D')
+ strlcpy(data->fw_version, buf + 5, sizeof(data->fw_version));
+ else
+ return -EPROTO;
+
+ return 0;
+}
+
static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
{
struct exc3000_data *data = dev_id;
@@ -118,7 +194,13 @@ static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
int slots, total_slots;
int error;
- error = exc3000_read_data(data->client, buf, &total_slots);
+ if (mutex_is_locked(&data->query_lock)) {
+ data->query_result = exc3000_query_interrupt(data);
+ complete(&data->wait_event);
+ goto out;
+ }
+
+ error = exc3000_read_data(data, buf, &total_slots);
if (error) {
/* Schedule a timer to release "stuck" contacts */
mod_timer(&data->timer,
@@ -145,31 +227,132 @@ out:
return IRQ_HANDLED;
}
-static int exc3000_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct exc3000_data *data = i2c_get_clientdata(client);
+ static const u8 request[68] = {
+ 0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'D', 0x00
+ };
+ int error;
+
+ mutex_lock(&data->query_lock);
+
+ data->query_result = -ETIMEDOUT;
+ reinit_completion(&data->wait_event);
+
+ error = i2c_master_send(client, request, sizeof(request));
+ if (error < 0) {
+ mutex_unlock(&data->query_lock);
+ return error;
+ }
+
+ wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ);
+ mutex_unlock(&data->query_lock);
+
+ if (data->query_result < 0)
+ return data->query_result;
+
+ return sprintf(buf, "%s\n", data->fw_version);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static ssize_t exc3000_get_model(struct exc3000_data *data)
+{
+ static const u8 request[68] = {
+ 0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'E', 0x00
+ };
+ struct i2c_client *client = data->client;
+ int error;
+
+ mutex_lock(&data->query_lock);
+ data->query_result = -ETIMEDOUT;
+ reinit_completion(&data->wait_event);
+
+ error = i2c_master_send(client, request, sizeof(request));
+ if (error < 0) {
+ mutex_unlock(&data->query_lock);
+ return error;
+ }
+
+ wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ);
+ mutex_unlock(&data->query_lock);
+
+ return data->query_result;
+}
+
+static ssize_t model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct exc3000_data *data = i2c_get_clientdata(client);
+ int error;
+
+ error = exc3000_get_model(data);
+ if (error < 0)
+ return error;
+
+ return sprintf(buf, "%s\n", data->model);
+}
+static DEVICE_ATTR_RO(model);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_fw_version.attr,
+ &dev_attr_model.attr,
+ NULL
+};
+
+static struct attribute_group exc3000_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
+static int exc3000_probe(struct i2c_client *client)
{
struct exc3000_data *data;
struct input_dev *input;
- int error;
+ int error, max_xy, retry;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
+ data->info = device_get_match_data(&client->dev);
+ if (!data->info) {
+ enum eeti_dev_id eeti_dev_id =
+ i2c_match_id(exc3000_id, client)->driver_data;
+ data->info = &exc3000_info[eeti_dev_id];
+ }
timer_setup(&data->timer, exc3000_timer, 0);
+ init_completion(&data->wait_event);
+ mutex_init(&data->query_lock);
+
+ data->reset = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset))
+ return PTR_ERR(data->reset);
+
+ if (data->reset) {
+ msleep(EXC3000_RESET_MS);
+ gpiod_set_value_cansleep(data->reset, 0);
+ msleep(EXC3000_READY_MS);
+ }
input = devm_input_allocate_device(&client->dev);
if (!input)
return -ENOMEM;
data->input = input;
+ input_set_drvdata(input, data);
- input->name = "EETI EXC3000 Touch Screen";
+ input->name = data->info->name;
input->id.bustype = BUS_I2C;
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+ max_xy = data->info->max_xy;
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+
touchscreen_parse_properties(input, true, &data->prop);
error = input_mt_init_slots(input, EXC3000_NUM_SLOTS,
@@ -187,18 +370,49 @@ static int exc3000_probe(struct i2c_client *client,
if (error)
return error;
+ /*
+ * I²C does not have built-in recovery, so retry on failure. This
+ * ensures that the device probe will not fail due to temporary issues
+ * on the bus. This is not needed for the sysfs calls (userspace
+ * will receive the error code and can start another query) and
+ * cannot be done for touch events (but that only means losing one
+ * or two touch events anyway).
+ */
+ for (retry = 0; retry < 3; retry++) {
+ error = exc3000_get_model(data);
+ if (!error)
+ break;
+ dev_warn(&client->dev, "Retry %d get EETI EXC3000 model: %d\n",
+ retry + 1, error);
+ }
+
+ if (error)
+ return error;
+
+ dev_dbg(&client->dev, "TS Model: %s", data->model);
+
+ i2c_set_clientdata(client, data);
+
+ error = devm_device_add_group(&client->dev, &exc3000_attribute_group);
+ if (error)
+ return error;
+
return 0;
}
static const struct i2c_device_id exc3000_id[] = {
- { "exc3000", 0 },
+ { "exc3000", EETI_EXC3000 },
+ { "exc80h60", EETI_EXC80H60 },
+ { "exc80h84", EETI_EXC80H84 },
{ }
};
MODULE_DEVICE_TABLE(i2c, exc3000_id);
#ifdef CONFIG_OF
static const struct of_device_id exc3000_of_match[] = {
- { .compatible = "eeti,exc3000" },
+ { .compatible = "eeti,exc3000", .data = &exc3000_info[EETI_EXC3000] },
+ { .compatible = "eeti,exc80h60", .data = &exc3000_info[EETI_EXC80H60] },
+ { .compatible = "eeti,exc80h84", .data = &exc3000_info[EETI_EXC80H84] },
{ }
};
MODULE_DEVICE_TABLE(of, exc3000_of_match);
@@ -210,7 +424,7 @@ static struct i2c_driver exc3000_driver = {
.of_match_table = of_match_ptr(exc3000_of_match),
},
.id_table = exc3000_id,
- .probe = exc3000_probe,
+ .probe_new = exc3000_probe,
};
module_i2c_driver(exc3000_driver);
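The new model and fw_version attributes are ordinary sysfs files, so once the driver is bound they can be read with plain file I/O. A minimal user-space sketch; the i2c device path below is an assumption and depends on the actual bus number and client address:

    /* Hedged sketch: read the exc3000 "model" and "fw_version" sysfs
     * attributes added above. The device path is hypothetical. */
    #include <stdio.h>

    int main(void)
    {
    	static const char *attrs[] = {
    		"/sys/bus/i2c/devices/3-002a/model",      /* hypothetical path */
    		"/sys/bus/i2c/devices/3-002a/fw_version", /* hypothetical path */
    	};
    	char buf[64];

    	for (int i = 0; i < 2; i++) {
    		FILE *f = fopen(attrs[i], "r");

    		if (!f || !fgets(buf, sizeof(buf), f)) {
    			perror(attrs[i]);
    			if (f)
    				fclose(f);
    			continue;
    		}
    		printf("%s: %s", attrs[i], buf);
    		fclose(f);
    	}
    	return 0;
    }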
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index 5875bb1099a8..3162b68f7374 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -289,7 +289,7 @@ static int iqs5xx_bl_cmd(struct i2c_client *client, u8 bl_cmd, u16 bl_addr)
break;
case IQS5XX_BL_CMD_EXEC:
usleep_range(10000, 10100);
- /* fall through */
+ fallthrough;
default:
return 0;
}
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 1af08d3dfaf7..f15713aaebc2 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -130,7 +130,6 @@ static irqreturn_t max11801_ts_interrupt(int irq, void *dev_id)
switch (buf[1] & EVENT_TAG_MASK) {
case EVENT_INIT:
- /* fall through */
case EVENT_MIDDLE:
input_report_abs(data->input_dev, ABS_X, x);
input_report_abs(data->input_dev, ABS_Y, y);
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index ad8b6a2bfd36..8fa2f3b7cfd8 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -288,7 +288,7 @@ static int silead_ts_load_fw(struct i2c_client *client)
dev_dbg(dev, "Firmware file name: %s", data->fw_name);
- error = request_firmware(&fw, data->fw_name, dev);
+ error = firmware_request_platform(&fw, data->fw_name, dev);
if (error) {
dev_err(dev, "Firmware request error %d\n", error);
return error;
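Both this silead hunk and the chipone_icn8505 hunk above swap request_firmware() for firmware_request_platform(), which additionally falls back to firmware embedded in platform (e.g. EFI) tables when no file is found on the filesystem; the call signature is otherwise identical. A minimal sketch of the pattern, with an illustrative firmware name:

    /* Hedged sketch: firmware_request_platform() as a drop-in for
     * request_firmware() with a platform-firmware fallback. The
     * firmware name below is illustrative only. */
    #include <linux/device.h>
    #include <linux/firmware.h>

    static int example_load_fw(struct device *dev)
    {
    	const struct firmware *fw;
    	int error;

    	error = firmware_request_platform(&fw, "example/fw.bin", dev);
    	if (error)
    		return error;

    	/* ... program fw->data (fw->size bytes) into the device ... */

    	release_firmware(fw);
    	return 0;
    }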
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index b54cc64e4ea6..df946869d4cd 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -255,7 +255,7 @@ static void stmfts_parse_events(struct stmfts_data *sdata)
case STMFTS_EV_SLEEP_OUT_CONTROLLER_READY:
case STMFTS_EV_STATUS:
complete(&sdata->cmd_done);
- /* fall through */
+ fallthrough;
case STMFTS_EV_NO_EVENT:
case STMFTS_EV_DEBUG:
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 76938ece1658..a88f2f07bc27 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,9 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_BCM_VOTER
+ tristate
+
config INTERCONNECT_QCOM_MSM8916
tristate "Qualcomm MSM8916 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -23,6 +26,13 @@ config INTERCONNECT_QCOM_MSM8974
This is a driver for the Qualcomm Network-on-Chip on msm8974-based
platforms.
+config INTERCONNECT_QCOM_OSM_L3
+ tristate "Qualcomm OSM L3 interconnect driver"
+ depends on INTERCONNECT_QCOM || COMPILE_TEST
+ help
+ Say y here to support the Operating State Manager (OSM) interconnect
+ driver which controls the scaling of L3 caches on Qualcomm SoCs.
+
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -32,10 +42,25 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_RPMH
+ tristate
+
+config INTERCONNECT_QCOM_SC7180
+ tristate "Qualcomm SC7180 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sc7180-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
depends on INTERCONNECT_QCOM
depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
help
This is a driver for the Qualcomm Network-on-Chip on sdm845-based
platforms.
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index e8271575e3d8..3a047fe6e45a 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,13 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
+icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8916-objs := msm8916.o
qnoc-msm8974-objs := msm8974.o
+icc-osm-l3-objs := osm-l3.o
qnoc-qcs404-objs := qcs404.o
+icc-rpmh-obj := icc-rpmh.o
+qnoc-sc7180-objs := sc7180.o
qnoc-sdm845-objs := sdm845.o
icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
+obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
+obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
new file mode 100644
index 000000000000..2a11a63e7217
--- /dev/null
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <asm/div64.h>
+#include <linux/interconnect-provider.h>
+#include <linux/list_sort.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+static LIST_HEAD(bcm_voters);
+static DEFINE_MUTEX(bcm_voter_lock);
+
+/**
+ * struct bcm_voter - Bus Clock Manager voter
+ * @dev: reference to the device that communicates with the BCM
+ * @np: reference to the device node to match bcm voters
+ * @lock: mutex to protect commit and wake/sleep lists in the voter
+ * @commit_list: list containing bcms to be committed to hardware
+ * @ws_list: list containing bcms that have different wake/sleep votes
+ * @voter_node: list entry used to link this voter into the bcm_voters list
+ */
+struct bcm_voter {
+ struct device *dev;
+ struct device_node *np;
+ struct mutex lock;
+ struct list_head commit_list;
+ struct list_head ws_list;
+ struct list_head voter_node;
+};
+
+static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
+{
+ const struct qcom_icc_bcm *bcm_a =
+ list_entry(a, struct qcom_icc_bcm, list);
+ const struct qcom_icc_bcm *bcm_b =
+ list_entry(b, struct qcom_icc_bcm, list);
+
+ if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd)
+ return -1;
+ else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd)
+ return 0;
+ else
+ return 1;
+}
+
+static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+{
+ size_t i, bucket;
+ u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
+ u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
+ u64 temp;
+
+ for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
+ for (i = 0; i < bcm->num_nodes; i++) {
+ temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ agg_avg[bucket] = max(agg_avg[bucket], temp);
+
+ temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth);
+ agg_peak[bucket] = max(agg_peak[bucket], temp);
+ }
+
+ temp = agg_avg[bucket] * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_x[bucket] = temp;
+
+ temp = agg_peak[bucket] * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_y[bucket] = temp;
+ }
+
+ if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
+ bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
+ bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
+ bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
+ }
+}
+
+static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
+ u32 addr, bool commit)
+{
+ bool valid = true;
+
+ if (!cmd)
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ if (vote_x == 0 && vote_y == 0)
+ valid = false;
+
+ if (vote_x > BCM_TCS_CMD_VOTE_MASK)
+ vote_x = BCM_TCS_CMD_VOTE_MASK;
+
+ if (vote_y > BCM_TCS_CMD_VOTE_MASK)
+ vote_y = BCM_TCS_CMD_VOTE_MASK;
+
+ cmd->addr = addr;
+ cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
+
+ /*
+ * Set the wait-for-completion flag on commands that need to complete
+ * before the next command is sent.
+ */
+ cmd->wait = commit;
+}
+
+static void tcs_list_gen(struct list_head *bcm_list, int bucket,
+ struct tcs_cmd tcs_list[MAX_BCMS],
+ int n[MAX_VCD + 1])
+{
+ struct qcom_icc_bcm *bcm;
+ bool commit;
+ size_t idx = 0, batch = 0, cur_vcd_size = 0;
+
+ memset(n, 0, sizeof(int) * (MAX_VCD + 1));
+
+ list_for_each_entry(bcm, bcm_list, list) {
+ commit = false;
+ cur_vcd_size++;
+ if ((list_is_last(&bcm->list, bcm_list)) ||
+ bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
+ commit = true;
+ cur_vcd_size = 0;
+ }
+ tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
+ bcm->vote_y[bucket], bcm->addr, commit);
+ idx++;
+ n[batch]++;
+ /*
+ * Batch the BCMs in such a way that we do not split them across
+ * multiple payloads when they are under the same VCD. This is
+ * to ensure that every BCM is committed since we only set the
+ * commit bit on the last BCM request of every VCD.
+ */
+ if (n[batch] >= MAX_RPMH_PAYLOAD) {
+ if (!commit) {
+ n[batch] -= cur_vcd_size;
+ n[batch + 1] = cur_vcd_size;
+ }
+ batch++;
+ }
+ }
+}
+
+/**
+ * of_bcm_voter_get - gets a bcm voter handle from DT node
+ * @dev: device pointer for the consumer device
+ * @name: name for the bcm voter device
+ *
+ * This function will match a device_node pointer for the phandle
+ * specified in the device DT and return a bcm_voter handle on success.
+ *
+ * Returns a bcm_voter pointer or ERR_PTR() on error. EPROBE_DEFER is
+ * returned when the matching bcm voter is yet to be found.
+ */
+struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
+{
+ struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
+ struct bcm_voter *temp;
+ struct device_node *np, *node;
+ int idx = 0;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ if (name) {
+ idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ node = of_parse_phandle(np, "qcom,bcm-voters", idx);
+
+ mutex_lock(&bcm_voter_lock);
+ list_for_each_entry(temp, &bcm_voters, voter_node) {
+ if (temp->np == node) {
+ voter = temp;
+ break;
+ }
+ }
+ mutex_unlock(&bcm_voter_lock);
+
+ return voter;
+}
+EXPORT_SYMBOL_GPL(of_bcm_voter_get);
+
+/**
+ * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
+ * @voter: voter that the bcms are being added to
+ * @bcm: bcm to add to the commit and wake/sleep lists
+ */
+void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
+{
+ if (!voter)
+ return;
+
+ mutex_lock(&voter->lock);
+ if (list_empty(&bcm->list))
+ list_add_tail(&bcm->list, &voter->commit_list);
+
+ if (list_empty(&bcm->ws_list))
+ list_add_tail(&bcm->ws_list, &voter->ws_list);
+
+ mutex_unlock(&voter->lock);
+}
+EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);
+
+/**
+ * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
+ * @voter: voter that needs flushing
+ *
+ * This function generates a set of AMC commands and flushes them to the
+ * BCM device associated with the voter. It conditionally generates WAKE
+ * and SLEEP commands based on the deltas between the WAKE and SLEEP
+ * requirements. The ws_list persists through multiple commit requests,
+ * and bcm nodes are removed from it only when the WAKE requirements
+ * match the SLEEP requirements.
+ *
+ * Returns 0 on success, or an appropriate error code otherwise.
+ */
+int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
+{
+ struct qcom_icc_bcm *bcm;
+ struct qcom_icc_bcm *bcm_tmp;
+ int commit_idx[MAX_VCD + 1];
+ struct tcs_cmd cmds[MAX_BCMS];
+ int ret = 0;
+
+ if (!voter)
+ return 0;
+
+ mutex_lock(&voter->lock);
+ list_for_each_entry(bcm, &voter->commit_list, list)
+ bcm_aggregate(bcm);
+
+ /*
+ * Pre-sort the BCMs by VCD so that the generated command list groups
+ * BCMs with the same VCD together. VCDs are numbered with the lowest
+ * being the most expensive time-wise, ensuring that those commands are
+ * sent earliest in the queue. The list must be re-sorted on every
+ * commit since we can't guarantee the order in which the BCMs are
+ * added to the list.
+ */
+ list_sort(NULL, &voter->commit_list, cmp_vcd);
+
+ /*
+ * Construct the command list from the list of BCMs pre-sorted by VCD.
+ */
+ tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
+
+ if (!commit_idx[0])
+ goto out;
+
+ ret = rpmh_invalidate(voter->dev);
+ if (ret) {
+ pr_err("Error invalidating RPMH client (%d)\n", ret);
+ goto out;
+ }
+
+ ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
+ cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending AMC RPMH requests (%d)\n", ret);
+ goto out;
+ }
+
+ list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
+ list_del_init(&bcm->list);
+
+ list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
+ /*
+ * Only generate WAKE and SLEEP commands if a resource's
+ * requirements change as the execution environment transitions
+ * between different power states.
+ */
+ if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
+ bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
+ bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
+ bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
+ list_add_tail(&bcm->list, &voter->commit_list);
+ else
+ list_del_init(&bcm->ws_list);
+ }
+
+ if (list_empty(&voter->commit_list))
+ goto out;
+
+ list_sort(NULL, &voter->commit_list, cmp_vcd);
+
+ tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
+
+ ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
+ goto out;
+ }
+
+ tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
+
+ ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
+ goto out;
+ }
+
+out:
+ list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
+ list_del_init(&bcm->list);
+
+ mutex_unlock(&voter->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);
+
+static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
+{
+ struct bcm_voter *voter;
+
+ voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
+ if (!voter)
+ return -ENOMEM;
+
+ voter->dev = &pdev->dev;
+ voter->np = pdev->dev.of_node;
+ mutex_init(&voter->lock);
+ INIT_LIST_HEAD(&voter->commit_list);
+ INIT_LIST_HEAD(&voter->ws_list);
+
+ mutex_lock(&bcm_voter_lock);
+ list_add_tail(&voter->voter_node, &bcm_voters);
+ mutex_unlock(&bcm_voter_lock);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_voter_of_match[] = {
+ { .compatible = "qcom,bcm-voter" },
+ { }
+};
+
+static struct platform_driver qcom_icc_bcm_voter_driver = {
+ .probe = qcom_icc_bcm_voter_probe,
+ .driver = {
+ .name = "bcm_voter",
+ .of_match_table = bcm_voter_of_match,
+ },
+};
+module_platform_driver(qcom_icc_bcm_voter_driver);
+
+MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/bcm-voter.h b/drivers/interconnect/qcom/bcm-voter.h
new file mode 100644
index 000000000000..0f64c0bab2c0
--- /dev/null
+++ b/drivers/interconnect/qcom/bcm-voter.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_BCM_VOTER_H__
+#define __DRIVERS_INTERCONNECT_QCOM_BCM_VOTER_H__
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#include "icc-rpmh.h"
+
+#define DEFINE_QBCM(_name, _bcmname, _keepalive, ...) \
+static struct qcom_icc_bcm _name = { \
+ .name = _bcmname, \
+ .keepalive = _keepalive, \
+ .num_nodes = ARRAY_SIZE(((struct qcom_icc_node *[]){ __VA_ARGS__ })), \
+ .nodes = { __VA_ARGS__ }, \
+}
+
+struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name);
+void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm);
+int qcom_icc_bcm_voter_commit(struct bcm_voter *voter);
+
+#endif
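To make the DEFINE_QBCM macro above concrete, here is a sketch of roughly what one invocation expands to, using a node name taken from the sc7180 tables later in this series (the invocation itself is illustrative):

    /* Hedged sketch: DEFINE_QBCM(bcm_sn0, "SN0", false, &qns_gemnoc_sf)
     * expands to approximately the following. */
    static struct qcom_icc_bcm bcm_sn0 = {
    	.name = "SN0",
    	.keepalive = false,
    	.num_nodes = 1,		/* ARRAY_SIZE of the compound literal */
    	.nodes = { &qns_gemnoc_sf },
    };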
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
new file mode 100644
index 000000000000..3ac5182c9ab2
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+/**
+ * qcom_icc_pre_aggregate - cleans up stale values from prior icc_set
+ * @node: icc node to operate on
+ */
+void qcom_icc_pre_aggregate(struct icc_node *node)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ qn->sum_avg[i] = 0;
+ qn->max_peak[i] = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
+
+/**
+ * qcom_icc_aggregate - aggregate bw for buckets indicated by tag
+ * @node: node to aggregate
+ * @tag: tag to indicate which buckets to aggregate
+ * @avg_bw: new bw to sum aggregate
+ * @peak_bw: new bw to max aggregate
+ * @agg_avg: existing aggregate avg bw val
+ * @agg_peak: existing aggregate peak bw val
+ */
+int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+ struct qcom_icc_provider *qp;
+
+ qn = node->data;
+ qp = to_qcom_provider(node->provider);
+
+ if (!tag)
+ tag = QCOM_ICC_TAG_ALWAYS;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ if (tag & BIT(i)) {
+ qn->sum_avg[i] += avg_bw;
+ qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
+ }
+ }
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
+
+/**
+ * qcom_icc_set - set the constraints based on path
+ * @src: source node for the path to set constraints on
+ * @dst: destination node for the path to set constraints on
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+
+ if (!src)
+ node = dst;
+ else
+ node = src;
+
+ qp = to_qcom_provider(node->provider);
+
+ qcom_icc_bcm_voter_commit(qp->voter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_set);
+
+/**
+ * qcom_icc_bcm_init - populates bcm aux data and connect qnodes
+ * @bcm: bcm to be initialized
+ * @dev: associated provider device
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
+{
+ struct qcom_icc_node *qn;
+ const struct bcm_db *data;
+ size_t data_count;
+ int i;
+
+ /* BCM is already initialised */
+ if (bcm->addr)
+ return 0;
+
+ bcm->addr = cmd_db_read_addr(bcm->name);
+ if (!bcm->addr) {
+ dev_err(dev, "%s could not find RPMh address\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ data = cmd_db_read_aux_data(bcm->name, &data_count);
+ if (IS_ERR(data)) {
+ dev_err(dev, "%s command db read error (%ld)\n",
+ bcm->name, PTR_ERR(data));
+ return PTR_ERR(data);
+ }
+ if (!data_count) {
+ dev_err(dev, "%s command db missing or partial aux data\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm->aux_data.unit = le32_to_cpu(data->unit);
+ bcm->aux_data.width = le16_to_cpu(data->width);
+ bcm->aux_data.vcd = data->vcd;
+ bcm->aux_data.reserved = data->reserved;
+ INIT_LIST_HEAD(&bcm->list);
+ INIT_LIST_HEAD(&bcm->ws_list);
+
+ /* Link Qnodes to their respective BCMs */
+ for (i = 0; i < bcm->num_nodes; i++) {
+ qn = bcm->nodes[i];
+ qn->bcms[qn->num_bcms] = bcm;
+ qn->num_bcms++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_bcm_init);
+
+MODULE_LICENSE("GPL v2");
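qcom_icc_aggregate() above sorts each request into buckets selected by its tag, defaulting to all buckets when no tag is set. On the consumer side, the tag is attached to a path with the generic interconnect API before voting; a hedged sketch follows, where the path name and bandwidth values are examples and the tag constant is the driver-internal one from icc-rpmh.h below (a real consumer would use whatever tag header the platform exports):

    /* Hedged consumer sketch: vote bandwidth into specific buckets. */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/interconnect.h>

    #include "icc-rpmh.h"	/* illustrative: for QCOM_ICC_TAG_ACTIVE_ONLY */

    static int example_vote(struct device *dev)
    {
    	struct icc_path *path;
    	int ret;

    	path = of_icc_get(dev, "memory");	/* path name from DT, example */
    	if (IS_ERR(path))
    		return PTR_ERR(path);

    	/* Apply this vote only while the subsystem is awake (AMC + WAKE). */
    	icc_set_tag(path, QCOM_ICC_TAG_ACTIVE_ONLY);

    	ret = icc_set_bw(path, 100000 /* avg kBps */, 200000 /* peak kBps */);
    	icc_put(path);
    	return ret;
    }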
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
new file mode 100644
index 000000000000..903d25e61984
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_RPMH_H__
+#define __DRIVERS_INTERCONNECT_QCOM_ICC_RPMH_H__
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+/**
+ * struct qcom_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @dev: reference to the NoC device
+ * @bcms: list of bcms that maps to the provider
+ * @num_bcms: number of @bcms
+ * @voter: bcm voter targeted by this provider
+ */
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct device *dev;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+ struct bcm_voter *voter;
+};
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+#define MAX_LINKS 128
+#define MAX_BCMS 64
+#define MAX_BCM_PER_NODE 3
+#define MAX_VCD 10
+
+/*
+ * The AMC bucket denotes constraints that are applied to hardware when
+ * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
+ * when the execution environment transitions between active and low power mode.
+ */
+#define QCOM_ICC_BUCKET_AMC 0
+#define QCOM_ICC_BUCKET_WAKE 1
+#define QCOM_ICC_BUCKET_SLEEP 2
+#define QCOM_ICC_NUM_BUCKETS 3
+#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
+#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
+#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
+#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
+#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
+ QCOM_ICC_TAG_SLEEP)
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @links: an array of nodes where we can go next while traversing
+ * @id: a unique node identifier
+ * @num_links: the total number of @links
+ * @channels: num of channels at this node
+ * @buswidth: width of the interconnect between a node and the bus
+ * @sum_avg: current sum aggregate value of all avg bw requests
+ * @max_peak: current max aggregate value of all peak bw requests
+ * @bcms: list of bcms associated with this logical node
+ * @num_bcms: num of @bcms
+ */
+struct qcom_icc_node {
+ const char *name;
+ u16 links[MAX_LINKS];
+ u16 id;
+ u16 num_links;
+ u16 channels;
+ u16 buswidth;
+ u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
+ u64 max_peak[QCOM_ICC_NUM_BUCKETS];
+ struct qcom_icc_bcm *bcms[MAX_BCM_PER_NODE];
+ size_t num_bcms;
+};
+
+/**
+ * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
+ * known as Bus Clock Manager (BCM)
+ * @name: the bcm node name used to fetch BCM data from command db
+ * @type: latency or bandwidth bcm
+ * @addr: address offsets used when voting to RPMH
+ * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
+ * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
+ * @dirty: flag used to indicate whether the bcm needs to be committed
+ * @keepalive: flag used to indicate whether a keepalive is required
+ * @aux_data: auxiliary data used when calculating threshold values and
+ * communicating with RPMh
+ * @list: used to link to other bcms when compiling lists for commit
+ * @ws_list: used to keep track of bcms that may transition between wake/sleep
+ * @num_nodes: total number of @nodes
+ * @nodes: list of qcom_icc_nodes that this BCM encapsulates
+ */
+struct qcom_icc_bcm {
+ const char *name;
+ u32 type;
+ u32 addr;
+ u64 vote_x[QCOM_ICC_NUM_BUCKETS];
+ u64 vote_y[QCOM_ICC_NUM_BUCKETS];
+ bool dirty;
+ bool keepalive;
+ struct bcm_db aux_data;
+ struct list_head list;
+ struct list_head ws_list;
+ size_t num_nodes;
+ struct qcom_icc_node *nodes[];
+};
+
+struct qcom_icc_fabric {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
+#define DEFINE_QNODE(_name, _id, _channels, _buswidth, ...) \
+ static struct qcom_icc_node _name = { \
+ .id = _id, \
+ .name = #_name, \
+ .channels = _channels, \
+ .buswidth = _buswidth, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+int qcom_icc_set(struct icc_node *src, struct icc_node *dst);
+int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev);
+void qcom_icc_pre_aggregate(struct icc_node *node);
+
+#endif
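Likewise, a sketch of what the DEFINE_QNODE macro above expands to, using an invocation that appears in the sc7180 tables later in this series:

    /* Hedged sketch: DEFINE_QNODE(qhm_qspi, SC7180_MASTER_QSPI, 1, 4,
     * SC7180_SLAVE_A1NOC_SNOC) expands to approximately the following. */
    static struct qcom_icc_node qhm_qspi = {
    	.id = SC7180_MASTER_QSPI,
    	.name = "qhm_qspi",
    	.channels = 1,
    	.buswidth = 4,
    	.num_links = 1,		/* ARRAY_SIZE of the link id list */
    	.links = { SC7180_SLAVE_A1NOC_SNOC },
    };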
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
new file mode 100644
index 000000000000..96fb9ff5ff2e
--- /dev/null
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interconnect/qcom,osm-l3.h>
+
+#include "sc7180.h"
+#include "sdm845.h"
+
+#define LUT_MAX_ENTRIES 40U
+#define LUT_SRC GENMASK(31, 30)
+#define LUT_L_VAL GENMASK(7, 0)
+#define LUT_ROW_SIZE 32
+#define CLK_HW_DIV 2
+
+/* Register offsets */
+#define REG_ENABLE 0x0
+#define REG_FREQ_LUT 0x110
+#define REG_PERF_STATE 0x920
+
+#define OSM_L3_MAX_LINKS 1
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_osm_l3_icc_provider, provider)
+
+struct qcom_osm_l3_icc_provider {
+ void __iomem *base;
+ unsigned int max_state;
+ unsigned long lut_tables[LUT_MAX_ENTRIES];
+ struct icc_provider provider;
+};
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @links: an array of nodes where we can go next while traversing
+ * @id: a unique node identifier
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus
+ */
+struct qcom_icc_node {
+ const char *name;
+ u16 links[OSM_L3_MAX_LINKS];
+ u16 id;
+ u16 num_links;
+ u16 buswidth;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, ...) \
+ static struct qcom_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3);
+DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16);
+
+static struct qcom_icc_node *sdm845_osm_l3_nodes[] = {
+ [MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3,
+ [SLAVE_OSM_L3] = &sdm845_osm_l3,
+};
+
+static const struct qcom_icc_desc sdm845_icc_osm_l3 = {
+ .nodes = sdm845_osm_l3_nodes,
+ .num_nodes = ARRAY_SIZE(sdm845_osm_l3_nodes),
+};
+
+DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3);
+DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16);
+
+static struct qcom_icc_node *sc7180_osm_l3_nodes[] = {
+ [MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3,
+ [SLAVE_OSM_L3] = &sc7180_osm_l3,
+};
+
+static const struct qcom_icc_desc sc7180_icc_osm_l3 = {
+ .nodes = sc7180_osm_l3_nodes,
+ .num_nodes = ARRAY_SIZE(sc7180_osm_l3_nodes),
+};
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_osm_l3_icc_provider *qp;
+ struct icc_provider *provider;
+ struct qcom_icc_node *qn;
+ struct icc_node *n;
+ unsigned int index;
+ u32 agg_peak = 0;
+ u32 agg_avg = 0;
+ u64 rate;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ rate = max(agg_avg, agg_peak);
+ rate = icc_units_to_bps(rate);
+ do_div(rate, qn->buswidth);
+
+ for (index = 0; index < qp->max_state - 1; index++) {
+ if (qp->lut_tables[index] >= rate)
+ break;
+ }
+
+ writel_relaxed(index, qp->base + REG_PERF_STATE);
+
+ return 0;
+}
+
+static int qcom_osm_l3_remove(struct platform_device *pdev)
+{
+ struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static int qcom_osm_l3_probe(struct platform_device *pdev)
+{
+ u32 info, src, lval, i, prev_freq = 0, freq;
+ static unsigned long hw_rate, xo_rate;
+ struct qcom_osm_l3_icc_provider *qp;
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct icc_node *node;
+ size_t num_nodes;
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(&pdev->dev, "xo");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ xo_rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ clk = clk_get(&pdev->dev, "alternate");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
+ clk_put(clk);
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ qp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qp->base))
+ return PTR_ERR(qp->base);
+
+ /* HW should be in enabled state to proceed */
+ if (!(readl_relaxed(qp->base + REG_ENABLE) & 0x1)) {
+ dev_err(&pdev->dev, "error hardware not enabled\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+ info = readl_relaxed(qp->base + REG_FREQ_LUT +
+ i * LUT_ROW_SIZE);
+ src = FIELD_GET(LUT_SRC, info);
+ lval = FIELD_GET(LUT_L_VAL, info);
+ if (src)
+ freq = xo_rate * lval;
+ else
+ freq = hw_rate;
+
+ /* Two of the same frequencies signify end of table */
+ if (i > 0 && prev_freq == freq)
+ break;
+
+ dev_dbg(&pdev->dev, "index=%d freq=%d\n", i, freq);
+
+ qp->lut_tables[i] = freq;
+ prev_freq = freq;
+ }
+ qp->max_state = i;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+
+ return ret;
+}
+
+static const struct of_device_id osm_l3_of_match[] = {
+ { .compatible = "qcom,sc7180-osm-l3", .data = &sc7180_icc_osm_l3 },
+ { .compatible = "qcom,sdm845-osm-l3", .data = &sdm845_icc_osm_l3 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, osm_l3_of_match);
+
+static struct platform_driver osm_l3_driver = {
+ .probe = qcom_osm_l3_probe,
+ .remove = qcom_osm_l3_remove,
+ .driver = {
+ .name = "osm-l3",
+ .of_match_table = osm_l3_of_match,
+ },
+};
+module_platform_driver(osm_l3_driver);
+
+MODULE_DESCRIPTION("Qualcomm OSM L3 interconnect driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
new file mode 100644
index 000000000000..dcf493d07928
--- /dev/null
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sc7180.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sc7180.h"
+
+DEFINE_QNODE(qhm_a1noc_cfg, SC7180_MASTER_A1NOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qspi, SC7180_MASTER_QSPI, 1, 4, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_qup_0, SC7180_MASTER_QUP_0, 1, 4, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, SC7180_MASTER_SDCC_2, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_emmc, SC7180_MASTER_EMMC, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, SC7180_MASTER_UFS_MEM, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, SC7180_MASTER_A2NOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, SC7180_MASTER_QDSS_BAM, 1, 4, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup_1, SC7180_MASTER_QUP_1, 1, 4, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, SC7180_MASTER_CRYPTO, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, SC7180_MASTER_IPA, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SC7180_MASTER_QDSS_ETR, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_usb3, SC7180_MASTER_USB3, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SC7180_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SC7180_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, SC7180_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qnm_npu, SC7180_MASTER_NPU, 2, 32, SC7180_SLAVE_CDSP_GEM_NOC);
+DEFINE_QNODE(qxm_npu_dsp, SC7180_MASTER_NPU_PROC, 1, 8, SC7180_SLAVE_CDSP_GEM_NOC);
+DEFINE_QNODE(qnm_snoc, SC7180_MASTER_SNOC_CNOC, 1, 8, SC7180_SLAVE_A1NOC_CFG, SC7180_SLAVE_A2NOC_CFG, SC7180_SLAVE_AHB2PHY_SOUTH, SC7180_SLAVE_AHB2PHY_CENTER, SC7180_SLAVE_AOP, SC7180_SLAVE_AOSS, SC7180_SLAVE_BOOT_ROM, SC7180_SLAVE_CAMERA_CFG, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, SC7180_SLAVE_CLK_CTL, SC7180_SLAVE_RBCPR_CX_CFG, SC7180_SLAVE_RBCPR_MX_CFG, SC7180_SLAVE_CRYPTO_0_CFG, SC7180_SLAVE_DCC_CFG, SC7180_SLAVE_CNOC_DDRSS, SC7180_SLAVE_DISPLAY_CFG, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, SC7180_SLAVE_EMMC_CFG, SC7180_SLAVE_GLM,
+ SC7180_SLAVE_GFX3D_CFG, SC7180_SLAVE_IMEM_CFG, SC7180_SLAVE_IPA_CFG, SC7180_SLAVE_CNOC_MNOC_CFG, SC7180_SLAVE_CNOC_MSS, SC7180_SLAVE_NPU_CFG, SC7180_SLAVE_NPU_DMA_BWMON_CFG, SC7180_SLAVE_NPU_PROC_BWMON_CFG, SC7180_SLAVE_PDM, SC7180_SLAVE_PIMEM_CFG, SC7180_SLAVE_PRNG, SC7180_SLAVE_QDSS_CFG, SC7180_SLAVE_QM_CFG, SC7180_SLAVE_QM_MPU_CFG, SC7180_SLAVE_QSPI_0, SC7180_SLAVE_QUP_0, SC7180_SLAVE_QUP_1, SC7180_SLAVE_SDCC_2, SC7180_SLAVE_SECURITY, SC7180_SLAVE_SNOC_CFG, SC7180_SLAVE_TCSR, SC7180_SLAVE_TLMM_WEST, SC7180_SLAVE_TLMM_NORTH, SC7180_SLAVE_TLMM_SOUTH, SC7180_SLAVE_UFS_MEM_CFG, SC7180_SLAVE_USB3, SC7180_SLAVE_VENUS_CFG, SC7180_SLAVE_VENUS_THROTTLE_CFG, SC7180_SLAVE_VSENSE_CTRL_CFG, SC7180_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(xm_qdss_dap, SC7180_MASTER_QDSS_DAP, 1, 8, SC7180_SLAVE_A1NOC_CFG, SC7180_SLAVE_A2NOC_CFG, SC7180_SLAVE_AHB2PHY_SOUTH, SC7180_SLAVE_AHB2PHY_CENTER, SC7180_SLAVE_AOP, SC7180_SLAVE_AOSS, SC7180_SLAVE_BOOT_ROM, SC7180_SLAVE_CAMERA_CFG, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, SC7180_SLAVE_CLK_CTL, SC7180_SLAVE_RBCPR_CX_CFG, SC7180_SLAVE_RBCPR_MX_CFG, SC7180_SLAVE_CRYPTO_0_CFG, SC7180_SLAVE_DCC_CFG, SC7180_SLAVE_CNOC_DDRSS, SC7180_SLAVE_DISPLAY_CFG, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, SC7180_SLAVE_EMMC_CFG, SC7180_SLAVE_GLM, SC7180_SLAVE_GFX3D_CFG, SC7180_SLAVE_IMEM_CFG, SC7180_SLAVE_IPA_CFG, SC7180_SLAVE_CNOC_MNOC_CFG, SC7180_SLAVE_CNOC_MSS, SC7180_SLAVE_NPU_CFG, SC7180_SLAVE_NPU_DMA_BWMON_CFG,
+SC7180_SLAVE_NPU_PROC_BWMON_CFG, SC7180_SLAVE_PDM, SC7180_SLAVE_PIMEM_CFG, SC7180_SLAVE_PRNG, SC7180_SLAVE_QDSS_CFG, SC7180_SLAVE_QM_CFG, SC7180_SLAVE_QM_MPU_CFG, SC7180_SLAVE_QSPI_0, SC7180_SLAVE_QUP_0, SC7180_SLAVE_QUP_1, SC7180_SLAVE_SDCC_2, SC7180_SLAVE_SECURITY, SC7180_SLAVE_SNOC_CFG, SC7180_SLAVE_TCSR, SC7180_SLAVE_TLMM_WEST, SC7180_SLAVE_TLMM_NORTH, SC7180_SLAVE_TLMM_SOUTH, SC7180_SLAVE_UFS_MEM_CFG, SC7180_SLAVE_USB3, SC7180_SLAVE_VENUS_CFG, SC7180_SLAVE_VENUS_THROTTLE_CFG, SC7180_SLAVE_VSENSE_CTRL_CFG, SC7180_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qhm_cnoc_dc_noc, SC7180_MASTER_CNOC_DC_NOC, 1, 4, SC7180_SLAVE_GEM_NOC_CFG, SC7180_SLAVE_LLCC_CFG);
+DEFINE_QNODE(acm_apps0, SC7180_MASTER_APPSS_PROC, 1, 16, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(acm_sys_tcu, SC7180_MASTER_SYS_TCU, 1, 8, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qhm_gemnoc_cfg, SC7180_MASTER_GEM_NOC_CFG, 1, 4, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, SC7180_SLAVE_SERVICE_GEM_NOC);
+DEFINE_QNODE(qnm_cmpnoc, SC7180_MASTER_COMPUTE_NOC, 1, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, SC7180_MASTER_MNOC_HF_MEM_NOC, 1, 32, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE);
+DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1);
+DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, SC7180_MASTER_CAMNOC_HF1, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, SC7180_MASTER_CAMNOC_SF, 1, 32, SC7180_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SC7180_MASTER_MDP0, 1, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SC7180_MASTER_ROTATOR, 1, 16, SC7180_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, SC7180_MASTER_VIDEO_P0, 1, 32, SC7180_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, SC7180_MASTER_VIDEO_PROC, 1, 8, SC7180_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(amm_npu_sys, SC7180_MASTER_NPU_SYS, 2, 32, SC7180_SLAVE_NPU_COMPUTE_NOC);
+DEFINE_QNODE(qhm_npu_cfg, SC7180_MASTER_NPU_NOC_CFG, 1, 4, SC7180_SLAVE_NPU_CAL_DP0, SC7180_SLAVE_NPU_CP, SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG, SC7180_SLAVE_NPU_DPM, SC7180_SLAVE_ISENSE_CFG, SC7180_SLAVE_NPU_LLM_CFG, SC7180_SLAVE_NPU_TCM, SC7180_SLAVE_SERVICE_NPU_NOC);
+DEFINE_QNODE(qup_core_master_1, SC7180_MASTER_QUP_CORE_0, 1, 4, SC7180_SLAVE_QUP_CORE_0);
+DEFINE_QNODE(qup_core_master_2, SC7180_MASTER_QUP_CORE_1, 1, 4, SC7180_SLAVE_QUP_CORE_1);
+DEFINE_QNODE(qhm_snoc_cfg, SC7180_MASTER_SNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, SC7180_MASTER_A1NOC_SNOC, 1, 16, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_SNOC_GEM_NOC_SF, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, SC7180_MASTER_A2NOC_SNOC, 1, 16, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_SNOC_GEM_NOC_SF, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM, SC7180_SLAVE_TCU);
+DEFINE_QNODE(qnm_gemnoc, SC7180_MASTER_GEM_NOC_SNOC, 1, 8, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM, SC7180_SLAVE_TCU);
+DEFINE_QNODE(qxm_pimem, SC7180_MASTER_PIMEM, 1, 8, SC7180_SLAVE_SNOC_GEM_NOC_GC, SC7180_SLAVE_IMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SC7180_SLAVE_A1NOC_SNOC, 1, 16, SC7180_MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SC7180_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_a2noc_snoc, SC7180_SLAVE_A2NOC_SNOC, 1, 16, SC7180_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SC7180_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qns_camnoc_uncomp, SC7180_SLAVE_CAMNOC_UNCOMP, 1, 32);
+DEFINE_QNODE(qns_cdsp_gemnoc, SC7180_SLAVE_CDSP_GEM_NOC, 1, 32, SC7180_MASTER_COMPUTE_NOC);
+DEFINE_QNODE(qhs_a1_noc_cfg, SC7180_SLAVE_A1NOC_CFG, 1, 4, SC7180_MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SC7180_SLAVE_A2NOC_CFG, 1, 4, SC7180_MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_ahb2phy0, SC7180_SLAVE_AHB2PHY_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_ahb2phy2, SC7180_SLAVE_AHB2PHY_CENTER, 1, 4);
+DEFINE_QNODE(qhs_aop, SC7180_SLAVE_AOP, 1, 4);
+DEFINE_QNODE(qhs_aoss, SC7180_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_boot_rom, SC7180_SLAVE_BOOT_ROM, 1, 4);
+DEFINE_QNODE(qhs_camera_cfg, SC7180_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_camera_nrt_throttle_cfg, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_camera_rt_throttle_cfg, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SC7180_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SC7180_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mx, SC7180_SLAVE_RBCPR_MX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SC7180_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SC7180_SLAVE_DCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SC7180_SLAVE_CNOC_DDRSS, 1, 4, SC7180_MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_display_cfg, SC7180_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_display_rt_throttle_cfg, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_display_throttle_cfg, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_emmc_cfg, SC7180_SLAVE_EMMC_CFG, 1, 4);
+DEFINE_QNODE(qhs_glm, SC7180_SLAVE_GLM, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SC7180_SLAVE_GFX3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_imem_cfg, SC7180_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SC7180_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mnoc_cfg, SC7180_SLAVE_CNOC_MNOC_CFG, 1, 4, SC7180_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_mss_cfg, SC7180_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_npu_cfg, SC7180_SLAVE_NPU_CFG, 1, 4, SC7180_MASTER_NPU_NOC_CFG);
+DEFINE_QNODE(qhs_npu_dma_throttle_cfg, SC7180_SLAVE_NPU_DMA_BWMON_CFG, 1, 4);
+DEFINE_QNODE(qhs_npu_dsp_throttle_cfg, SC7180_SLAVE_NPU_PROC_BWMON_CFG, 1, 4);
+DEFINE_QNODE(qhs_pdm, SC7180_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SC7180_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_prng, SC7180_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SC7180_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qm_cfg, SC7180_SLAVE_QM_CFG, 1, 4);
+DEFINE_QNODE(qhs_qm_mpu_cfg, SC7180_SLAVE_QM_MPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_qspi, SC7180_SLAVE_QSPI_0, 1, 4);
+DEFINE_QNODE(qhs_qup0, SC7180_SLAVE_QUP_0, 1, 4);
+DEFINE_QNODE(qhs_qup1, SC7180_SLAVE_QUP_1, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SC7180_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_security, SC7180_SLAVE_SECURITY, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SC7180_SLAVE_SNOC_CFG, 1, 4, SC7180_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_tcsr, SC7180_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm_1, SC7180_SLAVE_TLMM_WEST, 1, 4);
+DEFINE_QNODE(qhs_tlmm_2, SC7180_SLAVE_TLMM_NORTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm_3, SC7180_SLAVE_TLMM_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SC7180_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3, SC7180_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SC7180_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_venus_throttle_cfg, SC7180_SLAVE_VENUS_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SC7180_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(srvc_cnoc, SC7180_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(qhs_gemnoc, SC7180_SLAVE_GEM_NOC_CFG, 1, 4, SC7180_MASTER_GEM_NOC_CFG);
+DEFINE_QNODE(qhs_llcc, SC7180_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC);
+DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC);
+DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4);
+DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8);
+DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4);
+DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SC7180_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qhs_cal_dp0, SC7180_SLAVE_NPU_CAL_DP0, 1, 4);
+DEFINE_QNODE(qhs_cp, SC7180_SLAVE_NPU_CP, 1, 4);
+DEFINE_QNODE(qhs_dma_bwmon, SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG, 1, 4);
+DEFINE_QNODE(qhs_dpm, SC7180_SLAVE_NPU_DPM, 1, 4);
+DEFINE_QNODE(qhs_isense, SC7180_SLAVE_ISENSE_CFG, 1, 4);
+DEFINE_QNODE(qhs_llm, SC7180_SLAVE_NPU_LLM_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcm, SC7180_SLAVE_NPU_TCM, 1, 4);
+DEFINE_QNODE(qns_npu_sys, SC7180_SLAVE_NPU_COMPUTE_NOC, 2, 32);
+DEFINE_QNODE(srvc_noc, SC7180_SLAVE_SERVICE_NPU_NOC, 1, 4);
+DEFINE_QNODE(qup_core_slave_1, SC7180_SLAVE_QUP_CORE_0, 1, 4);
+DEFINE_QNODE(qup_core_slave_2, SC7180_SLAVE_QUP_CORE_1, 1, 4);
+DEFINE_QNODE(qhs_apss, SC7180_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qns_cnoc, SC7180_SLAVE_SNOC_CNOC, 1, 8, SC7180_MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_gemnoc_gc, SC7180_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SC7180_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_gemnoc_sf, SC7180_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SC7180_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SC7180_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(qxs_pimem, SC7180_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SC7180_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_qdss_stm, SC7180_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SC7180_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
+DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup_core_master_1, &qup_core_master_2);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps0);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_cn1, "CN1", false, &qhm_qspi, &xm_sdc2, &xm_emmc, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qxm_pimem, &qns_gemnoc_gc);
+DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
+
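+/*
+ * DEFINE_QNODE() and DEFINE_QBCM() come from the shared icc-rpmh.h and,
+ * unlike the per-driver macros used before this series, size the link and
+ * node arrays from their variadic arguments, roughly:
+ *
+ *   #define DEFINE_QNODE(_name, _id, _channels, _buswidth, ...)
+ *           static struct qcom_icc_node _name = {
+ *                   .id = _id,
+ *                   .name = #_name,
+ *                   .channels = _channels,
+ *                   .buswidth = _buswidth,
+ *                   .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })),
+ *                   .links = { __VA_ARGS__ },
+ *           }
+ */
+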
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_QSPI] = &qhm_qspi,
+ [MASTER_QUP_0] = &qhm_qup_0,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_EMMC] = &xm_emmc,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sc7180_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_1] = &qhm_qup_1,
+ [MASTER_USB3] = &qhm_usb3,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sc7180_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node *camnoc_virt_nodes[] = {
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+};
+
+static struct qcom_icc_desc sc7180_camnoc_virt = {
+ .nodes = camnoc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
+ .bcms = camnoc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *compute_noc_bcms[] = {
+ &bcm_co0,
+ &bcm_co2,
+ &bcm_co3,
+};
+
+static struct qcom_icc_node *compute_noc_nodes[] = {
+ [MASTER_NPU] = &qnm_npu,
+ [MASTER_NPU_PROC] = &qxm_npu_dsp,
+ [SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
+};
+
+static struct qcom_icc_desc sc7180_compute_noc = {
+ .nodes = compute_noc_nodes,
+ .num_nodes = ARRAY_SIZE(compute_noc_nodes),
+ .bcms = compute_noc_bcms,
+ .num_bcms = ARRAY_SIZE(compute_noc_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_CENTER] = &qhs_ahb2phy2,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_BOOT_ROM] = &qhs_boot_rom,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_throttle_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_DISPLAY_RT_THROTTLE_CFG] = &qhs_display_rt_throttle_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
+ [SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_NPU_CFG] = &qhs_npu_cfg,
+ [SLAVE_NPU_DMA_BWMON_CFG] = &qhs_npu_dma_throttle_cfg,
+ [SLAVE_NPU_PROC_BWMON_CFG] = &qhs_npu_dsp_throttle_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QM_CFG] = &qhs_qm_cfg,
+ [SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_WEST] = &qhs_tlmm_1,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_2,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_3,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static struct qcom_icc_desc sc7180_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
+ [SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+};
+
+static struct qcom_icc_desc sc7180_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+};
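+
+/*
+ * The DC NoC (and the NPU NoC below) has no BCMs to vote on, so .bcms and
+ * .num_bcms stay zeroed here and the BCM loops in qnoc_probe() run zero
+ * times.
+ */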
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh4,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_APPSS_PROC] = &acm_apps0,
+ [MASTER_SYS_TCU] = &acm_sys_tcu,
+ [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GFX3D] = &qxm_gpu,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
+};
+
+static struct qcom_icc_desc sc7180_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+ &bcm_ip0,
+};
+
+static struct qcom_icc_node *ipa_virt_nodes[] = {
+ [MASTER_IPA_CORE] = &ipa_core_master,
+ [SLAVE_IPA_CORE] = &ipa_core_slave,
+};
+
+static struct qcom_icc_desc sc7180_ipa_virt = {
+ .nodes = ipa_virt_nodes,
+ .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
+ .bcms = ipa_virt_bcms,
+ .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static struct qcom_icc_desc sc7180_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static struct qcom_icc_desc sc7180_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_node *npu_noc_nodes[] = {
+ [MASTER_NPU_SYS] = &amm_npu_sys,
+ [MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
+ [SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
+ [SLAVE_NPU_CP] = &qhs_cp,
+ [SLAVE_NPU_INT_DMA_BWMON_CFG] = &qhs_dma_bwmon,
+ [SLAVE_NPU_DPM] = &qhs_dpm,
+ [SLAVE_ISENSE_CFG] = &qhs_isense,
+ [SLAVE_NPU_LLM_CFG] = &qhs_llm,
+ [SLAVE_NPU_TCM] = &qhs_tcm,
+ [SLAVE_NPU_COMPUTE_NOC] = &qns_npu_sys,
+ [SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
+};
+
+static struct qcom_icc_desc sc7180_npu_noc = {
+ .nodes = npu_noc_nodes,
+ .num_nodes = ARRAY_SIZE(npu_noc_nodes),
+};
+
+static struct qcom_icc_bcm *qup_virt_bcms[] = {
+ &bcm_qup0,
+};
+
+static struct qcom_icc_node *qup_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup_core_master_1,
+ [MASTER_QUP_CORE_1] = &qup_core_master_2,
+ [SLAVE_QUP_CORE_0] = &qup_core_slave_1,
+ [SLAVE_QUP_CORE_1] = &qup_core_slave_2,
+};
+
+static struct qcom_icc_desc sc7180_qup_virt = {
+ .nodes = qup_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qup_virt_nodes),
+ .bcms = qup_virt_bcms,
+ .num_bcms = ARRAY_SIZE(qup_virt_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn7,
+ &bcm_sn9,
+ &bcm_sn12,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sc7180_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
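+/*
+ * For context, consumers vote on paths across these NoCs through the generic
+ * interconnect API; a minimal sketch (the "sdhc-mem" path name is made up):
+ *
+ *   struct icc_path *path = of_icc_get(dev, "sdhc-mem");
+ *
+ *   if (IS_ERR(path))
+ *           return PTR_ERR(path);
+ *   icc_set_bw(path, kBps_to_icc(200000), kBps_to_icc(400000));
+ */
+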
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sc7180-aggre1-noc",
+ .data = &sc7180_aggre1_noc},
+ { .compatible = "qcom,sc7180-aggre2-noc",
+ .data = &sc7180_aggre2_noc},
+ { .compatible = "qcom,sc7180-camnoc-virt",
+ .data = &sc7180_camnoc_virt},
+ { .compatible = "qcom,sc7180-compute-noc",
+ .data = &sc7180_compute_noc},
+ { .compatible = "qcom,sc7180-config-noc",
+ .data = &sc7180_config_noc},
+ { .compatible = "qcom,sc7180-dc-noc",
+ .data = &sc7180_dc_noc},
+ { .compatible = "qcom,sc7180-gem-noc",
+ .data = &sc7180_gem_noc},
+ { .compatible = "qcom,sc7180-ipa-virt",
+ .data = &sc7180_ipa_virt},
+ { .compatible = "qcom,sc7180-mc-virt",
+ .data = &sc7180_mc_virt},
+ { .compatible = "qcom,sc7180-mmss-noc",
+ .data = &sc7180_mmss_noc},
+ { .compatible = "qcom,sc7180-npu-noc",
+ .data = &sc7180_npu_noc},
+ { .compatible = "qcom,sc7180-qup-virt",
+ .data = &sc7180_qup_virt},
+ { .compatible = "qcom,sc7180-system-noc",
+ .data = &sc7180_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sc7180",
+ .of_match_table = qnoc_of_match,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SC7180 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sc7180.h b/drivers/interconnect/qcom/sc7180.h
new file mode 100644
index 000000000000..c6212a10c2f6
--- /dev/null
+++ b/drivers/interconnect/qcom/sc7180.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SC7180 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SC7180_H
+#define __DRIVERS_INTERCONNECT_QCOM_SC7180_H
+
+#define SC7180_MASTER_APPSS_PROC 0
+#define SC7180_MASTER_SYS_TCU 1
+#define SC7180_MASTER_NPU_SYS 2
+#define SC7180_MASTER_IPA_CORE 3
+#define SC7180_MASTER_LLCC 4
+#define SC7180_MASTER_A1NOC_CFG 5
+#define SC7180_MASTER_A2NOC_CFG 6
+#define SC7180_MASTER_CNOC_DC_NOC 7
+#define SC7180_MASTER_GEM_NOC_CFG 8
+#define SC7180_MASTER_CNOC_MNOC_CFG 9
+#define SC7180_MASTER_NPU_NOC_CFG 10
+#define SC7180_MASTER_QDSS_BAM 11
+#define SC7180_MASTER_QSPI 12
+#define SC7180_MASTER_QUP_0 13
+#define SC7180_MASTER_QUP_1 14
+#define SC7180_MASTER_SNOC_CFG 15
+#define SC7180_MASTER_A1NOC_SNOC 16
+#define SC7180_MASTER_A2NOC_SNOC 17
+#define SC7180_MASTER_COMPUTE_NOC 18
+#define SC7180_MASTER_GEM_NOC_SNOC 19
+#define SC7180_MASTER_MNOC_HF_MEM_NOC 20
+#define SC7180_MASTER_MNOC_SF_MEM_NOC 21
+#define SC7180_MASTER_NPU 22
+#define SC7180_MASTER_SNOC_CNOC 23
+#define SC7180_MASTER_SNOC_GC_MEM_NOC 24
+#define SC7180_MASTER_SNOC_SF_MEM_NOC 25
+#define SC7180_MASTER_QUP_CORE_0 26
+#define SC7180_MASTER_QUP_CORE_1 27
+#define SC7180_MASTER_CAMNOC_HF0 28
+#define SC7180_MASTER_CAMNOC_HF1 29
+#define SC7180_MASTER_CAMNOC_HF0_UNCOMP 30
+#define SC7180_MASTER_CAMNOC_HF1_UNCOMP 31
+#define SC7180_MASTER_CAMNOC_SF 32
+#define SC7180_MASTER_CAMNOC_SF_UNCOMP 33
+#define SC7180_MASTER_CRYPTO 34
+#define SC7180_MASTER_GFX3D 35
+#define SC7180_MASTER_IPA 36
+#define SC7180_MASTER_MDP0 37
+#define SC7180_MASTER_NPU_PROC 38
+#define SC7180_MASTER_PIMEM 39
+#define SC7180_MASTER_ROTATOR 40
+#define SC7180_MASTER_VIDEO_P0 41
+#define SC7180_MASTER_VIDEO_PROC 42
+#define SC7180_MASTER_QDSS_DAP 43
+#define SC7180_MASTER_QDSS_ETR 44
+#define SC7180_MASTER_SDCC_2 45
+#define SC7180_MASTER_UFS_MEM 46
+#define SC7180_MASTER_USB3 47
+#define SC7180_MASTER_EMMC 48
+#define SC7180_SLAVE_EBI1 49
+#define SC7180_SLAVE_IPA_CORE 50
+#define SC7180_SLAVE_A1NOC_CFG 51
+#define SC7180_SLAVE_A2NOC_CFG 52
+#define SC7180_SLAVE_AHB2PHY_SOUTH 53
+#define SC7180_SLAVE_AHB2PHY_CENTER 54
+#define SC7180_SLAVE_AOP 55
+#define SC7180_SLAVE_AOSS 56
+#define SC7180_SLAVE_APPSS 57
+#define SC7180_SLAVE_BOOT_ROM 58
+#define SC7180_SLAVE_NPU_CAL_DP0 59
+#define SC7180_SLAVE_CAMERA_CFG 60
+#define SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG 61
+#define SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG 62
+#define SC7180_SLAVE_CLK_CTL 63
+#define SC7180_SLAVE_NPU_CP 64
+#define SC7180_SLAVE_RBCPR_CX_CFG 65
+#define SC7180_SLAVE_RBCPR_MX_CFG 66
+#define SC7180_SLAVE_CRYPTO_0_CFG 67
+#define SC7180_SLAVE_DCC_CFG 68
+#define SC7180_SLAVE_CNOC_DDRSS 69
+#define SC7180_SLAVE_DISPLAY_CFG 70
+#define SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG 71
+#define SC7180_SLAVE_DISPLAY_THROTTLE_CFG 72
+#define SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG 73
+#define SC7180_SLAVE_NPU_DPM 74
+#define SC7180_SLAVE_EMMC_CFG 75
+#define SC7180_SLAVE_GEM_NOC_CFG 76
+#define SC7180_SLAVE_GLM 77
+#define SC7180_SLAVE_GFX3D_CFG 78
+#define SC7180_SLAVE_IMEM_CFG 79
+#define SC7180_SLAVE_IPA_CFG 80
+#define SC7180_SLAVE_ISENSE_CFG 81
+#define SC7180_SLAVE_LLCC_CFG 82
+#define SC7180_SLAVE_NPU_LLM_CFG 83
+#define SC7180_SLAVE_MSS_PROC_MS_MPU_CFG 84
+#define SC7180_SLAVE_CNOC_MNOC_CFG 85
+#define SC7180_SLAVE_CNOC_MSS 86
+#define SC7180_SLAVE_NPU_CFG 87
+#define SC7180_SLAVE_NPU_DMA_BWMON_CFG 88
+#define SC7180_SLAVE_NPU_PROC_BWMON_CFG 89
+#define SC7180_SLAVE_PDM 90
+#define SC7180_SLAVE_PIMEM_CFG 91
+#define SC7180_SLAVE_PRNG 92
+#define SC7180_SLAVE_QDSS_CFG 93
+#define SC7180_SLAVE_QM_CFG 94
+#define SC7180_SLAVE_QM_MPU_CFG 95
+#define SC7180_SLAVE_QSPI_0 96
+#define SC7180_SLAVE_QUP_0 97
+#define SC7180_SLAVE_QUP_1 98
+#define SC7180_SLAVE_SDCC_2 99
+#define SC7180_SLAVE_SECURITY 100
+#define SC7180_SLAVE_SNOC_CFG 101
+#define SC7180_SLAVE_NPU_TCM 102
+#define SC7180_SLAVE_TCSR 103
+#define SC7180_SLAVE_TLMM_WEST 104
+#define SC7180_SLAVE_TLMM_NORTH 105
+#define SC7180_SLAVE_TLMM_SOUTH 106
+#define SC7180_SLAVE_UFS_MEM_CFG 107
+#define SC7180_SLAVE_USB3 108
+#define SC7180_SLAVE_VENUS_CFG 109
+#define SC7180_SLAVE_VENUS_THROTTLE_CFG 110
+#define SC7180_SLAVE_VSENSE_CTRL_CFG 111
+#define SC7180_SLAVE_A1NOC_SNOC 112
+#define SC7180_SLAVE_A2NOC_SNOC 113
+#define SC7180_SLAVE_CAMNOC_UNCOMP 114
+#define SC7180_SLAVE_CDSP_GEM_NOC 115
+#define SC7180_SLAVE_SNOC_CNOC 116
+#define SC7180_SLAVE_GEM_NOC_SNOC 117
+#define SC7180_SLAVE_SNOC_GEM_NOC_GC 118
+#define SC7180_SLAVE_SNOC_GEM_NOC_SF 119
+#define SC7180_SLAVE_LLCC 120
+#define SC7180_SLAVE_MNOC_HF_MEM_NOC 121
+#define SC7180_SLAVE_MNOC_SF_MEM_NOC 122
+#define SC7180_SLAVE_NPU_COMPUTE_NOC 123
+#define SC7180_SLAVE_QUP_CORE_0 124
+#define SC7180_SLAVE_QUP_CORE_1 125
+#define SC7180_SLAVE_IMEM 126
+#define SC7180_SLAVE_PIMEM 127
+#define SC7180_SLAVE_SERVICE_A1NOC 128
+#define SC7180_SLAVE_SERVICE_A2NOC 129
+#define SC7180_SLAVE_SERVICE_CNOC 130
+#define SC7180_SLAVE_SERVICE_GEM_NOC 131
+#define SC7180_SLAVE_SERVICE_MNOC 132
+#define SC7180_SLAVE_SERVICE_NPU_NOC 133
+#define SC7180_SLAVE_SERVICE_SNOC 134
+#define SC7180_SLAVE_QDSS_STM 135
+#define SC7180_SLAVE_TCU 136
+#define SC7180_MASTER_OSM_L3_APPS 137
+#define SC7180_SLAVE_OSM_L3 138
+
+#endif
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index f078cf0fce56..f6c7b969520d 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -1,379 +1,245 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
-#include <asm/div64.h>
-#include <dt-bindings/interconnect/qcom,sdm845.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
-#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/sort.h>
-
-#include <soc/qcom/cmd-db.h>
-#include <soc/qcom/rpmh.h>
-#include <soc/qcom/tcs.h>
-
-#define to_qcom_provider(_provider) \
- container_of(_provider, struct qcom_icc_provider, provider)
-
-struct qcom_icc_provider {
- struct icc_provider provider;
- struct device *dev;
- struct qcom_icc_bcm **bcms;
- size_t num_bcms;
-};
-
-/**
- * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
- * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
- * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
- * @vcd: virtual clock domain that this bcm belongs to
- * @reserved: reserved field
- */
-struct bcm_db {
- __le32 unit;
- __le16 width;
- u8 vcd;
- u8 reserved;
-};
-#define SDM845_MAX_LINKS 43
-#define SDM845_MAX_BCMS 30
-#define SDM845_MAX_BCM_PER_NODE 2
-#define SDM845_MAX_VCD 10
+#include <dt-bindings/interconnect/qcom,sdm845.h>
-/*
- * The AMC bucket denotes constraints that are applied to hardware when
- * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
- * when the execution environment transitions between active and low power mode.
- */
-#define QCOM_ICC_BUCKET_AMC 0
-#define QCOM_ICC_BUCKET_WAKE 1
-#define QCOM_ICC_BUCKET_SLEEP 2
-#define QCOM_ICC_NUM_BUCKETS 3
-#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
-#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
-#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
-#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
-#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
- QCOM_ICC_TAG_SLEEP)
-
-/**
- * struct qcom_icc_node - Qualcomm specific interconnect nodes
- * @name: the node name used in debugfs
- * @links: an array of nodes where we can go next while traversing
- * @id: a unique node identifier
- * @num_links: the total number of @links
- * @channels: num of channels at this node
- * @buswidth: width of the interconnect between a node and the bus
- * @sum_avg: current sum aggregate value of all avg bw requests
- * @max_peak: current max aggregate value of all peak bw requests
- * @bcms: list of bcms associated with this logical node
- * @num_bcms: num of @bcms
- */
-struct qcom_icc_node {
- const char *name;
- u16 links[SDM845_MAX_LINKS];
- u16 id;
- u16 num_links;
- u16 channels;
- u16 buswidth;
- u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
- u64 max_peak[QCOM_ICC_NUM_BUCKETS];
- struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
- size_t num_bcms;
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sdm845.h"
+
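+/*
+ * The provider structs, node/BCM macros and the AMC/WAKE/SLEEP bucket tags
+ * deleted above now live in the shared icc-rpmh.h and bcm-voter.h; bucket
+ * selection is unchanged and is still driven by icc_set_tag() on a path.
+ */
+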
+DEFINE_QNODE(qhm_a1noc_cfg, SDM845_MASTER_A1NOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup1, SDM845_MASTER_BLSP_1, 1, 4, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_tsif, SDM845_MASTER_TSIF, 1, 4, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, SDM845_MASTER_SDCC_2, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc4, SDM845_MASTER_SDCC_4, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_card, SDM845_MASTER_UFS_CARD, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, SDM845_MASTER_UFS_MEM, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_pcie_0, SDM845_MASTER_PCIE_0, 1, 8, SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, SDM845_MASTER_A2NOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, SDM845_MASTER_QDSS_BAM, 1, 4, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, SDM845_MASTER_BLSP_2, 1, 4, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_cnoc, SDM845_MASTER_CNOC_A2NOC, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, SDM845_MASTER_CRYPTO, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, SDM845_MASTER_IPA, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_pcie3_1, SDM845_MASTER_PCIE_1, 1, 8, SDM845_SLAVE_ANOC_PCIE_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SDM845_MASTER_QDSS_ETR, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, SDM845_MASTER_USB3_0, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_1, SDM845_MASTER_USB3_1, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SDM845_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SDM845_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, SDM845_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qhm_spdm, SDM845_MASTER_SPDM, 1, 4, SDM845_SLAVE_CNOC_A2NOC);
+DEFINE_QNODE(qhm_tic, SDM845_MASTER_TIC, 1, 4, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_CNOC_A2NOC, SDM845_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qnm_snoc, SDM845_MASTER_SNOC_CNOC, 1, 8, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(xm_qdss_dap, SDM845_MASTER_QDSS_DAP, 1, 8, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_CNOC_A2NOC, SDM845_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qhm_cnoc, SDM845_MASTER_CNOC_DC_NOC, 1, 4, SDM845_SLAVE_LLCC_CFG, SDM845_SLAVE_MEM_NOC_CFG);
+DEFINE_QNODE(acm_l3, SDM845_MASTER_APPSS_PROC, 1, 16, SDM845_SLAVE_GNOC_SNOC, SDM845_SLAVE_GNOC_MEM_NOC, SDM845_SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(pm_gnoc_cfg, SDM845_MASTER_GNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(llcc_mc, SDM845_MASTER_LLCC, 4, 4, SDM845_SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, SDM845_MASTER_TCU_0, 1, 8, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_memnoc_cfg, SDM845_MASTER_MEM_NOC_CFG, 1, 4, SDM845_SLAVE_MSS_PROC_MS_MPU_CFG, SDM845_SLAVE_SERVICE_MEM_NOC);
+DEFINE_QNODE(qnm_apps, SDM845_MASTER_GNOC_MEM_NOC, 2, 32, SDM845_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, SDM845_MASTER_MNOC_HF_MEM_NOC, 2, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SDM845_MASTER_MNOC_SF_MEM_NOC, 1, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SDM845_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDM845_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SDM845_MASTER_SNOC_SF_MEM_NOC, 1, 16, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, SDM845_MASTER_GFX3D, 2, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_mnoc_cfg, SDM845_MASTER_CNOC_MNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, SDM845_MASTER_CAMNOC_HF0, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, SDM845_MASTER_CAMNOC_HF1, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, SDM845_MASTER_CAMNOC_SF, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SDM845_MASTER_MDP0, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, SDM845_MASTER_MDP1, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SDM845_MASTER_ROTATOR, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, SDM845_MASTER_VIDEO_P0, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus1, SDM845_MASTER_VIDEO_P1, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, SDM845_MASTER_VIDEO_PROC, 1, 8, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_snoc_cfg, SDM845_MASTER_SNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, SDM845_MASTER_A1NOC_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, SDM845_MASTER_A2NOC_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_PCIE_0, SDM845_SLAVE_PCIE_1, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM, SDM845_SLAVE_TCU);
+DEFINE_QNODE(qnm_gladiator_sodv, SDM845_MASTER_GNOC_SNOC, 1, 8, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_IMEM, SDM845_SLAVE_PCIE_0, SDM845_SLAVE_PCIE_1, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM, SDM845_SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc, SDM845_MASTER_MEM_NOC_SNOC, 1, 8, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_IMEM, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_pcie_anoc, SDM845_MASTER_ANOC_PCIE_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, SDM845_MASTER_PIMEM, 1, 8, SDM845_SLAVE_SNOC_MEM_NOC_GC, SDM845_SLAVE_IMEM);
+DEFINE_QNODE(xm_gic, SDM845_MASTER_GIC, 1, 8, SDM845_SLAVE_SNOC_MEM_NOC_GC, SDM845_SLAVE_IMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SDM845_SLAVE_A1NOC_SNOC, 1, 16, SDM845_MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SDM845_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_pcie_a1noc_snoc, SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, SDM845_MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(qns_a2noc_snoc, SDM845_SLAVE_A2NOC_SNOC, 1, 16, SDM845_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(qns_pcie_snoc, SDM845_SLAVE_ANOC_PCIE_SNOC, 1, 16, SDM845_MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SDM845_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qns_camnoc_uncomp, SDM845_SLAVE_CAMNOC_UNCOMP, 1, 32);
+DEFINE_QNODE(qhs_a1_noc_cfg, SDM845_SLAVE_A1NOC_CFG, 1, 4, SDM845_MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SDM845_SLAVE_A2NOC_CFG, 1, 4, SDM845_MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_aop, SDM845_SLAVE_AOP, 1, 4);
+DEFINE_QNODE(qhs_aoss, SDM845_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_camera_cfg, SDM845_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SDM845_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_compute_dsp_cfg, SDM845_SLAVE_CDSP_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SDM845_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SDM845_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SDM845_SLAVE_DCC_CFG, 1, 4, SDM845_MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_ddrss_cfg, SDM845_SLAVE_CNOC_DDRSS, 1, 4);
+DEFINE_QNODE(qhs_display_cfg, SDM845_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_glm, SDM845_SLAVE_GLM, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SDM845_SLAVE_GFX3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_imem_cfg, SDM845_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SDM845_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mnoc_cfg, SDM845_SLAVE_CNOC_MNOC_CFG, 1, 4, SDM845_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_pcie0_cfg, SDM845_SLAVE_PCIE_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_pcie_gen3_cfg, SDM845_SLAVE_PCIE_1_CFG, 1, 4);
+DEFINE_QNODE(qhs_pdm, SDM845_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_phy_refgen_south, SDM845_SLAVE_SOUTH_PHY_CFG, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SDM845_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_prng, SDM845_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SDM845_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qupv3_north, SDM845_SLAVE_BLSP_2, 1, 4);
+DEFINE_QNODE(qhs_qupv3_south, SDM845_SLAVE_BLSP_1, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SDM845_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_sdc4, SDM845_SLAVE_SDCC_4, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SDM845_SLAVE_SNOC_CFG, 1, 4, SDM845_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spdm, SDM845_SLAVE_SPDM_WRAPPER, 1, 4);
+DEFINE_QNODE(qhs_spss_cfg, SDM845_SLAVE_SPSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SDM845_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm_north, SDM845_SLAVE_TLMM_NORTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm_south, SDM845_SLAVE_TLMM_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_tsif, SDM845_SLAVE_TSIF, 1, 4);
+DEFINE_QNODE(qhs_ufs_card_cfg, SDM845_SLAVE_UFS_CARD_CFG, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SDM845_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3_0, SDM845_SLAVE_USB3_0, 1, 4);
+DEFINE_QNODE(qhs_usb3_1, SDM845_SLAVE_USB3_1, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SDM845_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SDM845_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(qns_cnoc_a2noc, SDM845_SLAVE_CNOC_A2NOC, 1, 8, SDM845_MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SDM845_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(qhs_llcc, SDM845_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_memnoc, SDM845_SLAVE_MEM_NOC_CFG, 1, 4, SDM845_MASTER_MEM_NOC_CFG);
+DEFINE_QNODE(qns_gladiator_sodv, SDM845_SLAVE_GNOC_SNOC, 1, 8, SDM845_MASTER_GNOC_SNOC);
+DEFINE_QNODE(qns_gnoc_memnoc, SDM845_SLAVE_GNOC_MEM_NOC, 2, 32, SDM845_MASTER_GNOC_MEM_NOC);
+DEFINE_QNODE(srvc_gnoc, SDM845_SLAVE_SERVICE_GNOC, 1, 4);
+DEFINE_QNODE(ebi, SDM845_SLAVE_EBI1, 4, 4);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SDM845_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_apps_io, SDM845_SLAVE_MEM_NOC_GNOC, 1, 32);
+DEFINE_QNODE(qns_llcc, SDM845_SLAVE_LLCC, 4, 16, SDM845_MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SDM845_SLAVE_MEM_NOC_SNOC, 1, 8, SDM845_MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(srvc_memnoc, SDM845_SLAVE_SERVICE_MEM_NOC, 1, 4);
+DEFINE_QNODE(qns2_mem_noc, SDM845_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SDM845_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_hf, SDM845_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SDM845_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SDM845_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qhs_apss, SDM845_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qns_cnoc, SDM845_SLAVE_SNOC_CNOC, 1, 8, SDM845_MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_memnoc_gc, SDM845_SLAVE_SNOC_MEM_NOC_GC, 1, 8, SDM845_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_memnoc_sf, SDM845_SLAVE_SNOC_MEM_NOC_SF, 1, 16, SDM845_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SDM845_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(qxs_pcie, SDM845_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(qxs_pcie_gen3, SDM845_SLAVE_PCIE_1, 1, 8);
+DEFINE_QNODE(qxs_pimem, SDM845_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SDM845_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_qdss_stm, SDM845_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SDM845_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", false, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qxs_pcie);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qxs_pcie_gen3);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &srvc_aggre1_noc, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn11, "SN11", false, &srvc_aggre2_noc, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
+DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
+DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
+
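+/*
+ * As before, the bool argument is the BCM keepalive flag; the shared
+ * bcm-voter keeps a minimum vote on keepalive BCMs ("MC0", "SH0", "SN0"
+ * here) so the path to memory is never completely dropped.
+ */
+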
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+ &bcm_sn9,
};
-/**
- * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
- * known as Bus Clock Manager (BCM)
- * @name: the bcm node name used to fetch BCM data from command db
- * @type: latency or bandwidth bcm
- * @addr: address offsets used when voting to RPMH
- * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
- * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
- * @dirty: flag used to indicate whether the bcm needs to be committed
- * @keepalive: flag used to indicate whether a keepalive is required
- * @aux_data: auxiliary data used when calculating threshold values and
- * communicating with RPMh
- * @list: used to link to other bcms when compiling lists for commit
- * @num_nodes: total number of @nodes
- * @nodes: list of qcom_icc_nodes that this BCM encapsulates
- */
-struct qcom_icc_bcm {
- const char *name;
- u32 type;
- u32 addr;
- u64 vote_x[QCOM_ICC_NUM_BUCKETS];
- u64 vote_y[QCOM_ICC_NUM_BUCKETS];
- bool dirty;
- bool keepalive;
- struct bcm_db aux_data;
- struct list_head list;
- size_t num_nodes;
- struct qcom_icc_node *nodes[];
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+ [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
};
-struct qcom_icc_fabric {
- struct qcom_icc_node **nodes;
- size_t num_nodes;
+static const struct qcom_icc_desc sdm845_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
- size_t num_nodes;
- struct qcom_icc_bcm **bcms;
- size_t num_bcms;
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_sn11,
+ &bcm_qup0,
};
-#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \
- _numlinks, ...) \
- static struct qcom_icc_node _name = { \
- .id = _id, \
- .name = #_name, \
- .channels = _channels, \
- .buswidth = _buswidth, \
- .num_links = _numlinks, \
- .links = { __VA_ARGS__ }, \
- }
-
-DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC);
-DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC);
-DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG);
-DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC);
-DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC);
-DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1);
-DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC);
-DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
-DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
-DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
-DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM);
-DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
-DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
-DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC);
-DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0);
-DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
-DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC);
-DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
-DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0);
-DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0);
-DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG);
-DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG);
-DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0);
-DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0);
-DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0);
-DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0);
-DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0);
-DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0);
-DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0);
-DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0);
-DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0);
-DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0);
-DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0);
-DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0);
-DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0);
-DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0);
-DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0);
-DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0);
-DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0);
-DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0);
-DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0);
-DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0);
-DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC);
-DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0);
-DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG);
-DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC);
-DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC);
-DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0);
-DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0);
-DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0);
-DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0);
-DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC);
-DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC);
-DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0);
-DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0);
-DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0);
-DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC);
-DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0);
-DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0);
-DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0);
-DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0);
-DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0);
-DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0);
-DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0);
-
-#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) \
- static struct qcom_icc_bcm _name = { \
- .name = _bcmname, \
- .keepalive = _keepalive, \
- .num_nodes = _numnodes, \
- .nodes = { __VA_ARGS__ }, \
- }
-
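For reference, the first invocation below expands mechanically to the following struct (nothing here is new to the driver, it is just the macro above written out):

	static struct qcom_icc_bcm bcm_acv = {
		.name = "ACV",
		.keepalive = false,
		.num_nodes = 1,
		.nodes = { &ebi },
	};
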
-DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io);
-DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
-DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc);
-DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu);
-DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
-DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf);
-DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2);
-DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc);
-DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc);
-DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem);
-DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
-DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie);
-DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3);
-DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic);
-DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc);
-DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc);
-
-static struct qcom_icc_node *rsc_hlos_nodes[] = {
- [MASTER_APPSS_PROC] = &acm_l3,
- [MASTER_TCU_0] = &acm_tcu,
- [MASTER_LLCC] = &llcc_mc,
- [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
- [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
- [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
- [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
- [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
- [MASTER_BLSP_1] = &qhm_qup1,
- [MASTER_BLSP_2] = &qhm_qup2,
- [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
- [MASTER_SPDM] = &qhm_spdm,
- [MASTER_TIC] = &qhm_tic,
- [MASTER_TSIF] = &qhm_tsif,
- [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
- [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
- [MASTER_GNOC_MEM_NOC] = &qnm_apps,
[MASTER_CNOC_A2NOC] = &qnm_cnoc,
- [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
- [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
- [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
- [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
- [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
- [MASTER_SNOC_CNOC] = &qnm_snoc,
- [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
- [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
- [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
- [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
- [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
- [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
- [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
- [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[MASTER_CRYPTO] = &qxm_crypto,
- [MASTER_GFX3D] = &qxm_gpu,
[MASTER_IPA] = &qxm_ipa,
- [MASTER_MDP0] = &qxm_mdp0,
- [MASTER_MDP1] = &qxm_mdp1,
- [MASTER_PIMEM] = &qxm_pimem,
- [MASTER_ROTATOR] = &qxm_rot,
- [MASTER_VIDEO_P0] = &qxm_venus0,
- [MASTER_VIDEO_P1] = &qxm_venus1,
- [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
- [MASTER_GIC] = &xm_gic,
[MASTER_PCIE_1] = &xm_pcie3_1,
- [MASTER_PCIE_0] = &xm_pcie_0,
- [MASTER_QDSS_DAP] = &xm_qdss_dap,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
- [MASTER_SDCC_2] = &xm_sdc2,
- [MASTER_SDCC_4] = &xm_sdc4,
- [MASTER_UFS_CARD] = &xm_ufs_card,
- [MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB3_0] = &xm_usb3_0,
[MASTER_USB3_1] = &xm_usb3_1,
- [SLAVE_EBI1] = &ebi,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sdm845_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_SPDM] = &qhm_spdm,
+ [MASTER_TIC] = &qhm_tic,
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
[SLAVE_AOP] = &qhs_aop,
[SLAVE_AOSS] = &qhs_aoss,
- [SLAVE_APPSS] = &qhs_apss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
@@ -386,9 +252,6 @@ static struct qcom_icc_node *rsc_hlos_nodes[] = {
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
- [SLAVE_LLCC_CFG] = &qhs_llcc,
- [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
- [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
@@ -414,53 +277,122 @@ static struct qcom_icc_node *rsc_hlos_nodes[] = {
[SLAVE_USB3_1] = &qhs_usb3_1,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
- [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
- [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
- [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
- [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
- [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
- [SLAVE_SNOC_CNOC] = &qns_cnoc,
[SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static const struct qcom_icc_desc sdm845_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm *dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
+};
+
+static const struct qcom_icc_desc sdm845_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gladiator_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *gladiator_noc_nodes[] = {
+ [MASTER_APPSS_PROC] = &acm_l3,
+ [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
[SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
[SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
- [SLAVE_LLCC] = &qns_llcc,
- [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
- [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
- [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
- [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
- [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
- [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
- [SLAVE_IMEM] = &qxs_imem,
- [SLAVE_PCIE_0] = &qxs_pcie,
- [SLAVE_PCIE_1] = &qxs_pcie_gen3,
- [SLAVE_PIMEM] = &qxs_pimem,
- [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
- [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
- [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
[SLAVE_SERVICE_GNOC] = &srvc_gnoc,
- [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
- [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
- [SLAVE_SERVICE_SNOC] = &srvc_snoc,
- [SLAVE_QDSS_STM] = &xs_qdss_stm,
- [SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
- &bcm_acv,
+static const struct qcom_icc_desc sdm845_gladiator_noc = {
+ .nodes = gladiator_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gladiator_noc_nodes),
+ .bcms = gladiator_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gladiator_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mem_noc_bcms[] = {
&bcm_mc0,
+ &bcm_acv,
&bcm_sh0,
- &bcm_mm0,
&bcm_sh1,
- &bcm_mm1,
&bcm_sh2,
- &bcm_mm2,
&bcm_sh3,
- &bcm_mm3,
&bcm_sh5,
+};
+
+static struct qcom_icc_node *mem_noc_nodes[] = {
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
+ [MASTER_GNOC_MEM_NOC] = &qnm_apps,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GFX3D] = &qxm_gpu,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sdm845_mem_noc = {
+ .nodes = mem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mem_noc_nodes),
+ .bcms = mem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+ &bcm_mm3,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_MDP1] = &qxm_mdp1,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+};
+
+static const struct qcom_icc_desc sdm845_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn0,
- &bcm_ce0,
- &bcm_cn0,
- &bcm_qup0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn3,
@@ -476,297 +408,34 @@ static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
&bcm_sn15,
};
-static struct qcom_icc_desc sdm845_rsc_hlos = {
- .nodes = rsc_hlos_nodes,
- .num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
- .bcms = rsc_hlos_bcms,
- .num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
+ [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &qxs_pcie,
+ [SLAVE_PCIE_1] = &qxs_pcie_gen3,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
-{
- struct qcom_icc_node *qn;
- const struct bcm_db *data;
- size_t data_count;
- int i;
-
- bcm->addr = cmd_db_read_addr(bcm->name);
- if (!bcm->addr) {
- dev_err(dev, "%s could not find RPMh address\n",
- bcm->name);
- return -EINVAL;
- }
-
- data = cmd_db_read_aux_data(bcm->name, &data_count);
- if (IS_ERR(data)) {
- dev_err(dev, "%s command db read error (%ld)\n",
- bcm->name, PTR_ERR(data));
- return PTR_ERR(data);
- }
- if (!data_count) {
- dev_err(dev, "%s command db missing or partial aux data\n",
- bcm->name);
- return -EINVAL;
- }
-
- bcm->aux_data.unit = le32_to_cpu(data->unit);
- bcm->aux_data.width = le16_to_cpu(data->width);
- bcm->aux_data.vcd = data->vcd;
- bcm->aux_data.reserved = data->reserved;
-
- /*
- * Link Qnodes to their respective BCMs
- */
- for (i = 0; i < bcm->num_nodes; i++) {
- qn = bcm->nodes[i];
- qn->bcms[qn->num_bcms] = bcm;
- qn->num_bcms++;
- }
-
- return 0;
-}
-
-inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
- u32 addr, bool commit)
-{
- bool valid = true;
-
- if (!cmd)
- return;
-
- if (vote_x == 0 && vote_y == 0)
- valid = false;
-
- if (vote_x > BCM_TCS_CMD_VOTE_MASK)
- vote_x = BCM_TCS_CMD_VOTE_MASK;
-
- if (vote_y > BCM_TCS_CMD_VOTE_MASK)
- vote_y = BCM_TCS_CMD_VOTE_MASK;
-
- cmd->addr = addr;
- cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
-
- /*
-	 * Set the wait for completion flag on commands that need to be completed
- * before the next command.
- */
- if (commit)
- cmd->wait = true;
-}
-
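A minimal usage sketch of tcs_cmd_gen() as defined above; the vote values and BCM address are invented for illustration, and BCM_TCS_CMD packs them per its masks in the TCS header:

	struct tcs_cmd cmd;

	/* Vote 100 units on both x and y for a hypothetical BCM at 0x50000 */
	tcs_cmd_gen(&cmd, 100, 100, 0x50000, true);
	/* cmd.wait is now true because commit was requested */
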
-static void tcs_list_gen(struct list_head *bcm_list, int bucket,
- struct tcs_cmd tcs_list[SDM845_MAX_VCD],
- int n[SDM845_MAX_VCD])
-{
- struct qcom_icc_bcm *bcm;
- bool commit;
- size_t idx = 0, batch = 0, cur_vcd_size = 0;
-
- memset(n, 0, sizeof(int) * SDM845_MAX_VCD);
-
- list_for_each_entry(bcm, bcm_list, list) {
- commit = false;
- cur_vcd_size++;
- if ((list_is_last(&bcm->list, bcm_list)) ||
- bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
- commit = true;
- cur_vcd_size = 0;
- }
- tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
- bcm->vote_y[bucket], bcm->addr, commit);
- idx++;
- n[batch]++;
- /*
-	 * Batch the BCMs in such a way that we do not split them across
- * multiple payloads when they are under the same VCD. This is
- * to ensure that every BCM is committed since we only set the
- * commit bit on the last BCM request of every VCD.
- */
- if (n[batch] >= MAX_RPMH_PAYLOAD) {
- if (!commit) {
- n[batch] -= cur_vcd_size;
- n[batch + 1] = cur_vcd_size;
- }
- batch++;
- }
- }
-}
-
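A worked pass through the loop above, with invented VCDs, shows where the commit bit lands:

	/*
	 * bcm_list = [A, B, C] with aux_data.vcd = {0, 0, 1}:
	 *
	 *   A: next entry shares VCD 0  -> commit = false
	 *   B: next entry starts VCD 1  -> commit = true  (closes VCD 0)
	 *   C: last entry in the list   -> commit = true  (closes VCD 1)
	 *
	 * tcs_cmd_gen() therefore sets cmd->wait on B and C only.
	 */
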
-static void bcm_aggregate(struct qcom_icc_bcm *bcm)
-{
- size_t i, bucket;
- u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
- u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
- u64 temp;
-
- for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
- for (i = 0; i < bcm->num_nodes; i++) {
- temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
- agg_avg[bucket] = max(agg_avg[bucket], temp);
-
- temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth);
- agg_peak[bucket] = max(agg_peak[bucket], temp);
- }
-
- temp = agg_avg[bucket] * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_x[bucket] = temp;
-
- temp = agg_peak[bucket] * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_y[bucket] = temp;
- }
-
- if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
- bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
- bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
- bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
- bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
- bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
- }
-
- bcm->dirty = false;
-}
-
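The scaling above is easier to see with numbers (all values invented for illustration):

	/*
	 * node: sum_avg = 1000, buswidth = 4, channels = 2
	 * bcm:  aux_data.width = 8, aux_data.unit = 1000
	 *
	 *   avg    = 1000 * 8 / (4 * 2) = 1000
	 *   vote_x = 1000 * 1000 / 1000 = 1000
	 *
	 * The peak path applies the same width scaling but divides by
	 * buswidth alone and takes max_peak instead of sum_avg.
	 */
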
-static void qcom_icc_pre_aggregate(struct icc_node *node)
-{
- size_t i;
- struct qcom_icc_node *qn;
-
- qn = node->data;
-
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
- qn->sum_avg[i] = 0;
- qn->max_peak[i] = 0;
- }
-}
-
-static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
- u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
-{
- size_t i;
- struct qcom_icc_node *qn;
-
- qn = node->data;
-
- if (!tag)
- tag = QCOM_ICC_TAG_ALWAYS;
-
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
- if (tag & BIT(i)) {
- qn->sum_avg[i] += avg_bw;
- qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
- }
- }
-
- *agg_avg += avg_bw;
- *agg_peak = max_t(u32, *agg_peak, peak_bw);
-
- for (i = 0; i < qn->num_bcms; i++)
- qn->bcms[i]->dirty = true;
-
- return 0;
-}
-
-static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
-{
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- struct tcs_cmd cmds[SDM845_MAX_BCMS];
- struct list_head commit_list;
- int commit_idx[SDM845_MAX_VCD];
- int ret = 0, i;
-
- if (!src)
- node = dst;
- else
- node = src;
-
- qp = to_qcom_provider(node->provider);
-
- INIT_LIST_HEAD(&commit_list);
-
- for (i = 0; i < qp->num_bcms; i++) {
- if (qp->bcms[i]->dirty) {
- bcm_aggregate(qp->bcms[i]);
- list_add_tail(&qp->bcms[i]->list, &commit_list);
- }
- }
-
- /*
-	 * Construct the command list from a pre-ordered list of BCMs,
-	 * grouped by VCD.
- */
- tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
-
- if (!commit_idx[0])
- return ret;
-
- ret = rpmh_invalidate(qp->dev);
- if (ret) {
- pr_err("Error invalidating RPMH client (%d)\n", ret);
- return ret;
- }
-
- ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE,
- cmds, commit_idx);
- if (ret) {
- pr_err("Error sending AMC RPMH requests (%d)\n", ret);
- return ret;
- }
-
- INIT_LIST_HEAD(&commit_list);
-
- for (i = 0; i < qp->num_bcms; i++) {
- /*
- * Only generate WAKE and SLEEP commands if a resource's
- * requirements change as the execution environment transitions
- * between different power states.
- */
- if (qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_WAKE] !=
- qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
- qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_WAKE] !=
- qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_SLEEP]) {
- list_add_tail(&qp->bcms[i]->list, &commit_list);
- }
- }
-
- if (list_empty(&commit_list))
- return ret;
-
- tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
-
- ret = rpmh_write_batch(qp->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
- if (ret) {
- pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
- return ret;
- }
-
- tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
-
- ret = rpmh_write_batch(qp->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
- if (ret) {
- pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
- return ret;
- }
-
- return ret;
-}
-
-static int cmp_vcd(const void *_l, const void *_r)
-{
- const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l;
- const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r;
-
- if (l[0]->aux_data.vcd < r[0]->aux_data.vcd)
- return -1;
- else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd)
- return 0;
- else
- return 1;
-}
+static const struct qcom_icc_desc sdm845_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
static int qnoc_probe(struct platform_device *pdev)
{
@@ -779,7 +448,7 @@ static int qnoc_probe(struct platform_device *pdev)
size_t num_nodes, i;
int ret;
- desc = of_device_get_match_data(&pdev->dev);
+ desc = device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
@@ -808,6 +477,12 @@ static int qnoc_probe(struct platform_device *pdev)
qp->bcms = desc->bcms;
qp->num_bcms = desc->num_bcms;
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter)) {
+ dev_err(&pdev->dev, "bcm_voter err:%ld\n", PTR_ERR(qp->voter));
+ return PTR_ERR(qp->voter);
+ }
+
ret = icc_provider_add(provider);
if (ret) {
dev_err(&pdev->dev, "error adding interconnect provider\n");
@@ -817,6 +492,9 @@ static int qnoc_probe(struct platform_device *pdev)
for (i = 0; i < num_nodes; i++) {
size_t j;
+ if (!qnodes[i])
+ continue;
+
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
@@ -827,10 +505,6 @@ static int qnoc_probe(struct platform_device *pdev)
node->data = qnodes[i];
icc_node_add(node, provider);
- dev_dbg(&pdev->dev, "registered node %p %s %d\n", node,
- qnodes[i]->name, node->id);
-
- /* populate links */
for (j = 0; j < qnodes[i]->num_links; j++)
icc_link_create(node, qnodes[i]->links[j]);
@@ -841,19 +515,9 @@ static int qnoc_probe(struct platform_device *pdev)
for (i = 0; i < qp->num_bcms; i++)
qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
- /*
-	 * Pre-sort the BCMs by VCD for ease of generating a command list
-	 * that groups the BCMs with the same VCD together. VCDs are numbered
-	 * with the lowest being the most expensive time-wise, ensuring that
-	 * those commands are sent earliest in the queue.
- */
- sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL);
-
platform_set_drvdata(pdev, qp);
- dev_dbg(&pdev->dev, "Registered SDM845 ICC\n");
-
- return ret;
+ return 0;
err:
icc_nodes_remove(provider);
icc_provider_del(provider);
@@ -869,8 +533,23 @@ static int qnoc_remove(struct platform_device *pdev)
}
static const struct of_device_id qnoc_of_match[] = {
- { .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos },
- { },
+ { .compatible = "qcom,sdm845-aggre1-noc",
+ .data = &sdm845_aggre1_noc},
+ { .compatible = "qcom,sdm845-aggre2-noc",
+ .data = &sdm845_aggre2_noc},
+ { .compatible = "qcom,sdm845-config-noc",
+ .data = &sdm845_config_noc},
+ { .compatible = "qcom,sdm845-dc-noc",
+ .data = &sdm845_dc_noc},
+ { .compatible = "qcom,sdm845-gladiator-noc",
+ .data = &sdm845_gladiator_noc},
+ { .compatible = "qcom,sdm845-mem-noc",
+ .data = &sdm845_mem_noc},
+ { .compatible = "qcom,sdm845-mmss-noc",
+ .data = &sdm845_mmss_noc},
+ { .compatible = "qcom,sdm845-system-noc",
+ .data = &sdm845_system_noc},
+ { }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
diff --git a/drivers/interconnect/qcom/sdm845.h b/drivers/interconnect/qcom/sdm845.h
new file mode 100644
index 000000000000..776e9c2acb27
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm845.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDM845_H__
+#define __DRIVERS_INTERCONNECT_QCOM_SDM845_H__
+
+#define SDM845_MASTER_A1NOC_CFG 1
+#define SDM845_MASTER_BLSP_1 2
+#define SDM845_MASTER_TSIF 3
+#define SDM845_MASTER_SDCC_2 4
+#define SDM845_MASTER_SDCC_4 5
+#define SDM845_MASTER_UFS_CARD 6
+#define SDM845_MASTER_UFS_MEM 7
+#define SDM845_MASTER_PCIE_0 8
+#define SDM845_MASTER_A2NOC_CFG 9
+#define SDM845_MASTER_QDSS_BAM 10
+#define SDM845_MASTER_BLSP_2 11
+#define SDM845_MASTER_CNOC_A2NOC 12
+#define SDM845_MASTER_CRYPTO 13
+#define SDM845_MASTER_IPA 14
+#define SDM845_MASTER_PCIE_1 15
+#define SDM845_MASTER_QDSS_ETR 16
+#define SDM845_MASTER_USB3_0 17
+#define SDM845_MASTER_USB3_1 18
+#define SDM845_MASTER_CAMNOC_HF0_UNCOMP 19
+#define SDM845_MASTER_CAMNOC_HF1_UNCOMP 20
+#define SDM845_MASTER_CAMNOC_SF_UNCOMP 21
+#define SDM845_MASTER_SPDM 22
+#define SDM845_MASTER_TIC 23
+#define SDM845_MASTER_SNOC_CNOC 24
+#define SDM845_MASTER_QDSS_DAP 25
+#define SDM845_MASTER_CNOC_DC_NOC 26
+#define SDM845_MASTER_APPSS_PROC 27
+#define SDM845_MASTER_GNOC_CFG 28
+#define SDM845_MASTER_LLCC 29
+#define SDM845_MASTER_TCU_0 30
+#define SDM845_MASTER_MEM_NOC_CFG 31
+#define SDM845_MASTER_GNOC_MEM_NOC 32
+#define SDM845_MASTER_MNOC_HF_MEM_NOC 33
+#define SDM845_MASTER_MNOC_SF_MEM_NOC 34
+#define SDM845_MASTER_SNOC_GC_MEM_NOC 35
+#define SDM845_MASTER_SNOC_SF_MEM_NOC 36
+#define SDM845_MASTER_GFX3D 37
+#define SDM845_MASTER_CNOC_MNOC_CFG 38
+#define SDM845_MASTER_CAMNOC_HF0 39
+#define SDM845_MASTER_CAMNOC_HF1 40
+#define SDM845_MASTER_CAMNOC_SF 41
+#define SDM845_MASTER_MDP0 42
+#define SDM845_MASTER_MDP1 43
+#define SDM845_MASTER_ROTATOR 44
+#define SDM845_MASTER_VIDEO_P0 45
+#define SDM845_MASTER_VIDEO_P1 46
+#define SDM845_MASTER_VIDEO_PROC 47
+#define SDM845_MASTER_SNOC_CFG 48
+#define SDM845_MASTER_A1NOC_SNOC 49
+#define SDM845_MASTER_A2NOC_SNOC 50
+#define SDM845_MASTER_GNOC_SNOC 51
+#define SDM845_MASTER_MEM_NOC_SNOC 52
+#define SDM845_MASTER_ANOC_PCIE_SNOC 53
+#define SDM845_MASTER_PIMEM 54
+#define SDM845_MASTER_GIC 55
+#define SDM845_SLAVE_A1NOC_SNOC 56
+#define SDM845_SLAVE_SERVICE_A1NOC 57
+#define SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC 58
+#define SDM845_SLAVE_A2NOC_SNOC 59
+#define SDM845_SLAVE_ANOC_PCIE_SNOC 60
+#define SDM845_SLAVE_SERVICE_A2NOC 61
+#define SDM845_SLAVE_CAMNOC_UNCOMP 62
+#define SDM845_SLAVE_A1NOC_CFG 63
+#define SDM845_SLAVE_A2NOC_CFG 64
+#define SDM845_SLAVE_AOP 65
+#define SDM845_SLAVE_AOSS 66
+#define SDM845_SLAVE_CAMERA_CFG 67
+#define SDM845_SLAVE_CLK_CTL 68
+#define SDM845_SLAVE_CDSP_CFG 69
+#define SDM845_SLAVE_RBCPR_CX_CFG 70
+#define SDM845_SLAVE_CRYPTO_0_CFG 71
+#define SDM845_SLAVE_DCC_CFG 72
+#define SDM845_SLAVE_CNOC_DDRSS 73
+#define SDM845_SLAVE_DISPLAY_CFG 74
+#define SDM845_SLAVE_GLM 75
+#define SDM845_SLAVE_GFX3D_CFG 76
+#define SDM845_SLAVE_IMEM_CFG 77
+#define SDM845_SLAVE_IPA_CFG 78
+#define SDM845_SLAVE_CNOC_MNOC_CFG 79
+#define SDM845_SLAVE_PCIE_0_CFG 80
+#define SDM845_SLAVE_PCIE_1_CFG 81
+#define SDM845_SLAVE_PDM 82
+#define SDM845_SLAVE_SOUTH_PHY_CFG 83
+#define SDM845_SLAVE_PIMEM_CFG 84
+#define SDM845_SLAVE_PRNG 85
+#define SDM845_SLAVE_QDSS_CFG 86
+#define SDM845_SLAVE_BLSP_2 87
+#define SDM845_SLAVE_BLSP_1 88
+#define SDM845_SLAVE_SDCC_2 89
+#define SDM845_SLAVE_SDCC_4 90
+#define SDM845_SLAVE_SNOC_CFG 91
+#define SDM845_SLAVE_SPDM_WRAPPER 92
+#define SDM845_SLAVE_SPSS_CFG 93
+#define SDM845_SLAVE_TCSR 94
+#define SDM845_SLAVE_TLMM_NORTH 95
+#define SDM845_SLAVE_TLMM_SOUTH 96
+#define SDM845_SLAVE_TSIF 97
+#define SDM845_SLAVE_UFS_CARD_CFG 98
+#define SDM845_SLAVE_UFS_MEM_CFG 99
+#define SDM845_SLAVE_USB3_0 100
+#define SDM845_SLAVE_USB3_1 101
+#define SDM845_SLAVE_VENUS_CFG 102
+#define SDM845_SLAVE_VSENSE_CTRL_CFG 103
+#define SDM845_SLAVE_CNOC_A2NOC 104
+#define SDM845_SLAVE_SERVICE_CNOC 105
+#define SDM845_SLAVE_LLCC_CFG 106
+#define SDM845_SLAVE_MEM_NOC_CFG 107
+#define SDM845_SLAVE_GNOC_SNOC 108
+#define SDM845_SLAVE_GNOC_MEM_NOC 109
+#define SDM845_SLAVE_SERVICE_GNOC 110
+#define SDM845_SLAVE_EBI1 111
+#define SDM845_SLAVE_MSS_PROC_MS_MPU_CFG 112
+#define SDM845_SLAVE_MEM_NOC_GNOC 113
+#define SDM845_SLAVE_LLCC 114
+#define SDM845_SLAVE_MEM_NOC_SNOC 115
+#define SDM845_SLAVE_SERVICE_MEM_NOC 116
+#define SDM845_SLAVE_MNOC_SF_MEM_NOC 117
+#define SDM845_SLAVE_MNOC_HF_MEM_NOC 118
+#define SDM845_SLAVE_SERVICE_MNOC 119
+#define SDM845_SLAVE_APPSS 120
+#define SDM845_SLAVE_SNOC_CNOC 121
+#define SDM845_SLAVE_SNOC_MEM_NOC_GC 122
+#define SDM845_SLAVE_SNOC_MEM_NOC_SF 123
+#define SDM845_SLAVE_IMEM 124
+#define SDM845_SLAVE_PCIE_0 125
+#define SDM845_SLAVE_PCIE_1 126
+#define SDM845_SLAVE_PIMEM 127
+#define SDM845_SLAVE_SERVICE_SNOC 128
+#define SDM845_SLAVE_QDSS_STM 129
+#define SDM845_SLAVE_TCU 130
+#define SDM845_MASTER_OSM_L3_APPS 131
+#define SDM845_SLAVE_OSM_L3 132
+
+#endif /* __DRIVERS_INTERCONNECT_QCOM_SDM845_H__ */
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d2fade984999..2ab07ce17abb 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -188,6 +188,7 @@ config INTEL_IOMMU
select NEED_DMA_MAP_STATE
select DMAR_TABLE
select SWIOTLB
+ select IOASID
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
@@ -273,7 +274,7 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
- depends on ARM && MMU
+ depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
depends on ARCH_OMAP2PLUS || COMPILE_TEST
select IOMMU_API
---help---
@@ -291,7 +292,7 @@ config OMAP_IOMMU_DEBUG
config ROCKCHIP_IOMMU
bool "Rockchip IOMMU Support"
- depends on ARM || ARM64
+ depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC))
depends on ARCH_ROCKCHIP || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -325,7 +326,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && MMU
+ depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -370,7 +371,7 @@ config SPAPR_TCE_IOMMU
# ARM IOMMU support
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on (ARM64 || ARM) && MMU
+ depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -440,7 +441,7 @@ config S390_IOMMU
config S390_CCW_IOMMU
bool "S390 CCW IOMMU Support"
- depends on S390 && CCW
+ depends on S390 && CCW || COMPILE_TEST
select IOMMU_API
help
Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -448,7 +449,7 @@ config S390_CCW_IOMMU
config S390_AP_IOMMU
bool "S390 AP IOMMU Support"
- depends on S390 && ZCRYPT
+ depends on S390 && ZCRYPT || COMPILE_TEST
select IOMMU_API
help
Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -456,7 +457,7 @@ config S390_AP_IOMMU
config MTK_IOMMU
bool "MTK IOMMU Support"
- depends on ARM || ARM64
+ depends on HAS_DMA
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
@@ -506,8 +507,8 @@ config HYPERV_IOMMU
guests to run with x2APIC mode enabled.
config VIRTIO_IOMMU
- bool "Virtio IOMMU driver"
- depends on VIRTIO=y
+ tristate "Virtio IOMMU driver"
+ depends on VIRTIO
depends on ARM64
select IOMMU_API
select INTERVAL_TREE
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 20cce366e951..2883ac389abb 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -101,6 +101,8 @@ struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
+static void update_and_flush_device_table(struct protection_domain *domain,
+ struct domain_pgtable *pgtable);
/****************************************************************************
*
@@ -125,7 +127,8 @@ static inline int get_acpihid_device_id(struct device *dev,
return -ENODEV;
list_for_each_entry(p, &acpihid_map, list) {
- if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) {
+ if (acpi_dev_hid_uid_match(adev, p->hid,
+ p->uid[0] ? p->uid : NULL)) {
if (entry)
*entry = p;
return p->devid;
@@ -151,6 +154,26 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
+static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
+ struct domain_pgtable *pgtable)
+{
+ u64 pt_root = atomic64_read(&domain->pt_root);
+
+ pgtable->root = (u64 *)(pt_root & PAGE_MASK);
+ pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
+}
+
+static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
+{
+ u64 pt_root;
+
+ /* lowest 3 bits encode pgtable mode */
+ pt_root = mode & 7;
+ pt_root |= (u64)root;
+
+ return pt_root;
+}
+
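A hedged sketch of the round-trip the two helpers above imply: the root is page-aligned, so PAGE_MASK recovers the pointer while the low three bits carry the mode. The surrounding calls are illustrative, assuming some struct protection_domain *domain is in scope:

	u64 *root = (u64 *)get_zeroed_page(GFP_KERNEL);	/* page-aligned */
	u64 pt_root = amd_iommu_domain_encode_pgtable(root, PAGE_MODE_3_LEVEL);
	struct domain_pgtable pgtable;

	atomic64_set(&domain->pt_root, pt_root);
	amd_iommu_domain_get_pgtable(domain, &pgtable);
	/* pgtable.root == root && pgtable.mode == PAGE_MODE_3_LEVEL */
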
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -1397,13 +1420,18 @@ static struct page *free_sub_pt(unsigned long root, int mode,
static void free_pagetable(struct protection_domain *domain)
{
- unsigned long root = (unsigned long)domain->pt_root;
+ struct domain_pgtable pgtable;
struct page *freelist = NULL;
+ unsigned long root;
- BUG_ON(domain->mode < PAGE_MODE_NONE ||
- domain->mode > PAGE_MODE_6_LEVEL);
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ atomic64_set(&domain->pt_root, 0);
- freelist = free_sub_pt(root, domain->mode, freelist);
+ BUG_ON(pgtable.mode < PAGE_MODE_NONE ||
+ pgtable.mode > PAGE_MODE_6_LEVEL);
+
+ root = (unsigned long)pgtable.root;
+ freelist = free_sub_pt(root, pgtable.mode, freelist);
free_page_list(freelist);
}
@@ -1417,24 +1445,39 @@ static bool increase_address_space(struct protection_domain *domain,
unsigned long address,
gfp_t gfp)
{
+ struct domain_pgtable pgtable;
unsigned long flags;
- bool ret = false;
- u64 *pte;
+ bool ret = true;
+ u64 *pte, root;
spin_lock_irqsave(&domain->lock, flags);
- if (address <= PM_LEVEL_SIZE(domain->mode) ||
- WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+ if (address <= PM_LEVEL_SIZE(pgtable.mode))
+ goto out;
+
+ ret = false;
+ if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
goto out;
pte = (void *)get_zeroed_page(gfp);
if (!pte)
goto out;
- *pte = PM_LEVEL_PDE(domain->mode,
- iommu_virt_to_phys(domain->pt_root));
- domain->pt_root = pte;
- domain->mode += 1;
+ *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+
+ pgtable.root = pte;
+ pgtable.mode += 1;
+ update_and_flush_device_table(domain, &pgtable);
+ domain_flush_complete(domain);
+
+ /*
+ * Device Table needs to be updated and flushed before the new root can
+ * be published.
+ */
+ root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
+ atomic64_set(&domain->pt_root, root);
ret = true;
@@ -1451,16 +1494,29 @@ static u64 *alloc_pte(struct protection_domain *domain,
gfp_t gfp,
bool *updated)
{
+ struct domain_pgtable pgtable;
int level, end_lvl;
u64 *pte, *page;
BUG_ON(!is_power_of_2(page_size));
- while (address > PM_LEVEL_SIZE(domain->mode))
- *updated = increase_address_space(domain, address, gfp) || *updated;
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+ while (address > PM_LEVEL_SIZE(pgtable.mode)) {
+ /*
+ * Return an error if there is no memory to update the
+ * page-table.
+ */
+ if (!increase_address_space(domain, address, gfp))
+ return NULL;
+
+ /* Read new values to check if update was successful */
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ }
+
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ level = pgtable.mode - 1;
+ pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -1536,16 +1592,19 @@ static u64 *fetch_pte(struct protection_domain *domain,
unsigned long address,
unsigned long *page_size)
{
+ struct domain_pgtable pgtable;
int level;
u64 *pte;
*page_size = 0;
- if (address > PM_LEVEL_SIZE(domain->mode))
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+ if (address > PM_LEVEL_SIZE(pgtable.mode))
return NULL;
- level = domain->mode - 1;
- pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ level = pgtable.mode - 1;
+ pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -1660,7 +1719,13 @@ out:
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- update_domain(dom);
+ /*
+	 * Flush domain TLB(s) and wait for completion. Any Device Table
+	 * updates and flushing already happened in
+ * increase_address_space().
+ */
+ domain_flush_tlb_pde(dom);
+ domain_flush_complete(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -1806,6 +1871,7 @@ static void dma_ops_domain_free(struct protection_domain *domain)
static struct protection_domain *dma_ops_domain_alloc(void)
{
struct protection_domain *domain;
+ u64 *pt_root, root;
domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
if (!domain)
@@ -1814,12 +1880,14 @@ static struct protection_domain *dma_ops_domain_alloc(void)
if (protection_domain_init(domain))
goto free_domain;
- domain->mode = PAGE_MODE_3_LEVEL;
- domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- domain->flags = PD_DMA_OPS_MASK;
- if (!domain->pt_root)
+ pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pt_root)
goto free_domain;
+ root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
+ atomic64_set(&domain->pt_root, root);
+ domain->flags = PD_DMA_OPS_MASK;
+
if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
goto free_domain;
@@ -1841,16 +1909,17 @@ static bool dma_ops_domain(struct protection_domain *domain)
}
static void set_dte_entry(u16 devid, struct protection_domain *domain,
+ struct domain_pgtable *pgtable,
bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
- if (domain->mode != PAGE_MODE_NONE)
- pte_root = iommu_virt_to_phys(domain->pt_root);
+ if (pgtable->mode != PAGE_MODE_NONE)
+ pte_root = iommu_virt_to_phys(pgtable->root);
- pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+ pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
@@ -1923,6 +1992,7 @@ static void clear_dte_entry(u16 devid)
static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
+ struct domain_pgtable pgtable;
struct amd_iommu *iommu;
bool ats;
@@ -1938,7 +2008,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ set_dte_entry(dev_data->devid, domain, &pgtable,
+ ats, dev_data->iommu_v2);
clone_aliases(dev_data->pdev);
device_flush_dte(dev_data);
@@ -2249,23 +2321,36 @@ static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
*
*****************************************************************************/
-static void update_device_table(struct protection_domain *domain)
+static void update_device_table(struct protection_domain *domain,
+ struct domain_pgtable *pgtable)
{
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
- set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
- dev_data->iommu_v2);
+ set_dte_entry(dev_data->devid, domain, pgtable,
+ dev_data->ats.enabled, dev_data->iommu_v2);
clone_aliases(dev_data->pdev);
}
}
+static void update_and_flush_device_table(struct protection_domain *domain,
+ struct domain_pgtable *pgtable)
+{
+ update_device_table(domain, pgtable);
+ domain_flush_devices(domain);
+}
+
static void update_domain(struct protection_domain *domain)
{
- update_device_table(domain);
+ struct domain_pgtable pgtable;
- domain_flush_devices(domain);
+ /* Update device table */
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ update_and_flush_device_table(domain, &pgtable);
+
+ /* Flush domain TLB(s) and wait for completion */
domain_flush_tlb_pde(domain);
+ domain_flush_complete(domain);
}
int __init amd_iommu_init_api(void)
@@ -2375,6 +2460,7 @@ out_err:
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
+ u64 *pt_root, root;
switch (type) {
case IOMMU_DOMAIN_UNMANAGED:
@@ -2382,13 +2468,15 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
if (!pdomain)
return NULL;
- pdomain->mode = PAGE_MODE_3_LEVEL;
- pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pdomain->pt_root) {
+ pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pt_root) {
protection_domain_free(pdomain);
return NULL;
}
+ root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
+ atomic64_set(&pdomain->pt_root, root);
+
pdomain->domain.geometry.aperture_start = 0;
pdomain->domain.geometry.aperture_end = ~0ULL;
pdomain->domain.geometry.force_aperture = true;
@@ -2406,7 +2494,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
if (!pdomain)
return NULL;
- pdomain->mode = PAGE_MODE_NONE;
+ atomic64_set(&pdomain->pt_root, PAGE_MODE_NONE);
break;
default:
return NULL;
@@ -2418,6 +2506,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
+ struct domain_pgtable pgtable;
domain = to_pdomain(dom);
@@ -2435,7 +2524,9 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
dma_ops_domain_free(domain);
break;
default:
- if (domain->mode != PAGE_MODE_NONE)
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+ if (pgtable.mode != PAGE_MODE_NONE)
free_pagetable(domain);
if (domain->flags & PD_IOMMUV2_MASK)
@@ -2518,10 +2609,12 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
+ struct domain_pgtable pgtable;
int prot = 0;
int ret;
- if (domain->mode == PAGE_MODE_NONE)
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ if (pgtable.mode == PAGE_MODE_NONE)
return -EINVAL;
if (iommu_prot & IOMMU_READ)
@@ -2541,8 +2634,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
+ struct domain_pgtable pgtable;
- if (domain->mode == PAGE_MODE_NONE)
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ if (pgtable.mode == PAGE_MODE_NONE)
return 0;
return iommu_unmap_page(domain, iova, page_size);
@@ -2553,9 +2648,11 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
{
struct protection_domain *domain = to_pdomain(dom);
unsigned long offset_mask, pte_pgsize;
+ struct domain_pgtable pgtable;
u64 *pte, __pte;
- if (domain->mode == PAGE_MODE_NONE)
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ if (pgtable.mode == PAGE_MODE_NONE)
return iova;
pte = fetch_pte(domain, iova, &pte_pgsize);
@@ -2708,16 +2805,26 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
struct protection_domain *domain = to_pdomain(dom);
+ struct domain_pgtable pgtable;
unsigned long flags;
+ u64 pt_root;
spin_lock_irqsave(&domain->lock, flags);
+	/* First save pgtable configuration */
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+
/* Update data structure */
- domain->mode = PAGE_MODE_NONE;
+ pt_root = amd_iommu_domain_encode_pgtable(NULL, PAGE_MODE_NONE);
+ atomic64_set(&domain->pt_root, pt_root);
/* Make changes visible to IOMMUs */
update_domain(domain);
+	/* Restore old pgtable in domain->pt_root to free page-table */
+ pt_root = amd_iommu_domain_encode_pgtable(pgtable.root, pgtable.mode);
+ atomic64_set(&domain->pt_root, pt_root);
+
/* Page-table is not visible to IOMMU anymore, so free it */
free_pagetable(domain);
@@ -2908,9 +3015,11 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
static int __set_gcr3(struct protection_domain *domain, int pasid,
unsigned long cr3)
{
+ struct domain_pgtable pgtable;
u64 *pte;
- if (domain->mode != PAGE_MODE_NONE)
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ if (pgtable.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
@@ -2924,9 +3033,11 @@ static int __set_gcr3(struct protection_domain *domain, int pasid,
static int __clear_gcr3(struct protection_domain *domain, int pasid)
{
+ struct domain_pgtable pgtable;
u64 *pte;
- if (domain->mode != PAGE_MODE_NONE)
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ if (pgtable.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 6be3853a5d97..5b81fd16f5fa 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1329,8 +1329,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
}
case IVHD_DEV_ACPI_HID: {
u16 devid;
- u8 hid[ACPIHID_HID_LEN] = {0};
- u8 uid[ACPIHID_UID_LEN] = {0};
+ u8 hid[ACPIHID_HID_LEN];
+ u8 uid[ACPIHID_UID_LEN];
int ret;
if (h->type != 0x40) {
@@ -1347,6 +1347,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
+ uid[0] = '\0';
switch (e->uidf) {
case UID_NOT_PRESENT:
@@ -1361,8 +1362,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case UID_IS_CHARACTER:
- memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
- uid[ACPIHID_UID_LEN - 1] = '\0';
+ memcpy(uid, &e->uid, e->uidl);
+ uid[e->uidl] = '\0';
break;
default:
@@ -2936,7 +2937,7 @@ static int __init parse_amd_iommu_intr(char *str)
{
for (; *str; ++str) {
if (strncmp(str, "legacy", 6) == 0) {
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
break;
}
if (strncmp(str, "vapic", 5) == 0) {
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index f8d01d6b00da..7a8fdec138bd 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -348,7 +348,7 @@
#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)
+#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
#define DTE_GCR3_INDEX_A 0
#define DTE_GCR3_INDEX_B 1
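The widened mask matches the size of the third GCR3 field in the device table entry; a sketch of the three-way split implied by the shifts and masks above:

	/*
	 * VAL_A = gcr3[14:12]  (3 bits)
	 * VAL_B = gcr3[30:15] (16 bits)
	 * VAL_C = gcr3[51:31] (21 bits) -- the old 0xfffff mask dropped bit 51
	 */
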
@@ -468,8 +468,7 @@ struct protection_domain {
iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
- int mode; /* paging mode (0-6 levels) */
- u64 *pt_root; /* page table root pointer */
+ atomic64_t pt_root; /* pgtable root and pgtable mode */
int glx; /* Number of levels for GCR3 table */
u64 *gcr3_tbl; /* Guest CR3 table */
unsigned long flags; /* flags to find out type of domain */
@@ -477,6 +476,12 @@ struct protection_domain {
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};
+/* For decoded pt_root */
+struct domain_pgtable {
+ int mode;
+ u64 *root;
+};
+
/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index aa3ac2a03807..82508730feb7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -69,6 +69,9 @@
#define IDR1_SSIDSIZE GENMASK(10, 6)
#define IDR1_SIDSIZE GENMASK(5, 0)
+#define ARM_SMMU_IDR3 0xc
+#define IDR3_RIL (1 << 10)
+
#define ARM_SMMU_IDR5 0x14
#define IDR5_STALL_MAX GENMASK(31, 16)
#define IDR5_GRAN64K (1 << 6)
@@ -346,9 +349,14 @@
#define CMDQ_CFGI_1_LEAF (1UL << 0)
#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
+#define CMDQ_TLBI_0_NUM GENMASK_ULL(16, 12)
+#define CMDQ_TLBI_RANGE_NUM_MAX 31
+#define CMDQ_TLBI_0_SCALE GENMASK_ULL(24, 20)
#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
#define CMDQ_TLBI_1_LEAF (1UL << 0)
+#define CMDQ_TLBI_1_TTL GENMASK_ULL(9, 8)
+#define CMDQ_TLBI_1_TG GENMASK_ULL(11, 10)
#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
@@ -473,9 +481,13 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_TLBI_S2_IPA 0x2a
#define CMDQ_OP_TLBI_NSNH_ALL 0x30
struct {
+ u8 num;
+ u8 scale;
u16 asid;
u16 vmid;
bool leaf;
+ u8 ttl;
+ u8 tg;
u64 addr;
} tlbi;
@@ -548,6 +560,11 @@ struct arm_smmu_cmdq {
atomic_t lock;
};
+struct arm_smmu_cmdq_batch {
+ u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
+ int num;
+};
+
struct arm_smmu_evtq {
struct arm_smmu_queue q;
u32 max_stalls;
@@ -627,6 +644,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HYP (1 << 12)
#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
#define ARM_SMMU_FEAT_VAX (1 << 14)
+#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -895,14 +913,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
break;
case CMDQ_OP_TLBI_NH_VA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
break;
case CMDQ_OP_TLBI_S2_IPA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
break;
case CMDQ_OP_TLBI_NH_ASID:
@@ -1482,6 +1508,24 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
}
+static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ if (cmds->num == CMDQ_BATCH_ENTRIES) {
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
+ cmds->num = 0;
+ }
+ arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd);
+ cmds->num++;
+}
+
+static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_batch *cmds)
+{
+ return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+}
+
/* Context descriptor manipulation functions */
static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
int ssid, bool leaf)
@@ -1489,6 +1533,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
size_t i;
unsigned long flags;
struct arm_smmu_master *master;
+ struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_CD,
@@ -1502,12 +1547,12 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
for (i = 0; i < master->num_sids; i++) {
cmd.cfgi.sid = master->sids[i];
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
}
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
@@ -1531,6 +1576,7 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
CTXDESC_L1_DESC_V;
+ /* See comment in arm_smmu_write_ctx_desc() */
WRITE_ONCE(*dst, cpu_to_le64(val));
}
@@ -1726,7 +1772,8 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
- *dst = cpu_to_le64(val);
+ /* See comment in arm_smmu_write_ctx_desc() */
+ WRITE_ONCE(*dst, cpu_to_le64(val));
}
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -2132,17 +2179,16 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
cmd->atc.size = log2_span;
}
-static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
- struct arm_smmu_cmdq_ent *cmd)
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
{
int i;
+ struct arm_smmu_cmdq_ent cmd;
- if (!master->ats_enabled)
- return 0;
+ arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
for (i = 0; i < master->num_sids; i++) {
- cmd->atc.sid = master->sids[i];
- arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
+ cmd.atc.sid = master->sids[i];
+ arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
}
return arm_smmu_cmdq_issue_sync(master->smmu);
@@ -2151,10 +2197,11 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
int ssid, unsigned long iova, size_t size)
{
- int ret = 0;
+ int i;
unsigned long flags;
struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_master *master;
+ struct arm_smmu_cmdq_batch cmds = {};
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
return 0;
@@ -2179,11 +2226,18 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_for_each_entry(master, &smmu_domain->devices, domain_head)
- ret |= arm_smmu_atc_inv_master(master, &cmd);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ if (!master->ats_enabled)
+ continue;
+
+ for (i = 0; i < master->num_sids; i++) {
+ cmd.atc.sid = master->sids[i];
+ arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
+ }
+ }
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- return ret ? -ETIMEDOUT : 0;
+ return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
}
/* IO_PGTABLE API */
@@ -2218,10 +2272,10 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
size_t granule, bool leaf,
struct arm_smmu_domain *smmu_domain)
{
- u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
struct arm_smmu_device *smmu = smmu_domain->smmu;
- unsigned long start = iova, end = iova + size;
- int i = 0;
+ unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
+ size_t inv_range = granule;
+ struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_cmdq_ent cmd = {
.tlbi = {
.leaf = leaf,
@@ -2239,19 +2293,50 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
+ if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+ /* Get the leaf page size */
+ tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+
+ /* Convert page size of 12,14,16 (log2) to 1,2,3 */
+ cmd.tlbi.tg = (tg - 10) / 2;
+
+ /* Determine what level the granule is at */
+ cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+
+ num_pages = size >> tg;
+ }
+
while (iova < end) {
- if (i == CMDQ_BATCH_ENTRIES) {
- arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false);
- i = 0;
+ if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+ /*
+ * On each iteration of the loop, the range is 5 bits
+ * worth of the aligned size remaining.
+ * The range in pages is:
+ *
+ * range = (num_pages & (0x1f << __ffs(num_pages)))
+ */
+ unsigned long scale, num;
+
+ /* Determine the power of 2 multiple number of pages */
+ scale = __ffs(num_pages);
+ cmd.tlbi.scale = scale;
+
+ /* Determine how many chunks of 2^scale size we have */
+ num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
+ cmd.tlbi.num = num - 1;
+
+ /* range is num * 2^scale * pgsize */
+ inv_range = num << (scale + tg);
+
+			/* Clear out the lower-order bits for the next iteration */
+ num_pages -= num << scale;
}
cmd.tlbi.addr = iova;
- arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd);
- iova += granule;
- i++;
+ arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+ iova += inv_range;
}
-
- arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true);
+ arm_smmu_cmdq_batch_submit(smmu, &cmds);
/*
* Unfortunately, this can't be leaf-only since we may have
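A worked example of the range encoding above, with invented sizes: for 4KB pages, tg = 12 and cmd.tlbi.tg = (12 - 10) / 2 = 1. Invalidating 33 pages (num_pages = 0b100001) then takes two commands instead of 33:

	/*
	 * iter 1: scale = __ffs(33) = 0, num = 33 & 31 = 1
	 *         -> 1 * 2^0 pages,  inv_range = 4KB,   num_pages = 32
	 * iter 2: scale = __ffs(32) = 5, num = (32 >> 5) & 31 = 1
	 *         -> 1 * 2^5 pages,  inv_range = 128KB, num_pages = 0
	 */
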
@@ -2611,7 +2696,6 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
static void arm_smmu_disable_ats(struct arm_smmu_master *master)
{
- struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_domain *smmu_domain = master->domain;
if (!master->ats_enabled)
@@ -2623,11 +2707,57 @@ static void arm_smmu_disable_ats(struct arm_smmu_master *master)
* ATC invalidation via the SMMU.
*/
wmb();
- arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
- arm_smmu_atc_inv_master(master, &cmd);
+ arm_smmu_atc_inv_master(master);
atomic_dec(&smmu_domain->nr_ats_masters);
}
+static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
+{
+ int ret;
+ int features;
+ int num_pasids;
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(master->dev))
+ return -ENODEV;
+
+ pdev = to_pci_dev(master->dev);
+
+ features = pci_pasid_features(pdev);
+ if (features < 0)
+ return features;
+
+ num_pasids = pci_max_pasids(pdev);
+ if (num_pasids <= 0)
+ return num_pasids;
+
+ ret = pci_enable_pasid(pdev, features);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PASID\n");
+ return ret;
+ }
+
+ master->ssid_bits = min_t(u8, ilog2(num_pasids),
+ master->smmu->ssid_bits);
+ return 0;
+}
+
+static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
+{
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(master->dev))
+ return;
+
+ pdev = to_pci_dev(master->dev);
+
+ if (!pdev->pasid_enabled)
+ return;
+
+ master->ssid_bits = 0;
+ pci_disable_pasid(pdev);
+}
+
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
{
unsigned long flags;
@@ -2659,7 +2789,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (!fwspec)
return -ENOENT;
- master = fwspec->iommu_priv;
+ master = dev_iommu_priv_get(dev);
smmu = master->smmu;
arm_smmu_detach_dev(master);
@@ -2795,7 +2925,7 @@ static int arm_smmu_add_device(struct device *dev)
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return -ENODEV;
- if (WARN_ON_ONCE(fwspec->iommu_priv))
+ if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
return -EBUSY;
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
@@ -2810,7 +2940,7 @@ static int arm_smmu_add_device(struct device *dev)
master->smmu = smmu;
master->sids = fwspec->ids;
master->num_sids = fwspec->num_ids;
- fwspec->iommu_priv = master;
+ dev_iommu_priv_set(dev, master);
/* Check the SIDs are in range of the SMMU and our stream table */
for (i = 0; i < master->num_sids; i++) {
@@ -2831,13 +2961,23 @@ static int arm_smmu_add_device(struct device *dev)
master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+ /*
+ * Note that PASID must be enabled before, and disabled after ATS:
+ * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register
+ *
+ * Behavior is undefined if this bit is Set and the value of the PASID
+ * Enable, Execute Requested Enable, or Privileged Mode Requested bits
+ * are changed.
+ */
+ arm_smmu_enable_pasid(master);
+
if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
master->ssid_bits = min_t(u8, master->ssid_bits,
CTXDESC_LINEAR_CDMAX);
ret = iommu_device_link(&smmu->iommu, dev);
if (ret)
- goto err_free_master;
+ goto err_disable_pasid;
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group)) {
@@ -2850,9 +2990,11 @@ static int arm_smmu_add_device(struct device *dev)
err_unlink:
iommu_device_unlink(&smmu->iommu, dev);
+err_disable_pasid:
+ arm_smmu_disable_pasid(master);
err_free_master:
kfree(master);
- fwspec->iommu_priv = NULL;
+ dev_iommu_priv_set(dev, NULL);
return ret;
}
@@ -2865,11 +3007,12 @@ static void arm_smmu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
- master = fwspec->iommu_priv;
+ master = dev_iommu_priv_get(dev);
smmu = master->smmu;
arm_smmu_detach_dev(master);
iommu_group_remove_device(dev);
iommu_device_unlink(&smmu->iommu, dev);
+ arm_smmu_disable_pasid(master);
kfree(master);
iommu_fwspec_free(dev);
}
@@ -3700,6 +3843,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
if (smmu->sid_bits <= STRTAB_SPLIT)
smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+ /* IDR3 */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
+ if (FIELD_GET(IDR3_RIL, reg))
+ smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 16c4b87af42b..a6a5796e9c41 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -98,12 +98,10 @@ struct arm_smmu_master_cfg {
s16 smendx[];
};
#define INVALID_SMENDX -1
-#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
-#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
-#define fwspec_smendx(fw, i) \
- (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
-#define for_each_cfg_sme(fw, i, idx) \
- for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
+#define cfg_smendx(cfg, fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
+#define for_each_cfg_sme(cfg, fw, i, idx) \
+ for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
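The reworked iterator takes the master config explicitly now that it no longer hangs off the fwspec. A self-contained user-space sketch of the same comma-expression pattern, with toy struct names rather than the driver's:

#include <stdio.h>

#define INVALID_SMENDX -1

struct cfg { int smendx[4]; };
struct fw  { unsigned int num_ids; };

/* 'idx' is assigned before the bounds test, same as the macro above */
#define cfg_smendx(c, f, i) \
	((i) >= (f)->num_ids ? INVALID_SMENDX : (c)->smendx[i])
#define for_each_cfg_sme(c, f, i, idx) \
	for (i = 0; idx = cfg_smendx(c, f, i), i < (f)->num_ids; ++i)

int main(void)
{
	struct cfg c = { .smendx = { 7, 3, 9, 1 } };
	struct fw f = { .num_ids = 3 };
	unsigned int i;
	int idx;

	for_each_cfg_sme(&c, &f, i, idx)
		printf("id %u -> sme %d\n", i, idx);
	return 0;
}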
static bool using_legacy_binding, using_generic_binding;
@@ -1061,7 +1059,7 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
static int arm_smmu_master_alloc_smes(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_smr *smrs = smmu->smrs;
struct iommu_group *group;
@@ -1069,7 +1067,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
mutex_lock(&smmu->stream_map_mutex);
/* Figure out a viable stream map entry allocation */
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
@@ -1100,7 +1098,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
iommu_group_put(group);
/* It worked! Now, poke the actual hardware */
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
arm_smmu_write_sme(smmu, idx);
smmu->s2crs[idx].group = group;
}
@@ -1117,14 +1115,14 @@ out_err:
return ret;
}
-static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
+static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
+ struct iommu_fwspec *fwspec)
{
- struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
- struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+ struct arm_smmu_device *smmu = cfg->smmu;
int i, idx;
mutex_lock(&smmu->stream_map_mutex);
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
if (arm_smmu_free_sme(smmu, idx))
arm_smmu_write_sme(smmu, idx);
cfg->smendx[i] = INVALID_SMENDX;
@@ -1133,6 +1131,7 @@ static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master_cfg *cfg,
struct iommu_fwspec *fwspec)
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -1146,7 +1145,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
else
type = S2CR_TYPE_TRANS;
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1160,10 +1159,11 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- int ret;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_master_cfg *cfg;
struct arm_smmu_device *smmu;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret;
if (!fwspec || fwspec->ops != &arm_smmu_ops) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1177,10 +1177,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* domains, just say no (but more politely than by dereferencing NULL).
* This should be at least a WARN_ON once that's sorted.
*/
- if (!fwspec->iommu_priv)
+ cfg = dev_iommu_priv_get(dev);
+ if (!cfg)
return -ENODEV;
- smmu = fwspec_smmu(fwspec);
+ smmu = cfg->smmu;
ret = arm_smmu_rpm_get(smmu);
if (ret < 0)
@@ -1204,7 +1205,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* Looks ok, so add the device to the domain */
- ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+ ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
/*
* Setup an autosuspend delay to avoid bouncing runpm state.
@@ -1383,7 +1384,7 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
static int arm_smmu_add_device(struct device *dev)
{
- struct arm_smmu_device *smmu;
+ struct arm_smmu_device *smmu = NULL;
struct arm_smmu_master_cfg *cfg;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
int i, ret;
@@ -1429,7 +1430,7 @@ static int arm_smmu_add_device(struct device *dev)
goto out_free;
cfg->smmu = smmu;
- fwspec->iommu_priv = cfg;
+ dev_iommu_priv_set(dev, cfg);
while (i--)
cfg->smendx[i] = INVALID_SMENDX;
@@ -1467,7 +1468,7 @@ static void arm_smmu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
- cfg = fwspec->iommu_priv;
+ cfg = dev_iommu_priv_get(dev);
smmu = cfg->smmu;
ret = arm_smmu_rpm_get(smmu);
@@ -1475,23 +1476,25 @@ static void arm_smmu_remove_device(struct device *dev)
return;
iommu_device_unlink(&smmu->iommu, dev);
- arm_smmu_master_free_smes(fwspec);
+ arm_smmu_master_free_smes(cfg, fwspec);
arm_smmu_rpm_put(smmu);
+ dev_iommu_priv_set(dev, NULL);
iommu_group_remove_device(dev);
- kfree(fwspec->iommu_priv);
+ kfree(cfg);
iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+ struct arm_smmu_device *smmu = cfg->smmu;
struct iommu_group *group = NULL;
int i, idx;
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
if (group && smmu->s2crs[idx].group &&
group != smmu->s2crs[idx].group)
return ERR_PTR(-EINVAL);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4be549478691..0182cff2c7ac 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -371,11 +371,11 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
-#ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
int intel_iommu_sm = 1;
#else
int intel_iommu_sm;
-#endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
@@ -4501,7 +4501,8 @@ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
struct dmar_atsr_unit *atsru;
struct acpi_dmar_atsr *tmp;
- list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
+ dmar_rcu_check()) {
tmp = (struct acpi_dmar_atsr *)atsru->hdr;
if (atsr->segment != tmp->segment)
continue;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index d7f2a5358900..2998418f0a38 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -531,7 +531,7 @@ struct page_req_dsc {
u64 priv_data[2];
};
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
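The mask change reflects that a page request descriptor is 32 bytes (0x20), not 16: the ring pointer advances one descriptor at a time, so the mask must subtract exactly one entry. A user-space sketch, with PRQ_ORDER assumed 0 for illustration:

#include <stdio.h>

#define PRQ_ORDER     0		/* assumed for illustration */
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)

int main(void)
{
	unsigned int head = 0x1000 - 0x20;	/* last descriptor slot */

	/* advancing by one 32-byte descriptor wraps cleanly to 0 */
	head = (head + 0x20) & PRQ_RING_MASK;
	printf("head wrapped to %#x\n", head);
	return 0;
}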
static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
@@ -611,14 +611,15 @@ static irqreturn_t prq_event_thread(int irq, void *d)
* any faults on kernel addresses. */
if (!svm->mm)
goto bad_req;
- /* If the mm is already defunct, don't handle faults. */
- if (!mmget_not_zero(svm->mm))
- goto bad_req;
/* If address is not canonical, return invalid response */
if (!is_canonical_address(address))
goto bad_req;
+ /* If the mm is already defunct, don't handle faults. */
+ if (!mmget_not_zero(svm->mm))
+ goto bad_req;
+
down_read(&svm->mm->mmap_sem);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3e3528436e0b..03d6a26687bc 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -152,9 +152,9 @@ void iommu_device_unregister(struct iommu_device *iommu)
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
-static struct iommu_param *iommu_get_dev_param(struct device *dev)
+static struct dev_iommu *dev_iommu_get(struct device *dev)
{
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
if (param)
return param;
@@ -164,14 +164,15 @@ static struct iommu_param *iommu_get_dev_param(struct device *dev)
return NULL;
mutex_init(&param->lock);
- dev->iommu_param = param;
+ dev->iommu = param;
return param;
}
-static void iommu_free_dev_param(struct device *dev)
+static void dev_iommu_free(struct device *dev)
{
- kfree(dev->iommu_param);
- dev->iommu_param = NULL;
+ iommu_fwspec_free(dev);
+ kfree(dev->iommu);
+ dev->iommu = NULL;
}
int iommu_probe_device(struct device *dev)
@@ -183,7 +184,7 @@ int iommu_probe_device(struct device *dev)
if (!ops)
return -EINVAL;
- if (!iommu_get_dev_param(dev))
+ if (!dev_iommu_get(dev))
return -ENOMEM;
if (!try_module_get(ops->owner)) {
@@ -200,7 +201,7 @@ int iommu_probe_device(struct device *dev)
err_module_put:
module_put(ops->owner);
err_free_dev_param:
- iommu_free_dev_param(dev);
+ dev_iommu_free(dev);
return ret;
}
@@ -211,9 +212,9 @@ void iommu_release_device(struct device *dev)
if (dev->iommu_group)
ops->remove_device(dev);
- if (dev->iommu_param) {
+ if (dev->iommu) {
module_put(ops->owner);
- iommu_free_dev_param(dev);
+ dev_iommu_free(dev);
}
}
@@ -509,7 +510,7 @@ struct iommu_group *iommu_group_alloc(void)
NULL, "%d", group->id);
if (ret) {
ida_simple_remove(&iommu_group_ida, group->id);
- kfree(group);
+ kobject_put(&group->kobj);
return ERR_PTR(ret);
}
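The switch from kfree() to kobject_put() matters because kobject_init_and_add() leaves a reference held even on failure; only the put runs the ktype release callback, which also frees the kobject name. A sketch of the general pattern this fix follows (foo, foo_ktype and foo_release are hypothetical):

/* Sketch: once kobject_init_and_add() has run, the object must die
 * through its release callback, never via a bare kfree(). */
ret = kobject_init_and_add(&foo->kobj, &foo_ktype, NULL, "foo%d", id);
if (ret) {
	kobject_put(&foo->kobj);	/* runs foo_release(); kfree() here
					 * would leak the kobject name */
	return ERR_PTR(ret);
}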
@@ -692,6 +693,15 @@ out:
return ret;
}
+static bool iommu_is_attach_deferred(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (domain->ops->is_attach_deferred)
+ return domain->ops->is_attach_deferred(domain, dev);
+
+ return false;
+}
+
/**
* iommu_group_add_device - add a device to an iommu group
* @group: the group into which to add the device (reference should be held)
@@ -746,7 +756,7 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
- if (group->domain)
+ if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
if (ret)
@@ -972,7 +982,7 @@ int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
void *data)
{
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
int ret = 0;
if (!param)
@@ -1015,7 +1025,7 @@ EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
*/
int iommu_unregister_device_fault_handler(struct device *dev)
{
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
int ret = 0;
if (!param)
@@ -1055,7 +1065,7 @@ EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
*/
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
struct iommu_fault_event *evt_pending = NULL;
struct iommu_fault_param *fparam;
int ret = 0;
@@ -1104,7 +1114,7 @@ int iommu_page_response(struct device *dev,
int ret = -EINVAL;
struct iommu_fault_event *evt;
struct iommu_fault_page_request *prm;
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
if (!domain || !domain->ops->page_response)
@@ -1428,7 +1438,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
return group;
}
-EXPORT_SYMBOL(iommu_group_get_for_dev);
+EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
@@ -1652,9 +1662,6 @@ static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
int ret;
- if ((domain->ops->is_attach_deferred != NULL) &&
- domain->ops->is_attach_deferred(domain, dev))
- return 0;
if (unlikely(domain->ops->attach_dev == NULL))
return -ENODEV;
@@ -1726,8 +1733,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- if ((domain->ops->is_attach_deferred != NULL) &&
- domain->ops->is_attach_deferred(domain, dev))
+ if (iommu_is_attach_deferred(domain, dev))
return;
if (unlikely(domain->ops->detach_dev == NULL))
@@ -2405,7 +2411,11 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
if (fwspec)
return ops == fwspec->ops ? 0 : -EINVAL;
- fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!dev_iommu_get(dev))
+ return -ENOMEM;
+
+ /* Preallocate for the overwhelmingly common case of 1 ID */
+ fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
if (!fwspec)
return -ENOMEM;
@@ -2432,15 +2442,15 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_free);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- size_t size;
- int i;
+ int i, new_num;
if (!fwspec)
return -EINVAL;
- size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
- if (size > sizeof(*fwspec)) {
- fwspec = krealloc(fwspec, size, GFP_KERNEL);
+ new_num = fwspec->num_ids + num_ids;
+ if (new_num > 1) {
+ fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
+ GFP_KERNEL);
if (!fwspec)
return -ENOMEM;
@@ -2450,7 +2460,7 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
for (i = 0; i < num_ids; i++)
fwspec->ids[fwspec->num_ids + i] = ids[i];
- fwspec->num_ids += num_ids;
+ fwspec->num_ids = new_num;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
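With the flexible ids[] array, growth becomes plain struct_size() arithmetic instead of the old offsetof() dance. A user-space sketch of the same pattern; struct_size() is open-coded here, whereas the kernel macro additionally guards against overflow:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct fwspec {				/* trimmed-down analogue */
	unsigned int num_ids;
	uint32_t ids[];			/* one slot preallocated */
};

#define struct_size(p, m, n) (sizeof(*(p)) + sizeof((p)->m[0]) * (n))

int main(void)
{
	struct fwspec *fw = calloc(1, struct_size(fw, ids, 1));
	uint32_t new_ids[] = { 0x10, 0x11, 0x12 };
	unsigned int i, new_num = fw->num_ids + 3;

	if (new_num > 1)		/* only grow past the first slot */
		fw = realloc(fw, struct_size(fw, ids, new_num));
	memcpy(&fw->ids[fw->num_ids], new_ids, sizeof(new_ids));
	fw->num_ids = new_num;

	for (i = 0; i < fw->num_ids; i++)
		printf("ids[%u] = %#x\n", i, fw->ids[i]);
	free(fw);
	return 0;
}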
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ecb3f9464dd5..310cf09feea3 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -89,9 +89,7 @@ static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- return fwspec ? fwspec->iommu_priv : NULL;
+ return dev_iommu_priv_get(dev);
}
#define TLB_LOOP_TIMEOUT 100 /* 100us */
@@ -727,14 +725,13 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
static int ipmmu_init_platform_device(struct device *dev,
struct of_phandle_args *args)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct platform_device *ipmmu_pdev;
ipmmu_pdev = of_find_device_by_node(args->np);
if (!ipmmu_pdev)
return -ENODEV;
- fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
+ dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
return 0;
}
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 95945f467c03..5f4d6df59cf6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -358,8 +358,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
if (!data)
return -ENODEV;
@@ -378,7 +378,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
if (!data)
return;
@@ -450,7 +450,7 @@ static int mtk_iommu_add_device(struct device *dev)
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not an iommu client device */
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
iommu_device_link(&data->iommu, dev);
group = iommu_group_get_for_dev(dev);
@@ -469,7 +469,7 @@ static void mtk_iommu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
iommu_device_unlink(&data->iommu, dev);
iommu_group_remove_device(dev);
@@ -496,7 +496,6 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct platform_device *m4updev;
if (args->args_count != 1) {
@@ -505,13 +504,13 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return -EINVAL;
}
- if (!fwspec->iommu_priv) {
+ if (!dev_iommu_priv_get(dev)) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- fwspec->iommu_priv = platform_get_drvdata(m4updev);
+ dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
}
return iommu_fwspec_add_ids(dev, args->args, 1);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index e93b94ecac45..a31be05601c9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -263,8 +263,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
int ret;
if (!data)
@@ -286,7 +286,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
if (!data)
return;
@@ -387,20 +387,20 @@ static int mtk_iommu_create_mapping(struct device *dev,
return -EINVAL;
}
- if (!fwspec->iommu_priv) {
+ if (!dev_iommu_priv_get(dev)) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- fwspec->iommu_priv = platform_get_drvdata(m4updev);
+ dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
}
ret = iommu_fwspec_add_ids(dev, args->args, 1);
if (ret)
return ret;
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
m4udev = data->dev;
mtk_mapping = m4udev->archdata.iommu;
if (!mtk_mapping) {
@@ -459,7 +459,7 @@ static int mtk_iommu_add_device(struct device *dev)
if (err)
return err;
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
mtk_mapping = data->dev->archdata.iommu;
err = arm_iommu_attach_device(dev, mtk_mapping);
if (err) {
@@ -478,7 +478,7 @@ static void mtk_iommu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
iommu_device_unlink(&data->iommu, dev);
iommu_group_remove_device(dev);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index be551cc34be4..887fefcb03b4 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -167,7 +167,7 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
{
u32 l, pa;
- if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+ if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
return -EINVAL;
pa = virt_to_phys(obj->iopgd);
@@ -434,7 +434,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
bytes = iopgsz_to_bytes(cr.cam & 3);
if ((start <= da) && (da < start + bytes)) {
- dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
+ dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
__func__, start, da, bytes);
iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
@@ -1352,11 +1352,11 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
omap_pgsz = bytes_to_iopgsz(bytes);
if (omap_pgsz < 0) {
- dev_err(dev, "invalid size to map: %d\n", bytes);
+ dev_err(dev, "invalid size to map: %zu\n", bytes);
return -EINVAL;
}
- dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
+ dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
iotlb_init_entry(&e, da, pa, omap_pgsz);
@@ -1393,7 +1393,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
size_t bytes = 0;
int i;
- dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
+ dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
iommu = omap_domain->iommus;
for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index 1a4adb59a859..51d74002cc30 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -63,7 +63,8 @@
*
* va to pa translation
*/
-static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+static inline phys_addr_t omap_iommu_translate(unsigned long d, dma_addr_t va,
+ dma_addr_t mask)
{
return (d & mask) | (va & (~mask));
}
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 4328da0b0a9f..5b3b270972f8 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -48,7 +48,7 @@ struct qcom_iommu_dev {
void __iomem *local_base;
u32 sec_id;
u8 num_ctxs;
- struct qcom_iommu_ctx *ctxs[0]; /* indexed by asid-1 */
+ struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */
};
struct qcom_iommu_ctx {
@@ -74,16 +74,19 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
static const struct iommu_ops qcom_iommu_ops;
-static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec)
+static struct qcom_iommu_dev * to_iommu(struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
if (!fwspec || fwspec->ops != &qcom_iommu_ops)
return NULL;
- return fwspec->iommu_priv;
+
+ return dev_iommu_priv_get(dev);
}
-static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
if (!qcom_iommu)
return NULL;
return qcom_iommu->ctxs[asid - 1];
@@ -115,11 +118,14 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
static void qcom_iommu_tlb_sync(void *cookie)
{
- struct iommu_fwspec *fwspec = cookie;
+ struct iommu_fwspec *fwspec;
+ struct device *dev = cookie;
unsigned i;
+ fwspec = dev_iommu_fwspec_get(dev);
+
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
unsigned int val, ret;
iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -133,11 +139,14 @@ static void qcom_iommu_tlb_sync(void *cookie)
static void qcom_iommu_tlb_inv_context(void *cookie)
{
- struct iommu_fwspec *fwspec = cookie;
+ struct device *dev = cookie;
+ struct iommu_fwspec *fwspec;
unsigned i;
+ fwspec = dev_iommu_fwspec_get(dev);
+
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
}
@@ -147,13 +156,16 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
- struct iommu_fwspec *fwspec = cookie;
+ struct device *dev = cookie;
+ struct iommu_fwspec *fwspec;
unsigned i, reg;
reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+ fwspec = dev_iommu_fwspec_get(dev);
+
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
size_t s = size;
iova = (iova >> 12) << 12;
@@ -222,9 +234,10 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
static int qcom_iommu_init_domain(struct iommu_domain *domain,
struct qcom_iommu_dev *qcom_iommu,
- struct iommu_fwspec *fwspec)
+ struct device *dev)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
int i, ret = 0;
@@ -243,7 +256,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
};
qcom_domain->iommu = qcom_iommu;
- pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec);
+ pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
if (!pgtbl_ops) {
dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
ret = -ENOMEM;
@@ -256,7 +269,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
domain->geometry.force_aperture = true;
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
if (!ctx->secure_init) {
ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -363,8 +376,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
int ret;
@@ -375,7 +387,7 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
/* Ensure that the domain is finalized */
pm_runtime_get_sync(qcom_iommu->dev);
- ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
+ ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
pm_runtime_put_sync(qcom_iommu->dev);
if (ret < 0)
return ret;
@@ -397,9 +409,9 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
unsigned i;
if (WARN_ON(!qcom_domain->iommu))
@@ -407,7 +419,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
@@ -514,7 +526,7 @@ static bool qcom_iommu_capable(enum iommu_cap cap)
static int qcom_iommu_add_device(struct device *dev)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
struct iommu_group *group;
struct device_link *link;
@@ -545,7 +557,7 @@ static int qcom_iommu_add_device(struct device *dev)
static void qcom_iommu_remove_device(struct device *dev)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
if (!qcom_iommu)
return;
@@ -557,7 +569,6 @@ static void qcom_iommu_remove_device(struct device *dev)
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct qcom_iommu_dev *qcom_iommu;
struct platform_device *iommu_pdev;
unsigned asid = args->args[0];
@@ -583,14 +594,14 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
WARN_ON(asid > qcom_iommu->num_ctxs))
return -EINVAL;
- if (!fwspec->iommu_priv) {
- fwspec->iommu_priv = qcom_iommu;
+ if (!dev_iommu_priv_get(dev)) {
+ dev_iommu_priv_set(dev, qcom_iommu);
} else {
/* make sure the device's iommus dt node isn't referring to
* multiple different iommu devices. Multiple context
* banks are ok, but multiple devices are not:
*/
- if (WARN_ON(qcom_iommu != fwspec->iommu_priv))
+ if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
return -EINVAL;
}
@@ -813,8 +824,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
qcom_iommu->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
+ if (res) {
qcom_iommu->local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qcom_iommu->local_base))
+ return PTR_ERR(qcom_iommu->local_base);
+ }
qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
if (IS_ERR(qcom_iommu->iface_clk)) {
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 3fb7ba72507d..db6559e8336f 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -247,7 +247,7 @@ static int gart_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
- if (!dev->iommu_fwspec)
+ if (!dev_iommu_fwspec_get(dev))
return -ENODEV;
group = iommu_group_get_for_dev(dev);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index cce329d71fba..4e1d11af23c8 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -453,7 +453,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
if (!region)
return -ENOMEM;
- list_add(&vdev->resv_regions, &region->list);
+ list_add(&region->list, &vdev->resv_regions);
return 0;
}
@@ -466,7 +466,7 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
struct virtio_iommu_req_probe *probe;
struct virtio_iommu_probe_property *prop;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev = fwspec->iommu_priv;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
if (!fwspec->num_ids)
return -EINVAL;
@@ -607,24 +607,36 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
return &vdomain->domain;
}
-static int viommu_domain_finalise(struct viommu_dev *viommu,
+static int viommu_domain_finalise(struct viommu_endpoint *vdev,
struct iommu_domain *domain)
{
int ret;
+ unsigned long viommu_page_size;
+ struct viommu_dev *viommu = vdev->viommu;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- vdomain->viommu = viommu;
- vdomain->map_flags = viommu->map_flags;
+ viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
+ if (viommu_page_size > PAGE_SIZE) {
+ dev_err(vdev->dev,
+ "granule 0x%lx larger than system page size 0x%lx\n",
+ viommu_page_size, PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ vdomain->id = (unsigned int)ret;
domain->pgsize_bitmap = viommu->pgsize_bitmap;
domain->geometry = viommu->geometry;
- ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
- viommu->last_domain, GFP_KERNEL);
- if (ret >= 0)
- vdomain->id = (unsigned int)ret;
+ vdomain->map_flags = viommu->map_flags;
+ vdomain->viommu = viommu;
- return ret > 0 ? 0 : ret;
+ return 0;
}
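The new granule check derives the smallest mappable page from the lowest set bit of the pgsize bitmap. A user-space sketch with an example bitmap; __builtin_ctzl() stands in for __ffs():

#include <stdio.h>

int main(void)
{
	unsigned long pgsize_bitmap = 0x40201000; /* 4K | 2M | 1G */
	unsigned long page_size = 4096;		  /* stand-in for PAGE_SIZE */
	unsigned long granule = 1UL << __builtin_ctzl(pgsize_bitmap);

	if (granule > page_size)
		printf("granule %#lx larger than system page size %#lx\n",
		       granule, page_size);
	else
		printf("granule %#lx fits\n", granule);
	return 0;
}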
static void viommu_domain_free(struct iommu_domain *domain)
@@ -648,7 +660,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
int ret = 0;
struct virtio_iommu_req_attach req;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev = fwspec->iommu_priv;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
struct viommu_domain *vdomain = to_viommu_domain(domain);
mutex_lock(&vdomain->mutex);
@@ -657,7 +669,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Properly initialize the domain now that we know which viommu
* owns it.
*/
- ret = viommu_domain_finalise(vdev->viommu, domain);
+ ret = viommu_domain_finalise(vdev, domain);
} else if (vdomain->viommu != vdev->viommu) {
dev_err(dev, "cannot attach to foreign vIOMMU\n");
ret = -EXDEV;
@@ -807,8 +819,7 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
struct iommu_resv_region *entry, *new_entry, *msi = NULL;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev = fwspec->iommu_priv;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
list_for_each_entry(entry, &vdev->resv_regions, list) {
@@ -876,7 +887,7 @@ static int viommu_add_device(struct device *dev)
vdev->dev = dev;
vdev->viommu = viommu;
INIT_LIST_HEAD(&vdev->resv_regions);
- fwspec->iommu_priv = vdev;
+ dev_iommu_priv_set(dev, vdev);
if (viommu->probe_size) {
/* Get additional information for this endpoint */
@@ -920,7 +931,7 @@ static void viommu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &viommu_ops)
return;
- vdev = fwspec->iommu_priv;
+ vdev = dev_iommu_priv_get(dev);
iommu_group_remove_device(dev);
iommu_device_unlink(&vdev->viommu->iommu, dev);
@@ -1082,7 +1093,6 @@ static int viommu_probe(struct virtio_device *vdev)
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &viommu_ops) {
- pci_request_acs();
ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
if (ret)
goto err_unregister;
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index 23445ebfda5c..ec71063fff76 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -306,6 +306,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to map driver user space!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
+ res = -ENOMEM;
goto out_release_mem8_space;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 6d397732138d..a85aada04a64 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -458,7 +458,7 @@ config IMX_IRQSTEER
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
config IMX_INTMUX
- def_bool y if ARCH_MXC
+ def_bool y if ARCH_MXC || COMPILE_TEST
select IRQ_DOMAIN
help
Support for the i.MX INTMUX interrupt multiplexer.
@@ -513,4 +513,23 @@ config EXYNOS_IRQ_COMBINER
Say yes here to add support for the IRQ combiner devices embedded
in Samsung Exynos chips.
+config LOONGSON_LIOINTC
+ bool "Loongson Local I/O Interrupt Controller"
+ depends on MACH_LOONGSON64
+ default y
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the Loongson Local I/O Interrupt Controller.
+
+config LOONGSON_HTPIC
+ bool "Loongson3 HyperTransport PIC Controller"
+ depends on MACH_LOONGSON64
+ default y
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+ select I8259
+ help
+ Support for the Loongson-3 HyperTransport PIC Controller.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index eae0d78cbf22..37bbe39bf909 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -105,3 +105,5 @@ obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
+obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
+obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index bb1ad451392f..2c999dc310c1 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -83,7 +83,7 @@ static int aic_retrigger(struct irq_data *d)
irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
irq_gc_unlock(gc);
- return 0;
+ return 1;
}
static int aic_set_type(struct irq_data *d, unsigned type)
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 29333497ba10..fc1b3a9cdafc 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -128,7 +128,7 @@ static int aic5_retrigger(struct irq_data *d)
irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
irq_gc_unlock(bgc);
- return 0;
+ return 1;
}
static int aic5_set_type(struct irq_data *d, unsigned type)
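Both retrigger fixes above follow from the irq_retrigger contract: a non-zero return tells the core the interrupt was retriggered in hardware, while 0 sends it down the software-resend fallback even though the ISCR write already fired. A self-contained sketch of the contract, using a fake set-command register:

#include <stdio.h>

static unsigned int fake_iscr;		/* stand-in for AT91_AIC_ISCR */

static int retrigger(unsigned int mask)
{
	fake_iscr |= mask;		/* poke the (fake) hardware */
	return 1;			/* non-zero: no software resend */
}

int main(void)
{
	printf("retrigger -> %d, ISCR = %#x\n", retrigger(1u << 5), fake_iscr);
	return 0;
}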
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 418245d31921..a1e004af23e7 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -61,6 +61,7 @@
| SHORTCUT1_MASK | SHORTCUT2_MASK)
#define REG_FIQ_CONTROL 0x0c
+#define FIQ_CONTROL_ENABLE BIT(7)
#define NR_BANKS 3
#define IRQS_PER_BANK 32
@@ -135,6 +136,7 @@ static int __init armctrl_of_init(struct device_node *node,
{
void __iomem *base;
int irq, b, i;
+ u32 reg;
base = of_iomap(node, 0);
if (!base)
@@ -157,6 +159,19 @@ static int __init armctrl_of_init(struct device_node *node,
handle_level_irq);
irq_set_probe(irq);
}
+
+ reg = readl_relaxed(intc.enable[b]);
+ if (reg) {
+ writel_relaxed(reg, intc.disable[b]);
+ pr_err(FW_BUG "Bootloader left irq enabled: "
+ "bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg);
+ }
+ }
+
+ reg = readl_relaxed(base + REG_FIQ_CONTROL);
+ if (reg & FIQ_CONTROL_ENABLE) {
+ writel_relaxed(0, base + REG_FIQ_CONTROL);
+ pr_err(FW_BUG "Bootloader left fiq enabled\n");
}
if (is_2836) {
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index cbf01afcd2a6..fd7c537fb42a 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -50,7 +50,7 @@ struct bcm7038_l1_chip {
struct bcm7038_l1_cpu {
void __iomem *map_base;
- u32 mask_cache[0];
+ u32 mask_cache[];
};
/*
@@ -416,7 +416,7 @@ static const struct irq_domain_ops bcm7038_l1_domain_ops = {
.map = bcm7038_l1_map,
};
-int __init bcm7038_l1_of_init(struct device_node *dn,
+static int __init bcm7038_l1_of_init(struct device_node *dn,
struct device_node *parent)
{
struct bcm7038_l1_chip *intc;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 83b1186ffcad..124251b0ccba 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -14,6 +14,7 @@
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
@@ -96,6 +97,7 @@ struct its_node {
struct mutex dev_alloc_lock;
struct list_head entry;
void __iomem *base;
+ void __iomem *sgir_base;
phys_addr_t phys_base;
struct its_cmd_block *cmd_base;
struct its_cmd_block *cmd_write;
@@ -188,6 +190,15 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+/*
+ * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
+ * always have vSGIs mapped.
+ */
+static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
+{
+ return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
+}
+
static u16 get_its_list(struct its_vm *vm)
{
struct its_node *its;
@@ -197,7 +208,7 @@ static u16 get_its_list(struct its_vm *vm)
if (!is_v4(its))
continue;
- if (vm->vlpi_count[its->list_nr])
+ if (require_its_list_vmovp(vm, its))
__set_bit(its->list_nr, &its_list);
}
@@ -239,15 +250,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
return NULL;
}
-static int irq_to_cpuid(struct irq_data *d)
+static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
+{
+ raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
+ return vpe->col_idx;
+}
+
+static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+}
+
+static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_vlpi_map *map = get_vlpi_map(d);
+ int cpu;
- if (map)
- return map->vpe->col_idx;
+ if (map) {
+ cpu = vpe_to_cpuid_lock(map->vpe, flags);
+ } else {
+ /* Physical LPIs are already locked via the irq_desc lock */
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ cpu = its_dev->event_map.col_map[its_get_event_id(d)];
+ /* Keep GCC quiet... */
+ *flags = 0;
+ }
- return its_dev->event_map.col_map[its_get_event_id(d)];
+ return cpu;
+}
+
+static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
+{
+ struct its_vlpi_map *map = get_vlpi_map(d);
+
+ if (map)
+ vpe_to_cpuid_unlock(map->vpe, flags);
}
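The lock/unlock pair pins vpe->col_idx for the duration of a redistributor access. A sketch of the intended caller-side shape; my_rdist_op() is a hypothetical user, mirroring what direct_lpi_inv() below now does:

/* Sketch: any path that reads vpe->col_idx to pick a redistributor
 * must hold the vPE lock across the access, or a concurrent
 * its_vpe_set_affinity() could move the vPE mid-operation. */
static void my_rdist_op(struct irq_data *d)
{
	unsigned long flags;
	int cpu = irq_to_cpuid_lock(d, &flags);

	/* ... MMIO against the redistributor of 'cpu' ... */

	irq_to_cpuid_unlock(d, flags);
}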
static struct its_collection *valid_col(struct its_collection *col)
@@ -353,6 +390,15 @@ struct its_cmd_desc {
struct {
struct its_vpe *vpe;
} its_invdb_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ u8 sgi;
+ u8 priority;
+ bool enable;
+ bool group;
+ bool clear;
+ } its_vsgi_cmd;
};
};
@@ -501,6 +547,31 @@ static void its_encode_db(struct its_cmd_block *cmd, bool db)
its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}
+static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
+{
+ its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
+}
+
+static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
+{
+ its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
+}
+
+static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
+{
+ its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
+}
+
+static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
+{
+ its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
+}
+
+static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
+{
+ its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
+}
+
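Each helper above packs one field of the VSGI command into the 64-bit raw words. A user-space sketch of the underlying its_mask_encode() mask-and-shift, with the field positions copied from the helpers:

#include <stdio.h>
#include <stdint.h>

/* place 'val' into bits [h:l] of *raw, as its_mask_encode() does */
static void mask_encode(uint64_t *raw, uint64_t val, int h, int l)
{
	uint64_t mask = ((h == 63 ? ~0ULL : (1ULL << (h + 1)) - 1)
			 & ~((1ULL << l) - 1));

	*raw = (*raw & ~mask) | ((val << l) & mask);
}

int main(void)
{
	uint64_t cmd0 = 0;

	mask_encode(&cmd0, 0x5, 35, 32);	/* SGI INTID */
	mask_encode(&cmd0, 0xa0 >> 4, 23, 20);	/* priority[7:4] */
	mask_encode(&cmd0, 1, 8, 8);		/* enable */
	printf("raw_cmd[0] = %#llx\n", (unsigned long long)cmd0);
	return 0;
}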
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
@@ -866,6 +937,26 @@ static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
return valid_vpe(its, desc->its_invdb_cmd.vpe);
}
+static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ if (WARN_ON(!is_v4_1(its)))
+ return NULL;
+
+ its_encode_cmd(cmd, GITS_CMD_VSGI);
+ its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
+ its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
+ its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
+ its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
+ its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
+ its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_vsgi_cmd.vpe);
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -1214,7 +1305,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
if (!is_v4(its))
continue;
- if (!vpe->its_vm->vlpi_count[its->list_nr])
+ if (!require_its_list_vmovp(vpe->its_vm, its))
continue;
desc.its_vmovp_cmd.col = &its->collections[col_id];
@@ -1321,7 +1412,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
static void wait_for_syncr(void __iomem *rdbase)
{
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+ while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
cpu_relax();
}
@@ -1329,7 +1420,9 @@ static void direct_lpi_inv(struct irq_data *d)
{
struct its_vlpi_map *map = get_vlpi_map(d);
void __iomem *rdbase;
+ unsigned long flags;
u64 val;
+ int cpu;
if (map) {
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1344,10 +1437,14 @@ static void direct_lpi_inv(struct irq_data *d)
}
/* Target the redistributor this LPI is currently routed to */
- rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+ cpu = irq_to_cpuid_lock(d, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
gic_write_lpir(val, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ irq_to_cpuid_unlock(d, flags);
}
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
@@ -1499,12 +1596,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
return 0;
}
+/*
+ * Two favourable cases:
+ *
+ * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
+ * for vSGI delivery
+ *
+ * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
+ * and we're better off mapping all VPEs always
+ *
+ * If neither (a) nor (b) is true, then we map vPEs on demand.
+ *
+ */
+static bool gic_requires_eager_mapping(void)
+{
+ if (!its_list_map || gic_rdists->has_rvpeid)
+ return true;
+
+ return false;
+}
+
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
unsigned long flags;
- /* Not using the ITS list? Everything is always mapped. */
- if (!its_list_map)
+ if (gic_requires_eager_mapping())
return;
raw_spin_lock_irqsave(&vmovp_lock, flags);
@@ -1538,7 +1654,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
unsigned long flags;
/* Not using the ITS list? Everything is always mapped. */
- if (!its_list_map)
+ if (gic_requires_eager_mapping())
return;
raw_spin_lock_irqsave(&vmovp_lock, flags);
@@ -2036,18 +2152,17 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser,
}
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
- u64 cache, u64 shr, u32 psz, u32 order,
- bool indirect)
+ u64 cache, u64 shr, u32 order, bool indirect)
{
u64 val = its_read_baser(its, baser);
u64 esz = GITS_BASER_ENTRY_SIZE(val);
u64 type = GITS_BASER_TYPE(val);
u64 baser_phys, tmp;
- u32 alloc_pages;
+ u32 alloc_pages, psz;
struct page *page;
void *base;
-retry_alloc_baser:
+ psz = baser->psz;
alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
@@ -2120,25 +2235,6 @@ retry_baser:
goto retry_baser;
}
- if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
- /*
- * Page size didn't stick. Let's try a smaller
- * size and retry. If we reach 4K, then
- * something is horribly wrong...
- */
- free_pages((unsigned long)base, order);
- baser->base = NULL;
-
- switch (psz) {
- case SZ_16K:
- psz = SZ_4K;
- goto retry_alloc_baser;
- case SZ_64K:
- psz = SZ_16K;
- goto retry_alloc_baser;
- }
- }
-
if (val != tmp) {
pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
@@ -2164,13 +2260,14 @@ retry_baser:
static bool its_parse_indirect_baser(struct its_node *its,
struct its_baser *baser,
- u32 psz, u32 *order, u32 ids)
+ u32 *order, u32 ids)
{
u64 tmp = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(tmp);
u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
u32 new_order = *order;
+ u32 psz = baser->psz;
bool indirect = false;
/* No need to enable Indirection if memory requirement < (psz*2)bytes */
@@ -2288,11 +2385,58 @@ static void its_free_tables(struct its_node *its)
}
}
+static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
+{
+ u64 psz = SZ_64K;
+
+ while (psz) {
+ u64 val, gpsz;
+
+ val = its_read_baser(its, baser);
+ val &= ~GITS_BASER_PAGE_SIZE_MASK;
+
+ switch (psz) {
+ case SZ_64K:
+ gpsz = GITS_BASER_PAGE_SIZE_64K;
+ break;
+ case SZ_16K:
+ gpsz = GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_4K:
+ default:
+ gpsz = GITS_BASER_PAGE_SIZE_4K;
+ break;
+ }
+
+ gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
+
+ val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
+ its_write_baser(its, baser, val);
+
+ if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
+ break;
+
+ switch (psz) {
+ case SZ_64K:
+ psz = SZ_16K;
+ break;
+ case SZ_16K:
+ psz = SZ_4K;
+ break;
+ case SZ_4K:
+ default:
+ return -1;
+ }
+ }
+
+ baser->psz = psz;
+ return 0;
+}
+
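The probe trusts only what reads back from BASERn, stepping 64K to 16K to 4K until a size sticks. A user-space sketch of the same negotiation against a simulated register that caps at 16K:

#include <stdio.h>

static unsigned long hw_max_psz = 16384;	/* simulated HW limit */

static unsigned long write_then_read(unsigned long want)
{
	/* unsupported sizes don't stick; HW reports what it kept */
	return want > hw_max_psz ? hw_max_psz : want;
}

int main(void)
{
	unsigned long psz;

	for (psz = 65536; psz >= 4096; psz /= 4) {
		if (write_then_read(psz) == psz) {
			printf("negotiated page size: %lu\n", psz);
			return 0;
		}
	}
	fprintf(stderr, "no supported page size\n");
	return 1;
}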
static int its_alloc_tables(struct its_node *its)
{
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_RaWaWb;
- u32 psz = SZ_64K;
int err, i;
if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
@@ -2303,16 +2447,22 @@ static int its_alloc_tables(struct its_node *its)
struct its_baser *baser = its->tables + i;
u64 val = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(val);
- u32 order = get_order(psz);
bool indirect = false;
+ u32 order;
- switch (type) {
- case GITS_BASER_TYPE_NONE:
+ if (type == GITS_BASER_TYPE_NONE)
continue;
+ if (its_probe_baser_psz(its, baser)) {
+ its_free_tables(its);
+ return -ENXIO;
+ }
+
+ order = get_order(baser->psz);
+
+ switch (type) {
case GITS_BASER_TYPE_DEVICE:
- indirect = its_parse_indirect_baser(its, baser,
- psz, &order,
+ indirect = its_parse_indirect_baser(its, baser, &order,
device_ids(its));
break;
@@ -2328,20 +2478,18 @@ static int its_alloc_tables(struct its_node *its)
}
}
- indirect = its_parse_indirect_baser(its, baser,
- psz, &order,
+ indirect = its_parse_indirect_baser(its, baser, &order,
ITS_MAX_VPEID_BITS);
break;
}
- err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
+ err = its_setup_baser(its, baser, cache, shr, order, indirect);
if (err < 0) {
its_free_tables(its);
return err;
}
/* Update settings which will be used for next BASERn */
- psz = baser->psz;
cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
}
@@ -2452,6 +2600,10 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
if (!gic_rdists->has_rvpeid)
return true;
+ /* Skip non-present CPUs */
+ if (!base)
+ return true;
+
val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
@@ -3482,17 +3634,25 @@ static int its_vpe_set_affinity(struct irq_data *d,
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
int from, cpu = cpumask_first(mask_val);
+ unsigned long flags;
/*
* Changing affinity is mega expensive, so let's be as lazy as
* we can and only do it if we really have to. Also, if mapped
* into the proxy device, we need to move the doorbell
* interrupt to its new location.
+ *
+ * Another thing is that changing the affinity of a vPE affects
+ * *other interrupts* such as all the vLPIs that are routed to
+ * this vPE. This means that the irq_desc lock is not enough to
+ * protect us, and that we must ensure nobody samples vpe->col_idx
+ * during the update, hence the lock below which must also be
+ * taken on any vLPI handling path that evaluates vpe->col_idx.
*/
- if (vpe->col_idx == cpu)
+ from = vpe_to_cpuid_lock(vpe, &flags);
+ if (from == cpu)
goto out;
- from = vpe->col_idx;
vpe->col_idx = cpu;
/*
@@ -3508,10 +3668,25 @@ static int its_vpe_set_affinity(struct irq_data *d,
out:
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ vpe_to_cpuid_unlock(vpe, flags);
return IRQ_SET_MASK_OK_DONE;
}
+static void its_wait_vpt_parse_complete(void)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ if (!gic_rdists->has_vpend_valid_dirty)
+ return;
+
+ WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
+ val,
+ !(val & GICR_VPENDBASER_Dirty),
+ 10, 500));
+}
+
static void its_vpe_schedule(struct its_vpe *vpe)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
@@ -3528,7 +3703,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val = virt_to_phys(page_address(vpe->vpt_page)) &
GENMASK_ULL(51, 16);
val |= GICR_VPENDBASER_RaWaWb;
- val |= GICR_VPENDBASER_NonShareable;
+ val |= GICR_VPENDBASER_InnerShareable;
/*
* There is no good way of finding out if the pending table is
* empty as we can race against the doorbell interrupt very
@@ -3542,6 +3717,8 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
val |= GICR_VPENDBASER_Valid;
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+ its_wait_vpt_parse_complete();
}
static void its_vpe_deschedule(struct its_vpe *vpe)
@@ -3619,9 +3796,11 @@ static void its_vpe_send_inv(struct irq_data *d)
void __iomem *rdbase;
/* Target the redistributor this VPE is currently known on */
+ raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
+ raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
} else {
its_vpe_send_cmd(vpe, its_send_inv);
}
@@ -3675,12 +3854,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
return 0;
}
+static int its_vpe_retrigger(struct irq_data *d)
+{
+ return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
+}
+
static struct irq_chip its_vpe_irq_chip = {
.name = "GICv4-vpe",
.irq_mask = its_vpe_mask_irq,
.irq_unmask = its_vpe_unmask_irq,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_vpe_set_affinity,
+ .irq_retrigger = its_vpe_retrigger,
.irq_set_irqchip_state = its_vpe_set_irqchip_state,
.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
};
@@ -3742,6 +3927,8 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+ its_wait_vpt_parse_complete();
}
static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
@@ -3782,8 +3969,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe)
val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
/* Target the redistributor this vPE is currently known on */
+ raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
gic_write_lpir(val, rdbase + GICR_INVALLR);
+
+ wait_for_syncr(rdbase);
+ raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
}
static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
@@ -3818,6 +4009,222 @@ static struct irq_chip its_vpe_4_1_irq_chip = {
.irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
};
+static void its_configure_sgi(struct irq_data *d, bool clear)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_desc desc;
+
+ desc.its_vsgi_cmd.vpe = vpe;
+ desc.its_vsgi_cmd.sgi = d->hwirq;
+ desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
+ desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
+ desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
+ desc.its_vsgi_cmd.clear = clear;
+
+ /*
+ * GICv4.1 allows us to send VSGI commands to any ITS as long as the
+ * destination VPE is mapped there. Since we map them eagerly at
+ * activation time, we're pretty sure the first GICv4.1 ITS will do.
+ */
+ its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
+}
+
+static void its_sgi_mask_irq(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ vpe->sgi_config[d->hwirq].enabled = false;
+ its_configure_sgi(d, false);
+}
+
+static void its_sgi_unmask_irq(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ vpe->sgi_config[d->hwirq].enabled = true;
+ its_configure_sgi(d, false);
+}
+
+static int its_sgi_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ /*
+ * There is no notion of affinity for virtual SGIs, at least
+ * not on the host (since they can only be targeting a vPE).
+ * Tell the kernel we've done whatever it asked for.
+ */
+ irq_data_update_effective_affinity(d, mask_val);
+ return IRQ_SET_MASK_OK;
+}
+
+static int its_sgi_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (state) {
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its = find_4_1_its();
+ u64 val;
+
+ val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
+ val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
+ writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
+ } else {
+ its_configure_sgi(d, true);
+ }
+
+ return 0;
+}
+
+static int its_sgi_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool *val)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ void __iomem *base;
+ unsigned long flags;
+ u32 count = 1000000; /* 1s! */
+ u32 status;
+ int cpu;
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ /*
+ * Locking galore! We can race against two different events:
+ *
+ * - Concurrent vPE affinity change: we must make sure it cannot
+ * happen, or we'll talk to the wrong redistributor. This is
+ * identical to what happens with vLPIs.
+ *
+ * - Concurrent VSGIPENDR access: As it involves accessing two
+ * MMIO registers, this must be made atomic one way or another.
+ */
+ cpu = vpe_to_cpuid_lock(vpe, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
+ writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
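+	/*
+	 * Writing the vPE ID to GICR_VSGIR kicks off the sampling; poll
+	 * GICR_VSGIPENDR until BUSY clears, at which point the pending
+	 * bits are valid.
+	 */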
+ do {
+ status = readl_relaxed(base + GICR_VSGIPENDR);
+ if (!(status & GICR_VSGIPENDR_BUSY))
+ goto out;
+
+ count--;
+ if (!count) {
+ pr_err_ratelimited("Unable to get SGI status\n");
+ goto out;
+ }
+ cpu_relax();
+ udelay(1);
+ } while (count);
+
+out:
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ vpe_to_cpuid_unlock(vpe, flags);
+
+ if (!count)
+ return -ENXIO;
+
+ *val = !!(status & (1 << d->hwirq));
+
+ return 0;
+}
+
+static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case PROP_UPDATE_VSGI:
+ vpe->sgi_config[d->hwirq].priority = info->priority;
+ vpe->sgi_config[d->hwirq].group = info->group;
+ its_configure_sgi(d, false);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_sgi_irq_chip = {
+ .name = "GICv4.1-sgi",
+ .irq_mask = its_sgi_mask_irq,
+ .irq_unmask = its_sgi_unmask_irq,
+ .irq_set_affinity = its_sgi_set_affinity,
+ .irq_set_irqchip_state = its_sgi_set_irqchip_state,
+ .irq_get_irqchip_state = its_sgi_get_irqchip_state,
+ .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
+};
+
+static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct its_vpe *vpe = args;
+ int i;
+
+ /* Yes, we do want 16 SGIs */
+ WARN_ON(nr_irqs != 16);
+
+ for (i = 0; i < 16; i++) {
+ vpe->sgi_config[i].priority = 0;
+ vpe->sgi_config[i].enabled = false;
+ vpe->sgi_config[i].group = false;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+ &its_sgi_irq_chip, vpe);
+ irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
+ }
+
+ return 0;
+}
+
+static void its_sgi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* Nothing to do */
+}
+
+static int its_sgi_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool reserve)
+{
+ /* Write out the initial SGI configuration */
+ its_configure_sgi(d, false);
+ return 0;
+}
+
+static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ /*
+ * The VSGI command is awkward:
+ *
+ * - To change the configuration, CLEAR must be set to false,
+ * leaving the pending bit unchanged.
+ * - To clear the pending bit, CLEAR must be set to true, leaving
+ * the configuration unchanged.
+ *
+ * You just can't do both at once, hence the two commands below.
+ */
+ vpe->sgi_config[d->hwirq].enabled = false;
+ its_configure_sgi(d, false);
+ its_configure_sgi(d, true);
+}
+
+static const struct irq_domain_ops its_sgi_domain_ops = {
+ .alloc = its_sgi_irq_domain_alloc,
+ .free = its_sgi_irq_domain_free,
+ .activate = its_sgi_irq_domain_activate,
+ .deactivate = its_sgi_irq_domain_deactivate,
+};
+
static int its_vpe_id_alloc(void)
{
return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
@@ -3851,6 +4258,7 @@ static int its_vpe_init(struct its_vpe *vpe)
return -ENOMEM;
}
+ raw_spin_lock_init(&vpe->vpe_lock);
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
if (gic_rdists->has_rvpeid)
@@ -3960,8 +4368,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
- /* If we use the list map, we issue VMAPP on demand... */
- if (its_list_map)
+ /*
+ * If we use the list map, we issue VMAPP on demand... Unless
+ * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+ * so that VSGIs can work.
+ */
+ if (!gic_requires_eager_mapping())
return 0;
/* Map the VPE to the first possible CPU */
@@ -3987,10 +4399,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
struct its_node *its;
/*
- * If we use the list map, we unmap the VPE once no VLPIs are
- * associated with the VM.
+ * If we use the list map on GICv4.0, we unmap the VPE once no
+ * VLPIs are associated with the VM.
*/
- if (its_list_map)
+ if (!gic_requires_eager_mapping())
return;
list_for_each_entry(its, &its_nodes, entry) {
@@ -4404,7 +4816,7 @@ static int __init its_probe_one(struct resource *res,
struct page *page;
int err;
- its_base = ioremap(res->start, resource_size(res));
+ its_base = ioremap(res->start, SZ_64K);
if (!its_base) {
pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
return -ENOMEM;
@@ -4455,6 +4867,13 @@ static int __init its_probe_one(struct resource *res,
if (is_v4_1(its)) {
u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+
+ its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
+ if (!its->sgir_base) {
+ err = -ENOMEM;
+ goto out_free_its;
+ }
+
its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
@@ -4468,7 +4887,7 @@ static int __init its_probe_one(struct resource *res,
get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
err = -ENOMEM;
- goto out_free_its;
+ goto out_unmap_sgir;
}
its->cmd_base = (void *)page_address(page);
its->cmd_write = its->cmd_base;
@@ -4535,6 +4954,9 @@ out_free_tables:
its_free_tables(its);
out_free_cmd:
free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+out_unmap_sgir:
+ if (its->sgir_base)
+ iounmap(its->sgir_base);
out_free_its:
kfree(its);
out_unmap:
@@ -4818,6 +5240,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct device_node *of_node;
struct its_node *its;
bool has_v4 = false;
+ bool has_v4_1 = false;
int err;
gic_rdists = rdists;
@@ -4838,12 +5261,25 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
if (err)
return err;
- list_for_each_entry(its, &its_nodes, entry)
+ list_for_each_entry(its, &its_nodes, entry) {
has_v4 |= is_v4(its);
+ has_v4_1 |= is_v4_1(its);
+ }
+
+ /* Don't bother with inconsistent systems */
+ if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
+ rdists->has_rvpeid = false;
if (has_v4 & rdists->has_vlpis) {
+ const struct irq_domain_ops *sgi_ops;
+
+ if (has_v4_1)
+ sgi_ops = &its_sgi_domain_ops;
+ else
+ sgi_ops = NULL;
+
if (its_init_vpe_domain() ||
- its_init_v4(parent_domain, &its_vpe_domain_ops)) {
+ its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
rdists->has_vlpis = false;
pr_err("ITS: Disabling GICv4 support\n");
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1eec9d4649d5..d7006ef18a0d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -724,6 +724,7 @@ static void __init gic_dist_init(void)
unsigned int i;
u64 affinity;
void __iomem *base = gic_data.dist_base;
+ u32 val;
/* Disable the distributor */
writel_relaxed(0, base + GICD_CTLR);
@@ -756,9 +757,14 @@ static void __init gic_dist_init(void)
/* Now do the common stuff, and wait for the distributor to drain */
gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
+ val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+ if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+ pr_info("Enabling SGIs without active state\n");
+ val |= GICD_CTLR_nASSGIreq;
+ }
+
/* Enable distributor with ARE, Group1 */
- writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
- base + GICD_CTLR);
+ writel_relaxed(val, base + GICD_CTLR);
/*
* Set all global interrupts to the boot CPU only. ARE must be
@@ -829,6 +835,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
typer = gic_read_typer(ptr + GICR_TYPER);
if ((typer >> 32) == aff) {
u64 offset = ptr - region->redist_base;
+ raw_spin_lock_init(&gic_data_rdist()->rd_lock);
gic_data_rdist_rd_base() = ptr;
gic_data_rdist()->phys_base = region->phys_base + offset;
@@ -866,6 +873,7 @@ static int __gic_update_rdist_properties(struct redist_region *region,
gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
gic_data.rdists.has_rvpeid);
+ gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
/* Detect non-sensical configurations */
if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
@@ -886,10 +894,11 @@ static void gic_update_rdist_properties(void)
if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
gic_data.ppi_nr = 0;
pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
- pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
- !gic_data.rdists.has_vlpis ? "no " : "",
- !gic_data.rdists.has_direct_lpi ? "no " : "",
- !gic_data.rdists.has_rvpeid ? "no " : "");
+ if (gic_data.rdists.has_vlpis)
+ pr_info("GICv4 features: %s%s%s\n",
+ gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
+ gic_data.rdists.has_rvpeid ? "RVPEID " : "",
+ gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}
/* Check whether it's a single security state view */
@@ -1609,17 +1618,19 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
- irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
+ gic_data.rdists.has_vpend_valid_dirty = true;
if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
err = -ENOMEM;
goto out_free;
}
+ irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
+
gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
pr_info("Distributor has %sRange Selector support\n",
gic_data.has_rss ? "" : "no ");
@@ -1785,6 +1796,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
gic_v3_kvm_info.vcpu = r;
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+ gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
gic_set_kvm_info(&gic_v3_kvm_info);
}
@@ -2100,6 +2112,7 @@ static void __init gic_acpi_setup_kvm_info(void)
}
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+ gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
gic_set_kvm_info(&gic_v3_kvm_info);
}
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 45969927cc81..0c18714ae13e 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -85,6 +85,53 @@
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
+static const struct irq_domain_ops *sgi_domain_ops;
+
+static bool has_v4_1(void)
+{
+ return !!sgi_domain_ops;
+}
+
+static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
+{
+ char *name;
+ int sgi_base;
+
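+	/* Setting a SWIER bit fires the event line again in software */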
+ if (!has_v4_1())
+ return 0;
+
+ name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
+ if (!name)
+ goto err;
+
+ vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
+ if (!vpe->fwnode)
+ goto err;
+
+ kfree(name);
+ name = NULL;
+
+ vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
+ sgi_domain_ops, vpe);
+ if (!vpe->sgi_domain)
+ goto err;
+
+ sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
+ NUMA_NO_NODE, vpe,
+ false, NULL);
+ if (sgi_base <= 0)
+ goto err;
+
+ return 0;
+
+err:
+ if (vpe->sgi_domain)
+ irq_domain_remove(vpe->sgi_domain);
+ if (vpe->fwnode)
+ irq_domain_free_fwnode(vpe->fwnode);
+ kfree(name);
+ return -ENOMEM;
+}
int its_alloc_vcpu_irqs(struct its_vm *vm)
{
@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
if (vpe_base_irq <= 0)
goto err;
- for (i = 0; i < vm->nr_vpes; i++)
+ for (i = 0; i < vm->nr_vpes; i++) {
+ int ret;
vm->vpes[i]->irq = vpe_base_irq + i;
+ ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
+ if (ret)
+ goto err;
+ }
return 0;
@@ -126,8 +178,28 @@ err:
return -ENOMEM;
}
+static void its_free_sgi_irqs(struct its_vm *vm)
+{
+ int i;
+
+ if (!has_v4_1())
+ return;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
+
+ if (WARN_ON(!irq))
+ continue;
+
+ irq_domain_free_irqs(irq, 16);
+ irq_domain_remove(vm->vpes[i]->sgi_domain);
+ irq_domain_free_fwnode(vm->vpes[i]->fwnode);
+ }
+}
+
void its_free_vcpu_irqs(struct its_vm *vm)
{
+ its_free_sgi_irqs(vm);
irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
irq_domain_remove(vm->domain);
irq_domain_free_fwnode(vm->fwnode);
@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
return irq_set_vcpu_affinity(vpe->irq, info);
}
-int its_schedule_vpe(struct its_vpe *vpe, bool on)
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
+{
+ struct irq_desc *desc = irq_to_desc(vpe->irq);
+ struct its_cmd_info info = { };
+ int ret;
+
+ WARN_ON(preemptible());
+
+ info.cmd_type = DESCHEDULE_VPE;
+ if (has_v4_1()) {
+ /* GICv4.1 can directly deal with doorbells */
+ info.req_db = db;
+ } else {
+ /* Undo the nested disable_irq() calls... */
+ while (db && irqd_irq_disabled(&desc->irq_data))
+ enable_irq(vpe->irq);
+ }
+
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->resident = false;
+
+ return ret;
+}
+
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
{
- struct its_cmd_info info;
+ struct its_cmd_info info = { };
int ret;
WARN_ON(preemptible());
- info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+ info.cmd_type = SCHEDULE_VPE;
+ if (has_v4_1()) {
+ info.g0en = g0en;
+ info.g1en = g1en;
+ } else {
+ /* Disable the doorbell, as we're about to enter the guest */
+ disable_irq_nosync(vpe->irq);
+ }
ret = its_send_vpe_cmd(vpe, &info);
if (!ret)
- vpe->resident = on;
+ vpe->resident = true;
return ret;
}
@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
return irq_set_vcpu_affinity(irq, &info);
}
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+int its_prop_update_vsgi(int irq, u8 priority, bool group)
+{
+ struct its_cmd_info info = {
+ .cmd_type = PROP_UPDATE_VSGI,
+ {
+ .priority = priority,
+ .group = group,
+ },
+ };
+
+ return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain,
+ const struct irq_domain_ops *vpe_ops,
+ const struct irq_domain_ops *sgi_ops)
{
if (domain) {
pr_info("ITS: Enabling GICv4 support\n");
gic_domain = domain;
- vpe_domain_ops = ops;
+ vpe_domain_ops = vpe_ops;
+ sgi_domain_ops = sgi_ops;
return 0;
}
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index d000870d9b6b..b6f6aa7b2862 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -268,15 +268,6 @@ static void init_8259A(int auto_eoi)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
- .handler = no_action,
- .name = "cascade",
- .flags = IRQF_NO_THREAD,
-};
-
static struct resource pic1_io_resource = {
.name = "pic1",
.start = PIC_MASTER_CMD,
@@ -311,6 +302,10 @@ static const struct irq_domain_ops i8259A_ops = {
*/
struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
{
+ /*
+ * PIC_CASCADE_IR is the cascade interrupt to the second interrupt controller
+ */
+ int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR;
struct irq_domain *domain;
insert_resource(&ioport_resource, &pic1_io_resource);
@@ -323,7 +318,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
if (!domain)
panic("Failed to add i8259 IRQ domain");
- setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+ if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to register cascade interrupt\n");
register_syscore_ops(&i8259_syscore_ops);
return domain;
}
diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
index 6d05cefe9d79..7a7222d4c19c 100644
--- a/drivers/irqchip/irq-ingenic-tcu.c
+++ b/drivers/irqchip/irq-ingenic-tcu.c
@@ -180,3 +180,4 @@ err_free_tcu:
IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
+IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index c5589ee0dfb3..9f3da4260ca6 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -58,11 +58,6 @@ static irqreturn_t intc_cascade(int irq, void *data)
return IRQ_HANDLED;
}
-static struct irqaction intc_cascade_action = {
- .handler = intc_cascade,
- .name = "SoC intc cascade interrupt",
-};
-
static int __init ingenic_intc_of_init(struct device_node *node,
unsigned num_chips)
{
@@ -130,7 +125,9 @@ static int __init ingenic_intc_of_init(struct device_node *node,
irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
- setup_irq(parent_irq, &intc_cascade_action);
+ if (request_irq(parent_irq, intc_cascade, 0,
+ "SoC intc cascade interrupt", NULL))
+ pr_err("Failed to register SoC intc cascade interrupt\n");
return 0;
out_domain_remove:
diff --git a/drivers/irqchip/irq-loongson-htpic.c b/drivers/irqchip/irq-loongson-htpic.c
new file mode 100644
index 000000000000..dd018c22ea83
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-htpic.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson HTPIC IRQ support
+ */
+
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/i8259.h>
+
+#define HTPIC_MAX_PARENT_IRQ 4
+#define HTINT_NUM_VECTORS 8
+#define HTINT_EN_OFF 0x20
+
+struct loongson_htpic {
+ void __iomem *base;
+ struct irq_domain *domain;
+};
+
+static struct loongson_htpic *htpic;
+
+static void htpic_irq_dispatch(struct irq_desc *desc)
+{
+ struct loongson_htpic *priv = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ uint32_t pending;
+
+ chained_irq_enter(chip, desc);
+ pending = readl(priv->base);
+ /* Ack all IRQs at once, otherwise an IRQ flood might happen */
+ writel(pending, priv->base);
+
+ if (!pending)
+ spurious_interrupt();
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ if (unlikely(bit > 15)) {
+ spurious_interrupt();
+ break;
+ }
+
+ generic_handle_irq(irq_linear_revmap(priv->domain, bit));
+ pending &= ~BIT(bit);
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static void htpic_reg_init(void)
+{
+ int i;
+
+ for (i = 0; i < HTINT_NUM_VECTORS; i++) {
+ uint32_t val;
+
+ /* Disable all HT Vectors */
+ writel(0x0, htpic->base + HTINT_EN_OFF + i * 0x4);
+ val = readl(htpic->base + i * 0x4);
+ /* Ack all possible pending IRQs */
+ writel(GENMASK(31, 0), htpic->base + i * 0x4);
+ }
+
+ /* Enable 16 vectors for PIC */
+ writel(0xffff, htpic->base + HTINT_EN_OFF);
+}
+
+static void htpic_resume(void)
+{
+ htpic_reg_init();
+}
+
+struct syscore_ops htpic_syscore_ops = {
+ .resume = htpic_resume,
+};
+
+int __init htpic_of_init(struct device_node *node, struct device_node *parent)
+{
+ unsigned int parent_irq[4];
+ int i, err;
+ int num_parents = 0;
+
+ if (htpic) {
+ pr_err("loongson-htpic: Only one HTPIC is allowed in the system\n");
+ return -ENODEV;
+ }
+
+ htpic = kzalloc(sizeof(*htpic), GFP_KERNEL);
+ if (!htpic) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ htpic->base = of_iomap(node, 0);
+ if (!htpic->base) {
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ htpic->domain = __init_i8259_irqs(node);
+ if (!htpic->domain) {
+ pr_err("loongson-htpic: Failed to initialize i8259 IRQs\n");
+ err = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ /* Interrupts may come from any of the 4 interrupt lines */
+ for (i = 0; i < HTPIC_MAX_PARENT_IRQ; i++) {
+ parent_irq[i] = irq_of_parse_and_map(node, i);
+ if (parent_irq[i] <= 0)
+ break;
+
+ num_parents++;
+ }
+
+ if (!num_parents) {
+ pr_err("loongson-htpic: Failed to get parent irqs\n");
+ err = -ENODEV;
+ goto out_remove_domain;
+ }
+
+ htpic_reg_init();
+
+ for (i = 0; i < num_parents; i++) {
+ irq_set_chained_handler_and_data(parent_irq[i],
+ htpic_irq_dispatch, htpic);
+ }
+
+ register_syscore_ops(&htpic_syscore_ops);
+
+ return 0;
+
+out_remove_domain:
+ irq_domain_remove(htpic->domain);
+out_iounmap:
+ iounmap(htpic->base);
+out_free:
+ kfree(htpic);
+ return err;
+}
+
+IRQCHIP_DECLARE(loongson_htpic, "loongson,htpic-1.0", htpic_of_init);
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
new file mode 100644
index 000000000000..63b61474a0cc
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson Local IO Interrupt Controller support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/irqchip/chained_irq.h>
+
+#include <boot_param.h>
+
+#define LIOINTC_CHIP_IRQ 32
+#define LIOINTC_NUM_PARENT 4
+
+#define LIOINTC_INTC_CHIP_START 0x20
+
+#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20)
+#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
+#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
+#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
+#define LIOINTC_REG_INTC_POL (LIOINTC_INTC_CHIP_START + 0x10)
+#define LIOINTC_REG_INTC_EDGE (LIOINTC_INTC_CHIP_START + 0x14)
+
+#define LIOINTC_SHIFT_INTx 4
+
+#define LIOINTC_ERRATA_IRQ 10
+
+struct liointc_handler_data {
+ struct liointc_priv *priv;
+ u32 parent_int_map;
+};
+
+struct liointc_priv {
+ struct irq_chip_generic *gc;
+ struct liointc_handler_data handler[LIOINTC_NUM_PARENT];
+ u8 map_cache[LIOINTC_CHIP_IRQ];
+ bool has_lpc_irq_errata;
+};
+
+static void liointc_chained_handle_irq(struct irq_desc *desc)
+{
+ struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_chip_generic *gc = handler->priv->gc;
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+
+ pending = readl(gc->reg_base + LIOINTC_REG_INTC_STATUS);
+
+ if (!pending) {
+ /* Always blame the LPC IRQ if we have that erratum */
+ if (handler->priv->has_lpc_irq_errata &&
+ (handler->parent_int_map & ~gc->mask_cache &
+ BIT(LIOINTC_ERRATA_IRQ)))
+ pending = BIT(LIOINTC_ERRATA_IRQ);
+ else
+ spurious_interrupt();
+ }
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_irq(irq_find_mapping(gc->domain, bit));
+ pending &= ~BIT(bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void liointc_set_bit(struct irq_chip_generic *gc,
+ unsigned int offset,
+ u32 mask, bool set)
+{
+ if (set)
+ writel(readl(gc->reg_base + offset) | mask,
+ gc->reg_base + offset);
+ else
+ writel(readl(gc->reg_base + offset) & ~mask,
+ gc->reg_base + offset);
+}
+
+static int liointc_set_type(struct irq_data *data, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ u32 mask = data->mask;
+ unsigned long flags;
+
+ irq_gc_lock_irqsave(gc, flags);
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+ irq_gc_unlock_irqrestore(gc, flags);
+
+ irqd_set_trigger_type(data, type);
+ return 0;
+}
+
+static void liointc_resume(struct irq_chip_generic *gc)
+{
+ struct liointc_priv *priv = gc->private;
+ unsigned long flags;
+ int i;
+
+ irq_gc_lock_irqsave(gc, flags);
+ /* Disable all at first */
+ writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
+ /* Revert map cache */
+ for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
+ writeb(priv->map_cache[i], gc->reg_base + i);
+ /* Revert mask cache */
+ writel(~gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
+ irq_gc_unlock_irqrestore(gc, flags);
+}
+
+static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
+
+int __init liointc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ struct irq_chip_type *ct;
+ struct liointc_priv *priv;
+ void __iomem *base;
+ u32 of_parent_int_map[LIOINTC_NUM_PARENT];
+ int parent_irq[LIOINTC_NUM_PARENT];
+ bool have_parent = FALSE;
+ int sz, i, err = 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ err = -ENODEV;
+ goto out_free_priv;
+ }
+
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
+ parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
+ if (parent_irq[i] > 0)
+ have_parent = TRUE;
+ }
+ if (!have_parent) {
+ err = -ENODEV;
+ goto out_iounmap;
+ }
+
+ sz = of_property_read_variable_u32_array(node,
+ "loongson,parent_int_map",
+ &of_parent_int_map[0],
+ LIOINTC_NUM_PARENT,
+ LIOINTC_NUM_PARENT);
+ if (sz < 4) {
+ pr_err("loongson-liointc: No parent_int_map\n");
+ err = -ENODEV;
+ goto out_iounmap;
+ }
+
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++)
+ priv->handler[i].parent_int_map = of_parent_int_map[i];
+
+ /* Setup IRQ domain */
+ domain = irq_domain_add_linear(node, 32,
+ &irq_generic_chip_ops, priv);
+ if (!domain) {
+ pr_err("loongson-liointc: cannot add IRQ domain\n");
+ err = -EINVAL;
+ goto out_iounmap;
+ }
+
+ err = irq_alloc_domain_generic_chips(domain, 32, 1,
+ node->full_name, handle_level_irq,
+ IRQ_NOPROBE, 0, 0);
+ if (err) {
+ pr_err("loongson-liointc: unable to register IRQ domain\n");
+ goto out_free_domain;
+ }
+
+
+ /* Disable all IRQs */
+ writel(0xffffffff, base + LIOINTC_REG_INTC_DISABLE);
+ /* Set to level triggered */
+ writel(0x0, base + LIOINTC_REG_INTC_EDGE);
+
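+	/*
+	 * Each per-IRQ route register is one byte: the high nibble selects
+	 * the parent INTx line, the low nibble selects the target core.
+	 */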
+ /* Generate parent INT part of map cache */
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
+ u32 pending = priv->handler[i].parent_int_map;
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ priv->map_cache[bit] = BIT(i) << LIOINTC_SHIFT_INTx;
+ pending &= ~BIT(bit);
+ }
+ }
+
+ for (i = 0; i < LIOINTC_CHIP_IRQ; i++) {
+ /* Generate core part of map cache */
+ priv->map_cache[i] |= BIT(loongson_sysconf.boot_cpu_id);
+ writeb(priv->map_cache[i], base + i);
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->private = priv;
+ gc->reg_base = base;
+ gc->domain = domain;
+ gc->resume = liointc_resume;
+
+ ct = gc->chip_types;
+ ct->regs.enable = LIOINTC_REG_INTC_ENABLE;
+ ct->regs.disable = LIOINTC_REG_INTC_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+ ct->chip.irq_set_type = liointc_set_type;
+
+ gc->mask_cache = 0xffffffff;
+ priv->gc = gc;
+
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
+ if (parent_irq[i] <= 0)
+ continue;
+
+ priv->handler[i].priv = priv;
+ irq_set_chained_handler_and_data(parent_irq[i],
+ liointc_chained_handle_irq, &priv->handler[i]);
+ }
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(domain);
+out_iounmap:
+ iounmap(base);
+out_free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init);
+IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init);
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 6b566bba263b..ff7627b57772 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -220,10 +220,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
return 0;
}
+static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ platform_msi_domain_free(domain, virq, nr_irqs);
+}
+
static const struct irq_domain_ops mbigen_domain_ops = {
.translate = mbigen_domain_translate,
.alloc = mbigen_irq_domain_alloc,
- .free = irq_domain_free_irqs_common,
+ .free = mbigen_irq_domain_free,
};
static int mbigen_of_create_domain(struct platform_device *pdev,
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index ccc7f823911b..bc7aebcc96e9 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -144,12 +144,17 @@ struct meson_gpio_irq_controller {
static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
unsigned int reg, u32 mask, u32 val)
{
+ unsigned long flags;
u32 tmp;
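+	/* Serialize read-modify-write updates of the shared registers */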
+ spin_lock_irqsave(&ctl->lock, flags);
+
tmp = readl_relaxed(ctl->base + reg);
tmp &= ~mask;
tmp |= val;
writel_relaxed(tmp, ctl->base + reg);
+
+ spin_unlock_irqrestore(&ctl->lock, flags);
}
static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
@@ -196,14 +201,15 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
unsigned long hwirq,
u32 **channel_hwirq)
{
+ unsigned long flags;
unsigned int idx;
- spin_lock(&ctl->lock);
+ spin_lock_irqsave(&ctl->lock, flags);
/* Find a free channel */
idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
if (idx >= NUM_CHANNEL) {
- spin_unlock(&ctl->lock);
+ spin_unlock_irqrestore(&ctl->lock, flags);
pr_err("No channel available\n");
return -ENOSPC;
}
@@ -211,6 +217,8 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
/* Mark the channel as used */
set_bit(idx, ctl->channel_map);
+ spin_unlock_irqrestore(&ctl->lock, flags);
+
/*
* Set up the mux of the channel to route the signal of the pad
* to the appropriate input of the GIC
@@ -225,8 +233,6 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
*/
*channel_hwirq = &(ctl->channel_irqs[idx]);
- spin_unlock(&ctl->lock);
-
pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
hwirq, idx, **channel_hwirq);
@@ -287,13 +293,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
val |= REG_EDGE_POL_LOW(params, idx);
}
- spin_lock(&ctl->lock);
-
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
REG_EDGE_POL_MASK(params, idx), val);
- spin_unlock(&ctl->lock);
-
return 0;
}
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 547045d89c4b..91adf771f185 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -66,7 +66,7 @@ struct mvebu_icu_irq_data {
unsigned int type;
};
-DEFINE_STATIC_KEY_FALSE(legacy_bindings);
+static DEFINE_STATIC_KEY_FALSE(legacy_bindings);
static void mvebu_icu_init(struct mvebu_icu *icu,
struct mvebu_icu_msi_data *msi_data,
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 6e5e3172796b..3819185bfd02 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -461,7 +461,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
}
i->iomem = devm_ioremap(dev, io[k]->start,
- resource_size(io[k]));
+ resource_size(io[k]));
if (!i->iomem) {
dev_err(dev, "failed to remap IOMEM\n");
ret = -ENXIO;
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index aa4af886e43a..d0a71febdadc 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -4,6 +4,7 @@
* Copyright (C) 2018 Christoph Hellwig
*/
#define pr_fmt(fmt) "plic: " fmt
+#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -55,7 +56,14 @@
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
-static void __iomem *plic_regs;
+#define PLIC_DISABLE_THRESHOLD 0x7
+#define PLIC_ENABLE_THRESHOLD 0
+
+struct plic_priv {
+ struct cpumask lmask;
+ struct irq_domain *irqdomain;
+ void __iomem *regs;
+};
struct plic_handler {
bool present;
@@ -66,6 +74,7 @@ struct plic_handler {
*/
raw_spinlock_t enable_lock;
void __iomem *enable_base;
+ struct plic_priv *priv;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
@@ -84,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler,
}
static inline void plic_irq_toggle(const struct cpumask *mask,
- int hwirq, int enable)
+ struct irq_data *d, int enable)
{
int cpu;
+ struct plic_priv *priv = irq_get_chip_data(d->irq);
- writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+ writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
- if (handler->present)
- plic_toggle(handler, hwirq, enable);
+ if (handler->present &&
+ cpumask_test_cpu(cpu, &handler->priv->lmask))
+ plic_toggle(handler, d->hwirq, enable);
}
}
static void plic_irq_unmask(struct irq_data *d)
{
- unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
- cpu_online_mask);
+ struct cpumask amask;
+ unsigned int cpu;
+ struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+ cpumask_and(&amask, &priv->lmask, cpu_online_mask);
+ cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+ &amask);
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
return;
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ plic_irq_toggle(cpumask_of(cpu), d, 1);
}
static void plic_irq_mask(struct irq_data *d)
{
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+ plic_irq_toggle(&priv->lmask, d, 0);
}
#ifdef CONFIG_SMP
@@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
unsigned int cpu;
+ struct cpumask amask;
+ struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+ cpumask_and(&amask, &priv->lmask, mask_val);
if (force)
- cpu = cpumask_first(mask_val);
+ cpu = cpumask_first(&amask);
else
- cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ cpu = cpumask_any_and(&amask, cpu_online_mask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ plic_irq_toggle(&priv->lmask, d, 0);
+ plic_irq_toggle(cpumask_of(cpu), d, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -187,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
.free = irq_domain_free_irqs_top,
};
-static struct irq_domain *plic_irqdomain;
-
/*
* Handling an interrupt is a two-step process: first you claim the interrupt
* by reading the claim register, then you complete the interrupt by writing
@@ -205,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs)
csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) {
- int irq = irq_find_mapping(plic_irqdomain, hwirq);
+ int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
if (unlikely(irq <= 0))
pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -230,20 +250,48 @@ static int plic_find_hart_id(struct device_node *node)
return -1;
}
+static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
+{
+ /* priority must be > threshold to trigger an interrupt */
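+	/*
+	 * Sources are programmed with priority 1 in plic_irq_toggle(), so
+	 * PLIC_DISABLE_THRESHOLD (7) gates them all off, while
+	 * PLIC_ENABLE_THRESHOLD (0) lets any enabled source through.
+	 */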
+ writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
+}
+
+static int plic_dying_cpu(unsigned int cpu)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ csr_clear(CSR_IE, IE_EIE);
+ plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+
+ return 0;
+}
+
+static int plic_starting_cpu(unsigned int cpu)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ csr_set(CSR_IE, IE_EIE);
+ plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
+
+ return 0;
+}
+
static int __init plic_init(struct device_node *node,
struct device_node *parent)
{
int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
+ struct plic_priv *priv;
- if (plic_regs) {
- pr_warn("PLIC already present.\n");
- return -ENXIO;
- }
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- plic_regs = of_iomap(node, 0);
- if (WARN_ON(!plic_regs))
- return -EIO;
+ priv->regs = of_iomap(node, 0);
+ if (WARN_ON(!priv->regs)) {
+ error = -EIO;
+ goto out_free_priv;
+ }
error = -EINVAL;
of_property_read_u32(node, "riscv,ndev", &nr_irqs);
@@ -257,9 +305,9 @@ static int __init plic_init(struct device_node *node,
goto out_iounmap;
error = -ENOMEM;
- plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
- &plic_irqdomain_ops, NULL);
- if (WARN_ON(!plic_irqdomain))
+ priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+ &plic_irqdomain_ops, priv);
+ if (WARN_ON(!priv->irqdomain))
goto out_iounmap;
for (i = 0; i < nr_contexts; i++) {
@@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node,
struct plic_handler *handler;
irq_hw_number_t hwirq;
int cpu, hartid;
- u32 threshold = 0;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -301,32 +348,36 @@ static int __init plic_init(struct device_node *node,
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
pr_warn("handler already present for context %d.\n", i);
- threshold = 0xffffffff;
+ plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done;
}
+ cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true;
handler->hart_base =
- plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
raw_spin_lock_init(&handler->enable_lock);
handler->enable_base =
- plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
-
+ priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+ handler->priv = priv;
done:
- /* priority must be > threshold to trigger an interrupt */
- writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
nr_handlers++;
}
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ "irqchip/sifive/plic:starting",
+ plic_starting_cpu, plic_dying_cpu);
pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;
out_iounmap:
- iounmap(plic_regs);
+ iounmap(priv->regs);
+out_free_priv:
+ kfree(priv);
return error;
}
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index e00f2fa27f00..faa8482c8246 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void)
unregister_syscore_ops(&stm32_exti_h_syscore_ops);
}
+static int stm32_exti_h_retrigger(struct irq_data *d)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(mask, base + stm32_bank->swier_ofst);
+
+ return 0;
+}
+
static struct irq_chip stm32_exti_h_chip = {
.name = "stm32-exti-h",
.irq_eoi = stm32_exti_h_eoi,
.irq_mask = stm32_exti_h_mask,
.irq_unmask = stm32_exti_h_unmask,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_retrigger = stm32_exti_h_retrigger,
.irq_set_type = stm32_exti_h_set_type,
.irq_set_wake = stm32_exti_h_set_wake,
.flags = IRQCHIP_MASK_ON_SUSPEND,
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 8f6e6b08eadf..7e3ebf6ed2cd 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -37,6 +37,7 @@
#define VINT_ENABLE_SET_OFFSET 0x0
#define VINT_ENABLE_CLR_OFFSET 0x8
#define VINT_STATUS_OFFSET 0x18
+#define VINT_STATUS_MASKED_OFFSET 0x20
/**
* struct ti_sci_inta_event_desc - Description of an event coming to
@@ -116,7 +117,7 @@ static void ti_sci_inta_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_desc_get_chip(desc), desc);
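+	/* Read the masked status so events masked at the VINT level are skipped */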
val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
- VINT_STATUS_OFFSET);
+ VINT_STATUS_MASKED_OFFSET);
for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 928858dada75..f1386733d3bc 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
static void fpga_irq_handle(struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
- u32 status = readl(f->base + IRQ_STATUS);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+ status = readl(f->base + IRQ_STATUS);
if (status == 0) {
do_bad_IRQ(desc);
- return;
+ goto out;
}
do {
@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status);
+
+out:
+ chained_irq_exit(chip, desc);
}
/*
@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
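+	/* Mask all sources before the cascade handler is wired up below */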
+ writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+ writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
/* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
- writel(clear_mask, base + IRQ_ENABLE_CLEAR);
- writel(clear_mask, base + FIQ_ENABLE_CLEAR);
-
/*
* On Versatile AB/PB, some secondary interrupts have a direct
* pass-thru to the primary controller for IRQs 20 and 22-31 which need
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index f3f20a3cff50..3c87d925f74c 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node,
void __iomem *regs;
u32 interrupt_mask = ~0;
u32 wakeup_mask = ~0;
-
- if (WARN(parent, "non-root VICs are not supported"))
- return -EINVAL;
+ int parent_irq;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
@@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node,
of_property_read_u32(node, "valid-mask", &interrupt_mask);
of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
+ parent_irq = of_irq_get(node, 0);
+ if (parent_irq < 0)
+ parent_irq = 0;
/*
* Passing 0 as first IRQ makes the simple domain allocate descriptors
*/
- __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
+ __vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
return 0;
}
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index e3043ded8973..1d3d273309bd 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -38,29 +38,31 @@ struct xintc_irq_chip {
void __iomem *base;
struct irq_domain *root_domain;
u32 intr_mask;
+ u32 nr_irq;
};
-static struct xintc_irq_chip *xintc_irqc;
+static struct xintc_irq_chip *primary_intc;
-static void xintc_write(int reg, u32 data)
+static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
{
if (static_branch_unlikely(&xintc_is_be))
- iowrite32be(data, xintc_irqc->base + reg);
+ iowrite32be(data, irqc->base + reg);
else
- iowrite32(data, xintc_irqc->base + reg);
+ iowrite32(data, irqc->base + reg);
}
-static unsigned int xintc_read(int reg)
+static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
{
if (static_branch_unlikely(&xintc_is_be))
- return ioread32be(xintc_irqc->base + reg);
+ return ioread32be(irqc->base + reg);
else
- return ioread32(xintc_irqc->base + reg);
+ return ioread32(irqc->base + reg);
}
static void intc_enable_or_unmask(struct irq_data *d)
{
- unsigned long mask = 1 << d->hwirq;
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+ unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d)
* acks the irq before calling the interrupt handler
*/
if (irqd_is_level_type(d))
- xintc_write(IAR, mask);
+ xintc_write(irqc, IAR, mask);
- xintc_write(SIE, mask);
+ xintc_write(irqc, SIE, mask);
}
static void intc_disable_or_mask(struct irq_data *d)
{
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
- xintc_write(CIE, 1 << d->hwirq);
+ xintc_write(irqc, CIE, BIT(d->hwirq));
}
static void intc_ack(struct irq_data *d)
{
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
- xintc_write(IAR, 1 << d->hwirq);
+ xintc_write(irqc, IAR, BIT(d->hwirq));
}
static void intc_mask_ack(struct irq_data *d)
{
- unsigned long mask = 1 << d->hwirq;
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+ unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
- xintc_write(CIE, mask);
- xintc_write(IAR, mask);
+ xintc_write(irqc, CIE, mask);
+ xintc_write(irqc, IAR, mask);
}
static struct irq_chip intc_dev = {
@@ -103,13 +110,28 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack,
};
+static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
+{
+ unsigned int irq = 0;
+ u32 hwirq;
+
+ hwirq = xintc_read(irqc, IVR);
+ if (hwirq != -1U)
+ irq = irq_find_mapping(irqc->root_domain, hwirq);
+
+ pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+ return irq;
+}
+
unsigned int xintc_get_irq(void)
{
- unsigned int hwirq, irq = -1;
+ unsigned int irq = -1;
+ u32 hwirq;
- hwirq = xintc_read(IVR);
+ hwirq = xintc_read(primary_intc, IVR);
if (hwirq != -1U)
- irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+ irq = irq_find_mapping(primary_intc->root_domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
@@ -118,15 +140,18 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
- if (xintc_irqc->intr_mask & (1 << hw)) {
+ struct xintc_irq_chip *irqc = d->host_data;
+
+ if (irqc->intr_mask & BIT(hw)) {
irq_set_chip_and_handler_name(irq, &intc_dev,
- handle_edge_irq, "edge");
+ handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {
irq_set_chip_and_handler_name(irq, &intc_dev,
- handle_level_irq, "level");
+ handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
+ irq_set_chip_data(irq, irqc);
return 0;
}
@@ -138,12 +163,14 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
static void xil_intc_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xintc_irq_chip *irqc;
u32 pending;
+ irqc = irq_data_get_irq_handler_data(&desc->irq_data);
chained_irq_enter(chip, desc);
do {
- pending = xintc_get_irq();
- if (pending == -1U)
+ pending = xintc_get_irq_local(irqc);
+ if (pending == 0)
break;
generic_handle_irq(pending);
} while (true);
@@ -153,28 +180,19 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
- u32 nr_irq;
- int ret, irq;
struct xintc_irq_chip *irqc;
-
- if (xintc_irqc) {
- pr_err("irq-xilinx: Multiple instances aren't supported\n");
- return -EINVAL;
- }
+ int ret, irq;
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
if (!irqc)
return -ENOMEM;
-
- xintc_irqc = irqc;
-
irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base);
- ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
if (ret < 0) {
pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
- goto err_alloc;
+ goto error;
}
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
@@ -183,34 +201,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}
- if (irqc->intr_mask >> nr_irq)
+ if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
- intc, nr_irq, irqc->intr_mask);
+ intc, irqc->nr_irq, irqc->intr_mask);
/*
* Disable all external interrupts until they are
* explicitly requested.
*/
- xintc_write(IER, 0);
+ xintc_write(irqc, IER, 0);
/* Acknowledge any pending interrupts just in case. */
- xintc_write(IAR, 0xffffffff);
+ xintc_write(irqc, IAR, 0xffffffff);
/* Turn on the Master Enable. */
- xintc_write(MER, MER_HIE | MER_ME);
- if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+ xintc_write(irqc, MER, MER_HIE | MER_ME);
+ if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
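+		/*
+		 * A mismatched readback means the controller is wired
+		 * big-endian: switch the accessors and program MER again.
+		 */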
static_branch_enable(&xintc_is_be);
- xintc_write(MER, MER_HIE | MER_ME);
+ xintc_write(irqc, MER, MER_HIE | MER_ME);
}
- irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+ irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
- goto err_alloc;
+ ret = -EINVAL;
+ goto error;
}
if (parent) {
@@ -222,16 +241,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
} else {
pr_err("irq-xilinx: interrupts property not in DT\n");
ret = -EINVAL;
- goto err_alloc;
+ goto error;
}
} else {
- irq_set_default_host(irqc->root_domain);
+ primary_intc = irqc;
+ irq_set_default_host(primary_intc->root_domain);
}
return 0;
-err_alloc:
- xintc_irqc = NULL;
+error:
+ iounmap(irqc->base);
kfree(irqc);
return ret;
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index abfe59284ff2..aa54bfcb0433 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -33,7 +33,7 @@ struct combiner {
int parent_irq;
u32 nirqs;
u32 nregs;
- struct combiner_reg regs[0];
+ struct combiner_reg regs[];
};
static inline int irq_nr(u32 reg, u32 bit)
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index e325e87c0593..11e8c7d8b6e8 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -743,10 +743,10 @@ check_send(struct isar_hw *isar, u8 rdm)
}
}
-const char *dmril[] = {"NO SPEED", "1200/75", "NODEF2", "75/1200", "NODEF4",
+static const char *dmril[] = {"NO SPEED", "1200/75", "NODEF2", "75/1200", "NODEF4",
"300", "600", "1200", "2400", "4800", "7200",
"9600nt", "9600t", "12000", "14400", "WRONG"};
-const char *dmrim[] = {"NO MOD", "NO DEF", "V32/V32b", "V22", "V21",
+static const char *dmrim[] = {"NO MOD", "NO DEF", "V32/V32b", "V22", "V21",
"Bell103", "V23", "Bell202", "V17", "V29", "V27ter"};
static void
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index d82f1dea3711..c664d84e1667 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -846,6 +846,17 @@ config LEDS_TPS6105X
It is a single boost converter primarily for white LEDs and
audio amplifiers.
+config LEDS_IP30
+ tristate "LED support for SGI Octane machines"
+ depends on LEDS_CLASS
+ depends on SGI_MFD_IOC3
+ help
+ This option enables support for the Red and White LEDs of
+ SGI Octane machines.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-ip30.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index d7e1107753fb..45235d5fb218 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -6,91 +6,92 @@ obj-$(CONFIG_LEDS_CLASS) += led-class.o
obj-$(CONFIG_LEDS_CLASS_FLASH) += led-class-flash.o
obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
-# LED Platform Drivers
+# LED Platform Drivers (keep this sorted, M-| sort)
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
+obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
+obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
-obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
+obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
+obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
+obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
+obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
+obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_CPCAP) += leds-cpcap.o
-obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
+obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
+obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
+obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
+obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
+obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
+obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
+obj-$(CONFIG_LEDS_IP30) += leds-ip30.o
+obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o
+obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o
+obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
+obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
obj-$(CONFIG_LEDS_LM3532) += leds-lm3532.o
obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
+obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
+obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o
+obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
obj-$(CONFIG_LEDS_LM3642) += leds-lm3642.o
-obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
-obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
-obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
-obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
-obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
-obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
-obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
-obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
-obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
-obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
+obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o
+obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_LP3952) += leds-lp3952.o
-obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o
+obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o
-obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
-obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o
-obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
-obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o
-obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
-obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
-obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
-obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
-obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
-obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
-obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
-obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
-obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
-obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
-obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
-obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
-obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
-obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
-obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
-obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
-obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o
obj-$(CONFIG_LEDS_MAX77693) += leds-max77693.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
-obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
-obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
-obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
+obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
-obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
-obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
-obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o
-obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
-obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
+obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_MLXREG) += leds-mlxreg.o
-obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
-obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o
obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o
-obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
+obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
+obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
+obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
+obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
+obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
+obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
+obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
+obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
+obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
+obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
+obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
+obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
+obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
-obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o
+obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
+obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
+obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o
-obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o
-obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
+obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o
obj-$(CONFIG_LEDS_TPS6105X) += leds-tps6105x.o
+obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
+obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
+obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
obj-$(CONFIG_LEDS_EL15203000) += leds-el15203000.o
+obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o
# LED Userspace Drivers
obj-$(CONFIG_LEDS_USER) += uleds.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 1fc40e8af75e..3363a6551a70 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -376,7 +376,7 @@ int led_classdev_register_ext(struct device *parent,
if (ret)
dev_warn(parent, "Led %s renamed to %s due to name collision",
- led_cdev->name, dev_name(led_cdev->dev));
+ proposed_name, dev_name(led_cdev->dev));
if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
ret = led_add_brightness_hw_changed(led_cdev);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index bd61a823d0ca..8bbaef5a2986 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -660,7 +660,6 @@ static int bd2802_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bd2802_led *led;
- struct bd2802_led_platform_data *pdata;
int ret, i;
led = devm_kzalloc(&client->dev, sizeof(struct bd2802_led), GFP_KERNEL);
@@ -668,7 +667,6 @@ static int bd2802_probe(struct i2c_client *client,
return -ENOMEM;
led->client = client;
- pdata = led->pdata = dev_get_platdata(&client->dev);
i2c_set_clientdata(client, led);
/*
diff --git a/drivers/leds/leds-ip30.c b/drivers/leds/leds-ip30.c
new file mode 100644
index 000000000000..d4ec7361c616
--- /dev/null
+++ b/drivers/leds/leds-ip30.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LED Driver for SGI Octane machines
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+
+#define IP30_LED_SYSTEM 0
+#define IP30_LED_FAULT 1
+
+struct ip30_led {
+ struct led_classdev cdev;
+ u32 __iomem *reg;
+};
+
+static void ip30led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct ip30_led *led = container_of(led_cdev, struct ip30_led, cdev);
+
+ writel(value, led->reg);
+}
+
+static int ip30led_create(struct platform_device *pdev, int num)
+{
+ struct resource *res;
+ struct ip30_led *data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, num);
+ if (!res)
+ return -EBUSY;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg))
+ return PTR_ERR(data->reg);
+
+ switch (num) {
+ case IP30_LED_SYSTEM:
+ data->cdev.name = "white:power";
+ break;
+ case IP30_LED_FAULT:
+ data->cdev.name = "red:fault";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data->cdev.brightness = readl(data->reg);
+ data->cdev.max_brightness = 1;
+ data->cdev.brightness_set = ip30led_set;
+
+ return devm_led_classdev_register(&pdev->dev, &data->cdev);
+}
+
+static int ip30led_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = ip30led_create(pdev, IP30_LED_SYSTEM);
+ if (ret < 0)
+ return ret;
+
+ return ip30led_create(pdev, IP30_LED_FAULT);
+}
+
+static struct platform_driver ip30led_driver = {
+ .probe = ip30led_probe,
+ .driver = {
+ .name = "ip30-leds",
+ },
+};
+
+module_platform_driver(ip30led_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI Octane LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ip30-leds");
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 6f29b8943913..cd768f991da1 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -44,7 +44,7 @@ struct is31fl32xx_priv {
const struct is31fl32xx_chipdef *cdef;
struct i2c_client *client;
unsigned int num_leds;
- struct is31fl32xx_led_data leds[0];
+ struct is31fl32xx_led_data leds[];
};
/**
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 188a57da981a..aa9bf8cda673 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -140,7 +140,7 @@ struct lm3532_led {
int ctrl_brt_pointer;
int num_leds;
int full_scale_current;
- int enabled:1;
+ unsigned int enabled:1;
u32 led_strings[LM3532_MAX_CONTROL_BANKS];
char label[LED_MAX_NAME_SIZE];
};
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index b71711aff8a3..872d26f9706a 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -246,7 +246,7 @@ static int lm3697_probe_dt(struct lm3697 *priv)
led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3697_MAX_LED_STRINGS) {
- dev_err(&priv->client->dev, "To many LED strings defined\n");
+ dev_err(&priv->client->dev, "Too many LED strings defined\n");
continue;
}
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 7c500dfdcfa3..538ca5755602 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -12,14 +12,38 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/leds.h>
#include <linux/module.h>
-#include <linux/platform_data/leds-kirkwood-ns2.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include "leds.h"
+enum ns2_led_modes {
+ NS_V2_LED_OFF,
+ NS_V2_LED_ON,
+ NS_V2_LED_SATA,
+};
+
+struct ns2_led_modval {
+ enum ns2_led_modes mode;
+ int cmd_level;
+ int slow_level;
+};
+
+struct ns2_led {
+ const char *name;
+ const char *default_trigger;
+ struct gpio_desc *cmd;
+ struct gpio_desc *slow;
+ int num_modes;
+ struct ns2_led_modval *modval;
+};
+
+struct ns2_led_platform_data {
+ int num_leds;
+ struct ns2_led *leds;
+};
+
/*
* The Network Space v2 dual-GPIO LED is wired to a CPLD. Three different LED
* modes are available: off, on and SATA activity blinking. The LED modes are
@@ -29,8 +53,8 @@
struct ns2_led_data {
struct led_classdev cdev;
- unsigned int cmd;
- unsigned int slow;
+ struct gpio_desc *cmd;
+ struct gpio_desc *slow;
bool can_sleep;
unsigned char sata; /* True when SATA mode active. */
rwlock_t rw_lock; /* Lock GPIOs. */
@@ -46,8 +70,8 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
int cmd_level;
int slow_level;
- cmd_level = gpio_get_value_cansleep(led_dat->cmd);
- slow_level = gpio_get_value_cansleep(led_dat->slow);
+ cmd_level = gpiod_get_value_cansleep(led_dat->cmd);
+ slow_level = gpiod_get_value_cansleep(led_dat->slow);
for (i = 0; i < led_dat->num_modes; i++) {
if (cmd_level == led_dat->modval[i].cmd_level &&
@@ -80,15 +104,15 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
write_lock_irqsave(&led_dat->rw_lock, flags);
if (!led_dat->can_sleep) {
- gpio_set_value(led_dat->cmd,
- led_dat->modval[i].cmd_level);
- gpio_set_value(led_dat->slow,
- led_dat->modval[i].slow_level);
+ gpiod_set_value(led_dat->cmd,
+ led_dat->modval[i].cmd_level);
+ gpiod_set_value(led_dat->slow,
+ led_dat->modval[i].slow_level);
goto exit_unlock;
}
- gpio_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level);
- gpio_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level);
+ gpiod_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level);
+ gpiod_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level);
exit_unlock:
write_unlock_irqrestore(&led_dat->rw_lock, flags);
@@ -176,26 +200,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
int ret;
enum ns2_led_modes mode;
- ret = devm_gpio_request_one(&pdev->dev, template->cmd,
- gpio_get_value_cansleep(template->cmd) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- template->name);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to setup command GPIO\n",
- template->name);
- return ret;
- }
-
- ret = devm_gpio_request_one(&pdev->dev, template->slow,
- gpio_get_value_cansleep(template->slow) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- template->name);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to setup slow GPIO\n",
- template->name);
- return ret;
- }
-
rwlock_init(&led_dat->rw_lock);
led_dat->cdev.name = template->name;
@@ -205,8 +209,8 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
led_dat->cdev.groups = ns2_led_groups;
led_dat->cmd = template->cmd;
led_dat->slow = template->slow;
- led_dat->can_sleep = gpio_cansleep(led_dat->cmd) |
- gpio_cansleep(led_dat->slow);
+ led_dat->can_sleep = gpiod_cansleep(led_dat->cmd) |
+ gpiod_cansleep(led_dat->slow);
if (led_dat->can_sleep)
led_dat->cdev.brightness_set_blocking = ns2_led_set_blocking;
else
@@ -261,17 +265,26 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
const char *string;
int i, num_modes;
struct ns2_led_modval *modval;
+ struct gpio_desc *gd;
- ret = of_get_named_gpio(child, "cmd-gpio", 0);
- if (ret < 0)
- goto err_node_put;
- led->cmd = ret;
- ret = of_get_named_gpio(child, "slow-gpio", 0);
- if (ret < 0)
- goto err_node_put;
- led->slow = ret;
ret = of_property_read_string(child, "label", &string);
led->name = (ret == 0) ? string : child->name;
+
+ gd = gpiod_get_from_of_node(child, "cmd-gpio", 0,
+ GPIOD_ASIS, led->name);
+ if (IS_ERR(gd)) {
+ ret = PTR_ERR(gd);
+ goto err_node_put;
+ }
+ led->cmd = gd;
+ gd = gpiod_get_from_of_node(child, "slow-gpio", 0,
+ GPIOD_ASIS, led->name);
+ if (IS_ERR(gd)) {
+ ret = PTR_ERR(gd);
+ goto err_node_put;
+ }
+ led->slow = gd;
+
ret = of_property_read_string(child, "linux,default-trigger",
&string);
if (ret == 0)
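
For context: the leds-ns2 hunks above replace legacy integer GPIO numbers (gpio_get_value_cansleep() and friends) with gpio_desc handles. A minimal sketch of the descriptor idiom follows; the device pointer and the "example" connection ID are invented for illustration, not taken from the driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_read_level(struct device *dev)
{
	struct gpio_desc *gd;

	/* Looks up "example-gpios" in the device's firmware node. */
	gd = devm_gpiod_get(dev, "example", GPIOD_ASIS);
	if (IS_ERR(gd))
		return PTR_ERR(gd);

	/* Descriptor accessors honour active-low polarity automatically. */
	return gpiod_get_value_cansleep(gd);
}
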
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 8b6965a563e9..6c8a724aac51 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -16,60 +16,55 @@
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
-#include <linux/leds_pwm.h>
#include <linux/slab.h>
+struct led_pwm {
+ const char *name;
+ const char *default_trigger;
+ u8 active_low;
+ unsigned int max_brightness;
+};
+
+struct led_pwm_platform_data {
+ int num_leds;
+ struct led_pwm *leds;
+};
+
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
+ struct pwm_state pwmstate;
unsigned int active_low;
- unsigned int period;
- int duty;
};
struct led_pwm_priv {
int num_leds;
- struct led_pwm_data leds[0];
+ struct led_pwm_data leds[];
};
-static void __led_pwm_set(struct led_pwm_data *led_dat)
-{
- int new_duty = led_dat->duty;
-
- pwm_config(led_dat->pwm, new_duty, led_dat->period);
-
- if (new_duty == 0)
- pwm_disable(led_dat->pwm);
- else
- pwm_enable(led_dat->pwm);
-}
-
static int led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
unsigned int max = led_dat->cdev.max_brightness;
- unsigned long long duty = led_dat->period;
+ unsigned long long duty = led_dat->pwmstate.period;
duty *= brightness;
do_div(duty, max);
if (led_dat->active_low)
- duty = led_dat->period - duty;
-
- led_dat->duty = duty;
-
- __led_pwm_set(led_dat);
+ duty = led_dat->pwmstate.period - duty;
- return 0;
+ led_dat->pwmstate.duty_cycle = duty;
+ led_dat->pwmstate.enabled = duty > 0;
+ return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
}
static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct fwnode_handle *fwnode)
{
struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
- struct pwm_args pargs;
int ret;
led_data->active_low = led->active_low;
@@ -93,17 +88,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
led_data->cdev.brightness_set_blocking = led_pwm_set;
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(led_data->pwm);
-
- pwm_get_args(led_data->pwm, &pargs);
-
- led_data->period = pargs.period;
- if (!led_data->period && (led->pwm_period_ns > 0))
- led_data->period = led->pwm_period_ns;
+ pwm_init_state(led_data->pwm, &led_data->pwmstate);
ret = devm_led_classdev_register(dev, &led_data->cdev);
if (ret == 0) {
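
For context: the leds-pwm conversion above moves from the legacy pwm_config()/pwm_enable()/pwm_disable() calls to the atomic PWM API, where one cached pwm_state is mutated and committed in a single call; pwm_init_state() seeds that state from DT/board arguments once at probe, so the period no longer needs a driver-private field. A minimal sketch, with the helper name invented:

#include <linux/pwm.h>

static int example_set_duty(struct pwm_device *pwm, struct pwm_state *state,
			    unsigned int duty_ns)
{
	state->duty_cycle = duty_ns;	/* duty expressed in nanoseconds */
	state->enabled = duty_ns > 0;	/* a zero duty also disables the PWM */

	/* One call commits period, duty cycle and enable state together. */
	return pwm_apply_state(pwm, state);
}
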
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 7543e395a2c6..db38a68abb6c 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -380,12 +380,11 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_dev;
}
- tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+ tqueue = blk_alloc_queue(tt->make_rq, dev->q->node);
if (!tqueue) {
ret = -ENOMEM;
goto err_disk;
}
- blk_queue_make_request(tqueue, tt->make_rq);
strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
tdisk->flags = GENHD_FL_EXT_DEVT;
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 7d8958df9472..6387302b03f2 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -37,7 +37,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
active = 0;
up(&rlun->wr_sem);
}
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"pblk: pos:%d, ch:%d, lun:%d - %d\n",
i,
rlun->bppa.a.ch,
@@ -120,7 +120,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
struct nvm_addrf_12 *gppaf = (struct nvm_addrf_12 *)&geo->addrf;
- sz = snprintf(page, PAGE_SIZE,
+ sz = scnprintf(page, PAGE_SIZE,
"g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n",
pblk->addrf_len,
ppaf->blk_offset, ppaf->blk_len,
@@ -130,7 +130,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
ppaf->pln_offset, ppaf->pln_len,
ppaf->sec_offset, ppaf->sec_len);
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n",
gppaf->blk_offset, gppaf->blk_len,
gppaf->pg_offset, gppaf->pg_len,
@@ -142,7 +142,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
struct nvm_addrf *ppaf = &pblk->addrf;
struct nvm_addrf *gppaf = &geo->addrf;
- sz = snprintf(page, PAGE_SIZE,
+ sz = scnprintf(page, PAGE_SIZE,
"pblk:(s:%d)ch:%d/%d,lun:%d/%d,chk:%d/%d/sec:%d/%d\n",
pblk->addrf_len,
ppaf->ch_offset, ppaf->ch_len,
@@ -150,7 +150,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
ppaf->chk_offset, ppaf->chk_len,
ppaf->sec_offset, ppaf->sec_len);
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"device:ch:%d/%d,lun:%d/%d,chk:%d/%d,sec:%d/%d\n",
gppaf->ch_offset, gppaf->ch_len,
gppaf->lun_offset, gppaf->lun_len,
@@ -278,11 +278,11 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
pblk_err(pblk, "corrupted free line list:%d/%d\n",
nr_free_lines, free_line_cnt);
- sz = snprintf(page, PAGE_SIZE - sz,
+ sz = scnprintf(page, PAGE_SIZE - sz,
"line: nluns:%d, nblks:%d, nsecs:%d\n",
geo->all_luns, lm->blk_per_line, lm->sec_per_line);
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
cur_data, cur_log,
nr_free_lines,
@@ -292,12 +292,12 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
d_line_cnt, l_line_cnt,
l_mg->nr_lines);
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, werr: %d, queue:%d\n",
gc_full, gc_high, gc_mid, gc_low, gc_empty, gc_werr,
atomic_read(&pblk->gc.read_inflight_gc));
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n",
cur_data, cur_sec, msecs, vsc, sec_in_line,
map_weight, lm->sec_per_line,
@@ -313,19 +313,19 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
struct pblk_line_meta *lm = &pblk->lm;
ssize_t sz = 0;
- sz = snprintf(page, PAGE_SIZE - sz,
+ sz = scnprintf(page, PAGE_SIZE - sz,
"smeta - len:%d, secs:%d\n",
lm->smeta_len, lm->smeta_sec);
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"emeta - len:%d, sec:%d, bb_start:%d\n",
lm->emeta_len[0], lm->emeta_sec[0],
lm->emeta_bb);
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"bitmap lengths: sec:%d, blk:%d, lun:%d\n",
lm->sec_bitmap_len,
lm->blk_bitmap_len,
lm->lun_bitmap_len);
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"blk_line:%d, sec_line:%d, sec_blk:%d\n",
lm->blk_per_line,
lm->sec_per_line,
@@ -344,12 +344,12 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
{
int sz;
- sz = snprintf(page, PAGE_SIZE,
+ sz = scnprintf(page, PAGE_SIZE,
"user:%lld gc:%lld pad:%lld WA:",
user, gc, pad);
if (!user) {
- sz += snprintf(page + sz, PAGE_SIZE - sz, "NaN\n");
+ sz += scnprintf(page + sz, PAGE_SIZE - sz, "NaN\n");
} else {
u64 wa_int;
u32 wa_frac;
@@ -358,7 +358,7 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
wa_int = div64_u64(wa_int, user);
wa_int = div_u64_rem(wa_int, 100000, &wa_frac);
- sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
+ sz += scnprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
wa_int, wa_frac);
}
@@ -401,9 +401,9 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
total = atomic64_read(&pblk->nr_flush) - pblk->nr_flush_rst;
if (!total) {
for (i = 0; i < (buckets + 1); i++)
- sz += snprintf(page + sz, PAGE_SIZE - sz,
+ sz += scnprintf(page + sz, PAGE_SIZE - sz,
"%d:0 ", i);
- sz += snprintf(page + sz, PAGE_SIZE - sz, "\n");
+ sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n");
return sz;
}
@@ -411,7 +411,7 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
for (i = 0; i < buckets; i++)
total_buckets += atomic64_read(&pblk->pad_dist[i]);
- sz += snprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ",
+ sz += scnprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ",
bucket_percentage(total - total_buckets, total));
for (i = 0; i < buckets; i++) {
@@ -419,10 +419,10 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
p = bucket_percentage(atomic64_read(&pblk->pad_dist[i]),
total);
- sz += snprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ",
+ sz += scnprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ",
i + 1, p);
}
- sz += snprintf(page + sz, PAGE_SIZE - sz, "\n");
+ sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n");
return sz;
}
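
For context on the snprintf() to scnprintf() swaps above (the bcm-pdc-mailbox debugfs hunk further down makes the same change): snprintf() returns the length the output would have had without truncation, so an append loop can push the running offset past the buffer and turn the next size argument into a huge unsigned value; scnprintf() returns only the bytes actually stored. A sketch of the safe append pattern, with invented names:

#include <linux/kernel.h>

static size_t example_append(char *buf, size_t size, size_t off, int val)
{
	/*
	 * scnprintf() never reports more than it wrote, so the returned
	 * offset stays below size even once the buffer fills up.
	 */
	return off + scnprintf(buf + off, size - off, "value:%d\n", val);
}
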
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index b1314d104b06..b4821c751d04 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -142,7 +142,7 @@ const struct file_operations anslcd_fops = {
};
static struct miscdevice anslcd_dev = {
- ANSLCD_MINOR,
+ LCD_MINOR,
"anslcd",
&anslcd_fops
};
diff --git a/drivers/macintosh/ans-lcd.h b/drivers/macintosh/ans-lcd.h
index f0a6e4c68557..bca7d76d441b 100644
--- a/drivers/macintosh/ans-lcd.h
+++ b/drivers/macintosh/ans-lcd.h
@@ -2,8 +2,6 @@
#ifndef _PPC_ANS_LCD_H
#define _PPC_ANS_LCD_H
-#define ANSLCD_MINOR 156
-
#define ANSLCD_CLEAR 0x01
#define ANSLCD_SENDCTRL 0x02
#define ANSLCD_SETSHORTDELAY 0x03
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index a0d87ed9da69..f55f6adf5e5f 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -323,7 +323,7 @@ static void do_attach(struct i2c_adapter *adapter)
of_node_put(np);
} else {
strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
- i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
+ i2c_new_scanned_device(adapter, &info, scan_ds1775, NULL);
}
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
@@ -331,7 +331,7 @@ static void do_attach(struct i2c_adapter *adapter)
of_node_put(np);
} else {
strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
- i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
+ i2c_new_scanned_device(adapter, &info, scan_adm1030, NULL);
}
}
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index d38fb78a3b23..83eb05bf85ff 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -75,9 +75,6 @@
/* Some compile options */
#undef DEBUG_SLEEP
-/* Misc minor number allocated for /dev/pmu */
-#define PMU_MINOR 154
-
/* How many iterations between battery polls */
#define BATTERY_POLLING_COUNT 2
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ab4eb750bbdd..5a577a6734cf 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -227,4 +227,13 @@ config ZYNQMP_IPI_MBOX
message to the IPI buffer and will access the IPI control
registers to kick the other processor or enquire status.
+config SUN6I_MSGBOX
+ tristate "Allwinner sun6i/sun8i/sun9i/sun50i Message Box"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default ARCH_SUNXI
+ help
+ Mailbox implementation for the hardware message box present in
+ various Allwinner SoCs. This mailbox is used for communication
+ between the application CPUs and the power management coprocessor.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index c22fad6f696b..2e4364ef5c47 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -48,3 +48,5 @@ obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
+
+obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
index 02b7b28e6969..9f2ce7f03c67 100644
--- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -156,16 +156,12 @@ static int armada_37xx_mbox_probe(struct platform_device *pdev)
return -ENOMEM;
mbox->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mbox->base)) {
- dev_err(&pdev->dev, "ioremap failed\n");
+ if (IS_ERR(mbox->base))
return PTR_ERR(mbox->base);
- }
mbox->irq = platform_get_irq(pdev, 0);
- if (mbox->irq < 0) {
- dev_err(&pdev->dev, "Cannot get irq\n");
+ if (mbox->irq < 0)
return mbox->irq;
- }
mbox->dev = &pdev->dev;
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index 8ee9db274802..bee33abb5308 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1599,6 +1599,7 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
1 << RING_CMPL_ALIGN_ORDER, 0);
if (!mbox->cmpl_pool) {
ret = -ENOMEM;
+ goto fail_destroy_bd_pool;
}
/* Allocate platform MSIs for each ring */
@@ -1661,6 +1662,7 @@ fail_free_debugfs_root:
platform_msi_domain_free_irqs(dev);
fail_destroy_cmpl_pool:
dma_pool_destroy(mbox->cmpl_pool);
+fail_destroy_bd_pool:
dma_pool_destroy(mbox->bd_pool);
fail:
return ret;
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index fcb3b18a0678..c10a9318a4b7 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -436,33 +436,33 @@ static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
pdcs = filp->private_data;
out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"SPU %u stats:\n", pdcs->pdc_idx);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"PDC requests....................%u\n",
pdcs->pdc_requests);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"PDC responses...................%u\n",
pdcs->pdc_replies);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx not done.....................%u\n",
pdcs->last_tx_not_done);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx ring full....................%u\n",
pdcs->tx_ring_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Rx ring full....................%u\n",
pdcs->rx_ring_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx desc write fail. Ring full...%u\n",
pdcs->txnobuf);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Rx desc write fail. Ring full...%u\n",
pdcs->rxnobuf);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Receive overflow................%u\n",
pdcs->rx_oflow);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Num frags in rx ring............%u\n",
NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
pdcs->nrxpost));
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 2cdcdc5f1119..7906624a731c 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -4,8 +4,10 @@
*/
#include <linux/clk.h>
+#include <linux/firmware/imx/ipc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
@@ -27,6 +29,8 @@
#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
#define IMX_MU_CHANS 16
+/* TX0/RX0/RXDB[0-3] */
+#define IMX_MU_SCU_CHANS 6
#define IMX_MU_CHAN_NAME_SIZE 20
enum imx_mu_chan_type {
@@ -36,11 +40,9 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RXDB, /* Rx doorbell */
};
-struct imx_mu_dcfg {
- u32 xTR[4]; /* Transmit Registers */
- u32 xRR[4]; /* Receive Registers */
- u32 xSR; /* Status Register */
- u32 xCR; /* Control Register */
+struct imx_sc_rpc_msg_max {
+ struct imx_sc_rpc_msg hdr;
+ u32 data[7];
};
struct imx_mu_con_priv {
@@ -67,18 +69,14 @@ struct imx_mu_priv {
bool side_b;
};
-static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
- .xTR = {0x0, 0x4, 0x8, 0xc},
- .xRR = {0x10, 0x14, 0x18, 0x1c},
- .xSR = 0x20,
- .xCR = 0x24,
-};
-
-static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
- .xTR = {0x20, 0x24, 0x28, 0x2c},
- .xRR = {0x40, 0x44, 0x48, 0x4c},
- .xSR = 0x60,
- .xCR = 0x64,
+struct imx_mu_dcfg {
+ int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
+ int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
+ void (*init)(struct imx_mu_priv *priv);
+ u32 xTR[4]; /* Transmit Registers */
+ u32 xRR[4]; /* Receive Registers */
+ u32 xSR; /* Status Register */
+ u32 xCR; /* Control Register */
};
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -111,6 +109,117 @@ static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
return val;
}
+static int imx_mu_generic_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ u32 *arg = data;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_TXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
+ tasklet_schedule(&cp->txdb_tasklet);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_generic_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ u32 dat;
+
+ dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
+ mbox_chan_received_data(cp->chan, (void *)&dat);
+
+ return 0;
+}
+
+static int imx_mu_scu_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ struct imx_sc_rpc_msg_max *msg = data;
+ u32 *arg = data;
+ int i, ret;
+ u32 xsr;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ if (msg->hdr.size > sizeof(*msg)) {
+ /*
+ * The real message size can differ from
+ * the size of struct imx_sc_rpc_msg_max
+ */
+ dev_err(priv->dev, "Exceed max msg size (%zu) on TX, got: %i\n", sizeof(*msg), msg->hdr.size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4 && i < msg->hdr.size; i++)
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ for (; i < msg->hdr.size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR,
+ xsr,
+ xsr & IMX_MU_xSR_TEn(i % 4),
+ 0, 100);
+ if (ret) {
+ dev_err(priv->dev, "Send data index: %d timeout\n", i);
+ return ret;
+ }
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_scu_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ struct imx_sc_rpc_msg_max msg;
+ u32 *data = (u32 *)&msg;
+ int i, ret;
+ u32 xsr;
+
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
+
+ if (msg.hdr.size > sizeof(msg)) {
+ dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
+ sizeof(msg), msg.hdr.size);
+ return -EINVAL;
+ }
+
+ for (i = 1; i < msg.hdr.size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR, xsr,
+ xsr & IMX_MU_xSR_RFn(i % 4), 0, 100);
+ if (ret) {
+ dev_err(priv->dev, "timeout read idx %d\n", i);
+ return ret;
+ }
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR[i % 4]);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(0), 0);
+ mbox_chan_received_data(cp->chan, (void *)&msg);
+
+ return 0;
+}
+
static void imx_mu_txdb_tasklet(unsigned long data)
{
struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
@@ -123,7 +232,7 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
struct mbox_chan *chan = p;
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
- u32 val, ctrl, dat;
+ u32 val, ctrl;
ctrl = imx_mu_read(priv, priv->dcfg->xCR);
val = imx_mu_read(priv, priv->dcfg->xSR);
@@ -152,8 +261,7 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
mbox_chan_txdone(chan, 0);
} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
- dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
- mbox_chan_received_data(chan, (void *)&dat);
+ priv->dcfg->rx(priv, cp);
} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
mbox_chan_received_data(chan, NULL);
@@ -169,23 +277,8 @@ static int imx_mu_send_data(struct mbox_chan *chan, void *data)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
- u32 *arg = data;
- switch (cp->type) {
- case IMX_MU_TYPE_TX:
- imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
- break;
- case IMX_MU_TYPE_TXDB:
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
- tasklet_schedule(&cp->txdb_tasklet);
- break;
- default:
- dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
- return -EINVAL;
- }
-
- return 0;
+ return priv->dcfg->tx(priv, cp, data);
}
static int imx_mu_startup(struct mbox_chan *chan)
@@ -256,6 +349,42 @@ static const struct mbox_chan_ops imx_mu_ops = {
.shutdown = imx_mu_shutdown,
};
+static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+
+ switch (type) {
+ case IMX_MU_TYPE_TX:
+ case IMX_MU_TYPE_RX:
+ if (idx != 0)
+ dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
+ chan = type;
+ break;
+ case IMX_MU_TYPE_RXDB:
+ chan = 2 + idx;
+ break;
+ default:
+ dev_err(mbox->dev, "Invalid chan type: %d\n", type);
+ return NULL;
+ }
+
+ if (chan >= mbox->num_chans) {
+ dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
+
static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
@@ -280,6 +409,22 @@ static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
+ unsigned int i;
+
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i % 4;
+ cp->type = i >> 2;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_CHANS;
+ priv->mbox.of_xlate = imx_mu_xlate;
+
if (priv->side_b)
return;
@@ -287,13 +432,34 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
imx_mu_write(priv, 0, priv->dcfg->xCR);
}
+static void imx_mu_init_scu(struct imx_mu_priv *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i < 2 ? 0 : i - 2;
+ cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_SCU_CHANS;
+ priv->mbox.of_xlate = imx_mu_scu_xlate;
+
+ /* Set default MU configuration */
+ imx_mu_write(priv, 0, priv->dcfg->xCR);
+}
+
static int imx_mu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx_mu_priv *priv;
const struct imx_mu_dcfg *dcfg;
- unsigned int i;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -329,32 +495,19 @@ static int imx_mu_probe(struct platform_device *pdev)
return ret;
}
- for (i = 0; i < IMX_MU_CHANS; i++) {
- struct imx_mu_con_priv *cp = &priv->con_priv[i];
-
- cp->idx = i % 4;
- cp->type = i >> 2;
- cp->chan = &priv->mbox_chans[i];
- priv->mbox_chans[i].con_priv = cp;
- snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
- }
-
priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
+ priv->dcfg->init(priv);
+
spin_lock_init(&priv->xcr_lock);
priv->mbox.dev = dev;
priv->mbox.ops = &imx_mu_ops;
priv->mbox.chans = priv->mbox_chans;
- priv->mbox.num_chans = IMX_MU_CHANS;
- priv->mbox.of_xlate = imx_mu_xlate;
priv->mbox.txdone_irq = true;
platform_set_drvdata(pdev, priv);
- imx_mu_init_generic(priv);
-
return devm_mbox_controller_register(dev, &priv->mbox);
}
@@ -367,9 +520,40 @@ static int imx_mu_remove(struct platform_device *pdev)
return 0;
}
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .xTR = {0x20, 0x24, 0x28, 0x2c},
+ .xRR = {0x40, 0x44, 0x48, 0x4c},
+ .xSR = 0x60,
+ .xCR = 0x64,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
+ .tx = imx_mu_scu_tx,
+ .rx = imx_mu_scu_rx,
+ .init = imx_mu_init_scu,
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
+ { .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
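
For context: the refactor above turns imx_mu_dcfg into a small per-variant ops table (tx/rx/init) keyed by compatible string. The probe path is assumed to fetch it from the matched imx_mu_dt_ids entry roughly as sketched below (illustrative only, not the full probe):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct imx_mu_dcfg *example_get_cfg(struct platform_device *pdev)
{
	/* Returns the .data member of the matching of_device_id entry. */
	return of_device_get_match_data(&pdev->dev);
}
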
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 9a6ce9f5a7db..b24822ad8409 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -56,7 +56,6 @@ struct cmdq_thread {
void __iomem *base;
struct list_head task_busy_list;
u32 priority;
- bool atomic_exec;
};
struct cmdq_task {
@@ -162,48 +161,11 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
cmdq_thread_invalidate_fetched_data(thread);
}
-static bool cmdq_command_is_wfe(u64 cmd)
-{
- u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
- u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
- u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;
-
- return ((cmd & wfe_mask) == (wfe_op | wfe_option));
-}
-
-/* we assume tasks in the same display GCE thread are waiting the same event. */
-static void cmdq_task_remove_wfe(struct cmdq_task *task)
-{
- struct device *dev = task->cmdq->mbox.dev;
- u64 *base = task->pkt->va_base;
- int i;
-
- dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
- DMA_TO_DEVICE);
- for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
- if (cmdq_command_is_wfe(base[i]))
- base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
- CMDQ_JUMP_PASS;
- dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
- DMA_TO_DEVICE);
-}
-
static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}
-static void cmdq_thread_wait_end(struct cmdq_thread *thread,
- unsigned long end_pa)
-{
- struct device *dev = thread->chan->mbox->dev;
- unsigned long curr_pa;
-
- if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
- curr_pa, curr_pa == end_pa, 1, 20))
- dev_err(dev, "GCE thread cannot run to end.\n");
-}
-
static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
{
struct cmdq_task_cb *cb = &task->pkt->async_cb;
@@ -383,36 +345,15 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
end_pa = readl(thread->base + CMDQ_THR_END_ADDR);
-
- /*
- * Atomic execution should remove the following wfe, i.e. only
- * wait event at first task, and prevent to pause when running.
- */
- if (thread->atomic_exec) {
- /* GCE is executing if command is not WFE */
- if (!cmdq_thread_is_in_wfe(thread)) {
- cmdq_thread_resume(thread);
- cmdq_thread_wait_end(thread, end_pa);
- WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
- /* set to this task directly */
- writel(task->pa_base,
- thread->base + CMDQ_THR_CURR_ADDR);
- } else {
- cmdq_task_insert_into_thread(task);
- cmdq_task_remove_wfe(task);
- smp_mb(); /* modify jump before enable thread */
- }
+ /* check boundary */
+ if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+ curr_pa == end_pa) {
+ /* set to this task directly */
+ writel(task->pa_base,
+ thread->base + CMDQ_THR_CURR_ADDR);
} else {
- /* check boundary */
- if (curr_pa == end_pa - CMDQ_INST_SIZE ||
- curr_pa == end_pa) {
- /* set to this task directly */
- writel(task->pa_base,
- thread->base + CMDQ_THR_CURR_ADDR);
- } else {
- cmdq_task_insert_into_thread(task);
- smp_mb(); /* modify jump before enable thread */
- }
+ cmdq_task_insert_into_thread(task);
+ smp_mb(); /* modify jump before enable thread */
}
writel(task->pa_base + pkt->cmd_buf_size,
thread->base + CMDQ_THR_END_ADDR);
@@ -432,10 +373,62 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
}
+static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq_task_cb *cb;
+ struct cmdq_cb_data data;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task, *tmp;
+ unsigned long flags;
+ u32 enable;
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ if (list_empty(&thread->task_busy_list))
+ goto out;
+
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ if (!cmdq_thread_is_in_wfe(thread))
+ goto wait;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ cb = &task->pkt->async_cb;
+ if (cb->cb) {
+ data.sta = CMDQ_CB_ERROR;
+ data.data = cb->data;
+ cb->cb(data);
+ }
+ list_del(&task->list_entry);
+ kfree(task);
+ }
+
+ cmdq_thread_resume(thread);
+ cmdq_thread_disable(cmdq, thread);
+ clk_disable(cmdq->clock);
+
+out:
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ return 0;
+
+wait:
+ cmdq_thread_resume(thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
+ enable, enable == 0, 1, timeout)) {
+ dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
+ (u32)(thread->base - cmdq->base));
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
.send_data = cmdq_mbox_send_data,
.startup = cmdq_mbox_startup,
.shutdown = cmdq_mbox_shutdown,
+ .flush = cmdq_mbox_flush,
};
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
@@ -449,7 +442,6 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
thread->priority = sp->args[1];
- thread->atomic_exec = (sp->args[2] != 0);
thread->chan = &mbox->chans[ind];
return &mbox->chans[ind];
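
For context: the new cmdq .flush callback is reached from the client side through mbox_flush(). A hedged sketch of that call sequence; the timeout units are whatever the controller's handler expects (microseconds for the readl_poll_timeout_atomic() above), and the packet pointer is illustrative:

#include <linux/mailbox_client.h>

static int example_send_and_drain(struct mbox_chan *chan, void *pkt)
{
	int ret;

	ret = mbox_send_message(chan, pkt);
	if (ret < 0)
		return ret;

	/* Ends up in cmdq_mbox_flush(), waiting for the GCE thread to idle. */
	return mbox_flush(chan, 2000);
}
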
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
new file mode 100644
index 000000000000..ccecf2e5941d
--- /dev/null
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2017-2019 Samuel Holland <samuel@sholland.org>
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define NUM_CHANS 8
+
+#define CTRL_REG(n) (0x0000 + 0x4 * ((n) / 4))
+#define CTRL_RX(n) BIT(0 + 8 * ((n) % 4))
+#define CTRL_TX(n) BIT(4 + 8 * ((n) % 4))
+
+#define REMOTE_IRQ_EN_REG 0x0040
+#define REMOTE_IRQ_STAT_REG 0x0050
+#define LOCAL_IRQ_EN_REG 0x0060
+#define LOCAL_IRQ_STAT_REG 0x0070
+
+#define RX_IRQ(n) BIT(0 + 2 * (n))
+#define RX_IRQ_MASK 0x5555
+#define TX_IRQ(n) BIT(1 + 2 * (n))
+#define TX_IRQ_MASK 0xaaaa
+
+#define FIFO_STAT_REG(n) (0x0100 + 0x4 * (n))
+#define FIFO_STAT_MASK GENMASK(0, 0)
+
+#define MSG_STAT_REG(n) (0x0140 + 0x4 * (n))
+#define MSG_STAT_MASK GENMASK(2, 0)
+
+#define MSG_DATA_REG(n) (0x0180 + 0x4 * (n))
+
+#define mbox_dbg(mbox, ...) dev_dbg((mbox)->controller.dev, __VA_ARGS__)
+
+struct sun6i_msgbox {
+ struct mbox_controller controller;
+ struct clk *clk;
+ spinlock_t lock;
+ void __iomem *regs;
+};
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan);
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan);
+
+static inline int channel_number(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline struct sun6i_msgbox *to_sun6i_msgbox(struct mbox_chan *chan)
+{
+ return chan->con_priv;
+}
+
+static irqreturn_t sun6i_msgbox_irq(int irq, void *dev_id)
+{
+ struct sun6i_msgbox *mbox = dev_id;
+ uint32_t status;
+ int n;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mbox->regs + LOCAL_IRQ_EN_REG) &
+ readl(mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < NUM_CHANS; ++n) {
+ struct mbox_chan *chan = &mbox->controller.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ while (sun6i_msgbox_peek_data(chan)) {
+ uint32_t msg = readl(mbox->regs + MSG_DATA_REG(n));
+
+ mbox_dbg(mbox, "Channel %d received 0x%08x\n", n, msg);
+ mbox_chan_received_data(chan, &msg);
+ }
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun6i_msgbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+ uint32_t msg = *(uint32_t *)data;
+
+ /* Using a channel backwards gets the hardware into a bad state. */
+ if (WARN_ON_ONCE(!(readl(mbox->regs + CTRL_REG(n)) & CTRL_TX(n))))
+ return 0;
+
+ writel(msg, mbox->regs + MSG_DATA_REG(n));
+ mbox_dbg(mbox, "Channel %d sent 0x%08x\n", n, msg);
+
+ return 0;
+}
+
+static int sun6i_msgbox_startup(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /* The coprocessor is responsible for setting channel directions. */
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Flush the receive FIFO. */
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ /* Enable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) | RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+ }
+
+ mbox_dbg(mbox, "Channel %d startup complete\n", n);
+
+ return 0;
+}
+
+static void sun6i_msgbox_shutdown(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Disable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) & ~RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+
+ /* Attempt to flush the FIFO until the IRQ is cleared. */
+ do {
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ } while (readl(mbox->regs + LOCAL_IRQ_STAT_REG) & RX_IRQ(n));
+ }
+
+ mbox_dbg(mbox, "Channel %d shutdown complete\n", n);
+}
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /*
+ * The hardware allows snooping on the remote user's IRQ statuses.
+ * We consider a message to be acknowledged only once the receive IRQ
+ * for that channel is cleared. Since the receive IRQ for a channel
+ * cannot be cleared until the FIFO for that channel is empty, this
+ * ensures that the message has actually been read. It also gives the
+ * recipient an opportunity to perform minimal processing before
+ * acknowledging the message.
+ */
+ return !(readl(mbox->regs + REMOTE_IRQ_STAT_REG) & RX_IRQ(n));
+}
+
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ return readl(mbox->regs + MSG_STAT_REG(n)) & MSG_STAT_MASK;
+}
+
+static const struct mbox_chan_ops sun6i_msgbox_chan_ops = {
+ .send_data = sun6i_msgbox_send_data,
+ .startup = sun6i_msgbox_startup,
+ .shutdown = sun6i_msgbox_shutdown,
+ .last_tx_done = sun6i_msgbox_last_tx_done,
+ .peek_data = sun6i_msgbox_peek_data,
+};
+
+static int sun6i_msgbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_chan *chans;
+ struct reset_control *reset;
+ struct resource *res;
+ struct sun6i_msgbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, NUM_CHANS, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ for (i = 0; i < NUM_CHANS; ++i)
+ chans[i].con_priv = mbox;
+
+ mbox->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mbox->clk)) {
+ ret = PTR_ERR(mbox->clk);
+ dev_err(dev, "Failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mbox->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ dev_err(dev, "Failed to get reset control: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /*
+ * NOTE: We rely on platform firmware to preconfigure the channel
+ * directions, and we share this hardware block with other firmware
+ * that runs concurrently with Linux (e.g. a trusted monitor).
+ *
+ * Therefore, we do *not* assert the reset line if probing fails or
+ * when removing the device.
+ */
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ dev_err(dev, "Failed to deassert reset: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto err_disable_unprepare;
+ }
+
+ mbox->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /* Disable all IRQs for this end of the msgbox. */
+ writel(0, mbox->regs + LOCAL_IRQ_EN_REG);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+ sun6i_msgbox_irq, 0, dev_name(dev), mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register IRQ handler: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.ops = &sun6i_msgbox_chan_ops;
+ mbox->controller.chans = chans;
+ mbox->controller.num_chans = NUM_CHANS;
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+
+ spin_lock_init(&mbox->lock);
+ platform_set_drvdata(pdev, mbox);
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(dev, "Failed to register controller: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ return 0;
+
+err_disable_unprepare:
+ clk_disable_unprepare(mbox->clk);
+
+ return ret;
+}
+
+static int sun6i_msgbox_remove(struct platform_device *pdev)
+{
+ struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ /* See the comment in sun6i_msgbox_probe about the reset line. */
+ clk_disable_unprepare(mbox->clk);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_msgbox_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-msgbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun6i_msgbox_of_match);
+
+static struct platform_driver sun6i_msgbox_driver = {
+ .driver = {
+ .name = "sun6i-msgbox",
+ .of_match_table = sun6i_msgbox_of_match,
+ },
+ .probe = sun6i_msgbox_probe,
+ .remove = sun6i_msgbox_remove,
+};
+module_platform_driver(sun6i_msgbox_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner sun6i/sun8i/sun9i/sun50i Message Box");
+MODULE_LICENSE("GPL v2");
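
For context: this controller signals completion via txdone_poll/last_tx_done() rather than a TX interrupt, so a blocking client is the natural consumer; the framework polls last_tx_done() at txpoll_period intervals on the client's behalf. A minimal, hypothetical client sketch (channel index and timeout picked arbitrarily):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

static void example_rx(struct mbox_client *cl, void *msg)
{
	dev_dbg(cl->dev, "received 0x%08x\n", *(u32 *)msg);
}

static int example_client(struct device *dev)
{
	struct mbox_client *cl;
	struct mbox_chan *chan;
	u32 msg = 0x12345678;
	int ret;

	cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = dev;
	cl->rx_callback = example_rx;
	cl->tx_block = true;	/* sleep until last_tx_done() reports done */
	cl->tx_tout = 100;	/* give up after 100 ms */

	chan = mbox_request_channel(cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}
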
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index fa872df4e770..72856e5f23a3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -101,64 +101,6 @@
#define insert_lock(s, b) ((b)->level <= (s)->lock)
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
-
-/**
- * btree - recurse down the btree on a specified key
- * @fn: function to call, which will be passed the child node
- * @key: key to recurse on
- * @b: parent btree node
- * @op: pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...) \
-({ \
- int _r, l = (b)->level - 1; \
- bool _w = l <= (op)->lock; \
- struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
- _w, b); \
- if (!IS_ERR(_child)) { \
- _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
- rw_unlock(_w, _child); \
- } else \
- _r = PTR_ERR(_child); \
- _r; \
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn: function to call, which will be passed the child node
- * @c: cache set
- * @op: pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...) \
-({ \
- int _r = -EINTR; \
- do { \
- struct btree *_b = (c)->root; \
- bool _w = insert_lock(op, _b); \
- rw_lock(_w, _b, _b->level); \
- if (_b == (c)->root && \
- _w == insert_lock(op, _b)) { \
- _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
- } \
- rw_unlock(_w, _b); \
- bch_cannibalize_unlock(c); \
- if (_r == -EINTR) \
- schedule(); \
- } while (_r == -EINTR); \
- \
- finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
- _r; \
-})
static inline struct bset *write_block(struct btree *b)
{
@@ -1848,7 +1790,7 @@ static void bch_btree_gc(struct cache_set *c)
/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
do {
- ret = btree_root(gc_root, c, &op, &writes, &stats);
+ ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
closure_sync(&writes);
cond_resched();
@@ -1946,7 +1888,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
}
if (p)
- ret = btree(check_recurse, p, b, op);
+ ret = bcache_btree(check_recurse, p, b, op);
p = k;
} while (p && !ret);
@@ -1955,13 +1897,176 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
return ret;
}
+
+static int bch_btree_check_thread(void *arg)
+{
+ int ret;
+ struct btree_check_info *info = arg;
+ struct btree_check_state *check_state = info->state;
+ struct cache_set *c = check_state->c;
+ struct btree_iter iter;
+ struct bkey *k, *p;
+ int cur_idx, prev_idx, skip_nr;
+ int i, n;
+
+ k = p = NULL;
+ i = n = 0;
+ cur_idx = prev_idx = 0;
+ ret = 0;
+
+ /* root node keys are checked before the worker threads are created */
+ bch_btree_iter_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ BUG_ON(!k);
+
+ p = k;
+ while (k) {
+ /*
+ * Fetch a root node key index, skip the keys which
+ * should be fetched by other threads, then check the
+ * sub-tree indexed by the fetched key.
+ */
+ spin_lock(&check_state->idx_lock);
+ cur_idx = check_state->key_idx;
+ check_state->key_idx++;
+ spin_unlock(&check_state->idx_lock);
+
+ skip_nr = cur_idx - prev_idx;
+
+ while (skip_nr) {
+ k = bch_btree_iter_next_filter(&iter,
+ &c->root->keys,
+ bch_ptr_bad);
+ if (k)
+ p = k;
+ else {
+ /*
+ * No more keys to check in root node,
+ * current checking threads are enough,
+ * stop creating more.
+ */
+ atomic_set(&check_state->enough, 1);
+ /* Update check_state->enough earlier */
+ smp_mb__after_atomic();
+ goto out;
+ }
+ skip_nr--;
+ cond_resched();
+ }
+
+ if (p) {
+ struct btree_op op;
+
+ btree_node_prefetch(c->root, p);
+ c->gc_stats.nodes++;
+ bch_btree_op_init(&op, 0);
+ ret = bcache_btree(check_recurse, p, c->root, &op);
+ if (ret)
+ goto out;
+ }
+ p = NULL;
+ prev_idx = cur_idx;
+ cond_resched();
+ }
+
+out:
+ info->result = ret;
+ /* update check_state->started among all CPUs */
+ smp_mb__before_atomic();
+ if (atomic_dec_and_test(&check_state->started))
+ wake_up(&check_state->wait);
+
+ return ret;
+}
+
+static int bch_btree_chkthread_nr(void)
+{
+ int n = num_online_cpus()/2;
+
+ if (n == 0)
+ n = 1;
+ else if (n > BCH_BTR_CHKTHREAD_MAX)
+ n = BCH_BTR_CHKTHREAD_MAX;
+
+ return n;
+}
+
int bch_btree_check(struct cache_set *c)
{
- struct btree_op op;
+ int ret = 0;
+ int i;
+ struct bkey *k = NULL;
+ struct btree_iter iter;
+ struct btree_check_state *check_state;
+ char name[32];
- bch_btree_op_init(&op, SHRT_MAX);
+ /* check and mark root node keys */
+ for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
+ bch_initial_mark_key(c, c->root->level, k);
+
+ bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
+
+ if (c->root->level == 0)
+ return 0;
+
+ check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
+ if (!check_state)
+ return -ENOMEM;
- return btree_root(check_recurse, c, &op);
+ check_state->c = c;
+ check_state->total_threads = bch_btree_chkthread_nr();
+ check_state->key_idx = 0;
+ spin_lock_init(&check_state->idx_lock);
+ atomic_set(&check_state->started, 0);
+ atomic_set(&check_state->enough, 0);
+ init_waitqueue_head(&check_state->wait);
+
+	/*
+	 * Run multiple threads to check btree nodes in parallel. If
+	 * check_state->enough is non-zero, the check threads already
+	 * running are enough and it is unnecessary to create more.
+	 */
+ for (i = 0; i < check_state->total_threads; i++) {
+ /* fetch latest check_state->enough earlier */
+ smp_mb__before_atomic();
+ if (atomic_read(&check_state->enough))
+ break;
+
+ check_state->infos[i].result = 0;
+ check_state->infos[i].state = check_state;
+ snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
+ atomic_inc(&check_state->started);
+
+ check_state->infos[i].thread =
+ kthread_run(bch_btree_check_thread,
+ &check_state->infos[i],
+ name);
+ if (IS_ERR(check_state->infos[i].thread)) {
+ pr_err("fails to run thread bch_btrchk[%d]", i);
+ for (--i; i >= 0; i--)
+ kthread_stop(check_state->infos[i].thread);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ wait_event_interruptible(check_state->wait,
+ atomic_read(&check_state->started) == 0 ||
+ test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+
+ for (i = 0; i < check_state->total_threads; i++) {
+ if (check_state->infos[i].result) {
+ ret = check_state->infos[i].result;
+ goto out;
+ }
+ }
+
+out:
+ kfree(check_state);
+ return ret;
}
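
Stripped of kernel plumbing, the worker scheme above is: each thread claims the next unchecked root-key index under a lock, skips its private iterator forward to that key, and checks only the subtree it claimed. A minimal userspace analogue using pthreads (hypothetical names; the kernel uses kthreads, a spinlock and a waitqueue instead):

#include <pthread.h>
#include <stdio.h>

#define NR_KEYS		16
#define NR_WORKERS	4

static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
static int key_idx;	/* next root-node key index to hand out */

static void *worker(void *arg)
{
	long id = (long)arg;

	for (;;) {
		int cur;

		/* claim the next unchecked index, as the kernel code
		 * does under check_state->idx_lock */
		pthread_mutex_lock(&idx_lock);
		cur = key_idx++;
		pthread_mutex_unlock(&idx_lock);

		if (cur >= NR_KEYS)	/* plays the role of "enough" */
			return NULL;

		printf("worker %ld checks subtree under key %d\n", id, cur);
	}
}

int main(void)
{
	pthread_t tids[NR_WORKERS];
	long i;

	for (i = 0; i < NR_WORKERS; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);
	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(tids[i], NULL);	/* replaces started + wait_event */
	return 0;
}
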
void bch_initial_gc_finish(struct cache_set *c)
@@ -2401,7 +2506,7 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad))) {
- ret = btree(map_nodes_recurse, k, b,
+ ret = bcache_btree(map_nodes_recurse, k, b,
op, from, fn, flags);
from = NULL;
@@ -2419,10 +2524,10 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
- return btree_root(map_nodes_recurse, c, op, from, fn, flags);
+ return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
}
-static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
struct bkey *from, btree_map_keys_fn *fn,
int flags)
{
@@ -2435,7 +2540,8 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
- : btree(map_keys_recurse, k, b, op, from, fn, flags);
+ : bcache_btree(map_keys_recurse, k,
+ b, op, from, fn, flags);
from = NULL;
if (ret != MAP_CONTINUE)
@@ -2452,7 +2558,7 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_keys_fn *fn, int flags)
{
- return btree_root(map_keys_recurse, c, op, from, fn, flags);
+ return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
}
/* Keybuf code */
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index f4dcca449391..257969980c49 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -145,6 +145,9 @@ struct btree {
struct bio *bio;
};
+
+
+
#define BTREE_FLAG(flag) \
static inline bool btree_node_ ## flag(struct btree *b) \
{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
@@ -216,6 +219,25 @@ struct btree_op {
unsigned int insert_collision:1;
};
+struct btree_check_state;
+struct btree_check_info {
+ struct btree_check_state *state;
+ struct task_struct *thread;
+ int result;
+};
+
+#define BCH_BTR_CHKTHREAD_MAX 64
+struct btree_check_state {
+ struct cache_set *c;
+ int total_threads;
+ int key_idx;
+ spinlock_t idx_lock;
+ atomic_t started;
+ atomic_t enough;
+ wait_queue_head_t wait;
+ struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX];
+};
+
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
memset(op, 0, sizeof(struct btree_op));
@@ -284,6 +306,65 @@ static inline void force_wake_up_gc(struct cache_set *c)
wake_up_gc(c);
}
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define bcache_btree(fn, key, b, op, ...) \
+({ \
+ int _r, l = (b)->level - 1; \
+ bool _w = l <= (op)->lock; \
+ struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
+ _w, b); \
+ if (!IS_ERR(_child)) { \
+ _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
+ rw_unlock(_w, _child); \
+ } else \
+ _r = PTR_ERR(_child); \
+ _r; \
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define bcache_btree_root(fn, c, op, ...) \
+({ \
+ int _r = -EINTR; \
+ do { \
+ struct btree *_b = (c)->root; \
+ bool _w = insert_lock(op, _b); \
+ rw_lock(_w, _b, _b->level); \
+ if (_b == (c)->root && \
+ _w == insert_lock(op, _b)) { \
+ _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+ } \
+ rw_unlock(_w, _b); \
+ bch_cannibalize_unlock(c); \
+ if (_r == -EINTR) \
+ schedule(); \
+ } while (_r == -EINTR); \
+ \
+ finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
+ _r; \
+})
+
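+Both helpers dispatch through token pasting: the fn argument is glued onto bch_btree_, so the bcache_btree_root(gc_root, c, &op, ...) call seen earlier resolves to bch_btree_gc_root() at compile time. A tiny self-contained sketch of the same dispatch pattern, with hypothetical handler names:

#include <stdio.h>

/* same token-pasting dispatch as bcache_btree_root(): the first
 * argument selects a handle_##fn function at compile time */
#define dispatch(fn, ...) handle_##fn(__VA_ARGS__)

static int handle_gc_root(int depth)
{
	printf("gc at depth %d\n", depth);
	return 0;
}

static int handle_check(int depth)
{
	printf("check at depth %d\n", depth);
	return 0;
}

int main(void)
{
	dispatch(gc_root, 1);	/* expands to handle_gc_root(1) */
	dispatch(check, 2);	/* expands to handle_check(2) */
	return 0;
}
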
#define MAP_DONE 0
#define MAP_CONTINUE 1
@@ -314,6 +395,9 @@ typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
struct bkey *k);
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_keys_fn *fn, int flags);
+int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from, btree_map_keys_fn *fn,
+ int flags);
typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 820d8402a1dc..71a90fbec314 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1161,8 +1161,7 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
-static blk_qc_t cached_dev_make_request(struct request_queue *q,
- struct bio *bio)
+blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
struct search *s;
struct bcache_device *d = bio->bi_disk->private_data;
@@ -1266,7 +1265,6 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
{
struct gendisk *g = dc->disk.disk;
- g->queue->make_request_fn = cached_dev_make_request;
g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
@@ -1301,8 +1299,7 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
-static blk_qc_t flash_dev_make_request(struct request_queue *q,
- struct bio *bio)
+blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
struct search *s;
struct closure *cl;
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index c64dbd7a91aa..bb005c93dd72 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -37,7 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
+blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio);
+
void bch_flash_dev_request_init(struct bcache_device *d);
+blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio);
extern struct kmem_cache *bch_search_cache;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 0c3c5419c52b..d98354fa28e3 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -816,7 +816,7 @@ static void bcache_device_free(struct bcache_device *d)
}
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
- sector_t sectors)
+ sector_t sectors, make_request_fn make_request_fn)
{
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
@@ -866,11 +866,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
- q = blk_alloc_queue(GFP_KERNEL);
+ q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE);
if (!q)
return -ENOMEM;
- blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
q->backing_dev_info->congested_data = d;
@@ -1339,7 +1338,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
q->limits.raid_partial_stripes_expensive;
ret = bcache_device_init(&dc->disk, block_size,
- dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
+ dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
+ cached_dev_make_request);
if (ret)
return ret;
@@ -1451,7 +1451,8 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
kobject_init(&d->kobj, &bch_flash_dev_ktype);
- if (bcache_device_init(d, block_bytes(c), u->sectors))
+ if (bcache_device_init(d, block_bytes(c), u->sectors,
+ flash_dev_make_request))
goto err;
bcache_device_attach(d, c, u - c->uuids);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 3470fae4eabc..323276994aab 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -154,7 +154,7 @@ static ssize_t bch_snprint_string_list(char *buf,
size_t i;
for (i = 0; list[i]; i++)
- out += snprintf(out, buf + size - out,
+ out += scnprintf(out, buf + size - out,
i == selected ? "[%s] " : "%s ", list[i]);
out[-1] = '\n';
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 4a40f9eadeaf..3f7641fb28d5 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -183,7 +183,7 @@ static void update_writeback_rate(struct work_struct *work)
*/
set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
- smp_mb();
+ smp_mb__after_atomic();
/*
* CACHE_SET_IO_DISABLE might be set via sysfs interface,
@@ -193,7 +193,7 @@ static void update_writeback_rate(struct work_struct *work)
test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
- smp_mb();
+ smp_mb__after_atomic();
return;
}
@@ -229,7 +229,7 @@ static void update_writeback_rate(struct work_struct *work)
*/
clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
- smp_mb();
+ smp_mb__after_atomic();
}
static unsigned int writeback_delay(struct cached_dev *dc,
@@ -785,7 +785,9 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
return MAP_CONTINUE;
}
-void bch_sectors_dirty_init(struct bcache_device *d)
+static int bch_root_node_dirty_init(struct cache_set *c,
+ struct bcache_device *d,
+ struct bkey *k)
{
struct sectors_dirty_init op;
int ret;
@@ -796,8 +798,13 @@ void bch_sectors_dirty_init(struct bcache_device *d)
op.start = KEY(op.inode, 0, 0);
do {
- ret = bch_btree_map_keys(&op.op, d->c, &op.start,
- sectors_dirty_init_fn, 0);
+ ret = bcache_btree(map_keys_recurse,
+ k,
+ c->root,
+ &op.op,
+ &op.start,
+ sectors_dirty_init_fn,
+ 0);
if (ret == -EAGAIN)
schedule_timeout_interruptible(
msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
@@ -806,6 +813,151 @@ void bch_sectors_dirty_init(struct bcache_device *d)
break;
}
} while (ret == -EAGAIN);
+
+ return ret;
+}
+
+static int bch_dirty_init_thread(void *arg)
+{
+ struct dirty_init_thrd_info *info = arg;
+ struct bch_dirty_init_state *state = info->state;
+ struct cache_set *c = state->c;
+ struct btree_iter iter;
+ struct bkey *k, *p;
+ int cur_idx, prev_idx, skip_nr;
+ int i;
+
+ k = p = NULL;
+ i = 0;
+ cur_idx = prev_idx = 0;
+
+ bch_btree_iter_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ BUG_ON(!k);
+
+ p = k;
+
+ while (k) {
+ spin_lock(&state->idx_lock);
+ cur_idx = state->key_idx;
+ state->key_idx++;
+ spin_unlock(&state->idx_lock);
+
+ skip_nr = cur_idx - prev_idx;
+
+ while (skip_nr) {
+ k = bch_btree_iter_next_filter(&iter,
+ &c->root->keys,
+ bch_ptr_bad);
+ if (k)
+ p = k;
+ else {
+ atomic_set(&state->enough, 1);
+ /* Update state->enough earlier */
+ smp_mb__after_atomic();
+ goto out;
+ }
+ skip_nr--;
+ cond_resched();
+ }
+
+ if (p) {
+ if (bch_root_node_dirty_init(c, state->d, p) < 0)
+ goto out;
+ }
+
+ p = NULL;
+ prev_idx = cur_idx;
+ cond_resched();
+ }
+
+out:
+ /* In order to wake up state->wait in time */
+ smp_mb__before_atomic();
+ if (atomic_dec_and_test(&state->started))
+ wake_up(&state->wait);
+
+ return 0;
+}
+
+static int bch_btree_dirty_init_thread_nr(void)
+{
+ int n = num_online_cpus()/2;
+
+ if (n == 0)
+ n = 1;
+ else if (n > BCH_DIRTY_INIT_THRD_MAX)
+ n = BCH_DIRTY_INIT_THRD_MAX;
+
+ return n;
+}
+
+void bch_sectors_dirty_init(struct bcache_device *d)
+{
+ int i;
+ struct bkey *k = NULL;
+ struct btree_iter iter;
+ struct sectors_dirty_init op;
+ struct cache_set *c = d->c;
+ struct bch_dirty_init_state *state;
+ char name[32];
+
+ /* Just count root keys if no leaf node */
+ if (c->root->level == 0) {
+ bch_btree_op_init(&op.op, -1);
+ op.inode = d->id;
+ op.count = 0;
+ op.start = KEY(op.inode, 0, 0);
+
+ for_each_key_filter(&c->root->keys,
+ k, &iter, bch_ptr_invalid)
+ sectors_dirty_init_fn(&op.op, c->root, k);
+ return;
+ }
+
+ state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
+ if (!state) {
+ pr_warn("sectors dirty init failed: cannot allocate memory");
+ return;
+ }
+
+ state->c = c;
+ state->d = d;
+	state->total_threads = bch_btree_dirty_init_thread_nr();
+ state->key_idx = 0;
+ spin_lock_init(&state->idx_lock);
+ atomic_set(&state->started, 0);
+ atomic_set(&state->enough, 0);
+ init_waitqueue_head(&state->wait);
+
+ for (i = 0; i < state->total_threads; i++) {
+ /* Fetch latest state->enough earlier */
+ smp_mb__before_atomic();
+ if (atomic_read(&state->enough))
+ break;
+
+ state->infos[i].state = state;
+ atomic_inc(&state->started);
+ snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
+
+ state->infos[i].thread =
+ kthread_run(bch_dirty_init_thread,
+ &state->infos[i],
+ name);
+ if (IS_ERR(state->infos[i].thread)) {
+ pr_err("fails to run thread bch_dirty_init[%d]", i);
+ for (--i; i >= 0; i--)
+ kthread_stop(state->infos[i].thread);
+ goto out;
+ }
+ }
+
+ wait_event_interruptible(state->wait,
+ atomic_read(&state->started) == 0 ||
+ test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+
+out:
+ kfree(state);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 4e4c6810dc3c..b029843ce5b6 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -16,6 +16,7 @@
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50
+#define BCH_DIRTY_INIT_THRD_MAX 64
/*
* 14 (16384ths) is chosen here as something that each backing device
* should be a reasonable fraction of the share, and not to blow up
@@ -23,6 +24,24 @@
*/
#define WRITEBACK_SHARE_SHIFT 14
+struct bch_dirty_init_state;
+struct dirty_init_thrd_info {
+ struct bch_dirty_init_state *state;
+ struct task_struct *thread;
+};
+
+struct bch_dirty_init_state {
+ struct cache_set *c;
+ struct bcache_device *d;
+ int total_threads;
+ int key_idx;
+ spinlock_t idx_lock;
+ atomic_t started;
+ atomic_t enough;
+ wait_queue_head_t wait;
+ struct dirty_init_thrd_info infos[BCH_DIRTY_INIT_THRD_MAX];
+};
+
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
uint64_t i, ret = 0;
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index c05b12110456..17712456fa63 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -656,7 +656,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd,
return (bit >= (start + nr_regions));
}
-unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
+unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
{
return bitmap_weight(cmd->region_map, cmd->nr_regions);
}
@@ -850,6 +850,12 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
struct dirty_map *dmap;
unsigned long word, flags;
+ if (unlikely(region_nr >= cmd->nr_regions)) {
+ DMERR("Region %lu out of range (total number of regions %lu)",
+ region_nr, cmd->nr_regions);
+ return -ERANGE;
+ }
+
word = region_nr / BITS_PER_LONG;
spin_lock_irqsave(&cmd->bitmap_lock, flags);
@@ -879,6 +885,13 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
struct dirty_map *dmap;
unsigned long word, region_nr;
+ if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start ||
+ (start + nr_regions) > cmd->nr_regions)) {
+ DMERR("Invalid region range: start %lu, nr_regions %lu (total number of regions %lu)",
+ start, nr_regions, cmd->nr_regions);
+ return -ERANGE;
+ }
+
spin_lock_irq(&cmd->bitmap_lock);
if (cmd->read_only) {
diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
index 14af1ebd853f..d848b8799c07 100644
--- a/drivers/md/dm-clone-metadata.h
+++ b/drivers/md/dm-clone-metadata.h
@@ -156,7 +156,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd,
/*
* Returns the number of hydrated regions.
*/
-unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd);
+unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd);
/*
* Returns the first unhydrated region with region_nr >= @start
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index d1e1b5b56b1b..5ce96ddf1ce1 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -282,7 +282,7 @@ static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
- return (region_nr << clone->region_shift);
+ return ((sector_t)region_nr << clone->region_shift);
}
/* Get the region number of the bio */
@@ -293,10 +293,17 @@ static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
- unsigned long *rs, unsigned long *re)
+ unsigned long *rs, unsigned long *nr_regions)
{
+ unsigned long end;
+
*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
- *re = bio_end_sector(bio) >> clone->region_shift;
+ end = bio_end_sector(bio) >> clone->region_shift;
+
+ if (*rs >= end)
+ *nr_regions = 0;
+ else
+ *nr_regions = end - *rs;
}
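
A worked example of the new arithmetic, assuming a hypothetical 8-sector region (region_shift = 3): a bio covering sectors 10..25 fully covers only region 2 (sectors 16..23), so rs = 2 and nr_regions = 1, and the zero clamp fires whenever no whole region is covered. Standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned long region_size = 8, region_shift = 3;	/* hypothetical */
	unsigned long bi_sector = 10, bi_sectors = 16;	/* bio: sectors 10..25 */

	/* dm_sector_div_up(): first region starting at or after the bio */
	unsigned long rs = (bi_sector + region_size - 1) / region_size;
	unsigned long end = (bi_sector + bi_sectors) >> region_shift;
	unsigned long nr = rs >= end ? 0 : end - rs;

	printf("rs=%lu nr_regions=%lu\n", rs, nr);	/* rs=2 nr_regions=1 */
	return 0;
}
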
/* Check whether a bio overwrites a region */
@@ -454,7 +461,7 @@ static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
- unsigned long rs, re;
+ unsigned long rs, nr_regions;
/*
* If the destination device supports discards, remap and trim the
@@ -463,9 +470,9 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
*/
if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
remap_to_dest(clone, bio);
- bio_region_range(clone, bio, &rs, &re);
- trim_bio(bio, rs << clone->region_shift,
- (re - rs) << clone->region_shift);
+ bio_region_range(clone, bio, &rs, &nr_regions);
+ trim_bio(bio, region_to_sector(clone, rs),
+ nr_regions << clone->region_shift);
generic_make_request(bio);
} else
bio_endio(bio);
@@ -473,12 +480,21 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
static void process_discard_bio(struct clone *clone, struct bio *bio)
{
- unsigned long rs, re;
+ unsigned long rs, nr_regions;
- bio_region_range(clone, bio, &rs, &re);
- BUG_ON(re > clone->nr_regions);
+ bio_region_range(clone, bio, &rs, &nr_regions);
+ if (!nr_regions) {
+ bio_endio(bio);
+ return;
+ }
- if (unlikely(rs == re)) {
+ if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
+ (rs + nr_regions) > clone->nr_regions)) {
+ DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
+ clone_device_name(clone), rs, nr_regions,
+ clone->nr_regions,
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio_sectors(bio));
bio_endio(bio);
return;
}
@@ -487,7 +503,7 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
* The covered regions are already hydrated so we just need to pass
* down the discard.
*/
- if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) {
+ if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
complete_discard_bio(clone, bio, true);
return;
}
@@ -788,11 +804,14 @@ static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr
struct dm_io_region from, to;
struct clone *clone = hd->clone;
+ if (WARN_ON(!nr_regions))
+ return;
+
region_size = clone->region_size;
region_start = hd->region_nr;
region_end = region_start + nr_regions - 1;
- total_size = (nr_regions - 1) << clone->region_shift;
+ total_size = region_to_sector(clone, nr_regions - 1);
if (region_end == clone->nr_regions - 1) {
/*
@@ -1169,7 +1188,7 @@ static void process_deferred_discards(struct clone *clone)
int r = -EPERM;
struct bio *bio;
struct blk_plug plug;
- unsigned long rs, re;
+ unsigned long rs, nr_regions;
struct bio_list discards = BIO_EMPTY_LIST;
spin_lock_irq(&clone->lock);
@@ -1185,14 +1204,13 @@ static void process_deferred_discards(struct clone *clone)
/* Update the metadata */
bio_list_for_each(bio, &discards) {
- bio_region_range(clone, bio, &rs, &re);
+ bio_region_range(clone, bio, &rs, &nr_regions);
/*
* A discard request might cover regions that have been already
* hydrated. There is no need to update the metadata for these
* regions.
*/
- r = dm_clone_cond_set_range(clone->cmd, rs, re - rs);
-
+ r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
if (unlikely(r))
break;
}
@@ -1455,7 +1473,7 @@ static void clone_status(struct dm_target *ti, status_type_t type,
goto error;
}
- DMEMIT("%u %llu/%llu %llu %lu/%lu %u ",
+ DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
DM_CLONE_METADATA_BLOCK_SIZE,
(unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
(unsigned long long)nr_metadata_blocks,
@@ -1775,6 +1793,7 @@ error:
static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
+ sector_t nr_regions;
struct clone *clone;
struct dm_arg_set as;
@@ -1816,7 +1835,16 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto out_with_source_dev;
clone->region_shift = __ffs(clone->region_size);
- clone->nr_regions = dm_sector_div_up(ti->len, clone->region_size);
+ nr_regions = dm_sector_div_up(ti->len, clone->region_size);
+
+ /* Check for overflow */
+ if (nr_regions != (unsigned long)nr_regions) {
+ ti->error = "Too many regions. Consider increasing the region size";
+ r = -EOVERFLOW;
+ goto out_with_source_dev;
+ }
+
+ clone->nr_regions = nr_regions;
r = validate_nr_regions(clone->nr_regions, &ti->error);
if (r)
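
The new check guards against sector_t-to-unsigned-long truncation: clone->nr_regions and the region bitmap are indexed with unsigned long, which is 32 bits on 32-bit kernels while sector_t may be 64 bits. A fixed-width sketch of the failure mode it rejects:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t nr_regions = 0x100000001ULL;	/* needs more than 32 bits */
	uint32_t narrow = (uint32_t)nr_regions;	/* unsigned long on 32-bit */

	/* mirrors: nr_regions != (unsigned long)nr_regions */
	if (nr_regions != narrow)
		puts("overflow: region count does not fit the index type");
	return 0;
}
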
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index c6a529873d0f..3df90daba89e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -230,6 +230,8 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg);
+static bool crypt_integrity_aead(struct crypt_config *cc);
+
/*
* Use this to access cipher attributes that are independent of the key.
*/
@@ -346,7 +348,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
unsigned bs;
int log;
- if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
+ if (crypt_integrity_aead(cc))
bs = crypto_aead_blocksize(any_tfm_aead(cc));
else
bs = crypto_skcipher_blocksize(any_tfm(cc));
@@ -712,7 +714,7 @@ static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) {
+ if (crypt_integrity_aead(cc)) {
ti->error = "AEAD transforms not supported for EBOIV";
return -EINVAL;
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 2f03fecd312d..4094c47eca7f 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -39,6 +39,7 @@
#define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
+#define DISCARD_FILLER 0xf6
/*
* Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -257,6 +258,7 @@ struct dm_integrity_c {
bool just_formatted;
bool recalculate_flag;
bool fix_padding;
+ bool discard;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -284,7 +286,7 @@ struct dm_integrity_io {
struct work_struct work;
struct dm_integrity_c *ic;
- bool write;
+ enum req_opf op;
bool fua;
struct dm_integrity_range range;
@@ -510,8 +512,8 @@ static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
- (unsigned long long)sector,
- (unsigned long long)n_sectors,
+ sector,
+ n_sectors,
ic->sb->log2_sectors_per_block,
ic->log2_blocks_per_bitmap_bit,
mode);
@@ -1299,6 +1301,11 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
unsigned *metadata_offset, unsigned total_size, int op)
{
+#define MAY_BE_FILLER 1
+#define MAY_BE_HASH 2
+ unsigned hash_offset = 0;
+ unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+
do {
unsigned char *data, *dp;
struct dm_buffer *b;
@@ -1320,18 +1327,35 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
} else if (op == TAG_WRITE) {
memcpy(dp, tag, to_copy);
dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
- } else {
+ } else {
/* e.g.: op == TAG_CMP */
- if (unlikely(memcmp(dp, tag, to_copy))) {
- unsigned i;
- for (i = 0; i < to_copy; i++) {
- if (dp[i] != tag[i])
- break;
- total_size--;
+ if (likely(is_power_of_2(ic->tag_size))) {
+ if (unlikely(memcmp(dp, tag, to_copy)))
+ if (unlikely(!ic->discard) ||
+ unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
+ goto thorough_test;
+ }
+ } else {
+ unsigned i, ts;
+thorough_test:
+ ts = total_size;
+
+ for (i = 0; i < to_copy; i++, ts--) {
+ if (unlikely(dp[i] != tag[i]))
+ may_be &= ~MAY_BE_HASH;
+ if (likely(dp[i] != DISCARD_FILLER))
+ may_be &= ~MAY_BE_FILLER;
+ hash_offset++;
+ if (unlikely(hash_offset == ic->tag_size)) {
+ if (unlikely(!may_be)) {
+ dm_bufio_release(b);
+ return ts;
+ }
+ hash_offset = 0;
+ may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ }
}
- dm_bufio_release(b);
- return total_size;
}
}
dm_bufio_release(b);
@@ -1342,10 +1366,17 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
(*metadata_block)++;
*metadata_offset = 0;
}
+
+ if (unlikely(!is_power_of_2(ic->tag_size))) {
+ hash_offset = (hash_offset + to_copy) % ic->tag_size;
+ }
+
total_size -= to_copy;
} while (unlikely(total_size));
return 0;
+#undef MAY_BE_FILLER
+#undef MAY_BE_HASH
}
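
With discards enabled, TAG_CMP now accepts two outcomes: the stored tag matches the computed checksum, or the tag still consists entirely of the 0xf6 filler a discard wrote over it; only when a tag is neither does the byte-wise thorough test report a mismatch. A standalone illustration of that classification, with a userspace stand-in for memchr_inv():

#include <stdio.h>
#include <string.h>

#define DISCARD_FILLER	0xf6
#define TAG_SIZE	4

/* stand-in for memchr_inv(): nonzero if any byte differs from c */
static int any_byte_differs(const unsigned char *p, int c, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != (unsigned char)c)
			return 1;
	return 0;
}

int main(void)
{
	unsigned char expected[TAG_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char on_disk[TAG_SIZE] = { 0xf6, 0xf6, 0xf6, 0xf6 };

	if (!memcmp(on_disk, expected, TAG_SIZE))
		puts("tag matches checksum");
	else if (!any_byte_differs(on_disk, DISCARD_FILLER, TAG_SIZE))
		puts("tag is discard filler - treated as valid");	/* here */
	else
		puts("checksum mismatch");
	return 0;
}
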
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
@@ -1428,7 +1459,7 @@ static void dec_in_flight(struct dm_integrity_io *dio)
remove_range(ic, &dio->range);
- if (unlikely(dio->write))
+ if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
schedule_autocommit(ic);
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
@@ -1519,15 +1550,20 @@ static void integrity_metadata(struct work_struct *w)
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[HASH_MAX_DIGESTSIZE];
- unsigned sectors_to_process = dio->range.n_sectors;
- sector_t sector = dio->range.logical_sector;
+ char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ sector_t sector;
+ unsigned sectors_to_process;
+ sector_t save_metadata_block;
+ unsigned save_metadata_offset;
if (unlikely(ic->mode == 'R'))
goto skip_io;
- checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
- GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
+ if (likely(dio->op != REQ_OP_DISCARD))
+ checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
+ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
+ else
+ checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
if (!checksums) {
checksums = checksums_onstack;
if (WARN_ON(extra_space &&
@@ -1537,6 +1573,43 @@ static void integrity_metadata(struct work_struct *w)
}
}
+ if (unlikely(dio->op == REQ_OP_DISCARD)) {
+ sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
+ unsigned bi_size = dio->bio_details.bi_iter.bi_size;
+ unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
+ unsigned max_blocks = max_size / ic->tag_size;
+ memset(checksums, DISCARD_FILLER, max_size);
+
+ while (bi_size) {
+ unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+ this_step_blocks = min(this_step_blocks, max_blocks);
+ r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+ this_step_blocks * ic->tag_size, TAG_WRITE);
+ if (unlikely(r)) {
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
+ goto error;
+ }
+
+ /*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
+ printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
+ printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
+ BUG();
+ }*/
+ bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+ bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
+ }
+
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
+ goto skip_io;
+ }
+
+ save_metadata_block = dio->metadata_block;
+ save_metadata_offset = dio->metadata_offset;
+ sector = dio->range.logical_sector;
+ sectors_to_process = dio->range.n_sectors;
+
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos;
char *mem, *checksums_ptr;
@@ -1555,11 +1628,12 @@ again:
kunmap_atomic(mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
- checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
+ checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- DMERR_LIMIT("Checksum failed at sector 0x%llx",
- (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
+ (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
r = -EILSEQ;
atomic64_inc(&ic->number_of_mismatches);
}
@@ -1598,7 +1672,7 @@ again:
tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
this_len = min(biv.bv_len, data_to_process);
r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
- this_len, !dio->write ? TAG_READ : TAG_WRITE);
+ this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
if (unlikely(r))
goto error;
data_to_process -= this_len;
@@ -1625,6 +1699,20 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
dio->ic = ic;
dio->bi_status = 0;
+ dio->op = bio_op(bio);
+
+ if (unlikely(dio->op == REQ_OP_DISCARD)) {
+ if (ti->max_io_len) {
+ sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
+ unsigned log2_max_io_len = __fls(ti->max_io_len);
+ sector_t start_boundary = sec >> log2_max_io_len;
+ sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
+ if (start_boundary < end_boundary) {
+ sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
+ dm_accept_partial_bio(bio, len);
+ }
+ }
+ }
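+
+A worked example of the boundary split, assuming max_io_len is a power of two (which the mask arithmetic requires), say a hypothetical 1024: a discard of 100 sectors starting at sector 1000 crosses the boundary at 1024, so only the first 24 sectors are accepted now and dm core resubmits the remainder as a new bio.
+
+#include <stdio.h>
+
+int main(void)
+{
+	unsigned long max_io_len = 1024;	/* hypothetical, power of two */
+	unsigned log2_max = 10;			/* __fls(1024) */
+	unsigned long sec = 1000, nr = 100;	/* discard: sectors 1000..1099 */
+
+	unsigned long start_b = sec >> log2_max;		/* 0 */
+	unsigned long end_b = (sec + nr - 1) >> log2_max;	/* 1 */
+
+	if (start_b < end_b) {
+		unsigned long len = max_io_len - (sec & (max_io_len - 1));
+
+		/* accept first 24 sectors, defer 76 */
+		printf("accept %lu sectors, defer %lu\n", len, nr - len);
+	}
+	return 0;
+}
+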
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
submit_flush_bio(ic, dio);
@@ -1632,8 +1720,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
- dio->write = bio_op(bio) == REQ_OP_WRITE;
- dio->fua = dio->write && bio->bi_opf & REQ_FUA;
+ dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
if (unlikely(dio->fua)) {
/*
* Don't pass down the FUA flag because we have to flush
@@ -1643,18 +1730,18 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
- (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
- (unsigned long long)ic->provided_data_sectors);
+ dio->range.logical_sector, bio_sectors(bio),
+ ic->provided_data_sectors);
return DM_MAPIO_KILL;
}
if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
ic->sectors_per_block,
- (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
+ dio->range.logical_sector, bio_sectors(bio));
return DM_MAPIO_KILL;
}
- if (ic->sectors_per_block > 1) {
+ if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
struct bvec_iter iter;
struct bio_vec bv;
bio_for_each_segment(bv, bio, iter) {
@@ -1687,7 +1774,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
}
- if (unlikely(ic->mode == 'R') && unlikely(dio->write))
+ if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
return DM_MAPIO_KILL;
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
@@ -1717,13 +1804,13 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
mem = kmap_atomic(bv.bv_page);
- if (likely(dio->write))
+ if (likely(dio->op == REQ_OP_WRITE))
flush_dcache_page(bv.bv_page);
do {
struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
- if (unlikely(!dio->write)) {
+ if (unlikely(dio->op == REQ_OP_READ)) {
struct journal_sector *js;
char *mem_ptr;
unsigned s;
@@ -1748,12 +1835,12 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
- (unsigned long long)logical_sector);
+ logical_sector);
}
}
#endif
@@ -1770,7 +1857,7 @@ retry_kmap:
char *tag_addr;
BUG_ON(PageHighMem(biv.bv_page));
tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
- if (likely(dio->write))
+ if (likely(dio->op == REQ_OP_WRITE))
memcpy(tag_ptr, tag_addr, tag_now);
else
memcpy(tag_addr, tag_ptr, tag_now);
@@ -1778,12 +1865,12 @@ retry_kmap:
tag_ptr += tag_now;
tag_todo -= tag_now;
} while (unlikely(tag_todo)); else {
- if (likely(dio->write))
+ if (likely(dio->op == REQ_OP_WRITE))
memset(tag_ptr, 0, tag_todo);
}
}
- if (likely(dio->write)) {
+ if (likely(dio->op == REQ_OP_WRITE)) {
struct journal_sector *js;
unsigned s;
@@ -1819,12 +1906,12 @@ retry_kmap:
bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
- if (unlikely(!dio->write))
+ if (unlikely(dio->op == REQ_OP_READ))
flush_dcache_page(bv.bv_page);
kunmap_atomic(mem);
} while (n_sectors);
- if (likely(dio->write)) {
+ if (likely(dio->op == REQ_OP_WRITE)) {
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
@@ -1856,7 +1943,10 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
unsigned journal_section, journal_entry;
unsigned journal_read_pos;
struct completion read_comp;
- bool need_sync_io = ic->internal_hash && !dio->write;
+ bool discard_retried = false;
+ bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+ if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
+ need_sync_io = true;
if (need_sync_io && from_map) {
INIT_WORK(&dio->work, integrity_bio_wait);
@@ -1874,8 +1964,8 @@ retry:
}
dio->range.n_sectors = bio_sectors(bio);
journal_read_pos = NOT_FOUND;
- if (likely(ic->mode == 'J')) {
- if (dio->write) {
+ if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
+ if (dio->op == REQ_OP_WRITE) {
unsigned next_entry, i, pos;
unsigned ws, we, range_sectors;
@@ -1970,6 +2060,21 @@ offload_to_thread:
}
}
}
+ if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
+ sector_t next_sector;
+ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ if (unlikely(new_pos != NOT_FOUND) ||
+ unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
+ remove_range_unlocked(ic, &dio->range);
+ spin_unlock_irq(&ic->endio_wait.lock);
+ queue_work(ic->commit_wq, &ic->commit_work);
+ flush_workqueue(ic->commit_wq);
+ queue_work(ic->writer_wq, &ic->writer_work);
+ flush_workqueue(ic->writer_wq);
+ discard_retried = true;
+ goto lock_retry;
+ }
+ }
spin_unlock_irq(&ic->endio_wait.lock);
if (unlikely(journal_read_pos != NOT_FOUND)) {
@@ -1978,7 +2083,7 @@ offload_to_thread:
goto journal_read_write;
}
- if (ic->mode == 'B' && dio->write) {
+ if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
struct bitmap_block_status *bbs;
@@ -2007,6 +2112,18 @@ offload_to_thread:
bio->bi_end_io = integrity_end_io;
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
+ if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
+ integrity_metadata(&dio->work);
+ dm_integrity_flush_buffers(ic);
+
+ dio->in_flight = (atomic_t)ATOMIC_INIT(1);
+ dio->completion = NULL;
+
+ generic_make_request(bio);
+
+ return;
+ }
+
generic_make_request(bio);
if (need_sync_io) {
@@ -2193,6 +2310,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
sec &= ~(sector_t)(ic->sectors_per_block - 1);
}
}
+ if (unlikely(sec >= ic->provided_data_sectors))
+ continue;
get_area_and_offset(ic, sec, &area, &offset);
restore_last_bytes(ic, access_journal_data(ic, i, j), je);
for (k = j + 1; k < ic->journal_section_entries; k++) {
@@ -2202,6 +2321,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
break;
BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
sec2 = journal_entry_get_sector(je2);
+ if (unlikely(sec2 >= ic->provided_data_sectors))
+ break;
get_area_and_offset(ic, sec2, &area2, &offset2);
if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
break;
@@ -2404,7 +2525,7 @@ next_chunk:
get_area_and_offset(ic, logical_sector, &area, &offset);
}
- DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors);
+ DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
recalc_write_super(ic);
@@ -2828,9 +2949,29 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
static void dm_integrity_resume(struct dm_target *ti)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+ __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
int r;
+
DEBUG_print("resume\n");
+ if (ic->provided_data_sectors != old_provided_data_sectors) {
+ if (ic->provided_data_sectors > old_provided_data_sectors &&
+ ic->mode == 'B' &&
+ ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
+ rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
+ ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ }
+
+ ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+ }
+
if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
DEBUG_print("resume dirty_bitmap\n");
rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
@@ -2898,7 +3039,7 @@ static void dm_integrity_resume(struct dm_target *ti)
DEBUG_print("testing recalc: %x\n", ic->sb->flags);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
- DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors);
+ DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
if (recalc_pos < ic->provided_data_sectors) {
queue_work(ic->recalc_wq, &ic->recalc_work);
} else if (recalc_pos > ic->provided_data_sectors) {
@@ -2929,9 +3070,9 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_INFO:
DMEMIT("%llu %llu",
(unsigned long long)atomic64_read(&ic->number_of_mismatches),
- (unsigned long long)ic->provided_data_sectors);
+ ic->provided_data_sectors);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
- DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
+ DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
else
DMEMIT(" -");
break;
@@ -2944,6 +3085,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
+ arg_count += ic->discard;
arg_count += ic->mode == 'J';
arg_count += ic->mode == 'J';
arg_count += ic->mode == 'B';
@@ -2952,7 +3094,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
- DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
+ DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
DMEMIT(" meta_device:%s", ic->meta_dev->name);
@@ -2960,6 +3102,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
DMEMIT(" recalculate");
+ if (ic->discard)
+ DMEMIT(" allow_discards");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
@@ -2968,7 +3112,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
if (ic->mode == 'B') {
- DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
+ DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
}
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
@@ -3073,6 +3217,24 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
return 0;
}
+static void get_provided_data_sectors(struct dm_integrity_c *ic)
+{
+ if (!ic->meta_dev) {
+ int test_bit;
+ ic->provided_data_sectors = 0;
+ for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
+ __u64 prev_data_sectors = ic->provided_data_sectors;
+
+ ic->provided_data_sectors |= (sector_t)1 << test_bit;
+ if (calculate_device_limits(ic))
+ ic->provided_data_sectors = prev_data_sectors;
+ }
+ } else {
+ ic->provided_data_sectors = ic->data_device_sectors;
+ ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
+ }
+}
+
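+The helper maximizes provided_data_sectors greedily, bit by bit from the top down: a bit is kept only if calculate_device_limits() still succeeds with it set. The same technique in isolation, with a hypothetical capacity predicate in place of the device-limit calculation (the kernel also starts from fls64() of the device size rather than bit 63):
+
+#include <stdio.h>
+#include <stdint.h>
+
+/* hypothetical stand-in for calculate_device_limits() failing */
+static int too_big(uint64_t v, uint64_t capacity)
+{
+	return v > capacity;
+}
+
+/* largest value passing the predicate, built from the top bit down,
+ * mirroring the loop in get_provided_data_sectors() */
+static uint64_t max_fitting(uint64_t capacity)
+{
+	uint64_t v = 0;
+	int bit;
+
+	for (bit = 63; bit >= 3; bit--) {
+		uint64_t prev = v;
+
+		v |= (uint64_t)1 << bit;
+		if (too_big(v, capacity))
+			v = prev;	/* drop the bit, keep probing lower ones */
+	}
+	return v;
+}
+
+int main(void)
+{
+	printf("%llu\n", (unsigned long long)max_fitting(1000));	/* 1000 */
+	return 0;
+}
+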
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
unsigned journal_sections;
@@ -3100,20 +3262,15 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
- ic->provided_data_sectors = 0;
- for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
- __u64 prev_data_sectors = ic->provided_data_sectors;
-
- ic->provided_data_sectors |= (sector_t)1 << test_bit;
- if (calculate_device_limits(ic))
- ic->provided_data_sectors = prev_data_sectors;
- }
+ get_provided_data_sectors(ic);
if (!ic->provided_data_sectors)
return -EINVAL;
} else {
ic->sb->log2_interleave_sectors = 0;
- ic->provided_data_sectors = ic->data_device_sectors;
- ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
+
+ get_provided_data_sectors(ic);
+ if (!ic->provided_data_sectors)
+ return -EINVAL;
try_smaller_buffer:
ic->sb->journal_sections = cpu_to_le32(0);
@@ -3733,6 +3890,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
ic->recalculate_flag = true;
+ } else if (!strcmp(opt_string, "allow_discards")) {
+ ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
ic->fix_padding = true;
} else {
@@ -3791,6 +3950,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ if (ic->discard && !ic->internal_hash) {
+ r = -EINVAL;
+ ti->error = "Discard can be only used with internal hash";
+ goto bad;
+ }
+
ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
ic->autocommit_msec = sync_msec;
timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
@@ -3920,16 +4085,16 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
}
- ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
- if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
- /* test for overflow */
+ if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
r = -EINVAL;
- ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
+ ti->error = "Journal mac mismatch";
goto bad;
}
- if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
+
+ get_provided_data_sectors(ic);
+ if (!ic->provided_data_sectors) {
r = -EINVAL;
- ti->error = "Journal mac mismatch";
+ ti->error = "The device is too small";
goto bad;
}
@@ -3994,10 +4159,9 @@ try_smaller_buffer:
DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
- DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
- (unsigned long long)ic->provided_data_sectors);
+ DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
- DEBUG_print(" bits_in_journal %llu\n", (unsigned long long)bits_in_journal);
+ DEBUG_print(" bits_in_journal %llu\n", bits_in_journal);
if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
@@ -4121,6 +4285,8 @@ try_smaller_buffer:
ti->num_flush_bios = 1;
ti->flush_supported = true;
+ if (ic->discard)
+ ti->num_discard_bios = 1;
return 0;
@@ -4202,7 +4368,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 5, 0},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 8d07fdf63a47..e1db43446327 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -201,10 +201,27 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
+static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ int ret;
+ struct linear_c *lc = ti->private;
+ struct block_device *bdev = lc->dev->bdev;
+ struct dax_device *dax_dev = lc->dev->dax_dev;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+ dev_sector = linear_map_sector(ti, sector);
+ ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
+ if (ret)
+ return ret;
+ return dax_zero_page_range(dax_dev, pgoff, nr_pages);
+}
+
#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
+#define linear_dax_zero_page_range NULL
#endif
static struct target_type linear_target = {
@@ -226,6 +243,7 @@ static struct target_type linear_target = {
.direct_access = linear_dax_direct_access,
.dax_copy_from_iter = linear_dax_copy_from_iter,
.dax_copy_to_iter = linear_dax_copy_to_iter,
+ .dax_zero_page_range = linear_dax_zero_page_range,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 99721c76225d..8ea20b56b4d6 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -994,10 +994,26 @@ static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
}
+static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ int ret;
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+
+ ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT,
+ &pgoff);
+ if (ret)
+ return ret;
+	return dax_zero_page_range(lc->dev->dax_dev, pgoff, nr_pages);
+}
+
#else
#define log_writes_dax_direct_access NULL
#define log_writes_dax_copy_from_iter NULL
#define log_writes_dax_copy_to_iter NULL
+#define log_writes_dax_zero_page_range NULL
#endif
static struct target_type log_writes_target = {
@@ -1016,6 +1032,7 @@ static struct target_type log_writes_target = {
.direct_access = log_writes_dax_direct_access,
.dax_copy_from_iter = log_writes_dax_copy_from_iter,
.dax_copy_to_iter = log_writes_dax_copy_to_iter,
+ .dax_zero_page_range = log_writes_dax_zero_page_range,
};
static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 58fd137b6ae1..3e500098132f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -585,10 +585,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
- queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
- if (!pgpath || !queue_io)
+ if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
+ /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
+ queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+
if ((pgpath && queue_io) ||
(!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
/* Queue for the daemon to resubmit */
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 63bbcc20f49a..fa813c0f993d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -360,10 +360,32 @@ static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
+static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ int ret;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+ struct stripe_c *sc = ti->private;
+ struct dax_device *dax_dev;
+ struct block_device *bdev;
+ uint32_t stripe;
+
+ stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ dev_sector += sc->stripe[stripe].physical_start;
+ dax_dev = sc->stripe[stripe].dev->dax_dev;
+ bdev = sc->stripe[stripe].dev->bdev;
+
+ ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
+ if (ret)
+ return ret;
+ return dax_zero_page_range(dax_dev, pgoff, nr_pages);
+}
+
#else
#define stripe_dax_direct_access NULL
#define stripe_dax_copy_from_iter NULL
#define stripe_dax_copy_to_iter NULL
+#define stripe_dax_zero_page_range NULL
#endif
/*
@@ -486,6 +508,7 @@ static struct target_type stripe_target = {
.direct_access = stripe_dax_direct_access,
.dax_copy_from_iter = stripe_dax_copy_from_iter,
.dax_copy_to_iter = stripe_dax_copy_to_iter,
+ .dax_zero_page_range = stripe_dax_zero_page_range,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 3ceeb6b404ed..fb41b4f23c48 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -435,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
fio->level++;
if (type == DM_VERITY_BLOCK_TYPE_METADATA)
- block += v->data_blocks;
+ block = block - v->hash_start + v->data_blocks;
/*
* For RS(M, N), the continuous FEC data is divided into blocks of N
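
The FEC index space addresses hash (metadata) blocks as if they were appended directly after the data blocks, so the hash-device offset must be rebased rather than simply shifted. With hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned long data_blocks = 1000;	/* hypothetical layout */
	unsigned long hash_start = 1008;	/* hash area starts past the superblock */
	unsigned long block = 1010;		/* metadata block on the hash device */

	/* rebase onto the FEC index space: data first, then metadata */
	unsigned long fec_block = block - hash_start + data_blocks;
	/* the old "block += data_blocks" would have yielded 2010 */
	printf("fec block index: %lu\n", fec_block);	/* 1002 */
	return 0;
}
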
@@ -551,6 +551,7 @@ void verity_fec_dtr(struct dm_verity *v)
mempool_exit(&f->rs_pool);
mempool_exit(&f->prealloc_pool);
mempool_exit(&f->extra_pool);
+ mempool_exit(&f->output_pool);
kmem_cache_destroy(f->cache);
if (f->data_bufio)
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index a09bdc000e64..613c171b1b6d 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -26,6 +26,8 @@
#define AUTOCOMMIT_BLOCKS_SSD 65536
#define AUTOCOMMIT_BLOCKS_PMEM 64
#define AUTOCOMMIT_MSEC 1000
+#define MAX_AGE_DIV 16
+#define MAX_AGE_UNSPECIFIED -1UL
#define BITMAP_GRANULARITY 65536
#if BITMAP_GRANULARITY < PAGE_SIZE
@@ -88,6 +90,7 @@ struct wc_entry {
:47
#endif
;
+ unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
uint64_t original_sector;
uint64_t seq_count;
@@ -119,6 +122,7 @@ struct dm_writecache {
size_t writeback_size;
size_t freelist_high_watermark;
size_t freelist_low_watermark;
+ unsigned long max_age;
unsigned uncommitted_blocks;
unsigned autocommit_blocks;
@@ -130,6 +134,8 @@ struct dm_writecache {
struct timer_list autocommit_timer;
struct wait_queue_head freelist_wait;
+ struct timer_list max_age_timer;
+
atomic_t bio_in_progress[2];
struct wait_queue_head bio_in_progress_wait[2];
@@ -160,6 +166,7 @@ struct dm_writecache {
bool autocommit_time_set:1;
bool writeback_fua_set:1;
bool flush_on_suspend:1;
+ bool cleaner:1;
unsigned writeback_all;
struct workqueue_struct *writeback_wq;
@@ -502,6 +509,34 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}
+static void ssd_commit_superblock(struct dm_writecache *wc)
+{
+ int r;
+ struct dm_io_region region;
+ struct dm_io_request req;
+
+ region.bdev = wc->ssd_dev->bdev;
+ region.sector = 0;
+ region.count = PAGE_SIZE;
+
+ if (unlikely(region.sector + region.count > wc->metadata_sectors))
+ region.count = wc->metadata_sectors - region.sector;
+
+ region.sector += wc->start_sector;
+
+ req.bi_op = REQ_OP_WRITE;
+ req.bi_op_flags = REQ_SYNC | REQ_FUA;
+ req.mem.type = DM_IO_VMA;
+ req.mem.ptr.vma = (char *)wc->memory_map;
+ req.client = wc->dm_io;
+ req.notify.fn = NULL;
+ req.notify.context = NULL;
+
+ r = dm_io(&req, 1, &region, NULL);
+ if (unlikely(r))
+ writecache_error(wc, r, "error writing superblock");
+}
+
static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
if (WC_MODE_PMEM(wc))
@@ -596,6 +631,7 @@ static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *i
rb_link_node(&ins->rb_node, parent, node);
rb_insert_color(&ins->rb_node, &wc->tree);
list_add(&ins->lru, &wc->lru);
+ ins->age = jiffies;
}
static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
@@ -631,6 +667,16 @@ static inline void writecache_verify_watermark(struct dm_writecache *wc)
queue_work(wc->writeback_wq, &wc->writeback_work);
}
+static void writecache_max_age_timer(struct timer_list *t)
+{
+ struct dm_writecache *wc = from_timer(wc, t, max_age_timer);
+
+ if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
+ queue_work(wc->writeback_wq, &wc->writeback_work);
+ mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
+ }
+}
+
static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
struct wc_entry *e;
@@ -741,8 +787,10 @@ static void writecache_flush(struct dm_writecache *wc)
wc->seq_count++;
pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
- writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
- writecache_commit_flushed(wc, false);
+ if (WC_MODE_PMEM(wc))
+ writecache_commit_flushed(wc, false);
+ else
+ ssd_commit_superblock(wc);
wc->overwrote_committed = false;
@@ -837,6 +885,7 @@ static void writecache_suspend(struct dm_target *ti)
bool flush_on_suspend;
del_timer_sync(&wc->autocommit_timer);
+ del_timer_sync(&wc->max_age_timer);
wc_lock(wc);
writecache_flush(wc);
@@ -876,11 +925,30 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
struct wc_entry *e = &wc->entries[b];
e->index = b;
e->write_in_progress = false;
+ cond_resched();
}
return 0;
}
+static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
+{
+ struct dm_io_region region;
+ struct dm_io_request req;
+
+ region.bdev = wc->ssd_dev->bdev;
+ region.sector = wc->start_sector;
+ region.count = n_sectors;
+ req.bi_op = REQ_OP_READ;
+ req.bi_op_flags = REQ_SYNC;
+ req.mem.type = DM_IO_VMA;
+ req.mem.ptr.vma = (char *)wc->memory_map;
+ req.client = wc->dm_io;
+ req.notify.fn = NULL;
+
+ return dm_io(&req, 1, &region, NULL);
+}
+
static void writecache_resume(struct dm_target *ti)
{
struct dm_writecache *wc = ti->private;
@@ -891,8 +959,18 @@ static void writecache_resume(struct dm_target *ti)
wc_lock(wc);
- if (WC_MODE_PMEM(wc))
+ if (WC_MODE_PMEM(wc)) {
persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+ } else {
+ r = writecache_read_metadata(wc, wc->metadata_sectors);
+ if (r) {
+ size_t sb_entries_offset;
+ writecache_error(wc, r, "unable to read metadata: %d", r);
+ sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
+ memset((char *)wc->memory_map + sb_entries_offset, -1,
+ (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
+ }
+ }
wc->tree = RB_ROOT;
INIT_LIST_HEAD(&wc->lru);
@@ -930,6 +1008,7 @@ static void writecache_resume(struct dm_target *ti)
e->original_sector = le64_to_cpu(wme.original_sector);
e->seq_count = le64_to_cpu(wme.seq_count);
}
+ cond_resched();
}
#endif
for (b = 0; b < wc->n_blocks; b++) {
@@ -973,6 +1052,9 @@ erase_this:
writecache_verify_watermark(wc);
+ if (wc->max_age != MAX_AGE_UNSPECIFIED)
+ mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
+
wc_unlock(wc);
}
@@ -1021,6 +1103,28 @@ static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_w
return 0;
}
+static void activate_cleaner(struct dm_writecache *wc)
+{
+ wc->flush_on_suspend = true;
+ wc->cleaner = true;
+ wc->freelist_high_watermark = wc->n_blocks;
+ wc->freelist_low_watermark = wc->n_blocks;
+}
+
+static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+{
+ if (argc != 1)
+ return -EINVAL;
+
+ wc_lock(wc);
+ activate_cleaner(wc);
+ if (!dm_suspended(wc->ti))
+ writecache_verify_watermark(wc);
+ wc_unlock(wc);
+
+ return 0;
+}
+
static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
{
@@ -1031,6 +1135,8 @@ static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
r = process_flush_mesg(argc, argv, wc);
else if (!strcasecmp(argv[0], "flush_on_suspend"))
r = process_flush_on_suspend_mesg(argc, argv, wc);
+ else if (!strcasecmp(argv[0], "cleaner"))
+ r = process_cleaner_mesg(argc, argv, wc);
else
DMERR("unrecognised message received: %s", argv[0]);
@@ -1194,6 +1300,7 @@ read_next_block:
}
} else {
do {
+ bool found_entry = false;
if (writecache_has_error(wc))
goto unlock_error;
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
@@ -1204,9 +1311,25 @@ read_next_block:
wc->overwrote_committed = true;
goto bio_copy;
}
+ found_entry = true;
+ } else {
+ if (unlikely(wc->cleaner))
+ goto direct_write;
}
e = writecache_pop_from_freelist(wc, (sector_t)-1);
if (unlikely(!e)) {
+ if (!found_entry) {
+direct_write:
+ e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
+ if (e) {
+ sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
+ BUG_ON(!next_boundary);
+ if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
+ dm_accept_partial_bio(bio, next_boundary);
+ }
+ }
+ goto unlock_remap_origin;
+ }
writecache_wait_on_freelist(wc);
continue;
}
@@ -1619,7 +1742,9 @@ restart:
wbl.size = 0;
while (!list_empty(&wc->lru) &&
(wc->writeback_all ||
- wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) {
+ wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
+ (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
+ wc->max_age - wc->max_age / MAX_AGE_DIV))) {
n_walked++;
if (unlikely(n_walked > WRITEBACK_LATENCY) &&
@@ -1791,8 +1916,10 @@ static int init_memory(struct dm_writecache *wc)
pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
- for (b = 0; b < wc->n_blocks; b++)
+ for (b = 0; b < wc->n_blocks; b++) {
write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
+ cond_resched();
+ }
writecache_flush_all_metadata(wc);
writecache_commit_flushed(wc, false);
@@ -1882,9 +2009,11 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
wc->ti = ti;
mutex_init(&wc->lock);
+ wc->max_age = MAX_AGE_UNSPECIFIED;
writecache_poison_lists(wc);
init_waitqueue_head(&wc->freelist_wait);
timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
+ timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);
for (i = 0; i < 2; i++) {
atomic_set(&wc->bio_in_progress[i], 0);
@@ -2001,6 +2130,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Invalid block size";
goto bad;
}
+ if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
+ wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
+ r = -EINVAL;
+ ti->error = "Block size is smaller than device logical block size";
+ goto bad;
+ }
wc->block_size_bits = __ffs(wc->block_size);
wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2058,6 +2193,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto invalid_optional;
wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
wc->autocommit_time_set = true;
+ } else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
+ unsigned max_age_msecs;
+ string = dm_shift_arg(&as), opt_params--;
+ if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
+ goto invalid_optional;
+ if (max_age_msecs > 86400000)
+ goto invalid_optional;
+ wc->max_age = msecs_to_jiffies(max_age_msecs);
+ } else if (!strcasecmp(string, "cleaner")) {
+ wc->cleaner = true;
} else if (!strcasecmp(string, "fua")) {
if (WC_MODE_PMEM(wc)) {
wc->writeback_fua = true;
@@ -2089,8 +2234,6 @@ invalid_optional:
goto bad;
}
} else {
- struct dm_io_region region;
- struct dm_io_request req;
size_t n_blocks, n_metadata_blocks;
uint64_t n_bitmap_bits;
@@ -2147,19 +2290,9 @@ invalid_optional:
goto bad;
}
- region.bdev = wc->ssd_dev->bdev;
- region.sector = wc->start_sector;
- region.count = wc->metadata_sectors;
- req.bi_op = REQ_OP_READ;
- req.bi_op_flags = REQ_SYNC;
- req.mem.type = DM_IO_VMA;
- req.mem.ptr.vma = (char *)wc->memory_map;
- req.client = wc->dm_io;
- req.notify.fn = NULL;
-
- r = dm_io(&req, 1, &region, NULL);
+ r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
if (r) {
- ti->error = "Unable to read metadata";
+ ti->error = "Unable to read first block of metadata";
goto bad;
}
}
@@ -2235,6 +2368,9 @@ overflow:
do_div(x, 100);
wc->freelist_low_watermark = x;
+ if (wc->cleaner)
+ activate_cleaner(wc);
+
r = writecache_alloc_entries(wc);
if (r) {
ti->error = "Cannot allocate memory";
@@ -2278,9 +2414,9 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
extra_args = 0;
if (wc->start_sector)
extra_args += 2;
- if (wc->high_wm_percent_set)
+ if (wc->high_wm_percent_set && !wc->cleaner)
extra_args += 2;
- if (wc->low_wm_percent_set)
+ if (wc->low_wm_percent_set && !wc->cleaner)
extra_args += 2;
if (wc->max_writeback_jobs_set)
extra_args += 2;
@@ -2288,19 +2424,21 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
extra_args += 2;
if (wc->autocommit_time_set)
extra_args += 2;
+ if (wc->cleaner)
+ extra_args++;
if (wc->writeback_fua_set)
extra_args++;
DMEMIT("%u", extra_args);
if (wc->start_sector)
DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
- if (wc->high_wm_percent_set) {
+ if (wc->high_wm_percent_set && !wc->cleaner) {
x = (uint64_t)wc->freelist_high_watermark * 100;
x += wc->n_blocks / 2;
do_div(x, (size_t)wc->n_blocks);
DMEMIT(" high_watermark %u", 100 - (unsigned)x);
}
- if (wc->low_wm_percent_set) {
+ if (wc->low_wm_percent_set && !wc->cleaner) {
x = (uint64_t)wc->freelist_low_watermark * 100;
x += wc->n_blocks / 2;
do_div(x, (size_t)wc->n_blocks);
@@ -2312,6 +2450,10 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
if (wc->autocommit_time_set)
DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
+ if (wc->max_age != MAX_AGE_UNSPECIFIED)
+ DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
+ if (wc->cleaner)
+ DMEMIT(" cleaner");
if (wc->writeback_fua_set)
DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
break;
@@ -2320,7 +2462,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
static struct target_type writecache_target = {
.name = "writecache",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = writecache_ctr,
.dtr = writecache_dtr,
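Two user-visible writecache additions land above: a max_age constructor option (capped at 86400000 ms, i.e. one day) and a cleaner mode, reachable as a constructor flag or at runtime through the existing message interface (dmsetup message <dev> 0 cleaner, mirroring flush_on_suspend). How the max_age math works out, reading MAX_AGE_DIV == 16 from the header (oldest stands in for the LRU tail entry):

/* The timer re-arms every max_age/16, and the writeback loop also picks
 * an entry once it has aged to 15/16 of max_age: */
bool write_it = wc->writeback_all ||
	wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
	jiffies - oldest->age >= wc->max_age - wc->max_age / MAX_AGE_DIV;
/* Net effect: a dirty block is written back no later than roughly
 * max_age after it was cached, with at most max_age/16 of timer slack.
 * Cleaner mode instead raises both freelist watermarks to n_blocks, so
 * writeback drains the whole cache and new writes take the direct_write
 * path straight to the origin device. */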
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 516c7b671d25..369de15c4e80 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1109,7 +1109,6 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
switch (blkz->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
- zmd->nr_rnd_zones++;
break;
case BLK_ZONE_TYPE_SEQWRITE_REQ:
case BLK_ZONE_TYPE_SEQWRITE_PREF:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0413018c8305..db9e46114653 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -25,6 +25,7 @@
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
+#include <linux/part_stat.h>
#define DM_MSG_PREFIX "core"
@@ -1198,6 +1199,35 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
+static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ int ret = -EIO;
+ int srcu_idx;
+
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+
+ if (!ti)
+ goto out;
+ if (WARN_ON(!ti->type->dax_zero_page_range)) {
+ /*
+ * ->zero_page_range() is a mandatory dax operation; if we are
+ * here, something is clearly wrong. Don't drop the live-table
+ * reference here - the out: label below already does that.
+ */
+ goto out;
+ }
+ ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
+
+ out:
+ dm_put_live_table(md, srcu_idx);
+
+ return ret;
+}
+
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
@@ -1739,8 +1769,9 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
* won't be imposed.
*/
if (current->bio_list) {
- blk_queue_split(md->queue, &bio);
- if (!is_abnormal_io(bio))
+ if (is_abnormal_io(bio))
+ blk_queue_split(md->queue, &bio);
+ else
dm_queue_split(md, ti, &bio);
}
@@ -1938,16 +1969,15 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->table_devices);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
- if (!md->queue)
- goto bad;
- md->queue->queuedata = md;
/*
* default to bio-based required ->make_request_fn until DM
* table is loaded and md->type established. If request-based
* table is loaded: blk-mq will override accordingly.
*/
- blk_queue_make_request(md->queue, dm_make_request);
+ md->queue = blk_alloc_queue(dm_make_request, numa_node_id);
+ if (!md->queue)
+ goto bad;
+ md->queue->queuedata = md;
md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
@@ -1968,7 +1998,7 @@ static struct mapped_device *alloc_dev(int minor)
if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
md->dax_dev = alloc_dax(md, md->disk->disk_name,
&dm_dax_ops, 0);
- if (!md->dax_dev)
+ if (IS_ERR(md->dax_dev))
goto bad;
}
@@ -3199,6 +3229,7 @@ static const struct dax_operations dm_dax_ops = {
.dax_supported = dm_dax_supported,
.copy_from_iter = dm_dax_copy_from_iter,
.copy_to_iter = dm_dax_copy_to_iter,
+ .zero_page_range = dm_dax_zero_page_range,
};
/*
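A unit note on the new dm.c hook: DAX speaks in pages, device-mapper in 512-byte sectors, so the target lookup converts first (assuming the usual PAGE_SECTORS == PAGE_SIZE >> SECTOR_SHIFT, i.e. 8 with 4K pages):

sector_t sector = pgoff * PAGE_SECTORS;	/* e.g. pgoff 3 -> sector 24 */

The WARN_ON reflects that zero_page_range is now treated as a mandatory op for any DAX-capable target, which is why dm-stripe (and its siblings in this series) grow implementations of their own.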
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 469f551863be..271e8a587354 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -58,8 +58,10 @@
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
+#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
+#include <linux/part_stat.h>
#include <trace/events/block.h>
#include "md.h"
@@ -2491,12 +2493,12 @@ static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
int err = 0;
struct block_device *bdev;
- char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
- pr_warn("md: could not open %s.\n", __bdevname(dev, b));
+ pr_warn("md: could not open device unknown-block(%u,%u).\n",
+ MAJOR(dev), MINOR(dev));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
@@ -5621,12 +5623,11 @@ static int md_alloc(dev_t dev, char *name)
mddev->hold_active = UNTIL_STOP;
error = -ENOMEM;
- mddev->queue = blk_alloc_queue(GFP_KERNEL);
+ mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE);
if (!mddev->queue)
goto abort;
mddev->queue->queuedata = mddev;
- blk_queue_make_request(mddev->queue, md_make_request);
blk_set_stacking_limits(&mddev->queue->limits);
disk = alloc_disk(1 << shift);
@@ -6184,7 +6185,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
md_bitmap_wait_behind_writes(mddev);
- if (mddev->pers && mddev->pers->quiesce) {
+ if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
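The queue-allocation hunks in dm.c and md.c above are both fallout from the 5.7 block-layer change that folded make_request_fn registration into blk_alloc_queue(). The migration pattern, with placeholder names:

/* before (<= 5.6): allocate, then register the bio entry point */
q = blk_alloc_queue_node(GFP_KERNEL, node);
blk_queue_make_request(q, my_make_request);

/* after (5.7): the entry point is fixed at allocation time */
q = blk_alloc_queue(my_make_request, node);
q->queuedata = my_driver_data;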
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index b36a41332867..9dfea5c4b6ab 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -208,9 +208,9 @@ config MEDIA_SUBDRV_AUTOSELECT
If unsure say Y.
config MEDIA_HIDE_ANCILLARY_SUBDRV
- bool
- depends on MEDIA_SUBDRV_AUTOSELECT && !COMPILE_TEST && !EXPERT
- default y
+ bool
+ depends on MEDIA_SUBDRV_AUTOSELECT && !COMPILE_TEST && !EXPERT
+ default y
config MEDIA_ATTACH
bool
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 4a841bee5cc2..e748cd54b45d 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -23,7 +23,7 @@ struct cec_notifier {
struct kref kref;
struct device *hdmi_dev;
struct cec_connector_info conn_info;
- const char *conn_name;
+ const char *port_name;
struct cec_adapter *cec_adap;
u16 phys_addr;
@@ -32,16 +32,30 @@ struct cec_notifier {
static LIST_HEAD(cec_notifiers);
static DEFINE_MUTEX(cec_notifiers_lock);
-struct cec_notifier *
-cec_notifier_get_conn(struct device *hdmi_dev, const char *conn_name)
+/**
+ * cec_notifier_get_conn - find or create a new cec_notifier for the given
+ * device and connector tuple.
+ * @hdmi_dev: device that sends the events.
+ * @port_name: the name of the connector from which the event occurs
+ *
+ * If a notifier for device @hdmi_dev already exists, then increase the refcount
+ * and return that notifier.
+ *
+ * If it doesn't exist, then allocate a new notifier struct and return a
+ * pointer to that new struct.
+ *
+ * Return NULL if the memory could not be allocated.
+ */
+static struct cec_notifier *
+cec_notifier_get_conn(struct device *hdmi_dev, const char *port_name)
{
struct cec_notifier *n;
mutex_lock(&cec_notifiers_lock);
list_for_each_entry(n, &cec_notifiers, head) {
if (n->hdmi_dev == hdmi_dev &&
- (!conn_name ||
- (n->conn_name && !strcmp(n->conn_name, conn_name)))) {
+ (!port_name ||
+ (n->port_name && !strcmp(n->port_name, port_name)))) {
kref_get(&n->kref);
mutex_unlock(&cec_notifiers_lock);
return n;
@@ -51,9 +65,9 @@ cec_notifier_get_conn(struct device *hdmi_dev, const char *conn_name)
if (!n)
goto unlock;
n->hdmi_dev = hdmi_dev;
- if (conn_name) {
- n->conn_name = kstrdup(conn_name, GFP_KERNEL);
- if (!n->conn_name) {
+ if (port_name) {
+ n->port_name = kstrdup(port_name, GFP_KERNEL);
+ if (!n->port_name) {
kfree(n);
n = NULL;
goto unlock;
@@ -68,7 +82,6 @@ unlock:
mutex_unlock(&cec_notifiers_lock);
return n;
}
-EXPORT_SYMBOL_GPL(cec_notifier_get_conn);
static void cec_notifier_release(struct kref *kref)
{
@@ -76,7 +89,7 @@ static void cec_notifier_release(struct kref *kref)
container_of(kref, struct cec_notifier, kref);
list_del(&n->head);
- kfree(n->conn_name);
+ kfree(n->port_name);
kfree(n);
}
@@ -88,10 +101,10 @@ static void cec_notifier_put(struct cec_notifier *n)
}
struct cec_notifier *
-cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name,
const struct cec_connector_info *conn_info)
{
- struct cec_notifier *n = cec_notifier_get_conn(hdmi_dev, conn_name);
+ struct cec_notifier *n = cec_notifier_get_conn(hdmi_dev, port_name);
if (!n)
return n;
@@ -129,7 +142,7 @@ void cec_notifier_conn_unregister(struct cec_notifier *n)
EXPORT_SYMBOL_GPL(cec_notifier_conn_unregister);
struct cec_notifier *
-cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name,
struct cec_adapter *adap)
{
struct cec_notifier *n;
@@ -137,7 +150,7 @@ cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
if (WARN_ON(!adap))
return NULL;
- n = cec_notifier_get_conn(hdmi_dev, conn_name);
+ n = cec_notifier_get_conn(hdmi_dev, port_name);
if (!n)
return n;
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index aabb830e7468..d6531874faa6 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -97,8 +97,6 @@ void saa7146_buffer_finish(struct saa7146_dev *dev,
DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state);
DEB_EE("q->curr:%p\n", q->curr);
- BUG_ON(!q->curr);
-
/* finish current buffer */
if (NULL == q->curr) {
DEB_D("aiii. no current buffer\n");
@@ -296,7 +294,7 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
int res;
switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER: {
+ case VFL_TYPE_VIDEO: {
DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n",
file, vma);
q = &fh->video_q;
@@ -378,7 +376,7 @@ static ssize_t fops_read(struct file *file, char __user *data, size_t count, lof
int ret;
switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
/*
DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun",
file, data, (unsigned long)count);
@@ -409,7 +407,7 @@ static ssize_t fops_write(struct file *file, const char __user *data, size_t cou
int ret;
switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
return -EINVAL;
case VFL_TYPE_VBI:
if (fh->dev->ext_vv_data->vbi_fops.write) {
@@ -597,7 +595,7 @@ int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev,
DEB_EE("dev:%p, name:'%s', type:%d\n", dev, name, type);
vfd->fops = &video_fops;
- if (type == VFL_TYPE_GRABBER)
+ if (type == VFL_TYPE_VIDEO)
vfd->ioctl_ops = &dev->ext_vv_data->vid_ops;
else
vfd->ioctl_ops = &dev->ext_vv_data->vbi_ops;
@@ -611,7 +609,7 @@ int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev,
vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
vfd->device_caps |= dev->ext_vv_data->capabilities;
- if (type == VFL_TYPE_GRABBER)
+ if (type == VFL_TYPE_VIDEO)
vfd->device_caps &=
~(V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT);
else
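Two mechanical cleanups in the saa7146 hunks: VFL_TYPE_GRABBER is renamed VFL_TYPE_VIDEO as part of this cycle's tree-wide V4L2 rename, and the BUG_ON(!q->curr) goes away because the very next statement already handles the case gracefully:

/* the defensive check below makes the BUG_ON redundant */
if (NULL == q->curr) {
	DEB_D("aiii. no current buffer\n");
	return;		/* early exit, per the function body upstream */
}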
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index c95d4583498e..8916bb644756 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -45,88 +45,88 @@ static void smsdvb_print_dvb_stats(struct smsdvb_debugfs *debug_data,
buf = debug_data->stats_data;
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_rf_locked = %d\n", p->is_rf_locked);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_demod_locked = %d\n", p->is_demod_locked);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_external_lna_on = %d\n", p->is_external_lna_on);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"SNR = %d\n", p->SNR);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"ber = %d\n", p->ber);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"FIB_CRC = %d\n", p->FIB_CRC);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"ts_per = %d\n", p->ts_per);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"MFER = %d\n", p->MFER);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"RSSI = %d\n", p->RSSI);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"in_band_pwr = %d\n", p->in_band_pwr);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"carrier_offset = %d\n", p->carrier_offset);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"modem_state = %d\n", p->modem_state);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"frequency = %d\n", p->frequency);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"bandwidth = %d\n", p->bandwidth);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"transmission_mode = %d\n", p->transmission_mode);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"modem_state = %d\n", p->modem_state);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"guard_interval = %d\n", p->guard_interval);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"code_rate = %d\n", p->code_rate);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"lp_code_rate = %d\n", p->lp_code_rate);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"hierarchy = %d\n", p->hierarchy);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"constellation = %d\n", p->constellation);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"burst_size = %d\n", p->burst_size);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"burst_duration = %d\n", p->burst_duration);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"burst_cycle_time = %d\n", p->burst_cycle_time);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"calc_burst_cycle_time = %d\n",
p->calc_burst_cycle_time);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_of_rows = %d\n", p->num_of_rows);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_of_padd_cols = %d\n", p->num_of_padd_cols);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_of_punct_cols = %d\n", p->num_of_punct_cols);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"error_ts_packets = %d\n", p->error_ts_packets);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"total_ts_packets = %d\n", p->total_ts_packets);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_of_valid_mpe_tlbs = %d\n", p->num_of_valid_mpe_tlbs);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_of_invalid_mpe_tlbs = %d\n", p->num_of_invalid_mpe_tlbs);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_of_corrected_mpe_tlbs = %d\n", p->num_of_corrected_mpe_tlbs);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"ber_error_count = %d\n", p->ber_error_count);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"ber_bit_count = %d\n", p->ber_bit_count);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"pre_ber = %d\n", p->pre_ber);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"cell_id = %d\n", p->cell_id);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"dvbh_srv_ind_hp = %d\n", p->dvbh_srv_ind_hp);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"dvbh_srv_ind_lp = %d\n", p->dvbh_srv_ind_lp);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_mpe_received = %d\n", p->num_mpe_received);
debug_data->stats_count = n;
@@ -148,42 +148,42 @@ static void smsdvb_print_isdb_stats(struct smsdvb_debugfs *debug_data,
buf = debug_data->stats_data;
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"statistics_type = %d\t", p->statistics_type);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"full_size = %d\n", p->full_size);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_rf_locked = %d\t\t", p->is_rf_locked);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_demod_locked = %d\t", p->is_demod_locked);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_external_lna_on = %d\n", p->is_external_lna_on);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"SNR = %d dB\t\t", p->SNR);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"RSSI = %d dBm\t\t", p->RSSI);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"in_band_pwr = %d dBm\n", p->in_band_pwr);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"carrier_offset = %d\t", p->carrier_offset);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"bandwidth = %d\t\t", p->bandwidth);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"frequency = %d Hz\n", p->frequency);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"transmission_mode = %d\t", p->transmission_mode);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"modem_state = %d\t\t", p->modem_state);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"guard_interval = %d\n", p->guard_interval);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"system_type = %d\t\t", p->system_type);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"partial_reception = %d\t", p->partial_reception);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_of_layers = %d\n", p->num_of_layers);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors);
for (i = 0; i < 3; i++) {
@@ -191,31 +191,34 @@ static void smsdvb_print_isdb_stats(struct smsdvb_debugfs *debug_data,
p->layer_info[i].number_of_segments > 13)
continue;
- n += snprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i);
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t",
p->layer_info[i].code_rate);
- n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n",
p->layer_info[i].constellation);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t",
p->layer_info[i].ber);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tber_error_count = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
+ "\tber_error_count = %-5d\t",
p->layer_info[i].ber_error_count);
- n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n",
p->layer_info[i].ber_bit_count);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t",
p->layer_info[i].pre_ber);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n",
p->layer_info[i].ts_per);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\terror_ts_packets = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
+ "\terror_ts_packets = %-5d\t",
p->layer_info[i].error_ts_packets);
- n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
+ "total_ts_packets = %-5d\t",
p->layer_info[i].total_ts_packets);
- n += snprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n",
p->layer_info[i].ti_ldepth_i);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"\tnumber_of_segments = %d\t",
p->layer_info[i].number_of_segments);
- n += snprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n",
p->layer_info[i].tmcc_errors);
}
@@ -238,44 +241,44 @@ static void smsdvb_print_isdb_stats_ex(struct smsdvb_debugfs *debug_data,
buf = debug_data->stats_data;
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"statistics_type = %d\t", p->statistics_type);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"full_size = %d\n", p->full_size);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_rf_locked = %d\t\t", p->is_rf_locked);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_demod_locked = %d\t", p->is_demod_locked);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"is_external_lna_on = %d\n", p->is_external_lna_on);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"SNR = %d dB\t\t", p->SNR);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"RSSI = %d dBm\t\t", p->RSSI);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"in_band_pwr = %d dBm\n", p->in_band_pwr);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"carrier_offset = %d\t", p->carrier_offset);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"bandwidth = %d\t\t", p->bandwidth);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"frequency = %d Hz\n", p->frequency);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"transmission_mode = %d\t", p->transmission_mode);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"modem_state = %d\t\t", p->modem_state);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"guard_interval = %d\n", p->guard_interval);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"system_type = %d\t\t", p->system_type);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"partial_reception = %d\t", p->partial_reception);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"num_of_layers = %d\n", p->num_of_layers);
- n += snprintf(&buf[n], PAGE_SIZE - n, "segment_number = %d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "segment_number = %d\t",
p->segment_number);
- n += snprintf(&buf[n], PAGE_SIZE - n, "tune_bw = %d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "tune_bw = %d\n",
p->tune_bw);
for (i = 0; i < 3; i++) {
@@ -283,31 +286,34 @@ static void smsdvb_print_isdb_stats_ex(struct smsdvb_debugfs *debug_data,
p->layer_info[i].number_of_segments > 13)
continue;
- n += snprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i);
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t",
p->layer_info[i].code_rate);
- n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n",
p->layer_info[i].constellation);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t",
p->layer_info[i].ber);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tber_error_count = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
+ "\tber_error_count = %-5d\t",
p->layer_info[i].ber_error_count);
- n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n",
p->layer_info[i].ber_bit_count);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t",
p->layer_info[i].pre_ber);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n",
p->layer_info[i].ts_per);
- n += snprintf(&buf[n], PAGE_SIZE - n, "\terror_ts_packets = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
+ "\terror_ts_packets = %-5d\t",
p->layer_info[i].error_ts_packets);
- n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %-5d\t",
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
+ "total_ts_packets = %-5d\t",
p->layer_info[i].total_ts_packets);
- n += snprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n",
p->layer_info[i].ti_ldepth_i);
- n += snprintf(&buf[n], PAGE_SIZE - n,
+ n += scnprintf(&buf[n], PAGE_SIZE - n,
"\tnumber_of_segments = %d\t",
p->layer_info[i].number_of_segments);
- n += snprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n",
+ n += scnprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n",
p->layer_info[i].tmcc_errors);
}
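The wholesale snprintf -> scnprintf swap above matters precisely because of the `n += ...` accumulator pattern:

/* snprintf() returns the length the output WOULD have had, so after a
 * truncated print n can exceed PAGE_SIZE, and the next call's
 * PAGE_SIZE - n underflows to a huge unsigned value -> out-of-bounds
 * write. scnprintf() returns the bytes actually stored (excluding the
 * NUL), so n can never pass PAGE_SIZE - 1 and the remaining-space math
 * stays safe. */
n += scnprintf(&buf[n], PAGE_SIZE - n, "SNR = %d\n", p->SNR);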
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 4489744fbbd9..44d65f5be845 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -393,8 +393,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
}
}
- dprintk(1, "allocated %d buffers, %d plane(s) each\n",
- buffer, num_planes);
+ dprintk(3, "allocated %d buffers, %d plane(s) each\n",
+ buffer, num_planes);
return buffer;
}
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index d0c9dffe49e5..d3a3ee5b597b 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -593,8 +593,8 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
/* checking if dmabuf is big enough to store contiguous chunk */
contig_size = vb2_dc_get_contiguous_size(sgt);
if (contig_size < buf->size) {
- pr_err("contiguous chunk is too small %lu/%lu b\n",
- contig_size, buf->size);
+ pr_err("contiguous chunk is too small %lu/%lu\n",
+ contig_size, buf->size);
dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
return -EFAULT;
}
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index ac7be872f460..5de016412c42 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -2182,7 +2182,7 @@ int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr,
u32 *data, u32 flags)
{
u8 buf[sizeof(*data)] = { 0 };
- int rc = -EIO;
+ int rc;
u32 word = 0;
if (!data)
@@ -4229,7 +4229,7 @@ int drxj_dap_scu_atomic_write_reg16(struct i2c_device_addr *dev_addr,
u16 data, u32 flags)
{
u8 buf[2];
- int rc = -EIO;
+ int rc;
buf[0] = (u8) (data & 0xff);
buf[1] = (u8) ((data >> 8) & 0xff);
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index c96f05ff5f2f..d2c28dcf6b42 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -65,6 +65,92 @@ err:
}
/*
+ * The m88ds3103b demod has an internal device related to clocking. The i2c
+ * gate must first be opened for one transaction; then writes are allowed.
+ */
+static int m88ds3103b_dt_write(struct m88ds3103_dev *dev, int reg, int data)
+{
+ struct i2c_client *client = dev->client;
+ u8 buf[] = {reg, data};
+ u8 val;
+ int ret;
+ struct i2c_msg msg = {
+ .addr = dev->dt_addr, .flags = 0, .buf = buf, .len = 2
+ };
+
+ m88ds3103_update_bits(dev, 0x11, 0x01, 0x00);
+
+ val = 0x11;
+ ret = regmap_write(dev->regmap, 0x03, val);
+ if (ret)
+ dev_dbg(&client->dev, "fail=%d\n", ret);
+
+ ret = i2c_transfer(dev->dt_client->adapter, &msg, 1);
+ if (ret != 1) {
+ dev_err(&client->dev, "0x%02x (ret=%i, reg=0x%02x, value=0x%02x)\n",
+ dev->dt_addr, ret, reg, data);
+
+ m88ds3103_update_bits(dev, 0x11, 0x01, 0x01);
+ return -EREMOTEIO;
+ }
+ m88ds3103_update_bits(dev, 0x11, 0x01, 0x01);
+
+ dev_dbg(&client->dev, "0x%02x reg 0x%02x, value 0x%02x\n",
+ dev->dt_addr, reg, data);
+
+ return 0;
+}
+
+/*
+ * The m88ds3103b demod has an internal device related to clocking. The i2c
+ * gate must first be opened for two transactions; then reads are allowed.
+ */
+static int m88ds3103b_dt_read(struct m88ds3103_dev *dev, u8 reg)
+{
+ struct i2c_client *client = dev->client;
+ int ret;
+ u8 val;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+ struct i2c_msg msg[] = {
+ {
+ .addr = dev->dt_addr,
+ .flags = 0,
+ .buf = b0,
+ .len = 1
+ },
+ {
+ .addr = dev->dt_addr,
+ .flags = I2C_M_RD,
+ .buf = b1,
+ .len = 1
+ }
+ };
+
+ m88ds3103_update_bits(dev, 0x11, 0x01, 0x00);
+
+ val = 0x12;
+ ret = regmap_write(dev->regmap, 0x03, val);
+ if (ret)
+ dev_dbg(&client->dev, "fail=%d\n", ret);
+
+ ret = i2c_transfer(dev->dt_client->adapter, msg, 2);
+ if (ret != 2) {
+ dev_err(&client->dev, "0x%02x (ret=%d, reg=0x%02x)\n",
+ dev->dt_addr, ret, reg);
+
+ m88ds3103_update_bits(dev, 0x11, 0x01, 0x01);
+ return -EREMOTEIO;
+ }
+ m88ds3103_update_bits(dev, 0x11, 0x01, 0x01);
+
+ dev_dbg(&client->dev, "0x%02x reg 0x%02x, value 0x%02x\n",
+ dev->dt_addr, reg, b1[0]);
+
+ return b1[0];
+}
+
+/*
* Get the demodulator AGC PWM voltage setting supplied to the tuner.
*/
int m88ds3103_get_agc_pwm(struct dvb_frontend *fe, u8 *_agc_pwm)
@@ -288,6 +374,251 @@ err:
return ret;
}
+static int m88ds3103b_select_mclk(struct m88ds3103_dev *dev)
+{
+ struct i2c_client *client = dev->client;
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
+ u32 adc_Freq_MHz[3] = {96, 93, 99};
+ u8 reg16_list[3] = {96, 92, 100}, reg16, reg15;
+ u32 offset_MHz[3];
+ u32 max_offset = 0;
+ u32 old_setting = dev->mclk;
+ u32 tuner_freq_MHz = c->frequency / 1000;
+ u8 i;
+ char big_symbol = 0;
+
+ big_symbol = (c->symbol_rate > 45010000) ? 1 : 0;
+
+ if (big_symbol) {
+ reg16 = 115;
+ } else {
+ reg16 = 96;
+
+ /* TODO: IS THIS NECESSARY ? */
+ for (i = 0; i < 3; i++) {
+ offset_MHz[i] = tuner_freq_MHz % adc_Freq_MHz[i];
+
+ if (offset_MHz[i] > (adc_Freq_MHz[i] / 2))
+ offset_MHz[i] = adc_Freq_MHz[i] - offset_MHz[i];
+
+ if (offset_MHz[i] > max_offset) {
+ max_offset = offset_MHz[i];
+ reg16 = reg16_list[i];
+ dev->mclk = adc_Freq_MHz[i] * 1000 * 1000;
+
+ if (big_symbol)
+ dev->mclk /= 2;
+
+ dev_dbg(&client->dev, "modifying mclk %u -> %u\n",
+ old_setting, dev->mclk);
+ }
+ }
+ }
+
+ if (dev->mclk == 93000000)
+ regmap_write(dev->regmap, 0xA0, 0x42);
+ else if (dev->mclk == 96000000)
+ regmap_write(dev->regmap, 0xA0, 0x44);
+ else if (dev->mclk == 99000000)
+ regmap_write(dev->regmap, 0xA0, 0x46);
+ else if (dev->mclk == 110250000)
+ regmap_write(dev->regmap, 0xA0, 0x4E);
+ else
+ regmap_write(dev->regmap, 0xA0, 0x44);
+
+ reg15 = m88ds3103b_dt_read(dev, 0x15);
+
+ m88ds3103b_dt_write(dev, 0x05, 0x40);
+ m88ds3103b_dt_write(dev, 0x11, 0x08);
+
+ if (big_symbol)
+ reg15 |= 0x02;
+ else
+ reg15 &= ~0x02;
+
+ m88ds3103b_dt_write(dev, 0x15, reg15);
+ m88ds3103b_dt_write(dev, 0x16, reg16);
+
+ usleep_range(5000, 5500);
+
+ m88ds3103b_dt_write(dev, 0x05, 0x00);
+ m88ds3103b_dt_write(dev, 0x11, (u8)(big_symbol ? 0x0E : 0x0A));
+
+ usleep_range(5000, 5500);
+
+ return 0;
+}
+
+static int m88ds3103b_set_mclk(struct m88ds3103_dev *dev, u32 mclk_khz)
+{
+ u8 reg11 = 0x0A, reg15, reg16, reg1D, reg1E, reg1F, tmp;
+ u8 sm, f0 = 0, f1 = 0, f2 = 0, f3 = 0;
+ u16 pll_div_fb, N;
+ u32 div;
+
+ reg15 = m88ds3103b_dt_read(dev, 0x15);
+ reg16 = m88ds3103b_dt_read(dev, 0x16);
+ reg1D = m88ds3103b_dt_read(dev, 0x1D);
+
+ if (dev->cfg->ts_mode != M88DS3103_TS_SERIAL) {
+ if (reg16 == 92)
+ tmp = 93;
+ else if (reg16 == 100)
+ tmp = 99;
+ else
+ tmp = 96;
+
+ mclk_khz *= tmp;
+ mclk_khz /= 96;
+ }
+
+ pll_div_fb = (reg15 & 0x01) << 8;
+ pll_div_fb += reg16;
+ pll_div_fb += 32;
+
+ div = 9000 * pll_div_fb * 4;
+ div /= mclk_khz;
+
+ if (dev->cfg->ts_mode == M88DS3103_TS_SERIAL) {
+ reg11 |= 0x02;
+
+ if (div <= 32) {
+ N = 2;
+
+ f0 = 0;
+ f1 = div / N;
+ f2 = div - f1;
+ f3 = 0;
+ } else if (div <= 34) {
+ N = 3;
+
+ f0 = div / N;
+ f1 = (div - f0) / (N - 1);
+ f2 = div - f0 - f1;
+ f3 = 0;
+ } else if (div <= 64) {
+ N = 4;
+
+ f0 = div / N;
+ f1 = (div - f0) / (N - 1);
+ f2 = (div - f0 - f1) / (N - 2);
+ f3 = div - f0 - f1 - f2;
+ } else {
+ N = 4;
+
+ f0 = 16;
+ f1 = 16;
+ f2 = 16;
+ f3 = 16;
+ }
+
+ if (f0 == 16)
+ f0 = 0;
+ else if ((f0 < 8) && (f0 != 0))
+ f0 = 8;
+
+ if (f1 == 16)
+ f1 = 0;
+ else if ((f1 < 8) && (f1 != 0))
+ f1 = 8;
+
+ if (f2 == 16)
+ f2 = 0;
+ else if ((f2 < 8) && (f2 != 0))
+ f2 = 8;
+
+ if (f3 == 16)
+ f3 = 0;
+ else if ((f3 < 8) && (f3 != 0))
+ f3 = 8;
+ } else {
+ reg11 &= ~0x02;
+
+ if (div <= 32) {
+ N = 2;
+
+ f0 = 0;
+ f1 = div / N;
+ f2 = div - f1;
+ f3 = 0;
+ } else if (div <= 48) {
+ N = 3;
+
+ f0 = div / N;
+ f1 = (div - f0) / (N - 1);
+ f2 = div - f0 - f1;
+ f3 = 0;
+ } else if (div <= 64) {
+ N = 4;
+
+ f0 = div / N;
+ f1 = (div - f0) / (N - 1);
+ f2 = (div - f0 - f1) / (N - 2);
+ f3 = div - f0 - f1 - f2;
+ } else {
+ N = 4;
+
+ f0 = 16;
+ f1 = 16;
+ f2 = 16;
+ f3 = 16;
+ }
+
+ if (f0 == 16)
+ f0 = 0;
+ else if ((f0 < 9) && (f0 != 0))
+ f0 = 9;
+
+ if (f1 == 16)
+ f1 = 0;
+ else if ((f1 < 9) && (f1 != 0))
+ f1 = 9;
+
+ if (f2 == 16)
+ f2 = 0;
+ else if ((f2 < 9) && (f2 != 0))
+ f2 = 9;
+
+ if (f3 == 16)
+ f3 = 0;
+ else if ((f3 < 9) && (f3 != 0))
+ f3 = 9;
+ }
+
+ sm = N - 1;
+
+ /* Write to registers */
+ //reg15 &= 0x01;
+ //reg15 |= (pll_div_fb >> 8) & 0x01;
+
+ //reg16 = pll_div_fb & 0xFF;
+
+ reg1D &= ~0x03;
+ reg1D |= sm;
+ reg1D |= 0x80;
+
+ reg1E = ((f3 << 4) + f2) & 0xFF;
+ reg1F = ((f1 << 4) + f0) & 0xFF;
+
+ m88ds3103b_dt_write(dev, 0x05, 0x40);
+ m88ds3103b_dt_write(dev, 0x11, 0x08);
+ m88ds3103b_dt_write(dev, 0x1D, reg1D);
+ m88ds3103b_dt_write(dev, 0x1E, reg1E);
+ m88ds3103b_dt_write(dev, 0x1F, reg1F);
+
+ m88ds3103b_dt_write(dev, 0x17, 0xc1);
+ m88ds3103b_dt_write(dev, 0x17, 0x81);
+
+ usleep_range(5000, 5500);
+
+ m88ds3103b_dt_write(dev, 0x05, 0x00);
+ m88ds3103b_dt_write(dev, 0x11, 0x0A);
+
+ usleep_range(5000, 5500);
+
+ return 0;
+}
+
static int m88ds3103_set_frontend(struct dvb_frontend *fe)
{
struct m88ds3103_dev *dev = fe->demodulator_priv;
@@ -298,7 +629,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
u8 u8tmp, u8tmp1 = 0, u8tmp2 = 0; /* silence compiler warning */
u8 buf[3];
u16 u16tmp;
- u32 tuner_frequency_khz, target_mclk;
+ u32 tuner_frequency_khz, target_mclk, u32tmp;
s32 s32tmp;
static const struct reg_sequence reset_buf[] = {
{0x07, 0x80}, {0x07, 0x00}
@@ -321,6 +652,20 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
/* Disable demod clock path */
if (dev->chip_id == M88RS6000_CHIP_ID) {
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ ret = regmap_read(dev->regmap, 0xb2, &u32tmp);
+ if (ret)
+ goto err;
+ if (u32tmp == 0x01) {
+ ret = regmap_write(dev->regmap, 0x00, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0xb2, 0x00);
+ if (ret)
+ goto err;
+ }
+ }
+
ret = regmap_write(dev->regmap, 0x06, 0xe0);
if (ret)
goto err;
@@ -346,7 +691,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
tuner_frequency_khz = c->frequency;
}
- /* select M88RS6000 demod main mclk and ts mclk from tuner die. */
+ /* set M88RS6000/DS3103B demod main mclk and ts mclk from tuner die */
if (dev->chip_id == M88RS6000_CHIP_ID) {
if (c->symbol_rate > 45010000)
dev->mclk = 110250000;
@@ -358,6 +703,11 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
else
target_mclk = 144000000;
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ m88ds3103b_select_mclk(dev);
+ m88ds3103b_set_mclk(dev, target_mclk / 1000);
+ }
+
/* Enable demod clock path */
ret = regmap_write(dev->regmap, 0x06, 0x00);
if (ret)
@@ -469,12 +819,42 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
ret = m88ds3103_update_bits(dev, 0x9d, 0x08, 0x08);
if (ret)
goto err;
+
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ buf[0] = m88ds3103b_dt_read(dev, 0x15);
+ buf[1] = m88ds3103b_dt_read(dev, 0x16);
+
+ if (c->symbol_rate > 45010000) {
+ buf[0] &= ~0x03;
+ buf[0] |= 0x02;
+ buf[0] |= ((147 - 32) >> 8) & 0x01;
+ buf[1] = (147 - 32) & 0xFF;
+
+ dev->mclk = 110250 * 1000;
+ } else {
+ buf[0] &= ~0x03;
+ buf[0] |= ((128 - 32) >> 8) & 0x01;
+ buf[1] = (128 - 32) & 0xFF;
+
+ dev->mclk = 96000 * 1000;
+ }
+ m88ds3103b_dt_write(dev, 0x15, buf[0]);
+ m88ds3103b_dt_write(dev, 0x16, buf[1]);
+
+ regmap_read(dev->regmap, 0x30, &u32tmp);
+ u32tmp &= ~0x80;
+ regmap_write(dev->regmap, 0x30, u32tmp & 0xff);
+ }
+
ret = regmap_write(dev->regmap, 0xf1, 0x01);
if (ret)
goto err;
- ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80);
- if (ret)
- goto err;
+
+ if (dev->chiptype != M88DS3103_CHIPTYPE_3103B) {
+ ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80);
+ if (ret)
+ goto err;
+ }
}
switch (dev->cfg->ts_mode) {
@@ -488,6 +868,10 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
break;
case M88DS3103_TS_PARALLEL:
u8tmp = 0x02;
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ u8tmp = 0x01;
+ u8tmp1 = 0x01;
+ }
break;
case M88DS3103_TS_CI:
u8tmp = 0x03;
@@ -516,6 +900,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
u8tmp1 = 0x3f;
u8tmp2 = 0x3f;
break;
+ case M88DS3103_TS_PARALLEL:
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ ret = m88ds3103_update_bits(dev, 0x29, 0x01, u8tmp1);
+ if (ret)
+ goto err;
+ }
+ /* fall through */
default:
u16tmp = DIV_ROUND_UP(target_mclk, dev->cfg->ts_clk);
u8tmp1 = u16tmp / 2 - 1;
@@ -543,6 +934,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
else
u8tmp = 0x06;
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B)
+ m88ds3103b_set_mclk(dev, target_mclk / 1000);
+
ret = regmap_write(dev->regmap, 0xc3, 0x08);
if (ret)
goto err;
@@ -578,6 +972,16 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ /* enable/disable 192M LDPC clock */
+ ret = m88ds3103_update_bits(dev, 0x29, 0x10,
+ (c->delivery_system == SYS_DVBS) ? 0x10 : 0x0);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_update_bits(dev, 0xc9, 0x08, 0x08);
+ }
+
dev_dbg(&client->dev, "carrier offset=%d\n",
(tuner_frequency_khz - c->frequency));
@@ -642,7 +1046,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
if (utmp)
goto warm;
- /* global reset, global diseqc reset, golbal fec reset */
+ /* global reset, global diseqc reset, global fec reset */
ret = regmap_write(dev->regmap, 0x07, 0xe0);
if (ret)
goto err;
@@ -652,12 +1056,15 @@ static int m88ds3103_init(struct dvb_frontend *fe)
/* cold state - try to download firmware */
dev_info(&client->dev, "found a '%s' in cold state\n",
- m88ds3103_ops.info.name);
+ dev->fe.ops.info.name);
- if (dev->chip_id == M88RS6000_CHIP_ID)
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B)
+ name = M88DS3103B_FIRMWARE;
+ else if (dev->chip_id == M88RS6000_CHIP_ID)
name = M88RS6000_FIRMWARE;
else
name = M88DS3103_FIRMWARE;
+
/* request the firmware, this will block and timeout */
ret = request_firmware(&firmware, name, &client->dev);
if (ret) {
@@ -700,10 +1107,16 @@ static int m88ds3103_init(struct dvb_frontend *fe)
}
dev_info(&client->dev, "found a '%s' in warm state\n",
- m88ds3103_ops.info.name);
+ dev->fe.ops.info.name);
dev_info(&client->dev, "firmware version: %X.%X\n",
(utmp >> 4) & 0xf, (utmp >> 0 & 0xf));
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ m88ds3103b_dt_write(dev, 0x21, 0x92);
+ m88ds3103b_dt_write(dev, 0x15, 0x6C);
+ m88ds3103b_dt_write(dev, 0x17, 0xC1);
+ m88ds3103b_dt_write(dev, 0x17, 0x81);
+ }
warm:
/* warm state */
dev->warm = true;
@@ -1393,6 +1806,8 @@ static int m88ds3103_probe(struct i2c_client *client,
goto err_kfree;
dev->chip_id = utmp >> 1;
+ dev->chiptype = (u8)id->driver_data;
+
dev_dbg(&client->dev, "chip_id=%02x\n", dev->chip_id);
switch (dev->chip_id) {
@@ -1459,7 +1874,10 @@ static int m88ds3103_probe(struct i2c_client *client,
/* create dvb_frontend */
memcpy(&dev->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
- if (dev->chip_id == M88RS6000_CHIP_ID)
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B)
+ strscpy(dev->fe.ops.info.name, "Montage Technology M88DS3103B",
+ sizeof(dev->fe.ops.info.name));
+ else if (dev->chip_id == M88RS6000_CHIP_ID)
strscpy(dev->fe.ops.info.name, "Montage Technology M88RS6000",
sizeof(dev->fe.ops.info.name));
if (!pdata->attach_in_use)
@@ -1470,6 +1888,26 @@ static int m88ds3103_probe(struct i2c_client *client,
/* setup callbacks */
pdata->get_dvb_frontend = m88ds3103_get_dvb_frontend;
pdata->get_i2c_adapter = m88ds3103_get_i2c_adapter;
+
+ if (dev->chiptype == M88DS3103_CHIPTYPE_3103B) {
+ /* enable i2c repeater for tuner */
+ m88ds3103_update_bits(dev, 0x11, 0x01, 0x01);
+
+ /* get frontend address */
+ ret = regmap_read(dev->regmap, 0x29, &utmp);
+ if (ret)
+ goto err_kfree;
+ dev->dt_addr = ((utmp & 0x80) == 0) ? 0x42 >> 1 : 0x40 >> 1;
+ dev_dbg(&client->dev, "dt addr is 0x%02x\n", dev->dt_addr);
+
+ dev->dt_client = i2c_new_dummy_device(client->adapter,
+ dev->dt_addr);
+ /* i2c_new_dummy_device() returns ERR_PTR(), never NULL */
+ if (IS_ERR(dev->dt_client)) {
+ ret = PTR_ERR(dev->dt_client);
+ goto err_kfree;
+ }
+ }
+
return 0;
err_kfree:
kfree(dev);
@@ -1484,6 +1922,9 @@ static int m88ds3103_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
+ if (dev->dt_client)
+ i2c_unregister_device(dev->dt_client);
+
i2c_mux_del_adapters(dev->muxc);
kfree(dev);
@@ -1491,7 +1932,9 @@ static int m88ds3103_remove(struct i2c_client *client)
}
static const struct i2c_device_id m88ds3103_id_table[] = {
- {"m88ds3103", 0},
+ {"m88ds3103", M88DS3103_CHIPTYPE_3103},
+ {"m88rs6000", M88DS3103_CHIPTYPE_RS6000},
+ {"m88ds3103b", M88DS3103_CHIPTYPE_3103B},
{}
};
MODULE_DEVICE_TABLE(i2c, m88ds3103_id_table);
@@ -1513,3 +1956,4 @@ MODULE_DESCRIPTION("Montage Technology M88DS3103 DVB-S/S2 demodulator driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(M88DS3103_FIRMWARE);
MODULE_FIRMWARE(M88RS6000_FIRMWARE);
+MODULE_FIRMWARE(M88DS3103B_FIRMWARE);
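For readers following the new 3103B paths: the dt helpers near the top of this file gate every access through two demod registers. The sequence, inferred from the code rather than a datasheet:

/* dt access recipe used by m88ds3103b_dt_read()/_dt_write():
 *  1. clear bit 0 of reg 0x11 (disable the tuner i2c repeater)
 *  2. write reg 0x03 = 0x11 to open the gate for one transaction (the
 *     write case), or 0x12 for two (the address-write + byte-read case)
 *  3. raw i2c_transfer() to dev->dt_addr
 *  4. set bit 0 of reg 0x11 again to restore the repeater
 * Caveat: the read helper returns the register value or -EREMOTEIO, and
 * callers store the result straight into a u8, so a failed read degrades
 * into a bogus register value instead of a propagated error. */
reg15 = m88ds3103b_dt_read(dev, 0x15);	/* e.g. fetch the PLL config byte */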
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
index c825032f07ab..aa5306f40201 100644
--- a/drivers/media/dvb-frontends/m88ds3103_priv.h
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -16,13 +16,20 @@
#include <linux/regmap.h>
#include <linux/math64.h>
-#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw"
-#define M88RS6000_FIRMWARE "dvb-demod-m88rs6000.fw"
+#define M88DS3103B_FIRMWARE "dvb-demod-m88ds3103b.fw"
+#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw"
+#define M88RS6000_FIRMWARE "dvb-demod-m88rs6000.fw"
+
#define M88RS6000_CHIP_ID 0x74
#define M88DS3103_CHIP_ID 0x70
+#define M88DS3103_CHIPTYPE_3103 0
+#define M88DS3103_CHIPTYPE_RS6000 1
+#define M88DS3103_CHIPTYPE_3103B 2
+
struct m88ds3103_dev {
struct i2c_client *client;
+ struct i2c_client *dt_client;
struct regmap_config regmap_config;
struct regmap *regmap;
struct m88ds3103_config config;
@@ -35,10 +42,13 @@ struct m88ds3103_dev {
struct i2c_mux_core *muxc;
/* auto detect chip id to do different config */
u8 chip_id;
+ /* chip type to differentiate m88rs6000 from m88ds3103b */
+ u8 chiptype;
/* main mclk is calculated for M88RS6000 dynamically */
s32 mclk;
u64 post_bit_error;
u64 post_bit_count;
+ u8 dt_addr;
};
struct m88ds3103_reg_val {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 1953b00b3e48..685c0ac71819 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -470,10 +470,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
goto error;
if (dev->delivery_system == SYS_DVBS) {
- dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
- buf[2] << 8 | buf[3] << 0;
- dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
- buf[2] << 8 | buf[3] << 0;
+ u32 bit_error = buf[0] << 24 | buf[1] << 16 |
+ buf[2] << 8 | buf[3] << 0;
+
+ dev->dvbv3_ber = bit_error;
+ dev->post_bit_error += bit_error;
c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
dev->block_error += buf[4] << 8 | buf[5] << 0;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index c68e002d26ea..125d596c13dd 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -238,6 +238,7 @@ config VIDEO_ADV7604
tristate "Analog Devices ADV7604 decoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on GPIOLIB || COMPILE_TEST
+ select REGMAP_I2C
select HDMI
select V4L2_FWNODE
help
@@ -379,6 +380,7 @@ config VIDEO_TVP5150
tristate "Texas Instruments TVP5150 video decoder"
depends on VIDEO_V4L2 && I2C
select V4L2_FWNODE
+ select REGMAP_I2C
help
Support for the Texas Instruments TVP5150 video decoder.
@@ -584,6 +586,7 @@ config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB && I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on V4L2_FWNODE
+ select REGMAP_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX214 camera.
@@ -591,6 +594,17 @@ config VIDEO_IMX214
To compile this driver as a module, choose M here: the
module will be called imx214.
+config VIDEO_IMX219
+ tristate "Sony IMX219 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX219 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx219.
+
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -612,6 +626,7 @@ config VIDEO_IMX274
config VIDEO_IMX290
tristate "Sony IMX290 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select REGMAP_I2C
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
@@ -804,6 +819,7 @@ config VIDEO_OV7670
config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
depends on I2C && VIDEO_V4L2
+ select REGMAP_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index c147bb9d28db..77bf7d0b691f 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -111,6 +111,7 @@ obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
obj-$(CONFIG_VIDEO_HI556) += hi556.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
+obj-$(CONFIG_VIDEO_IMX219) += imx219.o
obj-$(CONFIG_VIDEO_IMX258) += imx258.o
obj-$(CONFIG_VIDEO_IMX274) += imx274.o
obj-$(CONFIG_VIDEO_IMX290) += imx290.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 6528e2343fc8..00159daa6fcd 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -749,6 +749,17 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
return ret;
}
+static int adv7180_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ return adv7180_set_pad_format(sd, cfg, &fmt);
+}
+
static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
@@ -854,6 +865,7 @@ static const struct v4l2_subdev_core_ops adv7180_core_ops = {
};
static const struct v4l2_subdev_pad_ops adv7180_pad_ops = {
+ .init_cfg = adv7180_init_cfg,
.enum_mbus_code = adv7180_enum_mbus_code,
.set_fmt = adv7180_set_pad_format,
.get_fmt = adv7180_get_pad_format,
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index adcaaa8c86d1..4175d06ffd47 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -803,7 +803,6 @@ err_rpm_put:
static int imx214_g_frame_interval(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_interval *fival)
{
- fival->pad = 0;
fival->interval.numerator = 1;
fival->interval.denominator = IMX214_FPS;
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
new file mode 100644
index 000000000000..cb03bdec1f9c
--- /dev/null
+++ b/drivers/media/i2c/imx219.c
@@ -0,0 +1,1481 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A V4L2 driver for Sony IMX219 cameras.
+ * Copyright (C) 2019, Raspberry Pi (Trading) Ltd
+ *
+ * Based on Sony imx258 camera driver
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * DT / fwnode changes, and regulator / GPIO control taken from imx214 driver
+ * Copyright 2018 Qtechnology A/S
+ *
+ * Flip handling taken from the Sony IMX319 driver.
+ * Copyright (C) 2018 Intel Corporation
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+#include <asm/unaligned.h>
+
+#define IMX219_REG_VALUE_08BIT 1
+#define IMX219_REG_VALUE_16BIT 2
+
+#define IMX219_REG_MODE_SELECT 0x0100
+#define IMX219_MODE_STANDBY 0x00
+#define IMX219_MODE_STREAMING 0x01
+
+/* Chip ID */
+#define IMX219_REG_CHIP_ID 0x0000
+#define IMX219_CHIP_ID 0x0219
+
+/* External clock frequency is 24.0M */
+#define IMX219_XCLK_FREQ 24000000
+
+/* Pixel rate is fixed at 182.4M for all the modes */
+#define IMX219_PIXEL_RATE 182400000
+
+#define IMX219_DEFAULT_LINK_FREQ 456000000
+
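As a consistency check (not part of the patch): assuming the 2-lane DDR CSI-2 bus this driver later enforces and 10-bit pixels, the two rates above agree, since 456 MHz x 2 edges x 2 lanes / 10 bits per pixel = 182.4 Mpix/s. Sketch:

	/* Sketch only: link_freq * 2 (DDR) * 2 (lanes) / 10 (bits per RAW10 pixel) */
	static const unsigned long long imx219_rate_check =
		456000000ULL * 2 * 2 / 10;	/* == 182400000 == IMX219_PIXEL_RATE */
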
+/* V_TIMING internal */
+#define IMX219_REG_VTS 0x0160
+#define IMX219_VTS_15FPS 0x0dc6
+#define IMX219_VTS_30FPS_1080P 0x06e3
+#define IMX219_VTS_30FPS_BINNED 0x06e3
+#define IMX219_VTS_30FPS_640x480 0x06e3
+#define IMX219_VTS_MAX 0xffff
+
+#define IMX219_VBLANK_MIN 4
+
+/*Frame Length Line*/
+#define IMX219_FLL_MIN 0x08a6
+#define IMX219_FLL_MAX 0xffff
+#define IMX219_FLL_STEP 1
+#define IMX219_FLL_DEFAULT 0x0c98
+
+/* HBLANK control - read only */
+#define IMX219_PPL_DEFAULT 3448
+
+/* Exposure control */
+#define IMX219_REG_EXPOSURE 0x015a
+#define IMX219_EXPOSURE_MIN 4
+#define IMX219_EXPOSURE_STEP 1
+#define IMX219_EXPOSURE_DEFAULT 0x640
+#define IMX219_EXPOSURE_MAX 65535
+
+/* Analog gain control */
+#define IMX219_REG_ANALOG_GAIN 0x0157
+#define IMX219_ANA_GAIN_MIN 0
+#define IMX219_ANA_GAIN_MAX 232
+#define IMX219_ANA_GAIN_STEP 1
+#define IMX219_ANA_GAIN_DEFAULT 0x0
+
+/* Digital gain control */
+#define IMX219_REG_DIGITAL_GAIN 0x0158
+#define IMX219_DGTL_GAIN_MIN 0x0100
+#define IMX219_DGTL_GAIN_MAX 0x0fff
+#define IMX219_DGTL_GAIN_DEFAULT 0x0100
+#define IMX219_DGTL_GAIN_STEP 1
+
+#define IMX219_REG_ORIENTATION 0x0172
+
+/* Test Pattern Control */
+#define IMX219_REG_TEST_PATTERN 0x0600
+#define IMX219_TEST_PATTERN_DISABLE 0
+#define IMX219_TEST_PATTERN_SOLID_COLOR 1
+#define IMX219_TEST_PATTERN_COLOR_BARS 2
+#define IMX219_TEST_PATTERN_GREY_COLOR 3
+#define IMX219_TEST_PATTERN_PN9 4
+
+/* Test pattern colour components */
+#define IMX219_REG_TESTP_RED 0x0602
+#define IMX219_REG_TESTP_GREENR 0x0604
+#define IMX219_REG_TESTP_BLUE 0x0606
+#define IMX219_REG_TESTP_GREENB 0x0608
+#define IMX219_TESTP_COLOUR_MIN 0
+#define IMX219_TESTP_COLOUR_MAX 0x03ff
+#define IMX219_TESTP_COLOUR_STEP 1
+#define IMX219_TESTP_RED_DEFAULT IMX219_TESTP_COLOUR_MAX
+#define IMX219_TESTP_GREENR_DEFAULT 0
+#define IMX219_TESTP_BLUE_DEFAULT 0
+#define IMX219_TESTP_GREENB_DEFAULT 0
+
+struct imx219_reg {
+ u16 address;
+ u8 val;
+};
+
+struct imx219_reg_list {
+ unsigned int num_of_regs;
+ const struct imx219_reg *regs;
+};
+
+/* Mode : resolution and related config&values */
+struct imx219_mode {
+ /* Frame width */
+ unsigned int width;
+ /* Frame height */
+ unsigned int height;
+
+ /* V-timing */
+ unsigned int vts_def;
+
+ /* Default register values */
+ struct imx219_reg_list reg_list;
+};
+
+/*
+ * Register sets lifted off the I2C interface from the Raspberry Pi firmware
+ * driver.
+ * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
+ */
+static const struct imx219_reg mode_3280x2464_regs[] = {
+ {0x0100, 0x00},
+ {0x30eb, 0x0c},
+ {0x30eb, 0x05},
+ {0x300a, 0xff},
+ {0x300b, 0xff},
+ {0x30eb, 0x05},
+ {0x30eb, 0x09},
+ {0x0114, 0x01},
+ {0x0128, 0x00},
+ {0x012a, 0x18},
+ {0x012b, 0x00},
+ {0x0164, 0x00},
+ {0x0165, 0x00},
+ {0x0166, 0x0c},
+ {0x0167, 0xcf},
+ {0x0168, 0x00},
+ {0x0169, 0x00},
+ {0x016a, 0x09},
+ {0x016b, 0x9f},
+ {0x016c, 0x0c},
+ {0x016d, 0xd0},
+ {0x016e, 0x09},
+ {0x016f, 0xa0},
+ {0x0170, 0x01},
+ {0x0171, 0x01},
+ {0x0174, 0x00},
+ {0x0175, 0x00},
+ {0x0301, 0x05},
+ {0x0303, 0x01},
+ {0x0304, 0x03},
+ {0x0305, 0x03},
+ {0x0306, 0x00},
+ {0x0307, 0x39},
+ {0x030b, 0x01},
+ {0x030c, 0x00},
+ {0x030d, 0x72},
+ {0x0624, 0x0c},
+ {0x0625, 0xd0},
+ {0x0626, 0x09},
+ {0x0627, 0xa0},
+ {0x455e, 0x00},
+ {0x471e, 0x4b},
+ {0x4767, 0x0f},
+ {0x4750, 0x14},
+ {0x4540, 0x00},
+ {0x47b4, 0x14},
+ {0x4713, 0x30},
+ {0x478b, 0x10},
+ {0x478f, 0x10},
+ {0x4793, 0x10},
+ {0x4797, 0x0e},
+ {0x479b, 0x0e},
+ {0x0162, 0x0d},
+ {0x0163, 0x78},
+};
+
+static const struct imx219_reg mode_1920_1080_regs[] = {
+ {0x0100, 0x00},
+ {0x30eb, 0x05},
+ {0x30eb, 0x0c},
+ {0x300a, 0xff},
+ {0x300b, 0xff},
+ {0x30eb, 0x05},
+ {0x30eb, 0x09},
+ {0x0114, 0x01},
+ {0x0128, 0x00},
+ {0x012a, 0x18},
+ {0x012b, 0x00},
+ {0x0162, 0x0d},
+ {0x0163, 0x78},
+ {0x0164, 0x02},
+ {0x0165, 0xa8},
+ {0x0166, 0x0a},
+ {0x0167, 0x27},
+ {0x0168, 0x02},
+ {0x0169, 0xb4},
+ {0x016a, 0x06},
+ {0x016b, 0xeb},
+ {0x016c, 0x07},
+ {0x016d, 0x80},
+ {0x016e, 0x04},
+ {0x016f, 0x38},
+ {0x0170, 0x01},
+ {0x0171, 0x01},
+ {0x0174, 0x00},
+ {0x0175, 0x00},
+ {0x0301, 0x05},
+ {0x0303, 0x01},
+ {0x0304, 0x03},
+ {0x0305, 0x03},
+ {0x0306, 0x00},
+ {0x0307, 0x39},
+ {0x030b, 0x01},
+ {0x030c, 0x00},
+ {0x030d, 0x72},
+ {0x0624, 0x07},
+ {0x0625, 0x80},
+ {0x0626, 0x04},
+ {0x0627, 0x38},
+ {0x455e, 0x00},
+ {0x471e, 0x4b},
+ {0x4767, 0x0f},
+ {0x4750, 0x14},
+ {0x4540, 0x00},
+ {0x47b4, 0x14},
+ {0x4713, 0x30},
+ {0x478b, 0x10},
+ {0x478f, 0x10},
+ {0x4793, 0x10},
+ {0x4797, 0x0e},
+ {0x479b, 0x0e},
+ {0x0162, 0x0d},
+ {0x0163, 0x78},
+};
+
+static const struct imx219_reg mode_1640_1232_regs[] = {
+ {0x0100, 0x00},
+ {0x30eb, 0x0c},
+ {0x30eb, 0x05},
+ {0x300a, 0xff},
+ {0x300b, 0xff},
+ {0x30eb, 0x05},
+ {0x30eb, 0x09},
+ {0x0114, 0x01},
+ {0x0128, 0x00},
+ {0x012a, 0x18},
+ {0x012b, 0x00},
+ {0x0164, 0x00},
+ {0x0165, 0x00},
+ {0x0166, 0x0c},
+ {0x0167, 0xcf},
+ {0x0168, 0x00},
+ {0x0169, 0x00},
+ {0x016a, 0x09},
+ {0x016b, 0x9f},
+ {0x016c, 0x06},
+ {0x016d, 0x68},
+ {0x016e, 0x04},
+ {0x016f, 0xd0},
+ {0x0170, 0x01},
+ {0x0171, 0x01},
+ {0x0174, 0x01},
+ {0x0175, 0x01},
+ {0x0301, 0x05},
+ {0x0303, 0x01},
+ {0x0304, 0x03},
+ {0x0305, 0x03},
+ {0x0306, 0x00},
+ {0x0307, 0x39},
+ {0x030b, 0x01},
+ {0x030c, 0x00},
+ {0x030d, 0x72},
+ {0x0624, 0x06},
+ {0x0625, 0x68},
+ {0x0626, 0x04},
+ {0x0627, 0xd0},
+ {0x455e, 0x00},
+ {0x471e, 0x4b},
+ {0x4767, 0x0f},
+ {0x4750, 0x14},
+ {0x4540, 0x00},
+ {0x47b4, 0x14},
+ {0x4713, 0x30},
+ {0x478b, 0x10},
+ {0x478f, 0x10},
+ {0x4793, 0x10},
+ {0x4797, 0x0e},
+ {0x479b, 0x0e},
+ {0x0162, 0x0d},
+ {0x0163, 0x78},
+};
+
+static const struct imx219_reg mode_640_480_regs[] = {
+ {0x0100, 0x00},
+ {0x30eb, 0x05},
+ {0x30eb, 0x0c},
+ {0x300a, 0xff},
+ {0x300b, 0xff},
+ {0x30eb, 0x05},
+ {0x30eb, 0x09},
+ {0x0114, 0x01},
+ {0x0128, 0x00},
+ {0x012a, 0x18},
+ {0x012b, 0x00},
+ {0x0162, 0x0d},
+ {0x0163, 0x78},
+ {0x0164, 0x03},
+ {0x0165, 0xe8},
+ {0x0166, 0x08},
+ {0x0167, 0xe7},
+ {0x0168, 0x02},
+ {0x0169, 0xf0},
+ {0x016a, 0x06},
+ {0x016b, 0xaf},
+ {0x016c, 0x02},
+ {0x016d, 0x80},
+ {0x016e, 0x01},
+ {0x016f, 0xe0},
+ {0x0170, 0x01},
+ {0x0171, 0x01},
+ {0x0174, 0x03},
+ {0x0175, 0x03},
+ {0x0301, 0x05},
+ {0x0303, 0x01},
+ {0x0304, 0x03},
+ {0x0305, 0x03},
+ {0x0306, 0x00},
+ {0x0307, 0x39},
+ {0x030b, 0x01},
+ {0x030c, 0x00},
+ {0x030d, 0x72},
+ {0x0624, 0x06},
+ {0x0625, 0x68},
+ {0x0626, 0x04},
+ {0x0627, 0xd0},
+ {0x455e, 0x00},
+ {0x471e, 0x4b},
+ {0x4767, 0x0f},
+ {0x4750, 0x14},
+ {0x4540, 0x00},
+ {0x47b4, 0x14},
+ {0x4713, 0x30},
+ {0x478b, 0x10},
+ {0x478f, 0x10},
+ {0x4793, 0x10},
+ {0x4797, 0x0e},
+ {0x479b, 0x0e},
+};
+
+static const struct imx219_reg raw8_framefmt_regs[] = {
+ {0x018c, 0x08},
+ {0x018d, 0x08},
+ {0x0309, 0x08},
+};
+
+static const struct imx219_reg raw10_framefmt_regs[] = {
+ {0x018c, 0x0a},
+ {0x018d, 0x0a},
+ {0x0309, 0x0a},
+};
+
+static const char * const imx219_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bars",
+ "Solid Color",
+ "Grey Color Bars",
+ "PN9"
+};
+
+static const int imx219_test_pattern_val[] = {
+ IMX219_TEST_PATTERN_DISABLE,
+ IMX219_TEST_PATTERN_COLOR_BARS,
+ IMX219_TEST_PATTERN_SOLID_COLOR,
+ IMX219_TEST_PATTERN_GREY_COLOR,
+ IMX219_TEST_PATTERN_PN9,
+};
+
+/* regulator supplies */
+static const char * const imx219_supply_name[] = {
+ /* Supplies can be enabled in any order */
+ "VANA", /* Analog (2.8V) supply */
+ "VDIG", /* Digital Core (1.8V) supply */
+ "VDDL", /* IF (1.2V) supply */
+};
+
+#define IMX219_NUM_SUPPLIES ARRAY_SIZE(imx219_supply_name)
+
+/*
+ * The supported formats.
+ * This table MUST contain 4 entries per format, to cover the various flip
+ * combinations in the order
+ * - no flip
+ * - h flip
+ * - v flip
+ * - h&v flips
+ */
+static const u32 codes[] = {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+};
+
+/*
+ * Initialisation delay between XCLR low->high and the moment when the sensor
+ * can start capture (i.e. can leave software standby) must not be less than:
+ * t4 + max(t5, t6 + <time to initialize the sensor register over I2C>)
+ * where
+ * t4 is fixed, and is max 200 us,
+ * t5 is fixed, and is 6000 us,
+ * t6 depends on the sensor external clock, and is max 32000 clock periods.
+ * As per the sensor datasheet, the external clock must be from 6MHz to 27MHz.
+ * So for any acceptable external clock t6 is always within the range of
+ * 1185 to 5333 us, and is always less than t5.
+ * For this reason it is always safe to wait (t4 + t5) = 6200 us, then
+ * initialize the sensor over I2C, and then exit the software standby.
+ *
+ * This start-up time could be optimized a bit more by starting the writes
+ * over I2C after (t4+t6) but before (t4+t5) expires. But then sensor
+ * initialization over I2C may complete before (t4+t5) expires, and we must
+ * ensure that capture is not started before (t4+t5).
+ *
+ * This delay doesn't account for the power supply startup time. If needed,
+ * this should be taken care of via the regulator framework. E.g. in the
+ * case of DT for regulator-fixed one should define the startup-delay-us
+ * property.
+ */
+#define IMX219_XCLR_MIN_DELAY_US 6200
+#define IMX219_XCLR_DELAY_RANGE_US 1000
+
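A standalone sanity check of the t6 bound quoted in the comment above (a sketch, not driver code; the helper name is made up):

	/* t6 is at most 32000 external clock periods, converted to us (round up). */
	static unsigned int t6_max_us(unsigned int xclk_hz)
	{
		return (32000ULL * 1000000ULL + xclk_hz - 1) / xclk_hz;
	}

	/*
	 * t6_max_us(27000000) == 1186 and t6_max_us(6000000) == 5334, matching
	 * the 1185..5333 us range above (up to rounding) and always below
	 * t5 = 6000 us, so waiting t4 + t5 = 200 + 6000 = 6200 us is sufficient.
	 */
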
+/* Mode configs */
+static const struct imx219_mode supported_modes[] = {
+ {
+ /* 8MPix 15fps mode */
+ .width = 3280,
+ .height = 2464,
+ .vts_def = IMX219_VTS_15FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
+ .regs = mode_3280x2464_regs,
+ },
+ },
+ {
+ /* 1080P 30fps cropped */
+ .width = 1920,
+ .height = 1080,
+ .vts_def = IMX219_VTS_30FPS_1080P,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1920_1080_regs),
+ .regs = mode_1920_1080_regs,
+ },
+ },
+ {
+ /* 2x2 binned 30fps mode */
+ .width = 1640,
+ .height = 1232,
+ .vts_def = IMX219_VTS_30FPS_BINNED,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1640_1232_regs),
+ .regs = mode_1640_1232_regs,
+ },
+ },
+ {
+ /* 640x480 30fps mode */
+ .width = 640,
+ .height = 480,
+ .vts_def = IMX219_VTS_30FPS_640x480,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_640_480_regs),
+ .regs = mode_640_480_regs,
+ },
+ },
+};
+
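The frame rates in the mode comments above follow from the fixed pixel rate and line length: fps = IMX219_PIXEL_RATE / (IMX219_PPL_DEFAULT * VTS). A minimal sketch of that check (not driver code):

	static unsigned int imx219_mode_fps(unsigned int vts)
	{
		return IMX219_PIXEL_RATE / (IMX219_PPL_DEFAULT * vts);	/* truncates */
	}

	/*
	 * imx219_mode_fps(IMX219_VTS_15FPS)       = 182400000 / (3448 * 3526) = 15
	 * imx219_mode_fps(IMX219_VTS_30FPS_1080P) = 182400000 / (3448 * 1763) = 30
	 */
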
+struct imx219 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_mbus_framefmt fmt;
+
+ struct clk *xclk; /* system clock to IMX219 */
+ u32 xclk_freq;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[IMX219_NUM_SUPPLIES];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+
+ /* Current mode */
+ const struct imx219_mode *mode;
+
+ /*
+ * Mutex for serialized access:
+	 * Protect the sensor's set_pad_format and start/stop streaming paths.
+ */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx219, sd);
+}
+
+/* Read registers, up to 4 bytes at a time */
+static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2] = { reg >> 8, reg & 0xff };
+ u8 data_buf[4] = { 0, };
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+/* Write registers, up to 4 bytes at a time */
+static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
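Both helpers above stage the value big-endian in a 32-bit lane: the read right-aligns the incoming bytes in data_buf so get_unaligned_be32() returns the value, and the write left-shifts so only the top len bytes follow the register address. A worked example of the write path (illustrative values only):

	/*
	 * imx219_write_reg(imx219, IMX219_REG_EXPOSURE, IMX219_REG_VALUE_16BIT,
	 *		    0x640) packs buf[] as:
	 *   buf[0] = 0x01, buf[1] = 0x5a      register address 0x015a, MSB first
	 *   0x640 << (8 * (4 - 2)) = 0x06400000
	 *   buf[2] = 0x06, buf[3] = 0x40      the two value bytes actually sent
	 * and i2c_master_send() transfers len + 2 = 4 bytes.
	 */
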
+/* Write a list of registers */
+static int imx219_write_regs(struct imx219 *imx219,
+ const struct imx219_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < len; i++) {
+ ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "Failed to write reg 0x%4.4x. error = %d\n",
+ regs[i].address, ret);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Get the Bayer order based on the flip settings. */
+static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
+{
+ unsigned int i;
+
+ lockdep_assert_held(&imx219->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(codes); i++)
+ if (codes[i] == code)
+ break;
+
+ if (i >= ARRAY_SIZE(codes))
+ i = 0;
+
+ i = (i & ~3) | (imx219->vflip->val ? 2 : 0) |
+ (imx219->hflip->val ? 1 : 0);
+
+ return codes[i];
+}
+
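imx219_get_format_code() leans on the layout of codes[]: within each group of four, index bit 0 follows HFLIP and bit 1 follows VFLIP, while (i & ~3) keeps the 10-bit/8-bit family. Worked out for the first group (informational only):

	/*
	 * Base MEDIA_BUS_FMT_SRGGB10_1X10 (i = 0):
	 *   hflip=0 vflip=0 -> i = 0 -> SRGGB10
	 *   hflip=1 vflip=0 -> i = 1 -> SGRBG10
	 *   hflip=0 vflip=1 -> i = 2 -> SGBRG10
	 *   hflip=1 vflip=1 -> i = 3 -> SBGGR10
	 * For the 8-bit group (i & ~3) == 4 and the same two bits select the order.
	 */
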
+static void imx219_set_default_format(struct imx219 *imx219)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = &imx219->fmt;
+ fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ fmt->colorspace,
+ fmt->ycbcr_enc);
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
+ fmt->width = supported_modes[0].width;
+ fmt->height = supported_modes[0].height;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct imx219 *imx219 = to_imx219(sd);
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ mutex_lock(&imx219->mutex);
+
+ /* Initialize try_fmt */
+ try_fmt->width = supported_modes[0].width;
+ try_fmt->height = supported_modes[0].height;
+ try_fmt->code = imx219_get_format_code(imx219,
+ MEDIA_BUS_FMT_SRGGB10_1X10);
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ mutex_unlock(&imx219->mutex);
+
+ return 0;
+}
+
+static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx219 *imx219 =
+ container_of(ctrl->handler, struct imx219, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ int ret;
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max, exposure_def;
+
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = imx219->mode->height + ctrl->val - 4;
+ exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
+ exposure_max : IMX219_EXPOSURE_DEFAULT;
+ __v4l2_ctrl_modify_range(imx219->exposure,
+ imx219->exposure->minimum,
+ exposure_max, imx219->exposure->step,
+ exposure_def);
+ }
+
+ /*
+	 * V4L2 control values are only applied when the
+	 * sensor is powered up for streaming.
+ */
+ if (pm_runtime_get_if_in_use(&client->dev) == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN,
+ IMX219_REG_VALUE_08BIT, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE,
+ IMX219_REG_VALUE_16BIT, ctrl->val);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN,
+ IMX219_REG_VALUE_16BIT, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN,
+ IMX219_REG_VALUE_16BIT,
+ imx219_test_pattern_val[ctrl->val]);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1,
+ imx219->hflip->val |
+ imx219->vflip->val << 1);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = imx219_write_reg(imx219, IMX219_REG_VTS,
+ IMX219_REG_VALUE_16BIT,
+ imx219->mode->height + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN_RED:
+ ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED,
+ IMX219_REG_VALUE_16BIT, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENR:
+ ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR,
+ IMX219_REG_VALUE_16BIT, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN_BLUE:
+ ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE,
+ IMX219_REG_VALUE_16BIT, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENB:
+ ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB,
+ IMX219_REG_VALUE_16BIT, ctrl->val);
+ break;
+ default:
+ dev_info(&client->dev,
+ "ctrl(id:0x%x,val:0x%x) is not handled\n",
+ ctrl->id, ctrl->val);
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
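The V4L2_CID_VBLANK branch above keeps exposure inside the frame: the sensor needs a 4-line margin, so exposure_max = height + vblank - 4 = VTS - 4. Plugging in the 1080p mode values from the tables above (a check, not code from the patch):

	/*
	 * height = 1080, vts_def = IMX219_VTS_30FPS_1080P = 0x06e3 = 1763
	 * default vblank = vts_def - height        = 683
	 * exposure_max   = height + vblank - 4     = 1759
	 * exposure_def   = min(1759, 0x640 = 1600) = 1600
	 */
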
+static const struct v4l2_ctrl_ops imx219_ctrl_ops = {
+ .s_ctrl = imx219_set_ctrl,
+};
+
+static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx219 *imx219 = to_imx219(sd);
+
+ if (code->index >= (ARRAY_SIZE(codes) / 4))
+ return -EINVAL;
+
+ code->code = imx219_get_format_code(imx219, codes[code->index * 4]);
+
+ return 0;
+}
+
+static int imx219_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct imx219 *imx219 = to_imx219(sd);
+
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != imx219_get_format_code(imx219, imx219->fmt.code))
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void imx219_reset_colorspace(struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ fmt->colorspace,
+ fmt->ycbcr_enc);
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
+}
+
+static void imx219_update_pad_format(struct imx219 *imx219,
+ const struct imx219_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.field = V4L2_FIELD_NONE;
+ imx219_reset_colorspace(&fmt->format);
+}
+
+static int __imx219_get_pad_format(struct imx219 *imx219,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(&imx219->sd, cfg, fmt->pad);
+ /* update the code which could change due to vflip or hflip: */
+ try_fmt->code = imx219_get_format_code(imx219, try_fmt->code);
+ fmt->format = *try_fmt;
+ } else {
+ imx219_update_pad_format(imx219, imx219->mode, fmt);
+ fmt->format.code = imx219_get_format_code(imx219,
+ imx219->fmt.code);
+ }
+
+ return 0;
+}
+
+static int imx219_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx219 *imx219 = to_imx219(sd);
+ int ret;
+
+ mutex_lock(&imx219->mutex);
+ ret = __imx219_get_pad_format(imx219, cfg, fmt);
+ mutex_unlock(&imx219->mutex);
+
+ return ret;
+}
+
+static int imx219_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx219 *imx219 = to_imx219(sd);
+ const struct imx219_mode *mode;
+ struct v4l2_mbus_framefmt *framefmt;
+ int exposure_max, exposure_def, hblank;
+ unsigned int i;
+
+ mutex_lock(&imx219->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(codes); i++)
+ if (codes[i] == fmt->format.code)
+ break;
+ if (i >= ARRAY_SIZE(codes))
+ i = 0;
+
+ /* Bayer order varies with flips */
+ fmt->format.code = imx219_get_format_code(imx219, codes[i]);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+ imx219_update_pad_format(imx219, mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *framefmt = fmt->format;
+ } else if (imx219->mode != mode ||
+ imx219->fmt.code != fmt->format.code) {
+ imx219->fmt = fmt->format;
+ imx219->mode = mode;
+ /* Update limits and set FPS to default */
+ __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
+ IMX219_VTS_MAX - mode->height, 1,
+ mode->vts_def - mode->height);
+ __v4l2_ctrl_s_ctrl(imx219->vblank,
+ mode->vts_def - mode->height);
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = mode->vts_def - 4;
+ exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
+ exposure_max : IMX219_EXPOSURE_DEFAULT;
+ __v4l2_ctrl_modify_range(imx219->exposure,
+ imx219->exposure->minimum,
+ exposure_max, imx219->exposure->step,
+ exposure_def);
+ /*
+ * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank
+		 * depends on mode->width only, and is not changeable in any
+ * way other than changing the mode.
+ */
+ hblank = IMX219_PPL_DEFAULT - mode->width;
+ __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1,
+ hblank);
+ }
+
+ mutex_unlock(&imx219->mutex);
+
+ return 0;
+}
+
+static int imx219_set_framefmt(struct imx219 *imx219)
+{
+ switch (imx219->fmt.code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return imx219_write_regs(imx219, raw8_framefmt_regs,
+ ARRAY_SIZE(raw8_framefmt_regs));
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ return imx219_write_regs(imx219, raw10_framefmt_regs,
+ ARRAY_SIZE(raw10_framefmt_regs));
+ }
+
+ return -EINVAL;
+}
+
+static int imx219_start_streaming(struct imx219 *imx219)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ const struct imx219_reg_list *reg_list;
+ int ret;
+
+ /* Apply default values of current mode */
+ reg_list = &imx219->mode->reg_list;
+ ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ return ret;
+ }
+
+ ret = imx219_set_framefmt(imx219);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set frame format: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ /* set stream on register */
+ return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+ IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
+}
+
+static void imx219_stop_streaming(struct imx219 *imx219)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ int ret;
+
+ /* set stream off register */
+ ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+ IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
+ if (ret)
+ dev_err(&client->dev, "%s failed to set stream\n", __func__);
+}
+
+static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx219 *imx219 = to_imx219(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&imx219->mutex);
+ if (imx219->streaming == enable) {
+ mutex_unlock(&imx219->mutex);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto err_unlock;
+ }
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = imx219_start_streaming(imx219);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ imx219_stop_streaming(imx219);
+ pm_runtime_put(&client->dev);
+ }
+
+ imx219->streaming = enable;
+
+ /* vflip and hflip cannot change during streaming */
+ __v4l2_ctrl_grab(imx219->vflip, enable);
+ __v4l2_ctrl_grab(imx219->hflip, enable);
+
+ mutex_unlock(&imx219->mutex);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+err_unlock:
+ mutex_unlock(&imx219->mutex);
+
+ return ret;
+}
+
+/* Power/clock management functions */
+static int imx219_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx219 *imx219 = to_imx219(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(IMX219_NUM_SUPPLIES,
+ imx219->supplies);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable regulators\n",
+ __func__);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(imx219->xclk);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to enable clock\n",
+ __func__);
+ goto reg_off;
+ }
+
+ gpiod_set_value_cansleep(imx219->reset_gpio, 1);
+ usleep_range(IMX219_XCLR_MIN_DELAY_US,
+ IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
+
+ return 0;
+
+reg_off:
+ regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
+
+ return ret;
+}
+
+static int imx219_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx219 *imx219 = to_imx219(sd);
+
+ gpiod_set_value_cansleep(imx219->reset_gpio, 0);
+ regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
+ clk_disable_unprepare(imx219->xclk);
+
+ return 0;
+}
+
+static int __maybe_unused imx219_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx219 *imx219 = to_imx219(sd);
+
+ if (imx219->streaming)
+ imx219_stop_streaming(imx219);
+
+ return 0;
+}
+
+static int __maybe_unused imx219_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx219 *imx219 = to_imx219(sd);
+ int ret;
+
+ if (imx219->streaming) {
+ ret = imx219_start_streaming(imx219);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ imx219_stop_streaming(imx219);
+ imx219->streaming = 0;
+
+ return ret;
+}
+
+static int imx219_get_regulators(struct imx219 *imx219)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ unsigned int i;
+
+ for (i = 0; i < IMX219_NUM_SUPPLIES; i++)
+ imx219->supplies[i].supply = imx219_supply_name[i];
+
+ return devm_regulator_bulk_get(&client->dev,
+ IMX219_NUM_SUPPLIES,
+ imx219->supplies);
+}
+
+/* Verify chip ID */
+static int imx219_identify_module(struct imx219 *imx219)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ int ret;
+ u32 val;
+
+ ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID,
+ IMX219_REG_VALUE_16BIT, &val);
+ if (ret) {
+ dev_err(&client->dev, "failed to read chip id %x\n",
+ IMX219_CHIP_ID);
+ return ret;
+ }
+
+ if (val != IMX219_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ IMX219_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops imx219_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops imx219_video_ops = {
+ .s_stream = imx219_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
+ .enum_mbus_code = imx219_enum_mbus_code,
+ .get_fmt = imx219_get_pad_format,
+ .set_fmt = imx219_set_pad_format,
+ .enum_frame_size = imx219_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops imx219_subdev_ops = {
+ .core = &imx219_core_ops,
+ .video = &imx219_video_ops,
+ .pad = &imx219_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops imx219_internal_ops = {
+ .open = imx219_open,
+};
+
+/* Initialize control handlers */
+static int imx219_init_controls(struct imx219 *imx219)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ unsigned int height = imx219->mode->height;
+ int exposure_max, exposure_def, hblank;
+ int i, ret;
+
+ ctrl_hdlr = &imx219->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+ if (ret)
+ return ret;
+
+ mutex_init(&imx219->mutex);
+ ctrl_hdlr->lock = &imx219->mutex;
+
+ /* By default, PIXEL_RATE is read only */
+ imx219->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ IMX219_PIXEL_RATE,
+ IMX219_PIXEL_RATE, 1,
+ IMX219_PIXEL_RATE);
+
+ /* Initial vblank/hblank/exposure parameters based on current mode */
+ imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
+ IMX219_VTS_MAX - height, 1,
+ imx219->mode->vts_def - height);
+ hblank = IMX219_PPL_DEFAULT - imx219->mode->width;
+ imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_HBLANK, hblank, hblank,
+ 1, hblank);
+ if (imx219->hblank)
+ imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ exposure_max = imx219->mode->vts_def - 4;
+ exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
+ exposure_max : IMX219_EXPOSURE_DEFAULT;
+ imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ IMX219_EXPOSURE_MIN, exposure_max,
+ IMX219_EXPOSURE_STEP,
+ exposure_def);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX219_ANA_GAIN_MIN, IMX219_ANA_GAIN_MAX,
+ IMX219_ANA_GAIN_STEP, IMX219_ANA_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ IMX219_DGTL_GAIN_MIN, IMX219_DGTL_GAIN_MAX,
+ IMX219_DGTL_GAIN_STEP, IMX219_DGTL_GAIN_DEFAULT);
+
+ imx219->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (imx219->hflip)
+ imx219->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ imx219->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (imx219->vflip)
+ imx219->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx219_test_pattern_menu) - 1,
+ 0, 0, imx219_test_pattern_menu);
+ for (i = 0; i < 4; i++) {
+ /*
+ * The assumption is that
+ * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1
+ * V4L2_CID_TEST_PATTERN_BLUE == V4L2_CID_TEST_PATTERN_RED + 2
+ * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3
+ */
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+ V4L2_CID_TEST_PATTERN_RED + i,
+ IMX219_TESTP_COLOUR_MIN,
+ IMX219_TESTP_COLOUR_MAX,
+ IMX219_TESTP_COLOUR_STEP,
+ IMX219_TESTP_COLOUR_MAX);
+ /* The "Solid color" pattern is white by default */
+ }
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto error;
+ }
+
+ imx219->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ mutex_destroy(&imx219->mutex);
+
+ return ret;
+}
+
+static void imx219_free_controls(struct imx219 *imx219)
+{
+ v4l2_ctrl_handler_free(imx219->sd.ctrl_handler);
+ mutex_destroy(&imx219->mutex);
+}
+
+static int imx219_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_fwnode_endpoint ep_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ int ret = -EINVAL;
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint) {
+ dev_err(dev, "endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ if (v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep_cfg)) {
+ dev_err(dev, "could not parse endpoint\n");
+ goto error_out;
+ }
+
+ /* Check the number of MIPI CSI2 data lanes */
+ if (ep_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ dev_err(dev, "only 2 data lanes are currently supported\n");
+ goto error_out;
+ }
+
+ /* Check the link frequency set in device tree */
+ if (!ep_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+ goto error_out;
+ }
+
+ if (ep_cfg.nr_of_link_frequencies != 1 ||
+ ep_cfg.link_frequencies[0] != IMX219_DEFAULT_LINK_FREQ) {
+ dev_err(dev, "Link frequency not supported: %lld\n",
+ ep_cfg.link_frequencies[0]);
+ goto error_out;
+ }
+
+ ret = 0;
+
+error_out:
+ v4l2_fwnode_endpoint_free(&ep_cfg);
+ fwnode_handle_put(endpoint);
+
+ return ret;
+}
+
+static int imx219_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct imx219 *imx219;
+ int ret;
+
+ imx219 = devm_kzalloc(&client->dev, sizeof(*imx219), GFP_KERNEL);
+ if (!imx219)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&imx219->sd, client, &imx219_subdev_ops);
+
+ /* Check the hardware configuration in device tree */
+ if (imx219_check_hwcfg(dev))
+ return -EINVAL;
+
+ /* Get system clock (xclk) */
+ imx219->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(imx219->xclk)) {
+ dev_err(dev, "failed to get xclk\n");
+ return PTR_ERR(imx219->xclk);
+ }
+
+ imx219->xclk_freq = clk_get_rate(imx219->xclk);
+ if (imx219->xclk_freq != IMX219_XCLK_FREQ) {
+ dev_err(dev, "xclk frequency not supported: %d Hz\n",
+ imx219->xclk_freq);
+ return -EINVAL;
+ }
+
+ ret = imx219_get_regulators(imx219);
+ if (ret) {
+ dev_err(dev, "failed to get regulators\n");
+ return ret;
+ }
+
+	/* Request the optional reset (XCLR) pin */
+ imx219->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+
+ /*
+ * The sensor must be powered for imx219_identify_module()
+ * to be able to read the CHIP_ID register
+ */
+ ret = imx219_power_on(dev);
+ if (ret)
+ return ret;
+
+ ret = imx219_identify_module(imx219);
+ if (ret)
+ goto error_power_off;
+
+ /* Set default mode to max resolution */
+ imx219->mode = &supported_modes[0];
+
+	/*
+	 * The sensor doesn't enter the LP-11 state upon power up unless
+	 * streaming has been started, so upon power up cycle the mode:
+	 * streaming -> standby
+ */
+ ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+ IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
+ if (ret < 0)
+ goto error_power_off;
+ usleep_range(100, 110);
+
+ /* put sensor back to standby mode */
+ ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+ IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
+ if (ret < 0)
+ goto error_power_off;
+ usleep_range(100, 110);
+
+ ret = imx219_init_controls(imx219);
+ if (ret)
+ goto error_power_off;
+
+ /* Initialize subdev */
+ imx219->sd.internal_ops = &imx219_internal_ops;
+ imx219->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx219->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ imx219->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Initialize default format */
+ imx219_set_default_format(imx219);
+
+ ret = media_entity_pads_init(&imx219->sd.entity, 1, &imx219->pad);
+ if (ret) {
+ dev_err(dev, "failed to init entity pads: %d\n", ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&imx219->sd);
+ if (ret < 0) {
+ dev_err(dev, "failed to register sensor sub-device: %d\n", ret);
+ goto error_media_entity;
+ }
+
+ /* Enable runtime PM and turn off the device */
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ return 0;
+
+error_media_entity:
+ media_entity_cleanup(&imx219->sd.entity);
+
+error_handler_free:
+ imx219_free_controls(imx219);
+
+error_power_off:
+ imx219_power_off(dev);
+
+ return ret;
+}
+
+static int imx219_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx219 *imx219 = to_imx219(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ imx219_free_controls(imx219);
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ imx219_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ return 0;
+}
+
+static const struct of_device_id imx219_dt_ids[] = {
+ { .compatible = "sony,imx219" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx219_dt_ids);
+
+static const struct dev_pm_ops imx219_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx219_suspend, imx219_resume)
+ SET_RUNTIME_PM_OPS(imx219_power_off, imx219_power_on, NULL)
+};
+
+static struct i2c_driver imx219_i2c_driver = {
+ .driver = {
+ .name = "imx219",
+ .of_match_table = imx219_dt_ids,
+ .pm = &imx219_pm_ops,
+ },
+ .probe_new = imx219_probe,
+ .remove = imx219_remove,
+};
+
+module_i2c_driver(imx219_i2c_driver);
+
+MODULE_AUTHOR("Dave Stevenson <dave.stevenson@raspberrypi.com");
+MODULE_DESCRIPTION("Sony IMX219 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 1ae252378799..8537cc4ca108 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -63,6 +63,10 @@
#define OV5675_TEST_PATTERN_ENABLE BIT(7)
#define OV5675_TEST_PATTERN_BAR_SHIFT 2
+/* Flip Mirror Controls from sensor */
+#define OV5675_REG_FORMAT1 0x3820
+#define OV5675_REG_FORMAT2 0x373d
+
#define to_ov5675(_sd) container_of(_sd, struct ov5675, sd)
enum {
@@ -314,21 +318,21 @@ static const struct ov5675_reg mode_1296x972_regs[] = {
{0x3800, 0x00},
{0x3801, 0x00},
{0x3802, 0x00},
- {0x3803, 0xf4},
+ {0x3803, 0x00},
{0x3804, 0x0a},
{0x3805, 0x3f},
- {0x3806, 0x06},
- {0x3807, 0xb3},
+ {0x3806, 0x07},
+ {0x3807, 0xb7},
{0x3808, 0x05},
- {0x3809, 0x00},
- {0x380a, 0x02},
- {0x380b, 0xd0},
+ {0x3809, 0x10},
+ {0x380a, 0x03},
+ {0x380b, 0xcc},
{0x380c, 0x02},
{0x380d, 0xee},
{0x380e, 0x07},
- {0x380f, 0xe4},
- {0x3811, 0x10},
- {0x3813, 0x09},
+ {0x380f, 0xd0},
+ {0x3811, 0x08},
+ {0x3813, 0x0d},
{0x3814, 0x03},
{0x3815, 0x01},
{0x3816, 0x03},
@@ -604,6 +608,53 @@ static int ov5675_test_pattern(struct ov5675 *ov5675, u32 pattern)
OV5675_REG_VALUE_08BIT, pattern);
}
+/*
+ * The OV5675 can mirror and flip while keeping the pixel order, i.e.
+ * the Bayer order is not affected by the flip controls.
+ */
+static int ov5675_set_ctrl_hflip(struct ov5675 *ov5675, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov5675_read_reg(ov5675, OV5675_REG_FORMAT1,
+ OV5675_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ return ov5675_write_reg(ov5675, OV5675_REG_FORMAT1,
+ OV5675_REG_VALUE_08BIT,
+				ctrl_val ? val & ~BIT(3) : val | BIT(3));
+}
+
+static int ov5675_set_ctrl_vflip(struct ov5675 *ov5675, u8 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov5675_read_reg(ov5675, OV5675_REG_FORMAT1,
+ OV5675_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov5675_write_reg(ov5675, OV5675_REG_FORMAT1,
+ OV5675_REG_VALUE_08BIT,
+			       ctrl_val ? val | BIT(4) | BIT(5) :
+			       val & ~BIT(4) & ~BIT(5));
+
+ if (ret)
+ return ret;
+
+ ret = ov5675_read_reg(ov5675, OV5675_REG_FORMAT2,
+ OV5675_REG_VALUE_08BIT, &val);
+
+ if (ret)
+ return ret;
+
+ return ov5675_write_reg(ov5675, OV5675_REG_FORMAT2,
+ OV5675_REG_VALUE_08BIT,
+				ctrl_val ? val | BIT(1) : val & ~BIT(1));
+}
+
static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov5675 *ov5675 = container_of(ctrl->handler,
@@ -654,6 +705,14 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
ret = ov5675_test_pattern(ov5675, ctrl->val);
break;
+ case V4L2_CID_HFLIP:
+		ret = ov5675_set_ctrl_hflip(ov5675, ctrl->val);
+ break;
+
+ case V4L2_CID_VFLIP:
+		ret = ov5675_set_ctrl_vflip(ov5675, ctrl->val);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -722,6 +781,11 @@ static int ov5675_init_controls(struct ov5675 *ov5675)
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(ov5675_test_pattern_menu) - 1,
0, 0, ov5675_test_pattern_menu);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
if (ctrl_hdlr->error)
return ctrl_hdlr->error;
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index d6cd15bb699a..cc678d9d2e0d 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -971,16 +971,9 @@ unlock_and_return:
return ret;
}
-/* Calculate the delay in us by clock rate and clock cycles */
-static inline u32 ov5695_cal_delay(u32 cycles)
-{
- return DIV_ROUND_UP(cycles, OV5695_XVCLK_FREQ / 1000 / 1000);
-}
-
static int __ov5695_power_on(struct ov5695 *ov5695)
{
- int ret;
- u32 delay_us;
+ int i, ret;
struct device *dev = &ov5695->client->dev;
ret = clk_prepare_enable(ov5695->xvclk);
@@ -991,21 +984,28 @@ static int __ov5695_power_on(struct ov5695 *ov5695)
gpiod_set_value_cansleep(ov5695->reset_gpio, 1);
- ret = regulator_bulk_enable(OV5695_NUM_SUPPLIES, ov5695->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators\n");
- goto disable_clk;
+ /*
+ * The hardware requires the regulators to be powered on in order,
+ * so enable them one by one.
+ */
+ for (i = 0; i < OV5695_NUM_SUPPLIES; i++) {
+ ret = regulator_enable(ov5695->supplies[i].consumer);
+ if (ret) {
+ dev_err(dev, "Failed to enable %s: %d\n",
+ ov5695->supplies[i].supply, ret);
+ goto disable_reg_clk;
+ }
}
gpiod_set_value_cansleep(ov5695->reset_gpio, 0);
- /* 8192 cycles prior to first SCCB transaction */
- delay_us = ov5695_cal_delay(8192);
- usleep_range(delay_us, delay_us * 2);
+ usleep_range(1000, 1200);
return 0;
-disable_clk:
+disable_reg_clk:
+ for (--i; i >= 0; i--)
+ regulator_disable(ov5695->supplies[i].consumer);
clk_disable_unprepare(ov5695->xvclk);
return ret;
@@ -1013,9 +1013,22 @@ disable_clk:
static void __ov5695_power_off(struct ov5695 *ov5695)
{
+ struct device *dev = &ov5695->client->dev;
+ int i, ret;
+
clk_disable_unprepare(ov5695->xvclk);
gpiod_set_value_cansleep(ov5695->reset_gpio, 1);
- regulator_bulk_disable(OV5695_NUM_SUPPLIES, ov5695->supplies);
+
+ /*
+ * The hardware requires the regulators to be powered off in order,
+ * so disable them one by one.
+ */
+ for (i = OV5695_NUM_SUPPLIES - 1; i >= 0; i--) {
+ ret = regulator_disable(ov5695->supplies[i].consumer);
+ if (ret)
+ dev_err(dev, "Failed to disable %s: %d\n",
+ ov5695->supplies[i].supply, ret);
+ }
}
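The error path in __ov5695_power_on() above starts its unwind at --i, so the supply that failed to enable is skipped while every supply enabled before it is released. A minimal standalone sketch of the idiom (illustrative names, not part of the driver):

	/* Assumes <linux/regulator/consumer.h>; sketch only. */
	static int enable_in_order(struct regulator_bulk_data *supplies, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = regulator_enable(supplies[i].consumer);
			if (ret)
				goto unwind;
		}
		return 0;

	unwind:
		for (--i; i >= 0; i--)	/* skip the supply that failed */
			regulator_disable(supplies[i].consumer);
		return ret;
	}
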
static int __maybe_unused ov5695_runtime_resume(struct device *dev)
@@ -1285,7 +1298,7 @@ static int ov5695_probe(struct i2c_client *client,
if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
- ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ov5695->reset_gpio)) {
dev_err(dev, "Failed to get reset-gpios\n");
return -EINVAL;
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
index 8911660da86f..71cf68a95bb2 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
@@ -547,7 +547,7 @@ int s5c73m3_init_controls(struct s5c73m3 *state)
V4L2_CTRL_FLAG_UPDATE;
v4l2_ctrl_auto_cluster(2, &ctrls->auto_iso, 0, false);
ctrls->af_status->flags |= V4L2_CTRL_FLAG_VOLATILE;
- v4l2_ctrl_cluster(6, &ctrls->focus_auto);
+ v4l2_ctrl_cluster(5, &ctrls->focus_auto);
state->sensor_sd.ctrl_handler = hdl;
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index a80d7701b519..5e4f6a2ef78e 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -57,6 +57,45 @@ static const struct smiapp_module_ident smiapp_module_idents[] = {
*
*/
+static u32 smiapp_get_limit(struct smiapp_sensor *sensor,
+ unsigned int limit)
+{
+ if (WARN_ON(limit >= SMIAPP_LIMIT_LAST))
+ return 1;
+
+ return sensor->limits[limit];
+}
+
+#define SMIA_LIM(sensor, limit) \
+ smiapp_get_limit(sensor, SMIAPP_LIMIT_##limit)
+
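SMIA_LIM() is plain token pasting, so the call sites below stay short while still going through the WARN_ON-checked accessor; for example:

	/*
	 * SMIA_LIM(sensor, SCALER_N_MIN)
	 *   expands to
	 * smiapp_get_limit(sensor, SMIAPP_LIMIT_SCALER_N_MIN)
	 */
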
+static int smiapp_read_all_smia_limits(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int i;
+ int rval;
+
+ for (i = 0; i < SMIAPP_LIMIT_LAST; i++) {
+ u32 val;
+
+ rval = smiapp_read(
+ sensor, smiapp_reg_limits[i].addr, &val);
+ if (rval)
+ return rval;
+
+ sensor->limits[i] = val;
+
+ dev_dbg(&client->dev, "0x%8.8x \"%s\" = %u, 0x%x\n",
+ smiapp_reg_limits[i].addr,
+ smiapp_reg_limits[i].what, val, val);
+ }
+
+ if (SMIA_LIM(sensor, SCALER_N_MIN) == 0)
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_SCALER_N_MIN, 16);
+
+ return 0;
+}
+
static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
@@ -240,35 +279,35 @@ static int smiapp_pll_try(struct smiapp_sensor *sensor,
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
struct smiapp_pll_limits lim = {
- .min_pre_pll_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_PRE_PLL_CLK_DIV],
- .max_pre_pll_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_PRE_PLL_CLK_DIV],
- .min_pll_ip_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_IP_FREQ_HZ],
- .max_pll_ip_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_IP_FREQ_HZ],
- .min_pll_multiplier = sensor->limits[SMIAPP_LIMIT_MIN_PLL_MULTIPLIER],
- .max_pll_multiplier = sensor->limits[SMIAPP_LIMIT_MAX_PLL_MULTIPLIER],
- .min_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ],
- .max_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ],
-
- .op.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
- .op.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
- .op.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
- .op.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
- .op.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
- .op.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
- .op.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
- .op.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
-
- .vt.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
- .vt.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
- .vt.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
- .vt.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
- .vt.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
- .vt.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
- .vt.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
- .vt.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
-
- .min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN],
- .min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK],
+ .min_pre_pll_clk_div = SMIA_LIM(sensor, MIN_PRE_PLL_CLK_DIV),
+ .max_pre_pll_clk_div = SMIA_LIM(sensor, MAX_PRE_PLL_CLK_DIV),
+ .min_pll_ip_freq_hz = SMIA_LIM(sensor, MIN_PLL_IP_FREQ_HZ),
+ .max_pll_ip_freq_hz = SMIA_LIM(sensor, MAX_PLL_IP_FREQ_HZ),
+ .min_pll_multiplier = SMIA_LIM(sensor, MIN_PLL_MULTIPLIER),
+ .max_pll_multiplier = SMIA_LIM(sensor, MAX_PLL_MULTIPLIER),
+ .min_pll_op_freq_hz = SMIA_LIM(sensor, MIN_PLL_OP_FREQ_HZ),
+ .max_pll_op_freq_hz = SMIA_LIM(sensor, MAX_PLL_OP_FREQ_HZ),
+
+ .op.min_sys_clk_div = SMIA_LIM(sensor, MIN_OP_SYS_CLK_DIV),
+ .op.max_sys_clk_div = SMIA_LIM(sensor, MAX_OP_SYS_CLK_DIV),
+ .op.min_pix_clk_div = SMIA_LIM(sensor, MIN_OP_PIX_CLK_DIV),
+ .op.max_pix_clk_div = SMIA_LIM(sensor, MAX_OP_PIX_CLK_DIV),
+ .op.min_sys_clk_freq_hz = SMIA_LIM(sensor, MIN_OP_SYS_CLK_FREQ_HZ),
+ .op.max_sys_clk_freq_hz = SMIA_LIM(sensor, MAX_OP_SYS_CLK_FREQ_HZ),
+ .op.min_pix_clk_freq_hz = SMIA_LIM(sensor, MIN_OP_PIX_CLK_FREQ_HZ),
+ .op.max_pix_clk_freq_hz = SMIA_LIM(sensor, MAX_OP_PIX_CLK_FREQ_HZ),
+
+ .vt.min_sys_clk_div = SMIA_LIM(sensor, MIN_VT_SYS_CLK_DIV),
+ .vt.max_sys_clk_div = SMIA_LIM(sensor, MAX_VT_SYS_CLK_DIV),
+ .vt.min_pix_clk_div = SMIA_LIM(sensor, MIN_VT_PIX_CLK_DIV),
+ .vt.max_pix_clk_div = SMIA_LIM(sensor, MAX_VT_PIX_CLK_DIV),
+ .vt.min_sys_clk_freq_hz = SMIA_LIM(sensor, MIN_VT_SYS_CLK_FREQ_HZ),
+ .vt.max_sys_clk_freq_hz = SMIA_LIM(sensor, MAX_VT_SYS_CLK_FREQ_HZ),
+ .vt.min_pix_clk_freq_hz = SMIA_LIM(sensor, MIN_VT_PIX_CLK_FREQ_HZ),
+ .vt.max_pix_clk_freq_hz = SMIA_LIM(sensor, MAX_VT_PIX_CLK_FREQ_HZ),
+
+ .min_line_length_pck_bin = SMIA_LIM(sensor, MIN_LINE_LENGTH_PCK_BIN),
+ .min_line_length_pck = SMIA_LIM(sensor, MIN_LINE_LENGTH_PCK),
};
return smiapp_pll_calculate(&client->dev, &lim, pll);
@@ -311,7 +350,7 @@ static void __smiapp_update_exposure_limits(struct smiapp_sensor *sensor)
max = sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+ sensor->vblank->val
- - sensor->limits[SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MAX_MARGIN];
+ - SMIA_LIM(sensor, COARSE_INTEGRATION_TIME_MAX_MARGIN);
__v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max, ctrl->step, max);
}
@@ -568,10 +607,10 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
sensor->analog_gain = v4l2_ctrl_new_std(
&sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
V4L2_CID_ANALOGUE_GAIN,
- sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN],
- sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MAX],
- max(sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_STEP], 1U),
- sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN]);
+ SMIA_LIM(sensor, ANALOGUE_GAIN_CODE_MIN),
+ SMIA_LIM(sensor, ANALOGUE_GAIN_CODE_MAX),
+ max(SMIA_LIM(sensor, ANALOGUE_GAIN_CODE_STEP), 1U),
+ SMIA_LIM(sensor, ANALOGUE_GAIN_CODE_MIN));
/* Exposure limits will be updated soon, use just something here. */
sensor->exposure = v4l2_ctrl_new_std(
@@ -677,45 +716,6 @@ static void smiapp_free_controls(struct smiapp_sensor *sensor)
v4l2_ctrl_handler_free(&sensor->ssds[i].ctrl_handler);
}
-static int smiapp_get_limits(struct smiapp_sensor *sensor, int const *limit,
- unsigned int n)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- unsigned int i;
- u32 val;
- int rval;
-
- for (i = 0; i < n; i++) {
- rval = smiapp_read(
- sensor, smiapp_reg_limits[limit[i]].addr, &val);
- if (rval)
- return rval;
- sensor->limits[limit[i]] = val;
- dev_dbg(&client->dev, "0x%8.8x \"%s\" = %u, 0x%x\n",
- smiapp_reg_limits[limit[i]].addr,
- smiapp_reg_limits[limit[i]].what, val, val);
- }
-
- return 0;
-}
-
-static int smiapp_get_all_limits(struct smiapp_sensor *sensor)
-{
- unsigned int i;
- int rval;
-
- for (i = 0; i < SMIAPP_LIMIT_LAST; i++) {
- rval = smiapp_get_limits(sensor, &i, 1);
- if (rval < 0)
- return rval;
- }
-
- if (sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] == 0)
- smiapp_replace_limit(sensor, SMIAPP_LIMIT_SCALER_N_MIN, 16);
-
- return 0;
-}
-
static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
@@ -869,21 +869,21 @@ static void smiapp_update_blanking(struct smiapp_sensor *sensor)
int min, max;
if (sensor->binning_vertical > 1 || sensor->binning_horizontal > 1) {
- min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN];
- max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN];
- min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN];
- max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN];
- min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN];
+ min_fll = SMIA_LIM(sensor, MIN_FRAME_LENGTH_LINES_BIN);
+ max_fll = SMIA_LIM(sensor, MAX_FRAME_LENGTH_LINES_BIN);
+ min_llp = SMIA_LIM(sensor, MIN_LINE_LENGTH_PCK_BIN);
+ max_llp = SMIA_LIM(sensor, MAX_LINE_LENGTH_PCK_BIN);
+ min_lbp = SMIA_LIM(sensor, MIN_LINE_BLANKING_PCK_BIN);
} else {
- min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES];
- max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES];
- min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK];
- max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK];
- min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK];
+ min_fll = SMIA_LIM(sensor, MIN_FRAME_LENGTH_LINES);
+ max_fll = SMIA_LIM(sensor, MAX_FRAME_LENGTH_LINES);
+ min_llp = SMIA_LIM(sensor, MIN_LINE_LENGTH_PCK);
+ max_llp = SMIA_LIM(sensor, MAX_LINE_LENGTH_PCK);
+ min_lbp = SMIA_LIM(sensor, MIN_LINE_BLANKING_PCK);
}
min = max_t(int,
- sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
+ SMIA_LIM(sensor, MIN_FRAME_BLANKING_LINES),
min_fll -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
max = max_fll - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
@@ -961,7 +961,7 @@ static int smiapp_read_nvm_page(struct smiapp_sensor *sensor, u32 p, u8 *nvm,
return -ENODATA;
}
- if (sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ if (SMIA_LIM(sensor, DATA_TRANSFER_IF_CAPABILITY) &
SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL) {
for (i = 1000; i > 0; i--) {
if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
@@ -1416,7 +1416,7 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
*/
/* Digital crop */
- if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ if (SMIA_LIM(sensor, DIGITAL_CROP_CAPABILITY)
== SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
rval = smiapp_write(
sensor, SMIAPP_REG_U16_DIGITAL_CROP_X_OFFSET,
@@ -1444,7 +1444,7 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
}
/* Scaling */
- if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ if (SMIA_LIM(sensor, SCALING_CAPABILITY)
!= SMIAPP_SCALING_CAPABILITY_NONE) {
rval = smiapp_write(sensor, SMIAPP_REG_U16_SCALING_MODE,
sensor->scaling_mode);
@@ -1467,7 +1467,7 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
if (rval < 0)
goto out;
- if ((sensor->limits[SMIAPP_LIMIT_FLASH_MODE_CAPABILITY] &
+ if ((SMIA_LIM(sensor, FLASH_MODE_CAPABILITY) &
(SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE |
SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE)) &&
sensor->hwcfg->strobe_setup != NULL &&
@@ -1715,8 +1715,7 @@ static void smiapp_propagate(struct v4l2_subdev *subdev,
if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
if (ssd == sensor->scaler) {
sensor->scale_m =
- sensor->limits[
- SMIAPP_LIMIT_SCALER_N_MIN];
+ SMIA_LIM(sensor, SCALER_N_MIN);
sensor->scaling_mode =
SMIAPP_SCALING_MODE_NONE;
} else if (ssd == sensor->binner) {
@@ -1828,12 +1827,12 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
fmt->format.width =
clamp(fmt->format.width,
- sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
- sensor->limits[SMIAPP_LIMIT_MAX_X_OUTPUT_SIZE]);
+ SMIA_LIM(sensor, MIN_X_OUTPUT_SIZE),
+ SMIA_LIM(sensor, MAX_X_OUTPUT_SIZE));
fmt->format.height =
clamp(fmt->format.height,
- sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE],
- sensor->limits[SMIAPP_LIMIT_MAX_Y_OUTPUT_SIZE]);
+ SMIA_LIM(sensor, MIN_Y_OUTPUT_SIZE),
+ SMIA_LIM(sensor, MAX_Y_OUTPUT_SIZE));
smiapp_get_crop_compose(subdev, cfg, crops, NULL, fmt->which);
@@ -1886,7 +1885,7 @@ static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
val -= abs(w - ask_w);
val -= abs(h - ask_h);
- if (w < sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE])
+ if (w < SMIA_LIM(sensor, MIN_X_OUTPUT_SIZE))
val -= SCALING_GOODNESS_EXTREME;
dev_dbg(&client->dev, "w %d ask_w %d h %d ask_h %d goodness %d\n",
@@ -1952,7 +1951,7 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
u32 min, max, a, b, max_m;
- u32 scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ u32 scale_m = SMIA_LIM(sensor, SCALER_N_MIN);
int mode = SMIAPP_SCALING_MODE_HORIZONTAL;
u32 try[4];
u32 ntry = 0;
@@ -1965,19 +1964,19 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
crops[SMIAPP_PAD_SINK]->height);
a = crops[SMIAPP_PAD_SINK]->width
- * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] / sel->r.width;
+ * SMIA_LIM(sensor, SCALER_N_MIN) / sel->r.width;
b = crops[SMIAPP_PAD_SINK]->height
- * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] / sel->r.height;
+ * SMIA_LIM(sensor, SCALER_N_MIN) / sel->r.height;
max_m = crops[SMIAPP_PAD_SINK]->width
- * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]
- / sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE];
+ * SMIA_LIM(sensor, SCALER_N_MIN)
+ / SMIA_LIM(sensor, MIN_X_OUTPUT_SIZE);
- a = clamp(a, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN],
- sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]);
- b = clamp(b, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN],
- sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]);
- max_m = clamp(max_m, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN],
- sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]);
+ a = clamp(a, SMIA_LIM(sensor, SCALER_M_MIN),
+ SMIA_LIM(sensor, SCALER_M_MAX));
+ b = clamp(b, SMIA_LIM(sensor, SCALER_M_MIN),
+ SMIA_LIM(sensor, SCALER_M_MAX));
+ max_m = clamp(max_m, SMIA_LIM(sensor, SCALER_M_MIN),
+ SMIA_LIM(sensor, SCALER_M_MAX));
dev_dbg(&client->dev, "scaling: a %d b %d max_m %d\n", a, b, max_m);
@@ -2004,7 +2003,7 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
subdev,
crops[SMIAPP_PAD_SINK]->width
/ try[i]
- * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ * SMIA_LIM(sensor, SCALER_N_MIN),
sel->r.width,
crops[SMIAPP_PAD_SINK]->height,
sel->r.height,
@@ -2018,18 +2017,18 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
best = this;
}
- if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ if (SMIA_LIM(sensor, SCALING_CAPABILITY)
== SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
continue;
this = scaling_goodness(
subdev, crops[SMIAPP_PAD_SINK]->width
/ try[i]
- * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ * SMIA_LIM(sensor, SCALER_N_MIN),
sel->r.width,
crops[SMIAPP_PAD_SINK]->height
/ try[i]
- * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ * SMIA_LIM(sensor, SCALER_N_MIN),
sel->r.height,
sel->flags);
@@ -2043,12 +2042,12 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
sel->r.width =
(crops[SMIAPP_PAD_SINK]->width
/ scale_m
- * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]) & ~1;
+ * SMIA_LIM(sensor, SCALER_N_MIN)) & ~1;
if (mode == SMIAPP_SCALING_MODE_BOTH)
sel->r.height =
(crops[SMIAPP_PAD_SINK]->height
/ scale_m
- * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN])
+ * SMIA_LIM(sensor, SCALER_N_MIN))
& ~1;
else
sel->r.height = crops[SMIAPP_PAD_SINK]->height;
@@ -2104,7 +2103,7 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
return 0;
if (ssd == sensor->scaler
&& sel->pad == SMIAPP_PAD_SINK
- && sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ && SMIA_LIM(sensor, DIGITAL_CROP_CAPABILITY)
== SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP)
return 0;
return -EINVAL;
@@ -2120,7 +2119,7 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
if (ssd == sensor->binner)
return 0;
if (ssd == sensor->scaler
- && sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ && SMIA_LIM(sensor, SCALING_CAPABILITY)
!= SMIAPP_SCALING_CAPABILITY_NONE)
return 0;
/* Fall through */
@@ -2185,8 +2184,8 @@ static void smiapp_get_native_size(struct smiapp_subdev *ssd,
{
r->top = 0;
r->left = 0;
- r->width = ssd->sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
- r->height = ssd->sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+ r->width = SMIA_LIM(ssd->sensor, X_ADDR_MAX) + 1;
+ r->height = SMIA_LIM(ssd->sensor, Y_ADDR_MAX) + 1;
}
static int __smiapp_get_selection(struct v4l2_subdev *subdev,
@@ -2271,10 +2270,10 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
sel->r.height = SMIAPP_ALIGN_DIM(sel->r.height, sel->flags);
sel->r.width = max_t(unsigned int,
- sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
+ SMIA_LIM(sensor, MIN_X_OUTPUT_SIZE),
sel->r.width);
sel->r.height = max_t(unsigned int,
- sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE],
+ SMIA_LIM(sensor, MIN_Y_OUTPUT_SIZE),
sel->r.height);
switch (sel->target) {
@@ -2927,7 +2926,7 @@ static int smiapp_probe(struct i2c_client *client)
goto out_power_off;
}
- rval = smiapp_get_all_limits(sensor);
+ rval = smiapp_read_all_smia_limits(sensor);
if (rval) {
rval = -ENODEV;
goto out_power_off;
@@ -2963,7 +2962,7 @@ static int smiapp_probe(struct i2c_client *client)
goto out_power_off;
}
- if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
+ if (SMIA_LIM(sensor, BINNING_CAPABILITY)) {
u32 val;
rval = smiapp_read(sensor,
@@ -3000,7 +2999,7 @@ static int smiapp_probe(struct i2c_client *client)
}
if (sensor->minfo.smiapp_version &&
- sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIA_LIM(sensor, DATA_TRANSFER_IF_CAPABILITY) &
SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED) {
if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
dev_err(&client->dev, "sysfs nvm entry failed\n");
@@ -3010,21 +3009,21 @@ static int smiapp_probe(struct i2c_client *client)
}
/* We consider this a profile 0 sensor if any of these are zero. */
- if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
+ if (!SMIA_LIM(sensor, MIN_OP_SYS_CLK_DIV) ||
+ !SMIA_LIM(sensor, MAX_OP_SYS_CLK_DIV) ||
+ !SMIA_LIM(sensor, MIN_OP_PIX_CLK_DIV) ||
+ !SMIA_LIM(sensor, MAX_OP_PIX_CLK_DIV)) {
sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
- } else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ } else if (SMIA_LIM(sensor, SCALING_CAPABILITY)
!= SMIAPP_SCALING_CAPABILITY_NONE) {
- if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ if (SMIA_LIM(sensor, SCALING_CAPABILITY)
== SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
else
sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
sensor->scaler = &sensor->ssds[sensor->ssds_used];
sensor->ssds_used++;
- } else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ } else if (SMIA_LIM(sensor, DIGITAL_CROP_CAPABILITY)
== SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
sensor->scaler = &sensor->ssds[sensor->ssds_used];
sensor->ssds_used++;
@@ -3034,13 +3033,13 @@ static int smiapp_probe(struct i2c_client *client)
sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
sensor->ssds_used++;
- sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ sensor->scale_m = SMIA_LIM(sensor, SCALER_N_MIN);
/* prepare PLL configuration input values */
sensor->pll.bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
sensor->pll.csi2.lanes = sensor->hwcfg->lanes;
sensor->pll.ext_clk_freq_hz = sensor->hwcfg->ext_clk;
- sensor->pll.scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ sensor->pll.scale_n = SMIA_LIM(sensor, SCALER_N_MIN);
/* Profile 0 sensors have no separate OP clock branch. */
if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
sensor->pll.flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
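
The conversions above replace open-coded sensor->limits[SMIAPP_LIMIT_*] lookups with the SMIA_LIM() accessor. The macro itself is defined outside this hunk; a minimal sketch of a token-pasting definition that matches the call sites seen here (an assumption, not the patch's actual definition):

/*
 * Sketch only: expands SMIA_LIM(sensor, X_ADDR_MAX) to
 * sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX], keeping call sites short
 * while preserving the existing SMIAPP_LIMIT_* index names.
 */
#define SMIA_LIM(sensor, limit) \
	((sensor)->limits[SMIAPP_LIMIT_##limit])
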
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index 43505cd0616e..e6f96309786f 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -35,6 +35,10 @@
#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE BIT(0)
#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE BIT(1)
+#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_CLOCK 0
+#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE 1
+#define SMIAPP_CSI_SIGNALLING_MODE_CSI2 2
+
#define SMIAPP_DPHY_CTRL_AUTOMATIC 0
/* DPHY control based on REQUESTED_LINK_BIT_RATE_MBPS */
#define SMIAPP_DPHY_CTRL_UI 1
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index ce8c1d47fbf0..1b58b7c6c839 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -8,6 +8,8 @@
* Contact: Sakari Ailus <sakari.ailus@iki.fi>
*/
+#include <asm/unaligned.h>
+
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -69,18 +71,19 @@ static int ____smiapp_read(struct smiapp_sensor *sensor, u16 reg,
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
struct i2c_msg msg;
- unsigned char data[4];
- u16 offset = reg;
+ unsigned char data_buf[sizeof(u32)] = { 0 };
+ unsigned char offset_buf[sizeof(u16)];
int r;
+ if (len > sizeof(data_buf))
+ return -EINVAL;
+
msg.addr = client->addr;
msg.flags = 0;
- msg.len = 2;
- msg.buf = data;
+ msg.len = sizeof(offset_buf);
+ msg.buf = offset_buf;
+ put_unaligned_be16(reg, offset_buf);
- /* high byte goes out first */
- data[0] = (u8) (offset >> 8);
- data[1] = (u8) offset;
r = i2c_transfer(client->adapter, &msg, 1);
if (r != 1) {
if (r >= 0)
@@ -90,6 +93,8 @@ static int ____smiapp_read(struct smiapp_sensor *sensor, u16 reg,
msg.len = len;
msg.flags = I2C_M_RD;
+ msg.buf = &data_buf[sizeof(data_buf) - len];
+
r = i2c_transfer(client->adapter, &msg, 1);
if (r != 1) {
if (r >= 0)
@@ -97,27 +102,12 @@ static int ____smiapp_read(struct smiapp_sensor *sensor, u16 reg,
goto err;
}
- *val = 0;
- /* high byte comes first */
- switch (len) {
- case SMIAPP_REG_32BIT:
- *val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) +
- data[3];
- break;
- case SMIAPP_REG_16BIT:
- *val = (data[0] << 8) + data[1];
- break;
- case SMIAPP_REG_8BIT:
- *val = data[0];
- break;
- default:
- BUG();
- }
+ *val = get_unaligned_be32(data_buf);
return 0;
err:
- dev_err(&client->dev, "read from offset 0x%x error %d\n", offset, r);
+ dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r);
return r;
}
@@ -158,7 +148,7 @@ static int __smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val,
&& len != SMIAPP_REG_32BIT)
return -EINVAL;
- if (len == SMIAPP_REG_8BIT || !only8)
+ if (!only8)
rval = ____smiapp_read(sensor, SMIAPP_REG_ADDR(reg), len, val);
else
rval = ____smiapp_read_8only(sensor, SMIAPP_REG_ADDR(reg), len,
@@ -214,13 +204,10 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
struct i2c_msg msg;
unsigned char data[6];
unsigned int retries;
- u8 flags = SMIAPP_REG_FLAGS(reg);
u8 len = SMIAPP_REG_WIDTH(reg);
- u16 offset = SMIAPP_REG_ADDR(reg);
int r;
- if ((len != SMIAPP_REG_8BIT && len != SMIAPP_REG_16BIT &&
- len != SMIAPP_REG_32BIT) || flags)
+ if (len > sizeof(data) - 2)
return -EINVAL;
msg.addr = client->addr;
@@ -228,27 +215,8 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
msg.len = 2 + len;
msg.buf = data;
- /* high byte goes out first */
- data[0] = (u8) (reg >> 8);
- data[1] = (u8) (reg & 0xff);
-
- switch (len) {
- case SMIAPP_REG_8BIT:
- data[2] = val;
- break;
- case SMIAPP_REG_16BIT:
- data[2] = val >> 8;
- data[3] = val;
- break;
- case SMIAPP_REG_32BIT:
- data[2] = val >> 24;
- data[3] = val >> 16;
- data[4] = val >> 8;
- data[5] = val;
- break;
- default:
- BUG();
- }
+ put_unaligned_be16(SMIAPP_REG_ADDR(reg), data);
+ put_unaligned_be32(val << (8 * (sizeof(val) - len)), data + 2);
for (retries = 0; retries < 5; retries++) {
/*
@@ -269,7 +237,8 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
}
dev_err(&client->dev,
- "wrote 0x%x to offset 0x%x error %d\n", val, offset, r);
+ "wrote 0x%x to offset 0x%x error %d\n", val,
+ SMIAPP_REG_ADDR(reg), r);
return r;
}
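
The rewritten register helpers drop the per-width switch statements in favour of put_unaligned_be16()/put_unaligned_be32() from <asm/unaligned.h> plus a right-aligned read buffer. A self-contained userspace sketch of the same byte-order trick (helper names are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Read side: right-align the len wire bytes in a zeroed 4-byte buffer
 * so one big-endian 32-bit load recovers the value for any width. */
static uint32_t reg_decode(const uint8_t *wire, size_t len)
{
	uint8_t buf[sizeof(uint32_t)] = { 0 };

	memcpy(&buf[sizeof(buf) - len], wire, len);
	return be32_load(buf);
}

/* Write side: pre-shift the value so its most significant used byte
 * comes first, store four big-endian bytes, transmit only len of them. */
static void reg_encode(uint8_t *wire, uint32_t val, size_t len)
{
	uint32_t v = val << (8 * (sizeof(val) - len));
	uint8_t buf[sizeof(uint32_t)];

	buf[0] = v >> 24;
	buf[1] = v >> 16;
	buf[2] = v >> 8;
	buf[3] = v;
	memcpy(wire, buf, len);	/* only the used bytes go on the wire */
}

int main(void)
{
	uint8_t wire[4];

	reg_encode(wire, 0x1234, 2);
	printf("0x%x\n", reg_decode(wire, 2));	/* prints 0x1234 */
	return 0;
}
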
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 4837d80dc453..6f469934f9e3 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
-#include <media/i2c/smiapp.h>
#include "smiapp-pll.h"
#include "smiapp-reg.h"
@@ -42,6 +41,49 @@
#define SMIAPP_COLOUR_COMPONENTS 4
+#define SMIAPP_NAME "smiapp"
+
+#define SMIAPP_DFL_I2C_ADDR (0x20 >> 1) /* Default I2C Address */
+#define SMIAPP_ALT_I2C_ADDR (0x6e >> 1) /* Alternate I2C Address */
+
+/*
+ * Sometimes due to board layout considerations the camera module can be
+ * mounted rotated. The typical rotation used is 180 degrees which can be
+ * corrected by giving a default H-FLIP and V-FLIP in the sensor readout.
+ * FIXME: rotation also changes the bayer pattern.
+ */
+enum smiapp_module_board_orient {
+ SMIAPP_MODULE_BOARD_ORIENT_0 = 0,
+ SMIAPP_MODULE_BOARD_ORIENT_180,
+};
+
+struct smiapp_flash_strobe_parms {
+ u8 mode;
+ u32 strobe_width_high_us;
+ u16 strobe_delay;
+ u16 stobe_start_point;
+ u8 trigger;
+};
+
+struct smiapp_hwconfig {
+ /*
+ * Change the CCI address if i2c_addr_alt is set.
+ * Both the default and alternate CCI addresses need to be present.
+ */
+ unsigned short i2c_addr_dfl; /* Default i2c addr */
+ unsigned short i2c_addr_alt; /* Alternate i2c addr */
+
+ uint32_t ext_clk; /* sensor external clk */
+
+ unsigned int lanes; /* Number of CSI-2 lanes */
+ uint32_t csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */
+ uint64_t *op_sys_clock;
+
+ enum smiapp_module_board_orient module_board_orient;
+
+ struct smiapp_flash_strobe_parms *strobe_setup;
+};
+
#include "smiapp-limits.h"
struct smiapp_quirk;
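
With struct smiapp_hwconfig now private to the driver, a hypothetical initialisation using the fields above might look as follows; all values, and the 0-terminated op_sys_clock convention, are illustrative assumptions rather than part of the patch:

/* Hypothetical platform data for a 2-lane CSI-2 module. */
static uint64_t example_op_sys_clock[] = { 840000000, 0 };

static struct smiapp_hwconfig example_hwcfg = {
	.i2c_addr_dfl = SMIAPP_DFL_I2C_ADDR,
	.i2c_addr_alt = 0,			/* no alternate address */
	.ext_clk = 9600000,			/* 9.6 MHz external clock */
	.lanes = 2,				/* CSI-2 data lanes */
	.csi_signalling_mode = SMIAPP_CSI_SIGNALLING_MODE_CSI2,
	.op_sys_clock = example_op_sys_clock,
	.module_board_orient = SMIAPP_MODULE_BOARD_ORIENT_0,
	.strobe_setup = NULL,			/* no flash strobe */
};
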
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index edad49cebcdf..eb39cf5ea089 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -13,12 +13,15 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
+#include <media/v4l2-rect.h>
#include "tvp5150_reg.h"
@@ -31,6 +34,15 @@
#define TVP5150_MBUS_FMT MEDIA_BUS_FMT_UYVY8_2X8
#define TVP5150_FIELD V4L2_FIELD_ALTERNATE
#define TVP5150_COLORSPACE V4L2_COLORSPACE_SMPTE170M
+#define TVP5150_STD_MASK (V4L2_STD_NTSC | \
+ V4L2_STD_NTSC_443 | \
+ V4L2_STD_PAL | \
+ V4L2_STD_PAL_M | \
+ V4L2_STD_PAL_N | \
+ V4L2_STD_PAL_Nc | \
+ V4L2_STD_SECAM)
+
+#define TVP5150_MAX_CONNECTORS 3 /* Check dt-bindings for more information */
MODULE_DESCRIPTION("Texas Instruments TVP5150A/TVP5150AM1/TVP5151 video decoder driver");
MODULE_AUTHOR("Mauro Carvalho Chehab");
@@ -44,18 +56,26 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
#define dprintk0(__dev, __arg...) dev_dbg_lvl(__dev, 0, 0, __arg)
enum tvp5150_pads {
- TVP5150_PAD_IF_INPUT,
+ TVP5150_PAD_AIP1A,
+ TVP5150_PAD_AIP1B,
TVP5150_PAD_VID_OUT,
TVP5150_NUM_PADS
};
+struct tvp5150_connector {
+ struct v4l2_fwnode_connector base;
+ struct media_entity ent;
+ struct media_pad pad;
+};
+
struct tvp5150 {
struct v4l2_subdev sd;
-#ifdef CONFIG_MEDIA_CONTROLLER
+
struct media_pad pads[TVP5150_NUM_PADS];
- struct media_entity input_ent[TVP5150_INPUT_NUM];
- struct media_pad input_pad[TVP5150_INPUT_NUM];
-#endif
+ struct tvp5150_connector connectors[TVP5150_MAX_CONNECTORS];
+ struct tvp5150_connector *cur_connector;
+ unsigned int connectors_num;
+
struct v4l2_ctrl_handler hdl;
struct v4l2_rect rect;
struct regmap *regmap;
@@ -282,9 +302,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
break;
}
- dev_dbg_lvl(sd->dev, 1, debug, "Selecting video route: route input=%i, output=%i => tvp5150 input=%i, opmode=%i\n",
- decoder->input, decoder->output,
- input, opmode);
+ dev_dbg_lvl(sd->dev, 1, debug,
+ "Selecting video route: route input=%s, output=%s => tvp5150 input=0x%02x, opmode=0x%02x\n",
+ decoder->input == 0 ? "aip1a" :
+ decoder->input == 2 ? "aip1b" : "svideo",
+ decoder->output == 0 ? "normal" : "black-frame-gen",
+ input, opmode);
regmap_write(decoder->regmap, TVP5150_OP_MODE_CTL, opmode);
regmap_write(decoder->regmap, TVP5150_VD_IN_SRC_SEL_1, input);
@@ -773,17 +796,33 @@ static int tvp5150_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
static int tvp5150_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct tvp5150 *decoder = to_tvp5150(sd);
+ struct tvp5150_connector *cur_con = decoder->cur_connector;
+ v4l2_std_id supported_stds;
if (decoder->norm == std)
return 0;
+ /* If no of-connectors are available, no limitations are applied */
+ if (!decoder->connectors_num)
+ supported_stds = V4L2_STD_ALL;
+ else
+ supported_stds = cur_con->base.connector.analog.sdtv_stds;
+
+ /*
+ * Check whether the requested std, or group of stds, is supported by
+ * the connector.
+ */
+ if ((supported_stds & std) == 0)
+ return -EINVAL;
+
/* Change cropping height limits */
if (std & V4L2_STD_525_60)
decoder->rect.height = TVP5150_V_MAX_525_60;
else
decoder->rect.height = TVP5150_V_MAX_OTHERS;
- decoder->norm = std;
+ /* Keep only the supported stds if a group of stds was requested. */
+ decoder->norm = supported_stds & std;
return tvp5150_set_std(sd, std);
}
@@ -986,6 +1025,25 @@ static void tvp5150_set_default(v4l2_std_id std, struct v4l2_rect *crop)
crop->height = TVP5150_V_MAX_OTHERS;
}
+static struct v4l2_rect *
+tvp5150_get_pad_crop(struct tvp5150 *decoder,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &decoder->rect;
+ case V4L2_SUBDEV_FORMAT_TRY:
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ return v4l2_subdev_get_try_crop(&decoder->sd, cfg, pad);
+#else
+ return ERR_PTR(-EINVAL);
+#endif
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
@@ -1010,25 +1068,10 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int tvp5150_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
+static unsigned int tvp5150_get_hmax(struct v4l2_subdev *sd)
{
struct tvp5150 *decoder = to_tvp5150(sd);
- struct v4l2_rect rect = sel->r;
v4l2_std_id std;
- int hmax;
-
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
- sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- dev_dbg_lvl(sd->dev, 1, debug, "%s left=%d, top=%d, width=%d, height=%d\n",
- __func__, rect.left, rect.top, rect.width, rect.height);
-
- /* tvp5150 has some special limits */
- rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
- rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
/* Calculate height based on current standard */
if (decoder->norm == V4L2_STD_ALL)
@@ -1036,36 +1079,78 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
else
std = decoder->norm;
- if (std & V4L2_STD_525_60)
- hmax = TVP5150_V_MAX_525_60;
- else
- hmax = TVP5150_V_MAX_OTHERS;
+ return (std & V4L2_STD_525_60) ?
+ TVP5150_V_MAX_525_60 : TVP5150_V_MAX_OTHERS;
+}
- /*
- * alignments:
- * - width = 2 due to UYVY colorspace
- * - height, image = no special alignment
- */
- v4l_bound_align_image(&rect.width,
- TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
- TVP5150_H_MAX - rect.left, 1, &rect.height,
- hmax - TVP5150_MAX_CROP_TOP - rect.top,
- hmax - rect.top, 0, 0);
+static void tvp5150_set_hw_selection(struct v4l2_subdev *sd,
+ struct v4l2_rect *rect)
+{
+ struct tvp5150 *decoder = to_tvp5150(sd);
+ unsigned int hmax = tvp5150_get_hmax(sd);
- regmap_write(decoder->regmap, TVP5150_VERT_BLANKING_START, rect.top);
+ regmap_write(decoder->regmap, TVP5150_VERT_BLANKING_START, rect->top);
regmap_write(decoder->regmap, TVP5150_VERT_BLANKING_STOP,
- rect.top + rect.height - hmax);
+ rect->top + rect->height - hmax);
regmap_write(decoder->regmap, TVP5150_ACT_VD_CROP_ST_MSB,
- rect.left >> TVP5150_CROP_SHIFT);
+ rect->left >> TVP5150_CROP_SHIFT);
regmap_write(decoder->regmap, TVP5150_ACT_VD_CROP_ST_LSB,
- rect.left | (1 << TVP5150_CROP_SHIFT));
+ rect->left | (1 << TVP5150_CROP_SHIFT));
regmap_write(decoder->regmap, TVP5150_ACT_VD_CROP_STP_MSB,
- (rect.left + rect.width - TVP5150_MAX_CROP_LEFT) >>
+ (rect->left + rect->width - TVP5150_MAX_CROP_LEFT) >>
TVP5150_CROP_SHIFT);
regmap_write(decoder->regmap, TVP5150_ACT_VD_CROP_STP_LSB,
- rect.left + rect.width - TVP5150_MAX_CROP_LEFT);
+ rect->left + rect->width - TVP5150_MAX_CROP_LEFT);
+}
+
+static int tvp5150_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct tvp5150 *decoder = to_tvp5150(sd);
+ struct v4l2_rect *rect = &sel->r;
+ struct v4l2_rect *crop;
+ unsigned int hmax;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ dev_dbg_lvl(sd->dev, 1, debug, "%s left=%d, top=%d, width=%d, height=%d\n",
+ __func__, rect->left, rect->top, rect->width, rect->height);
+
+ /* tvp5150 has some special limits */
+ rect->left = clamp(rect->left, 0, TVP5150_MAX_CROP_LEFT);
+ rect->top = clamp(rect->top, 0, TVP5150_MAX_CROP_TOP);
+ hmax = tvp5150_get_hmax(sd);
+
+ /*
+ * alignments:
+ * - width = 2 due to UYVY colorspace
+ * - height, image = no special alignment
+ */
+ v4l_bound_align_image(&rect->width,
+ TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect->left,
+ TVP5150_H_MAX - rect->left, 1, &rect->height,
+ hmax - TVP5150_MAX_CROP_TOP - rect->top,
+ hmax - rect->top, 0, 0);
+
+ if (!IS_ENABLED(CONFIG_VIDEO_V4L2_SUBDEV_API) &&
+ sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ crop = tvp5150_get_pad_crop(decoder, cfg, sel->pad, sel->which);
+ if (IS_ERR(crop))
+ return PTR_ERR(crop);
+
+ /*
+ * Update output image size if the selection (crop) rectangle size or
+ * position has been modified.
+ */
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
+ !v4l2_rect_equal(rect, crop))
+ tvp5150_set_hw_selection(sd, rect);
- decoder->rect = rect;
+ *crop = *rect;
return 0;
}
@@ -1075,11 +1160,9 @@ static int tvp5150_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_selection *sel)
{
struct tvp5150 *decoder = container_of(sd, struct tvp5150, sd);
+ struct v4l2_rect *crop;
v4l2_std_id std;
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
sel->r.left = 0;
@@ -1097,7 +1180,11 @@ static int tvp5150_get_selection(struct v4l2_subdev *sd,
sel->r.height = TVP5150_V_MAX_OTHERS;
return 0;
case V4L2_SEL_TGT_CROP:
- sel->r = decoder->rect;
+ crop = tvp5150_get_pad_crop(decoder, cfg, sel->pad,
+ sel->which);
+ if (IS_ERR(crop))
+ return PTR_ERR(crop);
+ sel->r = *crop;
return 0;
default:
return -EINVAL;
@@ -1170,30 +1257,148 @@ static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
}
/****************************************************************************
- Media entity ops
+ * Media entity ops
****************************************************************************/
+#if defined(CONFIG_MEDIA_CONTROLLER)
+static int tvp5150_set_link(struct media_pad *connector_pad,
+ struct media_pad *tvp5150_pad, u32 flags)
+{
+ struct media_link *link;
+
+ link = media_entity_find_link(connector_pad, tvp5150_pad);
+ if (!link)
+ return -EINVAL;
+
+ link->flags = flags;
+ link->reverse->flags = link->flags;
+
+ return 0;
+}
+
+static int tvp5150_disable_all_input_links(struct tvp5150 *decoder)
+{
+ struct media_pad *connector_pad;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < TVP5150_NUM_PADS - 1; i++) {
+ connector_pad = media_entity_remote_pad(&decoder->pads[i]);
+ if (!connector_pad)
+ continue;
+
+ err = tvp5150_set_link(connector_pad, &decoder->pads[i], 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tvp5150_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
+ u32 config);
-#ifdef CONFIG_MEDIA_CONTROLLER
static int tvp5150_link_setup(struct media_entity *entity,
- const struct media_pad *local,
+ const struct media_pad *tvp5150_pad,
const struct media_pad *remote, u32 flags)
{
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct tvp5150 *decoder = to_tvp5150(sd);
- int i;
+ struct media_pad *other_tvp5150_pad =
+ &decoder->pads[tvp5150_pad->index ^ 1];
+ struct v4l2_fwnode_connector *v4l2c;
+ bool is_svideo = false;
+ unsigned int i;
+ int err;
+
+ /*
+ * The TVP5150 state is determined by the enabled sink pad link(s).
+ * Enabling or disabling the source pad link has no effect.
+ */
+ if (tvp5150_pad->flags & MEDIA_PAD_FL_SOURCE)
+ return 0;
- for (i = 0; i < TVP5150_INPUT_NUM; i++) {
- if (remote->entity == &decoder->input_ent[i])
+ /* Check if the svideo connector should be enabled */
+ for (i = 0; i < decoder->connectors_num; i++) {
+ if (remote->entity == &decoder->connectors[i].ent) {
+ v4l2c = &decoder->connectors[i].base;
+ is_svideo = v4l2c->type == V4L2_CONN_SVIDEO;
break;
+ }
}
- /* Do nothing for entities that are not input connectors */
- if (i == TVP5150_INPUT_NUM)
- return 0;
+ dev_dbg_lvl(sd->dev, 1, debug, "link setup '%s':%d->'%s':%d[%d]",
+ remote->entity->name, remote->index,
+ tvp5150_pad->entity->name, tvp5150_pad->index,
+ flags & MEDIA_LNK_FL_ENABLED);
+ if (is_svideo)
+ dev_dbg_lvl(sd->dev, 1, debug,
+ "link setup '%s':%d->'%s':%d[%d]",
+ remote->entity->name, remote->index,
+ other_tvp5150_pad->entity->name,
+ other_tvp5150_pad->index,
+ flags & MEDIA_LNK_FL_ENABLED);
- decoder->input = i;
+ /*
+ * The TVP5150 has an internal mux which allows the following setup:
+ *
+ * comp-connector1 --\
+ * |---> AIP1A
+ * /
+ * svideo-connector -|
+ * \
+ * |---> AIP1B
+ * comp-connector2 --/
+ *
+ * We can't rely on user space to disable the current connector before
+ * enabling the new one. Disable all active connector links to be on
+ * the safe side.
+ */
+ err = tvp5150_disable_all_input_links(decoder);
+ if (err)
+ return err;
+
+ tvp5150_s_routing(sd, is_svideo ? TVP5150_SVIDEO : tvp5150_pad->index,
+ flags & MEDIA_LNK_FL_ENABLED ? TVP5150_NORMAL :
+ TVP5150_BLACK_SCREEN, 0);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ struct v4l2_fwnode_connector_analog *v4l2ca;
+ u32 new_norm;
+
+ /*
+ * The S-Video connector is connected to both ports AIP1A and AIP1B.
+ * Both links must be enabled in one shot regardless of which link
+ * the user requests.
+ */
+ if (is_svideo) {
+ err = tvp5150_set_link((struct media_pad *)remote,
+ other_tvp5150_pad, flags);
+ if (err)
+ return err;
+ }
- tvp5150_selmux(sd);
+ if (!decoder->connectors_num)
+ return 0;
+
+ /* Update the current connector */
+ decoder->cur_connector =
+ container_of(remote, struct tvp5150_connector, pad);
+
+ /*
+ * Do nothing if the new connector supports the same tv-norms as
+ * the old one.
+ */
+ v4l2ca = &decoder->cur_connector->base.connector.analog;
+ new_norm = decoder->norm & v4l2ca->sdtv_stds;
+ if (decoder->norm == new_norm)
+ return 0;
+
+ /*
+ * Fall back to the new connector's tv-norms if they have none in
+ * common with the current tv-norm.
+ */
+ tvp5150_s_std(sd, new_norm ? new_norm : v4l2ca->sdtv_stds);
+ }
return 0;
}
@@ -1202,20 +1407,54 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
.link_setup = tvp5150_link_setup,
};
#endif
-
/****************************************************************************
I2C Command
****************************************************************************/
+static int __maybe_unused tvp5150_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct tvp5150 *decoder = to_tvp5150(sd);
+
+ if (decoder->irq)
+ /* Disable lock interrupt */
+ return regmap_update_bits(decoder->regmap,
+ TVP5150_INT_ENABLE_REG_A,
+ TVP5150_INT_A_LOCK, 0);
+ return 0;
+}
+
+static int __maybe_unused tvp5150_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct tvp5150 *decoder = to_tvp5150(sd);
+
+ if (decoder->irq)
+ /* Enable lock interrupt */
+ return regmap_update_bits(decoder->regmap,
+ TVP5150_INT_ENABLE_REG_A,
+ TVP5150_INT_A_LOCK,
+ TVP5150_INT_A_LOCK);
+ return 0;
+}
static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
{
struct tvp5150 *decoder = to_tvp5150(sd);
- unsigned int mask, val = 0, int_val = 0;
+ unsigned int mask, val = 0;
+ int ret;
mask = TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
TVP5150_MISC_CTL_CLOCK_OE;
if (enable) {
+ ret = pm_runtime_get_sync(sd->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sd->dev);
+ return ret;
+ }
+
tvp5150_enable(sd);
/* Enable outputs if decoder is locked */
@@ -1223,15 +1462,13 @@ static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
val = decoder->lock ? decoder->oe : 0;
else
val = decoder->oe;
- int_val = TVP5150_INT_A_LOCK;
+
v4l2_subdev_notify_event(&decoder->sd, &tvp5150_ev_fmt);
+ } else {
+ pm_runtime_put(sd->dev);
}
regmap_update_bits(decoder->regmap, TVP5150_MISC_CTL, mask, val);
- if (decoder->irq)
- /* Enable / Disable lock interrupt */
- regmap_update_bits(decoder->regmap, TVP5150_INT_ENABLE_REG_A,
- TVP5150_INT_A_LOCK, int_val);
return 0;
}
@@ -1342,6 +1579,19 @@ static int tvp5150_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regi
}
#endif
+static int tvp5150_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
int status = tvp5150_read(sd, 0x88);
@@ -1352,40 +1602,96 @@ static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
static int tvp5150_registered(struct v4l2_subdev *sd)
{
-#ifdef CONFIG_MEDIA_CONTROLLER
+#if defined(CONFIG_MEDIA_CONTROLLER)
struct tvp5150 *decoder = to_tvp5150(sd);
- int ret = 0;
- int i;
-
- for (i = 0; i < TVP5150_INPUT_NUM; i++) {
- struct media_entity *input = &decoder->input_ent[i];
- struct media_pad *pad = &decoder->input_pad[i];
-
- if (!input->name)
- continue;
+ unsigned int i;
+ int ret;
- decoder->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
+ /*
+ * Set up connector pads and links. Enable the link to the first
+ * available connector by default.
+ */
+ for (i = 0; i < decoder->connectors_num; i++) {
+ struct media_entity *con = &decoder->connectors[i].ent;
+ struct media_pad *pad = &decoder->connectors[i].pad;
+ struct v4l2_fwnode_connector *v4l2c =
+ &decoder->connectors[i].base;
+ struct v4l2_connector_link *link =
+ v4l2_connector_first_link(v4l2c);
+ unsigned int port = link->fwnode_link.remote_port;
+ unsigned int flags = i ? 0 : MEDIA_LNK_FL_ENABLED;
+ bool is_svideo = v4l2c->type == V4L2_CONN_SVIDEO;
+
+ pad->flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(con, 1, pad);
+ if (ret < 0)
+ goto err;
- ret = media_entity_pads_init(input, 1, pad);
+ ret = media_device_register_entity(sd->v4l2_dev->mdev, con);
if (ret < 0)
- return ret;
+ goto err;
- ret = media_device_register_entity(sd->v4l2_dev->mdev, input);
+ ret = media_create_pad_link(con, 0, &sd->entity, port, flags);
if (ret < 0)
- return ret;
+ goto err;
- ret = media_create_pad_link(input, 0, &sd->entity,
- TVP5150_PAD_IF_INPUT, 0);
- if (ret < 0) {
- media_device_unregister_entity(input);
- return ret;
+ if (is_svideo) {
+ /*
+ * Check tvp5150_link_setup() comments for more
+ * information.
+ */
+ link = v4l2_connector_last_link(v4l2c);
+ port = link->fwnode_link.remote_port;
+ ret = media_create_pad_link(con, 0, &sd->entity, port,
+ flags);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* Enable default input. */
+ if (flags == MEDIA_LNK_FL_ENABLED) {
+ decoder->input =
+ is_svideo ? TVP5150_SVIDEO :
+ port == 0 ? TVP5150_COMPOSITE0 :
+ TVP5150_COMPOSITE1;
+
+ tvp5150_selmux(sd);
+ decoder->cur_connector = &decoder->connectors[i];
+ tvp5150_s_std(sd, v4l2c->connector.analog.sdtv_stds);
}
}
+
+ return 0;
+
+err:
+ for (i = 0; i < decoder->connectors_num; i++)
+ media_device_unregister_entity(&decoder->connectors[i].ent);
+ return ret;
#endif
return 0;
}
+static int tvp5150_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(sd->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sd->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tvp5150_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ pm_runtime_put(sd->dev);
+
+ return 0;
+}
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_ctrl_ops tvp5150_ctrl_ops = {
@@ -1399,6 +1705,8 @@ static const struct v4l2_subdev_core_ops tvp5150_core_ops = {
.g_register = tvp5150_g_register,
.s_register = tvp5150_s_register,
#endif
+ .subscribe_event = tvp5150_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
@@ -1441,6 +1749,8 @@ static const struct v4l2_subdev_ops tvp5150_ops = {
static const struct v4l2_subdev_internal_ops tvp5150_internal_ops = {
.registered = tvp5150_registered,
+ .open = tvp5150_open,
+ .close = tvp5150_close,
};
/****************************************************************************
@@ -1591,102 +1901,211 @@ static int tvp5150_init(struct i2c_client *c)
return 0;
}
-static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
+#if defined(CONFIG_MEDIA_CONTROLLER)
+static int tvp5150_mc_init(struct tvp5150 *decoder)
{
- struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
- struct device_node *ep;
-#ifdef CONFIG_MEDIA_CONTROLLER
- struct device_node *connectors, *child;
- struct media_entity *input;
- const char *name;
- u32 input_type;
-#endif
- unsigned int flags;
- int ret = 0;
+ struct v4l2_subdev *sd = &decoder->sd;
+ unsigned int i;
- ep = of_graph_get_next_endpoint(np, NULL);
- if (!ep)
- return -EINVAL;
+ sd->entity.ops = &tvp5150_sd_media_ops;
+ sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
- ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg);
- if (ret)
- goto err;
+ for (i = 0; i < TVP5150_NUM_PADS - 1; i++) {
+ decoder->pads[i].flags = MEDIA_PAD_FL_SINK;
+ decoder->pads[i].sig_type = PAD_SIGNAL_ANALOG;
+ }
- flags = bus_cfg.bus.parallel.flags;
+ decoder->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+ decoder->pads[i].sig_type = PAD_SIGNAL_DV;
- if (bus_cfg.bus_type == V4L2_MBUS_PARALLEL &&
- !(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH &&
- flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH &&
- flags & V4L2_MBUS_FIELD_EVEN_LOW)) {
- ret = -EINVAL;
- goto err;
- }
+ return media_entity_pads_init(&sd->entity, TVP5150_NUM_PADS,
+ decoder->pads);
+}
- decoder->mbus_type = bus_cfg.bus_type;
+#else /* !defined(CONFIG_MEDIA_CONTROLLER) */
-#ifdef CONFIG_MEDIA_CONTROLLER
- connectors = of_get_child_by_name(np, "connectors");
+static inline int tvp5150_mc_init(struct tvp5150 *decoder)
+{
+ return 0;
+}
+#endif /* defined(CONFIG_MEDIA_CONTROLLER) */
- if (!connectors)
- goto err;
+static int tvp5150_validate_connectors(struct tvp5150 *decoder)
+{
+ struct device *dev = decoder->sd.dev;
+ struct tvp5150_connector *tvpc;
+ struct v4l2_fwnode_connector *v4l2c;
+ unsigned int i;
+
+ if (!decoder->connectors_num) {
+ dev_err(dev, "No valid connector found\n");
+ return -ENODEV;
+ }
- for_each_available_child_of_node(connectors, child) {
- ret = of_property_read_u32(child, "input", &input_type);
- if (ret) {
- dev_err(decoder->sd.dev,
- "missing type property in node %pOFn\n",
- child);
- of_node_put(child);
- goto err_connector;
+ for (i = 0; i < decoder->connectors_num; i++) {
+ struct v4l2_connector_link *link0 = NULL;
+ struct v4l2_connector_link *link1;
+
+ tvpc = &decoder->connectors[i];
+ v4l2c = &tvpc->base;
+
+ if (v4l2c->type == V4L2_CONN_COMPOSITE) {
+ if (v4l2c->nr_of_links != 1) {
+ dev_err(dev, "Composite: connector needs 1 link\n");
+ return -EINVAL;
+ }
+ link0 = v4l2_connector_first_link(v4l2c);
+ if (!link0) {
+ dev_err(dev, "Composite: invalid first link\n");
+ return -EINVAL;
+ }
+ if (link0->fwnode_link.remote_id == 1) {
+ dev_err(dev, "Composite: invalid endpoint id\n");
+ return -EINVAL;
+ }
}
- if (input_type >= TVP5150_INPUT_NUM) {
- ret = -EINVAL;
- of_node_put(child);
- goto err_connector;
+ if (v4l2c->type == V4L2_CONN_SVIDEO) {
+ if (v4l2c->nr_of_links != 2) {
+ dev_err(dev, "SVideo: connector needs 2 links\n");
+ return -EINVAL;
+ }
+ link0 = v4l2_connector_first_link(v4l2c);
+ if (!link0) {
+ dev_err(dev, "SVideo: invalid first link\n");
+ return -EINVAL;
+ }
+ link1 = v4l2_connector_last_link(v4l2c);
+ if (link0->fwnode_link.remote_port ==
+ link1->fwnode_link.remote_port) {
+ dev_err(dev, "SVideo: invalid link setup\n");
+ return -EINVAL;
+ }
}
- input = &decoder->input_ent[input_type];
+ if (!(v4l2c->connector.analog.sdtv_stds & TVP5150_STD_MASK)) {
+ dev_err(dev, "Unsupported tv-norm on connector %s\n",
+ v4l2c->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
+{
+ struct device *dev = decoder->sd.dev;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_UNKNOWN
+ };
+ struct device_node *ep_np;
+ struct tvp5150_connector *tvpc;
+ struct v4l2_fwnode_connector *v4l2c;
+ unsigned int flags, ep_num;
+ unsigned int i;
+ int ret;
+
+ /* At least 1 output and 1 input */
+ ep_num = of_graph_get_endpoint_count(np);
+ if (ep_num < 2 || ep_num > 5) {
+ dev_err(dev, "At least 1 input and 1 output must be connected to the device.\n");
+ return -EINVAL;
+ }
- /* Each input connector can only be defined once */
- if (input->name) {
- dev_err(decoder->sd.dev,
- "input %s with same type already exists\n",
- input->name);
- of_node_put(child);
- ret = -EINVAL;
- goto err_connector;
+ /* Layout if all connectors are used:
+ *
+ * tvp-5150 port@0 (AIP1A)
+ * endpoint@0 -----------> Comp0-Con port
+ * endpoint@1 --------+--> Svideo-Con port
+ * tvp-5150 port@1 (AIP1B) |
+ * endpoint@1 --------+
+ * endpoint@0 -----------> Comp1-Con port
+ * tvp-5150 port@2
+ * endpoint (video bitstream output at YOUT[0-7] parallel bus)
+ */
+ for_each_endpoint_of_node(np, ep_np) {
+ struct fwnode_handle *ep_fwnode = of_fwnode_handle(ep_np);
+ unsigned int next_connector = decoder->connectors_num;
+ struct of_endpoint ep;
+
+ of_graph_parse_endpoint(ep_np, &ep);
+ if (ep.port > 1 || ep.id > 1) {
+ dev_dbg(dev, "Ignore connector on port@%u/ep@%u\n",
+ ep.port, ep.id);
+ continue;
}
- switch (input_type) {
- case TVP5150_COMPOSITE0:
- case TVP5150_COMPOSITE1:
- input->function = MEDIA_ENT_F_CONN_COMPOSITE;
- break;
- case TVP5150_SVIDEO:
- input->function = MEDIA_ENT_F_CONN_SVIDEO;
- break;
+ tvpc = &decoder->connectors[next_connector];
+ v4l2c = &tvpc->base;
+
+ if (ep.port == 0 || (ep.port == 1 && ep.id == 0)) {
+ ret = v4l2_fwnode_connector_parse(ep_fwnode, v4l2c);
+ if (ret)
+ goto err_put;
+ ret = v4l2_fwnode_connector_add_link(ep_fwnode, v4l2c);
+ if (ret)
+ goto err_put;
+ decoder->connectors_num++;
+ } else {
+ /* Adding the 2nd svideo link */
+ for (i = 0; i < TVP5150_MAX_CONNECTORS; i++) {
+ tvpc = &decoder->connectors[i];
+ v4l2c = &tvpc->base;
+ if (v4l2c->type == V4L2_CONN_SVIDEO)
+ break;
+ }
+
+ ret = v4l2_fwnode_connector_add_link(ep_fwnode, v4l2c);
+ if (ret)
+ goto err_put;
}
+ }
- input->flags = MEDIA_ENT_FL_CONNECTOR;
+ ret = tvp5150_validate_connectors(decoder);
+ if (ret)
+ goto err_free;
+
+ for (i = 0; i < decoder->connectors_num; i++) {
+ tvpc = &decoder->connectors[i];
+ v4l2c = &tvpc->base;
+ tvpc->ent.flags = MEDIA_ENT_FL_CONNECTOR;
+ tvpc->ent.function = v4l2c->type == V4L2_CONN_SVIDEO ?
+ MEDIA_ENT_F_CONN_SVIDEO : MEDIA_ENT_F_CONN_COMPOSITE;
+ tvpc->ent.name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
+ v4l2c->name, v4l2c->label ?
+ v4l2c->label : "");
+ }
- ret = of_property_read_string(child, "label", &name);
- if (ret < 0) {
- dev_err(decoder->sd.dev,
- "missing label property in node %pOFn\n",
- child);
- of_node_put(child);
- goto err_connector;
- }
+ ep_np = of_graph_get_endpoint_by_regs(np, TVP5150_PAD_VID_OUT, 0);
+ if (!ep_np) {
+ dev_err(dev, "Error no output endpoint available\n");
+ goto err_free;
+ }
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_np), &bus_cfg);
+ of_node_put(ep_np);
+ if (ret)
+ goto err_free;
- input->name = name;
+ flags = bus_cfg.bus.parallel.flags;
+ if (bus_cfg.bus_type == V4L2_MBUS_PARALLEL &&
+ !(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH &&
+ flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH &&
+ flags & V4L2_MBUS_FIELD_EVEN_LOW)) {
+ ret = -EINVAL;
+ goto err_free;
}
-err_connector:
- of_node_put(connectors);
-#endif
-err:
- of_node_put(ep);
+ decoder->mbus_type = bus_cfg.bus_type;
+
+ return 0;
+
+err_put:
+ of_node_put(ep_np);
+err_free:
+ for (i = 0; i < TVP5150_MAX_CONNECTORS; i++)
+ v4l2_fwnode_connector_free(&decoder->connectors[i].base);
+
return ret;
}
@@ -1701,6 +2120,7 @@ static int tvp5150_probe(struct i2c_client *c)
struct v4l2_subdev *sd;
struct device_node *np = c->dev.of_node;
struct regmap *map;
+ unsigned int i;
int res;
/* Check if the adapter supports the needed features */
@@ -1722,6 +2142,9 @@ static int tvp5150_probe(struct i2c_client *c)
core->regmap = map;
sd = &core->sd;
+ v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
+ sd->internal_ops = &tvp5150_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
if (IS_ENABLED(CONFIG_OF) && np) {
res = tvp5150_parse_dt(core, np);
@@ -1734,30 +2157,29 @@ static int tvp5150_probe(struct i2c_client *c)
core->mbus_type = V4L2_MBUS_BT656;
}
- v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
- sd->internal_ops = &tvp5150_internal_ops;
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-#if defined(CONFIG_MEDIA_CONTROLLER)
- core->pads[TVP5150_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
- core->pads[TVP5150_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
- core->pads[TVP5150_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
- core->pads[TVP5150_PAD_VID_OUT].sig_type = PAD_SIGNAL_DV;
-
- sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
-
- res = media_entity_pads_init(&sd->entity, TVP5150_NUM_PADS, core->pads);
- if (res < 0)
+ res = tvp5150_mc_init(core);
+ if (res)
return res;
- sd->entity.ops = &tvp5150_sd_media_ops;
-#endif
-
res = tvp5150_detect_version(core);
if (res < 0)
return res;
- core->norm = V4L2_STD_ALL; /* Default is autodetect */
+ /*
+ * Merge the tv-norms supported by all successfully parsed connectors.
+ * Fall back to the autodetect default when no connectors are
+ * available.
+ */
+ for (i = 0; i < core->connectors_num; i++) {
+ struct v4l2_fwnode_connector *v4l2c;
+
+ v4l2c = &core->connectors[i].base;
+ core->norm |= v4l2c->connector.analog.sdtv_stds;
+ }
+
+ if (!core->connectors_num)
+ core->norm = V4L2_STD_ALL;
+
core->detected_norm = V4L2_STD_UNKNOWN;
core->input = TVP5150_COMPOSITE1;
core->enable = true;
@@ -1802,6 +2224,11 @@ static int tvp5150_probe(struct i2c_client *c)
if (debug > 1)
tvp5150_log_status(sd);
+
+ pm_runtime_set_active(&c->dev);
+ pm_runtime_enable(&c->dev);
+ pm_runtime_idle(&c->dev);
+
return 0;
err:
@@ -1813,18 +2240,32 @@ static int tvp5150_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct tvp5150 *decoder = to_tvp5150(sd);
+ unsigned int i;
dev_dbg_lvl(sd->dev, 1, debug,
"tvp5150.c: removing tvp5150 adapter on address 0x%x\n",
c->addr << 1);
+ for (i = 0; i < decoder->connectors_num; i++)
+ v4l2_fwnode_connector_free(&decoder->connectors[i].base);
+ for (i = 0; i < decoder->connectors_num; i++)
+ media_device_unregister_entity(&decoder->connectors[i].ent);
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
+ pm_runtime_disable(&c->dev);
+ pm_runtime_set_suspended(&c->dev);
+
return 0;
}
/* ----------------------------------------------------------------------- */
+static const struct dev_pm_ops tvp5150_pm_ops = {
+ SET_RUNTIME_PM_OPS(tvp5150_runtime_suspend,
+ tvp5150_runtime_resume,
+ NULL)
+};
+
static const struct i2c_device_id tvp5150_id[] = {
{ "tvp5150", 0 },
{ }
@@ -1843,6 +2284,7 @@ static struct i2c_driver tvp5150_driver = {
.driver = {
.of_match_table = of_match_ptr(tvp5150_of_match),
.name = "tvp5150",
+ .pm = &tvp5150_pm_ops,
},
.probe_new = tvp5150_probe,
.remove = tvp5150_remove,
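
The tvp5150 changes above move the lock-interrupt enable into runtime PM callbacks and hold a PM reference while streaming or an open file handle needs the hardware. A minimal sketch of that get/put pattern, with a hypothetical example_s_stream() standing in for the subdev op:

#include <linux/pm_runtime.h>

static int example_s_stream(struct device *dev, bool enable)
{
	int ret;

	if (enable) {
		/* Resume the device; runtime_resume re-enables the IRQ. */
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* get_sync bumps the count even on failure */
			pm_runtime_put_noidle(dev);
			return ret;
		}
		/* ... enable the decoder outputs here ... */
		return 0;
	}

	/* Drop the reference; the device may now runtime-suspend. */
	pm_runtime_put(dev);
	return 0;
}
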
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 078141712c88..0465832a4090 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -255,7 +255,7 @@ static int amg88xx_set_power(struct video_i2c_data *data, bool on)
return amg88xx_set_power_off(data);
}
-#if IS_ENABLED(CONFIG_HWMON)
+#if IS_REACHABLE(CONFIG_HWMON)
static const u32 amg88xx_temp_config[] = {
HWMON_T_INPUT,
@@ -858,7 +858,7 @@ static int video_i2c_probe(struct i2c_client *client,
}
}
- ret = video_register_device(&data->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&data->vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0)
goto error_pm_disable;
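
The IS_ENABLED() to IS_REACHABLE() switch matters when video-i2c is built in while hwmon is a module: IS_ENABLED(CONFIG_HWMON) is true for =y or =m, so a built-in driver would compile calls to hwmon symbols it cannot link against, whereas IS_REACHABLE() is only true when the symbols are actually linkable. A sketch of the guard (the helper name is hypothetical):

#include <linux/device.h>
#include <linux/kconfig.h>

#if IS_REACHABLE(CONFIG_HWMON)
static int example_hwmon_init(struct device *dev)
{
	/* hwmon symbols are linkable in this configuration */
	return 0;	/* ... register the temperature sensor here ... */
}
#else
static inline int example_hwmon_init(struct device *dev)
{
	return 0;	/* hwmon unreachable: skip sensor registration */
}
#endif
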
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 668770e9f609..211279c5fd77 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -662,9 +662,14 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
struct media_link *link;
struct media_link *backlink;
- BUG_ON(source == NULL || sink == NULL);
- BUG_ON(source_pad >= source->num_pads);
- BUG_ON(sink_pad >= sink->num_pads);
+ if (WARN_ON(!source || !sink) ||
+ WARN_ON(source_pad >= source->num_pads) ||
+ WARN_ON(sink_pad >= sink->num_pads))
+ return -EINVAL;
+ if (WARN_ON(!(source->pads[source_pad].flags & MEDIA_PAD_FL_SOURCE)))
+ return -EINVAL;
+ if (WARN_ON(!(sink->pads[sink_pad].flags & MEDIA_PAD_FL_SINK)))
+ return -EINVAL;
link = media_add_link(&source->links);
if (link == NULL)
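
media_create_pad_link() now reports caller bugs with WARN_ON() plus -EINVAL instead of halting the machine with BUG_ON(), and additionally validates that the pads have the expected source/sink direction flags. The hardening pattern in isolation, as a sketch:

#include <linux/bug.h>
#include <linux/errno.h>

/* Returning an error with a stack trace lets the system keep running;
 * BUG_ON() would have killed it over a recoverable caller mistake. */
static int example_check_args(const void *source, const void *sink)
{
	if (WARN_ON(!source || !sink))
		return -EINVAL;

	return 0;
}
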
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index a359da7773a9..9144f795fb93 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2964,7 +2964,7 @@ static int bttv_open(struct file *file)
dprintk("open dev=%s\n", video_device_node_name(vdev));
- if (vdev->vfl_type == VFL_TYPE_GRABBER) {
+ if (vdev->vfl_type == VFL_TYPE_VIDEO) {
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
} else if (vdev->vfl_type == VFL_TYPE_VBI) {
type = V4L2_BUF_TYPE_VBI_CAPTURE;
@@ -3905,7 +3905,7 @@ static int bttv_register_video(struct bttv *btv)
if (no_overlay <= 0)
btv->video_dev.device_caps |= V4L2_CAP_VIDEO_OVERLAY;
- if (video_register_device(&btv->video_dev, VFL_TYPE_GRABBER,
+ if (video_register_device(&btv->video_dev, VFL_TYPE_VIDEO,
video_nr[btv->c.nr]) < 0)
goto err;
pr_info("%d: registered device %s\n",
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index c5207501d5e0..0ff37496c9ab 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -1272,7 +1272,7 @@ static int cobalt_node_register(struct cobalt *cobalt, int node)
video_set_drvdata(vdev, s);
ret = vb2_queue_init(q);
if (!s->is_audio && ret == 0)
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
else if (!s->is_dummy)
ret = cobalt_alsa_init(s);
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index b79718519b9b..3178df3c4922 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -48,19 +48,19 @@ static struct {
} cx18_stream_info[] = {
{ /* CX18_ENC_STREAM_TYPE_MPG */
"encoder MPEG",
- VFL_TYPE_GRABBER, 0,
+ VFL_TYPE_VIDEO, 0,
PCI_DMA_FROMDEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_TS */
"TS",
- VFL_TYPE_GRABBER, -1,
+ VFL_TYPE_VIDEO, -1,
PCI_DMA_FROMDEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_YUV */
"encoder YUV",
- VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET,
+ VFL_TYPE_VIDEO, CX18_V4L2_ENC_YUV_OFFSET,
PCI_DMA_FROMDEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_TUNER
@@ -74,13 +74,13 @@ static struct {
},
{ /* CX18_ENC_STREAM_TYPE_PCM */
"encoder PCM audio",
- VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET,
+ VFL_TYPE_VIDEO, CX18_V4L2_ENC_PCM_OFFSET,
PCI_DMA_FROMDEVICE,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
},
{ /* CX18_ENC_STREAM_TYPE_IDX */
"encoder IDX",
- VFL_TYPE_GRABBER, -1,
+ VFL_TYPE_VIDEO, -1,
PCI_DMA_FROMDEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_RAD */
@@ -434,7 +434,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
name = video_device_node_name(&s->video_dev);
switch (vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
CX18_INFO("Registered device %s for %s (%d x %d.%02d kB)\n",
name, s->name, cx->stream_buffers[type],
cx->stream_buf_size[type] / 1024,
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 2327fe612610..434677bd4ad1 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1545,7 +1545,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
if (dev->tuner_type != TUNER_ABSENT)
dev->v4l_device->device_caps |= V4L2_CAP_TUNER;
err = video_register_device(dev->v4l_device,
- VFL_TYPE_GRABBER, -1);
+ VFL_TYPE_VIDEO, -1);
if (err < 0) {
pr_info("%s: can't register mpeg device\n", dev->name);
return err;
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 7fc408ee4934..000c108b94fd 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -1304,7 +1304,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
dev->video_dev->device_caps |= V4L2_CAP_TUNER;
- err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
+ err = video_register_device(dev->video_dev, VFL_TYPE_VIDEO,
video_nr[dev->nr]);
if (err < 0) {
pr_info("%s: can't register video device\n",
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index a10261da0db6..1b80c990cb94 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -757,7 +757,7 @@ int cx25821_video_register(struct cx25821_dev *dev)
snprintf(vdev->name, sizeof(vdev->name), "%s #%d", dev->name, i);
video_set_drvdata(vdev, chan);
- err = video_register_device(vdev, VFL_TYPE_GRABBER,
+ err = video_register_device(vdev, VFL_TYPE_VIDEO,
video_nr[dev->nr]);
if (err < 0)
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index d3da7f4297af..fa4ca002ed19 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1138,7 +1138,7 @@ static int blackbird_register_video(struct cx8802_dev *dev)
V4L2_CAP_VIDEO_CAPTURE;
if (dev->core->board.tuner_type != UNSET)
dev->mpeg_dev.device_caps |= V4L2_CAP_TUNER;
- err = video_register_device(&dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&dev->mpeg_dev, VFL_TYPE_VIDEO, -1);
if (err < 0) {
pr_info("can't register mpeg device\n");
return err;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index b8abcd550604..6aabc45aa93c 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1451,7 +1451,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
V4L2_CAP_VIDEO_CAPTURE;
if (core->board.tuner_type != UNSET)
dev->video_dev.device_caps |= V4L2_CAP_TUNER;
- err = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER,
+ err = video_register_device(&dev->video_dev, VFL_TYPE_VIDEO,
video_nr[core->nr]);
if (err < 0) {
pr_err("can't register video device\n");
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 7480f0d3ad0f..82581aa5a2a3 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -550,7 +550,7 @@ static int dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
IRQF_SHARED, DT3155_NAME, pd);
if (err)
goto err_iounmap;
- err = video_register_device(&pd->vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&pd->vdev, VFL_TYPE_VIDEO, -1);
if (err)
goto err_free_irq;
dev_info(&pdev->dev, "/dev/video%i is ready\n", pd->vdev.minor);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 1adfdc7ab0db..92f5eadf2c99 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1647,7 +1647,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vdev->queue = &q->vbq;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
video_set_drvdata(vdev, cio2);
- r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (r) {
dev_err(&cio2->pci_dev->dev,
"failed to register video device (%d)\n", r);
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index f7de9118f609..f04ee84bab5f 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -99,7 +99,7 @@ static struct {
} ivtv_stream_info[] = {
{ /* IVTV_ENC_STREAM_TYPE_MPG */
"encoder MPG",
- VFL_TYPE_GRABBER, 0,
+ VFL_TYPE_VIDEO, 0,
PCI_DMA_FROMDEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
@@ -107,7 +107,7 @@ static struct {
},
{ /* IVTV_ENC_STREAM_TYPE_YUV */
"encoder YUV",
- VFL_TYPE_GRABBER, IVTV_V4L2_ENC_YUV_OFFSET,
+ VFL_TYPE_VIDEO, IVTV_V4L2_ENC_YUV_OFFSET,
PCI_DMA_FROMDEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
@@ -123,7 +123,7 @@ static struct {
},
{ /* IVTV_ENC_STREAM_TYPE_PCM */
"encoder PCM",
- VFL_TYPE_GRABBER, IVTV_V4L2_ENC_PCM_OFFSET,
+ VFL_TYPE_VIDEO, IVTV_V4L2_ENC_PCM_OFFSET,
PCI_DMA_FROMDEVICE, 0,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -137,7 +137,7 @@ static struct {
},
{ /* IVTV_DEC_STREAM_TYPE_MPG */
"decoder MPG",
- VFL_TYPE_GRABBER, IVTV_V4L2_DEC_MPG_OFFSET,
+ VFL_TYPE_VIDEO, IVTV_V4L2_DEC_MPG_OFFSET,
PCI_DMA_TODEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
@@ -158,7 +158,7 @@ static struct {
},
{ /* IVTV_DEC_STREAM_TYPE_YUV */
"decoder YUV",
- VFL_TYPE_GRABBER, IVTV_V4L2_DEC_YUV_OFFSET,
+ VFL_TYPE_VIDEO, IVTV_V4L2_DEC_YUV_OFFSET,
PCI_DMA_TODEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
@@ -318,7 +318,7 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
name = video_device_node_name(&s->vdev);
switch (vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
IVTV_INFO("Registered device %s for %s (%d kB)\n",
name, s->name, itv->options.kilobytes[type]);
break;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 3a4c29bc0ba5..73e064e6f56d 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1711,7 +1711,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
v4l2_ctrl_handler_setup(&meye.hdl);
meye.vdev.ctrl_handler = &meye.hdl;
- if (video_register_device(&meye.vdev, VFL_TYPE_GRABBER,
+ if (video_register_device(&meye.vdev, VFL_TYPE_VIDEO,
video_nr) < 0) {
v4l2_err(v4l2_dev, "video_register_device failed\n");
goto outvideoreg;
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 2d582c02adbf..e4623ed2f831 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -1214,7 +1214,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (saa7134_no_overlay <= 0)
dev->video_dev->device_caps |= V4L2_CAP_VIDEO_OVERLAY;
- err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
+ err = video_register_device(dev->video_dev,VFL_TYPE_VIDEO,
video_nr[dev->nr]);
if (err < 0) {
pr_info("%s: can't register video device\n",
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index cb65d345fd3e..8ad7879bd840 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -291,7 +291,7 @@ static int empress_init(struct saa7134_dev *dev)
dev->empress_dev->device_caps |= V4L2_CAP_TUNER;
video_set_drvdata(dev->empress_dev, dev);
- err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
+ err = video_register_device(dev->empress_dev,VFL_TYPE_VIDEO,
empress_nr[dev->nr]);
if (err < 0) {
pr_info("%s: can't register video device\n",
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 342cabf48064..a8ac94fadc14 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1008,8 +1008,7 @@ int saa7134_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
*/
if ((dmaq == &dev->video_q && !vb2_is_streaming(&dev->vbi_vbq)) ||
(dmaq == &dev->vbi_q && !vb2_is_streaming(&dev->video_vbq)))
- pm_qos_add_request(&dev->qos_request,
- PM_QOS_CPU_DMA_LATENCY, 20);
+ cpu_latency_qos_add_request(&dev->qos_request, 20);
dmaq->seq_nr = 0;
return 0;
@@ -1024,7 +1023,7 @@ void saa7134_vb2_stop_streaming(struct vb2_queue *vq)
if ((dmaq == &dev->video_q && !vb2_is_streaming(&dev->vbi_vbq)) ||
(dmaq == &dev->vbi_q && !vb2_is_streaming(&dev->video_vbq)))
- pm_qos_remove_request(&dev->qos_request);
+ cpu_latency_qos_remove_request(&dev->qos_request);
}
static const struct vb2_ops vb2_qops = {
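
The saa7134 hunks above switch from the raw PM QoS API to the dedicated CPU latency helpers. A minimal sketch of the paired add/remove usage, assuming a driver-owned request struct (names here are hypothetical):

	#include <linux/pm_qos.h>

	struct my_dev {
		struct pm_qos_request qos_request;	/* illustrative container */
	};

	static void my_start_streaming(struct my_dev *dev)
	{
		/* cap CPU wakeup latency at 20 us while DMA runs; replaces
		 * pm_qos_add_request(..., PM_QOS_CPU_DMA_LATENCY, 20) */
		cpu_latency_qos_add_request(&dev->qos_request, 20);
	}

	static void my_stop_streaming(struct my_dev *dev)
	{
		/* drop the constraint once streaming stops */
		cpu_latency_qos_remove_request(&dev->qos_request);
	}
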
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index f96226930670..2214c74bbbf1 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -289,7 +289,7 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
- ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
+ ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_VIDEO);
if (ret < 0) {
pr_err("cannot register capture v4l2 device. skipping.\n");
saa7146_vv_release(dev);
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index bf5e55348f15..39d14c179d22 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -362,7 +362,7 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
- if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
+ if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_VIDEO)) {
pr_err("cannot register capture v4l2 device. skipping.\n");
return -1;
}
diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
index e6a71c17566d..129a1f8ebe1a 100644
--- a/drivers/media/pci/saa7146/mxb.c
+++ b/drivers/media/pci/saa7146/mxb.c
@@ -707,7 +707,7 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
vv_data.vid_ops.vidioc_g_register = vidioc_g_register;
vv_data.vid_ops.vidioc_s_register = vidioc_s_register;
#endif
- if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
+ if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_VIDEO)) {
ERR("cannot register capture v4l2 device. skipping.\n");
saa7146_vv_release(dev);
return -1;
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 3fca7257a720..11e1eb6a6809 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1087,7 +1087,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
v4l2_ctrl_handler_setup(hdl);
video_set_drvdata(port->v4l_device, port);
result = video_register_device(port->v4l_device,
- VFL_TYPE_GRABBER, -1);
+ VFL_TYPE_VIDEO, -1);
if (result < 0) {
printk(KERN_INFO "%s: can't register mpeg device\n",
dev->name);
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 476d7f3b32d6..cbf85231b708 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -1304,7 +1304,7 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
solo_enc->vfd->queue = &solo_enc->vidq;
solo_enc->vfd->lock = &solo_enc->lock;
video_set_drvdata(solo_enc->vfd, solo_enc);
- ret = video_register_device(solo_enc->vfd, VFL_TYPE_GRABBER, nr);
+ ret = video_register_device(solo_enc->vfd, VFL_TYPE_VIDEO, nr);
if (ret < 0)
goto vdev_release;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index 78792067e920..54434f3c428d 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -692,7 +692,7 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
while (erase_off(solo_dev))
/* Do nothing */;
- ret = video_register_device(solo_dev->vfd, VFL_TYPE_GRABBER, nr);
+ ret = video_register_device(solo_dev->vfd, VFL_TYPE_VIDEO, nr);
if (ret < 0)
goto fail;
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index fd3de3bb0c89..798574cfad35 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1069,7 +1069,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
vip->video_dev.lock = &vip->v4l_lock;
video_set_drvdata(&vip->video_dev, vip);
- ret = video_register_device(&vip->video_dev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&vip->video_dev, VFL_TYPE_VIDEO, -1);
if (ret)
goto vrelease;
diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/media/pci/ttpci/av7110_v4l.c
index f3d6c3cdb872..cabe006658dd 100644
--- a/drivers/media/pci/ttpci/av7110_v4l.c
+++ b/drivers/media/pci/ttpci/av7110_v4l.c
@@ -831,7 +831,7 @@ int av7110_init_v4l(struct av7110 *av7110)
if (FW_VERSION(av7110->arm_app) < 0x2623)
vv_data->capabilities &= ~V4L2_CAP_SLICED_VBI_OUTPUT;
- if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
+ if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_VIDEO)) {
ERR("cannot register capture device. skipping\n");
saa7146_vv_release(dev);
return -ENODEV;
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index e2d482af2367..38cac508bd72 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1470,7 +1470,7 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
- if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
+ if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
/* fixme: proper cleanup here */
ERR("cannot register capture v4l2 device\n");
saa7146_vv_release(dev);
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 09732eed7eb4..ec1e06da7e4f 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -1156,7 +1156,7 @@ static int tw5864_video_input_init(struct tw5864_input *input, int video_nr)
input->gop = GOP_SIZE;
input->frame_interval = 1;
- ret = video_register_device(&input->vdev, VFL_TYPE_GRABBER, video_nr);
+ ret = video_register_device(&input->vdev, VFL_TYPE_VIDEO, video_nr);
if (ret)
goto free_v4l2_hdl;
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 2fb82d50c53e..10986fcd66a5 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -962,7 +962,7 @@ int tw68_video_init2(struct tw68_dev *dev, int video_nr)
dev->vdev.lock = &dev->lock;
dev->vdev.queue = &dev->vidq;
video_set_drvdata(&dev->vdev, dev);
- return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr);
+ return video_register_device(&dev->vdev, VFL_TYPE_VIDEO, video_nr);
}
/*
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 9be8c6e4fb69..1ced2b0ddb24 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -1282,7 +1282,7 @@ int tw686x_video_init(struct tw686x_dev *dev)
vc->device = vdev;
video_set_drvdata(vdev, vc);
- err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (err < 0)
goto error;
vc->num = vdev->num;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f65e98d3adf2..e01bbb9dd1c1 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -507,6 +507,18 @@ config VIDEO_SUN8I_DEINTERLACE
capability found on some SoCs, like H3.
To compile this driver as a module choose m here.
+config VIDEO_SUN8I_ROTATE
+ tristate "Allwinner DE2 rotation driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ depends on PM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Allwinner DE2 rotation unit.
+ To compile this driver as a module choose m here.
+
endif # V4L_MEM2MEM_DRIVERS
# TI VIDEO PORT Helper Modules
@@ -610,49 +622,49 @@ config CEC_GPIO
between compatible devices.
config VIDEO_SAMSUNG_S5P_CEC
- tristate "Samsung S5P CEC driver"
- depends on ARCH_EXYNOS || COMPILE_TEST
- select CEC_CORE
- select CEC_NOTIFIER
- help
- This is a driver for Samsung S5P HDMI CEC interface. It uses the
- generic CEC framework interface.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
+ tristate "Samsung S5P CEC driver"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for Samsung S5P HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
config VIDEO_STI_HDMI_CEC
- tristate "STMicroelectronics STiH4xx HDMI CEC driver"
- depends on ARCH_STI || COMPILE_TEST
- select CEC_CORE
- select CEC_NOTIFIER
- help
- This is a driver for STIH4xx HDMI CEC interface. It uses the
- generic CEC framework interface.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
+ tristate "STMicroelectronics STiH4xx HDMI CEC driver"
+ depends on ARCH_STI || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for STIH4xx HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
config VIDEO_STM32_HDMI_CEC
- tristate "STMicroelectronics STM32 HDMI CEC driver"
- depends on ARCH_STM32 || COMPILE_TEST
- select REGMAP
- select REGMAP_MMIO
- select CEC_CORE
- help
- This is a driver for STM32 interface. It uses the
- generic CEC framework interface.
- CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
+ tristate "STMicroelectronics STM32 HDMI CEC driver"
+ depends on ARCH_STM32 || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ select CEC_CORE
+ help
+ This is a driver for STM32 interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
config VIDEO_TEGRA_HDMI_CEC
- tristate "Tegra HDMI CEC driver"
- depends on ARCH_TEGRA || COMPILE_TEST
- select CEC_CORE
- select CEC_NOTIFIER
- help
- This is a driver for the Tegra HDMI CEC interface. It uses the
- generic CEC framework interface.
- The CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
+ tristate "Tegra HDMI CEC driver"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for the Tegra HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
config VIDEO_SECO_CEC
tristate "SECO Boards HDMI CEC driver"
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 09104304bd06..66079cc41f38 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -285,6 +285,7 @@ vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
+ ccdcparam->data_sz > VPFE_CCDC_DATA_8BITS ||
max_gamma > max_data) {
vpfe_dbg(1, vpfe, "Invalid data line select\n");
return -EINVAL;
@@ -324,7 +325,7 @@ static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
u32 dma_cntl, pcr;
pcr = vpfe_reg_read(ccdc, VPFE_PCR);
@@ -348,7 +349,7 @@ static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
struct vpfe_ccdc_config_params_raw raw_params;
int x;
@@ -504,7 +505,7 @@ vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
*/
static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
struct vpfe_ccdc_config_params_raw *config_params =
&ccdc->ccdc_cfg.bayer.config_params;
struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
@@ -609,7 +610,7 @@ static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n",
__func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
@@ -741,7 +742,7 @@ static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
struct vpfe_hw_if_param *params)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
ccdc->ccdc_cfg.if_type = params->if_type;
@@ -2267,7 +2268,7 @@ static int vpfe_probe_complete(struct vpfe_device *vpfe)
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE;
video_set_drvdata(vdev, vpfe);
- err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&vpfe->video_dev, VFL_TYPE_VIDEO, -1);
if (err) {
vpfe_err(vpfe,
"Unable to register video device.\n");
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index d8593cb2ae84..7d98db1d9b52 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -1,4 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2020 IBM Corp.
+// Copyright (c) 2019-2020 Intel Corporation
#include <linux/atomic.h>
#include <linux/bitfield.h>
@@ -72,11 +74,8 @@
#define VE_SEQ_CTRL_CAP_BUSY BIT(16)
#define VE_SEQ_CTRL_COMP_BUSY BIT(18)
-#ifdef CONFIG_MACH_ASPEED_G5
-#define VE_SEQ_CTRL_JPEG_MODE BIT(13) /* AST2500 */
-#else
-#define VE_SEQ_CTRL_JPEG_MODE BIT(8) /* AST2400 */
-#endif /* CONFIG_MACH_ASPEED_G5 */
+#define AST2500_VE_SEQ_CTRL_JPEG_MODE BIT(13)
+#define AST2400_VE_SEQ_CTRL_JPEG_MODE BIT(8)
#define VE_CTRL 0x008
#define VE_CTRL_HSYNC_POL BIT(0)
@@ -133,7 +132,8 @@
#define VE_COMP_CTRL_HQ_DCT_CHR GENMASK(26, 22)
#define VE_COMP_CTRL_HQ_DCT_LUM GENMASK(31, 27)
-#define VE_OFFSET_COMP_STREAM 0x078
+#define AST2400_VE_COMP_SIZE_READ_BACK 0x078
+#define AST2600_VE_COMP_SIZE_READ_BACK 0x084
#define VE_SRC_LR_EDGE_DET 0x090
#define VE_SRC_LR_EDGE_DET_LEFT GENMASK(11, 0)
@@ -220,6 +220,9 @@ struct aspeed_video {
struct video_device vdev;
struct mutex video_lock; /* v4l2 and videobuf2 lock */
+ u32 jpeg_mode;
+ u32 comp_size_read;
+
wait_queue_head_t wait;
spinlock_t lock; /* buffer list lock */
struct delayed_work res_work;
@@ -243,6 +246,26 @@ struct aspeed_video {
#define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev)
+struct aspeed_video_config {
+ u32 jpeg_mode;
+ u32 comp_size_read;
+};
+
+static const struct aspeed_video_config ast2400_config = {
+ .jpeg_mode = AST2400_VE_SEQ_CTRL_JPEG_MODE,
+ .comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK,
+};
+
+static const struct aspeed_video_config ast2500_config = {
+ .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE,
+ .comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK,
+};
+
+static const struct aspeed_video_config ast2600_config = {
+ .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE,
+ .comp_size_read = AST2600_VE_COMP_SIZE_READ_BACK,
+};
+
static const u32 aspeed_video_jpeg_header[ASPEED_VIDEO_JPEG_HEADER_SIZE] = {
0xe0ffd8ff, 0x464a1000, 0x01004649, 0x60000101, 0x00006000, 0x0f00feff,
0x00002d05, 0x00000000, 0x00000000, 0x00dbff00
@@ -572,7 +595,7 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
if (sts & VE_INTERRUPT_COMP_COMPLETE) {
struct aspeed_video_buffer *buf;
u32 frame_size = aspeed_video_read(video,
- VE_OFFSET_COMP_STREAM);
+ video->comp_size_read);
spin_lock(&video->lock);
clear_bit(VIDEO_FRAME_INPRG, &video->flags);
@@ -907,7 +930,7 @@ static void aspeed_video_init_regs(struct aspeed_video *video)
FIELD_PREP(VE_COMP_CTRL_DCT_LUM, video->jpeg_quality) |
FIELD_PREP(VE_COMP_CTRL_DCT_CHR, video->jpeg_quality | 0x10);
u32 ctrl = VE_CTRL_AUTO_OR_CURSOR;
- u32 seq_ctrl = VE_SEQ_CTRL_JPEG_MODE;
+ u32 seq_ctrl = video->jpeg_mode;
if (video->frame_rate)
ctrl |= FIELD_PREP(VE_CTRL_FRC, video->frame_rate);
@@ -1565,14 +1588,14 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
V4L2_CAP_STREAMING;
vdev->v4l2_dev = v4l2_dev;
strscpy(vdev->name, DEVICE_NAME, sizeof(vdev->name));
- vdev->vfl_type = VFL_TYPE_GRABBER;
+ vdev->vfl_type = VFL_TYPE_VIDEO;
vdev->vfl_dir = VFL_DIR_RX;
vdev->release = video_device_release_empty;
vdev->ioctl_ops = &aspeed_video_ioctl_ops;
vdev->lock = &video->video_lock;
video_set_drvdata(vdev, video);
- rc = video_register_device(vdev, VFL_TYPE_GRABBER, 0);
+ rc = video_register_device(vdev, VFL_TYPE_VIDEO, 0);
if (rc) {
vb2_queue_release(vbq);
v4l2_ctrl_handler_free(&video->ctrl_handler);
@@ -1653,16 +1676,37 @@ err_unprepare_eclk:
return rc;
}
+static const struct of_device_id aspeed_video_of_match[] = {
+ { .compatible = "aspeed,ast2400-video-engine", .data = &ast2400_config },
+ { .compatible = "aspeed,ast2500-video-engine", .data = &ast2500_config },
+ { .compatible = "aspeed,ast2600-video-engine", .data = &ast2600_config },
+ {}
+};
+MODULE_DEVICE_TABLE(of, aspeed_video_of_match);
+
static int aspeed_video_probe(struct platform_device *pdev)
{
+ const struct aspeed_video_config *config;
+ const struct of_device_id *match;
+ struct aspeed_video *video;
int rc;
- struct resource *res;
- struct aspeed_video *video =
- devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL);
+ video = devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL);
if (!video)
return -ENOMEM;
+ video->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(video->base))
+ return PTR_ERR(video->base);
+
+ match = of_match_node(aspeed_video_of_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+
+ config = match->data;
+ video->jpeg_mode = config->jpeg_mode;
+ video->comp_size_read = config->comp_size_read;
+
video->frame_rate = 30;
video->dev = &pdev->dev;
spin_lock_init(&video->lock);
@@ -1671,13 +1715,6 @@ static int aspeed_video_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work);
INIT_LIST_HEAD(&video->buffers);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- video->base = devm_ioremap_resource(video->dev, res);
-
- if (IS_ERR(video->base))
- return PTR_ERR(video->base);
-
rc = aspeed_video_init(video);
if (rc)
return rc;
@@ -1716,13 +1753,6 @@ static int aspeed_video_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id aspeed_video_of_match[] = {
- { .compatible = "aspeed,ast2400-video-engine" },
- { .compatible = "aspeed,ast2500-video-engine" },
- {}
-};
-MODULE_DEVICE_TABLE(of, aspeed_video_of_match);
-
static struct platform_driver aspeed_video_driver = {
.driver = {
.name = DEVICE_NAME,
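
The aspeed-video rework above trades a compile-time #ifdef register layout for per-SoC data attached to the OF match table, so one binary can drive AST2400/2500/2600. A minimal sketch of the probe-time lookup, reusing the names introduced in the hunks:

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		const struct of_device_id *match;
		const struct aspeed_video_config *config;

		/* pick the per-SoC configuration attached via .data
		 * instead of compiling in a single register layout */
		match = of_match_node(aspeed_video_of_match, pdev->dev.of_node);
		if (!match)
			return -EINVAL;

		config = match->data;
		/* config->jpeg_mode / config->comp_size_read now select
		 * the right bits and offsets at runtime */
		return 0;
	}
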
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index d7669a03e98e..a6e9797a0ec9 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -22,6 +22,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
+#include <linux/atmel-isc-media.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -224,10 +225,35 @@ const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
(((mbus_code) == MEDIA_BUS_FMT_Y10_1X10) | \
(((mbus_code) == MEDIA_BUS_FMT_Y8_1X8)))
+#define ISC_CTRL_ISC_TO_V4L2(x) ((x) == ISC_WB_O_ZERO_VAL ? 0 : (x))
+#define ISC_CTRL_V4L2_TO_ISC(x) ((x) ? (x) : ISC_WB_O_ZERO_VAL)
+
+static inline void isc_update_v4l2_ctrls(struct isc_device *isc)
+{
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ /* Here we set the v4l2 controls from our pipeline config */
+ v4l2_ctrl_s_ctrl(isc->r_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_R]);
+ v4l2_ctrl_s_ctrl(isc->b_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_B]);
+ v4l2_ctrl_s_ctrl(isc->gr_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GR]);
+ v4l2_ctrl_s_ctrl(isc->gb_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GB]);
+
+ v4l2_ctrl_s_ctrl(isc->r_off_ctrl,
+ ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_R]));
+ v4l2_ctrl_s_ctrl(isc->b_off_ctrl,
+ ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_B]));
+ v4l2_ctrl_s_ctrl(isc->gr_off_ctrl,
+ ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_GR]));
+ v4l2_ctrl_s_ctrl(isc->gb_off_ctrl,
+ ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_GB]));
+}
+
static inline void isc_update_awb_ctrls(struct isc_device *isc)
{
struct isc_ctrls *ctrls = &isc->ctrls;
+ /* Here we set the actual hardware pipeline config */
+
regmap_write(isc->regmap, ISC_WB_O_RGR,
(ISC_WB_O_ZERO_VAL - (ctrls->offset[ISC_HIS_CFG_MODE_R])) |
((ISC_WB_O_ZERO_VAL - ctrls->offset[ISC_HIS_CFG_MODE_GR]) << 16));
@@ -662,11 +688,9 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
bay_cfg = isc->config.sd_format->cfa_baycfg;
- if (ctrls->awb == ISC_WB_NONE)
- isc_reset_awb_ctrls(isc);
-
regmap_write(regmap, ISC_WB_CFG, bay_cfg);
isc_update_awb_ctrls(isc);
+ isc_update_v4l2_ctrls(isc);
regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL);
@@ -1396,6 +1420,7 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
isc->try_config.sd_format != isc->config.sd_format) {
isc->ctrls.hist_stat = HIST_INIT;
isc_reset_awb_ctrls(isc);
+ isc_update_v4l2_ctrls(isc);
}
/* make the try configuration active */
isc->config = isc->try_config;
@@ -1814,10 +1839,6 @@ static void isc_awb_work(struct work_struct *w)
ctrls->hist_id = hist_id;
baysel = isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
- /* if no more auto white balance, reset controls. */
- if (ctrls->awb == ISC_WB_NONE)
- isc_reset_awb_ctrls(isc);
-
pm_runtime_get_sync(isc->dev);
/*
@@ -1842,6 +1863,8 @@ static void isc_awb_work(struct work_struct *w)
if (ctrls->awb == ISC_WB_ONETIME) {
v4l2_info(&isc->v4l2_dev,
"Completed one time white-balance adjustment.\n");
+ /* update the v4l2 control values */
+ isc_update_v4l2_ctrls(isc);
ctrls->awb = ISC_WB_NONE;
}
}
@@ -1873,6 +1896,27 @@ static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_GAMMA:
ctrls->gamma_index = ctrl->val;
break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops isc_ctrl_ops = {
+ .s_ctrl = isc_s_ctrl,
+};
+
+static int isc_s_awb_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isc_device *isc = container_of(ctrl->handler,
+ struct isc_device, ctrls.handler);
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
if (ctrl->val == 1)
ctrls->awb = ISC_WB_AUTO;
@@ -1883,36 +1927,142 @@ static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
if (!isc->config.sd_format)
break;
- if (ctrls->hist_stat != HIST_ENABLED)
- isc_reset_awb_ctrls(isc);
+ /* configure the controls with new values from v4l2 */
+ if (ctrl->cluster[ISC_CTRL_R_GAIN]->is_new)
+ ctrls->gain[ISC_HIS_CFG_MODE_R] = isc->r_gain_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_B_GAIN]->is_new)
+ ctrls->gain[ISC_HIS_CFG_MODE_B] = isc->b_gain_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_GR_GAIN]->is_new)
+ ctrls->gain[ISC_HIS_CFG_MODE_GR] = isc->gr_gain_ctrl->val;
+ if (ctrl->cluster[ISC_CTRL_GB_GAIN]->is_new)
+ ctrls->gain[ISC_HIS_CFG_MODE_GB] = isc->gb_gain_ctrl->val;
+
+ if (ctrl->cluster[ISC_CTRL_R_OFF]->is_new)
+ ctrls->offset[ISC_HIS_CFG_MODE_R] =
+ ISC_CTRL_V4L2_TO_ISC(isc->r_off_ctrl->val);
+ if (ctrl->cluster[ISC_CTRL_B_OFF]->is_new)
+ ctrls->offset[ISC_HIS_CFG_MODE_B] =
+ ISC_CTRL_V4L2_TO_ISC(isc->b_off_ctrl->val);
+ if (ctrl->cluster[ISC_CTRL_GR_OFF]->is_new)
+ ctrls->offset[ISC_HIS_CFG_MODE_GR] =
+ ISC_CTRL_V4L2_TO_ISC(isc->gr_off_ctrl->val);
+ if (ctrl->cluster[ISC_CTRL_GB_OFF]->is_new)
+ ctrls->offset[ISC_HIS_CFG_MODE_GB] =
+ ISC_CTRL_V4L2_TO_ISC(isc->gb_off_ctrl->val);
- if (isc->ctrls.awb == ISC_WB_AUTO &&
+ isc_update_awb_ctrls(isc);
+
+ if (vb2_is_streaming(&isc->vb2_vidq)) {
+ /*
+ * If we are streaming, we can update the profile to
+ * put the new settings in place.
+ */
+ isc_update_profile(isc);
+ } else {
+ /*
+ * The auto cluster will automatically activate this
+ * control. It has to be deactivated when not
+ * streaming.
+ */
+ v4l2_ctrl_activate(isc->do_wb_ctrl, false);
+ }
+
+ /* if auto white balance is on, start the histogram procedure */
+ if (ctrls->awb == ISC_WB_AUTO &&
vb2_is_streaming(&isc->vb2_vidq) &&
ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
isc_set_histogram(isc, true);
- break;
- case V4L2_CID_DO_WHITE_BALANCE:
- /* if AWB is enabled, do nothing */
- if (ctrls->awb == ISC_WB_AUTO)
- return 0;
+ /*
+ * For one-time white balance adjustment, check the button;
+ * if it was pressed, perform the one-time operation.
+ */
+ if (ctrls->awb == ISC_WB_NONE &&
+ ctrl->cluster[ISC_CTRL_DO_WB]->is_new &&
+ !(ctrl->cluster[ISC_CTRL_DO_WB]->flags &
+ V4L2_CTRL_FLAG_INACTIVE)) {
+ ctrls->awb = ISC_WB_ONETIME;
+ isc_set_histogram(isc, true);
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "One time white-balance started.\n");
+ }
+ return 0;
+ }
+ return 0;
+}
- ctrls->awb = ISC_WB_ONETIME;
- isc_set_histogram(isc, true);
- v4l2_dbg(1, debug, &isc->v4l2_dev,
- "One time white-balance started.\n");
+static int isc_g_volatile_awb_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isc_device *isc = container_of(ctrl->handler,
+ struct isc_device, ctrls.handler);
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ switch (ctrl->id) {
+ /* being a cluster, this case is entered for every control in it */
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrl->cluster[ISC_CTRL_R_GAIN]->val =
+ ctrls->gain[ISC_HIS_CFG_MODE_R];
+ ctrl->cluster[ISC_CTRL_B_GAIN]->val =
+ ctrls->gain[ISC_HIS_CFG_MODE_B];
+ ctrl->cluster[ISC_CTRL_GR_GAIN]->val =
+ ctrls->gain[ISC_HIS_CFG_MODE_GR];
+ ctrl->cluster[ISC_CTRL_GB_GAIN]->val =
+ ctrls->gain[ISC_HIS_CFG_MODE_GB];
+
+ ctrl->cluster[ISC_CTRL_R_OFF]->val =
+ ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_R]);
+ ctrl->cluster[ISC_CTRL_B_OFF]->val =
+ ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_B]);
+ ctrl->cluster[ISC_CTRL_GR_OFF]->val =
+ ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_GR]);
+ ctrl->cluster[ISC_CTRL_GB_OFF]->val =
+ ISC_CTRL_ISC_TO_V4L2(ctrls->offset[ISC_HIS_CFG_MODE_GB]);
break;
- default:
- return -EINVAL;
}
-
return 0;
}
-static const struct v4l2_ctrl_ops isc_ctrl_ops = {
- .s_ctrl = isc_s_ctrl,
+static const struct v4l2_ctrl_ops isc_awb_ops = {
+ .s_ctrl = isc_s_awb_ctrl,
+ .g_volatile_ctrl = isc_g_volatile_awb_ctrl,
};
+#define ISC_CTRL_OFF(_name, _id, _name_str) \
+ static const struct v4l2_ctrl_config _name = { \
+ .ops = &isc_awb_ops, \
+ .id = _id, \
+ .name = _name_str, \
+ .type = V4L2_CTRL_TYPE_INTEGER, \
+ .flags = V4L2_CTRL_FLAG_SLIDER, \
+ .min = -4095, \
+ .max = 4095, \
+ .step = 1, \
+ .def = 0, \
+ }
+
+ISC_CTRL_OFF(isc_r_off_ctrl, ISC_CID_R_OFFSET, "Red Component Offset");
+ISC_CTRL_OFF(isc_b_off_ctrl, ISC_CID_B_OFFSET, "Blue Component Offset");
+ISC_CTRL_OFF(isc_gr_off_ctrl, ISC_CID_GR_OFFSET, "Green Red Component Offset");
+ISC_CTRL_OFF(isc_gb_off_ctrl, ISC_CID_GB_OFFSET, "Green Blue Component Offset");
+
+#define ISC_CTRL_GAIN(_name, _id, _name_str) \
+ static const struct v4l2_ctrl_config _name = { \
+ .ops = &isc_awb_ops, \
+ .id = _id, \
+ .name = _name_str, \
+ .type = V4L2_CTRL_TYPE_INTEGER, \
+ .flags = V4L2_CTRL_FLAG_SLIDER, \
+ .min = 0, \
+ .max = 8191, \
+ .step = 1, \
+ .def = 512, \
+ }
+
+ISC_CTRL_GAIN(isc_r_gain_ctrl, ISC_CID_R_GAIN, "Red Component Gain");
+ISC_CTRL_GAIN(isc_b_gain_ctrl, ISC_CID_B_GAIN, "Blue Component Gain");
+ISC_CTRL_GAIN(isc_gr_gain_ctrl, ISC_CID_GR_GAIN, "Green Red Component Gain");
+ISC_CTRL_GAIN(isc_gb_gain_ctrl, ISC_CID_GB_GAIN, "Green Blue Component Gain");
+
static int isc_ctrl_init(struct isc_device *isc)
{
const struct v4l2_ctrl_ops *ops = &isc_ctrl_ops;
@@ -1923,7 +2073,7 @@ static int isc_ctrl_init(struct isc_device *isc)
ctrls->hist_stat = HIST_INIT;
isc_reset_awb_ctrls(isc);
- ret = v4l2_ctrl_handler_init(hdl, 5);
+ ret = v4l2_ctrl_handler_init(hdl, 13);
if (ret < 0)
return ret;
@@ -1933,10 +2083,13 @@ static int isc_ctrl_init(struct isc_device *isc)
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 2);
- v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ isc->awb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
/* do_white_balance is a button, so min,max,step,default are ignored */
- isc->do_wb_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_DO_WHITE_BALANCE,
+ isc->do_wb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
+ V4L2_CID_DO_WHITE_BALANCE,
0, 0, 0, 0);
if (!isc->do_wb_ctrl) {
@@ -1947,6 +2100,21 @@ static int isc_ctrl_init(struct isc_device *isc)
v4l2_ctrl_activate(isc->do_wb_ctrl, false);
+ isc->r_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_gain_ctrl, NULL);
+ isc->b_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_gain_ctrl, NULL);
+ isc->gr_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_gain_ctrl, NULL);
+ isc->gb_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_gain_ctrl, NULL);
+ isc->r_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_off_ctrl, NULL);
+ isc->b_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_off_ctrl, NULL);
+ isc->gr_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_off_ctrl, NULL);
+ isc->gb_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_off_ctrl, NULL);
+
+ /*
+ * The cluster is in auto mode when auto white balance is
+ * enabled and in manual mode otherwise.
+ */
+ v4l2_ctrl_auto_cluster(10, &isc->awb_ctrl, 0, true);
+
v4l2_ctrl_handler_setup(hdl);
return 0;
@@ -2143,7 +2311,7 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vdev, isc);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
"video_register_device failed: %d\n", ret);
diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h
index bfaed2fad2b5..fc56a745c7d1 100644
--- a/drivers/media/platform/atmel/atmel-isc.h
+++ b/drivers/media/platform/atmel/atmel-isc.h
@@ -213,7 +213,6 @@ struct isc_device {
struct fmt_config try_config;
struct isc_ctrls ctrls;
- struct v4l2_ctrl *do_wb_ctrl;
struct work_struct awb_work;
struct mutex lock; /* serialize access to file operations */
@@ -223,6 +222,28 @@ struct isc_device {
struct isc_subdev_entity *current_subdev;
struct list_head subdev_entities;
+
+ struct {
+#define ISC_CTRL_DO_WB 1
+#define ISC_CTRL_R_GAIN 2
+#define ISC_CTRL_B_GAIN 3
+#define ISC_CTRL_GR_GAIN 4
+#define ISC_CTRL_GB_GAIN 5
+#define ISC_CTRL_R_OFF 6
+#define ISC_CTRL_B_OFF 7
+#define ISC_CTRL_GR_OFF 8
+#define ISC_CTRL_GB_OFF 9
+ struct v4l2_ctrl *awb_ctrl;
+ struct v4l2_ctrl *do_wb_ctrl;
+ struct v4l2_ctrl *r_gain_ctrl;
+ struct v4l2_ctrl *b_gain_ctrl;
+ struct v4l2_ctrl *gr_gain_ctrl;
+ struct v4l2_ctrl *gb_gain_ctrl;
+ struct v4l2_ctrl *r_off_ctrl;
+ struct v4l2_ctrl *b_off_ctrl;
+ struct v4l2_ctrl *gr_off_ctrl;
+ struct v4l2_ctrl *gb_off_ctrl;
+ };
};
#define GAMMA_MAX 2
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 963dfd6e750e..d74aa73f26be 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1094,7 +1094,7 @@ static int isi_graph_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
- ret = video_register_device(isi->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(isi->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(isi->dev, "Failed to register video device\n");
return ret;
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index acff10ad257a..d0d093dd8f7c 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2726,7 +2726,7 @@ static int coda_register_device(struct coda_dev *dev, int i)
v4l2_disable_ioctl(vfd, VIDIOC_G_CROP);
v4l2_disable_ioctl(vfd, VIDIOC_S_CROP);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (!ret)
v4l2_info(&dev->v4l2_dev, "%s registered as %s\n",
type == CODA_INST_ENCODER ? "encoder" : "decoder",
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index b49378b18e5d..c98edb67cfb2 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -29,7 +29,7 @@
#include "ccdc_hw_device.h"
/* Defaults for module configuration parameters */
-static struct isif_config_params_raw isif_config_defaults = {
+static const struct isif_config_params_raw isif_config_defaults = {
.linearize = {
.en = 0,
.corr_shft = ISIF_NO_SHIFT,
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index ae419958e420..38d3088d4d38 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -1339,7 +1339,7 @@ static int register_device(struct vpbe_layer *vpbe_display_layer,
vpbe_display_layer->video_dev.queue = &vpbe_display_layer->buffer_queue;
err = video_register_device(&vpbe_display_layer->video_dev,
- VFL_TYPE_GRABBER,
+ VFL_TYPE_VIDEO,
-1);
if (err)
return -ENODEV;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 9b1d9643589b..f9f7dd17c57c 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -880,7 +880,7 @@ static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
/* Fill in the information about format */
pix_fmt = vpfe_lookup_pix_format(pix);
if (pix_fmt) {
- fmt->pixelformat = fmt->pixelformat;
+ fmt->pixelformat = pix_fmt->pixelformat;
return 0;
}
return -EINVAL;
@@ -1780,7 +1780,7 @@ static int vpfe_probe(struct platform_device *pdev)
"video_dev=%p\n", &vpfe_dev->video_dev);
vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = video_register_device(&vpfe_dev->video_dev,
- VFL_TYPE_GRABBER, -1);
+ VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(pdev->dev.driver,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 71f4fe882d13..d9ec439faefa 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1466,7 +1466,7 @@ static int vpif_probe_complete(void)
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
video_set_drvdata(&ch->video_dev, ch);
err = video_register_device(vdev,
- VFL_TYPE_GRABBER, (j ? 1 : 0));
+ VFL_TYPE_VIDEO, (j ? 1 : 0));
if (err)
goto probe_out;
}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index abbdbac08e6f..ead14c49d4f5 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1214,7 +1214,7 @@ static int vpif_probe_complete(void)
vdev->lock = &common->lock;
vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
video_set_drvdata(&ch->video_dev, ch);
- err = video_register_device(vdev, VFL_TYPE_GRABBER,
+ err = video_register_device(vdev, VFL_TYPE_VIDEO,
(j ? 3 : 2));
if (err < 0)
goto probe_out;
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 35a1d0d6dd66..e2c162635f72 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -771,7 +771,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
return PTR_ERR(gsc->m2m.m2m_dev);
}
- ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&gsc->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(&pdev->dev,
"%s(): failed to register video device\n", __func__);
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 989cb34f19b1..be4effcbfe7b 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -13,7 +13,7 @@ config VIDEO_SAMSUNG_EXYNOS4_IS
if VIDEO_SAMSUNG_EXYNOS4_IS
config VIDEO_EXYNOS4_IS_COMMON
- tristate
+ tristate
config VIDEO_S5P_FIMC
tristate "S5P/EXYNOS4 FIMC/CAMIF camera interface driver"
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 121d609ff856..705f182330ca 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -1808,7 +1808,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
if (ret)
goto err_me_cleanup;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_ctrl_free;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index d2cbcdca0463..15f443fa7208 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -619,7 +619,7 @@ int fimc_isp_video_device_register(struct fimc_isp *isp,
video_set_drvdata(vdev, isp);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
media_entity_cleanup(&vdev->entity);
return ret;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index e87c6a09205b..394e0818f2d5 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1297,7 +1297,7 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
video_set_drvdata(vfd, fimc);
fimc->ve.pipe = v4l2_get_subdev_hostdata(sd);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
media_entity_cleanup(&vfd->entity);
fimc->ve.pipe = NULL;
@@ -1614,6 +1614,9 @@ static int fimc_lite_remove(struct platform_device *pdev)
struct fimc_lite *fimc = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ if (!pm_runtime_enabled(dev))
+ clk_disable_unprepare(fimc->clock);
+
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
fimc_lite_unregister_capture_subdev(fimc);
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index c70c2cbe3eb1..4acb179556c4 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -746,7 +746,7 @@ int fimc_register_m2m_device(struct fimc_dev *fimc,
if (ret)
goto err_me;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_vd;
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 81a8faedbba6..84633a3b8475 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1486,7 +1486,7 @@ static int viu_of_probe(struct platform_device *op)
mutex_lock(&viu_dev->lock);
- ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(viu_dev->vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
goto err_unlock;
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index 38d942322302..08d76eb05ed1 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -1709,7 +1709,7 @@ static int pxp_probe(struct platform_device *pdev)
goto err_v4l2;
}
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
goto err_m2m;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 9ad24c86c5ab..1f89e71cdccf 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -953,7 +953,7 @@ static int deinterlace_probe(struct platform_device *pdev)
vfd->lock = &pcdev->dev_mutex;
vfd->v4l2_dev = &pcdev->v4l2_dev;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
goto unreg_dev;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 803baf97f06e..09775b6624c6 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1802,7 +1802,7 @@ static int mccic_notify_bound(struct v4l2_async_notifier *notifier,
cam->vdev.lock = &cam->s_mutex;
cam->vdev.queue = &cam->vb_queue;
video_set_drvdata(&cam->vdev, cam);
- ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
cam->sensor = NULL;
goto out;
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index ee802fc3bcdf..f82a81a3bdee 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -1150,7 +1150,7 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
jpeg->dec_vdev->device_caps = V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_M2M_MPLANE;
- ret = video_register_device(jpeg->dec_vdev, VFL_TYPE_GRABBER, 3);
+ ret = video_register_device(jpeg->dec_vdev, VFL_TYPE_VIDEO, 3);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
goto err_dec_vdev_register;
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
index 9afe8161a8c0..14991685adb7 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
@@ -110,6 +110,12 @@ int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
comp->clk[i] = of_clk_get(node, i);
+ if (IS_ERR(comp->clk[i])) {
+ if (PTR_ERR(comp->clk[i]) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clock\n");
+
+ return PTR_ERR(comp->clk[i]);
+ }
/* Only RDMA needs two clocks */
if (comp->type != MTK_MDP_RDMA)
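
The mtk-mdp hunk above makes clock lookup failures fatal while staying quiet on probe deferral. A minimal sketch of that idiom for a single clock (function name is illustrative):

	#include <linux/clk.h>
	#include <linux/device.h>

	static int my_get_clock(struct device *dev, struct device_node *node,
				struct clk **clk)
	{
		*clk = of_clk_get(node, 0);
		if (IS_ERR(*clk)) {
			/* -EPROBE_DEFER is routine: the clock provider may
			 * simply not be bound yet, so don't alarm the log */
			if (PTR_ERR(*clk) != -EPROBE_DEFER)
				dev_err(dev, "Failed to get clock\n");
			return PTR_ERR(*clk);
		}
		return 0;
	}
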
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index 7c9e2d69e21a..821f2cf325f0 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -1229,7 +1229,7 @@ int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp)
goto err_m2m_init;
}
- ret = video_register_device(mdp->vdev, VFL_TYPE_GRABBER, 2);
+ ret = video_register_device(mdp->vdev, VFL_TYPE_VIDEO, 2);
if (ret) {
dev_err(dev, "failed to register video device\n");
goto err_vdev_register;
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
index 6720d11f50cf..b065ccd06914 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
@@ -15,7 +15,7 @@ static inline struct mtk_mdp_ctx *vpu_to_ctx(struct mtk_mdp_vpu *vpu)
return container_of(vpu, struct mtk_mdp_ctx, vpu);
}
-static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg)
+static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg)
{
struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
(unsigned long)msg->ap_inst;
@@ -26,10 +26,11 @@ static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg)
vpu->inst_addr = msg->vpu_inst_addr;
}
-static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv)
+static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len,
+ void *priv)
{
- unsigned int msg_id = *(unsigned int *)data;
- struct mdp_ipi_comm_ack *msg = (struct mdp_ipi_comm_ack *)data;
+ const struct mdp_ipi_comm_ack *msg = data;
+ unsigned int msg_id = msg->msg_id;
struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
(unsigned long)msg->ap_inst;
struct mtk_mdp_ctx *ctx;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 100ae8c5e702..97a1b6664c20 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -331,7 +331,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_event_workq;
}
- ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, 0);
if (ret) {
mtk_v4l2_err("Failed to register video device");
goto err_dec_reg;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 1d82aa2b6017..4d31f1ed113f 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -356,7 +356,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_event_workq;
}
- ret = video_register_device(vfd_enc, VFL_TYPE_GRABBER, 1);
+ ret = video_register_device(vfd_enc, VFL_TYPE_VIDEO, 1);
if (ret) {
mtk_v4l2_err("Failed to register video device");
goto err_enc_reg;
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 24c1f0bf2147..257a5b5ad212 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -110,7 +110,11 @@ struct vp9_sf_ref_fb {
* @buf_len_sz_c : size used to store cbcr plane ufo info (AP-R, VPU-W)
* @profile : profile sparsed from vpu (AP-R, VPU-W)
- * @show_frame : display this frame or not (AP-R, VPU-W)
+ * @show_frame : [BIT(0)] display this frame or not (AP-R, VPU-W)
+ * [BIT(1)] reset segment data or not (AP-R, VPU-W)
+ * [BIT(2)] trigger the decoder hardware or not (AP-R, VPU-W)
+ * [BIT(3)] ask the VPU to set bits 0-4 accordingly (AP-W, VPU-R)
+ * [BIT(4)] do not reset segment data before every frame (AP-R, VPU-W)
* @show_existing_frame : inform this frame is show existing frame
* (AP-R, VPU-W)
* @frm_to_show_idx : index to show frame (AP-R, VPU-W)
@@ -494,12 +498,12 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
frm_to_show->fb->base_y.size);
}
if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
- if (vsi->show_frame)
+ if (vsi->show_frame & BIT(0))
vp9_add_to_fb_disp_list(inst, inst->cur_fb);
}
} else {
if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
- if (vsi->show_frame)
+ if (vsi->show_frame & BIT(0))
vp9_add_to_fb_disp_list(inst, frm_to_show->fb);
}
}
@@ -800,6 +804,9 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
}
inst->vsi = (struct vdec_vp9_vsi *)inst->vpu.vsi;
+
+ inst->vsi->show_frame |= BIT(3);
+
init_all_fb_lists(inst);
ctx->drv_handle = inst;
@@ -870,13 +877,27 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
vsi->sf_frm_sz[idx]);
}
}
- memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size);
+
+ if (!(vsi->show_frame & BIT(4)))
+ memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size);
+
ret = vpu_dec_start(&inst->vpu, data, 3);
if (ret) {
mtk_vcodec_err(inst, "vpu_dec_start failed");
goto DECODE_ERROR;
}
+ if (vsi->show_frame & BIT(1)) {
+ memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size);
+
+ if (vsi->show_frame & BIT(2)) {
+ if (vpu_dec_start(&inst->vpu, NULL, 0)) {
+ mtk_vcodec_err(inst, "vpu trig decoder failed");
+ goto DECODE_ERROR;
+ }
+ }
+ }
+
ret = validate_vsi_array_indexes(inst, vsi);
if (ret) {
mtk_vcodec_err(inst, "Invalid values from VPU.");
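
With show_frame now a bit field shared with the VPU, the decode path above tests individual bits rather than treating the word as a boolean. A minimal sketch of the display check, per the bit meanings documented in the kernel-doc hunk (function name is illustrative):

	#include <linux/bits.h>

	/* only BIT(0) decides display; the other bits carry VPU handshake
	 * state and must not be folded into a plain boolean test */
	static bool vp9_frame_is_shown(u32 show_frame)
	{
		return show_frame & BIT(0);
	}
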
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 70abfd4cd4b9..948a12fd9d46 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -9,7 +9,7 @@
#include "vdec_ipi_msg.h"
#include "vdec_vpu_if.h"
-static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
+static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
{
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
(unsigned long)msg->ap_inst_addr;
@@ -34,9 +34,9 @@ static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
* This function runs in interrupt context and it means there's an IPI MSG
* from VPU.
*/
-static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+static void vpu_dec_ipi_handler(const void *data, unsigned int len, void *priv)
{
- struct vdec_vpu_ipi_ack *msg = data;
+ const struct vdec_vpu_ipi_ack *msg = data;
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
(unsigned long)msg->ap_inst_addr;
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
index 3e931b0ed096..9540709c1905 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
@@ -8,26 +8,26 @@
#include "venc_ipi_msg.h"
#include "venc_vpu_if.h"
-static void handle_enc_init_msg(struct venc_vpu_inst *vpu, void *data)
+static void handle_enc_init_msg(struct venc_vpu_inst *vpu, const void *data)
{
- struct venc_vpu_ipi_msg_init *msg = data;
+ const struct venc_vpu_ipi_msg_init *msg = data;
vpu->inst_addr = msg->vpu_inst_addr;
vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
}
-static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, void *data)
+static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, const void *data)
{
- struct venc_vpu_ipi_msg_enc *msg = data;
+ const struct venc_vpu_ipi_msg_enc *msg = data;
vpu->state = msg->state;
vpu->bs_size = msg->bs_size;
vpu->is_key_frm = msg->is_key_frm;
}
-static void vpu_enc_ipi_handler(void *data, unsigned int len, void *priv)
+static void vpu_enc_ipi_handler(const void *data, unsigned int len, void *priv)
{
- struct venc_vpu_ipi_msg_common *msg = data;
+ const struct venc_vpu_ipi_msg_common *msg = data;
struct venc_vpu_inst *vpu =
(struct venc_vpu_inst *)(unsigned long)msg->venc_inst;
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index a768707abb94..d30c08983f56 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -46,6 +46,8 @@
/* binary firmware name */
#define VPU_P_FW "vpu_p.bin"
#define VPU_D_FW "vpu_d.bin"
+#define VPU_P_FW_NEW "mediatek/mt8173/vpu_p.bin"
+#define VPU_D_FW_NEW "mediatek/mt8173/vpu_d.bin"
#define VPU_RESET 0x0
#define VPU_TCM_CFG 0x0008
@@ -203,8 +205,8 @@ struct mtk_vpu {
struct vpu_run run;
struct vpu_wdt wdt;
struct vpu_ipi_desc ipi_desc[IPI_MAX];
- struct share_obj *recv_buf;
- struct share_obj *send_buf;
+ struct share_obj __iomem *recv_buf;
+ struct share_obj __iomem *send_buf;
struct device *dev;
struct clk *clk;
bool fw_loaded;
@@ -292,7 +294,7 @@ int vpu_ipi_send(struct platform_device *pdev,
unsigned int len)
{
struct mtk_vpu *vpu = platform_get_drvdata(pdev);
- struct share_obj *send_obj = vpu->send_buf;
+ struct share_obj __iomem *send_obj = vpu->send_buf;
unsigned long timeout;
int ret = 0;
@@ -325,9 +327,9 @@ int vpu_ipi_send(struct platform_device *pdev,
}
} while (vpu_cfg_readl(vpu, HOST_TO_VPU));
- memcpy((void *)send_obj->share_buf, buf, len);
- send_obj->len = len;
- send_obj->id = id;
+ memcpy_toio(send_obj->share_buf, buf, len);
+ writel(len, &send_obj->len);
+ writel(id, &send_obj->id);
vpu->ipi_id_ack[id] = false;
/* send the command to VPU */
@@ -477,16 +479,24 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
size_t tcm_size = fw_type ? VPU_DTCM_SIZE : VPU_PTCM_SIZE;
size_t fw_size = fw_type ? VPU_D_FW_SIZE : VPU_P_FW_SIZE;
char *fw_name = fw_type ? VPU_D_FW : VPU_P_FW;
+ char *fw_new_name = fw_type ? VPU_D_FW_NEW : VPU_P_FW_NEW;
const struct firmware *vpu_fw;
size_t dl_size = 0;
size_t extra_fw_size = 0;
void *dest;
int ret;
- ret = request_firmware(&vpu_fw, fw_name, vpu->dev);
+ ret = request_firmware(&vpu_fw, fw_new_name, vpu->dev);
if (ret < 0) {
- dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name, ret);
- return ret;
+ dev_info(vpu->dev, "Failed to load %s, %d, retry\n",
+ fw_new_name, ret);
+
+ ret = request_firmware(&vpu_fw, fw_name, vpu->dev);
+ if (ret < 0) {
+ dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name,
+ ret);
+ return ret;
+ }
}
dl_size = vpu_fw->size;
if (dl_size > fw_size) {
@@ -600,10 +610,10 @@ OUT_LOAD_FW:
}
EXPORT_SYMBOL_GPL(vpu_load_firmware);
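
The hunk above makes firmware loading try the SoC-namespaced path first and fall back to the legacy flat name. A minimal sketch of that request order, reusing the two names from the hunk (the helper itself is illustrative):

	#include <linux/device.h>
	#include <linux/firmware.h>

	static int my_request_fw(struct device *dev, const struct firmware **fw)
	{
		int ret;

		/* prefer the namespaced path under /lib/firmware */
		ret = request_firmware(fw, "mediatek/mt8173/vpu_p.bin", dev);
		if (ret < 0) {
			dev_info(dev, "new path missing (%d), trying legacy name\n",
				 ret);
			ret = request_firmware(fw, "vpu_p.bin", dev);
		}
		return ret;
	}
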
-static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
+static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv)
{
- struct mtk_vpu *vpu = (struct mtk_vpu *)priv;
- struct vpu_run *run = (struct vpu_run *)data;
+ struct mtk_vpu *vpu = priv;
+ const struct vpu_run *run = data;
vpu->run.signaled = run->signaled;
strscpy(vpu->run.fw_ver, run->fw_ver, sizeof(vpu->run.fw_ver));
@@ -700,19 +710,21 @@ static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
static void vpu_ipi_handler(struct mtk_vpu *vpu)
{
- struct share_obj *rcv_obj = vpu->recv_buf;
+ struct share_obj __iomem *rcv_obj = vpu->recv_buf;
struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc;
-
- if (rcv_obj->id < IPI_MAX && ipi_desc[rcv_obj->id].handler) {
- ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf,
- rcv_obj->len,
- ipi_desc[rcv_obj->id].priv);
- if (rcv_obj->id > IPI_VPU_INIT) {
- vpu->ipi_id_ack[rcv_obj->id] = true;
+ unsigned char data[SHARE_BUF_SIZE];
+ s32 id = readl(&rcv_obj->id);
+
+ memcpy_fromio(data, rcv_obj->share_buf, sizeof(data));
+ if (id < IPI_MAX && ipi_desc[id].handler) {
+ ipi_desc[id].handler(data, readl(&rcv_obj->len),
+ ipi_desc[id].priv);
+ if (id > IPI_VPU_INIT) {
+ vpu->ipi_id_ack[id] = true;
wake_up(&vpu->ack_wq);
}
} else {
- dev_err(vpu->dev, "No such ipi id = %d\n", rcv_obj->id);
+ dev_err(vpu->dev, "No such ipi id = %d\n", id);
}
}
@@ -722,11 +734,10 @@ static int vpu_ipi_init(struct mtk_vpu *vpu)
vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
/* shared buffer initialization */
- vpu->recv_buf = (__force struct share_obj *)(vpu->reg.tcm +
- VPU_DTCM_OFFSET);
+ vpu->recv_buf = vpu->reg.tcm + VPU_DTCM_OFFSET;
vpu->send_buf = vpu->recv_buf + 1;
- memset(vpu->recv_buf, 0, sizeof(struct share_obj));
- memset(vpu->send_buf, 0, sizeof(struct share_obj));
+ memset_io(vpu->recv_buf, 0, sizeof(struct share_obj));
+ memset_io(vpu->send_buf, 0, sizeof(struct share_obj));
return 0;
}
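
Since the shared buffers live in device TCM, they are now typed __iomem and accessed only through the MMIO helpers; handlers receive a plain-memory snapshot instead of a pointer into I/O space. A minimal sketch of the receive side, assuming the share_obj layout and SHARE_BUF_SIZE used above:

	#include <linux/io.h>

	/* snapshot I/O memory into ordinary RAM before dispatching, since
	 * handlers must not dereference __iomem pointers directly */
	static void my_recv(struct share_obj __iomem *rcv,
			    void (*handler)(const void *data, unsigned int len))
	{
		unsigned char data[SHARE_BUF_SIZE];

		memcpy_fromio(data, rcv->share_buf, sizeof(data));
		handler(data, readl(&rcv->len));
	}
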
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h
index d4453b4bcee9..ee7c552ce928 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.h
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h
@@ -15,7 +15,7 @@
* VPU interfaces with other blocks by share memory and interrupt.
**/
-typedef void (*ipi_handler_t) (void *data,
+typedef void (*ipi_handler_t) (const void *data,
unsigned int len,
void *priv);
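
With the typedef now taking const void *, handler implementations view the message read-only and can no longer scribble on the shared snapshot. A minimal sketch of a conforming handler, with a hypothetical message layout:

	#include <linux/printk.h>

	struct my_ack { u32 msg_id; };	/* illustrative message layout */

	static void my_ipi_handler(const void *data, unsigned int len, void *priv)
	{
		/* const-qualified view of the snapshot; matches ipi_handler_t */
		const struct my_ack *msg = data;

		pr_debug("ipi msg %u (%u bytes)\n", msg->msg_id, len);
	}
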
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 27779b75df54..df78df59da45 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -866,7 +866,7 @@ static int emmaprp_probe(struct platform_device *pdev)
goto rel_vdev;
}
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n");
goto rel_m2m;
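
/*
 * [Editor's note] VFL_TYPE_GRABBER was renamed upstream to the clearer
 * VFL_TYPE_VIDEO; the registration semantics are unchanged, so this
 * hunk and the matching ones in the drivers below are mechanical
 * renames.
 */
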
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 513b99bf963b..21193f0b7f61 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1500,7 +1500,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
/* Register the Video device with V4L2
*/
vfd = vout->vfd;
- if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
+ if (video_register_device(vfd, VFL_TYPE_VIDEO, -1) < 0) {
dev_err(&pdev->dev,
": Could not register Video for Linux device\n");
vfd->minor = -1;
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 471ae7cdb813..0fbb2aa6dd2c 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -1312,6 +1312,10 @@ static void __ccdc_enable(struct isp_ccdc_device *ccdc, int enable)
{
struct isp_device *isp = to_isp_device(ccdc);
+ /* Avoid restarting the CCDC when streaming is stopping. */
+ if (enable && ccdc->stopping & CCDC_STOP_REQUEST)
+ return;
+
isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR,
ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0);
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index ee183c35ff3b..6f769c527fae 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1311,7 +1311,7 @@ static int isp_video_open(struct file *file)
goto done;
}
- ret = v4l2_pipeline_pm_use(&video->video.entity, 1);
+ ret = v4l2_pipeline_pm_get(&video->video.entity);
if (ret < 0) {
omap3isp_put(video->isp);
goto done;
@@ -1363,7 +1363,7 @@ static int isp_video_release(struct file *file)
vb2_queue_release(&handle->queue);
mutex_unlock(&video->queue_lock);
- v4l2_pipeline_pm_use(&video->video.entity, 0);
+ v4l2_pipeline_pm_put(&video->video.entity);
/* Release the file handle. */
v4l2_fh_del(vfh);
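
/*
 * [Editor's note] The two-state helper v4l2_pipeline_pm_use(entity, 1/0)
 * was split into an explicit pair: v4l2_pipeline_pm_get() returns an
 * error code, v4l2_pipeline_pm_put() returns void.  Sketch of the usual
 * open/release pairing (the demo_* names are invented):
 */
#include <media/v4l2-mc.h>

static int demo_open(struct media_entity *entity)
{
	return v4l2_pipeline_pm_get(entity);	/* power the pipeline up */
}

static void demo_release(struct media_entity *entity)
{
	v4l2_pipeline_pm_put(entity);		/* balanced power-down */
}
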
@@ -1453,7 +1453,7 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
video->video.fops = &isp_video_fops;
snprintf(video->video.name, sizeof(video->video.name),
"OMAP3 ISP %s %s", name, direction);
- video->video.vfl_type = VFL_TYPE_GRABBER;
+ video->video.vfl_type = VFL_TYPE_VIDEO;
video->video.release = video_device_release_empty;
video->video.ioctl_ops = &isp_video_ioctl_ops;
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1484,7 +1484,7 @@ int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
video->video.v4l2_dev = vdev;
- ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
if (ret < 0)
dev_err(video->isp->dev,
"%s: could not register video device (%d)\n",
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 43ae645d866b..70c85a2a10f5 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2191,7 +2191,7 @@ static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
if (err)
goto out_sensor_poweroff;
- err = video_register_device(&pcdev->vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&pcdev->vdev, VFL_TYPE_VIDEO, -1);
if (err) {
v4l2_err(v4l2_dev, "register video device failed: %d\n", err);
pcdev->sensor = NULL;
@@ -2440,23 +2440,23 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->base = base;
/* request dma */
- pcdev->dma_chans[0] = dma_request_slave_channel(&pdev->dev, "CI_Y");
- if (!pcdev->dma_chans[0]) {
+ pcdev->dma_chans[0] = dma_request_chan(&pdev->dev, "CI_Y");
+ if (IS_ERR(pcdev->dma_chans[0])) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
- return -ENODEV;
+ return PTR_ERR(pcdev->dma_chans[0]);
}
- pcdev->dma_chans[1] = dma_request_slave_channel(&pdev->dev, "CI_U");
- if (!pcdev->dma_chans[1]) {
- dev_err(&pdev->dev, "Can't request DMA for Y\n");
- err = -ENODEV;
+ pcdev->dma_chans[1] = dma_request_chan(&pdev->dev, "CI_U");
+ if (IS_ERR(pcdev->dma_chans[1])) {
+ dev_err(&pdev->dev, "Can't request DMA for U\n");
+ err = PTR_ERR(pcdev->dma_chans[1]);
goto exit_free_dma_y;
}
- pcdev->dma_chans[2] = dma_request_slave_channel(&pdev->dev, "CI_V");
- if (!pcdev->dma_chans[2]) {
+ pcdev->dma_chans[2] = dma_request_chan(&pdev->dev, "CI_V");
+ if (IS_ERR(pcdev->dma_chans[2])) {
dev_err(&pdev->dev, "Can't request DMA for V\n");
- err = -ENODEV;
+ err = PTR_ERR(pcdev->dma_chans[2]);
goto exit_free_dma_u;
}
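
/*
 * [Editor's note] dma_request_slave_channel() returns NULL on any
 * failure and hides the cause; dma_request_chan() returns an ERR_PTR()
 * that the hunk above propagates, which also lets -EPROBE_DEFER reach
 * the driver core.  The idiom, with an invented channel name:
 */
#include <linux/dmaengine.h>
#include <linux/err.h>

static int demo_request_dma(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "rx");
	if (IS_ERR(*chan))
		return PTR_ERR(*chan);	/* may be -EPROBE_DEFER */

	return 0;
}
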
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 1d50dfbbb762..cdbd6dba1122 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -745,7 +745,7 @@ static int video_open(struct file *file)
file->private_data = vfh;
- ret = v4l2_pipeline_pm_use(&vdev->entity, 1);
+ ret = v4l2_pipeline_pm_get(&vdev->entity);
if (ret < 0) {
dev_err(video->camss->dev, "Failed to power up pipeline: %d\n",
ret);
@@ -771,7 +771,7 @@ static int video_release(struct file *file)
vb2_fop_release(file);
- v4l2_pipeline_pm_use(&vdev->entity, 0);
+ v4l2_pipeline_pm_put(&vdev->entity);
file->private_data = NULL;
@@ -921,7 +921,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
vdev->lock = &video->lock;
strscpy(vdev->name, name, sizeof(vdev->name));
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_err(v4l2_dev->dev, "Failed to register video device: %d\n",
ret);
diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile
index b44b11b03e12..64af0bc1edae 100644
--- a/drivers/media/platform/qcom/venus/Makefile
+++ b/drivers/media/platform/qcom/venus/Makefile
@@ -3,7 +3,7 @@
venus-core-objs += core.o helpers.o firmware.o \
hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o \
- hfi_parser.o
+ hfi_parser.o pm_helpers.o
venus-dec-objs += vdec.o vdec_ctrls.o
venus-enc-objs += venc.o venc_ctrls.o
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 07312a2fab24..194b10b98767 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -3,7 +3,6 @@
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
-#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
@@ -19,9 +18,8 @@
#include <media/v4l2-ioctl.h>
#include "core.h"
-#include "vdec.h"
-#include "venc.h"
#include "firmware.h"
+#include "pm_helpers.h"
static void venus_event_notify(struct venus_core *core, u32 event)
{
@@ -100,50 +98,6 @@ static void venus_sys_error_handler(struct work_struct *work)
mutex_unlock(&core->lock);
}
-static int venus_clks_get(struct venus_core *core)
-{
- const struct venus_resources *res = core->res;
- struct device *dev = core->dev;
- unsigned int i;
-
- for (i = 0; i < res->clks_num; i++) {
- core->clks[i] = devm_clk_get(dev, res->clks[i]);
- if (IS_ERR(core->clks[i]))
- return PTR_ERR(core->clks[i]);
- }
-
- return 0;
-}
-
-static int venus_clks_enable(struct venus_core *core)
-{
- const struct venus_resources *res = core->res;
- unsigned int i;
- int ret;
-
- for (i = 0; i < res->clks_num; i++) {
- ret = clk_prepare_enable(core->clks[i]);
- if (ret)
- goto err;
- }
-
- return 0;
-err:
- while (i--)
- clk_disable_unprepare(core->clks[i]);
-
- return ret;
-}
-
-static void venus_clks_disable(struct venus_core *core)
-{
- const struct venus_resources *res = core->res;
- unsigned int i = res->clks_num;
-
- while (i--)
- clk_disable_unprepare(core->clks[i]);
-}
-
static u32 to_v4l2_codec_type(u32 codec)
{
switch (codec) {
@@ -256,9 +210,15 @@ static int venus_probe(struct platform_device *pdev)
if (!core->res)
return -ENODEV;
- ret = venus_clks_get(core);
- if (ret)
- return ret;
+ core->pm_ops = venus_pm_get(core->res->hfi_version);
+ if (!core->pm_ops)
+ return -ENODEV;
+
+ if (core->pm_ops->core_get) {
+ ret = core->pm_ops->core_get(dev);
+ if (ret)
+ return ret;
+ }
ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
if (ret)
@@ -350,6 +310,7 @@ err_runtime_disable:
static int venus_remove(struct platform_device *pdev)
{
struct venus_core *core = platform_get_drvdata(pdev);
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
struct device *dev = core->dev;
int ret;
@@ -368,6 +329,9 @@ static int venus_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ if (pm_ops->core_put)
+ pm_ops->core_put(dev);
+
icc_put(core->video_path);
icc_put(core->cpucfg_path);
@@ -379,11 +343,15 @@ static int venus_remove(struct platform_device *pdev)
static __maybe_unused int venus_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
int ret;
ret = hfi_core_suspend(core);
+ if (ret)
+ return ret;
- venus_clks_disable(core);
+ if (pm_ops->core_power)
+ ret = pm_ops->core_power(dev, POWER_OFF);
return ret;
}
@@ -391,21 +359,16 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
static __maybe_unused int venus_runtime_resume(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
int ret;
- ret = venus_clks_enable(core);
- if (ret)
- return ret;
-
- ret = hfi_core_resume(core, false);
- if (ret)
- goto err_clks_disable;
-
- return 0;
+ if (pm_ops->core_power) {
+ ret = pm_ops->core_power(dev, POWER_ON);
+ if (ret)
+ return ret;
+ }
-err_clks_disable:
- venus_clks_disable(core);
- return ret;
+ return hfi_core_resume(core, false);
}
static const struct dev_pm_ops venus_pm_ops = {
@@ -463,6 +426,9 @@ static const struct venus_resources msm8996_res = {
.reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset),
.clks = {"core", "iface", "bus", "mbus" },
.clks_num = 4,
+ .vcodec0_clks = { "core" },
+ .vcodec1_clks = { "core" },
+ .vcodec_clks_num = 1,
.max_load = 2563200,
.hfi_version = HFI_VERSION_3XX,
.vmem_id = VIDC_RESOURCE_NONE,
@@ -517,6 +483,35 @@ static const struct venus_resources sdm845_res = {
.codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
+ .vcodec0_clks = { "core", "bus" },
+ .vcodec1_clks = { "core", "bus" },
+ .vcodec_clks_num = 2,
+ .max_load = 3110400, /* 4096x2160@90 */
+ .hfi_version = HFI_VERSION_4XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/venus-5.2/venus.mdt",
+};
+
+static const struct venus_resources sdm845_res_v2 = {
+ .freq_tbl = sdm845_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
+ .bw_tbl_enc = sdm845_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
+ .bw_tbl_dec = sdm845_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
+ .codec_freq_data = sdm845_codec_freq_data,
+ .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
+ .clks = {"core", "iface", "bus" },
+ .clks_num = 3,
+ .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
+ .vcodec1_clks = { "vcodec1_core", "vcodec1_bus" },
+ .vcodec_clks_num = 2,
+ .vcodec_pmdomains = { "venus", "vcodec0", "vcodec1" },
+ .vcodec_pmdomains_num = 3,
+ .vcodec_num = 2,
.max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_4XX,
.vmem_id = VIDC_RESOURCE_NONE,
@@ -526,10 +521,56 @@ static const struct venus_resources sdm845_res = {
.fwname = "qcom/venus-5.2/venus.mdt",
};
+static const struct freq_tbl sc7180_freq_table[] = {
+ { 0, 500000000 },
+ { 0, 434000000 },
+ { 0, 340000000 },
+ { 0, 270000000 },
+ { 0, 150000000 },
+};
+
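
/*
 * [Editor's note] The .load column is left at 0 here because the 4xx
 * scaling path (load_scale_v4() in the new pm_helpers.c below) compares
 * a computed aggregate frequency against .freq directly; only the
 * legacy load_scale_v1() walk consults .load.
 */
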
+static const struct bw_tbl sc7180_bw_table_enc[] = {
+ { 972000, 750000, 0, 0, 0 }, /* 3840x2160@30 */
+ { 489600, 451000, 0, 0, 0 }, /* 1920x1080@60 */
+ { 244800, 234000, 0, 0, 0 }, /* 1920x1080@30 */
+};
+
+static const struct bw_tbl sc7180_bw_table_dec[] = {
+ { 1036800, 1386000, 0, 1875000, 0 }, /* 4096x2160@30 */
+ { 489600, 865000, 0, 1146000, 0 }, /* 1920x1080@60 */
+ { 244800, 530000, 0, 583000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct venus_resources sc7180_res = {
+ .freq_tbl = sc7180_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sc7180_freq_table),
+ .bw_tbl_enc = sc7180_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sc7180_bw_table_enc),
+ .bw_tbl_dec = sc7180_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sc7180_bw_table_dec),
+ .codec_freq_data = sdm845_codec_freq_data,
+ .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
+ .clks = {"core", "iface", "bus" },
+ .clks_num = 3,
+ .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
+ .vcodec_clks_num = 2,
+ .vcodec_pmdomains = { "venus", "vcodec0" },
+ .vcodec_pmdomains_num = 2,
+ .vcodec_num = 1,
+ .hfi_version = HFI_VERSION_4XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/venus-5.4/venus.mdt",
+};
+
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
{ .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
+ { .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, },
+ { .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
{ }
};
MODULE_DEVICE_TABLE(of, venus_dt_match);
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 11585fb3cae3..bd3ac6a4501f 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -14,7 +14,9 @@
#include "hfi.h"
-#define VIDC_CLKS_NUM_MAX 4
+#define VIDC_CLKS_NUM_MAX 4
+#define VIDC_VCODEC_CLKS_NUM_MAX 2
+#define VIDC_PMDOMAINS_NUM_MAX 3
struct freq_tbl {
unsigned int load;
@@ -55,6 +57,12 @@ struct venus_resources {
unsigned int codec_freq_data_size;
const char * const clks[VIDC_CLKS_NUM_MAX];
unsigned int clks_num;
+ const char * const vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX];
+ const char * const vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX];
+ unsigned int vcodec_clks_num;
+ const char * const vcodec_pmdomains[VIDC_PMDOMAINS_NUM_MAX];
+ unsigned int vcodec_pmdomains_num;
+ unsigned int vcodec_num;
enum hfi_version hfi_version;
u32 max_load;
unsigned int vmem_id;
@@ -100,10 +108,10 @@ struct venus_caps {
* @base: IO memory base address
* @irq: Venus irq
* @clks: an array of struct clk pointers
- * @core0_clk: a struct clk pointer for core0
- * @core1_clk: a struct clk pointer for core1
- * @core0_bus_clk: a struct clk pointer for core0 bus clock
- * @core1_bus_clk: a struct clk pointer for core1 bus clock
+ * @vcodec0_clks: an array of vcodec0 struct clk pointers
+ * @vcodec1_clks: an array of vcodec1 struct clk pointers
+ * @pd_dl_venus: pmdomain device-link for venus domain
+ * @pmdomains: an array of pmdomains struct device pointers
* @vdev_dec: a reference to video device structure for decoder instances
* @vdev_enc: a reference to video device structure for encoder instances
* @v4l2_dev: a holder for v4l2 device structure
@@ -132,12 +140,12 @@ struct venus_core {
void __iomem *base;
int irq;
struct clk *clks[VIDC_CLKS_NUM_MAX];
- struct clk *core0_clk;
- struct clk *core1_clk;
- struct clk *core0_bus_clk;
- struct clk *core1_bus_clk;
+ struct clk *vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX];
+ struct clk *vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX];
struct icc_path *video_path;
struct icc_path *cpucfg_path;
+ struct device_link *pd_dl_venus;
+ struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX];
struct video_device *vdev_dec;
struct video_device *vdev_enc;
struct v4l2_device v4l2_dev;
@@ -159,6 +167,7 @@ struct venus_core {
unsigned int error;
bool sys_error;
const struct hfi_core_ops *core_ops;
+ const struct venus_pm_ops *pm_ops;
unsigned long enc_codecs;
unsigned long dec_codecs;
unsigned int max_sessions_supported;
@@ -172,6 +181,8 @@ struct venus_core {
struct delayed_work work;
struct venus_caps caps[MAX_CODEC_NUM];
unsigned int codecs_count;
+ unsigned int core0_usage_count;
+ unsigned int core1_usage_count;
};
struct vdec_controls {
@@ -187,6 +198,7 @@ struct venc_controls {
u32 bitrate_mode;
u32 bitrate;
u32 bitrate_peak;
+ u32 rc_enable;
u32 h264_i_period;
u32 h264_entropy_mode;
@@ -344,6 +356,7 @@ struct venus_inst {
unsigned int subscriptions;
int buf_count;
struct venus_ts_metadata tss[VIDEO_MAX_FRAME];
+ unsigned long payloads[VIDEO_MAX_FRAME];
u64 fps;
struct v4l2_fract timeperframe;
const struct venus_format *fmt_out;
@@ -370,6 +383,8 @@ struct venus_inst {
const struct hfi_inst_ops *ops;
u32 session_type;
union hfi_get_property hprop;
+ unsigned int core_acquired: 1;
+ unsigned int bit_depth;
};
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index d3d1748a7ef6..8801a6a7543d 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -44,8 +44,14 @@ static void venus_reset_cpu(struct venus_core *core)
int venus_set_hw_state(struct venus_core *core, bool resume)
{
- if (core->use_tz)
- return qcom_scm_set_remote_state(resume, 0);
+ int ret;
+
+ if (core->use_tz) {
+ ret = qcom_scm_set_remote_state(resume, 0);
+ if (resume && ret == -EINVAL)
+ ret = 0;
+ return ret;
+ }
if (resume)
venus_reset_cpu(core);
@@ -100,8 +106,7 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
mem_va = memremap(r.start, *mem_size, MEMREMAP_WC);
if (!mem_va) {
- dev_err(dev, "unable to map memory region: %pa+%zx\n",
- &r.start, *mem_size);
+ dev_err(dev, "unable to map memory region: %pR\n", &r);
ret = -ENOMEM;
goto err_release_fw;
}
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index a172f1ac0b35..bcc603804041 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -3,12 +3,8 @@
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/interconnect.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/videobuf2-dma-sg.h>
#include <media/v4l2-mem2mem.h>
@@ -17,7 +13,7 @@
#include "core.h"
#include "helpers.h"
#include "hfi_helper.h"
-#include "hfi_venus_io.h"
+#include "pm_helpers.h"
struct intbuf {
struct list_head list;
@@ -360,266 +356,6 @@ err:
}
EXPORT_SYMBOL_GPL(venus_helper_intbufs_realloc);
-static u32 load_per_instance(struct venus_inst *inst)
-{
- u32 mbs;
-
- if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
- return 0;
-
- mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
-
- return mbs * inst->fps;
-}
-
-static u32 load_per_type(struct venus_core *core, u32 session_type)
-{
- struct venus_inst *inst = NULL;
- u32 mbs_per_sec = 0;
-
- mutex_lock(&core->lock);
- list_for_each_entry(inst, &core->instances, list) {
- if (inst->session_type != session_type)
- continue;
-
- mbs_per_sec += load_per_instance(inst);
- }
- mutex_unlock(&core->lock);
-
- return mbs_per_sec;
-}
-
-static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
-{
- const struct venus_resources *res = inst->core->res;
- const struct bw_tbl *bw_tbl;
- unsigned int num_rows, i;
-
- *avg = 0;
- *peak = 0;
-
- if (mbs == 0)
- return;
-
- if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
- num_rows = res->bw_tbl_enc_size;
- bw_tbl = res->bw_tbl_enc;
- } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
- num_rows = res->bw_tbl_dec_size;
- bw_tbl = res->bw_tbl_dec;
- } else {
- return;
- }
-
- if (!bw_tbl || num_rows == 0)
- return;
-
- for (i = 0; i < num_rows; i++) {
- if (mbs > bw_tbl[i].mbs_per_sec)
- break;
-
- if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
- *avg = bw_tbl[i].avg_10bit;
- *peak = bw_tbl[i].peak_10bit;
- } else {
- *avg = bw_tbl[i].avg;
- *peak = bw_tbl[i].peak;
- }
- }
-}
-
-static int load_scale_bw(struct venus_core *core)
-{
- struct venus_inst *inst = NULL;
- u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
-
- mutex_lock(&core->lock);
- list_for_each_entry(inst, &core->instances, list) {
- mbs_per_sec = load_per_instance(inst);
- mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
- total_avg += avg;
- total_peak += peak;
- }
- mutex_unlock(&core->lock);
-
- dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n",
- total_avg, total_peak);
-
- return icc_set_bw(core->video_path, total_avg, total_peak);
-}
-
-static int set_clk_freq(struct venus_core *core, unsigned long freq)
-{
- struct clk *clk = core->clks[0];
- int ret;
-
- ret = clk_set_rate(clk, freq);
- if (ret)
- return ret;
-
- ret = clk_set_rate(core->core0_clk, freq);
- if (ret)
- return ret;
-
- ret = clk_set_rate(core->core1_clk, freq);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int scale_clocks(struct venus_inst *inst)
-{
- struct venus_core *core = inst->core;
- const struct freq_tbl *table = core->res->freq_tbl;
- unsigned int num_rows = core->res->freq_tbl_size;
- unsigned long freq = table[0].freq;
- struct device *dev = core->dev;
- u32 mbs_per_sec;
- unsigned int i;
- int ret;
-
- mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
- load_per_type(core, VIDC_SESSION_TYPE_DEC);
-
- if (mbs_per_sec > core->res->max_load)
- dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
- mbs_per_sec, core->res->max_load);
-
- if (!mbs_per_sec && num_rows > 1) {
- freq = table[num_rows - 1].freq;
- goto set_freq;
- }
-
- for (i = 0; i < num_rows; i++) {
- if (mbs_per_sec > table[i].load)
- break;
- freq = table[i].freq;
- }
-
-set_freq:
-
- ret = set_clk_freq(core, freq);
- if (ret) {
- dev_err(dev, "failed to set clock rate %lu (%d)\n",
- freq, ret);
- return ret;
- }
-
- ret = load_scale_bw(core);
- if (ret) {
- dev_err(dev, "failed to set bandwidth (%d)\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-static unsigned long calculate_inst_freq(struct venus_inst *inst,
- unsigned long filled_len)
-{
- unsigned long vpp_freq = 0, vsp_freq = 0;
- u32 fps = (u32)inst->fps;
- u32 mbs_per_sec;
-
- mbs_per_sec = load_per_instance(inst) / fps;
-
- vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
- /* 21 / 20 is overhead factor */
- vpp_freq += vpp_freq / 20;
- vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq;
-
- /* 10 / 7 is overhead factor */
- if (inst->session_type == VIDC_SESSION_TYPE_ENC)
- vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
- else
- vsp_freq += ((fps * filled_len * 8) * 10) / 7;
-
- return max(vpp_freq, vsp_freq);
-}
-
-static int scale_clocks_v4(struct venus_inst *inst)
-{
- struct venus_core *core = inst->core;
- const struct freq_tbl *table = core->res->freq_tbl;
- unsigned int num_rows = core->res->freq_tbl_size;
- struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
- struct device *dev = core->dev;
- unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
- unsigned long filled_len = 0;
- struct venus_buffer *buf, *n;
- struct vb2_buffer *vb;
- int i, ret;
-
- v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
- vb = &buf->vb.vb2_buf;
- filled_len = max(filled_len, vb2_get_plane_payload(vb, 0));
- }
-
- if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
- return 0;
-
- freq = calculate_inst_freq(inst, filled_len);
- inst->clk_data.freq = freq;
-
- mutex_lock(&core->lock);
- list_for_each_entry(inst, &core->instances, list) {
- if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
- freq_core1 += inst->clk_data.freq;
- } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
- freq_core2 += inst->clk_data.freq;
- } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
- freq_core1 += inst->clk_data.freq;
- freq_core2 += inst->clk_data.freq;
- }
- }
- mutex_unlock(&core->lock);
-
- freq = max(freq_core1, freq_core2);
-
- if (freq >= table[0].freq) {
- freq = table[0].freq;
- dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
- freq, table[0].freq);
- goto set_freq;
- }
-
- for (i = num_rows - 1 ; i >= 0; i--) {
- if (freq <= table[i].freq) {
- freq = table[i].freq;
- break;
- }
- }
-
-set_freq:
-
- ret = set_clk_freq(core, freq);
- if (ret) {
- dev_err(dev, "failed to set clock rate %lu (%d)\n",
- freq, ret);
- return ret;
- }
-
- ret = load_scale_bw(core);
- if (ret) {
- dev_err(dev, "failed to set bandwidth (%d)\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-int venus_helper_load_scale_clocks(struct venus_inst *inst)
-{
- if (IS_V4(inst->core))
- return scale_clocks_v4(inst);
-
- return scale_clocks(inst);
-}
-EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks);
-
static void fill_buffer_desc(const struct venus_buffer *buf,
struct hfi_buffer_desc *bd, bool response)
{
@@ -723,7 +459,7 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
put_ts_metadata(inst, vbuf);
- venus_helper_load_scale_clocks(inst);
+ venus_pm_load_scale(inst);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
fdata.buffer_type = HFI_BUFFER_OUTPUT;
@@ -890,6 +626,78 @@ static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
max(extradata, y_stride * 48), SZ_4K);
}
+static u32 get_framesize_raw_p010(u32 width, u32 height)
+{
+ u32 y_plane, uv_plane, y_stride, uv_stride, y_sclines, uv_sclines;
+
+ y_stride = ALIGN(width * 2, 256);
+ uv_stride = ALIGN(width * 2, 256);
+ y_sclines = ALIGN(height, 32);
+ uv_sclines = ALIGN((height + 1) >> 1, 16);
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines;
+
+ return ALIGN((y_plane + uv_plane), SZ_4K);
+}
+
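
/*
 * [Editor's note] P010 stores 10-bit samples in 16-bit containers,
 * hence the width * 2 strides.  Worked example at 1920x1080:
 * y_stride = uv_stride = ALIGN(3840, 256) = 3840,
 * y_sclines = ALIGN(1080, 32) = 1088, uv_sclines = ALIGN(540, 16) = 544,
 * y_plane = 3840 * 1088 = 4177920, uv_plane = 3840 * 544 = 2088960,
 * so the function returns ALIGN(6266880, SZ_4K) = 6266880 bytes
 * (~5.98 MiB, already 4 KiB aligned).
 */
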
+static u32 get_framesize_raw_p010_ubwc(u32 width, u32 height)
+{
+ u32 y_stride, uv_stride, y_sclines, uv_sclines;
+ u32 y_ubwc_plane, uv_ubwc_plane;
+ u32 y_meta_stride, y_meta_scanlines;
+ u32 uv_meta_stride, uv_meta_scanlines;
+ u32 y_meta_plane, uv_meta_plane;
+ u32 size;
+
+ y_stride = ALIGN(width * 2, 256);
+ uv_stride = ALIGN(width * 2, 256);
+ y_sclines = ALIGN(height, 16);
+ uv_sclines = ALIGN((height + 1) >> 1, 16);
+
+ y_ubwc_plane = ALIGN(y_stride * y_sclines, SZ_4K);
+ uv_ubwc_plane = ALIGN(uv_stride * uv_sclines, SZ_4K);
+ y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
+ y_meta_scanlines = ALIGN(DIV_ROUND_UP(height, 4), 16);
+ y_meta_plane = ALIGN(y_meta_stride * y_meta_scanlines, SZ_4K);
+ uv_meta_stride = ALIGN(DIV_ROUND_UP((width + 1) >> 1, 16), 64);
+ uv_meta_scanlines = ALIGN(DIV_ROUND_UP((height + 1) >> 1, 4), 16);
+ uv_meta_plane = ALIGN(uv_meta_stride * uv_meta_scanlines, SZ_4K);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + uv_meta_plane;
+
+ return ALIGN(size, SZ_4K);
+}
+
+static u32 get_framesize_raw_yuv420_tp10_ubwc(u32 width, u32 height)
+{
+ u32 y_stride, uv_stride, y_sclines, uv_sclines;
+ u32 y_ubwc_plane, uv_ubwc_plane;
+ u32 y_meta_stride, y_meta_scanlines;
+ u32 uv_meta_stride, uv_meta_scanlines;
+ u32 y_meta_plane, uv_meta_plane;
+ u32 extradata = SZ_16K;
+ u32 size;
+
+ y_stride = ALIGN(ALIGN(width, 192) * 4 / 3, 256);
+ uv_stride = ALIGN(ALIGN(width, 192) * 4 / 3, 256);
+ y_sclines = ALIGN(height, 16);
+ uv_sclines = ALIGN((height + 1) >> 1, 16);
+
+ y_ubwc_plane = ALIGN(y_stride * y_sclines, SZ_4K);
+ uv_ubwc_plane = ALIGN(uv_stride * uv_sclines, SZ_4K);
+ y_meta_stride = ALIGN(DIV_ROUND_UP(width, 48), 64);
+ y_meta_scanlines = ALIGN(DIV_ROUND_UP(height, 4), 16);
+ y_meta_plane = ALIGN(y_meta_stride * y_meta_scanlines, SZ_4K);
+ uv_meta_stride = ALIGN(DIV_ROUND_UP((width + 1) >> 1, 24), 64);
+ uv_meta_scanlines = ALIGN(DIV_ROUND_UP((height + 1) >> 1, 4), 16);
+ uv_meta_plane = ALIGN(uv_meta_stride * uv_meta_scanlines, SZ_4K);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + uv_meta_plane;
+ size += max(extradata + SZ_8K, y_stride * 48);
+
+ return ALIGN(size, SZ_4K);
+}
+
u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
{
switch (hfi_fmt) {
@@ -898,6 +706,12 @@ u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
return get_framesize_raw_nv12(width, height);
case HFI_COLOR_FORMAT_NV12_UBWC:
return get_framesize_raw_nv12_ubwc(width, height);
+ case HFI_COLOR_FORMAT_P010:
+ return get_framesize_raw_p010(width, height);
+ case HFI_COLOR_FORMAT_P010_UBWC:
+ return get_framesize_raw_p010_ubwc(width, height);
+ case HFI_COLOR_FORMAT_YUV420_TP10_UBWC:
+ return get_framesize_raw_yuv420_tp10_ubwc(width, height);
default:
return 0;
}
@@ -987,21 +801,6 @@ int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
}
EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
-int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
-{
- const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
- struct hfi_videocores_usage_type cu;
-
- inst->clk_data.core_id = usage;
- if (!IS_V4(inst->core))
- return 0;
-
- cu.video_core_enable_mask = usage;
-
- return hfi_session_set_property(inst, ptype, &cu);
-}
-EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
-
int venus_helper_init_codec_freq_data(struct venus_inst *inst)
{
const struct codec_freq_data *data;
@@ -1289,6 +1088,15 @@ int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);
+static void cache_payload(struct venus_inst *inst, struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ unsigned int idx = vbuf->vb2_buf.index;
+
+ if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->payloads[idx] = vb2_get_plane_payload(vb, 0);
+}
+
void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -1300,6 +1108,8 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+ cache_payload(inst, vb);
+
if (inst->session_type == VIDC_SESSION_TYPE_ENC &&
!(inst->streamon_out && inst->streamon_cap))
goto unlock;
@@ -1354,7 +1164,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(inst);
+ venus_pm_load_scale(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
}
@@ -1365,6 +1175,8 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
else
inst->streamon_cap = 0;
+ venus_pm_release_core(inst);
+
mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
@@ -1417,7 +1229,7 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_bufs_free;
- venus_helper_load_scale_clocks(inst);
+ venus_pm_load_scale(inst);
ret = hfi_session_load_res(inst);
if (ret)
@@ -1512,6 +1324,27 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
if (!caps)
return -EINVAL;
+ if (inst->bit_depth == VIDC_BITDEPTH_10 &&
+ inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ found_ubwc =
+ find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
+ HFI_COLOR_FORMAT_YUV420_TP10_UBWC);
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2,
+ HFI_COLOR_FORMAT_NV12);
+ if (found_ubwc && found) {
+ /*
+ * Hard-code DPB buffers to be 10bit UBWC and decoder
+ * output buffers in 8bit NV12 until V4L2 is able to
+ * expose compressed/tiled formats to applications.
+ */
+ *out_fmt = HFI_COLOR_FORMAT_YUV420_TP10_UBWC;
+ *out2_fmt = HFI_COLOR_FORMAT_NV12;
+ return 0;
+ }
+
+ return -EINVAL;
+ }
+
if (ubwc) {
ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
@@ -1542,52 +1375,3 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
return -EINVAL;
}
EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);
-
-int venus_helper_power_enable(struct venus_core *core, u32 session_type,
- bool enable)
-{
- void __iomem *ctrl, *stat;
- u32 val;
- int ret;
-
- if (!IS_V3(core) && !IS_V4(core))
- return 0;
-
- if (IS_V3(core)) {
- if (session_type == VIDC_SESSION_TYPE_DEC)
- ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
- else
- ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
- if (enable)
- writel(0, ctrl);
- else
- writel(1, ctrl);
-
- return 0;
- }
-
- if (session_type == VIDC_SESSION_TYPE_DEC) {
- ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
- stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
- } else {
- ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
- stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
- }
-
- if (enable) {
- writel(0, ctrl);
-
- ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
- if (ret)
- return ret;
- } else {
- writel(1, ctrl);
-
- ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(venus_helper_power_enable);
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 34dcd0c13f06..b64875564064 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -34,7 +34,6 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
u32 buftype);
int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
int venus_helper_init_codec_freq_data(struct venus_inst *inst);
-int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
unsigned int output2_bufs);
@@ -53,14 +52,11 @@ int venus_helper_get_out_fmts(struct venus_inst *inst, u32 fmt, u32 *out_fmt,
u32 *out2_fmt, bool ubwc);
int venus_helper_alloc_dpb_bufs(struct venus_inst *inst);
int venus_helper_free_dpb_bufs(struct venus_inst *inst);
-int venus_helper_power_enable(struct venus_core *core, u32 session_type,
- bool enable);
int venus_helper_intbufs_alloc(struct venus_inst *inst);
int venus_helper_intbufs_free(struct venus_inst *inst);
int venus_helper_intbufs_realloc(struct venus_inst *inst);
int venus_helper_queue_dpb_bufs(struct venus_inst *inst);
int venus_helper_unregister_bufs(struct venus_inst *inst);
-int venus_helper_load_scale_clocks(struct venus_inst *inst);
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst);
int venus_helper_process_initial_out_bufs(struct venus_inst *inst);
void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 4f645076abfb..c67e412f8201 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1207,6 +1207,8 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
+ case HFI_PROPERTY_PARAM_VENC_SESSION_QP:
+ case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE:
/* not implemented on Venus 4xx */
return -ENOTSUPP;
default:
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index b70551e296b7..f6613df1d16b 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -550,6 +550,7 @@ struct hfi_bitrate {
#define HFI_CAPABILITY_LCU_SIZE 0x14
#define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS 0x15
#define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE 0x16
+#define HFI_CAPABILITY_MAX_VIDEOCORES 0x2b
struct hfi_capability {
u32 capability_type;
@@ -792,6 +793,9 @@ struct hfi_h264_vui_timing_info {
u32 time_scale;
};
+#define VIDC_BITDEPTH_8 0x00000
+#define VIDC_BITDEPTH_10 0x20002
+
struct hfi_bit_depth {
u32 buffer_type;
u32 bit_depth;
@@ -840,8 +844,10 @@ struct hfi_extradata_input_crop {
#define HFI_COLOR_FORMAT_10_BIT_BASE 0x4000
#define HFI_COLOR_FORMAT_YUV420_TP10 0x4002
+#define HFI_COLOR_FORMAT_P010 0x4003
#define HFI_COLOR_FORMAT_NV12_UBWC 0x8002
#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC 0xc002
+#define HFI_COLOR_FORMAT_P010_UBWC 0xc003
#define HFI_COLOR_FORMAT_RGBA8888_UBWC 0x8010
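
/*
 * [Editor's note] The new values follow the existing bit scheme: the
 * UBWC variants OR the base format with 0x8000, and the 10-bit variants
 * OR in HFI_COLOR_FORMAT_10_BIT_BASE (0x4000).  E.g. P010_UBWC 0xc003 =
 * P010 0x4003 | 0x8000, and YUV420_TP10_UBWC 0xc002 = YUV420_TP10
 * 0x4002 | 0x8000.
 */
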
struct hfi_uncompressed_format_select {
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 2293d936e49c..7f515a4b9bd1 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -181,6 +181,7 @@ static void parse_codecs(struct venus_core *core, void *data)
if (IS_V1(core)) {
core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC;
core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
+ core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
}
}
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.h b/drivers/media/platform/qcom/venus/hfi_parser.h
index 3e931c747e19..264e6dd2415f 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.h
+++ b/drivers/media/platform/qcom/venus/hfi_parser.h
@@ -107,4 +107,9 @@ static inline u32 frate_step(struct venus_inst *inst)
return cap_step(inst, HFI_CAPABILITY_FRAMERATE);
}
+static inline u32 core_num_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_MAX_VIDEOCORES);
+}
+
#endif
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
new file mode 100644
index 000000000000..abf93158857b
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Ltd.
+ *
+ * Author: Stanimir Varbanov <stanimir.varbanov@linaro.org>
+ */
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "core.h"
+#include "hfi_parser.h"
+#include "hfi_venus_io.h"
+#include "pm_helpers.h"
+
+static bool legacy_binding;
+
+static int core_clks_get(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ struct device *dev = core->dev;
+ unsigned int i;
+
+ for (i = 0; i < res->clks_num; i++) {
+ core->clks[i] = devm_clk_get(dev, res->clks[i]);
+ if (IS_ERR(core->clks[i]))
+ return PTR_ERR(core->clks[i]);
+ }
+
+ return 0;
+}
+
+static int core_clks_enable(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < res->clks_num; i++) {
+ ret = clk_prepare_enable(core->clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ while (i--)
+ clk_disable_unprepare(core->clks[i]);
+
+ return ret;
+}
+
+static void core_clks_disable(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i = res->clks_num;
+
+ while (i--)
+ clk_disable_unprepare(core->clks[i]);
+}
+
+static int core_clks_set_rate(struct venus_core *core, unsigned long freq)
+{
+ struct clk *clk = core->clks[0];
+ int ret;
+
+ ret = clk_set_rate(clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->vcodec0_clks[0], freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->vcodec1_clks[0], freq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int vcodec_clks_get(struct venus_core *core, struct device *dev,
+ struct clk **clks, const char * const *id)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i;
+
+ for (i = 0; i < res->vcodec_clks_num; i++) {
+ if (!id[i])
+ continue;
+ clks[i] = devm_clk_get(dev, id[i]);
+ if (IS_ERR(clks[i]))
+ return PTR_ERR(clks[i]);
+ }
+
+ return 0;
+}
+
+static int vcodec_clks_enable(struct venus_core *core, struct clk **clks)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < res->vcodec_clks_num; i++) {
+ ret = clk_prepare_enable(clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ while (i--)
+ clk_disable_unprepare(clks[i]);
+
+ return ret;
+}
+
+static void vcodec_clks_disable(struct venus_core *core, struct clk **clks)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i = res->vcodec_clks_num;
+
+ while (i--)
+ clk_disable_unprepare(clks[i]);
+}
+
+static u32 load_per_instance(struct venus_inst *inst)
+{
+ u32 mbs;
+
+ if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
+ return 0;
+
+ mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
+
+ return mbs * inst->fps;
+}
+
+static u32 load_per_type(struct venus_core *core, u32 session_type)
+{
+ struct venus_inst *inst = NULL;
+ u32 mbs_per_sec = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->session_type != session_type)
+ continue;
+
+ mbs_per_sec += load_per_instance(inst);
+ }
+ mutex_unlock(&core->lock);
+
+ return mbs_per_sec;
+}
+
+static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
+{
+ const struct venus_resources *res = inst->core->res;
+ const struct bw_tbl *bw_tbl;
+ unsigned int num_rows, i;
+
+ *avg = 0;
+ *peak = 0;
+
+ if (mbs == 0)
+ return;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
+ num_rows = res->bw_tbl_enc_size;
+ bw_tbl = res->bw_tbl_enc;
+ } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ num_rows = res->bw_tbl_dec_size;
+ bw_tbl = res->bw_tbl_dec;
+ } else {
+ return;
+ }
+
+ if (!bw_tbl || num_rows == 0)
+ return;
+
+ for (i = 0; i < num_rows; i++) {
+ if (mbs > bw_tbl[i].mbs_per_sec)
+ break;
+
+ if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
+ *avg = bw_tbl[i].avg_10bit;
+ *peak = bw_tbl[i].peak_10bit;
+ } else {
+ *avg = bw_tbl[i].avg;
+ *peak = bw_tbl[i].peak;
+ }
+ }
+}
+
+static int load_scale_bw(struct venus_core *core)
+{
+ struct venus_inst *inst = NULL;
+ u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ mbs_per_sec = load_per_instance(inst);
+ mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
+ total_avg += avg;
+ total_peak += peak;
+ }
+ mutex_unlock(&core->lock);
+
+ dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n",
+ total_avg, total_peak);
+
+ return icc_set_bw(core->video_path, total_avg, total_peak);
+}
+
+static int load_scale_v1(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ const struct freq_tbl *table = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
+ unsigned long freq = table[0].freq;
+ struct device *dev = core->dev;
+ u32 mbs_per_sec;
+ unsigned int i;
+ int ret;
+
+ mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
+ load_per_type(core, VIDC_SESSION_TYPE_DEC);
+
+ if (mbs_per_sec > core->res->max_load)
+ dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
+ mbs_per_sec, core->res->max_load);
+
+ if (!mbs_per_sec && num_rows > 1) {
+ freq = table[num_rows - 1].freq;
+ goto set_freq;
+ }
+
+ for (i = 0; i < num_rows; i++) {
+ if (mbs_per_sec > table[i].load)
+ break;
+ freq = table[i].freq;
+ }
+
+set_freq:
+
+ ret = core_clks_set_rate(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
+
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
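
/*
 * [Editor's note] Worked example for the table walk above, with a
 * hypothetical descending table {.load, .freq} = {244800, 466 MHz},
 * {108000, 310 MHz}, {0, 133 MHz}: a demand of 50000 macroblocks/s
 * passes rows 0 and 1 and breaks at row 2 (50000 > 0), leaving freq at
 * 310 MHz -- the slowest rate whose load rating still covers the
 * demand.  A zero demand with num_rows > 1 short-circuits straight to
 * the last (slowest) entry.
 */
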
+static int core_get_v1(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+
+ return core_clks_get(core);
+}
+
+static int core_power_v1(struct device *dev, int on)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (on == POWER_ON)
+ ret = core_clks_enable(core);
+ else
+ core_clks_disable(core);
+
+ return ret;
+}
+
+static const struct venus_pm_ops pm_ops_v1 = {
+ .core_get = core_get_v1,
+ .core_power = core_power_v1,
+ .load_scale = load_scale_v1,
+};
+
+static void
+vcodec_control_v3(struct venus_core *core, u32 session_type, bool enable)
+{
+ void __iomem *ctrl;
+
+ if (session_type == VIDC_SESSION_TYPE_DEC)
+ ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
+ else
+ ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
+
+ if (enable)
+ writel(0, ctrl);
+ else
+ writel(1, ctrl);
+}
+
+static int vdec_get_v3(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+
+ return vcodec_clks_get(core, dev, core->vcodec0_clks,
+ core->res->vcodec0_clks);
+}
+
+static int vdec_power_v3(struct device *dev, int on)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret = 0;
+
+ vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, true);
+
+ if (on == POWER_ON)
+ ret = vcodec_clks_enable(core, core->vcodec0_clks);
+ else
+ vcodec_clks_disable(core, core->vcodec0_clks);
+
+ vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, false);
+
+ return ret;
+}
+
+static int venc_get_v3(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+
+ return vcodec_clks_get(core, dev, core->vcodec1_clks,
+ core->res->vcodec1_clks);
+}
+
+static int venc_power_v3(struct device *dev, int on)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret = 0;
+
+ vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, true);
+
+ if (on == POWER_ON)
+ ret = vcodec_clks_enable(core, core->vcodec1_clks);
+ else
+ vcodec_clks_disable(core, core->vcodec1_clks);
+
+ vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, false);
+
+ return ret;
+}
+
+static const struct venus_pm_ops pm_ops_v3 = {
+ .core_get = core_get_v1,
+ .core_power = core_power_v1,
+ .vdec_get = vdec_get_v3,
+ .vdec_power = vdec_power_v3,
+ .venc_get = venc_get_v3,
+ .venc_power = venc_power_v3,
+ .load_scale = load_scale_v1,
+};
+
+static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable)
+{
+ void __iomem *ctrl, *stat;
+ u32 val;
+ int ret;
+
+ if (coreid == VIDC_CORE_ID_1) {
+ ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
+ stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
+ } else {
+ ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
+ stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
+ }
+
+ if (enable) {
+ writel(0, ctrl);
+
+ ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
+ if (ret)
+ return ret;
+ } else {
+ writel(1, ctrl);
+
+ ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
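
/*
 * [Editor's note] readl_poll_timeout(addr, val, cond, sleep_us,
 * timeout_us) from <linux/iopoll.h> keeps re-reading *addr into val
 * until cond becomes true or timeout_us microseconds elapse, returning
 * 0 on success and -ETIMEDOUT otherwise.  So the handshake above waits
 * up to 100 us for the status register to acknowledge the
 * power-control write: BIT(1) set after enabling, cleared after
 * disabling.
 */
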
+static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
+{
+ int ret;
+
+ if (coreid_mask & VIDC_CORE_ID_1) {
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
+ if (ret)
+ return ret;
+
+ vcodec_clks_disable(core, core->vcodec0_clks);
+
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_put_sync(core->pmdomains[1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (coreid_mask & VIDC_CORE_ID_2) {
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
+ if (ret)
+ return ret;
+
+ vcodec_clks_disable(core, core->vcodec1_clks);
+
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_put_sync(core->pmdomains[2]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
+{
+ int ret;
+
+ if (coreid_mask & VIDC_CORE_ID_1) {
+ ret = pm_runtime_get_sync(core->pmdomains[1]);
+ if (ret < 0)
+ return ret;
+
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
+ if (ret)
+ return ret;
+
+ ret = vcodec_clks_enable(core, core->vcodec0_clks);
+ if (ret)
+ return ret;
+
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (coreid_mask & VIDC_CORE_ID_2) {
+ ret = pm_runtime_get_sync(core->pmdomains[2]);
+ if (ret < 0)
+ return ret;
+
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
+ if (ret)
+ return ret;
+
+ ret = vcodec_clks_enable(core, core->vcodec1_clks);
+ if (ret)
+ return ret;
+
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load)
+{
+ u32 mbs_per_sec, load, core1_load = 0, core2_load = 0;
+ u32 cores_max = core_num_max(inst);
+ struct venus_core *core = inst->core;
+ struct venus_inst *inst_pos;
+ unsigned long vpp_freq;
+ u32 coreid;
+
+ mutex_lock(&core->lock);
+
+ list_for_each_entry(inst_pos, &core->instances, list) {
+ if (inst_pos == inst)
+ continue;
+ vpp_freq = inst_pos->clk_data.codec_freq_data->vpp_freq;
+ coreid = inst_pos->clk_data.core_id;
+
+ mbs_per_sec = load_per_instance(inst_pos);
+ load = mbs_per_sec * vpp_freq;
+
+ if ((coreid & VIDC_CORE_ID_3) == VIDC_CORE_ID_3) {
+ core1_load += load / 2;
+ core2_load += load / 2;
+ } else if (coreid & VIDC_CORE_ID_1) {
+ core1_load += load;
+ } else if (coreid & VIDC_CORE_ID_2) {
+ core2_load += load;
+ }
+ }
+
+ *min_coreid = core1_load <= core2_load ?
+ VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
+ *min_load = min(core1_load, core2_load);
+
+ if (cores_max < VIDC_CORE_ID_2 || core->res->vcodec_num < 2) {
+ *min_coreid = VIDC_CORE_ID_1;
+ *min_load = core1_load;
+ }
+
+ mutex_unlock(&core->lock);
+}
+
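
/*
 * [Editor's note] Example of the balancing above: two instances pinned
 * to core 1 contributing loads 100 and 50, plus one on core 2
 * contributing 120, give core1_load = 150 and core2_load = 120, so the
 * next instance is steered to VIDC_CORE_ID_2 with *min_load = 120.  An
 * instance bound to VIDC_CORE_ID_3 (both cores) splits its load evenly
 * between the two counters, and single-core hardware always resolves
 * to VIDC_CORE_ID_1.
 */
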
+static int decide_core(struct venus_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
+ struct venus_core *core = inst->core;
+ u32 min_coreid, min_load, inst_load;
+ struct hfi_videocores_usage_type cu;
+ unsigned long max_freq;
+
+ if (legacy_binding) {
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC)
+ cu.video_core_enable_mask = VIDC_CORE_ID_1;
+ else
+ cu.video_core_enable_mask = VIDC_CORE_ID_2;
+
+ goto done;
+ }
+
+ if (inst->clk_data.core_id != VIDC_CORE_ID_DEFAULT)
+ return 0;
+
+ inst_load = load_per_instance(inst);
+ inst_load *= inst->clk_data.codec_freq_data->vpp_freq;
+ max_freq = core->res->freq_tbl[0].freq;
+
+ min_loaded_core(inst, &min_coreid, &min_load);
+
+ if ((inst_load + min_load) > max_freq) {
+ dev_warn(core->dev, "HW is overloaded, needed: %u max: %lu\n",
+ inst_load, max_freq);
+ return -EINVAL;
+ }
+
+ inst->clk_data.core_id = min_coreid;
+ cu.video_core_enable_mask = min_coreid;
+
+done:
+ return hfi_session_set_property(inst, ptype, &cu);
+}
+
+static int acquire_core(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ unsigned int coreid_mask = 0;
+
+ if (inst->core_acquired)
+ return 0;
+
+ inst->core_acquired = true;
+
+ if (inst->clk_data.core_id & VIDC_CORE_ID_1) {
+ if (core->core0_usage_count++)
+ return 0;
+
+ coreid_mask = VIDC_CORE_ID_1;
+ }
+
+ if (inst->clk_data.core_id & VIDC_CORE_ID_2) {
+ if (core->core1_usage_count++)
+ return 0;
+
+ coreid_mask |= VIDC_CORE_ID_2;
+ }
+
+ return poweron_coreid(core, coreid_mask);
+}
+
+static int release_core(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ unsigned int coreid_mask = 0;
+ int ret;
+
+ if (!inst->core_acquired)
+ return 0;
+
+ if (inst->clk_data.core_id & VIDC_CORE_ID_1) {
+ if (--core->core0_usage_count)
+ goto done;
+
+ coreid_mask = VIDC_CORE_ID_1;
+ }
+
+ if (inst->clk_data.core_id & VIDC_CORE_ID_2) {
+ if (--core->core1_usage_count)
+ goto done;
+
+ coreid_mask |= VIDC_CORE_ID_2;
+ }
+
+ ret = poweroff_coreid(core, coreid_mask);
+ if (ret)
+ return ret;
+
+done:
+ inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
+ inst->core_acquired = false;
+ return 0;
+}
+
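
/*
 * [Editor's note] acquire_core()/release_core() reference-count each
 * vcodec core: only the first acquire of a core id reaches
 * poweron_coreid() and only the last release reaches poweroff_coreid(),
 * so sessions sharing a core never bounce its power state.  Both run
 * under core->lock, taken by coreid_power_v4() below.
 */
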
+static int coreid_power_v4(struct venus_inst *inst, int on)
+{
+ struct venus_core *core = inst->core;
+ int ret;
+
+ if (legacy_binding)
+ return 0;
+
+ if (on == POWER_ON) {
+ ret = decide_core(inst);
+ if (ret)
+ return ret;
+
+ mutex_lock(&core->lock);
+ ret = acquire_core(inst);
+ mutex_unlock(&core->lock);
+ } else {
+ mutex_lock(&core->lock);
+ ret = release_core(inst);
+ mutex_unlock(&core->lock);
+ }
+
+ return ret;
+}
+
+static int vdec_get_v4(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+
+ if (!legacy_binding)
+ return 0;
+
+ return vcodec_clks_get(core, dev, core->vcodec0_clks,
+ core->res->vcodec0_clks);
+}
+
+static void vdec_put_v4(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ unsigned int i;
+
+ if (!legacy_binding)
+ return;
+
+ for (i = 0; i < core->res->vcodec_clks_num; i++)
+ core->vcodec0_clks[i] = NULL;
+}
+
+static int vdec_power_v4(struct device *dev, int on)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ if (!legacy_binding)
+ return 0;
+
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
+ if (ret)
+ return ret;
+
+ if (on == POWER_ON)
+ ret = vcodec_clks_enable(core, core->vcodec0_clks);
+ else
+ vcodec_clks_disable(core, core->vcodec0_clks);
+
+ vcodec_control_v4(core, VIDC_CORE_ID_1, false);
+
+ return ret;
+}
+
+static int venc_get_v4(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+
+ if (!legacy_binding)
+ return 0;
+
+ return vcodec_clks_get(core, dev, core->vcodec1_clks,
+ core->res->vcodec1_clks);
+}
+
+static void venc_put_v4(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ unsigned int i;
+
+ if (!legacy_binding)
+ return;
+
+ for (i = 0; i < core->res->vcodec_clks_num; i++)
+ core->vcodec1_clks[i] = NULL;
+}
+
+static int venc_power_v4(struct device *dev, int on)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ if (!legacy_binding)
+ return 0;
+
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
+ if (ret)
+ return ret;
+
+ if (on == POWER_ON)
+ ret = vcodec_clks_enable(core, core->vcodec1_clks);
+ else
+ vcodec_clks_disable(core, core->vcodec1_clks);
+
+ vcodec_control_v4(core, VIDC_CORE_ID_2, false);
+
+ return ret;
+}
+
+static int vcodec_domains_get(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ const struct venus_resources *res = core->res;
+ struct device *pd;
+ unsigned int i;
+
+ if (!res->vcodec_pmdomains_num)
+ return -ENODEV;
+
+ for (i = 0; i < res->vcodec_pmdomains_num; i++) {
+ pd = dev_pm_domain_attach_by_name(dev,
+ res->vcodec_pmdomains[i]);
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+ core->pmdomains[i] = pd;
+ }
+
+ core->pd_dl_venus = device_link_add(dev, core->pmdomains[0],
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS |
+ DL_FLAG_RPM_ACTIVE);
+ if (!core->pd_dl_venus)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void vcodec_domains_put(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ const struct venus_resources *res = core->res;
+ unsigned int i;
+
+ if (!res->vcodec_pmdomains_num)
+ return;
+
+ if (core->pd_dl_venus)
+ device_link_del(core->pd_dl_venus);
+
+ for (i = 0; i < res->vcodec_pmdomains_num; i++) {
+ if (IS_ERR_OR_NULL(core->pmdomains[i]))
+ continue;
+ dev_pm_domain_detach(core->pmdomains[i], true);
+ }
+}
+
+static int core_get_v4(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ const struct venus_resources *res = core->res;
+ int ret;
+
+ ret = core_clks_get(core);
+ if (ret)
+ return ret;
+
+ if (!res->vcodec_pmdomains_num)
+ legacy_binding = true;
+
+ dev_info(dev, "%s legacy binding\n", legacy_binding ? "" : "non");
+
+ ret = vcodec_clks_get(core, dev, core->vcodec0_clks, res->vcodec0_clks);
+ if (ret)
+ return ret;
+
+ ret = vcodec_clks_get(core, dev, core->vcodec1_clks, res->vcodec1_clks);
+ if (ret)
+ return ret;
+
+ if (legacy_binding)
+ return 0;
+
+ ret = vcodec_domains_get(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void core_put_v4(struct device *dev)
+{
+ if (legacy_binding)
+ return;
+
+ vcodec_domains_put(dev);
+}
+
+static int core_power_v4(struct device *dev, int on)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (on == POWER_ON)
+ ret = core_clks_enable(core);
+ else
+ core_clks_disable(core);
+
+ return ret;
+}
+
+static unsigned long calculate_inst_freq(struct venus_inst *inst,
+ unsigned long filled_len)
+{
+ unsigned long vpp_freq = 0, vsp_freq = 0;
+ u32 fps = (u32)inst->fps;
+ u32 mbs_per_sec;
+
+ mbs_per_sec = load_per_instance(inst) / fps;
+
+ vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
+ /* 21 / 20 is overhead factor */
+ vpp_freq += vpp_freq / 20;
+ vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq;
+
+ /* 10 / 7 is overhead factor */
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
+ else
+ vsp_freq += ((fps * filled_len * 8) * 10) / 7;
+
+ return max(vpp_freq, vsp_freq);
+}
+
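
/*
 * [Editor's note] The overhead factors above are plain integer
 * arithmetic: vpp_freq += vpp_freq / 20 multiplies by 21/20 (a 5% pad),
 * and (x * 10) / 7 scales the stream-processor term by ~1.43.  The
 * function returns the larger of the pixel-processor (vpp) and
 * stream-processor (vsp) estimates, since the clock must satisfy both.
 */
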
+static int load_scale_v4(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ const struct freq_tbl *table = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
+ struct device *dev = core->dev;
+ unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
+ unsigned long filled_len = 0;
+ int i, ret;
+
+ for (i = 0; i < inst->num_input_bufs; i++)
+ filled_len = max(filled_len, inst->payloads[i]);
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
+ return 0;
+
+ freq = calculate_inst_freq(inst, filled_len);
+ inst->clk_data.freq = freq;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
+ freq_core1 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
+ freq_core2 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+ freq_core1 += inst->clk_data.freq;
+ freq_core2 += inst->clk_data.freq;
+ }
+ }
+ mutex_unlock(&core->lock);
+
+ freq = max(freq_core1, freq_core2);
+
+ if (freq >= table[0].freq) {
+ dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
+ freq, table[0].freq);
+ freq = table[0].freq;
+ goto set_freq;
+ }
+
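+ /*
+ * The frequency table is sorted in descending order (table[0] is the
+ * maximum); walk it from the lowest rate upwards and pick the first
+ * entry that satisfies the aggregated load.
+ */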
+ for (i = num_rows - 1; i >= 0; i--) {
+ if (freq <= table[i].freq) {
+ freq = table[i].freq;
+ break;
+ }
+ }
+
+set_freq:
+
+ ret = core_clks_set_rate(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
+
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct venus_pm_ops pm_ops_v4 = {
+ .core_get = core_get_v4,
+ .core_put = core_put_v4,
+ .core_power = core_power_v4,
+ .vdec_get = vdec_get_v4,
+ .vdec_put = vdec_put_v4,
+ .vdec_power = vdec_power_v4,
+ .venc_get = venc_get_v4,
+ .venc_put = venc_put_v4,
+ .venc_power = venc_power_v4,
+ .coreid_power = coreid_power_v4,
+ .load_scale = load_scale_v4,
+};
+
+const struct venus_pm_ops *venus_pm_get(enum hfi_version version)
+{
+ switch (version) {
+ case HFI_VERSION_1XX:
+ default:
+ return &pm_ops_v1;
+ case HFI_VERSION_3XX:
+ return &pm_ops_v3;
+ case HFI_VERSION_4XX:
+ return &pm_ops_v4;
+ }
+
+ return NULL;
+}
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h
new file mode 100644
index 000000000000..aa2f6afa2354
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/pm_helpers.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2019 Linaro Ltd. */
+#ifndef __VENUS_PM_HELPERS_H__
+#define __VENUS_PM_HELPERS_H__
+
+struct device;
+
+#define POWER_ON 1
+#define POWER_OFF 0
+
+struct venus_pm_ops {
+ int (*core_get)(struct device *dev);
+ void (*core_put)(struct device *dev);
+ int (*core_power)(struct device *dev, int on);
+
+ int (*vdec_get)(struct device *dev);
+ void (*vdec_put)(struct device *dev);
+ int (*vdec_power)(struct device *dev, int on);
+
+ int (*venc_get)(struct device *dev);
+ void (*venc_put)(struct device *dev);
+ int (*venc_power)(struct device *dev, int on);
+
+ int (*coreid_power)(struct venus_inst *inst, int on);
+
+ int (*load_scale)(struct venus_inst *inst);
+};
+
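+/*
+ * Resolve the PM ops matching an HFI version. A minimal usage sketch
+ * (call site assumed for illustration, not part of this header):
+ *
+ * core->pm_ops = venus_pm_get(core->res->hfi_version);
+ * ret = core->pm_ops->core_get(dev);
+ * if (ret)
+ * return ret;
+ */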
+const struct venus_pm_ops *venus_pm_get(enum hfi_version version);
+
+static inline int venus_pm_load_scale(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+
+ if (!core->pm_ops || !core->pm_ops->load_scale)
+ return 0;
+
+ return core->pm_ops->load_scale(inst);
+}
+
+static inline int venus_pm_acquire_core(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
+ int ret = 0;
+
+ if (pm_ops && pm_ops->coreid_power)
+ ret = pm_ops->coreid_power(inst, POWER_ON);
+
+ return ret;
+}
+
+static inline int venus_pm_release_core(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
+ int ret = 0;
+
+ if (pm_ops && pm_ops->coreid_power)
+ ret = pm_ops->coreid_power(inst, POWER_OFF);
+
+ return ret;
+}
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 8feaf5daece9..4ed2628585a1 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -20,6 +20,7 @@
#include "core.h"
#include "helpers.h"
#include "vdec.h"
+#include "pm_helpers.h"
/*
* Three reasons to keep MPLANE formats (despite that the number of planes
@@ -578,10 +579,6 @@ static int vdec_output_conf(struct venus_inst *inst)
if (ret)
return ret;
- ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_1);
- if (ret)
- return ret;
-
if (core->res->hfi_version == HFI_VERSION_1XX) {
ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
ret = hfi_session_set_property(inst, ptype, &en);
@@ -868,7 +865,7 @@ reconfigure:
if (ret)
goto free_dpb_bufs;
- venus_helper_load_scale_clocks(inst);
+ venus_pm_load_scale(inst);
ret = hfi_session_continue(inst);
if (ret)
@@ -950,6 +947,10 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
mutex_lock(&inst->lock);
+ ret = venus_pm_acquire_core(inst);
+ if (ret)
+ goto error;
+
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
ret = vdec_start_capture(inst);
else
@@ -1076,7 +1077,8 @@ static void vdec_session_release(struct venus_inst *inst)
hfi_session_abort(inst);
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(inst);
+ venus_pm_load_scale(inst);
+ venus_pm_release_core(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
mutex_unlock(&inst->lock);
@@ -1191,6 +1193,9 @@ static void vdec_event_change(struct venus_inst *inst,
inst->out_width = ev_data->width;
inst->out_height = ev_data->height;
+ if (inst->bit_depth != ev_data->bit_depth)
+ inst->bit_depth = ev_data->bit_depth;
+
dev_dbg(dev, "event %s sufficient resources (%ux%u)\n",
sufficient ? "" : "not", ev_data->width, ev_data->height);
@@ -1336,6 +1341,9 @@ static int vdec_open(struct file *file)
inst->num_output_bufs = 1;
inst->codec_state = VENUS_DEC_STATE_DEINIT;
inst->buf_count = 0;
+ inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
+ inst->core_acquired = false;
+ inst->bit_depth = VIDC_BITDEPTH_8;
init_waitqueue_head(&inst->reconf_wait);
venus_helper_init_instance(inst);
@@ -1432,20 +1440,14 @@ static int vdec_probe(struct platform_device *pdev)
if (!core)
return -EPROBE_DEFER;
- if (IS_V3(core) || IS_V4(core)) {
- core->core0_clk = devm_clk_get(dev, "core");
- if (IS_ERR(core->core0_clk))
- return PTR_ERR(core->core0_clk);
- }
+ platform_set_drvdata(pdev, core);
- if (IS_V4(core)) {
- core->core0_bus_clk = devm_clk_get(dev, "bus");
- if (IS_ERR(core->core0_bus_clk))
- return PTR_ERR(core->core0_bus_clk);
+ if (core->pm_ops->vdec_get) {
+ ret = core->pm_ops->vdec_get(dev);
+ if (ret)
+ return ret;
}
- platform_set_drvdata(pdev, core);
-
vdev = video_device_alloc();
if (!vdev)
return -ENOMEM;
@@ -1458,7 +1460,7 @@ static int vdec_probe(struct platform_device *pdev)
vdev->v4l2_dev = &core->v4l2_dev;
vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_vdev_release;
@@ -1482,57 +1484,33 @@ static int vdec_remove(struct platform_device *pdev)
video_unregister_device(core->vdev_dec);
pm_runtime_disable(core->dev_dec);
+ if (core->pm_ops->vdec_put)
+ core->pm_ops->vdec_put(core->dev_dec);
+
return 0;
}
static __maybe_unused int vdec_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
- int ret;
-
- if (IS_V1(core))
- return 0;
-
- ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
- if (ret)
- return ret;
-
- if (IS_V4(core))
- clk_disable_unprepare(core->core0_bus_clk);
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
+ int ret = 0;
- clk_disable_unprepare(core->core0_clk);
+ if (pm_ops->vdec_power)
+ ret = pm_ops->vdec_power(dev, POWER_OFF);
- return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
+ return ret;
}
static __maybe_unused int vdec_runtime_resume(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
- int ret;
-
- if (IS_V1(core))
- return 0;
-
- ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(core->core0_clk);
- if (ret)
- goto err_power_disable;
-
- if (IS_V4(core))
- ret = clk_prepare_enable(core->core0_bus_clk);
-
- if (ret)
- goto err_unprepare_core0;
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
+ int ret = 0;
- return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
+ if (pm_ops->vdec_power)
+ ret = pm_ops->vdec_power(dev, POWER_ON);
-err_unprepare_core0:
- clk_disable_unprepare(core->core0_clk);
-err_power_disable:
- venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
return ret;
}
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 453edf966d4f..9981a2a27c90 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -20,6 +20,7 @@
#include "core.h"
#include "helpers.h"
#include "venc.h"
+#include "pm_helpers.h"
#define NUM_B_FRAMES_MAX 4
@@ -655,10 +656,6 @@ static int venc_set_properties(struct venus_inst *inst)
if (ret)
return ret;
- ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_2);
- if (ret)
- return ret;
-
ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
frate.buffer_type = HFI_BUFFER_OUTPUT;
frate.framerate = inst->fps * (1 << 16);
@@ -731,7 +728,9 @@ static int venc_set_properties(struct venus_inst *inst)
if (ret)
return ret;
- if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+ if (!ctr->rc_enable)
+ rate_control = HFI_RATE_CONTROL_OFF;
+ else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
rate_control = HFI_RATE_CONTROL_VBR_CFR;
else
rate_control = HFI_RATE_CONTROL_CBR_CFR;
@@ -991,6 +990,10 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
if (ret)
goto bufs_done;
+ ret = venus_pm_acquire_core(inst);
+ if (ret)
+ goto deinit_sess;
+
ret = venc_set_properties(inst);
if (ret)
goto deinit_sess;
@@ -1159,6 +1162,8 @@ static int venc_open(struct file *file)
inst->core = core;
inst->session_type = VIDC_SESSION_TYPE_ENC;
+ inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
+ inst->core_acquired = false;
venus_helper_init_instance(inst);
@@ -1255,20 +1260,14 @@ static int venc_probe(struct platform_device *pdev)
if (!core)
return -EPROBE_DEFER;
- if (IS_V3(core) || IS_V4(core)) {
- core->core1_clk = devm_clk_get(dev, "core");
- if (IS_ERR(core->core1_clk))
- return PTR_ERR(core->core1_clk);
- }
+ platform_set_drvdata(pdev, core);
- if (IS_V4(core)) {
- core->core1_bus_clk = devm_clk_get(dev, "bus");
- if (IS_ERR(core->core1_bus_clk))
- return PTR_ERR(core->core1_bus_clk);
+ if (core->pm_ops->venc_get) {
+ ret = core->pm_ops->venc_get(dev);
+ if (ret)
+ return ret;
}
- platform_set_drvdata(pdev, core);
-
vdev = video_device_alloc();
if (!vdev)
return -ENOMEM;
@@ -1281,7 +1280,7 @@ static int venc_probe(struct platform_device *pdev)
vdev->v4l2_dev = &core->v4l2_dev;
vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_vdev_release;
@@ -1305,57 +1304,33 @@ static int venc_remove(struct platform_device *pdev)
video_unregister_device(core->vdev_enc);
pm_runtime_disable(core->dev_enc);
+ if (core->pm_ops->venc_put)
+ core->pm_ops->venc_put(core->dev_enc);
+
return 0;
}
static __maybe_unused int venc_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
- int ret;
-
- if (IS_V1(core))
- return 0;
-
- ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true);
- if (ret)
- return ret;
-
- if (IS_V4(core))
- clk_disable_unprepare(core->core1_bus_clk);
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
+ int ret = 0;
- clk_disable_unprepare(core->core1_clk);
+ if (pm_ops->venc_power)
+ ret = pm_ops->venc_power(dev, POWER_OFF);
- return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
+ return ret;
}
static __maybe_unused int venc_runtime_resume(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
- int ret;
-
- if (IS_V1(core))
- return 0;
-
- ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(core->core1_clk);
- if (ret)
- goto err_power_disable;
-
- if (IS_V4(core))
- ret = clk_prepare_enable(core->core1_bus_clk);
-
- if (ret)
- goto err_unprepare_core1;
+ const struct venus_pm_ops *pm_ops = core->pm_ops;
+ int ret = 0;
- return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
+ if (pm_ops->venc_power)
+ ret = pm_ops->venc_power(dev, POWER_ON);
-err_unprepare_core1:
- clk_disable_unprepare(core->core1_clk);
-err_power_disable:
- venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
return ret;
}
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 877c0b3299e9..8362dde7949e 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -199,6 +199,9 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
}
mutex_unlock(&inst->lock);
break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ ctr->rc_enable = ctrl->val;
+ break;
default:
return -EINVAL;
}
@@ -214,7 +217,7 @@ int venc_ctrl_init(struct venus_inst *inst)
{
int ret;
- ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 30);
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 31);
if (ret)
return ret;
@@ -351,6 +354,9 @@ int venc_ctrl_init(struct venus_inst *inst)
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, 0, 0, 0, 0);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 0, 1, 1, 1);
+
ret = inst->ctrl_handler.error;
if (ret)
goto err;
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index cf9029efeb04..1a30cd036371 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -535,7 +535,7 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
/* Set scaling coefficient */
crop_height = vin->crop.height;
- if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
+ if (V4L2_FIELD_HAS_BOTH(vin->format.field))
crop_height *= 2;
ys = 0;
@@ -564,7 +564,7 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
rvin_write(vin, 0, VNSLPOC_REG);
rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);
- if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
+ if (V4L2_FIELD_HAS_BOTH(vin->format.field))
rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
else
rvin_write(vin, vin->format.height - 1, VNELPOC_REG);
@@ -626,6 +626,8 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_INTERLACED_BT:
vnmc = VNMC_IM_FULL | VNMC_FOC;
break;
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
@@ -842,27 +844,52 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
struct rvin_buffer *buf;
struct vb2_v4l2_buffer *vbuf;
dma_addr_t phys_addr;
+ int prev;
/* An already populated slot shall never be overwritten. */
- if (WARN_ON(vin->queue_buf[slot] != NULL))
+ if (WARN_ON(vin->buf_hw[slot].buffer))
return;
- vin_dbg(vin, "Filling HW slot: %d\n", slot);
-
- if (list_empty(&vin->buf_list)) {
- vin->queue_buf[slot] = NULL;
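+ /* Index of the slot preceding @slot, wrapping around at 0. */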
+ prev = (slot == 0 ? HW_BUFFER_NUM : slot) - 1;
+
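+ /*
+ * If the previous slot captured the top half of a SEQ buffer,
+ * point this slot at the bottom half of the same vb2 buffer.
+ */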
+ if (vin->buf_hw[prev].type == HALF_TOP) {
+ vbuf = vin->buf_hw[prev].buffer;
+ vin->buf_hw[slot].buffer = vbuf;
+ vin->buf_hw[slot].type = HALF_BOTTOM;
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ phys_addr = vin->buf_hw[prev].phys +
+ vin->format.sizeimage / 4;
+ break;
+ default:
+ phys_addr = vin->buf_hw[prev].phys +
+ vin->format.sizeimage / 2;
+ break;
+ }
+ } else if (list_empty(&vin->buf_list)) {
+ vin->buf_hw[slot].buffer = NULL;
+ vin->buf_hw[slot].type = FULL;
phys_addr = vin->scratch_phys;
} else {
/* Keep track of buffer we give to HW */
buf = list_entry(vin->buf_list.next, struct rvin_buffer, list);
vbuf = &buf->vb;
list_del_init(to_buf_list(vbuf));
- vin->queue_buf[slot] = vbuf;
+ vin->buf_hw[slot].buffer = vbuf;
+
+ vin->buf_hw[slot].type =
+ V4L2_FIELD_IS_SEQUENTIAL(vin->format.field) ?
+ HALF_TOP : FULL;
/* Setup DMA */
phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
}
+ vin_dbg(vin, "Filling HW slot: %d type: %d buffer: %p\n",
+ slot, vin->buf_hw[slot].type, vin->buf_hw[slot].buffer);
+
+ vin->buf_hw[slot].phys = phys_addr;
rvin_set_slot_addr(vin, slot, phys_addr);
}
@@ -870,6 +897,11 @@ static int rvin_capture_start(struct rvin_dev *vin)
{
int slot, ret;
+ for (slot = 0; slot < HW_BUFFER_NUM; slot++) {
+ vin->buf_hw[slot].buffer = NULL;
+ vin->buf_hw[slot].type = FULL;
+ }
+
for (slot = 0; slot < HW_BUFFER_NUM; slot++)
rvin_fill_hw_slot(vin, slot);
@@ -953,13 +985,24 @@ static irqreturn_t rvin_irq(int irq, void *data)
}
/* Capture frame */
- if (vin->queue_buf[slot]) {
- vin->queue_buf[slot]->field = rvin_get_active_field(vin, vnms);
- vin->queue_buf[slot]->sequence = vin->sequence;
- vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
- vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf,
+ if (vin->buf_hw[slot].buffer) {
+ /*
+ * Nothing to do but refill the hardware slot if the
+ * capture only filled the first half of the vb2 buffer.
+ */
+ if (vin->buf_hw[slot].type == HALF_TOP) {
+ vin->buf_hw[slot].buffer = NULL;
+ rvin_fill_hw_slot(vin, slot);
+ goto done;
+ }
+
+ vin->buf_hw[slot].buffer->field =
+ rvin_get_active_field(vin, vnms);
+ vin->buf_hw[slot].buffer->sequence = vin->sequence;
+ vin->buf_hw[slot].buffer->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vin->buf_hw[slot].buffer->vb2_buf,
VB2_BUF_STATE_DONE);
- vin->queue_buf[slot] = NULL;
+ vin->buf_hw[slot].buffer = NULL;
} else {
/* Scratch buffer was used, dropping frame. */
vin_dbg(vin, "Dropping frame %u\n", vin->sequence);
@@ -980,14 +1023,22 @@ static void return_all_buffers(struct rvin_dev *vin,
enum vb2_buffer_state state)
{
struct rvin_buffer *buf, *node;
- int i;
+ struct vb2_v4l2_buffer *freed[HW_BUFFER_NUM];
+ unsigned int i, n;
for (i = 0; i < HW_BUFFER_NUM; i++) {
- if (vin->queue_buf[i]) {
- vb2_buffer_done(&vin->queue_buf[i]->vb2_buf,
- state);
- vin->queue_buf[i] = NULL;
+ freed[i] = vin->buf_hw[i].buffer;
+ vin->buf_hw[i].buffer = NULL;
+
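+ /*
+ * A SEQ_TB/BT capture can park the same vb2 buffer in two
+ * hardware slots; return it to vb2 only once.
+ */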
+ for (n = 0; n < i; n++) {
+ if (freed[i] == freed[n]) {
+ freed[i] = NULL;
+ break;
+ }
}
+
+ if (freed[i])
+ vb2_buffer_done(&freed[i]->vb2_buf, state);
}
list_for_each_entry_safe(buf, node, &vin->buf_list, list) {
@@ -1291,7 +1342,7 @@ int rvin_dma_register(struct rvin_dev *vin, int irq)
vin->state = STOPPED;
for (i = 0; i < HW_BUFFER_NUM; i++)
- vin->queue_buf[i] = NULL;
+ vin->buf_hw[i].buffer = NULL;
/* buffer queue */
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 5ff565e76bca..5151a3cd8a6e 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -73,11 +73,22 @@ const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
{
int i;
- if (vin->info->model == RCAR_M1 && pixelformat == V4L2_PIX_FMT_XBGR32)
- return NULL;
-
- if (pixelformat == V4L2_PIX_FMT_NV12 && !vin->info->nv12)
- return NULL;
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_XBGR32:
+ if (vin->info->model == RCAR_M1)
+ return NULL;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ /*
+ * If NV12 is supported it's only supported on channels 0, 1, 4,
+ * 5, 8, 9, 12 and 13.
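+ * That set corresponds to the 0x3333 mask used below (bits 0-1,
+ * 4-5, 8-9 and 12-13).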
+ */
+ if (!vin->info->nv12 || !(BIT(vin->id) & 0x3333))
+ return NULL;
+ break;
+ default:
+ break;
+ }
for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
if (rvin_formats[i].fourcc == pixelformat)
@@ -107,6 +118,9 @@ static u32 rvin_format_bytesperline(struct rvin_dev *vin,
break;
}
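+ /*
+ * Sequential fields are captured as two half-height writes; the
+ * hardware appears to require the stricter 128-byte alignment in
+ * that mode.
+ */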
+ if (V4L2_FIELD_IS_SEQUENTIAL(pix->field))
+ align = 0x80;
+
return ALIGN(pix->width, align) * fmt->bpp;
}
@@ -137,6 +151,8 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
case V4L2_FIELD_ALTERNATE:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
break;
default:
pix->field = RVIN_DEFAULT_FIELD;
@@ -826,7 +842,7 @@ static int rvin_open(struct file *file)
goto err_unlock;
if (vin->info->use_mc)
- ret = v4l2_pipeline_pm_use(&vin->vdev.entity, 1);
+ ret = v4l2_pipeline_pm_get(&vin->vdev.entity);
else if (v4l2_fh_is_singular_file(file))
ret = rvin_power_parallel(vin, true);
@@ -842,7 +858,7 @@ static int rvin_open(struct file *file)
return 0;
err_power:
if (vin->info->use_mc)
- v4l2_pipeline_pm_use(&vin->vdev.entity, 0);
+ v4l2_pipeline_pm_put(&vin->vdev.entity);
else if (v4l2_fh_is_singular_file(file))
rvin_power_parallel(vin, false);
err_open:
@@ -870,7 +886,7 @@ static int rvin_release(struct file *file)
ret = _vb2_fop_release(file, NULL);
if (vin->info->use_mc) {
- v4l2_pipeline_pm_use(&vin->vdev.entity, 0);
+ v4l2_pipeline_pm_put(&vin->vdev.entity);
} else {
if (fh_singular)
rvin_power_parallel(vin, false);
@@ -953,7 +969,7 @@ int rvin_v4l2_register(struct rvin_dev *vin)
rvin_format_align(vin, &vin->format);
- ret = video_register_device(&vin->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&vin->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
vin_err(vin, "Failed to register video device\n");
return ret;
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index a36b0824f81d..c19d077ce1cb 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -61,6 +61,23 @@ enum rvin_dma_state {
};
/**
+ * enum rvin_buffer_type
+ *
+ * Describes how a buffer is given to the hardware. To be able to
+ * capture SEQ_TB/BT, the hardware has to write to the same vb2 buffer
+ * twice, so the buffer type must be tracked.
+ *
+ * FULL - One capture fills the whole vb2 buffer
+ * HALF_TOP - One capture fills the top half of the vb2 buffer
+ * HALF_BOTTOM - One capture fills the bottom half of the vb2 buffer
+ */
+enum rvin_buffer_type {
+ FULL,
+ HALF_TOP,
+ HALF_BOTTOM,
+};
+
+/**
* struct rvin_video_format - Data format stored in memory
* @fourcc: Pixelformat
* @bpp: Bytes per pixel
@@ -164,9 +181,8 @@ struct rvin_info {
* @scratch: cpu address for scratch buffer
* @scratch_phys: physical address of the scratch buffer
*
- * @qlock: protects @queue_buf, @buf_list, @sequence
- * @state
- * @queue_buf: Keeps track of buffers given to HW slot
+ * @qlock: protects @buf_hw, @buf_list, @sequence and @state
+ * @buf_hw: Keeps track of the buffers given to the HW slots
* @buf_list: list of queued buffers
* @sequence: V4L2 buffers sequence number
* @state: keeps track of operation state
@@ -205,7 +221,11 @@ struct rvin_dev {
dma_addr_t scratch_phys;
spinlock_t qlock;
- struct vb2_v4l2_buffer *queue_buf[HW_BUFFER_NUM];
+ struct {
+ struct vb2_v4l2_buffer *buffer;
+ enum rvin_buffer_type type;
+ dma_addr_t phys;
+ } buf_hw[HW_BUFFER_NUM];
struct list_head buf_list;
unsigned int sequence;
enum rvin_dma_state state;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 0f267a237b42..3d2451ac347d 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -275,10 +275,14 @@ static int rcar_drif_alloc_dmachannels(struct rcar_drif_sdr *sdr)
for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
struct rcar_drif *ch = sdr->ch[i];
- ch->dmach = dma_request_slave_channel(&ch->pdev->dev, "rx");
- if (!ch->dmach) {
- rdrif_err(sdr, "ch%u: dma channel req failed\n", i);
- ret = -ENODEV;
+ ch->dmach = dma_request_chan(&ch->pdev->dev, "rx");
+ if (IS_ERR(ch->dmach)) {
+ ret = PTR_ERR(ch->dmach);
+ if (ret != -EPROBE_DEFER)
+ rdrif_err(sdr,
+ "ch%u: dma channel req failed: %pe\n",
+ i, ch->dmach);
+ ch->dmach = NULL;
goto dmach_error;
}
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 97bed45360f0..c9448de885b6 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2344,7 +2344,7 @@ static int fdp1_probe(struct platform_device *pdev)
video_set_drvdata(vfd, fdp1);
strscpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
goto release_m2m;
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 1c3f507acfc9..5250a14324e9 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1663,7 +1663,7 @@ static int jpu_probe(struct platform_device *pdev)
jpu->vfd_encoder.device_caps = V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_M2M_MPLANE;
- ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
goto m2m_init_rollback;
@@ -1682,7 +1682,7 @@ static int jpu_probe(struct platform_device *pdev)
jpu->vfd_decoder.device_caps = V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_M2M_MPLANE;
- ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
goto enc_vdev_register_rollback;
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index 197b3991330d..f7d71a6a7970 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -1450,7 +1450,7 @@ static int ceu_notify_complete(struct v4l2_async_notifier *notifier)
V4L2_CAP_STREAMING;
video_set_drvdata(vdev, ceudev);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
v4l2_err(vdev->v4l2_dev,
"video_register_device failed: %d\n", ret);
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index e9ff12b6b5bb..9d122429706e 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -889,7 +889,7 @@ static int rga_probe(struct platform_device *pdev)
def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
def_frame.size = def_frame.stride * def_frame.height;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
goto rel_vdev;
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 2fb45db8e4ba..9ca49af29542 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1158,7 +1158,7 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
vfd->ctrl_handler = &vp->ctrl_handler;
vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_ctrlh_free;
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index f5f05ea9f521..6932fd47071b 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -695,7 +695,7 @@ static int g2d_probe(struct platform_device *pdev)
vfd->lock = &dev->mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
goto rel_vdev;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index ac2162235cef..86bda3947110 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2946,7 +2946,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_encoder->vfl_dir = VFL_DIR_M2M;
jpeg->vfd_encoder->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
- ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
video_device_release(jpeg->vfd_encoder);
@@ -2976,7 +2976,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M;
jpeg->vfd_decoder->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
- ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
video_device_release(jpeg->vfd_decoder);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index b776f83e395e..5c2a23b953a4 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1376,7 +1376,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
s5p_mfc_init_regs(dev);
/* Register decoder and encoder */
- ret = video_register_device(dev->vfd_dec, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(dev->vfd_dec, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
goto err_dec_reg;
@@ -1384,7 +1384,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev,
"decoder registered as /dev/video%d\n", dev->vfd_dec->num);
- ret = video_register_device(dev->vfd_enc, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(dev->vfd_enc, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
goto err_enc_reg;
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 2b4c0d9d6928..f08b8fc192d8 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1160,7 +1160,7 @@ static int sh_veu_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
pm_runtime_suspend(&pdev->dev);
if (ret < 0)
goto evidreg;
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 2236702c21b4..36e5f2ff4ef1 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -1323,7 +1323,7 @@ static int sh_vou_probe(struct platform_device *pdev)
goto ei2cnd;
}
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0)
goto evregdev;
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index d1025a53709f..af2d5eb782ce 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1066,7 +1066,7 @@ static int bdisp_register_device(struct bdisp_dev *bdisp)
return PTR_ERR(bdisp->m2m.m2m_dev);
}
- ret = video_register_device(&bdisp->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&bdisp->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(bdisp->dev,
"%s(): failed to register video device\n", __func__);
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
index 91369fb3ffaa..2503224eeee5 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -1781,7 +1781,7 @@ static int delta_register_device(struct delta_dev *delta)
snprintf(vdev->name, sizeof(vdev->name), "%s-%s",
DELTA_NAME, DELTA_FW_VERSION);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(delta->dev, "%s failed to register video device\n",
DELTA_PREFIX);
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c
index 64004d15a9c9..197b99d8fd9c 100644
--- a/drivers/media/platform/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/sti/hva/hva-v4l2.c
@@ -1316,7 +1316,7 @@ static int hva_register_device(struct hva_dev *hva)
snprintf(vdev->name, sizeof(vdev->name), "%s%lx", HVA_NAME,
hva->ip_version);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(dev, "%s failed to register video device\n",
HVA_PREFIX);
diff --git a/drivers/media/platform/stm32/stm32-cec.c b/drivers/media/platform/stm32/stm32-cec.c
index 8a86b2cc22fa..ea4b1ebfca99 100644
--- a/drivers/media/platform/stm32/stm32-cec.c
+++ b/drivers/media/platform/stm32/stm32-cec.c
@@ -291,7 +291,9 @@ static int stm32_cec_probe(struct platform_device *pdev)
cec->clk_cec = devm_clk_get(&pdev->dev, "cec");
if (IS_ERR(cec->clk_cec)) {
- dev_err(&pdev->dev, "Cannot get cec clock\n");
+ if (PTR_ERR(cec->clk_cec) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Cannot get cec clock\n");
+
return PTR_ERR(cec->clk_cec);
}
@@ -302,10 +304,14 @@ static int stm32_cec_probe(struct platform_device *pdev)
}
cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec");
+ if (IS_ERR(cec->clk_hdmi_cec) &&
+ PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
if (!IS_ERR(cec->clk_hdmi_cec)) {
ret = clk_prepare(cec->clk_hdmi_cec);
if (ret) {
- dev_err(&pdev->dev, "Unable to prepare hdmi-cec clock\n");
+ dev_err(&pdev->dev, "Can't prepare hdmi-cec clock\n");
return ret;
}
}
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 9392e3409fba..b8931490b83b 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -1910,10 +1910,13 @@ static int dcmi_probe(struct platform_device *pdev)
return PTR_ERR(mclk);
}
- chan = dma_request_slave_channel(&pdev->dev, "tx");
- if (!chan) {
- dev_info(&pdev->dev, "Unable to request DMA channel, defer probing\n");
- return -EPROBE_DEFER;
+ chan = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request DMA channel: %d\n", ret);
+ return ret;
}
spin_lock_init(&dcmi->irqlock);
@@ -1971,7 +1974,7 @@ static int dcmi_probe(struct platform_device *pdev)
}
dcmi->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
- ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(dcmi->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(dcmi->dev, "Failed to register video device\n");
goto err_media_entity_cleanup;
diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile
index 3878cb4efdc2..ff0993f70dc3 100644
--- a/drivers/media/platform/sunxi/Makefile
+++ b/drivers/media/platform/sunxi/Makefile
@@ -1,3 +1,4 @@
obj-y += sun4i-csi/
obj-y += sun6i-csi/
obj-y += sun8i-di/
+obj-y += sun8i-rotate/
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
index 83a3a0257c7b..1721e5aee9c6 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
@@ -214,7 +214,7 @@ static int sun4i_csi_open(struct file *file)
if (ret < 0)
goto err_pm_put;
- ret = v4l2_pipeline_pm_use(&csi->vdev.entity, 1);
+ ret = v4l2_pipeline_pm_get(&csi->vdev.entity);
if (ret)
goto err_pm_put;
@@ -227,7 +227,7 @@ static int sun4i_csi_open(struct file *file)
return 0;
err_pipeline_pm_put:
- v4l2_pipeline_pm_use(&csi->vdev.entity, 0);
+ v4l2_pipeline_pm_put(&csi->vdev.entity);
err_pm_put:
pm_runtime_put(csi->dev);
@@ -243,7 +243,7 @@ static int sun4i_csi_release(struct file *file)
mutex_lock(&csi->lock);
v4l2_fh_release(file);
- v4l2_pipeline_pm_use(&csi->vdev.entity, 0);
+ v4l2_pipeline_pm_put(&csi->vdev.entity);
pm_runtime_put(csi->dev);
mutex_unlock(&csi->lock);
@@ -374,7 +374,7 @@ int sun4i_csi_v4l2_register(struct sun4i_csi *csi)
vdev->ioctl_ops = &sun4i_csi_ioctl_ops;
video_set_drvdata(vdev, csi);
- ret = video_register_device(&csi->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&csi->vdev, VFL_TYPE_VIDEO, -1);
if (ret)
return ret;
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index f0dfe68486d1..d9648b2810b9 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -474,7 +474,7 @@ static int sun6i_video_open(struct file *file)
if (ret < 0)
goto unlock;
- ret = v4l2_pipeline_pm_use(&video->vdev.entity, 1);
+ ret = v4l2_pipeline_pm_get(&video->vdev.entity);
if (ret < 0)
goto fh_release;
@@ -507,7 +507,7 @@ static int sun6i_video_close(struct file *file)
_vb2_fop_release(file, NULL);
- v4l2_pipeline_pm_use(&video->vdev.entity, 0);
+ v4l2_pipeline_pm_put(&video->vdev.entity);
if (last_fh)
sun6i_csi_set_power(video->csi, false);
@@ -648,7 +648,7 @@ int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi,
vdev->release = video_device_release_empty;
vdev->fops = &sun6i_video_fops;
vdev->ioctl_ops = &sun6i_video_ioctl_ops;
- vdev->vfl_type = VFL_TYPE_GRABBER;
+ vdev->vfl_type = VFL_TYPE_VIDEO;
vdev->vfl_dir = VFL_DIR_RX;
vdev->v4l2_dev = &csi->v4l2_dev;
vdev->queue = vidq;
@@ -656,7 +656,7 @@ int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi,
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vdev, video);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
v4l2_err(&csi->v4l2_dev,
"video_register_device failed: %d\n", ret);
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index b61f3dea7c93..d78f6593ddd1 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -814,11 +814,8 @@ static int deinterlace_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev->dev, "Failed to get IRQ\n");
-
+ if (irq <= 0)
return irq;
- }
ret = devm_request_irq(dev->dev, irq, deinterlace_irq,
0, dev_name(dev->dev), dev);
@@ -882,7 +879,7 @@ static int deinterlace_probe(struct platform_device *pdev)
deinterlace_video_device.name);
video_set_drvdata(vfd, dev);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/Makefile b/drivers/media/platform/sunxi/sun8i-rotate/Makefile
new file mode 100644
index 000000000000..40f9cf398e98
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-rotate/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+sun8i-rotate-y += sun8i_rotate.o
+sun8i-rotate-y += sun8i_formats.o
+
+obj-$(CONFIG_VIDEO_SUN8I_ROTATE) += sun8i-rotate.o
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i-formats.h b/drivers/media/platform/sunxi/sun8i-rotate/sun8i-formats.h
new file mode 100644
index 000000000000..697cd5fadd5f
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i-formats.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _SUN8I_FORMATS_H_
+#define _SUN8I_FORMATS_H_
+
+#include <linux/videodev2.h>
+
+#define ROTATE_FLAG_YUV BIT(0)
+#define ROTATE_FLAG_OUTPUT BIT(1)
+
+struct rotate_format {
+ u32 fourcc;
+ u32 hw_format;
+ int planes;
+ int bpp[3];
+ int hsub;
+ int vsub;
+ unsigned int flags;
+};
+
+const struct rotate_format *rotate_find_format(u32 pixelformat);
+int rotate_enum_fmt(struct v4l2_fmtdesc *f, bool dst);
+
+#endif
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i-rotate.h b/drivers/media/platform/sunxi/sun8i-rotate/sun8i-rotate.h
new file mode 100644
index 000000000000..32ade97ba572
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i-rotate.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Allwinner DE2 rotation driver
+ *
+ * Copyright (C) 2020 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#ifndef _SUN8I_ROTATE_H_
+#define _SUN8I_ROTATE_H_
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <linux/platform_device.h>
+
+#define ROTATE_NAME "sun8i-rotate"
+
+#define ROTATE_GLB_CTL 0x00
+#define ROTATE_GLB_CTL_START BIT(31)
+#define ROTATE_GLB_CTL_RESET BIT(30)
+#define ROTATE_GLB_CTL_BURST_LEN(x) ((x) << 16)
+#define ROTATE_GLB_CTL_HFLIP BIT(7)
+#define ROTATE_GLB_CTL_VFLIP BIT(6)
+#define ROTATE_GLB_CTL_ROTATION(x) ((x) << 4)
+#define ROTATE_GLB_CTL_MODE(x) ((x) << 0)
+
+#define ROTATE_INT 0x04
+#define ROTATE_INT_FINISH_IRQ_EN BIT(16)
+#define ROTATE_INT_FINISH_IRQ BIT(0)
+
+#define ROTATE_IN_FMT 0x20
+#define ROTATE_IN_FMT_FORMAT(x) ((x) << 0)
+
+#define ROTATE_IN_SIZE 0x24
+#define ROTATE_IN_PITCH0 0x30
+#define ROTATE_IN_PITCH1 0x34
+#define ROTATE_IN_PITCH2 0x38
+#define ROTATE_IN_ADDRL0 0x40
+#define ROTATE_IN_ADDRH0 0x44
+#define ROTATE_IN_ADDRL1 0x48
+#define ROTATE_IN_ADDRH1 0x4c
+#define ROTATE_IN_ADDRL2 0x50
+#define ROTATE_IN_ADDRH2 0x54
+#define ROTATE_OUT_SIZE 0x84
+#define ROTATE_OUT_PITCH0 0x90
+#define ROTATE_OUT_PITCH1 0x94
+#define ROTATE_OUT_PITCH2 0x98
+#define ROTATE_OUT_ADDRL0 0xA0
+#define ROTATE_OUT_ADDRH0 0xA4
+#define ROTATE_OUT_ADDRL1 0xA8
+#define ROTATE_OUT_ADDRH1 0xAC
+#define ROTATE_OUT_ADDRL2 0xB0
+#define ROTATE_OUT_ADDRH2 0xB4
+
+#define ROTATE_BURST_8 0x07
+#define ROTATE_BURST_16 0x0f
+#define ROTATE_BURST_32 0x1f
+#define ROTATE_BURST_64 0x3f
+
+#define ROTATE_MODE_COPY_ROTATE 0x01
+
+#define ROTATE_FORMAT_ARGB32 0x00
+#define ROTATE_FORMAT_ABGR32 0x01
+#define ROTATE_FORMAT_RGBA32 0x02
+#define ROTATE_FORMAT_BGRA32 0x03
+#define ROTATE_FORMAT_XRGB32 0x04
+#define ROTATE_FORMAT_XBGR32 0x05
+#define ROTATE_FORMAT_RGBX32 0x06
+#define ROTATE_FORMAT_BGRX32 0x07
+#define ROTATE_FORMAT_RGB24 0x08
+#define ROTATE_FORMAT_BGR24 0x09
+#define ROTATE_FORMAT_RGB565 0x0a
+#define ROTATE_FORMAT_BGR565 0x0b
+#define ROTATE_FORMAT_ARGB4444 0x0c
+#define ROTATE_FORMAT_ABGR4444 0x0d
+#define ROTATE_FORMAT_RGBA4444 0x0e
+#define ROTATE_FORMAT_BGRA4444 0x0f
+#define ROTATE_FORMAT_ARGB1555 0x10
+#define ROTATE_FORMAT_ABGR1555 0x11
+#define ROTATE_FORMAT_RGBA5551 0x12
+#define ROTATE_FORMAT_BGRA5551 0x13
+
+#define ROTATE_FORMAT_YUYV 0x20
+#define ROTATE_FORMAT_UYVY 0x21
+#define ROTATE_FORMAT_YVYU 0x22
+#define ROTATE_FORMAT_VYUV 0x23
+#define ROTATE_FORMAT_NV61 0x24
+#define ROTATE_FORMAT_NV16 0x25
+#define ROTATE_FORMAT_YUV422P 0x26
+#define ROTATE_FORMAT_NV21 0x28
+#define ROTATE_FORMAT_NV12 0x29
+#define ROTATE_FORMAT_YUV420P 0x2A
+
+#define ROTATE_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
+
+#define ROTATE_MIN_WIDTH 8U
+#define ROTATE_MIN_HEIGHT 8U
+#define ROTATE_MAX_WIDTH 4096U
+#define ROTATE_MAX_HEIGHT 4096U
+
+struct rotate_ctx {
+ struct v4l2_fh fh;
+ struct rotate_dev *dev;
+
+ struct v4l2_pix_format src_fmt;
+ struct v4l2_pix_format dst_fmt;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ u32 hflip;
+ u32 vflip;
+ u32 rotate;
+};
+
+struct rotate_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ /* Device file mutex */
+ struct mutex dev_mutex;
+
+ void __iomem *base;
+
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+
+ struct reset_control *rstc;
+};
+
+#endif
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_formats.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_formats.c
new file mode 100644
index 000000000000..cebfbc5def38
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_formats.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#include "sun8i-formats.h"
+#include "sun8i-rotate.h"
+
+/*
+ * Formats not included in array:
+ * ROTATE_FORMAT_BGR565
+ * ROTATE_FORMAT_VYUV
+ */
+
+static const struct rotate_format rotate_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .hw_format = ROTATE_FORMAT_ARGB32,
+ .planes = 1,
+ .bpp = { 4, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ .hw_format = ROTATE_FORMAT_ABGR32,
+ .planes = 1,
+ .bpp = { 4, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBA32,
+ .hw_format = ROTATE_FORMAT_RGBA32,
+ .planes = 1,
+ .bpp = { 4, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRA32,
+ .hw_format = ROTATE_FORMAT_BGRA32,
+ .planes = 1,
+ .bpp = { 4, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .hw_format = ROTATE_FORMAT_XRGB32,
+ .planes = 1,
+ .bpp = { 4, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .hw_format = ROTATE_FORMAT_XBGR32,
+ .planes = 1,
+ .bpp = { 4, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .hw_format = ROTATE_FORMAT_RGBX32,
+ .planes = 1,
+ .bpp = { 4, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .hw_format = ROTATE_FORMAT_BGRX32,
+ .planes = 1,
+ .bpp = { 4, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .hw_format = ROTATE_FORMAT_RGB24,
+ .planes = 1,
+ .bpp = { 3, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .hw_format = ROTATE_FORMAT_BGR24,
+ .planes = 1,
+ .bpp = { 3, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .hw_format = ROTATE_FORMAT_RGB565,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .hw_format = ROTATE_FORMAT_ARGB4444,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_ABGR444,
+ .hw_format = ROTATE_FORMAT_ABGR4444,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBA444,
+ .hw_format = ROTATE_FORMAT_RGBA4444,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRA444,
+ .hw_format = ROTATE_FORMAT_BGRA4444,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .hw_format = ROTATE_FORMAT_ARGB1555,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_ABGR555,
+ .hw_format = ROTATE_FORMAT_ABGR1555,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBA555,
+ .hw_format = ROTATE_FORMAT_RGBA5551,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRA555,
+ .hw_format = ROTATE_FORMAT_BGRA5551,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 1,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_OUTPUT
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .hw_format = ROTATE_FORMAT_YVYU,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 2,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_YUV
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .hw_format = ROTATE_FORMAT_UYVY,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 2,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_YUV
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .hw_format = ROTATE_FORMAT_YUYV,
+ .planes = 1,
+ .bpp = { 2, 0, 0 },
+ .hsub = 2,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_YUV
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .hw_format = ROTATE_FORMAT_NV61,
+ .planes = 2,
+ .bpp = { 1, 2, 0 },
+ .hsub = 2,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_YUV
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .hw_format = ROTATE_FORMAT_NV16,
+ .planes = 2,
+ .bpp = { 1, 2, 0 },
+ .hsub = 2,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_YUV
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .hw_format = ROTATE_FORMAT_YUV422P,
+ .planes = 3,
+ .bpp = { 1, 1, 1 },
+ .hsub = 2,
+ .vsub = 1,
+ .flags = ROTATE_FLAG_YUV
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .hw_format = ROTATE_FORMAT_NV21,
+ .planes = 2,
+ .bpp = { 1, 2, 0 },
+ .hsub = 2,
+ .vsub = 2,
+ .flags = ROTATE_FLAG_YUV
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .hw_format = ROTATE_FORMAT_NV12,
+ .planes = 2,
+ .bpp = { 1, 2, 0 },
+ .hsub = 2,
+ .vsub = 2,
+ .flags = ROTATE_FLAG_YUV
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .hw_format = ROTATE_FORMAT_YUV420P,
+ .planes = 3,
+ .bpp = { 1, 1, 1 },
+ .hsub = 2,
+ .vsub = 2,
+ .flags = ROTATE_FLAG_YUV | ROTATE_FLAG_OUTPUT
+ },
+};
+
+const struct rotate_format *rotate_find_format(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rotate_formats); i++)
+ if (rotate_formats[i].fourcc == pixelformat)
+ return &rotate_formats[i];
+
+ return NULL;
+}
+
+int rotate_enum_fmt(struct v4l2_fmtdesc *f, bool dst)
+{
+ int i, index;
+
+ index = 0;
+
+ for (i = 0; i < ARRAY_SIZE(rotate_formats); i++) {
+ /* not all formats can be used for capture buffers */
+ if (dst && !(rotate_formats[i].flags & ROTATE_FLAG_OUTPUT))
+ continue;
+
+ if (index == f->index) {
+ f->pixelformat = rotate_formats[i].fourcc;
+
+ return 0;
+ }
+
+ index++;
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
new file mode 100644
index 000000000000..94f505d3cbad
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
@@ -0,0 +1,924 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner sun8i DE2 rotation driver
+ *
+ * Copyright (C) 2020 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "sun8i-formats.h"
+#include "sun8i-rotate.h"
+
+static inline u32 rotate_read(struct rotate_dev *dev, u32 reg)
+{
+ return readl(dev->base + reg);
+}
+
+static inline void rotate_write(struct rotate_dev *dev, u32 reg, u32 value)
+{
+ writel(value, dev->base + reg);
+}
+
+static inline void rotate_set_bits(struct rotate_dev *dev, u32 reg, u32 bits)
+{
+ writel(readl(dev->base + reg) | bits, dev->base + reg);
+}
+
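+/*
+ * Split a contiguous DMA buffer into per-plane addresses and pitches.
+ * Worked example (values chosen for illustration): a 1920x1080
+ * V4L2_PIX_FMT_YUV420 buffer with bytesperline = 1920 yields
+ * pitch = { 1920, 960, 960 } and plane offsets of 0, 1920 * 1080 and
+ * 1920 * 1080 + 960 * 540 bytes.
+ */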
+static void rotate_calc_addr_pitch(dma_addr_t buffer,
+ u32 bytesperline, u32 height,
+ const struct rotate_format *fmt,
+ dma_addr_t *addr, u32 *pitch)
+{
+ u32 size;
+ int i;
+
+ for (i = 0; i < fmt->planes; i++) {
+ pitch[i] = bytesperline;
+ addr[i] = buffer;
+ if (i > 0)
+ pitch[i] /= fmt->hsub / fmt->bpp[i];
+ size = pitch[i] * height;
+ if (i > 0)
+ size /= fmt->vsub;
+ buffer += size;
+ }
+}
+
+static void rotate_device_run(void *priv)
+{
+ struct rotate_ctx *ctx = priv;
+ struct rotate_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src, *dst;
+ const struct rotate_format *fmt;
+ dma_addr_t addr[3];
+ u32 val, pitch[3];
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ val = ROTATE_GLB_CTL_MODE(ROTATE_MODE_COPY_ROTATE);
+ if (ctx->hflip)
+ val |= ROTATE_GLB_CTL_HFLIP;
+ if (ctx->vflip)
+ val |= ROTATE_GLB_CTL_VFLIP;
+ val |= ROTATE_GLB_CTL_ROTATION(ctx->rotate / 90);
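+ /*
+ * 90/270 degree rotations use the shortest burst length,
+ * presumably because of the transposed access pattern.
+ */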
+ if (ctx->rotate != 90 && ctx->rotate != 270)
+ val |= ROTATE_GLB_CTL_BURST_LEN(ROTATE_BURST_64);
+ else
+ val |= ROTATE_GLB_CTL_BURST_LEN(ROTATE_BURST_8);
+ rotate_write(dev, ROTATE_GLB_CTL, val);
+
+ fmt = rotate_find_format(ctx->src_fmt.pixelformat);
+ if (!fmt)
+ return;
+
+ rotate_write(dev, ROTATE_IN_FMT, ROTATE_IN_FMT_FORMAT(fmt->hw_format));
+
+ rotate_calc_addr_pitch(vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0),
+ ctx->src_fmt.bytesperline, ctx->src_fmt.height,
+ fmt, addr, pitch);
+
+ rotate_write(dev, ROTATE_IN_SIZE,
+ ROTATE_SIZE(ctx->src_fmt.width, ctx->src_fmt.height));
+
+ rotate_write(dev, ROTATE_IN_PITCH0, pitch[0]);
+ rotate_write(dev, ROTATE_IN_PITCH1, pitch[1]);
+ rotate_write(dev, ROTATE_IN_PITCH2, pitch[2]);
+
+ rotate_write(dev, ROTATE_IN_ADDRL0, addr[0]);
+ rotate_write(dev, ROTATE_IN_ADDRL1, addr[1]);
+ rotate_write(dev, ROTATE_IN_ADDRL2, addr[2]);
+
+ rotate_write(dev, ROTATE_IN_ADDRH0, 0);
+ rotate_write(dev, ROTATE_IN_ADDRH1, 0);
+ rotate_write(dev, ROTATE_IN_ADDRH2, 0);
+
+ fmt = rotate_find_format(ctx->dst_fmt.pixelformat);
+ if (!fmt)
+ return;
+
+ rotate_calc_addr_pitch(vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0),
+ ctx->dst_fmt.bytesperline, ctx->dst_fmt.height,
+ fmt, addr, pitch);
+
+ rotate_write(dev, ROTATE_OUT_SIZE,
+ ROTATE_SIZE(ctx->dst_fmt.width, ctx->dst_fmt.height));
+
+ rotate_write(dev, ROTATE_OUT_PITCH0, pitch[0]);
+ rotate_write(dev, ROTATE_OUT_PITCH1, pitch[1]);
+ rotate_write(dev, ROTATE_OUT_PITCH2, pitch[2]);
+
+ rotate_write(dev, ROTATE_OUT_ADDRL0, addr[0]);
+ rotate_write(dev, ROTATE_OUT_ADDRL1, addr[1]);
+ rotate_write(dev, ROTATE_OUT_ADDRL2, addr[2]);
+
+ rotate_write(dev, ROTATE_OUT_ADDRH0, 0);
+ rotate_write(dev, ROTATE_OUT_ADDRH1, 0);
+ rotate_write(dev, ROTATE_OUT_ADDRH2, 0);
+
+ rotate_set_bits(dev, ROTATE_INT, ROTATE_INT_FINISH_IRQ_EN);
+ rotate_set_bits(dev, ROTATE_GLB_CTL, ROTATE_GLB_CTL_START);
+}
+
+static irqreturn_t rotate_irq(int irq, void *data)
+{
+ struct vb2_v4l2_buffer *buffer;
+ struct rotate_dev *dev = data;
+ struct rotate_ctx *ctx;
+ unsigned int val;
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_NONE;
+ }
+
+ val = rotate_read(dev, ROTATE_INT);
+ if (!(val & ROTATE_INT_FINISH_IRQ))
+ return IRQ_NONE;
+
+ /* clear flag and disable irq */
+ rotate_write(dev, ROTATE_INT, ROTATE_INT_FINISH_IRQ);
+
+ buffer = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_DONE);
+
+ buffer = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+
+ return IRQ_HANDLED;
+}
+
+static inline struct rotate_ctx *rotate_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct rotate_ctx, fh);
+}
+
+static void rotate_prepare_format(struct v4l2_pix_format *pix_fmt)
+{
+ unsigned int height, width, alignment, sizeimage, size, bpl;
+ const struct rotate_format *fmt;
+ int i;
+
+ fmt = rotate_find_format(pix_fmt->pixelformat);
+ if (!fmt)
+ return;
+
+ width = ALIGN(pix_fmt->width, fmt->hsub);
+ height = ALIGN(pix_fmt->height, fmt->vsub);
+
+ /* all pitches have to be 16 byte aligned */
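+ /*
+ * For multi-planar formats the luma pitch alignment is scaled up so
+ * the derived chroma pitches stay 16-byte aligned as well, e.g.
+ * YUV420 (hsub = 2, bpp[1] = 1) aligns to 32 bytes.
+ */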
+ alignment = 16;
+ if (fmt->planes > 1)
+ alignment *= fmt->hsub / fmt->bpp[1];
+ bpl = ALIGN(width * fmt->bpp[0], alignment);
+
+ sizeimage = 0;
+ for (i = 0; i < fmt->planes; i++) {
+ size = bpl * height;
+ if (i > 0) {
+ size *= fmt->bpp[i];
+ size /= fmt->hsub;
+ size /= fmt->vsub;
+ }
+ sizeimage += size;
+ }
+
+ pix_fmt->width = width;
+ pix_fmt->height = height;
+ pix_fmt->bytesperline = bpl;
+ pix_fmt->sizeimage = sizeimage;
+}
+
+static int rotate_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, ROTATE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, ROTATE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", ROTATE_NAME);
+
+ return 0;
+}
+
+static int rotate_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return rotate_enum_fmt(f, true);
+}
+
+static int rotate_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return rotate_enum_fmt(f, false);
+}
+
+static int rotate_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct rotate_format *fmt;
+
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ fmt = rotate_find_format(fsize->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = ROTATE_MIN_WIDTH;
+ fsize->stepwise.min_height = ROTATE_MIN_HEIGHT;
+ fsize->stepwise.max_width = ROTATE_MAX_WIDTH;
+ fsize->stepwise.max_height = ROTATE_MAX_HEIGHT;
+ fsize->stepwise.step_width = fmt->hsub;
+ fsize->stepwise.step_height = fmt->vsub;
+
+ return 0;
+}
+
+static int rotate_set_cap_format(struct rotate_ctx *ctx,
+ struct v4l2_pix_format *f,
+ u32 rotate)
+{
+ const struct rotate_format *fmt;
+
+ fmt = rotate_find_format(ctx->src_fmt.pixelformat);
+ if (!fmt)
+ return -EINVAL;
+
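+ /*
+ * YUV sources are always rotated into YUV420, the only YUV format
+ * flagged as a valid capture format (ROTATE_FLAG_OUTPUT) in the
+ * format table.
+ */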
+ if (fmt->flags & ROTATE_FLAG_YUV)
+ f->pixelformat = V4L2_PIX_FMT_YUV420;
+ else
+ f->pixelformat = ctx->src_fmt.pixelformat;
+
+ f->field = V4L2_FIELD_NONE;
+
+ if (rotate == 90 || rotate == 270) {
+ f->width = ctx->src_fmt.height;
+ f->height = ctx->src_fmt.width;
+ } else {
+ f->width = ctx->src_fmt.width;
+ f->height = ctx->src_fmt.height;
+ }
+
+ rotate_prepare_format(f);
+
+ return 0;
+}
+
+static int rotate_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rotate_ctx *ctx = rotate_file2ctx(file);
+
+ f->fmt.pix = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int rotate_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rotate_ctx *ctx = rotate_file2ctx(file);
+
+ f->fmt.pix = ctx->src_fmt;
+
+ return 0;
+}
+
+static int rotate_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rotate_ctx *ctx = rotate_file2ctx(file);
+
+ return rotate_set_cap_format(ctx, &f->fmt.pix, ctx->rotate);
+}
+
+static int rotate_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!rotate_find_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ARGB32;
+
+ if (f->fmt.pix.width < ROTATE_MIN_WIDTH)
+ f->fmt.pix.width = ROTATE_MIN_WIDTH;
+ if (f->fmt.pix.height < ROTATE_MIN_HEIGHT)
+ f->fmt.pix.height = ROTATE_MIN_HEIGHT;
+
+ if (f->fmt.pix.width > ROTATE_MAX_WIDTH)
+ f->fmt.pix.width = ROTATE_MAX_WIDTH;
+ if (f->fmt.pix.height > ROTATE_MAX_HEIGHT)
+ f->fmt.pix.height = ROTATE_MAX_HEIGHT;
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ rotate_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int rotate_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rotate_ctx *ctx = rotate_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = rotate_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->dst_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int rotate_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rotate_ctx *ctx = rotate_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = rotate_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ /*
+ * The capture queue also has to be checked, because the capture
+ * format and size depend on the output format and size.
+ */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->src_fmt = f->fmt.pix;
+
+ /* Propagate colorspace information to capture. */
+ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
+ ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
+ ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->dst_fmt.quantization = f->fmt.pix.quantization;
+
+ return rotate_set_cap_format(ctx, &ctx->dst_fmt, ctx->rotate);
+}
+
+static const struct v4l2_ioctl_ops rotate_ioctl_ops = {
+ .vidioc_querycap = rotate_querycap,
+
+ .vidioc_enum_framesizes = rotate_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = rotate_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = rotate_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = rotate_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = rotate_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = rotate_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = rotate_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = rotate_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = rotate_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int rotate_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rotate_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
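+ /* Each buffer is single-planar and must hold the whole image */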
+ if (*nplanes) {
+ if (sizes[0] < pix_fmt->sizeimage)
+ return -EINVAL;
+ } else {
+ sizes[0] = pix_fmt->sizeimage;
+ *nplanes = 1;
+ }
+
+ return 0;
+}
+
+static int rotate_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct rotate_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+
+ return 0;
+}
+
+static void rotate_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rotate_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void rotate_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct rotate_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf;
+
+ do {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (vbuf)
+ v4l2_m2m_buf_done(vbuf, state);
+ } while (vbuf);
+}
+
+static int rotate_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ struct rotate_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx->dev->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ /* get_sync() raises the usage count even on failure */
+ pm_runtime_put_noidle(dev);
+ dev_err(dev, "Failed to enable module\n");
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void rotate_stop_streaming(struct vb2_queue *vq)
+{
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ struct rotate_ctx *ctx = vb2_get_drv_priv(vq);
+
+ pm_runtime_put(ctx->dev->dev);
+ }
+
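+ /* Give back all queued buffers to vb2 in the error state */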
+ rotate_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops rotate_qops = {
+ .queue_setup = rotate_queue_setup,
+ .buf_prepare = rotate_buf_prepare,
+ .buf_queue = rotate_buf_queue,
+ .start_streaming = rotate_start_streaming,
+ .stop_streaming = rotate_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int rotate_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct rotate_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->min_buffers_needed = 1;
+ src_vq->ops = &rotate_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->min_buffers_needed = 2;
+ dst_vq->ops = &rotate_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rotate_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rotate_ctx *ctx = container_of(ctrl->handler,
+ struct rotate_ctx,
+ ctrl_handler);
+ struct v4l2_pix_format fmt;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
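+ /*
+ * 90/270 degree rotation swaps width and height, so the
+ * capture format has to be recomputed for the new angle.
+ */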
+ rotate_set_cap_format(ctx, &fmt, ctrl->val);
+
+ /* Check if capture format needs to be changed */
+ if (fmt.width != ctx->dst_fmt.width ||
+ fmt.height != ctx->dst_fmt.height ||
+ fmt.bytesperline != ctx->dst_fmt.bytesperline ||
+ fmt.sizeimage != ctx->dst_fmt.sizeimage) {
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ rotate_set_cap_format(ctx, &ctx->dst_fmt, ctrl->val);
+ }
+
+ ctx->rotate = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops rotate_ctrl_ops = {
+ .s_ctrl = rotate_s_ctrl,
+};
+
+static int rotate_setup_ctrls(struct rotate_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_err(&ctx->dev->v4l2_dev, "control setup failed!\n");
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+
+ return err;
+ }
+
+ return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+}
+
+static int rotate_open(struct file *file)
+{
+ struct rotate_dev *dev = video_drvdata(file);
+ struct rotate_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(&dev->dev_mutex);
+ return -ENOMEM;
+ }
+
+ /* default output format */
+ ctx->src_fmt.pixelformat = V4L2_PIX_FMT_ARGB32;
+ ctx->src_fmt.field = V4L2_FIELD_NONE;
+ ctx->src_fmt.width = 640;
+ ctx->src_fmt.height = 480;
+ rotate_prepare_format(&ctx->src_fmt);
+
+ /* default capture format */
+ rotate_set_cap_format(ctx, &ctx->dst_fmt, ctx->rotate);
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &rotate_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_free;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ ret = rotate_setup_ctrls(ctx);
+ if (ret)
+ goto err_m2m;
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+
+err_m2m:
+ /* Undo v4l2_fh_add() and drop the m2m context */
+ v4l2_fh_del(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+err_free:
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int rotate_release(struct file *file)
+{
+ struct rotate_dev *dev = video_drvdata(file);
+ struct rotate_ctx *ctx = container_of(file->private_data,
+ struct rotate_ctx, fh);
+
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations rotate_fops = {
+ .owner = THIS_MODULE,
+ .open = rotate_open,
+ .release = rotate_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device rotate_video_device = {
+ .name = ROTATE_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &rotate_fops,
+ .ioctl_ops = &rotate_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops rotate_m2m_ops = {
+ .device_run = rotate_device_run,
+};
+
+static int rotate_probe(struct platform_device *pdev)
+{
+ struct rotate_dev *dev;
+ struct video_device *vfd;
+ int irq, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vfd = rotate_video_device;
+ dev->dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev->dev, "Failed to get IRQ\n");
+
+ return irq;
+ }
+
+ ret = devm_request_irq(dev->dev, irq, rotate_irq,
+ 0, dev_name(dev->dev), dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to request IRQ\n");
+
+ return ret;
+ }
+
+ dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
+
+ dev->bus_clk = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->bus_clk)) {
+ dev_err(dev->dev, "Failed to get bus clock\n");
+
+ return PTR_ERR(dev->bus_clk);
+ }
+
+ dev->mod_clk = devm_clk_get(dev->dev, "mod");
+ if (IS_ERR(dev->mod_clk)) {
+ dev_err(dev->dev, "Failed to get mod clock\n");
+
+ return PTR_ERR(dev->mod_clk);
+ }
+
+ dev->rstc = devm_reset_control_get(dev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ dev_err(dev->dev, "Failed to get reset control\n");
+
+ return PTR_ERR(dev->rstc);
+ }
+
+ mutex_init(&dev->dev_mutex);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register V4L2 device\n");
+
+ return ret;
+ }
+
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s",
+ rotate_video_device.name);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+
+ goto err_v4l2;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ dev->m2m_dev = v4l2_m2m_init(&rotate_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+
+ goto err_video;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ pm_runtime_enable(dev->dev);
+
+ return 0;
+
+err_video:
+ video_unregister_device(&dev->vfd);
+err_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int rotate_remove(struct platform_device *pdev)
+{
+ struct rotate_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int rotate_runtime_resume(struct device *device)
+{
+ struct rotate_dev *dev = dev_get_drvdata(device);
+ int ret;
+
+ ret = clk_prepare_enable(dev->bus_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable bus clock\n");
+
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable mod clock\n");
+
+ goto err_bus_clk;
+ }
+
+ ret = reset_control_deassert(dev->rstc);
+ if (ret) {
+ dev_err(dev->dev, "Failed to apply reset\n");
+
+ goto err_mod_clk;
+ }
+
+ return 0;
+
+err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+err_bus_clk:
+ clk_disable_unprepare(dev->bus_clk);
+
+ return ret;
+}
+
+static int rotate_runtime_suspend(struct device *device)
+{
+ struct rotate_dev *dev = dev_get_drvdata(device);
+
+ reset_control_assert(dev->rstc);
+
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->bus_clk);
+
+ return 0;
+}
+
+static const struct of_device_id rotate_dt_match[] = {
+ { .compatible = "allwinner,sun8i-a83t-de2-rotate" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rotate_dt_match);
+
+static const struct dev_pm_ops rotate_pm_ops = {
+ .runtime_resume = rotate_runtime_resume,
+ .runtime_suspend = rotate_runtime_suspend,
+};
+
+static struct platform_driver rotate_driver = {
+ .probe = rotate_probe,
+ .remove = rotate_remove,
+ .driver = {
+ .name = ROTATE_NAME,
+ .of_match_table = rotate_dt_match,
+ .pm = &rotate_pm_ops,
+ },
+};
+module_platform_driver(rotate_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner DE2 rotate driver");
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index be54806180a5..6c8f3702eac0 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -372,8 +372,6 @@ struct cal_ctx {
struct v4l2_subdev *sensor;
struct v4l2_fwnode_endpoint endpoint;
- struct v4l2_async_subdev asd;
-
struct v4l2_fh fh;
struct cal_dev *dev;
struct cc_data *cc;
@@ -722,16 +720,16 @@ static void enable_irqs(struct cal_ctx *ctx)
static void disable_irqs(struct cal_ctx *ctx)
{
+ u32 val;
+
/* Disable IRQ_WDMA_END 0/1 */
- reg_write_field(ctx->dev,
- CAL_HL_IRQENABLE_CLR(2),
- CAL_HL_IRQ_CLEAR,
- CAL_HL_IRQ_MASK(ctx->csi2_port));
+ val = 0;
+ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val);
/* Disable IRQ_WDMA_START 0/1 */
- reg_write_field(ctx->dev,
- CAL_HL_IRQENABLE_CLR(3),
- CAL_HL_IRQ_CLEAR,
- CAL_HL_IRQ_MASK(ctx->csi2_port));
+ val = 0;
+ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val);
/* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
}
@@ -1948,7 +1946,7 @@ static int cal_complete_ctx(struct cal_ctx *ctx)
vfd->lock = &ctx->mutex;
video_set_drvdata(vfd, ctx);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, video_nr);
if (ret < 0)
return ret;
@@ -2032,7 +2030,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
parent = pdev->dev.of_node;
- asd = &ctx->asd;
endpoint = &ctx->endpoint;
ep_node = NULL;
@@ -2079,8 +2076,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
ctx_dbg(3, ctx, "can't get remote parent\n");
goto cleanup_exit;
}
- asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- asd->match.fwnode = of_fwnode_handle(sensor_node);
v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint);
@@ -2110,9 +2105,17 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
v4l2_async_notifier_init(&ctx->notifier);
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd)
+ goto cleanup_exit;
+
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode = of_fwnode_handle(sensor_node);
+
ret = v4l2_async_notifier_add_subdev(&ctx->notifier, asd);
if (ret) {
ctx_err(ctx, "Error adding asd\n");
+ kfree(asd);
goto cleanup_exit;
}
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 65c2c048b018..cff2fcd6d812 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -2500,7 +2500,7 @@ static void vpe_fw_cb(struct platform_device *pdev)
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
vpe_err(dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 78841b9015ce..ed0ad68c5c48 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -646,7 +646,7 @@ static int viacam_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
* requirement which will keep the CPU out of the deeper sleep
* states.
*/
- pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
+ cpu_latency_qos_add_request(&cam->qos_request, 50);
viacam_start_engine(cam);
return 0;
out:
@@ -662,7 +662,7 @@ static void viacam_vb2_stop_streaming(struct vb2_queue *vq)
struct via_camera *cam = vb2_get_drv_priv(vq);
struct via_buffer *buf, *tmp;
- pm_qos_remove_request(&cam->qos_request);
+ cpu_latency_qos_remove_request(&cam->qos_request);
viacam_stop_engine(cam);
list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
@@ -1262,7 +1262,7 @@ static int viacam_probe(struct platform_device *pdev)
cam->vdev.lock = &cam->lock;
cam->vdev.queue = vq;
video_set_drvdata(&cam->vdev, cam);
- ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto out_irq;
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 82350097503e..30ced1c21387 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -117,15 +117,10 @@ struct vicodec_ctx {
struct vicodec_dev *dev;
bool is_enc;
bool is_stateless;
- bool is_draining;
- bool next_is_last;
- bool has_stopped;
spinlock_t *lock;
struct v4l2_ctrl_handler hdl;
- struct vb2_v4l2_buffer *last_src_buf;
-
/* Source and destination queue data */
struct vicodec_q_data q_data[2];
struct v4l2_fwht_state state;
@@ -431,11 +426,11 @@ static void device_run(void *priv)
v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
spin_lock(ctx->lock);
- if (!ctx->comp_has_next_frame && src_buf == ctx->last_src_buf) {
+ if (!ctx->comp_has_next_frame &&
+ v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src_buf)) {
dst_buf->flags |= V4L2_BUF_FLAG_LAST;
v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
- ctx->is_draining = false;
- ctx->has_stopped = true;
+ v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
}
if (ctx->is_enc || ctx->is_stateless) {
src_buf->sequence = q_src->sequence++;
@@ -586,8 +581,6 @@ static int job_ready(void *priv)
unsigned int max_to_copy;
unsigned int comp_frame_size;
- if (ctx->has_stopped)
- return 0;
if (ctx->source_changed)
return 0;
if (ctx->is_stateless || ctx->is_enc || ctx->comp_has_frame)
@@ -607,7 +600,8 @@ restart:
if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
state = get_next_header(ctx, &p, p_src + sz - p);
if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
- if (ctx->is_draining && src_buf == ctx->last_src_buf)
+ if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx,
+ src_buf))
return 1;
job_remove_src_buf(ctx, state);
goto restart;
@@ -636,7 +630,8 @@ restart:
p += copy;
ctx->comp_size += copy;
if (ctx->comp_size < max_to_copy) {
- if (ctx->is_draining && src_buf == ctx->last_src_buf)
+ if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx,
+ src_buf))
return 1;
job_remove_src_buf(ctx, state);
goto restart;
@@ -1219,41 +1214,6 @@ static int vidioc_s_selection(struct file *file, void *priv,
return 0;
}
-static int vicodec_mark_last_buf(struct vicodec_ctx *ctx)
-{
- struct vb2_v4l2_buffer *next_dst_buf;
- int ret = 0;
-
- spin_lock(ctx->lock);
- if (ctx->is_draining) {
- ret = -EBUSY;
- goto unlock;
- }
- if (ctx->has_stopped)
- goto unlock;
-
- ctx->last_src_buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
- ctx->is_draining = true;
- if (ctx->last_src_buf)
- goto unlock;
-
- next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (!next_dst_buf) {
- ctx->next_is_last = true;
- goto unlock;
- }
-
- next_dst_buf->flags |= V4L2_BUF_FLAG_LAST;
- vb2_buffer_done(&next_dst_buf->vb2_buf, VB2_BUF_STATE_DONE);
- ctx->is_draining = false;
- ctx->has_stopped = true;
- v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
-
-unlock:
- spin_unlock(ctx->lock);
- return ret;
-}
-
static int vicodec_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
@@ -1268,18 +1228,19 @@ static int vicodec_encoder_cmd(struct file *file, void *fh,
!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
- if (ec->cmd == V4L2_ENC_CMD_STOP)
- return vicodec_mark_last_buf(ctx);
- ret = 0;
- spin_lock(ctx->lock);
- if (ctx->is_draining) {
- ret = -EBUSY;
- } else if (ctx->has_stopped) {
- ctx->has_stopped = false;
+ ret = v4l2_m2m_ioctl_encoder_cmd(file, fh, ec);
+ if (ret < 0)
+ return ret;
+
+ if (ec->cmd == V4L2_ENC_CMD_STOP &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
+ v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
+
+ if (ec->cmd == V4L2_ENC_CMD_START &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
- }
- spin_unlock(ctx->lock);
- return ret;
+
+ return 0;
}
static int vicodec_decoder_cmd(struct file *file, void *fh,
@@ -1296,18 +1257,19 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
- if (dc->cmd == V4L2_DEC_CMD_STOP)
- return vicodec_mark_last_buf(ctx);
- ret = 0;
- spin_lock(ctx->lock);
- if (ctx->is_draining) {
- ret = -EBUSY;
- } else if (ctx->has_stopped) {
- ctx->has_stopped = false;
+ ret = v4l2_m2m_ioctl_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ if (dc->cmd == V4L2_DEC_CMD_STOP &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
+ v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
+
+ if (dc->cmd == V4L2_DEC_CMD_START &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
- }
- spin_unlock(ctx->lock);
- return ret;
+
+ return 0;
}
static int vicodec_enum_framesizes(struct file *file, void *fh,
@@ -1480,23 +1442,21 @@ static void vicodec_buf_queue(struct vb2_buffer *vb)
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
};
- if (vb2_is_streaming(vq_cap)) {
- if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type) &&
- ctx->next_is_last) {
- unsigned int i;
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type) &&
+ vb2_is_streaming(vb->vb2_queue) &&
+ v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
+ unsigned int i;
- for (i = 0; i < vb->num_planes; i++)
- vb->planes[i].bytesused = 0;
- vbuf->flags = V4L2_BUF_FLAG_LAST;
- vbuf->field = V4L2_FIELD_NONE;
- vbuf->sequence = get_q_data(ctx, vb->vb2_queue->type)->sequence++;
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
- ctx->is_draining = false;
- ctx->has_stopped = true;
- ctx->next_is_last = false;
- v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
- return;
- }
+ for (i = 0; i < vb->num_planes; i++)
+ vb->planes[i].bytesused = 0;
+
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence =
+ get_q_data(ctx, vb->vb2_queue->type)->sequence++;
+
+ v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
+ v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
+ return;
}
/* buf_queue handles only the first source change event */
@@ -1609,8 +1569,7 @@ static int vicodec_start_streaming(struct vb2_queue *q,
chroma_div = info->width_div * info->height_div;
q_data->sequence = 0;
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- ctx->last_src_buf = NULL;
+ v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
state->gop_cnt = 0;
@@ -1689,29 +1648,12 @@ static void vicodec_stop_streaming(struct vb2_queue *q)
vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
- if (V4L2_TYPE_IS_OUTPUT(q->type)) {
- if (ctx->is_draining) {
- struct vb2_v4l2_buffer *next_dst_buf;
-
- spin_lock(ctx->lock);
- ctx->last_src_buf = NULL;
- next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (!next_dst_buf) {
- ctx->next_is_last = true;
- } else {
- next_dst_buf->flags |= V4L2_BUF_FLAG_LAST;
- vb2_buffer_done(&next_dst_buf->vb2_buf, VB2_BUF_STATE_DONE);
- ctx->is_draining = false;
- ctx->has_stopped = true;
- v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
- }
- spin_unlock(ctx->lock);
- }
- } else {
- ctx->is_draining = false;
- ctx->has_stopped = false;
- ctx->next_is_last = false;
- }
+ v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type) &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
+ v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
+
if (!ctx->is_enc && V4L2_TYPE_IS_OUTPUT(q->type))
ctx->first_source_change_sent = false;
@@ -2120,7 +2062,7 @@ static int register_instance(struct vicodec_dev *dev,
}
video_set_drvdata(vfd, dev);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device '%s'\n", name);
v4l2_m2m_release(dev_instance->m2m_dev);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 8d6b09623d88..ac6717fbb764 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1333,7 +1333,7 @@ static int vim2m_probe(struct platform_device *pdev)
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
goto error_v4l2;
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 76c015898cfd..23e740c1c5c0 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -325,20 +325,20 @@ static const struct media_entity_operations vimc_cap_mops = {
.link_validate = vimc_vdev_link_validate,
};
-static void vimc_cap_release(struct video_device *vdev)
+void vimc_cap_release(struct vimc_ent_device *ved)
{
struct vimc_cap_device *vcap =
- container_of(vdev, struct vimc_cap_device, vdev);
+ container_of(ved, struct vimc_cap_device, ved);
media_entity_cleanup(vcap->ved.ent);
kfree(vcap);
}
-void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
+void vimc_cap_unregister(struct vimc_ent_device *ved)
{
- struct vimc_cap_device *vcap;
+ struct vimc_cap_device *vcap =
+ container_of(ved, struct vimc_cap_device, ved);
- vcap = container_of(ved, struct vimc_cap_device, ved);
vb2_queue_release(&vcap->queue);
video_unregister_device(&vcap->vdev);
}
@@ -423,7 +423,7 @@ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
ret = vb2_queue_init(q);
if (ret) {
- dev_err(&vimc->pdev.dev, "%s: vb2 queue init failed (err=%d)\n",
+ dev_err(vimc->mdev.dev, "%s: vb2 queue init failed (err=%d)\n",
vcfg_name, ret);
goto err_clean_m_ent;
}
@@ -443,13 +443,13 @@ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
vcap->ved.ent = &vcap->vdev.entity;
vcap->ved.process_frame = vimc_cap_process_frame;
vcap->ved.vdev_get_format = vimc_cap_get_format;
- vcap->ved.dev = &vimc->pdev.dev;
+ vcap->ved.dev = vimc->mdev.dev;
/* Initialize the video_device struct */
vdev = &vcap->vdev;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
vdev->entity.ops = &vimc_cap_mops;
- vdev->release = vimc_cap_release;
+ vdev->release = video_device_release_empty;
vdev->fops = &vimc_cap_fops;
vdev->ioctl_ops = &vimc_cap_ioctl_ops;
vdev->lock = &vcap->lock;
@@ -460,9 +460,9 @@ struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
video_set_drvdata(vdev, &vcap->ved);
/* Register the video_device with the v4l2 and the media framework */
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
- dev_err(&vimc->pdev.dev, "%s: video register failed (err=%d)\n",
+ dev_err(vimc->mdev.dev, "%s: video register failed (err=%d)\n",
vcap->vdev.name, ret);
goto err_release_queue;
}
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index 16ce9f3b7c75..c95c17c048f2 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -327,7 +327,6 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
u32 function,
u16 num_pads,
struct media_pad *pads,
- const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops)
{
int ret;
@@ -337,7 +336,6 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
/* Initialize the subdev */
v4l2_subdev_init(sd, sd_ops);
- sd->internal_ops = sd_int_ops;
sd->entity.function = function;
sd->entity.ops = &vimc_ent_sd_mops;
sd->owner = THIS_MODULE;
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 87eb8259c2a8..616d5a6b0754 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -106,14 +106,12 @@ struct vimc_ent_device {
/**
* struct vimc_device - main device for vimc driver
*
- * @pdev pointer to the platform device
* @pipe_cfg pointer to the vimc pipeline configuration structure
* @ent_devs array of vimc_ent_device pointers
* @mdev the associated media_device parent
* @v4l2_dev Internal v4l2 parent device
*/
struct vimc_device {
- struct platform_device pdev;
const struct vimc_pipeline_config *pipe_cfg;
struct vimc_ent_device **ent_devs;
struct media_device mdev;
@@ -127,16 +125,18 @@ struct vimc_device {
* @name entity name
* @ved pointer to vimc_ent_device (a node in the
* topology)
- * @add subdev add hook - initializes and registers
- * subdev called from vimc-core
- * @rm subdev rm hook - unregisters and frees
- * subdev called from vimc-core
+ * @add initializes and registers
+ * vimc entity - called from vimc-core
+ * @unregister unregisters vimc entity - called from vimc-core
+ * @release releases vimc entity - called from the v4l2_dev
+ * release callback
*/
struct vimc_ent_config {
const char *name;
struct vimc_ent_device *(*add)(struct vimc_device *vimc,
const char *vcfg_name);
- void (*rm)(struct vimc_device *vimc, struct vimc_ent_device *ved);
+ void (*unregister)(struct vimc_ent_device *ved);
+ void (*release)(struct vimc_ent_device *ved);
};
/**
@@ -147,22 +147,23 @@ struct vimc_ent_config {
*/
bool vimc_is_source(struct media_entity *ent);
-/* prototypes for vimc_ent_config add and rm hooks */
+/* prototypes for vimc_ent_config hooks */
struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
const char *vcfg_name);
-void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+void vimc_cap_unregister(struct vimc_ent_device *ved);
+void vimc_cap_release(struct vimc_ent_device *ved);
struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
const char *vcfg_name);
-void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+void vimc_deb_release(struct vimc_ent_device *ved);
struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
const char *vcfg_name);
-void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+void vimc_sca_release(struct vimc_ent_device *ved);
struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
const char *vcfg_name);
-void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+void vimc_sen_release(struct vimc_ent_device *ved);
/**
* vimc_pix_map_by_index - get vimc_pix_map struct by its index
@@ -197,7 +198,6 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
* @num_pads: number of pads to initialize
* @pads: the array of pads of the entity, the caller should set the
flags of the pads
- * @sd_int_ops: pointer to &struct v4l2_subdev_internal_ops
* @sd_ops: pointer to &struct v4l2_subdev_ops.
*
* Helper function initialize and register the struct vimc_ent_device and struct
@@ -210,7 +210,6 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
u32 function,
u16 num_pads,
struct media_pad *pads,
- const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops);
/**
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 97a272f3350a..339126e565dc 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -48,48 +48,51 @@ static struct vimc_ent_config ent_config[] = {
{
.name = "Sensor A",
.add = vimc_sen_add,
- .rm = vimc_sen_rm,
+ .release = vimc_sen_release,
},
{
.name = "Sensor B",
.add = vimc_sen_add,
- .rm = vimc_sen_rm,
+ .release = vimc_sen_release,
},
{
.name = "Debayer A",
.add = vimc_deb_add,
- .rm = vimc_deb_rm,
+ .release = vimc_deb_release,
},
{
.name = "Debayer B",
.add = vimc_deb_add,
- .rm = vimc_deb_rm,
+ .release = vimc_deb_release,
},
{
.name = "Raw Capture 0",
.add = vimc_cap_add,
- .rm = vimc_cap_rm,
+ .unregister = vimc_cap_unregister,
+ .release = vimc_cap_release,
},
{
.name = "Raw Capture 1",
.add = vimc_cap_add,
- .rm = vimc_cap_rm,
+ .unregister = vimc_cap_unregister,
+ .release = vimc_cap_release,
},
{
/* TODO: change this to vimc-input when it is implemented */
.name = "RGB/YUV Input",
.add = vimc_sen_add,
- .rm = vimc_sen_rm,
+ .release = vimc_sen_release,
},
{
.name = "Scaler",
.add = vimc_sca_add,
- .rm = vimc_sca_rm,
+ .release = vimc_sca_release,
},
{
.name = "RGB/YUV Capture",
.add = vimc_cap_add,
- .rm = vimc_cap_rm,
+ .unregister = vimc_cap_unregister,
+ .release = vimc_cap_release,
},
};
@@ -162,12 +165,12 @@ static int vimc_add_subdevs(struct vimc_device *vimc)
unsigned int i;
for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
- dev_dbg(&vimc->pdev.dev, "new entity for %s\n",
+ dev_dbg(vimc->mdev.dev, "new entity for %s\n",
vimc->pipe_cfg->ents[i].name);
vimc->ent_devs[i] = vimc->pipe_cfg->ents[i].add(vimc,
vimc->pipe_cfg->ents[i].name);
if (!vimc->ent_devs[i]) {
- dev_err(&vimc->pdev.dev, "add new entity for %s\n",
+ dev_err(vimc->mdev.dev, "add new entity for %s\n",
vimc->pipe_cfg->ents[i].name);
return -EINVAL;
}
@@ -175,13 +178,33 @@ static int vimc_add_subdevs(struct vimc_device *vimc)
return 0;
}
-static void vimc_rm_subdevs(struct vimc_device *vimc)
+static void vimc_release_subdevs(struct vimc_device *vimc)
{
unsigned int i;
for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
if (vimc->ent_devs[i])
- vimc->pipe_cfg->ents[i].rm(vimc, vimc->ent_devs[i]);
+ vimc->pipe_cfg->ents[i].release(vimc->ent_devs[i]);
+}
+
+static void vimc_unregister_subdevs(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ if (vimc->ent_devs[i] && vimc->pipe_cfg->ents[i].unregister)
+ vimc->pipe_cfg->ents[i].unregister(vimc->ent_devs[i]);
+}
+
+static void vimc_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct vimc_device *vimc =
+ container_of(v4l2_dev, struct vimc_device, v4l2_dev);
+
+ vimc_release_subdevs(vimc);
+ media_device_cleanup(&vimc->mdev);
+ kfree(vimc->ent_devs);
+ kfree(vimc);
}
static int vimc_register_devices(struct vimc_device *vimc)
@@ -195,7 +218,6 @@ static int vimc_register_devices(struct vimc_device *vimc)
"v4l2 device register failed (err=%d)\n", ret);
return ret;
}
-
/* allocate ent_devs */
vimc->ent_devs = kcalloc(vimc->pipe_cfg->num_ents,
sizeof(*vimc->ent_devs), GFP_KERNEL);
@@ -236,9 +258,9 @@ static int vimc_register_devices(struct vimc_device *vimc)
err_mdev_unregister:
media_device_unregister(&vimc->mdev);
- media_device_cleanup(&vimc->mdev);
err_rm_subdevs:
- vimc_rm_subdevs(vimc);
+ vimc_unregister_subdevs(vimc);
+ vimc_release_subdevs(vimc);
kfree(vimc->ent_devs);
err_v4l2_unregister:
v4l2_device_unregister(&vimc->v4l2_dev);
@@ -248,20 +270,23 @@ err_v4l2_unregister:
static void vimc_unregister(struct vimc_device *vimc)
{
+ vimc_unregister_subdevs(vimc);
media_device_unregister(&vimc->mdev);
- media_device_cleanup(&vimc->mdev);
v4l2_device_unregister(&vimc->v4l2_dev);
- kfree(vimc->ent_devs);
}
static int vimc_probe(struct platform_device *pdev)
{
- struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
+ struct vimc_device *vimc;
int ret;
dev_dbg(&pdev->dev, "probe");
- memset(&vimc->mdev, 0, sizeof(vimc->mdev));
+ vimc = kzalloc(sizeof(*vimc), GFP_KERNEL);
+ if (!vimc)
+ return -ENOMEM;
+
+ vimc->pipe_cfg = &pipe_cfg;
/* Link the media device within the v4l2_device */
vimc->v4l2_dev.mdev = &vimc->mdev;
@@ -277,20 +302,27 @@ static int vimc_probe(struct platform_device *pdev)
ret = vimc_register_devices(vimc);
if (ret) {
media_device_cleanup(&vimc->mdev);
+ kfree(vimc);
return ret;
}
+ /*
+ * The release callback is set only after a successful registration.
+ * If the registration fails, we release directly from probe.
+ */
+ vimc->v4l2_dev.release = vimc_v4l2_dev_release;
+ platform_set_drvdata(pdev, vimc);
return 0;
}
static int vimc_remove(struct platform_device *pdev)
{
- struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
+ struct vimc_device *vimc = platform_get_drvdata(pdev);
dev_dbg(&pdev->dev, "remove");
- vimc_rm_subdevs(vimc);
vimc_unregister(vimc);
+ v4l2_device_put(&vimc->v4l2_dev);
return 0;
}
@@ -299,12 +331,9 @@ static void vimc_dev_release(struct device *dev)
{
}
-static struct vimc_device vimc_dev = {
- .pipe_cfg = &pipe_cfg,
- .pdev = {
- .name = VIMC_PDEV_NAME,
- .dev.release = vimc_dev_release,
- }
+static struct platform_device vimc_pdev = {
+ .name = VIMC_PDEV_NAME,
+ .dev.release = vimc_dev_release,
};
static struct platform_driver vimc_pdrv = {
@@ -319,16 +348,16 @@ static int __init vimc_init(void)
{
int ret;
- ret = platform_device_register(&vimc_dev.pdev);
+ ret = platform_device_register(&vimc_pdev);
if (ret) {
- dev_err(&vimc_dev.pdev.dev,
+ dev_err(&vimc_pdev.dev,
"platform device registration failed (err=%d)\n", ret);
return ret;
}
ret = platform_driver_register(&vimc_pdrv);
if (ret) {
- dev_err(&vimc_dev.pdev.dev,
+ dev_err(&vimc_pdev.dev,
"platform driver registration failed (err=%d)\n", ret);
platform_driver_unregister(&vimc_pdrv);
return ret;
@@ -341,7 +370,7 @@ static void __exit vimc_exit(void)
{
platform_driver_unregister(&vimc_pdrv);
- platform_device_unregister(&vimc_dev.pdev);
+ platform_device_unregister(&vimc_pdev);
}
module_init(vimc_init);
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 5d1b67d684bb..baf6bf9f65b5 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -494,28 +494,16 @@ static const struct v4l2_ctrl_ops vimc_deb_ctrl_ops = {
.s_ctrl = vimc_deb_s_ctrl,
};
-static void vimc_deb_release(struct v4l2_subdev *sd)
+void vimc_deb_release(struct vimc_ent_device *ved)
{
struct vimc_deb_device *vdeb =
- container_of(sd, struct vimc_deb_device, sd);
+ container_of(ved, struct vimc_deb_device, ved);
v4l2_ctrl_handler_free(&vdeb->hdl);
media_entity_cleanup(vdeb->ved.ent);
kfree(vdeb);
}
-static const struct v4l2_subdev_internal_ops vimc_deb_int_ops = {
- .release = vimc_deb_release,
-};
-
-void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
-{
- struct vimc_deb_device *vdeb;
-
- vdeb = container_of(ved, struct vimc_deb_device, ved);
- v4l2_device_unregister_subdev(&vdeb->sd);
-}
-
static const struct v4l2_ctrl_config vimc_deb_ctrl_class = {
.flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
.id = VIMC_CID_VIMC_CLASS,
@@ -563,13 +551,12 @@ struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
- vdeb->pads,
- &vimc_deb_int_ops, &vimc_deb_ops);
+ vdeb->pads, &vimc_deb_ops);
if (ret)
goto err_free_hdl;
vdeb->ved.process_frame = vimc_deb_process_frame;
- vdeb->ved.dev = &vimc->pdev.dev;
+ vdeb->ved.dev = vimc->mdev.dev;
vdeb->mean_win_size = vimc_deb_ctrl_mean_win_size.def;
/* Initialize the frame format */
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index e2e551bc3ded..7521439747c5 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -464,27 +464,15 @@ static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
return vsca->src_frame;
};
-static void vimc_sca_release(struct v4l2_subdev *sd)
+void vimc_sca_release(struct vimc_ent_device *ved)
{
struct vimc_sca_device *vsca =
- container_of(sd, struct vimc_sca_device, sd);
+ container_of(ved, struct vimc_sca_device, ved);
media_entity_cleanup(vsca->ved.ent);
kfree(vsca);
}
-static const struct v4l2_subdev_internal_ops vimc_sca_int_ops = {
- .release = vimc_sca_release,
-};
-
-void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
-{
- struct vimc_sca_device *vsca;
-
- vsca = container_of(ved, struct vimc_sca_device, ved);
- v4l2_device_unregister_subdev(&vsca->sd);
-}
-
struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
const char *vcfg_name)
{
@@ -504,15 +492,14 @@ struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_SCALER, 2,
- vsca->pads,
- &vimc_sca_int_ops, &vimc_sca_ops);
+ vsca->pads, &vimc_sca_ops);
if (ret) {
kfree(vsca);
return NULL;
}
vsca->ved.process_frame = vimc_sca_process_frame;
- vsca->ved.dev = &vimc->pdev.dev;
+ vsca->ved.dev = vimc->mdev.dev;
/* Initialize the frame format */
vsca->sink_fmt = sink_fmt_default;
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 32380f504591..92daee58209e 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -279,10 +279,10 @@ static const struct v4l2_ctrl_ops vimc_sen_ctrl_ops = {
.s_ctrl = vimc_sen_s_ctrl,
};
-static void vimc_sen_release(struct v4l2_subdev *sd)
+void vimc_sen_release(struct vimc_ent_device *ved)
{
struct vimc_sen_device *vsen =
- container_of(sd, struct vimc_sen_device, sd);
+ container_of(ved, struct vimc_sen_device, ved);
v4l2_ctrl_handler_free(&vsen->hdl);
tpg_free(&vsen->tpg);
@@ -290,18 +290,6 @@ static void vimc_sen_release(struct v4l2_subdev *sd)
kfree(vsen);
}
-static const struct v4l2_subdev_internal_ops vimc_sen_int_ops = {
- .release = vimc_sen_release,
-};
-
-void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
-{
- struct vimc_sen_device *vsen;
-
- vsen = container_of(ved, struct vimc_sen_device, ved);
- v4l2_device_unregister_subdev(&vsen->sd);
-}
-
/* Image Processing Controls */
static const struct v4l2_ctrl_config vimc_sen_ctrl_class = {
.flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
@@ -365,12 +353,12 @@ struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev,
vcfg_name,
MEDIA_ENT_F_CAM_SENSOR, 1, &vsen->pad,
- &vimc_sen_int_ops, &vimc_sen_ops);
+ &vimc_sen_ops);
if (ret)
goto err_free_tpg;
vsen->ved.process_frame = vimc_sen_process_frame;
- vsen->ved.dev = &vimc->pdev.dev;
+ vsen->ved.dev = vimc->mdev.dev;
/* Initialize the frame format */
vsen->mbus_format = fmt_default;
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index cd6b55433c9e..65feb3c596db 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -207,16 +207,27 @@ int vimc_streamer_s_stream(struct vimc_stream *stream,
stream->kthread = kthread_run(vimc_streamer_thread, stream,
"vimc-streamer thread");
- if (IS_ERR(stream->kthread))
- return PTR_ERR(stream->kthread);
+ if (IS_ERR(stream->kthread)) {
+ ret = PTR_ERR(stream->kthread);
+ dev_err(ved->dev, "kthread_run failed with %d\n", ret);
+ vimc_streamer_pipeline_terminate(stream);
+ stream->kthread = NULL;
+ return ret;
+ }
} else {
if (!stream->kthread)
return 0;
ret = kthread_stop(stream->kthread);
+ /*
+ * kthread_stop returns -EINTR in cases when streamon was
+ * immediately followed by streamoff, and the thread didn't have
+ * a chance to run. Ignore the error so that the pipeline is
+ * still stopped.
+ */
if (ret)
- return ret;
+ dev_dbg(ved->dev, "kthread_stop returned '%d'\n", ret);
stream->kthread = NULL;
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 15091cbf6de7..6c740e3e6999 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -407,7 +407,7 @@ static int vidioc_log_status(struct file *file, void *fh)
struct video_device *vdev = video_devdata(file);
v4l2_ctrl_log_status(file, fh);
- if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_GRABBER)
+ if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_VIDEO)
tpg_log_status(&dev->tpg);
return 0;
}
@@ -1525,7 +1525,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
}
#endif
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_cap_nr[inst]);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_cap_nr[inst]);
if (ret < 0)
goto unreg_dev;
v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n",
@@ -1571,14 +1571,14 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
}
v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n",
dev_name(&dev->cec_tx_adap[i]->devnode.dev), i);
- if (i <= out_type_counter[HDMI])
- cec_s_phys_addr(dev->cec_tx_adap[i], i << 12, false);
+ if (i < out_type_counter[HDMI])
+ cec_s_phys_addr(dev->cec_tx_adap[i], (i + 1) << 12, false);
else
cec_s_phys_addr(dev->cec_tx_adap[i], 0x1000, false);
}
#endif
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_out_nr[inst]);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_out_nr[inst]);
if (ret < 0)
goto unreg_dev;
v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s\n",
@@ -1734,7 +1734,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret)
goto unreg_dev;
#endif
- ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO,
meta_cap_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1764,7 +1764,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret)
goto unreg_dev;
#endif
- ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO,
meta_out_nr[inst]);
if (ret < 0)
goto unreg_dev;
diff --git a/drivers/media/platform/vsp1/vsp1_histo.c b/drivers/media/platform/vsp1/vsp1_histo.c
index 30d751f2cccf..a91e142bcb94 100644
--- a/drivers/media/platform/vsp1/vsp1_histo.c
+++ b/drivers/media/platform/vsp1/vsp1_histo.c
@@ -551,7 +551,7 @@ int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
histo->video.fops = &histo_v4l2_fops;
snprintf(histo->video.name, sizeof(histo->video.name),
"%s histo", histo->entity.subdev.name);
- histo->video.vfl_type = VFL_TYPE_GRABBER;
+ histo->video.vfl_type = VFL_TYPE_VIDEO;
histo->video.release = video_device_release_empty;
histo->video.ioctl_ops = &histo_v4l2_ioctl_ops;
histo->video.device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
@@ -576,7 +576,7 @@ int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
/* ... and register the video device. */
histo->video.queue = &histo->queue;
- ret = video_register_device(&histo->video, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&histo->video, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_err(vsp1->dev, "failed to register video device\n");
goto error;
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 5c67ff92d97a..fe3130db1fa2 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -706,7 +706,7 @@
#define VI6_HGT_HUE_AREA_LOWER_SHIFT 16
#define VI6_HGT_HUE_AREA_UPPER_SHIFT 0
#define VI6_HGT_LB_TH 0x3424
-#define VI6_HGT_LBn_H(n) (0x3438 + (n) * 8)
+#define VI6_HGT_LBn_H(n) (0x3428 + (n) * 8)
#define VI6_HGT_LBn_V(n) (0x342c + (n) * 8)
#define VI6_HGT_HISTO(m, n) (0x3450 + (m) * 128 + (n) * 4)
#define VI6_HGT_MAXMIN 0x3750
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 5e59ed2c3614..044eb5778820 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -1293,7 +1293,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
video->video.fops = &vsp1_video_fops;
snprintf(video->video.name, sizeof(video->video.name), "%s %s",
rwpf->entity.subdev.name, direction);
- video->video.vfl_type = VFL_TYPE_GRABBER;
+ video->video.vfl_type = VFL_TYPE_VIDEO;
video->video.release = video_device_release_empty;
video->video.ioctl_ops = &vsp1_video_ioctl_ops;
@@ -1316,7 +1316,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
/* ... and register the video device. */
video->video.queue = &video->queue;
- ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_err(video->vsp1->dev, "failed to register video device\n");
goto error;
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index b211380a11f2..2a56201cb853 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -685,7 +685,7 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
xdev->dev->of_node,
type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
port);
- dma->video.vfl_type = VFL_TYPE_GRABBER;
+ dma->video.vfl_type = VFL_TYPE_VIDEO;
dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
? VFL_DIR_RX : VFL_DIR_TX;
dma->video.release = video_device_release_empty;
@@ -725,16 +725,17 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
/* ... and the DMA channel. */
snprintf(name, sizeof(name), "port%u", port);
- dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
- if (dma->dma == NULL) {
- dev_err(dma->xdev->dev, "no VDMA channel found\n");
- ret = -ENODEV;
+ dma->dma = dma_request_chan(dma->xdev->dev, name);
+ if (IS_ERR(dma->dma)) {
+ ret = PTR_ERR(dma->dma);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dma->xdev->dev, "no VDMA channel found\n");
goto error;
}
dma->align = 1 << dma->dma->device->copy_align;
- ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_err(dma->xdev->dev, "failed to register video device\n");
goto error;
@@ -752,7 +753,7 @@ void xvip_dma_cleanup(struct xvip_dma *dma)
if (video_is_registered(&dma->video))
video_unregister_device(&dma->video);
- if (dma->dma)
+ if (!IS_ERR_OR_NULL(dma->dma))
dma_release_channel(dma->dma);
media_entity_cleanup(&dma->video.entity);
diff --git a/drivers/media/radio/si470x/Kconfig b/drivers/media/radio/si470x/Kconfig
index 537f8e1601f3..a1ba8bc54b62 100644
--- a/drivers/media/radio/si470x/Kconfig
+++ b/drivers/media/radio/si470x/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config RADIO_SI470X
- tristate "Silicon Labs Si470x FM Radio Receiver support"
- depends on VIDEO_V4L2
+ tristate "Silicon Labs Si470x FM Radio Receiver support"
+ depends on VIDEO_V4L2
help
This is a driver for devices with the Silicon Labs SI470x
chip (either via USB or I2C buses).
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 0a0ce620e4a2..0f3417d161b8 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -35,11 +35,6 @@ static const struct bpf_func_proto rc_repeat_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-/*
- * Currently rc-core does not support 64-bit scancodes, but there are many
- * known protocols with more than 32 bits. So, define the interface as u64
- * as a future-proof.
- */
BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode,
u32, toggle)
{
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index a7deca1fefb7..3c8bd13d029a 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -76,7 +76,7 @@ struct send_packet {
uint8_t channels;
uint8_t busy7;
uint8_t busy4;
- uint8_t payload[0];
+ uint8_t payload[];
};
static void process_ir_data(struct iguanair *ir, unsigned len)
diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c
index 74a1d30fae6e..4c3d03876200 100644
--- a/drivers/media/rc/ir-xmp-decoder.c
+++ b/drivers/media/rc/ir-xmp-decoder.c
@@ -166,7 +166,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev)
} else if (geq_margin(ev.duration, XMP_NIBBLE_PREFIX, XMP_UNIT)) {
/* store nibble raw data, decode after trailer */
if (data->count == 16) {
- dev_dbg(&dev->dev, "to many pulses (%d) ignoring: %u\n",
+ dev_dbg(&dev->dev, "too many pulses (%d) ignoring: %u\n",
data->count, ev.duration);
data->state = STATE_INACTIVE;
return -EINVAL;
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 63261ef6380a..aaa1bf81d00d 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-videomate-m1f.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
+ rc-videostrong-kii-pro.o \
rc-wetek-hub.o \
rc-wetek-play2.o \
rc-winfast.o \
diff --git a/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
new file mode 100644
index 000000000000..414d4d231e7e
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Mohammad Rasim <mohammad.rasim96@gmail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+//
+// Keytable for the Videostrong KII Pro STB remote control
+//
+
+static struct rc_map_table kii_pro[] = {
+ { 0x59, KEY_POWER },
+ { 0x19, KEY_MUTE },
+ { 0x42, KEY_RED },
+ { 0x40, KEY_GREEN },
+ { 0x00, KEY_YELLOW },
+ { 0x03, KEY_BLUE },
+ { 0x4a, KEY_BACK },
+ { 0x48, KEY_FORWARD },
+ { 0x08, KEY_PREVIOUSSONG},
+ { 0x0b, KEY_NEXTSONG},
+ { 0x46, KEY_PLAYPAUSE },
+ { 0x44, KEY_STOP },
+ { 0x1f, KEY_FAVORITES}, //KEY_F5?
+ { 0x04, KEY_PVR },
+ { 0x4d, KEY_EPG },
+ { 0x02, KEY_INFO },
+ { 0x09, KEY_SUBTITLE },
+ { 0x01, KEY_AUDIO },
+ { 0x0d, KEY_HOMEPAGE },
+ { 0x11, KEY_TV }, // DTV ?
+ { 0x06, KEY_UP },
+ { 0x5a, KEY_LEFT },
+ { 0x1a, KEY_ENTER }, // KEY_OK ?
+ { 0x1b, KEY_RIGHT },
+ { 0x16, KEY_DOWN },
+ { 0x45, KEY_MENU },
+ { 0x05, KEY_ESC },
+ { 0x13, KEY_VOLUMEUP },
+ { 0x17, KEY_VOLUMEDOWN },
+ { 0x58, KEY_APPSELECT },
+ { 0x12, KEY_VENDOR }, // mouse
+ { 0x55, KEY_PAGEUP }, // KEY_CHANNELUP ?
+ { 0x15, KEY_PAGEDOWN }, // KEY_CHANNELDOWN ?
+ { 0x52, KEY_1 },
+ { 0x50, KEY_2 },
+ { 0x10, KEY_3 },
+ { 0x56, KEY_4 },
+ { 0x54, KEY_5 },
+ { 0x14, KEY_6 },
+ { 0x4e, KEY_7 },
+ { 0x4c, KEY_8 },
+ { 0x0c, KEY_9 },
+ { 0x18, KEY_WWW }, // KEY_F7
+ { 0x0f, KEY_0 },
+ { 0x51, KEY_BACKSPACE },
+};
+
+static struct rc_map_list kii_pro_map = {
+ .map = {
+ .scan = kii_pro,
+ .size = ARRAY_SIZE(kii_pro),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_KII_PRO,
+ }
+};
+
+static int __init init_rc_map_kii_pro(void)
+{
+ return rc_map_register(&kii_pro_map);
+}
+
+static void __exit exit_rc_map_kii_pro(void)
+{
+ rc_map_unregister(&kii_pro_map);
+}
+
+module_init(init_rc_map_kii_pro)
+module_exit(exit_rc_map_kii_pro)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mohammad Rasim <mohammad.rasim96@gmail.com>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 9a8c1cf54ac4..583e4f32a0da 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -269,12 +269,7 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
goto out_unlock;
}
- /*
- * The scancode field in lirc_scancode is 64-bit simply
- * to future-proof it, since there are IR protocols encode
- * use more than 32 bits. For now only 32-bit protocols
- * are supported.
- */
+ /* We only have encoders for 32-bit protocols. */
if (scan.scancode > U32_MAX ||
!rc_validate_scancode(scan.rc_proto, scan.scancode)) {
ret = -EINVAL;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 5c2cd8d2d155..48a69bf23236 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -230,10 +230,10 @@ static ssize_t wakeup_data_show(struct device *dev,
for (i = 0; i < fifo_len; i++) {
duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
- buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len,
+ buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len,
"%d ", duration);
}
- buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
+ buf_len += scnprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
spin_unlock_irqrestore(&nvt->lock, flags);
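The snprintf() → scnprintf() swap matters because the return values are accumulated: snprintf() returns the length the string would have had, so on truncation buf_len can grow past PAGE_SIZE and the next size argument wraps; scnprintf() returns only the bytes actually stored. The safe accumulation pattern, as a sketch (function name hypothetical):

static ssize_t fifo_dump(char *buf, const unsigned int *samples, int n)
{
	ssize_t len = 0;
	int i;

	/* scnprintf() never claims more than it wrote, so buf + len
	 * stays inside the PAGE_SIZE sysfs buffer even when truncating */
	for (i = 0; i < n; i++)
		len += scnprintf(buf + len, PAGE_SIZE - len, "%u ", samples[i]);
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}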
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 6f80c251f641..d7064d664d52 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -164,6 +164,41 @@ static struct rc_map_list empty_map = {
};
/**
+ * scancode_to_u64() - convert the scancode in a &struct input_keymap_entry
+ * @ke: keymap entry containing scancode to be converted.
+ * @scancode: pointer to the location where converted scancode should
+ * be stored.
+ *
+ * This function is a version of input_scancode_to_scalar specialized for
+ * rc-core.
+ */
+static int scancode_to_u64(const struct input_keymap_entry *ke, u64 *scancode)
+{
+ switch (ke->len) {
+ case 1:
+ *scancode = *((u8 *)ke->scancode);
+ break;
+
+ case 2:
+ *scancode = *((u16 *)ke->scancode);
+ break;
+
+ case 4:
+ *scancode = *((u32 *)ke->scancode);
+ break;
+
+ case 8:
+ *scancode = *((u64 *)ke->scancode);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
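A usage sketch for the helper above: pack a 64-bit scancode into the u8 scancode[32] array of an input_keymap_entry, the way an EVIOCSKEYCODE_V2 caller would (values illustrative):

static int scancode_roundtrip_demo(void)
{
	struct input_keymap_entry ke = { .len = sizeof(u64) };
	u64 in = 0x800f0400ULL, out = 0;

	memcpy(ke.scancode, &in, sizeof(in));
	if (scancode_to_u64(&ke, &out))
		return -EINVAL;		/* only 1/2/4/8-byte entries allowed */
	return out == in ? 0 : -ERANGE;
}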
+/**
* ir_create_table() - initializes a scancode table
* @dev: the rc_dev device
* @rc_map: the rc_map to initialize
@@ -285,13 +320,13 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
/* Did the user wish to remove the mapping? */
if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) {
- dev_dbg(&dev->dev, "#%d: Deleting scan 0x%04x\n",
+ dev_dbg(&dev->dev, "#%d: Deleting scan 0x%04llx\n",
index, rc_map->scan[index].scancode);
rc_map->len--;
memmove(&rc_map->scan[index], &rc_map->scan[index+ 1],
(rc_map->len - index) * sizeof(struct rc_map_table));
} else {
- dev_dbg(&dev->dev, "#%d: %s scan 0x%04x with key 0x%04x\n",
+ dev_dbg(&dev->dev, "#%d: %s scan 0x%04llx with key 0x%04x\n",
index,
old_keycode == KEY_RESERVED ? "New" : "Replacing",
rc_map->scan[index].scancode, new_keycode);
@@ -334,8 +369,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
*/
static unsigned int ir_establish_scancode(struct rc_dev *dev,
struct rc_map *rc_map,
- unsigned int scancode,
- bool resize)
+ u64 scancode, bool resize)
{
unsigned int i;
@@ -394,7 +428,7 @@ static int ir_setkeycode(struct input_dev *idev,
struct rc_dev *rdev = input_get_drvdata(idev);
struct rc_map *rc_map = &rdev->rc_map;
unsigned int index;
- unsigned int scancode;
+ u64 scancode;
int retval = 0;
unsigned long flags;
@@ -407,7 +441,7 @@ static int ir_setkeycode(struct input_dev *idev,
goto out;
}
} else {
- retval = input_scancode_to_scalar(ke, &scancode);
+ retval = scancode_to_u64(ke, &scancode);
if (retval)
goto out;
@@ -434,8 +468,7 @@ out:
*
* return: -ENOMEM if all keycodes could not be inserted, otherwise zero.
*/
-static int ir_setkeytable(struct rc_dev *dev,
- const struct rc_map *from)
+static int ir_setkeytable(struct rc_dev *dev, const struct rc_map *from)
{
struct rc_map *rc_map = &dev->rc_map;
unsigned int i, index;
@@ -466,7 +499,7 @@ static int ir_setkeytable(struct rc_dev *dev,
static int rc_map_cmp(const void *key, const void *elt)
{
- const unsigned int *scancode = key;
+ const u64 *scancode = key;
const struct rc_map_table *e = elt;
if (*scancode < e->scancode)
@@ -487,7 +520,7 @@ static int rc_map_cmp(const void *key, const void *elt)
* return: index in the table, -1U if not found
*/
static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
- unsigned int scancode)
+ u64 scancode)
{
struct rc_map_table *res;
@@ -516,7 +549,7 @@ static int ir_getkeycode(struct input_dev *idev,
struct rc_map_table *entry;
unsigned long flags;
unsigned int index;
- unsigned int scancode;
+ u64 scancode;
int retval;
spin_lock_irqsave(&rc_map->lock, flags);
@@ -524,7 +557,7 @@ static int ir_getkeycode(struct input_dev *idev,
if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
index = ke->index;
} else {
- retval = input_scancode_to_scalar(ke, &scancode);
+ retval = scancode_to_u64(ke, &scancode);
if (retval)
goto out;
@@ -538,7 +571,6 @@ static int ir_getkeycode(struct input_dev *idev,
ke->keycode = entry->keycode;
ke->len = sizeof(entry->scancode);
memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
-
} else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) {
/*
* We do not really know the valid range of scancodes
@@ -570,7 +602,7 @@ out:
*
* return: the corresponding keycode, or KEY_RESERVED
*/
-u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode)
+u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode)
{
struct rc_map *rc_map = &dev->rc_map;
unsigned int keycode;
@@ -586,7 +618,7 @@ u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode)
spin_unlock_irqrestore(&rc_map->lock, flags);
if (keycode != KEY_RESERVED)
- dev_dbg(&dev->dev, "%s: scancode 0x%04x keycode 0x%02x\n",
+ dev_dbg(&dev->dev, "%s: scancode 0x%04llx keycode 0x%02x\n",
dev->device_name, scancode, keycode);
return keycode;
@@ -719,8 +751,11 @@ void rc_repeat(struct rc_dev *dev)
spin_lock_irqsave(&dev->keylock, flags);
- input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode);
- input_sync(dev->input_dev);
+ if (dev->last_scancode <= U32_MAX) {
+ input_event(dev->input_dev, EV_MSC, MSC_SCAN,
+ dev->last_scancode);
+ input_sync(dev->input_dev);
+ }
if (dev->keypressed) {
dev->keyup_jiffies = jiffies + timeout;
@@ -743,7 +778,7 @@ EXPORT_SYMBOL_GPL(rc_repeat);
* called with keylock held.
*/
static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
- u32 scancode, u32 keycode, u8 toggle)
+ u64 scancode, u32 keycode, u8 toggle)
{
bool new_event = (!dev->keypressed ||
dev->last_protocol != protocol ||
@@ -761,7 +796,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
if (new_event && dev->keypressed)
ir_do_keyup(dev, false);
- input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
+ if (scancode <= U32_MAX)
+ input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
dev->last_protocol = protocol;
dev->last_scancode = scancode;
@@ -772,7 +808,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
/* Register a keypress */
dev->keypressed = true;
- dev_dbg(&dev->dev, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08x\n",
+ dev_dbg(&dev->dev, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08llx\n",
dev->device_name, keycode, protocol, scancode);
input_report_key(dev->input_dev, keycode, 1);
@@ -809,7 +845,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
* This routine is used to signal that a key has been pressed on the
* remote control.
*/
-void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
+void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
u8 toggle)
{
unsigned long flags;
@@ -840,7 +876,7 @@ EXPORT_SYMBOL_GPL(rc_keydown);
* remote control. The driver must manually call rc_keyup() at a later stage.
*/
void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol,
- u32 scancode, u8 toggle)
+ u64 scancode, u8 toggle)
{
unsigned long flags;
u32 keycode = rc_g_keycode_from_table(dev, scancode);
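The new scancode <= U32_MAX guards in rc_repeat() and ir_do_keydown() exist because input_event() carries its value as a 32-bit integer: a 64-bit scancode cannot be delivered through EV_MSC/MSC_SCAN without truncation, so rc-core now skips the legacy event instead of reporting a mangled value. The pattern, as a sketch:

static void report_msc_scan(struct input_dev *idev, u64 scancode)
{
	/* the MSC_SCAN payload is 32-bit; wider scancodes still drive the
	 * keycode lookup but are not reported via the legacy event */
	if (scancode <= U32_MAX) {
		input_event(idev, EV_MSC, MSC_SCAN, scancode);
		input_sync(idev);
	}
}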
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
index d789d82df7c4..f86ef1ca1288 100644
--- a/drivers/media/spi/gs1662.c
+++ b/drivers/media/spi/gs1662.c
@@ -147,11 +147,17 @@ static int gs_read_register(struct spi_device *spi, u16 addr, u16 *value)
{
.tx_buf = &buf_addr,
.len = 2,
- .delay_usecs = 1,
+ .delay = {
+ .value = 1,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
}, {
.rx_buf = &buf_value,
.len = 2,
- .delay_usecs = 1,
+ .delay = {
+ .value = 1,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
},
};
@@ -175,11 +181,17 @@ static int gs_write_register(struct spi_device *spi, u16 addr, u16 value)
{
.tx_buf = &buf_addr,
.len = 2,
- .delay_usecs = 1,
+ .delay = {
+ .value = 1,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
}, {
.tx_buf = &buf_value,
.len = 2,
- .delay_usecs = 1,
+ .delay = {
+ .value = 1,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
},
};
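The removed delay_usecs field is superseded by struct spi_delay, which pairs the value with an explicit unit (microseconds, nanoseconds, or SCK cycles) so the SPI core can apply it uniformly through spi_delay_exec(). The equivalent initializer, as a sketch:

static const struct spi_transfer xfer_template = {
	.len = 2,
	.delay = {
		.value = 1,
		.unit = SPI_DELAY_UNIT_USECS,	/* was: .delay_usecs = 1 */
	},
};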
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 03c2944f6273..e678d3d11467 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -25,7 +25,6 @@ if MEDIA_ANALOG_TV_SUPPORT
comment "Analog TV USB devices"
source "drivers/media/usb/pvrusb2/Kconfig"
source "drivers/media/usb/hdpvr/Kconfig"
-source "drivers/media/usb/usbvision/Kconfig"
source "drivers/media/usb/stk1160/Kconfig"
source "drivers/media/usb/go7007/Kconfig"
endif
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 21e46b10caa5..169aa07c97bd 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -17,7 +17,6 @@ obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
-obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
obj-$(CONFIG_VIDEO_STK1160) += stk1160/
obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
obj-$(CONFIG_VIDEO_TM6000) += tm6000/
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index d1895334cbbf..51b8d14fb4dc 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1042,7 +1042,7 @@ static int au0828_v4l2_close(struct file *filp)
dev->streaming_users, dev->users);
mutex_lock(&dev->lock);
- if (vdev->vfl_type == VFL_TYPE_GRABBER && dev->vid_timeout_running) {
+ if (vdev->vfl_type == VFL_TYPE_VIDEO && dev->vid_timeout_running) {
/* Cancel timeout thread in case they didn't call streamoff */
dev->vid_timeout_running = 0;
del_timer_sync(&dev->vid_timeout);
@@ -2007,7 +2007,7 @@ int au0828_analog_register(struct au0828_dev *dev,
/* Register the v4l2 device */
video_set_drvdata(&dev->vdev, dev);
- retval = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
+ retval = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1);
if (retval != 0) {
dprintk(1, "unable to register video device (error = %d).\n",
retval);
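This series renames VFL_TYPE_GRABBER to VFL_TYPE_VIDEO; the many hunks that follow apply the same mechanical substitution to every video_register_device() call and vfl_type comparison. Minimal registration sketch:

static int register_video_node(struct video_device *vdev)
{
	/* VFL_TYPE_VIDEO (formerly VFL_TYPE_GRABBER) picks a /dev/videoN minor */
	return video_register_device(vdev, VFL_TYPE_VIDEO, -1);
}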
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 039963a7765b..198ddfb8d2b1 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -511,6 +511,9 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
return ret;
}
+ if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
err("cannot handle USB speed because it is too slow.");
@@ -544,9 +547,6 @@ static int flexcop_usb_probe(struct usb_interface *intf,
struct flexcop_device *fc = NULL;
int ret;
- if (intf->cur_altsetting->desc.bNumEndpoints < 1)
- return -ENODEV;
-
if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
err("out of memory\n");
return -ENOMEM;
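Moving the bNumEndpoints check from probe() into flexcop_usb_init() places the validation next to the first endpoint use and tests cur_altsetting, the setting actually in effect. The generic guard, as a sketch (helper name hypothetical):

static int validate_first_endpoint(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	/* a malformed or malicious device may advertise zero endpoints;
	 * never dereference alt->endpoint[0] before proving it exists */
	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;
	return usb_endpoint_is_bulk_in(&alt->endpoint[0].desc) ? 0 : -ENODEV;
}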
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 9d3d05125d7b..e488e7870f42 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -1134,7 +1134,7 @@ int cpia2_register_camera(struct camera_data *cam)
reset_camera_struct_v4l(cam);
/* register v4l device */
- if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
+ if (video_register_device(&cam->vdev, VFL_TYPE_VIDEO, video_nr) < 0) {
ERR("video_register_device failed\n");
return -ENODEV;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 1aec4459f50a..b0cd51134654 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1790,7 +1790,7 @@ int cx231xx_417_register(struct cx231xx *dev)
dev->v4l_device.queue = q;
err = video_register_device(&dev->v4l_device,
- VFL_TYPE_GRABBER, -1);
+ VFL_TYPE_VIDEO, -1);
if (err < 0) {
dprintk(3, "%s: can't register mpeg device\n", dev->name);
v4l2_ctrl_handler_free(&dev->mpeg_ctrl_handler.hdl);
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index e205f7f0a56a..0037b4b1381e 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -147,7 +147,7 @@ static struct tda18271_config pv_tda18271_config = {
.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
-static struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = {
+static const struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = {
.qam_if_khz = 4000,
.vsb_if_khz = 3250,
.spectral_inversion = 1,
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 69abafaebbf3..8bff7d8a0310 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1785,7 +1785,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev->vdev.device_caps |= V4L2_CAP_TUNER;
/* register v4l2 video video_device */
- ret = video_register_device(&dev->vdev, VFL_TYPE_GRABBER,
+ ret = video_register_device(&dev->vdev, VFL_TYPE_VIDEO,
video_nr[dev->devno]);
if (ret) {
dev_err(dev->dev,
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index 0514e87405b6..89a1b204b90c 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -318,14 +318,14 @@ static struct tda10023_config anysee_tda10023_tda18212_config = {
.deltaf = 0xba02,
};
-static struct tda18212_config anysee_tda18212_config = {
+static const struct tda18212_config anysee_tda18212_config = {
.if_dvbt_6 = 4150,
.if_dvbt_7 = 4150,
.if_dvbt_8 = 4150,
.if_dvbc = 5000,
};
-static struct tda18212_config anysee_tda18212_config2 = {
+static const struct tda18212_config anysee_tda18212_config2 = {
.if_dvbt_6 = 3550,
.if_dvbt_7 = 3700,
.if_dvbt_8 = 4150,
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 62d3566bf7ee..fd8b42bb9a84 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -486,13 +486,10 @@ static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
static u8 obuf[64], ibuf[64];
int i, read, read_o;
u16 len;
- u8 gate = st->i2c_gate;
+ u8 gate;
mutex_lock(&d->i2c_mutex);
- if (gate == 0)
- gate = 5;
-
for (i = 0; i < num; i++) {
read_o = msg[i].flags & I2C_M_RD;
read = i + 1 < num && msg[i + 1].flags & I2C_M_RD;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c6881a1b3232..2080f6ef4be1 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -552,6 +552,9 @@ tuner_found:
if (ret)
goto err;
+ /* slave demod needs some time to wake up */
+ msleep(20);
+
/* check slave answers */
ret = rtl28xxu_ctrl_msg(d, &req_mn88472);
if (ret == 0 && buf[0] == 0x02) {
diff --git a/drivers/media/usb/dvb-usb/cxusb-analog.c b/drivers/media/usb/dvb-usb/cxusb-analog.c
index 0699f718d052..001cae648797 100644
--- a/drivers/media/usb/dvb-usb/cxusb-analog.c
+++ b/drivers/media/usb/dvb-usb/cxusb-analog.c
@@ -1223,7 +1223,7 @@ static int cxusb_medion_g_tuner(struct file *file, void *fh,
if (tuner->index != 0)
return -EINVAL;
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ if (vdev->vfl_type == VFL_TYPE_VIDEO)
tuner->type = V4L2_TUNER_ANALOG_TV;
else
tuner->type = V4L2_TUNER_RADIO;
@@ -1259,7 +1259,7 @@ static int cxusb_medion_g_tuner(struct file *file, void *fh,
if (ret != 0)
return ret;
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ if (vdev->vfl_type == VFL_TYPE_VIDEO)
strscpy(tuner->name, "TV tuner", sizeof(tuner->name));
else
strscpy(tuner->name, "Radio tuner", sizeof(tuner->name));
@@ -1292,7 +1292,7 @@ static int cxusb_medion_s_tuner(struct file *file, void *fh,
* make sure that cx25840 is in a correct TV / radio mode,
* since calls above may have changed it for tuner / IF demod
*/
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ if (vdev->vfl_type == VFL_TYPE_VIDEO)
v4l2_subdev_call(cxdev->cx25840, video, s_std, cxdev->norm);
else
v4l2_subdev_call(cxdev->cx25840, tuner, s_radio);
@@ -1335,7 +1335,7 @@ static int cxusb_medion_s_frequency(struct file *file, void *fh,
* make sure that cx25840 is in a correct TV / radio mode,
* since calls above may have changed it for tuner / IF demod
*/
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ if (vdev->vfl_type == VFL_TYPE_VIDEO)
v4l2_subdev_call(cxdev->cx25840, video, s_std, cxdev->norm);
else
v4l2_subdev_call(cxdev->cx25840, tuner, s_radio);
@@ -1564,7 +1564,7 @@ static int cxusb_videoradio_release(struct file *f)
cxusb_vprintk(dvbdev, OPS, "got release\n");
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ if (vdev->vfl_type == VFL_TYPE_VIDEO)
ret = vb2_fop_release(f);
else
ret = v4l2_fh_release(f);
@@ -1663,7 +1663,7 @@ static int cxusb_medion_register_analog_video(struct dvb_usb_device *dvbdev)
cxdev->videodev->lock = &cxdev->dev_lock;
video_set_drvdata(cxdev->videodev, dvbdev);
- ret = video_register_device(cxdev->videodev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(cxdev->videodev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(&dvbdev->udev->dev,
"video device register failed, ret = %d\n", ret);
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index e53c58ab6488..ef62dd6c5ae4 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -818,7 +818,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
- if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
+ if (intf->cur_altsetting->desc.bNumEndpoints < rc_ep + 1)
return -ENODEV;
purb = usb_alloc_urb(0, GFP_KERNEL);
@@ -838,7 +838,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
* Some devices like the Hauppauge NovaTD model 52009 use an interrupt
* endpoint, while others use a bulk one.
*/
- e = &intf->altsetting[0].endpoint[rc_ep].desc;
+ e = &intf->cur_altsetting->endpoint[rc_ep].desc;
if (usb_endpoint_dir_in(e)) {
if (usb_endpoint_xfer_bulk(e)) {
pipe = usb_rcvbulkpipe(d->udev, rc_ep);
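altsetting[0] is merely the first-listed alternate setting; after usb_set_interface() the active one may differ, so endpoint lookups belong on cur_altsetting, which always tracks the selected setting. Sketch (helper name hypothetical):

static const struct usb_endpoint_descriptor *
active_first_ep(struct usb_interface *intf, unsigned int min_eps)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bNumEndpoints < min_eps)
		return NULL;	/* active setting lacks the endpoint */
	return &alt->endpoint[0].desc;
}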
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 8b584507dd59..1007366a295b 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1524,6 +1524,29 @@ static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
return -EIO;
}
+static int tt_s2_4600_frontend_attach_probe_demod(struct dvb_usb_device *d,
+ const int probe_addr)
+{
+ struct dw2102_state *state = d->priv;
+
+ state->data[0] = 0x9;
+ state->data[1] = 0x1;
+ state->data[2] = 0x1;
+ state->data[3] = probe_addr;
+ state->data[4] = 0x0;
+
+ if (dvb_usb_generic_rw(d, state->data, 5, state->data, 2, 0) < 0) {
+ err("i2c probe for address 0x%x failed.", probe_addr);
+ return 0;
+ }
+
+ if (state->data[0] != 8) /* fail(7) or error, no device at address */
+ return 0;
+
+ /* probing successful */
+ return 1;
+}
+
static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap->dev;
@@ -1533,6 +1556,7 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
struct i2c_board_info board_info;
struct m88ds3103_platform_data m88ds3103_pdata = {};
struct ts2020_config ts2020_config = {};
+ int demod_addr;
mutex_lock(&d->data_mutex);
@@ -1570,8 +1594,22 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
+ /* probe for demodulator i2c address */
+ demod_addr = -1;
+ if (tt_s2_4600_frontend_attach_probe_demod(d, 0x68))
+ demod_addr = 0x68;
+ else if (tt_s2_4600_frontend_attach_probe_demod(d, 0x69))
+ demod_addr = 0x69;
+ else if (tt_s2_4600_frontend_attach_probe_demod(d, 0x6a))
+ demod_addr = 0x6a;
+
mutex_unlock(&d->data_mutex);
+ if (demod_addr < 0) {
+ err("probing for demodulator failed. Is the external power switched on?");
+ return -ENODEV;
+ }
+
/* attach demod */
m88ds3103_pdata.clk = 27000000;
m88ds3103_pdata.i2c_wr_max = 33;
@@ -1586,8 +1624,11 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
m88ds3103_pdata.lnb_hv_pol = 1;
m88ds3103_pdata.lnb_en_pol = 0;
memset(&board_info, 0, sizeof(board_info));
- strscpy(board_info.type, "m88ds3103", I2C_NAME_SIZE);
- board_info.addr = 0x68;
+ if (demod_addr == 0x6a)
+ strscpy(board_info.type, "m88ds3103b", I2C_NAME_SIZE);
+ else
+ strscpy(board_info.type, "m88ds3103", I2C_NAME_SIZE);
+ board_info.addr = demod_addr;
board_info.platform_data = &m88ds3103_pdata;
request_module("m88ds3103");
client = i2c_new_client_device(&d->i2c_adap, &board_info);
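The 0x68/0x69/0x6a if/else chain above could equally be written table-driven; a sketch of the same probe using the helper this patch introduces (wrapper name hypothetical):

static int tt_s2_4600_find_demod(struct dvb_usb_device *d)
{
	static const u8 addrs[] = { 0x68, 0x69, 0x6a };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(addrs); i++)
		if (tt_s2_4600_frontend_attach_probe_demod(d, addrs[i]))
			return addrs[i];	/* responding demodulator */
	return -ENODEV;	/* likely the external power is off */
}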
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index def9cdd931a9..a8c321d11827 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2398,6 +2398,20 @@ const struct em28xx_board em28xx_boards[] = {
.ir_codes = RC_MAP_PINNACLE_PCTV_HD,
},
/*
+ * 2013:0259 PCTV DVB-S2 Stick (461e_v2)
+ * Empia EM28178, Montage M88DS3103b, Montage M88TS2022, Allegro A8293
+ */
+ [EM28178_BOARD_PCTV_461E_V2] = {
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ .name = "PCTV DVB-S2 Stick (461e v2)",
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = pctv_461e,
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
+ },
+ /*
* 2013:025f PCTV tripleStick (292e).
* Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2157
*/
@@ -2696,6 +2710,10 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE },
{ USB_DEVICE(0x2013, 0x0258),
.driver_info = EM28178_BOARD_PCTV_461E },
+ { USB_DEVICE(0x2013, 0x0461),
+ .driver_info = EM28178_BOARD_PCTV_461E_V2 },
+ { USB_DEVICE(0x2013, 0x0259),
+ .driver_info = EM28178_BOARD_PCTV_461E_V2 },
{ USB_DEVICE(0x2013, 0x025f),
.driver_info = EM28178_BOARD_PCTV_292E },
{ USB_DEVICE(0x2013, 0x0264), /* Hauppauge WinTV-soloHD 292e SE */
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 0ab6c493bc74..fb9cbfa81a84 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -1219,6 +1219,61 @@ static int em28178_dvb_init_pctv_461e(struct em28xx *dev)
return 0;
}
+static int em28178_dvb_init_pctv_461e_v2(struct em28xx *dev)
+{
+ struct em28xx_dvb *dvb = dev->dvb;
+ struct i2c_adapter *i2c_adapter;
+ struct m88ds3103_platform_data m88ds3103_pdata = {};
+ struct ts2020_config ts2020_config = {};
+ struct a8293_platform_data a8293_pdata = {};
+
+ /* attach demod */
+ m88ds3103_pdata.clk = 27000000;
+ m88ds3103_pdata.i2c_wr_max = 33;
+ m88ds3103_pdata.ts_mode = M88DS3103_TS_PARALLEL;
+ m88ds3103_pdata.ts_clk = 16000;
+ m88ds3103_pdata.ts_clk_pol = 0;
+ m88ds3103_pdata.agc = 0x99;
+ m88ds3103_pdata.agc_inv = 0;
+ m88ds3103_pdata.spec_inv = 0;
+ dvb->i2c_client_demod = dvb_module_probe("m88ds3103", "m88ds3103b",
+ &dev->i2c_adap[dev->def_i2c_bus],
+ 0x6a, &m88ds3103_pdata);
+
+ if (!dvb->i2c_client_demod)
+ return -ENODEV;
+
+ dvb->fe[0] = m88ds3103_pdata.get_dvb_frontend(dvb->i2c_client_demod);
+ i2c_adapter = m88ds3103_pdata.get_i2c_adapter(dvb->i2c_client_demod);
+
+ /* attach tuner */
+ ts2020_config.fe = dvb->fe[0];
+ dvb->i2c_client_tuner = dvb_module_probe("ts2020", "ts2022",
+ i2c_adapter,
+ 0x60, &ts2020_config);
+ if (!dvb->i2c_client_tuner) {
+ dvb_module_release(dvb->i2c_client_demod);
+ return -ENODEV;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ dvb->fe[0]->ops.read_signal_strength =
+ dvb->fe[0]->ops.tuner_ops.get_rf_strength;
+
+ /* attach SEC */
+ a8293_pdata.dvb_frontend = dvb->fe[0];
+ dvb->i2c_client_sec = dvb_module_probe("a8293", NULL,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ 0x08, &a8293_pdata);
+ if (!dvb->i2c_client_sec) {
+ dvb_module_release(dvb->i2c_client_tuner);
+ dvb_module_release(dvb->i2c_client_demod);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int em28178_dvb_init_pctv_292e(struct em28xx *dev)
{
struct em28xx_dvb *dvb = dev->dvb;
@@ -1860,6 +1915,11 @@ static int em28xx_dvb_init(struct em28xx *dev)
if (result)
goto out_free;
break;
+ case EM28178_BOARD_PCTV_461E_V2:
+ result = em28178_dvb_init_pctv_461e_v2(dev);
+ if (result)
+ goto out_free;
+ break;
case EM28178_BOARD_PCTV_292E:
result = em28178_dvb_init_pctv_292e(dev);
if (result)
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index b0f7390e4b4f..6b84c3413e83 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -2141,7 +2141,7 @@ static int em28xx_v4l2_open(struct file *filp)
int ret;
switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
break;
case VFL_TYPE_VBI:
@@ -2789,7 +2789,7 @@ static int em28xx_v4l2_init(struct em28xx *dev)
}
/* register v4l2 video video_device */
- ret = video_register_device(&v4l2->vdev, VFL_TYPE_GRABBER,
+ ret = video_register_device(&v4l2->vdev, VFL_TYPE_VIDEO,
video_nr[dev->devno]);
if (ret) {
dev_err(&dev->intf->dev,
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 4ecadd57dac7..acbb62397314 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -150,6 +150,7 @@
#define EM2884_BOARD_TERRATEC_H6 101
#define EM2882_BOARD_ZOLID_HYBRID_TV_STICK 102
#define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103
+#define EM28178_BOARD_PCTV_461E_V2 104
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index ff2aa057c1fb..f889c9d740cd 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1044,6 +1044,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
struct go7007_usb *usb;
const struct go7007_usb_board *board;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_host_endpoint *ep;
unsigned num_i2c_devs;
char *name;
int video_pipe, i, v_urb_len;
@@ -1140,7 +1141,8 @@ static int go7007_usb_probe(struct usb_interface *intf,
if (usb->intr_urb->transfer_buffer == NULL)
goto allocfail;
- if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
+ ep = usb->usbdev->ep_in[4];
+ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
usb_rcvbulkpipe(usb->usbdev, 4),
usb->intr_urb->transfer_buffer, 2*sizeof(u16),
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index 0b3d185f3cb0..b2edc4deaca3 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -1138,7 +1138,7 @@ int go7007_v4l2_init(struct go7007 *go)
go7007_s_input(go);
if (go->board_info->sensor_flags & GO7007_SENSOR_TV)
go7007_s_std(go);
- rv = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ rv = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (rv < 0)
return rv;
dev_info(go->dev, "registered device %s [v4l2]\n",
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index c1b307bbe540..0566e00d6fea 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1555,7 +1555,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
/* init video stuff */
ret = video_register_device(&gspca_dev->vdev,
- VFL_TYPE_GRABBER,
+ VFL_TYPE_VIDEO,
-1);
if (ret < 0) {
pr_err("video_register_device err %d\n", ret);
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index f417dfc0b872..0afe70a3f9a2 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -3477,6 +3477,11 @@ static void ov511_mode_init_regs(struct sd *sd)
return;
}
+ if (alt->desc.bNumEndpoints < 1) {
+ sd->gspca_dev.usb_err = -ENODEV;
+ return;
+ }
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
@@ -3603,6 +3608,11 @@ static void ov518_mode_init_regs(struct sd *sd)
return;
}
+ if (alt->desc.bNumEndpoints < 1) {
+ sd->gspca_dev.usb_err = -ENODEV;
+ return;
+ }
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 79653d409951..95673fc0a99c 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -282,6 +282,9 @@ static int stv06xx_start(struct gspca_dev *gspca_dev)
return -EIO;
}
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size);
if (err < 0)
@@ -306,11 +309,21 @@ out:
static int stv06xx_isoc_init(struct gspca_dev *gspca_dev)
{
+ struct usb_interface_cache *intfc;
struct usb_host_interface *alt;
struct sd *sd = (struct sd *) gspca_dev;
+ intfc = gspca_dev->dev->actconfig->intf_cache[0];
+
+ if (intfc->num_altsetting < 2)
+ return -ENODEV;
+
+ alt = &intfc->altsetting[1];
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
- alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
alt->endpoint[0].desc.wMaxPacketSize =
cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]);
@@ -323,6 +336,10 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev)
struct usb_host_interface *alt;
struct sd *sd = (struct sd *) gspca_dev;
+ /*
+ * Existence of altsetting and endpoint was verified in
+ * stv06xx_isoc_init()
+ */
alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode];
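The isoc bandwidth "negotiation" writes wMaxPacketSize into the cached altsetting descriptors, so both the altsetting count and the endpoint count must be proven before the write; a device exposing fewer cached altsettings would otherwise trigger an out-of-bounds access. The new checks, condensed into one sketch:

static int check_cached_isoc_alt(struct usb_device *udev)
{
	struct usb_interface_cache *intfc = udev->actconfig->intf_cache[0];

	if (intfc->num_altsetting < 2 ||
	    intfc->altsetting[1].desc.bNumEndpoints < 1)
		return -ENODEV;
	return 0;	/* altsetting[1].endpoint[0] is safe to patch */
}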
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
index 6d1007715ff7..ae382b3b5f7f 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
@@ -185,6 +185,10 @@ static int pb0100_start(struct sd *sd)
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt)
return -ENODEV;
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
/* If we don't have enough bandwidth use a lower framerate */
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index 934a90bd78c2..c579b100f066 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -1442,6 +1442,9 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev)
return -EIO;
}
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
}
@@ -2626,6 +2629,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
static int sd_isoc_init(struct gspca_dev *gspca_dev)
{
+ struct usb_interface_cache *intfc;
struct usb_host_interface *alt;
int max_packet_size;
@@ -2641,8 +2645,17 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
break;
}
+ intfc = gspca_dev->dev->actconfig->intf_cache[0];
+
+ if (intfc->num_altsetting < 2)
+ return -ENODEV;
+
+ alt = &intfc->altsetting[1];
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
- alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
return 0;
@@ -2665,6 +2678,9 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
break;
}
+ /*
+ * Existence of altsetting and endpoint was verified in sd_isoc_init()
+ */
alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
if (packet_size <= min_packet_size)
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index bad71d863d39..563128d11731 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -1238,7 +1238,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
dev->video_dev.v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(&dev->video_dev, dev);
- res = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER, devnum);
+ res = video_register_device(&dev->video_dev, VFL_TYPE_VIDEO, devnum);
if (res < 0) {
v4l2_err(&dev->v4l2_dev, "video_device registration failed\n");
goto error;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index eaa08c7999d4..9657c1883311 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1196,7 +1196,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
hdw = vp->channel.mc_head->hdw;
dip->v4l_type = v4l_type;
switch (v4l_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
dip->stream = &vp->channel.mc_head->video_stream;
dip->config = pvr2_config_mpeg;
dip->minor_type = pvr2_v4l_type_video;
@@ -1276,7 +1276,7 @@ struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp)
/* register streams */
vp->dev_video = kzalloc(sizeof(*vp->dev_video),GFP_KERNEL);
if (!vp->dev_video) goto fail;
- pvr2_v4l2_dev_init(vp->dev_video,vp,VFL_TYPE_GRABBER);
+ pvr2_v4l2_dev_init(vp->dev_video,vp,VFL_TYPE_VIDEO);
if (pvr2_hdw_get_input_available(vp->channel.mc_head->hdw) &
(1 << PVR2_CVAL_INPUT_RADIO)) {
vp->dev_radio = kzalloc(sizeof(*vp->dev_radio),GFP_KERNEL);
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 9b76cf133d52..d57b8b786506 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1116,7 +1116,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE;
- rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
+ rc = video_register_device(&pdev->vdev, VFL_TYPE_VIDEO, -1);
if (rc < 0) {
PWC_ERROR("Failed to register as video device (%d).\n", rc);
goto err_unregister_v4l2_dev;
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 329ec8089592..4af55e2478be 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1649,11 +1649,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
video_set_drvdata(&vc->vdev, vc);
if (video_nr == -1)
ret = video_register_device(&vc->vdev,
- VFL_TYPE_GRABBER,
+ VFL_TYPE_VIDEO,
video_nr);
else
ret = video_register_device(&vc->vdev,
- VFL_TYPE_GRABBER,
+ VFL_TYPE_VIDEO,
cur_nr + i);
if (ret) {
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index bcd14c66e8df..6a4eb616d516 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -830,7 +830,7 @@ int stk1160_video_register(struct stk1160 *dev)
dev->norm);
video_set_drvdata(&dev->vdev, dev);
- rc = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
+ rc = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1);
if (rc < 0) {
stk1160_err("video_register_device failed (%d)\n", rc);
return rc;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index b22501f76b78..a45d464427c4 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -1254,7 +1254,7 @@ static int stk_register_video_device(struct stk_camera *dev)
dev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
video_set_drvdata(&dev->vdev, dev);
- err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&dev->vdev, VFL_TYPE_VIDEO, -1);
if (err)
pr_err("v4l registration failed\n");
else
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index c07a81a6cbe2..bfba06ea60e9 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1300,7 +1300,7 @@ static int __tm6000_open(struct file *file)
video_device_node_name(vdev));
switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
break;
case VFL_TYPE_VBI:
@@ -1639,7 +1639,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vidq.queued);
- ret = video_register_device(&dev->vfd, VFL_TYPE_GRABBER, video_nr);
+ ret = video_register_device(&dev->vfd, VFL_TYPE_VIDEO, video_nr);
if (ret < 0) {
printk(KERN_INFO "%s: can't register video device\n",
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index 5095c380b2c1..ee9c656d121f 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -56,7 +56,7 @@ int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, index, NULL, 0, 0);
+ value, index, NULL, 0, USB_CTRL_GET_TIMEOUT);
if (ret < 0)
return ret;
}
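A timeout of 0 makes usb_control_msg() wait indefinitely, so a wedged device can hang the submitting task; this hunk and the usbtv-video.c ones below substitute the standard five-second USB_CTRL_GET_TIMEOUT / USB_CTRL_SET_TIMEOUT defaults. A bounded vendor write, as a sketch:

static int vendor_write_reg(struct usb_device *udev, u16 value, u16 index)
{
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			       USBTV_REQUEST_REG,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       value, index, NULL, 0,
			       USB_CTRL_SET_TIMEOUT);	/* 5 s, not forever */
}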
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 3d9284a09ee5..c89efcd46163 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -800,7 +800,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
ret = usb_control_msg(usbtv->udev,
usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
+ 0, USBTV_BASE + 0x0244, (void *)data, 3,
+ USB_CTRL_GET_TIMEOUT);
if (ret < 0)
goto error;
}
@@ -851,7 +852,7 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0),
USBTV_CONTROL_REG,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, index, (void *)data, size, 0);
+ 0, index, (void *)data, size, USB_CTRL_SET_TIMEOUT);
error:
if (ret < 0)
@@ -940,7 +941,7 @@ int usbtv_video_init(struct usbtv *usbtv)
usbtv->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
video_set_drvdata(&usbtv->vdev, usbtv);
- ret = video_register_device(&usbtv->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&usbtv->vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_warn(usbtv->dev, "Could not register video device\n");
goto vdev_fail;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 99883550375e..431d86e1c94b 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2014,7 +2014,7 @@ int uvc_register_video_device(struct uvc_device *dev,
*/
video_set_drvdata(vdev, stream);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
uvc_printk(KERN_ERR, "Failed to register %s device (%d).\n",
v4l2_type_names[type], ret);
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 57dbcc8083bf..8c670934d920 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1516,7 +1516,7 @@ static int zr364xx_probe(struct usb_interface *intf,
V4L2_FIELD_NONE,
sizeof(struct zr364xx_buffer), cam, &cam->lock);
- err = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
if (err) {
dev_err(&udev->dev, "video_register_device failed\n");
goto fail;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 2928c5e0a73d..93d33d1db4e8 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -4296,10 +4296,17 @@ void v4l2_ctrl_request_complete(struct media_request *req,
continue;
v4l2_ctrl_lock(ctrl);
- if (ref->req)
+ if (ref->req) {
ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
- else
+ } else {
ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
+ /*
+ * Set ref->req to ensure that when userspace wants to
+ * obtain the controls of this request it will take
+ * this value and not the current value of the control.
+ */
+ ref->req = ref;
+ }
v4l2_ctrl_unlock(ctrl);
}
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index da42d172714a..97b6a3af1361 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -542,13 +542,13 @@ static void determine_valid_ioctls(struct video_device *vdev)
V4L2_CAP_META_OUTPUT;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO &&
(vdev->device_caps & vid_caps);
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
- bool is_meta = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO &&
(vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
@@ -783,7 +783,7 @@ static int video_register_media_controller(struct video_device *vdev)
vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
intf_type = MEDIA_INTF_T_V4L_VIDEO;
vdev->entity.function = MEDIA_ENT_F_IO_V4L;
break;
@@ -891,7 +891,7 @@ int __video_register_device(struct video_device *vdev,
/* Part 1: check device type */
switch (type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
name_base = "video";
break;
case VFL_TYPE_VBI:
@@ -935,7 +935,7 @@ int __video_register_device(struct video_device *vdev,
* of 128-191 and just pick the first free minor there
* (new style). */
switch (type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
minor_offset = 0;
minor_cnt = 64;
break;
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 63d6b147b21e..c69941214bb2 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -111,9 +111,6 @@ EXPORT_SYMBOL_GPL(v4l2_device_unregister);
int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
struct v4l2_subdev *sd)
{
-#if defined(CONFIG_MEDIA_CONTROLLER)
- struct media_entity *entity = &sd->entity;
-#endif
int err;
/* Check for valid input */
@@ -143,7 +140,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
#if defined(CONFIG_MEDIA_CONTROLLER)
/* Register the entity. */
if (v4l2_dev->mdev) {
- err = media_device_register_entity(v4l2_dev->mdev, entity);
+ err = media_device_register_entity(v4l2_dev->mdev, &sd->entity);
if (err < 0)
goto error_module;
}
@@ -163,7 +160,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
error_unregister:
#if defined(CONFIG_MEDIA_CONTROLLER)
- media_device_unregister_entity(entity);
+ media_device_unregister_entity(&sd->entity);
#endif
error_module:
if (!sd->owner_v4l2_dev)
@@ -179,6 +176,7 @@ static void v4l2_subdev_release(struct v4l2_subdev *sd)
if (sd->internal_ops && sd->internal_ops->release)
sd->internal_ops->release(sd);
+ sd->devnode = NULL;
module_put(owner);
}
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 6ece4320e1d2..97f0f8b23b5d 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -557,33 +557,28 @@ int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
-int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
+int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
struct v4l2_fwnode_link *link)
{
- const char *port_prop = is_of_node(__fwnode) ? "reg" : "port";
- struct fwnode_handle *fwnode;
+ struct fwnode_endpoint fwep;
memset(link, 0, sizeof(*link));
- fwnode = fwnode_get_parent(__fwnode);
- fwnode_property_read_u32(fwnode, port_prop, &link->local_port);
- fwnode = fwnode_get_next_parent(fwnode);
- if (is_of_node(fwnode) && of_node_name_eq(to_of_node(fwnode), "ports"))
- fwnode = fwnode_get_next_parent(fwnode);
- link->local_node = fwnode;
+ fwnode_graph_parse_endpoint(fwnode, &fwep);
+ link->local_id = fwep.id;
+ link->local_port = fwep.port;
+ link->local_node = fwnode_graph_get_port_parent(fwnode);
- fwnode = fwnode_graph_get_remote_endpoint(__fwnode);
+ fwnode = fwnode_graph_get_remote_endpoint(fwnode);
if (!fwnode) {
fwnode_handle_put(fwnode);
return -ENOLINK;
}
- fwnode = fwnode_get_parent(fwnode);
- fwnode_property_read_u32(fwnode, port_prop, &link->remote_port);
- fwnode = fwnode_get_next_parent(fwnode);
- if (is_of_node(fwnode) && of_node_name_eq(to_of_node(fwnode), "ports"))
- fwnode = fwnode_get_next_parent(fwnode);
- link->remote_node = fwnode;
+ fwnode_graph_parse_endpoint(fwnode, &fwep);
+ link->remote_id = fwep.id;
+ link->remote_port = fwep.port;
+ link->remote_node = fwnode_graph_get_port_parent(fwnode);
return 0;
}
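The rewrite drops the hand-rolled parent walking (and its DT "reg" vs ACPI "port" property split) in favour of fwnode_graph_parse_endpoint(), which fills a struct fwnode_endpoint with the port and endpoint id, plus fwnode_graph_get_port_parent(), which also skips an intermediate "ports" node. One side of the link, as a sketch:

static void fill_local_side(struct fwnode_handle *ep,
			    struct v4l2_fwnode_link *link)
{
	struct fwnode_endpoint fwep;

	fwnode_graph_parse_endpoint(ep, &fwep);	/* DT and ACPI alike */
	link->local_id = fwep.id;
	link->local_port = fwep.port;
	link->local_node = fwnode_graph_get_port_parent(ep);
}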
@@ -596,6 +591,171 @@ void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
+static const struct v4l2_fwnode_connector_conv {
+ enum v4l2_connector_type type;
+ const char *compatible;
+} connectors[] = {
+ {
+ .type = V4L2_CONN_COMPOSITE,
+ .compatible = "composite-video-connector",
+ }, {
+ .type = V4L2_CONN_SVIDEO,
+ .compatible = "svideo-connector",
+ },
+};
+
+static enum v4l2_connector_type
+v4l2_fwnode_string_to_connector_type(const char *con_str)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(connectors); i++)
+ if (!strcmp(con_str, connectors[i].compatible))
+ return connectors[i].type;
+
+ return V4L2_CONN_UNKNOWN;
+}
+
+static void
+v4l2_fwnode_connector_parse_analog(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *vc)
+{
+ u32 stds;
+ int ret;
+
+ ret = fwnode_property_read_u32(fwnode, "sdtv-standards", &stds);
+
+ /* The property is optional. */
+ vc->connector.analog.sdtv_stds = ret ? V4L2_STD_ALL : stds;
+}
+
+void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector)
+{
+ struct v4l2_connector_link *link, *tmp;
+
+ if (IS_ERR_OR_NULL(connector) || connector->type == V4L2_CONN_UNKNOWN)
+ return;
+
+ list_for_each_entry_safe(link, tmp, &connector->links, head) {
+ v4l2_fwnode_put_link(&link->fwnode_link);
+ list_del(&link->head);
+ kfree(link);
+ }
+
+ kfree(connector->label);
+ connector->label = NULL;
+ connector->type = V4L2_CONN_UNKNOWN;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_free);
+
+static enum v4l2_connector_type
+v4l2_fwnode_get_connector_type(struct fwnode_handle *fwnode)
+{
+ const char *type_name;
+ int err;
+
+ if (!fwnode)
+ return V4L2_CONN_UNKNOWN;
+
+ /* The connector-type is stored within the compatible string. */
+ err = fwnode_property_read_string(fwnode, "compatible", &type_name);
+ if (err)
+ return V4L2_CONN_UNKNOWN;
+
+ return v4l2_fwnode_string_to_connector_type(type_name);
+}
+
+int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector)
+{
+ struct fwnode_handle *connector_node;
+ enum v4l2_connector_type connector_type;
+ const char *label;
+ int err;
+
+ if (!fwnode)
+ return -EINVAL;
+
+ memset(connector, 0, sizeof(*connector));
+
+ INIT_LIST_HEAD(&connector->links);
+
+ connector_node = fwnode_graph_get_port_parent(fwnode);
+ connector_type = v4l2_fwnode_get_connector_type(connector_node);
+ if (connector_type == V4L2_CONN_UNKNOWN) {
+ fwnode_handle_put(connector_node);
+ connector_node = fwnode_graph_get_remote_port_parent(fwnode);
+ connector_type = v4l2_fwnode_get_connector_type(connector_node);
+ }
+
+ if (connector_type == V4L2_CONN_UNKNOWN) {
+ pr_err("Unknown connector type\n");
+ err = -ENOTCONN;
+ goto out;
+ }
+
+ connector->type = connector_type;
+ connector->name = fwnode_get_name(connector_node);
+ err = fwnode_property_read_string(connector_node, "label", &label);
+ connector->label = err ? NULL : kstrdup_const(label, GFP_KERNEL);
+
+ /* Parse the connector specific properties. */
+ switch (connector->type) {
+ case V4L2_CONN_COMPOSITE:
+ case V4L2_CONN_SVIDEO:
+ v4l2_fwnode_connector_parse_analog(connector_node, connector);
+ break;
+ /* Avoid compiler warnings */
+ case V4L2_CONN_UNKNOWN:
+ break;
+ }
+
+out:
+ fwnode_handle_put(connector_node);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_parse);
+
+int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector)
+{
+ struct fwnode_handle *connector_ep;
+ struct v4l2_connector_link *link;
+ int err;
+
+ if (!fwnode || !connector || connector->type == V4L2_CONN_UNKNOWN)
+ return -EINVAL;
+
+ connector_ep = fwnode_graph_get_remote_endpoint(fwnode);
+ if (!connector_ep)
+ return -ENOTCONN;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ err = v4l2_fwnode_parse_link(connector_ep, &link->fwnode_link);
+ if (err)
+ goto err;
+
+ fwnode_handle_put(connector_ep);
+
+ list_add(&link->head, &connector->links);
+ connector->nr_of_links++;
+
+ return 0;
+
+err:
+ kfree(link);
+ fwnode_handle_put(connector_ep);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link);
+
static int
v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
struct v4l2_async_notifier *notifier,
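Taken together, the three new exports form a small lifecycle for connector handling; a hedged usage sketch from a subdev endpoint ep whose remote is a connector node (wrapper name hypothetical):

static int connector_setup_demo(struct fwnode_handle *ep,
				struct v4l2_fwnode_connector *conn)
{
	int err;

	err = v4l2_fwnode_connector_parse(ep, conn);	/* type, label, stds */
	if (!err)
		err = v4l2_fwnode_connector_add_link(ep, conn);
	if (err)
		v4l2_fwnode_connector_free(conn);	/* safe on partial init */
	return err;
}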
diff --git a/drivers/media/v4l2-core/v4l2-i2c.c b/drivers/media/v4l2-core/v4l2-i2c.c
index 5bf99e7c0c09..b4acca75644b 100644
--- a/drivers/media/v4l2-core/v4l2-i2c.c
+++ b/drivers/media/v4l2-core/v4l2-i2c.c
@@ -74,10 +74,10 @@ struct v4l2_subdev
/* Create the i2c client */
if (info->addr == 0 && probe_addrs)
- client = i2c_new_probed_device(adapter, info, probe_addrs,
- NULL);
+ client = i2c_new_scanned_device(adapter, info, probe_addrs,
+ NULL);
else
- client = i2c_new_device(adapter, info);
+ client = i2c_new_client_device(adapter, info);
/*
* Note: by loading the module first we are certain that c->driver
@@ -88,7 +88,7 @@ struct v4l2_subdev
* want to use the i2c device, so explicitly loading the module
* is the best alternative.
*/
- if (!client || !client->dev.driver)
+ if (!i2c_client_has_driver(client))
goto error;
/* Lock the module so we can safely get the v4l2_subdev pointer */
@@ -110,7 +110,7 @@ error:
* If we have a client but no subdev, then something went wrong and
* we must unregister the client.
*/
- if (client && !sd)
+ if (!IS_ERR(client) && !sd)
i2c_unregister_device(client);
return sd;
}
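i2c_new_device() and i2c_new_probed_device() signalled failure with NULL; their i2c_new_client_device() / i2c_new_scanned_device() replacements return an ERR_PTR instead, which is why the bare NULL tests become i2c_client_has_driver() and IS_ERR() checks. The resulting idiom, as a sketch:

static struct i2c_client *attach_client(struct i2c_adapter *adapter,
					struct i2c_board_info *info)
{
	struct i2c_client *client = i2c_new_client_device(adapter, info);

	/* failure is an ERR_PTR now, never NULL; the helper also verifies
	 * that a driver actually bound to the new client */
	if (!i2c_client_has_driver(client)) {
		if (!IS_ERR(client))
			i2c_unregister_device(client);
		return ERR_PTR(-ENODEV);
	}
	return client;
}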
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index aaf83e254272..b2ef8e60ea7d 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -941,12 +941,12 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
V4L2_CAP_META_OUTPUT;
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO &&
(vfd->device_caps & vid_caps);
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
- bool is_meta = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO &&
(vfd->device_caps & meta_caps);
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -1222,6 +1222,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break;
case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break;
case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break;
case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break;
case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break;
case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
@@ -1306,6 +1307,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break;
case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break;
case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break;
case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break;
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 014a2a97cadd..0fffdd3ce6a4 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source);
* use_count field stores the total number of users of all video device nodes
* in the pipeline.
*
- * The v4l2_pipeline_pm_use() function must be called in the open() and
+ * The v4l2_pipeline_pm_{get, put}() functions must be called in the open() and
* close() handlers of video device nodes. It increments or decrements the use
* count of all subdev entities in the pipeline.
*
@@ -423,7 +423,7 @@ static int pipeline_pm_power(struct media_entity *entity, int change,
return ret;
}
-int v4l2_pipeline_pm_use(struct media_entity *entity, int use)
+static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use)
{
struct media_device *mdev = entity->graph_obj.mdev;
int change = use ? 1 : -1;
@@ -444,7 +444,19 @@ int v4l2_pipeline_pm_use(struct media_entity *entity, int use)
return ret;
}
-EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_use);
+
+int v4l2_pipeline_pm_get(struct media_entity *entity)
+{
+ return v4l2_pipeline_pm_use(entity, 1);
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_get);
+
+void v4l2_pipeline_pm_put(struct media_entity *entity)
+{
+ /* Powering off entities shouldn't fail. */
+ WARN_ON(v4l2_pipeline_pm_use(entity, 0));
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_put);
int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
unsigned int notification)
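Splitting v4l2_pipeline_pm_use() into get/put halves makes the error contract explicit: powering the pipeline up can fail and must be checked, while powering it down returns nothing and merely WARNs. Expected call sites, as a sketch (handler names hypothetical, assuming a media-controller-enabled device):

static int my_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);

	return v4l2_pipeline_pm_get(&vdev->entity);	/* may fail */
}

static int my_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);

	v4l2_pipeline_pm_put(&vdev->entity);	/* void; WARNs internally */
	return 0;
}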
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index cc34c5ab7009..8986c31176e9 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -340,6 +340,11 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
+ if (m2m_ctx->has_stopped) {
+ dprintk("Device has stopped\n");
+ goto job_unlock;
+ }
+
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
dprintk("Driver not ready\n");
@@ -556,6 +561,140 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
+/*
+ * This adds the LAST flag and marks the buffer management state as
+ * stopped. It is called when the last capture buffer must be flagged
+ * as LAST while draining, either from the encoder/decoder driver's
+ * buf_queue() callback or from v4l2_update_last_buf_state() when a
+ * capture buffer is available.
+ */
+void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_mark_stopped(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);
+
+/* When stop command is issued, update buffer management state */
+static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct vb2_v4l2_buffer *next_dst_buf;
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ return 0;
+
+ m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
+ m2m_ctx->is_draining = true;
+
+ /*
+ * The processing of the last output buffer queued before
+ * the STOP command is expected to mark the buffer management
+ * state as stopped with v4l2_m2m_mark_stopped().
+ */
+ if (m2m_ctx->last_src_buf)
+ return 0;
+
+ /*
+ * In case the output queue is empty, try to mark the last capture
+ * buffer as LAST.
+ */
+ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!next_dst_buf) {
+ /*
+		 * The LAST flag will instead be set on the next queued capture
+		 * buffer, either in the encoder/decoder driver's buf_queue()
+		 * callback (using the v4l2_m2m_dst_buf_is_last() helper) or in
+		 * v4l2_m2m_qbuf() if the encoder/decoder is not yet streaming.
+ */
+ m2m_ctx->next_buf_last = true;
+ return 0;
+ }
+
+ v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);
+
+ return 0;
+}
+
+/*
+ * Updates the encoding/decoding buffer management state; should be
+ * called from the encoder/decoder driver's start_streaming() callback.
+ */
+void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+	/* If streaming is restarted, untag the last output buffer */
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ m2m_ctx->last_src_buf = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);
+
+/*
+ * Updates the encoding/decoding buffer management state; should be
+ * called from the encoder/decoder driver's stop_streaming() callback.
+ */
+void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+		 * If in draining state, either mark the next dst buffer as
+		 * done, or flag the next one to be marked as done later:
+		 * in the encoder/decoder driver's buf_queue() callback using
+		 * the v4l2_m2m_dst_buf_is_last() helper, or in v4l2_m2m_qbuf()
+		 * if the encoder/decoder is not yet streaming.
+ */
+ if (m2m_ctx->is_draining) {
+ struct vb2_v4l2_buffer *next_dst_buf;
+
+ m2m_ctx->last_src_buf = NULL;
+ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!next_dst_buf)
+ m2m_ctx->next_buf_last = true;
+ else
+ v4l2_m2m_last_buffer_done(m2m_ctx,
+ next_dst_buf);
+ }
+ } else {
+ v4l2_m2m_clear_state(m2m_ctx);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);
+
+static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned int i;
+
+ if (WARN_ON(q->is_output))
+ return;
+ if (list_empty(&q->queued_list))
+ return;
+
+ vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
+ for (i = 0; i < vb->num_planes; i++)
+ vb2_set_plane_payload(vb, i, 0);
+
+ /*
+ * Since the buffer hasn't been queued to the ready queue,
+	 * mark it active and owned before marking it LAST and DONE
+ */
+ vb->state = VB2_BUF_STATE_ACTIVE;
+ atomic_inc(&q->owned_by_drv_count);
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+ vbuf->field = V4L2_FIELD_NONE;
+
+ v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
+}
+
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
@@ -570,11 +709,25 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
__func__);
return -EPERM;
}
+
ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
- if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
+ if (ret)
+ return ret;
+
+ /*
+	 * If the capture queue is streaming, but streaming hasn't started
+	 * on the device yet and the device was asked to stop, mark the
+	 * previously queued buffer as DONE with the LAST flag since it
+	 * won't be queued on the device.
+ */
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
+ (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
+ v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
+ else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
v4l2_m2m_try_schedule(m2m_ctx);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
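For reference, a sketch of the buf_queue() side of the drain handshake described above, modeled on how stateful codec drivers use the new helpers; my_ctx and my_buf_queue are hypothetical, while the v4l2_m2m_* calls are the ones introduced here:

/* Hypothetical per-instance context; fh.m2m_ctx is set up at open(). */
struct my_ctx {
	struct v4l2_fh fh;
};

static void my_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/*
	 * While draining, the first capture buffer queued after the
	 * output queue ran dry must carry the LAST flag and stop the
	 * buffer management state machine.
	 */
	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type) &&
	    vb2_is_streaming(vb->vb2_queue) &&
	    v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
		v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
		return;
	}

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}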
@@ -1225,6 +1378,70 @@ int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
+/*
+ * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
+ * Should be called from the encoder driver's encoder_cmd() callback
+ */
+int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
+ return -EINVAL;
+
+ if (ec->cmd == V4L2_ENC_CMD_STOP)
+ return v4l2_update_last_buf_state(m2m_ctx);
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ m2m_ctx->has_stopped = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);
+
+/*
+ * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
+ * Should be called from the decoder driver's decoder_cmd() callback
+ */
+int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
+ return -EINVAL;
+
+ if (dc->cmd == V4L2_DEC_CMD_STOP)
+ return v4l2_update_last_buf_state(m2m_ctx);
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ m2m_ctx->has_stopped = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
+
+int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);
+
+int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
+
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
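Wiring it together: a hypothetical encoder driver would route the command ioctls to the new helpers and refresh the drain state from its vb2 start/stop callbacks. A sketch, assuming a my_ctx with an embedded v4l2_fh as in the previous sketch:

static int my_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct my_ctx *ctx = vb2_get_drv_priv(q);

	/* Clears a stale last_src_buf tag when output streaming restarts. */
	v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
	return 0;
}

static void my_stop_streaming(struct vb2_queue *q)
{
	struct my_ctx *ctx = vb2_get_drv_priv(q);

	v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
	/* ...return all still-queued buffers to vb2 here... */
}

static const struct v4l2_ioctl_ops my_enc_ioctl_ops = {
	.vidioc_try_encoder_cmd	= v4l2_m2m_ioctl_try_encoder_cmd,
	.vidioc_encoder_cmd	= v4l2_m2m_ioctl_encoder_cmd,
};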
diff --git a/drivers/memory/.gitignore b/drivers/memory/.gitignore
index cbca8b028437..caedc4c7d2db 100644
--- a/drivers/memory/.gitignore
+++ b/drivers/memory/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
ti-emif-asm-offsets.h
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 21f05240682b..33b8216bac30 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1158,6 +1158,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 8ae474d9bfb9..b16715e9515d 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -628,6 +628,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index e3efd9529506..b42bdb667e85 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1256,6 +1256,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index 8a24494f8c4d..a1ec7e84d6fe 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -64,6 +64,7 @@
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -111,13 +112,13 @@ MODULE_DESCRIPTION(LANAME);
#ifdef MPT_LAN_IO_DEBUG
#define dioprintk(x) printk x
#else
-#define dioprintk(x)
+#define dioprintk(x) no_printk x
#endif
#ifdef MPT_LAN_DEBUG
#define dlprintk(x) printk x
#else
-#define dlprintk(x)
+#define dlprintk(x) no_printk x
#endif
#define NETDEV_TO_LANPRIV_PTR(d) ((struct mpt_lan_priv *)netdev_priv(d))
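The no_printk() substitution above is the standard way to compile out a debug macro without losing printf-format checking or tripping unused-variable warnings. A standalone sketch of the same pattern (MY_DEBUG and my_dbg are hypothetical names):

#include <linux/printk.h>

#ifdef MY_DEBUG
#define my_dbg(x) printk x
#else
/* Format string and arguments are still type-checked, but no code is emitted. */
#define my_dbg(x) no_printk x
#endif

static void my_report(int err)
{
	/* 'err' remains used even when MY_DEBUG is undefined. */
	my_dbg((KERN_DEBUG "error code: %d\n", err));
}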
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index c396483d3624..e35b13891fe4 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -110,7 +110,7 @@ struct fw_event_work {
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
- char event_data[0] __aligned(4);
+ char event_data[] __aligned(4);
};
struct mptsas_discovery_event {
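The [0]-to-[] change converts a GNU zero-length array into a C99 flexible array member; allocation sites are unchanged and typically pair such a member with struct_size() from <linux/overflow.h>. A sketch of such an allocation, using a hypothetical helper that is not part of this patch:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct fw_event_work *my_alloc_fw_event(MPT_ADAPTER *ioc,
					       const void *data, size_t len)
{
	struct fw_event_work *fw_event;

	/* struct_size() guards sizeof(*fw_event) + len against overflow. */
	fw_event = kzalloc(struct_size(fw_event, event_data, len), GFP_KERNEL);
	if (!fw_event)
		return NULL;

	fw_event->ioc = ioc;
	memcpy(fw_event->event_data, data, len);
	return fw_event;
}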
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2b203290e7b9..0a59249198d3 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -642,6 +642,19 @@ config MFD_IPAQ_MICRO
AT90LS8535 microcontroller flashed with a special iPAQ
firmware using the custom protocol implemented in this driver.
+config MFD_IQS62X
+ tristate "Azoteq IQS620A/621/622/624/625 core support"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build core support for the Azoteq IQS620A,
+ IQS621, IQS622, IQS624 and IQS625 multi-function sensors. Additional
+ options must be selected to enable device-specific functions.
+
+ To compile this driver as a module, choose M here: the module will
+ be called iqs62x.
+
config MFD_JANZ_CMODIO
tristate "Janz CMOD-IO PCI MODULbus Carrier Board"
select MFD_CORE
@@ -893,6 +906,7 @@ config MFD_CPCAP
tristate "Support for Motorola CPCAP"
depends on SPI
depends on OF || COMPILE_TEST
+ select MFD_CORE
select REGMAP_SPI
select REGMAP_IRQ
help
@@ -1058,6 +1072,7 @@ config MFD_RN5T618
depends on OF
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
help
Say yes here to add support for the Ricoh RN5T567,
RN5T618, RC5T619 PMIC.
@@ -1201,7 +1216,7 @@ config AB8500_CORE
chip. This connects to U8500 either on the SSP/SPI bus (deprecated
since hardware version v1.0) or the I2C bus via PRCMU. It also adds
the irq_chip parts for handling the Mixed Signal chip events.
- This chip embeds various other multimedia funtionalities as well.
+ This chip embeds various other multimedia functionalities as well.
config AB8500_DEBUG
bool "Enable debug info via debugfs"
@@ -1851,7 +1866,7 @@ config MFD_WM8994
has on board GPIO and regulator functionality which is
supported via the relevant subsystems. This driver provides
core support for the WM8994, in order to use the actual
- functionaltiy of the device other drivers must be enabled.
+ functionality of the device other drivers must be enabled.
config MFD_WM97xx
tristate "Wolfson Microelectronics WM97xx"
@@ -1864,7 +1879,7 @@ config MFD_WM97xx
designed for smartphone applications. As well as audio functionality
it has on board GPIO and a touchscreen functionality which is
supported via the relevant subsystems. This driver provides core
- support for the WM97xx, in order to use the actual functionaltiy of
+ support for the WM97xx, in order to use the actual functionality of
the device other drivers must be enabled.
config MFD_STW481X
@@ -1957,7 +1972,7 @@ config MFD_STPMIC1
Support for ST Microelectronics STPMIC1 PMIC. STPMIC1 has power on
key, watchdog and regulator functionalities which are supported via
the relevant subsystems. This driver provides core support for the
- STPMIC1. In order to use the actual functionaltiy of the device other
+ STPMIC1. In order to use the actual functionality of the device other
drivers must be enabled.
To compile this driver as a module, choose M here: the
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b83f172545e1..f935d10cbf0f 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -226,6 +226,7 @@ obj-$(CONFIG_MFD_AS3711) += as3711.o
obj-$(CONFIG_MFD_AS3722) += as3722.o
obj-$(CONFIG_MFD_STW481X) += stw481x.o
obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
+obj-$(CONFIG_MFD_IQS62X) += iqs62x.o
obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 78ee4b28fca2..a17cf759739d 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -221,7 +221,7 @@ static ssize_t aat2870_dump_reg(struct aat2870_data *aat2870, char *buf)
count += sprintf(buf, "aat2870 registers\n");
for (addr = 0; addr < AAT2870_REG_NUM; addr++) {
- count += sprintf(buf + count, "0x%02x: ", addr);
+ count += snprintf(buf + count, PAGE_SIZE - count, "0x%02x: ", addr);
if (count >= PAGE_SIZE - 1)
break;
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 39e611695053..32c2b912b58b 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -211,7 +211,7 @@ static int ec_device_probe(struct platform_device *pdev)
* explicitly added on platforms that don't have the PD notifier ACPI
* device entry defined.
*/
- if (IS_ENABLED(CONFIG_OF)) {
+ if (IS_ENABLED(CONFIG_OF) && ec->ec_dev->dev->of_node) {
if (cros_ec_check_features(ec, EC_FEATURE_USB_PD)) {
retval = mfd_add_hotplug_devices(ec->dev,
cros_usbpd_notify_cells,
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 419c73533401..fc30726e2e27 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -21,6 +21,9 @@
#define DA9062_REG_EVENT_B_OFFSET 1
#define DA9062_REG_EVENT_C_OFFSET 2
+#define DA9062_IRQ_LOW 0
+#define DA9062_IRQ_HIGH 1
+
static struct regmap_irq da9061_irqs[] = {
/* EVENT A */
[DA9061_IRQ_ONKEY] = {
@@ -369,6 +372,33 @@ static int da9062_get_device_type(struct da9062 *chip)
return ret;
}
+static int da9062_configure_irq_type(struct da9062 *chip, int irq, u32 *trigger)
+{
+ u32 irq_type = 0;
+ struct irq_data *irq_data = irq_get_irq_data(irq);
+
+ if (!irq_data) {
+ dev_err(chip->dev, "Invalid IRQ: %d\n", irq);
+ return -EINVAL;
+ }
+ *trigger = irqd_get_trigger_type(irq_data);
+
+ switch (*trigger) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = DA9062_IRQ_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = DA9062_IRQ_LOW;
+ break;
+ default:
+ dev_warn(chip->dev, "Unsupported IRQ type: %d\n", *trigger);
+ return -EINVAL;
+ }
+ return regmap_update_bits(chip->regmap, DA9062AA_CONFIG_A,
+ DA9062AA_IRQ_TYPE_MASK,
+ irq_type << DA9062AA_IRQ_TYPE_SHIFT);
+}
+
static const struct regmap_range da9061_aa_readable_ranges[] = {
regmap_reg_range(DA9062AA_PAGE_CON, DA9062AA_STATUS_B),
regmap_reg_range(DA9062AA_STATUS_D, DA9062AA_EVENT_C),
@@ -388,6 +418,7 @@ static const struct regmap_range da9061_aa_readable_ranges[] = {
regmap_reg_range(DA9062AA_VBUCK1_A, DA9062AA_VBUCK4_A),
regmap_reg_range(DA9062AA_VBUCK3_A, DA9062AA_VBUCK3_A),
regmap_reg_range(DA9062AA_VLDO1_A, DA9062AA_VLDO4_A),
+ regmap_reg_range(DA9062AA_CONFIG_A, DA9062AA_CONFIG_A),
regmap_reg_range(DA9062AA_VBUCK1_B, DA9062AA_VBUCK4_B),
regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B),
regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B),
@@ -417,6 +448,7 @@ static const struct regmap_range da9061_aa_writeable_ranges[] = {
regmap_reg_range(DA9062AA_VBUCK1_A, DA9062AA_VBUCK4_A),
regmap_reg_range(DA9062AA_VBUCK3_A, DA9062AA_VBUCK3_A),
regmap_reg_range(DA9062AA_VLDO1_A, DA9062AA_VLDO4_A),
+ regmap_reg_range(DA9062AA_CONFIG_A, DA9062AA_CONFIG_A),
regmap_reg_range(DA9062AA_VBUCK1_B, DA9062AA_VBUCK4_B),
regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B),
regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B),
@@ -596,6 +628,7 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
const struct regmap_irq_chip *irq_chip;
const struct regmap_config *config;
int cell_num;
+ u32 trigger_type = 0;
int ret;
chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
@@ -654,10 +687,15 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
if (ret)
return ret;
+ ret = da9062_configure_irq_type(chip, i2c->irq, &trigger_type);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to configure IRQ type\n");
+ return ret;
+ }
+
ret = regmap_add_irq_chip(chip->regmap, i2c->irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
- -1, irq_chip,
- &chip->regmap_irq);
+ trigger_type | IRQF_SHARED | IRQF_ONESHOT,
+ -1, irq_chip, &chip->regmap_irq);
if (ret) {
dev_err(chip->dev, "Failed to request IRQ %d: %d\n",
i2c->irq, ret);
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 7841c11411d0..39276fa626d2 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -90,6 +90,11 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
+enum dln2_endpoint {
+ DLN2_EP_OUT = 0,
+ DLN2_EP_IN = 1,
+};
+
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@@ -640,35 +645,56 @@ static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
return 0;
}
+enum {
+ DLN2_ACPI_MATCH_GPIO = 0,
+ DLN2_ACPI_MATCH_I2C = 1,
+ DLN2_ACPI_MATCH_SPI = 2,
+};
+
static struct dln2_platform_data dln2_pdata_gpio = {
.handle = DLN2_HANDLE_GPIO,
};
+static struct mfd_cell_acpi_match dln2_acpi_match_gpio = {
+ .adr = DLN2_ACPI_MATCH_GPIO,
+};
+
/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
.handle = DLN2_HANDLE_I2C,
.port = 0,
};
+static struct mfd_cell_acpi_match dln2_acpi_match_i2c = {
+ .adr = DLN2_ACPI_MATCH_I2C,
+};
+
/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
.handle = DLN2_HANDLE_SPI,
.port = 0,
};
+static struct mfd_cell_acpi_match dln2_acpi_match_spi = {
+ .adr = DLN2_ACPI_MATCH_SPI,
+};
+
static const struct mfd_cell dln2_devs[] = {
{
.name = "dln2-gpio",
+ .acpi_match = &dln2_acpi_match_gpio,
.platform_data = &dln2_pdata_gpio,
.pdata_size = sizeof(struct dln2_platform_data),
},
{
.name = "dln2-i2c",
+ .acpi_match = &dln2_acpi_match_i2c,
.platform_data = &dln2_pdata_i2c,
.pdata_size = sizeof(struct dln2_platform_data),
},
{
.name = "dln2-spi",
+ .acpi_match = &dln2_acpi_match_spi,
.platform_data = &dln2_pdata_spi,
.pdata_size = sizeof(struct dln2_platform_data),
},
@@ -733,10 +759,10 @@ static int dln2_probe(struct usb_interface *interface,
hostif->desc.bNumEndpoints < 2)
return -ENODEV;
- epin = &hostif->endpoint[0].desc;
- epout = &hostif->endpoint[1].desc;
+ epout = &hostif->endpoint[DLN2_EP_OUT].desc;
if (!usb_endpoint_is_bulk_out(epout))
return -ENODEV;
+ epin = &hostif->endpoint[DLN2_EP_IN].desc;
if (!usb_endpoint_is_bulk_in(epin))
return -ENODEV;
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index c40a6c7d0cf8..7fc0c5d4edff 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -139,6 +139,11 @@ static const struct intel_lpss_platform_info cnl_i2c_info = {
.properties = spt_i2c_properties,
};
+static const struct intel_lpss_platform_info ehl_i2c_info = {
+ .clk_rate = 100000000,
+ .properties = bxt_i2c_properties,
+};
+
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CML-LP */
{ PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
@@ -231,15 +236,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4b2a), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4b2b), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4b37), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x4b44), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b45), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b4b), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b4c), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b44), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b45), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b4b), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b4c), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4b4d), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x4b78), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b78), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&ehl_i2c_info },
/* JSL */
{ PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
@@ -347,6 +352,16 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
+ /* CML-V */
+ { PCI_VDEVICE(INTEL, 0xa3a7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa3a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa3a9), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa3aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa3e0), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e1), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
new file mode 100644
index 000000000000..af764bc87d7c
--- /dev/null
+++ b/drivers/mfd/iqs62x.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ *
+ * These devices rely on application-specific register settings and calibration
+ * data developed in and exported from a suite of GUIs offered by the vendor. A
+ * separate tool converts the GUIs' ASCII-based output into a standard firmware
+ * file parsed by the driver.
+ *
+ * Link to datasheets and GUIs: https://www.azoteq.com/
+ *
+ * Link to conversion tool: https://github.com/jlabundy/iqs62x-h2bin.git
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define IQS62X_PROD_NUM 0x00
+
+#define IQS62X_SYS_FLAGS 0x10
+#define IQS62X_SYS_FLAGS_IN_ATI BIT(2)
+
+#define IQS620_HALL_FLAGS 0x16
+#define IQS621_HALL_FLAGS 0x19
+#define IQS622_HALL_FLAGS IQS621_HALL_FLAGS
+
+#define IQS624_INTERVAL_NUM 0x18
+#define IQS625_INTERVAL_NUM 0x12
+
+#define IQS622_PROX_SETTINGS_4 0x48
+#define IQS620_PROX_SETTINGS_4 0x50
+#define IQS620_PROX_SETTINGS_4_SAR_EN BIT(7)
+
+#define IQS621_ALS_CAL_DIV_LUX 0x82
+#define IQS621_ALS_CAL_DIV_IR 0x83
+
+#define IQS620_TEMP_CAL_MULT 0xC2
+#define IQS620_TEMP_CAL_DIV 0xC3
+#define IQS620_TEMP_CAL_OFFS 0xC4
+
+#define IQS62X_SYS_SETTINGS 0xD0
+#define IQS62X_SYS_SETTINGS_SOFT_RESET BIT(7)
+#define IQS62X_SYS_SETTINGS_ACK_RESET BIT(6)
+#define IQS62X_SYS_SETTINGS_EVENT_MODE BIT(5)
+#define IQS62X_SYS_SETTINGS_CLK_DIV BIT(4)
+#define IQS62X_SYS_SETTINGS_REDO_ATI BIT(1)
+
+#define IQS62X_PWR_SETTINGS 0xD2
+#define IQS62X_PWR_SETTINGS_DIS_AUTO BIT(5)
+#define IQS62X_PWR_SETTINGS_PWR_MODE_MASK (BIT(4) | BIT(3))
+#define IQS62X_PWR_SETTINGS_PWR_MODE_HALT (BIT(4) | BIT(3))
+#define IQS62X_PWR_SETTINGS_PWR_MODE_NORM 0
+
+#define IQS62X_OTP_CMD 0xF0
+#define IQS62X_OTP_CMD_FG3 0x13
+#define IQS62X_OTP_DATA 0xF1
+#define IQS62X_MAX_REG 0xFF
+
+#define IQS62X_HALL_CAL_MASK GENMASK(3, 0)
+
+#define IQS62X_FW_REC_TYPE_INFO 0
+#define IQS62X_FW_REC_TYPE_PROD 1
+#define IQS62X_FW_REC_TYPE_HALL 2
+#define IQS62X_FW_REC_TYPE_MASK 3
+#define IQS62X_FW_REC_TYPE_DATA 4
+
+#define IQS62X_ATI_POLL_SLEEP_US 10000
+#define IQS62X_ATI_POLL_TIMEOUT_US 500000
+#define IQS62X_ATI_STABLE_DELAY_MS 150
+
+struct iqs62x_fw_rec {
+ u8 type;
+ u8 addr;
+ u8 len;
+ u8 data;
+} __packed;
+
+struct iqs62x_fw_blk {
+ struct list_head list;
+ u8 addr;
+ u8 mask;
+ u8 len;
+ u8 data[];
+};
+
+struct iqs62x_info {
+ u8 prod_num;
+ u8 sw_num;
+ u8 hw_num;
+} __packed;
+
+static int iqs62x_dev_init(struct iqs62x_core *iqs62x)
+{
+ struct iqs62x_fw_blk *fw_blk;
+ unsigned int val;
+ int ret;
+ u8 clk_div = 1;
+
+ list_for_each_entry(fw_blk, &iqs62x->fw_blk_head, list) {
+ if (fw_blk->mask)
+ ret = regmap_update_bits(iqs62x->regmap, fw_blk->addr,
+ fw_blk->mask, *fw_blk->data);
+ else
+ ret = regmap_raw_write(iqs62x->regmap, fw_blk->addr,
+ fw_blk->data, fw_blk->len);
+ if (ret)
+ return ret;
+ }
+
+ switch (iqs62x->dev_desc->prod_num) {
+ case IQS620_PROD_NUM:
+ case IQS622_PROD_NUM:
+ ret = regmap_read(iqs62x->regmap,
+ iqs62x->dev_desc->prox_settings, &val);
+ if (ret)
+ return ret;
+
+ if (val & IQS620_PROX_SETTINGS_4_SAR_EN)
+ iqs62x->ui_sel = IQS62X_UI_SAR1;
+
+ /* fall through */
+
+ case IQS621_PROD_NUM:
+ ret = regmap_write(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ IQS620_GLBL_EVENT_MASK_PMU |
+ iqs62x->dev_desc->prox_mask |
+ iqs62x->dev_desc->sar_mask |
+ iqs62x->dev_desc->hall_mask |
+ iqs62x->dev_desc->hyst_mask |
+ iqs62x->dev_desc->temp_mask |
+ iqs62x->dev_desc->als_mask |
+ iqs62x->dev_desc->ir_mask);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ ret = regmap_write(iqs62x->regmap, IQS624_HALL_UI,
+ IQS624_HALL_UI_WHL_EVENT |
+ IQS624_HALL_UI_INT_EVENT |
+ IQS624_HALL_UI_AUTO_CAL);
+ if (ret)
+ return ret;
+
+ /*
+ * The IQS625 default interval divider is below the minimum
+ * permissible value, and the datasheet mandates that it is
+ * corrected during initialization (unless an updated value
+ * has already been provided by firmware).
+ *
+ * To protect against an unacceptably low user-entered value
+ * stored in the firmware, the same check is extended to the
+ * IQS624 as well.
+ */
+ ret = regmap_read(iqs62x->regmap, IQS624_INTERVAL_DIV, &val);
+ if (ret)
+ return ret;
+
+ if (val >= iqs62x->dev_desc->interval_div)
+ break;
+
+ ret = regmap_write(iqs62x->regmap, IQS624_INTERVAL_DIV,
+ iqs62x->dev_desc->interval_div);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(iqs62x->regmap, IQS62X_SYS_SETTINGS, &val);
+ if (ret)
+ return ret;
+
+ if (val & IQS62X_SYS_SETTINGS_CLK_DIV)
+ clk_div = iqs62x->dev_desc->clk_div;
+
+ ret = regmap_write(iqs62x->regmap, IQS62X_SYS_SETTINGS, val |
+ IQS62X_SYS_SETTINGS_ACK_RESET |
+ IQS62X_SYS_SETTINGS_EVENT_MODE |
+ IQS62X_SYS_SETTINGS_REDO_ATI);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(iqs62x->regmap, IQS62X_SYS_FLAGS, val,
+ !(val & IQS62X_SYS_FLAGS_IN_ATI),
+ IQS62X_ATI_POLL_SLEEP_US,
+ IQS62X_ATI_POLL_TIMEOUT_US * clk_div);
+ if (ret)
+ return ret;
+
+ msleep(IQS62X_ATI_STABLE_DELAY_MS * clk_div);
+
+ return 0;
+}
+
+static int iqs62x_firmware_parse(struct iqs62x_core *iqs62x,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = iqs62x->client;
+ struct iqs62x_fw_rec *fw_rec;
+ struct iqs62x_fw_blk *fw_blk;
+ unsigned int val;
+ size_t pos = 0;
+ int ret = 0;
+ u8 mask, len, *data;
+ u8 hall_cal_index = 0;
+
+ while (pos < fw->size) {
+ if (pos + sizeof(*fw_rec) > fw->size) {
+ ret = -EINVAL;
+ break;
+ }
+ fw_rec = (struct iqs62x_fw_rec *)(fw->data + pos);
+ pos += sizeof(*fw_rec);
+
+ if (pos + fw_rec->len - 1 > fw->size) {
+ ret = -EINVAL;
+ break;
+ }
+ pos += fw_rec->len - 1;
+
+ switch (fw_rec->type) {
+ case IQS62X_FW_REC_TYPE_INFO:
+ continue;
+
+ case IQS62X_FW_REC_TYPE_PROD:
+ if (fw_rec->data == iqs62x->dev_desc->prod_num)
+ continue;
+
+ dev_err(&client->dev,
+ "Incompatible product number: 0x%02X\n",
+ fw_rec->data);
+ ret = -EINVAL;
+ break;
+
+ case IQS62X_FW_REC_TYPE_HALL:
+ if (!hall_cal_index) {
+ ret = regmap_write(iqs62x->regmap,
+ IQS62X_OTP_CMD,
+ IQS62X_OTP_CMD_FG3);
+ if (ret)
+ break;
+
+ ret = regmap_read(iqs62x->regmap,
+ IQS62X_OTP_DATA, &val);
+ if (ret)
+ break;
+
+ hall_cal_index = val & IQS62X_HALL_CAL_MASK;
+ if (!hall_cal_index) {
+ dev_err(&client->dev,
+ "Uncalibrated device\n");
+ ret = -ENODATA;
+ break;
+ }
+ }
+
+ if (hall_cal_index > fw_rec->len) {
+ ret = -EINVAL;
+ break;
+ }
+
+ mask = 0;
+ data = &fw_rec->data + hall_cal_index - 1;
+ len = sizeof(*data);
+ break;
+
+ case IQS62X_FW_REC_TYPE_MASK:
+ if (fw_rec->len < (sizeof(mask) + sizeof(*data))) {
+ ret = -EINVAL;
+ break;
+ }
+
+ mask = fw_rec->data;
+ data = &fw_rec->data + sizeof(mask);
+ len = sizeof(*data);
+ break;
+
+ case IQS62X_FW_REC_TYPE_DATA:
+ mask = 0;
+ data = &fw_rec->data;
+ len = fw_rec->len;
+ break;
+
+ default:
+ dev_err(&client->dev,
+ "Unrecognized record type: 0x%02X\n",
+ fw_rec->type);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ break;
+
+ fw_blk = devm_kzalloc(&client->dev,
+ struct_size(fw_blk, data, len),
+ GFP_KERNEL);
+ if (!fw_blk) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ fw_blk->addr = fw_rec->addr;
+ fw_blk->mask = mask;
+ fw_blk->len = len;
+ memcpy(fw_blk->data, data, len);
+
+ list_add(&fw_blk->list, &iqs62x->fw_blk_head);
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS] = {
+ [IQS62X_EVENT_PROX_CH0_T] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(4),
+ .val = BIT(4),
+ },
+ [IQS62X_EVENT_PROX_CH0_P] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(0),
+ .val = BIT(0),
+ },
+ [IQS62X_EVENT_PROX_CH1_T] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(5),
+ .val = BIT(5),
+ },
+ [IQS62X_EVENT_PROX_CH1_P] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(1),
+ .val = BIT(1),
+ },
+ [IQS62X_EVENT_PROX_CH2_T] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(6),
+ .val = BIT(6),
+ },
+ [IQS62X_EVENT_PROX_CH2_P] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(2),
+ .val = BIT(2),
+ },
+ [IQS62X_EVENT_HYST_POS_T] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(6) | BIT(7),
+ .val = BIT(6),
+ },
+ [IQS62X_EVENT_HYST_POS_P] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(5) | BIT(7),
+ .val = BIT(5),
+ },
+ [IQS62X_EVENT_HYST_NEG_T] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(6) | BIT(7),
+ .val = BIT(6) | BIT(7),
+ },
+ [IQS62X_EVENT_HYST_NEG_P] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(5) | BIT(7),
+ .val = BIT(5) | BIT(7),
+ },
+ [IQS62X_EVENT_SAR1_ACT] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(4),
+ .val = BIT(4),
+ },
+ [IQS62X_EVENT_SAR1_QRD] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(2),
+ .val = BIT(2),
+ },
+ [IQS62X_EVENT_SAR1_MOVE] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(1),
+ .val = BIT(1),
+ },
+ [IQS62X_EVENT_SAR1_HALT] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(0),
+ .val = BIT(0),
+ },
+ [IQS62X_EVENT_WHEEL_UP] = {
+ .reg = IQS62X_EVENT_WHEEL,
+ .mask = BIT(7) | BIT(6),
+ .val = BIT(7),
+ },
+ [IQS62X_EVENT_WHEEL_DN] = {
+ .reg = IQS62X_EVENT_WHEEL,
+ .mask = BIT(7) | BIT(6),
+ .val = BIT(7) | BIT(6),
+ },
+ [IQS62X_EVENT_HALL_N_T] = {
+ .reg = IQS62X_EVENT_HALL,
+ .mask = BIT(2) | BIT(0),
+ .val = BIT(2),
+ },
+ [IQS62X_EVENT_HALL_N_P] = {
+ .reg = IQS62X_EVENT_HALL,
+ .mask = BIT(1) | BIT(0),
+ .val = BIT(1),
+ },
+ [IQS62X_EVENT_HALL_S_T] = {
+ .reg = IQS62X_EVENT_HALL,
+ .mask = BIT(2) | BIT(0),
+ .val = BIT(2) | BIT(0),
+ },
+ [IQS62X_EVENT_HALL_S_P] = {
+ .reg = IQS62X_EVENT_HALL,
+ .mask = BIT(1) | BIT(0),
+ .val = BIT(1) | BIT(0),
+ },
+ [IQS62X_EVENT_SYS_RESET] = {
+ .reg = IQS62X_EVENT_SYS,
+ .mask = BIT(7),
+ .val = BIT(7),
+ },
+};
+EXPORT_SYMBOL_GPL(iqs62x_events);
+
+static irqreturn_t iqs62x_irq(int irq, void *context)
+{
+ struct iqs62x_core *iqs62x = context;
+ struct i2c_client *client = iqs62x->client;
+ struct iqs62x_event_data event_data;
+ struct iqs62x_event_desc event_desc;
+ enum iqs62x_event_reg event_reg;
+ unsigned long event_flags = 0;
+ int ret, i, j;
+ u8 event_map[IQS62X_EVENT_SIZE];
+
+ /*
+ * The device asserts the RDY output to signal the beginning of a
+ * communication window, which is closed by an I2C stop condition.
+ * As such, all interrupt status is captured in a single read and
+ * broadcast to any interested sub-device drivers.
+ */
+ ret = regmap_raw_read(iqs62x->regmap, IQS62X_SYS_FLAGS, event_map,
+ sizeof(event_map));
+ if (ret) {
+ dev_err(&client->dev, "Failed to read device status: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < sizeof(event_map); i++) {
+ event_reg = iqs62x->dev_desc->event_regs[iqs62x->ui_sel][i];
+
+ switch (event_reg) {
+ case IQS62X_EVENT_UI_LO:
+ event_data.ui_data = get_unaligned_le16(&event_map[i]);
+
+ /* fall through */
+
+ case IQS62X_EVENT_UI_HI:
+ case IQS62X_EVENT_NONE:
+ continue;
+
+ case IQS62X_EVENT_ALS:
+ event_data.als_flags = event_map[i];
+ continue;
+
+ case IQS62X_EVENT_IR:
+ event_data.ir_flags = event_map[i];
+ continue;
+
+ case IQS62X_EVENT_INTER:
+ event_data.interval = event_map[i];
+ continue;
+
+ case IQS62X_EVENT_HYST:
+ event_map[i] <<= iqs62x->dev_desc->hyst_shift;
+
+ /* fall through */
+
+ case IQS62X_EVENT_WHEEL:
+ case IQS62X_EVENT_HALL:
+ case IQS62X_EVENT_PROX:
+ case IQS62X_EVENT_SYS:
+ break;
+ }
+
+ for (j = 0; j < IQS62X_NUM_EVENTS; j++) {
+ event_desc = iqs62x_events[j];
+
+ if (event_desc.reg != event_reg)
+ continue;
+
+ if ((event_map[i] & event_desc.mask) == event_desc.val)
+ event_flags |= BIT(j);
+ }
+ }
+
+ /*
+ * The device resets itself in response to the I2C master stalling
+ * communication past a fixed timeout. In this case, all registers
+ * are restored and any interested sub-device drivers are notified.
+ */
+ if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ ret = iqs62x_dev_init(iqs62x);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to re-initialize device: %d\n", ret);
+ return IRQ_NONE;
+ }
+ }
+
+ ret = blocking_notifier_call_chain(&iqs62x->nh, event_flags,
+ &event_data);
+ if (ret & NOTIFY_STOP_MASK)
+ return IRQ_NONE;
+
+ /*
+ * Once the communication window is closed, a small delay is added to
+ * ensure the device's RDY output has been deasserted by the time the
+ * interrupt handler returns.
+ */
+ usleep_range(50, 100);
+
+ return IRQ_HANDLED;
+}
+
+static void iqs62x_firmware_load(const struct firmware *fw, void *context)
+{
+ struct iqs62x_core *iqs62x = context;
+ struct i2c_client *client = iqs62x->client;
+ int ret;
+
+ if (fw) {
+ ret = iqs62x_firmware_parse(iqs62x, fw);
+ if (ret) {
+ dev_err(&client->dev, "Failed to parse firmware: %d\n",
+ ret);
+ goto err_out;
+ }
+ }
+
+ ret = iqs62x_dev_init(iqs62x);
+ if (ret) {
+ dev_err(&client->dev, "Failed to initialize device: %d\n", ret);
+ goto err_out;
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, iqs62x_irq, IRQF_ONESHOT,
+ client->name, iqs62x);
+ if (ret) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", ret);
+ goto err_out;
+ }
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ iqs62x->dev_desc->sub_devs,
+ iqs62x->dev_desc->num_sub_devs,
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(&client->dev, "Failed to add sub-devices: %d\n", ret);
+
+err_out:
+ complete_all(&iqs62x->fw_done);
+}
+
+static const struct mfd_cell iqs620at_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs620a-keys",
+ },
+ {
+ .name = "iqs620a-pwm",
+ .of_compatible = "azoteq,iqs620a-pwm",
+ },
+ { .name = "iqs620at-temp", },
+};
+
+static const struct mfd_cell iqs620a_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs620a-keys",
+ },
+ {
+ .name = "iqs620a-pwm",
+ .of_compatible = "azoteq,iqs620a-pwm",
+ },
+};
+
+static const struct mfd_cell iqs621_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs621-keys",
+ },
+ { .name = "iqs621-als", },
+};
+
+static const struct mfd_cell iqs622_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs622-keys",
+ },
+ { .name = "iqs621-als", },
+};
+
+static const struct mfd_cell iqs624_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs624-keys",
+ },
+ { .name = "iqs624-pos", },
+};
+
+static const struct mfd_cell iqs625_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs625-keys",
+ },
+ { .name = "iqs624-pos", },
+};
+
+static const u8 iqs620at_cal_regs[] = {
+ IQS620_TEMP_CAL_MULT,
+ IQS620_TEMP_CAL_DIV,
+ IQS620_TEMP_CAL_OFFS,
+};
+
+static const u8 iqs621_cal_regs[] = {
+ IQS621_ALS_CAL_DIV_LUX,
+ IQS621_ALS_CAL_DIV_IR,
+};
+
+static const enum iqs62x_event_reg iqs620a_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_PROX, /* 0x12 */
+ IQS62X_EVENT_HYST, /* 0x13 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_HALL, /* 0x16 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ },
+ [IQS62X_UI_SAR1] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_HYST, /* 0x13 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_HALL, /* 0x16 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ },
+};
+
+static const enum iqs62x_event_reg iqs621_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_PROX, /* 0x12 */
+ IQS62X_EVENT_HYST, /* 0x13 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_ALS, /* 0x16 */
+ IQS62X_EVENT_UI_LO, /* 0x17 */
+ IQS62X_EVENT_UI_HI, /* 0x18 */
+ IQS62X_EVENT_HALL, /* 0x19 */
+ },
+};
+
+static const enum iqs62x_event_reg iqs622_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_PROX, /* 0x12 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_ALS, /* 0x14 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_IR, /* 0x16 */
+ IQS62X_EVENT_UI_LO, /* 0x17 */
+ IQS62X_EVENT_UI_HI, /* 0x18 */
+ IQS62X_EVENT_HALL, /* 0x19 */
+ },
+ [IQS62X_UI_SAR1] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_HYST, /* 0x13 */
+ IQS62X_EVENT_ALS, /* 0x14 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_IR, /* 0x16 */
+ IQS62X_EVENT_UI_LO, /* 0x17 */
+ IQS62X_EVENT_UI_HI, /* 0x18 */
+ IQS62X_EVENT_HALL, /* 0x19 */
+ },
+};
+
+static const enum iqs62x_event_reg iqs624_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_PROX, /* 0x12 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_WHEEL, /* 0x14 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_UI_LO, /* 0x16 */
+ IQS62X_EVENT_UI_HI, /* 0x17 */
+ IQS62X_EVENT_INTER, /* 0x18 */
+ IQS62X_EVENT_NONE,
+ },
+};
+
+static const enum iqs62x_event_reg iqs625_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_PROX, /* 0x11 */
+ IQS62X_EVENT_INTER, /* 0x12 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ },
+};
+
+static const struct iqs62x_dev_desc iqs62x_devs[] = {
+ {
+ .dev_name = "iqs620at",
+ .sub_devs = iqs620at_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs620at_sub_devs),
+
+ .prod_num = IQS620_PROD_NUM,
+ .sw_num = 0x08,
+ .cal_regs = iqs620at_cal_regs,
+ .num_cal_regs = ARRAY_SIZE(iqs620at_cal_regs),
+
+ .prox_mask = BIT(0),
+ .sar_mask = BIT(1) | BIT(7),
+ .hall_mask = BIT(2),
+ .hyst_mask = BIT(3),
+ .temp_mask = BIT(4),
+
+ .prox_settings = IQS620_PROX_SETTINGS_4,
+ .hall_flags = IQS620_HALL_FLAGS,
+
+ .clk_div = 4,
+ .fw_name = "iqs620a.bin",
+ .event_regs = &iqs620a_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs620a",
+ .sub_devs = iqs620a_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs620a_sub_devs),
+
+ .prod_num = IQS620_PROD_NUM,
+ .sw_num = 0x08,
+
+ .prox_mask = BIT(0),
+ .sar_mask = BIT(1) | BIT(7),
+ .hall_mask = BIT(2),
+ .hyst_mask = BIT(3),
+ .temp_mask = BIT(4),
+
+ .prox_settings = IQS620_PROX_SETTINGS_4,
+ .hall_flags = IQS620_HALL_FLAGS,
+
+ .clk_div = 4,
+ .fw_name = "iqs620a.bin",
+ .event_regs = &iqs620a_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs621",
+ .sub_devs = iqs621_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs621_sub_devs),
+
+ .prod_num = IQS621_PROD_NUM,
+ .sw_num = 0x09,
+ .cal_regs = iqs621_cal_regs,
+ .num_cal_regs = ARRAY_SIZE(iqs621_cal_regs),
+
+ .prox_mask = BIT(0),
+ .hall_mask = BIT(1),
+ .als_mask = BIT(2),
+ .hyst_mask = BIT(3),
+ .temp_mask = BIT(4),
+
+ .als_flags = IQS621_ALS_FLAGS,
+ .hall_flags = IQS621_HALL_FLAGS,
+ .hyst_shift = 5,
+
+ .clk_div = 2,
+ .fw_name = "iqs621.bin",
+ .event_regs = &iqs621_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs622",
+ .sub_devs = iqs622_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs622_sub_devs),
+
+ .prod_num = IQS622_PROD_NUM,
+ .sw_num = 0x06,
+
+ .prox_mask = BIT(0),
+ .sar_mask = BIT(1),
+ .hall_mask = BIT(2),
+ .als_mask = BIT(3),
+ .ir_mask = BIT(4),
+
+ .prox_settings = IQS622_PROX_SETTINGS_4,
+ .als_flags = IQS622_ALS_FLAGS,
+ .hall_flags = IQS622_HALL_FLAGS,
+
+ .clk_div = 2,
+ .fw_name = "iqs622.bin",
+ .event_regs = &iqs622_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs624",
+ .sub_devs = iqs624_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs624_sub_devs),
+
+ .prod_num = IQS624_PROD_NUM,
+ .sw_num = 0x0B,
+
+ .interval = IQS624_INTERVAL_NUM,
+ .interval_div = 3,
+
+ .clk_div = 2,
+ .fw_name = "iqs624.bin",
+ .event_regs = &iqs624_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs625",
+ .sub_devs = iqs625_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs625_sub_devs),
+
+ .prod_num = IQS625_PROD_NUM,
+ .sw_num = 0x0B,
+
+ .interval = IQS625_INTERVAL_NUM,
+ .interval_div = 10,
+
+ .clk_div = 2,
+ .fw_name = "iqs625.bin",
+ .event_regs = &iqs625_event_regs[IQS62X_UI_PROX],
+ },
+};
+
+static const struct regmap_config iqs62x_map_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IQS62X_MAX_REG,
+};
+
+static int iqs62x_probe(struct i2c_client *client)
+{
+ struct iqs62x_core *iqs62x;
+ struct iqs62x_info info;
+ unsigned int val;
+ int ret, i, j;
+ u8 sw_num = 0;
+ const char *fw_name = NULL;
+
+ iqs62x = devm_kzalloc(&client->dev, sizeof(*iqs62x), GFP_KERNEL);
+ if (!iqs62x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iqs62x);
+ iqs62x->client = client;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&iqs62x->nh);
+ INIT_LIST_HEAD(&iqs62x->fw_blk_head);
+ init_completion(&iqs62x->fw_done);
+
+ iqs62x->regmap = devm_regmap_init_i2c(client, &iqs62x_map_config);
+ if (IS_ERR(iqs62x->regmap)) {
+ ret = PTR_ERR(iqs62x->regmap);
+ dev_err(&client->dev, "Failed to initialize register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_raw_read(iqs62x->regmap, IQS62X_PROD_NUM, &info,
+ sizeof(info));
+ if (ret)
+ return ret;
+
+ /*
+ * The following sequence validates the device's product and software
+ * numbers. It then determines if the device is factory-calibrated by
+ * checking for nonzero values in the device's designated calibration
+ * registers (if applicable). Depending on the device, the absence of
+ * calibration data indicates a reduced feature set or invalid device.
+ *
+	 * For devices offered in both calibrated and uncalibrated versions, the
+ * calibrated version (e.g. IQS620AT) appears first in the iqs62x_devs
+ * array. The uncalibrated version (e.g. IQS620A) appears next and has
+ * the same product and software numbers, but no calibration registers
+ * are specified.
+ */
+ for (i = 0; i < ARRAY_SIZE(iqs62x_devs); i++) {
+ if (info.prod_num != iqs62x_devs[i].prod_num)
+ continue;
+
+ iqs62x->dev_desc = &iqs62x_devs[i];
+
+ if (info.sw_num < iqs62x->dev_desc->sw_num)
+ continue;
+
+ sw_num = info.sw_num;
+
+ /*
+ * Read each of the device's designated calibration registers,
+ * if any, and exit from the inner loop early if any are equal
+ * to zero (indicating the device is uncalibrated). This could
+ * be acceptable depending on the device (e.g. IQS620A instead
+ * of IQS620AT).
+ */
+ for (j = 0; j < iqs62x->dev_desc->num_cal_regs; j++) {
+ ret = regmap_read(iqs62x->regmap,
+ iqs62x->dev_desc->cal_regs[j], &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ break;
+ }
+
+ /*
+ * If the number of nonzero values read from the device equals
+ * the number of designated calibration registers (which could
+ * be zero), exit from the outer loop early to signal that the
+ * device's product and software numbers match a known device,
+ * and the device is calibrated (if applicable).
+ */
+ if (j == iqs62x->dev_desc->num_cal_regs)
+ break;
+ }
+
+ if (!iqs62x->dev_desc) {
+ dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
+ info.prod_num);
+ return -EINVAL;
+ }
+
+ if (!sw_num) {
+ dev_err(&client->dev, "Unrecognized software number: 0x%02X\n",
+ info.sw_num);
+ return -EINVAL;
+ }
+
+ if (i == ARRAY_SIZE(iqs62x_devs)) {
+ dev_err(&client->dev, "Uncalibrated device\n");
+ return -ENODATA;
+ }
+
+ device_property_read_string(&client->dev, "firmware-name", &fw_name);
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ fw_name ? : iqs62x->dev_desc->fw_name,
+ &client->dev, GFP_KERNEL, iqs62x,
+ iqs62x_firmware_load);
+ if (ret)
+ dev_err(&client->dev, "Failed to request firmware: %d\n", ret);
+
+ return ret;
+}
+
+static int iqs62x_remove(struct i2c_client *client)
+{
+ struct iqs62x_core *iqs62x = i2c_get_clientdata(client);
+
+ wait_for_completion(&iqs62x->fw_done);
+
+ return 0;
+}
+
+static int __maybe_unused iqs62x_suspend(struct device *dev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(dev);
+ int ret;
+
+ wait_for_completion(&iqs62x->fw_done);
+
+ /*
+ * As per the datasheet, automatic mode switching must be disabled
+ * before the device is placed in or taken out of halt mode.
+ */
+ ret = regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS,
+ IQS62X_PWR_SETTINGS_DIS_AUTO, 0xFF);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS,
+ IQS62X_PWR_SETTINGS_PWR_MODE_MASK,
+ IQS62X_PWR_SETTINGS_PWR_MODE_HALT);
+}
+
+static int __maybe_unused iqs62x_resume(struct device *dev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS,
+ IQS62X_PWR_SETTINGS_PWR_MODE_MASK,
+ IQS62X_PWR_SETTINGS_PWR_MODE_NORM);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS,
+ IQS62X_PWR_SETTINGS_DIS_AUTO, 0);
+}
+
+static SIMPLE_DEV_PM_OPS(iqs62x_pm, iqs62x_suspend, iqs62x_resume);
+
+static const struct of_device_id iqs62x_of_match[] = {
+ { .compatible = "azoteq,iqs620a" },
+ { .compatible = "azoteq,iqs621" },
+ { .compatible = "azoteq,iqs622" },
+ { .compatible = "azoteq,iqs624" },
+ { .compatible = "azoteq,iqs625" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs62x_of_match);
+
+static struct i2c_driver iqs62x_i2c_driver = {
+ .driver = {
+ .name = "iqs62x",
+ .of_match_table = iqs62x_of_match,
+ .pm = &iqs62x_pm,
+ },
+ .probe_new = iqs62x_probe,
+ .remove = iqs62x_remove,
+};
+module_i2c_driver(iqs62x_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS620A/621/622/624/625 Multi-Function Sensors");
+MODULE_LICENSE("GPL");
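The sub-device drivers spawned above consume events by registering with the blocking notifier head that iqs62x_irq() broadcasts on. A sketch of a hypothetical child driver's handler (names other than the exported IQS62X_* symbols are invented):

static int my_iqs62x_notifier(struct notifier_block *nb,
			      unsigned long event_flags, void *context)
{
	struct iqs62x_event_data *event_data = context;

	/* Re-apply any device-specific settings after an unexpected reset. */
	if (event_flags & BIT(IQS62X_EVENT_SYS_RESET))
		return NOTIFY_OK;

	if (event_flags & BIT(IQS62X_EVENT_HALL_N_P))
		pr_debug("north-pole proximity, UI data: %u\n",
			 event_data->ui_data);

	return NOTIFY_OK;
}

/* From the child driver's probe, with iqs62x = dev_get_drvdata(dev->parent):
 *	blocking_notifier_chain_register(&iqs62x->nh, &my_notifier_block);
 */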
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 4798d9f3f9d5..1f4f01b02d98 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -840,7 +840,7 @@ MODULE_DEVICE_TABLE(of, usbhs_omap_dt_ids);
static struct platform_driver usbhs_omap_driver = {
.driver = {
- .name = (char *)usbhs_driver_name,
+ .name = usbhs_driver_name,
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 265f5e350e1c..4b7f73c317e8 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -99,7 +99,7 @@
struct usbtll_omap {
void __iomem *base;
int nch; /* num. of channels */
- struct clk *ch_clk[0]; /* must be the last member */
+ struct clk *ch_clk[]; /* must be the last member */
};
/*-------------------------------------------------------------------------*/
@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, usbtll_omap_dt_ids);
static struct platform_driver usbtll_omap_driver = {
.driver = {
- .name = (char *)usbtll_driver_name,
+ .name = usbtll_driver_name,
.of_match_table = usbtll_omap_dt_ids,
},
.probe = usbtll_omap_probe,
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index 29133326c6fd..acd172ddcbd6 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -76,7 +76,7 @@ struct pm_irq_chip {
unsigned int num_masters;
const struct pm_irq_data *pm_irq_data;
/* MUST BE AT THE END OF THIS STRUCT */
- u8 config[0];
+ u8 config[];
};
static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp,
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index a69a6742ecdc..d109b9f14407 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
-#include <linux/syscore_ops.h>
struct rk808_reg_data {
int addr;
@@ -186,7 +185,6 @@ static const struct rk808_reg_data rk805_pre_init_reg[] = {
{RK805_BUCK4_CONFIG_REG, RK805_BUCK3_4_ILMAX_MASK,
RK805_BUCK4_ILMAX_3500MA},
{RK805_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_400MA},
- {RK805_GPIO_IO_POL_REG, SLP_SD_MSK, SLEEP_FUN},
{RK805_THERMAL_REG, TEMP_HOTDIE_MSK, TEMP115C},
};
@@ -449,88 +447,60 @@ static const struct regmap_irq_chip rk818_irq_chip = {
static struct i2c_client *rk808_i2c_client;
-static void rk805_device_shutdown(void)
+static void rk808_pm_power_off(void)
{
int ret;
+ unsigned int reg, bit;
struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
- if (!rk808)
- return;
-
- ret = regmap_update_bits(rk808->regmap,
- RK805_DEV_CTRL_REG,
- DEV_OFF, DEV_OFF);
- if (ret)
- dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
-}
-
-static void rk805_device_shutdown_prepare(void)
-{
- int ret;
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
-
- if (!rk808)
- return;
-
- ret = regmap_update_bits(rk808->regmap,
- RK805_GPIO_IO_POL_REG,
- SLP_SD_MSK, SHUTDOWN_FUN);
- if (ret)
- dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
-}
-
-static void rk808_device_shutdown(void)
-{
- int ret;
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
-
- if (!rk808)
- return;
-
- ret = regmap_update_bits(rk808->regmap,
- RK808_DEVCTRL_REG,
- DEV_OFF_RST, DEV_OFF_RST);
- if (ret)
- dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
-}
-
-static void rk818_device_shutdown(void)
-{
- int ret;
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
-
- if (!rk808)
+ switch (rk808->variant) {
+ case RK805_ID:
+ reg = RK805_DEV_CTRL_REG;
+ bit = DEV_OFF;
+ break;
+ case RK808_ID:
+		reg = RK808_DEVCTRL_REG;
+ bit = DEV_OFF_RST;
+ break;
+ case RK818_ID:
+ reg = RK818_DEVCTRL_REG;
+ bit = DEV_OFF;
+ break;
+ default:
return;
-
- ret = regmap_update_bits(rk808->regmap,
- RK818_DEVCTRL_REG,
- DEV_OFF, DEV_OFF);
+ }
+ ret = regmap_update_bits(rk808->regmap, reg, bit, bit);
if (ret)
dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
}
-static void rk8xx_syscore_shutdown(void)
+static void rk8xx_shutdown(struct i2c_client *client)
{
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+ struct rk808 *rk808 = i2c_get_clientdata(client);
int ret;
- if (system_state == SYSTEM_POWER_OFF &&
- (rk808->variant == RK809_ID || rk808->variant == RK817_ID)) {
+ switch (rk808->variant) {
+ case RK805_ID:
+ ret = regmap_update_bits(rk808->regmap,
+ RK805_GPIO_IO_POL_REG,
+ SLP_SD_MSK,
+ SHUTDOWN_FUN);
+ break;
+ case RK809_ID:
+ case RK817_ID:
ret = regmap_update_bits(rk808->regmap,
RK817_SYS_CFG(3),
RK817_SLPPIN_FUNC_MSK,
SLPPIN_DN_FUN);
- if (ret) {
- dev_warn(&rk808_i2c_client->dev,
- "Cannot switch to power down function\n");
- }
+ break;
+ default:
+ return;
}
+ if (ret)
+ dev_warn(&client->dev,
+ "Cannot switch to power down function\n");
}
-static struct syscore_ops rk808_syscore_ops = {
- .shutdown = rk8xx_syscore_shutdown,
-};
-
static const struct of_device_id rk808_of_match[] = {
{ .compatible = "rockchip,rk805" },
{ .compatible = "rockchip,rk808" },
@@ -550,7 +520,7 @@ static int rk808_probe(struct i2c_client *client,
const struct mfd_cell *cells;
int nr_pre_init_regs;
int nr_cells;
- int pm_off = 0, msb, lsb;
+ int msb, lsb;
unsigned char pmic_id_msb, pmic_id_lsb;
int ret;
int i;
@@ -594,8 +564,6 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk805_pre_init_reg);
cells = rk805s;
nr_cells = ARRAY_SIZE(rk805s);
- rk808->pm_pwroff_fn = rk805_device_shutdown;
- rk808->pm_pwroff_prep_fn = rk805_device_shutdown_prepare;
break;
case RK808_ID:
rk808->regmap_cfg = &rk808_regmap_config;
@@ -604,7 +572,6 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg);
cells = rk808s;
nr_cells = ARRAY_SIZE(rk808s);
- rk808->pm_pwroff_fn = rk808_device_shutdown;
break;
case RK818_ID:
rk808->regmap_cfg = &rk818_regmap_config;
@@ -613,7 +580,6 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg);
cells = rk818s;
nr_cells = ARRAY_SIZE(rk818s);
- rk808->pm_pwroff_fn = rk818_device_shutdown;
break;
case RK809_ID:
case RK817_ID:
@@ -623,7 +589,6 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk817_pre_init_reg);
cells = rk817s;
nr_cells = ARRAY_SIZE(rk817s);
- register_syscore_ops(&rk808_syscore_ops);
break;
default:
dev_err(&client->dev, "Unsupported RK8XX ID %lu\n",
@@ -674,17 +639,9 @@ static int rk808_probe(struct i2c_client *client,
goto err_irq;
}
- pm_off = of_property_read_bool(np,
- "rockchip,system-power-controller");
- if (pm_off && !pm_power_off) {
+ if (of_property_read_bool(np, "rockchip,system-power-controller")) {
rk808_i2c_client = client;
- pm_power_off = rk808->pm_pwroff_fn;
- }
-
- if (pm_off && !pm_power_off_prepare) {
- if (!rk808_i2c_client)
- rk808_i2c_client = client;
- pm_power_off_prepare = rk808->pm_pwroff_prep_fn;
+ pm_power_off = rk808_pm_power_off;
}
return 0;
@@ -704,25 +661,24 @@ static int rk808_remove(struct i2c_client *client)
 * pm_power_off may point to a function from another module.
* Check if the pointer is set by us and only then overwrite it.
*/
- if (rk808->pm_pwroff_fn && pm_power_off == rk808->pm_pwroff_fn)
+ if (pm_power_off == rk808_pm_power_off)
pm_power_off = NULL;
- /**
- * As above, check if the pointer is set by us before overwrite.
- */
- if (rk808->pm_pwroff_prep_fn &&
- pm_power_off_prepare == rk808->pm_pwroff_prep_fn)
- pm_power_off_prepare = NULL;
-
return 0;
}
static int __maybe_unused rk8xx_suspend(struct device *dev)
{
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+ struct rk808 *rk808 = i2c_get_clientdata(to_i2c_client(dev));
int ret = 0;
switch (rk808->variant) {
+ case RK805_ID:
+ ret = regmap_update_bits(rk808->regmap,
+ RK805_GPIO_IO_POL_REG,
+ SLP_SD_MSK,
+ SLEEP_FUN);
+ break;
case RK809_ID:
case RK817_ID:
ret = regmap_update_bits(rk808->regmap,
@@ -739,7 +695,7 @@ static int __maybe_unused rk8xx_suspend(struct device *dev)
static int __maybe_unused rk8xx_resume(struct device *dev)
{
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+ struct rk808 *rk808 = i2c_get_clientdata(to_i2c_client(dev));
int ret = 0;
switch (rk808->variant) {
@@ -766,6 +722,7 @@ static struct i2c_driver rk808_i2c_driver = {
},
.probe = rk808_probe,
.remove = rk808_remove,
+ .shutdown = rk8xx_shutdown,
};
module_i2c_driver(rk808_i2c_driver);
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index ead2e79036a9..232de50562f9 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -8,10 +8,13 @@
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rn5t618.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
@@ -20,6 +23,13 @@ static const struct mfd_cell rn5t618_cells[] = {
{ .name = "rn5t618-wdt" },
};
+static const struct mfd_cell rc5t619_cells[] = {
+ { .name = "rn5t618-adc" },
+ { .name = "rn5t618-regulator" },
+ { .name = "rc5t619-rtc" },
+ { .name = "rn5t618-wdt" },
+};
+
static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -32,6 +42,8 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
case RN5T618_IR_GPF:
case RN5T618_MON_IOIN:
case RN5T618_INTMON:
+ case RN5T618_RTC_CTRL1 ... RN5T618_RTC_CTRL2:
+ case RN5T618_RTC_SECONDS ... RN5T618_RTC_YEAR:
return true;
default:
return false;
@@ -46,9 +58,56 @@ static const struct regmap_config rn5t618_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+static const struct regmap_irq rc5t619_irqs[] = {
+ REGMAP_IRQ_REG(RN5T618_IRQ_SYS, 0, BIT(0)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_DCDC, 0, BIT(1)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_RTC, 0, BIT(2)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_ADC, 0, BIT(3)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_GPIO, 0, BIT(4)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_CHG, 0, BIT(6)),
+};
+
+static const struct regmap_irq_chip rc5t619_irq_chip = {
+ .name = "rc5t619",
+ .irqs = rc5t619_irqs,
+ .num_irqs = ARRAY_SIZE(rc5t619_irqs),
+ .num_regs = 1,
+ .status_base = RN5T618_INTMON,
+ .mask_base = RN5T618_INTEN,
+ .mask_invert = true,
+};
+
static struct rn5t618 *rn5t618_pm_power_off;
static struct notifier_block rn5t618_restart_handler;
+static int rn5t618_irq_init(struct rn5t618 *rn5t618)
+{
+ const struct regmap_irq_chip *irq_chip = NULL;
+ int ret;
+
+ if (!rn5t618->irq)
+ return 0;
+
+ switch (rn5t618->variant) {
+ case RC5T619:
+ irq_chip = &rc5t619_irq_chip;
+ break;
+ default:
+ dev_err(rn5t618->dev, "Currently no IRQ support for variant %d\n",
+ (int)rn5t618->variant);
+ return -ENOENT;
+ }
+
+ ret = devm_regmap_add_irq_chip(rn5t618->dev, rn5t618->regmap,
+ rn5t618->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 0, irq_chip, &rn5t618->irq_data);
+ if (ret)
+ dev_err(rn5t618->dev, "Failed to register IRQ chip\n");
+
+ return ret;
+}
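
With the IRQ chip registered via devm_regmap_add_irq_chip(), sub-drivers can translate a PMIC-local interrupt into a Linux virq. A hedged sketch: example_get_rtc_irq() is hypothetical, while regmap_irq_get_virq() and the RN5T618_IRQ_RTC bit defined above are real:

	#include <linux/mfd/rn5t618.h>
	#include <linux/regmap.h>

	static int example_get_rtc_irq(struct rn5t618 *rn5t618)
	{
		if (!rn5t618->irq_data)
			return -ENODEV;
		/* map the chip-local RTC interrupt bit to a usable virq */
		return regmap_irq_get_virq(rn5t618->irq_data, RN5T618_IRQ_RTC);
	}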
+
static void rn5t618_trigger_poweroff_sequence(bool repower)
{
/* disable automatic repower-on */
@@ -87,8 +146,7 @@ static const struct of_device_id rn5t618_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rn5t618_of_match);
-static int rn5t618_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int rn5t618_i2c_probe(struct i2c_client *i2c)
{
const struct of_device_id *of_id;
struct rn5t618 *priv;
@@ -106,6 +164,8 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, priv);
priv->variant = (long)of_id->data;
+ priv->irq = i2c->irq;
+ priv->dev = &i2c->dev;
priv->regmap = devm_regmap_init_i2c(i2c, &rn5t618_regmap_config);
if (IS_ERR(priv->regmap)) {
@@ -114,8 +174,16 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = devm_mfd_add_devices(&i2c->dev, -1, rn5t618_cells,
- ARRAY_SIZE(rn5t618_cells), NULL, 0, NULL);
+ if (priv->variant == RC5T619)
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE,
+ rc5t619_cells,
+ ARRAY_SIZE(rc5t619_cells),
+ NULL, 0, NULL);
+ else
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE,
+ rn5t618_cells,
+ ARRAY_SIZE(rn5t618_cells),
+ NULL, 0, NULL);
if (ret) {
dev_err(&i2c->dev, "failed to add sub-devices: %d\n", ret);
return ret;
@@ -138,7 +206,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
return ret;
}
- return 0;
+ return rn5t618_irq_init(priv);
}
static int rn5t618_i2c_remove(struct i2c_client *i2c)
@@ -155,19 +223,38 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c)
return 0;
}
-static const struct i2c_device_id rn5t618_i2c_id[] = {
- { }
-};
-MODULE_DEVICE_TABLE(i2c, rn5t618_i2c_id);
+static int __maybe_unused rn5t618_i2c_suspend(struct device *dev)
+{
+ struct rn5t618 *priv = dev_get_drvdata(dev);
+
+ if (priv->irq)
+ disable_irq(priv->irq);
+
+ return 0;
+}
+
+static int __maybe_unused rn5t618_i2c_resume(struct device *dev)
+{
+ struct rn5t618 *priv = dev_get_drvdata(dev);
+
+ if (priv->irq)
+ enable_irq(priv->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rn5t618_i2c_dev_pm_ops,
+ rn5t618_i2c_suspend,
+ rn5t618_i2c_resume);
static struct i2c_driver rn5t618_i2c_driver = {
.driver = {
.name = "rn5t618",
.of_match_table = of_match_ptr(rn5t618_of_match),
+ .pm = &rn5t618_i2c_dev_pm_ops,
},
- .probe = rn5t618_i2c_probe,
+ .probe_new = rn5t618_i2c_probe,
.remove = rn5t618_i2c_remove,
- .id_table = rn5t618_i2c_id,
};
module_i2c_driver(rn5t618_i2c_driver);
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index c0529a1cd5ea..ebdf2f11ae28 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -10,6 +10,7 @@
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <uapi/linux/usb/charger.h>
#define SPRD_PMIC_INT_MASK_STATUS 0x0
#define SPRD_PMIC_INT_RAW_STATUS 0x4
@@ -17,6 +18,16 @@
#define SPRD_SC2731_IRQ_BASE 0x140
#define SPRD_SC2731_IRQ_NUMS 16
+#define SPRD_SC2731_CHG_DET 0xedc
+
+/* PMIC charger detection definition */
+#define SPRD_PMIC_CHG_DET_DELAY_US 200000
+#define SPRD_PMIC_CHG_DET_TIMEOUT 2000000
+#define SPRD_PMIC_CHG_DET_DONE BIT(11)
+#define SPRD_PMIC_SDP_TYPE BIT(7)
+#define SPRD_PMIC_DCP_TYPE BIT(6)
+#define SPRD_PMIC_CDP_TYPE BIT(5)
+#define SPRD_PMIC_CHG_TYPE_MASK GENMASK(7, 5)
struct sprd_pmic {
struct regmap *regmap;
@@ -24,12 +35,14 @@ struct sprd_pmic {
struct regmap_irq *irqs;
struct regmap_irq_chip irq_chip;
struct regmap_irq_chip_data *irq_data;
+ const struct sprd_pmic_data *pdata;
int irq;
};
struct sprd_pmic_data {
u32 irq_base;
u32 num_irqs;
+ u32 charger_det;
};
/*
@@ -40,8 +53,46 @@ struct sprd_pmic_data {
static const struct sprd_pmic_data sc2731_data = {
.irq_base = SPRD_SC2731_IRQ_BASE,
.num_irqs = SPRD_SC2731_IRQ_NUMS,
+ .charger_det = SPRD_SC2731_CHG_DET,
};
+enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct sprd_pmic *ddata = spi_get_drvdata(spi);
+ const struct sprd_pmic_data *pdata = ddata->pdata;
+ enum usb_charger_type type;
+ u32 val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(ddata->regmap, pdata->charger_det, val,
+ (val & SPRD_PMIC_CHG_DET_DONE),
+ SPRD_PMIC_CHG_DET_DELAY_US,
+ SPRD_PMIC_CHG_DET_TIMEOUT);
+ if (ret) {
+ dev_err(&spi->dev, "failed to detect charger type\n");
+ return UNKNOWN_TYPE;
+ }
+
+ switch (val & SPRD_PMIC_CHG_TYPE_MASK) {
+ case SPRD_PMIC_CDP_TYPE:
+ type = CDP_TYPE;
+ break;
+ case SPRD_PMIC_DCP_TYPE:
+ type = DCP_TYPE;
+ break;
+ case SPRD_PMIC_SDP_TYPE:
+ type = SDP_TYPE;
+ break;
+ default:
+ type = UNKNOWN_TYPE;
+ break;
+ }
+
+ return type;
+}
+EXPORT_SYMBOL_GPL(sprd_pmic_detect_charger_type);
+
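
A hedged usage sketch for the newly exported helper. The helper takes the PMIC's SPI device, so an MFD child (for instance a charger cell) would pass its parent; example_report_charger() and the parent/child layout are assumptions:

	#include <linux/device.h>
	#include <uapi/linux/usb/charger.h>

	static void example_report_charger(struct device *child)
	{
		enum usb_charger_type type =
			sprd_pmic_detect_charger_type(child->parent);

		if (type == DCP_TYPE)
			dev_info(child, "dedicated charging port attached\n");
	}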
static const struct mfd_cell sprd_pmic_devs[] = {
{
.name = "sc27xx-wdt",
@@ -181,6 +232,7 @@ static int sprd_pmic_probe(struct spi_device *spi)
spi_set_drvdata(spi, ddata);
ddata->dev = &spi->dev;
ddata->irq = spi->irq;
+ ddata->pdata = pdata;
ddata->irq_chip.name = dev_name(&spi->dev);
ddata->irq_chip.status_base =
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 7f0d48f406e3..99e151475d8f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -480,4 +480,5 @@ source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
+source "drivers/misc/uacce/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c1860d35dc7e..9abf2923d831 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -56,4 +56,5 @@ obj-$(CONFIG_OCXL) += ocxl/
obj-y += cardreader/
obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
+obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 423fecc19fc4..3a9467aaa435 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -394,6 +394,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
void rts522a_init_params(struct rtsx_pcr *pcr)
{
rts5227_init_params(pcr);
+ pcr->ops = &rts522a_pcr_ops;
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 06038b325b02..55da6428ceb0 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -142,6 +142,9 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
rtsx_disable_aspm(pcr);
+ /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
+ msleep(1);
+
if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 282c9ef68ed2..9ff18d4961ce 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -227,6 +227,7 @@ MODULE_DEVICE_TABLE(of, at24_of_match);
static const struct acpi_device_id at24_acpi_ids[] = {
{ "INT3499", (kernel_ulong_t)&at24_data_INT3499 },
+ { "TPF0001", (kernel_ulong_t)&at24_data_24c1024 },
{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(acpi, at24_acpi_ids);
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 0bf08678431b..409276b6374d 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -129,6 +129,8 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
spin_unlock(&job->user_cb->lock);
hl_cb_put(job->user_cb);
job->user_cb = NULL;
+ } else if (!rc) {
+ job->job_cb_size = job->user_cb_size;
}
return rc;
@@ -507,7 +509,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cb *cb;
bool int_queues_only = true;
u32 size_to_copy;
- int rc, i, parse_cnt;
+ int rc, i;
*cs_seq = ULLONG_MAX;
@@ -547,7 +549,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
hl_debugfs_add_cs(cs);
/* Validate ALL the CS chunks before submitting the CS */
- for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
+ for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
enum hl_queue_type queue_type;
bool is_kernel_allocated_cb;
@@ -585,10 +587,6 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
job->cs = cs;
job->user_cb = cb;
job->user_cb_size = chunk->cb_size;
- if (is_kernel_allocated_cb)
- job->job_cb_size = cb->size;
- else
- job->job_cb_size = chunk->cb_size;
job->hw_queue_id = chunk->queue_index;
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
@@ -659,8 +657,8 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
struct hl_device *hdev = hpriv->hdev;
union hl_cs_args *args = data;
struct hl_ctx *ctx = hpriv->ctx;
- void __user *chunks;
- u32 num_chunks;
+ void __user *chunks_execute, *chunks_restore;
+ u32 num_chunks_execute, num_chunks_restore;
u64 cs_seq = ULONG_MAX;
int rc, do_ctx_switch;
bool need_soft_reset = false;
@@ -673,13 +671,25 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
goto out;
}
+ chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
+ num_chunks_execute = args->in.num_chunks_execute;
+
+ if (!num_chunks_execute) {
+ dev_err(hdev->dev,
+ "Got execute CS with 0 chunks, context %d\n",
+ ctx->asid);
+ rc = -EINVAL;
+ goto out;
+ }
+
do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
long ret;
- chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
- num_chunks = args->in.num_chunks_restore;
+ chunks_restore =
+ (void __user *) (uintptr_t) args->in.chunks_restore;
+ num_chunks_restore = args->in.num_chunks_restore;
mutex_lock(&hpriv->restore_phase_mutex);
@@ -707,13 +717,13 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
hdev->asic_funcs->restore_phase_topology(hdev);
- if (num_chunks == 0) {
+ if (!num_chunks_restore) {
dev_dbg(hdev->dev,
"Need to run restore phase but restore CS is empty\n");
rc = 0;
} else {
- rc = _hl_cs_ioctl(hpriv, chunks, num_chunks,
- &cs_seq);
+ rc = _hl_cs_ioctl(hpriv, chunks_restore,
+ num_chunks_restore, &cs_seq);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -726,7 +736,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
}
/* Need to wait for restore completion before execution phase */
- if (num_chunks > 0) {
+ if (num_chunks_restore) {
ret = _hl_cs_wait_ioctl(hdev, ctx,
jiffies_to_usecs(hdev->timeout_jiffies),
cs_seq);
@@ -754,18 +764,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
}
}
- chunks = (void __user *)(uintptr_t)args->in.chunks_execute;
- num_chunks = args->in.num_chunks_execute;
-
- if (num_chunks == 0) {
- dev_err(hdev->dev,
- "Got execute CS with 0 chunks, context %d\n",
- ctx->asid);
- rc = -EINVAL;
- goto out;
- }
-
- rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq);
+ rc = _hl_cs_ioctl(hpriv, chunks_execute, num_chunks_execute, &cs_seq);
out:
if (rc != -EAGAIN) {
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 20413e350343..756d36ed5d95 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -393,9 +393,10 @@ static int mmu_show(struct seq_file *s, void *data)
}
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
mutex_lock(&ctx->mmu_lock);
@@ -547,12 +548,15 @@ static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
goto out;
if (hdev->dram_supports_virtual_memory &&
- addr >= prop->va_space_dram_start_address &&
- addr < prop->va_space_dram_end_address)
+ (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
return true;
- if (addr >= prop->va_space_host_start_address &&
- addr < prop->va_space_host_end_address)
+ if (addr >= prop->pmmu.start_addr &&
+ addr < prop->pmmu.end_addr)
+ return true;
+
+ if (addr >= prop->pmmu_huge.start_addr &&
+ addr < prop->pmmu_huge.end_addr)
return true;
out:
return false;
@@ -575,9 +579,10 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
}
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
mutex_lock(&ctx->mmu_lock);
@@ -705,6 +710,65 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_data_read64(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u64 addr = entry->addr;
+ u64 val;
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%016llx\n", val);
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
+}
+
+static ssize_t hl_data_write64(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u64 addr = entry->addr;
+ u64 value;
+ ssize_t rc;
+
+ rc = kstrtoull_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
+ value, addr);
+ return rc;
+ }
+
+ return count;
+}
+
static ssize_t hl_get_power_state(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -912,6 +976,12 @@ static const struct file_operations hl_data32b_fops = {
.write = hl_data_write32
};
+static const struct file_operations hl_data64b_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_data_read64,
+ .write = hl_data_write64
+};
+
static const struct file_operations hl_i2c_data_fops = {
.owner = THIS_MODULE,
.read = hl_i2c_data_read,
@@ -1025,6 +1095,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_data32b_fops);
+ debugfs_create_file("data64",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_data64b_fops);
+
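
A hedged userspace sketch for reading the new node. It assumes debugfs mounted at /sys/kernel/debug, a single device directory "hl0", and the driver's existing "addr" node selecting the target address; the paths are assumptions, not verified:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[32] = "";
		int addr_fd = open("/sys/kernel/debug/habanalabs/hl0/addr",
				   O_WRONLY);
		int data_fd = open("/sys/kernel/debug/habanalabs/hl0/data64",
				   O_RDONLY);

		if (addr_fd < 0 || data_fd < 0)
			return 1;
		write(addr_fd, "0x20000000", 10);	/* select an address */
		read(data_fd, buf, sizeof(buf) - 1);	/* yields "0x%016llx\n" */
		printf("%s", buf);
		return 0;
	}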
debugfs_create_file("set_power_state",
0200,
dev_entry->root,
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index b680b0caa69b..aef4de36b7aa 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -36,7 +36,7 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
status = HL_DEVICE_STATUS_OPERATIONAL;
return status;
-};
+}
static void hpriv_release(struct kref *ref)
{
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index b8a8de24aaf7..68f065607544 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -324,7 +324,11 @@ static u32 goya_all_events[] = {
GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
- GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH4,
+ GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S,
+ GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E,
+ GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S,
+ GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
};
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
@@ -393,19 +397,21 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->dmmu.hop2_mask = HOP2_MASK;
prop->dmmu.hop3_mask = HOP3_MASK;
prop->dmmu.hop4_mask = HOP4_MASK;
- prop->dmmu.huge_page_size = PAGE_SIZE_2MB;
+ prop->dmmu.start_addr = VA_DDR_SPACE_START;
+ prop->dmmu.end_addr = VA_DDR_SPACE_END;
+ prop->dmmu.page_size = PAGE_SIZE_2MB;
- /* No difference between PMMU and DMMU except of page size */
+ /* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
- prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->pmmu.start_addr = VA_HOST_SPACE_START;
+ prop->pmmu.end_addr = VA_HOST_SPACE_END;
prop->pmmu.page_size = PAGE_SIZE_4KB;
- prop->va_space_host_start_address = VA_HOST_SPACE_START;
- prop->va_space_host_end_address = VA_HOST_SPACE_END;
- prop->va_space_dram_start_address = VA_DDR_SPACE_START;
- prop->va_space_dram_end_address = VA_DDR_SPACE_END;
- prop->dram_size_for_default_page_mapping =
- prop->va_space_dram_end_address;
+ /* PMMU and HPMMU are the same except for page size */
+ memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
+ prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
+
+ prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
@@ -2573,8 +2579,7 @@ static int goya_hw_init(struct hl_device *hdev)
* After CPU initialization is finished, change DDR bar mapping inside
* iATU to point to the start address of the MMU page tables
*/
- if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
- (MMU_PAGE_TABLES_ADDR &
+ if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
dev_err(hdev->dev,
"failed to map DDR bar to MMU page tables\n");
@@ -3443,12 +3448,13 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
/*
* WA for HW-23.
* We can't allow user to read from Host using QMANs other than 1.
+ * PMMU and HPMMU addresses are equal, check only one of them.
*/
if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
le32_to_cpu(user_dma_pkt->tsize),
- hdev->asic_prop.va_space_host_start_address,
- hdev->asic_prop.va_space_host_end_address)) {
+ hdev->asic_prop.pmmu.start_addr,
+ hdev->asic_prop.pmmu.end_addr)) {
dev_err(hdev->dev,
"Can't DMA from host on queue other then 1\n");
return -EFAULT;
@@ -4178,6 +4184,96 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
return rc;
}
+static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 ddr_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
+ u32 val_l = RREG32(addr - CFG_BASE);
+ u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
+
+ *val = (((u64) val_h) << 32) | val_l;
+
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
+
+ *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+
+ } else if ((addr >= DRAM_PHYS_BASE) &&
+ (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (ddr_bar_addr != U64_MAX) {
+ *val = readq(hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - bar_base_addr));
+
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev,
+ ddr_bar_addr);
+ }
+ if (ddr_bar_addr == U64_MAX)
+ rc = -EIO;
+
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
+
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 ddr_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
+ WREG32(addr - CFG_BASE, lower_32_bits(val));
+ WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
+
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
+
+ writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+
+ } else if ((addr >= DRAM_PHYS_BASE) &&
+ (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (ddr_bar_addr != U64_MAX) {
+ writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - bar_base_addr));
+
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev,
+ ddr_bar_addr);
+ }
+ if (ddr_bar_addr == U64_MAX)
+ rc = -EIO;
+
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
@@ -4297,6 +4393,14 @@ static const char *_goya_get_event_desc(u16 event_type)
return "TPC%d_bmon_spmu";
case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
return "DMA_bm_ch%d";
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
+ return "POWER_ENV_S";
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
+ return "POWER_ENV_E";
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
+ return "THERMAL_ENV_S";
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
+ return "THERMAL_ENV_E";
default:
return "N/A";
}
@@ -4388,22 +4492,22 @@ static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
static void goya_print_razwi_info(struct hl_device *hdev)
{
if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
- dev_err(hdev->dev, "Illegal write to LBW\n");
+ dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n");
WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
- dev_err(hdev->dev, "Illegal read from LBW\n");
+ dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n");
WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
- dev_err(hdev->dev, "Illegal write to HBW\n");
+ dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n");
WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
- dev_err(hdev->dev, "Illegal read from HBW\n");
+ dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n");
WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
}
}
@@ -4423,7 +4527,8 @@ static void goya_print_mmu_error_info(struct hl_device *hdev)
addr <<= 32;
addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
- dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
+ dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
+ addr);
WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
}
@@ -4435,7 +4540,7 @@ static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
char desc[20] = "";
goya_get_event_desc(event_type, desc, sizeof(desc));
- dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
+ dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
if (razwi) {
@@ -4526,6 +4631,33 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
return rc;
}
+static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
+{
+ switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
+ dev_info_ratelimited(hdev->dev,
+ "Clock throttling due to power consumption\n");
+ break;
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
+ dev_info_ratelimited(hdev->dev,
+ "Power envelop is safe, back to optimal clock\n");
+ break;
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
+ dev_info_ratelimited(hdev->dev,
+ "Clock throttling due to overheating\n");
+ break;
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
+ dev_info_ratelimited(hdev->dev,
+ "Thermal envelop is safe, back to optimal clock\n");
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid clock change event %d\n",
+ event_type);
+ break;
+ }
+}
+
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
@@ -4609,6 +4741,14 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
goya_unmask_irq(hdev, event_type);
break;
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
+ goya_print_clk_change_info(hdev, event_type);
+ goya_unmask_irq(hdev, event_type);
+ break;
+
default:
dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
event_type);
@@ -4776,7 +4916,8 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off,
- prop->dram_base_address + off, PAGE_SIZE_2MB);
+ prop->dram_base_address + off, PAGE_SIZE_2MB,
+ (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
if (rc) {
dev_err(hdev->dev, "Map failed for address 0x%llx\n",
prop->dram_base_address + off);
@@ -4786,7 +4927,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
- hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB);
+ hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB, true);
if (rc) {
dev_err(hdev->dev,
@@ -4799,7 +4940,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
rc = hl_mmu_map(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
hdev->cpu_accessible_dma_address + cpu_off,
- PAGE_SIZE_4KB);
+ PAGE_SIZE_4KB, true);
if (rc) {
dev_err(hdev->dev,
"Map failed for CPU accessible memory\n");
@@ -4825,14 +4966,15 @@ unmap_cpu:
for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
if (hl_mmu_unmap(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
- PAGE_SIZE_4KB))
+ PAGE_SIZE_4KB, true))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
unmap:
for (; off >= 0 ; off -= PAGE_SIZE_2MB)
if (hl_mmu_unmap(hdev->kernel_ctx,
- prop->dram_base_address + off, PAGE_SIZE_2MB))
+ prop->dram_base_address + off, PAGE_SIZE_2MB,
+ true))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
prop->dram_base_address + off);
@@ -4857,14 +4999,15 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
- PAGE_SIZE_2MB))
+ PAGE_SIZE_2MB, true))
dev_warn(hdev->dev,
"Failed to unmap CPU accessible memory\n");
} else {
for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
if (hl_mmu_unmap(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
- PAGE_SIZE_4KB))
+ PAGE_SIZE_4KB,
+ (cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
@@ -4872,7 +5015,8 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
if (hl_mmu_unmap(hdev->kernel_ctx,
- prop->dram_base_address + off, PAGE_SIZE_2MB))
+ prop->dram_base_address + off, PAGE_SIZE_2MB,
+ (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
dev_warn_ratelimited(hdev->dev,
"Failed to unmap address 0x%llx\n",
prop->dram_base_address + off);
@@ -5113,6 +5257,7 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
}
static void goya_hw_queues_lock(struct hl_device *hdev)
+ __acquires(&goya->hw_queues_lock)
{
struct goya_device *goya = hdev->asic_specific;
@@ -5120,6 +5265,7 @@ static void goya_hw_queues_lock(struct hl_device *hdev)
}
static void goya_hw_queues_unlock(struct hl_device *hdev)
+ __releases(&goya->hw_queues_lock)
{
struct goya_device *goya = hdev->asic_specific;
@@ -5180,6 +5326,8 @@ static const struct hl_asic_funcs goya_funcs = {
.restore_phase_topology = goya_restore_phase_topology,
.debugfs_read32 = goya_debugfs_read32,
.debugfs_write32 = goya_debugfs_write32,
+ .debugfs_read64 = goya_debugfs_read64,
+ .debugfs_write64 = goya_debugfs_write64,
.add_device_attr = goya_add_device_attr,
.handle_eqe = goya_handle_eqe,
.set_pll_profile = goya_set_pll_profile,
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index c1ee6e2b5dff..a1bc930d904f 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -364,8 +364,8 @@ static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
u64 range_start, range_end;
if (hdev->mmu_enable) {
- range_start = prop->va_space_dram_start_address;
- range_end = prop->va_space_dram_end_address;
+ range_start = prop->dmmu.start_addr;
+ range_end = prop->dmmu.end_addr;
} else {
range_start = prop->dram_user_base_address;
range_end = prop->dram_end_address;
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index b2ebc01e27f4..cdd4903e48fa 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -298,8 +298,8 @@ static ssize_t pm_mng_profile_store(struct device *dev,
/* Make sure we are in LOW PLL when changing modes */
if (hdev->pm_mng_profile == PM_MANUAL) {
hdev->curr_pll_profile = PLL_HIGH;
- hl_device_set_frequency(hdev, PLL_LOW);
hdev->pm_mng_profile = PM_AUTO;
+ hl_device_set_frequency(hdev, PLL_LOW);
}
} else if (strncmp("manual", buf, strlen("manual")) == 0) {
if (hdev->pm_mng_profile == PM_AUTO) {
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 00c949f4ccd1..31ebcf9458fe 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -132,6 +132,8 @@ enum hl_device_hw_state {
/**
* struct hl_mmu_properties - ASIC specific MMU address translation properties.
+ * @start_addr: virtual start address of the memory region.
+ * @end_addr: virtual end address of the memory region.
* @hop0_shift: shift of hop 0 mask.
* @hop1_shift: shift of hop 1 mask.
* @hop2_shift: shift of hop 2 mask.
@@ -143,9 +145,10 @@ enum hl_device_hw_state {
* @hop3_mask: mask to get the PTE address in hop 3.
* @hop4_mask: mask to get the PTE address in hop 4.
* @page_size: default page size used to allocate memory.
- * @huge_page_size: page size used to allocate memory with huge pages.
*/
struct hl_mmu_properties {
+ u64 start_addr;
+ u64 end_addr;
u64 hop0_shift;
u64 hop1_shift;
u64 hop2_shift;
@@ -157,7 +160,6 @@ struct hl_mmu_properties {
u64 hop3_mask;
u64 hop4_mask;
u32 page_size;
- u32 huge_page_size;
};
/**
@@ -169,6 +171,8 @@ struct hl_mmu_properties {
* @preboot_ver: F/W Preboot version.
* @dmmu: DRAM MMU address translation properties.
* @pmmu: PCI (host) MMU address translation properties.
+ * @pmmu_huge: PCI (host) MMU address translation properties for memory
+ * allocated with huge pages.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address: SRAM physical start address for user access.
@@ -178,14 +182,6 @@ struct hl_mmu_properties {
* @dram_size: DRAM total size.
* @dram_pci_bar_size: size of PCI bar towards DRAM.
* @max_power_default: max power of the device after reset
- * @va_space_host_start_address: base address of virtual memory range for
- * mapping host memory.
- * @va_space_host_end_address: end address of virtual memory range for
- * mapping host memory.
- * @va_space_dram_start_address: base address of virtual memory range for
- * mapping DRAM memory.
- * @va_space_dram_end_address: end address of virtual memory range for
- * mapping DRAM memory.
* @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
* fault.
* @pcie_dbi_base_address: Base address of the PCIE_DBI block.
@@ -218,6 +214,7 @@ struct asic_fixed_properties {
char preboot_ver[VERSION_MAX_LEN];
struct hl_mmu_properties dmmu;
struct hl_mmu_properties pmmu;
+ struct hl_mmu_properties pmmu_huge;
u64 sram_base_address;
u64 sram_end_address;
u64 sram_user_base_address;
@@ -227,10 +224,6 @@ struct asic_fixed_properties {
u64 dram_size;
u64 dram_pci_bar_size;
u64 max_power_default;
- u64 va_space_host_start_address;
- u64 va_space_host_end_address;
- u64 va_space_dram_start_address;
- u64 va_space_dram_end_address;
u64 dram_size_for_default_page_mapping;
u64 pcie_dbi_base_address;
u64 pcie_aux_dbi_reg_addr;
@@ -431,10 +424,12 @@ struct hl_eq {
* enum hl_asic_type - supported ASIC types.
* @ASIC_INVALID: Invalid ASIC type.
* @ASIC_GOYA: Goya device.
+ * @ASIC_GAUDI: Gaudi device.
*/
enum hl_asic_type {
ASIC_INVALID,
- ASIC_GOYA
+ ASIC_GOYA,
+ ASIC_GAUDI
};
struct hl_cs_parser;
@@ -589,6 +584,8 @@ struct hl_asic_funcs {
void (*restore_phase_topology)(struct hl_device *hdev);
int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
+ int (*debugfs_read64)(struct hl_device *hdev, u64 addr, u64 *val);
+ int (*debugfs_write64)(struct hl_device *hdev, u64 addr, u64 val);
void (*add_device_attr)(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
void (*handle_eqe)(struct hl_device *hdev,
@@ -658,6 +655,8 @@ struct hl_va_range {
* this hits 0. It is incremented on CS and CS_WAIT.
* @cs_pending: array of DMA fence objects representing pending CS.
* @host_va_range: holds available virtual addresses for host mappings.
+ * @host_huge_va_range: holds available virtual addresses for host mappings
+ * with huge pages.
* @dram_va_range: holds available virtual addresses for DRAM mappings.
* @mem_hash_lock: protects the mem_hash.
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
@@ -688,8 +687,9 @@ struct hl_ctx {
struct hl_device *hdev;
struct kref refcount;
struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
- struct hl_va_range host_va_range;
- struct hl_va_range dram_va_range;
+ struct hl_va_range *host_va_range;
+ struct hl_va_range *host_huge_va_range;
+ struct hl_va_range *dram_va_range;
struct mutex mem_hash_lock;
struct mutex mmu_lock;
struct list_head debugfs_list;
@@ -763,7 +763,7 @@ struct hl_userptr {
* @aborted: true if CS was aborted due to some device error.
*/
struct hl_cs {
- u8 jobs_in_queue_cnt[HL_MAX_QUEUES];
+ u16 jobs_in_queue_cnt[HL_MAX_QUEUES];
struct hl_ctx *ctx;
struct list_head job_list;
spinlock_t job_lock;
@@ -1291,6 +1291,8 @@ struct hl_device_idle_busy_ts {
* otherwise.
* @dram_supports_virtual_memory: is MMU enabled towards DRAM.
* @dram_default_page_mapping: is DRAM default page mapping enabled.
+ * @pmmu_huge_range: is a different virtual addresses range used for PMMU with
+ * huge pages.
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
@@ -1372,6 +1374,7 @@ struct hl_device {
u8 reset_on_lockup;
u8 dram_supports_virtual_memory;
u8 dram_default_page_mapping;
+ u8 pmmu_huge_range;
u8 init_done;
u8 device_cpu_disabled;
u8 dma_mask;
@@ -1573,8 +1576,10 @@ int hl_mmu_init(struct hl_device *hdev);
void hl_mmu_fini(struct hl_device *hdev);
int hl_mmu_ctx_init(struct hl_ctx *ctx);
void hl_mmu_ctx_fini(struct hl_ctx *ctx);
-int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size);
-int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
+int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+ u32 page_size, bool flush_pte);
+int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
+ bool flush_pte);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
@@ -1606,11 +1611,18 @@ int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
-long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
-long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
-long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
-long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
-long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
+int hl_get_temperature(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_set_temperature(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value);
+int hl_get_voltage(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_get_current(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_get_fan_speed(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_get_pwm_info(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value);
u64 hl_get_max_power(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 8c342fb499ca..b670859c677a 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -40,12 +40,13 @@ MODULE_PARM_DESC(reset_on_lockup,
#define PCI_VENDOR_ID_HABANALABS 0x1da3
#define PCI_IDS_GOYA 0x0001
+#define PCI_IDS_GAUDI 0x1000
static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, ids);
/*
* get_asic_type - translate device id to asic type
@@ -63,6 +64,9 @@ static enum hl_asic_type get_asic_type(u16 device)
case PCI_IDS_GOYA:
asic_type = ASIC_GOYA;
break;
+ case PCI_IDS_GAUDI:
+ asic_type = ASIC_GAUDI;
+ break;
default:
asic_type = ASIC_INVALID;
break;
@@ -263,6 +267,11 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
dev_err(&pdev->dev, "Unsupported ASIC\n");
rc = -ENODEV;
goto free_hdev;
+ } else if (hdev->asic_type == ASIC_GAUDI) {
+ dev_err(&pdev->dev,
+ "GAUDI is not supported by the current kernel\n");
+ rc = -ENODEV;
+ goto free_hdev;
}
} else {
hdev->asic_type = asic_type;
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
index 7be4bace9b4f..a21a26e07c3b 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -113,6 +113,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct hl_device *hdev = dev_get_drvdata(dev);
+ int rc;
if (hl_device_disabled_or_in_reset(hdev))
return -ENODEV;
@@ -125,36 +126,40 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp_crit:
case hwmon_temp_max_hyst:
case hwmon_temp_crit_hyst:
+ case hwmon_temp_offset:
+ case hwmon_temp_highest:
break;
default:
return -EINVAL;
}
- *val = hl_get_temperature(hdev, channel, attr);
+ rc = hl_get_temperature(hdev, channel, attr, val);
break;
case hwmon_in:
switch (attr) {
case hwmon_in_input:
case hwmon_in_min:
case hwmon_in_max:
+ case hwmon_in_highest:
break;
default:
return -EINVAL;
}
- *val = hl_get_voltage(hdev, channel, attr);
+ rc = hl_get_voltage(hdev, channel, attr, val);
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
case hwmon_curr_min:
case hwmon_curr_max:
+ case hwmon_curr_highest:
break;
default:
return -EINVAL;
}
- *val = hl_get_current(hdev, channel, attr);
+ rc = hl_get_current(hdev, channel, attr, val);
break;
case hwmon_fan:
switch (attr) {
@@ -165,7 +170,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
default:
return -EINVAL;
}
- *val = hl_get_fan_speed(hdev, channel, attr);
+ rc = hl_get_fan_speed(hdev, channel, attr, val);
break;
case hwmon_pwm:
switch (attr) {
@@ -175,12 +180,12 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
default:
return -EINVAL;
}
- *val = hl_get_pwm_info(hdev, channel, attr);
+ rc = hl_get_pwm_info(hdev, channel, attr, val);
break;
default:
return -EINVAL;
}
- return 0;
+ return rc;
}
static int hl_write(struct device *dev, enum hwmon_sensor_types type,
@@ -192,6 +197,15 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
return -ENODEV;
switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_offset:
+ break;
+ default:
+ return -EINVAL;
+ }
+ hl_set_temperature(hdev, channel, attr, val);
+ break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
@@ -219,7 +233,10 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_temp_max_hyst:
case hwmon_temp_crit:
case hwmon_temp_crit_hyst:
+ case hwmon_temp_highest:
return 0444;
+ case hwmon_temp_offset:
+ return 0644;
}
break;
case hwmon_in:
@@ -227,6 +244,7 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_in_input:
case hwmon_in_min:
case hwmon_in_max:
+ case hwmon_in_highest:
return 0444;
}
break;
@@ -235,6 +253,7 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_curr_input:
case hwmon_curr_min:
case hwmon_curr_max:
+ case hwmon_curr_highest:
return 0444;
}
break;
@@ -265,10 +284,10 @@ static const struct hwmon_ops hl_hwmon_ops = {
.write = hl_write
};
-long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_get_temperature(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -279,22 +298,47 @@ long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get temperature from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
-long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_set_temperature(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set temperature of sensor %d, error %d\n",
+ sensor_index, rc);
+
+ return rc;
+}
+
+int hl_get_voltage(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -305,22 +349,22 @@ long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get voltage from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
-long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_get_current(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -331,22 +375,22 @@ long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get current from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
-long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_get_fan_speed(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -357,22 +401,22 @@ long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get fan speed from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
-long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_get_pwm_info(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -383,16 +427,16 @@ long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get pwm info from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index e4c6699a1868..bdd0a4c3a9cf 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -189,6 +189,10 @@ enum pq_init_status {
* ArmCP to write to the structure, to prevent data corruption in case of
* mismatched driver/FW versions.
*
+ * ARMCP_PACKET_TEMPERATURE_SET -
+ * Set the value of the offset property of a specified thermal sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
*/
enum armcp_packet_id {
@@ -214,6 +218,8 @@ enum armcp_packet_id {
ARMCP_PACKET_MAX_POWER_GET, /* sysfs */
ARMCP_PACKET_MAX_POWER_SET, /* sysfs */
ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+ ARMCP_RESERVED,
+ ARMCP_PACKET_TEMPERATURE_SET, /* sysfs */
};
#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -271,24 +277,32 @@ enum armcp_packet_rc {
armcp_packet_fault
};
+/*
+ * armcp_temp_type should adhere to hwmon_temp_attributes
+ * defined in the Linux kernel's hwmon.h file
+ */
enum armcp_temp_type {
armcp_temp_input,
armcp_temp_max = 6,
armcp_temp_max_hyst,
armcp_temp_crit,
- armcp_temp_crit_hyst
+ armcp_temp_crit_hyst,
+ armcp_temp_offset = 19,
+ armcp_temp_highest = 22
};
enum armcp_in_attributes {
armcp_in_input,
armcp_in_min,
- armcp_in_max
+ armcp_in_max,
+ armcp_in_highest = 7
};
enum armcp_curr_attributes {
armcp_curr_input,
armcp_curr_min,
- armcp_curr_max
+ armcp_curr_max,
+ armcp_curr_highest = 7
};
enum armcp_fan_attributes {
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h
index bb7a1aa3279e..5fb92362fc5f 100644
--- a/drivers/misc/habanalabs/include/goya/goya_async_events.h
+++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h
@@ -188,6 +188,10 @@ enum goya_async_event_id {
GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485,
GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486,
GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487,
+ GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S = 507,
+ GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E = 508,
+ GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S = 509,
+ GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E = 510,
GOYA_ASYNC_EVENT_ID_LAST_VALID_ID = 1023,
GOYA_ASYNC_EVENT_ID_SIZE
};
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
index cd89723c7f61..08061282cd9c 100644
--- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -11,24 +11,27 @@
/*
* PSOC scratch-pad registers
*/
-#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
-#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
-#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
-#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
-#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
-#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
-#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
-#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
-#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
-#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
-#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
-#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
-#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
-#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
-#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
-#define mmUBOOT_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
-#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
+#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
+#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
+#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
+#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
+#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
+#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
+#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
+#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
+#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
-#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
+#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
+#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS mmPSOC_GLOBAL_CONF_WARM_REBOOT
#endif /* GOYA_REG_MAP_H_ */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index 2853a2de8cf6..f7992a69fd3a 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -8,20 +8,35 @@
#ifndef HL_BOOT_IF_H
#define HL_BOOT_IF_H
+#define LKD_HARD_RESET_MAGIC 0xED7BD694
+
+/* CPU error bits in BOOT_ERROR registers */
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << 0)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << 1)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << 2)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << 3)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << 4)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << 5)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6)
+#define CPU_BOOT_ERR0_ENABLED (1 << 31)
+
enum cpu_boot_status {
CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
- CPU_BOOT_STATUS_IN_WFE,
- CPU_BOOT_STATUS_DRAM_RDY,
- CPU_BOOT_STATUS_SRAM_AVAIL,
- CPU_BOOT_STATUS_IN_BTL, /* BTL is H/W FSM */
- CPU_BOOT_STATUS_IN_PREBOOT,
- CPU_BOOT_STATUS_IN_SPL,
- CPU_BOOT_STATUS_IN_UBOOT,
- CPU_BOOT_STATUS_DRAM_INIT_FAIL,
- CPU_BOOT_STATUS_FIT_CORRUPTED,
- CPU_BOOT_STATUS_UBOOT_NOT_READY,
- CPU_BOOT_STATUS_RESERVED,
- CPU_BOOT_STATUS_TS_INIT_FAIL,
+ CPU_BOOT_STATUS_IN_WFE = 1,
+ CPU_BOOT_STATUS_DRAM_RDY = 2,
+ CPU_BOOT_STATUS_SRAM_AVAIL = 3,
+ CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT = 5,
+ CPU_BOOT_STATUS_IN_SPL = 6,
+ CPU_BOOT_STATUS_IN_UBOOT = 7,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ CPU_BOOT_STATUS_NIC_FW_RDY = 11,
+ CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_READY_TO_BOOT = 15,
};
enum kmd_msg {
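
A hedged sketch of decoding the new CPU_BOOT_ERR0 bits; example_report_boot_errors() is hypothetical, and it assumes (per the ENABLED bit) that the register contents are only meaningful when the F/W sets that bit:

	static void example_report_boot_errors(struct hl_device *hdev, u32 err0)
	{
		if (!(err0 & CPU_BOOT_ERR0_ENABLED))
			return;	/* F/W does not report through this register */

		if (err0 & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
			dev_err(hdev->dev, "boot error: DRAM init failed\n");
		if (err0 & CPU_BOOT_ERR0_FIT_CORRUPTED)
			dev_err(hdev->dev, "boot error: FIT image corrupted\n");
		if (err0 & CPU_BOOT_ERR0_TS_INIT_FAIL)
			dev_err(hdev->dev, "boot error: thermal sensor init failed\n");
	}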
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 6c72cb4eff54..a72f766ca470 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -530,7 +530,7 @@ static u64 get_va_block(struct hl_device *hdev,
* or not, hence we continue with the biggest possible
* granularity.
*/
- page_size = hdev->asic_prop.pmmu.huge_page_size;
+ page_size = hdev->asic_prop.pmmu_huge.page_size;
else
page_size = hdev->asic_prop.dmmu.page_size;
@@ -638,13 +638,12 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_userptr *userptr,
struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
- struct hl_mmu_properties *mmu_prop = &ctx->hdev->asic_prop.pmmu;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
u64 page_mask, total_npages;
u32 npages, page_size = PAGE_SIZE,
- huge_page_size = mmu_prop->huge_page_size;
+ huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
@@ -747,7 +746,8 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
- rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
+ rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
+ (i + 1) == phys_pg_pack->npages);
if (rc) {
dev_err(hdev->dev,
"map failed for handle %u, npages: %llu, mapped: %llu",
@@ -765,7 +765,8 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
err:
next_vaddr = vaddr;
for (i = 0 ; i < mapped_pg_cnt ; i++) {
- if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size,
+ (i + 1) == mapped_pg_cnt))
dev_warn_ratelimited(hdev->dev,
"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
phys_pg_pack->handle, next_vaddr,
@@ -794,7 +795,8 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
next_vaddr = vaddr;
for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
- if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size,
+ (i + 1) == phys_pg_pack->npages))
dev_warn_ratelimited(hdev->dev,
"unmap failed for vaddr: 0x%llx\n", next_vaddr);
@@ -853,6 +855,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_userptr *userptr = NULL;
struct hl_vm_hash_node *hnode;
+ struct hl_va_range *va_range;
enum vm_type_t *vm_type;
u64 ret_vaddr, hint_addr;
u32 handle = 0;
@@ -924,9 +927,16 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto hnode_err;
}
- ret_vaddr = get_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- phys_pg_pack->total_size, hint_addr, is_userptr);
+ if (is_userptr)
+ if (phys_pg_pack->page_size == hdev->asic_prop.pmmu.page_size)
+ va_range = ctx->host_va_range;
+ else
+ va_range = ctx->host_huge_va_range;
+ else
+ va_range = ctx->dram_va_range;
+
+ ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
+ hint_addr, is_userptr);
if (!ret_vaddr) {
dev_err(hdev->dev, "no available va block for handle %u\n",
handle);
@@ -965,10 +975,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
return 0;
map_err:
- if (add_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- ret_vaddr,
- ret_vaddr + phys_pg_pack->total_size - 1))
+ if (add_va_block(hdev, va_range, ret_vaddr,
+ ret_vaddr + phys_pg_pack->total_size - 1))
dev_warn(hdev->dev,
"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
handle, ret_vaddr);
@@ -1030,7 +1038,6 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
- va_range = &ctx->host_va_range;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
&phys_pg_pack);
@@ -1040,9 +1047,15 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
vaddr);
goto vm_type_err;
}
+
+ if (phys_pg_pack->page_size ==
+ hdev->asic_prop.pmmu.page_size)
+ va_range = ctx->host_va_range;
+ else
+ va_range = ctx->host_huge_va_range;
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
- va_range = &ctx->dram_va_range;
+ va_range = ctx->dram_va_range;
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,
@@ -1438,19 +1451,18 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
}
/*
- * hl_va_range_init - initialize virtual addresses range
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_range : pointer to the range to initialize
- * @start : range start address
- * @end : range end address
+ * va_range_init - initialize virtual addresses range
+ * @hdev: pointer to the habanalabs device structure
+ * @va_range: pointer to the range to initialize
+ * @start: range start address
+ * @end: range end address
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
* addresses.
*/
-static int hl_va_range_init(struct hl_device *hdev,
- struct hl_va_range *va_range, u64 start, u64 end)
+static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
+ u64 start, u64 end)
{
int rc;
@@ -1485,47 +1497,105 @@ static int hl_va_range_init(struct hl_device *hdev,
}
/*
- * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
+ * va_range_fini() - clear a virtual addresses range
+ * @hdev: pointer to the habanalabs structure
+ * @va_range: pointer to virtual addresses range
*
- * @ctx : pointer to the habanalabs context structure
- * @host_range_start : host virtual addresses range start
- * @host_range_end : host virtual addresses range end
- * @dram_range_start : dram virtual addresses range start
- * @dram_range_end : dram virtual addresses range end
+ * This function does the following:
+ * - Frees the virtual addresses block list and its lock
+ */
+static void va_range_fini(struct hl_device *hdev,
+ struct hl_va_range *va_range)
+{
+ mutex_lock(&va_range->lock);
+ clear_va_list_locked(hdev, &va_range->list);
+ mutex_unlock(&va_range->lock);
+
+ mutex_destroy(&va_range->lock);
+ kfree(va_range);
+}
+
+/*
+ * vm_ctx_init_with_ranges() - initialize virtual memory for context
+ * @ctx: pointer to the habanalabs context structure
+ * @host_range_start: host virtual addresses range start.
+ * @host_range_end: host virtual addresses range end.
+ * @host_huge_range_start: host virtual addresses range start for memory
+ * allocated with huge pages.
+ * @host_huge_range_end: host virtual addresses range end for memory allocated
+ * with huge pages.
+ * @dram_range_start: dram virtual addresses range start.
+ * @dram_range_end: dram virtual addresses range end.
*
* This function initializes the following:
* - MMU for context
* - Virtual address to area descriptor hashtable
* - Virtual block list of available virtual memory
*/
-static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
- u64 host_range_end, u64 dram_range_start,
- u64 dram_range_end)
+static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
+ u64 host_range_start,
+ u64 host_range_end,
+ u64 host_huge_range_start,
+ u64 host_huge_range_end,
+ u64 dram_range_start,
+ u64 dram_range_end)
{
struct hl_device *hdev = ctx->hdev;
int rc;
+ ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL);
+ if (!ctx->host_va_range)
+ return -ENOMEM;
+
+ ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range),
+ GFP_KERNEL);
+ if (!ctx->host_huge_va_range) {
+ rc = -ENOMEM;
+ goto host_huge_va_range_err;
+ }
+
+ ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL);
+ if (!ctx->dram_va_range) {
+ rc = -ENOMEM;
+ goto dram_va_range_err;
+ }
+
rc = hl_mmu_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
- return rc;
+ goto mmu_ctx_err;
}
mutex_init(&ctx->mem_hash_lock);
hash_init(ctx->mem_hash);
- mutex_init(&ctx->host_va_range.lock);
+ mutex_init(&ctx->host_va_range->lock);
- rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
- host_range_end);
+ rc = va_range_init(hdev, ctx->host_va_range, host_range_start,
+ host_range_end);
if (rc) {
dev_err(hdev->dev, "failed to init host vm range\n");
- goto host_vm_err;
+ goto host_page_range_err;
+ }
+
+ if (hdev->pmmu_huge_range) {
+ mutex_init(&ctx->host_huge_va_range->lock);
+
+ rc = va_range_init(hdev, ctx->host_huge_va_range,
+ host_huge_range_start,
+ host_huge_range_end);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to init host huge vm range\n");
+ goto host_hpage_range_err;
+ }
+ } else {
+ ctx->host_huge_va_range = ctx->host_va_range;
}
- mutex_init(&ctx->dram_va_range.lock);
+ mutex_init(&ctx->dram_va_range->lock);
- rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
+ rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start,
dram_range_end);
if (rc) {
dev_err(hdev->dev, "failed to init dram vm range\n");
@@ -1537,15 +1607,29 @@ static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
return 0;
dram_vm_err:
- mutex_destroy(&ctx->dram_va_range.lock);
+ mutex_destroy(&ctx->dram_va_range->lock);
- mutex_lock(&ctx->host_va_range.lock);
- clear_va_list_locked(hdev, &ctx->host_va_range.list);
- mutex_unlock(&ctx->host_va_range.lock);
-host_vm_err:
- mutex_destroy(&ctx->host_va_range.lock);
+ if (hdev->pmmu_huge_range) {
+ mutex_lock(&ctx->host_huge_va_range->lock);
+ clear_va_list_locked(hdev, &ctx->host_huge_va_range->list);
+ mutex_unlock(&ctx->host_huge_va_range->lock);
+ }
+host_hpage_range_err:
+ if (hdev->pmmu_huge_range)
+ mutex_destroy(&ctx->host_huge_va_range->lock);
+ mutex_lock(&ctx->host_va_range->lock);
+ clear_va_list_locked(hdev, &ctx->host_va_range->list);
+ mutex_unlock(&ctx->host_va_range->lock);
+host_page_range_err:
+ mutex_destroy(&ctx->host_va_range->lock);
mutex_destroy(&ctx->mem_hash_lock);
hl_mmu_ctx_fini(ctx);
+mmu_ctx_err:
+ kfree(ctx->dram_va_range);
+dram_va_range_err:
+ kfree(ctx->host_huge_va_range);
+host_huge_va_range_err:
+ kfree(ctx->host_va_range);
return rc;
}
@@ -1553,8 +1637,8 @@ host_vm_err:
int hl_vm_ctx_init(struct hl_ctx *ctx)
{
struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
- u64 host_range_start, host_range_end, dram_range_start,
- dram_range_end;
+ u64 host_range_start, host_range_end, host_huge_range_start,
+ host_huge_range_end, dram_range_start, dram_range_end;
atomic64_set(&ctx->dram_phys_mem, 0);
@@ -1566,38 +1650,26 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
* address of the memory related to the given handle.
*/
if (ctx->hdev->mmu_enable) {
- dram_range_start = prop->va_space_dram_start_address;
- dram_range_end = prop->va_space_dram_end_address;
- host_range_start = prop->va_space_host_start_address;
- host_range_end = prop->va_space_host_end_address;
+ dram_range_start = prop->dmmu.start_addr;
+ dram_range_end = prop->dmmu.end_addr;
+ host_range_start = prop->pmmu.start_addr;
+ host_range_end = prop->pmmu.end_addr;
+ host_huge_range_start = prop->pmmu_huge.start_addr;
+ host_huge_range_end = prop->pmmu_huge.end_addr;
} else {
dram_range_start = prop->dram_user_base_address;
dram_range_end = prop->dram_end_address;
host_range_start = prop->dram_user_base_address;
host_range_end = prop->dram_end_address;
+ host_huge_range_start = prop->dram_user_base_address;
+ host_huge_range_end = prop->dram_end_address;
}
- return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
- dram_range_start, dram_range_end);
-}
-
-/*
- * hl_va_range_fini - clear a virtual addresses range
- *
- * @hdev : pointer to the habanalabs structure
- * va_range : pointer to virtual addresses range
- *
- * This function does the following:
- * - Frees the virtual addresses block list and its lock
- */
-static void hl_va_range_fini(struct hl_device *hdev,
- struct hl_va_range *va_range)
-{
- mutex_lock(&va_range->lock);
- clear_va_list_locked(hdev, &va_range->list);
- mutex_unlock(&va_range->lock);
-
- mutex_destroy(&va_range->lock);
+ return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
+ host_huge_range_start,
+ host_huge_range_end,
+ dram_range_start,
+ dram_range_end);
}
/*
@@ -1664,8 +1736,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
}
spin_unlock(&vm->idr_lock);
- hl_va_range_fini(hdev, &ctx->dram_va_range);
- hl_va_range_fini(hdev, &ctx->host_va_range);
+ va_range_fini(hdev, ctx->dram_va_range);
+ if (hdev->pmmu_huge_range)
+ va_range_fini(hdev, ctx->host_huge_va_range);
+ va_range_fini(hdev, ctx->host_va_range);
mutex_destroy(&ctx->mem_hash_lock);
hl_mmu_ctx_fini(ctx);
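
[Editorial note] The net effect of the memory.c changes above is that each context now owns three separately allocated hl_va_range pools, and the host huge-page pool merely aliases the regular host pool when the ASIC has no dedicated huge PMMU (which is why teardown frees it only when pmmu_huge_range is set). A minimal sketch of the selection rule; select_va_range() is an illustration, not a function in the patch:

/* Illustration only: mirrors the selection logic in map_device_va() above. */
static struct hl_va_range *select_va_range(struct hl_ctx *ctx,
					   bool is_userptr, u32 page_size)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;

	if (!is_userptr)
		return ctx->dram_va_range;

	/* Host mappings pick their pool by the backing page size. */
	return (page_size == prop->pmmu.page_size) ?
			ctx->host_va_range : ctx->host_huge_va_range;
}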
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 6262b26e2086..a290d6b49d78 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -254,6 +254,15 @@ static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
return phys_hop_addr + pte_offset;
}
+static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+}
+
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
@@ -548,6 +557,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
curr_pte;
bool is_huge, clear_hop3 = true;
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
hop0_addr = get_hop0_addr(ctx);
@@ -637,29 +647,27 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
clear_hop3 = true;
if (!clear_hop3)
- goto flush;
+ goto mapped;
clear_pte(ctx, hop3_pte_addr);
if (put_pte(ctx, hop3_addr))
- goto flush;
+ goto mapped;
clear_pte(ctx, hop2_pte_addr);
if (put_pte(ctx, hop2_addr))
- goto flush;
+ goto mapped;
clear_pte(ctx, hop1_pte_addr);
if (put_pte(ctx, hop1_addr))
- goto flush;
+ goto mapped;
clear_pte(ctx, hop0_pte_addr);
}
-flush:
- flush(ctx);
-
+mapped:
return 0;
not_mapped:
@@ -675,6 +683,7 @@ not_mapped:
* @ctx: pointer to the context structure
* @virt_addr: virt addr to map from
* @page_size: size of the page to unmap
+ * @flush_pte: whether to do a PCI flush
*
* This function does the following:
* - Check that the virt addr is mapped
@@ -685,40 +694,43 @@ not_mapped:
* changes the MMU hash, it must be protected by a lock.
* However, because it maps only a single page, the lock should be implemented
* in a higher level in order to protect the entire mapping of the memory area
+ *
+ * For optimization reasons, a PCI flush may be requested only once, after
+ * unmapping a large area.
*/
-int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
+int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
+ bool flush_pte)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr;
u32 real_page_size, npages;
- int i, rc;
+ int i, rc = 0;
bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ is_dram_addr = is_dram_va(hdev, virt_addr);
- mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+ if (is_dram_addr)
+ mmu_prop = &prop->dmmu;
+ else if ((page_size % prop->pmmu_huge.page_size) == 0)
+ mmu_prop = &prop->pmmu_huge;
+ else
+ mmu_prop = &prop->pmmu;
/*
* The H/W handles mapping of specific page sizes. Hence if the page
* size is bigger, we break it to sub-pages and unmap them separately.
*/
- if ((page_size % mmu_prop->huge_page_size) == 0) {
- real_page_size = mmu_prop->huge_page_size;
- } else if ((page_size % mmu_prop->page_size) == 0) {
+ if ((page_size % mmu_prop->page_size) == 0) {
real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not %uKB nor %uMB aligned, can't unmap\n",
- page_size,
- mmu_prop->page_size >> 10,
- mmu_prop->huge_page_size >> 20);
+ "page size of %u is not %uKB aligned, can't unmap\n",
+ page_size, mmu_prop->page_size >> 10);
return -EFAULT;
}
@@ -729,12 +741,15 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
for (i = 0 ; i < npages ; i++) {
rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
- return rc;
+ break;
real_virt_addr += real_page_size;
}
- return 0;
+ if (flush_pte)
+ flush(ctx);
+
+ return rc;
}
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
@@ -753,8 +768,6 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
hop4_new = false, is_huge;
int rc = -ENOMEM;
- mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
-
/*
* This mapping function can map a page or a huge page. For huge page
* there are only 3 hops rather than 4. Currently the DRAM allocation
@@ -762,11 +775,15 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
 * one of the two page sizes. Since this is common code for all the
 * three cases, we need this huge page check.
*/
- is_huge = page_size == mmu_prop->huge_page_size;
-
- if (is_dram_addr && !is_huge) {
- dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
- return -EFAULT;
+ if (is_dram_addr) {
+ mmu_prop = &prop->dmmu;
+ is_huge = true;
+ } else if (page_size == prop->pmmu_huge.page_size) {
+ mmu_prop = &prop->pmmu_huge;
+ is_huge = true;
+ } else {
+ mmu_prop = &prop->pmmu;
+ is_huge = false;
}
hop0_addr = get_hop0_addr(ctx);
@@ -885,8 +902,6 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
get_pte(ctx, hop3_addr);
}
- flush(ctx);
-
return 0;
err:
@@ -909,6 +924,7 @@ err:
* @virt_addr: virt addr to map from
* @phys_addr: phys addr to map to
* @page_size: physical page size
+ * @flush_pte: whether to do a PCI flush
*
* This function does the following:
* - Check that the virt addr is not mapped
@@ -919,8 +935,12 @@ err:
* changes the MMU hash, it must be protected by a lock.
* However, because it maps only a single page, the lock should be implemented
* in a higher level in order to protect the entire mapping of the memory area
+ *
+ * For optimization reasons, a PCI flush may be requested only once, after
+ * mapping a large area.
*/
-int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
+int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
+ bool flush_pte)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -933,26 +953,25 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ is_dram_addr = is_dram_va(hdev, virt_addr);
- mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+ if (is_dram_addr)
+ mmu_prop = &prop->dmmu;
+ else if ((page_size % prop->pmmu_huge.page_size) == 0)
+ mmu_prop = &prop->pmmu_huge;
+ else
+ mmu_prop = &prop->pmmu;
/*
* The H/W handles mapping of specific page sizes. Hence if the page
* size is bigger, we break it to sub-pages and map them separately.
*/
- if ((page_size % mmu_prop->huge_page_size) == 0) {
- real_page_size = mmu_prop->huge_page_size;
- } else if ((page_size % mmu_prop->page_size) == 0) {
+ if ((page_size % mmu_prop->page_size) == 0) {
real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not %dKB nor %dMB aligned, can't unmap\n",
- page_size,
- mmu_prop->page_size >> 10,
- mmu_prop->huge_page_size >> 20);
+ "page size of %u is not %uKB aligned, can't unmap\n",
+ page_size, mmu_prop->page_size >> 10);
return -EFAULT;
}
@@ -976,6 +995,9 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
mapped_cnt++;
}
+ if (flush_pte)
+ flush(ctx);
+
return 0;
err:
@@ -988,6 +1010,8 @@ err:
real_virt_addr += real_page_size;
}
+ flush(ctx);
+
return rc;
}
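
[Editorial note] To see why hl_mmu_map() and hl_mmu_unmap() grew the flush_pte argument, consider a caller mapping a large area: only the final page triggers the expensive PCI flush, exactly as the memory.c hunks above do. map_area() below is a hypothetical illustration, not part of the patch:

/* Hypothetical caller: batch the PCI flush over a multi-page mapping. */
static int map_area(struct hl_ctx *ctx, u64 vaddr, const u64 *pages,
		    u64 npages, u32 page_size)
{
	u64 i;
	int rc;

	for (i = 0 ; i < npages ; i++) {
		/* flush_pte is true only for the last page */
		rc = hl_mmu_map(ctx, vaddr + i * page_size, pages[i],
				page_size, (i + 1) == npages);
		if (rc)
			return rc;
	}

	return 0;
}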
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index de87693cf557..886459e0ddd9 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -11,6 +11,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
#ifdef CONFIG_X86_32
#include <asm/desc.h>
@@ -175,6 +176,80 @@ void lkdtm_HUNG_TASK(void)
schedule();
}
+volatile unsigned int huge = INT_MAX - 2;
+volatile unsigned int ignored;
+
+void lkdtm_OVERFLOW_SIGNED(void)
+{
+ int value;
+
+ value = huge;
+ pr_info("Normal signed addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing signed addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+void lkdtm_OVERFLOW_UNSIGNED(void)
+{
+ unsigned int value;
+
+ value = huge;
+ pr_info("Normal unsigned addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing unsigned addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+/* Intentionally using old-style flex array definition of 1 byte. */
+struct array_bounds_flex_array {
+ int one;
+ int two;
+ char data[1];
+};
+
+struct array_bounds {
+ int one;
+ int two;
+ char data[8];
+ int three;
+};
+
+void lkdtm_ARRAY_BOUNDS(void)
+{
+ struct array_bounds_flex_array *not_checked;
+ struct array_bounds *checked;
+ volatile int i;
+
+ not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
+ checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+
+ pr_info("Array access within bounds ...\n");
+ /* For both, touch all bytes in the actual member size. */
+ for (i = 0; i < sizeof(checked->data); i++)
+ checked->data[i] = 'A';
+ /*
+ * For the uninstrumented flex array member, also touch 1 byte
+ * beyond to verify it is correctly uninstrumented.
+ */
+ for (i = 0; i < sizeof(not_checked->data) + 1; i++)
+ not_checked->data[i] = 'A';
+
+ pr_info("Array access beyond bounds ...\n");
+ for (i = 0; i < sizeof(checked->data) + 1; i++)
+ checked->data[i] = 'B';
+
+ kfree(not_checked);
+ kfree(checked);
+}
+
void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
@@ -378,3 +453,39 @@ void lkdtm_DOUBLE_FAULT(void)
pr_err("XFAIL: this test is ia32-only\n");
#endif
}
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+static noinline void change_pac_parameters(void)
+{
+ /* Reset the keys of current task */
+ ptrauth_thread_init_kernel(current);
+ ptrauth_thread_switch_kernel(current);
+}
+
+#define CORRUPT_PAC_ITERATE 10
+noinline void lkdtm_CORRUPT_PAC(void)
+{
+ int i;
+
+ if (!system_supports_address_auth()) {
+ pr_err("FAIL: arm64 pointer authentication feature not present\n");
+ return;
+ }
+
+ pr_info("Change the PAC parameters to force function return failure\n");
+ /*
+	 * The PAC is a hash value computed from the input keys, the return
+	 * address and the stack pointer. Since the PAC has only a few bits,
+	 * collisions are possible, so iterate a few times to reduce the
+	 * collision probability.
+ */
+ for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
+ change_pac_parameters();
+
+ pr_err("FAIL: %s test failed. Kernel may be unstable from here\n", __func__);
+}
+#else /* !CONFIG_ARM64_PTR_AUTH */
+noinline void lkdtm_CORRUPT_PAC(void)
+{
+ pr_err("FAIL: arm64 pointer authentication config disabled\n");
+}
+#endif
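
[Editorial note] For reference, the new crashtypes are exercised like any other LKDTM test, by writing the name to the debugfs trigger. A user-space sketch, assuming debugfs is mounted at the usual /sys/kernel/debug:

/* User-space trigger sketch for the new LKDTM crashtypes. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *test = "ARRAY_BOUNDS"; /* or OVERFLOW_SIGNED, CORRUPT_PAC, ... */
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0)
		return 1;
	/* The kernel runs the matching lkdtm_*() handler on write. */
	write(fd, test, strlen(test));
	close(fd);
	return 0;
}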
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index ee0d6e721441..a5e344df9166 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -116,6 +116,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
CRASHTYPE(UNSET_SMEP),
+ CRASHTYPE(CORRUPT_PAC),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
CRASHTYPE(WRITE_AFTER_FREE),
@@ -129,6 +130,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(HARDLOCKUP),
CRASHTYPE(SPINLOCKUP),
CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(OVERFLOW_SIGNED),
+ CRASHTYPE(OVERFLOW_UNSIGNED),
+ CRASHTYPE(ARRAY_BOUNDS),
CRASHTYPE(EXEC_DATA),
CRASHTYPE(EXEC_STACK),
CRASHTYPE(EXEC_KMALLOC),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index c56d23e37643..601a2156a0d4 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -22,6 +22,9 @@ void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
+void lkdtm_OVERFLOW_SIGNED(void);
+void lkdtm_OVERFLOW_UNSIGNED(void);
+void lkdtm_ARRAY_BOUNDS(void);
void lkdtm_CORRUPT_LIST_ADD(void);
void lkdtm_CORRUPT_LIST_DEL(void);
void lkdtm_CORRUPT_USER_DS(void);
@@ -31,6 +34,7 @@ void lkdtm_UNSET_SMEP(void);
#ifdef CONFIG_X86_32
void lkdtm_DOUBLE_FAULT(void);
#endif
+void lkdtm_CORRUPT_PAC(void);
/* lkdtm_heap.c */
void __init lkdtm_heap_init(void);
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
index d5a084475abc..d1a5c0705be3 100644
--- a/drivers/misc/lkdtm/stackleak.c
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -16,6 +16,7 @@ void lkdtm_STACKLEAK_ERASING(void)
unsigned long *sp, left, found, i;
const unsigned long check_depth =
STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+ bool test_failed = false;
/*
* For the details about the alignment of the poison values, see
@@ -34,7 +35,8 @@ void lkdtm_STACKLEAK_ERASING(void)
left--;
} else {
pr_err("FAIL: not enough stack space for the test\n");
- return;
+ test_failed = true;
+ goto end;
}
pr_info("checking unused part of the thread stack (%lu bytes)...\n",
@@ -52,22 +54,29 @@ void lkdtm_STACKLEAK_ERASING(void)
}
if (found <= check_depth) {
- pr_err("FAIL: thread stack is not erased (checked %lu bytes)\n",
+ pr_err("FAIL: the erased part is not found (checked %lu bytes)\n",
i * sizeof(unsigned long));
- return;
+ test_failed = true;
+ goto end;
}
- pr_info("first %lu bytes are unpoisoned\n",
+ pr_info("the erased part begins after %lu not poisoned bytes\n",
(i - found) * sizeof(unsigned long));
/* The rest of thread stack should be erased */
for (; i < left; i++) {
if (*(sp - i) != STACKLEAK_POISON) {
- pr_err("FAIL: thread stack is NOT properly erased\n");
- return;
+ pr_err("FAIL: bad value number %lu in the erased part: 0x%lx\n",
+ i, *(sp - i));
+ test_failed = true;
}
}
- pr_info("OK: the rest of the thread stack is properly erased\n");
- return;
+end:
+ if (test_failed) {
+ pr_err("FAIL: the thread stack is NOT properly erased\n");
+ dump_stack();
+ } else {
+ pr_info("OK: the rest of the thread stack is properly erased\n");
+ }
}
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 9ad9c01ddf41..910f059b3384 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -91,7 +91,7 @@ struct mkhi_rule_id {
struct mkhi_fwcaps {
struct mkhi_rule_id id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct mkhi_fw_ver_block {
@@ -119,7 +119,7 @@ struct mkhi_msg_hdr {
struct mkhi_msg {
struct mkhi_msg_hdr hdr;
- u8 data[0];
+ u8 data[];
} __packed;
#define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
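
[Editorial note] The data[0] to data[] conversions in this file (and in sgi-gru below) replace the old GNU zero-length-array idiom with a C99 flexible array member, which the compiler and struct_size() can reason about. A minimal allocation sketch, assuming the usual overflow.h helper; mkhi_msg_alloc() is illustrative, not part of the patch:

/* Sketch: allocating a header plus 'payload' trailing bytes safely. */
#include <linux/overflow.h>
#include <linux/slab.h>

static struct mkhi_msg *mkhi_msg_alloc(size_t payload)
{
	struct mkhi_msg *msg;

	/* struct_size() guards sizeof(*msg) + payload against overflow. */
	msg = kzalloc(struct_size(msg, data, payload), GFP_KERNEL);
	return msg;
}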
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1e3edbbacb1e..b32c825a0945 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -266,6 +266,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
__mei_me_cl_del(dev, me_cl);
+ mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
@@ -287,6 +288,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
__mei_me_cl_del(dev, me_cl);
+ mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
@@ -1585,7 +1587,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
goto err;
}
- hbuf_len = mei_slots2data(hbuf_slots);
+ hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
@@ -1718,7 +1720,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
goto out;
}
- hbuf_len = mei_slots2data(hbuf_slots);
+ hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
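
[Editorial note] The two mei_me_cl_put() additions above balance the reference that the __mei_me_cl_by_uuid*() lookups take; without them the removed client structure leaks. The resulting pattern, restated as a sketch of the fixed flow:

/* Sketch of the balanced lookup/remove pattern after the fix. */
static void rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);	/* takes a reference */
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);			/* drop the lookup reference */
	up_write(&dev->me_clients_rwsem);
}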
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 87a0201ba6b3..9392934e3a06 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -75,9 +75,9 @@
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
-#define MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */
+#define MEI_DEV_ID_CNP_LP_3 0x9DE4 /* Cannon Point LP 3 (iTouch) */
#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
-#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
+#define MEI_DEV_ID_CNP_H_3 0xA364 /* Cannon Point H 3 (iTouch) */
#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
@@ -87,6 +87,8 @@
#define MEI_DEV_ID_CMP_H 0x06e0 /* Comet Lake H */
#define MEI_DEV_ID_CMP_H_3 0x06e4 /* Comet Lake H 3 (iTouch) */
+#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */
+
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 668418d7ea77..f620442addf5 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1465,6 +1465,13 @@ static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_DMA_128,
};
+/* LBG with quirk for SPS Firmware exclusion */
+static const struct mei_cfg mei_me_pch12_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_FW_SPS,
+};
+
/* Tiger Lake and newer devices */
static const struct mei_cfg mei_me_pch15_cfg = {
MEI_CFG_PCH8_HFS,
@@ -1487,6 +1494,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
};
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 4a8d4dcd5a91..b6b94e211464 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -80,6 +80,9 @@ struct mei_me_hw {
 * server platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer
+ * server platforms with quirk for
+ * SPS firmware exclusion.
* @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
@@ -93,6 +96,7 @@ enum mei_cfg_idx {
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH12_SPS_CFG,
MEI_ME_PCH15_CFG,
MEI_ME_NUM_CFG,
};
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index d025a5f8317e..b1a8d5ec88b3 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -209,11 +209,14 @@ struct mei_msg_hdr {
u32 extension[0];
} __packed;
+/* The length is up to 9 bits */
+#define MEI_MSG_MAX_LEN_MASK GENMASK(9, 0)
+
#define MEI_MSG_HDR_MAX 2
struct mei_bus_message {
u8 hbm_cmd;
- u8 data[0];
+ u8 data[];
} __packed;
/**
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 76f8ff5ff974..3a29db07211d 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -533,7 +533,7 @@ struct mei_device {
#endif /* CONFIG_DEBUG_FS */
const struct mei_hw_ops *ops;
- char hw[0] __aligned(sizeof(void *));
+ char hw[] __aligned(sizeof(void *));
};
static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 2711451b3d87..a1ed375fed37 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -1,25 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/fcntl.h>
#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/compat.h>
-#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/pm_domain.h>
@@ -79,7 +70,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
@@ -92,9 +83,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
@@ -111,6 +102,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
+
/* required last entry */
{0, }
};
@@ -210,11 +203,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
hw = to_me_hw(dev);
hw->mem_addr = pcim_iomap_table(pdev)[0];
- hw->irq = pdev->irq;
hw->read_fws = mei_me_read_fws;
pci_enable_msi(pdev);
+ hw->irq = pdev->irq;
+
/* request and enable interrupt */
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index f1c16a587495..beacf2a2f2b5 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -1,20 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2017, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index b6841ba6d922..3bfe72c59864 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -116,7 +116,7 @@ config MIC_COSM
config VOP
tristate "VOP Driver"
- depends on VOP_BUS
+ depends on VOP_BUS && VHOST_DPN
select VHOST_RING
select VIRTIO
help
@@ -133,8 +133,4 @@ config VOP
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-if VOP
-source "drivers/vhost/Kconfig.vringh"
-endif
-
endmenu
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 4f2d9212432c..fb5b3989753d 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -137,7 +137,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size,
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
dma_addr_t tmp;
- void *va = kmalloc(size, gfp | __GFP_ZERO);
+ void *va = kzalloc(size, gfp);
if (va) {
tmp = mic_map_single(mdev, va, size);
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index a7743312da9c..d18cda966912 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -350,10 +350,10 @@ mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw)
if (!buf)
return -ENOMEM;
- len += snprintf(buf, CMDLINE_SIZE - len,
+ len += scnprintf(buf, CMDLINE_SIZE - len,
" mem=%dM", boot_mem);
if (mdev->cosm_dev->cmdline)
- snprintf(buf + len, CMDLINE_SIZE - len, " %s",
+ scnprintf(buf + len, CMDLINE_SIZE - len, " %s",
mdev->cosm_dev->cmdline);
memcpy_toio(cmd_line_va, buf, strlen(buf) + 1);
kfree(buf);
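
[Editorial note] The snprintf() to scnprintf() switch above matters because snprintf() returns the length that would have been written had the buffer been large enough; feeding that back into an offset can push later writes past the buffer. scnprintf() returns the bytes actually stored, so chained calls stay bounded. An illustrative fragment using the names from the hunk:

/* Sketch: chained formatting that cannot overrun the buffer. */
char buf[CMDLINE_SIZE];
size_t len = 0;

len += scnprintf(buf + len, sizeof(buf) - len, " mem=%dM", boot_mem);
/* 'len' never exceeds sizeof(buf) - 1, so this append stays in bounds. */
len += scnprintf(buf + len, sizeof(buf) - len, " %s", cmdline);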
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index a5e317073d95..ef5a1af6bab7 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
@@ -64,6 +65,9 @@
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
+#define PCI_ENDPOINT_TEST_FLAGS 0x2c
+#define FLAG_USE_DMA BIT(0)
+
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define is_am654_pci_dev(pdev) \
@@ -98,11 +102,13 @@ struct pci_endpoint_test {
struct completion irq_raised;
int last_irq;
int num_irqs;
+ int irq_type;
/* mutex to protect the ioctls */
struct mutex mutex;
struct miscdevice miscdev;
enum pci_barno test_reg_bar;
size_t alignment;
+ const char *name;
};
struct pci_endpoint_test_data {
@@ -157,6 +163,7 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
struct pci_dev *pdev = test->pdev;
pci_free_irq_vectors(pdev);
+ test->irq_type = IRQ_TYPE_UNDEFINED;
}
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
@@ -191,6 +198,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
irq = 0;
res = false;
}
+
+ test->irq_type = type;
test->num_irqs = irq;
return res;
@@ -218,7 +227,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
for (i = 0; i < test->num_irqs; i++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
- IRQF_SHARED, DRV_MODULE_NAME, test);
+ IRQF_SHARED, test->name, test);
if (err)
goto fail;
}
@@ -315,11 +324,16 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
return false;
}
-static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
+static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
+ unsigned long arg)
{
+ struct pci_endpoint_test_xfer_param param;
bool ret = false;
void *src_addr;
void *dst_addr;
+ u32 flags = 0;
+ bool use_dma;
+ size_t size;
dma_addr_t src_phys_addr;
dma_addr_t dst_phys_addr;
struct pci_dev *pdev = test->pdev;
@@ -330,25 +344,46 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_dst_phys_addr;
size_t offset;
size_t alignment = test->alignment;
+ int irq_type = test->irq_type;
u32 src_crc32;
u32 dst_crc32;
+ int err;
+ err = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (err) {
+ dev_err(dev, "Failed to get transfer param\n");
+ return false;
+ }
+
+ size = param.size;
if (size > SIZE_MAX - alignment)
goto err;
+ use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
+ if (use_dma)
+ flags |= FLAG_USE_DMA;
+
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
- orig_src_addr = dma_alloc_coherent(dev, size + alignment,
- &orig_src_phys_addr, GFP_KERNEL);
+ orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_src_addr) {
dev_err(dev, "Failed to allocate source buffer\n");
ret = false;
goto err;
}
+ get_random_bytes(orig_src_addr, size + alignment);
+ orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
+ size + alignment, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, orig_src_phys_addr)) {
+ dev_err(dev, "failed to map source buffer address\n");
+ ret = false;
+ goto err_src_phys_addr;
+ }
+
if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
offset = src_phys_addr - orig_src_phys_addr;
@@ -364,15 +399,21 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
upper_32_bits(src_phys_addr));
- get_random_bytes(src_addr, size);
src_crc32 = crc32_le(~0, src_addr, size);
- orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
- &orig_dst_phys_addr, GFP_KERNEL);
+ orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_dst_addr) {
dev_err(dev, "Failed to allocate destination address\n");
ret = false;
- goto err_orig_src_addr;
+ goto err_dst_addr;
+ }
+
+ orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
+ size + alignment, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, orig_dst_phys_addr)) {
+ dev_err(dev, "failed to map destination buffer address\n");
+ ret = false;
+ goto err_dst_phys_addr;
}
if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
@@ -392,6 +433,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -399,24 +441,34 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
wait_for_completion(&test->irq_raised);
+ dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
+ DMA_FROM_DEVICE);
+
dst_crc32 = crc32_le(~0, dst_addr, size);
if (dst_crc32 == src_crc32)
ret = true;
- dma_free_coherent(dev, size + alignment, orig_dst_addr,
- orig_dst_phys_addr);
+err_dst_phys_addr:
+ kfree(orig_dst_addr);
-err_orig_src_addr:
- dma_free_coherent(dev, size + alignment, orig_src_addr,
- orig_src_phys_addr);
+err_dst_addr:
+ dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
+ DMA_TO_DEVICE);
+
+err_src_phys_addr:
+ kfree(orig_src_addr);
err:
return ret;
}
-static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
+static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
+ unsigned long arg)
{
+ struct pci_endpoint_test_xfer_param param;
bool ret = false;
+ u32 flags = 0;
+ bool use_dma;
u32 reg;
void *addr;
dma_addr_t phys_addr;
@@ -426,24 +478,47 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_phys_addr;
size_t offset;
size_t alignment = test->alignment;
+ int irq_type = test->irq_type;
+ size_t size;
u32 crc32;
+ int err;
+
+ err = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (err != 0) {
+ dev_err(dev, "Failed to get transfer param\n");
+ return false;
+ }
+ size = param.size;
if (size > SIZE_MAX - alignment)
goto err;
+ use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
+ if (use_dma)
+ flags |= FLAG_USE_DMA;
+
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
- orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
- GFP_KERNEL);
+ orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate address\n");
ret = false;
goto err;
}
+ get_random_bytes(orig_addr, size + alignment);
+
+ orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, orig_phys_addr)) {
+ dev_err(dev, "failed to map source buffer address\n");
+ ret = false;
+ goto err_phys_addr;
+ }
+
if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
offset = phys_addr - orig_phys_addr;
@@ -453,8 +528,6 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
addr = orig_addr;
}
- get_random_bytes(addr, size);
-
crc32 = crc32_le(~0, addr, size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
crc32);
@@ -466,6 +539,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -477,15 +551,24 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
if (reg & STATUS_READ_SUCCESS)
ret = true;
- dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
+ dma_unmap_single(dev, orig_phys_addr, size + alignment,
+ DMA_TO_DEVICE);
+
+err_phys_addr:
+ kfree(orig_addr);
err:
return ret;
}
-static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
+static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
+ unsigned long arg)
{
+ struct pci_endpoint_test_xfer_param param;
bool ret = false;
+ u32 flags = 0;
+ bool use_dma;
+ size_t size;
void *addr;
dma_addr_t phys_addr;
struct pci_dev *pdev = test->pdev;
@@ -494,24 +577,44 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_phys_addr;
size_t offset;
size_t alignment = test->alignment;
+ int irq_type = test->irq_type;
u32 crc32;
+ int err;
+
+ err = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (err) {
+ dev_err(dev, "Failed to get transfer param\n");
+ return false;
+ }
+ size = param.size;
if (size > SIZE_MAX - alignment)
goto err;
+ use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
+ if (use_dma)
+ flags |= FLAG_USE_DMA;
+
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
- orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
- GFP_KERNEL);
+ orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate destination address\n");
ret = false;
goto err;
}
+ orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, orig_phys_addr)) {
+ dev_err(dev, "failed to map source buffer address\n");
+ ret = false;
+ goto err_phys_addr;
+ }
+
if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
offset = phys_addr - orig_phys_addr;
@@ -528,6 +631,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -535,15 +639,26 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
wait_for_completion(&test->irq_raised);
+ dma_unmap_single(dev, orig_phys_addr, size + alignment,
+ DMA_FROM_DEVICE);
+
crc32 = crc32_le(~0, addr, size);
if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
ret = true;
- dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
+err_phys_addr:
+ kfree(orig_addr);
err:
return ret;
}
+static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
+{
+ pci_endpoint_test_release_irq(test);
+ pci_endpoint_test_free_irq_vectors(test);
+ return true;
+}
+
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
int req_irq_type)
{
@@ -555,7 +670,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
return false;
}
- if (irq_type == req_irq_type)
+ if (test->irq_type == req_irq_type)
return true;
pci_endpoint_test_release_irq(test);
@@ -567,12 +682,10 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
if (!pci_endpoint_test_request_irq(test))
goto err;
- irq_type = req_irq_type;
return true;
err:
pci_endpoint_test_free_irq_vectors(test);
- irq_type = IRQ_TYPE_UNDEFINED;
return false;
}
@@ -616,6 +729,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
case PCITEST_GET_IRQTYPE:
ret = irq_type;
break;
+ case PCITEST_CLEAR_IRQ:
+ ret = pci_endpoint_test_clear_irq(test);
+ break;
}
ret:
@@ -633,7 +749,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
{
int err;
int id;
- char name[20];
+ char name[24];
enum pci_barno bar;
void __iomem *base;
struct device *dev = &pdev->dev;
@@ -652,6 +768,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->test_reg_bar = 0;
test->alignment = 0;
test->pdev = pdev;
+ test->irq_type = IRQ_TYPE_UNDEFINED;
if (no_msi)
irq_type = IRQ_TYPE_LEGACY;
@@ -667,6 +784,12 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
init_completion(&test->irq_raised);
mutex_init(&test->mutex);
+ if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ return -EINVAL;
+ }
+
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Cannot enable PCI device\n");
@@ -684,9 +807,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
goto err_disable_irq;
- if (!pci_endpoint_test_request_irq(test))
- goto err_disable_irq;
-
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
base = pci_ioremap_bar(pdev, bar);
@@ -716,12 +836,21 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
}
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
+ test->name = kstrdup(name, GFP_KERNEL);
+ if (!test->name) {
+ err = -ENOMEM;
+ goto err_ida_remove;
+ }
+
+ if (!pci_endpoint_test_request_irq(test))
+ goto err_kfree_test_name;
+
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
misc_device->name = kstrdup(name, GFP_KERNEL);
if (!misc_device->name) {
err = -ENOMEM;
- goto err_ida_remove;
+ goto err_release_irq;
}
misc_device->fops = &pci_endpoint_test_fops,
@@ -736,6 +865,12 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
err_kfree_name:
kfree(misc_device->name);
+err_release_irq:
+ pci_endpoint_test_release_irq(test);
+
+err_kfree_test_name:
+ kfree(test->name);
+
err_ida_remove:
ida_simple_remove(&pci_endpoint_test_ida, id);
@@ -744,7 +879,6 @@ err_iounmap:
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
- pci_endpoint_test_release_irq(test);
err_disable_irq:
pci_endpoint_test_free_irq_vectors(test);
@@ -770,6 +904,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
misc_deregister(&test->miscdev);
kfree(misc_device->name);
+ kfree(test->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
@@ -783,6 +918,12 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
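
[Editorial note] The driver-wide move from dma_alloc_coherent() to kzalloc() plus dma_map_single() switches the test buffers to streaming DMA: the mapping (and the implied cache maintenance) brackets each transfer instead of pinning a coherent allocation for the buffer's lifetime. A sketch of the pattern, not taken verbatim from the patch:

/* Sketch: streaming-DMA bracket around a single to-device transfer. */
static int xfer_to_device(struct device *dev, size_t size)
{
	void *buf = kzalloc(size, GFP_KERNEL);
	dma_addr_t dma;

	if (!buf)
		return -ENOMEM;

	dma = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... program the endpoint with 'dma' and wait for completion ... */

	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}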
+static const struct pci_endpoint_test_data default_data = {
+ .test_reg_bar = BAR_0,
+ .alignment = SZ_4K,
+ .irq_type = IRQ_TYPE_MSI,
+};
+
static const struct pci_endpoint_test_data am654_data = {
.test_reg_bar = BAR_2,
.alignment = SZ_64K,
@@ -790,8 +931,12 @@ static const struct pci_endpoint_test_data am654_data = {
};
static const struct pci_device_id pci_endpoint_test_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
- { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
+ .driver_data = (kernel_ulong_t)&default_data,
+ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
+ .driver_data = (kernel_ulong_t)&default_data,
+ },
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index e77d1b1f9d05..85c103923632 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -136,7 +136,7 @@ struct gru_dump_context_header {
pid_t pid;
unsigned long vaddr;
int cch_locked;
- unsigned long data[0];
+ unsigned long data[];
};
/*
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index a7e44b2eb413..5ce8f3081e96 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -372,7 +372,7 @@ struct gru_thread_state {
int ts_data_valid; /* Indicates if ts_gdata has
valid data */
struct gru_gseg_statistics ustats; /* User statistics */
- unsigned long ts_gdata[0]; /* save area for GRU data (CB,
+ unsigned long ts_gdata[]; /* save area for GRU data (CB,
DS, CBE) */
};
diff --git a/drivers/misc/uacce/Kconfig b/drivers/misc/uacce/Kconfig
new file mode 100644
index 000000000000..5e39b6083b0f
--- /dev/null
+++ b/drivers/misc/uacce/Kconfig
@@ -0,0 +1,13 @@
+config UACCE
+ tristate "Accelerator Framework for User Land"
+ depends on IOMMU_API
+ help
+	  UACCE provides an interface for user-space processes to access the
+	  hardware directly, without kernel involvement in the data path.
+
+ The user-space interface is described in
+ include/uapi/misc/uacce/uacce.h
+
+ See Documentation/misc-devices/uacce.rst for more details.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/misc/uacce/Makefile b/drivers/misc/uacce/Makefile
new file mode 100644
index 000000000000..5b4374e8b5f2
--- /dev/null
+++ b/drivers/misc/uacce/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+obj-$(CONFIG_UACCE) += uacce.o
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
new file mode 100644
index 000000000000..d39307f060bd
--- /dev/null
+++ b/drivers/misc/uacce/uacce.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/compat.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/uacce.h>
+
+static struct class *uacce_class;
+static dev_t uacce_devt;
+static DEFINE_MUTEX(uacce_mutex);
+static DEFINE_XARRAY_ALLOC(uacce_xa);
+
+static int uacce_start_queue(struct uacce_queue *q)
+{
+ int ret = 0;
+
+ mutex_lock(&uacce_mutex);
+
+ if (q->state != UACCE_Q_INIT) {
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ if (q->uacce->ops->start_queue) {
+ ret = q->uacce->ops->start_queue(q);
+ if (ret < 0)
+ goto out_with_lock;
+ }
+
+ q->state = UACCE_Q_STARTED;
+
+out_with_lock:
+ mutex_unlock(&uacce_mutex);
+
+ return ret;
+}
+
+static int uacce_put_queue(struct uacce_queue *q)
+{
+ struct uacce_device *uacce = q->uacce;
+
+ mutex_lock(&uacce_mutex);
+
+ if (q->state == UACCE_Q_ZOMBIE)
+ goto out;
+
+ if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
+ uacce->ops->stop_queue(q);
+
+ if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
+ uacce->ops->put_queue)
+ uacce->ops->put_queue(q);
+
+ q->state = UACCE_Q_ZOMBIE;
+out:
+ mutex_unlock(&uacce_mutex);
+
+ return 0;
+}
+
+static long uacce_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
+
+ switch (cmd) {
+ case UACCE_CMD_START_Q:
+ return uacce_start_queue(q);
+
+ case UACCE_CMD_PUT_Q:
+ return uacce_put_queue(q);
+
+ default:
+ if (!uacce->ops->ioctl)
+ return -EINVAL;
+
+ return uacce->ops->ioctl(q, cmd, arg);
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long uacce_fops_compat_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ arg = (unsigned long)compat_ptr(arg);
+
+ return uacce_fops_unl_ioctl(filep, cmd, arg);
+}
+#endif
+
+static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
+ void *data)
+{
+ struct uacce_mm *uacce_mm = data;
+ struct uacce_queue *q;
+
+ /*
+ * No new queue can be added concurrently because no caller can have a
+ * reference to this mm. But there may be concurrent calls to
+ * uacce_mm_put(), so we need the lock.
+ */
+ mutex_lock(&uacce_mm->lock);
+ list_for_each_entry(q, &uacce_mm->queues, list)
+ uacce_put_queue(q);
+ uacce_mm->mm = NULL;
+ mutex_unlock(&uacce_mm->lock);
+
+ return 0;
+}
+
+static struct iommu_sva_ops uacce_sva_ops = {
+ .mm_exit = uacce_sva_exit,
+};
+
+static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
+ struct uacce_queue *q,
+ struct mm_struct *mm)
+{
+ struct uacce_mm *uacce_mm = NULL;
+ struct iommu_sva *handle = NULL;
+ int ret;
+
+ lockdep_assert_held(&uacce->mm_lock);
+
+ list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
+ if (uacce_mm->mm == mm) {
+ mutex_lock(&uacce_mm->lock);
+ list_add(&q->list, &uacce_mm->queues);
+ mutex_unlock(&uacce_mm->lock);
+ return uacce_mm;
+ }
+ }
+
+ uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
+ if (!uacce_mm)
+ return NULL;
+
+ if (uacce->flags & UACCE_DEV_SVA) {
+ /*
+ * Safe to pass an incomplete uacce_mm, since mm_exit cannot
+ * fire while we hold a reference to the mm.
+ */
+ handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
+ if (IS_ERR(handle))
+ goto err_free;
+
+ ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
+ if (ret)
+ goto err_unbind;
+
+ uacce_mm->pasid = iommu_sva_get_pasid(handle);
+ if (uacce_mm->pasid == IOMMU_PASID_INVALID)
+ goto err_unbind;
+ }
+
+ uacce_mm->mm = mm;
+ uacce_mm->handle = handle;
+ INIT_LIST_HEAD(&uacce_mm->queues);
+ mutex_init(&uacce_mm->lock);
+ list_add(&q->list, &uacce_mm->queues);
+ list_add(&uacce_mm->list, &uacce->mm_list);
+
+ return uacce_mm;
+
+err_unbind:
+ if (handle)
+ iommu_sva_unbind_device(handle);
+err_free:
+ kfree(uacce_mm);
+ return NULL;
+}
+
+static void uacce_mm_put(struct uacce_queue *q)
+{
+ struct uacce_mm *uacce_mm = q->uacce_mm;
+
+ lockdep_assert_held(&q->uacce->mm_lock);
+
+ mutex_lock(&uacce_mm->lock);
+ list_del(&q->list);
+ mutex_unlock(&uacce_mm->lock);
+
+ if (list_empty(&uacce_mm->queues)) {
+ if (uacce_mm->handle)
+ iommu_sva_unbind_device(uacce_mm->handle);
+ list_del(&uacce_mm->list);
+ kfree(uacce_mm);
+ }
+}
+
+static int uacce_fops_open(struct inode *inode, struct file *filep)
+{
+ struct uacce_mm *uacce_mm = NULL;
+ struct uacce_device *uacce;
+ struct uacce_queue *q;
+ int ret = 0;
+
+ uacce = xa_load(&uacce_xa, iminor(inode));
+ if (!uacce)
+ return -ENODEV;
+
+ q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ mutex_lock(&uacce->mm_lock);
+ uacce_mm = uacce_mm_get(uacce, q, current->mm);
+ mutex_unlock(&uacce->mm_lock);
+ if (!uacce_mm) {
+ ret = -ENOMEM;
+ goto out_with_mem;
+ }
+
+ q->uacce = uacce;
+ q->uacce_mm = uacce_mm;
+
+ if (uacce->ops->get_queue) {
+ ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+ if (ret < 0)
+ goto out_with_mm;
+ }
+
+ init_waitqueue_head(&q->wait);
+ filep->private_data = q;
+ uacce->inode = inode;
+ q->state = UACCE_Q_INIT;
+
+ return 0;
+
+out_with_mm:
+ mutex_lock(&uacce->mm_lock);
+ uacce_mm_put(q);
+ mutex_unlock(&uacce->mm_lock);
+out_with_mem:
+ kfree(q);
+ return ret;
+}
+
+static int uacce_fops_release(struct inode *inode, struct file *filep)
+{
+ struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
+
+ uacce_put_queue(q);
+
+ mutex_lock(&uacce->mm_lock);
+ uacce_mm_put(q);
+ mutex_unlock(&uacce->mm_lock);
+
+ kfree(q);
+
+ return 0;
+}
+
+static vm_fault_t uacce_vma_fault(struct vm_fault *vmf)
+{
+ if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
+ return VM_FAULT_SIGBUS;
+
+ return 0;
+}
+
+static void uacce_vma_close(struct vm_area_struct *vma)
+{
+ struct uacce_queue *q = vma->vm_private_data;
+ struct uacce_qfile_region *qfr = NULL;
+
+ if (vma->vm_pgoff < UACCE_MAX_REGION)
+ qfr = q->qfrs[vma->vm_pgoff];
+
+ kfree(qfr);
+}
+
+static const struct vm_operations_struct uacce_vm_ops = {
+ .fault = uacce_vma_fault,
+ .close = uacce_vma_close,
+};
+
+static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
+ struct uacce_qfile_region *qfr;
+ enum uacce_qfrt type = UACCE_MAX_REGION;
+ int ret = 0;
+
+ if (vma->vm_pgoff < UACCE_MAX_REGION)
+ type = vma->vm_pgoff;
+ else
+ return -EINVAL;
+
+ qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
+ if (!qfr)
+ return -ENOMEM;
+
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+ vma->vm_ops = &uacce_vm_ops;
+ vma->vm_private_data = q;
+ qfr->type = type;
+
+ mutex_lock(&uacce_mutex);
+
+ if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ if (q->qfrs[type]) {
+ ret = -EEXIST;
+ goto out_with_lock;
+ }
+
+ switch (type) {
+ case UACCE_QFRT_MMIO:
+ if (!uacce->ops->mmap) {
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ ret = uacce->ops->mmap(q, vma, qfr);
+ if (ret)
+ goto out_with_lock;
+
+ break;
+
+ case UACCE_QFRT_DUS:
+ if (!uacce->ops->mmap) {
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ ret = uacce->ops->mmap(q, vma, qfr);
+ if (ret)
+ goto out_with_lock;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ q->qfrs[type] = qfr;
+ mutex_unlock(&uacce_mutex);
+
+ return ret;
+
+out_with_lock:
+ mutex_unlock(&uacce_mutex);
+ kfree(qfr);
+ return ret;
+}
+
+static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
+{
+ struct uacce_queue *q = file->private_data;
+ struct uacce_device *uacce = q->uacce;
+
+ poll_wait(file, &q->wait, wait);
+ if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
+ return EPOLLIN | EPOLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations uacce_fops = {
+ .owner = THIS_MODULE,
+ .open = uacce_fops_open,
+ .release = uacce_fops_release,
+ .unlocked_ioctl = uacce_fops_unl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = uacce_fops_compat_ioctl,
+#endif
+ .mmap = uacce_fops_mmap,
+ .poll = uacce_fops_poll,
+};
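/*
 * A minimal user-space sketch (not part of the patch) of driving the char
 * device behind uacce_fops above: open a queue, mmap the MMIO region, then
 * poll for completion. The device node name and mapping length are
 * hypothetical; real region sizes come from the region_*_size sysfs
 * attributes defined below, and the region is selected via the mmap page
 * offset (UACCE_QFRT_MMIO is index 0).
 */
#include <fcntl.h>
#include <poll.h>
#include <sys/mman.h>
#include <unistd.h>

static int uacce_queue_example(void)
{
	struct pollfd pfd;
	void *mmio;
	int fd;

	fd = open("/dev/hisi_zip-0", O_RDWR);	/* hypothetical node */
	if (fd < 0)
		return -1;

	/* pgoff 0 selects UACCE_QFRT_MMIO in uacce_fops_mmap() */
	mmio = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mmio == MAP_FAILED) {
		close(fd);
		return -1;
	}

	/* ... submit work through the MMIO region, then wait ... */
	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);

	munmap(mmio, 4096);
	close(fd);
	return 0;
}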
+
+#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)
+
+static ssize_t api_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%s\n", uacce->api_ver);
+}
+
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%u\n", uacce->flags);
+}
+
+static ssize_t available_instances_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ if (!uacce->ops->get_available_instances)
+ return -ENODEV;
+
+ return sprintf(buf, "%d\n",
+ uacce->ops->get_available_instances(uacce));
+}
+
+static ssize_t algorithms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%s\n", uacce->algs);
+}
+
+static ssize_t region_mmio_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%lu\n",
+ uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
+}
+
+static ssize_t region_dus_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%lu\n",
+ uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
+}
+
+static DEVICE_ATTR_RO(api);
+static DEVICE_ATTR_RO(flags);
+static DEVICE_ATTR_RO(available_instances);
+static DEVICE_ATTR_RO(algorithms);
+static DEVICE_ATTR_RO(region_mmio_size);
+static DEVICE_ATTR_RO(region_dus_size);
+
+static struct attribute *uacce_dev_attrs[] = {
+ &dev_attr_api.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_available_instances.attr,
+ &dev_attr_algorithms.attr,
+ &dev_attr_region_mmio_size.attr,
+ &dev_attr_region_dus_size.attr,
+ NULL,
+};
+
+static umode_t uacce_dev_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ if (((attr == &dev_attr_region_mmio_size.attr) &&
+ (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
+ ((attr == &dev_attr_region_dus_size.attr) &&
+ (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group uacce_dev_group = {
+ .is_visible = uacce_dev_is_visible,
+ .attrs = uacce_dev_attrs,
+};
+
+__ATTRIBUTE_GROUPS(uacce_dev);
+
+static void uacce_release(struct device *dev)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ kfree(uacce);
+}
+
+/**
+ * uacce_alloc() - allocate an accelerator
+ * @parent: pointer to the uacce parent device
+ * @interface: pointer to the uacce_interface to register
+ *
+ * Returns a uacce pointer on success, or an ERR_PTR on failure.
+ * Callers need to check the returned, negotiated uacce->flags.
+ */
+struct uacce_device *uacce_alloc(struct device *parent,
+ struct uacce_interface *interface)
+{
+ unsigned int flags = interface->flags;
+ struct uacce_device *uacce;
+ int ret;
+
+ uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
+ if (!uacce)
+ return ERR_PTR(-ENOMEM);
+
+ if (flags & UACCE_DEV_SVA) {
+ ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
+ if (ret)
+ flags &= ~UACCE_DEV_SVA;
+ }
+
+ uacce->parent = parent;
+ uacce->flags = flags;
+ uacce->ops = interface->ops;
+
+ ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto err_with_uacce;
+
+ INIT_LIST_HEAD(&uacce->mm_list);
+ mutex_init(&uacce->mm_lock);
+ device_initialize(&uacce->dev);
+ uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
+ uacce->dev.class = uacce_class;
+ uacce->dev.groups = uacce_dev_groups;
+ uacce->dev.parent = uacce->parent;
+ uacce->dev.release = uacce_release;
+ dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);
+
+ return uacce;
+
+err_with_uacce:
+ if (flags & UACCE_DEV_SVA)
+ iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+ kfree(uacce);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(uacce_alloc);
+
+/**
+ * uacce_register() - add the accelerator to cdev and export to user space
+ * @uacce: The initialized uacce device
+ *
+ * Return 0 if registration succeeded, or an error.
+ */
+int uacce_register(struct uacce_device *uacce)
+{
+ if (!uacce)
+ return -ENODEV;
+
+ uacce->cdev = cdev_alloc();
+ if (!uacce->cdev)
+ return -ENOMEM;
+
+ uacce->cdev->ops = &uacce_fops;
+ uacce->cdev->owner = THIS_MODULE;
+
+ return cdev_device_add(uacce->cdev, &uacce->dev);
+}
+EXPORT_SYMBOL_GPL(uacce_register);
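/*
 * A minimal sketch of how a parent driver might use uacce_alloc() and
 * uacce_register() above. "my_ops" and the probe function are hypothetical;
 * the uacce_interface fields match those consumed by uacce_alloc().
 */
static const struct uacce_ops my_ops;	/* hypothetical queue callbacks */

static int my_acc_probe(struct platform_device *pdev)
{
	struct uacce_interface interface = {
		.name	= "my_acc",
		.flags	= UACCE_DEV_SVA,
		.ops	= &my_ops,
	};
	struct uacce_device *uacce;
	int ret;

	uacce = uacce_alloc(&pdev->dev, &interface);
	if (IS_ERR(uacce))
		return PTR_ERR(uacce);

	/* SVA may have been negotiated away; check uacce->flags if required */
	ret = uacce_register(uacce);
	if (ret)
		uacce_remove(uacce);

	return ret;
}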
+
+/**
+ * uacce_remove() - remove the accelerator
+ * @uacce: the accelerator to remove
+ */
+void uacce_remove(struct uacce_device *uacce)
+{
+ struct uacce_mm *uacce_mm;
+ struct uacce_queue *q;
+
+ if (!uacce)
+ return;
+ /*
+ * Unmap remaining mappings from user space, preventing user space from
+ * still accessing the mmapped area after the parent device is removed
+ */
+ if (uacce->inode)
+ unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
+
+ /* ensure no open queues remain */
+ mutex_lock(&uacce->mm_lock);
+ list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
+ /*
+ * We don't take the uacce_mm->lock here. Since we hold the
+ * device's mm_lock, no queue can be added to or removed from
+ * this uacce_mm. We may run concurrently with mm_exit, but
+ * uacce_put_queue() is serialized and iommu_sva_unbind_device()
+ * waits for the lock that mm_exit is holding.
+ */
+ list_for_each_entry(q, &uacce_mm->queues, list)
+ uacce_put_queue(q);
+
+ if (uacce->flags & UACCE_DEV_SVA) {
+ iommu_sva_unbind_device(uacce_mm->handle);
+ uacce_mm->handle = NULL;
+ }
+ }
+ mutex_unlock(&uacce->mm_lock);
+
+ /* disable SVA now since there are no open queues */
+ if (uacce->flags & UACCE_DEV_SVA)
+ iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+
+ if (uacce->cdev)
+ cdev_device_del(uacce->cdev, &uacce->dev);
+ xa_erase(&uacce_xa, uacce->dev_id);
+ put_device(&uacce->dev);
+}
+EXPORT_SYMBOL_GPL(uacce_remove);
+
+static int __init uacce_init(void)
+{
+ int ret;
+
+ uacce_class = class_create(THIS_MODULE, UACCE_NAME);
+ if (IS_ERR(uacce_class))
+ return PTR_ERR(uacce_class);
+
+ ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
+ if (ret)
+ class_destroy(uacce_class);
+
+ return ret;
+}
+
+static __exit void uacce_exit(void)
+{
+ unregister_chrdev_region(uacce_devt, MINORMASK);
+ class_destroy(uacce_class);
+}
+
+subsys_initcall(uacce_init);
+module_exit(uacce_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Accelerator interface for Userland applications");
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 058fcd7f9f01..a431787c0898 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -42,7 +42,7 @@ struct vexpress_syscfg_func {
struct vexpress_syscfg *syscfg;
struct regmap *regmap;
int num_templates;
- u32 template[0]; /* Keep it last! */
+ u32 template[]; /* Keep it last! */
};
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 663d87924e5e..7896952de1ac 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -70,7 +70,6 @@ MODULE_ALIAS("mmc:block");
* ample.
*/
#define MMC_BLK_TIMEOUT_MS (10 * 1000)
-#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
@@ -168,6 +167,11 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int disable_multi,
+ struct mmc_queue *mq);
+static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@@ -408,44 +412,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
return 0;
}
-static int ioctl_do_sanitize(struct mmc_card *card)
-{
- int err;
-
- if (!mmc_can_sanitize(card)) {
- pr_warn("%s: %s - SANITIZE is not supported\n",
- mmc_hostname(card->host), __func__);
- err = -EOPNOTSUPP;
- goto out;
- }
-
- pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
- mmc_hostname(card->host), __func__);
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_SANITIZE_START, 1,
- MMC_SANITIZE_REQ_TIMEOUT);
-
- if (err)
- pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
- mmc_hostname(card->host), __func__, err);
-
- pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
- __func__);
-out:
- return err;
-}
-
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
- /*
- * Some cards mishandle the status bits, so make sure to check both the
- * busy indication and the card state.
- */
- return status & R1_READY_FOR_DATA &&
- (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
u32 *resp_errs)
{
@@ -477,13 +443,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
__func__, status);
return -ETIMEDOUT;
}
-
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!mmc_blk_in_tran_state(status));
+ } while (!mmc_ready_for_data(status));
return err;
}
@@ -580,15 +540,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
- (cmd.opcode == MMC_SWITCH)) {
- err = ioctl_do_sanitize(card);
-
- if (err)
- pr_err("%s: ioctl_do_sanitize() failed. err = %d",
- __func__, err);
-
- return err;
- }
+ (cmd.opcode == MMC_SWITCH))
+ return mmc_sanitize(card);
mmc_wait_for_req(card->host, &mrq);
@@ -1417,6 +1370,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
struct mmc_request *mrq = &mqrq->brq.mrq;
struct request_queue *q = req->q;
struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
unsigned long flags;
bool put_card;
int err;
@@ -1446,7 +1400,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
spin_lock_irqsave(&mq->lock, flags);
- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+ mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -1532,9 +1486,30 @@ static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
return mmc_blk_cqe_start_req(mq->card->host, mrq);
}
+static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+ int err;
+
+ mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+ mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
+ mmc_pre_req(host, &mqrq->brq.mrq);
+
+ err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
+ if (err)
+ mmc_post_req(host, &mqrq->brq.mrq, err);
+
+ return err;
+}
+
static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+
+ if (host->hsq_enabled)
+ return mmc_blk_hsq_issue_rw_rq(mq, req);
mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
@@ -1666,7 +1641,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
goto error_exit;
if (!mmc_host_is_spi(host) &&
- !mmc_blk_in_tran_state(status)) {
+ !mmc_ready_for_data(status)) {
err = mmc_blk_fix_state(card, req);
if (err)
goto error_exit;
@@ -1726,7 +1701,7 @@ static bool mmc_blk_status_error(struct request *req, u32 status)
return brq->cmd.resp[0] & CMD_ERRORS ||
brq->stop.resp[0] & stop_err_bits ||
status & stop_err_bits ||
- (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+ (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
}
static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
@@ -1788,7 +1763,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
/* Try to get back to "tran" state */
if (!mmc_host_is_spi(mq->card->host) &&
- (err || !mmc_blk_in_tran_state(status)))
+ (err || !mmc_ready_for_data(status)))
err = mmc_blk_fix_state(mq->card, req);
/*
@@ -1920,6 +1895,41 @@ static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
mmc_run_bkops(mq->card);
}
+static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq =
+ container_of(mrq, struct mmc_queue_req, brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+ spin_lock_irqsave(&mq->lock, flags);
+ mq->recovery_needed = true;
+ mq->recovery_req = req;
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ schedule_work(&mq->recovery_work);
+ return;
+ }
+
+ mmc_blk_rw_reset_success(mq, req);
+
+ /*
+ * Block layer timeouts race with completions which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+}
+
void mmc_blk_mq_complete(struct request *req)
{
struct mmc_queue *mq = req->q->queuedata;
@@ -2474,8 +2484,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
- put_device(&rpmb->dev);
mmc_blk_put(rpmb->md);
+ put_device(&rpmb->dev);
return 0;
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a971c4bcc442..4c5de6d37ac7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -403,23 +403,6 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
cmd = mrq->cmd;
- /*
- * If host has timed out waiting for the sanitize
- * to complete, card might be still in programming state
- * so let's try to bring the card out of programming
- * state.
- */
- if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
- if (!mmc_interrupt_hpi(host->card)) {
- pr_warn("%s: %s: Interrupted sanitize\n",
- mmc_hostname(host), __func__);
- cmd->error = 0;
- break;
- } else {
- pr_err("%s: %s: Failed to interrupt sanitize\n",
- mmc_hostname(host), __func__);
- }
- }
if (!cmd->error || !cmd->retries ||
mmc_card_removed(host->card))
break;
@@ -1658,8 +1641,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
struct mmc_command cmd = {};
unsigned int qty = 0, busy_timeout = 0;
bool use_r1b_resp = false;
- unsigned long timeout;
- int loop_udelay=64, udelay_max=32768;
int err;
mmc_retune_hold(card->host);
@@ -1763,38 +1744,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
goto out;
- timeout = jiffies + msecs_to_jiffies(busy_timeout);
- do {
- memset(&cmd, 0, sizeof(struct mmc_command));
- cmd.opcode = MMC_SEND_STATUS;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- /* Do not retry else we can't see errors */
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
- if (err || R1_STATUS(cmd.resp[0])) {
- pr_err("error %d requesting status %#x\n",
- err, cmd.resp[0]);
- err = -EIO;
- goto out;
- }
-
- /* Timeout if the device never becomes ready for data and
- * never leaves the program state.
- */
- if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state! %s\n",
- mmc_hostname(card->host), __func__);
- err = -EIO;
- goto out;
- }
- if ((cmd.resp[0] & R1_READY_FOR_DATA) &&
- R1_CURRENT_STATE(cmd.resp[0]) != R1_STATE_PRG)
- break;
-
- usleep_range(loop_udelay, loop_udelay*2);
- if (loop_udelay < udelay_max)
- loop_udelay *= 2;
- } while (1);
+ /* Let's poll to find out when the erase operation completes. */
+ err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE);
out:
mmc_retune_release(card->host);
@@ -1957,7 +1908,6 @@ int mmc_can_sanitize(struct mmc_card *card)
return 1;
return 0;
}
-EXPORT_SYMBOL(mmc_can_sanitize);
int mmc_can_secure_erase_trim(struct mmc_card *card)
{
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index de14b5845f52..de94fbe629bd 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1055,7 +1055,7 @@ static int mmc_select_hs(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
- true, true, true);
+ true, true);
if (err)
pr_warn("%s: switch to high-speed failed, err:%d\n",
mmc_hostname(card->host), err);
@@ -1087,7 +1087,7 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
ext_csd_bits,
card->ext_csd.generic_cmd6_time,
MMC_TIMING_MMC_DDR52,
- true, true, true);
+ true, true);
if (err) {
pr_err("%s: switch to bus width %d ddr failed\n",
mmc_hostname(host), 1 << bus_width);
@@ -1155,7 +1155,7 @@ static int mmc_select_hs400(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0,
- true, false, true);
+ false, true);
if (err) {
pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
mmc_hostname(host), err);
@@ -1173,7 +1173,7 @@ static int mmc_select_hs400(struct mmc_card *card)
max_dtr = card->ext_csd.hs_max_dtr;
mmc_set_clock(host, max_dtr);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, true);
if (err)
goto out_err;
@@ -1197,7 +1197,7 @@ static int mmc_select_hs400(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0,
- true, false, true);
+ false, true);
if (err) {
pr_err("%s: switch to hs400 failed, err:%d\n",
mmc_hostname(host), err);
@@ -1211,7 +1211,7 @@ static int mmc_select_hs400(struct mmc_card *card)
if (host->ops->hs400_complete)
host->ops->hs400_complete(host);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, true);
if (err)
goto out_err;
@@ -1243,20 +1243,20 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time, 0,
- true, false, true);
+ false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, true);
if (err)
goto out_err;
/* Switch HS DDR to HS */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
- 0, true, false, true);
+ 0, false, true);
if (err)
goto out_err;
@@ -1265,7 +1265,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
if (host->ops->hs400_downgrade)
host->ops->hs400_downgrade(host);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, true);
if (err)
goto out_err;
@@ -1274,7 +1274,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time, 0,
- true, false, true);
+ false, true);
if (err)
goto out_err;
@@ -1285,7 +1285,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
* failed. If there really is a problem, we would expect tuning will
* fail and the result ends up the same.
*/
- err = __mmc_switch_status(card, false);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1358,7 +1358,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
card->ext_csd.generic_cmd6_time, 0,
- true, false, true);
+ false, true);
if (err) {
pr_err("%s: switch to hs for hs400es failed, err:%d\n",
mmc_hostname(host), err);
@@ -1366,7 +1366,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
}
mmc_set_timing(host, MMC_TIMING_MMC_HS);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, true);
if (err)
goto out_err;
@@ -1392,7 +1392,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0,
- true, false, true);
+ false, true);
if (err) {
pr_err("%s: switch to hs400es failed, err:%d\n",
mmc_hostname(host), err);
@@ -1407,7 +1407,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
if (host->ops->hs400_enhanced_strobe)
host->ops->hs400_enhanced_strobe(host, &host->ios);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, true);
if (err)
goto out_err;
@@ -1457,7 +1457,7 @@ static int mmc_select_hs200(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0,
- true, false, true);
+ false, true);
if (err)
goto err;
old_timing = host->ios.timing;
@@ -1468,7 +1468,7 @@ static int mmc_select_hs200(struct mmc_card *card)
* switch failed. If there really is a problem, we would expect
* tuning will fail and the result ends up the same.
*/
- err = __mmc_switch_status(card, false);
+ err = mmc_switch_status(card, false);
/*
* mmc_select_timing() assumes timing has not changed if
@@ -1851,15 +1851,19 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
card->reenable_cmdq = card->ext_csd.cmdq_en;
- if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
+ if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
- if (err) {
- pr_err("%s: Failed to enable CQE, error %d\n",
- mmc_hostname(host), err);
- } else {
+ if (!err) {
host->cqe_enabled = true;
- pr_info("%s: Command Queue Engine enabled\n",
- mmc_hostname(host));
+
+ if (card->ext_csd.cmdq_en) {
+ pr_info("%s: Command Queue Engine enabled\n",
+ mmc_hostname(host));
+ } else {
+ host->hsq_enabled = true;
+ pr_info("%s: Host Software Queue enabled\n",
+ mmc_hostname(host));
+ }
}
}
@@ -1958,7 +1962,7 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout, 0, true, false, false);
+ notify_type, timeout, 0, false, false);
if (err)
pr_err("%s: Power Off Notification timed out, %u\n",
mmc_hostname(card->host), timeout);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index e025604e17d4..baa6314f69b4 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -19,9 +19,9 @@
#include "host.h"
#include "mmc_ops.h"
-#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
+#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -431,7 +431,7 @@ static int mmc_switch_status_error(struct mmc_host *host, u32 status)
}
/* Caller must hold re-tuning */
-int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
+int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
u32 status;
int err;
@@ -445,18 +445,54 @@ int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
return mmc_switch_status_error(card->host, status);
}
-int mmc_switch_status(struct mmc_card *card)
+static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
+ enum mmc_busy_cmd busy_cmd, bool *busy)
{
- return __mmc_switch_status(card, true);
+ struct mmc_host *host = card->host;
+ u32 status = 0;
+ int err;
+
+ if (host->ops->card_busy) {
+ *busy = host->ops->card_busy(host);
+ return 0;
+ }
+
+ err = mmc_send_status(card, &status);
+ if (retry_crc_err && err == -EILSEQ) {
+ *busy = true;
+ return 0;
+ }
+ if (err)
+ return err;
+
+ switch (busy_cmd) {
+ case MMC_BUSY_CMD6:
+ err = mmc_switch_status_error(card->host, status);
+ break;
+ case MMC_BUSY_ERASE:
+ err = R1_STATUS(status) ? -EIO : 0;
+ break;
+ case MMC_BUSY_HPI:
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ *busy = !mmc_ready_for_data(status);
+ return 0;
}
-static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- bool send_status, bool retry_crc_err)
+static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ bool send_status, bool retry_crc_err,
+ enum mmc_busy_cmd busy_cmd)
{
struct mmc_host *host = card->host;
int err;
unsigned long timeout;
- u32 status = 0;
+ unsigned int udelay = 32, udelay_max = 32768;
bool expired = false;
bool busy = false;
@@ -478,21 +514,9 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
*/
expired = time_after(jiffies, timeout);
- if (host->ops->card_busy) {
- busy = host->ops->card_busy(host);
- } else {
- err = mmc_send_status(card, &status);
- if (retry_crc_err && err == -EILSEQ) {
- busy = true;
- } else if (err) {
- return err;
- } else {
- err = mmc_switch_status_error(host, status);
- if (err)
- return err;
- busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
- }
- }
+ err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
+ if (err)
+ return err;
/* Timeout if the device still remains busy. */
if (expired && busy) {
@@ -500,11 +524,24 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
mmc_hostname(host), __func__);
return -ETIMEDOUT;
}
+
+ /* Throttle the polling rate to avoid hogging the CPU. */
+ if (busy) {
+ usleep_range(udelay, udelay * 2);
+ if (udelay < udelay_max)
+ udelay *= 2;
+ }
} while (busy);
return 0;
}
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ enum mmc_busy_cmd busy_cmd)
+{
+ return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
+}
+
/**
* __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
@@ -514,7 +551,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
* @timing: new timing to change to
- * @use_busy_signal: use the busy signal as response type
* @send_status: send status cmd to poll for busy
* @retry_crc_err: retry if CRC errors occur when polling with CMD13 for busy
*
@@ -522,12 +558,12 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
- bool use_busy_signal, bool send_status, bool retry_crc_err)
+ bool send_status, bool retry_crc_err)
{
struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {};
- bool use_r1b_resp = use_busy_signal;
+ bool use_r1b_resp = true;
unsigned char old_timing = host->ios.timing;
mmc_retune_hold(host);
@@ -562,24 +598,18 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
}
- if (index == EXT_CSD_SANITIZE_START)
- cmd.sanitize_busy = true;
-
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
goto out;
- /* No need to check card status in case of unblocking command */
- if (!use_busy_signal)
- goto out;
-
/* If SPI, or if HW busy detection was used above, we don't need to poll. */
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
mmc_host_is_spi(host))
goto out_tim;
/* Let's try to poll to find out when the command is completed. */
- err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+ err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
+ MMC_BUSY_CMD6);
if (err)
goto out;
@@ -589,7 +619,7 @@ out_tim:
mmc_set_timing(host, timing);
if (send_status) {
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, true);
if (err && timing)
mmc_set_timing(host, old_timing);
}
@@ -603,7 +633,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
return __mmc_switch(card, set, index, value, timeout_ms, 0,
- true, true, false);
+ true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
@@ -799,32 +829,46 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
-static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
+static int mmc_send_hpi_cmd(struct mmc_card *card)
{
+ unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
+ struct mmc_host *host = card->host;
+ bool use_r1b_resp = true;
struct mmc_command cmd = {};
- unsigned int opcode;
int err;
- opcode = card->ext_csd.hpi_cmd;
- if (opcode == MMC_STOP_TRANSMISSION)
+ cmd.opcode = card->ext_csd.hpi_cmd;
+ cmd.arg = card->rca << 16 | 1;
+
+ /*
+ * Make sure the host's max_busy_timeout fits the needed timeout for HPI.
+ * In case it doesn't, let's instruct the host to avoid HW busy
+ * detection, by using an R1 response instead of R1B.
+ */
+ if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
+ use_r1b_resp = false;
+
+ if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- else if (opcode == MMC_SEND_STATUS)
+ cmd.busy_timeout = busy_timeout_ms;
+ } else {
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ use_r1b_resp = false;
+ }
- cmd.opcode = opcode;
- cmd.arg = card->rca << 16 | 1;
-
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ err = mmc_wait_for_cmd(host, &cmd, 0);
if (err) {
- pr_warn("%s: error %d interrupting operation. "
- "HPI command response %#x\n", mmc_hostname(card->host),
- err, cmd.resp[0]);
+ pr_warn("%s: HPI error %d. Command response %#x\n",
+ mmc_hostname(host), err, cmd.resp[0]);
return err;
}
- if (status)
- *status = cmd.resp[0];
- return 0;
+ /* No need to poll when using HW busy detection. */
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
+ return 0;
+
+ /* Let's poll to find out when the HPI request completes. */
+ return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}
/**
@@ -834,11 +878,10 @@ static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
* Issued High Priority Interrupt, and check for card status
* until out-of prg-state.
*/
-int mmc_interrupt_hpi(struct mmc_card *card)
+static int mmc_interrupt_hpi(struct mmc_card *card)
{
int err;
u32 status;
- unsigned long prg_wait;
if (!card->ext_csd.hpi_en) {
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
@@ -871,20 +914,7 @@ int mmc_interrupt_hpi(struct mmc_card *card)
goto out;
}
- err = mmc_send_hpi_cmd(card, &status);
- if (err)
- goto out;
-
- prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
- do {
- err = mmc_send_status(card, &status);
-
- if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
- break;
- if (time_after(jiffies, prg_wait))
- err = -ETIMEDOUT;
- } while (!err);
-
+ err = mmc_send_hpi_cmd(card);
out:
return err;
}
@@ -1000,3 +1030,37 @@ int mmc_cmdq_disable(struct mmc_card *card)
return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
+
+int mmc_sanitize(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err;
+
+ if (!mmc_can_sanitize(card)) {
+ pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
+ return -EOPNOTSUPP;
+ }
+
+ pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
+
+ mmc_retune_hold(host);
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
+ 1, MMC_SANITIZE_TIMEOUT_MS);
+ if (err)
+ pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
+
+ /*
+ * If the sanitize operation timed out, the card is probably still busy
+ * in R1_STATE_PRG. Rather than continuing to wait, let's try to abort
+ * it with an HPI command to get back into R1_STATE_TRAN.
+ */
+ if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
+ pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
+
+ mmc_retune_release(host);
+
+ pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mmc_sanitize);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 8f2f9475716d..632009260e51 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -10,6 +10,12 @@
#include <linux/types.h>
+enum mmc_busy_cmd {
+ MMC_BUSY_CMD6,
+ MMC_BUSY_ERASE,
+ MMC_BUSY_HPI,
+};
+
struct mmc_host;
struct mmc_card;
@@ -26,20 +32,21 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
-int mmc_interrupt_hpi(struct mmc_card *card);
int mmc_can_ext_csd(struct mmc_card *card);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
-int mmc_switch_status(struct mmc_card *card);
-int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ enum mmc_busy_cmd busy_cmd);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
- bool use_busy_signal, bool send_status, bool retry_crc_err);
+ bool send_status, bool retry_crc_err);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms);
void mmc_run_bkops(struct mmc_card *card);
int mmc_flush_cache(struct mmc_card *card);
int mmc_cmdq_enable(struct mmc_card *card);
int mmc_cmdq_disable(struct mmc_card *card);
+int mmc_sanitize(struct mmc_card *card);
#endif
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 492dd4596314..c21b3cb71775 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -71,6 +71,7 @@ struct mmc_test_mem {
* @sg_len: length of currently mapped scatterlist @sg
* @mem: allocated memory
* @sg: scatterlist
+ * @sg_areq: scatterlist for non-blocking request
*/
struct mmc_test_area {
unsigned long max_sz;
@@ -82,6 +83,7 @@ struct mmc_test_area {
unsigned int sg_len;
struct mmc_test_mem *mem;
struct scatterlist *sg;
+ struct scatterlist *sg_areq;
};
/**
@@ -836,14 +838,16 @@ static int mmc_test_start_areq(struct mmc_test_card *test,
}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
- struct scatterlist *sg, unsigned sg_len,
- unsigned dev_addr, unsigned blocks,
- unsigned blksz, int write, int count)
+ unsigned int dev_addr, int write,
+ int count)
{
struct mmc_test_req *rq1, *rq2;
struct mmc_request *mrq, *prev_mrq;
int i;
int ret = RESULT_OK;
+ struct mmc_test_area *t = &test->area;
+ struct scatterlist *sg = t->sg;
+ struct scatterlist *sg_areq = t->sg_areq;
rq1 = mmc_test_req_alloc();
rq2 = mmc_test_req_alloc();
@@ -857,8 +861,8 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
for (i = 0; i < count; i++) {
mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
- mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
- blksz, write);
+ mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
+ t->blocks, 512, write);
ret = mmc_test_start_areq(test, mrq, prev_mrq);
if (ret)
goto err;
@@ -867,7 +871,8 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
prev_mrq = &rq2->mrq;
swap(mrq, prev_mrq);
- dev_addr += blocks;
+ swap(sg, sg_areq);
+ dev_addr += t->blocks;
}
ret = mmc_test_start_areq(test, NULL, prev_mrq);
@@ -1396,10 +1401,11 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
* Map sz bytes so that it can be transferred.
*/
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
- int max_scatter, int min_sg_len)
+ int max_scatter, int min_sg_len, bool nonblock)
{
struct mmc_test_area *t = &test->area;
int err;
+ unsigned int sg_len = 0;
t->blocks = sz >> 9;
@@ -1411,6 +1417,22 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
t->max_seg_sz, &t->sg_len, min_sg_len);
}
+
+ if (err || !nonblock)
+ goto err;
+
+ if (max_scatter) {
+ err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
+ t->max_segs, t->max_seg_sz,
+ &sg_len);
+ } else {
+ err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
+ t->max_seg_sz, &sg_len, min_sg_len);
+ }
+ if (!err && sg_len != t->sg_len)
+ err = -EINVAL;
+
+err:
if (err)
pr_info("%s: Failed to map sg list\n",
mmc_hostname(test->card->host));
@@ -1440,7 +1462,6 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
struct timespec64 ts1, ts2;
int ret = 0;
int i;
- struct mmc_test_area *t = &test->area;
/*
* In the case of a maximally scattered transfer, the maximum transfer
@@ -1458,15 +1479,14 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
sz = max_tfr;
}
- ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
+ ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
if (ret)
return ret;
if (timed)
ktime_get_ts64(&ts1);
if (nonblock)
- ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
- dev_addr, t->blocks, 512, write, count);
+ ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
else
for (i = 0; i < count && ret == 0; i++) {
ret = mmc_test_area_transfer(test, dev_addr, write);
@@ -1525,6 +1545,7 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
kfree(t->sg);
+ kfree(t->sg_areq);
mmc_test_free_mem(t->mem);
return 0;
@@ -1584,6 +1605,13 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
goto out_free;
}
+ t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
+ GFP_KERNEL);
+ if (!t->sg_areq) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
t->dev_addr = mmc_test_capacity(test->card) / 2;
t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
@@ -2468,7 +2496,7 @@ static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
return RESULT_UNSUP_HOST;
- ret = mmc_test_area_map(test, sz, 0, 0);
+ ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
if (ret)
return ret;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 9edc08685e86..4b1eb89b401d 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -62,7 +62,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
struct mmc_host *host = mq->card->host;
- if (mq->use_cqe)
+ if (mq->use_cqe && !host->hsq_enabled)
return mmc_cqe_issue_type(host, req);
if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
@@ -107,11 +107,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
case MMC_ISSUE_DCMD:
if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
if (recovery_needed)
- __mmc_cqe_recovery_notifier(mq);
+ mmc_cqe_recovery_notifier(mrq);
return BLK_EH_RESET_TIMER;
}
- /* No timeout (XXX: huh? comment doesn't make much sense) */
- blk_mq_complete_request(req);
+ /* The request has gone already */
return BLK_EH_DONE;
default:
/* Timeout is handled by mmc core */
@@ -124,19 +123,16 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
{
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
unsigned long flags;
- int ret;
+ bool ignore_tout;
spin_lock_irqsave(&mq->lock, flags);
-
- if (mq->recovery_needed || !mq->use_cqe)
- ret = BLK_EH_RESET_TIMER;
- else
- ret = mmc_cqe_timed_out(req);
-
+ ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
spin_unlock_irqrestore(&mq->lock, flags);
- return ret;
+ return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
static void mmc_mq_recovery_handler(struct work_struct *work)
@@ -144,12 +140,13 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
struct mmc_queue *mq = container_of(work, struct mmc_queue,
recovery_work);
struct request_queue *q = mq->queue;
+ struct mmc_host *host = mq->card->host;
mmc_get_card(mq->card, &mq->ctx);
mq->in_recovery = true;
- if (mq->use_cqe)
+ if (mq->use_cqe && !host->hsq_enabled)
mmc_blk_cqe_recovery(mq);
else
mmc_blk_mq_recovery(mq);
@@ -160,6 +157,9 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
mq->recovery_needed = false;
spin_unlock_irq(&mq->lock);
+ if (host->hsq_enabled)
+ host->cqe_ops->cqe_recovery_finish(host);
+
mmc_put_card(mq->card, &mq->ctx);
blk_mq_run_hw_queues(q, true);
@@ -279,6 +279,14 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
}
break;
case MMC_ISSUE_ASYNC:
+ /*
+ * For MMC host software queue, we only allow 2 requests in
+ * flight to avoid a long latency.
+ */
+ if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
+ spin_unlock_irq(&mq->lock);
+ return BLK_STS_RESOURCE;
+ }
break;
default:
/*
@@ -430,7 +438,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
* The queue depth for CQE must match the hardware because the request
* tag is used to index the hardware queue.
*/
- if (mq->use_cqe)
+ if (mq->use_cqe && !host->hsq_enabled)
mq->tag_set.queue_depth =
min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
else
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index fe914ff5f5d6..76c7add367d5 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1082,6 +1082,16 @@ retry:
}
}
+ if (host->cqe_ops && !host->cqe_enabled) {
+ err = host->cqe_ops->cqe_enable(host, card);
+ if (!err) {
+ host->cqe_enabled = true;
+ host->hsq_enabled = true;
+ pr_info("%s: Host Software Queue enabled\n",
+ mmc_hostname(host));
+ }
+ }
+
if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
pr_err("%s: Host failed to negotiate down from 3.3V\n",
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 900871073bd7..3ffe4ff49aa7 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -276,14 +276,15 @@ static void sdio_single_irq_set(struct mmc_card *card)
card->sdio_single_irq = NULL;
if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
- card->host->sdio_irqs == 1)
+ card->host->sdio_irqs == 1) {
for (i = 0; i < card->sdio_funcs; i++) {
- func = card->sdio_func[i];
- if (func && func->irq_handler) {
- card->sdio_single_irq = func;
- break;
- }
- }
+ func = card->sdio_func[i];
+ if (func && func->irq_handler) {
+ card->sdio_single_irq = func;
+ break;
+ }
+ }
+ }
}
/**
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 3a5089f0332c..462b5352fea7 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -645,6 +645,7 @@ config MMC_SDHCI_SPRD
depends on ARCH_SPRD
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_HSQ
help
This selects the SDIO Host Controller in Spreadtrum
SoCs, this driver supports R11(IP version: R11P0).
@@ -949,6 +950,17 @@ config MMC_CQHCI
If unsure, say N.
+config MMC_HSQ
+ tristate "MMC Host Software Queue support"
+ help
+ This selects the MMC Host Software Queue support. This may increase
+ performance if the host controller and its driver support it.
+
+ If you have a controller/driver supporting this interface, say Y or M
+ here.
+
+ If unsure, say N.
+
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 21d9089e5eda..b929ef941208 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o
obj-$(CONFIG_MMC_CQHCI) += cqhci.o
+obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index 1aee485d56d4..026ca9194ce5 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to get irq for data line\n");
- return ret;
+ goto free_host;
}
mutex_init(&host->cmd_mutex);
@@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, host);
mmc_add_host(mmc);
return 0;
+
+free_host:
+ mmc_free_host(mmc);
+ return ret;
}
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 916746c6c2c7..e299cdd1e619 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -207,13 +207,13 @@ static int octeon_mmc_probe(struct platform_device *pdev)
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- host->base = (void __iomem *)base;
+ host->base = base;
host->reg_off = 0;
base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
- host->dma_base = (void __iomem *)base;
+ host->dma_base = base;
/*
* To keep the register addresses shared we intentionally use
* a negative offset here, first register used on Octeon therefore
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 5047f7343ffc..75934f3c117e 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -298,16 +299,16 @@ static void __cqhci_disable(struct cqhci_host *cq_host)
cq_host->activated = false;
}
-int cqhci_suspend(struct mmc_host *mmc)
+int cqhci_deactivate(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
- if (cq_host->enabled)
+ if (cq_host->enabled && cq_host->activated)
__cqhci_disable(cq_host);
return 0;
}
-EXPORT_SYMBOL(cqhci_suspend);
+EXPORT_SYMBOL(cqhci_deactivate);
int cqhci_resume(struct mmc_host *mmc)
{
@@ -321,14 +322,20 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
struct cqhci_host *cq_host = mmc->cqe_private;
int err;
+ if (!card->ext_csd.cmdq_en)
+ return -EINVAL;
+
if (cq_host->enabled)
return 0;
cq_host->rca = card->rca;
err = cqhci_host_alloc_tdl(cq_host);
- if (err)
+ if (err) {
+ pr_err("%s: Failed to enable CQE, error %d\n",
+ mmc_hostname(mmc), err);
return err;
+ }
__cqhci_enable(cq_host);
@@ -343,12 +350,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100
+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
static void cqhci_off(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
- ktime_t timeout;
- bool timed_out;
u32 reg;
+ int err;
if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
return;
@@ -358,15 +369,9 @@ static void cqhci_off(struct mmc_host *mmc)
cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
- timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
- while (1) {
- timed_out = ktime_compare(ktime_get(), timeout) > 0;
- reg = cqhci_readl(cq_host, CQHCI_CTL);
- if ((reg & CQHCI_HALT) || timed_out)
- break;
- }
-
- if (timed_out)
+ err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+ reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+ if (err < 0)
pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
else
pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
@@ -1071,7 +1076,7 @@ struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
/* check and setup CMDQ interface */
cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "cqhci_mem");
+ "cqhci");
if (!cqhci_memres) {
dev_dbg(&pdev->dev, "CMDQ not supported\n");
return ERR_PTR(-EINVAL);
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index def76e9b5cac..437700179de4 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -230,7 +230,11 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
int data_error);
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
-int cqhci_suspend(struct mmc_host *mmc);
+int cqhci_deactivate(struct mmc_host *mmc);
+static inline int cqhci_suspend(struct mmc_host *mmc)
+{
+ return cqhci_deactivate(mmc);
+}
int cqhci_resume(struct mmc_host *mmc);
#endif
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 8b038e7b2cd3..2e58743d83bb 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -357,14 +357,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
meson_mx_mmc_start_cmd(mmc, mrq->cmd);
}
-static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
-{
- struct meson_mx_mmc_host *host = mmc_priv(mmc);
- u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
-
- return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
-}
-
static void meson_mx_mmc_read_response(struct mmc_host *mmc,
struct mmc_command *cmd)
{
@@ -506,7 +498,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
static struct mmc_host_ops meson_mx_mmc_ops = {
.request = meson_mx_mmc_request,
.set_ios = meson_mx_mmc_set_ios,
- .card_busy = meson_mx_mmc_card_busy,
.get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
};
@@ -570,7 +561,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
mmc->f_max = clk_round_rate(host->cfg_div_clk,
clk_get_rate(host->parent_clk));
- mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
mmc->ops = &meson_mx_mmc_ops;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
new file mode 100644
index 000000000000..b90b2c97b6cf
--- /dev/null
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *
+ * MMC software queue support based on command queue interfaces
+ *
+ * Copyright (C) 2019 Linaro, Inc.
+ * Author: Baolin Wang <baolin.wang@linaro.org>
+ */
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+
+#include "mmc_hsq.h"
+
+#define HSQ_NUM_SLOTS 64
+#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
+
+static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
+{
+ struct mmc_host *mmc = hsq->mmc;
+ struct hsq_slot *slot;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsq->lock, flags);
+
+ /* Make sure we are not already running a request now */
+ if (hsq->mrq) {
+ spin_unlock_irqrestore(&hsq->lock, flags);
+ return;
+ }
+
+ /* Make sure there are remaining requests that need to be pumped */
+ if (!hsq->qcnt || !hsq->enabled) {
+ spin_unlock_irqrestore(&hsq->lock, flags);
+ return;
+ }
+
+ slot = &hsq->slot[hsq->next_tag];
+ hsq->mrq = slot->mrq;
+ hsq->qcnt--;
+
+ spin_unlock_irqrestore(&hsq->lock, flags);
+
+ mmc->ops->request(mmc, hsq->mrq);
+}
+
+static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
+{
+ struct hsq_slot *slot;
+ int tag;
+
+ /*
+ * If there are no remaining requests in the software queue, then set
+ * an invalid tag.
+ */
+ if (!remains) {
+ hsq->next_tag = HSQ_INVALID_TAG;
+ return;
+ }
+
+ /*
+ * Increase the next tag and check whether the corresponding request
+ * is available; if so, we have found a candidate request.
+ */
+ if (++hsq->next_tag != HSQ_INVALID_TAG) {
+ slot = &hsq->slot[hsq->next_tag];
+ if (slot->mrq)
+ return;
+ }
+
+ /* Otherwise we should iterate over all slots to find an available tag. */
+ for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
+ slot = &hsq->slot[tag];
+ if (slot->mrq)
+ break;
+ }
+
+ if (tag == HSQ_NUM_SLOTS)
+ tag = HSQ_INVALID_TAG;
+
+ hsq->next_tag = tag;
+}
+
+static void mmc_hsq_post_request(struct mmc_hsq *hsq)
+{
+ unsigned long flags;
+ int remains;
+
+ spin_lock_irqsave(&hsq->lock, flags);
+
+ remains = hsq->qcnt;
+ hsq->mrq = NULL;
+
+ /* Update the next available tag to be queued. */
+ mmc_hsq_update_next_tag(hsq, remains);
+
+ if (hsq->waiting_for_idle && !remains) {
+ hsq->waiting_for_idle = false;
+ wake_up(&hsq->wait_queue);
+ }
+
+ /* Do not pump new requests in recovery mode. */
+ if (hsq->recovery_halt) {
+ spin_unlock_irqrestore(&hsq->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&hsq->lock, flags);
+
+ /*
+ * Try to pump a new request to the host controller as fast as
+ * possible after completing the previous request.
+ */
+ if (remains > 0)
+ mmc_hsq_pump_requests(hsq);
+}
+
+/**
+ * mmc_hsq_finalize_request - finalize one request if the request is done
+ * @mmc: the host controller
+ * @mrq: the request that needs to be finalized
+ *
+ * Return true if we finalized the corresponding request in the software
+ * otherwise return false.
+ */
+bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_hsq *hsq = mmc->cqe_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsq->lock, flags);
+
+ if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
+ spin_unlock_irqrestore(&hsq->lock, flags);
+ return false;
+ }
+
+ /*
+ * Clear the completed slot's request to make room for a new request.
+ */
+ hsq->slot[hsq->next_tag].mrq = NULL;
+
+ spin_unlock_irqrestore(&hsq->lock, flags);
+
+ mmc_cqe_request_done(mmc, hsq->mrq);
+
+ mmc_hsq_post_request(hsq);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
+
+static void mmc_hsq_recovery_start(struct mmc_host *mmc)
+{
+ struct mmc_hsq *hsq = mmc->cqe_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsq->lock, flags);
+
+ hsq->recovery_halt = true;
+
+ spin_unlock_irqrestore(&hsq->lock, flags);
+}
+
+static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
+{
+ struct mmc_hsq *hsq = mmc->cqe_private;
+ int remains;
+
+ spin_lock_irq(&hsq->lock);
+
+ hsq->recovery_halt = false;
+ remains = hsq->qcnt;
+
+ spin_unlock_irq(&hsq->lock);
+
+ /*
+ * Try to pump new requests if there are requests pending in the
+ * software queue after finishing recovery.
+ */
+ if (remains > 0)
+ mmc_hsq_pump_requests(hsq);
+}
+
+static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_hsq *hsq = mmc->cqe_private;
+ int tag = mrq->tag;
+
+ spin_lock_irq(&hsq->lock);
+
+ if (!hsq->enabled) {
+ spin_unlock_irq(&hsq->lock);
+ return -ESHUTDOWN;
+ }
+
+ /* Do not queue any new requests in recovery mode. */
+ if (hsq->recovery_halt) {
+ spin_unlock_irq(&hsq->lock);
+ return -EBUSY;
+ }
+
+ hsq->slot[tag].mrq = mrq;
+
+ /*
+ * Set the next tag to the current request's tag if there is no
+ * available next tag.
+ */
+ if (hsq->next_tag == HSQ_INVALID_TAG)
+ hsq->next_tag = tag;
+
+ hsq->qcnt++;
+
+ spin_unlock_irq(&hsq->lock);
+
+ mmc_hsq_pump_requests(hsq);
+
+ return 0;
+}
+
+static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ if (mmc->ops->post_req)
+ mmc->ops->post_req(mmc, mrq, 0);
+}
+
+static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
+{
+ bool is_idle;
+
+ spin_lock_irq(&hsq->lock);
+
+ is_idle = (!hsq->mrq && !hsq->qcnt) ||
+ hsq->recovery_halt;
+
+ *ret = hsq->recovery_halt ? -EBUSY : 0;
+ hsq->waiting_for_idle = !is_idle;
+
+ spin_unlock_irq(&hsq->lock);
+
+ return is_idle;
+}
+
+static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
+{
+ struct mmc_hsq *hsq = mmc->cqe_private;
+ int ret;
+
+ wait_event(hsq->wait_queue,
+ mmc_hsq_queue_is_idle(hsq, &ret));
+
+ return ret;
+}
+
+static void mmc_hsq_disable(struct mmc_host *mmc)
+{
+ struct mmc_hsq *hsq = mmc->cqe_private;
+ u32 timeout = 500;
+ int ret;
+
+ spin_lock_irq(&hsq->lock);
+
+ if (!hsq->enabled) {
+ spin_unlock_irq(&hsq->lock);
+ return;
+ }
+
+ spin_unlock_irq(&hsq->lock);
+
+ ret = wait_event_timeout(hsq->wait_queue,
+ mmc_hsq_queue_is_idle(hsq, &ret),
+ msecs_to_jiffies(timeout));
+ if (ret == 0) {
+ pr_warn("could not stop mmc software queue\n");
+ return;
+ }
+
+ spin_lock_irq(&hsq->lock);
+
+ hsq->enabled = false;
+
+ spin_unlock_irq(&hsq->lock);
+}
+
+static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct mmc_hsq *hsq = mmc->cqe_private;
+
+ spin_lock_irq(&hsq->lock);
+
+ if (hsq->enabled) {
+ spin_unlock_irq(&hsq->lock);
+ return -EBUSY;
+ }
+
+ hsq->enabled = true;
+
+ spin_unlock_irq(&hsq->lock);
+
+ return 0;
+}
+
+static const struct mmc_cqe_ops mmc_hsq_ops = {
+ .cqe_enable = mmc_hsq_enable,
+ .cqe_disable = mmc_hsq_disable,
+ .cqe_request = mmc_hsq_request,
+ .cqe_post_req = mmc_hsq_post_req,
+ .cqe_wait_for_idle = mmc_hsq_wait_for_idle,
+ .cqe_recovery_start = mmc_hsq_recovery_start,
+ .cqe_recovery_finish = mmc_hsq_recovery_finish,
+};
+
+int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
+{
+ hsq->num_slots = HSQ_NUM_SLOTS;
+ hsq->next_tag = HSQ_INVALID_TAG;
+
+ hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
+ sizeof(struct hsq_slot), GFP_KERNEL);
+ if (!hsq->slot)
+ return -ENOMEM;
+
+ hsq->mmc = mmc;
+ hsq->mmc->cqe_private = hsq;
+ mmc->cqe_ops = &mmc_hsq_ops;
+
+ spin_lock_init(&hsq->lock);
+ init_waitqueue_head(&hsq->wait_queue);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_hsq_init);
+
+void mmc_hsq_suspend(struct mmc_host *mmc)
+{
+ mmc_hsq_disable(mmc);
+}
+EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
+
+int mmc_hsq_resume(struct mmc_host *mmc)
+{
+ return mmc_hsq_enable(mmc, NULL);
+}
+EXPORT_SYMBOL_GPL(mmc_hsq_resume);
+
+MODULE_DESCRIPTION("MMC Host Software Queue support");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h
new file mode 100644
index 000000000000..18b9cf55925f
--- /dev/null
+++ b/drivers/mmc/host/mmc_hsq.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_MMC_HSQ_H
+#define LINUX_MMC_HSQ_H
+
+struct hsq_slot {
+ struct mmc_request *mrq;
+};
+
+struct mmc_hsq {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ wait_queue_head_t wait_queue;
+ struct hsq_slot *slot;
+ spinlock_t lock;
+
+ int next_tag;
+ int num_slots;
+ int qcnt;
+
+ bool enabled;
+ bool waiting_for_idle;
+ bool recovery_halt;
+};
+
+int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc);
+void mmc_hsq_suspend(struct mmc_host *mmc);
+int mmc_hsq_resume(struct mmc_host *mmc);
+bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq);
+
+#endif
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e9ffce8d41ea..647567def612 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -22,6 +22,7 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
@@ -274,6 +275,32 @@ static struct variant_data variant_stm32_sdmmc = {
.init = sdmmc_variant_init,
};
+static struct variant_data variant_stm32_sdmmcv2 = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .f_max = 208000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
+ .stm32_idmabsize_mask = GENMASK(16, 5),
+ .dma_lli = true,
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
+ .init = sdmmc_variant_init,
+};
+
static struct variant_data variant_qcom = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
@@ -1217,6 +1244,9 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
writel_relaxed(clks, host->base + MMCIDATATIMER);
}
+ if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
+ host->ops->pre_sig_volt_switch(host);
+
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
@@ -1830,6 +1860,7 @@ static int mmci_get_cd(struct mmc_host *mmc)
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
+ struct mmci_host *host = mmc_priv(mmc);
int ret = 0;
if (!IS_ERR(mmc->supply.vqmmc)) {
@@ -1849,6 +1880,9 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
+ if (!ret && host->ops && host->ops->post_sig_volt_switch)
+ ret = host->ops->post_sig_volt_switch(host, ios);
+
if (ret)
dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
}
@@ -1933,6 +1967,8 @@ static int mmci_probe(struct amba_device *dev,
host = mmc_priv(mmc);
host->mmc = mmc;
+ host->mmc_ops = &mmci_ops;
+ mmc->ops = &mmci_ops;
/*
* Some variant (STM32) doesn't have opendrain bit, nevertheless
@@ -2072,8 +2108,6 @@ static int mmci_probe(struct amba_device *dev,
host->stop_abort.arg = 0;
host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
- mmc->ops = &mmci_ops;
-
/* We support these PM capabilities. */
mmc->pm_caps |= MMC_PM_KEEP_POWER;
@@ -2335,6 +2369,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_stm32_sdmmc,
},
+ {
+ .id = 0x00253180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv2,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index ea6a0b5779d4..e1a9b96a3396 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -165,6 +165,7 @@
/* Extended status bits for the STM32 variants */
#define MCI_STM32_BUSYD0 BIT(20)
#define MCI_STM32_BUSYD0END BIT(21)
+#define MCI_STM32_VSWEND BIT(25)
#define MMCICLEAR 0x038
#define MCI_CMDCRCFAILCLR (1 << 0)
@@ -182,6 +183,9 @@
#define MCI_ST_SDIOITC (1 << 22)
#define MCI_ST_CEATAENDC (1 << 23)
#define MCI_ST_BUSYENDC (1 << 24)
+/* Extended clear bits for the STM32 variants */
+#define MCI_STM32_VSWENDC BIT(25)
+#define MCI_STM32_CKSTOPC BIT(26)
#define MMCIMASK0 0x03c
#define MCI_CMDCRCFAILMASK (1 << 0)
@@ -377,6 +381,8 @@ struct mmci_host_ops {
void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
+ void (*pre_sig_volt_switch)(struct mmci_host *host);
+ int (*post_sig_volt_switch)(struct mmci_host *host, struct mmc_ios *ios);
};
struct mmci_host {
@@ -407,8 +413,10 @@ struct mmci_host {
u32 mask1_reg;
u8 vqmmc_enabled:1;
struct mmci_platform_data *plat;
+ struct mmc_host_ops *mmc_ops;
struct mmci_host_ops *ops;
struct variant_data *variant;
+ void *variant_priv;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_opendrain;
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index a4f7e8e689d3..d33e62bd6153 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -3,10 +3,13 @@
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Ludovic.barre@st.com for STMicroelectronics.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include "mmci.h"
@@ -14,17 +17,40 @@
#define SDMMC_LLI_BUF_LEN PAGE_SIZE
#define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT)
+#define DLYB_CR 0x0
+#define DLYB_CR_DEN BIT(0)
+#define DLYB_CR_SEN BIT(1)
+
+#define DLYB_CFGR 0x4
+#define DLYB_CFGR_SEL_MASK GENMASK(3, 0)
+#define DLYB_CFGR_UNIT_MASK GENMASK(14, 8)
+#define DLYB_CFGR_LNG_MASK GENMASK(27, 16)
+#define DLYB_CFGR_LNGF BIT(31)
+
+#define DLYB_NB_DELAY 11
+#define DLYB_CFGR_SEL_MAX (DLYB_NB_DELAY + 1)
+#define DLYB_CFGR_UNIT_MAX 127
+
+#define DLYB_LNG_TIMEOUT_US 1000
+#define SDMMC_VSWEND_TIMEOUT_US 10000
+
struct sdmmc_lli_desc {
u32 idmalar;
u32 idmabase;
u32 idmasize;
};
-struct sdmmc_priv {
+struct sdmmc_idma {
dma_addr_t sg_dma;
void *sg_cpu;
};
+struct sdmmc_dlyb {
+ void __iomem *base;
+ u32 unit;
+ u32 max;
+};
+
static int sdmmc_idma_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
@@ -36,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
* excepted the last element which has no constraint on idmasize
*/
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
- if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32)) ||
- !IS_ALIGNED(sg_dma_len(data->sg), SDMMC_IDMA_BURST)) {
+ if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
dev_err(mmc_dev(host->mmc),
"unaligned scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
@@ -45,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
}
}
- if (!IS_ALIGNED(sg_dma_address(data->sg), sizeof(u32))) {
+ if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
dev_err(mmc_dev(host->mmc),
"unaligned last scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
@@ -92,7 +118,7 @@ static void sdmmc_idma_unprep_data(struct mmci_host *host,
static int sdmmc_idma_setup(struct mmci_host *host)
{
- struct sdmmc_priv *idma;
+ struct sdmmc_idma *idma;
idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL);
if (!idma)
@@ -123,7 +149,7 @@ static int sdmmc_idma_setup(struct mmci_host *host)
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
{
- struct sdmmc_priv *idma = host->dma_priv;
+ struct sdmmc_idma *idma = host->dma_priv;
struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
struct mmc_data *data = host->data;
struct scatterlist *sg;
@@ -226,12 +252,25 @@ static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
mmci_write_clkreg(host, clk);
}
+static void sdmmc_dlyb_input_ck(struct sdmmc_dlyb *dlyb)
+{
+ if (!dlyb || !dlyb->base)
+ return;
+
+ /* Output clock = Input clock */
+ writel_relaxed(0, dlyb->base + DLYB_CR);
+}
+
static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
{
struct mmc_ios ios = host->mmc->ios;
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
+ /* adds OF options */
pwr = host->pwr_reg_add;
+ sdmmc_dlyb_input_ck(dlyb);
+
if (ios.power_mode == MMC_POWER_OFF) {
/* Only a reset could power-off sdmmc */
reset_control_assert(host->rst);
@@ -254,6 +293,10 @@ static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
writel(MCI_IRQENABLE | host->variant->start_err,
host->base + MMCIMASK0);
+ /* preserve the voltage switch bits */
+ pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
+ MCI_STM32_VSWITCH);
+
/*
* After a power-cycle state, we must set the SDMMC in
* Power-off. The SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are
@@ -315,14 +358,145 @@ complete:
if (host->busy_status) {
writel_relaxed(mask & ~host->variant->busy_detect_mask,
base + MMCIMASK0);
- writel_relaxed(host->variant->busy_detect_mask,
- base + MMCICLEAR);
host->busy_status = 0;
}
+ writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);
+
return true;
}
+static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb,
+ int unit, int phase, bool sampler)
+{
+ u32 cfgr;
+
+ writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);
+
+ cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
+ FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
+ writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);
+
+ if (!sampler)
+ writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
+}
+
+static int sdmmc_dlyb_lng_tuning(struct mmci_host *host)
+{
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
+ u32 cfgr;
+ int i, lng, ret;
+
+ for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
+ sdmmc_dlyb_set_cfgr(dlyb, i, DLYB_CFGR_SEL_MAX, true);
+
+ ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
+ (cfgr & DLYB_CFGR_LNGF),
+ 1, DLYB_LNG_TIMEOUT_US);
+ if (ret) {
+ dev_warn(mmc_dev(host->mmc),
+ "delay line cfg timeout unit:%d cfgr:%d\n",
+ i, cfgr);
+ continue;
+ }
+
+ lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr);
+ if (lng < BIT(DLYB_NB_DELAY) && lng > 0)
+ break;
+ }
+
+ if (i > DLYB_CFGR_UNIT_MAX)
+ return -EINVAL;
+
+ dlyb->unit = i;
+ dlyb->max = __fls(lng);
+
+ return 0;
+}
+
+static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
+{
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
+ int cur_len = 0, max_len = 0, end_of_len = 0;
+ int phase;
+
+ for (phase = 0; phase <= dlyb->max; phase++) {
+ sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
+
+ if (mmc_send_tuning(host->mmc, opcode, NULL)) {
+ cur_len = 0;
+ } else {
+ cur_len++;
+ if (cur_len > max_len) {
+ max_len = cur_len;
+ end_of_len = phase;
+ }
+ }
+ }
+
+ if (!max_len) {
+ dev_err(mmc_dev(host->mmc), "no tuning point found\n");
+ return -EINVAL;
+ }
+
+ phase = end_of_len - max_len / 2;
+ sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
+
+ dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
+ dlyb->unit, dlyb->max, phase);
+
+ return 0;
+}
+
+static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct sdmmc_dlyb *dlyb = host->variant_priv;
+
+ if (!dlyb || !dlyb->base)
+ return -EINVAL;
+
+ if (sdmmc_dlyb_lng_tuning(host))
+ return -EINVAL;
+
+ return sdmmc_dlyb_phase_tuning(host, opcode);
+}
+
+static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host)
+{
+ /* clear the voltage switch completion flag */
+ writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);
+ /* enable the voltage switch procedure */
+ mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
+}
+
+static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
+ struct mmc_ios *ios)
+{
+ unsigned long flags;
+ u32 status;
+ int ret = 0;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ spin_lock_irqsave(&host->lock, flags);
+ mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* wait up to 10ms for voltage switch completion */
+ ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
+ status,
+ (status & MCI_STM32_VSWEND),
+ 10, SDMMC_VSWEND_TIMEOUT_US);
+
+ writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
+ host->base + MMCICLEAR);
+ mmci_write_pwrreg(host, host->pwr_reg &
+ ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
+ }
+
+ return ret;
+}
+
static struct mmci_host_ops sdmmc_variant_ops = {
.validate_data = sdmmc_idma_validate_data,
.prep_data = sdmmc_idma_prep_data,
@@ -334,9 +508,27 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,
+ .pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch,
+ .post_sig_volt_switch = sdmmc_post_sig_volt_switch,
};
void sdmmc_variant_init(struct mmci_host *host)
{
+ struct device_node *np = host->mmc->parent->of_node;
+ void __iomem *base_dlyb;
+ struct sdmmc_dlyb *dlyb;
+
host->ops = &sdmmc_variant_ops;
+
+ base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
+ if (IS_ERR(base_dlyb))
+ return;
+
+ dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
+ if (!dlyb)
+ return;
+
+ dlyb->base = base_dlyb;
+ host->variant_priv = dlyb;
+ host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 7726dcf48f2c..b221c02cc71f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -128,6 +128,7 @@
#define MSDC_PS_CDSTS (0x1 << 1) /* R */
#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
#define MSDC_PS_DAT (0xff << 16) /* R */
+#define MSDC_PS_DATA1 (0x1 << 17) /* R */
#define MSDC_PS_CMD (0x1 << 24) /* R */
#define MSDC_PS_WP (0x1 << 31) /* R */
@@ -361,6 +362,7 @@ struct msdc_save_para {
struct mtk_mmc_compatible {
u8 clk_div_bits;
+ bool recheck_sdio_irq;
bool hs400_tune; /* only used for MT8173 */
u32 pad_tune_reg;
bool async_fifo;
@@ -436,6 +438,7 @@ struct msdc_host {
static const struct mtk_mmc_compatible mt8135_compat = {
.clk_div_bits = 8,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
@@ -448,6 +451,7 @@ static const struct mtk_mmc_compatible mt8135_compat = {
static const struct mtk_mmc_compatible mt8173_compat = {
.clk_div_bits = 8,
+ .recheck_sdio_irq = true,
.hs400_tune = true,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
@@ -460,6 +464,7 @@ static const struct mtk_mmc_compatible mt8173_compat = {
static const struct mtk_mmc_compatible mt8183_compat = {
.clk_div_bits = 12,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -472,6 +477,7 @@ static const struct mtk_mmc_compatible mt8183_compat = {
static const struct mtk_mmc_compatible mt2701_compat = {
.clk_div_bits = 12,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -484,6 +490,7 @@ static const struct mtk_mmc_compatible mt2701_compat = {
static const struct mtk_mmc_compatible mt2712_compat = {
.clk_div_bits = 12,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -496,6 +503,7 @@ static const struct mtk_mmc_compatible mt2712_compat = {
static const struct mtk_mmc_compatible mt7622_compat = {
.clk_div_bits = 12,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -508,6 +516,7 @@ static const struct mtk_mmc_compatible mt7622_compat = {
static const struct mtk_mmc_compatible mt8516_compat = {
.clk_div_bits = 12,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
@@ -518,6 +527,7 @@ static const struct mtk_mmc_compatible mt8516_compat = {
static const struct mtk_mmc_compatible mt7620_compat = {
.clk_div_bits = 8,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
@@ -591,6 +601,7 @@ static void msdc_reset_hw(struct msdc_host *host)
static void msdc_cmd_next(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd);
+static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb);
static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
@@ -1007,6 +1018,32 @@ static int msdc_auto_cmd_done(struct msdc_host *host, int events,
return cmd->error;
}
+/**
+ * msdc_recheck_sdio_irq - recheck whether the SDIO IRQ was lost
+ * @host: MSDC host
+ *
+ * The host controller may lose an interrupt in some special cases.
+ * Recheck the SDIO IRQ to make sure all interrupts can be processed
+ * immediately.
+ */
+static void msdc_recheck_sdio_irq(struct msdc_host *host)
+{
+ u32 reg_int, reg_inten, reg_ps;
+
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+ reg_inten = readl(host->base + MSDC_INTEN);
+ if (reg_inten & MSDC_INTEN_SDIOIRQ) {
+ reg_int = readl(host->base + MSDC_INT);
+ reg_ps = readl(host->base + MSDC_PS);
+ if (!(reg_int & MSDC_INT_SDIOIRQ ||
+ reg_ps & MSDC_PS_DATA1)) {
+ __msdc_enable_sdio_irq(host, 0);
+ sdio_signal_irq(host->mmc);
+ }
+ }
+ }
+}
+
static void msdc_track_cmd_data(struct msdc_host *host,
struct mmc_command *cmd, struct mmc_data *data)
{
@@ -1035,6 +1072,8 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
if (host->error)
msdc_reset_hw(host);
mmc_request_done(host->mmc, mrq);
+ if (host->dev_comp->recheck_sdio_irq)
+ msdc_recheck_sdio_irq(host);
}
/* returns true if command is fully handled; returns false otherwise */
@@ -1393,6 +1432,8 @@ static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
if (enb) {
sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+ if (host->dev_comp->recheck_sdio_irq)
+ msdc_recheck_sdio_irq(host);
} else {
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index f524251d5113..2a4c83a5f32e 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -57,6 +57,12 @@ struct renesas_sdhi {
void __iomem *scc_ctl;
u32 scc_tappos;
u32 scc_tappos_hs400;
+ bool doing_tune;
+
+ /* Tuning values: 1 for success, 0 for failure */
+ DECLARE_BITMAP(taps, BITS_PER_LONG);
+ unsigned int tap_num;
+ unsigned long tap_set;
};
#define host_to_priv(host) \
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 35cb24cd45b4..df826661366f 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -250,20 +250,25 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
#define SH_MOBILE_SDHI_SCC_CKSEL 0x006
#define SH_MOBILE_SDHI_SCC_RVSCNTL 0x008
#define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A
+#define SH_MOBILE_SDHI_SCC_SMPCMP 0x00C
#define SH_MOBILE_SDHI_SCC_TMPPORT2 0x00E
-/* Definitions for values the SH_MOBILE_SDHI_SCC_DTCNTL register */
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0)
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK 0xff
-/* Definitions for values the SH_MOBILE_SDHI_SCC_CKSEL register */
#define SH_MOBILE_SDHI_SCC_CKSEL_DTSEL BIT(0)
-/* Definitions for values the SH_MOBILE_SDHI_SCC_RVSCNTL register */
+
#define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN BIT(0)
-/* Definitions for values the SH_MOBILE_SDHI_SCC_RVSREQ register */
+
+#define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN BIT(0)
+#define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP BIT(1)
#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2)
-/* Definitions for values the SH_MOBILE_SDHI_SCC_TMPPORT2 register */
+
+#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN BIT(8)
+#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP BIT(24)
+#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR (BIT(8) | BIT(24))
+
#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL BIT(4)
#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN BIT(31)
@@ -316,17 +321,9 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK;
}
-static void renesas_sdhi_prepare_tuning(struct tmio_mmc_host *host,
- unsigned long tap)
-{
- struct renesas_sdhi *priv = host_to_priv(host);
-
- /* Set sampling clock position */
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap);
-}
-
-static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
+static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -339,6 +336,12 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
priv->scc_tappos_hs400);
+ /* Gen3 can't do automatic tap correction with HS400, so disable it */
+ if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC)
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
@@ -352,7 +355,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
if (priv->quirks && priv->quirks->hs400_4taps)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
- host->tap_set / 2);
+ priv->tap_set / 2);
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
@@ -374,8 +377,9 @@ static void renesas_sdhi_reset_scc(struct tmio_mmc_host *host,
SH_MOBILE_SDHI_SCC_CKSEL));
}
-static void renesas_sdhi_disable_scc(struct tmio_mmc_host *host)
+static void renesas_sdhi_disable_scc(struct mmc_host *mmc)
{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
renesas_sdhi_reset_scc(host, priv);
@@ -410,9 +414,12 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
}
-static void renesas_sdhi_prepare_hs400_tuning(struct tmio_mmc_host *host)
+static int renesas_sdhi_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
renesas_sdhi_reset_hs400_mode(host, host_to_priv(host));
+ return 0;
}
#define SH_MOBILE_SDHI_MAX_TAP 3
@@ -426,6 +433,8 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
unsigned long ntap; /* temporary counter of tuning success */
unsigned long i;
+ priv->doing_tune = false;
+
/* Clear SCC_RVSREQ */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
@@ -434,11 +443,11 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
* result requiring the tap to be good in both runs before
* considering it for tuning selection.
*/
- for (i = 0; i < host->tap_num * 2; i++) {
- int offset = host->tap_num * (i < host->tap_num ? 1 : -1);
+ for (i = 0; i < priv->tap_num * 2; i++) {
+ int offset = priv->tap_num * (i < priv->tap_num ? 1 : -1);
- if (!test_bit(i, host->taps))
- clear_bit(i + offset, host->taps);
+ if (!test_bit(i, priv->taps))
+ clear_bit(i + offset, priv->taps);
}
/*
@@ -450,8 +459,8 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
ntap = 0;
tap_start = 0;
tap_end = 0;
- for (i = 0; i < host->tap_num * 2; i++) {
- if (test_bit(i, host->taps)) {
+ for (i = 0; i < priv->tap_num * 2; i++) {
+ if (test_bit(i, priv->taps)) {
ntap++;
} else {
if (ntap > tap_cnt) {
@@ -470,12 +479,12 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
}
if (tap_cnt >= SH_MOBILE_SDHI_MAX_TAP)
- host->tap_set = (tap_start + tap_end) / 2 % host->tap_num;
+ priv->tap_set = (tap_start + tap_end) / 2 % priv->tap_num;
else
return -EIO;
/* Set SCC */
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, host->tap_set);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, priv->tap_set);
/* Enable auto re-tuning */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
@@ -485,6 +494,97 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
return 0;
}
+static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+ int i;
+
+ priv->tap_num = renesas_sdhi_init_tuning(host);
+ if (!priv->tap_num)
+ return 0; /* Tuning is not supported */
+
+ if (priv->tap_num * 2 >= sizeof(priv->taps) * BITS_PER_BYTE) {
+ dev_err(&host->pdev->dev,
+ "Too many taps, please update 'taps' in tmio_mmc_host!\n");
+ return -EINVAL;
+ }
+
+ priv->doing_tune = true;
+ bitmap_zero(priv->taps, priv->tap_num * 2);
+
+ /* Issue CMD19 twice for each tap */
+ for (i = 0; i < 2 * priv->tap_num; i++) {
+ /* Set sampling clock position */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
+
+ if (mmc_send_tuning(host->mmc, opcode, NULL) == 0)
+ set_bit(i, priv->taps);
+ }
+
+ return renesas_sdhi_select_tuning(host);
+}
+
+static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_4tap)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+ unsigned long new_tap = priv->tap_set;
+ u32 val;
+
+ val = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ);
+ if (!val)
+ return false;
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
+
+ /* Change TAP position according to correction status */
+ if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC &&
+ host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
+ /*
+ * With HS400, the DAT signal is based on DS, not CLK.
+ * Therefore, use only CMD status.
+ */
+ u32 smpcmp = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) &
+ SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR;
+ if (!smpcmp)
+ return false; /* no error in CMD signal */
+ else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP)
+ new_tap++;
+ else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN)
+ new_tap--;
+ else
+ return true; /* need retune */
+ } else {
+ if (val & SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR)
+ return true; /* need retune */
+ else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP)
+ new_tap++;
+ else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN)
+ new_tap--;
+ else
+ return false;
+ }
+
+ priv->tap_set = (new_tap % priv->tap_num);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
+ priv->tap_set / (use_4tap ? 2 : 1));
+
+ return false;
+}
+
+static bool renesas_sdhi_auto_correction(struct tmio_mmc_host *host)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ /* Check SCC error */
+ if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) &
+ SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) {
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
+ return true;
+ }
+
+ return false;
+}
+
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
@@ -499,20 +599,14 @@ static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && !use_4tap))
return false;
- if (mmc_doing_retune(host->mmc))
+ if (mmc_doing_retune(host->mmc) || priv->doing_tune)
return false;
- /* Check SCC error */
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
- SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &&
- sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) &
- SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) {
- /* Clear SCC error */
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
- return true;
- }
+ SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN)
+ return renesas_sdhi_auto_correction(host);
- return false;
+ return renesas_sdhi_manual_correction(host, use_4tap);
}
static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host)
@@ -531,10 +625,6 @@ static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host)
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
- ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
- sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
-
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK,
TMIO_MASK_INIT_RCAR2);
@@ -811,14 +901,11 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!hit)
dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
- host->init_tuning = renesas_sdhi_init_tuning;
- host->prepare_tuning = renesas_sdhi_prepare_tuning;
- host->select_tuning = renesas_sdhi_select_tuning;
- host->check_scc_error = renesas_sdhi_check_scc_error;
- host->prepare_hs400_tuning =
- renesas_sdhi_prepare_hs400_tuning;
- host->hs400_downgrade = renesas_sdhi_disable_scc;
- host->hs400_complete = renesas_sdhi_hs400_complete;
+ host->execute_tuning = renesas_sdhi_execute_tuning;
+ host->check_retune = renesas_sdhi_check_scc_error;
+ host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning;
+ host->ops.hs400_downgrade = renesas_sdhi_disable_scc;
+ host->ops.hs400_complete = renesas_sdhi_hs400_complete;
}
num_irqs = platform_irq_count(pdev);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 2a2173d953f5..d8b76cb8698a 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -75,7 +75,7 @@ struct sdhci_acpi_host {
bool use_runtime_pm;
bool is_intel;
bool reset_signal_volt_on_suspend;
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
enum {
@@ -242,7 +242,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
static bool sdhci_acpi_byt(void)
{
static const struct x86_cpu_id byt[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
{}
};
@@ -252,7 +252,7 @@ static bool sdhci_acpi_byt(void)
static bool sdhci_acpi_cht(void)
{
static const struct x86_cpu_id cht[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};
@@ -605,10 +605,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
}
static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
- .chip = &sdhci_acpi_chip_amd,
- .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
- .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
- SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
};
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index e573495f8726..6da6d4fb5edd 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -68,7 +68,7 @@ struct sdhci_cdns_priv {
void __iomem *hrs_addr;
bool enhanced_strobe;
unsigned int nr_phy_params;
- struct sdhci_cdns_phy_param phy_params[0];
+ struct sdhci_cdns_phy_param phy_params[];
};
struct sdhci_cdns_phy_cfg {
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 382f25b2fa45..5ec8e4bf1ac7 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -9,6 +9,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -73,6 +74,7 @@
#define ESDHC_STROBE_DLL_CTRL 0x70
#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0)
#define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1)
+#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT 0x7
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3
#define ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT (4 << 20)
@@ -160,6 +162,22 @@
#define ESDHC_FLAG_CQHCI BIT(12)
/* need request pmqos during low power */
#define ESDHC_FLAG_PMQOS BIT(13)
+/* The IP state is lost in low power mode */
+#define ESDHC_FLAG_STATE_LOST_IN_LPMODE BIT(14)
+/* The IP loses its clock rate during runtime PM */
+#define ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME BIT(15)
+/*
+ * The IP does not fully support the ACMD23 feature when using ADMA mode.
+ * In ADMA mode it only uses the 16-bit block count of register 0x4
+ * (BLOCK_ATT) as the CMD23 argument for ACMD23 mode, which means it
+ * ignores the upper 16 bits of the CMD23 argument. This breaks reliable
+ * write operations on RPMB, because an RPMB reliable write needs to set
+ * bit 31 of the CMD23 argument.
+ * imx6qpdl/imx6sx/imx6sl/imx7d have this limitation only in ADMA mode;
+ * SDMA does not. So when these SoCs use ADMA mode, the ACMD23 feature
+ * needs to be disabled.
+ */
+#define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16)
struct esdhc_soc_data {
u32 flags;
@@ -182,43 +200,67 @@ static const struct esdhc_soc_data esdhc_imx53_data = {
};
static const struct esdhc_soc_data usdhc_imx6q_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING,
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
+ | ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
static const struct esdhc_soc_data usdhc_imx6sl_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
- | ESDHC_FLAG_HS200,
+ | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_BROKEN_AUTO_CMD23,
+};
+
+static const struct esdhc_soc_data usdhc_imx6sll_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
static const struct esdhc_soc_data usdhc_imx6sx_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
- | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE
+ | ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
static const struct esdhc_soc_data usdhc_imx6ull_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
- | ESDHC_FLAG_ERR010450,
+ | ESDHC_FLAG_ERR010450
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
static const struct esdhc_soc_data usdhc_imx7d_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
- | ESDHC_FLAG_HS400,
+ | ESDHC_FLAG_HS400
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE
+ | ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
static struct esdhc_soc_data usdhc_imx7ulp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
- | ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400,
+ | ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
static struct esdhc_soc_data usdhc_imx8qxp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
- | ESDHC_FLAG_CQHCI,
+ | ESDHC_FLAG_CQHCI
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE
+ | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
+};
+
+static struct esdhc_soc_data usdhc_imx8mm_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ | ESDHC_FLAG_CQHCI
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
struct pltfm_imx_data {
@@ -264,11 +306,13 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, },
{ .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
+ { .compatible = "fsl,imx6sll-usdhc", .data = &usdhc_imx6sll_data, },
{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
{ .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, },
{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
+ { .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -301,6 +345,17 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
}
+static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host)
+{
+ u32 present_state;
+ int ret;
+
+ ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, present_state,
+ (present_state & ESDHC_CLOCK_GATE_OFF), 2, 100);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__);
+}
+
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -514,6 +569,8 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
else
new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (!(new_val & ESDHC_VENDOR_SPEC_FRC_SDCLK_ON))
+ esdhc_wait_for_card_clock_gate_off(host);
return;
case SDHCI_HOST_CONTROL2:
new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
@@ -582,10 +639,24 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
* For DMA access restore the levels to default value.
*/
m = readl(host->ioaddr + ESDHC_WTMK_LVL);
- if (val & SDHCI_TRNS_DMA)
+ if (val & SDHCI_TRNS_DMA) {
wml = ESDHC_WTMK_LVL_WML_VAL_DEF;
- else
+ } else {
+ u8 ctrl;
wml = ESDHC_WTMK_LVL_WML_VAL_MAX;
+
+ /*
+ * Since DMA mode is already disabled, the DMASEL bits also need
+ * to be cleared. Otherwise, for standard tuning, when the tuning
+ * command is sent, the uSDHC will still prefetch the ADMA script
+ * from a wrong DMA address, and the IOMMU will report errors
+ * showing a lack of TLB mappings.
+ */
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
m &= ~(ESDHC_WTMK_LVL_RD_WML_MASK |
ESDHC_WTMK_LVL_WR_WML_MASK);
m |= (wml << ESDHC_WTMK_LVL_RD_WML_SHIFT) |
@@ -742,12 +813,14 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
int ddr_pre_div = imx_data->is_ddr ? 2 : 1;
int pre_div = 1;
int div = 1;
+ int ret;
u32 temp, val;
if (esdhc_is_usdhc(imx_data)) {
val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
host->ioaddr + ESDHC_VENDOR_SPEC);
+ esdhc_wait_for_card_clock_gate_off(host);
}
if (clock == 0) {
@@ -802,13 +875,18 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
| (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ /* wait for bit 3 of PRSSTAT to be set to make sure the card clock is stable */
+ ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, temp,
+ (temp & ESDHC_CLOCK_STABLE), 2, 100);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc), "card clock still not stable in 100us!.\n");
+
if (esdhc_is_usdhc(imx_data)) {
val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
host->ioaddr + ESDHC_VENDOR_SPEC);
}
- mdelay(1);
}
static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
@@ -983,12 +1061,17 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
*/
static void esdhc_set_strobe_dll(struct sdhci_host *host)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ u32 strobe_delay;
u32 v;
+ int ret;
/* disable clock before enabling strobe dll */
writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
host->ioaddr + ESDHC_VENDOR_SPEC);
+ esdhc_wait_for_card_clock_gate_off(host);
/* force a reset on strobe dll */
writel(ESDHC_STROBE_DLL_CTRL_RESET,
@@ -1000,19 +1083,21 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host)
* enable strobe dll ctrl and adjust the delay target
* for the uSDHC loopback read clock
*/
+ if (imx_data->boarddata.strobe_dll_delay_target)
+ strobe_delay = imx_data->boarddata.strobe_dll_delay_target;
+ else
+ strobe_delay = ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT;
v = ESDHC_STROBE_DLL_CTRL_ENABLE |
ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT |
- (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
+ (strobe_delay << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
- /* wait 5us to make sure strobe dll status register stable */
- udelay(5);
- v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
- if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
- dev_warn(mmc_dev(host->mmc),
- "warning! HS400 strobe DLL status REF not lock!\n");
- if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
+
+ /* wait max 50us to get the REF/SLV lock */
+ ret = readl_poll_timeout(host->ioaddr + ESDHC_STROBE_DLL_STATUS, v,
+ ((v & ESDHC_STROBE_DLL_STS_REF_LOCK) && (v & ESDHC_STROBE_DLL_STS_SLV_LOCK)), 1, 50);
+ if (ret == -ETIMEDOUT)
dev_warn(mmc_dev(host->mmc),
- "warning! HS400 strobe DLL status SLV not lock!\n");
+ "warning! HS400 strobe DLL status REF/SLV not lock in 50us, STROBE DLL status is %x!\n", v);
}
static void esdhc_reset_tuning(struct sdhci_host *host)
@@ -1162,6 +1247,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ struct cqhci_host *cq_host = host->mmc->cqe_private;
int tmp;
if (esdhc_is_usdhc(imx_data)) {
@@ -1238,6 +1324,21 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
tmp &= ~ESDHC_STD_TUNING_EN;
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
}
+
+ /*
+ * On i.MX8MM, we are running Dual Linux OS, with 1st Linux using SD Card
+ * as rootfs storage, 2nd Linux using eMMC as rootfs storage. We let the
+ * the 1st linux configure power/clock for the 2nd Linux.
+ *
+ * When the 2nd Linux is booting into rootfs stage, we let the 1st Linux
+ * to destroy the 2nd linux, then restart the 2nd linux, we met SDHCI dump.
+ * After we clear the pending interrupt and halt CQCTL, issue gone.
+ */
+ if (cq_host) {
+ tmp = cqhci_readl(cq_host, CQHCI_IS);
+ cqhci_writel(cq_host, tmp, CQHCI_IS);
+ cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
+ }
}
}
@@ -1328,6 +1429,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
of_property_read_u32(np, "fsl,tuning-start-tap",
&boarddata->tuning_start_tap);
+ of_property_read_u32(np, "fsl,strobe-dll-delay-target",
+ &boarddata->strobe_dll_delay_target);
if (of_find_property(np, "no-1-8-v", NULL))
host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
@@ -1452,8 +1555,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
pdev->id_entry->driver_data;
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_add_request(&imx_data->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx_data->clk_ipg)) {
@@ -1488,7 +1590,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(imx_data->pinctrl)) {
err = PTR_ERR(imx_data->pinctrl);
- goto disable_ahb_clk;
+ dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n");
}
if (esdhc_is_usdhc(imx_data)) {
@@ -1519,6 +1621,9 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
+ if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
+ host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
+
if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
host->mmc->caps2 |= MMC_CAP2_HS400_ES;
host->mmc_host_ops.hs400_enhanced_strobe =
@@ -1572,7 +1677,7 @@ disable_per_clk:
clk_disable_unprepare(imx_data->clk_per);
free_sdhci:
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_remove_request(&imx_data->pm_qos_req);
+ cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
sdhci_pltfm_free(pdev);
return err;
}
@@ -1595,7 +1700,7 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
clk_disable_unprepare(imx_data->clk_ahb);
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_remove_request(&imx_data->pm_qos_req);
+ cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
sdhci_pltfm_free(pdev);
@@ -1606,6 +1711,8 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
static int sdhci_esdhc_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
if (host->mmc->caps2 & MMC_CAP2_CQE) {
@@ -1614,10 +1721,20 @@ static int sdhci_esdhc_suspend(struct device *dev)
return ret;
}
+ if ((imx_data->socdata->flags & ESDHC_FLAG_STATE_LOST_IN_LPMODE) &&
+ (host->tuning_mode != SDHCI_TUNING_MODE_1)) {
+ mmc_retune_timer_stop(host->mmc);
+ mmc_retune_needed(host->mmc);
+ }
+
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
- return sdhci_suspend_host(host);
+ ret = sdhci_suspend_host(host);
+ if (!ret)
+ return pinctrl_pm_select_sleep_state(dev);
+
+ return ret;
}
static int sdhci_esdhc_resume(struct device *dev)
@@ -1625,6 +1742,10 @@ static int sdhci_esdhc_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
int ret;
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
/* re-initialize hw state in case it's lost in low power mode */
sdhci_esdhc_imx_hwinit(host);
@@ -1667,7 +1788,7 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
clk_disable_unprepare(imx_data->clk_ahb);
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_remove_request(&imx_data->pm_qos_req);
+ cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
return ret;
}
@@ -1680,8 +1801,10 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
int err;
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_add_request(&imx_data->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME)
+ clk_set_rate(imx_data->clk_per, pltfm_host->clock);
err = clk_prepare_enable(imx_data->clk_ahb);
if (err)
@@ -1714,7 +1837,7 @@ disable_ahb_clk:
clk_disable_unprepare(imx_data->clk_ahb);
remove_pm_qos_request:
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_remove_request(&imx_data->pm_qos_req);
+ cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
return err;
}
#endif
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 9289bb4d633e..947212f16bc6 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -31,6 +31,7 @@
/* Present State Register */
#define ESDHC_PRSSTAT 0x24
+#define ESDHC_CLOCK_GATE_OFF 0x00000080
#define ESDHC_CLOCK_STABLE 0x00000008
/* Protocol Control Register */
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index f4f5f0a70cda..225603148d7d 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -261,9 +261,24 @@ static const struct sdhci_iproc_data bcm2835_data = {
.mmc_caps = 0x00000000,
};
+static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
+ .read_l = sdhci_iproc_readl,
+ .read_w = sdhci_iproc_readw,
+ .read_b = sdhci_iproc_readb,
+ .write_l = sdhci_iproc_writel,
+ .write_w = sdhci_iproc_writew,
+ .write_b = sdhci_iproc_writeb,
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_set_power_and_bus_voltage,
+ .get_max_clock = sdhci_iproc_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .ops = &sdhci_iproc_32only_ops,
+ .ops = &sdhci_iproc_bcm2711_ops,
};
static const struct sdhci_iproc_data bcm2711_data = {
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
index 92f30a1db435..4e7cc0680f94 100644
--- a/drivers/mmc/host/sdhci-milbeaut.c
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -121,17 +121,6 @@ static void sdhci_milbeaut_reset(struct sdhci_host *host, u8 mask)
}
}
-static void sdhci_milbeaut_set_power(struct sdhci_host *host,
- unsigned char mode, unsigned short vdd)
-{
- if (!IS_ERR(host->mmc->supply.vmmc)) {
- struct mmc_host *mmc = host->mmc;
-
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
- }
- sdhci_set_power_noreg(host, mode, vdd);
-}
-
static const struct sdhci_ops sdhci_milbeaut_ops = {
.voltage_switch = sdhci_milbeaut_soft_voltage_switch,
.get_min_clock = sdhci_milbeaut_get_min_clock,
@@ -139,7 +128,7 @@ static const struct sdhci_ops sdhci_milbeaut_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_milbeaut_set_power,
+ .set_power = sdhci_set_power_and_bus_voltage,
};
static void sdhci_milbeaut_bridge_reset(struct sdhci_host *host,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3955fa5db43c..a8bcb3f16aa4 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -977,9 +977,21 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
goto out;
}
- config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3);
- config |= CORE_PWRSAVE_DLL;
- writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec3);
+ /*
+ * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
+ * When MCLK is gated off, it is not gated off for less than 0.5us,
+ * and MCLK must be switched on for at least 1us before DATA starts
+ * coming. Controllers with a 14lpp and later tech DLL cannot
+ * guarantee the above requirement, so PWRSAVE_DLL should not be
+ * turned on for host controllers using such a DLL.
+ */
+ if (!msm_host->use_14lpp_dll_reset) {
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_vendor_spec3);
+ config |= CORE_PWRSAVE_DLL;
+ writel_relaxed(config, host->ioaddr +
+ msm_offset->core_vendor_spec3);
+ }
/*
* Drain writebuffer to ensure above DLL calibration
@@ -1811,6 +1823,13 @@ static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}
+static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
+ cqhci_deactivate(host->mmc);
+ sdhci_reset(host, mask);
+}
+
static const struct sdhci_msm_variant_ops mci_var_ops = {
.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
@@ -1849,7 +1868,7 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
static const struct sdhci_ops sdhci_msm_ops = {
- .reset = sdhci_reset,
+ .reset = sdhci_msm_reset,
.set_clock = sdhci_msm_set_clock,
.get_min_clock = sdhci_msm_get_min_clock,
.get_max_clock = sdhci_msm_get_max_clock,
@@ -2068,6 +2087,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index e49b44b4d82e..d4905c106c06 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -325,17 +325,6 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
-static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
-{
- if (!IS_ERR(host->mmc->supply.vmmc)) {
- struct mmc_host *mmc = host->mmc;
-
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
- }
- sdhci_set_power_noreg(host, mode, vdd);
-}
-
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -343,7 +332,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_arasan_set_power,
+ .set_power = sdhci_set_power_and_bus_voltage,
};
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
@@ -358,6 +347,17 @@ static struct sdhci_arasan_of_data sdhci_arasan_data = {
.pdata = &sdhci_arasan_pdata,
};
+static const struct sdhci_pltfm_data sdhci_arasan_zynqmp_pdata = {
+ .ops = &sdhci_arasan_ops,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC,
+};
+
+static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
+ .pdata = &sdhci_arasan_zynqmp_pdata,
+};
+
static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
int cmd_error = 0;
@@ -403,7 +403,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_arasan_set_power,
+ .set_power = sdhci_set_power_and_bus_voltage,
.irq = sdhci_arasan_cqhci_irq,
};
@@ -553,7 +553,7 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
},
{
.compatible = "xlnx,zynqmp-8.9a",
- .data = &sdhci_arasan_data,
+ .data = &sdhci_arasan_zynqmp_data,
},
{ /* sentinel */ }
};
@@ -757,6 +757,50 @@ static const struct clk_ops zynqmp_sampleclk_ops = {
.set_phase = sdhci_zynqmp_sampleclk_set_phase,
};
+static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ sdhci_arasan->clk_data.clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ u16 clk;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Issue DLL Reset */
+ eemi_ops->ioctl(deviceid, IOCTL_SD_DLL_RESET,
+ PM_DLL_RESET_PULSE, 0, NULL);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+ sdhci_enable_clk(host, clk);
+}
+
+static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct clk_hw *hw = &sdhci_arasan->clk_data.sdcardclk_hw;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 device_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 :
+ NODE_SD_1;
+ int err;
+
+ arasan_zynqmp_dll_reset(host, device_id);
+
+ err = sdhci_execute_tuning(mmc, opcode);
+ if (err)
+ return err;
+
+ arasan_zynqmp_dll_reset(host, device_id);
+
+ return 0;
+}
+
/**
* sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
*
@@ -1247,6 +1291,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
zynqmp_clk_data->eemi_ops = eemi_ops;
sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
+ host->mmc_host_ops.execute_tuning =
+ arasan_zynqmp_execute_tuning;
}
arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
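For reference, the probe hunk above works because sdhci keeps a per-host copy of the default MMC callbacks in host->mmc_host_ops, so a platform driver can override a single entry. A minimal sketch of the wrap-the-core-callback pattern used here, with hypothetical foo_* names standing in for the SoC specifics:

/* Sketch only: foo_dll_reset() stands in for the SoC-specific reset. */
static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	foo_dll_reset(host);		/* known PHY state before tuning */
	err = sdhci_execute_tuning(mmc, opcode);
	if (err)
		return err;
	foo_dll_reset(host);		/* relock the DLL on the tuned tap */
	return 0;
}

/* In probe, after the host is allocated: */
host->mmc_host_ops.execute_tuning = foo_execute_tuning;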
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index fcef5c0d0908..c79bff5e2280 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -101,22 +101,6 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
-/*
- * In this specific implementation of the SDHCI controller, the power register
- * needs to have a valid voltage set even when the power supply is managed by
- * an external regulator.
- */
-static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
-{
- if (!IS_ERR(host->mmc->supply.vmmc)) {
- struct mmc_host *mmc = host->mmc;
-
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
- }
- sdhci_set_power_noreg(host, mode, vdd);
-}
-
static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -146,7 +130,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_at91_reset,
.set_uhs_signaling = sdhci_at91_set_uhs_signaling,
- .set_power = sdhci_at91_set_power,
+ .set_power = sdhci_set_power_and_bus_voltage,
};
static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
@@ -205,8 +189,8 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
/* Set capabilities in ro mode. */
writel(0, host->ioaddr + SDMMC_CACR);
- dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
- clk_mul, gck_rate, clk_base_rate);
+ dev_dbg(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
+ clk_mul, gck_rate, clk_base_rate);
/*
* We have to set preset values because it depends on the clk_mul
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index c4978177ef88..1ec74c2d5c17 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -108,6 +108,11 @@ struct sdhci_omap_host {
struct pinctrl *pinctrl;
struct pinctrl_state **pinctrl_state;
bool is_tuning;
+ /* OMAP-specific context save */
+ u32 con;
+ u32 hctl;
+ u32 sysctl;
+ u32 capa;
};
static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
@@ -1235,12 +1240,64 @@ static int sdhci_omap_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
+{
+ omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+}
+
+static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
+{
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa);
+}
+
+static int __maybe_unused sdhci_omap_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_suspend_host(host);
+
+ sdhci_omap_context_save(omap_host);
+
+ pinctrl_pm_select_idle_state(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused sdhci_omap_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ pm_runtime_force_resume(dev);
+
+ pinctrl_pm_select_default_state(dev);
+
+ sdhci_omap_context_restore(omap_host);
+
+ sdhci_resume_host(host);
+
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(sdhci_omap_dev_pm_ops, sdhci_omap_suspend,
+ sdhci_omap_resume);
static struct platform_driver sdhci_omap_driver = {
.probe = sdhci_omap_probe,
.remove = sdhci_omap_remove,
.driver = {
.name = "sdhci-omap",
+ .pm = &sdhci_omap_dev_pm_ops,
.of_match_table = omap_sdhci_match,
},
};
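The ordering in the handlers above matters: the register context must be saved while the device is still powered, i.e. before pm_runtime_force_suspend(), and restored only after pm_runtime_force_resume() has powered it back up. SIMPLE_DEV_PM_OPS then binds the pair to the system-sleep hooks only; roughly, from linux/pm.h of this era:

#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}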
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 525de2454a4d..2527244c2ae1 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -601,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct intel_host *intel_host = sdhci_pci_priv(slot);
+ if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+ return 0;
+
return intel_host->drv_strength;
}
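The added guard translates the host's preferred drive strength into a capability bit and checks it against the mask of driver types the card advertises, falling back to type B (the default, return 0) when unsupported. mmc_driver_type_mask() is a one-line helper from include/linux/mmc/card.h:

#define mmc_driver_type_mask(n)	(1 << (n))

/* Worked example: drv_strength == 2 selects driver type C. */
int wanted = mmc_driver_type_mask(2);	/* 0x4 */
if (!(wanted & card_drv))		/* card does not support type C */
	return 0;			/* fall back to type B */
return 2;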
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index ce15a05f23d4..fd76aa672e02 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -26,6 +26,9 @@
#define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26)
#define GLI_9750_DRIVING_1_VALUE 0xFFF
#define GLI_9750_DRIVING_2_VALUE 0x3
+#define SDHCI_GLI_9750_SEL_1 BIT(29)
+#define SDHCI_GLI_9750_SEL_2 BIT(31)
+#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30))
#define SDHCI_GLI_9750_PLL 0x864
#define SDHCI_GLI_9750_PLL_TX2_INV BIT(23)
@@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host)
GLI_9750_DRIVING_1_VALUE);
driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
GLI_9750_DRIVING_2_VALUE);
+ driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST);
+ driving_value |= SDHCI_GLI_9750_SEL_2;
sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
@@ -334,6 +339,18 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
return value;
}
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+
+ pci_free_irq_vectors(slot->chip->pdev);
+ gli_pcie_enable_msi(slot);
+
+ return sdhci_pci_resume_host(chip);
+}
+#endif
+
static const struct sdhci_ops sdhci_gl9755_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
@@ -348,6 +365,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9755,
.ops = &sdhci_gl9755_ops,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_gli_resume,
+#endif
};
static const struct sdhci_ops sdhci_gl9750_ops = {
@@ -366,4 +386,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = {
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9750,
.ops = &sdhci_gl9750_ops,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_gli_resume,
+#endif
};
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 981bbbe63aff..42ccd123b046 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -163,7 +163,7 @@ struct sdhci_pci_slot {
bool cd_override_level;
void (*hw_reset)(struct sdhci_host *host);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
struct sdhci_pci_chip {
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 2af445b8c325..6301b81cf573 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -25,7 +25,7 @@ struct sdhci_pltfm_host {
unsigned int clock;
u16 xfer_mode_shadow;
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
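The private[0] to private[] conversions in these headers (and in sdhci.h below) replace the old GNU zero-length-array idiom with a C99 flexible array member, which compilers and bounds checkers understand. The layout trick itself is unchanged; a minimal sketch with hypothetical names:

struct example_host {
	unsigned int clock;
	/* must be last: flexible array member, not counted by sizeof() */
	unsigned long private[] ____cacheline_aligned;
};

/* The driver-private area is appended at allocation time: */
struct example_host *h = kzalloc(sizeof(*h) + priv_size, GFP_KERNEL);
void *priv = h->private;	/* start of the private area */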
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index d07b9793380f..2ab42c59e4f8 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include "sdhci-pltfm.h"
+#include "mmc_hsq.h"
/* SDHCI_ARGUMENT2 register high 16bit */
#define SDHCI_SPRD_ARG2_STUFF GENMASK(31, 16)
@@ -379,6 +380,16 @@ static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
return 0;
}
+static void sdhci_sprd_request_done(struct sdhci_host *host,
+ struct mmc_request *mrq)
+{
+ /* Check first whether the request came from the software queue. */
+ if (mmc_hsq_finalize_request(host->mmc, mrq))
+ return;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
@@ -392,6 +403,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
.hw_reset = sdhci_sprd_hw_reset,
.get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
.get_ro = sdhci_sprd_get_ro,
+ .request_done = sdhci_sprd_request_done,
};
static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -521,6 +533,7 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct sdhci_sprd_host *sprd_host;
+ struct mmc_hsq *hsq;
struct clk *clk;
int ret = 0;
@@ -543,7 +556,7 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
sdhci_sprd_voltage_switch;
host->mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_ERASE | MMC_CAP_CMD23;
+ MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
ret = mmc_of_parse(host->mmc);
if (ret)
goto pltfm_free;
@@ -631,6 +644,18 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
sprd_host->flags = host->flags;
+ hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
+ if (!hsq) {
+ ret = -ENOMEM;
+ goto err_cleanup_host;
+ }
+
+ ret = mmc_hsq_init(hsq, host->mmc);
+ if (ret)
+ goto err_cleanup_host;
+
+ host->always_defer_done = true;
+
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
@@ -689,6 +714,7 @@ static int sdhci_sprd_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+ mmc_hsq_suspend(host->mmc);
sdhci_runtime_suspend_host(host);
clk_disable_unprepare(sprd_host->clk_sdio);
@@ -717,6 +743,8 @@ static int sdhci_sprd_runtime_resume(struct device *dev)
goto clk_disable;
sdhci_runtime_resume_host(host, 1);
+ mmc_hsq_resume(host->mmc);
+
return 0;
clk_disable:
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index a25c3a4d3f6c..3e2c5101291d 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -45,6 +45,7 @@
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8
#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
+#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT BIT(0)
#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
@@ -1227,6 +1228,34 @@ static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
return 0;
}
+static void tegra_sdhci_set_timeout(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ u32 val;
+
+ /*
+ * The HW busy detection timeout is based on the programmed data
+ * timeout counter, whose maximum supported value is 11s. That may
+ * not be enough for long operations like cache flush, sleep/awake
+ * and erase.
+ *
+ * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register lets
+ * the host controller wait out the busy state for as long as the
+ * card stays busy, with no HW timeout.
+ *
+ * So, use infinite busy wait mode for operations that may take more
+ * than the maximum HW busy timeout of 11s; otherwise use finite
+ * busy wait mode.
+ */
+ val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ if (cmd && cmd->busy_timeout >= 11 * HZ)
+ val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
+ else
+ val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
+ sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+
+ __sdhci_set_timeout(host, cmd);
+}
+
static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
.write_l = tegra_cqhci_writel,
.enable = sdhci_tegra_cqe_enable,
@@ -1366,6 +1395,7 @@ static const struct sdhci_ops tegra210_sdhci_ops = {
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
.voltage_switch = tegra_sdhci_voltage_switch,
.get_max_clock = tegra_sdhci_get_max_clock,
+ .set_timeout = tegra_sdhci_set_timeout,
};
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
@@ -1403,6 +1433,7 @@ static const struct sdhci_ops tegra186_sdhci_ops = {
.voltage_switch = tegra_sdhci_voltage_switch,
.get_max_clock = tegra_sdhci_get_max_clock,
.irq = sdhci_tegra_cqhci_irq,
+ .set_timeout = tegra_sdhci_set_timeout,
};
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
@@ -1552,8 +1583,8 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
host->mmc->caps |= MMC_CAP_1_8V_DDR;
- /* R1B responses is required to properly manage HW busy detection. */
- host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+ /* HW busy detection is supported, but R1B responses are required. */
+ host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
tegra_sdhci_parse_dt(host);
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 1dea1ba66f7b..4703cd540c7f 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
{
/* Wait for 5ms after setting the 1.8V signal enable bit */
usleep_range(5000, 5500);
+
+ /*
+ * For some reason the controller's Host Control2 register reports
+ * the bit representing 1.8V signaling as 0 when read after it was
+ * written as 1. A subsequent read reports 1.
+ *
+ * Since this may cause some issues, do an empty read of the Host
+ * Control2 register here to circumvent this.
+ */
+ sdhci_readw(host, SDHCI_HOST_CONTROL2);
}
static const struct sdhci_ops sdhci_xenon_ops = {
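The fix above is the classic read-back idiom: a throwaway read forces the preceding write to settle so the next real read observes the bit. A sketch of the sequence around the 1.8V enable bit (SDHCI_CTRL_VDD_180 is the standard sdhci.h definition):

u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

ctrl |= SDHCI_CTRL_VDD_180;		/* request 1.8V signaling */
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

/* Discarded read-back: lets the write settle so the next real
 * read of the register reports the bit as set. */
sdhci_readw(host, SDHCI_HOST_CONTROL2);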
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 63db84481dff..e368f2dabf20 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -9,6 +9,7 @@
* - JMicron (hardware and technical support)
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
@@ -153,7 +154,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
u32 present;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- !mmc_card_is_removable(host->mmc))
+ !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
return;
if (enable) {
@@ -1766,10 +1767,9 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
pre_val = sdhci_get_preset_value(host);
- div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
- >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
+ div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
if (host->clk_mul &&
- (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
+ (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
clk = SDHCI_PROG_CLOCK_MODE;
real_div = div + 1;
clk_mul = host->clk_mul;
@@ -2010,6 +2010,25 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
}
EXPORT_SYMBOL_GPL(sdhci_set_power);
+/*
+ * Some controllers need a valid bus voltage configured in their power
+ * register even when an external regulator supplies the power. This helper
+ * handles that when installed as the controller's sdhci_ops.set_power
+ * callback.
+ */
+void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
+ unsigned char mode,
+ unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
+
/*****************************************************************************\
* *
* MMC callbacks *
@@ -2227,8 +2246,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_enable_preset_value(host, true);
preset = sdhci_get_preset_value(host);
- ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
- >> SDHCI_PRESET_DRV_SHIFT;
+ ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
+ preset);
}
/* Re-enable SD Clock */
@@ -2944,7 +2963,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
spin_unlock_irqrestore(&host->lock, flags);
- mmc_request_done(host->mmc, mrq);
+ if (host->ops->request_done)
+ host->ops->request_done(host, mrq);
+ else
+ mmc_request_done(host->mmc, mrq);
return false;
}
@@ -3247,7 +3269,7 @@ static inline bool sdhci_defer_done(struct sdhci_host *host,
{
struct mmc_data *data = mrq->data;
- return host->pending_reset ||
+ return host->pending_reset || host->always_defer_done ||
((host->flags & SDHCI_REQ_USE_DMA) && data &&
data->host_cookie == COOKIE_MAPPED);
}
@@ -3372,7 +3394,12 @@ out:
/* Process mrqs ready for immediate completion */
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
- if (mrqs_done[i])
+ if (!mrqs_done[i])
+ continue;
+
+ if (host->ops->request_done)
+ host->ops->request_done(host, mrqs_done[i]);
+ else
mmc_request_done(host->mmc, mrqs_done[i]);
}
@@ -3973,9 +4000,6 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc_hostname(mmc), host->version);
}
- if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
- mmc->caps2 &= ~MMC_CAP2_CQE;
-
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
else if (!(host->caps & SDHCI_CAN_DO_SDMA))
@@ -4512,6 +4536,12 @@ int __sdhci_add_host(struct sdhci_host *host)
struct mmc_host *mmc = host->mmc;
int ret;
+ if ((mmc->caps2 & MMC_CAP2_CQE) &&
+ (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
+ mmc->caps2 &= ~MMC_CAP2_CQE;
+ mmc->cqe_ops = NULL;
+ }
+
host->complete_wq = alloc_workqueue("sdhci", flags, 0);
if (!host->complete_wq)
return -ENOMEM;
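The preset-register conversions in this file (together with the GENMASK/BIT defines in sdhci.h below) switch from paired _MASK/_SHIFT constants to FIELD_GET() from linux/bitfield.h, which derives the shift from the mask at compile time. A short worked example:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define SDHCI_PRESET_SDCLK_FREQ_MASK	GENMASK(9, 0)
#define SDHCI_PRESET_DRV_MASK		GENMASK(15, 14)

u16 pre_val = 0x8123;
u16 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); /* 0x123 */
u16 drv = FIELD_GET(SDHCI_PRESET_DRV_MASK, pre_val);	    /* 0x2  */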
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a6a3ddcf97e7..79dffbb731d3 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -9,6 +9,7 @@
#ifndef __SDHCI_HW_H
#define __SDHCI_HW_H
+#include <linux/bits.h>
#include <linux/scatterlist.h>
#include <linux/compiler.h>
#include <linux/types.h>
@@ -267,12 +268,9 @@
#define SDHCI_PRESET_FOR_SDR104 0x6C
#define SDHCI_PRESET_FOR_DDR50 0x6E
#define SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */
-#define SDHCI_PRESET_DRV_MASK 0xC000
-#define SDHCI_PRESET_DRV_SHIFT 14
-#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400
-#define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10
-#define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF
-#define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0
+#define SDHCI_PRESET_DRV_MASK GENMASK(15, 14)
+#define SDHCI_PRESET_CLKGEN_SEL BIT(10)
+#define SDHCI_PRESET_SDCLK_FREQ_MASK GENMASK(9, 0)
#define SDHCI_SLOT_INT_STATUS 0xFC
@@ -537,6 +535,7 @@ struct sdhci_host {
bool irq_wake_enabled; /* IRQ wakeup is enabled */
bool v4_mode; /* Host Version 4 Enable */
bool use_external_dma; /* Host selects to use external DMA */
+ bool always_defer_done; /* Always defer to complete requests */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -613,7 +612,7 @@ struct sdhci_host {
u64 data_timeout;
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
struct sdhci_ops {
@@ -654,6 +653,8 @@ struct sdhci_ops {
void (*voltage_switch)(struct sdhci_host *host);
void (*adma_write_desc)(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len, unsigned int cmd);
+ void (*request_done)(struct sdhci_host *host,
+ struct mmc_request *mrq);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -769,6 +770,9 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
void sdhci_enable_clk(struct sdhci_host *host, u16 clk);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
+ unsigned char mode,
+ unsigned short vdd);
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
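With the prototype exported here, a platform driver that used to open-code the regulator-plus-power-register sequence (as arasan, at91 and am654 did above) just points its ops table at the shared helper. A minimal sketch with a hypothetical foo driver:

static const struct sdhci_ops foo_sdhci_ops = {
	.set_clock	= sdhci_set_clock,
	.set_bus_width	= sdhci_set_bus_width,
	.reset		= sdhci_reset,
	/* program a bus voltage even when vmmc is regulator-driven */
	.set_power	= sdhci_set_power_and_bus_voltage,
};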
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 3afea580fbea..061b4398a4f1 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -81,7 +81,8 @@ static struct regmap_config sdhci_am654_regmap_config = {
struct sdhci_am654_data {
struct regmap *base;
- int otap_del_sel;
+ bool legacy_otapdly;
+ int otap_del_sel[11];
int trm_icp;
int drv_strength;
bool dll_on;
@@ -98,7 +99,27 @@ struct sdhci_am654_driver_data {
#define DLL_PRESENT (1 << 3)
};
-static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+struct timing_data {
+ const char *binding;
+ u32 capability;
+};
+
+static const struct timing_data td[] = {
+ [MMC_TIMING_LEGACY] = {"ti,otap-del-sel-legacy", 0},
+ [MMC_TIMING_MMC_HS] = {"ti,otap-del-sel-mmc-hs", MMC_CAP_MMC_HIGHSPEED},
+ [MMC_TIMING_SD_HS] = {"ti,otap-del-sel-sd-hs", MMC_CAP_SD_HIGHSPEED},
+ [MMC_TIMING_UHS_SDR12] = {"ti,otap-del-sel-sdr12", MMC_CAP_UHS_SDR12},
+ [MMC_TIMING_UHS_SDR25] = {"ti,otap-del-sel-sdr25", MMC_CAP_UHS_SDR25},
+ [MMC_TIMING_UHS_SDR50] = {"ti,otap-del-sel-sdr50", MMC_CAP_UHS_SDR50},
+ [MMC_TIMING_UHS_SDR104] = {"ti,otap-del-sel-sdr104",
+ MMC_CAP_UHS_SDR104},
+ [MMC_TIMING_UHS_DDR50] = {"ti,otap-del-sel-ddr50", MMC_CAP_UHS_DDR50},
+ [MMC_TIMING_MMC_DDR52] = {"ti,otap-del-sel-ddr52", MMC_CAP_DDR},
+ [MMC_TIMING_MMC_HS200] = {"ti,otap-del-sel-hs200", MMC_CAP2_HS200},
+ [MMC_TIMING_MMC_HS400] = {"ti,otap-del-sel-hs400", MMC_CAP2_HS400},
+};
+
+static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
@@ -106,6 +127,73 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
u32 mask, val;
int ret;
+ if (sdhci_am654->flags & FREQSEL_2_BIT) {
+ switch (clock) {
+ case 200000000:
+ sel50 = 0;
+ sel100 = 0;
+ break;
+ case 100000000:
+ sel50 = 0;
+ sel100 = 1;
+ break;
+ default:
+ sel50 = 1;
+ sel100 = 0;
+ }
+
+ /* Configure PHY DLL frequency */
+ mask = SEL50_MASK | SEL100_MASK;
+ val = (sel50 << SEL50_SHIFT) | (sel100 << SEL100_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
+
+ } else {
+ switch (clock) {
+ case 200000000:
+ freqsel = 0x0;
+ break;
+ default:
+ freqsel = 0x4;
+ }
+
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL5, FREQSEL_MASK,
+ freqsel << FREQSEL_SHIFT);
+ }
+ /* Configure DLL TRIM */
+ mask = DLL_TRIM_ICP_MASK;
+ val = sdhci_am654->trm_icp << DLL_TRIM_ICP_SHIFT;
+
+ /* Configure DLL driver strength */
+ mask |= DR_TY_MASK;
+ val |= sdhci_am654->drv_strength << DR_TY_SHIFT;
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1, mask, val);
+
+ /* Enable DLL */
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK,
+ 0x1 << ENDLL_SHIFT);
+ /*
+ * Poll for DLL ready. Use a one second timeout.
+ * This has worked in all experiments done so far.
+ */
+ ret = regmap_read_poll_timeout(sdhci_am654->base, PHY_STAT1, val,
+ val & DLLRDY_MASK, 1000, 1000000);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "DLL failed to relock\n");
+ return;
+ }
+
+ sdhci_am654->dll_on = true;
+}
+
+static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ unsigned char timing = host->mmc->ios.timing;
+ u32 otap_del_sel;
+ u32 otap_del_ena;
+ u32 mask, val;
+
if (sdhci_am654->dll_on) {
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
@@ -116,80 +204,31 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock > CLOCK_TOO_SLOW_HZ) {
/* Setup DLL Output TAP delay */
+ if (sdhci_am654->legacy_otapdly)
+ otap_del_sel = sdhci_am654->otap_del_sel[0];
+ else
+ otap_del_sel = sdhci_am654->otap_del_sel[timing];
+
+ otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
+
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
- val = (1 << OTAPDLYENA_SHIFT) |
- (sdhci_am654->otap_del_sel << OTAPDLYSEL_SHIFT);
- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ val = (otap_del_ena << OTAPDLYENA_SHIFT) |
+ (otap_del_sel << OTAPDLYSEL_SHIFT);
+
/* Write to STRBSEL for HS400 speed mode */
- if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
+ if (timing == MMC_TIMING_MMC_HS400) {
if (sdhci_am654->flags & STRBSEL_4_BIT)
- mask = STRBSEL_4BIT_MASK;
+ mask |= STRBSEL_4BIT_MASK;
else
- mask = STRBSEL_8BIT_MASK;
+ mask |= STRBSEL_8BIT_MASK;
- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask,
- sdhci_am654->strb_sel <<
- STRBSEL_SHIFT);
+ val |= sdhci_am654->strb_sel << STRBSEL_SHIFT;
}
- if (sdhci_am654->flags & FREQSEL_2_BIT) {
- switch (clock) {
- case 200000000:
- sel50 = 0;
- sel100 = 0;
- break;
- case 100000000:
- sel50 = 0;
- sel100 = 1;
- break;
- default:
- sel50 = 1;
- sel100 = 0;
- }
-
- /* Configure PHY DLL frequency */
- mask = SEL50_MASK | SEL100_MASK;
- val = (sel50 << SEL50_SHIFT) | (sel100 << SEL100_SHIFT);
- regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask,
- val);
- } else {
- switch (clock) {
- case 200000000:
- freqsel = 0x0;
- break;
- default:
- freqsel = 0x4;
- }
-
- regmap_update_bits(sdhci_am654->base, PHY_CTRL5,
- FREQSEL_MASK,
- freqsel << FREQSEL_SHIFT);
- }
-
- /* Configure DLL TRIM */
- mask = DLL_TRIM_ICP_MASK;
- val = sdhci_am654->trm_icp << DLL_TRIM_ICP_SHIFT;
-
- /* Configure DLL driver strength */
- mask |= DR_TY_MASK;
- val |= sdhci_am654->drv_strength << DR_TY_SHIFT;
- regmap_update_bits(sdhci_am654->base, PHY_CTRL1, mask, val);
- /* Enable DLL */
- regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK,
- 0x1 << ENDLL_SHIFT);
- /*
- * Poll for DLL ready. Use a one second timeout.
- * Works in all experiments done so far
- */
- ret = regmap_read_poll_timeout(sdhci_am654->base, PHY_STAT1,
- val, val & DLLRDY_MASK, 1000,
- 1000000);
- if (ret) {
- dev_err(mmc_dev(host->mmc), "DLL failed to relock\n");
- return;
- }
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
- sdhci_am654->dll_on = true;
+ if (timing > MMC_TIMING_UHS_SDR25)
+ sdhci_am654_setup_dll(host, clock);
}
}
@@ -198,27 +237,24 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
- int val, mask;
+ unsigned char timing = host->mmc->ios.timing;
+ u32 otap_del_sel;
+ u32 mask, val;
+
+ /* Setup DLL Output TAP delay */
+ if (sdhci_am654->legacy_otapdly)
+ otap_del_sel = sdhci_am654->otap_del_sel[0];
+ else
+ otap_del_sel = sdhci_am654->otap_del_sel[timing];
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
- val = (1 << OTAPDLYENA_SHIFT) |
- (sdhci_am654->otap_del_sel << OTAPDLYSEL_SHIFT);
+ val = (0x1 << OTAPDLYENA_SHIFT) |
+ (otap_del_sel << OTAPDLYSEL_SHIFT);
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
sdhci_set_clock(host, clock);
}
-static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
-{
- if (!IS_ERR(host->mmc->supply.vmmc)) {
- struct mmc_host *mmc = host->mmc;
-
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
- }
- sdhci_set_power_noreg(host, mode, vdd);
-}
-
static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
{
unsigned char timing = host->mmc->ios.timing;
@@ -274,7 +310,7 @@ static struct sdhci_ops sdhci_am654_ops = {
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.set_bus_width = sdhci_set_bus_width,
- .set_power = sdhci_am654_set_power,
+ .set_power = sdhci_set_power_and_bus_voltage,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
@@ -297,7 +333,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.set_bus_width = sdhci_set_bus_width,
- .set_power = sdhci_am654_set_power,
+ .set_power = sdhci_set_power_and_bus_voltage,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
@@ -320,7 +356,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.set_bus_width = sdhci_set_bus_width,
- .set_power = sdhci_am654_set_power,
+ .set_power = sdhci_set_power_and_bus_voltage,
.set_clock = sdhci_j721e_4bit_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
@@ -371,6 +407,55 @@ static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
return ret;
}
+static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ struct sdhci_am654_data *sdhci_am654)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ int i;
+ int ret;
+
+ ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].binding,
+ &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]);
+ if (ret) {
+ /*
+ * ti,otap-del-sel-legacy is mandatory; fall back to the old
+ * binding if it is not found.
+ */
+ ret = device_property_read_u32(dev, "ti,otap-del-sel",
+ &sdhci_am654->otap_del_sel[0]);
+ if (ret) {
+ dev_err(dev, "Couldn't find otap-del-sel\n");
+
+ return ret;
+ }
+
+ dev_info(dev, "Using legacy binding ti,otap-del-sel\n");
+ sdhci_am654->legacy_otapdly = true;
+
+ return 0;
+ }
+
+ for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
+
+ ret = device_property_read_u32(dev, td[i].binding,
+ &sdhci_am654->otap_del_sel[i]);
+ if (ret) {
+ dev_dbg(dev, "Couldn't find %s\n",
+ td[i].binding);
+ /*
+ * Remove the corresponding capability if an
+ * otap-del-sel value is not found.
+ */
+ if (i <= MMC_TIMING_MMC_DDR52)
+ host->mmc->caps &= ~td[i].capability;
+ else
+ host->mmc->caps2 &= ~td[i].capability;
+ }
+ }
+
+ return 0;
+}
+
static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -419,6 +504,10 @@ static int sdhci_am654_init(struct sdhci_host *host)
if (ret)
goto err_cleanup_host;
+ ret = sdhci_am654_get_otap_delay(host, sdhci_am654);
+ if (ret)
+ goto err_cleanup_host;
+
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
@@ -437,11 +526,6 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
int drv_strength;
int ret;
- ret = device_property_read_u32(dev, "ti,otap-del-sel",
- &sdhci_am654->otap_del_sel);
- if (ret)
- return ret;
-
if (sdhci_am654->flags & DLL_PRESENT) {
ret = device_property_read_u32(dev, "ti,trm-icp",
&sdhci_am654->trm_icp);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index c5ba13fae399..b4cf10109162 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -176,20 +176,13 @@ struct tmio_mmc_host {
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
void (*reset)(struct tmio_mmc_host *host);
void (*hw_reset)(struct tmio_mmc_host *host);
- void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
- bool (*check_scc_error)(struct tmio_mmc_host *host);
+ bool (*check_retune)(struct tmio_mmc_host *host);
/*
* Callback needed for tuning to occur; tuning is optional for
* SDR50 and mandatory for SDR104.
*/
- unsigned int (*init_tuning)(struct tmio_mmc_host *host);
- int (*select_tuning)(struct tmio_mmc_host *host);
-
- /* Tuning values: 1 for success, 0 for failure */
- DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
- unsigned int tap_num;
- unsigned long tap_set;
+ int (*execute_tuning)(struct tmio_mmc_host *host, u32 opcode);
void (*prepare_hs400_tuning)(struct tmio_mmc_host *host);
void (*hs400_downgrade)(struct tmio_mmc_host *host);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 1e424bcdbd5f..9520bd94cf43 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -718,38 +718,13 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- int i, ret = 0;
-
- if (!host->init_tuning || !host->select_tuning)
- /* Tuning is not supported */
- goto out;
-
- host->tap_num = host->init_tuning(host);
- if (!host->tap_num)
- /* Tuning is not supported */
- goto out;
-
- if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
- dev_warn_once(&host->pdev->dev,
- "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
- goto out;
- }
-
- bitmap_zero(host->taps, host->tap_num * 2);
-
- /* Issue CMD19 twice for each tap */
- for (i = 0; i < 2 * host->tap_num; i++) {
- if (host->prepare_tuning)
- host->prepare_tuning(host, i % host->tap_num);
+ int ret;
- ret = mmc_send_tuning(mmc, opcode, NULL);
- if (ret == 0)
- set_bit(i, host->taps);
- }
+ if (!host->execute_tuning)
+ return 0;
- ret = host->select_tuning(host);
+ ret = host->execute_tuning(host, opcode);
-out:
if (ret < 0) {
dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
tmio_mmc_hw_reset(mmc);
@@ -843,8 +818,8 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
if (mrq->cmd->error || (mrq->data && mrq->data->error))
tmio_mmc_abort_dma(host);
- /* SCC error means retune, but executed command was still successful */
- if (host->check_scc_error && host->check_scc_error(host))
+ /* An error here means a retune is needed, but the executed command was still successful */
+ if (host->check_retune && host->check_retune(host))
mmc_retune_needed(host->mmc);
/* If SET_BLOCK_COUNT, continue with main command */
@@ -1022,34 +997,7 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static int tmio_mmc_prepare_hs400_tuning(struct mmc_host *mmc,
- struct mmc_ios *ios)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- if (host->prepare_hs400_tuning)
- host->prepare_hs400_tuning(host);
-
- return 0;
-}
-
-static void tmio_mmc_hs400_downgrade(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- if (host->hs400_downgrade)
- host->hs400_downgrade(host);
-}
-
-static void tmio_mmc_hs400_complete(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- if (host->hs400_complete)
- host->hs400_complete(host);
-}
-
-static const struct mmc_host_ops tmio_mmc_ops = {
+static struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
@@ -1058,9 +1006,6 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.multi_io_quirk = tmio_multi_io_quirk,
.hw_reset = tmio_mmc_hw_reset,
.execute_tuning = tmio_mmc_execute_tuning,
- .prepare_hs400_tuning = tmio_mmc_prepare_hs400_tuning,
- .hs400_downgrade = tmio_mmc_hs400_downgrade,
- .hs400_complete = tmio_mmc_hs400_complete,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
@@ -1325,11 +1270,6 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);
-static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
-{
- return host->tap_num && mmc_can_retune(host->mmc);
-}
-
int tmio_mmc_host_runtime_resume(struct device *dev)
{
struct tmio_mmc_host *host = dev_get_drvdata(dev);
@@ -1346,8 +1286,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
tmio_mmc_enable_dma(host, true);
- if (tmio_mmc_can_retune(host) && host->select_tuning(host))
- dev_warn(&host->pdev->dev, "Tuning selection failed\n");
+ mmc_retune_needed(host->mmc);
return 0;
}
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 6ced1b7f642f..739cf63ef6e2 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -95,7 +95,7 @@ struct sd_response_header {
u8 port_number;
u8 command_type;
u8 command_index;
- u8 command_response[0];
+ u8 command_response[];
} __packed;
struct sd_status_header {
@@ -1363,7 +1363,7 @@ static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
int retval;
for (n = 0; n < sdio_funcs; n++) {
struct sdio_func *sf = card->sdio_func[n];
- l += snprintf(vub300->vub_name + l,
+ l += scnprintf(vub300->vub_name + l,
sizeof(vub300->vub_name) - l, "_%04X%04X",
sf->vendor, sf->device);
}
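The snprintf-to-scnprintf swaps in this driver and in the MOST core below close a subtle accumulation bug: snprintf() returns the length the output *would* have had, so once a string is truncated the running offset overshoots the buffer and the next remaining-size argument underflows. scnprintf() returns the bytes actually stored. A sketch of the failure mode:

char buf[16];
size_t l = 0;

/* snprintf() returns the would-be length (18 here), not what fit: */
l += snprintf(buf + l, sizeof(buf) - l, "%s", "0123456789abcdef!!");
/* Now l == 18 > sizeof(buf): buf + l points past the buffer and
 * sizeof(buf) - l underflows to a huge size_t on the next call. */

/* scnprintf() returns what was actually stored (at most size - 1): */
l = 0;
l += scnprintf(buf + l, sizeof(buf) - l, "%s", "0123456789abcdef!!");
/* l == 15 here; later appends simply write nothing more. */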
diff --git a/drivers/most/Kconfig b/drivers/most/Kconfig
new file mode 100644
index 000000000000..58d7999170a7
--- /dev/null
+++ b/drivers/most/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig MOST
+ tristate "MOST support"
+ depends on HAS_DMA && CONFIGFS_FS
+ default n
+ help
+ Say Y here if you want to enable MOST support.
+ This driver needs at least one additional component to enable the
+ desired access from userspace (e.g. character devices) and one that
+ matches the network controller's hardware interface (e.g. USB).
+
+ To compile this driver as a module, choose M here: the
+ module will be called most_core.
+
+ If in doubt, say N here.
diff --git a/drivers/most/Makefile b/drivers/most/Makefile
new file mode 100644
index 000000000000..e810cd3a47ee
--- /dev/null
+++ b/drivers/most/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MOST) += most_core.o
+most_core-y := core.o \
+ configfs.o
diff --git a/drivers/staging/most/configfs.c b/drivers/most/configfs.c
index 9a961222f458..27b0c923597f 100644
--- a/drivers/staging/most/configfs.c
+++ b/drivers/most/configfs.c
@@ -10,8 +10,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/configfs.h>
-
-#include "most.h"
+#include <linux/most.h>
#define MAX_STRING_SIZE 80
diff --git a/drivers/staging/most/core.c b/drivers/most/core.c
index 0c4ae6920d77..f781c46cd4af 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/most/core.c
@@ -20,8 +20,7 @@
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
-
-#include "most.h"
+#include <linux/most.h>
#define MAX_CHANNELS 64
#define STRING_SIZE 80
@@ -472,7 +471,7 @@ static int print_links(struct device *dev, void *data)
list_for_each_entry(c, &iface->p->channel_list, list) {
if (c->pipe0.comp) {
- offs += snprintf(buf + offs,
+ offs += scnprintf(buf + offs,
PAGE_SIZE - offs,
"%s:%s:%s\n",
c->pipe0.comp->name,
@@ -480,7 +479,7 @@ static int print_links(struct device *dev, void *data)
dev_name(&c->dev));
}
if (c->pipe1.comp) {
- offs += snprintf(buf + offs,
+ offs += scnprintf(buf + offs,
PAGE_SIZE - offs,
"%s:%s:%s\n",
c->pipe1.comp->name,
@@ -519,7 +518,7 @@ static ssize_t components_show(struct device_driver *drv, char *buf)
int offs = 0;
list_for_each_entry(comp, &comp_list, list) {
- offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
+ offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
comp->name);
}
return offs;
@@ -1484,7 +1483,7 @@ static void __exit most_exit(void)
ida_destroy(&mdev_id);
}
-module_init(most_init);
+subsys_initcall(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
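The module_init to subsys_initcall change only matters in the built-in case, where it moves most_core two initcall levels earlier so the core is registered before the device-level drivers that hook into it. For reference, the built-in ordering from include/linux/init.h:

/* Built-in initcall levels, earliest first:
 *   core_initcall      (1)
 *   postcore_initcall  (2)
 *   arch_initcall      (3)
 *   subsys_initcall    (4)  <- most_core now registers here
 *   fs_initcall        (5)
 *   device_initcall    (6)  <- module_init() maps here when built-in
 *   late_initcall      (7)
 * When built as a module, subsys_initcall() is just module_init(),
 * so modular behaviour is unchanged. */
subsys_initcall(most_init);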
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 00a79489067c..142c0f9485fe 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -834,7 +834,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
/* Someone else might have been playing with it. */
return -EAGAIN;
}
- /* Fall through */
+ fallthrough;
case FL_READY:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
@@ -907,7 +907,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* Fall through */
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 04b383bc3947..a1f3e1031c3d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -966,8 +966,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* fall through */
-
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -2935,7 +2934,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
- /* fall through */
+ fallthrough;
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 54edae63b92d..270322bca221 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -324,8 +324,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
case FL_JEDEC_QUERY:
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, cmd_addr);
if (map_word_andequal(map, status, status_OK, status_OK)) {
@@ -462,8 +461,7 @@ static int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -756,8 +754,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -998,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
- /* Fall through */
+ fallthrough;
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
@@ -1054,8 +1051,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1201,8 +1197,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e2d4db05aeb3..99b7986002f0 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -109,13 +109,13 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi
case 8:
onecmd |= (onecmd << (chip_mode * 32));
#endif
- /* fall through */
+ fallthrough;
case 4:
onecmd |= (onecmd << (chip_mode * 16));
- /* fall through */
+ fallthrough;
case 2:
onecmd |= (onecmd << (chip_mode * 8));
- /* fall through */
+ fallthrough;
case 1:
;
}
@@ -165,13 +165,13 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map,
case 8:
res |= (onestat >> (chip_mode * 32));
#endif
- /* fall through */
+ fallthrough;
case 4:
res |= (onestat >> (chip_mode * 16));
- /* fall through */
+ fallthrough;
case 2:
res |= (onestat >> (chip_mode * 8));
- /* fall through */
+ fallthrough;
case 1:
;
}
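The comment-to-keyword conversions across these MTD files swap the /* fall through */ marker, visible only to tools that parse comments, for the fallthrough pseudo-keyword, which the compiler itself verifies under -Wimplicit-fallthrough. Roughly how it is defined in include/linux/compiler_attributes.h:

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif

/* Usage: it is a statement, so it sits before the next label. */
switch (width) {
case 4:
	val |= val << 16;
	fallthrough;
case 2:
	val |= val << 8;
	break;
}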
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 36aa082f6db0..c08721b11642 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -329,10 +329,10 @@ static int ustrtoul(const char *cp, char **endp, unsigned int base)
switch (**endp) {
case 'G' :
result *= 1024;
- /* fall through */
+ fallthrough;
case 'M':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'K':
case 'k':
result *= 1024;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 931e5c2481b5..087b5e86d1bf 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -148,10 +148,10 @@ static int parse_num64(uint64_t *num64, char *token)
switch (token[len - 2]) {
case 'G':
shift += 10;
- /* fall through */
+ fallthrough;
case 'M':
shift += 10;
- /* fall through */
+ fallthrough;
case 'k':
shift += 10;
token[len - 2] = 0;
@@ -243,22 +243,25 @@ static int phram_setup(const char *val)
ret = parse_num64(&start, token[1]);
if (ret) {
- kfree(name);
parse_err("illegal start address\n");
+ goto error;
}
ret = parse_num64(&len, token[2]);
if (ret) {
- kfree(name);
parse_err("illegal device length\n");
+ goto error;
}
ret = register_device(name, start, len);
- if (!ret)
- pr_info("%s device: %#llx at %#llx\n", name, len, start);
- else
- kfree(name);
+ if (ret)
+ goto error;
+
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
+ return 0;
+error:
+ kfree(name);
return ret;
}
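The phram rework above is the kernel's standard single-exit unwind idiom: every failure path after the allocation jumps to one label that frees it, instead of repeating kfree() at each call site. A generic sketch, with hypothetical foo_* helpers:

static int foo_setup(const char *val)
{
	char *name = kstrdup(val, GFP_KERNEL);	/* resource acquired first */
	int ret;

	if (!name)
		return -ENOMEM;

	ret = foo_parse(name);		/* hypothetical */
	if (ret)
		goto error;

	ret = foo_register(name);	/* hypothetical */
	if (ret)
		goto error;

	return 0;
error:
	kfree(name);			/* single cleanup point */
	return ret;
}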
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index 08d543b124cd..f350a0809f88 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -11,6 +11,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -57,8 +58,10 @@ static const struct hyperbus_ops am654_hbmc_ops = {
static int am654_hbmc_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct am654_hbmc_priv *priv;
+ struct resource res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -67,6 +70,10 @@ static int am654_hbmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
if (of_property_read_bool(dev->of_node, "mux-controls")) {
struct mux_control *control = devm_mux_control_get(dev, NULL);
@@ -88,6 +95,11 @@ static int am654_hbmc_probe(struct platform_device *pdev)
goto disable_pm;
}
+ priv->hbdev.map.size = resource_size(&res);
+ priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(priv->hbdev.map.virt))
+ return PTR_ERR(priv->hbdev.map.virt);
+
priv->ctlr.dev = dev;
priv->ctlr.ops = &am654_hbmc_ops;
priv->hbdev.ctlr = &priv->ctlr;
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
index 6af9ea34117d..32685e8dd278 100644
--- a/drivers/mtd/hyperbus/hyperbus-core.c
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -10,7 +10,6 @@
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/types.h>
static struct hyperbus_device *map_to_hbdev(struct map_info *map)
@@ -62,7 +61,6 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
struct hyperbus_ctlr *ctlr;
struct device_node *np;
struct map_info *map;
- struct resource res;
struct device *dev;
int ret;
@@ -73,22 +71,15 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
np = hbdev->np;
ctlr = hbdev->ctlr;
- if (!of_device_is_compatible(np, "cypress,hyperflash"))
+ if (!of_device_is_compatible(np, "cypress,hyperflash")) {
+ dev_err(ctlr->dev, "\"cypress,hyperflash\" compatible missing\n");
return -ENODEV;
+ }
hbdev->memtype = HYPERFLASH;
- ret = of_address_to_resource(np, 0, &res);
- if (ret)
- return ret;
-
dev = ctlr->dev;
map = &hbdev->map;
- map->size = resource_size(&res);
- map->virt = devm_ioremap_resource(dev, &res);
- if (IS_ERR(map->virt))
- return PTR_ERR(map->virt);
-
map->name = dev_name(dev);
map->bankwidth = 2;
map->device_node = np;
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 54b176d4319f..af16d3485de0 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -130,7 +130,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
" NoOfBootImageBlocks = %d\n"
" NoOfBinaryPartitions = %d\n"
" NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
+ " BlockMultiplierBits = %d\n"
" FormatFlgs = %d\n"
" OsakVersion = 0x%x\n"
" PercentUsed = %d\n",
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 1efc643c9871..fb1cbc9a2870 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -68,7 +68,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
GFP_KERNEL);
if (!shared) {
- kfree(lpddr);
kfree(mtd);
return NULL;
}
@@ -305,8 +304,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* fall through */
-
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 47602af4ee34..d3d4e987c163 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -34,7 +34,7 @@ struct sa_subdev_info {
struct sa_info {
struct mtd_info *mtd;
int num_subdev;
- struct sa_subdev_info subdev[0];
+ struct sa_subdev_info subdev[];
};
static DEFINE_SPINLOCK(sa1100_vpp_lock);
@@ -81,8 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
default:
printk(KERN_WARNING "SA1100 flash: unknown base address "
"0x%08lx, assuming CS0\n", phys);
- /* Fall through */
-
+ fallthrough;
case SA1100_CS0_PHYS:
subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
break;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index c06b5322d470..078e0f67377d 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -294,12 +294,13 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ int ret;
mutex_lock(&mtdblk->cache_mutex);
- write_cached_data(mtdblk);
+ ret = write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
mtd_sync(dev->mtd);
- return 0;
+ return ret;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index b841008a9eb7..c5935b2f9cd1 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -349,6 +349,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops = {};
uint32_t retlen;
@@ -360,7 +361,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!mtd->_write_oob)
+ if (!master->_write_oob)
return -EOPNOTSUPP;
ops.ooblen = length;
@@ -586,6 +587,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_write_req req;
struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
@@ -597,9 +599,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_data = (const void __user *)(uintptr_t)req.usr_data;
usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
- if (!mtd->_write_oob)
+ if (!master->_write_oob)
return -EOPNOTSUPP;
-
ops.mode = req.mode;
ops.len = (size_t)req.len;
ops.ooblen = (size_t)req.ooblen;
@@ -635,6 +636,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ struct mtd_info *master = mtd_get_master(mtd);
void __user *argp = (void __user *)arg;
int ret = 0;
struct mtd_info_user info;
@@ -824,7 +826,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct nand_oobinfo oi;
- if (!mtd->ooblayout)
+ if (!master->ooblayout)
return -EOPNOTSUPP;
ret = get_oobinfo(mtd, &oi);
@@ -918,7 +920,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct nand_ecclayout_user *usrlay;
- if (!mtd->ooblayout)
+ if (!master->ooblayout)
return -EOPNOTSUPP;
usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
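The mtdchar hunks above and the mtdcore hunks below all follow one recipe from the partition rework: resolve the top-level master device once, call the _xxx hooks on it, and translate partition-relative offsets with mtd_get_master_ofs(). The two helpers live in include/linux/mtd/mtd.h; from memory, they are essentially:

static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
{
	while (mtd->parent)
		mtd = mtd->parent;
	return mtd;
}

static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
{
	while (mtd->parent) {
		ofs += mtd->part.offset;
		mtd = mtd->parent;
	}
	return ofs;
}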
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5fac4355b9c2..29d41003d6e0 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -456,13 +456,14 @@ static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
struct mtd_pairing_info *info)
{
- int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+ int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
if (wunit < 0 || wunit >= npairs)
return -EINVAL;
- if (mtd->pairing && mtd->pairing->get_info)
- return mtd->pairing->get_info(mtd, wunit, info);
+ if (master->pairing && master->pairing->get_info)
+ return master->pairing->get_info(master, wunit, info);
info->group = 0;
info->pair = wunit;
@@ -498,15 +499,16 @@ EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
const struct mtd_pairing_info *info)
{
- int ngroups = mtd_pairing_groups(mtd);
- int npairs = mtd_wunit_per_eb(mtd) / ngroups;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ngroups = mtd_pairing_groups(master);
+ int npairs = mtd_wunit_per_eb(master) / ngroups;
if (!info || info->pair < 0 || info->pair >= npairs ||
info->group < 0 || info->group >= ngroups)
return -EINVAL;
- if (mtd->pairing && mtd->pairing->get_wunit)
- return mtd->pairing->get_wunit(mtd, info);
+ if (master->pairing && master->pairing->get_wunit)
+ return mtd->pairing->get_wunit(master, info);
return info->pair;
}
@@ -524,10 +526,12 @@ EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
*/
int mtd_pairing_groups(struct mtd_info *mtd)
{
- if (!mtd->pairing || !mtd->pairing->ngroups)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->pairing || !master->pairing->ngroups)
return 1;
- return mtd->pairing->ngroups;
+ return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
@@ -551,7 +555,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.id = -1;
config.dev = &mtd->dev;
- config.name = mtd->name;
+ config.name = dev_name(&mtd->dev);
config.owner = THIS_MODULE;
config.reg_read = mtd_nvmem_reg_read;
config.size = mtd->size;
@@ -587,6 +591,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
int add_mtd_device(struct mtd_info *mtd)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_notifier *not;
int i, error;
@@ -608,7 +613,7 @@ int add_mtd_device(struct mtd_info *mtd)
(mtd->_read && mtd->_read_oob)))
return -EINVAL;
- if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+ if (WARN_ON((!mtd->erasesize || !master->_erase) &&
!(mtd->flags & MTD_NO_ERASE)))
return -EINVAL;
@@ -765,7 +770,8 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd)
pr_debug("mtd device won't show a device symlink in sysfs\n");
}
- mtd->orig_flags = mtd->flags;
+ INIT_LIST_HEAD(&mtd->partitions);
+ mutex_init(&mtd->master.partitions_lock);
}
/**
@@ -971,20 +977,26 @@ EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int err;
- if (!try_module_get(mtd->owner))
+ if (!try_module_get(master->owner))
return -ENODEV;
- if (mtd->_get_device) {
- err = mtd->_get_device(mtd);
+ if (master->_get_device) {
+ err = master->_get_device(mtd);
if (err) {
- module_put(mtd->owner);
+ module_put(master->owner);
return err;
}
}
- mtd->usecount++;
+
+ while (mtd->parent) {
+ mtd->usecount++;
+ mtd = mtd->parent;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -1038,13 +1050,18 @@ EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
- --mtd->usecount;
- BUG_ON(mtd->usecount < 0);
+ struct mtd_info *master = mtd_get_master(mtd);
- if (mtd->_put_device)
- mtd->_put_device(mtd);
+ while (mtd->parent) {
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+ mtd = mtd->parent;
+ }
+
+ if (master->_put_device)
+ master->_put_device(master);
- module_put(mtd->owner);
+ module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
@@ -1055,9 +1072,13 @@ EXPORT_SYMBOL_GPL(__put_mtd_device);
*/
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+ u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
+ int ret;
+
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
- if (!mtd->erasesize || !mtd->_erase)
+ if (!mtd->erasesize || !master->_erase)
return -ENOTSUPP;
if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
@@ -1069,7 +1090,14 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
return 0;
ledtrig_mtd_activity();
- return mtd->_erase(mtd, instr);
+
+ instr->addr += mst_ofs;
+ ret = master->_erase(master, instr);
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= mst_ofs;
+
+ instr->addr -= mst_ofs;
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
@@ -1079,30 +1107,36 @@ EXPORT_SYMBOL_GPL(mtd_erase);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
*virt = NULL;
if (phys)
*phys = 0;
- if (!mtd->_point)
+ if (!master->_point)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- return mtd->_point(mtd, from, len, retlen, virt, phys);
+
+ from = mtd_get_master_ofs(mtd, from);
+ return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);
/* We probably shouldn't allow XIP if the unpoint handler isn't NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
- if (!mtd->_unpoint)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_unpoint)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- return mtd->_unpoint(mtd, from, len);
+ return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
@@ -1129,6 +1163,25 @@ unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
+ const struct mtd_ecc_stats *old_stats)
+{
+ struct mtd_ecc_stats diff;
+
+ if (master == mtd)
+ return;
+
+ diff = master->ecc_stats;
+ diff.failed -= old_stats->failed;
+ diff.corrected -= old_stats->corrected;
+
+ while (mtd->parent) {
+ mtd->ecc_stats.failed += diff.failed;
+ mtd->ecc_stats.corrected += diff.corrected;
+ mtd = mtd->parent;
+ }
+}
+
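mtd_update_ecc_stats() adds only the delta accrued by the current call (the master's counters after the read minus the snapshot taken before it) to the issuing partition and each of its ancestors, keeping per-partition counters consistent with the master totals. A small standalone model of the walk, with made-up numbers:

	/* Standalone illustration, not kernel code: propagate a corrected-bits delta. */
	#include <assert.h>

	struct node { int corrected; struct node *parent; };

	static void propagate(struct node *n, int delta)
	{
		for (; n->parent; n = n->parent)	/* the master itself is not touched */
			n->corrected += delta;
	}

	int main(void)
	{
		struct node master = { 13, 0 };		/* was 10; a read corrected 3 bits */
		struct node part = { 0, &master };
		struct node sub = { 0, &part };

		propagate(&sub, 13 - 10);
		assert(sub.corrected == 3 && part.corrected == 3);
		return 0;
	}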
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf)
{
@@ -1171,8 +1224,10 @@ EXPORT_SYMBOL_GPL(mtd_write);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_panic_write)
+ if (!master->_panic_write)
return -EOPNOTSUPP;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
@@ -1183,7 +1238,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
if (!mtd->oops_panic_write)
mtd->oops_panic_write = true;
- return mtd->_panic_write(mtd, to, len, retlen, buf);
+ return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
+ retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
@@ -1222,7 +1278,10 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_ecc_stats old_stats = master->ecc_stats;
int ret_code;
+
ops->retlen = ops->oobretlen = 0;
ret_code = mtd_check_oob_ops(mtd, from, ops);
@@ -1232,14 +1291,17 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
ledtrig_mtd_activity();
/* Check the validity of a potential fallback on mtd->_read */
- if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
+ if (!master->_read_oob && (!master->_read || ops->oobbuf))
return -EOPNOTSUPP;
- if (mtd->_read_oob)
- ret_code = mtd->_read_oob(mtd, from, ops);
+ from = mtd_get_master_ofs(mtd, from);
+ if (master->_read_oob)
+ ret_code = master->_read_oob(master, from, ops);
else
- ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
- ops->datbuf);
+ ret_code = master->_read(master, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ mtd_update_ecc_stats(mtd, master, &old_stats);
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1258,6 +1320,7 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int ret;
ops->retlen = ops->oobretlen = 0;
@@ -1272,14 +1335,16 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
ledtrig_mtd_activity();
/* Check the validity of a potential fallback on mtd->_write */
- if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
+ if (!master->_write_oob && (!master->_write || ops->oobbuf))
return -EOPNOTSUPP;
- if (mtd->_write_oob)
- return mtd->_write_oob(mtd, to, ops);
+ to = mtd_get_master_ofs(mtd, to);
+
+ if (master->_write_oob)
+ return master->_write_oob(master, to, ops);
else
- return mtd->_write(mtd, to, ops->len, &ops->retlen,
- ops->datbuf);
+ return master->_write(master, to, ops->len, &ops->retlen,
+ ops->datbuf);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
@@ -1302,15 +1367,17 @@ EXPORT_SYMBOL_GPL(mtd_write_oob);
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
memset(oobecc, 0, sizeof(*oobecc));
- if (!mtd || section < 0)
+ if (!master || section < 0)
return -EINVAL;
- if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+ if (!master->ooblayout || !master->ooblayout->ecc)
return -ENOTSUPP;
- return mtd->ooblayout->ecc(mtd, section, oobecc);
+ return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
@@ -1334,15 +1401,17 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobfree)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
memset(oobfree, 0, sizeof(*oobfree));
- if (!mtd || section < 0)
+ if (!master || section < 0)
return -EINVAL;
- if (!mtd->ooblayout || !mtd->ooblayout->free)
+ if (!master->ooblayout || !master->ooblayout->free)
return -ENOTSUPP;
- return mtd->ooblayout->free(mtd, section, oobfree);
+ return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
@@ -1651,60 +1720,69 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
- if (!mtd->_get_fact_prot_info)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_get_fact_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
+ return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_read_fact_prot_reg)
+ if (!master->_read_fact_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+ return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
- if (!mtd->_get_user_prot_info)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_get_user_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_get_user_prot_info(mtd, len, retlen, buf);
+ return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_read_user_prot_reg)
+ if (!master->_read_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+ return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int ret;
*retlen = 0;
- if (!mtd->_write_user_prot_reg)
+ if (!master->_write_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+ ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
if (ret)
return ret;
@@ -1718,80 +1796,105 @@ EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
- if (!mtd->_lock_user_prot_reg)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_lock_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_lock_user_prot_reg(mtd, from, len);
+ return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_lock)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_lock)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_lock(mtd, ofs, len);
+ return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_unlock)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_unlock)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_unlock(mtd, ofs, len);
+ return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_is_locked)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_is_locked)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_is_locked(mtd, ofs, len);
+ return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
- if (!mtd->_block_isreserved)
+ if (!master->_block_isreserved)
return 0;
- return mtd->_block_isreserved(mtd, ofs);
+ return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
- if (!mtd->_block_isbad)
+ if (!master->_block_isbad)
return 0;
- return mtd->_block_isbad(mtd, ofs);
+ return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- if (!mtd->_block_markbad)
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (!master->_block_markbad)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return mtd->_block_markbad(mtd, ofs);
+
+ ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
+ if (ret)
+ return ret;
+
+ while (mtd->parent) {
+ mtd->ecc_stats.badblocks++;
+ mtd = mtd->parent;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
@@ -1841,12 +1944,17 @@ static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- if (!mtd->_writev)
+
+ if (!master->_writev)
return default_mtd_writev(mtd, vecs, count, to, retlen);
- return mtd->_writev(mtd, vecs, count, to, retlen);
+
+ return master->_writev(master, vecs, count,
+ mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 7328c066c5ba..3f6025684f58 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -20,339 +20,52 @@
#include "mtdcore.h"
-/* Our partition linked list */
-static LIST_HEAD(mtd_partitions);
-static DEFINE_MUTEX(mtd_partitions_mutex);
-
-/**
- * struct mtd_part - our partition node structure
- *
- * @mtd: struct holding partition details
- * @parent: parent mtd - flash device or another partition
- * @offset: partition offset relative to the *flash device*
- */
-struct mtd_part {
- struct mtd_info mtd;
- struct mtd_info *parent;
- uint64_t offset;
- struct list_head list;
-};
-
-/*
- * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
- * the pointer to that structure.
- */
-static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
-{
- return container_of(mtd, struct mtd_part, mtd);
-}
-
-static u64 part_absolute_offset(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- if (!mtd_is_partition(mtd))
- return 0;
-
- return part_absolute_offset(part->parent) + part->offset;
-}
-
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
*/
-static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- struct mtd_ecc_stats stats;
- int res;
-
- stats = part->parent->ecc_stats;
- res = part->parent->_read(part->parent, from + part->offset, len,
- retlen, buf);
- if (unlikely(mtd_is_eccerr(res)))
- mtd->ecc_stats.failed +=
- part->parent->ecc_stats.failed - stats.failed;
- else
- mtd->ecc_stats.corrected +=
- part->parent->ecc_stats.corrected - stats.corrected;
- return res;
-}
-
-static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_point(part->parent, from + part->offset, len,
- retlen, virt, phys);
-}
-
-static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_unpoint(part->parent, from + part->offset, len);
-}
-
-static int part_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- struct mtd_ecc_stats stats;
- int res;
-
- stats = part->parent->ecc_stats;
- res = part->parent->_read_oob(part->parent, from + part->offset, ops);
- if (unlikely(mtd_is_eccerr(res)))
- mtd->ecc_stats.failed +=
- part->parent->ecc_stats.failed - stats.failed;
- else
- mtd->ecc_stats.corrected +=
- part->parent->ecc_stats.corrected - stats.corrected;
- return res;
-}
-
-static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_read_user_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
- size_t *retlen, struct otp_info *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_user_prot_info(part->parent, len, retlen,
- buf);
-}
-
-static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_read_fact_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
- size_t *retlen, struct otp_info *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_fact_prot_info(part->parent, len, retlen,
- buf);
-}
-
-static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_write(part->parent, to + part->offset, len,
- retlen, buf);
-}
-
-static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_panic_write(part->parent, to + part->offset, len,
- retlen, buf);
-}
-
-static int part_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_write_oob(part->parent, to + part->offset, ops);
-}
-
-static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_write_user_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_lock_user_prot_reg(part->parent, from, len);
-}
-
-static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_writev(part->parent, vecs, count,
- to + part->offset, retlen);
-}
-
-static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- int ret;
-
- instr->addr += part->offset;
- ret = part->parent->_erase(part->parent, instr);
- if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
- instr->fail_addr -= part->offset;
- instr->addr -= part->offset;
-
- return ret;
-}
-
-static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_lock(part->parent, ofs + part->offset, len);
-}
-
-static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_unlock(part->parent, ofs + part->offset, len);
-}
-
-static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_is_locked(part->parent, ofs + part->offset, len);
-}
-
-static void part_sync(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_sync(part->parent);
-}
-
-static int part_suspend(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_suspend(part->parent);
-}
-
-static void part_resume(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_resume(part->parent);
-}
-
-static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- ofs += part->offset;
- return part->parent->_block_isreserved(part->parent, ofs);
-}
-
-static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- ofs += part->offset;
- return part->parent->_block_isbad(part->parent, ofs);
-}
-
-static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- int res;
-
- ofs += part->offset;
- res = part->parent->_block_markbad(part->parent, ofs);
- if (!res)
- mtd->ecc_stats.badblocks++;
- return res;
-}
-
-static int part_get_device(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_device(part->parent);
-}
-
-static void part_put_device(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_put_device(part->parent);
-}
-
-static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return mtd_ooblayout_ecc(part->parent, section, oobregion);
-}
-
-static int part_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return mtd_ooblayout_free(part->parent, section, oobregion);
-}
-
-static const struct mtd_ooblayout_ops part_ooblayout_ops = {
- .ecc = part_ooblayout_ecc,
- .free = part_ooblayout_free,
-};
-
-static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_max_bad_blocks(part->parent,
- ofs + part->offset, len);
-}
-
-static inline void free_partition(struct mtd_part *p)
+static inline void free_partition(struct mtd_info *mtd)
{
- kfree(p->mtd.name);
- kfree(p);
+ kfree(mtd->name);
+ kfree(mtd);
}
-static struct mtd_part *allocate_partition(struct mtd_info *parent,
- const struct mtd_partition *part, int partno,
- uint64_t cur_offset)
+static struct mtd_info *allocate_partition(struct mtd_info *parent,
+ const struct mtd_partition *part,
+ int partno, uint64_t cur_offset)
{
int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
parent->erasesize;
- struct mtd_part *slave;
+ struct mtd_info *child, *master = mtd_get_master(parent);
u32 remainder;
char *name;
u64 tmp;
/* allocate the partition structure */
- slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ child = kzalloc(sizeof(*child), GFP_KERNEL);
name = kstrdup(part->name, GFP_KERNEL);
- if (!name || !slave) {
+ if (!name || !child) {
printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
parent->name);
kfree(name);
- kfree(slave);
+ kfree(child);
return ERR_PTR(-ENOMEM);
}
/* set up the MTD object for this partition */
- slave->mtd.type = parent->type;
- slave->mtd.flags = parent->orig_flags & ~part->mask_flags;
- slave->mtd.orig_flags = slave->mtd.flags;
- slave->mtd.size = part->size;
- slave->mtd.writesize = parent->writesize;
- slave->mtd.writebufsize = parent->writebufsize;
- slave->mtd.oobsize = parent->oobsize;
- slave->mtd.oobavail = parent->oobavail;
- slave->mtd.subpage_sft = parent->subpage_sft;
- slave->mtd.pairing = parent->pairing;
-
- slave->mtd.name = name;
- slave->mtd.owner = parent->owner;
+ child->type = parent->type;
+ child->part.flags = parent->flags & ~part->mask_flags;
+ child->flags = child->part.flags;
+ child->size = part->size;
+ child->writesize = parent->writesize;
+ child->writebufsize = parent->writebufsize;
+ child->oobsize = parent->oobsize;
+ child->oobavail = parent->oobavail;
+ child->subpage_sft = parent->subpage_sft;
+
+ child->name = name;
+ child->owner = parent->owner;
/* NOTE: Historically, we didn't arrange MTDs as a tree out of
* concern for showing the same data in multiple partitions.
@@ -360,134 +73,76 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
* so the MTD_PARTITIONED_MASTER option allows that. The master
* will have device nodes etc only if this is set, so make the
* parent conditional on that option. Note, this is a way to
- * distinguish between the master and the partition in sysfs.
+ * distinguish between the parent and its partitions in sysfs.
*/
- slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
- &parent->dev :
- parent->dev.parent;
- slave->mtd.dev.of_node = part->of_node;
-
- if (parent->_read)
- slave->mtd._read = part_read;
- if (parent->_write)
- slave->mtd._write = part_write;
-
- if (parent->_panic_write)
- slave->mtd._panic_write = part_panic_write;
-
- if (parent->_point && parent->_unpoint) {
- slave->mtd._point = part_point;
- slave->mtd._unpoint = part_unpoint;
- }
-
- if (parent->_read_oob)
- slave->mtd._read_oob = part_read_oob;
- if (parent->_write_oob)
- slave->mtd._write_oob = part_write_oob;
- if (parent->_read_user_prot_reg)
- slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
- if (parent->_read_fact_prot_reg)
- slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
- if (parent->_write_user_prot_reg)
- slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
- if (parent->_lock_user_prot_reg)
- slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
- if (parent->_get_user_prot_info)
- slave->mtd._get_user_prot_info = part_get_user_prot_info;
- if (parent->_get_fact_prot_info)
- slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
- if (parent->_sync)
- slave->mtd._sync = part_sync;
- if (!partno && !parent->dev.class && parent->_suspend &&
- parent->_resume) {
- slave->mtd._suspend = part_suspend;
- slave->mtd._resume = part_resume;
- }
- if (parent->_writev)
- slave->mtd._writev = part_writev;
- if (parent->_lock)
- slave->mtd._lock = part_lock;
- if (parent->_unlock)
- slave->mtd._unlock = part_unlock;
- if (parent->_is_locked)
- slave->mtd._is_locked = part_is_locked;
- if (parent->_block_isreserved)
- slave->mtd._block_isreserved = part_block_isreserved;
- if (parent->_block_isbad)
- slave->mtd._block_isbad = part_block_isbad;
- if (parent->_block_markbad)
- slave->mtd._block_markbad = part_block_markbad;
- if (parent->_max_bad_blocks)
- slave->mtd._max_bad_blocks = part_max_bad_blocks;
-
- if (parent->_get_device)
- slave->mtd._get_device = part_get_device;
- if (parent->_put_device)
- slave->mtd._put_device = part_put_device;
-
- slave->mtd._erase = part_erase;
- slave->parent = parent;
- slave->offset = part->offset;
-
- if (slave->offset == MTDPART_OFS_APPEND)
- slave->offset = cur_offset;
- if (slave->offset == MTDPART_OFS_NXTBLK) {
+ child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
+ &parent->dev : parent->dev.parent;
+ child->dev.of_node = part->of_node;
+ child->parent = parent;
+ child->part.offset = part->offset;
+ INIT_LIST_HEAD(&child->partitions);
+
+ if (child->part.offset == MTDPART_OFS_APPEND)
+ child->part.offset = cur_offset;
+ if (child->part.offset == MTDPART_OFS_NXTBLK) {
tmp = cur_offset;
- slave->offset = cur_offset;
+ child->part.offset = cur_offset;
remainder = do_div(tmp, wr_alignment);
if (remainder) {
- slave->offset += wr_alignment - remainder;
+ child->part.offset += wr_alignment - remainder;
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
- (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+ (unsigned long long)cur_offset,
+ child->part.offset);
}
}
- if (slave->offset == MTDPART_OFS_RETAIN) {
- slave->offset = cur_offset;
- if (parent->size - slave->offset >= slave->mtd.size) {
- slave->mtd.size = parent->size - slave->offset
- - slave->mtd.size;
+ if (child->part.offset == MTDPART_OFS_RETAIN) {
+ child->part.offset = cur_offset;
+ if (parent->size - child->part.offset >= child->size) {
+ child->size = parent->size - child->part.offset -
+ child->size;
} else {
printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
- part->name, parent->size - slave->offset,
- slave->mtd.size);
+ part->name, parent->size - child->part.offset,
+ child->size);
/* register to preserve ordering */
goto out_register;
}
}
- if (slave->mtd.size == MTDPART_SIZ_FULL)
- slave->mtd.size = parent->size - slave->offset;
+ if (child->size == MTDPART_SIZ_FULL)
+ child->size = parent->size - child->part.offset;
- printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
- (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
+ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
+ child->part.offset, child->part.offset + child->size,
+ child->name);
/* let's do some sanity checks */
- if (slave->offset >= parent->size) {
+ if (child->part.offset >= parent->size) {
/* let's register it anyway to preserve ordering */
- slave->offset = 0;
- slave->mtd.size = 0;
+ child->part.offset = 0;
+ child->size = 0;
/* Initialize ->erasesize to make add_mtd_device() happy. */
- slave->mtd.erasesize = parent->erasesize;
-
+ child->erasesize = parent->erasesize;
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
}
- if (slave->offset + slave->mtd.size > parent->size) {
- slave->mtd.size = parent->size - slave->offset;
+ if (child->part.offset + child->size > parent->size) {
+ child->size = parent->size - child->part.offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
- part->name, parent->name, (unsigned long long)slave->mtd.size);
+ part->name, parent->name, child->size);
}
if (parent->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = parent->numeraseregions;
- u64 end = slave->offset + slave->mtd.size;
+ u64 end = child->part.offset + child->size;
struct mtd_erase_region_info *regions = parent->eraseregions;
/* Find the first erase region which is part of this
* partition. */
- for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+ for (i = 0; i < max && regions[i].offset <= child->part.offset;
+ i++)
;
/* The loop searched for the region _behind_ the first one */
if (i > 0)
@@ -495,70 +150,68 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
/* Pick biggest erasesize */
for (; i < max && regions[i].offset < end; i++) {
- if (slave->mtd.erasesize < regions[i].erasesize) {
- slave->mtd.erasesize = regions[i].erasesize;
- }
+ if (child->erasesize < regions[i].erasesize)
+ child->erasesize = regions[i].erasesize;
}
- BUG_ON(slave->mtd.erasesize == 0);
+ BUG_ON(child->erasesize == 0);
} else {
/* Single erase size */
- slave->mtd.erasesize = parent->erasesize;
+ child->erasesize = parent->erasesize;
}
/*
- * Slave erasesize might differ from the master one if the master
+ * Child erasesize might differ from the parent one if the parent
* exposes several regions with different erasesize. Adjust
* wr_alignment accordingly.
*/
- if (!(slave->mtd.flags & MTD_NO_ERASE))
- wr_alignment = slave->mtd.erasesize;
+ if (!(child->flags & MTD_NO_ERASE))
+ wr_alignment = child->erasesize;
- tmp = part_absolute_offset(parent) + slave->offset;
+ tmp = mtd_get_master_ofs(child, 0);
remainder = do_div(tmp, wr_alignment);
- if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+ if ((child->flags & MTD_WRITEABLE) && remainder) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
* _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
+ child->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
part->name);
}
- tmp = part_absolute_offset(parent) + slave->mtd.size;
+ tmp = mtd_get_master_ofs(child, 0) + child->size;
remainder = do_div(tmp, wr_alignment);
- if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
+ if ((child->flags & MTD_WRITEABLE) && remainder) {
+ child->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
part->name);
}
- mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
- slave->mtd.ecc_step_size = parent->ecc_step_size;
- slave->mtd.ecc_strength = parent->ecc_strength;
- slave->mtd.bitflip_threshold = parent->bitflip_threshold;
+ child->ecc_step_size = parent->ecc_step_size;
+ child->ecc_strength = parent->ecc_strength;
+ child->bitflip_threshold = parent->bitflip_threshold;
- if (parent->_block_isbad) {
+ if (master->_block_isbad) {
uint64_t offs = 0;
- while (offs < slave->mtd.size) {
- if (mtd_block_isreserved(parent, offs + slave->offset))
- slave->mtd.ecc_stats.bbtblocks++;
- else if (mtd_block_isbad(parent, offs + slave->offset))
- slave->mtd.ecc_stats.badblocks++;
- offs += slave->mtd.erasesize;
+ while (offs < child->size) {
+ if (mtd_block_isreserved(child, offs))
+ child->ecc_stats.bbtblocks++;
+ else if (mtd_block_isbad(child, offs))
+ child->ecc_stats.badblocks++;
+ offs += child->erasesize;
}
}
out_register:
- return slave;
+ return child;
}
static ssize_t mtd_partition_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- struct mtd_part *part = mtd_to_part(mtd);
- return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset);
+
+ return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset);
}
static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
@@ -568,9 +221,9 @@ static const struct attribute *mtd_partition_attrs[] = {
NULL
};
-static int mtd_add_partition_attrs(struct mtd_part *new)
+static int mtd_add_partition_attrs(struct mtd_info *new)
{
- int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
+ int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);
if (ret)
printk(KERN_WARNING
"mtd: failed to create partition attrs, err=%d\n", ret);
@@ -580,8 +233,9 @@ static int mtd_add_partition_attrs(struct mtd_part *new)
int mtd_add_partition(struct mtd_info *parent, const char *name,
long long offset, long long length)
{
+ struct mtd_info *master = mtd_get_master(parent);
struct mtd_partition part;
- struct mtd_part *new;
+ struct mtd_info *child;
int ret = 0;
/* the direct offset is expected */
@@ -600,28 +254,28 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
part.size = length;
part.offset = offset;
- new = allocate_partition(parent, &part, -1, offset);
- if (IS_ERR(new))
- return PTR_ERR(new);
+ child = allocate_partition(parent, &part, -1, offset);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
- mutex_lock(&mtd_partitions_mutex);
- list_add(&new->list, &mtd_partitions);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_add_tail(&child->part.node, &parent->partitions);
+ mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(&new->mtd);
+ ret = add_mtd_device(child);
if (ret)
goto err_remove_part;
- mtd_add_partition_attrs(new);
+ mtd_add_partition_attrs(child);
return 0;
err_remove_part:
- mutex_lock(&mtd_partitions_mutex);
- list_del(&new->list);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_del(&child->part.node);
+ mutex_unlock(&master->master.partitions_lock);
- free_partition(new);
+ free_partition(child);
return ret;
}
@@ -630,119 +284,142 @@ EXPORT_SYMBOL_GPL(mtd_add_partition);
/**
* __mtd_del_partition - delete MTD partition
*
- * @priv: internal MTD struct for partition to be deleted
+ * @priv: MTD structure to be deleted
*
* This function must be called with the partitions mutex locked.
*/
-static int __mtd_del_partition(struct mtd_part *priv)
+static int __mtd_del_partition(struct mtd_info *mtd)
{
- struct mtd_part *child, *next;
+ struct mtd_info *child, *next;
int err;
- list_for_each_entry_safe(child, next, &mtd_partitions, list) {
- if (child->parent == &priv->mtd) {
- err = __mtd_del_partition(child);
- if (err)
- return err;
- }
+ list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ err = __mtd_del_partition(child);
+ if (err)
+ return err;
}
- sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);
+ sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
- err = del_mtd_device(&priv->mtd);
+ err = del_mtd_device(mtd);
if (err)
return err;
- list_del(&priv->list);
- free_partition(priv);
+ list_del(&mtd->part.node);
+ free_partition(mtd);
return 0;
}
/*
* This function unregisters and destroys all slave MTD objects which are
- * attached to the given MTD object.
+ * attached to the given MTD object, recursively.
*/
-int del_mtd_partitions(struct mtd_info *mtd)
+static int __del_mtd_partitions(struct mtd_info *mtd)
{
- struct mtd_part *slave, *next;
+ struct mtd_info *child, *next;
+ LIST_HEAD(tmp_list);
int ret, err = 0;
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry_safe(slave, next, &mtd_partitions, list)
- if (slave->parent == mtd) {
- ret = __mtd_del_partition(slave);
- if (ret < 0)
- err = ret;
+ list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ if (mtd_has_partitions(child))
+ del_mtd_partitions(child);
+
+ pr_info("Deleting %s MTD partition\n", child->name);
+ ret = del_mtd_device(child);
+ if (ret < 0) {
+ pr_err("Error when deleting partition \"%s\" (%d)\n",
+ child->name, ret);
+ err = ret;
+ continue;
}
- mutex_unlock(&mtd_partitions_mutex);
+
+ list_del(&child->part.node);
+ free_partition(child);
+ }
return err;
}
+int del_mtd_partitions(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);
+
+ mutex_lock(&master->master.partitions_lock);
+ ret = __del_mtd_partitions(mtd);
+ mutex_unlock(&master->master.partitions_lock);
+
+ return ret;
+}
+
int mtd_del_partition(struct mtd_info *mtd, int partno)
{
- struct mtd_part *slave, *next;
+ struct mtd_info *child, *master = mtd_get_master(mtd);
int ret = -EINVAL;
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry_safe(slave, next, &mtd_partitions, list)
- if ((slave->parent == mtd) &&
- (slave->mtd.index == partno)) {
- ret = __mtd_del_partition(slave);
+ mutex_lock(&master->master.partitions_lock);
+ list_for_each_entry(child, &mtd->partitions, part.node) {
+ if (child->index == partno) {
+ ret = __mtd_del_partition(child);
break;
}
- mutex_unlock(&mtd_partitions_mutex);
+ }
+ mutex_unlock(&master->master.partitions_lock);
return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
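mtd_add_partition() and mtd_del_partition() keep their prototypes; only the bookkeeping moved from the old global list onto the per-parent partitions list guarded by the master's lock. A hedged usage sketch (the device name and partition number below are made up for illustration):

	struct mtd_info *parent = get_mtd_device_nm("spi0.0");	/* hypothetical device */

	if (!IS_ERR(parent)) {
		int err = mtd_add_partition(parent, "boot", 0, SZ_1M);

		if (!err)
			err = mtd_del_partition(parent, 1);	/* partno = child's mtd index */
		put_mtd_device(parent);
	}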
/*
- * This function, given a master MTD object and a partition table, creates
- * and registers slave MTD objects which are bound to the master according to
- * the partition definitions.
+ * This function, given a parent MTD object and a partition table, creates
+ * and registers the child MTD objects which are bound to the parent according
+ * to the partition definitions.
*
- * For historical reasons, this function's caller only registers the master
+ * For historical reasons, this function's caller only registers the parent
* if the MTD_PARTITIONED_MASTER config option is set.
*/
-int add_mtd_partitions(struct mtd_info *master,
+int add_mtd_partitions(struct mtd_info *parent,
const struct mtd_partition *parts,
int nbparts)
{
- struct mtd_part *slave;
+ struct mtd_info *child, *master = mtd_get_master(parent);
uint64_t cur_offset = 0;
int i, ret;
- printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
+ nbparts, parent->name);
for (i = 0; i < nbparts; i++) {
- slave = allocate_partition(master, parts + i, i, cur_offset);
- if (IS_ERR(slave)) {
- ret = PTR_ERR(slave);
+ child = allocate_partition(parent, parts + i, i, cur_offset);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
goto err_del_partitions;
}
- mutex_lock(&mtd_partitions_mutex);
- list_add(&slave->list, &mtd_partitions);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_add_tail(&child->part.node, &parent->partitions);
+ mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(&slave->mtd);
+ ret = add_mtd_device(child);
if (ret) {
- mutex_lock(&mtd_partitions_mutex);
- list_del(&slave->list);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_del(&child->part.node);
+ mutex_unlock(&master->master.partitions_lock);
- free_partition(slave);
+ free_partition(child);
goto err_del_partitions;
}
- mtd_add_partition_attrs(slave);
+ mtd_add_partition_attrs(child);
+
/* Look for subpartitions */
- parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
+ parse_mtd_partitions(child, parts[i].types, NULL);
- cur_offset = slave->offset + slave->mtd.size;
+ cur_offset = child->part.offset + child->size;
}
return 0;
@@ -1023,29 +700,11 @@ void mtd_part_parser_cleanup(struct mtd_partitions *parts)
}
}
-int mtd_is_partition(const struct mtd_info *mtd)
-{
- struct mtd_part *part;
- int ispart = 0;
-
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry(part, &mtd_partitions, list)
- if (&part->mtd == mtd) {
- ispart = 1;
- break;
- }
- mutex_unlock(&mtd_partitions_mutex);
-
- return ispart;
-}
-EXPORT_SYMBOL_GPL(mtd_is_partition);
-
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
- if (!mtd_is_partition(mtd))
- return mtd->size;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
- return mtd_get_device_size(mtd_to_part(mtd)->parent);
+ return master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index d5326d19b136..ec18ade33262 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -3259,7 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd)
switch (density) {
case ONENAND_DEVICE_DENSITY_8Gb:
this->options |= ONENAND_HAS_NOP_1;
- /* fall through */
+ fallthrough;
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 8312182088c1..d66dab25df20 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -19,15 +19,17 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
/*
* MTD structure for E3 (Delta)
*/
-struct ams_delta_nand {
+struct gpio_nand {
struct nand_controller base;
struct nand_chip nand_chip;
struct gpio_desc *gpiod_rdy;
@@ -39,41 +41,20 @@ struct ams_delta_nand {
struct gpio_desc *gpiod_cle;
struct gpio_descs *data_gpiods;
bool data_in;
+ unsigned int tRP;
+ unsigned int tWP;
+ u8 (*io_read)(struct gpio_nand *this);
+ void (*io_write)(struct gpio_nand *this, u8 byte);
};
-/*
- * Define partitions for flash devices
- */
-
-static const struct mtd_partition partition_info[] = {
- { .name = "Kernel",
- .offset = 0,
- .size = 3 * SZ_1M + SZ_512K },
- { .name = "u-boot",
- .offset = 3 * SZ_1M + SZ_512K,
- .size = SZ_256K },
- { .name = "u-boot params",
- .offset = 3 * SZ_1M + SZ_512K + SZ_256K,
- .size = SZ_256K },
- { .name = "Amstrad LDR",
- .offset = 4 * SZ_1M,
- .size = SZ_256K },
- { .name = "File system",
- .offset = 4 * SZ_1M + 1 * SZ_256K,
- .size = 27 * SZ_1M },
- { .name = "PBL reserved",
- .offset = 32 * SZ_1M - 3 * SZ_256K,
- .size = 3 * SZ_256K },
-};
-
-static void ams_delta_write_commit(struct ams_delta_nand *priv)
+static void gpio_nand_write_commit(struct gpio_nand *priv)
{
- gpiod_set_value(priv->gpiod_nwe, 0);
- ndelay(40);
gpiod_set_value(priv->gpiod_nwe, 1);
+ ndelay(priv->tWP);
+ gpiod_set_value(priv->gpiod_nwe, 0);
}
-static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
+static void gpio_nand_io_write(struct gpio_nand *priv, u8 byte)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
@@ -81,10 +62,10 @@ static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
data_gpiods->info, values);
- ams_delta_write_commit(priv);
+ gpio_nand_write_commit(priv);
}
-static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
+static void gpio_nand_dir_output(struct gpio_nand *priv, u8 byte)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
@@ -94,30 +75,30 @@ static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
gpiod_direction_output_raw(data_gpiods->desc[i],
test_bit(i, values));
- ams_delta_write_commit(priv);
+ gpio_nand_write_commit(priv);
priv->data_in = false;
}
-static u8 ams_delta_io_read(struct ams_delta_nand *priv)
+static u8 gpio_nand_io_read(struct gpio_nand *priv)
{
u8 res;
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
- gpiod_set_value(priv->gpiod_nre, 0);
- ndelay(40);
+ gpiod_set_value(priv->gpiod_nre, 1);
+ ndelay(priv->tRP);
gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
data_gpiods->info, values);
- gpiod_set_value(priv->gpiod_nre, 1);
+ gpiod_set_value(priv->gpiod_nre, 0);
res = values[0];
return res;
}
-static void ams_delta_dir_input(struct ams_delta_nand *priv)
+static void gpio_nand_dir_input(struct gpio_nand *priv)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
int i;
@@ -128,68 +109,67 @@ static void ams_delta_dir_input(struct ams_delta_nand *priv)
priv->data_in = true;
}
-static void ams_delta_write_buf(struct ams_delta_nand *priv, const u8 *buf,
- int len)
+static void gpio_nand_write_buf(struct gpio_nand *priv, const u8 *buf, int len)
{
int i = 0;
if (len > 0 && priv->data_in)
- ams_delta_dir_output(priv, buf[i++]);
+ gpio_nand_dir_output(priv, buf[i++]);
while (i < len)
- ams_delta_io_write(priv, buf[i++]);
+ priv->io_write(priv, buf[i++]);
}
-static void ams_delta_read_buf(struct ams_delta_nand *priv, u8 *buf, int len)
+static void gpio_nand_read_buf(struct gpio_nand *priv, u8 *buf, int len)
{
int i;
- if (!priv->data_in)
- ams_delta_dir_input(priv);
+ if (priv->data_gpiods && !priv->data_in)
+ gpio_nand_dir_input(priv);
for (i = 0; i < len; i++)
- buf[i] = ams_delta_io_read(priv);
+ buf[i] = priv->io_read(priv);
}
-static void ams_delta_ctrl_cs(struct ams_delta_nand *priv, bool assert)
+static void gpio_nand_ctrl_cs(struct gpio_nand *priv, bool assert)
{
- gpiod_set_value(priv->gpiod_nce, assert ? 0 : 1);
+ gpiod_set_value(priv->gpiod_nce, assert);
}
-static int ams_delta_exec_op(struct nand_chip *this,
+static int gpio_nand_exec_op(struct nand_chip *this,
const struct nand_operation *op, bool check_only)
{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
+ struct gpio_nand *priv = nand_get_controller_data(this);
const struct nand_op_instr *instr;
int ret = 0;
if (check_only)
return 0;
- ams_delta_ctrl_cs(priv, 1);
+ gpio_nand_ctrl_cs(priv, 1);
for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
switch (instr->type) {
case NAND_OP_CMD_INSTR:
gpiod_set_value(priv->gpiod_cle, 1);
- ams_delta_write_buf(priv, &instr->ctx.cmd.opcode, 1);
+ gpio_nand_write_buf(priv, &instr->ctx.cmd.opcode, 1);
gpiod_set_value(priv->gpiod_cle, 0);
break;
case NAND_OP_ADDR_INSTR:
gpiod_set_value(priv->gpiod_ale, 1);
- ams_delta_write_buf(priv, instr->ctx.addr.addrs,
+ gpio_nand_write_buf(priv, instr->ctx.addr.addrs,
instr->ctx.addr.naddrs);
gpiod_set_value(priv->gpiod_ale, 0);
break;
case NAND_OP_DATA_IN_INSTR:
- ams_delta_read_buf(priv, instr->ctx.data.buf.in,
+ gpio_nand_read_buf(priv, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
- ams_delta_write_buf(priv, instr->ctx.data.buf.out,
+ gpio_nand_write_buf(priv, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
@@ -206,28 +186,61 @@ static int ams_delta_exec_op(struct nand_chip *this,
break;
}
- ams_delta_ctrl_cs(priv, 0);
+ gpio_nand_ctrl_cs(priv, 0);
return ret;
}
-static const struct nand_controller_ops ams_delta_ops = {
- .exec_op = ams_delta_exec_op,
+static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline,
+ const struct nand_data_interface *cf)
+{
+ struct gpio_nand *priv = nand_get_controller_data(this);
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
+ struct device *dev = &nand_to_mtd(this)->dev;
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ if (priv->gpiod_nre) {
+ priv->tRP = DIV_ROUND_UP(sdr->tRP_min, 1000);
+ dev_dbg(dev, "using %u ns read pulse width\n", priv->tRP);
+ }
+
+ priv->tWP = DIV_ROUND_UP(sdr->tWP_min, 1000);
+ dev_dbg(dev, "using %u ns write pulse width\n", priv->tWP);
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+ .exec_op = gpio_nand_exec_op,
+ .setup_data_interface = gpio_nand_setup_data_interface,
};
/*
* Main initialization routine
*/
-static int ams_delta_init(struct platform_device *pdev)
+static int gpio_nand_probe(struct platform_device *pdev)
{
- struct ams_delta_nand *priv;
+ struct gpio_nand_platdata *pdata = dev_get_platdata(&pdev->dev);
+ const struct mtd_partition *partitions = NULL;
+ int num_partitions = 0;
+ struct gpio_nand *priv;
struct nand_chip *this;
struct mtd_info *mtd;
- struct gpio_descs *data_gpiods;
+ int (*probe)(struct platform_device *pdev, struct gpio_nand *priv);
int err = 0;
+ if (pdata) {
+ partitions = pdata->parts;
+ num_partitions = pdata->num_parts;
+ }
+
/* Allocate memory for MTD device structure and private data */
- priv = devm_kzalloc(&pdev->dev, sizeof(struct ams_delta_nand),
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_nand),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -238,6 +251,7 @@ static int ams_delta_init(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
nand_set_controller_data(this, priv);
+ nand_set_flash_node(this, pdev->dev.of_node);
priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
if (IS_ERR(priv->gpiod_rdy)) {
@@ -251,29 +265,33 @@ static int ams_delta_init(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- /* Set chip enabled, but */
- priv->gpiod_nwp = devm_gpiod_get(&pdev->dev, "nwp", GPIOD_OUT_HIGH);
+ /* Set chip enabled but write protected */
+ priv->gpiod_nwp = devm_gpiod_get_optional(&pdev->dev, "nwp",
+ GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nwp)) {
err = PTR_ERR(priv->gpiod_nwp);
dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
+ priv->gpiod_nce = devm_gpiod_get_optional(&pdev->dev, "nce",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nce)) {
err = PTR_ERR(priv->gpiod_nce);
dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nre = devm_gpiod_get(&pdev->dev, "nre", GPIOD_OUT_HIGH);
+ priv->gpiod_nre = devm_gpiod_get_optional(&pdev->dev, "nre",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nre)) {
err = PTR_ERR(priv->gpiod_nre);
dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nwe = devm_gpiod_get(&pdev->dev, "nwe", GPIOD_OUT_HIGH);
+ priv->gpiod_nwe = devm_gpiod_get_optional(&pdev->dev, "nwe",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nwe)) {
err = PTR_ERR(priv->gpiod_nwe);
dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
@@ -295,28 +313,62 @@ static int ams_delta_init(struct platform_device *pdev)
}
/* Request array of data pins, initialize them as input */
- data_gpiods = devm_gpiod_get_array(&pdev->dev, "data", GPIOD_IN);
- if (IS_ERR(data_gpiods)) {
- err = PTR_ERR(data_gpiods);
+ priv->data_gpiods = devm_gpiod_get_array_optional(&pdev->dev, "data",
+ GPIOD_IN);
+ if (IS_ERR(priv->data_gpiods)) {
+ err = PTR_ERR(priv->data_gpiods);
dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
return err;
}
- priv->data_gpiods = data_gpiods;
- priv->data_in = true;
+ if (priv->data_gpiods) {
+ if (!priv->gpiod_nwe) {
+ dev_err(&pdev->dev,
+ "mandatory NWE pin not provided by platform\n");
+ return -ENODEV;
+ }
- /* Initialize the NAND controller object embedded in ams_delta_nand. */
- priv->base.ops = &ams_delta_ops;
+ priv->io_read = gpio_nand_io_read;
+ priv->io_write = gpio_nand_io_write;
+ priv->data_in = true;
+ }
+
+ if (pdev->id_entry)
+ probe = (void *) pdev->id_entry->driver_data;
+ else
+ probe = of_device_get_match_data(&pdev->dev);
+ if (probe)
+ err = probe(pdev, priv);
+ if (err)
+ return err;
+
+ if (!priv->io_read || !priv->io_write) {
+ dev_err(&pdev->dev, "incomplete device configuration\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the NAND controller object embedded in gpio_nand. */
+ priv->base.ops = &gpio_nand_ops;
nand_controller_init(&priv->base);
this->controller = &priv->base;
+ /*
+ * FIXME: We should release write protection only after nand_scan() to
+ * be on the safe side but we can't do that until we have a generic way
+ * to assert/deassert WP from the core. Even if the core shouldn't
+ * write things in the nand_scan() path, it should have control on this
+ * pin just in case we ever need to disable write protection during
+ * chip detection/initialization.
+ */
+ /* Release write protection */
+ gpiod_set_value(priv->gpiod_nwp, 0);
+
/* Scan to find existence of the device */
err = nand_scan(this, 1);
if (err)
return err;
/* Register the partitions */
- err = mtd_device_register(mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ err = mtd_device_register(mtd, partitions, num_partitions);
if (err)
goto err_nand_cleanup;
@@ -331,26 +383,47 @@ err_nand_cleanup:
/*
* Clean up routine
*/
-static int ams_delta_cleanup(struct platform_device *pdev)
+static int gpio_nand_remove(struct platform_device *pdev)
{
- struct ams_delta_nand *priv = platform_get_drvdata(pdev);
+ struct gpio_nand *priv = platform_get_drvdata(pdev);
struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+ /* Apply write protection */
+ gpiod_set_value(priv->gpiod_nwp, 1);
+
/* Unregister device */
nand_release(mtd_to_nand(mtd));
return 0;
}
-static struct platform_driver ams_delta_nand_driver = {
- .probe = ams_delta_init,
- .remove = ams_delta_cleanup,
+static const struct of_device_id gpio_nand_of_id_table[] = {
+ {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
+
+static const struct platform_device_id gpio_nand_plat_id_table[] = {
+ {
+ .name = "ams-delta-nand",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove = gpio_nand_remove,
+ .id_table = gpio_nand_plat_id_table,
.driver = {
.name = "ams-delta-nand",
+ .of_match_table = of_match_ptr(gpio_nand_of_id_table),
},
};
-module_platform_driver(ams_delta_nand_driver);
+module_platform_driver(gpio_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
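With the rename, board- or SoC-specific wiring is expected to come from a probe hook carried in the platform id table's driver_data or in the OF match data; the hook may override io_read/io_write (and request extra GPIOs) before the common code checks that both accessors are set. A hypothetical entry wiring in such a hook (all names invented for illustration):

	static int my_board_init(struct platform_device *pdev, struct gpio_nand *priv)
	{
		priv->io_read = gpio_nand_io_read;	/* or latch-based accessors */
		priv->io_write = gpio_nand_io_write;
		return 0;
	}

	static const struct platform_device_id my_id_table[] = {
		{ .name = "my-board-nand", .driver_data = (kernel_ulong_t)my_board_init },
		{ /* sentinel */ },
	};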
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 44518dada75b..8f9ffb46a09f 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -102,6 +102,45 @@ struct brcm_nand_dma_desc {
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS 100
+#define EDU_CMD_WRITE 0x00
+#define EDU_CMD_READ 0x01
+#define EDU_STATUS_ACTIVE BIT(0)
+#define EDU_ERR_STATUS_ERRACK BIT(0)
+#define EDU_DONE_MASK GENMASK(1, 0)
+
+#define EDU_CONFIG_MODE_NAND BIT(0)
+#define EDU_CONFIG_SWAP_BYTE BIT(1)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define EDU_CONFIG_SWAP_CFG EDU_CONFIG_SWAP_BYTE
+#else
+#define EDU_CONFIG_SWAP_CFG 0
+#endif
+
+/* edu registers */
+enum edu_reg {
+ EDU_CONFIG = 0,
+ EDU_DRAM_ADDR,
+ EDU_EXT_ADDR,
+ EDU_LENGTH,
+ EDU_CMD,
+ EDU_STOP,
+ EDU_STATUS,
+ EDU_DONE,
+ EDU_ERR_STATUS,
+};
+
+static const u16 edu_regs[] = {
+ [EDU_CONFIG] = 0x00,
+ [EDU_DRAM_ADDR] = 0x04,
+ [EDU_EXT_ADDR] = 0x08,
+ [EDU_LENGTH] = 0x0c,
+ [EDU_CMD] = 0x10,
+ [EDU_STOP] = 0x14,
+ [EDU_STATUS] = 0x18,
+ [EDU_DONE] = 0x1c,
+ [EDU_ERR_STATUS] = 0x20,
+};
+
/* flash_dma registers */
enum flash_dma_reg {
FLASH_DMA_REVISION = 0,
@@ -167,6 +206,8 @@ enum {
BRCMNAND_HAS_WP = BIT(3),
};
+struct brcmnand_host;
+
struct brcmnand_controller {
struct device *dev;
struct nand_controller controller;
@@ -185,17 +226,32 @@ struct brcmnand_controller {
int cmd_pending;
bool dma_pending;
+ bool edu_pending;
struct completion done;
struct completion dma_done;
+ struct completion edu_done;
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
+ /* EDU info, per-transaction */
+ const u16 *edu_offsets;
+ void __iomem *edu_base;
+ int edu_irq;
+ int edu_count;
+ u64 edu_dram_addr;
+ u32 edu_ext_addr;
+ u32 edu_cmd;
+ u32 edu_config;
+
/* flash_dma reg */
const u16 *flash_dma_offsets;
struct brcm_nand_dma_desc *dma_desc;
dma_addr_t dma_pa;
+ int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 dma_cmd);
+
/* in-memory cache of the FLASH_CACHE, used only for some commands */
u8 flash_cache[FC_BYTES];
@@ -216,6 +272,7 @@ struct brcmnand_controller {
u32 nand_cs_nand_xor;
u32 corr_stat_threshold;
u32 flash_dma_mode;
+ u32 flash_edu_mode;
bool pio_poll_mode;
};
@@ -657,6 +714,22 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
__raw_writel(val, ctrl->nand_fc + word * 4);
}
+static inline void edu_writel(struct brcmnand_controller *ctrl,
+ enum edu_reg reg, u32 val)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ brcmnand_writel(val, ctrl->edu_base + offs);
+}
+
+static inline u32 edu_readl(struct brcmnand_controller *ctrl,
+ enum edu_reg reg)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ return brcmnand_readl(ctrl->edu_base + offs);
+}
+
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
@@ -926,6 +999,16 @@ static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
return ctrl->flash_dma_base;
}
+static inline bool has_edu(struct brcmnand_controller *ctrl)
+{
+ return ctrl->edu_base;
+}
+
+static inline bool use_dma(struct brcmnand_controller *ctrl)
+{
+ return has_flash_dma(ctrl) || has_edu(ctrl);
+}
+
static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
if (ctrl->pio_poll_mode)
@@ -1299,6 +1382,52 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
return tbytes;
}
+static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
+{
+ /* initialize edu */
+ edu_writel(ctrl, EDU_ERR_STATUS, 0);
+ edu_readl(ctrl, EDU_ERR_STATUS);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+}
+
+/* edu irq */
+static irqreturn_t brcmnand_edu_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->edu_count) {
+ ctrl->edu_count--;
+ while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
+ udelay(1);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+ }
+
+ if (ctrl->edu_count) {
+ ctrl->edu_dram_addr += FC_BYTES;
+ ctrl->edu_ext_addr += FC_BYTES;
+
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ return IRQ_HANDLED;
+ }
+
+ complete(&ctrl->edu_done);
+
+ return IRQ_HANDLED;
+}
+
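brcmnand_edu_irq() implements a chained transfer: the EDU moves one flash-cache burst (FC_BYTES, 512 bytes) per command, so the handler advances both addresses and re-arms the engine until edu_count reaches zero, and only then signals edu_done. A runnable sketch of the equivalent sequential flow (start_burst() is a stand-in for the EDU_DRAM_ADDR/EDU_EXT_ADDR/EDU_CMD writes):

    #include <stdint.h>
    #include <stdio.h>

    #define FC_BYTES 512u

    static void start_burst(uint64_t dram, uint32_t ext)
    {
        printf("burst: dram=%#llx ext=%#x\n", (unsigned long long)dram, ext);
    }

    int main(void)
    {
        uint64_t dram = 0x1000;     /* dma_map_single() result */
        uint32_t ext = 0;           /* flash address */
        unsigned int count = 4;     /* trans = len >> FC_SHIFT */

        while (count--) {
            start_burst(dram, ext);
            /* the real driver returns here and resumes in the next IRQ */
            dram += FC_BYTES;
            ext += FC_BYTES;
        }
        return 0;   /* complete(&ctrl->edu_done) once count hits zero */
    }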
static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
struct brcmnand_controller *ctrl = data;
@@ -1307,6 +1436,16 @@ static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
if (ctrl->dma_pending)
return IRQ_HANDLED;
+ /* check whether we need to piggyback on the ctlrdy irq */
+ if (ctrl->edu_pending) {
+ if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
+ /* Discard interrupts while using dedicated edu irq */
+ return IRQ_HANDLED;
+
+ /* no registered edu irq, call handler */
+ return brcmnand_edu_irq(irq, data);
+ }
+
complete(&ctrl->done);
return IRQ_HANDLED;
}
@@ -1645,6 +1784,81 @@ static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
}
/**
+ * Kick EDU engine
+ */
+static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(200);
+ int ret = 0;
+ int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
+ unsigned int trans = len >> FC_SHIFT;
+ dma_addr_t pa;
+
+ pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
+ return -ENOMEM;
+ }
+
+ ctrl->edu_pending = true;
+ ctrl->edu_dram_addr = pa;
+ ctrl->edu_ext_addr = addr;
+ ctrl->edu_cmd = edu_cmd;
+ ctrl->edu_count = trans;
+
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+ edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
+ edu_readl(ctrl, EDU_LENGTH);
+
+ /* Start edu engine */
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
+ dev_err(ctrl->dev,
+ "timeout waiting for EDU; status %#x, error status %#x\n",
+ edu_readl(ctrl, EDU_STATUS),
+ edu_readl(ctrl, EDU_ERR_STATUS));
+ }
+
+ dma_unmap_single(ctrl->dev, pa, len, dir);
+
+ /* for a program page, check the NAND status */
+ if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
+ edu_cmd == EDU_CMD_WRITE) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ /* Make sure the EDU status is clean */
+ if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
+ dev_warn(ctrl->dev, "EDU still active: %#x\n",
+ edu_readl(ctrl, EDU_STATUS));
+
+ if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
+ dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ ctrl->edu_pending = false;
+ brcmnand_edu_init(ctrl);
+ edu_writel(ctrl, EDU_STOP, 0); /* force stop */
+ edu_readl(ctrl, EDU_STOP);
+
+ return ret;
+}
+
+/**
* Construct a FLASH_DMA descriptor as part of a linked list. You must know the
* following ahead of time:
* - Is this descriptor the beginning or end of a linked list?
@@ -1850,9 +2064,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
try_dmaread:
brcmnand_clear_ecc_addr(ctrl);
- if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
- err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
- CMD_PAGE_READ);
+ if (ctrl->dma_trans && !oob && flash_dma_buf_ok(buf)) {
+ err = ctrl->dma_trans(host, addr, buf,
+ trans * FC_BYTES,
+ CMD_PAGE_READ);
+
if (err) {
if (mtd_is_bitflip_or_eccerr(err))
err_addr = addr;
@@ -1988,10 +2204,12 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < ctrl->max_oob; i += 4)
oob_reg_write(ctrl, i, 0xffffffff);
- if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
- if (brcmnand_dma_trans(host, addr, (u32 *)buf,
- mtd->writesize, CMD_PROGRAM_PAGE))
+ if (use_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ if (ctrl->dma_trans(host, addr, (u32 *)buf, mtd->writesize,
+ CMD_PROGRAM_PAGE))
ret = -EIO;
+
goto out;
}
@@ -2494,6 +2712,8 @@ static int brcmnand_suspend(struct device *dev)
if (has_flash_dma(ctrl))
ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+ else if (has_edu(ctrl))
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
return 0;
}
@@ -2508,6 +2728,13 @@ static int brcmnand_resume(struct device *dev)
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
}
+ if (has_edu(ctrl)) {
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
+ edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
+ edu_readl(ctrl, EDU_CONFIG);
+ brcmnand_edu_init(ctrl);
+ }
+
brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
@@ -2553,6 +2780,49 @@ MODULE_DEVICE_TABLE(of, brcmnand_of_match);
/***********************************************************************
* Platform driver setup (per controller)
***********************************************************************/
+static int brcmnand_edu_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
+ if (res) {
+ ctrl->edu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->edu_base))
+ return PTR_ERR(ctrl->edu_base);
+
+ ctrl->edu_offsets = edu_regs;
+
+ edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
+ EDU_CONFIG_SWAP_CFG);
+ edu_readl(ctrl, EDU_CONFIG);
+
+ /* initialize edu */
+ brcmnand_edu_init(ctrl);
+
+ ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
+ if (ctrl->edu_irq < 0) {
+ dev_warn(dev,
+ "FLASH EDU enabled, using ctlrdy irq\n");
+ } else {
+ ret = devm_request_irq(dev, ctrl->edu_irq,
+ brcmnand_edu_irq, 0,
+ "brcmnand-edu", ctrl);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->edu_irq, ret);
+ return ret;
+ }
+
+ dev_info(dev, "FLASH EDU enabled using irq %u\n",
+ ctrl->edu_irq);
+ }
+ }
+
+ return 0;
+}
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
@@ -2578,6 +2848,7 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
init_completion(&ctrl->done);
init_completion(&ctrl->dma_done);
+ init_completion(&ctrl->edu_done);
nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &brcmnand_controller_ops;
INIT_LIST_HEAD(&ctrl->host_list);
@@ -2675,6 +2946,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
}
dev_info(dev, "enabling FLASH_DMA\n");
+ /* set flash dma transfer function to call */
+ ctrl->dma_trans = brcmnand_dma_trans;
+ } else {
+ ret = brcmnand_edu_setup(pdev);
+ if (ret < 0)
+ goto err;
+
+ /* set edu transfer function to call */
+ ctrl->dma_trans = brcmnand_edu_trans;
}
/* Disable automatic device ID config, direct addressing */
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index f6c7102a1e32..efddc5c68afb 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -30,7 +30,6 @@
* Generic mode is used for executing rest of commands.
*/
-#define MAX_OOB_SIZE_PER_SECTOR 32
#define MAX_ADDRESS_CYC 6
#define MAX_ERASE_ADDRESS_CYC 3
#define MAX_DATA_SIZE 0xFFFC
@@ -190,6 +189,7 @@
/* BCH Engine identification register 3. */
#define BCH_CFG_3 0x844
+#define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
/* Ready/Busy# line status. */
#define RBN_SETINGS 0x1004
@@ -499,6 +499,7 @@ struct cdns_nand_ctrl {
unsigned long assigned_cs;
struct list_head chips;
+ u8 bch_metadata_size;
};
struct cdns_nand_chip {
@@ -997,6 +998,7 @@ static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
return status;
cadence_nand_reset_irq(cdns_ctrl);
+ reinit_completion(&cdns_ctrl->complete);
writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
cdns_ctrl->reg + CMD_REG2);
@@ -1077,6 +1079,14 @@ static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
int max_step_size = 0, nstrengths, i;
u32 reg;
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
+ cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
+ if (cdns_ctrl->bch_metadata_size < 4) {
+ dev_err(cdns_ctrl->dev,
+ "Driver needs at least 4 bytes of BCH meta data\n");
+ return -EIO;
+ }
+
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
@@ -1170,7 +1180,8 @@ static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
cadence_nand_get_caps(cdns_ctrl);
- cadence_nand_read_bch_caps(cdns_ctrl);
+ if (cadence_nand_read_bch_caps(cdns_ctrl))
+ return -EIO;
/*
* Set IO width access to 8.
@@ -2585,9 +2596,8 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
- u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+ u32 ecc_size;
struct mtd_info *mtd = nand_to_mtd(chip);
- u32 max_oob_data_size;
int ret;
if (chip->options & NAND_BUSWIDTH_16) {
@@ -2603,12 +2613,9 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
chip->options |= NAND_NO_SUBPAGE_WRITE;
cdns_chip->bbm_offs = chip->badblockpos;
- if (chip->options & NAND_BUSWIDTH_16) {
- cdns_chip->bbm_offs &= ~0x01;
- cdns_chip->bbm_len = 2;
- } else {
- cdns_chip->bbm_len = 1;
- }
+ cdns_chip->bbm_offs &= ~0x01;
+ /* this value should be even number */
+ cdns_chip->bbm_len = 2;
ret = nand_ecc_choose_conf(chip,
&cdns_ctrl->ecc_caps,
@@ -2625,13 +2632,12 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
/* Error correction configuration. */
cdns_chip->sector_size = chip->ecc.size;
cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+ ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
- max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;
-
- if (cdns_chip->avail_oob_size > max_oob_data_size)
- cdns_chip->avail_oob_size = max_oob_data_size;
+ if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
+ cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
> mtd->oobsize)
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index fafd0a0aa8e2..6a6c919b2569 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1317,6 +1317,7 @@ int denali_init(struct denali_controller *denali)
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+ iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
denali_clear_irq_all(denali);
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index e5cdcda56d14..ac46eb7956ce 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -328,7 +328,7 @@ struct denali_chip {
struct nand_chip chip;
struct list_head node;
unsigned int nsels;
- struct denali_chip_sel sels[0];
+ struct denali_chip_sel sels[];
};
/**
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index c0e1a8ebe820..c2a391ad2c35 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1169,7 +1169,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
" NoOfBootImageBlocks = %d\n"
" NoOfBinaryPartitions = %d\n"
" NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
+ " BlockMultiplierBits = %d\n"
" FormatFlgs = %d\n"
" OsakVersion = %d.%d.%d.%d\n"
" PercentUsed = %d\n",
@@ -1482,7 +1482,7 @@ static int __init doc_probe(unsigned long physadr)
break;
case DOC_ChipID_DocMilPlus32:
pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
- /* fall through */
+ fallthrough;
default:
ret = -ENODEV;
goto notfound;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 634c550db13a..e1dc675b12bb 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -324,8 +324,7 @@ static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
/* READ0 and READ1 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ1:
column += 256;
-
- /* fall-through */
+ fallthrough;
case NAND_CMD_READ0:
dev_dbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index b9d5d55a5edb..53b00c841aec 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1148,20 +1148,21 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
struct dma_chan *dma_chan;
+ int ret = 0;
/* request dma channel */
- dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!dma_chan) {
- dev_err(this->dev, "Failed to request DMA channel.\n");
- goto acquire_err;
+ dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(this->dev, "DMA channel request failed: %d\n",
+ ret);
+ release_dma_channels(this);
+ } else {
+ this->dma_chans[0] = dma_chan;
}
- this->dma_chans[0] = dma_chan;
- return 0;
-
-acquire_err:
- release_dma_channels(this);
- return -EINVAL;
+ return ret;
}
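The same dma_request_slave_channel() to dma_request_chan() conversion recurs in the marvell_nand, qcom_nandc, stm32_fmc2 and sunxi hunks below. The point of the new API is that it returns an ERR_PTR, so probe can propagate -EPROBE_DEFER instead of failing hard, and the error print is suppressed for deferral. A hedged sketch of the idiom (my_request_chan() and the "rx-tx" name are placeholders, not from any one driver):

    #include <linux/device.h>
    #include <linux/dmaengine.h>
    #include <linux/err.h>

    static int my_request_chan(struct device *dev, struct dma_chan **out)
    {
        struct dma_chan *chan = dma_request_chan(dev, "rx-tx");

        if (IS_ERR(chan)) {
            int ret = PTR_ERR(chan);

            /* defer silently; probe will be retried later */
            if (ret != -EPROBE_DEFER)
                dev_err(dev, "DMA channel request failed: %d\n", ret);
            return ret;
        }

        *out = chan;
        return 0;
    }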
static int gpmi_get_clks(struct gpmi_nand_data *this)
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
index e30feb56b650..96c5ae8b1bbc 100644
--- a/drivers/mtd/nand/raw/ingenic/Kconfig
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config MTD_NAND_JZ4780
tristate "JZ4780 NAND controller"
+ depends on MIPS || COMPILE_TEST
depends on JZ4780_NEMC
help
Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index c954189606f6..8e22cd6ec71f 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -124,7 +124,6 @@ int ingenic_ecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ingenic_ecc *ecc;
- struct resource *res;
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc)
@@ -134,8 +133,7 @@ int ingenic_ecc_probe(struct platform_device *pdev)
if (!ecc->ops)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ecc->base = devm_ioremap_resource(dev, res);
+ ecc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ecc->base))
return PTR_ERR(ecc->base);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index 49afebee50db..935c4902ada7 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -253,7 +253,7 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
chip->ecc.calculate = ingenic_nand_ecc_calculate;
chip->ecc.correct = ingenic_nand_ecc_correct;
- /* fall through */
+ fallthrough;
case NAND_ECC_SOFT:
dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
(nfc->ecc) ? "hardware ECC" : "software ECC",
diff --git a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
index 6c852eae09cf..2d0e0a2192ae 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
@@ -145,10 +145,10 @@ static void jz4725b_bch_read_parity(struct ingenic_ecc *bch, u8 *buf,
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
- /* fall-through */
+ fallthrough;
case 2:
dest8[1] = (val >> 8) & 0xff;
- /* fall-through */
+ fallthrough;
case 1:
dest8[0] = val & 0xff;
break;
diff --git a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
index 079266a0d6cf..d67dbfff76cc 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -123,10 +123,10 @@ static void jz4780_bch_read_parity(struct ingenic_ecc *bch, void *buf,
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
- /* fall through */
+ fallthrough;
case 2:
dest8[1] = (val >> 8) & 0xff;
- /* fall through */
+ fallthrough;
case 1:
dest8[0] = val & 0xff;
break;
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index cba6fe7dd8c4..9d0caadf940e 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -30,6 +30,7 @@
#define NAND_MFR_SAMSUNG 0xec
#define NAND_MFR_SANDISK 0x45
#define NAND_MFR_STMICRO 0x20
+/* Kioxia is the new name of Toshiba Memory. */
#define NAND_MFR_TOSHIBA 0x98
#define NAND_MFR_WINBOND 0xef
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index fb5abdcfb007..179f0ca585f8 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -334,7 +334,7 @@ struct marvell_nand_chip {
int addr_cyc;
int selected_die;
unsigned int nsels;
- struct marvell_nand_chip_sel sels[0];
+ struct marvell_nand_chip_sel sels[];
};
static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
@@ -2743,16 +2743,21 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
if (ret)
return ret;
- nfc->dma_chan = dma_request_slave_channel(nfc->dev, "data");
- if (!nfc->dma_chan) {
- dev_err(nfc->dev,
- "Unable to request data DMA channel\n");
- return -ENODEV;
+ nfc->dma_chan = dma_request_chan(nfc->dev, "data");
+ if (IS_ERR(nfc->dma_chan)) {
+ ret = PTR_ERR(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nfc->dev, "DMA channel request failed: %d\n",
+ ret);
+ return ret;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENXIO;
+ if (!r) {
+ ret = -ENXIO;
+ goto release_channel;
+ }
config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -2763,7 +2768,7 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
ret = dmaengine_slave_config(nfc->dma_chan, &config);
if (ret < 0) {
dev_err(nfc->dev, "Failed to configure DMA channel\n");
- return ret;
+ goto release_channel;
}
/*
@@ -2773,12 +2778,20 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
* the provided buffer.
*/
nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!nfc->dma_buf)
- return -ENOMEM;
+ if (!nfc->dma_buf) {
+ ret = -ENOMEM;
+ goto release_channel;
+ }
nfc->use_dma = true;
return 0;
+
+release_channel:
+ dma_release_channel(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+
+ return ret;
}
static void marvell_nfc_reset(struct marvell_nfc *nfc)
@@ -2920,10 +2933,13 @@ static int marvell_nfc_probe(struct platform_device *pdev)
ret = marvell_nand_chips_init(dev, nfc);
if (ret)
- goto unprepare_reg_clk;
+ goto release_dma;
return 0;
+release_dma:
+ if (nfc->use_dma)
+ dma_release_channel(nfc->dma_chan);
unprepare_reg_clk:
clk_disable_unprepare(nfc->reg_clk);
unprepare_core_clk:
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 9f17b5b8efbf..f6fb5c0e6255 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -118,7 +118,7 @@ struct meson_nfc_nand_chip {
u8 *data_buf;
__le64 *info_buf;
u32 nsels;
- u8 sels[0];
+ u8 sels[];
};
struct meson_nand_ecc {
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index b8305e39ab51..ef149e8b26d0 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -131,7 +131,7 @@ struct mtk_nfc_nand_chip {
u32 spare_per_sector;
int nsels;
- u8 sels[0];
+ u8 sels[];
/* nothing after this field */
};
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index f64e3b6605c6..c24e5e2ba130 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -683,7 +683,12 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
if (ret)
return ret;
- timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ /*
+ * The +1 below is necessary because if we are now in the last
+ * fraction of a jiffy and msecs_to_jiffies() returns 1, we would
+ * wait only that small fraction of a jiffy, possibly leading to a
+ * false timeout.
+ */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
ret = nand_read_data_op(chip, &status, sizeof(status), true);
if (ret)
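Concrete numbers, assuming HZ=100 (10 ms per jiffy) for illustration: msecs_to_jiffies(1) returns 1, and if the call lands 9.9 ms into the current jiffy, a deadline of jiffies + 1 expires after only 0.1 ms; the extra +1 guarantees at least the requested wait. A small demonstration of the arithmetic:

    #include <stdio.h>

    #define HZ 100                      /* assumed tick rate for this example */
    #define TICK_MS (1000.0 / HZ)       /* 10 ms per jiffy */

    int main(void)
    {
        unsigned long timeout_ms = 1;
        /* msecs_to_jiffies() rounds up */
        unsigned long ticks = (timeout_ms * HZ + 999) / 1000;
        double elapsed = 9.9;   /* worst case: almost through the current jiffy */

        printf("without +1: %.1f ms\n", ticks * TICK_MS - elapsed);        /* 0.1 ms  */
        printf("with    +1: %.1f ms\n", (ticks + 1) * TICK_MS - elapsed);  /* 10.1 ms */
        return 0;
    }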
@@ -4321,16 +4326,22 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
*/
static int nand_suspend(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
mutex_lock(&chip->lock);
- chip->suspended = 1;
+ if (chip->suspend)
+ ret = chip->suspend(chip);
+ if (!ret)
+ chip->suspended = 1;
mutex_unlock(&chip->lock);
- return 0;
+ return ret;
}
/**
@@ -4342,11 +4353,14 @@ static void nand_resume(struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
mutex_lock(&chip->lock);
- if (chip->suspended)
+ if (chip->suspended) {
+ if (chip->resume)
+ chip->resume(chip);
chip->suspended = 0;
- else
+ } else {
pr_err("%s called for a chip which is not in suspended state\n",
__func__);
+ }
mutex_unlock(&chip->lock);
}
@@ -4360,6 +4374,38 @@ static void nand_shutdown(struct mtd_info *mtd)
nand_suspend(mtd);
}
+/**
+ * nand_lock - [MTD Interface] Lock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to lock (must be a multiple of block/page size)
+ */
+static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->lock_area)
+ return -ENOTSUPP;
+
+ return chip->lock_area(chip, ofs, len);
+}
+
+/**
+ * nand_unlock - [MTD Interface] Unlock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to unlock (must be a multiple of block/page size)
+ */
+static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->unlock_area)
+ return -ENOTSUPP;
+
+ return chip->unlock_area(chip, ofs, len);
+}
+
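These two helpers are wired up as the generic MTD _lock/_unlock hooks further down in nand_scan_tail(), so any MTD consumer can reach a vendor lock_area/unlock_area implementation (such as the Macronix one added later in this series) through the standard API. A hedged usage sketch (protect_whole_device() is a hypothetical caller):

    #include <linux/mtd/mtd.h>

    /* Write-protect an entire NAND device through the generic MTD API;
     * returns -ENOTSUPP when the chip provides no lock_area hook. */
    static int protect_whole_device(struct mtd_info *mtd)
    {
        return mtd_lock(mtd, 0, mtd->size);
    }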
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
@@ -5591,8 +5637,7 @@ static int nand_scan_tail(struct nand_chip *chip)
}
if (!ecc->read_page)
ecc->read_page = nand_read_page_hwecc_oob_first;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_HW:
/* Use standard hwecc read page function? */
if (!ecc->read_page)
@@ -5611,8 +5656,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->read_subpage = nand_read_subpage;
if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
ecc->write_subpage = nand_write_subpage_hwecc;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_HW_SYNDROME:
if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
(!ecc->read_page ||
@@ -5649,8 +5693,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->size, mtd->writesize);
ecc->mode = NAND_ECC_SOFT;
ecc->algo = NAND_ECC_HAMMING;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_SOFT:
ret = nand_set_ecc_soft_ops(chip);
if (ret) {
@@ -5786,8 +5829,8 @@ static int nand_scan_tail(struct nand_chip *chip)
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
mtd->_sync = nand_sync;
- mtd->_lock = NULL;
- mtd->_unlock = NULL;
+ mtd->_lock = nand_lock;
+ mtd->_unlock = nand_unlock;
mtd->_suspend = nand_suspend;
mtd->_resume = nand_resume;
mtd->_reboot = nand_shutdown;
@@ -5907,6 +5950,8 @@ void nand_cleanup(struct nand_chip *chip)
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+ nanddev_cleanup(&chip->base);
+
/* Free bad block table memory */
kfree(chip->bbt);
kfree(chip->data_buf);
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 194e4227aefe..7caedaa5b9e5 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -26,7 +26,7 @@
struct hynix_read_retry {
int nregs;
const u8 *regs;
- u8 values[0];
+ u8 values[];
};
/**
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index f2526ec616a6..f91e92e1b972 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -331,8 +331,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command,
*/
if (column == -1 && page_addr == -1)
return;
- /* fall through */
-
+ fallthrough;
default:
/*
* If we don't have access to the busy pin, we apply the given
@@ -483,8 +482,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
-
- /* fall through - This applies to read commands */
+ fallthrough; /* This applies to read commands */
default:
/*
* If we don't have access to the busy pin, we apply the given
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 3ff7ce00cbdb..09c254c97b5c 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -6,11 +6,31 @@
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
*/
+#include "linux/delay.h"
#include "internals.h"
#define MACRONIX_READ_RETRY_BIT BIT(0)
#define MACRONIX_NUM_READ_RETRY_MODES 6
+#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
+#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
+#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
+
+#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
+#define MACRONIX_RANDOMIZER_BIT BIT(1)
+#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
+#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
+#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
+#define MACRONIX_RANDOMIZER_MODE_ENTER \
+ (MACRONIX_RANDOMIZER_ENPGM | \
+ MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+#define MACRONIX_RANDOMIZER_MODE_EXIT \
+ (MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+
+#define MXIC_CMD_POWER_DOWN 0xB9
+
struct nand_onfi_vendor_macronix {
u8 reserved;
u8 reliability_func;
@@ -29,15 +49,83 @@ static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
}
+static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ if (feature[0])
+ return feature[0];
+
+ feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ /* RANDEN and RANDOPT OTP bits are programmed */
+ feature[0] = 0x0;
+ ret = nand_prog_page_op(chip, 0, 0, feature, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void macronix_nand_onfi_init(struct nand_chip *chip)
{
struct nand_parameters *p = &chip->parameters;
struct nand_onfi_vendor_macronix *mxic;
+ struct device_node *dn = nand_get_flash_node(chip);
+ int rand_otp = 0;
+ int ret;
if (!p->onfi)
return;
+ if (of_find_property(dn, "mxic,enable-randomizer-otp", NULL))
+ rand_otp = 1;
+
mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
+ /* Subpage write is prohibited in randomizer operation */
+ if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
+ mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ ret = macronix_nand_randomizer_check_enable(chip);
+ if (ret < 0) {
+ bitmap_clear(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ bitmap_clear(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ pr_info("Macronix NAND randomizer failed\n");
+ } else {
+ pr_info("Macronix NAND randomizer enabled\n");
+ }
+ }
+ }
+
if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
return;
@@ -91,6 +179,143 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
+/*
+ * Macronix NAND supports block protection by a Protection (PT) pin;
+ * it is active high at power-on and protects the entire chip even when
+ * #WP is disabled. The protected area can be partitioned according to
+ * the protection bits, e.g. upper 1/2 locked, upper 1/4 locked and so on.
+ */
+static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static void macronix_nand_block_protection_support(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
+ if (ret)
+ pr_err("Block protection check failed\n");
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+ return;
+ }
+
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ chip->lock_area = mxic_nand_lock;
+ chip->unlock_area = mxic_nand_unlock;
+}
+
+static int nand_power_down_op(struct nand_chip *chip)
+{
+ int ret;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
+ };
+
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ } else {
+ chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
+ }
+
+ return 0;
+}
+
+static int mxic_nand_suspend(struct nand_chip *chip)
+{
+ int ret;
+
+ nand_select_target(chip, 0);
+ ret = nand_power_down_op(chip);
+ if (ret < 0)
+ pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static void mxic_nand_resume(struct nand_chip *chip)
+{
+ /*
+ * Toggle the #CS pin to resume the NAND device, regardless of the
+ * status of the other pins (CLE, #WE, #RE).
+ * A NAND controller ensures it is able to assert/de-assert #CS by
+ * sending any byte over the NAND bus, e.g. a power-down command or a
+ * reset command without R/B# status checking.
+ */
+ nand_select_target(chip, 0);
+ nand_power_down_op(chip);
+ /* The minimum recovery time tRDP is 35 us */
+ usleep_range(35, 100);
+ nand_deselect_target(chip);
+}
+
+static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
+{
+ int i;
+ static const char * const deep_power_down_dev[] = {
+ "MX30UF1G28AD",
+ "MX30UF2G28AD",
+ "MX30UF4G28AD",
+ };
+
+ i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ chip->suspend = mxic_nand_suspend;
+ chip->resume = mxic_nand_resume;
+}
+
static int macronix_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
@@ -98,6 +323,8 @@ static int macronix_nand_init(struct nand_chip *chip)
macronix_nand_fix_broken_get_timings(chip);
macronix_nand_onfi_init(chip);
+ macronix_nand_block_protection_support(chip);
+ macronix_nand_deep_power_down_support(chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index 9c03fbb1f47d..f3dcd695b5db 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -14,14 +14,68 @@
/* Recommended to rewrite for BENAND */
#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
+/* ECC Status Read Command for BENAND */
+#define TOSHIBA_NAND_CMD_ECC_STATUS_READ 0x7A
+
+/* ECC Status Mask for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_MASK 0x0F
+
+/* Uncorrectable Error for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_UNCORR 0x0F
+
+/* Max ECC Steps for BENAND */
+#define TOSHIBA_NAND_MAX_ECC_STEPS 8
+
+static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
+ u8 *buf)
+{
+ u8 *ecc_status = buf;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ return -ENOTSUPP;
+}
+
static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
unsigned int max_bitflips = 0;
- u8 status;
+ u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
/* Check Status */
+ ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
+ if (!ret) {
+ unsigned int i, bitflips = 0;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
+ if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bitflips;
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+ }
+
+ return max_bitflips;
+ }
+
+ /*
+ * Fall back to the regular status check if
+ * toshiba_nand_benand_read_eccstatus_op() failed.
+ */
ret = nand_status_op(chip, &status);
if (ret)
return ret;
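Each byte returned by the 0x7A ECC status read reports one 512-byte ECC step: the low nibble is the bitflip count, and 0xF flags an uncorrectable step. A runnable sketch of the decode loop above (the status bytes are made-up example values):

    #include <stdio.h>

    #define ECC_STATUS_MASK   0x0F
    #define ECC_STATUS_UNCORR 0x0F

    int main(void)
    {
        /* example status bytes for an 8-step page */
        unsigned char st[8] = { 0, 1, 0, 3, 0, 0, 0x0F, 2 };
        unsigned int i, max = 0, failed = 0, corrected = 0;

        for (i = 0; i < 8; i++) {
            unsigned int flips = st[i] & ECC_STATUS_MASK;

            if (flips == ECC_STATUS_UNCORR) {
                failed++;
            } else {
                corrected += flips;
                if (flips > max)
                    max = flips;
            }
        }
        printf("failed=%u corrected=%u max_bitflips=%u\n",
               failed, corrected, max);   /* failed=1 corrected=6 max_bitflips=3 */
        return 0;
    }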
@@ -108,7 +162,7 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
*/
if (chip->id.len >= 6 && nand_is_slc(chip) &&
(chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
- !(chip->id.data[4] & 0x80) /* !BENAND */) {
+ !(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
memorg->oobsize = 32 * memorg->pagesize >> 9;
mtd->oobsize = memorg->oobsize;
}
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 9a70754a61ef..1de03bb34e84 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2251,10 +2251,10 @@ static int __init ns_init_module(void)
switch (bbt) {
case 2:
chip->bbt_options |= NAND_BBT_NO_OOB;
- /* fall through */
+ fallthrough;
case 1:
chip->bbt_options |= NAND_BBT_USE_FLASH;
- /* fall through */
+ fallthrough;
case 0:
break;
default:
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 5502ffbdd1e6..3fa0e2cbbe53 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -455,13 +455,13 @@ static int elm_context_save(struct elm_info *info)
ELM_SYNDROME_FRAGMENT_5 + offset);
regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_4 + offset);
- /* fall through */
+ fallthrough;
case BCH8_ECC:
regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_3 + offset);
regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_2 + offset);
- /* fall through */
+ fallthrough;
case BCH4_ECC:
regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_1 + offset);
@@ -503,13 +503,13 @@ static int elm_context_restore(struct elm_info *info)
regs->elm_syndrome_fragment_5[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
regs->elm_syndrome_fragment_4[i]);
- /* fall through */
+ fallthrough;
case BCH8_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
regs->elm_syndrome_fragment_3[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
regs->elm_syndrome_fragment_2[i]);
- /* fall through */
+ fallthrough;
case BCH4_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
regs->elm_syndrome_fragment_1[i]);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 7bb9a7e8e1e7..5b11c7061497 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2628,6 +2628,29 @@ static const struct nand_controller_ops qcom_nandc_ops = {
.attach_chip = qcom_nand_attach_chip,
};
+static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->is_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
int ret;
@@ -2673,22 +2696,37 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
return -EIO;
}
- nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
- if (!nandc->tx_chan) {
- dev_err(nandc->dev, "failed to request tx channel\n");
- return -ENODEV;
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "tx DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
- nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
- if (!nandc->rx_chan) {
- dev_err(nandc->dev, "failed to request rx channel\n");
- return -ENODEV;
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "rx DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
- nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
- if (!nandc->cmd_chan) {
- dev_err(nandc->dev, "failed to request cmd channel\n");
- return -ENODEV;
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "cmd DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
/*
@@ -2702,14 +2740,19 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unalloc;
}
} else {
- nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
- if (!nandc->chan) {
- dev_err(nandc->dev,
- "failed to request slave channel\n");
- return -ENODEV;
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
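BCH_CFG_3 carries the supported metadata size in bits 23:16, so FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg) extracts a byte count. A standalone illustration of the mask arithmetic (the register value is a made-up example):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
        uint32_t reg = 0x00200000;              /* example BCH_CFG_3 value */
        uint32_t mask = GENMASK32(23, 16);      /* 0x00ff0000 */
        uint32_t size = (reg & mask) >> 16;     /* what FIELD_GET() computes */

        printf("bch metadata size = %u bytes\n", size);   /* 32 */
        return 0;
    }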
+ nandc->chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "rxtx DMA channel request failed: %d\n",
+ ret);
+ return ret;
}
}
@@ -2720,29 +2763,9 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
nandc->controller.ops = &qcom_nandc_ops;
return 0;
-}
-
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-{
- if (nandc->props->is_bam) {
- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
-
- if (nandc->tx_chan)
- dma_release_channel(nandc->tx_chan);
-
- if (nandc->rx_chan)
- dma_release_channel(nandc->rx_chan);
-
- if (nandc->cmd_chan)
- dma_release_channel(nandc->cmd_chan);
- } else {
- if (nandc->chan)
- dma_release_channel(nandc->chan);
- }
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
}
/* one time setup of a few nand controller registers */
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 3ba73f18841f..b6d45cd911ae 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1606,15 +1606,36 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
/* DMA configuration */
static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
{
- int ret;
+ int ret = 0;
- fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx");
- fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx");
- fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc");
+ fmc2->dma_tx_ch = dma_request_chan(fmc2->dev, "tx");
+ if (IS_ERR(fmc2->dma_tx_ch)) {
+ ret = PTR_ERR(fmc2->dma_tx_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request tx DMA channel: %d\n", ret);
+ fmc2->dma_tx_ch = NULL;
+ goto err_dma;
+ }
- if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) {
- dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n");
- return 0;
+ fmc2->dma_rx_ch = dma_request_chan(fmc2->dev, "rx");
+ if (IS_ERR(fmc2->dma_rx_ch)) {
+ ret = PTR_ERR(fmc2->dma_rx_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request rx DMA channel: %d\n", ret);
+ fmc2->dma_rx_ch = NULL;
+ goto err_dma;
+ }
+
+ fmc2->dma_ecc_ch = dma_request_chan(fmc2->dev, "ecc");
+ if (IS_ERR(fmc2->dma_ecc_ch)) {
+ ret = PTR_ERR(fmc2->dma_ecc_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request ecc DMA channel: %d\n", ret);
+ fmc2->dma_ecc_ch = NULL;
+ goto err_dma;
}
ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
@@ -1635,6 +1656,15 @@ static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
init_completion(&fmc2->dma_ecc_complete);
return 0;
+
+err_dma:
+ if (ret == -ENODEV) {
+ dev_warn(fmc2->dev,
+ "DMAs not defined in the DT, polling mode is used\n");
+ ret = 0;
+ }
+
+ return ret;
}
/* NAND callbacks setup */
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 37a4ac0dd85b..5f3e40b79fb1 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -195,7 +195,7 @@ struct sunxi_nand_chip {
u32 timing_cfg;
u32 timing_ctl;
int nsels;
- struct sunxi_nand_chip_sel sels[0];
+ struct sunxi_nand_chip_sel sels[];
};
static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
@@ -2123,8 +2123,16 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (ret)
goto out_ahb_reset_reassert;
- nfc->dmac = dma_request_slave_channel(dev, "rxtx");
- if (nfc->dmac) {
+ nfc->dmac = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(nfc->dmac)) {
+ ret = PTR_ERR(nfc->dmac);
+ if (ret == -EPROBE_DEFER)
+ goto out_ahb_reset_reassert;
+
+ /* Ignore errors to fall back to PIO mode */
+ dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret);
+ nfc->dmac = NULL;
+ } else {
struct dma_slave_config dmac_cfg = { };
dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
@@ -2138,9 +2146,6 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (nfc->caps->extra_mbus_conf)
writel(readl(nfc->regs + NFC_REG_CTL) |
NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
-
- } else {
- dev_warn(dev, "failed to request rxtx DMA channel\n");
}
platform_set_drvdata(pdev, nfc);
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 89f6beefb01c..e2c382ffc5b6 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -16,6 +16,7 @@
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -370,10 +371,11 @@ out:
return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
-static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
+ u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
- SPINAND_MAX_ID_LEN);
+ struct spi_mem_op op = SPINAND_READID_OP(
+ naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -568,18 +570,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
- .ooblen = 2,
+ .ooblen = sizeof(marker),
.ooboffs = 0,
- .oobbuf.in = spinand->oobbuf,
+ .oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
- memset(spinand->oobbuf, 0, 2);
spinand_select_target(spinand, pos->target);
spinand_read_page(spinand, &req, false);
- if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
+ if (marker[0] != 0xff || marker[1] != 0xff)
return true;
return false;
@@ -603,15 +605,16 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
.ooboffs = 0,
- .ooblen = 2,
- .oobbuf.out = spinand->oobbuf,
+ .ooblen = sizeof(marker),
+ .oobbuf.out = marker,
+ .mode = MTD_OPS_RAW,
};
int ret;
- /* Erase block before marking it bad. */
ret = spinand_select_target(spinand, pos->target);
if (ret)
return ret;
@@ -620,9 +623,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- spinand_erase_op(spinand, pos);
-
- memset(spinand->oobbuf, 0, 2);
return spinand_write_page(spinand, &req);
}
@@ -762,24 +762,62 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&winbond_spinand_manufacturer,
};
-static int spinand_manufacturer_detect(struct spinand_device *spinand)
+static int spinand_manufacturer_match(struct spinand_device *spinand,
+ enum spinand_readid_method rdid_method)
{
+ u8 *id = spinand->id.data;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
- ret = spinand_manufacturers[i]->ops->detect(spinand);
- if (ret > 0) {
- spinand->manufacturer = spinand_manufacturers[i];
- return 0;
- } else if (ret < 0) {
- return ret;
- }
- }
+ const struct spinand_manufacturer *manufacturer =
+ spinand_manufacturers[i];
+
+ if (id[0] != manufacturer->id)
+ continue;
+
+ ret = spinand_match_and_init(spinand,
+ manufacturer->chips,
+ manufacturer->nchips,
+ rdid_method);
+ if (ret < 0)
+ continue;
+ spinand->manufacturer = manufacturer;
+ return 0;
+ }
return -ENOTSUPP;
}
+static int spinand_id_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ ret = spinand_read_id_op(spinand, 0, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
+ if (!ret)
+ return 0;
+
+ ret = spinand_read_id_op(spinand, 1, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_ADDR);
+ if (!ret)
+ return 0;
+
+ ret = spinand_read_id_op(spinand, 0, 1, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_DUMMY);
+
+ return ret;
+}
+
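Three READ_ID byte layouts exist in the wild: opcode only, opcode followed by one address byte, and opcode followed by one dummy byte (first returned byte garbage). spinand_id_detect() therefore issues the command three times with the (naddr, ndummy) pairs below and tries to match a manufacturer after each read. A hedged restatement of the probe order as a table (readid_probe is not a name from the driver):

    /* the three probe variants, in the order tried above */
    static const struct {
        unsigned char naddr;    /* address bytes after the READ_ID opcode */
        unsigned char ndummy;   /* dummy bytes after the opcode */
    } readid_probe[] = {
        { 0, 0 },   /* SPINAND_READID_METHOD_OPCODE */
        { 1, 0 },   /* SPINAND_READID_METHOD_OPCODE_ADDR */
        { 0, 1 },   /* SPINAND_READID_METHOD_OPCODE_DUMMY */
    };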
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
if (spinand->manufacturer->ops->init)
@@ -835,9 +873,9 @@ spinand_select_op_variant(struct spinand_device *spinand,
* @spinand: SPI NAND object
* @table: SPI NAND device description table
* @table_size: size of the device description table
+ * @rdid_method: read id method to match
*
- * Should be used by SPI NAND manufacturer drivers when they want to find a
- * match between a device ID retrieved through the READ_ID command and an
+ * Match between a device ID retrieved through the READ_ID command and an
* entry in the SPI NAND description table. If a match is found, the spinand
* object will be initialized with information provided by the matching
* spinand_info entry.
@@ -846,8 +884,10 @@ spinand_select_op_variant(struct spinand_device *spinand,
*/
int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
- unsigned int table_size, u16 devid)
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method)
{
+ u8 *id = spinand->id.data;
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
@@ -855,13 +895,17 @@ int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *info = &table[i];
const struct spi_mem_op *op;
- if (devid != info->devid)
+ if (rdid_method != info->devid.method)
+ continue;
+
+ if (memcmp(id + 1, info->devid.id, info->devid.len))
continue;
nand->memorg = table[i].memorg;
nand->eccreq = table[i].eccreq;
spinand->eccinfo = table[i].eccinfo;
spinand->flags = table[i].flags;
+ spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
op = spinand_select_op_variant(spinand,
@@ -898,13 +942,7 @@ static int spinand_detect(struct spinand_device *spinand)
if (ret)
return ret;
- ret = spinand_read_id_op(spinand, spinand->id.data);
- if (ret)
- return ret;
-
- spinand->id.len = SPINAND_MAX_ID_LEN;
-
- ret = spinand_manufacturer_detect(spinand);
+ ret = spinand_id_detect(spinand);
if (ret) {
dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
spinand->id.data);
@@ -1051,6 +1089,10 @@ static int spinand_init(struct spinand_device *spinand)
mtd->oobavail = ret;
+ /* Propagate ECC information to mtd_info */
+ mtd->ecc_strength = nand->eccreq.strength;
+ mtd->ecc_step_size = nand->eccreq.step_size;
+
return 0;
err_cleanup_nanddev:
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e99d425aa93f..d219c970042a 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -195,7 +195,8 @@ static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info gigadevice_spinand_table[] = {
- SPINAND_INFO("GD5F1GQ4xA", 0xF1,
+ SPINAND_INFO("GD5F1GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -204,7 +205,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F2GQ4xA", 0xF2,
+ SPINAND_INFO("GD5F2GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -213,7 +215,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F4GQ4xA", 0xF4,
+ SPINAND_INFO("GD5F4GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4),
NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -222,7 +225,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
+ SPINAND_INFO("GD5F1GQ4UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -231,7 +235,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
- SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
+ SPINAND_INFO("GD5F1GQ4UFxxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
@@ -242,39 +247,13 @@ static const struct spinand_info gigadevice_spinand_table[] = {
gd5fxgq4ufxxg_ecc_get_status)),
};
-static int gigadevice_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- u16 did;
- int ret;
-
- /*
- * Earlier GDF5-series devices (A,E) return [0][MID][DID]
- * Later (F) devices return [MID][DID1][DID2]
- */
-
- if (id[0] == SPINAND_MFR_GIGADEVICE)
- did = (id[1] << 8) + id[2];
- else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
- did = id[2];
- else
- return 0;
-
- ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
- ARRAY_SIZE(gigadevice_spinand_table),
- did);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
- .detect = gigadevice_spinand_detect,
};
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
.id = SPINAND_MFR_GIGADEVICE,
.name = "GigaDevice",
+ .chips = gigadevice_spinand_table,
+ .nchips = ARRAY_SIZE(gigadevice_spinand_table),
.ops = &gigadevice_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 21def3f8fb36..0f900f3aa21a 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -99,7 +99,8 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info macronix_spinand_table[] = {
- SPINAND_INFO("MX35LF1GE4AB", 0x12,
+ SPINAND_INFO("MX35LF1GE4AB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -108,7 +109,8 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
- SPINAND_INFO("MX35LF2GE4AB", 0x22,
+ SPINAND_INFO("MX35LF2GE4AB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -118,33 +120,13 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
};
-static int macronix_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /*
- * Macronix SPI NAND read ID needs a dummy byte, so the first byte in
- * raw_id is garbage.
- */
- if (id[1] != SPINAND_MFR_MACRONIX)
- return 0;
-
- ret = spinand_match_and_init(spinand, macronix_spinand_table,
- ARRAY_SIZE(macronix_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
- .detect = macronix_spinand_detect,
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
.id = SPINAND_MFR_MACRONIX,
.name = "Macronix",
+ .chips = macronix_spinand_table,
+ .nchips = ARRAY_SIZE(macronix_spinand_table),
.ops = &macronix_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 7d7b1f7fcf71..5d370cfcdaaa 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -18,6 +18,16 @@
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+#define MICRON_CFG_CR BIT(0)
+
+/*
+ * As per the datasheet, die selection is done by bit 6 of the Die
+ * Select Register (address 0xD0).
+ */
+#define MICRON_DIE_SELECT_REG 0xD0
+
+#define MICRON_SELECT_DIE(x) ((x) << 6)
+
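Shifting the target into bit 6 means die 0 writes 0x00 and die 1 writes 0x40 to register 0xD0; micron_select_target() below rejects anything above target 1. A one-line check of the macro:

    #include <stdio.h>

    #define MICRON_SELECT_DIE(x) ((x) << 6)

    int main(void)
    {
        printf("die0=%#x die1=%#x\n",
               MICRON_SELECT_DIE(0), MICRON_SELECT_DIE(1));  /* die0=0 die1=0x40 */
        return 0;
    }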
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -34,38 +44,52 @@ static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
- region->offset = 64;
- region->length = 64;
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
return 0;
}
-static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
/* Reserve 2 bytes for the BBM. */
region->offset = 2;
- region->length = 62;
+ region->length = (mtd->oobsize / 2) - 2;
return 0;
}
-static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
- .ecc = mt29f2g01abagd_ooblayout_ecc,
- .free = mt29f2g01abagd_ooblayout_free,
+static const struct mtd_ooblayout_ops micron_8_ooblayout = {
+ .ecc = micron_8_ooblayout_ecc,
+ .free = micron_8_ooblayout_free,
};
-static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int micron_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ spinand->scratchbuf);
+
+ if (target > 1)
+ return -EINVAL;
+
+ *spinand->scratchbuf = MICRON_SELECT_DIE(target);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int micron_8_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
switch (status & MICRON_STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -91,43 +115,131 @@ static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info micron_spinand_table[] = {
- SPINAND_INFO("MT29F2G01ABAGD", 0x24,
+ /* M79A 2Gb 3.3V */
+ SPINAND_INFO("MT29F2G01ABAGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 2Gb 1.8V */
+ SPINAND_INFO("MT29F2G01ABBGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
- mt29f2g01abagd_ecc_get_status)),
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 3.3V */
+ SPINAND_INFO("MT29F1G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 1.8V */
+ SPINAND_INFO("MT29F1G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ADAGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 4Gb 1.8V */
+ SPINAND_INFO("MT29F4G01ABBFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 8Gb 3.3V */
+ SPINAND_INFO("MT29F8G01ADAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 8Gb 1.8V */
+ SPINAND_INFO("MT29F8G01ADBFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
};
-static int micron_spinand_detect(struct spinand_device *spinand)
+static int micron_spinand_init(struct spinand_device *spinand)
{
- u8 *id = spinand->id.data;
- int ret;
-
/*
- * Micron SPI NAND read ID need a dummy byte,
- * so the first byte in raw_id is dummy.
+	 * The M70A device series enables the Continuous Read feature at
+	 * power-up, which is not supported. Disable this bit to avoid any
+	 * possible failure.
*/
- if (id[1] != SPINAND_MFR_MICRON)
- return 0;
-
- ret = spinand_match_and_init(spinand, micron_spinand_table,
- ARRAY_SIZE(micron_spinand_table), id[2]);
- if (ret)
- return ret;
+ if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
+ return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
- return 1;
+ return 0;
}
static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
- .detect = micron_spinand_detect,
+ .init = micron_spinand_init,
};
const struct spinand_manufacturer micron_spinand_manufacturer = {
.id = SPINAND_MFR_MICRON,
.name = "Micron",
+ .chips = micron_spinand_table,
+ .nchips = ARRAY_SIZE(micron_spinand_table),
.ops = &micron_spinand_manuf_ops,
};
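
[Editor's sketch] For the dual-die M79A/M70A parts above, target selection is just a Set Feature write: MICRON_SELECT_DIE() shifts the die index into bit 6 of register 0xD0, so die 0 writes 0x00 and die 1 writes 0x40. A minimal restatement of what micron_select_target() does for the second die, using the driver's own macros:

/* Sketch: selecting die 1 amounts to writing 0x40 to register 0xD0. */
static int demo_select_die1(struct spinand_device *spinand)
{
	struct spi_mem_op op =
		SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
				       spinand->scratchbuf);

	*spinand->scratchbuf = MICRON_SELECT_DIE(1);	/* 1 << 6 == 0x40 */

	return spi_mem_exec_op(spinand->spimem, &op);
}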
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 52307681cbd0..519ade513c1f 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -97,7 +97,8 @@ static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
static const struct spinand_info paragon_spinand_table[] = {
- SPINAND_INFO("PN26G01A", 0xe1,
+ SPINAND_INFO("PN26G01A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -106,7 +107,8 @@ static const struct spinand_info paragon_spinand_table[] = {
0,
SPINAND_ECCINFO(&pn26g0xa_ooblayout,
pn26g0xa_ecc_get_status)),
- SPINAND_INFO("PN26G02A", 0xe2,
+ SPINAND_INFO("PN26G02A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2),
NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -117,31 +119,13 @@ static const struct spinand_info paragon_spinand_table[] = {
pn26g0xa_ecc_get_status)),
};
-static int paragon_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /* Read ID returns [0][MID][DID] */
-
- if (id[1] != SPINAND_MFR_PARAGON)
- return 0;
-
- ret = spinand_match_and_init(spinand, paragon_spinand_table,
- ARRAY_SIZE(paragon_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
- .detect = paragon_spinand_detect,
};
const struct spinand_manufacturer paragon_spinand_manufacturer = {
.id = SPINAND_MFR_PARAGON,
.name = "Paragon",
+ .chips = paragon_spinand_table,
+ .nchips = ARRAY_SIZE(paragon_spinand_table),
.ops = &paragon_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 0db5ee4e82af..bc801d83343e 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+/* Kioxia is the new name of Toshiba Memory. */
#define SPINAND_MFR_TOSHIBA 0x98
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
@@ -19,14 +20,26 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+static SPINAND_OP_VARIANTS(write_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+/*
+ * Backward compatibility for 1st generation Serial NAND devices
+ * which don't support the Quad Program Load operation.
+ */
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section > 0)
return -ERANGE;
@@ -37,8 +50,8 @@ static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
return 0;
}
-static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int tx58cxgxsxraix_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section > 0)
return -ERANGE;
@@ -50,13 +63,13 @@ static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
-static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = {
- .ecc = tc58cxgxsx_ooblayout_ecc,
- .free = tc58cxgxsx_ooblayout_free,
+static const struct mtd_ooblayout_ops tx58cxgxsxraix_ooblayout = {
+ .ecc = tx58cxgxsxraix_ooblayout_ecc,
+ .free = tx58cxgxsxraix_ooblayout_free,
};
-static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
@@ -94,105 +107,174 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info toshiba_spinand_table[] = {
- /* 3.3V 1Gb */
- SPINAND_INFO("TC58CVG0S3", 0xC2,
+ /* 3.3V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 2Gb */
- SPINAND_INFO("TC58CVG1S3", 0xCB,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 4Gb */
- SPINAND_INFO("TC58CVG2S0", 0xCD,
- NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
- NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
- 0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 4Gb */
- SPINAND_INFO("TC58CVG2S0", 0xED,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 1Gb */
- SPINAND_INFO("TC58CYG0S3", 0xB2,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 2Gb */
- SPINAND_INFO("TC58CYG1S3", 0xBB,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 4Gb */
- SPINAND_INFO("TC58CYG2S0", 0xBD,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
-};
-
-static int toshiba_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
/*
- * Toshiba SPI NAND read ID needs a dummy byte,
- * so the first byte in id is garbage.
+	 * 2nd generation serial NAND devices have HOLD_D, which is
+	 * equivalent to QE_BIT.
*/
- if (id[1] != SPINAND_MFR_TOSHIBA)
- return 0;
-
- ret = spinand_match_and_init(spinand, toshiba_spinand_table,
- ARRAY_SIZE(toshiba_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
+ /* 3.3V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CVG3S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CYG3S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+};
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
- .detect = toshiba_spinand_detect,
};
const struct spinand_manufacturer toshiba_spinand_manufacturer = {
.id = SPINAND_MFR_TOSHIBA,
.name = "Toshiba",
+ .chips = toshiba_spinand_table,
+ .nchips = ARRAY_SIZE(toshiba_spinand_table),
.ops = &toshiba_spinand_manuf_ops,
};
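
[Editor's sketch] The 2nd-generation entries above pair SPINAND_HAS_QE_BIT with the new x4 program-load variant lists; listing SPINAND_PROG_LOAD_X4 ahead of SPINAND_PROG_LOAD matters because the core keeps the first variant the controller can execute. A hedged sketch of that first-match selection (the real helper lives in the spinand core; spi_mem_supports_op() is the existing spi-mem capability check):

/*
 * Sketch: quad program load is used when the controller supports it,
 * otherwise the plain 1-bit PROG_LOAD listed after it is picked.
 */
static const struct spi_mem_op *
demo_select_op_variant(struct spinand_device *spinand,
		       const struct spinand_op_variants *variants)
{
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		if (spi_mem_supports_op(spinand->spimem, &variants->ops[i]))
			return &variants->ops[i];
	}

	return NULL;
}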
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index a6c17e0cace8..76684428354e 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -75,7 +75,8 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
}
static const struct spinand_info winbond_spinand_table[] = {
- SPINAND_INFO("W25M02GV", 0xAB,
+ SPINAND_INFO("W25M02GV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -84,7 +85,8 @@ static const struct spinand_info winbond_spinand_table[] = {
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
- SPINAND_INFO("W25N01GV", 0xAA,
+ SPINAND_INFO("W25N01GV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -94,31 +96,6 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
};
-/**
- * winbond_spinand_detect - initialize device related part in spinand_device
- * struct if it is a Winbond device.
- * @spinand: SPI NAND device structure
- */
-static int winbond_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /*
- * Winbond SPI NAND read ID need a dummy byte,
- * so the first byte in raw_id is dummy.
- */
- if (id[1] != SPINAND_MFR_WINBOND)
- return 0;
-
- ret = spinand_match_and_init(spinand, winbond_spinand_table,
- ARRAY_SIZE(winbond_spinand_table), id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static int winbond_spinand_init(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
@@ -138,12 +115,13 @@ static int winbond_spinand_init(struct spinand_device *spinand)
}
static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
- .detect = winbond_spinand_detect,
.init = winbond_spinand_init,
};
const struct spinand_manufacturer winbond_spinand_manufacturer = {
.id = SPINAND_MFR_WINBOND,
.name = "Winbond",
+ .chips = winbond_spinand_table,
+ .nchips = ARRAY_SIZE(winbond_spinand_table),
.ops = &winbond_spinand_manuf_ops,
};
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index c1eda67d1ad2..6e816eafb312 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -24,87 +24,6 @@ config MTD_SPI_NOR_USE_4K_SECTORS
Please note that some tools/drivers/filesystems may not work with
4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
-config SPI_ASPEED_SMC
- tristate "Aspeed flash controllers in SPI mode"
- depends on ARCH_ASPEED || COMPILE_TEST
- depends on HAS_IOMEM && OF
- help
- This enables support for the Firmware Memory controller (FMC)
- in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
- and support for the SPI flash memory controller (SPI) for
- the host firmware. The implementation only supports SPI NOR.
-
-config SPI_CADENCE_QUADSPI
- tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || ARM64 || COMPILE_TEST)
- help
- Enable support for the Cadence Quad SPI Flash controller.
-
- Cadence QSPI is a specialized controller for connecting an SPI
- Flash over 1/2/4-bit wide bus. Enable this option if you have a
- device with a Cadence QSPI controller and want to access the
- Flash as an MTD device.
-
-config SPI_HISI_SFC
- tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)"
- depends on ARCH_HISI || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables support for HiSilicon FMC SPI-NOR flash controller.
-
-config SPI_MTK_QUADSPI
- tristate "MediaTek Quad SPI controller"
- depends on HAS_IOMEM
- help
- This enables support for the Quad SPI controller in master mode.
- This controller does not support generic SPI. It only supports
- SPI NOR.
-
-config SPI_NXP_SPIFI
- tristate "NXP SPI Flash Interface (SPIFI)"
- depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
- depends on HAS_IOMEM
- help
- Enable support for the NXP LPC SPI Flash Interface controller.
-
- SPIFI is a specialized controller for connecting serial SPI
- Flash. Enable this option if you have a device with a SPIFI
- controller and want to access the Flash as a mtd device.
-
-config SPI_INTEL_SPI
- tristate
-
-config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
- depends on X86 && PCI
- select SPI_INTEL_SPI
- help
- This enables PCI support for the Intel PCH/PCU SPI controller in
- master mode. This controller is present in modern Intel hardware
- and is used to hold BIOS and other persistent settings. Using
- this driver it is possible to upgrade BIOS directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-pci.
-
-config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
- depends on X86
- select SPI_INTEL_SPI
- help
- This enables platform support for the Intel PCH/PCU SPI
- controller in master mode. This controller is present in modern
- Intel hardware and is used to hold BIOS and other persistent
- settings. Using this driver it is possible to upgrade BIOS
- directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-platform.
+source "drivers/mtd/spi-nor/controllers/Kconfig"
endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 9c5ed03cdc19..653923896205 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,10 +1,22 @@
# SPDX-License-Identifier: GPL-2.0
+
+spi-nor-objs := core.o sfdp.o
+spi-nor-objs += atmel.o
+spi-nor-objs += catalyst.o
+spi-nor-objs += eon.o
+spi-nor-objs += esmt.o
+spi-nor-objs += everspin.o
+spi-nor-objs += fujitsu.o
+spi-nor-objs += gigadevice.o
+spi-nor-objs += intel.o
+spi-nor-objs += issi.o
+spi-nor-objs += macronix.o
+spi-nor-objs += micron-st.o
+spi-nor-objs += spansion.o
+spi-nor-objs += sst.o
+spi-nor-objs += winbond.o
+spi-nor-objs += xilinx.o
+spi-nor-objs += xmc.o
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
-obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
-obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
-obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
-obj-$(CONFIG_SPI_MTK_QUADSPI) += mtk-quadspi.o
-obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
-obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
-obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
-obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
+
+obj-$(CONFIG_MTD_SPI_NOR) += controllers/
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
new file mode 100644
index 000000000000..3f5f21a473a6
--- /dev/null
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info atmel_parts[] = {
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+
+ { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+};
+
+static void atmel_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static const struct spi_nor_fixups atmel_fixups = {
+ .default_init = atmel_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_atmel = {
+ .name = "atmel",
+ .parts = atmel_parts,
+ .nparts = ARRAY_SIZE(atmel_parts),
+ .fixups = &atmel_fixups,
+};
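
[Editor's sketch] atmel.c shows the pattern every new per-vendor spi-nor file follows: a flash_info table plus an optional spi_nor_fixups bundle whose hooks the core calls at defined points, default_init being the earliest. A hedged sketch with a hypothetical "acme" vendor (part name, JEDEC ID and geometry are made up for illustration):

#include <linux/mtd/spi-nor.h>

#include "core.h"

static const struct flash_info acme_parts[] = {
	/* Hypothetical part: 128 x 64 KiB sectors, 4K-erase capable. */
	{ "acme8m", INFO(0x123456, 0, 64 * 1024, 128, SECT_4K) },
};

static void acme_default_init(struct spi_nor *nor)
{
	nor->flags |= SNOR_F_HAS_LOCK;	/* example: parts power up locked */
}

static const struct spi_nor_fixups acme_fixups = {
	.default_init = acme_default_init,
};

const struct spi_nor_manufacturer spi_nor_acme = {
	.name = "acme",
	.parts = acme_parts,
	.nparts = ARRAY_SIZE(acme_parts),
	.fixups = &acme_fixups,
};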
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
new file mode 100644
index 000000000000..011b83e99e95
--- /dev/null
+++ b/drivers/mtd/spi-nor/catalyst.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info catalyst_parts[] = {
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO(16, 8, 16, 1,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c03", CAT25_INFO(32, 8, 16, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c09", CAT25_INFO(128, 8, 32, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c17", CAT25_INFO(256, 8, 32, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+};
+
+const struct spi_nor_manufacturer spi_nor_catalyst = {
+ .name = "catalyst",
+ .parts = catalyst_parts,
+ .nparts = ARRAY_SIZE(catalyst_parts),
+};
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
new file mode 100644
index 000000000000..10b86660b821
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SPI_ASPEED_SMC
+ tristate "Aspeed flash controllers in SPI mode"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ This enables support for the Firmware Memory controller (FMC)
+ in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
+ and support for the SPI flash memory controller (SPI) for
+ the host firmware. The implementation only supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+ Cadence QSPI is a specialized controller for connecting an SPI
+	  Flash over a 1/2/4-bit wide bus. Enable this option if you have a
+ device with a Cadence QSPI controller and want to access the
+ Flash as an MTD device.
+
+config SPI_HISI_SFC
+	tristate "Hisilicon FMC SPI-NOR Flash Controller (SFC)"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for HiSilicon FMC SPI-NOR flash controller.
+
+config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enable support for the NXP LPC SPI Flash Interface controller.
+
+ SPIFI is a specialized controller for connecting serial SPI
+ Flash. Enable this option if you have a device with a SPIFI
+	  controller and want to access the Flash as an MTD device.
+
+config SPI_INTEL_SPI
+ tristate
+
+config SPI_INTEL_SPI_PCI
+ tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ depends on X86 && PCI
+ select SPI_INTEL_SPI
+ help
+ This enables PCI support for the Intel PCH/PCU SPI controller in
+ master mode. This controller is present in modern Intel hardware
+ and is used to hold BIOS and other persistent settings. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-pci.
+
+config SPI_INTEL_SPI_PLATFORM
+ tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+ depends on X86
+ select SPI_INTEL_SPI
+ help
+ This enables platform support for the Intel PCH/PCU SPI
+ controller in master mode. This controller is present in modern
+ Intel hardware and is used to hold BIOS and other persistent
+ settings. Using this driver it is possible to upgrade BIOS
+ directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-platform.
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
new file mode 100644
index 000000000000..46e6fbe586e3
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
+obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
+obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
+obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
+obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
+obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
index 395127349aa8..ae85e4c0e114 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
@@ -109,7 +109,7 @@ struct aspeed_smc_controller {
void __iomem *ahb_base; /* per-chip windows resource */
u32 ahb_window_size; /* full mapping window size */
- struct aspeed_smc_chip *chips[0]; /* pointers to attached chips */
+ struct aspeed_smc_chip *chips[]; /* pointers to attached chips */
};
/*
@@ -354,7 +354,7 @@ static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr)
default:
WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n",
nor->addr_width);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
cmdaddr = addr & 0xFFFFFF;
cmdaddr |= cmd << 24;
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
index 494dcab4aaaa..494dcab4aaaa 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 6c7a4118752e..6c7a4118752e 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
index 81329f680bec..81329f680bec 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
diff --git a/drivers/mtd/spi-nor/intel-spi-platform.c b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
index f80f1086f928..f80f1086f928 100644
--- a/drivers/mtd/spi-nor/intel-spi-platform.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c
index 61d2a0ad2131..61d2a0ad2131 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.c
diff --git a/drivers/mtd/spi-nor/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h
index e2f41b8827bf..e2f41b8827bf 100644
--- a/drivers/mtd/spi-nor/intel-spi.h
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.h
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 9a5b1a7c636a..9a5b1a7c636a 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
new file mode 100644
index 000000000000..cc68ea84318e
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.c
@@ -0,0 +1,3466 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task_stack.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/* Define max times to check status register before we give up. */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
+
+#define SPI_NOR_MAX_ADDR_WIDTH 4
+
+/**
+ * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
+ * transfer
+ * @nor: pointer to 'struct spi_nor'
+ * @op: pointer to 'struct spi_mem_op' template for transfer
+ *
+ * If we have to use the bounce buffer, the data field in @op will be updated.
+ *
+ * Return: true if the bounce buffer is needed, false if not
+ */
+static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
+{
+ /* op->data.buf.in occupies the same memory as op->data.buf.out */
+ if (object_is_on_stack(op->data.buf.in) ||
+ !virt_addr_valid(op->data.buf.in)) {
+ if (op->data.nbytes > nor->bouncebuf_size)
+ op->data.nbytes = nor->bouncebuf_size;
+ op->data.buf.in = nor->bouncebuf;
+ return true;
+ }
+
+ return false;
+}
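+
+/*
+ * [Editor's sketch, not part of the patch] The two tests above mirror
+ * what spi-mem can actually DMA-map: an on-stack buffer trips
+ * object_is_on_stack(), a non-linear mapping fails virt_addr_valid(),
+ * and either case gets bounced through nor->bouncebuf (shrinking
+ * op->data.nbytes to the bounce buffer size if needed). Illustration
+ * of the triggering case:
+ *
+ *	static ssize_t demo_read_first_bytes(struct spi_nor *nor)
+ *	{
+ *		u8 buf[16];	// on the stack, not DMA-able
+ *
+ *		// spi_nor_spimem_read_data() below redirects the
+ *		// transfer through nor->bouncebuf and memcpy()s the
+ *		// result back into buf.
+ *		return spi_nor_read_data(nor, 0, sizeof(buf), buf);
+ *	}
+ */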
+
+/**
+ * spi_nor_spimem_exec_op() - execute a memory operation
+ * @nor: pointer to 'struct spi_nor'
+ * @op: pointer to 'struct spi_mem_op' template for transfer
+ *
+ * Return: 0 on success, -error otherwise.
+ */
+static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
+{
+ int error;
+
+ error = spi_mem_adjust_op_size(nor->spimem, op);
+ if (error)
+ return error;
+
+ return spi_mem_exec_op(nor->spimem, op);
+}
+
+/**
+ * spi_nor_spimem_read_data() - read data from flash's memory region via
+ * spi-mem
+ * @nor: pointer to 'struct spi_nor'
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
+ size_t len, u8 *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+ SPI_MEM_OP_DATA_IN(len, buf, 1));
+ bool usebouncebuf;
+ ssize_t nbytes;
+ int error;
+
+ /* get transfer protocols. */
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ op.dummy.buswidth = op.addr.buswidth;
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+
+ usebouncebuf = spi_nor_spimem_bounce(nor, &op);
+
+ if (nor->dirmap.rdesc) {
+ nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
+ op.data.nbytes, op.data.buf.in);
+ } else {
+ error = spi_nor_spimem_exec_op(nor, &op);
+ if (error)
+ return error;
+ nbytes = op.data.nbytes;
+ }
+
+ if (usebouncebuf && nbytes > 0)
+ memcpy(buf, op.data.buf.in, nbytes);
+
+ return nbytes;
+}
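+
+/*
+ * [Editor's sketch, not part of the patch] Worked example of the
+ * dummy-cycle conversion above: spi-mem counts dummies in bytes on the
+ * bus, so the byte count depends on the dummy phase's bus width. Eight
+ * dummy clock cycles on a 4-bit (quad) bus carry 8 * 4 = 32 bits,
+ * i.e. (8 * 4) / 8 == 4 bytes; the same 8 cycles on a 1-bit bus are
+ * only 1 byte:
+ *
+ *	static unsigned int demo_dummy_cycles_to_nbytes(unsigned int cycles,
+ *							unsigned int buswidth)
+ *	{
+ *		return (cycles * buswidth) / 8;
+ *	}
+ */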
+
+/**
+ * spi_nor_read_data() - read data from flash memory
+ * @nor: pointer to 'struct spi_nor'
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
+{
+ if (nor->spimem)
+ return spi_nor_spimem_read_data(nor, from, len, buf);
+
+ return nor->controller_ops->read(nor, from, len, buf);
+}
+
+/**
+ * spi_nor_spimem_write_data() - write data to flash memory via
+ * spi-mem
+ * @nor: pointer to 'struct spi_nor'
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
+ size_t len, const u8 *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ ssize_t nbytes;
+ int error;
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op.addr.nbytes = 0;
+
+ if (spi_nor_spimem_bounce(nor, &op))
+ memcpy(nor->bouncebuf, buf, op.data.nbytes);
+
+ if (nor->dirmap.wdesc) {
+ nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
+ op.data.nbytes, op.data.buf.out);
+ } else {
+ error = spi_nor_spimem_exec_op(nor, &op);
+ if (error)
+ return error;
+ nbytes = op.data.nbytes;
+ }
+
+ return nbytes;
+}
+
+/**
+ * spi_nor_write_data() - write data to flash memory
+ * @nor: pointer to 'struct spi_nor'
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf)
+{
+ if (nor->spimem)
+ return spi_nor_spimem_write_data(nor, to, len, buf);
+
+ return nor->controller_ops->write(nor, to, len, buf);
+}
+
+/**
+ * spi_nor_write_enable() - Set write enable latch with Write Enable command.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_write_disable() - Send Write Disable instruction to the chip.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_disable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_sr() - Read the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
+ sr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_fsr() - Read the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'
+ * @fsr: pointer to a DMA-able buffer where the value of the
+ * Flag Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, fsr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
+ fsr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_cr() - Read the Configuration Register using the
+ * SPINOR_OP_RDCR (35h) command.
+ * @nor: pointer to 'struct spi_nor'
+ * @cr: pointer to a DMA-able buffer where the value of the
+ * Configuration Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, cr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
+ SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
+/**
+ * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
+ * flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ nor->bouncebuf[0] = enable << 7;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_write_ear() - Write Extended Address Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @ear: value to write to the Extended Address Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
+{
+ int ret;
+
+ nor->bouncebuf[0] = ear;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d writing EAR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
+ sr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
+ * the flash is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_xsr_ready(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return !!(nor->bouncebuf[0] & XSR_RDY);
+}
+
+/**
+ * spi_nor_clear_sr() - Clear the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_sr(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing SR\n", ret);
+}
+
+/**
+ * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
+ * for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_sr_ready(struct spi_nor *nor)
+{
+ int ret = spi_nor_read_sr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_USE_CLSR &&
+ nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
+ dev_err(nor->dev, "Erase Error occurred\n");
+ else
+ dev_err(nor->dev, "Programming Error occurred\n");
+
+ spi_nor_clear_sr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return !(nor->bouncebuf[0] & SR_WIP);
+}
+
+/**
+ * spi_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_fsr(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
+}
+
+/**
+ * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
+ * ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+ if (nor->bouncebuf[0] & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (nor->bouncebuf[0] & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ spi_nor_clear_fsr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return nor->bouncebuf[0] & FSR_READY;
+}
+
+/**
+ * spi_nor_ready() - Query the flash to see if it is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+
+ if (nor->flags & SNOR_F_READY_XSR_RDY)
+ sr = spi_nor_xsr_ready(nor);
+ else
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
+}
+
+/**
+ * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
+ * Status Register until the flash is ready, or a timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ * @timeout_jiffies: jiffies to wait until timeout.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout_jiffies)
+{
+ unsigned long deadline;
+ int timeout = 0, ret;
+
+ deadline = jiffies + timeout_jiffies;
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
+ cond_resched();
+ }
+
+ dev_dbg(nor->dev, "flash operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
+ * flash to be ready, or until a timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+
+/**
+ * spi_nor_write_sr() - Write the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to DMA-able buffer to write to the Status Register.
+ * @len: number of bytes to write to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
+ sr, len);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
+ * ensure that the byte written matches the received value.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+
+ nor->bouncebuf[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != sr1) {
+ dev_dbg(nor->dev, "SR1: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
+ * Status Register 2 in one shot. Ensure that the byte written in the Status
+ * Register 1 matches the received value, and that the 16-bit write did not
+ * affect what was already in the Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register 1.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 cr_written;
+
+ /* Make sure we don't overwrite the contents of Status Register 2. */
+ if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+ } else if (nor->params->quad_enable) {
+ /*
+ * If the Status Register 2 Read command (35h) is not
+ * supported, we should at least be sure we don't
+ * change the value of the SR2 Quad Enable bit.
+ *
+ * We can safely assume that when the Quad Enable method is
+ * set, the value of the QE bit is one, as a consequence of the
+ * nor->params->quad_enable() call.
+ *
+ * We can safely assume that the Quad Enable bit is present in
+ * the Status Register 2 at BIT(1). According to the JESD216
+ * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+ * Write Status (01h) command is available just for the cases
+ * in which the QE bit is described in SR2 at BIT(1).
+ */
+ sr_cr[1] = SR2_QUAD_EN_BIT1;
+ } else {
+ sr_cr[1] = 0;
+ }
+
+ sr_cr[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ cr_written = sr_cr[1];
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr_written != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
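+
+/*
+ * [Editor's sketch, not part of the patch] Concretely, the 16-bit path
+ * above always sends two bytes with a single WRSR (01h): sr_cr[0] is
+ * the new SR1 and sr_cr[1] is whatever must be preserved in SR2. For a
+ * part with SNOR_F_NO_READ_CR set and a quad_enable hook in place, SR2
+ * cannot be read back, so QE is assumed set and re-sent:
+ *
+ *	static int demo_write_sr1_keep_qe(struct spi_nor *nor, u8 new_sr1)
+ *	{
+ *		u8 *sr_cr = nor->bouncebuf;
+ *
+ *		sr_cr[0] = new_sr1;		// requested SR1 value
+ *		sr_cr[1] = SR2_QUAD_EN_BIT1;	// BIT(1): keep QE set
+ *
+ *		return spi_nor_write_sr(nor, sr_cr, 2);	// one 16-bit WRSR
+ *	}
+ */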
+
+/**
+ * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
+ * Configuration Register in one shot. Ensure that the byte written in the
+ * Configuration Register match the received value, and that the 16-bit Write
+ * did not affect what was already in the Status Register 1.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @cr: byte value to be written to the Configuration Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 sr_written;
+
+ /* Keep the current value of the Status Register 1. */
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ sr_cr[1] = cr;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ sr_written = sr_cr[0];
+
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr_written != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
+ * the byte written matches the received value without affecting other bits in
+ * Status Registers 1 and 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ if (nor->flags & SNOR_F_HAS_16BIT_SR)
+ return spi_nor_write_16bit_sr_and_check(nor, sr1);
+
+ return spi_nor_write_sr1_and_check(nor, sr1);
+}
+
+/**
+ * spi_nor_write_sr2() - Write the Status Register 2 using the
+ * SPINOR_OP_WRSR2 (3eh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR2\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_read_sr2() - Read the Status Register 2 using the
+ * SPINOR_OP_RDSR2 (3fh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer where the value of the
+ * Status Register 2 will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
+ sr2, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR2\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_erase_chip() - Erase the entire flash memory.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_chip(struct spi_nor *nor)
+{
+ int ret;
+
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d erasing chip\n", ret);
+
+ return ret;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
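+	/*
+	 * Worked example: SPINOR_OP_READ_FAST (0Bh) is mapped to
+	 * SPINOR_OP_READ_FAST_4B (0Ch) by the table above.
+	 */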
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
+{
+ return !!nor->params->erase_map.uniform_erase_type;
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
+{
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+ if (!spi_nor_has_uniform_erase(nor)) {
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ struct spi_nor_erase_type *erase;
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ erase = &map->erase_type[i];
+ erase->opcode =
+ spi_nor_convert_3to4_erase(erase->opcode);
+ }
+ }
+}
+
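+/**
+ * spi_nor_lock_and_prep() - lock the flash and prepare it for an operation.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */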
+int spi_nor_lock_and_prep(struct spi_nor *nor)
+{
+ int ret = 0;
+
+ mutex_lock(&nor->lock);
+
+ if (nor->controller_ops && nor->controller_ops->prepare) {
+ ret = nor->controller_ops->prepare(nor);
+ if (ret) {
+ mutex_unlock(&nor->lock);
+ return ret;
+ }
+ }
+ return ret;
+}
+
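+/**
+ * spi_nor_unlock_and_unprep() - undo the actions of spi_nor_lock_and_prep().
+ * @nor: pointer to 'struct spi_nor'.
+ */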
+void spi_nor_unlock_and_unprep(struct spi_nor *nor)
+{
+ if (nor->controller_ops && nor->controller_ops->unprepare)
+ nor->controller_ops->unprepare(nor);
+ mutex_unlock(&nor->lock);
+}
+
+static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
+{
+ if (!nor->params->convert_addr)
+ return addr;
+
+ return nor->params->convert_addr(nor, addr);
+}
+
+/*
+ * Initiate the erasure of a single sector
+ */
+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+{
+ int i;
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ return spi_mem_exec_op(nor->spimem, &op);
+ } else if (nor->controller_ops->erase) {
+ return nor->controller_ops->erase(nor, addr);
+ }
+
+	/*
+	 * Default implementation, if the driver doesn't have specialized HW
+	 * control.
+	 */
+ for (i = nor->addr_width - 1; i >= 0; i--) {
+ nor->bouncebuf[i] = addr & 0xff;
+ addr >>= 8;
+ }
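+	/*
+	 * The loop above serializes the address MSB first, e.g. with
+	 * addr_width = 3, address 0x123456 is sent as 12h 34h 56h.
+	 */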
+
+ return nor->controller_ops->write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_width);
+}
+
+/**
+ * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @dividend: dividend value
+ * @remainder: pointer to u32 remainder (will be updated)
+ *
+ * Return: the result of the division
+ */
+static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
+ u64 dividend, u32 *remainder)
+{
+ /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
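+	/*
+	 * Worked example: a 64 KiB erase type has size_shift = 16 and
+	 * size_mask = 0xffff, so dividend = 0x30000 yields 3 with
+	 * *remainder = 0.
+	 */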
+ *remainder = (u32)dividend & erase->size_mask;
+ return dividend >> erase->size_shift;
+}
+
+/**
+ * spi_nor_find_best_erase_type() - find the best erase type for the given
+ * offset in the serial flash memory and the
+ * number of bytes to erase. The region in
+ * which the address fits is expected to be
+ * provided.
+ * @map: the erase map of the SPI NOR
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Return: a pointer to the best fitted erase type, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+ const struct spi_nor_erase_region *region,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_type *erase;
+ u32 rem;
+ int i;
+ u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ /*
+ * Erase types are ordered by size, with the smallest erase type at
+ * index 0.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ /* Does the erase region support the tested erase type? */
+ if (!(erase_mask & BIT(i)))
+ continue;
+
+ erase = &map->erase_type[i];
+
+ /* Don't erase more than what the user has asked for. */
+ if (erase->size > len)
+ continue;
+
+ /* Alignment is not mandatory for overlaid regions */
+ if (region->offset & SNOR_OVERLAID_REGION)
+ return erase;
+
+ spi_nor_div_by_erase_size(erase, addr, &rem);
+ if (rem)
+ continue;
+ else
+ return erase;
+ }
+
+ return NULL;
+}
+
+static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
+{
+ return region->offset & SNOR_LAST_REGION;
+}
+
+static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
+{
+ return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
+}
+
+/**
+ * spi_nor_region_next() - get the next spi nor region
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ *
+ * Return: the next spi nor region, or NULL if this is the last region.
+ */
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region)
+{
+ if (spi_nor_region_is_last(region))
+ return NULL;
+ region++;
+ return region;
+}
+
+/**
+ * spi_nor_find_erase_region() - find the region of the serial flash memory in
+ * which the offset fits
+ * @map: the erase map of the SPI NOR
+ * @addr: offset in the serial flash memory
+ *
+ * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_region *
+spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ u64 region_end = region_start + region->size;
+
+ while (addr < region_start || addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ return ERR_PTR(-EINVAL);
+
+ region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ region_end = region_start + region->size;
+ }
+
+ return region;
+}
+
+/**
+ * spi_nor_init_erase_cmd() - initialize an erase command
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ *
+ * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_command *
+spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase)
+{
+ struct spi_nor_erase_command *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&cmd->list);
+ cmd->opcode = erase->opcode;
+ cmd->count = 1;
+
+ if (region->offset & SNOR_OVERLAID_REGION)
+ cmd->size = region->size;
+ else
+ cmd->size = erase->size;
+
+ return cmd;
+}
+
+/**
+ * spi_nor_destroy_erase_cmd_list() - destroy erase command list
+ * @erase_list: list of erase commands
+ */
+static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
+{
+ struct spi_nor_erase_command *cmd, *next;
+
+ list_for_each_entry_safe(cmd, next, erase_list, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+}
+
+/**
+ * spi_nor_init_erase_cmd_list() - initialize erase command list
+ * @nor: pointer to a 'struct spi_nor'
+ * @erase_list: list of erase commands to be executed once we validate that the
+ * erase can be performed
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Builds the list of best fitted erase commands and verifies if the erase can
+ * be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ struct list_head *erase_list,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase, *prev_erase = NULL;
+ struct spi_nor_erase_region *region;
+ struct spi_nor_erase_command *cmd = NULL;
+ u64 region_end;
+ int ret = -EINVAL;
+
+ region = spi_nor_find_erase_region(map, addr);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ region_end = spi_nor_region_end(region);
+
+ while (len) {
+ erase = spi_nor_find_best_erase_type(map, region, addr, len);
+ if (!erase)
+ goto destroy_erase_cmd_list;
+
+ if (prev_erase != erase ||
+ region->offset & SNOR_OVERLAID_REGION) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
+
+ addr += cmd->size;
+ len -= cmd->size;
+
+ if (len && addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ goto destroy_erase_cmd_list;
+ region_end = spi_nor_region_end(region);
+ }
+
+ prev_erase = erase;
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(erase_list);
+ return ret;
+}
+
+/**
+ * spi_nor_erase_multi_sectors() - perform a non-uniform erase
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Build a list of best fitted erase commands and execute it once we validate
+ * that the erase can be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
+{
+ LIST_HEAD(erase_list);
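+	/* e.g. size = 4096 gives size_shift = 12 and size_mask = 0xfff. */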
+ struct spi_nor_erase_command *cmd, *next;
+ int ret;
+
+ ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(cmd, next, &erase_list, list) {
+ nor->erase_opcode = cmd->opcode;
+ while (cmd->count) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ addr += cmd->size;
+ cmd->count--;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+ }
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(&erase_list);
+ return ret;
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may extend across
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ uint32_t rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+ }
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ unsigned long timeout;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_erase_chip(nor);
+ if (ret)
+ goto erase_err;
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(mtd->size / SZ_2M));
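+		/* e.g. a 16 MiB flash waits up to 8x the 2 MiB baseline. */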
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ if (ret)
+ goto erase_err;
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
+ } else if (spi_nor_has_uniform_erase(nor)) {
+ while (len) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto erase_err;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+ }
+
+ /* erase multiple sectors */
+ } else {
+ ret = spi_nor_erase_multi_sectors(nor, addr, len);
+ if (ret)
+ goto erase_err;
+ }
+
+ ret = spi_nor_write_disable(nor);
+
+erase_err:
+ spi_nor_unlock_and_unprep(nor);
+
+ return ret;
+}
+
+static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
+{
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
+ return mask | SR_BP3_BIT6;
+
+ if (nor->flags & SNOR_F_HAS_4BIT_BP)
+ return mask | SR_BP3;
+
+ return mask;
+}
+
+static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ return SR_TB_BIT6;
+ else
+ return SR_TB_BIT5;
+}
+
+static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
+{
+ unsigned int bp_slots, bp_slots_needed;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+
+	/* One slot is reserved for "protect none" and one for "protect all". */
+ bp_slots = (1 << hweight8(mask)) - 2;
+ bp_slots_needed = ilog2(nor->info->n_sectors);
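+	/*
+	 * Example (assumed geometry): an 8 MiB flash with 64 KiB sectors has
+	 * n_sectors = 128, so bp_slots_needed = 7. With the classic three BP
+	 * bits, bp_slots = 6 and the minimum protected length doubles to two
+	 * sectors (128 KiB).
+	 */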
+
+ if (bp_slots_needed > bp_slots)
+ return nor->info->sector_size <<
+ (bp_slots_needed - bp_slots);
+ else
+ return nor->info->sector_size;
+}
+
+static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 bp, val = sr & mask;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
+ val = (val & ~SR_BP3_BIT6) | SR_BP3;
+
+ bp = val >> SR_BP_SHIFT;
+
+ if (!bp) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ return;
+ }
+
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ *len = min_prot_len << (bp - 1);
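+	/*
+	 * e.g. with a 128 KiB minimum granule, bp = 5 protects
+	 * 128 KiB << 4 = 2 MiB.
+	 */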
+
+ if (*len > mtd->size)
+ *len = mtd->size;
+
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
+ *ofs = 0;
+ else
+ *ofs = mtd->size - *len;
+}
+
+/*
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
+ */
+static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
+ uint64_t len, u8 sr, bool locked)
+{
+ loff_t lock_offs;
+ uint64_t lock_len;
+
+ if (!len)
+ return 1;
+
+ spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
+
+ if (locked)
+ /* Requested range is a sub-range of locked range */
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+ else
+ /* Requested range does not overlap with locked range */
+ return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
+
+static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
+ * register (SR). Does not support these features found in newer SR bitfields:
+ * - SEC: sector/block protect - only handle SEC=0 (block protect)
+ * - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Support for the following is provided conditionally for some flash:
+ * - TB: top/bottom protect
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
+ * --------------------------------------------------------------------------
+ * X | X | 0 | 0 | 0 | NONE | NONE
+ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
+ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
+ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
+ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
+ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
+ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
+ * X | X | 1 | 1 | 1 | 8 MB | ALL
+ * ------|-------|-------|-------|-------|---------------|-------------------
+ * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
+ * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
+ * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
+ * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
+ * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
+ * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ int ret, status_old, status_new;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
+
+ /* If nothing in our range is unlocked, we don't need to do anything */
+ if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is unlocked, we can't use 'bottom' protection */
+ if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
+ can_be_bottom = false;
+
+ /* If anything above us is unlocked, we can't use 'top' protection */
+ if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_top = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should end up locked */
+ if (use_top)
+ lock_len = mtd->size - ofs;
+ else
+ lock_len = ofs + len;
+
+ if (lock_len == mtd->size) {
+ val = mask;
+ } else {
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+ val = pow << SR_BP_SHIFT;
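+		/*
+		 * Example (w25q64fw table above): locking the top 2 MiB gives
+		 * lock_len = 2 MiB with min_prot_len = 128 KiB, so
+		 * pow = 21 - 17 + 1 = 5 and BP2..BP0 = 101b, matching the
+		 * "Upper 1/4" row.
+		 */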
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+ val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+ if (val & ~mask)
+ return -EINVAL;
+
+ /* Don't "lock" with no region! */
+ if (!(val & mask))
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~tb_mask) | val;
+
+ /* Disallow further writes if WP pin is asserted */
+ status_new |= SR_SRWD;
+
+ if (!use_top)
+ status_new |= tb_mask;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & mask) < (status_old & mask))
+ return -EINVAL;
+
+ return spi_nor_write_sr_and_check(nor, status_new);
+}
+
+/*
+ * Unlock a region of the flash. See spi_nor_sr_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ int ret, status_old, status_new;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
+
+ /* If nothing in our range is locked, we don't need to do anything */
+ if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is locked, we can't use 'top' protection */
+ if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
+ can_be_top = false;
+
+ /* If anything above us is locked, we can't use 'bottom' protection */
+ if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_bottom = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should remain locked */
+ if (use_top)
+ lock_len = mtd->size - (ofs + len);
+ else
+ lock_len = ofs;
+
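+	/*
+	 * Example (same part as in spi_nor_sr_lock() above): unlocking the
+	 * bottom 6 MiB leaves lock_len = 2 MiB at the top, which again yields
+	 * BP2..BP0 = 101b ("Upper 1/4").
+	 */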
+ if (lock_len == 0) {
+ val = 0; /* fully unlocked */
+ } else {
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+ val = pow << SR_BP_SHIFT;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+ val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+ /* Some power-of-two sizes are not supported */
+ if (val & ~mask)
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~tb_mask) | val;
+
+ /* Don't protect status register if we're fully unlocked */
+ if (lock_len == 0)
+ status_new &= ~SR_SRWD;
+
+ if (!use_top)
+ status_new |= tb_mask;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & mask) > (status_old & mask))
+ return -EINVAL;
+
+ return spi_nor_write_sr_and_check(nor, status_new);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
+ * for more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
+}
+
+static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
+ .lock = spi_nor_sr_lock,
+ .unlock = spi_nor_sr_unlock,
+ .is_locked = spi_nor_sr_is_locked,
+};
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->lock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->unlock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->is_locked(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+/**
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+ return 0;
+
+ nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
+
+ return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
+ * Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
+
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+ return 0;
+
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
+ return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register 2.
+ *
+ * This is one of the procedures to set the QE bit described in the SFDP
+ * (JESD216 rev B) specification, but no manufacturer using this procedure
+ * has been identified yet, hence the name of the function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
+{
+ u8 *sr2 = nor->bouncebuf;
+ int ret;
+ u8 sr2_written;
+
+ /* Check current Quad Enable bit value. */
+ ret = spi_nor_read_sr2(nor, sr2);
+ if (ret)
+ return ret;
+ if (*sr2 & SR2_QUAD_EN_BIT7)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ *sr2 |= SR2_QUAD_EN_BIT7;
+
+ ret = spi_nor_write_sr2(nor, sr2);
+ if (ret)
+ return ret;
+
+ sr2_written = *sr2;
+
+ /* Read back and check it. */
+ ret = spi_nor_read_sr2(nor, sr2);
+ if (ret)
+ return ret;
+
+ if (*sr2 != sr2_written) {
+ dev_dbg(nor->dev, "SR2: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct spi_nor_manufacturer *manufacturers[] = {
+ &spi_nor_atmel,
+ &spi_nor_catalyst,
+ &spi_nor_eon,
+ &spi_nor_esmt,
+ &spi_nor_everspin,
+ &spi_nor_fujitsu,
+ &spi_nor_gigadevice,
+ &spi_nor_intel,
+ &spi_nor_issi,
+ &spi_nor_macronix,
+ &spi_nor_micron,
+ &spi_nor_st,
+ &spi_nor_spansion,
+ &spi_nor_sst,
+ &spi_nor_winbond,
+ &spi_nor_xilinx,
+ &spi_nor_xmc,
+};
+
+static const struct flash_info *
+spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
+ const u8 *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < nparts; i++) {
+ if (parts[i].id_len &&
+ !memcmp(parts[i].id, id, parts[i].id_len))
+ return &parts[i];
+ }
+
+ return NULL;
+}
+
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+{
+ const struct flash_info *info;
+ u8 *id = nor->bouncebuf;
+ unsigned int i;
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
+ }
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+ info = spi_nor_search_part_by_id(manufacturers[i]->parts,
+ manufacturers[i]->nparts,
+ id);
+ if (info) {
+ nor->manufacturer = manufacturers[i];
+ return info;
+ }
+ }
+
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
+ SPI_NOR_MAX_ID_LEN, id);
+ return ERR_PTR(-ENODEV);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ while (len) {
+ loff_t addr = from;
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ WARN_ON(ret > len);
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+/*
+ * Write an address range to the nor chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+
+ /*
+ * If page_size is a power of two, the offset can be quickly
+ * calculated with an AND operation. On the other cases we
+ * need to do a modulus operation (more expensive).
+ * Power of two numbers have only one bit set and we can use
+ * the instruction hweight32 to detect if we need to do a
+ * modulus (do_div()) or not.
+ */
+ if (hweight32(nor->page_size) == 1) {
+ page_offset = addr & (nor->page_size - 1);
+ } else {
+ uint64_t aux = addr;
+
+ page_offset = do_div(aux, nor->page_size);
+ }
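+		/*
+		 * e.g. with the common 256-byte page size, hweight32() is 1
+		 * and addr & 0xff yields the in-page offset directly.
+		 */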
+ /* the size of data remaining on the first page */
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto write_err;
+
+ ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ if (ret < 0)
+ goto write_err;
+ written = ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+ *retlen += written;
+ i += written;
+ }
+
+write_err:
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
+ if (!nor->dev ||
+ (!nor->spimem && !nor->controller_ops) ||
+ (!nor->spimem && nor->controller_ops &&
+ (!nor->controller_ops->read ||
+ !nor->controller_ops->write ||
+ !nor->controller_ops->read_reg ||
+ !nor->controller_ops->write_reg))) {
+ pr_err("spi-nor: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ if (nor->spimem && nor->controller_ops) {
+ dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = num_mode_clocks;
+ read->num_wait_states = num_wait_states;
+ read->opcode = opcode;
+ read->proto = proto;
+}
+
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ pp->opcode = opcode;
+ pp->proto = proto;
+}
+
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == (int)hwcaps)
+ return table[i][1];
+
+ return -EINVAL;
+}
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+ static const int hwcaps_read2cmd[][2] = {
+ { SNOR_HWCAPS_READ, SNOR_CMD_READ },
+ { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
+ { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
+ { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
+ { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
+ { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
+ { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
+ { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
+ { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
+ { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
+ { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
+ { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+ ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+ static const int hwcaps_pp2cmd[][2] = {
+ { SNOR_HWCAPS_PP, SNOR_CMD_PP },
+ { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
+ { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
+ { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
+ { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
+ { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+ ARRAY_SIZE(hwcaps_pp2cmd));
+}
+
+/**
+ * spi_nor_spimem_check_op() - check if the operation is supported by the
+ *                             controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_op(struct spi_nor *nor,
+ struct spi_mem_op *op)
+{
+ /*
+ * First test with 4 address bytes. The opcode itself might
+ * be a 3B addressing opcode but we don't care, because
+ * SPI controller implementation should not check the opcode,
+ * but just the sequence.
+ */
+ op->addr.nbytes = 4;
+ if (!spi_mem_supports_op(nor->spimem, op)) {
+ if (nor->mtd.size > SZ_16M)
+ return -ENOTSUPP;
+
+ /* If flash size <= 16MB, 3 address bytes are sufficient */
+ op->addr.nbytes = 3;
+ if (!spi_mem_supports_op(nor->spimem, op))
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_spimem_check_readop() - check if the read op is supported by the
+ *                                 controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @read: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_readop(struct spi_nor *nor,
+ const struct spi_nor_read_command *read)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
+ SPI_MEM_OP_ADDR(3, 0, 1),
+ SPI_MEM_OP_DUMMY(0, 1),
+ SPI_MEM_OP_DATA_IN(0, NULL, 1));
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
+ op.dummy.buswidth = op.addr.buswidth;
+ op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
+ op.dummy.buswidth / 8;
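+	/* e.g. 8 dummy cycles on a x4 bus amount to 8 * 4 / 8 = 4 bytes. */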
+
+ return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_check_pp() - check if the page program op is supported by
+ *                             the controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @pp: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_pp(struct spi_nor *nor,
+ const struct spi_nor_pp_command *pp)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
+ SPI_MEM_OP_ADDR(3, 0, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(0, NULL, 1));
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
+
+ return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_adjust_hwcaps() - Find optimal Read/Write protocol
+ * based on SPI controller capabilities
+ * @nor: pointer to a 'struct spi_nor'
+ * @hwcaps: pointer to resulting capabilities after adjusting
+ * according to controller and flash's capability
+ */
+static void
+spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ unsigned int cap;
+
+ /* DTR modes are not supported yet, mask them all. */
+ *hwcaps &= ~SNOR_HWCAPS_DTR;
+
+ /* X-X-X modes are not supported yet, mask them all. */
+ *hwcaps &= ~SNOR_HWCAPS_X_X_X;
+
+ for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
+ int rdidx, ppidx;
+
+ if (!(*hwcaps & BIT(cap)))
+ continue;
+
+ rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
+ if (rdidx >= 0 &&
+ spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
+ *hwcaps &= ~BIT(cap);
+
+ ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
+ if (ppidx < 0)
+ continue;
+
+ if (spi_nor_spimem_check_pp(nor,
+ &params->page_programs[ppidx]))
+ *hwcaps &= ~BIT(cap);
+ }
+}
+
+/**
+ * spi_nor_set_erase_type() - set a SPI NOR erase type
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ */
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode)
+{
+ erase->size = size;
+ erase->opcode = opcode;
+ /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
+ erase->size_shift = ffs(erase->size) - 1;
+ erase->size_mask = (1 << erase->size_shift) - 1;
+}
+
+/**
+ * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: bitmask encoding erase types that can erase the entire
+ * flash memory
+ * @flash_size: the spi nor flash memory size
+ */
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size)
+{
+ /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
+ map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
+ SNOR_LAST_REGION;
+ map->uniform_region.size = flash_size;
+ map->regions = &map->uniform_region;
+ map->uniform_erase_type = erase_mask;
+}
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ int ret;
+
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_bfpt) {
+ ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
+ bfpt, params);
+ if (ret)
+ return ret;
+ }
+
+ if (nor->info->fixups && nor->info->fixups->post_bfpt)
+ return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
+ params);
+
+ return 0;
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+ const struct spi_nor_read_command *read;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ read = &nor->params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+	 * In the spi-nor framework, we don't need to distinguish between mode
+	 * clock cycles and wait state clock cycles.
+	 * Indeed, the value of the mode clock cycles is used by a QSPI
+	 * flash memory to know whether it should enter or leave its 0-4-4
+	 * (Continuous Read / XIP) mode.
+	 * eXecution In Place is out of the scope of the mtd sub-system.
+	 * Hence we choose to merge both mode and wait state clock cycles
+	 * into the so-called dummy clock cycles.
+ */
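+	/*
+	 * e.g. the 1-1-4 Fast Read set up in spi_nor_info_init_params() uses
+	 * 0 mode clocks and 8 wait states, so read_dummy = 8.
+	 */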
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+ const struct spi_nor_pp_command *pp;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ pp = &nor->params->page_programs[cmd];
+ nor->program_opcode = pp->opcode;
+ nor->write_proto = pp->proto;
+ return 0;
+}
+
+/**
+ * spi_nor_select_uniform_erase() - select optimum uniform erase type
+ * @map: the erase map of the SPI NOR
+ * @wanted_size: the erase type size to search for. Contains the value of
+ * info->sector_size or of the "small sector" size in case
+ * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
+ *
+ * Once the optimum uniform sector erase command is found, disable all the
+ * others.
+ *
+ * Return: pointer to erase type on success, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
+ const u32 wanted_size)
+{
+ const struct spi_nor_erase_type *tested_erase, *erase = NULL;
+ int i;
+ u8 uniform_erase_type = map->uniform_erase_type;
+
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (!(uniform_erase_type & BIT(i)))
+ continue;
+
+ tested_erase = &map->erase_type[i];
+
+ /*
+ * If the current erase size is the one, stop here:
+ * we have found the right uniform Sector Erase command.
+ */
+ if (tested_erase->size == wanted_size) {
+ erase = tested_erase;
+ break;
+ }
+
+ /*
+		 * Otherwise, the current erase size is still a valid candidate.
+ * Select the biggest valid candidate.
+ */
+ if (!erase && tested_erase->size)
+ erase = tested_erase;
+ /* keep iterating to find the wanted_size */
+ }
+
+ if (!erase)
+ return NULL;
+
+ /* Disable all other Sector Erase commands. */
+ map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
+ map->uniform_erase_type |= BIT(erase - map->erase_type);
+ return erase;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase = NULL;
+ struct mtd_info *mtd = &nor->mtd;
+ u32 wanted_size = nor->info->sector_size;
+ int i;
+
+ /*
+ * The previous implementation handling Sector Erase commands assumed
+	 * that the SPI flash memory has a uniform layout and then used only
+	 * one of the supported erase sizes for all Sector Erase commands.
+ * So to be backward compatible, the new implementation also tries to
+ * manage the SPI flash memory as uniform with a single erase sector
+ * size, when possible.
+ */
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ wanted_size = 4096u;
+#endif
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ erase = spi_nor_select_uniform_erase(map, wanted_size);
+ if (!erase)
+ return -EINVAL;
+ nor->erase_opcode = erase->opcode;
+ mtd->erasesize = erase->size;
+ return 0;
+ }
+
+ /*
+ * For non-uniform SPI flash memory, set mtd->erasesize to the
+ * maximum erase sector size. No need to set nor->erase_opcode.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (map->erase_type[i].size) {
+ erase = &map->erase_type[i];
+ break;
+ }
+ }
+
+ if (!erase)
+ return -EINVAL;
+
+ mtd->erasesize = erase->size;
+ return 0;
+}
+
+static int spi_nor_default_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ u32 ignored_mask, shared_mask;
+ int err;
+
+ /*
+ * Keep only the hardware capabilities supported by both the SPI
+ * controller and the SPI flash memory.
+ */
+ shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+ if (nor->spimem) {
+ /*
+ * When called from spi_nor_probe(), all caps are set and we
+ * need to discard some of them based on what the SPI
+ * controller actually supports (using spi_mem_supports_op()).
+ */
+ spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
+ } else {
+ /*
+ * SPI n-n-n protocols are not supported when the SPI
+ * controller directly implements the spi_nor interface.
+ * Yet another reason to switch to spi-mem.
+ */
+ ignored_mask = SNOR_HWCAPS_X_X_X;
+ if (shared_mask & ignored_mask) {
+ dev_dbg(nor->dev,
+ "SPI n-n-n protocols are not supported.\n");
+ shared_mask &= ~ignored_mask;
+ }
+ }
+
+ /* Select the (Fast) Read command. */
+ err = spi_nor_select_read(nor, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select read settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Page Program command. */
+ err = spi_nor_select_pp(nor, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select write settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Sector Erase command. */
+ err = spi_nor_select_erase(nor);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select erase settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ if (!nor->params->setup)
+ return 0;
+
+ return nor->params->setup(nor, hwcaps);
+}
+
+/**
+ * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
+ * settings based on MFR register and ->default_init() hook.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->default_init)
+ nor->manufacturer->fixups->default_init(nor);
+
+ if (nor->info->fixups && nor->info->fixups->default_init)
+ nor->info->fixups->default_init(nor);
+}
+
+/**
+ * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
+ * based on JESD216 SFDP standard.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method has a roll-back mechanism: in case the SFDP parsing fails, the
+ * legacy flash parameters and settings will be restored.
+ */
+static void spi_nor_sfdp_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter sfdp_params;
+
+ memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
+
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
+ nor->flags &= ~SNOR_F_4B_OPCODES;
+ } else {
+ memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+ }
+}
+
+/**
+ * spi_nor_info_init_params() - Initialize the flash's parameters and settings
+ * based on nor->info data.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_info_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ const struct flash_info *info = nor->info;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ u8 i, erase_mask;
+
+ /* Initialize legacy flash parameters and settings. */
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
+ params->setup = spi_nor_default_setup;
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ /* Set SPI NOR sizes. */
+ params->size = (u64)info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ /* Default to Fast Read for DT and non-DT platform devices. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+
+ /* Mask out Fast Read if not requested at DT instantiation. */
+ if (np && !of_property_read_bool(np, "m25p,fast-read"))
+ params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+ }
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+
+ if (info->flags & SPI_NOR_DUAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+ 0, 8, SPINOR_OP_READ_1_1_2,
+ SNOR_PROTO_1_1_2);
+ }
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ if (info->flags & SPI_NOR_OCTAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ 0, 8, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_1_1_8);
+ }
+
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+ /*
+ * Sector Erase settings. Sort Erase Types in ascending order, with the
+ * smallest erase size starting at BIT(0).
+ */
+ erase_mask = 0;
+ i = 0;
+ if (info->flags & SECT_4K_PMC) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K_PMC);
+ i++;
+ } else if (info->flags & SECT_4K) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K);
+ i++;
+ }
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+ SPINOR_OP_SE);
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+}
+
+/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed (it is also called for SPI NORs that do not
+ * support RDSFDP).
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Typically used to tweak various parameters that could not be extracted by
+ * other means (i.e. when the information provided by the SFDP/flash_info
+ * tables is incomplete or wrong).
+ */
+static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_sfdp)
+ nor->manufacturer->fixups->post_sfdp(nor);
+
+ if (nor->info->fixups && nor->info->fixups->post_sfdp)
+ nor->info->fixups->post_sfdp(nor);
+}
+
+/**
+ * spi_nor_late_init_params() - Late initialization of default flash parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to set default flash parameters and settings when the ->default_init()
+ * hook or the SFDP parser leave them unset.
+ */
+static void spi_nor_late_init_params(struct spi_nor *nor)
+{
+ /*
+ * NOR protection support. When locking_ops are not provided, we pick
+ * the default ones.
+ */
+ if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
+ nor->params->locking_ops = &spi_nor_sr_locking_ops;
+}
+
+/**
+ * spi_nor_init_params() - Initialize the flash's parameters and settings.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The flash parameters and settings are initialized based on a sequence of
+ * calls that are ordered by priority:
+ *
+ * 1/ Default flash parameters initialization. The initializations are done
+ * based on nor->info data:
+ * spi_nor_info_init_params()
+ *
+ * which can be overwritten by:
+ * 2/ Manufacturer flash parameters initialization. The initializations are
+ *    done based on the MFR register, or when the decisions cannot be made
+ *    solely based on MFR, by using specific flash_info tweaks,
+ *    ->default_init():
+ * spi_nor_manufacturer_init_params()
+ *
+ * which can be overwritten by:
+ * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
+ *    should be more accurate than the above.
+ * spi_nor_sfdp_init_params()
+ *
+ * Please note that there is a ->post_bfpt() fixup hook that can overwrite
+ * the flash parameters and settings immediately after parsing the Basic
+ * Flash Parameter Table.
+ *
+ * which can be overwritten by:
+ * 4/ Post SFDP flash parameters initialization. Used to tweak various
+ *    parameters that could not be extracted by other means (i.e. when the
+ *    information provided by the SFDP/flash_info tables is incomplete or
+ *    wrong).
+ * spi_nor_post_sfdp_fixups()
+ *
+ * 5/ Late default flash parameters initialization, used when the
+ * ->default_init() hook or the SFDP parser do not set specific params.
+ * spi_nor_late_init_params()
+ */
+static int spi_nor_init_params(struct spi_nor *nor)
+{
+ nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
+ if (!nor->params)
+ return -ENOMEM;
+
+ spi_nor_info_init_params(nor);
+
+ spi_nor_manufacturer_init_params(nor);
+
+ if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+ !(nor->info->flags & SPI_NOR_SKIP_SFDP))
+ spi_nor_sfdp_init_params(nor);
+
+ spi_nor_post_sfdp_fixups(nor);
+
+ spi_nor_late_init_params(nor);
+
+ return 0;
+}
+
+/**
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_quad_enable(struct spi_nor *nor)
+{
+ if (!nor->params->quad_enable)
+ return 0;
+
+ if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+ spi_nor_get_protocol_width(nor->write_proto) == 4))
+ return 0;
+
+ return nor->params->quad_enable(nor);
+}
+
+/**
+ * spi_nor_unlock_all() - Unlocks the entire flash memory array.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Some SPI NOR flashes are write protected by default after a power-on reset
+ * cycle, in order to avoid inadvertent writes during power-up. Backward
+ * compatibility requires unlocking the entire flash memory array at power-up
+ * by default.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_unlock_all(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_LOCK)
+ return spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ err = spi_nor_quad_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
+ }
+
+ err = spi_nor_unlock_all(nor);
+ if (err) {
+ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
+ return err;
+ }
+
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
+ nor->params->set_4byte_addr_mode(nor, true);
+ }
+
+ return 0;
+}
+
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ nor->flags & SNOR_F_BROKEN_RESET)
+ nor->params->set_4byte_addr_mode(nor, false);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
+static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
+ const char *name)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+ for (j = 0; j < manufacturers[i]->nparts; j++) {
+ if (!strcmp(name, manufacturers[i]->parts[j].name)) {
+ nor->manufacturer = manufacturers[i];
+ return &manufacturers[i]->parts[j];
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int spi_nor_set_addr_width(struct spi_nor *nor)
+{
+ if (nor->addr_width) {
+ /* already configured from SFDP */
+ } else if (nor->info->addr_width) {
+ nor->addr_width = nor->info->addr_width;
+ } else if (nor->mtd.size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ } else {
+ nor->addr_width = 3;
+ }
+
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ dev_dbg(nor->dev, "address width is too large: %u\n",
+ nor->addr_width);
+ return -EINVAL;
+ }
+
+ /* Set 4byte opcodes when possible. */
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
+ return 0;
+}
+
+static void spi_nor_debugfs_init(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ struct mtd_info *mtd = &nor->mtd;
+
+ mtd->dbg.partname = info->name;
+ mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
+ info->id_len, info->id);
+}
+
+static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
+ const char *name)
+{
+ const struct flash_info *info = NULL;
+
+ if (name)
+ info = spi_nor_match_id(nor, name);
+ /* Try to auto-detect if the chip name wasn't specified or wasn't found */
+ if (!info)
+ info = spi_nor_read_id(nor);
+ if (IS_ERR_OR_NULL(info))
+ return ERR_PTR(-ENOENT);
+
+ /*
+ * If the caller has specified the name of a flash model that can
+ * normally be detected using JEDEC, let's verify it.
+ */
+ if (name && info->id_len) {
+ const struct flash_info *jinfo;
+
+ jinfo = spi_nor_read_id(nor);
+ if (IS_ERR(jinfo)) {
+ return jinfo;
+ } else if (jinfo != info) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(nor->dev, "found %s, expected %s\n",
+ jinfo->name, info->name);
+ info = jinfo;
+ }
+ }
+
+ return info;
+}
+
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ const struct flash_info *info;
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = &nor->mtd;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ int ret;
+ int i;
+
+ ret = spi_nor_check(nor);
+ if (ret)
+ return ret;
+
+ /* Reset SPI protocol for all commands. */
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+
+ /*
+ * We need the bounce buffer early to read/write registers when going
+ * through the spi-mem layer (buffers have to be DMA-able).
+ * For spi-mem drivers, we'll reallocate a new buffer if
+ * nor->page_size turns out to be greater than PAGE_SIZE (which
+ * shouldn't happen anytime soon, since NOR pages are usually less
+ * than 1KB) after spi_nor_scan() returns.
+ */
+ nor->bouncebuf_size = PAGE_SIZE;
+ nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
+ GFP_KERNEL);
+ if (!nor->bouncebuf)
+ return -ENOMEM;
+
+ info = spi_nor_get_flash_info(nor, name);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ nor->info = info;
+
+ spi_nor_debugfs_init(nor, info);
+
+ mutex_init(&nor->lock);
+
+ /*
+ * Make sure the XSR_RDY flag is set before calling
+ * spi_nor_wait_till_ready(). Xilinx S3AN shares its MFR
+ * with Atmel SPI NOR.
+ */
+ if (info->flags & SPI_NOR_XSR_RDY)
+ nor->flags |= SNOR_F_READY_XSR_RDY;
+
+ if (info->flags & SPI_NOR_HAS_LOCK)
+ nor->flags |= SNOR_F_HAS_LOCK;
+
+ mtd->_write = spi_nor_write;
+
+ /* Init flash parameters based on flash_info struct and SFDP */
+ ret = spi_nor_init_params(nor);
+ if (ret)
+ return ret;
+
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
+ mtd->priv = nor;
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = nor->params->size;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+ mtd->_resume = spi_nor_resume;
+
+ if (nor->params->locking_ops) {
+ mtd->_lock = spi_nor_lock;
+ mtd->_unlock = spi_nor_unlock;
+ mtd->_is_locked = spi_nor_is_locked;
+ }
+
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+ if (info->flags & SPI_NOR_HAS_TB) {
+ nor->flags |= SNOR_F_HAS_SR_TB;
+ if (info->flags & SPI_NOR_TB_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+ }
+
+ if (info->flags & NO_CHIP_ERASE)
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+ if (info->flags & USE_CLSR)
+ nor->flags |= SNOR_F_USE_CLSR;
+
+ if (info->flags & SPI_NOR_4BIT_BP) {
+ nor->flags |= SNOR_F_HAS_4BIT_BP;
+ if (info->flags & SPI_NOR_BP3_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
+ }
+
+ if (info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+
+ mtd->dev.parent = dev;
+ nor->page_size = nor->params->page_size;
+ mtd->writebufsize = nor->page_size;
+
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
+
+ /*
+ * Configure the SPI memory:
+ * - select op codes for (Fast) Read, Page Program and Sector Erase.
+ * - set the number of dummy cycles (mode cycles + wait states).
+ * - set the SPI protocols for register and memory accesses.
+ */
+ ret = spi_nor_setup(nor, hwcaps);
+ if (ret)
+ return ret;
+
+ if (info->flags & SPI_NOR_4B_OPCODES)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ ret = spi_nor_set_addr_width(nor);
+ if (ret)
+ return ret;
+
+ /* Send all the required SPI flash commands to initialize the device */
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "%s (%lld Kbytes)\n", info->name,
+ (long long)mtd->size >> 10);
+
+ dev_dbg(dev,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB), "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+ if (mtd->numeraseregions)
+ for (i = 0; i < mtd->numeraseregions; i++)
+ dev_dbg(dev,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].erasesize / 1024,
+ mtd->eraseregions[i].numblocks);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+static int spi_nor_create_read_dirmap(struct spi_nor *nor)
+{
+ struct spi_mem_dirmap_info info = {
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+ SPI_MEM_OP_DATA_IN(0, NULL, 1)),
+ .offset = 0,
+ .length = nor->mtd.size,
+ };
+ struct spi_mem_op *op = &info.op_tmpl;
+
+ /* get transfer protocols. */
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ op->dummy.buswidth = op->addr.buswidth;
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
+
+ nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+ &info);
+ return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
+}
+
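The dummy-cycle conversion above is worth a concrete number: nbytes is
cycles * buswidth / 8, so a common fast-read setup with 8 dummy cycles on a
quad (x4) bus occupies 8 * 4 / 8 = 4 bytes on the wire. A tiny sketch:

    #include <stdio.h>

    int main(void)
    {
            int read_dummy = 8; /* dummy clock cycles of the read op */
            int buswidth = 4;   /* I/O lines driven during that phase */

            printf("dummy bytes = %d\n", (read_dummy * buswidth) / 8); /* 4 */
            return 0;
    }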
+static int spi_nor_create_write_dirmap(struct spi_nor *nor)
+{
+ struct spi_mem_dirmap_info info = {
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
+ .offset = 0,
+ .length = nor->mtd.size,
+ };
+ struct spi_mem_op *op = &info.op_tmpl;
+
+ /* get transfer protocols. */
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+ op->dummy.buswidth = op->addr.buswidth;
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op->addr.nbytes = 0;
+
+ nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+ &info);
+ return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
+}
+
+static int spi_nor_probe(struct spi_mem *spimem)
+{
+ struct spi_device *spi = spimem->spi;
+ struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+ struct spi_nor *nor;
+ /*
+ * Enable all caps by default. The core will mask them after
+ * checking what's really supported using spi_mem_supports_op().
+ */
+ const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
+ char *flash_name;
+ int ret;
+
+ nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+ if (!nor)
+ return -ENOMEM;
+
+ nor->spimem = spimem;
+ nor->dev = &spi->dev;
+ spi_nor_set_flash_node(nor, spi->dev.of_node);
+
+ spi_mem_set_drvdata(spimem, nor);
+
+ if (data && data->name)
+ nor->mtd.name = data->name;
+
+ if (!nor->mtd.name)
+ nor->mtd.name = spi_mem_get_name(spimem);
+
+ /*
+ * For some (historical?) reason many platforms provide two different
+ * names in flash_platform_data: "name" and "type". Quite often name is
+ * set to "m25p80" and then "type" provides a real chip name.
+ * If that's the case, respect "type" and ignore the "name".
+ */
+ if (data && data->type)
+ flash_name = data->type;
+ else if (!strcmp(spi->modalias, "spi-nor"))
+ flash_name = NULL; /* auto-detect */
+ else
+ flash_name = spi->modalias;
+
+ ret = spi_nor_scan(nor, flash_name, &hwcaps);
+ if (ret)
+ return ret;
+
+ /*
+ * None of the existing parts have > 512B pages, but let's play safe
+ * and add this logic so that if anyone ever adds support for such
+ * a NOR we don't end up with buffer overflows.
+ */
+ if (nor->page_size > PAGE_SIZE) {
+ nor->bouncebuf_size = nor->page_size;
+ devm_kfree(nor->dev, nor->bouncebuf);
+ nor->bouncebuf = devm_kmalloc(nor->dev,
+ nor->bouncebuf_size,
+ GFP_KERNEL);
+ if (!nor->bouncebuf)
+ return -ENOMEM;
+ }
+
+ ret = spi_nor_create_read_dirmap(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_create_write_dirmap(nor);
+ if (ret)
+ return ret;
+
+ return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+}
+
+static int spi_nor_remove(struct spi_mem *spimem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ spi_nor_restore(nor);
+
+ /* Clean up MTD stuff. */
+ return mtd_device_unregister(&nor->mtd);
+}
+
+static void spi_nor_shutdown(struct spi_mem *spimem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ spi_nor_restore(nor);
+}
+
+/*
+ * Do NOT add to this array without reading the following:
+ *
+ * Historically, many flash devices are bound to this driver by their name. But
+ * since most of these flashes are compatible to some extent, and their
+ * differences can often be distinguished by the JEDEC read-ID command, we
+ * encourage new users to add support to the spi-nor library, and simply bind
+ * against a generic string here (e.g., "jedec,spi-nor").
+ *
+ * Many flash names are kept here in this list (as well as in spi-nor.c) to
+ * keep them available as module aliases for existing platforms.
+ */
+static const struct spi_device_id spi_nor_dev_ids[] = {
+ /*
+ * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
+ * hack around the fact that the SPI core does not provide uevent
+ * matching for .of_match_table
+ */
+ {"spi-nor"},
+
+ /*
+ * Entries not used in DTs that should be safe to drop after replacing
+ * them with "spi-nor" in platform data.
+ */
+ {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
+
+ /*
+ * Entries that were used in DTs without "jedec,spi-nor" fallback and
+ * should be kept for backward compatibility.
+ */
+ {"at25df321a"}, {"at25df641"}, {"at26df081a"},
+ {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
+ {"mx25l25635e"},{"mx66l51235l"},
+ {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
+ {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
+ {"s25fl064k"},
+ {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
+ {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
+ {"m25p64"}, {"m25p128"},
+ {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
+ {"w25q80bl"}, {"w25q128"}, {"w25q256"},
+
+ /* Flashes that can't be detected using JEDEC */
+ {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
+ {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
+ {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
+
+ /* Everspin MRAMs (non-JEDEC) */
+ { "mr25h128" }, /* 128 Kib, 40 MHz */
+ { "mr25h256" }, /* 256 Kib, 40 MHz */
+ { "mr25h10" }, /* 1 Mib, 40 MHz */
+ { "mr25h40" }, /* 4 Mib, 40 MHz */
+
+ { },
+};
+MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
+
+static const struct of_device_id spi_nor_of_table[] = {
+ /*
+ * Generic compatibility for SPI NOR that can be identified by the
+ * JEDEC READ ID opcode (0x9F). Use this, if possible.
+ */
+ { .compatible = "jedec,spi-nor" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spi_nor_of_table);
+
+/*
+ * REVISIT: many of these chips have deep power-down modes, which
+ * should clearly be entered on suspend() to minimize power use.
+ * And also when they're otherwise idle...
+ */
+static struct spi_mem_driver spi_nor_driver = {
+ .spidrv = {
+ .driver = {
+ .name = "spi-nor",
+ .of_match_table = spi_nor_of_table,
+ },
+ .id_table = spi_nor_dev_ids,
+ },
+ .probe = spi_nor_probe,
+ .remove = spi_nor_remove,
+ .shutdown = spi_nor_shutdown,
+};
+module_spi_mem_driver(spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
new file mode 100644
index 000000000000..6f2f6b27173f
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.h
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_INTERNAL_H
+#define __LINUX_MTD_SPI_NOR_INTERNAL_H
+
+#include "sfdp.h"
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+enum spi_nor_option_flags {
+ SNOR_F_USE_FSR = BIT(0),
+ SNOR_F_HAS_SR_TB = BIT(1),
+ SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
+ SNOR_F_READY_XSR_RDY = BIT(3),
+ SNOR_F_USE_CLSR = BIT(4),
+ SNOR_F_BROKEN_RESET = BIT(5),
+ SNOR_F_4B_OPCODES = BIT(6),
+ SNOR_F_HAS_4BAIT = BIT(7),
+ SNOR_F_HAS_LOCK = BIT(8),
+ SNOR_F_HAS_16BIT_SR = BIT(9),
+ SNOR_F_NO_READ_CR = BIT(10),
+ SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
+ SNOR_F_HAS_4BIT_BP = BIT(12),
+ SNOR_F_HAS_SR_BP3_BIT6 = BIT(13),
+};
+
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+ /* Octal SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+ /* Octal SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+
+ SNOR_CMD_PP_MAX
+};
+
+/**
+ * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type.
+ * JEDEC JESD216B requires erase sizes to be a power of 2.
+ * @size_shift: the shift corresponding to @size, i.e.
+ * @size == 1 << @size_shift.
+ * @size_mask: the size mask based on @size_shift.
+ * @opcode: the SPI command op code to erase the sector/block.
+ * @idx: Erase Type index as sorted in the Basic Flash Parameter
+ * Table. It will be used to synchronize the supported
+ * Erase Types with the ones identified in the SFDP
+ * optional tables.
+ */
+struct spi_nor_erase_type {
+ u32 size;
+ u32 size_shift;
+ u32 size_mask;
+ u8 opcode;
+ u8 idx;
+};
+
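The shift/mask pair above lets erase-offset alignment be checked without a
division: size == 1 << size_shift and size_mask == size - 1, so
(offset & size_mask) == 0 exactly when the offset is erase-aligned. A
userspace sketch for a 64 KiB erase type (ffs() here stands in for the
kernel helpers that derive the shift):

    #include <stdio.h>
    #include <strings.h> /* ffs() */

    int main(void)
    {
            unsigned size = 64 * 1024;
            unsigned shift = ffs(size) - 1; /* 16 */
            unsigned mask = size - 1;       /* 0xffff */

            printf("shift=%u mask=0x%x aligned(0x30000)=%d\n",
                   shift, mask, (0x30000 & mask) == 0);
            return 0;
    }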
+/**
+ * struct spi_nor_erase_command - Used for non-uniform erases
+ * The structure is used to describe a list of erase commands to be executed
+ * once we validate that the erase can be performed. The elements in the list
+ * are run-length encoded.
+ * @list: for inclusion into the list of erase commands.
+ * @count: how many times the same erase command should be
+ * consecutively used.
+ * @size: the size of the sector/block erased by the command.
+ * @opcode: the SPI command op code to erase the sector/block.
+ */
+struct spi_nor_erase_command {
+ struct list_head list;
+ u32 count;
+ u32 size;
+ u8 opcode;
+};
+
+/**
+ * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
+ * @offset: the offset in the data array of erase region start.
+ * The least significant bits are used as a bitmask encoding flags to
+ * determine if this region is overlaid, if this region is
+ * the last in the SPI NOR flash memory and to indicate
+ * all the supported erase commands inside this region.
+ * The erase types are sorted in ascending order with the
+ * smallest Erase Type size being at BIT(0).
+ * @size: the size of the region in bytes.
+ */
+struct spi_nor_erase_region {
+ u64 offset;
+ u64 size;
+};
+
+#define SNOR_ERASE_TYPE_MAX 4
+#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
+
+#define SNOR_LAST_REGION BIT(4)
+#define SNOR_OVERLAID_REGION BIT(5)
+
+#define SNOR_ERASE_FLAGS_MAX 6
+#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
+
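The masks above imply the following layout for spi_nor_erase_region.offset:
bits 0-3 are a bitmask of the erase types valid in the region, bit 4 marks
the last region, bit 5 an overlaid region, and the remaining bits carry the
byte offset itself. A sketch of packing and unpacking one region (the
constants mirror the defines; the region contents are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define ERASE_TYPE_MASK 0xfULL      /* bits 0-3 */
    #define LAST_REGION     (1ULL << 4)
    #define FLAGS_MASK      0x3fULL     /* bits 0-5 */

    int main(void)
    {
            /* last region at 1 MiB, supporting erase types 0 and 2 */
            uint64_t encoded = 0x100000ULL | 0x5 | LAST_REGION;

            printf("offset=0x%llx types=0x%llx last=%d\n",
                   (unsigned long long)(encoded & ~FLAGS_MASK),
                   (unsigned long long)(encoded & ERASE_TYPE_MASK),
                   !!(encoded & LAST_REGION));
            return 0;
    }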
+/**
+ * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
+ * @regions: array of erase regions. The regions are consecutive in
+ * address space. Walking through the regions is done
+ * incrementally.
+ * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
+ * sector size (legacy implementation).
+ * @erase_type: an array of erase types shared by all the regions.
+ * The erase types are sorted in ascending order, with the
+ * smallest Erase Type size being the first member in the
+ * erase_type array.
+ * @uniform_erase_type: bitmask encoding erase types that can erase the
+ * entire memory. This member is completed at init by
+ * uniform and non-uniform SPI NOR flash memories if they
+ * support at least one erase type that can erase the
+ * entire memory.
+ */
+struct spi_nor_erase_map {
+ struct spi_nor_erase_region *regions;
+ struct spi_nor_erase_region uniform_region;
+ struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
+ u8 uniform_erase_type;
+};
+
+/**
+ * struct spi_nor_locking_ops - SPI NOR locking methods
+ * @lock: lock a region of the SPI NOR.
+ * @unlock: unlock a region of the SPI NOR.
+ * @is_locked: check if a region of the SPI NOR is completely locked
+ */
+struct spi_nor_locking_ops {
+ int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+};
+
+/**
+ * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
+ * Includes legacy flash parameters and settings that can be overwritten
+ * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
+ * Serial Flash Discoverable Parameters (SFDP) tables.
+ *
+ * @size: the flash memory density in bytes.
+ * @page_size: the page size of the SPI NOR flash memory.
+ * @hwcaps: describes the read and page program hardware
+ * capabilities.
+ * @reads: read capabilities ordered by priority: the higher the
+ * index in the array, the higher the priority.
+ * @page_programs: page program capabilities ordered by priority: the
+ * higher the index in the array, the higher the priority.
+ * @erase_map: the erase map parsed from the SFDP Sector Map Parameter
+ * Table.
+ * @quad_enable: enables SPI NOR quad mode.
+ * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
+ * @convert_addr: converts an absolute address into something the flash
+ * will understand. Particularly useful when pagesize is
+ * not a power-of-2.
+ * @setup: configures the SPI NOR memory. Useful for SPI NOR
+ * flashes that have peculiarities to the SPI NOR standard
+ * e.g. different opcodes, specific address calculation,
+ * page size, etc.
+ * @locking_ops: SPI NOR locking methods.
+ */
+struct spi_nor_flash_parameter {
+ u64 size;
+ u32 page_size;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ struct spi_nor_erase_map erase_map;
+
+ int (*quad_enable)(struct spi_nor *nor);
+ int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
+ u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
+ int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
+
+ const struct spi_nor_locking_ops *locking_ops;
+};
+
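The priority convention documented for @reads and @page_programs ("the
higher the index, the higher the priority") means command selection is a
scan from the top of the array down. A minimal sketch of that idea, with
invented data standing in for the real capability negotiation:

    #include <stdio.h>

    #define NCMDS 4

    int main(void)
    {
            /* 1 = supported by both the controller and the flash */
            int supported[NCMDS] = { 1, 1, 0, 1 }; /* index 3 is fastest */
            int best = -1, i;

            for (i = NCMDS - 1; i >= 0; i--) {
                    if (supported[i]) {
                            best = i; /* highest supported index wins */
                            break;
                    }
            }
            printf("selected command index: %d\n", best); /* 3 */
            return 0;
    }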
+/**
+ * struct spi_nor_fixups - SPI NOR fixup hooks
+ * @default_init: called after default flash parameters init. Used to tweak
+ * flash parameters when information provided by the flash_info
+ * table is incomplete or wrong.
+ * @post_bfpt: called after the BFPT table has been parsed
+ * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
+ * that do not support RDSFDP). Typically used to tweak various
+ * parameters that could not be extracted by other means (i.e.
+ * when the information provided by the SFDP/flash_info tables is
+ * incomplete or wrong).
+ *
+ * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
+ * table is broken or not available.
+ */
+struct spi_nor_fixups {
+ void (*default_init)(struct spi_nor *nor);
+ int (*post_bfpt)(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params);
+ void (*post_sfdp)(struct spi_nor *nor);
+};
+
+struct flash_info {
+ char *name;
+
+ /*
+ * This array stores the ID bytes.
+ * The first three bytes are the JEDEC ID.
+ * JEDEC ID zero means "no ID" (mostly older chips).
+ */
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u32 flags;
+#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
+#define SST_WRITE BIT(2) /* use SST byte programming */
+#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
+#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
+#define USE_FSR BIT(7) /* use flag status register */
+#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB BIT(9) /*
+ * Flash SR has Top/Bottom (TB) protect
+ * bit. Must be used with
+ * SPI_NOR_HAS_LOCK.
+ */
+#define SPI_NOR_XSR_RDY BIT(10) /*
+ * S3AN flashes have specific opcode to
+ * read the status register.
+ */
+#define SPI_NOR_4B_OPCODES BIT(11) /*
+ * Use dedicated 4byte address op codes
+ * to support memory sizes above 128 Mib.
+ */
+#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
+#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
+#define USE_CLSR BIT(14) /* use CLSR command */
+#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
+#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
+ * Top/Bottom (TB) is bit 6 of
+ * status register. Must be used with
+ * SPI_NOR_HAS_TB.
+ */
+#define SPI_NOR_4BIT_BP BIT(17) /*
+ * Flash SR has 4 bit fields (BP0-3)
+ * for block protection.
+ */
+#define SPI_NOR_BP3_SR_BIT6 BIT(18) /*
+ * BP3 is bit 6 of status register.
+ * Must be used with SPI_NOR_4BIT_BP.
+ */
+
+ /* Part specific fixup hooks. */
+ const struct spi_nor_fixups *fixups;
+};
+
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
+
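How the INFO() packing above plays out for a concrete part: a JEDEC id of
0xc84019 (gd25q256) with no extended id yields id bytes { 0xc8, 0x40, 0x19 }
and id_len 3. A standalone check of the same expressions:

    #include <stdio.h>

    int main(void)
    {
            unsigned jedec_id = 0xc84019, ext_id = 0;
            unsigned char id[5] = {
                    (jedec_id >> 16) & 0xff,
                    (jedec_id >> 8) & 0xff,
                    jedec_id & 0xff,
                    (ext_id >> 8) & 0xff,
                    ext_id & 0xff,
            };
            int id_len = !jedec_id ? 0 : (3 + (ext_id ? 2 : 0));

            printf("id = %02x %02x %02x, id_len = %d\n",
                   id[0], id[1], id[2], id_len); /* c8 40 19, 3 */
            return 0;
    }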
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 16) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = 6, \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = (_addr_width), \
+ .flags = (_flags),
+
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff \
+ }, \
+ .id_len = 3, \
+ .sector_size = (8*_page_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = _page_size, \
+ .addr_width = 3, \
+ .flags = SPI_NOR_NO_FR | SPI_NOR_XSR_RDY,
+
+/**
+ * struct spi_nor_manufacturer - SPI NOR manufacturer object
+ * @name: manufacturer name
+ * @parts: array of parts supported by this manufacturer
+ * @nparts: number of entries in the parts array
+ * @fixups: hooks called at various points in time during spi_nor_scan()
+ */
+struct spi_nor_manufacturer {
+ const char *name;
+ const struct flash_info *parts;
+ unsigned int nparts;
+ const struct spi_nor_fixups *fixups;
+};
+
+/* Manufacturer drivers. */
+extern const struct spi_nor_manufacturer spi_nor_atmel;
+extern const struct spi_nor_manufacturer spi_nor_catalyst;
+extern const struct spi_nor_manufacturer spi_nor_eon;
+extern const struct spi_nor_manufacturer spi_nor_esmt;
+extern const struct spi_nor_manufacturer spi_nor_everspin;
+extern const struct spi_nor_manufacturer spi_nor_fujitsu;
+extern const struct spi_nor_manufacturer spi_nor_gigadevice;
+extern const struct spi_nor_manufacturer spi_nor_intel;
+extern const struct spi_nor_manufacturer spi_nor_issi;
+extern const struct spi_nor_manufacturer spi_nor_macronix;
+extern const struct spi_nor_manufacturer spi_nor_micron;
+extern const struct spi_nor_manufacturer spi_nor_st;
+extern const struct spi_nor_manufacturer spi_nor_spansion;
+extern const struct spi_nor_manufacturer spi_nor_sst;
+extern const struct spi_nor_manufacturer spi_nor_winbond;
+extern const struct spi_nor_manufacturer spi_nor_xilinx;
+extern const struct spi_nor_manufacturer spi_nor_xmc;
+
+int spi_nor_write_enable(struct spi_nor *nor);
+int spi_nor_write_disable(struct spi_nor *nor);
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
+int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
+int spi_nor_wait_till_ready(struct spi_nor *nor);
+int spi_nor_lock_and_prep(struct spi_nor *nor);
+void spi_nor_unlock_and_unprep(struct spi_nor *nor);
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
+
+int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+ u8 *buf);
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps);
+u8 spi_nor_convert_3to4_read(u8 opcode);
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ enum spi_nor_protocol proto);
+
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode);
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region);
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size);
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params);
+
+static struct spi_nor __maybe_unused *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
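mtd_to_spi_nor() above is the usual priv back-pointer pattern: spi_nor_scan()
stores the spi_nor in mtd->priv, and the mtd callbacks recover it later. In
miniature, with stripped-down stand-in types:

    #include <stdio.h>

    struct mtd_info { void *priv; };
    struct spi_nor { struct mtd_info mtd; const char *name; };

    static struct spi_nor *to_nor(struct mtd_info *mtd)
    {
            return mtd->priv; /* set during scan: mtd->priv = nor */
    }

    int main(void)
    {
            struct spi_nor nor = { .name = "demo" };

            nor.mtd.priv = &nor;
            printf("%s\n", to_nor(&nor.mtd)->name);
            return 0;
    }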
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
new file mode 100644
index 000000000000..ddb8e3650835
--- /dev/null
+++ b/drivers/mtd/spi-nor/eon.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info eon_parts[] = {
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
+ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
+};
+
+const struct spi_nor_manufacturer spi_nor_eon = {
+ .name = "eon",
+ .parts = eon_parts,
+ .nparts = ARRAY_SIZE(eon_parts),
+};
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
new file mode 100644
index 000000000000..c93170008118
--- /dev/null
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info esmt_parts[] = {
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+};
+
+const struct spi_nor_manufacturer spi_nor_esmt = {
+ .name = "esmt",
+ .parts = esmt_parts,
+ .nparts = ARRAY_SIZE(esmt_parts),
+};
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
new file mode 100644
index 000000000000..04a177a32283
--- /dev/null
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info everspin_parts[] = {
+ /* Everspin */
+ { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+};
+
+const struct spi_nor_manufacturer spi_nor_everspin = {
+ .name = "everspin",
+ .parts = everspin_parts,
+ .nparts = ARRAY_SIZE(everspin_parts),
+};
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
new file mode 100644
index 000000000000..e385d93e756c
--- /dev/null
+++ b/drivers/mtd/spi-nor/fujitsu.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info fujitsu_parts[] = {
+ /* Fujitsu */
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+};
+
+const struct spi_nor_manufacturer spi_nor_fujitsu = {
+ .name = "fujitsu",
+ .parts = fujitsu_parts,
+ .nparts = ARRAY_SIZE(fujitsu_parts),
+};
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
new file mode 100644
index 000000000000..447d84bb2128
--- /dev/null
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static void gd25q256_default_init(struct spi_nor *nor)
+{
+ /*
+ * Some manufacturers, like GigaDevice, may use different
+ * bits to set QE on different memories, so the MFR alone
+ * can't indicate the quad_enable method for this case; we
+ * need to set it in the default_init fixup hook.
+ */
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static struct spi_nor_fixups gd25q256_fixups = {
+ .default_init = gd25q256_default_init,
+};
+
+static const struct flash_info gigadevice_parts[] = {
+ { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK |
+ SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+ .fixups = &gd25q256_fixups },
+};
+
+const struct spi_nor_manufacturer spi_nor_gigadevice = {
+ .name = "gigadevice",
+ .parts = gigadevice_parts,
+ .nparts = ARRAY_SIZE(gigadevice_parts),
+};
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
new file mode 100644
index 000000000000..d8196f101368
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info intel_parts[] = {
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+};
+
+static void intel_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static const struct spi_nor_fixups intel_fixups = {
+ .default_init = intel_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_intel = {
+ .name = "intel",
+ .parts = intel_parts,
+ .nparts = ARRAY_SIZE(intel_parts),
+ .fixups = &intel_fixups,
+};
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
new file mode 100644
index 000000000000..ffcb60e54a80
--- /dev/null
+++ b/drivers/mtd/spi-nor/issi.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+is25lp256_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * IS25LP256 supports 4B opcodes, but the BFPT advertises a
+ * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
+ * Overwrite the address width advertised by the BFPT.
+ */
+ if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
+ BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
+ nor->addr_width = 4;
+
+ return 0;
+}
+
+static struct spi_nor_fixups is25lp256_fixups = {
+ .post_bfpt = is25lp256_post_bfpt_fixups,
+};
+
+static const struct flash_info issi_parts[] = {
+ /* ISSI */
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+};
+
+static void issi_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static const struct spi_nor_fixups issi_fixups = {
+ .default_init = issi_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_issi = {
+ .name = "issi",
+ .parts = issi_parts,
+ .nparts = ARRAY_SIZE(issi_parts),
+ .fixups = &issi_fixups,
+};
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
new file mode 100644
index 000000000000..ab0f963d630c
--- /dev/null
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * MX25L25635F supports 4B opcodes but MX25L25635E does not.
+ * Unfortunately, Macronix has re-used the same JEDEC ID for both
+ * variants which prevents us from defining a new entry in the parts
+ * table.
+ * We need a way to differentiate MX25L25635E and MX25L25635F, and it
+ * seems that the F version advertises support for Fast Read 4-4-4 in
+ * its BFPT table.
+ */
+ if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ return 0;
+}
+
+static struct spi_nor_fixups mx25l25635_fixups = {
+ .post_bfpt = mx25l25635_post_bfpt_fixups,
+};
+
+static const struct flash_info macronix_parts[] = {
+ /* Macronix */
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &mx25l25635_fixups },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
+ SPI_NOR_QUAD_READ) },
+};
+
+static void macronix_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups macronix_fixups = {
+ .default_init = macronix_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_macronix = {
+ .name = "macronix",
+ .parts = macronix_parts,
+ .nparts = ARRAY_SIZE(macronix_parts),
+ .fixups = &macronix_fixups,
+};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
new file mode 100644
index 000000000000..6c034b9718e2
--- /dev/null
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info micron_parts[] = {
+ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES) },
+};
+
+static const struct flash_info st_parts[] = {
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64,
+ SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64,
+ SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
+ USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
+};
+
+/**
+ * st_micron_set_4byte_addr_mode() - Set 4-byte address mode for ST and Micron
+ * flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_set_4byte_addr_mode(nor, enable);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
+
+static void micron_st_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ nor->params->quad_enable = NULL;
+ nor->params->set_4byte_addr_mode = st_micron_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups micron_st_fixups = {
+ .default_init = micron_st_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_micron = {
+ .name = "micron",
+ .parts = micron_parts,
+ .nparts = ARRAY_SIZE(micron_parts),
+ .fixups = &micron_st_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_st = {
+ .name = "st",
+ .parts = st_parts,
+ .nparts = ARRAY_SIZE(st_parts),
+ .fixups = &micron_st_fixups,
+};
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
deleted file mode 100644
index b1691680d174..000000000000
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ /dev/null
@@ -1,565 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Bayi Cheng <bayi.cheng@mediatek.com>
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/ioport.h>
-#include <linux/math64.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
-
-#define MTK_NOR_CMD_REG 0x00
-#define MTK_NOR_CNT_REG 0x04
-#define MTK_NOR_RDSR_REG 0x08
-#define MTK_NOR_RDATA_REG 0x0c
-#define MTK_NOR_RADR0_REG 0x10
-#define MTK_NOR_RADR1_REG 0x14
-#define MTK_NOR_RADR2_REG 0x18
-#define MTK_NOR_WDATA_REG 0x1c
-#define MTK_NOR_PRGDATA0_REG 0x20
-#define MTK_NOR_PRGDATA1_REG 0x24
-#define MTK_NOR_PRGDATA2_REG 0x28
-#define MTK_NOR_PRGDATA3_REG 0x2c
-#define MTK_NOR_PRGDATA4_REG 0x30
-#define MTK_NOR_PRGDATA5_REG 0x34
-#define MTK_NOR_SHREG0_REG 0x38
-#define MTK_NOR_SHREG1_REG 0x3c
-#define MTK_NOR_SHREG2_REG 0x40
-#define MTK_NOR_SHREG3_REG 0x44
-#define MTK_NOR_SHREG4_REG 0x48
-#define MTK_NOR_SHREG5_REG 0x4c
-#define MTK_NOR_SHREG6_REG 0x50
-#define MTK_NOR_SHREG7_REG 0x54
-#define MTK_NOR_SHREG8_REG 0x58
-#define MTK_NOR_SHREG9_REG 0x5c
-#define MTK_NOR_CFG1_REG 0x60
-#define MTK_NOR_CFG2_REG 0x64
-#define MTK_NOR_CFG3_REG 0x68
-#define MTK_NOR_STATUS0_REG 0x70
-#define MTK_NOR_STATUS1_REG 0x74
-#define MTK_NOR_STATUS2_REG 0x78
-#define MTK_NOR_STATUS3_REG 0x7c
-#define MTK_NOR_FLHCFG_REG 0x84
-#define MTK_NOR_TIME_REG 0x94
-#define MTK_NOR_PP_DATA_REG 0x98
-#define MTK_NOR_PREBUF_STUS_REG 0x9c
-#define MTK_NOR_DELSEL0_REG 0xa0
-#define MTK_NOR_DELSEL1_REG 0xa4
-#define MTK_NOR_INTRSTUS_REG 0xa8
-#define MTK_NOR_INTREN_REG 0xac
-#define MTK_NOR_CHKSUM_CTL_REG 0xb8
-#define MTK_NOR_CHKSUM_REG 0xbc
-#define MTK_NOR_CMD2_REG 0xc0
-#define MTK_NOR_WRPROT_REG 0xc4
-#define MTK_NOR_RADR3_REG 0xc8
-#define MTK_NOR_DUAL_REG 0xcc
-#define MTK_NOR_DELSEL2_REG 0xd0
-#define MTK_NOR_DELSEL3_REG 0xd4
-#define MTK_NOR_DELSEL4_REG 0xd8
-
-/* commands for mtk nor controller */
-#define MTK_NOR_READ_CMD 0x0
-#define MTK_NOR_RDSR_CMD 0x2
-#define MTK_NOR_PRG_CMD 0x4
-#define MTK_NOR_WR_CMD 0x10
-#define MTK_NOR_PIO_WR_CMD 0x90
-#define MTK_NOR_WRSR_CMD 0x20
-#define MTK_NOR_PIO_READ_CMD 0x81
-#define MTK_NOR_WR_BUF_ENABLE 0x1
-#define MTK_NOR_WR_BUF_DISABLE 0x0
-#define MTK_NOR_ENABLE_SF_CMD 0x30
-#define MTK_NOR_DUAD_ADDR_EN 0x8
-#define MTK_NOR_QUAD_READ_EN 0x4
-#define MTK_NOR_DUAL_ADDR_EN 0x2
-#define MTK_NOR_DUAL_READ_EN 0x1
-#define MTK_NOR_DUAL_DISABLE 0x0
-#define MTK_NOR_FAST_READ 0x1
-
-#define SFLASH_WRBUF_SIZE 128
-
-/* Can shift up to 48 bits (6 bytes) of TX/RX */
-#define MTK_NOR_MAX_RX_TX_SHIFT 6
-/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */
-#define MTK_NOR_MAX_SHIFT 7
-/* nor controller 4-byte address mode enable bit */
-#define MTK_NOR_4B_ADDR_EN BIT(4)
-
-/* Helpers for accessing the program data / shift data registers */
-#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
-#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
-
-struct mtk_nor {
- struct spi_nor nor;
- struct device *dev;
- void __iomem *base; /* nor flash base address */
- struct clk *spi_clk;
- struct clk *nor_clk;
-};
-
-static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor)
-{
- struct spi_nor *nor = &mtk_nor->nor;
-
- switch (nor->read_proto) {
- case SNOR_PROTO_1_1_1:
- writeb(nor->read_opcode, mtk_nor->base +
- MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_FAST_READ, mtk_nor->base +
- MTK_NOR_CFG1_REG);
- break;
- case SNOR_PROTO_1_1_2:
- writeb(nor->read_opcode, mtk_nor->base +
- MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base +
- MTK_NOR_DUAL_REG);
- break;
- case SNOR_PROTO_1_1_4:
- writeb(nor->read_opcode, mtk_nor->base +
- MTK_NOR_PRGDATA4_REG);
- writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base +
- MTK_NOR_DUAL_REG);
- break;
- default:
- writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base +
- MTK_NOR_DUAL_REG);
- break;
- }
-}
-
-static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
-{
- int reg;
- u8 val = cmdval & 0x1f;
-
- writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG);
- return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg,
- !(reg & val), 100, 10000);
-}
-
-static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
- const u8 *tx, size_t txlen, u8 *rx, size_t rxlen)
-{
- size_t len = 1 + txlen + rxlen;
- int i, ret, idx;
-
- if (len > MTK_NOR_MAX_SHIFT)
- return -EINVAL;
-
- writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG);
-
- /* start at PRGDATA5, go down to PRGDATA0 */
- idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
-
- /* opcode */
- writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx));
- idx--;
-
- /* program TX data */
- for (i = 0; i < txlen; i++, idx--)
- writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx));
-
- /* clear out rest of TX registers */
- while (idx >= 0) {
- writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx));
- idx--;
- }
-
- ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD);
- if (ret)
- return ret;
-
- /* restart at first RX byte */
- idx = rxlen - 1;
-
- /* read out RX data */
- for (i = 0; i < rxlen; i++, idx--)
- rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx));
-
- return 0;
-}
-
-/* Do a WRSR (Write Status Register) command */
-static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, const u8 sr)
-{
- writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
- writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
- return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD);
-}
-
-static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor)
-{
- u8 reg;
-
- /* the bit0 of MTK_NOR_CFG2_REG is pre-fetch buffer
- * 0: pre-fetch buffer use for read
- * 1: pre-fetch buffer use for page program
- */
- writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
- 0x01 == (reg & 0x01), 100, 10000);
-}
-
-static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor)
-{
- u8 reg;
-
- writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
- MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
- 10000);
-}
-
-static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor)
-{
- u8 val;
- struct spi_nor *nor = &mtk_nor->nor;
-
- val = readb(mtk_nor->base + MTK_NOR_DUAL_REG);
-
- switch (nor->addr_width) {
- case 3:
- val &= ~MTK_NOR_4B_ADDR_EN;
- break;
- case 4:
- val |= MTK_NOR_4B_ADDR_EN;
- break;
- default:
- dev_warn(mtk_nor->dev, "Unexpected address width %u.\n",
- nor->addr_width);
- break;
- }
-
- writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG);
-}
-
-static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr)
-{
- int i;
-
- mtk_nor_set_addr_width(mtk_nor);
-
- for (i = 0; i < 3; i++) {
- writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4);
- addr >>= 8;
- }
- /* Last register is non-contiguous */
- writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG);
-}
-
-static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length,
- u_char *buffer)
-{
- int i, ret;
- int addr = (int)from;
- u8 *buf = (u8 *)buffer;
- struct mtk_nor *mtk_nor = nor->priv;
-
- /* set mode for fast read mode ,dual mode or quad mode */
- mtk_nor_set_read_mode(mtk_nor);
- mtk_nor_set_addr(mtk_nor, addr);
-
- for (i = 0; i < length; i++) {
- ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD);
- if (ret < 0)
- return ret;
- buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG);
- }
- return length;
-}
-
-static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor,
- int addr, int length, u8 *data)
-{
- int i, ret;
-
- mtk_nor_set_addr(mtk_nor, addr);
-
- for (i = 0; i < length; i++) {
- writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG);
- ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD);
- if (ret < 0)
- return ret;
- }
- return 0;
-}
-
-static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr,
- const u8 *buf)
-{
- int i, bufidx, data;
-
- mtk_nor_set_addr(mtk_nor, addr);
-
- bufidx = 0;
- for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
- data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
- buf[bufidx + 1]<<8 | buf[bufidx];
- bufidx += 4;
- writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG);
- }
- return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD);
-}
-
-static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *buf)
-{
- int ret;
- struct mtk_nor *mtk_nor = nor->priv;
- size_t i;
-
- ret = mtk_nor_write_buffer_enable(mtk_nor);
- if (ret < 0) {
- dev_warn(mtk_nor->dev, "write buffer enable failed!\n");
- return ret;
- }
-
- for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
- ret = mtk_nor_write_buffer(mtk_nor, to, buf);
- if (ret < 0) {
- dev_err(mtk_nor->dev, "write buffer failed!\n");
- return ret;
- }
- to += SFLASH_WRBUF_SIZE;
- buf += SFLASH_WRBUF_SIZE;
- }
- ret = mtk_nor_write_buffer_disable(mtk_nor);
- if (ret < 0) {
- dev_warn(mtk_nor->dev, "write buffer disable failed!\n");
- return ret;
- }
-
- if (i < len) {
- ret = mtk_nor_write_single_byte(mtk_nor, to,
- (int)(len - i), (u8 *)buf);
- if (ret < 0) {
- dev_err(mtk_nor->dev, "write single byte failed!\n");
- return ret;
- }
- }
-
- return len;
-}
-
-static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
-{
- int ret;
- struct mtk_nor *mtk_nor = nor->priv;
-
- switch (opcode) {
- case SPINOR_OP_RDSR:
- ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD);
- if (ret < 0)
- return ret;
- if (len == 1)
- *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG);
- else
- dev_err(mtk_nor->dev, "len should be 1 for read status!\n");
- break;
- default:
- ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len);
- break;
- }
- return ret;
-}
-
-static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
- size_t len)
-{
- int ret;
- struct mtk_nor *mtk_nor = nor->priv;
-
- switch (opcode) {
- case SPINOR_OP_WRSR:
- /* We only handle 1 byte */
- ret = mtk_nor_wr_sr(mtk_nor, *buf);
- break;
- default:
- ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0);
- if (ret)
- dev_warn(mtk_nor->dev, "write reg failure!\n");
- break;
- }
- return ret;
-}
-
-static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor)
-{
- clk_disable_unprepare(mtk_nor->spi_clk);
- clk_disable_unprepare(mtk_nor->nor_clk);
-}
-
-static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
-{
- int ret;
-
- ret = clk_prepare_enable(mtk_nor->spi_clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(mtk_nor->nor_clk);
- if (ret) {
- clk_disable_unprepare(mtk_nor->spi_clk);
- return ret;
- }
-
- return 0;
-}
-
-static const struct spi_nor_controller_ops mtk_controller_ops = {
- .read_reg = mtk_nor_read_reg,
- .write_reg = mtk_nor_write_reg,
- .read = mtk_nor_read,
- .write = mtk_nor_write,
-};
-
-static int mtk_nor_init(struct mtk_nor *mtk_nor,
- struct device_node *flash_node)
-{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ |
- SNOR_HWCAPS_READ_FAST |
- SNOR_HWCAPS_READ_1_1_2 |
- SNOR_HWCAPS_PP,
- };
- int ret;
- struct spi_nor *nor;
-
- /* initialize controller to accept commands */
- writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG);
-
- nor = &mtk_nor->nor;
- nor->dev = mtk_nor->dev;
- nor->priv = mtk_nor;
- spi_nor_set_flash_node(nor, flash_node);
- nor->controller_ops = &mtk_controller_ops;
-
- nor->mtd.name = "mtk_nor";
- /* initialized with NULL */
- ret = spi_nor_scan(nor, NULL, &hwcaps);
- if (ret)
- return ret;
-
- return mtd_device_register(&nor->mtd, NULL, 0);
-}
-
-static int mtk_nor_drv_probe(struct platform_device *pdev)
-{
- struct device_node *flash_np;
- struct resource *res;
- int ret;
- struct mtk_nor *mtk_nor;
-
- if (!pdev->dev.of_node) {
- dev_err(&pdev->dev, "No DT found\n");
- return -EINVAL;
- }
-
- mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL);
- if (!mtk_nor)
- return -ENOMEM;
- platform_set_drvdata(pdev, mtk_nor);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mtk_nor->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mtk_nor->base))
- return PTR_ERR(mtk_nor->base);
-
- mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(mtk_nor->spi_clk))
- return PTR_ERR(mtk_nor->spi_clk);
-
- mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
- if (IS_ERR(mtk_nor->nor_clk))
- return PTR_ERR(mtk_nor->nor_clk);
-
- mtk_nor->dev = &pdev->dev;
-
- ret = mtk_nor_enable_clk(mtk_nor);
- if (ret)
- return ret;
-
- /* only support one attached flash */
- flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
- if (!flash_np) {
- dev_err(&pdev->dev, "no SPI flash device to configure\n");
- ret = -ENODEV;
- goto nor_free;
- }
- ret = mtk_nor_init(mtk_nor, flash_np);
-
-nor_free:
- if (ret)
- mtk_nor_disable_clk(mtk_nor);
-
- return ret;
-}
-
-static int mtk_nor_drv_remove(struct platform_device *pdev)
-{
- struct mtk_nor *mtk_nor = platform_get_drvdata(pdev);
-
- mtk_nor_disable_clk(mtk_nor);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mtk_nor_suspend(struct device *dev)
-{
- struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
-
- mtk_nor_disable_clk(mtk_nor);
-
- return 0;
-}
-
-static int mtk_nor_resume(struct device *dev)
-{
- struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
-
- return mtk_nor_enable_clk(mtk_nor);
-}
-
-static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
- .suspend = mtk_nor_suspend,
- .resume = mtk_nor_resume,
-};
-
-#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops)
-#else
-#define MTK_NOR_DEV_PM_OPS NULL
-#endif
-
-static const struct of_device_id mtk_nor_of_ids[] = {
- { .compatible = "mediatek,mt8173-nor"},
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mtk_nor_of_ids);
-
-static struct platform_driver mtk_nor_driver = {
- .probe = mtk_nor_drv_probe,
- .remove = mtk_nor_drv_remove,
- .driver = {
- .name = "mtk-nor",
- .pm = MTK_NOR_DEV_PM_OPS,
- .of_match_table = mtk_nor_of_ids,
- },
-};
-
-module_platform_driver(mtk_nor_driver);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver");
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
new file mode 100644
index 000000000000..f6038d3a3684
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -0,0 +1,1204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+ (((p)->parameter_table_pointer[2] << 16) | \
+ ((p)->parameter_table_pointer[1] << 8) | \
+ ((p)->parameter_table_pointer[0] << 0))
+
+#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
+#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
+
+#define SFDP_SIGNATURE 0x50444653U
+#define SFDP_JESD216_MAJOR 1
+#define SFDP_JESD216_MINOR 0
+#define SFDP_JESD216A_MINOR 5
+#define SFDP_JESD216B_MINOR 6
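
For illustration, the ID and pointer macros above just reassemble little-endian fields from the header bytes. A minimal standalone sketch with hypothetical header values (plain C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical BFPT parameter header: ID 0xff00, table at 0x000030. */
		uint8_t id_lsb = 0x00, id_msb = 0xff;
		uint8_t ptp[3] = { 0x30, 0x00, 0x00 };

		uint16_t id = (id_msb << 8) | id_lsb;
		uint32_t addr = (ptp[2] << 16) | (ptp[1] << 8) | (ptp[0] << 0);

		printf("id=0x%04x addr=0x%06x\n", id, addr); /* id=0xff00 addr=0x000030 */
		return 0;
	}
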
+
+struct sfdp_header {
+	u32	signature; /* 0x50444653U <=> "SFDP" */
+ u8 minor;
+ u8 major;
+	u8	nph; /* 0-based number of parameter headers */
+ u8 unused;
+
+ /* Basic Flash Parameter Table. */
+ struct sfdp_parameter_header bfpt_header;
+};
+
+/* Fast Read settings. */
+struct sfdp_bfpt_read {
+ /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
+ * whether the Fast Read x-y-z command is supported.
+ */
+ u32 supported_dword;
+ u32 supported_bit;
+
+ /*
+	 * The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
+ * encodes the op code, the number of mode clocks and the number of wait
+ * states to be used by Fast Read x-y-z command.
+ */
+ u32 settings_dword;
+ u32 settings_shift;
+
+ /* The SPI protocol for this Fast Read x-y-z command. */
+ enum spi_nor_protocol proto;
+};
+
+struct sfdp_bfpt_erase {
+ /*
+	 * The half-word at offset <shift> in DWORD <dword> encodes the
+ * op code and erase sector size to be used by Sector Erase commands.
+ */
+ u32 dword;
+ u32 shift;
+};
+
+#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
+#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
+
+#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
+#define SMPT_CMD_READ_DUMMY_SHIFT 16
+#define SMPT_CMD_READ_DUMMY(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
+#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
+
+#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
+#define SMPT_CMD_READ_DATA_SHIFT 24
+#define SMPT_CMD_READ_DATA(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
+
+#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
+#define SMPT_CMD_OPCODE_SHIFT 8
+#define SMPT_CMD_OPCODE(_cmd) \
+ (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
+
+#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
+#define SMPT_MAP_REGION_COUNT_SHIFT 16
+#define SMPT_MAP_REGION_COUNT(_header) \
+ ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
+ SMPT_MAP_REGION_COUNT_SHIFT) + 1)
+
+#define SMPT_MAP_ID_MASK GENMASK(15, 8)
+#define SMPT_MAP_ID_SHIFT 8
+#define SMPT_MAP_ID(_header) \
+ (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
+
+#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
+#define SMPT_MAP_REGION_SIZE_SHIFT 8
+#define SMPT_MAP_REGION_SIZE(_region) \
+ (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
+ SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
+
+#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
+#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
+ ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
+
+#define SMPT_DESC_TYPE_MAP BIT(1)
+#define SMPT_DESC_END BIT(0)
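
As a worked example of the masks above, here is a standalone decode of a detection-command descriptor DWORD. The value is made up; 0x04486500 unpacks to opcode 0x65, 8 dummy cycles, read-data mask 0x04 and a 3-byte address length code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t cmd = 0x04486500;	/* hypothetical SMPT command descriptor */

		printf("opcode=0x%02x dummy=%u data_mask=0x%02x addr_len_code=%u\n",
		       (cmd >> 8) & 0xff,	/* SMPT_CMD_OPCODE()     -> 0x65 */
		       (cmd >> 16) & 0xf,	/* SMPT_CMD_READ_DUMMY() -> 8    */
		       (cmd >> 24) & 0xff,	/* SMPT_CMD_READ_DATA()  -> 0x04 */
		       (cmd >> 22) & 0x3);	/* address length code   -> 1 (3-byte) */
		return 0;
	}
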
+
+#define SFDP_4BAIT_DWORD_MAX 2
+
+struct sfdp_4bait {
+ /* The hardware capability. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
+ * the associated 4-byte address op code is supported.
+ */
+ u32 supported_bit;
+};
+
+/**
+ * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
+ * addr_width and read_dummy members of the struct spi_nor should be
+ * previously set.
+ *
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to read
+ * @buf: buffer where the data is copied into (dma-safe memory)
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
+{
+ ssize_t ret;
+
+ while (len) {
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret < 0)
+ return ret;
+ if (!ret || ret > len)
+ return -EIO;
+
+ buf += ret;
+ addr += ret;
+ len -= ret;
+ }
+ return 0;
+}
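
The loop above tolerates short reads: it advances the buffer, the address and the remaining length by whatever the transport actually returned, and bails out on zero-length or over-long answers. A standalone sketch of the same pattern with a stub transport that returns at most 4 bytes per call (all names hypothetical):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Stub transport: returns at most 4 bytes per call from a fake flash image. */
	static long fake_read(uint32_t addr, size_t len, uint8_t *buf)
	{
		static const uint8_t flash[16] = "0123456789abcdef";
		size_t n = len < 4 ? len : 4;

		if (addr >= sizeof(flash))
			return -1;
		if (n > sizeof(flash) - addr)
			n = sizeof(flash) - addr;
		memcpy(buf, flash + addr, n);
		return n;
	}

	int main(void)
	{
		uint8_t out[10];
		uint8_t *p = out;
		uint32_t addr = 2;
		size_t len = sizeof(out);

		while (len) {
			long ret = fake_read(addr, len, p);

			if (ret <= 0 || (size_t)ret > len)
				return 1;
			p += ret;
			addr += ret;
			len -= ret;
		}
		printf("%.10s\n", (const char *)out); /* "23456789ab" */
		return 0;
	}
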
+
+/**
+ * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into (dma-safe memory)
+ *
+ * Whatever the actual numbers of bytes for address and dummy cycles are
+ * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
+ * followed by a 3-byte address and 8 dummy clock cycles.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ u8 addr_width, read_opcode, read_dummy;
+ int ret;
+
+ read_opcode = nor->read_opcode;
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+
+ nor->read_opcode = SPINOR_OP_RDSFDP;
+ nor->addr_width = 3;
+ nor->read_dummy = 8;
+
+ ret = spi_nor_read_raw(nor, addr, len, buf);
+
+ nor->read_opcode = read_opcode;
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ * otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ void *dma_safe_buf;
+ int ret;
+
+ dma_safe_buf = kmalloc(len, GFP_KERNEL);
+ if (!dma_safe_buf)
+ return -ENOMEM;
+
+ ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+ memcpy(buf, dma_safe_buf, len);
+ kfree(dma_safe_buf);
+
+ return ret;
+}
+
+static void
+spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
+ u16 half,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = (half >> 5) & 0x07;
+ read->num_wait_states = (half >> 0) & 0x1f;
+ read->opcode = (half >> 8) & 0xff;
+ read->proto = proto;
+}
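
To make the field layout concrete: a hypothetical settings half-word of 0x6B08 unpacks to opcode 0x6B, zero mode clocks and eight wait states. A standalone sketch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t half = 0x6B08;	/* hypothetical BFPT Fast Read settings */

		printf("opcode=0x%02x mode_clocks=%u wait_states=%u\n",
		       (half >> 8) & 0xff,	/* 0x6b */
		       (half >> 5) & 0x07,	/* 0    */
		       (half >> 0) & 0x1f);	/* 8    */
		return 0;
	}
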
+
+static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
+ /* Fast Read 1-1-2 */
+ {
+ SNOR_HWCAPS_READ_1_1_2,
+ BFPT_DWORD(1), BIT(16), /* Supported bit */
+ BFPT_DWORD(4), 0, /* Settings */
+ SNOR_PROTO_1_1_2,
+ },
+
+ /* Fast Read 1-2-2 */
+ {
+ SNOR_HWCAPS_READ_1_2_2,
+ BFPT_DWORD(1), BIT(20), /* Supported bit */
+ BFPT_DWORD(4), 16, /* Settings */
+ SNOR_PROTO_1_2_2,
+ },
+
+ /* Fast Read 2-2-2 */
+ {
+ SNOR_HWCAPS_READ_2_2_2,
+ BFPT_DWORD(5), BIT(0), /* Supported bit */
+ BFPT_DWORD(6), 16, /* Settings */
+ SNOR_PROTO_2_2_2,
+ },
+
+ /* Fast Read 1-1-4 */
+ {
+ SNOR_HWCAPS_READ_1_1_4,
+ BFPT_DWORD(1), BIT(22), /* Supported bit */
+ BFPT_DWORD(3), 16, /* Settings */
+ SNOR_PROTO_1_1_4,
+ },
+
+ /* Fast Read 1-4-4 */
+ {
+ SNOR_HWCAPS_READ_1_4_4,
+ BFPT_DWORD(1), BIT(21), /* Supported bit */
+ BFPT_DWORD(3), 0, /* Settings */
+ SNOR_PROTO_1_4_4,
+ },
+
+ /* Fast Read 4-4-4 */
+ {
+ SNOR_HWCAPS_READ_4_4_4,
+ BFPT_DWORD(5), BIT(4), /* Supported bit */
+ BFPT_DWORD(7), 16, /* Settings */
+ SNOR_PROTO_4_4_4,
+ },
+};
+
+static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
+ /* Erase Type 1 in DWORD8 bits[15:0] */
+ {BFPT_DWORD(8), 0},
+
+ /* Erase Type 2 in DWORD8 bits[31:16] */
+ {BFPT_DWORD(8), 16},
+
+ /* Erase Type 3 in DWORD9 bits[15:0] */
+ {BFPT_DWORD(9), 0},
+
+ /* Erase Type 4 in DWORD9 bits[31:16] */
+ {BFPT_DWORD(9), 16},
+};
+
+/**
+ * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ * @i: erase type index as sorted in the Basic Flash Parameter Table
+ *
+ * The supported Erase Types will be sorted at init in ascending order, with
+ * the smallest Erase Type size being the first member in the erase_type array
+ * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
+ * the Basic Flash Parameter Table since it will be used later on to
+ * synchronize with the supported Erase Types defined in SFDP optional tables.
+ */
+static void
+spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
+ u32 size, u8 opcode, u8 i)
+{
+ erase->idx = i;
+ spi_nor_set_erase_type(erase, size, opcode);
+}
+
+/**
+ * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
+ * @l: member in the left half of the map's erase_type array
+ * @r: member in the right half of the map's erase_type array
+ *
+ * Comparison function used in the sort() call to sort in ascending order the
+ * map's erase types, the smallest erase type size being the first member in the
+ * sorted erase_type array.
+ *
+ * Return: the result of @l->size - @r->size
+ */
+static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
+{
+ const struct spi_nor_erase_type *left = l, *right = r;
+
+ return left->size - right->size;
+}
+
+/**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ int i;
+ u8 sorted_erase_mask = 0;
+
+ if (!erase_mask)
+ return 0;
+
+ /* Replicate the sort done for the map's erase types. */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+ sorted_erase_mask |= BIT(i);
+
+ return sorted_erase_mask;
+}
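
Worked example: suppose sorting left a 4 KiB type (BFPT index 0) at slot 0 and a 64 KiB type (BFPT index 2) at slot 1. A BFPT-ordered mask of BIT(2) | BIT(0) then remaps to BIT(1) | BIT(0) in sorted ordering. A standalone sketch of the remapping loop:

	#include <stdint.h>
	#include <stdio.h>

	struct et { uint32_t size; uint8_t idx; };

	int main(void)
	{
		/* Sorted by size: 4 KiB (BFPT index 0), 64 KiB (BFPT index 2). */
		struct et erase_type[4] = { { 4096, 0 }, { 65536, 2 }, { 0 }, { 0 } };
		uint8_t erase_mask = (1u << 2) | (1u << 0);	/* BFPT ordering */
		uint8_t sorted = 0;
		int i;

		for (i = 0; i < 4; i++)
			if (erase_type[i].size &&
			    (erase_mask & (1u << erase_type[i].idx)))
				sorted |= 1u << i;

		printf("0x%02x -> 0x%02x\n", erase_mask, sorted); /* 0x05 -> 0x03 */
		return 0;
	}
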
+
+/**
+ * spi_nor_regions_sort_erase_types() - sort erase types in each region
+ * @map: the erase map of the SPI NOR
+ *
+ * Function assumes that the erase types defined in the erase map are already
+ * sorted in ascending order, with the smallest erase type size being the first
+ * member in the erase_type array. It replicates the sort done for the map's
+ * erase types. Each region's erase bitmask will indicate which erase types are
+ * supported from the sorted erase types defined in the erase map.
+ * Sort all the regions' erase types at init in order to speed up the process
+ * of finding the best erase command at runtime.
+ */
+static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u8 region_erase_mask, sorted_erase_mask;
+
+ while (region) {
+ region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ sorted_erase_mask = spi_nor_sort_erase_mask(map,
+ region_erase_mask);
+
+ /* Overwrite erase mask. */
+ region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
+ sorted_erase_mask;
+
+ region = spi_nor_region_next(region);
+ }
+}
+
+/**
+ * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
+ * @nor: pointer to a 'struct spi_nor'
+ * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Basic Flash Parameter Table length and version
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Basic Flash Parameter Table is the main and only mandatory table as
+ * defined by the SFDP (JESD216) specification.
+ * It provides us with the total size (memory density) of the data array and
+ * the number of address bytes for Fast Read, Page Program and Sector Erase
+ * commands.
+ * For Fast READ commands, it also gives the number of mode clock cycles and
+ * wait states (regrouped in the number of dummy clock cycles) for each
+ * supported instruction op code.
+ * For Page Program, the page size is now available since JESD216 rev A, however
+ * the supported instruction op codes are still not provided.
+ * For Sector Erase commands, this table stores the supported instruction op
+ * codes and the associated sector sizes.
+ * Finally, the Quad Enable Requirements (QER) are also available since JESD216
+ * rev A. The QER bits encode the manufacturer dependent procedure to be
+ * executed to set the Quad Enable (QE) bit in some internal register of the
+ * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
+ * sending any Quad SPI command to the memory. Actually, setting the QE bit
+ * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
+ * and IO3 hence enabling 4 (Quad) I/O lines.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_bfpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ struct sfdp_bfpt bfpt;
+ size_t len;
+ int i, cmd, err;
+ u32 addr;
+ u16 half;
+ u8 erase_mask;
+
+ /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
+ if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
+ return -EINVAL;
+
+ /* Read the Basic Flash Parameter Table. */
+ len = min_t(size_t, sizeof(bfpt),
+ bfpt_header->length * sizeof(u32));
+ addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
+ memset(&bfpt, 0, sizeof(bfpt));
+ err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
+ if (err < 0)
+ return err;
+
+ /* Fix endianness of the BFPT DWORDs. */
+ le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);
+
+ /* Number of address bytes. */
+ switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
+ case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+ nor->addr_width = 3;
+ break;
+
+ case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
+ nor->addr_width = 4;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Flash Memory Density (in bits). */
+ params->size = bfpt.dwords[BFPT_DWORD(2)];
+ if (params->size & BIT(31)) {
+ params->size &= ~BIT(31);
+
+ /*
+ * Prevent overflows on params->size. Anyway, a NOR of 2^64
+ * bits is unlikely to exist so this error probably means
+ * the BFPT we are reading is corrupted/wrong.
+ */
+ if (params->size > 63)
+ return -EINVAL;
+
+ params->size = 1ULL << params->size;
+ } else {
+ params->size++;
+ }
+ params->size >>= 3; /* Convert to bytes. */
+
+ /* Fast Read settings. */
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
+ const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
+ struct spi_nor_read_command *read;
+
+ if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
+ params->hwcaps.mask &= ~rd->hwcaps;
+ continue;
+ }
+
+ params->hwcaps.mask |= rd->hwcaps;
+ cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
+ read = &params->reads[cmd];
+ half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
+ spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
+ }
+
+ /*
+ * Sector Erase settings. Reinitialize the uniform erase map using the
+ * Erase Types defined in the bfpt table.
+ */
+ erase_mask = 0;
+ memset(&params->erase_map, 0, sizeof(params->erase_map));
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
+ const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
+ u32 erasesize;
+ u8 opcode;
+
+ half = bfpt.dwords[er->dword] >> er->shift;
+ erasesize = half & 0xff;
+
+ /* erasesize == 0 means this Erase Type is not supported. */
+ if (!erasesize)
+ continue;
+
+ erasesize = 1U << erasesize;
+ opcode = (half >> 8) & 0xff;
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
+ opcode, i);
+ }
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+ /*
+ * Sort all the map's Erase Types in ascending order with the smallest
+ * erase size being the first member in the erase_type array.
+ */
+ sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
+ spi_nor_map_cmp_erase_type, NULL);
+ /*
+ * Sort the erase types in the uniform region in order to update the
+ * uniform_erase_type bitmask. The bitmask will be used later on when
+ * selecting the uniform erase.
+ */
+ spi_nor_regions_sort_erase_types(map);
+ map->uniform_erase_type = map->uniform_region.offset &
+ SNOR_ERASE_TYPE_MASK;
+
+ /* Stop here if not JESD216 rev A or later. */
+ if (bfpt_header->length < BFPT_DWORD_MAX)
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
+ params);
+
+ /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
+ params->page_size = bfpt.dwords[BFPT_DWORD(11)];
+ params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
+ params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+ params->page_size = 1U << params->page_size;
+
+ /* Quad Enable Requirements. */
+ switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
+ case BFPT_DWORD15_QER_NONE:
+ params->quad_enable = NULL;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ /*
+ * Writing only one byte to the Status Register has the
+ * side-effect of clearing Status Register 2.
+ */
+ case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
+ /*
+ * Read Configuration Register (35h) instruction is not
+ * supported.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR1_BIT6:
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT7:
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr2_bit7_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1:
+ /*
+		 * JESD216 rev B or later does not specify whether writing only
+		 * one byte to the Status Register clears Status Register 2 or
+		 * not, so let's be cautious and keep the default
+ * assumption of a 16-bit Write Status (01h) command.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
+}
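
The density decoding above has two encodings worth a worked example: with bit 31 clear, DWORD2 holds the density in bits minus one (0x00ffffff -> 2^24 bits = 2 MiB); with bit 31 set, it holds an exponent (0x8000001b -> 2^27 bits = 16 MiB). A standalone sketch of the same arithmetic (helper name is made up):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t bfpt_dword2_to_bytes(uint32_t dword2)
	{
		uint64_t size = dword2;

		if (size & (1u << 31))		/* exponent encoding */
			size = 1ULL << (size & ~(1u << 31));
		else				/* density - 1 encoding */
			size++;
		return size >> 3;		/* bits -> bytes */
	}

	int main(void)
	{
		/* 2097152 (2 MiB) */
		printf("%llu\n", (unsigned long long)bfpt_dword2_to_bytes(0x00ffffffU));
		/* 16777216 (16 MiB) */
		printf("%llu\n", (unsigned long long)bfpt_dword2_to_bytes(0x8000001bU));
		return 0;
	}
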
+
+/**
+ * spi_nor_smpt_addr_width() - return the address width used in the
+ * configuration detection command.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ */
+static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+{
+ switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
+ case SMPT_CMD_ADDRESS_LEN_0:
+ return 0;
+ case SMPT_CMD_ADDRESS_LEN_3:
+ return 3;
+ case SMPT_CMD_ADDRESS_LEN_4:
+ return 4;
+ case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
+ default:
+ return nor->addr_width;
+ }
+}
+
+/**
+ * spi_nor_smpt_read_dummy() - return the configuration detection command read
+ * latency, in clock cycles.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ *
+ * Return: the number of dummy cycles for an SMPT read
+ */
+static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
+{
+ u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
+
+ if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
+ return nor->read_dummy;
+ return read_dummy;
+}
+
+/**
+ * spi_nor_get_map_in_use() - get the configuration map in use
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt: pointer to the sector map parameter table
+ * @smpt_len: sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
+ */
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+ u8 smpt_len)
+{
+ const u32 *ret;
+ u8 *buf;
+ u32 addr;
+ int err;
+ u8 i;
+ u8 addr_width, read_opcode, read_dummy;
+ u8 read_data_mask, map_id;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+ read_opcode = nor->read_opcode;
+
+ map_id = 0;
+ /* Determine if there are any optional Detection Command Descriptors */
+ for (i = 0; i < smpt_len; i += 2) {
+ if (smpt[i] & SMPT_DESC_TYPE_MAP)
+ break;
+
+ read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
+ nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+ nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
+ nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
+ addr = smpt[i + 1];
+
+ err = spi_nor_read_raw(nor, addr, 1, buf);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out;
+ }
+
+ /*
+ * Build an index value that is used to select the Sector Map
+ * Configuration that is currently in use.
+ */
+ map_id = map_id << 1 | !!(*buf & read_data_mask);
+ }
+
+ /*
+ * If command descriptors are provided, they always precede map
+	 * descriptors in the table. There is no need to start the iteration
+	 * over the smpt array all over again.
+ *
+ * Find the matching configuration map.
+ */
+ ret = ERR_PTR(-EINVAL);
+ while (i < smpt_len) {
+ if (SMPT_MAP_ID(smpt[i]) == map_id) {
+ ret = smpt + i;
+ break;
+ }
+
+ /*
+ * If there are no more configuration map descriptors and no
+ * configuration ID matched the configuration identifier, the
+ * sector address map is unknown.
+ */
+ if (smpt[i] & SMPT_DESC_END)
+ break;
+
+ /* increment the table index to the next map */
+ i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
+ }
+
+ /* fall through */
+out:
+ kfree(buf);
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+ nor->read_opcode = read_opcode;
+ return ret;
+}
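
A worked example of the map_id construction above: with two detection commands whose masked readbacks are non-zero and zero respectively, map_id becomes (0 << 1 | 1) = 1 after the first probe and (1 << 1 | 0) = 2 after the second, and the descriptor walk then looks for the map whose SMPT_MAP_ID() equals 2. Standalone sketch with hypothetical probe results:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical masked readbacks of two detection commands. */
		uint8_t probe[2] = { 0x04, 0x00 };
		uint8_t map_id = 0;
		int i;

		for (i = 0; i < 2; i++)
			map_id = map_id << 1 | !!probe[i];

		printf("map_id=%u\n", map_id); /* 2 */
		return 0;
	}
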
+
+static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_LAST_REGION;
+}
+
+static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_OVERLAID_REGION;
+}
+
+/**
+ * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @erase_type: erase type bitmask
+ */
+static void
+spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase,
+ const u8 erase_type)
+{
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (!(erase_type & BIT(i)))
+ continue;
+ if (region->size & erase[i].size_mask) {
+ spi_nor_region_mark_overlay(region);
+ return;
+ }
+ }
+}
+
+/**
+ * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to a duplicate 'struct spi_nor_flash_parameter' that is
+ * used for storing SFDP parsed data
+ * @smpt: pointer to the sector map parameter table
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params,
+ const u32 *smpt)
+{
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase = map->erase_type;
+ struct spi_nor_erase_region *region;
+ u64 offset;
+ u32 region_count;
+ int i, j;
+ u8 uniform_erase_type, save_uniform_erase_type;
+ u8 erase_type, regions_erase_type;
+
+ region_count = SMPT_MAP_REGION_COUNT(*smpt);
+ /*
+ * The regions will be freed when the driver detaches from the
+ * device.
+ */
+ region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ map->regions = region;
+
+ uniform_erase_type = 0xff;
+ regions_erase_type = 0;
+ offset = 0;
+ /* Populate regions. */
+ for (i = 0; i < region_count; i++) {
+ j = i + 1; /* index for the region dword */
+ region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
+ erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
+ region[i].offset = offset | erase_type;
+
+ spi_nor_region_check_overlay(&region[i], erase, erase_type);
+
+ /*
+ * Save the erase types that are supported in all regions and
+ * can erase the entire flash memory.
+ */
+ uniform_erase_type &= erase_type;
+
+ /*
+ * regions_erase_type mask will indicate all the erase types
+ * supported in this configuration map.
+ */
+ regions_erase_type |= erase_type;
+
+ offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+ region[i].size;
+ }
+
+ save_uniform_erase_type = map->uniform_erase_type;
+ map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+ uniform_erase_type);
+
+ if (!regions_erase_type) {
+ /*
+ * Roll back to the previous uniform_erase_type mask, SMPT is
+ * broken.
+ */
+ map->uniform_erase_type = save_uniform_erase_type;
+ return -EINVAL;
+ }
+
+ /*
+ * BFPT advertises all the erase types supported by all the possible
+ * map configurations. Mask out the erase types that are not supported
+ * by the current map configuration.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (!(regions_erase_type & BIT(erase[i].idx)))
+ spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+
+ spi_nor_region_mark_end(&region[i - 1]);
+
+ return 0;
+}
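
The function above packs each region's erase-type bits into the low bits of region->offset ("offset | erase_type"). A hedged standalone sketch of that packing, assuming the low six bits hold the erase-type and region flags as the SNOR_ERASE_FLAGS_MASK mask-out above suggests (the exact bit layout lives in core.h):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/*
		 * Illustrative only: a region starting at 1 MiB that supports
		 * sorted erase types 0 and 1, packed as base | erase-type bits.
		 */
		uint64_t erase_flags_mask = 0x3f;	/* assumed SNOR_ERASE_FLAGS_MASK */
		uint64_t offset = (1ULL << 20) | 0x3;

		printf("base=0x%llx types=0x%llx\n",
		       (unsigned long long)(offset & ~erase_flags_mask),
		       (unsigned long long)(offset & 0xf));
		return 0;
	}
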
+
+/**
+ * spi_nor_parse_smpt() - parse Sector Map Parameter Table
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt_header: sector map parameter table header
+ * @params: pointer to a duplicate 'struct spi_nor_flash_parameter'
+ * that is used for storing SFDP parsed data
+ *
+ * This table is optional, but when available, we parse it to identify the
+ * location and size of sectors within the main data array of the flash memory
+ * device and to identify which Erase Types are supported by each sector.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_smpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *smpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ const u32 *sector_map;
+ u32 *smpt;
+ size_t len;
+ u32 addr;
+ int ret;
+
+ /* Read the Sector Map Parameter Table. */
+ len = smpt_header->length * sizeof(*smpt);
+ smpt = kmalloc(len, GFP_KERNEL);
+ if (!smpt)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(smpt_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, smpt);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the SMPT DWORDs. */
+ le32_to_cpu_array(smpt, smpt_header->length);
+
+ sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+ if (IS_ERR(sector_map)) {
+ ret = PTR_ERR(sector_map);
+ goto out;
+ }
+
+ ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
+ if (ret)
+ goto out;
+
+ spi_nor_regions_sort_erase_types(&params->erase_map);
+ /* fall through */
+out:
+ kfree(smpt);
+ return ret;
+}
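
Both here and in the BFPT path, the table bytes arrive little-endian off the wire and are converted in place with le32_to_cpu_array(). A minimal user-space analogue of one such conversion (not the kernel helper itself):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t le32_from_bytes(const uint8_t *p)
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	int main(void)
	{
		const uint8_t raw[4] = { 0x53, 0x46, 0x44, 0x50 }; /* "SFDP" */

		printf("0x%08x\n", le32_from_bytes(raw)); /* 0x50444653 == SFDP_SIGNATURE */
		return 0;
	}
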
+
+/**
+ * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
+ * @nor: pointer to a 'struct spi_nor'.
+ * @param_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the 4-Byte Address Instruction Table length and version.
+ * @params:		pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_4bait(struct spi_nor *nor,
+ const struct sfdp_parameter_header *param_header,
+ struct spi_nor_flash_parameter *params)
+{
+ static const struct sfdp_4bait reads[] = {
+ { SNOR_HWCAPS_READ, BIT(0) },
+ { SNOR_HWCAPS_READ_FAST, BIT(1) },
+ { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
+ { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
+ { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
+ { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ };
+ static const struct sfdp_4bait programs[] = {
+ { SNOR_HWCAPS_PP, BIT(6) },
+ { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
+ { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
+ };
+ static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
+ { 0u /* not used */, BIT(9) },
+ { 0u /* not used */, BIT(10) },
+ { 0u /* not used */, BIT(11) },
+ { 0u /* not used */, BIT(12) },
+ };
+ struct spi_nor_pp_command *params_pp = params->page_programs;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ u32 *dwords;
+ size_t len;
+ u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
+ int i, ret;
+
+ if (param_header->major != SFDP_JESD216_MAJOR ||
+ param_header->length < SFDP_4BAIT_DWORD_MAX)
+ return -EINVAL;
+
+ /* Read the 4-byte Address Instruction Table. */
+ len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(param_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the 4BAIT DWORDs. */
+ le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);
+
+ /*
+ * Compute the subset of (Fast) Read commands for which the 4-byte
+ * version is supported.
+ */
+ discard_hwcaps = 0;
+ read_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(reads); i++) {
+ const struct sfdp_4bait *read = &reads[i];
+
+ discard_hwcaps |= read->hwcaps;
+ if ((params->hwcaps.mask & read->hwcaps) &&
+ (dwords[0] & read->supported_bit))
+ read_hwcaps |= read->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Page Program commands for which the 4-byte
+ * version is supported.
+ */
+ pp_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(programs); i++) {
+ const struct sfdp_4bait *program = &programs[i];
+
+ /*
+ * The 4 Byte Address Instruction (Optional) Table is the only
+ * SFDP table that indicates support for Page Program Commands.
+ * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
+ * authority for specifying Page Program support.
+ */
+ discard_hwcaps |= program->hwcaps;
+ if (dwords[0] & program->supported_bit)
+ pp_hwcaps |= program->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Sector Erase commands for which the 4-byte
+ * version is supported.
+ */
+ erase_mask = 0;
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ const struct sfdp_4bait *erase = &erases[i];
+
+ if (dwords[0] & erase->supported_bit)
+ erase_mask |= BIT(i);
+ }
+
+ /* Replicate the sort done for the map's erase types in BFPT. */
+ erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
+
+ /*
+ * We need at least one 4-byte op code per read, program and erase
+ * operation; the .read(), .write() and .erase() hooks share the
+ * nor->addr_width value.
+ */
+ if (!read_hwcaps || !pp_hwcaps || !erase_mask)
+ goto out;
+
+ /*
+ * Discard all operations from the 4-byte instruction set which are
+ * not supported by this memory.
+ */
+ params->hwcaps.mask &= ~discard_hwcaps;
+ params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
+
+ /* Use the 4-byte address instruction set. */
+ for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
+ struct spi_nor_read_command *read_cmd = &params->reads[i];
+
+ read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
+ }
+
+ /* 4BAIT is the only SFDP table that indicates page program support. */
+ if (pp_hwcaps & SNOR_HWCAPS_PP)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B,
+ SNOR_PROTO_1_1_4);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4_4B,
+ SNOR_PROTO_1_4_4);
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (erase_mask & BIT(i))
+ erase_type[i].opcode = (dwords[1] >>
+ erase_type[i].idx * 8) & 0xFF;
+ else
+ spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
+ }
+
+ /*
+ * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
+	 * later because we already did the conversion to 4-byte opcodes. Also,
+	 * that function implements a legacy quirk for the erase size of
+	 * Spansion memory. However, this quirk is no longer needed with new
+ * SFDP compliant memories.
+ */
+ nor->addr_width = 4;
+ nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
+
+ /* fall through */
+out:
+ kfree(dwords);
+ return ret;
+}
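
The erase-opcode extraction above pulls one byte per BFPT-ordered erase index out of dwords[1]. A standalone sketch with a hypothetical 4BAIT DWORD2 in which only types 1 and 4 carry 4-byte opcodes:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t dword2 = 0xdc00005c;	/* hypothetical: type 1 -> 0x5c, type 4 -> 0xdc */
		int idx;

		for (idx = 0; idx < 4; idx++)
			printf("type %d opcode 0x%02x\n", idx + 1,
			       (dword2 >> (idx * 8)) & 0xff);
		return 0;
	}
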
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This is a standard which tends to be supported by almost all
+ * (Q)SPI memory manufacturers. Those hard-coded tables allow us to learn at
+ * runtime the main parameters needed to perform basic SPI flash operations such
+ * as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+{
+ const struct sfdp_parameter_header *param_header, *bfpt_header;
+ struct sfdp_parameter_header *param_headers = NULL;
+ struct sfdp_header header;
+ struct device *dev = nor->dev;
+ size_t psize;
+ int i, err;
+
+ /* Get the SFDP header. */
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
+ if (err < 0)
+ return err;
+
+ /* Check the SFDP header version. */
+ if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+ header.major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Verify that the first and only mandatory parameter header is a
+ * Basic Flash Parameter Table header as specified in JESD216.
+ */
+ bfpt_header = &header.bfpt_header;
+ if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+ bfpt_header->major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Allocate memory then read all parameter headers with a single
+ * Read SFDP command. These parameter headers will actually be parsed
+ * twice: a first time to get the latest revision of the basic flash
+ * parameter table, then a second time to handle the supported optional
+ * tables.
+	 * Hence we read the parameter headers once and for all to reduce the
+	 * processing time. Also, we use kmalloc() instead of devm_kmalloc()
+ * because we don't need to keep these parameter headers: the allocated
+ * memory is always released with kfree() before exiting this function.
+ */
+ if (header.nph) {
+ psize = header.nph * sizeof(*param_headers);
+
+ param_headers = kmalloc(psize, GFP_KERNEL);
+ if (!param_headers)
+ return -ENOMEM;
+
+ err = spi_nor_read_sfdp(nor, sizeof(header),
+ psize, param_headers);
+ if (err < 0) {
+ dev_dbg(dev, "failed to read SFDP parameter headers\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Check other parameter headers to get the latest revision of
+ * the basic flash parameter table.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+ param_header->major == SFDP_JESD216_MAJOR &&
+ (param_header->minor > bfpt_header->minor ||
+ (param_header->minor == bfpt_header->minor &&
+ param_header->length > bfpt_header->length)))
+ bfpt_header = param_header;
+ }
+
+ err = spi_nor_parse_bfpt(nor, bfpt_header, params);
+ if (err)
+ goto exit;
+
+ /* Parse optional parameter tables. */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ switch (SFDP_PARAM_HEADER_ID(param_header)) {
+ case SFDP_SECTOR_MAP_ID:
+ err = spi_nor_parse_smpt(nor, param_header, params);
+ break;
+
+ case SFDP_4BAIT_ID:
+ err = spi_nor_parse_4bait(nor, param_header, params);
+ break;
+
+ default:
+ break;
+ }
+
+ if (err) {
+ dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+ SFDP_PARAM_HEADER_ID(param_header));
+ /*
+ * Let's not drop all information we extracted so far
+			 * if optional table parsers fail. In case of failure,
+			 * each optional parser is responsible for rolling back to
+ * the previously known spi_nor data.
+ */
+ err = 0;
+ }
+ }
+
+exit:
+ kfree(param_headers);
+ return err;
+}
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
new file mode 100644
index 000000000000..e0a8ded04890
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SFDP_H
+#define __LINUX_MTD_SFDP_H
+
+/* Basic Flash Parameter Table */
+
+/*
+ * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * They are indexed from 1 but C arrays are indexed from 0.
+ */
+#define BFPT_DWORD(i) ((i) - 1)
+#define BFPT_DWORD_MAX 16
+
+struct sfdp_bfpt {
+ u32 dwords[BFPT_DWORD_MAX];
+};
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216 9
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
+#define BFPT_DWORD1_DTR BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
+#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ * reads based on instruction. DQ3/HOLD# functions are hold during
+ * instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * Writing only one byte to the status register has the side-effect of
+ * clearing status register 2, including the QE bit. The 100b code is
+ * used if writing one byte to the status register does not modify
+ * status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ * one data byte where bit 6 is one.
+ * [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ * register 2 instruction 3Eh with one data byte where bit 7 is one.
+ * [...]
+ * The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * In contrast to the 001b code, writing one byte to the status
+ * register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ * Read Status instruction 05h. Status register2 is read using
+ * instruction 35h. QE is set via Write Status instruction 01h with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ */
+#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
+
+struct sfdp_parameter_header {
+ u8 id_lsb;
+ u8 minor;
+ u8 major;
+ u8 length; /* in double words */
+ u8 parameter_table_pointer[3]; /* byte address */
+ u8 id_msb;
+};
+
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params);
+
+#endif /* __LINUX_MTD_SFDP_H */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
new file mode 100644
index 000000000000..6756202ace4b
--- /dev/null
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info spansion_parts[] = {
+ /* Spansion/Cypress -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | USE_CLSR) },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+};
+
+static void spansion_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->params->size <= SZ_16M)
+ return;
+
+ nor->flags |= SNOR_F_4B_OPCODES;
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = nor->info->sector_size;
+}
+
+static const struct spi_nor_fixups spansion_fixups = {
+ .post_sfdp = spansion_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_spansion = {
+ .name = "spansion",
+ .parts = spansion_parts,
+ .nparts = ARRAY_SIZE(spansion_parts),
+ .fixups = &spansion_fixups,
+};
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
deleted file mode 100644
index 4fc632ec18fe..000000000000
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ /dev/null
@@ -1,5434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
- * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
- *
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/math64.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/sort.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/of_platform.h>
-#include <linux/sched/task_stack.h>
-#include <linux/spi/flash.h>
-#include <linux/mtd/spi-nor.h>
-
-/* Define max times to check status register before we give up. */
-
-/*
- * For everything but full-chip erase; probably could be much smaller, but kept
- * around for safety for now
- */
-#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
-
-/*
- * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
- * for larger flash
- */
-#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
-
-#define SPI_NOR_MAX_ID_LEN 6
-#define SPI_NOR_MAX_ADDR_WIDTH 4
-
-struct sfdp_parameter_header {
- u8 id_lsb;
- u8 minor;
- u8 major;
- u8 length; /* in double words */
- u8 parameter_table_pointer[3]; /* byte address */
- u8 id_msb;
-};
-
-#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
-#define SFDP_PARAM_HEADER_PTP(p) \
- (((p)->parameter_table_pointer[2] << 16) | \
- ((p)->parameter_table_pointer[1] << 8) | \
- ((p)->parameter_table_pointer[0] << 0))
-
-#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
-#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
-#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
-
-#define SFDP_SIGNATURE 0x50444653U
-#define SFDP_JESD216_MAJOR 1
-#define SFDP_JESD216_MINOR 0
-#define SFDP_JESD216A_MINOR 5
-#define SFDP_JESD216B_MINOR 6
-
-struct sfdp_header {
-	u32	signature; /* 0x50444653U <=> "SFDP" */
- u8 minor;
- u8 major;
-	u8	nph; /* 0-based number of parameter headers */
- u8 unused;
-
- /* Basic Flash Parameter Table. */
- struct sfdp_parameter_header bfpt_header;
-};
-
-/* Basic Flash Parameter Table */
-
-/*
- * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
- * They are indexed from 1 but C arrays are indexed from 0.
- */
-#define BFPT_DWORD(i) ((i) - 1)
-#define BFPT_DWORD_MAX 16
-
-/* The first version of JESD216 defined only 9 DWORDs. */
-#define BFPT_DWORD_MAX_JESD216 9
-
-/* 1st DWORD. */
-#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
-#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
-#define BFPT_DWORD1_DTR BIT(19)
-#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
-#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
-#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
-
-/* 5th DWORD. */
-#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
-#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
-
-/* 11th DWORD. */
-#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
-#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
-
-/* 15th DWORD. */
-
-/*
- * (from JESD216 rev B)
- * Quad Enable Requirements (QER):
- * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
- * reads based on instruction. DQ3/HOLD# functions are hold during
- * instruction phase.
- * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- * Writing only one byte to the status register has the side-effect of
- * clearing status register 2, including the QE bit. The 100b code is
- * used if writing one byte to the status register does not modify
- * status register 2.
- * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
- * one data byte where bit 6 is one.
- * [...]
- * - 011b: QE is bit 7 of status register 2. It is set via Write status
- * register 2 instruction 3Eh with one data byte where bit 7 is one.
- * [...]
- * The status register 2 is read using instruction 3Fh.
- * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- * In contrast to the 001b code, writing one byte to the status
- * register does not modify status register 2.
- * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
- * Read Status instruction 05h. Status register2 is read using
- * instruction 35h. QE is set via Write Status instruction 01h with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- */
-#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
-#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
-#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
-#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
-#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
-
-struct sfdp_bfpt {
- u32 dwords[BFPT_DWORD_MAX];
-};
-
-/**
- * struct spi_nor_fixups - SPI NOR fixup hooks
- * @default_init: called after default flash parameters init. Used to tweak
- * flash parameters when information provided by the flash_info
- * table is incomplete or wrong.
- * @post_bfpt: called after the BFPT table has been parsed
- * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
- * that do not support RDSFDP). Typically used to tweak various
- * parameters that could not be extracted by other means (i.e.
- * when information provided by the SFDP/flash_info tables are
- * incomplete or wrong).
- *
- * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
- * table is broken or not available.
- */
-struct spi_nor_fixups {
- void (*default_init)(struct spi_nor *nor);
- int (*post_bfpt)(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params);
- void (*post_sfdp)(struct spi_nor *nor);
-};
-
-struct flash_info {
- char *name;
-
- /*
- * This array stores the ID bytes.
-	 * The first three bytes are the JEDEC ID.
- * JEDEC ID zero means "no ID" (mostly older chips).
- */
- u8 id[SPI_NOR_MAX_ID_LEN];
- u8 id_len;
-
- /* The size listed here is what works with SPINOR_OP_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u32 flags;
-#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
-#define SST_WRITE BIT(2) /* use SST byte programming */
-#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
-#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
-#define USE_FSR BIT(7) /* use flag status register */
-#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
-#define SPI_NOR_HAS_TB BIT(9) /*
- * Flash SR has Top/Bottom (TB) protect
- * bit. Must be used with
- * SPI_NOR_HAS_LOCK.
- */
-#define SPI_NOR_XSR_RDY BIT(10) /*
- * S3AN flashes have specific opcode to
- * read the status register.
- * Flags SPI_NOR_XSR_RDY and SPI_S3AN
- * use the same bit as one implies the
- * other, but we will get rid of
- * SPI_S3AN soon.
- */
-#define SPI_S3AN BIT(10) /*
- * Xilinx Spartan 3AN In-System Flash
- * (MFR cannot be used for probing
- * because it has the same value as
- * ATMEL flashes)
- */
-#define SPI_NOR_4B_OPCODES BIT(11) /*
- * Use dedicated 4byte address op codes
- * to support memory size above 128Mib.
- */
-#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
-#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
-#define USE_CLSR BIT(14) /* use CLSR command */
-#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
-#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
- * Top/Bottom (TB) is bit 6 of
- * status register. Must be used with
- * SPI_NOR_HAS_TB.
- */
-
- /* Part specific fixup hooks. */
- const struct spi_nor_fixups *fixups;
-};
-
-#define JEDEC_MFR(info) ((info)->id[0])
-
-/**
- * spi_nor_spimem_xfer_data() - helper function to read/write data to
- * flash's memory region
- * @nor: pointer to 'struct spi_nor'
- * @op: pointer to 'struct spi_mem_op' template for transfer
- *
- * Return: number of bytes transferred on success, -errno otherwise
- */
-static ssize_t spi_nor_spimem_xfer_data(struct spi_nor *nor,
- struct spi_mem_op *op)
-{
- bool usebouncebuf = false;
- void *rdbuf = NULL;
- const void *buf;
- int ret;
-
- if (op->data.dir == SPI_MEM_DATA_IN)
- buf = op->data.buf.in;
- else
- buf = op->data.buf.out;
-
- if (object_is_on_stack(buf) || !virt_addr_valid(buf))
- usebouncebuf = true;
-
- if (usebouncebuf) {
- if (op->data.nbytes > nor->bouncebuf_size)
- op->data.nbytes = nor->bouncebuf_size;
-
- if (op->data.dir == SPI_MEM_DATA_IN) {
- rdbuf = op->data.buf.in;
- op->data.buf.in = nor->bouncebuf;
- } else {
- op->data.buf.out = nor->bouncebuf;
- memcpy(nor->bouncebuf, buf,
- op->data.nbytes);
- }
- }
-
- ret = spi_mem_adjust_op_size(nor->spimem, op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(nor->spimem, op);
- if (ret)
- return ret;
-
- if (usebouncebuf && op->data.dir == SPI_MEM_DATA_IN)
- memcpy(rdbuf, nor->bouncebuf, op->data.nbytes);
-
- return op->data.nbytes;
-}
-
-/**
- * spi_nor_spimem_read_data() - read data from flash's memory region via
- * spi-mem
- * @nor: pointer to 'struct spi_nor'
- * @from: offset to read from
- * @len: number of bytes to read
- * @buf: pointer to dst buffer
- *
- * Return: number of bytes read successfully, -errno otherwise
- */
-static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
- size_t len, u8 *buf)
-{
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
- SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
- SPI_MEM_OP_DATA_IN(len, buf, 1));
-
- /* get transfer protocols. */
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
-
- /* convert the dummy cycles to the number of bytes */
- op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
-
- return spi_nor_spimem_xfer_data(nor, &op);
-}
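-
-/*
- * A minimal standalone model of the dummy-cycle conversion above,
- * assuming a hypothetical read with 8 dummy cycles on a 4-bit bus:
- * spi-mem expresses dummies in bytes, so 8 cycles become
- * (8 * 4) / 8 = 4 bytes.
- */
-#include <stdio.h>
-
-static unsigned int dummy_cycles_to_nbytes(unsigned int cycles,
- unsigned int buswidth)
-{
- /* Each byte clocked on an n-bit bus takes 8 / buswidth cycles. */
- return (cycles * buswidth) / 8;
-}
-
-int main(void)
-{
- printf("%u\n", dummy_cycles_to_nbytes(8, 4)); /* prints 4 */
- return 0;
-}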
-
-/**
- * spi_nor_read_data() - read data from flash memory
- * @nor: pointer to 'struct spi_nor'
- * @from: offset to read from
- * @len: number of bytes to read
- * @buf: pointer to dst buffer
- *
- * Return: number of bytes read successfully, -errno otherwise
- */
-static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
- u8 *buf)
-{
- if (nor->spimem)
- return spi_nor_spimem_read_data(nor, from, len, buf);
-
- return nor->controller_ops->read(nor, from, len, buf);
-}
-
-/**
- * spi_nor_spimem_write_data() - write data to flash memory via
- * spi-mem
- * @nor: pointer to 'struct spi_nor'
- * @to: offset to write to
- * @len: number of bytes to write
- * @buf: pointer to src buffer
- *
- * Return: number of bytes written successfully, -errno otherwise
- */
-static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
- size_t len, const u8 *buf)
-{
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
-
- if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
- op.addr.nbytes = 0;
-
- return spi_nor_spimem_xfer_data(nor, &op);
-}
-
-/**
- * spi_nor_write_data() - write data to flash memory
- * @nor: pointer to 'struct spi_nor'
- * @to: offset to write to
- * @len: number of bytes to write
- * @buf: pointer to src buffer
- *
- * Return: number of bytes written successfully, -errno otherwise
- */
-static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
- const u8 *buf)
-{
- if (nor->spimem)
- return spi_nor_spimem_write_data(nor, to, len, buf);
-
- return nor->controller_ops->write(nor, to, len, buf);
-}
-
-/**
- * spi_nor_write_enable() - Set write enable latch with Write Enable command.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_enable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_write_disable() - Send Write Disable instruction to the chip.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_disable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_sr() - Read the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
- sr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading SR\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_fsr() - Read the Flag Status Register.
- * @nor: pointer to 'struct spi_nor'
- * @fsr: pointer to a DMA-able buffer where the value of the
- * Flag Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, fsr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
- fsr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading FSR\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_cr() - Read the Configuration Register using the
- * SPINOR_OP_RDCR (35h) command.
- * @nor: pointer to 'struct spi_nor'
- * @cr: pointer to a DMA-able buffer where the value of the
- * Configuration Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, cr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading CR\n", ret);
-
- return ret;
-}
-
-/**
- * macronix_set_4byte() - Set 4-byte address mode for Macronix flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int macronix_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
- SPINOR_OP_EN4B :
- SPINOR_OP_EX4B,
- 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor,
- enable ? SPINOR_OP_EN4B :
- SPINOR_OP_EX4B,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
-
- return ret;
-}
-
-/**
- * st_micron_set_4byte() - Set 4-byte address mode for ST and Micron flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- ret = macronix_set_4byte(nor, enable);
- if (ret)
- return ret;
-
- return spi_nor_write_disable(nor);
-}
-
-/**
- * spansion_set_4byte() - Set 4-byte address mode for Spansion flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- nor->bouncebuf[0] = enable << 7;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
- nor->bouncebuf, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_write_ear() - Write Extended Address Register.
- * @nor: pointer to 'struct spi_nor'.
- * @ear: value to write to the Extended Address Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
-{
- int ret;
-
- nor->bouncebuf[0] = ear;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
- nor->bouncebuf, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d writing EAR\n", ret);
-
- return ret;
-}
-
-/**
- * winbond_set_4byte() - Set 4-byte address mode for Winbond flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int winbond_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- ret = macronix_set_4byte(nor, enable);
- if (ret || enable)
- return ret;
-
- /*
- * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
- * Register to be set to 1, so all 3-byte-address reads come from the
- * second 16M. We must clear the register to enable normal behavior.
- */
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- ret = spi_nor_write_ear(nor, 0);
- if (ret)
- return ret;
-
- return spi_nor_write_disable(nor);
-}
-
-/**
- * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
- sr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
-
- return ret;
-}
-
-/**
- * s3an_sr_ready() - Query the Status Register of the S3AN flash to see if the
- * flash is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int s3an_sr_ready(struct spi_nor *nor)
-{
- int ret;
-
- ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return !!(nor->bouncebuf[0] & XSR_RDY);
-}
-
-/**
- * spi_nor_clear_sr() - Clear the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- */
-static void spi_nor_clear_sr(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d clearing SR\n", ret);
-}
-
-/**
- * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
- * for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int spi_nor_sr_ready(struct spi_nor *nor)
-{
- int ret = spi_nor_read_sr(nor, nor->bouncebuf);
-
- if (ret)
- return ret;
-
- if (nor->flags & SNOR_F_USE_CLSR &&
- nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
- if (nor->bouncebuf[0] & SR_E_ERR)
- dev_err(nor->dev, "Erase Error occurred\n");
- else
- dev_err(nor->dev, "Programming Error occurred\n");
-
- spi_nor_clear_sr(nor);
- return -EIO;
- }
-
- return !(nor->bouncebuf[0] & SR_WIP);
-}
-
-/**
- * spi_nor_clear_fsr() - Clear the Flag Status Register.
- * @nor: pointer to 'struct spi_nor'.
- */
-static void spi_nor_clear_fsr(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
-}
-
-/**
- * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
- * ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int spi_nor_fsr_ready(struct spi_nor *nor)
-{
- int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
-
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
- if (nor->bouncebuf[0] & FSR_E_ERR)
- dev_err(nor->dev, "Erase operation failed.\n");
- else
- dev_err(nor->dev, "Program operation failed.\n");
-
- if (nor->bouncebuf[0] & FSR_PT_ERR)
- dev_err(nor->dev,
- "Attempted to modify a protected sector.\n");
-
- spi_nor_clear_fsr(nor);
- return -EIO;
- }
-
- return nor->bouncebuf[0] & FSR_READY;
-}
-
-/**
- * spi_nor_ready() - Query the flash to see if it is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int spi_nor_ready(struct spi_nor *nor)
-{
- int sr, fsr;
-
- if (nor->flags & SNOR_F_READY_XSR_RDY)
- sr = s3an_sr_ready(nor);
- else
- sr = spi_nor_sr_ready(nor);
- if (sr < 0)
- return sr;
- fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
- if (fsr < 0)
- return fsr;
- return sr && fsr;
-}
-
-/**
- * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
- * Status Register until the flash is ready, or a timeout occurs.
- * @nor: pointer to "struct spi_nor".
- * @timeout_jiffies: jiffies to wait until timeout.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
- unsigned long timeout_jiffies)
-{
- unsigned long deadline;
- int timeout = 0, ret;
-
- deadline = jiffies + timeout_jiffies;
-
- while (!timeout) {
- if (time_after_eq(jiffies, deadline))
- timeout = 1;
-
- ret = spi_nor_ready(nor);
- if (ret < 0)
- return ret;
- if (ret)
- return 0;
-
- cond_resched();
- }
-
- dev_dbg(nor->dev, "flash operation timed out\n");
-
- return -ETIMEDOUT;
-}
-
-/**
- * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
- * flash to be ready, or until a timeout occurs.
- * @nor: pointer to "struct spi_nor".
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
-{
- return spi_nor_wait_till_ready_with_timeout(nor,
- DEFAULT_READY_WAIT_JIFFIES);
-}
-
-/**
- * spi_nor_write_sr() - Write the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to DMA-able buffer to write to the Status Register.
- * @len: number of bytes to write to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
- sr, len);
- }
-
- if (ret) {
- dev_dbg(nor->dev, "error %d writing SR\n", ret);
- return ret;
- }
-
- return spi_nor_wait_till_ready(nor);
-}
-
-/**
- * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
- * ensure that the written byte matches the value read back.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
-{
- int ret;
-
- nor->bouncebuf[0] = sr1;
-
- ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
- if (ret)
- return ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] != sr1) {
- dev_dbg(nor->dev, "SR1: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
- * Status Register 2 in one shot. Ensure that the byte written in the Status
- * Register 1 matches the value read back, and that the 16-bit Write did not
- * affect what was already in the Status Register 2.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register 1.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
-{
- int ret;
- u8 *sr_cr = nor->bouncebuf;
- u8 cr_written;
-
- /* Make sure we don't overwrite the contents of Status Register 2. */
- if (!(nor->flags & SNOR_F_NO_READ_CR)) {
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
- } else if (nor->params.quad_enable) {
- /*
- * If the Status Register 2 Read command (35h) is not
- * supported, we should at least be sure we don't
- * change the value of the SR2 Quad Enable bit.
- *
- * We can safely assume that when the Quad Enable method is
- * set, the value of the QE bit is one, as a consequence of the
- * nor->params.quad_enable() call.
- *
- * We can safely assume that the Quad Enable bit is present in
- * the Status Register 2 at BIT(1). According to the JESD216
- * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
- * Write Status (01h) command is available just for the cases
- * in which the QE bit is described in SR2 at BIT(1).
- */
- sr_cr[1] = SR2_QUAD_EN_BIT1;
- } else {
- sr_cr[1] = 0;
- }
-
- sr_cr[0] = sr1;
-
- ret = spi_nor_write_sr(nor, sr_cr, 2);
- if (ret)
- return ret;
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return 0;
-
- cr_written = sr_cr[1];
-
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
-
- if (cr_written != sr_cr[1]) {
- dev_dbg(nor->dev, "CR: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
- * Configuration Register in one shot. Ensure that the byte written in the
- * Configuration Register matches the value read back, and that the 16-bit Write
- * did not affect what was already in the Status Register 1.
- * @nor: pointer to a 'struct spi_nor'.
- * @cr: byte value to be written to the Configuration Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
-{
- int ret;
- u8 *sr_cr = nor->bouncebuf;
- u8 sr_written;
-
- /* Keep the current value of the Status Register 1. */
- ret = spi_nor_read_sr(nor, sr_cr);
- if (ret)
- return ret;
-
- sr_cr[1] = cr;
-
- ret = spi_nor_write_sr(nor, sr_cr, 2);
- if (ret)
- return ret;
-
- sr_written = sr_cr[0];
-
- ret = spi_nor_read_sr(nor, sr_cr);
- if (ret)
- return ret;
-
- if (sr_written != sr_cr[0]) {
- dev_dbg(nor->dev, "SR: Read back test failed\n");
- return -EIO;
- }
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return 0;
-
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
-
- if (cr != sr_cr[1]) {
- dev_dbg(nor->dev, "CR: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
- * the written byte matches the value read back, without affecting other bits
- * in Status Registers 1 and 2.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
-{
- if (nor->flags & SNOR_F_HAS_16BIT_SR)
- return spi_nor_write_16bit_sr_and_check(nor, sr1);
-
- return spi_nor_write_sr1_and_check(nor, sr1);
-}
-
-/**
- * spi_nor_write_sr2() - Write the Status Register 2 using the
- * SPINOR_OP_WRSR2 (3eh) command.
- * @nor: pointer to 'struct spi_nor'.
- * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, sr2, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
- sr2, 1);
- }
-
- if (ret) {
- dev_dbg(nor->dev, "error %d writing SR2\n", ret);
- return ret;
- }
-
- return spi_nor_wait_till_ready(nor);
-}
-
-/**
- * spi_nor_read_sr2() - Read the Status Register 2 using the
- * SPINOR_OP_RDSR2 (3fh) command.
- * @nor: pointer to 'struct spi_nor'.
- * @sr2: pointer to DMA-able buffer where the value of the
- * Status Register 2 will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr2, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
- sr2, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading SR2\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_erase_chip() - Erase the entire flash memory.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_erase_chip(struct spi_nor *nor)
-{
- int ret;
-
- dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d erasing chip\n", ret);
-
- return ret;
-}
-
-static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
-{
- return mtd->priv;
-}
-
-static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == opcode)
- return table[i][1];
-
- /* No conversion found, keep input op code. */
- return opcode;
-}
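-
-/*
- * The lookup above in isolation, sketched as a standalone program using
- * the well-known 3-byte/4-byte opcode pairs 03h -> 13h (READ) and
- * 0Bh -> 0Ch (FAST READ); unknown opcodes pass through unchanged.
- */
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-
-static uint8_t convert_opcode(uint8_t op, const uint8_t table[][2], size_t n)
-{
- for (size_t i = 0; i < n; i++)
- if (table[i][0] == op)
- return table[i][1];
- return op; /* no conversion found, keep the input opcode */
-}
-
-int main(void)
-{
- static const uint8_t t[][2] = { { 0x03, 0x13 }, { 0x0b, 0x0c } };
-
- printf("0x%02x\n", convert_opcode(0x03, t, 2)); /* prints 0x13 */
- return 0;
-}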
-
-static u8 spi_nor_convert_3to4_read(u8 opcode)
-{
- static const u8 spi_nor_3to4_read[][2] = {
- { SPINOR_OP_READ, SPINOR_OP_READ_4B },
- { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
- { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
- { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
- { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
- { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
- { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
- { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
-
- { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
- { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
- { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
- ARRAY_SIZE(spi_nor_3to4_read));
-}
-
-static u8 spi_nor_convert_3to4_program(u8 opcode)
-{
- static const u8 spi_nor_3to4_program[][2] = {
- { SPINOR_OP_PP, SPINOR_OP_PP_4B },
- { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
- { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
- { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
- { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
- ARRAY_SIZE(spi_nor_3to4_program));
-}
-
-static u8 spi_nor_convert_3to4_erase(u8 opcode)
-{
- static const u8 spi_nor_3to4_erase[][2] = {
- { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
- { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
- { SPINOR_OP_SE, SPINOR_OP_SE_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
- ARRAY_SIZE(spi_nor_3to4_erase));
-}
-
-static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
-{
- nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
- nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
- nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
-
- if (!spi_nor_has_uniform_erase(nor)) {
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- struct spi_nor_erase_type *erase;
- int i;
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- erase = &map->erase_type[i];
- erase->opcode =
- spi_nor_convert_3to4_erase(erase->opcode);
- }
- }
-}
-
-static int spi_nor_lock_and_prep(struct spi_nor *nor)
-{
- int ret = 0;
-
- mutex_lock(&nor->lock);
-
- if (nor->controller_ops && nor->controller_ops->prepare) {
- ret = nor->controller_ops->prepare(nor);
- if (ret) {
- mutex_unlock(&nor->lock);
- return ret;
- }
- }
- return ret;
-}
-
-static void spi_nor_unlock_and_unprep(struct spi_nor *nor)
-{
- if (nor->controller_ops && nor->controller_ops->unprepare)
- nor->controller_ops->unprepare(nor);
- mutex_unlock(&nor->lock);
-}
-
-/*
- * This code converts an address to the Default Address Mode, which has non-
- * power-of-two page sizes. We must support this mode because it is the default
- * mode supported by Xilinx tools, it can access the whole flash area and
- * changing over to the Power-of-two mode is irreversible and corrupts the
- * original data.
- * Addr can safely be unsigned int, the biggest S3AN device is smaller than
- * 4 MiB.
- */
-static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
-{
- u32 offset, page;
-
- offset = addr % nor->page_size;
- page = addr / nor->page_size;
- page <<= (nor->page_size > 512) ? 10 : 9;
-
- return page | offset;
-}
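-
-/*
- * A standalone model of the conversion above, assuming a 264-byte page
- * (a typical S3AN default-mode page size): linear address 1000 falls in
- * page 3 at offset 208, and the device address becomes
- * (3 << 9) | 208 = 1744.
- */
-#include <stdint.h>
-#include <stdio.h>
-
-static uint32_t s3an_addr(uint32_t addr, uint32_t page_size)
-{
- uint32_t offset = addr % page_size;
- uint32_t page = addr / page_size;
-
- /* Pages sit on 512- or 1024-byte address boundaries. */
- page <<= (page_size > 512) ? 10 : 9;
- return page | offset;
-}
-
-int main(void)
-{
- printf("%u\n", s3an_addr(1000, 264)); /* prints 1744 */
- return 0;
-}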
-
-static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
-{
- if (!nor->params.convert_addr)
- return addr;
-
- return nor->params.convert_addr(nor, addr);
-}
-
-/*
- * Initiate the erasure of a single sector
- */
-static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
-{
- int i;
-
- addr = spi_nor_convert_addr(nor, addr);
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- return spi_mem_exec_op(nor->spimem, &op);
- } else if (nor->controller_ops->erase) {
- return nor->controller_ops->erase(nor, addr);
- }
-
- /*
- * Default implementation, if driver doesn't have a specialized HW
- * control
- */
- for (i = nor->addr_width - 1; i >= 0; i--) {
- nor->bouncebuf[i] = addr & 0xff;
- addr >>= 8;
- }
-
- return nor->controller_ops->write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
-}
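-
-/*
- * The fallback packing loop above, standalone: a 3-byte address of
- * 0x123456 goes on the wire most-significant byte first, so the buffer
- * ends up as { 0x12, 0x34, 0x56 }.
- */
-#include <stdint.h>
-#include <stdio.h>
-
-int main(void)
-{
- uint8_t buf[3];
- uint32_t addr = 0x123456;
- int i;
-
- for (i = 3 - 1; i >= 0; i--) {
- buf[i] = addr & 0xff;
- addr >>= 8;
- }
- printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]); /* 12 34 56 */
- return 0;
-}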
-
-/**
- * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @dividend: dividend value
- * @remainder: pointer to u32 remainder (will be updated)
- *
- * Return: the result of the division
- */
-static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
- u64 dividend, u32 *remainder)
-{
- /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
- *remainder = (u32)dividend & erase->size_mask;
- return dividend >> erase->size_shift;
-}
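-
-/*
- * The mask/shift division above, worked standalone: a 64 KiB erase type
- * has size_shift = 16 and size_mask = 0xffff, so dividing 0x30100 costs
- * only a shift and an AND: quotient 3, remainder 0x100.
- */
-#include <stdint.h>
-#include <stdio.h>
-
-int main(void)
-{
- uint64_t dividend = 0x30100;
- uint32_t size_shift = 16; /* log2(64 KiB) */
- uint32_t size_mask = (1U << size_shift) - 1;
- uint32_t remainder = (uint32_t)dividend & size_mask;
- uint64_t quotient = dividend >> size_shift;
-
- printf("%llu r 0x%x\n", (unsigned long long)quotient, remainder);
- return 0;
-}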
-
-/**
- * spi_nor_find_best_erase_type() - find the best erase type for the given
- * offset in the serial flash memory and the
- * number of bytes to erase. The region in
- * which the address fits is expected to be
- * provided.
- * @map: the erase map of the SPI NOR
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Return: a pointer to the best fitted erase type, NULL otherwise.
- */
-static const struct spi_nor_erase_type *
-spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
- const struct spi_nor_erase_region *region,
- u64 addr, u32 len)
-{
- const struct spi_nor_erase_type *erase;
- u32 rem;
- int i;
- u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
- /*
- * Erase types are ordered by size, with the smallest erase type at
- * index 0.
- */
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- /* Does the erase region support the tested erase type? */
- if (!(erase_mask & BIT(i)))
- continue;
-
- erase = &map->erase_type[i];
-
- /* Don't erase more than what the user has asked for. */
- if (erase->size > len)
- continue;
-
- /* Alignment is not mandatory for overlaid regions */
- if (region->offset & SNOR_OVERLAID_REGION)
- return erase;
-
- spi_nor_div_by_erase_size(erase, addr, &rem);
- if (rem)
- continue;
- else
- return erase;
- }
-
- return NULL;
-}
-
-/**
- * spi_nor_region_next() - get the next spi nor region
- * @region: pointer to a structure that describes a SPI NOR erase region
- *
- * Return: the next spi nor region or NULL if last region.
- */
-static struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region)
-{
- if (spi_nor_region_is_last(region))
- return NULL;
- region++;
- return region;
-}
-
-/**
- * spi_nor_find_erase_region() - find the region of the serial flash memory in
- * which the offset fits
- * @map: the erase map of the SPI NOR
- * @addr: offset in the serial flash memory
- *
- * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_region *
-spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
-{
- struct spi_nor_erase_region *region = map->regions;
- u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 region_end = region_start + region->size;
-
- while (addr < region_start || addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- return ERR_PTR(-EINVAL);
-
- region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- region_end = region_start + region->size;
- }
-
- return region;
-}
-
-/**
- * spi_nor_init_erase_cmd() - initialize an erase command
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @erase: pointer to a structure that describes a SPI NOR erase type
- *
- * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_command *
-spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
- const struct spi_nor_erase_type *erase)
-{
- struct spi_nor_erase_command *cmd;
-
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&cmd->list);
- cmd->opcode = erase->opcode;
- cmd->count = 1;
-
- if (region->offset & SNOR_OVERLAID_REGION)
- cmd->size = region->size;
- else
- cmd->size = erase->size;
-
- return cmd;
-}
-
-/**
- * spi_nor_destroy_erase_cmd_list() - destroy erase command list
- * @erase_list: list of erase commands
- */
-static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
-{
- struct spi_nor_erase_command *cmd, *next;
-
- list_for_each_entry_safe(cmd, next, erase_list, list) {
- list_del(&cmd->list);
- kfree(cmd);
- }
-}
-
-/**
- * spi_nor_init_erase_cmd_list() - initialize erase command list
- * @nor: pointer to a 'struct spi_nor'
- * @erase_list: list of erase commands to be executed once we validate that the
- * erase can be performed
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Builds the list of best fitted erase commands and verifies if the erase can
- * be performed.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
- struct list_head *erase_list,
- u64 addr, u32 len)
-{
- const struct spi_nor_erase_map *map = &nor->params.erase_map;
- const struct spi_nor_erase_type *erase, *prev_erase = NULL;
- struct spi_nor_erase_region *region;
- struct spi_nor_erase_command *cmd = NULL;
- u64 region_end;
- int ret = -EINVAL;
-
- region = spi_nor_find_erase_region(map, addr);
- if (IS_ERR(region))
- return PTR_ERR(region);
-
- region_end = spi_nor_region_end(region);
-
- while (len) {
- erase = spi_nor_find_best_erase_type(map, region, addr, len);
- if (!erase)
- goto destroy_erase_cmd_list;
-
- if (prev_erase != erase ||
- region->offset & SNOR_OVERLAID_REGION) {
- cmd = spi_nor_init_erase_cmd(region, erase);
- if (IS_ERR(cmd)) {
- ret = PTR_ERR(cmd);
- goto destroy_erase_cmd_list;
- }
-
- list_add_tail(&cmd->list, erase_list);
- } else {
- cmd->count++;
- }
-
- addr += cmd->size;
- len -= cmd->size;
-
- if (len && addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- goto destroy_erase_cmd_list;
- region_end = spi_nor_region_end(region);
- }
-
- prev_erase = erase;
- }
-
- return 0;
-
-destroy_erase_cmd_list:
- spi_nor_destroy_erase_cmd_list(erase_list);
- return ret;
-}
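-
-/*
- * A toy standalone model of the selection performed above: greedily
- * take the largest erase type that fits and is aligned. Erasing 96 KiB
- * at offset 0 with 4K/32K/64K types decomposes into one 64 KiB command
- * followed by one 32 KiB command.
- */
-#include <stdint.h>
-#include <stdio.h>
-
-int main(void)
-{
- static const uint32_t sizes[] = { 64 << 10, 32 << 10, 4 << 10 };
- uint64_t addr = 0;
- uint32_t len = 96 << 10;
-
- while (len) {
- uint32_t step = 0;
- int i;
-
- for (i = 0; i < 3; i++) {
- if (sizes[i] <= len && addr % sizes[i] == 0) {
- step = sizes[i];
- break;
- }
- }
- if (!step)
- break; /* nothing fits: the real code returns -EINVAL */
- printf("erase %u KiB at 0x%llx\n", step >> 10,
- (unsigned long long)addr);
- addr += step;
- len -= step;
- }
- return 0;
-}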
-
-/**
- * spi_nor_erase_multi_sectors() - perform a non-uniform erase
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Build a list of best fitted erase commands and execute it once we validate
- * that the erase can be performed.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
-{
- LIST_HEAD(erase_list);
- struct spi_nor_erase_command *cmd, *next;
- int ret;
-
- ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
- if (ret)
- return ret;
-
- list_for_each_entry_safe(cmd, next, &erase_list, list) {
- nor->erase_opcode = cmd->opcode;
- while (cmd->count) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto destroy_erase_cmd_list;
-
- ret = spi_nor_erase_sector(nor, addr);
- if (ret)
- goto destroy_erase_cmd_list;
-
- addr += cmd->size;
- cmd->count--;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto destroy_erase_cmd_list;
- }
- list_del(&cmd->list);
- kfree(cmd);
- }
-
- return 0;
-
-destroy_erase_cmd_list:
- spi_nor_destroy_erase_cmd_list(&erase_list);
- return ret;
-}
-
-/*
- * Erase an address range on the nor chip. The address range may span
- * one or more erase sectors. Return an error if there is a problem erasing.
- */
-static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
- uint32_t rem;
- int ret;
-
- dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
- (long long)instr->len);
-
- if (spi_nor_has_uniform_erase(nor)) {
- div_u64_rem(instr->len, mtd->erasesize, &rem);
- if (rem)
- return -EINVAL;
- }
-
- addr = instr->addr;
- len = instr->len;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- /* whole-chip erase? */
- if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
- unsigned long timeout;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_erase_chip(nor);
- if (ret)
- goto erase_err;
-
- /*
- * Scale the timeout linearly with the size of the flash, with
- * a minimum calibrated to an old 2MB flash. We could try to
- * pull these from CFI/SFDP, but these values should be good
- * enough for now.
- */
- timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
- CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
- (unsigned long)(mtd->size / SZ_2M));
- ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
- if (ret)
- goto erase_err;
-
- /* REVISIT in some cases we could speed up erasing large regions
- * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
- * to use "small sector erase", but that's not always optimal.
- */
-
- /* "sector"-at-a-time erase */
- } else if (spi_nor_has_uniform_erase(nor)) {
- while (len) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_erase_sector(nor, addr);
- if (ret)
- goto erase_err;
-
- addr += mtd->erasesize;
- len -= mtd->erasesize;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto erase_err;
- }
-
- /* erase multiple sectors */
- } else {
- ret = spi_nor_erase_multi_sectors(nor, addr, len);
- if (ret)
- goto erase_err;
- }
-
- ret = spi_nor_write_disable(nor);
-
-erase_err:
- spi_nor_unlock_and_unprep(nor);
-
- return ret;
-}
-
-static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
- uint64_t *len)
-{
- struct mtd_info *mtd = &nor->mtd;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- int shift = ffs(mask) - 1;
- int pow;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
-
- if (!(sr & mask)) {
- /* No protection */
- *ofs = 0;
- *len = 0;
- } else {
- pow = ((sr & mask) ^ mask) >> shift;
- *len = mtd->size >> pow;
- if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
- *ofs = 0;
- else
- *ofs = mtd->size - *len;
- }
-}
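-
-/*
- * Worked standalone example of the decoding above, assuming the usual
- * SR layout with BP0 at bit 2: for an 8 MiB part with BP2..BP0 = 101
- * and TB clear, pow = (0b101 ^ 0b111) = 2, so len = 8 MiB >> 2 = 2 MiB
- * protected at the top, i.e. ofs = 6 MiB. This matches the
- * "2 MB | Upper 1/4" row of the table further down.
- */
-#include <stdint.h>
-#include <stdio.h>
-
-int main(void)
-{
- uint64_t size = 8 << 20;
- uint8_t mask = 0x1c; /* SR_BP2 | SR_BP1 | SR_BP0 */
- uint8_t sr = 0x14; /* BP2 and BP0 set */
- int shift = 2; /* ffs(mask) - 1 */
- int pow = ((sr & mask) ^ mask) >> shift;
- uint64_t len = size >> pow;
-
- printf("len=%llu MiB ofs=%llu MiB\n",
- (unsigned long long)(len >> 20),
- (unsigned long long)((size - len) >> 20));
- return 0;
-}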
-
-/*
- * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
- * @locked is false); 0 otherwise
- */
-static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr, bool locked)
-{
- loff_t lock_offs;
- uint64_t lock_len;
-
- if (!len)
- return 1;
-
- stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
-
- if (locked)
- /* Requested range is a sub-range of locked range */
- return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
- else
- /* Requested range does not overlap with locked range */
- return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
-}
-
-static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
-{
- return stm_check_lock_status_sr(nor, ofs, len, sr, true);
-}
-
-static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
-{
- return stm_check_lock_status_sr(nor, ofs, len, sr, false);
-}
-
-/*
- * Lock a region of the flash. Compatible with ST Micro and similar flash.
- * Supports the block protection bits BP{0,1,2} in the status register
- * (SR). Does not support these features found in newer SR bitfields:
- * - SEC: sector/block protect - only handle SEC=0 (block protect)
- * - CMP: complement protect - only support CMP=0 (range is not complemented)
- *
- * Support for the following is provided conditionally for some flash:
- * - TB: top/bottom protect
- *
- * Sample table portion for 8MB flash (Winbond w25q64fw):
- *
- * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
- * --------------------------------------------------------------------------
- * X | X | 0 | 0 | 0 | NONE | NONE
- * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
- * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
- * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
- * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
- * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
- * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
- * X | X | 1 | 1 | 1 | 8 MB | ALL
- * ------|-------|-------|-------|-------|---------------|-------------------
- * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
- * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
- * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
- * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
- * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
- * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
- *
- * Returns negative on errors, 0 on success.
- */
-static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- struct mtd_info *mtd = &nor->mtd;
- int ret, status_old, status_new;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- u8 shift = ffs(mask) - 1, pow, val;
- loff_t lock_len;
- bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
- bool use_top;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- status_old = nor->bouncebuf[0];
-
- /* If nothing in our range is unlocked, we don't need to do anything */
- if (stm_is_locked_sr(nor, ofs, len, status_old))
- return 0;
-
- /* If anything below us is unlocked, we can't use 'bottom' protection */
- if (!stm_is_locked_sr(nor, 0, ofs, status_old))
- can_be_bottom = false;
-
- /* If anything above us is unlocked, we can't use 'top' protection */
- if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
- status_old))
- can_be_top = false;
-
- if (!can_be_bottom && !can_be_top)
- return -EINVAL;
-
- /* Prefer top, if both are valid */
- use_top = can_be_top;
-
- /* lock_len: length of region that should end up locked */
- if (use_top)
- lock_len = mtd->size - ofs;
- else
- lock_len = ofs + len;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
-
- /*
- * Need smallest pow such that:
- *
- * 1 / (2^pow) <= (len / size)
- *
- * so (assuming power-of-2 size) we do:
- *
- * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
- */
- pow = ilog2(mtd->size) - ilog2(lock_len);
- val = mask - (pow << shift);
- if (val & ~mask)
- return -EINVAL;
- /* Don't "lock" with no region! */
- if (!(val & mask))
- return -EINVAL;
-
- status_new = (status_old & ~mask & ~tb_mask) | val;
-
- /* Disallow further writes if WP pin is asserted */
- status_new |= SR_SRWD;
-
- if (!use_top)
- status_new |= tb_mask;
-
- /* Don't bother if they're the same */
- if (status_new == status_old)
- return 0;
-
- /* Only modify protection if it will not unlock other areas */
- if ((status_new & mask) < (status_old & mask))
- return -EINVAL;
-
- return spi_nor_write_sr_and_check(nor, status_new);
-}
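-
-/*
- * The inverse computation from the lock path above, standalone: asking
- * to protect the top 2 MiB of an 8 MiB part gives
- * pow = log2(8M) - log2(2M) = 2 and val = mask - (pow << shift) =
- * 0x1c - 0x08 = 0x14, i.e. BP2..BP0 = 101.
- */
-#include <stdint.h>
-#include <stdio.h>
-
-static int ilog2_u64(uint64_t v)
-{
- int l = -1;
-
- while (v) {
- v >>= 1;
- l++;
- }
- return l;
-}
-
-int main(void)
-{
- uint8_t mask = 0x1c, shift = 2;
- int pow = ilog2_u64(8 << 20) - ilog2_u64(2 << 20);
- uint8_t val = mask - (uint8_t)(pow << shift);
-
- printf("0x%02x\n", val); /* prints 0x14 */
- return 0;
-}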
-
-/*
- * Unlock a region of the flash. See stm_lock() for more info
- *
- * Returns negative on errors, 0 on success.
- */
-static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- struct mtd_info *mtd = &nor->mtd;
- int ret, status_old, status_new;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- u8 shift = ffs(mask) - 1, pow, val;
- loff_t lock_len;
- bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
- bool use_top;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- status_old = nor->bouncebuf[0];
-
- /* If nothing in our range is locked, we don't need to do anything */
- if (stm_is_unlocked_sr(nor, ofs, len, status_old))
- return 0;
-
- /* If anything below us is locked, we can't use 'top' protection */
- if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
- can_be_top = false;
-
- /* If anything above us is locked, we can't use 'bottom' protection */
- if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
- status_old))
- can_be_bottom = false;
-
- if (!can_be_bottom && !can_be_top)
- return -EINVAL;
-
- /* Prefer top, if both are valid */
- use_top = can_be_top;
-
- /* lock_len: length of region that should remain locked */
- if (use_top)
- lock_len = mtd->size - (ofs + len);
- else
- lock_len = ofs;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
- /*
- * Need largest pow such that:
- *
- * 1 / (2^pow) >= (len / size)
- *
- * so (assuming power-of-2 size) we do:
- *
- * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
- */
- pow = ilog2(mtd->size) - order_base_2(lock_len);
- if (lock_len == 0) {
- val = 0; /* fully unlocked */
- } else {
- val = mask - (pow << shift);
- /* Some power-of-two sizes are not supported */
- if (val & ~mask)
- return -EINVAL;
- }
-
- status_new = (status_old & ~mask & ~tb_mask) | val;
-
- /* Don't protect status register if we're fully unlocked */
- if (lock_len == 0)
- status_new &= ~SR_SRWD;
-
- if (!use_top)
- status_new |= tb_mask;
-
- /* Don't bother if they're the same */
- if (status_new == status_old)
- return 0;
-
- /* Only modify protection if it will not lock other areas */
- if ((status_new & mask) > (status_old & mask))
- return -EINVAL;
-
- return spi_nor_write_sr_and_check(nor, status_new);
-}
-
-/*
- * Check if a region of the flash is (completely) locked. See stm_lock() for
- * more info.
- *
- * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
- * negative on errors.
- */
-static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- int ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return stm_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
-}
-
-static const struct spi_nor_locking_ops stm_locking_ops = {
- .lock = stm_lock,
- .unlock = stm_unlock,
- .is_locked = stm_is_locked,
-};
-
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->lock(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->unlock(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->is_locked(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-/**
- * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
- * Register 1.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
-{
- int ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
- return 0;
-
- nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
-
- return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
-}
-
-/**
- * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
- * Register 2.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
-
- ret = spi_nor_read_cr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
- return 0;
-
- nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
-
- return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
-}
-
-/**
- * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register 2.
- *
- * This is one of the procedures to set the QE bit described in the SFDP
- * (JESD216 rev B) specification but no manufacturer using this procedure has
- * been identified yet, hence the name of the function.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
-{
- u8 *sr2 = nor->bouncebuf;
- int ret;
- u8 sr2_written;
-
- /* Check current Quad Enable bit value. */
- ret = spi_nor_read_sr2(nor, sr2);
- if (ret)
- return ret;
- if (*sr2 & SR2_QUAD_EN_BIT7)
- return 0;
-
- /* Update the Quad Enable bit. */
- *sr2 |= SR2_QUAD_EN_BIT7;
-
- ret = spi_nor_write_sr2(nor, sr2);
- if (ret)
- return ret;
-
- sr2_written = *sr2;
-
- /* Read back and check it. */
- ret = spi_nor_read_sr2(nor, sr2);
- if (ret)
- return ret;
-
- if (*sr2 != sr2_written) {
- dev_dbg(nor->dev, "SR2: Read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff, \
- ((_ext_id) >> 8) & 0xff, \
- (_ext_id) & 0xff, \
- }, \
- .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .flags = (_flags),
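-
-/*
- * For example, INFO(0xc84019, 0, 64 * 1024, 512, ...) expands to
- * .id = { 0xc8, 0x40, 0x19, 0x00, 0x00 } and .id_len = 3 (a zero
- * _ext_id contributes no ID bytes), i.e. 512 sectors of 64 KiB with
- * the default 256-byte page size.
- */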
-
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff, \
- ((_ext_id) >> 16) & 0xff, \
- ((_ext_id) >> 8) & 0xff, \
- (_ext_id) & 0xff, \
- }, \
- .id_len = 6, \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .flags = (_flags),
-
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .addr_width = (_addr_width), \
- .flags = (_flags),
-
-#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff \
- }, \
- .id_len = 3, \
- .sector_size = (8*_page_size), \
- .n_sectors = (_n_sectors), \
- .page_size = _page_size, \
- .addr_width = 3, \
- .flags = SPI_NOR_NO_FR | SPI_S3AN,
-
-static int
-is25lp256_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- /*
- * IS25LP256 supports 4B opcodes, but the BFPT advertises a
- * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
- * Overwrite the address width advertised by the BFPT.
- */
- if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
- BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
- nor->addr_width = 4;
-
- return 0;
-}
-
-static struct spi_nor_fixups is25lp256_fixups = {
- .post_bfpt = is25lp256_post_bfpt_fixups,
-};
-
-static int
-mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- /*
- * MX25L25635F supports 4B opcodes but MX25L25635E does not.
- * Unfortunately, Macronix has re-used the same JEDEC ID for both
- * variants which prevents us from defining a new entry in the parts
- * table.
- * We need a way to differentiate MX25L25635E and MX25L25635F, and it
- * seems that the F version advertises support for Fast Read 4-4-4 in
- * its BFPT table.
- */
- if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
- nor->flags |= SNOR_F_4B_OPCODES;
-
- return 0;
-}
-
-static struct spi_nor_fixups mx25l25635_fixups = {
- .post_bfpt = mx25l25635_post_bfpt_fixups,
-};
-
-static void gd25q256_default_init(struct spi_nor *nor)
-{
- /*
- * Some manufacturers, like GigaDevice, may use different
- * bits to set QE on different memories, so the MFR alone
- * can't indicate the quad_enable method for this case; we
- * need to set it in the default_init fixup hook.
- */
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
-}
-
-static struct spi_nor_fixups gd25q256_fixups = {
- .default_init = gd25q256_default_init,
-};
-
-/* NOTE: double check command sets and memory organization when you add
- * more nor chips. This current list focuses on newer chips, which
- * have been converging on command sets that include JEDEC ID.
- *
- * All newly added entries should describe *hardware* and should use SECT_4K
- * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
- * scenarios excluding small sectors there is a config option that can be
- * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
- * For historical (and compatibility) reasons (before we got the config option
- * above) some old entries may be missing the 4K flag.
- */
-static const struct flash_info spi_nor_ids[] = {
- /* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
-
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
-
- { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
-
- { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
-
- /* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
- { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
- { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
-
- /* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
-
- /* Everspin */
- { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-
- /* Fujitsu */
- { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
-
- /* GigaDevice */
- {
- "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_TB_SR_BIT6)
- .fixups = &gd25q256_fixups,
- },
-
- /* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
-
- /* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
- { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
- .fixups = &is25lp256_fixups },
- { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
- .fixups = &is25lp256_fixups },
-
- /* Macronix */
- { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
- { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
- { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &mx25l25635_fixups },
- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
- { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
-
- /* Micron <--> ST Micro */
- { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
- USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-
- /* Micron */
- {
- "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES)
- },
- { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
-
- /* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
-
- /* Spansion/Cypress -- single (large) sector size only, at least
- * for the chips listed here (without boot sectors).
- */
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | USE_CLSR) },
- { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
- { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-
- /* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
- { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
- { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
- { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
- { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32, SECT_4K |
- SPI_NOR_DUAL_READ) },
- { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
- /* ST Microelectronics -- newer production may have feature updates */
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
-
- { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
-
- { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
- { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
-
-	/* Winbond -- w25x "blocks" are 64 KiB, "sectors" are 4 KiB */
- { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
- {
- "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- {
- "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- {
- "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- {
- "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
-
- /* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-
- /* Xilinx S3AN Internal Flash */
- { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
- { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
- { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
-
- /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
- { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { },
-};
-
-static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
-{
- int tmp;
- u8 *id = nor->bouncebuf;
- const struct flash_info *info;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
-
- tmp = spi_mem_exec_op(nor->spimem, &op);
- } else {
- tmp = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
- SPI_NOR_MAX_ID_LEN);
- }
- if (tmp) {
- dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
- return ERR_PTR(tmp);
- }
-
- for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
- info = &spi_nor_ids[tmp];
- if (info->id_len) {
- if (!memcmp(info->id, id, info->id_len))
- return &spi_nor_ids[tmp];
- }
- }
- dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
- SPI_NOR_MAX_ID_LEN, id);
- return ERR_PTR(-ENODEV);
-}
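
A minimal standalone sketch (plain C, not part of the patch) of the matching above: assuming INFO() lays the 24-bit JEDEC ID out manufacturer byte first, exactly as the RDID instruction returns it, the table lookup reduces to a memcmp() of up to id_len bytes. The 0xef4018 value below is the "w25q128" entry from the table.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Mirror how a 24-bit JEDEC ID is split into the bytes that
	 * spi_nor_read_id() compares with memcmp(). */
	static void jedec_id_bytes(uint32_t jedec_id, uint8_t out[3])
	{
		out[0] = (jedec_id >> 16) & 0xff; /* manufacturer, 0xef = Winbond */
		out[1] = (jedec_id >> 8) & 0xff;  /* memory type */
		out[2] = jedec_id & 0xff;         /* capacity code */
	}

	int main(void)
	{
		const uint8_t rdid_reply[3] = { 0xef, 0x40, 0x18 };
		uint8_t id[3];

		jedec_id_bytes(0xef4018, id);     /* "w25q128" table entry */
		printf("match: %d\n", !memcmp(id, rdid_reply, 3));
		return 0;
	}
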
-
-static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- ssize_t ret;
-
- dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- while (len) {
- loff_t addr = from;
-
- addr = spi_nor_convert_addr(nor, addr);
-
- ret = spi_nor_read_data(nor, addr, len, buf);
- if (ret == 0) {
- /* We shouldn't see 0-length reads */
- ret = -EIO;
- goto read_err;
- }
- if (ret < 0)
- goto read_err;
-
- WARN_ON(ret > len);
- *retlen += ret;
- buf += ret;
- from += ret;
- len -= ret;
- }
- ret = 0;
-
-read_err:
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t actual = 0;
- int ret;
-
- dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto out;
-
- nor->sst_write_second = false;
-
- /* Start write from odd address. */
- if (to % 2) {
- nor->program_opcode = SPINOR_OP_BP;
-
- /* write one byte. */
- ret = spi_nor_write_data(nor, to, 1, buf);
- if (ret < 0)
- goto out;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- to++;
- actual++;
- }
-
- /* Write out most of the data here. */
- for (; actual < len - 1; actual += 2) {
- nor->program_opcode = SPINOR_OP_AAI_WP;
-
- /* write two bytes. */
- ret = spi_nor_write_data(nor, to, 2, buf + actual);
- if (ret < 0)
- goto out;
- WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
- to += 2;
- nor->sst_write_second = true;
- }
- nor->sst_write_second = false;
-
- ret = spi_nor_write_disable(nor);
- if (ret)
- goto out;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- /* Write out trailing byte if it exists. */
- if (actual != len) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto out;
-
- nor->program_opcode = SPINOR_OP_BP;
- ret = spi_nor_write_data(nor, to, 1, buf + actual);
- if (ret < 0)
- goto out;
- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- actual += 1;
-
- ret = spi_nor_write_disable(nor);
- }
-out:
- *retlen += actual;
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
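
The chunking above (one leading Byte-Program when the start address is odd, Auto-Address-Increment word programs for the body, a trailing Byte-Program for a leftover byte) can be traced with a standalone sketch; the 8-byte write starting at address 5 is a hypothetical example that exercises all three phases.

	#include <stdio.h>

	int main(void)
	{
		unsigned long to = 5;	/* hypothetical odd start address */
		unsigned long len = 8, actual = 0;

		if (to % 2) {		/* SPINOR_OP_BP: one byte at 5 */
			to++;
			actual++;
		}
		for (; actual < len - 1; actual += 2)	/* SPINOR_OP_AAI_WP pairs */
			to += 2;
		if (actual != len) {	/* trailing SPINOR_OP_BP */
			to++;
			actual++;
		}
		/* prints "programmed 8 bytes, next address 13" */
		printf("programmed %lu bytes, next address %lu\n", actual, to);
		return 0;
	}
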
-
-/*
- * Write an address range to the nor chip. Data must be written in
- * FLASH_PAGESIZE chunks. The address range may be any size provided
- * it is within the physical boundaries.
- */
-static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t page_offset, page_remain, i;
- ssize_t ret;
-
- dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- for (i = 0; i < len; ) {
- ssize_t written;
- loff_t addr = to + i;
-
-		/*
-		 * If page_size is a power of two, the offset can be quickly
-		 * calculated with an AND operation. In the other cases we
-		 * need to do a modulus operation (more expensive).
-		 * Power-of-two numbers have only one bit set, so we can use
-		 * hweight32() to detect whether we need a modulus (do_div())
-		 * or not.
-		 */
- if (hweight32(nor->page_size) == 1) {
- page_offset = addr & (nor->page_size - 1);
- } else {
- uint64_t aux = addr;
-
- page_offset = do_div(aux, nor->page_size);
- }
- /* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
-
- addr = spi_nor_convert_addr(nor, addr);
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto write_err;
-
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
- if (ret < 0)
- goto write_err;
- written = ret;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto write_err;
- *retlen += written;
- i += written;
- }
-
-write_err:
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
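
The power-of-two shortcut described in the comment above is easy to verify in isolation. A standalone sketch (plain C; popcount32() stands in for the kernel's hweight32()) showing both paths compute the same page offset:

	#include <stdint.h>
	#include <assert.h>

	/* Count set bits, like the kernel's hweight32(). */
	static unsigned int popcount32(uint32_t v)
	{
		unsigned int n = 0;

		for (; v; v &= v - 1)
			n++;
		return n;
	}

	static uint32_t page_offset(uint64_t addr, uint32_t page_size)
	{
		/* A power-of-two page size has exactly one bit set. */
		if (popcount32(page_size) == 1)
			return addr & (page_size - 1);
		return addr % page_size;	/* e.g. 264-byte S3AN pages */
	}

	int main(void)
	{
		assert(page_offset(1000, 256) == 1000 % 256);
		assert(page_offset(1000, 264) == 1000 % 264);
		return 0;
	}
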
-
-static int spi_nor_check(struct spi_nor *nor)
-{
- if (!nor->dev ||
- (!nor->spimem && !nor->controller_ops) ||
- (!nor->spimem && nor->controller_ops &&
- (!nor->controller_ops->read ||
- !nor->controller_ops->write ||
- !nor->controller_ops->read_reg ||
- !nor->controller_ops->write_reg))) {
- pr_err("spi-nor: please fill all the necessary fields!\n");
- return -EINVAL;
- }
-
- if (nor->spimem && nor->controller_ops) {
- dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int s3an_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- int ret;
-
- ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- nor->erase_opcode = SPINOR_OP_XSE;
- nor->program_opcode = SPINOR_OP_XPP;
- nor->read_opcode = SPINOR_OP_READ;
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
-	/*
-	 * These flashes have a page size of 264 or 528 bytes (known as
-	 * Default addressing mode). It can be changed to a more standard
-	 * Power of two mode where the page size is 256/512. This comes
-	 * at a price: there is 3% less space, the data is corrupted,
-	 * and the page size cannot be changed back to Default addressing
-	 * mode.
-	 *
-	 * The current addressing mode can be read from the XRDSR register
-	 * and should not be changed, because changing it is a destructive
-	 * operation.
-	 */
- if (nor->bouncebuf[0] & XSR_PAGESIZE) {
- /* Flash in Power of 2 mode */
- nor->page_size = (nor->page_size == 264) ? 256 : 512;
- nor->mtd.writebufsize = nor->page_size;
- nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
- nor->mtd.erasesize = 8 * nor->page_size;
- } else {
- /* Flash in Default addressing mode */
- nor->params.convert_addr = s3an_convert_addr;
- nor->mtd.erasesize = nor->info->sector_size;
- }
-
- return 0;
-}
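
A worked sketch of the Power-of-2 branch above, feeding in the 3S700AN geometry from the table (512 sectors, 264-byte default pages) as hypothetical input:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t page_size = 264, n_sectors = 512;

		/* XSR_PAGESIZE set: 264-byte pages shrink to 256 bytes. */
		page_size = (page_size == 264) ? 256 : 512;

		/* 8 pages per sector, as in s3an_nor_setup(). */
		printf("size: %u bytes, erasesize: %u bytes\n",
		       8 * page_size * n_sectors, 8 * page_size);
		return 0;
	}
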
-
-static void
-spi_nor_set_read_settings(struct spi_nor_read_command *read,
- u8 num_mode_clocks,
- u8 num_wait_states,
- u8 opcode,
- enum spi_nor_protocol proto)
-{
- read->num_mode_clocks = num_mode_clocks;
- read->num_wait_states = num_wait_states;
- read->opcode = opcode;
- read->proto = proto;
-}
-
-static void
-spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
- u8 opcode,
- enum spi_nor_protocol proto)
-{
- pp->opcode = opcode;
- pp->proto = proto;
-}
-
-static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == (int)hwcaps)
- return table[i][1];
-
- return -EINVAL;
-}
-
-static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
-{
- static const int hwcaps_read2cmd[][2] = {
- { SNOR_HWCAPS_READ, SNOR_CMD_READ },
- { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
- { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
- { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
- { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
- { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
- { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
- { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
- { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
- { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
- { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
- { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
- { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
- { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
- { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
- };
-
- return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
- ARRAY_SIZE(hwcaps_read2cmd));
-}
-
-static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
-{
- static const int hwcaps_pp2cmd[][2] = {
- { SNOR_HWCAPS_PP, SNOR_CMD_PP },
- { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
- { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
- { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
- { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
- { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
- { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
- };
-
- return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
- ARRAY_SIZE(hwcaps_pp2cmd));
-}
-
-/*
- * Serial Flash Discoverable Parameters (SFDP) parsing.
- */
-
-/**
- * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
- *                      addr_width and read_dummy members of the struct
- *                      spi_nor should be previously set.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the serial flash memory
- * @len: number of bytes to read
- * @buf: buffer where the data is copied into (dma-safe memory)
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
-{
- ssize_t ret;
-
- while (len) {
- ret = spi_nor_read_data(nor, addr, len, buf);
- if (ret < 0)
- return ret;
- if (!ret || ret > len)
- return -EIO;
-
- buf += ret;
- addr += ret;
- len -= ret;
- }
- return 0;
-}
-
-/**
- * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the SFDP area to start reading data from
- * @len: number of bytes to read
- * @buf: buffer where the SFDP data are copied into (dma-safe memory)
- *
- * Whatever the actual numbers of bytes for address and dummy cycles are
- * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
- * followed by a 3-byte address and 8 dummy clock cycles.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
- size_t len, void *buf)
-{
- u8 addr_width, read_opcode, read_dummy;
- int ret;
-
- read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
- read_dummy = nor->read_dummy;
-
- nor->read_opcode = SPINOR_OP_RDSFDP;
- nor->addr_width = 3;
- nor->read_dummy = 8;
-
- ret = spi_nor_read_raw(nor, addr, len, buf);
-
- nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
- nor->read_dummy = read_dummy;
-
- return ret;
-}
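
For spimem controllers, the fixed sequence described above (5Ah op code, 3-byte address, 8 dummy clocks) could also be expressed directly as a single spi_mem_op, as in this sketch (assumes the kernel's spi-mem API; the driver instead temporarily rewrites nor->read_opcode, nor->addr_width and nor->read_dummy so the same path also serves non-spimem controllers):

	/* Sketch only: a direct spimem variant of the Read SFDP access. */
	static int spi_nor_read_sfdp_spimem(struct spi_nor *nor, u32 addr,
					    size_t len, void *buf)
	{
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1),
				   SPI_MEM_OP_ADDR(3, addr, 1), /* 3-byte address */
				   SPI_MEM_OP_DUMMY(1, 1),      /* 8 dummy clocks */
				   SPI_MEM_OP_DATA_IN(len, buf, 1));

		return spi_mem_exec_op(nor->spimem, &op);
	}
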
-
-/**
- * spi_nor_spimem_check_op - check if the operation is supported
- *                           by the controller
- * @nor:	pointer to a 'struct spi_nor'
- * @op:		pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_op(struct spi_nor *nor,
- struct spi_mem_op *op)
-{
-	/*
-	 * First test with 4 address bytes. The opcode itself might
-	 * be a 3-byte addressing opcode, but we don't care, because
-	 * the SPI controller implementation should not check the
-	 * opcode, only the sequence.
-	 */
- op->addr.nbytes = 4;
- if (!spi_mem_supports_op(nor->spimem, op)) {
- if (nor->mtd.size > SZ_16M)
- return -ENOTSUPP;
-
- /* If flash size <= 16MB, 3 address bytes are sufficient */
- op->addr.nbytes = 3;
- if (!spi_mem_supports_op(nor->spimem, op))
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_spimem_check_readop - check if the read op is supported
- *                               by the controller
- * @nor:	pointer to a 'struct spi_nor'
- * @read:	pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_readop(struct spi_nor *nor,
- const struct spi_nor_read_command *read)
-{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
- SPI_MEM_OP_DUMMY(0, 1),
- SPI_MEM_OP_DATA_IN(0, NULL, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
- op.dummy.buswidth / 8;
-
- return spi_nor_spimem_check_op(nor, &op);
-}
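
The dummy-cycle conversion above turns mode clocks plus wait states into whole bytes at the address bus width. A worked example with hypothetical Fast Read 1-4-4 timing:

	#include <stdio.h>

	int main(void)
	{
		/* 2 mode clocks + 4 wait states on 4 I/O lines:
		 * 6 cycles * 4 bits = 24 bits = 3 dummy bytes. */
		unsigned int num_mode_clocks = 2, num_wait_states = 4;
		unsigned int buswidth = 4;

		printf("dummy bytes: %u\n",
		       (num_mode_clocks + num_wait_states) * buswidth / 8);
		return 0;
	}
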
-
-/**
- * spi_nor_spimem_check_pp - check if the page program op is supported
- *                           by the controller
- * @nor:	pointer to a 'struct spi_nor'
- * @pp:		pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_pp(struct spi_nor *nor,
- const struct spi_nor_pp_command *pp)
-{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(0, NULL, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
-
- return spi_nor_spimem_check_op(nor, &op);
-}
-
-/**
- * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
- * based on SPI controller capabilities
- * @nor: pointer to a 'struct spi_nor'
- * @hwcaps:	pointer to the resulting capabilities after adjusting
- *		according to the controller's and the flash's capabilities
- */
-static void
-spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- unsigned int cap;
-
- /* DTR modes are not supported yet, mask them all. */
- *hwcaps &= ~SNOR_HWCAPS_DTR;
-
- /* X-X-X modes are not supported yet, mask them all. */
- *hwcaps &= ~SNOR_HWCAPS_X_X_X;
-
- for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
- int rdidx, ppidx;
-
- if (!(*hwcaps & BIT(cap)))
- continue;
-
- rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
- if (rdidx >= 0 &&
- spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
- *hwcaps &= ~BIT(cap);
-
- ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
- if (ppidx < 0)
- continue;
-
- if (spi_nor_spimem_check_pp(nor,
- &params->page_programs[ppidx]))
- *hwcaps &= ~BIT(cap);
- }
-}
-
-/**
- * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the SFDP area to start reading data from
- * @len: number of bytes to read
- * @buf: buffer where the SFDP data are copied into
- *
- * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, since @buf is
- * not guaranteed to be dma-safe.
- *
- * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
- * otherwise.
- */
-static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
- size_t len, void *buf)
-{
- void *dma_safe_buf;
- int ret;
-
- dma_safe_buf = kmalloc(len, GFP_KERNEL);
- if (!dma_safe_buf)
- return -ENOMEM;
-
- ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
- memcpy(buf, dma_safe_buf, len);
- kfree(dma_safe_buf);
-
- return ret;
-}
-
-/* Fast Read settings. */
-
-static void
-spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
- u16 half,
- enum spi_nor_protocol proto)
-{
- read->num_mode_clocks = (half >> 5) & 0x07;
- read->num_wait_states = (half >> 0) & 0x1f;
- read->opcode = (half >> 8) & 0xff;
- read->proto = proto;
-}
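
A worked decode of a hypothetical BFPT settings half-word, 0x6b08, using the shifts above (0x6b happens to be the standard Fast Read 1-1-4 op code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int half = 0x6b08;	/* hypothetical BFPT half-word */

		printf("opcode 0x%02x, mode clocks %u, wait states %u\n",
		       (half >> 8) & 0xff,	/* 0x6b */
		       (half >> 5) & 0x07,	/* 0 */
		       (half >> 0) & 0x1f);	/* 8 dummy cycles */
		return 0;
	}
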
-
-struct sfdp_bfpt_read {
- /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
- u32 hwcaps;
-
- /*
- * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
- * whether the Fast Read x-y-z command is supported.
- */
- u32 supported_dword;
- u32 supported_bit;
-
-	/*
-	 * The half-word at offset <settings_shift> in the <settings_dword>
-	 * BFPT DWORD encodes the op code, the number of mode clocks and the
-	 * number of wait states to be used by the Fast Read x-y-z command.
-	 */
- u32 settings_dword;
- u32 settings_shift;
-
- /* The SPI protocol for this Fast Read x-y-z command. */
- enum spi_nor_protocol proto;
-};
-
-static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
- /* Fast Read 1-1-2 */
- {
- SNOR_HWCAPS_READ_1_1_2,
- BFPT_DWORD(1), BIT(16), /* Supported bit */
- BFPT_DWORD(4), 0, /* Settings */
- SNOR_PROTO_1_1_2,
- },
-
- /* Fast Read 1-2-2 */
- {
- SNOR_HWCAPS_READ_1_2_2,
- BFPT_DWORD(1), BIT(20), /* Supported bit */
- BFPT_DWORD(4), 16, /* Settings */
- SNOR_PROTO_1_2_2,
- },
-
- /* Fast Read 2-2-2 */
- {
- SNOR_HWCAPS_READ_2_2_2,
- BFPT_DWORD(5), BIT(0), /* Supported bit */
- BFPT_DWORD(6), 16, /* Settings */
- SNOR_PROTO_2_2_2,
- },
-
- /* Fast Read 1-1-4 */
- {
- SNOR_HWCAPS_READ_1_1_4,
- BFPT_DWORD(1), BIT(22), /* Supported bit */
- BFPT_DWORD(3), 16, /* Settings */
- SNOR_PROTO_1_1_4,
- },
-
- /* Fast Read 1-4-4 */
- {
- SNOR_HWCAPS_READ_1_4_4,
- BFPT_DWORD(1), BIT(21), /* Supported bit */
- BFPT_DWORD(3), 0, /* Settings */
- SNOR_PROTO_1_4_4,
- },
-
- /* Fast Read 4-4-4 */
- {
- SNOR_HWCAPS_READ_4_4_4,
- BFPT_DWORD(5), BIT(4), /* Supported bit */
- BFPT_DWORD(7), 16, /* Settings */
- SNOR_PROTO_4_4_4,
- },
-};
-
-struct sfdp_bfpt_erase {
-	/*
-	 * The half-word at offset <shift> in DWORD <dword> encodes the
-	 * op code and erase sector size to be used by Sector Erase commands.
-	 */
- u32 dword;
- u32 shift;
-};
-
-static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
- /* Erase Type 1 in DWORD8 bits[15:0] */
- {BFPT_DWORD(8), 0},
-
- /* Erase Type 2 in DWORD8 bits[31:16] */
- {BFPT_DWORD(8), 16},
-
- /* Erase Type 3 in DWORD9 bits[15:0] */
- {BFPT_DWORD(9), 0},
-
- /* Erase Type 4 in DWORD9 bits[31:16] */
- {BFPT_DWORD(9), 16},
-};
-
-/**
- * spi_nor_set_erase_type() - set a SPI NOR erase type
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type
- * @opcode: the SPI command op code to erase the sector/block
- */
-static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
- u32 size, u8 opcode)
-{
- erase->size = size;
- erase->opcode = opcode;
-	/* The JEDEC JESD216B Standard requires erase sizes to be powers of 2. */
- erase->size_shift = ffs(erase->size) - 1;
- erase->size_mask = (1 << erase->size_shift) - 1;
-}
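
Because JESD216B guarantees power-of-two erase sizes, size_shift and size_mask allow alignment checks without division. A standalone sketch with a hypothetical 64 KiB erase type:

	#include <stdint.h>
	#include <assert.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		uint32_t size = 64 * 1024;
		int size_shift = ffs(size) - 1;			/* 16 */
		uint32_t size_mask = (1U << size_shift) - 1;	/* 0xffff */

		/* An address is sector-aligned iff (addr & size_mask) == 0. */
		assert((0x20000 & size_mask) == 0);
		assert((0x20100 & size_mask) != 0);
		return 0;
	}
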
-
-/**
- * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type
- * @opcode: the SPI command op code to erase the sector/block
- * @i: erase type index as sorted in the Basic Flash Parameter Table
- *
- * The supported Erase Types will be sorted at init in ascending order, with
- * the smallest Erase Type size being the first member in the erase_type array
- * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
- * the Basic Flash Parameter Table since it will be used later on to
- * synchronize with the supported Erase Types defined in SFDP optional tables.
- */
-static void
-spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
- u32 size, u8 opcode, u8 i)
-{
- erase->idx = i;
- spi_nor_set_erase_type(erase, size, opcode);
-}
-
-/**
- * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
- * @l: member in the left half of the map's erase_type array
- * @r: member in the right half of the map's erase_type array
- *
- * Comparison function used in the sort() call to sort in ascending order the
- * map's erase types, the smallest erase type size being the first member in the
- * sorted erase_type array.
- *
- * Return: the result of @l->size - @r->size
- */
-static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
-{
- const struct spi_nor_erase_type *left = l, *right = r;
-
- return left->size - right->size;
-}
-
-/**
- * spi_nor_sort_erase_mask() - sort erase mask
- * @map: the erase map of the SPI NOR
- * @erase_mask: the erase type mask to be sorted
- *
- * Replicate the sort done for the map's erase types in BFPT: sort the erase
- * mask in ascending order with the smallest erase type size starting from
- * BIT(0) in the sorted erase mask.
- *
- * Return: sorted erase mask.
- */
-static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
-{
- struct spi_nor_erase_type *erase_type = map->erase_type;
- int i;
- u8 sorted_erase_mask = 0;
-
- if (!erase_mask)
- return 0;
-
- /* Replicate the sort done for the map's erase types. */
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
- if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
- sorted_erase_mask |= BIT(i);
-
- return sorted_erase_mask;
-}
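
A worked example of the remapping, with hypothetical BFPT contents where Erase Type 0 is 64 KiB and Erase Type 1 is 4 KiB: after sorting by size, the 4 KiB type occupies slot 0 but keeps idx = 1, so a mask of BIT(1) in BFPT numbering becomes BIT(0) in sorted numbering.

	#include <stdint.h>
	#include <stdio.h>

	struct erase_type { uint32_t size; uint8_t idx; };

	int main(void)
	{
		/* Sorted by size; idx preserves the BFPT position. */
		struct erase_type et[2] = { { 4096, 1 }, { 65536, 0 } };
		uint8_t erase_mask = 1 << 1;	/* BFPT numbering: type 1 only */
		uint8_t sorted = 0;
		int i;

		for (i = 0; i < 2; i++)
			if (et[i].size && (erase_mask & (1 << et[i].idx)))
				sorted |= 1 << i;
		printf("sorted mask 0x%x\n", sorted);	/* 0x1 */
		return 0;
	}
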
-
-/**
- * spi_nor_regions_sort_erase_types() - sort erase types in each region
- * @map: the erase map of the SPI NOR
- *
- * Function assumes that the erase types defined in the erase map are already
- * sorted in ascending order, with the smallest erase type size being the first
- * member in the erase_type array. It replicates the sort done for the map's
- * erase types. Each region's erase bitmask will indicate which erase types are
- * supported from the sorted erase types defined in the erase map.
- * Sort all the regions' erase types at init in order to speed up the process
- * of finding the best erase command at runtime.
- */
-static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
-{
- struct spi_nor_erase_region *region = map->regions;
- u8 region_erase_mask, sorted_erase_mask;
-
- while (region) {
- region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
- sorted_erase_mask = spi_nor_sort_erase_mask(map,
- region_erase_mask);
-
- /* Overwrite erase mask. */
- region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
- sorted_erase_mask;
-
- region = spi_nor_region_next(region);
- }
-}
-
-/**
- * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
- * @map: the erase map of the SPI NOR
- * @erase_mask: bitmask encoding erase types that can erase the entire
- * flash memory
- * @flash_size: the spi nor flash memory size
- */
-static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
- u8 erase_mask, u64 flash_size)
-{
- /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
- map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
- SNOR_LAST_REGION;
- map->uniform_region.size = flash_size;
- map->regions = &map->uniform_region;
- map->uniform_erase_type = erase_mask;
-}
-
-static int
-spi_nor_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- if (nor->info->fixups && nor->info->fixups->post_bfpt)
- return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
- params);
-
- return 0;
-}
-
-/**
- * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
- * @nor: pointer to a 'struct spi_nor'
- * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
- * the Basic Flash Parameter Table length and version
- * @params: pointer to the 'struct spi_nor_flash_parameter' to be
- * filled
- *
- * The Basic Flash Parameter Table is the main and only mandatory table as
- * defined by the SFDP (JESD216) specification.
- * It provides us with the total size (memory density) of the data array and
- * the number of address bytes for Fast Read, Page Program and Sector Erase
- * commands.
- * For Fast READ commands, it also gives the number of mode clock cycles and
- * wait states (regrouped in the number of dummy clock cycles) for each
- * supported instruction op code.
- * For Page Program, the page size has been available since JESD216 rev A;
- * however, the supported instruction op codes are still not provided.
- * For Sector Erase commands, this table stores the supported instruction op
- * codes and the associated sector sizes.
- * Finally, the Quad Enable Requirements (QER) are also available since JESD216
- * rev A. The QER bits encode the manufacturer dependent procedure to be
- * executed to set the Quad Enable (QE) bit in some internal register of the
- * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
- * sending any Quad SPI command to the memory. Actually, setting the QE bit
- * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
- * and IO3 hence enabling 4 (Quad) I/O lines.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_bfpt(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- struct spi_nor_flash_parameter *params)
-{
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase_type = map->erase_type;
- struct sfdp_bfpt bfpt;
- size_t len;
- int i, cmd, err;
- u32 addr;
- u16 half;
- u8 erase_mask;
-
- /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
- if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
- return -EINVAL;
-
- /* Read the Basic Flash Parameter Table. */
- len = min_t(size_t, sizeof(bfpt),
- bfpt_header->length * sizeof(u32));
- addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
- memset(&bfpt, 0, sizeof(bfpt));
- err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
- if (err < 0)
- return err;
-
- /* Fix endianness of the BFPT DWORDs. */
- for (i = 0; i < BFPT_DWORD_MAX; i++)
- bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
-
- /* Number of address bytes. */
- switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
- case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
- nor->addr_width = 3;
- break;
-
- case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
- nor->addr_width = 4;
- break;
-
- default:
- break;
- }
-
- /* Flash Memory Density (in bits). */
- params->size = bfpt.dwords[BFPT_DWORD(2)];
- if (params->size & BIT(31)) {
- params->size &= ~BIT(31);
-
-		/*
-		 * Prevent overflows on params->size. A NOR flash of 2^64
-		 * bits is unlikely to exist, so this error probably means
-		 * the BFPT we are reading is corrupted or wrong.
-		 */
- if (params->size > 63)
- return -EINVAL;
-
- params->size = 1ULL << params->size;
- } else {
- params->size++;
- }
- params->size >>= 3; /* Convert to bytes. */
-
- /* Fast Read settings. */
- for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
- const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
- struct spi_nor_read_command *read;
-
- if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
- params->hwcaps.mask &= ~rd->hwcaps;
- continue;
- }
-
- params->hwcaps.mask |= rd->hwcaps;
- cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
- read = &params->reads[cmd];
- half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
- spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
- }
-
- /*
- * Sector Erase settings. Reinitialize the uniform erase map using the
- * Erase Types defined in the bfpt table.
- */
- erase_mask = 0;
- memset(&params->erase_map, 0, sizeof(params->erase_map));
- for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
- const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
- u32 erasesize;
- u8 opcode;
-
- half = bfpt.dwords[er->dword] >> er->shift;
- erasesize = half & 0xff;
-
- /* erasesize == 0 means this Erase Type is not supported. */
- if (!erasesize)
- continue;
-
- erasesize = 1U << erasesize;
- opcode = (half >> 8) & 0xff;
- erase_mask |= BIT(i);
- spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
- opcode, i);
- }
- spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
- /*
- * Sort all the map's Erase Types in ascending order with the smallest
- * erase size being the first member in the erase_type array.
- */
- sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
- spi_nor_map_cmp_erase_type, NULL);
- /*
- * Sort the erase types in the uniform region in order to update the
- * uniform_erase_type bitmask. The bitmask will be used later on when
- * selecting the uniform erase.
- */
- spi_nor_regions_sort_erase_types(map);
- map->uniform_erase_type = map->uniform_region.offset &
- SNOR_ERASE_TYPE_MASK;
-
- /* Stop here if not JESD216 rev A or later. */
- if (bfpt_header->length < BFPT_DWORD_MAX)
- return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
- params);
-
- /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
- params->page_size = bfpt.dwords[BFPT_DWORD(11)];
- params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
- params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
- params->page_size = 1U << params->page_size;
-
- /* Quad Enable Requirements. */
- switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
- case BFPT_DWORD15_QER_NONE:
- params->quad_enable = NULL;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
- /*
- * Writing only one byte to the Status Register has the
- * side-effect of clearing Status Register 2.
- */
- case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
- /*
- * Read Configuration Register (35h) instruction is not
- * supported.
- */
- nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR1_BIT6:
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- params->quad_enable = spi_nor_sr1_bit6_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT7:
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- params->quad_enable = spi_nor_sr2_bit7_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT1:
-		/*
-		 * JESD216 rev B or later does not specify whether writing
-		 * only one byte to the Status Register clears Status
-		 * Register 2 or not, so let's be cautious and keep the
-		 * default assumption of a 16-bit Write Status (01h) command.
-		 */
- nor->flags |= SNOR_F_HAS_16BIT_SR;
-
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- break;
-
- default:
- return -EINVAL;
- }
-
- return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
-}
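
The density decode near the top of the function is worth a worked check. A standalone sketch with two hypothetical DWORD2 values, one using the linear N+1 encoding and one using the 2^N encoding (the driver additionally rejects N > 63):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t bfpt_density_to_bytes(uint32_t dword2)
	{
		uint64_t size;

		if (dword2 & 0x80000000u)
			size = 1ULL << (dword2 & 0x7fffffffu); /* 2^N bits */
		else
			size = (uint64_t)dword2 + 1;           /* N + 1 bits */
		return size >> 3;                              /* bits -> bytes */
	}

	int main(void)
	{
		/* 0x00ffffff: 16 Mibit -> 2 MiB */
		printf("%llu\n", (unsigned long long)bfpt_density_to_bytes(0x00ffffff));
		/* 0x80000021: 2^33 bits -> 1 GiB */
		printf("%llu\n", (unsigned long long)bfpt_density_to_bytes(0x80000021));
		return 0;
	}
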
-
-#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
-#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
-
-#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
-#define SMPT_CMD_READ_DUMMY_SHIFT 16
-#define SMPT_CMD_READ_DUMMY(_cmd) \
- (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
-#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
-
-#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
-#define SMPT_CMD_READ_DATA_SHIFT 24
-#define SMPT_CMD_READ_DATA(_cmd) \
- (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
-
-#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
-#define SMPT_CMD_OPCODE_SHIFT 8
-#define SMPT_CMD_OPCODE(_cmd) \
- (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
-
-#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
-#define SMPT_MAP_REGION_COUNT_SHIFT 16
-#define SMPT_MAP_REGION_COUNT(_header) \
- ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
- SMPT_MAP_REGION_COUNT_SHIFT) + 1)
-
-#define SMPT_MAP_ID_MASK GENMASK(15, 8)
-#define SMPT_MAP_ID_SHIFT 8
-#define SMPT_MAP_ID(_header) \
- (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
-
-#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
-#define SMPT_MAP_REGION_SIZE_SHIFT 8
-#define SMPT_MAP_REGION_SIZE(_region) \
- (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
- SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
-
-#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
-#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
- ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
-
-#define SMPT_DESC_TYPE_MAP BIT(1)
-#define SMPT_DESC_END BIT(0)
-
-/**
- * spi_nor_smpt_addr_width() - return the address width used in the
- * configuration detection command.
- * @nor: pointer to a 'struct spi_nor'
- * @settings: configuration detection command descriptor, dword1
- */
-static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
-{
- switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
- case SMPT_CMD_ADDRESS_LEN_0:
- return 0;
- case SMPT_CMD_ADDRESS_LEN_3:
- return 3;
- case SMPT_CMD_ADDRESS_LEN_4:
- return 4;
- case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
- /* fall through */
- default:
- return nor->addr_width;
- }
-}
-
-/**
- * spi_nor_smpt_read_dummy() - return the configuration detection command read
- * latency, in clock cycles.
- * @nor: pointer to a 'struct spi_nor'
- * @settings: configuration detection command descriptor, dword1
- *
- * Return: the number of dummy cycles for an SMPT read
- */
-static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
-{
- u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
-
- if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
- return nor->read_dummy;
- return read_dummy;
-}
-
-/**
- * spi_nor_get_map_in_use() - get the configuration map in use
- * @nor: pointer to a 'struct spi_nor'
- * @smpt: pointer to the sector map parameter table
- * @smpt_len: sector map parameter table length
- *
- * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
- */
-static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
- u8 smpt_len)
-{
- const u32 *ret;
- u8 *buf;
- u32 addr;
- int err;
- u8 i;
- u8 addr_width, read_opcode, read_dummy;
- u8 read_data_mask, map_id;
-
- /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- addr_width = nor->addr_width;
- read_dummy = nor->read_dummy;
- read_opcode = nor->read_opcode;
-
- map_id = 0;
- /* Determine if there are any optional Detection Command Descriptors */
- for (i = 0; i < smpt_len; i += 2) {
- if (smpt[i] & SMPT_DESC_TYPE_MAP)
- break;
-
- read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
- nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
- nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
- nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
- addr = smpt[i + 1];
-
- err = spi_nor_read_raw(nor, addr, 1, buf);
- if (err) {
- ret = ERR_PTR(err);
- goto out;
- }
-
- /*
- * Build an index value that is used to select the Sector Map
- * Configuration that is currently in use.
- */
- map_id = map_id << 1 | !!(*buf & read_data_mask);
- }
-
- /*
- * If command descriptors are provided, they always precede map
-	 * descriptors in the table. There is no need to start the iteration
-	 * over the smpt array all over again.
- *
- * Find the matching configuration map.
- */
- ret = ERR_PTR(-EINVAL);
- while (i < smpt_len) {
- if (SMPT_MAP_ID(smpt[i]) == map_id) {
- ret = smpt + i;
- break;
- }
-
- /*
- * If there are no more configuration map descriptors and no
- * configuration ID matched the configuration identifier, the
- * sector address map is unknown.
- */
- if (smpt[i] & SMPT_DESC_END)
- break;
-
- /* increment the table index to the next map */
- i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
- }
-
- /* fall through */
-out:
- kfree(buf);
- nor->addr_width = addr_width;
- nor->read_dummy = read_dummy;
- nor->read_opcode = read_opcode;
- return ret;
-}
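
A worked example of the map_id construction: with two hypothetical detection command descriptors, each read contributes one bit, most significant first.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* First detection read returns a set bit, the second a clear
		 * bit, selecting configuration ID 0b10 = 2. */
		const uint8_t reads[2] = { 0x04, 0x00 };
		const uint8_t masks[2] = { 0x04, 0x08 };
		uint8_t map_id = 0;
		int i;

		for (i = 0; i < 2; i++)
			map_id = map_id << 1 | !!(reads[i] & masks[i]);
		printf("map_id %u\n", map_id);	/* 2 */
		return 0;
	}
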
-
-/**
- * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @erase_type: erase type bitmask
- */
-static void
-spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
- const struct spi_nor_erase_type *erase,
- const u8 erase_type)
-{
- int i;
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- if (!(erase_type & BIT(i)))
- continue;
- if (region->size & erase[i].size_mask) {
- spi_nor_region_mark_overlay(region);
- return;
- }
- }
-}
-
-/**
- * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
- * @nor: pointer to a 'struct spi_nor'
- * @params: pointer to a duplicate 'struct spi_nor_flash_parameter' that is
- * used for storing SFDP parsed data
- * @smpt: pointer to the sector map parameter table
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int
-spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
- struct spi_nor_flash_parameter *params,
- const u32 *smpt)
-{
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase = map->erase_type;
- struct spi_nor_erase_region *region;
- u64 offset;
- u32 region_count;
- int i, j;
- u8 uniform_erase_type, save_uniform_erase_type;
- u8 erase_type, regions_erase_type;
-
- region_count = SMPT_MAP_REGION_COUNT(*smpt);
- /*
- * The regions will be freed when the driver detaches from the
- * device.
- */
- region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
- GFP_KERNEL);
- if (!region)
- return -ENOMEM;
- map->regions = region;
-
- uniform_erase_type = 0xff;
- regions_erase_type = 0;
- offset = 0;
- /* Populate regions. */
- for (i = 0; i < region_count; i++) {
- j = i + 1; /* index for the region dword */
- region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
- erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
- region[i].offset = offset | erase_type;
-
- spi_nor_region_check_overlay(&region[i], erase, erase_type);
-
- /*
- * Save the erase types that are supported in all regions and
- * can erase the entire flash memory.
- */
- uniform_erase_type &= erase_type;
-
- /*
- * regions_erase_type mask will indicate all the erase types
- * supported in this configuration map.
- */
- regions_erase_type |= erase_type;
-
- offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
- region[i].size;
- }
-
- save_uniform_erase_type = map->uniform_erase_type;
- map->uniform_erase_type = spi_nor_sort_erase_mask(map,
- uniform_erase_type);
-
- if (!regions_erase_type) {
- /*
- * Roll back to the previous uniform_erase_type mask, SMPT is
- * broken.
- */
- map->uniform_erase_type = save_uniform_erase_type;
- return -EINVAL;
- }
-
- /*
- * BFPT advertises all the erase types supported by all the possible
- * map configurations. Mask out the erase types that are not supported
- * by the current map configuration.
- */
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
- if (!(regions_erase_type & BIT(erase[i].idx)))
- spi_nor_set_erase_type(&erase[i], 0, 0xFF);
-
- spi_nor_region_mark_end(&region[i - 1]);
-
- return 0;
-}
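
A worked decode of a hypothetical SMPT region DWORD, following the SMPT_MAP_REGION_SIZE() and SMPT_MAP_REGION_ERASE_TYPE() definitions above:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t region_dword = 0x0000ff05;	/* hypothetical */

		/* Bits 31:8 encode (size / 256) - 1; bits 3:0 the
		 * erase-type bitmask. */
		uint32_t size = (((region_dword >> 8) & 0xffffff) + 1) * 256;
		uint32_t types = region_dword & 0xf;

		/* prints "region: 65536 bytes, erase types 0x5" */
		printf("region: %u bytes, erase types 0x%x\n", size, types);
		return 0;
	}
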
-
-/**
- * spi_nor_parse_smpt() - parse Sector Map Parameter Table
- * @nor: pointer to a 'struct spi_nor'
- * @smpt_header: sector map parameter table header
- * @params: pointer to a duplicate 'struct spi_nor_flash_parameter'
- * that is used for storing SFDP parsed data
- *
- * This table is optional, but when available, we parse it to identify the
- * location and size of sectors within the main data array of the flash memory
- * device and to identify which Erase Types are supported by each sector.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_smpt(struct spi_nor *nor,
- const struct sfdp_parameter_header *smpt_header,
- struct spi_nor_flash_parameter *params)
-{
- const u32 *sector_map;
- u32 *smpt;
- size_t len;
- u32 addr;
- int i, ret;
-
- /* Read the Sector Map Parameter Table. */
- len = smpt_header->length * sizeof(*smpt);
- smpt = kmalloc(len, GFP_KERNEL);
- if (!smpt)
- return -ENOMEM;
-
- addr = SFDP_PARAM_HEADER_PTP(smpt_header);
- ret = spi_nor_read_sfdp(nor, addr, len, smpt);
- if (ret)
- goto out;
-
- /* Fix endianness of the SMPT DWORDs. */
- for (i = 0; i < smpt_header->length; i++)
- smpt[i] = le32_to_cpu(smpt[i]);
-
- sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
- if (IS_ERR(sector_map)) {
- ret = PTR_ERR(sector_map);
- goto out;
- }
-
- ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
- if (ret)
- goto out;
-
- spi_nor_regions_sort_erase_types(&params->erase_map);
- /* fall through */
-out:
- kfree(smpt);
- return ret;
-}
-
-#define SFDP_4BAIT_DWORD_MAX 2
-
-struct sfdp_4bait {
- /* The hardware capability. */
- u32 hwcaps;
-
- /*
- * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
- * the associated 4-byte address op code is supported.
- */
- u32 supported_bit;
-};
-
-/**
- * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
- * @nor: pointer to a 'struct spi_nor'.
- * @param_header: pointer to the 'struct sfdp_parameter_header' describing
- * the 4-Byte Address Instruction Table length and version.
- * @params:	pointer to the 'struct spi_nor_flash_parameter' to be filled.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_4bait(struct spi_nor *nor,
- const struct sfdp_parameter_header *param_header,
- struct spi_nor_flash_parameter *params)
-{
- static const struct sfdp_4bait reads[] = {
- { SNOR_HWCAPS_READ, BIT(0) },
- { SNOR_HWCAPS_READ_FAST, BIT(1) },
- { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
- { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
- { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
- { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
- { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
- { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
- { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
- };
- static const struct sfdp_4bait programs[] = {
- { SNOR_HWCAPS_PP, BIT(6) },
- { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
- { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
- };
- static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
- { 0u /* not used */, BIT(9) },
- { 0u /* not used */, BIT(10) },
- { 0u /* not used */, BIT(11) },
- { 0u /* not used */, BIT(12) },
- };
- struct spi_nor_pp_command *params_pp = params->page_programs;
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase_type = map->erase_type;
- u32 *dwords;
- size_t len;
- u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
- int i, ret;
-
- if (param_header->major != SFDP_JESD216_MAJOR ||
- param_header->length < SFDP_4BAIT_DWORD_MAX)
- return -EINVAL;
-
- /* Read the 4-byte Address Instruction Table. */
- len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
-
- /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
- dwords = kmalloc(len, GFP_KERNEL);
- if (!dwords)
- return -ENOMEM;
-
- addr = SFDP_PARAM_HEADER_PTP(param_header);
- ret = spi_nor_read_sfdp(nor, addr, len, dwords);
- if (ret)
- goto out;
-
- /* Fix endianness of the 4BAIT DWORDs. */
- for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
- dwords[i] = le32_to_cpu(dwords[i]);
-
- /*
- * Compute the subset of (Fast) Read commands for which the 4-byte
- * version is supported.
- */
- discard_hwcaps = 0;
- read_hwcaps = 0;
- for (i = 0; i < ARRAY_SIZE(reads); i++) {
- const struct sfdp_4bait *read = &reads[i];
-
- discard_hwcaps |= read->hwcaps;
- if ((params->hwcaps.mask & read->hwcaps) &&
- (dwords[0] & read->supported_bit))
- read_hwcaps |= read->hwcaps;
- }
-
- /*
- * Compute the subset of Page Program commands for which the 4-byte
- * version is supported.
- */
- pp_hwcaps = 0;
- for (i = 0; i < ARRAY_SIZE(programs); i++) {
- const struct sfdp_4bait *program = &programs[i];
-
- /*
- * The 4 Byte Address Instruction (Optional) Table is the only
- * SFDP table that indicates support for Page Program Commands.
- * Bypass the params->hwcaps.mask and consider 4BAIT the
- * authoritative source for specifying Page Program support.
- */
- discard_hwcaps |= program->hwcaps;
- if (dwords[0] & program->supported_bit)
- pp_hwcaps |= program->hwcaps;
- }
-
- /*
- * Compute the subset of Sector Erase commands for which the 4-byte
- * version is supported.
- */
- erase_mask = 0;
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- const struct sfdp_4bait *erase = &erases[i];
-
- if (dwords[0] & erase->supported_bit)
- erase_mask |= BIT(i);
- }
-
- /* Replicate the sort done for the map's erase types in BFPT. */
- erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
-
- /*
- * We need at least one 4-byte op code per read, program and erase
- * operation; the .read(), .write() and .erase() hooks share the
- * nor->addr_width value.
- */
- if (!read_hwcaps || !pp_hwcaps || !erase_mask)
- goto out;
-
- /*
- * Discard all operations from the 4-byte instruction set which are
- * not supported by this memory.
- */
- params->hwcaps.mask &= ~discard_hwcaps;
- params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
-
- /* Use the 4-byte address instruction set. */
- for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
- struct spi_nor_read_command *read_cmd = &params->reads[i];
-
- read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
- }
-
- /* 4BAIT is the only SFDP table that indicates page program support. */
- if (pp_hwcaps & SNOR_HWCAPS_PP)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
- SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
- if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
- SPINOR_OP_PP_1_1_4_4B,
- SNOR_PROTO_1_1_4);
- if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
- SPINOR_OP_PP_1_4_4_4B,
- SNOR_PROTO_1_4_4);
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- if (erase_mask & BIT(i))
- erase_type[i].opcode = (dwords[1] >>
- erase_type[i].idx * 8) & 0xFF;
- else
- spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
- }
-
- /*
- * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
- * later because we already did the conversion to 4byte opcodes. Also,
- * the latter function implements a legacy quirk for the erase size of
- * Spansion memory. However, this quirk is no longer needed with new
- * SFDP-compliant memories.
- */
- nor->addr_width = 4;
- nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
-
- /* fall through */
-out:
- kfree(dwords);
- return ret;
-}
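The erase-opcode extraction in the loop above follows the 4BAIT DWORD2 layout: one 4-byte erase opcode per byte, erase type 1 in bits 7:0, type 2 in bits 15:8, and so on. A minimal standalone sketch of that extraction (the DWORD2 value used in the example is hypothetical, not taken from a real table):

	#include <stdint.h>

	/* idx is the zero-based erase type index, as in erase_type[i].idx */
	static uint8_t example_4bait_erase_opcode(uint32_t dword2,
						  unsigned int idx)
	{
		return (dword2 >> (idx * 8)) & 0xFF;
	}

	/* e.g. example_4bait_erase_opcode(0xDC3D5C21, 0) == 0x21 */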
-
-/**
- * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @params: pointer to the 'struct spi_nor_flash_parameter' to be
- * filled
- *
- * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
- * specification. This is a standard which tends to be supported by almost all
- * (Q)SPI memory manufacturers. These tables allow us to learn at
- * runtime the main parameters needed to perform basic SPI flash operations such
- * as Fast Read, Page Program or Sector Erase commands.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_sfdp(struct spi_nor *nor,
- struct spi_nor_flash_parameter *params)
-{
- const struct sfdp_parameter_header *param_header, *bfpt_header;
- struct sfdp_parameter_header *param_headers = NULL;
- struct sfdp_header header;
- struct device *dev = nor->dev;
- size_t psize;
- int i, err;
-
- /* Get the SFDP header. */
- err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
- if (err < 0)
- return err;
-
- /* Check the SFDP header version. */
- if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
- header.major != SFDP_JESD216_MAJOR)
- return -EINVAL;
-
- /*
- * Verify that the first and only mandatory parameter header is a
- * Basic Flash Parameter Table header as specified in JESD216.
- */
- bfpt_header = &header.bfpt_header;
- if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
- bfpt_header->major != SFDP_JESD216_MAJOR)
- return -EINVAL;
-
- /*
- * Allocate memory then read all parameter headers with a single
- * Read SFDP command. These parameter headers will actually be parsed
- * twice: a first time to get the latest revision of the basic flash
- * parameter table, then a second time to handle the supported optional
- * tables.
- * Hence we read the parameter headers once and for all to reduce the
- * processing time. Also we use kmalloc() instead of devm_kmalloc()
- * because we don't need to keep these parameter headers: the allocated
- * memory is always released with kfree() before exiting this function.
- */
- if (header.nph) {
- psize = header.nph * sizeof(*param_headers);
-
- param_headers = kmalloc(psize, GFP_KERNEL);
- if (!param_headers)
- return -ENOMEM;
-
- err = spi_nor_read_sfdp(nor, sizeof(header),
- psize, param_headers);
- if (err < 0) {
- dev_dbg(dev, "failed to read SFDP parameter headers\n");
- goto exit;
- }
- }
-
- /*
- * Check other parameter headers to get the latest revision of
- * the basic flash parameter table.
- */
- for (i = 0; i < header.nph; i++) {
- param_header = &param_headers[i];
-
- if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
- param_header->major == SFDP_JESD216_MAJOR &&
- (param_header->minor > bfpt_header->minor ||
- (param_header->minor == bfpt_header->minor &&
- param_header->length > bfpt_header->length)))
- bfpt_header = param_header;
- }
-
- err = spi_nor_parse_bfpt(nor, bfpt_header, params);
- if (err)
- goto exit;
-
- /* Parse optional parameter tables. */
- for (i = 0; i < header.nph; i++) {
- param_header = &param_headers[i];
-
- switch (SFDP_PARAM_HEADER_ID(param_header)) {
- case SFDP_SECTOR_MAP_ID:
- err = spi_nor_parse_smpt(nor, param_header, params);
- break;
-
- case SFDP_4BAIT_ID:
- err = spi_nor_parse_4bait(nor, param_header, params);
- break;
-
- default:
- break;
- }
-
- if (err) {
- dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
- SFDP_PARAM_HEADER_ID(param_header));
- /*
- * Let's not drop all information we extracted so far
- * if optional table parsers fail. If a parser fails, it is
- * responsible for rolling back to
- * the previously known spi_nor data.
- */
- err = 0;
- }
- }
-
-exit:
- kfree(param_headers);
- return err;
-}
-
-static int spi_nor_select_read(struct spi_nor *nor,
- u32 shared_hwcaps)
-{
- int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
- const struct spi_nor_read_command *read;
-
- if (best_match < 0)
- return -EINVAL;
-
- cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
- if (cmd < 0)
- return -EINVAL;
-
- read = &nor->params.reads[cmd];
- nor->read_opcode = read->opcode;
- nor->read_proto = read->proto;
-
- /*
- * In the spi-nor framework, we don't need to distinguish
- * between mode clock cycles and wait state clock cycles.
- * Indeed, the value of the mode clock cycles is used by a QSPI
- * flash memory to know whether it should enter or leave its 0-4-4
- * (Continuous Read / XIP) mode.
- * eXecution In Place is out of scope for the mtd sub-system.
- * Hence we choose to merge both mode and wait state clock cycles
- * into the so-called dummy clock cycles.
- */
- nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
- return 0;
-}
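To make the merge above concrete (numbers hypothetical): a Fast Read variant declared with 2 mode clocks and 6 wait states ends up with read_dummy = 8. A minimal sketch of the arithmetic:

	#include <stdint.h>

	/* Mode clocks and wait states collapse into one dummy-cycle count. */
	static uint8_t example_read_dummy(uint8_t num_mode_clocks,
					  uint8_t num_wait_states)
	{
		return num_mode_clocks + num_wait_states;	/* 2 + 6 == 8 */
	}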
-
-static int spi_nor_select_pp(struct spi_nor *nor,
- u32 shared_hwcaps)
-{
- int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
- const struct spi_nor_pp_command *pp;
-
- if (best_match < 0)
- return -EINVAL;
-
- cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
- if (cmd < 0)
- return -EINVAL;
-
- pp = &nor->params.page_programs[cmd];
- nor->program_opcode = pp->opcode;
- nor->write_proto = pp->proto;
- return 0;
-}
-
-/**
- * spi_nor_select_uniform_erase() - select optimum uniform erase type
- * @map: the erase map of the SPI NOR
- * @wanted_size: the erase type size to search for. Contains the value of
- * info->sector_size or of the "small sector" size in case
- * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
- *
- * Once the optimum uniform sector erase command is found, disable all the
- * others.
- *
- * Return: pointer to erase type on success, NULL otherwise.
- */
-static const struct spi_nor_erase_type *
-spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
- const u32 wanted_size)
-{
- const struct spi_nor_erase_type *tested_erase, *erase = NULL;
- int i;
- u8 uniform_erase_type = map->uniform_erase_type;
-
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- if (!(uniform_erase_type & BIT(i)))
- continue;
-
- tested_erase = &map->erase_type[i];
-
- /*
- * If the current erase size is the one, stop here:
- * we have found the right uniform Sector Erase command.
- */
- if (tested_erase->size == wanted_size) {
- erase = tested_erase;
- break;
- }
-
- /*
- * Otherwise, the current erase size is still a valid candidate.
- * Select the biggest valid candidate.
- */
- if (!erase && tested_erase->size)
- erase = tested_erase;
- /* keep iterating to find the wanted_size */
- }
-
- if (!erase)
- return NULL;
-
- /* Disable all other Sector Erase commands. */
- map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
- map->uniform_erase_type |= BIT(erase - map->erase_type);
- return erase;
-}
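The final mask update relies on pointer arithmetic: the difference between the selected entry and the array base is the erase type index, which BIT() then turns into a single set bit. A standalone sketch with a stand-in struct (field set reduced for illustration; not the real spi_nor_erase_type):

	#include <stdint.h>

	struct erase_type_stub { uint32_t size; uint8_t opcode; };

	static uint8_t example_uniform_mask(const struct erase_type_stub *base,
					    const struct erase_type_stub *sel)
	{
		/* BIT(sel - base): re-encode the chosen type as one bit */
		return (uint8_t)(1u << (sel - base));
	}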
-
-static int spi_nor_select_erase(struct spi_nor *nor)
-{
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- const struct spi_nor_erase_type *erase = NULL;
- struct mtd_info *mtd = &nor->mtd;
- u32 wanted_size = nor->info->sector_size;
- int i;
-
- /*
- * The previous implementation handling Sector Erase commands assumed
- * that the SPI flash memory has a uniform layout and then used only one
- * of the supported erase sizes for all Sector Erase commands.
- * So to be backward compatible, the new implementation also tries to
- * manage the SPI flash memory as uniform with a single erase sector
- * size, when possible.
- */
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
- /* prefer "small sector" erase if possible */
- wanted_size = 4096u;
-#endif
-
- if (spi_nor_has_uniform_erase(nor)) {
- erase = spi_nor_select_uniform_erase(map, wanted_size);
- if (!erase)
- return -EINVAL;
- nor->erase_opcode = erase->opcode;
- mtd->erasesize = erase->size;
- return 0;
- }
-
- /*
- * For non-uniform SPI flash memory, set mtd->erasesize to the
- * maximum erase sector size. No need to set nor->erase_opcode.
- */
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- if (map->erase_type[i].size) {
- erase = &map->erase_type[i];
- break;
- }
- }
-
- if (!erase)
- return -EINVAL;
-
- mtd->erasesize = erase->size;
- return 0;
-}
-
-static int spi_nor_default_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- u32 ignored_mask, shared_mask;
- int err;
-
- /*
- * Keep only the hardware capabilities supported by both the SPI
- * controller and the SPI flash memory.
- */
- shared_mask = hwcaps->mask & params->hwcaps.mask;
-
- if (nor->spimem) {
- /*
- * When called from spi_nor_probe(), all caps are set and we
- * need to discard some of them based on what the SPI
- * controller actually supports (using spi_mem_supports_op()).
- */
- spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
- } else {
- /*
- * SPI n-n-n protocols are not supported when the SPI
- * controller directly implements the spi_nor interface.
- * Yet another reason to switch to spi-mem.
- */
- ignored_mask = SNOR_HWCAPS_X_X_X;
- if (shared_mask & ignored_mask) {
- dev_dbg(nor->dev,
- "SPI n-n-n protocols are not supported.\n");
- shared_mask &= ~ignored_mask;
- }
- }
-
- /* Select the (Fast) Read command. */
- err = spi_nor_select_read(nor, shared_mask);
- if (err) {
- dev_dbg(nor->dev,
- "can't select read settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- /* Select the Page Program command. */
- err = spi_nor_select_pp(nor, shared_mask);
- if (err) {
- dev_dbg(nor->dev,
- "can't select write settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- /* Select the Sector Erase command. */
- err = spi_nor_select_erase(nor);
- if (err) {
- dev_dbg(nor->dev,
- "can't select erase settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- return 0;
-}
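The capability negotiation at the top of spi_nor_default_setup() is a plain bitwise intersection. A hedged illustration with made-up capability bits (the EX_* values merely stand in for the real SNOR_HWCAPS_* definitions):

	#include <stdint.h>

	#define EX_HWCAPS_READ_1_1_2	(1u << 2)	/* illustrative only */
	#define EX_HWCAPS_READ_1_1_4	(1u << 4)
	#define EX_HWCAPS_PP		(1u << 16)

	static uint32_t example_shared_caps(void)
	{
		uint32_t ctlr  = EX_HWCAPS_READ_1_1_4 | EX_HWCAPS_PP;
		uint32_t flash = EX_HWCAPS_READ_1_1_4 |
				 EX_HWCAPS_READ_1_1_2 | EX_HWCAPS_PP;

		/* Only what both sides support survives. */
		return ctlr & flash;
	}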
-
-static int spi_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- if (!nor->params.setup)
- return 0;
-
- return nor->params.setup(nor, hwcaps);
-}
-
-static void atmel_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void intel_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void issi_set_default_init(struct spi_nor *nor)
-{
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
-}
-
-static void macronix_set_default_init(struct spi_nor *nor)
-{
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
- nor->params.set_4byte = macronix_set_4byte;
-}
-
-static void sst_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void st_micron_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- nor->params.quad_enable = NULL;
- nor->params.set_4byte = st_micron_set_4byte;
-}
-
-static void winbond_set_default_init(struct spi_nor *nor)
-{
- nor->params.set_4byte = winbond_set_4byte;
-}
-
-/**
- * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
- * settings based on the MFR register and the ->default_init() hook.
- * @nor: pointer to a 'struct spi_nor'.
- */
-static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
-{
- /* Init flash parameters based on MFR */
- switch (JEDEC_MFR(nor->info)) {
- case SNOR_MFR_ATMEL:
- atmel_set_default_init(nor);
- break;
-
- case SNOR_MFR_INTEL:
- intel_set_default_init(nor);
- break;
-
- case SNOR_MFR_ISSI:
- issi_set_default_init(nor);
- break;
-
- case SNOR_MFR_MACRONIX:
- macronix_set_default_init(nor);
- break;
-
- case SNOR_MFR_ST:
- case SNOR_MFR_MICRON:
- st_micron_set_default_init(nor);
- break;
-
- case SNOR_MFR_SST:
- sst_set_default_init(nor);
- break;
-
- case SNOR_MFR_WINBOND:
- winbond_set_default_init(nor);
- break;
-
- default:
- break;
- }
-
- if (nor->info->fixups && nor->info->fixups->default_init)
- nor->info->fixups->default_init(nor);
-}
-
-/**
- * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
- * based on JESD216 SFDP standard.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * The method has a roll-back mechanism: in case the SFDP parsing fails, the
- * legacy flash parameters and settings will be restored.
- */
-static void spi_nor_sfdp_init_params(struct spi_nor *nor)
-{
- struct spi_nor_flash_parameter sfdp_params;
-
- memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
-
- if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
- nor->addr_width = 0;
- nor->flags &= ~SNOR_F_4B_OPCODES;
- } else {
- memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
- }
-}
-
-/**
- * spi_nor_info_init_params() - Initialize the flash's parameters and settings
- * based on nor->info data.
- * @nor: pointer to a 'struct spi_nor'.
- */
-static void spi_nor_info_init_params(struct spi_nor *nor)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- struct spi_nor_erase_map *map = &params->erase_map;
- const struct flash_info *info = nor->info;
- struct device_node *np = spi_nor_get_flash_node(nor);
- u8 i, erase_mask;
-
- /* Initialize legacy flash parameters and settings. */
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- params->set_4byte = spansion_set_4byte;
- params->setup = spi_nor_default_setup;
- /* Default to 16-bit Write Status (01h) Command */
- nor->flags |= SNOR_F_HAS_16BIT_SR;
-
- /* Set SPI NOR sizes. */
- params->size = (u64)info->sector_size * info->n_sectors;
- params->page_size = info->page_size;
-
- if (!(info->flags & SPI_NOR_NO_FR)) {
- /* Default to Fast Read for DT and non-DT platform devices. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
-
- /* Mask out Fast Read if not requested at DT instantiation. */
- if (np && !of_property_read_bool(np, "m25p,fast-read"))
- params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
- }
-
- /* (Fast) Read settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
- 0, 0, SPINOR_OP_READ,
- SNOR_PROTO_1_1_1);
-
- if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
- 0, 8, SPINOR_OP_READ_FAST,
- SNOR_PROTO_1_1_1);
-
- if (info->flags & SPI_NOR_DUAL_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
- 0, 8, SPINOR_OP_READ_1_1_2,
- SNOR_PROTO_1_1_2);
- }
-
- if (info->flags & SPI_NOR_QUAD_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
- 0, 8, SPINOR_OP_READ_1_1_4,
- SNOR_PROTO_1_1_4);
- }
-
- if (info->flags & SPI_NOR_OCTAL_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
- 0, 8, SPINOR_OP_READ_1_1_8,
- SNOR_PROTO_1_1_8);
- }
-
- /* Page Program settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_PP;
- spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
- SPINOR_OP_PP, SNOR_PROTO_1_1_1);
-
- /*
- * Sector Erase settings. Sort Erase Types in ascending order, with the
- * smallest erase size starting at BIT(0).
- */
- erase_mask = 0;
- i = 0;
- if (info->flags & SECT_4K_PMC) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K_PMC);
- i++;
- } else if (info->flags & SECT_4K) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K);
- i++;
- }
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
- SPINOR_OP_SE);
- spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
-}
-
-static void spansion_post_sfdp_fixups(struct spi_nor *nor)
-{
- if (nor->params.size <= SZ_16M)
- return;
-
- nor->flags |= SNOR_F_4B_OPCODES;
- /* No small sector erase for 4-byte command set */
- nor->erase_opcode = SPINOR_OP_SE;
- nor->mtd.erasesize = nor->info->sector_size;
-}
-
-static void s3an_post_sfdp_fixups(struct spi_nor *nor)
-{
- nor->params.setup = s3an_nor_setup;
-}
-
-/**
- * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
- * after SFDP has been parsed (it is also called for SPI NORs that do not
- * support RDSFDP).
- * @nor: pointer to a 'struct spi_nor'
- *
- * Typically used to tweak various parameters that could not be extracted by
- * other means (i.e. when information provided by the SFDP/flash_info tables
- * is incomplete or wrong).
- */
-static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
-{
- switch (JEDEC_MFR(nor->info)) {
- case SNOR_MFR_SPANSION:
- spansion_post_sfdp_fixups(nor);
- break;
-
- default:
- break;
- }
-
- if (nor->info->flags & SPI_S3AN)
- s3an_post_sfdp_fixups(nor);
-
- if (nor->info->fixups && nor->info->fixups->post_sfdp)
- nor->info->fixups->post_sfdp(nor);
-}
-
-/**
- * spi_nor_late_init_params() - Late initialization of default flash parameters.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Used to set default flash parameters and settings when the ->default_init()
- * hook or the SFDP parser leave them unset.
- */
-static void spi_nor_late_init_params(struct spi_nor *nor)
-{
- /*
- * NOR protection support. When locking_ops are not provided, we pick
- * the default ones.
- */
- if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops)
- nor->params.locking_ops = &stm_locking_ops;
-}
-
-/**
- * spi_nor_init_params() - Initialize the flash's parameters and settings.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * The flash parameters and settings are initialized based on a sequence of
- * calls that are ordered by priority:
- *
- * 1/ Default flash parameters initialization. The initializations are done
- * based on nor->info data:
- * spi_nor_info_init_params()
- *
- * which can be overwritten by:
- * 2/ Manufacturer flash parameters initialization. The initializations are
- * done based on the MFR register, or when the decisions cannot be made solely
- * based on MFR, by using specific flash_info tweaks, ->default_init():
- * spi_nor_manufacturer_init_params()
- *
- * which can be overwritten by:
- * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
- * should be more accurate than the above.
- * spi_nor_sfdp_init_params()
- *
- * Please note that there is a ->post_bfpt() fixup hook that can overwrite
- * the flash parameters and settings immediately after parsing the Basic
- * Flash Parameter Table.
- *
- * which can be overwritten by:
- * 4/ Post SFDP flash parameters initialization. Used to tweak various
- * parameters that could not be extracted by other means (i.e. when
- * information provided by the SFDP/flash_info tables is incomplete or
- * wrong).
- * spi_nor_post_sfdp_fixups()
- *
- * 5/ Late default flash parameters initialization, used when the
- * ->default_init() hook or the SFDP parser do not set specific params.
- * spi_nor_late_init_params()
- */
-static void spi_nor_init_params(struct spi_nor *nor)
-{
- spi_nor_info_init_params(nor);
-
- spi_nor_manufacturer_init_params(nor);
-
- if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
- !(nor->info->flags & SPI_NOR_SKIP_SFDP))
- spi_nor_sfdp_init_params(nor);
-
- spi_nor_post_sfdp_fixups(nor);
-
- spi_nor_late_init_params(nor);
-}
-
-/**
- * spi_nor_quad_enable() - enable Quad I/O if needed.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_quad_enable(struct spi_nor *nor)
-{
- if (!nor->params.quad_enable)
- return 0;
-
- if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
- spi_nor_get_protocol_width(nor->write_proto) == 4))
- return 0;
-
- return nor->params.quad_enable(nor);
-}
-
-/**
- * spi_nor_unlock_all() - Unlocks the entire flash memory array.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * Some SPI NOR flashes are write protected by default after a power-on reset
- * cycle, in order to avoid inadvertent writes during power-up. Backward
- * compatibility requires us to unlock the entire flash memory array at power-up
- * by default.
- */
-static int spi_nor_unlock_all(struct spi_nor *nor)
-{
- if (nor->flags & SNOR_F_HAS_LOCK)
- return spi_nor_unlock(&nor->mtd, 0, nor->params.size);
-
- return 0;
-}
-
-static int spi_nor_init(struct spi_nor *nor)
-{
- int err;
-
- err = spi_nor_quad_enable(nor);
- if (err) {
- dev_dbg(nor->dev, "quad mode not supported\n");
- return err;
- }
-
- err = spi_nor_unlock_all(nor);
- if (err) {
- dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
- return err;
- }
-
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
- /*
- * If the RESET# pin isn't hooked up properly, or the system
- * otherwise doesn't perform a reset command in the boot
- * sequence, it's impossible to 100% protect against unexpected
- * reboots (e.g., crashes). Warn the user (or hopefully, system
- * designer) that this is bad.
- */
- WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
- "enabling reset hack; may not recover from unexpected reboots\n");
- nor->params.set_4byte(nor, true);
- }
-
- return 0;
-}
-
-/* mtd resume handler */
-static void spi_nor_resume(struct mtd_info *mtd)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- struct device *dev = nor->dev;
- int ret;
-
- /* re-initialize the nor chip */
- ret = spi_nor_init(nor);
- if (ret)
- dev_err(dev, "resume() failed\n");
-}
-
-void spi_nor_restore(struct spi_nor *nor)
-{
- /* restore the addressing mode */
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
- nor->flags & SNOR_F_BROKEN_RESET)
- nor->params.set_4byte(nor, false);
-}
-EXPORT_SYMBOL_GPL(spi_nor_restore);
-
-static const struct flash_info *spi_nor_match_id(const char *name)
-{
- const struct flash_info *id = spi_nor_ids;
-
- while (id->name) {
- if (!strcmp(name, id->name))
- return id;
- id++;
- }
- return NULL;
-}
-
-static int spi_nor_set_addr_width(struct spi_nor *nor)
-{
- if (nor->addr_width) {
- /* already configured from SFDP */
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
- } else if (nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
- } else {
- nor->addr_width = 3;
- }
-
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
- return -EINVAL;
- }
-
- /* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
-
- return 0;
-}
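The 0x1000000 threshold in spi_nor_set_addr_width() is simply the reach of a 24-bit address: three address bytes cover exactly 16 MiB, so anything larger needs four. A small sketch of that arithmetic (nothing assumed beyond it):

	#include <stdint.h>

	static uint8_t example_addr_width(uint64_t size)
	{
		/* 1 << 24 == 0x1000000 == 16 MiB, the 3-byte limit */
		return size > (UINT64_C(1) << 24) ? 4 : 3;
	}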
-
-static void spi_nor_debugfs_init(struct spi_nor *nor,
- const struct flash_info *info)
-{
- struct mtd_info *mtd = &nor->mtd;
-
- mtd->dbg.partname = info->name;
- mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
- info->id_len, info->id);
-}
-
-static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
- const char *name)
-{
- const struct flash_info *info = NULL;
-
- if (name)
- info = spi_nor_match_id(name);
- /* Try to auto-detect if chip name wasn't specified or not found */
- if (!info)
- info = spi_nor_read_id(nor);
- if (IS_ERR_OR_NULL(info))
- return ERR_PTR(-ENOENT);
-
- /*
- * If the caller has specified the name of a flash model that can normally
- * be detected using JEDEC, let's verify it.
- */
- if (name && info->id_len) {
- const struct flash_info *jinfo;
-
- jinfo = spi_nor_read_id(nor);
- if (IS_ERR(jinfo)) {
- return jinfo;
- } else if (jinfo != info) {
- /*
- * JEDEC knows better, so overwrite platform ID. We
- * can't trust partitions any longer, but we'll let
- * mtd apply them anyway, since some partitions may be
- * marked read-only, and we don't want to lose that
- * information, even if it's not 100% accurate.
- */
- dev_warn(nor->dev, "found %s, expected %s\n",
- jinfo->name, info->name);
- info = jinfo;
- }
- }
-
- return info;
-}
-
-int spi_nor_scan(struct spi_nor *nor, const char *name,
- const struct spi_nor_hwcaps *hwcaps)
-{
- const struct flash_info *info;
- struct device *dev = nor->dev;
- struct mtd_info *mtd = &nor->mtd;
- struct device_node *np = spi_nor_get_flash_node(nor);
- struct spi_nor_flash_parameter *params = &nor->params;
- int ret;
- int i;
-
- ret = spi_nor_check(nor);
- if (ret)
- return ret;
-
- /* Reset SPI protocol for all commands. */
- nor->reg_proto = SNOR_PROTO_1_1_1;
- nor->read_proto = SNOR_PROTO_1_1_1;
- nor->write_proto = SNOR_PROTO_1_1_1;
-
- /*
- * We need the bounce buffer early to read/write registers when going
- * through the spi-mem layer (buffers have to be DMA-able).
- * For spi-mem drivers, we'll reallocate a new buffer if
- * nor->page_size turns out to be greater than PAGE_SIZE (which
- * shouldn't happen anytime soon since NOR pages are usually less
- * than 1KB) after spi_nor_scan() returns.
- */
- nor->bouncebuf_size = PAGE_SIZE;
- nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
- GFP_KERNEL);
- if (!nor->bouncebuf)
- return -ENOMEM;
-
- info = spi_nor_get_flash_info(nor, name);
- if (IS_ERR(info))
- return PTR_ERR(info);
-
- nor->info = info;
-
- spi_nor_debugfs_init(nor, info);
-
- mutex_init(&nor->lock);
-
- /*
- * Make sure the XSR_RDY flag is set before calling
- * spi_nor_wait_till_ready(). Xilinx S3AN shares its MFR ID
- * with Atmel SPI NOR flashes.
- */
- if (info->flags & SPI_NOR_XSR_RDY)
- nor->flags |= SNOR_F_READY_XSR_RDY;
-
- if (info->flags & SPI_NOR_HAS_LOCK)
- nor->flags |= SNOR_F_HAS_LOCK;
-
- /* Init flash parameters based on flash_info struct and SFDP */
- spi_nor_init_params(nor);
-
- if (!mtd->name)
- mtd->name = dev_name(dev);
- mtd->priv = nor;
- mtd->type = MTD_NORFLASH;
- mtd->writesize = 1;
- mtd->flags = MTD_CAP_NORFLASH;
- mtd->size = params->size;
- mtd->_erase = spi_nor_erase;
- mtd->_read = spi_nor_read;
- mtd->_resume = spi_nor_resume;
-
- if (nor->params.locking_ops) {
- mtd->_lock = spi_nor_lock;
- mtd->_unlock = spi_nor_unlock;
- mtd->_is_locked = spi_nor_is_locked;
- }
-
- /* SST NOR chips use AAI word program */
- if (info->flags & SST_WRITE)
- mtd->_write = sst_write;
- else
- mtd->_write = spi_nor_write;
-
- if (info->flags & USE_FSR)
- nor->flags |= SNOR_F_USE_FSR;
- if (info->flags & SPI_NOR_HAS_TB) {
- nor->flags |= SNOR_F_HAS_SR_TB;
- if (info->flags & SPI_NOR_TB_SR_BIT6)
- nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
- }
-
- if (info->flags & NO_CHIP_ERASE)
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
- if (info->flags & USE_CLSR)
- nor->flags |= SNOR_F_USE_CLSR;
-
- if (info->flags & SPI_NOR_NO_ERASE)
- mtd->flags |= MTD_NO_ERASE;
-
- mtd->dev.parent = dev;
- nor->page_size = params->page_size;
- mtd->writebufsize = nor->page_size;
-
- if (of_property_read_bool(np, "broken-flash-reset"))
- nor->flags |= SNOR_F_BROKEN_RESET;
-
- /*
- * Configure the SPI memory:
- * - select op codes for (Fast) Read, Page Program and Sector Erase.
- * - set the number of dummy cycles (mode cycles + wait states).
- * - set the SPI protocols for register and memory accesses.
- */
- ret = spi_nor_setup(nor, hwcaps);
- if (ret)
- return ret;
-
- if (info->flags & SPI_NOR_4B_OPCODES)
- nor->flags |= SNOR_F_4B_OPCODES;
-
- ret = spi_nor_set_addr_width(nor);
- if (ret)
- return ret;
-
- /* Send all the required SPI flash commands to initialize device */
- ret = spi_nor_init(nor);
- if (ret)
- return ret;
-
- dev_info(dev, "%s (%lld Kbytes)\n", info->name,
- (long long)mtd->size >> 10);
-
- dev_dbg(dev,
- "mtd .name = %s, .size = 0x%llx (%lldMiB), "
- ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
- mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
- mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
-
- if (mtd->numeraseregions)
- for (i = 0; i < mtd->numeraseregions; i++)
- dev_dbg(dev,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].erasesize / 1024,
- mtd->eraseregions[i].numblocks);
- return 0;
-}
-EXPORT_SYMBOL_GPL(spi_nor_scan);
-
-static int spi_nor_probe(struct spi_mem *spimem)
-{
- struct spi_device *spi = spimem->spi;
- struct flash_platform_data *data = dev_get_platdata(&spi->dev);
- struct spi_nor *nor;
- /*
- * Enable all caps by default. The core will mask them after
- * checking what's really supported using spi_mem_supports_op().
- */
- const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
- char *flash_name;
- int ret;
-
- nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
- if (!nor)
- return -ENOMEM;
-
- nor->spimem = spimem;
- nor->dev = &spi->dev;
- spi_nor_set_flash_node(nor, spi->dev.of_node);
-
- spi_mem_set_drvdata(spimem, nor);
-
- if (data && data->name)
- nor->mtd.name = data->name;
-
- if (!nor->mtd.name)
- nor->mtd.name = spi_mem_get_name(spimem);
-
- /*
- * For some (historical?) reason many platforms provide two different
- * names in flash_platform_data: "name" and "type". Quite often name is
- * set to "m25p80" and then "type" provides a real chip name.
- * If that's the case, respect "type" and ignore the "name".
- */
- if (data && data->type)
- flash_name = data->type;
- else if (!strcmp(spi->modalias, "spi-nor"))
- flash_name = NULL; /* auto-detect */
- else
- flash_name = spi->modalias;
-
- ret = spi_nor_scan(nor, flash_name, &hwcaps);
- if (ret)
- return ret;
-
- /*
- * None of the existing parts have > 512B pages, but let's play safe
- * and add this logic so that if anyone ever adds support for such
- * a NOR we don't end up with buffer overflows.
- */
- if (nor->page_size > PAGE_SIZE) {
- nor->bouncebuf_size = nor->page_size;
- devm_kfree(nor->dev, nor->bouncebuf);
- nor->bouncebuf = devm_kmalloc(nor->dev,
- nor->bouncebuf_size,
- GFP_KERNEL);
- if (!nor->bouncebuf)
- return -ENOMEM;
- }
-
- return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
- data ? data->nr_parts : 0);
-}
-
-static int spi_nor_remove(struct spi_mem *spimem)
-{
- struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
- spi_nor_restore(nor);
-
- /* Clean up MTD stuff. */
- return mtd_device_unregister(&nor->mtd);
-}
-
-static void spi_nor_shutdown(struct spi_mem *spimem)
-{
- struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
- spi_nor_restore(nor);
-}
-
-/*
- * Do NOT add to this array without reading the following:
- *
- * Historically, many flash devices are bound to this driver by their name. But
- * since most of these flashes are compatible to some extent, and their
- * differences can often be distinguished by the JEDEC read-ID command, we
- * encourage new users to add support to the spi-nor library, and simply bind
- * against a generic string here (e.g., "jedec,spi-nor").
- *
- * Many flash names are kept here in this list (as well as in spi-nor.c) to
- * keep them available as module aliases for existing platforms.
- */
-static const struct spi_device_id spi_nor_dev_ids[] = {
- /*
- * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
- * hack around the fact that the SPI core does not provide uevent
- * matching for .of_match_table
- */
- {"spi-nor"},
-
- /*
- * Entries not used in DTs that should be safe to drop after replacing
- * them with "spi-nor" in platform data.
- */
- {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
-
- /*
- * Entries that were used in DTs without "jedec,spi-nor" fallback and
- * should be kept for backward compatibility.
- */
- {"at25df321a"}, {"at25df641"}, {"at26df081a"},
- {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
- {"mx25l25635e"},{"mx66l51235l"},
- {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
- {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
- {"s25fl064k"},
- {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
- {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
- {"m25p64"}, {"m25p128"},
- {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
- {"w25q80bl"}, {"w25q128"}, {"w25q256"},
-
- /* Flashes that can't be detected using JEDEC */
- {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
- {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
- {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
-
- /* Everspin MRAMs (non-JEDEC) */
- { "mr25h128" }, /* 128 Kib, 40 MHz */
- { "mr25h256" }, /* 256 Kib, 40 MHz */
- { "mr25h10" }, /* 1 Mib, 40 MHz */
- { "mr25h40" }, /* 4 Mib, 40 MHz */
-
- { },
-};
-MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
-
-static const struct of_device_id spi_nor_of_table[] = {
- /*
- * Generic compatibility for SPI NOR that can be identified by the
- * JEDEC READ ID opcode (0x9F). Use this, if possible.
- */
- { .compatible = "jedec,spi-nor" },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, spi_nor_of_table);
-
-/*
- * REVISIT: many of these chips have deep power-down modes, which
- * should clearly be entered on suspend() to minimize power use.
- * And also when they're otherwise idle...
- */
-static struct spi_mem_driver spi_nor_driver = {
- .spidrv = {
- .driver = {
- .name = "spi-nor",
- .of_match_table = spi_nor_of_table,
- },
- .id_table = spi_nor_dev_ids,
- },
- .probe = spi_nor_probe,
- .remove = spi_nor_remove,
- .shutdown = spi_nor_shutdown,
-};
-module_spi_mem_driver(spi_nor_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
-MODULE_AUTHOR("Mike Lavender");
-MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
new file mode 100644
index 000000000000..e0af6d25d573
--- /dev/null
+++ b/drivers/mtd/spi-nor/sst.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info sst_parts[] = {
+ /* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16,
+ SECT_4K | SST_WRITE) },
+ { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+};
+
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t actual = 0;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ nor->sst_write_second = false;
+
+ /* Start write from odd address. */
+ if (to % 2) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ ret = spi_nor_write_data(nor, to, 1, buf);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ to++;
+ actual++;
+ }
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ ret = spi_nor_write_data(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ goto out;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ nor->program_opcode = SPINOR_OP_BP;
+ ret = spi_nor_write_data(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ actual += 1;
+
+ ret = spi_nor_write_disable(nor);
+ }
+out:
+ *retlen += actual;
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static void sst_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void sst_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->info->flags & SST_WRITE)
+ nor->mtd._write = sst_write;
+}
+
+static const struct spi_nor_fixups sst_fixups = {
+ .default_init = sst_default_init,
+ .post_sfdp = sst_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_sst = {
+ .name = "sst",
+ .parts = sst_parts,
+ .nparts = ARRAY_SIZE(sst_parts),
+ .fixups = &sst_fixups,
+};
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
new file mode 100644
index 000000000000..17deabad57e1
--- /dev/null
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info winbond_parts[] = {
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_HAS_TB) },
+ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
+};
+
+/**
+ * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_set_4byte_addr_mode(nor, enable);
+ if (ret || enable)
+ return ret;
+
+ /*
+ * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
+ * Register to be set to 1, so all 3-byte-address reads come from the
+ * second 16M. We must clear the register to enable normal behavior.
+ */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_ear(nor, 0);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
+
+static void winbond_default_init(struct spi_nor *nor)
+{
+ nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups winbond_fixups = {
+ .default_init = winbond_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_winbond = {
+ .name = "winbond",
+ .parts = winbond_parts,
+ .nparts = ARRAY_SIZE(winbond_parts),
+ .fixups = &winbond_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
new file mode 100644
index 000000000000..1138bdbf4199
--- /dev/null
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xilinx_parts[] = {
+ /* Xilinx S3AN Internal Flash */
+ { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+ { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+ { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+};
+
+/*
+ * This code converts an address to the Default Address Mode, which has
+ * non-power-of-two page sizes. We must support this mode because it is the default
+ * mode supported by Xilinx tools, it can access the whole flash area and
+ * changing over to the Power-of-two mode is irreversible and corrupts the
+ * original data.
+ * Addr can safely be an unsigned int; the biggest S3AN device is smaller than
+ * 4 MiB.
+ */
+static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
+{
+ u32 offset, page;
+
+ offset = addr % nor->page_size;
+ page = addr / nor->page_size;
+ page <<= (nor->page_size > 512) ? 10 : 9;
+
+ return page | offset;
+}
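To make the conversion concrete (hypothetical input): with a 264-byte default-mode page, byte address 1000 falls in page 3 at offset 208; pages sit on 512-byte boundaries, so the device address becomes (3 << 9) | 208 = 1744. A standalone sketch:

	#include <stdint.h>

	static uint32_t example_s3an_addr(void)
	{
		uint32_t addr = 1000, page_size = 264;	/* hypothetical */
		uint32_t page = addr / page_size;	/* 3 */
		uint32_t offset = addr % page_size;	/* 208 */

		return (page << 9) | offset;		/* 1744 */
	}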
+
+static int xilinx_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ int ret;
+
+ ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ nor->erase_opcode = SPINOR_OP_XSE;
+ nor->program_opcode = SPINOR_OP_XPP;
+ nor->read_opcode = SPINOR_OP_READ;
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ /*
+ * These flashes have a page size of 264 or 528 bytes (known as
+ * Default addressing mode). It can be changed to a more standard
+ * Power-of-two mode where the page size is 256/512. This comes
+ * at a price: there is 3% less space, the data is corrupted,
+ * and the page size cannot be changed back to Default addressing
+ * mode.
+ *
+ * The current addressing mode can be read from the XRDSR register
+ * and should not be changed, because changing it is a destructive operation.
+ */
+ if (nor->bouncebuf[0] & XSR_PAGESIZE) {
+ /* Flash in Power of 2 mode */
+ nor->page_size = (nor->page_size == 264) ? 256 : 512;
+ nor->mtd.writebufsize = nor->page_size;
+ nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
+ nor->mtd.erasesize = 8 * nor->page_size;
+ } else {
+ /* Flash in Default addressing mode */
+ nor->params->convert_addr = s3an_convert_addr;
+ nor->mtd.erasesize = nor->info->sector_size;
+ }
+
+ return 0;
+}
+
+static void xilinx_post_sfdp_fixups(struct spi_nor *nor)
+{
+ nor->params->setup = xilinx_nor_setup;
+}
+
+static const struct spi_nor_fixups xilinx_fixups = {
+ .post_sfdp = xilinx_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_xilinx = {
+ .name = "xilinx",
+ .parts = xilinx_parts,
+ .nparts = ARRAY_SIZE(xilinx_parts),
+ .fixups = &xilinx_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
new file mode 100644
index 000000000000..2c7773b68993
--- /dev/null
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xmc_parts[] = {
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+};
+
+const struct spi_nor_manufacturer spi_nor_xmc = {
+ .name = "xmc",
+ .parts = xmc_parts,
+ .nparts = ARRAY_SIZE(xmc_parts),
+};
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index ea7440ac913b..ae5abe492b52 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1059,7 +1059,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
* be a result of power cut during erasure.
*/
ai->maybe_bad_peb_count += 1;
- /* fall through */
+ fallthrough;
case UBI_IO_BAD_HDR:
/*
* If we're facing a bad VID header we have to drop *all*
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 2f93c25bbaee..12c02342149c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1342,10 +1342,10 @@ static int bytes_str_to_int(const char *str)
switch (*endp) {
case 'G':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'M':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'K':
result *= 1024;
if (endp[1] == 'i' && endp[2] == 'B')
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 54646c2c2744..ac2bdba8bb1a 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -393,9 +393,6 @@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos)
{
struct ubi_device *ubi = s->private;
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
if (*pos < ubi->peb_count)
return pos;
@@ -409,8 +406,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct ubi_device *ubi = s->private;
- if (v == SEQ_START_TOKEN)
- return pos;
(*pos)++;
if (*pos < ubi->peb_count)
@@ -432,11 +427,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter)
int err;
/* If this is the start, print a header */
- if (iter == SEQ_START_TOKEN) {
- seq_puts(s,
- "physical_block_number\terase_count\tblock_status\tread_status\n");
- return 0;
- }
+ if (*block_number == 0)
+ seq_puts(s, "physical_block_number\terase_count\n");
err = ubi_io_is_bad(ubi, *block_number);
if (err)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 426820ab9afe..b486250923c5 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
return victim;
}
+static inline void return_unused_peb(struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+}
+
/**
* return_unused_pool_pebs - returns unused PEB to the free tree.
* @ubi: UBI device description object
@@ -52,8 +59,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
for (i = pool->used; i < pool->size; i++) {
e = ubi->lookuptbl[pool->pebs[i]];
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
+ return_unused_peb(ubi, e);
}
}
@@ -361,6 +367,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+ if (ubi->fm_anchor) {
+ return_unused_peb(ubi, ubi->fm_anchor);
+ ubi->fm_anchor = NULL;
+ }
+
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index b5fe8f82281b..386db0598e95 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -498,6 +498,6 @@ struct ubi_fm_volhdr {
struct ubi_fm_eba {
__be32 magic;
__be32 reserved_pebs;
- __be32 pnum[0];
+ __be32 pnum[];
} __packed;
#endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 837d690a8c60..5146cce5fe32 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1875,7 +1875,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
- ubi_ensure_anchor_pebs(ubi);
+ if (!ubi->ro_mode && !ubi->fm_disabled)
+ ubi_ensure_anchor_pebs(ubi);
#endif
return 0;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index db8884ad6d40..b103fbdd0f68 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -259,6 +259,19 @@ config GENEVE
To compile this driver as a module, choose M here: the module
will be called geneve.
+config BAREUDP
+ tristate "Bare UDP Encapsulation"
+ depends on INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select GRO_CELLS
+ help
+ This adds a bare UDP tunnel module for tunnelling different
+ kinds of traffic like MPLS, IP, etc. over UDP.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bareudp.
+
config GTP
tristate "GPRS Tunneling Protocol datapath (GTP-U)"
depends on INET
@@ -432,6 +445,8 @@ source "drivers/net/fddi/Kconfig"
source "drivers/net/hippi/Kconfig"
+source "drivers/net/ipa/Kconfig"
+
config NET_SB1000
tristate "General Instruments Surfboard 1000"
depends on PNP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 71b88ffc5587..94b60800887a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
obj-$(CONFIG_HAMRADIO) += hamradio/
+obj-$(CONFIG_QCOM_IPA) += ipa/
obj-$(CONFIG_PLIP) += plip/
obj-$(CONFIG_PPP) += ppp/
obj-$(CONFIG_PPP_ASYNC) += ppp/
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
new file mode 100644
index 000000000000..efd1a1d1f35e
--- /dev/null
+++ b/drivers/net/bareudp.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Bareudp: UDP tunnel encapsulation for different payload types like
+ * MPLS, NSH, IP, etc.
+ * Copyright (c) 2019 Nokia, Inc.
+ * Authors: Martin Varghese, <martin.varghese@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/hash.h>
+#include <net/dst_metadata.h>
+#include <net/gro_cells.h>
+#include <net/rtnetlink.h>
+#include <net/protocol.h>
+#include <net/ip6_tunnel.h>
+#include <net/ip_tunnels.h>
+#include <net/udp_tunnel.h>
+#include <net/bareudp.h>
+
+#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
+#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr))
+#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
+ sizeof(struct udphdr))
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-network namespace private data for this module */
+
+static unsigned int bareudp_net_id;
+
+struct bareudp_net {
+ struct list_head bareudp_list;
+};
+
+/* Pseudo network device */
+struct bareudp_dev {
+ struct net *net; /* netns for packet i/o */
+ struct net_device *dev; /* netdev for bareudp tunnel */
+ __be16 ethertype;
+ __be16 port;
+ u16 sport_min;
+ bool multi_proto_mode;
+ struct socket __rcu *sock;
+ struct list_head next; /* bareudp node on namespace list */
+ struct gro_cells gro_cells;
+};
+
+static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct metadata_dst *tun_dst = NULL;
+ struct pcpu_sw_netstats *stats;
+ struct bareudp_dev *bareudp;
+ unsigned short family;
+ unsigned int len;
+ __be16 proto;
+ void *oiph;
+ int err;
+
+ bareudp = rcu_dereference_sk_user_data(sk);
+ if (!bareudp)
+ goto drop;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ family = AF_INET;
+ else
+ family = AF_INET6;
+
+ if (bareudp->ethertype == htons(ETH_P_IP)) {
+ struct iphdr *iphdr;
+
+ iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
+ if (iphdr->version == 4) {
+ proto = bareudp->ethertype;
+ } else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
+ proto = htons(ETH_P_IPV6);
+ } else {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
+ struct iphdr *tunnel_hdr;
+
+ tunnel_hdr = (struct iphdr *)skb_network_header(skb);
+ if (tunnel_hdr->version == 4) {
+ if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
+ proto = bareudp->ethertype;
+ } else if (bareudp->multi_proto_mode &&
+ ipv4_is_multicast(tunnel_hdr->daddr)) {
+ proto = htons(ETH_P_MPLS_MC);
+ } else {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ } else {
+ int addr_type;
+ struct ipv6hdr *tunnel_hdr_v6;
+
+ tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
+ addr_type =
+ ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
+ if (!(addr_type & IPV6_ADDR_MULTICAST)) {
+ proto = bareudp->ethertype;
+ } else if (bareudp->multi_proto_mode &&
+ (addr_type & IPV6_ADDR_MULTICAST)) {
+ proto = htons(ETH_P_MPLS_MC);
+ } else {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ }
+ } else {
+ proto = bareudp->ethertype;
+ }
+
+ if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
+ proto,
+ !net_eq(bareudp->net,
+ dev_net(bareudp->dev)))) {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+
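+	/* bareudp operates in collect-metadata mode only: attach a
+	 * metadata dst describing the outer tunnel headers for the
+	 * upper layers.
+	 */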
+ tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+ if (!tun_dst) {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ skb_dst_set(skb, &tun_dst->dst);
+ skb->dev = bareudp->dev;
+ oiph = skb_network_header(skb);
+ skb_reset_network_header(skb);
+
+ if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ err = IP_ECN_decapsulate(oiph, skb);
+ else
+ err = IP6_ECN_decapsulate(oiph, skb);
+
+ if (unlikely(err)) {
+ if (log_ecn_error) {
+ if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ net_info_ratelimited("non-ECT from %pI4 "
+ "with TOS=%#x\n",
+ &((struct iphdr *)oiph)->saddr,
+ ((struct iphdr *)oiph)->tos);
+ else
+ net_info_ratelimited("non-ECT from %pI6\n",
+ &((struct ipv6hdr *)oiph)->saddr);
+ }
+ if (err > 1) {
+ ++bareudp->dev->stats.rx_frame_errors;
+ ++bareudp->dev->stats.rx_errors;
+ goto drop;
+ }
+ }
+
+ len = skb->len;
+ err = gro_cells_receive(&bareudp->gro_cells, skb);
+ if (likely(err == NET_RX_SUCCESS)) {
+ stats = this_cpu_ptr(bareudp->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ }
+ return 0;
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+
+ return 0;
+}
+
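+/* Stub lookup handler for ICMP errors received on the tunnel socket */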
+static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
+static int bareudp_init(struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ int err;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ err = gro_cells_init(&bareudp->gro_cells, dev);
+ if (err) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+ return 0;
+}
+
+static void bareudp_uninit(struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ gro_cells_destroy(&bareudp->gro_cells);
+ free_percpu(dev->tstats);
+}
+
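+/* Open the kernel UDP socket backing the tunnel.  With IPv6 enabled an
+ * AF_INET6 socket is created, which also receives IPv4 traffic via
+ * v4-mapped addresses, so a single socket serves both families.
+ */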
+static struct socket *bareudp_create_sock(struct net *net, __be16 port)
+{
+ struct udp_port_cfg udp_conf;
+ struct socket *sock;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+#if IS_ENABLED(CONFIG_IPV6)
+ udp_conf.family = AF_INET6;
+#else
+ udp_conf.family = AF_INET;
+#endif
+ udp_conf.local_udp_port = port;
+ /* Open UDP socket */
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return sock;
+}
+
+/* Create new listen socket if needed */
+static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
+{
+ struct udp_tunnel_sock_cfg tunnel_cfg;
+ struct socket *sock;
+
+ sock = bareudp_create_sock(bareudp->net, port);
+ if (IS_ERR(sock))
+ return PTR_ERR(sock);
+
+ /* Mark socket as an encapsulation socket */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
+ tunnel_cfg.sk_user_data = bareudp;
+ tunnel_cfg.encap_type = 1;
+ tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
+ tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
+ tunnel_cfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
+
+	/* setup_udp_tunnel_sock() does not call udp_encap_enable() for
+	 * AF_INET6 sockets, so an explicit call is needed here.
+	 */
+ if (sock->sk->sk_family == AF_INET6)
+ udp_encap_enable();
+
+ rcu_assign_pointer(bareudp->sock, sock);
+ return 0;
+}
+
+static int bareudp_open(struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ int ret = 0;
+
+ ret = bareudp_socket_create(bareudp, bareudp->port);
+ return ret;
+}
+
+static void bareudp_sock_release(struct bareudp_dev *bareudp)
+{
+ struct socket *sock;
+
+ sock = bareudp->sock;
+ rcu_assign_pointer(bareudp->sock, NULL);
+ synchronize_net();
+ udp_tunnel_sock_release(sock);
+}
+
+static int bareudp_stop(struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ bareudp_sock_release(bareudp);
+ return 0;
+}
+
+static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct bareudp_dev *bareudp,
+ const struct ip_tunnel_info *info)
+{
+ bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct socket *sock = rcu_dereference(bareudp->sock);
+ bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
+ const struct ip_tunnel_key *key = &info->key;
+ struct rtable *rt;
+ __be16 sport, df;
+ int min_headroom;
+ __u8 tos, ttl;
+ __be32 saddr;
+ int err;
+
+ if (!sock)
+ return -ESHUTDOWN;
+
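+	/* Route the outer IPv4 packet according to the per-skb tunnel key */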
+ rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
+ IPPROTO_UDP, use_cache);
+
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ skb_tunnel_check_pmtu(skb, &rt->dst,
+ BAREUDP_IPV4_HLEN + info->options_len);
+
+ sport = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min, USHRT_MAX,
+ true);
+ tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+ ttl = key->ttl;
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ skb_scrub_packet(skb, xnet);
+
+ err = -ENOSPC;
+ if (!skb_pull(skb, skb_network_offset(skb)))
+ goto free_dst;
+
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ goto free_dst;
+
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
+ goto free_dst;
+
+ skb_set_inner_protocol(skb, bareudp->ethertype);
+ udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
+ tos, ttl, df, sport, bareudp->port,
+ !net_eq(bareudp->net, dev_net(bareudp->dev)),
+ !(info->key.tun_flags & TUNNEL_CSUM));
+ return 0;
+
+free_dst:
+ dst_release(&rt->dst);
+ return err;
+}
+
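+/* IPv6 transmit path; mirrors bareudp_xmit_skb() with an IPv6 route
+ * lookup and udp_tunnel6_xmit_skb().
+ */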
+static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct bareudp_dev *bareudp,
+ const struct ip_tunnel_info *info)
+{
+ bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct socket *sock = rcu_dereference(bareudp->sock);
+ bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
+ const struct ip_tunnel_key *key = &info->key;
+ struct dst_entry *dst = NULL;
+ struct in6_addr saddr, daddr;
+ int min_headroom;
+ __u8 prio, ttl;
+ __be16 sport;
+ int err;
+
+ if (!sock)
+ return -ESHUTDOWN;
+
+ dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
+ IPPROTO_UDP, use_cache);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len);
+
+ sport = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min, USHRT_MAX,
+ true);
+ prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+ ttl = key->ttl;
+
+ skb_scrub_packet(skb, xnet);
+
+ err = -ENOSPC;
+ if (!skb_pull(skb, skb_network_offset(skb)))
+ goto free_dst;
+
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
+		       BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
+
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ goto free_dst;
+
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
+ goto free_dst;
+
+ daddr = info->key.u.ipv6.dst;
+ udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
+ &saddr, &daddr, prio, ttl,
+ info->key.label, sport, bareudp->port,
+ !(info->key.tun_flags & TUNNEL_CSUM));
+ return 0;
+
+free_dst:
+ dst_release(dst);
+ return err;
+}
+
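+/* ndo_start_xmit: validate the skb against the device configuration,
+ * then hand it to the IPv4 or IPv6 encapsulation path depending on the
+ * tunnel metadata attached to the skb.
+ */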
+static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ struct ip_tunnel_info *info = NULL;
+ int err;
+
+ if (skb->protocol != bareudp->ethertype) {
+ if (!bareudp->multi_proto_mode ||
+ (skb->protocol != htons(ETH_P_MPLS_MC) &&
+ skb->protocol != htons(ETH_P_IPV6))) {
+ err = -EINVAL;
+ goto tx_error;
+ }
+ }
+
+ info = skb_tunnel_info(skb);
+ if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
+ err = -EINVAL;
+ goto tx_error;
+ }
+
+ rcu_read_lock();
+ if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
+ err = bareudp6_xmit_skb(skb, dev, bareudp, info);
+ else
+ err = bareudp_xmit_skb(skb, dev, bareudp, info);
+
+ rcu_read_unlock();
+
+ if (likely(!err))
+ return NETDEV_TX_OK;
+tx_error:
+ dev_kfree_skb(skb);
+
+ if (err == -ELOOP)
+ dev->stats.collisions++;
+ else if (err == -ENETUNREACH)
+ dev->stats.tx_carrier_errors++;
+
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+}
+
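+/* ndo_fill_metadata_dst: pre-populate the source address and the UDP
+ * ports in the tunnel metadata so that users such as tc can inspect
+ * the egress tuple before transmission.
+ */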
+static int bareudp_fill_metadata_dst(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ bool use_cache;
+
+ use_cache = ip_tunnel_dst_cache_usable(skb, info);
+
+ if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
+ struct rtable *rt;
+ __be32 saddr;
+
+ rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
+ info, IPPROTO_UDP, use_cache);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ ip_rt_put(rt);
+ info->key.u.ipv4.src = saddr;
+ } else if (ip_tunnel_info_af(info) == AF_INET6) {
+ struct dst_entry *dst;
+ struct in6_addr saddr;
+ struct socket *sock = rcu_dereference(bareudp->sock);
+
+ dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
+ &saddr, info, IPPROTO_UDP,
+ use_cache);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ dst_release(dst);
+ info->key.u.ipv6.src = saddr;
+ } else {
+ return -EINVAL;
+ }
+
+ info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min,
+ USHRT_MAX, true);
+ info->key.tp_dst = bareudp->port;
+ return 0;
+}
+
+static const struct net_device_ops bareudp_netdev_ops = {
+ .ndo_init = bareudp_init,
+ .ndo_uninit = bareudp_uninit,
+ .ndo_open = bareudp_open,
+ .ndo_stop = bareudp_stop,
+ .ndo_start_xmit = bareudp_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
+};
+
+static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
+ [IFLA_BAREUDP_PORT] = { .type = NLA_U16 },
+ [IFLA_BAREUDP_ETHERTYPE] = { .type = NLA_U16 },
+ [IFLA_BAREUDP_SRCPORT_MIN] = { .type = NLA_U16 },
+ [IFLA_BAREUDP_MULTIPROTO_MODE] = { .type = NLA_FLAG },
+};
+
+/* Inform udev that this is a virtual tunnel endpoint */
+static struct device_type bareudp_type = {
+ .name = "bareudp",
+};
+
+/* Initialize the device structure. */
+static void bareudp_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &bareudp_netdev_ops;
+ dev->needs_free_netdev = true;
+ SET_NETDEV_DEVTYPE(dev, &bareudp_type);
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->mtu = ETH_DATA_LEN;
+ dev->min_mtu = IPV4_MIN_MTU;
+ dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
+ dev->type = ARPHRD_NONE;
+ netif_keep_dst(dev);
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+}
+
+static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (!data) {
+ NL_SET_ERR_MSG(extack,
+ "Not enough attributes provided to perform the operation");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
+ struct netlink_ext_ack *extack)
+{
+ if (!data[IFLA_BAREUDP_PORT]) {
+ NL_SET_ERR_MSG(extack, "port not specified");
+ return -EINVAL;
+ }
+ if (!data[IFLA_BAREUDP_ETHERTYPE]) {
+ NL_SET_ERR_MSG(extack, "ethertype not specified");
+ return -EINVAL;
+ }
+
+ if (data[IFLA_BAREUDP_PORT])
+ conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
+
+ if (data[IFLA_BAREUDP_ETHERTYPE])
+ conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
+
+ if (data[IFLA_BAREUDP_SRCPORT_MIN])
+ conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
+
+ return 0;
+}
+
+static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
+ const struct bareudp_conf *conf)
+{
+ struct bareudp_dev *bareudp, *t = NULL;
+
+ list_for_each_entry(bareudp, &bn->bareudp_list, next) {
+ if (conf->port == bareudp->port)
+ t = bareudp;
+ }
+ return t;
+}
+
+static int bareudp_configure(struct net *net, struct net_device *dev,
+ struct bareudp_conf *conf)
+{
+ struct bareudp_net *bn = net_generic(net, bareudp_net_id);
+ struct bareudp_dev *t, *bareudp = netdev_priv(dev);
+ int err;
+
+ bareudp->net = net;
+ bareudp->dev = dev;
+ t = bareudp_find_dev(bn, conf);
+ if (t)
+ return -EBUSY;
+
+ if (conf->multi_proto_mode &&
+ (conf->ethertype != htons(ETH_P_MPLS_UC) &&
+ conf->ethertype != htons(ETH_P_IP)))
+ return -EINVAL;
+
+ bareudp->port = conf->port;
+ bareudp->ethertype = conf->ethertype;
+ bareudp->sport_min = conf->sport_min;
+ bareudp->multi_proto_mode = conf->multi_proto_mode;
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ list_add(&bareudp->next, &bn->bareudp_list);
+ return 0;
+}
+
+static int bareudp_link_config(struct net_device *dev,
+ struct nlattr *tb[])
+{
+ int err;
+
+ if (tb[IFLA_MTU]) {
+ err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int bareudp_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct bareudp_conf conf;
+ int err;
+
+ err = bareudp2info(data, &conf, extack);
+ if (err)
+ return err;
+
+ err = bareudp_configure(net, dev, &conf);
+ if (err)
+ return err;
+
+ err = bareudp_link_config(dev, tb);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ list_del(&bareudp->next);
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t bareudp_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_PORT */
+ nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */
+ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */
+ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */
+ 0;
+}
+
+static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
+ goto nla_put_failure;
+ if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
+ goto nla_put_failure;
+ if (bareudp->multi_proto_mode &&
+ nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
+ .kind = "bareudp",
+ .maxtype = IFLA_BAREUDP_MAX,
+ .policy = bareudp_policy,
+ .priv_size = sizeof(struct bareudp_dev),
+ .setup = bareudp_setup,
+ .validate = bareudp_validate,
+ .newlink = bareudp_newlink,
+ .dellink = bareudp_dellink,
+ .get_size = bareudp_get_size,
+ .fill_info = bareudp_fill_info,
+};
+
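+/* Devices are normally created over rtnetlink; with a recent iproute2
+ * something like the following should work (illustrative only):
+ *
+ *   ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
+ *
+ * bareudp_dev_create() offers the same facility to in-kernel users.
+ */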
+struct net_device *bareudp_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct bareudp_conf *conf)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ LIST_HEAD(list_kill);
+ int err;
+
+ memset(tb, 0, sizeof(tb));
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &bareudp_link_ops, tb, NULL);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = bareudp_configure(net, dev, conf);
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+ err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
+ if (err)
+ goto err;
+
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto err;
+
+ return dev;
+err:
+ bareudp_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(bareudp_dev_create);
+
+static __net_init int bareudp_init_net(struct net *net)
+{
+ struct bareudp_net *bn = net_generic(net, bareudp_net_id);
+
+ INIT_LIST_HEAD(&bn->bareudp_list);
+ return 0;
+}
+
+static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
+{
+ struct bareudp_net *bn = net_generic(net, bareudp_net_id);
+ struct bareudp_dev *bareudp, *next;
+
+ list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
+ unregister_netdevice_queue(bareudp->dev, head);
+}
+
+static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
+{
+ struct net *net;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list)
+ bareudp_destroy_tunnels(net, &list);
+
+ /* unregister the devices gathered above */
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations bareudp_net_ops = {
+ .init = bareudp_init_net,
+ .exit_batch = bareudp_exit_batch_net,
+ .id = &bareudp_net_id,
+ .size = sizeof(struct bareudp_net),
+};
+
+static int __init bareudp_init_module(void)
+{
+ int rc;
+
+ rc = register_pernet_subsys(&bareudp_net_ops);
+ if (rc)
+ goto out1;
+
+ rc = rtnl_link_register(&bareudp_link_ops);
+ if (rc)
+ goto out2;
+
+ return 0;
+out2:
+ unregister_pernet_subsys(&bareudp_net_ops);
+out1:
+ return rc;
+}
+late_initcall(bareudp_init_module);
+
+static void __exit bareudp_cleanup_module(void)
+{
+ rtnl_link_unregister(&bareudp_link_ops);
+ unregister_pernet_subsys(&bareudp_net_ops);
+}
+module_exit(bareudp_cleanup_module);
+
+MODULE_ALIAS_RTNL_LINK("bareudp");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
+MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d10805e5e623..2e70e43c5df5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1265,7 +1265,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
skb->dev = bond->dev;
if (BOND_MODE(bond) == BOND_MODE_ALB &&
- bond->dev->priv_flags & IFF_BRIDGE_PORT &&
+ netif_is_bridge_port(bond->dev) &&
skb->pkt_type == PACKET_HOST) {
if (unlikely(skb_cow_head(skb,
@@ -4370,7 +4370,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
@@ -5008,8 +5007,6 @@ static int __init bonding_init(void)
int i;
int res;
- pr_info("%s", bond_version);
-
res = bond_check_params(&bonding_defaults);
if (res)
goto out;
@@ -5064,6 +5061,5 @@ static void __exit bonding_exit(void)
module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 007481557191..9b8346638f69 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave)
err = kobject_init_and_add(&slave->kobj, &slave_ktype,
&(slave->dev->dev.kobj), "bonding_slave");
- if (err)
+ if (err) {
+ kobject_put(&slave->kobj);
return err;
+ }
for (a = slave_attrs; *a; ++a) {
err = sysfs_create_file(&slave->kobj, &((*a)->attr));
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
index 5a4d81a9437c..45b77bc8c7b3 100644
--- a/drivers/net/bonding/bonding_priv.h
+++ b/drivers/net/bonding/bonding_priv.h
@@ -14,12 +14,11 @@
#ifndef _BONDING_PRIV_H
#define _BONDING_PRIV_H
+#include <linux/vermagic.h>
-#define DRV_VERSION "3.7.1"
-#define DRV_RELDATE "April 27, 2011"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
-#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+#define bond_version DRV_DESCRIPTION ": v" UTS_RELEASE "\n"
#endif
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index e74e2bb61236..661c25eb1c46 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -50,7 +50,7 @@ config CAIF_HSI
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
- depends on CAIF && HAS_DMA
+ depends on CAIF && HAS_DMA && VHOST_DPN
select VHOST_RING
select VIRTIO
select GENERIC_ALLOCATOR
@@ -58,8 +58,4 @@ config CAIF_VIRTIO
---help---
The CAIF driver for CAIF over Virtio.
-if CAIF_VIRTIO
-source "drivers/vhost/Kconfig.vringh"
-endif
-
endif # CAIF_DRIVERS
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 04d59bede5ea..74503cacf594 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -947,8 +947,11 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
u32 id, rev;
addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
irq = platform_get_irq(pdev, 0);
- if (IS_ERR(addr) || irq < 0)
+ if (irq < 0)
return -EINVAL;
id = readl(addr + IFI_CANFD_IP_ID);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a3664281a33f..91cdc0a2b1a7 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
- cf.can_id = 0;
+ memset(&cf, 0, sizeof(cf));
switch (*cmd) {
case 'r':
@@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl)
else
return;
- *(u64 *) (&cf.data) = 0; /* clear payload */
-
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf.can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf.can_dlc; i++) {
@@ -348,11 +346,8 @@ static void slcan_write_wakeup(struct tty_struct *tty)
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
- if (!sl)
- goto out;
-
- schedule_work(&sl->tx_work);
-out:
+ if (sl)
+ schedule_work(&sl->tx_work);
rcu_read_unlock();
}
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index e3ba8ab0cbf4..e2c6cf4b2228 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -792,7 +792,7 @@ static int sun4ican_probe(struct platform_device *pdev)
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
- err = -EBUSY;
+ err = PTR_ERR(addr);
goto exit;
}
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 1a69286daa8d..c283593bef17 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -681,7 +681,9 @@ int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
struct b53_vlan vl = { 0 };
+ struct b53_vlan *v;
int i, def_vid;
+ u16 vid;
def_vid = b53_default_pvid(dev);
@@ -699,8 +701,18 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_write16(dev, B53_VLAN_PAGE,
B53_VLAN_PORT_DEF_TAG(i), def_vid);
- if (!is5325(dev) && !is5365(dev))
- b53_set_jumbo(dev, dev->enable_jumbo, false);
+ /* Upon initial call we have not set-up any VLANs, but upon
+ * system resume, we need to restore all VLAN entries.
+ */
+ for (vid = def_vid; vid < dev->num_vlans; vid++) {
+ v = &dev->vlans[vid];
+
+ if (!v->members)
+ continue;
+
+ b53_set_vlan_entry(dev, vid, v);
+ b53_fast_age_vlan(dev, vid);
+ }
return 0;
}
@@ -807,8 +819,6 @@ static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
static int b53_reset_switch(struct b53_device *priv)
{
/* reset vlans */
- priv->enable_jumbo = false;
-
memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
@@ -1289,7 +1299,9 @@ EXPORT_SYMBOL(b53_phylink_mac_link_down);
void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct b53_device *dev = ds->priv;
@@ -1343,6 +1355,14 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
return -EOPNOTSUPP;
+ /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
+ * receiving VLAN tagged frames at all, we can still allow the port to
+ * be configured for egress untagged.
+ */
+ if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
+ !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ return -EINVAL;
+
if (vlan->vid_end > dev->num_vlans)
return -ERANGE;
@@ -1454,6 +1474,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
reg |= ARLTBL_RW;
else
reg &= ~ARLTBL_RW;
+ if (dev->vlan_enabled)
+ reg &= ~ARLTBL_IVL_SVL_SELECT;
+ else
+ reg |= ARLTBL_IVL_SVL_SELECT;
b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
return b53_arl_op_wait(dev);
@@ -1463,6 +1487,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
u16 vid, struct b53_arl_entry *ent, u8 *idx,
bool is_valid)
{
+ DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
unsigned int i;
int ret;
@@ -1470,6 +1495,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
if (ret)
return ret;
+ bitmap_zero(free_bins, dev->num_arl_entries);
+
/* Read the bins */
for (i = 0; i < dev->num_arl_entries; i++) {
u64 mac_vid;
@@ -1481,13 +1508,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
- if (!(fwd_entry & ARLTBL_VALID))
+ if (!(fwd_entry & ARLTBL_VALID)) {
+ set_bit(i, free_bins);
continue;
+ }
if ((mac_vid & ARLTBL_MAC_MASK) != mac)
continue;
+ if (dev->vlan_enabled &&
+ ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
+ continue;
*idx = i;
+ return 0;
}
+ if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
+ return -ENOSPC;
+
+ *idx = find_first_bit(free_bins, dev->num_arl_entries);
+
return -ENOENT;
}
@@ -1517,10 +1555,21 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
if (op)
return ret;
- /* We could not find a matching MAC, so reset to a new entry */
- if (ret) {
+ switch (ret) {
+ case -ENOSPC:
+ dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
+ addr, vid);
+ return is_valid ? ret : 0;
+ case -ENOENT:
+ /* We could not find a matching MAC, so reset to a new entry */
+ dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
+ addr, vid, idx);
fwd_entry = 0;
- idx = 1;
+ break;
+ default:
+ dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
+ addr, vid, idx);
+ break;
}
/* For multicast address, the port is a bitmask and the validity
@@ -1538,7 +1587,6 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
ent.is_valid = !!(ent.port);
}
- ent.is_valid = is_valid;
ent.vid = vid;
ent.is_static = true;
ent.is_age = false;
@@ -1708,6 +1756,12 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
u16 pvlan, reg;
unsigned int i;
+ /* On 7278, port 7 which connects to the ASP should only receive
+ * traffic from matching CFP rules.
+ */
+ if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
+ return -EINVAL;
+
/* Make this port leave the all VLANs join since we will have proper
* VLAN entries from now on
*/
@@ -2063,6 +2117,26 @@ int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
}
EXPORT_SYMBOL(b53_set_mac_eee);
+static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct b53_device *dev = ds->priv;
+ bool enable_jumbo;
+ bool allow_10_100;
+
+ if (is5325(dev) || is5365(dev))
+ return -EOPNOTSUPP;
+
+ enable_jumbo = (mtu >= JMS_MIN_SIZE);
+ allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+
+ return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
+}
+
+static int b53_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return JMS_MAX_SIZE;
+}
+
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
@@ -2100,6 +2174,8 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_mdb_prepare = b53_mdb_prepare,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
+ .port_max_mtu = b53_get_max_mtu,
+ .port_change_mtu = b53_change_mtu,
};
struct b53_chip_data {
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 3c30f3a7eb29..3d42318bc3f1 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -338,7 +338,9 @@ void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev);
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 2a9f421680aa..c90985c294a2 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -292,6 +292,7 @@
/* ARL Table Read/Write Register (8 bit) */
#define B53_ARLTBL_RW_CTRL 0x00
#define ARLTBL_RW BIT(0)
+#define ARLTBL_IVL_SVL_SELECT BIT(6)
#define ARLTBL_START_DONE BIT(7)
/* MAC Address Index Register (48 bit) */
@@ -304,7 +305,7 @@
*
* BCM5325 and BCM5365 share most definitions below
*/
-#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
+#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
#define ARLTBL_VID_MASK_25 0xff
@@ -316,13 +317,16 @@
#define ARLTBL_VALID_25 BIT(63)
/* ARL Table Data Entry N Registers (32 bit) */
-#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
+#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
#define ARLTBL_TC(tc) ((3 & tc) << 11)
#define ARLTBL_AGE BIT(14)
#define ARLTBL_STATIC BIT(15)
#define ARLTBL_VALID BIT(16)
+/* Maximum number of bin entries in the ARL for all switches */
+#define B53_ARLTBL_MAX_BIN_ENTRIES 4
+
/* ARL Search Control Register (8 bit) */
#define B53_ARL_SRCH_CTL 0x50
#define B53_ARL_SRCH_CTL_25 0x20
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 0a1be5259be0..38cd8285ac67 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -609,7 +609,7 @@ static int b53_srab_probe(struct platform_device *pdev)
priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
- return -ENOMEM;
+ return PTR_ERR(priv->regs);
dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
if (!dev)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b0f5280a83cb..c7ac63f41918 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -178,9 +178,17 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
core_writel(priv, reg, CORE_DIS_LEARN);
/* Enable Broadcom tags for that port if requested */
- if (priv->brcm_tag_mask & BIT(port))
+ if (priv->brcm_tag_mask & BIT(port)) {
b53_brcm_hdr_setup(ds, port);
+ /* Disable learning on ASP port */
+ if (port == 7) {
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
+ }
+ }
+
/* Configure Traffic Class to QoS mapping, allow each priority to map
* to a different queue number
*/
@@ -472,7 +480,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
- err = of_mdiobus_register(priv->slave_mii_bus, dn);
+ err = mdiobus_register(priv->slave_mii_bus);
if (err && dn)
of_node_put(dn);
@@ -648,7 +656,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
@@ -1069,6 +1079,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
const struct bcm_sf2_of_data *data;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
+ struct device_node *ports;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@@ -1136,7 +1147,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);
- bcm_sf2_identify_ports(priv, dn->child);
+ ports = of_find_node_by_name(dn, "ports");
+ if (ports) {
+ bcm_sf2_identify_ports(priv, ports);
+ of_node_put(ports);
+ }
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 1962c8330daa..f707edc641cf 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -13,6 +13,8 @@
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
+#include <net/switchdev.h>
+#include <uapi/linux/if_bridge.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -261,16 +263,27 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
struct flow_dissector_key_ipv4_addrs *addrs,
struct flow_dissector_key_ports *ports,
- unsigned int slice_num,
+ const __be16 vlan_tci,
+ unsigned int slice_num, u8 num_udf,
bool mask)
{
u32 reg, offset;
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
+ if (mask)
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
+ else
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(5));
+
/* C-Tag [31:24]
* UDF_n_A8 [23:8]
* UDF_n_A7 [7:0]
*/
- reg = 0;
+ reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
if (mask)
offset = CORE_CFP_MASK_PORT(4);
else
@@ -336,6 +349,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
struct ethtool_rx_flow_spec *fs)
{
struct ethtool_rx_flow_spec_input input = {};
+	__be16 vlan_tci = 0, vlan_m_tci = 0xffff;
const struct cfp_udf_layout *layout;
unsigned int slice_num, rule_index;
struct ethtool_rx_flow_rule *flow;
@@ -360,6 +374,12 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
+ /* Extract VLAN TCI */
+ if (fs->flow_type & FLOW_EXT) {
+ vlan_tci = fs->h_ext.vlan_tci;
+ vlan_m_tci = fs->m_ext.vlan_tci;
+ }
+
/* Locate the first rule available */
if (fs->location == RX_CLS_LOC_ANY)
rule_index = find_first_zero_bit(priv->cfp.used,
@@ -421,18 +441,11 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
core_writel(priv, layout->udfs[slice_num].mask_value |
udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));
- /* UDF_Valid[7:0] [31:24]
- * S-Tag [23:8]
- * C-Tag [7:0]
- */
- core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));
-
- /* Mask all but valid UDFs */
- core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
-
/* Program the match and the mask */
- bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
- bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
+ slice_num, num_udf, false);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
+ SLICE_NUM_MASK, num_udf, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index);
@@ -468,17 +481,29 @@ out_err_flow_rule:
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
const __be32 *ip6_addr, const __be16 port,
- unsigned int slice_num,
+ const __be16 vlan_tci,
+ unsigned int slice_num, u32 udf_bits,
bool mask)
{
u32 reg, tmp, val, offset;
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
+ if (mask)
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
+ else
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(5));
+
/* C-Tag [31:24]
* UDF_n_B8 [23:8] (port)
* UDF_n_B7 (upper) [7:0] (addr[15:8])
*/
reg = be32_to_cpu(ip6_addr[3]);
val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
+ val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
if (mask)
offset = CORE_CFP_MASK_PORT(4);
else
@@ -587,6 +612,11 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+ /* Compare VLAN TCI values as well */
+ if (rule->fs.flow_type & FLOW_EXT) {
+ ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
+ ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
+ }
if (ret == 0)
break;
}
@@ -600,6 +630,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
struct ethtool_rx_flow_spec *fs)
{
struct ethtool_rx_flow_spec_input input = {};
+ __be16 vlan_tci = 0, vlan_m_tci = 0xffff;
unsigned int slice_num, rule_index[2];
const struct cfp_udf_layout *layout;
struct ethtool_rx_flow_rule *flow;
@@ -623,6 +654,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
+ /* Extract VLAN TCI */
+ if (fs->flow_type & FLOW_EXT) {
+ vlan_tci = fs->h_ext.vlan_tci;
+ vlan_m_tci = fs->m_ext.vlan_tci;
+ }
+
layout = &udf_tcpip6_layout;
slice_num = bcm_sf2_get_slice_number(layout, 0);
if (slice_num == UDF_NUM_SLICES)
@@ -704,20 +741,13 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
- /* UDF_Valid[7:0] [31:24]
- * S-Tag [23:8]
- * C-Tag [7:0]
- */
- core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));
-
- /* Mask all but valid UDFs */
- core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
-
/* Slice the IPv6 source address and port */
bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
- ports.key->src, slice_num, false);
+ ports.key->src, vlan_tci, slice_num,
+ udf_lower_bits(num_udf), false);
bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
- ports.mask->src, SLICE_NUM_MASK, true);
+ ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
+ udf_lower_bits(num_udf), true);
/* Insert into TCAM now because we need to insert a second rule */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -768,16 +798,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
udf_lower_bits(num_udf) << 8;
core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
- /* Don't care */
- core_writel(priv, 0, CORE_CFP_DATA_PORT(5));
-
- /* Mask all */
- core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
-
bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
- ports.key->dst, slice_num, false);
+ ports.key->dst, 0, slice_num,
+ 0, false);
bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
- ports.key->dst, SLICE_NUM_MASK, true);
+ ports.key->dst, 0, SLICE_NUM_MASK,
+ 0, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
@@ -823,7 +849,9 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
+ struct switchdev_obj_port_vlan vlan;
unsigned int queue_num, port_num;
+ u16 vid;
int ret;
/* This rule is a Wake-on-LAN filter and we must specifically
@@ -843,6 +871,34 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
dsa_is_cpu_port(ds, port_num)) ||
port_num >= priv->hw_params.num_ports)
return -EINVAL;
+
+ /* If the rule is matching a particular VLAN, make sure that we honor
+ * the matching and have it tagged or untagged on the destination port,
+ * we do this on egress with a VLAN entry. The egress tagging attribute
+ * is expected to be provided in h_ext.data[1] bit 0. A 1 means untagged,
+ * a 0 means tagged.
+ */
+ if (fs->flow_type & FLOW_EXT) {
+ /* We cannot support matching multiple VLAN IDs yet */
+ if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
+ VLAN_VID_MASK)
+ return -EINVAL;
+
+ vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
+ vlan.vid_begin = vid;
+ vlan.vid_end = vid;
+ if (cpu_to_be32(fs->h_ext.data[1]) & 1)
+ vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
+ else
+ vlan.flags = 0;
+
+ ret = ds->ops->port_vlan_prepare(ds, port_num, &vlan);
+ if (ret)
+ return ret;
+
+ ds->ops->port_vlan_add(ds, port_num, &vlan);
+ }
+
/*
* We have a small oddity where Port 6 just does not have a
* valid bit here (so we subtract by one).
@@ -878,21 +934,22 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
int ret = -EINVAL;
/* Check for unsupported extensions */
- if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
- fs->m_ext.data[1]))
+ if (fs->flow_type & FLOW_MAC_EXT)
return -EINVAL;
- if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
+ if ((fs->flow_type & FLOW_EXT) &&
+ !(ds->ops->port_vlan_prepare || ds->ops->port_vlan_add ||
+ ds->ops->port_vlan_del))
+ return -EOPNOTSUPP;
+
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
- if (fs->location != RX_CLS_LOC_ANY &&
- fs->location > bcm_sf2_cfp_rule_size(priv))
- return -EINVAL;
-
ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
if (ret == 0)
return -EEXIST;
@@ -973,7 +1030,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
struct cfp_rule *rule;
int ret;
- if (loc >= CFP_NUM_RULES)
+ if (loc > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
/* Refuse deleting unused rules, and those that are not unique since
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index fdcb70b9f0e4..400207c5c7de 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -360,6 +360,7 @@ static void __exit dsa_loop_exit(void)
}
module_exit(dsa_loop_exit);
+MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 0369c22fe3e1..cf6fa8fede33 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1517,7 +1517,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct gswip_priv *priv = ds->priv;
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 1d7870c6df3c..4ec6a47b7f72 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_DSA_MICROCHIP_KSZ_COMMON
+ select NET_DSA_TAG_KSZ
tristate
menuconfig NET_DSA_MICROCHIP_KSZ9477
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index d8fda4a02640..fd1d6676ae4f 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -67,7 +67,7 @@ static void port_r_cnt(struct ksz_device *dev, int port)
static void ksz_mib_read_work(struct work_struct *work)
{
struct ksz_device *dev = container_of(work, struct ksz_device,
- mib_read);
+ mib_read.work);
struct ksz_port_mib *mib;
struct ksz_port *p;
int i;
@@ -93,32 +93,24 @@ static void ksz_mib_read_work(struct work_struct *work)
p->read = false;
mutex_unlock(&mib->cnt_mutex);
}
-}
-
-static void mib_monitor(struct timer_list *t)
-{
- struct ksz_device *dev = from_timer(dev, t, mib_read_timer);
- mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
- schedule_work(&dev->mib_read);
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
}
void ksz_init_mib_timer(struct ksz_device *dev)
{
int i;
+ INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
+
/* Read MIB counters every 30 seconds to avoid overflow. */
dev->mib_read_interval = msecs_to_jiffies(30000);
- INIT_WORK(&dev->mib_read, ksz_mib_read_work);
- timer_setup(&dev->mib_read_timer, mib_monitor, 0);
-
for (i = 0; i < dev->mib_port_cnt; i++)
dev->dev_ops->port_init_cnt(dev, i);
/* Start the timer 2 seconds later. */
- dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
- add_timer(&dev->mib_read_timer);
+ schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
@@ -152,7 +144,7 @@ void ksz_adjust_link(struct dsa_switch *ds, int port,
/* Read all MIB counters when the link is going down. */
if (!phydev->link) {
p->read = true;
- schedule_work(&dev->mib_read);
+ schedule_delayed_work(&dev->mib_read, 0);
}
mutex_lock(&dev->dev_mutex);
if (!phydev->link)
@@ -477,10 +469,8 @@ EXPORT_SYMBOL(ksz_switch_register);
void ksz_switch_remove(struct ksz_device *dev)
{
/* timer started */
- if (dev->mib_read_timer.expires) {
- del_timer_sync(&dev->mib_read_timer);
- flush_work(&dev->mib_read);
- }
+ if (dev->mib_read_interval)
+ cancel_delayed_work_sync(&dev->mib_read);
dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index a20ebb749377..f2c9bb68fd33 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -80,8 +80,7 @@ struct ksz_device {
struct vlan_table *vlan_cache;
struct ksz_port *ports;
- struct timer_list mib_read_timer;
- struct work_struct mib_read;
+ struct delayed_work mib_read;
unsigned long mib_read_interval;
u16 br_member;
u16 member;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 7cbd1bd4c5a6..34e4aadfa705 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -67,58 +67,6 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
};
static int
-mt7623_trgmii_write(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- int ret;
-
- ret = regmap_write(priv->ethernet, TRGMII_BASE(reg), val);
- if (ret < 0)
- dev_err(priv->dev,
- "failed to priv write register\n");
- return ret;
-}
-
-static u32
-mt7623_trgmii_read(struct mt7530_priv *priv, u32 reg)
-{
- int ret;
- u32 val;
-
- ret = regmap_read(priv->ethernet, TRGMII_BASE(reg), &val);
- if (ret < 0) {
- dev_err(priv->dev,
- "failed to priv read register\n");
- return ret;
- }
-
- return val;
-}
-
-static void
-mt7623_trgmii_rmw(struct mt7530_priv *priv, u32 reg,
- u32 mask, u32 set)
-{
- u32 val;
-
- val = mt7623_trgmii_read(priv, reg);
- val &= ~mask;
- val |= set;
- mt7623_trgmii_write(priv, reg, val);
-}
-
-static void
-mt7623_trgmii_set(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- mt7623_trgmii_rmw(priv, reg, 0, val);
-}
-
-static void
-mt7623_trgmii_clear(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- mt7623_trgmii_rmw(priv, reg, val, 0);
-}
-
-static int
core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
{
struct mii_bus *bus = priv->bus;
@@ -530,27 +478,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
mt7530_rmw(priv, MT7530_TRGMII_RD(i),
RD_TAP_MASK, RD_TAP(16));
- else
- if (priv->id != ID_MT7621)
- mt7623_trgmii_set(priv, GSW_INTF_MODE,
- INTF_MODE_TRGMII);
-
- return 0;
-}
-
-static int
-mt7623_pad_clk_setup(struct dsa_switch *ds)
-{
- struct mt7530_priv *priv = ds->priv;
- int i;
-
- for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
- mt7623_trgmii_write(priv, GSW_TRGMII_TD_ODT(i),
- TD_DM_DRVP(8) | TD_DM_DRVN(8));
-
- mt7623_trgmii_set(priv, GSW_TRGMII_RCK_CTRL, RX_RST | RXC_DQSISEL);
- mt7623_trgmii_clear(priv, GSW_TRGMII_RCK_CTRL, RX_RST);
-
return 0;
}
@@ -563,17 +490,6 @@ mt7530_mib_reset(struct dsa_switch *ds)
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}
-static void
-mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
-{
- u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
-
- if (enable)
- mt7530_set(priv, MT7530_PMCR_P(port), mask);
- else
- mt7530_clear(priv, MT7530_PMCR_P(port), mask);
-}
-
static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct mt7530_priv *priv = ds->priv;
@@ -712,11 +628,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
- /* Disable auto learning on the cpu port */
- mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
-
- /* Unknown unicast frame fordwarding to the cpu port */
- mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
+ /* Unknown multicast frame forwarding to the cpu port */
+ mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
/* Set CPU port number */
if (priv->id == ID_MT7621)
@@ -750,7 +663,7 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
- mt7530_port_set_status(priv, port, 0);
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
@@ -773,7 +686,7 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
priv->ports[port].enable = false;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- mt7530_port_set_status(priv, port, 0);
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
}
@@ -857,8 +770,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
*/
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_MATRIX_MODE);
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
- VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
@@ -874,8 +788,8 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
if (all_user_ports_removed) {
mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
PCR_MATRIX(dsa_user_ports(priv->ds)));
- mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT),
- PORT_SPEC_TAG);
+ mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG
+ | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
}
@@ -901,8 +815,9 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
/* Set the port as a user port which is to be able to recognize VID
* from incoming packets before fetching entry within the VLAN table.
*/
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
- VLAN_ATTR(MT7530_VLAN_USER));
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
}
static void
@@ -1222,6 +1137,64 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
+static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ /* Check for existent entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ val = mt7530_read(priv, MT7530_MFC);
+
+ /* MT7530 only supports one monitor port */
+ if (val & MIRROR_EN && MIRROR_PORT(val) != mirror->to_local_port)
+ return -EEXIST;
+
+ val |= MIRROR_EN;
+ val &= ~MIRROR_MASK;
+ val |= mirror->to_local_port;
+ mt7530_write(priv, MT7530_MFC, val);
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (ingress) {
+ val |= PORT_RX_MIR;
+ priv->mirror_rx |= BIT(port);
+ } else {
+ val |= PORT_TX_MIR;
+ priv->mirror_tx |= BIT(port);
+ }
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ return 0;
+}
+
+static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (mirror->ingress) {
+ val &= ~PORT_RX_MIR;
+ priv->mirror_rx &= ~BIT(port);
+ } else {
+ val &= ~PORT_TX_MIR;
+ priv->mirror_tx &= ~BIT(port);
+ }
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = mt7530_read(priv, MT7530_MFC);
+ val &= ~MIRROR_EN;
+ mt7530_write(priv, MT7530_MFC, val);
+ }
+}
+
static enum dsa_tag_protocol
mtk_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
@@ -1256,10 +1229,6 @@ mt7530_setup(struct dsa_switch *ds)
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
if (priv->id == ID_MT7530) {
- priv->ethernet = syscon_node_to_regmap(dn);
- if (IS_ERR(priv->ethernet))
- return PTR_ERR(priv->ethernet);
-
regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
ret = regulator_enable(priv->core_pwr);
if (ret < 0) {
@@ -1322,8 +1291,6 @@ mt7530_setup(struct dsa_switch *ds)
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
- mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK);
-
for (i = 0; i < MT7530_NUM_PORTS; i++) {
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -1333,6 +1300,10 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_cpu_port_enable(priv, i);
else
mt7530_port_disable(ds, i);
+
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
/* Setup port 5 */
@@ -1356,6 +1327,9 @@ mt7530_setup(struct dsa_switch *ds)
continue;
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
if (phy_node->parent == priv->dev->of_node->parent) {
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV)
@@ -1418,14 +1392,6 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
/* Setup TX circuit incluing relevant PAD and driving */
mt7530_pad_clk_setup(ds, state->interface);
- if (priv->id == ID_MT7530) {
- /* Setup RX circuit, relevant PAD and driving on the
- * host which must be placed after the setup on the
- * device side is all finished.
- */
- mt7623_pad_clk_setup(ds);
- }
-
priv->p6_interface = state->interface;
break;
default:
@@ -1441,8 +1407,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
- mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
- PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
+ mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
PMCR_BACKPR_EN | PMCR_FORCE_MODE;
@@ -1450,22 +1415,6 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
if (port == 5 && dsa_is_user_port(ds, 5))
mcr_new |= PMCR_EXT_PHY;
- switch (state->speed) {
- case SPEED_1000:
- mcr_new |= PMCR_FORCE_SPEED_1000;
- break;
- case SPEED_100:
- mcr_new |= PMCR_FORCE_SPEED_100;
- break;
- }
- if (state->duplex == DUPLEX_FULL) {
- mcr_new |= PMCR_FORCE_FDX;
- if (state->pause & MLO_PAUSE_TX)
- mcr_new |= PMCR_TX_FC_EN;
- if (state->pause & MLO_PAUSE_RX)
- mcr_new |= PMCR_RX_FC_EN;
- }
-
if (mcr_new != mcr_cur)
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
@@ -1476,17 +1425,38 @@ static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
- mt7530_port_set_status(priv, port, 0);
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct mt7530_priv *priv = ds->priv;
+ u32 mcr;
+
+ mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+
+ switch (speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_SPEED_100;
+ break;
+ }
+ if (duplex == DUPLEX_FULL) {
+ mcr |= PMCR_FORCE_FDX;
+ if (tx_pause)
+ mcr |= PMCR_TX_FC_EN;
+ if (rx_pause)
+ mcr |= PMCR_RX_FC_EN;
+ }
- mt7530_port_set_status(priv, port, 1);
+ mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
static void mt7530_phylink_validate(struct dsa_switch *ds, int port,
@@ -1611,6 +1581,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
+ .port_mirror_add = mt7530_port_mirror_add,
+ .port_mirror_del = mt7530_port_mirror_del,
.phylink_validate = mt7530_phylink_validate,
.phylink_mac_link_state = mt7530_phylink_mac_link_state,
.phylink_mac_config = mt7530_phylink_mac_config,
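The two new hooks wire tc-mirred port mirroring into the DSA core. Their bodies fall outside this excerpt; a hedged sketch of what the add path plausibly does with the MIRROR_* and PORT_*_MIR bits defined in mt7530.h below (register and bit names are real, the control flow is assumed):

static int example_mirror_add(struct mt7530_priv *priv, int port,
			      int monitor_port, bool ingress)
{
	u32 val = mt7530_read(priv, MT7530_MFC);

	/* MT7530 has a single monitor port; refuse to retarget it */
	if ((val & MIRROR_EN) && MIRROR_PORT(val) != monitor_port)
		return -EEXIST;

	val |= MIRROR_EN;
	val &= ~MIRROR_MASK;
	val |= MIRROR_PORT(monitor_port);
	mt7530_write(priv, MT7530_MFC, val);

	/* flag the mirrored direction on the source port */
	mt7530_set(priv, MT7530_PCR_P(port),
		   ingress ? PORT_RX_MIR : PORT_TX_MIR);
	return 0;
}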
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index ccb9da8cad0d..82af4d2d406e 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -31,11 +31,15 @@ enum {
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
#define UNM_FFP(x) (((x) & 0xff) << 16)
+#define UNM_FFP_MASK UNM_FFP(~0)
#define UNU_FFP(x) (((x) & 0xff) << 8)
#define UNU_FFP_MASK UNU_FFP(~0)
#define CPU_EN BIT(7)
#define CPU_PORT(x) ((x) << 4)
#define CPU_MASK (0xf << 4)
+#define MIRROR_EN BIT(3)
+#define MIRROR_PORT(x) ((x) & 0x7)
+#define MIRROR_MASK 0x7
/* Registers for address table access */
#define MT7530_ATA1 0x74
@@ -141,6 +145,8 @@ enum mt7530_stp_state {
/* Register for port control */
#define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100))
+#define PORT_TX_MIR BIT(9)
+#define PORT_RX_MIR BIT(8)
#define PORT_VLAN(x) ((x) & 0x3)
enum mt7530_port_mode {
@@ -167,9 +173,16 @@ enum mt7530_port_mode {
/* Register for port vlan control */
#define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100))
#define PORT_SPEC_TAG BIT(5)
+#define PVC_EG_TAG(x) (((x) & 0x7) << 8)
+#define PVC_EG_TAG_MASK PVC_EG_TAG(7)
#define VLAN_ATTR(x) (((x) & 0x3) << 6)
#define VLAN_ATTR_MASK VLAN_ATTR(3)
+enum mt7530_vlan_port_eg_tag {
+ MT7530_VLAN_EG_DISABLED = 0,
+ MT7530_VLAN_EG_CONSISTENT = 1,
+};
+
enum mt7530_vlan_port_attr {
MT7530_VLAN_USER = 0,
MT7530_VLAN_TRANSPARENT = 3,
@@ -201,6 +214,10 @@ enum mt7530_vlan_port_attr {
#define PMCR_FORCE_LNK BIT(0)
#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
PMCR_FORCE_SPEED_1000)
+#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
+ PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
#define PMSR_EEE1G BIT(7)
@@ -268,7 +285,6 @@ enum mt7530_vlan_port_attr {
/* Registers for TRGMII on the both side */
#define MT7530_TRGMII_RCK_CTRL 0x7a00
-#define GSW_TRGMII_RCK_CTRL 0x300
#define RX_RST BIT(31)
#define RXC_DQSISEL BIT(30)
#define DQSI1_TAP_MASK (0x7f << 8)
@@ -277,31 +293,24 @@ enum mt7530_vlan_port_attr {
#define DQSI0_TAP(x) ((x) & 0x7f)
#define MT7530_TRGMII_RCK_RTT 0x7a04
-#define GSW_TRGMII_RCK_RTT 0x304
#define DQS1_GATE BIT(31)
#define DQS0_GATE BIT(30)
#define MT7530_TRGMII_RD(x) (0x7a10 + (x) * 8)
-#define GSW_TRGMII_RD(x) (0x310 + (x) * 8)
#define BSLIP_EN BIT(31)
#define EDGE_CHK BIT(30)
#define RD_TAP_MASK 0x7f
#define RD_TAP(x) ((x) & 0x7f)
-#define GSW_TRGMII_TXCTRL 0x340
#define MT7530_TRGMII_TXCTRL 0x7a40
#define TRAIN_TXEN BIT(31)
#define TXC_INV BIT(30)
#define TX_RST BIT(28)
#define MT7530_TRGMII_TD_ODT(i) (0x7a54 + 8 * (i))
-#define GSW_TRGMII_TD_ODT(i) (0x354 + 8 * (i))
#define TD_DM_DRVP(x) ((x) & 0xf)
#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
-#define GSW_INTF_MODE 0x390
-#define INTF_MODE_TRGMII BIT(1)
-
#define MT7530_TRGMII_TCK_CTRL 0x7a78
#define TCK_TAP(x) (((x) & 0xf) << 8)
@@ -434,7 +443,6 @@ static const char *p5_intf_modes(unsigned int p5_interface)
* @ds: The pointer to the dsa core structure
* @bus: The bus used for the device and built-in PHY
* @rstc: The pointer to reset control used by MCM
- * @ethernet: The regmap used for access TRGMII-based registers
* @core_pwr: The power supplied into the core
* @io_pwr: The power supplied into the I/O
* @reset: The descriptor for GPIO line tied to its reset pin
@@ -451,7 +459,6 @@ struct mt7530_priv {
struct dsa_switch *ds;
struct mii_bus *bus;
struct reset_control *rstc;
- struct regmap *ethernet;
struct regulator *core_pwr;
struct regulator *io_pwr;
struct gpio_desc *reset;
@@ -460,6 +467,8 @@ struct mt7530_priv {
phy_interface_t p6_interface;
phy_interface_t p5_interface;
unsigned int p5_intf_sel;
+ u8 mirror_rx;
+ u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
/* protect registers against concurrent access among processes */
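The new mirror_rx/mirror_tx members are per-direction port bitmaps; u8 is sufficient because MT7530_NUM_PORTS is 7. They let the driver reject duplicate mirror entries and detect when the last one is removed so MIRROR_EN can be cleared. A small sketch of the bookkeeping (helper name hypothetical):

static bool example_port_mirrored(struct mt7530_priv *priv, int port,
				  bool ingress)
{
	u8 map = ingress ? priv->mirror_rx : priv->mirror_tx;

	return map & BIT(port);
}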
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 6435020d690d..51185e4d7d15 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -24,8 +24,8 @@ config NET_DSA_MV88E6XXX_PTP
bool "PTP support for Marvell 88E6xxx"
default n
depends on NET_DSA_MV88E6XXX_GLOBAL2
+ depends on PTP_1588_CLOCK
imply NETWORK_PHY_TIMESTAMPING
- imply PTP_1588_CLOCK
help
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
chips that support it.
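Swapping `imply PTP_1588_CLOCK` for `depends on` closes a link-time hole: with imply, this driver could be built-in while the PTP core was a module, leaving its direct calls into that module unresolvable. When a hard dependency is not wanted, the usual alternative is to gate call sites on IS_REACHABLE(); a minimal sketch, not this driver's code:

#include <linux/kconfig.h>

static inline bool example_ptp_usable(void)
{
	/* true when the PTP core is built-in, or when it is modular
	 * and this code is being built as a module too
	 */
	return IS_REACHABLE(CONFIG_PTP_1588_CLOCK);
}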
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 2f993e673ec7..2b4a723c8306 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -397,32 +397,35 @@ static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
mv88e6xxx_reg_unlock(chip);
}
-int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
- int speed, int duplex, int pause,
- phy_interface_t mode)
+static int mv88e6xxx_port_config_interface(struct mv88e6xxx_chip *chip,
+ int port, phy_interface_t interface)
{
- struct phylink_link_state state;
int err;
- if (!chip->info->ops->port_set_link)
- return 0;
+ if (chip->info->ops->port_set_rgmii_delay) {
+ err = chip->info->ops->port_set_rgmii_delay(chip, port,
+ interface);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
- if (!chip->info->ops->port_link_state)
- return 0;
+ if (chip->info->ops->port_set_cmode) {
+ err = chip->info->ops->port_set_cmode(chip, port,
+ interface);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
- err = chip->info->ops->port_link_state(chip, port, &state);
- if (err)
- return err;
+ return 0;
+}
- /* Has anything actually changed? We don't expect the
- * interface mode to change without one of the other
- * parameters also changing
- */
- if (state.link == link &&
- state.speed == speed &&
- state.duplex == duplex &&
- (state.interface == mode ||
- state.interface == PHY_INTERFACE_MODE_NA))
+static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
+ int link, int speed, int duplex, int pause,
+ phy_interface_t mode)
+{
+ int err;
+
+ if (!chip->info->ops->port_set_link)
return 0;
/* Port's MAC control must not be changed unless the link is down */
@@ -430,8 +433,9 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
if (err)
return err;
- if (chip->info->ops->port_set_speed) {
- err = chip->info->ops->port_set_speed(chip, port, speed);
+ if (chip->info->ops->port_set_speed_duplex) {
+ err = chip->info->ops->port_set_speed_duplex(chip, port,
+ speed, duplex);
if (err && err != -EOPNOTSUPP)
goto restore_link;
}
@@ -445,25 +449,7 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
goto restore_link;
}
- if (chip->info->ops->port_set_duplex) {
- err = chip->info->ops->port_set_duplex(chip, port, duplex);
- if (err && err != -EOPNOTSUPP)
- goto restore_link;
- }
-
- if (chip->info->ops->port_set_rgmii_delay) {
- err = chip->info->ops->port_set_rgmii_delay(chip, port, mode);
- if (err && err != -EOPNOTSUPP)
- goto restore_link;
- }
-
- if (chip->info->ops->port_set_cmode) {
- err = chip->info->ops->port_set_cmode(chip, port, mode);
- if (err && err != -EOPNOTSUPP)
- goto restore_link;
- }
-
- err = 0;
+ err = mv88e6xxx_port_config_interface(chip, port, mode);
restore_link:
if (chip->info->ops->port_set_link(chip, port, link))
dev_err(chip->dev, "p%d: failed to restore MAC's link\n", port);
@@ -478,6 +464,97 @@ static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
return port < chip->info->num_internal_phys;
}
+static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev,
+ "p%d: %s: failed to read port status\n",
+ port, __func__);
+ return err;
+ }
+
+ return !!(reg & MV88E6XXX_PORT_STS_PHY_DETECT);
+}
+
+static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u8 lane;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane && chip->info->ops->serdes_pcs_get_state)
+ err = chip->info->ops->serdes_pcs_get_state(chip, port, lane,
+ state);
+ else
+ err = -EOPNOTSUPP;
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ u8 lane;
+
+ if (ops->serdes_pcs_config) {
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane)
+ return ops->serdes_pcs_config(chip, port, lane, mode,
+ interface, advertise);
+ }
+
+ return 0;
+}
+
+static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+ u8 lane;
+
+ ops = chip->info->ops;
+
+ if (ops->serdes_pcs_an_restart) {
+ mv88e6xxx_reg_lock(chip);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane)
+ err = ops->serdes_pcs_an_restart(chip, port, lane);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ dev_err(ds->dev, "p%d: failed to restart AN\n", port);
+ }
+}
+
+static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ unsigned int mode,
+ int speed, int duplex)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ u8 lane;
+
+ if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) {
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane)
+ return ops->serdes_pcs_link_up(chip, port, lane,
+ speed, duplex);
+ }
+
+ return 0;
+}
+
static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
unsigned long *mask,
struct phylink_link_state *state)
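All of the helpers added above share one dispatch shape: map the port to its SERDES lane, then call the chip-specific op only if both the lane and the op exist, with -EOPNOTSUPP meaning "feature absent" rather than a hard failure. Condensed into one function (using the an_restart op as the example):

static int example_serdes_dispatch(struct mv88e6xxx_chip *chip, int port)
{
	const struct mv88e6xxx_ops *ops = chip->info->ops;
	int err = -EOPNOTSUPP;	/* no lane or no op: feature absent */
	u8 lane;

	mv88e6xxx_reg_lock(chip);
	lane = mv88e6xxx_serdes_get_lane(chip, port);
	if (lane && ops->serdes_pcs_an_restart)
		err = ops->serdes_pcs_an_restart(chip, port, lane);
	mv88e6xxx_reg_unlock(chip);

	return err;
}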
@@ -582,83 +659,107 @@ static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
phylink_helper_basex_speed(state);
}
-static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
- if (chip->info->ops->port_link_state)
- err = chip->info->ops->port_link_state(chip, port, state);
- else
- err = -EOPNOTSUPP;
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int speed, duplex, link, pause, err;
+ int err;
+ /* FIXME: is this the correct test? If we're in fixed mode on an
+	 * internal port, why should we process this any differently from
+ * PHY mode? On the other hand, the port may be automedia between
+ * an internal PHY and the serdes...
+ */
if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
return;
- if (mode == MLO_AN_FIXED) {
- link = LINK_FORCED_UP;
- speed = state->speed;
- duplex = state->duplex;
- } else if (!mv88e6xxx_phy_is_internal(ds, port)) {
- link = state->link;
- speed = state->speed;
- duplex = state->duplex;
- } else {
- speed = SPEED_UNFORCED;
- duplex = DUPLEX_UNFORCED;
- link = LINK_UNFORCED;
- }
- pause = !!phylink_test(state->advertising, Pause);
-
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause,
- state->interface);
+ /* FIXME: should we force the link down here - but if we do, how
+ * do we restore the link force/unforce state? The driver layering
+ * gets in the way.
+ */
+ err = mv88e6xxx_port_config_interface(chip, port, state->interface);
+ if (err && err != -EOPNOTSUPP)
+ goto err_unlock;
+
+ err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
+ state->advertising);
+ /* FIXME: we should restart negotiation if something changed - which
+	 * is something we get if we convert to using phylink's PCS operations.
+ */
+ if (err > 0)
+ err = 0;
+
+err_unlock:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
+ dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port);
}
-static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link)
+static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+
+ ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- err = chip->info->ops->port_set_link(chip, port, link);
+ if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
+ mode == MLO_AN_FIXED) && ops->port_set_link)
+ err = ops->port_set_link(chip, port, LINK_FORCED_DOWN);
mv88e6xxx_reg_unlock(chip);
if (err)
- dev_err(chip->dev, "p%d: failed to force MAC link\n", port);
-}
-
-static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface)
-{
- if (mode == MLO_AN_FIXED)
- mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_DOWN);
+ dev_err(chip->dev,
+ "p%d: failed to force MAC link down\n", port);
}
static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode, phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- if (mode == MLO_AN_FIXED)
- mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+
+ ops = chip->info->ops;
+
+ mv88e6xxx_reg_lock(chip);
+ if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) {
+ /* FIXME: for an automedia port, should we force the link
+ * down here - what if the link comes up due to "other" media
+ * while we're bringing the port up, how is the exclusivity
+ * handled in the Marvell hardware? E.g. port 2 on 88E6390
+ * shared between internal PHY and Serdes.
+ */
+ err = mv88e6xxx_serdes_pcs_link_up(chip, port, mode, speed,
+ duplex);
+ if (err)
+ goto error;
+
+ if (ops->port_set_speed_duplex) {
+ err = ops->port_set_speed_duplex(chip, port,
+ speed, duplex);
+ if (err && err != -EOPNOTSUPP)
+ goto error;
+ }
+
+ if (ops->port_set_link)
+ err = ops->port_set_link(chip, port, LINK_FORCED_UP);
+ }
+error:
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err && err != -EOPNOTSUPP)
+ dev_err(ds->dev,
+ "p%d: failed to configure MAC link up\n", port);
}
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
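Both rewritten link paths are gated on mv88e6xxx_port_ppu_updates(), which reports the PHY_DETECT bit: when the PHY Polling Unit is tracking an attached PHY, the hardware keeps the MAC's link, speed, and duplex in sync itself, and forcing them from software would fight it. The shared condition, written out as a predicate:

static bool example_sw_controls_mac(struct mv88e6xxx_chip *chip, int port,
				    unsigned int mode)
{
	/* force MAC state only when the PPU is not maintaining it,
	 * or when phylink drives the port as a fixed link
	 */
	return !mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED;
}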
@@ -1018,7 +1119,14 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
- return 32 * sizeof(u16);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int len;
+
+ len = 32 * sizeof(u16);
+ if (chip->info->ops->serdes_get_regs_len)
+ len += chip->info->ops->serdes_get_regs_len(chip, port);
+
+ return len;
}
static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
@@ -1043,6 +1151,9 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
p[i] = reg;
}
+ if (chip->info->ops->serdes_get_regs)
+ chip->info->ops->serdes_get_regs(chip, port, &p[i]);
+
mv88e6xxx_reg_unlock(chip);
}
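ethtool sizes the register dump with get_regs_len() before get_regs() fills it, so the two hunks must agree on layout: 32 16-bit port registers first, then whatever serdes_get_regs() appends. After the copy loop above, i is 32, so &p[i] is exactly the offset the length calculation reserved; a sketch of that contract:

/* layout contract between the two ethtool hooks above (sketch) */
#define EXAMPLE_PORT_REGS	32

static int example_regs_len(struct mv88e6xxx_chip *chip, int port)
{
	int len = EXAMPLE_PORT_REGS * sizeof(u16);	/* p[0]..p[31] */

	if (chip->info->ops->serdes_get_regs_len)	/* SERDES tail */
		len += chip->info->ops->serdes_get_regs_len(chip, port);

	return len;
}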
@@ -1785,7 +1896,7 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
}
static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
- u16 vid, u8 member)
+ u16 vid, u8 member, bool warn)
{
const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
struct mv88e6xxx_vtu_entry vlan;
@@ -1830,7 +1941,7 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
if (err)
return err;
- } else {
+ } else if (warn) {
dev_info(chip->dev, "p%d: already a member of VLAN %d\n",
port, vid);
}
@@ -1844,6 +1955,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ bool warn;
u8 member;
u16 vid;
@@ -1857,10 +1969,15 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
else
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
+ /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port
+ * and then the CPU port. Do not warn for duplicates for the CPU port.
+ */
+ warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port);
+
mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (mv88e6xxx_port_vlan_join(chip, port, vid, member))
+ if (mv88e6xxx_port_vlan_join(chip, port, vid, member, warn))
dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
vid, untagged ? 'u' : 't');
@@ -3272,8 +3389,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3282,7 +3398,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3312,12 +3427,10 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
- .port_link_state = mv88e6185_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3343,8 +3456,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3354,7 +3466,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3383,13 +3494,11 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3418,8 +3527,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
@@ -3429,7 +3537,6 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_set_pause = mv88e6185_port_set_pause,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3461,9 +3568,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6341_port_set_speed,
+ .port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3474,7 +3580,6 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3493,6 +3598,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
@@ -3509,8 +3619,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3520,7 +3629,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3552,11 +3660,9 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.phy_read = mv88e6165_phy_read,
.phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3588,9 +3694,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3600,7 +3705,6 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3632,9 +3736,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6352_port_set_speed,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3645,7 +3748,6 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3665,7 +3767,13 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
.serdes_power = mv88e6352_serdes_power,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6352_phylink_validate,
};
@@ -3679,9 +3787,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3691,7 +3798,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3723,9 +3829,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6352_port_set_speed,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3736,7 +3841,6 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3756,10 +3860,16 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
.serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_irq_enable = mv88e6352_serdes_irq_enable,
.serdes_irq_status = mv88e6352_serdes_irq_status,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6352_phylink_validate,
};
@@ -3772,14 +3882,12 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_set_pause = mv88e6185_port_set_pause,
- .port_link_state = mv88e6185_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3810,9 +3918,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -3822,7 +3929,6 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3844,12 +3950,18 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
- .phylink_validate = mv88e6390_phylink_validate,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390_phylink_validate,
};
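Note the pattern in this ops table (and again in the mv88e6190x, mv88e6191, and mv88e6290 tables below): a '-' line drops a .phylink_validate initializer while an identical entry survives as context further down. The member was simply listed twice; with designated initializers, C keeps the last occurrence and gcc's -Woverride-init can flag the shadowed one, so removing the duplicate changes nothing at runtime. A tiny illustration:

struct example_ops { void (*validate)(void); };

static void first(void) { }
static void last(void) { }

static const struct example_ops example = {
	.validate = first,	/* silently overridden... */
	.validate = last,	/* ...this initializer takes effect */
};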
@@ -3864,9 +3976,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390x_port_set_speed,
+ .port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -3876,7 +3987,6 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3898,12 +4008,18 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
- .phylink_validate = mv88e6390_phylink_validate,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
};
@@ -3918,9 +4034,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3929,7 +4044,6 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3951,12 +4065,18 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
- .phylink_validate = mv88e6390_phylink_validate,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_validate = mv88e6390_phylink_validate,
@@ -3973,9 +4093,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6352_port_set_speed,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3986,7 +4105,6 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4006,10 +4124,16 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
.serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_irq_enable = mv88e6352_serdes_irq_enable,
.serdes_irq_status = mv88e6352_serdes_irq_status,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4027,9 +4151,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6250_port_set_speed,
+ .port_set_speed_duplex = mv88e6250_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4037,7 +4160,6 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6250_port_link_state,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6250_stats_get_sset_count,
@@ -4066,9 +4188,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -4078,7 +4199,6 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -4100,12 +4220,18 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
- .phylink_validate = mv88e6390_phylink_validate,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4123,8 +4249,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4134,7 +4259,6 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4167,8 +4291,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4178,7 +4301,6 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4209,9 +4331,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6341_port_set_speed,
+ .port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -4222,7 +4343,6 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -4241,6 +4361,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
@@ -4259,9 +4384,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4271,7 +4395,6 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4301,9 +4424,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4313,7 +4435,6 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4347,9 +4468,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6352_port_set_speed,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -4360,7 +4480,6 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4380,6 +4499,10 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
.serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_irq_enable = mv88e6352_serdes_irq_enable,
@@ -4390,6 +4513,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_sset_count = mv88e6352_serdes_get_sset_count,
.serdes_get_strings = mv88e6352_serdes_get_strings,
.serdes_get_stats = mv88e6352_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
.phylink_validate = mv88e6352_phylink_validate,
};
@@ -4403,9 +4528,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -4417,7 +4541,6 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -4439,6 +4562,11 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
@@ -4448,6 +4576,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -4461,9 +4591,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390x_port_set_speed,
+ .port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -4475,7 +4604,6 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -4497,12 +4625,18 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5373,8 +5507,9 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.setup = mv88e6xxx_setup,
.teardown = mv88e6xxx_teardown,
.phylink_validate = mv88e6xxx_validate,
- .phylink_mac_link_state = mv88e6xxx_link_state,
+ .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
+ .phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
.phylink_mac_link_down = mv88e6xxx_mac_link_down,
.phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 79cad5e751c6..e5430cf2ad71 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -399,15 +399,6 @@ struct mv88e6xxx_ops {
*/
int (*port_set_link)(struct mv88e6xxx_chip *chip, int port, int link);
-#define DUPLEX_UNFORCED -2
-
- /* Port's MAC duplex mode
- *
- * Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
- * or DUPLEX_UNFORCED for normal duplex detection.
- */
- int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup);
-
#define PAUSE_ON 1
#define PAUSE_OFF 0
@@ -417,13 +408,18 @@ struct mv88e6xxx_ops {
#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
+#define DUPLEX_UNFORCED -2
- /* Port's MAC speed (in Mbps)
+ /* Port's MAC speed (in Mbps) and MAC duplex mode
*
* Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
* Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ *
+ * Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
+ * or DUPLEX_UNFORCED for normal duplex detection.
*/
- int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
+ int (*port_set_speed_duplex)(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
/* What interface mode should be used for maximum speed? */
phy_interface_t (*port_max_speed_mode)(int port);
@@ -462,9 +458,6 @@ struct mv88e6xxx_ops {
*/
int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
- /* Return the port link state, as required by phylink */
- int (*port_link_state)(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state);
/* Snapshot the statistics for a port. The statistics can then
* be read back at leisure but still with a consistent view.
@@ -502,6 +495,17 @@ struct mv88e6xxx_ops {
/* SERDES lane mapping */
u8 (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port);
+ int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state);
+ int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+ int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
+ int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex);
+
/* SERDES interrupt handling */
unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip,
int port);
@@ -517,6 +521,11 @@ struct mv88e6xxx_ops {
int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+ /* SERDES registers for ethtool */
+ int (*serdes_get_regs_len)(struct mv88e6xxx_chip *chip, int port);
+ void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
+ void *_p);
+
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
@@ -664,9 +673,6 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 mask, u16 val);
int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
int bit, int val);
-int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
- int speed, int duplex, int pause,
- phy_interface_t mode);
struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip);
static inline void mv88e6xxx_reg_lock(struct mv88e6xxx_chip *chip)
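Folding port_set_speed and port_set_duplex into the single port_set_speed_duplex op documented above lets each implementation update the speed and duplex fields of PORT_MAC_CTL in one read-modify-write, rather than two writes that could transiently pair the new speed with the old duplex; the port.c hunks below perform exactly that merge. The combined signature, for reference:

int (*port_set_speed_duplex)(struct mv88e6xxx_chip *chip, int port,
			     int speed, int duplex);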
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 0b43c650e100..8128dc607cf4 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -162,46 +162,9 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
return 0;
}
-int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
-{
- u16 reg;
- int err;
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
- if (err)
- return err;
-
- reg &= ~(MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
- MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL);
-
- switch (dup) {
- case DUPLEX_HALF:
- reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
- break;
- case DUPLEX_FULL:
- reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
- MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
- break;
- case DUPLEX_UNFORCED:
- /* normal duplex detection */
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
- if (err)
- return err;
-
- dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
- reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
- reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
-
- return 0;
-}
-
-static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
- int speed, bool alt_bit, bool force_bit)
+static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
+ int port, int speed, bool alt_bit,
+ bool force_bit, int duplex)
{
u16 reg, ctrl;
int err;
@@ -239,11 +202,29 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
return -EOPNOTSUPP;
}
+ switch (duplex) {
+ case DUPLEX_HALF:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
+ break;
+ case DUPLEX_FULL:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
+ break;
+ case DUPLEX_UNFORCED:
+ /* normal duplex detection */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
- reg &= ~MV88E6XXX_PORT_MAC_CTL_SPEED_MASK;
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK |
+ MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL);
+
if (alt_bit)
reg &= ~MV88E6390_PORT_MAC_CTL_ALTSPEED;
if (force_bit) {
@@ -261,12 +242,16 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
+ dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
+ reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
return 0;
}
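The function above is the payoff of the merge: speed and duplex bits of MAC_CTL are cleared together and rewritten in one register write, instead of two separate read-modify-write cycles. A standalone sketch of that pattern; the bit values below are placeholders, only the single-clear/single-write shape is taken from the code:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout -- the real masks live in port.h. */
#define PORT_MAC_CTL_SPEED_MASK    0x0003
#define PORT_MAC_CTL_FORCE_DUPLEX  0x0004
#define PORT_MAC_CTL_DUPLEX_FULL   0x0008

static uint16_t mac_ctl = 0x000f;	/* pretend current register value */

int main(void)
{
	uint16_t reg = mac_ctl;

	/* One clear covers both settings, so a single write programs
	 * speed and duplex together -- the point of merging the ops. */
	reg &= ~(PORT_MAC_CTL_SPEED_MASK |
		 PORT_MAC_CTL_FORCE_DUPLEX |
		 PORT_MAC_CTL_DUPLEX_FULL);

	reg |= 0x0002;				/* speed bits, e.g. 1000M */
	reg |= PORT_MAC_CTL_FORCE_DUPLEX |	/* force full duplex */
	       PORT_MAC_CTL_DUPLEX_FULL;

	mac_ctl = reg;				/* the single write */
	printf("MAC_CTL: 0x%04x\n", mac_ctl);
	return 0;
}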
/* Support 10, 100, 200 Mbps (e.g. 88E6065 family) */
-int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = 200;
@@ -275,11 +260,13 @@ int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return -EOPNOTSUPP;
/* Setting 200 Mbps on port 0 to 3 selects 100 Mbps */
- return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
+ duplex);
}
/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
-int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = 1000;
@@ -287,11 +274,13 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed == 200 || speed > 1000)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
+ duplex);
}
/* Support 10, 100 Mbps (e.g. 88E6250 family) */
-int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = 100;
@@ -299,11 +288,13 @@ int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed > 100)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
+ duplex);
}
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
-int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = port < 5 ? 1000 : 2500;
@@ -317,7 +308,8 @@ int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed == 2500 && port < 5)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, !port, true,
+ duplex);
}
phy_interface_t mv88e6341_port_max_speed_mode(int port)
@@ -329,7 +321,8 @@ phy_interface_t mv88e6341_port_max_speed_mode(int port)
}
/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
-int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = 1000;
@@ -340,11 +333,13 @@ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed == 200 && port < 5)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, true, false);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, false,
+ duplex);
}
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6390) */
-int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = port < 9 ? 1000 : 2500;
@@ -358,7 +353,8 @@ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed == 2500 && port < 9)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, true,
+ duplex);
}
phy_interface_t mv88e6390_port_max_speed_mode(int port)
@@ -370,7 +366,8 @@ phy_interface_t mv88e6390_port_max_speed_mode(int port)
}
/* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
-int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = port < 9 ? 1000 : 10000;
@@ -381,7 +378,8 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed >= 2500 && port < 9)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, true,
+ duplex);
}
phy_interface_t mv88e6390x_port_max_speed_mode(int port)
@@ -586,183 +584,6 @@ int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
return 0;
}
-int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state)
-{
- int err;
- u16 reg;
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
- if (err)
- return err;
-
- if (port < 5) {
- switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
- case MV88E6250_PORT_STS_PORTMODE_PHY_10_HALF:
- state->speed = SPEED_10;
- state->duplex = DUPLEX_HALF;
- break;
- case MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF:
- state->speed = SPEED_100;
- state->duplex = DUPLEX_HALF;
- break;
- case MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL:
- state->speed = SPEED_10;
- state->duplex = DUPLEX_FULL;
- break;
- case MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL:
- state->speed = SPEED_100;
- state->duplex = DUPLEX_FULL;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- state->duplex = DUPLEX_UNKNOWN;
- break;
- }
- } else {
- switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
- case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF:
- state->speed = SPEED_10;
- state->duplex = DUPLEX_HALF;
- break;
- case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF:
- state->speed = SPEED_100;
- state->duplex = DUPLEX_HALF;
- break;
- case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL:
- state->speed = SPEED_10;
- state->duplex = DUPLEX_FULL;
- break;
- case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL:
- state->speed = SPEED_100;
- state->duplex = DUPLEX_FULL;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- state->duplex = DUPLEX_UNKNOWN;
- break;
- }
- }
-
- state->link = !!(reg & MV88E6250_PORT_STS_LINK);
- state->an_enabled = 1;
- state->an_complete = state->link;
- state->interface = PHY_INTERFACE_MODE_NA;
-
- return 0;
-}
-
-int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state)
-{
- int err;
- u16 reg;
-
- switch (chip->ports[port].cmode) {
- case MV88E6XXX_PORT_STS_CMODE_RGMII:
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL,
- &reg);
- if (err)
- return err;
-
- if ((reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK) &&
- (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK))
- state->interface = PHY_INTERFACE_MODE_RGMII_ID;
- else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK)
- state->interface = PHY_INTERFACE_MODE_RGMII_RXID;
- else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK)
- state->interface = PHY_INTERFACE_MODE_RGMII_TXID;
- else
- state->interface = PHY_INTERFACE_MODE_RGMII;
- break;
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- state->interface = PHY_INTERFACE_MODE_1000BASEX;
- break;
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- state->interface = PHY_INTERFACE_MODE_SGMII;
- break;
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- state->interface = PHY_INTERFACE_MODE_2500BASEX;
- break;
- case MV88E6XXX_PORT_STS_CMODE_XAUI:
- state->interface = PHY_INTERFACE_MODE_XAUI;
- break;
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- state->interface = PHY_INTERFACE_MODE_RXAUI;
- break;
- default:
- /* we do not support other cmode values here */
- state->interface = PHY_INTERFACE_MODE_NA;
- }
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
- if (err)
- return err;
-
- switch (reg & MV88E6XXX_PORT_STS_SPEED_MASK) {
- case MV88E6XXX_PORT_STS_SPEED_10:
- state->speed = SPEED_10;
- break;
- case MV88E6XXX_PORT_STS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case MV88E6XXX_PORT_STS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- case MV88E6XXX_PORT_STS_SPEED_10000:
- if ((reg & MV88E6XXX_PORT_STS_CMODE_MASK) ==
- MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- state->speed = SPEED_2500;
- else
- state->speed = SPEED_10000;
- break;
- }
-
- state->duplex = reg & MV88E6XXX_PORT_STS_DUPLEX ?
- DUPLEX_FULL : DUPLEX_HALF;
- state->link = !!(reg & MV88E6XXX_PORT_STS_LINK);
- state->an_enabled = 1;
- state->an_complete = state->link;
-
- return 0;
-}
-
-int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state)
-{
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
- u8 cmode = chip->ports[port].cmode;
-
- /* When a port is in "Cross-chip serdes" mode, it uses
- * 1000Base-X full duplex mode, but there is no automatic
- * link detection. Use the sync OK status for link (as it
- * would do for 1000Base-X mode.)
- */
- if (cmode == MV88E6185_PORT_STS_CMODE_SERDES) {
- u16 mac;
- int err;
-
- err = mv88e6xxx_port_read(chip, port,
- MV88E6XXX_PORT_MAC_CTL, &mac);
- if (err)
- return err;
-
- state->link = !!(mac & MV88E6185_PORT_MAC_CTL_SYNC_OK);
- state->an_enabled = 1;
- state->an_complete =
- !!(mac & MV88E6185_PORT_MAC_CTL_AN_DONE);
- state->duplex =
- state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
- state->speed =
- state->link ? SPEED_1000 : SPEED_UNKNOWN;
-
- return 0;
- }
- }
-
- return mv88e6352_port_link_state(chip, port, state);
-}
-
/* Offset 0x02: Jamming Control
*
* Do not limit the period of time that this port can be paused for by
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 0ec4327c2b42..44d76ac973f6 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -298,15 +298,20 @@ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
-int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
-
-int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
phy_interface_t mv88e6341_port_max_speed_mode(int port);
phy_interface_t mv88e6390_port_max_speed_mode(int port);
@@ -359,12 +364,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
-int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state);
-int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state);
-int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 8d8b3b74aee1..2098f19b534d 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -49,6 +49,52 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
}
+static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
+ u16 status, u16 lpa,
+ struct phylink_link_state *state)
+{
+ if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
+ state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->duplex = status &
+ MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ if (status & MV88E6390_SGMII_PHY_STATUS_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+ if (status & MV88E6390_SGMII_PHY_STATUS_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+
+ switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) {
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_1000:
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+ break;
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(chip->dev, "invalid PHY speed\n");
+ return -EINVAL;
+ }
+ } else {
+ state->link = false;
+ }
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ mii_lpa_mod_linkmode_x(state->lp_advertising, lpa,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ else if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ mii_lpa_mod_linkmode_x(state->lp_advertising, lpa,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+
+ return 0;
+}
+
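The shared helper above decodes the SGMII PHY status word that both the 6352 and 6390 paths read. A standalone decoder with the same structure; the SPD_DPL_VALID/LINK/DUPLEX/PAUSE bit positions follow the serdes.h hunk further below, while the two speed-select bit values are an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define PHY_STATUS_SPEED_MASK     0xc000	/* assumed encoding */
#define PHY_STATUS_SPEED_1000     0x8000
#define PHY_STATUS_SPEED_100      0x4000
#define PHY_STATUS_SPEED_10       0x0000
#define PHY_STATUS_DUPLEX_FULL    (1 << 13)
#define PHY_STATUS_SPD_DPL_VALID  (1 << 11)
#define PHY_STATUS_LINK           (1 << 10)
#define PHY_STATUS_TX_PAUSE       (1 << 3)
#define PHY_STATUS_RX_PAUSE       (1 << 2)

int main(void)
{
	uint16_t status = PHY_STATUS_SPD_DPL_VALID | PHY_STATUS_LINK |
			  PHY_STATUS_SPEED_1000 | PHY_STATUS_DUPLEX_FULL |
			  PHY_STATUS_TX_PAUSE;

	if (!(status & PHY_STATUS_SPD_DPL_VALID)) {
		puts("link down (resolution not valid yet)");
		return 0;
	}

	printf("link=%d duplex=%s",
	       !!(status & PHY_STATUS_LINK),
	       status & PHY_STATUS_DUPLEX_FULL ? "full" : "half");

	switch (status & PHY_STATUS_SPEED_MASK) {
	case PHY_STATUS_SPEED_1000: printf(" speed=1000"); break;
	case PHY_STATUS_SPEED_100:  printf(" speed=100");  break;
	default:                    printf(" speed=10");   break;
	}

	printf(" pause=%s%s\n",
	       status & PHY_STATUS_TX_PAUSE ? "tx " : "",
	       status & PHY_STATUS_RX_PAUSE ? "rx" : "");
	return 0;
}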
int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool up)
{
@@ -70,6 +116,120 @@ int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
return err;
}
+int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
+{
+ u16 adv, bmcr, val;
+ bool changed;
+ int err;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ adv = 0x0001;
+ break;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ break;
+
+ default:
+ return 0;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_ADVERTISE, &val);
+ if (err)
+ return err;
+
+ changed = val != adv;
+ if (changed) {
+ err = mv88e6352_serdes_write(chip, MII_ADVERTISE, adv);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (phylink_autoneg_inband(mode))
+ bmcr = val | BMCR_ANENABLE;
+ else
+ bmcr = val & ~BMCR_ANENABLE;
+
+ if (bmcr == val)
+ return changed;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
+}
+
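Note the return convention of the config function above: negative errno on failure, 0 when nothing changed, and the boolean `changed` (1) when the advertisement register was rewritten. A hypothetical caller sketch; pcs_config()/restart_an() are stand-ins, and the tri-state contract is inferred from the code rather than documented here:

#include <stdio.h>

static int pcs_config(void)  { return 1; }	/* pretend adv changed */
static void restart_an(void) { puts("restarting autoneg"); }

int main(void)
{
	int ret = pcs_config();

	if (ret < 0)
		return 1;		/* hard failure */
	if (ret > 0)
		restart_an();		/* renegotiate with the new adv */
	return 0;
}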
+int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state)
+{
+ u16 lpa, status;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, 0x11, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
+ return err;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_LPA, &lpa);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
+ return err;
+ }
+
+ return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+}
+
+int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
+{
+ u16 bmcr;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &bmcr);
+ if (err)
+ return err;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr | BMCR_ANRESTART);
+}
+
+int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex)
+{
+ u16 val, bmcr;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
+ switch (speed) {
+ case SPEED_1000:
+ bmcr |= BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (bmcr == val)
+ return 0;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
+}
+
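The link_up function above programs a forced speed into BMCR using the standard MII encoding: SPEED1000 and SPEED100 are separate bits, and 10 Mbps is the all-clear case. (The 6390 variant later in this patch maps SPEED_2500 onto the same BMCR_SPEED1000 bit, the 2500base-x lane simply running faster.) A self-contained sketch of the encoding, with the bit values as in linux/mii.h:

#include <stdint.h>
#include <stdio.h>

#define BMCR_SPEED1000 0x0040
#define BMCR_FULLDPLX  0x0100
#define BMCR_SPEED100  0x2000

/* Mirror of the switch above: SPEED_10 sets neither speed bit. */
static uint16_t bmcr_for(int speed, int full_duplex)
{
	uint16_t bmcr = 0;

	if (speed == 1000)
		bmcr |= BMCR_SPEED1000;
	else if (speed == 100)
		bmcr |= BMCR_SPEED100;
	if (full_duplex)
		bmcr |= BMCR_FULLDPLX;
	return bmcr;
}

int main(void)
{
	printf("1000/full -> 0x%04x\n", bmcr_for(1000, 1));	/* 0x0140 */
	printf(" 100/half -> 0x%04x\n", bmcr_for(100, 0));	/* 0x2000 */
	printf("  10/full -> 0x%04x\n", bmcr_for(10, 1));	/* 0x0100 */
	return 0;
}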
u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
@@ -180,26 +340,17 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
{
- struct dsa_switch *ds = chip->ds;
- u16 status;
- bool up;
+ u16 bmsr;
int err;
- err = mv88e6352_serdes_read(chip, MII_BMSR, &status);
- if (err)
- return;
-
- /* Status must be read twice in order to give the current link
- * status. Otherwise the change in link status since the last
- * read of the register is returned.
- */
- err = mv88e6352_serdes_read(chip, MII_BMSR, &status);
- if (err)
+ /* If the link has dropped, we want to know about it. */
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
return;
+ }
- up = status & BMSR_LSTATUS;
-
- dsa_port_phylink_mac_change(ds, port, up);
+ dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
}
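The hunk above drops the old double read of BMSR. BMSR's link bit latches low: once the link drops, the bit reads 0 until the register is read, so a single read tells the IRQ handler about any drop since the last read, while reading twice (as before) could hide a short blip. A toy model of the latch:

#include <stdint.h>
#include <stdio.h>

#define BMSR_LSTATUS 0x0004	/* as in linux/mii.h */

static int link_now = 1, latched_down = 1;	/* pretend a blip happened */

static uint16_t read_bmsr(void)
{
	uint16_t v = (link_now && !latched_down) ? BMSR_LSTATUS : 0;

	latched_down = 0;	/* reading clears the latch */
	return v;
}

int main(void)
{
	printf("1st read: link=%d\n", !!(read_bmsr() & BMSR_LSTATUS)); /* 0 */
	printf("2nd read: link=%d\n", !!(read_bmsr() & BMSR_LSTATUS)); /* 1 */
	return 0;
}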
irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
@@ -237,6 +388,29 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ);
}
+int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
+{
+ if (!mv88e6352_port_has_serdes(chip, port))
+ return 0;
+
+ return 32 * sizeof(u16);
+}
+
+void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
+{
+ u16 *p = _p;
+ u16 reg;
+ int i;
+
+ if (!mv88e6352_port_has_serdes(chip, port))
+ return;
+
+ for (i = 0; i < 32; i++) {
+ mv88e6352_serdes_read(chip, i, &reg);
+ p[i] = reg;
+ }
+}
+
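The get_regs_len/get_regs pair added above follows the usual ethtool split: the core first asks for the length, allocates a buffer of that size, then asks the driver to fill it. A consumer-side sketch with stubbed driver ops; serdes_get_regs_len/serdes_get_regs here are stand-ins for the functions above:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int serdes_get_regs_len(void) { return 32 * sizeof(uint16_t); }

static void serdes_get_regs(void *_p)
{
	uint16_t *p = _p;

	for (int i = 0; i < 32; i++)
		p[i] = i;	/* pretend register reads */
}

int main(void)
{
	int len = serdes_get_regs_len();
	uint16_t *buf = malloc(len);

	if (!buf)
		return 1;
	serdes_get_regs(buf);
	printf("dumped %d bytes, reg[5]=0x%04x\n", len, buf[5]);
	free(buf);
	return 0;
}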
u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
@@ -387,20 +561,18 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane,
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_CONTROL, &val);
+ MV88E6390_SGMII_BMCR, &val);
if (err)
return err;
if (up)
- new_val = val & ~(MV88E6390_SGMII_CONTROL_RESET |
- MV88E6390_SGMII_CONTROL_LOOPBACK |
- MV88E6390_SGMII_CONTROL_PDOWN);
+ new_val = val & ~(BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN);
else
- new_val = val | MV88E6390_SGMII_CONTROL_PDOWN;
+ new_val = val | BMCR_PDOWN;
if (val != new_val)
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_CONTROL, new_val);
+ MV88E6390_SGMII_BMCR, new_val);
return err;
}
@@ -517,71 +689,153 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
return err;
}
-static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
+int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
{
- u8 cmode = chip->ports[port].cmode;
- struct dsa_switch *ds = chip->ds;
- int duplex = DUPLEX_UNKNOWN;
- int speed = SPEED_UNKNOWN;
- phy_interface_t mode;
- int link, err;
- u16 status;
+ u16 val, bmcr, adv;
+ bool changed;
+ int err;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ adv = 0x0001;
+ break;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ break;
+
+ default:
+ return 0;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_ADVERTISE, &val);
+ if (err)
+ return err;
+
+ changed = val != adv;
+ if (changed) {
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_ADVERTISE, adv);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (phylink_autoneg_inband(mode))
+ bmcr = val | BMCR_ANENABLE;
+ else
+ bmcr = val & ~BMCR_ANENABLE;
+
+ /* setting ANENABLE triggers a restart of negotiation */
+ if (bmcr == val)
+ return changed;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, bmcr);
+}
+
+int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state)
+{
+ u16 lpa, status;
+ int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6390_SGMII_PHY_STATUS, &status);
if (err) {
- dev_err(chip->dev, "can't read SGMII PHY status: %d\n", err);
- return;
+ dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
+ return err;
}
- link = status & MV88E6390_SGMII_PHY_STATUS_LINK ?
- LINK_FORCED_UP : LINK_FORCED_DOWN;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_LPA, &lpa);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
+ return err;
+ }
- if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
- duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
- DUPLEX_FULL : DUPLEX_HALF;
+ return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+}
- switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) {
- case MV88E6390_SGMII_PHY_STATUS_SPEED_1000:
- if (cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- speed = SPEED_2500;
- else
- speed = SPEED_1000;
- break;
- case MV88E6390_SGMII_PHY_STATUS_SPEED_100:
- speed = SPEED_100;
- break;
- case MV88E6390_SGMII_PHY_STATUS_SPEED_10:
- speed = SPEED_10;
- break;
- default:
- dev_err(chip->dev, "invalid PHY speed\n");
- return;
- }
- }
+int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
+{
+ u16 bmcr;
+ int err;
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- mode = PHY_INTERFACE_MODE_SGMII;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &bmcr);
+ if (err)
+ return err;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR,
+ bmcr | BMCR_ANRESTART);
+}
+
+int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex)
+{
+ u16 val, bmcr;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ bmcr |= BMCR_SPEED1000;
break;
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- mode = PHY_INTERFACE_MODE_1000BASEX;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
break;
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- mode = PHY_INTERFACE_MODE_2500BASEX;
+ case SPEED_10:
break;
- default:
- mode = PHY_INTERFACE_MODE_NA;
}
- err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex,
- PAUSE_OFF, mode);
- if (err)
- dev_err(chip->dev, "can't propagate PHY settings to MAC: %d\n",
- err);
- else
- dsa_port_phylink_mac_change(ds, port, link == LINK_FORCED_UP);
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (bmcr == val)
+ return 0;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, bmcr);
+}
+
+static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
+ int port, u8 lane)
+{
+ u16 bmsr;
+ int err;
+
+ /* If the link has dropped, we want to know about it. */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMSR, &bmsr);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
+ return;
+ }
+
+ dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
}
static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
@@ -652,3 +906,57 @@ unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
return irq_find_mapping(chip->g2_irq.domain, port);
}
+
+static const u16 mv88e6390_serdes_regs[] = {
+ /* SERDES common registers */
+ 0xf00a, 0xf00b, 0xf00c,
+ 0xf010, 0xf011, 0xf012, 0xf013,
+ 0xf016, 0xf017, 0xf018,
+ 0xf01b, 0xf01c, 0xf01d, 0xf01e, 0xf01f,
+ 0xf020, 0xf021, 0xf022, 0xf023, 0xf024, 0xf025, 0xf026, 0xf027,
+ 0xf028, 0xf029,
+ 0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
+ 0xf038, 0xf039,
+ /* SGMII */
+ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
+ 0x2008,
+ 0x200f,
+ 0xa000, 0xa001, 0xa002, 0xa003,
+ /* 10Gbase-X */
+ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
+ 0x1008,
+ 0x100e, 0x100f,
+ 0x1018, 0x1019,
+ 0x9000, 0x9001, 0x9002, 0x9003, 0x9004,
+ 0x9006,
+ 0x9010, 0x9011, 0x9012, 0x9013, 0x9014, 0x9015, 0x9016,
+ /* 10Gbase-R */
+ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
+ 0x1028, 0x1029, 0x102a, 0x102b,
+};
+
+int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
+{
+ if (mv88e6xxx_serdes_get_lane(chip, port) == 0)
+ return 0;
+
+ return ARRAY_SIZE(mv88e6390_serdes_regs) * sizeof(u16);
+}
+
+void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
+{
+ u16 *p = _p;
+ int lane;
+ u16 reg;
+ int i;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane == 0)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) {
+ mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ mv88e6390_serdes_regs[i], &reg);
+ p[i] = reg;
+ }
+}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index d16ef4da20b0..7990cadba4c2 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -47,14 +47,10 @@
#define MV88E6390_PCS_CONTROL_1_PDOWN BIT(11)
/* 1000BASE-X and SGMII */
-#define MV88E6390_SGMII_CONTROL 0x2000
-#define MV88E6390_SGMII_CONTROL_RESET BIT(15)
-#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14)
-#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11)
-#define MV88E6390_SGMII_STATUS 0x2001
-#define MV88E6390_SGMII_STATUS_AN_DONE BIT(5)
-#define MV88E6390_SGMII_STATUS_REMOTE_FAULT BIT(4)
-#define MV88E6390_SGMII_STATUS_LINK BIT(2)
+#define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR)
+#define MV88E6390_SGMII_BMSR (0x2000 + MII_BMSR)
+#define MV88E6390_SGMII_ADVERTISE (0x2000 + MII_ADVERTISE)
+#define MV88E6390_SGMII_LPA (0x2000 + MII_LPA)
#define MV88E6390_SGMII_INT_ENABLE 0xa001
#define MV88E6390_SGMII_INT_SPEED_CHANGE BIT(14)
#define MV88E6390_SGMII_INT_DUPLEX_CHANGE BIT(13)
@@ -73,6 +69,8 @@
#define MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL BIT(13)
#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11)
#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10)
+#define MV88E6390_SGMII_PHY_STATUS_TX_PAUSE BIT(3)
+#define MV88E6390_SGMII_PHY_STATUS_RX_PAUSE BIT(2)
/* Packet generator pad packet checker */
#define MV88E6390_PG_CONTROL 0xf010
@@ -82,6 +80,26 @@ u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state);
+int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state);
+int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
+int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
+int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex);
+int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex);
unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
@@ -109,6 +127,11 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
+void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+
/* Return the (first) SERDES lane address a port is using, 0 otherwise. */
static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3257962c147e..e113269c220a 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -2,6 +2,7 @@
/* Copyright 2019 NXP Semiconductors
*/
#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
@@ -12,6 +13,7 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
@@ -44,11 +46,8 @@ static int felix_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct ocelot *ocelot = ds->priv;
- bool vlan_aware;
- vlan_aware = dsa_port_is_vlan_filtering(dsa_to_port(ds, port));
-
- return ocelot_fdb_add(ocelot, port, addr, vid, vlan_aware);
+ return ocelot_fdb_add(ocelot, port, addr, vid);
}
static int felix_fdb_del(struct dsa_switch *ds, int port,
@@ -103,13 +102,17 @@ static void felix_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
+ u16 flags = vlan->flags;
u16 vid;
int err;
+ if (dsa_is_cpu_port(ds, port))
+ flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
+
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
err = ocelot_vlan_add(ocelot, port, vid,
- vlan->flags & BRIDGE_VLAN_INFO_PVID,
- vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
if (err) {
dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
vid, port, err);
@@ -176,8 +179,7 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 1000baseT_Full);
- /* The internal ports that run at 2.5G are overclocked GMII */
- if (state->interface == PHY_INTERFACE_MODE_GMII ||
+ if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
state->interface == PHY_INTERFACE_MODE_2500BASEX ||
state->interface == PHY_INTERFACE_MODE_USXGMII) {
phylink_set(mask, 2500baseT_Full);
@@ -264,7 +266,9 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int link_an_mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -388,6 +392,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
resource_size_t switch_base;
+ struct resource res;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -400,6 +405,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->stats_layout = felix->info->stats_layout;
ocelot->num_stats = felix->info->num_stats;
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+ ocelot->num_mact_rows = felix->info->num_mact_rows;
+ ocelot->vcap_is2_keys = felix->info->vcap_is2_keys;
+ ocelot->vcap_is2_actions = felix->info->vcap_is2_actions;
+ ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
@@ -418,17 +427,16 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (i = 0; i < TARGET_MAX; i++) {
struct regmap *target;
- struct resource *res;
if (!felix->info->target_io_res[i].name)
continue;
- res = &felix->info->target_io_res[i];
- res->flags = IORESOURCE_MEM;
- res->start += switch_base;
- res->end += switch_base;
+ memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start += switch_base;
+ res.end += switch_base;
- target = ocelot_regmap_init(ocelot, res);
+ target = ocelot_regmap_init(ocelot, &res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map device memory space\n");
@@ -449,7 +457,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
void __iomem *port_regs;
- struct resource *res;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -461,12 +468,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
}
- res = &felix->info->port_io_res[port];
- res->flags = IORESOURCE_MEM;
- res->start += switch_base;
- res->end += switch_base;
+ memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start += switch_base;
+ res.end += switch_base;
- port_regs = devm_ioremap_resource(ocelot->dev, res);
+ port_regs = devm_ioremap_resource(ocelot->dev, &res);
if (IS_ERR(port_regs)) {
dev_err(ocelot->dev,
"failed to map registers for port %d\n", port);
@@ -511,12 +518,23 @@ static int felix_setup(struct dsa_switch *ds)
for (port = 0; port < ds->num_ports; port++) {
ocelot_init_port(ocelot, port);
+ /* Bring up the CPU port module and configure the NPI port */
if (dsa_is_cpu_port(ds, port))
- ocelot_set_cpu_port(ocelot, port,
- OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_LONG);
+ ocelot_configure_cpu(ocelot, port,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_LONG);
}
+ /* Include the CPU port module in the forwarding mask for unknown
+ * unicast - the hardware default value for ANA_FLOODING_FLD_UNICAST
+ * excludes BIT(ocelot->num_phys_ports), and so does ocelot_init, since
+ * Ocelot relies on whitelisting MAC addresses towards PGID_CPU.
+ */
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_UC);
+
+ ds->mtu_enforcement_ingress = true;
/* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
* isn't instantiated for the Felix PF.
* In-band AN may take a few ms to complete, so we need to poll.
@@ -594,6 +612,67 @@ static bool felix_txtstamp(struct dsa_switch *ds, int port,
return false;
}
+static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_set_maxlen(ocelot, port, new_mtu);
+
+ return 0;
+}
+
+static int felix_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_max_mtu(ocelot, port);
+}
+
+static int felix_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+}
+
+static int felix_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
+}
+
+static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
+}
+
+static int felix_port_policer_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_policer pol = {
+ .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
+ .burst = div_u64(policer->rate_bytes_per_sec *
+ PSCHED_NS2TICKS(policer->burst),
+ PSCHED_TICKS_PER_SEC),
+ };
+
+ return ocelot_port_policer_add(ocelot, port, &pol);
+}
+
+static void felix_port_policer_del(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_policer_del(ocelot, port);
+}
+
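The policer conversion above is worth unpacking. ocelot_policer.rate is in kbit/s, hence rate_bytes_per_sec / 1000 * 8. For the burst, multiplying the byte rate by the burst duration in psched ticks and dividing by ticks-per-second recovers a byte budget; since PSCHED_NS2TICKS(x) is x >> PSCHED_SHIFT and PSCHED_TICKS_PER_SEC is NSEC_PER_SEC >> PSCHED_SHIFT, the whole expression reduces to rate * burst_ns / 1e9. That `burst` arrives as a duration in nanoseconds is inferred from the PSCHED_NS2TICKS call, not stated in this patch. A worked sketch:

#include <stdint.h>
#include <stdio.h>

/* Simplified psched arithmetic (PSCHED_SHIFT is 6 in the kernel). */
#define PSCHED_SHIFT          6
#define PSCHED_NS2TICKS(ns)   ((ns) >> PSCHED_SHIFT)
#define PSCHED_TICKS_PER_SEC  (1000000000ULL >> PSCHED_SHIFT)

int main(void)
{
	uint64_t rate_bytes_per_sec = 12500000;	/* 100 Mbit/s */
	uint64_t burst_ns = 1000000;		/* 1 ms worth of traffic */

	uint64_t rate_kbps = rate_bytes_per_sec / 1000 * 8;	/* 100000 */
	uint64_t burst_bytes = rate_bytes_per_sec *
			       PSCHED_NS2TICKS(burst_ns) /
			       PSCHED_TICKS_PER_SEC;		/* ~12500 */

	printf("pol.rate  = %llu kbit/s\n",
	       (unsigned long long)rate_kbps);
	printf("pol.burst = %llu bytes\n",
	       (unsigned long long)burst_bytes);
	return 0;
}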
static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.setup = felix_setup,
@@ -625,6 +704,13 @@ static const struct dsa_switch_ops felix_switch_ops = {
.port_hwtstamp_set = felix_hwtstamp_set,
.port_rxtstamp = felix_rxtstamp,
.port_txtstamp = felix_txtstamp,
+ .port_change_mtu = felix_change_mtu,
+ .port_max_mtu = felix_get_max_mtu,
+ .port_policer_add = felix_port_policer_add,
+ .port_policer_del = felix_port_policer_del,
+ .cls_flower_add = felix_cls_flower_add,
+ .cls_flower_del = felix_cls_flower_del,
+ .cls_flower_stats = felix_cls_flower_stats,
};
static struct felix_info *felix_instance_tbl[] = {
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 3a7580015b62..730a8a90e1f7 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -8,16 +8,20 @@
/* Platform-specific information */
struct felix_info {
- struct resource *target_io_res;
- struct resource *port_io_res;
- struct resource *imdio_res;
+ const struct resource *target_io_res;
+ const struct resource *port_io_res;
+ const struct resource *imdio_res;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
int shared_queue_sz;
+ int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
+ struct vcap_field *vcap_is2_keys;
+ struct vcap_field *vcap_is2_actions;
+ const struct vcap_props *vcap;
int switch_pci_bar;
int imdio_pci_bar;
int (*mdio_bus_alloc)(struct ocelot *ocelot);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2c812b481778..5211f05ef2fb 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -3,12 +3,17 @@
* Copyright 2018-2019 NXP Semiconductors
*/
#include <linux/fsl/enetc_mdio.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "felix.h"
+#define VSC9959_VCAP_IS2_CNT 1024
+#define VSC9959_VCAP_IS2_ENTRY_WIDTH 376
+#define VSC9959_VCAP_PORT_CNT 6
+
/* TODO: should find a better place for these */
#define USXGMII_BMCR_RESET BIT(15)
#define USXGMII_BMCR_AN_EN BIT(12)
@@ -328,10 +333,8 @@ static const u32 *vsc9959_regmap[] = {
[GCB] = vsc9959_gcb_regmap,
};
-/* Addresses are relative to the PCI device's base address and
- * will be fixed up at ioremap time.
- */
-static struct resource vsc9959_target_io_res[] = {
+/* Addresses are relative to the PCI device's base address */
+static const struct resource vsc9959_target_io_res[] = {
[ANA] = {
.start = 0x0280000,
.end = 0x028ffff,
@@ -374,7 +377,7 @@ static struct resource vsc9959_target_io_res[] = {
},
};
-static struct resource vsc9959_port_io_res[] = {
+static const struct resource vsc9959_port_io_res[] = {
{
.start = 0x0100000,
.end = 0x010ffff,
@@ -410,7 +413,7 @@ static struct resource vsc9959_port_io_res[] = {
/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
* SGMII/QSGMII MAC PCS can be found.
*/
-static struct resource vsc9959_imdio_res = {
+static const struct resource vsc9959_imdio_res = {
.start = 0x8030,
.end = 0x8040,
.name = "imdio",
@@ -547,6 +550,129 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
{ .offset = 0x111, .name = "drop_green_prio_7", },
};
+struct vcap_field vsc9959_vcap_is2_keys[] = {
+ /* Common: 41 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 7},
+ [VCAP_IS2_HK_RSV2] = { 20, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 21, 1},
+ [VCAP_IS2_HK_L2_MC] = { 22, 1},
+ [VCAP_IS2_HK_L2_BC] = { 23, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 24, 1},
+ [VCAP_IS2_HK_VID] = { 25, 12},
+ [VCAP_IS2_HK_DEI] = { 37, 1},
+ [VCAP_IS2_HK_PCP] = { 38, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 41, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 89, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {137, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {153, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {169, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {177, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {137, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {137, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 41, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 89, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 90, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 91, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 92, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 93, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 95, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 97, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {129, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {161, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 41, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 42, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 43, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 44, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 45, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 46, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 54, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 86, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {118, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {119, 1},
+ [VCAP_IS2_HK_L4_SPORT] = {120, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {136, 16},
+ [VCAP_IS2_HK_L4_RNG] = {152, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {160, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {161, 1},
+ [VCAP_IS2_HK_L4_URG] = {162, 1},
+ [VCAP_IS2_HK_L4_ACK] = {163, 1},
+ [VCAP_IS2_HK_L4_PSH] = {164, 1},
+ [VCAP_IS2_HK_L4_RST] = {165, 1},
+ [VCAP_IS2_HK_L4_SYN] = {166, 1},
+ [VCAP_IS2_HK_L4_FIN] = {167, 1},
+ [VCAP_IS2_HK_L4_1588_DOM] = {168, 8},
+ [VCAP_IS2_HK_L4_1588_VER] = {176, 4},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {119, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {127, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 41, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 42, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {170, 8},
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = {137, 7},
+ [VCAP_IS2_HK_OAM_VER] = {144, 5},
+ [VCAP_IS2_HK_OAM_OPCODE] = {149, 8},
+ [VCAP_IS2_HK_OAM_FLAGS] = {157, 8},
+ [VCAP_IS2_HK_OAM_MEPID] = {165, 16},
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {181, 1},
+ [VCAP_IS2_HK_OAM_IS_Y1731] = {182, 1},
+};
+
+struct vcap_field vsc9959_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9},
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS2_ACT_RSV] = { 41, 2},
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
+};
+
+static const struct vcap_props vsc9959_vcap_props[] = {
+ [VCAP_IS2] = {
+ .tg_width = 2,
+ .sw_count = 4,
+ .entry_count = VSC9959_VCAP_IS2_CNT,
+ .entry_width = VSC9959_VCAP_IS2_ENTRY_WIDTH,
+ .action_count = VSC9959_VCAP_IS2_CNT +
+ VSC9959_VCAP_PORT_CNT + 2,
+ .action_width = 89,
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 44,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .counter_words = 4,
+ .counter_width = 32,
+ },
+};
+
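The key and action tables above are plain {offset, length} pairs, in bits, inside the IS2 key/action words; a generic packer can place any field by indexing the table, which is how one table per chip family can drive common flower-offload code. A standalone sketch of that table-driven placement; the enum indices are made up here (the real ones live in ocelot_vcap.h) and the LSB-first bit ordering is illustrative only:

#include <stdint.h>
#include <stdio.h>

struct vcap_field { int offset; int length; };

enum { KEY_L2_MC, KEY_VID, KEY_MAX };

static const struct vcap_field demo_keys[KEY_MAX] = {
	[KEY_L2_MC] = { 22, 1 },	/* offsets borrowed from above */
	[KEY_VID]   = { 25, 12 },
};

static void set_field(uint8_t *key, const struct vcap_field *f,
		      uint64_t val)
{
	for (int i = 0; i < f->length; i++, val >>= 1)
		if (val & 1)
			key[(f->offset + i) / 8] |=
				1 << ((f->offset + i) % 8);
}

int main(void)
{
	uint8_t key[48] = { 0 };

	set_field(key, &demo_keys[KEY_VID], 100);	/* match VLAN 100 */
	set_field(key, &demo_keys[KEY_L2_MC], 1);	/* multicast only */

	printf("key[2..4] = %02x %02x %02x\n", key[2], key[3], key[4]);
	return 0;
}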
#define VSC9959_INIT_TIMEOUT 50000
#define VSC9959_GCB_RST_SLEEP 100
#define VSC9959_SYS_RAMINIT_SLEEP 80
@@ -955,8 +1081,7 @@ static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
phy_interface_t phy_mode)
{
switch (phy_mode) {
- case PHY_INTERFACE_MODE_GMII:
- /* Only supported on internal to-CPU ports */
+ case PHY_INTERFACE_MODE_INTERNAL:
if (port != 4 && port != 5)
return -ENOTSUPP;
return 0;
@@ -984,7 +1109,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
struct device *dev = ocelot->dev;
resource_size_t imdio_base;
void __iomem *imdio_regs;
- struct resource *res;
+ struct resource res;
struct enetc_hw *hw;
struct mii_bus *bus;
int port;
@@ -1001,12 +1126,12 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
imdio_base = pci_resource_start(felix->pdev,
felix->info->imdio_pci_bar);
- res = felix->info->imdio_res;
- res->flags = IORESOURCE_MEM;
- res->start += imdio_base;
- res->end += imdio_base;
+ memcpy(&res, felix->info->imdio_res, sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start += imdio_base;
+ res.end += imdio_base;
- imdio_regs = devm_ioremap_resource(dev, res);
+ imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs)) {
dev_err(dev, "failed to map internal MDIO registers\n");
return PTR_ERR(imdio_regs);
@@ -1089,7 +1214,11 @@ struct felix_info felix_info_vsc9959 = {
.ops = &vsc9959_ops,
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
+ .vcap_is2_keys = vsc9959_vcap_is2_keys,
+ .vcap_is2_actions = vsc9959_vcap_is2_actions,
+ .vcap = vsc9959_vcap_props,
.shared_queue_sz = 128 * 1024,
+ .num_mact_rows = 2048,
.num_ports = 6,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index de25f99e995a..7c86056b9401 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -458,7 +458,9 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
struct regmap *regmap = priv->regmap;
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index 0fe1ae173aa1..68c3086af9af 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -20,6 +20,7 @@ tristate "NXP SJA1105 Ethernet switch family support"
config NET_DSA_SJA1105_PTP
bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
depends on NET_DSA_SJA1105
+ depends on PTP_1588_CLOCK
help
This enables support for timestamping and PTP clock manipulations in
the SJA1105 DSA driver.
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index 66161e874344..8943d8d66f2b 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
sja1105-objs := \
sja1105_spi.o \
sja1105_main.o \
+ sja1105_flower.o \
sja1105_ethtool.o \
sja1105_clocking.o \
sja1105_static_config.o \
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index d801fc204d19..8b60dbd567f2 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -19,6 +19,7 @@
* The passed parameter is in multiples of 1 ms.
*/
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
+#define SJA1105_NUM_L2_POLICERS 45
typedef enum {
SPI_READ = 0,
@@ -36,11 +37,15 @@ struct sja1105_regs {
u64 port_control;
u64 rgu;
u64 config;
+ u64 sgmii;
u64 rmii_pll1;
+ u64 ptppinst;
+ u64 ptppindur;
u64 ptp_control;
u64 ptpclkval;
u64 ptpclkrate;
u64 ptpclkcorp;
+ u64 ptpsyncts;
u64 ptpschtm;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
@@ -56,6 +61,7 @@ struct sja1105_regs {
u64 mac[SJA1105_NUM_PORTS];
u64 mac_hl1[SJA1105_NUM_PORTS];
u64 mac_hl2[SJA1105_NUM_PORTS];
+ u64 ether_stats[SJA1105_NUM_PORTS];
u64 qlevel[SJA1105_NUM_PORTS];
};
@@ -90,6 +96,36 @@ struct sja1105_info {
const char *name;
};
+enum sja1105_rule_type {
+ SJA1105_RULE_BCAST_POLICER,
+ SJA1105_RULE_TC_POLICER,
+};
+
+struct sja1105_rule {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned long port_mask;
+ enum sja1105_rule_type type;
+
+ union {
+ /* SJA1105_RULE_BCAST_POLICER */
+ struct {
+ int sharindx;
+ } bcast_pol;
+
+ /* SJA1105_RULE_TC_POLICER */
+ struct {
+ int sharindx;
+ int tc;
+ } tc_pol;
+ };
+};
+
+struct sja1105_flow_block {
+ struct list_head rules;
+ bool l2_policer_used[SJA1105_NUM_L2_POLICERS];
+};
+
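The flow block above keeps a cookie-keyed list of offloaded rules plus a bitmap of the 45 L2 policers in use; deleting or updating a flower rule starts with a lookup by cookie. A standalone model of that lookup; the kernel version would walk the list_head with list_for_each_entry():

#include <stddef.h>
#include <stdio.h>

struct rule {
	struct rule *next;
	unsigned long cookie;
	unsigned long port_mask;
};

static struct rule *rule_find(struct rule *head, unsigned long cookie)
{
	for (struct rule *r = head; r; r = r->next)
		if (r->cookie == cookie)
			return r;
	return NULL;
}

int main(void)
{
	struct rule b = { NULL, 2, 0x3 };
	struct rule a = { &b,   1, 0x1 };

	struct rule *hit = rule_find(&a, 2);
	printf("cookie 2 -> ports 0x%lx\n", hit ? hit->port_mask : 0UL);
	return 0;
}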
struct sja1105_private {
struct sja1105_static_config static_config;
bool rgmii_rx_delay[SJA1105_NUM_PORTS];
@@ -98,6 +134,7 @@ struct sja1105_private {
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
+ struct sja1105_flow_block flow_block;
struct sja1105_port ports[SJA1105_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
@@ -122,6 +159,7 @@ enum sja1105_reset_reason {
SJA1105_RX_HWTSTAMPING,
SJA1105_AGEING_TIME,
SJA1105_SCHEDULING,
+ SJA1105_BEST_EFFORT_POLICING,
};
int sja1105_static_config_reload(struct sja1105_private *priv,
@@ -159,6 +197,7 @@ typedef enum {
XMII_MODE_MII = 0,
XMII_MODE_RMII = 1,
XMII_MODE_RGMII = 2,
+ XMII_MODE_SGMII = 3,
} sja1105_phy_interface_t;
typedef enum {
@@ -212,5 +251,15 @@ size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+
+/* From sja1105_flower.c */
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+void sja1105_flower_setup(struct dsa_switch *ds);
+void sja1105_flower_teardown(struct dsa_switch *ds);
#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 9082e52b55e9..0fdc2d55fff6 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -660,6 +660,10 @@ int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
case XMII_MODE_RGMII:
rc = sja1105_rgmii_clocking_setup(priv, port, role);
break;
+ case XMII_MODE_SGMII:
+ /* Nothing to do in the CGU for SGMII */
+ rc = 0;
+ break;
default:
dev_err(dev, "Invalid interface mode specified: %d\n",
phy_mode);
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 25381bd65ed7..bf9b36ff35bf 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -124,6 +124,9 @@
#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
SJA1105_SIZE_DYN_CMD
+#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
+
#define SJA1105_MAX_DYN_CMD_SIZE \
SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD
@@ -481,6 +484,18 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
return 0;
}
+static void
+sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+}
+
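The cmd packing helper above places VALID/ERRORS/RDWRSET at bits 31/30/29 of the command word that follows the entry, using the same sja1105_packing() call for both directions. A simplified, single-word stand-in showing the pack/unpack symmetry; the real helper takes a start/end bit pair over an arbitrary-length buffer:

#include <stdint.h>
#include <stdio.h>

static void packing32(uint32_t *word, uint64_t *val, int hi, int lo,
		      int pack)
{
	uint32_t mask = (uint32_t)(((1ULL << (hi - lo + 1)) - 1) << lo);

	if (pack)
		*word = (*word & ~mask) | (((uint32_t)*val << lo) & mask);
	else
		*val = (*word & mask) >> lo;
}

int main(void)
{
	uint32_t cmd = 0;
	uint64_t valid = 1, rdwrset = 1, out = 0;

	packing32(&cmd, &valid,   31, 31, 1);	/* VALID */
	packing32(&cmd, &rdwrset, 29, 29, 1);	/* RDWRSET = write */
	printf("cmd word: 0x%08x\n", (unsigned)cmd);	/* 0xa0000000 */

	packing32(&cmd, &out, 31, 31, 0);	/* read VALID back */
	printf("valid: %llu\n", (unsigned long long)out);
	return 0;
}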
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
@@ -610,7 +625,14 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
.addr = 0x38,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {0},
- [BLK_IDX_AVB_PARAMS] = {0},
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = 0x8003,
+ },
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1105et_general_params_entry_packing,
.cmd_packing = sja1105et_general_params_cmd_packing,
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index 064301cc7d5b..d742ffcbfce9 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -7,6 +7,7 @@
#define SJA1105_SIZE_HL1_AREA (0x10 * 4)
#define SJA1105_SIZE_HL2_AREA (0x4 * 4)
#define SJA1105_SIZE_QLEVEL_AREA (0x8 * 4) /* 0x4 to 0xB */
+#define SJA1105_SIZE_ETHER_AREA (0x17 * 4)
struct sja1105_port_status_mac {
u64 n_runt;
@@ -63,10 +64,37 @@ struct sja1105_port_status_hl2 {
u64 qlevel[8]; /* Only for P/Q/R/S */
};
+struct sja1105_port_status_ether {
+ u64 n_drops_nolearn;
+ u64 n_drops_noroute;
+ u64 n_drops_ill_dtag;
+ u64 n_drops_dtag;
+ u64 n_drops_sotag;
+ u64 n_drops_sitag;
+ u64 n_drops_utag;
+ u64 n_tx_bytes_1024_2047;
+ u64 n_tx_bytes_512_1023;
+ u64 n_tx_bytes_256_511;
+ u64 n_tx_bytes_128_255;
+ u64 n_tx_bytes_65_127;
+ u64 n_tx_bytes_64;
+ u64 n_tx_mcast;
+ u64 n_tx_bcast;
+ u64 n_rx_bytes_1024_2047;
+ u64 n_rx_bytes_512_1023;
+ u64 n_rx_bytes_256_511;
+ u64 n_rx_bytes_128_255;
+ u64 n_rx_bytes_65_127;
+ u64 n_rx_bytes_64;
+ u64 n_rx_mcast;
+ u64 n_rx_bcast;
+};
+
struct sja1105_port_status {
struct sja1105_port_status_mac mac;
struct sja1105_port_status_hl1 hl1;
struct sja1105_port_status_hl2 hl2;
+ struct sja1105_port_status_ether ether;
};
static void
@@ -158,6 +186,58 @@ sja1105pqrs_port_status_qlevel_unpack(void *buf,
}
}
+static void
+sja1105pqrs_port_status_ether_unpack(void *buf,
+ struct sja1105_port_status_ether *status)
+{
+ /* Use a u32 pointer so arithmetic advances in 4-byte steps */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x16, &status->n_drops_nolearn, 31, 0, 4);
+ sja1105_unpack(p + 0x15, &status->n_drops_noroute, 31, 0, 4);
+ sja1105_unpack(p + 0x14, &status->n_drops_ill_dtag, 31, 0, 4);
+ sja1105_unpack(p + 0x13, &status->n_drops_dtag, 31, 0, 4);
+ sja1105_unpack(p + 0x12, &status->n_drops_sotag, 31, 0, 4);
+ sja1105_unpack(p + 0x11, &status->n_drops_sitag, 31, 0, 4);
+ sja1105_unpack(p + 0x10, &status->n_drops_utag, 31, 0, 4);
+ sja1105_unpack(p + 0x0F, &status->n_tx_bytes_1024_2047, 31, 0, 4);
+ sja1105_unpack(p + 0x0E, &status->n_tx_bytes_512_1023, 31, 0, 4);
+ sja1105_unpack(p + 0x0D, &status->n_tx_bytes_256_511, 31, 0, 4);
+ sja1105_unpack(p + 0x0C, &status->n_tx_bytes_128_255, 31, 0, 4);
+ sja1105_unpack(p + 0x0B, &status->n_tx_bytes_65_127, 31, 0, 4);
+ sja1105_unpack(p + 0x0A, &status->n_tx_bytes_64, 31, 0, 4);
+ sja1105_unpack(p + 0x09, &status->n_tx_mcast, 31, 0, 4);
+ sja1105_unpack(p + 0x08, &status->n_tx_bcast, 31, 0, 4);
+ sja1105_unpack(p + 0x07, &status->n_rx_bytes_1024_2047, 31, 0, 4);
+ sja1105_unpack(p + 0x06, &status->n_rx_bytes_512_1023, 31, 0, 4);
+ sja1105_unpack(p + 0x05, &status->n_rx_bytes_256_511, 31, 0, 4);
+ sja1105_unpack(p + 0x04, &status->n_rx_bytes_128_255, 31, 0, 4);
+ sja1105_unpack(p + 0x03, &status->n_rx_bytes_65_127, 31, 0, 4);
+ sja1105_unpack(p + 0x02, &status->n_rx_bytes_64, 31, 0, 4);
+ sja1105_unpack(p + 0x01, &status->n_rx_mcast, 31, 0, 4);
+ sja1105_unpack(p + 0x00, &status->n_rx_bcast, 31, 0, 4);
+}
+
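Each counter above occupies one full 32-bit word (bits 31:0), so SJA1105_SIZE_ETHER_AREA is 0x17 = 23 words. A simplified userspace stand-in for the unpacking (the real sja1105_unpack() extracts arbitrary bit fields from a packed buffer; plain word reads only approximate it for full-word fields like these):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for sja1105_unpack() restricted to full 32-bit fields */
static uint64_t unpack_word(const uint32_t *area, int word)
{
	return area[word];
}

int main(void)
{
	uint32_t area[0x17] = { [0x00] = 7, [0x16] = 3 };

	/* Word 0x00 is n_rx_bcast, word 0x16 is n_drops_nolearn */
	printf("n_rx_bcast=%llu n_drops_nolearn=%llu\n",
	       (unsigned long long)unpack_word(area, 0x00),
	       (unsigned long long)unpack_word(area, 0x16));
	return 0;
}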
+static int
+sja1105pqrs_port_status_get_ether(struct sja1105_private *priv,
+ struct sja1105_port_status_ether *ether,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_ETHER_AREA] = {0};
+ int rc;
+
+ /* Ethernet statistics area */
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ether_stats[port],
+ packed_buf, SJA1105_SIZE_ETHER_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105pqrs_port_status_ether_unpack(packed_buf, ether);
+
+ return 0;
+}
+
static int sja1105_port_status_get_mac(struct sja1105_private *priv,
struct sja1105_port_status_mac *status,
int port)
@@ -241,7 +321,11 @@ static int sja1105_port_status_get(struct sja1105_private *priv,
if (rc < 0)
return rc;
- return 0;
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return 0;
+
+ return sja1105pqrs_port_status_get_ether(priv, &status->ether, port);
}
static char sja1105_port_stats[][ETH_GSTRING_LEN] = {
@@ -308,6 +392,30 @@ static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = {
"qlevel_5",
"qlevel_6",
"qlevel_7",
+ /* Ether Stats */
+ "n_drops_nolearn",
+ "n_drops_noroute",
+ "n_drops_ill_dtag",
+ "n_drops_dtag",
+ "n_drops_sotag",
+ "n_drops_sitag",
+ "n_drops_utag",
+ "n_tx_bytes_1024_2047",
+ "n_tx_bytes_512_1023",
+ "n_tx_bytes_256_511",
+ "n_tx_bytes_128_255",
+ "n_tx_bytes_65_127",
+ "n_tx_bytes_64",
+ "n_tx_mcast",
+ "n_tx_bcast",
+ "n_rx_bytes_1024_2047",
+ "n_rx_bytes_512_1023",
+ "n_rx_bytes_256_511",
+ "n_rx_bytes_128_255",
+ "n_rx_bytes_65_127",
+ "n_rx_bytes_64",
+ "n_rx_mcast",
+ "n_rx_bcast",
};
void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
@@ -376,6 +484,29 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
data[k++] = status.hl2.qlevel_hwm[i];
data[k++] = status.hl2.qlevel[i];
}
+ data[k++] = status.ether.n_drops_nolearn;
+ data[k++] = status.ether.n_drops_noroute;
+ data[k++] = status.ether.n_drops_ill_dtag;
+ data[k++] = status.ether.n_drops_dtag;
+ data[k++] = status.ether.n_drops_sotag;
+ data[k++] = status.ether.n_drops_sitag;
+ data[k++] = status.ether.n_drops_utag;
+ data[k++] = status.ether.n_tx_bytes_1024_2047;
+ data[k++] = status.ether.n_tx_bytes_512_1023;
+ data[k++] = status.ether.n_tx_bytes_256_511;
+ data[k++] = status.ether.n_tx_bytes_128_255;
+ data[k++] = status.ether.n_tx_bytes_65_127;
+ data[k++] = status.ether.n_tx_bytes_64;
+ data[k++] = status.ether.n_tx_mcast;
+ data[k++] = status.ether.n_tx_bcast;
+ data[k++] = status.ether.n_rx_bytes_1024_2047;
+ data[k++] = status.ether.n_rx_bytes_512_1023;
+ data[k++] = status.ether.n_rx_bytes_256_511;
+ data[k++] = status.ether.n_rx_bytes_128_255;
+ data[k++] = status.ether.n_rx_bytes_65_127;
+ data[k++] = status.ether.n_rx_bytes_64;
+ data[k++] = status.ether.n_rx_mcast;
+ data[k++] = status.ether.n_rx_bcast;
}
void sja1105_get_strings(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
new file mode 100644
index 000000000000..5288a722e625
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020, NXP Semiconductors
+ */
+#include "sja1105.h"
+
+static struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie)
+{
+ struct sja1105_rule *rule;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list)
+ if (rule->cookie == cookie)
+ return rule;
+
+ return NULL;
+}
+
+static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
+{
+ int i;
+
+ for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
+ if (!priv->flow_block.l2_policer_used[i])
+ return i;
+
+ return -1;
+}
+
+static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port,
+ u64 rate_bytes_per_sec,
+ s64 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_BCAST_POLICER;
+ rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ new_rule = true;
+ }
+
+ if (rule->bcast_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port already has a broadcast policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the broadcast policers of all ports attached to this block
+ * point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;
+
+ policing[bcast].sharindx = rule->bcast_pol.sharindx;
+ }
+
+ policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->bcast_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
+ PSCHED_NS2TICKS(burst),
+ PSCHED_TICKS_PER_SEC);
+ /* TODO: support per-flow MTU */
+ policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_setup_tc_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port, int tc,
+ u64 rate_bytes_per_sec,
+ s64 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_TC_POLICER;
+ rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->tc_pol.tc = tc;
+ new_rule = true;
+ }
+
+ if (rule->tc_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port-TC pair already has an L2 policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the policers for traffic class @tc of all ports attached to
+ * this block point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+ int index = (p * SJA1105_NUM_TC) + tc;
+
+ policing[index].sharindx = rule->tc_pol.sharindx;
+ }
+
+ policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->tc_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
+ PSCHED_NS2TICKS(burst),
+ PSCHED_TICKS_PER_SEC);
+ /* TODO: support per-flow MTU */
+ policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_bytes_per_sec,
+ s64 burst)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on protocol not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!ether_addr_equal_masked(match.key->src, null,
+ match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ether_addr_equal_masked(match.key->dst, bcast,
+ match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only matching on broadcast DMAC is supported");
+ return -EOPNOTSUPP;
+ }
+
+ return sja1105_setup_bcast_policer(priv, extack, cls->cookie,
+ port, rate_bytes_per_sec,
+ burst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_id & match.mask->vlan_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on VID is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_priority != 0x7) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on PCP is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return sja1105_setup_tc_policer(priv, extack, cls->cookie, port,
+ match.key->vlan_priority,
+ rate_bytes_per_sec,
+ burst);
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
+ return -EOPNOTSUPP;
+}
+
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct sja1105_private *priv = ds->priv;
+ const struct flow_action_entry *act;
+ int rc = -EOPNOTSUPP, i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ rc = sja1105_flower_parse_policer(priv, port, extack, cls,
+ act->police.rate_bytes_ps,
+ act->police.burst);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ struct sja1105_l2_policing_entry *policing;
+ int old_sharindx;
+
+ if (!rule)
+ return 0;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (rule->type == SJA1105_RULE_BCAST_POLICER) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+
+ old_sharindx = policing[bcast].sharindx;
+ policing[bcast].sharindx = port;
+ } else if (rule->type == SJA1105_RULE_TC_POLICER) {
+ int index = (port * SJA1105_NUM_TC) + rule->tc_pol.tc;
+
+ old_sharindx = policing[index].sharindx;
+ policing[index].sharindx = port;
+ } else {
+ return -EINVAL;
+ }
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ priv->flow_block.l2_policer_used[old_sharindx] = false;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+void sja1105_flower_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ int port;
+
+ INIT_LIST_HEAD(&priv->flow_block.rules);
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++)
+ priv->flow_block.l2_policer_used[port] = true;
+}
+
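A note on the setup above: marking entries 0..SJA1105_NUM_PORTS-1 as used reserves policers 0-4 for the per-port matchall policers, so sja1105_find_free_l2_policer() starts handing out indices at 5. A userspace sketch of that allocator behavior (the total of 45 policers is inferred from 5 ports x 8 TCs + 5 broadcast rows):

#include <stdbool.h>
#include <stdio.h>

#define SJA1105_NUM_PORTS        5
#define SJA1105_NUM_L2_POLICERS 45  /* inferred: 5 * 8 + 5 */

int main(void)
{
	bool used[SJA1105_NUM_L2_POLICERS] = { false };
	int i;

	for (i = 0; i < SJA1105_NUM_PORTS; i++)
		used[i] = true;  /* static matchall policers */

	for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
		if (!used[i])
			break;

	printf("first policer free for flower rules: %d\n", i);  /* 5 */
	return 0;
}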
+void sja1105_flower_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &priv->flow_block.rules) {
+ rule = list_entry(pos, struct sja1105_rule, list);
+ list_del(&rule->list);
+ kfree(rule);
+ }
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 7edea5741a5f..472f4eb20c49 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -22,6 +22,7 @@
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
+#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
@@ -135,6 +136,21 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
return 0;
}
+static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
+{
+ if (priv->info->part_no != SJA1105R_PART_NO &&
+ priv->info->part_no != SJA1105S_PART_NO)
+ return false;
+
+ if (port != SJA1105_SGMII_PORT)
+ return false;
+
+ if (dsa_is_unused_port(priv->ds, port))
+ return false;
+
+ return true;
+}
+
static int sja1105_init_mii_settings(struct sja1105_private *priv,
struct sja1105_dt_port *ports)
{
@@ -162,6 +178,9 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
mii = table->entries;
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
switch (ports[i].phy_mode) {
case PHY_INTERFACE_MODE_MII:
mii->xmii_mode[i] = XMII_MODE_MII;
@@ -175,12 +194,24 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
case PHY_INTERFACE_MODE_RGMII_TXID:
mii->xmii_mode[i] = XMII_MODE_RGMII;
break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (!sja1105_supports_sgmii(priv, i))
+ return -EINVAL;
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ break;
default:
dev_err(dev, "Unsupported PHY mode %s!\n",
phy_modes(ports[i].phy_mode));
}
- mii->phy_mac[i] = ports[i].role;
+ /* Even though the SerDes port is able to drive SGMII autoneg
+ * like a PHY would, from the perspective of the XMII tables,
+ * the SGMII port should always be put in MAC mode.
+ */
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
+ mii->phy_mac[i] = XMII_MAC;
+ else
+ mii->phy_mac[i] = ports[i].role;
}
return 0;
}
@@ -448,23 +479,93 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
return 0;
}
-#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
-
-static void sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
- int index)
+static int sja1105_init_avb_params(struct sja1105_private *priv)
{
- policing[index].sharindx = index;
- policing[index].smax = 65535; /* Burst size in bytes */
- policing[index].rate = SJA1105_RATE_MBPS(1000);
- policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
- policing[index].partition = 0;
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+
+ avb = table->entries;
+
+ /* Configure the MAC addresses for meta frames */
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+ /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
+ * default. This is because there might be boards with a hardware
+ * layout where enabling the pin as output might cause an electrical
+ * clash. On E/T the pin is always an output, which the board designers
+ * probably already knew, so even if there are going to be electrical
+ * issues, there's nothing we can do.
+ */
+ avb->cas_master = false;
+
+ return 0;
}
+/* The L2 policing table is 2-stage. The table is looked up for each frame
+ * according to the ingress port, whether it was broadcast or not, and the
+ * classified traffic class (given by VLAN PCP). This portion of the lookup is
+ * fixed, and gives access to the SHARINDX, an indirection register pointing
+ * within the policing table itself, which selects the policer that
+ * will be applied to this frame.
+ *
+ * Stage 1 Stage 2
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 2: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 5: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 7: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 TC 7 |SHARINDX| ...
+ * +------------+--------+
+ * |Port 0 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * |Port 1 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * ... ...
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ *
+ * In this driver, we shall use policers 0-4 as statically allocated port
+ * (matchall) policers. So we need to make the SHARINDX for all lookups
+ * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
+ * lookup) equal.
+ * The remaining policers (40) shall be dynamically allocated for flower
+ * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
+ */
+#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
+
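A quick userspace check of the stage-1 indexing described above, showing which lookup rows a matchall policer on port 2 rewrites (all of them get SHARINDX = 2):

#include <stdio.h>

#define SJA1105_NUM_PORTS 5
#define SJA1105_NUM_TC    8

int main(void)
{
	int port = 2, tc;

	/* Rows 0..39: one per (port, TC) pair */
	for (tc = 0; tc < SJA1105_NUM_TC; tc++)
		printf("port %d TC %d -> row %d\n",
		       port, tc, port * SJA1105_NUM_TC + tc);

	/* Rows 40..44: one broadcast row per port */
	printf("port %d BCAST -> row %d\n",
	       port, SJA1105_NUM_PORTS * SJA1105_NUM_TC + port);

	return 0;
}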
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
struct sja1105_l2_policing_entry *policing;
struct sja1105_table *table;
- int i, j, k;
+ int port, tc;
table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
@@ -483,18 +584,29 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
policing = table->entries;
- /* k sweeps through all unicast policers (0-39).
- * bcast sweeps through policers 40-44.
- */
- for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
+ /* Setup shared indices for the matchall policers */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
- for (j = 0; j < SJA1105_NUM_TC; j++, k++)
- sja1105_setup_policer(policing, k);
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ policing[port * SJA1105_NUM_TC + tc].sharindx = port;
- /* Set up this port's policer for broadcast traffic */
- sja1105_setup_policer(policing, bcast);
+ policing[bcast].sharindx = port;
}
+
+ /* Setup the matchall policer parameters */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(priv->ds, port))
+ mtu += VLAN_HLEN;
+
+ policing[port].smax = 65535; /* Burst size in bytes */
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].maxlen = mtu;
+ policing[port].partition = 0;
+ }
+
return 0;
}
@@ -538,6 +650,9 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_general_params(priv);
if (rc < 0)
return rc;
+ rc = sja1105_init_avb_params(priv);
+ if (rc < 0)
+ return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
@@ -647,6 +762,85 @@ static int sja1105_parse_dt(struct sja1105_private *priv,
return rc;
}
+static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 val;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ return val;
+}
+
+static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
+ u16 pcs_val)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 val = pcs_val;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ return val;
+}
+
+static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
+ bool an_enabled, bool an_master)
+{
+ u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;
+
+ /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
+ * stop the clock during LPI mode, make the MAC reconfigure
+ * autonomously after PCS autoneg is done, flush the internal FIFOs.
+ */
+ sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
+ SJA1105_DC1_CLOCK_STOP_EN |
+ SJA1105_DC1_MAC_AUTO_SW |
+ SJA1105_DC1_INIT);
+ /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
+ sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
+ /* AUTONEG_CONTROL: Use SGMII autoneg */
+ if (an_master)
+ ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
+ sja1105_sgmii_write(priv, SJA1105_AC, ac);
+ /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
+ * sja1105_sgmii_pcs_force_speed must be called later for the link
+ * to become operational.
+ */
+ if (an_enabled)
+ sja1105_sgmii_write(priv, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
+ int speed)
+{
+ int pcs_speed;
+
+ switch (speed) {
+ case SPEED_1000:
+ pcs_speed = BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ pcs_speed = BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ pcs_speed = BMCR_SPEED10;
+ break;
+ default:
+ dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
+ return;
+ }
+ sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
+}
+
/* Convert link speed from SJA1105 to ethtool encoding */
static int sja1105_speed[] = {
[SJA1105_SPEED_AUTO] = SPEED_UNKNOWN,
@@ -704,8 +898,13 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
+ * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
+ * we need to configure the PCS only (if even that).
*/
- mac[port].speed = speed;
+ if (sja1105_supports_sgmii(priv, port))
+ mac[port].speed = SJA1105_SPEED_1000MBPS;
+ else
+ mac[port].speed = speed;
/* Write to the dynamic reconfiguration tables */
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
@@ -754,26 +953,34 @@ static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
return (phy_mode != XMII_MODE_RGMII);
+ case PHY_INTERFACE_MODE_SGMII:
+ return (phy_mode != XMII_MODE_SGMII);
default:
return true;
}
}
static void sja1105_mac_config(struct dsa_switch *ds, int port,
- unsigned int link_an_mode,
+ unsigned int mode,
const struct phylink_link_state *state)
{
struct sja1105_private *priv = ds->priv;
+ bool is_sgmii = sja1105_supports_sgmii(priv, port);
- if (sja1105_phy_mode_mismatch(priv, port, state->interface))
+ if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
+ dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
+ phy_modes(state->interface));
return;
+ }
- if (link_an_mode == MLO_AN_INBAND) {
+ if (phylink_autoneg_inband(mode) && !is_sgmii) {
dev_err(ds->dev, "In-band AN not supported!\n");
return;
}
- sja1105_adjust_port_config(priv, port, state->speed);
+ if (is_sgmii)
+ sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
+ false);
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
@@ -786,9 +993,18 @@ static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- sja1105_inhibit_tx(ds->priv, BIT(port), false);
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_adjust_port_config(priv, port, speed);
+
+ if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
+ sja1105_sgmii_pcs_force_speed(priv, speed);
+
+ sja1105_inhibit_tx(priv, BIT(port), false);
}
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
@@ -822,7 +1038,9 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, MII);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Full);
- if (mii->xmii_mode[port] == XMII_MODE_RGMII)
+ phylink_set(mask, 100baseT1_Full);
+ if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
+ mii->xmii_mode[port] == XMII_MODE_SGMII)
phylink_set(mask, 1000baseT_Full);
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -830,6 +1048,38 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct sja1105_private *priv = ds->priv;
+ int ais;
+
+ /* Read the vendor-specific AUTONEG_INTR_STATUS register */
+ ais = sja1105_sgmii_read(priv, SJA1105_AIS);
+ if (ais < 0)
+ return ais;
+
+ switch (SJA1105_AIS_SPEED(ais)) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case 1:
+ state->speed = SPEED_100;
+ break;
+ case 2:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
+ SJA1105_AIS_SPEED(ais));
+ }
+ state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
+ state->an_complete = SJA1105_AIS_COMPLETE(ais);
+ state->link = SJA1105_AIS_LINK_STATUS(ais);
+
+ return 0;
+}
+
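To make the decode above concrete, a small userspace example applying the AUTONEG_INTR_STATUS bit layout from sja1105_sgmii.h (link = bit 4, speed code = bits 3:2, duplex = bit 1, AN complete = bit 0) to a hypothetical readout:

#include <stdio.h>

int main(void)
{
	unsigned int ais = 0x17;  /* hypothetical register value */

	/* 0x17 = 0b10111: link up, speed code 1 (100 Mbps),
	 * full duplex, autoneg complete
	 */
	printf("link=%u speed_code=%u duplex=%u an_complete=%u\n",
	       !!(ais & (1 << 4)), (ais >> 2) & 3,
	       !!(ais & (1 << 1)), !!(ais & 1));
	return 0;
}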
static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
const struct sja1105_l2_lookup_entry *requested)
@@ -1338,6 +1588,7 @@ static const char * const sja1105_reset_reasons[] = {
[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
[SJA1105_AGEING_TIME] = "Ageing time",
[SJA1105_SCHEDULING] = "Time-aware scheduling",
+ [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
};
/* For situations where we need to change a setting at runtime that is only
@@ -1356,6 +1607,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
struct dsa_switch *ds = priv->ds;
s64 t1, t2, t3, t4;
s64 t12, t34;
+ u16 bmcr = 0;
int rc, i;
s64 now;
@@ -1373,6 +1625,9 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
mac[i].speed = SJA1105_SPEED_AUTO;
}
+ if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
+ bmcr = sja1105_sgmii_read(priv, MII_BMCR);
+
/* No PTP operations can run right now */
mutex_lock(&priv->ptp_data.lock);
@@ -1422,6 +1677,25 @@ out_unlock_ptp:
if (rc < 0)
goto out;
}
+
+ if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
+ bool an_enabled = !!(bmcr & BMCR_ANENABLE);
+
+ sja1105_sgmii_pcs_config(priv, an_enabled, false);
+
+ if (!an_enabled) {
+ int speed = SPEED_UNKNOWN;
+
+ if (bmcr & BMCR_SPEED1000)
+ speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ speed = SPEED_100;
+ else if (bmcr & BMCR_SPEED10)
+ speed = SPEED_10;
+
+ sja1105_sgmii_pcs_force_speed(priv, speed);
+ }
+ }
out:
mutex_unlock(&priv->mgmt_lock);
@@ -1723,6 +1997,8 @@ static int sja1105_setup(struct dsa_switch *ds)
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
+ ds->mtu_enforcement_ingress = true;
+
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
* a bridge, so it's safe to set up switch tagging at this time.
@@ -1745,6 +2021,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
kthread_destroy_worker(sp->xmit_worker);
}
+ sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
sja1105_static_config_free(&priv->static_config);
@@ -1891,6 +2168,31 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
}
+static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += VLAN_HLEN;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[port].maxlen == new_mtu)
+ return 0;
+
+ policing[port].maxlen = new_mtu;
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
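The MTU conversions above account for the hardware counting the whole L2 frame against MAXLEN. A worked userspace example (VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4, as defined in if_vlan.h/if_ether.h), showing why the maximum user MTU comes out to 2021 for the 2043-byte frame limit:

#include <stdio.h>

#define VLAN_ETH_HLEN 18  /* MACs + VLAN tag + ethertype */
#define ETH_FCS_LEN    4
#define VLAN_HLEN      4  /* extra DSA tag room on the CPU port */

int main(void)
{
	int max_frame = 2043;  /* SJA1105 frame length limit */

	printf("max user MTU: %d\n",
	       max_frame - VLAN_ETH_HLEN - ETH_FCS_LEN);          /* 2021 */
	printf("MAXLEN for MTU 1500 on the CPU port: %d\n",
	       1500 + VLAN_ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);   /* 1526 */
	return 0;
}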
static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1981,12 +2283,49 @@ static void sja1105_mirror_del(struct dsa_switch *ds, int port,
mirror->ingress, false);
}
+static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ /* In hardware, every 8 microseconds the credit level is incremented by
+ * the value of RATE bytes divided by 64, up to a maximum of SMAX
+ * bytes.
+ */
+ policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
+ 1000000);
+ policing[port].smax = div_u64(policer->rate_bytes_per_sec *
+ PSCHED_NS2TICKS(policer->burst),
+ PSCHED_TICKS_PER_SEC);
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
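The conversion above follows from the credit mechanism: the bucket gains RATE/64 bytes every 8 us, i.e. bytes/sec = (RATE / 64) * 125,000, hence RATE = rate_bytes_per_sec * 512 / 10^6; it is the same conversion the flower policers use earlier in this patch. A userspace check that 1 Gbps maps to the 64000 that SJA1105_RATE_MBPS(1000) also produces:

#include <stdint.h>
#include <stdio.h>

/* Credit grows by RATE/64 bytes every 8 us:
 * bytes/sec = (RATE / 64) / 8e-6  =>  RATE = bytes_per_sec * 512 / 1e6
 */
static uint64_t sja1105_rate(uint64_t bytes_per_sec)
{
	return bytes_per_sec * 512 / 1000000;
}

int main(void)
{
	/* 1000 Mbps = 125,000,000 bytes/s */
	printf("RATE for 1 Gbps: %llu\n",
	       (unsigned long long)sja1105_rate(125000000ULL));  /* 64000 */
	return 0;
}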
+static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].smax = 65535;
+
+ sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
.set_ageing_time = sja1105_set_ageing_time,
+ .port_change_mtu = sja1105_change_mtu,
+ .port_max_mtu = sja1105_get_max_mtu,
.phylink_validate = sja1105_phylink_validate,
+ .phylink_mac_link_state = sja1105_mac_pcs_get_state,
.phylink_mac_config = sja1105_mac_config,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
@@ -2016,6 +2355,10 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_setup_tc = sja1105_port_setup_tc,
.port_mirror_add = sja1105_mirror_add,
.port_mirror_del = sja1105_mirror_del,
+ .port_policer_add = sja1105_port_policer_add,
+ .port_policer_del = sja1105_port_policer_del,
+ .cls_flower_add = sja1105_cls_flower_add,
+ .cls_flower_del = sja1105_cls_flower_del,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -2119,6 +2462,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->mgmt_lock);
sja1105_tas_setup(ds);
+ sja1105_flower_setup(ds);
rc = dsa_register_switch(priv->ds);
if (rc)
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index a836fc38c4a4..bc0e47c1dbb9 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -14,6 +14,18 @@
#define SJA1105_MAX_ADJ_PPB 32000000
#define SJA1105_SIZE_PTP_CMD 4
+/* PTPSYNCTS has no interrupt or update mechanism, because the intended
+ * hardware use case is for the timestamp to be collected synchronously,
+ * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC
+ * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a
+ * generic extts source, the PTPSYNCTS register needs polling and a comparison
+ * with the old value. The polling interval is configured as the Nyquist rate
+ * of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that
+ * this hardware can do (but may be enough for some setups). Anything of higher
+ * frequency than 1 Hz will be lost, since there is no timestamp FIFO.
+ */
+#define SJA1105_EXTTS_INTERVAL (HZ / 4)
+
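The HZ / 4 value follows directly from the comment above: the slowest trackable signal (1 Hz, 50% duty cycle) changes level every 500 ms, and sampling at twice that rate gives a 250 ms poll interval. A one-line userspace check of the arithmetic:

#include <stdio.h>

int main(void)
{
	double period_s = 1.0;                 /* 1 Hz, 50% duty cycle */
	double edge_interval_s = period_s / 2; /* level change every 500 ms */

	/* Nyquist: sample at least twice per level change */
	printf("max poll interval: %.0f ms\n",
	       edge_interval_s / 2 * 1000);    /* 250 ms, i.e. HZ / 4 */
	return 0;
}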
/* This range is actually +/- SJA1105_MAX_ADJ_PPB
* divided by 1000 (ppb -> ppm) and with a 16-bit
* "fractional" part (actually fixed point).
@@ -39,44 +51,13 @@ enum sja1105_ptp_clk_mode {
PTP_SET_MODE = 0,
};
+#define extts_to_data(d) \
+ container_of((d), struct sja1105_ptp_data, extts_work)
#define ptp_caps_to_data(d) \
container_of((d), struct sja1105_ptp_data, caps)
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-static int sja1105_init_avb_params(struct sja1105_private *priv,
- bool on)
-{
- struct sja1105_avb_params_entry *avb;
- struct sja1105_table *table;
-
- table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
-
- /* Discard previous AVB Parameters Table */
- if (table->entry_count) {
- kfree(table->entries);
- table->entry_count = 0;
- }
-
- /* Configure the reception of meta frames only if requested */
- if (!on)
- return 0;
-
- table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
-
- avb = table->entries;
-
- avb->destmeta = SJA1105_META_DMAC;
- avb->srcmeta = SJA1105_META_SMAC;
-
- return 0;
-}
-
/* Must be called only with priv->tagger_data.state bit
* SJA1105_HWTS_RX_EN cleared
*/
@@ -86,17 +67,12 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
- int rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
general_params->send_meta1 = on;
general_params->send_meta0 = on;
- rc = sja1105_init_avb_params(priv, on);
- if (rc < 0)
- return rc;
-
/* Initialize the meta state machine to a known state */
if (priv->tagger_data.stampable_skb) {
kfree_skb(priv->tagger_data.stampable_skb);
@@ -206,6 +182,8 @@ void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
sja1105_packing(buf, &valid, 31, 31, size, op);
sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
@@ -221,6 +199,8 @@ void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
sja1105_packing(buf, &valid, 31, 31, size, op);
sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
@@ -615,6 +595,236 @@ static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return rc;
}
+static void sja1105_ptp_extts_work(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct sja1105_ptp_data *ptp_data = extts_to_data(dw);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct ptp_clock_event event;
+ u64 ptpsyncts = 0;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts,
+ NULL);
+ if (rc < 0)
+ dev_err_ratelimited(priv->ds->dev,
+ "Failed to read PTPSYNCTS: %d\n", rc);
+
+ if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) {
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts));
+ ptp_clock_event(ptp_data->clock, &event);
+
+ ptp_data->ptpsyncts = ptpsyncts;
+ }
+
+ mutex_unlock(&ptp_data->lock);
+
+ schedule_delayed_work(&ptp_data->extts_work, SJA1105_EXTTS_INTERVAL);
+}
+
+static int sja1105_change_ptp_clk_pin_func(struct sja1105_private *priv,
+ enum ptp_pin_function func)
+{
+ struct sja1105_avb_params_entry *avb;
+ enum ptp_pin_function old_func;
+
+ avb = priv->static_config.tables[BLK_IDX_AVB_PARAMS].entries;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID ||
+ avb->cas_master)
+ old_func = PTP_PF_PEROUT;
+ else
+ old_func = PTP_PF_EXTTS;
+
+ if (func == old_func)
+ return 0;
+
+ avb->cas_master = (func == PTP_PF_PEROUT);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_AVB_PARAMS, 0, avb,
+ true);
+}
+
+/* The PTP_CLK pin may be configured to toggle with a 50% duty cycle and a
+ * frequency f:
+ *
+ * NSEC_PER_SEC
+ * f = ----------------------
+ * (PTPPINDUR * 8 ns) * 2
+ */
+static int sja1105_per_out_enable(struct sja1105_private *priv,
+ struct ptp_perout_request *perout,
+ bool on)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
+ int rc;
+
+ /* We only support one channel */
+ if (perout->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (perout->flags)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
+ if (rc)
+ goto out;
+
+ if (on) {
+ struct timespec64 pin_duration_ts = {
+ .tv_sec = perout->period.sec,
+ .tv_nsec = perout->period.nsec,
+ };
+ struct timespec64 pin_start_ts = {
+ .tv_sec = perout->start.sec,
+ .tv_nsec = perout->start.nsec,
+ };
+ u64 pin_duration = timespec64_to_ns(&pin_duration_ts);
+ u64 pin_start = timespec64_to_ns(&pin_start_ts);
+ u32 pin_duration32;
+ u64 now;
+
+ /* ptppindur: 32 bit register which holds the interval between
+ * 2 edges on PTP_CLK. So check for truncation which happens
+ * at periods larger than around 68.7 seconds.
+ */
+ pin_duration = ns_to_sja1105_ticks(pin_duration / 2);
+ if (pin_duration > U32_MAX) {
+ rc = -ERANGE;
+ goto out;
+ }
+ pin_duration32 = pin_duration;
+
+	/* ptppinst: 64 bit register which needs to hold a PTP time
+ * larger than the current time, otherwise the startptpcp
+ * command won't do anything. So advance the current time
+ * by a number of periods in a way that won't alter the
+ * phase offset.
+ */
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, NULL);
+ if (rc < 0)
+ goto out;
+
+ pin_start = future_base_time(pin_start, pin_duration,
+ now + 1ull * NSEC_PER_SEC);
+ pin_start = ns_to_sja1105_ticks(pin_start);
+
+ rc = sja1105_xfer_u64(priv, SPI_WRITE, regs->ptppinst,
+ &pin_start, NULL);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptppindur,
+ &pin_duration32, NULL);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (on)
+ cmd.startptpcp = true;
+ else
+ cmd.stopptpcp = true;
+
+ rc = sja1105_ptp_commit(priv->ds, &cmd, SPI_WRITE);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
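Plugging numbers into the frequency formula above: for a 1 Hz output the half-period is 5 * 10^8 ns, so PTPPINDUR = 62,500,000 ticks of 8 ns; and the largest full period a 32-bit PTPPINDUR can express is about 68.7 s, which is where the -ERANGE check comes from. A userspace sketch:

#include <stdint.h>
#include <stdio.h>

#define SJA1105_TICK_NS 8

int main(void)
{
	uint64_t period_ns = 1000000000ULL;  /* request a 1 Hz output */
	uint64_t ptppindur = (period_ns / 2) / SJA1105_TICK_NS;

	printf("PTPPINDUR for 1 Hz: %llu\n",
	       (unsigned long long)ptppindur);  /* 62500000 */

	/* Largest period before the 32-bit register truncates */
	printf("max period: %.1f s\n",
	       (double)UINT32_MAX * SJA1105_TICK_NS * 2 / 1e9);  /* ~68.7 */
	return 0;
}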
+static int sja1105_extts_enable(struct sja1105_private *priv,
+ struct ptp_extts_request *extts,
+ bool on)
+{
+ int rc;
+
+ /* We only support one channel */
+ if (extts->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (extts->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* We can only enable time stamping on both edges, sadly. */
+ if ((extts->flags & PTP_STRICT_FLAGS) &&
+ (extts->flags & PTP_ENABLE_FEATURE) &&
+ (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
+ if (rc)
+ return rc;
+
+ if (on)
+ schedule_delayed_work(&priv->ptp_data.extts_work,
+ SJA1105_EXTTS_INTERVAL);
+ else
+ cancel_delayed_work_sync(&priv->ptp_data.extts_work);
+
+ return 0;
+}
+
+static int sja1105_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc = -EOPNOTSUPP;
+
+ if (req->type == PTP_CLK_REQ_PEROUT)
+ rc = sja1105_per_out_enable(priv, &req->perout, on);
+ else if (req->type == PTP_CLK_REQ_EXTTS)
+ rc = sja1105_extts_enable(priv, &req->extts, on);
+
+ return rc;
+}
+
+static int sja1105_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+
+ if (chan != 0 || pin != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static struct ptp_pin_desc sja1105_ptp_pin = {
+ .name = "ptp_clk",
+ .index = 0,
+ .func = PTP_PF_NONE,
+};
+
int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
@@ -628,8 +838,14 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.adjtime = sja1105_ptp_adjtime,
.gettimex64 = sja1105_ptp_gettimex,
.settime64 = sja1105_ptp_settime,
+ .enable = sja1105_ptp_enable,
+ .verify = sja1105_ptp_verify_pin,
.do_aux_work = sja1105_rxtstamp_work,
.max_adj = SJA1105_MAX_ADJ_PPB,
+ .pin_config = &sja1105_ptp_pin,
+ .n_pins = 1,
+ .n_ext_ts = 1,
+ .n_per_out = 1,
};
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
@@ -642,6 +858,8 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
ptp_data->cmd.corrclk4ts = true;
ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
+ INIT_DELAYED_WORK(&ptp_data->extts_work, sja1105_ptp_extts_work);
+
return sja1105_ptp_reset(ds);
}
@@ -653,6 +871,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
+ cancel_delayed_work_sync(&ptp_data->extts_work);
ptp_cancel_worker_sync(ptp_data->clock);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 6f4a19eec709..43480b24f1f0 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -21,7 +21,36 @@ static inline s64 sja1105_ticks_to_ns(s64 ticks)
return ticks * SJA1105_TICK_NS;
}
+/* Calculate the first base_time in the future that satisfies this
+ * relationship:
+ *
+ * future_base_time = base_time + N x cycle_time >= now, or
+ *
+ * now - base_time
+ * N >= ---------------
+ * cycle_time
+ *
+ * Because N is an integer, the ceiling value of the above "a / b" ratio
+ * is in fact precisely the floor value of "(a + b - 1) / b", which is
+ * easier to calculate only having integer division tools.
+ */
+static inline s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
+{
+ s64 a, b, n;
+
+ if (base_time >= now)
+ return base_time;
+
+ a = now - base_time;
+ b = cycle_time;
+ n = div_s64(a + b - 1, b);
+
+ return base_time + n * cycle_time;
+}
+
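A worked example of the helper above, as a userspace replica: base_time = 1000, cycle_time = 300, now = 2000 gives a = 1000, n = ceil(1000 / 300) = 4, so the first phase-aligned time not in the past is 1000 + 4 * 300 = 2200.

#include <stdint.h>
#include <stdio.h>

/* Userspace replica of future_base_time() (div_s64 becomes plain /) */
static int64_t future_base_time(int64_t base_time, int64_t cycle_time,
				int64_t now)
{
	int64_t a, b, n;

	if (base_time >= now)
		return base_time;

	a = now - base_time;
	b = cycle_time;
	n = (a + b - 1) / b;  /* ceil(a / b) in integer math */

	return base_time + n * cycle_time;
}

int main(void)
{
	printf("%lld\n",
	       (long long)future_base_time(1000, 300, 2000));  /* 2200 */
	return 0;
}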
struct sja1105_ptp_cmd {
+ u64 startptpcp; /* start toggling PTP_CLK pin */
+ u64 stopptpcp; /* stop toggling PTP_CLK pin */
u64 ptpstrtsch; /* start schedule */
u64 ptpstopsch; /* stop schedule */
u64 resptp; /* reset */
@@ -30,12 +59,14 @@ struct sja1105_ptp_cmd {
};
struct sja1105_ptp_data {
+ struct delayed_work extts_work;
struct sk_buff_head skb_rxtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
/* Serializes all operations on the PTP hardware clock */
struct mutex lock;
+ u64 ptpsyncts;
};
int sja1105_ptp_clock_register(struct dsa_switch *ds);
diff --git a/drivers/net/dsa/sja1105/sja1105_sgmii.h b/drivers/net/dsa/sja1105/sja1105_sgmii.h
new file mode 100644
index 000000000000..24d9bc046e70
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_sgmii.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright 2020, NXP Semiconductors
+ */
+#ifndef _SJA1105_SGMII_H
+#define _SJA1105_SGMII_H
+
+#define SJA1105_SGMII_PORT 4
+
+/* DIGITAL_CONTROL_1 (address 1f8000h) */
+#define SJA1105_DC1 0x8000
+#define SJA1105_DC1_VS_RESET BIT(15)
+#define SJA1105_DC1_REMOTE_LOOPBACK BIT(14)
+#define SJA1105_DC1_EN_VSMMD1 BIT(13)
+#define SJA1105_DC1_POWER_SAVE BIT(11)
+#define SJA1105_DC1_CLOCK_STOP_EN BIT(10)
+#define SJA1105_DC1_MAC_AUTO_SW BIT(9)
+#define SJA1105_DC1_INIT BIT(8)
+#define SJA1105_DC1_TX_DISABLE BIT(4)
+#define SJA1105_DC1_AUTONEG_TIMER_OVRR BIT(3)
+#define SJA1105_DC1_BYP_POWERUP BIT(1)
+#define SJA1105_DC1_PHY_MODE_CONTROL BIT(0)
+
+/* DIGITAL_CONTROL_2 register (address 1f80E1h) */
+#define SJA1105_DC2 0x80e1
+#define SJA1105_DC2_TX_POL_INV_DISABLE BIT(4)
+#define SJA1105_DC2_RX_POL_INV BIT(0)
+
+/* DIGITAL_ERROR_CNT register (address 1f80E2h) */
+#define SJA1105_DEC 0x80e2
+#define SJA1105_DEC_ICG_EC_ENA BIT(4)
+#define SJA1105_DEC_CLEAR_ON_READ BIT(0)
+
+/* AUTONEG_CONTROL register (address 1f8001h) */
+#define SJA1105_AC 0x8001
+#define SJA1105_AC_MII_CONTROL BIT(8)
+#define SJA1105_AC_SGMII_LINK BIT(4)
+#define SJA1105_AC_PHY_MODE BIT(3)
+#define SJA1105_AC_AUTONEG_MODE(x) (((x) << 1) & GENMASK(2, 1))
+#define SJA1105_AC_AUTONEG_MODE_SGMII SJA1105_AC_AUTONEG_MODE(2)
+
+/* AUTONEG_INTR_STATUS register (address 1f8002h) */
+#define SJA1105_AIS 0x8002
+#define SJA1105_AIS_LINK_STATUS(x) (!!((x) & BIT(4)))
+#define SJA1105_AIS_SPEED(x) (((x) & GENMASK(3, 2)) >> 2)
+#define SJA1105_AIS_DUPLEX_MODE(x) (!!((x) & BIT(1)))
+#define SJA1105_AIS_COMPLETE(x) (!!((x) & BIT(0)))
+
+/* DEBUG_CONTROL register (address 1f8005h) */
+#define SJA1105_DC 0x8005
+#define SJA1105_DC_SUPPRESS_LOS BIT(4)
+#define SJA1105_DC_RESTART_SYNC BIT(0)
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 29b127f3bf9c..04bdb72ae6b6 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -458,6 +458,8 @@ static struct sja1105_regs sja1105et_regs = {
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
.ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
+ .ptppinst = 0x14,
+ .ptppindur = 0x16,
.ptp_control = 0x17,
.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
@@ -474,11 +476,13 @@ static struct sja1105_regs sja1105pqrs_regs = {
/* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
+ .sgmii = 0x1F0000,
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ .ether_stats = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
/* UM11040.pdf, Table 114 */
.mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
.mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
@@ -490,10 +494,13 @@ static struct sja1105_regs sja1105pqrs_regs = {
.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
.ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
+ .ptppinst = 0x15,
+ .ptppindur = 0x17,
.ptp_control = 0x18,
.ptpclkval = 0x19,
.ptpclkrate = 0x1B,
.ptpclkcorp = 0x1E,
+ .ptpsyncts = 0x1F,
};
struct sja1105_info sja1105e_info = {
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 63d2311817c4..bbfe034910a0 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -102,12 +102,13 @@ static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
-static size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op)
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
struct sja1105_avb_params_entry *entry = entry_ptr;
+ sja1105_packing(buf, &entry->cas_master, 126, 126, size, op);
sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);
sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);
return size;
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index f4a5c5c04311..8afafb6aef12 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -230,6 +230,7 @@ struct sja1105_l2_policing_entry {
};
struct sja1105_avb_params_entry {
+ u64 cas_master;
u64 destmeta;
u64 srcmeta;
};
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index fa6750d973d7..77e547b4cd89 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -28,33 +28,6 @@ static s64 sja1105_delta_to_ns(s64 delta)
return delta * 200;
}
-/* Calculate the first base_time in the future that satisfies this
- * relationship:
- *
- * future_base_time = base_time + N x cycle_time >= now, or
- *
- * now - base_time
- * N >= ---------------
- * cycle_time
- *
- * Because N is an integer, the ceiling value of the above "a / b" ratio
- * is in fact precisely the floor value of "(a + b - 1) / b", which is
- * easier to calculate only having integer division tools.
- */
-static s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
-{
- s64 a, b, n;
-
- if (base_time >= now)
- return base_time;
-
- a = now - base_time;
- b = cycle_time;
- n = div_s64(a + b - 1, b);
-
- return base_time + n * cycle_time;
-}
-
static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 6e21a2a5cf01..19ce4aa0973b 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -664,16 +664,6 @@ static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
VSC73XX_MAC_CFG_TX_EN |
VSC73XX_MAC_CFG_RX_EN);
- /* Max length, we can do up to 9.6 KiB, so allow that.
- * According to application not "VSC7398 Jumbo Frames" setting
- * up the MTU to 9.6 KB does not affect the performance on standard
- * frames, so just enable it. It is clear from the application note
- * that "9.6 kilobytes" == 9600 bytes.
- */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
- port,
- VSC73XX_MAXLEN, 9600);
-
/* Flow control for the CPU port:
* Use a zero delay pause frame when pause condition is left
* Obey pause control frames
@@ -1030,6 +1020,24 @@ static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
}
}
+static int vsc73xx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ return vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAXLEN, new_mtu);
+}
+
+/* According to the application note "VSC7398 Jumbo Frames", setting
+ * the MTU to 9.6 KB does not affect the performance on standard
+ * frames. It is clear from the application note that
+ * "9.6 kilobytes" == 9600 bytes.
+ */
+static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 9600;
+}
+
static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_tag_protocol = vsc73xx_get_tag_protocol,
.setup = vsc73xx_setup,
@@ -1041,6 +1049,8 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_sset_count = vsc73xx_get_sset_count,
.port_enable = vsc73xx_port_enable,
.port_disable = vsc73xx_port_disable,
+ .port_change_mtu = vsc73xx_change_mtu,
+ .port_max_mtu = vsc73xx_get_max_mtu,
};
static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 3031a5fc5427..bab3a9bb5e6f 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -42,7 +42,6 @@
#include <linux/u64_stats_sync.h>
#define DRV_NAME "dummy"
-#define DRV_VERSION "1.0"
static int numdummies = 1;
@@ -104,7 +103,6 @@ static void dummy_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops dummy_ethtool_ops = {
@@ -212,4 +210,3 @@ module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 8cafd06ff0c4..b762176a1406 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -60,8 +60,6 @@
*/
#define DRV_NAME "3c509"
-#define DRV_VERSION "1.20"
-#define DRV_RELDATE "04Feb2008"
/* A few values that may be tweaked. */
@@ -87,13 +85,12 @@
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/bitops.h>
+#include <linux/vermagic.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
-static char version[] = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
-
#ifdef EL3_DEBUG
static int el3_debug = EL3_DEBUG;
#else
@@ -547,8 +544,6 @@ static int el3_common_init(struct net_device *dev)
dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)],
dev->dev_addr, dev->irq);
- if (el3_debug > 0)
- pr_info("%s", version);
return 0;
}
@@ -1143,7 +1138,6 @@ el3_netdev_set_ecmd(struct net_device *dev,
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int el3_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 1e233e2f0a5a..90312fcd6319 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -22,12 +22,8 @@
*/
+#include <linux/vermagic.h>
#define DRV_NAME "3c515"
-#define DRV_VERSION "0.99t-ac"
-#define DRV_RELDATE "28-Oct-2002"
-
-static char *version =
-DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " becker@scyld.com and others\n";
#define CORKSCREW 1
@@ -84,7 +80,6 @@ static int max_interrupt_work = 20;
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
/* "Knobs" for adjusting internal parameters. */
/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
@@ -418,8 +413,6 @@ int init_module(void)
int found = 0;
if (debug >= 0)
corkscrew_debug = debug;
- if (corkscrew_debug)
- pr_debug("%s", version);
while (corkscrew_scan(-1))
found++;
return found ? 0 : -ENODEV;
@@ -429,16 +422,10 @@ int init_module(void)
struct net_device *tc515_probe(int unit)
{
struct net_device *dev = corkscrew_scan(unit);
- static int printed;
if (!dev)
return ERR_PTR(-ENODEV);
- if (corkscrew_debug > 0 && !printed) {
- printed = 1;
- pr_debug("%s", version);
- }
-
return dev;
}
#endif /* not MODULE */
@@ -1540,7 +1527,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index d47cde6c5f08..09816e84314d 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -23,7 +23,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "3c589_cs"
-#define DRV_VERSION "1.162-ac"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -482,7 +481,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 14fce6658106..5ed33c2c4742 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -127,7 +127,6 @@ static const int multicast_filter_limit = 32;
#include "typhoon.h"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
@@ -311,7 +310,7 @@ enum state_values {
* cannot pass a read, so this forces current writes to post.
*/
#define typhoon_post_pci_writes(x) \
- do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
+ do { if (likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while (0)
/* We'll wait up to six seconds for a reset, and half a second normally.
*/
@@ -381,7 +380,7 @@ typhoon_reset(void __iomem *ioaddr, int wait_type)
int i, err = 0;
int timeout;
- if(wait_type == WaitNoSleep)
+ if (wait_type == WaitNoSleep)
timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
else
timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
@@ -394,13 +393,13 @@ typhoon_reset(void __iomem *ioaddr, int wait_type)
udelay(1);
iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
- if(wait_type != NoWait) {
- for(i = 0; i < timeout; i++) {
- if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
+ if (wait_type != NoWait) {
+ for (i = 0; i < timeout; i++) {
+ if (ioread32(ioaddr + TYPHOON_REG_STATUS) ==
TYPHOON_STATUS_WAITING_FOR_HOST)
goto out;
- if(wait_type == WaitSleep)
+ if (wait_type == WaitSleep)
schedule_timeout_uninterruptible(1);
else
udelay(TYPHOON_UDELAY);
@@ -423,7 +422,7 @@ out:
* which should be enough (I've see it work well at 100us, but still
* saw occasional problems.)
*/
- if(wait_type == WaitSleep)
+ if (wait_type == WaitSleep)
msleep(5);
else
udelay(500);
@@ -435,8 +434,8 @@ typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
{
int i, err = 0;
- for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
- if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
+ for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if (ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
goto out;
udelay(TYPHOON_UDELAY);
}
@@ -450,7 +449,7 @@ out:
static inline void
typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
{
- if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
+ if (resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
@@ -466,7 +465,7 @@ typhoon_hello(struct typhoon *tp)
* card in a long while. If the lock is held, then we're in the
* process of issuing a command, so we don't need to respond.
*/
- if(spin_trylock(&tp->command_lock)) {
+ if (spin_trylock(&tp->command_lock)) {
cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
typhoon_inc_cmd_index(&ring->lastWrite, 1);
@@ -490,32 +489,32 @@ typhoon_process_response(struct typhoon *tp, int resp_size,
cleared = le32_to_cpu(indexes->respCleared);
ready = le32_to_cpu(indexes->respReady);
- while(cleared != ready) {
+ while (cleared != ready) {
resp = (struct resp_desc *)(base + cleared);
count = resp->numDesc + 1;
- if(resp_save && resp->seqNo) {
- if(count > resp_size) {
+ if (resp_save && resp->seqNo) {
+ if (count > resp_size) {
resp_save->flags = TYPHOON_RESP_ERROR;
goto cleanup;
}
wrap_len = 0;
len = count * sizeof(*resp);
- if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
+ if (unlikely(cleared + len > RESPONSE_RING_SIZE)) {
wrap_len = cleared + len - RESPONSE_RING_SIZE;
len = RESPONSE_RING_SIZE - cleared;
}
memcpy(resp_save, resp, len);
- if(unlikely(wrap_len)) {
+ if (unlikely(wrap_len)) {
resp_save += len / sizeof(*resp);
memcpy(resp_save, base, wrap_len);
}
resp_save = NULL;
- } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
+ } else if (resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
typhoon_media_status(tp->dev, resp);
- } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
+ } else if (resp->cmd == TYPHOON_CMD_HELLO_RESP) {
typhoon_hello(tp);
} else {
netdev_err(tp->dev,
@@ -589,19 +588,19 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
freeCmd = typhoon_num_free_cmd(tp);
freeResp = typhoon_num_free_resp(tp);
- if(freeCmd < num_cmd || freeResp < num_resp) {
+ if (freeCmd < num_cmd || freeResp < num_resp) {
netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
freeCmd, num_cmd, freeResp, num_resp);
err = -ENOMEM;
goto out;
}
- if(cmd->flags & TYPHOON_CMD_RESPOND) {
+ if (cmd->flags & TYPHOON_CMD_RESPOND) {
/* If we're expecting a response, but the caller hasn't given
* us a place to put it, we'll provide one.
*/
tp->awaiting_resp = 1;
- if(resp == NULL) {
+ if (resp == NULL) {
resp = &local_resp;
num_resp = 1;
}
@@ -609,13 +608,13 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
wrap_len = 0;
len = num_cmd * sizeof(*cmd);
- if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
+ if (unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
len = COMMAND_RING_SIZE - ring->lastWrite;
}
memcpy(ring->ringBase + ring->lastWrite, cmd, len);
- if(unlikely(wrap_len)) {
+ if (unlikely(wrap_len)) {
struct cmd_desc *wrap_ptr = cmd;
wrap_ptr += len / sizeof(*cmd);
memcpy(ring->ringBase, wrap_ptr, wrap_len);
@@ -629,7 +628,7 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
typhoon_post_pci_writes(tp->ioaddr);
- if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
+ if ((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
goto out;
/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
@@ -649,14 +648,14 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
* wait here.
*/
got_resp = 0;
- for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
- if(indexes->respCleared != indexes->respReady)
+ for (i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
+ if (indexes->respCleared != indexes->respReady)
got_resp = typhoon_process_response(tp, num_resp,
resp);
udelay(TYPHOON_UDELAY);
}
- if(!got_resp) {
+ if (!got_resp) {
err = -ETIMEDOUT;
goto out;
}
@@ -664,11 +663,11 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
/* Collect the error response even if we don't care about the
* rest of the response
*/
- if(resp->flags & TYPHOON_RESP_ERROR)
+ if (resp->flags & TYPHOON_RESP_ERROR)
err = -EIO;
out:
- if(tp->awaiting_resp) {
+ if (tp->awaiting_resp) {
tp->awaiting_resp = 0;
smp_wmb();
@@ -679,7 +678,7 @@ out:
* time. So, check for it, and interrupt ourselves if this
* is the case.
*/
- if(indexes->respCleared != indexes->respReady)
+ if (indexes->respCleared != indexes->respReady)
iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
}
@@ -749,7 +748,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* between marking the queue awake and updating the cleared index.
* Just loop and it will appear. This comes from the acenic driver.
*/
- while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
+ while (unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
smp_rmb();
first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
@@ -761,7 +760,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
first_txd->tx_addr = (u64)((unsigned long) skb);
first_txd->processFlags = 0;
- if(skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* The 3XP will figure out if this is UDP/TCP */
first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
@@ -789,7 +788,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
/* No need to worry about padding packet -- the firmware pads
* it with zeros to ETH_ZLEN for us.
*/
- if(skb_shinfo(skb)->nr_frags == 0) {
+ if (skb_shinfo(skb)->nr_frags == 0) {
skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
@@ -841,14 +840,14 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
*/
numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
- if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
+ if (typhoon_num_free_tx(txRing) < (numDesc + 2)) {
netif_stop_queue(dev);
/* A Tx complete IRQ could have gotten between, making
* the ring free again. Only need to recheck here, since
* Tx is serialized.
*/
- if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
+ if (typhoon_num_free_tx(txRing) >= (numDesc + 2))
netif_wake_queue(dev);
}
@@ -864,7 +863,7 @@ typhoon_set_rx_mode(struct net_device *dev)
__le16 filter;
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
- if(dev->flags & IFF_PROMISC) {
+ if (dev->flags & IFF_PROMISC) {
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
@@ -906,7 +905,7 @@ typhoon_do_get_stats(struct typhoon *tp)
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
- if(err < 0)
+ if (err < 0)
return err;
/* 3Com's Linux driver uses txMultipleCollisions as it's
@@ -954,10 +953,10 @@ typhoon_get_stats(struct net_device *dev)
struct net_device_stats *saved = &tp->stats_saved;
smp_rmb();
- if(tp->card_state == Sleeping)
+ if (tp->card_state == Sleeping)
return saved;
- if(typhoon_do_get_stats(tp) < 0) {
+ if (typhoon_do_get_stats(tp) < 0) {
netdev_err(dev, "error getting stats\n");
return saved;
}
@@ -974,12 +973,12 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct resp_desc xp_resp[3];
smp_rmb();
- if(tp->card_state == Sleeping) {
+ if (tp->card_state == Sleeping) {
strlcpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
- if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+ if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
strlcpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
@@ -1026,7 +1025,7 @@ typhoon_get_link_ksettings(struct net_device *dev,
break;
}
- if(tp->capabilities & TYPHOON_FIBER) {
+ if (tp->capabilities & TYPHOON_FIBER) {
supported |= SUPPORTED_FIBRE;
advertising |= ADVERTISED_FIBRE;
cmd->base.port = PORT_FIBRE;
@@ -1043,7 +1042,7 @@ typhoon_get_link_ksettings(struct net_device *dev,
cmd->base.speed = tp->speed;
cmd->base.duplex = tp->duplex;
cmd->base.phy_address = 0;
- if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
+ if (tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
cmd->base.autoneg = AUTONEG_ENABLE;
else
cmd->base.autoneg = AUTONEG_DISABLE;
@@ -1091,7 +1090,7 @@ typhoon_set_link_ksettings(struct net_device *dev,
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
xp_cmd.parm1 = xcvr;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto out;
tp->xcvr_select = xcvr;
@@ -1114,9 +1113,9 @@ typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
wol->supported = WAKE_PHY | WAKE_MAGIC;
wol->wolopts = 0;
- if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
+ if (tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
wol->wolopts |= WAKE_PHY;
- if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
wol->wolopts |= WAKE_MAGIC;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
@@ -1126,13 +1125,13 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct typhoon *tp = netdev_priv(dev);
- if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
return -EINVAL;
tp->wol_events = 0;
- if(wol->wolopts & WAKE_PHY)
+ if (wol->wolopts & WAKE_PHY)
tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
- if(wol->wolopts & WAKE_MAGIC)
+ if (wol->wolopts & WAKE_MAGIC)
tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
return 0;
@@ -1163,8 +1162,8 @@ typhoon_wait_interrupt(void __iomem *ioaddr)
{
int i, err = 0;
- for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
- if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
+ for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if (ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
TYPHOON_INTR_BOOTCMD)
goto out;
udelay(TYPHOON_UDELAY);
@@ -1356,7 +1355,7 @@ typhoon_download_firmware(struct typhoon *tp)
*/
err = -ENOMEM;
dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
- if(!dpage) {
+ if (!dpage) {
netdev_err(tp->dev, "no DMA mem for firmware\n");
goto err_out;
}
@@ -1369,7 +1368,7 @@ typhoon_download_firmware(struct typhoon *tp)
ioaddr + TYPHOON_REG_INTR_MASK);
err = -ETIMEDOUT;
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(tp->dev, "card ready timeout\n");
goto err_out_irq;
}
@@ -1398,16 +1397,16 @@ typhoon_download_firmware(struct typhoon *tp)
* last write to the command register to post, so
* we don't need a typhoon_post_pci_writes() after it.
*/
- for(i = 0; i < numSections; i++) {
+ for (i = 0; i < numSections; i++) {
sHdr = (struct typhoon_section_header *) image_data;
image_data += sizeof(struct typhoon_section_header);
load_addr = le32_to_cpu(sHdr->startAddr);
section_len = le32_to_cpu(sHdr->len);
- while(section_len) {
+ while (section_len) {
len = min_t(u32, section_len, PAGE_SIZE);
- if(typhoon_wait_interrupt(ioaddr) < 0 ||
+ if (typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
netdev_err(tp->dev, "segment ready timeout\n");
@@ -1440,7 +1439,7 @@ typhoon_download_firmware(struct typhoon *tp)
}
}
- if(typhoon_wait_interrupt(ioaddr) < 0 ||
+ if (typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
netdev_err(tp->dev, "final segment ready timeout\n");
@@ -1449,7 +1448,7 @@ typhoon_download_firmware(struct typhoon *tp)
iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
ioread32(ioaddr + TYPHOON_REG_STATUS));
goto err_out_irq;
@@ -1472,7 +1471,7 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
void __iomem *ioaddr = tp->ioaddr;
- if(typhoon_wait_status(ioaddr, initial_status) < 0) {
+ if (typhoon_wait_status(ioaddr, initial_status) < 0) {
netdev_err(tp->dev, "boot ready timeout\n");
goto out_timeout;
}
@@ -1483,7 +1482,7 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
ioaddr + TYPHOON_REG_COMMAND);
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
ioread32(ioaddr + TYPHOON_REG_STATUS));
goto out_timeout;
@@ -1513,17 +1512,17 @@ typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
int dma_len;
int type;
- while(lastRead != le32_to_cpu(*index)) {
+ while (lastRead != le32_to_cpu(*index)) {
tx = (struct tx_desc *) (txRing->ringBase + lastRead);
type = tx->flags & TYPHOON_TYPE_MASK;
- if(type == TYPHOON_TX_DESC) {
+ if (type == TYPHOON_TX_DESC) {
/* This tx_desc describes a packet.
*/
unsigned long ptr = tx->tx_addr;
struct sk_buff *skb = (struct sk_buff *) ptr;
dev_kfree_skb_irq(skb);
- } else if(type == TYPHOON_FRAG_DESC) {
+ } else if (type == TYPHOON_FRAG_DESC) {
/* This tx_desc describes a memory mapping. Free it.
*/
skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
@@ -1548,7 +1547,7 @@ typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
/* This will need changing if we start to use the Hi Tx ring. */
lastRead = typhoon_clean_tx(tp, txRing, index);
- if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
+ if (netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
lastRead, TXLO_ENTRIES) > (numDesc + 2))
netif_wake_queue(tp->dev);
@@ -1564,7 +1563,7 @@ typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
struct basic_ring *ring = &tp->rxBuffRing;
struct rx_free *r;
- if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
+ if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
le32_to_cpu(indexes->rxBuffCleared)) {
/* no room in ring, just drop the skb
*/
@@ -1595,12 +1594,12 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
rxb->skb = NULL;
- if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
+ if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
le32_to_cpu(indexes->rxBuffCleared))
return -ENOMEM;
skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
- if(!skb)
+ if (!skb)
return -ENOMEM;
#if 0
@@ -1647,7 +1646,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
received = 0;
local_ready = le32_to_cpu(*ready);
rxaddr = le32_to_cpu(*cleared);
- while(rxaddr != local_ready && budget > 0) {
+ while (rxaddr != local_ready && budget > 0) {
rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
idx = rx->addr;
rxb = &tp->rxbuffers[idx];
@@ -1656,14 +1655,14 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
typhoon_inc_rx_index(&rxaddr, 1);
- if(rx->flags & TYPHOON_RX_ERROR) {
+ if (rx->flags & TYPHOON_RX_ERROR) {
typhoon_recycle_rx_skb(tp, idx);
continue;
}
pkt_len = le16_to_cpu(rx->frameLen);
- if(pkt_len < rx_copybreak &&
+ if (pkt_len < rx_copybreak &&
(new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
skb_reserve(new_skb, 2);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
@@ -1685,7 +1684,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
new_skb->protocol = eth_type_trans(new_skb, tp->dev);
csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
- if(csum_bits ==
+ if (csum_bits ==
(TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
csum_bits ==
(TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
@@ -1711,11 +1710,11 @@ typhoon_fill_free_ring(struct typhoon *tp)
{
u32 i;
- for(i = 0; i < RXENT_ENTRIES; i++) {
+ for (i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
- if(rxb->skb)
+ if (rxb->skb)
continue;
- if(typhoon_alloc_rx_skb(tp, i) < 0)
+ if (typhoon_alloc_rx_skb(tp, i) < 0)
break;
}
}
@@ -1728,25 +1727,25 @@ typhoon_poll(struct napi_struct *napi, int budget)
int work_done;
rmb();
- if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
+ if (!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
typhoon_process_response(tp, 0, NULL);
- if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
+ if (le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
work_done = 0;
- if(indexes->rxHiCleared != indexes->rxHiReady) {
+ if (indexes->rxHiCleared != indexes->rxHiReady) {
work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
&indexes->rxHiCleared, budget);
}
- if(indexes->rxLoCleared != indexes->rxLoReady) {
+ if (indexes->rxLoCleared != indexes->rxLoReady) {
work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
&indexes->rxLoCleared, budget - work_done);
}
- if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
+ if (le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
/* rxBuff ring is empty, try to fill it. */
typhoon_fill_free_ring(tp);
}
@@ -1770,7 +1769,7 @@ typhoon_interrupt(int irq, void *dev_instance)
u32 intr_status;
intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
- if(!(intr_status & TYPHOON_INTR_HOST_INT))
+ if (!(intr_status & TYPHOON_INTR_HOST_INT))
return IRQ_NONE;
iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
@@ -1790,9 +1789,9 @@ typhoon_free_rx_rings(struct typhoon *tp)
{
u32 i;
- for(i = 0; i < RXENT_ENTRIES; i++) {
+ for (i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
- if(rxb->skb) {
+ if (rxb->skb) {
pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxb->skb);
@@ -1812,7 +1811,7 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
xp_cmd.parm1 = events;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0) {
+ if (err < 0) {
netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
err);
return err;
@@ -1820,12 +1819,12 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0) {
+ if (err < 0) {
netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
return err;
}
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
return -ETIMEDOUT;
/* Since we cannot monitor the status of the link while sleeping,
@@ -1852,7 +1851,7 @@ typhoon_wakeup(struct typhoon *tp, int wait_type)
* the old firmware pay for the reset.
*/
iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
return typhoon_reset(ioaddr, wait_type);
@@ -1871,12 +1870,12 @@ typhoon_start_runtime(struct typhoon *tp)
typhoon_fill_free_ring(tp);
err = typhoon_download_firmware(tp);
- if(err < 0) {
+ if (err < 0) {
netdev_err(tp->dev, "cannot load runtime on 3XP\n");
goto error_out;
}
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
+ if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
netdev_err(tp->dev, "cannot boot 3XP\n");
err = -EIO;
goto error_out;
@@ -1885,14 +1884,14 @@ typhoon_start_runtime(struct typhoon *tp)
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
/* Disable IRQ coalescing -- we can reenable it when 3Com gives
@@ -1901,38 +1900,38 @@ typhoon_start_runtime(struct typhoon *tp)
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
xp_cmd.parm1 = 0;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
xp_cmd.parm1 = tp->xcvr_select;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
typhoon_set_rx_mode(dev);
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
tp->card_state = Running;
@@ -1972,13 +1971,13 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
/* Wait 1/2 sec for any outstanding transmits to occur
* We'll cleanup after the reset if this times out.
*/
- for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
- if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
+ for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if (indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
break;
udelay(TYPHOON_UDELAY);
}
- if(i == TYPHOON_WAIT_TIMEOUT)
+ if (i == TYPHOON_WAIT_TIMEOUT)
netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
@@ -1995,16 +1994,16 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
- if(typhoon_reset(ioaddr, wait_type) < 0) {
+ if (typhoon_reset(ioaddr, wait_type) < 0) {
netdev_err(tp->dev, "unable to reset 3XP\n");
return -ETIMEDOUT;
}
/* cleanup any outstanding Tx packets */
- if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
+ if (indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
}
@@ -2017,7 +2016,7 @@ typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct typhoon *tp = netdev_priv(dev);
- if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
+ if (typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
netdev_warn(dev, "could not reset in tx timeout\n");
goto truly_dead;
}
@@ -2026,7 +2025,7 @@ typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
typhoon_free_rx_rings(tp);
- if(typhoon_start_runtime(tp) < 0) {
+ if (typhoon_start_runtime(tp) < 0) {
netdev_err(dev, "could not start runtime in tx timeout\n");
goto truly_dead;
}
@@ -2051,20 +2050,20 @@ typhoon_open(struct net_device *dev)
goto out;
err = typhoon_wakeup(tp, WaitSleep);
- if(err < 0) {
+ if (err < 0) {
netdev_err(dev, "unable to wakeup device\n");
goto out_sleep;
}
err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
dev->name, dev);
- if(err < 0)
+ if (err < 0)
goto out_sleep;
napi_enable(&tp->napi);
err = typhoon_start_runtime(tp);
- if(err < 0) {
+ if (err < 0) {
napi_disable(&tp->napi);
goto out_irq;
}
@@ -2076,13 +2075,13 @@ out_irq:
free_irq(dev->irq, dev);
out_sleep:
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(dev, "unable to reboot into sleep img\n");
typhoon_reset(tp->ioaddr, NoWait);
goto out;
}
- if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
netdev_err(dev, "unable to go back to sleep\n");
out:
@@ -2097,7 +2096,7 @@ typhoon_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&tp->napi);
- if(typhoon_stop_runtime(tp, WaitSleep) < 0)
+ if (typhoon_stop_runtime(tp, WaitSleep) < 0)
netdev_err(dev, "unable to stop runtime\n");
/* Make sure there is no irq handler running on a different CPU. */
@@ -2106,10 +2105,10 @@ typhoon_close(struct net_device *dev)
typhoon_free_rx_rings(tp);
typhoon_init_rings(tp);
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
+ if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
netdev_err(dev, "unable to boot sleep image\n");
- if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
netdev_err(dev, "unable to put card to sleep\n");
return 0;
@@ -2124,15 +2123,15 @@ typhoon_resume(struct pci_dev *pdev)
/* If we're down, resume when we are upped.
*/
- if(!netif_running(dev))
+ if (!netif_running(dev))
return 0;
- if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
+ if (typhoon_wakeup(tp, WaitNoSleep) < 0) {
netdev_err(dev, "critical: could not wake up in resume\n");
goto reset;
}
- if(typhoon_start_runtime(tp) < 0) {
+ if (typhoon_start_runtime(tp) < 0) {
netdev_err(dev, "critical: could not start runtime in resume\n");
goto reset;
}
@@ -2154,16 +2153,16 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
/* If we're down, we're already suspended.
*/
- if(!netif_running(dev))
+ if (!netif_running(dev))
return 0;
/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
- if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
netif_device_detach(dev);
- if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
+ if (typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
netdev_err(dev, "unable to stop runtime\n");
goto need_resume;
}
@@ -2171,7 +2170,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
typhoon_free_rx_rings(tp);
typhoon_init_rings(tp);
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(dev, "unable to boot sleep image\n");
goto need_resume;
}
@@ -2179,19 +2178,19 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
- if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
+ if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
netdev_err(dev, "unable to set mac address in suspend\n");
goto need_resume;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
- if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
+ if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
netdev_err(dev, "unable to set rx filter in suspend\n");
goto need_resume;
}
- if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
+ if (typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
netdev_err(dev, "unable to put card to sleep\n");
goto need_resume;
}
@@ -2211,10 +2210,10 @@ typhoon_test_mmio(struct pci_dev *pdev)
int mode = 0;
u32 val;
- if(!ioaddr)
+ if (!ioaddr)
goto out;
- if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
+ if (ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_HOST)
goto out_unmap;
@@ -2227,12 +2226,12 @@ typhoon_test_mmio(struct pci_dev *pdev)
* The 50usec delay is arbitrary -- it could probably be smaller.
*/
val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
- if((val & TYPHOON_INTR_SELF) == 0) {
+ if ((val & TYPHOON_INTR_SELF) == 0) {
iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
udelay(50);
val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
- if(val & TYPHOON_INTR_SELF)
+ if (val & TYPHOON_INTR_SELF)
mode = 1;
}
@@ -2245,7 +2244,7 @@ out_unmap:
pci_iounmap(pdev, ioaddr);
out:
- if(!mode)
+ if (!mode)
pr_info("%s: falling back to port IO\n", pci_name(pdev));
return mode;
}
@@ -2276,7 +2275,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
const char *err_msg;
dev = alloc_etherdev(sizeof(*tp));
- if(dev == NULL) {
+ if (dev == NULL) {
err_msg = "unable to alloc new net device";
err = -ENOMEM;
goto error_out;
@@ -2284,55 +2283,55 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(dev, &pdev->dev);
err = pci_enable_device(pdev);
- if(err < 0) {
+ if (err < 0) {
err_msg = "unable to enable device";
goto error_out_dev;
}
err = pci_set_mwi(pdev);
- if(err < 0) {
+ if (err < 0) {
err_msg = "unable to set MWI";
goto error_out_disable;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if(err < 0) {
+ if (err < 0) {
err_msg = "No usable DMA configuration";
goto error_out_mwi;
}
/* sanity checks on IO and MMIO BARs
*/
- if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
err_msg = "region #1 not a PCI IO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
- if(pci_resource_len(pdev, 0) < 128) {
+ if (pci_resource_len(pdev, 0) < 128) {
err_msg = "Invalid PCI IO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
- if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
err_msg = "region #1 not a PCI MMIO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
- if(pci_resource_len(pdev, 1) < 128) {
+ if (pci_resource_len(pdev, 1) < 128) {
err_msg = "Invalid PCI MMIO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
err = pci_request_regions(pdev, KBUILD_MODNAME);
- if(err < 0) {
+ if (err < 0) {
err_msg = "could not request regions";
goto error_out_mwi;
}
/* map our registers
*/
- if(use_mmio != 0 && use_mmio != 1)
+ if (use_mmio != 0 && use_mmio != 1)
use_mmio = typhoon_test_mmio(pdev);
ioaddr = pci_iomap(pdev, use_mmio, 128);
@@ -2346,7 +2345,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
&shared_dma);
- if(!shared) {
+ if (!shared) {
err_msg = "could not allocate DMA memory";
err = -ENOMEM;
goto error_out_remap;
@@ -2426,7 +2425,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* seem to need a little extra help to get started. Since we don't
* know how to nudge it along, just kick it.
*/
- if(xp_resp[0].numDesc != 0)
+ if (xp_resp[0].numDesc != 0)
tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
err = typhoon_sleep(tp, PCI_D3hot, 0);
@@ -2471,14 +2470,14 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* xp_resp still contains the response to the READ_VERSIONS command.
* For debugging, let the user know what version he has.
*/
- if(xp_resp[0].numDesc == 0) {
+ if (xp_resp[0].numDesc == 0) {
/* This is the Typhoon 1.0 type Sleep Image, last 16 bits
* of version is Month/Day of build.
*/
u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
monthday >> 8, monthday & 0xff);
- } else if(xp_resp[0].numDesc == 2) {
+ } else if (xp_resp[0].numDesc == 2) {
/* This is the Typhoon 1.1+ type Sleep Image
*/
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
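The typhoon_post_pci_writes() hunk near the top of this file documents
the posted-write flush idiom: MMIO writes may be buffered on their way
to the device, but a read from the same device cannot pass them, so a
dummy ioread32() forces any pending writes to reach the hardware. A
short sketch of the idiom outside the driver (hypothetical device and
register offsets):

#include <linux/io.h>

#define DEV_REG_CTRL	0x00	/* hypothetical register offsets */
#define DEV_REG_STATUS	0x04

static void dev_kick(void __iomem *ioaddr)
{
	iowrite32(1, ioaddr + DEV_REG_CTRL);
	/* The read cannot complete until earlier posted writes to this
	 * device have been flushed, so the write above is now at the
	 * hardware.
	 */
	(void)ioread32(ioaddr + DEV_REG_STATUS);
}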
diff --git a/drivers/net/ethernet/3com/typhoon.h b/drivers/net/ethernet/3com/typhoon.h
index 88187fc84aa3..2f634c64d5d1 100644
--- a/drivers/net/ethernet/3com/typhoon.h
+++ b/drivers/net/ethernet/3com/typhoon.h
@@ -366,7 +366,7 @@ struct resp_desc {
memset(_ptr, 0, sizeof(struct cmd_desc)); \
_ptr->flags = TYPHOON_CMD_DESC | TYPHOON_DESC_VALID; \
_ptr->cmd = command; \
- } while(0)
+ } while (0)
/* We set seqNo to 1 if we're expecting a response from this command */
#define INIT_COMMAND_WITH_RESPONSE(x, command) \
@@ -376,7 +376,7 @@ struct resp_desc {
_ptr->flags |= TYPHOON_DESC_VALID; \
_ptr->cmd = command; \
_ptr->seqNo = 1; \
- } while(0)
+ } while (0)
/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1)
*/
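The two hunks above only fix the spacing of "while (0)", but the
wrapper itself is the standard idiom for multi-statement macros such as
INIT_COMMAND_*: it turns the body into a single statement so the macro
composes safely with if/else. A generic sketch (hypothetical names):

#include <string.h>

struct desc {
	unsigned int flags;
	unsigned int cmd;
};

#define INIT_DESC(ptr, flag)				\
	do {						\
		memset((ptr), 0, sizeof(*(ptr)));	\
		(ptr)->flags = (flag);			\
	} while (0)

/* Without the do { } while (0) wrapper, the two statements would split
 * across this if/else and fail to compile:
 *
 *	if (cond)
 *		INIT_DESC(p, 1);
 *	else
 *		...
 */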
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 165d18405b0c..2db42211329f 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -27,8 +27,6 @@
*/
#define DRV_NAME "starfire"
-#define DRV_VERSION "2.1"
-#define DRV_RELDATE "July 6, 2008"
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -47,6 +45,7 @@
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <linux/uaccess.h>
#include <asm/io.h>
+#include <linux/vermagic.h>
/*
* The current frame processor firmware fails to checksum a fragment
@@ -165,15 +164,9 @@ static int rx_copybreak /* = 0 */;
#define FIRMWARE_RX "adaptec/starfire_rx.bin"
#define FIRMWARE_TX "adaptec/starfire_tx.bin"
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
-KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
-" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);
@@ -654,13 +647,6 @@ static int starfire_init_one(struct pci_dev *pdev,
int drv_flags, io_size;
int boguscnt;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
if (pci_enable_device (pdev))
return -EIO;
@@ -1853,7 +1839,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -2073,8 +2058,6 @@ static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
- printk(version);
-
printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 2a9f8643629c..bf546118dbc6 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1114,9 +1114,7 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
strlcpy(info->driver, dev_driver_string(greth->dev),
sizeof(info->driver));
- strlcpy(info->version, "revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index cb6a761d5c11..1b19385ad8a9 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2958,7 +2958,6 @@ static void et131x_get_drvinfo(struct net_device *netdev,
struct et131x_adapter *adapter = netdev_priv(netdev);
strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/agere/et131x.h b/drivers/net/ethernet/agere/et131x.h
index be9a11c02526..d0e922584d8a 100644
--- a/drivers/net/ethernet/agere/et131x.h
+++ b/drivers/net/ethernet/agere/et131x.h
@@ -46,7 +46,6 @@
*/
#define DRIVER_NAME "et131x"
-#define DRIVER_VERSION "v2.0"
/* EEPROM registers */
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 9daef4c8feef..6234fcd844ee 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -26,7 +26,6 @@
#include "slic.h"
#define DRV_NAME "slicoss"
-#define DRV_VERSION "1.0"
static const struct pci_device_id slic_id_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
@@ -1533,7 +1532,6 @@ static void slic_get_drvinfo(struct net_device *dev,
struct slic_device *sdev = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
@@ -1852,4 +1850,3 @@ module_pci_driver(slic_driver);
MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 22cadfbeedfb..18d3b4340bd4 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -33,7 +33,6 @@
#include "sun4i-emac.h"
#define DRV_NAME "sun4i-emac"
-#define DRV_VERSION "1.02"
#define EMAC_MAX_FRAME_LEN 0x0600
@@ -212,7 +211,6 @@ static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index f366faf88eee..5d192d551623 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2699,9 +2699,8 @@ static void ace_get_drvinfo(struct net_device *dev,
struct ace_private *ap = netdev_priv(dev);
strlcpy(info->driver, "acenic", sizeof(info->driver));
- snprintf(info->version, sizeof(info->version), "%i.%i.%i",
- ap->firmware_major, ap->firmware_minor,
- ap->firmware_fix);
+ snprintf(info->fw_version, sizeof(info->fw_version), "%i.%i.%i",
+ ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
if (ap->pdev)
strlcpy(info->bus_info, pci_name(ap->pdev),
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 23823464f2e7..4299f1301149 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -67,7 +67,6 @@ static void tse_get_drvinfo(struct net_device *dev,
u32 rev = ioread32(&priv->mac_dev->megacore_revision);
strcpy(info->driver, "altera_tse");
- strcpy(info->version, "v8.0");
snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
sprintf(info->bus_info, "platform");
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1fb58f9ad80b..a250046b8e18 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1067,18 +1067,14 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_flow_hash_control *hash_key;
struct ena_admin_get_feat_resp get_resp;
int rc;
- hash_key = (ena_dev->rss).hash_key;
-
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
ena_dev->rss.hash_key_dma_addr,
sizeof(ena_dev->rss.hash_key), 0);
if (unlikely(rc)) {
- hash_key = NULL;
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index ced1d577b62a..9cc28b4b2627 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -221,7 +221,7 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
snprintf(*data, ETH_GSTRING_LEN,
"queue_%u_tx_%s", i, ena_stats->name);
- (*data) += ETH_GSTRING_LEN;
+ (*data) += ETH_GSTRING_LEN;
}
/* Rx stats */
for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
@@ -404,7 +404,6 @@ static void ena_get_drvinfo(struct net_device *dev,
struct ena_adapter *adapter = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -674,7 +673,6 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* supports getting/setting the hash function.
*/
rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
-
if (rc) {
if (rc == -EOPNOTSUPP) {
key = NULL;
@@ -685,9 +683,6 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return rc;
}
- if (rc)
- return rc;
-
switch (ena_func) {
case ENA_ADMIN_TOEPLITZ:
func = ETH_RSS_HASH_TOP;
@@ -831,6 +826,8 @@ static int ena_set_tunable(struct net_device *netdev,
}
static const struct ethtool_ops ena_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = ena_get_link_ksettings,
.get_drvinfo = ena_get_drvinfo,
.get_msglevel = ena_get_msglevel,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index cada6e7e30f4..2cc765df8da3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -49,12 +49,9 @@
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
-static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
-
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)
@@ -460,10 +457,9 @@ static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}
-void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
- struct bpf_prog *prog,
- int first,
- int count)
+static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count)
{
struct ena_ring *rx_ring;
int i = 0;
@@ -481,8 +477,8 @@ void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
}
}
-void ena_xdp_exchange_program(struct ena_adapter *adapter,
- struct bpf_prog *prog)
+static void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
{
struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
@@ -1555,7 +1551,7 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
-int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
struct ena_rx_buffer *rx_info;
int ret;
@@ -3104,9 +3100,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
strncpy(host_info->os_dist_str, utsname()->release,
sizeof(host_info->os_dist_str) - 1);
host_info->driver_version =
- (DRV_MODULE_VER_MAJOR) |
- (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
- (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
+ (DRV_MODULE_GEN_MAJOR) |
+ (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
+ (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
host_info->num_cpus = num_online_cpus();
@@ -3486,10 +3482,8 @@ static int ena_restore_device(struct ena_adapter *adapter)
netif_carrier_on(adapter->netdev);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+ dev_err(&pdev->dev, "Device reset completed successfully\n");
adapter->last_keep_alive_jiffies = jiffies;
- dev_err(&pdev->dev,
- "Device reset completed successfully, Driver info: %s\n",
- version);
return rc;
err_disable_msix:
@@ -4127,8 +4121,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_dbg(&pdev->dev, "%s\n", __func__);
- dev_info_once(&pdev->dev, "%s", version);
-
rc = pci_enable_device_mem(pdev);
if (rc) {
dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
@@ -4471,8 +4463,6 @@ static struct pci_driver ena_pci_driver = {
static int __init ena_init(void)
{
- pr_info("%s", version);
-
ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
if (!ena_wq) {
pr_err("Failed to create workqueue\n");
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 8795e0b1dc3c..9e1860d81908 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,16 +45,16 @@
#include "ena_com.h"
#include "ena_eth_com.h"
-#define DRV_MODULE_VER_MAJOR 2
-#define DRV_MODULE_VER_MINOR 1
-#define DRV_MODULE_VER_SUBMINOR 0
+#define DRV_MODULE_GEN_MAJOR 2
+#define DRV_MODULE_GEN_MINOR 1
+#define DRV_MODULE_GEN_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
-#ifndef DRV_MODULE_VERSION
-#define DRV_MODULE_VERSION \
- __stringify(DRV_MODULE_VER_MAJOR) "." \
- __stringify(DRV_MODULE_VER_MINOR) "." \
- __stringify(DRV_MODULE_VER_SUBMINOR) "K"
+#ifndef DRV_MODULE_GENERATION
+#define DRV_MODULE_GENERATION \
+ __stringify(DRV_MODULE_GEN_MAJOR) "." \
+ __stringify(DRV_MODULE_GEN_MINOR) "." \
+ __stringify(DRV_MODULE_GEN_SUBMINOR) "K"
#endif
#define DEVICE_NAME "Elastic Network Adapter (ENA)"
@@ -69,7 +69,7 @@
* 16kB.
*/
#if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
+#define ENA_PAGE_SIZE (_AC(SZ_16K, UL))
#else
#define ENA_PAGE_SIZE PAGE_SIZE
#endif
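The last hunk above is more than a rename: wrapping SZ_16K in
_AC(..., UL) gives ENA_PAGE_SIZE an explicit unsigned long type. A
sketch paraphrasing include/uapi/linux/const.h and linux/sizes.h (the
definitions are assumed from those headers, not quoted from this patch):

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: no C suffixes */
#else
#define __AC(X, Y)	(X##Y)		/* paste the suffix on */
#define _AC(X, Y)	__AC(X, Y)	/* expand X first, then paste */
#endif

#define SZ_16K		0x00004000

/* _AC(SZ_16K, UL) expands to (0x00004000UL), so ENA_PAGE_SIZE is an
 * unsigned long constant rather than a plain int.
 */
#define ENA_PAGE_SIZE	(_AC(SZ_16K, UL))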
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 0f3b743425e8..7a1286f8e983 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -84,9 +84,8 @@ Revision History:
#include "amd8111e.h"
#define MODULE_NAME "amd8111e"
-#define MODULE_VERS "3.0.7"
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
-MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
+MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
MODULE_LICENSE("GPL");
module_param_array(speed_duplex, int, NULL, 0);
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
@@ -1366,7 +1365,6 @@ static void amd8111e_get_drvinfo(struct net_device *dev,
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, MODULE_VERS, sizeof(info->version));
snprintf(info->fw_version, sizeof(info->fw_version),
"%u", chip_version);
strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
@@ -1875,7 +1873,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* display driver and device information */
chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
- dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS);
dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
chip_version, dev->dev_addr);
if (lp->ext_phy_id)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 089a4fbc61a0..9f6e3cc2ce80 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -63,14 +63,12 @@ static int au1000_debug = 3;
NETIF_MSG_LINK)
#define DRV_NAME "au1000_eth"
-#define DRV_VERSION "1.7"
#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC "Au1xxx on-chip Ethernet driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
/* AU1000 MAC registers and bits */
#define MAC_CONTROL 0x0
@@ -656,7 +654,6 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct au1000_private *aup = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
}
@@ -1290,8 +1287,6 @@ static int au1000_probe(struct platform_device *pdev)
netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
(unsigned long)base->start, irq);
- pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
-
return 0;
err_out:
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 023aecf6ab30..11c0b13edd30 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -114,8 +114,6 @@ Log: nmclan_cs.c,v
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "nmclan_cs"
-#define DRV_VERSION "0.16"
-
/* ----------------------------------------------------------------------------
Conditional Compilation Options
@@ -367,7 +365,7 @@ typedef struct _mace_private {
char tx_free_frames; /* Number of free transmit frame buffers */
char tx_irq_disabled; /* MACE TX interrupt disabled */
-
+
spinlock_t bank_lock; /* Must be held if you step off bank 0 */
} mace_private;
@@ -444,7 +442,7 @@ static int nmclan_probe(struct pcmcia_device *link)
lp = netdev_priv(dev);
lp->p_dev = link;
link->priv = dev;
-
+
spin_lock_init(&lp->bank_lock);
link->resource[0]->end = 32;
link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
@@ -817,7 +815,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -1110,7 +1107,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
if (pkt_len & 1)
*(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
skb->protocol = eth_type_trans(skb, dev);
-
+
netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
dev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index dc7d88227e76..07e8211eea51 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -24,13 +24,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "pcnet32"
-#define DRV_VERSION "1.35"
#define DRV_RELDATE "21.Apr.2008"
#define PFX DRV_NAME ": "
-static const char *const version =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -809,7 +805,6 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
struct pcnet32_private *lp = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (lp->pci_dev)
strlcpy(info->bus_info, pci_name(lp->pci_dev),
sizeof(info->bus_info));
@@ -3006,8 +3001,6 @@ MODULE_LICENSE("GPL");
static int __init pcnet32_init_module(void)
{
- pr_info("%s", version);
-
pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index b00e00881253..a21b2e60157e 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -105,14 +105,9 @@ static char lancestr[] = "LANCE";
#include <asm/irq.h>
#define DRV_NAME "sunlance"
-#define DRV_VERSION "2.02"
#define DRV_RELDATE "8/24/03"
#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)"
-static char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
-
-MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun Lance ethernet driver");
MODULE_LICENSE("GPL");
@@ -1282,7 +1277,6 @@ static void lance_free_hwresources(struct lance_private *lp)
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "sunlance", sizeof(info->driver));
- strlcpy(info->version, "2.02", sizeof(info->version));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
@@ -1305,7 +1299,6 @@ static int sparc_lance_probe_one(struct platform_device *op,
struct platform_device *lebuffer)
{
struct device_node *dp = op->dev.of_node;
- static unsigned version_printed;
struct lance_private *lp;
struct net_device *dev;
int i;
@@ -1316,9 +1309,6 @@ static int sparc_lance_probe_one(struct platform_device *op,
lp = netdev_priv(dev);
- if (sparc_lance_debug && version_printed++ == 0)
- printk (KERN_INFO "%s", version);
-
spin_lock_init(&lp->lock);
/* Copy the IDPROM ethernet address to the device structure, later we
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b71f9b04a51e..a87264f95f1a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -514,7 +514,7 @@ static void xgbe_isr_task(unsigned long data)
xgbe_disable_rx_tx_ints(pdata);
/* Turn on polling */
- __napi_schedule_irqoff(&pdata->napi);
+ __napi_schedule(&pdata->napi);
}
} else {
/* Don't clear Rx/Tx status if doing per channel DMA
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 8083173f1a8f..61f39a0e04f9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -405,7 +405,6 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
@@ -451,30 +450,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
unsigned int rx_frames, rx_riwt, rx_usecs;
unsigned int tx_frames;
- /* Check for not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) ||
- (ec->tx_coalesce_usecs) ||
- (ec->tx_coalesce_usecs_irq) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->use_adaptive_rx_coalesce) ||
- (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) ||
- (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) ||
- (ec->tx_coalesce_usecs_low) ||
- (ec->tx_max_coalesced_frames_low) ||
- (ec->pkt_rate_high) ||
- (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_high) ||
- (ec->rate_sample_interval)) {
- netdev_err(netdev, "unsupported coalescing parameter\n");
- return -EOPNOTSUPP;
- }
-
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
rx_frames = ec->rx_max_coalesced_frames;
@@ -838,6 +813,8 @@ out:
}
static const struct ethtool_ops xgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
.set_msglevel = xgbe_set_msglevel,
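Context (not part of the patch): the long block of removed parameter checks is now centralized. Since v5.7 the ethtool core compares every coalesce field user space sets against ops->supported_coalesce_params and returns -EOPNOTSUPP itself, before the driver callback runs. A minimal hedged sketch of a driver relying on the mask (foo_* names are hypothetical):

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	/* With the mask below, set_coalesce only ever sees rx_coalesce_usecs
	 * and the rx/tx max_coalesced_frames fields set to non-default
	 * values; everything else is rejected by the core.
	 */
	static int foo_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
	{
		return 0;	/* apply ec->rx_coalesce_usecs etc. here */
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
					     ETHTOOL_COALESCE_MAX_FRAMES,
		.set_coalesce = foo_set_coalesce,
	};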
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 7ce9c69e9c44..2a70714a791d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -127,7 +127,6 @@
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
static int debug = -1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 47bcbcf58048..5897e46faca5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -135,7 +135,6 @@
#include <linux/list.h>
#define XGBE_DRV_NAME "amd-xgbe"
-#define XGBE_DRV_VERSION "1.0.3"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
index a58250c1b57a..b78d1a99fe81 100644
--- a/drivers/net/ethernet/apm/xgene-v2/ethtool.c
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
@@ -89,8 +89,6 @@ static void xge_get_drvinfo(struct net_device *ndev,
struct platform_device *pdev = pdata->pdev;
strcpy(info->driver, "xgene-enet-v2");
- strcpy(info->version, XGENE_ENET_V2_VERSION);
- snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
sprintf(info->bus_info, "%s", pdev->name);
}
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index c48f60996761..860c18fb7aae 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -741,5 +741,4 @@ module_platform_driver(xge_driver);
MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
-MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
index d41439d2709d..b3985a7be59d 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.h
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -28,7 +28,6 @@
#include "ring.h"
#include "ethtool.h"
-#define XGENE_ENET_V2_VERSION "v1.0"
#define XGENE_ENET_STD_MTU 1536
#define XGENE_ENET_MIN_FRAME 60
#define IRQ_ID_SIZE 16
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 246dec27140d..ada70425b48c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -103,8 +103,6 @@ static void xgene_get_drvinfo(struct net_device *ndev,
struct platform_device *pdev = pdata->pdev;
strcpy(info->driver, "xgene_enet");
- strcpy(info->version, XGENE_DRV_VERSION);
- snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
sprintf(info->bus_info, "%s", pdev->name);
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 6aee2f0fc0db..5f1fc6582d74 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2179,7 +2179,6 @@ static struct platform_driver xgene_enet_driver = {
module_platform_driver(xgene_enet_driver);
MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
-MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 18f4923b1723..d35a338120cf 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -28,7 +28,6 @@
#include "xgene_enet_ring2.h"
#include "../../../phy/mdio-xgene.h"
-#define XGENE_DRV_VERSION "v1.0"
#define ETHER_MIN_PACKET 64
#define ETHER_STD_PACKET 1518
#define XGENE_ENET_STD_MTU 1536
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a58185b1d8bf..3e3711b60d01 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1182,7 +1182,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
int i;
unsigned short data;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 3; i++)
{
reset_and_select_srom(dev);
data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
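Context (not part of the patch): read_srom() returns one 16-bit word and ea points at a six-byte station address, so three iterations fill the buffer; the old bound of six wrote twelve bytes past a six-byte array. A hedged userspace-style sketch of the unpacking, ignoring the driver's bit-reversal details (srom_read_word() is a hypothetical stand-in for read_srom()):

	#include <stdint.h>
	#include <stddef.h>

	extern uint16_t srom_read_word(size_t word_idx);

	static void srom_read_mac(uint8_t ea[6])
	{
		/* Three 16-bit words yield exactly six address bytes. */
		for (size_t i = 0; i < 3; i++) {
			uint16_t data = srom_read_word(i);

			ea[2 * i]     = data & 0xff;		/* low byte  */
			ea[2 * i + 1] = (data >> 8) & 0xff;	/* high byte */
		}
	}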
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index 350a48e4f124..76a44b2546ff 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -20,6 +20,7 @@ config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
depends on X86_64 || ARM64 || COMPILE_TEST
+ depends on MACSEC || MACSEC=n
---help---
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 6e0a6e234483..8b555665a33a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -8,6 +8,8 @@
obj-$(CONFIG_AQTION) += atlantic.o
+ccflags-y += -I$(src)
+
atlantic-objs := aq_main.o \
aq_nic.o \
aq_pci_func.o \
@@ -22,6 +24,9 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
- hw_atl/hw_atl_llh.o
+ hw_atl/hw_atl_llh.o \
+ macsec/macsec_api.o
+
+atlantic-$(CONFIG_MACSEC) += aq_macsec.o
atlantic-$(CONFIG_PTP_1588_CLOCK) += aq_ptp.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index f0c41f7408e5..7560f5506e55 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -9,8 +9,6 @@
#ifndef AQ_CFG_H
#define AQ_CFG_H
-#include <generated/utsrelease.h>
-
#define AQ_CFG_VECS_DEF 8U
#define AQ_CFG_TCS_DEF 1U
@@ -85,7 +83,5 @@
#define AQ_CFG_DRV_AUTHOR "aQuantia"
#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
#define AQ_CFG_DRV_NAME "atlantic"
-#define AQ_CFG_DRV_VERSION UTS_RELEASE \
- AQ_CFG_DRV_VERSION_SUFFIX
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 42ea8d8daa46..c8c402b013bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -12,7 +12,6 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
-#include "ver.h"
#include "aq_cfg.h"
#include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 7b55633d2cb9..7241cf92b43a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -11,6 +11,7 @@
#include "aq_vec.h"
#include "aq_ptp.h"
#include "aq_filters.h"
+#include "aq_macsec.h"
#include <linux/ptp_clock_kernel.h>
@@ -96,6 +97,62 @@ static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InErrors",
};
+#if IS_ENABLED(CONFIG_MACSEC)
+static const char aq_macsec_stat_names[][ETH_GSTRING_LEN] = {
+ "MACSec InCtlPackets",
+ "MACSec InTaggedMissPackets",
+ "MACSec InUntaggedMissPackets",
+ "MACSec InNotagPackets",
+ "MACSec InUntaggedPackets",
+ "MACSec InBadTagPackets",
+ "MACSec InNoSciPackets",
+ "MACSec InUnknownSciPackets",
+ "MACSec InCtrlPortPassPackets",
+ "MACSec InUnctrlPortPassPackets",
+ "MACSec InCtrlPortFailPackets",
+ "MACSec InUnctrlPortFailPackets",
+ "MACSec InTooLongPackets",
+ "MACSec InIgpocCtlPackets",
+ "MACSec InEccErrorPackets",
+ "MACSec InUnctrlHitDropRedir",
+ "MACSec OutCtlPackets",
+ "MACSec OutUnknownSaPackets",
+ "MACSec OutUntaggedPackets",
+ "MACSec OutTooLong",
+ "MACSec OutEccErrorPackets",
+ "MACSec OutUnctrlHitDropRedir",
+};
+
+static const char *aq_macsec_txsc_stat_names[] = {
+ "MACSecTXSC%d ProtectedPkts",
+ "MACSecTXSC%d EncryptedPkts",
+ "MACSecTXSC%d ProtectedOctets",
+ "MACSecTXSC%d EncryptedOctets",
+};
+
+static const char *aq_macsec_txsa_stat_names[] = {
+ "MACSecTXSC%dSA%d HitDropRedirect",
+ "MACSecTXSC%dSA%d Protected2Pkts",
+ "MACSecTXSC%dSA%d ProtectedPkts",
+ "MACSecTXSC%dSA%d EncryptedPkts",
+};
+
+static const char *aq_macsec_rxsa_stat_names[] = {
+ "MACSecRXSC%dSA%d UntaggedHitPkts",
+ "MACSecRXSC%dSA%d CtrlHitDrpRedir",
+ "MACSecRXSC%dSA%d NotUsingSa",
+ "MACSecRXSC%dSA%d UnusedSa",
+ "MACSecRXSC%dSA%d NotValidPkts",
+ "MACSecRXSC%dSA%d InvalidPkts",
+ "MACSecRXSC%dSA%d OkPkts",
+ "MACSecRXSC%dSA%d LatePkts",
+ "MACSecRXSC%dSA%d DelayedPkts",
+ "MACSecRXSC%dSA%d UncheckedPkts",
+ "MACSecRXSC%dSA%d ValidatedOctets",
+ "MACSecRXSC%dSA%d DecryptedOctets",
+};
+#endif
+
static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
"DMASystemLoopback",
"PKTSystemLoopback",
@@ -104,18 +161,38 @@ static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
"PHYExternalLoopback",
};
+static u32 aq_ethtool_n_stats(struct net_device *ndev)
+{
+ struct aq_nic_s *nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
+ u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+ ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (nic->macsec_cfg) {
+ n_stats += ARRAY_SIZE(aq_macsec_stat_names) +
+ ARRAY_SIZE(aq_macsec_txsc_stat_names) *
+ aq_macsec_tx_sc_cnt(nic) +
+ ARRAY_SIZE(aq_macsec_txsa_stat_names) *
+ aq_macsec_tx_sa_cnt(nic) +
+ ARRAY_SIZE(aq_macsec_rxsa_stat_names) *
+ aq_macsec_rx_sa_cnt(nic);
+ }
+#endif
+
+ return n_stats;
+}
+
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg;
-
- cfg = aq_nic_get_cfg(aq_nic);
- memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
- ARRAY_SIZE(aq_ethtool_queue_stat_names) *
- cfg->vecs) * sizeof(u64));
- aq_nic_get_stats(aq_nic, data);
+ memset(data, 0, aq_ethtool_n_stats(ndev) * sizeof(u64));
+ data = aq_nic_get_stats(aq_nic, data);
+#if IS_ENABLED(CONFIG_MACSEC)
+ data = aq_macsec_get_stats(aq_nic, data);
+#endif
}
static void aq_ethtool_get_drvinfo(struct net_device *ndev,
@@ -123,16 +200,13 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
{
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg;
u32 firmware_version;
u32 regs_count;
- cfg = aq_nic_get_cfg(aq_nic);
firmware_version = aq_nic_get_fw_version(aq_nic);
regs_count = aq_nic_get_regs_count(aq_nic);
strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
- strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u", firmware_version >> 24,
@@ -140,8 +214,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
- cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
+ drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = regs_count;
drvinfo->eedump_len = 0;
@@ -154,6 +227,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
struct aq_nic_cfg_s *cfg;
u8 *p = data;
int i, si;
+#if IS_ENABLED(CONFIG_MACSEC)
+ int sa;
+#endif
cfg = aq_nic_get_cfg(aq_nic);
@@ -171,6 +247,60 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
p += ETH_GSTRING_LEN;
}
}
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (!aq_nic->macsec_cfg)
+ break;
+
+ memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names));
+ p = p + sizeof(aq_macsec_stat_names);
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ struct aq_macsec_txsc *aq_txsc;
+
+ if (!(test_bit(i, &aq_nic->macsec_cfg->txsc_idx_busy)))
+ continue;
+
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_txsc_stat_names[si], i);
+ p += ETH_GSTRING_LEN;
+ }
+ aq_txsc = &aq_nic->macsec_cfg->aq_txsc[i];
+ for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
+ if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy)))
+ continue;
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_txsa_stat_names[si],
+ i, sa);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ struct aq_macsec_rxsc *aq_rxsc;
+
+ if (!(test_bit(i, &aq_nic->macsec_cfg->rxsc_idx_busy)))
+ continue;
+
+ aq_rxsc = &aq_nic->macsec_cfg->aq_rxsc[i];
+ for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
+ if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy)))
+ continue;
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_rxsa_stat_names[si],
+ i, sa);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+#endif
break;
case ETH_SS_PRIV_FLAGS:
memcpy(p, aq_ethtool_priv_flag_names,
@@ -210,16 +340,11 @@ static int aq_ethtool_set_phys_id(struct net_device *ndev,
static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
{
- struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg;
int ret = 0;
- cfg = aq_nic_get_cfg(aq_nic);
-
switch (stringset) {
case ETH_SS_STATS:
- ret = ARRAY_SIZE(aq_ethtool_stat_names) +
- cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
+ ret = aq_ethtool_n_stats(ndev);
break;
case ETH_SS_PRIV_FLAGS:
ret = ARRAY_SIZE(aq_ethtool_priv_flag_names);
@@ -387,21 +512,10 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
cfg = aq_nic_get_cfg(aq_nic);
- /* This is not yet supported
- */
- if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
- return -EOPNOTSUPP;
-
/* Atlantic only supports timing based coalescing
*/
if (coal->rx_max_coalesced_frames > 1 ||
- coal->rx_coalesce_usecs_irq ||
- coal->rx_max_coalesced_frames_irq)
- return -EOPNOTSUPP;
-
- if (coal->tx_max_coalesced_frames > 1 ||
- coal->tx_coalesce_usecs_irq ||
- coal->tx_max_coalesced_frames_irq)
+ coal->tx_max_coalesced_frames > 1)
return -EOPNOTSUPP;
/* We do not support frame counting. Check this
@@ -743,6 +857,8 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
}
const struct ethtool_ops aq_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
.get_regs = aq_ethtool_get_regs,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 251767c31f7e..7d71bc7dc500 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -343,6 +343,12 @@ struct aq_fw_ops {
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
u32 *supported_rates);
+
+ u32 (*get_link_capabilities)(struct aq_hw_s *self);
+
+ int (*send_macsec_req)(struct aq_hw_s *self,
+ struct macsec_msg_fw_request *msg,
+ struct macsec_msg_fw_response *resp);
};
#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
new file mode 100644
index 000000000000..0b3e234a54aa
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "aq_macsec.h"
+#include "aq_nic.h"
+#include <linux/rtnetlink.h>
+
+#include "macsec/macsec_api.h"
+#define AQ_MACSEC_KEY_LEN_128_BIT 16
+#define AQ_MACSEC_KEY_LEN_192_BIT 24
+#define AQ_MACSEC_KEY_LEN_256_BIT 32
+
+enum aq_clear_type {
+ /* update HW configuration */
+ AQ_CLEAR_HW = BIT(0),
+ /* update SW configuration (busy bits, pointers) */
+ AQ_CLEAR_SW = BIT(1),
+ /* update both HW and SW configuration */
+ AQ_CLEAR_ALL = AQ_CLEAR_HW | AQ_CLEAR_SW,
+};
+
+static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
+ enum aq_clear_type clear_type);
+static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
+ const int sa_num, enum aq_clear_type clear_type);
+static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
+ enum aq_clear_type clear_type);
+static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
+ const int sa_num, enum aq_clear_type clear_type);
+static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
+ enum aq_clear_type clear_type);
+static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
+static int aq_apply_secy_cfg(struct aq_nic_s *nic,
+ const struct macsec_secy *secy);
+
+static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac)
+{
+ u32 tmp[2] = { 0 };
+
+ memcpy(((u8 *)tmp) + 2, emac, ETH_ALEN);
+
+ mac[0] = swab32(tmp[1]);
+ mac[1] = swab32(tmp[0]);
+}
+
+/* There's a 1:1 mapping between SecY and TX SC */
+static int aq_get_txsc_idx_from_secy(struct aq_macsec_cfg *macsec_cfg,
+ const struct macsec_secy *secy)
+{
+ int i;
+
+ if (unlikely(!secy))
+ return -1;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (macsec_cfg->aq_txsc[i].sw_secy == secy)
+ return i;
+ }
+ return -1;
+}
+
+static int aq_get_rxsc_idx_from_rxsc(struct aq_macsec_cfg *macsec_cfg,
+ const struct macsec_rx_sc *rxsc)
+{
+ int i;
+
+ if (unlikely(!rxsc))
+ return -1;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (macsec_cfg->aq_rxsc[i].sw_rxsc == rxsc)
+ return i;
+ }
+
+ return -1;
+}
+
+static int aq_get_txsc_idx_from_sc_idx(const enum aq_macsec_sc_sa sc_sa,
+ const int sc_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sc_idx >> 2;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sc_idx >> 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sc_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -1;
+}
+
+/* Rotate keys u32[8] */
+static void aq_rotate_keys(u32 (*key)[8], const int key_len)
+{
+ u32 tmp[8] = { 0 };
+
+ memcpy(&tmp, key, sizeof(tmp));
+ memset(*key, 0, sizeof(*key));
+
+ if (key_len == AQ_MACSEC_KEY_LEN_128_BIT) {
+ (*key)[0] = swab32(tmp[3]);
+ (*key)[1] = swab32(tmp[2]);
+ (*key)[2] = swab32(tmp[1]);
+ (*key)[3] = swab32(tmp[0]);
+ } else if (key_len == AQ_MACSEC_KEY_LEN_192_BIT) {
+ (*key)[0] = swab32(tmp[5]);
+ (*key)[1] = swab32(tmp[4]);
+ (*key)[2] = swab32(tmp[3]);
+ (*key)[3] = swab32(tmp[2]);
+ (*key)[4] = swab32(tmp[1]);
+ (*key)[5] = swab32(tmp[0]);
+ } else if (key_len == AQ_MACSEC_KEY_LEN_256_BIT) {
+ (*key)[0] = swab32(tmp[7]);
+ (*key)[1] = swab32(tmp[6]);
+ (*key)[2] = swab32(tmp[5]);
+ (*key)[3] = swab32(tmp[4]);
+ (*key)[4] = swab32(tmp[3]);
+ (*key)[5] = swab32(tmp[2]);
+ (*key)[6] = swab32(tmp[1]);
+ (*key)[7] = swab32(tmp[0]);
+ } else {
+ pr_warn("Rotate_keys: invalid key_len\n");
+ }
+}
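/* Editor's note, not part of the patch: on a little-endian host the
 * reverse-word-order-plus-swab32 combination above amounts to a full byte
 * reversal of the active part of the key buffer, so byte 0 of the input key
 * lands in the most significant byte of the last used u32 word, presumably
 * the layout the MACsec engine's key registers expect.
 */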
+
+#define STATS_2x32_TO_64(stat_field) \
+ (((u64)stat_field[1] << 32) | stat_field[0])
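/* Editor's note, not part of the patch: each MSS counter is exported as two
 * u32 words with the low word first; the macro widens word [1] and ORs in
 * word [0], e.g. {0x00000002, 0x00000001} combines to 0x0000000100000002.
 */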
+
+static int aq_get_macsec_common_stats(struct aq_hw_s *hw,
+ struct aq_macsec_common_stats *stats)
+{
+ struct aq_mss_ingress_common_counters ingress_counters;
+ struct aq_mss_egress_common_counters egress_counters;
+ int ret;
+
+ /* MACSEC counters */
+ ret = aq_mss_get_ingress_common_counters(hw, &ingress_counters);
+ if (unlikely(ret))
+ return ret;
+
+ stats->in.ctl_pkts = STATS_2x32_TO_64(ingress_counters.ctl_pkts);
+ stats->in.tagged_miss_pkts =
+ STATS_2x32_TO_64(ingress_counters.tagged_miss_pkts);
+ stats->in.untagged_miss_pkts =
+ STATS_2x32_TO_64(ingress_counters.untagged_miss_pkts);
+ stats->in.notag_pkts = STATS_2x32_TO_64(ingress_counters.notag_pkts);
+ stats->in.untagged_pkts =
+ STATS_2x32_TO_64(ingress_counters.untagged_pkts);
+ stats->in.bad_tag_pkts =
+ STATS_2x32_TO_64(ingress_counters.bad_tag_pkts);
+ stats->in.no_sci_pkts = STATS_2x32_TO_64(ingress_counters.no_sci_pkts);
+ stats->in.unknown_sci_pkts =
+ STATS_2x32_TO_64(ingress_counters.unknown_sci_pkts);
+ stats->in.ctrl_prt_pass_pkts =
+ STATS_2x32_TO_64(ingress_counters.ctrl_prt_pass_pkts);
+ stats->in.unctrl_prt_pass_pkts =
+ STATS_2x32_TO_64(ingress_counters.unctrl_prt_pass_pkts);
+ stats->in.ctrl_prt_fail_pkts =
+ STATS_2x32_TO_64(ingress_counters.ctrl_prt_fail_pkts);
+ stats->in.unctrl_prt_fail_pkts =
+ STATS_2x32_TO_64(ingress_counters.unctrl_prt_fail_pkts);
+ stats->in.too_long_pkts =
+ STATS_2x32_TO_64(ingress_counters.too_long_pkts);
+ stats->in.igpoc_ctl_pkts =
+ STATS_2x32_TO_64(ingress_counters.igpoc_ctl_pkts);
+ stats->in.ecc_error_pkts =
+ STATS_2x32_TO_64(ingress_counters.ecc_error_pkts);
+ stats->in.unctrl_hit_drop_redir =
+ STATS_2x32_TO_64(ingress_counters.unctrl_hit_drop_redir);
+
+ ret = aq_mss_get_egress_common_counters(hw, &egress_counters);
+ if (unlikely(ret))
+ return ret;
+ stats->out.ctl_pkts = STATS_2x32_TO_64(egress_counters.ctl_pkt);
+ stats->out.unknown_sa_pkts =
+ STATS_2x32_TO_64(egress_counters.unknown_sa_pkts);
+ stats->out.untagged_pkts =
+ STATS_2x32_TO_64(egress_counters.untagged_pkts);
+ stats->out.too_long = STATS_2x32_TO_64(egress_counters.too_long);
+ stats->out.ecc_error_pkts =
+ STATS_2x32_TO_64(egress_counters.ecc_error_pkts);
+ stats->out.unctrl_hit_drop_redir =
+ STATS_2x32_TO_64(egress_counters.unctrl_hit_drop_redir);
+
+ return 0;
+}
+
+static int aq_get_rxsa_stats(struct aq_hw_s *hw, const int sa_idx,
+ struct aq_macsec_rx_sa_stats *stats)
+{
+ struct aq_mss_ingress_sa_counters i_sa_counters;
+ int ret;
+
+ ret = aq_mss_get_ingress_sa_counters(hw, &i_sa_counters, sa_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->untagged_hit_pkts =
+ STATS_2x32_TO_64(i_sa_counters.untagged_hit_pkts);
+ stats->ctrl_hit_drop_redir_pkts =
+ STATS_2x32_TO_64(i_sa_counters.ctrl_hit_drop_redir_pkts);
+ stats->not_using_sa = STATS_2x32_TO_64(i_sa_counters.not_using_sa);
+ stats->unused_sa = STATS_2x32_TO_64(i_sa_counters.unused_sa);
+ stats->not_valid_pkts = STATS_2x32_TO_64(i_sa_counters.not_valid_pkts);
+ stats->invalid_pkts = STATS_2x32_TO_64(i_sa_counters.invalid_pkts);
+ stats->ok_pkts = STATS_2x32_TO_64(i_sa_counters.ok_pkts);
+ stats->late_pkts = STATS_2x32_TO_64(i_sa_counters.late_pkts);
+ stats->delayed_pkts = STATS_2x32_TO_64(i_sa_counters.delayed_pkts);
+ stats->unchecked_pkts = STATS_2x32_TO_64(i_sa_counters.unchecked_pkts);
+ stats->validated_octets =
+ STATS_2x32_TO_64(i_sa_counters.validated_octets);
+ stats->decrypted_octets =
+ STATS_2x32_TO_64(i_sa_counters.decrypted_octets);
+
+ return 0;
+}
+
+static int aq_get_txsa_stats(struct aq_hw_s *hw, const int sa_idx,
+ struct aq_macsec_tx_sa_stats *stats)
+{
+ struct aq_mss_egress_sa_counters e_sa_counters;
+ int ret;
+
+ ret = aq_mss_get_egress_sa_counters(hw, &e_sa_counters, sa_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->sa_hit_drop_redirect =
+ STATS_2x32_TO_64(e_sa_counters.sa_hit_drop_redirect);
+ stats->sa_protected2_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_protected2_pkts);
+ stats->sa_protected_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_protected_pkts);
+ stats->sa_encrypted_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_encrypted_pkts);
+
+ return 0;
+}
+
+static int aq_get_txsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
+{
+ struct aq_mss_egress_sa_record sa_rec;
+ int ret;
+
+ ret = aq_mss_get_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (likely(!ret))
+ *pn = sa_rec.next_pn;
+
+ return ret;
+}
+
+static int aq_get_rxsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
+{
+ struct aq_mss_ingress_sa_record sa_rec;
+ int ret;
+
+ ret = aq_mss_get_ingress_sa_record(hw, &sa_rec, sa_idx);
+ if (likely(!ret))
+ *pn = (!sa_rec.sat_nextpn) ? sa_rec.next_pn : 0;
+
+ return ret;
+}
+
+static int aq_get_txsc_stats(struct aq_hw_s *hw, const int sc_idx,
+ struct aq_macsec_tx_sc_stats *stats)
+{
+ struct aq_mss_egress_sc_counters e_sc_counters;
+ int ret;
+
+ ret = aq_mss_get_egress_sc_counters(hw, &e_sc_counters, sc_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->sc_protected_pkts =
+ STATS_2x32_TO_64(e_sc_counters.sc_protected_pkts);
+ stats->sc_encrypted_pkts =
+ STATS_2x32_TO_64(e_sc_counters.sc_encrypted_pkts);
+ stats->sc_protected_octets =
+ STATS_2x32_TO_64(e_sc_counters.sc_protected_octets);
+ stats->sc_encrypted_octets =
+ STATS_2x32_TO_64(e_sc_counters.sc_encrypted_octets);
+
+ return 0;
+}
+
+static int aq_mdo_dev_open(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int ret = 0;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev))
+ ret = aq_apply_secy_cfg(nic, ctx->secy);
+
+ return ret;
+}
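/* Editor's note, not part of the patch: the early "if (ctx->prepare)
 * return 0;" pattern repeated in every mdo_* handler reflects the MACsec
 * offload contract of this kernel generation: the core calls each op twice,
 * first with ctx->prepare set (validate only, fail early), then again with
 * it clear to commit the change to hardware.
 */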
+
+static int aq_mdo_dev_stop(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int i;
+
+ if (ctx->prepare)
+ return 0;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
+ aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
+ AQ_CLEAR_HW);
+ }
+
+ return 0;
+}
+
+static int aq_set_txsc(struct aq_nic_s *nic, const int txsc_idx)
+{
+ struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ struct aq_mss_egress_class_record tx_class_rec = { 0 };
+ const struct macsec_secy *secy = aq_txsc->sw_secy;
+ struct aq_mss_egress_sc_record sc_rec = { 0 };
+ unsigned int sc_idx = aq_txsc->hw_sc_idx;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ aq_ether_addr_to_mac(tx_class_rec.mac_sa, secy->netdev->dev_addr);
+
+ put_unaligned_be64((__force u64)secy->sci, tx_class_rec.sci);
+ tx_class_rec.sci_mask = 0;
+
+ tx_class_rec.sa_mask = 0x3f;
+
+ tx_class_rec.action = 0; /* forward to SA/SC table */
+ tx_class_rec.valid = 1;
+
+ tx_class_rec.sc_idx = sc_idx;
+
+ tx_class_rec.sc_sa = nic->macsec_cfg->sc_sa;
+
+ ret = aq_mss_set_egress_class_record(hw, &tx_class_rec, txsc_idx);
+ if (ret)
+ return ret;
+
+ sc_rec.protect = secy->protect_frames;
+ if (secy->tx_sc.encrypt)
+ sc_rec.tci |= BIT(1);
+ if (secy->tx_sc.scb)
+ sc_rec.tci |= BIT(2);
+ if (secy->tx_sc.send_sci)
+ sc_rec.tci |= BIT(3);
+ if (secy->tx_sc.end_station)
+ sc_rec.tci |= BIT(4);
+ /* The C bit is clear if and only if the Secure Data is
+ * exactly the same as the User Data and the ICV is 16 octets long.
+ */
+ if (!(secy->icv_len == 16 && !secy->tx_sc.encrypt))
+ sc_rec.tci |= BIT(0);
+
+ sc_rec.an_roll = 0;
+
+ switch (secy->key_len) {
+ case AQ_MACSEC_KEY_LEN_128_BIT:
+ sc_rec.sak_len = 0;
+ break;
+ case AQ_MACSEC_KEY_LEN_192_BIT:
+ sc_rec.sak_len = 1;
+ break;
+ case AQ_MACSEC_KEY_LEN_256_BIT:
+ sc_rec.sak_len = 2;
+ break;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ return -EINVAL;
+ }
+
+ sc_rec.curr_an = secy->tx_sc.encoding_sa;
+ sc_rec.valid = 1;
+ sc_rec.fresh = 1;
+
+ return aq_mss_set_egress_sc_record(hw, &sc_rec, sc_idx);
+}
+
+static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa)
+{
+ u32 result = 0;
+
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ result = 8;
+ break;
+ case aq_macsec_sa_sc_2sa_16sc:
+ result = 16;
+ break;
+ case aq_macsec_sa_sc_1sa_32sc:
+ result = 32;
+ break;
+ default:
+ break;
+ };
+
+ return result;
+}
+
+static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sc_idx << 2;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sc_idx << 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sc_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ };
+
+ return sc_idx;
+}
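/* Editor's note, not part of the patch, derivable from the helpers above:
 * hardware SA indices are packed as sa_idx = hw_sc_idx | an, so in
 * 4sa_8sc mode the SC index occupies the high bits (sc_idx << 2) and the
 * association number the low two bits; aq_sa_from_sa_idx() and
 * aq_sc_idx_from_sa_idx() further down invert this packing with the
 * matching masks.
 */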
+
+static enum aq_macsec_sc_sa sc_sa_from_num_an(const int num_an)
+{
+ enum aq_macsec_sc_sa sc_sa = aq_macsec_sa_sc_not_used;
+
+ switch (num_an) {
+ case 4:
+ sc_sa = aq_macsec_sa_sc_4sa_8sc;
+ break;
+ case 2:
+ sc_sa = aq_macsec_sa_sc_2sa_16sc;
+ break;
+ case 1:
+ sc_sa = aq_macsec_sa_sc_1sa_32sc;
+ break;
+ default:
+ break;
+ }
+
+ return sc_sa;
+}
+
+static int aq_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ enum aq_macsec_sc_sa sc_sa;
+ u32 txsc_idx;
+ int ret = 0;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ sc_sa = sc_sa_from_num_an(MACSEC_NUM_AN);
+ if (sc_sa == aq_macsec_sa_sc_not_used)
+ return -EINVAL;
+
+ if (hweight32(cfg->txsc_idx_busy) >= aq_sc_idx_max(sc_sa))
+ return -ENOSPC;
+
+ txsc_idx = ffz(cfg->txsc_idx_busy);
+ if (txsc_idx == AQ_MACSEC_MAX_SC)
+ return -ENOSPC;
+
+ if (ctx->prepare)
+ return 0;
+
+ cfg->sc_sa = sc_sa;
+ cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
+ cfg->aq_txsc[txsc_idx].sw_secy = secy;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_set_txsc(nic, txsc_idx);
+
+ set_bit(txsc_idx, &cfg->txsc_idx_busy);
+
+ return 0;
+}
+
+static int aq_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_set_txsc(nic, txsc_idx);
+
+ return ret;
+}
+
+static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
+ enum aq_clear_type clear_type)
+{
+ struct aq_macsec_txsc *tx_sc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ struct aq_mss_egress_class_record tx_class_rec = { 0 };
+ struct aq_mss_egress_sc_record sc_rec = { 0 };
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+ int sa_num;
+
+ for_each_set_bit (sa_num, &tx_sc->tx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
+ ret = aq_clear_txsa(nic, tx_sc, sa_num, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_HW) {
+ ret = aq_mss_set_egress_class_record(hw, &tx_class_rec,
+ txsc_idx);
+ if (ret)
+ return ret;
+
+ sc_rec.fresh = 1;
+ ret = aq_mss_set_egress_sc_record(hw, &sc_rec,
+ tx_sc->hw_sc_idx);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_SW) {
+ clear_bit(txsc_idx, &nic->macsec_cfg->txsc_idx_busy);
+ nic->macsec_cfg->aq_txsc[txsc_idx].sw_secy = NULL;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int ret = 0;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (!nic->macsec_cfg)
+ return 0;
+
+ ret = aq_clear_secy(nic, ctx->secy, AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
+ const struct macsec_secy *secy,
+ const struct macsec_tx_sa *tx_sa,
+ const unsigned char *key, const unsigned char an)
+{
+ const u32 next_pn = tx_sa->next_pn_halves.lower;
+ struct aq_mss_egress_sakey_record key_rec;
+ const unsigned int sa_idx = sc_idx | an;
+ struct aq_mss_egress_sa_record sa_rec;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ memset(&sa_rec, 0, sizeof(sa_rec));
+ sa_rec.valid = tx_sa->active;
+ sa_rec.fresh = 1;
+ sa_rec.next_pn = next_pn;
+
+ ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (ret)
+ return ret;
+
+ if (!key)
+ return ret;
+
+ memset(&key_rec, 0, sizeof(key_rec));
+ memcpy(&key_rec.key, key, secy->key_len);
+
+ aq_rotate_keys(&key_rec.key, secy->key_len);
+
+ ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+
+ return ret;
+}
+
+static int aq_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
+
+ memcpy(aq_txsc->tx_sa_key[ctx->sa.assoc_num], ctx->sa.key,
+ secy->key_len);
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ ctx->sa.tx_sa, ctx->sa.key,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ ctx->sa.tx_sa, NULL, ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
+ const int sa_num, enum aq_clear_type clear_type)
+{
+ const int sa_idx = aq_txsc->hw_sc_idx | sa_num;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ if (clear_type & AQ_CLEAR_SW)
+ clear_bit(sa_num, &aq_txsc->tx_sa_idx_busy);
+
+ if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
+ struct aq_mss_egress_sakey_record key_rec;
+ struct aq_mss_egress_sa_record sa_rec;
+
+ memset(&sa_rec, 0, sizeof(sa_rec));
+ sa_rec.fresh = 1;
+
+ ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (ret)
+ return ret;
+
+ memset(&key_rec, 0, sizeof(key_rec));
+ return aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+ }
+
+ return 0;
+}
+
+static int aq_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
+ AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_rxsc_validate_frames(const enum macsec_validation_type validate)
+{
+ switch (validate) {
+ case MACSEC_VALIDATE_DISABLED:
+ return 2;
+ case MACSEC_VALIDATE_CHECK:
+ return 1;
+ case MACSEC_VALIDATE_STRICT:
+ return 0;
+ default:
+ WARN_ONCE(true, "Invalid validation type");
+ }
+
+ return 0;
+}
+
+static int aq_set_rxsc(struct aq_nic_s *nic, const u32 rxsc_idx)
+{
+ const struct aq_macsec_rxsc *aq_rxsc =
+ &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ struct aq_mss_ingress_preclass_record pre_class_record;
+ const struct macsec_rx_sc *rx_sc = aq_rxsc->sw_rxsc;
+ const struct macsec_secy *secy = aq_rxsc->sw_secy;
+ const u32 hw_sc_idx = aq_rxsc->hw_sc_idx;
+ struct aq_mss_ingress_sc_record sc_record;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ memset(&pre_class_record, 0, sizeof(pre_class_record));
+ put_unaligned_be64((__force u64)rx_sc->sci, pre_class_record.sci);
+ pre_class_record.sci_mask = 0xff;
+ /* match all MACSEC ethertype packets */
+ pre_class_record.eth_type = ETH_P_MACSEC;
+ pre_class_record.eth_type_mask = 0x3;
+
+ aq_ether_addr_to_mac(pre_class_record.mac_sa, (char *)&rx_sc->sci);
+ pre_class_record.sa_mask = 0x3f;
+
+ pre_class_record.an_mask = nic->macsec_cfg->sc_sa;
+ pre_class_record.sc_idx = hw_sc_idx;
+ /* strip SecTAG & forward for decryption */
+ pre_class_record.action = 0x0;
+ pre_class_record.valid = 1;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx + 1);
+ if (ret)
+ return ret;
+
+ /* If SCI is absent, then match by SA alone */
+ pre_class_record.sci_mask = 0;
+ pre_class_record.sci_from_table = 1;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx);
+ if (ret)
+ return ret;
+
+ memset(&sc_record, 0, sizeof(sc_record));
+ sc_record.validate_frames =
+ aq_rxsc_validate_frames(secy->validate_frames);
+ if (secy->replay_protect) {
+ sc_record.replay_protect = 1;
+ sc_record.anti_replay_window = secy->replay_window;
+ }
+ sc_record.valid = 1;
+ sc_record.fresh = 1;
+
+ ret = aq_mss_set_ingress_sc_record(hw, &sc_record, hw_sc_idx);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int aq_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const u32 rxsc_idx_max = aq_sc_idx_max(cfg->sc_sa);
+ u32 rxsc_idx;
+ int ret = 0;
+
+ if (hweight32(cfg->rxsc_idx_busy) >= rxsc_idx_max)
+ return -ENOSPC;
+
+ rxsc_idx = ffz(cfg->rxsc_idx_busy);
+ if (rxsc_idx >= rxsc_idx_max)
+ return -ENOSPC;
+
+ if (ctx->prepare)
+ return 0;
+
+ cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
+ cfg->sc_sa);
+ cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
+ cfg->aq_rxsc[rxsc_idx].sw_rxsc = ctx->rx_sc;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
+ ret = aq_set_rxsc(nic, rxsc_idx);
+
+ if (ret < 0)
+ return ret;
+
+ set_bit(rxsc_idx, &cfg->rxsc_idx_busy);
+
+ return 0;
+}
+
+static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
+ ret = aq_set_rxsc(nic, rxsc_idx);
+
+ return ret;
+}
+
+static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
+ enum aq_clear_type clear_type)
+{
+ struct aq_macsec_rxsc *rx_sc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+ int sa_num;
+
+ for_each_set_bit (sa_num, &rx_sc->rx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
+ ret = aq_clear_rxsa(nic, rx_sc, sa_num, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_HW) {
+ struct aq_mss_ingress_preclass_record pre_class_record;
+ struct aq_mss_ingress_sc_record sc_record;
+
+ memset(&pre_class_record, 0, sizeof(pre_class_record));
+ memset(&sc_record, 0, sizeof(sc_record));
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx);
+ if (ret)
+ return ret;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx + 1);
+ if (ret)
+ return ret;
+
+ sc_record.fresh = 1;
+ ret = aq_mss_set_ingress_sc_record(hw, &sc_record,
+ rx_sc->hw_sc_idx);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_SW) {
+ clear_bit(rxsc_idx, &nic->macsec_cfg->rxsc_idx_busy);
+ rx_sc->sw_secy = NULL;
+ rx_sc->sw_rxsc = NULL;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ enum aq_clear_type clear_type = AQ_CLEAR_SW;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev))
+ clear_type = AQ_CLEAR_ALL;
+
+ ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);
+
+ return ret;
+}
+
+static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
+ const struct macsec_secy *secy,
+ const struct macsec_rx_sa *rx_sa,
+ const unsigned char *key, const unsigned char an)
+{
+ struct aq_mss_ingress_sakey_record sa_key_record;
+ const u32 next_pn = rx_sa->next_pn_halves.lower;
+ struct aq_mss_ingress_sa_record sa_record;
+ struct aq_hw_s *hw = nic->aq_hw;
+ const int sa_idx = sc_idx | an;
+ int ret = 0;
+
+ memset(&sa_record, 0, sizeof(sa_record));
+ sa_record.valid = rx_sa->active;
+ sa_record.fresh = 1;
+ sa_record.next_pn = next_pn;
+
+ ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
+ if (ret)
+ return ret;
+
+ if (!key)
+ return ret;
+
+ memset(&sa_key_record, 0, sizeof(sa_key_record));
+ memcpy(&sa_key_record.key, key, secy->key_len);
+
+ switch (secy->key_len) {
+ case AQ_MACSEC_KEY_LEN_128_BIT:
+ sa_key_record.key_len = 0;
+ break;
+ case AQ_MACSEC_KEY_LEN_192_BIT:
+ sa_key_record.key_len = 1;
+ break;
+ case AQ_MACSEC_KEY_LEN_256_BIT:
+ sa_key_record.key_len = 2;
+ break;
+ default:
+ return -1;
+ }
+
+ aq_rotate_keys(&sa_key_record.key, secy->key_len);
+
+ ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
+
+ return ret;
+}
+
+static int aq_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_rxsc *aq_rxsc;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
+
+ memcpy(aq_rxsc->rx_sa_key[ctx->sa.assoc_num], ctx->sa.key,
+ secy->key_len);
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy,
+ ctx->sa.rx_sa, ctx->sa.key,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
+ secy, ctx->sa.rx_sa, NULL,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
+ const int sa_num, enum aq_clear_type clear_type)
+{
+ int sa_idx = aq_rxsc->hw_sc_idx | sa_num;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ if (clear_type & AQ_CLEAR_SW)
+ clear_bit(sa_num, &aq_rxsc->rx_sa_idx_busy);
+
+ if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
+ struct aq_mss_ingress_sakey_record sa_key_record;
+ struct aq_mss_ingress_sa_record sa_record;
+
+ memset(&sa_key_record, 0, sizeof(sa_key_record));
+ memset(&sa_record, 0, sizeof(sa_record));
+ sa_record.fresh = 1;
+ ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
+ if (ret)
+ return ret;
+
+ return aq_mss_set_ingress_sakey_record(hw, &sa_key_record,
+ sa_idx);
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
+ AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_get_macsec_common_stats(hw, stats);
+
+ ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
+ ctx->stats.dev_stats->InPktsUntagged = stats->in.untagged_pkts;
+ ctx->stats.dev_stats->OutPktsTooLong = stats->out.too_long;
+ ctx->stats.dev_stats->InPktsNoTag = stats->in.notag_pkts;
+ ctx->stats.dev_stats->InPktsBadTag = stats->in.bad_tag_pkts;
+ ctx->stats.dev_stats->InPktsUnknownSCI = stats->in.unknown_sci_pkts;
+ ctx->stats.dev_stats->InPktsNoSCI = stats->in.no_sci_pkts;
+ ctx->stats.dev_stats->InPktsOverrun = 0;
+
+ return 0;
+}
+
+static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_tx_sc_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ stats = &aq_txsc->stats;
+ aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = stats->sc_protected_pkts;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = stats->sc_encrypted_pkts;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = stats->sc_protected_octets;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = stats->sc_encrypted_octets;
+
+ return 0;
+}
+
+static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_tx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ const struct macsec_secy *secy;
+ struct aq_macsec_txsc *aq_txsc;
+ struct macsec_tx_sa *tx_sa;
+ unsigned int sa_idx;
+ int txsc_idx;
+ u32 next_pn;
+ int ret;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
+ stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
+ ret = aq_get_txsa_stats(hw, sa_idx, stats);
+ if (ret)
+ return ret;
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = stats->sa_protected_pkts;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = stats->sa_encrypted_pkts;
+
+ secy = aq_txsc->sw_secy;
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[ctx->sa.assoc_num]);
+ ret = aq_get_txsa_next_pn(hw, sa_idx, &next_pn);
+ if (ret == 0) {
+ spin_lock_bh(&tx_sa->lock);
+ tx_sa->next_pn = next_pn;
+ spin_unlock_bh(&tx_sa->lock);
+ }
+
+ return ret;
+}
+
+static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_rx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_rxsc *aq_rxsc;
+ unsigned int sa_idx;
+ int rxsc_idx;
+ int ret = 0;
+ int i;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+
+ stats = &aq_rxsc->rx_sa_stats[i];
+ sa_idx = aq_rxsc->hw_sc_idx | i;
+ ret = aq_get_rxsa_stats(hw, sa_idx, stats);
+ if (ret)
+ break;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated +=
+ stats->validated_octets;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted +=
+ stats->decrypted_octets;
+ ctx->stats.rx_sc_stats->InPktsUnchecked +=
+ stats->unchecked_pkts;
+ ctx->stats.rx_sc_stats->InPktsDelayed += stats->delayed_pkts;
+ ctx->stats.rx_sc_stats->InPktsOK += stats->ok_pkts;
+ ctx->stats.rx_sc_stats->InPktsInvalid += stats->invalid_pkts;
+ ctx->stats.rx_sc_stats->InPktsLate += stats->late_pkts;
+ ctx->stats.rx_sc_stats->InPktsNotValid += stats->not_valid_pkts;
+ ctx->stats.rx_sc_stats->InPktsNotUsingSA += stats->not_using_sa;
+ ctx->stats.rx_sc_stats->InPktsUnusedSA += stats->unused_sa;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_rx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_rxsc *aq_rxsc;
+ struct macsec_rx_sa *rx_sa;
+ unsigned int sa_idx;
+ int rxsc_idx;
+ u32 next_pn;
+ int ret;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
+ stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
+ sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
+ ret = aq_get_rxsa_stats(hw, sa_idx, stats);
+ if (ret)
+ return ret;
+
+ ctx->stats.rx_sa_stats->InPktsOK = stats->ok_pkts;
+ ctx->stats.rx_sa_stats->InPktsInvalid = stats->invalid_pkts;
+ ctx->stats.rx_sa_stats->InPktsNotValid = stats->not_valid_pkts;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = stats->not_using_sa;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = stats->unused_sa;
+
+ rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[ctx->sa.assoc_num]);
+ ret = aq_get_rxsa_next_pn(hw, sa_idx, &next_pn);
+ if (ret == 0) {
+ spin_lock_bh(&rx_sa->lock);
+ rx_sa->next_pn = next_pn;
+ spin_unlock_bh(&rx_sa->lock);
+ }
+
+ return ret;
+}
+
+static int apply_txsc_cfg(struct aq_nic_s *nic, const int txsc_idx)
+{
+ struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ const struct macsec_secy *secy = aq_txsc->sw_secy;
+ struct macsec_tx_sa *tx_sa;
+ int ret = 0;
+ int i;
+
+ if (!netif_running(secy->netdev))
+ return ret;
+
+ ret = aq_set_txsc(nic, txsc_idx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[i]);
+ if (tx_sa) {
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ tx_sa, aq_txsc->tx_sa_key[i], i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int apply_rxsc_cfg(struct aq_nic_s *nic, const int rxsc_idx)
+{
+ struct aq_macsec_rxsc *aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ const struct macsec_secy *secy = aq_rxsc->sw_secy;
+ struct macsec_rx_sa *rx_sa;
+ int ret = 0;
+ int i;
+
+ if (!netif_running(secy->netdev))
+ return ret;
+
+ ret = aq_set_rxsc(nic, rxsc_idx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[i]);
+ if (rx_sa) {
+ ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy,
+ rx_sa, aq_rxsc->rx_sa_key[i], i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
+ enum aq_clear_type clear_type)
+{
+ struct macsec_rx_sc *rx_sc;
+ int txsc_idx;
+ int rxsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx >= 0) {
+ ret = aq_clear_txsc(nic, txsc_idx, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc;
+ rx_sc = rcu_dereference_bh(rx_sc->next)) {
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (rxsc_idx < 0)
+ continue;
+
+ ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int aq_apply_secy_cfg(struct aq_nic_s *nic,
+ const struct macsec_secy *secy)
+{
+ struct macsec_rx_sc *rx_sc;
+ int txsc_idx;
+ int rxsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx >= 0)
+ apply_txsc_cfg(nic, txsc_idx);
+
+ for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc && rx_sc->active;
+ rx_sc = rcu_dereference_bh(rx_sc->next)) {
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (unlikely(rxsc_idx < 0))
+ continue;
+
+ ret = apply_rxsc_cfg(nic, rxsc_idx);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int aq_apply_macsec_cfg(struct aq_nic_s *nic)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->txsc_idx_busy & BIT(i)) {
+ ret = apply_txsc_cfg(nic, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->rxsc_idx_busy & BIT(i)) {
+ ret = apply_rxsc_cfg(nic, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int aq_sa_from_sa_idx(const enum aq_macsec_sc_sa sc_sa, const int sa_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sa_idx & 3;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sa_idx & 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return 0;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -EINVAL;
+}
+
+static int aq_sc_idx_from_sa_idx(const enum aq_macsec_sc_sa sc_sa,
+ const int sa_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sa_idx & ~3;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sa_idx & ~1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sa_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -EINVAL;
+}
+
+static void aq_check_txsa_expiration(struct aq_nic_s *nic)
+{
+ u32 egress_sa_expired, egress_sa_threshold_expired;
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ const struct macsec_secy *secy;
+ int sc_idx = 0, txsc_idx = 0;
+ enum aq_macsec_sc_sa sc_sa;
+ struct macsec_tx_sa *tx_sa;
+ unsigned char an = 0;
+ int ret;
+ int i;
+
+ sc_sa = cfg->sc_sa;
+
+ ret = aq_mss_get_egress_sa_expired(hw, &egress_sa_expired);
+ if (unlikely(ret))
+ return;
+
+ ret = aq_mss_get_egress_sa_threshold_expired(hw,
+ &egress_sa_threshold_expired);
+
+ for (i = 0; i < AQ_MACSEC_MAX_SA; i++) {
+ if (egress_sa_expired & BIT(i)) {
+ an = aq_sa_from_sa_idx(sc_sa, i);
+ sc_idx = aq_sc_idx_from_sa_idx(sc_sa, i);
+ txsc_idx = aq_get_txsc_idx_from_sc_idx(sc_sa, sc_idx);
+ if (txsc_idx < 0)
+ continue;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ if (!(cfg->txsc_idx_busy & BIT(txsc_idx))) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on invalid TX SC");
+ continue;
+ }
+
+ secy = aq_txsc->sw_secy;
+ if (!netif_running(secy->netdev)) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on down TX SC");
+ continue;
+ }
+
+ if (unlikely(!(aq_txsc->tx_sa_idx_busy & BIT(an)))) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on invalid TX SA");
+ continue;
+ }
+
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ macsec_pn_wrapped((struct macsec_secy *)secy, tx_sa);
+ }
+ }
+
+ aq_mss_set_egress_sa_expired(hw, egress_sa_expired);
+ if (likely(!ret))
+ aq_mss_set_egress_sa_threshold_expired(hw,
+ egress_sa_threshold_expired);
+}
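/* Editor's note, not part of the patch: macsec_pn_wrapped() hands the
 * exhausted SA back to the MACsec core, which at this point in the kernel's
 * history marks the transmit SA inactive so traffic stops using a wrapped
 * packet-number space until user space installs a fresh key.
 */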
+
+const struct macsec_ops aq_macsec_ops = {
+ .mdo_dev_open = aq_mdo_dev_open,
+ .mdo_dev_stop = aq_mdo_dev_stop,
+ .mdo_add_secy = aq_mdo_add_secy,
+ .mdo_upd_secy = aq_mdo_upd_secy,
+ .mdo_del_secy = aq_mdo_del_secy,
+ .mdo_add_rxsc = aq_mdo_add_rxsc,
+ .mdo_upd_rxsc = aq_mdo_upd_rxsc,
+ .mdo_del_rxsc = aq_mdo_del_rxsc,
+ .mdo_add_rxsa = aq_mdo_add_rxsa,
+ .mdo_upd_rxsa = aq_mdo_upd_rxsa,
+ .mdo_del_rxsa = aq_mdo_del_rxsa,
+ .mdo_add_txsa = aq_mdo_add_txsa,
+ .mdo_upd_txsa = aq_mdo_upd_txsa,
+ .mdo_del_txsa = aq_mdo_del_txsa,
+ .mdo_get_dev_stats = aq_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
+};
+
+int aq_macsec_init(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg;
+ u32 caps_lo;
+
+ if (!nic->aq_fw_ops->get_link_capabilities)
+ return 0;
+
+ caps_lo = nic->aq_fw_ops->get_link_capabilities(nic->aq_hw);
+
+ if (!(caps_lo & BIT(CAPS_LO_MACSEC)))
+ return 0;
+
+ nic->macsec_cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!nic->macsec_cfg)
+ return -ENOMEM;
+
+ nic->ndev->features |= NETIF_F_HW_MACSEC;
+ nic->ndev->macsec_ops = &aq_macsec_ops;
+
+ return 0;
+}
+
+void aq_macsec_free(struct aq_nic_s *nic)
+{
+ kfree(nic->macsec_cfg);
+ nic->macsec_cfg = NULL;
+}
+
+int aq_macsec_enable(struct aq_nic_s *nic)
+{
+ u32 ctl_ether_types[1] = { ETH_P_PAE };
+ struct macsec_msg_fw_response resp = { 0 };
+ struct macsec_msg_fw_request msg = { 0 };
+ struct aq_hw_s *hw = nic->aq_hw;
+ int num_ctl_ether_types = 0;
+ int index = 0, tbl_idx;
+ int ret;
+
+ if (!nic->macsec_cfg)
+ return 0;
+
+ rtnl_lock();
+
+ if (nic->aq_fw_ops->send_macsec_req) {
+ struct macsec_cfg_request cfg = { 0 };
+
+ cfg.enabled = 1;
+ cfg.egress_threshold = 0xffffffff;
+ cfg.ingress_threshold = 0xffffffff;
+ cfg.interrupts_enabled = 1;
+
+ msg.msg_type = macsec_cfg_msg;
+ msg.cfg = cfg;
+
+ ret = nic->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
+ if (ret)
+ goto unlock;
+ }
+
+ /* Init Ethertype bypass filters */
+ for (index = 0; index < ARRAY_SIZE(ctl_ether_types); index++) {
+ struct aq_mss_ingress_prectlf_record rx_prectlf_rec;
+ struct aq_mss_egress_ctlf_record tx_ctlf_rec;
+
+ if (ctl_ether_types[index] == 0)
+ continue;
+
+ memset(&tx_ctlf_rec, 0, sizeof(tx_ctlf_rec));
+ tx_ctlf_rec.eth_type = ctl_ether_types[index];
+ tx_ctlf_rec.match_type = 4; /* Match eth_type only */
+ tx_ctlf_rec.match_mask = 0xf; /* match for eth_type */
+ tx_ctlf_rec.action = 0; /* Bypass MACSEC modules */
+ tbl_idx = NUMROWS_EGRESSCTLFRECORD - num_ctl_ether_types - 1;
+ aq_mss_set_egress_ctlf_record(hw, &tx_ctlf_rec, tbl_idx);
+
+ memset(&rx_prectlf_rec, 0, sizeof(rx_prectlf_rec));
+ rx_prectlf_rec.eth_type = ctl_ether_types[index];
+ rx_prectlf_rec.match_type = 4; /* Match eth_type only */
+ rx_prectlf_rec.match_mask = 0xf; /* match for eth_type */
+ rx_prectlf_rec.action = 0; /* Bypass MACSEC modules */
+ tbl_idx =
+ NUMROWS_INGRESSPRECTLFRECORD - num_ctl_ether_types - 1;
+ aq_mss_set_ingress_prectlf_record(hw, &rx_prectlf_rec, tbl_idx);
+
+ num_ctl_ether_types++;
+ }
+
+ ret = aq_apply_macsec_cfg(nic);
+
+unlock:
+ rtnl_unlock();
+ return ret;
+}
+
+void aq_macsec_work(struct aq_nic_s *nic)
+{
+ if (!nic->macsec_cfg)
+ return;
+
+ if (!netif_carrier_ok(nic->ndev))
+ return;
+
+ rtnl_lock();
+ aq_check_txsa_expiration(nic);
+ rtnl_unlock();
+}
+
+int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int i, cnt = 0;
+
+ if (!cfg)
+ return 0;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!test_bit(i, &cfg->rxsc_idx_busy))
+ continue;
+ cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
+ }
+
+ return cnt;
+}
+
+int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
+{
+ if (!nic->macsec_cfg)
+ return 0;
+
+ return hweight_long(nic->macsec_cfg->txsc_idx_busy);
+}
+
+int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int i, cnt = 0;
+
+ if (!cfg)
+ return 0;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!test_bit(i, &cfg->txsc_idx_busy))
+ continue;
+ cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
+ }
+
+ return cnt;
+}
+
+static int aq_macsec_update_stats(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ struct aq_macsec_rxsc *aq_rxsc;
+ int i, sa_idx, assoc_num;
+ int ret = 0;
+
+ aq_get_macsec_common_stats(hw, &cfg->stats);
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!(cfg->txsc_idx_busy & BIT(i)))
+ continue;
+ aq_txsc = &cfg->aq_txsc[i];
+
+ ret = aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx,
+ &aq_txsc->stats);
+ if (ret)
+ return ret;
+
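+ /* The hardware SA index is the SC index with the association
+ * number in the low bits.
+ */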
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
+ continue;
+ sa_idx = aq_txsc->hw_sc_idx | assoc_num;
+ ret = aq_get_txsa_stats(hw, sa_idx,
+ &aq_txsc->tx_sa_stats[assoc_num]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!(test_bit(i, &cfg->rxsc_idx_busy)))
+ continue;
+ aq_rxsc = &cfg->aq_rxsc[i];
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+ sa_idx = aq_rxsc->hw_sc_idx | assoc_num;
+
+ ret = aq_get_rxsa_stats(hw, sa_idx,
+ &aq_rxsc->rx_sa_stats[assoc_num]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_common_stats *common_stats;
+ struct aq_macsec_tx_sc_stats *txsc_stats;
+ struct aq_macsec_tx_sa_stats *txsa_stats;
+ struct aq_macsec_rx_sa_stats *rxsa_stats;
+ struct aq_macsec_txsc *aq_txsc;
+ struct aq_macsec_rxsc *aq_rxsc;
+ unsigned int assoc_num;
+ unsigned int sc_num;
+ unsigned int i = 0U;
+
+ if (!cfg)
+ return data;
+
+ aq_macsec_update_stats(nic);
+
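+ /* Fill the ethtool stats buffer: the first slot is written at
+ * i = 0, all subsequent slots via pre-increment.
+ */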
+ common_stats = &cfg->stats;
+ data[i] = common_stats->in.ctl_pkts;
+ data[++i] = common_stats->in.tagged_miss_pkts;
+ data[++i] = common_stats->in.untagged_miss_pkts;
+ data[++i] = common_stats->in.notag_pkts;
+ data[++i] = common_stats->in.untagged_pkts;
+ data[++i] = common_stats->in.bad_tag_pkts;
+ data[++i] = common_stats->in.no_sci_pkts;
+ data[++i] = common_stats->in.unknown_sci_pkts;
+ data[++i] = common_stats->in.ctrl_prt_pass_pkts;
+ data[++i] = common_stats->in.unctrl_prt_pass_pkts;
+ data[++i] = common_stats->in.ctrl_prt_fail_pkts;
+ data[++i] = common_stats->in.unctrl_prt_fail_pkts;
+ data[++i] = common_stats->in.too_long_pkts;
+ data[++i] = common_stats->in.igpoc_ctl_pkts;
+ data[++i] = common_stats->in.ecc_error_pkts;
+ data[++i] = common_stats->in.unctrl_hit_drop_redir;
+ data[++i] = common_stats->out.ctl_pkts;
+ data[++i] = common_stats->out.unknown_sa_pkts;
+ data[++i] = common_stats->out.untagged_pkts;
+ data[++i] = common_stats->out.too_long;
+ data[++i] = common_stats->out.ecc_error_pkts;
+ data[++i] = common_stats->out.unctrl_hit_drop_redir;
+
+ for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
+ if (!(test_bit(sc_num, &cfg->txsc_idx_busy)))
+ continue;
+
+ aq_txsc = &cfg->aq_txsc[sc_num];
+ txsc_stats = &aq_txsc->stats;
+
+ data[++i] = txsc_stats->sc_protected_pkts;
+ data[++i] = txsc_stats->sc_encrypted_pkts;
+ data[++i] = txsc_stats->sc_protected_octets;
+ data[++i] = txsc_stats->sc_encrypted_octets;
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
+ continue;
+
+ txsa_stats = &aq_txsc->tx_sa_stats[assoc_num];
+
+ data[++i] = txsa_stats->sa_hit_drop_redirect;
+ data[++i] = txsa_stats->sa_protected2_pkts;
+ data[++i] = txsa_stats->sa_protected_pkts;
+ data[++i] = txsa_stats->sa_encrypted_pkts;
+ }
+ }
+
+ for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
+ if (!(test_bit(sc_num, &cfg->rxsc_idx_busy)))
+ continue;
+
+ aq_rxsc = &cfg->aq_rxsc[sc_num];
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+
+ rxsa_stats = &aq_rxsc->rx_sa_stats[assoc_num];
+
+ data[++i] = rxsa_stats->untagged_hit_pkts;
+ data[++i] = rxsa_stats->ctrl_hit_drop_redir_pkts;
+ data[++i] = rxsa_stats->not_using_sa;
+ data[++i] = rxsa_stats->unused_sa;
+ data[++i] = rxsa_stats->not_valid_pkts;
+ data[++i] = rxsa_stats->invalid_pkts;
+ data[++i] = rxsa_stats->ok_pkts;
+ data[++i] = rxsa_stats->late_pkts;
+ data[++i] = rxsa_stats->delayed_pkts;
+ data[++i] = rxsa_stats->unchecked_pkts;
+ data[++i] = rxsa_stats->validated_octets;
+ data[++i] = rxsa_stats->decrypted_octets;
+ }
+ }
+
+ i++;
+
+ data += i;
+
+ return data;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
new file mode 100644
index 000000000000..f5fba8b8cdea
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef AQ_MACSEC_H
+#define AQ_MACSEC_H
+
+#include <linux/netdevice.h>
+#if IS_ENABLED(CONFIG_MACSEC)
+
+#include "net/macsec.h"
+
+struct aq_nic_s;
+
+#define AQ_MACSEC_MAX_SC 32
+#define AQ_MACSEC_MAX_SA 32
+
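+/* Supported SC/SA partitioning modes: the 32 hardware SA records can be
+ * split as 8 SCs x 4 SAs, 16 SCs x 2 SAs, or 32 SCs x 1 SA.
+ */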
+enum aq_macsec_sc_sa {
+ aq_macsec_sa_sc_4sa_8sc,
+ aq_macsec_sa_sc_not_used,
+ aq_macsec_sa_sc_2sa_16sc,
+ aq_macsec_sa_sc_1sa_32sc,
+};
+
+struct aq_macsec_common_stats {
+ /* Ingress Common Counters */
+ struct {
+ u64 ctl_pkts;
+ u64 tagged_miss_pkts;
+ u64 untagged_miss_pkts;
+ u64 notag_pkts;
+ u64 untagged_pkts;
+ u64 bad_tag_pkts;
+ u64 no_sci_pkts;
+ u64 unknown_sci_pkts;
+ u64 ctrl_prt_pass_pkts;
+ u64 unctrl_prt_pass_pkts;
+ u64 ctrl_prt_fail_pkts;
+ u64 unctrl_prt_fail_pkts;
+ u64 too_long_pkts;
+ u64 igpoc_ctl_pkts;
+ u64 ecc_error_pkts;
+ u64 unctrl_hit_drop_redir;
+ } in;
+
+ /* Egress Common Counters */
+ struct {
+ u64 ctl_pkts;
+ u64 unknown_sa_pkts;
+ u64 untagged_pkts;
+ u64 too_long;
+ u64 ecc_error_pkts;
+ u64 unctrl_hit_drop_redir;
+ } out;
+};
+
+/* Ingress SA Counters */
+struct aq_macsec_rx_sa_stats {
+ u64 untagged_hit_pkts;
+ u64 ctrl_hit_drop_redir_pkts;
+ u64 not_using_sa;
+ u64 unused_sa;
+ u64 not_valid_pkts;
+ u64 invalid_pkts;
+ u64 ok_pkts;
+ u64 late_pkts;
+ u64 delayed_pkts;
+ u64 unchecked_pkts;
+ u64 validated_octets;
+ u64 decrypted_octets;
+};
+
+/* Egress SA Counters */
+struct aq_macsec_tx_sa_stats {
+ u64 sa_hit_drop_redirect;
+ u64 sa_protected2_pkts;
+ u64 sa_protected_pkts;
+ u64 sa_encrypted_pkts;
+};
+
+/* Egress SC Counters */
+struct aq_macsec_tx_sc_stats {
+ u64 sc_protected_pkts;
+ u64 sc_encrypted_pkts;
+ u64 sc_protected_octets;
+ u64 sc_encrypted_octets;
+};
+
+struct aq_macsec_txsc {
+ u32 hw_sc_idx;
+ unsigned long tx_sa_idx_busy;
+ const struct macsec_secy *sw_secy;
+ u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+ struct aq_macsec_tx_sc_stats stats;
+ struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
+};
+
+struct aq_macsec_rxsc {
+ u32 hw_sc_idx;
+ unsigned long rx_sa_idx_busy;
+ const struct macsec_secy *sw_secy;
+ const struct macsec_rx_sc *sw_rxsc;
+ u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+ struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
+};
+
+struct aq_macsec_cfg {
+ enum aq_macsec_sc_sa sc_sa;
+ /* Egress channel configuration */
+ unsigned long txsc_idx_busy;
+ struct aq_macsec_txsc aq_txsc[AQ_MACSEC_MAX_SC];
+ /* Ingress channel configuration */
+ unsigned long rxsc_idx_busy;
+ struct aq_macsec_rxsc aq_rxsc[AQ_MACSEC_MAX_SC];
+ /* Statistics / counters */
+ struct aq_macsec_common_stats stats;
+};
+
+extern const struct macsec_ops aq_macsec_ops;
+
+int aq_macsec_init(struct aq_nic_s *nic);
+void aq_macsec_free(struct aq_nic_s *nic);
+int aq_macsec_enable(struct aq_nic_s *nic);
+void aq_macsec_work(struct aq_nic_s *nic);
+u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data);
+int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic);
+int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic);
+int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic);
+
+#endif /* IS_ENABLED(CONFIG_MACSEC) */
+
+#endif /* AQ_MACSEC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 538f460a3da7..9fcab646cbd5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -19,7 +19,6 @@
#include <linux/udp.h>
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e95f6a6bef73..a369705a786a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -11,6 +11,7 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
+#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
@@ -176,6 +177,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
aq_utils_obj_clear(&self->flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_enable(self);
+#endif
netif_tx_wake_all_queues(self->ndev);
}
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
@@ -217,6 +221,10 @@ static void aq_nic_service_task(struct work_struct *work)
if (err)
return;
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_work(self);
+#endif
+
mutex_lock(&self->fwreq_mutex);
if (self->aq_fw_ops->update_stats)
self->aq_fw_ops->update_stats(self->aq_hw);
@@ -262,6 +270,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_init(self);
+#endif
+
mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
self->ndev->dev_addr);
@@ -296,6 +308,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
err_exit:
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (err)
+ aq_macsec_free(self);
+#endif
return err;
}
@@ -765,7 +781,7 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
-void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
+u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
@@ -815,7 +831,10 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
aq_vec_get_sw_stats(aq_vec, data, &count);
}
+ data += count;
+
err_exit:;
+ return data;
}
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index a752f8bb4b08..0663b8d0220d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -17,6 +17,7 @@ struct aq_ring_s;
struct aq_hw_ops;
struct aq_fw_s;
struct aq_vec_s;
+struct aq_macsec_cfg;
struct aq_ptp_s;
enum aq_rx_filter_type;
@@ -129,6 +130,9 @@ struct aq_nic_s {
u32 irqvecs;
/* mutex to serialize FW interface access operations */
struct mutex fwreq_mutex;
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct aq_macsec_cfg *macsec_cfg;
+#endif
/* PTP support */
struct aq_ptp_s *aq_ptp;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
@@ -154,7 +158,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
-void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
+u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
void aq_nic_set_power(struct aq_nic_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 78b6f3248756..8a70ffe1d326 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -18,6 +18,7 @@
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
+#include "aq_macsec.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -56,7 +57,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
{ AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
- { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
{ AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
{ AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
{ AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
@@ -324,6 +325,10 @@ static void aq_pci_remove(struct pci_dev *pdev)
aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_free(self);
+#endif
aq_nic_free_vectors(self);
aq_pci_free_irq_vectors(self);
iounmap(self->aq_hw->mmio);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 42f0c5c6ec2d..b15513914636 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -225,7 +225,7 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[0];
+ u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
@@ -319,6 +319,32 @@ struct __packed hw_atl_utils_settings {
u32 media_detect;
};
+enum macsec_msg_type {
+ macsec_cfg_msg = 0,
+ macsec_add_rx_sc_msg,
+ macsec_add_tx_sc_msg,
+ macsec_add_rx_sa_msg,
+ macsec_add_tx_sa_msg,
+ macsec_get_stats_msg,
+};
+
+struct __packed macsec_cfg_request {
+ u32 enabled;
+ u32 egress_threshold;
+ u32 ingress_threshold;
+ u32 interrupts_enabled;
+};
+
+struct __packed macsec_msg_fw_request {
+ u32 msg_id; /* not used */
+ u32 msg_type;
+ struct macsec_cfg_request cfg;
+};
+
+struct __packed macsec_msg_fw_response {
+ u32 result;
+};
+
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
@@ -437,34 +463,43 @@ enum hw_atl_fw2x_caps_lo {
CAPS_LO_2P5GBASET_FD,
CAPS_LO_5GBASET_FD = 10,
CAPS_LO_10GBASET_FD,
+ CAPS_LO_AUTONEG,
+ CAPS_LO_SMBUS_READ,
+ CAPS_LO_SMBUS_WRITE,
+ CAPS_LO_MACSEC = 15,
+ CAPS_LO_RESERVED1,
+ CAPS_LO_WAKE_ON_LINK_FORCED,
+ CAPS_LO_HIGH_TEMP_WARNING = 29,
+ CAPS_LO_DRIVER_SCRATCHPAD = 30,
+ CAPS_LO_GLOBAL_FAULT = 31
};
/* 0x374
* Status register
*/
enum hw_atl_fw2x_caps_hi {
- CAPS_HI_RESERVED1 = 0,
+ CAPS_HI_TPO2EN = 0,
CAPS_HI_10BASET_EEE,
CAPS_HI_RESERVED2,
CAPS_HI_PAUSE,
CAPS_HI_ASYMMETRIC_PAUSE,
CAPS_HI_100BASETX_EEE = 5,
- CAPS_HI_RESERVED3,
- CAPS_HI_RESERVED4,
+ CAPS_HI_PHY_BUF_SEND,
+ CAPS_HI_PHY_BUF_RECV,
CAPS_HI_1000BASET_FD_EEE,
CAPS_HI_2P5GBASET_FD_EEE,
CAPS_HI_5GBASET_FD_EEE = 10,
CAPS_HI_10GBASET_FD_EEE,
CAPS_HI_FW_REQUEST,
- CAPS_HI_RESERVED6,
- CAPS_HI_RESERVED7,
- CAPS_HI_RESERVED8 = 15,
- CAPS_HI_RESERVED9,
+ CAPS_HI_PHY_LOG,
+ CAPS_HI_EEE_AUTO_DISABLE_SETTINGS,
+ CAPS_HI_PFC = 15,
+ CAPS_HI_WAKE_ON_LINK,
CAPS_HI_CABLE_DIAG,
CAPS_HI_TEMPERATURE,
CAPS_HI_DOWNSHIFT,
CAPS_HI_PTP_AVB_EN_FW2X = 20,
- CAPS_HI_MEDIA_DETECT,
+ CAPS_HI_THERMAL_SHUTDOWN,
CAPS_HI_LINK_DROP,
CAPS_HI_SLEEP_PROXY,
CAPS_HI_WOL,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 77a4ed64830f..1ad10cc14918 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -55,6 +55,8 @@
#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE)
#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_MACSEC BIT(CAPS_LO_MACSEC)
+
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
@@ -86,6 +88,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr);
+static u32 aq_fw2x_state_get(struct aq_hw_s *self);
static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
static int aq_fw2x_init(struct aq_hw_s *self)
@@ -619,11 +622,75 @@ static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr)
return err;
}
+static u32 aq_fw2x_state_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+}
+
static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
{
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
}
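+
+/* Read the caps_lo word from the FW mailbox; returns 0 (no capabilities)
+ * if the mailbox read fails.
+ */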
+static u32 aq_fw2x_get_link_capabilities(struct aq_hw_s *self)
+{
+ int err = 0;
+ u32 offset;
+ u32 val;
+
+ offset = self->mbox_addr +
+ offsetof(struct hw_atl_utils_mbox, info.caps_lo);
+
+ err = hw_atl_utils_fw_downld_dwords(self, offset, &val, 1);
+
+ if (err)
+ return 0;
+
+ return val;
+}
+
+static int aq_fw2x_send_macsec_req(struct aq_hw_s *hw,
+ struct macsec_msg_fw_request *req,
+ struct macsec_msg_fw_response *response)
+{
+ u32 low_status, low_req = 0;
+ u32 dword_cnt;
+ u32 caps_lo;
+ u32 offset;
+ int err;
+
+ if (!req || !response)
+ return -EINVAL;
+
+ caps_lo = aq_fw2x_get_link_capabilities(hw);
+ if (!(caps_lo & BIT(CAPS_LO_MACSEC)))
+ return -EOPNOTSUPP;
+
+ /* Write macsec request to cfg memory, padded to a whole number of dwords */
+ dword_cnt = (sizeof(*req) + sizeof(u32) - 1) / sizeof(u32);
+ err = hw_atl_write_fwcfg_dwords(hw, (void *)req, dword_cnt);
+ if (err < 0)
+ return err;
+
+ /* Toggle 0x368.CAPS_LO_MACSEC bit */
+ low_req = aq_hw_read_reg(hw, HW_ATL_FW2X_MPI_CONTROL_ADDR);
+ low_req ^= HW_ATL_FW2X_CAP_MACSEC;
+ aq_hw_write_reg(hw, HW_ATL_FW2X_MPI_CONTROL_ADDR, low_req);
+
+ /* Wait FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state_get, hw, low_status,
+ low_req != (low_status & BIT(CAPS_LO_MACSEC)), 1U, 10000U);
+ if (err)
+ return -EIO;
+
+ /* Read status of write operation */
+ offset = hw->rpc_addr + sizeof(u32);
+ err = hw_atl_utils_fw_downld_dwords(hw, offset, (u32 *)(void *)response,
+ sizeof(*response) / sizeof(u32));
+
+ return err;
+}
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
.deinit = aq_fw2x_deinit,
@@ -645,4 +712,6 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.led_control = aq_fw2x_led_control,
.set_phyloopback = aq_fw2x_set_phyloopback,
.adjust_ptp = aq_fw3x_adjust_ptp,
+ .get_link_capabilities = aq_fw2x_get_link_capabilities,
+ .send_macsec_req = aq_fw2x_send_macsec_req,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h
new file mode 100644
index 000000000000..71d08ea80b54
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef MSS_EGRESS_REGS_HEADER
+#define MSS_EGRESS_REGS_HEADER
+
+#define MSS_EGRESS_CTL_REGISTER_ADDR 0x00005002
+#define MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR 0x00005060
+#define MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR 0x00005062
+#define MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR 0x00005080
+#define MSS_EGRESS_LUT_CTL_REGISTER_ADDR 0x00005081
+#define MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR 0x000050A0
+
+struct mss_egress_ctl_register {
+ union {
+ struct {
+ unsigned int soft_reset : 1;
+ unsigned int drop_kay_packet : 1;
+ unsigned int drop_egprc_lut_miss : 1;
+ unsigned int gcm_start : 1;
+ unsigned int gcm_test_mode : 1;
+ unsigned int unmatched_use_sc_0 : 1;
+ unsigned int drop_invalid_sa_sc_packets : 1;
+ unsigned int reserved0 : 1;
+ /* Should always be set to 0. */
+ unsigned int external_classification_enable : 1;
+ unsigned int icv_lsb_8bytes_enable : 1;
+ unsigned int high_prio : 1;
+ unsigned int clear_counter : 1;
+ unsigned int clear_global_time : 1;
+ unsigned int ethertype_explicit_sectag_lsb : 3;
+ } bits_0;
+ unsigned short word_0;
+ };
+ union {
+ struct {
+ unsigned int ethertype_explicit_sectag_msb : 13;
+ unsigned int reserved0 : 3;
+ } bits_1;
+ unsigned short word_1;
+ };
+};
+
+struct mss_egress_lut_addr_ctl_register {
+ union {
+ struct {
+ unsigned int lut_addr : 9;
+ unsigned int reserved0 : 3;
+ /* 0x0 : Egress MAC Control Filter (CTLF) LUT
+ * 0x1 : Egress Classification LUT
+ * 0x2 : Egress SC/SA LUT
+ * 0x3 : Egress SMIB
+ */
+ unsigned int lut_select : 4;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+struct mss_egress_lut_ctl_register {
+ union {
+ struct {
+ unsigned int reserved0 : 14;
+ unsigned int lut_read : 1;
+ unsigned int lut_write : 1;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+#endif /* MSS_EGRESS_REGS_HEADER */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h
new file mode 100644
index 000000000000..d4c00d9a0fc6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef MSS_INGRESS_REGS_HEADER
+#define MSS_INGRESS_REGS_HEADER
+
+#define MSS_INGRESS_CTL_REGISTER_ADDR 0x0000800E
+#define MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR 0x00008080
+#define MSS_INGRESS_LUT_CTL_REGISTER_ADDR 0x00008081
+#define MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR 0x000080A0
+
+struct mss_ingress_ctl_register {
+ union {
+ struct {
+ unsigned int soft_reset : 1;
+ unsigned int operation_point_to_point : 1;
+ unsigned int create_sci : 1;
+ /* Unused */
+ unsigned int mask_short_length_error : 1;
+ unsigned int drop_kay_packet : 1;
+ unsigned int drop_igprc_miss : 1;
+ /* Unused */
+ unsigned int check_icv : 1;
+ unsigned int clear_global_time : 1;
+ unsigned int clear_count : 1;
+ unsigned int high_prio : 1;
+ unsigned int remove_sectag : 1;
+ unsigned int global_validate_frames : 2;
+ unsigned int icv_lsb_8bytes_enabled : 1;
+ unsigned int reserved0 : 2;
+ } bits_0;
+ unsigned short word_0;
+ };
+ union {
+ struct {
+ unsigned int reserved0 : 16;
+ } bits_1;
+ unsigned short word_1;
+ };
+};
+
+struct mss_ingress_lut_addr_ctl_register {
+ union {
+ struct {
+ unsigned int lut_addr : 9;
+ unsigned int reserved0 : 3;
+ /* 0x0 : Ingress Pre-Security MAC Control Filter
+ * (IGPRCTLF) LUT
+ * 0x1 : Ingress Pre-Security Classification LUT (IGPRC)
+ * 0x2 : Ingress Packet Format (IGPFMT) SAKey LUT
+ * 0x3 : Ingress Packet Format (IGPFMT) SC/SA LUT
+ * 0x4 : Ingress Post-Security Classification LUT
+ * (IGPOC)
+ * 0x5 : Ingress Post-Security MAC Control Filter
+ * (IGPOCTLF) LUT
+ * 0x6 : Ingress MIB (IGMIB)
+ */
+ unsigned int lut_select : 4;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+struct mss_ingress_lut_ctl_register {
+ union {
+ struct {
+ unsigned int reserved0 : 14;
+ unsigned int lut_read : 1;
+ unsigned int lut_write : 1;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+#endif /* MSS_INGRESS_REGS_HEADER */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
new file mode 100644
index 000000000000..fbe9d88b13c7
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -0,0 +1,2473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "macsec_api.h"
+#include <linux/mdio.h>
+#include "MSS_Ingress_registers.h"
+#include "MSS_Egress_registers.h"
+#include "aq_phy.h"
+
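+/* Run an MSS accessor under the MDIO semaphore: acquire the semaphore,
+ * invoke the function, release the semaphore, and return the function's
+ * status (or the semaphore acquisition error).
+ */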
+#define AQ_API_CALL_SAFE(func, ...) \
+({ \
+ int ret; \
+ do { \
+ ret = aq_mss_mdio_sem_get(hw); \
+ if (unlikely(ret)) \
+ break; \
+ \
+ ret = func(__VA_ARGS__); \
+ \
+ aq_mss_mdio_sem_put(hw); \
+ } while (0); \
+ ret; \
+})
+
+/*******************************************************************************
+ * MDIO wrappers
+ ******************************************************************************/
+static int aq_mss_mdio_sem_get(struct aq_hw_s *hw)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(hw_atl_sem_mdio_get, hw, val,
+ val == 1U, 10U, 100000U);
+}
+
+static void aq_mss_mdio_sem_put(struct aq_hw_s *hw)
+{
+ hw_atl_reg_glb_cpu_sem_set(hw, 1U, HW_ATL_FW_SM_MDIO);
+}
+
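+/* An all-ones word indicates the MDIO read did not complete. */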
+static int aq_mss_mdio_read(struct aq_hw_s *hw, u16 mmd, u16 addr, u16 *data)
+{
+ *data = aq_mdio_read_word(hw, mmd, addr);
+ return (*data != 0xffff) ? 0 : -ETIME;
+}
+
+static int aq_mss_mdio_write(struct aq_hw_s *hw, u16 mmd, u16 addr, u16 data)
+{
+ aq_mdio_write_word(hw, mmd, addr, data);
+ return 0;
+}
+
+/*******************************************************************************
+ * MACSEC config and status
+ ******************************************************************************/
+
+static int set_raw_ingress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_ingress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_ingress_lut_ctl_register lut_op_reg;
+
+ unsigned int i;
+
+ /* NOTE: MSS registers must always be read/written as adjacent pairs.
+ * For instance, to write either or both 1E.80A0 and 80A1, we have to:
+ * 1. Write 1E.80A0 first
+ * 2. Then write 1E.80A1
+ *
+ * For HHD devices: These writes need to be performed consecutively, and
+ * to ensure this we use the PIF mailbox to delegate the reads/writes to
+ * the FW.
+ *
+ * For EUR devices: No need to use the PIF mailbox; it is safe to
+ * write to the registers directly.
+ */
+
+ /* Write the packed record words to the data buffer registers. */
+ for (i = 0; i < num_words; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ packed_record[i]);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i +
+ 1,
+ packed_record[i + 1]);
+ }
+
+ /* Clear out the unused data buffer registers. */
+ for (i = num_words; i < 24; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ 0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1, 0);
+ }
+
+ /* Select the table and row index to write to */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 0;
+ lut_op_reg.bits_0.lut_write = 1;
+
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1, MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+
+ return 0;
+}
+
+/*! Read the specified Ingress LUT table row.
+ * packed_record - [OUT] The table row data (raw).
+ */
+static int get_raw_ingress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_ingress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_ingress_lut_ctl_register lut_op_reg;
+ int ret;
+
+ unsigned int i;
+
+ /* Select the table and row index to read */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 1;
+ lut_op_reg.bits_0.lut_write = 0;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+
+ memset(packed_record, 0, sizeof(u16) * num_words);
+
+ for (i = 0; i < num_words; i += 2) {
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i,
+ &packed_record[i]);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i + 1,
+ &packed_record[i + 1]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+/*! Write packed_record to the specified Egress LUT table row. */
+static int set_raw_egress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_egress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_egress_lut_ctl_register lut_op_reg;
+
+ unsigned int i;
+
+ /* Write the packed record words to the data buffer registers. */
+ for (i = 0; i < num_words; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ packed_record[i]);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
+ packed_record[i + 1]);
+ }
+
+ /* Clear out the unused data buffer registers. */
+ for (i = num_words; i < 28; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i, 0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
+ 0);
+ }
+
+ /* Select the table and row index to write to */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 0;
+ lut_op_reg.bits_0.lut_write = 1;
+
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1, MSS_EGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+
+ return 0;
+}
+
+static int get_raw_egress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_egress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_egress_lut_ctl_register lut_op_reg;
+ int ret;
+
+ unsigned int i;
+
+ /* Select the table and row index to read */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 1;
+ lut_op_reg.bits_0.lut_write = 0;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+
+ memset(packed_record, 0, sizeof(u16) * num_words);
+
+ for (i = 0; i < num_words; i += 2) {
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i,
+ &packed_record[i]);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i + 1,
+ &packed_record[i + 1]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_INGRESSPRECTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+ packed_record[3] = rec->eth_type & 0xFFFF;
+ packed_record[4] = rec->match_mask & 0xFFFF;
+ packed_record[5] = rec->match_type & 0xF;
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
+ return set_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_prectlf_record, hw, rec,
+ table_index);
+}
+
+static int get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPRECTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ * This is a workaround for EUR devices that allows us to read
+ * odd-numbered rows. For HHD devices: this workaround will not work,
+ * so don't bother; odd-numbered rows are not readable.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_prectlf_record, hw, rec,
+ table_index);
+}
+
+static int
+set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[20];
+
+ if (table_index >= NUMROWS_INGRESSPRECLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 20);
+
+ packed_record[0] = rec->sci[0] & 0xFFFF;
+ packed_record[1] = (rec->sci[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->sci[1] & 0xFFFF;
+ packed_record[3] = (rec->sci[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->tci & 0xFF;
+
+ packed_record[4] |= (rec->encr_offset & 0xFF) << 8;
+
+ packed_record[5] = rec->eth_type & 0xFFFF;
+
+ packed_record[6] = rec->snap[0] & 0xFFFF;
+ packed_record[7] = (rec->snap[0] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->snap[1] & 0xFF;
+
+ packed_record[8] |= (rec->llc & 0xFF) << 8;
+ packed_record[9] = (rec->llc >> 8) & 0xFFFF;
+
+ packed_record[10] = rec->mac_sa[0] & 0xFFFF;
+ packed_record[11] = (rec->mac_sa[0] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->mac_sa[1] & 0xFFFF;
+
+ packed_record[13] = rec->mac_da[0] & 0xFFFF;
+ packed_record[14] = (rec->mac_da[0] >> 16) & 0xFFFF;
+
+ packed_record[15] = rec->mac_da[1] & 0xFFFF;
+
+ packed_record[16] = rec->lpbk_packet & 0x1;
+
+ packed_record[16] |= (rec->an_mask & 0x3) << 1;
+
+ packed_record[16] |= (rec->tci_mask & 0x3F) << 3;
+
+ packed_record[16] |= (rec->sci_mask & 0x7F) << 9;
+ packed_record[17] = (rec->sci_mask >> 7) & 0x1;
+
+ packed_record[17] |= (rec->eth_type_mask & 0x3) << 1;
+
+ packed_record[17] |= (rec->snap_mask & 0x1F) << 3;
+
+ packed_record[17] |= (rec->llc_mask & 0x7) << 8;
+
+ packed_record[17] |= (rec->_802_2_encapsulate & 0x1) << 11;
+
+ packed_record[17] |= (rec->sa_mask & 0xF) << 12;
+ packed_record[18] = (rec->sa_mask >> 4) & 0x3;
+
+ packed_record[18] |= (rec->da_mask & 0x3F) << 2;
+
+ packed_record[18] |= (rec->lpbk_mask & 0x1) << 8;
+
+ packed_record[18] |= (rec->sc_idx & 0x1F) << 9;
+
+ packed_record[18] |= (rec->proc_dest & 0x1) << 14;
+
+ packed_record[18] |= (rec->action & 0x1) << 15;
+ packed_record[19] = (rec->action >> 1) & 0x1;
+
+ packed_record[19] |= (rec->ctrl_unctrl & 0x1) << 1;
+
+ packed_record[19] |= (rec->sci_from_table & 0x1) << 2;
+
+ packed_record[19] |= (rec->reserved & 0xF) << 3;
+
+ packed_record[19] |= (rec->valid & 0x1) << 7;
+
+ return set_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_preclass_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int
+get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[20];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPRECLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sci[0] = packed_record[0];
+ rec->sci[0] |= packed_record[1] << 16;
+
+ rec->sci[1] = packed_record[2];
+ rec->sci[1] |= packed_record[3] << 16;
+
+ rec->tci = packed_record[4] & 0xFF;
+
+ rec->encr_offset = (packed_record[4] >> 8) & 0xFF;
+
+ rec->eth_type = packed_record[5];
+
+ rec->snap[0] = packed_record[6];
+ rec->snap[0] |= packed_record[7] << 16;
+
+ rec->snap[1] = packed_record[8] & 0xFF;
+
+ rec->llc = (packed_record[8] >> 8) & 0xFF;
+ rec->llc |= packed_record[9] << 8;
+
+ rec->mac_sa[0] = packed_record[10];
+ rec->mac_sa[0] |= packed_record[11] << 16;
+
+ rec->mac_sa[1] = packed_record[12];
+
+ rec->mac_da[0] = packed_record[13];
+ rec->mac_da[0] |= packed_record[14] << 16;
+
+ rec->mac_da[1] = packed_record[15];
+
+ rec->lpbk_packet = packed_record[16] & 0x1;
+
+ rec->an_mask = (packed_record[16] >> 1) & 0x3;
+
+ rec->tci_mask = (packed_record[16] >> 3) & 0x3F;
+
+ rec->sci_mask = (packed_record[16] >> 9) & 0x7F;
+ rec->sci_mask |= (packed_record[17] & 0x1) << 7;
+
+ rec->eth_type_mask = (packed_record[17] >> 1) & 0x3;
+
+ rec->snap_mask = (packed_record[17] >> 3) & 0x1F;
+
+ rec->llc_mask = (packed_record[17] >> 8) & 0x7;
+
+ rec->_802_2_encapsulate = (packed_record[17] >> 11) & 0x1;
+
+ rec->sa_mask = (packed_record[17] >> 12) & 0xF;
+ rec->sa_mask |= (packed_record[18] & 0x3) << 4;
+
+ rec->da_mask = (packed_record[18] >> 2) & 0x3F;
+
+ rec->lpbk_mask = (packed_record[18] >> 8) & 0x1;
+
+ rec->sc_idx = (packed_record[18] >> 9) & 0x1F;
+
+ rec->proc_dest = (packed_record[18] >> 14) & 0x1;
+
+ rec->action = (packed_record[18] >> 15) & 0x1;
+ rec->action |= (packed_record[19] & 0x1) << 1;
+
+ rec->ctrl_unctrl = (packed_record[19] >> 1) & 0x1;
+
+ rec->sci_from_table = (packed_record[19] >> 2) & 0x1;
+
+ rec->reserved = (packed_record[19] >> 3) & 0xF;
+
+ rec->valid = (packed_record[19] >> 7) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_preclass_record, hw, rec,
+ table_index);
+}
+
+static int set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSSCRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->stop_time & 0xFFFF;
+ packed_record[1] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->start_time & 0xFFFF;
+ packed_record[3] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->validate_frames & 0x3;
+
+ packed_record[4] |= (rec->replay_protect & 0x1) << 2;
+
+ packed_record[4] |= (rec->anti_replay_window & 0x1FFF) << 3;
+ packed_record[5] = (rec->anti_replay_window >> 13) & 0xFFFF;
+ packed_record[6] = (rec->anti_replay_window >> 29) & 0x7;
+
+ packed_record[6] |= (rec->receiving & 0x1) << 3;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 4;
+
+ packed_record[6] |= (rec->an_rol & 0x1) << 5;
+
+ packed_record[6] |= (rec->reserved & 0x3FF) << 6;
+ packed_record[7] = (rec->reserved >> 10) & 0x7FFF;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSCRECORD + table_index);
+}
+
+int aq_mss_set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sc_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSCRECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSCRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->stop_time = packed_record[0];
+ rec->stop_time |= packed_record[1] << 16;
+
+ rec->start_time = packed_record[2];
+ rec->start_time |= packed_record[3] << 16;
+
+ rec->validate_frames = packed_record[4] & 0x3;
+
+ rec->replay_protect = (packed_record[4] >> 2) & 0x1;
+
+ rec->anti_replay_window = (packed_record[4] >> 3) & 0x1FFF;
+ rec->anti_replay_window |= packed_record[5] << 13;
+ rec->anti_replay_window |= (packed_record[6] & 0x7) << 29;
+
+ rec->receiving = (packed_record[6] >> 3) & 0x1;
+
+ rec->fresh = (packed_record[6] >> 4) & 0x1;
+
+ rec->an_rol = (packed_record[6] >> 5) & 0x1;
+
+ rec->reserved = (packed_record[6] >> 6) & 0x3FF;
+ rec->reserved |= (packed_record[7] & 0x7FFF) << 10;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sc_record, hw, rec, table_index);
+}
+
+static int set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->stop_time & 0xFFFF;
+ packed_record[1] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->start_time & 0xFFFF;
+ packed_record[3] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->next_pn & 0xFFFF;
+ packed_record[5] = (rec->next_pn >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->sat_nextpn & 0x1;
+
+ packed_record[6] |= (rec->in_use & 0x1) << 1;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 2;
+
+ packed_record[6] |= (rec->reserved & 0x1FFF) << 3;
+ packed_record[7] = (rec->reserved >> 13) & 0x7FFF;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSARECORD + table_index);
+}
+
+int aq_mss_set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sa_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSARECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->stop_time = packed_record[0];
+ rec->stop_time |= packed_record[1] << 16;
+
+ rec->start_time = packed_record[2];
+ rec->start_time |= packed_record[3] << 16;
+
+ rec->next_pn = packed_record[4];
+ rec->next_pn |= packed_record[5] << 16;
+
+ rec->sat_nextpn = packed_record[6] & 0x1;
+
+ rec->in_use = (packed_record[6] >> 1) & 0x1;
+
+ rec->fresh = (packed_record[6] >> 2) & 0x1;
+
+ rec->reserved = (packed_record[6] >> 3) & 0x1FFF;
+ rec->reserved |= (packed_record[7] & 0x7FFF) << 13;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sa_record, hw, rec, table_index);
+}
+
+static int
+set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[18];
+
+ if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 18);
+
+ packed_record[0] = rec->key[0] & 0xFFFF;
+ packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->key[1] & 0xFFFF;
+ packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->key[2] & 0xFFFF;
+ packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->key[3] & 0xFFFF;
+ packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->key[4] & 0xFFFF;
+ packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;
+
+ packed_record[10] = rec->key[5] & 0xFFFF;
+ packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->key[6] & 0xFFFF;
+ packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;
+
+ packed_record[14] = rec->key[7] & 0xFFFF;
+ packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;
+
+ packed_record[16] = rec->key_len & 0x3;
+
+ return set_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sakey_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[18];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->key[0] = packed_record[0];
+ rec->key[0] |= packed_record[1] << 16;
+
+ rec->key[1] = packed_record[2];
+ rec->key[1] |= packed_record[3] << 16;
+
+ rec->key[2] = packed_record[4];
+ rec->key[2] |= packed_record[5] << 16;
+
+ rec->key[3] = packed_record[6];
+ rec->key[3] |= packed_record[7] << 16;
+
+ rec->key[4] = packed_record[8];
+ rec->key[4] |= packed_record[9] << 16;
+
+ rec->key[5] = packed_record[10];
+ rec->key[5] |= packed_record[11] << 16;
+
+ rec->key[6] = packed_record[12];
+ rec->key[6] |= packed_record[13] << 16;
+
+ rec->key[7] = packed_record[14];
+ rec->key[7] |= packed_record[15] << 16;
+
+ rec->key_len = (rec->key_len & 0xFFFFFFFC) |
+ (packed_record[16] & 0x3);
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sakey_record, hw, rec, table_index);
+}
+
+static int
+set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSPOSTCLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->byte0 & 0xFF;
+
+ packed_record[0] |= (rec->byte1 & 0xFF) << 8;
+
+ packed_record[1] = rec->byte2 & 0xFF;
+
+ packed_record[1] |= (rec->byte3 & 0xFF) << 8;
+
+ packed_record[2] = rec->eth_type & 0xFFFF;
+
+ packed_record[3] = rec->eth_type_valid & 0x1;
+
+ packed_record[3] |= (rec->vlan_id & 0xFFF) << 1;
+
+ packed_record[3] |= (rec->vlan_up & 0x7) << 13;
+
+ packed_record[4] = rec->vlan_valid & 0x1;
+
+ packed_record[4] |= (rec->sai & 0x1F) << 1;
+
+ packed_record[4] |= (rec->sai_hit & 0x1) << 6;
+
+ packed_record[4] |= (rec->eth_type_mask & 0xF) << 7;
+
+ packed_record[4] |= (rec->byte3_location & 0x1F) << 11;
+ packed_record[5] = (rec->byte3_location >> 5) & 0x1;
+
+ packed_record[5] |= (rec->byte3_mask & 0x3) << 1;
+
+ packed_record[5] |= (rec->byte2_location & 0x3F) << 3;
+
+ packed_record[5] |= (rec->byte2_mask & 0x3) << 9;
+
+ packed_record[5] |= (rec->byte1_location & 0x1F) << 11;
+ packed_record[6] = (rec->byte1_location >> 5) & 0x1;
+
+ packed_record[6] |= (rec->byte1_mask & 0x3) << 1;
+
+ packed_record[6] |= (rec->byte0_location & 0x3F) << 3;
+
+ packed_record[6] |= (rec->byte0_mask & 0x3) << 9;
+
+ packed_record[6] |= (rec->eth_type_valid_mask & 0x3) << 11;
+
+ packed_record[6] |= (rec->vlan_id_mask & 0x7) << 13;
+ packed_record[7] = (rec->vlan_id_mask >> 3) & 0x1;
+
+ packed_record[7] |= (rec->vlan_up_mask & 0x3) << 1;
+
+ packed_record[7] |= (rec->vlan_valid_mask & 0x3) << 3;
+
+ packed_record[7] |= (rec->sai_mask & 0x3) << 5;
+
+ packed_record[7] |= (rec->sai_hit_mask & 0x3) << 7;
+
+ packed_record[7] |= (rec->firstlevel_actions & 0x1) << 9;
+
+ packed_record[7] |= (rec->secondlevel_actions & 0x1) << 10;
+
+ packed_record[7] |= (rec->reserved & 0xF) << 11;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_postclass_record, hw, rec,
+ table_index);
+}
+
+static int
+get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPOSTCLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->byte0 = packed_record[0] & 0xFF;
+
+ rec->byte1 = (packed_record[0] >> 8) & 0xFF;
+
+ rec->byte2 = packed_record[1] & 0xFF;
+
+ rec->byte3 = (packed_record[1] >> 8) & 0xFF;
+
+ rec->eth_type = packed_record[2];
+
+ rec->eth_type_valid = packed_record[3] & 0x1;
+
+ rec->vlan_id = (packed_record[3] >> 1) & 0xFFF;
+
+ rec->vlan_up = (packed_record[3] >> 13) & 0x7;
+
+ rec->vlan_valid = packed_record[4] & 0x1;
+
+ rec->sai = (packed_record[4] >> 1) & 0x1F;
+
+ rec->sai_hit = (packed_record[4] >> 6) & 0x1;
+
+ rec->eth_type_mask = (packed_record[4] >> 7) & 0xF;
+
+ rec->byte3_location = (packed_record[4] >> 11) & 0x1F;
+ rec->byte3_location |= (packed_record[5] & 0x1) << 5;
+
+ rec->byte3_mask = (packed_record[5] >> 1) & 0x3;
+
+ rec->byte2_location = (packed_record[5] >> 3) & 0x3F;
+
+ rec->byte2_mask = (packed_record[5] >> 9) & 0x3;
+
+ rec->byte1_location = (packed_record[5] >> 11) & 0x1F;
+ rec->byte1_location |= (packed_record[6] & 0x1) << 5;
+
+ rec->byte1_mask = (packed_record[6] >> 1) & 0x3;
+
+ rec->byte0_location = (packed_record[6] >> 3) & 0x3F;
+
+ rec->byte0_mask = (packed_record[6] >> 9) & 0x3;
+
+ rec->eth_type_valid_mask = (packed_record[6] >> 11) & 0x3;
+
+ rec->vlan_id_mask = (packed_record[6] >> 13) & 0x7;
+ rec->vlan_id_mask |= (packed_record[7] & 0x1) << 3;
+
+ rec->vlan_up_mask = (packed_record[7] >> 1) & 0x3;
+
+ rec->vlan_valid_mask = (packed_record[7] >> 3) & 0x3;
+
+ rec->sai_mask = (packed_record[7] >> 5) & 0x3;
+
+ rec->sai_hit_mask = (packed_record[7] >> 7) & 0x3;
+
+ rec->firstlevel_actions = (packed_record[7] >> 9) & 0x1;
+
+ rec->secondlevel_actions = (packed_record[7] >> 10) & 0x1;
+
+ rec->reserved = (packed_record[7] >> 11) & 0xF;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_postclass_record, hw, rec,
+ table_index);
+}
+
+static int
+set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_INGRESSPOSTCTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+
+ packed_record[3] = rec->eth_type & 0xFFFF;
+
+ packed_record[4] = rec->match_mask & 0xFFFF;
+
+ packed_record[5] = rec->match_type & 0xF;
+
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
+ return set_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_postctlf_record, hw, rec,
+ table_index);
+}
+
+static int
+get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPOSTCTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_postctlf_record, hw, rec,
+ table_index);
+}
+
+static int set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_EGRESSCTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+
+ packed_record[3] = rec->eth_type & 0xFFFF;
+
+ packed_record[4] = rec->match_mask & 0xFFFF;
+
+ packed_record[5] = rec->match_type & 0xF;
+
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
+ return set_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD + table_index);
+}
+
+int aq_mss_set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_ctlf_record, hw, rec, table_index);
+}
+
+static int get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSCTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_ctlf_record, hw, rec, table_index);
+}
+
+static int set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[28];
+
+ if (table_index >= NUMROWS_EGRESSCLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 28);
+
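+ /* Pack each bitfield LSB-first into consecutive 16-bit words; fields
+ * wider than the space left in a word straddle into the next word
+ * (e.g. sci[0] below spans words 3-5).
+ */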
+ packed_record[0] = rec->vlan_id & 0xFFF;
+
+ packed_record[0] |= (rec->vlan_up & 0x7) << 12;
+
+ packed_record[0] |= (rec->vlan_valid & 0x1) << 15;
+
+ packed_record[1] = rec->byte3 & 0xFF;
+
+ packed_record[1] |= (rec->byte2 & 0xFF) << 8;
+
+ packed_record[2] = rec->byte1 & 0xFF;
+
+ packed_record[2] |= (rec->byte0 & 0xFF) << 8;
+
+ packed_record[3] = rec->tci & 0xFF;
+
+ packed_record[3] |= (rec->sci[0] & 0xFF) << 8;
+ packed_record[4] = (rec->sci[0] >> 8) & 0xFFFF;
+ packed_record[5] = (rec->sci[0] >> 24) & 0xFF;
+
+ packed_record[5] |= (rec->sci[1] & 0xFF) << 8;
+ packed_record[6] = (rec->sci[1] >> 8) & 0xFFFF;
+ packed_record[7] = (rec->sci[1] >> 24) & 0xFF;
+
+ packed_record[7] |= (rec->eth_type & 0xFF) << 8;
+ packed_record[8] = (rec->eth_type >> 8) & 0xFF;
+
+ packed_record[8] |= (rec->snap[0] & 0xFF) << 8;
+ packed_record[9] = (rec->snap[0] >> 8) & 0xFFFF;
+ packed_record[10] = (rec->snap[0] >> 24) & 0xFF;
+
+ packed_record[10] |= (rec->snap[1] & 0xFF) << 8;
+
+ packed_record[11] = rec->llc & 0xFFFF;
+ packed_record[12] = (rec->llc >> 16) & 0xFF;
+
+ packed_record[12] |= (rec->mac_sa[0] & 0xFF) << 8;
+ packed_record[13] = (rec->mac_sa[0] >> 8) & 0xFFFF;
+ packed_record[14] = (rec->mac_sa[0] >> 24) & 0xFF;
+
+ packed_record[14] |= (rec->mac_sa[1] & 0xFF) << 8;
+ packed_record[15] = (rec->mac_sa[1] >> 8) & 0xFF;
+
+ packed_record[15] |= (rec->mac_da[0] & 0xFF) << 8;
+ packed_record[16] = (rec->mac_da[0] >> 8) & 0xFFFF;
+ packed_record[17] = (rec->mac_da[0] >> 24) & 0xFF;
+
+ packed_record[17] |= (rec->mac_da[1] & 0xFF) << 8;
+ packed_record[18] = (rec->mac_da[1] >> 8) & 0xFF;
+
+ packed_record[18] |= (rec->pn & 0xFF) << 8;
+ packed_record[19] = (rec->pn >> 8) & 0xFFFF;
+ packed_record[20] = (rec->pn >> 24) & 0xFF;
+
+ packed_record[20] |= (rec->byte3_location & 0x3F) << 8;
+
+ packed_record[20] |= (rec->byte3_mask & 0x1) << 14;
+
+ packed_record[20] |= (rec->byte2_location & 0x1) << 15;
+ packed_record[21] = (rec->byte2_location >> 1) & 0x1F;
+
+ packed_record[21] |= (rec->byte2_mask & 0x1) << 5;
+
+ packed_record[21] |= (rec->byte1_location & 0x3F) << 6;
+
+ packed_record[21] |= (rec->byte1_mask & 0x1) << 12;
+
+ packed_record[21] |= (rec->byte0_location & 0x7) << 13;
+ packed_record[22] = (rec->byte0_location >> 3) & 0x7;
+
+ packed_record[22] |= (rec->byte0_mask & 0x1) << 3;
+
+ packed_record[22] |= (rec->vlan_id_mask & 0x3) << 4;
+
+ packed_record[22] |= (rec->vlan_up_mask & 0x1) << 6;
+
+ packed_record[22] |= (rec->vlan_valid_mask & 0x1) << 7;
+
+ packed_record[22] |= (rec->tci_mask & 0xFF) << 8;
+
+ packed_record[23] = rec->sci_mask & 0xFF;
+
+ packed_record[23] |= (rec->eth_type_mask & 0x3) << 8;
+
+ packed_record[23] |= (rec->snap_mask & 0x1F) << 10;
+
+ packed_record[23] |= (rec->llc_mask & 0x1) << 15;
+ packed_record[24] = (rec->llc_mask >> 1) & 0x3;
+
+ packed_record[24] |= (rec->sa_mask & 0x3F) << 2;
+
+ packed_record[24] |= (rec->da_mask & 0x3F) << 8;
+
+ packed_record[24] |= (rec->pn_mask & 0x3) << 14;
+ packed_record[25] = (rec->pn_mask >> 2) & 0x3;
+
+ packed_record[25] |= (rec->eight02dot2 & 0x1) << 2;
+
+ packed_record[25] |= (rec->tci_sc & 0x1) << 3;
+
+ packed_record[25] |= (rec->tci_87543 & 0x1) << 4;
+
+ packed_record[25] |= (rec->exp_sectag_en & 0x1) << 5;
+
+ packed_record[25] |= (rec->sc_idx & 0x1F) << 6;
+
+ packed_record[25] |= (rec->sc_sa & 0x3) << 11;
+
+ packed_record[25] |= (rec->debug & 0x1) << 13;
+
+ packed_record[25] |= (rec->action & 0x3) << 14;
+
+ packed_record[26] = (rec->valid & 0x1) << 3;
+
+ return set_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD + table_index);
+}
+
+int aq_mss_set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_class_record, hw, rec, table_index);
+}
+
+static int get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[28];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSCLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->vlan_id = packed_record[0] & 0xFFF;
+
+ rec->vlan_up = (packed_record[0] >> 12) & 0x7;
+
+ rec->vlan_valid = (packed_record[0] >> 15) & 0x1;
+
+ rec->byte3 = packed_record[1] & 0xFF;
+
+ rec->byte2 = (packed_record[1] >> 8) & 0xFF;
+
+ rec->byte1 = packed_record[2] & 0xFF;
+
+ rec->byte0 = (packed_record[2] >> 8) & 0xFF;
+
+ rec->tci = packed_record[3] & 0xFF;
+
+ rec->sci[0] = (packed_record[3] >> 8) & 0xFF;
+ rec->sci[0] |= packed_record[4] << 8;
+ rec->sci[0] |= (packed_record[5] & 0xFF) << 24;
+
+ rec->sci[1] = (packed_record[5] >> 8) & 0xFF;
+ rec->sci[1] |= packed_record[6] << 8;
+ rec->sci[1] |= (packed_record[7] & 0xFF) << 24;
+
+ rec->eth_type = (packed_record[7] >> 8) & 0xFF;
+ rec->eth_type |= (packed_record[8] & 0xFF) << 8;
+
+ rec->snap[0] = (packed_record[8] >> 8) & 0xFF;
+ rec->snap[0] |= packed_record[9] << 8;
+ rec->snap[0] |= (packed_record[10] & 0xFF) << 24;
+
+ rec->snap[1] = (packed_record[10] >> 8) & 0xFF;
+
+ rec->llc = packed_record[11];
+ rec->llc |= (packed_record[12] & 0xFF) << 16;
+
+ rec->mac_sa[0] = (packed_record[12] >> 8) & 0xFF;
+ rec->mac_sa[0] |= packed_record[13] << 8;
+ rec->mac_sa[0] |= (packed_record[14] & 0xFF) << 24;
+
+ rec->mac_sa[1] = (packed_record[14] >> 8) & 0xFF;
+ rec->mac_sa[1] |= (packed_record[15] & 0xFF) << 8;
+
+ rec->mac_da[0] = (packed_record[15] >> 8) & 0xFF;
+ rec->mac_da[0] |= packed_record[16] << 8;
+ rec->mac_da[0] |= (packed_record[17] & 0xFF) << 24;
+
+ rec->mac_da[1] = (packed_record[17] >> 8) & 0xFF;
+ rec->mac_da[1] |= (packed_record[18] & 0xFF) << 8;
+
+ rec->pn = (packed_record[18] >> 8) & 0xFF;
+ rec->pn |= packed_record[19] << 8;
+ rec->pn |= (packed_record[20] & 0xFF) << 24;
+
+ rec->byte3_location = (packed_record[20] >> 8) & 0x3F;
+
+ rec->byte3_mask = (packed_record[20] >> 14) & 0x1;
+
+ rec->byte2_location = (packed_record[20] >> 15) & 0x1;
+ rec->byte2_location |= (packed_record[21] & 0x1F) << 1;
+
+ rec->byte2_mask = (packed_record[21] >> 5) & 0x1;
+
+ rec->byte1_location = (packed_record[21] >> 6) & 0x3F;
+
+ rec->byte1_mask = (packed_record[21] >> 12) & 0x1;
+
+ rec->byte0_location = (packed_record[21] >> 13) & 0x7;
+ rec->byte0_location |= (packed_record[22] & 0x7) << 3;
+
+ rec->byte0_mask = (packed_record[22] >> 3) & 0x1;
+
+ rec->vlan_id_mask = (packed_record[22] >> 4) & 0x3;
+
+ rec->vlan_up_mask = (packed_record[22] >> 6) & 0x1;
+
+ rec->vlan_valid_mask = (packed_record[22] >> 7) & 0x1;
+
+ rec->tci_mask = (packed_record[22] >> 8) & 0xFF;
+
+ rec->sci_mask = packed_record[23] & 0xFF;
+
+ rec->eth_type_mask = (packed_record[23] >> 8) & 0x3;
+
+ rec->snap_mask = (packed_record[23] >> 10) & 0x1F;
+
+ rec->llc_mask = (packed_record[23] >> 15) & 0x1;
+ rec->llc_mask |= (packed_record[24] & 0x3) << 1;
+
+ rec->sa_mask = (packed_record[24] >> 2) & 0x3F;
+
+ rec->da_mask = (packed_record[24] >> 8) & 0x3F;
+
+ rec->pn_mask = (packed_record[24] >> 14) & 0x3;
+ rec->pn_mask |= (packed_record[25] & 0x3) << 2;
+
+ rec->eight02dot2 = (packed_record[25] >> 2) & 0x1;
+
+ rec->tci_sc = (packed_record[25] >> 3) & 0x1;
+
+ rec->tci_87543 = (packed_record[25] >> 4) & 0x1;
+
+ rec->exp_sectag_en = (packed_record[25] >> 5) & 0x1;
+
+ rec->sc_idx = (packed_record[25] >> 6) & 0x1F;
+
+ rec->sc_sa = (packed_record[25] >> 11) & 0x3;
+
+ rec->debug = (packed_record[25] >> 13) & 0x1;
+
+ rec->action = (packed_record[25] >> 14) & 0x3;
+
+ rec->valid = (packed_record[26] >> 3) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_class_record, hw, rec, table_index);
+}
+
+static int set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->start_time & 0xFFFF;
+ packed_record[1] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->stop_time & 0xFFFF;
+ packed_record[3] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->curr_an & 0x3;
+
+ packed_record[4] |= (rec->an_roll & 0x1) << 2;
+
+ packed_record[4] |= (rec->tci & 0x3F) << 3;
+
+ packed_record[4] |= (rec->enc_off & 0x7F) << 9;
+ packed_record[5] = (rec->enc_off >> 7) & 0x1;
+
+ packed_record[5] |= (rec->protect & 0x1) << 1;
+
+ packed_record[5] |= (rec->recv & 0x1) << 2;
+
+ packed_record[5] |= (rec->fresh & 0x1) << 3;
+
+ packed_record[5] |= (rec->sak_len & 0x3) << 4;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSCRECORD + table_index);
+}
+
+int aq_mss_set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_sc_record, hw, rec, table_index);
+}
+
+static int get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSCRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->start_time = packed_record[0];
+ rec->start_time |= packed_record[1] << 16;
+
+ rec->stop_time = packed_record[2];
+ rec->stop_time |= packed_record[3] << 16;
+
+ rec->curr_an = packed_record[4] & 0x3;
+
+ rec->an_roll = (packed_record[4] >> 2) & 0x1;
+
+ rec->tci = (packed_record[4] >> 3) & 0x3F;
+
+ rec->enc_off = (packed_record[4] >> 9) & 0x7F;
+ rec->enc_off |= (packed_record[5] & 0x1) << 7;
+
+ rec->protect = (packed_record[5] >> 1) & 0x1;
+
+ rec->recv = (packed_record[5] >> 2) & 0x1;
+
+ rec->fresh = (packed_record[5] >> 3) & 0x1;
+
+ rec->sak_len = (packed_record[5] >> 4) & 0x3;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sc_record, hw, rec, table_index);
+}
+
+static int set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->start_time & 0xFFFF;
+ packed_record[1] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->stop_time & 0xFFFF;
+ packed_record[3] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->next_pn & 0xFFFF;
+ packed_record[5] = (rec->next_pn >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->sat_pn & 0x1;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 1;
+
+ packed_record[7] = (rec->valid & 0x1) << 15;
+
+ return set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSARECORD + table_index);
+}
+
+int aq_mss_set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_egress_sa_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSARECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->start_time = packed_record[0];
+ rec->start_time |= packed_record[1] << 16;
+
+ rec->stop_time = packed_record[2];
+ rec->stop_time |= packed_record[3] << 16;
+
+ rec->next_pn = packed_record[4];
+ rec->next_pn |= packed_record[5] << 16;
+
+ rec->sat_pn = packed_record[6] & 0x1;
+
+ rec->fresh = (packed_record[6] >> 1) & 0x1;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sa_record, hw, rec, table_index);
+}
+
+static int set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[16];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 16);
+
+ packed_record[0] = rec->key[0] & 0xFFFF;
+ packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->key[1] & 0xFFFF;
+ packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->key[2] & 0xFFFF;
+ packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->key[3] & 0xFFFF;
+ packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->key[4] & 0xFFFF;
+ packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;
+
+ packed_record[10] = rec->key[5] & 0xFFFF;
+ packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->key[6] & 0xFFFF;
+ packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;
+
+ packed_record[14] = rec->key[7] & 0xFFFF;
+ packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;
+
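+ /* The 256-bit key spans two consecutive 8-word raw records; per the
+ * hardware table layout, the second half lives 32 rows earlier in
+ * the raw table, hence the "- 32" in the second write below.
+ */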
+ ret = set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+ ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index -
+ 32);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_egress_sakey_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[16];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
+ return -EINVAL;
+
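+ /* Read both halves of the 256-bit key; see set_egress_sakey_record()
+ * for the split raw-record layout.
+ */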
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+ ret = get_raw_egress_record(hw, packed_record + 8, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index -
+ 32);
+ if (unlikely(ret))
+ return ret;
+
+ rec->key[0] = packed_record[0];
+ rec->key[0] |= packed_record[1] << 16;
+
+ rec->key[1] = packed_record[2];
+ rec->key[1] |= packed_record[3] << 16;
+
+ rec->key[2] = packed_record[4];
+ rec->key[2] |= packed_record[5] << 16;
+
+ rec->key[3] = packed_record[6];
+ rec->key[3] |= packed_record[7] << 16;
+
+ rec->key[4] = packed_record[8];
+ rec->key[4] |= packed_record[9] << 16;
+
+ rec->key[5] = packed_record[10];
+ rec->key[5] |= packed_record[11] << 16;
+
+ rec->key[6] = packed_record[12];
+ rec->key[6] |= packed_record[13] << 16;
+
+ rec->key[7] = packed_record[14];
+ rec->key[7] |= packed_record[15] << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sakey_record, hw, rec, table_index);
+}
+
+static int get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sc_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
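+ /* Each counter is 64 bits wide and is returned as four 16-bit words;
+ * the per-SC counters occupy raw rows sc_index * 8 + 4 through + 7.
+ */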
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_protected_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_protected_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_encrypted_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_encrypted_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_protected_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_protected_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_encrypted_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_encrypted_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_sc_counters, hw, counters, sc_index);
+}
+
+static int get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sa_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
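+ /* The per-SA counters occupy raw rows sa_index * 8 + 0 through + 3. */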
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_hit_drop_redirect[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_hit_drop_redirect[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_protected2_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_protected2_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_protected_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_protected_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_encrypted_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_encrypted_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_sa_counters, hw, counters, sa_index);
+}
+
+static int
+get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters)
+{
+ u16 packed_record[4];
+ int ret;
+
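+ /* The common egress counters start at raw row 256. */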
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->ctl_pkt[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ctl_pkt[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->unknown_sa_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unknown_sa_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->too_long[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->too_long[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->ecc_error_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ecc_error_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_hit_drop_redir[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_hit_drop_redir[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_common_counters, hw, counters);
+}
+
+static int clear_egress_counters(struct aq_hw_s *hw)
+{
+ struct mss_egress_ctl_register ctl_reg;
+ int ret;
+
+ memset(&ctl_reg, 0, sizeof(ctl_reg));
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1, MSS_EGRESS_CTL_REGISTER_ADDR,
+ &ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ &ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ /* Toggle the Egress MIB clear bit 0->1->0 */
+ ctl_reg.bits_0.clear_counter = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_counter = 1;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_counter = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_clear_egress_counters(struct aq_hw_s *hw)
+{
+ return AQ_API_CALL_SAFE(clear_egress_counters, hw);
+}
+
+static int get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sa_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
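+ /* Each ingress SA owns twelve consecutive 64-bit counters, starting
+ * at raw row sa_index * 12.
+ */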
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_hit_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_hit_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_hit_drop_redir_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_hit_drop_redir_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->not_using_sa[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->not_using_sa[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->unused_sa[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->unused_sa[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->not_valid_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->not_valid_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->invalid_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->invalid_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->ok_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ok_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->late_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->late_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 8);
+ if (unlikely(ret))
+ return ret;
+ counters->delayed_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->delayed_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 9);
+ if (unlikely(ret))
+ return ret;
+ counters->unchecked_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unchecked_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 10);
+ if (unlikely(ret))
+ return ret;
+ counters->validated_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->validated_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 11);
+ if (unlikely(ret))
+ return ret;
+ counters->decrypted_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->decrypted_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_ingress_sa_counters, hw, counters,
+ sa_index);
+}
+
+static int
+get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters)
+{
+ u16 packed_record[4];
+ int ret;
+
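+ /* The common ingress counters start at raw row 385. */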
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->ctl_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ctl_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->tagged_miss_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->tagged_miss_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_miss_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_miss_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->notag_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->notag_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->bad_tag_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->bad_tag_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->no_sci_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->no_sci_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->unknown_sci_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unknown_sci_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 8);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_prt_pass_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_prt_pass_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 9);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_prt_pass_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_prt_pass_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 10);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_prt_fail_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_prt_fail_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 11);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_prt_fail_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_prt_fail_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 12);
+ if (unlikely(ret))
+ return ret;
+ counters->too_long_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->too_long_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 13);
+ if (unlikely(ret))
+ return ret;
+ counters->igpoc_ctl_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->igpoc_ctl_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 14);
+ if (unlikely(ret))
+ return ret;
+ counters->ecc_error_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ecc_error_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 15);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_hit_drop_redir[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_hit_drop_redir[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_ingress_common_counters, hw, counters);
+}
+
+static int clear_ingress_counters(struct aq_hw_s *hw)
+{
+ struct mss_ingress_ctl_register ctl_reg;
+ int ret;
+
+ memset(&ctl_reg, 0, sizeof(ctl_reg));
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, &ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ &ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ /* Toggle the Ingress MIB clear bit 0->1->0 */
+ ctl_reg.bits_0.clear_count = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_count = 1;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_count = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_clear_ingress_counters(struct aq_hw_s *hw)
+{
+ return AQ_API_CALL_SAFE(clear_ingress_counters, hw);
+}
+
+static int get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired)
+{
+ u16 val;
+ int ret;
+
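+ /* The 32-bit expired bitmap is split across two adjacent 16-bit
+ * MDIO registers.
+ */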
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR,
+ &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired = val;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired |= val << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired)
+{
+ *expired = 0;
+
+ return AQ_API_CALL_SAFE(get_egress_sa_expired, hw, expired);
+}
+
+static int get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired)
+{
+ u16 val;
+ int ret;
+
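+ /* Same split-register layout as get_egress_sa_expired(). */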
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR, &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired = val;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR + 1, &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired |= val << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired)
+{
+ *expired = 0;
+
+ return AQ_API_CALL_SAFE(get_egress_sa_threshold_expired, hw, expired);
+}
+
+static int set_egress_sa_expired(struct aq_hw_s *hw, u32 expired)
+{
+ int ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR,
+ expired & 0xFFFF);
+ if (unlikely(ret))
+ return ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ expired >> 16);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_set_egress_sa_expired(struct aq_hw_s *hw, u32 expired)
+{
+ return AQ_API_CALL_SAFE(set_egress_sa_expired, hw, expired);
+}
+
+static int set_egress_sa_threshold_expired(struct aq_hw_s *hw, u32 expired)
+{
+ int ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR,
+ expired & 0xFFFF);
+ if (unlikely(ret))
+ return ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ expired >> 16);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_set_egress_sa_threshold_expired(struct aq_hw_s *hw, u32 expired)
+{
+ return AQ_API_CALL_SAFE(set_egress_sa_threshold_expired, hw, expired);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h
new file mode 100644
index 000000000000..ff03cc462a37
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __MACSEC_API_H__
+#define __MACSEC_API_H__
+
+#include "aq_hw.h"
+#include "macsec_struct.h"
+
+#define NUMROWS_INGRESSPRECTLFRECORD 24
+#define ROWOFFSET_INGRESSPRECTLFRECORD 0
+
+#define NUMROWS_INGRESSPRECLASSRECORD 48
+#define ROWOFFSET_INGRESSPRECLASSRECORD 0
+
+#define NUMROWS_INGRESSPOSTCLASSRECORD 48
+#define ROWOFFSET_INGRESSPOSTCLASSRECORD 0
+
+#define NUMROWS_INGRESSSCRECORD 32
+#define ROWOFFSET_INGRESSSCRECORD 0
+
+#define NUMROWS_INGRESSSARECORD 32
+#define ROWOFFSET_INGRESSSARECORD 32
+
+#define NUMROWS_INGRESSSAKEYRECORD 32
+#define ROWOFFSET_INGRESSSAKEYRECORD 0
+
+#define NUMROWS_INGRESSPOSTCTLFRECORD 24
+#define ROWOFFSET_INGRESSPOSTCTLFRECORD 0
+
+#define NUMROWS_EGRESSCTLFRECORD 24
+#define ROWOFFSET_EGRESSCTLFRECORD 0
+
+#define NUMROWS_EGRESSCLASSRECORD 48
+#define ROWOFFSET_EGRESSCLASSRECORD 0
+
+#define NUMROWS_EGRESSSCRECORD 32
+#define ROWOFFSET_EGRESSSCRECORD 0
+
+#define NUMROWS_EGRESSSARECORD 32
+#define ROWOFFSET_EGRESSSARECORD 32
+
+#define NUMROWS_EGRESSSAKEYRECORD 32
+#define ROWOFFSET_EGRESSSAKEYRECORD 96
+
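+/*! Illustrative usage sketch (documentation only, not code from the
+ * driver): each accessor below returns 0 on success or a negative errno,
+ * and wraps the raw MDIO access in AQ_API_CALL_SAFE(), so a caller (with
+ * its own hw pointer and table index) typically does something like:
+ *
+ *   struct aq_mss_egress_sc_record sc_rec = { 0 };
+ *
+ *   sc_rec.fresh = 1;
+ *   sc_rec.valid = 1;
+ *   if (aq_mss_set_egress_sc_record(hw, &sc_rec, sc_idx))
+ *     goto err;
+ */
+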
+/*! Read the raw table data from the specified row of the Egress CTL
+ * Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress
+ * Packet Classifier table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SC
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SC Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SA
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SA Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SA
+ * Key Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SA Key Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Pre-MACSec CTL Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Pre-MACSec CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Pre-MACSec Packet Classifier table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Pre-MACSec Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SC
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SC Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SA
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SA Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SA
+ * Key Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SA Key Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Post-MACSec Packet Classifier table, and unpack it into the
+ * fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Post-MACSec Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Post-MACSec CTL Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Post-MACSec CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index);
+
+/*! Read the counters for the specified SC, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sc_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index);
+
+/*! Read the counters for the specified SA, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sa_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index);
+
+/*! Read the common egress counters, and unpack them
+ * into the fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ */
+int aq_mss_get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters);
+
+/*! Clear all Egress counters to 0. */
+int aq_mss_clear_egress_counters(struct aq_hw_s *hw);
+
+/*! Read the counters for the specified SA, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sa_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index);
+
+/*! Read the common ingress counters, and unpack them
+ * into the fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ */
+int aq_mss_get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters);
+
+/*! Clear all Ingress counters to 0. */
+int aq_mss_clear_ingress_counters(struct aq_hw_s *hw);
+
+/*! Get Egress SA expired. */
+int aq_mss_get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired);
+/*! Get Egress SA threshold expired. */
+int aq_mss_get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired);
+/*! Set Egress SA expired. */
+int aq_mss_set_egress_sa_expired(struct aq_hw_s *hw, u32 expired);
+/*! Set Egress SA threshold expired. */
+int aq_mss_set_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 expired);
+
+#endif /* __MACSEC_API_H__ */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
new file mode 100644
index 000000000000..b6119dcc3bb9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _MACSEC_STRUCT_H_
+#define _MACSEC_STRUCT_H_
+
+/*! Represents the bitfields of a single row in the Egress CTL Filter
+ * table.
+ */
+struct aq_mss_egress_ctlf_record {
+ /*! This is used to store the 48 bit value used to compare SA, DA or
+ * halfDA+half SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every value
+ * will match successfully. The total data is 64 bits, i.e. 16 nibble
+ * masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. this entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA + range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
+
+/*! Represents the bitfields of a single row in the Egress Packet
+ * Classifier table.
+ */
+struct aq_mss_egress_class_record {
+ /*! VLAN ID field. */
+ u32 vlan_id;
+ /*! VLAN UP field. */
+ u32 vlan_up;
+ /*! VLAN Present in the Packet. */
+ u32 vlan_valid;
+ /*! The 8 bit value used to compare with extracted value for byte 3. */
+ u32 byte3;
+ /*! The 8 bit value used to compare with extracted value for byte 2. */
+ u32 byte2;
+ /*! The 8 bit value used to compare with extracted value for byte 1. */
+ u32 byte1;
+ /*! The 8 bit value used to compare with extracted value for byte 0. */
+ u32 byte0;
+ /*! The 8 bit TCI field used to compare with extracted value. */
+ u32 tci;
+ /*! The 64 bit SCI field in the SecTAG. */
+ u32 sci[2];
+ /*! The 16 bit Ethertype (in the clear) field used to compare with
+ * extracted value.
+ */
+ u32 eth_type;
+ /*! This is to specify the 40-bit SNAP header if the SNAP header's mask
+ * is enabled.
+ */
+ u32 snap[2];
+ /*! This is to specify the 24-bit LLC header if the LLC header's mask is
+ * enabled.
+ */
+ u32 llc;
+ /*! The 48 bit MAC_SA field used to compare with extracted value. */
+ u32 mac_sa[2];
+ /*! The 48 bit MAC_DA field used to compare with extracted value. */
+ u32 mac_da[2];
+ /*! The 32 bit Packet number used to compare with extracted value. */
+ u32 pn;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere within the first 64 bytes of the MAC packet.
+ * The byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte3_location;
+ /*! 0: don't care
+ * 1: enable comparison of extracted byte pointed by byte 3 location.
+ */
+ u32 byte3_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere within the first 64 bytes of the MAC packet.
+ * The byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte2_location;
+ /*! 0: don't care
+ * 1: enable comparison of extracted byte pointed by byte 2 location.
+ */
+ u32 byte2_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere within the first 64 bytes of the MAC packet.
+ * The byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte1_location;
+ /*! 0: don't care
+ * 1: enable comparison of extracted byte pointed by byte 1 location.
+ */
+ u32 byte1_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere within the first 64 bytes of the MAC packet.
+ * The byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte0_location;
+ /*! 0: don't care
+ * 1: enable comparison of extracted byte pointed by byte 0 location.
+ */
+ u32 byte0_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of extracted VLAN ID field.
+ */
+ u32 vlan_id_mask;
+ /*! 0: don't care
+ * 1: enable comparison of extracted VLAN UP field.
+ */
+ u32 vlan_up_mask;
+ /*! 0: don't care
+ * 1: enable comparison of extracted VLAN Valid field.
+ */
+ u32 vlan_valid_mask;
+ /*! This is the bit mask to enable comparison of the 8 bit TCI field,
+ * including the AN field.
+ * For explicit SECTAG, AN is hardware controlled. When sending a
+ * packet with an explicit SECTAG, the rest of the TCI fields are
+ * taken directly from the SECTAG.
+ */
+ u32 tci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of SCI
+ * Note: a nonzero value means the input packet's SECTAG is
+ * explicitly tagged and the MACSEC module will only update the
+ * MSDU. The PN number is hardware controlled.
+ */
+ u32 sci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of Ethertype.
+ */
+ u32 eth_type_mask;
+ /*! Mask is per-byte.
+ * 0: don't care and no SNAP header exists.
+ * 1: compare the SNAP header.
+ * If this bit is set to 1, extraction assumes a SNAP header is
+ * present as encapsulated in 802.3 (RFC 1042), i.e. the five bytes
+ * following the LLC header are the SNAP header.
+ */
+ u32 snap_mask;
+ /*! 0: don't care and no LLC header exists.
+ * 1: compare the LLC header.
+ * If this bit is set to 1, extraction assumes an LLC header is
+ * present as encapsulated in 802.3 (RFC 1042), i.e. the three bytes
+ * following the 802.3 MAC header are the LLC header.
+ */
+ u32 llc_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_SA.
+ */
+ u32 sa_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_DA.
+ */
+ u32 da_mask;
+ /*! Mask is per-byte. */
+ u32 pn_mask;
+ /*! Reserved. This bit should be always 0. */
+ u32 eight02dot2;
+ /*! 1: For explicit sectag case use TCI_SC from table
+ * 0: use TCI_SC from explicit sectag.
+ */
+ u32 tci_sc;
+ /*! 1: For explicit sectag case, use TCI_V, ES, SCB, E, C from table
+ * 0: use TCI_V, ES, SCB, E, C from explicit sectag.
+ */
+ u32 tci_87543;
+ /*! 1: indicates that incoming packet has explicit sectag. */
+ u32 exp_sectag_en;
+ /*! If packet matches and tagged as controlled-packet, this SC/SA
+ * index is used for later SC and SA table lookup.
+ */
+ u32 sc_idx;
+ /*! This field is used to specify how many SA entries are
+ * associated with 1 SC entry.
+ * 2'b00: 1 SC has 4 SA.
+ * SC index is equivalent to {SC_Index[4:2], 1'b0}.
+ * SA index is equivalent to {SC_Index[4:2], SC entry's current AN[1:0]}
+ * 2'b10: 1 SC has 2 SA.
+ * SC index is equivalent to SC_Index[4:1]
+ * SA index is equivalent to {SC_Index[4:1], SC entry's current AN[0]}
+ * 2'b11: 1 SC has 1 SA. No SC entry exists for the specific SA.
+ * SA index is equivalent to SC_Index[4:0]
+ * Note: if specified as 2'b11, hardware AN roll over is not
+ * supported.
+ */
+ u32 sc_sa;
+ /*! 0: the packets will be sent to MAC FIFO
+ * 1: The packets will be sent to Debug/Loopback FIFO.
+ * If the above's action is drop, this bit has no meaning.
+ */
+ u32 debug;
+ /*! 0: forward to remaining modules
+ * 1: bypass the next encryption modules. This packet is considered
+ * an uncontrolled packet.
+ * 2: drop
+ * 3: Reserved.
+ */
+ u32 action;
+ /*! 0: Not valid entry. This entry is not used
+ * 1: valid entry.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Egress SC Lookup table. */
+struct aq_mss_egress_sc_record {
+ /*! This is to specify when the SC was first used. Set by HW. */
+ u32 start_time;
+ /*! This is to specify when the SC was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify which of the SA entries are used by current HW.
+ * Note: This value needs to be set by SW after reset. It will be
+ * automatically updated by HW, if AN roll over is enabled.
+ */
+ u32 curr_an;
+ /*! 0: Clear the SA Valid Bit after PN expiry.
+ * 1: Do not Clear the SA Valid bit after PN expiry of the current SA.
+ * When Enable AN roll over is set, S/W does not need to
+ * program the new SAs and the H/W will automatically roll over
+ * between the SAs without session expiry.
+ * For normal operation, Enable AN Roll over will be set to '0',
+ * in which case the SW needs to program the new SA values
+ * after the current PN expires.
+ */
+ u32 an_roll;
+ /*! This is the TCI field used if packet is not explicitly tagged. */
+ u32 tci;
+ /*! This value indicates the offset where the decryption will start.
+ * Valid values: 0, 4, 8-50.
+ */
+ u32 enc_off;
+ /*! 0: Do not protect frames, all the packets will be forwarded
+ * unchanged. MIB counter (OutPktsUntagged) will be updated.
+ * 1: Protect.
+ */
+ u32 protect;
+ /*! 0: when none of the SAs related to the SC have inUse set.
+ * 1: when any of the SAs related to the SC has inUse set.
+ * This bit is set by HW.
+ */
+ u32 recv;
+ /*! 0: H/W clears this bit on the first use.
+ * 1: SW updates this entry when programming the SC Table.
+ */
+ u32 fresh;
+ /*! AES Key size
+ * 00 - 128bits
+ * 01 - 192bits
+ * 10 - 256bits
+ * 11 - Reserved.
+ */
+ u32 sak_len;
+ /*! 0: Invalid SC
+ * 1: Valid SC.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Egress SA Lookup table. */
+struct aq_mss_egress_sa_record {
+ /*! This is to specify when the SA was first used. Set by HW. */
+ u32 start_time;
+ /*! This is to specify when the SA was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is set by SW and updated by HW to store the Next PN number
+ * used for encryption.
+ */
+ u32 next_pn;
+ /*! The Next_PN number is going to wrap around from 0xFFFF_FFFF
+ * to 0. Set by HW.
+ */
+ u32 sat_pn;
+ /*! 0: This SA is in use.
+ * 1: This SA is Fresh and set by SW.
+ */
+ u32 fresh;
+ /*! 0: Invalid SA
+ * 1: Valid SA.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Egress SA Key
+ * Lookup table.
+ */
+struct aq_mss_egress_sakey_record {
+ /*! Key for AES-GCM processing. */
+ u32 key[8];
+};
+
+/*! Represents the bitfields of a single row in the Ingress Pre-MACSec
+ * CTL Filter table.
+ */
+struct aq_mss_ingress_prectlf_record {
+ /*! This is used to store the 48 bit value used to compare SA, DA
+ * or halfDA+half SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every
+ * value will match successfully. The total data is 64 bits, i.e.
+ * 16 nibble masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. This entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA+ range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
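[Editor's note] To make the per-nibble match_mask concrete, here is a rough
sketch of how a 16-bit nibble mask would expand over 64 bits of match data,
assuming the mask bits map nibble-for-nibble onto the {sa_da, eth_type} key
(an illustration of the semantics, not the hardware implementation):

	/* Expand a 16-bit per-nibble mask into a 64-bit bitmask and apply it. */
	static inline bool example_nibble_match(u64 key, u64 pkt, u16 nibbles)
	{
		u64 mask = 0;
		int i;

		for (i = 0; i < 16; i++)
			if (nibbles & (1u << i))
				mask |= 0xfULL << (4 * i);

		return (key & mask) == (pkt & mask);
	}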
+
+/*! Represents the bitfields of a single row in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+struct aq_mss_ingress_preclass_record {
+ /*! The 64 bit SCI field used to compare with extracted value.
+ * Should have SCI value in case TCI[SCI_SEND] == 0. This will be
+ * used for ICV calculation.
+ */
+ u32 sci[2];
+ /*! The 8 bit TCI field used to compare with extracted value. */
+ u32 tci;
+ /*! 8 bit encryption offset. */
+ u32 encr_offset;
+ /*! The 16 bit Ethertype (in the clear) field used to compare with
+ * extracted value.
+ */
+ u32 eth_type;
+ /*! This is to specify the 40bit SNAP header if the SNAP header's
+ * mask is enabled.
+ */
+ u32 snap[2];
+ /*! This is to specify the 24bit LLC header if the LLC header's
+ * mask is enabled.
+ */
+ u32 llc;
+ /*! The 48 bit MAC_SA field used to compare with extracted value. */
+ u32 mac_sa[2];
+ /*! The 48 bit MAC_DA field used to compare with extracted value. */
+ u32 mac_da[2];
+ /*! 0: this is to compare with non-LPBK packet
+ * 1: this is to compare with LPBK packet.
+ * This value is used to compare with a controlled-tag which goes
+ * with the packet when looped back from Egress port.
+ */
+ u32 lpbk_packet;
+ /*! The value of this bit mask affects how the SC index and SA
+ * index are created.
+ * 2'b00: 1 SC has 4 SAs.
+ * SC index is equivalent to {SC_Index[4:2], 1'b0}.
+ * SA index is equivalent to {SC_Index[4:2], SECTAG's AN[1:0]}.
+ * Here AN bits are not compared.
+ * 2'b10: 1 SC has 2 SAs.
+ * SC index is equivalent to SC_Index[4:1].
+ * SA index is equivalent to {SC_Index[4:1], SECTAG's AN[0]}.
+ * Compare AN[1] field only.
+ * 2'b11: 1 SC has 1 SA. No SC entry exists for the specific SA.
+ * SA index is equivalent to SC_Index[4:0].
+ * AN[1:0] bits are compared.
+ * NOTE: This design supports different usages of AN. The user can
+ * either ping-pong buffer 2 SAs by using only the AN[0] bit, use
+ * 4 SAs per SC by using the AN[1:0] bits, or even treat each SA
+ * as independent, i.e. AN[1:0] is just another matching pointer
+ * to select the SA.
+ */
+ u32 an_mask;
+ /*! This is a bit mask to enable comparison of the upper 6 bits of
+ * the TCI field, which does not include the AN field.
+ * 0: don't compare
+ * 1: enable comparison of the bits.
+ */
+ u32 tci_mask;
+ /*! 0: don't care
+ * 1: enable comparison of SCI.
+ */
+ u32 sci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of Ethertype.
+ */
+ u32 eth_type_mask;
+ /*! Mask is per-byte.
+ * 0: don't care and no SNAP header exists.
+ * 1: compare the SNAP header.
+ * If this bit is set to 1, the extracted field will assume the
+ * SNAP header exists as encapsulated in 802.3 (RFC 1042), i.e. the
+ * next 5 bytes after the LLC header are the SNAP header.
+ */
+ u32 snap_mask;
+ /*! Mask is per-byte.
+ * 0: don't care and no LLC header exists.
+ * 1: compare the LLC header.
+ * If this bit is set to 1, the extracted field will assume the
+ * LLC header exists as encapsulated in 802.3 (RFC 1042), i.e. the
+ * next three bytes after the 802.3 MAC header are the LLC header.
+ */
+ u32 llc_mask;
+ /*! Reserved. This bit should always be 0. */
+ u32 _802_2_encapsulate;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_SA.
+ */
+ u32 sa_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_DA.
+ */
+ u32 da_mask;
+ /*! 0: don't care
+ * 1: enable checking if this is loopback packet or not.
+ */
+ u32 lpbk_mask;
+ /*! If the packet matches and is tagged as a controlled packet, this
+ * SC/SA index is used for later SC and SA table lookup.
+ */
+ u32 sc_idx;
+ /*! 0: the packets will be sent to the MAC FIFO.
+ * 1: the packets will be sent to the Debug/Loopback FIFO.
+ * If the action above is drop, this bit has no meaning.
+ */
+ u32 proc_dest;
+ /*! 0: Process: Forward to next two modules for 802.1AE decryption.
+ * 1: Process but keep SECTAG: Forward to next two modules for
+ * 802.1AE decryption but keep the MACSEC header with added error
+ * code information. ICV will be stripped for all control packets.
+ * 2: Bypass: Bypass the next two decryption modules but processed
+ * by post-classification.
+ * 3: Drop: drop this packet and update counts accordingly.
+ */
+ u32 action;
+ /*! 0: This is a controlled-port packet if matched.
+ * 1: This is an uncontrolled-port packet if matched.
+ */
+ u32 ctrl_unctrl;
+ /*! Use the SCI value from the table if the 'SC' bit of the input
+ * packet is not present.
+ */
+ u32 sci_from_table;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Not a valid entry; this entry is not used.
+ * 1: valid entry.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SC Lookup table. */
+struct aq_mss_ingress_sc_record {
+ /*! This is to specify when the SC was first used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify when the SC was first used. Set by HW. */
+ u32 start_time;
+ /*! 0: Strict
+ * 1: Check
+ * 2: Disabled.
+ */
+ u32 validate_frames;
+ /*! 1: Replay control enabled.
+ * 0: replay control disabled.
+ */
+ u32 replay_protect;
+ /*! This is to specify the window range for anti-replay. Default is 0.
+ * 0: strict order enforcement.
+ */
+ u32 anti_replay_window;
+ /*! 0: when none of the SAs related to the SC have inUse set.
+ * 1: when any of the SAs related to the SC has inUse set.
+ * This bit is set by HW.
+ */
+ u32 receiving;
+ /*! 0: when hardware processes the SC for the first time, it clears
+ * this bit
+ * 1: this bit is set by SW when it sets up the SC.
+ */
+ u32 fresh;
+ /*! 0: The AN number will not automatically roll over if Next_PN is
+ * saturated.
+ * 1: The AN number will automatically roll over if Next_PN is
+ * saturated.
+ * Rollover is valid only after expiry; rolling over between
+ * SAs is the normal process.
+ */
+ u32 an_rol;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Invalid SC
+ * 1: Valid SC.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SA Lookup table. */
+struct aq_mss_ingress_sa_record {
+ /*! This is to specify when the SA was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify when the SA was first used. Set by HW. */
+ u32 start_time;
+ /*! This is updated by HW to store the expected NextPN number for
+ * anti-replay.
+ */
+ u32 next_pn;
+ /*! The Next_PN number is going to wrap around from 0xFFFF_FFFF
+ * to 0. Set by HW.
+ */
+ u32 sat_nextpn;
+ /*! 0: This SA is not yet used.
+ * 1: This SA is inUse.
+ */
+ u32 in_use;
+ /*! 0: when hardware processes the SC for the first time, it clears
+ * this bit
+ * 1: this bit is set by SW when it sets up the SC.
+ */
+ u32 fresh;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Invalid SA.
+ * 1: Valid SA.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SA Key
+ * Lookup table.
+ */
+struct aq_mss_ingress_sakey_record {
+ /*! Key for AES-GCM processing. */
+ u32 key[8];
+ /*! AES key size
+ * 00 - 128bits
+ * 01 - 192bits
+ * 10 - 256bits
+ * 11 - reserved.
+ */
+ u32 key_len;
+};
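[Editor's note] The 2-bit key_len encoding above decodes trivially to a key
size in bytes; a sketch with an illustrative helper name:

	/* Decode the 2-bit key_len field into an AES key size in bytes. */
	static inline unsigned int example_sak_bytes(u32 key_len)
	{
		switch (key_len & 3) {
		case 0: return 16;	/* 128-bit AES key */
		case 1: return 24;	/* 192-bit AES key */
		case 2: return 32;	/* 256-bit AES key */
		default: return 0;	/* 2'b11 is reserved */
		}
	}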
+
+/*! Represents the bitfields of a single row in the Ingress Post-
+ * MACSec Packet Classifier table.
+ */
+struct aq_mss_ingress_postclass_record {
+ /*! The 8 bit value used to compare with extracted value for byte 0. */
+ u32 byte0;
+ /*! The 8 bit value used to compare with extracted value for byte 1. */
+ u32 byte1;
+ /*! The 8 bit value used to compare with extracted value for byte 2. */
+ u32 byte2;
+ /*! The 8 bit value used to compare with extracted value for byte 3. */
+ u32 byte3;
+ /*! Ethertype in the packet. */
+ u32 eth_type;
+ /*! Ether Type value > 1500 (0x5dc). */
+ u32 eth_type_valid;
+ /*! VLAN ID after parsing. */
+ u32 vlan_id;
+ /*! VLAN priority after parsing. */
+ u32 vlan_up;
+ /*! Valid VLAN coding. */
+ u32 vlan_valid;
+ /*! SA index. */
+ u32 sai;
+ /*! SAI hit, i.e. controlled packet. */
+ u32 sai_hit;
+ /*! Mask for payload ethertype field. */
+ u32 eth_type_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte3_location;
+ /*! Mask for Byte Offset 3. */
+ u32 byte3_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte2_location;
+ /*! Mask for Byte Offset 2. */
+ u32 byte2_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte1_location;
+ /*! Mask for Byte Offset 1. */
+ u32 byte1_mask;
+ /*! 0~63: byte location extracted by the packet comparator, which
+ * can be anywhere in the first 64 bytes of the MAC packet.
+ * This byte location is counted from the MAC DA address, i.e. 0
+ * points to byte 0 of the DA address.
+ */
+ u32 byte0_location;
+ /*! Mask for Byte Offset 0. */
+ u32 byte0_mask;
+ /*! Mask for Ethertype valid field. Indicates 802.3 vs. Other. */
+ u32 eth_type_valid_mask;
+ /*! Mask for VLAN ID field. */
+ u32 vlan_id_mask;
+ /*! Mask for VLAN UP field. */
+ u32 vlan_up_mask;
+ /*! Mask for VLAN valid field. */
+ u32 vlan_valid_mask;
+ /*! Mask for SAI. */
+ u32 sai_mask;
+ /*! Mask for SAI_HIT. */
+ u32 sai_hit_mask;
+ /*! Action if only first level matches and second level does not.
+ * 0: pass
+ * 1: drop (fail).
+ */
+ u32 firstlevel_actions;
+ /*! Action if both first and second level matched.
+ * 0: pass
+ * 1: drop (fail).
+ */
+ u32 secondlevel_actions;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Not a valid entry; this entry is not used.
+ * 1: valid entry.
+ */
+ u32 valid;
+};
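[Editor's note] The byteN_location fields above all share the same convention;
a sketch of the extraction they describe, assuming the frame pointer sits at
byte 0 of the destination MAC address:

	/* Pick the comparison byte at a 0..63 offset from the MAC DA. */
	static inline u8 example_extract_byte(const u8 *frame, u32 location)
	{
		return frame[location & 0x3f];
	}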
+
+/*! Represents the bitfields of a single row in the Ingress Post-
+ * MACSec CTL Filter table.
+ */
+struct aq_mss_ingress_postctlf_record {
+ /*! This is used to store the 48 bit value used to compare SA, DA
+ * or halfDA+half SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every
+ * value will match successfully. The total data is 64 bits, i.e.
+ * 16 nibble masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. This entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA+ range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
+
+/*! Represents the Egress MIB counters for a single SC. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_egress_sc_counters {
+ /*! The number of integrity protected but not encrypted packets
+ * for this transmitting SC.
+ */
+ u32 sc_protected_pkts[2];
+ /*! The number of integrity protected and encrypted packets for
+ * this transmitting SC.
+ */
+ u32 sc_encrypted_pkts[2];
+ /*! The number of plain text octets that are integrity protected
+ * but not encrypted on the transmitting SC.
+ */
+ u32 sc_protected_octets[2];
+ /*! The number of plain text octets that are integrity protected
+ * and encrypted on the transmitting SC.
+ */
+ u32 sc_encrypted_octets[2];
+};
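[Editor's note] Every counter in this and the following MIB structures uses
the same split-word layout, low word in field[0]; a consumer would recombine
each pair like this (a sketch, not driver code):

	/* Combine a split 64-bit MIB counter: low 32 bits in field[0]. */
	static inline u64 example_counter64(const u32 field[2])
	{
		return ((u64)field[1] << 32) | field[0];
	}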
+
+/*! Represents the Egress MIB counters for a single SA. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_egress_sa_counters {
+ /*! The number of dropped packets for this transmitting SA. */
+ u32 sa_hit_drop_redirect[2];
+ /*! TODO */
+ u32 sa_protected2_pkts[2];
+ /*! The number of integrity protected but not encrypted packets
+ * for this transmitting SA.
+ */
+ u32 sa_protected_pkts[2];
+ /*! The number of integrity protected and encrypted packets for
+ * this transmitting SA.
+ */
+ u32 sa_encrypted_pkts[2];
+};
+
+/*! Represents the common Egress MIB counters; the counters not
+ * associated with a particular SC/SA. Counters are 64 bits, lower 32
+ * bits in field[0].
+ */
+struct aq_mss_egress_common_counters {
+ /*! The number of transmitted packets classified as MAC_CTL packets. */
+ u32 ctl_pkt[2];
+ /*! The number of transmitted packets that did not match any rows
+ * in the Egress Packet Classifier table.
+ */
+ u32 unknown_sa_pkts[2];
+ /*! The number of transmitted packets where the SC table entry has
+ * protect=0 (so packets are forwarded unchanged).
+ */
+ u32 untagged_pkts[2];
+ /*! The number of transmitted packets discarded because the packet
+ * length is greater than the ifMtu of the Common Port interface.
+ */
+ u32 too_long[2];
+ /*! The number of transmitted packets for which table memory was
+ * affected by an ECC error during processing.
+ */
+ u32 ecc_error_pkts[2];
+ /*! The number of transmitted packets for which the matched row in
+ * the Egress Packet Classifier table has action=drop.
+ */
+ u32 unctrl_hit_drop_redir[2];
+};
+
+/*! Represents the Ingress MIB counters for a single SA. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_ingress_sa_counters {
+ /*! For this SA, the number of received packets without a SecTAG. */
+ u32 untagged_hit_pkts[2];
+ /*! For this SA, the number of received packets that were dropped. */
+ u32 ctrl_hit_drop_redir_pkts[2];
+ /*! For this SA which is not currently in use, the number of
+ * received packets that have been discarded because either the
+ * packet is encrypted or the matched row in the Ingress SC
+ * Lookup table has validate_frames=Strict.
+ */
+ u32 not_using_sa[2];
+ /*! For this SA which is not currently in use, the number of
+ * received, unencrypted packets for which the matched row in the
+ * Ingress SC Lookup table has validate_frames!=Strict.
+ */
+ u32 unused_sa[2];
+ /*! For this SA, the number of discarded packets that are not
+ * valid, where either the matched row in the Ingress SC Lookup
+ * table has validate_frames=Strict or the packet is encrypted.
+ */
+ u32 not_valid_pkts[2];
+ /*! For this SA, the number of packets with the condition that the
+ * packets are not valid and the matched row in the Ingress SC
+ * Lookup table has validate_frames=Check.
+ */
+ u32 invalid_pkts[2];
+ /*! For this SA, the number of validated packets. */
+ u32 ok_pkts[2];
+ /*! For this SC, the number of received packets that have been
+ * discarded with the condition: the matched row in the Ingress
+ * SC Lookup table has replay_protect=1 and the PN of the packet
+ * is lower than the lower bound replay check PN.
+ */
+ u32 late_pkts[2];
+ /*! For this SA, the number of packets whose PN is lower than the
+ * lower bound replay protection PN.
+ */
+ u32 delayed_pkts[2];
+ /*! For this SC, the number of packets with the following condition:
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=0 or
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=1 and the packet is not encrypted and the
+ * integrity check has failed or
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=1 and the packet is encrypted and integrity
+ * check has failed.
+ */
+ u32 unchecked_pkts[2];
+ /*! The number of octets of plaintext recovered from received
+ * packets that were integrity protected but not encrypted.
+ */
+ u32 validated_octets[2];
+ /*! The number of octets of plaintext recovered from received
+ * packets that were integrity protected and encrypted.
+ */
+ u32 decrypted_octets[2];
+};
+
+/*! Represents the common Ingress MIB counters; the counters not
+ * associated with a particular SA. Counters are 64 bits, lower 32
+ * bits in field[0].
+ */
+struct aq_mss_ingress_common_counters {
+ /*! The number of received packets classified as MAC_CTL packets. */
+ u32 ctl_pkts[2];
+ /*! The number of received packets with the MAC security tag
+ * (SecTAG), not matching any rows in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+ u32 tagged_miss_pkts[2];
+ /*! The number of received packets without the MAC security tag
+ * (SecTAG), not matching any rows in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+ u32 untagged_miss_pkts[2];
+ /*! The number of received packets discarded without the MAC
+ * security tag (SecTAG) and with the matched row in the Ingress
+ * SC Lookup table having validate_frames=Strict.
+ */
+ u32 notag_pkts[2];
+ /*! The number of received packets without the MAC security tag
+ * (SecTAG) and with the matched row in the Ingress SC Lookup
+ * table having validate_frames!=Strict.
+ */
+ u32 untagged_pkts[2];
+ /*! The number of received packets discarded with an invalid
+ * SecTAG or a zero value PN or an invalid ICV.
+ */
+ u32 bad_tag_pkts[2];
+ /*! The number of received packets discarded with unknown SCI
+ * information with the condition:
+ * the matched row in the Ingress SC Lookup table has
+ * validate_frames=Strict or the C bit in the SecTAG is set.
+ */
+ u32 no_sci_pkts[2];
+ /*! The number of received packets with unknown SCI with the condition:
+ * The matched row in the Ingress SC Lookup table has
+ * validate_frames!=Strict and the C bit in the SecTAG is not set.
+ */
+ u32 unknown_sci_pkts[2];
+ /*! The number of received packets by the controlled port service
+ * that passed the Ingress Post-MACSec Packet Classifier table
+ * check.
+ */
+ u32 ctrl_prt_pass_pkts[2];
+ /*! The number of received packets by the uncontrolled port
+ * service that passed the Ingress Post-MACSec Packet Classifier
+ * table check.
+ */
+ u32 unctrl_prt_pass_pkts[2];
+ /*! The number of received packets by the controlled port service
+ * that failed the Ingress Post-MACSec Packet Classifier table
+ * check.
+ */
+ u32 ctrl_prt_fail_pkts[2];
+ /*! The number of received packets by the uncontrolled port
+ * service that failed the Ingress Post-MACSec Packet Classifier
+ * table check.
+ */
+ u32 unctrl_prt_fail_pkts[2];
+ /*! The number of received packets discarded because the packet
+ * length is greater than the ifMtu of the Common Port interface.
+ */
+ u32 too_long_pkts[2];
+ /*! The number of received packets classified as MAC_CTL by the
+ * Ingress Post-MACSec CTL Filter table.
+ */
+ u32 igpoc_ctl_pkts[2];
+ /*! The number of received packets for which table memory was
+ * affected by an ECC error during processing.
+ */
+ u32 ecc_error_pkts[2];
+ /*! The number of received packets by the uncontrolled port
+ * service that were dropped.
+ */
+ u32 unctrl_hit_drop_redir[2];
+};
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
deleted file mode 100644
index 597654b51e01..000000000000
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
- */
-
-#ifndef VER_H
-#define VER_H
-
-#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
-
-#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index d9efbc8d783b..d820ae03a966 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -130,7 +130,6 @@ struct arc_emac_mdio_bus_data {
*/
struct arc_emac_priv {
const char *drv_name;
- const char *drv_version;
void (*set_mac_speed)(void *priv, unsigned int speed);
/* Devices */
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index 539166112993..1c7736b7eaf7 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -15,7 +15,6 @@
#include "emac.h"
#define DRV_NAME "emac_arc"
-#define DRV_VERSION "1.0"
static int emac_arc_probe(struct platform_device *pdev)
{
@@ -36,7 +35,6 @@ static int emac_arc_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->drv_name = DRV_NAME;
- priv->drv_version = DRV_VERSION;
err = of_get_phy_mode(dev->of_node, &interface);
if (err) {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 17bda4e8cc45..38cd968b6a3b 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -92,7 +92,6 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
struct arc_emac_priv *priv = netdev_priv(ndev);
strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
- strlcpy(info->version, priv->drv_version, sizeof(info->version));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index aae231c5224f..48ecdf15eddc 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -16,7 +16,6 @@
#include "emac.h"
#define DRV_NAME "rockchip_emac"
-#define DRV_VERSION "1.1"
struct emac_rockchip_soc_data {
unsigned int grf_offset;
@@ -112,7 +111,6 @@ static int emac_rockchip_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->emac.drv_name = DRV_NAME;
- priv->emac.drv_version = DRV_VERSION;
priv->emac.set_mac_speed = emac_rockchip_set_mac_speed;
err = of_get_phy_mode(dev->of_node, &interface);
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 0058051ba925..2720bde5034e 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -20,7 +20,7 @@ if NET_VENDOR_ATHEROS
config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
depends on ATH79
- select PHYLIB
+ select PHYLINK
help
If you wish to compile a kernel for AR7XXX/91XXX and enable
ethernet support, then you should always answer Y to this.
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index e95687a780fb..02b7705393ca 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -32,6 +32,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
+#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -314,6 +315,8 @@ struct ag71xx {
dma_addr_t stop_desc_dma;
phy_interface_t phy_if_mode;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct delayed_work restart_work;
struct timer_list oom_timer;
@@ -845,24 +848,91 @@ static void ag71xx_hw_start(struct ag71xx *ag)
netif_wake_queue(ag->ndev);
}
-static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
+static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct phy_device *phydev = ag->ndev->phydev;
- u32 cfg2;
- u32 ifctl;
- u32 fifo5;
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- if (!phydev->link && update) {
- ag71xx_hw_stop(ag);
+ if (phylink_autoneg_inband(mode))
return;
- }
if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
ag71xx_fast_reset(ag);
+ if (ag->tx_ring.desc_split) {
+ ag->fifodata[2] &= 0xffff;
+ ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
+}
+
+static void ag71xx_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_MII) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set(mask, MII);
+
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ if (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_GMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ state->link = 0;
+}
+
+static void ag71xx_mac_an_restart(struct phylink_config *config)
+{
+ /* Not Supported */
+}
+
+static void ag71xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
+
+ ag71xx_hw_stop(ag);
+}
+
+static void ag71xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
+ u32 cfg2;
+ u32 ifctl;
+ u32 fifo5;
+
cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
- cfg2 |= (phydev->duplex) ? MAC_CFG2_FDX : 0;
+ cfg2 |= duplex ? MAC_CFG2_FDX : 0;
ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
ifctl &= ~(MAC_IFCTL_SPEED);
@@ -870,7 +940,7 @@ static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
fifo5 &= ~FIFO_CFG5_BM;
- switch (phydev->speed) {
+ switch (speed) {
case SPEED_1000:
cfg2 |= MAC_CFG2_IF_1000;
fifo5 |= FIFO_CFG5_BM;
@@ -883,72 +953,38 @@ static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
cfg2 |= MAC_CFG2_IF_10_100;
break;
default:
- WARN(1, "not supported speed %i\n", phydev->speed);
return;
}
- if (ag->tx_ring.desc_split) {
- ag->fifodata[2] &= 0xffff;
- ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
- }
-
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
-
ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
ag71xx_hw_start(ag);
-
- if (update)
- phy_print_status(phydev);
}
-static void ag71xx_phy_link_adjust(struct net_device *ndev)
-{
- struct ag71xx *ag = netdev_priv(ndev);
-
- ag71xx_link_adjust(ag, true);
-}
+static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
+ .validate = ag71xx_mac_validate,
+ .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
+ .mac_an_restart = ag71xx_mac_an_restart,
+ .mac_config = ag71xx_mac_config,
+ .mac_link_down = ag71xx_mac_link_down,
+ .mac_link_up = ag71xx_mac_link_up,
+};
-static int ag71xx_phy_connect(struct ag71xx *ag)
+static int ag71xx_phylink_setup(struct ag71xx *ag)
{
- struct device_node *np = ag->pdev->dev.of_node;
- struct net_device *ndev = ag->ndev;
- struct device_node *phy_node;
- struct phy_device *phydev;
- int ret;
-
- if (of_phy_is_fixed_link(np)) {
- ret = of_phy_register_fixed_link(np);
- if (ret < 0) {
- netif_err(ag, probe, ndev, "Failed to register fixed PHY link: %d\n",
- ret);
- return ret;
- }
+ struct phylink *phylink;
- phy_node = of_node_get(np);
- } else {
- phy_node = of_parse_phandle(np, "phy-handle", 0);
- }
+ ag->phylink_config.dev = &ag->ndev->dev;
+ ag->phylink_config.type = PHYLINK_NETDEV;
- if (!phy_node) {
- netif_err(ag, probe, ndev, "Could not find valid phy node\n");
- return -ENODEV;
- }
-
- phydev = of_phy_connect(ag->ndev, phy_node, ag71xx_phy_link_adjust,
- 0, ag->phy_if_mode);
-
- of_node_put(phy_node);
-
- if (!phydev) {
- netif_err(ag, probe, ndev, "Could not connect to PHY device\n");
- return -ENODEV;
- }
-
- phy_attached_info(phydev);
+ phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
+ ag->phy_if_mode, &ag71xx_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+ ag->phylink = phylink;
return 0;
}
@@ -1239,6 +1275,13 @@ static int ag71xx_open(struct net_device *ndev)
unsigned int max_frame_len;
int ret;
+ ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
+ if (ret) {
+ netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
+ ret);
+ goto err;
+ }
+
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
ag->rx_buf_size =
SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
@@ -1251,11 +1294,7 @@ static int ag71xx_open(struct net_device *ndev)
if (ret)
goto err;
- ret = ag71xx_phy_connect(ag);
- if (ret)
- goto err;
-
- phy_start(ndev->phydev);
+ phylink_start(ag->phylink);
return 0;
@@ -1268,8 +1307,8 @@ static int ag71xx_stop(struct net_device *ndev)
{
struct ag71xx *ag = netdev_priv(ndev);
- phy_stop(ndev->phydev);
- phy_disconnect(ndev->phydev);
+ phylink_stop(ag->phylink);
+ phylink_disconnect_phy(ag->phylink);
ag71xx_hw_disable(ag);
return 0;
@@ -1414,13 +1453,14 @@ static void ag71xx_restart_work_func(struct work_struct *work)
{
struct ag71xx *ag = container_of(work, struct ag71xx,
restart_work.work);
- struct net_device *ndev = ag->ndev;
rtnl_lock();
ag71xx_hw_disable(ag);
ag71xx_hw_enable(ag);
- if (ndev->phydev->link)
- ag71xx_link_adjust(ag, false);
+
+ phylink_stop(ag->phylink);
+ phylink_start(ag->phylink);
+
rtnl_unlock();
}
@@ -1759,6 +1799,12 @@ static int ag71xx_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ err = ag71xx_phylink_setup(ag);
+ if (err) {
+ netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
+ goto err_mdio_remove;
+ }
+
err = register_netdev(ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
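[Editor's note] The ag71xx changes above follow the usual phylink conversion
pattern: populate a phylink_config, create the phylink instance with the MAC
ops, then connect and start it at open time. A condensed sketch of that flow;
everything other than the phylink API calls (the example_* names and private
struct) is illustrative:

	/* Minimal phylink bring-up flow mirrored from the conversion above. */
	static int example_phylink_setup(struct example_priv *p)
	{
		p->phylink_config.dev = &p->ndev->dev;
		p->phylink_config.type = PHYLINK_NETDEV;

		p->phylink = phylink_create(&p->phylink_config,
					    p->pdev->dev.fwnode,
					    p->phy_if_mode, &example_mac_ops);
		return PTR_ERR_OR_ZERO(p->phylink);
	}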
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 1dcbc486eca9..b9b4edb913c1 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1416,10 +1416,7 @@ static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
0, IPPROTO_TCP, 0);
first->word1 |= 1 << TPD_IPV4_SHIFT;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
/* LSOv2: the first TPD only provides the packet length */
first->adrl.l.pkt_len = skb->len;
first->word1 |= 1 << TPD_LSO_V2_SHIFT;
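[Editor's note] For reference, tcp_v6_gso_csum_prep() consolidates exactly the
open-coded sequence deleted here (and again in atl1c below); roughly:

	/* What the helper replaces; cf. the deleted lines above. */
	static void example_v6_gso_csum_prep(struct sk_buff *skb)
	{
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}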
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 60b2febd7315..a0562a90fb6d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -583,7 +583,6 @@ struct atl1c_adapter {
readl(((a)->hw_addr + reg) + ((offset) << 2)))
extern char atl1c_driver_name[];
-extern char atl1c_driver_version[];
void atl1c_reinit_locked(struct atl1c_adapter *adapter);
s32 atl1c_reset_hw(struct atl1c_hw *hw);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index b5a70a36fa04..e2eb7b8c63a0 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -221,8 +221,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
struct atl1c_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, atl1c_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 4c0b1f8551dd..00bd7bd55794 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -8,9 +8,7 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
char atl1c_driver_name[] = "atl1c";
-char atl1c_driver_version[] = ATL1C_DRV_VERSION;
/*
* atl1c_pci_tbl - PCI Device ID Table
@@ -37,7 +35,6 @@ MODULE_AUTHOR("Jie Yang");
MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ATL1C_DRV_VERSION);
static int atl1c_stop_mac(struct atl1c_hw *hw);
static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
@@ -2025,10 +2022,8 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
"IPV6 tso with zero data??\n");
goto check_sum;
} else
- tcp_hdr(skb)->check = ~csum_ipv6_magic(
- &ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
+
etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
etpd->pkt_len = cpu_to_le32(skb->len);
@@ -2644,8 +2639,6 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_register;
}
- if (netif_msg_probe(adapter))
- dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION);
cards_found++;
return 0;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index e9893da50995..9fcad783c939 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -482,7 +482,6 @@ struct atl1e_adapter {
readl(((a)->hw_addr + reg) + ((offset) << 2)))
extern char atl1e_driver_name[];
-extern char atl1e_driver_version[];
void atl1e_check_options(struct atl1e_adapter *adapter);
int atl1e_up(struct atl1e_adapter *adapter);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index c6b9e7ea8e38..0cbde352d1ba 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -307,8 +307,6 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
struct atl1e_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, atl1e_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e0d89942d537..223ef846123e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -8,10 +8,7 @@
#include "atl1e.h"
-#define DRV_VERSION "1.0.0.7-NAPI"
-
char atl1e_driver_name[] = "ATL1E";
-char atl1e_driver_version[] = DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
/*
* atl1e_pci_tbl - PCI Device ID Table
@@ -33,7 +30,6 @@ MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);
MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b498fd6a47d0..271e7034fa70 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -65,12 +65,10 @@
#include "atl1.h"
-#define ATLX_DRIVER_VERSION "2.1.3"
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, "
"Chris Snook <csnook@redhat.com>, "
"Jay Cliburn <jcliburn@gmail.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ATLX_DRIVER_VERSION);
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
@@ -2965,8 +2963,6 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get device revision number */
adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
(REG_MASTER_CTRL + 2));
- if (netif_msg_probe(adapter))
- dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
/* set default ring resource counts */
adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
@@ -3344,8 +3340,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
struct atl1_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index b81a4e0c5b57..c915852b8892 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -36,18 +36,12 @@
#include "atl2.h"
-#define ATL2_DRV_VERSION "2.2.3"
-
static const char atl2_driver_name[] = "atl2";
-static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
-static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
-static const char atl2_driver_version[] = ATL2_DRV_VERSION;
static const struct ethtool_ops atl2_ethtool_ops;
MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ATL2_DRV_VERSION);
/*
* atl2_pci_tbl - PCI Device ID Table
@@ -1688,9 +1682,6 @@ static struct pci_driver atl2_driver = {
*/
static int __init atl2_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
- atl2_driver_version);
- printk(KERN_INFO "%s\n", atl2_copyright);
return pci_register_driver(&atl2_driver);
}
module_init(atl2_init_module);
@@ -2011,8 +2002,6 @@ static void atl2_get_drvinfo(struct net_device *netdev,
struct atl2_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, atl2_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 53055ce5dfd6..2a69c0d06f3c 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -69,6 +69,7 @@ config BCMGENET
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
select DIMLIB
+ select BROADCOM_PHY if ARCH_BCM2835
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index a780b7215021..6fb620e25208 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -40,7 +40,6 @@
#include "b44.h"
#define DRV_MODULE_NAME "b44"
-#define DRV_MODULE_VERSION "2.0"
#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
#define B44_DEF_MSG_ENABLE \
@@ -97,7 +96,6 @@
MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
@@ -1791,7 +1789,6 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct ssb_bus *bus = bp->sdev->bus;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
@@ -2347,8 +2344,6 @@ static int b44_init_one(struct ssb_device *sdev,
instance++;
- pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
-
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 620cd3fc1fbc..916824cca3fd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -22,7 +22,6 @@
#include "bcm63xx_enet.h"
static char bcm_enet_driver_name[] = "bcm63xx_enet";
-static char bcm_enet_driver_version[] = "1.0";
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
@@ -1304,9 +1303,6 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, bcm_enet_driver_version,
- sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
@@ -1706,7 +1702,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (!res_irq || !res_irq_rx || !res_irq_tx)
return -ENODEV;
- ret = 0;
dev = alloc_etherdev(sizeof(*priv));
if (!dev)
return -ENOMEM;
@@ -2529,10 +2524,8 @@ static int bcm_enetsw_get_sset_count(struct net_device *netdev,
static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
- strncpy(drvinfo->version, bcm_enet_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "bcm63xx", 32);
+ strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 15b31cddc054..b25356e21a1e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -287,7 +287,6 @@ static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, "0.1", sizeof(info->version));
strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}
@@ -624,8 +623,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
return -EINVAL;
if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
- (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
- ec->use_adaptive_tx_coalesce)
+ (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
return -EINVAL;
for (i = 0; i < dev->num_tx_queues; i++)
@@ -666,7 +664,8 @@ static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
dma_addr_t mapping;
/* Allocate a new SKB for a new packet */
- skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
+ GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
@@ -2210,6 +2209,9 @@ static int bcm_sysport_set_rxnfc(struct net_device *dev,
}
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
.set_msglevel = bcm_sysport_set_msglvl,
@@ -2474,7 +2476,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->wol_irq = platform_get_irq(pdev, 1);
}
if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
- dev_err(&pdev->dev, "invalid interrupts\n");
ret = -EINVAL;
goto err_free_netdev;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index c46c1b1416f7..6795b6d95f54 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -202,13 +202,8 @@ static int bgmac_probe(struct platform_device *pdev)
if (bgmac->irq < 0)
return bgmac->irq;
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
- if (!regs) {
- dev_err(&pdev->dev, "Unable to obtain base resource\n");
- return -EINVAL;
- }
-
- bgmac->plat.base = devm_ioremap_resource(&pdev->dev, regs);
+ bgmac->plat.base =
+ devm_platform_ioremap_resource_byname(pdev, "amac_base");
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 1bb07a5d82c9..98ec1b8a7d8e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1248,6 +1248,14 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
return 0;
}
+static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
+ return 0;
+}
+
static const struct net_device_ops bgmac_netdev_ops = {
.ndo_open = bgmac_open,
.ndo_stop = bgmac_stop,
@@ -1256,6 +1264,7 @@ static const struct net_device_ops bgmac_netdev_ops = {
.ndo_set_mac_address = bgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_change_mtu = bgmac_change_mtu,
};
/**************************************************
@@ -1530,6 +1539,9 @@ int bgmac_enet_probe(struct bgmac *bgmac)
net_dev->hw_features = net_dev->features;
net_dev->vlan_features = net_dev->features;
+ /* Omit FCS from max MTU size */
+ net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
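[Editor's note] Worked out from the constants in this patch: with
BGMAC_RX_MAX_FRAME_SIZE raised to 9724 in bgmac.h below, the advertised
maximum MTU is 9724 - 4 (ETH_FCS_LEN) = 9720 bytes, and bgmac_change_mtu()
programs the BGMAC_RXMAX_LENGTH register to mtu + 32.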
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 40d02fec2747..351c598a3ec6 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -351,7 +351,7 @@
#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */
#define BGMAC_DESC_CTL0_EOF 0x40000000 /* End of frame */
#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */
-#define BGMAC_DESC_CTL1_LEN 0x00001FFF
+#define BGMAC_DESC_CTL1_LEN 0x00003FFF
#define BGMAC_PHY_NOREGS BRCM_PSEUDO_PHY_ADDR
#define BGMAC_PHY_MASK 0x1F
@@ -366,7 +366,8 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */
+/* Jumbo frame size with FCS */
+#define BGMAC_RX_MAX_FRAME_SIZE 9724
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index dbb7874607ca..e1c236cab2a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -59,8 +59,6 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.6"
-#define DRV_MODULE_RELDATE "January 29, 2014"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -72,13 +70,9 @@
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
-static char version[] =
- "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
@@ -7048,7 +7042,6 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct bnx2 *bp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
@@ -7819,6 +7812,11 @@ static int bnx2_set_channels(struct net_device *dev,
}
static const struct ethtool_ops bnx2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_drvinfo = bnx2_get_drvinfo,
.get_regs_len = bnx2_get_regs_len,
.get_regs = bnx2_get_regs,
@@ -8562,15 +8560,11 @@ static const struct net_device_ops bnx2_netdev_ops = {
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed = 0;
struct net_device *dev;
struct bnx2 *bp;
int rc;
char str[40];
- if (version_printed++ == 0)
- pr_info("%s", version);
-
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
if (!dev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 6026b53137aa..4f5b2b81be3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -32,8 +32,14 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
+/* FIXME: Delete the DRV_MODULE_VERSION below, but please be warned
+ * that it is not an easy task because such change has all chances
+ * to break this driver due to amount of abuse of in-kernel interfaces
+ * between modules and FW.
+ *
+ * DO NOT UPDATE DRV_MODULE_VERSION below.
+ */
#define DRV_MODULE_VERSION "1.713.36-0"
-#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 4a0ba6801c9e..7cea33803f7f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1112,13 +1112,6 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
u32 mbi;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
-
- memset(version, 0, sizeof(version));
- snprintf(version, ETHTOOL_FWVERS_LEN, " storm %d.%d.%d.%d",
- BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION);
- strlcat(info->version, version, sizeof(info->version));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -3663,6 +3656,7 @@ static int bnx2x_get_ts_info(struct net_device *dev,
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9638d65d8261..517caedc0a87 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6874,7 +6874,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
/* In this option, the first PHY makes sure to pass the
* traffic through itself only.
- * Its not clear how to reset the link on the second phy
+ * It's not clear how to reset the link on the second
+ * phy.
*/
active_external_phy = EXT_PHY1;
break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1c26fa962233..db5107e7937c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -81,17 +81,12 @@
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
-static char version[] =
- "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
- DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
"BCM57710/57711/57711E/"
"57712/57712_MF/57800/57800_MF/57810/57810_MF/"
"57840/57840_MF Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
@@ -14480,8 +14475,6 @@ static int __init bnx2x_init(void)
{
int ret;
- pr_info("%s", version);
-
bnx2x_wq = create_singlethread_workqueue("bnx2x");
if (bnx2x_wq == NULL) {
pr_err("Cannot create workqueue\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d28b406a26b1..58e0d9a781e9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -70,12 +70,8 @@
#define BNXT_TX_TIMEOUT (5 * HZ)
-static const char version[] =
- "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
-MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
@@ -2166,6 +2162,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct tx_cmp *txcmp;
cpr->has_more_work = 0;
+ cpr->had_work_done = 1;
while (1) {
int rc;
@@ -2179,7 +2176,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- cpr->had_work_done = 1;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
@@ -2396,7 +2392,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
- u64 dbr_type, bool all)
+ u64 dbr_type)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i;
@@ -2405,7 +2401,7 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
struct bnxt_db_info *db;
- if (cpr2 && (all || cpr2->had_work_done)) {
+ if (cpr2 && cpr2->had_work_done) {
db = &cpr2->cp_db;
writeq(db->db_key64 | dbr_type |
RING_CMP(cpr2->cp_raw_cons), db->doorbell);
@@ -2428,22 +2424,16 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
if (cpr->has_more_work) {
cpr->has_more_work = 0;
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
- if (cpr->has_more_work) {
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
- return work_done;
- }
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
- if (napi_complete_done(napi, work_done))
- BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
- return work_done;
}
while (1) {
cons = RING_CMP(raw_cons);
nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
- false);
+ if (cpr->has_more_work)
+ break;
+
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
cpr->cp_raw_cons = raw_cons;
if (napi_complete_done(napi, work_done))
BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
@@ -2463,16 +2453,17 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
- cpr->has_more_work = cpr2->has_more_work;
+ cpr->has_more_work |= cpr2->has_more_work;
} else {
bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (cpr->has_more_work)
- break;
}
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
- cpr->cp_raw_cons = raw_cons;
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
+ if (raw_cons != cpr->cp_raw_cons) {
+ cpr->cp_raw_cons = raw_cons;
+ BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
+ }
return work_done;
}
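The reworked bnxt_poll_p5() accumulates has_more_work across the sub-rings with |= instead of overwriting it, rings the CQ doorbell once per pass, and writes the NQ doorbell only when the consumer index actually advanced, cutting doorbell traffic without changing semantics. The surrounding structure is the standard budget-driven NAPI contract; a generic sketch with hypothetical foo_* helpers:

static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
        int work_done = foo_process_ring(ring, budget);

        /* Re-arm the interrupt only when NAPI agrees polling is done;
         * returning the full budget keeps us on the poll list.
         */
        if (work_done < budget && napi_complete_done(napi, work_done))
                foo_irq_enable(ring);

        return work_done;
}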
@@ -4170,6 +4161,7 @@ static int bnxt_hwrm_to_stderr(u32 hwrm_err)
case HWRM_ERR_CODE_NO_BUFFER:
return -ENOMEM;
case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP;
@@ -4184,14 +4176,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int i, intr_process, rc, tmo_count;
struct input *req = msg;
u32 *data = msg;
- __le32 *resp_len;
u8 *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
struct hwrm_short_input short_input = {0};
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
- u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
@@ -4209,7 +4199,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
bar_offset = BNXT_GRCPF_REG_KONG_COMM;
doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
resp = bp->hwrm_cmd_kong_resp_addr;
- resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
}
memset(resp, 0, PAGE_SIZE);
@@ -4278,7 +4267,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
- resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
if (intr_process) {
u16 seq_id = bp->hwrm_intr_seq_id;
@@ -4306,9 +4294,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
le16_to_cpu(req->req_type));
return -EBUSY;
}
- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
- HWRM_RESP_LEN_SFT;
- valid = resp_addr + len - 1;
+ len = le16_to_cpu(resp->resp_len);
+ valid = ((u8 *)resp) + len - 1;
} else {
int j;
@@ -4319,8 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
*/
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
return -EBUSY;
- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
- HWRM_RESP_LEN_SFT;
+ len = le16_to_cpu(resp->resp_len);
if (len)
break;
/* on first few passes, just barely sleep */
@@ -4342,7 +4328,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
/* Last byte of resp contains valid bit */
- valid = resp_addr + len - 1;
+ valid = ((u8 *)resp) + len - 1;
for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
/* make sure we read from updated DMA memory */
dma_rmb();
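With the response length now carried in the response structure's own __le16 resp_len field, the resp_addr/HWRM_RESP_LEN_* pointer arithmetic disappears. The polling idiom itself is worth spelling out: the device DMA-writes the response, the last byte acts as a validity marker, and dma_rmb() must separate the length/valid reads from any use of the payload. A sketch under hypothetical names (struct foo_resp, FOO_TMO):

struct foo_resp {
        __le16 resp_len;        /* total bytes DMA'd; valid byte is last */
        u8 payload[126];
};

static int foo_wait_resp(struct foo_resp *resp)  /* DMA-coherent buffer */
{
        int i;

        for (i = 0; i < FOO_TMO; i++) {
                u16 len = le16_to_cpu(READ_ONCE(resp->resp_len));

                if (len) {
                        u8 *valid = (u8 *)resp + len - 1;

                        dma_rmb();      /* device writes before our reads */
                        if (READ_ONCE(*valid))
                                return 0;
                }
                usleep_range(25, 50);
        }
        return -ETIMEDOUT;
}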
@@ -5069,10 +5055,8 @@ vnic_mru:
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
- u32 rc = 0;
-
if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input req = {0};
@@ -5080,10 +5064,9 @@ static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
req.vnic_id =
cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
}
- return rc;
}
static void bnxt_hwrm_vnic_free(struct bnxt *bp)
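bnxt_hwrm_vnic_free_one() and the ring-group/stat-ctx frees below are converted from int to void: every caller ignored the return value, and there is no sensible recovery from a failed free on a teardown path, so the rc plumbing was dead weight. The same reasoning collapses the many "rc = f(); return rc;" tails throughout this patch to "return f();". Shape of the conversion, names hypothetical:

static void foo_res_free(struct foo *bp)          /* was: static int */
{
        if (bp->res_id == INVALID_HW_RING_ID)
                return;

        foo_send_free_cmd(bp, bp->res_id);        /* status ignored */
        bp->res_id = INVALID_HW_RING_ID;
}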
@@ -5200,14 +5183,13 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
u16 i;
- u32 rc = 0;
struct hwrm_ring_grp_free_input req = {0};
if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
- return 0;
+ return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
@@ -5218,12 +5200,10 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
req.ring_group_id =
cpu_to_le32(bp->grp_info[i].fw_grp_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
}
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
@@ -5847,8 +5827,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (bp->hwrm_spec_code < 0x10601)
bp->hw_resc.resv_tx_rings = tx_rings;
- rc = bnxt_hwrm_get_rings(bp);
- return rc;
+ return bnxt_hwrm_get_rings(bp);
}
static int
@@ -5869,8 +5848,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (rc)
return rc;
- rc = bnxt_hwrm_get_rings(bp);
- return rc;
+ return bnxt_hwrm_get_rings(bp);
}
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
@@ -6030,7 +6008,6 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
{
struct hwrm_func_vf_cfg_input req = {0};
u32 flags;
- int rc;
if (!BNXT_NEW_RM(bp))
return 0;
@@ -6047,8 +6024,8 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -6057,7 +6034,6 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
{
struct hwrm_func_cfg_input req = {0};
u32 flags;
- int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, stats, vnics);
@@ -6075,8 +6051,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -6315,16 +6291,16 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
- int rc = 0, i;
struct hwrm_stat_ctx_free_input req = {0};
+ int i;
if (!bp->bnapi)
- return 0;
+ return;
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- return 0;
+ return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
@@ -6336,14 +6312,13 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
}
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
@@ -6548,8 +6523,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
__le64 *pg_dir;
u32 flags = 0;
u8 *pg_attr;
- int i, rc;
u32 ena;
+ int i;
if (!ctx)
return 0;
@@ -6636,8 +6611,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -6662,7 +6636,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
int rc;
if (!mem_size)
- return 0;
+ return -EINVAL;
ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -7243,7 +7217,7 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
- u32 dev_caps_cfg;
+ u32 dev_caps_cfg, hwrm_ver;
int rc;
bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
@@ -7263,6 +7237,19 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_upd_8b);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
+
+ hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
+ HWRM_VERSION_UPDATE;
+
+ if (bp->hwrm_spec_code > hwrm_ver)
+ snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+ HWRM_VERSION_UPDATE);
+ else
+ snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
+
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
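The new hwrm_ver_supp string records the HWRM interface version the pair can actually speak: the driver packs its compiled-in HWRM_VERSION_{MAJOR,MINOR,UPDATE} into one comparable integer and advertises whichever of that and the firmware's spec code is lower. The packing is plain positional shifting: 1.10.1 becomes (1 << 16) | (10 << 8) | 1 = 0x010A01, so an ordinary integer compare orders versions correctly. A worked helper (hypothetical name):

static u32 foo_pack_ver(u8 maj, u8 min, u8 upd)
{
        return (u32)maj << 16 | min << 8 | upd;   /* 1.10.1 -> 0x010A01 */
}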
@@ -7341,7 +7328,6 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
- int rc;
struct bnxt_pf_info *pf = &bp->pf;
struct hwrm_port_qstats_input req = {0};
@@ -7352,8 +7338,7 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
@@ -7515,7 +7500,6 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -7526,14 +7510,12 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
else
return -EINVAL;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
return 0;
@@ -7545,8 +7527,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
if (size == 128)
req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
@@ -8804,6 +8785,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -8899,14 +8881,12 @@ int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
{
struct hwrm_wol_filter_free_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
req.port_id = cpu_to_le16(bp->pf.port_id);
req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
req.wol_filter_id = bp->wol_filter_id;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
@@ -9324,7 +9304,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
- if (bp->bnapi)
+ if (bp->bnapi && irq_re_init)
bnxt_get_ring_stats(bp, &bp->net_stats_prev);
if (irq_re_init) {
bnxt_free_irq(bp);
@@ -9794,6 +9774,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_features_t vlan_features;
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
@@ -9810,12 +9791,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
/* Both CTAG and STAG VLAN accelaration on the RX side have to be
* turned on or off together.
*/
- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX)) {
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX);
- else
+ else if (vlan_features)
features |= NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX;
}
@@ -11464,6 +11447,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_sriov_disable(bp);
bnxt_dl_fw_reporters_destroy(bp, true);
+ if (BNXT_PF(bp))
+ devlink_port_type_clear(&bp->dl_port);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_dl_unregister(bp);
@@ -11764,30 +11749,82 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
+#define BNXT_VPD_LEN 512
+static void bnxt_vpd_read_info(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i, len, pos, ro_size;
+ ssize_t vpd_size;
+ u8 *vpd_data;
+
+ vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
+ if (!vpd_data)
+ return;
+
+ vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
+ if (vpd_size <= 0) {
+ netdev_err(bp->dev, "Unable to read VPD\n");
+ goto exit;
+ }
+
+ i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0) {
+ netdev_err(bp->dev, "VPD READ-Only not found\n");
+ goto exit;
+ }
+
+ ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ if (i + ro_size > vpd_size)
+ goto exit;
+
+ pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
+ PCI_VPD_RO_KEYWORD_PARTNO);
+ if (pos < 0)
+ goto read_sn;
+
+ len = pci_vpd_info_field_size(&vpd_data[pos]);
+ pos += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (len + pos > vpd_size)
+ goto read_sn;
+
+ strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
+
+read_sn:
+ pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO);
+ if (pos < 0)
+ goto exit;
+
+ len = pci_vpd_info_field_size(&vpd_data[pos]);
+ pos += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (len + pos > vpd_size)
+ goto exit;
+
+ strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
+exit:
+ kfree(vpd_data);
+}
+
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
struct pci_dev *pdev = bp->pdev;
- int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- u32 dw;
+ u64 qword;
- if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN\n");
+ qword = pci_get_dsn(pdev);
+ if (!qword) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN\n");
return -EOPNOTSUPP;
}
- /* DSN (two dw) is at an offset of 4 from the cap pos */
- pos += 4;
- pci_read_config_dword(pdev, pos, &dw);
- put_unaligned_le32(dw, &dsn[0]);
- pci_read_config_dword(pdev, pos + 4, &dw);
- put_unaligned_le32(dw, &dsn[4]);
+ put_unaligned_le64(qword, dsn);
+
bp->flags |= BNXT_FLAG_DSN_VALID;
return 0;
}
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
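bnxt_pcie_dsn_get() now uses pci_get_dsn(), added to the PCI core in v5.7: it locates PCI_EXT_CAP_ID_DSN, reads the two config dwords, and returns them as a single u64 (0 when the capability is absent), replacing the open-coded offset arithmetic (and fixing the "Unable do read" typo along the way). Usage sketch with a hypothetical wrapper:

#include <linux/pci.h>
#include <asm/unaligned.h>

static int foo_read_dsn(struct pci_dev *pdev, u8 dsn[8])
{
        u64 qword = pci_get_dsn(pdev);   /* 0 == no DSN capability */

        if (!qword)
                return -EOPNOTSUPP;

        put_unaligned_le64(qword, dsn);  /* little-endian byte order */
        return 0;
}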
@@ -11795,9 +11832,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
- if (version_printed++ == 0)
- pr_info("%s", version);
-
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -11829,6 +11863,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
+ bnxt_vpd_read_info(bp);
+
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
goto init_err_pci_clean;
@@ -12173,12 +12209,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
bnxt_ulp_start(bp, err);
}
- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
- dev_close(netdev);
+ if (result != PCI_ERS_RESULT_RECOVERED) {
+ if (netif_running(netdev))
+ dev_close(netdev);
+ pci_disable_device(pdev);
+ }
rtnl_unlock();
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
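The error-recovery fix above matters: bnxt_io_slot_reset() used to claim PCI_ERS_RESULT_RECOVERED unconditionally, so the AER core would resume I/O even when reinitialization had failed. It now returns the real outcome and disables the device on failure. Sketch of the corrected .slot_reset contract (foo_* hypothetical):

static pci_ers_result_t foo_io_slot_reset(struct pci_dev *pdev)
{
        pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

        if (!pci_enable_device(pdev) && !foo_hw_reinit(pdev))
                result = PCI_ERS_RESULT_RECOVERED;
        else
                pci_disable_device(pdev);  /* tell the core we're dead */

        return result;
}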
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 63b170658532..3d39638521d6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,8 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.10.1"
+/* DO NOT CHANGE DRV_VER_* defines
+ * FIXME: Delete them
+ */
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
#define DRV_VER_UPD 1
@@ -654,11 +656,6 @@ struct nqe_cn {
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
-#define HWRM_RESP_ERR_CODE_MASK 0xffff
-#define HWRM_RESP_LEN_OFFSET 4
-#define HWRM_RESP_LEN_MASK 0xffff0000
-#define HWRM_RESP_LEN_SFT 16
-#define HWRM_RESP_VALID_MASK 0xff000000
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
@@ -1064,7 +1061,6 @@ struct bnxt_vf_info {
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
#define BNXT_VF_TRUST 0x10
- u32 func_flags; /* func cfg flags */
u32 min_tx_rate;
u32 max_tx_rate;
void *hwrm_cmd_req_addr;
@@ -1498,6 +1494,10 @@ struct bnxt {
(chip_num) == CHIP_NUM_58804 || \
(chip_num) == CHIP_NUM_58808)
+#define BNXT_VPD_FLD_LEN 32
+ char board_partno[BNXT_VPD_FLD_LEN];
+ char board_serialno[BNXT_VPD_FLD_LEN];
+
struct net_device *dev;
struct pci_dev *pdev;
@@ -1728,6 +1728,7 @@ struct bnxt {
#define BC_HWRM_STR_LEN 21
#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
char fw_ver_str[FW_VER_STR_LEN];
+ char hwrm_ver_supp[FW_VER_STR_LEN];
__be16 vxlan_port;
u8 vxlan_port_cnt;
__le16 vxlan_fw_dst_port_id;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index b1511bcffb1b..02b27551d34d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -39,8 +39,8 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_cfg_input req = {0};
- int rc = 0, i;
u8 *pri2cos;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
@@ -56,8 +56,7 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
qidx = bp->tc_to_qidx[ets->prio_tc[i]];
pri2cos[i] = bp->q_info[qidx].queue_id;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
@@ -93,8 +92,8 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
{
struct hwrm_queue_cos2bw_cfg_input req = {0};
struct bnxt_cos2bw_cfg cos2bw;
- int rc = 0, i;
void *data;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
@@ -128,8 +127,7 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
req.unused_0 = 0;
}
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
@@ -236,7 +234,6 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
bool need_q_remap = false;
- int rc;
if (!my_ets)
return -EINVAL;
@@ -267,15 +264,11 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
}
if (need_q_remap)
- rc = bnxt_queue_remap(bp, tc_mask);
+ bnxt_queue_remap(bp, tc_mask);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req.flags = cpu_to_le32(pri_mask);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return rc;
-
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index d3c93ccee86a..a812beb46325 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -150,7 +150,7 @@ void bnxt_dl_fw_reporters_create(struct bnxt *bp)
health->fw_reset_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_reset_reporter_ops,
- 0, true, bp);
+ 0, bp);
if (IS_ERR(health->fw_reset_reporter)) {
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_reset_reporter));
@@ -166,7 +166,7 @@ err_recovery:
health->fw_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_reporter_ops,
- 0, false, bp);
+ 0, bp);
if (IS_ERR(health->fw_reporter)) {
netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
PTR_ERR(health->fw_reporter));
@@ -182,7 +182,7 @@ err_recovery:
health->fw_fatal_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_fatal_reporter_ops,
- 0, true, bp);
+ 0, bp);
if (IS_ERR(health->fw_fatal_reporter)) {
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
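These three hunks track the devlink core change that removed the auto_recover bool from devlink_health_reporter_create(); auto-recovery is now implied by the reporter ops providing a ->recover callback, which is why the true/false arguments simply vanish. Post-change sketch (foo_* names hypothetical):

static const struct devlink_health_reporter_ops foo_reporter_ops = {
        .name    = "fw",
        .recover = foo_fw_recover,   /* non-NULL => auto-recover */
};

static int foo_reporters_create(struct devlink *dl, void *priv)
{
        struct devlink_health_reporter *r;

        r = devlink_health_reporter_create(dl, &foo_reporter_ops,
                                           0 /* graceful period */, priv);
        return PTR_ERR_OR_ZERO(r);
}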
@@ -403,6 +403,14 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
+ if (strlen(bp->board_partno)) {
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bp->board_partno);
+ if (rc)
+ return rc;
+ }
+
sprintf(buf, "%X", bp->chip_num);
rc = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
@@ -471,13 +479,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
}
rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_APP, fw_ver);
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, fw_ver);
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bp->hwrm_ver_supp);
if (rc)
return rc;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, mgmt_ver);
if (rc)
return rc;
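The key shuffle aligns bnxt with the generic devlink version names: the application firmware moves from "fw.app" to "fw.mgmt", the supported HWRM interface is published under the then-new "fw.mgmt.api", and the management-controller image goes out as "fw.ncsi"; userspace reads all of these with `devlink dev info`. Reporting sketch (error handling trimmed, foo_* and the version strings hypothetical):

static int foo_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
                           struct netlink_ext_ack *extack)
{
        devlink_info_version_running_put(req,
                        DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, "1.2.3");
        return devlink_info_version_running_put(req,
                        DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API, "1.10.1");
}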
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 95f893f2a74d..d5c8bd49383a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -43,7 +43,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
#define BNXT_NVM_CFG_VER_BITS 24
#define BNXT_NVM_CFG_VER_BYTES 4
-#define BNXT_MSIX_VEC_MAX 1280
+#define BNXT_MSIX_VEC_MAX 512
#define BNXT_MSIX_VEC_MIN_MAX 128
enum bnxt_nvm_dir_type {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 3f8a1ded662a..360f9a95c1d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1236,7 +1236,6 @@ static void bnxt_get_drvinfo(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
@@ -2013,11 +2012,12 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
bnxt_hwrm_fw_set_time(bp);
- if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
- BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, &item_len, NULL) != 0) {
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, &item_len, NULL);
+ if (rc) {
netdev_err(dev, "PKG update area not created in nvram\n");
- return -ENOBUFS;
+ return rc;
}
rc = request_firmware(&fw, filename, &dev->dev);
@@ -2605,7 +2605,7 @@ static int bnxt_set_phys_id(struct net_device *dev,
struct bnxt_led_cfg *led_cfg;
u8 led_state;
__le16 duration;
- int i, rc;
+ int i;
if (!bp->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
@@ -2631,8 +2631,7 @@ static int bnxt_set_phys_id(struct net_device *dev,
led_cfg->led_blink_off = duration;
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
@@ -3471,6 +3470,12 @@ void bnxt_ethtool_free(struct bnxt *bp)
}
const struct ethtool_ops bnxt_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 2aba1e02a8f4..cea2f9958a1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
if (old_setting == setting)
return 0;
- func_flags = vf->func_flags;
if (setting)
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
else
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
/*TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
req.flags = cpu_to_le32(func_flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
- vf->func_flags = func_flags;
if (setting)
vf->flags |= BNXT_VF_SPOOFCHK;
else
@@ -138,7 +136,6 @@ static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return 0;
@@ -149,8 +146,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
else
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@@ -230,7 +226,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
memcpy(vf->mac_addr, mac, ETH_ALEN);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -268,7 +263,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.dflt_vlan = cpu_to_le16(vlan_tag);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -307,7 +301,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req.max_bw = cpu_to_le32(max_tx_rate);
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
@@ -479,7 +472,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
vf = &bp->pf.vf[vf_id];
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
if (is_valid_ether_addr(vf->mac_addr)) {
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 9bec256b0934..782ea0771221 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -279,7 +279,8 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- struct flow_action *flow_action)
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
/* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
* smac (6 bytes) if rewrite of both is specified, otherwise either
@@ -299,6 +300,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
return -EINVAL;
}
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -491,7 +495,8 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
flow->tun_mask.tp_src = match.mask->src;
}
- return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
+ return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action,
+ tc_flow_cmd->common.extack);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
@@ -1634,7 +1639,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
spin_unlock(&flow->stats_lock);
flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
- lastused);
+ lastused, FLOW_ACTION_HW_STATS_DELAYED);
return 0;
}
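Two flow-offload API updates land here: drivers must check that they can honour the requested hardware-stats type before parsing actions (flow_action_basic_hw_stats_check() reports a refusal through extack), and flow_stats_update() now takes the granularity the hardware really delivers, FLOW_ACTION_HW_STATS_DELAYED in bnxt's case since its counters are polled rather than read on demand. Call-site sketch with hypothetical foo_* helpers:

static int foo_setup_flower(struct foo *priv, struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);

        if (!flow_action_basic_hw_stats_check(&rule->action,
                                              f->common.extack))
                return -EOPNOTSUPP;

        return foo_program_rule(priv, rule);
}

static void foo_report_stats(struct foo_flow *flow,
                             struct flow_cls_offload *f)
{
        flow_stats_update(&f->stats, flow->bytes, flow->packets,
                          flow->lastused, FLOW_ACTION_HW_STATS_DELAYED);
}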
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 6f2faf81c1ae..4b5c8fd76a51 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -219,7 +219,6 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 1d678bee2cc9..79636c78127c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "bcmgenet: " fmt
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -621,10 +622,6 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
* always generate an interrupt either after MBDONE packets have been
* transmitted, or when the ring is empty.
*/
- if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
- ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low ||
- ec->use_adaptive_tx_coalesce)
- return -EOPNOTSUPP;
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
@@ -814,7 +811,6 @@ static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
- strlcpy(info->version, "v2.0", sizeof(info->version));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -938,6 +934,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
+ dev->netdev_ops->ndo_get_stats(dev);
+
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
@@ -1049,6 +1047,9 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.begin = bcmgenet_begin,
.complete = bcmgenet_complete,
.get_strings = bcmgenet_get_strings,
@@ -1623,7 +1624,8 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
dma_addr_t mapping;
/* Allocate a new Rx skb */
- skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
+ skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
+ GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, priv->dev,
@@ -2713,6 +2715,21 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
+static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr)
+{
+ u32 addr_tmp;
+
+ addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
+ addr[0] = addr_tmp >> 24;
+ addr[1] = (addr_tmp >> 16) & 0xff;
+ addr[2] = (addr_tmp >> 8) & 0xff;
+ addr[3] = addr_tmp & 0xff;
+ addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
+ addr[4] = (addr_tmp >> 8) & 0xff;
+ addr[5] = addr_tmp & 0xff;
+}
+
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
@@ -3142,6 +3159,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
dev->stats.rx_packets = rx_packets;
dev->stats.rx_errors = rx_errors;
dev->stats.rx_missed_errors = rx_errors;
+ dev->stats.rx_dropped = rx_dropped;
return &dev->stats;
}
@@ -3408,10 +3426,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
- const void *macaddr;
unsigned int i;
int err = -EIO;
- const char *phy_mode_str;
/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3440,11 +3456,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
- if (dn)
- macaddr = of_get_mac_address(dn);
- else
- macaddr = pd->mac_address;
-
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
err = PTR_ERR(priv->base);
@@ -3455,12 +3466,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
- if (IS_ERR_OR_NULL(macaddr) || !is_valid_ether_addr(macaddr)) {
- dev_warn(&pdev->dev, "using random Ethernet MAC\n");
- eth_hw_addr_random(dev);
- } else {
- ether_addr_copy(dev->dev_addr, macaddr);
- }
dev->watchdog_timeo = 2 * HZ;
dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
@@ -3489,8 +3494,9 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dev = dev;
priv->pdev = pdev;
- if (of_id) {
- pdata = of_id->data;
+
+ pdata = device_get_match_data(&pdev->dev);
+ if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
} else {
@@ -3500,7 +3506,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
- dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+ dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
priv->clk = NULL;
}
@@ -3524,23 +3530,34 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
if (IS_ERR(priv->clk_wol)) {
- dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+ dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
priv->clk_wol = NULL;
}
priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
- dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
+ dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
priv->clk_eee = NULL;
}
/* If this is an internal GPHY, power it on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
- if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
- !strcasecmp(phy_mode_str, "internal"))
+ if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+ if ((pd) && (!IS_ERR_OR_NULL(pd->mac_address)))
+ ether_addr_copy(dev->dev_addr, pd->mac_address);
+ else
+ if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
+ if (has_acpi_companion(&pdev->dev))
+ bcmgenet_get_hw_addr(priv, dev->dev_addr);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ eth_hw_addr_random(dev);
+ }
+
reset_umac(priv);
err = bcmgenet_mii_init(dev);
@@ -3713,6 +3730,12 @@ static int bcmgenet_suspend(struct device *d)
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
+static const struct acpi_device_id genet_acpi_match[] = {
+ { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
+
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
.remove = bcmgenet_remove,
@@ -3721,6 +3744,7 @@ static struct platform_driver bcmgenet_driver = {
.name = "bcmgenet",
.of_match_table = bcmgenet_match,
.pm = &bcmgenet_pm_ops,
+ .acpi_match_table = ACPI_PTR(genet_acpi_match),
},
};
module_platform_driver(bcmgenet_driver);
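The bcmgenet changes make probe firmware-agnostic: device_get_match_data(), device_get_phy_mode() and device_get_mac_address() resolve through either the OF or the ACPI companion, and the new "BCM6E4E" ACPI ID reuses the BCM2711 platform data (the Raspberry Pi 4 SoC). The MAC-address fallback chain becomes platform data, then firmware property, then whatever the bootloader left in the UMAC registers, then a random address. Skeleton of the dual-enumeration pattern (foo_* hypothetical):

#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int foo_probe(struct platform_device *pdev)
{
        const struct foo_plat_data *pdata;

        /* resolves via of_match_table or acpi_match_table alike */
        pdata = device_get_match_data(&pdev->dev);
        if (!pdata)
                return -ENODEV;

        return foo_hw_init(pdev, pdata);
}

static const struct acpi_device_id foo_acpi_match[] = {
        { "BCM6E4E", (kernel_ulong_t)&foo_bcm2711_data },
        { }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);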
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b5930f80039d..511d553a4d11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -5,7 +5,7 @@
* Copyright (c) 2014-2017 Broadcom
*/
-
+#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/wait.h>
@@ -284,7 +284,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ struct device_node *dn = kdev->of_node;
struct phy_device *phydev;
u32 phy_flags = 0;
int ret;
@@ -307,7 +308,27 @@ int bcmgenet_mii_probe(struct net_device *dev)
return -ENODEV;
}
} else {
- phydev = dev->phydev;
+ if (has_acpi_companion(kdev)) {
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ struct mii_bus *unimacbus;
+
+ snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d",
+ UNIMAC_MDIO_DRV_NAME, priv->pdev->id);
+
+ unimacbus = mdio_find_bus(mdio_bus_id);
+ if (!unimacbus) {
+ pr_err("Unable to find mii\n");
+ return -ENODEV;
+ }
+ phydev = phy_find_first(unimacbus);
+ put_device(&unimacbus->dev);
+ if (!phydev) {
+ pr_err("Unable to find PHY\n");
+ return -ENODEV;
+ }
+ } else {
+ phydev = dev->phydev;
+ }
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
@@ -428,9 +449,12 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
/* Retain this platform_device pointer for later cleanup */
priv->mii_pdev = ppdev;
ppdev->dev.parent = &pdev->dev;
- ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv);
- if (pdata)
+ if (dn)
+ ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv);
+ else if (pdata)
bcmgenet_mii_pdata_init(priv, &ppd);
+ else
+ ppd.phy_mask = ~0;
ret = platform_device_add_resources(ppdev, &res, 1);
if (ret)
@@ -450,12 +474,33 @@ out:
return ret;
}
+static int bcmgenet_phy_interface_init(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ int phy_mode = device_get_phy_mode(kdev);
+
+ if (phy_mode < 0) {
+ dev_err(kdev, "invalid PHY mode property\n");
+ return phy_mode;
+ }
+
+ priv->phy_interface = phy_mode;
+
+ /* We need to specifically look up whether this PHY interface is
+ * internal or not *before* we even try to probe the PHY driver
+ * over MDIO as we may have shut down the internal PHY for power
+ * saving purposes.
+ */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ priv->internal_phy = true;
+
+ return 0;
+}
+
static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
- struct device *kdev = &priv->pdev->dev;
struct phy_device *phydev;
- phy_interface_t phy_mode;
int ret;
/* Fetch the PHY phandle */
@@ -473,23 +518,12 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
}
/* Get the link mode */
- ret = of_get_phy_mode(dn, &phy_mode);
- if (ret) {
- dev_err(kdev, "invalid PHY mode property\n");
+ ret = bcmgenet_phy_interface_init(priv);
+ if (ret)
return ret;
- }
-
- priv->phy_interface = phy_mode;
-
- /* We need to specifically look up whether this PHY interface is internal
- * or not *before* we even try to probe the PHY driver over MDIO as we
- * may have shut down the internal PHY for power saving purposes.
- */
- if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
- priv->internal_phy = true;
/* Make sure we initialize MoCA PHYs with a link down */
- if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
phydev = of_phy_find_device(dn);
if (phydev) {
phydev->link = 0;
@@ -554,10 +588,13 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
{
- struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ struct device_node *dn = kdev->of_node;
if (dn)
return bcmgenet_mii_of_init(priv);
+ else if (has_acpi_companion(kdev))
+ return bcmgenet_phy_interface_init(priv);
else
return bcmgenet_mii_pd_init(priv);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 88466255bf66..ff98a82b7bc4 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -96,11 +96,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME "tg3"
+/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM 3
#define TG3_MIN_NUM 137
-#define DRV_MODULE_VERSION \
- __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "May 11, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -222,13 +220,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
-static char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
-
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
@@ -12317,7 +12311,6 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct tg3 *tp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
@@ -14160,6 +14153,11 @@ static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
}
static const struct ethtool_ops tg3_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_drvinfo = tg3_get_drvinfo,
.get_regs_len = tg3_get_regs_len,
.get_regs = tg3_get_regs,
@@ -17625,8 +17623,6 @@ static int tg3_init_one(struct pci_dev *pdev,
u64 dma_mask, persist_dma_mask;
netdev_features_t features = 0;
- printk_once(KERN_INFO "%s\n", version);
-
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 01a50a4b2113..cc80bbbefe87 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2504,12 +2504,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
IPPROTO_TCP, 0);
BNAD_UPDATE_CTR(bnad, tso4);
} else {
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
- ipv6h->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
- IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
BNAD_UPDATE_CTR(bnad, tso6);
}
@@ -3847,9 +3842,6 @@ bnad_module_init(void)
{
int err;
- pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
- BNAD_VERSION);
-
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
err = pci_register_driver(&bnad_pci_driver);
@@ -3874,6 +3866,5 @@ module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
-MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 492a02d54f14..627a93ce38ab 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -64,8 +64,6 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.25.1"
-
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
#define BNAD_INTX_TX_IB_BITMASK 0x1
@@ -253,7 +251,7 @@ struct bnad_rx_unmap_q {
int alloc_order;
u32 map_size;
enum bnad_rxbuf_type type;
- struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
+ struct bnad_rx_unmap unmap[] ____cacheline_aligned;
};
#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
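unmap[0] -> unmap[] is the tree-wide move from GCC zero-length arrays to C99 flexible array members, which lets the compiler and fortify checks see the true bounds. Allocating such a struct normally pairs with struct_size() to keep the size arithmetic overflow-checked; a sketch (foo_alloc_unmap_q is hypothetical, the struct is the one above):

#include <linux/overflow.h>
#include <linux/slab.h>

static struct bnad_rx_unmap_q *foo_alloc_unmap_q(unsigned int n)
{
        struct bnad_rx_unmap_q *q;

        /* sizeof(*q) + n * sizeof(q->unmap[0]), overflow-checked */
        q = kzalloc(struct_size(q, unmap, n), GFP_KERNEL);
        return q;
}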
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index b764c9ff9ad1..588c4804d10a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -284,7 +284,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
unsigned long flags;
strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -1116,6 +1115,9 @@ out:
}
static const struct ethtool_ops bnad_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = bnad_get_drvinfo,
.get_wol = bnad_get_wol,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 53b50c24d9c9..2c4c12b03502 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -35,8 +35,8 @@ config MACB
config MACB_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
depends on MACB
+ depends on PTP_1588_CLOCK
default y
- imply PTP_1588_CLOCK
---help---
Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB.
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index a3f0f27fc79a..ab827fb4b6b9 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1200,7 +1200,6 @@ struct macb {
unsigned int dma_burst_length;
phy_interface_t phy_interface;
- int speed;
/* AT91RM9200 transmit */
struct sk_buff *skb; /* holds skb until xmit interrupt completes */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2c28da1737fe..36290a8e2a84 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
int status;
status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0)
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
goto mdio_pm_exit;
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -386,8 +388,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
int status;
status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0)
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
goto mdio_pm_exit;
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
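Both MDIO accessors gain the same fix: pm_runtime_get_sync() raises the usage count even when the resume fails, so the error path must drop that reference with pm_runtime_put_noidle() or the device can never runtime-suspend again. The canonical pairing (foo_touch_hw hypothetical):

static int foo_do_io(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
                pm_runtime_put_noidle(dev);   /* balance the failed get */
                return ret;
        }

        ret = foo_touch_hw(dev);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return ret;
}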
@@ -571,37 +575,20 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
- /* Clear all the bits we might set later */
- ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE));
-
if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
if (state->interface == PHY_INTERFACE_MODE_RMII)
ctrl |= MACB_BIT(RM9200_RMII);
} else {
- ctrl &= ~(GEM_BIT(GBE) | GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
-
- /* We do not support MLO_PAUSE_RX yet */
- if (state->pause & MLO_PAUSE_TX)
- ctrl |= MACB_BIT(PAE);
+ ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
if (state->interface == PHY_INTERFACE_MODE_SGMII)
ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
}
- if (state->speed == SPEED_1000)
- ctrl |= GEM_BIT(GBE);
- else if (state->speed == SPEED_100)
- ctrl |= MACB_BIT(SPD);
-
- if (state->duplex)
- ctrl |= MACB_BIT(FD);
-
/* Apply the new configuration, if any */
if (old_ctrl ^ ctrl)
macb_or_gem_writel(bp, NCFGR, ctrl);
- bp->speed = state->speed;
-
spin_unlock_irqrestore(&bp->lock, flags);
}
@@ -626,16 +613,42 @@ static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
netif_tx_stop_all_queues(ndev);
}
-static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy)
+static void macb_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct net_device *ndev = to_net_dev(config->dev);
struct macb *bp = netdev_priv(ndev);
struct macb_queue *queue;
+ unsigned long flags;
unsigned int q;
+ u32 ctrl;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ ctrl = macb_or_gem_readl(bp, NCFGR);
+
+ ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+
+ if (speed == SPEED_100)
+ ctrl |= MACB_BIT(SPD);
+
+ if (duplex)
+ ctrl |= MACB_BIT(FD);
if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
- macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
+ ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(PAE));
+
+ if (speed == SPEED_1000)
+ ctrl |= GEM_BIT(GBE);
+
+ /* We do not support MLO_PAUSE_RX yet */
+ if (tx_pause)
+ ctrl |= MACB_BIT(PAE);
+
+ macb_set_tx_clk(bp->tx_clk, speed, ndev);
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
@@ -648,6 +661,10 @@ static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
}
+ macb_or_gem_writel(bp, NCFGR, ctrl);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
/* Enable Rx and Tx */
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
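This follows the phylink rework that passes the resolved link parameters straight into mac_link_up(): macb now programs speed, duplex and pause in NCFGR at the moment the link settles, instead of caching bp->speed from mac_config() and hoping the two calls stay coherent. The callback shape, exactly as used above (foo_* hypothetical):

static void foo_mac_link_up(struct phylink_config *config,
                            struct phy_device *phy, unsigned int mode,
                            phy_interface_t interface, int speed,
                            int duplex, bool tx_pause, bool rx_pause)
{
        /* program the MAC's speed/duplex/pause registers here */
}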
@@ -724,6 +741,9 @@ static int macb_mdiobus_register(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
+ if (of_phy_is_fixed_link(np))
+ return mdiobus_register(bp->mii_bus);
+
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
* old device tree that did not follow the best practices and did not
@@ -3800,8 +3820,10 @@ static int at91ether_open(struct net_device *dev)
int ret;
ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&lp->pdev->dev);
return ret;
+ }
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
@@ -4156,15 +4178,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
static int fu540_c000_init(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -ENODEV;
-
- mgmt->reg = ioremap(res->start, resource_size(res));
- if (!mgmt->reg)
- return -ENOMEM;
+ mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mgmt->reg))
+ return PTR_ERR(mgmt->reg);
return macb_init(pdev);
}
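devm_platform_ioremap_resource() folds the platform_get_resource()/ioremap() pair into one device-managed call: the mapping is released automatically on driver detach, and errors come back ERR_PTR-encoded rather than as NULL, hence the check flipping to IS_ERR(). Minimal sketch (foo_map_regs hypothetical):

#include <linux/platform_device.h>

static int foo_map_regs(struct platform_device *pdev, void __iomem **regs)
{
        *regs = devm_platform_ioremap_resource(pdev, 1);  /* mem index 1 */
        return PTR_ERR_OR_ZERO(*regs);
}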
@@ -4429,8 +4445,6 @@ static int macb_probe(struct platform_device *pdev)
else
bp->phy_interface = interface;
- bp->speed = SPEED_UNKNOWN;
-
/* IP specific init */
err = init(pdev);
if (err)
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 6a700d34019e..4520e7ee00fe 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -54,7 +54,7 @@ config THUNDER_NIC_RGX
config CAVIUM_PTP
tristate "Cavium PTP coprocessor as PTP clock"
depends on 64BIT && PCI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
---help---
This driver adds support for the Precision Time Protocol Clocks and
Timestamping coprocessor (PTP) found on Cavium processors.
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index b821c9e1604c..81ff9ac73f9a 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -13,6 +13,9 @@
#define DRV_NAME "cavium_ptp"
#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_88XX_PTP 0xA10C
+#define PCI_SUBSYS_DEVID_81XX_PTP 0XA20C
+#define PCI_SUBSYS_DEVID_83XX_PTP 0xA30C
#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
#define PCI_PTP_BAR_NO 0
@@ -321,7 +324,12 @@ static void cavium_ptp_remove(struct pci_dev *pdev)
}
static const struct pci_device_id cavium_ptp_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_88XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_81XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_83XX_PTP) },
{ 0, }
};
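Narrowing the table from PCI_DEVICE() to PCI_DEVICE_SUB() makes the driver bind only when the subsystem IDs also match, letting one device ID (0xA00C) be split across the 88xx/81xx/83xx SoC variants. PCI_DEVICE() leaves subvendor/subdevice as PCI_ANY_ID; the _SUB variant pins them:

static const struct pci_device_id foo_ids[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, 0xA00C,     /* device */
                         PCI_VENDOR_ID_CAVIUM, 0xA10C) },  /* subsys */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, foo_ids);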
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
index a04eccbc78e8..1e0ffe8f4152 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.h
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -24,7 +24,7 @@ struct cavium_ptp {
struct ptp_clock *ptp_clock;
};
-#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+#if IS_REACHABLE(CONFIG_CAVIUM_PTP)
struct cavium_ptp *cavium_ptp_get(void);
void cavium_ptp_put(struct cavium_ptp *ptp);
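IS_ENABLED(CONFIG_CAVIUM_PTP) is true whenever the option is y or m, but if the PTP driver is a module and the caller is built in, the symbols are not linkable and the build breaks. IS_REACHABLE() is true only when the code can actually be called from the current compilation unit (provider =y always; provider =m only if the caller is modular too), so the header's stub branch covers the unreachable combination. The pattern, with an illustrative stub return value:

#if IS_REACHABLE(CONFIG_CAVIUM_PTP)
struct cavium_ptp *cavium_ptp_get(void);
#else
static inline struct cavium_ptp *cavium_ptp_get(void)
{
        return ERR_PTR(-ENODEV);   /* illustrative; see the real header */
}
#endif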
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index abe5d0dac851..16eebfc52109 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -442,7 +442,6 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
strcpy(drvinfo->driver, "liquidio");
- strcpy(drvinfo->version, LIQUIDIO_VERSION);
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
@@ -459,7 +458,6 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
strcpy(drvinfo->driver, "liquidio_vf");
- strcpy(drvinfo->version, LIQUIDIO_VERSION);
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
@@ -3099,7 +3097,17 @@ static int lio_set_fecparam(struct net_device *netdev,
return 0;
}
+#define LIO_ETHTOOL_COALESCE (ETHTOOL_COALESCE_RX_USECS | \
+ ETHTOOL_COALESCE_MAX_FRAMES | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
+
static const struct ethtool_ops lio_ethtool_ops = {
+ .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
.get_link_ksettings = lio_get_link_ksettings,
.set_link_ksettings = lio_set_link_ksettings,
.get_fecparam = lio_get_fecparam,
@@ -3130,6 +3138,7 @@ static const struct ethtool_ops lio_ethtool_ops = {
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
+ .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
.get_link_ksettings = lio_get_link_ksettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_vf_drvinfo,
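/* Sketch (not part of the patch): .supported_coalesce_params lets the
 * ethtool core reject any user-requested coalesce field outside the
 * advertised mask with -EOPNOTSUPP before ->set_coalesce() is called,
 * replacing open-coded per-driver validation.  The example_* names are
 * hypothetical; the callback signatures match this kernel generation.
 */
static int example_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec)
{
	return 0;	/* report the current settings here */
}

static int example_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec)
{
	return 0;	/* apply the already-validated settings here */
}

static const struct ethtool_ops example_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_coalesce		   = example_get_coalesce,
	.set_coalesce		   = example_set_coalesce,
};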
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index eab05b5534ea..66d31c018c7e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -39,7 +39,6 @@
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
@@ -1376,7 +1375,6 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
{
u32 dev_id, rev_id;
int ret = 1;
- char *s;
pci_read_config_dword(oct->pci_dev, 0, &dev_id);
pci_read_config_dword(oct->pci_dev, 8, &rev_id);
@@ -1386,13 +1384,11 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
case OCTEON_CN68XX_PCIID:
oct->chip_id = OCTEON_CN68XX;
ret = lio_setup_cn68xx_octeon_device(oct);
- s = "CN68XX";
break;
case OCTEON_CN66XX_PCIID:
oct->chip_id = OCTEON_CN66XX;
ret = lio_setup_cn66xx_octeon_device(oct);
- s = "CN66XX";
break;
case OCTEON_CN23XX_PCIID_PF:
@@ -1405,22 +1401,13 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
pci_sriov_set_totalvfs(oct->pci_dev,
oct->sriov_info.max_vfs);
#endif
- s = "CN23XX";
break;
default:
- s = "?";
dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
dev_id);
}
- if (!ret)
- dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
- OCTEON_MAJOR_REV(oct),
- OCTEON_MINOR_REV(oct),
- octeon_get_conf(oct)->card_name,
- LIQUIDIO_VERSION);
-
return ret;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7a77544a54f5..bbd9bfa4a989 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -32,7 +32,6 @@
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(LIQUIDIO_VERSION);
static int debug = -1;
module_param(debug, int, 0644);
@@ -2352,8 +2351,8 @@ static int octeon_device_init(struct octeon_device *oct)
}
atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
- dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
- LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
+ dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
+ oct->sriov_info.rings_per_vf);
/* Setup the interrupt handler and record the INT SUM register address */
if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index a5e0e9f17959..4da90757cd3f 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -25,17 +25,11 @@
#include "octeon_config.h"
-#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
#define LIQUIDIO_BASE_MINOR_VERSION 7
#define LIQUIDIO_BASE_MICRO_VERSION 2
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
-#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
-#define LIQUIDIO_VERSION LIQUIDIO_PACKAGE \
- __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
- __stringify(LIQUIDIO_BASE_MINOR_VERSION) \
- "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
struct lio_version {
u16 major;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index dfc77507b159..0d2831d10f65 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -127,7 +127,7 @@ struct octeon_pci_console_desc {
u32 pad;
/* must be 64 bit aligned here... */
/* Array of addresses of octeon_pci_console structures */
- u64 console_addr_array[0];
+ u64 console_addr_array[];
/* Implicit storage for console_addr_array */
};
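/* Sketch (not part of the patch): the [0] -> [] conversions in this
 * series replace the old GNU zero-length-array trick with a C99
 * flexible array member, which the compiler and FORTIFY can
 * bounds-check.  Allocation is unchanged; struct_size() computes
 * header-plus-n-elements with overflow checking.  example_desc is a
 * hypothetical type.
 */
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_desc {
	u32 count;
	u64 addr_array[];	/* flexible array member, must be last */
};

static struct example_desc *example_alloc(u32 n)
{
	struct example_desc *d;

	d = kzalloc(struct_size(d, addr_array, n), GFP_KERNEL);
	if (d)
		d->count = n;
	return d;
}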
@@ -840,17 +840,11 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
return -EINVAL;
}
- if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
- LIQUIDIO_PACKAGE, h->version);
- return -EINVAL;
- }
-
- if (memcmp(LIQUIDIO_BASE_VERSION, h->version + strlen(LIQUIDIO_PACKAGE),
+ if (memcmp(LIQUIDIO_BASE_VERSION, h->version,
strlen(LIQUIDIO_BASE_VERSION))) {
dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
LIQUIDIO_BASE_VERSION,
- h->version + strlen(LIQUIDIO_PACKAGE));
+ h->version);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index e9575887a4f8..9d868403d86c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -28,7 +28,6 @@
#include <asm/octeon/cvmx-agl-defs.h>
#define DRV_NAME "octeon_mgmt"
-#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
"Cavium Networks Octeon MII (management) port Network Driver"
@@ -1340,9 +1339,6 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1517,7 +1513,6 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
if (result)
goto err;
- dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
return 0;
err:
@@ -1574,4 +1569,3 @@ module_exit(octeon_mgmt_mod_exit);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
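/* Sketch (not part of the patch): this series drops static DRV_VERSION
 * strings tree-wide because the ethtool core already pre-fills
 * drvinfo->version with the running kernel version (UTS_RELEASE), so a
 * minimal get_drvinfo only needs the driver name.  example_get_drvinfo
 * is a hypothetical name.
 */
static void example_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	/* info->version already holds UTS_RELEASE, filled by the core */
}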
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5e0b16bb95a0..83dabcffc789 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -16,7 +16,6 @@
#include "../common/cavium_ptp.h"
#define DRV_NAME "nicvf"
-#define DRV_VERSION "1.0"
struct nicvf_stat {
char name[ETH_GSTRING_LEN];
@@ -192,7 +191,6 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 016957285f99..b4b33368698f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -126,8 +126,7 @@ static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
- int timeout = NIC_MBOX_MSG_TIMEOUT;
- int sleep = 10;
+ unsigned long timeout;
int ret = 0;
mutex_lock(&nic->rx_mode_mtx);
@@ -137,6 +136,7 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
nicvf_write_to_mbx(nic, mbx);
+ timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);
/* Wait for previous message to be acked, timeout 2sec */
while (!nic->pf_acked) {
if (nic->pf_nacked) {
@@ -146,11 +146,10 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
ret = -EINVAL;
break;
}
- msleep(sleep);
+ usleep_range(8000, 10000);
if (nic->pf_acked)
break;
- timeout -= sleep;
- if (!timeout) {
+ if (time_after(jiffies, timeout)) {
netdev_err(nic->netdev,
"PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
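/* Sketch (not part of the patch): the hunk above converts a hand-rolled
 * countdown into the canonical jiffies pattern -- compute an absolute
 * deadline once, compare with time_after() (safe across jiffies
 * wraparound), and sleep with usleep_range(), whose granularity is far
 * better than msleep(10) for waits this short.  Hypothetical names:
 */
#include <linux/delay.h>
#include <linux/jiffies.h>

static int example_poll_for_ack(const bool *acked, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!READ_ONCE(*acked)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* peer never answered */
		usleep_range(8000, 10000);	/* poll every 8-10 ms */
	}
	return 0;
}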
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 4ab57d33a87e..069e7413f1ef 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1179,13 +1179,12 @@ void nicvf_sq_disable(struct nicvf *nic, int qidx)
void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
int qidx)
{
- u64 head, tail;
+ u64 head;
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
struct sq_hdr_subdesc *hdr;
head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
- tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
while (sq->head != head) {
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index bc2427c49b89..2460451fc48f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -100,8 +100,8 @@
* RED accepts pkt if unused CQE < 2560 & >= 2304
* DROPs pkts if unused CQE < 2304
*/
-#define RQ_PASS_CQ_LVL 192ULL
-#define RQ_DROP_CQ_LVL 184ULL
+#define RQ_PASS_CQ_LVL 224ULL
+#define RQ_DROP_CQ_LVL 216ULL
/* RED and Backpressure levels of RBDR for pkt reception
* For RBDR, level is a measure of fullness i.e 0x0 means empty
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 94b9482f14a5..6475060649e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -55,7 +55,6 @@
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
-#define DRV_VERSION "2.2"
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 0ccdde366ae1..99736796e1a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -429,7 +429,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = dev->ml_priv;
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -794,6 +793,9 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
}
static const struct ethtool_ops t1_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -984,8 +986,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adapter *adapter = NULL;
struct port_info *pi;
- pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
index b19e4376ba76..401827b82aa1 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
@@ -79,7 +79,7 @@ struct ch_mem_range {
uint32_t addr;
uint32_t len;
uint32_t version;
- uint8_t buf[0];
+ uint8_t buf[];
};
struct ch_qset_params {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 883cfa9c4b6d..42c6e9379882 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -105,7 +105,6 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
static int dflt_msg_enable = DFLT_MSG_ENABLE;
@@ -1629,7 +1628,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
spin_unlock(&adapter->stats_lock);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
@@ -2106,6 +2104,7 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -3210,8 +3209,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adapter *adapter = NULL;
struct port_info *pi;
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
-
if (!cxgb3_wq) {
cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
if (!cxgb3_wq) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
index 852c399a8b0a..68bb5f39f3f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
@@ -1448,7 +1448,7 @@ struct cpl_rdma_terminate {
#endif
__be32 msn;
__be32 mo;
- __u8 data[0];
+ __u8 data[];
};
/* cpl_rdma_terminate.tid_len fields */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
index 165bfb91487a..b4b2547efc86 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/version.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
@@ -34,8 +34,6 @@
#define __CHELSIO_VERSION_H
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
-/* Driver version */
-#define DRV_VERSION "1.1.5-ko"
/* Firmware version */
#define FW_VERSION_MAJOR 7
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index a0e0ae19649f..290c1058069a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -29,7 +29,7 @@ struct clip_tbl {
atomic_t nfree;
struct list_head ce_free_head;
void *cl_list;
- struct list_head hash_list[0];
+ struct list_head hash_list[];
};
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index f5be3ee1bdb4..dcab94cc2dee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -82,7 +82,7 @@ struct cudbg_ulprx_la {
struct cudbg_tp_la {
u32 size;
u32 mode;
- u8 data[0];
+ u8 data[];
};
static const char * const cudbg_region[] = {
@@ -134,7 +134,7 @@ struct cudbg_meminfo {
struct cudbg_cim_pif_la {
int size;
- u8 data[0];
+ u8 data[];
};
struct cudbg_clk_info {
@@ -339,13 +339,13 @@ struct cudbg_qdesc_entry {
u32 qid;
u32 desc_size;
u32 num_desc;
- u8 data[0]; /* Must be last */
+ u8 data[]; /* Must be last */
};
struct cudbg_qdesc_info {
u32 qdesc_entry_size;
u32 num_queues;
- u8 data[0]; /* Must be last */
+ u8 data[]; /* Must be last */
};
#define IREG_NUM_ELEM 4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 19c11568113a..7b9cd69f9844 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1049,9 +1049,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
}
}
-static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
- struct cudbg_error *cudbg_err,
- u8 mem_type)
+static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+ struct cudbg_error *cudbg_err,
+ u8 mem_type, unsigned long *region_size)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_meminfo mem_info;
@@ -1060,15 +1060,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
rc = cudbg_fill_meminfo(padap, &mem_info);
- if (rc)
+ if (rc) {
+ cudbg_err->sys_err = rc;
return rc;
+ }
cudbg_t4_fwcache(pdbg_init, cudbg_err);
rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
- if (rc)
+ if (rc) {
+ cudbg_err->sys_err = rc;
return rc;
+ }
+
+ if (region_size)
+ *region_size = mem_info.avail[mc_idx].limit -
+ mem_info.avail[mc_idx].base;
- return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+ return 0;
}
static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
@@ -1076,7 +1084,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
{
- unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
+ unsigned long size = 0;
+ int rc;
+
+ rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
+ if (rc)
+ return rc;
return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
cudbg_err);
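/* Sketch (not part of the patch): the old helper returned a size as
 * unsigned long, so a negative errno from the lookups silently became a
 * huge "size".  The fix separates the channels -- errno in the int
 * return, the size through an out-parameter.  example_lookup_limits is
 * a hypothetical stand-in for the meminfo lookups.
 */
static int example_lookup_limits(u8 type, unsigned long *base,
				 unsigned long *limit)
{
	*base = 0;
	*limit = 0x100000;	/* hypothetical 1 MB region */
	return 0;
}

static int example_region_size(u8 type, unsigned long *region_size)
{
	unsigned long base, limit;
	int rc;

	rc = example_lookup_limits(type, &base, &limit);
	if (rc)
		return rc;	/* the error travels in the return value */

	if (region_size)
		*region_size = limit - base;	/* the payload travels here */
	return 0;
}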
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 8b7d156f79d3..e46a14f44a6f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -640,6 +640,7 @@ enum { /* adapter flags */
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
+ ULP_CRYPTO_KTLS_INLINE = 1 << 3,
};
struct rx_sw_desc;
@@ -1485,9 +1486,8 @@ static inline unsigned int qtimer_val(const struct adapter *adap,
return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
-/* driver version & name used for ethtool_drvinfo */
+/* driver name used for ethtool_drvinfo */
extern char cxgb4_driver_name[];
-extern const char cxgb4_driver_version[];
void t4_os_portmod_changed(struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index de30d61af065..ebed99f3d4cf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -984,7 +984,7 @@ static const char * const devlog_facility_strings[] = {
struct devlog_info {
unsigned int nentries; /* number of entries in log[] */
unsigned int first; /* first [temporal] entry in log[] */
- struct fw_devlog_e log[0]; /* Firmware Device Log */
+ struct fw_devlog_e log[]; /* Firmware Device Log */
};
/* Dump a Firmware Device Log entry.
@@ -3409,6 +3409,41 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.tls_pdu_rx));
seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
atomic_read(&adap->chcr_stats.tls_key));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
+ seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
+ seq_printf(seq, "Tx connection created: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_open));
+ seq_printf(seq, "Tx connection failed: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_fail));
+ seq_printf(seq, "Tx connection closed: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_close));
+ seq_printf(seq, "Packets passed for encryption : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_packets));
+ seq_printf(seq, "Bytes passed for encryption : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_bytes));
+ seq_printf(seq, "Tx records send: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_send_records));
+ seq_printf(seq, "Tx partial start of records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_start_pkts));
+ seq_printf(seq, "Tx partial middle of records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_middle_pkts));
+ seq_printf(seq, "Tx partial end of record: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_end_pkts));
+ seq_printf(seq, "Tx complete records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_complete_pkts));
+ seq_printf(seq, "TX trim pkts : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_trimmed_pkts));
+ seq_printf(seq, "Tx out of order packets: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_ooo));
+ seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_skip_no_sync_data));
+ seq_printf(seq, "Tx drop not synced packets: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_drop_no_sync_data));
+ seq_printf(seq, "Tx drop bypass req: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_drop_bypass_req));
+#endif
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index ba95e13d52da..1471cf0deb58 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -49,7 +49,7 @@ struct seq_tab {
unsigned int rows; /* # of entries */
unsigned char width; /* size in bytes of each entry */
unsigned char skip_first; /* whether the first line is a header */
- char data[0]; /* the table data */
+ char data[]; /* the table data */
};
static inline unsigned int hex2val(char c)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index c837382ee522..9fd496732b2c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -106,6 +106,15 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
"db_empty ",
"write_coal_success ",
"write_coal_fail ",
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ "tx_tls_encrypted_packets",
+ "tx_tls_encrypted_bytes ",
+ "tx_tls_ctx ",
+ "tx_tls_ooo ",
+ "tx_tls_skip_no_sync_data",
+ "tx_tls_drop_no_sync_data",
+ "tx_tls_drop_bypass_req ",
+#endif
};
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
@@ -170,15 +179,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
u32 exprom_vers;
strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->version, cxgb4_driver_version,
- sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
- if (!adapter->params.fw_vers)
- strcpy(info->fw_version, "N/A");
- else
+ if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
@@ -236,6 +241,15 @@ struct adapter_stats {
u64 db_empty;
u64 wc_success;
u64 wc_fail;
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ u64 tx_tls_encrypted_packets;
+ u64 tx_tls_encrypted_bytes;
+ u64 tx_tls_ctx;
+ u64 tx_tls_ooo;
+ u64 tx_tls_skip_no_sync_data;
+ u64 tx_tls_drop_no_sync_data;
+ u64 tx_tls_drop_bypass_req;
+#endif
};
static void collect_sge_port_stats(const struct adapter *adap,
@@ -1580,6 +1594,10 @@ static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
.get_fecparam = get_fecparam,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index fc05248984fc..796555255207 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -438,13 +438,118 @@ int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
}
-int cxgb4_get_free_ftid(struct net_device *dev, int family)
+static bool cxgb4_filter_prio_in_range(struct tid_info *t, u32 idx, u8 nslots,
+ u32 prio)
+{
+ struct filter_entry *prev_tab, *next_tab, *prev_fe, *next_fe;
+ u32 prev_ftid, next_ftid;
+
+ /* Only insert the rule if both of the following conditions
+ * are met:
+ * 1. The immediate previous rule has priority <= @prio.
+ * 2. The immediate next rule has priority >= @prio.
+ */
+
+ /* High Priority (HPFILTER) region always has higher priority
+ * than the normal FILTER region. So, all rules in the HPFILTER
+ * region must have prio values <= those of the rules in the
+ * normal FILTER region.
+ */
+ if (idx < t->nhpftids) {
+ /* Don't insert if there's a rule already present at @idx
+ * in HPFILTER region.
+ */
+ if (test_bit(idx, t->hpftid_bmap))
+ return false;
+
+ next_tab = t->hpftid_tab;
+ next_ftid = find_next_bit(t->hpftid_bmap, t->nhpftids, idx);
+ if (next_ftid >= t->nhpftids) {
+ /* No next entry found in HPFILTER region.
+ * See if there's any next entry in normal
+ * FILTER region.
+ */
+ next_ftid = find_first_bit(t->ftid_bmap, t->nftids);
+ if (next_ftid >= t->nftids)
+ next_ftid = idx;
+ else
+ next_tab = t->ftid_tab;
+ }
+
+ /* Search for the closest previous filter entry in HPFILTER
+ * region. No need to search in normal FILTER region because
+ * there can never be any entry in normal FILTER region whose
+ * prio value is < last entry in HPFILTER region.
+ */
+ prev_ftid = find_last_bit(t->hpftid_bmap, idx);
+ if (prev_ftid >= idx)
+ prev_ftid = idx;
+
+ prev_tab = t->hpftid_tab;
+ } else {
+ idx -= t->nhpftids;
+
+ /* Don't insert if there's a rule already present at @idx
+ * in normal FILTER region.
+ */
+ if (test_bit(idx, t->ftid_bmap))
+ return false;
+
+ prev_tab = t->ftid_tab;
+ prev_ftid = find_last_bit(t->ftid_bmap, idx);
+ if (prev_ftid >= idx) {
+ /* No previous entry found in normal FILTER
+ * region. See if there's any previous entry
+ * in HPFILTER region.
+ */
+ prev_ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
+ if (prev_ftid >= t->nhpftids)
+ prev_ftid = idx;
+ else
+ prev_tab = t->hpftid_tab;
+ }
+
+ /* Search for the closest next filter entry in normal
+ * FILTER region. No need to search in HPFILTER region
+ * because there can never be any entry in HPFILTER
+ * region whose prio value is > first entry in normal
+ * FILTER region.
+ */
+ next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
+ if (next_ftid >= t->nftids)
+ next_ftid = idx;
+
+ next_tab = t->ftid_tab;
+ }
+
+ next_fe = &next_tab[next_ftid];
+
+ /* See if the filter entry belongs to an IPv6 rule, which
+ * occupies 4 slots on T5 and 2 slots on T6. Adjust the
+ * reference to the previously inserted filter entry
+ * accordingly.
+ */
+ prev_fe = &prev_tab[prev_ftid & ~(nslots - 1)];
+ if (!prev_fe->fs.type)
+ prev_fe = &prev_tab[prev_ftid];
+
+ if ((prev_fe->valid && prev_fe->fs.tc_prio > prio) ||
+ (next_fe->valid && next_fe->fs.tc_prio < prio))
+ return false;
+
+ return true;
+}
+
+int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
+ u32 tc_prio)
{
struct adapter *adap = netdev2adap(dev);
struct tid_info *t = &adap->tids;
+ u32 bmap_ftid, max_ftid;
+ struct filter_entry *f;
+ unsigned long *bmap;
bool found = false;
- u8 i, n, cnt;
- int ftid;
+ u8 i, cnt, n;
+ int ftid = 0;
/* IPv4 occupies 1 slot. IPv6 occupies 2 slots on T6 and 4 slots
* on T5.
@@ -456,34 +561,127 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
n += 2;
}
- if (n > t->nftids)
- return -ENOMEM;
-
- /* Find free filter slots from the end of TCAM. Appropriate
- * checks must be done by caller later to ensure the prio
- * passed by TC doesn't conflict with prio saved by existing
- * rules in the TCAM.
+ /* There are 3 filter regions available in hardware in
+ * the following order of priority:
+ *
+ * 1. High Priority (HPFILTER) region (Highest Priority).
+ * 2. HASH region.
+ * 3. Normal FILTER region (Lowest Priority).
+ *
+ * Entries in HPFILTER and normal FILTER region have index
+ * 0 as the highest priority and the rules will be scanned
+ * in ascending order until either a rule hits or end of
+ * the region is reached.
+ *
+ * All HASH region entries have the same priority. The set of
+ * header fields to match is pre-determined, and every rule
+ * that wants to be inserted in the HASH region must specify
+ * that same set of match fields. Hence, the HASH region is an
+ * exact-match region. A HASH is generated for a rule based on
+ * the values in the pre-determined set of header match
+ * fields, and the generated HASH serves as an index into the
+ * HASH region. No 2 rules can ever have the same HASH.
+ * Hardware computes a HASH for every incoming packet based on
+ * the values in the same set of header match fields and uses
+ * it as an index to check whether a rule is inserted in the
+ * HASH region at that index. If there is, it's considered a
+ * filter hit. Otherwise, it's a filter miss and the normal
+ * FILTER region is scanned afterwards.
*/
+
spin_lock_bh(&t->ftid_lock);
- ftid = t->nftids - 1;
- while (ftid >= n - 1) {
+
+ ftid = (tc_prio <= t->nhpftids) ? 0 : t->nhpftids;
+ max_ftid = t->nftids + t->nhpftids;
+ while (ftid < max_ftid) {
+ if (ftid < t->nhpftids) {
+ /* If the new rule wants to get inserted into
+ * HPFILTER region, but its prio is greater
+ * than the rule with the highest prio in HASH
+ * region, then reject the rule.
+ */
+ if (t->tc_hash_tids_max_prio &&
+ tc_prio > t->tc_hash_tids_max_prio)
+ break;
+
+ /* If there aren't enough slots available
+ * in HPFILTER region, then move on to
+ * normal FILTER region immediately.
+ */
+ if (ftid + n > t->nhpftids) {
+ ftid = t->nhpftids;
+ continue;
+ }
+
+ bmap = t->hpftid_bmap;
+ bmap_ftid = ftid;
+ } else if (hash_en) {
+ /* Ensure priority is >= last rule in HPFILTER
+ * region.
+ */
+ ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
+ if (ftid < t->nhpftids) {
+ f = &t->hpftid_tab[ftid];
+ if (f->valid && tc_prio < f->fs.tc_prio)
+ break;
+ }
+
+ /* Ensure priority is <= first rule in normal
+ * FILTER region.
+ */
+ ftid = find_first_bit(t->ftid_bmap, t->nftids);
+ if (ftid < t->nftids) {
+ f = &t->ftid_tab[ftid];
+ if (f->valid && tc_prio > f->fs.tc_prio)
+ break;
+ }
+
+ found = true;
+ ftid = t->nhpftids;
+ goto out_unlock;
+ } else {
+ /* If the new rule wants to get inserted into
+ * normal FILTER region, but its prio is less
+ * than the rule with the highest prio in HASH
+ * region, then reject the rule.
+ */
+ if (t->tc_hash_tids_max_prio &&
+ tc_prio < t->tc_hash_tids_max_prio)
+ break;
+
+ if (ftid + n > max_ftid)
+ break;
+
+ bmap = t->ftid_bmap;
+ bmap_ftid = ftid - t->nhpftids;
+ }
+
cnt = 0;
for (i = 0; i < n; i++) {
- if (test_bit(ftid - i, t->ftid_bmap))
+ if (test_bit(bmap_ftid + i, bmap))
break;
cnt++;
}
+
if (cnt == n) {
- ftid &= ~(n - 1);
- found = true;
- break;
+ /* Ensure the new rule's prio doesn't conflict
+ * with existing rules.
+ */
+ if (cxgb4_filter_prio_in_range(t, ftid, n,
+ tc_prio)) {
+ ftid &= ~(n - 1);
+ found = true;
+ break;
+ }
}
- ftid -= n;
+ ftid += n;
}
- spin_unlock_bh(&t->ftid_lock);
- ftid += t->nhpftids;
+out_unlock:
+ spin_unlock_bh(&t->ftid_lock);
return found ? ftid : -ENOMEM;
}
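/* Sketch (not part of the patch): stripped of the HPFILTER/HASH region
 * logic, the cnt/test_bit loop above is a search for n contiguous free
 * slots in a TID bitmap (an IPv6 filter needs 2 or 4, so n is a power
 * of two and stepping by n keeps runs naturally aligned, like the
 * "ftid &= ~(n - 1)" above).  Hypothetical name:
 */
static int example_find_free_run(const unsigned long *bmap, u32 nbits, u8 n)
{
	u32 idx;
	u8 i;

	for (idx = 0; idx + n <= nbits; idx += n) {
		for (i = 0; i < n; i++)
			if (test_bit(idx + i, bmap))
				break;		/* slot in use; try next run */
		if (i == n)
			return idx;		/* found n free slots */
	}
	return -ENOMEM;
}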
@@ -555,73 +753,6 @@ static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
spin_unlock_bh(&t->ftid_lock);
}
-bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
-{
- struct filter_entry *prev_fe, *next_fe, *tab;
- struct adapter *adap = netdev2adap(dev);
- u32 prev_ftid, next_ftid, max_tid;
- struct tid_info *t = &adap->tids;
- unsigned long *bmap;
- bool valid = true;
-
- if (idx < t->nhpftids) {
- bmap = t->hpftid_bmap;
- tab = t->hpftid_tab;
- max_tid = t->nhpftids;
- } else {
- idx -= t->nhpftids;
- bmap = t->ftid_bmap;
- tab = t->ftid_tab;
- max_tid = t->nftids;
- }
-
- /* Only insert the rule if both of the following conditions
- * are met:
- * 1. The immediate previous rule has priority <= @prio.
- * 2. The immediate next rule has priority >= @prio.
- */
- spin_lock_bh(&t->ftid_lock);
-
- /* Don't insert if there's a rule already present at @idx. */
- if (test_bit(idx, bmap)) {
- valid = false;
- goto out_unlock;
- }
-
- next_ftid = find_next_bit(bmap, max_tid, idx);
- if (next_ftid >= max_tid)
- next_ftid = idx;
-
- next_fe = &tab[next_ftid];
-
- prev_ftid = find_last_bit(bmap, idx);
- if (prev_ftid >= idx)
- prev_ftid = idx;
-
- /* See if the filter entry belongs to an IPv6 rule, which
- * occupy 4 slots on T5 and 2 slots on T6. Adjust the
- * reference to the previously inserted filter entry
- * accordingly.
- */
- if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
- prev_fe = &tab[prev_ftid & ~0x3];
- if (!prev_fe->fs.type)
- prev_fe = &tab[prev_ftid];
- } else {
- prev_fe = &tab[prev_ftid & ~0x1];
- if (!prev_fe->fs.type)
- prev_fe = &tab[prev_ftid];
- }
-
- if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
- (next_fe->valid && prio > next_fe->fs.tc_prio))
- valid = false;
-
-out_unlock:
- spin_unlock_bh(&t->ftid_lock);
- return valid;
-}
-
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
index b3e4a645043d..b0751c0611ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -53,5 +53,4 @@ void clear_all_filters(struct adapter *adapter);
void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
-bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio);
#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 97f90edbc068..a70018f067aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -90,11 +90,6 @@
char cxgb4_driver_name[] = KBUILD_MODNAME;
-#ifdef DRV_VERSION
-#undef DRV_VERSION
-#endif
-#define DRV_VERSION "2.0.0-ko"
-const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
@@ -137,7 +132,6 @@ const char cxgb4_driver_version[] = DRV_VERSION;
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
@@ -3138,7 +3132,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return ret;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- pi->xact_addr_filt = ret;
return 0;
}
@@ -3626,8 +3619,6 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
struct adapter *adapter = netdev2adap(dev);
strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->version, cxgb4_driver_version,
- sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -6086,8 +6077,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, err;
u32 whoami;
- printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
-
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
/* Just info, some other driver may have claimed the device. */
@@ -6682,6 +6671,10 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
cxgb_close(adapter->port[i]);
+ rtnl_lock();
+ cxgb4_mqprio_stop_offload(adapter);
+ rtnl_unlock();
+
if (is_uld(adapter)) {
detach_ulds(adapter);
t4_uld_clean_up(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index af1f40cbccc8..f5bc996ac77d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -311,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- struct adapter *adapter = (struct adapter *)container_of(ptp,
- struct adapter, ptp_clock_info);
- struct fw_ptp_cmd c;
+ struct adapter *adapter = container_of(ptp, struct adapter,
+ ptp_clock_info);
u64 ns;
- int err;
-
- memset(&c, 0, sizeof(c));
- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F |
- FW_PTP_CMD_PORTID_V(0));
- c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
- c.u.ts.sc = FW_PTP_SC_GET_TIME;
- err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
- if (err < 0) {
- dev_err(adapter->pdev_dev,
- "PTP: %s error %d\n", __func__, -err);
- return err;
- }
+ ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
+ ns |= (u64)t4_read_reg(adapter,
+ T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
/* convert to timespec */
- ns = be64_to_cpu(c.u.ts.tm);
*ts = ns_to_timespec64(ns);
-
- return err;
+ return 0;
}
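/* Sketch (not part of the patch): the hunk above trades a slow firmware
 * mailbox round trip for two direct MMIO reads of a split 64-bit
 * counter.  Composing the value looks like this; a production reader
 * may need a LO/HI/LO retry if the hardware does not latch the pair
 * atomically on the LO read (an assumption to verify per device):
 */
static u64 example_read_split_counter(void __iomem *lo, void __iomem *hi)
{
	u64 ns;

	ns = readl(lo);			/* low 32 bits first */
	ns |= (u64)readl(hi) << 32;	/* then the high 32 bits */
	return ns;
}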
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index bb5513bdd293..4a5fa9eba0b6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -544,7 +544,8 @@ static bool valid_pedit_action(struct net_device *dev,
}
int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_action *actions)
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry *act;
bool act_redir = false;
@@ -552,6 +553,9 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
bool act_vlan = false;
int i;
+ if (!flow_action_basic_hw_stats_check(actions, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
@@ -631,6 +635,64 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
return 0;
}
+static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
+{
+ spin_lock_bh(&adap->tids.ftid_lock);
+ if (adap->tids.tc_hash_tids_max_prio < tc_prio)
+ adap->tids.tc_hash_tids_max_prio = tc_prio;
+ spin_unlock_bh(&adap->tids.ftid_lock);
+}
+
+static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
+{
+ struct tid_info *t = &adap->tids;
+ struct ch_tc_flower_entry *fe;
+ struct rhashtable_iter iter;
+ u32 found = 0;
+
+ spin_lock_bh(&t->ftid_lock);
+ /* Bail if the current rule is not the one with the max
+ * prio.
+ */
+ if (t->tc_hash_tids_max_prio != tc_prio)
+ goto out_unlock;
+
+ /* Search for the next rule having the same or next lower
+ * max prio.
+ */
+ rhashtable_walk_enter(&adap->flower_tbl, &iter);
+ do {
+ rhashtable_walk_start(&iter);
+
+ fe = rhashtable_walk_next(&iter);
+ while (!IS_ERR_OR_NULL(fe)) {
+ if (fe->fs.hash &&
+ fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
+ t->tc_hash_tids_max_prio = fe->fs.tc_prio;
+ found++;
+
+ /* Bail if we found another rule
+ * having the same prio as the
+ * current max one.
+ */
+ if (fe->fs.tc_prio == tc_prio)
+ break;
+ }
+
+ fe = rhashtable_walk_next(&iter);
+ }
+
+ rhashtable_walk_stop(&iter);
+ } while (fe == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&iter);
+
+ if (!found)
+ t->tc_hash_tids_max_prio = 0;
+
+out_unlock:
+ spin_unlock_bh(&t->ftid_lock);
+}
+
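/* Sketch (not part of the patch): cxgb4_tc_flower_hash_prio_del() above
 * follows the rhashtable iterator protocol -- start/next/stop inside a
 * loop that restarts the whole pass while rhashtable_walk_next() yields
 * ERR_PTR(-EAGAIN), meaning a concurrent resize invalidated the
 * iterator.  Bare skeleton with a hypothetical name:
 */
static void example_walk(struct rhashtable *tbl)
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(tbl, &iter);
	do {
		rhashtable_walk_start(&iter);	/* enters RCU read side */
		while (!IS_ERR_OR_NULL(obj = rhashtable_walk_next(&iter))) {
			/* ... inspect obj; must not sleep here ... */
		}
		rhashtable_walk_stop(&iter);	/* leaves RCU read side */
	} while (obj == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
}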
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls)
{
@@ -640,9 +702,10 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
struct ch_tc_flower_entry *ch_flower;
struct ch_filter_specification *fs;
struct filter_ctx ctx;
+ u8 inet_family;
int fidx, ret;
- if (cxgb4_validate_flow_actions(dev, &rule->action))
+ if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, cls))
@@ -660,39 +723,32 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
- if (fs->hash) {
- fidx = 0;
- } else {
- u8 inet_family;
-
- inet_family = fs->type ? PF_INET6 : PF_INET;
+ inet_family = fs->type ? PF_INET6 : PF_INET;
- /* Note that TC uses prio 0 to indicate stack to
- * generate automatic prio and hence doesn't pass prio
- * 0 to driver. However, the hardware TCAM index
- * starts from 0. Hence, the -1 here.
- */
- if (cls->common.prio <= (adap->tids.nftids +
- adap->tids.nhpftids)) {
- fidx = cls->common.prio - 1;
- if (fidx < adap->tids.nhpftids)
- fs->prio = 1;
- } else {
- fidx = cxgb4_get_free_ftid(dev, inet_family);
- }
+ /* Get a free filter entry TID, where we can insert this new
+ * rule. Only insert the rule if its prio doesn't conflict with
+ * existing rules.
+ */
+ fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
+ cls->common.prio);
+ if (fidx < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
+ ret = -ENOMEM;
+ goto free_entry;
+ }
- /* Only insert FLOWER rule if its priority doesn't
- * conflict with existing rules in the LETCAM.
- */
- if (fidx < 0 ||
- !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
- NL_SET_ERR_MSG_MOD(extack,
- "No free LETCAM index available");
- ret = -ENOMEM;
- goto free_entry;
- }
+ if (fidx < adap->tids.nhpftids) {
+ fs->prio = 1;
+ fs->hash = 0;
}
+ /* If the rule can be inserted into HASH region, then ignore
+ * the index to normal FILTER region.
+ */
+ if (fs->hash)
+ fidx = 0;
+
fs->tc_prio = cls->common.prio;
fs->tc_cookie = cls->cookie;
@@ -723,6 +779,9 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
if (ret)
goto del_filter;
+ if (fs->hash)
+ cxgb4_tc_flower_hash_prio_add(adap, cls->common.prio);
+
return 0;
del_filter:
@@ -738,12 +797,17 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
{
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower;
+ u32 tc_prio;
+ bool hash;
int ret;
ch_flower = ch_flower_lookup(adap, cls->cookie);
if (!ch_flower)
return -ENOENT;
+ hash = ch_flower->fs.hash;
+ tc_prio = ch_flower->fs.tc_prio;
+
ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
if (ret)
goto err;
@@ -756,6 +820,9 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
}
kfree_rcu(ch_flower, rcu);
+ if (hash)
+ cxgb4_tc_flower_hash_prio_del(adap, tc_prio);
+
err:
return ret;
}
@@ -836,7 +903,8 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
ofld_stats->last_used = jiffies;
flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
packets - ofld_stats->packet_count,
- ofld_stats->last_used);
+ ofld_stats->last_used,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
ofld_stats->packet_count = packets;
ofld_stats->byte_count = bytes;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index e132516e9868..0a30c96b81ff 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -112,7 +112,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs);
int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_action *actions);
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack);
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 1b7681a4eb32..c88c47a14fbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -198,22 +198,14 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
struct ch_filter_specification *fs;
int ret, fidx;
- /* Note that TC uses prio 0 to indicate stack to generate
- * automatic prio and hence doesn't pass prio 0 to driver.
- * However, the hardware TCAM index starts from 0. Hence, the
- * -1 here. 1 slot is enough to create a wildcard matchall
- * VIID rule.
+ /* Get a free filter entry TID, where we can insert this new
+ * rule. Only insert the rule if its prio doesn't conflict with
+ * existing rules.
+ *
+ * 1 slot is enough to create a wildcard matchall VIID rule.
*/
- if (cls->common.prio <= (adap->tids.nftids + adap->tids.nhpftids))
- fidx = cls->common.prio - 1;
- else
- fidx = cxgb4_get_free_ftid(dev, PF_INET);
-
- /* Only insert MATCHALL rule if its priority doesn't conflict
- * with existing rules in the LETCAM.
- */
- if (fidx < 0 ||
- !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
+ fidx = cxgb4_get_free_ftid(dev, PF_INET, false, cls->common.prio);
+ if (fidx < 0) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
return -ENOMEM;
@@ -286,7 +278,8 @@ int cxgb4_tc_matchall_replace(struct net_device *dev,
}
ret = cxgb4_validate_flow_actions(dev,
- &cls_matchall->rule->action);
+ &cls_matchall->rule->action,
+ extack);
if (ret)
return ret;
@@ -353,7 +346,8 @@ int cxgb4_tc_matchall_stats(struct net_device *dev,
flow_stats_update(&cls_matchall->stats,
bytes - tc_port_matchall->ingress.bytes,
packets - tc_port_matchall->ingress.packets,
- tc_port_matchall->ingress.last_used);
+ tc_port_matchall->ingress.last_used,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
tc_port_matchall->ingress.packets = packets;
tc_port_matchall->ingress.bytes = bytes;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index ec3eb45ee3b4..e6af4906d674 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -301,6 +301,7 @@ static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
cxgb4_clear_msix_aff(eorxq->msix->vec,
eorxq->msix->aff_mask);
free_irq(eorxq->msix->vec, &eorxq->rspq);
+ cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
}
free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
@@ -611,6 +612,28 @@ out:
return ret;
}
+void cxgb4_mqprio_stop_offload(struct adapter *adap)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct net_device *dev;
+ u8 i;
+
+ if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
+ return;
+
+ for_each_port(adap, i) {
+ dev = adap->port[i];
+ if (!dev)
+ continue;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ if (!tc_port_mqprio->mqprio.qopt.num_tc)
+ continue;
+
+ cxgb4_mqprio_disable_offload(dev);
+ }
+}
+
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index c532f1ef8451..ff8794132b22 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -38,6 +38,7 @@ struct cxgb4_tc_mqprio {
int cxgb4_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio);
+void cxgb4_mqprio_stop_offload(struct adapter *adap);
int cxgb4_init_tc_mqprio(struct adapter *adap);
void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
#endif /* __CXGB4_TC_MQPRIO_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 269b8d9e25e0..3f3c11e54d97 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -155,9 +155,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
struct ch_filter_specification fs;
struct cxgb4_tc_u32_table *t;
struct cxgb4_link *link;
- unsigned int filter_id;
u32 uhtid, link_uhtid;
bool is_ipv6 = false;
+ u8 inet_family;
+ int filter_id;
int ret;
if (!can_tc_u32_offload(dev))
@@ -166,18 +167,15 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
return -EOPNOTSUPP;
- /* Note that TC uses prio 0 to indicate stack to generate
- * automatic prio and hence doesn't pass prio 0 to driver.
- * However, the hardware TCAM index starts from 0. Hence, the
- * -1 here.
- */
- filter_id = TC_U32_NODE(cls->knode.handle) - 1;
+ inet_family = (protocol == htons(ETH_P_IPV6)) ? PF_INET6 : PF_INET;
- /* Only insert U32 rule if its priority doesn't conflict with
- * existing rules in the LETCAM.
+ /* Get a free filter entry TID, where we can insert this new
+ * rule. Only insert the rule if its prio doesn't conflict with
+ * existing rules.
*/
- if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids ||
- !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
+ filter_id = cxgb4_get_free_ftid(dev, inet_family, false,
+ TC_U32_NODE(cls->knode.handle));
+ if (filter_id < 0) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
return -ENOMEM;
@@ -358,23 +356,65 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
struct cxgb4_link *link = NULL;
struct cxgb4_tc_u32_table *t;
struct filter_entry *f;
+ bool found = false;
u32 handle, uhtid;
+ u8 nslots;
int ret;
if (!can_tc_u32_offload(dev))
return -EOPNOTSUPP;
/* Fetch the location to delete the filter. */
- filter_id = TC_U32_NODE(cls->knode.handle) - 1;
- if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids)
- return -ERANGE;
+ max_tids = adapter->tids.nhpftids + adapter->tids.nftids;
+
+ spin_lock_bh(&adapter->tids.ftid_lock);
+ filter_id = 0;
+ while (filter_id < max_tids) {
+ if (filter_id < adapter->tids.nhpftids) {
+ i = filter_id;
+ f = &adapter->tids.hpftid_tab[i];
+ if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
+ found = true;
+ break;
+ }
- if (filter_id < adapter->tids.nhpftids)
- f = &adapter->tids.hpftid_tab[filter_id];
- else
- f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
+ i = find_next_bit(adapter->tids.hpftid_bmap,
+ adapter->tids.nhpftids, i + 1);
+ if (i >= adapter->tids.nhpftids) {
+ filter_id = adapter->tids.nhpftids;
+ continue;
+ }
+
+ filter_id = i;
+ } else {
+ i = filter_id - adapter->tids.nhpftids;
+ f = &adapter->tids.ftid_tab[i];
+ if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
+ found = true;
+ break;
+ }
+
+ i = find_next_bit(adapter->tids.ftid_bmap,
+ adapter->tids.nftids, i + 1);
+ if (i >= adapter->tids.nftids)
+ break;
+
+ filter_id = i + adapter->tids.nhpftids;
+ }
+
+ nslots = 0;
+ if (f->fs.type) {
+ nslots++;
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <
+ CHELSIO_T6)
+ nslots += 2;
+ }
+
+ filter_id += nslots;
+ }
+ spin_unlock_bh(&adapter->tids.ftid_lock);
- if (cls->knode.handle != f->fs.tc_cookie)
+ if (!found)
return -ERANGE;
t = adapter->tc_u32;
@@ -407,7 +447,6 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* If a link is being deleted, then delete all filters
* associated with the link.
*/
- max_tids = adapter->tids.nftids;
for (i = 0; i < t->size; i++) {
link = &t->table[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index a4b99edcc339..125868c6770a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -289,6 +289,6 @@ struct cxgb4_link {
struct cxgb4_tc_u32_table {
unsigned int size; /* number of entries in table */
- struct cxgb4_link table[0]; /* Jump table */
+ struct cxgb4_link table[]; /* Jump table */
};
#endif /* __CXGB4_TC_U32_PARSE_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index cce33d279094..e65b52375dd8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -662,6 +662,25 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
+ * @adap: adapter info
+ * @enable: 1 to enable / 0 to disable ktls settings.
+ */
+static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
+{
+ u32 params = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_TX_HW) |
+ FW_PARAMS_PARAM_Y_V(enable));
+ int ret = 0;
+
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &params, &params);
+ /* if fw returns failure, clear the ktls flag */
+ if (ret)
+ adap->params.crypto &= ~ULP_CRYPTO_KTLS_INLINE;
+}
+#endif
+
/* cxgb4_register_uld - register an upper-layer driver
* @type: the ULD type
* @p: the ULD methods
@@ -698,6 +717,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
}
if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ /* send mbox to enable ktls related settings. */
+ if (type == CXGB4_ULD_CRYPTO &&
+ (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
+ cxgb4_set_ktls_feature(adap, 1);
+#endif
if (adap->uld[type].add)
goto free_irq;
ret = setup_sge_txq_uld(adap, type, p);
@@ -750,6 +775,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
continue;
cxgb4_shutdown_uld_adapter(adap, type);
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ /* send mbox to disable ktls related settings. */
+ if (type == CXGB4_ULD_CRYPTO &&
+ (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
+ cxgb4_set_ktls_feature(adap, 0);
+#endif
}
mutex_unlock(&uld_mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index d9d27bc1ae67..be831317520a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -149,6 +149,8 @@ struct tid_info {
atomic_t conns_in_use;
/* lock for setting/clearing filter bitmap */
spinlock_t ftid_lock;
+
+ unsigned int tc_hash_tids_max_prio;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -263,7 +265,8 @@ struct filter_ctx {
struct ch_filter_specification;
-int cxgb4_get_free_ftid(struct net_device *dev, int family);
+int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
+ u32 tc_prio);
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs,
struct filter_ctx *ctx);
@@ -357,6 +360,26 @@ struct chcr_stats_debug {
atomic_t tls_pdu_tx;
atomic_t tls_pdu_rx;
atomic_t tls_key;
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ atomic64_t ktls_tx_connection_open;
+ atomic64_t ktls_tx_connection_fail;
+ atomic64_t ktls_tx_connection_close;
+ atomic64_t ktls_tx_send_records;
+ atomic64_t ktls_tx_end_pkts;
+ atomic64_t ktls_tx_start_pkts;
+ atomic64_t ktls_tx_middle_pkts;
+ atomic64_t ktls_tx_retransmit_pkts;
+ atomic64_t ktls_tx_complete_pkts;
+ atomic64_t ktls_tx_trimmed_pkts;
+ atomic64_t ktls_tx_encrypted_packets;
+ atomic64_t ktls_tx_encrypted_bytes;
+ atomic64_t ktls_tx_ctx;
+ atomic64_t ktls_tx_ooo;
+ atomic64_t ktls_tx_skip_no_sync_data;
+ atomic64_t ktls_tx_drop_no_sync_data;
+ atomic64_t ktls_tx_drop_bypass_req;
+
+#endif
};
#define OCQ_WIN_OFFSET(pdev, vres) \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1a16449e9deb..72b37a66c7d8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -59,7 +59,7 @@ struct l2t_data {
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
- struct l2t_entry l2tab[0]; /* MUST BE LAST */
+ struct l2t_entry l2tab[]; /* MUST BE LAST */
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -700,6 +700,17 @@ static char l2e_state(const struct l2t_entry *e)
}
}
+bool cxgb4_check_l2t_valid(struct l2t_entry *e)
+{
+ bool valid;
+
+ spin_lock(&e->lock);
+ valid = (e->state == L2T_STATE_VALID);
+ spin_unlock(&e->lock);
+ return valid;
+}
+EXPORT_SYMBOL(cxgb4_check_l2t_valid);
+
static int l2t_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 79665bd8f881..340fecb28a13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -122,6 +122,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
u8 port, u8 *dmac);
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+bool cxgb4_check_l2t_valid(struct l2t_entry *e);
extern const struct file_operations t4_l2t_fops;
#endif /* __CXGB4_L2T_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 5cc74a5a1774..5f8b871d79af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[0];
+ struct sched_class tab[];
};
static inline bool can_sched(struct net_device *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cab3d17e0e1a..6516c45864b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1418,6 +1418,11 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (skb->decrypted)
+ return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#endif /* CHELSIO_TLS_DEVICE */
+
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
spin_lock(&adap->ptp_lock);
@@ -2202,6 +2207,9 @@ static void ethofld_hard_xmit(struct net_device *dev,
if (unlikely(skip_eotx_wr)) {
start = (u64 *)wr;
eosw_txq->state = next_state;
+ eosw_txq->cred -= wrlen16;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
goto write_wr_headers;
}
@@ -2360,6 +2368,34 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
return cxgb4_eth_xmit(skb, dev);
}
+static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
+{
+ int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+ int pidx = eosw_txq->pidx;
+ struct sk_buff *skb;
+
+ if (!pktcount)
+ return;
+
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+
+ while (pktcount--) {
+ pidx--;
+ if (pidx < 0)
+ pidx += eosw_txq->ndesc;
+
+ skb = eosw_txq->desc[pidx].skb;
+ if (skb) {
+ dev_consume_skb_any(skb);
+ eosw_txq->desc[pidx].skb = NULL;
+ eosw_txq->inuse--;
+ }
+ }
+
+ eosw_txq->pidx = eosw_txq->last_pidx + 1;
+}
+
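
[eosw_txq_flush_pending_skbs() above walks the descriptor ring backwards from the producer index to the last completed index, wrapping at ndesc, and releases any skb still queued so the termination FLOWC has room. A standalone model of that index arithmetic, with plain malloc'd buffers standing in for skbs:]

#include <stdlib.h>

#define NDESC 8

struct ring {
        int pidx, last_pidx, inuse;
        void *desc[NDESC];
};

static void flush_pending(struct ring *r)
{
        int pktcount = r->pidx - r->last_pidx;
        int pidx = r->pidx;

        if (!pktcount)
                return;
        if (pktcount < 0)               /* producer index wrapped past last_pidx */
                pktcount += NDESC;

        while (pktcount--) {
                if (--pidx < 0)
                        pidx += NDESC;
                if (r->desc[pidx]) {    /* drop the still-pending buffer */
                        free(r->desc[pidx]);
                        r->desc[pidx] = NULL;
                        r->inuse--;
                }
        }
        /* restart right after the last completed slot (modulo added here;
         * the driver handles the wrap elsewhere) */
        r->pidx = (r->last_pidx + 1) % NDESC;
}

int main(void)
{
        struct ring r = { .pidx = 2, .last_pidx = 6 };  /* wrapped producer */

        r.desc[7] = malloc(1);
        r.desc[0] = malloc(1);
        r.desc[1] = malloc(1);
        r.inuse = 3;

        flush_pending(&r);              /* frees slots 1, 0, 7; inuse -> 0 */
        return r.inuse;
}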
/**
* cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
* @dev - netdevice
@@ -2435,9 +2471,11 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
FW_FLOWC_MNEM_EOSTATE_CLOSING :
FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
- eosw_txq->cred -= len16;
- eosw_txq->ncompl++;
- eosw_txq->last_compl = 0;
+ /* Free up any pending skbs to ensure there's room for
+ * the termination FLOWC.
+ */
+ if (tc == FW_SCHED_CLS_NONE)
+ eosw_txq_flush_pending_skbs(eosw_txq);
ret = eosw_txq_enqueue(eosw_txq, skb);
if (ret) {
@@ -2690,6 +2728,7 @@ static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
* is ever running at a time ...
*/
static void service_ofldq(struct sge_uld_txq *q)
+ __must_hold(&q->sendq.lock)
{
u64 *pos, *before, *end;
int credits;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index 1268d6e93a47..541249d78914 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -66,7 +66,7 @@ struct smt_entry {
struct smt_data {
unsigned int smt_size;
rwlock_t lock;
- struct smt_entry smtab[0];
+ struct smt_entry smtab[];
};
struct smt_data *t4_init_smt(void);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 844fdcf55118..2a3480fc1d91 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1379,8 +1379,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9608, 0x9638,
0x9640, 0x96f4,
0x9800, 0x9808,
- 0x9820, 0x983c,
- 0x9850, 0x9864,
+ 0x9810, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
@@ -1389,7 +1388,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
- 0xd004, 0xd004,
+ 0xd000, 0xd004,
0xd010, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0x1106c,
@@ -1430,10 +1429,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1a0b0, 0x1a0e4,
0x1a0ec, 0x1a0f8,
0x1a100, 0x1a108,
- 0x1a114, 0x1a120,
- 0x1a128, 0x1a130,
- 0x1a138, 0x1a138,
- 0x1a190, 0x1a1c4,
+ 0x1a114, 0x1a130,
+ 0x1a138, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
0x1e040, 0x1e044,
@@ -2162,8 +2159,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9640, 0x9704,
0x9710, 0x971c,
0x9800, 0x9808,
- 0x9820, 0x983c,
- 0x9850, 0x9864,
+ 0x9810, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
@@ -2172,7 +2168,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
- 0xd004, 0xd03c,
+ 0xd000, 0xd03c,
0xd100, 0xd118,
0xd200, 0xd214,
0xd220, 0xd234,
@@ -2240,10 +2236,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1a0b0, 0x1a0e4,
0x1a0ec, 0x1a0f8,
0x1a100, 0x1a108,
- 0x1a114, 0x1a120,
- 0x1a128, 0x1a130,
- 0x1a138, 0x1a138,
- 0x1a190, 0x1a1c4,
+ 0x1a114, 0x1a130,
+ 0x1a138, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
0x1e040, 0x1e044,
@@ -3748,7 +3742,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
- if (ret < 0)
+ if (ret)
return ret;
*phy_fw_ver = val;
return 0;
@@ -4480,7 +4474,7 @@ static void tp_intr_handler(struct adapter *adapter)
*/
static void sge_intr_handler(struct adapter *adapter)
{
- u64 v;
+ u32 v = 0, perr;
u32 err;
static const struct intr_info sge_intr_info[] = {
@@ -4515,13 +4509,29 @@ static void sge_intr_handler(struct adapter *adapter)
{ 0 }
};
- v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
- ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
- if (v) {
- dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
- (unsigned long long)v);
- t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
- t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
+ perr = t4_read_reg(adapter, SGE_INT_CAUSE1_A);
+ if (perr) {
+ v |= perr;
+ dev_alert(adapter->pdev_dev, "SGE Cause1 Parity Error %#x\n",
+ perr);
+ }
+
+ perr = t4_read_reg(adapter, SGE_INT_CAUSE2_A);
+ if (perr) {
+ v |= perr;
+ dev_alert(adapter->pdev_dev, "SGE Cause2 Parity Error %#x\n",
+ perr);
+ }
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
+ perr = t4_read_reg(adapter, SGE_INT_CAUSE5_A);
+ /* Parity error (CRC) for err_T_RxCRC is trivial, ignore it */
+ perr &= ~ERR_T_RXCRC_F;
+ if (perr) {
+ v |= perr;
+ dev_alert(adapter->pdev_dev,
+ "SGE Cause5 Parity Error %#x\n", perr);
+ }
}
v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
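
[The sge_intr_handler() rework above stops folding the two 32-bit cause registers into one u64 and instead reads, reports, and accumulates each register separately, masking out the benign err_T_RxCRC bit on T5 and later chips. A compilable model of that flow with the register reads stubbed by an array:]

#include <stdint.h>
#include <stdio.h>

#define ERR_T_RXCRC_F (1u << 31)

/* stand-in for t4_read_reg(adapter, SGE_INT_CAUSE*_A) */
static uint32_t read_cause(const uint32_t *regs, int idx)
{
        return regs[idx];
}

int main(void)
{
        uint32_t regs[3] = { 0x0, 0x10, ERR_T_RXCRC_F | 0x4 }; /* cause1,2,5 */
        uint32_t v = 0, perr;
        int chip_ge_t5 = 1;

        for (int i = 0; i < 2; i++) {
                perr = read_cause(regs, i);
                if (perr) {
                        v |= perr;
                        printf("SGE Cause%d Parity Error %#x\n", i + 1, perr);
                }
        }
        if (chip_ge_t5) {
                /* the CRC parity bit is benign, so mask it before deciding */
                perr = read_cause(regs, 2) & ~ERR_T_RXCRC_F;
                if (perr) {
                        v |= perr;
                        printf("SGE Cause5 Parity Error %#x\n", perr);
                }
        }
        return v ? 1 : 0;
}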
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 575c6abcdae7..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -47,6 +47,7 @@ enum {
CPL_CLOSE_LISTSRV_REQ = 0x9,
CPL_ABORT_REQ = 0xA,
CPL_ABORT_RPL = 0xB,
+ CPL_TX_DATA = 0xC,
CPL_RX_DATA_ACK = 0xD,
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
@@ -705,6 +706,14 @@ struct cpl_set_tcb_field {
__be64 val;
};
+struct cpl_set_tcb_field_core {
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
/* cpl_set_tcb_field.word_cookie fields */
#define TCB_WORD_S 0
#define TCB_WORD_V(x) ((x) << TCB_WORD_S)
@@ -1462,6 +1471,16 @@ struct cpl_tx_data {
#define TX_FORCE_S 13
#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
+#define TX_DATA_MSS_S 16
+#define TX_DATA_MSS_M 0xFFFF
+#define TX_DATA_MSS_V(x) ((x) << TX_DATA_MSS_S)
+#define TX_DATA_MSS_G(x) (((x) >> TX_DATA_MSS_S) & TX_DATA_MSS_M)
+
+#define TX_LENGTH_S 0
+#define TX_LENGTH_M 0xFFFF
+#define TX_LENGTH_V(x) ((x) << TX_LENGTH_S)
+#define TX_LENGTH_G(x) (((x) >> TX_LENGTH_S) & TX_LENGTH_M)
+
#define T6_TX_FORCE_S 20
#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S)
#define T6_TX_FORCE_F T6_TX_FORCE_V(1U)
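
[The new TX_DATA_MSS/TX_LENGTH definitions follow the driver's _S/_M/_V/_G convention: shift, mask, value-pack, and value-get. A quick standalone round-trip check, with the macros copied from the hunk above:]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TX_DATA_MSS_S    16
#define TX_DATA_MSS_M    0xFFFF
#define TX_DATA_MSS_V(x) ((x) << TX_DATA_MSS_S)
#define TX_DATA_MSS_G(x) (((x) >> TX_DATA_MSS_S) & TX_DATA_MSS_M)

#define TX_LENGTH_S    0
#define TX_LENGTH_M    0xFFFF
#define TX_LENGTH_V(x) ((x) << TX_LENGTH_S)
#define TX_LENGTH_G(x) (((x) >> TX_LENGTH_S) & TX_LENGTH_M)

int main(void)
{
        /* pack MSS and length into the two halves of one 32-bit word */
        uint32_t word = TX_DATA_MSS_V(1460u) | TX_LENGTH_V(9000u);

        assert(TX_DATA_MSS_G(word) == 1460u);
        assert(TX_LENGTH_G(word) == 9000u);
        printf("word=%#x mss=%u len=%u\n",
               word, TX_DATA_MSS_G(word), TX_LENGTH_G(word));
        return 0;
}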
@@ -1471,6 +1490,15 @@ struct cpl_tx_data {
#define TX_SHOVE_S 14
#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)
+#define TX_SHOVE_F TX_SHOVE_V(1U)
+
+#define TX_BYPASS_S 21
+#define TX_BYPASS_V(x) ((x) << TX_BYPASS_S)
+#define TX_BYPASS_F TX_BYPASS_V(1U)
+
+#define TX_PUSH_S 22
+#define TX_PUSH_V(x) ((x) << TX_PUSH_S)
+#define TX_PUSH_F TX_PUSH_V(1U)
#define TX_ULP_MODE_S 10
#define TX_ULP_MODE_M 0x7
@@ -1511,7 +1539,7 @@ struct ulptx_sgl {
__be32 cmd_nsge;
__be32 len0;
__be64 addr0;
- struct ulptx_sge_pair sge[0];
+ struct ulptx_sge_pair sge[];
};
struct ulptx_idata {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a957a6e4d4c4..4a9fcd6c226c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -487,6 +487,12 @@
#define ERROR_QID_M 0x1ffffU
#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+#define SGE_INT_CAUSE5_A 0x110c
+
+#define ERR_T_RXCRC_S 31
+#define ERR_T_RXCRC_V(x) ((x) << ERR_T_RXCRC_S)
+#define ERR_T_RXCRC_F ERR_T_RXCRC_V(1U)
+
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
@@ -1900,6 +1906,9 @@
#define MAC_PORT_CFG2_A 0x818
+#define MAC_PORT_PTP_SUM_LO_A 0x990
+#define MAC_PORT_PTP_SUM_HI_A 0x994
+
#define MPS_CMN_CTL_A 0x9000
#define COUNTPAUSEMCRX_S 5
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 1b9afb192f7f..50232e063f49 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -35,6 +35,11 @@
#ifndef __T4_TCB_H
#define __T4_TCB_H
+#define TCB_L2T_IX_W 0
+#define TCB_L2T_IX_S 12
+#define TCB_L2T_IX_M 0xfffULL
+#define TCB_L2T_IX_V(x) ((x) << TCB_L2T_IX_S)
+
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
@@ -45,11 +50,6 @@
#define TCB_T_FLAGS_M 0xffffffffffffffffULL
#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
-#define TCB_RQ_START_W 30
-#define TCB_RQ_START_S 0
-#define TCB_RQ_START_M 0x3ffffffULL
-#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
-
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
@@ -59,6 +59,11 @@
#define TCB_RSS_INFO_M 0x3ffULL
#define TCB_RSS_INFO_V(x) ((x) << TCB_RSS_INFO_S)
+#define TCB_T_STATE_W 3
+#define TCB_T_STATE_S 16
+#define TCB_T_STATE_M 0xfULL
+#define TCB_T_STATE_V(x) ((x) << TCB_T_STATE_S)
+
#define TCB_TIMESTAMP_W 5
#define TCB_TIMESTAMP_S 0
#define TCB_TIMESTAMP_M 0xffffffffULL
@@ -69,13 +74,60 @@
#define TCB_RTT_TS_RECENT_AGE_M 0xffffffffULL
#define TCB_RTT_TS_RECENT_AGE_V(x) ((x) << TCB_RTT_TS_RECENT_AGE_S)
+#define TCB_T_RTSEQ_RECENT_W 7
+#define TCB_T_RTSEQ_RECENT_S 0
+#define TCB_T_RTSEQ_RECENT_M 0xffffffffULL
+#define TCB_T_RTSEQ_RECENT_V(x) ((x) << TCB_T_RTSEQ_RECENT_S)
+
+#define TCB_TX_MAX_W 9
+#define TCB_TX_MAX_S 0
+#define TCB_TX_MAX_M 0xffffffffULL
+#define TCB_TX_MAX_V(x) ((x) << TCB_TX_MAX_S)
+
#define TCB_SND_UNA_RAW_W 10
+#define TCB_SND_UNA_RAW_S 0
+#define TCB_SND_UNA_RAW_M 0xfffffffULL
+#define TCB_SND_UNA_RAW_V(x) ((x) << TCB_SND_UNA_RAW_S)
+
+#define TCB_SND_NXT_RAW_W 10
+#define TCB_SND_NXT_RAW_S 28
+#define TCB_SND_NXT_RAW_M 0xfffffffULL
+#define TCB_SND_NXT_RAW_V(x) ((x) << TCB_SND_NXT_RAW_S)
+
+#define TCB_SND_MAX_RAW_W 11
+#define TCB_SND_MAX_RAW_S 24
+#define TCB_SND_MAX_RAW_M 0xfffffffULL
+#define TCB_SND_MAX_RAW_V(x) ((x) << TCB_SND_MAX_RAW_S)
+
+#define TCB_RCV_NXT_W 16
+#define TCB_RCV_NXT_S 10
+#define TCB_RCV_NXT_M 0xffffffffULL
+#define TCB_RCV_NXT_V(x) ((x) << TCB_RCV_NXT_S)
+
+#define TCB_RCV_WND_W 17
+#define TCB_RCV_WND_S 10
+#define TCB_RCV_WND_M 0xffffffULL
+#define TCB_RCV_WND_V(x) ((x) << TCB_RCV_WND_S)
+
#define TCB_RX_FRAG2_PTR_RAW_W 27
#define TCB_RX_FRAG3_LEN_RAW_W 29
#define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30
#define TCB_PDU_HDR_LEN_W 31
+#define TCB_RQ_START_W 30
+#define TCB_RQ_START_S 0
+#define TCB_RQ_START_M 0x3ffffffULL
+#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
+
#define TF_RX_PDU_OUT_S 49
#define TF_RX_PDU_OUT_V(x) ((__u64)(x) << TF_RX_PDU_OUT_S)
+#define TF_CORE_BYPASS_S 63
+#define TF_CORE_BYPASS_V(x) ((__u64)(x) << TF_CORE_BYPASS_S)
+#define TF_CORE_BYPASS_F TF_CORE_BYPASS_V(1)
+
+#define TF_NON_OFFLOAD_S 1
+#define TF_NON_OFFLOAD_V(x) ((x) << TF_NON_OFFLOAD_S)
+#define TF_NON_OFFLOAD_F TF_NON_OFFLOAD_V(1)
+
#endif /* __T4_TCB_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index accad1101ad1..68fe734b9b37 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -737,7 +737,7 @@ struct fw_flowc_mnemval {
struct fw_flowc_wr {
__be32 op_to_nparams;
__be32 flowid_len16;
- struct fw_flowc_mnemval mnemval[0];
+ struct fw_flowc_mnemval mnemval[];
};
#define FW_FLOWC_WR_NPARAMS_S 0
@@ -1205,6 +1205,7 @@ enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLS_INLINE = 0x00000002,
FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
+ FW_CAPS_CONFIG_TX_TLS_HW = 0x00000008,
};
enum fw_caps_config_fcoe {
@@ -1328,6 +1329,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
+ FW_PARAMS_PARAM_DEV_KTLS_TX_HW = 0x31,
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index f4d41f968afa..9cc3541a7e1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -55,7 +55,6 @@
/*
* Generic information about the driver.
*/
-#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
/*
@@ -1556,7 +1555,6 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
struct adapter *adapter = netdev2adap(dev);
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -1921,6 +1919,8 @@ static void cxgb4vf_get_wol(struct net_device *dev,
NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_link_ksettings = cxgb4vf_get_link_ksettings,
.get_fecparam = cxgb4vf_get_fecparam,
.get_drvinfo = cxgb4vf_get_drvinfo,
@@ -2934,12 +2934,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
unsigned int pf;
/*
- * Print our driver banner the first time we're called to initialize a
- * device.
- */
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
-
- /*
* Initialize generic PCI device state.
*/
err = pci_enable_device(pdev);
@@ -3454,7 +3448,6 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
static struct pci_driver cxgb4vf_driver = {
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 21034536c9c5..854d87e1125c 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -35,7 +35,6 @@
*/
#define DRV_NAME "libcxgb"
-#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/kernel.h>
@@ -530,5 +529,4 @@ EXPORT_SYMBOL(cxgbi_tagmask_set);
MODULE_AUTHOR("Chelsio Communications");
MODULE_DESCRIPTION("Chelsio common library");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
index 7b02c200dd1e..1b4156461ba1 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
@@ -122,7 +122,7 @@ struct cxgbi_ppm_pool {
unsigned int base; /* base index */
unsigned int next; /* next possible free index */
spinlock_t lock; /* ppm pool lock */
- unsigned long bmap[0];
+ unsigned long bmap[];
} ____cacheline_aligned_in_smp;
struct cxgbi_ppm {
@@ -145,7 +145,7 @@ struct cxgbi_ppm {
unsigned int next;
unsigned int max_index_in_edram;
unsigned long *ppod_bmap;
- struct cxgbi_ppod_data ppod_data[0];
+ struct cxgbi_ppod_data ppod_data[];
};
#define DDP_THRESHOLD 512
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index f37c9a08c4cf..9f5e5ec69991 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -24,7 +24,6 @@
#include <linux/platform_data/eth-ep93xx.h>
#define DRV_MODULE_NAME "ep93xx-eth"
-#define DRV_MODULE_VERSION "0.1"
#define RX_QUEUE_ENTRIES 64
#define TX_QUEUE_ENTRIES 8
@@ -691,7 +690,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 0dd64acd2a3f..18f3aeb88f22 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,8 +33,6 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.53"
-#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index ebd5c2cf1efe..4d8e0aa447fb 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -147,7 +147,6 @@ static void enic_get_drvinfo(struct net_device *netdev,
return;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
@@ -324,25 +323,6 @@ static int enic_coalesce_valid(struct enic *enic,
u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
ec->rx_coalesce_usecs_low);
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -EINVAL;
-
if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
ec->tx_coalesce_usecs)
return -EINVAL;
@@ -636,6 +616,10 @@ static int enic_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops enic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_RX_USECS_LOW |
+ ETHTOOL_COALESCE_RX_USECS_HIGH,
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
.set_msglevel = enic_set_msglevel,
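
[The enic hunk above drops a hand-written reject list in favor of declaring .supported_coalesce_params and letting the ethtool core police requests. A sketch of the core's mask check; the bit values here are illustrative assumptions, not the real ethtool.h encodings:]

#include <stdint.h>
#include <stdio.h>

#define ETHTOOL_COALESCE_USECS            (1u << 0)
#define ETHTOOL_COALESCE_USE_ADAPTIVE_RX  (1u << 1)
#define ETHTOOL_COALESCE_RX_USECS_LOW     (1u << 2)
#define ETHTOOL_COALESCE_RX_USECS_HIGH    (1u << 3)
#define ETHTOOL_COALESCE_RX_MAX_FRAMES    (1u << 4)

/* any requested knob outside the driver's capability mask is rejected */
static int coalesce_ok(uint32_t supported, uint32_t requested)
{
        return (requested & ~supported) == 0;
}

int main(void)
{
        uint32_t enic = ETHTOOL_COALESCE_USECS |
                        ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
                        ETHTOOL_COALESCE_RX_USECS_LOW |
                        ETHTOOL_COALESCE_RX_USECS_HIGH;

        printf("usecs only:    %s\n",
               coalesce_ok(enic, ETHTOOL_COALESCE_USECS) ? "ok" : "rejected");
        printf("rx max frames: %s\n",
               coalesce_ok(enic, ETHTOOL_COALESCE_RX_MAX_FRAMES) ? "ok" : "rejected");
        return 0;
}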
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ddf60dc9ad16..cd5fe4f6b54c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -80,7 +80,6 @@ static const struct pci_device_id enic_id_table[] = {
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
#define ENIC_LARGE_PKT_THRESHOLD 1000
@@ -696,8 +695,7 @@ static void enic_preload_tcp_csum(struct sk_buff *skb)
tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
}
@@ -3056,8 +3054,6 @@ static struct pci_driver enic_driver = {
static int __init enic_init_module(void)
{
- pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
-
return pci_register_driver(&enic_driver);
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index fef5a0a0663d..fcc4a3ccdd94 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -541,7 +541,7 @@ struct vnic_devcmd_notify {
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
- u8 data[0];
+ u8 data[];
};
/* These are used in flags field of different filters to denote
@@ -648,9 +648,9 @@ enum {
#define FILTER_MAX_BUF_SIZE 100
struct filter_tlv {
- u_int32_t type;
- u_int32_t length;
- u_int32_t val[0];
+ u32 type;
+ u32 length;
+ u32 val[];
};
enum {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h
index 9ef81f148351..057776908828 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h
@@ -59,7 +59,7 @@ struct vic_provinfo {
u16 type;
u16 length;
u8 value[0];
- } tlv[0];
+ } tlv[];
} __packed;
#define VIC_PROVINFO_ADD_TLV(vp, tlvtype, tlvlen, data) \
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index f30fa8e6ef80..5bff5c2be88b 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -44,7 +44,6 @@
#include "gemini.h"
#define DRV_NAME "gmac-gemini"
-#define DRV_VERSION "1.0"
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
@@ -2204,7 +2203,6 @@ static void gmac_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
}
@@ -2224,6 +2222,8 @@ static const struct net_device_ops gmac_351x_ops = {
};
static const struct ethtool_ops gmac_351x_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_sset_count = gmac_get_sset_count,
.get_strings = gmac_get_strings,
.get_ethtool_stats = gmac_get_ethtool_stats,
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index e94ae9b94dbf..7f7705138262 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -42,7 +42,6 @@
#define DM9000_PHY 0x40 /* PHY address 0x01 */
#define CARDNAME "dm9000"
-#define DRV_VERSION "1.31"
/*
* Transmit timeout, default 5 seconds.
@@ -543,7 +542,6 @@ static void dm9000_get_drvinfo(struct net_device *dev,
struct board_info *dm = to_dm9000_board(dev);
strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 42b798a3fad4..592454f444ce 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -30,7 +30,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "de2104x"
-#define DRV_VERSION "0.7"
#define DRV_RELDATE "Mar 17, 2004"
#include <linux/module.h>
@@ -52,14 +51,9 @@
#include <linux/uaccess.h>
#include <asm/unaligned.h>
-/* These identify the driver base version and may not be removed. */
-static char version[] =
-"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
-
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
static int debug = -1;
module_param (debug, int, 0);
@@ -1603,7 +1597,6 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
struct de_private *de = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
@@ -1980,11 +1973,6 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
board_idx++;
-#ifndef MODULE
- if (board_idx == 0)
- pr_info("%s\n", version);
-#endif
-
/* allocate a new ethernet device structure, and fill in defaults */
dev = alloc_etherdev(sizeof(struct de_private));
if (!dev)
@@ -2196,9 +2184,6 @@ static struct pci_driver de_driver = {
static int __init de_init (void)
{
-#ifdef MODULE
- pr_info("%s\n", version);
-#endif
return pci_register_driver(&de_driver);
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 32d470d4122a..c1884fc9ad32 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -56,8 +56,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "dmfe"
-#define DRV_VERSION "1.36.4"
-#define DRV_RELDATE "2002-01-17"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -280,10 +278,6 @@ enum dmfe_CR6_bits {
};
/* Global variable declaration ----------------------------- */
-static int printed_version;
-static const char version[] =
- "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
-
static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;
@@ -364,9 +358,6 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
DMFE_DBUG(0, "dmfe_init_one()", 0);
- if (!printed_version++)
- pr_info("%s\n", version);
-
/*
* SPARC on-board DM910x chips should be handled by the main
* tulip driver, except for early DM9100s.
@@ -1081,7 +1072,6 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
struct dmfe_board_info *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -2177,7 +2167,6 @@ static struct pci_driver dmfe_driver = {
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
module_param(mode, byte, 0);
@@ -2204,9 +2193,6 @@ static int __init dmfe_init_module(void)
{
int rc;
- pr_info("%s\n", version);
- printed_version = 1;
-
DMFE_DBUG(0, "init_module() ", debug);
if (debug)
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index b458140aeaef..815907259048 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -381,7 +381,7 @@ struct mediatable {
unsigned has_reset:6;
u32 csr15dir;
u32 csr15val; /* 21143 NWay setting. */
- struct medialeaf mleaf[0];
+ struct medialeaf mleaf[];
};
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9e9d9eee29d9..15efc294f513 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -12,13 +12,6 @@
#define pr_fmt(fmt) "tulip: " fmt
#define DRV_NAME "tulip"
-#ifdef CONFIG_TULIP_NAPI
-#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
-#else
-#define DRV_VERSION "1.1.15"
-#endif
-#define DRV_RELDATE "Feb 27, 2007"
-
#include <linux/module.h>
#include <linux/pci.h>
@@ -37,9 +30,6 @@
#include <asm/prom.h>
#endif
-static char version[] =
- "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
-
/* A few user-configurable values. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -109,7 +99,6 @@ static int csr0;
MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
@@ -868,7 +857,6 @@ static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct tulip_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1289,7 +1277,7 @@ static const struct net_device_ops tulip_netdev_ops = {
#endif
};
-const struct pci_device_id early_486_chipsets[] = {
+static const struct pci_device_id early_486_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
{ },
@@ -1314,11 +1302,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned int eeprom_missing = 0;
unsigned int force_csr0 = 0;
-#ifndef MODULE
- if (tulip_debug > 0)
- printk_once(KERN_INFO "%s", version);
-#endif
-
board_idx++;
/*
@@ -1800,14 +1783,13 @@ static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
void __iomem *ioaddr = tp->base_addr;
if (tp->flags & COMET_PM) {
-
unsigned int tmp;
-
+
tmp = ioread32(ioaddr + CSR18);
tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
tmp |= comet_csr18_pm_mode;
iowrite32(tmp, ioaddr + CSR18);
-
+
/* Set the Wake-up Control/Status Register to the given WOL options*/
tmp = ioread32(ioaddr + CSR13);
tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
@@ -1969,10 +1951,6 @@ static struct pci_driver tulip_driver = {
static int __init tulip_init (void)
{
-#ifdef MODULE
- pr_info("%s", version);
-#endif
-
if (!csr0) {
pr_warn("tulip: unknown CPU architecture, using default csr0\n");
/* default to 8 longword cache line alignment */
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 117ffe08800d..f726436b1985 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -7,8 +7,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "uli526x"
-#define DRV_VERSION "0.9.3"
-#define DRV_RELDATE "2005-7-29"
#include <linux/module.h>
@@ -196,10 +194,6 @@ enum uli526x_CR6_bits {
};
/* Global variable declaration ----------------------------- */
-static int printed_version;
-static const char version[] =
- "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
-
static int uli526x_debug;
static unsigned char uli526x_media_mode = ULI526X_AUTO;
static u32 uli526x_cr6_user_set;
@@ -282,9 +276,6 @@ static int uli526x_init_one(struct pci_dev *pdev,
ULI526X_DBUG(0, "uli526x_init_one()", 0);
- if (!printed_version++)
- pr_info("%s\n", version);
-
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
if (dev == NULL)
@@ -972,7 +963,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct uli526x_board_info *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1799,9 +1789,6 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
static int __init uli526x_init_module(void)
{
- pr_info("%s\n", version);
- printed_version = 1;
-
ULI526X_DBUG(0, "init_module() ", debug);
if (debug)
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 7f136488e67c..4d5e4fa53023 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -47,9 +47,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "winbond-840"
-#define DRV_VERSION "1.01-e"
-#define DRV_RELDATE "Sep-11-2006"
-
/* Automatically extracted configuration info:
probe-func: winbond840_probe
@@ -139,16 +136,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#undef PKT_BUF_SZ /* tulip.h also defines this */
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
-/* These identify the driver base version and may not be removed. */
-static const char version[] __initconst =
- "v" DRV_VERSION " (2.4 port) "
- DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
- " http://www.scyld.com/network/drivers.html\n";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
@@ -1385,7 +1375,6 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -1650,7 +1639,6 @@ static struct pci_driver w840_driver = {
static int __init w840_init(void)
{
- printk(version);
return pci_register_driver(&w840_driver);
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 26c5da032b1e..643090555cc7 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -8,8 +8,6 @@
*/
#define DRV_NAME "DL2000/TC902x-based linux driver"
-#define DRV_VERSION "v1.19"
-#define DRV_RELDATE "2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>
@@ -20,8 +18,6 @@
#define dr16(reg) ioread16(ioaddr + (reg))
#define dr8(reg) ioread8(ioaddr + (reg))
-static char version[] =
- KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
@@ -113,13 +109,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
int err, irq;
void __iomem *ioaddr;
- static int version_printed;
void *ring_space;
dma_addr_t ring_dma;
- if (!version_printed++)
- printk ("%s", version);
-
err = pci_enable_device (pdev);
if (err)
return err;
@@ -1244,7 +1236,6 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index b91387c456ba..dc566fcc3ba9 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -23,9 +23,6 @@
*/
#define DRV_NAME "sundance"
-#define DRV_VERSION "1.2"
-#define DRV_RELDATE "11-Sep-2006"
-
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -101,11 +98,6 @@ static char *media[MAX_UNITS];
#include <linux/ethtool.h>
#include <linux/mii.h>
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
- " Written by Donald Becker\n";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");
@@ -516,13 +508,6 @@ static int sundance_probe1(struct pci_dev *pdev,
#endif
int phy, phy_end, phy_idx = 0;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
if (pci_enable_device(pdev))
return -EIO;
pci_set_master(pdev);
@@ -1657,7 +1642,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -2010,10 +1994,6 @@ static struct pci_driver sundance_driver = {
static int __init sundance_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
return pci_register_driver(&sundance_driver);
}
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 5f8fa1145db6..057a508dd6e2 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -729,7 +729,6 @@ static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
index 8af6c0705ab3..030724484b49 100644
--- a/drivers/net/ethernet/dnet.h
+++ b/drivers/net/ethernet/dnet.h
@@ -8,7 +8,6 @@
#define _DNET_H
#define DRV_NAME "dnet"
-#define DRV_VERSION "0.9.1"
#define PFX DRV_NAME ": "
/* Register access macros */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf3e6f2892ff..6e9022083004 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -33,7 +33,6 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "12.0.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 022a54a1805b..d6ed1d943762 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -221,7 +221,6 @@ static void be_get_drvinfo(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
strlcpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
@@ -1409,6 +1408,9 @@ static int be_set_priv_flags(struct net_device *netdev, u32 flags)
}
const struct ethtool_ops be_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 56f59db6ebf2..a7ac23a6862b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -21,8 +21,7 @@
#include <net/busy_poll.h>
#include <net/vxlan.h>
-MODULE_VERSION(DRV_VER);
-MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
+MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
@@ -5949,8 +5948,6 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
struct net_device *netdev;
int status = 0;
- dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
-
status = pci_enable_device(pdev);
if (status)
goto do_none;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4572797f00d7..87236206366f 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -30,7 +30,6 @@
#include "ftgmac100.h"
#define DRV_NAME "ftgmac100"
-#define DRV_VERSION "0.7"
/* Arbitrary values, I am not sure the HW has limits */
#define MAX_RX_QUEUE_ENTRIES 1024
@@ -1150,7 +1149,6 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
@@ -1733,7 +1731,7 @@ static int ftgmac100_setup_clk(struct ftgmac100 *priv)
if (rc)
goto cleanup_clk;
- /* RCLK is for RMII, typically used for NCSI. Optional because its not
+ /* RCLK is for RMII, typically used for NCSI. Optional because it's not
* necessary if it's the AST2400 MAC, or the MAC is configured for
* RGMII, or the controller is not an ASPEED-based controller.
*/
@@ -1757,9 +1755,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
struct device_node *np;
int err = 0;
- if (!pdev)
- return -ENODEV;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 6c247cbbd23e..32cf54f0e35b 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -23,7 +23,6 @@
#include "ftmac100.h"
#define DRV_NAME "ftmac100"
-#define DRV_VERSION "0.2"
#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
@@ -809,7 +808,6 @@ static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
@@ -1184,7 +1182,6 @@ static struct platform_driver ftmac100_driver = {
*****************************************************************************/
static int __init ftmac100_init(void)
{
- pr_info("Loading version " DRV_VERSION " ...\n");
return platform_driver_register(&ftmac100_driver);
}
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 84f10970299a..73e896a7d8fd 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -25,8 +25,6 @@
*/
#define DRV_NAME "fealnx"
-#define DRV_VERSION "2.52"
-#define DRV_RELDATE "Sep-11-2006"
static int debug; /* 1-> print debug message */
static int max_interrupt_work = 20;
@@ -91,11 +89,6 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#include <linux/uaccess.h>
#include <asm/byteorder.h>
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
-
-
/* This driver was written to use PCI memory space, however some x86 systems
work only with I/O space accesses. */
#ifndef __alpha__
@@ -495,13 +488,6 @@ static int fealnx_init_one(struct pci_dev *pdev,
int bar = 1;
#endif
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
card_idx++;
sprintf(boardname, "fealnx%d", card_idx);
@@ -1809,7 +1795,6 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -1950,11 +1935,6 @@ static struct pci_driver fealnx_driver = {
static int __init fealnx_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
-
return pci_register_driver(&fealnx_driver);
}
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 2bd7ace0a953..bfc6bfe94d0a 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -77,6 +77,7 @@ config UCC_GETH
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
select PHYLIB
+ select FIXED_PHY
---help---
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
@@ -90,6 +91,7 @@ config GIANFAR
depends on HAS_DMA
select FSL_PQ_MDIO
select PHYLIB
+ select FIXED_PHY
select CRC32
---help---
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index 3b325733a4f8..0a54c7e0e4ae 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -3,6 +3,7 @@ menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
depends on FSL_DPAA && FSL_FMAN
select PHYLIB
+ select FIXED_PHY
select FSL_FMAN_MAC
---help---
Data Path Acceleration Architecture Ethernet driver,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index ca74a684a904..6bfa7575af94 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -259,8 +259,20 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ if (is_valid_ether_addr(mac_addr)) {
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else {
+ eth_hw_addr_random(net_dev);
+ err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "Failed to set random MAC address\n");
+ return -EINVAL;
+ }
+ dev_info(dev, "Using random MAC address: %pM\n",
+ net_dev->dev_addr);
+ }
net_dev->ethtool_ops = &dpaa_ethtool_ops;
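
[The dpaa_netdev_init() change above only trusts the firmware-provided MAC when it is a valid unicast address and otherwise falls back to a random locally-administered one. A userspace sketch of both helpers, written as simplified versions of the kernel's is_valid_ether_addr() and eth_hw_addr_random():]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* valid means unicast (multicast bit clear) and not all-zero */
static int is_valid_ether_addr(const uint8_t *a)
{
        static const uint8_t zero[6];

        return !(a[0] & 1) && memcmp(a, zero, 6) != 0;
}

static void random_ether_addr(uint8_t *a)
{
        for (int i = 0; i < 6; i++)
                a[i] = rand() & 0xff;
        a[0] &= 0xfe;   /* clear the multicast bit */
        a[0] |= 0x02;   /* set the locally-administered bit */
}

int main(void)
{
        uint8_t mac[6] = { 0 };  /* e.g. firmware left the address blank */

        srand((unsigned)time(NULL));
        if (!is_valid_ether_addr(mac)) {
                random_ether_addr(mac);
                printf("Using random MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        }
        return 0;
}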
@@ -2050,7 +2062,7 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
}
#ifdef CONFIG_DPAA_ERRATUM_A050385
-int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct sk_buff *new_skb, *skb = *s;
@@ -2902,7 +2914,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
}
/* Do this here, so we can be verbose early */
- SET_NETDEV_DEV(net_dev, dev);
+ SET_NETDEV_DEV(net_dev, dev->parent);
dev_set_drvdata(dev, net_dev);
priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 66d150872d48..9db2a02fb531 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -106,19 +106,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- int len;
-
strlcpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- len = snprintf(drvinfo->version, sizeof(drvinfo->version),
- "%X", 0);
- len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%X", 0);
-
- if (len >= sizeof(drvinfo->fw_version)) {
- /* Truncated output */
- netdev_notice(net_dev, "snprintf() = %d\n", len);
- }
strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
@@ -536,7 +525,6 @@ static int dpaa_get_coalesce(struct net_device *dev,
c->rx_coalesce_usecs = period;
c->rx_max_coalesced_frames = thresh;
- c->use_adaptive_rx_coalesce = false;
return 0;
}
@@ -551,9 +539,6 @@ static int dpaa_set_coalesce(struct net_device *dev,
u8 thresh, prev_thresh;
int cpu, res;
- if (c->use_adaptive_rx_coalesce)
- return -EINVAL;
-
period = c->rx_coalesce_usecs;
thresh = c->rx_max_coalesced_frames;
@@ -593,6 +578,8 @@ revert_values:
}
const struct ethtool_ops dpaa_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
.set_msglevel = dpaa_set_msglevel,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7ff147e89426..d97c320a2dc0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)sg_vaddr, 0);
@@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
- dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
@@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
(page_address(page) - page_address(head_page));
skb_add_rx_frag(skb, i - 1, head_page, page_offset,
- sg_length, DPAA2_ETH_RX_BUF_SIZE);
+ sg_length, priv->rx_buf_size);
}
if (dpaa2_sg_is_final(sge))
@@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)vaddr, 0);
}
@@ -335,7 +335,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
break;
case XDP_REDIRECT:
dma_unmap_page(priv->net_dev->dev.parent, addr,
- DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
ch->buf_count--;
xdp.data_hard_start = vaddr;
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
@@ -374,7 +374,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
trace_dpaa2_rx_fd(priv->net_dev, fd);
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false);
@@ -393,13 +393,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
}
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
free_pages((unsigned long)vaddr, 0);
@@ -974,7 +974,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
if (!page)
goto err_alloc;
- addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+ addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@@ -984,7 +984,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
page, DPAA2_ETH_RX_BUF_RAW_SIZE,
- addr, DPAA2_ETH_RX_BUF_SIZE,
+ addr, priv->rx_buf_size,
bpid);
}
@@ -1704,10 +1704,15 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- return -EINVAL;
+ if (priv->mac)
+ return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+
+ return -EOPNOTSUPP;
}
static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
@@ -1715,7 +1720,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
int mfl, linear_mfl;
mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
- linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+ linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
if (mfl > linear_mfl) {
@@ -2457,6 +2462,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
else
rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+ /* We need to ensure that the buffer size seen by WRIOP is a multiple
+ * of 64 or 256 bytes depending on the WRIOP version.
+ */
+ priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
+
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
buf_layout.pass_timestamp = true;
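
[The ALIGN_DOWN() call added above rounds the buffer size down to the WRIOP alignment, 64 or 256 bytes depending on the WRIOP version, both powers of two. The kernel macro is defined via ALIGN(), but for power-of-two alignments it reduces to a mask, as this small demo assumes:]

#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1u)) /* a must be a power of two */

int main(void)
{
        unsigned int raw = 2250;        /* illustrative raw rx buffer size */

        printf("align 64:  %u\n", ALIGN_DOWN(raw, 64u));   /* 2240 */
        printf("align 256: %u\n", ALIGN_DOWN(raw, 256u));  /* 2048 */
        return 0;
}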
@@ -3121,7 +3131,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
- pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
+ pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
if (err) {
dev_err(dev, "dpni_set_pools() failed\n");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7635db3ef903..13242bf5b427 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -382,6 +382,7 @@ struct dpaa2_eth_priv {
u16 tx_data_offset;
struct fsl_mc_device *dpbp_dev;
+ u16 rx_buf_size;
u16 bpid;
struct iommu_domain *iommu_domain;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 96676abcebd5..b7141fdc279e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -79,6 +79,16 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
sizeof(drvinfo->bus_info));
}
+static int dpaa2_eth_nway_reset(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (priv->mac)
+ return phylink_ethtool_nway_reset(priv->mac->phylink);
+
+ return -EOPNOTSUPP;
+}
+
static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *link_settings)
@@ -625,7 +635,7 @@ static int num_rules(struct dpaa2_eth_priv *priv)
static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs,
- int location)
+ unsigned int location)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_cls_rule *rule;
@@ -761,6 +771,7 @@ static int dpaa2_eth_get_ts_info(struct net_device *dev,
const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
+ .nway_reset = dpaa2_eth_nway_reset,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
.set_link_ksettings = dpaa2_eth_set_link_ksettings,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 84233e467ed1..3ee236c5fc37 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -123,49 +123,60 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
struct dpmac_link_state *dpmac_state = &mac->state;
int err;
- if (state->speed != SPEED_UNKNOWN)
- dpmac_state->rate = state->speed;
-
- if (state->duplex != DUPLEX_UNKNOWN) {
- if (!state->duplex)
- dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
- }
-
if (state->an_enabled)
dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
else
dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
- if (state->pause & MLO_PAUSE_RX)
- dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
-
- if (!!(state->pause & MLO_PAUSE_RX) ^ !!(state->pause & MLO_PAUSE_TX))
- dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
-
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
if (err)
- netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+ netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
+ __func__, err);
}
-static void dpaa2_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy)
+static void dpaa2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
struct dpmac_link_state *dpmac_state = &mac->state;
int err;
dpmac_state->up = 1;
+
+ if (mac->if_link_type == DPMAC_LINK_TYPE_PHY) {
+ /* If the DPMAC is configured for PHY mode, we need
+ * to pass the link parameters to the MC firmware.
+ */
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ /* This is lossy; the firmware really should take the pause
+ * enablement status rather than pause/asym pause status.
+ */
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
+ }
+
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
if (err)
- netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+ netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
+ __func__, err);
}
static void dpaa2_mac_link_down(struct phylink_config *config,
@@ -238,6 +249,8 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_close_dpmac;
}
+ mac->if_link_type = attr.link_type;
+
dpmac_node = dpaa2_mac_get_node(attr.id);
if (!dpmac_node) {
netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 4da8079b9155..2130d9c7d40e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -20,6 +20,7 @@ struct dpaa2_mac {
struct phylink_config phylink_config;
struct phylink *phylink;
phy_interface_t if_mode;
+ enum dpmac_link_type if_link_type;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index fe942de19597..2b43848e1363 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC
tristate "ENETC PF driver"
- depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI && PCI_MSI
select FSL_ENETC_MDIO
select PHYLIB
help
@@ -13,7 +13,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
- depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI && PCI_MSI
select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
@@ -23,7 +23,7 @@ config FSL_ENETC_VF
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
@@ -42,16 +42,6 @@ config FSL_ENETC_PTP_CLOCK
If compiled as module (M), the module name is fsl-enetc-ptp.
-config FSL_ENETC_HW_TIMESTAMPING
- bool "ENETC hardware timestamping support"
- depends on FSL_ENETC || FSL_ENETC_VF
- help
- Enable hardware timestamping support on the Ethernet packets
- using the SO_TIMESTAMPING API. Because the RX BD ring dynamic
- allocation has not been supported and it is too expensive to use
- extended RX BDs if timestamping is not used, this option enables
- extended RX BDs in order to support hardware timestamping.
-
config FSL_ENETC_QOS
bool "ENETC hardware Time-sensitive Network support"
depends on (FSL_ENETC || FSL_ENETC_VF) && (NET_SCH_TAPRIO || NET_SCH_CBS)
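Dropping FSL_ENETC_HW_TIMESTAMPING makes extended RX BDs a runtime property instead of a build-time one: as the enetc.c hunks that follow show, the rings are sized from the RX-timestamp offload state, with FSL_ENETC_PTP_CLOCK guarding the code. Where a full #ifdef block isn't warranted, the usual idiom for this kind of conversion is IS_ENABLED(), which keeps the code compiled (and type-checked) in every configuration while still letting the optimizer drop it — a sketch against the driver's existing fields:

#include "enetc.h"	/* struct enetc_ndev_priv, ENETC_F_RX_TSTAMP */

static bool example_wants_extended_rxbd(const struct enetc_ndev_priv *priv)
{
	/* evaluates to false, at compile time, when the PTP clock
	 * driver is configured out
	 */
	return IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) &&
	       (priv->active_offloads & ENETC_F_RX_TSTAMP);
}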
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 1f79e36116a3..ccf2611f4a20 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -451,7 +451,7 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
i = rx_ring->next_to_use;
rx_swbd = &rx_ring->rx_swbd[i];
- rxbd = ENETC_RXBD(*rx_ring, i);
+ rxbd = enetc_rxbd(rx_ring, i);
for (j = 0; j < buff_cnt; j++) {
/* try reuse page */
@@ -468,13 +468,12 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
/* clear 'R" as well */
rxbd->r.lstatus = 0;
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
rx_swbd++;
- rxbd++;
i++;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rx_swbd = rx_ring->rx_swbd;
- rxbd = ENETC_RXBD(*rx_ring, 0);
}
}
@@ -488,7 +487,7 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
union enetc_rx_bd *rxbd,
struct sk_buff *skb)
@@ -502,7 +501,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
lo = enetc_rd(hw, ENETC_SICTR0);
hi = enetc_rd(hw, ENETC_SICTR1);
- tstamp_lo = le32_to_cpu(rxbd->r.tstamp);
+ rxbd = enetc_rxbd_ext(rxbd);
+ tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
if (lo <= tstamp_lo)
hi -= 1;
@@ -516,7 +516,7 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
#endif
/* TODO: hashing */
@@ -533,7 +533,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxbd->r.vlan_opt));
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (priv->active_offloads & ENETC_F_RX_TSTAMP)
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
#endif
@@ -655,7 +655,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
cleaned_cnt -= count;
}
- rxbd = ENETC_RXBD(*rx_ring, i);
+ rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
if (!bd_status)
break;
@@ -670,12 +670,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_get_offloads(rx_ring, rxbd, skb);
cleaned_cnt++;
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
if (unlikely(bd_status &
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
@@ -683,12 +681,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
dma_rmb();
bd_status = le32_to_cpu(rxbd->r.lstatus);
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
}
rx_ring->ndev->stats.rx_dropped++;
@@ -710,12 +706,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
cleaned_cnt++;
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
}
rx_byte_cnt += skb->len;
@@ -845,15 +839,19 @@ static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
enetc_free_txbdr(priv->tx_ring[i]);
}
-static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
{
+ size_t size = sizeof(union enetc_rx_bd);
int err;
rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
if (!rxr->rx_swbd)
return -ENOMEM;
- err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
+ if (extended)
+ size *= 2;
+
+ err = enetc_dma_alloc_bdr(rxr, size);
if (err) {
vfree(rxr->rx_swbd);
return err;
@@ -862,6 +860,7 @@ static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
rxr->next_to_clean = 0;
rxr->next_to_use = 0;
rxr->next_to_alloc = 0;
+ rxr->ext_en = extended;
return 0;
}
@@ -881,10 +880,11 @@ static void enetc_free_rxbdr(struct enetc_bdr *rxr)
static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
{
+ bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
int i, err;
for (i = 0; i < priv->num_rx_rings; i++) {
- err = enetc_alloc_rxbdr(priv->rx_ring[i]);
+ err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
if (err)
goto fail;
@@ -1174,9 +1174,10 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
rbmr = ENETC_RBMR_EN;
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
- rbmr |= ENETC_RBMR_BDS;
-#endif
+
+ if (rx_ring->ext_en)
+ rbmr |= ENETC_RBMR_BDS;
+
if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rbmr |= ENETC_RBMR_VTE;
@@ -1577,11 +1578,12 @@ int enetc_set_features(struct net_device *ndev,
return 0;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct hwtstamp_config config;
+ int ao;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -1597,6 +1599,7 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
+ ao = priv->active_offloads;
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
@@ -1606,6 +1609,11 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
+ if (netif_running(ndev) && ao != priv->active_offloads) {
+ enetc_close(ndev);
+ enetc_open(ndev);
+ }
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -1632,7 +1640,7 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (cmd == SIOCSHWTSTAMP)
return enetc_hwtstamp_set(ndev, rq);
if (cmd == SIOCGHWTSTAMP)
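Since the RX BD format (normal vs. extended) is fixed when the rings are allocated, enetc_hwtstamp_set() above bounces a running interface with enetc_close()/enetc_open() whenever SIOCSHWTSTAMP actually changes the offload set, rebuilding the rings with the right descriptor size. The ioctl path already holds the RTNL lock, which is what makes the bare close/open pair safe against concurrent reconfiguration. The general pattern, as a sketch (foo_open/foo_close stand in for the driver's ndo_open/ndo_stop):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int foo_open(struct net_device *ndev);
static int foo_close(struct net_device *ndev);

/* Apply a setting that can only take effect via ring re-allocation. */
static void example_apply_by_reopen(struct net_device *ndev)
{
	ASSERT_RTNL();	/* caller (e.g. the SIOCSHWTSTAMP path) holds RTNL */

	if (netif_running(ndev)) {
		foo_close(ndev);	/* tears down rings with the old layout */
		foo_open(ndev);		/* re-allocates with the new layout */
	}
}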
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index dd4a227ffc7a..56c43f35b633 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -73,6 +73,7 @@ struct enetc_bdr {
dma_addr_t bd_dma_base;
u8 tsd_enable; /* Time specific departure */
+ bool ext_en; /* enable h/w descriptor extensions */
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
@@ -104,7 +105,37 @@ struct enetc_cbdr {
};
#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
-#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
+
+static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
+{
+ int hw_idx = i;
+
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (rx_ring->ext_en)
+ hw_idx = 2 * i;
+#endif
+ return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
+}
+
+static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd *rxbd,
+ int i)
+{
+ rxbd++;
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (rx_ring->ext_en)
+ rxbd++;
+#endif
+ if (unlikely(++i == rx_ring->bd_count))
+ rxbd = rx_ring->bd_base;
+
+ return rxbd;
+}
+
+static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
+{
+ return ++rxbd;
+}
struct enetc_msg_swbd {
void *vaddr;
@@ -163,7 +194,7 @@ struct enetc_int_vector {
char name[ENETC_INT_NAME_MAX];
struct enetc_bdr rx_ring ____cacheline_aligned_in_smp;
- struct enetc_bdr tx_ring[0];
+ struct enetc_bdr tx_ring[];
};
struct enetc_cls_rule {
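The new enetc_rxbd()/enetc_rxbd_next() helpers above encode the extended-ring layout: with rx_ring->ext_en set, the hardware ring interleaves one extension BD after every regular BD, so software index i lands at hardware slot 2 * i, every advance steps over two descriptors, and the extension BD is always the immediately following slot. The mapping, spelled out:

#include <linux/types.h>

/* Ring layout with extension enabled (each cell is one
 * sizeof(union enetc_rx_bd) slot):
 *
 *   hw slot:   0      1         2      3         4      5
 *            [bd 0][bd 0 ext][bd 1][bd 1 ext][bd 2][bd 2 ext]
 *
 * hence enetc_rxbd(ring, i) == &base[2 * i] and
 * enetc_rxbd_ext(rxbd) == rxbd + 1.
 */
static inline int example_hw_idx(int i, bool ext_en)
{
	return ext_en ? 2 * i : i;	/* mirrors enetc_rxbd()'s index math */
}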
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 301ee0dde02d..34bd1f3fb415 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -195,15 +195,21 @@ static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
static int enetc_get_sset_count(struct net_device *ndev, int sset)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int len;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
- if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(enetc_si_counters) +
- ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
- ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings +
- (enetc_si_is_pf(priv->si) ?
- ARRAY_SIZE(enetc_port_counters) : 0);
+ len = ARRAY_SIZE(enetc_si_counters) +
+ ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
+ ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings;
- return -EOPNOTSUPP;
+ if (!enetc_si_is_pf(priv->si))
+ return len;
+
+ len += ARRAY_SIZE(enetc_port_counters);
+
+ return len;
}
static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -568,7 +574,7 @@ static int enetc_get_ts_info(struct net_device *ndev,
info->phc_index = -1;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 62554f28ce07..2a6523136947 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -418,9 +418,6 @@ union enetc_rx_bd {
struct {
__le64 addr;
u8 reserved[8];
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
- u8 reserved1[16];
-#endif
} w;
struct {
__le16 inet_csum;
@@ -435,11 +432,11 @@ union enetc_rx_bd {
};
__le32 lstatus;
};
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ } r;
+ struct {
__le32 tstamp;
u8 reserved[12];
-#endif
- } r;
+ } ext;
};
#define ENETC_RXBD_LSTATUS_R BIT(30)
@@ -588,7 +585,7 @@ struct tgs_gcl_data {
__le32 bth;
__le32 ct;
__le32 cte;
- struct gce entry[0];
+ struct gce entry[];
};
struct enetc_cbd {
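With the union reshaped above, the RX timestamp lives in the extension BD (rxbd->ext.tstamp) and holds only the low 32 bits of the free-running 1588 counter; enetc_get_rx_tstamp() (earlier in this series) reconstructs the high half from the live SICTR0/SICTR1 registers, winding hi back by one when the counter has already wrapped past the captured low word. That reconstruction, as a sketch:

#include <linux/types.h>

/* Rebuild a 64-bit timestamp from a captured low word plus the current
 * counter (hi:lo). If the live low word is <= the captured one, the
 * counter wrapped after the capture, so hi must be decremented.
 */
static u64 example_tstamp64(u32 hi, u32 lo, u32 captured_lo)
{
	if (lo <= captured_lo)
		hi--;

	return (u64)hi << 32 | captured_lo;
}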
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index ebc635f8a4cc..15f37c5b8dc1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -74,8 +74,8 @@ err_pci_mem_reg:
pci_disable_device(pdev);
err_pci_enable:
err_mdiobus_alloc:
- iounmap(port_regs);
err_hw_alloc:
+ iounmap(port_regs);
err_ioremap:
return err;
}
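The enetc_pci_mdio change is a pure error-unwind ordering fix: iounmap() sat under the err_mdiobus_alloc label, which is reached by failures that occur before the region is ever mapped; moving it below err_hw_alloc means each cleanup now runs only for resources that were actually acquired. The invariant — labels in exact reverse order of acquisition, with each goto skipping the cleanups for resources not yet taken — in sketch form (acquire_*/release_* are illustrative stand-ins):

static int acquire_a(void), acquire_b(void), acquire_c(void);
static void release_a(void), release_b(void);

static int example_probe(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();		/* e.g. the ioremap() step */
	if (err)
		goto err_b;		/* only A needs undoing */

	err = acquire_c();		/* e.g. the hw allocation */
	if (err)
		goto err_c;		/* B, then A, need undoing */

	return 0;

err_c:
	release_b();			/* reverse order of acquisition */
err_b:
	release_a();
	return err;
}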
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index fc0d7d99e9a1..85e2b741df41 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -7,14 +7,7 @@
#include <linux/of_net.h>
#include "enetc_pf.h"
-#define ENETC_DRV_VER_MAJ 1
-#define ENETC_DRV_VER_MIN 0
-
-#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
- __stringify(ENETC_DRV_VER_MIN)
-static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
#define ENETC_DRV_NAME_STR "ENETC PF driver"
-static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
{
@@ -803,11 +796,6 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
struct device_node *mdio_np;
int err;
- if (!np) {
- dev_err(priv->dev, "missing ENETC port node\n");
- return -ENODEV;
- }
-
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!priv->phy_node) {
if (!of_phy_is_fixed_link(np)) {
@@ -929,9 +917,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
netif_carrier_off(ndev);
- netif_info(priv, probe, ndev, "%s v%s\n",
- enetc_drv_name, enetc_drv_ver);
-
return 0;
err_reg_netdev:
@@ -959,9 +944,6 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_sriov_configure(pdev, 0);
priv = netdev_priv(si->ndev);
- netif_info(priv, drv, si->ndev, "%s v%s remove\n",
- enetc_drv_name, enetc_drv_ver);
-
unregister_netdev(si->ndev);
enetc_mdio_remove(pf);
@@ -995,4 +977,3 @@ module_pci_driver(enetc_pf_driver);
MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index ebd21bf4cfa1..f14576212a0e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -4,14 +4,7 @@
#include <linux/module.h>
#include "enetc.h"
-#define ENETC_DRV_VER_MAJ 1
-#define ENETC_DRV_VER_MIN 0
-
-#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
- __stringify(ENETC_DRV_VER_MIN)
-static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
#define ENETC_DRV_NAME_STR "ENETC VF driver"
-static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
/* Messaging */
static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
@@ -201,9 +194,6 @@ static int enetc_vf_probe(struct pci_dev *pdev,
netif_carrier_off(ndev);
- netif_info(priv, probe, ndev, "%s v%s\n",
- enetc_drv_name, enetc_drv_ver);
-
return 0;
err_reg_netdev:
@@ -225,8 +215,6 @@ static void enetc_vf_remove(struct pci_dev *pdev)
struct enetc_ndev_priv *priv;
priv = netdev_priv(si->ndev);
- netif_info(priv, drv, si->ndev, "%s v%s remove\n",
- enetc_drv_name, enetc_drv_ver);
unregister_netdev(si->ndev);
enetc_free_msix(priv);
@@ -254,4 +242,3 @@ module_pci_driver(enetc_vf_driver);
MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f79e57f735b3..e74dd1f86bba 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -488,6 +488,12 @@ struct fec_enet_priv_rx_q {
struct sk_buff *rx_skbuff[RX_RING_SIZE];
};
+struct fec_stop_mode_gpr {
+ struct regmap *gpr;
+ u8 reg;
+ u8 bit;
+};
+
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
@@ -562,6 +568,7 @@ struct fec_enet_private {
int hwts_tx_en;
struct delayed_work time_keep;
struct regulator *reg_phy;
+ struct fec_stop_mode_gpr stop_gpr;
unsigned int tx_align;
unsigned int rx_align;
@@ -584,7 +591,7 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
- u64 ethtool_stats[0];
+ u64 ethtool_stats[];
};
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 23c5fef2f1ad..dc6f8763a5d4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -62,6 +62,8 @@
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@ -84,6 +86,56 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_OPD_V 0xFFF0
#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
+struct fec_devinfo {
+ u32 quirks;
+ u8 stop_gpr_reg;
+ u8 stop_gpr_bit;
+};
+
+static const struct fec_devinfo fec_imx25_info = {
+ .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx27_info = {
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx28_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx6q_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+ FEC_QUIRK_HAS_RACC,
+ .stop_gpr_reg = 0x34,
+ .stop_gpr_bit = 27,
+};
+
+static const struct fec_devinfo fec_mvf600_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+};
+
+static const struct fec_devinfo fec_imx6x_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+};
+
+static const struct fec_devinfo fec_imx6ul_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+ FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_COALESCE,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -91,39 +143,25 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
- FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx25_info,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx27_info,
}, {
.name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx28_info,
}, {
.name = "imx6q-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC,
+ .driver_data = (kernel_ulong_t)&fec_imx6q_info,
}, {
.name = "mvf600-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+ .driver_data = (kernel_ulong_t)&fec_mvf600_info,
}, {
.name = "imx6sx-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ .driver_data = (kernel_ulong_t)&fec_imx6x_info,
}, {
.name = "imx6ul-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
- FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE,
+ .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
}, {
/* sentinel */
}
@@ -1092,11 +1130,28 @@ fec_restart(struct net_device *ndev)
}
+static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
+{
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
+
+ if (stop_gpr->gpr) {
+ if (enabled)
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit),
+ BIT(stop_gpr->bit));
+ else
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit), 0);
+ } else if (pdata && pdata->sleep_mode_enable) {
+ pdata->sleep_mode_enable(enabled);
+ }
+}
+
static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
@@ -1125,9 +1180,7 @@ fec_stop(struct net_device *ndev)
val = readl(fep->hwp + FEC_ECNTRL);
val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
-
- if (pdata && pdata->sleep_mode_enable)
- pdata->sleep_mode_enable(true);
+ fec_enet_stop_mode(fep, true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -2128,7 +2181,6 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strlcpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
@@ -2642,6 +2694,8 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = fec_enet_get_drvinfo,
.get_regs_len = fec_enet_get_regs_len,
.get_regs = fec_enet_get_regs,
@@ -3397,6 +3451,37 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
return irq_cnt;
}
+static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
+ struct fec_devinfo *dev_info,
+ struct device_node *np)
+{
+ struct device_node *gpr_np;
+ int ret = 0;
+
+ if (!dev_info)
+ return 0;
+
+ gpr_np = of_parse_phandle(np, "gpr", 0);
+ if (!gpr_np)
+ return 0;
+
+ fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
+ if (IS_ERR(fep->stop_gpr.gpr)) {
+ dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
+ ret = PTR_ERR(fep->stop_gpr.gpr);
+ fep->stop_gpr.gpr = NULL;
+ goto out;
+ }
+
+ fep->stop_gpr.reg = dev_info->stop_gpr_reg;
+ fep->stop_gpr.bit = dev_info->stop_gpr_bit;
+
+out:
+ of_node_put(gpr_np);
+
+ return ret;
+}
+
static int
fec_probe(struct platform_device *pdev)
{
@@ -3412,6 +3497,7 @@ fec_probe(struct platform_device *pdev)
int num_rx_qs;
char irq_name[8];
int irq_cnt;
+ struct fec_devinfo *dev_info;
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
@@ -3429,7 +3515,9 @@ fec_probe(struct platform_device *pdev)
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
pdev->id_entry = of_id->data;
- fep->quirks = pdev->id_entry->driver_data;
+ dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+ if (dev_info)
+ fep->quirks = dev_info->quirks;
fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs;
@@ -3463,6 +3551,10 @@ fec_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+ ret = fec_enet_init_stop_mode(fep, dev_info, np);
+ if (ret)
+ goto failed_stop_mode;
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
@@ -3631,6 +3723,7 @@ failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
+failed_stop_mode:
failed_phy:
dev_id--;
failed_ioremap:
@@ -3708,7 +3801,6 @@ static int __maybe_unused fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
int ret;
int val;
@@ -3726,8 +3818,8 @@ static int __maybe_unused fec_resume(struct device *dev)
goto failed_clk;
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
- if (pdata && pdata->sleep_mode_enable)
- pdata->sleep_mode_enable(false);
+ fec_enet_stop_mode(fep, false);
+
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
@@ -3793,6 +3885,7 @@ static struct platform_driver fec_driver = {
.name = DRIVER_NAME,
.pm = &fec_pm_ops,
.of_match_table = fec_dt_ids,
+ .suppress_bind_attrs = true,
},
.id_table = fec_devtype,
.probe = fec_probe,
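The fec_main.c rework does two related things: per-SoC data moves out of flag bits packed into driver_data and into struct fec_devinfo, which can also carry the stop-mode GPR register/bit; and magic-packet stop mode is then entered by flipping that bit through a syscon regmap instead of a board-specific platform callback, with the legacy pdata->sleep_mode_enable() kept as a fallback. The phandle-lookup-plus-update pattern, in sketch form (the "gpr" property name matches the code above; the rest is illustrative):

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_set_stop_bit(struct device_node *np, u32 reg, u32 bit,
				bool enable)
{
	struct device_node *gpr_np;
	struct regmap *gpr;

	gpr_np = of_parse_phandle(np, "gpr", 0);
	if (!gpr_np)
		return -ENODEV;

	gpr = syscon_node_to_regmap(gpr_np);
	of_node_put(gpr_np);
	if (IS_ERR(gpr))
		return PTR_ERR(gpr);

	/* read-modify-write a single bit in the GPR block */
	return regmap_update_bits(gpr, reg, BIT(bit),
				  enable ? BIT(bit) : 0);
}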
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1ca543ac8f2c..004c266802a8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -366,13 +366,26 @@ static void set_dflts(struct dtsec_cfg *cfg)
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}
+static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+{
+ u32 tmp;
+
+ tmp = (u32)((adr[5] << 24) |
+ (adr[4] << 16) | (adr[3] << 8) | adr[2]);
+ iowrite32be(tmp, &regs->macstnaddr1);
+
+ tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
+ iowrite32be(tmp, &regs->macstnaddr2);
+}
+
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
- phy_interface_t iface, u16 iface_speed, u8 *macaddr,
+ phy_interface_t iface, u16 iface_speed, u64 addr,
u32 exception_mask, u8 tbi_addr)
{
bool is_rgmii, is_sgmii, is_qsgmii;
- int i;
+ enet_addr_t eth_addr;
u32 tmp;
+ int i;
/* Soft reset */
iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
@@ -501,12 +514,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
iowrite32be(0xffffffff, &regs->ievent);
- tmp = (u32)((macaddr[5] << 24) |
- (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
- iowrite32be(tmp, &regs->macstnaddr1);
-
- tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
- iowrite32be(tmp, &regs->macstnaddr2);
+ if (addr) {
+ MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
+ set_mac_address(regs, (u8 *)eth_addr);
+ }
/* HASH */
for (i = 0; i < NUM_OF_HASH_REGS; i++) {
@@ -519,18 +530,6 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
return 0;
}
-static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
-{
- u32 tmp;
-
- tmp = (u32)((adr[5] << 24) |
- (adr[4] << 16) | (adr[3] << 8) | adr[2]);
- iowrite32be(tmp, &regs->macstnaddr1);
-
- tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
- iowrite32be(tmp, &regs->macstnaddr2);
-}
-
static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
bool enable)
{
@@ -556,10 +555,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
pr_err("1G MAC driver supports 1G or lower speeds\n");
return -EINVAL;
}
- if (dtsec->addr == 0) {
- pr_err("Ethernet MAC Must have a valid MAC Address\n");
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
@@ -1391,9 +1386,8 @@ int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
- int err;
u16 max_frm_ln;
- enet_addr_t eth_addr;
+ int err;
if (is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
@@ -1410,10 +1404,8 @@ int dtsec_init(struct fman_mac *dtsec)
dtsec_drv_param = dtsec->dtsec_drv_param;
- MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
-
err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
- dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
+ dtsec->max_speed, dtsec->addr, dtsec->exceptions,
dtsec->tbiphy->mdio.addr);
if (err) {
free_init_resources(dtsec);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 0d2b4ab01f24..a5500ede4070 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -596,10 +596,6 @@ static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
static int check_init_parameters(struct fman_mac *memac)
{
- if (memac->addr == 0) {
- pr_err("Ethernet MAC must have a valid MAC address\n");
- return -EINVAL;
- }
if (!memac->exception_cb) {
pr_err("Uninitialized exception handler\n");
return -EINVAL;
@@ -1057,8 +1053,10 @@ int memac_init(struct fman_mac *memac)
}
/* MAC Address */
- MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
- add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ if (memac->addr != 0) {
+ MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
+ add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ }
fixed_link = memac_drv_param->fixed_link;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index f75b9c11b2d2..8c7eb878d5b4 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -273,10 +273,6 @@ static int check_init_parameters(struct fman_mac *tgec)
pr_err("10G MAC driver only support 10G speed\n");
return -EINVAL;
}
- if (tgec->addr == 0) {
- pr_err("Ethernet 10G MAC Must have valid MAC Address\n");
- return -EINVAL;
- }
if (!tgec->exception_cb) {
pr_err("uninitialized exception_cb\n");
return -EINVAL;
@@ -706,8 +702,10 @@ int tgec_init(struct fman_mac *tgec)
cfg = tgec->cfg;
- MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
- set_mac_address(tgec->regs, (u8 *)eth_addr);
+ if (tgec->addr) {
+ MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
+ set_mac_address(tgec->regs, (u8 *)eth_addr);
+ }
/* interrupts */
/* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 55f2122c3217..43427c5b9396 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -724,12 +724,10 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
- if (IS_ERR(mac_addr)) {
- dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
- }
- ether_addr_copy(mac_dev->addr, mac_addr);
+ if (IS_ERR(mac_addr))
+ dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
+ else
+ ether_addr_copy(mac_dev->addr, mac_addr);
/* Get the port handles */
nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
@@ -855,7 +853,8 @@ static int mac_probe(struct platform_device *_of_dev)
if (err < 0)
dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
- dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
+ if (!IS_ERR(mac_addr))
+ dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
if (IS_ERR(priv->eth_dev)) {
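The dtsec/memac/tgec hunks above stop treating a zero MAC address as fatal — init simply skips programming the station address — and mac.c downgrades a missing DT address to a warning. The address can then be supplied later; the conventional fallback (an assumption here, not shown in this diff) is for the attached Ethernet driver to generate a random, locally administered one:

#include <linux/etherdevice.h>

static void example_init_mac_addr(struct net_device *ndev, const u8 *dt_addr)
{
	if (dt_addr && is_valid_ether_addr(dt_addr))
		ether_addr_copy(ndev->dev_addr, dt_addr);
	else
		eth_hw_addr_random(ndev);	/* random, locally administered */
}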
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index add61fed33ee..ce85feaac357 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -53,7 +53,6 @@
MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
@@ -790,7 +789,6 @@ static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int fs_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 195fae6aec4a..5ff2634bee2f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -190,8 +190,6 @@ void fs_cleanup_bds(struct net_device *dev);
#define DRV_MODULE_NAME "fs_enet"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "Sep 22, 2014"
/***************************************************************************/
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f7e5cafe89a9..b3c69e9038ea 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -103,8 +103,6 @@
#define TX_TIMEOUT (5*HZ)
-const char gfar_driver_version[] = "2.0";
-
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 432c6a818ae5..8ced783f5302 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -68,7 +68,6 @@ struct ethtool_rx_list {
#define RXBUF_ALIGNMENT 64
#define DRV_NAME "gfar-enet"
-extern const char gfar_driver_version[];
/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
#define MAX_TX_QS 0x8
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3c8e4e2efc07..cc7d4f93da54 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -164,10 +164,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, gfar_driver_version,
- sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}
/* Return the length of the register structure */
@@ -276,35 +272,6 @@ static int gfar_gcoalesce(struct net_device *dev,
cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
cvals->tx_max_coalesced_frames = txcount;
- cvals->use_adaptive_rx_coalesce = 0;
- cvals->use_adaptive_tx_coalesce = 0;
-
- cvals->pkt_rate_low = 0;
- cvals->rx_coalesce_usecs_low = 0;
- cvals->rx_max_coalesced_frames_low = 0;
- cvals->tx_coalesce_usecs_low = 0;
- cvals->tx_max_coalesced_frames_low = 0;
-
- /* When the packet rate is below pkt_rate_high but above
- * pkt_rate_low (both measured in packets per second) the
- * normal {rx,tx}_* coalescing parameters are used.
- */
-
- /* When the packet rate is (measured in packets per second)
- * is above pkt_rate_high, the {rx,tx}_*_high parameters are
- * used.
- */
- cvals->pkt_rate_high = 0;
- cvals->rx_coalesce_usecs_high = 0;
- cvals->rx_max_coalesced_frames_high = 0;
- cvals->tx_coalesce_usecs_high = 0;
- cvals->tx_max_coalesced_frames_high = 0;
-
- /* How often to do adaptive coalescing packet rate sampling,
- * measured in seconds. Must not be zero.
- */
- cvals->rate_sample_interval = 0;
-
return 0;
}
@@ -1507,6 +1474,8 @@ static int gfar_get_ts_info(struct net_device *dev,
}
const struct ethtool_ops gfar_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = gfar_gdrvinfo,
.get_regs_len = gfar_reglen,
.get_regs = gfar_get_regs,
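gfar_gcoalesce() above can drop its block of explicit zeroing (and hip04_set_coalesce() further below its long parameter check) because of the new ethtool contract appearing throughout this series: a driver declares the coalescing fields it honours in ethtool_ops.supported_coalesce_params and the ethtool core rejects any other non-zero field with -EOPNOTSUPP before the driver is ever called. The declaration side, as a minimal sketch:

#include <linux/ethtool.h>

static int foo_get_coalesce(struct net_device *ndev,
			    struct ethtool_coalesce *ec);
static int foo_set_coalesce(struct net_device *ndev,
			    struct ethtool_coalesce *ec);

/* Only the two declared parameter groups ever reach the handlers. */
static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_coalesce		   = foo_get_coalesce,
	.set_coalesce		   = foo_set_coalesce,
};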
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 0d101c00286f..552e7554a9f8 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -42,6 +42,7 @@
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
#include <asm/machdep.h>
+#include <net/sch_generic.h>
#include "ucc_geth.h"
@@ -1548,11 +1549,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
- /* Prevent any further xmits, plus detach the device. */
- netif_device_detach(ugeth->ndev);
-
- /* Wait for any current xmits to finish. */
- netif_tx_disable(ugeth->ndev);
+ /* Prevent any further xmits */
+ netif_tx_stop_all_queues(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
disable_irq(ugeth->ug_info->uf_info.irq);
@@ -1565,7 +1563,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
- netif_device_attach(ugeth->ndev);
+
+ /* allow xmit again */
+ netif_tx_wake_all_queues(ugeth->ndev);
+ __netdev_watchdog_up(ugeth->ndev);
}
/* Called every time the controller might need to be made
@@ -3990,5 +3991,4 @@ module_exit(ucc_geth_exit);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index a86a42131fc7..3fe903972195 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -26,7 +26,6 @@
#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
#define DRV_NAME "ucc_geth"
-#define DRV_VERSION "1.1"
#define NUM_TX_QUEUES 8
#define NUM_RX_QUEUES 8
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index dfebacf443fc..14c08a868190 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -334,8 +334,6 @@ uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 3892a2062404..2fff43509098 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -64,7 +64,7 @@ config HNS_MDIO
the PHY
config HNS
- tristate "Hisilicon Network Subsystem Support (Framework)"
+ tristate
---help---
This selects the framework support for Hisilicon Network Subsystem. It
is needed by any driver which provides HNS acceleration engine or make
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d9718b87279d..12f6c2442a7a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -811,20 +811,6 @@ static int hip04_set_coalesce(struct net_device *netdev,
{
struct hip04_priv *priv = netdev_priv(netdev);
- /* Check not supported parameters */
- if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
- (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
- (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
- return -EOPNOTSUPP;
-
if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
(ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
@@ -845,6 +831,8 @@ static void hip04_get_drvinfo(struct net_device *netdev,
}
static const struct ethtool_ops hip04_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
.get_coalesce = hip04_get_coalesce,
.set_coalesce = hip04_set_coalesce,
.get_drvinfo = hip04_get_drvinfo,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 2721f1f1ab42..0f0e16f9afc0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -92,7 +92,7 @@ struct ppe_common_cb {
u8 comm_index; /*ppe_common index*/
u32 ppe_num;
- struct hns_ppe_cb ppe_cb[0];
+ struct hns_ppe_cb ppe_cb[];
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 3741befb914e..a9f805925699 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -108,7 +108,7 @@ struct rcb_common_cb {
u32 ring_num;
u32 desc_num; /* desc num per queue*/
- struct ring_pair_cb ring_pair_cb[0];
+ struct ring_pair_cb ring_pair_cb[];
};
int hns_rcb_buf_size2type(u32 buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 717fccc2efba..49624acf2473 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1264,6 +1264,11 @@ static int hns_get_rxnfc(struct net_device *netdev,
}
static const struct ethtool_ops hns_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH |
+ ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH,
.get_drvinfo = hns_nic_get_drvinfo,
.get_link = hns_nic_get_link,
.get_ringparam = hns_get_ringparam,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index d87158acdf6f..948e67ef30fd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -7,8 +7,6 @@
#include <linux/mutex.h>
#include <linux/types.h>
-#define HCLGE_MBX_VF_MSG_DATA_NUM 16
-
enum HCLGE_MBX_OPCODE {
HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/
@@ -72,10 +70,15 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
};
-#define HCLGE_MBX_MAX_MSG_SIZE 16
+#define HCLGE_MBX_MAX_MSG_SIZE 14
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
-#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
-#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
+#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+
+struct hclge_ring_chain_param {
+ u8 ring_type;
+ u8 tqp_index;
+ u8 int_gl_index;
+};
struct hclgevf_mbx_resp_status {
struct mutex mbx_mutex; /* protects against contending sync cmd resp */
@@ -85,6 +88,41 @@ struct hclgevf_mbx_resp_status {
u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
};
+struct hclge_respond_to_vf_msg {
+ int status;
+ u8 data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ u16 len;
+};
+
+struct hclge_vf_to_pf_msg {
+ u8 code;
+ union {
+ struct {
+ u8 subcode;
+ u8 data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ struct {
+ u8 en_bc;
+ u8 en_uc;
+ u8 en_mc;
+ };
+ struct {
+ u8 vector_id;
+ u8 ring_num;
+ struct hclge_ring_chain_param
+ param[HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM];
+ };
+ };
+};
+
+struct hclge_pf_to_vf_msg {
+ u16 code;
+ u16 vf_mbx_msg_code;
+ u16 vf_mbx_msg_subcode;
+ u16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
struct hclge_mbx_vf_to_pf_cmd {
u8 rsv;
u8 mbx_src_vfid; /* Auto filled by IMP */
@@ -92,17 +130,17 @@ struct hclge_mbx_vf_to_pf_cmd {
u8 rsv1[1];
u8 msg_len;
u8 rsv2[3];
- u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+ struct hclge_vf_to_pf_msg msg;
};
-#define HCLGE_MBX_NEED_RESP_BIT BIT(0)
+#define HCLGE_MBX_NEED_RESP_B 0
struct hclge_mbx_pf_to_vf_cmd {
u8 dest_vfid;
u8 rsv[3];
u8 msg_len;
u8 rsv1[3];
- u16 msg[8];
+ struct hclge_pf_to_vf_msg msg;
};
struct hclge_vf_rst_cmd {
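Replacing the raw u8 msg[HCLGE_MBX_MAX_MSG_SIZE] / u16 msg[8] arrays with struct hclge_vf_to_pf_msg and struct hclge_pf_to_vf_msg (plus a ring-chain array bounded by HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) turns index-arithmetic message packing into named-field access the compiler can bounds- and type-check. How a VF-side request is now assembled, as a sketch (the helper is illustrative; only the struct layout comes from the header above):

#include <linux/string.h>
#include "hclge_mbx.h"

static void example_build_msg(struct hclge_vf_to_pf_msg *req,
			      u8 code, u8 subcode)
{
	memset(req, 0, sizeof(*req));
	req->code = code;	/* top-level opcode */
	req->subcode = subcode;	/* e.g. an hclge_mbx_vlan_cfg_subcode */
	/* req->data[] carries the payload; the union's other arms overlay
	 * it for the mcast/ucast enables and the ring-chain mapping.
	 */
}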
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a3e4081b84ba..5587605d6deb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -78,6 +78,7 @@
enum hns_desc_type {
DESC_TYPE_SKB,
+ DESC_TYPE_FRAGLIST_SKB,
DESC_TYPE_PAGE,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 1d4ffc5f408a..e1d88095a77e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -260,6 +260,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump m7 info\n");
dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
dev_info(&h->pdev->dev, "dump mac tnl status\n");
+ dev_info(&h->pdev->dev, "dump loopback\n");
+ dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a7f40aa1a0ea..da98fd7c8eca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1107,6 +1107,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return ret;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)priv;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else {
frag = (skb_frag_t *)priv;
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
@@ -1144,8 +1148,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
- desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
- DESC_TYPE_SKB : DESC_TYPE_PAGE;
+ desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
+ type == DESC_TYPE_SKB) && !k) ?
+ type : DESC_TYPE_PAGE;
/* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
@@ -1354,7 +1359,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
ring_ptr_move_bw(ring, next_to_use);
/* unmap the descriptor dma address */
- if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
+ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
+ ring->desc_cb[ring->next_to_use].type ==
+ DESC_TYPE_FRAGLIST_SKB)
dma_unmap_single(dev,
ring->desc_cb[ring->next_to_use].dma,
ring->desc_cb[ring->next_to_use].length,
@@ -1447,7 +1454,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out;
skb_walk_frags(skb, frag_skb) {
- ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+ ret = hns3_fill_skb_to_desc(ring, frag_skb,
+ DESC_TYPE_FRAGLIST_SKB);
if (unlikely(ret < 0))
goto fill_err;
@@ -2228,7 +2236,7 @@ static void hns3_reset_prepare(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "hns3 flr prepare\n");
+ dev_info(&pdev->dev, "FLR prepare\n");
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
ae_dev->ops->flr_prepare(ae_dev);
}
@@ -2237,7 +2245,7 @@ static void hns3_reset_done(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "hns3 flr done\n");
+ dev_info(&pdev->dev, "FLR done\n");
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
ae_dev->ops->flr_done(ae_dev);
}
@@ -2356,7 +2364,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- if (cb->type == DESC_TYPE_SKB)
+ if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
else if (cb->length)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c03856e63320..28b81f24afa1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -736,7 +736,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (ops->get_media_type)
ops->get_media_type(handle, &media_type, &module_type);
- if (cmd->base.duplex != DUPLEX_FULL &&
+ if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
"only copper port supports half duplex!");
@@ -1390,7 +1390,13 @@ static int hns3_set_fecparam(struct net_device *netdev,
return ops->set_fec(handle, fec_mode);
}
+#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_TX_USECS_HIGH)
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
@@ -1416,6 +1422,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
};
static const struct ethtool_ops hns3_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 67fad80035d3..17228288d4df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -87,7 +87,7 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
entries_per_desc = ARRAY_SIZE(desc[0].data);
index = offset % entries_per_desc;
- return (int)desc[offset / entries_per_desc].data[index];
+ return le32_to_cpu(desc[offset / entries_per_desc].data[index]);
}
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -145,10 +145,8 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
+ if (!desc_src)
return;
- }
desc = desc_src;
ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
@@ -179,6 +177,7 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_dbg_bitmap_cmd *bitmap;
+ enum hclge_opcode_type cmd;
int rq_id, pri_id, qset_id;
int port_id, nq_id, pg_id;
struct hclge_desc desc[2];
@@ -193,10 +192,10 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
return;
}
- ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
- HCLGE_OPC_QSET_DFX_STS);
+ cmd = HCLGE_OPC_QSET_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
@@ -204,48 +203,53 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
- ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
+ cmd = HCLGE_OPC_PRI_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
- ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
+ cmd = HCLGE_OPC_PG_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_PORT_DFX_STS);
+ cmd = HCLGE_OPC_PORT_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
+ cmd = HCLGE_OPC_SCH_NQ_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
+ cmd = HCLGE_OPC_SCH_RQ_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
+ cmd = HCLGE_OPC_TM_INTERNAL_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
@@ -257,18 +261,18 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_TM_INTERNAL_CNT);
+ cmd = HCLGE_OPC_TM_INTERNAL_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_TM_INTERNAL_STS_1);
+ cmd = HCLGE_OPC_TM_INTERNAL_STS_1;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
@@ -277,6 +281,12 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
le32_to_cpu(desc[0].data[4]));
dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
le32_to_cpu(desc[0].data[5]));
+ return;
+
+err_dcb_cmd_send:
+ dev_err(&hdev->pdev->dev,
+ "failed to dump dcb dfx, cmd = %#x, ret = %d\n",
+ cmd, ret);
}
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
@@ -310,8 +320,9 @@ static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
char *false_buf)
{
if (flag)
- dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
- true_buf);
+ dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n",
+ title_buf, index, true_buf,
+ hdev->tm_info.pg_info[0].tc_dwrr[index]);
else
dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
false_buf);
@@ -339,7 +350,8 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump tc\n");
+ dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
+ hdev->tm_info.num_tc);
dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
ets_weight->weight_offset);
@@ -581,7 +593,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_map_cmd_send;
- qset_id = nq_to_qs_map->qset_id & 0x3FF;
+ qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;
cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
@@ -621,7 +633,8 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
if (ret)
goto err_tm_map_cmd_send;
- qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
+ qset_maping[group_id] =
+ le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
}
dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
@@ -824,6 +837,7 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
struct hclge_mac_ethertype_idx_rd_cmd *req0;
char printf_buf[HCLGE_DBG_BUF_LEN];
struct hclge_desc desc;
+ u32 msg_egress_port;
int ret, i;
dev_info(&hdev->pdev->dev, "mng tab:\n");
@@ -865,20 +879,21 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
"%x |%04x |%x |%04x|%x |%02x |%02x |",
!!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- req0->ethter_type,
+ le16_to_cpu(req0->ethter_type),
!!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
+ le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG,
!!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
req0->i_port_bitmap, req0->i_port_direction);
+ msg_egress_port = le16_to_cpu(req0->egress_port);
snprintf(printf_buf + strlen(printf_buf),
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
- "%d |%d |%02d |%04d|%x\n",
- !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- req0->egress_port & HCLGE_DBG_MNG_PF_ID,
- (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- req0->egress_queue,
- !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));
+ "%x |%x |%02x |%04x|%x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
dev_info(&hdev->pdev->dev, "%s", printf_buf);
}
@@ -1065,11 +1080,8 @@ static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev,
- "allocate desc for get_m7_stats failed\n");
+ if (!desc_src)
return;
- }
desc_tmp = desc_src;
ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
@@ -1132,7 +1144,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
-#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
@@ -1156,8 +1168,8 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
while (length > 0) {
data0 = offset;
- if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
- data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
+ if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
+ data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
else
data0 |= length << 16;
ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
@@ -1169,6 +1181,57 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
}
}
+static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ struct hclge_config_mac_mode_cmd *req_app;
+ struct hclge_serdes_lb_cmd *req_serdes;
+ struct hclge_desc desc;
+ u8 loopback_en;
+ int ret;
+
+ req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
+ req_serdes = (struct hclge_serdes_lb_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump app loopback status, ret = %d\n", ret);
+ return;
+ }
+
+ loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
+ HCLGE_MAC_APP_LP_B);
+ dev_info(&hdev->pdev->dev, "app loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump serdes loopback status, ret = %d\n",
+ ret);
+ return;
+ }
+
+ loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ loopback_en = req_serdes->enable &
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ if (phydev)
+ dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
+ phydev->loopback_enabled ? "on" : "off");
+}
+
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
@@ -1269,6 +1332,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
#define DUMP_TM_MAP "dump tm map"
+#define DUMP_LOOPBACK "dump loopback"
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1302,6 +1366,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
&cmd_buf[sizeof("dump ncl_config")]);
} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
hclge_dbg_dump_mac_tnl_status(hdev);
+ } else if (strncmp(cmd_buf, DUMP_LOOPBACK,
+ strlen(DUMP_LOOPBACK)) == 0) {
+ hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]);
} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
hclge_dbg_dump_qs_shaper(hdev,
&cmd_buf[sizeof("dump qs shaper")]);
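
For readers following the dump-dcb hunks above: the refactor keeps the opcode in a local cmd and funnels every hclge_dbg_cmd_send() failure through one err_dcb_cmd_send label, so the failing opcode is reported exactly once instead of the function returning silently. A minimal standalone sketch of the same pattern — send_cmd() and the CMD_* opcodes here are hypothetical stand-ins, not the driver's API:

#include <stdio.h>

enum { CMD_PRI_STS = 0x30, CMD_PG_STS = 0x31 };  /* stand-ins for HCLGE_OPC_* */

static int send_cmd(int cmd)  /* stub: pretend the second opcode fails */
{
	return cmd == CMD_PG_STS ? -5 : 0;
}

static void dump_dcb(void)
{
	int cmd, ret;

	cmd = CMD_PRI_STS;
	ret = send_cmd(cmd);
	if (ret)
		goto err_cmd_send;
	printf("pri stats ok\n");

	cmd = CMD_PG_STS;
	ret = send_cmd(cmd);
	if (ret)
		goto err_cmd_send;
	printf("pg stats ok\n");

	return;

err_cmd_send:
	/* one reporting point, so the failing opcode is always logged */
	printf("failed to dump dcb dfx, cmd = %#x, ret = %d\n", cmd, ret);
}

int main(void)
{
	dump_dcb();
	return 0;
}
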
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index c85b72dc44d2..50d5ef71756b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1667,9 +1667,6 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
hclge_handle_rocee_ras_error(ae_dev);
}
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- goto out;
-
if (ae_dev->hw_err_reset_req)
return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d3b0cd74ecd2..a758f9ae32be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -824,6 +824,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
static int hclge_parse_func_status(struct hclge_dev *hdev,
struct hclge_func_status_cmd *status)
{
+#define HCLGE_MAC_ID_MASK 0xF
+
if (!(status->pf_state & HCLGE_PF_STATE_DONE))
return -EINVAL;
@@ -833,6 +835,7 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
else
hdev->flag &= ~HCLGE_FLAG_MAIN;
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
return 0;
}
@@ -3441,7 +3444,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
u32 val;
if (hclge_get_hw_reset_stat(handle)) {
- dev_info(&pdev->dev, "Hardware reset not finish\n");
+ dev_info(&pdev->dev, "hardware reset not finish\n");
dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
@@ -3450,20 +3453,20 @@ static void hclge_do_reset(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
+ dev_info(&pdev->dev, "global reset requested\n");
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
- dev_info(&pdev->dev, "Global Reset requested\n");
break;
case HNAE3_FUNC_RESET:
- dev_info(&pdev->dev, "PF Reset requested\n");
+ dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", hdev->reset_type);
+ "unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
@@ -6765,7 +6768,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
struct hclge_dev *hdev = vport->back;
if (enable) {
- hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ hclge_task_schedule(hdev, 0);
} else {
/* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -7353,7 +7356,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
@@ -7398,7 +7400,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -7618,11 +7619,17 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
}
ether_addr_copy(vport->vf_info.mac, mac_addr);
- dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
- return hclge_inform_reset_assert_to_vf(vport);
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+ vf, mac_addr);
+ return hclge_inform_reset_assert_to_vf(vport);
+ }
+
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
+ vf, mac_addr);
+ return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
@@ -8979,6 +8986,12 @@ static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ /* When the nic is down, the service task is not running and does not
+ * update the port information every second. Query the port information
+ * before returning the media type to ensure it is correct.
+ */
+ hclge_update_port_info(hdev);
+
if (media_type)
*media_type = hdev->hw.mac.media_type;
@@ -9109,8 +9122,8 @@ init_nic_err:
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
struct hclge_vport *vport)
{
- struct hnae3_client *client = vport->roce.client;
struct hclge_dev *hdev = ae_dev->priv;
+ struct hnae3_client *client;
int rst_cnt;
int ret;
@@ -10286,8 +10299,9 @@ static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int data_len_per_desc, data_len, bd_num, i;
+ int data_len_per_desc, bd_num, i;
int bd_num_list[BD_LIST_MAX_NUM];
+ u32 data_len;
int ret;
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
@@ -10666,7 +10680,7 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
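
A quick aside on the HCLGE_MAC_ID_MASK hunk above: the firmware reports the MAC id in the low four bits of status->mac_id, so the driver masks with 0xF before caching it in hdev->hw.mac.mac_id. A tiny sketch of the extraction, using a made-up firmware byte:

#include <stdio.h>

#define MAC_ID_MASK 0xF  /* mirrors HCLGE_MAC_ID_MASK in the hunk above */

int main(void)
{
	unsigned char raw = 0xA7;  /* hypothetical value from firmware */
	unsigned char mac_id = raw & MAC_ID_MASK;

	printf("mac_id = %d\n", mac_id);  /* prints 7: low nibble only */
	return 0;
}
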
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index f78cbb4cc85e..71df23d5f1b4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -249,6 +249,7 @@ enum HCLGE_MAC_DUPLEX {
#define QUERY_ACTIVE_SPEED 1
struct hclge_mac {
+ u8 mac_id;
u8 phy_addr;
u8 flag;
u8 media_type; /* port media type, e.g. fibre/copper/backplane */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 3d850f6b1e37..7f24fcb4f96a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -5,6 +5,11 @@
#include "hclge_mbx.h"
#include "hnae3.h"
+static u16 hclge_errno_to_resp(int errno)
+{
+ return abs(errno);
+}
+
/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
* receives a mailbox message from VF.
* @vport: pointer to struct hclge_vport
@@ -14,25 +19,25 @@
*/
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
- int resp_status,
- u8 *resp_data, u16 resp_data_len)
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
enum hclge_cmd_status status;
struct hclge_desc desc;
+ u16 resp;
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
- if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
"PF fail to gen resp to VF len %u exceeds max len %u\n",
- resp_data_len,
+ resp_msg->len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
- /* If resp_data_len is too long, set the value to max length
+ /* If resp_msg->len is too long, set the value to max length
* and return the msg to VF
*/
- resp_data_len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@@ -40,18 +45,29 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
- resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
- resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
- resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
- resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+ resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
+ resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
+ resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
+ resp = hclge_errno_to_resp(resp_msg->status);
+ if (resp < SHRT_MAX) {
+ resp_pf_to_vf->msg.resp_status = resp;
+ } else {
+ dev_warn(&hdev->pdev->dev,
+ "failed to send response to VF, response status %d is out-of-bound\n",
+ resp);
+ resp_pf_to_vf->msg.resp_status = EIO;
+ }
- if (resp_data && resp_data_len > 0)
- memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+ if (resp_msg->len > 0)
+ memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
+ resp_msg->len);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
- "PF failed(=%d) to send response to VF\n", status);
+ "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
+ status, vf_to_pf_req->mbx_src_vfid,
+ vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);
return status;
}
@@ -70,15 +86,15 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
resp_pf_to_vf->dest_vfid = dest_vfid;
resp_pf_to_vf->msg_len = msg_len;
- resp_pf_to_vf->msg[0] = mbx_opcode;
+ resp_pf_to_vf->msg.code = mbx_opcode;
- memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+ memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
- "PF failed(=%d) to send mailbox message to VF\n",
- status);
+ "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
+ status, dest_vfid, mbx_opcode);
return status;
}
@@ -138,21 +154,20 @@ static int hclge_get_ring_chain_from_mbx(
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
int ring_num;
- int i;
+ int i = 0;
- ring_num = req->msg[2];
+ ring_num = req->msg.ring_num;
- if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
- HCLGE_MBX_RING_NODE_VARIABLE_NUM))
+ if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;
- hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg.param[i].ring_type);
ring_chain->tqp_index =
- hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg.param[i].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- req->msg[5]);
+ HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
cur_chain = ring_chain;
@@ -162,18 +177,15 @@ static int hclge_get_ring_chain_from_mbx(
goto err;
hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+ req->msg.param[i].ring_type);
new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
- [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
+ [req->msg.param[i].tqp_index]);
hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+ req->msg.param[i].int_gl_index);
cur_chain->next = new_chain;
cur_chain = new_chain;
@@ -189,7 +201,7 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
struct hclge_mbx_vf_to_pf_cmd *req)
{
struct hnae3_ring_chain_node ring_chain;
- int vector_id = req->msg[1];
+ int vector_id = req->msg.vector_id;
int ret;
memset(&ring_chain, 0, sizeof(ring_chain));
@@ -207,13 +219,9 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
-#define HCLGE_MBX_BC_INDEX 1
-#define HCLGE_MBX_UC_INDEX 2
-#define HCLGE_MBX_MC_INDEX 3
-
- bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false;
- bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false;
- bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false;
+ bool en_bc = req->msg.en_bc ? true : false;
+ bool en_uc = req->msg.en_uc ? true : false;
+ bool en_mc = req->msg.en_mc ? true : false;
int ret;
if (!vport->vf_info.trusted) {
@@ -222,8 +230,6 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
}
ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
- if (req->mbx_need_resp)
- hclge_gen_resp_to_vf(vport, req, ret, NULL, 0);
vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
@@ -244,26 +250,25 @@ void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6
+
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
int status;
- if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
- const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)
+ (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);
/* If VF MAC has been configured by the host then it
* cannot be overridden by the MAC specified by the VM.
*/
if (!is_zero_ether_addr(vport->vf_info.mac) &&
- !ether_addr_equal(mac_addr, vport->vf_info.mac)) {
- status = -EPERM;
- goto out;
- }
+ !ether_addr_equal(mac_addr, vport->vf_info.mac))
+ return -EPERM;
- if (!is_valid_ether_addr(mac_addr)) {
- status = -EINVAL;
- goto out;
- }
+ if (!is_valid_ether_addr(mac_addr))
+ return -EINVAL;
hclge_rm_uc_addr_common(vport, old_addr);
status = hclge_add_uc_addr_common(vport, mac_addr);
@@ -275,12 +280,12 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_UC);
}
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
status = hclge_add_uc_addr_common(vport, mac_addr);
if (!status)
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_UC);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
status = hclge_rm_uc_addr_common(vport, mac_addr);
if (!status)
hclge_rm_vport_mac_table(vport, mac_addr,
@@ -288,33 +293,26 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
} else {
dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %u\n",
- mbx_req->msg[1]);
+ mbx_req->msg.subcode);
return -EIO;
}
-out:
- if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
- hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
-
- return 0;
+ return status;
}
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
- u8 resp_len = 0;
- u8 resp_data;
int status;
- if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
status = hclge_add_mc_addr_common(vport, mac_addr);
if (!status)
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_MC);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr);
if (!status)
hclge_rm_vport_mac_table(vport, mac_addr,
@@ -322,15 +320,11 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %u\n",
- mbx_req->msg[1]);
+ mbx_req->msg.subcode);
return -EIO;
}
- if (gen_resp)
- hclge_gen_resp_to_vf(vport, mbx_req, status,
- &resp_data, resp_len);
-
- return 0;
+ return status;
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
@@ -351,12 +345,16 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
}
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+#define HCLGE_MBX_VLAN_STATE_OFFSET 0
+#define HCLGE_MBX_VLAN_INFO_OFFSET 2
+
struct hclge_vf_vlan_cfg *msg_cmd;
int status = 0;
- msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto;
@@ -367,38 +365,32 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
- if (mbx_req->mbx_need_resp)
- return hclge_gen_resp_to_vf(vport, mbx_req, status,
- NULL, 0);
} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = msg_cmd->is_kill ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en);
- } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
+ } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
struct hclge_vlan_info *vlan_info;
u16 *state;
- state = (u16 *)&mbx_req->msg[2];
- vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
+ state = (u16 *)&mbx_req->msg.data[HCLGE_MBX_VLAN_STATE_OFFSET];
+ vlan_info = (struct hclge_vlan_info *)
+ &mbx_req->msg.data[HCLGE_MBX_VLAN_INFO_OFFSET];
status = hclge_update_port_base_vlan_cfg(vport, *state,
vlan_info);
- } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
- u8 state;
-
- state = vport->port_base_vlan_cfg.state;
- status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
- sizeof(u8));
+ } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
+ resp_msg->data[0] = vport->port_base_vlan_cfg.state;
+ resp_msg->len = sizeof(u8);
}
return status;
}
static int hclge_set_vf_alive(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- bool alive = !!mbx_req->msg[2];
+ bool alive = !!mbx_req->msg.data[0];
int ret = 0;
if (alive)
@@ -409,73 +401,76 @@ static int hclge_set_vf_alive(struct hclge_vport *vport,
return ret;
}
-static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- u8 vf_tc_map = 0;
unsigned int i;
- int ret;
for (i = 0; i < kinfo->num_tc; i++)
- vf_tc_map |= BIT(i);
-
- ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
- sizeof(vf_tc_map));
+ resp_msg->data[0] |= BIT(i);
- return ret;
+ resp_msg->len = sizeof(u8);
}
-static int hclge_get_vf_queue_info(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN 6
- u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+#define HCLGE_TQPS_ALLOC_OFFSET 0
+#define HCLGE_TQPS_RSS_SIZE_OFFSET 2
+#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4
+
struct hclge_dev *hdev = vport->back;
/* get the queue related info */
- memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
- memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
- memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16));
-
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_TQPS_RSS_INFO_LEN);
+ memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
+ &vport->alloc_tqps, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
+ &vport->nic.kinfo.rss_size, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
+ &hdev->rx_buf_len, sizeof(u16));
+ resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}
-static int hclge_get_vf_mac_addr(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac,
- ETH_ALEN);
+ ether_addr_copy(resp_msg->data, vport->vf_info.mac);
+ resp_msg->len = ETH_ALEN;
}
-static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN 4
- u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN];
+#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0
+#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2
+
struct hclge_dev *hdev = vport->back;
/* get the queue depth info */
- memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16));
- memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16));
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_TQPS_DEPTH_INFO_LEN);
+ memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
+ &hdev->num_tx_desc, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
+ &hdev->num_rx_desc, sizeof(u16));
+ resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}
-static int hclge_get_vf_media_type(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_vf_media_type(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+#define HCLGE_VF_MEDIA_TYPE_OFFSET 0
+#define HCLGE_VF_MODULE_TYPE_OFFSET 1
+#define HCLGE_VF_MEDIA_TYPE_LENGTH 2
+
struct hclge_dev *hdev = vport->back;
- u8 resp_data[2];
- resp_data[0] = hdev->hw.mac.media_type;
- resp_data[1] = hdev->hw.mac.module_type;
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- sizeof(resp_data));
+ resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
+ hdev->hw.mac.media_type;
+ resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
+ hdev->hw.mac.module_type;
+ resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}
static int hclge_get_link_info(struct hclge_vport *vport,
@@ -529,7 +524,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
advertising = hdev->hw.mac.advertising[0];
supported = hdev->hw.mac.supported[0];
dest_vfid = mbx_req->mbx_src_vfid;
- msg_data[0] = mbx_req->msg[2];
+ msg_data[0] = mbx_req->msg.data[0];
send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
@@ -543,29 +538,22 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
{
u16 queue_id;
- memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+ memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
hclge_reset_vf_queue(vport, queue_id);
-
- /* send response msg to VF after queue reset complete */
- hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
-static void hclge_reset_vf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static int hclge_reset_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
vport->vport_id);
- ret = hclge_func_reset_cmd(hdev, vport->vport_id);
- hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+ return hclge_func_reset_cmd(hdev, vport->vport_id);
}
-static void hclge_vf_keep_alive(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
vport->last_active_jiffies = jiffies;
}
@@ -573,45 +561,39 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport,
static int hclge_set_vf_mtu(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- int ret;
u32 mtu;
- memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
- ret = hclge_set_vport_mtu(vport, mtu);
+ memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));
- return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+ return hclge_set_vport_mtu(vport, mtu);
}
-static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
u16 queue_id, qid_in_pf;
- u8 resp_data[2];
- memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+ memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
- memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
-
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- sizeof(resp_data));
+ memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
+ resp_msg->len = sizeof(qid_in_pf);
}
-static int hclge_get_rss_key(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
- u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
struct hclge_dev *hdev = vport->back;
u8 index;
- index = mbx_req->msg[2];
+ index = mbx_req->msg.data[0];
- memcpy(&resp_data[0],
+ memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
-
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_RSS_MBX_RESP_LEN);
+ resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -634,13 +616,10 @@ static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
static void hclge_handle_link_change_event(struct hclge_dev *hdev,
struct hclge_mbx_vf_to_pf_cmd *req)
{
-#define LINK_STATUS_OFFSET 1
-#define LINK_FAIL_CODE_OFFSET 2
-
hclge_task_schedule(hdev, 0);
- if (!req->msg[LINK_STATUS_OFFSET])
- hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
+ if (!req->msg.subcode)
+ hclge_link_fail_parse(hdev, req->msg.data[0]);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
@@ -662,12 +641,14 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
struct hclge_desc *desc;
unsigned int flag;
- int ret;
+ int ret = 0;
+ memset(&resp_msg, 0, sizeof(resp_msg));
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -683,7 +664,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %u\n",
- req->msg[0]);
+ req->msg.code);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
@@ -693,7 +674,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
vport = &hdev->vport[req->mbx_src_vfid];
- switch (req->msg[0]) {
+ switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
req);
@@ -717,47 +698,34 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req, false);
+ ret = hclge_set_vf_mc_mac_addr(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF fail(%d) to set VF MC MAC Addr\n",
ret);
break;
case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req);
+ ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
if (ret)
dev_err(&hdev->pdev->dev,
"PF failed(%d) to config VF's VLAN\n",
ret);
break;
case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req, false);
+ ret = hclge_set_vf_alive(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF failed(%d) to set VF's ALIVE\n",
ret);
break;
case HCLGE_MBX_GET_QINFO:
- ret = hclge_get_vf_queue_info(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get Q info for VF\n",
- ret);
+ hclge_get_vf_queue_info(vport, &resp_msg);
break;
case HCLGE_MBX_GET_QDEPTH:
- ret = hclge_get_vf_queue_depth(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get Q depth for VF\n",
- ret);
+ hclge_get_vf_queue_depth(vport, &resp_msg);
break;
-
case HCLGE_MBX_GET_TCINFO:
- ret = hclge_get_vf_tcinfo(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get TC info for VF\n",
- ret);
+ hclge_get_vf_tcinfo(vport, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_STATUS:
ret = hclge_get_link_info(vport, req);
@@ -770,10 +738,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_mbx_reset_vf_queue(vport, req);
break;
case HCLGE_MBX_RESET:
- hclge_reset_vf(vport, req);
+ ret = hclge_reset_vf(vport);
break;
case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport, req);
+ hclge_vf_keep_alive(vport);
break;
case HCLGE_MBX_SET_MTU:
ret = hclge_set_vf_mtu(vport, req);
@@ -782,18 +750,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"VF fail(%d) to set mtu\n", ret);
break;
case HCLGE_MBX_GET_QID_IN_PF:
- ret = hclge_get_queue_id_in_pf(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get qid for VF\n",
- ret);
+ hclge_get_queue_id_in_pf(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_RSS_KEY:
- ret = hclge_get_rss_key(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get rss key for VF\n",
- ret);
+ hclge_get_rss_key(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_MODE:
hclge_get_link_mode(vport, req);
@@ -807,21 +767,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_rm_vport_all_vlan_table(vport, true);
break;
case HCLGE_MBX_GET_MEDIA_TYPE:
- ret = hclge_get_vf_media_type(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to media type for VF\n",
- ret);
+ hclge_get_vf_media_type(vport, &resp_msg);
break;
case HCLGE_MBX_PUSH_LINK_STATUS:
hclge_handle_link_change_event(hdev, req);
break;
case HCLGE_MBX_GET_MAC_ADDR:
- ret = hclge_get_vf_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get MAC for VF\n",
- ret);
+ hclge_get_vf_mac_addr(vport, &resp_msg);
break;
case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev);
@@ -829,11 +781,22 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
- req->msg[0]);
+ req->msg.code);
break;
}
+
+ /* PF driver should not reply to the IMP */
+ if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ resp_msg.status = ret;
+ hclge_gen_resp_to_vf(vport, req, &resp_msg);
+ }
+
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
+
+ /* reinitialize ret after completing the mbx message processing */
+ ret = 0;
}
/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
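
A note on the hclge_mbx.c rework above: handlers no longer call hclge_gen_resp_to_vf() themselves; they fill a hclge_respond_to_vf_msg and the dispatch loop sends exactly one reply per request, converting a negative errno into a positive status via hclge_errno_to_resp(). The struct layout in the sketch below is an assumption inferred from the fields these hunks touch (status, data, len); the real definition lives in hclge_mbx.h and may differ:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_RESP_DATA 8  /* stands in for HCLGE_MBX_MAX_RESP_DATA_SIZE */

struct respond_to_vf_msg {  /* assumed shape of hclge_respond_to_vf_msg */
	int status;
	unsigned char data[MAX_RESP_DATA];
	unsigned short len;
};

/* a handler only fills in the response; it no longer sends anything */
static int get_rss_key(struct respond_to_vf_msg *resp)
{
	memset(resp->data, 0xab, 4);  /* pretend 4-byte key chunk */
	resp->len = 4;
	return 0;
}

static void gen_resp_to_vf(const struct respond_to_vf_msg *resp)
{
	/* errno-to-status conversion as in hclge_errno_to_resp() */
	printf("reply: status=%d len=%d\n", abs(resp->status), resp->len);
}

int main(void)
{
	struct respond_to_vf_msg resp;

	memset(&resp, 0, sizeof(resp));
	resp.status = get_rss_key(&resp);  /* dispatch loop runs a handler... */
	gen_resp_to_vf(&resp);             /* ...then replies exactly once */
	return 0;
}
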
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 0510d85a7f6a..e02d427131ee 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -229,13 +229,25 @@ static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
hclgevf_tqps_get_stats(handle, data);
}
+static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
+ u8 subcode)
+{
+ if (msg) {
+ memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
+ msg->code = code;
+ msg->subcode = subcode;
+ }
+}
+
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
+ struct hclge_vf_to_pf_msg send_msg;
u8 resp_msg;
int status;
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
- true, &resp_msg, sizeof(resp_msg));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ sizeof(resp_msg));
if (status) {
dev_err(&hdev->pdev->dev,
"VF request to get TC info from PF failed %d",
@@ -251,12 +263,14 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
u8 resp_msg;
int ret;
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
- NULL, 0, true, &resp_msg, sizeof(u8));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ sizeof(u8));
if (ret) {
dev_err(&hdev->pdev->dev,
"VF request to get port based vlan state failed %d",
@@ -272,11 +286,16 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
+#define HCLGEVF_TQPS_ALLOC_OFFSET 0
+#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2
+#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4
+
u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
int status;
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
- true, resp_msg,
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
HCLGEVF_TQPS_RSS_INFO_LEN);
if (status) {
dev_err(&hdev->pdev->dev,
@@ -285,9 +304,12 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
return status;
}
- memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
- memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
- memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));
+ memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
+ sizeof(u16));
return 0;
}
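
One more aside for the hclgevf hunks: raw msg[] indexing gives way to hclgevf_build_send_msg() plus named fields. The union sketch below is inferred from the fields used across these hunks (code/subcode/data, the promisc en_* flags, and the ring-chain vector_id/ring_num); it is not copied from hclge_mbx.h and the real member sizes may differ:

#include <stdio.h>
#include <string.h>

struct vf_to_pf_msg {  /* assumed shape of hclge_vf_to_pf_msg */
	unsigned char code;
	union {
		struct {  /* generic request */
			unsigned char subcode;
			unsigned char data[14];
		};
		struct {  /* HCLGE_MBX_SET_PROMISC_MODE */
			unsigned char en_bc;
			unsigned char en_uc;
			unsigned char en_mc;
		};
		struct {  /* ring-to-vector (un)mapping */
			unsigned char vector_id;
			unsigned char ring_num;
		};
	};
};

static void build_send_msg(struct vf_to_pf_msg *msg,
			   unsigned char code, unsigned char subcode)
{
	memset(msg, 0, sizeof(*msg));  /* same zero-then-fill as the helper */
	msg->code = code;
	msg->subcode = subcode;
}

int main(void)
{
	struct vf_to_pf_msg msg;

	build_send_msg(&msg, 3 /* e.g. a GET_QINFO-like code */, 0);
	printf("code=%d subcode=%d\n", msg.code, msg.subcode);
	return 0;
}
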
@@ -295,11 +317,15 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4
+#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0
+#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2
+
u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
- true, resp_msg,
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
HCLGEVF_TQPS_DEPTH_INFO_LEN);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -308,8 +334,10 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
return ret;
}
- memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
- memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));
+ memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
+ sizeof(u16));
return 0;
}
@@ -317,14 +345,14 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data[2], resp_data[2];
+ struct hclge_vf_to_pf_msg send_msg;
u16 qid_in_pf = 0;
+ u8 resp_data[2];
int ret;
- memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
-
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
- sizeof(msg_data), true, resp_data,
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
+ memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
sizeof(resp_data));
if (!ret)
qid_in_pf = *(u16 *)resp_data;
@@ -334,11 +362,13 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
+ struct hclge_vf_to_pf_msg send_msg;
u8 resp_msg[2];
int ret;
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
- true, resp_msg, sizeof(resp_msg));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ sizeof(resp_msg));
if (ret) {
dev_err(&hdev->pdev->dev,
"VF request to get the pf port media type failed %d",
@@ -425,11 +455,11 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
+ struct hclge_vf_to_pf_msg send_msg;
int status;
- u8 resp_msg;
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
- 0, false, &resp_msg, sizeof(resp_msg));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (status)
dev_err(&hdev->pdev->dev,
"VF failed to fetch link status(%d) from PF", status);
@@ -463,19 +493,16 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_ADVERTISING 0
-#define HCLGEVF_SUPPORTED 1
- u8 send_msg;
- u8 resp_msg;
+#define HCLGEVF_ADVERTISING 0
+#define HCLGEVF_SUPPORTED 1
- send_msg = HCLGEVF_ADVERTISING;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
- &send_msg, sizeof(send_msg), false,
- &resp_msg, sizeof(resp_msg));
- send_msg = HCLGEVF_SUPPORTED;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
- &send_msg, sizeof(send_msg), false,
- &resp_msg, sizeof(resp_msg));
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
+ send_msg.data[0] = HCLGEVF_ADVERTISING;
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ send_msg.data[0] = HCLGEVF_SUPPORTED;
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
@@ -677,19 +704,19 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN 8
-
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
u16 msg_num, hash_key_index;
u8 index;
int ret;
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
HCLGEVF_RSS_MBX_RESP_LEN;
for (index = 0; index < msg_num; index++) {
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
- &index, sizeof(index),
- true, resp_msg,
+ send_msg.data[0] = index;
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
HCLGEVF_RSS_MBX_RESP_LEN);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -1001,44 +1028,32 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
struct hnae3_ring_chain_node *node;
- struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclgevf_desc desc;
- int i = 0;
int status;
- u8 type;
+ int i = 0;
- req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
+ memset(&send_msg, 0, sizeof(send_msg));
+ send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+ send_msg.vector_id = vector_id;
for (node = ring_chain; node; node = node->next) {
- int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
- HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;
-
- if (i == 0) {
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_MBX_VF_TO_PF,
- false);
- req->msg[0] = type;
- req->msg[1] = vector_id;
- }
-
- req->msg[idx_offset] =
+ send_msg.param[i].ring_type =
hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
- req->msg[idx_offset + 1] = node->tqp_index;
- req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S);
+
+ send_msg.param[i].tqp_index = node->tqp_index;
+ send_msg.param[i].int_gl_index =
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
i++;
- if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
- HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
- !node->next) {
- req->msg[2] = i;
+ if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
+ send_msg.ring_num = i;
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
+ NULL, 0);
if (status) {
dev_err(&hdev->pdev->dev,
"Map TQP fail, status is %d.\n",
@@ -1046,11 +1061,6 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
return status;
}
i = 0;
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_MBX_VF_TO_PF,
- false);
- req->msg[0] = type;
- req->msg[1] = vector_id;
}
}
@@ -1123,18 +1133,17 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
bool en_uc_pmc, bool en_mc_pmc,
bool en_bc_pmc)
{
- struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
- req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
- req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
- req->msg[1] = en_bc_pmc ? 1 : 0;
- req->msg[2] = en_uc_pmc ? 1 : 0;
- req->msg[3] = en_mc_pmc ? 1 : 0;
+ memset(&send_msg, 0, sizeof(send_msg));
+ send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
+ send_msg.en_bc = en_bc_pmc ? 1 : 0;
+ send_msg.en_uc = en_uc_pmc ? 1 : 0;
+ send_msg.en_mc = en_mc_pmc ? 1 : 0;
+
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"Set promisc mode fail, status is %d.\n", ret);
@@ -1193,11 +1202,13 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
+ struct hclge_vf_to_pf_msg send_msg;
u8 host_mac[ETH_ALEN];
int status;
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0,
- true, host_mac, ETH_ALEN);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
+ ETH_ALEN);
if (status) {
dev_err(&hdev->pdev->dev,
"fail to get VF MAC from host %d", status);
@@ -1229,20 +1240,16 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+ struct hclge_vf_to_pf_msg send_msg;
u8 *new_mac_addr = (u8 *)p;
- u8 msg_data[ETH_ALEN * 2];
- u16 subcode;
int status;
- ether_addr_copy(msg_data, new_mac_addr);
- ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
-
- subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
+ send_msg.subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
HCLGE_MBX_MAC_VLAN_UC_MODIFY;
-
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
- subcode, msg_data, sizeof(msg_data),
- true, NULL, 0);
+ ether_addr_copy(send_msg.data, new_mac_addr);
+ ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1253,49 +1260,60 @@ static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_ADD,
- addr, ETH_ALEN, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_ADD);
+ ether_addr_copy(send_msg.data, addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_REMOVE,
- addr, ETH_ALEN, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE);
+ ether_addr_copy(send_msg.data, addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_ADD,
- addr, ETH_ALEN, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_ADD);
+ ether_addr_copy(send_msg.data, addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_REMOVE,
- addr, ETH_ALEN, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE);
+ ether_addr_copy(send_msg.data, addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
{
-#define HCLGEVF_VLAN_MBX_MSG_LEN 5
+#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0
+#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1
+#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3
+
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
if (vlan_id > HCLGEVF_MAX_VLAN_ID)
@@ -1313,16 +1331,18 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
return -EBUSY;
}
- msg_data[0] = is_kill;
- memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
- memcpy(&msg_data[3], &proto, sizeof(proto));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER);
+ send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
+ memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
+ sizeof(vlan_id));
+ memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
+ sizeof(proto));
/* when removing the hw vlan filter fails, record the vlan id and try
 * to remove it from hw later, to be consistent with the stack.
*/
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (is_kill && ret)
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
@@ -1355,37 +1375,38 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data;
+ struct hclge_vf_to_pf_msg send_msg;
- msg_data = enable ? 1 : 0;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
- 1, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_RX_OFF_CFG);
+ send_msg.data[0] = enable ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data[2];
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
- memcpy(msg_data, &queue_id, sizeof(queue_id));
-
/* disable the vf queue before sending the queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
if (ret)
return ret;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
- sizeof(msg_data), true, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
+ memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
- sizeof(new_mtu), true, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
+ memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
@@ -1500,11 +1521,12 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME 100
+ struct hclge_vf_to_pf_msg send_msg;
int ret = 0;
if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
- 0, true, NULL, sizeof(u8));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
hdev->rst_stats.vf_func_rst_cnt++;
}
@@ -1881,14 +1903,14 @@ static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
- u8 respmsg;
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
- 0, false, &respmsg, sizeof(respmsg));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (ret)
dev_err(&hdev->pdev->dev,
"VF sends keep alive cmd failed(=%d)\n", ret);
@@ -2002,7 +2024,10 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_MBX;
}
- dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+ /* print other vector0 event source */
+ dev_info(&hdev->pdev->dev,
+ "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
+ cmdq_stat_reg);
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
@@ -2124,50 +2149,51 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
return ret;
}
-static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int ret;
+ struct hclgevf_rss_tuple_cfg *tuple_sets;
u32 i;
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
-
+ tuple_sets = &rss_cfg->rss_tuple_sets;
if (hdev->pdev->revision >= 0x21) {
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
+ tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ }
+
+ /* Initialize RSS indirect table */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret;
+
+ if (hdev->pdev->revision >= 0x21) {
ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
rss_cfg->rss_hash_key);
if (ret)
return ret;
- rss_cfg->rss_tuple_sets.ipv4_tcp_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv4_udp_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv4_sctp_en =
- HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- rss_cfg->rss_tuple_sets.ipv4_fragment_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv6_tcp_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv6_udp_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv6_sctp_en =
- HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- rss_cfg->rss_tuple_sets.ipv6_fragment_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-
ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
if (ret)
return ret;
}
- /* Initialize RSS indirect table */
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
- rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
-
ret = hclgevf_set_rss_indir_table(hdev);
if (ret)
return ret;
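Editor's note: the indirection-table initialization that moves into hclgevf_rss_init_cfg() above is the standard round-robin spread: hash bucket i maps to queue i modulo the RSS queue count. A minimal userspace sketch of that one loop follows; the table size and queue count are illustrative stand-ins, not the driver's HCLGEVF_RSS_IND_TBL_SIZE or its runtime rss_size.

#include <stdio.h>

#define IND_TBL_SIZE 16   /* illustrative; not the driver's constant */

int main(void)
{
	unsigned int tbl[IND_TBL_SIZE];
	unsigned int rss_size = 4;   /* number of RSS queues in use */

	/* Round-robin: buckets cycle over queues 0..rss_size-1. */
	for (unsigned int i = 0; i < IND_TBL_SIZE; i++)
		tbl[i] = i % rss_size;

	for (unsigned int i = 0; i < IND_TBL_SIZE; i++)
		printf("bucket %2u -> queue %u\n", i, tbl[i]);
	return 0;
}

With a uniform hash this gives each queue an equal share of buckets, which is why the driver refreshes rss_size (and, in the set_channels hunk below, re-derives the table) whenever the queue count changes.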
@@ -2242,12 +2268,16 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
+#define HCLGEVF_STATE_ALIVE 1
+#define HCLGEVF_STATE_NOT_ALIVE 0
+
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data;
+ struct hclge_vf_to_pf_msg send_msg;
- msg_data = alive ? 1 : 0;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
- 0, &msg_data, 1, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
+ send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
+ HCLGEVF_STATE_NOT_ALIVE;
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_client_start(struct hnae3_handle *handle)
@@ -2764,6 +2794,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
+ hclgevf_rss_init_cfg(hdev);
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -2801,10 +2832,12 @@ err_cmd_queue_init:
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
+ struct hclge_vf_to_pf_msg send_msg;
+
hclgevf_state_uninit(hdev);
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0,
- false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
@@ -2936,6 +2969,8 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_indir[i] = i % kinfo->rss_size;
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
+
ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
if (ret)
dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
@@ -3101,16 +3136,17 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
u8 *port_base_vlan_info, u8 data_size)
{
struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
rtnl_unlock();
/* send msg to PF and wait for the port-based VLAN info to be updated */
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_PORT_BASE_VLAN_CFG,
- port_base_vlan_info, data_size,
- false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_PORT_BASE_VLAN_CFG);
+ memcpy(send_msg.data, port_base_vlan_info, data_size);
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
@@ -3188,7 +3224,7 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index fee8d97f323c..3b88d866facc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -305,8 +305,8 @@ static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
return !!hdev->reset_pending;
}
-int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
- const u8 *msg_data, u8 msg_len, bool need_resp,
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
u8 *resp_data, u16 resp_len);
void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 7cbd715d5e7a..9b8154955f91 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -5,6 +5,11 @@
#include "hclgevf_main.h"
#include "hnae3.h"
+static int hclgevf_resp_to_errno(u16 resp_code)
+{
+ return resp_code ? -resp_code : 0;
+}
+
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
@@ -79,8 +84,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
return 0;
}
-int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
- const u8 *msg_data, u8 msg_len, bool need_resp,
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
u8 *resp_data, u16 resp_len)
{
struct hclge_mbx_vf_to_pf_cmd *req;
@@ -89,21 +94,17 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- /* first two bytes are reserved for code & subcode */
- if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+ if (!send_msg) {
dev_err(&hdev->pdev->dev,
- "VF send mbx msg fail, msg len %d exceeds max len %d\n",
- msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ "failed to send mbx, msg is NULL\n");
return -EINVAL;
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
- req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
- ~HCLGE_MBX_NEED_RESP_BIT;
- req->msg[0] = code;
- req->msg[1] = subcode;
- if (msg_data)
- memcpy(&req->msg[2], msg_data, msg_len);
+ if (need_resp)
+ hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1);
+
+ memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
/* synchronous send */
if (need_resp) {
@@ -118,7 +119,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
return status;
}
- status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+ status = hclgevf_get_mbx_resp(hdev, send_msg->code,
+ send_msg->subcode, resp_data,
resp_len);
mutex_unlock(&hdev->mbx_resp.mbx_mutex);
} else {
@@ -169,7 +171,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %u\n",
- req->msg[0]);
+ req->msg.code);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
@@ -183,19 +185,21 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
* timeout and simultaneously queue the async messages for later
* processing in the context of the mailbox task, i.e. the slow path.
*/
- switch (req->msg[0]) {
+ switch (req->msg.code) {
case HCLGE_MBX_PF_VF_RESP:
if (resp->received_resp)
dev_warn(&hdev->pdev->dev,
"VF mbx resp flag not clear(%u)\n",
- req->msg[1]);
+ req->msg.vf_mbx_msg_code);
resp->received_resp = true;
- resp->origin_mbx_msg = (req->msg[1] << 16);
- resp->origin_mbx_msg |= req->msg[2];
- resp->resp_status = req->msg[3];
+ resp->origin_mbx_msg =
+ (req->msg.vf_mbx_msg_code << 16);
+ resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
+ resp->resp_status =
+ hclgevf_resp_to_errno(req->msg.resp_status);
- temp = (u8 *)&req->msg[4];
+ temp = (u8 *)req->msg.resp_data;
for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
resp->additional_info[i] = *temp;
temp++;
@@ -220,13 +224,13 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%u)\n",
- req->msg[1]);
+ req->msg.code);
break;
}
/* tail the async message in arq */
msg_q = hdev->arq.msg_q[hdev->arq.tail];
- memcpy(&msg_q[0], req->msg,
+ memcpy(&msg_q[0], &req->msg,
HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
atomic_inc(&hdev->arq.count);
@@ -237,7 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
default:
dev_err(&hdev->pdev->dev,
"VF received unsupported(%u) mbx msg from PF\n",
- req->msg[0]);
+ req->msg.code);
break;
}
crq->desc[crq->next_to_use].flag = 0;
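Editor's note: the refactor above replaces the loose (code, subcode, byte array, length) argument list with a single message struct filled by hclgevf_build_send_msg(), so callers can no longer overrun the two reserved header bytes. Only the code, subcode and data fields are visible in these hunks, so the sketch below assumes that minimal layout; the real struct hclge_vf_to_pf_msg may carry more fields and different widths. The errno mapping mirrors the hclgevf_resp_to_errno() helper added above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed minimal layout; the real struct likely differs. */
struct vf_to_pf_msg {
	uint8_t code;
	uint8_t subcode;
	uint8_t data[14];
};

static void build_send_msg(struct vf_to_pf_msg *msg, uint8_t code,
			   uint8_t subcode)
{
	memset(msg, 0, sizeof(*msg));   /* zeroing replaces ad hoc setup */
	msg->code = code;
	msg->subcode = subcode;
}

/* Mirrors hclgevf_resp_to_errno(): non-zero response -> negative errno. */
static int resp_to_errno(uint16_t resp_code)
{
	return resp_code ? -resp_code : 0;
}

int main(void)
{
	struct vf_to_pf_msg msg;

	/* Opcode/subcode values here are arbitrary demo numbers. */
	build_send_msg(&msg, 12, 2);
	msg.data[0] = 1;   /* payload lives in data[], not a caller array */
	printf("code=%d subcode=%d data0=%d errno=%d\n",
	       msg.code, msg.subcode, msg.data[0], resp_to_errno(3));
	return 0;
}

The fixed-size struct also lets the send path memcpy one object into the descriptor, replacing the old bounds check on msg_len.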
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 8995e32dd1c0..992908e6eebf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -45,6 +45,8 @@
#define MGMT_MSG_TIMEOUT 5000
+#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
+
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
u8 *buf_in, u16 in_size,
u8 *buf_out, u16 *out_size,
enum mgmt_direction_type direction,
- u16 resp_msg_id)
+ u16 resp_msg_id, u32 timeout)
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_recv_msg *recv_msg;
struct completion *recv_done;
+ unsigned long timeo;
u16 msg_id;
int err;
@@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}
- if (!wait_for_completion_timeout(recv_done,
- msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+ if (!wait_for_completion_timeout(recv_done, timeo)) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
err = -ETIMEDOUT;
goto unlock_sync_msg;
@@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
+ u32 timeout = 0;
if (sync != HINIC_MGMT_MSG_SYNC) {
dev_err(&pdev->dev, "Invalid MGMT msg type\n");
@@ -353,9 +358,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
return -EINVAL;
}
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+
return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
- MSG_NOT_RESP);
+ MSG_NOT_RESP, timeout);
}
/**
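Editor's note: msg_to_mgmt_sync() now takes a per-command timeout and falls back to MGMT_MSG_TIMEOUT when the caller passes 0; hinic_msg_to_mgmt() selects the longer SET_FUNC_PORT_MGMT_TIMEOUT only for HINIC_PORT_CMD_SET_FUNC_STATE. A sketch of just that default-with-override selection, using the millisecond values from the hunk (the command codes in the demo are hypothetical):

#include <stdio.h>

#define MGMT_MSG_TIMEOUT           5000    /* ms, default */
#define SET_FUNC_PORT_MGMT_TIMEOUT 25000   /* ms, slow command */

/* 0 means "use the default", mirroring the timeo computation above. */
static unsigned int effective_timeout_ms(unsigned int requested)
{
	return requested ? requested : MGMT_MSG_TIMEOUT;
}

int main(void)
{
	enum { CMD_GET_STATS, CMD_SET_FUNC_STATE };   /* demo codes only */

	for (int cmd = CMD_GET_STATS; cmd <= CMD_SET_FUNC_STATE; cmd++) {
		unsigned int req = (cmd == CMD_SET_FUNC_STATE) ?
				   SET_FUNC_PORT_MGMT_TIMEOUT : 0;
		printf("cmd %d waits up to %u ms\n",
		       cmd, effective_timeout_ms(req));
	}
	return 0;
}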
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 13560975c103..63b92f6cc856 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -483,7 +483,6 @@ static int hinic_close(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
- int err;
down(&nic_dev->mgmt_lock);
@@ -497,20 +496,9 @@ static int hinic_close(struct net_device *netdev)
up(&nic_dev->mgmt_lock);
- err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set func port state\n");
- nic_dev->flags |= (flags & HINIC_INTF_UP);
- return err;
- }
+ hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
- err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
- nic_dev->flags |= (flags & HINIC_INTF_UP);
- return err;
- }
+ hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
if (nic_dev->flags & HINIC_RSS_ENABLE) {
hinic_rss_deinit(nic_dev);
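Editor's note: hinic_close() above stops checking return values entirely: a close path has no sensible recovery, so both firmware calls become best-effort, and the port is now disabled before the function state is torn down. A shape-only sketch with hypothetical stand-in names (not the hinic API):

#include <stdio.h>

/* Hypothetical stand-ins for the two port-state firmware calls. */
static int port_set_state(int up)      { printf("port %s\n", up ? "up" : "down"); return 0; }
static int port_set_func_state(int up) { printf("func %s\n", up ? "up" : "down"); return 0; }

static int device_close(void)
{
	/* Best effort: ignore failures, never abort the close path. */
	(void)port_set_state(0);        /* stop traffic first */
	(void)port_set_func_state(0);   /* then tear down the function */
	return 0;                       /* close always succeeds */
}

int main(void) { return device_close(); }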
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b7fc17756c51..06248a7db7f2 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -872,7 +872,7 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
{
struct emac_regs __iomem *p = dev->emacp;
u32 r = 0;
- int n, err = -ETIMEDOUT;
+ int n;
mutex_lock(&dev->mdio_lock);
@@ -919,7 +919,6 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
goto bail;
}
}
- err = 0;
bail:
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 84121aab7ff1..96d36ae5049e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -712,29 +712,36 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
-static int netdev_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
+static int ibmveth_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
-
- supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL;
- cmd->base.port = PORT_FIBRE;
- cmd->base.phy_address = 0;
- cmd->base.autoneg = AUTONEG_ENABLE;
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &adapter->speed,
+ &adapter->duplex);
+}
+
+static int ibmveth_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ cmd->base.speed = adapter->speed;
+ cmd->base.duplex = adapter->duplex;
+ cmd->base.port = PORT_OTHER;
return 0;
}
+static void ibmveth_init_link_settings(struct net_device *dev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ adapter->speed = SPEED_1000;
+ adapter->duplex = DUPLEX_FULL;
+}
+
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -965,12 +972,13 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_strings = ibmveth_get_strings,
- .get_sset_count = ibmveth_get_sset_count,
- .get_ethtool_stats = ibmveth_get_ethtool_stats,
- .get_link_ksettings = netdev_get_link_ksettings,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ibmveth_get_strings,
+ .get_sset_count = ibmveth_get_sset_count,
+ .get_ethtool_stats = ibmveth_get_ethtool_stats,
+ .get_link_ksettings = ibmveth_get_link_ksettings,
+ .set_link_ksettings = ibmveth_set_link_ksettings,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -978,8 +986,6 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
-
static int ibmveth_send(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc *descs, unsigned long mss)
{
@@ -1674,6 +1680,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->netdev = netdev;
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
adapter->pool_config = 0;
+ ibmveth_init_link_settings(netdev);
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
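Editor's note: instead of hard-coding fibre-port link settings, the virtual adapter now stores user-settable speed/duplex (defaulting to 1 Gb/s full duplex) and reports PORT_OTHER; set_link_ksettings routes through ethtool_virtdev_set_link_ksettings(), whose actual validation lives in the ethtool core. The sketch below is a simplified model of that store-and-report pattern with made-up validation, not the real helper's checks.

#include <stdio.h>

#define SPEED_1000  1000
#define DUPLEX_HALF 0
#define DUPLEX_FULL 1

struct vadapter { unsigned int speed; unsigned char duplex; };

static void init_link_settings(struct vadapter *a)
{
	a->speed  = SPEED_1000;    /* defaults per ibmveth_init_link_settings() */
	a->duplex = DUPLEX_FULL;
}

/* Simplified stand-in: accept any non-zero speed with a known duplex. */
static int set_link(struct vadapter *a, unsigned int speed,
		    unsigned char duplex)
{
	if (!speed || (duplex != DUPLEX_FULL && duplex != DUPLEX_HALF))
		return -1;
	a->speed = speed;
	a->duplex = duplex;
	return 0;
}

int main(void)
{
	struct vadapter a;

	init_link_settings(&a);
	set_link(&a, 10000, DUPLEX_FULL);   /* e.g. report a 10 Gb/s virtual link */
	printf("speed=%u duplex=%s\n", a.speed, a.duplex ? "full" : "half");
	return 0;
}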
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 4e9bf3421f4f..27dfff200166 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -162,6 +162,9 @@ struct ibmveth_adapter {
u64 tx_send_failed;
u64 tx_large_packets;
u64 rx_large_packets;
+ /* Ethtool settings */
+ u8 duplex;
+ u32 speed;
};
/*
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4bd33245bad6..197dc5b2c090 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2189,7 +2189,8 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- } else {
+ } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
+ adapter->from_passive_init)) {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@@ -4677,12 +4678,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
break;
}
- dev_info(dev, "Partner protocol version is %d\n",
- crq->version_exchange_rsp.version);
- if (be16_to_cpu(crq->version_exchange_rsp.version) <
- ibmvnic_version)
- ibmvnic_version =
+ ibmvnic_version =
be16_to_cpu(crq->version_exchange_rsp.version);
+ dev_info(dev, "Partner protocol version is %d\n",
+ ibmvnic_version);
send_cap_queries(adapter);
break;
case QUERY_CAPABILITY_RSP:
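Editor's note: the ibmvnic hunk above changes the version exchange semantics. Previously the driver lowered its protocol version only when the partner reported something older; now it adopts the partner's version unconditionally and logs it afterwards. A tiny before/after sketch:

#include <stdio.h>

static unsigned int negotiate_old(unsigned int ours, unsigned int partner)
{
	return partner < ours ? partner : ours;   /* old: min(ours, partner) */
}

static unsigned int negotiate_new(unsigned int ours, unsigned int partner)
{
	(void)ours;
	return partner;                           /* new: always the partner's */
}

int main(void)
{
	printf("old: %u, new: %u\n", negotiate_old(1, 2), negotiate_new(1, 2));
	return 0;
}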
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 154e2e818ec6..ad34e4335df2 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -294,6 +294,7 @@ config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
+ select NET_DEVLINK
---help---
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index be56e631d693..6f45df5690d4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1852,6 +1852,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops e1000_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 2bced34c19ba..0d51cbc88028 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1476,7 +1476,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
- return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
+ return ((begin ^ (end - 1)) >> 16) == 0;
}
return true;
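Editor's note: the rewrite above replaces the redundant `... != 0 ? false : true` with a direct comparison. The predicate itself is worth spelling out: a buffer [begin, end) stays inside one 64 KiB region exactly when begin and end-1 agree in every bit above bit 15, i.e. ((begin ^ (end - 1)) >> 16) == 0. A standalone check of that expression:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [begin, begin+len) does not cross a 64 KiB boundary. */
static bool within_64k(uintptr_t begin, size_t len)
{
	uintptr_t end = begin + len;

	return ((begin ^ (end - 1)) >> 16) == 0;
}

int main(void)
{
	printf("%d\n", within_64k(0x10000, 0x10000)); /* fills one region: 1 */
	printf("%d\n", within_64k(0x1fff0, 0x20));    /* crosses 0x20000: 0 */
	return 0;
}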
@@ -2715,11 +2715,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index adce7e319b9e..1d47e2503072 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -897,6 +897,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_cnp:
/* fall through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
mask |= BIT(18);
break;
default:
@@ -1561,6 +1562,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
@@ -2305,6 +2307,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops e1000_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f556163481cb..b1447221669e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -97,6 +97,11 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9
#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
+#define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5
+#define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E
+#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
+#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
+#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
#define E1000_REVISION_4 4
@@ -121,6 +126,7 @@ enum e1000_mac_type {
e1000_pch_spt,
e1000_pch_cnp,
e1000_pch_tgp,
+ e1000_pch_adp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index b4135c50e905..735bf25952fc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -317,6 +317,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -460,6 +461,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -703,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1642,6 +1645,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2095,6 +2099,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3133,6 +3138,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4077,6 +4083,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e531976f8a67..51512a73fdd0 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1363,7 +1363,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- usleep_range(50, 100);
+ udelay(100);
i++;
}
@@ -1381,7 +1381,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- usleep_range(50, 100);
+ udelay(100);
}
if (i == timeout) {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index db4ea58bac82..177c6da80c57 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3280,10 +3280,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev,
"Some CPU C-states have been disabled in order to enable jumbo frames\n");
- pm_qos_update_request(&adapter->pm_qos_req, lat);
+ cpu_latency_qos_update_request(&adapter->pm_qos_req, lat);
} else {
- pm_qos_update_request(&adapter->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&adapter->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
/* Enable Receives */
@@ -3536,6 +3536,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
break;
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3807,7 +3808,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
tdt = er32(TDT(0));
BUG_ON(tdt != tx_ring->next_to_use);
tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
- tx_desc->buffer_addr = tx_ring->dma;
+ tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
tx_desc->lower.data = cpu_to_le32(txd_lower | size);
tx_desc->upper.data = 0;
@@ -4049,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -4636,8 +4638,7 @@ int e1000e_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */
- pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE);
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -4679,7 +4680,7 @@ int e1000e_open(struct net_device *netdev)
return 0;
err_req_irq:
- pm_qos_remove_request(&adapter->pm_qos_req);
+ cpu_latency_qos_remove_request(&adapter->pm_qos_req);
e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000e_free_rx_resources(adapter->rx_ring);
@@ -4743,7 +4744,7 @@ int e1000e_close(struct net_device *netdev)
!test_bit(__E1000_TESTING, &adapter->state))
e1000e_release_hw_control(adapter);
- pm_qos_remove_request(&adapter->pm_qos_req);
+ cpu_latency_qos_remove_request(&adapter->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
@@ -5462,10 +5463,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
@@ -7760,6 +7758,11 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
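Editor's note: the new Alder Lake (ADP) I219 device IDs above slot into the same zero-terminated pci_device_id table and reuse the board_pch_cnp configuration. A userspace sketch of the sentinel-terminated lookup idiom, using the IDs from this hunk (the table type and board string are illustrative, not the kernel structures):

#include <stdint.h>
#include <stdio.h>

struct id_entry { uint16_t device; const char *board; };

/* A zero device ID terminates the table, like the { 0, ... } entry above. */
static const struct id_entry tbl[] = {
	{ 0x1A1E, "pch_cnp" },   /* ADP I219-LM16 */
	{ 0x1A1F, "pch_cnp" },   /* ADP I219-V16  */
	{ 0x1A1C, "pch_cnp" },   /* ADP I219-LM17 */
	{ 0x1A1D, "pch_cnp" },   /* ADP I219-V17  */
	{ 0, NULL }
};

static const char *lookup(uint16_t device)
{
	for (const struct id_entry *e = tbl; e->device; e++)
		if (e->device == device)
			return e->board;
	return "unknown";
}

int main(void)
{
	printf("0x1A1C -> %s\n", lookup(0x1A1C));
	return 0;
}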
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index eaa5a0fb99f0..439fda2f5368 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index f306084ca12c..5b78362b82ac 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -41,7 +41,7 @@ struct fm10k_l2_accel {
u16 count;
u16 dglort;
struct rcu_head rcu;
- struct net_device *macvlan[0];
+ struct net_device *macvlan[];
};
enum fm10k_ring_state_t {
@@ -198,7 +198,7 @@ struct fm10k_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
/* for dynamic allocation of rings associated with this q_vector */
- struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp;
+ struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
};
enum fm10k_ring_f_enum {
@@ -218,7 +218,7 @@ struct fm10k_iov_data {
unsigned int num_vfs;
unsigned int next_vf_mbx;
struct rcu_head rcu;
- struct fm10k_vf_info vf_info[0];
+ struct fm10k_vf_info vf_info[];
};
struct fm10k_udp_port {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 68edf55ac906..37fbc646deb9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1151,6 +1151,8 @@ static int fm10k_set_channels(struct net_device *dev,
}
static const struct ethtool_ops fm10k_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_strings = fm10k_get_strings,
.get_sset_count = fm10k_get_sset_count,
.get_ethtool_stats = fm10k_get_ethtool_stats,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4833187bd259..e95b8da45e07 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -334,13 +334,13 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
struct i40e_ddp_profile_list {
u32 p_count;
- struct i40e_profile_info p_info[0];
+ struct i40e_profile_info p_info[];
};
struct i40e_ddp_old_profile_list {
struct list_head list;
size_t old_ddp_size;
- u8 old_ddp_buf[0];
+ u8 old_ddp_buf[];
};
/* macros related to FLX_PIT */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 317f3f1458db..aa8026b1eb81 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5249,6 +5249,11 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
};
static const struct ethtool_ops i40e_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_RX_USECS_HIGH |
+ ETHTOOL_COALESCE_TX_USECS_HIGH,
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
.get_regs = i40e_get_regs,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 8c3e753bfb9d..2a037ec244b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1611,7 +1611,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
}
}
if (lut) {
- bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+ bool pf_lut = vsi->type == I40E_VSI_MAIN;
ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
@@ -11436,7 +11436,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
}
if (lut) {
- bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+ bool pf_lut = vsi->type == I40E_VSI_MAIN;
ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index bd1b1ed323f4..bcd11b4b29df 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -81,7 +81,7 @@ struct iavf_vsi {
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
-#define IAVF_MAX_REQ_QUEUES 4
+#define IAVF_MAX_REQ_QUEUES 16
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 84c3d8d97ef6..2c39d46b6138 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -860,7 +860,7 @@ static void iavf_get_channels(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = IAVF_MAX_REQ_QUEUES;
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
ch->max_other = NONQ_VECS;
ch->other_count = NONQ_VECS;
@@ -881,14 +881,7 @@ static int iavf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- int num_req = ch->combined_count;
-
- if (num_req != adapter->num_active_queues &&
- !(adapter->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
- dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
- return -EINVAL;
- }
+ u32 num_req = ch->combined_count;
if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc) {
@@ -899,14 +892,19 @@ static int iavf_set_channels(struct net_device *netdev,
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
- if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
+ if (num_req > adapter->vsi_res->num_queue_pairs)
return -EINVAL;
+ if (num_req == adapter->num_active_queues)
+ return 0;
+
if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
return -EINVAL;
adapter->num_req_queues = num_req;
- return iavf_request_queues(adapter, num_req);
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ iavf_schedule_reset(adapter);
+ return 0;
}
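Editor's note: the iavf_set_channels() rework above drops the REQ_QUEUES capability check, bounds the request by the PF-granted num_queue_pairs rather than the old IAVF_MAX_REQ_QUEUES constant, and returns early when nothing changes; the actual requeue happens via a scheduled reset. A sketch of that validation order, against a minimal stand-in for the adapter state:

#include <errno.h>
#include <stdio.h>

struct adapter_model {              /* minimal stand-in, not struct iavf_adapter */
	unsigned int num_active_queues;
	unsigned int num_queue_pairs;   /* PF-granted maximum */
	unsigned int num_req_queues;
	int reset_scheduled;
};

static int set_channels(struct adapter_model *a, unsigned int combined)
{
	if (combined > a->num_queue_pairs)
		return -EINVAL;             /* cannot exceed what the PF granted */
	if (combined == a->num_active_queues)
		return 0;                   /* nothing to do */
	a->num_req_queues = combined;
	a->reset_scheduled = 1;             /* real driver: iavf_schedule_reset() */
	return 0;
}

int main(void)
{
	struct adapter_model a = { .num_active_queues = 4, .num_queue_pairs = 16 };

	printf("%d %d %d\n", set_channels(&a, 32), set_channels(&a, 4),
	       set_channels(&a, 8));
	return 0;
}

Note the unsigned num_req: the old `num_req <= 0` check becomes unnecessary once the type cannot go negative.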
/**
@@ -998,6 +996,10 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops iavf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 62fe56ddcb6e..2050649848ba 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3061,9 +3061,6 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
return iavf_configure_clsflower(adapter, cls_flower);
@@ -3087,6 +3084,11 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ struct iavf_adapter *adapter = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
@@ -3448,7 +3450,7 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
if (num_req_queues &&
- num_req_queues != adapter->vsi_res->num_queue_pairs) {
+ num_req_queues > adapter->vsi_res->num_queue_pairs) {
/* Problem. The PF gave us fewer queues than what we had
* negotiated in our request. Need a reset to see if we can't
* get back to a working state.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 1ab9cb339acb..d58374c2c33d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -397,33 +397,6 @@ void iavf_map_queues(struct iavf_adapter *adapter)
}
/**
- * iavf_request_queues
- * @adapter: adapter structure
- * @num: number of requested queues
- *
- * We get a default number of queues from the PF. This enables us to request a
- * different number. Returns 0 on success, negative on failure
- **/
-int iavf_request_queues(struct iavf_adapter *adapter, int num)
-{
- struct virtchnl_vf_res_request vfres;
-
- if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
- /* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
- adapter->current_op);
- return -EBUSY;
- }
-
- vfres.num_queue_pairs = min_t(int, num, num_online_cpus());
-
- adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
- return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
- (u8 *)&vfres, sizeof(vfres));
-}
-
-/**
* iavf_add_ether_addrs
* @adapter: adapter structure
*
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 59544b0fc086..29c6c6743450 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -19,6 +19,7 @@ ice-y := ice_main.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_flow.o \
+ ice_devlink.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cb10abb14e11..5c11448bfbb3 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,6 +34,7 @@
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
+#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include "ice_devids.h"
@@ -60,7 +61,6 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
-#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
@@ -70,7 +70,6 @@ extern const char ice_drv_ver[];
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
-#define ICE_MAX_SMALL_RSS_QS 8
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
@@ -212,6 +211,8 @@ enum ice_state {
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
+ __ICE_MDD_VF_PRINT_PENDING, /* set when an MDD VF event print is pending */
+ __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
__ICE_STATE_NBITS /* must be last */
};
@@ -340,12 +341,18 @@ enum ice_pf_flags {
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
+ ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */
};
struct ice_pf {
struct pci_dev *pdev;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+
+ struct devlink_region *nvm_region;
+
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_res_tracker *irq_tracker;
@@ -361,8 +368,10 @@ struct ice_pf {
struct ice_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
- u16 num_vf_qps; /* num queue pairs per VF */
- u16 num_vf_msix; /* num vectors per VF */
+ u16 num_qps_per_vf;
+ u16 num_msix_per_vf;
+ /* used to ratelimit the MDD event logging */
+ unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6873998cf145..2381b4014ed6 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1232,6 +1232,7 @@ struct ice_aqc_sff_eeprom {
* NVM Update commands (indirect 0x0703)
*/
struct ice_aqc_nvm {
+#define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF
__le16 offset_low;
u8 offset_high;
u8 cmd_flags;
@@ -1250,6 +1251,8 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
+#define ICE_AQC_NVM_START_POINT 0
+
/* NVM Checksum Command (direct, 0x0706) */
struct ice_aqc_nvm_checksum {
u8 flags;
@@ -1661,6 +1664,13 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[1];
};
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct ice_aqc_event_lan_overflow {
+ __le32 prtdcb_ruptq;
+ __le32 qtx_ctl;
+ u8 reserved[8];
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1730,6 +1740,7 @@ struct ice_aq_desc {
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
+ struct ice_aqc_event_lan_overflow lan_overflow;
} params;
};
@@ -1756,6 +1767,7 @@ enum ice_aq_err {
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
+ ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
@@ -1860,6 +1872,9 @@ enum ice_adminq_opc {
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
+ /* Standalone Commands/Events */
+ ice_aqc_opc_event_lan_overflow = 0x1001,
+
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
ice_aqc_opc_fw_logging_info = 0xFF10,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 81885efadc7a..a19cd6f5436b 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -203,8 +203,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
*/
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
- WARN_ONCE(ice_ring_is_xdp(ring) && tc,
- "XDP ring can't belong to TC other than 0");
+ WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
/* The idea behind the calculation is that we subtract the queue count
* of the TC that the ring belongs to from the ring's absolute queue index
@@ -247,7 +246,6 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
switch (vsi->type) {
case ICE_VSI_LB:
- /* fall through */
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -387,8 +385,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
if (vsi->type != ICE_VSI_VF) {
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
@@ -399,8 +397,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ } else {
+ regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
+ QRXFLXP_CNTXT_RXDID_PRIO_M |
+ QRXFLXP_CNTXT_TS_M);
}
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -459,17 +461,20 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
}
/**
- * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
+ * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
* @vsi: the VSI being configured
- * @ena: start or stop the Rx rings
- * @rxq_idx: Rx queue index
+ * @ena: start or stop the Rx ring
+ * @rxq_idx: 0-based Rx queue index for the VSI passed in
+ * @wait: wait or don't wait for configuration to finish in hardware
+ *
+ * Return 0 on success and negative on error.
*/
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+int
+ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
int pf_q = vsi->rxq_map[rxq_idx];
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int ret = 0;
u32 rx_reg;
rx_reg = rd32(hw, QRX_CTRL(pf_q));
@@ -485,13 +490,30 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
rx_reg &= ~QRX_CTRL_QENA_REQ_M;
wr32(hw, QRX_CTRL(pf_q), rx_reg);
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret)
- dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
+ if (!wait)
+ return 0;
+
+ ice_flush(hw);
+ return ice_pf_rxq_wait(pf, pf_q, ena);
+}
- return ret;
+/**
+ * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
+ * @vsi: the VSI being configured
+ * @ena: true/false to verify Rx ring has been enabled/disabled respectively
+ * @rxq_idx: 0-based Rx queue index for the VSI passed in
+ *
+ * This routine will wait for the given Rx queue of the VSI to reach the
+ * enabled or disabled state. Returns -ETIMEDOUT if the queue fails to reach
+ * the requested state after multiple retries; otherwise returns 0.
+ */
+int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+{
+ int pf_q = vsi->rxq_map[rxq_idx];
+ struct ice_pf *pf = vsi->back;
+
+ return ice_pf_rxq_wait(pf, pf_q, ena);
}
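Editor's note: splitting the old ice_vsi_ctrl_rx_ring() into a no-wait control step plus a separate wait lets a caller kick many rings with cheap register writes and then poll them all once, instead of busy-waiting per ring. A shape-only sketch of the two-phase pattern with stubbed hardware accessors (the stubs are hypothetical, not the QRX_CTRL register protocol):

#include <stdio.h>

#define NQ 4
static int hw_state[NQ];   /* stand-in for per-queue hardware state */

static void ring_ctrl(int q, int ena) { hw_state[q] = ena; }  /* no wait */
static int  ring_wait(int q, int ena) { return hw_state[q] == ena ? 0 : -1; }

int main(void)
{
	int q;

	/* Phase 1: request all rings to start (cheap writes, no polling). */
	for (q = 0; q < NQ; q++)
		ring_ctrl(q, 1);

	/* Phase 2: a single wait pass over all rings. */
	for (q = 0; q < NQ; q++)
		if (ring_wait(q, 1))
			printf("ring %d timed out\n", q);

	printf("done\n");
	return 0;
}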
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 407995e8e944..44efdb627043 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -8,7 +8,9 @@
int ice_setup_rx_ctx(struct ice_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+int
+ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
+int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 04d5db0a25bf..2c0d8fd3d5cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,7 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
-#define ICE_PF_RESET_WAIT_COUNT 200
+#define ICE_PF_RESET_WAIT_COUNT 300
/**
* ice_set_mac_type - Sets MAC type
@@ -615,29 +615,6 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
- * ice_get_nvm_version - get cached NVM version data
- * @hw: pointer to the hardware structure
- * @oem_ver: 8 bit NVM version
- * @oem_build: 16 bit NVM build number
- * @oem_patch: 8 NVM patch number
- * @ver_hi: high 16 bits of the NVM version
- * @ver_lo: low 16 bits of the NVM version
- */
-void
-ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
- u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
-{
- struct ice_nvm_info *nvm = &hw->nvm;
-
- *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
- *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
- *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
- ICE_OEM_VER_BUILD_SHIFT);
- *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
- *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
-}
-
-/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -958,72 +935,6 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
- * @hw: pointer to hardware structure
- * @module_tlv: pointer to module TLV to return
- * @module_tlv_len: pointer to module TLV length to return
- * @module_type: module type requested
- *
- * Finds the requested sub module TLV type from the Preserved Field
- * Area (PFA) and returns the TLV pointer and length. The caller can
- * use these to read the variable length TLV value.
- */
-enum ice_status
-ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
- u16 module_type)
-{
- enum ice_status status;
- u16 pfa_len, pfa_ptr;
- u16 next_tlv;
-
- status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
- return status;
- }
- status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
- return status;
- }
- /* Starting with first TLV after PFA length, iterate through the list
- * of TLVs to find the requested one.
- */
- next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
- u16 tlv_sub_module_type;
- u16 tlv_len;
-
- /* Read TLV type */
- status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
- break;
- }
- /* Read TLV length */
- status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
- break;
- }
- if (tlv_sub_module_type == module_type) {
- if (tlv_len) {
- *module_tlv = next_tlv;
- *module_tlv_len = tlv_len;
- return 0;
- }
- return ICE_ERR_INVAL_SIZE;
- }
- /* Check next TLV, i.e. current TLV pointer + length + 2 words
- * (for current TLV's type and length)
- */
- next_tlv = next_tlv + tlv_len + 2;
- }
- /* Module does not exist */
- return ICE_ERR_DOES_NOT_EXIST;
-}
-
-/**
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
@@ -1181,7 +1092,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
break;
- /* fall-through */
+ fallthrough;
default:
mutex_lock(&ice_global_cfg_lock_sw);
lock_acquired = true;
@@ -2703,7 +2614,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
break;
}
- /* fall-through */
+ fallthrough;
default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index f9fc005d35a7..8104f3d64d96 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -15,9 +15,6 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status
-ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
- u16 module_type);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
@@ -38,9 +35,6 @@ enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status
-ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
@@ -153,9 +147,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-void
-ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
- u8 *oem_patch, u8 *ver_hi, u8 *ver_lo);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 7108fb41b604..7bea09363b42 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -63,6 +63,26 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_dcb_get_mode - gets the DCB mode
+ * @port_info: pointer to port info structure
+ * @host: if set it's HOST if not it's MANAGED
+ */
+static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
+{
+ u8 mode;
+
+ if (host)
+ mode = DCB_CAP_DCBX_HOST;
+ else
+ mode = DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
+ return mode | DCB_CAP_DCBX_VER_CEE;
+ else
+ return mode | DCB_CAP_DCBX_VER_IEEE;
+}
+
+/**
* ice_dcb_get_num_tc - Get the number of TCs from DCBX config
* @dcbcfg: config to retrieve number of TCs from
*/
@@ -149,6 +169,43 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
}
/**
+ * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct
+ * @pf: pointer to the PF struct
+ * @dcbcfg: pointer to DCB config structure
+ */
+int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
+ u8 num_tc, total_bw = 0;
+ int i;
+
+ /* returns number of contiguous TCs and 1 TC for non-contiguous TCs,
+ * since at least 1 TC has to be configured
+ */
+ num_tc = ice_dcb_get_num_tc(dcbcfg);
+
+ /* no bandwidth checks required if there's only one TC, so assign
+ * all bandwidth to TC0 and return
+ */
+ if (num_tc == 1) {
+ etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
+ return 0;
+ }
+
+ for (i = 0; i < num_tc; i++)
+ total_bw += etscfg->tcbwtable[i];
+
+ if (!total_bw) {
+ etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
+ } else if (total_bw != ICE_TC_MAX_BW) {
+ dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
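+/* Editor's note: the check above is self-contained enough to lift out. It
+ * rejects ETS configs whose per-TC bandwidth percentages do not sum to 100
+ * (ICE_TC_MAX_BW), defaulting an all-zero table to "everything on TC0" and
+ * short-circuiting the single-TC case. The same logic as a standalone C
+ * sketch, with TC_MAX_BW standing in for the driver constant:
+ *
+ *	#define TC_MAX_BW 100
+ *
+ *	static int bwchk(unsigned char *tcbw, int num_tc)
+ *	{
+ *		int i, total = 0;
+ *
+ *		if (num_tc == 1) {      // one TC implicitly owns all bandwidth
+ *			tcbw[0] = TC_MAX_BW;
+ *			return 0;
+ *		}
+ *		for (i = 0; i < num_tc; i++)
+ *			total += tcbw[i];
+ *		if (!total) {           // all-zero table: default to TC0
+ *			tcbw[0] = TC_MAX_BW;
+ *			return 0;
+ *		}
+ *		return total == TC_MAX_BW ? 0 : -1;
+ *	}
+ *
+ * e.g. bwchk((unsigned char[]){60, 40}, 2) == 0 but {60, 50} fails.
+ */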
+/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
@@ -182,6 +239,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
return ret;
}
+ if (ice_dcb_bwchk(pf, new_cfg))
+ return -EINVAL;
+
/* Store old config in case FW config fails */
old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
if (!old_cfg)
@@ -605,14 +665,14 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
ice_cfg_sw_lldp(pf_vsi, false, true);
- pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
}
set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
- /* DCBX in FW and LLDP enabled in FW */
- pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
+ /* DCBX/LLDP enabled in FW, set DCBNL mode advertisement */
+ pf->dcbx_cap = ice_dcb_get_mode(port_info, false);
err = ice_dcb_init_cfg(pf, locked);
if (err)
@@ -719,7 +779,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
bool need_reconfig = false;
struct ice_port_info *pi;
struct ice_vsi *pf_vsi;
- u8 type;
+ u8 mib_type;
int ret;
/* Not DCB capable or capability disabled */
@@ -734,16 +794,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pi = pf->hw.port_info;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
/* Ignore if event is not for Nearest Bridge */
- type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M);
- dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
- if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
+ mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
+ if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
/* Check MIB Type and return if event for Remote MIB update */
- type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
- dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
- if (type == ICE_AQ_LLDP_MIB_REMOTE) {
+ mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
+ if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
@@ -775,6 +835,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
goto out;
}
+ pf->dcbx_cap = ice_dcb_get_mode(pi, false);
+
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index f15e5776f287..37680e815b02 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
+int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index b61aba428adb..c4c12414083a 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -95,14 +95,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
}
- /* max_tc is a 1-8 value count of number of TC's, not a 0-7 value
- * for the TC's index number. Add one to value if not zero, and
- * for zero set it to the FW's default value
- */
- if (max_tc)
- max_tc++;
- else
- max_tc = IEEE_8021QAZ_MAX_TCS;
+ if (ice_dcb_bwchk(pf, new_cfg)) {
+ err = -EINVAL;
+ goto ets_out;
+ }
+
+ max_tc = pf->hw.func_caps.common_cap.maxtc;
new_cfg->etscfg.maxtcs = max_tc;
@@ -119,6 +117,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
if (err == ICE_DCB_NO_HW_CHG)
err = ICE_DCB_HW_CHG_RST;
+ets_out:
mutex_unlock(&pf->tc_mutex);
return err;
}
@@ -535,6 +534,30 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
}
/**
+ * ice_dcbnl_set_pg_tc_cfg_rx
+ * @netdev: relevant netdev struct
+ * @prio: corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: BW percentage for corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * lldpad requires this function pointer to be non-NULL to complete CEE config.
+ */
+static void
+ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
+ int __always_unused prio,
+ u8 __always_unused prio_type,
+ u8 __always_unused pgid,
+ u8 __always_unused bw_pct,
+ u8 __always_unused up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
+}
+
+/**
* ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
* @netdev: pointer to netdev struct
* @pgid: the corresponding traffic class
@@ -554,6 +577,23 @@ ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
}
/**
+ * ice_dcbnl_set_pg_bwg_cfg_rx
+ * @netdev: the corresponding netdev
+ * @pgid: corresponding TC
+ * @bw_pct: BW percentage for given TC
+ *
+ * lldpad requires this function pointer to be non-NULL to complete CEE config.
+ */
+static void
+ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
+ u8 __always_unused bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
+}
+
+/**
* ice_dcbnl_get_cap - Get DCBX capabilities of adapter
* @netdev: pointer to netdev struct
* @capid: the capability type
@@ -799,6 +839,8 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
.getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
.setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
.setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
.getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
.getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
.getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index ce63017c56c7..9d8194671f6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,12 +5,34 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
+/* Intel(R) Ethernet Connection E823-L for backplane */
+#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
+/* Intel(R) Ethernet Connection E823-L for SFP */
+#define ICE_DEV_ID_E823L_SFP 0x124D
+/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
+/* Intel(R) Ethernet Connection E823-L 1GbE */
+#define ICE_DEV_ID_E823L_1GBE 0x124F
+/* Intel(R) Ethernet Connection E823-L for QSFP */
+#define ICE_DEV_ID_E823L_QSFP 0x151D
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Controller E810-XXV for SFP */
+#define ICE_DEV_ID_E810_XXV_SFP 0x159B
+/* Intel(R) Ethernet Connection E823-C for backplane */
+#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
+/* Intel(R) Ethernet Connection E823-C for QSFP */
+#define ICE_DEV_ID_E823C_QSFP 0x188B
+/* Intel(R) Ethernet Connection E823-C for SFP */
+#define ICE_DEV_ID_E823C_SFP 0x188C
+/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
+/* Intel(R) Ethernet Connection E823-C 1GbE */
+#define ICE_DEV_ID_E823C_SGMII 0x188E
/* Intel(R) Ethernet Connection E822-C for backplane */
#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection E822-C for QSFP */
@@ -21,8 +43,8 @@
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894
-/* Intel(R) Ethernet Connection E822-X for backplane */
-#define ICE_DEV_ID_E822X_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for backplane */
+#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
new file mode 100644
index 000000000000..c6833944b90a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_devlink.h"
+
+static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
+{
+ u8 dsn[8];
+
+ /* Copy the DSN into an array in Big Endian format */
+ put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
+
+ snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
+ dsn[0], dsn[1], dsn[2], dsn[3],
+ dsn[4], dsn[5], dsn[6], dsn[7]);
+
+ return 0;
+}
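/* Illustrative sketch, not part of the patch: pci_get_dsn() returns the
 * 64-bit PCIe Device Serial Number and put_unaligned_be64() stores it
 * big-endian, so dsn[0] is the most significant byte. A userspace
 * equivalent of the byte split and formatting:
 */
#include <stdint.h>
#include <stdio.h>

static void format_dsn(uint64_t dsn64, char *buf, size_t len)
{
	uint8_t dsn[8];
	int i;

	for (i = 0; i < 8; i++)         /* big-endian byte order */
		dsn[i] = (uint8_t)(dsn64 >> (56 - 8 * i));

	snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
		 dsn[0], dsn[1], dsn[2], dsn[3],
		 dsn[4], dsn[5], dsn[6], dsn[7]);
}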
+
+static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+
+ status = ice_read_pba_string(hw, (u8 *)buf, len);
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+static int ice_info_fw_mgmt(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ snprintf(buf, len, "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
+ hw->fw_patch);
+
+ return 0;
+}
+
+static int ice_info_fw_api(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ snprintf(buf, len, "%u.%u", hw->api_maj_ver, hw->api_min_ver);
+
+ return 0;
+}
+
+static int ice_info_fw_build(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ snprintf(buf, len, "0x%08x", hw->fw_build);
+
+ return 0;
+}
+
+static int ice_info_orom_ver(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_orom_info *orom = &pf->hw.nvm.orom;
+
+ snprintf(buf, len, "%u.%u.%u", orom->major, orom->build, orom->patch);
+
+ return 0;
+}
+
+static int ice_info_nvm_ver(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_nvm_info *nvm = &pf->hw.nvm;
+
+ snprintf(buf, len, "%x.%02x", nvm->major_ver, nvm->minor_ver);
+
+ return 0;
+}
+
+static int ice_info_eetrack(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_nvm_info *nvm = &pf->hw.nvm;
+
+ snprintf(buf, len, "0x%08x", nvm->eetrack);
+
+ return 0;
+}
+
+static int ice_info_ddp_pkg_name(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ snprintf(buf, len, "%s", hw->active_pkg_name);
+
+ return 0;
+}
+
+static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
+
+ snprintf(buf, len, "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
+ pkg->draft);
+
+ return 0;
+}
+
+#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
+#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
+
+enum ice_version_type {
+ ICE_VERSION_FIXED,
+ ICE_VERSION_RUNNING,
+ ICE_VERSION_STORED,
+};
+
+static const struct ice_devlink_version {
+ enum ice_version_type type;
+ const char *key;
+ int (*getter)(struct ice_pf *pf, char *buf, size_t len);
+} ice_devlink_versions[] = {
+ fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
+ running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
+ running("fw.mgmt.api", ice_info_fw_api),
+ running("fw.mgmt.build", ice_info_fw_build),
+ running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver),
+ running("fw.psid.api", ice_info_nvm_ver),
+ running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
+ running("fw.app.name", ice_info_ddp_pkg_name),
+ running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+};
+
+/**
+ * ice_devlink_info_get - .info_get devlink handler
+ * @devlink: devlink instance structure
+ * @req: the devlink info request
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .info_get operation. Reports information about the
+ * device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ char buf[100];
+ size_t i;
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
+ return err;
+ }
+
+ err = ice_info_get_dsn(pf, buf, sizeof(buf));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to obtain serial number");
+ return err;
+ }
+
+ err = devlink_info_serial_number_put(req, buf);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
+ enum ice_version_type type = ice_devlink_versions[i].type;
+ const char *key = ice_devlink_versions[i].key;
+
+ err = ice_devlink_versions[i].getter(pf, buf, sizeof(buf));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
+ return err;
+ }
+
+ switch (type) {
+ case ICE_VERSION_FIXED:
+ err = devlink_info_version_fixed_put(req, key, buf);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
+ return err;
+ }
+ break;
+ case ICE_VERSION_RUNNING:
+ err = devlink_info_version_running_put(req, key, buf);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
+ return err;
+ }
+ break;
+ case ICE_VERSION_STORED:
+ err = devlink_info_version_stored_put(req, key, buf);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
+ return err;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
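/* Illustrative sketch, not part of the patch: the handler above is
 * table-driven -- each entry pairs a version key with a getter and a
 * fixed/running/stored classification, so reporting a new version is a
 * one-line table change. A standalone model of that dispatch (the board
 * ID value is hypothetical):
 */
#include <stdio.h>

enum ver_type { VER_FIXED, VER_RUNNING, VER_STORED };

struct ver_entry {
	enum ver_type type;
	const char *key;
	int (*getter)(char *buf, size_t len);
};

static int get_board_id(char *buf, size_t len)
{
	snprintf(buf, len, "K12345-001");       /* hypothetical board ID */
	return 0;
}

static const struct ver_entry versions[] = {
	{ VER_FIXED, "board.id", get_board_id },
};

int main(void)
{
	char buf[100];
	size_t i;

	for (i = 0; i < sizeof(versions) / sizeof(versions[0]); i++) {
		if (versions[i].getter(buf, sizeof(buf)))
			return 1;               /* abort on first failure */
		printf("%s: %s\n", versions[i].key, buf);
	}
	return 0;
}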
+
+static const struct devlink_ops ice_devlink_ops = {
+ .info_get = ice_devlink_info_get,
+};
+
+static void ice_devlink_free(void *devlink_ptr)
+{
+ devlink_free((struct devlink *)devlink_ptr);
+}
+
+/**
+ * ice_allocate_pf - Allocate devlink and return PF structure pointer
+ * @dev: the device to allocate for
+ *
+ * Allocate a devlink instance for this device and return the private area as
+ * the PF structure. The devlink memory is tracked through devres, with an
+ * action added to free it when the driver unwinds.
+ */
+struct ice_pf *ice_allocate_pf(struct device *dev)
+{
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf));
+ if (!devlink)
+ return NULL;
+
+ /* Add an action to teardown the devlink when unwinding the driver */
+ if (devm_add_action(dev, ice_devlink_free, devlink)) {
+ devlink_free(devlink);
+ return NULL;
+ }
+
+ return devlink_priv(devlink);
+}
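/* Illustrative sketch, not part of the patch: allocate, then register a
 * teardown action; if registering the action itself fails, nothing would
 * free the object later, so it is freed on the spot. Modeled with a toy
 * one-slot registry standing in for devres:
 */
#include <stdlib.h>

typedef void (*cleanup_fn)(void *);

static cleanup_fn pending_fn;   /* toy single-slot devres registry */
static void *pending_arg;

static int add_cleanup_action(cleanup_fn fn, void *arg)
{
	if (pending_fn)
		return -1;      /* slot already taken */
	pending_fn = fn;
	pending_arg = arg;
	return 0;
}

static void *allocate_tracked(size_t size)
{
	void *obj = calloc(1, size);

	if (!obj)
		return NULL;

	/* mirrors the devlink_free() fallback in ice_allocate_pf() above */
	if (add_cleanup_action(free, obj)) {
		free(obj);
		return NULL;
	}
	return obj;
}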
+
+/**
+ * ice_devlink_register - Register devlink interface for this PF
+ * @pf: the PF to register the devlink for.
+ *
+ * Register the devlink instance associated with this physical function.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_register(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = devlink_register(devlink, dev);
+ if (err) {
+ dev_err(dev, "devlink registration failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_unregister - Unregister devlink resources for this PF.
+ * @pf: the PF structure to cleanup
+ *
+ * Releases resources used by devlink and cleans up associated memory.
+ */
+void ice_devlink_unregister(struct ice_pf *pf)
+{
+ devlink_unregister(priv_to_devlink(pf));
+}
+
+/**
+ * ice_devlink_create_port - Create a devlink port for this PF
+ * @pf: the PF to create a port for
+ *
+ * Create and register a devlink_port for this PF. Note that although each
+ * physical function is connected to a separate devlink instance, the port
+ * will still be numbered according to the physical function id.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_port(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ if (!vsi) {
+ dev_err(dev, "%s: unable to find main VSI\n", __func__);
+ return -EIO;
+ }
+
+ devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ pf->hw.pf_id, false, 0, NULL, 0);
+ err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
+ if (err) {
+ dev_err(dev, "devlink_port_register failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this PF.
+ */
+void ice_devlink_destroy_port(struct ice_pf *pf)
+{
+ devlink_port_type_clear(&pf->devlink_port);
+ devlink_port_unregister(&pf->devlink_port);
+}
+
+/**
+ * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
+ * @devlink: the devlink instance
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
+ * the nvm-flash devlink region. It captures a snapshot of the NVM flash
+ * contents. This snapshot can later be viewed via the devlink-region
+ * interface.
+ *
+ * Return: zero on success, and updates the data pointer. Returns a non-zero
+ * error code on failure.
+ */
+static int ice_devlink_nvm_snapshot(struct devlink *devlink,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ void *nvm_data;
+ u32 nvm_size;
+
+ nvm_size = hw->nvm.flash_size;
+ nvm_data = vzalloc(nvm_size);
+ if (!nvm_data)
+ return -ENOMEM;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ vfree(nvm_data);
+ return -EIO;
+ }
+
+ status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
+ if (status) {
+ dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+ nvm_size, status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ice_release_nvm(hw);
+ vfree(nvm_data);
+ return -EIO;
+ }
+
+ ice_release_nvm(hw);
+
+ *data = nvm_data;
+
+ return 0;
+}
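/* Illustrative sketch, not part of the patch: the snapshot path is
 * allocate -> acquire -> read -> release -> hand the buffer to the core
 * (which frees it via the region destructor), and each failure undoes
 * exactly what was taken so far. The same shape with trivial stubs:
 */
#include <stdlib.h>

static int acquire_nvm(void) { return 0; }      /* stub lock */
static void release_nvm(void) { }
static int read_nvm(void *buf, unsigned int n) { (void)buf; (void)n; return 0; }

static int snapshot_nvm(unsigned int size, unsigned char **data)
{
	unsigned char *buf = calloc(1, size);

	if (!buf)
		return -1;

	if (acquire_nvm())
		goto err_free;          /* lock not held: just free */

	if (read_nvm(buf, size))
		goto err_release;       /* lock held: release, then free */

	release_nvm();
	*data = buf;                    /* ownership passes to the caller */
	return 0;

err_release:
	release_nvm();
err_free:
	free(buf);
	return -1;
}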
+
+static const struct devlink_region_ops ice_nvm_region_ops = {
+ .name = "nvm-flash",
+ .destructor = vfree,
+ .snapshot = ice_devlink_nvm_snapshot,
+};
+
+/**
+ * ice_devlink_init_regions - Initialize devlink regions
+ * @pf: the PF device structure
+ *
+ * Create devlink regions used to enable access to dump the contents of the
+ * flash memory on the device.
+ */
+void ice_devlink_init_regions(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ u64 nvm_size;
+
+ nvm_size = pf->hw.nvm.flash_size;
+ pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
+ nvm_size);
+ if (IS_ERR(pf->nvm_region)) {
+ dev_err(dev, "failed to create NVM devlink region, err %ld\n",
+ PTR_ERR(pf->nvm_region));
+ pf->nvm_region = NULL;
+ }
+}
+
+/**
+ * ice_devlink_destroy_regions - Destroy devlink regions
+ * @pf: the PF device structure
+ *
+ * Remove previously created regions for this PF.
+ */
+void ice_devlink_destroy_regions(struct ice_pf *pf)
+{
+ if (pf->nvm_region)
+ devlink_region_destroy(pf->nvm_region);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
new file mode 100644
index 000000000000..6e806a08dc23
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DEVLINK_H_
+#define _ICE_DEVLINK_H_
+
+struct ice_pf *ice_allocate_pf(struct device *dev);
+
+int ice_devlink_register(struct ice_pf *pf);
+void ice_devlink_unregister(struct ice_pf *pf);
+int ice_devlink_create_port(struct ice_pf *pf);
+void ice_devlink_destroy_port(struct ice_pf *pf);
+
+void ice_devlink_init_regions(struct ice_pf *pf);
+void ice_devlink_destroy_regions(struct ice_pf *pf);
+
+#endif /* _ICE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 77c412a7e7a4..593fb37bd59e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -157,6 +157,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -166,25 +167,26 @@ static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- u16 oem_build;
+ struct ice_orom_info *orom;
+ struct ice_nvm_info *nvm;
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
+ nvm = &hw->nvm;
+ orom = &nvm->orom;
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
/* Display NVM version (from which the firmware version can be
* determined) which contains more pertinent information.
*/
- ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
- &nvm_ver_hi, &nvm_ver_lo);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
- hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
+ "%x.%02x 0x%x %d.%d.%d", nvm->major_ver, nvm->minor_ver,
+ nvm->eetrack, orom->major, orom->build, orom->patch);
- strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -243,7 +245,7 @@ static int ice_get_eeprom_len(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
- return (int)(pf->hw.nvm.sr_words * sizeof(u16));
+ return (int)pf->hw.nvm.flash_size;
}
static int
@@ -251,39 +253,46 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
u8 *bytes)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- u16 first_word, last_word, nwords;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
struct device *dev;
int ret = 0;
- u16 *buf;
+ u8 *buf;
dev = ice_pf_to_dev(pf);
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+ netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n",
+ eeprom->cmd, eeprom->offset, eeprom->len);
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- nwords = last_word - first_word + 1;
-
- buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL);
+ buf = kzalloc(eeprom->len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- status = ice_read_sr_buf(hw, first_word, &nwords, buf);
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
- dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n",
+ dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
- eeprom->len = sizeof(u16) * nwords;
ret = -EIO;
goto out;
}
- memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len);
+ status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
+ false);
+ if (status) {
+ dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ ret = -EIO;
+ goto release;
+ }
+
+ memcpy(bytes, buf, eeprom->len);
+release:
+ ice_release_nvm(hw);
out:
- devm_kfree(dev, buf);
+ kfree(buf);
return ret;
}
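/* Illustrative sketch, not part of the patch: the old path read 16-bit
 * Shadow RAM words, so a byte request had to be rounded to words and the
 * copy-out had to skip a possible odd leading byte; the flat byte read
 * deletes all of that. The word math that was removed, standalone:
 */
#include <stdio.h>

int main(void)
{
	unsigned int offset = 3, len = 5;       /* arbitrary odd request */
	unsigned int first_word = offset >> 1;
	unsigned int last_word = (offset + len - 1) >> 1;
	unsigned int nwords = last_word - first_word + 1;

	/* 5 bytes at byte offset 3 span words 1..3 (6 bytes), and the
	 * copy must skip (offset & 1) == 1 leading byte
	 */
	printf("words %u..%u (%u words), skip %u byte(s)\n",
	       first_word, last_word, nwords, offset & 1);
	return 0;
}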
@@ -462,7 +471,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_setup_rx_ring;
- status = ice_vsi_start_rx_rings(vsi);
+ status = ice_vsi_start_all_rx_rings(vsi);
if (status)
goto err_start_rx_ring;
@@ -494,7 +503,7 @@ static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, status);
- status = ice_vsi_stop_rx_rings(vsi);
+ status = ice_vsi_stop_all_rx_rings(vsi);
if (status)
netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, status);
@@ -672,7 +681,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
if (!test_vsi) {
- netdev_err(netdev, "Failed to create a VSI for the loopback test");
+ netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
return 1;
}
@@ -731,7 +740,7 @@ lbtest_free_frame:
devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_remove_mac(&pf->hw, &tmp_list))
- netdev_err(netdev, "Could not remove MAC filter for the test VSI");
+ netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
free_mac_list:
ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
@@ -744,7 +753,7 @@ lbtest_rings_dis:
lbtest_vsi_close:
test_vsi->netdev = NULL;
if (ice_vsi_release(test_vsi))
- netdev_err(netdev, "Failed to remove the test VSI");
+ netdev_err(netdev, "Failed to remove the test VSI\n");
return ret;
}
@@ -834,7 +843,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
int status = ice_open(netdev);
if (status) {
- dev_err(dev, "Could not open device %s, err %d",
+ dev_err(dev, "Could not open device %s, err %d\n",
pf->int_name, status);
}
}
@@ -1091,7 +1100,6 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fecparam->active_fec = ETHTOOL_FEC_BASER;
break;
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
- /* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fecparam->active_fec = ETHTOOL_FEC_RS;
break;
@@ -1132,6 +1140,33 @@ done:
}
/**
+ * ice_nway_reset - restart autonegotiation
+ * @netdev: network interface device structure
+ */
+static int ice_nway_reset(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
+ enum ice_status status;
+
+ pi = vsi->port_info;
+ /* If VSI state is up, then restart autoneg with link up */
+ if (!test_bit(__ICE_DOWN, vsi->back->state))
+ status = ice_aq_set_link_restart_an(pi, true, NULL);
+ else
+ status = ice_aq_set_link_restart_an(pi, false, NULL);
+
+ if (status) {
+ netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
+ status, pi->hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
* ice_get_priv_flags - report device private flags
* @netdev: network interface device structure
*
@@ -1264,6 +1299,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
dev_dbg(dev, "Fail to enable MIB change events\n");
+
+ ice_nway_reset(netdev);
}
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -1781,7 +1818,6 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
Asym_Pause);
break;
case ICE_FC_PFC:
- /* fall through */
default:
ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
@@ -2776,30 +2812,6 @@ done:
return err;
}
-static int ice_nway_reset(struct net_device *netdev)
-{
- /* restart autonegotiation */
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_port_info *pi;
- enum ice_status status;
-
- pi = vsi->port_info;
- /* If VSI state is up, then restart autoneg with link up */
- if (!test_bit(__ICE_DOWN, vsi->back->state))
- status = ice_aq_set_link_restart_an(pi, true, NULL);
- else
- status = ice_aq_set_link_restart_an(pi, false, NULL);
-
- if (status) {
- netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
- status, pi->hw->adminq.sq_last_status);
- return -EIO;
- }
-
- return 0;
-}
-
/**
* ice_get_pauseparam - Get Flow Control status
* @netdev: network interface device structure
@@ -3453,12 +3465,6 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
case ICE_TX_CONTAINER:
- if (ec->tx_coalesce_usecs_high) {
- netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n",
- c_type_str);
- return -EINVAL;
- }
-
use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
coalesce_usecs = ec->tx_coalesce_usecs;
@@ -3535,53 +3541,6 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
}
/**
- * ice_is_coalesce_param_invalid - check for unsupported coalesce parameters
- * @netdev: pointer to the netdev associated with this query
- * @ec: ethtool structure to fill with driver's coalesce settings
- *
- * Print netdev info if driver doesn't support one of the parameters
- * and return error. When any parameters will be implemented, remove only
- * this parameter from param array.
- */
-static int
-ice_is_coalesce_param_invalid(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct ice_ethtool_not_used {
- u32 value;
- const char *name;
- } param[] = {
- {ec->stats_block_coalesce_usecs, "stats-block-usecs"},
- {ec->rate_sample_interval, "sample-interval"},
- {ec->pkt_rate_low, "pkt-rate-low"},
- {ec->pkt_rate_high, "pkt-rate-high"},
- {ec->rx_max_coalesced_frames, "rx-frames"},
- {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
- {ec->rx_max_coalesced_frames_irq, "rx-frames-irq"},
- {ec->tx_max_coalesced_frames, "tx-frames"},
- {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
- {ec->tx_max_coalesced_frames_irq, "tx-frames-irq"},
- {ec->rx_coalesce_usecs_low, "rx-usecs-low"},
- {ec->rx_max_coalesced_frames_low, "rx-frames-low"},
- {ec->tx_coalesce_usecs_low, "tx-usecs-low"},
- {ec->tx_max_coalesced_frames_low, "tx-frames-low"},
- {ec->rx_max_coalesced_frames_high, "rx-frames-high"},
- {ec->tx_max_coalesced_frames_high, "tx-frames-high"}
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(param); i++) {
- if (param[i].value) {
- netdev_info(netdev, "Setting %s not supported\n",
- param[i].name);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
* @netdev: netdev used for print
* @itr_setting: previous user setting
@@ -3621,9 +3580,6 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_is_coalesce_param_invalid(netdev, ec))
- return -EINVAL;
-
if (q_num < 0) {
struct ice_q_vector *q_vector = vsi->q_vectors[0];
int v_idx;
@@ -3818,6 +3774,9 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_RX_USECS_HIGH,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
@@ -3867,6 +3826,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
+ .get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_strings = ice_get_strings,
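/* Illustrative sketch, not part of the patch: supported_coalesce_params
 * lets the ethtool core reject unsupported coalesce fields generically,
 * which is what allowed the driver's hand-rolled
 * ice_is_coalesce_param_invalid() table (removed above) to go away. The
 * core-side gate is conceptually a bitmask test:
 */
#include <stdbool.h>

#define COALESCE_USECS          (1u << 0)  /* stand-ins for the */
#define COALESCE_USE_ADAPTIVE   (1u << 1)  /* ETHTOOL_COALESCE_* flags */
#define COALESCE_RX_USECS_HIGH  (1u << 2)

/* requested: one bit per nonzero field in the user's request */
static bool coalesce_request_ok(unsigned int requested,
				unsigned int supported)
{
	/* reject if any requested bit falls outside the supported set */
	return !(requested & ~supported);
}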
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 99208946224c..42bac3ec5526 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -3471,6 +3471,24 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
}
/**
+ * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
+ * @hw: pointer to the HW struct
+ * @idx: the index of the TCAM entry to remove
+ * @chg: the list of change structures to search
+ */
+static void
+ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
+{
+ struct ice_chs_chg *pos, *tmp;
+
+ list_for_each_entry_safe(tmp, pos, chg, list_entry)
+ if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
+ list_del(&tmp->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), tmp);
+ }
+}
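/* Illustrative sketch, not part of the patch: deleting nodes while
 * walking a list needs the _safe iterator, which caches the next pointer
 * before the body may free the current node. The same idea on a plain
 * singly-linked list:
 */
#include <stdlib.h>

struct node {
	int tcam_idx;
	struct node *next;
};

static void remove_idx(struct node **head, int idx)
{
	struct node **pp = head;

	while (*pp) {
		struct node *cur = *pp;

		if (cur->tcam_idx == idx) {
			*pp = cur->next;        /* unlink before freeing */
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
}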
+
+/**
* ice_prof_tcam_ena_dis - add enable or disable TCAM change
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -3489,14 +3507,19 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
enum ice_status status;
struct ice_chs_chg *p;
- /* Default: enable means change the low flag bit to don't care */
- u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
/* if disabling, free the TCAM */
if (!enable) {
- status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+ status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
+
+ /* if we have already created a change for this TCAM entry, then
+ * we need to remove that entry, to avoid writing to a TCAM
+ * entry we will no longer own.
+ */
+ ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
tcam->tcam_idx = 0;
tcam->in_use = 0;
return status;
@@ -3612,11 +3635,12 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @blk: hardware block
* @vsig: the VSIG to which this profile is to be added
* @hdl: the profile handle indicating the profile to add
+ * @rev: true to add entries to the end of the list
* @chg: the change list
*/
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
- struct list_head *chg)
+ bool rev, struct list_head *chg)
{
/* Masks that ignore flags */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -3625,7 +3649,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
- u16 i;
+ u16 vsig_idx, i;
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
@@ -3687,8 +3711,13 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
}
/* add profile to VSIG */
- list_add(&t->list,
- &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+ vsig_idx = vsig & ICE_VSIG_IDX_M;
+ if (rev)
+ list_add_tail(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+ else
+ list_add(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
return 0;
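/* Illustrative sketch, not part of the patch: the new rev flag selects
 * head vs. tail insertion. Copying a list by head insertion (list_add)
 * reverses element order, while tail insertion (list_add_tail) keeps the
 * order of the source walk:
 */
#include <stdio.h>

int main(void)
{
	int src[] = { 1, 2, 3 };
	int head_copy[3], tail_copy[3];
	int i;

	for (i = 0; i < 3; i++) {
		head_copy[2 - i] = src[i];      /* head insert: reversed */
		tail_copy[i] = src[i];          /* tail insert: preserved */
	}

	printf("head: %d %d %d, tail: %d %d %d\n",
	       head_copy[0], head_copy[1], head_copy[2],
	       tail_copy[0], tail_copy[1], tail_copy[2]);
	return 0;
}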
@@ -3728,7 +3757,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
if (status)
goto err_ice_create_prof_id_vsig;
- status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
if (status)
goto err_ice_create_prof_id_vsig;
@@ -3753,11 +3782,13 @@ err_ice_create_prof_id_vsig:
* @blk: hardware block
* @vsi: the initial VSI that will be in VSIG
* @lst: the list of profile that will be added to the VSIG
+ * @new_vsig: return of new VSIG
* @chg: the change list
*/
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
- struct list_head *lst, struct list_head *chg)
+ struct list_head *lst, u16 *new_vsig,
+ struct list_head *chg)
{
struct ice_vsig_prof *t;
enum ice_status status;
@@ -3772,12 +3803,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
return status;
list_for_each_entry(t, lst, list) {
+ /* Reverse the order here since we are copying the list */
status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
- chg);
+ true, chg);
if (status)
return status;
}
+ *new_vsig = vsig;
+
return 0;
}
@@ -3899,7 +3933,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* not sharing entries and we can simply add the new
* profile to the VSIG.
*/
- status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
+ &chg);
if (status)
goto err_ice_add_prof_id_flow;
@@ -3910,7 +3945,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
} else {
/* No match, so we need a new VSIG */
status = ice_create_vsig_from_lst(hw, blk, vsi,
- &union_lst, &chg);
+ &union_lst, &vsig,
+ &chg);
if (status)
goto err_ice_add_prof_id_flow;
@@ -4076,7 +4112,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* new VSIG and TCAM entries
*/
status = ice_create_vsig_from_lst(hw, blk, vsi,
- &copy, &chg);
+ &copy, &vsig,
+ &chg);
if (status)
goto err_ice_rem_prof_id_flow;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index a05ceb59863b..3de862a3c789 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -694,7 +694,7 @@ out:
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
* @fld: field to be set
- * @type: type of the field
+ * @field_type: type of the field
* @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
* entry's input buffer
* @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
@@ -715,16 +715,16 @@ out:
*/
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
- enum ice_flow_fld_match_type type, u16 val_loc,
+ enum ice_flow_fld_match_type field_type, u16 val_loc,
u16 mask_loc, u16 last_loc)
{
u64 bit = BIT_ULL(fld);
seg->match |= bit;
- if (type == ICE_FLOW_FLD_TYPE_RANGE)
+ if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
seg->range |= bit;
- seg->fields[fld].type = type;
+ seg->fields[fld].type = field_type;
seg->fields[fld].src.val = val_loc;
seg->fields[fld].src.mask = mask_loc;
seg->fields[fld].src.last = last_loc;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6db3d0494127..1d37a9f02c1c 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -85,6 +85,7 @@
#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0)
#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
+#define QRXFLXP_CNTXT_TS_M BIT(11)
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
#define GLGEN_RSTCTL 0x000B8180
@@ -217,6 +218,8 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define GL_MDCK_TX_TDPU 0x00049348
+#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
#define GL_MDET_RX_QNUM_S 0
#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
@@ -286,6 +289,8 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
+#define GLDCB_RTCTQ_RXQNUM_S 0
+#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d974e2fa3e63..2f256bf45efc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -9,11 +9,11 @@
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
- * @type: VSI type enum
+ * @vsi_type: VSI type enum
*/
-const char *ice_vsi_type_str(enum ice_vsi_type type)
+const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
- switch (type) {
+ switch (vsi_type) {
case ICE_VSI_PF:
return "ICE_VSI_PF";
case ICE_VSI_VF:
@@ -26,16 +26,26 @@ const char *ice_vsi_type_str(enum ice_vsi_type type)
}
/**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
* @vsi: the VSI being configured
* @ena: start or stop the Rx rings
+ *
+ * First enable/disable all of the Rx rings, flush any remaining writes, and
+ * then verify that they have all been enabled/disabled successfully. This
+ * lets all of the register writes post before any time is spent waiting for
+ * the hardware to apply the change to each ring.
*/
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
int i, ret = 0;
+ for (i = 0; i < vsi->num_rxq; i++)
+ ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
+
+ ice_flush(&vsi->back->hw);
+
for (i = 0; i < vsi->num_rxq; i++) {
- ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
+ ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret)
break;
}
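/* Illustrative sketch, not part of the patch: the rewrite kicks off every
 * ring's register write, flushes once, and only then polls each ring, so
 * the per-ring hardware latencies overlap instead of adding up. The shape
 * of the pattern with trivial stubs:
 */
static void ring_ctrl(int ring, int ena) { (void)ring; (void)ena; }  /* stub write */
static void flush_writes(void) { }                                   /* stub flush */
static int ring_wait(int ring, int ena) { (void)ring; (void)ena; return 0; }

static int ctrl_all_rings(int num_rings, int ena)
{
	int i, ret = 0;

	for (i = 0; i < num_rings; i++)         /* 1) start all changes */
		ring_ctrl(i, ena);

	flush_writes();                         /* 2) one flush for the batch */

	for (i = 0; i < num_rings; i++) {
		ret = ring_wait(i, ena);        /* 3) then wait on each ring */
		if (ret)
			break;
	}
	return ret;
}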
@@ -111,7 +121,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
- /* fall through */
case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
@@ -169,12 +178,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vf = &pf->vf[vsi->vf_id];
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
- /* pf->num_vf_msix includes (VF miscellaneous vector +
+ /* pf->num_msix_per_vf includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
@@ -341,13 +350,13 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
- * @type: type of VSI
+ * @vsi_type: type of VSI
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -368,13 +377,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
if (!vsi)
goto unlock_pf;
- vsi->type = type;
+ vsi->type = vsi_type;
vsi->back = pf;
set_bit(__ICE_DOWN, vsi->state);
vsi->idx = pf->next_vsi;
- if (type == ICE_VSI_VF)
+ if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
@@ -433,7 +442,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
.scatter_count = ICE_MAX_SCATTER_TXQS,
.vsi_map = vsi->txq_map,
.vsi_map_offset = 0,
- .mapping_mode = vsi->tx_mapping_mode
+ .mapping_mode = ICE_VSI_MAP_CONTIG
};
struct ice_qs_cfg rx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -443,18 +452,21 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
.scatter_count = ICE_MAX_SCATTER_RXQS,
.vsi_map = vsi->rxq_map,
.vsi_map_offset = 0,
- .mapping_mode = vsi->rx_mapping_mode
+ .mapping_mode = ICE_VSI_MAP_CONTIG
};
- int ret = 0;
-
- vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
- vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+ int ret;
ret = __ice_vsi_get_qs(&tx_qs_cfg);
- if (!ret)
- ret = __ice_vsi_get_qs(&rx_qs_cfg);
+ if (ret)
+ return ret;
+ vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
- return ret;
+ ret = __ice_vsi_get_qs(&rx_qs_cfg);
+ if (ret)
+ return ret;
+ vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
+
+ return 0;
}
/**
@@ -559,12 +571,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
case ICE_VSI_VF:
- /* VF VSI will gets a small RSS table
- * For VSI_LUT, LUT size should be set to 64 bytes
+ /* VF VSI will get a small RSS table.
+ * For VSI_LUT, LUT size should be set to 64 bytes.
*/
vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
- vsi->rss_size = min_t(int, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
+ vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
case ICE_VSI_LB:
@@ -672,7 +683,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
if (vsi->type == ICE_VSI_PF)
max_rss = ICE_MAX_LG_RSS_QS;
else
- max_rss = ICE_MAX_SMALL_RSS_QS;
+ max_rss = ICE_MAX_RSS_QS_PER_VF;
qcount_rx = min_t(int, rx_numq_tc, max_rss);
if (!vsi->req_rxq)
qcount_rx = min_t(int, qcount_rx,
@@ -804,7 +815,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_LB:
- /* fall through */
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
@@ -897,6 +907,109 @@ out:
}
/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ *
+ * Returns number of resources freed
+ */
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+ int count = 0;
+ int i;
+
+ if (!res || index >= res->end)
+ return -EINVAL;
+
+ id |= ICE_RES_VALID_BIT;
+ for (i = index; i < res->end && res->list[i] == id; i++) {
+ res->list[i] = 0;
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int start = 0, end = 0;
+
+ if (needed > res->end)
+ return -ENOMEM;
+
+ id |= ICE_RES_VALID_BIT;
+
+ do {
+ /* skip already allocated entries */
+ if (res->list[end++] & ICE_RES_VALID_BIT) {
+ start = end;
+ if ((start + needed) > res->end)
+ break;
+ }
+
+ if (end == (start + needed)) {
+ int i = start;
+
+ /* there was enough, so assign it to the requestor */
+ while (i != end)
+ res->list[i++] = id;
+
+ return start;
+ }
+ } while (end < res->end);
+
+ return -ENOMEM;
+}
+
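/* Illustrative sketch, not part of the patch: ice_search_res() is a
 * first-fit scan over a flat tracker array where any entry carrying the
 * VALID bit restarts the candidate window. A standalone model:
 */
#include <stdio.h>

#define VALID_BIT 0x8000

static unsigned short tracker[8] = {
	VALID_BIT | 1, 0, 0, VALID_BIT | 2, 0, 0, 0, 0
};

static int search_res(unsigned short *res, int res_end, int needed, int id)
{
	int start = 0, end = 0;

	if (needed > res_end)
		return -1;

	do {
		if (res[end++] & VALID_BIT) {   /* skip allocated entry */
			start = end;
			if (start + needed > res_end)
				break;
		}
		if (end == start + needed) {
			int i = start;

			while (i != end)        /* claim the block */
				res[i++] = VALID_BIT | id;
			return start;
		}
	} while (end < res_end);

	return -1;
}

int main(void)
{
	/* slots 0 and 3 are taken, so a 3-slot request lands at index 4 */
	printf("base index: %d\n", search_res(tracker, 8, 3, 7));
	return 0;
}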
+/**
+ * ice_get_free_res_count - Get free count from a resource tracker
+ * @res: Resource tracker instance
+ */
+static u16 ice_get_free_res_count(struct ice_res_tracker *res)
+{
+ u16 i, count = 0;
+
+ for (i = 0; i < res->end; i++)
+ if (!(res->list[i] & ICE_RES_VALID_BIT))
+ count++;
+
+ return count;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or negative for error
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ if (!res || !pf)
+ return -EINVAL;
+
+ if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+ dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ needed, res->num_entries, id);
+ return -EINVAL;
+ }
+
+ return ice_search_res(res, needed, id);
+}
+
+/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
@@ -928,8 +1041,9 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
- dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num, vsi->base_vector);
+ dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
+ ice_get_free_res_count(pf->irq_tracker),
+ ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
}
pf->num_avail_sw_msix -= num_q_vectors;
@@ -1348,7 +1462,9 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
list_add(&tmp->list_entry, &tmp_add_list);
status = ice_add_vlan(&pf->hw, &tmp_add_list);
- if (status) {
+ if (!status) {
+ vsi->num_vlan++;
+ } else {
err = -ENODEV;
dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
vsi->vsi_num);
@@ -1390,10 +1506,12 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
list_add(&list->list_entry, &tmp_add_list);
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
- if (status == ICE_ERR_DOES_NOT_EXIST) {
+ if (!status) {
+ vsi->num_vlan--;
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
- } else if (status) {
+ } else {
dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
@@ -1678,25 +1796,25 @@ out:
}
/**
- * ice_vsi_start_rx_rings - start VSI's Rx rings
- * @vsi: the VSI whose rings are to be started
+ * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
+ * @vsi: the VSI whose rings are to be enabled
*
* Returns 0 on success and a negative value on error
*/
-int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_ctrl_rx_rings(vsi, true);
+ return ice_vsi_ctrl_all_rx_rings(vsi, true);
}
/**
- * ice_vsi_stop_rx_rings - stop VSI's Rx rings
- * @vsi: the VSI
+ * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
+ * @vsi: the VSI whose rings are to be disabled
*
* Returns 0 on success and a negative value on error
*/
-int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
+int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_ctrl_rx_rings(vsi, false);
+ return ice_vsi_ctrl_all_rx_rings(vsi, false);
}
/**
@@ -1756,6 +1874,20 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_is_vlan_pruning_ena - check whether VLAN pruning is enabled
+ * @vsi: VSI to check for Rx VLAN pruning
+ *
+ * returns true if Rx VLAN pruning is enabled and false otherwise.
+ */
+bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
+{
+ if (!vsi)
+ return false;
+
+ return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -1952,7 +2084,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
- * @type: VSI type
+ * @vsi_type: VSI type
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
@@ -1964,7 +2096,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type type, u16 vf_id)
+ enum ice_vsi_type vsi_type, u16 vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -1972,10 +2104,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (type == ICE_VSI_VF)
- vsi = ice_vsi_alloc(pf, type, vf_id);
+ if (vsi_type == ICE_VSI_VF)
+ vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
else
- vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2025,6 +2157,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
+ /* Always add VLAN ID 0 switch rule by default. This is needed
+ * in order to allow all untagged and VLAN-0 priority-tagged traffic
+ * if Rx VLAN pruning is enabled. Also there are cases where we
+ * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
+ * so this handles those cases (i.e. adding the PF to a bridge
+ * without the 8021q module loaded).
+ */
+ ret = ice_vsi_add_vlan(vsi, 0);
+ if (ret)
+ goto unroll_clear_rings;
+
ice_vsi_map_rings_to_vectors(vsi);
/* Do not exit if configuring RSS had an issue, at least
@@ -2104,6 +2247,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
return vsi;
+unroll_clear_rings:
+ ice_vsi_clear_rings(vsi);
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
@@ -2299,94 +2444,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
}
/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- *
- * Returns number of resources freed
- */
-int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
- int count = 0;
- int i;
-
- if (!res || index >= res->end)
- return -EINVAL;
-
- id |= ICE_RES_VALID_BIT;
- for (i = index; i < res->end && res->list[i] == id; i++) {
- res->list[i] = 0;
- count++;
- }
-
- return count;
-}
-
-/**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- */
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
-{
- int start = 0, end = 0;
-
- if (needed > res->end)
- return -ENOMEM;
-
- id |= ICE_RES_VALID_BIT;
-
- do {
- /* skip already allocated entries */
- if (res->list[end++] & ICE_RES_VALID_BIT) {
- start = end;
- if ((start + needed) > res->end)
- break;
- }
-
- if (end == (start + needed)) {
- int i = start;
-
- /* there was enough, so assign it to the requestor */
- while (i != end)
- res->list[i++] = id;
-
- return start;
- }
- } while (end < res->end);
-
- return -ENOMEM;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or negative for error
- */
-int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
- if (!res || !pf)
- return -EINVAL;
-
- if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
- needed, res->num_entries, id);
- return -EINVAL;
- }
-
- return ice_search_res(res, needed, id);
-}
-
-/**
* ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
* @vsi: the VSI being un-configured
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index e2c0dadce920..04ca00799364 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,7 +6,7 @@
#include "ice.h"
-const char *ice_vsi_type_str(enum ice_vsi_type type);
+const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -30,9 +30,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
-int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
-int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi);
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
@@ -42,6 +42,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
+bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
+
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -56,7 +58,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type type, u16 vf_id);
+ enum ice_vsi_type vsi_type, u16 vf_id);
void ice_napi_del(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5ef28052c0f8..5b190c257124 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -10,6 +10,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
+#include "ice_devlink.h"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
@@ -706,7 +707,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
/* Get FEC mode based on negotiated link info */
switch (vsi->port_info->phy.link_info.fec_info) {
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
- /* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fec = "RS-FEC";
break;
@@ -1029,6 +1029,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ice_handle_link_event(pf, &event))
dev_err(dev, "Could not handle link event\n");
break;
+ case ice_aqc_opc_event_lan_overflow:
+ ice_vf_lan_overflow_event(pf, &event);
+ break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
@@ -1185,20 +1188,28 @@ static void ice_service_timer(struct timer_list *t)
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
- * Called from service task. OICR interrupt handler indicates MDD event
+ * Called from service task. OICR interrupt handler indicates MDD event.
+ * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
+ * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
+ * disable the queue, the PF can be configured to reset the VF using ethtool
+ * private flag mdd-auto-reset-vf.
*/
static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- bool mdd_detected = false;
u32 reg;
int i;
- if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ ice_print_vfs_mdd_events(pf);
return;
+ }
- /* find what triggered the MDD event */
+ /* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) {
u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
@@ -1214,7 +1225,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
- mdd_detected = true;
}
reg = rd32(hw, GL_MDET_TX_TCLAN);
@@ -1232,7 +1242,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
- mdd_detected = true;
}
reg = rd32(hw, GL_MDET_RX);
@@ -1250,85 +1259,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_RX, 0xffffffff);
- mdd_detected = true;
}
- if (mdd_detected) {
- bool pf_mdd_detected = false;
-
- reg = rd32(hw, PF_MDET_TX_PQM);
- if (reg & PF_MDET_TX_PQM_VALID_M) {
- wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
- dev_info(dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
+ /* check to see if this PF caused an MDD event */
+ reg = rd32(hw, PF_MDET_TX_PQM);
+ if (reg & PF_MDET_TX_PQM_VALID_M) {
+ wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
+ }
- reg = rd32(hw, PF_MDET_TX_TCLAN);
- if (reg & PF_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
- dev_info(dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
+ reg = rd32(hw, PF_MDET_TX_TCLAN);
+ if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
+ }
- reg = rd32(hw, PF_MDET_RX);
- if (reg & PF_MDET_RX_VALID_M) {
- wr32(hw, PF_MDET_RX, 0xFFFF);
- dev_info(dev, "RX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
- /* Queue belongs to the PF initiate a reset */
- if (pf_mdd_detected) {
- set_bit(__ICE_NEEDS_RESTART, pf->state);
- ice_service_task_schedule(pf);
- }
+ reg = rd32(hw, PF_MDET_RX);
+ if (reg & PF_MDET_RX_VALID_M) {
+ wr32(hw, PF_MDET_RX, 0xFFFF);
+ if (netif_msg_rx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
}
- /* check to see if one of the VFs caused the MDD */
+ /* Check to see if one of the VFs caused an MDD event, and then
+ * increment counters and set print pending
+ */
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
- bool vf_mdd_detected = false;
-
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "RX driver issue detected on VF %d\n",
- i);
- }
-
- if (vf_mdd_detected) {
- vf->num_mdd_events++;
- if (vf->num_mdd_events &&
- vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
- i, vf->num_mdd_events);
+ vf->mdd_rx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_rx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
+ i);
+
+ /* Since the queue is disabled on VF Rx MDD events, the
+ * PF can be configured to reset the VF through the
+ * ethtool private flag mdd-auto-reset-vf.
+ */
+ if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ ice_reset_vf(&pf->vf[i], false);
}
}
+
+ ice_print_vfs_mdd_events(pf);
}
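A usage sketch for the mdd-auto-reset-vf control referenced above; the flag name comes from this patch, while the interface name is purely illustrative:

	# enable automatic VF reset on VF Rx MDD events (example interface name)
	ethtool --set-priv-flags eth0 mdd-auto-reset-vf on
	ethtool --show-priv-flags eth0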
/**
@@ -1510,7 +1519,7 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
- hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
+ hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
@@ -1916,8 +1925,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
ret = ice_down(vsi);
if (ret) {
- NL_SET_ERR_MSG_MOD(extack,
- "Preparing device for XDP attach failed");
+ NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
return ret;
}
}
@@ -1926,13 +1934,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
vsi->num_xdp_txq = vsi->alloc_txq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack,
- "Setting up XDP Tx resources failed");
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack,
- "Freeing XDP Tx resources failed");
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
} else {
ice_vsi_assign_bpf_prog(vsi, prog);
}
@@ -1965,8 +1971,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct ice_vsi *vsi = np->vsi;
if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack,
- "XDP can be loaded only on PF VSI");
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
return -EINVAL;
}
@@ -1993,6 +1998,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u32 val;
+ /* Disable anti-spoof detection interrupt to prevent spurious event
+ * interrupts during a function reset. Anti-spoof functionality is
+ * still supported.
+ */
+ val = rd32(hw, GL_MDCK_TX_TDPU);
+ val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
+ wr32(hw, GL_MDCK_TX_TDPU, val);
+
/* clear things first */
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
@@ -2042,8 +2055,16 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
}
if (oicr & PFINT_OICR_VFLR_M) {
- ena_mask &= ~PFINT_OICR_VFLR_M;
- set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ /* disable any further VFLR event notifications */
+ if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ u32 reg = rd32(hw, PFINT_OICR_ENA);
+
+ reg &= ~PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ } else {
+ ena_mask &= ~PFINT_OICR_VFLR_M;
+ set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ }
}
if (oicr & PFINT_OICR_GRST_M) {
@@ -2351,10 +2372,16 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
+ err = ice_devlink_create_port(pf);
+ if (err)
+ return err;
+
netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
vsi->alloc_rxq);
- if (!netdev)
- return -ENOMEM;
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_destroy_devlink_port;
+ }
vsi->netdev = netdev;
np = netdev_priv(netdev);
@@ -2384,7 +2411,9 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
err = register_netdev(vsi->netdev);
if (err)
- return err;
+ goto err_destroy_devlink_port;
+
+ devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -2392,6 +2421,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
netif_tx_stop_all_queues(vsi->netdev);
return 0;
+
+err_destroy_devlink_port:
+ ice_devlink_destroy_port(pf);
+
+ return err;
}
/**
@@ -2461,16 +2495,19 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
- /* Enable VLAN pruning when VLAN 0 is added */
- if (unlikely(!vid)) {
+ /* VLAN 0 is added by default during load/reset */
+ if (!vid)
+ return 0;
+
+ /* Enable VLAN pruning when a VLAN other than 0 is added */
+ if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
- /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
- * needed to continue allowing all untagged packets since VLAN prune
- * list is applied to all packets by the switch
+ /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
+ * packets aren't pruned by the device's internal switch on Rx
*/
ret = ice_vsi_add_vlan(vsi, vid);
if (!ret) {
@@ -2500,6 +2537,10 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
+ /* don't allow removal of VLAN 0 */
+ if (!vid)
+ return 0;
+
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
@@ -2507,8 +2548,8 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (ret)
return ret;
- /* Disable VLAN pruning when VLAN 0 is removed */
- if (unlikely(!vid))
+ /* Disable pruning when VLAN 0 is the only VLAN rule */
+ if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
ret = ice_cfg_vlan_pruning(vsi, false, false);
vsi->vlan_ena = false;
@@ -2945,7 +2986,6 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
}
break;
case ICE_ERR_BUF_TOO_SHORT:
- /* fall-through */
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
@@ -2977,7 +3017,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
default:
break;
}
- /* fall-through */
+ fallthrough;
default:
dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
@@ -3069,30 +3109,22 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
* followed by an EUI-64 identifier (PCIe Device Serial Number)
*/
struct pci_dev *pdev = pf->pdev;
- char *opt_fw_filename = NULL;
- u32 dword;
- u8 dsn[8];
- int pos;
+ char *opt_fw_filename;
+ u64 dsn;
/* Determine the name of the optional file using the DSN (two
* dwords following the start of the DSN Capability).
*/
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
- if (!opt_fw_filename)
- return NULL;
-
- pci_read_config_dword(pdev, pos + 4, &dword);
- put_unaligned_le32(dword, &dsn[0]);
- pci_read_config_dword(pdev, pos + 8, &dword);
- put_unaligned_le32(dword, &dsn[4]);
- snprintf(opt_fw_filename, NAME_MAX,
- "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
- ICE_DDP_PKG_PATH,
- dsn[7], dsn[6], dsn[5], dsn[4],
- dsn[3], dsn[2], dsn[1], dsn[0]);
- }
+ dsn = pci_get_dsn(pdev);
+ if (!dsn)
+ return NULL;
+
+ opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
+ if (!opt_fw_filename)
+ return NULL;
+
+ snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+ ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
}
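For illustration, if pci_get_dsn() returns 0x0123456789ABCDEF and ICE_DDP_PKG_PATH expands to a directory prefix such as "intel/ice/ddp/" (an assumption here), the %016llX format above produces:

	intel/ice/ddp/ice-0123456789ABCDEF.pkg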
@@ -3166,7 +3198,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return err;
}
- pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
+ pf = ice_allocate_pf(dev);
if (!pf)
return -ENOMEM;
@@ -3204,6 +3236,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+ err = ice_devlink_register(pf);
+ if (err) {
+ dev_err(dev, "ice_devlink_register failed: %d\n", err);
+ goto err_exit_unroll;
+ }
+
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
@@ -3238,6 +3276,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
+ ice_devlink_init_regions(pf);
+
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
@@ -3336,6 +3376,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return 0;
err_alloc_sw_unroll:
+ ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
@@ -3346,8 +3387,10 @@ err_init_interrupt_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
+ ice_devlink_destroy_regions(pf);
ice_deinit_hw(hw);
err_exit_unroll:
+ ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
return err;
}
@@ -3370,11 +3413,15 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
+ set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
+ ice_free_vfs(pf);
+ }
+
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
- if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
- ice_free_vfs(pf);
+ ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -3383,7 +3430,10 @@ static void ice_remove(struct pci_dev *pdev)
ice_vsi_free_q_vectors(pf->vsi[i]);
}
ice_deinit_pf(pf);
+ ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
+ ice_devlink_unregister(pf);
+
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
@@ -3458,9 +3508,9 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ err = pci_aer_clear_nonfatal_status(pdev);
if (err)
- dev_dbg(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
err);
/* non-fatal, continue */
@@ -3534,15 +3584,26 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
/* required last entry */
{ 0, }
};
@@ -3961,7 +4022,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
- err = ice_vsi_start_rx_rings(vsi);
+ err = ice_vsi_start_all_rx_rings(vsi);
if (err)
return err;
@@ -4340,7 +4401,7 @@ int ice_down(struct ice_vsi *vsi)
vsi->vsi_num, tx_err);
}
- rx_err = ice_vsi_stop_rx_rings(vsi);
+ rx_err = ice_vsi_stop_all_rx_rings(vsi);
if (rx_err)
netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);
@@ -5027,6 +5088,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/**
* ice_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: Tx queue
*/
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -5064,13 +5126,13 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
- netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 7525ac50742e..8beb675d676b 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -11,25 +11,29 @@
* @length: length of the section to be read (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tells if this is a shadow RAM read
* @cd: pointer to command details structure or NULL
*
* Read the NVM using the admin queue commands (0x0701)
*/
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
- void *data, bool last_command, struct ice_sq_cd *cd)
+ void *data, bool last_command, bool read_shadow_ram,
+ struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
cmd = &desc.params.nvm;
- /* In offset the highest byte must be zeroed. */
- if (offset & 0xFF000000)
+ if (offset > ICE_AQC_NVM_MAX_OFFSET)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
+ if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
+ cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;
+
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
@@ -42,65 +46,64 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
}
/**
- * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
- * @hw: pointer to the HW structure
- * @offset: offset in words from module start
- * @words: number of words to access
+ * ice_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors and ensures that no single
+ * read request exceeds the maximum 4KB read for a single AdminQ command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
*/
-static enum ice_status
-ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
+enum ice_status
+ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
+ bool read_shadow_ram)
{
- if ((offset + words) > hw->nvm.sr_words) {
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: offset beyond SR lmt.\n");
- return ICE_ERR_PARAM;
- }
+ enum ice_status status;
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
- if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
- /* We can access only up to 4KB (one sector), in one AQ write */
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: tried to access %d words, limit is %d.\n",
- words, ICE_SR_SECTOR_SIZE_IN_WORDS);
- return ICE_ERR_PARAM;
- }
+ *length = 0;
- if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
- (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
- /* A single access cannot spread over two sectors */
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM,
- "NVM error: cannot spread over two sectors.\n");
+ "NVM error: requested offset is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
}
- return 0;
-}
+ do {
+ u32 read_size, sector_offset;
-/**
- * ice_read_sr_aq - Read Shadow RAM.
- * @hw: pointer to the HW structure
- * @offset: offset in words from module start
- * @words: number of words to read
- * @data: buffer for words reads from Shadow RAM
- * @last_command: tells the AdminQ that this is the last command
- *
- * Reads 16-bit word buffers from the Shadow RAM using the admin command.
- */
-static enum ice_status
-ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
- bool last_command)
-{
- enum ice_status status;
+ /* ice_aq_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also
+ * 4KB.
+ */
+ sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
+ read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
+ inlen - bytes_read);
- status = ice_check_sr_access_params(hw, offset, words);
+ last_cmd = !(bytes_read + read_size < inlen);
- /* values in "offset" and "words" parameters are sized as words
- * (16 bits) but ice_aq_read_nvm expects these values in bytes.
- * So do this conversion while calling ice_aq_read_nvm.
- */
- if (!status)
- status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data,
- last_command, NULL);
+ status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
+ offset, read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram, NULL);
+ if (status)
+ break;
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
return status;
}
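A worked example of the loop above, assuming ICE_AQ_MAX_BUF_LEN is 4096: a 6000-byte read starting at flat offset 4000 is split so that no AdminQ command exceeds 4KB or crosses a sector boundary:

	iteration   offset   sector_offset   read_size   last_cmd
	1           4000     4000              96        false
	2           4096        0            4096        false
	3           8192        0            1808        true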
@@ -110,75 +113,25 @@ ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
- * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method.
+ * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
+ u32 bytes = sizeof(u16);
enum ice_status status;
+ __le16 data_local;
- status = ice_read_sr_aq(hw, offset, 1, data, true);
- if (!status)
- *data = le16_to_cpu(*(__force __le16 *)data);
-
- return status;
-}
-
-/**
- * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
- *
- * Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq
- * method. Ownership of the NVM is taken before reading the buffer and later
- * released.
- */
-static enum ice_status
-ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
-{
- enum ice_status status;
- bool last_cmd = false;
- u16 words_read = 0;
- u16 i = 0;
-
- do {
- u16 read_size, off_w;
-
- /* Calculate number of bytes we should read in this step.
- * It's not allowed to read more than one page at a time or
- * to cross page boundaries.
- */
- off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;
- read_size = off_w ?
- min_t(u16, *words,
- (ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) :
- min_t(u16, (*words - words_read),
- ICE_SR_SECTOR_SIZE_IN_WORDS);
-
- /* Check if this is last command, if so set proper flag */
- if ((words_read + read_size) >= *words)
- last_cmd = true;
-
- status = ice_read_sr_aq(hw, offset, read_size,
- data + words_read, last_cmd);
- if (status)
- goto read_nvm_buf_aq_exit;
-
- /* Increment counter for words already read and move offset to
- * new read location
- */
- words_read += read_size;
- offset += read_size;
- } while (words_read < *words);
-
- for (i = 0; i < *words; i++)
- data[i] = le16_to_cpu(((__force __le16 *)data)[i]);
+ /* Note that ice_read_flat_nvm takes into account the 4KB AdminQ and
+ * Shadow RAM sector restrictions necessary when reading from the NVM.
+ */
+ status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (status)
+ return status;
-read_nvm_buf_aq_exit:
- *words = words_read;
- return status;
+ *data = le16_to_cpu(data_local);
+ return 0;
}
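For example, reading the DEV starter version at Shadow RAM word offset ICE_SR_NVM_DEV_STARTER_VER (0x18) becomes a 2-byte flat read at byte offset 0x30, per the offset * sizeof(u16) conversion above.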
/**
@@ -188,7 +141,7 @@ read_nvm_buf_aq_exit:
*
* This function will request NVM ownership.
*/
-static enum ice_status
+enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->nvm.blank_nvm_mode)
@@ -203,7 +156,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
*
* This function will release NVM ownership.
*/
-static void ice_release_nvm(struct ice_hw *hw)
+void ice_release_nvm(struct ice_hw *hw)
{
if (hw->nvm.blank_nvm_mode)
return;
@@ -233,6 +186,239 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
}
/**
+ * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ */
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type)
+{
+ enum ice_status status;
+ u16 pfa_len, pfa_ptr;
+ u16 next_tlv;
+
+ status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ while (next_tlv < pfa_ptr + pfa_len) {
+ u16 tlv_sub_module_type;
+ u16 tlv_len;
+
+ /* Read TLV type */
+ status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
+ break;
+ }
+ /* Read TLV length */
+ status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return ICE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
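A sketch of the PFA layout that the walk above assumes; all offsets are in 16-bit Shadow RAM words:

	/* [pfa_ptr + 0]      PFA length in words
	 * [pfa_ptr + 1]      first TLV: sub-module type
	 * [pfa_ptr + 2]      first TLV: value length in words
	 * [pfa_ptr + 3 ...]  first TLV: value
	 *
	 * Each subsequent TLV starts at the previous TLV plus its value
	 * length plus 2 (one word each for type and length).
	 */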
+
+/**
+ * ice_read_pba_string - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ */
+enum ice_status
+ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ enum ice_status status;
+ u16 pba_word, pba_size;
+ u16 i;
+
+ status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ ICE_SR_PBA_BLOCK_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
+ return status;
+ }
+
+ /* pba_size is the next word */
+ status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
+ return status;
+ }
+
+ if (pba_tlv_len < pba_size) {
+ ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
+ return ICE_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
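A minimal caller sketch for the helper above; the 32-byte buffer is an arbitrary illustrative size (it must hold two characters per PBA word plus a NUL terminator):

	u8 pba_num[32];	/* illustrative size only */

	if (!ice_read_pba_string(hw, pba_num, sizeof(pba_num)))
		dev_info(ice_hw_to_dev(hw), "PBA: %s\n", pba_num);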
+
+/**
+ * ice_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ *
+ * Read the Combo Image version data from the Boot Configuration TLV and fill
+ * in the option ROM version data.
+ */
+static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
+{
+ u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
+ struct ice_orom_info *orom = &hw->nvm.orom;
+ enum ice_status status;
+ u32 combo_ver;
+
+ status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
+ ICE_SR_BOOT_CFG_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read Boot Configuration Block TLV.\n");
+ return status;
+ }
+
+ /* Boot Configuration Block must have length at least 2 words
+ * (Combo Image Version High and Combo Image Version Low)
+ */
+ if (boot_cfg_tlv_len < 2) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Invalid Boot Configuration Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
+ &combo_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
+ return status;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
+ &combo_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
+ return status;
+ }
+
+ combo_ver = ((u32)combo_hi << 16) | combo_lo;
+
+ orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
+ ICE_OROM_VER_SHIFT);
+ orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
+ orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
+ ICE_OROM_VER_BUILD_SHIFT);
+
+ return 0;
+}
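A worked decode using the OROM_VER masks added to ice_type.h in this patch: combo_ver = 0x01234567 yields major = 0x01 (bits 31:24), build = 0x2345 (bits 23:8), and patch = 0x67 (bits 7:0).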
+
+/**
+ * ice_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ */
+static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+{
+ u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status)
+ return status;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ status = ice_read_flat_nvm(hw, offset, &len, &data, false);
+ if (status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "%s: New upper bound of %u bytes\n",
+ __func__, offset);
+ status = 0;
+ max_size = offset;
+ } else if (!status) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "%s: New lower bound of %u bytes\n",
+ __func__, offset);
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ ice_debug(hw, ICE_DBG_NVM,
+ "Predicted flash size is %u bytes\n", max_size);
+
+ hw->nvm.flash_size = max_size;
+
+err_read_flat_nvm:
+ ice_release_nvm(hw);
+
+ return status;
+}
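Given the initial bounds above, the search space is ICE_AQC_NVM_MAX_OFFSET + 1 bytes; assuming that constant is the 24-bit limit implied by the old 0xFF000000 offset check, the interval starts at 16 MB and the bisection converges after at most 24 one-byte probe reads.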
+
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -241,9 +427,8 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
- u16 oem_hi, oem_lo, boot_cfg_tlv, boot_cfg_tlv_len;
struct ice_nvm_info *nvm = &hw->nvm;
- u16 eetrack_lo, eetrack_hi;
+ u16 eetrack_lo, eetrack_hi, ver;
enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -269,12 +454,14 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return ICE_ERR_NVM_BLANK_MODE;
}
- status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &nvm->ver);
+ status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
return status;
}
+ nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
if (status) {
@@ -289,80 +476,49 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
- /* the following devices do not have boot_cfg_tlv yet */
- if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
- hw->device_id == ICE_DEV_ID_E822C_QSFP ||
- hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
- hw->device_id == ICE_DEV_ID_E822C_SGMII ||
- hw->device_id == ICE_DEV_ID_E822C_SFP ||
- hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
- hw->device_id == ICE_DEV_ID_E822L_SFP ||
- hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
- hw->device_id == ICE_DEV_ID_E822L_SGMII)
- return status;
-
- status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
- ICE_SR_BOOT_CFG_PTR);
+ status = ice_discover_flash_size(hw);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read Boot Configuration Block TLV.\n");
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM init error: failed to discover flash size.\n");
return status;
}
- /* Boot Configuration Block must have length at least 2 words
- * (Combo Image Version High and Combo Image Version Low)
- */
- if (boot_cfg_tlv_len < 2) {
- ice_debug(hw, ICE_DBG_INIT,
- "Invalid Boot Configuration Block TLV size.\n");
- return ICE_ERR_INVAL_SIZE;
- }
-
- status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF),
- &oem_hi);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER hi.\n");
+ switch (hw->device_id) {
+ /* the following devices do not have boot_cfg_tlv yet */
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
return status;
+ default:
+ break;
}
- status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF + 1),
- &oem_lo);
+ status = ice_get_orom_ver_info(hw);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER lo.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
return status;
}
- nvm->oem_ver = ((u32)oem_hi << 16) | oem_lo;
-
return 0;
}
/**
- * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
- *
- * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq
- * method. The buf read is preceded by the NVM ownership take
- * and followed by the release.
- */
-enum ice_status
-ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
-{
- enum ice_status status;
-
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (!status) {
- status = ice_read_sr_buf_aq(hw, offset, words, data);
- ice_release_nvm(hw);
- }
-
- return status;
-}
-
-/**
* ice_nvm_validate_checksum
* @hw: pointer to the HW struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index a9fa011c22c6..999f273ba6ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -4,5 +4,17 @@
#ifndef _ICE_NVM_H_
#define _ICE_NVM_H_
+enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_nvm(struct ice_hw *hw);
+enum ice_status
+ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
+ bool read_shadow_ram);
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type);
+enum ice_status
+ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
+enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index d2db0d04e117..554f567476f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -121,9 +121,7 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
break;
case ICE_AQ_LINK_SPEED_40GB:
- /* fall through */
case ICE_AQ_LINK_SPEED_50GB:
- /* fall through */
case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 431266081a80..51825a203e35 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -578,7 +578,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
struct ice_aqc_get_sw_cfg_resp_elem *ele;
u16 pf_vf_num, swid, vsi_port_num;
bool is_vf = false;
- u8 type;
+ u8 res_type;
ele = rbuf[i].elements;
vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
@@ -593,16 +593,16 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_IS_VF)
is_vf = true;
- type = le16_to_cpu(ele->vsi_port_num) >>
+ res_type = le16_to_cpu(ele->vsi_port_num) >>
ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
- if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
+ if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
/* FW VSI is not needed. Just continue. */
continue;
}
ice_init_port_info(hw->port_info, vsi_port_num,
- type, swid, pf_vf_num, is_vf);
+ res_type, swid, pf_vf_num, is_vf);
}
} while (req_desc && !status);
@@ -760,7 +760,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_ETHERTYPE_MAC:
daddr = f_info->l_data.ethertype_mac.mac_addr;
- /* fall-through */
+ fallthrough;
case ICE_SW_LKUP_ETHERTYPE:
off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
@@ -771,7 +771,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_PROMISC_VLAN:
vlan_id = f_info->l_data.mac_vlan.vlan_id;
- /* fall-through */
+ fallthrough;
case ICE_SW_LKUP_PROMISC:
daddr = f_info->l_data.mac_vlan.mac_addr;
break;
@@ -958,7 +958,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_status status;
u16 s_rule_size;
- u16 type;
+ u16 rule_type;
int i;
if (!num_vsi)
@@ -970,11 +970,11 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
- type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
- ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+ rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
- type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
- ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
+ rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else
return ICE_ERR_PARAM;
@@ -992,7 +992,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
}
- s_rule->type = cpu_to_le16(type);
+ s_rule->type = cpu_to_le16(rule_type);
s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 4de61dbedd36..f67e8362958c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -453,10 +453,10 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough -- not supported action */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping frame */
+ fallthrough;
case XDP_DROP:
result = ICE_XDP_CONSUMED;
break;
@@ -1188,7 +1188,6 @@ ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_10GB:
- /* fall through */
default:
itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
avg_pkt_size + 640);
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index db0ef6ba907f..4ce5f92fca4a 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -239,12 +239,21 @@ struct ice_fc_info {
enum ice_fc_mode req_mode; /* FC mode requested by caller */
};
+/* Option ROM version information */
+struct ice_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+};
+
/* NVM Information */
struct ice_nvm_info {
- u32 eetrack; /* NVM data version */
- u32 oem_ver; /* OEM version info */
- u16 sr_words; /* Shadow RAM size in words */
- u16 ver; /* NVM package version */
+ struct ice_orom_info orom; /* Option ROM version info */
+ u32 eetrack; /* NVM data version */
+ u16 sr_words; /* Shadow RAM size in words */
+ u32 flash_size; /* Size of available flash in bytes */
+ u8 major_ver; /* major version of NVM package */
+ u8 minor_ver; /* minor version of NVM package */
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
@@ -626,7 +635,8 @@ struct ice_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define ICE_SR_BOOT_CFG_PTR 0x132
-#define ICE_NVM_OEM_VER_OFF 0x02
+#define ICE_NVM_OROM_VER_OFF 0x02
+#define ICE_SR_PBA_BLOCK_PTR 0x16
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
@@ -634,12 +644,12 @@ struct ice_hw_port_stats {
#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT)
#define ICE_NVM_VER_HI_SHIFT 12
#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT)
-#define ICE_OEM_VER_PATCH_SHIFT 0
-#define ICE_OEM_VER_PATCH_MASK (0xff << ICE_OEM_VER_PATCH_SHIFT)
-#define ICE_OEM_VER_BUILD_SHIFT 8
-#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT)
-#define ICE_OEM_VER_SHIFT 24
-#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT)
+#define ICE_OROM_VER_PATCH_SHIFT 0
+#define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT)
+#define ICE_OROM_VER_BUILD_SHIFT 8
+#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
+#define ICE_OROM_VER_SHIFT 24
+#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 75c70d432c72..15191a325918 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -91,6 +91,39 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
/**
+ * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
+ * @vf: the VF to check
+ *
+ * Returns true if the VF has no Rx and no Tx queues enabled and returns false
+ * otherwise
+ */
+static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
+{
+ return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
+ !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
+}
+
+/**
+ * ice_is_vf_link_up - check if the VF's link is up
+ * @vf: VF to check if link is up
+ */
+static bool ice_is_vf_link_up(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (ice_check_vf_init(pf, vf))
+ return false;
+
+ if (ice_vf_has_no_qs_ena(vf))
+ return false;
+ else if (vf->link_forced)
+ return vf->link_up;
+ else
+ return pf->hw.port_info->phy.link_info.link_info &
+ ICE_AQ_LINK_UP;
+}
+
+/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
*
@@ -99,28 +132,16 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe = { 0 };
- struct ice_link_status *ls;
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- ls = &hw->port_info->phy.link_info;
+ struct ice_hw *hw = &vf->pf->hw;
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
- /* Always report link is down if the VF queues aren't enabled */
- if (!vf->num_qs_ena) {
+ if (ice_is_vf_link_up(vf))
+ ice_set_pfe_link(vf, &pfe,
+ hw->port_info->phy.link_info.link_speed, true);
+ else
ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
- } else if (vf->link_forced) {
- u16 link_speed = vf->link_up ?
- ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
-
- ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
- } else {
- ice_set_pfe_link(vf, &pfe, ls->link_speed,
- ls->link_info & ICE_AQ_LINK_UP);
- }
ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
@@ -149,7 +170,12 @@ static void ice_free_vf_res(struct ice_vf *vf)
vf->num_mac = 0;
}
- last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
+ last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
+
+ /* clear VF MDD event information */
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
/* Disable interrupts so that VF starts in a known state */
for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
@@ -180,7 +206,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
first = vf->first_vector_idx;
- last = first + pf->num_vf_msix - 1;
+ last = first + pf->num_msix_per_vf - 1;
for (v = first; v <= last; v++) {
u32 reg;
@@ -206,11 +232,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
* ice_sriov_free_msix_res - Reset/free any used MSIX resources
* @pf: pointer to the PF structure
*
- * If MSIX entries from the pf->irq_tracker were needed then we need to
- * reset the irq_tracker->end and give back the entries we needed to
- * num_avail_sw_msix.
- *
- * If no MSIX entries were taken from the pf->irq_tracker then just clear
+ * Since no MSIX entries are taken from the pf->irq_tracker, just clear
* the pf->sriov_base_vector.
*
* Returns 0 on success, and -EINVAL on error.
@@ -227,11 +249,7 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
return -EINVAL;
/* give back irq_tracker resources used */
- if (pf->sriov_base_vector < res->num_entries) {
- res->end = res->num_entries;
- pf->num_avail_sw_msix +=
- res->num_entries - pf->sriov_base_vector;
- }
+ WARN_ON(pf->sriov_base_vector < res->num_entries);
pf->sriov_base_vector = 0;
@@ -245,9 +263,8 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
/* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
- vf->num_qs_ena = 0;
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
@@ -263,7 +280,7 @@ static void ice_dis_vf_qs(struct ice_vf *vf)
vsi = pf->vsi[vf->lan_vsi_idx];
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
- ice_vsi_stop_rx_rings(vsi);
+ ice_vsi_stop_all_rx_rings(vsi);
ice_set_vf_state_qs_dis(vf);
}
@@ -283,11 +300,6 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
- /* Avoid wait time by stopping all VFs at the same time */
- ice_for_each_vf(pf, i)
- if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
- ice_dis_vf_qs(&pf->vf[i]);
-
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
@@ -297,8 +309,13 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+ /* Avoid wait time by stopping all VFs at the same time */
+ ice_for_each_vf(pf, i)
+ if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
+ ice_dis_vf_qs(&pf->vf[i]);
+
tmp = pf->num_alloc_vfs;
- pf->num_vf_qps = 0;
+ pf->num_qps_per_vf = 0;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
@@ -407,43 +424,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
}
/**
- * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
- * @ctxt: the VSI ctxt to fill
- * @vid: the VLAN ID to set as a PVID
- */
-static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
-{
- ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR);
- ctxt->info.pvid = cpu_to_le16(vid);
- ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-}
-
-/**
- * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
- * @ctxt: the VSI ctxt to fill
- */
-static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
-{
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
- ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
- ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-}
-
-/**
* ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
* @vsi: the VSI to update
- * @vid: the VLAN ID to set as a PVID
+ * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
* @enable: true for enable PVID false for disable
*/
-static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
+static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
@@ -453,20 +442,33 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
return -ENOMEM;
ctxt->info = vsi->info;
- if (enable)
- ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
- else
- ice_vsi_kill_pvid_fill_ctxt(ctxt);
+ info = &ctxt->info;
+ if (enable) {
+ info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR;
+ info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ } else {
+ info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
+ ICE_AQ_VSI_VLAN_MODE_ALL;
+ info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
+ info->pvid = cpu_to_le16(pvid_info);
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
}
- vsi->info = ctxt->info;
+ vsi->info.vlan_flags = info->vlan_flags;
+ vsi->info.sw_flags2 = info->sw_flags2;
+ vsi->info.pvid = info->pvid;
out:
kfree(ctxt);
return ret;
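The pvid_info parameter follows the standard 802.1Q TCI layout, so a caller would typically pack it as sketched below (VLAN_PRIO_SHIFT is the usual bit 13 from <linux/if_vlan.h>):

	/* VLAN ID in bits 11:0, QoS/PCP in bits 15:13 */
	u16 pvid_info = (qos << VLAN_PRIO_SHIFT) | (vid & VLAN_VID_MASK);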
@@ -501,7 +503,7 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
*/
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
- return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
+ return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}
/**
@@ -533,9 +535,20 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vf->lan_vsi_num = vsi->vsi_num;
/* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_id) {
- ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
- ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
+ if (vf->port_vlan_info) {
+ ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
+ if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
+ dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
+ vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
+ } else {
+ /* set VLAN 0 filter by default when no port VLAN is
+ * enabled. If a port VLAN is enabled we don't want
+ * untagged broadcast/multicast traffic seen on the VF
+ * interface.
+ */
+ if (ice_vsi_add_vlan(vsi, 0))
+ dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
+ vf->vf_id);
}
eth_broadcast_addr(broadcast);
@@ -583,7 +596,7 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
*/
tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
ice_get_avail_rxq_count(pf));
- tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
+ tx_rx_queue_left += pf->num_qps_per_vf;
if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
vf->num_req_qs != vf->num_vf_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -629,9 +642,9 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
- last = (first + pf->num_vf_msix) - 1;
+ last = (first + pf->num_msix_per_vf) - 1;
abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
- abs_last = (abs_first + pf->num_vf_msix) - 1;
+ abs_last = (abs_first + pf->num_msix_per_vf) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* VF Vector allocation */
@@ -749,7 +762,7 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
pf = vf->pf;
/* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
+ return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
q_vector->v_idx + 1;
}
@@ -782,127 +795,112 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
* @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
*
* This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so in many cases the irq_tracker will not
- * be needed. In these cases we just set the pf->sriov_base_vector and return
- * success.
+ * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
+ * just set the pf->sriov_base_vector and return success.
*
- * If SR-IOV needs to use any pf->irq_tracker entries it updates the
- * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
- * so any calls to ice_get_res() using the irq_tracker will not try to use
- * resources at or beyond the newly set value.
+ * If there are not enough resources available, return an error. This should
+ * always be caught by ice_set_per_vf_res().
*
* Return 0 on success, and -EINVAL when there are not enough MSIX vectors
* in the PF's space available for SR-IOV.
*/
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
- int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- u16 pf_total_msix_vectors =
- pf->hw.func_caps.common_cap.num_msix_vectors;
- struct ice_res_tracker *res = pf->irq_tracker;
+ u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ int vectors_used = pf->irq_tracker->num_entries;
int sriov_base_vector;
- if (max_valid_res_idx < 0)
- return max_valid_res_idx;
-
- sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
+ sriov_base_vector = total_vectors - num_msix_needed;
/* make sure we only grab irq_tracker entries from the list end and
* that we have enough available MSIX vectors
*/
- if (sriov_base_vector <= max_valid_res_idx)
+ if (sriov_base_vector < vectors_used)
return -EINVAL;
pf->sriov_base_vector = sriov_base_vector;
- /* dip into irq_tracker entries and update used resources */
- if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
- pf->num_avail_sw_msix -=
- res->num_entries - pf->sriov_base_vector;
- res->end = pf->sriov_base_vector;
- }
-
return 0;
}
/**
- * ice_check_avail_res - check if vectors and queues are available
+ * ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
*
- * This function is where we calculate actual number of resources for VF VSIs,
- * we don't reserve ahead of time during probe. Returns success if vectors and
- * queues resources are available, otherwise returns error code
+ * First, determine HW interrupts from the common pool. If we allocate fewer
+ * VFs, we get more vectors and can enable more queues per VF. Note that this
+ * does not grab any vectors from the SW pool already allocated. Also note
+ * that all vector counts include one for each VF's miscellaneous interrupt
+ * vector (i.e. OICR).
+ *
+ * Minimum VFs - 2 vectors, 1 queue pair
+ * Small VFs - 5 vectors, 4 queue pairs
+ * Medium VFs - 17 vectors, 16 queue pairs
+ *
+ * Second, determine the number of queue pairs per VF by starting with a
+ * pre-defined maximum each VF supports. If this is not possible, then we
+ * adjust based on the queue pairs available on the device.
+ *
+ * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
+ * used by each VF during VF initialization and reset.
*/
-static int ice_check_avail_res(struct ice_pf *pf)
+static int ice_set_per_vf_res(struct ice_pf *pf)
{
int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- u16 num_msix, num_txq, num_rxq, num_avail_msix;
+ int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
+ u16 num_msix_per_vf, num_txq, num_rxq;
if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
return -EINVAL;
- /* add 1 to max_valid_res_idx to account for it being 0-based */
- num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
- (max_valid_res_idx + 1);
-
- /* Grab from HW interrupts common pool
- * Note: By the time the user decides it needs more vectors in a VF
- * its already too late since one must decide this prior to creating the
- * VF interface. So the best we can do is take a guess as to what the
- * user might want.
- *
- * We have two policies for vector allocation:
- * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small
- * number of NFV VFs used for NFV appliances, since this is a special
- * case, we try to assign maximum vectors per VF (65) as much as
- * possible, based on determine_resources algorithm.
- * 2. if num_alloc_vfs is from 17 to 256, then its large number of
- * regular VFs which are not used for any special purpose. Hence try to
- * grab default interrupt vectors (5 as supported by AVF driver).
- */
- if (pf->num_alloc_vfs <= 16) {
- num_msix = ice_determine_res(pf, num_avail_msix,
- ICE_MAX_INTR_PER_VF,
- ICE_MIN_INTR_PER_VF);
- } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
- num_msix = ice_determine_res(pf, num_avail_msix,
- ICE_DFLT_INTR_PER_VF,
- ICE_MIN_INTR_PER_VF);
+ /* determine MSI-X resources per VF */
+ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
+ pf->irq_tracker->num_entries;
+ msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
+ if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
+ num_msix_per_vf = ICE_MIN_INTR_PER_VF;
} else {
- dev_err(dev, "Number of VFs %d exceeds max VF count %d\n",
- pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+ dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
+ msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
+ pf->num_alloc_vfs);
return -EIO;
}
- if (!num_msix)
- return -EIO;
-
- /* Grab from the common pool
- * start by requesting Default queues (4 as supported by AVF driver),
- * Note that, the main difference between queues and vectors is, latter
- * can only be reserved at init time but queues can be requested by VF
- * at runtime through Virtchnl, that is the reason we start by reserving
- * few queues.
- */
+ /* determine queue resources per VF */
num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
- ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
+ min_t(u16,
+ num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF),
+ ICE_MIN_QS_PER_VF);
num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
- ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
-
- if (!num_txq || !num_rxq)
+ min_t(u16,
+ num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF),
+ ICE_MIN_QS_PER_VF);
+
+ if (!num_txq || !num_rxq) {
+ dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
+ ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
return -EIO;
+ }
- if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
+ if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
+ dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
+ pf->num_alloc_vfs);
return -EINVAL;
+ }
- /* since AVF driver works with only queue pairs which means, it expects
- * to have equal number of Rx and Tx queues, so take the minimum of
- * available Tx or Rx queues
- */
- pf->num_vf_qps = min_t(int, num_txq, num_rxq);
- pf->num_vf_msix = num_msix;
+ /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
+ pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
+ pf->num_msix_per_vf = num_msix_per_vf;
+ dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
+ pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
return 0;
}
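A hedged userspace sketch of the tier selection the comment describes; the constants mirror the ones in ice_virtchnl_pf.h, while the availability figures are invented:

#include <stdio.h>

#define ICE_NUM_VF_MSIX_MED   17
#define ICE_NUM_VF_MSIX_SMALL  5
#define ICE_MIN_INTR_PER_VF    2

static int pick_tier(int msix_avail_for_sriov, int num_vfs)
{
	int per_vf = msix_avail_for_sriov / num_vfs;

	if (per_vf >= ICE_NUM_VF_MSIX_MED)
		return ICE_NUM_VF_MSIX_MED;
	if (per_vf >= ICE_NUM_VF_MSIX_SMALL)
		return ICE_NUM_VF_MSIX_SMALL;
	if (per_vf >= ICE_MIN_INTR_PER_VF)
		return ICE_MIN_INTR_PER_VF;
	return -1;	/* not enough vectors: driver returns -EIO */
}

int main(void)
{
	/* 960 free vectors: 8 VFs land in the medium tier, 64 in small */
	printf("8 VFs -> %d vectors each\n", pick_tier(960, 8));
	printf("64 VFs -> %d vectors each\n", pick_tier(960, 64));
	return 0;
}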
@@ -943,17 +941,9 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
/* reallocate VF resources to finish resetting the VSI state */
if (!ice_alloc_vf_res(vf)) {
- struct ice_vsi *vsi;
-
ice_ena_vf_mappings(vf);
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
-
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (ice_vsi_add_vlan(vsi, 0))
- dev_warn(ice_pf_to_dev(pf),
- "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
- vf->vf_id);
}
/* Tell the VF driver the reset is done. This needs to be done only
@@ -985,13 +975,13 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
if (vsi->num_vlan) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
rm_promisc);
- } else if (vf->port_vlan_id) {
+ } else if (vf->port_vlan_info) {
if (rm_promisc)
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_id);
+ vf->port_vlan_info);
else
status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_id);
+ vf->port_vlan_info);
} else {
if (rm_promisc)
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
@@ -1019,7 +1009,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int v;
- if (ice_check_avail_res(pf)) {
+ if (ice_set_per_vf_res(pf)) {
dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
return false;
}
@@ -1032,7 +1022,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, v) {
struct ice_vf *vf = &pf->vf[v];
- vf->num_vf_qs = pf->num_vf_qps;
+ vf->num_vf_qs = pf->num_qps_per_vf;
dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
@@ -1165,9 +1155,10 @@ static bool ice_is_vf_disabled(struct ice_vf *vf)
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
*
- * Returns true if the VF is reset, false otherwise.
+ * Returns true if the VF is currently in reset, resets successfully, or resets
+ * are disabled; false otherwise.
*/
-static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -1180,6 +1171,12 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev = ice_pf_to_dev(pf);
+ if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
+ vf->vf_id);
+ return true;
+ }
+
if (ice_is_vf_disabled(vf)) {
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
vf->vf_id);
@@ -1231,7 +1228,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
*/
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_id || vsi->num_vlan)
+ if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
@@ -1432,7 +1429,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
if (num_vfs > pf->num_vfs_supported) {
dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
num_vfs, pf->num_vfs_supported);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
dev_info(dev, "Allocating %d VFs\n", num_vfs);
@@ -1518,6 +1515,72 @@ static void ice_vc_reset_vf(struct ice_vf *vf)
}
/**
+ * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
+ * @pf: PF used to index all VFs
+ * @pfq: queue index relative to the PF's function space
+ *
+ * If no VF owns the pfq then return NULL, otherwise return a pointer to the
+ * VF that owns the pfq.
+ */
+static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
+{
+ int vf_id;
+
+ ice_for_each_vf(pf, vf_id) {
+ struct ice_vf *vf = &pf->vf[vf_id];
+ struct ice_vsi *vsi;
+ u16 rxq_idx;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ ice_for_each_rxq(vsi, rxq_idx)
+ if (vsi->rxq_map[rxq_idx] == pfq)
+ return vf;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_globalq_to_pfq - convert from global queue index to PF space queue index
+ * @pf: PF used for conversion
+ * @globalq: global queue index used to convert to PF space queue index
+ */
+static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
+{
+ return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
+}
+
+/**
+ * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
+ * @pf: PF that the LAN overflow event happened on
+ * @event: structure holding the event information for the LAN overflow event
+ *
+ * Determine if the LAN overflow event was caused by a VF queue. If it was not
+ * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger
+ * a reset on the offending VF.
+ */
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 gldcb_rtctq, queue;
+ struct ice_vf *vf;
+
+ gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
+
+ /* event returns device global Rx queue number */
+ queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
+ GLDCB_RTCTQ_RXQNUM_S;
+
+ vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
+ if (!vf)
+ return;
+
+ ice_vc_reset_vf(vf);
+}
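A compact illustration of the event decoding above; the mask, shift, and first-queue offset here are placeholders, not the real GLDCB_RTCTQ or rxq_first_id values:

#include <stdio.h>

#define RTCTQ_RXQNUM_M 0x3FFF	/* hypothetical Rx queue field mask */
#define RTCTQ_RXQNUM_S 0	/* hypothetical field shift */
#define RXQ_FIRST_ID   2048	/* hypothetical rxq_first_id */

int main(void)
{
	unsigned int gldcb_rtctq = 0x0807;	/* example event payload */
	unsigned int queue = (gldcb_rtctq & RTCTQ_RXQNUM_M) >> RTCTQ_RXQNUM_S;

	/* convert the device-global queue into the PF's queue space,
	 * which is then matched against each VF's rxq_map
	 */
	printf("global Rx queue %u -> PF queue %u\n",
	       queue, queue - RXQ_FIRST_ID);
	return 0;
}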
+
+/**
* ice_vc_send_msg_to_vf - Send message to VF
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
@@ -1675,7 +1738,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_vsis = 1;
/* Tx and Rx queues are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = pf->num_vf_msix;
+ vfres->max_vectors = pf->num_msix_per_vf;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
@@ -1981,7 +2044,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
ret = -EIO;
goto out;
@@ -2039,6 +2102,22 @@ error_param:
}
/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
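A standalone sketch of the same validation in plain C (MAX_QS_PER_VF mirrors ICE_MAX_RSS_QS_PER_VF):

#include <stdio.h>
#include <stdint.h>

#define MAX_QS_PER_VF 16

static int vqs_bitmaps_valid(uint32_t rx_queues, uint32_t tx_queues)
{
	/* at least one queue selected, and no bit set at or above queue 16 */
	if ((!rx_queues && !tx_queues) ||
	    rx_queues >= (1u << MAX_QS_PER_VF) ||
	    tx_queues >= (1u << MAX_QS_PER_VF))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", vqs_bitmaps_valid(0x0003, 0x0003));	/* queues 0-1: ok */
	printf("%d\n", vqs_bitmaps_valid(0, 0));		/* nothing: bad */
	printf("%d\n", vqs_bitmaps_valid(1u << 16, 0));		/* queue 16: bad */
	return 0;
}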
+
+/**
* ice_vc_ena_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2065,13 +2144,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (!vqs->rx_queues && !vqs->tx_queues) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
- vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2087,7 +2160,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* programmed using ice_vsi_cfg_txqs
*/
q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2097,7 +2170,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
if (test_bit(vf_q_id, vf->rxq_ena))
continue;
- if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2105,12 +2178,11 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
}
set_bit(vf_q_id, vf->rxq_ena);
- vf->num_qs_ena++;
}
vsi = pf->vsi[vf->lan_vsi_idx];
q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2121,7 +2193,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
set_bit(vf_q_id, vf->txq_ena);
- vf->num_qs_ena++;
}
/* Set flag to indicate that queues are enabled */
@@ -2163,13 +2234,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (!vqs->rx_queues && !vqs->tx_queues) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
- vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2183,7 +2248,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (vqs->tx_queues) {
q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
struct ice_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 };
@@ -2208,14 +2273,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
/* Clear enabled queues flag */
clear_bit(vf_q_id, vf->txq_ena);
- vf->num_qs_ena--;
}
}
- if (vqs->rx_queues) {
- q_map = vqs->rx_queues;
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2225,7 +2299,8 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (!test_bit(vf_q_id, vf->rxq_ena))
continue;
- if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2234,12 +2309,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
/* Clear enabled queues flag */
clear_bit(vf_q_id, vf->rxq_ena);
- vf->num_qs_ena--;
}
}
/* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param:
@@ -2249,6 +2323,57 @@ error_param:
}
/**
+ * ice_cfg_interrupt
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @vector_id: vector ID
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ *
+ * Configure the IRQ to queue map.
+ */
+static int
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
+
+/**
* ice_vc_cfg_irq_map_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2258,13 +2383,11 @@ error_param:
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
struct virtchnl_irq_map_info *irqmap_info;
- u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
struct ice_pf *pf = vf->pf;
- u16 num_q_vectors_mapped;
struct ice_vsi *vsi;
- unsigned long qmap;
int i;
irqmap_info = (struct virtchnl_irq_map_info *)msg;
@@ -2275,8 +2398,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->num_vf_msix < num_q_vectors_mapped ||
- !irqmap_info->num_vectors) {
+ pf->num_msix_per_vf < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2297,7 +2420,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
+ if (!(vector_id < pf->num_msix_per_vf) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2318,33 +2441,10 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
}
/* look out for invalid queue indexes */
- qmap = map->rxq_map;
- q_vector->num_ring_rx = 0;
- for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- q_vector->num_ring_tx = 0;
- for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->tx.itr_idx);
- }
+ v_ret = (enum virtchnl_status_code)
+ ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+ if (v_ret)
+ goto error_param;
}
error_param:
@@ -2387,7 +2487,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
@@ -2694,16 +2794,16 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
if (!req_queues) {
dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
vf->vf_id);
- } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_BASE_QS_PER_VF);
+ ICE_MAX_RSS_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
@@ -2733,19 +2833,20 @@ int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
- u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vsi;
struct device *dev;
struct ice_vf *vf;
+ u16 vlanprio;
int ret;
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- if (vlan_id > ICE_MAX_VLANID || qos > 7) {
- dev_err(dev, "Invalid VF Parameters\n");
+ if (vlan_id >= VLAN_N_VID || qos > 7) {
+ dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
+ vf_id, vlan_id, qos);
return -EINVAL;
}
@@ -2761,40 +2862,52 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (ret)
return ret;
- if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+ vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
+
+ if (vf->port_vlan_info == vlanprio) {
/* duplicate request, so just return success */
dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
return 0;
}
- /* If PVID, then remove all filters on the old VLAN */
- if (vsi->info.pvid)
- ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
- VLAN_VID_MASK));
-
if (vlan_id || qos) {
+ /* remove VLAN 0 filter set by default when transitioning from
+ * no port VLAN to a port VLAN. No change to old port VLAN on
+ * failure.
+ */
+ ret = ice_vsi_kill_vlan(vsi, 0);
+ if (ret)
+ return ret;
ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
return ret;
} else {
- ice_vsi_manage_pvid(vsi, 0, false);
- vsi->info.pvid = 0;
+ /* add VLAN 0 filter back when transitioning from port VLAN to
+ * no port VLAN. No change to old port VLAN on failure.
+ */
+ ret = ice_vsi_add_vlan(vsi, 0);
+ if (ret)
+ return ret;
+ ret = ice_vsi_manage_pvid(vsi, 0, false);
+ if (ret)
+ return ret;
}
if (vlan_id) {
dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
- /* add new VLAN filter for each MAC */
+ /* add VLAN filter for the port VLAN */
ret = ice_vsi_add_vlan(vsi, vlan_id);
if (ret)
return ret;
}
+ /* remove old port VLAN filter with valid VLAN ID or QoS fields */
+ if (vf->port_vlan_info)
+ ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
- /* The Port VLAN needs to be saved across resets the same as the
- * default LAN MAC address.
- */
- vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ /* keep port VLAN information persistent on resets */
+ vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
return 0;
}
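The port VLAN bookkeeping above packs the VLAN ID and QoS into one u16 exactly like an 802.1Q TCI; a small demonstration using the same VLAN_* constants:

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK   0x0fff
#define VLAN_PRIO_MASK  0xe000

int main(void)
{
	uint16_t vlan_id = 100, qos = 5;
	uint16_t vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);

	/* unpack it again, as ice_get_vf_cfg does */
	printf("port_vlan_info=0x%04x vid=%u qos=%u\n", vlanprio,
	       vlanprio & VLAN_VID_MASK,
	       (vlanprio & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
	return 0;
}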
@@ -2849,7 +2962,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
for (i = 0; i < vfl->num_elements; i++) {
- if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
+ if (vfl->vlan_id[i] >= VLAN_N_VID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "invalid VF VLAN id %d\n",
vfl->vlan_id[i]);
@@ -2911,9 +3024,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- vsi->num_vlan++;
- /* Enable VLAN pruning when VLAN is added */
- if (!vlan_promisc) {
+ /* Enable VLAN pruning when non-zero VLAN is added */
+ if (!vlan_promisc && vid &&
+ !ice_vsi_is_vlan_pruning_ena(vsi)) {
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2921,7 +3034,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
vid, status);
goto error_param;
}
- } else {
+ } else if (vlan_promisc) {
/* Enable Ucast/Mcast VLAN promiscuous mode */
promisc_m = ICE_PROMISC_VLAN_TX |
ICE_PROMISC_VLAN_RX;
@@ -2965,9 +3078,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- vsi->num_vlan--;
- /* Disable VLAN pruning when the last VLAN is removed */
- if (!vsi->num_vlan)
+ /* Disable VLAN pruning when only VLAN 0 is left */
+ if (vsi->num_vlan == 1 &&
+ ice_vsi_is_vlan_pruning_ena(vsi))
ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
@@ -3246,14 +3359,12 @@ int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
struct ice_vf *vf;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_check_vf_init(pf, vf))
return -EBUSY;
@@ -3262,9 +3373,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
/* VF configuration for VLAN and applicable QoS */
- ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
- ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
- ICE_VLAN_PRIORITY_S;
+ ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
+ ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
ivi->trusted = vf->trusted;
ivi->spoofchk = vf->spoofchk;
@@ -3442,3 +3552,52 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
return 0;
}
+
+/**
+ * ice_print_vfs_mdd_events - print VFs malicious driver detect events
+ * @pf: pointer to the PF structure
+ *
+ * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
+ */
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ /* VF MDD event logs are rate limited to one second intervals */
+ if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
+ return;
+
+ pf->last_printed_mdd_jiffies = jiffies;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed =
+ vf->mdd_rx_events.count;
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, hw->pf_id, i,
+ vf->dflt_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed =
+ vf->mdd_tx_events.count;
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+ vf->mdd_tx_events.count, hw->pf_id, i,
+ vf->dflt_lan_addr.addr);
+ }
+ }
+}
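The rate limiting above compares jiffies against a stored timestamp; a userspace analogue of the same one-second-window pattern:

#include <stdio.h>
#include <time.h>

static time_t last_printed;	/* plays the role of last_printed_mdd_jiffies */

static void maybe_print(const char *msg)
{
	time_t now = time(NULL);

	if (now - last_printed < 1)
		return;		/* within the rate-limit window, stay quiet */
	last_printed = now;
	puts(msg);
}

int main(void)
{
	maybe_print("MDD event");	/* printed */
	maybe_print("MDD event");	/* suppressed: same second */
	return 0;
}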
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index ac67982751df..3f9464269bd2 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -5,11 +5,6 @@
#define _ICE_VIRTCHNL_PF_H_
#include "ice.h"
-#define ICE_MAX_VLANID 4095
-#define ICE_VLAN_PRIORITY_S 12
-#define ICE_VLAN_M 0xFFF
-#define ICE_PRIORITY_M 0x7000
-
/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
#define ICE_MAX_MACADDR_PER_VF 12
@@ -26,18 +21,15 @@
#define ICE_PCI_CIAD_WAIT_COUNT 100
#define ICE_PCI_CIAD_WAIT_DELAY_US 1
-/* VF resources default values and limitation */
+/* VF resource constraints */
#define ICE_MAX_VF_COUNT 256
-#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
-#define ICE_DFLT_QS_PER_VF 4
#define ICE_NONQ_VECS_VF 1
#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MAX_POLICY_INTR_PER_VF 33
+#define ICE_MAX_RSS_QS_PER_VF 16
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_TRIES 40
#define ICE_MAX_VF_RESET_SLEEP_MS 20
@@ -61,6 +53,13 @@ enum ice_virtchnl_cap {
ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
};
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+ u16 count; /* total count of Rx|Tx events */
+ /* count number of the last printed event */
+ u16 last_printed;
+};
+
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
@@ -73,9 +72,9 @@ struct ice_vf {
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
struct virtchnl_ether_addr dflt_lan_addr;
- DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
- DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
- u16 port_vlan_id;
+ DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ u16 port_vlan_info; /* Port VLAN ID and QoS */
u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
u8 trusted:1;
u8 spoofchk:1;
@@ -89,14 +88,14 @@ struct ice_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
- u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vf_qs; /* num of queue configured per VF */
- u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
+ struct ice_mdd_vf_events mdd_rx_events;
+ struct ice_mdd_vf_events mdd_tx_events;
};
#ifdef CONFIG_PCI_IOV
@@ -111,6 +110,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -128,6 +128,9 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -135,6 +138,8 @@ ice_get_vf_stats(struct net_device *netdev, int vf_id,
#define ice_vc_notify_link_state(pf) do {} while (0)
#define ice_vc_notify_reset(pf) do {} while (0)
#define ice_set_vf_state_qs_dis(vf) do {} while (0)
+#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
+#define ice_print_vfs_mdd_events(pf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -143,6 +148,12 @@ ice_reset_all_vfs(struct ice_pf __always_unused *pf,
return true;
}
+static inline bool
+ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
+{
+ return true;
+}
+
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
int __always_unused num_vfs)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 4d3407bbd4c4..8279db15e870 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -183,7 +183,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
+ err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
ice_qvec_cfg_msix(vsi, q_vector);
- err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
goto free_buf;
@@ -457,7 +457,7 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (if_running) {
ret = ice_qp_dis(vsi, qid);
if (ret) {
- netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
+ netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_umem_if_up;
}
}
@@ -471,11 +471,11 @@ xsk_umem_if_up:
if (!ret && umem_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
- netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
+ netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
if (umem_failure) {
- netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
+ netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
umem_present ? "en" : "dis", umem_failure);
return umem_failure;
}
@@ -609,7 +609,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
*/
static bool
ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
- bool alloc(struct ice_ring *, struct ice_rx_buf *))
+ bool (*alloc)(struct ice_ring *, struct ice_rx_buf *))
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -816,10 +816,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough -- not supported action */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping frame */
+ fallthrough;
case XDP_DROP:
result = ICE_XDP_CONSUMED;
break;
@@ -841,8 +841,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
+ bool failure = false;
struct xdp_buff xdp;
- bool failure = 0;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -937,6 +937,15 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
+ if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ else
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+
+ return (int)total_rx_packets;
+ }
+
return failure ? budget : (int)total_rx_packets;
}
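The need_wakeup flags set and cleared in these hunks let the driver tell AF_XDP userspace when it must explicitly kick the kernel instead of relying on busy polling. As a hedged illustration of the userspace side (names follow libbpf's xsk.h; the xsk variable and its fields are assumed):

/* Hypothetical AF_XDP TX path: "xsk" and its members are assumptions.
 * Only pay the sendto() syscall cost when the kernel set need_wakeup.
 */
if (xsk_ring_prod__needs_wakeup(&xsk->tx))
	sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);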
@@ -988,6 +997,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -1063,6 +1074,13 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
+ if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+ else
+ xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
+ }
+
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 3479e1de98fe..8a4ba7c6d549 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -24,7 +24,7 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
struct xdp_umem __always_unused *umem,
u16 __always_unused qid)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void
@@ -63,7 +63,7 @@ static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
u32 __always_unused queue_id, u32 __always_unused flags)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 49b5fa9d4783..0c9282e2aaec 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -306,7 +306,7 @@ struct igb_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
- struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+ struct igb_ring ring[] ____cacheline_internodealigned_in_smp;
};
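Replacing the GCC zero-length array with a C99 flexible array member pairs naturally with the kernel's struct_size() helper for overflow-safe allocation; a short sketch (q_vector and ring_count are assumed locals):

/* struct_size() evaluates to sizeof(*q_vector) plus
 * ring_count * sizeof(q_vector->ring[0]), saturating on overflow.
 */
q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
if (!q_vector)
	return -ENOMEM;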
enum e1000_ring_flags_t {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f96ffa83efbe..39d3b76a6f5d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2183,27 +2183,6 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -ENOTSUPP;
-
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -3477,6 +3456,7 @@ static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops igb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = igb_get_drvinfo,
.get_regs_len = igb_get_regs_len,
.get_regs = igb_get_regs,
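The new supported_coalesce_params field moves the long hand-rolled parameter checks removed above into the ethtool core, which rejects any coalesce field a driver does not declare. A minimal sketch of the pattern (the example_* names are illustrative):

static const struct ethtool_ops example_ethtool_ops = {
	/* the ethtool core returns -EOPNOTSUPP for any coalesce field
	 * not declared here, before ->set_coalesce() is ever called
	 */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_coalesce = example_get_coalesce,
	.set_coalesce = example_set_coalesce,
};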
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 3b83747b2700..21a29a0ca7f4 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -198,11 +198,11 @@ int igb_sysfs_init(struct igb_adapter *adapter)
}
/* init i2c_client */
- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
- if (client == NULL) {
+ client = i2c_new_client_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (IS_ERR(client)) {
dev_info(&adapter->pdev->dev,
"Failed to create new i2c device.\n");
- rc = -ENODEV;
+ rc = PTR_ERR(client);
goto exit;
}
adapter->i2c_client = client;
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 3ae358b35227..9217d150e286 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -424,6 +424,7 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops igbvf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 49fb1e1965cd..e3c164c12e10 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o igc_ptp.o
+igc_ethtool.o igc_ptp.o igc_dump.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 52066bdbbad0..a1f845a2aa80 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -42,6 +42,10 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
+/* igc_dump declarations */
+void igc_rings_dump(struct igc_adapter *adapter);
+void igc_regs_dump(struct igc_adapter *adapter);
+
extern char igc_driver_name[];
extern char igc_driver_version[];
@@ -53,10 +57,13 @@ extern char igc_driver_version[];
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
+
+/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_PTP BIT(8)
+#define IGC_FLAG_WOL_SUPPORTED BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
@@ -108,7 +115,7 @@ extern char igc_driver_version[];
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* Transmit and receive latency (for PTP timestamps) */
-/* FIXME: These values were estimated using the ones that i210 has as
+/* FIXME: These values were estimated using the ones that i225 has as
* basis, they seem to provide good numbers with ptp4l/phc2sys, but we
* need to confirm them.
*/
@@ -319,7 +326,7 @@ struct igc_q_vector {
struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
- struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
+ struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
};
#define MAX_ETYPE_FILTER (4 - 1)
@@ -552,6 +559,7 @@ int igc_erase_filter(struct igc_adapter *adapter,
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
+void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 58efa7a02c68..4ddccccf42cc 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -16,7 +16,10 @@
/* Wake Up Filter Control */
#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
@@ -259,6 +262,9 @@
#define IGC_GPIE_EIAME 0x40000000
#define IGC_GPIE_PBA 0x80000000
+/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */
+
/* Transmit Descriptor bit definitions */
#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
new file mode 100644
index 000000000000..657ab50ae296
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include "igc.h"
+
+struct igc_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igc_reg_info igc_reg_info_tbl[] = {
+ /* General Registers */
+ {IGC_CTRL, "CTRL"},
+ {IGC_STATUS, "STATUS"},
+ {IGC_CTRL_EXT, "CTRL_EXT"},
+ {IGC_MDIC, "MDIC"},
+
+ /* Interrupt Registers */
+ {IGC_ICR, "ICR"},
+
+ /* RX Registers */
+ {IGC_RCTL, "RCTL"},
+ {IGC_RDLEN(0), "RDLEN"},
+ {IGC_RDH(0), "RDH"},
+ {IGC_RDT(0), "RDT"},
+ {IGC_RXDCTL(0), "RXDCTL"},
+ {IGC_RDBAL(0), "RDBAL"},
+ {IGC_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IGC_TCTL, "TCTL"},
+ {IGC_TDBAL(0), "TDBAL"},
+ {IGC_TDBAH(0), "TDBAH"},
+ {IGC_TDLEN(0), "TDLEN"},
+ {IGC_TDH(0), "TDH"},
+ {IGC_TDT(0), "TDT"},
+ {IGC_TXDCTL(0), "TXDCTL"},
+ {IGC_TDFH, "TDFH"},
+ {IGC_TDFT, "TDFT"},
+ {IGC_TDFHS, "TDFHS"},
+ {IGC_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/* igc_regdump - register printout routine */
+static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case IGC_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDLEN(n));
+ break;
+ case IGC_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDH(n));
+ break;
+ case IGC_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDT(n));
+ break;
+ case IGC_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RXDCTL(n));
+ break;
+ case IGC_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDBAL(n));
+ break;
+ case IGC_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDBAH(n));
+ break;
+ case IGC_TDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDBAL(n));
+ break;
+ case IGC_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDBAH(n));
+ break;
+ case IGC_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDLEN(n));
+ break;
+ case IGC_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDH(n));
+ break;
+ case IGC_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDT(n));
+ break;
+ case IGC_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TXDCTL(n));
+ break;
+ default:
+ pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
+}
+
+/* igc_rings_dump - Tx-rings and Rx-rings */
+void igc_rings_dump(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ union igc_adv_tx_desc *tx_desc;
+ union igc_adv_rx_desc *rx_desc;
+ struct igc_ring *tx_ring;
+ struct igc_ring *rx_ring;
+ u32 staterr;
+ u16 i, n;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ struct igc_tx_buffer *buffer_info;
+
+ tx_ring = adapter->tx_ring[n];
+ buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 15 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
+ struct igc_tx_buffer *buffer_info;
+
+ tx_desc = IGC_TX_DESC(tx_ring, i);
+ buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ next_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
+
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, buffer_info->skb->data,
+ dma_unmap_len(buffer_info, len),
+ true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info(" %5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+ struct igc_rx_buffer *buffer_info;
+
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IGC_RX_DESC(rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ if (staterr & IGC_RXD_STAT_DD) {
+ /* Descriptor Done */
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ next_desc);
+ } else {
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ next_desc);
+
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->dma && buffer_info->page) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ page_address(buffer_info->page) +
+ buffer_info->page_offset,
+ igc_rx_bufsz(rx_ring),
+ true);
+ }
+ }
+ }
+ }
+
+exit:
+ return;
+}
+
+/* igc_regs_dump - registers dump */
+void igc_regs_dump(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ struct igc_reg_info *reginfo;
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ pr_info(" Register Name Value\n");
+ for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igc_regdump(hw, reginfo);
+ }
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index ee07011e13e9..f530fc29b074 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -308,6 +308,65 @@ static void igc_get_regs(struct net_device *netdev,
regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
}
+static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ wol->wolopts = 0;
+
+ if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
+ return;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+
+ /* apply any specific unsupported masks here */
+ switch (adapter->hw.device_id) {
+ default:
+ break;
+ }
+
+ if (adapter->wol & IGC_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & IGC_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & IGC_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & IGC_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & IGC_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
+ return -EOPNOTSUPP;
+
+ if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IGC_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IGC_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IGC_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= IGC_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= IGC_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
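The WoL plumbing above is a straight bitmask translation between ethtool's WAKE_* flags and the adapter's wake-up filter control bits; a runnable sketch of the mapping (WAKE_* values match linux/ethtool.h, WUFC_* values match the IGC_WUFC_* definitions in this patch):

#include <stdio.h>
#include <stdint.h>

#define WAKE_PHY   0x01
#define WAKE_UCAST 0x02
#define WAKE_MCAST 0x04
#define WAKE_BCAST 0x08
#define WAKE_MAGIC 0x20

#define WUFC_LNKC 0x01
#define WUFC_MAG  0x02
#define WUFC_EX   0x04
#define WUFC_MC   0x08
#define WUFC_BC   0x10

static uint32_t wolopts_to_wufc(uint32_t wolopts)
{
	uint32_t wufc = 0;

	if (wolopts & WAKE_UCAST) wufc |= WUFC_EX;
	if (wolopts & WAKE_MCAST) wufc |= WUFC_MC;
	if (wolopts & WAKE_BCAST) wufc |= WUFC_BC;
	if (wolopts & WAKE_MAGIC) wufc |= WUFC_MAG;
	if (wolopts & WAKE_PHY)   wufc |= WUFC_LNKC;
	return wufc;
}

int main(void)
{
	/* magic packet + link change -> WUFC_MAG | WUFC_LNKC = 0x03 */
	printf("0x%02x\n", wolopts_to_wufc(WAKE_MAGIC | WAKE_PHY));
	return 0;
}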
+
static u32 igc_get_msglevel(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -802,27 +861,6 @@ static int igc_set_coalesce(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -ENOTSUPP;
-
if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
(ec->rx_coalesce_usecs > 3 &&
ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
@@ -1856,9 +1894,12 @@ static int igc_set_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops igc_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = igc_get_drvinfo,
.get_regs_len = igc_get_regs_len,
.get_regs = igc_get_regs,
+ .get_wol = igc_get_wol,
+ .set_wol = igc_set_wol,
.get_msglevel = igc_get_msglevel,
.set_msglevel = igc_set_msglevel,
.nway_reset = igc_nway_reset,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index d9d5425fe8d9..69fa1ce1f927 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3546,6 +3546,8 @@ static void igc_reset_task(struct work_struct *work)
adapter = container_of(work, struct igc_adapter, reset_task);
+ igc_rings_dump(adapter);
+ igc_regs_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igc_reinit_locked(adapter);
}
@@ -4029,6 +4031,9 @@ static void igc_watchdog_task(struct work_struct *work)
}
}
if (link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
@@ -4114,6 +4119,8 @@ no_wait:
return;
}
}
+ pm_schedule_suspend(netdev->dev.parent,
+ MSEC_PER_SEC * 5);
/* also check for alternate media here */
} else if (!netif_carrier_ok(netdev) &&
@@ -4337,6 +4344,7 @@ request_done:
static int __igc_open(struct net_device *netdev, bool resuming)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
int err = 0;
int i = 0;
@@ -4348,6 +4356,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return -EBUSY;
}
+ if (!resuming)
+ pm_runtime_get_sync(&pdev->dev);
+
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -4386,6 +4397,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
rd32(IGC_ICR);
igc_irq_enable(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
+
netif_tx_start_all_queues(netdev);
/* start the watchdog. */
@@ -4404,6 +4418,8 @@ err_setup_rx:
igc_free_all_tx_resources(adapter);
err_setup_tx:
igc_reset(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
return err;
}
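Together with the watchdog hunks above, this is the usual netdev runtime-PM pairing: hold a reference across user-initiated open/close, resume the device on link-up, and schedule a delayed suspend once the link drops. A condensed sketch under those assumptions (generic PCI netdev; ex_hw_up is a hypothetical stand-in for the bring-up path):

	#include <linux/netdevice.h>
	#include <linux/pm_runtime.h>

	static int ex_hw_up(struct net_device *netdev) { return 0; } /* stand-in */

	static int ex_open(struct net_device *netdev)
	{
		struct device *dev = netdev->dev.parent;
		int err;

		pm_runtime_get_sync(dev);	/* keep the device awake while opening */
		err = ex_hw_up(netdev);
		pm_runtime_put(dev);		/* allow autosuspend again */
		return err;
	}

	static void ex_link_change(struct net_device *netdev, bool link)
	{
		struct device *dev = netdev->dev.parent;

		if (link)
			pm_runtime_resume(dev);	/* cancel pending suspend requests */
		else
			pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); /* idle in 5 s */
	}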
@@ -4428,9 +4444,13 @@ static int igc_open(struct net_device *netdev)
static int __igc_close(struct net_device *netdev, bool suspending)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
+ if (!suspending)
+ pm_runtime_get_sync(&pdev->dev);
+
igc_down(adapter);
igc_release_hw_control(adapter);
@@ -4440,6 +4460,9 @@ static int __igc_close(struct net_device *netdev, bool suspending)
igc_free_all_tx_resources(adapter);
igc_free_all_rx_resources(adapter);
+ if (!suspending)
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
@@ -4766,6 +4789,16 @@ static int igc_probe(struct pci_dev *pdev,
hw->fc.requested_mode = igc_fc_default;
hw->fc.current_mode = igc_fc_default;
+ /* By default, support wake on port A */
+ adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
+
+ /* initialize the wol settings based on the eeprom settings */
+ if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
+ adapter->wol |= IGC_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev,
+ adapter->flags & IGC_FLAG_WOL_SUPPORTED);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -4792,6 +4825,10 @@ static int igc_probe(struct pci_dev *pdev,
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
+ pm_runtime_put_noidle(&pdev->dev);
+
return 0;
err_register:
@@ -4826,6 +4863,8 @@ static void igc_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_get_noresume(&pdev->dev);
+
igc_ptp_stop(adapter);
set_bit(__IGC_DOWN, &adapter->state);
@@ -4870,6 +4909,8 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (netif_running(netdev))
__igc_close(netdev, true);
+ igc_ptp_suspend(adapter);
+
igc_clear_interrupt_scheme(adapter);
rtnl_unlock();
@@ -5045,6 +5086,108 @@ static void igc_shutdown(struct pci_dev *pdev)
}
}
+/**
+ * igc_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current PCI connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ **/
+static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ igc_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igc_io_slot_reset - called after the PCI bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
 + * Restart the card from scratch, as if from a cold boot. The implementation
 + * resembles the first half of the igc_resume routine.
+ **/
+static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ pci_ers_result_t result;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Could not re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
 + /* In case of a PCI error, the adapter loses its HW address,
+ * so we should re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
+ igc_reset(adapter);
+ wr32(IGC_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ return result;
+}
+
+/**
+ * igc_io_resume - called when traffic can start to flow again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
 + * it's OK to resume normal operation. The implementation resembles the
 + * second half of the igc_resume routine.
+ */
+static void igc_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ if (igc_open(netdev)) {
+ dev_err(&pdev->dev, "igc_open failed after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igc_get_hw_control(adapter);
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers igc_err_handler = {
+ .error_detected = igc_io_error_detected,
+ .slot_reset = igc_io_slot_reset,
+ .resume = igc_io_resume,
+};
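For reference, these three callbacks are the standard AER recovery sequence: error_detected quiesces the device and requests a slot reset, slot_reset re-enables and reinitializes it, and resume restarts traffic. A minimal skeleton of the same shape (generic driver, hypothetical names; config space is assumed saved at probe time):

	#include <linux/pci.h>

	static pci_ers_result_t ex_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
	{
		if (state == pci_channel_io_perm_failure)
			return PCI_ERS_RESULT_DISCONNECT; /* link is gone for good */
		pci_disable_device(pdev);		  /* quiesce the device */
		return PCI_ERS_RESULT_NEED_RESET;	  /* ask for a slot reset */
	}

	static pci_ers_result_t ex_slot_reset(struct pci_dev *pdev)
	{
		if (pci_enable_device_mem(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
		pci_set_master(pdev);
		pci_restore_state(pdev);	/* saved earlier via pci_save_state() */
		return PCI_ERS_RESULT_RECOVERED;
	}

	static void ex_resume(struct pci_dev *pdev)
	{
		/* re-attach the interface and restart queues here */
	}

	static const struct pci_error_handlers ex_err_handler = {
		.error_detected	= ex_error_detected,
		.slot_reset	= ex_slot_reset,
		.resume		= ex_resume,
	};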
+
#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
@@ -5062,6 +5205,7 @@ static struct pci_driver igc_driver = {
.driver.pm = &igc_pm_ops,
#endif
.shutdown = igc_shutdown,
+ .err_handler = &igc_err_handler,
};
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 693506587198..f99c514ad0f4 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -509,7 +509,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
*/
-void igc_ptp_tx_work(struct work_struct *work)
+static void igc_ptp_tx_work(struct work_struct *work)
{
struct igc_adapter *adapter = container_of(work, struct igc_adapter,
ptp_tx_work);
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index c9029b549b90..d4af53a80f11 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -17,6 +17,11 @@
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 39e73ad60352..2833e4f041ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -462,7 +462,7 @@ struct ixgbe_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
- struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
+ struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
};
#ifdef CONFIG_IXGBE_HWMON
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 7c52ae8ac005..c6bf0a50ee63 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3444,6 +3444,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ccd852ad62a4..ec7a11d13fdc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -968,8 +968,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int i, pos;
- u8 buf[8];
+ u64 dsn;
if (!info)
return -EINVAL;
@@ -985,17 +984,11 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
/* Serial Number */
/* Get the PCI-e Device Serial Number Capability */
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- pos += 4;
- for (i = 0; i < 8; i++)
- pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
-
+ dsn = pci_get_dsn(adapter->pdev);
+ if (dsn)
snprintf(info->serial_number, sizeof(info->serial_number),
- "%02X%02X%02X%02X%02X%02X%02X%02X",
- buf[7], buf[6], buf[5], buf[4],
- buf[3], buf[2], buf[1], buf[0]);
- } else
+ "%016llX", dsn);
+ else
snprintf(info->serial_number, sizeof(info->serial_number),
"Unknown");
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index f7f309c96fa8..988fa49fa99a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -968,6 +968,7 @@ static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 2e4975572e9f..c97c74164c73 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2077,12 +2077,7 @@ jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
IPPROTO_TCP,
0);
} else {
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
- &ip6h->daddr, 0,
- IPPROTO_TCP,
- 0);
+ tcp_v6_gso_csum_prep(skb);
}
return 0;
@@ -2844,6 +2839,9 @@ jme_set_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops jme_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = jme_get_drvinfo,
.get_regs_len = jme_get_regs_len,
.get_regs = jme_get_regs,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 3c8125cbc84d..81d24481b22c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1737,6 +1737,7 @@ static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
}
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = mv643xx_eth_get_drvinfo,
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 11babc79dc6c..51889770958d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -341,6 +341,13 @@ enum {
ETHTOOL_STAT_EEE_WAKEUP,
ETHTOOL_STAT_SKB_ALLOC_ERR,
ETHTOOL_STAT_REFILL_ERR,
+ ETHTOOL_XDP_REDIRECT,
+ ETHTOOL_XDP_PASS,
+ ETHTOOL_XDP_DROP,
+ ETHTOOL_XDP_TX,
+ ETHTOOL_XDP_TX_ERR,
+ ETHTOOL_XDP_XMIT,
+ ETHTOOL_XDP_XMIT_ERR,
ETHTOOL_MAX_STATS,
};
@@ -354,10 +361,10 @@ struct mvneta_statistic {
#define T_REG_64 64
#define T_SW 1
-#define MVNETA_XDP_PASS BIT(0)
-#define MVNETA_XDP_DROPPED BIT(1)
-#define MVNETA_XDP_TX BIT(2)
-#define MVNETA_XDP_REDIR BIT(3)
+#define MVNETA_XDP_PASS 0
+#define MVNETA_XDP_DROPPED BIT(0)
+#define MVNETA_XDP_TX BIT(1)
+#define MVNETA_XDP_REDIR BIT(2)
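Redefining MVNETA_XDP_PASS as 0 (rather than BIT(0)) turns the verdict into a plain consumed/not-consumed value: later in this patch mvneta_swbm_rx_frame() returns it directly, and "if (ret)" means the frame was dropped, sent or redirected and the buffer is already gone. The resulting call-site idiom, sketched with hypothetical names:

	/* Sketch: verdict handling once PASS == 0 (ex_run_xdp is hypothetical). */
	static int ex_run_xdp(struct bpf_prog *prog, struct xdp_buff *xdp);

	static int ex_rx_frame(struct bpf_prog *prog, struct xdp_buff *xdp)
	{
		int ret = ex_run_xdp(prog, xdp);

		if (ret)	/* DROPPED, TX or REDIR: buffer already consumed */
			return ret;

		/* ret == MVNETA_XDP_PASS (0): build an skb and hand it up */
		return 0;
	}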
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
@@ -395,16 +402,42 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
+ { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
+ { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
+ { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
+ { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
+ { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
+ { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
+ { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};
-struct mvneta_pcpu_stats {
- struct u64_stats_sync syncp;
+struct mvneta_stats {
u64 rx_packets;
u64 rx_bytes;
- u64 rx_dropped;
- u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
+ /* xdp */
+ u64 xdp_redirect;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+};
+
+struct mvneta_ethtool_stats {
+ struct mvneta_stats ps;
+ u64 skb_alloc_error;
+ u64 refill_error;
+};
+
+struct mvneta_pcpu_stats {
+ struct u64_stats_sync syncp;
+
+ struct mvneta_ethtool_stats es;
+ u64 rx_dropped;
+ u64 rx_errors;
};
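Folding all counters into one per-CPU struct guarded by a single u64_stats_sync keeps hot-path writers lock-free and gives readers a consistent snapshot. The idiom with a trimmed-down struct (a sketch; the *_irq fetch variants match what this driver uses here):

	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	struct ex_pcpu_stats {
		struct u64_stats_sync syncp;
		u64 rx_packets;
		u64 rx_bytes;
	};

	/* Writer: per-CPU hot path, no locks. */
	static void ex_stats_add(struct ex_pcpu_stats __percpu *pcpu, u64 bytes)
	{
		struct ex_pcpu_stats *s = this_cpu_ptr(pcpu);

		u64_stats_update_begin(&s->syncp);
		s->rx_packets++;
		s->rx_bytes += bytes;
		u64_stats_update_end(&s->syncp);
	}

	/* Reader: retry until a tear-free pair is observed. */
	static void ex_stats_read(struct ex_pcpu_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*pkts = s->rx_packets;
			*bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}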
struct mvneta_pcpu_port {
@@ -660,10 +693,6 @@ struct mvneta_rx_queue {
 /* pointer to incomplete skb buffer */
struct sk_buff *skb;
int left_size;
-
- /* error counters */
- u32 skb_alloc_err;
- u32 refill_err;
};
static enum cpuhp_state online_hpstate;
@@ -748,12 +777,12 @@ mvneta_get_stats64(struct net_device *dev,
cpu_stats = per_cpu_ptr(pp->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
+ rx_packets = cpu_stats->es.ps.rx_packets;
+ rx_bytes = cpu_stats->es.ps.rx_bytes;
rx_dropped = cpu_stats->rx_dropped;
rx_errors = cpu_stats->rx_errors;
- tx_packets = cpu_stats->tx_packets;
- tx_bytes = cpu_stats->tx_bytes;
+ tx_packets = cpu_stats->es.ps.tx_packets;
+ tx_bytes = cpu_stats->es.ps.tx_bytes;
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
@@ -1933,7 +1962,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- page_pool_put_page(rxq->page_pool, data, false);
+ page_pool_put_full_page(rxq->page_pool, data, false);
}
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -1942,19 +1971,18 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
}
static void
-mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
- u32 len, bool tx)
+mvneta_update_stats(struct mvneta_port *pp,
+ struct mvneta_stats *ps)
{
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
u64_stats_update_begin(&stats->syncp);
- if (tx) {
- stats->tx_packets += pkts;
- stats->tx_bytes += len;
- } else {
- stats->rx_packets += pkts;
- stats->rx_bytes += len;
- }
+ stats->es.ps.rx_packets += ps->rx_packets;
+ stats->es.ps.rx_bytes += ps->rx_bytes;
+ /* xdp */
+ stats->es.ps.xdp_redirect += ps->xdp_redirect;
+ stats->es.ps.xdp_pass += ps->xdp_pass;
+ stats->es.ps.xdp_drop += ps->xdp_drop;
u64_stats_update_end(&stats->syncp);
}
@@ -1969,9 +1997,15 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
rx_desc = rxq->descs + curr_desc;
if (!(rx_desc->buf_phys_addr)) {
if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+ struct mvneta_pcpu_stats *stats;
+
pr_err("Can't refill queue %d. Done %d from %d\n",
rxq->id, i, rxq->refill_num);
- rxq->refill_err++;
+
+ stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.refill_error++;
+ u64_stats_update_end(&stats->syncp);
break;
}
}
@@ -2021,7 +2055,6 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
tx_desc->buf_phys_addr = dma_addr;
tx_desc->data_size = xdpf->len;
- mvneta_update_stats(pp, 1, xdpf->len, true);
mvneta_txq_inc_put(txq);
txq->pending++;
txq->count++;
@@ -2032,6 +2065,7 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
static int
mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
struct xdp_frame *xdpf;
@@ -2048,8 +2082,19 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
__netif_tx_lock(nq, cpu);
ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
- if (ret == MVNETA_XDP_TX)
+ if (ret == MVNETA_XDP_TX) {
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += xdpf->len;
+ stats->es.ps.tx_packets++;
+ stats->es.ps.xdp_tx++;
+ u64_stats_update_end(&stats->syncp);
+
mvneta_txq_pend_desc_add(pp, txq, 0);
+ } else {
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.xdp_tx_err++;
+ u64_stats_update_end(&stats->syncp);
+ }
__netif_tx_unlock(nq);
return ret;
@@ -2060,10 +2105,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
struct xdp_frame **frames, u32 flags)
{
struct mvneta_port *pp = netdev_priv(dev);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ int i, nxmit_byte = 0, nxmit = num_frame;
int cpu = smp_processor_id();
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
- int i, drops = 0;
u32 ret;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -2075,9 +2121,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
- if (ret != MVNETA_XDP_TX) {
+ if (ret == MVNETA_XDP_TX) {
+ nxmit_byte += frames[i]->len;
+ } else {
xdp_return_frame_rx_napi(frames[i]);
- drops++;
+ nxmit--;
}
}
@@ -2085,12 +2133,20 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
mvneta_txq_pend_desc_add(pp, txq, 0);
__netif_tx_unlock(nq);
- return num_frame - drops;
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += nxmit_byte;
+ stats->es.ps.tx_packets += nxmit;
+ stats->es.ps.xdp_xmit += nxmit;
+ stats->es.ps.xdp_xmit_err += num_frame - nxmit;
+ u64_stats_update_end(&stats->syncp);
+
+ return nxmit;
}
static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct bpf_prog *prog, struct xdp_buff *xdp)
+ struct bpf_prog *prog, struct xdp_buff *xdp,
+ struct mvneta_stats *stats)
{
unsigned int len;
u32 ret, act;
@@ -2100,28 +2156,29 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
switch (act) {
case XDP_PASS:
- ret = MVNETA_XDP_PASS;
- break;
+ stats->xdp_pass++;
+ return MVNETA_XDP_PASS;
case XDP_REDIRECT: {
int err;
err = xdp_do_redirect(pp->dev, xdp, prog);
- if (err) {
+ if (unlikely(err)) {
ret = MVNETA_XDP_DROPPED;
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
} else {
ret = MVNETA_XDP_REDIR;
+ stats->xdp_redirect++;
}
break;
}
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2130,13 +2187,16 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
/* fall through */
case XDP_DROP:
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len, true);
ret = MVNETA_XDP_DROPPED;
+ stats->xdp_drop++;
break;
}
+ stats->rx_bytes += xdp->data_end - xdp->data;
+ stats->rx_packets++;
+
return ret;
}
@@ -2146,12 +2206,14 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog,
- struct page *page, u32 *xdp_ret)
+ struct page *page,
+ struct mvneta_stats *stats)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
+ int ret = 0;
if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2175,17 +2237,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
xdp_set_data_meta_invalid(xdp);
if (xdp_prog) {
- u32 ret;
-
- ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
- if (ret != MVNETA_XDP_PASS) {
- mvneta_update_stats(pp, 1,
- xdp->data_end - xdp->data,
- false);
- rx_desc->buf_phys_addr = 0;
- *xdp_ret |= ret;
- return ret;
- }
+ ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
+ if (ret)
+ goto out;
}
rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
@@ -2193,9 +2247,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
- rxq->skb_alloc_err++;
u64_stats_update_begin(&stats->syncp);
+ stats->es.skb_alloc_error++;
stats->rx_dropped++;
u64_stats_update_end(&stats->syncp);
@@ -2209,9 +2263,11 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
rxq->left_size = rx_desc->data_size - len;
+
+out:
rx_desc->buf_phys_addr = 0;
- return 0;
+ return ret;
}
static void
@@ -2252,12 +2308,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
- int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
+ int rx_proc = 0, rx_todo, refill;
struct net_device *dev = pp->dev;
+ struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
struct xdp_buff xdp_buf;
- int rx_todo, refill;
- u32 xdp_ret = 0;
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2290,7 +2345,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
- xdp_prog, page, &xdp_ret);
+ xdp_prog, page, &ps);
if (err)
continue;
} else {
@@ -2314,8 +2369,9 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rxq->skb = NULL;
continue;
}
- rcvd_pkts++;
- rcvd_bytes += rxq->skb->len;
+
+ ps.rx_bytes += rxq->skb->len;
+ ps.rx_packets++;
/* Linux processing */
rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
@@ -2327,11 +2383,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
rcu_read_unlock();
- if (xdp_ret & MVNETA_XDP_REDIR)
+ if (ps.xdp_redirect)
xdp_do_flush_map();
- if (rcvd_pkts)
- mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+ if (ps.rx_packets)
+ mvneta_update_stats(pp, &ps);
/* return some buffers to hardware queue, one at a time is too slow */
refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2339,7 +2395,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
- return rcvd_pkts;
+ return ps.rx_packets;
}
/* Main rx processing when using hardware buffer management */
@@ -2423,8 +2479,15 @@ err_drop_frame:
/* Refill processing */
err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
+ struct mvneta_pcpu_stats *stats;
+
netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->refill_err++;
+
+ stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.refill_error++;
+ u64_stats_update_end(&stats->syncp);
+
goto err_drop_frame_ret_pool;
}
@@ -2454,8 +2517,14 @@ err_drop_frame:
napi_gro_receive(napi, skb);
}
- if (rcvd_pkts)
- mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+ if (rcvd_pkts) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.rx_packets += rcvd_pkts;
+ stats->es.ps.rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
@@ -2711,6 +2780,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
netdev_tx_sent_queue(nq, len);
@@ -2724,7 +2794,10 @@ out:
else
txq->pending += frags;
- mvneta_update_stats(pp, 1, len, true);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += len;
+ stats->es.ps.tx_packets++;
+ u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
@@ -3766,13 +3839,9 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
MVNETA_GMAC_INBAND_RESTART_AN |
- MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
MVNETA_GMAC_AN_SPEED_EN |
MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
- MVNETA_GMAC_CONFIG_FLOW_CTRL |
MVNETA_GMAC_AN_FLOW_CTRL_EN |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX |
MVNETA_GMAC_AN_DUPLEX_EN);
/* Even though it might look weird, when we're configured in
@@ -3787,24 +3856,20 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
if (phylink_test(state->advertising, Pause))
new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
- if (state->pause & MLO_PAUSE_TXRX_MASK)
- new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
if (!phylink_autoneg_inband(mode)) {
- /* Phy or fixed speed */
- if (state->duplex)
- new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
- new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else if (state->speed == SPEED_100)
- new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ /* Phy or fixed speed - nothing to do, leave the
+ * configured speed, duplex and flow control as-is.
+ */
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII mode receives the state from the PHY */
new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_FORCE_LINK_PASS |
+ MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
MVNETA_GMAC_INBAND_AN_ENABLE |
MVNETA_GMAC_AN_SPEED_EN |
MVNETA_GMAC_AN_DUPLEX_EN;
@@ -3813,7 +3878,8 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_FORCE_LINK_PASS |
+ MVNETA_GMAC_CONFIG_MII_SPEED)) |
MVNETA_GMAC_INBAND_AN_ENABLE |
MVNETA_GMAC_CONFIG_GMII_SPEED |
/* The MAC only supports FD mode */
@@ -3901,9 +3967,11 @@ static void mvneta_mac_link_down(struct phylink_config *config,
mvneta_set_eee(pp, false);
}
-static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy)
+static void mvneta_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
@@ -3911,8 +3979,36 @@ static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
if (!phylink_autoneg_inband(mode)) {
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FLOW_CTRL |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX);
val |= MVNETA_GMAC_FORCE_LINK_PASS;
+
+ if (speed == SPEED_1000 || speed == SPEED_2500)
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else if (speed == SPEED_100)
+ val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (tx_pause || rx_pause)
+ val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ } else {
+ /* When inband doesn't cover flow control or flow control is
+ * disabled, we need to manually configure it. This bit will
+ * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
+ */
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ if (tx_pause || rx_pause)
+ val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
@@ -4419,45 +4515,112 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
}
}
+static void
+mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
+ struct mvneta_ethtool_stats *es)
+{
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *stats;
+ u64 skb_alloc_error;
+ u64 refill_error;
+ u64 xdp_redirect;
+ u64 xdp_xmit_err;
+ u64 xdp_tx_err;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_tx;
+
+ stats = per_cpu_ptr(pp->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ skb_alloc_error = stats->es.skb_alloc_error;
+ refill_error = stats->es.refill_error;
+ xdp_redirect = stats->es.ps.xdp_redirect;
+ xdp_pass = stats->es.ps.xdp_pass;
+ xdp_drop = stats->es.ps.xdp_drop;
+ xdp_xmit = stats->es.ps.xdp_xmit;
+ xdp_xmit_err = stats->es.ps.xdp_xmit_err;
+ xdp_tx = stats->es.ps.xdp_tx;
+ xdp_tx_err = stats->es.ps.xdp_tx_err;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ es->skb_alloc_error += skb_alloc_error;
+ es->refill_error += refill_error;
+ es->ps.xdp_redirect += xdp_redirect;
+ es->ps.xdp_pass += xdp_pass;
+ es->ps.xdp_drop += xdp_drop;
+ es->ps.xdp_xmit += xdp_xmit;
+ es->ps.xdp_xmit_err += xdp_xmit_err;
+ es->ps.xdp_tx += xdp_tx;
+ es->ps.xdp_tx_err += xdp_tx_err;
+ }
+}
+
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
+ struct mvneta_ethtool_stats stats = {};
const struct mvneta_statistic *s;
void __iomem *base = pp->base;
u32 high, low;
u64 val;
int i;
+ mvneta_ethtool_update_pcpu_stats(pp, &stats);
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
- val = 0;
-
switch (s->type) {
case T_REG_32:
val = readl_relaxed(base + s->offset);
+ pp->ethtool_stats[i] += val;
break;
case T_REG_64:
/* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4);
val = (u64)high << 32 | low;
+ pp->ethtool_stats[i] += val;
break;
case T_SW:
switch (s->offset) {
case ETHTOOL_STAT_EEE_WAKEUP:
val = phylink_get_eee_err(pp->phylink);
+ pp->ethtool_stats[i] += val;
break;
case ETHTOOL_STAT_SKB_ALLOC_ERR:
- val = pp->rxqs[0].skb_alloc_err;
+ pp->ethtool_stats[i] = stats.skb_alloc_error;
break;
case ETHTOOL_STAT_REFILL_ERR:
- val = pp->rxqs[0].refill_err;
+ pp->ethtool_stats[i] = stats.refill_error;
+ break;
+ case ETHTOOL_XDP_REDIRECT:
+ pp->ethtool_stats[i] = stats.ps.xdp_redirect;
+ break;
+ case ETHTOOL_XDP_PASS:
+ pp->ethtool_stats[i] = stats.ps.xdp_pass;
+ break;
+ case ETHTOOL_XDP_DROP:
+ pp->ethtool_stats[i] = stats.ps.xdp_drop;
+ break;
+ case ETHTOOL_XDP_TX:
+ pp->ethtool_stats[i] = stats.ps.xdp_tx;
+ break;
+ case ETHTOOL_XDP_TX_ERR:
+ pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
+ break;
+ case ETHTOOL_XDP_XMIT:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit;
+ break;
+ case ETHTOOL_XDP_XMIT_ERR:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
break;
}
break;
}
-
- pp->ethtool_stats[i] += val;
}
}
@@ -4674,6 +4837,8 @@ static const struct net_device_ops mvneta_netdev_ops = {
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvneta_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvneta_ethtool_set_coalesce,
@@ -5218,7 +5383,7 @@ static int __init mvneta_driver_init(void)
{
int ret;
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
mvneta_cpu_online,
mvneta_cpu_down_prepare);
if (ret < 0)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 35478cba2aa5..d4a4e241333d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1070,7 +1070,7 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
(port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
- val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
+ val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
@@ -1082,6 +1082,9 @@ static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
u8 qh, ql, pmap;
int index, ctx;
+ if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
+ return -EOPNOTSUPP;
+
memset(&c2, 0, sizeof(c2));
index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
@@ -1305,6 +1308,9 @@ static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
struct flow_rule *flow = rule->flow;
struct flow_action_entry *act;
+ if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
+ return -EOPNOTSUPP;
+
act = &flow->action.entries[0];
if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
return -EOPNOTSUPP;
@@ -1422,6 +1428,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
struct mvpp2_ethtool_fs *efs;
int ret;
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+ return -EINVAL;
+
efs = port->rfs_rules[info->fs.location];
if (!efs)
return -EINVAL;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 72133cbe55d4..2b5dad2ec650 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -58,8 +58,11 @@ static struct {
*/
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy);
+static void mvpp2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -3473,8 +3476,9 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
.interface = port->phy_interface,
};
mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
- port->phy_interface, NULL);
+ mvpp2_mac_link_up(&port->phylink_config, NULL,
+ MLO_AN_INBAND, port->phy_interface,
+ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
}
netif_tx_start_all_queues(port->dev);
@@ -4325,6 +4329,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
if (!mvpp22_rss_is_supported())
return -EOPNOTSUPP;
+ if (rss_context >= MVPP22_N_RSS_TABLES)
+ return -EINVAL;
if (hfunc)
*hfunc = ETH_RSS_HASH_CRC32;
@@ -4380,6 +4386,8 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvpp2_ethtool_set_coalesce,
@@ -4972,15 +4980,13 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
+ an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
- MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
+ MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
MVPP2_GMAC_PCS_ENABLE_MASK);
- ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -5010,31 +5016,20 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
/* Configure negotiation style */
if (!phylink_autoneg_inband(mode)) {
- /* Phy or fixed speed - no in-band AN */
- if (state->duplex)
- an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
- an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
- else if (state->speed == SPEED_100)
- an |= MVPP2_GMAC_CONFIG_MII_SPEED;
-
- if (state->pause & MLO_PAUSE_TX)
- ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
- if (state->pause & MLO_PAUSE_RX)
- ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+ /* Phy or fixed speed - no in-band AN, nothing to do, leave the
+ * configured speed, duplex and flow control as-is.
+ */
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII in-band mode receives the speed and duplex from
* the PHY. Flow control information is not received. */
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
+ an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX);
an |= MVPP2_GMAC_IN_BAND_AUTONEG |
MVPP2_GMAC_AN_SPEED_EN |
MVPP2_GMAC_AN_DUPLEX_EN;
-
- if (state->pause & MLO_PAUSE_TX)
- ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
- if (state->pause & MLO_PAUSE_RX)
- ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
} else if (phy_interface_mode_is_8023z(state->interface)) {
/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
* they negotiate duplex: they are always operating with a fixed
@@ -5042,19 +5037,17 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
* speed and full duplex here.
*/
ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
+ an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX);
an |= MVPP2_GMAC_IN_BAND_AUTONEG |
MVPP2_GMAC_CONFIG_GMII_SPEED |
MVPP2_GMAC_CONFIG_FULL_DUPLEX;
- if (state->pause & MLO_PAUSE_AN && state->an_enabled) {
+ if (state->pause & MLO_PAUSE_AN && state->an_enabled)
an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
- } else {
- if (state->pause & MLO_PAUSE_TX)
- ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
- if (state->pause & MLO_PAUSE_RX)
- ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
- }
}
/* Some fields of the auto-negotiation register require the port to be down when
@@ -5141,25 +5134,54 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
mvpp2_port_enable(port);
}
-static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy)
+static void mvpp2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct net_device *dev = to_net_dev(config->dev);
struct mvpp2_port *port = netdev_priv(dev);
u32 val;
- if (!phylink_autoneg_inband(mode)) {
- if (mvpp2_is_xlg(interface)) {
+ if (mvpp2_is_xlg(interface)) {
+ if (!phylink_autoneg_inband(mode)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
+ }
+ } else {
+ if (!phylink_autoneg_inband(mode)) {
val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
+ val &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX);
val |= MVPP2_GMAC_FORCE_LINK_PASS;
+
+ if (speed == SPEED_1000 || speed == SPEED_2500)
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ else if (speed == SPEED_100)
+ val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
+
+ /* We can always update the flow control enable bits;
+ * these will only be effective if flow control AN
+ * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
+ */
+ val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+ val &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+ if (tx_pause)
+ val |= MVPP22_CTRL4_TX_FC_EN;
+ if (rx_pause)
+ val |= MVPP22_CTRL4_RX_FC_EN;
+ writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
}
mvpp2_port_enable(port);
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index ced514c05c97..d9dfb614daa6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -33,3 +33,9 @@ config OCTEONTX2_PF
depends on PCI
help
This driver supports Marvell's OcteonTX2 NIC physical function.
+
+config OCTEONTX2_VF
+ tristate "Marvell OcteonTX2 NIC Virtual Function driver"
+ depends on OCTEONTX2_PF
+ help
+ This driver supports Marvell's OcteonTX2 NIC virtual function.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 5ca788691911..a4e65da8d95b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -113,7 +113,6 @@ int cgx_get_cgxcnt_max(void)
return idmax + 1;
}
-EXPORT_SYMBOL(cgx_get_cgxcnt_max);
int cgx_get_lmac_cnt(void *cgxd)
{
@@ -124,7 +123,6 @@ int cgx_get_lmac_cnt(void *cgxd)
return cgx->lmac_count;
}
-EXPORT_SYMBOL(cgx_get_lmac_cnt);
void *cgx_get_pdata(int cgx_id)
{
@@ -136,7 +134,6 @@ void *cgx_get_pdata(int cgx_id)
}
return NULL;
}
-EXPORT_SYMBOL(cgx_get_pdata);
int cgx_get_cgxid(void *cgxd)
{
@@ -164,7 +161,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
*linfo = lmac->link_info;
return 0;
}
-EXPORT_SYMBOL(cgx_get_link_info);
static u64 mac2u64 (u8 *mac_addr)
{
@@ -195,7 +191,6 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_addr_set);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
@@ -205,7 +200,6 @@ u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
-EXPORT_SYMBOL(cgx_lmac_addr_get);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
@@ -217,7 +211,6 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
return 0;
}
-EXPORT_SYMBOL(cgx_set_pkind);
static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
@@ -255,7 +248,6 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
}
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_internal_loopback);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
@@ -289,7 +281,6 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
}
}
-EXPORT_SYMBOL(cgx_lmac_promisc_config);
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
@@ -318,7 +309,6 @@ void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
}
}
-EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
@@ -329,7 +319,6 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
-EXPORT_SYMBOL(cgx_get_rx_stats);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
@@ -340,7 +329,6 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
-EXPORT_SYMBOL(cgx_get_tx_stats);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
@@ -358,7 +346,6 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
@@ -379,7 +366,107 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return !!(last & DATA_PKT_TX_EN);
}
-EXPORT_SYMBOL(cgx_lmac_tx_enable);
+
+int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
+int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return;
+ if (enable) {
+ /* Enable receive pause frames */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Enable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ /* Set pause time and interval */
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+
+ cfg = cgx_read(cgx, lmac_id,
+ CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+ } else {
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ }
+}
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
@@ -558,60 +645,6 @@ static inline bool cgx_event_is_linkevent(u64 event)
return false;
}
-static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
- struct cgx *cgx)
-{
- u64 req = 0;
- u64 resp;
- int err;
-
- req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
- if (!err)
- *prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
-
- return err;
-}
-
-static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
- struct cgx *cgx)
-{
- u64 req = 0;
- u64 resp;
- int err;
-
- req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
- if (!err)
- *prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
-
- return err;
-}
-
-int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
-{
- struct cgx *cgx_dev;
- int err;
-
- if (!addr || !size)
- return -EINVAL;
-
- cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
- if (!cgx_dev)
- return -ENXIO;
-
- err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
- if (err)
- return -EIO;
-
- err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
- if (err)
- return -EIO;
-
- return 0;
-}
-EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
-
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
struct lmac *lmac = data;
@@ -676,7 +709,6 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_evh_register);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
@@ -695,7 +727,24 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+int cgx_get_fwdata_base(u64 *base)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err;
+
+ cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ if (!err)
+ *base = FIELD_GET(RESP_FWD_BASE, resp);
+
+ return err;
+}
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
@@ -769,7 +818,6 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_linkup_start);
static int cgx_lmac_init(struct cgx *cgx)
{
@@ -805,6 +853,7 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Add reference */
cgx->lmac_idmap[i] = lmac;
+ cgx_lmac_pause_frm_config(cgx, i, true);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -823,6 +872,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
/* Free all lmac related resources */
for (i = 0; i < cgx->lmac_count; i++) {
+ cgx_lmac_pause_frm_config(cgx, i, false);
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 9343bf39cfac..394f96591feb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -60,10 +60,20 @@
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGXX_SMUX_TX_CTL 0x20178
+#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
+#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
+#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
+#define CGXX_CMR_RX_OVR_BP 0x130
+#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
+#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
+#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
@@ -124,5 +134,9 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
-int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
+int cgx_get_fwdata_base(u64 *base);
+int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index 473d9751601f..c3702fa58b6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -79,7 +79,8 @@ enum cgx_cmd_id {
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
- CGX_CMD_GET_MKEX_PRFL_ADDR
+ CGX_CMD_GET_MKEX_PRFL_ADDR,
+ CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
};
/* async event ids */
@@ -149,6 +150,11 @@ enum cgx_cmd_own {
*/
#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
+/* Response to cmd ID as CGX_CMD_GET_FWD_BASE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_FWD_BASE GENMASK_ULL(56, 9)
+
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
*
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 8bbc1f1d81f5..6dfd0f90cd70 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -125,7 +125,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
-M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
+M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
@@ -143,6 +143,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
+ cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -211,6 +213,9 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -251,7 +256,8 @@ enum rvu_af_status {
struct ready_msg_rsp {
struct mbox_msghdr hdr;
- u16 sclk_feq; /* SCLK frequency */
+ u16 sclk_freq; /* SCLK frequency (in MHz) */
+ u16 rclk_freq; /* RCLK frequency (in MHz) */
};
/* Structure for requesting resource provisioning.
@@ -342,6 +348,15 @@ struct cgx_link_info_msg {
struct cgx_link_user_info link_info;
};
+struct cgx_pause_frm_cfg {
+ struct mbox_msghdr hdr;
+ u8 set;
 + /* set = 1 if the request is to configure pause frames */
 + /* set = 0 if the request is to fetch the pause frame config */
+ u8 rx_pause;
+ u8 tx_pause;
+};
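Note the mbox table maps CGX_CFG_PAUSE_FRM to cgx_pause_frm_cfg in both directions, so the same struct doubles as request and response: set = 1 applies rx_pause/tx_pause, set = 0 asks the AF to fill them in. The two request shapes, sketched from the struct alone (hdr is normally filled by the mbox allocation helpers):

	/* Sketch: the two request flavours for CGX_CFG_PAUSE_FRM. */
	struct cgx_pause_frm_cfg set_req = {
		.set	  = 1,	/* apply the configuration below */
		.tx_pause = 1,
		.rx_pause = 1,
	};

	struct cgx_pause_frm_cfg get_req = {
		.set = 0,	/* AF returns tx_pause/rx_pause in the response */
	};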
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -676,6 +691,25 @@ struct nix_lso_format_cfg_rsp {
u8 lso_format_idx;
};
+struct nix_bp_cfg_req {
+ struct mbox_msghdr hdr;
+ u16 chan_base; /* Starting channel number */
+ u8 chan_cnt; /* Number of channels */
+ u8 bpid_per_chan;
 + /* bpid_per_chan = 0 assigns a single bpid for the whole channel range */
 + /* bpid_per_chan = 1 assigns a separate bpid for each channel */
+};
+
+/* PF can be mapped to either CGX or LBK interface,
+ * so maximum 64 channels are possible.
+ */
+#define NIX_MAX_BPID_CHAN 64
+struct nix_bp_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
 + u8 chan_cnt; /* Number of channels for which bpids are assigned */
+};
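The backpressure request is likewise self-describing: chan_base/chan_cnt select a channel window and bpid_per_chan chooses one bpid for the whole range versus one per channel. A sketch of a request covering 16 channels with individual bpids, assuming only the struct above:

	/* Sketch: NIX_BP_ENABLE request for 16 channels, one bpid each. */
	struct nix_bp_cfg_req req = {
		.chan_base     = 0,
		.chan_cnt      = 16,
		.bpid_per_chan = 1,	/* separate bpid for every channel */
	};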
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5c190c3ce898..557e4292c846 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -21,7 +21,6 @@
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
-#define DRV_VERSION "1.0"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
@@ -46,10 +45,9 @@ static const struct pci_device_id rvu_id_table[] = {
{ 0, } /* end of table */
};
-MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);
static char *mkex_profile; /* MKEX profile name */
@@ -88,13 +86,15 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
u64 reg_val;
reg = rvu->afreg_base + ((block << 28) | offset);
- while (time_before(jiffies, timeout)) {
- reg_val = readq(reg);
- if (zero && !(reg_val & mask))
- return 0;
- if (!zero && (reg_val & mask))
- return 0;
+again:
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ if (time_before(jiffies, timeout)) {
usleep_range(1, 5);
+ goto again;
}
return -EBUSY;
}
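The restructured loop reads the register before testing the deadline, so a bit that flips during the final sleep is still observed; the old while loop could time out and return -EBUSY without a last read. The pattern in isolation, as a generic sketch:

	#include <linux/io.h>
	#include <linux/jiffies.h>
	#include <linux/delay.h>

	/* Sketch: poll with a guaranteed read after the deadline expires. */
	static int ex_poll(void __iomem *reg, u64 mask, unsigned long deadline)
	{
	again:
		if (readq(reg) & mask)		/* condition met */
			return 0;
		if (time_before(jiffies, deadline)) {
			usleep_range(1, 5);
			goto again;		/* one more read even if late */
		}
		return -EBUSY;
	}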
@@ -421,6 +421,19 @@ static void rvu_check_block_implemented(struct rvu *rvu)
}
}
+static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
+ RVU_BLK_RVUM_REVID);
+}
+
+static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
+}
+
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
int err;
@@ -603,7 +616,11 @@ setup_vfmsix:
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
max_msix = cfg & 0xFFFFF;
- phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ if (rvu->fwdata && rvu->fwdata->msixtr_base)
+ phy_addr = rvu->fwdata->msixtr_base;
+ else
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+
iova = dma_map_resource(rvu->dev, phy_addr,
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
@@ -613,10 +630,18 @@ setup_vfmsix:
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
rvu->msix_base_iova = iova;
+ rvu->msixtr_base_phy = phy_addr;
return 0;
}
+static void rvu_reset_msix(struct rvu *rvu)
+{
+ /* Restore msixtr base register */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
+ rvu->msixtr_base_phy);
+}
+
static void rvu_free_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -655,9 +680,80 @@ static void rvu_free_hw_resources(struct rvu *rvu)
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
+ rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
}
+static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ struct rvu_pfvf *pfvf;
+ u64 *mac;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ /* Assign MAC address to PF */
+ pfvf = &rvu->pf[pf];
+ if (rvu->fwdata && pf < PF_MACNUM_MAX) {
+ mac = &rvu->fwdata->pf_macs[pf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+
+ /* Assign MAC address to VFs */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pfvf = &rvu->hwvf[hwvf];
+ if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
+ mac = &rvu->fwdata->vf_macs[hwvf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+ }
+ }
+}
+
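Firmware hands the MACs over as u64 values; u64_to_ether_addr() from <linux/etherdevice.h> unpacks the low 48 bits most-significant octet first, so, as a quick example:

    u8 addr[ETH_ALEN];

    u64_to_ether_addr(0x0000aabbccddeeffULL, addr);
    /* addr now holds aa:bb:cc:dd:ee:ff */

A zero entry in pf_macs[]/vf_macs[] means firmware has no address on file, hence the eth_random_addr() fallback.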
+static int rvu_fwdata_init(struct rvu *rvu)
+{
+ u64 fwdbase;
+ int err;
+
+ /* Get firmware data base address */
+ err = cgx_get_fwdata_base(&fwdbase);
+ if (err)
+ goto fail;
+ rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
+ if (!rvu->fwdata)
+ goto fail;
+ if (!is_rvu_fwdata_valid(rvu)) {
+ dev_err(rvu->dev,
+ "Mismatch in 'fwdata' struct btw kernel and firmware\n");
+ iounmap(rvu->fwdata);
+ rvu->fwdata = NULL;
+ return -EINVAL;
+ }
+ return 0;
+fail:
+ dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
+ return -EIO;
+}
+
+static void rvu_fwdata_exit(struct rvu *rvu)
+{
+ if (rvu->fwdata)
+ iounmap(rvu->fwdata);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -813,6 +909,8 @@ init:
mutex_init(&rvu->rsrc_lock);
+ rvu_fwdata_init(rvu);
+
err = rvu_setup_msix_resources(rvu);
if (err)
return err;
@@ -825,8 +923,10 @@ init:
/* Allocate memory for block LF/slot to pcifunc mapping info */
block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
sizeof(u16), GFP_KERNEL);
- if (!block->fn_map)
- return -ENOMEM;
+ if (!block->fn_map) {
+ err = -ENOMEM;
+ goto msix_err;
+ }
/* Scan all blocks to check if low level firmware has
* already provisioned any of the resources to a PF/VF.
@@ -836,25 +936,36 @@ init:
err = rvu_npc_init(rvu);
if (err)
- goto exit;
+ goto npc_err;
err = rvu_cgx_init(rvu);
if (err)
- goto exit;
+ goto cgx_err;
+
+ /* Assign MACs for CGX mapped functions */
+ rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
if (err)
- goto cgx_err;
+ goto npa_err;
err = rvu_nix_init(rvu);
if (err)
- goto cgx_err;
+ goto nix_err;
return 0;
+nix_err:
+ rvu_nix_freemem(rvu);
+npa_err:
+ rvu_npa_freemem(rvu);
cgx_err:
rvu_cgx_exit(rvu);
-exit:
+npc_err:
+ rvu_npc_freemem(rvu);
+ rvu_fwdata_exit(rvu);
+msix_err:
+ rvu_reset_msix(rvu);
return err;
}
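The relabeled error path now follows the usual kernel unwind discipline: labels are ordered so a failure at step N falls through the teardown of steps N-1 down to 1, in reverse order of setup. Condensed to a sketch of the idiom (setup_*/teardown_* stand in for the npc/cgx/npa/nix init and freemem pairs above):

    err = setup_a();
    if (err)
            return err;
    err = setup_b();
    if (err)
            goto undo_a;
    err = setup_c();
    if (err)
            goto undo_b;
    return 0;
    undo_b:
            teardown_b();
    undo_a:
            teardown_a();
            return err;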
@@ -901,6 +1012,10 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
+ if (rvu->fwdata) {
+ rsp->rclk_freq = rvu->fwdata->rclk;
+ rsp->sclk_freq = rvu->fwdata->sclk;
+ }
return 0;
}
@@ -2128,6 +2243,9 @@ static int rvu_register_interrupts(struct rvu *rvu)
}
rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+ /* Clear TRPEND bit for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
/* Enable ME interrupt for all PFs*/
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
@@ -2439,17 +2557,13 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
- dev_err(dev, "Unable to set DMA mask\n");
+ dev_err(dev, "DMA mask config failed, abort\n");
goto err_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to set consistent DMA mask\n");
- goto err_release_regions;
- }
+ pci_set_master(pdev);
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
@@ -2489,6 +2603,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_flr;
+ rvu_setup_rvum_blk_revid(rvu);
+
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
if (err)
@@ -2506,8 +2622,10 @@ err_mbox:
rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
+ rvu_clear_rvum_blk_revid(rvu);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2527,11 +2645,12 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
-
+ rvu_clear_rvum_blk_revid(rvu);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 51c206f4fe6f..dcf25a092008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -269,6 +269,26 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct rvu_fwdata {
+#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
+#define RVU_FWDATA_VERSION 0x0001
+ u32 header_magic;
+ u32 version; /* version id */
+
+ /* MAC address */
+#define PF_MACNUM_MAX 32
+#define VF_MACNUM_MAX 256
+ u64 pf_macs[PF_MACNUM_MAX];
+ u64 vf_macs[VF_MACNUM_MAX];
+ u64 sclk;
+ u64 rclk;
+ u64 mcam_addr;
+ u64 mcam_sz;
+ u64 msixtr_base;
+#define FWDATA_RESERVED_MEM 1023
+ u64 reserved[FWDATA_RESERVED_MEM];
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -294,6 +314,7 @@ struct rvu {
char *irq_name;
bool *irq_allocated;
dma_addr_t msix_base_iova;
+ u64 msixtr_base_phy; /* Register reset value */
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
@@ -313,6 +334,9 @@ struct rvu {
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ /* Firmware data */
+ struct rvu_fwdata *fwdata;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -363,6 +387,12 @@ static inline int is_afvf(u16 pcifunc)
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
+{
+ return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
+ (rvu->fwdata->version == RVU_FWDATA_VERSION);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
@@ -432,7 +462,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
-int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 11e5921c55b9..f3c82e489897 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -350,6 +350,18 @@ int rvu_cgx_exit(struct rvu *rvu)
return 0;
}
+/* Most of the CGX configuration is restricted to the mapped PF only;
+ * VFs of the mapped PF and other PFs are not allowed. This function
+ * checks whether a PFFUNC is permitted to do the config or not.
+ */
+static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+{
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return false;
+ return true;
+}
+
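Two details worth noting: any pcifunc with FUNC bits set denotes a VF (the same mask is_afvf() uses), so the first clause rejects all VFs outright, and most handlers converted to this helper now return -EPERM instead of the old -ENODEV. A converted handler reduces to the shape:

    if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
            return -EPERM;
    /* safe to touch CGX/LMAC state from here on */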
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
u8 cgx_id, lmac_id;
@@ -373,11 +385,8 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -409,8 +418,7 @@ int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
u8 cgx_idx, lmac;
void *cgxd;
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
@@ -477,12 +485,8 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -493,16 +497,11 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -515,11 +514,8 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -571,11 +567,8 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -597,6 +590,30 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
+ struct cgx_pause_frm_cfg *req,
+ struct cgx_pause_frm_cfg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (req->set)
+ cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ req->tx_pause, req->rx_pause);
+ else
+ cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &rsp->tx_pause, &rsp->rx_pause);
+ return 0;
+}
+
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
* from its VFs as well. ie. NIX rx/tx counters at the CGX port level
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index eb5e542424e7..36953d4f51c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -18,6 +18,8 @@
#include "cgx.h"
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -90,6 +92,26 @@ int rvu_get_nixlf_count(struct rvu *rvu)
return block->lf.max;
}
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (*nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (nix_blkaddr)
+ *nix_blkaddr = blkaddr;
+
+ return 0;
+}
+
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
@@ -191,6 +213,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->tx_chan_cnt = 1;
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
+
+ /* By default we enable pause frames */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
+ cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -253,6 +280,142 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, pf, type;
+ u16 chan_base, chan;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg & ~BIT_ULL(16));
+ }
+ return 0;
+}
+
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id)
+{
+ int bpid, blkaddr, lmac_chan_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 cgx_bpid_cnt, lbk_bpid_cnt;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ lmac_chan_cnt = cfg & 0xFF;
+
+ cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
+ lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ /* Backpressure ID range division:
+ * CGX channels are mapped to (0 - 191) BPIDs
+ * LBK channels are mapped to (192 - 255) BPIDs
+ * SDP channels are mapped to (256 - 511) BPIDs
+ *
+ * LMAC channels and bpids are mapped as follows:
+ * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
+ * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
+ * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
+ */
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ if ((req->chan_base + req->chan_cnt) > 15)
+ return -EINVAL;
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+ /* Assign bpid based on cgx, lmac and chan id */
+ bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
+ (lmac_id * lmac_chan_cnt) + req->chan_base;
+
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > cgx_bpid_cnt)
+ return -EINVAL;
+ break;
+
+ case NIX_INTF_TYPE_LBK:
+ if ((req->chan_base + req->chan_cnt) > 63)
+ return -EINVAL;
+ bpid = cgx_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return bpid;
+}
+
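Plugging numbers into the CGX branch makes the mapping comment concrete. The ranges above imply 16 channels per LMAC and 4 LMACs per CGX, so for cgx 1, lmac 0, chan_base 0:

    bpid = (cgx_id * lmac_per_cgx * lmac_chan_cnt)
         + (lmac_id * lmac_chan_cnt) + chan_base
         = (1 * 4 * 16) + (0 * 16) + 0
         = 64    /* matches cgx(1)_lmac(0) -> bpid(64 - 79) */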
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int blkaddr, pf, type, chan_id = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u16 chan_base, chan;
+ s16 bpid, bpid_base;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+ /* Enable backpressure only for CGX mapped PFs and LBK interface */
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ bpid = bpid_base;
+
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ if (bpid < 0) {
+ dev_warn(rvu->dev, "Fail to enable backpressure\n");
+ return -EINVAL;
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg | (bpid & 0xFF) | BIT_ULL(16));
+ chan_id++;
+ bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ }
+
+ for (chan = 0; chan < req->chan_cnt; chan++) {
+ /* Map each channel to the bpid assigned to it */
+ rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
+ (bpid_base & 0x3FF);
+ if (req->bpid_per_chan)
+ bpid_base++;
+ }
+ rsp->chan_cnt = req->chan_cnt;
+
+ return 0;
+}
+
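Each rsp->chan_bpid[] entry packs the channel offset above bit 9 (7 bits) and the bpid in the low 10 bits, matching the 0x7F and 0x3FF masks above. The NIC side (mbox_handler_nix_bp_enable, later in this patch) simply reverses it:

    u16 entry = rsp->chan_bpid[i];
    u16 chan  = (entry >> 10) & 0x7F; /* channel offset */
    u16 bpid  = entry & 0x3FF;        /* backpressure id */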
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -545,6 +708,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
+ /* Hardware uses the same aq->res->base to update the result of the
+ * previous instruction, so wait here until that completes.
+ */
+ spin_lock(&aq->lock);
+
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -589,11 +757,10 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
+ spin_unlock(&aq->lock);
return rc;
}
- spin_lock(&aq->lock);
-
/* Submit the instruction to AQ */
rc = nix_aq_enqueue_wait(rvu, block, &inst);
if (rc) {
@@ -698,6 +865,8 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (req->ctype == NIX_AQ_CTYPE_CQ) {
aq_req.cq.ena = 0;
aq_req.cq_mask.ena = 1;
+ aq_req.cq.bp_ena = 0;
+ aq_req.cq_mask.bp_ena = 1;
q_cnt = pfvf->cq_ctx->qsize;
bmap = pfvf->cq_bmap;
}
@@ -1667,13 +1836,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
if (err)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ return err;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -1767,17 +1932,12 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
if (req->cfg_type) {
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
@@ -2119,18 +2279,13 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- int i, nixlf, blkaddr;
+ int i, nixlf, blkaddr, err;
u64 stats;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
/* Get stats count supported by HW */
stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
@@ -2418,18 +2573,14 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
struct nix_rss_flowkey_cfg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int alg_idx, nixlf, blkaddr;
struct nix_hw *nix_hw;
+ int err;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -2522,19 +2673,15 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
- int blkaddr, nixlf;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
@@ -2567,19 +2714,15 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
bool allmulti = false, disable_promisc = false;
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
- int blkaddr, nixlf;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
if (req->mode & NIX_RX_MODE_PROMISC)
allmulti = false;
@@ -2794,22 +2937,12 @@ free_entry:
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
- u16 pcifunc = req->hdr.pcifunc;
- struct rvu_block *block;
- struct rvu_pfvf *pfvf;
- int nixlf, blkaddr;
+ int nixlf, blkaddr, err;
u64 cfg;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- block = &hw->block[blkaddr];
- nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
/* Set the interface configuration */
@@ -3077,6 +3210,9 @@ int rvu_nix_init(struct rvu *rvu)
/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
nix_link_config(rvu, blkaddr);
+
+ /* Enable Channel backpressure */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
}
return 0;
}
@@ -3114,30 +3250,13 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
-int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
-{
- struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
- struct rvu_hwinfo *hw = rvu->hw;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (*nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- return 0;
-}
-
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
@@ -3152,7 +3271,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
u16 pcifunc = req->hdr.pcifunc;
int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 6e7c7f459f74..67471cb2b129 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -94,6 +94,11 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
+ /* Hardware uses the same aq->res->base to update the result of the
+ * previous instruction, so wait here until that completes.
+ */
+ spin_lock(&aq->lock);
+
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -138,10 +143,10 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
break;
}
- if (rc)
+ if (rc) {
+ spin_unlock(&aq->lock);
return rc;
-
- spin_lock(&aq->lock);
+ }
/* Submit the instruction to AQ */
rc = npa_aq_enqueue_wait(rvu, block, &inst);
@@ -218,6 +223,8 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
aq_req.aura.ena = 0;
aq_req.aura_mask.ena = 1;
+ aq_req.aura.bp_ena = 0;
+ aq_req.aura_mask.bp_ena = 1;
cnt = pfvf->aura_ctx->qsize;
bmap = pfvf->aura_bmap;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 40e431debbe9..0a214084406a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -825,8 +825,10 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
goto load_default;
- if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
+ if (!rvu->fwdata)
goto load_default;
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
if (!prfl_addr || !prfl_sz)
goto load_default;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 9d8942acc232..a3ecb5de9000 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -11,6 +11,9 @@
#ifndef RVU_STRUCT_H
#define RVU_STRUCT_H
+/* RVU Block revision IDs */
+#define RVU_BLK_RVUM_REVID 0x01
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 41bf00cf5b1d..778df331c8ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -4,7 +4,9 @@
#
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
+obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index b945bd3d5d88..f1d2dea90a8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -49,15 +49,15 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
if (!netif_running(pfvf->netdev))
return;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return;
}
otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
}
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
@@ -128,6 +128,7 @@ void otx2_get_stats64(struct net_device *netdev,
stats->tx_packets = dev_stats->tx_frames;
stats->tx_dropped = dev_stats->tx_drops;
}
+EXPORT_SYMBOL(otx2_get_stats64);
/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
@@ -135,17 +136,17 @@ static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
struct nix_set_mac_addr *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
ether_addr_copy(req->mac_addr, mac);
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -157,27 +158,27 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
struct msg_req *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(msghdr)) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return PTR_ERR(msghdr);
}
rsp = (struct nix_get_mac_addr_rsp *)msghdr;
ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return 0;
}
@@ -197,26 +198,50 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
+EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
- /* SMQ config limits maximum pkt size that can be transmitted */
- req->update_smq = true;
pfvf->max_frs = mtu + OTX2_ETH_HLEN;
req->maxlen = pfvf->max_frs;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_config_pause_frm(struct otx2_nic *pfvf)
+{
+ struct cgx_pause_frm_cfg *req;
+ int err;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
+ req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
+ req->set = 1;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
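Because the request is built purely from pfvf->flags, callers just toggle OTX2_FLAG_{RX,TX}_PAUSE_ENABLED and resync; the ethtool set_pauseparam handler added later in this patch does exactly that. For example:

    pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED | OTX2_FLAG_TX_PAUSE_ENABLED;
    err = otx2_config_pause_frm(pfvf); /* pushes the flags to the AF */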
@@ -226,10 +251,10 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
struct nix_rss_flowkey_cfg *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
req->mcam_index = -1; /* Default or reserved index */
@@ -237,7 +262,7 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
req->group = DEFAULT_RSS_CONTEXT_GROUP;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -248,7 +273,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
struct nix_aq_enq_req *aq;
int idx, err;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -258,12 +283,12 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
*/
err = otx2_sync_mbox_msg(mbox);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return -ENOMEM;
}
}
@@ -276,7 +301,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
aq->op = NIX_AQ_INSTOP_INIT;
}
err = otx2_sync_mbox_msg(mbox);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
@@ -394,6 +419,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
schedule_work(&pfvf->reset_task);
}
+EXPORT_SYMBOL(otx2_tx_timeout);
void otx2_get_mac_from_af(struct net_device *netdev)
{
@@ -408,6 +434,7 @@ void otx2_get_mac_from_af(struct net_device *netdev)
if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev);
}
+EXPORT_SYMBOL(otx2_get_mac_from_af);
static int otx2_get_link(struct otx2_nic *pfvf)
{
@@ -443,7 +470,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) |
+ req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
@@ -529,17 +556,17 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
struct nix_txsch_free_req *free_req;
int lvl, schq, err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
/* Free the transmit schedulers */
free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
if (!free_req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
free_req->flags = TXSCHQ_FREE_ALL;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
/* Clear the txschq list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
@@ -553,17 +580,19 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{
int qidx, sqe_tail, sqe_head;
u64 incr, *ptr, val;
+ int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
incr = (u64)qidx << 32;
- while (1) {
+ while (timeout) {
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
sqe_tail = (val >> 28) & 0x3F;
if (sqe_head == sqe_tail)
break;
usleep_range(1, 3);
+ timeout--;
}
}
}
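The new timeout caps the head/tail poll at 1000 iterations; with usleep_range(1, 3) per pass, that is roughly 1000 * 3 us = 3 ms worst case per queue (plus scheduler slack), instead of the previous unbounded spin on a wedged SQ.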
@@ -580,6 +609,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
* RED accepts pkts if free pointers > 102 & <= 205.
* Drops pkts if free pointers < 102.
*/
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
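Each level is an 8-bit threshold counted down from 255 as the pool fills, in 256ths of the pool. Working the arithmetic out (integer division):

    RQ_BP_LVL_AURA   = 255 - (85 * 256) / 100 = 255 - 217 = 38  /* BP at 85% full */
    RQ_PASS_LVL_AURA = 255 - (95 * 256) / 100 = 255 - 243 = 12  /* RED at 95% */
    RQ_DROP_LVL_AURA = 255 - (99 * 256) / 100 = 255 - 253 = 2   /* drop at 99% */

So backpressure is asserted well before RED marking or drops kick in.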
@@ -741,6 +771,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (qidx < pfvf->hw.rx_queues) {
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
+
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
+
+ /* Set backpressure level to the same value as the CQ pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
}
/* Fill AQ info */
@@ -951,6 +988,7 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->fc_addr);
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
+ pfvf->qset.pool = NULL;
}
static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
@@ -996,6 +1034,14 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_addr = pool->fc_addr->iova;
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt) {
+ aq->aura.bp_ena = 0;
+ aq->aura.nix0_bpid = pfvf->bpid[0];
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
/* Fill AQ info */
aq->ctype = NPA_AQ_CTYPE_AURA;
aq->op = NPA_AQ_INSTOP_INIT;
@@ -1210,10 +1256,10 @@ int otx2_detach_resources(struct mbox *mbox)
{
struct rsrc_detach *detach;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
detach = otx2_mbox_alloc_msg_detach_resources(mbox);
if (!detach) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return -ENOMEM;
}
@@ -1222,9 +1268,10 @@ int otx2_detach_resources(struct mbox *mbox)
/* Send detach request to AF */
otx2_mbox_msg_send(&mbox->mbox, 0);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return 0;
}
+EXPORT_SYMBOL(otx2_detach_resources);
int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
@@ -1232,11 +1279,11 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
struct msg_req *msix;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
/* Get memory to put this msg */
attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
if (!attach) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
@@ -1246,7 +1293,7 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
/* Send attach request to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -1261,16 +1308,16 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
/* Get NPA and NIX MSIX vector offsets */
msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
if (!msix) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
@@ -1281,12 +1328,13 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_attach_npa_nix);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
struct hwctx_disable_req *req;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Request AQ to disable this context */
if (npa)
req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
@@ -1294,7 +1342,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
if (!req) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return;
}
@@ -1304,7 +1352,26 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
__func__);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
+}
+
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
}
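The helper always requests a single shared bpid on the PF's first channel (chan_cnt = 1, bpid_per_chan = 0), which is all the NIC needs since the CQ and aura contexts above reference pfvf->bpid[0]. A caller would wrap it in the mbox lock like the other alloc-and-sync paths here; as a sketch (assuming an interface-up path, not shown in this hunk):

    mutex_lock(&pfvf->mbox.lock);
    err = otx2_nix_config_bp(pfvf, true); /* enable BP when the interface comes up */
    if (err)
            dev_warn(pfvf->dev, "Failed to enable NIX backpressure\n");
    mutex_unlock(&pfvf->mbox.lock);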
/* Mbox message handlers */
@@ -1330,6 +1397,7 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
}
+EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
struct npa_lf_alloc_rsp *rsp)
@@ -1337,6 +1405,7 @@ void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}
+EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
struct nix_lf_alloc_rsp *rsp)
@@ -1347,6 +1416,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
}
+EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
struct msix_offset_rsp *rsp)
@@ -1354,6 +1424,19 @@ void mbox_handler_msix_offset(struct otx2_nic *pfvf,
pfvf->hw.npa_msixoff = rsp->npa_msixoff;
pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
+EXPORT_SYMBOL(mbox_handler_msix_offset);
+
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int chan, chan_id;
+
+ for (chan = 0; chan < rsp->chan_cnt; chan++) {
+ chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
+ pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
+ }
+}
+EXPORT_SYMBOL(mbox_handler_nix_bp_enable);
void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 320f3b7bf57f..018c283a0ac4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -20,6 +20,8 @@
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
@@ -191,6 +193,17 @@ struct otx2_hw {
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
};
+struct otx2_vf_config {
+ struct otx2_nic *pf;
+ struct delayed_work link_event_work;
+ bool intf_down; /* true when the interface is down */
+};
+
+struct flr_work {
+ struct work_struct work;
+ struct otx2_nic *pf;
+};
+
struct refill_work {
struct delayed_work pool_refill_work;
struct otx2_nic *pf;
@@ -204,6 +217,8 @@ struct otx2_nic {
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
+#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
struct otx2_qset qset;
@@ -213,14 +228,23 @@ struct otx2_nic {
/* Mbox */
struct mbox mbox;
+ struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq;
+ struct workqueue_struct *mbox_pfvf_wq;
+ u8 total_vfs;
u16 pcifunc; /* RVU PF_FUNC */
+ u16 bpid[NIX_MAX_BPID_CHAN];
+ struct otx2_vf_config *vf_configs;
struct cgx_link_user_info linfo;
u64 reset_count;
struct work_struct reset_task;
+ struct workqueue_struct *flr_wq;
+ struct flr_work *flr_wrk;
struct refill_work *refill_wrk;
+ struct workqueue_struct *otx2_wq;
+ struct work_struct rx_mode_work;
/* Ethtool stuff */
u32 msg_enable;
@@ -229,6 +253,11 @@ struct otx2_nic {
int nix_blkaddr;
};
+static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
+}
+
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
return (pdev->revision == 0x00) &&
@@ -348,21 +377,6 @@ static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
-static inline void otx2_mbox_lock_init(struct mbox *mbox)
-{
- mutex_init(&mbox->lock);
-}
-
-static inline void otx2_mbox_lock(struct mbox *mbox)
-{
- mutex_lock(&mbox->lock);
-}
-
-static inline void otx2_mbox_unlock(struct mbox *mbox)
-{
- mutex_unlock(&mbox->lock);
-}
-
/* With the absence of API for 128-bit IO memory access for arm64,
* implement required operations at place.
*/
@@ -558,6 +572,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+int otx2_config_pause_frm(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -578,6 +593,7 @@ dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
@@ -598,6 +614,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp);
/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
@@ -607,6 +625,7 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
+void otx2vf_set_ethtool_ops(struct net_device *netdev);
int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 60fcf82dd8cb..d59f5a9c7273 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -17,6 +17,7 @@
#include "otx2_common.h"
#define DRV_NAME "octeontx2-nicpf"
+#define DRV_VF_NAME "octeontx2-nicvf"
struct otx2_stat {
char name[ETH_GSTRING_LEN];
@@ -63,16 +64,6 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
-static void otx2_dev_open(struct net_device *netdev)
-{
- otx2_open(netdev);
-}
-
-static void otx2_dev_stop(struct net_device *netdev)
-{
- otx2_stop(netdev);
-}
-
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -232,7 +223,7 @@ static int otx2_set_channels(struct net_device *dev,
return -EINVAL;
if (if_up)
- otx2_dev_stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count);
@@ -245,7 +236,7 @@ static int otx2_set_channels(struct net_device *dev,
fail:
if (if_up)
- otx2_dev_open(dev);
+ dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -253,6 +244,51 @@ fail:
return err;
}
+static void otx2_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_pause_frm_cfg *req, *rsp;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return;
+
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req)
+ return;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pause_frm_cfg *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ pause->rx_pause = rsp->rx_pause;
+ pause->tx_pause = rsp->tx_pause;
+ }
+}
+
+static int otx2_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+
+ if (pause->tx_pause)
+ pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return otx2_config_pause_frm(pfvf);
+}
+
static void otx2_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -297,14 +333,15 @@ static int otx2_set_ringparam(struct net_device *netdev,
return 0;
if (if_up)
- otx2_dev_stop(netdev);
+ netdev->netdev_ops->ndo_stop(netdev);
/* Assigned to the nearest possible exponent. */
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
if (if_up)
- otx2_dev_open(netdev);
+ netdev->netdev_ops->ndo_open(netdev);
+
return 0;
}
@@ -329,17 +366,6 @@ static int otx2_set_coalesce(struct net_device *netdev,
struct otx2_hw *hw = &pfvf->hw;
int qidx;
- if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
- ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs || ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high || ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high || ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high || ec->rate_sample_interval)
- return -EOPNOTSUPP;
-
if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
return 0;
@@ -631,10 +657,15 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ /* LBK link is internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 1;
return pfvf->linfo.link_up;
}
static const struct ethtool_ops otx2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -654,9 +685,110 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_rxfh = otx2_set_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &otx2_ethtool_ops;
}
+
+/* VF's ethtool APIs */
+static void otx2vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+}
+
+static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int stats;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < otx2_n_dev_stats; stats++) {
+ memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ otx2_get_qset_strings(vf, &data, 0);
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
+}
+
+static void otx2vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int stat;
+
+ otx2_get_dev_stats(vf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&vf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
+
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
+
+ otx2_get_qset_stats(vf, stats, &data);
+ *(data++) = vf->reset_count;
+}
+
+static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int qstats_count;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = otx2_n_queue_stats *
+ (vf->hw.rx_queues + vf->hw.tx_queues);
+
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
+}
+
+static const struct ethtool_ops otx2vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_link = otx2_get_link,
+ .get_drvinfo = otx2vf_get_drvinfo,
+ .get_strings = otx2vf_get_strings,
+ .get_ethtool_stats = otx2vf_get_ethtool_stats,
+ .get_sset_count = otx2vf_get_sset_count,
+ .set_channels = otx2_set_channels,
+ .get_channels = otx2_get_channels,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
+ .get_rxfh_key_size = otx2_get_rxfh_key_size,
+ .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
+ .get_rxfh = otx2_get_rxfh,
+ .set_rxfh = otx2_set_rxfh,
+ .get_ringparam = otx2_get_ringparam,
+ .set_ringparam = otx2_set_ringparam,
+ .get_coalesce = otx2_get_coalesce,
+ .set_coalesce = otx2_set_coalesce,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
+};
+
+void otx2vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2vf_ethtool_ops;
+}
+EXPORT_SYMBOL(otx2vf_set_ethtool_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 85f9b9ba6bd5..411e5ea1031e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -24,7 +24,6 @@
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
-#define DRV_VERSION "1.0"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
@@ -32,10 +31,9 @@ static const struct pci_device_id otx2_pf_id_table[] = {
{ 0, } /* end of table */
};
-MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
enum {
@@ -61,6 +59,224 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
+static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
+{
+ int irq, vfs = pf->total_vfs;
+
+ /* Disable VFs ME interrupts */
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(irq, pf);
+
+ /* Disable VFs FLR interrupts */
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(irq, pf);
+
+ if (vfs <= 64)
+ return;
+
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(irq, pf);
+
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(irq, pf);
+}
+
+static void otx2_flr_wq_destroy(struct otx2_nic *pf)
+{
+ if (!pf->flr_wq)
+ return;
+ destroy_workqueue(pf->flr_wq);
+ pf->flr_wq = NULL;
+ devm_kfree(pf->dev, pf->flr_wrk);
+}
+
+static void otx2_flr_handler(struct work_struct *work)
+{
+ struct flr_work *flrwork = container_of(work, struct flr_work, work);
+ struct otx2_nic *pf = flrwork->pf;
+ struct mbox *mbox = &pf->mbox;
+ struct msg_req *req;
+ int vf, reg = 0;
+
+ vf = flrwork - pf->flr_wrk;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_vf_flr(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
+ req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
+ req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ if (!otx2_sync_mbox_msg(&pf->mbox)) {
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear transaction pending bit */
+ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+ }
+
+ mutex_unlock(&mbox->lock);
+}
+
+static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ int reg, dev, vf, start_vf, num_reg = 1;
+ u64 intr;
+
+ if (pf->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
+ /* Clear interrupt */
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ /* Disable the interrupt */
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
+ BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ int vf, reg, num_reg = 1;
+ u64 intr;
+
+ if (pf->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ /* clear trpend bit */
+ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ /* clear interrupt */
+ otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ char *irq_name;
+ int ret;
+
+ /* Register ME interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
+ otx2_pf_me_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for ME0\n");
+ }
+
+ /* Register FLR interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
+ otx2_pf_flr_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for FLR0\n");
+ return ret;
+ }
+
+ if (numvfs > 64) {
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
+ rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_VFME1),
+ otx2_pf_me_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for ME1\n");
+ }
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
+ rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
+ otx2_pf_flr_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for FLR1\n");
+ return ret;
+ }
+ }
+
+ /* Enable ME interrupt for all VFs */
+ otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+ /* Enable FLR interrupt for all VFs */
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+ if (numvfs > 64) {
+ numvfs -= 64;
+
+ otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+ return 0;
+}
+
+static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
+{
+ int vf;
+
+ pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
+ WQ_UNBOUND | WQ_HIGHPRI, 1);
+ if (!pf->flr_wq)
+ return -ENOMEM;
+
+ pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
+ sizeof(struct flr_work), GFP_KERNEL);
+ if (!pf->flr_wrk) {
+ destroy_workqueue(pf->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ pf->flr_wrk[vf].pf = pf;
+ INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
+ }
+
+ return 0;
+}
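otx2_flr_handler() never stores its VF number; it recovers it by subtracting the array base from the work-item pointer ('vf = flrwork - pf->flr_wrk'), which is why flr_wrk is allocated as one contiguous array here. A tiny model of that pointer arithmetic:

#include <stdio.h>

struct demo_work { int pending; };

int main(void)
{
	struct demo_work wrk[8];
	struct demo_work *item = &wrk[5];

	/* element pointer minus array base yields the index */
	printf("vf = %td\n", item - wrk);
	return 0;
}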
+
static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
int first, int mdevs, u64 intr, int type)
{
@@ -115,9 +331,391 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
}
}
+static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
+ struct otx2_mbox *pfvf_mbox, void *bbuf_base,
+ int devid)
+{
+ struct otx2_mbox_dev *src_mdev = mdev;
+ int offset;
+
+ /* Msgs are already copied, trigger VF's mbox irq */
+ smp_wmb();
+
+ offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
+ writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
+
+ /* Restore VF's mbox bounce buffer region address */
+ src_mdev->mbase = bbuf_base;
+}
+
+static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
+ struct otx2_mbox *src_mbox,
+ int dir, int vf, int num_msgs)
+{
+ struct otx2_mbox_dev *src_mdev, *dst_mdev;
+ struct mbox_hdr *mbox_hdr;
+ struct mbox_hdr *req_hdr;
+ struct mbox *dst_mbox;
+ int dst_size, err;
+
+ if (dir == MBOX_DIR_PFAF) {
+ /* Set VF's mailbox memory as PF's bounce buffer memory, so
+ * that explicit copying of VF's msgs to PF=>AF mbox region
+ * and AF=>PF responses to VF's mbox region can be avoided.
+ */
+ src_mdev = &src_mbox->dev[vf];
+ mbox_hdr = src_mbox->hwbase +
+ src_mbox->rx_start + (vf * MBOX_SIZE);
+
+ dst_mbox = &pf->mbox;
+ dst_size = dst_mbox->mbox.tx_size -
+ ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
+ /* Check if msgs fit into destination area */
+ if (mbox_hdr->msg_size > dst_size)
+ return -EINVAL;
+
+ dst_mdev = &dst_mbox->mbox.dev[0];
+
+ mutex_lock(&pf->mbox.lock);
+ dst_mdev->mbase = src_mdev->mbase;
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = num_msgs;
+ err = otx2_sync_mbox_msg(dst_mbox);
+ if (err) {
+ dev_warn(pf->dev,
+ "AF not responding to VF%d messages\n", vf);
+ /* restore PF mbase and exit */
+ dst_mdev->mbase = pf->mbox.bbuf_base;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+ /* At this point, all the VF messages sent to AF are acked
+ * with proper responses and the responses are copied to the
+ * VF mailbox, hence raise an interrupt to the VF.
+ */
+ req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
+ dst_mbox->mbox.rx_start);
+ req_hdr->num_msgs = num_msgs;
+
+ otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
+ pf->mbox.bbuf_base, vf);
+ mutex_unlock(&pf->mbox.lock);
+ } else if (dir == MBOX_DIR_PFVF_UP) {
+ src_mdev = &src_mbox->dev[0];
+ mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
+ req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
+ src_mbox->rx_start);
+ req_hdr->num_msgs = num_msgs;
+
+ dst_mbox = &pf->mbox_pfvf[0];
+ dst_size = dst_mbox->mbox_up.tx_size -
+ ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
+ /* Check if msgs fit into destination area */
+ if (mbox_hdr->msg_size > dst_size)
+ return -EINVAL;
+
+ dst_mdev = &dst_mbox->mbox_up.dev[vf];
+ dst_mdev->mbase = src_mdev->mbase;
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = mbox_hdr->num_msgs;
+ err = otx2_sync_mbox_up_msg(dst_mbox, vf);
+ if (err) {
+ dev_warn(pf->dev,
+ "VF%d is not responding to mailbox\n", vf);
+ return err;
+ }
+ } else if (dir == MBOX_DIR_VFPF_UP) {
+ req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
+ src_mbox->rx_start);
+ req_hdr->num_msgs = num_msgs;
+ otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
+ &pf->mbox.mbox_up,
+ pf->mbox_pfvf[vf].bbuf_base,
+ 0);
+ }
+
+ return 0;
+}
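The MBOX_DIR_PFAF branch avoids two explicit copies by temporarily aiming the PF's mailbox descriptor at the VF's buffer, syncing, then restoring the bounce-buffer address. A toy user-space model of that swap; the names here are illustrative stand-ins, not the driver's real structures:

#include <stdio.h>

struct mdev_demo { char *mbase; };

static void sync_msg(struct mdev_demo *m)
{
	printf("sending payload at %s\n", m->mbase);
}

int main(void)
{
	char vf_buf[]  = "vf-request";
	char pf_bbuf[] = "pf-bounce-buffer";
	struct mdev_demo pf_mdev = { pf_bbuf };

	pf_mdev.mbase = vf_buf;   /* borrow the VF's buffer: no memcpy */
	sync_msg(&pf_mdev);
	pf_mdev.mbase = pf_bbuf;  /* restore, as otx2_forward_msg_pfvf() does */
	return 0;
}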
+
+static void otx2_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct mbox_msghdr *msg = NULL;
+ int offset, vf_idx, id, err;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct otx2_mbox *mbox;
+ struct mbox *vf_mbox;
+ struct otx2_nic *pf;
+
+ vf_mbox = container_of(work, struct mbox, mbox_wrk);
+ pf = vf_mbox->pfvf;
+ vf_idx = vf_mbox - pf->mbox_pfvf;
+
+ mbox = &pf->mbox_pfvf[0].mbox;
+ mdev = &mbox->dev[vf_idx];
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ if (msg->sig != OTX2_MBOX_REQ_SIG)
+ goto inval_msg;
+
+ /* Set VF's number in each of the msg */
+ msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
+ offset = msg->next_msgoff;
+ }
+ err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
+ vf_mbox->num_msgs);
+ if (err)
+ goto inval_msg;
+ return;
+
+inval_msg:
+ otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
+ otx2_mbox_msg_send(mbox, vf_idx);
+}
+
+static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
+{
+ struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ struct otx2_nic *pf = vf_mbox->pfvf;
+ struct otx2_mbox_dev *mdev;
+ int offset, id, vf_idx = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+
+ vf_idx = vf_mbox - pf->mbox_pfvf;
+ mbox = &pf->mbox_pfvf[0].mbox_up;
+ mdev = &mbox->dev[vf_idx];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(pf->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(pf->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(pf->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, vf_idx);
+}
+
+static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
+ int vfs = pf->total_vfs;
+ struct mbox *mbox;
+ u64 intr;
+
+ mbox = pf->mbox_pfvf;
+ /* Handle VF interrupts */
+ if (vfs > 64) {
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+ TYPE_PFVF);
+ vfs -= 64;
+ }
+
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+
+ return IRQ_HANDLED;
+}
+
+static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
+{
+ void __iomem *hwbase;
+ struct mbox *mbox;
+ int err, vf;
+ u64 base;
+
+ if (!numvfs)
+ return -EINVAL;
+
+ pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
+ sizeof(struct mbox), GFP_KERNEL);
+ if (!pf->mbox_pfvf)
+ return -ENOMEM;
+
+ pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!pf->mbox_pfvf_wq)
+ return -ENOMEM;
+
+ base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+
+ if (!hwbase) {
+ err = -ENOMEM;
+ goto free_wq;
+ }
+
+ mbox = &pf->mbox_pfvf[0];
+ err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFVF, numvfs);
+ if (err)
+ goto free_iomem;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFVF_UP, numvfs);
+ if (err)
+ goto free_iomem;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ mbox->pfvf = pf;
+ INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
+ mbox++;
+ }
+
+ return 0;
+
+free_iomem:
+ if (hwbase)
+ iounmap(hwbase);
+free_wq:
+ destroy_workqueue(pf->mbox_pfvf_wq);
+ return err;
+}
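The PF maps one contiguous region and carves it into fixed-size per-VF mailbox slots, which is why the ioremap_wc() size above is MBOX_SIZE * total_vfs. A sketch of the slot arithmetic, assuming MBOX_SIZE is a fixed 64KB stride (the actual value comes from the shared mbox headers):

#include <stdio.h>

#define MBOX_SIZE 0x10000UL   /* assumed 64KB stride per VF slot */

static unsigned long vf_slot(unsigned long hwbase, int vf)
{
	return hwbase + (unsigned long)vf * MBOX_SIZE;
}

int main(void)
{
	unsigned long hwbase = 0x40000000UL;   /* illustrative base address */
	int vf;

	for (vf = 0; vf < 3; vf++)
		printf("VF%d mailbox slot at %#lx\n", vf, vf_slot(hwbase, vf));
	return 0;
}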
+
+static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox_pfvf[0];
+
+ if (!mbox)
+ return;
+
+ if (pf->mbox_pfvf_wq) {
+ destroy_workqueue(pf->mbox_pfvf_wq);
+ pf->mbox_pfvf_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap(mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+}
+
+static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ /* Clear PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
+
+ /* Enable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ if (numvfs > 64) {
+ numvfs -= 64;
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+}
+
+static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ int vector;
+
+ /* Disable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
+
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, pf);
+
+ if (numvfs > 64) {
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, pf);
+ }
+}
+
+static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ char *irq_name;
+ int err;
+
+ /* Register MBOX0 interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
+ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
+ otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
+ return err;
+ }
+
+ if (numvfs > 64) {
+ /* Register MBOX1 interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
+ err = request_irq(pci_irq_vector(pf->pdev,
+ RVU_PF_INT_VEC_VFPF_MBOX1),
+ otx2_pfvf_mbox_intr_handler,
+ 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
+ return err;
+ }
+ }
+
+ otx2_enable_pfvf_mbox_intr(pf, numvfs);
+
+ return 0;
+}
+
static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
struct mbox_msghdr *msg)
{
+ int devid;
+
if (msg->id >= MBOX_MSG_MAX) {
dev_err(pf->dev,
"Mbox msg with unknown ID 0x%x\n", msg->id);
@@ -131,6 +729,26 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
return;
}
+ /* Message response destined for a VF */
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ if (devid) {
+ struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
+ struct delayed_work *dwork;
+
+ switch (msg->id) {
+ case MBOX_MSG_NIX_LF_START_RX:
+ config->intf_down = false;
+ dwork = &config->link_event_work;
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ break;
+ case MBOX_MSG_NIX_LF_STOP_RX:
+ config->intf_down = true;
+ break;
+ }
+
+ return;
+ }
+
switch (msg->id) {
case MBOX_MSG_READY:
pf->pcifunc = msg->pcifunc;
@@ -148,6 +766,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
mbox_handler_nix_txsch_alloc(pf,
(struct nix_txsch_alloc_rsp *)msg);
break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
+ break;
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
@@ -209,9 +830,22 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
{
+ int i;
+
/* Copy the link info sent by AF */
pf->linfo = msg->link_info;
+ /* notify VFs about link event */
+ for (i = 0; i < pci_num_vf(pf->pdev); i++) {
+ struct otx2_vf_config *config = &pf->vf_configs[i];
+ struct delayed_work *dwork = &config->link_event_work;
+
+ if (config->intf_down)
+ continue;
+
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ }
+
/* interface has not been fully configured yet */
if (pf->flags & OTX2_FLAG_INTF_DOWN)
return 0;
@@ -283,6 +917,12 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
+ if (devid) {
+ otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
+ MBOX_DIR_PFVF_UP, devid - 1,
+ af_mbox->up_num_msgs);
+ return;
+ }
otx2_mbox_msg_send(mbox, 0);
}
@@ -359,7 +999,6 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
struct mbox *mbox = &pf->mbox;
if (pf->mbox_wq) {
- flush_workqueue(pf->mbox_wq);
destroy_workqueue(pf->mbox_wq);
pf->mbox_wq = NULL;
}
@@ -412,7 +1051,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
- otx2_mbox_lock_init(&pf->mbox);
+ mutex_init(&mbox->lock);
return 0;
exit:
@@ -425,19 +1064,19 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
else
msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return err;
}
@@ -446,19 +1085,19 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
else
msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return err;
}
@@ -480,6 +1119,7 @@ int otx2_set_real_num_queues(struct net_device *netdev,
"Failed to set no of Rx queues: %d\n", rx_queues);
return err;
}
+EXPORT_SYMBOL(otx2_set_real_num_queues);
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
@@ -643,7 +1283,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
/* Get the size of receive buffers to allocate */
pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* NPA init */
err = otx2_config_npa(pf);
if (err)
@@ -654,38 +1294,41 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Enable backpressure */
+ otx2_nix_config_bp(pf, true);
+
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_nix_lf;
}
/* Init Auras and pools used by NIX SQ, for queueing SQEs */
err = otx2_sq_aura_pool_init(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_rq_ptrs;
}
err = otx2_txsch_alloc(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_sq_ptrs;
}
err = otx2_config_nix_queues(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_txsch;
}
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
err = otx2_txschq_config(pf, lvl);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_nix_queues;
}
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
err_free_nix_queues:
@@ -703,7 +1346,7 @@ err_free_rq_ptrs:
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf);
err_free_nix_lf:
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
@@ -717,7 +1360,7 @@ err_free_npa_lf:
dev_err(pf->dev, "%s failed to free npalf\n", __func__);
}
exit:
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
@@ -737,6 +1380,12 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+ mutex_lock(&mbox->lock);
+ /* Disable backpressure */
+ if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_nix_config_bp(pf, false);
+ mutex_unlock(&mbox->lock);
+
/* Disable RQs */
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
@@ -756,28 +1405,28 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Reset NIX LF */
req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
/* Disable NPA Pool and Aura hw context */
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free npalf\n", __func__);
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
}
int otx2_open(struct net_device *netdev)
@@ -906,6 +1555,9 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
+ /* Restore pause frame settings */
+ otx2_config_pause_frm(pf);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_free_cints;
@@ -929,6 +1581,7 @@ err_free_mem:
kfree(qset->napi);
return err;
}
+EXPORT_SYMBOL(otx2_open);
int otx2_stop(struct net_device *netdev)
{
@@ -989,6 +1642,7 @@ int otx2_stop(struct net_device *netdev)
sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
return 0;
}
+EXPORT_SYMBOL(otx2_stop);
static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
@@ -1025,15 +1679,23 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
static void otx2_set_rx_mode(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
+
+ queue_work(pf->otx2_wq, &pf->rx_mode_work);
+}
+
+static void otx2_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = pf->netdev;
struct nix_rx_mode *req;
if (!(netdev->flags & IFF_UP))
return;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return;
}
@@ -1046,7 +1708,7 @@ static void otx2_set_rx_mode(struct net_device *netdev)
req->mode |= NIX_RX_MODE_ALLMULTI;
otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
}
static int otx2_set_features(struct net_device *netdev,
@@ -1086,6 +1748,17 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_get_stats64 = otx2_get_stats64,
};
+static int otx2_wq_init(struct otx2_nic *pf)
+{
+ pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
+ if (!pf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->reset_task, otx2_reset_task);
+ return 0;
+}
+
static int otx2_check_pf_usable(struct otx2_nic *nic)
{
u64 rev;
@@ -1117,7 +1790,6 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
otx2_disable_mbox_intr(pf);
pci_free_irq_vectors(hw->pdev);
- pci_free_irq_vectors(hw->pdev);
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) {
dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
@@ -1172,6 +1844,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pf->netdev = netdev;
pf->pdev = pdev;
pf->dev = dev;
+ pf->total_vfs = pci_sriov_get_totalvfs(pdev);
pf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &pf->hw;
@@ -1270,21 +1943,29 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = OTX2_MAX_MTU;
- INIT_WORK(&pf->reset_task, otx2_reset_task);
-
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
goto err_detach_rsrc;
}
+ err = otx2_wq_init(pf);
+ if (err)
+ goto err_unreg_netdev;
+
otx2_set_ethtool_ops(netdev);
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ /* Enable pause frames by default */
+ pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+
return 0;
+err_unreg_netdev:
+ unregister_netdev(netdev);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@@ -1301,6 +1982,121 @@ err_release_regions:
return err;
}
+static void otx2_vf_link_event_task(struct work_struct *work)
+{
+ struct otx2_vf_config *config;
+ struct cgx_link_info_msg *req;
+ struct mbox_msghdr *msghdr;
+ struct otx2_nic *pf;
+ int vf_idx;
+
+ config = container_of(work, struct otx2_vf_config,
+ link_event_work.work);
+ vf_idx = config - config->pf->vf_configs;
+ pf = config->pf;
+
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ return;
+ }
+
+ req = (struct cgx_link_info_msg *)msghdr;
+ req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
+
+static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret, i;
+
+ /* Init PF <=> VF mailbox stuff */
+ ret = otx2_pfvf_mbox_init(pf, numvfs);
+ if (ret)
+ return ret;
+
+ ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
+ if (ret)
+ goto free_mbox;
+
+ pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs) {
+ ret = -ENOMEM;
+ goto free_intr;
+ }
+
+ for (i = 0; i < numvfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ }
+
+ ret = otx2_pf_flr_init(pf, numvfs);
+ if (ret)
+ goto free_configs;
+
+ ret = otx2_register_flr_me_intr(pf, numvfs);
+ if (ret)
+ goto free_flr;
+
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret)
+ goto free_flr_intr;
+
+ return numvfs;
+free_flr_intr:
+ otx2_disable_flr_me_intr(pf);
+free_flr:
+ otx2_flr_wq_destroy(pf);
+free_configs:
+ kfree(pf->vf_configs);
+free_intr:
+ otx2_disable_pfvf_mbox_intr(pf, numvfs);
+free_mbox:
+ otx2_pfvf_mbox_destroy(pf);
+ return ret;
+}
+
+static int otx2_sriov_disable(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int numvfs = pci_num_vf(pdev);
+ int i;
+
+ if (!numvfs)
+ return 0;
+
+ pci_disable_sriov(pdev);
+
+ for (i = 0; i < pci_num_vf(pdev); i++)
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ kfree(pf->vf_configs);
+
+ otx2_disable_flr_me_intr(pf);
+ otx2_flr_wq_destroy(pf);
+ otx2_disable_pfvf_mbox_intr(pf, numvfs);
+ otx2_pfvf_mbox_destroy(pf);
+
+ return 0;
+}
+
+static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ if (numvfs == 0)
+ return otx2_sriov_disable(pdev);
+ else
+ return otx2_sriov_enable(pdev, numvfs);
+}
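The PCI core invokes .sriov_configure (wired into the pci_driver below) when userspace writes to the device's sriov_numvfs sysfs attribute, e.g. 'echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs'; writing 0 disables SR-IOV, and a positive return value reports how many VFs are now enabled. A minimal model of that dispatch contract, with demo stand-ins for the enable/disable paths:

#include <stdio.h>

static int demo_sriov_enable(int numvfs)
{
	printf("enabling %d VFs\n", numvfs);
	return numvfs;   /* success: report the enabled VF count */
}

static int demo_sriov_disable(void)
{
	printf("disabling all VFs\n");
	return 0;
}

/* mirrors otx2_sriov_configure(): zero disables, non-zero enables */
static int demo_sriov_configure(int numvfs)
{
	if (numvfs == 0)
		return demo_sriov_disable();
	return demo_sriov_enable(numvfs);
}

int main(void)
{
	demo_sriov_configure(4);
	demo_sriov_configure(0);
	return 0;
}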
+
static void otx2_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1315,6 +2111,10 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_cgx_config_linkevents(pf, false);
unregister_netdev(netdev);
+ otx2_sriov_disable(pf->pdev);
+ if (pf->otx2_wq)
+ destroy_workqueue(pf->otx2_wq);
+
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
@@ -1331,6 +2131,7 @@ static struct pci_driver otx2_pf_driver = {
.probe = otx2_probe,
.shutdown = otx2_remove,
.remove = otx2_remove,
+ .sriov_configure = otx2_sriov_configure
};
static int __init otx2_rvupf_init_module(void)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 7963d418886a..867f646e0802 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -45,6 +45,19 @@
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+#define RVU_VF_VFPF_MBOXX(a) (0x00 | (a) << 3)
+#define RVU_VF_INT (0x20)
+#define RVU_VF_INT_W1S (0x28)
+#define RVU_VF_INT_ENA_W1S (0x30)
+#define RVU_VF_INT_ENA_W1C (0x38)
+#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index bef4c20fe314..45abe0cd0e7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -138,6 +138,25 @@ static void otx2_set_rxhash(struct otx2_nic *pfvf,
skb_set_hash(skb, hash, hash_type);
}
+static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
+ int qidx)
+{
+ struct nix_rx_sg_s *sg = &cqe->sg;
+ void *end, *start;
+ u64 *seg_addr;
+ int seg;
+
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++)
+ otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
+ start += sizeof(*sg);
+ }
+}
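A received frame that spans multiple buffers is described by a chain of scatter records; desc_sizem1 encodes the descriptor size in 16-byte units minus one, and the low three bits of each segment address carry flags, so the driver masks them before freeing. A simplified user-space model of the walk; the struct is a stand-in, not the hardware layout:

#include <stdint.h>
#include <stdio.h>

struct demo_sg {
	int segs;                 /* valid addresses in this record */
	uint64_t seg_addr[3];
};

static void free_rcv_segs(const struct demo_sg *sg, int nrecords)
{
	int i, s;

	for (i = 0; i < nrecords; i++)
		for (s = 0; s < sg[i].segs; s++)
			/* mask the low 3 flag bits, as the driver does */
			printf("free buffer %#llx\n",
			       (unsigned long long)(sg[i].seg_addr[s] & ~0x07ULL));
}

int main(void)
{
	struct demo_sg sgs[2] = {
		{ .segs = 2, .seg_addr = { 0x1008, 0x2010 } },
		{ .segs = 1, .seg_addr = { 0x3018 } },
	};

	free_rcv_segs(sgs, 2);
	return 0;
}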
+
static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe, int qidx)
{
@@ -189,16 +208,17 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and
* pass the packets to stack.
*/
- return false;
+ if (cqe->sg.segs == 1)
+ return false;
}
/* If RXALL is enabled pass on packets to stack. */
- if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL))
+ if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
return false;
/* Free buffer back to pool */
if (cqe->sg.segs)
- otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL);
+ otx2_free_rcv_seg(pfvf, cqe, qidx);
return true;
}
@@ -210,7 +230,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse;
struct sk_buff *skb = NULL;
- if (unlikely(parse->errlev || parse->errcode)) {
+ if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return;
}
@@ -284,6 +304,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
+ otx2_get_page(cq->rbpool);
return processed_cqe;
}
@@ -778,6 +799,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return true;
}
+EXPORT_SYMBOL(otx2_sq_append_skb);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
@@ -788,11 +810,15 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
if (!cqe->sg.subdc)
continue;
+ processed_cqe++;
+ if (cqe->sg.segs > 1) {
+ otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
+ continue;
+ }
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
put_page(virt_to_page(phys_to_virt(pa)));
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -831,18 +857,18 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
else
msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
new file mode 100644
index 000000000000..f4227517dc8e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+
+#define DRV_NAME "octeontx2-nicvf"
+#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
+
+static const struct pci_device_id otx2_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { }
+};
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);
+
+/* RVU VF Interrupt Vector Enumeration */
+enum {
+ RVU_VF_INT_VEC_MBOX = 0x0,
+};
+
+static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(vf->dev,
+ "Mbox msg with unknown ID %d\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(vf->dev,
+ "Mbox msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ if (msg->rc == MBOX_MSG_INVALID) {
+ dev_err(vf->dev,
+ "PF/AF says the sent msg(s) %d were invalid\n",
+ msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ vf->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_TXSCH_ALLOC:
+ mbox_handler_nix_txsch_alloc(vf,
+ (struct nix_txsch_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(vf->dev,
+ "Mbox msg response has err %d, ID %d\n",
+ msg->rc, msg->id);
+ }
+}
+
+static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ int offset, id;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (af_mbox->num_msgs == 0)
+ return;
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < af_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
+ struct mbox_msghdr *req)
+{
+ struct msg_rsp *rsp;
+ int err;
+
+ /* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+
+ switch (req->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
+ &vf->mbox.mbox_up, 0,
+ sizeof(struct msg_rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = 0;
+ rsp->hdr.rc = 0;
+ err = otx2_mbox_up_handler_cgx_link_event(
+ vf, (struct cgx_link_info_msg *)req, rsp);
+ return err;
+ default:
+ otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *vf_mbox;
+ struct otx2_nic *vf;
+ int offset, id;
+
+ vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ vf = vf_mbox->pfvf;
+ mbox = &vf_mbox->mbox_up;
+ mdev = &mbox->dev[0];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (vf_mbox->up_num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_mbox_msg_up(vf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+ struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs) {
+ vf->mbox.num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+ }
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs) {
+ vf->mbox.up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ }
+
+ return IRQ_HANDLED;
+}
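The smp_rmb() above pairs with the sender's ordering: the writer fills the mailbox body first and publishes num_msgs last, so a reader that observes a non-zero count can safely read the body. A toy C11 model of that publish/consume handshake (single-threaded here, so the ordering is illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static char buf[32];
static atomic_int num_msgs;

static void sender(void)
{
	snprintf(buf, sizeof(buf), "link-up");       /* body first */
	atomic_store_explicit(&num_msgs, 1,
			      memory_order_release); /* count last */
}

static void receiver(void)
{
	if (atomic_load_explicit(&num_msgs, memory_order_acquire))
		printf("got msg: %s\n", buf);        /* body is visible */
}

int main(void)
{
	sender();
	receiver();
	return 0;
}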
+
+static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
+{
+ int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);
+
+ /* Disable VF => PF mailbox IRQ */
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+ free_irq(vector, vf);
+}
+
+static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ struct msg_req *req;
+ char *irq_name;
+ int err;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ if (err) {
+ dev_err(vf->dev,
+ "RVUPF: IRQ registration failed for VFAF mbox irq\n");
+ return err;
+ }
+
+ /* Enable mailbox interrupt for msgs coming from PF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+
+ if (!probe_pf)
+ return 0;
+
+ /* Check mailbox communication with PF */
+ req = otx2_mbox_alloc_msg_ready(&vf->mbox);
+ if (!req) {
+ otx2vf_disable_mbox_intr(vf);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&vf->mbox);
+ if (err) {
+ dev_warn(vf->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ otx2vf_disable_mbox_intr(vf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
+{
+ struct mbox *mbox = &vf->mbox;
+
+ if (vf->mbox_wq) {
+ flush_workqueue(vf->mbox_wq);
+ destroy_workqueue(vf->mbox_wq);
+ vf->mbox_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap((void __iomem *)mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+ otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
+{
+ struct mbox *mbox = &vf->mbox;
+ void __iomem *hwbase;
+ int err;
+
+ mbox->pfvf = vf;
+ vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!vf->mbox_wq)
+ return -ENOMEM;
+
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * the admin function (i.e. PF0) and this VF; it shouldn't be
+ * mapped as device memory, so that unaligned accesses work.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
+ MBOX_DIR_VFPF, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
+ MBOX_DIR_VFPF_UP, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_bbuf_init(mbox, vf->pdev);
+ if (err)
+ goto exit;
+
+ INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
+ mutex_init(&mbox->lock);
+
+ return 0;
+exit:
+ destroy_workqueue(vf->mbox_wq);
+ return err;
+}
+
+static int otx2vf_open(struct net_device *netdev)
+{
+ struct otx2_nic *vf;
+ int err;
+
+ err = otx2_open(netdev);
+ if (err)
+ return err;
+
+ /* LBKs do not receive link events so tell everyone we are up here */
+ vf = netdev_priv(netdev);
+ if (is_otx2_lbkvf(vf->pdev)) {
+ pr_info("%s NIC Link is UP\n", netdev->name);
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ }
+
+ return 0;
+}
+
+static int otx2vf_stop(struct net_device *netdev)
+{
+ return otx2_stop(netdev);
+}
+
+static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+
+ sq = &vf->qset.sq[qidx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ netif_tx_stop_queue(txq);
+
+ /* Check again, in case SQBs got freed up */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
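The stop/recheck sequence above avoids a lost wakeup: stop the queue, issue a full barrier, then re-read the hardware-maintained SQB count in case completions freed space in between. A sketch of just the occupancy test, assuming *sq->aura_fc_addr reports SQBs currently in use (the exact semantics live in the aura hardware):

#include <stdio.h>

int main(void)
{
	int num_sqbs = 8, sqe_per_sqb = 32, sqe_thresh = 16;
	int sqbs_in_use = 6;   /* assumed meaning of *sq->aura_fc_addr */

	int free_sqes = (num_sqbs - sqbs_in_use) * sqe_per_sqb;

	printf("%s\n", free_sqes > sqe_thresh ?
	       "wake queue" : "stay stopped");
	return 0;
}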
+
+static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (if_up)
+ otx2vf_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2vf_open(netdev);
+
+ return err;
+}
+
+static void otx2vf_reset_task(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);
+
+ rtnl_lock();
+
+ if (netif_running(vf->netdev)) {
+ otx2vf_stop(vf->netdev);
+ vf->reset_count++;
+ otx2vf_open(vf->netdev);
+ }
+
+ rtnl_unlock();
+}
+
+static const struct net_device_ops otx2vf_netdev_ops = {
+ .ndo_open = otx2vf_open,
+ .ndo_stop = otx2vf_stop,
+ .ndo_start_xmit = otx2vf_xmit,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2vf_change_mtu,
+ .ndo_get_stats64 = otx2_get_stats64,
+ .ndo_tx_timeout = otx2_tx_timeout,
+};
+
+static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ int num_vec, err;
+
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2vf_disable_mbox_intr(vf);
+ pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ return otx2vf_register_mbox_intr(vf, false);
+}
+
+static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int num_vec = pci_msix_vec_count(pdev);
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct otx2_nic *vf;
+ struct otx2_hw *hw;
+ int err, qcount;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ qcount = num_online_cpus();
+ netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ vf = netdev_priv(netdev);
+ vf->netdev = netdev;
+ vf->pdev = pdev;
+ vf->dev = dev;
+ vf->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ vf->flags |= OTX2_FLAG_INTF_DOWN;
+ hw = &vf->hw;
+ hw->pdev = vf->pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->max_queues = qcount;
+
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ goto err_free_netdev;
+ }
+
+ vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!vf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_free_irq_vectors;
+ }
+
+ /* Init VF <=> PF mailbox stuff */
+ err = otx2vf_vfaf_mbox_init(vf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2vf_register_mbox_intr(vf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this VF */
+ err = otx2_attach_npa_nix(vf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2vf_realloc_msix_vectors(vf);
+ if (err)
+ goto err_mbox_destroy;
+
+ err = otx2_set_real_num_queues(netdev, qcount, qcount);
+ if (err)
+ goto err_detach_rsrc;
+
+ otx2_setup_dev_hw_settings(vf);
+
+ /* Assign default mac address */
+ otx2_get_mac_from_af(netdev);
+
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ netdev->features = netdev->hw_features;
+
+ netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+ netdev->netdev_ops = &otx2vf_netdev_ops;
+
+ /* MTU range: 68 - 9190 */
+ netdev->min_mtu = OTX2_MIN_MTU;
+ netdev->max_mtu = OTX2_MAX_MTU;
+
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+
+ /* To distinguish LBK VFs, set their netdev names explicitly */
+ if (is_otx2_lbkvf(vf->pdev)) {
+ int n;
+
+ n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
+ /* Need to subtract 1 to get proper VF number */
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_detach_rsrc;
+ }
+
+ otx2vf_set_ethtool_ops(netdev);
+
+ /* Enable pause frames by default */
+ vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return 0;
+
+err_detach_rsrc:
+ otx2_detach_resources(&vf->mbox);
+err_disable_mbox_intr:
+ otx2vf_disable_mbox_intr(vf);
+err_mbox_destroy:
+ otx2vf_vfaf_mbox_destroy(vf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void otx2vf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *vf;
+
+ if (!netdev)
+ return;
+
+ vf = netdev_priv(netdev);
+
+ otx2vf_disable_mbox_intr(vf);
+
+ otx2_detach_resources(&vf->mbox);
+ otx2vf_vfaf_mbox_destroy(vf);
+ pci_free_irq_vectors(vf->pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2vf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_vf_id_table,
+ .probe = otx2vf_probe,
+ .remove = otx2vf_remove,
+ .shutdown = otx2vf_remove,
+};
+
+static int __init otx2vf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ return pci_register_driver(&otx2vf_driver);
+}
+
+static void __exit otx2vf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2vf_driver);
+}
+
+module_init(otx2vf_init_module);
+module_exit(otx2vf_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 7a0d785b826c..17243bb5ba91 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1418,7 +1418,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pep->base)) {
- err = -ENOMEM;
+ err = PTR_ERR(pep->base);
goto err_netdev;
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 97f270d30cce..3c89206f18a7 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -876,6 +876,7 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
}
static const struct ethtool_ops skge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = skge_get_drvinfo,
.get_regs_len = skge_get_regs_len,
.get_regs = skge_get_regs,
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
index 6fa7b6a34c08..6928abcec0a3 100644
--- a/drivers/net/ethernet/marvell/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -15,12 +15,6 @@
#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... */
#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */
-#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
- PCI_STATUS_SIG_SYSTEM_ERROR | \
- PCI_STATUS_REC_MASTER_ABORT | \
- PCI_STATUS_REC_TARGET_ABORT | \
- PCI_STATUS_PARITY)
-
enum csr_regs {
B0_RAP = 0x0000,
B0_CTST = 0x0004,
@@ -2426,7 +2420,7 @@ struct skge_hw {
spinlock_t phy_lock;
struct tasklet_struct phy_task;
- char irq_name[0]; /* skge@pci:000:04:00.0 */
+ char irq_name[]; /* skge@pci:000:04:00.0 */
};
enum pause_control {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ebfd0ceac884..241f00716979 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4400,6 +4400,10 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
}
static const struct ethtool_ops sky2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ,
.get_drvinfo = sky2_get_drvinfo,
.get_wol = sky2_get_wol,
.set_wol = sky2_set_wol,
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index b02b6523083c..b2dddd8a246c 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -252,12 +252,6 @@ enum {
};
-#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
- PCI_STATUS_SIG_SYSTEM_ERROR | \
- PCI_STATUS_REC_MASTER_ABORT | \
- PCI_STATUS_REC_TARGET_ABORT | \
- PCI_STATUS_PARITY)
-
enum csr_regs {
B0_RAP = 0x0000,
B0_CTST = 0x0004,
@@ -2309,7 +2303,7 @@ struct sky2_hw {
struct work_struct restart_work;
wait_queue_head_t msi_wait;
- char irq_name[0];
+ char irq_name[];
};
static inline int sky2_is_copper(const struct sky2_hw *hw)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 8c6cfd15481c..09047109d0da 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -65,6 +65,17 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+{
+ u32 val;
+
+ val = mtk_r32(eth, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(eth, val, reg);
+ return val;
+}
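mtk_m32() is a plain read-modify-write helper: clear the 'mask' bits, OR in 'set', write back. A user-space model of the same bit manipulation, using the RXC reset assert/release sequence from the hunk below as the example:

#include <stdint.h>
#include <stdio.h>

static uint32_t m32(uint32_t val, uint32_t mask, uint32_t set)
{
	return (val & ~mask) | set;   /* clear mask bits, then OR in set */
}

int main(void)
{
	uint32_t reg = 0x00000042;
	uint32_t rst = 1u << 31;      /* stands in for RXC_RST */

	reg = m32(reg, 0, rst);       /* assert reset */
	printf("%08x\n", reg);
	reg = m32(reg, rst, 0);       /* release reset */
	printf("%08x\n", reg);
	return 0;
}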
+
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
unsigned long t_start = jiffies;
@@ -193,7 +204,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
- u32 mcr_cur, mcr_new, sid;
+ u32 mcr_cur, mcr_new, sid, i;
int val, ge_mode, err;
/* MT76x8 has no hardware settings between for the MAC */
@@ -255,6 +266,17 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
PHY_INTERFACE_MODE_TRGMII)
mtk_gmac0_rgmii_adjust(mac->hw,
state->speed);
+
+ /* mt7623_pad_clk_setup */
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ mtk_w32(mac->hw,
+ TD_DM_DRVP(8) | TD_DM_DRVN(8),
+ TRGMII_TD_ODT(i));
+
+ /* Assert/release MT7623 RXC reset */
+ mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
+ TRGMII_RCK_CTRL);
+ mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
}
}
@@ -412,9 +434,10 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy)
+static void mtk_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 85830fe14a1b..454cfcd465fd 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -352,10 +352,13 @@
#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_RST BIT(31)
#define RXC_DQSISEL BIT(30)
#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+#define NUM_TRGMII_CTRL 5
+
/* TRGMII RXC control register */
#define TRGMII_TCK_CTRL 0x10340
#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
@@ -363,6 +366,11 @@
#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+/* TRGMII TX Drive Strength */
+#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i))
+#define TD_DM_DRVP(x) ((x) & 0xf)
+#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
+
/* TRGMII Interface mode register */
#define INTF_MODE 0x10390
#define TRGMII_INTF_DIS BIT(0)
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index 64ed725aec28..73eae80e1cb7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -38,8 +38,21 @@
#define CR_ENABLE_BIT_OFFSET 0xF3F04
#define MAX_NUM_OF_DUMPS_TO_STORE (8)
-static const char *region_cr_space_str = "cr-space";
-static const char *region_fw_health_str = "fw-health";
+#define REGION_CR_SPACE "cr-space"
+#define REGION_FW_HEALTH "fw-health"
+
+static const char * const region_cr_space_str = REGION_CR_SPACE;
+static const char * const region_fw_health_str = REGION_FW_HEALTH;
+
+static const struct devlink_region_ops region_cr_space_ops = {
+ .name = REGION_CR_SPACE,
+ .destructor = &kvfree,
+};
+
+static const struct devlink_region_ops region_fw_health_ops = {
+ .name = REGION_FW_HEALTH,
+ .destructor = &kvfree,
+};
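This hunk replaces bare region-name strings with devlink_region_ops tables, so the region name and the snapshot destructor travel together and the devlink core can free snapshot data itself. A minimal model of that ops-table pattern, using demo types rather than the devlink API:

#include <stdio.h>
#include <stdlib.h>

struct region_ops_demo {
	const char *name;
	void (*destructor)(const void *data);
};

static void kvfree_demo(const void *data)
{
	free((void *)data);
}

static const struct region_ops_demo cr_space_ops = {
	.name       = "cr-space",
	.destructor = kvfree_demo,
};

int main(void)
{
	char *snapshot = malloc(16);

	if (!snapshot)
		return 1;
	printf("region %s owns its snapshot data\n", cr_space_ops.name);
	cr_space_ops.destructor(snapshot);   /* core frees data via the table */
	return 0;
}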
/* Set to true in case cr enable bit was set to true before crdump */
static bool crdump_enbale_bit_set;
@@ -99,7 +112,7 @@ static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
readl(cr_space + offset);
err = devlink_region_snapshot_create(crdump->region_crspace,
- crspace_data, id, &kvfree);
+ crspace_data, id);
if (err) {
kvfree(crspace_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
@@ -138,7 +151,7 @@ static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
readl(health_buf_start + offset);
err = devlink_region_snapshot_create(crdump->region_fw_health,
- health_data, id, &kvfree);
+ health_data, id);
if (err) {
kvfree(health_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
@@ -159,6 +172,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
struct pci_dev *pdev = dev->persist->pdev;
unsigned long cr_res_size;
u8 __iomem *cr_space;
+ int err;
u32 id;
if (!dev->caps.health_buffer_addrs) {
@@ -179,15 +193,22 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
return -ENODEV;
}
- crdump_enable_crspace_access(dev, cr_space);
-
/* Get the available snapshot ID for the dumps */
- id = devlink_region_snapshot_id_get(devlink);
+ err = devlink_region_snapshot_id_get(devlink, &id);
+ if (err) {
+ mlx4_err(dev, "crdump: devlink get snapshot id err %d\n", err);
+ return err;
+ }
+
+ crdump_enable_crspace_access(dev, cr_space);
/* Try to capture dumps */
mlx4_crdump_collect_crspace(dev, cr_space, id);
mlx4_crdump_collect_fw_health(dev, cr_space, id);
+ /* Release reference on the snapshot id */
+ devlink_region_snapshot_id_put(devlink, id);
+
crdump_disable_crspace_access(dev, cr_space);
iounmap(cr_space);
@@ -205,7 +226,7 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create cr-space region */
crdump->region_crspace =
devlink_region_create(devlink,
- region_cr_space_str,
+ &region_cr_space_ops,
MAX_NUM_OF_DUMPS_TO_STORE,
pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
@@ -216,7 +237,7 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create fw-health region */
crdump->region_fw_health =
devlink_region_create(devlink,
- region_fw_health_str,
+ &region_fw_health_ops,
MAX_NUM_OF_DUMPS_TO_STORE,
HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
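The hunks above move the snapshot destructor out of each devlink_region_snapshot_create() call and into a per-region ops structure, and turn snapshot IDs into an explicit get/put pair; note that the collect path now acquires the ID before enabling CR-space access, so an ID failure can no longer leave access enabled. A standalone sketch of the ops-with-destructor pattern; the names here are illustrative, not the devlink API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of devlink_region_ops: the region, not each
 * snapshot call, owns the knowledge of how to free snapshot data. */
struct region_ops {
	const char *name;
	void (*destructor)(void *data);
};

struct snapshot {
	void *data;
	const struct region_ops *ops;
};

static const struct region_ops cr_space_ops = {
	.name = "cr-space",
	.destructor = free,
};

static void snapshot_destroy(struct snapshot *snap)
{
	snap->ops->destructor(snap->data); /* one free policy per region */
}

int main(void)
{
	struct snapshot snap = {
		.data = malloc(64),
		.ops = &cr_space_ops,
	};

	if (!snap.data)
		return 1;
	printf("snapshot in region %s\n", snap.ops->name);
	snapshot_destroy(&snap);
	return 0;
}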
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 8bf1f08fdee2..8a5ea2543670 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2121,6 +2121,10 @@ static int mlx4_en_set_phys_id(struct net_device *dev,
}
const struct ethtool_ops mlx4_en_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
.get_drvinfo = mlx4_en_get_drvinfo,
.get_link_ksettings = mlx4_en_get_link_ksettings,
.set_link_ksettings = mlx4_en_set_link_ksettings,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4d5ca302c067..a30edb436f4a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -43,6 +43,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/moduleparam.h>
+#include <linux/indirect_call_wrapper.h>
#include "mlx4_en.h"
@@ -261,6 +262,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
}
}
+INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u64 timestamp,
+ int napi_mode));
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -329,6 +334,11 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
return tx_info->nr_txbb;
}
+INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u64 timestamp,
+ int napi_mode));
+
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
@@ -449,7 +459,9 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
- last_nr_txbb = ring->free_tx_desc(
+ last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc,
+ mlx4_en_free_tx_desc,
+ mlx4_en_recycle_tx_desc,
priv, ring, ring_index,
timestamp, napi_budget);
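INDIRECT_CALL_2() turns the ring->free_tx_desc indirect call into a compare-and-direct-call against the two known implementations, avoiding retpoline overhead on the hot TX completion path. A simplified standalone sketch of the idea (the kernel's macro in linux/indirect_call_wrapper.h degrades to a plain indirect call when retpolines are off; this shows the devirtualized form):

#include <stdio.h>

/* Compare the pointer against the likely targets and call them
 * directly; fall back to the indirect call otherwise. */
#define INDIRECT_CALL_1(f, f1, ...) \
	((f) == f1 ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))
#define INDIRECT_CALL_2(f, f2, f1, ...) \
	((f) == f2 ? f2(__VA_ARGS__) : INDIRECT_CALL_1(f, f1, __VA_ARGS__))

static int free_desc(int idx)    { return printf("free %d\n", idx); }
static int recycle_desc(int idx) { return printf("recycle %d\n", idx); }

int main(void)
{
	int (*handler)(int) = recycle_desc; /* chosen at ring setup time */

	INDIRECT_CALL_2(handler, free_desc, recycle_desc, 7);
	return 0;
}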
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 6e501af0e532..f6ff9620a137 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2734,7 +2734,7 @@ void mlx4_opreq_action(struct work_struct *work)
if (err) {
mlx4_err(dev, "Failed to retrieve required operation: %d\n",
err);
- return;
+ goto out;
}
MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5716c3d2bb86..c72c4e1ea383 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
if (!err || err == -ENOSPC) {
priv->def_counter[port] = idx;
+ err = 0;
} else if (err == -ENOENT) {
err = 0;
continue;
@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
*idx = get_param_l(&out_param);
-
+ if (WARN_ON(err == -ENOSPC))
+ err = -EINVAL;
return err;
}
return __mlx4_counter_alloc(dev, idx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index a1f20b205299..fd375cbe586e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -7,10 +7,10 @@ config MLX5_CORE
tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on PCI
select NET_DEVLINK
- imply PTP_1588_CLOCK
- imply VXLAN
- imply MLXFW
- imply PCI_HYPERV_INTERFACE
+ depends on VXLAN || !VXLAN
+ depends on MLXFW || !MLXFW
+ depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
+ depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
default n
---help---
Core driver for low level functionality of the ConnectX-4 and
@@ -78,6 +78,16 @@ config MLX5_ESWITCH
Legacy SRIOV mode (L2 mac vlan steering based).
Switchdev mode (eswitch offloads).
+config MLX5_TC_CT
+ bool "MLX5 TC connection tracking offload support"
+ depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ default y
+ help
+ Say Y here if you want to support offloading connection tracking rules
+ via tc ct action.
+
+ If unsure, set to Y
+
config MLX5_CORE_EN_DCB
bool "Data Center Bridging (DCB) Support"
default y
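The "depends on VXLAN || !VXLAN" idiom looks tautological but is not for Kconfig tristates: || is max() and ! is negation on the n < m < y ordering, so when VXLAN=m the expression evaluates to m and MLX5_CORE is capped at m. The softer "imply" did not enforce that, allowing a built-in driver against a modular VXLAN, which fails to link. A throwaway C check of the tristate arithmetic (n=0, m=1, y=2):

#include <stdio.h>

/* Kconfig tristate logic: n=0, m=1, y=2; !x is 2-x, || is max. */
static int tri_or(int a, int b) { return a > b ? a : b; }
static int tri_not(int a)       { return 2 - a; }

int main(void)
{
	const char *name[] = { "n", "m", "y" };
	int vxlan;

	for (vxlan = 0; vxlan <= 2; vxlan++) {
		int limit = tri_or(vxlan, tri_not(vxlan));

		/* MLX5_CORE may be at most 'limit' */
		printf("VXLAN=%s -> MLX5_CORE limited to %s\n",
		       name[vxlan], name[limit]);
	}
	return 0;
}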
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index d3e06cec8317..6d32915000fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o
#
# Netdev basic
@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
- en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
#
# Netdev extra
@@ -34,15 +34,16 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
- lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
+ lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
en/tc_tun_geneve.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
+mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o eswitch_offloads_chains.o
+ ecpf.o rdma.o esw/chains.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 34cba97f7bf4..7a77fe40af3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -848,6 +848,14 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
struct mlx5_cmd_msg *msg);
+static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
+{
+ if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
+ return true;
+
+ return cmd->allowed_opcode == opcode;
+}
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -861,6 +869,7 @@ static void cmd_work_handler(struct work_struct *work)
int alloc_ret;
int cmd_mode;
+ complete(&ent->handling);
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
@@ -888,7 +897,6 @@ static void cmd_work_handler(struct work_struct *work)
}
cmd->ent_arr[ent->idx] = ent;
- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -910,10 +918,13 @@ static void cmd_work_handler(struct work_struct *work)
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
/* Skip sending command to fw if internal error */
if (pci_channel_offline(dev->pdev) ||
- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+ cmd->state != MLX5_CMDIF_STATE_UP ||
+ !opcode_allowed(&dev->cmd, ent->op)) {
u8 status = 0;
u32 drv_synd;
@@ -922,6 +933,10 @@ static void cmd_work_handler(struct work_struct *work)
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ /* no doorbell, no need to keep the entry */
+ free_ent(cmd, ent->idx);
+ if (ent->callback)
+ free_cmd(ent);
return;
}
@@ -974,6 +989,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
struct mlx5_cmd *cmd = &dev->cmd;
int err;
+ if (!wait_for_completion_timeout(&ent->handling, timeout) &&
+ cancel_work_sync(&ent->work)) {
+ ent->ret = -ECANCELED;
+ goto out_err;
+ }
if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
wait_for_completion(&ent->done);
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
@@ -981,12 +1001,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
+out_err:
err = ent->ret;
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
+ } else if (err == -ECANCELED) {
+ mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1022,6 +1047,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ent->token = token;
ent->polling = force_polling;
+ init_completion(&ent->handling);
if (!callback)
init_completion(&ent->done);
@@ -1041,6 +1067,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
err = wait_func(dev, ent);
if (err == -ETIMEDOUT)
goto out;
+ if (err == -ECANCELED)
+ goto out_free;
ds = ent->ts2 - ent->ts1;
op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1387,6 +1415,22 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
mlx5_cmdif_debugfs_init(dev);
}
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ int i;
+
+ for (i = 0; i < cmd->max_reg_cmds; i++)
+ down(&cmd->sem);
+ down(&cmd->pages_sem);
+
+ cmd->allowed_opcode = opcode;
+
+ up(&cmd->pages_sem);
+ for (i = 0; i < cmd->max_reg_cmds; i++)
+ up(&cmd->sem);
+}
+
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1663,12 +1707,14 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int err;
u8 status = 0;
u32 drv_synd;
+ u16 opcode;
u8 token;
+ opcode = MLX5_GET(mbox_in, in, opcode);
if (pci_channel_offline(dev->pdev) ||
- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
-
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+ dev->cmd.state != MLX5_CMDIF_STATE_UP ||
+ !opcode_allowed(&dev->cmd, opcode)) {
err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
MLX5_SET(mbox_out, out, status, status);
MLX5_SET(mbox_out, out, syndrome, drv_synd);
@@ -1933,6 +1979,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_free_page;
}
+ cmd->state = MLX5_CMDIF_STATE_DOWN;
cmd->checksum_disabled = 1;
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
@@ -1970,6 +2017,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
cmd->mode = CMD_MODE_POLLING;
+ cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
create_msg_cache(dev);
@@ -2009,3 +2057,10 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
dma_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
+
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+ enum mlx5_cmdif_state cmdif_state)
+{
+ dev->cmd.state = cmdif_state;
+}
+EXPORT_SYMBOL(mlx5_cmd_set_state);
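mlx5_cmd_allowed_opcode() quiesces the command interface by taking every regular-command semaphore slot plus the pages slot, so nothing can be in flight while allowed_opcode changes, then releases them all. A userspace sketch of that drain-then-update pattern using POSIX semaphores; the slot count and opcode value are arbitrary here:

#include <semaphore.h>
#include <stdio.h>

#define MAX_CMDS 4

static sem_t cmd_slots;
static int allowed_opcode; /* only changed while holding all slots */

/* Take every slot: current holders finish first, new submitters
 * block, and the state flip happens with the queue fully drained. */
static void set_allowed_opcode(int opcode)
{
	int i;

	for (i = 0; i < MAX_CMDS; i++)
		sem_wait(&cmd_slots);

	allowed_opcode = opcode;

	for (i = 0; i < MAX_CMDS; i++)
		sem_post(&cmd_slots);
}

int main(void)
{
	sem_init(&cmd_slots, 0, MAX_CMDS);
	set_allowed_opcode(0x107); /* gate to one opcode; value illustrative */
	printf("allowed opcode now 0x%x\n", allowed_opcode);
	sem_destroy(&cmd_slots);
	return 0;
}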
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 50862275544e..1972ddd12704 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -193,7 +193,7 @@ bool mlx5_device_registered(struct mlx5_core_dev *dev)
return found;
}
-int mlx5_register_device(struct mlx5_core_dev *dev)
+void mlx5_register_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_interface *intf;
@@ -203,8 +203,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
list_for_each_entry(intf, &intf_list, list)
mlx5_add_device(intf, priv);
mutex_unlock(&mlx5_intf_mutex);
-
- return 0;
}
void mlx5_unregister_device(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index ac108f1e5bd6..e94f0c4d74a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -23,7 +23,10 @@ static int mlx5_devlink_flash_update(struct devlink *devlink,
if (err)
return err;
- return mlx5_firmware_flash(dev, fw, extack);
+ err = mlx5_firmware_flash(dev, fw, extack);
+ release_firmware(fw);
+
+ return err;
}
static u8 mlx5_fw_ver_major(u32 version)
@@ -90,7 +93,8 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- return mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev, false);
+ return 0;
}
static int mlx5_devlink_reload_up(struct devlink *devlink,
@@ -190,11 +194,6 @@ static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
return 0;
}
-enum mlx5_devlink_param_id {
- MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
-};
-
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -210,14 +209,38 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
return 0;
}
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int group_num = val.vu32;
+
+ if (group_num < 1 || group_num > 1024) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported group number, supported range is 1-1024");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif
+
static const struct devlink_param mlx5_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
mlx5_devlink_fs_mode_validate),
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
+#ifdef CONFIG_MLX5_ESWITCH
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ "fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ mlx5_devlink_large_group_num_validate),
+#endif
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -230,13 +253,20 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
else
strcpy(value.vstr, "smfs");
devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
value);
value.vbool = MLX5_CAP_GEN(dev, roce);
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
value);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+ devlink_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ value);
+#endif
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index d0ba03774ddf..f0de327a59be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -6,6 +6,12 @@
#include <net/devlink.h>
+enum mlx5_devlink_param_id {
+ MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+};
+
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 94d7b69a95c7..5ce6ebbc7f10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -188,7 +188,7 @@ static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer)
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
- mtt = (u64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++)
mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);
@@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
return NULL;
}
- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+ tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL);
if (!tracer)
return ERR_PTR(-ENOMEM);
@@ -982,7 +982,7 @@ destroy_workqueue:
tracer->dev = NULL;
destroy_workqueue(tracer->work_queue);
free_tracer:
- kfree(tracer);
+ kvfree(tracer);
return ERR_PTR(err);
}
@@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_destroy_log_buf(tracer);
flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
- kfree(tracer);
+ kvfree(tracer);
}
static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
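The tracer hunks switch a potentially large allocation from kzalloc to kvzalloc, and every matching kfree must become kvfree, because kvmalloc may hand back vmalloc memory that kfree cannot handle. A userspace analogue of why the alloc/free pair must match, using malloc with an mmap fallback; the header tag is this sketch's own bookkeeping, not how the kernel tracks it:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

struct kv_hdr {
	size_t total;
	int mapped;
};

/* Try the cheap allocator first, fall back to mmap when it fails,
 * loosely analogous to kvzalloc() falling back to vzalloc(). */
static void *kv_zalloc(size_t size)
{
	size_t total = sizeof(struct kv_hdr) + size;
	struct kv_hdr *h = calloc(1, total);
	int mapped = 0;

	if (!h) {
		h = mmap(NULL, total, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (h == MAP_FAILED)
			return NULL;
		mapped = 1;
	}
	h->total = total;
	h->mapped = mapped;
	return h + 1;
}

/* Only this free knows which allocator was used; plain free() on the
 * mmap case would corrupt the heap, just as kfree() on vmalloc memory
 * is a bug in the kernel. */
static void kv_free(void *p)
{
	struct kv_hdr *h;

	if (!p)
		return;
	h = (struct kv_hdr *)p - 1;
	if (h->mapped)
		munmap(h, h->total);
	else
		free(h);
}

int main(void)
{
	char *buf = kv_zalloc(1 << 20);

	if (!buf)
		return 1;
	strcpy(buf, "tracer buffer");
	puts(buf);
	kv_free(buf);
	return 0;
}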
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
new file mode 100644
index 000000000000..17ab7efe693d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "rsc_dump.h"
+#include "lib/mlx5.h"
+
+#define MLX5_SGMT_TYPE(SGMT) MLX5_SGMT_TYPE_##SGMT
+#define MLX5_SGMT_STR_ASSING(SGMT)[MLX5_SGMT_TYPE(SGMT)] = #SGMT
+static const char *const mlx5_rsc_sgmt_name[] = {
+ MLX5_SGMT_STR_ASSING(HW_CQPC),
+ MLX5_SGMT_STR_ASSING(HW_SQPC),
+ MLX5_SGMT_STR_ASSING(HW_RQPC),
+ MLX5_SGMT_STR_ASSING(FULL_SRQC),
+ MLX5_SGMT_STR_ASSING(FULL_CQC),
+ MLX5_SGMT_STR_ASSING(FULL_EQC),
+ MLX5_SGMT_STR_ASSING(FULL_QPC),
+ MLX5_SGMT_STR_ASSING(SND_BUFF),
+ MLX5_SGMT_STR_ASSING(RCV_BUFF),
+ MLX5_SGMT_STR_ASSING(SRQ_BUFF),
+ MLX5_SGMT_STR_ASSING(CQ_BUFF),
+ MLX5_SGMT_STR_ASSING(EQ_BUFF),
+ MLX5_SGMT_STR_ASSING(SX_SLICE),
+ MLX5_SGMT_STR_ASSING(SX_SLICE_ALL),
+ MLX5_SGMT_STR_ASSING(RDB),
+ MLX5_SGMT_STR_ASSING(RX_SLICE_ALL),
+};
+
+struct mlx5_rsc_dump {
+ u32 pdn;
+ struct mlx5_core_mkey mkey;
+ u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
+};
+
+struct mlx5_rsc_dump_cmd {
+ u64 mem_size;
+ u8 cmd[MLX5_ST_SZ_BYTES(resource_dump)];
+};
+
+static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5_rsc_sgmt_name); i++)
+ if (!strcmp(name, mlx5_rsc_sgmt_name[i]))
+ return i;
+
+ return -EINVAL;
+}
+
+static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
+{
+ void *data = page_address(page);
+ enum mlx5_sgmt_type sgmt_idx;
+ int num_of_items;
+ char *sgmt_name;
+ void *member;
+ void *menu;
+ int i;
+
+ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
+ num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
+
+ for (i = 0; i < num_of_items; i++) {
+ member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
+ sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
+ sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
+ if (sgmt_idx == -EINVAL)
+ continue;
+ rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
+ member, segment_type);
+ }
+}
+
+static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page)
+{
+ struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
+ struct device *ddev = &dev->pdev->dev;
+ u32 out_seq_num;
+ u32 in_seq_num;
+ dma_addr_t dma;
+ int err;
+
+ dma = dma_map_page(ddev, page, 0, cmd->mem_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ddev, dma)))
+ return -ENOMEM;
+
+ in_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num);
+ MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey.key);
+ MLX5_SET64(resource_dump, cmd->cmd, address, dma);
+
+ err = mlx5_core_access_reg(dev, cmd->cmd, sizeof(cmd->cmd), cmd->cmd,
+ sizeof(cmd->cmd), MLX5_REG_RESOURCE_DUMP, 0, 1);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to access err %d\n", err);
+ goto out;
+ }
+ out_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num);
+ if (out_seq_num && (in_seq_num + 1 != out_seq_num))
+ err = -EIO;
+out:
+ dma_unmap_page(ddev, dma, cmd->mem_size, DMA_FROM_DEVICE);
+ return err;
+}
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key)
+{
+ struct mlx5_rsc_dump_cmd *cmd;
+ int sgmt_type;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ sgmt_type = dev->rsc_dump->fw_segment_type[key->rsc];
+ if (!sgmt_type && key->rsc != MLX5_SGMT_TYPE_MENU)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ mlx5_core_err(dev, "Resource dump: Failed to allocate command\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ MLX5_SET(resource_dump, cmd->cmd, segment_type, sgmt_type);
+ MLX5_SET(resource_dump, cmd->cmd, index1, key->index1);
+ MLX5_SET(resource_dump, cmd->cmd, index2, key->index2);
+ MLX5_SET(resource_dump, cmd->cmd, num_of_obj1, key->num_of_obj1);
+ MLX5_SET(resource_dump, cmd->cmd, num_of_obj2, key->num_of_obj2);
+ MLX5_SET(resource_dump, cmd->cmd, size, key->size);
+ cmd->mem_size = key->size;
+ return cmd;
+}
+
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd)
+{
+ kfree(cmd);
+}
+
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size)
+{
+ bool more_dump;
+ int err;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return -EOPNOTSUPP;
+
+ err = mlx5_rsc_dump_trigger(dev, cmd, page);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to trigger dump, %d\n", err);
+ return err;
+ }
+ *size = MLX5_GET(resource_dump, cmd->cmd, size);
+ more_dump = MLX5_GET(resource_dump, cmd->cmd, more_dump);
+
+ return more_dump;
+}
+
+#define MLX5_RSC_DUMP_MENU_SEGMENT 0xffff
+static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump_cmd *cmd = NULL;
+ struct mlx5_rsc_key key = {};
+ struct page *page;
+ int size;
+ int err;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ key.rsc = MLX5_SGMT_TYPE_MENU;
+ key.size = PAGE_SIZE;
+ cmd = mlx5_rsc_dump_cmd_create(dev, &key);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto free_page;
+ }
+ MLX5_SET(resource_dump, cmd->cmd, segment_type, MLX5_RSC_DUMP_MENU_SEGMENT);
+
+ do {
+ err = mlx5_rsc_dump_next(dev, cmd, page, &size);
+ if (err < 0)
+ goto destroy_cmd;
+
+ mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
+
+ } while (err > 0);
+
+destroy_cmd:
+ mlx5_rsc_dump_cmd_destroy(cmd);
+free_page:
+ __free_page(page);
+
+ return err;
+}
+
+static int mlx5_rsc_dump_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_core_mkey *mkey)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+struct mlx5_rsc_dump *mlx5_rsc_dump_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump *rsc_dump;
+
+ if (!MLX5_CAP_DEBUG(dev, resource_dump)) {
+ mlx5_core_dbg(dev, "Resource dump: capability not present\n");
+ return NULL;
+ }
+ rsc_dump = kzalloc(sizeof(*rsc_dump), GFP_KERNEL);
+ if (!rsc_dump)
+ return ERR_PTR(-ENOMEM);
+
+ return rsc_dump;
+}
+
+void mlx5_rsc_dump_destroy(struct mlx5_core_dev *dev)
+{
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return;
+ kfree(dev->rsc_dump);
+}
+
+int mlx5_rsc_dump_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
+ int err;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return 0;
+
+ err = mlx5_core_alloc_pd(dev, &rsc_dump->pdn);
+ if (err) {
+ mlx5_core_warn(dev, "Resource dump: Failed to allocate PD %d\n", err);
+ return err;
+ }
+ err = mlx5_rsc_dump_create_mkey(dev, rsc_dump->pdn, &rsc_dump->mkey);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to create mkey, %d\n", err);
+ goto free_pd;
+ }
+ err = mlx5_rsc_dump_menu(dev);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to read menu, %d\n", err);
+ goto destroy_mkey;
+ }
+ return err;
+
+destroy_mkey:
+ mlx5_core_destroy_mkey(dev, &rsc_dump->mkey);
+free_pd:
+ mlx5_core_dealloc_pd(dev, rsc_dump->pdn);
+ return err;
+}
+
+void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev)
+{
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return;
+
+ mlx5_core_destroy_mkey(dev, &dev->rsc_dump->mkey);
+ mlx5_core_dealloc_pd(dev, dev->rsc_dump->pdn);
+}
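mlx5_rsc_dump_next() returns a negative errno on failure, 0 when the dump is complete, and a positive value while more_dump says another page is pending, so callers loop while the return is positive; the menu pass above and the health dump callers follow the same contract. A standalone sketch of that contract against a fake data source:

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 8

struct dump_state {
	const char *src;
	size_t off;
};

/* <0: error, 0: done, >0: more data pending -- the same return
 * contract as mlx5_rsc_dump_next(). */
static int dump_next(struct dump_state *s, char *page, int *size)
{
	size_t left = strlen(s->src) - s->off;
	size_t n = left < PAGE_SZ ? left : PAGE_SZ;

	memcpy(page, s->src + s->off, n);
	s->off += n;
	*size = (int)n;
	return s->off < strlen(s->src); /* 1 while more remains */
}

int main(void)
{
	struct dump_state s = { .src = "segment data spanning pages" };
	char page[PAGE_SZ];
	int size, ret;

	do {
		ret = dump_next(&s, page, &size);
		if (ret < 0)
			return 1;
		printf("%.*s", size, page); /* consume one page */
	} while (ret > 0);
	putchar('\n');
	return 0;
}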
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
new file mode 100644
index 000000000000..148270073e71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_RSC_DUMP_H
+#define __MLX5_RSC_DUMP_H
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+enum mlx5_sgmt_type {
+ MLX5_SGMT_TYPE_HW_CQPC,
+ MLX5_SGMT_TYPE_HW_SQPC,
+ MLX5_SGMT_TYPE_HW_RQPC,
+ MLX5_SGMT_TYPE_FULL_SRQC,
+ MLX5_SGMT_TYPE_FULL_CQC,
+ MLX5_SGMT_TYPE_FULL_EQC,
+ MLX5_SGMT_TYPE_FULL_QPC,
+ MLX5_SGMT_TYPE_SND_BUFF,
+ MLX5_SGMT_TYPE_RCV_BUFF,
+ MLX5_SGMT_TYPE_SRQ_BUFF,
+ MLX5_SGMT_TYPE_CQ_BUFF,
+ MLX5_SGMT_TYPE_EQ_BUFF,
+ MLX5_SGMT_TYPE_SX_SLICE,
+ MLX5_SGMT_TYPE_SX_SLICE_ALL,
+ MLX5_SGMT_TYPE_RDB,
+ MLX5_SGMT_TYPE_RX_SLICE_ALL,
+ MLX5_SGMT_TYPE_MENU,
+ MLX5_SGMT_TYPE_TERMINATE,
+
+ MLX5_SGMT_TYPE_NUM, /* Keep last */
+};
+
+struct mlx5_rsc_key {
+ enum mlx5_sgmt_type rsc;
+ int index1;
+ int index2;
+ int num_of_obj1;
+ int num_of_obj2;
+ int size;
+};
+
+#define MLX5_RSC_DUMP_ALL 0xFFFF
+struct mlx5_rsc_dump_cmd;
+struct mlx5_rsc_dump;
+
+struct mlx5_rsc_dump *mlx5_rsc_dump_create(struct mlx5_core_dev *dev);
+void mlx5_rsc_dump_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_rsc_dump_init(struct mlx5_core_dev *dev);
+void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev);
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key);
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
+
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size);
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c9606b8ab6ef..0a5aada0f50f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -204,7 +204,7 @@ struct mlx5e_tx_wqe {
struct mlx5e_rx_wqe_ll {
struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_cyc {
@@ -367,6 +367,7 @@ enum {
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
+ MLX5E_SQ_STATE_PENDING_XSK_TX,
};
struct mlx5e_sq_wqe_info {
@@ -738,7 +739,6 @@ struct mlx5e_channel {
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int cpu;
- cpumask_var_t xps_cpumask;
};
struct mlx5e_channels {
@@ -814,6 +814,15 @@ struct mlx5e_xsk {
bool ever_used;
};
+/* Temporary storage for variables that are allocated when struct mlx5e_priv is
+ * initialized, and used in functions that must not fail and therefore cannot
+ * allocate them on demand. Use with care and make sure the same variable is
+ * not used simultaneously by multiple users.
+ */
+struct mlx5e_scratchpad {
+ cpumask_var_t cpumask;
+};
+
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
@@ -873,10 +882,12 @@ struct mlx5e_priv {
#endif
struct devlink_health_reporter *tx_reporter;
struct devlink_health_reporter *rx_reporter;
+ struct devlink_port dl_port;
struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
+ struct mlx5e_scratchpad scratchpad;
};
struct mlx5e_profile {
@@ -950,7 +961,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
@@ -1036,23 +1047,33 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
-/* Function pointer to be used to modify WH settings while
+/* Function pointer to be used to modify HW or kernel settings while
* switching channels
*/
-typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
+#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
+int fn##_ctx(struct mlx5e_priv *priv, void *context) \
+{ \
+ return fn(priv); \
+}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
- mlx5e_fp_hw_modify hw_modify);
+ mlx5e_fp_preactivate preactivate,
+ void *context);
+int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
+int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
- u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
- u8 cq_period_mode);
+
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
@@ -1102,7 +1123,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
@@ -1124,10 +1145,10 @@ void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
-typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
+int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
- change_hw_mtu_cb set_mtu_cb);
+ mlx5e_fp_preactivate preactivate);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -1153,6 +1174,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings);
+int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
+ const u8 hfunc);
+int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs);
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
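mlx5e_safe_switch_channels() now takes a preactivate callback carrying a context pointer, and MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX generates a *_ctx adapter so existing context-free helpers keep their signatures. The macro pattern in isolation; struct priv and the function names are stand-ins for the mlx5e types:

#include <stdio.h>

struct priv { int mtu; };

typedef int (*preactivate_fn)(struct priv *priv, void *context);

/* Generate a fn_ctx() adapter that drops the unused context,
 * mirroring MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX. */
#define DEFINE_PREACTIVATE_WRAPPER_CTX(fn)		\
static int fn##_ctx(struct priv *priv, void *context)	\
{							\
	(void)context;					\
	return fn(priv);				\
}

static int set_dev_port_mtu(struct priv *priv)
{
	printf("applying mtu %d\n", priv->mtu);
	return 0;
}

DEFINE_PREACTIVATE_WRAPPER_CTX(set_dev_port_mtu)

static int switch_channels(struct priv *priv, preactivate_fn preactivate,
			   void *context)
{
	return preactivate(priv, context); /* runs before activation */
}

int main(void)
{
	struct priv priv = { .mtu = 9000 };

	return switch_channels(&priv, set_dev_port_mtu_ctx, NULL);
}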
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
new file mode 100644
index 000000000000..f8b2de4b04be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en/devlink.h"
+
+int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
+
+ if (mlx5_core_is_pf(priv->mdev))
+ devlink_port_attrs_set(&priv->dl_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ PCI_FUNC(priv->mdev->pdev->devfn),
+ false, 0,
+ NULL, 0);
+ else
+ devlink_port_attrs_set(&priv->dl_port,
+ DEVLINK_PORT_FLAVOUR_VIRTUAL,
+ 0, false, 0, NULL, 0);
+
+ return devlink_port_register(devlink, &priv->dl_port, 1);
+}
+
+void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
+{
+ devlink_port_type_eth_set(&priv->dl_port, priv->netdev);
+}
+
+void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
+{
+ devlink_port_unregister(&priv->dl_port);
+}
+
+struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return &priv->dl_port;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
new file mode 100644
index 000000000000..83123a801adc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_EN_DEVLINK_H
+#define __MLX5E_EN_DEVLINK_H
+
+#include <net/devlink.h>
+#include "en.h"
+
+int mlx5e_devlink_port_register(struct mlx5e_priv *priv);
+void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
+void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv);
+struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 20b907dc1e29..3a199a03d929 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -3,6 +3,7 @@
#include "health.h"
#include "lib/eq.h"
+#include "lib/mlx5.h"
int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
{
@@ -197,10 +198,114 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
struct devlink_health_reporter *reporter, char *err_str,
struct mlx5e_err_ctx *err_ctx)
{
- netdev_err(priv->netdev, err_str);
+ netdev_err(priv->netdev, "%s\n", err_str);
if (!reporter)
return err_ctx->recover(err_ctx->ctx);
return devlink_health_report(reporter, err_str, err_ctx);
}
+
+#define MLX5_HEALTH_DEVLINK_MAX_SIZE 1024
+static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
+ const void *value, u32 value_len)
+{
+ u32 data_size;
+ u32 offset;
+ int err = 0;
+
+ for (offset = 0; offset < value_len; offset += data_size) {
+ data_size = value_len - offset;
+ if (data_size > MLX5_HEALTH_DEVLINK_MAX_SIZE)
+ data_size = MLX5_HEALTH_DEVLINK_MAX_SIZE;
+ err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_rsc_dump_cmd *cmd;
+ struct page *page;
+ int cmd_err, err;
+ int end_err;
+ int size;
+
+ if (IS_ERR_OR_NULL(mdev->rsc_dump))
+ return -EOPNOTSUPP;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
+ if (err)
+ goto free_page;
+
+ cmd = mlx5_rsc_dump_cmd_create(mdev, key);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto free_page;
+ }
+
+ do {
+ cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
+ if (cmd_err < 0) {
+ err = cmd_err;
+ goto destroy_cmd;
+ }
+
+ err = mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size);
+ if (err)
+ goto destroy_cmd;
+
+ } while (cmd_err > 0);
+
+destroy_cmd:
+ mlx5_rsc_dump_cmd_destroy(cmd);
+ end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
+ if (end_err)
+ err = end_err;
+free_page:
+ __free_page(page);
+ return err;
+}
+
+int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl)
+{
+ struct mlx5_rsc_key key = {};
+ int err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = queue_idx;
+ key.size = PAGE_SIZE;
+ key.num_of_obj1 = 1;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, lbl);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx);
+ if (err)
+ return err;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_end(fmsg);
+}
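devlink fmsg binary pairs are bounded in size, so the resource dump is emitted in slices of at most MLX5_HEALTH_DEVLINK_MAX_SIZE bytes. The loop shape on its own, with emit() standing in for devlink_fmsg_binary_put():

#include <stdint.h>
#include <stdio.h>

#define MAX_CHUNK 1024

static int emit(const void *buf, uint32_t len)
{
	(void)buf;
	printf("chunk of %u bytes\n", len);
	return 0;
}

/* Walk the buffer in MAX_CHUNK slices, stopping on the first error,
 * as mlx5e_health_rsc_fmsg_binary() does. */
static int emit_chunked(const uint8_t *value, uint32_t value_len)
{
	uint32_t offset, data_size;
	int err = 0;

	for (offset = 0; offset < value_len; offset += data_size) {
		data_size = value_len - offset;
		if (data_size > MAX_CHUNK)
			data_size = MAX_CHUNK;
		err = emit(value + offset, data_size);
		if (err)
			break;
	}
	return err;
}

int main(void)
{
	static uint8_t dump[4096 + 100];

	return emit_chunked(dump, sizeof(dump)); /* 4 full chunks + 100 */
}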
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index e54f70d9af22..38f97f79ef16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -5,6 +5,7 @@
#define __MLX5E_EN_HEALTH_H
#include "en.h"
+#include "diag/rsc_dump.h"
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
@@ -35,6 +36,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
struct mlx5e_err_ctx {
int (*recover)(void *ctx);
+ int (*dump)(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, void *ctx);
void *ctx;
};
@@ -47,6 +49,8 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
int mlx5e_health_create_reporters(struct mlx5e_priv *priv);
void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
void mlx5e_health_channels_update(struct mlx5e_priv *priv);
-
-
+int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
+ struct devlink_fmsg *fmsg);
+int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
new file mode 100644
index 000000000000..ea321e528749
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <linux/hashtable.h>
+
+#include "mapping.h"
+
+#define MAPPING_GRACE_PERIOD 2000
+
+struct mapping_ctx {
+ struct xarray xarray;
+ DECLARE_HASHTABLE(ht, 8);
+ struct mutex lock; /* Guards hashtable and xarray */
+ unsigned long max_id;
+ size_t data_size;
+ bool delayed_removal;
+ struct delayed_work dwork;
+ struct list_head pending_list;
+ spinlock_t pending_list_lock; /* Guards pending list */
+};
+
+struct mapping_item {
+ struct rcu_head rcu;
+ struct list_head list;
+ unsigned long timeout;
+ struct hlist_node node;
+ int cnt;
+ u32 id;
+ char data[];
+};
+
+int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
+{
+ struct mapping_item *mi;
+ int err = -ENOMEM;
+ u32 hash_key;
+
+ mutex_lock(&ctx->lock);
+
+ hash_key = jhash(data, ctx->data_size, 0);
+ hash_for_each_possible(ctx->ht, mi, node, hash_key) {
+ if (!memcmp(data, mi->data, ctx->data_size))
+ goto attach;
+ }
+
+ mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL);
+ if (!mi)
+ goto err_alloc;
+
+ memcpy(mi->data, data, ctx->data_size);
+ hash_add(ctx->ht, &mi->node, hash_key);
+
+ err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
+ GFP_KERNEL);
+ if (err)
+ goto err_assign;
+attach:
+ ++mi->cnt;
+ *id = mi->id;
+
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+
+err_assign:
+ hash_del(&mi->node);
+ kfree(mi);
+err_alloc:
+ mutex_unlock(&ctx->lock);
+
+ return err;
+}
+
+static void mapping_remove_and_free(struct mapping_ctx *ctx,
+ struct mapping_item *mi)
+{
+ xa_erase(&ctx->xarray, mi->id);
+ kfree_rcu(mi, rcu);
+}
+
+static void mapping_free_item(struct mapping_ctx *ctx,
+ struct mapping_item *mi)
+{
+ if (!ctx->delayed_removal) {
+ mapping_remove_and_free(ctx, mi);
+ return;
+ }
+
+ mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD);
+
+ spin_lock(&ctx->pending_list_lock);
+ list_add_tail(&mi->list, &ctx->pending_list);
+ spin_unlock(&ctx->pending_list_lock);
+
+ schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD);
+}
+
+int mapping_remove(struct mapping_ctx *ctx, u32 id)
+{
+ unsigned long index = id;
+ struct mapping_item *mi;
+ int err = -ENOENT;
+
+ mutex_lock(&ctx->lock);
+ mi = xa_load(&ctx->xarray, index);
+ if (!mi)
+ goto out;
+ err = 0;
+
+ if (--mi->cnt > 0)
+ goto out;
+
+ hash_del(&mi->node);
+ mapping_free_item(ctx, mi);
+out:
+ mutex_unlock(&ctx->lock);
+
+ return err;
+}
+
+int mapping_find(struct mapping_ctx *ctx, u32 id, void *data)
+{
+ unsigned long index = id;
+ struct mapping_item *mi;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ mi = xa_load(&ctx->xarray, index);
+ if (!mi)
+ goto err_find;
+
+ memcpy(data, mi->data, ctx->data_size);
+ err = 0;
+
+err_find:
+ rcu_read_unlock();
+ return err;
+}
+
+static void
+mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list)
+{
+ struct mapping_item *mi;
+
+ list_for_each_entry(mi, list, list)
+ mapping_remove_and_free(ctx, mi);
+}
+
+static void mapping_work_handler(struct work_struct *work)
+{
+ unsigned long min_timeout = 0, now = jiffies;
+ struct mapping_item *mi, *next;
+ LIST_HEAD(pending_items);
+ struct mapping_ctx *ctx;
+
+ ctx = container_of(work, struct mapping_ctx, dwork.work);
+
+ spin_lock(&ctx->pending_list_lock);
+ list_for_each_entry_safe(mi, next, &ctx->pending_list, list) {
+ if (time_after(now, mi->timeout))
+ list_move(&mi->list, &pending_items);
+ else if (!min_timeout ||
+ time_before(mi->timeout, min_timeout))
+ min_timeout = mi->timeout;
+ }
+ spin_unlock(&ctx->pending_list_lock);
+
+ mapping_remove_and_free_list(ctx, &pending_items);
+
+ if (min_timeout)
+ schedule_delayed_work(&ctx->dwork, abs(min_timeout - now));
+}
+
+static void mapping_flush_work(struct mapping_ctx *ctx)
+{
+ if (!ctx->delayed_removal)
+ return;
+
+ cancel_delayed_work_sync(&ctx->dwork);
+ mapping_remove_and_free_list(ctx, &ctx->pending_list);
+}
+
+struct mapping_ctx *
+mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
+{
+ struct mapping_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->max_id = max_id ? max_id : UINT_MAX;
+ ctx->data_size = data_size;
+
+ if (delayed_removal) {
+ INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler);
+ INIT_LIST_HEAD(&ctx->pending_list);
+ spin_lock_init(&ctx->pending_list_lock);
+ ctx->delayed_removal = true;
+ }
+
+ mutex_init(&ctx->lock);
+ xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);
+
+ return ctx;
+}
+
+void mapping_destroy(struct mapping_ctx *ctx)
+{
+ mapping_flush_work(ctx);
+ xa_destroy(&ctx->xarray);
+ mutex_destroy(&ctx->lock);
+
+ kfree(ctx);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
new file mode 100644
index 000000000000..285525cc5470
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#ifndef __MLX5_MAPPING_H__
+#define __MLX5_MAPPING_H__
+
+struct mapping_ctx;
+
+int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id);
+int mapping_remove(struct mapping_ctx *ctx, u32 id);
+int mapping_find(struct mapping_ctx *ctx, u32 id, void *data);
+
+/* mapping uses an xarray to map data to ids in add(), and for find().
+ * For locking, it uses an internal xarray spin lock for add()/remove();
+ * find() uses rcu_read_lock().
+ * Choosing delayed_removal postpones the removal of a previously mapped
+ * id by MAPPING_GRACE_PERIOD milliseconds.
+ * This avoids a race against hardware: a packet may still be marked with
+ * a previous id when a quick remove() and add() reuse that same id, in
+ * which case find() would return the new mapping instead of the one the
+ * packet was actually marked with.
+ */
+struct mapping_ctx *mapping_create(size_t data_size, u32 max_id,
+ bool delayed_removal);
+void mapping_destroy(struct mapping_ctx *ctx);
+
+#endif /* __MLX5_MAPPING_H__ */
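The mapping context deduplicates identical metadata behind one refcounted ID: add() interns the data (hash lookup, then xarray ID allocation), and remove() drops a reference and defers the real free by a grace period so an ID still marked on in-flight packets cannot be recycled immediately. A compact userspace sketch of the intern-with-refcount half, using a fixed-size table instead of the hashtable-plus-xarray pair:

#include <stdio.h>
#include <string.h>

#define DATA_SZ 8
#define MAX_IDS 16

struct map_item {
	char data[DATA_SZ];
	int cnt; /* 0 means the slot is free */
};

static struct map_item table[MAX_IDS];

/* Return an existing ID for identical data, else claim a new one. */
static int mapping_add(const void *data)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_IDS; i++) {
		if (table[i].cnt && !memcmp(table[i].data, data, DATA_SZ)) {
			table[i].cnt++;
			return i + 1; /* IDs start at 1, like XA_LIMIT(1, ...) */
		}
		if (!table[i].cnt && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return -1;
	memcpy(table[free_slot].data, data, DATA_SZ);
	table[free_slot].cnt = 1;
	return free_slot + 1;
}

static void mapping_remove(int id)
{
	/* The real code defers this free by MAPPING_GRACE_PERIOD ms so a
	 * packet marked with the old ID cannot meet a recycled one. */
	if (--table[id - 1].cnt > 0)
		return;
	memset(&table[id - 1], 0, sizeof(table[id - 1]));
}

int main(void)
{
	int a = mapping_add("tun0key");
	int b = mapping_add("tun0key"); /* same data -> same ID */

	printf("a=%d b=%d\n", a, b);
	mapping_remove(b);
	mapping_remove(a);
	return 0;
}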
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index fce6eccdcf8b..2a8950b3056f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -343,64 +343,78 @@ out:
return err;
}
-static u32 fec_supported_speeds[] = {
- 10000,
- 40000,
- 25000,
- 50000,
- 56000,
- 100000
+enum mlx5e_fec_supported_link_mode {
+ MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_25G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_50G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_56G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_100G,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X,
+ MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
-#define MLX5E_FEC_SUPPORTED_SPEEDS ARRAY_SIZE(fec_supported_speeds)
+#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
+
+#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
+ do { \
+ u16 *_policy = &(policy); \
+ u32 *_buf = buf; \
+ \
+ if (write) \
+ MLX5_SET(pplm_reg, _buf, fec_override_admin_##link, *_policy); \
+ else \
+ *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
+ } while (0)
+
+#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
+ do { \
+ unsigned long policy_long; \
+ u16 *__policy = &(policy); \
+ bool _write = (write); \
+ \
+ policy_long = *__policy; \
+ if (_write && *__policy) \
+ *__policy = find_first_bit(&policy_long, \
+ sizeof(policy_long) * BITS_PER_BYTE);\
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
+ if (!_write && *__policy) \
+ *__policy = 1 << *__policy; \
+ } while (0)
/* get/set FEC admin field for a given speed */
-static int mlx5e_fec_admin_field(u32 *pplm,
- u8 *fec_policy,
- bool write,
- u32 speed)
+static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
+ enum mlx5e_fec_supported_link_mode link_mode)
{
- switch (speed) {
- case 10000:
- case 40000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_10g_40g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_10g_40g, *fec_policy);
+ switch (link_mode) {
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 10g_40g);
break;
- case 25000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_25g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_25g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_25G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 25g);
break;
- case 50000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_50g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_50g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_50G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g);
break;
- case 56000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_56g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_56g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_56G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 56g);
break;
- case 100000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_100g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_100g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_100G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
default:
return -EINVAL;
@@ -408,32 +422,40 @@ static int mlx5e_fec_admin_field(u32 *pplm,
return 0;
}
+#define MLX5E_GET_FEC_OVERRIDE_CAP(buf, link) \
+ MLX5_GET(pplm_reg, buf, fec_override_cap_##link)
+
/* returns FEC capabilities for a given speed */
-static int mlx5e_get_fec_cap_field(u32 *pplm,
- u8 *fec_cap,
- u32 speed)
+static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
+ enum mlx5e_fec_supported_link_mode link_mode)
{
- switch (speed) {
- case 10000:
- case 40000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_10g_40g);
+ switch (link_mode) {
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 10g_40g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_25G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 25g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_50G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 50g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_56G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 56g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_100G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g);
break;
- case 25000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_25g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 50g_1x);
break;
- case 50000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_50g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g_2x);
break;
- case 56000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_56g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_4x);
break;
- case 100000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_100g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_8x);
break;
default:
return -EINVAL;
@@ -441,13 +463,14 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
return 0;
}
-int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
+bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
{
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u32 current_fec_speed;
int err;
+ int i;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
@@ -458,23 +481,30 @@ int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
- return err;
+ return false;
- err = mlx5e_port_linkspeed(dev, &current_fec_speed);
- if (err)
- return err;
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ u16 fec_caps;
- return mlx5e_get_fec_cap_field(out, fec_caps, current_fec_speed);
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
+
+ mlx5e_get_fec_cap_field(out, &fec_caps, i);
+ if (fec_caps & fec_policy)
+ return true;
+ }
+ return false;
}
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
- u8 *fec_configured_mode)
+ u16 *fec_configured_mode)
{
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u32 link_speed;
int err;
+ int i;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
@@ -490,24 +520,28 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
*fec_mode_active = MLX5_GET(pplm_reg, out, fec_mode_active);
if (!fec_configured_mode)
- return 0;
+ goto out;
- err = mlx5e_port_linkspeed(dev, &link_speed);
- if (err)
- return err;
+ *fec_configured_mode = 0;
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
- return mlx5e_fec_admin_field(out, fec_configured_mode, 0, link_speed);
+ mlx5e_fec_admin_field(out, fec_configured_mode, 0, i);
+ if (*fec_configured_mode != 0)
+ goto out;
+ }
+out:
+ return 0;
}
-int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
{
- u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
- bool fec_mode_not_supp_in_speed = false;
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u8 fec_policy_auto = 0;
- u8 fec_caps = 0;
+ u16 fec_policy_auto = 0;
int err;
int i;
@@ -517,6 +551,9 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
if (!MLX5_CAP_PCAM_REG(dev, pplm))
return -EOPNOTSUPP;
+ if (fec_policy >= (1 << MLX5E_FEC_LLRS_272_257_1) && !fec_50g_per_lane)
+ return -EOPNOTSUPP;
+
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
@@ -524,25 +561,31 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
MLX5_SET(pplm_reg, out, local_port, 1);
- for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
- mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
- /* policy supported for link speed, or policy is auto */
- if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
- mlx5e_fec_admin_field(out, &fec_policy, 1,
- fec_supported_speeds[i]);
- } else {
- /* turn off FEC if supported. Else, leave it the same */
- if (fec_caps & fec_policy_nofec)
- mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
- fec_supported_speeds[i]);
- fec_mode_not_supp_in_speed = true;
- }
- }
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ u16 conf_fec = fec_policy;
+ u16 fec_caps = 0;
+
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
- if (fec_mode_not_supp_in_speed)
- mlx5_core_dbg(dev,
- "FEC policy 0x%x is not supported for some speeds",
- fec_policy);
+ /* RS FEC in ethtool maps to MLX5E_FEC_RS_528_514 for link
+ * modes up to 25G per lane, and to MLX5E_FEC_RS_544_514 for
+ * the newer link modes based on 50G per lane
+ */
+ if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
+ i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
+ conf_fec = (1 << MLX5E_FEC_RS_544_514);
+
+ mlx5e_get_fec_cap_field(out, &fec_caps, i);
+
+ /* policy supported for link speed */
+ if (fec_caps & conf_fec)
+ mlx5e_fec_admin_field(out, &conf_fec, 1, i);
+ else
+ /* set FEC to auto */
+ mlx5e_fec_admin_field(out, &fec_policy_auto, 1, i);
+ }
return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
}
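The loop above applies the RS 528,514 -> RS 544,514 substitution described in the in-code comment on a per-link-mode basis. A minimal userspace sketch of that mapping (illustration only; the value of MLX5E_FEC_FIRST_50G_PER_LANE_MODE is assumed here, and conf_fec_for_mode() is a hypothetical helper, not part of the patch):

	/* Illustration only: per-link-mode FEC policy substitution. */
	#include <stdio.h>

	enum {
		MLX5E_FEC_NOFEC,
		MLX5E_FEC_FIRECODE,
		MLX5E_FEC_RS_528_514,
		MLX5E_FEC_RS_544_514 = 7,
		MLX5E_FEC_LLRS_272_257_1 = 9,
	};

	#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE 4	/* assumed index for the demo */

	static unsigned short conf_fec_for_mode(unsigned short policy, int mode)
	{
		/* ethtool's one RS policy becomes the 544,514 variant on
		 * 50G-per-lane link modes, as in mlx5e_set_fec_mode(). */
		if (policy == (1 << MLX5E_FEC_RS_528_514) &&
		    mode >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
			return 1 << MLX5E_FEC_RS_544_514;
		return policy;
	}

	int main(void)
	{
		unsigned short rs = 1 << MLX5E_FEC_RS_528_514;

		printf("mode 0 -> 0x%04x, mode 5 -> 0x%04x\n",
		       conf_fec_for_mode(rs, 0), conf_fec_for_mode(rs, 5));
		return 0;	/* prints 0x0004 and 0x0080 */
	}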
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 4a7f4497692b..a2ddd446dd59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -60,15 +60,17 @@ int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
-int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps);
+bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy);
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
- u8 *fec_configured_mode);
-int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy);
+ u16 *fec_configured_mode);
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy);
enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
+ MLX5E_FEC_RS_544_514 = 7,
+ MLX5E_FEC_LLRS_272_257_1 = 9,
};
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index a01e2de2488f..c209579fc213 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -102,19 +102,6 @@ out:
return err;
}
-void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
-{
- struct mlx5e_priv *priv = icosq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = icosq;
- err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
- sprintf(err_str, "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
{
struct net_device *dev = rq->netdev;
@@ -171,19 +158,6 @@ out:
return err;
}
-void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
-{
- struct mlx5e_priv *priv = rq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = rq;
- err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
- sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
struct mlx5e_icosq *icosq;
@@ -201,21 +175,6 @@ static int mlx5e_rx_reporter_timeout_recover(void *ctx)
return err;
}
-void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
-{
- struct mlx5e_icosq *icosq = &rq->channel->icosq;
- struct mlx5e_priv *priv = rq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = rq;
- err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
- sprintf(err_str, "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x\n",
- icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
return err_ctx->recover(err_ctx->ctx);
@@ -371,10 +330,235 @@ unlock:
return err;
}
+static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5e_txqsq *icosq = ctx;
+ struct mlx5_rsc_key key = {};
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "ICOSQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = icosq->sqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5_rsc_key key = {};
+ struct mlx5e_rq *rq = ctx;
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = rq->rqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "receive_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_RCV_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_rsc_key key = {};
+ int i, err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
+ if (err)
+ return err;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+
+ err = mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
+ if (err)
+ return err;
+ }
+
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
+ struct mlx5e_err_ctx *err_ctx,
+ struct devlink_fmsg *fmsg)
+{
+ return err_ctx->dump(priv, fmsg, err_ctx->ctx);
+}
+
+static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_rx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
+ mlx5e_rx_reporter_dump_all_rqs(priv, fmsg);
+}
+
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+{
+ struct mlx5e_icosq *icosq = &rq->channel->icosq;
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_rq;
+ snprintf(err_str, sizeof(err_str),
+ "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x",
+ icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_rq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on RQ: 0x%x", rq->rqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_priv *priv = icosq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = icosq;
+ err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_icosq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
.diagnose = mlx5e_rx_reporter_diagnose,
+ .dump = mlx5e_rx_reporter_dump,
};
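With the .dump callback wired up, the reporter now serves two cases: a targeted dump carrying an err_ctx from a specific error flow, and a context-less dump requested through devlink, which walks all RQs. A compact userspace model of that dispatch (illustration only; all names below are hypothetical stand-ins):

	/* Userspace model of the dump dispatch above (illustration only). */
	#include <stdio.h>

	struct err_ctx {
		int qn;
		int (*dump)(struct err_ctx *ctx);
	};

	static int dump_one(struct err_ctx *ctx)
	{
		printf("dump queue 0x%x\n", ctx->qn);
		return 0;
	}

	static int dump_all(void)
	{
		printf("dump all queues\n");
		return 0;
	}

	/* Mirrors mlx5e_rx_reporter_dump(): a NULL context means the dump
	 * was requested by the user rather than triggered by an error flow. */
	static int reporter_dump(struct err_ctx *ctx)
	{
		return ctx ? ctx->dump(ctx) : dump_all();
	}

	int main(void)
	{
		struct err_ctx ctx = { .qn = 0x1a, .dump = dump_one };

		reporter_dump(&ctx);
		reporter_dump(NULL);
		return 0;
	}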
#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
@@ -387,7 +571,7 @@ int mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
reporter = devlink_health_reporter_create(devlink,
&mlx5_rx_reporter_ops,
MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
- true, priv);
+ priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
PTR_ERR(reporter));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index b468549e96ff..9805fc085512 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -82,19 +82,6 @@ out:
return err;
}
-void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
-{
- struct mlx5e_priv *priv = sq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {0};
-
- err_ctx.ctx = sq;
- err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
- sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
-
- mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
struct mlx5_eq_comp *eq;
@@ -110,22 +97,6 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
-int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
-{
- struct mlx5e_priv *priv = sq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx;
-
- err_ctx.ctx = sq;
- err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
- sprintf(err_str,
- "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
- sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
- jiffies_to_usecs(jiffies - sq->txq->trans_start));
-
- return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
-}
-
/* state lock cannot be grabbed within this function.
* It can cause a dead lock or a read-after-free.
*/
@@ -275,10 +246,162 @@ unlock:
return err;
}
+static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5_rsc_key key = {};
+ struct mlx5e_txqsq *sq = ctx;
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = sq->sqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_rsc_key key = {};
+ int i, tc, err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
+ if (err)
+ return err;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ struct mlx5e_txqsq *sq = &c->sq[tc];
+
+ err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
+ if (err)
+ return err;
+ }
+ }
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
+ struct mlx5e_err_ctx *err_ctx,
+ struct devlink_fmsg *fmsg)
+{
+ return err_ctx->dump(priv, fmsg, err_ctx->ctx);
+}
+
+static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
+ mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
+}
+
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
+ err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);
+
+ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+}
+
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+ err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ snprintf(err_str, sizeof(err_str),
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
+ sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ jiffies_to_usecs(jiffies - sq->txq->trans_start));
+
+ return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+}
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
.diagnose = mlx5e_tx_reporter_diagnose,
+ .dump = mlx5e_tx_reporter_dump,
};
#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
@@ -293,7 +416,7 @@ int mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
reporter =
devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD,
- true, priv);
+ priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
"Failed to create tx reporter, err = %ld\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
new file mode 100644
index 000000000000..4eb305af0106
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -0,0 +1,1366 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <uapi/linux/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_ct.h>
+#include <net/flow_offload.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+
+#include "esw/chains.h"
+#include "en/tc_ct.h"
+#include "en.h"
+#include "en_tc.h"
+#include "en_rep.h"
+
+#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen * 8)
+#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
+#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
+#define MLX5_CT_STATE_TRK_BIT BIT(2)
+
+#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
+#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
+#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX
+
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
+
+struct mlx5_tc_ct_priv {
+ struct mlx5_eswitch *esw;
+ const struct net_device *netdev;
+ struct idr fte_ids;
+ struct xarray tuple_ids;
+ struct rhashtable zone_ht;
+ struct mlx5_flow_table *ct;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5_flow_table *post_ct;
+ struct mutex control_lock; /* guards parallel adds/dels */
+};
+
+struct mlx5_ct_flow {
+ struct mlx5_esw_flow_attr pre_ct_attr;
+ struct mlx5_esw_flow_attr post_ct_attr;
+ struct mlx5_flow_handle *pre_ct_rule;
+ struct mlx5_flow_handle *post_ct_rule;
+ struct mlx5_ct_ft *ft;
+ u32 fte_id;
+ u32 chain_mapping;
+};
+
+struct mlx5_ct_zone_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_esw_flow_attr attr;
+ int tupleid;
+ bool nat;
+};
+
+struct mlx5_ct_ft {
+ struct rhash_head node;
+ u16 zone;
+ refcount_t refcount;
+ struct nf_flowtable *nf_ft;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct rhashtable ct_entries_ht;
+};
+
+struct mlx5_ct_entry {
+ u16 zone;
+ struct rhash_head node;
+ struct flow_rule *flow_rule;
+ struct mlx5_fc *counter;
+ unsigned long lastuse;
+ unsigned long cookie;
+ unsigned long restore_cookie;
+ struct mlx5_ct_zone_rule zone_rules[2];
+};
+
+static const struct rhashtable_params cts_ht_params = {
+ .head_offset = offsetof(struct mlx5_ct_entry, node),
+ .key_offset = offsetof(struct mlx5_ct_entry, cookie),
+ .key_len = sizeof(((struct mlx5_ct_entry *)0)->cookie),
+ .automatic_shrinking = true,
+ .min_size = 16 * 1024,
+};
+
+static const struct rhashtable_params zone_params = {
+ .head_offset = offsetof(struct mlx5_ct_ft, node),
+ .key_offset = offsetof(struct mlx5_ct_ft, zone),
+ .key_len = sizeof(((struct mlx5_ct_ft *)0)->zone),
+ .automatic_shrinking = true,
+};
+
+static struct mlx5_tc_ct_priv *
+mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ return uplink_priv->ct_priv;
+}
+
+static int
+mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec,
+ struct flow_rule *rule)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(match.mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(match.key->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ match.mask->ip_proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ match.key->ip_proto);
+
+ ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &match.mask->src, sizeof(match.mask->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &match.key->src, sizeof(match.key->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &match.key->dst, sizeof(match.key->dst));
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.mask->src, sizeof(match.mask->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.key->src, sizeof(match.key->src));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.key->dst, sizeof(match.key->dst));
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ tcp_sport, ntohs(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ tcp_sport, ntohs(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ tcp_dport, ntohs(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ tcp_dport, ntohs(match.key->dst));
+ break;
+
+ case IPPROTO_UDP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_sport, ntohs(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_sport, ntohs(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_dport, ntohs(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_dport, ntohs(match.key->dst));
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+
+ flow_rule_match_tcp(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(match.mask->flags));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+ ntohs(match.key->flags));
+ }
+
+ return 0;
+}
+
+static void
+mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry,
+ bool nat)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+
+ ct_dbg("Deleting ct entry rule in zone %d", entry->zone);
+
+ mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
+ mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+ xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
+}
+
+static void
+mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry)
+{
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+
+ mlx5_fc_destroy(ct_priv->esw->dev, entry->counter);
+}
+
+static struct flow_action_entry *
+mlx5_tc_ct_get_ct_metadata_action(struct flow_rule *flow_rule)
+{
+ struct flow_action *flow_action = &flow_rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT_METADATA)
+ return act;
+ }
+
+ return NULL;
+}
+
+static int
+mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
+ u8 ct_state,
+ u32 mark,
+ u32 label,
+ u32 tupleid)
+{
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ int err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ CTSTATE_TO_REG, ct_state);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ MARK_TO_REG, mark);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ LABELS_TO_REG, label);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ TUPLEID_TO_REG, tupleid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
+ char *modact)
+{
+ u32 offset = act->mangle.offset, field;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ MLX5_SET(set_action_in, modact, length, 0);
+ if (offset == offsetof(struct iphdr, saddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV4;
+ else if (offset == offsetof(struct iphdr, daddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV4;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ MLX5_SET(set_action_in, modact, length, 0);
+ if (offset == offsetof(struct ipv6hdr, saddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 4)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 8)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 12)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96;
+ else if (offset == offsetof(struct ipv6hdr, daddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 4)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 8)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 12)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ MLX5_SET(set_action_in, modact, length, 16);
+ if (offset == offsetof(struct tcphdr, source))
+ field = MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT;
+ else if (offset == offsetof(struct tcphdr, dest))
+ field = MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ MLX5_SET(set_action_in, modact, length, 16);
+ if (offset == offsetof(struct udphdr, source))
+ field = MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT;
+ else if (offset == offsetof(struct udphdr, dest))
+ field = MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, offset, 0);
+ MLX5_SET(set_action_in, modact, field, field);
+ MLX5_SET(set_action_in, modact, data, act->mangle.val);
+
+ return 0;
+}
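The translation above keys purely off the byte offset of the mangled word within the header. A quick userspace check of the two offsets the IPv4 case accepts (illustration only):

	/* Illustration: the IPv4 mangle offsets accepted above. */
	#include <stdio.h>
	#include <stddef.h>
	#include <linux/ip.h>

	int main(void)
	{
		/* offsetof(saddr) == 12 and offsetof(daddr) == 16 are the
		 * only IPv4 offsets mlx5_tc_ct_parse_mangle_to_mod_act()
		 * maps to mlx5 set-action fields. */
		printf("saddr at %zu, daddr at %zu\n",
		       offsetof(struct iphdr, saddr),
		       offsetof(struct iphdr, daddr));
		return 0;
	}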
+
+static int
+mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+ struct flow_action *flow_action = &flow_rule->action;
+ struct mlx5_core_dev *mdev = ct_priv->esw->dev;
+ struct flow_action_entry *act;
+ size_t action_size;
+ char *modact;
+ int err, i;
+
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE: {
+ err = alloc_mod_hdr_actions(mdev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts);
+ if (err)
+ return err;
+
+ modact = mod_acts->actions +
+ mod_acts->num_actions * action_size;
+
+ err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
+ if (err)
+ return err;
+
+ mod_acts->num_actions++;
+ }
+ break;
+
+ case FLOW_ACTION_CT_METADATA:
+ /* Handled earlier */
+ continue;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_esw_flow_attr *attr,
+ struct flow_rule *flow_rule,
+ u32 tupleid,
+ bool nat)
+{
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct flow_action_entry *meta;
+ int err;
+
+ meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
+ if (!meta)
+ return -EOPNOTSUPP;
+
+ if (meta->ct_metadata.labels[1] ||
+ meta->ct_metadata.labels[2] ||
+ meta->ct_metadata.labels[3]) {
+ ct_dbg("Failed to offload ct entry due to unsupported label");
+ return -EOPNOTSUPP;
+ }
+
+ if (nat) {
+ err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
+ &mod_acts);
+ if (err)
+ goto err_mapping;
+ }
+
+ err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
+ (MLX5_CT_STATE_ESTABLISHED_BIT |
+ MLX5_CT_STATE_TRK_BIT),
+ meta->ct_metadata.mark,
+ meta->ct_metadata.labels[0],
+ tupleid);
+ if (err)
+ goto err_mapping;
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mapping;
+ }
+ attr->modify_hdr = mod_hdr;
+
+ dealloc_mod_hdr_actions(&mod_acts);
+ return 0;
+
+err_mapping:
+ dealloc_mod_hdr_actions(&mod_acts);
+ return err;
+}
+
+static int
+mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_spec *spec = NULL;
+ u32 tupleid;
+ int err;
+
+ zone_rule->nat = nat;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Get tuple unique id */
+ err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule,
+ XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL);
+ if (err) {
+ netdev_warn(ct_priv->netdev,
+ "Failed to allocate tuple id, err: %d\n", err);
+ goto err_xa_alloc;
+ }
+ zone_rule->tupleid = tupleid;
+
+ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
+ tupleid, nat);
+ if (err) {
+ ct_dbg("Failed to create ct entry mod hdr");
+ goto err_mod_hdr;
+ }
+
+ attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = 0;
+ attr->dest_ft = ct_priv->post_ct;
+ attr->fdb = nat ? ct_priv->ct_nat : ct_priv->ct;
+ attr->outer_match_level = MLX5_MATCH_L4;
+ attr->counter = entry->counter;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+
+ mlx5_tc_ct_set_tuple_match(spec, flow_rule);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
+ entry->zone & MLX5_CT_ZONE_MASK,
+ MLX5_CT_ZONE_MASK);
+
+ zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ if (IS_ERR(zone_rule->rule)) {
+ err = PTR_ERR(zone_rule->rule);
+ ct_dbg("Failed to add ct entry rule, nat: %d", nat);
+ goto err_rule;
+ }
+
+ kfree(spec);
+ ct_dbg("Offloaded ct entry rule in zone %d", entry->zone);
+
+ return 0;
+
+err_rule:
+ mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+err_mod_hdr:
+ xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
+err_xa_alloc:
+ kfree(spec);
+ return err;
+}
+
+static int
+mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry)
+{
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ int err;
+
+ entry->counter = mlx5_fc_create(esw->dev, true);
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
+ ct_dbg("Failed to create counter for ct entry");
+ return err;
+ }
+
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false);
+ if (err)
+ goto err_orig;
+
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true);
+ if (err)
+ goto err_nat;
+
+ return 0;
+
+err_nat:
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+err_orig:
+ mlx5_fc_destroy(esw->dev, entry->counter);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *flow_rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_action_entry *meta_action;
+ unsigned long cookie = flow->cookie;
+ struct mlx5_ct_entry *entry;
+ int err;
+
+ meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
+ if (!meta_action)
+ return -EOPNOTSUPP;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (entry)
+ return 0;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->zone = ft->zone;
+ entry->flow_rule = flow_rule;
+ entry->cookie = flow->cookie;
+ entry->restore_cookie = meta_action->ct_metadata.cookie;
+
+ err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry);
+ if (err)
+ goto err_rules;
+
+ err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node,
+ cts_ht_params);
+ if (err)
+ goto err_insert;
+
+ return 0;
+
+err_insert:
+ mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+err_rules:
+ kfree(entry);
+ netdev_warn(ct_priv->netdev,
+ "Failed to offload ct entry, err: %d\n", err);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ unsigned long cookie = flow->cookie;
+ struct mlx5_ct_entry *entry;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+ WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
+ &entry->node,
+ cts_ht_params));
+ kfree(entry);
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *f)
+{
+ unsigned long cookie = f->cookie;
+ struct mlx5_ct_entry *entry;
+ u64 lastuse, packets, bytes;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *f = type_data;
+ struct mlx5_ct_ft *ft = cb_priv;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return mlx5_tc_ct_block_flow_offload_add(ft, f);
+ case FLOW_CLS_DESTROY:
+ return mlx5_tc_ct_block_flow_offload_del(ft, f);
+ case FLOW_CLS_STATS:
+ return mlx5_tc_ct_block_flow_offload_stats(ft, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector_key_ct *mask, *key;
+ bool trk, est, untrk, unest, new;
+ u32 ctstate = 0, ctstate_mask = 0;
+ u16 ct_state_on, ct_state_off;
+ u16 ct_state, ct_state_mask;
+ struct flow_match_ct match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
+ return 0;
+
+ if (!ct_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload of ct matching isn't available");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_ct(rule, &match);
+
+ key = match.key;
+ mask = match.mask;
+
+ ct_state = key->ct_state;
+ ct_state_mask = mask->ct_state;
+
+ if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+ TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
+ TCA_FLOWER_KEY_CT_FLAGS_NEW)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "only ct_state trk, est and new are supported for offload");
+ return -EOPNOTSUPP;
+ }
+
+ if (mask->ct_labels[1] || mask->ct_labels[2] || mask->ct_labels[3]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "only lower 32bits of ct_labels are supported for offload");
+ return -EOPNOTSUPP;
+ }
+
+ ct_state_on = ct_state & ct_state_mask;
+ ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask;
+ trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
+ new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
+ est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
+ unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+
+ ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+
+ if (new) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "matching on ct_state +new isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (mask->ct_zone)
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
+ key->ct_zone, MLX5_CT_ZONE_MASK);
+ if (ctstate_mask)
+ mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
+ ctstate, ctstate_mask);
+ if (mask->ct_mark)
+ mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG,
+ key->ct_mark, mask->ct_mark);
+ if (mask->ct_labels[0])
+ mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG,
+ key->ct_labels[0],
+ mask->ct_labels[0]);
+
+ return 0;
+}
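The ct_state_on/ct_state_off split above turns the TC key/mask pair into bits that must be set versus bits that must be clear. A runnable sketch of that bit math (illustration only; the flag values here are stand-ins, not the real TCA_FLOWER_KEY_CT_FLAGS_* values):

	/* Illustration: deriving required-set and required-clear bits. */
	#include <stdio.h>

	#define TRK 0x1	/* stand-in flag values for the demo */
	#define EST 0x2

	int main(void)
	{
		unsigned short state = TRK;	/* +trk, est bit clear */
		unsigned short mask = TRK | EST;	/* both flags matched */
		unsigned short on  = state & mask;		/* bits that must be set */
		unsigned short off = (state & mask) ^ mask;	/* bits that must be clear */

		printf("on=0x%x off=0x%x\n", on, off);	/* on=0x1 off=0x2 */
		return 0;
	}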
+
+int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+
+ if (!ct_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload of ct action isn't available");
+ return -EOPNOTSUPP;
+ }
+
+ attr->ct_attr.zone = act->ct.zone;
+ attr->ct_attr.ct_action = act->ct.action;
+ attr->ct_attr.nf_ft = act->ct.flow_table;
+
+ return 0;
+}
+
+static struct mlx5_ct_ft *
+mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
+ struct nf_flowtable *nf_ft)
+{
+ struct mlx5_ct_ft *ft;
+ int err;
+
+ ft = rhashtable_lookup_fast(&ct_priv->zone_ht, &zone, zone_params);
+ if (ft) {
+ refcount_inc(&ft->refcount);
+ return ft;
+ }
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ if (!ft)
+ return ERR_PTR(-ENOMEM);
+
+ ft->zone = zone;
+ ft->nf_ft = nf_ft;
+ ft->ct_priv = ct_priv;
+ refcount_set(&ft->refcount, 1);
+
+ err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
+ if (err)
+ goto err_init;
+
+ err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
+ zone_params);
+ if (err)
+ goto err_insert;
+
+ err = nf_flow_table_offload_add_cb(ft->nf_ft,
+ mlx5_tc_ct_block_flow_offload, ft);
+ if (err)
+ goto err_add_cb;
+
+ return ft;
+
+err_add_cb:
+ rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
+err_insert:
+ rhashtable_destroy(&ft->ct_entries_ht);
+err_init:
+ kfree(ft);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
+{
+ struct mlx5_tc_ct_priv *ct_priv = arg;
+ struct mlx5_ct_entry *entry = ptr;
+
+ mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+}
+
+static void
+mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
+{
+ if (!refcount_dec_and_test(&ft->refcount))
+ return;
+
+ nf_flow_table_offload_del_cb(ft->nf_ft,
+ mlx5_tc_ct_block_flow_offload, ft);
+ rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
+ rhashtable_free_and_destroy(&ft->ct_entries_ht,
+ mlx5_tc_ct_flush_ft_entry,
+ ct_priv);
+ kfree(ft);
+}
+
+/* We translate the tc filter with CT action to the following HW model:
+ *
+ * +-------------------+ +--------------------+ +--------------+
+ * + pre_ct (tc chain) +----->+ CT (nat or no nat) +--->+ post_ct +----->
+ * + original match + | + tuple + zone match + | + fte_id match + |
+ * +-------------------+ | +--------------------+ | +--------------+ |
+ * v v v
+ * set chain miss mapping set mark original
+ * set fte_id set label filter
+ * set zone set established actions
+ * set tunnel_id do nat (if needed)
+ * do decap
+ */
+static int
+__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *orig_spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_handle **flow_rule)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+ struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5_flow_spec *post_ct_spec = NULL;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_ct_flow *ct_flow;
+ int chain_mapping = 0, err;
+ struct mlx5_ct_ft *ft;
+ u32 fte_id = 1;
+
+ post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
+ ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
+ if (!post_ct_spec || !ct_flow) {
+ kfree(post_ct_spec);
+ kfree(ct_flow);
+ return -ENOMEM;
+ }
+
+ /* Register for CT established events */
+ ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
+ attr->ct_attr.nf_ft);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ ct_dbg("Failed to register to ft callback");
+ goto err_ft;
+ }
+ ct_flow->ft = ft;
+
+ err = idr_alloc_u32(&ct_priv->fte_ids, ct_flow, &fte_id,
+ MLX5_FTE_ID_MAX, GFP_KERNEL);
+ if (err) {
+ netdev_warn(priv->netdev,
+ "Failed to allocate fte id, err: %d\n", err);
+ goto err_idr;
+ }
+ ct_flow->fte_id = fte_id;
+
+ /* Base the esw attributes of both rules on the original rule's attribute */
+ pre_ct_attr = &ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, sizeof(*attr));
+ memcpy(&ct_flow->post_ct_attr, attr, sizeof(*attr));
+
+ /* Modify the original rule's action to fwd and modify, leave decap */
+ pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ /* Write the chain miss tag for misses in the ct table, as we
+ * don't go through all the prios of this chain the way normal
+ * tc rules do on a miss.
+ */
+ err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain,
+ &chain_mapping);
+ if (err) {
+ ct_dbg("Failed to get chain register mapping for chain");
+ goto err_get_chain;
+ }
+ ct_flow->chain_mapping = chain_mapping;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ CHAIN_TO_REG, chain_mapping);
+ if (err) {
+ ct_dbg("Failed to set chain register mapping");
+ goto err_mapping;
+ }
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ZONE_TO_REG,
+ attr->ct_attr.zone &
+ MLX5_CT_ZONE_MASK);
+ if (err) {
+ ct_dbg("Failed to set zone register mapping");
+ goto err_mapping;
+ }
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ FTEID_TO_REG, fte_id);
+ if (err) {
+ ct_dbg("Failed to set fte_id register mapping");
+ goto err_mapping;
+ }
+
+ /* If the original flow is decap, we do it before going into the
+ * ct table, so add a rewrite for the tunnel match_id.
+ */
+ if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ attr->chain == 0) {
+ u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ TUNNEL_TO_REG,
+ tun_id);
+ if (err) {
+ ct_dbg("Failed to set tunnel register mapping");
+ goto err_mapping;
+ }
+ }
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ pre_mod_acts.num_actions,
+ pre_mod_acts.actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ ct_dbg("Failed to create pre ct mod hdr");
+ goto err_mapping;
+ }
+ pre_ct_attr->modify_hdr = mod_hdr;
+
+ /* The post ct rule matches on fte_id and executes the original
+ * rule's tc actions.
+ */
+ mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG,
+ fte_id, MLX5_FTE_ID_MASK);
+
+ /* Put post_ct rule on post_ct fdb */
+ ct_flow->post_ct_attr.chain = 0;
+ ct_flow->post_ct_attr.prio = 0;
+ ct_flow->post_ct_attr.fdb = ct_priv->post_ct;
+
+ ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ rule = mlx5_eswitch_add_offloaded_rule(esw, post_ct_spec,
+ &ct_flow->post_ct_attr);
+ ct_flow->post_ct_rule = rule;
+ if (IS_ERR(ct_flow->post_ct_rule)) {
+ err = PTR_ERR(ct_flow->post_ct_rule);
+ ct_dbg("Failed to add post ct rule");
+ goto err_insert_post_ct;
+ }
+
+ /* Make the original rule point to the ct table */
+ pre_ct_attr->dest_chain = 0;
+ pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
+ orig_spec,
+ pre_ct_attr);
+ if (IS_ERR(ct_flow->pre_ct_rule)) {
+ err = PTR_ERR(ct_flow->pre_ct_rule);
+ ct_dbg("Failed to add pre ct rule");
+ goto err_insert_orig;
+ }
+
+ attr->ct_attr.ct_flow = ct_flow;
+ *flow_rule = ct_flow->post_ct_rule;
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ kfree(post_ct_spec);
+
+ return 0;
+
+err_insert_orig:
+ mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
+ &ct_flow->post_ct_attr);
+err_insert_post_ct:
+ mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
+err_mapping:
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+err_get_chain:
+ idr_remove(&ct_priv->fte_ids, fte_id);
+err_idr:
+ mlx5_tc_ct_del_ft_cb(ct_priv, ft);
+err_ft:
+ kfree(post_ct_spec);
+ kfree(ct_flow);
+ netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
+ return err;
+}
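End to end, this implements the three-table model drawn in the comment above __mlx5_tc_ct_flow_offload(): pre_ct tags the packet, the ct table marks established connections, and post_ct recovers the original rule by fte_id. A toy userspace model of that pipeline (illustration only; the structs and helpers here are hypothetical):

	/* Userspace model of the pre_ct -> ct -> post_ct pipeline. */
	#include <stdio.h>
	#include <stdbool.h>

	struct pkt_regs {
		unsigned int chain_miss;	/* CHAIN_TO_REG */
		unsigned int zone;		/* ZONE_TO_REG */
		unsigned int fte_id;		/* FTEID_TO_REG */
		bool established;		/* CTSTATE_TO_REG */
	};

	static void pre_ct(struct pkt_regs *r, unsigned int chain,
			   unsigned int zone, unsigned int fte_id)
	{
		/* pre_ct: original match hit; tag the packet for CT. */
		r->chain_miss = chain;
		r->zone = zone;
		r->fte_id = fte_id;
	}

	static bool ct_table(struct pkt_regs *r, bool tuple_hit)
	{
		/* CT table: a tuple+zone hit marks the connection
		 * established; a miss falls back through the chain_miss
		 * mapping to normal tc processing. */
		if (tuple_hit)
			r->established = true;
		return tuple_hit;
	}

	static void post_ct(const struct pkt_regs *r)
	{
		/* post_ct: match on fte_id, run the original tc actions. */
		if (r->established)
			printf("fte_id %u: original actions, ct_state +trk+est\n",
			       r->fte_id);
	}

	int main(void)
	{
		struct pkt_regs r = { 0 };

		pre_ct(&r, 1, 5, 42);
		if (ct_table(&r, true))
			post_ct(&r);
		return 0;
	}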
+
+static int
+__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *orig_spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
+ struct mlx5_flow_handle **flow_rule)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_ct_flow *ct_flow;
+ int err;
+
+ ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
+ if (!ct_flow)
+ return -ENOMEM;
+
+ /* Base the esw attributes on the original rule's attribute */
+ pre_ct_attr = &ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, sizeof(*attr));
+
+ err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
+ if (err) {
+ ct_dbg("Failed to set register for ct clear");
+ goto err_set_registers;
+ }
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts->num_actions,
+ mod_acts->actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ ct_dbg("Failed to add create ct clear mod hdr");
+ goto err_set_registers;
+ }
+
+ dealloc_mod_hdr_actions(mod_acts);
+ pre_ct_attr->modify_hdr = mod_hdr;
+ pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ rule = mlx5_eswitch_add_offloaded_rule(esw, orig_spec, pre_ct_attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add ct clear rule");
+ goto err_insert;
+ }
+
+ attr->ct_attr.ct_flow = ct_flow;
+ ct_flow->pre_ct_rule = rule;
+ *flow_rule = rule;
+
+ return 0;
+
+err_insert:
+ mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
+err_set_registers:
+ netdev_warn(priv->netdev,
+ "Failed to offload ct clear flow, err %d\n", err);
+ return err;
+}
+
+struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ if (!ct_priv)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&ct_priv->control_lock);
+ if (clear_action)
+ err = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr,
+ mod_hdr_acts, &rule);
+ else
+ err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr,
+ &rule);
+ mutex_unlock(&ct_priv->control_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ return rule;
+}
+
+static void
+__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_flow *ct_flow)
+{
+ struct mlx5_esw_flow_attr *pre_ct_attr = &ct_flow->pre_ct_attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+
+ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule,
+ pre_ct_attr);
+ mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr);
+
+ if (ct_flow->post_ct_rule) {
+ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule,
+ &ct_flow->post_ct_attr);
+ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
+ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
+ }
+
+ kfree(ct_flow);
+}
+
+void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
+
+ /* We may be called on the error path of parsing, before any
+ * hardware state was created; there is nothing to clean up then.
+ */
+ if (!ct_flow)
+ return;
+
+ mutex_lock(&ct_priv->control_lock);
+ __mlx5_tc_ct_delete_flow(ct_priv, ct_flow);
+ mutex_unlock(&ct_priv->control_lock);
+}
+
+static int
+mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
+ const char **err_msg)
+{
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ /* cannot restore chain ID on HW miss */
+
+ *err_msg = "tc skb extension missing";
+ return -EOPNOTSUPP;
+#endif
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) {
+ *err_msg = "firmware level support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) {
+ /* The vlan workaround must be avoided for multi-chain rules.
+ * This is just a sanity check, as the pop vlan action should
+ * be supported by any FW that supports ignore_flow_level.
+ */
+
+ *err_msg = "firmware vlan actions support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev,
+ fdb_modify_header_fwd_to_table)) {
+ /* CT always writes to registers, which are mod header actions.
+ * Therefore, both mod header and goto are required.
+ */
+
+ *err_msg = "firmware fwd and modify support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ *err_msg = "register loopback isn't supported";
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void
+mlx5_tc_ct_init_err(struct mlx5e_rep_priv *rpriv, const char *msg, int err)
+{
+ if (msg)
+ netdev_warn(rpriv->netdev,
+ "tc ct offload not supported, %s, err: %d\n",
+ msg, err);
+ else
+ netdev_warn(rpriv->netdev,
+ "tc ct offload not supported, err: %d\n",
+ err);
+}
+
+int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ const char *msg;
+ int err;
+
+ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ priv = netdev_priv(rpriv->netdev);
+ esw = priv->mdev->priv.eswitch;
+
+ err = mlx5_tc_ct_init_check_support(esw, &msg);
+ if (err) {
+ mlx5_tc_ct_init_err(rpriv, msg, err);
+ goto err_support;
+ }
+
+ ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
+ if (!ct_priv) {
+ mlx5_tc_ct_init_err(rpriv, NULL, -ENOMEM);
+ goto err_alloc;
+ }
+
+ ct_priv->esw = esw;
+ ct_priv->netdev = rpriv->netdev;
+ ct_priv->ct = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->ct)) {
+ err = PTR_ERR(ct_priv->ct);
+ mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err);
+ goto err_ct_tbl;
+ }
+
+ ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->ct_nat)) {
+ err = PTR_ERR(ct_priv->ct_nat);
+ mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table",
+ err);
+ goto err_ct_nat_tbl;
+ }
+
+ ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->post_ct)) {
+ err = PTR_ERR(ct_priv->post_ct);
+ mlx5_tc_ct_init_err(rpriv, "failed to create post ct table",
+ err);
+ goto err_post_ct_tbl;
+ }
+
+ idr_init(&ct_priv->fte_ids);
+ xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1);
+ mutex_init(&ct_priv->control_lock);
+ rhashtable_init(&ct_priv->zone_ht, &zone_params);
+
+ /* Done; set ct_priv so callers know CT is initialized */
+ uplink_priv->ct_priv = ct_priv;
+
+ return 0;
+
+err_post_ct_tbl:
+ mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat);
+err_ct_nat_tbl:
+ mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
+err_ct_tbl:
+ kfree(ct_priv);
+err_alloc:
+err_support:
+
+ return 0;
+}
+
+void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+
+ if (!ct_priv)
+ return;
+
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+
+ rhashtable_destroy(&ct_priv->zone_ht);
+ mutex_destroy(&ct_priv->control_lock);
+ xa_destroy(&ct_priv->tuple_ids);
+ idr_destroy(&ct_priv->fte_ids);
+ kfree(ct_priv);
+
+ uplink_priv->ct_priv = NULL;
+}
+
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid)
+{
+ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+ struct mlx5_ct_zone_rule *zone_rule;
+ struct mlx5_ct_entry *entry;
+
+ if (!ct_priv || !tupleid)
+ return true;
+
+ zone_rule = xa_load(&ct_priv->tuple_ids, tupleid);
+ if (!zone_rule)
+ return false;
+
+ entry = container_of(zone_rule, struct mlx5_ct_entry,
+ zone_rules[zone_rule->nat]);
+ tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+
+ return true;
+}
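On the miss path, the tuple id read back from reg_c_1 is resolved to its conntrack entry so the skb can continue in software with its ct state restored. A toy model of the id-to-cookie lookup (illustration only; a plain array stands in for the xarray):

	/* Userspace stand-in for the xa_load() lookup above. */
	#include <stdio.h>

	#define MAX_IDS 8

	static unsigned long restore_cookie[MAX_IDS];	/* tupleid -> cookie */

	static int restore_flow(unsigned int tupleid, unsigned long *cookie)
	{
		if (!tupleid)
			return 1;	/* untracked packet: nothing to restore */
		if (tupleid >= MAX_IDS || !restore_cookie[tupleid])
			return 0;	/* stale id: fail, as the caller does */
		*cookie = restore_cookie[tupleid];
		return 1;
	}

	int main(void)
	{
		unsigned long cookie;

		restore_cookie[3] = 0xabcd;
		if (restore_flow(3, &cookie))
			printf("restored cookie 0x%lx\n", cookie);
		return 0;
	}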
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
new file mode 100644
index 000000000000..626f6c04882e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_CT_H__
+#define __MLX5_EN_TC_CT_H__
+
+#include <net/pkt_cls.h>
+#include <linux/mlx5/fs.h>
+#include <net/tc_act/tc_ct.h>
+
+#include "en.h"
+
+struct mlx5_esw_flow_attr;
+struct mlx5e_tc_mod_hdr_acts;
+struct mlx5_rep_uplink_priv;
+struct mlx5e_tc_flow;
+struct mlx5e_priv;
+
+struct mlx5_ct_flow;
+
+struct nf_flowtable;
+
+struct mlx5_ct_attr {
+ u16 zone;
+ u16 ct_action;
+ struct mlx5_ct_flow *ct_flow;
+ struct nf_flowtable *nf_ft;
+};
+
+#define zone_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
+ .moffset = 0,\
+ .mlen = 2,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_2) + 2,\
+}
+
+#define ctstate_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
+ .moffset = 2,\
+ .mlen = 2,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_2),\
+}
+
+#define mark_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_3,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_3),\
+}
+
+#define labels_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_4,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_4),\
+}
+
+#define fteid_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_5),\
+}
+
+#define tupleid_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
+ .moffset = 0,\
+ .mlen = 3,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_1),\
+}
+
+#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8)
+#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0)
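Since TUPLEID_TO_REG maps 3 bytes of reg_c_1, the tuple id space works out to 24 bits. A userspace sanity check of the GENMASK arithmetic (illustration only; GENMASK is re-derived here for a 32-bit type):

	/* Illustration: the tuple id space implied by mlen = 3 bytes. */
	#include <stdio.h>

	#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

	int main(void)
	{
		unsigned int tuple_id_max = GENMASK(3 * 8 - 1, 0);

		/* 0xffffff: roughly 16.7M concurrently offloaded tuples. */
		printf("TUPLE_ID_MAX = 0x%x (%u)\n", tuple_id_max, tuple_id_max);
		return 0;
	}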
+
+#if IS_ENABLED(CONFIG_MLX5_TC_CT)
+
+int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv);
+void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv);
+
+int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack);
+int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack);
+
+struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr);
+
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid);
+
+#else /* CONFIG_MLX5_TC_CT */
+
+static inline int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ return 0;
+}
+
+static inline void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+}
+
+static inline int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
+ netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
+ return -EOPNOTSUPP;
+}
+
+static inline int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
+ netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
+ return -EOPNOTSUPP;
+}
+
+static inline struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr)
+{
+}
+
+static inline bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid)
+{
+ if (!tupleid)
+ return true;
+
+ return false;
+}
+
+#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
+#endif /* __MLX5_EN_TC_CT_H__ */
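A worked example of the tuple-id sizing above (editor's illustration, not part of the patch):

	/* TUPLEID_TO_REG uses mlen = 3 bytes of reg_c_1, so
	 * TUPLE_ID_BITS = 3 * 8 = 24 and
	 * TUPLE_ID_MAX  = GENMASK(23, 0) = 0xffffff,
	 * i.e. about 16M concurrently offloaded CT tuples, with id 0 kept
	 * free to mean "no tuple" (the !CONFIG_MLX5_TC_CT stub above treats
	 * tupleid == 0 as a trivially restored flow for the same reason).
	 */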
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index af4ebd2951b5..b45c3f46570b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -66,6 +66,9 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
return -EOPNOTSUPP;
+ if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev)
+ return -EOPNOTSUPP;
+
return 0;
}
@@ -469,10 +472,15 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
- void *headers_c,
- void *headers_v, u8 *match_level)
+ u8 *match_level)
{
struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ struct netlink_ext_ack *extack = f->common.extack;
int err = 0;
if (!tunnel) {
@@ -499,6 +507,109 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
goto out;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match;
+ u16 addr_type;
+
+ flow_rule_match_enc_control(rule, &match);
+ addr_type = match.key->addr_type;
+
+ /* Tunnel addr_type uses the same key IDs as non-tunnel */
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4,
+ ntohl(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4,
+ ntohl(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(match.key->dst));
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ETH_P_IP);
+ } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ETH_P_IPV6);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
+ match.mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
+ match.key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
+ match.mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
+ match.key->ttl);
+
+ if (match.mask->ttl &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB
+ (priv->mdev,
+ ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ /* Enforce DMAC when offloading incoming tunneled flows.
+ * Flow counters require a match on the DMAC.
+ */
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16), priv->netdev->dev_addr);
+
+ /* let software handle IP fragments */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+
+ return 0;
+
out:
return err;
}
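The ENC_IP block above splits the 8-bit TOS field into the two hardware match fields. A standalone sketch of the same bit layout, with illustrative names:

	static void example_tos_split(u8 tos, u8 *ecn, u8 *dscp)
	{
		*ecn  = tos & 0x3;	/* low two bits: ECN */
		*dscp = tos >> 2;	/* high six bits: DSCP */
	}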
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 6f9a78c85ffd..1630f0ec3ad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -76,8 +76,7 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
- void *headers_c,
- void *headers_v, u8 *match_level);
+ u8 *match_level);
int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index fe2d596cb361..3bcdb5b2fc20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
return 0;
+ if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
+ return 0;
+
spin_lock(&c->xskicosq_lock);
mlx5e_trigger_irq(&c->xskicosq);
spin_unlock(&c->xskicosq_lock);
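A minimal sketch of the wakeup-coalescing idiom added above, with a hypothetical wrapper name: the first caller to set the pending bit rings the doorbell, concurrent callers back off, and the poll handler clears the bit once the queue is serviced.

	static bool example_try_schedule(unsigned long *state)
	{
		/* true only for the caller that flipped the bit 0 -> 1 */
		return !test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, state);
	}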
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index cf58c9637904..29626c6c9c25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -433,7 +433,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
- drain_workqueue(ipsec->wq);
destroy_workqueue(ipsec->wq);
ida_destroy(&ipsec->halloc);
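For reference (editor's reading of the workqueue API): destroy_workqueue() already drains the queue internally before tearing it down, which is why the explicit drain_workqueue() call above could be dropped as redundant.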
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 46725cd743a3..7d1985fa0d4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -69,8 +69,8 @@ static void mlx5e_ktls_del(struct net_device *netdev,
struct mlx5e_ktls_offload_context_tx *tx_priv =
mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
- mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
+ mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
kvfree(tx_priv);
}
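The swap above makes teardown the mirror image of setup; an editor's reading of the hunk, as comments:

	/* create order:  key first, then the TIS that references key_id
	 * destroy order: TIS first, then the key, so the HW object that
	 * holds the reference goes away before the object it points at
	 */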
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 2c75b2752f58..014639ea06e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -175,28 +175,20 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
- struct mlx5_flow_spec *spec;
enum mlx5e_traffic_types tt;
int err = 0;
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out;
- }
-
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
tt = arfs_get_tt(type);
if (tt == -EINVAL) {
netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
__func__, type);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
dest.tir_num = tir[tt].tirn;
- arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
+ arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
@@ -205,8 +197,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
__func__, type);
}
-out:
- kvfree(spec);
+
return err;
}
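The simplification above relies on mlx5_add_flow_rules() accepting a NULL spec as "no match criteria", so the empty kvzalloc'ed spec was redundant. A hypothetical minimal caller, for illustration:

	static struct mlx5_flow_handle *
	example_add_catchall(struct mlx5_flow_table *ft,
			     struct mlx5_flow_act *act,
			     struct mlx5_flow_destination *dest)
	{
		/* NULL spec: the rule matches every packet */
		return mlx5_add_flow_rules(ft, NULL, act, dest, 1);
	}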
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 01f2918063af..47874d34156b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1098,49 +1098,59 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
mlx5e_dcbnl_dscp_app(priv, DELETE);
}
-static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
- struct mlx5e_params *params)
+static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u8 trust_state)
{
- mlx5_query_min_inline(priv->mdev, &params->tx_min_inline_mode);
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
+ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+ if (trust_state == MLX5_QPTS_TRUST_DSCP &&
params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
}
-static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
+static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
+{
+ u8 *trust_state = context;
+ int err;
+
+ err = mlx5_set_trust_state(priv->mdev, *trust_state);
+ if (err)
+ return err;
+ priv->dcbx_dp.trust_state = *trust_state;
+
+ return 0;
+}
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
struct mlx5e_channels new_channels = {};
+ bool reset_channels = true;
+ int err = 0;
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
- mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
+ mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
+ trust_state);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
- goto out;
+ reset_channels = false;
}
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
- goto out;
+ reset_channels = false;
- mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ if (reset_channels)
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_update_trust_state_hw,
+ &trust_state);
+ else
+ err = mlx5e_update_trust_state_hw(priv, &trust_state);
-out:
mutex_unlock(&priv->state_lock);
-}
-
-static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
-{
- int err;
-
- err = mlx5_set_trust_state(priv->mdev, trust_state);
- if (err)
- return err;
- priv->dcbx_dp.trust_state = trust_state;
- mlx5e_trust_update_sq_inline_mode(priv);
return err;
}
@@ -1171,7 +1181,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
if (err)
return err;
- mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
+ mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
+ priv->dcbx_dp.trust_state);
err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d674cb679895..bc290ae80a53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -357,7 +357,7 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
goto unlock;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
unlock:
mutex_unlock(&priv->state_lock);
@@ -432,9 +432,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
*cur_params = new_channels.params;
- if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
+ mlx5e_num_channels_changed(priv);
goto out;
}
@@ -442,12 +440,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (arfs_enabled)
mlx5e_arfs_disable(priv);
- if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
-
/* Switch to new channels, set new parameters and close old ones */
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_num_channels_changed_ctx, NULL);
if (arfs_enabled) {
int err2 = mlx5e_arfs_enable(priv);
@@ -532,8 +527,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
+ bool reset_rx, reset_tx;
int err = 0;
- bool reset;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
return -EOPNOTSUPP;
@@ -571,16 +566,29 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) ||
- (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled);
+ reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+ reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
- if (!reset) {
+ if (!reset_rx && !reset_tx) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
goto out;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ if (reset_rx) {
+ u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ MLX5E_PFLAG_RX_CQE_BASED_MODER);
+
+ mlx5e_reset_rx_moderation(&new_channels.params, mode);
+ }
+ if (reset_tx) {
+ u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
+ mlx5e_reset_tx_moderation(&new_channels.params, mode);
+ }
+
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
out:
mutex_unlock(&priv->state_lock);
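For reference, the reset_rx/reset_tx split above means a change to only one direction's adaptive-moderation flag resets just that direction, e.g.:

	if (reset_rx)	/* only the RX DIM flag changed */
		mlx5e_reset_rx_moderation(&new_channels.params, rx_mode);
	/* TX moderation profile is left untouched */

(rx_mode here stands for the MLX5E_PFLAG_RX_CQE_BASED_MODER pflag value, as read in the hunk.)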
@@ -633,6 +641,8 @@ static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_NOFEC] = ETHTOOL_FEC_OFF,
[MLX5E_FEC_FIRECODE] = ETHTOOL_FEC_BASER,
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
+ [MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
+ [MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
@@ -650,45 +660,50 @@ static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
return 0;
}
-/* we use ETHTOOL_FEC_* offset and apply it to ETHTOOL_LINK_MODE_FEC_*_BIT */
-static u32 ethtool_fec2ethtool_caps(u_long ethtool_fec_code)
-{
- u32 offset;
-
- offset = find_first_bit(&ethtool_fec_code, sizeof(u32));
- offset -= ETHTOOL_FEC_OFF_BIT;
- offset += ETHTOOL_LINK_MODE_FEC_NONE_BIT;
+#define MLX5E_ADVERTISE_SUPPORTED_FEC(mlx5_fec, ethtool_fec) \
+ do { \
+ if (mlx5e_fec_in_caps(dev, 1 << (mlx5_fec))) \
+ __set_bit(ethtool_fec, \
+ link_ksettings->link_modes.supported);\
+ } while (0)
- return offset;
-}
+static const u32 pplm_fec_2_ethtool_linkmodes[] = {
+ [MLX5E_FEC_NOFEC] = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ [MLX5E_FEC_FIRECODE] = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ [MLX5E_FEC_RS_528_514] = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ [MLX5E_FEC_RS_544_514] = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ [MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+};
static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- u_long fec_caps = 0;
- u32 active_fec = 0;
- u32 offset;
+ unsigned long active_fec_long;
+ u32 active_fec;
u32 bitn;
int err;
- err = mlx5e_get_fec_caps(dev, (u8 *)&fec_caps);
- if (err)
- return (err == -EOPNOTSUPP) ? 0 : err;
-
err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
if (err)
- return err;
-
- for_each_set_bit(bitn, &fec_caps, ARRAY_SIZE(pplm_fec_2_ethtool)) {
- u_long ethtool_bitmask = pplm_fec_2_ethtool[bitn];
-
- offset = ethtool_fec2ethtool_caps(ethtool_bitmask);
- __set_bit(offset, link_ksettings->link_modes.supported);
- }
+ return (err == -EOPNOTSUPP) ? 0 : err;
- active_fec = pplm2ethtool_fec(active_fec, sizeof(u32) * BITS_PER_BYTE);
- offset = ethtool_fec2ethtool_caps(active_fec);
- __set_bit(offset, link_ksettings->link_modes.advertising);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_NOFEC,
+ ETHTOOL_LINK_MODE_FEC_NONE_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_FIRECODE,
+ ETHTOOL_LINK_MODE_FEC_BASER_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_RS_528_514,
+ ETHTOOL_LINK_MODE_FEC_RS_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
+ ETHTOOL_LINK_MODE_FEC_LLRS_BIT);
+
+ active_fec_long = active_fec;
+ /* active_fec is a bitfield with a single bit set; find which bit
+ * it is and advertise the corresponding ethtool link mode
+ */
+ bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE);
+ if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
+ __set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
+ link_ksettings->link_modes.advertising);
return 0;
}
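For reference, each MLX5E_ADVERTISE_SUPPORTED_FEC() invocation above expands to a capability probe plus a link-mode bit, e.g. for RS FEC:

	if (mlx5e_fec_in_caps(dev, 1 << MLX5E_FEC_RS_528_514))
		__set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
			  link_ksettings->link_modes.supported);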
@@ -773,6 +788,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -784,7 +800,10 @@ static void get_speed_duplex(struct net_device *netdev,
speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
if (!speed) {
- speed = SPEED_UNKNOWN;
+ if (data_rate_oper)
+ speed = 100 * data_rate_oper;
+ else
+ speed = SPEED_UNKNOWN;
goto out;
}
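A worked example of the fallback above, assuming (per the PTYS register layout) that data_rate_oper is reported in units of 100 Mb/s: a device reporting data_rate_oper = 500 now yields speed = 100 * 500 = 50000 Mb/s, i.e. 50 Gb/s, where the old code would have reported SPEED_UNKNOWN for any rate without a matching ethtool link mode.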
@@ -873,17 +892,18 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- u32 rx_pause = 0;
- u32 tx_pause = 0;
- u32 eth_proto_cap;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
u32 eth_proto_admin;
- u32 eth_proto_lp;
- u32 eth_proto_oper;
u8 an_disable_admin;
- u8 an_status;
+ u16 data_rate_oper;
+ u32 eth_proto_oper;
+ u32 eth_proto_cap;
u8 connector_type;
+ u32 rx_pause = 0;
+ u32 tx_pause = 0;
+ u32 eth_proto_lp;
bool admin_ext;
+ u8 an_status;
bool ext;
int err;
@@ -917,6 +937,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
connector_type = MLX5_GET(ptys_reg, out, connector_type);
+ data_rate_oper = MLX5_GET(ptys_reg, out, data_rate_oper);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
@@ -927,7 +948,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- link_ksettings);
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -1126,8 +1147,8 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -1146,8 +1167,8 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
-static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -1511,8 +1532,8 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 fec_configured = 0;
- u32 fec_active = 0;
+ u16 fec_configured;
+ u32 fec_active;
int err;
err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
@@ -1520,14 +1541,14 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
if (err)
return err;
- fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
- sizeof(u32) * BITS_PER_BYTE);
+ fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active,
+ sizeof(unsigned long) * BITS_PER_BYTE);
if (!fecparam->active_fec)
return -EOPNOTSUPP;
- fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
- sizeof(u8) * BITS_PER_BYTE);
+ fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured,
+ sizeof(unsigned long) * BITS_PER_BYTE);
return 0;
}
@@ -1537,10 +1558,14 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 fec_policy = 0;
+ u16 fec_policy = 0;
int mode;
int err;
+ if (bitmap_weight((unsigned long *)&fecparam->fec,
+ ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ return -EOPNOTSUPP;
+
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
if (!(pplm_fec_2_ethtool[mode] & fecparam->fec))
continue;
@@ -1739,7 +1764,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
return 0;
}
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -1772,7 +1797,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return 0;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
if (err)
return err;
@@ -1829,7 +1854,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return 0;
}
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
@@ -1873,7 +1898,7 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
return 0;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
return err;
}
@@ -1938,7 +1963,8 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1955,12 +1981,15 @@ static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u
return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
}
-static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_get_strings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4ef3dc79f73c..bd8d0e096085 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -63,6 +63,7 @@
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
+#include "en/devlink.h"
#include "lib/mlx5.h"
@@ -1811,29 +1812,6 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
-static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
- struct mlx5e_params *params)
-{
- int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
- int irq;
-
- if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
- return -ENOMEM;
-
- for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
-
- cpumask_set_cpu(cpu, c->xps_cpumask);
- }
-
- return 0;
-}
-
-static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
-{
- free_cpumask_var(c->xps_cpumask);
-}
-
static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1984,10 +1962,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->irq_desc = irq_to_desc(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
- err = mlx5e_alloc_xps_cpumask(c, params);
- if (err)
- goto err_free_channel;
-
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
err = mlx5e_open_queues(c, params, cparam);
@@ -2010,9 +1984,7 @@ err_close_queues:
err_napi_del:
netif_napi_del(&c->napi);
- mlx5e_free_xps_cpumask(c);
-err_free_channel:
kvfree(c);
return err;
@@ -2026,7 +1998,6 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_rq(&c->rq);
- netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
@@ -2051,7 +2022,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
netif_napi_del(&c->napi);
- mlx5e_free_xps_cpumask(c);
kvfree(c);
}
@@ -2747,7 +2717,8 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
}
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ /* Verify that inner TIR resources were allocated */
+ if (!priv->inner_indir_tir[0].tirn)
return;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
@@ -2801,6 +2772,8 @@ free_in:
return err;
}
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
+
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
{
@@ -2850,6 +2823,8 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
return 0;
}
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
+
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
{
struct mlx5e_params *params = &priv->channels.params;
@@ -2886,6 +2861,54 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
+static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
+{
+ int num_txqs = count * priv->channels.params.num_tc;
+ int num_rxqs = count * priv->profile->rq_groups;
+ struct net_device *netdev = priv->netdev;
+
+ mlx5e_netdev_set_tcs(netdev);
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, num_rxqs);
+}
+
+static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
+ struct mlx5e_params *params)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int num_comp_vectors, ix, irq;
+
+ num_comp_vectors = mlx5_comp_vectors_count(mdev);
+
+ for (ix = 0; ix < params->num_channels; ix++) {
+ cpumask_clear(priv->scratchpad.cpumask);
+
+ for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+
+ cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
+ }
+
+ netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
+ }
+}
+
+int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
+{
+ u16 count = priv->channels.params.num_channels;
+
+ mlx5e_update_netdev_queues(priv, count);
+ mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
+
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
+
+ return 0;
+}
+
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
+
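/* Editor's worked example for the XPS spread above (illustrative numbers):
 * with 8 completion vectors and num_channels = 3, channel 0 takes vectors
 * {0, 3, 6}, channel 1 takes {1, 4, 7} and channel 2 takes {2, 5}; each
 * channel's XPS cpumask is the union of the first CPU of each of its
 * vectors' IRQ affinity masks.
 */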
static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
{
int i, ch;
@@ -2907,14 +2930,6 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
- int num_txqs = priv->channels.num * priv->channels.params.num_tc;
- int num_rxqs = priv->channels.num * priv->profile->rq_groups;
- struct net_device *netdev = priv->netdev;
-
- mlx5e_netdev_set_tcs(netdev);
- netif_set_real_num_tx_queues(netdev, num_txqs);
- netif_set_real_num_rx_queues(netdev, num_rxqs);
-
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
mlx5e_xdp_tx_enable(priv);
@@ -2947,42 +2962,52 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
mlx5e_deactivate_channels(&priv->channels);
}
-static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *new_chs,
- mlx5e_fp_hw_modify hw_modify)
+static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_preactivate preactivate,
+ void *context)
{
struct net_device *netdev = priv->netdev;
- int new_num_txqs;
+ struct mlx5e_channels old_chs;
int carrier_ok;
-
- new_num_txqs = new_chs->num * new_chs->params.num_tc;
+ int err = 0;
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
- if (new_num_txqs < netdev->real_num_tx_queues)
- netif_set_real_num_tx_queues(netdev, new_num_txqs);
-
mlx5e_deactivate_priv_channels(priv);
- mlx5e_close_channels(&priv->channels);
+ old_chs = priv->channels;
priv->channels = *new_chs;
- /* New channels are ready to roll, modify HW settings if needed */
- if (hw_modify)
- hw_modify(priv);
+ /* New channels are ready to roll, call the preactivate hook if needed
+ * to modify HW settings or update kernel parameters.
+ */
+ if (preactivate) {
+ err = preactivate(priv, context);
+ if (err) {
+ priv->channels = old_chs;
+ goto out;
+ }
+ }
+ mlx5e_close_channels(&old_chs);
priv->profile->update_rx(priv);
+
+out:
mlx5e_activate_priv_channels(priv);
/* return carrier back if needed */
if (carrier_ok)
netif_carrier_on(netdev);
+
+ return err;
}
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
- mlx5e_fp_hw_modify hw_modify)
+ mlx5e_fp_preactivate preactivate,
+ void *context)
{
int err;
@@ -2990,8 +3015,16 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
if (err)
return err;
- mlx5e_switch_priv_channels(priv, new_chs, hw_modify);
+ err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
+ if (err)
+ goto err_close;
+
return 0;
+
+err_close:
+ mlx5e_close_channels(new_chs);
+
+ return err;
}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
@@ -2999,7 +3032,7 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
struct mlx5e_channels new_channels = {};
new_channels.params = priv->channels.params;
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
}
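The preactivate/context pair threaded through mlx5e_safe_switch_channels() above makes the switch transactional; an editor's summary of the control flow, as comments:

	/* open(new); deactivate(old); priv->channels = new;
	 * if (preactivate(priv, context))   // e.g. mlx5e_update_trust_state_hw
	 *         priv->channels = old;     // roll back; old set is reactivated
	 * else
	 *         close(old);
	 * activate whichever set is now current
	 */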
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -3376,14 +3409,15 @@ out:
return err;
}
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
int i;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
- if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ /* Verify that inner TIR resources were allocated */
+ if (!priv->inner_indir_tir[0].tirn)
return;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
@@ -3448,7 +3482,8 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_num_channels_changed_ctx, NULL);
if (err)
goto out;
@@ -3550,7 +3585,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- if (!mlx5e_monitor_counter_supported(priv)) {
+ /* In switchdev mode, the monitor counters don't monitor the
+ * rx/tx stats of 802_3, so the update-stats mechanism should
+ * keep the 802_3 layout counters updated.
+ */
+ if (!mlx5e_monitor_counter_supported(priv) ||
+ mlx5e_is_uplink_rep(priv)) {
/* update HW stats in background for next time */
mlx5e_queue_update_stats(priv);
}
@@ -3661,7 +3701,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_modify_tirs_lro_ctx, NULL);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -3880,7 +3921,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
}
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
- change_hw_mtu_cb set_mtu_cb)
+ mlx5e_fp_preactivate preactivate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels new_channels = {};
@@ -3929,13 +3970,13 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (!reset) {
params->sw_mtu = new_mtu;
- if (set_mtu_cb)
- set_mtu_cb(priv);
+ if (preactivate)
+ preactivate(priv, NULL);
netdev->mtu = params->sw_mtu;
goto out;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL);
if (err)
goto out;
@@ -3948,7 +3989,7 @@ out:
static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
{
- return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -4409,7 +4450,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
mlx5e_set_rq_type(priv->mdev, &new_channels.params);
old_prog = priv->channels.params.xdp_prog;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
if (err)
goto unlock;
} else {
@@ -4589,6 +4630,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
#endif
+ .ndo_get_devlink_port = mlx5e_get_devlink_port,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -4674,7 +4716,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
if (params->tx_dim_enabled) {
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4683,13 +4725,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
} else {
params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
}
-
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
- params->tx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
if (params->rx_dim_enabled) {
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4698,7 +4736,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
} else {
params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
}
+}
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_tx_moderation(params, cq_period_mode);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ params->tx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_rx_moderation(params, cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
@@ -4787,9 +4837,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv,
mlx5e_build_rq_params(mdev, params);
/* HW LRO */
-
- /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (MLX5_CAP_ETH(mdev, lro_cap) &&
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
/* No XSK params: checking the availability of striding RQ in general. */
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
params->lro_en = !slow_pci_heuristic(mdev);
@@ -5084,7 +5133,7 @@ err_destroy_xsk_rqts:
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv, true);
+ mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
@@ -5103,7 +5152,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
- mlx5e_destroy_indirect_tirs(priv, true);
+ mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
@@ -5230,6 +5279,9 @@ int mlx5e_netdev_init(struct net_device *netdev,
priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
priv->max_opened_tc = 1;
+ if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
mutex_init(&priv->state_lock);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
@@ -5238,7 +5290,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- return -ENOMEM;
+ goto err_free_cpumask;
/* netdev init */
netif_carrier_off(netdev);
@@ -5248,11 +5300,17 @@ int mlx5e_netdev_init(struct net_device *netdev,
#endif
return 0;
+
+err_free_cpumask:
+ free_cpumask_var(priv->scratchpad.cpumask);
+
+ return -ENOMEM;
}
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
destroy_workqueue(priv->wq);
+ free_cpumask_var(priv->scratchpad.cpumask);
}
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
@@ -5287,6 +5345,7 @@ err_free_netdev:
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
+ const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
const struct mlx5e_profile *profile;
int max_nch;
int err;
@@ -5298,10 +5357,25 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
max_nch = mlx5e_get_max_num_channels(priv->mdev);
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+ /* Reducing the number of channels - RXFH has to be reset, and
+ * mlx5e_num_channels_changed below will build the RQT.
+ */
+ priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
priv->channels.params.num_channels = max_nch;
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, max_nch);
}
+ /* 1. Set the real number of queues in the kernel the first time.
+ * 2. Set our default XPS cpumask.
+ * 3. Build the RQT.
+ *
+ * rtnl_lock is required by netif_set_real_num_*_queues in case the
+ * netdev has been registered by this point (if this function was called
+ * in the reload or resume flow).
+ */
+ if (take_rtnl)
+ rtnl_lock();
+ mlx5e_num_channels_changed(priv);
+ if (take_rtnl)
+ rtnl_unlock();
err = profile->init_tx(priv);
if (err)
@@ -5425,17 +5499,27 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
goto err_destroy_netdev;
}
+ err = mlx5e_devlink_port_register(priv);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
+ goto err_detach;
+ }
+
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_detach;
+ goto err_devlink_port_unregister;
}
+ mlx5e_devlink_port_type_eth_set(priv);
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
return priv;
+err_devlink_port_unregister:
+ mlx5e_devlink_port_unregister(priv);
err_detach:
mlx5e_detach(mdev, priv);
err_destroy_netdev:
@@ -5458,6 +5542,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
mlx5e_dcbnl_delete_app(priv);
#endif
unregister_netdev(priv->netdev);
+ mlx5e_devlink_port_unregister(priv);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 6ed307d7f191..4a8e0dfdc5f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -41,7 +41,7 @@
#include <net/ipv6_stubs.h>
#include "eswitch.h"
-#include "eswitch_offloads_chains.h"
+#include "esw/chains.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -192,7 +192,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
if (err) {
- pr_warn("vport %d error %d reading stats\n", rep->vport, err);
+ netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
+ rep->vport, err);
return;
}
@@ -252,25 +253,6 @@ static int mlx5e_rep_set_ringparam(struct net_device *dev,
return mlx5e_ethtool_set_ringparam(priv, param);
}
-static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_flow_handle *flow_rule;
-
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- dest);
- if (IS_ERR(flow_rule))
- return PTR_ERR(flow_rule);
-
- mlx5_del_flow_rules(rpriv->vport_rx_rule);
- rpriv->vport_rx_rule = flow_rule;
- return 0;
-}
-
static void mlx5e_rep_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
@@ -283,33 +265,8 @@ static int mlx5e_rep_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- u16 curr_channels_amount = priv->channels.params.num_channels;
- u32 new_channels_amount = ch->combined_count;
- struct mlx5_flow_destination new_dest;
- int err = 0;
- err = mlx5e_ethtool_set_channels(priv, ch);
- if (err)
- return err;
-
- if (curr_channels_amount == 1 && new_channels_amount > 1) {
- new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- new_dest.ft = priv->fs.ttc.ft.t;
- } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
- new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- new_dest.tir_num = priv->direct_tir[0].tirn;
- } else {
- return 0;
- }
-
- err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
- if (err) {
- netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
- curr_channels_amount, new_channels_amount);
- return err;
- }
-
- return 0;
+ return mlx5e_ethtool_set_channels(priv, ch);
}
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
@@ -375,6 +332,9 @@ static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -391,6 +351,9 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
};
static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -406,6 +369,10 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
+ .set_rxnfc = mlx5e_set_rxnfc,
.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
@@ -727,9 +694,9 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
struct flow_cls_offload *flower,
- struct mlx5e_rep_indr_block_priv *indr_priv)
+ struct mlx5e_rep_indr_block_priv *indr_priv,
+ unsigned long flags)
{
- unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
int err = 0;
@@ -750,20 +717,68 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
return err;
}
-static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
- void *type_data, void *indr_priv)
+static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_rep_indr_block_priv *priv = indr_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
+ return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
+ flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
+{
+ struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+ struct flow_cls_offload *f = type_data;
+ struct flow_cls_offload tmp;
+ struct mlx5e_priv *mpriv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ mpriv = netdev_priv(priv->rpriv->netdev);
+ esw = mpriv->mdev->priv.eswitch;
+
+ flags = MLX5_TC_FLAG(EGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ memcpy(&tmp, f, sizeof(*f));
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
+ */
+ if (!mlx5_esw_chains_prios_supported(esw) ||
+ tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
+ tmp.common.chain_index)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
+ return err;
default:
return -EOPNOTSUPP;
}
}
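A worked example of the normalization above: FT offload allows prio 0 while tc does not, so an FT rule at prio N lands in the reserved FT chain at tc prio N + 1; rules with a nonzero chain_index, or with prio at or beyond mlx5_esw_chains_get_prio_range(esw), are rejected with -EOPNOTSUPP rather than silently clamped.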
-static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
+static void mlx5e_rep_indr_block_unbind(void *cb_priv)
{
struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
@@ -774,9 +789,10 @@ static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
static LIST_HEAD(mlx5e_block_cb_list);
static int
-mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
- struct mlx5e_rep_priv *rpriv,
- struct flow_block_offload *f)
+mlx5e_rep_indr_setup_block(struct net_device *netdev,
+ struct mlx5e_rep_priv *rpriv,
+ struct flow_block_offload *f,
+ flow_setup_cb_t *setup_cb)
{
struct mlx5e_rep_indr_block_priv *indr_priv;
struct flow_block_cb *block_cb;
@@ -802,9 +818,8 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
- block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
- indr_priv, indr_priv,
- mlx5e_rep_indr_tc_block_unbind);
+ block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+ mlx5e_rep_indr_block_unbind);
if (IS_ERR(block_cb)) {
list_del(&indr_priv->list);
kfree(indr_priv);
@@ -819,9 +834,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (!indr_priv)
return -ENOENT;
- block_cb = flow_block_cb_lookup(f->block,
- mlx5e_rep_indr_setup_block_cb,
- indr_priv);
+ block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
if (!block_cb)
return -ENOENT;
@@ -835,13 +848,16 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
}
static
-int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
+int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
{
switch (type) {
case TC_SETUP_BLOCK:
- return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
- type_data);
+ return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+ mlx5e_rep_indr_setup_tc_cb);
+ case TC_SETUP_FT:
+ return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+ mlx5e_rep_indr_setup_ft_cb);
default:
return -EOPNOTSUPP;
}
@@ -853,7 +869,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
int err;
err = __flow_indr_block_cb_register(netdev, rpriv,
- mlx5e_rep_indr_setup_tc_cb,
+ mlx5e_rep_indr_setup_cb,
rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
@@ -867,7 +883,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
- __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+ __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb,
rpriv);
}
@@ -1279,8 +1295,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
memcpy(&tmp, f, sizeof(*f));
- if (!mlx5_esw_chains_prios_supported(esw) ||
- tmp.common.chain_index)
+ if (!mlx5_esw_chains_prios_supported(esw))
return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the
@@ -1396,7 +1411,7 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
- return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}
static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
@@ -1422,7 +1437,7 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
-static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
+static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1435,7 +1450,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_get_devlink_port,
+ .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1448,7 +1463,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_start_xmit = mlx5e_xmit,
.ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_get_devlink_port,
+ .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1464,13 +1479,14 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_set_features = mlx5e_set_features,
};
-bool mlx5e_eswitch_rep(struct net_device *netdev)
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
{
- if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
- netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
- return true;
+ return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
+}
- return false;
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
+{
+ return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
static void mlx5e_build_rep_params(struct net_device *netdev)
@@ -1584,6 +1600,8 @@ static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct ttc_params ttc_params = {};
int tt, err;
@@ -1593,6 +1611,11 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* The inner_ttc in the ttc params is intentionally not set */
ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
mlx5e_set_ttc_ft_params(&ttc_params);
+
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ /* To give the uplink rep TTC a lower level for chaining from the root ft */
+ ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
+
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
@@ -1604,6 +1627,52 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
return 0;
}
+static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err = 0;
+
+ if (rep->vport != MLX5_VPORT_UPLINK) {
+ /* Non-uplink reps skip any bypass tables and go directly to
+ * their own TTC
+ */
+ rpriv->root_ft = priv->fs.ttc.ft.t;
+ return 0;
+ }
+
+ /* The uplink root ft is used to auto-chain to the ethtool or TTC tables */
+ ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
+ ft_attr.prio = 1;
+ ft_attr.level = 1;
+
+ rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(rpriv->root_ft)) {
+ err = PTR_ERR(rpriv->root_ft);
+ rpriv->root_ft = NULL;
+ }
+
+ return err;
+}
+
+static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ return;
+ mlx5_destroy_flow_table(rpriv->root_ft);
+}
+
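/* Editor's note on the empty table above: with max_fte = 0 the table holds
 * no rules and exists only for its miss path. Packets that match nothing
 * fall through to the next table in level order, which lets the uplink
 * root ft auto-chain into the ethtool steering or TTC tables without any
 * explicit forwarding rule.
 */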
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -1612,11 +1681,10 @@ static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_destination dest;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest.tir_num = priv->direct_tir[0].tirn;
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- &dest);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rpriv->root_ft;
+
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
if (IS_ERR(flow_rule))
return PTR_ERR(flow_rule);
rpriv->vport_rx_rule = flow_rule;
@@ -1656,18 +1724,26 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_tirs;
- err = mlx5e_create_rep_vport_rx_rule(priv);
+ err = mlx5e_create_rep_root_ft(priv);
if (err)
goto err_destroy_ttc_table;
+ err = mlx5e_create_rep_vport_rx_rule(priv);
+ if (err)
+ goto err_destroy_root_ft;
+
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
+err_destroy_root_ft:
+ mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv, false);
+ mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
@@ -1682,9 +1758,10 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
- mlx5e_destroy_indirect_tirs(priv, false);
+ mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
@@ -1692,19 +1769,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
- int err = mlx5e_init_rep_rx(priv);
-
- if (err)
- return err;
-
mlx5e_create_q_counters(priv);
- return 0;
+ return mlx5e_init_rep_rx(priv);
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
- mlx5e_destroy_q_counters(priv);
mlx5e_cleanup_rep_rx(priv);
+ mlx5e_destroy_q_counters(priv);
}
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
@@ -1920,7 +1992,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_update_ndo_stats,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
@@ -1940,7 +2012,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_ul_rep_stats_grps,
@@ -1969,29 +2041,30 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct netdev_phys_item_id ppid = {};
unsigned int dl_port_index = 0;
+ u16 pfnum;
if (!is_devlink_port_supported(dev, rpriv))
return 0;
mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
if (rep->vport == MLX5_VPORT_UPLINK) {
devlink_port_attrs_set(&rpriv->dl_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
- PCI_FUNC(dev->pdev->devfn), false, 0,
+ pfnum, false, 0,
&ppid.id[0], ppid.id_len);
dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
} else if (rep->vport == MLX5_VPORT_PF) {
devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
- dev->pdev->devfn);
+ pfnum);
dl_port_index = rep->vport;
} else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
rpriv->rep->vport)) {
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
- dev->pdev->devfn,
- rep->vport - 1);
+ pfnum, rep->vport - 1);
dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
}
@@ -2026,8 +2099,9 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
&mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
- pr_warn("Failed to create representor netdev for vport %d\n",
- rep->vport);
+ mlx5_core_warn(dev,
+ "Failed to create representor netdev for vport %d\n",
+ rep->vport);
kfree(rpriv);
return -EINVAL;
}
@@ -2045,29 +2119,32 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
- pr_warn("Failed to attach representor netdev for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to attach representor netdev for vport %d\n",
+ rep->vport);
goto err_destroy_mdev_resources;
}
err = mlx5e_rep_neigh_init(rpriv);
if (err) {
- pr_warn("Failed to initialized neighbours handling for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to initialized neighbours handling for vport %d\n",
+ rep->vport);
goto err_detach_netdev;
}
err = register_devlink_port(dev, rpriv);
if (err) {
- esw_warn(dev, "Failed to register devlink port %d\n",
- rep->vport);
+ netdev_warn(netdev, "Failed to register devlink port %d\n",
+ rep->vport);
goto err_neigh_cleanup;
}
err = register_netdev(netdev);
if (err) {
- pr_warn("Failed to register representor netdev for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to register representor netdev for vport %d\n",
+ rep->vport);
goto err_devlink_cleanup;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 3f756d51435f..612b5cf0673d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -55,6 +55,7 @@ struct mlx5e_neigh_update_table {
unsigned long min_interval; /* jiffies */
};
+struct mlx5_tc_ct_priv;
struct mlx5_rep_uplink_priv {
/* Filters DB - instantiated by the uplink representor and shared by
* the uplink's VFs
@@ -81,12 +82,20 @@ struct mlx5_rep_uplink_priv {
struct mutex unready_flows_lock;
struct list_head unready_flows;
struct work_struct reoffload_flows_work;
+
+ /* maps tun_info to a unique id */
+ struct mapping_ctx *tunnel_mapping;
+ /* maps tun_enc_opts to a unique id */
+ struct mapping_ctx *tunnel_enc_opts_mapping;
+
+ struct mlx5_tc_ct_priv *ct_priv;
};
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
struct net_device *netdev;
+ struct mlx5_flow_table *root_ft;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
@@ -191,6 +200,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
@@ -199,7 +210,13 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
-bool mlx5e_eswitch_rep(struct net_device *netdev);
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
+static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
+{
+ return mlx5e_eswitch_vf_rep(netdev) ||
+ mlx5e_eswitch_uplink_rep(netdev);
+}
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 312d4692425b..e2beb89c1832 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -158,7 +158,8 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
- rq->handle_rx_cqe(rq, &cqd->title);
+ INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, rq, &cqd->title);
}
mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
wq->cc = cqcc;
@@ -178,7 +179,8 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
mlx5e_read_title_slot(rq, wq, cc);
mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
mlx5e_decompress_cqe(rq, wq, cc);
- rq->handle_rx_cqe(rq, &cqd->title);
+ INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, rq, &cqd->title);
cqd->mini_arr_idx++;
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
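The INDIRECT_CALL_2() wrapper used here (include/linux/indirect_call_wrapper.h) exists because retpolines make indirect calls expensive: it compares the function pointer against the two most likely handlers so the hot path becomes a direct, predictable call. A simplified sketch of the expansion (the real macro additionally wraps each comparison in likely()):

    #define INDIRECT_CALL_1(f, f1, ...)				\
    	((f) == f1 ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))
    #define INDIRECT_CALL_2(f, f2, f1, ...)			\
    	((f) == f2 ? f2(__VA_ARGS__)				\
    		   : INDIRECT_CALL_1(f, f1, __VA_ARGS__))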
@@ -587,7 +589,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !!err;
}
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
@@ -595,11 +597,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
int i;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return;
+ return 0;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (likely(!cqe))
- return;
+ return 0;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
@@ -648,6 +650,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sq->cc = sqcc;
mlx5_cqwq_update_db_record(&cq->wq);
+
+ return i;
}
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
@@ -1192,6 +1196,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct sk_buff *skb;
@@ -1224,13 +1229,78 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
+ if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ goto free_wqe;
+
napi_gro_receive(rq->cq.napi, skb);
+ mlx5_tc_rep_post_napi_receive(&tc_priv);
+
free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
+
+void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe)
+{
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
+ u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ struct mlx5e_tc_update_priv tc_priv = {};
+ struct mlx5e_rx_wqe_ll *wqe;
+ struct mlx5_wq_ll *wq;
+ struct sk_buff *skb;
+ u16 cqe_bcnt;
+
+ wi->consumed_strides += cstrides;
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ rq->stats->wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
+ goto mpwrq_cqe_out;
+ }
+
+ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+ skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+ mlx5e_skb_from_cqe_mpwrq_linear,
+ mlx5e_skb_from_cqe_mpwrq_nonlinear,
+ rq, wi, cqe_bcnt, head_offset, page_idx);
+ if (!skb)
+ goto mpwrq_cqe_out;
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ goto mpwrq_cqe_out;
+
+ napi_gro_receive(rq->cq.napi, skb);
+
+ mlx5_tc_rep_post_napi_receive(&tc_priv);
+
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ return;
+
+ wq = &rq->mpwqe.wq;
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
+ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
#endif
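The offset arithmetic at the top of mlx5e_handle_rx_cqe_mpwrq_rep() deserves a worked example: a multi-packet WQE is a run of fixed-size strides spanning several pages, so the CQE's stride index is turned into a byte offset and then split into a page index plus an offset within that page. A standalone sketch with hypothetical sizes (256 B strides, 4 KiB pages):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)

    int main(void)
    {
    	uint32_t log_stride_sz = 8;	/* 256 B strides (hypothetical) */
    	uint32_t stride_ix = 18;
    	uint32_t wqe_offset  = stride_ix << log_stride_sz;
    	uint32_t head_offset = wqe_offset & (PAGE_SIZE - 1);
    	uint32_t page_idx    = wqe_offset >> PAGE_SHIFT;

    	/* prints: wqe_offset=4608 head_offset=512 page_idx=1 */
    	printf("wqe_offset=%u head_offset=%u page_idx=%u\n",
    	       wqe_offset, head_offset, page_idx);
    	return 0;
    }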
struct sk_buff *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ec5fc52bf572..10f705761666 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,14 +51,18 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "eswitch_offloads_chains.h"
+#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
+#include "en/mapping.h"
+#include "en/tc_ct.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
@@ -84,6 +88,7 @@ enum {
MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
+ MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -134,6 +139,8 @@ struct mlx5e_tc_flow {
refcount_t refcnt;
struct rcu_head rcu_head;
struct completion init_done;
+ int tunnel_id; /* the mapped tunnel id of this flow */
+
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -144,15 +151,118 @@ struct mlx5e_tc_flow_parse_attr {
const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
- int num_mod_hdr_actions;
- int max_mod_hdr_actions;
- void *mod_hdr_actions;
+ struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
+struct tunnel_match_key {
+ struct flow_dissector_key_control enc_control;
+ struct flow_dissector_key_keyid enc_key_id;
+ struct flow_dissector_key_ports enc_tp;
+ struct flow_dissector_key_ip enc_ip;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
+
+ int filter_ifindex;
+};
+
+/* The tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS wide:
+ * the upper TUNNEL_INFO_BITS hold the general tunnel info id,
+ * the lower ENC_OPTS_BITS hold the enc_opts id.
+ */
+#define TUNNEL_INFO_BITS 6
+#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
+#define ENC_OPTS_BITS 2
+#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
+#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
+#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
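These masks define an 8-bit register value: the upper 6 bits carry the tun_info mapping id, the lower 2 bits the enc_opts mapping id, and (as mlx5e_get_flow_tunnel_id() below shows) the match mask drops the enc_opts bits when no options were mapped. A standalone sketch of the packing and unpacking, with GENMASK re-created for userspace and hypothetical ids:

    #include <stdint.h>
    #include <stdio.h>

    #define ENC_OPTS_BITS	2
    #define TUNNEL_ID_BITS	(6 + ENC_OPTS_BITS)
    #define GENMASK_U32(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))
    #define ENC_OPTS_BITS_MASK	GENMASK_U32(ENC_OPTS_BITS - 1, 0)
    #define TUNNEL_ID_MASK	GENMASK_U32(TUNNEL_ID_BITS - 1, 0)

    int main(void)
    {
    	uint32_t tun_id = 5, enc_opts_id = 3;	/* hypothetical mapping ids */
    	uint32_t value = tun_id << ENC_OPTS_BITS | enc_opts_id;
    	uint32_t mask = enc_opts_id ? TUNNEL_ID_MASK :
    				      (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

    	printf("value=0x%x mask=0x%x\n", value, mask);	/* 0x17 0xff */
    	printf("tun_id=%u enc_opts_id=%u\n",		/* 5 3 */
    	       value >> ENC_OPTS_BITS, value & ENC_OPTS_BITS_MASK);
    	return 0;
    }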
+
+struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
+ [CHAIN_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
+ .moffset = 0,
+ .mlen = 2,
+ },
+ [TUNNEL_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
+ .moffset = 3,
+ .mlen = 1,
+ .soffset = MLX5_BYTE_OFF(fte_match_param,
+ misc_parameters_2.metadata_reg_c_1),
+ },
+ [ZONE_TO_REG] = zone_to_reg_ct,
+ [CTSTATE_TO_REG] = ctstate_to_reg_ct,
+ [MARK_TO_REG] = mark_to_reg_ct,
+ [LABELS_TO_REG] = labels_to_reg_ct,
+ [FTEID_TO_REG] = fteid_to_reg_ct,
+ [TUPLEID_TO_REG] = tupleid_to_reg_ct,
+};
+
+static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+
+void
+mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data,
+ u32 mask)
+{
+ int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ void *headers_c = spec->match_criteria;
+ void *headers_v = spec->match_value;
+ void *fmask, *fval;
+
+ fmask = headers_c + soffset;
+ fval = headers_v + soffset;
+
+ mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
+ data = cpu_to_be32(data) >> (32 - (match_len * 8));
+
+ memcpy(fmask, &mask, match_len);
+ memcpy(fval, &data, match_len);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
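The cpu_to_be32()-then-shift dance above writes the low mlen bytes of a host-order value into the match buffer in network byte order: the byte swap moves the interesting bytes to the top of the word, and the shift brings them back down so the memcpy picks them up first. A standalone sketch of the same trick, assuming a little-endian host and glibc's <endian.h>:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void write_be_field(uint8_t *dst, uint32_t val, int mlen)
    {
    	uint32_t v = htobe32(val) >> (32 - mlen * 8);

    	memcpy(dst, &v, mlen);
    }

    int main(void)
    {
    	uint8_t buf[4] = {0};

    	write_be_field(buf, 0x1234, 2);		/* a 2-byte register field */
    	printf("%02x %02x\n", buf[0], buf[1]);	/* prints: 12 34 */
    	return 0;
    }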
+
+int
+mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data)
+{
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
+ int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
+ int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ char *modact;
+ int err;
+
+ err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr_acts);
+ if (err)
+ return err;
+
+ modact = mod_hdr_acts->actions +
+ (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+
+ /* The firmware length field is 5 bits wide, and 0 means 32 bits */
+ if (mlen == 4)
+ mlen = 0;
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field, mfield);
+ MLX5_SET(set_action_in, modact, offset, moffset * 8);
+ MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, data, data);
+ mod_hdr_acts->num_actions++;
+
+ return 0;
+}
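Two encoding details above are easy to miss: moffset and mlen are byte-granular while the set_action_in offset/length fields are bit-granular, and a full 4-byte write must be encoded as length 0 because the firmware's 5-bit length field cannot represent 32. A small sketch of just that encoding (not driver code):

    /* e.g. CHAIN_TO_REG (moffset 0, mlen 2) -> offset 0, length 16;
     * a 4-byte field (mlen 4) -> offset 0, length 0 (= 32 bits).
     */
    static void encode_set_action(int moffset, int mlen,
    			      int *offset_bits, int *length_bits)
    {
    	if (mlen == 4)		/* 0 means "all 32 bits" to firmware */
    		mlen = 0;
    	*offset_bits = moffset * 8;
    	*length_bits = mlen * 8;
    }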
+
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -210,8 +320,6 @@ struct mlx5e_mod_hdr_entry {
int compl_result;
};
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
-
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
@@ -361,10 +469,10 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mod_hdr_key key;
u32 hash_key;
- num_actions = parse_attr->num_mod_hdr_actions;
+ num_actions = parse_attr->mod_hdr_acts.num_actions;
actions_size = MLX5_MH_ACT_SZ * num_actions;
- key.actions = parse_attr->mod_hdr_actions;
+ key.actions = parse_attr->mod_hdr_acts.actions;
key.num_actions = num_actions;
hash_key = hash_mod_hdr_info(&key);
@@ -954,7 +1062,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
flow_act.modify_hdr = attr->modify_hdr;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1043,8 +1151,16 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
+ if (flow_flag_test(flow, CT)) {
+ mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
+
+ return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
+ mod_hdr_acts);
+ }
+
rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
if (IS_ERR(rule))
return rule;
@@ -1063,10 +1179,15 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_esw_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
+ if (flow_flag_test(flow, CT)) {
+ mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
+ return;
+ }
+
if (attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1076,17 +1197,17 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *slow_attr)
+ struct mlx5_flow_spec *spec)
{
+ struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
- memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
+ slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr.split_count = 0;
+ slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
if (!IS_ERR(rule))
flow_flag_set(flow, SLOW);
@@ -1095,14 +1216,15 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *slow_attr)
+ struct mlx5e_tc_flow *flow)
{
- memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+ struct mlx5_esw_flow_attr slow_attr;
+
+ memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
+ slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr.split_count = 0;
+ slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1173,7 +1295,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int out_index;
if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
- NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}
@@ -1184,13 +1307,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
*/
max_chain = mlx5_esw_chains_get_chain_range(esw);
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
- NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
max_prio = mlx5_esw_chains_get_prio_range(esw);
if (attr->prio > max_prio) {
- NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested priority is out of supported range");
return -EOPNOTSUPP;
}
@@ -1218,9 +1343,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
return err;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1237,14 +1363,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid) {
- /* continue with goto slow path rule instead */
- struct mlx5_esw_flow_attr slow_attr;
-
- flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
- } else {
+ if (!encap_valid)
+ flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
+ else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
- }
if (IS_ERR(flow->rule[0]))
return PTR_ERR(flow->rule[0]);
@@ -1272,9 +1394,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5_esw_flow_attr slow_attr;
int out_index;
+ mlx5e_put_flow_tunnel_id(flow);
+
if (flow_flag_test(flow, NOT_READY)) {
remove_unready_flow(flow);
kvfree(attr->parse_attr);
@@ -1283,7 +1406,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))
- mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
@@ -1312,7 +1435,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr slow_attr, *esw_attr;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
@@ -1365,7 +1488,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
flow->rule[0] = rule;
/* was unset when slow path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -1377,7 +1500,6 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
@@ -1389,7 +1511,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
- rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
/* mark the flow's encap dest as non-valid */
flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
@@ -1664,150 +1786,272 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
}
}
+static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_action *flow_action = &rule->action;
+ const struct flow_action_entry *act;
+ int i;
-static int parse_tunnel_attr(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct net_device *filter_dev, u8 *match_level)
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_GOTO:
+ return true;
+ default:
+ continue;
+ }
+ }
+
+ return false;
+}
+
+static int
+enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
+ struct flow_dissector_key_enc_opts *opts,
+ struct netlink_ext_ack *extack,
+ bool *dont_care)
+{
+ struct geneve_opt *opt;
+ int off = 0;
+
+ *dont_care = true;
+
+ while (opts->len > off) {
+ opt = (struct geneve_opt *)&opts->data[off];
+
+ if (!(*dont_care) || opt->opt_class || opt->type ||
+ memchr_inv(opt->opt_data, 0, opt->length * 4)) {
+ *dont_care = false;
+
+ if (opt->opt_class != U16_MAX ||
+ opt->type != U8_MAX ||
+ memchr_inv(opt->opt_data, 0xFF,
+ opt->length * 4)) {
+ NL_SET_ERR_MSG(extack,
+ "Partial match of tunnel options in chain > 0 isn't supported");
+ netdev_warn(priv->netdev,
+ "Partial match of tunnel options in chain > 0 isn't supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ off += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ return 0;
+}
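The walk above steps through geneve options as a TLV list: each option is a fixed header plus opt->length 4-byte data words, so the cursor advances by sizeof(struct geneve_opt) + opt->length * 4. A standalone sketch of the same walk over a simplified, host-order option layout (the kernel's struct geneve_opt uses a __be16 class and bitfields):

    #include <stdint.h>
    #include <stdio.h>

    struct opt_hdr {		/* simplified; 4-byte header */
    	uint16_t opt_class;
    	uint8_t  type;
    	uint8_t  length;	/* data length in 4-byte words */
    	uint8_t  opt_data[];
    };

    static void walk_opts(const uint8_t *data, int len)
    {
    	int off = 0;

    	while (len > off) {
    		const struct opt_hdr *opt =
    			(const struct opt_hdr *)&data[off];

    		printf("class=%u type=%u words=%u\n",
    		       (unsigned)opt->opt_class, (unsigned)opt->type,
    		       (unsigned)opt->length);
    		off += sizeof(*opt) + opt->length * 4;
    	}
    }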
+
+#define COPY_DISSECTOR(rule, diss_key, dst)\
+({ \
+ struct flow_rule *__rule = (rule);\
+ typeof(dst) __dst = dst;\
+\
+ memcpy(__dst,\
+ skb_flow_dissector_target(__rule->match.dissector,\
+ diss_key,\
+ __rule->match.key),\
+ sizeof(*__dst));\
+})
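The __rule and __dst temporaries in COPY_DISSECTOR() are standard statement-expression hygiene: each macro argument is evaluated exactly once, so callers may pass expressions with side effects without them running multiple times. The same idea in a generic, self-contained form:

    #include <string.h>

    /* Copy *src into *dst, evaluating each argument exactly once. */
    #define COPY_ONCE(src, dst)				\
    ({							\
    	typeof(src) __src = (src);			\
    	typeof(dst) __dst = (dst);			\
    							\
    	memcpy(__dst, __src, sizeof(*__dst));		\
    })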
+
+static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct flow_cls_offload *f,
+ struct net_device *filter_dev)
{
- struct netlink_ext_ack *extack = f->common.extack;
- void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers);
- void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
+ struct flow_match_enc_opts enc_opts_match;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tunnel_match_key tunnel_key;
+ bool enc_opts_is_dont_care = true;
+ u32 tun_id, enc_opts_id = 0;
+ struct mlx5_eswitch *esw;
+ u32 value, mask;
int err;
- err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
- headers_c, headers_v, match_level);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "failed to parse tunnel attributes");
+ esw = priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ memset(&tunnel_key, 0, sizeof(tunnel_key));
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ &tunnel_key.enc_control);
+ if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ &tunnel_key.enc_ipv4);
+ else
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+ &tunnel_key.enc_ipv6);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
+ &tunnel_key.enc_tp);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
+ &tunnel_key.enc_key_id);
+ tunnel_key.filter_ifindex = filter_dev->ifindex;
+
+ err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
+ if (err)
return err;
- }
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_match_control match;
- u16 addr_type;
+ flow_rule_match_enc_opts(rule, &enc_opts_match);
+ err = enc_opts_is_dont_care_or_full_match(priv,
+ enc_opts_match.mask,
+ extack,
+ &enc_opts_is_dont_care);
+ if (err)
+ goto err_enc_opts;
- flow_rule_match_enc_control(rule, &match);
- addr_type = match.key->addr_type;
+ if (!enc_opts_is_dont_care) {
+ err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_match.key, &enc_opts_id);
+ if (err)
+ goto err_enc_opts;
+ }
- /* For tunnel addr_type used same key id`s as for non-tunnel */
- if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_match_ipv4_addrs match;
+ value = tun_id << ENC_OPTS_BITS | enc_opts_id;
+ mask = enc_opts_id ? TUNNEL_ID_MASK :
+ (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
- flow_rule_match_enc_ipv4_addrs(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(match.mask->src));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(match.key->src));
+ if (attr->chain) {
+ mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
+ TUNNEL_TO_REG, value, mask);
+ } else {
+ mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
+ err = mlx5e_tc_match_to_reg_set(priv->mdev,
+ mod_hdr_acts,
+ TUNNEL_TO_REG, value);
+ if (err)
+ goto err_set;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(match.mask->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(match.key->dst));
-
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IP);
- } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_match_ipv6_addrs match;
-
- flow_rule_match_enc_ipv6_addrs(rule, &match);
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
-
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
-
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IPV6);
- }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
- struct flow_match_ip match;
+ flow->tunnel_id = value;
+ return 0;
- flow_rule_match_enc_ip(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
- match.mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
- match.key->tos & 0x3);
+err_set:
+ if (enc_opts_id)
+ mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id);
+err_enc_opts:
+ mapping_remove(uplink_priv->tunnel_mapping, tun_id);
+ return err;
+}
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
- match.mask->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
- match.key->tos >> 2);
+static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
+{
+ u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
+ u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5_eswitch *esw;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
- match.mask->ttl);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
- match.key->ttl);
+ esw = flow->priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ if (tun_id)
+ mapping_remove(uplink_priv->tunnel_mapping, tun_id);
+ if (enc_opts_id)
+ mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id);
+}
- if (match.mask->ttl &&
- !MLX5_CAP_ESW_FLOWTABLE_FDB
- (priv->mdev,
- ft_field_support.outer_ipv4_ttl)) {
+u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
+{
+ return flow->tunnel_id;
+}
+
+static int parse_tunnel_attr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct net_device *filter_dev,
+ u8 *match_level,
+ bool *match_inner)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct netlink_ext_ack *extack = f->common.extack;
+ bool needs_mapping, sets_mapping;
+ int err;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return -EOPNOTSUPP;
+
+ needs_mapping = !!flow->esw_attr->chain;
+ sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
+ *match_inner = !needs_mapping;
+
+ if ((needs_mapping || sets_mapping) &&
+ !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ NL_SET_ERR_MSG(extack,
+ "Chains on tunnel devices isn't supported without register loopback support");
+ netdev_warn(priv->netdev,
+ "Chains on tunnel devices isn't supported without register loopback support");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow->esw_attr->chain) {
+ err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
+ match_level);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Matching on TTL is not supported");
- return -EOPNOTSUPP;
+ "Failed to parse tunnel attributes");
+ netdev_warn(priv->netdev,
+ "Failed to parse tunnel attributes");
+ return err;
}
+ flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
- /* Enforce DMAC when offloading incoming tunneled flows.
- * Flow counters require a match on the DMAC.
- */
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dmac_47_16), priv->netdev->dev_addr);
+ if (!needs_mapping && !sets_mapping)
+ return 0;
- /* let software handle IP fragments */
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+ return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
+}
- return 0;
+static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ inner_headers);
}
-static void *get_match_headers_criteria(u32 flags,
- struct mlx5_flow_spec *spec)
+static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
- return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
- MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- inner_headers) :
- MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers);
+ return MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ inner_headers);
+}
+
+static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+}
+
+static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
}
static void *get_match_headers_value(u32 flags,
struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
- MLX5_ADDR_OF(fte_match_param, spec->match_value,
- inner_headers) :
- MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers);
+ get_match_inner_headers_value(spec) :
+ get_match_outer_headers_value(spec);
+}
+
+static void *get_match_headers_criteria(u32 flags,
+ struct mlx5_flow_spec *spec)
+{
+ return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+ get_match_inner_headers_criteria(spec) :
+ get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
@@ -1824,7 +2068,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
flow_rule_match_meta(rule, &match);
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
ingress_dev = __dev_get_by_index(dev_net(filter_dev),
@@ -1832,19 +2076,20 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't find the ingress port to match on");
- return -EINVAL;
+ return -ENOENT;
}
if (ingress_dev != filter_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't match on the ingress filter port");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
return 0;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
@@ -1885,6 +2130,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_CT) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
@@ -1894,18 +2140,22 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
if (mlx5e_get_tc_tun(filter_dev)) {
- if (parse_tunnel_attr(priv, spec, f, filter_dev,
- outer_match_level))
- return -EOPNOTSUPP;
+ bool match_inner = false;
- /* At this point, header pointers should point to the inner
- * headers, outer header were already set by parse_tunnel_attr
- */
- match_level = inner_match_level;
- headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
- spec);
- headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
- spec);
+ err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
+ outer_match_level, &match_inner);
+ if (err)
+ return err;
+
+ if (match_inner) {
+ /* header pointers should point to the inner headers
+ * if the packet was decapsulated already.
+ * outer headers are set by parse_tunnel_attr.
+ */
+ match_level = inner_match_level;
+ headers_c = get_match_inner_headers_criteria(spec);
+ headers_v = get_match_inner_headers_value(spec);
+ }
}
err = mlx5e_flower_parse_meta(filter_dev, f);
@@ -2222,8 +2472,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
inner_match_level = MLX5_MATCH_NONE;
outer_match_level = MLX5_MATCH_NONE;
- err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
- &outer_match_level);
+ err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
+ &inner_match_level, &outer_match_level);
non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
outer_match_level : inner_match_level;
@@ -2383,25 +2633,26 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
-/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
- * max from the SW pedit action. On success, attr->num_mod_hdr_actions
- * says how many HW actions were actually parsed.
- */
-static int offload_pedit_fields(struct pedit_headers_action *hdrs,
+static int offload_pedit_fields(struct mlx5e_priv *priv,
+ int namespace,
+ struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, nactions, max_actions, first, last, next_z;
+ int i, action_size, first, last, next_z;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5_fields *f;
unsigned long mask;
__be32 mask_be32;
__be16 mask_be16;
+ int err;
u8 cmd;
+ mod_acts = &parse_attr->mod_hdr_acts;
headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
@@ -2411,11 +2662,6 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
add_vals = &hdrs[1].vals;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- action = parse_attr->mod_hdr_actions +
- parse_attr->num_mod_hdr_actions * action_size;
-
- max_actions = parse_attr->max_mod_hdr_actions;
- nactions = parse_attr->num_mod_hdr_actions;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2441,13 +2687,6 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return -EOPNOTSUPP;
}
- if (nactions == max_actions) {
- NL_SET_ERR_MSG_MOD(extack,
- "too many pedit actions, can't offload");
- printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
- return -EOPNOTSUPP;
- }
-
skip = false;
if (s_mask) {
void *match_mask = headers_c + f->match_offset;
@@ -2495,6 +2734,18 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return -EOPNOTSUPP;
}
+ err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "too many pedit actions, can't offload");
+ mlx5_core_warn(priv->mdev,
+ "mlx5: parsed %d pedit actions, can't do more\n",
+ mod_acts->num_actions);
+ return err;
+ }
+
+ action = mod_acts->actions +
+ (mod_acts->num_actions * action_size);
MLX5_SET(set_action_in, action, action_type, cmd);
MLX5_SET(set_action_in, action, field, f->field);
@@ -2517,11 +2768,9 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
else if (f->field_bsize == 8)
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
- action += action_size;
- nactions++;
+ ++mod_acts->num_actions;
}
- parse_attr->num_mod_hdr_actions = nactions;
return 0;
}
@@ -2534,34 +2783,52 @@ static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
}
-static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
- struct pedit_headers_action *hdrs,
- int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
+ int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
- int nkeys, action_size, max_actions;
+ int action_size, new_num_actions, max_hw_actions;
+ size_t new_sz, old_sz;
+ void *ret;
- nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
+ return 0;
- max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
- /* can get up to crazingly 16 HW actions in 32 bits pedit SW key */
- max_actions = min(max_actions, nkeys * 16);
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
- if (!parse_attr->mod_hdr_actions)
+ max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
+ namespace);
+ new_num_actions = min(max_hw_actions,
+ mod_hdr_acts->actions ?
+ mod_hdr_acts->max_actions * 2 : 1);
+ if (mod_hdr_acts->max_actions == new_num_actions)
+ return -ENOSPC;
+
+ new_sz = action_size * new_num_actions;
+ old_sz = mod_hdr_acts->max_actions * action_size;
+ ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
+ if (!ret)
return -ENOMEM;
- parse_attr->max_mod_hdr_actions = max_actions;
+ memset(ret + old_sz, 0, new_sz - old_sz);
+ mod_hdr_acts->actions = ret;
+ mod_hdr_acts->max_actions = new_num_actions;
+
return 0;
}
+void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ kfree(mod_hdr_acts->actions);
+ mod_hdr_acts->actions = NULL;
+ mod_hdr_acts->num_actions = 0;
+ mod_hdr_acts->max_actions = 0;
+}
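alloc_mod_hdr_actions() now grows the action array on demand: capacity doubles from 1 up to the per-namespace hardware cap, the new tail is zeroed, and -ENOSPC is returned once the cap is hit with the array full. A userspace sketch of the same growth policy (names illustrative, not driver API):

    #include <stdlib.h>
    #include <string.h>

    struct acts {
    	int num, max;		/* used / allocated entries */
    	void *buf;
    };

    static int grow(struct acts *a, size_t entry_sz, int hw_max)
    {
    	int new_max;
    	void *p;

    	if (a->num < a->max)
    		return 0;			/* still has room */

    	new_max = a->buf ? a->max * 2 : 1;
    	if (new_max > hw_max)
    		new_max = hw_max;
    	if (new_max == a->max)
    		return -1;			/* cap reached (-ENOSPC) */

    	p = realloc(a->buf, new_max * entry_sz);
    	if (!p)
    		return -1;			/* -ENOMEM */
    	memset((char *)p + a->max * entry_sz, 0,
    	       (new_max - a->max) * entry_sz);
    	a->buf = p;
    	a->max = new_max;
    	return 0;
    }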
+
static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
@@ -2609,13 +2876,8 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
int err;
u8 cmd;
- if (!parse_attr->mod_hdr_actions) {
- err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
- if (err)
- goto out_err;
- }
-
- err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
+ err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
+ action_flags, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
@@ -2635,8 +2897,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
return 0;
out_dealloc_parsed_actions:
- kfree(parse_attr->mod_hdr_actions);
-out_err:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
return err;
}
@@ -2681,7 +2942,9 @@ struct ipv6_hoplimit_word {
__u8 hop_limit;
};
-static bool is_action_keys_supported(const struct flow_action_entry *act)
+static int is_action_keys_supported(const struct flow_action_entry *act,
+ bool ct_flow, bool *modify_ip_header,
+ struct netlink_ext_ack *extack)
{
u32 mask, offset;
u8 htype;
@@ -2700,7 +2963,13 @@ static bool is_action_keys_supported(const struct flow_action_entry *act)
if (offset != offsetof(struct iphdr, ttl) ||
ttl_word->protocol ||
ttl_word->check) {
- return true;
+ *modify_ip_header = true;
+ }
+
+ if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of ipv4 address with action ct");
+ return -EOPNOTSUPP;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
@@ -2709,15 +2978,27 @@ static bool is_action_keys_supported(const struct flow_action_entry *act)
if (offset != offsetof(struct ipv6hdr, payload_len) ||
hoplimit_word->payload_len ||
hoplimit_word->nexthdr) {
- return true;
+ *modify_ip_header = true;
}
+
+ if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of ipv6 address with action ct");
+ return -EOPNOTSUPP;
+ }
+ } else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
+ htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of transport header ports with action ct");
+ return -EOPNOTSUPP;
}
- return false;
+
+ return 0;
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
- u32 actions,
+ u32 actions, bool ct_flow,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
@@ -2725,7 +3006,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
void *headers_v;
u16 ethertype;
u8 ip_proto;
- int i;
+ int i, err;
headers_v = get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
@@ -2740,10 +3021,10 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
act->id != FLOW_ACTION_ADD)
continue;
- if (is_action_keys_supported(act)) {
- modify_ip_header = true;
- break;
- }
+ err = is_action_keys_supported(act, ct_flow,
+ &modify_ip_header, extack);
+ if (err)
+ return err;
}
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
@@ -2765,27 +3046,38 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
+ bool ct_flow;
u32 actions;
- if (mlx5e_is_eswitch_flow(flow))
+ ct_flow = flow_flag_test(flow, CT);
+ if (mlx5e_is_eswitch_flow(flow)) {
actions = flow->esw_attr->action;
- else
- actions = flow->nic_attr->action;
- if (flow_flag_test(flow, EGRESS) &&
- !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
- return false;
+ if (flow->esw_attr->split_count && ct_flow) {
+ /* All registers used by ct are cleared when using
+ * split rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't offload mirroring with action ct");
+ return false;
+ }
+ } else {
+ actions = flow->nic_attr->action;
+ }
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return modify_header_match_supported(&parse_attr->spec,
flow_action, actions,
- extack);
+ ct_flow, extack);
return true;
}
+static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+ return priv->mdev == peer_priv->mdev;
+}
+
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
@@ -2837,8 +3129,7 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
- err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
- hdrs, NULL);
+ err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -2883,6 +3174,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (!flow_action_has_entries(flow_action))
return -EINVAL;
+ if (!flow_action_hw_stats_check(flow_action, extack,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ return -EOPNOTSUPP;
+
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
flow_action_for_each(i, act, flow_action) {
@@ -2900,7 +3195,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, extack);
+ hdrs, extack);
if (err)
return err;
@@ -2969,9 +3264,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
/* in case all pedit actions are skipped, remove the MOD_HDR
* flag.
*/
- if (parse_attr->num_mod_hdr_actions == 0) {
+ if (parse_attr->mod_hdr_acts.num_actions == 0) {
action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
}
}
@@ -3001,7 +3296,7 @@ static inline int hash_encap_info(struct encap_key *key)
}
-static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
{
struct mlx5e_priv *peer_priv;
@@ -3009,13 +3304,11 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
peer_priv = netdev_priv(peer_netdev);
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
- mlx5e_eswitch_rep(priv->netdev) &&
- mlx5e_eswitch_rep(peer_netdev) &&
+ mlx5e_eswitch_vf_rep(priv->netdev) &&
+ mlx5e_eswitch_vf_rep(peer_netdev) &&
same_hw_devs(priv, peer_priv));
}
-
-
bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
{
return refcount_inc_not_zero(&e->refcnt);
@@ -3269,12 +3562,13 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr,
u32 *action)
{
- int nest_level = attr->parse_attr->filter_dev->lower_level;
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
};
- int err = 0;
+ int nest_level, err = 0;
+ nest_level = attr->parse_attr->filter_dev->lower_level -
+ priv->netdev->lower_level;
while (nest_level--) {
err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
if (err)
@@ -3284,14 +3578,37 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
return err;
}
+static bool same_hw_reps(struct mlx5e_priv *priv,
+ struct net_device *peer_netdev)
+{
+ struct mlx5e_priv *peer_priv;
+
+ peer_priv = netdev_priv(peer_netdev);
+
+ return mlx5e_eswitch_rep(priv->netdev) &&
+ mlx5e_eswitch_rep(peer_netdev) &&
+ same_hw_devs(priv, peer_priv);
+}
+
+static bool is_lag_dev(struct mlx5e_priv *priv,
+ struct net_device *peer_netdev)
+{
+ return ((mlx5_lag_is_sriov(priv->mdev) ||
+ mlx5_lag_is_multipath(priv->mdev)) &&
+ same_hw_reps(priv, peer_netdev));
+}
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev)
{
- if (is_merged_eswitch_dev(priv, out_dev))
+ if (is_merged_eswitch_vfs(priv, out_dev))
+ return true;
+
+ if (is_lag_dev(priv, out_dev))
return true;
return mlx5e_eswitch_rep(out_dev) &&
- same_hw_devs(priv, netdev_priv(out_dev));
+ same_port_devs(priv, netdev_priv(out_dev));
}
static bool is_duplicated_output_device(struct net_device *dev,
@@ -3314,6 +3631,85 @@ static bool is_duplicated_output_device(struct net_device *dev,
return false;
}
+static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ u32 actions,
+ struct netlink_ext_ack *extack)
+{
+ u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_esw_chains_backwards_supported(esw) &&
+ dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int verify_uplink_forwarding(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_rep_priv *rep_priv;
+
+ /* Forwarding non-encapsulated traffic between
+ * uplink ports is allowed only if the
+ * termination_table_raw_traffic cap is set.
+ *
+ * The input vport was stored in esw_attr->in_rep.
+ * In the LAG case, *priv* is the private data of the
+ * uplink, which may not be the input vport.
+ */
+ rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
+
+ if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
+ mlx5e_eswitch_uplink_rep(out_dev)))
+ return 0;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
+ termination_table_raw_traffic)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are both uplink, can't offload forwarding");
+ pr_err("devices %s %s are both uplink, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EOPNOTSUPP;
+ } else if (out_dev != rep_priv->netdev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not the same uplink, can't offload forwarding");
+ pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
@@ -3328,13 +3724,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ bool encap = false, decap = false;
+ u32 action = attr->action;
int err, i, if_count = 0;
- bool encap = false;
- u32 action = 0;
if (!flow_action_has_entries(flow_action))
return -EINVAL;
+ if (!flow_action_hw_stats_check(flow_action, extack,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -3344,7 +3744,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, extack);
+ hdrs, extack);
if (err)
return err;
@@ -3382,8 +3782,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
- pr_err("can't support more than %d output ports, can't offload forwarding\n",
- attr->out_count);
+ netdev_warn(priv->netdev,
+ "can't support more than %d output ports, can't offload forwarding\n",
+ attr->out_count);
return -EOPNOTSUPP;
}
@@ -3441,11 +3842,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
+ err = verify_uplink_forwarding(priv, flow, out_dev, extack);
+ if (err)
+ return err;
+
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
- pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
}
@@ -3464,8 +3867,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
- pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
+ netdev_warn(priv->netdev,
+ "devices %s %s not on same switch HW, can't offload forwarding\n",
+ priv->netdev->name,
+ out_dev->name);
return -EINVAL;
}
}
@@ -3507,28 +3912,24 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->split_count = attr->out_count;
break;
case FLOW_ACTION_TUNNEL_DECAP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ decap = true;
break;
- case FLOW_ACTION_GOTO: {
- u32 dest_chain = act->chain_index;
- u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
+ case FLOW_ACTION_GOTO:
+ err = mlx5_validate_goto_chain(esw, flow, act, action,
+ extack);
+ if (err)
+ return err;
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain <= attr->chain) {
- NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
- }
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = dest_chain;
+ attr->dest_chain = act->chain_index;
+ break;
+ case FLOW_ACTION_CT:
+ err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
+ if (err)
+ return err;
+
+ flow_flag_set(flow, CT);
break;
- }
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
return -EOPNOTSUPP;
@@ -3557,9 +3958,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
* flag. we might have set split_count either by pedit or
* pop/push. if there is no pop/push either, reset it too.
*/
- if (parse_attr->num_mod_hdr_actions == 0) {
+ if (parse_attr->mod_hdr_acts.num_actions == 0) {
action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
(action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
attr->split_count = 0;
@@ -3571,8 +3972,25 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
if (attr->dest_chain) {
+ if (decap) {
+ /* This could be supported if we created a mapping for
+ * the tunnel device only (without the tunnel), and set
+ * that tunnel id on this decap flow.
+ *
+ * On restore (miss), we would then just set the saved
+ * tunnel device.
+ */
+
+ NL_SET_ERR_MSG(extack,
+ "Decap with goto isn't supported");
+ netdev_warn(priv->netdev,
+ "Decap with goto isn't supported");
+ return -EOPNOTSUPP;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -3580,7 +3998,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!(attr->action &
(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rule must have at least one forward/drop action");
return -EOPNOTSUPP;
}
@@ -3751,6 +4170,10 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
+ if (err)
+ goto err_free;
+
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
complete_all(&flow->init_done);
if (err) {
@@ -4035,7 +4458,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
goto errout;
}
- if (mlx5e_is_offloaded_flow(flow)) {
+ if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
counter = mlx5e_tc_get_counter(flow);
if (!counter)
goto errout;
@@ -4069,7 +4492,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
no_peer_counter:
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
- flow_stats_update(&f->stats, bytes, packets, lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
trace_mlx5e_stats_flower(f);
errout:
mlx5e_flow_put(priv, flow);
@@ -4126,6 +4550,9 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
@@ -4147,8 +4574,14 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = ma->common.extack;
+ if (!mlx5_esw_qos_enabled(esw)) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
@@ -4177,7 +4610,8 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
rpriv->prev_vf_vport_stats = cur_stats;
- flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
+ flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
+ FLOW_ACTION_HW_STATS_DELAYED);
}
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
@@ -4295,12 +4729,63 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
- return rhashtable_init(tc_ht, &tc_ht_params);
+ const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *priv;
+ struct mapping_ctx *mapping;
+ int err;
+
+ uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+ priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+
+ err = mlx5_tc_ct_init(uplink_priv);
+ if (err)
+ goto err_ct;
+
+ mapping = mapping_create(sizeof(struct tunnel_match_key),
+ TUNNEL_INFO_BITS_MASK, true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto err_tun_mapping;
+ }
+ uplink_priv->tunnel_mapping = mapping;
+
+ mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto err_enc_opts_mapping;
+ }
+ uplink_priv->tunnel_enc_opts_mapping = mapping;
+
+ err = rhashtable_init(tc_ht, &tc_ht_params);
+ if (err)
+ goto err_ht_init;
+
+ return err;
+
+err_ht_init:
+ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+err_enc_opts_mapping:
+ mapping_destroy(uplink_priv->tunnel_mapping);
+err_tun_mapping:
+ mlx5_tc_ct_clean(uplink_priv);
+err_ct:
+ netdev_warn(priv->netdev,
+ "Failed to initialize tc (eswitch), err: %d", err);
+ return err;
}
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
+ struct mlx5_rep_uplink_priv *uplink_priv;
+
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+
+ uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+ mapping_destroy(uplink_priv->tunnel_mapping);
+
+ mlx5_tc_ct_clean(uplink_priv);
}
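
The init/cleanup pair above acquires resources in order (CT state, tunnel mapping, enc-opts mapping, hashtable) and releases them in reverse, with the error path unwinding only what was already acquired. A minimal userspace sketch of the goto-unwind idiom, with invented resource names (not the driver's):

#include <stdio.h>
#include <stdlib.h>

static int get_a(void) { return 0; }   /* 0 on success */
static int get_b(void) { return 0; }
static int get_c(void) { return -1; }  /* force a failure to show the unwind */
static void put_a(void) { puts("put a"); }
static void put_b(void) { puts("put b"); }

static int demo_init(void)
{
    int err;

    err = get_a();
    if (err)
        goto err_a;
    err = get_b();
    if (err)
        goto err_b;
    err = get_c();
    if (err)
        goto err_c;
    return 0;

err_c:
    put_b();    /* unwind in reverse acquisition order */
err_b:
    put_a();
err_a:
    fprintf(stderr, "init failed, err: %d\n", err);
    return err;
}

int main(void)
{
    return demo_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}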
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
@@ -4332,3 +4817,147 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
}
mutex_unlock(&rpriv->unready_flows_lock);
}
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct flow_dissector_key_enc_opts enc_opts = {};
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct metadata_dst *tun_dst;
+ struct tunnel_match_key key;
+ u32 tun_id, enc_opts_id;
+ struct net_device *dev;
+ int err;
+
+ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
+ tun_id = tunnel_id >> ENC_OPTS_BITS;
+
+ if (!tun_id)
+ return true;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
+ if (err) {
+ WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel for tun_id: %d, err: %d\n",
+ tun_id, err);
+ return false;
+ }
+
+ if (enc_opts_id) {
+ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id, &enc_opts);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
+ enc_opts_id, err);
+ return false;
+ }
+ }
+
+ tun_dst = tun_rx_dst(enc_opts.len);
+ if (!tun_dst) {
+ WARN_ON_ONCE(true);
+ return false;
+ }
+
+ ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+ key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ 0, /* label */
+ key.enc_tp.src, key.enc_tp.dst,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ TUNNEL_KEY);
+
+ if (enc_opts.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
+ enc_opts.len, enc_opts.dst_opt_type);
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ dev = dev_get_by_index(&init_net, key.filter_ifindex);
+ if (!dev) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel device with ifindex: %d\n",
+ key.filter_ifindex);
+ return false;
+ }
+
+ /* Set tun_dev so we do dev_put() after datapath */
+ tc_priv->tun_dev = dev;
+
+ skb->dev = dev;
+
+ return true;
+}
+#endif /* CONFIG_NET_TC_SKB_EXT */
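
mlx5e_restore_tunnel() above unpacks the id that was stored in packet metadata: the low ENC_OPTS_BITS carry the enc-opts mapping index and the remaining high bits carry the tunnel-key index, with tun_id == 0 meaning nothing to restore. A standalone sketch of the arithmetic; the bit width here is illustrative, the driver defines its own:

#include <stdint.h>
#include <stdio.h>

#define ENC_OPTS_BITS       12                        /* illustrative width */
#define ENC_OPTS_BITS_MASK  ((1u << ENC_OPTS_BITS) - 1)

static uint32_t pack(uint32_t tun_id, uint32_t enc_opts_id)
{
    return (tun_id << ENC_OPTS_BITS) | (enc_opts_id & ENC_OPTS_BITS_MASK);
}

int main(void)
{
    uint32_t tunnel_id = pack(5, 3);
    uint32_t enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
    uint32_t tun_id = tunnel_id >> ENC_OPTS_BITS;

    /* tun_id == 0 means "no tunnel to restore", as in the driver */
    printf("tunnel_id=0x%x -> tun_id=%u enc_opts_id=%u\n",
           tunnel_id, tun_id, enc_opts_id);
    return 0;
}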
+
+bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ int tunnel_moffset;
+ int err;
+
+ reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
+ reg_c0 = 0;
+ reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);
+
+ if (!reg_c0)
+ return true;
+
+ priv = netdev_priv(skb->dev);
+ esw = priv->mdev->priv.eswitch;
+
+ err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find chain for chain tag: %d, err: %d\n",
+ reg_c0, err);
+ return false;
+ }
+
+ if (chain) {
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+
+ tc_skb_ext->chain = chain;
+
+ tuple_id = reg_c1 & TUPLE_ID_MAX;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+ return false;
+ }
+
+ tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
+ tunnel_id = reg_c1 >> (8 * tunnel_moffset);
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+ return true;
+}
+
+void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (tc_priv->tun_dev)
+ dev_put(tc_priv->tun_dev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 262cdb7b69b1..abdcfa4c4e0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -91,9 +91,63 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
+enum mlx5e_tc_attr_to_reg {
+ CHAIN_TO_REG,
+ TUNNEL_TO_REG,
+ CTSTATE_TO_REG,
+ ZONE_TO_REG,
+ MARK_TO_REG,
+ LABELS_TO_REG,
+ FTEID_TO_REG,
+ TUPLEID_TO_REG,
+};
+
+struct mlx5e_tc_attr_to_reg_mapping {
+ int mfield; /* rewrite field */
+ int moffset; /* offset of mfield */
+ int mlen; /* bytes to rewrite/match */
+
+ int soffset; /* offset of spec for match */
+};
+
+extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
+
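Each entry in the mappings table above records which metadata register field an attribute lands in, plus the byte offset and length used both for the header-rewrite action and for matching. A hypothetical userspace model of consulting such a table (field ids and widths invented, not the hardware's):

#include <stdio.h>

enum attr_to_reg { CHAIN_TO_REG, TUNNEL_TO_REG, ATTR_MAX }; /* trimmed */

struct reg_mapping {
    int mfield;   /* rewrite field id */
    int moffset;  /* byte offset within the field */
    int mlen;     /* bytes to rewrite/match */
};

/* Illustrative values only; the driver's real table lives in en_tc.c */
static const struct reg_mapping mappings[ATTR_MAX] = {
    [CHAIN_TO_REG]  = { .mfield = 0x51, .moffset = 0, .mlen = 2 },
    [TUNNEL_TO_REG] = { .mfield = 0x51, .moffset = 2, .mlen = 2 },
};

int main(void)
{
    const struct reg_mapping *m = &mappings[CHAIN_TO_REG];

    /* offset/length become bit values when the rewrite action is built */
    printf("field=%#x offset_bits=%d length_bits=%d\n",
           m->mfield, m->moffset * 8, m->mlen * 8);
    return 0;
}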
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev);
+struct mlx5e_tc_update_priv {
+ struct net_device *tun_dev;
+};
+
+bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv);
+
+void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
+
+struct mlx5e_tc_mod_hdr_acts {
+ int num_actions;
+ int max_actions;
+ void *actions;
+};
+
+int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data);
+
+void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data,
+ u32 mask);
+
+int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
+ int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+
+struct mlx5e_tc_flow;
+u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
+
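mlx5e_tc_mod_hdr_acts above is a growable array of fixed-size rewrite actions: alloc ensures room for one more action, dealloc frees the buffer (as the FDB parser does when all pedit actions cancel out). A userspace sketch of that pattern; the action size and growth step are invented:

#include <stdlib.h>
#include <string.h>

#define ACTION_SIZE 16   /* illustrative; the real action size is fixed by hw */

struct mod_hdr_acts {
    int num_actions;
    int max_actions;
    void *actions;
};

/* Grow the buffer so at least one more action fits; 0 on success. */
static int alloc_mod_hdr_actions(struct mod_hdr_acts *acts)
{
    int new_max;
    void *p;

    if (acts->num_actions < acts->max_actions)
        return 0;

    new_max = acts->max_actions ? acts->max_actions * 2 : 8;
    p = realloc(acts->actions, (size_t)new_max * ACTION_SIZE);
    if (!p)
        return -1;

    acts->actions = p;
    acts->max_actions = new_max;
    return 0;
}

static void dealloc_mod_hdr_actions(struct mod_hdr_acts *acts)
{
    free(acts->actions);
    memset(acts, 0, sizeof(*acts));
}

int main(void)
{
    struct mod_hdr_acts acts = { 0 };

    if (alloc_mod_hdr_actions(&acts))
        return 1;
    acts.num_actions++;          /* caller fills actions[num_actions - 1] */
    dealloc_mod_hdr_actions(&acts);
    return 0;
}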
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ee60383adc5b..119a5c6cc167 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -72,8 +72,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
int txq_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev);
- u16 num_channels;
int up = 0;
+ int ch_ix;
if (!netdev_get_num_tc(dev))
return txq_ix;
@@ -86,14 +86,13 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_vlan_tag_present(skb))
up = skb_vlan_tag_get_prio(skb);
- /* txq_ix can be larger than num_channels since
- * dev->num_real_tx_queues = num_channels * num_tc
+ /* Normalize any picked txq_ix to [0, num_channels),
+ * So we can return a txq_ix that matches the channel and
+ * packet UP.
*/
- num_channels = priv->channels.params.num_channels;
- if (txq_ix >= num_channels)
- txq_ix = priv->txq2sq[txq_ix]->ch_ix;
+ ch_ix = priv->txq2sq[txq_ix]->ch_ix;
- return priv->channel_tc2realtxq[txq_ix][up];
+ return priv->channel_tc2realtxq[ch_ix][up];
}
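
The fix above stops indexing channel_tc2realtxq with a raw txq index: whatever txq the stack picked is first normalized to its channel, and the channel plus user priority select the real txq. A worked userspace model with 4 channels and 2 traffic classes (the layout is invented for illustration):

#include <stdio.h>

#define NUM_CHANNELS 4
#define NUM_TC 2

int main(void)
{
    int txq2ch[NUM_CHANNELS * NUM_TC];        /* txq -> channel */
    int ch_tc2realtxq[NUM_CHANNELS][NUM_TC];  /* channel x tc -> real txq */
    int ch, tc;

    for (ch = 0; ch < NUM_CHANNELS; ch++)
        for (tc = 0; tc < NUM_TC; tc++) {
            txq2ch[ch + tc * NUM_CHANNELS] = ch;
            ch_tc2realtxq[ch][tc] = ch + tc * NUM_CHANNELS;
        }

    int picked = 6;              /* any txq in [0, NUM_CHANNELS * NUM_TC) */
    int up = 1;                  /* vlan priority mapped to a tc */
    int ch_ix = txq2ch[picked];  /* normalize to [0, NUM_CHANNELS) */

    printf("picked txq %d -> channel %d -> real txq %d\n",
           picked, ch_ix, ch_tc2realtxq[ch_ix][up]);
    return 0;
}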
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
@@ -538,10 +537,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
+ u32 dma_fifo_cc, nbytes = 0;
+ u16 ci, sqcc, npkts = 0;
struct sk_buff *skb;
- u32 dma_fifo_cc;
- u16 sqcc;
- u16 ci;
int i;
sqcc = sq->cc;
@@ -566,11 +564,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
}
dev_kfree_skb_any(skb);
+ npkts++;
+ nbytes += wi->num_bytes;
sqcc += wi->num_wqebbs;
}
sq->dma_fifo_cc = dma_fifo_cc;
sq->cc = sqcc;
+
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}
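
The new npkts/nbytes accounting reports descriptors flushed from a stopped SQ to byte queue limits (BQL); BQL requires every byte reported sent to eventually be reported completed, including frames freed without a hardware completion. A kernel-style sketch of the pairing this relies on (not standalone; the driver's xmit path is assumed to do the sent-side accounting elsewhere):

#include <linux/netdevice.h>

/* xmit path: tell BQL these bytes were queued to hardware */
static inline void demo_sent(struct netdev_queue *txq, unsigned int bytes)
{
    netdev_tx_sent_queue(txq, bytes);
}

/* completion path, or a queue flush as above: balance the books */
static inline void demo_completed(struct netdev_queue *txq,
                                  unsigned int pkts, unsigned int bytes)
{
    netdev_tx_completed_queue(txq, pkts, bytes);
}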
#ifdef CONFIG_MLX5_CORE_IPOIB
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 800d34ed8a96..acb20215a33b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -31,6 +31,7 @@
*/
#include <linux/irq.h>
+#include <linux/indirect_call_wrapper.h>
#include "en.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
@@ -100,7 +101,10 @@ static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskr
busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
mlx5e_xsk_update_tx_wakeup(xsksq);
- xsk_rx_alloc_err = xskrq->post_wqes(xskrq);
+ xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
+ mlx5e_post_rx_mpwqes,
+ mlx5e_post_rx_wqes,
+ xskrq);
busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);
return busy_xsk;
@@ -143,9 +147,16 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_poll_ico_cq(&c->icosq.cq);
- busy |= rq->post_wqes(rq);
+ busy |= INDIRECT_CALL_2(rq->post_wqes,
+ mlx5e_post_rx_mpwqes,
+ mlx5e_post_rx_wqes,
+ rq);
if (xsk_open) {
- mlx5e_poll_ico_cq(&c->xskicosq.cq);
+ if (mlx5e_poll_ico_cq(&c->xskicosq.cq))
+ /* Don't clear the flag if nothing was polled to prevent
+ * queueing more WQEs and overflowing XSKICOSQ.
+ */
+ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state);
busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
}
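
INDIRECT_CALL_2() compares the function pointer against the two expected candidates and calls the match directly, so the hot path avoids a retpoline-slowed indirect branch. A userspace model of the macro (the kernel's version in linux/indirect_call_wrapper.h also wraps the comparisons in likely()):

#include <stdio.h>

#define INDIRECT_CALL_1(f, f1, ...) \
    ((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))
#define INDIRECT_CALL_2(f, f2, f1, ...) \
    ((f) == (f2) ? f2(__VA_ARGS__) : INDIRECT_CALL_1(f, f1, __VA_ARGS__))

static int post_mpwqes(int budget) { return budget / 2; }
static int post_wqes(int budget)   { return budget; }

int main(void)
{
    int (*post)(int) = post_wqes;   /* set once at ring setup */

    /* compiles to a compare plus direct call, not an indirect call */
    printf("posted %d\n", INDIRECT_CALL_2(post, post_mpwqes, post_wqes, 64));
    return 0;
}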
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index cccea3a8eddd..ce6c621af043 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -611,11 +611,13 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
+ mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
if (err)
goto err1;
mlx5_cmd_use_events(dev);
+ mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
.irq_index = 0,
@@ -645,6 +647,7 @@ err2:
mlx5_cmd_use_polling(dev);
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
+ mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/esw/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
index 4276194b633f..029001040737 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
@@ -5,23 +5,26 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/fs.h>
-#include "eswitch_offloads_chains.h"
+#include "esw/chains.h"
+#include "en/mapping.h"
#include "mlx5_core.h"
#include "fs_core.h"
#include "eswitch.h"
#include "en.h"
+#include "en_tc.h"
#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
+#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping)
#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
#define fdb_ignore_flow_level_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
-
-#define ESW_OFFLOADS_NUM_GROUPS 4
+#define fdb_modify_header_fwd_to_table_supported(esw) \
+ (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
* and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
@@ -36,6 +39,7 @@ static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
128 };
+#define ESW_FT_TBL_SZ (64 * 1024)
struct mlx5_esw_chains_priv {
struct rhashtable chains_ht;
@@ -44,6 +48,7 @@ struct mlx5_esw_chains_priv {
struct mutex lock;
struct mlx5_flow_table *tc_end_fdb;
+ struct mapping_ctx *chains_mapping;
int fdb_left[ARRAY_SIZE(ESW_POOLS)];
};
@@ -54,9 +59,12 @@ struct fdb_chain {
u32 chain;
int ref;
+ int id;
struct mlx5_eswitch *esw;
struct list_head prios_list;
+ struct mlx5_flow_handle *restore_rule;
+ struct mlx5_modify_hdr *miss_modify_hdr;
};
struct fdb_prio_key {
@@ -99,6 +107,12 @@ bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
}
+bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_chains_prios_supported(esw) &&
+ fdb_ignore_flow_level_supported(esw);
+}
+
u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
@@ -198,7 +212,9 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
+ sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
+ mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) :
+ mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
if (!sz)
return ERR_PTR(-ENOSPC);
ft_attr.max_fte = sz;
@@ -234,7 +250,7 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
}
ft_attr.autogroup.num_reserved_entries = 2;
- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+ ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev,
@@ -255,6 +271,83 @@ mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
mlx5_destroy_flow_table(fdb);
}
+static int
+create_fdb_chain_restore(struct fdb_chain *fdb_chain)
+{
+ char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)];
+ struct mlx5_eswitch *esw = fdb_chain->esw;
+ struct mlx5_modify_hdr *mod_hdr;
+ u32 index;
+ int err;
+
+ if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw) ||
+ !mlx5_esw_chains_prios_supported(esw))
+ return 0;
+
+ err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index);
+ if (err)
+ return err;
+ if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
+ /* we got the special default flow tag id, so we won't know
+ * if we actually marked the packet with the restore rule
+ * we create.
+ *
+ * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
+ */
+ err = mapping_add(esw_chains_mapping(esw),
+ &fdb_chain->chain, &index);
+ mapping_remove(esw_chains_mapping(esw),
+ MLX5_FS_DEFAULT_FLOW_TAG);
+ if (err)
+ return err;
+ }
+
+ fdb_chain->id = index;
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield);
+ MLX5_SET(set_action_in, modact, offset,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8);
+ MLX5_SET(set_action_in, modact, length,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8);
+ MLX5_SET(set_action_in, modact, data, fdb_chain->id);
+ mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+ 1, modact);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mod_hdr;
+ }
+ fdb_chain->miss_modify_hdr = mod_hdr;
+
+ fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id);
+ if (IS_ERR(fdb_chain->restore_rule)) {
+ err = PTR_ERR(fdb_chain->restore_rule);
+ goto err_rule;
+ }
+
+ return 0;
+
+err_rule:
+ mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
+err_mod_hdr:
+ /* Datapath can't find this mapping, so we can safely remove it */
+ mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
+ return err;
+}
+
+static void destroy_fdb_chain_restore(struct fdb_chain *fdb_chain)
+{
+ struct mlx5_eswitch *esw = fdb_chain->esw;
+
+ if (!fdb_chain->miss_modify_hdr)
+ return;
+
+ mlx5_del_flow_rules(fdb_chain->restore_rule);
+ mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
+ mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
+}
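
The retry in create_fdb_chain_restore() above exists because a restore mark equal to MLX5_FS_DEFAULT_FLOW_TAG would be indistinguishable from an unmarked packet, so a second id is requested and the reserved one released. A userspace model of skipping one reserved value in an id allocator (values invented):

#include <stdio.h>

#define DEFAULT_FLOW_TAG 0xFFFFFFu              /* illustrative reserved value */

static unsigned int next_id = DEFAULT_FLOW_TAG; /* start here to force a collision */

static unsigned int mapping_add_demo(void) { return next_id++; }
static void mapping_remove_demo(unsigned int id) { (void)id; /* free id */ }

int main(void)
{
    unsigned int index = mapping_add_demo();

    if (index == DEFAULT_FLOW_TAG) {
        unsigned int again = mapping_add_demo(); /* take a second id */

        mapping_remove_demo(DEFAULT_FLOW_TAG);   /* then free the reserved one */
        index = again;
    }
    printf("chain id: %#x\n", index);
    return 0;
}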
+
static struct fdb_chain *
mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
{
@@ -269,6 +362,10 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
fdb_chain->chain = chain;
INIT_LIST_HEAD(&fdb_chain->prios_list);
+ err = create_fdb_chain_restore(fdb_chain);
+ if (err)
+ goto err_restore;
+
err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
chain_params);
if (err)
@@ -277,6 +374,8 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
return fdb_chain;
err_insert:
+ destroy_fdb_chain_restore(fdb_chain);
+err_restore:
kvfree(fdb_chain);
return ERR_PTR(err);
}
@@ -288,6 +387,8 @@ mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
chain_params);
+
+ destroy_fdb_chain_restore(fdb_chain);
kvfree(fdb_chain);
}
@@ -310,10 +411,11 @@ mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
}
static struct mlx5_flow_handle *
-mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
+mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
+ struct mlx5_flow_table *fdb,
struct mlx5_flow_table *next_fdb)
{
- static const struct mlx5_flow_spec spec = {};
+ struct mlx5_eswitch *esw = fdb_chain->esw;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act act = {};
@@ -322,7 +424,13 @@ mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = next_fdb;
- return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
+ if (next_fdb == tc_end_fdb(esw) &&
+ mlx5_esw_chains_prios_supported(esw)) {
+ act.modify_hdr = fdb_chain->miss_modify_hdr;
+ act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ }
+
+ return mlx5_add_flow_rules(fdb, NULL, &act, &dest, 1);
}
static int
@@ -345,7 +453,8 @@ mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
list_for_each_entry_continue_reverse(pos,
&fdb_chain->prios_list,
list) {
- miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb,
+ miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain,
+ pos->fdb,
next_fdb);
if (IS_ERR(miss_rules[n])) {
err = PTR_ERR(miss_rules[n]);
@@ -459,7 +568,7 @@ mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
}
/* Add miss rule to next_fdb */
- miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb);
+ miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb);
if (IS_ERR(miss_rule)) {
err = PTR_ERR(miss_rule);
goto err_miss_rule;
@@ -618,12 +727,44 @@ mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
return tc_end_fdb(esw);
}
+struct mlx5_flow_table *
+mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw)
+{
+ u32 chain, prio, level;
+ int err;
+
+ if (!fdb_ignore_flow_level_supported(esw)) {
+ err = -EOPNOTSUPP;
+
+ esw_warn(esw->dev,
+ "Couldn't create global flow table, ignore_flow_level not supported.");
+ goto err_ignore;
+ }
+
+ chain = mlx5_esw_chains_get_chain_range(esw);
+ prio = mlx5_esw_chains_get_prio_range(esw);
+ level = mlx5_esw_chains_get_level_range(esw);
+
+ return mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
+
+err_ignore:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_esw_chains_destroy_fdb_table(esw, ft);
+}
+
static int
mlx5_esw_chains_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_chains_priv *chains_priv;
struct mlx5_core_dev *dev = esw->dev;
u32 max_flow_counter, fdb_max;
+ struct mapping_ctx *mapping;
int err;
chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
@@ -637,7 +778,7 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
esw_debug(dev,
"Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+ max_flow_counter, esw->params.large_group_num, fdb_max);
mlx5_esw_chains_init_sz_pool(esw);
@@ -645,6 +786,16 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+ } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
+ } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
+ /* Disabled when the ttl workaround is needed, e.g.
+ * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
+ */
+ esw_warn(dev,
+ "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
} else {
esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
@@ -660,10 +811,20 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
if (err)
goto init_prios_ht_err;
+ mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw),
+ true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto mapping_err;
+ }
+ esw_chains_mapping(esw) = mapping;
+
mutex_init(&esw_chains_lock(esw));
return 0;
+mapping_err:
+ rhashtable_destroy(&esw_prios_ht(esw));
init_prios_ht_err:
rhashtable_destroy(&esw_chains_ht(esw));
init_chains_ht_err:
@@ -675,6 +836,7 @@ static void
mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
{
mutex_destroy(&esw_chains_lock(esw));
+ mapping_destroy(esw_chains_mapping(esw));
rhashtable_destroy(&esw_prios_ht(esw));
rhashtable_destroy(&esw_chains_ht(esw));
@@ -704,12 +866,9 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw)
/* Open level 1 for split rules now if prios isn't supported */
if (!mlx5_esw_chains_prios_supported(esw)) {
- ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
-
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
+ err = mlx5_esw_vport_tbl_get(esw);
+ if (err)
goto level_1_err;
- }
}
return 0;
@@ -725,7 +884,7 @@ static void
mlx5_esw_chains_close(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
- mlx5_esw_chains_put_table(esw, 0, 1, 1);
+ mlx5_esw_vport_tbl_put(esw);
mlx5_esw_chains_put_table(esw, 0, 1, 0);
mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
}
@@ -756,3 +915,30 @@ mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
mlx5_esw_chains_close(esw);
mlx5_esw_chains_cleanup(esw);
}
+
+int
+mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
+ u32 *chain_mapping)
+{
+ return mapping_add(esw_chains_mapping(esw), &chain, chain_mapping);
+}
+
+int
+mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, u32 chain_mapping)
+{
+ return mapping_remove(esw_chains_mapping(esw), chain_mapping);
+}
+
+int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag,
+ u32 *chain)
+{
+ int err;
+
+ err = mapping_find(esw_chains_mapping(esw), tag, chain);
+ if (err) {
+ esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag);
+ return -ENOENT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
index 2e13097fe348..f8c4239846ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
@@ -4,8 +4,12 @@
#ifndef __ML5_ESW_CHAINS_H__
#define __ML5_ESW_CHAINS_H__
+#include "eswitch.h"
+
bool
mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+bool
+mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
u32
mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
u32
@@ -23,8 +27,23 @@ mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
struct mlx5_flow_table *
mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
+struct mlx5_flow_table *
+mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw);
+void
+mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ft);
+
+int
+mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
+ u32 *chain_mapping);
+int
+mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw,
+ u32 chain_mapping);
+
int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
-#endif /* __ML5_ESW_CHAINS_H__ */
+int
+mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
+#endif /* __ML5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index e49acd0c5da5..7f618a443bfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -39,6 +39,7 @@
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
+#include "devlink.h"
#include "ecpf.h"
enum {
@@ -1333,7 +1334,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
- memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach drop flow counter */
@@ -1345,7 +1345,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
dest_num++;
}
vport->ingress.legacy.drop_rule =
- mlx5_add_flow_rules(vport->ingress.acl, spec,
+ mlx5_add_flow_rules(vport->ingress.acl, NULL,
&flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.legacy.drop_rule)) {
err = PTR_ERR(vport->ingress.legacy.drop_rule);
@@ -1408,7 +1408,6 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
int dest_num = 0;
int err = 0;
@@ -1437,11 +1436,6 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
if (err)
return err;
- /* Drop others rule (star rule) */
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- goto out;
-
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
@@ -1453,7 +1447,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
dest_num++;
}
vport->egress.legacy.drop_rule =
- mlx5_add_flow_rules(vport->egress.acl, spec,
+ mlx5_add_flow_rules(vport->egress.acl, NULL,
&flow_act, dst, dest_num);
if (IS_ERR(vport->egress.legacy.drop_rule)) {
err = PTR_ERR(vport->egress.legacy.drop_rule);
@@ -1462,8 +1456,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
vport->vport, err);
vport->egress.legacy.drop_rule = NULL;
}
-out:
- kvfree(spec);
+
return err;
}
@@ -1669,34 +1662,6 @@ static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
((u8 *)node_guid)[0] = mac[5];
}
-static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- u16 vport_num = vport->vport;
- int flags;
-
- if (mlx5_esw_is_manager_vport(esw, vport_num))
- return;
-
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport_num, 1,
- vport->info.link_state);
-
- /* Host PF has its own mac/guid. */
- if (vport_num) {
- mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
- vport->info.mac);
- mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
- vport->info.node_guid);
- }
-
- flags = (vport->info.vlan || vport->info.qos) ?
- SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
- modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
- flags);
-}
-
static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
@@ -1706,8 +1671,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return 0;
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
- MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(vport->ingress.legacy.drop_counter)) {
esw_warn(esw->dev,
@@ -1721,8 +1685,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (ret)
goto ingress_err;
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
- MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(vport->egress.legacy.drop_counter)) {
esw_warn(esw->dev,
@@ -1783,29 +1746,75 @@ static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
-static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- enum mlx5_eswitch_vport_event enabled_events)
+static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ u16 vport_num = vport->vport;
+ int flags;
+ int err;
+
+ err = esw_vport_setup_acl(esw, vport);
+ if (err)
+ return err;
+
+ /* Attach vport to the eswitch rate limiter */
+ esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);
+
+ if (mlx5_esw_is_manager_vport(esw, vport_num))
+ return 0;
+
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ vport_num, 1,
+ vport->info.link_state);
+
+ /* Host PF has its own mac/guid. */
+ if (vport_num) {
+ mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
+ vport->info.mac);
+ mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
+ vport->info.node_guid);
+ }
+
+ flags = (vport->info.vlan || vport->info.qos) ?
+ SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
+ modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
+ vport->info.qos, flags);
+
+ return 0;
+}
+
+/* Don't cleanup vport->info, it's needed to restore vport configuration */
+static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
+
+ if (!mlx5_esw_is_manager_vport(esw, vport_num))
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ vport_num, 1,
+ MLX5_VPORT_ADMIN_STATE_DOWN);
+
+ esw_vport_disable_qos(esw, vport);
+ esw_vport_cleanup_acl(esw, vport);
+}
+
+static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
int ret;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Restore old vport configuration */
- esw_apply_vport_conf(esw, vport);
-
- ret = esw_vport_setup_acl(esw, vport);
+ ret = esw_vport_setup(esw, vport);
if (ret)
goto done;
- /* Attach vport to the eswitch rate limiter */
- if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
- vport->qos.bw_share))
- esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
-
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true;
@@ -1826,10 +1835,11 @@ done:
return ret;
}
-static void esw_disable_vport(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
- u16 vport_num = vport->vport;
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
mutex_lock(&esw->state_lock);
if (!vport->enabled)
@@ -1847,16 +1857,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
- esw_vport_disable_qos(esw, vport);
-
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
- esw->mode == MLX5_ESWITCH_LEGACY)
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport_num, 1,
- MLX5_VPORT_ADMIN_STATE_DOWN);
-
- esw_vport_cleanup_acl(esw, vport);
+ esw_vport_cleanup(esw, vport);
esw->enabled_vports--;
done:
@@ -1944,6 +1945,59 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ int err;
+
+ err = esw_enable_vport(esw, vport_num, enabled_events);
+ if (err)
+ return err;
+
+ err = esw_offloads_load_rep(esw, vport_num);
+ if (err)
+ goto err_rep;
+
+ return err;
+
+err_rep:
+ esw_disable_vport(esw, vport_num);
+ return err;
+}
+
+void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ esw_offloads_unload_rep(esw, vport_num);
+ esw_disable_vport(esw, vport_num);
+}
+
+void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
+{
+ int i;
+
+ mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
+ mlx5_eswitch_unload_vport(esw, i);
+}
+
+int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ int err;
+ int i;
+
+ mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
+ err = mlx5_eswitch_load_vport(esw, i, enabled_events);
+ if (err)
+ goto vf_err;
+ }
+
+ return 0;
+
+vf_err:
+ mlx5_eswitch_unload_vf_vports(esw, i - 1);
+ return err;
+}
+
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
@@ -1951,46 +2005,33 @@ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
- struct mlx5_vport *vport;
- int num_vfs;
int ret;
- int i;
/* Enable PF vport */
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- ret = esw_enable_vport(esw, vport, enabled_events);
+ ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
if (ret)
return ret;
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- ret = esw_enable_vport(esw, vport, enabled_events);
+ ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
}
/* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
- ret = esw_enable_vport(esw, vport, enabled_events);
- if (ret)
- goto vf_err;
- }
+ ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
+ enabled_events);
+ if (ret)
+ goto vf_err;
return 0;
vf_err:
- num_vfs = i - 1;
- mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
- esw_disable_vport(esw, vport);
-
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_disable_vport(esw, vport);
- }
+ if (mlx5_ecpf_vport_exists(esw->dev))
+ mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_disable_vport(esw, vport);
+ mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1999,19 +2040,81 @@ ecpf_err:
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
- struct mlx5_vport *vport;
- int i;
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
- mlx5_esw_for_all_vports_reverse(esw, i, vport)
- esw_disable_vport(esw, vport);
+ if (mlx5_ecpf_vport_exists(esw->dev))
+ mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+
+ mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
+ union devlink_param_value val;
int err;
- if (!ESW_ALLOWED(esw) ||
- !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ &val);
+ if (!err) {
+ esw->params.large_group_num = val.vu32;
+ } else {
+ esw_warn(esw->dev,
+ "Devlink can't get param fdb_large_groups, uses default (%d).\n",
+ ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
+ esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+ }
+}
+
+static void
+mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
+{
+ const u32 *out;
+
+ WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
+
+ if (num_vfs < 0)
+ return;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ esw->esw_funcs.num_vfs = num_vfs;
+ return;
+ }
+
+ out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(out))
+ return;
+
+ esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_num_of_vfs);
+ kvfree(out);
+}
+
+/**
+ * mlx5_eswitch_enable_locked - Enable eswitch
+ * @esw: Pointer to eswitch
+ * @mode: Eswitch mode to enable
+ * @num_vfs: Enable eswitch for given number of VFs. This is optional.
+ * Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
+ * Caller should pass num_vfs > 0 when enabling eswitch for
+ * vf vports. Caller should pass num_vfs = 0 when eswitch
+ * is enabled without sriov VFs or when the caller
+ * is unaware of the sriov state of the host PF on an ECPF based
+ * eswitch. Caller should pass num_vfs < 0 when num_vfs should be
+ * completely ignored. This is typically the case when eswitch
+ * is enabled without sriov regardless of PF/ECPF system.
+ * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or offloads
+ * mode. If num_vfs >= 0 is provided, it sets up the VF-related eswitch vports.
+ * It returns 0 on success or an error code on failure.
+ */
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
+{
+ int err;
+
+ lockdep_assert_held(&esw->mode_lock);
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
}
@@ -2022,6 +2125,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
+ mlx5_eswitch_get_devlink_param(esw);
+
+ mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
+
esw_create_tsar(esw);
esw->mode = mode;
@@ -2058,11 +2165,34 @@ abort:
return err;
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+/**
+ * mlx5_eswitch_enable - Enable eswitch
+ * @esw: Pointer to eswitch
+ * @num_vfs: Enable eswitch for given number of VFs.
+ * Caller must pass num_vfs > 0 when enabling eswitch for
+ * vf vports.
+ * mlx5_eswitch_enable() returns 0 on success or error code on failure.
+ */
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
+{
+ int ret;
+
+ if (!ESW_ALLOWED(esw))
+ return 0;
+
+ mutex_lock(&esw->mode_lock);
+ ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+ mutex_unlock(&esw->mode_lock);
+ return ret;
+}
+
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
int old_mode;
- if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
+ lockdep_assert_held_write(&esw->mode_lock);
+
+ if (esw->mode == MLX5_ESWITCH_NONE)
return;
esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -2091,6 +2221,16 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
}
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+{
+ if (!ESW_ALLOWED(esw))
+ return;
+
+ mutex_lock(&esw->mode_lock);
+ mlx5_eswitch_disable_locked(esw, clear_vf);
+ mutex_unlock(&esw->mode_lock);
+}
+
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw;
@@ -2142,6 +2282,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
hash_init(esw->offloads.mod_hdr.hlist);
atomic64_set(&esw->offloads.num_flows, 0);
mutex_init(&esw->state_lock);
+ mutex_init(&esw->mode_lock);
mlx5_esw_for_all_vports(esw, i, vport) {
vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
@@ -2176,6 +2317,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
esw_offloads_cleanup_reps(esw);
+ mutex_destroy(&esw->mode_lock);
+ mutex_destroy(&esw->state_lock);
mutex_destroy(&esw->offloads.mod_hdr.lock);
mutex_destroy(&esw->offloads.encap_tbl_lock);
kfree(esw->vports);
@@ -2410,12 +2553,11 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
}
/* Star rule to forward all traffic to uplink vport */
- memset(spec, 0, sizeof(*spec));
memset(&dest, 0, sizeof(dest));
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = MLX5_VPORT_UPLINK;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -2600,9 +2742,13 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
u64 bytes = 0;
int err = 0;
- if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
+ if (esw->mode != MLX5_ESWITCH_LEGACY)
return 0;
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled)
+ goto unlock;
+
if (vport->egress.legacy.drop_counter)
mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
&stats->rx_dropped, &bytes);
@@ -2613,20 +2759,22 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
- return 0;
+ goto unlock;
err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
&rx_discard_vport_down,
&tx_discard_vport_down);
if (err)
- return err;
+ goto unlock;
if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
stats->rx_dropped += rx_discard_vport_down;
if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
stats->tx_dropped += tx_discard_vport_down;
- return 0;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -2742,22 +2890,4 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
-void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
-{
- const u32 *out;
-
- WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
-
- if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- esw->esw_funcs.num_vfs = num_vfs;
- return;
- }
-
- out = mlx5_esw_query_functions(esw->dev);
- if (IS_ERR(out))
- return;
- esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
- host_params_context.host_num_of_vfs);
- kvfree(out);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 4472710ccc9c..c1848b57f61c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -42,6 +42,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#include "en/tc_ct.h"
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -55,6 +56,8 @@
#ifdef CONFIG_MLX5_ESWITCH
+#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
+
#define MLX5_MAX_UC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
@@ -183,12 +186,22 @@ struct mlx5_eswitch_fdb {
int vlan_push_pop_refcount;
struct mlx5_esw_chains_priv *esw_chains_priv;
+ struct {
+ DECLARE_HASHTABLE(table, 8);
+ /* Protects vports.table */
+ struct mutex lock;
+ } vports;
+
} offloads;
};
u32 flags;
};
struct mlx5_esw_offload {
+ struct mlx5_flow_table *ft_offloads_restore;
+ struct mlx5_flow_group *restore_group;
+ struct mlx5_modify_hdr *restore_copy_hdr_id;
+
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
@@ -224,6 +237,7 @@ struct mlx5_esw_functions {
enum {
MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
+ MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};
struct mlx5_eswitch {
@@ -244,6 +258,11 @@ struct mlx5_eswitch {
*/
struct mutex state_lock;
+ /* Protects eswitch mode change that occurs via one or more
+ * user commands, i.e. sriov state change, devlink commands.
+ */
+ struct mutex mode_lock;
+
struct {
bool enabled;
u32 root_tsar_id;
@@ -255,6 +274,9 @@ struct mlx5_eswitch {
u16 manager_vport;
u16 first_host_vport;
struct mlx5_esw_functions esw_funcs;
+ struct {
+ u32 large_group_num;
+ } params;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -279,7 +301,11 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
+
+#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, u8 mac[ETH_ALEN]);
@@ -315,6 +341,7 @@ struct mlx5_termtbl_handle;
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec);
@@ -375,6 +402,7 @@ enum {
enum {
MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
+ MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
};
struct mlx5_esw_flow_attr {
@@ -405,6 +433,9 @@ struct mlx5_esw_flow_attr {
u16 prio;
u32 dest_chain;
u32 flags;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *dest_ft;
+ struct mlx5_ct_attr ct_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
@@ -414,7 +445,6 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack);
@@ -433,6 +463,11 @@ int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
u16 vlan_id, u32 flow_action);
+static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
+{
+ return esw->qos.enabled;
+}
+
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
@@ -608,7 +643,6 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
int
@@ -623,11 +657,30 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
+u32
+esw_get_max_restore_tag(struct mlx5_eswitch *esw);
+
+int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
+void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
+static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
@@ -636,8 +689,11 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
-
+static inline struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1a57b2bd74b8..5d9def18ae3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,7 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
-#include "eswitch_offloads_chains.h"
+#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -50,6 +50,181 @@
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
+/* Per vport tables */
+
+#define MLX5_ESW_VPORT_TABLE_SIZE 128
+
+/* This struct is used as a key to the hash table and we need it to be packed
+ * so hash result is consistent
+ */
+struct mlx5_vport_key {
+ u32 chain;
+ u16 prio;
+ u16 vport;
+ u16 vhca_id;
+} __packed;
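
The key struct above is __packed so that jhash() and memcmp() see only the key bytes: with padding, two logically equal keys could hash or compare unequal because padding contents are unspecified. A userspace sketch of hashing a packed key (a toy hash stands in for jhash; only determinism matters here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vport_key {
    uint32_t chain;
    uint16_t prio;
    uint16_t vport;
    uint16_t vhca_id;
} __attribute__((packed));       /* the kernel spells this __packed */

static uint32_t hash_bytes(const void *p, size_t n)
{
    const uint8_t *b = p;
    uint32_t h = 2166136261u;    /* FNV-1a */

    while (n--)
        h = (h ^ *b++) * 16777619u;
    return h;
}

int main(void)
{
    struct vport_key a = { .chain = 1, .prio = 1, .vport = 7, .vhca_id = 3 };
    struct vport_key b = a;

    printf("equal: %d, hash: %#x\n",
           !memcmp(&a, &b, sizeof(a)), hash_bytes(&a, sizeof(a)));
    return 0;
}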
+
+struct mlx5_vport_table {
+ struct hlist_node hlist;
+ struct mlx5_flow_table *fdb;
+ u32 num_rules;
+ struct mlx5_vport_key key;
+};
+
+#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
+
+static struct mlx5_flow_table *
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb;
+
+ ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
+ ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
+ ft_attr.prio = FDB_PER_VPORT;
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
+ PTR_ERR(fdb));
+ }
+
+ return fdb;
+}
+
+static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_vport_key *key)
+{
+ key->vport = attr->in_rep->vport;
+ key->chain = attr->chain;
+ key->prio = attr->prio;
+ key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ return jhash(key, sizeof(*key), 0);
+}
+
+/* caller must hold vports.lock */
+static struct mlx5_vport_table *
+esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
+{
+ struct mlx5_vport_table *e;
+
+ hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
+ if (!memcmp(&e->key, skey, sizeof(*skey)))
+ return e;
+
+ return NULL;
+}
+
+static void
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key key;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &key);
+ e = esw_vport_tbl_lookup(esw, &key, hkey);
+ if (!e || --e->num_rules)
+ goto out;
+
+ hash_del(&e->hlist);
+ mlx5_destroy_flow_table(e->fdb);
+ kfree(e);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+}
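
esw_vport_tbl_get()/esw_vport_tbl_put() above form a refcounted cache: get bumps num_rules or creates the per-vport table on first use; put decrements and destroys at zero, all under vports.lock. A minimal userspace model of the counting (locking and hashing elided):

#include <stdlib.h>

struct cached_tbl {
    int num_rules;               /* reference count */
    /* the real entry also carries the flow table and hash key */
};

static struct cached_tbl *cache; /* stands in for the hash table lookup */

static struct cached_tbl *tbl_get(void)
{
    if (cache) {
        cache->num_rules++;
        return cache;
    }
    cache = calloc(1, sizeof(*cache));
    if (!cache)
        return NULL;
    cache->num_rules = 1;        /* first user */
    return cache;
}

static void tbl_put(void)
{
    if (!cache || --cache->num_rules)
        return;                  /* still referenced (or never created) */
    free(cache);                 /* last user: destroy the table */
    cache = NULL;
}

int main(void)
{
    tbl_get();
    tbl_get();
    tbl_put();
    tbl_put();                   /* frees here */
    return !!cache;
}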
+
+static struct mlx5_flow_table *
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key skey;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &skey);
+ e = esw_vport_tbl_lookup(esw, &skey, hkey);
+ if (e) {
+ e->num_rules++;
+ goto out;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ fdb = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB namespace\n");
+ fdb = ERR_PTR(-ENOENT);
+ goto err_ns;
+ }
+
+ fdb = esw_vport_tbl_create(esw, ns);
+ if (IS_ERR(fdb))
+ goto err_ns;
+
+ e->fdb = fdb;
+ e->num_rules = 1;
+ e->key = skey;
+ hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return e->fdb;
+
+err_ns:
+ kfree(e);
+err_alloc:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return fdb;
+}
+
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_flow_attr attr = {};
+ struct mlx5_eswitch_rep rep = {};
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport *vport;
+ int i;
+
+ attr.prio = 1;
+ attr.in_rep = &rep;
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ attr.in_rep->vport = vport->vport;
+ fdb = esw_vport_tbl_get(esw, &attr);
+ if (IS_ERR(fdb))
+ goto out;
+ }
+ return 0;
+
+out:
+ mlx5_esw_vport_tbl_put(esw);
+ return PTR_ERR(fdb);
+}
+
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_flow_attr attr = {};
+ struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport *vport;
+ int i;
+
+ attr.prio = 1;
+ attr.in_rep = &rep;
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ attr.in_rep->vport = vport->vport;
+ esw_vport_tbl_put(esw, &attr);
+ }
+}
+
+/* End: Per vport tables */
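+
+/* Usage sketch (illustrative): tables are refcounted by the
+ * (chain, prio, vport, vhca_id) key, so callers pair get/put around the
+ * lifetime of their rules:
+ *
+ *	fdb = esw_vport_tbl_get(esw, attr);	// created on first get
+ *	if (IS_ERR(fdb))
+ *		return PTR_ERR(fdb);
+ *	... add rules to fdb ...
+ *	esw_vport_tbl_put(esw, attr);		// destroyed on last put
+ */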
+
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
@@ -85,7 +260,8 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
attr->in_rep->vport));
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
@@ -148,7 +324,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
struct mlx5_flow_table *ft;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ if (attr->dest_ft) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = attr->dest_ft;
+ i++;
+ } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
@@ -191,8 +372,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
i++;
}
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
-
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
if (attr->inner_match_level != MLX5_MATCH_NONE)
@@ -201,14 +380,24 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
- fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
- !!split);
+ if (split) {
+ fdb = esw_vport_tbl_get(esw, attr);
+ } else {
+ if (attr->chain || attr->prio)
+ fdb = mlx5_esw_chains_get_table(esw, attr->chain,
+ attr->prio, 0);
+ else
+ fdb = attr->fdb;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ }
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
- if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
+ if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
else
@@ -221,7 +410,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
+ if (split)
+ esw_vport_tbl_put(esw, attr);
+ else if (attr->chain || attr->prio)
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
@@ -247,7 +439,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
goto err_get_fast;
}
- fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
+ fwd_fdb = esw_vport_tbl_get(esw, attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -285,7 +477,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ esw_vport_tbl_put(esw, attr);
err_get_fwd:
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
@@ -303,20 +495,25 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- /* unref the term table */
- for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
- if (attr->dests[i].termtbl)
- mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ /* unref the term table */
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (attr->dests[i].termtbl)
+ mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ }
}
atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ esw_vport_tbl_put(esw, attr);
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
} else {
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
- !!split);
+ if (split)
+ esw_vport_tbl_put(esw, attr);
+ else if (attr->chain || attr->prio)
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+ 0);
if (attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
}
@@ -578,14 +775,21 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
+static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
+{
+ return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
+ MLX5_FDB_TO_VPORT_REG_C_1;
+}
+
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
- u8 fdb_to_vport_reg_c_id;
+ u8 curr, wanted;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
+ !mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
@@ -593,22 +797,33 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
if (err)
return err;
- fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.fdb_to_vport_reg_c_id);
+ curr = MLX5_GET(query_esw_vport_context_out, out,
+ esw_vport_context.fdb_to_vport_reg_c_id);
+ wanted = MLX5_FDB_TO_VPORT_REG_C_0;
+ if (mlx5_eswitch_reg_c1_loopback_supported(esw))
+ wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
if (enable)
- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+ curr |= wanted;
else
- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+ curr &= ~wanted;
MLX5_SET(modify_esw_vport_context_in, in,
- esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
+ esw_vport_context.fdb_to_vport_reg_c_id, curr);
MLX5_SET(modify_esw_vport_context_in, in,
field_select.fdb_to_vport_reg_c_id, 1);
- return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
- in, sizeof(in));
+ err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
+ sizeof(in));
+ if (!err) {
+ if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
+ esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
+ else
+ esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
+ }
+
+ return err;
}
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
@@ -621,7 +836,8 @@ static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
} else {
@@ -836,6 +1052,59 @@ out:
return err;
}
+struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
+{
+ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
+ struct mlx5_flow_context *flow_context;
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ ESW_CHAIN_TAG_METADATA_MASK);
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
+
+ flow_context = &spec->flow_context;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+ flow_context->flow_tag = tag;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = esw->offloads.ft_offloads;
+
+ flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kfree(spec);
+
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev,
+ "Failed to create restore rule for tag: %d, err(%d)\n",
+ tag, (int)PTR_ERR(flow_rule));
+
+ return flow_rule;
+}
+
+u32
+esw_get_max_restore_tag(struct mlx5_eswitch *esw)
+{
+ return ESW_CHAIN_TAG_METADATA_MASK;
+}
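+
+/* Putting the two helpers above together (a sketch of the intended flow,
+ * inferred from this hunk): the chain tag lives in the low
+ * ESW_CHAIN_TAG_METADATA_BITS of reg_c_0, and each restore rule
+ *
+ *	matches:  reg_c_0 & ESW_CHAIN_TAG_METADATA_MASK == tag
+ *	actions:  copy reg_c_1 -> reg_b (restore_copy_hdr_id),
+ *		  set flow_tag = tag, forward to ft_offloads
+ *
+ * so a packet that missed in hardware resumes in software with its
+ * chain identity restored.
+ */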
+
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
@@ -851,8 +1120,9 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS_2);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_0);
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
} else {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
@@ -1057,6 +1327,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
}
ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
+ ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
@@ -1134,7 +1405,8 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
} else {
@@ -1160,6 +1432,148 @@ out:
return flow_rule;
}
+
+static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
+{
+ u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+ struct mlx5_core_dev *dev = esw->dev;
+ int vport;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == MLX5_ESWITCH_NONE)
+ return -EOPNOTSUPP;
+
+ switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+ mlx5_mode = MLX5_INLINE_MODE_NONE;
+ goto out;
+ case MLX5_CAP_INLINE_MODE_L2:
+ mlx5_mode = MLX5_INLINE_MODE_L2;
+ goto out;
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ goto query_vports;
+ }
+
+query_vports:
+ mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
+ mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
+ mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+ if (prev_mlx5_mode != mlx5_mode)
+ return -EINVAL;
+ prev_mlx5_mode = mlx5_mode;
+ }
+
+out:
+ *mode = mlx5_mode;
+ return 0;
+}
+
+static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+ return;
+
+ mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
+ mlx5_destroy_flow_group(offloads->restore_group);
+ mlx5_destroy_flow_table(offloads->ft_offloads_restore);
+}
+
+static int esw_create_restore_table(struct mlx5_eswitch *esw)
+{
+ u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_modify_hdr *mod_hdr;
+ void *match_criteria, *misc;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+ return 0;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ esw_warn(esw->dev, "Failed to create restore table, err %d\n",
+ err);
+ goto out_free;
+ }
+
+ memset(flow_group_in, 0, inlen);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ ESW_CHAIN_TAG_METADATA_MASK);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft_attr.max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create restore flow group, err: %d\n",
+ err);
+ goto err_group;
+ }
+
+ MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, modact, src_field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(copy_action_in, modact, dst_field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_KERNEL, 1,
+ modact);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ esw_warn(dev, "Failed to create restore mod header, err: %d\n",
+ err);
+ goto err_mod_hdr;
+ }
+
+ esw->offloads.ft_offloads_restore = ft;
+ esw->offloads.restore_group = g;
+ esw->offloads.restore_copy_hdr_id = mod_hdr;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_mod_hdr:
+ mlx5_destroy_flow_group(g);
+err_group:
+ mlx5_destroy_flow_table(ft);
+out_free:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
@@ -1172,13 +1586,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw, false);
- mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
- err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
+ mlx5_eswitch_disable_locked(esw, false);
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
+ esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
+ MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
@@ -1233,187 +1648,66 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type]->unload(rep);
}
-static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
- struct mlx5_eswitch_rep *rep;
-
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
- __esw_offloads_unload_rep(esw, rep, rep_type);
- }
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
- __esw_offloads_unload_rep(esw, rep, rep_type);
- }
-
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
- __esw_offloads_unload_rep(esw, rep, rep_type);
-}
-
-static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
+static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int i;
- mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
+ mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
__esw_offloads_unload_rep(esw, rep, rep_type);
-}
-
-static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
-{
- u8 rep_type = NUM_REP_TYPES;
-
- while (rep_type-- > 0)
- __unload_reps_vf_vport(esw, nvports, rep_type);
-}
-
-static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
- __unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
-
- /* Special vports must be the last to unload. */
- __unload_reps_special_vport(esw, rep_type);
-}
-
-static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
-{
- u8 rep_type = NUM_REP_TYPES;
-
- while (rep_type-- > 0)
- __unload_reps_all_vport(esw, rep_type);
-}
-
-static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep, u8 rep_type)
-{
- int err = 0;
-
- if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
- if (err)
- atomic_set(&rep->rep_data[rep_type].state,
- REP_REGISTERED);
- }
-
- return err;
-}
-
-static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
- struct mlx5_eswitch_rep *rep;
- int err;
-
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
- err = __esw_offloads_load_rep(esw, rep, rep_type);
- if (err)
- return err;
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
- err = __esw_offloads_load_rep(esw, rep, rep_type);
- if (err)
- goto err_pf;
- }
if (mlx5_ecpf_vport_exists(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
- err = __esw_offloads_load_rep(esw, rep, rep_type);
- if (err)
- goto err_ecpf;
+ __esw_offloads_unload_rep(esw, rep, rep_type);
}
- return 0;
-
-err_ecpf:
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-err_pf:
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
__esw_offloads_unload_rep(esw, rep, rep_type);
- return err;
}
-static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
+int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
- int err, i;
-
- mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
- err = __esw_offloads_load_rep(esw, rep, rep_type);
- if (err)
- goto err_vf;
- }
-
- return 0;
-
-err_vf:
- __unload_reps_vf_vport(esw, --i, rep_type);
- return err;
-}
-
-static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
+ int rep_type;
int err;
- /* Special vports must be loaded first, uplink rep creates mdev resource. */
- err = __load_reps_special_vport(esw, rep_type);
- if (err)
- return err;
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return 0;
- err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
- if (err)
- goto err_vfs;
+ rep = mlx5_eswitch_get_rep(esw, vport_num);
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
+ err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+ if (err)
+ goto err_reps;
+ }
return 0;
-err_vfs:
- __unload_reps_special_vport(esw, rep_type);
- return err;
-}
-
-static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
-{
- u8 rep_type = 0;
- int err;
-
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
- err = __load_reps_vf_vport(esw, nvports, rep_type);
- if (err)
- goto err_reps;
- }
-
- return err;
-
err_reps:
- while (rep_type-- > 0)
- __unload_reps_vf_vport(esw, nvports, rep_type);
+ atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+ for (--rep_type; rep_type >= 0; rep_type--)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
return err;
}
-static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
+void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
- u8 rep_type = 0;
- int err;
-
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
- err = __load_reps_all_vport(esw, rep_type);
- if (err)
- goto err_reps;
- }
+ struct mlx5_eswitch_rep *rep;
+ int rep_type;
- return err;
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return;
-err_reps:
- while (rep_type-- > 0)
- __unload_reps_all_vport(esw, rep_type);
- return err;
+ rep = mlx5_eswitch_get_rep(esw, vport_num);
+ for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
@@ -1601,14 +1895,21 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
- static const struct mlx5_flow_spec spec = {};
struct mlx5_flow_act flow_act = {};
int err = 0;
+ u32 key;
+
+ key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
+ key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
- MLX5_SET(set_action_in, action, data,
- mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+ MLX5_SET(set_action_in, action, data, key);
+ MLX5_SET(set_action_in, action, offset,
+ ESW_SOURCE_PORT_METADATA_OFFSET);
+ MLX5_SET(set_action_in, action, length,
+ ESW_SOURCE_PORT_METADATA_BITS);
vport->ingress.offloads.modify_metadata =
mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
@@ -1625,7 +1926,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
vport->ingress.offloads.modify_metadata_rule =
mlx5_add_flow_rules(vport->ingress.acl,
- &spec, &flow_act, NULL, 0);
+ NULL, &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
esw_warn(esw->dev,
@@ -1837,6 +2138,18 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
+static bool
+esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
+{
+ return mlx5_core_mp_enabled(esw->dev);
+}
+
+static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
+{
+ return esw_check_vport_match_metadata_mandatory(esw) &&
+ esw_check_vport_match_metadata_supported(esw);
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
@@ -1875,7 +2188,7 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
int err;
- if (esw_check_vport_match_metadata_supported(esw))
+ if (esw_use_vport_metadata(esw))
esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
@@ -1906,18 +2219,24 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
+ mutex_init(&esw->fdb_table.offloads.vports.lock);
+ hash_init(esw->fdb_table.offloads.vports.table);
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
- return err;
+ goto create_acl_err;
- err = esw_create_offloads_fdb_tables(esw, total_vports);
+ err = esw_create_offloads_table(esw, total_vports);
if (err)
- goto create_fdb_err;
+ goto create_offloads_err;
- err = esw_create_offloads_table(esw, total_vports);
+ err = esw_create_restore_table(esw);
if (err)
- goto create_ft_err;
+ goto create_restore_err;
+
+ err = esw_create_offloads_fdb_tables(esw, total_vports);
+ if (err)
+ goto create_fdb_err;
err = esw_create_vport_rx_group(esw, total_vports);
if (err)
@@ -1926,23 +2245,26 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
return 0;
create_fg_err:
- esw_destroy_offloads_table(esw);
-
-create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
-
create_fdb_err:
+ esw_destroy_restore_table(esw);
+create_restore_err:
+ esw_destroy_offloads_table(esw);
+create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
-
+create_acl_err:
+ mutex_destroy(&esw->fdb_table.offloads.vports.lock);
return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
esw_destroy_vport_rx_group(esw);
- esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
+ esw_destroy_restore_table(esw);
+ esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
+ mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static void
@@ -1961,11 +2283,12 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
- esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
} else {
int err;
- err = esw_offloads_load_vf_reps(esw, new_num_vfs);
+ err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
+ MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
return;
}
@@ -2023,40 +2346,43 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
- err = esw_offloads_steering_init(esw);
- if (err)
- goto err_steering_init;
err = esw_set_passing_vport_metadata(esw, true);
if (err)
goto err_vport_metadata;
+ err = esw_offloads_steering_init(esw);
+ if (err)
+ goto err_steering_init;
+
/* Representor will control the vport link state */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
- err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+ /* Uplink vport rep must load first. */
+ err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
if (err)
- goto err_vports;
+ goto err_uplink;
- err = esw_offloads_load_all_reps(esw);
+ err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
- goto err_reps;
+ goto err_vports;
esw_offloads_devcom_init(esw);
- mutex_init(&esw->offloads.termtbl_mutex);
return 0;
-err_reps:
- mlx5_eswitch_disable_pf_vf_vports(esw);
err_vports:
- esw_set_passing_vport_metadata(esw, false);
-err_vport_metadata:
+ esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+err_uplink:
esw_offloads_steering_cleanup(esw);
err_steering_init:
+ esw_set_passing_vport_metadata(esw, false);
+err_vport_metadata:
mlx5_rdma_disable_roce(esw->dev);
+ mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
@@ -2065,11 +2391,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw, false);
- err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
+ mlx5_eswitch_disable_locked(esw, false);
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
+ MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
+ MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
@@ -2082,11 +2410,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
esw_offloads_devcom_cleanup(esw);
- esw_offloads_unload_all_reps(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
mlx5_rdma_disable_roce(esw->dev);
+ mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
@@ -2166,60 +2495,82 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static int mlx5_devlink_eswitch_check(struct devlink *devlink)
+static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return -EOPNOTSUPP;
if (!MLX5_ESWITCH_MANAGER(dev))
return -EPERM;
- if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
- !mlx5_core_is_ecpf_esw_manager(dev))
- return -EOPNOTSUPP;
-
return 0;
}
+static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
+{
+ /* devlink commands in NONE eswitch mode are currently supported only
+ * on ECPF.
+ */
+ return (esw->mode == MLX5_ESWITCH_NONE &&
+ !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
u16 cur_mlx5_mode, mlx5_mode = 0;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
- cur_mlx5_mode = dev->priv.eswitch->mode;
-
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto unlock;
+
+ cur_mlx5_mode = esw->mode;
+
if (cur_mlx5_mode == mlx5_mode)
- return 0;
+ goto unlock;
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
- return esw_offloads_start(dev->priv.eswitch, extack);
+ err = esw_offloads_start(esw, extack);
else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
- return esw_offloads_stop(dev->priv.eswitch, extack);
+ err = esw_offloads_stop(esw, extack);
else
- return -EINVAL;
+ err = -EINVAL;
+
+unlock:
+ mutex_unlock(&esw->mode_lock);
+ return err;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
- return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(dev->priv.eswitch);
+ if (err)
+ goto unlock;
+
+ err = esw_mode_to_devlink(esw->mode, mode);
+unlock:
+ mutex_unlock(&esw->mode_lock);
+ return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
@@ -2230,18 +2581,24 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
int err, vport, num_vport;
u8 mlx5_mode;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto out;
+
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
- return 0;
+ goto out;
/* fall through */
case MLX5_CAP_INLINE_MODE_L2:
NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
break;
}
@@ -2249,7 +2606,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set inline mode when flows are configured");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
@@ -2266,6 +2624,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
}
esw->offloads.inline_mode = mlx5_mode;
+ mutex_unlock(&esw->mode_lock);
return 0;
revert_inline_mode:
@@ -2275,6 +2634,7 @@ revert_inline_mode:
vport,
esw->offloads.inline_mode);
out:
+ mutex_unlock(&esw->mode_lock);
return err;
}
@@ -2284,48 +2644,19 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
- return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-}
-
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
-{
- u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
- struct mlx5_core_dev *dev = esw->dev;
- int vport;
-
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
-
- if (esw->mode == MLX5_ESWITCH_NONE)
- return -EOPNOTSUPP;
-
- switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
- case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
- mlx5_mode = MLX5_INLINE_MODE_NONE;
- goto out;
- case MLX5_CAP_INLINE_MODE_L2:
- mlx5_mode = MLX5_INLINE_MODE_L2;
- goto out;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- goto query_vports;
- }
-
-query_vports:
- mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
- mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
- mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
- if (prev_mlx5_mode != mlx5_mode)
- return -EINVAL;
- prev_mlx5_mode = mlx5_mode;
- }
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto unlock;
-out:
- *mode = mlx5_mode;
- return 0;
+ err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+unlock:
+ mutex_unlock(&esw->mode_lock);
+ return err;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
@@ -2336,30 +2667,40 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto unlock;
+
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
- !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
- return -EOPNOTSUPP;
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
- if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
- return -EOPNOTSUPP;
+ if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw->offloads.encap = encap;
- return 0;
+ goto unlock;
}
if (esw->offloads.encap == encap)
- return 0;
+ goto unlock;
if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set encapsulation when flows are configured");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto unlock;
}
esw_destroy_offloads_fdb_tables(esw);
@@ -2375,6 +2716,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
}
+unlock:
+ mutex_unlock(&esw->mode_lock);
return err;
}
@@ -2385,14 +2728,36 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto unlock;
+
*encap = esw->offloads.encap;
+unlock:
+ mutex_unlock(&esw->mode_lock);
return 0;
}
+static bool
+mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ /* Currently, only an ECPF-based device has a representor for the host PF. */
+ if (vport_num == MLX5_VPORT_PF &&
+ !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return false;
+
+ if (vport_num == MLX5_VPORT_ECPF &&
+ !mlx5_ecpf_vport_exists(esw->dev))
+ return false;
+
+ return true;
+}
+
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
const struct mlx5_eswitch_rep_ops *ops,
u8 rep_type)
@@ -2403,8 +2768,10 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type] = ops;
mlx5_esw_for_all_reps(esw, i, rep) {
- rep_data = &rep->rep_data[rep_type];
- atomic_set(&rep_data->state, REP_REGISTERED);
+ if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
+ rep_data = &rep->rep_data[rep_type];
+ atomic_set(&rep_data->state, REP_REGISTERED);
+ }
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@@ -2464,15 +2831,53 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
vport_num <= esw->dev->priv.sriov.max_vfs;
}
+bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
+{
+ return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
+}
+EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
+
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
-u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num)
{
- return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
+ u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
+ u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
+ u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ u32 val;
+
+ /* Make sure the vhca_id fits in ESW_VHCA_ID_BITS */
+ WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
+
+ /* Trim vhca_id to ESW_VHCA_ID_BITS */
+ vhca_id &= vhca_id_mask;
+
+ /* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they
+ * don't overlap with VF numbers or with each other after trimming.
+ */
+ WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
+ vport_num_mask - 1);
+ WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
+ vport_num_mask - 1);
+ WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
+ (MLX5_VPORT_ECPF & vport_num_mask));
+
+ /* Make sure that the VF vport_num fits ESW_VPORT_BITS and doesn't
+ * overlap with pf and ecpf.
+ */
+ if (vport_num != MLX5_VPORT_UPLINK &&
+ vport_num != MLX5_VPORT_ECPF)
+ WARN_ON_ONCE(vport_num >= vport_num_mask - 1);
+
+ /* We can now trim vport_num to ESW_VPORT_BITS */
+ vport_num &= vport_num_mask;
+
+ val = (vhca_id << ESW_VPORT_BITS) | vport_num;
+ return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
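
/* Worked example (assuming the 8-bit vhca_id / 8-bit vport split that
 * ESW_VHCA_ID_BITS and ESW_VPORT_BITS are expected to encode): for
 * vhca_id 0x2f and VF vport 3,
 *
 *	val = (0x2f << 8) | 0x03 = 0x2f03
 *	val << (32 - 16)         = 0x2f030000
 *
 * i.e. the source-port metadata occupies the upper half of reg_c_0,
 * leaving the lower half free for the chain restore tag.
 */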
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index dc08ed9339ab..17a0d2bc102b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/fs.h>
#include "eswitch.h"
+#include "fs_core.h"
struct mlx5_termtbl_handle {
struct hlist_node termtbl_hlist;
@@ -28,6 +29,10 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
sizeof(dest->vport.num), hash);
hash = jhash((const void *)&dest->vport.vhca_id,
sizeof(dest->vport.num), hash);
+ if (dest->vport.pkt_reformat)
+ hash = jhash(dest->vport.pkt_reformat,
+ sizeof(*dest->vport.pkt_reformat),
+ hash);
return hash;
}
@@ -37,11 +42,19 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
struct mlx5_flow_act *flow_act2,
struct mlx5_flow_destination *dest2)
{
- return flow_act1->action != flow_act2->action ||
- dest1->vport.num != dest2->vport.num ||
- dest1->vport.vhca_id != dest2->vport.vhca_id ||
- memcmp(&flow_act1->vlan, &flow_act2->vlan,
- sizeof(flow_act1->vlan));
+ int ret;
+
+ ret = flow_act1->action != flow_act2->action ||
+ dest1->vport.num != dest2->vport.num ||
+ dest1->vport.vhca_id != dest2->vport.vhca_id ||
+ memcmp(&flow_act1->vlan, &flow_act2->vlan,
+ sizeof(flow_act1->vlan));
+ if (ret)
+ return ret;
+
+ return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
+ memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
+ sizeof(*dest1->vport.pkt_reformat)) : 0;
}
static int
@@ -49,7 +62,6 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
struct mlx5_termtbl_handle *tt,
struct mlx5_flow_act *flow_act)
{
- static const struct mlx5_flow_spec spec = {};
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
int err;
@@ -63,7 +75,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action, the termination table gets the
 * same prio as the slow path
*/
- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft_attr.prio = FDB_SLOW_PATH;
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
@@ -73,9 +86,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
return -EOPNOTSUPP;
}
- tt->rule = mlx5_add_flow_rules(tt->termtbl, &spec, flow_act,
+ tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
-
if (IS_ERR(tt->rule)) {
esw_warn(dev, "Failed to create termination table rule\n");
goto add_flow_err;
@@ -93,7 +105,8 @@ add_flow_err:
static struct mlx5_termtbl_handle *
mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest)
+ struct mlx5_flow_destination *dest,
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_termtbl_handle *tt;
bool found = false;
@@ -101,7 +114,6 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
int err;
mutex_lock(&esw->offloads.termtbl_mutex);
-
hash_key = mlx5_eswitch_termtbl_hash(flow_act, dest);
hash_for_each_possible(esw->offloads.termtbl_tbl, tt,
termtbl_hlist, hash_key) {
@@ -123,6 +135,7 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
tt->dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
tt->dest.vport.num = dest->vport.num;
tt->dest.vport.vhca_id = dest->vport.vhca_id;
+ tt->dest.vport.flags = dest->vport.flags;
memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
@@ -157,31 +170,50 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
}
}
+static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
+{
+ switch (rt->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
{
- if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
- return;
-
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
- memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
-
- if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
- return;
+ if (src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
+ memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
+
+ if (src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
+ memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+ }
+ }
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
- memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+ if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+ mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dst->pkt_reformat = src->pkt_reformat;
+ src->pkt_reformat = NULL;
+ }
}
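
/* A sketch of the effect (hypothetical rule, for illustration): for a
 * flow that pushes a vlan and encapsulates towards the uplink, both
 * actions migrate from the FDB rule's flow_act (src) into the
 * termination table's flow_act (dst); the original rule keeps only the
 * forward, and the termtbl applies vlan/reformat at the terminating hop.
 */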
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
const struct mlx5_flow_spec *spec)
{
- u32 port_mask, port_value;
+ u16 port_mask, port_value;
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return spec->flow_context.flow_source ==
@@ -191,20 +223,32 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
misc_parameters.source_port);
port_value = MLX5_GET(fte_match_param, spec->match_value,
misc_parameters.source_port);
- return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+ return (port_mask & port_value) == MLX5_VPORT_UPLINK;
}
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec)
{
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
+ int i;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+ attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
+ !mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
/* push vlan on RX */
- return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
- mlx5_eswitch_offload_is_uplink_port(esw, spec);
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)
+ return true;
+
+ /* hairpin */
+ for (i = attr->split_count; i < attr->out_count; i++)
+ if (attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ return true;
+
+ return false;
}
struct mlx5_flow_handle *
@@ -234,7 +278,7 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
- &dest[i]);
+ &dest[i], attr);
if (IS_ERR(tt)) {
esw_warn(esw->dev, "Failed to create termination table\n");
goto revert_changes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 8bcf3426b9c6..3ce17c3d7a00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
events->dev = dev;
dev->priv.events = events;
events->wq = create_singlethread_workqueue("mlx5_events");
- if (!events->wq)
+ if (!events->wq) {
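+ /* free the events struct allocated above instead of leaking it */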
+ kfree(events);
return -ENOMEM;
+ }
INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 4c61d25d2e88..b794888fa3ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -57,7 +57,7 @@ struct mlx5_fpga_ipsec_cmd_context {
struct completion complete;
struct mlx5_fpga_device *dev;
struct list_head list; /* Item in pending_cmds */
- u8 command[0];
+ u8 command[];
};
struct mlx5_fpga_esp_xfrm;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index b25465d9e030..90048697b2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -904,6 +904,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_SNIFFER_TX:
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
+ case FS_FT_RDMA_TX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9dc24241dc91..9620c8650e13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -87,6 +87,15 @@
.identified_miss_table_mode), \
FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
+#define FS_CHAINING_CAPS_RDMA_TX \
+ FS_REQUIRED_CAPS( \
+ FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma \
+ .identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma \
+ .flow_table_modify))
+
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
@@ -110,9 +119,9 @@
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
-#define OFFLOADS_MAX_FT 1
-#define OFFLOADS_NUM_PRIOS 1
-#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+#define OFFLOADS_MAX_FT 2
+#define OFFLOADS_NUM_PRIOS 2
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
@@ -145,7 +154,7 @@ static struct init_tree_node {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
LAG_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
OFFLOADS_MAX_FT))),
@@ -202,6 +211,18 @@ static struct init_tree_node rdma_rx_root_fs = {
}
};
+static struct init_tree_node rdma_tx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 1,
+ .children = (struct init_tree_node[]) {
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ FS_CHAINING_CAPS_RDMA_TX,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+};
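+
+/* Usage sketch (hypothetical caller, mirroring the existing RDMA RX
+ * path): once this tree is initialized, rules can be steered on the TX
+ * side of RDMA traffic through the new namespace:
+ *
+ *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_TX);
+ *	ft = mlx5_create_flow_table(ns, &ft_attr);
+ */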
+
enum fs_i_lock_class {
FS_LOCK_GRANDPARENT,
FS_LOCK_PARENT,
@@ -323,17 +344,12 @@ static void tree_put_node(struct fs_node *node, bool locked)
if (node->del_hw_func)
node->del_hw_func(node);
if (parent_node) {
- /* Only root namespace doesn't have parent and we just
- * need to free its node.
- */
down_write_ref_node(parent_node, locked);
list_del_init(&node->list);
- if (node->del_sw_func)
- node->del_sw_func(node);
- up_write_ref_node(parent_node, locked);
- } else {
- kfree(node);
}
+ node->del_sw_func(node);
+ if (parent_node)
+ up_write_ref_node(parent_node, locked);
node = NULL;
}
if (!node && parent_node)
@@ -447,8 +463,10 @@ static void del_sw_flow_table(struct fs_node *node)
fs_get_obj(ft, node);
rhltable_destroy(&ft->fgs_hash);
- fs_get_obj(prio, ft->node.parent);
- prio->num_ft--;
+ if (ft->node.parent) {
+ fs_get_obj(prio, ft->node.parent);
+ prio->num_ft--;
+ }
kfree(ft);
}
@@ -1322,7 +1340,7 @@ add_rule_fte(struct fs_fte *fte,
fte->node.active = true;
fte->status |= FS_FTE_STATUS_EXISTING;
- atomic_inc(&fte->node.version);
+ atomic_inc(&fg->node.version);
out:
return handle;
@@ -1577,28 +1595,19 @@ struct match_list {
struct mlx5_flow_group *g;
};
-struct match_list_head {
- struct list_head list;
- struct match_list first;
-};
-
-static void free_match_list(struct match_list_head *head, bool ft_locked)
+static void free_match_list(struct match_list *head, bool ft_locked)
{
- if (!list_empty(&head->list)) {
- struct match_list *iter, *match_tmp;
+ struct match_list *iter, *match_tmp;
- list_del(&head->first.list);
- tree_put_node(&head->first.g->node, ft_locked);
- list_for_each_entry_safe(iter, match_tmp, &head->list,
- list) {
- tree_put_node(&iter->g->node, ft_locked);
- list_del(&iter->list);
- kfree(iter);
- }
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
+ list) {
+ tree_put_node(&iter->g->node, ft_locked);
+ list_del(&iter->list);
+ kfree(iter);
}
}
-static int build_match_list(struct match_list_head *match_head,
+static int build_match_list(struct match_list *match_head,
struct mlx5_flow_table *ft,
const struct mlx5_flow_spec *spec,
bool ft_locked)
@@ -1615,14 +1624,8 @@ static int build_match_list(struct match_list_head *match_head,
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
- if (likely(list_empty(&match_head->list))) {
- if (!tree_get_node(&g->node))
- continue;
- match_head->first.g = g;
- list_add_tail(&match_head->first.list,
- &match_head->list);
+ if (unlikely(!tree_get_node(&g->node)))
continue;
- }
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
@@ -1630,10 +1633,6 @@ static int build_match_list(struct match_list_head *match_head,
err = -ENOMEM;
goto out;
}
- if (!tree_get_node(&g->node)) {
- kfree(curr_match);
- continue;
- }
curr_match->g = g;
list_add_tail(&curr_match->list, &match_head->list);
}
@@ -1699,7 +1698,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct match_list *iter;
bool take_write = false;
struct fs_fte *fte;
- u64 version;
+ u64 version = 0;
int err;
fte = alloc_fte(ft, spec, flow_act);
@@ -1707,10 +1706,12 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
return ERR_PTR(-ENOMEM);
search_again_locked:
- version = matched_fgs_get_version(match_head);
if (flow_act->flags & FLOW_ACT_NO_APPEND)
goto skip_search;
- /* Try to find a fg that already contains a matching fte */
+ version = matched_fgs_get_version(match_head);
+ /* Try to find an fte with identical match value and attempt to update its
+ * action.
+ */
list_for_each_entry(iter, match_head, list) {
struct fs_fte *fte_tmp;
@@ -1738,10 +1739,12 @@ skip_search:
goto out;
}
- /* Check the fgs version, for case the new FTE with the
- * same values was added while the fgs weren't locked
+ /* Check the fgs version. If the version has changed, it could be that an
+ * FTE with the same match value was added while the fgs weren't
+ * locked.
*/
- if (version != matched_fgs_get_version(match_head)) {
+ if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
+ version != matched_fgs_get_version(match_head)) {
take_write = true;
goto search_again_locked;
}
@@ -1785,9 +1788,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
{
struct mlx5_flow_steering *steering = get_steering(&ft->node);
- struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
- struct match_list_head match_head;
+ struct match_list match_head;
+ struct mlx5_flow_group *g;
bool take_write = false;
struct fs_fte *fte;
int version;
@@ -1892,12 +1895,16 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int num_dest)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ static const struct mlx5_flow_spec zero_spec = {};
struct mlx5_flow_destination gen_dest = {};
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
struct fs_prio *prio;
+ if (!spec)
+ spec = &zero_spec;
+
fs_get_obj(prio, ft->node.parent);
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
@@ -2132,6 +2139,8 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_KERNEL_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ root_ns = steering->rdma_tx_root_ns;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@ -2339,6 +2348,17 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
return 0;
}
+static void del_sw_root_ns(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_namespace *ns;
+
+ fs_get_obj(ns, node);
+ root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
+ mutex_destroy(&root_ns->chain_lock);
+ kfree(node);
+}
+
static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
enum fs_flow_table_type table_type)
@@ -2365,7 +2385,7 @@ static struct mlx5_flow_root_namespace
ns = &root_ns->ns;
fs_init_namespace(ns);
mutex_init(&root_ns->chain_lock);
- tree_init_node(&ns->node, NULL, NULL);
+ tree_init_node(&ns->node, NULL, del_sw_root_ns);
tree_add_node(&ns->node, NULL);
return root_ns;
@@ -2535,6 +2555,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
+ cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
@@ -2591,6 +2612,29 @@ out_err:
return err;
}
+static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ int err;
+
+ steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
+ if (!steering->rdma_tx_root_ns)
+ return -ENOMEM;
+
+ err = init_root_tree(steering, &rdma_tx_root_fs,
+ &steering->rdma_tx_root_ns->ns.node);
+ if (err)
+ goto out_err;
+
+ set_prio_attrs(steering->rdma_tx_root_ns);
+
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->rdma_tx_root_ns);
+ steering->rdma_tx_root_ns = NULL;
+ return err;
+}
+
/* FT and tc chains are stored in the same array so we can re-use the
* mlx5_get_fdb_sub_ns() and tc api for FT chains.
* When creating a new ns for each chain store it in the first available slot.
@@ -2700,6 +2744,17 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
goto out_err;
}
+ /* We put this priority last, knowing that nothing will get here
+ * unless explicitly forwarded to. This is possible because the
+ * slow path tables have catch-all rules and nothing gets past
+ * those tables.
+ */
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
set_prio_attrs(steering->fdb_root_ns);
return 0;
@@ -2890,6 +2945,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
+ err = init_rdma_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index be5f5e32c1e8..508108c58dae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -86,7 +86,8 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
- FS_FT_MAX_TYPE = FS_FT_RDMA_RX,
+ FS_FT_RDMA_TX = 0X8,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TX,
};
enum fs_flow_table_op_mod {
@@ -116,6 +117,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
+ struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
};
@@ -316,7 +318,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index ab69effb056d..f43caefd07a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -470,7 +470,7 @@ struct mlx5_fc_bulk {
u32 base_id;
int bulk_len;
unsigned long *bitmask;
- struct mlx5_fc fcs[0];
+ struct mlx5_fc fcs[];
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 909a7f284614..90e3d0233101 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -613,6 +613,44 @@ static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
fwhandle, 0);
}
+#define MLX5_FSM_REACTIVATE_TOUT 5000 /* msecs */
+static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5_FSM_REACTIVATE_TOUT);
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+ u32 out[MLX5_ST_SZ_DW(mirc_reg)];
+ u32 in[MLX5_ST_SZ_DW(mirc_reg)];
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG2(dev, mirc))
+ return -EOPNOTSUPP;
+
+ memset(in, 0, sizeof(in));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MIRC, 0, 1);
+ if (err)
+ return err;
+
+ do {
+ memset(out, 0, sizeof(out));
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MIRC, 0, 0);
+ if (err)
+ return err;
+
+ *status = MLX5_GET(mirc_reg, out, status_code);
+ if (*status != MLXFW_FSM_REACTIVATE_STATUS_BUSY)
+ return 0;
+
+ msleep(20);
+ } while (time_before(jiffies, exp_time));
+
+ return 0;
+}
+
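mlx5_fsm_reactivate() above is a bounded poll: issue the MIRC write once, then re-read the status every 20 ms until it leaves BUSY or roughly 5 s elapse; note it returns 0 even on timeout and leaves the verdict to the caller via *status. A userspace sketch of that loop shape, where poll_status() is a hypothetical stand-in for the register read:

	#include <time.h>
	#include <unistd.h>

	enum { ST_OK = 0, ST_BUSY = 1 };
	extern int poll_status(void);	/* hypothetical device read */

	static int wait_ready(unsigned int timeout_ms, int *status)
	{
		struct timespec t0, t1;

		clock_gettime(CLOCK_MONOTONIC, &t0);
		do {
			*status = poll_status();
			if (*status != ST_BUSY)
				return 0;
			usleep(20 * 1000);	/* mirrors the kernel's msleep(20) */
			clock_gettime(CLOCK_MONOTONIC, &t1);
		} while ((t1.tv_sec - t0.tv_sec) * 1000 +
			 (t1.tv_nsec - t0.tv_nsec) / 1000000 < timeout_ms);

		return 0;	/* timed out; caller inspects *status, as above */
	}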
static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
.component_query = mlx5_component_query,
.fsm_lock = mlx5_fsm_lock,
@@ -620,6 +658,7 @@ static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
.fsm_block_download = mlx5_fsm_block_download,
.fsm_component_verify = mlx5_fsm_component_verify,
.fsm_activate = mlx5_fsm_activate,
+ .fsm_reactivate = mlx5_fsm_reactivate,
.fsm_query_state = mlx5_fsm_query_state,
.fsm_cancel = mlx5_fsm_cancel,
.fsm_release = mlx5_fsm_release
@@ -634,6 +673,7 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev,
.ops = &mlx5_mlxfw_dev_ops,
.psid = dev->board_id,
.psid_size = strlen(dev->board_id),
+ .devlink = priv_to_devlink(dev),
},
.mlx5_core_dev = dev
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d9f4e8c59c1f..f99e1752d4e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -243,7 +243,7 @@ recover_from_sw_reset:
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
break;
- cond_resched();
+ msleep(20);
} while (!time_after(jiffies, end));
if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
@@ -627,7 +627,7 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
health->fw_reporter =
devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
- 0, false, dev);
+ 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
@@ -636,7 +636,7 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
devlink_health_reporter_create(devlink,
&mlx5_fw_fatal_reporter_ops,
MLX5_REPORTER_FW_GRACEFUL_PERIOD,
- true, dev);
+ dev);
if (IS_ERR(health->fw_fatal_reporter))
mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 90cb50fe17fd..1eef66ee849e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -235,6 +235,9 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
}
const struct ethtool_ops mlx5i_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5i_get_drvinfo,
.get_strings = mlx5i_get_strings,
.get_sset_count = mlx5i_get_sset_count,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 56078b23f1a0..505cf6eeae25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -396,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv, true);
+ mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
@@ -412,7 +412,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
- mlx5e_destroy_indirect_tirs(priv, true);
+ mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
@@ -483,7 +483,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index c87962cab921..de7e01a027bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -56,7 +56,7 @@ struct mlx5i_priv {
u32 qkey;
u16 pkey_index;
struct mlx5i_pkey_qpn_ht *qpn_htbl;
- char *mlx5e_priv[0];
+ char *mlx5e_priv[];
};
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
@@ -107,7 +107,7 @@ struct mlx5i_tx_wqe {
struct mlx5_wqe_datagram_seg datagram;
struct mlx5_wqe_eth_pad pad;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 416676c35b1f..e9089a793632 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -93,9 +93,8 @@ static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
static void mlx5_lag_fib_event_flush(struct notifier_block *nb)
{
struct lag_mp *mp = container_of(nb, struct lag_mp, fib_nb);
- struct mlx5_lag *ldev = container_of(mp, struct mlx5_lag, lag_mp);
- flush_workqueue(ldev->wq);
+ flush_workqueue(mp->wq);
}
struct mlx5_fib_event_work {
@@ -293,7 +292,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
- queue_work(ldev->wq, &fib_work->work);
+ queue_work(mp->wq, &fib_work->work);
return NOTIFY_DONE;
}
@@ -306,11 +305,17 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
if (mp->fib_nb.notifier_call)
return 0;
+ mp->wq = create_singlethread_workqueue("mlx5_lag_mp");
+ if (!mp->wq)
+ return -ENOMEM;
+
mp->fib_nb.notifier_call = mlx5_lag_fib_event;
err = register_fib_notifier(&init_net, &mp->fib_nb,
mlx5_lag_fib_event_flush, NULL);
- if (err)
+ if (err) {
+ destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
+ }
return err;
}
@@ -323,5 +328,6 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
return;
unregister_fib_notifier(&init_net, &mp->fib_nb);
+ destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
}
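The lag_mp hunks give the multipath code its own workqueue and thread the usual init/teardown symmetry through it: allocate the queue, register the notifier, destroy the queue if registration fails, and tear down in reverse order on cleanup. A compact sketch of that unwinding shape, with hypothetical wq_create()/register_cb() helpers rather than the kernel APIs:

	#include <errno.h>

	struct mp { void *wq; };
	extern void *wq_create(void);
	extern void wq_destroy(void *wq);
	extern int register_cb(struct mp *mp);
	extern void unregister_cb(struct mp *mp);

	static int mp_init(struct mp *mp)
	{
		int err;

		mp->wq = wq_create();
		if (!mp->wq)
			return -ENOMEM;

		err = register_cb(mp);
		if (err)
			wq_destroy(mp->wq);	/* undo step 1 when step 2 fails */
		return err;
	}

	static void mp_cleanup(struct mp *mp)
	{
		unregister_cb(mp);	/* stop producers first... */
		wq_destroy(mp->wq);	/* ...then flush and free the queue */
	}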
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
index 79be89e9c7a4..258ac7b2964e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
@@ -16,6 +16,7 @@ enum mlx5_lag_port_affinity {
struct lag_mp {
struct notifier_block fib_nb;
struct fib_info *mfi; /* used in tracking fib events */
+ struct workqueue_struct *wq;
};
#ifdef CONFIG_MLX5_ESWITCH
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index 3fc575d1c3ec..dcea87ec5977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -42,7 +42,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
MLX5_SET(encryption_key_obj, obj, key_type,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index e065c2f68f5a..6cbccba56f70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -21,7 +21,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
struct mlx5_dm *dm;
if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
- return 0;
+ return NULL;
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
if (!dm)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f554cfddcf4e..17f818a54090 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -70,6 +70,7 @@
#include "diag/fw_tracer.h"
#include "ecpf.h"
#include "lib/hv_vhca.h"
+#include "diag/rsc_dump.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -880,6 +881,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
+ dev->rsc_dump = mlx5_rsc_dump_create(dev);
return 0;
@@ -909,6 +911,7 @@ err_devcom:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_rsc_dump_destroy(dev);
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
@@ -962,6 +965,8 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
goto err_cmd_cleanup;
}
+ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
+
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
@@ -1023,6 +1028,7 @@ reclaim_boot_pages:
err_disable_hca:
mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
+ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
return err;
@@ -1040,6 +1046,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
}
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
+ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
return 0;
@@ -1079,6 +1086,12 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_hv_vhca_init(dev->hv_vhca);
+ err = mlx5_rsc_dump_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init Resource dump\n");
+ goto err_rsc_dump;
+ }
+
err = mlx5_fpga_device_start(dev);
if (err) {
mlx5_core_err(dev, "fpga device start failed %d\n", err);
@@ -1134,6 +1147,8 @@ err_tls_start:
err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
+ mlx5_rsc_dump_cleanup(dev);
+err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
@@ -1155,6 +1170,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
+ mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
@@ -1179,7 +1195,7 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
err = mlx5_function_setup(dev, boot);
if (err)
- goto out;
+ goto err_function;
if (boot) {
err = mlx5_init_once(dev);
@@ -1199,15 +1215,10 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
goto err_devlink_reg;
}
- if (mlx5_device_registered(dev)) {
+ if (mlx5_device_registered(dev))
mlx5_attach_device(dev);
- } else {
- err = mlx5_register_device(dev);
- if (err) {
- mlx5_core_err(dev, "register device failed %d\n", err);
- goto err_reg_dev;
- }
- }
+ else
+ mlx5_register_device(dev);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
@@ -1215,9 +1226,6 @@ out:
return err;
-err_reg_dev:
- if (boot)
- mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
mlx5_unload(dev);
err_load:
@@ -1225,13 +1233,14 @@ err_load:
mlx5_cleanup_once(dev);
function_teardown:
mlx5_function_teardown(dev, boot);
+err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
return err;
}
-int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
if (cleanup) {
mlx5_unregister_device(dev);
@@ -1260,7 +1269,6 @@ int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
mlx5_function_teardown(dev, cleanup);
out:
mutex_unlock(&dev->intf_state_mutex);
- return 0;
}
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
@@ -1282,7 +1290,6 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mutex_init(&priv->alloc_mutex);
mutex_init(&priv->pgdir_mutex);
INIT_LIST_HEAD(&priv->pgdir_list);
- spin_lock_init(&priv->mkey_lock);
priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
@@ -1381,12 +1388,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
- if (mlx5_unload_one(dev, true)) {
- mlx5_core_err(dev, "mlx5_unload_one failed\n");
- mlx5_health_flush(dev);
- return;
- }
-
+ mlx5_unload_one(dev, true);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
mlx5_devlink_free(devlink);
@@ -1547,6 +1549,22 @@ static void shutdown(struct pci_dev *pdev)
mlx5_pci_disable_device(dev);
}
+static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ mlx5_unload_one(dev, false);
+
+ return 0;
+}
+
+static int mlx5_resume(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ return mlx5_load_one(dev, false);
+}
+
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
@@ -1590,6 +1608,8 @@ static struct pci_driver mlx5_core_driver = {
.id_table = mlx5_core_pci_table,
.probe = init_one,
.remove = remove_one,
+ .suspend = mlx5_suspend,
+ .resume = mlx5_resume,
.shutdown = shutdown,
.err_handler = &mlx5_err_handler,
.sriov_configure = mlx5_core_sriov_configure,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index da67b28d6e23..a8fb43a85d1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -182,7 +182,7 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev);
bool mlx5_device_registered(struct mlx5_core_dev *dev);
-int mlx5_register_device(struct mlx5_core_dev *dev);
+void mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
@@ -244,6 +244,6 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
-int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 42cc3c7ac5b6..366f2cbfc6db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,54 +36,31 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- struct mlx5_async_ctx *async_ctx, u32 *in,
- int inlen, u32 *out, int outlen,
- mlx5_async_cbk_t callback,
- struct mlx5_async_work *context)
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
u32 mkey_index;
void *mkc;
int err;
- u8 key;
-
- spin_lock_irq(&dev->priv.mkey_lock);
- key = dev->priv.mkey_key++;
- spin_unlock_irq(&dev->priv.mkey_lock);
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
- MLX5_SET(mkc, mkc, mkey_7_0, key);
-
- if (callback)
- return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
- callback, context);
err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
if (err)
return err;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
+ mkey->key |= mlx5_idx_to_mkey(mkey_index);
mkey->pd = MLX5_GET(mkc, mkc, pd);
- mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
- mkey_index, key, mkey->key);
+ mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key);
return 0;
}
-EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
-
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen)
-{
- return mlx5_core_create_mkey_cb(dev, mkey, NULL, in, inlen,
- NULL, 0, NULL, NULL);
-}
EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 01c380425f9d..f3b29d9ade1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -101,22 +101,39 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
+ u16 uid)
+{
+ return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
+ entry->uid == uid);
+}
+
/* Finds an entry where we can register the given rate
* If the rate already exists, return the entry where it is registered,
* otherwise return the first available entry.
* If the table is full, return NULL
*/
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
- struct mlx5_rate_limit *rl)
+ void *rl_in, u16 uid, bool dedicated)
{
struct mlx5_rl_entry *ret_entry = NULL;
bool empty_found = false;
int i;
for (i = 0; i < table->max_size; i++) {
- if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
- return &table->rl_entry[i];
- if (!empty_found && !table->rl_entry[i].rl.rate) {
+ if (dedicated) {
+ if (!table->rl_entry[i].refcount)
+ return &table->rl_entry[i];
+ continue;
+ }
+
+ if (table->rl_entry[i].refcount) {
+ if (table->rl_entry[i].dedicated)
+ continue;
+ if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
+ uid))
+ return &table->rl_entry[i];
+ } else if (!empty_found) {
empty_found = true;
ret_entry = &table->rl_entry[i];
}
@@ -126,18 +143,19 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
}
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
- u16 index,
- struct mlx5_rate_limit *rl)
+ struct mlx5_rl_entry *entry, bool set)
{
- u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+ void *pp_context;
+ pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
MLX5_SET(set_pp_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_PP_RATE_LIMIT);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
- MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
- MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
+ MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
+ if (set)
+ memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -158,23 +176,25 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
}
EXPORT_SYMBOL(mlx5_rl_are_equal);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
- struct mlx5_rate_limit *rl)
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry;
int err = 0;
+ u32 rate;
+ rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
mutex_lock(&table->rl_lock);
- if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
+ if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
- rl->rate, table->min_rate, table->max_rate);
+ rate, table->min_rate, table->max_rate);
err = -EINVAL;
goto out;
}
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
if (!entry) {
mlx5_core_err(dev, "Max number of %u rates reached\n",
table->max_size);
@@ -185,16 +205,24 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
/* rate already configured */
entry->refcount++;
} else {
+ memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
+ entry->uid = uid;
/* new rate limit */
- err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
+ err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
if (err) {
- mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
- err, rl->rate, rl->max_burst_sz,
- rl->typical_pkt_sz);
+ mlx5_core_err(
+ dev,
+ "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+ err, rate,
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ burst_upper_bound),
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ typical_packet_size));
goto out;
}
- entry->rl = *rl;
+
entry->refcount = 1;
+ entry->dedicated = dedicated_entry;
}
*index = entry->index;
@@ -202,20 +230,61 @@ out:
mutex_unlock(&table->rl_lock);
return err;
}
+EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
+
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry;
+
+ mutex_lock(&table->rl_lock);
+ entry = &table->rl_entry[index - 1];
+ entry->refcount--;
+ if (!entry->refcount)
+ /* need to remove rate */
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+ mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl)
+{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+
+ return mlx5_rl_add_rate_raw(dev, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0,
+ false, index);
+}
EXPORT_SYMBOL(mlx5_rl_add_rate);
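mlx5_rl_add_rate() survives as a thin compatibility shim: it packs the legacy struct fields into the raw set_pp_rate_limit_context layout and delegates to mlx5_rl_add_rate_raw(), so one implementation backs both APIs. A sketch of that shim shape, with hypothetical pack_field()/add_raw() helpers in place of the MLX5_SET machinery:

	#include <string.h>

	struct rate_limit { unsigned int rate, burst, pkt_sz; };

	extern void pack_field(unsigned char *raw, int id, unsigned int v);
	extern int add_raw(const unsigned char *raw, unsigned short *index);

	static int add_rate(const struct rate_limit *rl, unsigned short *index)
	{
		unsigned char raw[16] = { 0 };	/* stand-in for the FW layout */

		pack_field(raw, 0, rl->rate);
		pack_field(raw, 1, rl->burst);
		pack_field(raw, 2, rl->pkt_sz);
		return add_raw(raw, index);	/* one implementation, two APIs */
	}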
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry = NULL;
- struct mlx5_rate_limit reset_rl = {0};
/* 0 is a reserved value for unlimited rate */
if (rl->rate == 0)
return;
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+
mutex_lock(&table->rl_lock);
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0, false);
if (!entry || !entry->refcount) {
mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
@@ -223,11 +292,9 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
}
entry->refcount--;
- if (!entry->refcount) {
+ if (!entry->refcount)
/* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
- entry->rl = reset_rl;
- }
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
out:
mutex_unlock(&table->rl_lock);
@@ -273,14 +340,13 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- struct mlx5_rate_limit rl = {0};
int i;
/* Clear all configured rates */
for (i = 0; i < table->max_size; i++)
- if (table->rl_entry[i].rl.rate)
- mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
- &rl);
+ if (table->rl_entry[i].refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
+ false);
kfree(dev->priv.rl_table.rl_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 03f037811f1d..3094d20297a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -77,8 +77,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
if (!MLX5_ESWITCH_MANAGER(dev))
goto enable_vfs_hca;
- mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
- err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
+ err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
if (err) {
mlx5_core_warn(dev,
"failed to enable eswitch SRIOV (%d)\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 2d93228ff633..554811de4c9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -672,7 +672,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
dest_action = action;
if (!action->dest_tbl.is_fw_tbl) {
if (action->dest_tbl.tbl->dmn != dmn) {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Destination table belongs to a different domain\n");
goto out_invalid_arg;
}
@@ -703,7 +703,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->dest_tbl.fw_tbl.rx_icm_addr =
output.sw_owner_icm_root_0;
} else {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Failed mlx5_cmd_query_flow_table ret: %d\n",
ret);
return ret;
@@ -772,7 +772,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
/* Check action duplication */
if (++action_type_set[action_type] > max_actions_type) {
- mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n",
+ mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n",
action_type, max_actions_type);
goto out_invalid_arg;
}
@@ -781,7 +781,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
if (dr_action_validate_and_get_next_state(action_domain,
action_type,
&state)) {
- mlx5dr_dbg(dmn, "Invalid action sequence provided\n");
+ mlx5dr_err(dmn, "Invalid action sequence provided\n");
return -EOPNOTSUPP;
}
}
@@ -797,7 +797,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
rx_rule && recalc_cs_required && dest_action) {
ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
if (ret) {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Failed to handle checksum recalculation err %d\n",
ret);
return ret;
@@ -964,6 +964,24 @@ struct mlx5dr_action *mlx5dr_action_create_drop(void)
}
struct mlx5dr_action *
+mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num)
+{
+ struct mlx5dr_action *action;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ return NULL;
+
+ action->dest_tbl.is_fw_tbl = true;
+ action->dest_tbl.fw_tbl.dmn = dmn;
+ action->dest_tbl.fw_tbl.id = table_num;
+ action->dest_tbl.fw_tbl.type = FS_FT_FDB;
+ refcount_inc(&dmn->refcount);
+
+ return action;
+}
+
+struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
{
struct mlx5dr_action *action;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index a9da961d4d2f..48b6358b6845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -59,7 +59,7 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
- mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
+ mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
return ret;
}
@@ -192,7 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
ret = dr_domain_query_vports(dmn);
if (ret) {
- mlx5dr_dbg(dmn, "Failed to query vports caps\n");
+ mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
goto free_vports_caps;
}
@@ -213,7 +213,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
int ret;
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
- mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
+ mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
return -EOPNOTSUPP;
}
@@ -257,7 +257,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
if (!vport_cap) {
- mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
+ mlx5dr_err(dmn, "Failed to get esw manager vport\n");
return -ENOENT;
}
@@ -268,7 +268,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
break;
default:
- mlx5dr_dbg(dmn, "Invalid domain\n");
+ mlx5dr_err(dmn, "Invalid domain\n");
ret = -EINVAL;
break;
}
@@ -300,7 +300,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
mutex_init(&dmn->mutex);
if (dr_domain_caps_init(mdev, dmn)) {
- mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
+ mlx5dr_err(dmn, "Failed init domain, no caps\n");
goto free_domain;
}
@@ -348,8 +348,11 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
mutex_lock(&dmn->mutex);
ret = mlx5dr_send_ring_force_drain(dmn);
mutex_unlock(&dmn->mutex);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
+ flags, ret);
return ret;
+ }
}
if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index d7c7467e2d53..30d2d7376f56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -468,7 +468,7 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
if (err) {
dr_icm_chill_buckets_abort(pool, bucket, buckets);
- mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
+ mlx5dr_err(pool->dmn, "Sync_steering failed\n");
chunk = NULL;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index c6dbd856df94..a95938874798 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -388,14 +388,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
if (idx == 0) {
- mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n");
+ mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
return -EINVAL;
}
/* Check that all mask fields were consumed */
for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
if (((u8 *)&mask)[i] != 0) {
- mlx5dr_info(dmn, "Mask contains unsupported parameters\n");
+ mlx5dr_err(dmn, "Mask contains unsupported parameters\n");
return -EOPNOTSUPP;
}
}
@@ -563,7 +563,7 @@ static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
if (!nic_matcher->ste_builder) {
- mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
+ mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
return -EINVAL;
}
@@ -634,13 +634,13 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
int ret;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
- mlx5dr_info(dmn, "Invalid match criteria attribute\n");
+ mlx5dr_err(dmn, "Invalid match criteria attribute\n");
return -EINVAL;
}
if (mask) {
if (mask->match_sz > sizeof(struct mlx5dr_match_param)) {
- mlx5dr_info(dmn, "Invalid match size attribute\n");
+ mlx5dr_err(dmn, "Invalid match size attribute\n");
return -EINVAL;
}
mlx5dr_ste_copy_param(matcher->match_criteria,
@@ -671,7 +671,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *tbl,
- u16 priority,
+ u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index e4cff7abb348..cce3ee7a6614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -826,8 +826,8 @@ again:
ste_location, send_ste_list);
if (!new_htbl) {
mlx5dr_htbl_put(cur_htbl);
- mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
- cur_htbl->chunk_size);
+ mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
+ cur_htbl->chunk_size);
} else {
cur_htbl = new_htbl;
}
@@ -877,7 +877,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
if (!value_size ||
(value_size > sizeof(struct mlx5dr_match_param) ||
(value_size % sizeof(u32)))) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
return false;
}
@@ -888,7 +888,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->outer), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
return false;
}
}
@@ -898,7 +898,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
return false;
}
}
@@ -908,7 +908,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->inner), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
return false;
}
}
@@ -918,7 +918,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc2), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
return false;
}
}
@@ -928,7 +928,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc3), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
return false;
}
}
@@ -1221,7 +1221,7 @@ remove_action_members:
dr_rule_remove_action_members(rule);
free_rule:
kfree(rule);
- mlx5dr_info(dmn, "Failed creating rule\n");
+ mlx5dr_err(dmn, "Failed creating rule\n");
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 095ec7b1399d..18719acb7e54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -136,7 +136,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
&dr_qp->wq_ctrl);
if (err) {
- mlx5_core_info(mdev, "Can't create QP WQ\n");
+ mlx5_core_warn(mdev, "Can't create QP WQ\n");
goto err_wq;
}
@@ -652,8 +652,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
/* Init */
ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP rst2init\n");
return ret;
+ }
/* RTR */
ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
@@ -668,8 +670,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
return ret;
+ }
/* RTS */
rts_attr.timeout = 14;
@@ -677,8 +681,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
rts_attr.rnr_retry = 7;
ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
return ret;
+ }
return 0;
}
@@ -689,6 +695,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
}
+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_uars_page *uar,
size_t ncqe)
@@ -750,6 +762,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
cq->mcq.event = dr_cq_event;
+ cq->mcq.comp = dr_cq_complete;
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
kvfree(in);
@@ -761,7 +774,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
*cq->mcq.set_ci_db = 0;
- *cq->mcq.arm_db = 0;
+
+ /* Set a non-zero value so the HW does not run doorbell recovery on
+ * a CQ that is used in polling mode.
+ */
+ *cq->mcq.arm_db = cpu_to_be32(2 << 28);
+
cq->mcq.vector = 0;
cq->mcq.irqn = irqn;
cq->mcq.uar = uar;
@@ -862,6 +880,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
cq_size = QUEUE_SIZE + 1;
dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
if (!dmn->send_ring->cq) {
+ mlx5dr_err(dmn, "Failed creating CQ\n");
ret = -ENOMEM;
goto free_send_ring;
}
@@ -873,6 +892,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
if (!dmn->send_ring->qp) {
+ mlx5dr_err(dmn, "Failed creating QP\n");
ret = -ENOMEM;
goto clean_cq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index aade62a9ee5c..c0e3a1e7389d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -728,7 +728,7 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
{
if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
- mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
+ mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 14ce2d7dbb66..c2fe48d7b75a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -128,16 +128,20 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
DR_CHUNK_SIZE_1,
MLX5DR_STE_LU_TYPE_DONT_CARE,
0);
- if (!nic_tbl->s_anchor)
+ if (!nic_tbl->s_anchor) {
+ mlx5dr_err(dmn, "Failed allocating htbl\n");
return -ENOMEM;
+ }
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_dmn->default_icm_addr;
ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
nic_tbl->s_anchor,
&info, true);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed int and send htbl\n");
goto free_s_anchor;
+ }
mlx5dr_htbl_get(nic_tbl->s_anchor);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index dffe35145d19..3fa739951b34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -705,7 +705,7 @@ struct mlx5dr_matcher {
struct mlx5dr_matcher_rx_tx rx;
struct mlx5dr_matcher_rx_tx tx;
struct list_head matcher_list;
- u16 prio;
+ u32 prio;
struct mlx5dr_match_param mask;
u8 match_criteria;
refcount_t refcount;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index c2027192e21e..3b3f5b9d4f95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -140,7 +140,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_group *fg)
{
struct mlx5dr_matcher *matcher;
- u16 priority = MLX5_GET(create_flow_group_in, in,
+ u32 priority = MLX5_GET(create_flow_group_in, in,
start_flow_index);
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
in,
@@ -384,6 +384,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
+ u32 ft_id;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -420,6 +421,17 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
num_term_actions++;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ ft_id = dst->dest_attr.ft_num;
+ tmp_action = mlx5dr_action_create_dest_table_num(domain,
+ ft_id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
+ break;
default:
err = -EOPNOTSUPP;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index e1edc9c247b7..7deaca9ade3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -38,8 +38,6 @@ struct mlx5dr_action_dest {
struct mlx5dr_action *reformat;
};
-#ifdef CONFIG_MLX5_SW_STEERING
-
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
@@ -59,7 +57,7 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *table,
- u16 priority,
+ u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask);
@@ -77,6 +75,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_action *action);
struct mlx5dr_action *
+mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num);
+
+struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
struct mlx5dr_action *
@@ -125,103 +126,4 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
}
-#else /* CONFIG_MLX5_SW_STEERING */
-
-static inline struct mlx5dr_domain *
-mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) { return NULL; }
-
-static inline int
-mlx5dr_domain_destroy(struct mlx5dr_domain *domain) { return 0; }
-
-static inline int
-mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags) { return 0; }
-
-static inline void
-mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
- struct mlx5dr_domain *peer_dmn) { }
-
-static inline struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; }
-
-static inline int
-mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
-
-static inline u32
-mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; }
-
-static inline struct mlx5dr_matcher *
-mlx5dr_matcher_create(struct mlx5dr_table *table,
- u16 priority,
- u8 match_criteria_enable,
- struct mlx5dr_match_parameters *mask) { return NULL; }
-
-static inline int
-mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) { return 0; }
-
-static inline struct mlx5dr_rule *
-mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
- struct mlx5dr_match_parameters *value,
- size_t num_actions,
- struct mlx5dr_action *actions[]) { return NULL; }
-
-static inline int
-mlx5dr_rule_destroy(struct mlx5dr_rule *rule) { return 0; }
-
-static inline int
-mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
- struct mlx5dr_action *action) { return 0; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
- struct mlx5_flow_table *ft) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
- u32 vport, u8 vhca_id_valid,
- u16 vhca_id) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
- struct mlx5dr_action_dest *dests,
- u32 num_of_dests) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_drop(void) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_tag(u32 tag_value) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_flow_counter(u32 counter_id) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
- enum mlx5dr_action_reformat_type reformat_type,
- size_t data_sz,
- void *data) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
- u32 flags,
- size_t actions_sz,
- __be64 actions[]) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_pop_vlan(void) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain,
- __be32 vlan_hdr) { return NULL; }
-
-static inline int
-mlx5dr_action_destroy(struct mlx5dr_action *action) { return 0; }
-
-static inline bool
-mlx5dr_is_supported(struct mlx5_core_dev *dev) { return false; }
-
-#endif /* CONFIG_MLX5_SW_STEERING */
-
#endif /* _MLX5DR_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Kconfig b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
index 0367f835a846..5b604501f33e 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
@@ -12,3 +12,4 @@ config MLXFW
To compile this driver as a module, choose M here: the
module will be called mlxfw.
select XZ_DEC
+ select NET_DEVLINK
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index c50e74ab02c4..7654841a05c2 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -6,6 +6,30 @@
#include <linux/firmware.h>
#include <linux/netlink.h>
+#include <linux/device.h>
+#include <net/devlink.h>
+
+struct mlxfw_dev {
+ const struct mlxfw_dev_ops *ops;
+ const char *psid;
+ u16 psid_size;
+ struct devlink *devlink;
+};
+
+static inline
+struct device *mlxfw_dev_dev(struct mlxfw_dev *mlxfw_dev)
+{
+ return mlxfw_dev->devlink->dev;
+}
+
+#define MLXFW_PRFX "mlxfw: "
+
+#define mlxfw_info(mlxfw_dev, fmt, ...) \
+ dev_info(mlxfw_dev_dev(mlxfw_dev), MLXFW_PRFX fmt, ## __VA_ARGS__)
+#define mlxfw_err(mlxfw_dev, fmt, ...) \
+ dev_err(mlxfw_dev_dev(mlxfw_dev), MLXFW_PRFX fmt, ## __VA_ARGS__)
+#define mlxfw_dbg(mlxfw_dev, fmt, ...) \
+ dev_dbg(mlxfw_dev_dev(mlxfw_dev), MLXFW_PRFX fmt, ## __VA_ARGS__)
enum mlxfw_fsm_state {
MLXFW_FSM_STATE_IDLE,
@@ -31,7 +55,19 @@ enum mlxfw_fsm_state_err {
MLXFW_FSM_STATE_ERR_MAX,
};
-struct mlxfw_dev;
+enum mlxfw_fsm_reactivate_status {
+ MLXFW_FSM_REACTIVATE_STATUS_OK,
+ MLXFW_FSM_REACTIVATE_STATUS_BUSY,
+ MLXFW_FSM_REACTIVATE_STATUS_PROHIBITED_FW_VER_ERR,
+ MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_COPY_FAILED,
+ MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_ERASE_FAILED,
+ MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_RESTORE_FAILED,
+ MLXFW_FSM_REACTIVATE_STATUS_CANDIDATE_FW_DEACTIVATION_FAILED,
+ MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED,
+ MLXFW_FSM_REACTIVATE_STATUS_ERR_DEVICE_RESET_REQUIRED,
+ MLXFW_FSM_REACTIVATE_STATUS_ERR_FW_PROGRAMMING_NEEDED,
+ MLXFW_FSM_REACTIVATE_STATUS_MAX,
+};
struct mlxfw_dev_ops {
int (*component_query)(struct mlxfw_dev *mlxfw_dev, u16 component_index,
@@ -51,6 +87,8 @@ struct mlxfw_dev_ops {
int (*fsm_activate)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+ int (*fsm_reactivate)(struct mlxfw_dev *mlxfw_dev, u8 *status);
+
int (*fsm_query_state)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
enum mlxfw_fsm_state *fsm_state,
enum mlxfw_fsm_state_err *fsm_state_err);
@@ -58,16 +96,6 @@ struct mlxfw_dev_ops {
void (*fsm_cancel)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
void (*fsm_release)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
-
- void (*status_notify)(struct mlxfw_dev *mlxfw_dev,
- const char *msg, const char *comp_name,
- u32 done_bytes, u32 total_bytes);
-};
-
-struct mlxfw_dev {
- const struct mlxfw_dev_ops *ops;
- const char *psid;
- u16 psid_size;
};
#if IS_REACHABLE(CONFIG_MLXFW)
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 29e95d0a6ad1..046a0cb82ed8 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -16,38 +16,70 @@
(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
-static const char * const mlxfw_fsm_state_err_str[] = {
- [MLXFW_FSM_STATE_ERR_ERROR] =
- "general error",
- [MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR] =
- "component hash mismatch",
- [MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE] =
- "component not applicable",
- [MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY] =
- "unknown key",
- [MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED] =
- "authentication failed",
- [MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED] =
- "component was not signed",
- [MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE] =
- "key not applicable",
- [MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT] =
- "bad format",
- [MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET] =
- "pending reset",
- [MLXFW_FSM_STATE_ERR_MAX] =
- "unknown error"
+static const int mlxfw_fsm_state_errno[] = {
+ [MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
+ [MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR] = -EBADMSG,
+ [MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE] = -ENOENT,
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY] = -ENOKEY,
+ [MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED] = -EACCES,
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED] = -EKEYREVOKED,
+ [MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE] = -EKEYREJECTED,
+ [MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT] = -ENOEXEC,
+ [MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET] = -EBUSY,
+ [MLXFW_FSM_STATE_ERR_MAX] = -EINVAL
};
-static void mlxfw_status_notify(struct mlxfw_dev *mlxfw_dev,
- const char *msg, const char *comp_name,
- u32 done_bytes, u32 total_bytes)
+#define MLXFW_ERR_PRFX "Firmware flash failed: "
+#define MLXFW_ERR_MSG(fwdev, extack, msg, err) do { \
+ mlxfw_err(fwdev, "%s, err (%d)\n", MLXFW_ERR_PRFX msg, err); \
+ NL_SET_ERR_MSG_MOD(extack, MLXFW_ERR_PRFX msg); \
+} while (0)
+
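The MLXFW_ERR_MSG macro above wraps two statements (a dev_err and an extack message) in do { ... } while (0) so the expansion behaves as exactly one statement and composes safely with braceless if/else. A runnable illustration of why the wrapper matters:

	#include <stdio.h>

	#define LOG2(msg) do {			\
		fprintf(stderr, "%s\n", msg);	\
		fflush(stderr);			\
	} while (0)

	int main(void)
	{
		if (1)
			LOG2("taken");	/* one statement: else still binds correctly */
		else
			LOG2("not taken");
		return 0;
	}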
+static int mlxfw_fsm_state_err(struct mlxfw_dev *mlxfw_dev,
+ struct netlink_ext_ack *extack,
+ enum mlxfw_fsm_state_err err)
{
- if (!mlxfw_dev->ops->status_notify)
- return;
- mlxfw_dev->ops->status_notify(mlxfw_dev, msg, comp_name,
- done_bytes, total_bytes);
-}
+ enum mlxfw_fsm_state_err fsm_state_err;
+
+ fsm_state_err = min_t(enum mlxfw_fsm_state_err, err,
+ MLXFW_FSM_STATE_ERR_MAX);
+
+ switch (fsm_state_err) {
+ case MLXFW_FSM_STATE_ERR_ERROR:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "general error", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "component hash mismatch", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "component not applicable", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown key", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "authentication failed", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "component was not signed", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "key not applicable", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "bad format", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "pending reset", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_OK: /* fall through */
+ case MLXFW_FSM_STATE_ERR_MAX:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err);
+ break;
+	}
+
+ return mlxfw_fsm_state_errno[fsm_state_err];
+}
static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
enum mlxfw_fsm_state fsm_state,
@@ -62,21 +94,18 @@ static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
retry:
err = mlxfw_dev->ops->fsm_query_state(mlxfw_dev, fwhandle,
&curr_fsm_state, &fsm_state_err);
- if (err)
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "FSM state query failed", err);
return err;
-
- if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
- fsm_state_err = min_t(enum mlxfw_fsm_state_err,
- fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
- pr_err("Firmware flash failed: %s\n",
- mlxfw_fsm_state_err_str[fsm_state_err]);
- NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
- return -EINVAL;
}
+
+ if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK)
+ return mlxfw_fsm_state_err(mlxfw_dev, extack, fsm_state_err);
+
if (curr_fsm_state != fsm_state) {
if (--times == 0) {
- pr_err("Timeout reached on FSM state change");
- NL_SET_ERR_MSG_MOD(extack, "Timeout reached on FSM state change");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Timeout reached on FSM state change", -ETIMEDOUT);
return -ETIMEDOUT;
}
msleep(MLXFW_FSM_STATE_WAIT_CYCLE_MS);
@@ -85,6 +114,92 @@ retry:
return 0;
}
+static int
+mlxfw_fsm_reactivate_err(struct mlxfw_dev *mlxfw_dev,
+ struct netlink_ext_ack *extack, u8 err)
+{
+ enum mlxfw_fsm_reactivate_status status;
+
+#define MLXFW_REACT_PRFX "Reactivate FSM: "
+#define MLXFW_REACT_ERR(msg, err) \
+	MLXFW_ERR_MSG(mlxfw_dev, extack, MLXFW_REACT_PRFX msg, err)
+
+ status = min_t(enum mlxfw_fsm_reactivate_status, err,
+ MLXFW_FSM_REACTIVATE_STATUS_MAX);
+
+ switch (status) {
+ case MLXFW_FSM_REACTIVATE_STATUS_BUSY:
+ MLXFW_REACT_ERR("busy", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_PROHIBITED_FW_VER_ERR:
+ MLXFW_REACT_ERR("prohibited fw ver", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_COPY_FAILED:
+ MLXFW_REACT_ERR("first page copy failed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_ERASE_FAILED:
+ MLXFW_REACT_ERR("first page erase failed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_RESTORE_FAILED:
+ MLXFW_REACT_ERR("first page restore failed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_CANDIDATE_FW_DEACTIVATION_FAILED:
+ MLXFW_REACT_ERR("candidate fw deactivation failed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_ERR_DEVICE_RESET_REQUIRED:
+ MLXFW_REACT_ERR("device reset required", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_ERR_FW_PROGRAMMING_NEEDED:
+ MLXFW_REACT_ERR("fw programming needed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED:
+ MLXFW_REACT_ERR("fw already activated", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_OK: /* fall through */
+ case MLXFW_FSM_REACTIVATE_STATUS_MAX:
+ MLXFW_REACT_ERR("unexpected error", err);
+ break;
+	}
+ return -EREMOTEIO;
+}
+
+static int mlxfw_fsm_reactivate(struct mlxfw_dev *mlxfw_dev,
+ struct netlink_ext_ack *extack,
+ bool *supported)
+{
+ u8 status;
+ int err;
+
+ if (!mlxfw_dev->ops->fsm_reactivate)
+ return 0;
+
+ err = mlxfw_dev->ops->fsm_reactivate(mlxfw_dev, &status);
+ if (err == -EOPNOTSUPP) {
+ *supported = false;
+ return 0;
+ }
+
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Could not reactivate firmware flash", err);
+ return err;
+ }
+
+ if (status == MLXFW_FSM_REACTIVATE_STATUS_OK ||
+ status == MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED)
+ return 0;
+
+ return mlxfw_fsm_reactivate_err(mlxfw_dev, extack, status);
+}
+
+static void mlxfw_status_notify(struct mlxfw_dev *mlxfw_dev,
+ const char *msg, const char *comp_name,
+ u32 done_bytes, u32 total_bytes)
+{
+ devlink_flash_update_status_notify(mlxfw_dev->devlink, msg, comp_name,
+ done_bytes, total_bytes);
+}
+
#define MLXFW_ALIGN_DOWN(x, align_bits) ((x) & ~((1 << (align_bits)) - 1))
#define MLXFW_ALIGN_UP(x, align_bits) \
MLXFW_ALIGN_DOWN((x) + ((1 << (align_bits)) - 1), (align_bits))
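
/* A minimal sketch, not taken from the patch, assuming <linux/build_bug.h>:
 * with align_bits = 2 these macros round to a 4-byte boundary, which is how
 * mlxfw_flash_component() below clamps comp_max_write_size to the
 * component's required alignment.
 */
static inline void mlxfw_align_example(void)
{
	BUILD_BUG_ON(MLXFW_ALIGN_DOWN(10, 2) != 8);	/* 10 & ~3 == 8 */
	BUILD_BUG_ON(MLXFW_ALIGN_UP(10, 2) != 12);	/* (10 + 3) & ~3 == 12 */
}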
@@ -92,6 +207,7 @@ retry:
static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
u32 fwhandle,
struct mlxfw_mfa2_component *comp,
+ bool reactivate_supp,
struct netlink_ext_ack *extack)
{
u16 comp_max_write_size;
@@ -108,34 +224,43 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
err = mlxfw_dev->ops->component_query(mlxfw_dev, comp->index,
&comp_max_size, &comp_align_bits,
&comp_max_write_size);
- if (err)
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "FSM component query failed", err);
return err;
+ }
comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
if (comp->data_size > comp_max_size) {
- pr_err("Component %d is of size %d which is bigger than limit %d\n",
- comp->index, comp->data_size, comp_max_size);
- NL_SET_ERR_MSG_MOD(extack, "Component is bigger than limit");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Component size is bigger than limit", -EINVAL);
return -EINVAL;
}
comp_max_write_size = MLXFW_ALIGN_DOWN(comp_max_write_size,
comp_align_bits);
- pr_debug("Component update\n");
+ mlxfw_dbg(mlxfw_dev, "Component update\n");
mlxfw_status_notify(mlxfw_dev, "Updating component", comp_name, 0, 0);
err = mlxfw_dev->ops->fsm_component_update(mlxfw_dev, fwhandle,
comp->index,
comp->data_size);
- if (err)
+ if (err) {
+ if (!reactivate_supp)
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "FSM component update failed, FW reactivate is not supported",
+ err);
+ else
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "FSM component update failed", err);
return err;
+ }
err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
MLXFW_FSM_STATE_DOWNLOAD, extack);
if (err)
goto err_out;
- pr_debug("Component download\n");
+ mlxfw_dbg(mlxfw_dev, "Component download\n");
mlxfw_status_notify(mlxfw_dev, "Downloading component",
comp_name, 0, comp->data_size);
for (offset = 0;
@@ -147,19 +272,25 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
err = mlxfw_dev->ops->fsm_block_download(mlxfw_dev, fwhandle,
block_ptr, block_size,
offset);
- if (err)
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Component download failed", err);
goto err_out;
+ }
mlxfw_status_notify(mlxfw_dev, "Downloading component",
comp_name, offset + block_size,
comp->data_size);
}
- pr_debug("Component verify\n");
+ mlxfw_dbg(mlxfw_dev, "Component verify\n");
mlxfw_status_notify(mlxfw_dev, "Verifying component", comp_name, 0, 0);
err = mlxfw_dev->ops->fsm_component_verify(mlxfw_dev, fwhandle,
comp->index);
- if (err)
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "FSM component verify failed", err);
goto err_out;
+ }
err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
MLXFW_FSM_STATE_LOCKED, extack);
@@ -174,6 +305,7 @@ err_out:
static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
struct mlxfw_mfa2_file *mfa2_file,
+ bool reactivate_supp,
struct netlink_ext_ack *extack)
{
u32 component_count;
@@ -184,8 +316,8 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
mlxfw_dev->psid_size,
&component_count);
if (err) {
- pr_err("Could not find device PSID in MFA2 file\n");
- NL_SET_ERR_MSG_MOD(extack, "Could not find device PSID in MFA2 file");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Could not find device PSID in MFA2 file", err);
return err;
}
@@ -194,11 +326,17 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
comp = mlxfw_mfa2_file_component_get(mfa2_file, mlxfw_dev->psid,
mlxfw_dev->psid_size, i);
- if (IS_ERR(comp))
- return PTR_ERR(comp);
+ if (IS_ERR(comp)) {
+ err = PTR_ERR(comp);
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Failed to get MFA2 component", err);
+ return err;
+ }
- pr_info("Flashing component type %d\n", comp->index);
- err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp, extack);
+ mlxfw_info(mlxfw_dev, "Flashing component type %d\n",
+ comp->index);
+ err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp,
+ reactivate_supp, extack);
mlxfw_mfa2_file_component_put(comp);
if (err)
return err;
@@ -211,26 +349,32 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
struct netlink_ext_ack *extack)
{
struct mlxfw_mfa2_file *mfa2_file;
+ bool reactivate_supp = true;
u32 fwhandle;
int err;
if (!mlxfw_mfa2_check(firmware)) {
- pr_err("Firmware file is not MFA2\n");
- NL_SET_ERR_MSG_MOD(extack, "Firmware file is not MFA2");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Firmware file is not MFA2", -EINVAL);
return -EINVAL;
}
mfa2_file = mlxfw_mfa2_file_init(firmware);
- if (IS_ERR(mfa2_file))
- return PTR_ERR(mfa2_file);
+ if (IS_ERR(mfa2_file)) {
+ err = PTR_ERR(mfa2_file);
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Failed to initialize MFA2 firmware file", err);
+ return err;
+ }
- pr_info("Initialize firmware flash process\n");
+ mlxfw_info(mlxfw_dev, "Initialize firmware flash process\n");
+ devlink_flash_update_begin_notify(mlxfw_dev->devlink);
mlxfw_status_notify(mlxfw_dev, "Initializing firmware flash process",
NULL, 0, 0);
err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle);
if (err) {
- pr_err("Could not lock the firmware FSM\n");
- NL_SET_ERR_MSG_MOD(extack, "Could not lock the firmware FSM");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Could not lock the firmware FSM", err);
goto err_fsm_lock;
}
@@ -239,16 +383,26 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
if (err)
goto err_state_wait_idle_to_locked;
- err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file, extack);
+ err = mlxfw_fsm_reactivate(mlxfw_dev, extack, &reactivate_supp);
+ if (err)
+ goto err_fsm_reactivate;
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED, extack);
+ if (err)
+ goto err_state_wait_reactivate_to_locked;
+
+ err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file,
+ reactivate_supp, extack);
if (err)
goto err_flash_components;
- pr_debug("Activate image\n");
+ mlxfw_dbg(mlxfw_dev, "Activate image\n");
mlxfw_status_notify(mlxfw_dev, "Activating image", NULL, 0, 0);
err = mlxfw_dev->ops->fsm_activate(mlxfw_dev, fwhandle);
if (err) {
- pr_err("Could not activate the downloaded image\n");
- NL_SET_ERR_MSG_MOD(extack, "Could not activate the downloaded image");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Could not activate the downloaded image", err);
goto err_fsm_activate;
}
@@ -257,21 +411,25 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
if (err)
goto err_state_wait_activate_to_locked;
- pr_debug("Handle release\n");
+ mlxfw_dbg(mlxfw_dev, "Handle release\n");
mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
- pr_info("Firmware flash done.\n");
+ mlxfw_info(mlxfw_dev, "Firmware flash done\n");
mlxfw_status_notify(mlxfw_dev, "Firmware flash done", NULL, 0, 0);
mlxfw_mfa2_file_fini(mfa2_file);
+ devlink_flash_update_end_notify(mlxfw_dev->devlink);
return 0;
err_state_wait_activate_to_locked:
err_fsm_activate:
err_flash_components:
+err_state_wait_reactivate_to_locked:
+err_fsm_reactivate:
err_state_wait_idle_to_locked:
mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
err_fsm_lock:
mlxfw_mfa2_file_fini(mfa2_file);
+ devlink_flash_update_end_notify(mlxfw_dev->devlink);
return err;
}
EXPORT_SYMBOL(mlxfw_firmware_flash);
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 79057af4fe99..5d9ddf36fb4e 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -496,7 +496,7 @@ mlxfw_mfa2_file_component_tlv_get(const struct mlxfw_mfa2_file *mfa2_file,
struct mlxfw_mfa2_comp_data {
struct mlxfw_mfa2_component comp;
- u8 buff[0];
+ u8 buff[];
};
static const struct mlxfw_mfa2_tlv_component_descriptor *
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
index 33c971190bba..2014a5de5a01 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
@@ -11,7 +11,7 @@ struct mlxfw_mfa2_tlv {
u8 version;
u8 type;
__be16 len;
- u8 data[0];
+ u8 data[];
} __packed;
static inline const struct mlxfw_mfa2_tlv *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e9f791c43f20..e9ccd333f61d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -82,7 +82,7 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool fw_flash_in_progress;
- unsigned long driver_priv[0];
+ unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
@@ -142,6 +142,7 @@ struct mlxsw_rx_listener_item {
struct list_head list;
struct mlxsw_rx_listener rxl;
void *priv;
+ bool enabled;
};
struct mlxsw_event_listener_item {
@@ -1197,6 +1198,72 @@ mlxsw_devlink_trap_group_init(struct devlink *devlink,
return mlxsw_driver->trap_group_init(mlxsw_core, group);
}
+static int
+mlxsw_devlink_trap_group_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_group_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_group_set(mlxsw_core, group, policer);
+}
+
+static int
+mlxsw_devlink_trap_policer_init(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
+}
+
+static void
+mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_fini)
+ return;
+ mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
+}
+
+static int
+mlxsw_devlink_trap_policer_set(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
+ extack);
+}
+
+static int
+mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_counter_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
+ p_drops);
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
.reload_down = mlxsw_devlink_core_bus_device_reload_down,
.reload_up = mlxsw_devlink_core_bus_device_reload_up,
@@ -1219,6 +1286,11 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.trap_fini = mlxsw_devlink_trap_fini,
.trap_action_set = mlxsw_devlink_trap_action_set,
.trap_group_init = mlxsw_devlink_trap_group_init,
+ .trap_group_set = mlxsw_devlink_trap_group_set,
+ .trap_policer_init = mlxsw_devlink_trap_policer_init,
+ .trap_policer_fini = mlxsw_devlink_trap_policer_fini,
+ .trap_policer_set = mlxsw_devlink_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
};
static int
@@ -1457,14 +1529,12 @@ static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_rx_listener *rxl,
- void *priv)
+ const struct mlxsw_rx_listener *rxl)
{
struct mlxsw_rx_listener_item *rxl_item;
list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
- if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
- rxl_item->priv == priv)
+ if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
return rxl_item;
}
return NULL;
@@ -1472,11 +1542,11 @@ __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_rx_listener *rxl,
- void *priv)
+ void *priv, bool enabled)
{
struct mlxsw_rx_listener_item *rxl_item;
- rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
if (rxl_item)
return -EEXIST;
rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
@@ -1484,6 +1554,7 @@ int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
return -ENOMEM;
rxl_item->rxl = *rxl;
rxl_item->priv = priv;
+ rxl_item->enabled = enabled;
list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
return 0;
@@ -1491,12 +1562,11 @@ int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_rx_listener *rxl,
- void *priv)
+ const struct mlxsw_rx_listener *rxl)
{
struct mlxsw_rx_listener_item *rxl_item;
- rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
if (!rxl_item)
return;
list_del_rcu(&rxl_item->list);
@@ -1505,6 +1575,19 @@ void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+static void
+mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ bool enabled)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
+ if (WARN_ON(!rxl_item))
+ return;
+ rxl_item->enabled = enabled;
+}
+
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
@@ -1534,14 +1617,12 @@ static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_event_listener *el,
- void *priv)
+ const struct mlxsw_event_listener *el)
{
struct mlxsw_event_listener_item *el_item;
list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
- if (__is_event_listener_equal(&el_item->el, el) &&
- el_item->priv == priv)
+ if (__is_event_listener_equal(&el_item->el, el))
return el_item;
}
return NULL;
@@ -1559,7 +1640,7 @@ int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
.trap_id = el->trap_id,
};
- el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ el_item = __find_event_listener_item(mlxsw_core, el);
if (el_item)
return -EEXIST;
el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
@@ -1568,7 +1649,7 @@ int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
el_item->el = *el;
el_item->priv = priv;
- err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+ err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
if (err)
goto err_rx_listener_register;
@@ -1586,8 +1667,7 @@ err_rx_listener_register:
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_event_listener *el,
- void *priv)
+ const struct mlxsw_event_listener *el)
{
struct mlxsw_event_listener_item *el_item;
const struct mlxsw_rx_listener rxl = {
@@ -1596,10 +1676,10 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
.trap_id = el->trap_id,
};
- el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ el_item = __find_event_listener_item(mlxsw_core, el);
if (!el_item)
return;
- mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+ mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
list_del(&el_item->list);
kfree(el_item);
}
@@ -1607,16 +1687,18 @@ EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
- void *priv)
+ void *priv, bool enabled)
{
- if (listener->is_event)
+ if (listener->is_event) {
+ WARN_ON(!enabled);
return mlxsw_core_event_listener_register(mlxsw_core,
- &listener->u.event_listener,
+ &listener->event_listener,
priv);
- else
+ } else {
return mlxsw_core_rx_listener_register(mlxsw_core,
- &listener->u.rx_listener,
- priv);
+ &listener->rx_listener,
+ priv, enabled);
+ }
}
static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
@@ -1625,26 +1707,31 @@ static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
{
if (listener->is_event)
mlxsw_core_event_listener_unregister(mlxsw_core,
- &listener->u.event_listener,
- priv);
+ &listener->event_listener);
else
mlxsw_core_rx_listener_unregister(mlxsw_core,
- &listener->u.rx_listener,
- priv);
+ &listener->rx_listener);
}
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener, void *priv)
{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+ enum mlxsw_reg_hpkt_action action;
char hpkt_pl[MLXSW_REG_HPKT_LEN];
int err;
- err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
+ err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
+ listener->enabled_on_register);
if (err)
return err;
- mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
- listener->trap_group, listener->is_ctrl);
+ action = listener->enabled_on_register ? listener->en_action :
+ listener->dis_action;
+ trap_group = listener->enabled_on_register ? listener->en_trap_group :
+ listener->dis_trap_group;
+ mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
+ trap_group, listener->is_ctrl);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
goto err_trap_set;
@@ -1664,8 +1751,8 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
char hpkt_pl[MLXSW_REG_HPKT_LEN];
if (!listener->is_event) {
- mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
- listener->trap_id, listener->trap_group,
+ mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
+ listener->trap_id, listener->dis_trap_group,
listener->is_ctrl);
mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
@@ -1674,17 +1761,33 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
-int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_listener *listener,
- enum mlxsw_reg_hpkt_action action)
+int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ bool enabled)
{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+ enum mlxsw_reg_hpkt_action action;
char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ /* Not supported for event listener */
+ if (WARN_ON(listener->is_event))
+ return -EINVAL;
+ action = enabled ? listener->en_action : listener->dis_action;
+ trap_group = enabled ? listener->en_trap_group :
+ listener->dis_trap_group;
mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
- listener->trap_group, listener->is_ctrl);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ trap_group, listener->is_ctrl);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ return err;
+
+ mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
+ enabled);
+ return 0;
}
-EXPORT_SYMBOL(mlxsw_core_trap_action_set);
+EXPORT_SYMBOL(mlxsw_core_trap_state_set);
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
@@ -1942,7 +2045,8 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
rxl->local_port == local_port) &&
rxl->trap_id == rx_info->trap_id) {
- found = true;
+ if (rxl_item->enabled)
+ found = true;
break;
}
}
@@ -2168,13 +2272,22 @@ int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
/* Here we need to get the module width according to the module type. */
switch (module_type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
+ return 8;
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: /* fall through */
case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP:
+ case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
return 4;
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
return 2;
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
return 1;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 543476a2e503..22b0dfa7cfae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -62,7 +62,6 @@ struct mlxsw_rx_listener {
void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
u8 local_port;
u16 trap_id;
- enum mlxsw_reg_hpkt_action action;
};
struct mlxsw_event_listener {
@@ -76,58 +75,71 @@ struct mlxsw_listener {
union {
struct mlxsw_rx_listener rx_listener;
struct mlxsw_event_listener event_listener;
- } u;
- enum mlxsw_reg_hpkt_action action;
- enum mlxsw_reg_hpkt_action unreg_action;
- u8 trap_group;
- bool is_ctrl; /* should go via control buffer or not */
- bool is_event;
+ };
+ enum mlxsw_reg_hpkt_action en_action; /* Action when enabled */
+ enum mlxsw_reg_hpkt_action dis_action; /* Action when disabled */
+ u8 en_trap_group; /* Trap group when enabled */
+ u8 dis_trap_group; /* Trap group when disabled */
+ u8 is_ctrl:1, /* should go via control buffer or not */
+ is_event:1,
+ enabled_on_register:1; /* Trap should be enabled when listener
+ * is registered.
+ */
};
-#define MLXSW_RXL(_func, _trap_id, _action, _is_ctrl, _trap_group, \
- _unreg_action) \
- { \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- .u.rx_listener = \
- { \
- .func = _func, \
- .local_port = MLXSW_PORT_DONT_CARE, \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- }, \
- .action = MLXSW_REG_HPKT_ACTION_##_action, \
- .unreg_action = MLXSW_REG_HPKT_ACTION_##_unreg_action, \
- .trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
- .is_ctrl = _is_ctrl, \
- .is_event = false, \
+#define __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, _enabled_on_register, _dis_trap_group) \
+ { \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .rx_listener = \
+ { \
+ .func = _func, \
+ .local_port = MLXSW_PORT_DONT_CARE, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ }, \
+ .en_action = MLXSW_REG_HPKT_ACTION_##_en_action, \
+ .dis_action = MLXSW_REG_HPKT_ACTION_##_dis_action, \
+ .en_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_en_trap_group, \
+ .dis_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_dis_trap_group, \
+ .is_ctrl = _is_ctrl, \
+ .enabled_on_register = _enabled_on_register, \
}
-#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \
- { \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- .u.event_listener = \
- { \
- .func = _func, \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- }, \
- .action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, \
- .trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
- .is_ctrl = false, \
- .is_event = true, \
+#define MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
+ _dis_action) \
+ __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
+ _dis_action, true, _trap_group)
+
+#define MLXSW_RXL_DIS(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, _dis_trap_group) \
+ __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, false, _dis_trap_group)
+
+#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \
+ { \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .event_listener = \
+ { \
+ .func = _func, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ }, \
+ .en_action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, \
+ .en_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
+ .is_event = true, \
+ .enabled_on_register = true, \
}
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_rx_listener *rxl,
- void *priv);
+ void *priv, bool enabled);
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_rx_listener *rxl,
- void *priv);
+ const struct mlxsw_rx_listener *rxl);
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_event_listener *el,
void *priv);
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_event_listener *el,
- void *priv);
+ const struct mlxsw_event_listener *el);
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
@@ -135,9 +147,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
void *priv);
-int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_listener *listener,
- enum mlxsw_reg_hpkt_action action);
+int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ bool enabled);
typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
size_t payload_len, unsigned long cb_priv);
@@ -315,6 +327,20 @@ struct mlxsw_driver {
enum devlink_trap_action action);
int (*trap_group_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
+ int (*trap_group_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer);
+ int (*trap_policer_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+ void (*trap_policer_fini)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+ int (*trap_policer_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack);
+ int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
@@ -461,7 +487,10 @@ enum mlxsw_devlink_param_id {
};
struct mlxsw_skb_cb {
- struct mlxsw_tx_info tx_info;
+ union {
+ struct mlxsw_tx_info tx_info;
+ u32 cookie_index; /* Only used during receive */
+ };
};
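
/* Note, not taken from the patch: tx_info and cookie_index can share storage
 * because a given skb is on either the transmit or the receive path, never
 * both at once; the union also keeps struct mlxsw_skb_cb within the 48-byte
 * skb->cb area.
 */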
static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index c51b2adfc1e1..c3d04319ff44 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -7,6 +7,9 @@
#include <linux/errno.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/refcount.h>
+#include <net/flow_offload.h>
#include "item.h"
#include "trap.h"
@@ -63,6 +66,8 @@ struct mlxsw_afa {
void *ops_priv;
struct rhashtable set_ht;
struct rhashtable fwd_entry_ht;
+ struct rhashtable cookie_ht;
+ struct idr cookie_idr;
};
#define MLXSW_AFA_SET_LEN 0xA8
@@ -121,6 +126,55 @@ static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
.automatic_shrinking = true,
};
+struct mlxsw_afa_cookie {
+ struct rhash_head ht_node;
+ refcount_t ref_count;
+ struct rcu_head rcu;
+ u32 cookie_index;
+ struct flow_action_cookie fa_cookie;
+};
+
+static u32 mlxsw_afa_cookie_hash(const struct flow_action_cookie *fa_cookie,
+ u32 seed)
+{
+ return jhash2((u32 *) fa_cookie->cookie,
+ fa_cookie->cookie_len / sizeof(u32), seed);
+}
+
+static u32 mlxsw_afa_cookie_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct flow_action_cookie *fa_cookie = data;
+
+ return mlxsw_afa_cookie_hash(fa_cookie, seed);
+}
+
+static u32 mlxsw_afa_cookie_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct mlxsw_afa_cookie *cookie = data;
+
+ return mlxsw_afa_cookie_hash(&cookie->fa_cookie, seed);
+}
+
+static int mlxsw_afa_cookie_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct flow_action_cookie *fa_cookie = arg->key;
+ const struct mlxsw_afa_cookie *cookie = obj;
+
+ if (cookie->fa_cookie.cookie_len == fa_cookie->cookie_len)
+ return memcmp(cookie->fa_cookie.cookie, fa_cookie->cookie,
+ fa_cookie->cookie_len);
+ return 1;
+}
+
+static const struct rhashtable_params mlxsw_afa_cookie_ht_params = {
+ .head_offset = offsetof(struct mlxsw_afa_cookie, ht_node),
+ .hashfn = mlxsw_afa_cookie_key_hashfn,
+ .obj_hashfn = mlxsw_afa_cookie_obj_hashfn,
+ .obj_cmpfn = mlxsw_afa_cookie_obj_cmpfn,
+ .automatic_shrinking = true,
+};
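
/* Note, not taken from the patch: jhash2() above hashes whole u32 words, so
 * cookie_len / sizeof(u32) silently drops any trailing bytes from the hash.
 * This affects only bucket distribution, not correctness, since the compare
 * function memcmp()s the full cookie_len.
 */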
+
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
const struct mlxsw_afa_ops *ops,
void *ops_priv)
@@ -138,11 +192,18 @@ struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
&mlxsw_afa_fwd_entry_ht_params);
if (err)
goto err_fwd_entry_rhashtable_init;
+ err = rhashtable_init(&mlxsw_afa->cookie_ht,
+ &mlxsw_afa_cookie_ht_params);
+ if (err)
+ goto err_cookie_rhashtable_init;
+ idr_init(&mlxsw_afa->cookie_idr);
mlxsw_afa->max_acts_per_set = max_acts_per_set;
mlxsw_afa->ops = ops;
mlxsw_afa->ops_priv = ops_priv;
return mlxsw_afa;
+err_cookie_rhashtable_init:
+ rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
err_fwd_entry_rhashtable_init:
rhashtable_destroy(&mlxsw_afa->set_ht);
err_set_rhashtable_init:
@@ -153,6 +214,9 @@ EXPORT_SYMBOL(mlxsw_afa_create);
void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
{
+ WARN_ON(!idr_is_empty(&mlxsw_afa->cookie_idr));
+ idr_destroy(&mlxsw_afa->cookie_idr);
+ rhashtable_destroy(&mlxsw_afa->cookie_ht);
rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
rhashtable_destroy(&mlxsw_afa->set_ht);
kfree(mlxsw_afa);
@@ -316,7 +380,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&block->resource_list);
block->afa = mlxsw_afa;
@@ -344,7 +408,7 @@ err_second_set_create:
mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
kfree(block);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(mlxsw_afa_block_create);
@@ -627,6 +691,151 @@ err_counter_index_get:
return ERR_PTR(err);
}
+/* 20 bits is the maximum that the hardware can handle in the trap with
+ * userdef action and carry along with the trapped packet.
+ */
+#define MLXSW_AFA_COOKIE_INDEX_BITS 20
+#define MLXSW_AFA_COOKIE_INDEX_MAX ((1 << MLXSW_AFA_COOKIE_INDEX_BITS) - 1)
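
/* Note, not taken from the patch: MLXSW_AFA_COOKIE_INDEX_MAX is
 * (1 << 20) - 1 = 0xFFFFF, and because index 0 is reserved below as the
 * "no cookie" value, the IDR can hand out up to 1,048,575 distinct indexes.
 */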
+
+static struct mlxsw_afa_cookie *
+mlxsw_afa_cookie_create(struct mlxsw_afa *mlxsw_afa,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie *cookie;
+ u32 cookie_index;
+ int err;
+
+ cookie = kzalloc(sizeof(*cookie) + fa_cookie->cookie_len, GFP_KERNEL);
+ if (!cookie)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&cookie->ref_count, 1);
+ memcpy(&cookie->fa_cookie, fa_cookie,
+ sizeof(*fa_cookie) + fa_cookie->cookie_len);
+
+ err = rhashtable_insert_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+	/* Start cookie indexes at 1 and leave index 0 unused. Packets that
+	 * come from the HW and were not dropped by a drop-with-cookie
+	 * action pass cookie_index 0 to the lookup.
+ */
+ cookie_index = 1;
+ err = idr_alloc_u32(&mlxsw_afa->cookie_idr, cookie, &cookie_index,
+ MLXSW_AFA_COOKIE_INDEX_MAX, GFP_KERNEL);
+ if (err)
+ goto err_idr_alloc;
+ cookie->cookie_index = cookie_index;
+ return cookie;
+
+err_idr_alloc:
+ rhashtable_remove_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+err_rhashtable_insert:
+ kfree(cookie);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_afa_cookie_destroy(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_cookie *cookie)
+{
+ idr_remove(&mlxsw_afa->cookie_idr, cookie->cookie_index);
+ rhashtable_remove_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+ kfree_rcu(cookie, rcu);
+}
+
+static struct mlxsw_afa_cookie *
+mlxsw_afa_cookie_get(struct mlxsw_afa *mlxsw_afa,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie *cookie;
+
+ cookie = rhashtable_lookup_fast(&mlxsw_afa->cookie_ht, fa_cookie,
+ mlxsw_afa_cookie_ht_params);
+ if (cookie) {
+ refcount_inc(&cookie->ref_count);
+ return cookie;
+ }
+ return mlxsw_afa_cookie_create(mlxsw_afa, fa_cookie);
+}
+
+static void mlxsw_afa_cookie_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_cookie *cookie)
+{
+ if (!refcount_dec_and_test(&cookie->ref_count))
+ return;
+ mlxsw_afa_cookie_destroy(mlxsw_afa, cookie);
+}
+
+/* RCU read lock must be held */
+const struct flow_action_cookie *
+mlxsw_afa_cookie_lookup(struct mlxsw_afa *mlxsw_afa, u32 cookie_index)
+{
+ struct mlxsw_afa_cookie *cookie;
+
+ /* 0 index means no cookie */
+ if (!cookie_index)
+ return NULL;
+ cookie = idr_find(&mlxsw_afa->cookie_idr, cookie_index);
+ if (!cookie)
+ return NULL;
+ return &cookie->fa_cookie;
+}
+EXPORT_SYMBOL(mlxsw_afa_cookie_lookup);
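
/* A minimal sketch, not taken from the patch: the intended read side,
 * assuming cookie_index was extracted from a trapped packet's CB:
 *
 *	const struct flow_action_cookie *fa_cookie;
 *
 *	rcu_read_lock();
 *	fa_cookie = mlxsw_afa_cookie_lookup(mlxsw_afa, cookie_index);
 *	if (fa_cookie)
 *		...use the cookie while still inside the RCU section...
 *	rcu_read_unlock();
 *
 * kfree_rcu() in mlxsw_afa_cookie_destroy() guarantees the entry stays
 * valid for the whole read-side critical section.
 */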
+
+struct mlxsw_afa_cookie_ref {
+ struct mlxsw_afa_resource resource;
+ struct mlxsw_afa_cookie *cookie;
+};
+
+static void
+mlxsw_afa_cookie_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_cookie_ref *cookie_ref)
+{
+ mlxsw_afa_resource_del(&cookie_ref->resource);
+ mlxsw_afa_cookie_put(block->afa, cookie_ref->cookie);
+ kfree(cookie_ref);
+}
+
+static void
+mlxsw_afa_cookie_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+
+ cookie_ref = container_of(resource, struct mlxsw_afa_cookie_ref,
+ resource);
+ mlxsw_afa_cookie_ref_destroy(block, cookie_ref);
+}
+
+static struct mlxsw_afa_cookie_ref *
+mlxsw_afa_cookie_ref_create(struct mlxsw_afa_block *block,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+ struct mlxsw_afa_cookie *cookie;
+ int err;
+
+ cookie_ref = kzalloc(sizeof(*cookie_ref), GFP_KERNEL);
+ if (!cookie_ref)
+ return ERR_PTR(-ENOMEM);
+ cookie = mlxsw_afa_cookie_get(block->afa, fa_cookie);
+ if (IS_ERR(cookie)) {
+ err = PTR_ERR(cookie);
+ goto err_cookie_get;
+ }
+ cookie_ref->cookie = cookie;
+ cookie_ref->resource.destructor = mlxsw_afa_cookie_ref_destructor;
+ mlxsw_afa_resource_add(block, &cookie_ref->resource);
+ return cookie_ref;
+
+err_cookie_get:
+ kfree(cookie_ref);
+ return ERR_PTR(err);
+}
+
#define MLXSW_AFA_ONE_ACTION_LEN 32
#define MLXSW_AFA_PAYLOAD_OFFSET 4
@@ -747,97 +956,170 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
-/* Trap / Discard Action
- * ---------------------
- * The Trap / Discard action enables trapping / mirroring packets to the CPU
+/* Trap Action / Trap With Userdef Action
+ * --------------------------------------
+ * The Trap action enables trapping / mirroring packets to the CPU
* as well as discarding packets.
* The ACL Trap / Discard separates the forward/discard control from CPU
* trap control. In addition, the Trap / Discard action enables activating
* SPAN (port mirroring).
+ *
+ * The Trap with userdef action has the same functionality as the Trap
+ * action, with the addition of a user-defined value that can be set and
+ * used by higher-layer applications.
*/
-#define MLXSW_AFA_TRAPDISC_CODE 0x03
-#define MLXSW_AFA_TRAPDISC_SIZE 1
+#define MLXSW_AFA_TRAP_CODE 0x03
+#define MLXSW_AFA_TRAP_SIZE 1
+
+#define MLXSW_AFA_TRAPWU_CODE 0x04
+#define MLXSW_AFA_TRAPWU_SIZE 2
-enum mlxsw_afa_trapdisc_trap_action {
- MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP = 0,
- MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP = 2,
+enum mlxsw_afa_trap_trap_action {
+ MLXSW_AFA_TRAP_TRAP_ACTION_NOP = 0,
+ MLXSW_AFA_TRAP_TRAP_ACTION_TRAP = 2,
};
-/* afa_trapdisc_trap_action
+/* afa_trap_trap_action
* Trap Action.
*/
-MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);
+MLXSW_ITEM32(afa, trap, trap_action, 0x00, 24, 4);
-enum mlxsw_afa_trapdisc_forward_action {
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD = 1,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
+enum mlxsw_afa_trap_forward_action {
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD = 1,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD = 3,
};
-/* afa_trapdisc_forward_action
+/* afa_trap_forward_action
* Forward Action.
*/
-MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
+MLXSW_ITEM32(afa, trap, forward_action, 0x00, 0, 4);
-/* afa_trapdisc_trap_id
+/* afa_trap_trap_id
* Trap ID to configure.
*/
-MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
+MLXSW_ITEM32(afa, trap, trap_id, 0x04, 0, 9);
-/* afa_trapdisc_mirror_agent
+/* afa_trap_mirror_agent
* Mirror agent.
*/
-MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);
+MLXSW_ITEM32(afa, trap, mirror_agent, 0x08, 29, 3);
-/* afa_trapdisc_mirror_enable
+/* afa_trap_mirror_enable
* Mirror enable.
*/
-MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);
+MLXSW_ITEM32(afa, trap, mirror_enable, 0x08, 24, 1);
+
+/* user_def_val
+ * Value for SW usage. Can be used to pass information about which rule
+ * caused the trap; it may be overwritten by later traps. The field is
+ * written to the packet's user_def_val only if this is the first trap_id
+ * or if this trap_id has replaced the packet's previous trap_id.
+ */
+MLXSW_ITEM32(afa, trap, user_def_val, 0x0C, 0, 20);
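
/* A minimal sketch, not taken from the patch: MLXSW_ITEM32(afa, trap,
 * user_def_val, 0x0C, 0, 20) generates get/set accessors through item.h.
 * The setter is roughly equivalent to this hand-rolled version for a 20-bit
 * big-endian register field; the example_ name and the GENMASK() usage
 * (from <linux/bits.h>) are illustrative, not from the driver.
 */
static inline void example_trap_user_def_val_set(char *payload, u32 val)
{
	__be32 *p = (__be32 *)(payload + 0x0C);
	u32 tmp = be32_to_cpu(*p);

	tmp &= ~(u32)GENMASK(19, 0);	/* clear bits 19:0 */
	tmp |= val & GENMASK(19, 0);	/* write the 20-bit value */
	*p = cpu_to_be32(tmp);
}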
static inline void
-mlxsw_afa_trapdisc_pack(char *payload,
- enum mlxsw_afa_trapdisc_trap_action trap_action,
- enum mlxsw_afa_trapdisc_forward_action forward_action,
- u16 trap_id)
+mlxsw_afa_trap_pack(char *payload,
+ enum mlxsw_afa_trap_trap_action trap_action,
+ enum mlxsw_afa_trap_forward_action forward_action,
+ u16 trap_id)
{
- mlxsw_afa_trapdisc_trap_action_set(payload, trap_action);
- mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
- mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
+ mlxsw_afa_trap_trap_action_set(payload, trap_action);
+ mlxsw_afa_trap_forward_action_set(payload, forward_action);
+ mlxsw_afa_trap_trap_id_set(payload, trap_id);
}
static inline void
-mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
- u8 mirror_agent)
+mlxsw_afa_trapwu_pack(char *payload,
+ enum mlxsw_afa_trap_trap_action trap_action,
+ enum mlxsw_afa_trap_forward_action forward_action,
+ u16 trap_id, u32 user_def_val)
{
- mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
- mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
+ mlxsw_afa_trap_pack(payload, trap_action, forward_action, trap_id);
+ mlxsw_afa_trap_user_def_val_set(payload, user_def_val);
}
-int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
+static inline void
+mlxsw_afa_trap_mirror_pack(char *payload, bool mirror_enable,
+ u8 mirror_agent)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_TRAPDISC_CODE,
- MLXSW_AFA_TRAPDISC_SIZE);
+ mlxsw_afa_trap_mirror_enable_set(payload, mirror_enable);
+ mlxsw_afa_trap_mirror_agent_set(payload, mirror_agent);
+}
+
+static int mlxsw_afa_block_append_drop_plain(struct mlxsw_afa_block *block,
+ bool ingress)
+{
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
if (IS_ERR(act))
return PTR_ERR(act);
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD,
+ ingress ? MLXSW_TRAP_ID_DISCARD_INGRESS_ACL :
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL);
return 0;
}
+
+static int
+mlxsw_afa_block_append_drop_with_cookie(struct mlxsw_afa_block *block,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+ u32 cookie_index;
+ char *act;
+ int err;
+
+ cookie_ref = mlxsw_afa_cookie_ref_create(block, fa_cookie);
+ if (IS_ERR(cookie_ref)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create cookie for drop action");
+ return PTR_ERR(cookie_ref);
+ }
+ cookie_index = cookie_ref->cookie->cookie_index;
+
+ act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPWU_CODE,
+ MLXSW_AFA_TRAPWU_SIZE);
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append drop with cookie action");
+ err = PTR_ERR(act);
+ goto err_append_action;
+ }
+ mlxsw_afa_trapwu_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD,
+ ingress ? MLXSW_TRAP_ID_DISCARD_INGRESS_ACL :
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL,
+ cookie_index);
+ return 0;
+
+err_append_action:
+ mlxsw_afa_cookie_ref_destroy(block, cookie_ref);
+ return err;
+}
+
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block, bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
+{
+ return fa_cookie ?
+ mlxsw_afa_block_append_drop_with_cookie(block, ingress,
+ fa_cookie, extack) :
+ mlxsw_afa_block_append_drop_plain(block, ingress);
+}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_TRAPDISC_CODE,
- MLXSW_AFA_TRAPDISC_SIZE);
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
if (IS_ERR(act))
return PTR_ERR(act);
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
- trap_id);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD, trap_id);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
@@ -845,15 +1127,13 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_TRAPDISC_CODE,
- MLXSW_AFA_TRAPDISC_SIZE);
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
if (IS_ERR(act))
return PTR_ERR(act);
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
- trap_id);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD, trap_id);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
@@ -920,13 +1200,13 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
u8 mirror_agent)
{
char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_TRAPDISC_CODE,
- MLXSW_AFA_TRAPDISC_SIZE);
+ MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
if (IS_ERR(act))
return PTR_ERR(act);
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
- mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD, 0);
+ mlxsw_afa_trap_mirror_pack(act, true, mirror_agent);
return 0;
}
@@ -958,6 +1238,179 @@ err_append_allocated_mirror:
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
+/* QoS Action
+ * ----------
+ * The QOS_ACTION is used for manipulating the QoS attributes of a packet. It
+ * can be used to change the DSCP, ECN, Color and Switch Priority of the
+ * packet. Note that the PCP field can be changed using the VLAN action.
+ */
+
+#define MLXSW_AFA_QOS_CODE 0x06
+#define MLXSW_AFA_QOS_SIZE 1
+
+enum mlxsw_afa_qos_ecn_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_ECN_CMD_NOP,
+ /* Set ECN to afa_qos_ecn */
+ MLXSW_AFA_QOS_ECN_CMD_SET,
+};
+
+/* afa_qos_ecn_cmd
+ */
+MLXSW_ITEM32(afa, qos, ecn_cmd, 0x04, 29, 3);
+
+/* afa_qos_ecn
+ * ECN value.
+ */
+MLXSW_ITEM32(afa, qos, ecn, 0x04, 24, 2);
+
+enum mlxsw_afa_qos_dscp_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_DSCP_CMD_NOP,
+ /* Set DSCP 3 LSB bits according to dscp[2:0] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_3LSB,
+ /* Set DSCP 3 MSB bits according to dscp[5:3] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_3MSB,
+ /* Set DSCP 6 bits according to dscp[5:0] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_ALL,
+};
+
+/* afa_qos_dscp_cmd
+ * DSCP command.
+ */
+MLXSW_ITEM32(afa, qos, dscp_cmd, 0x04, 14, 2);
+
+/* afa_qos_dscp
+ * DSCP value.
+ */
+MLXSW_ITEM32(afa, qos, dscp, 0x04, 0, 6);
+
+enum mlxsw_afa_qos_switch_prio_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_SWITCH_PRIO_CMD_NOP,
+ /* Set Switch Priority to afa_qos_switch_prio */
+ MLXSW_AFA_QOS_SWITCH_PRIO_CMD_SET,
+};
+
+/* afa_qos_switch_prio_cmd
+ */
+MLXSW_ITEM32(afa, qos, switch_prio_cmd, 0x08, 14, 2);
+
+/* afa_qos_switch_prio
+ * Switch Priority.
+ */
+MLXSW_ITEM32(afa, qos, switch_prio, 0x08, 0, 4);
+
+enum mlxsw_afa_qos_dscp_rw {
+ MLXSW_AFA_QOS_DSCP_RW_PRESERVE,
+ MLXSW_AFA_QOS_DSCP_RW_SET,
+ MLXSW_AFA_QOS_DSCP_RW_CLEAR,
+};
+
+/* afa_qos_dscp_rw
+ * DSCP Re-write Enable. Controls the rewrite_enable for DSCP.
+ */
+MLXSW_ITEM32(afa, qos, dscp_rw, 0x0C, 30, 2);
+
+static inline void
+mlxsw_afa_qos_ecn_pack(char *payload,
+ enum mlxsw_afa_qos_ecn_cmd ecn_cmd, u8 ecn)
+{
+ mlxsw_afa_qos_ecn_cmd_set(payload, ecn_cmd);
+ mlxsw_afa_qos_ecn_set(payload, ecn);
+}
+
+static inline void
+mlxsw_afa_qos_dscp_pack(char *payload,
+ enum mlxsw_afa_qos_dscp_cmd dscp_cmd, u8 dscp)
+{
+ mlxsw_afa_qos_dscp_cmd_set(payload, dscp_cmd);
+ mlxsw_afa_qos_dscp_set(payload, dscp);
+}
+
+static inline void
+mlxsw_afa_qos_switch_prio_pack(char *payload,
+ enum mlxsw_afa_qos_switch_prio_cmd prio_cmd,
+ u8 prio)
+{
+ mlxsw_afa_qos_switch_prio_cmd_set(payload, prio_cmd);
+ mlxsw_afa_qos_switch_prio_set(payload, prio);
+}
+
+static int __mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ bool set_dscp, u8 dscp,
+ bool set_ecn, u8 ecn,
+ struct netlink_ext_ack *extack)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_QOS_CODE,
+ MLXSW_AFA_QOS_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append QOS action");
+ return PTR_ERR(act);
+ }
+
+ if (set_ecn)
+ mlxsw_afa_qos_ecn_pack(act, MLXSW_AFA_QOS_ECN_CMD_SET, ecn);
+ if (set_dscp) {
+ mlxsw_afa_qos_dscp_pack(act, MLXSW_AFA_QOS_DSCP_CMD_SET_ALL,
+ dscp);
+ mlxsw_afa_qos_dscp_rw_set(act, MLXSW_AFA_QOS_DSCP_RW_CLEAR);
+ }
+
+ return 0;
+}
+
+int mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ u8 dsfield,
+ struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ true, dsfield >> 2,
+ true, dsfield & 0x03,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_dsfield);
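
/* Note, not taken from the patch: dsfield packs DSCP in its six high bits
 * and ECN in its two low bits, so dsfield >> 2 and dsfield & 0x03 recover
 * them. For example, DSCP EF (46) with ECT(0) (ECN codepoint 2) gives
 * dsfield = (46 << 2) | 2 = 0xBA, which splits back into 46 and 2.
 */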
+
+int mlxsw_afa_block_append_qos_dscp(struct mlxsw_afa_block *block,
+ u8 dscp, struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ true, dscp,
+ false, 0,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_dscp);
+
+int mlxsw_afa_block_append_qos_ecn(struct mlxsw_afa_block *block,
+ u8 ecn, struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ false, 0,
+ true, ecn,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_ecn);
+
+int mlxsw_afa_block_append_qos_switch_prio(struct mlxsw_afa_block *block,
+ u8 prio,
+ struct netlink_ext_ack *extack)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_QOS_CODE,
+ MLXSW_AFA_QOS_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append QOS action");
+ return PTR_ERR(act);
+ }
+ mlxsw_afa_qos_switch_prio_pack(act, MLXSW_AFA_QOS_SWITCH_PRIO_CMD_SET,
+ prio);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_switch_prio);
+
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 0e3a59dda12e..8c2705e16ef7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <net/flow_offload.h>
struct mlxsw_afa;
struct mlxsw_afa_block;
@@ -42,7 +43,11 @@ int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
-int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
+const struct flow_action_cookie *
+mlxsw_afa_cookie_lookup(struct mlxsw_afa *mlxsw_afa, u32 cookie_index);
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block, bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
@@ -57,6 +62,16 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et,
struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_switch_prio(struct mlxsw_afa_block *block,
+ u8 prio,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ u8 dsfield,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_dscp(struct mlxsw_afa_block *block,
+ u8 dscp, struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_ecn(struct mlxsw_afa_block *block,
+ u8 ecn, struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index feb4672a5ac0..9f6905fa6b47 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -9,6 +9,41 @@
#include "item.h"
#include "core_acl_flex_keys.h"
+/* For the purpose of the driver, define an internal storage scratchpad
+ * that will be used to store key/mask values. For each defined element type
+ * define an internal storage geometry.
+ *
+ * When adding new elements, MLXSW_AFK_ELEMENT_STORAGE_SIZE must be increased
+ * accordingly.
+ */
+static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
+ MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
+ MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
+};
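
/* Note, not taken from the patch: each entry places one element in the
 * scratchpad. MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), for instance,
 * puts the 12-bit VID at bit offset 8 of the 32-bit word at byte 0x10,
 * letting IP_PROTO (bits 0-7), VID (8-19), PCP (20-22) and TCP_FLAGS
 * (23-31) share that word without overlap.
 */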
+
struct mlxsw_afk {
struct list_head key_info_list;
unsigned int max_blocks;
@@ -26,13 +61,15 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
for (j = 0; j < block->instances_count; j++) {
+ const struct mlxsw_afk_element_info *elinfo;
struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
- if (elinst->type != elinst->info->type ||
+ elinfo = &mlxsw_afk_element_infos[elinst->element];
+ if (elinst->type != elinfo->type ||
(!elinst->avoid_size_check &&
elinst->item.size.bits !=
- elinst->info->item.size.bits))
+ elinfo->item.size.bits))
return false;
}
}
@@ -72,7 +109,7 @@ struct mlxsw_afk_key_info {
* is index inside "blocks"
*/
struct mlxsw_afk_element_usage elusage;
- const struct mlxsw_afk_block *blocks[0];
+ const struct mlxsw_afk_block *blocks[];
};
static bool
@@ -116,7 +153,7 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
- if (elinst->info->element == element) {
+ if (elinst->element == element) {
__set_bit(element, picker->hits[i].element);
picker->hits[i].total++;
}
@@ -301,7 +338,7 @@ mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[i];
- if (elinst->info->element == element)
+ if (elinst->element == element)
return elinst;
}
return NULL;
@@ -409,9 +446,12 @@ static void
mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
char *output, char *storage, int u32_diff)
{
- const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
+ const struct mlxsw_afk_element_info *elinfo;
+ const struct mlxsw_item *storage_item;
+ elinfo = &mlxsw_afk_element_infos[elinst->element];
+ storage_item = &elinfo->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
mlxsw_sp_afk_encode_u32(storage_item, output_item,
storage, output, u32_diff);
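
A note on the refactor above: a static const table defined in a header is instantiated separately in every translation unit that includes it, so moving mlxsw_afk_element_infos into core_acl_flex_keys.c leaves a single shared definition, and element instances now carry only a small enum mlxsw_afk_element index into it. A minimal sketch of the resulting lookup, using the names from the patch:

    /* Sketch: resolve an instance's canonical storage geometry through
     * the shared table instead of a per-instance pointer.
     */
    const struct mlxsw_afk_element_info *elinfo =
            &mlxsw_afk_element_infos[elinst->element];
    const struct mlxsw_item *storage_item = &elinfo->item;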
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index cb229b55ecc4..a47a17c04c62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -69,42 +69,10 @@ struct mlxsw_afk_element_info {
MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
_element, _offset, 0, _size)
-/* For the purpose of the driver, define an internal storage scratchpad
- * that will be used to store key/mask values. For each defined element type
- * define an internal storage geometry.
- */
-static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
- MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
- MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
- MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
- MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
- MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
- MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
- MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
- MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
-};
-
#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
struct mlxsw_afk_element_inst { /* element instance in actual block */
- const struct mlxsw_afk_element_info *info;
+ enum mlxsw_afk_element element;
enum mlxsw_afk_element_type type;
struct mlxsw_item item; /* element geometry in block */
int u32_key_diff; /* in case value needs to be adjusted before write
@@ -116,7 +84,7 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */
#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, \
_shift, _size, _u32_key_diff, _avoid_size_check) \
{ \
- .info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \
+ .element = MLXSW_AFK_ELEMENT_##_element, \
.type = _type, \
.item = { \
.offset = _offset, \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 34566eb62c47..939b692ffc33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -53,6 +53,7 @@
/**
* struct mlxsw_i2c - device private data:
+ * @cmd: command attributes;
* @cmd.mb_size_in: input mailbox size;
* @cmd.mb_off_in: input mailbox offset in register space;
* @cmd.mb_size_out: output mailbox size;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index e9ded1a6e131..fd0e97de44e7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -575,6 +575,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+ if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
+ rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
+ u32 cookie_index = 0;
+
+ if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
+ cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
+ mlxsw_skb_cb(skb)->cookie_index = cookie_index;
+ }
+
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
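
The hunk above stashes the CQEv2 user-defined value in the skb control block whenever the packet was trapped by one of the two ACL discard traps; on older CQE versions the index stays 0. Elsewhere in this patch the index can be translated back into the flow_action cookie the rule was installed with (see mlxsw_sp_acl_act_cookie_lookup() in spectrum.h below). A hedged sketch of the consuming side, assuming a drop-report path with skb and mlxsw_sp in scope:

    /* Sketch: recover the tc cookie for a dropped packet; the lookup
     * may return NULL, e.g. for the default index 0.
     */
    const struct flow_action_cookie *fa_cookie;

    fa_cookie = mlxsw_sp_acl_act_cookie_lookup(mlxsw_sp,
                                               mlxsw_skb_cb(skb)->cookie_index);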
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 43fa8c85b5d9..32c7cabfb261 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -25,8 +25,6 @@
#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
-#define MLXSW_PCI_SW_RESET 0xF0010
-#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200
#define MLXSW_PCI_FW_READY 0xA1844
@@ -210,6 +208,11 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+/* pci_cqe_user_def_val_orig_pkt_len
+ * When trap_id is an ACL: User defined value from policy engine action.
+ */
+MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
+
/* pci_cqe_owner
* Ownership bit.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index e05d1d1be2fd..9b39b8e70519 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -621,7 +621,7 @@ static inline void mlxsw_reg_sfn_pack(char *payload)
{
MLXSW_REG_ZERO(sfn, payload);
mlxsw_reg_sfn_swid_set(payload, 0);
- mlxsw_reg_sfn_end_set(payload, 1);
+ mlxsw_reg_sfn_end_set(payload, 0);
mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
}
@@ -3296,6 +3296,12 @@ MLXSW_ITEM32(reg, qpcr, g, 0x00, 14, 2);
*/
MLXSW_ITEM32(reg, qpcr, pid, 0x00, 0, 14);
+/* reg_qpcr_clear_counter
+ * Clear counters.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, qpcr, clear_counter, 0x04, 31, 1);
+
/* reg_qpcr_color_aware
* Is the policer aware of colors.
* Must be 0 (unaware) for cpu port.
@@ -3393,6 +3399,17 @@ enum mlxsw_reg_qpcr_action {
*/
MLXSW_ITEM32(reg, qpcr, violate_action, 0x18, 0, 4);
+/* reg_qpcr_violate_count
+ * Counts the number of times violate_action happened on this PID.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, qpcr, violate_count, 0x20, 0, 64);
+
+#define MLXSW_REG_QPCR_LOWEST_CIR 1
+#define MLXSW_REG_QPCR_HIGHEST_CIR (2 * 1000 * 1000 * 1000) /* 2Gpps */
+#define MLXSW_REG_QPCR_LOWEST_CBS 4
+#define MLXSW_REG_QPCR_HIGHEST_CBS 24
+
static inline void mlxsw_reg_qpcr_pack(char *payload, u16 pid,
enum mlxsw_reg_qpcr_ir_units ir_units,
bool bytes, u32 cir, u16 cbs)
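
The new clear_counter and violate_count fields expose the per-policer drop counter that the trap-policer devlink ops added at the end of this patch report back to the user. A rough sketch of a read, assuming the MLXSW_ITEM32()/MLXSW_ITEM64() definitions above generate the usual _set()/_get() helpers and that querying a configured policer is permitted:

    char qpcr_pl[MLXSW_REG_QPCR_LEN];
    u64 drops;
    int err;

    MLXSW_REG_ZERO(qpcr, qpcr_pl);
    mlxsw_reg_qpcr_pid_set(qpcr_pl, policer_id);      /* policer_id: hypothetical */
    mlxsw_reg_qpcr_clear_counter_set(qpcr_pl, false); /* read without reset */
    err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
    if (err)
            return err;
    drops = mlxsw_reg_qpcr_violate_count_get(qpcr_pl);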
@@ -5440,15 +5457,29 @@ enum mlxsw_reg_pmtm_module_type {
/* Backplane with 4 lanes */
MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
/* QSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP,
/* SFP */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP,
/* Backplane with single lane */
MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
 /* Backplane with two lanes */
MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
- /* Chip2Chip */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C = 10,
+ /* Chip2Chip4x */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10,
+ /* Chip2Chip2x */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C2X,
+ /* Chip2Chip1x */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C1X,
+ /* QSFP-DD */
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
+ /* OSFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_OSFP,
+ /* SFP-DD */
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD,
+ /* DSFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_DSFP,
+ /* Chip2Chip8x */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C8X,
};
/* reg_pmtm_module_type
@@ -5506,12 +5537,10 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
@@ -5526,9 +5555,11 @@ enum mlxsw_reg_htgt_trap_group {
enum mlxsw_reg_htgt_discard_trap_group {
MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
};
/* reg_htgt_trap_group
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 6534184cb942..d62496ef299c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -18,6 +18,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_CQE_V1,
MLXSW_RES_ID_CQE_V2,
MLXSW_RES_ID_COUNTER_POOL_SIZE,
+ MLXSW_RES_ID_COUNTER_BANK_SIZE,
MLXSW_RES_ID_MAX_SPAN,
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
@@ -75,6 +76,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_CQE_V1] = 0x2211,
[MLXSW_RES_ID_CQE_V2] = 0x2212,
[MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
+ [MLXSW_RES_ID_COUNTER_BANK_SIZE] = 0x2411,
[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
[MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 7358b5bc7eb6..6b39978acd07 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -43,6 +43,7 @@
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
+#include "spectrum_trap.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_SP1_FWREV_MAJOR 13
@@ -347,19 +348,6 @@ static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
-static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
- const char *msg, const char *comp_name,
- u32 done_bytes, u32 total_bytes)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
-
- devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
- msg, comp_name,
- done_bytes, total_bytes);
-}
-
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
.component_query = mlxsw_sp_component_query,
.fsm_lock = mlxsw_sp_fsm_lock,
@@ -370,7 +358,6 @@ static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
.fsm_query_state = mlxsw_sp_fsm_query_state,
.fsm_cancel = mlxsw_sp_fsm_cancel,
.fsm_release = mlxsw_sp_fsm_release,
- .status_notify = mlxsw_sp_status_notify,
};
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
@@ -382,16 +369,15 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
.ops = &mlxsw_sp_mlxfw_dev_ops,
.psid = mlxsw_sp->bus_info->psid,
.psid_size = strlen(mlxsw_sp->bus_info->psid),
+ .devlink = priv_to_devlink(mlxsw_sp->core),
},
.mlxsw_sp = mlxsw_sp
};
int err;
mlxsw_core_fw_flash_start(mlxsw_sp->core);
- devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
firmware, extack);
- devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
mlxsw_core_fw_flash_end(mlxsw_sp->core);
return err;
@@ -1798,6 +1784,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_TBF:
return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_FIFO:
+ return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -2243,6 +2231,15 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = {
+ {
+ .str = "ecn_marked",
+ .getter = mlxsw_reg_ppcnt_ecn_marked_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
{
.str = "discard_ingress_general",
@@ -2352,6 +2349,7 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+ MLXSW_SP_PORT_HW_EXT_STATS_LEN + \
MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
(MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
IEEE_8021QAZ_MAX_TCS) + \
@@ -2413,6 +2411,12 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
ETH_GSTRING_LEN);
@@ -2474,6 +2478,10 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_EXT_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_ext_stats;
+ *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_DISCARD_CNT:
*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
@@ -2543,6 +2551,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+ /* Extended Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN;
+
/* Discard Counters */
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
data, data_index);
@@ -2788,27 +2801,6 @@ static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
return ptys_proto;
}
-static u32
-mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
-{
- u32 ptys_proto = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
- if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
- ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
- }
- return ptys_proto;
-}
-
-static int
-mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u32 *base_speed)
-{
- *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
- return 0;
-}
-
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
u8 local_port, u32 proto_admin, bool autoneg)
@@ -2833,8 +2825,6 @@ mlxsw_sp1_port_type_speed_ops = {
.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
- .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
- .port_speed_base = mlxsw_sp1_port_speed_base,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};
@@ -3235,51 +3225,6 @@ static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
return ptys_proto;
}
-static u32
-mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
-{
- u32 ptys_proto = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
- ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
- }
- return ptys_proto;
-}
-
-static int
-mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u32 *base_speed)
-{
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_cap;
- int err;
-
- /* In Spectrum-2, the speed of 1x can change from port to port, so query
- * it from firmware.
- */
- mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err)
- return err;
- mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
-
- if (eth_proto_cap &
- MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) {
- *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G;
- return 0;
- }
-
- if (eth_proto_cap &
- MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) {
- *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
- return 0;
- }
-
- return -EIO;
-}
-
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
u8 local_port, u32 proto_admin,
@@ -3305,8 +3250,6 @@ mlxsw_sp2_port_type_speed_ops = {
.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
- .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed,
- .port_speed_base = mlxsw_sp2_port_speed_base,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
};
@@ -3520,24 +3463,24 @@ static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_admin;
- u32 upper_speed;
- u32 base_speed;
int err;
ops = mlxsw_sp->port_type_speed_ops;
- err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
- &base_speed);
+ /* Set advertised speeds to supported speeds. */
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
- upper_speed = base_speed * mlxsw_sp_port->mapping.width;
- eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_admin, mlxsw_sp_port->link.autoneg);
+ eth_proto_cap, mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -4043,6 +3986,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
kfree(mlxsw_sp->ports);
+ mlxsw_sp->ports = NULL;
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
@@ -4079,6 +4023,7 @@ err_port_create:
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
kfree(mlxsw_sp->ports);
+ mlxsw_sp->ports = NULL;
return err;
}
@@ -4200,6 +4145,14 @@ static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}
+static struct mlxsw_sp_port *
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
+ return mlxsw_sp->ports[local_port];
+ return NULL;
+}
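
Together with the two new mlxsw_sp->ports = NULL assignments above, this helper lets the split/unsplit entry points below fail cleanly with "Port number ... does not exist" when the ports array has already been freed (or was never created), instead of dereferencing a stale pointer.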
+
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
@@ -4213,7 +4166,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
int i;
int err;
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
local_port);
@@ -4308,7 +4261,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
int offset;
int i;
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
local_port);
@@ -4614,6 +4567,7 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = {
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
char qpcr_pl[MLXSW_REG_QPCR_LEN];
enum mlxsw_reg_qpcr_ir_units ir_units;
int max_cpu_policers;
@@ -4636,7 +4590,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
rate = 128;
burst_size = 7;
@@ -4649,7 +4602,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
@@ -4677,6 +4629,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
continue;
}
+ __set_bit(i, mlxsw_sp->trap->policers_usage);
mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
burst_size);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
@@ -4729,19 +4682,20 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
priority = 2;
tc = 2;
break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
priority = 1;
tc = 1;
break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
+ priority = 0;
+ tc = 1;
+ break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
tc = MLXSW_REG_HTGT_DEFAULT_TC;
@@ -4805,20 +4759,32 @@ static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_trap *trap;
+ u64 max_policers;
int err;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
+ return -EIO;
+ max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
+ trap = kzalloc(struct_size(trap, policers_usage,
+ BITS_TO_LONGS(max_policers)), GFP_KERNEL);
+ if (!trap)
+ return -ENOMEM;
+ trap->max_policers = max_policers;
+ mlxsw_sp->trap = trap;
+
err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
if (err)
- return err;
+ goto err_cpu_policers_set;
err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
if (err)
- return err;
+ goto err_trap_groups_set;
err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
ARRAY_SIZE(mlxsw_sp_listener));
if (err)
- return err;
+ goto err_traps_register;
err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
mlxsw_sp->listeners_count);
@@ -4830,6 +4796,10 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
err_extra_traps_init:
mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
ARRAY_SIZE(mlxsw_sp_listener));
+err_traps_register:
+err_trap_groups_set:
+err_cpu_policers_set:
+ kfree(trap);
return err;
}
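
The allocation above sizes struct mlxsw_sp_trap together with its trailing policers_usage bitmap in one overflow-checked kzalloc(). A self-contained sketch of the same pattern, with hypothetical names:

    #include <linux/bitops.h>   /* BITS_TO_LONGS */
    #include <linux/overflow.h> /* struct_size */
    #include <linux/slab.h>     /* kzalloc */
    #include <linux/types.h>

    struct foo_trap {                       /* hypothetical */
            u64 max_policers;
            unsigned long policers_usage[]; /* must stay last */
    };

    static struct foo_trap *foo_trap_alloc(u64 max_policers)
    {
            struct foo_trap *trap;

            /* struct_size() adds the header and array sizes with overflow
             * checking; BITS_TO_LONGS() rounds the bitmap up to whole
             * unsigned longs.
             */
            trap = kzalloc(struct_size(trap, policers_usage,
                                       BITS_TO_LONGS(max_policers)),
                           GFP_KERNEL);
            if (!trap)
                    return NULL;
            trap->max_policers = max_policers;
            return trap;
    }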
@@ -4839,6 +4809,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->listeners_count);
mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
ARRAY_SIZE(mlxsw_sp_listener));
+ kfree(mlxsw_sp->trap);
}
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
@@ -4935,16 +4906,35 @@ static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
};
#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
+{
+ return 3 * mtu + buffer_factor * speed / 1000;
+}
static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
{
- return 3 * mtu + MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR * speed / 1000;
+ int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
}
static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
.buffsize_get = mlxsw_sp2_span_buffsize_get,
};
+static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
+}
+
+static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
+ .buffsize_get = mlxsw_sp3_span_buffsize_get,
+};
+
u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
{
	u32 buffsize = mlxsw_sp->span_ops->buffsize_get(mtu, speed);
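
With the factor parameterized, both ASIC generations share the formula buffsize = 3 * mtu + factor * speed / 1000 (speed apparently in Mb/s, as at the PTYS-derived call sites). Worked example for a 1500-byte MTU at 100000 Mb/s: Spectrum-2 gets 3*1500 + 38*100000/1000 = 8300, while Spectrum-3 gets 3*1500 + 50*100000/1000 = 9500, so the Spectrum-3 egress-mirror buffer grows noticeably faster with port speed.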
@@ -5223,7 +5213,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
- mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -5460,8 +5450,13 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_span_register;
+ err = mlxsw_sp_counter_resources_register(mlxsw_core);
+ if (err)
+ goto err_resources_counter_register;
+
return 0;
+err_resources_counter_register:
err_resources_span_register:
devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
return err;
@@ -5479,8 +5474,13 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_span_register;
+ err = mlxsw_sp_counter_resources_register(mlxsw_core);
+ if (err)
+ goto err_resources_counter_register;
+
return 0;
+err_resources_counter_register:
err_resources_span_register:
devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
return err;
@@ -5684,6 +5684,11 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
@@ -5718,6 +5723,11 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
@@ -5751,6 +5761,11 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
@@ -6316,7 +6331,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
return -EINVAL;
}
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6472,7 +6487,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
return -EINVAL;
}
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6549,7 +6564,7 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
if (!info->linking)
break;
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6609,7 +6624,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
if (!info->linking)
break;
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a0f1f9dceec5..ca56e72cb4b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -19,6 +19,7 @@
#include <net/pkt_cls.h>
#include <net/red.h>
#include <net/vxlan.h>
+#include <net/flow_offload.h>
#include "port.h"
#include "core.h"
@@ -32,9 +33,6 @@
#define MLXSW_SP_MID_MAX 7000
-#define MLXSW_SP_PORT_BASE_SPEED_25G 25000 /* Mb/s */
-#define MLXSW_SP_PORT_BASE_SPEED_50G 50000 /* Mb/s */
-
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
@@ -48,6 +46,10 @@
#define MLXSW_SP_RESOURCE_NAME_SPAN "span_agents"
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS "counters"
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW "flow"
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF "rif"
+
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD = 1,
MLXSW_SP_RESOURCE_KVD_LINEAR,
@@ -57,6 +59,9 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_SPAN,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ MLXSW_SP_RESOURCE_COUNTERS_FLOW,
+ MLXSW_SP_RESOURCE_COUNTERS_RIF,
};
struct mlxsw_sp_port;
@@ -141,6 +146,7 @@ struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
+struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -168,12 +174,9 @@ struct mlxsw_sp {
struct notifier_block netdevice_nb;
struct mlxsw_sp_ptp_clock *clock;
struct mlxsw_sp_ptp_state *ptp_state;
-
struct mlxsw_sp_counter_pool *counter_pool;
- struct {
- struct mlxsw_sp_span_entry *entries;
- int entries_count;
- } span;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp_trap *trap;
const struct mlxsw_fw_rev *req_rev;
const char *fw_filename;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
@@ -282,8 +285,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan;
- struct mlxsw_sp_qdisc *root_qdisc;
- struct mlxsw_sp_qdisc *tclass_qdiscs;
+ struct mlxsw_sp_qdisc_state *qdisc;
unsigned acl_rule_count;
struct mlxsw_sp_acl_block *ing_acl_block;
struct mlxsw_sp_acl_block *eg_acl_block;
@@ -313,9 +315,6 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
- u32 (*to_ptys_upper_speed)(struct mlxsw_sp *mlxsw_sp, u32 upper_speed);
- int (*port_speed_base)(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u32 *base_speed);
void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
u8 local_port, u32 proto_admin, bool autoneg);
void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
@@ -468,10 +467,6 @@ int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
const struct net_device *vxlan_dev);
-struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- u16 vid,
- struct netlink_ext_ack *extack);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
@@ -556,7 +551,7 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct netdev_notifier_changeupper_info *info);
bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
-bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
+bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
@@ -571,10 +566,10 @@ void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
-struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
const union mlxsw_sp_l3addr *ul_sip,
@@ -653,7 +648,9 @@ struct mlxsw_sp_acl_rule_info {
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
u8 action_created:1,
- egress_bind_blocker:1;
+ ingress_bind_blocker:1,
+ egress_bind_blocker:1,
+ counter_valid:1;
unsigned int counter_index;
};
@@ -672,16 +669,20 @@ struct mlxsw_sp_acl_block {
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
unsigned int disable_count;
+ unsigned int ingress_blocker_rule_count;
unsigned int egress_blocker_rule_count;
+ unsigned int ingress_binding_count;
+ unsigned int egress_binding_count;
struct net *net;
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
-unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+unsigned int
+mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block);
void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block);
struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
struct net *net);
void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
@@ -694,7 +695,9 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
-bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
@@ -726,7 +729,10 @@ int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
-int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
@@ -741,6 +747,14 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 action, u16 vid, u16 proto, u8 prio,
struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 prio, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ enum flow_action_mangle_base htype,
+ u32 offset, u32 mask, u32 val,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct netlink_ext_ack *extack);
@@ -773,10 +787,17 @@ struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule,
- u64 *packets, u64 *bytes, u64 *last_use);
+ u64 *packets, u64 *bytes, u64 *last_use,
+ enum flow_action_hw_stats *used_hw_stats);
struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
+static inline const struct flow_action_cookie *
+mlxsw_sp_acl_act_cookie_lookup(struct mlxsw_sp *mlxsw_sp, u32 cookie_index)
+{
+ return mlxsw_afa_cookie_lookup(mlxsw_sp->afa, cookie_index);
+}
+
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
@@ -864,6 +885,8 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_ets_qopt_offload *p);
int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_tbf_qopt_offload *p);
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
@@ -974,9 +997,6 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_l3proto proto,
union mlxsw_sp_l3addr *addr);
-u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp);
-bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
- u32 tb_id, __be32 addr);
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack);
@@ -1003,6 +1023,22 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
enum devlink_trap_action action);
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
+int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer);
+int
+mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+void mlxsw_sp_trap_policer_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+int
+mlxsw_sp_trap_policer_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst, struct netlink_ext_ack *extack);
+int
+mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index 09ee0a807747..a9fff8adc75e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -60,7 +60,7 @@ static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
struct mlxsw_sp1_kvdl_part {
struct mlxsw_sp1_kvdl_part_info info;
- unsigned long usage[0]; /* Entries */
+ unsigned long usage[]; /* Entries */
};
struct mlxsw_sp1_kvdl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 6c66a0f1b79e..ad69913f19c1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
* to be written using PEFA register to all indexes for all regions.
*/
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
- if (!afa_block) {
- err = -ENOMEM;
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
goto err_afa_block;
}
err = mlxsw_afa_block_continue(afa_block);
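
The fix above matters because mlxsw_afa_block_create() reports failure as an ERR_PTR(), not NULL: the old !afa_block test would sail past an error and hand a poisoned pointer to mlxsw_afa_block_continue(), so the check is switched to IS_ERR()/PTR_ERR().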
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 8d14770766b4..3a73d654017f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -45,7 +45,7 @@ struct mlxsw_sp2_kvdl_part {
unsigned int usage_bit_count;
unsigned int indexes_per_usage_bit;
unsigned int last_allocated_bit;
- unsigned long usage[0]; /* Usage bits */
+ unsigned long usage[]; /* Usage bits */
};
struct mlxsw_sp2_kvdl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 3d3cca596116..01cff711bbd2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -58,7 +58,7 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
unsigned int ref_count;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -71,7 +71,7 @@ struct mlxsw_sp_acl_rule {
u64 last_used;
u64 last_packets;
u64 last_bytes;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -99,7 +99,8 @@ struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
return block->mlxsw_sp;
}
-unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+unsigned int
+mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block)
{
return block ? block->rule_count : 0;
}
@@ -116,20 +117,24 @@ void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
block->disable_count--;
}
-bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block)
{
return block->disable_count;
}
-bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block)
+bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ return block->egress_binding_count;
+}
- list_for_each_entry(binding, &block->binding_list, list) {
- if (!binding->ingress)
- return true;
- }
- return false;
+bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block)
+{
+ return block->ingress_binding_count;
+}
+
+bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block)
+{
+ return block->ingress_binding_count && block->egress_binding_count;
}
static bool
@@ -163,7 +168,8 @@ mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress);
}
-static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+static bool
+mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_acl_block *block)
{
return block->ruleset_zero;
}
@@ -250,6 +256,11 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
return -EEXIST;
+ if (ingress && block->ingress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
if (!ingress && block->egress_blocker_rule_count) {
NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
return -EOPNOTSUPP;
@@ -267,6 +278,10 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
goto err_ruleset_bind;
}
+ if (ingress)
+ block->ingress_binding_count++;
+ else
+ block->egress_binding_count++;
list_add(&binding->list, &block->binding_list);
return 0;
@@ -288,6 +303,11 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
list_del(&binding->list);
+ if (ingress)
+ block->ingress_binding_count--;
+ else
+ block->egress_binding_count--;
+
if (mlxsw_sp_acl_ruleset_block_bound(block))
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
@@ -444,7 +464,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
if (!rulei)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (afa_block) {
rulei->act_block = afa_block;
@@ -515,9 +535,13 @@ int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
return mlxsw_afa_block_terminate(rulei->act_block);
}
-int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
{
- return mlxsw_afa_block_append_drop(rulei->act_block);
+ return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
+ fa_cookie, extack);
}
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
@@ -614,12 +638,126 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
}
}
+int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ /* Even though both Linux and Spectrum switches support 16 priorities,
+ * spectrum_qdisc only processes the first eight priomap elements, and
+ * the DCB and PFC features are tied to 8 priorities as well. Therefore
+ * bounce attempts to prioritize packets to higher priorities.
+ */
+ if (prio >= IEEE_8021QAZ_MAX_TCS) {
+ NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
+ return -EINVAL;
+ }
+ return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
+ extack);
+}
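
Assuming the usual flower offload plumbing (not part of this hunk), a rule along the lines of `tc filter add dev swp1 ingress flower skip_sw action skbedit priority 3` would reach this function as FLOW_ACTION_PRIORITY; anything with priority 8 or above is rejected with the extack message rather than silently truncated. The command is illustrative only.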
+
+enum mlxsw_sp_acl_mangle_field {
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
+};
+
+struct mlxsw_sp_acl_mangle_action {
+ enum flow_action_mangle_base htype;
+ /* Offset is u32-aligned. */
+ u32 offset;
+ /* Mask bits are unset for the modified field. */
+ u32 mask;
+ /* Shift required to extract the set value. */
+ u32 shift;
+ enum mlxsw_sp_acl_mangle_field field;
+};
+
+#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
+ { \
+ .htype = _htype, \
+ .offset = _offset, \
+ .mask = _mask, \
+ .shift = _shift, \
+ .field = MLXSW_SP_ACL_MANGLE_FIELD_##_field, \
+ }
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
+ MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4, \
+ _offset, _mask, _shift, _field)
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
+ MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6, \
+ _offset, _mask, _shift, _field)
+
+static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),
+};
+
+static int
+mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_mangle_action *mact,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ switch (mact->field) {
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
+ return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
+ return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
+ return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
+ val, extack);
+ }
+
+ /* We shouldn't have gotten a match in the first place! */
+ WARN_ONCE(1, "Unhandled mangle field");
+ return -EINVAL;
+}
+
+int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ enum flow_action_mangle_base htype,
+ u32 offset, u32 mask, u32 val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_acl_mangle_action *mact;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
+ mact = &mlxsw_sp_acl_mangle_actions[i];
+ if (mact->htype == htype &&
+ mact->offset == offset &&
+ mact->mask == mask) {
+ val >>= mact->shift;
+ return mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp,
+ rulei, mact,
+ val, extack);
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+ return -EINVAL;
+}
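
The matcher above identifies a pedit mangle purely by its (htype, offset, mask) triple and then recovers the written value with the per-row shift; the mask has zero bits exactly where the modified field lives. A worked example, assuming the usual 32-bit word layout flow_offload uses for pedit: setting the IPv4 dsfield to 0xb8 (DSCP 46, ECN 0) arrives as htype FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset 0, mask 0xff00ffff, val 0x00b80000; the first table row matches, val >> 16 yields 0xb8, and mlxsw_sp_acl_rulei_act_mangle_field() emits a qos_dsfield action with that value. Any triple not in the table fails with "Unsupported mangle field".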
+
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct netlink_ext_ack *extack)
{
- return mlxsw_afa_block_append_counter(rulei->act_block,
- &rulei->counter_index, extack);
+ int err;
+
+ err = mlxsw_afa_block_append_counter(rulei->act_block,
+ &rulei->counter_index, extack);
+ if (err)
+ return err;
+ rulei->counter_valid = true;
+ return 0;
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
@@ -707,6 +845,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
mutex_unlock(&mlxsw_sp->acl->rules_lock);
block->rule_count++;
+ block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
@@ -726,6 +865,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
+ block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
ruleset->ht_key.block->rule_count--;
mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
@@ -827,20 +967,24 @@ static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule,
- u64 *packets, u64 *bytes, u64 *last_use)
+ u64 *packets, u64 *bytes, u64 *last_use,
+ enum flow_action_hw_stats *used_hw_stats)
{
struct mlxsw_sp_acl_rule_info *rulei;
- u64 current_packets;
- u64 current_bytes;
+ u64 current_packets = 0;
+ u64 current_bytes = 0;
int err;
rulei = mlxsw_sp_acl_rule_rulei(rule);
- err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
- &current_packets, &current_bytes);
- if (err)
- return err;
-
+ if (rulei->counter_valid) {
+ err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
+ &current_packets,
+ &current_bytes);
+ if (err)
+ return err;
+ *used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
+ }
*packets = current_packets - rule->last_packets;
*bytes = current_bytes - rule->last_bytes;
*last_use = rule->last_used;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 3a2de13fcb68..dbd3bebf11ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -13,7 +13,7 @@
struct mlxsw_sp_acl_bf {
struct mutex lock; /* Protects Bloom Filter updates. */
unsigned int bank_size;
- refcount_t refcnt[0];
+ refcount_t refcnt[];
};
/* Bloom filter uses a crc-16 hash over chunks of data which contain 4 key
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index e993159e8e4c..a6e30e020b5c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -224,7 +224,7 @@ struct mlxsw_sp_acl_tcam_vchunk;
struct mlxsw_sp_acl_tcam_chunk {
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
struct mlxsw_sp_acl_tcam_region *region;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -243,7 +243,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
struct mlxsw_sp_acl_tcam_entry {
struct mlxsw_sp_acl_tcam_ventry *ventry;
struct mlxsw_sp_acl_tcam_chunk *chunk;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
struct mlxsw_sp_acl_tcam_vregion *vregion;
- struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct list_head *pos;
int err;
if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
- list_add_tail(&vchunk->list, &vregion->vchunk_list);
+
+ /* Position the vchunk inside the list according to priority */
+ list_for_each(pos, &vregion->vchunk_list) {
+ vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+ if (vchunk2->priority > priority)
+ break;
+ }
+ list_add_tail(&vchunk->list, pos);
mutex_unlock(&vregion->lock);
return vchunk;
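
Keeping vchunk_list sorted relies on a standard list.h idiom: list_add_tail(entry, pos) links the entry immediately before pos, so breaking at the first higher-priority element inserts in front of it, and falling off the end (pos == head) appends at the tail. A self-contained sketch with hypothetical types:

    #include <linux/list.h>

    struct item {                   /* hypothetical */
            unsigned int priority;
            struct list_head list;
    };

    /* Insert @nitem so priorities on @head stay in ascending order. */
    static void item_insert_sorted(struct list_head *head, struct item *nitem)
    {
            struct item *cur;
            struct list_head *pos;

            list_for_each(pos, head) {
                    cur = list_entry(pos, struct item, list);
                    if (cur->priority > nitem->priority)
                            break;
            }
            /* list_add_tail() links @nitem right before @pos; when the
             * loop completes without breaking, pos == head and this
             * appends at the tail.
             */
            list_add_tail(&nitem->list, pos);
    }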
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 5965913565a5..96437992b102 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -20,7 +20,7 @@ struct mlxsw_sp_acl_tcam {
struct mutex lock; /* guards vregion list */
struct list_head vregion_list;
u32 vregion_rehash_intrvl; /* ms */
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -86,7 +86,7 @@ struct mlxsw_sp_acl_tcam_region {
char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
struct mlxsw_afk_key_info *key_info;
struct mlxsw_sp *mlxsw_sp;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 83c2e1e5f216..7974982533b5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -3,92 +3,147 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/spinlock.h>
#include "spectrum_cnt.h"
-#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
-
struct mlxsw_sp_counter_sub_pool {
+ u64 size;
unsigned int base_index;
- unsigned int size;
+ enum mlxsw_res_id entry_size_res_id;
+ const char *resource_name; /* devlink resource name */
+ u64 resource_id; /* devlink resource id */
unsigned int entry_size;
unsigned int bank_count;
+ atomic_t active_entries_count;
};
struct mlxsw_sp_counter_pool {
- unsigned int pool_size;
+ u64 pool_size;
unsigned long *usage; /* Usage bitmap */
- struct mlxsw_sp_counter_sub_pool *sub_pools;
+ spinlock_t counter_pool_lock; /* Protects counter pool allocations */
+ atomic_t active_entries_count;
+ unsigned int sub_pools_count;
+ struct mlxsw_sp_counter_sub_pool sub_pools[];
};
-static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
+ .entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
+ .resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW,
+ .resource_id = MLXSW_SP_RESOURCE_COUNTERS_FLOW,
.bank_count = 6,
},
[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
+ .entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
+ .resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF,
+ .resource_id = MLXSW_SP_RESOURCE_COUNTERS_RIF,
.bank_count = 2,
}
};
-static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
+static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
+{
+ const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;
+
+ return atomic_read(&sub_pool->active_entries_count);
+}
+
+static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
{
- unsigned int total_bank_config = 0;
- unsigned int pool_size;
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int base_index = 0;
+ enum mlxsw_res_id res_id;
+ int err;
int i;
- pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
- /* Check config is valid, no bank over subscription */
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
- total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
- if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
- return -EINVAL;
+ for (i = 0; i < pool->sub_pools_count; i++) {
+ sub_pool = &pool->sub_pools[i];
+ res_id = sub_pool->entry_size_res_id;
+
+ if (!mlxsw_core_res_valid(mlxsw_sp->core, res_id))
+ return -EIO;
+ sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
+ res_id);
+ err = devlink_resource_size_get(devlink,
+ sub_pool->resource_id,
+ &sub_pool->size);
+ if (err)
+ goto err_resource_size_get;
+
+ devlink_resource_occ_get_register(devlink,
+ sub_pool->resource_id,
+ mlxsw_sp_counter_sub_pool_occ_get,
+ sub_pool);
+
+ sub_pool->base_index = base_index;
+ base_index += sub_pool->size;
+ atomic_set(&sub_pool->active_entries_count, 0);
+ }
return 0;
+
+err_resource_size_get:
+ for (i--; i >= 0; i--) {
+ sub_pool = &pool->sub_pools[i];
+
+ devlink_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
+ }
+ return err;
}
-static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_counter_sub_pool *sub_pool;
+ int i;
- /* Prepare generic flow pool*/
- sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
- return -EIO;
- sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- COUNTER_SIZE_PACKETS_BYTES);
- /* Prepare erif pool*/
- sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
- return -EIO;
- sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- COUNTER_SIZE_ROUTER_BASIC);
- return 0;
+ for (i = 0; i < pool->sub_pools_count; i++) {
+ sub_pool = &pool->sub_pools[i];
+
+ WARN_ON(atomic_read(&sub_pool->active_entries_count));
+ devlink_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
+ }
+}
+
+static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
+{
+ const struct mlxsw_sp_counter_pool *pool = priv;
+
+ return atomic_read(&pool->active_entries_count);
}
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_counter_sub_pool *sub_pool;
struct mlxsw_sp_counter_pool *pool;
- unsigned int base_index;
unsigned int map_size;
- int i;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
- return -EIO;
-
- err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
- if (err)
- return err;
-
- err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
- if (err)
- return err;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
+ GFP_KERNEL);
if (!pool)
return -ENOMEM;
+ mlxsw_sp->counter_pool = pool;
+ memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
+ sub_pools_count * sizeof(*sub_pool));
+ pool->sub_pools_count = sub_pools_count;
+ spin_lock_init(&pool->counter_pool_lock);
+ atomic_set(&pool->active_entries_count, 0);
+
+ err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ &pool->pool_size);
+ if (err)
+ goto err_pool_resource_size_get;
+ devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ mlxsw_sp_counter_pool_occ_get, pool);
- pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
pool->usage = kzalloc(map_size, GFP_KERNEL);
@@ -97,26 +152,18 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
goto err_usage_alloc;
}
- pool->sub_pools = mlxsw_sp_counter_sub_pools;
- /* Allocation is based on bank count which should be
- * specified for each sub pool statically.
- */
- base_index = 0;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
- sub_pool = &pool->sub_pools[i];
- sub_pool->size = sub_pool->bank_count *
- MLXSW_SP_COUNTER_POOL_BANK_SIZE;
- sub_pool->base_index = base_index;
- base_index += sub_pool->size;
- /* The last bank can't be fully used */
- if (sub_pool->base_index + sub_pool->size > pool->pool_size)
- sub_pool->size = pool->pool_size - sub_pool->base_index;
- }
+ err = mlxsw_sp_counter_sub_pools_init(mlxsw_sp);
+ if (err)
+ goto err_sub_pools_init;
- mlxsw_sp->counter_pool = pool;
return 0;
+err_sub_pools_init:
+ kfree(pool->usage);
err_usage_alloc:
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
+err_pool_resource_size_get:
kfree(pool);
return err;
}
@@ -124,10 +171,15 @@ err_usage_alloc:
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
pool->pool_size);
+ WARN_ON(atomic_read(&pool->active_entries_count));
kfree(pool->usage);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
kfree(pool);
}
@@ -139,25 +191,37 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_counter_sub_pool *sub_pool;
unsigned int entry_index;
unsigned int stop_index;
- int i;
+ int i, err;
- sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ sub_pool = &pool->sub_pools[sub_pool_id];
stop_index = sub_pool->base_index + sub_pool->size;
entry_index = sub_pool->base_index;
+ spin_lock(&pool->counter_pool_lock);
entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
- if (entry_index == stop_index)
- return -ENOBUFS;
+ if (entry_index == stop_index) {
+ err = -ENOBUFS;
+ goto err_alloc;
+ }
/* The sub-pools can contain a non-integer number of entries,
* so we must check for overflow
*/
- if (entry_index + sub_pool->entry_size > stop_index)
- return -ENOBUFS;
+ if (entry_index + sub_pool->entry_size > stop_index) {
+ err = -ENOBUFS;
+ goto err_alloc;
+ }
for (i = 0; i < sub_pool->entry_size; i++)
__set_bit(entry_index + i, pool->usage);
+ spin_unlock(&pool->counter_pool_lock);
*p_counter_index = entry_index;
+ atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
+ atomic_add(sub_pool->entry_size, &pool->active_entries_count);
return 0;
+
+err_alloc:
+ spin_unlock(&pool->counter_pool_lock);
+ return err;
}
void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
@@ -170,7 +234,77 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(counter_index >= pool->pool_size))
return;
- sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ sub_pool = &pool->sub_pools[sub_pool_id];
+ spin_lock(&pool->counter_pool_lock);
for (i = 0; i < sub_pool->entry_size; i++)
__clear_bit(counter_index + i, pool->usage);
+ spin_unlock(&pool->counter_pool_lock);
+ atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
+ atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
+}
+
+int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ static struct devlink_resource_size_params size_params;
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ const struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int total_bank_config;
+ u64 sub_pool_size;
+ u64 base_index;
+ u64 pool_size;
+ u64 bank_size;
+ int err;
+ int i;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_POOL_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_BANK_SIZE))
+ return -EIO;
+
+ pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE);
+ bank_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_BANK_SIZE);
+
+ devlink_resource_size_params_init(&size_params, pool_size,
+ pool_size, bank_size,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink,
+ MLXSW_SP_RESOURCE_NAME_COUNTERS,
+ pool_size,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ return err;
+
+ /* Allocation is based on bank count which should be
+ * specified for each sub pool statically.
+ */
+ total_bank_config = 0;
+ base_index = 0;
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+ sub_pool = &mlxsw_sp_counter_sub_pools[i];
+ sub_pool_size = sub_pool->bank_count * bank_size;
+ /* The last bank can't be fully used */
+ if (base_index + sub_pool_size > pool_size)
+ sub_pool_size = pool_size - base_index;
+ base_index += sub_pool_size;
+
+ devlink_resource_size_params_init(&size_params, sub_pool_size,
+ sub_pool_size, bank_size,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink,
+ sub_pool->resource_name,
+ sub_pool_size,
+ sub_pool->resource_id,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ &size_params);
+ if (err)
+ return err;
+ total_bank_config += sub_pool->bank_count;
+ }
+
+ /* Check the config is valid: no bank oversubscription */
+ if (WARN_ON(total_bank_config > div64_u64(pool_size, bank_size) + 1))
+ return -EINVAL;
+
+ return 0;
}
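
Taken together, the spectrum_cnt.c rewrite drops the hard-coded 4K bank size in favour of sizes read from device resources, exposes the pool and each sub-pool as devlink resources, guards bitmap allocation with a spinlock now that RTNL is no longer assumed, and tracks occupancy in atomics that a registered getter reports back to devlink. The getter pattern in isolation, with illustrative names:

    #include <linux/atomic.h>
    #include <net/devlink.h>

    struct pool_stats {
    	atomic_t active;	/* entries currently allocated */
    };

    static u64 pool_occ_get(void *priv)
    {
    	const struct pool_stats *stats = priv;

    	return atomic_read(&stats->active);
    }

    /* devlink invokes pool_occ_get() whenever user space queries the
     * resource, so the atomic must be kept current on every alloc/free.
     */
    static void pool_occ_register(struct devlink *devlink, u64 resource_id,
    			      struct pool_stats *stats)
    {
    	devlink_resource_occ_get_register(devlink, resource_id,
    					  pool_occ_get, stats);
    }
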
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index 81465e267b10..a68d931090dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -4,6 +4,7 @@
#ifndef _MLXSW_SPECTRUM_CNT_H
#define _MLXSW_SPECTRUM_CNT_H
+#include "core.h"
#include "spectrum.h"
enum mlxsw_sp_counter_sub_pool_id {
@@ -19,5 +20,6 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 2dc0978428e6..daf029931b5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
+#include <linux/mutex.h>
#include <net/devlink.h>
#include "spectrum.h"
@@ -210,7 +211,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
return err;
rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
i = 0;
start_again:
err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
@@ -241,14 +242,14 @@ start_again:
devlink_dpipe_entry_ctx_close(dump_ctx);
if (i != rif_count)
goto start_again;
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
devlink_dpipe_entry_clear(&entry);
return 0;
err_entry_append:
err_entry_get:
err_ctx_prepare:
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
devlink_dpipe_entry_clear(&entry);
return err;
}
@@ -258,7 +259,7 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
struct mlxsw_sp *mlxsw_sp = priv;
int i;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
@@ -271,7 +272,7 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return 0;
}
@@ -546,7 +547,7 @@ mlxsw_sp_dpipe_table_host_entries_get(struct mlxsw_sp *mlxsw_sp,
int i, j;
int err;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
i = 0;
rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
start_again:
@@ -602,12 +603,12 @@ out:
if (i != rif_count)
goto start_again;
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return 0;
err_ctx_prepare:
err_entry_append:
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -662,7 +663,7 @@ mlxsw_sp_dpipe_table_host_counters_update(struct mlxsw_sp *mlxsw_sp,
{
int i;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -684,7 +685,7 @@ mlxsw_sp_dpipe_table_host_counters_update(struct mlxsw_sp *mlxsw_sp,
enable);
}
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
}
static int mlxsw_sp_dpipe_table_host4_counters_update(void *priv, bool enable)
@@ -701,7 +702,7 @@ mlxsw_sp_dpipe_table_host_size_get(struct mlxsw_sp *mlxsw_sp, int type)
u64 size = 0;
int i;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -721,7 +722,7 @@ mlxsw_sp_dpipe_table_host_size_get(struct mlxsw_sp *mlxsw_sp, int type)
size++;
}
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return size;
}
@@ -1093,7 +1094,7 @@ mlxsw_sp_dpipe_table_adj_entries_get(struct mlxsw_sp *mlxsw_sp,
int j;
int err;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
nh_count_max = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
start_again:
err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
@@ -1130,13 +1131,13 @@ skip:
devlink_dpipe_entry_ctx_close(dump_ctx);
if (nh_count != nh_count_max)
goto start_again;
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return 0;
err_ctx_prepare:
err_entry_append:
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -1206,9 +1207,9 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv)
struct mlxsw_sp *mlxsw_sp = priv;
u64 size;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
size = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return size;
}
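
Every rtnl_lock()/rtnl_unlock() pair in spectrum_dpipe.c is swapped for the new driver-private router mutex, so dpipe dumps and counter toggles serialize only against router state instead of all of RTNL. The scoped-lock shape, as a generic sketch with hypothetical names:

    #include <linux/mutex.h>

    struct router_like {
    	struct mutex lock;	/* protects the RIF/route state below */
    	unsigned int rif_count;
    };

    /* Walk router-private state under the driver's own lock rather
     * than RTNL, so unrelated RTNL users never wait on long dumps.
     */
    static unsigned int router_like_count(struct router_like *r)
    {
    	unsigned int n;

    	mutex_lock(&r->lock);
    	n = r->rif_count;
    	mutex_unlock(&r->lock);
    	return n;
    }
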
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 8df3cb21baa6..004c42274e48 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -8,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
+#include <linux/refcount.h>
#include "spectrum.h"
#include "reg.h"
@@ -24,7 +25,7 @@ struct mlxsw_sp_fid_core {
struct mlxsw_sp_fid {
struct list_head list;
struct mlxsw_sp_rif *rif;
- unsigned int ref_count;
+ refcount_t ref_count;
u16 fid_index;
struct mlxsw_sp_fid_family *fid_family;
struct rhash_head ht_node;
@@ -149,7 +150,7 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index,
mlxsw_sp_fid_ht_params);
if (fid)
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -183,7 +184,7 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->vni_ht, &vni,
mlxsw_sp_fid_vni_ht_params);
if (fid)
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -437,16 +438,6 @@ static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int mlxsw_sp_fid_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u16 vid, bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- char svfa_pl[MLXSW_REG_SVFA_LEN];
-
- mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid_index, vid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
-}
-
static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
u8 local_port, u16 vid, bool valid)
{
@@ -457,140 +448,6 @@ static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
-static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
-{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
- struct mlxsw_sp_fid_8021q *fid_8021q;
- int err;
-
- err = mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, fid->fid_index, true);
- if (err)
- return err;
-
- fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
- err = mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid,
- true);
- if (err)
- goto err_fid_map;
-
- return 0;
-
-err_fid_map:
- mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
- return err;
-}
-
-static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
-{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
- struct mlxsw_sp_fid_8021q *fid_8021q;
-
- fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
- mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid, false);
- mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
-}
-
-static int mlxsw_sp_fid_8021q_index_alloc(struct mlxsw_sp_fid *fid,
- const void *arg, u16 *p_fid_index)
-{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
- u16 vid = *(u16 *) arg;
-
- /* Use 1:1 mapping for simplicity although not a must */
- if (vid < fid_family->start_index || vid > fid_family->end_index)
- return -EINVAL;
- *p_fid_index = vid;
-
- return 0;
-}
-
-static bool
-mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
-{
- u16 vid = *(u16 *) arg;
-
- return mlxsw_sp_fid_8021q_fid(fid)->vid == vid;
-}
-
-static u16 mlxsw_sp_fid_8021q_flood_index(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_index;
-}
-
-static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
- struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
-
- /* In case there are no {Port, VID} => FID mappings on the port,
- * we can use the global VID => FID mapping we created when the
- * FID was configured.
- */
- if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
- return 0;
- return __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port,
- vid, true);
-}
-
-static void
-mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
- struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
-
- if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
- return;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port, vid,
- false);
-}
-
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
- .setup = mlxsw_sp_fid_8021q_setup,
- .configure = mlxsw_sp_fid_8021q_configure,
- .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
- .index_alloc = mlxsw_sp_fid_8021q_index_alloc,
- .compare = mlxsw_sp_fid_8021q_compare,
- .flood_index = mlxsw_sp_fid_8021q_flood_index,
- .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
- .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
-};
-
-static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021q_flood_tables[] = {
- {
- .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
- .table_index = 0,
- },
- {
- .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
- .table_index = 1,
- },
- {
- .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
- .table_index = 2,
- },
-};
-
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_family = {
- .type = MLXSW_SP_FID_TYPE_8021Q,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
- .start_index = 1,
- .end_index = VLAN_VID_MASK,
- .flood_tables = mlxsw_sp_fid_8021q_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021q_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_ops,
-};
-
static struct mlxsw_sp_fid_8021d *
mlxsw_sp_fid_8021d_fid(const struct mlxsw_sp_fid *fid)
{
@@ -845,6 +702,14 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
.lag_vid_valid = 1,
};
+static bool
+mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ return mlxsw_sp_fid_8021q_fid(fid)->vid == vid;
+}
+
static void
mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev)
@@ -1030,7 +895,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(fid, &fid_family->fids_list, list) {
if (!fid->fid_family->ops->compare(fid, arg))
continue;
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -1075,7 +940,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_insert;
list_add(&fid->list, &fid_family->fids_list);
- fid->ref_count++;
+ refcount_set(&fid->ref_count, 1);
return fid;
err_rhashtable_insert:
@@ -1093,7 +958,7 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (--fid->ref_count != 0)
+ if (!refcount_dec_and_test(&fid->ref_count))
return;
list_del(&fid->list);
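
The FID reference count becomes a refcount_t, which saturates rather than wraps and warns on increment-from-zero or underflow. The before/after mapping used in the hunks above, sketched generically:

    #include <linux/refcount.h>

    struct fid_like {
    	refcount_t ref_count;
    };

    static void fid_like_init(struct fid_like *f)
    {
    	refcount_set(&f->ref_count, 1);	/* creator holds one ref */
    }

    static void fid_like_hold(struct fid_like *f)
    {
    	refcount_inc(&f->ref_count);	/* was: f->ref_count++ */
    }

    /* Returns true when the last reference is dropped and the object
     * may be freed; was: --f->ref_count == 0.
     */
    static bool fid_like_put(struct fid_like *f)
    {
    	return refcount_dec_and_test(&f->ref_count);
    }
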
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index b607919c8ad0..890b078851c9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -26,11 +26,21 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (!flow_action_has_entries(flow_action))
return 0;
+ if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
- /* Count action is inserted first */
- err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
- if (err)
- return err;
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
+ act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
+ /* Count action is inserted first */
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
+ if (err)
+ return err;
+ } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
+ act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -41,12 +51,30 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return err;
}
break;
- case FLOW_ACTION_DROP:
- err = mlxsw_sp_acl_rulei_act_drop(rulei);
+ case FLOW_ACTION_DROP: {
+ bool ingress;
+
+ if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
+ return -EOPNOTSUPP;
+ }
+ ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
+ err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
+ act->cookie, extack);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
}
+
+ /* Forbid the block with this rulei from being bound
+ * to ingress/egress in the future. An ingress rule
+ * blocks egress binding and vice versa.
+ */
+ if (ingress)
+ rulei->egress_bind_blocker = 1;
+ else
+ rulei->ingress_bind_blocker = 1;
+ }
break;
case FLOW_ACTION_TRAP:
err = mlxsw_sp_acl_rulei_act_trap(rulei);
@@ -123,9 +151,34 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
u8 prio = act->vlan.prio;
u16 vid = act->vlan.vid;
- return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
- act->id, vid,
- proto, prio, extack);
+ err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+ act->id, vid,
+ proto, prio, extack);
+ if (err)
+ return err;
+ break;
+ }
+ case FLOW_ACTION_PRIORITY:
+ err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
+ act->priority,
+ extack);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_MANGLE: {
+ enum flow_action_mangle_base htype = act->mangle.htype;
+ __be32 be_mask = (__force __be32) act->mangle.mask;
+ __be32 be_val = (__force __be32) act->mangle.val;
+ u32 offset = act->mangle.offset;
+ u32 mask = be32_to_cpu(be_mask);
+ u32 val = be32_to_cpu(be_val);
+
+ err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
+ htype, offset,
+ mask, val, extack);
+ if (err)
+ return err;
+ break;
}
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
@@ -525,6 +578,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct flow_cls_offload *f)
{
+ enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
u64 packets;
@@ -543,11 +597,11 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
- &lastuse);
+ &lastuse, &used_hw_stats);
if (err)
goto err_rule_get_stats;
- flow_stats_update(&f->stats, bytes, packets, lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return 0;
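
Two flower changes land here: the implicit count action is now added only when the first action entry requests immediate (or any) hardware stats, with disabled/don't-care skipping it and everything else rejected, and mlxsw_sp_acl_rule_get_stats() reports back which stats type was actually used so flow_stats_update() can propagate it. A reduced sketch of the stats-type policy, using the enum values from the diff:

    #include <net/flow_offload.h>

    /* Decide whether to program a hardware counter for a rule.
     * Returns 1 to add a counter, 0 to skip one, -EOPNOTSUPP for
     * stats types the hardware cannot honour. A simplified version
     * of the policy in the hunk above.
     */
    static int want_hw_counter(const struct flow_action_entry *act)
    {
    	switch (act->hw_stats) {
    	case FLOW_ACTION_HW_STATS_ANY:
    	case FLOW_ACTION_HW_STATS_IMMEDIATE:
    		return 1;
    	case FLOW_ACTION_HW_STATS_DISABLED:
    	case FLOW_ACTION_HW_STATS_DONT_CARE:
    		return 0;
    	default:
    		return -EOPNOTSUPP;
    	}
    }
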
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 1e4cdee7bcd7..20d72f1c0cee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -2,13 +2,15 @@
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include "spectrum.h"
struct mlxsw_sp_kvdl {
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
- unsigned long priv[0];
+ struct mutex kvdl_lock; /* Protects kvdl allocations */
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -22,6 +24,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
GFP_KERNEL);
if (!kvdl)
return -ENOMEM;
+ mutex_init(&kvdl->kvdl_lock);
kvdl->kvdl_ops = kvdl_ops;
mlxsw_sp->kvdl = kvdl;
@@ -31,6 +34,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_init:
+ mutex_destroy(&kvdl->kvdl_lock);
kfree(kvdl);
return err;
}
@@ -40,6 +44,7 @@ void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+ mutex_destroy(&kvdl->kvdl_lock);
kfree(kvdl);
}
@@ -48,9 +53,14 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count, u32 *p_entry_index)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+ int err;
+
+ mutex_lock(&kvdl->kvdl_lock);
+ err = kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_entry_index);
+ mutex_unlock(&kvdl->kvdl_lock);
- return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
- entry_count, p_entry_index);
+ return err;
}
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
@@ -59,8 +69,10 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+ mutex_lock(&kvdl->kvdl_lock);
kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
entry_count, entry_index);
+ mutex_unlock(&kvdl->kvdl_lock);
}
int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
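
KVDL allocations were previously serialized only by RTNL as a side effect; the new kvdl_lock makes the ops->alloc()/free() calls explicitly mutually exclusive. The wrapper pattern, sketched with hypothetical types:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct alloc_ops {
    	int (*alloc)(void *priv, unsigned int count, u32 *index);
    	void (*free)(void *priv, unsigned int count, u32 index);
    };

    struct allocator {
    	const struct alloc_ops *ops;
    	struct mutex lock;	/* serializes ops->alloc and ops->free */
    	void *priv;
    };

    /* Callers may now run concurrently (no RTNL is assumed), so the
     * allocator itself must provide the exclusion.
     */
    static int allocator_alloc(struct allocator *a, unsigned int count,
    			   u32 *index)
    {
    	int err;

    	mutex_lock(&a->lock);
    	err = a->ops->alloc(a->priv, count, index);
    	mutex_unlock(&a->lock);
    	return err;
    }
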
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 336e5ecc68f8..47eb751a2570 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+#include <linux/mutex.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
@@ -12,6 +13,7 @@ struct mlxsw_sp_mr {
void *catchall_route_priv;
struct delayed_work stats_update_dw;
struct list_head table_list;
+ struct mutex table_list_lock; /* Protects table_list */
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
unsigned long priv[0];
/* priv has to be always the last item */
@@ -66,9 +68,10 @@ struct mlxsw_sp_mr_table {
u32 vr_id;
struct mlxsw_sp_mr_vif vifs[MAXVIFS];
struct list_head route_list;
+ struct mutex route_list_lock; /* Protects route_list */
struct rhashtable route_ht;
const struct mlxsw_sp_mr_table_ops *ops;
- char catchall_route_priv[0];
+ char catchall_route_priv[];
/* catchall_route_priv has to be always the last item */
};
@@ -370,11 +373,13 @@ static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route *mr_route)
{
+ WARN_ON_ONCE(!mutex_is_locked(&mr_table->route_list_lock));
+
mlxsw_sp_mr_mfc_offload_set(mr_route, false);
- mlxsw_sp_mr_route_erase(mr_table, mr_route);
rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
mlxsw_sp_mr_route_ht_params);
list_del(&mr_route->node);
+ mlxsw_sp_mr_route_erase(mr_table, mr_route);
mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}
@@ -415,19 +420,21 @@ int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
goto err_duplicate_route;
}
+ /* Write the route to the hardware */
+ err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
+ if (err)
+ goto err_mr_route_write;
+
/* Put it in the table data-structures */
+ mutex_lock(&mr_table->route_list_lock);
list_add_tail(&mr_route->node, &mr_table->route_list);
+ mutex_unlock(&mr_table->route_list_lock);
err = rhashtable_insert_fast(&mr_table->route_ht,
&mr_route->ht_node,
mlxsw_sp_mr_route_ht_params);
if (err)
goto err_rhashtable_insert;
- /* Write the route to the hardware */
- err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
- if (err)
- goto err_mr_route_write;
-
/* Destroy the original route */
if (replace) {
rhashtable_remove_fast(&mr_table->route_ht,
@@ -440,11 +447,12 @@ int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
mlxsw_sp_mr_mfc_offload_update(mr_route);
return 0;
-err_mr_route_write:
- rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
- mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
+ mutex_lock(&mr_table->route_list_lock);
list_del(&mr_route->node);
+ mutex_unlock(&mr_table->route_list_lock);
+ mlxsw_sp_mr_route_erase(mr_table, mr_route);
+err_mr_route_write:
err_no_orig_route:
err_duplicate_route:
mlxsw_sp_mr_route_destroy(mr_table, mr_route);
@@ -460,8 +468,11 @@ void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
mr_table->ops->key_create(mr_table, &key, mfc);
mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
mlxsw_sp_mr_route_ht_params);
- if (mr_route)
+ if (mr_route) {
+ mutex_lock(&mr_table->route_list_lock);
__mlxsw_sp_mr_route_del(mr_table, mr_route);
+ mutex_unlock(&mr_table->route_list_lock);
+ }
}
/* Should be called after the VIF struct is updated */
@@ -910,6 +921,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
mr_table->proto = proto;
mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
INIT_LIST_HEAD(&mr_table->route_list);
+ mutex_init(&mr_table->route_list_lock);
err = rhashtable_init(&mr_table->route_ht,
&mlxsw_sp_mr_route_ht_params);
@@ -927,12 +939,15 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
&catchall_route_params);
if (err)
goto err_ops_route_create;
+ mutex_lock(&mr->table_list_lock);
list_add_tail(&mr_table->node, &mr->table_list);
+ mutex_unlock(&mr->table_list_lock);
return mr_table;
err_ops_route_create:
rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
+ mutex_destroy(&mr_table->route_list_lock);
kfree(mr_table);
return ERR_PTR(err);
}
@@ -943,10 +958,13 @@ void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
+ mutex_lock(&mr->table_list_lock);
list_del(&mr_table->node);
+ mutex_unlock(&mr->table_list_lock);
mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
&mr_table->catchall_route_priv);
rhashtable_destroy(&mr_table->route_ht);
+ mutex_destroy(&mr_table->route_list_lock);
kfree(mr_table);
}
@@ -955,8 +973,10 @@ void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
struct mlxsw_sp_mr_route *mr_route, *tmp;
int i;
+ mutex_lock(&mr_table->route_list_lock);
list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
__mlxsw_sp_mr_route_del(mr_table, mr_route);
+ mutex_unlock(&mr_table->route_list_lock);
for (i = 0; i < MAXVIFS; i++) {
mr_table->vifs[i].dev = NULL;
@@ -1000,12 +1020,15 @@ static void mlxsw_sp_mr_stats_update(struct work_struct *work)
struct mlxsw_sp_mr_route *mr_route;
unsigned long interval;
- rtnl_lock();
- list_for_each_entry(mr_table, &mr->table_list, node)
+ mutex_lock(&mr->table_list_lock);
+ list_for_each_entry(mr_table, &mr->table_list, node) {
+ mutex_lock(&mr_table->route_list_lock);
list_for_each_entry(mr_route, &mr_table->route_list, node)
mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
mr_route);
- rtnl_unlock();
+ mutex_unlock(&mr_table->route_list_lock);
+ }
+ mutex_unlock(&mr->table_list_lock);
interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
@@ -1024,6 +1047,7 @@ int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops = mr_ops;
mlxsw_sp->mr = mr;
INIT_LIST_HEAD(&mr->table_list);
+ mutex_init(&mr->table_list_lock);
err = mr_ops->init(mlxsw_sp, mr->priv);
if (err)
@@ -1035,6 +1059,7 @@ int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
return 0;
err:
+ mutex_destroy(&mr->table_list_lock);
kfree(mr);
return err;
}
@@ -1045,5 +1070,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mr->stats_update_dw);
mr->mr_ops->fini(mlxsw_sp, mr->priv);
+ mutex_destroy(&mr->table_list_lock);
kfree(mr);
}
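
spectrum_mr.c gains a table_list mutex (the periodic stats worker stops taking RTNL) and a per-table route_list mutex, and route addition is reordered to program the hardware before publishing the route in software, so a failed write never leaves a dangling list entry; the error path unwinds in strict reverse. A sketch of that ordering, with hypothetical hw_route_write()/hash_insert() stand-ins for the mlxsw calls:

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct table {
    	struct mutex route_list_lock;	/* protects route_list */
    	struct list_head route_list;
    };

    struct route {
    	struct list_head node;
    };

    /* Stubs standing in for the hardware write/erase and the
     * rhashtable insert; only the ordering matters here.
     */
    static int hw_route_write(struct table *t, struct route *r) { return 0; }
    static void hw_route_erase(struct table *t, struct route *r) { }
    static int hash_insert(struct table *t, struct route *r) { return 0; }

    static int route_add(struct table *t, struct route *r)
    {
    	int err;

    	err = hw_route_write(t, r);		/* 1. hardware first */
    	if (err)
    		return err;

    	mutex_lock(&t->route_list_lock);	/* 2. then software */
    	list_add_tail(&r->node, &t->route_list);
    	mutex_unlock(&t->route_list_lock);

    	err = hash_insert(t, r);		/* 3. lookup structure */
    	if (err)
    		goto err_hash_insert;
    	return 0;

    err_hash_insert:
    	mutex_lock(&t->route_list_lock);
    	list_del(&r->node);
    	mutex_unlock(&t->route_list_lock);
    	hw_route_erase(t, r);
    	return err;
    }
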
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 346f4a5fe053..221aa6a474eb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
int err;
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
- if (!afa_block)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(afa_block))
+ return afa_block;
err = mlxsw_afa_block_append_allocated_counter(afa_block,
counter_index);
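
The one-line mr_tcam fix is worth calling out: mlxsw_afa_block_create() reports failure via ERR_PTR(), not NULL, so the old `!afa_block` test could never catch an error and would let an error pointer escape. The convention, in miniature with a hypothetical struct:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int dummy; };

    /* Constructors that can fail for more than one reason encode the
     * errno in the returned pointer rather than returning NULL.
     */
    static struct foo *foo_create(void)
    {
    	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

    	if (!f)
    		return ERR_PTR(-ENOMEM);
    	return f;
    }

    static int foo_user(void)
    {
    	struct foo *f = foo_create();

    	if (IS_ERR(f))		/* checking !f here would miss the error */
    		return PTR_ERR(f);
    	kfree(f);
    	return 0;
    }
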
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 2153bcc4b585..54d3e7dcd303 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -67,7 +67,7 @@ struct mlxsw_sp_nve_mc_record {
struct mlxsw_sp_nve_mc_list *mc_list;
const struct mlxsw_sp_nve_mc_record_ops *ops;
u32 kvdl_index;
- struct mlxsw_sp_nve_mc_entry entries[0];
+ struct mlxsw_sp_nve_mc_entry entries[];
};
struct mlxsw_sp_nve_mc_list {
@@ -713,27 +713,6 @@ static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
}
-u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp)
-{
- WARN_ON(mlxsw_sp->nve->num_nve_tunnels == 0);
-
- return mlxsw_sp->nve->tunnel_index;
-}
-
-bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
- u32 tb_id, __be32 addr)
-{
- struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
- struct mlxsw_sp_nve_config *config = &nve->config;
-
- if (nve->num_nve_tunnels &&
- config->ul_proto == MLXSW_SP_L3_PROTO_IPV4 &&
- config->ul_sip.addr4 == addr && config->ul_tb_id == tb_id)
- return true;
-
- return false;
-}
-
static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nve_config *config)
{
@@ -744,6 +723,8 @@ static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
if (nve->num_nve_tunnels++ != 0)
return 0;
+ nve->config = *config;
+
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
&nve->tunnel_index);
if (err)
@@ -760,6 +741,7 @@ err_ops_init:
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
nve->tunnel_index);
err_kvdl_alloc:
+ memset(&nve->config, 0, sizeof(nve->config));
nve->num_nve_tunnels--;
return err;
}
@@ -840,8 +822,6 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
goto err_fid_vni_set;
}
- nve->config = config;
-
err = ops->fdb_replay(params->dev, params->vni, extack);
if (err)
goto err_fdb_replay;
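
In spectrum_nve.c the tunnel config is now cached in nve->config as soon as the first tunnel initializes, before the KVDL allocation and ops->init() rather than after FID enablement, so init-time callbacks see a valid config; the error path wipes it again. (The two removed decap helpers are relocated elsewhere in the series, not dropped.) A minimal sketch of that publish-then-unwind ordering, with stubbed helpers:

    #include <linux/string.h>

    struct tunnel_cfg { int vni; };

    struct tunnel {
    	struct tunnel_cfg config;
    };

    static int tunnel_hw_init(struct tunnel *t) { return 0; }  /* stub */

    /* Publish the config before init so init-time callbacks can read
     * it; wipe it if init fails so a stale config is never visible.
     */
    static int tunnel_init(struct tunnel *t, const struct tunnel_cfg *cfg)
    {
    	int err;

    	t->config = *cfg;
    	err = tunnel_hw_init(t);
    	if (err)
    		memset(&t->config, 0, sizeof(t->config));
    	return err;
    }
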
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 34f7c3501b08..9650562fc0ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -922,6 +922,8 @@ static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
case HWTSTAMP_TX_ONESTEP_SYNC:
case HWTSTAMP_TX_ONESTEP_P2P:
return -ERANGE;
+ default:
+ return -EINVAL;
}
switch (rx_filter) {
@@ -952,6 +954,8 @@ static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_NTP_ALL:
return -ERANGE;
+ default:
+ return -EINVAL;
}
*p_ing_types = ing_types;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 02526c53d4f5..670a43fe2a00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -20,14 +20,17 @@ enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_PRIO,
MLXSW_SP_QDISC_ETS,
MLXSW_SP_QDISC_TBF,
+ MLXSW_SP_QDISC_FIFO,
};
+struct mlxsw_sp_qdisc;
+
struct mlxsw_sp_qdisc_ops {
enum mlxsw_sp_qdisc_type type;
int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params);
- int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
@@ -64,6 +67,25 @@ struct mlxsw_sp_qdisc {
struct mlxsw_sp_qdisc_ops *ops;
};
+struct mlxsw_sp_qdisc_state {
+ struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];
+
+ /* When a PRIO or ETS qdisc is added, the invisible FIFOs in its bands are
+ * created first. When notifications for these FIFOs arrive, it is not
+ * known what qdisc their parent handle refers to. It could be a
+ * newly-created PRIO that will replace the currently-offloaded one, or
+ * it could be e.g. a RED that will be attached below it.
+ *
+ * As the notifications start to arrive, use them to note what the
+ * future parent handle is, and keep track of which child FIFOs were
+ * seen. Then when the parent is known, retroactively offload those
+ * FIFOs.
+ */
+ u32 future_handle;
+ bool future_fifos[IEEE_8021QAZ_MAX_TCS];
+};
+
static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
enum mlxsw_sp_qdisc_type type)
@@ -77,36 +99,38 @@ static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
bool root_only)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int tclass, child_index;
if (parent == TC_H_ROOT)
- return mlxsw_sp_port->root_qdisc;
+ return &qdisc_state->root_qdisc;
- if (root_only || !mlxsw_sp_port->root_qdisc ||
- !mlxsw_sp_port->root_qdisc->ops ||
- TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
+ if (root_only || !qdisc_state ||
+ !qdisc_state->root_qdisc.ops ||
+ TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
return NULL;
child_index = TC_H_MIN(parent);
tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
- return &mlxsw_sp_port->tclass_qdiscs[tclass];
+ return &qdisc_state->tclass_qdiscs[tclass];
}
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int i;
- if (mlxsw_sp_port->root_qdisc->handle == handle)
- return mlxsw_sp_port->root_qdisc;
+ if (qdisc_state->root_qdisc.handle == handle)
+ return &qdisc_state->root_qdisc;
- if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
+ if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
return NULL;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
- return &mlxsw_sp_port->tclass_qdiscs[i];
+ if (qdisc_state->tclass_qdiscs[i].handle == handle)
+ return &qdisc_state->tclass_qdiscs[i];
return NULL;
}
@@ -147,11 +171,15 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
if (err)
goto err_bad_param;
- err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
goto err_config;
- if (mlxsw_sp_qdisc->handle != handle) {
+ /* Check if the Qdisc changed. That includes a situation where an
+ * invisible Qdisc replaces another one, or is being added for the
+ * first time.
+ */
+ if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
mlxsw_sp_qdisc->ops = ops;
if (ops->clean_stats)
ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
@@ -295,7 +323,7 @@ mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
- u32 probability, bool is_ecn)
+ u32 probability, bool is_wred, bool is_ecn)
{
char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
char cwtp_cmd[MLXSW_REG_CWTP_LEN];
@@ -313,7 +341,7 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
- MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
+ MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
@@ -347,7 +375,6 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->prio_bitmap,
&stats_base->tx_packets,
&stats_base->tx_bytes);
- red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
@@ -361,7 +388,8 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
if (root_qdisc != mlxsw_sp_qdisc)
root_qdisc->stats_base.backlog -=
@@ -400,7 +428,7 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
@@ -417,8 +445,9 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
prob = DIV_ROUND_UP(prob, 1 << 16);
min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
- return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
- max, prob, p->is_ecn);
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
+ min, max, prob,
+ !p->is_nodrop, p->is_ecn);
}
static void
@@ -453,22 +482,19 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *res = xstats_ptr;
- int early_drops, marks, pdrops;
+ int early_drops, pdrops;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
- marks = xstats->ecn - xstats_base->prob_mark;
pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
- res->prob_mark += marks;
xstats_base->pdrop += pdrops;
xstats_base->prob_drop += early_drops;
- xstats_base->prob_mark += marks;
return 0;
}
@@ -486,8 +512,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
stats_base = &mlxsw_sp_qdisc->stats_base;
mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
- overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
- stats_base->overlimits;
+ overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
stats_ptr->qstats->overlimits += overlimits;
stats_base->overlimits += overlimits;
@@ -564,7 +589,8 @@ static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
if (root_qdisc != mlxsw_sp_qdisc)
root_qdisc->stats_base.backlog -=
@@ -651,7 +677,7 @@ mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
@@ -740,8 +766,121 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
+mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ stats_ptr);
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
+ .type = MLXSW_SP_QDISC_FIFO,
+ .check_params = mlxsw_sp_qdisc_fifo_check_params,
+ .replace = mlxsw_sp_qdisc_fifo_replace,
+ .destroy = mlxsw_sp_qdisc_fifo_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_fifo_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ int tclass, child_index;
+ u32 parent_handle;
+
+ /* Invisible FIFOs are tracked in future_handle and future_fifos. Make
+ * sure that no more than one qdisc is created for a port at a time.
+ * RTNL is a simple proxy for that.
+ */
+ ASSERT_RTNL();
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
+ parent_handle = TC_H_MAJ(p->parent);
+ if (parent_handle != qdisc_state->future_handle) {
+ /* This notification is for a different Qdisc than
+ * previously. Wipe the future cache.
+ */
+ memset(qdisc_state->future_fifos, 0,
+ sizeof(qdisc_state->future_fifos));
+ qdisc_state->future_handle = parent_handle;
+ }
+
+ child_index = TC_H_MIN(p->parent);
+ tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
+ if (tclass < IEEE_8021QAZ_MAX_TCS) {
+ if (p->command == TC_FIFO_REPLACE)
+ qdisc_state->future_fifos[tclass] = true;
+ else if (p->command == TC_FIFO_DESTROY)
+ qdisc_state->future_fifos[tclass] = false;
+ }
+ }
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_FIFO_REPLACE) {
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo, NULL);
+ }
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_FIFO))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_FIFO_DESTROY:
+ if (p->handle == mlxsw_sp_qdisc->handle)
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ return 0;
+ case TC_FIFO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_FIFO_REPLACE: /* Handled above. */
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
@@ -751,8 +890,8 @@ __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_port->tclass_qdiscs[i]);
- mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
+ &qdisc_state->tclass_qdiscs[i]);
+ qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
}
return 0;
@@ -785,12 +924,13 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
unsigned int nbands,
const unsigned int *quanta,
const unsigned int *weights,
const u8 *priomap)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *child_qdisc;
int tclass, i, band, backlog;
u8 old_priomap;
@@ -798,7 +938,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
for (band = 0; band < nbands; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
old_priomap = child_qdisc->prio_bitmap;
child_qdisc->prio_bitmap = 0;
@@ -827,28 +967,41 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
child_qdisc);
child_qdisc->stats_base.backlog = backlog;
}
+
+ if (handle == qdisc_state->future_handle &&
+ qdisc_state->future_fifos[tclass]) {
+ err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+ child_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo,
+ NULL);
+ if (err)
+ return err;
+ }
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
tclass, 0, false, 0);
}
+
+ qdisc_state->future_handle = TC_H_UNSPEC;
+ memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
return 0;
}
static int
-mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
zeroes, zeroes, p->priomap);
}
@@ -880,6 +1033,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *tc_qdisc;
u64 tx_packets = 0;
u64 tx_bytes = 0;
@@ -888,7 +1042,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- tc_qdisc = &mlxsw_sp_port->tclass_qdiscs[i];
+ tc_qdisc = &qdisc_state->tclass_qdiscs[i];
mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
&tx_bytes, &tx_packets,
&drops, &backlog);
@@ -946,13 +1100,13 @@ mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_ets_qopt_offload_replace_params *p = params;
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
p->quanta, p->weights, p->priomap);
}
@@ -1014,11 +1168,12 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u8 band, u32 child_handle)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
struct mlxsw_sp_qdisc *old_qdisc;
if (band < IEEE_8021QAZ_MAX_TCS &&
- mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle)
+ qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
return 0;
if (!child_handle) {
@@ -1037,7 +1192,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
+ &qdisc_state->tclass_qdiscs[tclass_num]);
return -EOPNOTSUPP;
}
@@ -1119,37 +1274,23 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state;
int i;
- mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
- if (!mlxsw_sp_qdisc)
- goto err_root_qdisc_init;
-
- mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
- mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
- mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
+ if (!qdisc_state)
+ return -ENOMEM;
- mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
- sizeof(*mlxsw_sp_qdisc),
- GFP_KERNEL);
- if (!mlxsw_sp_qdisc)
- goto err_tclass_qdiscs_init;
-
- mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
+ qdisc_state->root_qdisc.prio_bitmap = 0xff;
+ qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
+ qdisc_state->tclass_qdiscs[i].tclass_num = i;
+ mlxsw_sp_port->qdisc = qdisc_state;
return 0;
-
-err_tclass_qdiscs_init:
- kfree(mlxsw_sp_port->root_qdisc);
-err_root_qdisc_init:
- return -ENOMEM;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
- kfree(mlxsw_sp_port->tclass_qdiscs);
- kfree(mlxsw_sp_port->root_qdisc);
+ kfree(mlxsw_sp_port->qdisc);
}
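
The qdisc rework folds the separately-allocated root_qdisc and tclass_qdiscs array into a single kzalloc'd mlxsw_sp_qdisc_state, threads the handle through the ->replace() callbacks, and, per the long comment above, buffers FIFO notifications that arrive before their parent PRIO/ETS is known so they can be offloaded retroactively. That bookkeeping, compressed into a sketch with stand-in constants:

    #include <linux/string.h>
    #include <linux/types.h>

    #define MAX_BANDS 8		/* stands in for IEEE_8021QAZ_MAX_TCS */

    struct qdisc_state_like {
    	u32 future_handle;		/* parent seen in early FIFO events */
    	bool future_fifos[MAX_BANDS];	/* bands with a pending FIFO */
    };

    /* Record a FIFO notification whose parent qdisc is not offloaded
     * yet. When a different parent shows up, stale entries for the
     * previous one are wiped first.
     */
    static void note_future_fifo(struct qdisc_state_like *qs, u32 parent,
    			     int band, bool replace)
    {
    	if (parent != qs->future_handle) {
    		memset(qs->future_fifos, 0, sizeof(qs->future_fifos));
    		qs->future_handle = parent;
    	}
    	if (band < MAX_BANDS)
    		qs->future_fifos[band] = replace;
    }
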
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4a77b511ead2..d5bca1be3ef5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -17,6 +17,7 @@
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
+#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -48,39 +49,6 @@ struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
-struct mlxsw_sp_router {
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif **rifs;
- struct mlxsw_sp_vr *vrs;
- struct rhashtable neigh_ht;
- struct rhashtable nexthop_group_ht;
- struct rhashtable nexthop_ht;
- struct list_head nexthop_list;
- struct {
- /* One tree for each protocol: IPv4 and IPv6 */
- struct mlxsw_sp_lpm_tree *proto_trees[2];
- struct mlxsw_sp_lpm_tree *trees;
- unsigned int tree_count;
- } lpm;
- struct {
- struct delayed_work dw;
- unsigned long interval; /* ms */
- } neighs_update;
- struct delayed_work nexthop_probe_dw;
-#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
- struct list_head nexthop_neighs_list;
- struct list_head ipip_list;
- bool aborted;
- struct notifier_block fib_nb;
- struct notifier_block netevent_nb;
- struct notifier_block inetaddr_nb;
- struct notifier_block inet6addr_nb;
- const struct mlxsw_sp_rif_ops **rif_ops_arr;
- const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
- u32 adj_discard_index;
- bool adj_discard_index_valid;
-};
-
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
@@ -145,6 +113,9 @@ struct mlxsw_sp_rif_ops {
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
@@ -760,13 +731,18 @@ int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
u16 *vr_id)
{
struct mlxsw_sp_vr *vr;
+ int err = 0;
+ mutex_lock(&mlxsw_sp->router->lock);
vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
- if (!vr)
- return -ESRCH;
+ if (!vr) {
+ err = -ESRCH;
+ goto out;
+ }
*vr_id = vr->id;
-
- return 0;
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
}
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
@@ -988,17 +964,23 @@ __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
struct ip_tunnel *tun = netdev_priv(ol_dev);
struct net *net = dev_net(ol_dev);
- return __dev_get_by_index(net, tun->parms.link);
+ return dev_get_by_index_rcu(net, tun->parms.link);
}
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
- struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ struct net_device *d;
+ u32 tb_id;
+ rcu_read_lock();
+ d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
if (d)
- return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
+ tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
- return RT_TABLE_MAIN;
+ tb_id = RT_TABLE_MAIN;
+ rcu_read_unlock();
+
+ return tb_id;
}
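The RTNL-free lookups above follow the standard RCU copy-out rule: the underlay netdev returned by dev_get_by_index_rcu() is only guaranteed alive inside the read-side critical section, so any value derived from it (here the FIB table ID) must be extracted before rcu_read_unlock(). Callers that merely compare the pointer value, as in mlxsw_sp_ipip_entry_find_by_ul_dev() below, may do so after unlocking since they never dereference it. With the hunk applied the helper reads:

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
    struct net_device *d;
    u32 tb_id;

    rcu_read_lock();
    d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
    /* Derive the scalar while the netdev is guaranteed alive. */
    if (d)
        tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
    else
        tb_id = RT_TABLE_MAIN;
    rcu_read_unlock();

    return tb_id;
}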
static struct mlxsw_sp_rif *
@@ -1230,7 +1212,7 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
saddr_len = 4;
saddr_prefix_len = 32;
break;
- case MLXSW_SP_L3_PROTO_IPV6:
+ default:
WARN_ON(1);
return NULL;
}
@@ -1355,8 +1337,12 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
ipip_list_node);
list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
ipip_list_node) {
- struct net_device *ipip_ul_dev =
- __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+ struct net_device *ipip_ul_dev;
+
+ rcu_read_lock();
+ ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
return ipip_entry;
@@ -1365,10 +1351,16 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
+bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
- return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
+ bool is_ipip_ul;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return is_ipip_ul;
}
static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
@@ -1388,9 +1380,9 @@ static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *ol_dev)
{
+ enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
struct mlxsw_sp_ipip_entry *ipip_entry;
enum mlxsw_sp_l3proto ul_proto;
- enum mlxsw_sp_ipip_type ipipt;
union mlxsw_sp_l3addr saddr;
u32 ul_tb_id;
@@ -1543,13 +1535,17 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif);
/**
- * Update the offload related to an IPIP entry. This always updates decap, and
- * in addition to that it also:
- * @recreate_loopback: recreates the associated loopback RIF
- * @keep_encap: updates next hops that use the tunnel netdevice. This is only
+ * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
+ * @mlxsw_sp: mlxsw_sp.
+ * @ipip_entry: IPIP entry.
+ * @recreate_loopback: Recreates the associated loopback RIF.
+ * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
* relevant when recreate_loopback is true.
- * @update_nexthops: updates next hops, keeping the current loopback RIF. This
+ * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
* is only relevant when recreate_loopback is false.
+ * @extack: extack.
+ *
+ * Return: Non-zero value on failure.
*/
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1722,9 +1718,12 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
ipip_list_node) {
- struct net_device *ipip_ul_dev =
- __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+ struct net_device *ipip_ul_dev;
+ rcu_read_lock();
+ ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
}
@@ -1737,35 +1736,41 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
{
struct netdev_notifier_changeupper_info *chup;
struct netlink_ext_ack *extack;
+ int err = 0;
+ mutex_lock(&mlxsw_sp->router->lock);
switch (event) {
case NETDEV_REGISTER:
- return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
+ err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
+ break;
case NETDEV_UNREGISTER:
mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
- return 0;
+ break;
case NETDEV_UP:
mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
- return 0;
+ break;
case NETDEV_DOWN:
mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
- return 0;
+ break;
case NETDEV_CHANGEUPPER:
chup = container_of(info, typeof(*chup), info);
extack = info->extack;
if (netif_is_l3_master(chup->upper_dev))
- return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
- ol_dev,
- extack);
- return 0;
+ err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
+ ol_dev,
+ extack);
+ break;
case NETDEV_CHANGE:
extack = info->extack;
- return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
- ol_dev, extack);
+ err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
+ ol_dev, extack);
+ break;
case NETDEV_CHANGEMTU:
- return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
+ err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
+ break;
}
- return 0;
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
}
static int
@@ -1809,8 +1814,9 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct netdev_notifier_info *info)
{
struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
- int err;
+ int err = 0;
+ mutex_lock(&mlxsw_sp->router->lock);
while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
ul_dev,
ipip_entry))) {
@@ -1823,7 +1829,7 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
ul_dev);
- return err;
+ break;
}
if (demote_this) {
@@ -1840,8 +1846,9 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
ipip_entry = prev;
}
}
+ mutex_unlock(&mlxsw_sp->router->lock);
- return 0;
+ return err;
}
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
@@ -1850,8 +1857,22 @@ int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
u32 tunnel_index)
{
enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
struct mlxsw_sp_fib_entry *fib_entry;
- int err;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ router->nve_decap_config.ul_tb_id = ul_tb_id;
+ router->nve_decap_config.tunnel_index = tunnel_index;
+ router->nve_decap_config.ul_proto = ul_proto;
+ router->nve_decap_config.ul_sip = *ul_sip;
+ router->nve_decap_config.valid = true;
/* It is valid to create a tunnel with a local IP and only later
* assign this IP address to a local interface
@@ -1860,7 +1881,7 @@ int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
ul_proto, ul_sip,
type);
if (!fib_entry)
- return 0;
+ goto out;
fib_entry->decap.tunnel_index = tunnel_index;
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
@@ -1869,11 +1890,13 @@ int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
if (err)
goto err_fib_entry_update;
- return 0;
+ goto out;
err_fib_entry_update:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -1882,16 +1905,40 @@ void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
const union mlxsw_sp_l3addr *ul_sip)
{
enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
struct mlxsw_sp_fib_entry *fib_entry;
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (WARN_ON_ONCE(!router->nve_decap_config.valid))
+ goto out;
+
+ router->nve_decap_config.valid = false;
+
fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
ul_proto, ul_sip,
type);
if (!fib_entry)
- return;
+ goto out;
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
+ u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+
+ return router->nve_decap_config.valid &&
+ router->nve_decap_config.ul_tb_id == ul_tb_id &&
+ router->nve_decap_config.ul_proto == ul_proto &&
+ !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
+ sizeof(*ul_sip));
}
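The record these three functions maintain is the mlxsw_sp_router_nve_decap struct added to spectrum_router.h further down: promote fills it and sets valid, demote clears valid, and mlxsw_sp_router_nve_is_decap() is a pure field comparison against it, so the FIB code no longer has to call back into the NVE layer to identify decap routes:

struct mlxsw_sp_router_nve_decap {
    u32 ul_tb_id;
    u32 tunnel_index;
    enum mlxsw_sp_l3proto ul_proto;
    union mlxsw_sp_l3addr ul_sip;
    u8 valid:1;    /* set by promote, cleared by demote */
};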
struct mlxsw_sp_neigh_key {
@@ -2264,10 +2311,8 @@ __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
int i, num_rec;
int err;
- /* Make sure the neighbour's netdev isn't removed in the
- * process.
- */
- rtnl_lock();
+ /* Ensure the RIF we read from the device does not change mid-dump. */
+ mutex_lock(&mlxsw_sp->router->lock);
do {
mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
@@ -2281,7 +2326,7 @@ __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
i);
} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -2312,15 +2357,14 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- /* Take RTNL mutex here to prevent lists from changes */
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
nexthop_neighs_list_node)
/* If this neigh has nexthops, make the kernel think this neigh
* is active regardless of the traffic.
*/
neigh_event_send(neigh_entry->key.n, NULL);
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
}
static void
@@ -2360,15 +2404,13 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
* the nexthop wouldn't get offloaded until the neighbor is resolved
* but it wouldn't get resolved ever in case traffic is flowing in HW
* using different nexthop.
- *
- * Take RTNL mutex here to prevent lists from changes.
*/
- rtnl_lock();
+ mutex_lock(&router->lock);
list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
nexthop_neighs_list_node)
if (!neigh_entry->connected)
neigh_event_send(neigh_entry->key.n, NULL);
- rtnl_unlock();
+ mutex_unlock(&router->lock);
mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
@@ -2506,7 +2548,7 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
dead = n->dead;
read_unlock_bh(&n->lock);
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
entry_connected = nud_state & NUD_VALID && !dead;
@@ -2528,7 +2570,7 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
out:
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
neigh_release(n);
kfree(net_work);
}
@@ -3189,7 +3231,6 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
u32 adj_index = nh_grp->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
int i;
- int err;
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
@@ -3200,6 +3241,8 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
}
if (nh->update || reallocate) {
+ int err = 0;
+
switch (nh->type) {
case MLXSW_SP_NEXTHOP_TYPE_ETH:
err = mlxsw_sp_nexthop_update
@@ -3711,9 +3754,15 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
{
- struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ struct net_device *ul_dev;
+ bool is_up;
+
+ rcu_read_lock();
+ ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
+ rcu_read_unlock();
- return ul_dev ? (ul_dev->flags & IFF_UP) : true;
+ return is_up;
}
static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
@@ -3840,10 +3889,14 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (!dev)
return 0;
- in_dev = __in_dev_get_rtnl(dev);
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
+ fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
+ rcu_read_unlock();
return 0;
+ }
+ rcu_read_unlock();
err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
if (err)
@@ -4473,6 +4526,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
{
struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
struct mlxsw_sp_ipip_entry *ipip_entry;
struct fib_info *fi = fen_info->fi;
@@ -4487,12 +4541,13 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry,
ipip_entry);
}
- if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id,
- dip.addr4)) {
- u32 t_index;
+ if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
+ MLXSW_SP_L3_PROTO_IPV4,
+ &dip)) {
+ u32 tunnel_index;
- t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp);
- fib_entry->decap.tunnel_index = t_index;
+ tunnel_index = router->nve_decap_config.tunnel_index;
+ fib_entry->decap.tunnel_index = tunnel_index;
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
return 0;
}
@@ -5923,8 +5978,7 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- /* Protect internal structures from changes */
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
@@ -5946,7 +6000,7 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
kfree(fib_work);
}
@@ -5957,7 +6011,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
@@ -5984,7 +6038,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
break;
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
kfree(fib_work);
}
@@ -5997,6 +6051,7 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
int err;
rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD:
@@ -6025,6 +6080,7 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
dev_put(fib_work->ven_info.dev);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
kfree(fib_work);
}
@@ -6233,7 +6289,7 @@ err_fib_event:
return NOTIFY_BAD;
}
-struct mlxsw_sp_rif *
+static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
@@ -6247,6 +6303,41 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return rif;
+}
+
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+ u16 vid = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ goto out;
+
+ /* We only return the VID for VLAN RIFs. Otherwise we return an
+ * invalid value (0).
+ */
+ if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
+ goto out;
+
+ vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return vid;
+}
+
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -6281,7 +6372,8 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
case NETDEV_UP:
return rif == NULL;
case NETDEV_DOWN:
- idev = __in_dev_get_rtnl(dev);
+ rcu_read_lock();
+ idev = __in_dev_get_rcu(dev);
if (idev && idev->ifa_list)
addr_list_empty = false;
@@ -6289,6 +6381,7 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
if (addr_list_empty && inet6_dev &&
!list_empty(&inet6_dev->addr_list))
addr_list_empty = false;
+ rcu_read_unlock();
/* macvlans do not have a RIF, but rather piggy back on the
* RIF of their lower device.
@@ -6411,11 +6504,6 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
-{
- return rif->fid;
-}
-
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -6528,10 +6616,13 @@ void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_rif *rif;
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
- return;
+ goto out;
mlxsw_sp_rif_destroy(rif);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
}
static void
@@ -6631,8 +6722,8 @@ err_fid_port_vid_map:
return err;
}
-void
-mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+static void
+__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
@@ -6650,6 +6741,16 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_rif_subport_put(rif);
}
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
struct net_device *port_dev,
unsigned long event, u16 vid,
@@ -6667,7 +6768,7 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
l3_dev, extack);
case NETDEV_DOWN:
- mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
}
@@ -6848,8 +6949,8 @@ err_rif_vrrp_add:
return err;
}
-void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *macvlan_dev)
+static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
{
struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
struct mlxsw_sp_rif *rif;
@@ -6866,6 +6967,14 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_index(rif->fid), false);
}
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ mutex_lock(&mlxsw_sp->router->lock);
+ __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *macvlan_dev,
unsigned long event,
@@ -6875,7 +6984,7 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
case NETDEV_UP:
return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
case NETDEV_DOWN:
- mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
break;
}
@@ -6945,15 +7054,17 @@ static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
if (event == NETDEV_UP)
- goto out;
+ return NOTIFY_DONE;
router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
+ mutex_lock(&router->lock);
rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
out:
+ mutex_unlock(&router->lock);
return notifier_from_errno(err);
}
@@ -6968,8 +7079,9 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
- goto out;
+ return NOTIFY_DONE;
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
@@ -6981,6 +7093,7 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
+ mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
}
@@ -7001,6 +7114,7 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
struct mlxsw_sp_rif *rif;
rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
@@ -7008,6 +7122,7 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
+ mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
dev_put(dev);
kfree(inet6addr_work);
@@ -7052,8 +7167,9 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
- goto out;
+ return NOTIFY_DONE;
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
@@ -7065,6 +7181,7 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
+ mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
}
@@ -7151,24 +7268,30 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
{
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
+ int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
return 0;
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
- return 0;
+ goto out;
switch (event) {
case NETDEV_CHANGEMTU: /* fall through */
case NETDEV_CHANGEADDR:
- return mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
+ err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
+ break;
case NETDEV_PRE_CHANGEADDR:
- return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ break;
}
- return 0;
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
}
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
@@ -7211,9 +7334,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
if (!mlxsw_sp || netif_is_macvlan(l3_dev))
return 0;
+ mutex_lock(&mlxsw_sp->router->lock);
switch (event) {
case NETDEV_PRECHANGEUPPER:
- return 0;
+ break;
case NETDEV_CHANGEUPPER:
if (info->linking) {
struct netlink_ext_ack *extack;
@@ -7225,6 +7349,7 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
}
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -7351,13 +7476,14 @@ u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
-static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
- u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
int err;
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
+ err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
+ true);
if (err)
return err;
@@ -7386,13 +7512,13 @@ err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
return err;
}
-static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
- u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
@@ -7404,10 +7530,41 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}
static struct mlxsw_sp_fid *
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+}
+
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *dev;
+
+ dev = br_fdb_find_port(rif->dev, mac, 0);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = 0;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
+ NULL);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
+ .type = MLXSW_SP_RIF_TYPE_FID,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_fid_fid_get,
+ .fdb_del = mlxsw_sp_rif_fid_fdb_del,
+};
+
+static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
@@ -7428,7 +7585,7 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
}
}
- return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack);
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -7449,103 +7606,6 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
- .type = MLXSW_SP_RIF_TYPE_VLAN,
- .rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_vlan_configure,
- .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
- .fid_get = mlxsw_sp_rif_vlan_fid_get,
- .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
-};
-
-static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
-{
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
- u16 fid_index = mlxsw_sp_fid_index(rif->fid);
- int err;
-
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
- true);
- if (err)
- return err;
-
- err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
- mlxsw_sp_router_port(mlxsw_sp), true);
- if (err)
- goto err_fid_mc_flood_set;
-
- err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
- mlxsw_sp_router_port(mlxsw_sp), true);
- if (err)
- goto err_fid_bc_flood_set;
-
- err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
- mlxsw_sp_fid_index(rif->fid), true);
- if (err)
- goto err_rif_fdb_op;
-
- mlxsw_sp_fid_rif_set(rif->fid, rif);
- return 0;
-
-err_rif_fdb_op:
- mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
- mlxsw_sp_router_port(mlxsw_sp), false);
-err_fid_bc_flood_set:
- mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
- mlxsw_sp_router_port(mlxsw_sp), false);
-err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
- return err;
-}
-
-static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
-{
- u16 fid_index = mlxsw_sp_fid_index(rif->fid);
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
- struct mlxsw_sp_fid *fid = rif->fid;
-
- mlxsw_sp_fid_rif_set(fid, NULL);
- mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
- mlxsw_sp_fid_index(fid), false);
- mlxsw_sp_rif_macvlan_flush(rif);
- mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
- mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
- mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
-}
-
-static struct mlxsw_sp_fid *
-mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
- struct netlink_ext_ack *extack)
-{
- return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack);
-}
-
-static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
-{
- struct switchdev_notifier_fdb_info info;
- struct net_device *dev;
-
- dev = br_fdb_find_port(rif->dev, mac, 0);
- if (!dev)
- return;
-
- info.addr = mac;
- info.vid = 0;
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
- NULL);
-}
-
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
- .type = MLXSW_SP_RIF_TYPE_FID,
- .rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_fid_configure,
- .deconfigure = mlxsw_sp_rif_fid_deconfigure,
- .fid_get = mlxsw_sp_rif_fid_fid_get,
- .fdb_del = mlxsw_sp_rif_fid_fdb_del,
-};
-
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
@@ -7733,28 +7793,32 @@ int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
u16 *ul_rif_index)
{
struct mlxsw_sp_rif *ul_rif;
+ int err = 0;
- ASSERT_RTNL();
-
+ mutex_lock(&mlxsw_sp->router->lock);
ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
- if (IS_ERR(ul_rif))
- return PTR_ERR(ul_rif);
+ if (IS_ERR(ul_rif)) {
+ err = PTR_ERR(ul_rif);
+ goto out;
+ }
*ul_rif_index = ul_rif->rif_index;
-
- return 0;
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
}
void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
struct mlxsw_sp_rif *ul_rif;
- ASSERT_RTNL();
-
+ mutex_lock(&mlxsw_sp->router->lock);
ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
if (WARN_ON(!ul_rif))
- return;
+ goto out;
mlxsw_sp_ul_rif_put(ul_rif);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
}
static int
@@ -8004,6 +8068,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
if (!router)
return -ENOMEM;
+ mutex_init(&router->lock);
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
@@ -8107,6 +8172,7 @@ err_router_init:
err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
+ mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
}
@@ -8127,5 +8193,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
__mlxsw_sp_router_fini(mlxsw_sp);
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
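Taken together, the spectrum_router.c hunks establish one locking convention: router->lock is initialized in mlxsw_sp_router_init() and destroyed in mlxsw_sp_router_fini(), exported entry points and deferred work items take it, and the static double-underscore variants (__mlxsw_sp_port_vlan_router_leave(), __mlxsw_sp_rif_macvlan_del(), and friends) assume it is already held, so internal callers that are already inside the lock do not self-deadlock. A minimal sketch of the wrapper shape, with a made-up foo_leave() standing in for the real functions:

/* Hypothetical names; only the shape mirrors the patch. */
static void __foo_leave(struct mlxsw_sp *mlxsw_sp)
{
    /* ... body runs with router->lock held ... */
}

void foo_leave(struct mlxsw_sp *mlxsw_sp)
{
    mutex_lock(&mlxsw_sp->router->lock);
    __foo_leave(mlxsw_sp);
    mutex_unlock(&mlxsw_sp->router->lock);
}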
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index c9b94f435cdd..8418dc3ae967 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -7,6 +7,49 @@
#include "spectrum.h"
#include "reg.h"
+struct mlxsw_sp_router_nve_decap {
+ u32 ul_tb_id;
+ u32 tunnel_index;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr ul_sip;
+ u8 valid:1;
+};
+
+struct mlxsw_sp_router {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif **rifs;
+ struct mlxsw_sp_vr *vrs;
+ struct rhashtable neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable nexthop_ht;
+ struct list_head nexthop_list;
+ struct {
+ /* One tree for each protocol: IPv4 and IPv6 */
+ struct mlxsw_sp_lpm_tree *proto_trees[2];
+ struct mlxsw_sp_lpm_tree *trees;
+ unsigned int tree_count;
+ } lpm;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_neighs_list;
+ struct list_head ipip_list;
+ bool aborted;
+ struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inet6addr_nb;
+ const struct mlxsw_sp_rif_ops **rif_ops_arr;
+ const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
+ u32 adj_discard_index;
+ bool adj_discard_index_valid;
+ struct mlxsw_sp_router_nve_decap nve_decap_config;
+ struct mutex lock; /* Protects shared router resources */
+};
+
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 0cdd7954a085..9fb2e9d93929 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -3,6 +3,8 @@
#include <linux/if_bridge.h>
#include <linux/list.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
@@ -14,38 +16,43 @@
#include "spectrum_span.h"
#include "spectrum_switchdev.h"
+struct mlxsw_sp_span {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+ atomic_t active_entries_count;
+ int entries_count;
+ struct mlxsw_sp_span_entry entries[0];
+};
+
+static void mlxsw_sp_span_respin_work(struct work_struct *work);
+
static u64 mlxsw_sp_span_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
- u64 occ = 0;
- int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (mlxsw_sp->span.entries[i].ref_count)
- occ++;
- }
-
- return occ;
+ return atomic_read(&mlxsw_sp->span->active_entries_count);
}
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- int i;
+ struct mlxsw_sp_span *span;
+ int i, entries_count;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
return -EIO;
- mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_SPAN);
- mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
- sizeof(struct mlxsw_sp_span_entry),
- GFP_KERNEL);
- if (!mlxsw_sp->span.entries)
+ entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
+ span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
+ if (!span)
return -ENOMEM;
+ span->entries_count = entries_count;
+ atomic_set(&span->active_entries_count, 0);
+ span->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp->span = span;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
INIT_LIST_HEAD(&curr->bound_ports_list);
curr->id = i;
@@ -53,6 +60,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
mlxsw_sp_span_occ_get, mlxsw_sp);
+ INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
return 0;
}
@@ -62,14 +70,15 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ cancel_work_sync(&mlxsw_sp->span->work);
devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
}
- kfree(mlxsw_sp->span.entries);
+ kfree(mlxsw_sp->span);
}
static int
@@ -645,15 +654,16 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
int i;
/* find a free entry to use */
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (!mlxsw_sp->span.entries[i].ref_count) {
- span_entry = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ if (!mlxsw_sp->span->entries[i].ref_count) {
+ span_entry = &mlxsw_sp->span->entries[i];
break;
}
}
if (!span_entry)
return NULL;
+ atomic_inc(&mlxsw_sp->span->active_entries_count);
span_entry->ops = ops;
span_entry->ref_count = 1;
span_entry->to_dev = to_dev;
@@ -662,9 +672,11 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
return span_entry;
}
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
{
mlxsw_sp_span_entry_deconfigure(span_entry);
+ atomic_dec(&mlxsw_sp->span->active_entries_count);
}
struct mlxsw_sp_span_entry *
@@ -673,8 +685,8 @@ mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
{
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
if (curr->ref_count && curr->to_dev == to_dev)
return curr;
@@ -694,8 +706,8 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
if (curr->ref_count && curr->id == span_id)
return curr;
@@ -726,7 +738,7 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
{
WARN_ON(!span_entry->ref_count);
if (--span_entry->ref_count == 0)
- mlxsw_sp_span_entry_destroy(span_entry);
+ mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
return 0;
}
@@ -736,8 +748,8 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
struct mlxsw_sp_span_inspected_port *p;
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
list_for_each_entry(p, &curr->bound_ports_list, list)
if (p->local_port == port->local_port &&
@@ -842,9 +854,9 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
* so if a binding is requested, check for conflicts.
*/
if (bind)
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr =
- &mlxsw_sp->span.entries[i];
+ &mlxsw_sp->span->entries[i];
if (mlxsw_sp_span_entry_bound_port_find(curr, type,
port, bind))
@@ -988,14 +1000,18 @@ void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
-void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
- int i;
- int err;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp *mlxsw_sp;
+ int i, err;
- ASSERT_RTNL();
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ span = container_of(work, struct mlxsw_sp_span, work);
+ mlxsw_sp = span->mlxsw_sp;
+
+ rtnl_lock();
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
struct mlxsw_sp_span_parms sparms = {NULL};
if (!curr->ref_count)
@@ -1010,4 +1026,12 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
}
}
+ rtnl_unlock();
+}
+
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+ if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ return;
+ mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
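Two things change shape in spectrum_span.c: the span context becomes one kzalloc() whose trailing entries[] array is sized with struct_size() (the overflow-checked way to allocate a struct plus flexible array), and respin becomes a preallocated work item. Several respin callers now run under the new router mutex rather than RTNL, so the walk that needs RTNL is presumably deferred to the workqueue to avoid taking RTNL from those contexts, and the atomic counter makes the common no-active-mirror case a cheap early return:

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
    /* No mirror session active: nothing to re-resolve. */
    if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
        return;
    mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}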
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a3af171c6358..a26162b08b7d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -153,16 +153,64 @@ static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp);
}
+static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev, *stop_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev)) {
+ err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
+ br_dev, dev, 0,
+ extack);
+ if (err) {
+ stop_dev = dev;
+ goto err_vxlan_join;
+ }
+ }
+ }
+
+ return 0;
+
+err_vxlan_join:
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev)) {
+ if (stop_dev == dev)
+ break;
+ mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
+ }
+ }
+ return err;
+}
+
+static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev))
+ mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
+ }
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct device *dev = bridge->mlxsw_sp->bus_info->dev;
struct mlxsw_sp_bridge_device *bridge_device;
bool vlan_enabled = br_vlan_enabled(br_dev);
+ int err;
if (vlan_enabled && bridge->vlan_enabled_exists) {
dev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
return ERR_PTR(-EINVAL);
}
@@ -184,13 +232,29 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
INIT_LIST_HEAD(&bridge_device->mids_list);
list_add(&bridge_device->list, &bridge->bridges_list);
+ /* It is possible we already have VXLAN devices enslaved to the bridge,
+ * in which case we need to replay their configuration as if they were
+ * just now enslaved to the bridge.
+ */
+ err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
+ if (err)
+ goto err_vxlan_init;
+
return bridge_device;
+
+err_vxlan_init:
+ list_del(&bridge_device->list);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+ kfree(bridge_device);
+ return ERR_PTR(err);
}
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_device *bridge_device)
{
+ mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
bridge_device->dev);
list_del(&bridge_device->list);
@@ -203,7 +267,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_bridge_device *bridge_device;
@@ -211,7 +276,7 @@ mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
if (bridge_device)
return bridge_device;
- return mlxsw_sp_bridge_device_create(bridge, br_dev);
+ return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
}
static void
@@ -292,7 +357,8 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
- struct net_device *brport_dev)
+ struct net_device *brport_dev,
+ struct netlink_ext_ack *extack)
{
struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
struct mlxsw_sp_bridge_device *bridge_device;
@@ -305,7 +371,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
return bridge_port;
}
- bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
+ bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
if (IS_ERR(bridge_device))
return ERR_CAST(bridge_device);
@@ -1000,7 +1066,7 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
&bridge_vlan->port_vlan_list);
mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
- bridge_port->dev);
+ bridge_port->dev, extack);
mlxsw_sp_port_vlan->bridge_port = bridge_port;
return 0;
@@ -1107,16 +1173,12 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
const struct switchdev_obj_port_vlan *vlan)
{
- struct mlxsw_sp_rif *rif;
- struct mlxsw_sp_fid *fid;
u16 pvid;
u16 vid;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
- if (!rif)
+ pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
+ if (!pvid)
return 0;
- fid = mlxsw_sp_rif_fid(rif);
- pvid = mlxsw_sp_fid_8021q_vid(fid);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
@@ -1712,36 +1774,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-struct mlxsw_sp_span_respin_work {
- struct work_struct work;
- struct mlxsw_sp *mlxsw_sp;
-};
-
-static void mlxsw_sp_span_respin_work(struct work_struct *work)
-{
- struct mlxsw_sp_span_respin_work *respin_work =
- container_of(work, struct mlxsw_sp_span_respin_work, work);
-
- rtnl_lock();
- mlxsw_sp_span_respin(respin_work->mlxsw_sp);
- rtnl_unlock();
- kfree(respin_work);
-}
-
-static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_span_respin_work *respin_work;
-
- respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
- if (!respin_work)
- return;
-
- INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
- respin_work->mlxsw_sp = mlxsw_sp;
-
- mlxsw_core_schedule_work(&respin_work->work);
-}
-
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans,
@@ -1763,7 +1795,7 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
* call for later, so that the respin logic sees the
* updated bridge state.
*/
- mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
}
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
@@ -1916,7 +1948,7 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
break;
}
- mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
return err;
}
@@ -1990,12 +2022,11 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
return err;
}
- /* If no other port is member in the VLAN, then the FID does not exist.
- * NVE will be enabled on the FID once a port joins the VLAN
- */
- fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
- if (!fid)
- return 0;
+ fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+ if (IS_ERR(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
+ return PTR_ERR(fid);
+ }
if (mlxsw_sp_fid_vni_is_set(fid)) {
NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
@@ -2007,11 +2038,6 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
if (err)
goto err_nve_fid_enable;
- /* The tunnel port does not hold a reference on the FID. Only
- * local ports and the router port
- */
- mlxsw_sp_fid_put(fid);
-
return 0;
err_nve_fid_enable:
@@ -2048,38 +2074,8 @@ mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- struct net_device *vxlan_dev;
- struct mlxsw_sp_fid *fid;
- int err;
-
- fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
- if (IS_ERR(fid))
- return fid;
-
- if (mlxsw_sp_fid_vni_is_set(fid))
- return fid;
-
- /* Find the VxLAN device that has the specified VLAN configured as
- * PVID and egress untagged. There can be at most one such device
- */
- vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
- vid);
- if (!vxlan_dev)
- return fid;
-
- if (!netif_running(vxlan_dev))
- return fid;
-
- err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
- if (err)
- goto err_vxlan_join;
-
- return fid;
-err_vxlan_join:
- mlxsw_sp_fid_put(fid);
- return ERR_PTR(err);
+ return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
static struct mlxsw_sp_fid *
@@ -2184,9 +2180,9 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_fid *fid;
int err;
- fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
- if (!fid) {
- NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID");
+ fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+ if (IS_ERR(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
return -EINVAL;
}
@@ -2200,11 +2196,6 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
if (err)
goto err_nve_fid_enable;
- /* The tunnel port does not hold a reference on the FID. Only
- * local ports and the router port
- */
- mlxsw_sp_fid_put(fid);
-
return 0;
err_nve_fid_enable:
@@ -2218,34 +2209,8 @@ mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- struct net_device *vxlan_dev;
- struct mlxsw_sp_fid *fid;
- int err;
- fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
- if (IS_ERR(fid))
- return fid;
-
- if (mlxsw_sp_fid_vni_is_set(fid))
- return fid;
-
- vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
- if (!vxlan_dev)
- return fid;
-
- if (!netif_running(vxlan_dev))
- return fid;
-
- err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
- extack);
- if (err)
- goto err_vxlan_join;
-
- return fid;
-
-err_vxlan_join:
- mlxsw_sp_fid_put(fid);
- return ERR_PTR(err);
+ return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}
static struct mlxsw_sp_fid *
@@ -2287,7 +2252,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
+ bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
+ extack);
if (IS_ERR(bridge_port))
return PTR_ERR(bridge_port);
bridge_device = bridge_port->bridge_device;
@@ -2351,21 +2317,11 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
+ /* Drop both the reference we just took during lookup and the reference
+ * the VXLAN device took.
+ */
+ mlxsw_sp_fid_put(fid);
mlxsw_sp_fid_put(fid);
-}
-
-struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return ERR_PTR(-EINVAL);
-
- return bridge_device->ops->fid_get(bridge_device, vid, extack);
}
static void
@@ -2718,19 +2674,24 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
+ bool no_delay)
{
struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
- msecs_to_jiffies(bridge->fdb_notify.interval));
+ msecs_to_jiffies(interval));
}
+#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
+
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp *mlxsw_sp;
char *sfn_pl;
+ int queries;
u8 num_rec;
int i;
int err;
@@ -2743,20 +2704,26 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = bridge->mlxsw_sp;
rtnl_lock();
- mlxsw_reg_sfn_pack(sfn_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
- if (err) {
- dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
- goto out;
+ queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
+ while (queries > 0) {
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ goto out;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+ if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
+ goto out;
+ queries--;
}
- num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
- for (i = 0; i < num_rec; i++)
- mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
out:
rtnl_unlock();
kfree(sfn_pl);
- mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
struct mlxsw_sp_switchdev_event_work {
@@ -3502,7 +3469,7 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
- mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
return 0;
err_register_switchdev_blocking_notifier:
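The FDB notification worker above now drains up to MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION full SFN batches per run instead of exactly one. The termination logic deserves spelling out: the loop exits early, with queries still non-zero, as soon as a batch comes back shorter than MLXSW_REG_SFN_REC_MAX_COUNT, meaning the hardware queue is empty; only when every batch was full does queries reach zero, and the !queries argument then reschedules the work with no delay so a learning burst keeps draining. Reconstructed core of the worker (prologue and declarations elided):

    rtnl_lock();
    queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
    while (queries > 0) {
        mlxsw_reg_sfn_pack(sfn_pl);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
        if (err) {
            dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
            goto out;
        }
        num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
        for (i = 0; i < num_rec; i++)
            mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
        if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
            goto out;    /* short batch: queue drained */
        queries--;
    }
out:
    rtnl_unlock();
    kfree(sfn_pl);
    mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);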
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 60205aa3f6a5..fbf714d027d8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/netlink.h>
#include <net/devlink.h>
#include <uapi/linux/devlink.h>
#include "core.h"
#include "reg.h"
#include "spectrum.h"
+#include "spectrum_trap.h"
/* All driver-specific traps must be documented in
* Documentation/networking/devlink/mlxsw.rst
@@ -25,36 +28,166 @@ enum {
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ return 0;
+}
+
static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
- void *priv);
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, NULL);
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ u32 cookie_index = mlxsw_skb_cb(skb)->cookie_index;
+ const struct flow_action_cookie *fa_cookie;
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ rcu_read_lock();
+ fa_cookie = mlxsw_sp_acl_act_cookie_lookup(mlxsw_sp, cookie_index);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, fa_cookie);
+ rcu_read_unlock();
+ consume_skb(skb);
+}
+
static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
- void *trap_ctx);
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, NULL);
+ skb_pull(skb, ETH_HLEN);
+ skb->offload_fwd_mark = 1;
+ netif_receive_skb(skb);
+}
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_DROP_EXT(_id, _group_id, _metadata) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA | (_metadata))
+
#define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \
DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \
DEVLINK_MLXSW_TRAP_NAME_##_id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA)
#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA)
#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
- MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \
- false, SP_##_group_id, DISCARD)
+ MLXSW_RXL_DIS(mlxsw_sp_rx_drop_listener, DISCARD_##_id, \
+ TRAP_EXCEPTION_TO_CPU, false, SP_##_group_id, \
+ SET_FW_DEFAULT, SP_##_group_id)
+
+#define MLXSW_SP_RXL_ACL_DISCARD(_id, _en_group_id, _dis_group_id) \
+ MLXSW_RXL_DIS(mlxsw_sp_rx_acl_drop_listener, DISCARD_##_id, \
+ TRAP_EXCEPTION_TO_CPU, false, SP_##_en_group_id, \
+ SET_FW_DEFAULT, SP_##_dis_group_id)
#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
MLXSW_RXL(mlxsw_sp_rx_exception_listener, _id, \
- _action, false, SP_##_group_id, DISCARD)
+ _action, false, SP_##_group_id, SET_FW_DEFAULT)
+
+#define MLXSW_SP_TRAP_POLICER(_id, _rate, _burst) \
+ DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
+ MLXSW_REG_QPCR_HIGHEST_CIR, \
+ MLXSW_REG_QPCR_LOWEST_CIR, \
+ 1 << MLXSW_REG_QPCR_HIGHEST_CBS, \
+ 1 << MLXSW_REG_QPCR_LOWEST_CBS)
+
+/* Ordered by policer identifier */
+static const struct devlink_trap_policer mlxsw_sp_trap_policers_arr[] = {
+ MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+};
-static struct devlink_trap mlxsw_sp_traps_arr[] = {
+static const struct devlink_trap_group mlxsw_sp_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1),
+};
+
+static const struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
@@ -83,9 +216,13 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
+ MLXSW_SP_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ MLXSW_SP_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
};
-static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
+static const struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
@@ -103,34 +240,37 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC, L3_DISCARDS),
MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE, L3_DISCARDS),
MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DISCARDS),
- MLXSW_SP_RXL_EXCEPTION(MTUERROR, ROUTER_EXP, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(TTLERROR, ROUTER_EXP, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(RPF, RPF, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, REMOTE_ROUTE, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, HOST_MISS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, HOST_MISS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, REMOTE_ROUTE,
+ MLXSW_SP_RXL_EXCEPTION(MTUERROR, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(TTLERROR, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RPF, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, ROUTER_EXP,
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, L3_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP,
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, L3_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
- MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, ROUTER_EXP, TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, ROUTER_EXP,
+ MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, TUNNEL_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
+ MLXSW_SP_RXL_ACL_DISCARD(INGRESS_ACL, ACL_DISCARDS, DUMMY),
+ MLXSW_SP_RXL_ACL_DISCARD(EGRESS_ACL, ACL_DISCARDS, DUMMY),
};
/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
* be mapped to the same devlink trap. Order is according to
* 'mlxsw_sp_listeners_arr'.
*/
-static u16 mlxsw_sp_listener_devlink_map[] = {
+static const u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH,
DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
@@ -164,99 +304,168 @@ static u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP,
+ DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP,
};
-static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port,
- struct mlxsw_sp_port *mlxsw_sp_port)
+#define MLXSW_SP_THIN_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
+
+static struct mlxsw_sp_trap_policer_item *
+mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
{
- struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
- if (unlikely(!mlxsw_sp_port)) {
- dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
- local_port);
- kfree_skb(skb);
- return -EINVAL;
+ list_for_each_entry(policer_item, &trap->policer_item_list, list) {
+ if (policer_item->id == id)
+ return policer_item;
}
- skb->dev = mlxsw_sp_port->dev;
+ return NULL;
+}
- pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
- u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += skb->len;
- u64_stats_update_end(&pcpu_stats->syncp);
+static int mlxsw_sp_trap_cpu_policers_set(struct mlxsw_sp *mlxsw_sp)
+{
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
- skb->protocol = eth_type_trans(skb, skb->dev);
+ /* The purpose of the "thin" policer is to drop as many packets
+ * as possible. It is used by the dummy group.
+ */
+ __set_bit(MLXSW_SP_THIN_POLICER_ID, mlxsw_sp->trap->policers_usage);
+ mlxsw_reg_qpcr_pack(qpcr_pl, MLXSW_SP_THIN_POLICER_ID,
+ MLXSW_REG_QPCR_IR_UNITS_M, false, 1, 4);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
- return 0;
+static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
+ MLXSW_SP_THIN_POLICER_ID, 0, 1);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
}
-static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
- void *trap_ctx)
+static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
{
- struct devlink_port *in_devlink_port;
- struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp;
- struct devlink *devlink;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ u64 free_policers = 0;
+ u32 last_id = 0;
+ int err, i;
- mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ for_each_clear_bit(i, trap->policers_usage, trap->max_policers)
+ free_policers++;
- if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
- return;
+ if (ARRAY_SIZE(mlxsw_sp_trap_policers_arr) > free_policers) {
+ dev_err(mlxsw_sp->bus_info->dev, "Exceeded number of supported packet trap policers\n");
+ return -ENOBUFS;
+ }
- devlink = priv_to_devlink(mlxsw_sp->core);
- in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
- local_port);
- skb_push(skb, ETH_HLEN);
- devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
- consume_skb(skb);
-}
+ trap->policers_arr = kcalloc(free_policers,
+ sizeof(struct devlink_trap_policer),
+ GFP_KERNEL);
+ if (!trap->policers_arr)
+ return -ENOMEM;
+
+ trap->policers_count = free_policers;
+
+ for (i = 0; i < free_policers; i++) {
+ const struct devlink_trap_policer *policer;
+
+ if (i < ARRAY_SIZE(mlxsw_sp_trap_policers_arr)) {
+ policer = &mlxsw_sp_trap_policers_arr[i];
+ trap->policers_arr[i] = *policer;
+ last_id = policer->id;
+ } else {
+ /* Use the parameters set for the first policer and override
+ * the relevant ones.
+ */
+ policer = &mlxsw_sp_trap_policers_arr[0];
+ trap->policers_arr[i] = *policer;
+ trap->policers_arr[i].id = ++last_id;
+ trap->policers_arr[i].init_rate = 1;
+ trap->policers_arr[i].init_burst = 16;
+ }
+ }
-static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
- void *trap_ctx)
-{
- struct devlink_port *in_devlink_port;
- struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp;
- struct devlink *devlink;
+ INIT_LIST_HEAD(&trap->policer_item_list);
- mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ err = devlink_trap_policers_register(devlink, trap->policers_arr,
+ trap->policers_count);
+ if (err)
+ goto err_trap_policers_register;
- if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
- return;
+ return 0;
- devlink = priv_to_devlink(mlxsw_sp->core);
- in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
- local_port);
- skb_push(skb, ETH_HLEN);
- devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
- skb_pull(skb, ETH_HLEN);
- skb->offload_fwd_mark = 1;
- netif_receive_skb(skb);
+err_trap_policers_register:
+ kfree(trap->policers_arr);
+ return err;
+}
+
+static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+
+ devlink_trap_policers_unregister(devlink, trap->policers_arr,
+ trap->policers_count);
+ WARN_ON(!list_empty(&trap->policer_item_list));
+ kfree(trap->policers_arr);
}
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
{
+ size_t groups_count = ARRAY_SIZE(mlxsw_sp_trap_groups_arr);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
+
+ err = mlxsw_sp_trap_cpu_policers_set(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_trap_dummy_group_init(mlxsw_sp);
+ if (err)
+ return err;
if (WARN_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) !=
ARRAY_SIZE(mlxsw_sp_listeners_arr)))
return -EINVAL;
- return devlink_traps_register(devlink, mlxsw_sp_traps_arr,
- ARRAY_SIZE(mlxsw_sp_traps_arr),
- mlxsw_sp);
+ err = mlxsw_sp_trap_policers_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = devlink_trap_groups_register(devlink, mlxsw_sp_trap_groups_arr,
+ groups_count);
+ if (err)
+ goto err_trap_groups_register;
+
+ err = devlink_traps_register(devlink, mlxsw_sp_traps_arr,
+ ARRAY_SIZE(mlxsw_sp_traps_arr), mlxsw_sp);
+ if (err)
+ goto err_traps_register;
+
+ return 0;
+
+err_traps_register:
+ devlink_trap_groups_unregister(devlink, mlxsw_sp_trap_groups_arr,
+ groups_count);
+err_trap_groups_register:
+ mlxsw_sp_trap_policers_fini(mlxsw_sp);
+ return err;
}
void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
+ size_t groups_count = ARRAY_SIZE(mlxsw_sp_trap_groups_arr);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
devlink_traps_unregister(devlink, mlxsw_sp_traps_arr,
ARRAY_SIZE(mlxsw_sp_traps_arr));
+ devlink_trap_groups_unregister(devlink, mlxsw_sp_trap_groups_arr,
+ groups_count);
+ mlxsw_sp_trap_policers_fini(mlxsw_sp);
}
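Initialization now layers three registrations (policers, then groups, then traps), and the error path unwinds them in reverse order. A generic, runnable sketch of the same goto-unwind idiom, with stubs standing in for the devlink calls:

#include <stdio.h>

static int setup_policers(void) { return 0; }
static int setup_groups(void)   { return 0; }
static int setup_traps(void)    { return -1; }  /* force the unwind path */
static void teardown_groups(void)   { puts("unregister groups"); }
static void teardown_policers(void) { puts("unregister policers"); }

static int traps_init(void)
{
	int err;

	err = setup_policers();
	if (err)
		return err;
	err = setup_groups();
	if (err)
		goto err_groups;
	err = setup_traps();
	if (err)
		goto err_traps;
	return 0;

err_traps:
	teardown_groups();
err_groups:
	teardown_policers();
	return err;
}

int main(void)
{
	return traps_init() ? 1 : 0;
}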
int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
@@ -265,7 +474,7 @@ int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
- struct mlxsw_listener *listener;
+ const struct mlxsw_listener *listener;
int err;
if (mlxsw_sp_listener_devlink_map[i] != trap->id)
@@ -286,7 +495,7 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
- struct mlxsw_listener *listener;
+ const struct mlxsw_listener *listener;
if (mlxsw_sp_listener_devlink_map[i] != trap->id)
continue;
@@ -303,27 +512,24 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
- enum mlxsw_reg_hpkt_action hw_action;
- struct mlxsw_listener *listener;
+ const struct mlxsw_listener *listener;
+ bool enabled;
int err;
if (mlxsw_sp_listener_devlink_map[i] != trap->id)
continue;
listener = &mlxsw_sp_listeners_arr[i];
-
switch (action) {
case DEVLINK_TRAP_ACTION_DROP:
- hw_action = MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT;
+ enabled = false;
break;
case DEVLINK_TRAP_ACTION_TRAP:
- hw_action = MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU;
+ enabled = true;
break;
default:
return -EINVAL;
}
-
- err = mlxsw_core_trap_action_set(mlxsw_core, listener,
- hw_action);
+ err = mlxsw_core_trap_state_set(mlxsw_core, listener, enabled);
if (err)
return err;
}
@@ -331,62 +537,34 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
return 0;
}
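The rewritten mlxsw_sp_trap_action_set() no longer picks a raw HPKT action; it maps the devlink action onto an enabled/disabled listener state and lets the core pick the hardware action. A standalone sketch of that mapping (the enum and names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum trap_action { ACTION_DROP, ACTION_TRAP };

static int action_to_enabled(enum trap_action action, bool *enabled)
{
	switch (action) {
	case ACTION_DROP:
		*enabled = false;  /* was ..._ACTION_SET_FW_DEFAULT */
		return 0;
	case ACTION_TRAP:
		*enabled = true;   /* was ..._ACTION_TRAP_EXCEPTION_TO_CPU */
		return 0;
	}
	return -1;                 /* unknown action, like -EINVAL above */
}

int main(void)
{
	bool en;

	if (!action_to_enabled(ACTION_TRAP, &en))
		printf("trap -> %s\n", en ? "enabled" : "disabled");
	return 0;
}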
-#define MLXSW_SP_DISCARD_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
-
-static int
-mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
- const struct devlink_trap_group *group)
-{
- enum mlxsw_reg_qpcr_ir_units ir_units;
- char qpcr_pl[MLXSW_REG_QPCR_LEN];
- u16 policer_id;
- u8 burst_size;
- bool is_bytes;
- u32 rate;
-
- switch (group->id) {
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS: /* fall through */
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS: /* fall through */
- case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
- policer_id = MLXSW_SP_DISCARD_POLICER_ID;
- ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
- is_bytes = false;
- rate = 10 * 1024; /* 10Kpps */
- burst_size = 7;
- break;
- default:
- return -EINVAL;
- }
-
- mlxsw_reg_qpcr_pack(qpcr_pl, policer_id, ir_units, is_bytes, rate,
- burst_size);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
-}
-
static int
-__mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
- const struct devlink_trap_group *group)
+__mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ u32 policer_id)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
char htgt_pl[MLXSW_REG_HTGT_LEN];
u8 priority, tc, group_id;
- u16 policer_id;
switch (group->id) {
case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS;
- policer_id = MLXSW_SP_DISCARD_POLICER_ID;
priority = 0;
tc = 1;
break;
case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:
group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS;
- policer_id = MLXSW_SP_DISCARD_POLICER_ID;
priority = 0;
tc = 1;
break;
case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS;
- policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS;
priority = 0;
tc = 1;
break;
@@ -394,23 +572,179 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
}
- mlxsw_reg_htgt_pack(htgt_pl, group_id, policer_id, priority, tc);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ if (policer_id) {
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp,
+ policer_id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+ hw_policer_id = policer_item->hw_id;
+ }
+
+ mlxsw_reg_htgt_pack(htgt_pl, group_id, hw_policer_id, priority, tc);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ return __mlxsw_sp_trap_group_init(mlxsw_core, group,
+ group->init_policer_id);
+}
+
+int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer)
+{
+ u32 policer_id = policer ? policer->id : 0;
+
+ return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id);
+}
+
+static struct mlxsw_sp_trap_policer_item *
+mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp, u32 id)
+{
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ u16 hw_id;
+
+ /* We should be able to allocate a policer because the number of
+ * policers we registered with devlink is in accordance with the number
+ * of available policers.
+ */
+ hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers);
+ if (WARN_ON(hw_id == trap->max_policers))
+ return ERR_PTR(-ENOBUFS);
+
+ policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
+ if (!policer_item)
+ return ERR_PTR(-ENOMEM);
+
+ __set_bit(hw_id, trap->policers_usage);
+ policer_item->hw_id = hw_id;
+ policer_item->id = id;
+ list_add_tail(&policer_item->list, &trap->policer_item_list);
+
+ return policer_item;
+}
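Policer items carry both the devlink-visible id and a hardware id taken from the usage bitmap; allocation is a first-fit bit scan. A userspace sketch of the same allocator (the bitmap helpers are reimplemented, and the single-word bitmap is an illustrative simplification):

#include <stdio.h>

#define MAX_POLICERS 16

static unsigned long usage;   /* one bit per hardware policer */

static int alloc_hw_id(void)
{
	int id;

	for (id = 0; id < MAX_POLICERS; id++) {
		if (!(usage & (1UL << id))) {
			usage |= 1UL << id;   /* __set_bit() analogue */
			return id;
		}
	}
	return -1;   /* exhausted; the driver WARNs and returns -ENOBUFS */
}

static void free_hw_id(int id)
{
	usage &= ~(1UL << id);        /* __clear_bit() analogue */
}

int main(void)
{
	int a = alloc_hw_id(), b = alloc_hw_id();

	printf("allocated %d and %d\n", a, b);     /* 0 and 1 */
	free_hw_id(a);
	printf("reallocated %d\n", alloc_hw_id()); /* 0 again */
	return 0;
}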
+
+static void
+mlxsw_sp_trap_policer_item_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_trap_policer_item *policer_item)
+{
+ list_del(&policer_item->list);
+ __clear_bit(policer_item->hw_id, mlxsw_sp->trap->policers_usage);
+ kfree(policer_item);
+}
+
+static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size,
+ struct netlink_ext_ack *extack)
+{
+ int bs = fls64(burst) - 1;
+
+ if (burst != (BIT_ULL(bs))) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not a power of two");
+ return -EINVAL;
+ }
+
+ *p_burst_size = bs;
+
+ return 0;
+}
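The hardware encodes the committed burst size as a base-2 exponent, which is why mlxsw_sp_trap_policer_bs() rejects any burst that is not a power of two. A runnable sketch of the same check (fls64() is reimplemented here, and a zero burst is guarded explicitly):

#include <stdint.h>
#include <stdio.h>

/* fls64() analogue: 1-based index of the highest set bit, 0 if none. */
static int fls64_sketch(uint64_t x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

static int burst_to_exponent(uint64_t burst, uint8_t *exp)
{
	int bs;

	if (!burst)
		return -1;              /* devlink enforces a non-zero minimum */
	bs = fls64_sketch(burst) - 1;
	if (burst != (UINT64_C(1) << bs))
		return -1;              /* not a power of two: rejected */
	*exp = (uint8_t)bs;
	return 0;
}

int main(void)
{
	uint8_t e;

	if (!burst_to_exponent(128, &e))
		printf("burst 128 -> exponent %u\n", e);    /* prints 7 */
	printf("burst 100 -> %s\n",
	       burst_to_exponent(100, &e) ? "rejected" : "ok");
	return 0;
}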
+
+static int __mlxsw_sp_trap_policer_set(struct mlxsw_sp *mlxsw_sp, u16 hw_id,
+ u64 rate, u64 burst, bool clear_counter,
+ struct netlink_ext_ack *extack)
+{
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u8 burst_size;
int err;
- err = mlxsw_sp_trap_group_policer_init(mlxsw_sp, group);
+ err = mlxsw_sp_trap_policer_bs(burst, &burst_size, extack);
if (err)
return err;
- err = __mlxsw_sp_trap_group_init(mlxsw_sp, group);
+ mlxsw_reg_qpcr_pack(qpcr_pl, hw_id, MLXSW_REG_QPCR_IR_UNITS_M, false,
+ rate, burst_size);
+ mlxsw_reg_qpcr_clear_counter_set(qpcr_pl, clear_counter);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+int mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ int err;
+
+ policer_item = mlxsw_sp_trap_policer_item_init(mlxsw_sp, policer->id);
+ if (IS_ERR(policer_item))
+ return PTR_ERR(policer_item);
+
+ err = __mlxsw_sp_trap_policer_set(mlxsw_sp, policer_item->hw_id,
+ policer->init_rate,
+ policer->init_burst, true, NULL);
+ if (err)
+ goto err_trap_policer_set;
+
+ return 0;
+
+err_trap_policer_set:
+ mlxsw_sp_trap_policer_item_fini(mlxsw_sp, policer_item);
+ return err;
+}
+
+void mlxsw_sp_trap_policer_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return;
+
+ mlxsw_sp_trap_policer_item_fini(mlxsw_sp, policer_item);
+}
+
+int mlxsw_sp_trap_policer_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ return __mlxsw_sp_trap_policer_set(mlxsw_sp, policer_item->hw_id,
+ rate, burst, false, extack);
+}
+
+int
+mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ int err;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ mlxsw_reg_qpcr_pack(qpcr_pl, policer_item->hw_id,
+ MLXSW_REG_QPCR_IR_UNITS_M, false, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
if (err)
return err;
+ *p_drops = mlxsw_reg_qpcr_violate_count_get(qpcr_pl);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
new file mode 100644
index 000000000000..8c54897ba173
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_TRAP_H
+#define _MLXSW_SPECTRUM_TRAP_H
+
+#include <linux/list.h>
+#include <net/devlink.h>
+
+struct mlxsw_sp_trap {
+ struct devlink_trap_policer *policers_arr; /* Registered policers */
+ u64 policers_count; /* Number of registered policers */
+ struct list_head policer_item_list;
+ u64 max_policers;
+ unsigned long policers_usage[]; /* Usage bitmap */
+};
+
+struct mlxsw_sp_trap_policer_item {
+ u16 hw_id;
+ u32 id;
+ struct list_head list; /* Member of policer_item_list */
+};
+
+#endif
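policers_usage[] above is a flexible array member, so the bitmap is sized at allocation time from max_policers. A userspace sketch of how such a structure is plausibly allocated (BITS_TO_LONGS() is reimplemented; the allocation site itself is not part of this patch, so this is an assumption):

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct trap_sketch {
	unsigned long max_policers;
	unsigned long policers_usage[];   /* one bit per hardware policer */
};

static struct trap_sketch *trap_alloc(unsigned long max_policers)
{
	struct trap_sketch *t;

	t = calloc(1, sizeof(*t) +
		      BITS_TO_LONGS(max_policers) * sizeof(unsigned long));
	if (t)
		t->max_policers = max_policers;
	return t;
}

int main(void)
{
	struct trap_sketch *t = trap_alloc(128);
	int ok = t != NULL;

	free(t);
	return ok ? 0 : 1;
}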
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index f0e98ec8f1ee..2503f61db5fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -180,7 +180,7 @@ static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
if (err)
return err;
oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
+ *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
return 0;
}
@@ -1259,6 +1259,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
if (mlxsw_sx_port_created(mlxsw_sx, i))
mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
+ mlxsw_sx->ports = NULL;
}
static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
@@ -1293,6 +1294,7 @@ err_port_module_info_get:
if (mlxsw_sx_port_created(mlxsw_sx, i))
mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
+ mlxsw_sx->ports = NULL;
return err;
}
@@ -1376,6 +1378,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
u8 module, width;
int err;
+ if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ return -EINVAL;
+ }
+
if (new_type == DEVLINK_PORT_TYPE_AUTO)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 12e1fa998d42..eaa521b7561b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -102,6 +102,8 @@ enum {
MLXSW_TRAP_ID_ACL1 = 0x1C1,
/* Multicast trap used for routes with trap-and-forward action */
MLXSW_TRAP_ID_ACL2 = 0x1C2,
+ MLXSW_TRAP_ID_DISCARD_INGRESS_ACL = 0x1C3,
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL = 0x1C4,
MLXSW_TRAP_ID_MAX = 0x1FF
};
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index d1444ba36e10..4fe6aedca22f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5694,7 +5694,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
* from the bridge.
*/
if ((hw->features & STP_SUPPORT) && !promiscuous &&
- (dev->priv_flags & IFF_BRIDGE_PORT)) {
+ netif_is_bridge_port(dev)) {
struct ksz_switch *sw = hw->ksz_switch;
int port = priv->port.first_port;
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 39925e4bf2ec..b25a13da900a 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1070,7 +1070,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
if (unlikely(ret)) {
netif_err(priv, probe, ndev, "Error %d initializing encx24j600 card\n",
ret);
- goto out_free;
+ goto out_stop;
}
eidled = encx24j600_read_reg(priv, EIDLED);
@@ -1088,6 +1088,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
out_unregister:
unregister_netdev(priv->ndev);
+out_stop:
+ kthread_stop(priv->kworker_task);
out_free:
free_netdev(ndev);
@@ -1100,6 +1102,7 @@ static int encx24j600_spi_remove(struct spi_device *spi)
struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
unregister_netdev(priv->ndev);
+ kthread_stop(priv->kworker_task);
free_netdev(priv->ndev);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index e1651756bf9d..f70bb81e1ed6 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
unregister_netdev(ndev);
- free_irq(ndev->irq, ndev);
+ devm_free_irq(&pdev->dev, ndev->irq, ndev);
moxart_mac_free_memory(ndev);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index d3b7373c5961..efb3965a3e42 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -183,44 +183,47 @@ static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
ocelot_write(ocelot, val, ANA_VLANMASK);
}
-void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware)
+static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+ u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- u32 val;
+ u32 val = 0;
- if (vlan_aware)
- val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
- else
- val = 0;
- ocelot_rmw_gix(ocelot, val,
- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
- ANA_PORT_VLAN_CFG, port);
+ if (ocelot_port->vid != vid) {
+ /* Always permit deleting the native VLAN (vid = 0) */
+ if (ocelot_port->vid && vid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ ocelot_port->vid);
+ return -EBUSY;
+ }
+ ocelot_port->vid = vid;
+ }
+
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port);
- if (vlan_aware && !ocelot_port->vid)
+ if (ocelot_port->vlan_aware && !ocelot_port->vid)
/* If port is vlan-aware and tagged, drop untagged and priority
* tagged frames.
*/
val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
- else
- val = 0;
ocelot_rmw_gix(ocelot, val,
ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
ANA_PORT_DROP_CFG, port);
- if (vlan_aware) {
+ if (ocelot_port->vlan_aware) {
if (ocelot_port->vid)
/* Tag all frames except when VID == DEFAULT_VLAN */
- val |= REW_TAG_CFG_TAG_CFG(1);
+ val = REW_TAG_CFG_TAG_CFG(1);
else
/* Tag all frames */
- val |= REW_TAG_CFG_TAG_CFG(3);
+ val = REW_TAG_CFG_TAG_CFG(3);
} else {
/* Port tagging disabled. */
val = REW_TAG_CFG_TAG_CFG(0);
@@ -228,31 +231,31 @@ void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ocelot_rmw_gix(ocelot, val,
REW_TAG_CFG_TAG_CFG_M,
REW_TAG_CFG, port);
+
+ return 0;
}
-EXPORT_SYMBOL(ocelot_port_vlan_filtering);
-static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
- u16 vid)
+void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 val;
- if (ocelot_port->vid != vid) {
- /* Always permit deleting the native VLAN (vid = 0) */
- if (ocelot_port->vid && vid) {
- dev_err(ocelot->dev,
- "Port already has a native VLAN: %d\n",
- ocelot_port->vid);
- return -EBUSY;
- }
- ocelot_port->vid = vid;
- }
+ ocelot_port->vlan_aware = vlan_aware;
- ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
- REW_PORT_VLAN_CFG_PORT_VID_M,
- REW_PORT_VLAN_CFG, port);
+ if (vlan_aware)
+ val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+ else
+ val = 0;
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+ ANA_PORT_VLAN_CFG, port);
- return 0;
+ ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid);
}
+EXPORT_SYMBOL(ocelot_port_vlan_filtering);
/* Default VLAN to classify untagged frames into (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
@@ -442,8 +445,23 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
mode, DEV_MAC_MODE_CFG);
- if (ocelot->ops->pcs_init)
- ocelot->ops->pcs_init(ocelot, port);
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
/* Enable MAC module */
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
@@ -858,12 +876,12 @@ static void ocelot_get_stats64(struct net_device *dev,
}
int ocelot_fdb_add(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid, bool vlan_aware)
+ const unsigned char *addr, u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!vid) {
- if (!vlan_aware)
+ if (!ocelot_port->vlan_aware)
/* If the bridge is not VLAN aware and no VID was
* provided, set it to pvid to ensure the MAC entry
* matches incoming untagged packets
@@ -890,7 +908,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->chip_port;
- return ocelot_fdb_add(ocelot, port, addr, vid, priv->vlan_aware);
+ return ocelot_fdb_add(ocelot, port, addr, vid);
}
int ocelot_fdb_del(struct ocelot *ocelot, int port,
@@ -1013,10 +1031,8 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
{
int i, j;
- /* Loop through all the mac tables entries. There are 1024 rows of 4
- * entries.
- */
- for (i = 0; i < 1024; i++) {
+ /* Loop through all the mac tables entries. */
+ for (i = 0; i < ocelot->num_mact_rows; i++) {
for (j = 0; j < 4; j++) {
struct ocelot_mact_entry entry;
bool is_static;
@@ -1398,7 +1414,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
* a source for the other ports.
*/
for (p = 0; p < ocelot->num_phys_ports; p++) {
- if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) {
+ if (ocelot->bridge_fwd_mask & BIT(p)) {
unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
for (i = 0; i < ocelot->num_phys_ports; i++) {
@@ -1413,18 +1429,10 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
}
}
- /* Avoid the NPI port from looping back to itself */
- if (p != ocelot->cpu)
- mask |= BIT(ocelot->cpu);
-
ocelot_write_rix(ocelot, mask,
ANA_PGID_PGID, PGID_SRC + p);
} else {
- /* Only the CPU port, this is compatible with link
- * aggregation.
- */
- ocelot_write_rix(ocelot,
- BIT(ocelot->cpu),
+ ocelot_write_rix(ocelot, 0,
ANA_PGID_PGID, PGID_SRC + p);
}
}
@@ -1443,8 +1451,15 @@ static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
{
- ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2),
- ANA_AUTOAGE);
+ unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
+
+ /* Setting AGE_PERIOD to zero effectively disables automatic aging,
+ * which is clearly not our intention. So avoid that.
+ */
+ if (!age_period)
+ age_period = 1;
+
+ ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
}
EXPORT_SYMBOL(ocelot_set_ageing_time);
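The fix above changes the divisor from 2 to 2000: the caller now passes milliseconds and, assuming the register's period unit is 2 seconds as the arithmetic implies, the result is clamped to at least 1 so aging is never silently disabled. A worked sketch:

#include <stdio.h>

static unsigned int age_period(unsigned int msecs)
{
	unsigned int p = msecs / 2000;  /* 2 s register units (assumed) */

	return p ? p : 1;               /* 0 would disable aging entirely */
}

int main(void)
{
	printf("%u\n", age_period(300000)); /* 5 min default -> 150 */
	printf("%u\n", age_period(1000));   /* sub-2 s values clamp to 1 */
	return 0;
}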
@@ -1452,7 +1467,7 @@ static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
- u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
ocelot_set_ageing_time(ocelot, ageing_time);
}
@@ -1489,8 +1504,8 @@ static int ocelot_port_attr_set(struct net_device *dev,
ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- priv->vlan_aware = attr->u.vlan_filtering;
- ocelot_port_vlan_filtering(ocelot, port, priv->vlan_aware);
+ ocelot_port_vlan_filtering(ocelot, port,
+ attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
@@ -1861,7 +1876,6 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
} else {
err = ocelot_port_bridge_leave(ocelot, port,
info->upper_dev);
- priv->vlan_aware = false;
}
}
if (netif_is_lag_master(info->upper_dev)) {
@@ -2178,13 +2192,25 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ * In the special case that it's the NPI port that we're configuring, the
+ * length of the tag and optional prefix needs to be accounted for privately,
+ * in order to be able to sustain communication at the requested @sdu.
*/
-static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
+void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
int atop_wm;
+ if (port == ocelot->npi) {
+ maxlen += OCELOT_TAG_LEN;
+
+ if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ maxlen += OCELOT_SHORT_PREFIX_LEN;
+ else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ maxlen += OCELOT_LONG_PREFIX_LEN;
+ }
+
ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
/* Set Pause WM hysteresis
@@ -2202,6 +2228,24 @@ static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
SYS_ATOP, port);
ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
}
+EXPORT_SYMBOL(ocelot_port_set_maxlen);
+
+int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
+{
+ int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
+
+ if (port == ocelot->npi) {
+ max_mtu -= OCELOT_TAG_LEN;
+
+ if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ max_mtu -= OCELOT_SHORT_PREFIX_LEN;
+ else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ max_mtu -= OCELOT_LONG_PREFIX_LEN;
+ }
+
+ return max_mtu;
+}
+EXPORT_SYMBOL(ocelot_get_max_mtu);
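ocelot_get_max_mtu() mirrors ocelot_port_set_maxlen(): on the NPI port, the injection/extraction tag and the optional frame prefix eat into the usable MTU. A worked sketch of that accounting (the tag and prefix lengths below are illustrative placeholders, not the hardware's actual values):

#include <stdio.h>

#define ETH_HLEN     14
#define ETH_FCS_LEN  4
#define TAG_LEN      16   /* placeholder for OCELOT_TAG_LEN */
#define LONG_PREFIX  16   /* placeholder for OCELOT_LONG_PREFIX_LEN */

static int max_mtu(int is_npi, int long_prefix)
{
	int mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;

	if (is_npi) {
		mtu -= TAG_LEN;         /* injection/extraction header */
		if (long_prefix)
			mtu -= LONG_PREFIX; /* optional frame prefix */
	}
	return mtu;
}

int main(void)
{
	printf("regular port: %d\n", max_mtu(0, 0));
	printf("NPI with long prefix: %d\n", max_mtu(1, 1));
	return 0;
}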
void ocelot_init_port(struct ocelot *ocelot, int port)
{
@@ -2299,42 +2343,57 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
}
EXPORT_SYMBOL(ocelot_probe_port);
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction)
+/* Configure and enable the CPU port module, which is a set of queues.
+ * If @npi contains a valid port index, the CPU port module is connected
+ * to the Node Processor Interface (NPI). This is the mode through which
+ * frames can be injected from and extracted to an external CPU,
+ * over Ethernet.
+ */
+void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction)
{
- /* Configure and enable the CPU port. */
+ int cpu = ocelot->num_phys_ports;
+
+ ocelot->npi = npi;
+ ocelot->inj_prefix = injection;
+ ocelot->xtr_prefix = extraction;
+
+ /* The unicast destination PGID for the CPU port module is unused */
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ /* Instead set up a multicast destination PGID for traffic copied to
+ * the CPU. Whitelisted MAC addresses like the port netdevice MAC
+ * addresses will be copied to the CPU via this PGID.
+ */
ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
ANA_PORT_PORT_CFG, cpu);
- /* If the CPU port is a physical port, set up the port in Node
- * Processor Interface (NPI) mode. This is the mode through which
- * frames can be injected from and extracted to an external CPU.
- * Only one port can be an NPI at the same time.
- */
- if (cpu < ocelot->num_phys_ports) {
- int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
-
+ if (npi >= 0 && npi < ocelot->num_phys_ports) {
ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
- QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(npi),
QSYS_EXT_CPU_CFG);
- if (injection == OCELOT_TAG_PREFIX_SHORT)
- sdu += OCELOT_SHORT_PREFIX_LEN;
- else if (injection == OCELOT_TAG_PREFIX_LONG)
- sdu += OCELOT_LONG_PREFIX_LEN;
-
- ocelot_port_set_maxlen(ocelot, cpu, sdu);
+ /* Enable NPI port */
+ ocelot_write_rix(ocelot,
+ QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, npi);
+ /* NPI port Injection/Extraction configuration */
+ ocelot_write_rix(ocelot,
+ SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
+ SYS_PORT_MODE_INCL_INJ_HDR(injection),
+ SYS_PORT_MODE, npi);
}
- /* CPU port Injection/Extraction configuration */
+ /* Enable CPU port module */
ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
QSYS_SWITCH_PORT_MODE_PORT_ENA,
QSYS_SWITCH_PORT_MODE, cpu);
+ /* CPU port Injection/Extraction configuration */
ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
SYS_PORT_MODE_INCL_INJ_HDR(injection),
SYS_PORT_MODE, cpu);
@@ -2344,10 +2403,8 @@ void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
-
- ocelot->cpu = cpu;
}
-EXPORT_SYMBOL(ocelot_set_cpu_port);
+EXPORT_SYMBOL(ocelot_configure_cpu);
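ocelot_configure_cpu() separates the CPU port module (index num_phys_ports) from an optional front-panel NPI port; a negative @npi selects register-based injection/extraction with no NPI. A small sketch of that split (the port count and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define NUM_PHYS_PORTS 4

static void configure_cpu(int npi)
{
	int cpu = NUM_PHYS_PORTS;   /* CPU port module index */
	bool has_npi = npi >= 0 && npi < NUM_PHYS_PORTS;

	printf("cpu module %d, npi %s\n", cpu, has_npi ? "enabled" : "none");
}

int main(void)
{
	configure_cpu(-1);  /* register-based injection/extraction only */
	configure_cpu(3);   /* frames tunneled over Ethernet via port 3 */
	return 0;
}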
int ocelot_init(struct ocelot *ocelot)
{
@@ -2499,7 +2556,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
- ocelot_ace_deinit();
if (ocelot->ptp_clock)
ptp_clock_unregister(ocelot->ptp_clock);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 04372ba72fec..641af929497f 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -28,16 +28,6 @@
#include "ocelot_tc.h"
#include "ocelot_ptp.h"
-#define PGID_AGGR 64
-#define PGID_SRC 80
-
-/* Reserved PGIDs */
-#define PGID_CPU (PGID_AGGR - 5)
-#define PGID_UC (PGID_AGGR - 4)
-#define PGID_MC (PGID_AGGR - 3)
-#define PGID_MCIPV4 (PGID_AGGR - 2)
-#define PGID_MCIPV6 (PGID_AGGR - 1)
-
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
@@ -66,8 +56,6 @@ struct ocelot_port_private {
struct phy_device *phy;
u8 chip_port;
- u8 vlan_aware;
-
struct phy *serdes;
struct ocelot_port_tc tc;
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 86fc6e6b46dd..3bd286044480 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -6,60 +6,13 @@
#include <linux/iopoll.h>
#include <linux/proc_fs.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include "ocelot_police.h"
#include "ocelot_ace.h"
-#include "ocelot_vcap.h"
#include "ocelot_s2.h"
#define OCELOT_POLICER_DISCARD 0x17f
-
-static struct ocelot_acl_block *acl_block;
-
-struct vcap_props {
- const char *name; /* Symbolic name */
- u16 tg_width; /* Type-group width (in bits) */
- u16 sw_count; /* Sub word count */
- u16 entry_count; /* Entry count */
- u16 entry_words; /* Number of entry words */
- u16 entry_width; /* Entry width (in bits) */
- u16 action_count; /* Action count */
- u16 action_words; /* Number of action words */
- u16 action_width; /* Action width (in bits) */
- u16 action_type_width; /* Action type width (in bits) */
- struct {
- u16 width; /* Action type width (in bits) */
- u16 count; /* Action type sub word count */
- } action_table[2];
- u16 counter_words; /* Number of counter words */
- u16 counter_width; /* Counter width (in bits) */
-};
-
#define ENTRY_WIDTH 32
-#define BITS_TO_32BIT(x) (1 + (((x) - 1) / ENTRY_WIDTH))
-
-static const struct vcap_props vcap_is2 = {
- .name = "IS2",
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VCAP_IS2_CNT,
- .entry_words = BITS_TO_32BIT(VCAP_IS2_ENTRY_WIDTH),
- .entry_width = VCAP_IS2_ENTRY_WIDTH,
- .action_count = (VCAP_IS2_CNT + VCAP_PORT_CNT + 2),
- .action_words = BITS_TO_32BIT(VCAP_IS2_ACTION_WIDTH),
- .action_width = (VCAP_IS2_ACTION_WIDTH),
- .action_type_width = 1,
- .action_table = {
- {
- .width = (IS2_AO_ACL_ID + IS2_AL_ACL_ID),
- .count = 2
- },
- {
- .width = 6,
- .count = 4
- },
- },
- .counter_words = BITS_TO_32BIT(4 * ENTRY_WIDTH),
- .counter_width = ENTRY_WIDTH,
-};
enum vcap_sel {
VCAP_SEL_ENTRY = 0x1,
@@ -95,18 +48,20 @@ struct vcap_data {
u32 tg_mask; /* Current type-group mask */
};
-static u32 vcap_s2_read_update_ctrl(struct ocelot *oc)
+static u32 vcap_s2_read_update_ctrl(struct ocelot *ocelot)
{
- return ocelot_read(oc, S2_CORE_UPDATE_CTRL);
+ return ocelot_read(ocelot, S2_CORE_UPDATE_CTRL);
}
-static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
+static void vcap_cmd(struct ocelot *ocelot, u16 ix, int cmd, int sel)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+
u32 value = (S2_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
S2_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
S2_CORE_UPDATE_CTRL_UPDATE_SHOT);
- if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2.entry_count)
+ if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2->entry_count)
return;
if (!(sel & VCAP_SEL_ENTRY))
@@ -118,83 +73,101 @@ static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
if (!(sel & VCAP_SEL_COUNTER))
value |= S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
- ocelot_write(oc, value, S2_CORE_UPDATE_CTRL);
- readx_poll_timeout(vcap_s2_read_update_ctrl, oc, value,
+ ocelot_write(ocelot, value, S2_CORE_UPDATE_CTRL);
+ readx_poll_timeout(vcap_s2_read_update_ctrl, ocelot, value,
(value & S2_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
10, 100000);
}
/* Convert from 0-based row to VCAP entry row and run command */
-static void vcap_row_cmd(struct ocelot *oc, u32 row, int cmd, int sel)
+static void vcap_row_cmd(struct ocelot *ocelot, u32 row, int cmd, int sel)
{
- vcap_cmd(oc, vcap_is2.entry_count - row - 1, cmd, sel);
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+
+ vcap_cmd(ocelot, vcap_is2->entry_count - row - 1, cmd, sel);
}
-static void vcap_entry2cache(struct ocelot *oc, struct vcap_data *data)
+static void vcap_entry2cache(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 entry_words, i;
- for (i = 0; i < vcap_is2.entry_words; i++) {
- ocelot_write_rix(oc, data->entry[i], S2_CACHE_ENTRY_DAT, i);
- ocelot_write_rix(oc, ~data->mask[i], S2_CACHE_MASK_DAT, i);
+ entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
+
+ for (i = 0; i < entry_words; i++) {
+ ocelot_write_rix(ocelot, data->entry[i], S2_CACHE_ENTRY_DAT, i);
+ ocelot_write_rix(ocelot, ~data->mask[i], S2_CACHE_MASK_DAT, i);
}
- ocelot_write(oc, data->tg, S2_CACHE_TG_DAT);
+ ocelot_write(ocelot, data->tg, S2_CACHE_TG_DAT);
}
-static void vcap_cache2entry(struct ocelot *oc, struct vcap_data *data)
+static void vcap_cache2entry(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 entry_words, i;
+
+ entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
- for (i = 0; i < vcap_is2.entry_words; i++) {
- data->entry[i] = ocelot_read_rix(oc, S2_CACHE_ENTRY_DAT, i);
+ for (i = 0; i < entry_words; i++) {
+ data->entry[i] = ocelot_read_rix(ocelot, S2_CACHE_ENTRY_DAT, i);
// Invert mask
- data->mask[i] = ~ocelot_read_rix(oc, S2_CACHE_MASK_DAT, i);
+ data->mask[i] = ~ocelot_read_rix(ocelot, S2_CACHE_MASK_DAT, i);
}
- data->tg = ocelot_read(oc, S2_CACHE_TG_DAT);
+ data->tg = ocelot_read(ocelot, S2_CACHE_TG_DAT);
}
-static void vcap_action2cache(struct ocelot *oc, struct vcap_data *data)
+static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i, width, mask;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 action_words, i, width, mask;
/* Encode action type */
- width = vcap_is2.action_type_width;
+ width = vcap_is2->action_type_width;
if (width) {
mask = GENMASK(width, 0);
data->action[0] = ((data->action[0] & ~mask) | data->type);
}
- for (i = 0; i < vcap_is2.action_words; i++)
- ocelot_write_rix(oc, data->action[i], S2_CACHE_ACTION_DAT, i);
+ action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
- for (i = 0; i < vcap_is2.counter_words; i++)
- ocelot_write_rix(oc, data->counter[i], S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < action_words; i++)
+ ocelot_write_rix(ocelot, data->action[i], S2_CACHE_ACTION_DAT,
+ i);
+
+ for (i = 0; i < vcap_is2->counter_words; i++)
+ ocelot_write_rix(ocelot, data->counter[i], S2_CACHE_CNT_DAT, i);
}
-static void vcap_cache2action(struct ocelot *oc, struct vcap_data *data)
+static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i, width;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 action_words, i, width;
+
+ action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
- for (i = 0; i < vcap_is2.action_words; i++)
- data->action[i] = ocelot_read_rix(oc, S2_CACHE_ACTION_DAT, i);
+ for (i = 0; i < action_words; i++)
+ data->action[i] = ocelot_read_rix(ocelot, S2_CACHE_ACTION_DAT,
+ i);
- for (i = 0; i < vcap_is2.counter_words; i++)
- data->counter[i] = ocelot_read_rix(oc, S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < vcap_is2->counter_words; i++)
+ data->counter[i] = ocelot_read_rix(ocelot, S2_CACHE_CNT_DAT, i);
/* Extract action type */
- width = vcap_is2.action_type_width;
+ width = vcap_is2->action_type_width;
data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
}
/* Calculate offsets for entry */
-static void is2_data_get(struct vcap_data *data, int ix)
+static void is2_data_get(struct ocelot *ocelot, struct vcap_data *data, int ix)
{
- u32 i, col, offset, count, cnt, base, width = vcap_is2.tg_width;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 i, col, offset, count, cnt, base;
+ u32 width = vcap_is2->tg_width;
count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4);
col = (ix % 2);
- cnt = (vcap_is2.sw_count / count);
- base = (vcap_is2.sw_count - col * cnt - cnt);
+ cnt = (vcap_is2->sw_count / count);
+ base = (vcap_is2->sw_count - col * cnt - cnt);
data->tg_value = 0;
data->tg_mask = 0;
for (i = 0; i < cnt; i++) {
@@ -205,13 +178,13 @@ static void is2_data_get(struct vcap_data *data, int ix)
/* Calculate key/action/counter offsets */
col = (count - col - 1);
- data->key_offset = (base * vcap_is2.entry_width) / vcap_is2.sw_count;
- data->counter_offset = (cnt * col * vcap_is2.counter_width);
+ data->key_offset = (base * vcap_is2->entry_width) / vcap_is2->sw_count;
+ data->counter_offset = (cnt * col * vcap_is2->counter_width);
i = data->type;
- width = vcap_is2.action_table[i].width;
- cnt = vcap_is2.action_table[i].count;
+ width = vcap_is2->action_table[i].width;
+ cnt = vcap_is2->action_table[i].count;
data->action_offset =
- (((cnt * col * width) / count) + vcap_is2.action_type_width);
+ (((cnt * col * width) / count) + vcap_is2->action_type_width);
}
static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
@@ -242,22 +215,39 @@ static u32 vcap_data_get(u32 *data, u32 offset, u32 len)
return value;
}
-static void vcap_key_set(struct vcap_data *data, u32 offset, u32 width,
- u32 value, u32 mask)
+static void vcap_key_field_set(struct vcap_data *data, u32 offset, u32 width,
+ u32 value, u32 mask)
{
vcap_data_set(data->entry, offset + data->key_offset, width, value);
vcap_data_set(data->mask, offset + data->key_offset, width, mask);
}
-static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
- u8 *msk, u32 count)
+static void vcap_key_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
+ u32 value, u32 mask)
{
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+
+ vcap_key_field_set(data, offset, length, value, mask);
+}
+
+static void vcap_key_bytes_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
+ u8 *val, u8 *msk)
+{
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 count = ocelot->vcap_is2_keys[field].length;
u32 i, j, n = 0, value = 0, mask = 0;
+ WARN_ON(count % 8);
+
/* Data wider than 32 bits is split into chunks of at most 32 bits.
* The 32 LSB of the data are written to the 32 MSB of the TCAM.
*/
- offset += (count * 8);
+ offset += count;
+ count /= 8;
+
for (i = 0; i < count; i++) {
j = (count - i - 1);
value += (val[j] << n);
@@ -265,7 +255,7 @@ static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
n += 8;
if (n == ENTRY_WIDTH || (i + 1) == count) {
offset -= n;
- vcap_key_set(data, offset, n, value, mask);
+ vcap_key_field_set(data, offset, n, value, mask);
n = 0;
value = 0;
mask = 0;
@@ -273,55 +263,71 @@ static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
}
}
-static void vcap_key_l4_port_set(struct vcap_data *data, u32 offset,
+static void vcap_key_l4_port_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
struct ocelot_vcap_udp_tcp *port)
{
- vcap_key_set(data, offset, 16, port->value, port->mask);
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+
+ WARN_ON(length != 16);
+
+ vcap_key_field_set(data, offset, length, port->value, port->mask);
}
-static void vcap_key_bit_set(struct vcap_data *data, u32 offset,
+static void vcap_key_bit_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
enum ocelot_vcap_bit val)
{
- vcap_key_set(data, offset, 1, val == OCELOT_VCAP_BIT_1 ? 1 : 0,
- val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
-}
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+ u32 value = (val == OCELOT_VCAP_BIT_1 ? 1 : 0);
+ u32 msk = (val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
-#define VCAP_KEY_SET(fld, val, msk) \
- vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, val, msk)
-#define VCAP_KEY_ANY_SET(fld) \
- vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, 0, 0)
-#define VCAP_KEY_BIT_SET(fld, val) vcap_key_bit_set(&data, IS2_HKO_##fld, val)
-#define VCAP_KEY_BYTES_SET(fld, val, msk) \
- vcap_key_bytes_set(&data, IS2_HKO_##fld, val, msk, IS2_HKL_##fld / 8)
+ WARN_ON(length != 1);
-static void vcap_action_set(struct vcap_data *data, u32 offset, u32 width,
- u32 value)
-{
- vcap_data_set(data->action, offset + data->action_offset, width, value);
+ vcap_key_field_set(data, offset, length, value, msk);
}
-#define VCAP_ACT_SET(fld, val) \
- vcap_action_set(data, IS2_AO_##fld, IS2_AL_##fld, val)
+static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_action_field field, u32 value)
+{
+ int offset = ocelot->vcap_is2_actions[field].offset;
+ int length = ocelot->vcap_is2_actions[field].length;
+
+ vcap_data_set(data->action, offset + data->action_offset, length,
+ value);
+}
-static void is2_action_set(struct vcap_data *data,
- enum ocelot_ace_action action)
+static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ struct ocelot_ace_rule *ace)
{
- switch (action) {
+ switch (ace->action) {
case OCELOT_ACL_ACTION_DROP:
- VCAP_ACT_SET(PORT_MASK, 0x0);
- VCAP_ACT_SET(MASK_MODE, 0x1);
- VCAP_ACT_SET(POLICE_ENA, 0x1);
- VCAP_ACT_SET(POLICE_IDX, OCELOT_POLICER_DISCARD);
- VCAP_ACT_SET(CPU_QU_NUM, 0x0);
- VCAP_ACT_SET(CPU_COPY_ENA, 0x0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
+ OCELOT_POLICER_DISCARD);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
break;
case OCELOT_ACL_ACTION_TRAP:
- VCAP_ACT_SET(PORT_MASK, 0x0);
- VCAP_ACT_SET(MASK_MODE, 0x1);
- VCAP_ACT_SET(POLICE_ENA, 0x0);
- VCAP_ACT_SET(POLICE_IDX, 0x0);
- VCAP_ACT_SET(CPU_QU_NUM, 0x0);
- VCAP_ACT_SET(CPU_COPY_ENA, 0x1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1);
+ break;
+ case OCELOT_ACL_ACTION_POLICE:
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
+ ace->pol_ix);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
break;
}
}
@@ -329,6 +335,7 @@ static void is2_action_set(struct vcap_data *data,
static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_ace_rule *ace)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 val, msk, type, type_mask = 0xf, i, count;
struct ocelot_ace_vlan *tag = &ace->vlan;
struct ocelot_vcap_u64 payload;
@@ -344,60 +351,76 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
vcap_cache2action(ocelot, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(&data, ix);
+ is2_data_get(ocelot, &data, ix);
data.tg = (data.tg & ~data.tg_mask);
if (ace->prio != 0)
data.tg |= data.tg_value;
data.type = IS2_ACTION_TYPE_NORMAL;
- VCAP_KEY_ANY_SET(PAG);
- VCAP_KEY_SET(IGR_PORT_MASK, 0, ~BIT(ace->chip_port));
- VCAP_KEY_BIT_SET(FIRST, OCELOT_VCAP_BIT_1);
- VCAP_KEY_BIT_SET(HOST_MATCH, OCELOT_VCAP_BIT_ANY);
- VCAP_KEY_BIT_SET(L2_MC, ace->dmac_mc);
- VCAP_KEY_BIT_SET(L2_BC, ace->dmac_bc);
- VCAP_KEY_BIT_SET(VLAN_TAGGED, tag->tagged);
- VCAP_KEY_SET(VID, tag->vid.value, tag->vid.mask);
- VCAP_KEY_SET(PCP, tag->pcp.value[0], tag->pcp.mask[0]);
- VCAP_KEY_BIT_SET(DEI, tag->dei);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
+ ~ace->ingress_port_mask);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH,
+ OCELOT_VCAP_BIT_ANY);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, ace->dmac_mc);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, ace->dmac_bc);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei);
switch (ace->type) {
case OCELOT_ACE_TYPE_ETYPE: {
struct ocelot_ace_frame_etype *etype = &ace->frame.etype;
type = IS2_TYPE_ETYPE;
- VCAP_KEY_BYTES_SET(L2_DMAC, etype->dmac.value,
- etype->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, etype->smac.value,
- etype->smac.mask);
- VCAP_KEY_BYTES_SET(MAC_ETYPE_ETYPE, etype->etype.value,
- etype->etype.mask);
- VCAP_KEY_ANY_SET(MAC_ETYPE_L2_PAYLOAD); // Clear unused bits
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ETYPE_L2_PAYLOAD,
- etype->data.value, etype->data.mask, 2);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ etype->dmac.value, etype->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ etype->smac.value, etype->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ etype->etype.value, etype->etype.mask);
+ /* Clear unused bits */
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ 0, 0);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ etype->data.value, etype->data.mask);
break;
}
case OCELOT_ACE_TYPE_LLC: {
struct ocelot_ace_frame_llc *llc = &ace->frame.llc;
type = IS2_TYPE_LLC;
- VCAP_KEY_BYTES_SET(L2_DMAC, llc->dmac.value, llc->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, llc->smac.value, llc->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ llc->dmac.value, llc->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ llc->smac.value, llc->smac.mask);
for (i = 0; i < 4; i++) {
payload.value[i] = llc->llc.value[i];
payload.mask[i] = llc->llc.mask[i];
}
- VCAP_KEY_BYTES_SET(MAC_LLC_L2_LLC, payload.value, payload.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ payload.value, payload.mask);
break;
}
case OCELOT_ACE_TYPE_SNAP: {
struct ocelot_ace_frame_snap *snap = &ace->frame.snap;
type = IS2_TYPE_SNAP;
- VCAP_KEY_BYTES_SET(L2_DMAC, snap->dmac.value, snap->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, snap->smac.value, snap->smac.mask);
- VCAP_KEY_BYTES_SET(MAC_SNAP_L2_SNAP,
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ snap->dmac.value, snap->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ snap->smac.value, snap->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
ace->frame.snap.snap.value,
ace->frame.snap.snap.mask);
break;
@@ -406,26 +429,42 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_ace_frame_arp *arp = &ace->frame.arp;
type = IS2_TYPE_ARP;
- VCAP_KEY_BYTES_SET(MAC_ARP_L2_SMAC, arp->smac.value,
- arp->smac.mask);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_ADDR_SPACE_OK, arp->ethernet);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_PROTO_SPACE_OK, arp->ip);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_LEN_OK, arp->length);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_TGT_MATCH, arp->dmac_match);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_SENDER_MATCH, arp->smac_match);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_OPCODE_UNKNOWN, arp->unknown);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
+ arp->smac.value, arp->smac.mask);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
+ arp->ethernet);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
+ arp->ip);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_LEN_OK,
+ arp->length);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
+ arp->dmac_match);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
+ arp->smac_match);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
+ arp->unknown);
/* OPCODE is inverted: bit 0 is the reply flag, bit 1 is the RARP flag */
val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) |
(arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
(arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
- VCAP_KEY_SET(MAC_ARP_ARP_OPCODE, val, msk);
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_DIP,
- arp->dip.value.addr, arp->dip.mask.addr, 4);
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_SIP,
- arp->sip.value.addr, arp->sip.mask.addr, 4);
- VCAP_KEY_ANY_SET(MAC_ARP_DIP_EQ_SIP);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
+ val, msk);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
+ arp->dip.value.addr, arp->dip.mask.addr);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
+ arp->sip.value.addr, arp->sip.mask.addr);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ 0, 0);
break;
}
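Per the comment above, the 2-bit OPCODE key is matched with inverted flags: bit 0 set means "not a request" (an ARP reply), bit 1 set means RARP rather than ARP. A standalone sketch of the val/msk construction, reusing the tri-state constants from the source (the helper name is hypothetical):

/* Build the inverted 2-bit ARP OPCODE key, mirroring the code above */
static void arp_opcode_key(enum ocelot_vcap_bit req, enum ocelot_vcap_bit arp,
			   unsigned int *val, unsigned int *msk)
{
	*val = (req == OCELOT_VCAP_BIT_0 ? 1 : 0) |	/* bit 0: reply */
	       (arp == OCELOT_VCAP_BIT_0 ? 2 : 0);	/* bit 1: RARP */
	*msk = (req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
	       (arp == OCELOT_VCAP_BIT_ANY ? 0 : 2);
}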
case OCELOT_ACE_TYPE_IPV4:
@@ -493,18 +532,23 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
seq_zero = ipv6->seq_zero;
}
- VCAP_KEY_BIT_SET(IP4,
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4,
ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- VCAP_KEY_BIT_SET(L3_FRAGMENT, fragment);
- VCAP_KEY_ANY_SET(L3_FRAG_OFS_GT0);
- VCAP_KEY_BIT_SET(L3_OPTIONS, options);
- VCAP_KEY_BIT_SET(L3_TTL_GT0, ttl);
- VCAP_KEY_BYTES_SET(L3_TOS, ds.value, ds.mask);
- vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_DIP, dip.value.addr,
- dip.mask.addr, 4);
- vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_SIP, sip.value.addr,
- sip.mask.addr, 4);
- VCAP_KEY_BIT_SET(DIP_EQ_SIP, sip_eq_dip);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_FRAGMENT,
+ fragment);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_OPTIONS,
+ options);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ ttl);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_TOS,
+ ds.value, ds.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_DIP,
+ dip.value.addr, dip.mask.addr);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_SIP,
+ sip.value.addr, sip.mask.addr);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DIP_EQ_SIP,
+ sip_eq_dip);
val = proto.value[0];
msk = proto.mask[0];
type = IS2_TYPE_IP_UDP_TCP;
@@ -512,25 +556,34 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
/* UDP/TCP protocol match */
tcp = (val == 6 ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_TCP, tcp);
- vcap_key_l4_port_set(&data,
- IS2_HKO_IP4_TCP_UDP_L4_DPORT,
- dport);
- vcap_key_l4_port_set(&data,
- IS2_HKO_IP4_TCP_UDP_L4_SPORT,
- sport);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_RNG);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_SPORT_EQ_DPORT,
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_TCP, tcp);
+ vcap_key_l4_port_set(ocelot, &data,
+ VCAP_IS2_HK_L4_DPORT, dport);
+ vcap_key_l4_port_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SPORT, sport);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
sport_eq_dport);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_SEQUENCE_EQ0, seq_zero);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_FIN, tcp_fin);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_SYN, tcp_syn);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_RST, tcp_rst);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_PSH, tcp_psh);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_ACK, tcp_ack);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_URG, tcp_urg);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_DOM);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_VER);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SEQUENCE_EQ0,
+ seq_zero);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_FIN,
+ tcp_fin);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_SYN,
+ tcp_syn);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_RST,
+ tcp_rst);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_PSH,
+ tcp_psh);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_ACK,
+ tcp_ack);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_URG,
+ tcp_urg);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_DOM,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_VER,
+ 0, 0);
} else {
if (msk == 0) {
/* Any IP protocol match */
@@ -543,10 +596,12 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
payload.mask[i] = ip_data->mask[i];
}
}
- VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PROTO, proto.value,
- proto.mask);
- VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PAYLOAD, payload.value,
- payload.mask);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_IP4_L3_PROTO,
+ proto.value, proto.mask);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_L3_PAYLOAD,
+ payload.value, payload.mask);
}
break;
}
@@ -554,19 +609,21 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
default:
type = 0;
type_mask = 0;
- count = (vcap_is2.entry_width / 2);
- for (i = (IS2_HKO_PCP + IS2_HKL_PCP); i < count;
- i += ENTRY_WIDTH) {
- /* Clear entry data */
- vcap_key_set(&data, i, min(32u, count - i), 0, 0);
+ count = vcap_is2->entry_width / 2;
+ /* Iterate over the non-common part of the key and
+ * clear entry data
+ */
+ for (i = ocelot->vcap_is2_keys[VCAP_IS2_HK_L2_DMAC].offset;
+ i < count; i += ENTRY_WIDTH) {
+ vcap_key_field_set(&data, i, min(32u, count - i), 0, 0);
}
break;
}
- VCAP_KEY_SET(TYPE, type, type_mask);
- is2_action_set(&data, ace->action);
- vcap_data_set(data.counter, data.counter_offset, vcap_is2.counter_width,
- ace->stats.pkts);
+ vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask);
+ is2_action_set(ocelot, &data, ace);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap_is2->counter_width, ace->stats.pkts);
/* Write row */
vcap_entry2cache(ocelot, &data);
@@ -574,29 +631,37 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
}
-static void is2_entry_get(struct ocelot_ace_rule *rule, int ix)
+static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule,
+ int ix)
{
- struct ocelot *op = rule->port->ocelot;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
struct vcap_data data;
int row = (ix / 2);
u32 cnt;
- vcap_row_cmd(op, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
- vcap_cache2action(op, &data);
+ vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
+ vcap_cache2action(ocelot, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(&data, ix);
+ is2_data_get(ocelot, &data, ix);
cnt = vcap_data_get(data.counter, data.counter_offset,
- vcap_is2.counter_width);
+ vcap_is2->counter_width);
rule->stats.pkts = cnt;
}
-static void ocelot_ace_rule_add(struct ocelot_acl_block *block,
+static void ocelot_ace_rule_add(struct ocelot *ocelot,
+ struct ocelot_acl_block *block,
struct ocelot_ace_rule *rule)
{
struct ocelot_ace_rule *tmp;
struct list_head *pos, *n;
+ if (rule->action == OCELOT_ACL_ACTION_POLICE) {
+ block->pol_lpr--;
+ rule->pol_ix = block->pol_lpr;
+ ocelot_ace_policer_add(ocelot, rule->pol_ix, &rule->pol);
+ }
+
block->count++;
if (list_empty(&block->rules)) {
@@ -641,29 +706,57 @@ ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
return NULL;
}
-int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *ace;
int i, index;
/* Add rule to the linked list */
- ocelot_ace_rule_add(acl_block, rule);
+ ocelot_ace_rule_add(ocelot, block, rule);
/* Get the index of the inserted rule */
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
+ index = ocelot_ace_rule_get_index_id(block, rule);
/* Move the rules down to make room for the new rule */
- for (i = acl_block->count - 1; i > index; i--) {
- ace = ocelot_ace_rule_get_rule_index(acl_block, i);
- is2_entry_set(rule->port->ocelot, i, ace);
+ for (i = block->count - 1; i > index; i--) {
+ ace = ocelot_ace_rule_get_rule_index(block, i);
+ is2_entry_set(ocelot, i, ace);
}
/* Now insert the new rule */
- is2_entry_set(rule->port->ocelot, index, rule);
+ is2_entry_set(ocelot, index, rule);
return 0;
}
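Rules live in a priority-sorted list and the TCAM is looked up positionally, so insertion rewrites rows bottom-up: every entry below the insertion point is copied one row down before the new rule lands at its index, and no row is clobbered before it has been rewritten. A compact sketch of the pattern — write_row() is a hypothetical stand-in that programs hardware row ix from list entry ix, as is2_entry_set() does above:

/* Positional TCAM insert: the list already holds the new rule at 'index';
 * hardware rows are refreshed from the list, bottom-up.
 */
static void tcam_insert(int count, int index, void (*write_row)(int ix))
{
	int i;

	for (i = count - 1; i > index; i--)
		write_row(i);	/* row i now holds what was row i - 1 */
	write_row(index);	/* finally program the new rule */
}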
-static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
+static void ocelot_ace_police_del(struct ocelot *ocelot,
+ struct ocelot_acl_block *block,
+ u32 ix)
+{
+ struct ocelot_ace_rule *ace;
+ int index = -1;
+
+ if (ix < block->pol_lpr)
+ return;
+
+ list_for_each_entry(ace, &block->rules, list) {
+ index++;
+ if (ace->action == OCELOT_ACL_ACTION_POLICE &&
+ ace->pol_ix < ix) {
+ ace->pol_ix += 1;
+ ocelot_ace_policer_add(ocelot, ace->pol_ix,
+ &ace->pol);
+ is2_entry_set(ocelot, index, ace);
+ }
+ }
+
+ ocelot_ace_policer_del(ocelot, block->pol_lpr);
+ block->pol_lpr++;
+}
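Policer indices are handed out downward from just below OCELOT_POLICER_DISCARD; block->pol_lpr always holds the lowest index in use. Freeing an index in the middle therefore re-packs the range: every policer below ix is re-added one slot up (and its TCAM row rewritten), after which the old lowest slot is disabled and pol_lpr incremented. For example, with indices {N, N+1, N+2} live (pol_lpr = N), deleting N+2 moves N+1 to N+2 and N to N+1, then frees N. A minimal sketch of the allocator, with hypothetical names:

/* Dense downward index allocator, mirroring the pol_lpr handling above */
static int pol_lpr;	/* lowest allocated index; set to TOP at init */

static int pol_ix_alloc(void)
{
	return --pol_lpr;
}

static void pol_ix_free(int ix, void (*move)(int from, int to))
{
	int i;

	/* slide lower entries up over 'ix' to keep the range dense */
	for (i = ix - 1; i >= pol_lpr; i--)
		move(i, i + 1);
	pol_lpr++;
}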
+
+static void ocelot_ace_rule_del(struct ocelot *ocelot,
+ struct ocelot_acl_block *block,
struct ocelot_ace_rule *rule)
{
struct ocelot_ace_rule *tmp;
@@ -672,6 +765,10 @@ static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
list_for_each_safe(pos, q, &block->rules) {
tmp = list_entry(pos, struct ocelot_ace_rule, list);
if (tmp->id == rule->id) {
+ if (tmp->action == OCELOT_ACL_ACTION_POLICE)
+ ocelot_ace_police_del(ocelot, block,
+ tmp->pol_ix);
+
list_del(pos);
kfree(tmp);
}
@@ -680,8 +777,10 @@ static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
block->count--;
}
-int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule del_ace;
struct ocelot_ace_rule *ace;
int i, index;
@@ -689,70 +788,55 @@ int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule)
memset(&del_ace, 0, sizeof(del_ace));
/* Get the index of the rule */
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
+ index = ocelot_ace_rule_get_index_id(block, rule);
/* Delete rule */
- ocelot_ace_rule_del(acl_block, rule);
+ ocelot_ace_rule_del(ocelot, block, rule);
/* Move up all the rules above the deleted rule */
- for (i = index; i < acl_block->count; i++) {
- ace = ocelot_ace_rule_get_rule_index(acl_block, i);
- is2_entry_set(rule->port->ocelot, i, ace);
+ for (i = index; i < block->count; i++) {
+ ace = ocelot_ace_rule_get_rule_index(block, i);
+ is2_entry_set(ocelot, i, ace);
}
/* Now delete the last rule, because it is duplicated */
- is2_entry_set(rule->port->ocelot, acl_block->count, &del_ace);
+ is2_entry_set(ocelot, block->count, &del_ace);
return 0;
}
-int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *tmp;
int index;
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
- is2_entry_get(rule, index);
+ index = ocelot_ace_rule_get_index_id(block, rule);
+ is2_entry_get(ocelot, rule, index);
/* After we get the result, we need to clear the counters */
- tmp = ocelot_ace_rule_get_rule_index(acl_block, index);
+ tmp = ocelot_ace_rule_get_rule_index(block, index);
tmp->stats.pkts = 0;
- is2_entry_set(rule->port->ocelot, index, tmp);
+ is2_entry_set(ocelot, index, tmp);
return 0;
}
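Counters are read-and-clear: the hit count is fetched into the rule, then the same entry is rewritten with pkts = 0, so each stats query reports a delta rather than a running total. A sketch of that semantics — read_row()/write_row() are hypothetical stand-ins for is2_entry_get()/is2_entry_set():

#include <stdint.h>

/* Read-and-clear counters, as in ocelot_ace_rule_stats_update() above */
static uint64_t stats_read_clear(int ix, uint64_t (*read_row)(int ix),
				 void (*write_row)(int ix, uint64_t pkts))
{
	uint64_t pkts = read_row(ix);	/* hits since the previous query */

	write_row(ix, 0);	/* rewrite the row with a zeroed counter */
	return pkts;
}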
-static struct ocelot_acl_block *ocelot_acl_block_create(struct ocelot *ocelot)
-{
- struct ocelot_acl_block *block;
-
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
-
- INIT_LIST_HEAD(&block->rules);
- block->count = 0;
- block->ocelot = ocelot;
-
- return block;
-}
-
-static void ocelot_acl_block_destroy(struct ocelot_acl_block *block)
-{
- kfree(block);
-}
-
int ocelot_ace_init(struct ocelot *ocelot)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct vcap_data data;
memset(&data, 0, sizeof(data));
+
vcap_entry2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2.entry_count, S2_CORE_MV_CFG);
+ ocelot_write(ocelot, vcap_is2->entry_count, S2_CORE_MV_CFG);
vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
vcap_action2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2.action_count, S2_CORE_MV_CFG);
+ ocelot_write(ocelot, vcap_is2->action_count, S2_CORE_MV_CFG);
vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE,
VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
@@ -771,12 +855,9 @@ int ocelot_ace_init(struct ocelot *ocelot)
ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
OCELOT_POLICER_DISCARD);
- acl_block = ocelot_acl_block_create(ocelot);
+ block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
- return 0;
-}
+ INIT_LIST_HEAD(&ocelot->acl_block.rules);
-void ocelot_ace_deinit(void)
-{
- ocelot_acl_block_destroy(acl_block);
+ return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index c08e3e8482e7..29d22c566786 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -7,6 +7,7 @@
#define _MSCC_OCELOT_ACE_H_
#include "ocelot.h"
+#include "ocelot_police.h"
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
@@ -176,6 +177,7 @@ struct ocelot_ace_frame_ipv6 {
enum ocelot_ace_action {
OCELOT_ACL_ACTION_DROP,
OCELOT_ACL_ACTION_TRAP,
+ OCELOT_ACL_ACTION_POLICE,
};
struct ocelot_ace_stats {
@@ -186,14 +188,13 @@ struct ocelot_ace_stats {
struct ocelot_ace_rule {
struct list_head list;
- struct ocelot_port *port;
u16 prio;
u32 id;
enum ocelot_ace_action action;
struct ocelot_ace_stats stats;
- int chip_port;
+ u16 ingress_port_mask;
enum ocelot_vcap_bit dmac_mc;
enum ocelot_vcap_bit dmac_bc;
@@ -209,24 +210,21 @@ struct ocelot_ace_rule {
struct ocelot_ace_frame_ipv4 ipv4;
struct ocelot_ace_frame_ipv6 ipv6;
} frame;
+ struct ocelot_policer pol;
+ u32 pol_ix;
};
-struct ocelot_acl_block {
- struct list_head rules;
- struct ocelot *ocelot;
- int count;
-};
-
-int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule);
-int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule);
-int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
int ocelot_ace_init(struct ocelot *ocelot);
-void ocelot_ace_deinit(void);
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
- struct flow_block_offload *f);
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
- struct flow_block_offload *f);
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+ struct flow_cls_offload *f,
+ bool ingress);
#endif /* _MSCC_OCELOT_ACE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 1135a18019c7..0ac9fbf77a01 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -14,9 +14,14 @@
#include <linux/skbuff.h>
#include <net/switchdev.h>
+#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
+#define VSC7514_VCAP_IS2_CNT 64
+#define VSC7514_VCAP_IS2_ENTRY_WIDTH 376
+#define VSC7514_VCAP_IS2_ACTION_WIDTH 99
+#define VSC7514_VCAP_PORT_CNT 11
static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
{
@@ -211,29 +216,6 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
-static void ocelot_port_pcs_init(struct ocelot *ocelot, int port)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- /* Disable HDX fast control */
- ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
- DEV_PORT_MISC);
-
- /* SGMII only for now */
- ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
- PCS1G_MODE_CFG);
- ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
-
- /* Enable PCS */
- ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
-
- /* No aneg on SGMII */
- ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
-
- /* No loopback */
- ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
-}
-
static int ocelot_reset(struct ocelot *ocelot)
{
int retries = 100;
@@ -258,10 +240,132 @@ static int ocelot_reset(struct ocelot *ocelot)
}
static const struct ocelot_ops ocelot_ops = {
- .pcs_init = ocelot_port_pcs_init,
.reset = ocelot_reset,
};
+static const struct vcap_field vsc7514_vcap_is2_keys[] = {
+ /* Common: 46 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12},
+ [VCAP_IS2_HK_RSV2] = { 25, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 26, 1},
+ [VCAP_IS2_HK_L2_MC] = { 27, 1},
+ [VCAP_IS2_HK_L2_BC] = { 28, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1},
+ [VCAP_IS2_HK_VID] = { 30, 12},
+ [VCAP_IS2_HK_DEI] = { 42, 1},
+ [VCAP_IS2_HK_PCP] = { 43, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 46, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 94, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 46, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 51, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {124, 1},
+ [VCAP_IS2_HK_L4_SPORT] = {125, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {141, 16},
+ [VCAP_IS2_HK_L4_RNG] = {157, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
+ [VCAP_IS2_HK_L4_URG] = {167, 1},
+ [VCAP_IS2_HK_L4_ACK] = {168, 1},
+ [VCAP_IS2_HK_L4_PSH] = {169, 1},
+ [VCAP_IS2_HK_L4_RST] = {170, 1},
+ [VCAP_IS2_HK_L4_SYN] = {171, 1},
+ [VCAP_IS2_HK_L4_FIN] = {172, 1},
+ [VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
+ [VCAP_IS2_HK_L4_1588_VER] = {181, 4},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8},
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7},
+ [VCAP_IS2_HK_OAM_VER] = {149, 5},
+ [VCAP_IS2_HK_OAM_OPCODE] = {154, 8},
+ [VCAP_IS2_HK_OAM_FLAGS] = {162, 8},
+ [VCAP_IS2_HK_OAM_MEPID] = {170, 16},
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1},
+ [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1},
+};
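Each enum entry is an {offset, length} pair locating the field inside the 188-bit half key, so common code can derive bit positions from the per-SoC table instead of the deleted IS2_HKO_x / IS2_HKL_x macro chains; e.g. VCAP_IS2_HK_L4_SPORT occupies bits 125..140. A sketch of the lookup — the struct mirrors the table layout above, the helper is hypothetical:

struct vcap_field { int offset; int length; };	/* as in the table above */

/* Resolve a field's first/last bit from a per-SoC table */
static void field_pos(const struct vcap_field *tbl, int field,
		      int *first, int *last)
{
	*first = tbl[field].offset;
	*last = tbl[field].offset + tbl[field].length - 1;
}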
+
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9},
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS2_ACT_RSV] = { 41, 2},
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
+};
+
+static const struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_IS2] = {
+ .tg_width = 2,
+ .sw_count = 4,
+ .entry_count = VSC7514_VCAP_IS2_CNT,
+ .entry_width = VSC7514_VCAP_IS2_ENTRY_WIDTH,
+ .action_count = VSC7514_VCAP_IS2_CNT +
+ VSC7514_VCAP_PORT_CNT + 2,
+ .action_width = VSC7514_VCAP_IS2_ACTION_WIDTH,
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 49,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .counter_words = 4,
+ .counter_width = 32,
+ },
+};
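These props pin down the IS2 geometry for VSC7514: 64 rows of 376-bit entries split into sw_count = 4 subwords of 94 bits, so a half entry (VCAP_TG_HALF) should span two subwords, i.e. 188 bits — which lines up with the key table above ending at {187, 1}. The action memory holds one entry per row plus one per port and two spares (64 + 11 + 2 = 77). A throwaway arithmetic check under those assumptions:

#include <assert.h>

int main(void)
{
	int entry_width = 376, sw_count = 4;
	int sw_width = entry_width / sw_count;	/* 94-bit subword */
	int half_key = 2 * sw_width;	/* assumed VCAP_TG_HALF span */

	assert(half_key == 188);	/* matches the {187, 1} last field */
	assert(64 + 11 + 2 == 77);	/* action_count */
	return 0;
}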
+
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -349,8 +453,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ptp = 1;
}
- ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */
-
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(&pdev->dev, "no ethernet-ports child node found\n");
@@ -362,9 +464,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
+ ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
+ ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
+ ocelot->vcap = vsc7514_vcap_props;
+
ocelot_init(ocelot);
- ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports,
- OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE);
+ /* No NPI port */
+ ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_NONE);
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 3d65b99b9734..341923311fec 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -8,27 +8,35 @@
#include "ocelot_ace.h"
-struct ocelot_port_block {
- struct ocelot_acl_block *block;
- struct ocelot_port_private *priv;
-};
-
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
- struct ocelot_ace_rule *rule)
+ struct ocelot_ace_rule *ace)
{
const struct flow_action_entry *a;
+ s64 burst;
+ u64 rate;
int i;
- if (f->rule->action.num_entries != 1)
+ if (!flow_offload_has_one_action(&f->rule->action))
+ return -EOPNOTSUPP;
+
+ if (!flow_action_basic_hw_stats_check(&f->rule->action,
+ f->common.extack))
return -EOPNOTSUPP;
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_DROP:
- rule->action = OCELOT_ACL_ACTION_DROP;
+ ace->action = OCELOT_ACL_ACTION_DROP;
break;
case FLOW_ACTION_TRAP:
- rule->action = OCELOT_ACL_ACTION_TRAP;
+ ace->action = OCELOT_ACL_ACTION_TRAP;
+ break;
+ case FLOW_ACTION_POLICE:
+ ace->action = OCELOT_ACL_ACTION_POLICE;
+ rate = a->police.rate_bytes_ps;
+ ace->pol.rate = div_u64(rate, 1000) * 8;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ ace->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC);
break;
default:
return -EOPNOTSUPP;
@@ -39,7 +47,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f,
}
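The police action arrives with rate in bytes per second; ocelot_policer.rate is kept in kbit/s, hence div_u64(rate, 1000) * 8. The burst appears to be converted from a time-based tc quantity into bytes by scaling it with the byte rate (the PSCHED_NS2TICKS / PSCHED_TICKS_PER_SEC pair reduces to seconds). A simplified standalone sketch of the same conversion, with the tick handling folded into seconds — types and names here are illustrative only:

#include <stdint.h>

struct policer_cfg { uint32_t rate_kbps; uint32_t burst_bytes; };

/* tc police parameters -> ocelot policer units (illustrative) */
static struct policer_cfg police_convert(uint64_t rate_bytes_ps,
					 double burst_sec)
{
	struct policer_cfg cfg;

	cfg.rate_kbps = (uint32_t)(rate_bytes_ps / 1000 * 8);
	cfg.burst_bytes = (uint32_t)((double)rate_bytes_ps * burst_sec);
	return cfg;
}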
static int ocelot_flower_parse(struct flow_cls_offload *f,
- struct ocelot_ace_rule *ocelot_rule)
+ struct ocelot_ace_rule *ace)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
@@ -84,14 +92,14 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
- ocelot_rule->type = OCELOT_ACE_TYPE_ETYPE;
- ether_addr_copy(ocelot_rule->frame.etype.dmac.value,
+ ace->type = OCELOT_ACE_TYPE_ETYPE;
+ ether_addr_copy(ace->frame.etype.dmac.value,
match.key->dst);
- ether_addr_copy(ocelot_rule->frame.etype.smac.value,
+ ether_addr_copy(ace->frame.etype.smac.value,
match.key->src);
- ether_addr_copy(ocelot_rule->frame.etype.dmac.mask,
+ ether_addr_copy(ace->frame.etype.dmac.mask,
match.mask->dst);
- ether_addr_copy(ocelot_rule->frame.etype.smac.mask,
+ ether_addr_copy(ace->frame.etype.smac.mask,
match.mask->src);
goto finished_key_parsing;
}
@@ -101,17 +109,17 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
flow_rule_match_basic(rule, &match);
if (ntohs(match.key->n_proto) == ETH_P_IP) {
- ocelot_rule->type = OCELOT_ACE_TYPE_IPV4;
- ocelot_rule->frame.ipv4.proto.value[0] =
+ ace->type = OCELOT_ACE_TYPE_IPV4;
+ ace->frame.ipv4.proto.value[0] =
match.key->ip_proto;
- ocelot_rule->frame.ipv4.proto.mask[0] =
+ ace->frame.ipv4.proto.mask[0] =
match.mask->ip_proto;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
- ocelot_rule->type = OCELOT_ACE_TYPE_IPV6;
- ocelot_rule->frame.ipv6.proto.value[0] =
+ ace->type = OCELOT_ACE_TYPE_IPV6;
+ ace->frame.ipv6.proto.value[0] =
match.key->ip_proto;
- ocelot_rule->frame.ipv6.proto.mask[0] =
+ ace->frame.ipv6.proto.mask[0] =
match.mask->ip_proto;
}
}
@@ -122,16 +130,16 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
u8 *tmp;
flow_rule_match_ipv4_addrs(rule, &match);
- tmp = &ocelot_rule->frame.ipv4.sip.value.addr[0];
+ tmp = &ace->frame.ipv4.sip.value.addr[0];
memcpy(tmp, &match.key->src, 4);
- tmp = &ocelot_rule->frame.ipv4.sip.mask.addr[0];
+ tmp = &ace->frame.ipv4.sip.mask.addr[0];
memcpy(tmp, &match.mask->src, 4);
- tmp = &ocelot_rule->frame.ipv4.dip.value.addr[0];
+ tmp = &ace->frame.ipv4.dip.value.addr[0];
memcpy(tmp, &match.key->dst, 4);
- tmp = &ocelot_rule->frame.ipv4.dip.mask.addr[0];
+ tmp = &ace->frame.ipv4.dip.mask.addr[0];
memcpy(tmp, &match.mask->dst, 4);
}
@@ -144,213 +152,111 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
- ocelot_rule->frame.ipv4.sport.value = ntohs(match.key->src);
- ocelot_rule->frame.ipv4.sport.mask = ntohs(match.mask->src);
- ocelot_rule->frame.ipv4.dport.value = ntohs(match.key->dst);
- ocelot_rule->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+ ace->frame.ipv4.sport.value = ntohs(match.key->src);
+ ace->frame.ipv4.sport.mask = ntohs(match.mask->src);
+ ace->frame.ipv4.dport.value = ntohs(match.key->dst);
+ ace->frame.ipv4.dport.mask = ntohs(match.mask->dst);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- ocelot_rule->type = OCELOT_ACE_TYPE_ANY;
- ocelot_rule->vlan.vid.value = match.key->vlan_id;
- ocelot_rule->vlan.vid.mask = match.mask->vlan_id;
- ocelot_rule->vlan.pcp.value[0] = match.key->vlan_priority;
- ocelot_rule->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ ace->type = OCELOT_ACE_TYPE_ANY;
+ ace->vlan.vid.value = match.key->vlan_id;
+ ace->vlan.vid.mask = match.mask->vlan_id;
+ ace->vlan.pcp.value[0] = match.key->vlan_priority;
+ ace->vlan.pcp.mask[0] = match.mask->vlan_priority;
}
finished_key_parsing:
- ocelot_rule->prio = f->common.prio;
- ocelot_rule->id = f->cookie;
- return ocelot_flower_parse_action(f, ocelot_rule);
+ ace->prio = f->common.prio;
+ ace->id = f->cookie;
+ return ocelot_flower_parse_action(f, ace);
}
static
-struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f,
- struct ocelot_port_block *block)
+struct ocelot_ace_rule *ocelot_ace_rule_create(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
{
- struct ocelot_ace_rule *rule;
+ struct ocelot_ace_rule *ace;
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule)
+ ace = kzalloc(sizeof(*ace), GFP_KERNEL);
+ if (!ace)
return NULL;
- rule->port = &block->priv->port;
- rule->chip_port = block->priv->chip_port;
- return rule;
+ ace->ingress_port_mask = BIT(port);
+ return ace;
}
-static int ocelot_flower_replace(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule *rule;
+ struct ocelot_ace_rule *ace;
int ret;
- rule = ocelot_ace_rule_create(f, port_block);
- if (!rule)
+ ace = ocelot_ace_rule_create(ocelot, port, f);
+ if (!ace)
return -ENOMEM;
- ret = ocelot_flower_parse(f, rule);
+ ret = ocelot_flower_parse(f, ace);
if (ret) {
- kfree(rule);
+ kfree(ace);
return ret;
}
- ret = ocelot_ace_rule_offload_add(rule);
- if (ret)
- return ret;
-
- port_block->priv->tc.offload_cnt++;
- return 0;
+ return ocelot_ace_rule_offload_add(ocelot, ace);
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
-static int ocelot_flower_destroy(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule rule;
- int ret;
-
- rule.prio = f->common.prio;
- rule.port = &port_block->priv->port;
- rule.id = f->cookie;
+ struct ocelot_ace_rule ace;
- ret = ocelot_ace_rule_offload_del(&rule);
- if (ret)
- return ret;
+ ace.prio = f->common.prio;
+ ace.id = f->cookie;
- port_block->priv->tc.offload_cnt--;
- return 0;
+ return ocelot_ace_rule_offload_del(ocelot, &ace);
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
-static int ocelot_flower_stats_update(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule rule;
+ struct ocelot_ace_rule ace;
int ret;
- rule.prio = f->common.prio;
- rule.port = &port_block->priv->port;
- rule.id = f->cookie;
- ret = ocelot_ace_rule_stats_update(&rule);
+ ace.prio = f->common.prio;
+ ace.id = f->cookie;
+ ret = ocelot_ace_rule_stats_update(ocelot, &ace);
if (ret)
return ret;
- flow_stats_update(&f->stats, 0x0, rule.stats.pkts, 0x0);
+ flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats);
-static int ocelot_setup_tc_cls_flower(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+ struct flow_cls_offload *f,
+ bool ingress)
{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if (!ingress)
+ return -EOPNOTSUPP;
+
switch (f->command) {
case FLOW_CLS_REPLACE:
- return ocelot_flower_replace(f, port_block);
+ return ocelot_cls_flower_replace(ocelot, port, f, ingress);
case FLOW_CLS_DESTROY:
- return ocelot_flower_destroy(f, port_block);
+ return ocelot_cls_flower_destroy(ocelot, port, f, ingress);
case FLOW_CLS_STATS:
- return ocelot_flower_stats_update(f, port_block);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
- void *type_data, void *cb_priv)
-{
- struct ocelot_port_block *port_block = cb_priv;
-
- if (!tc_cls_can_offload_and_chain0(port_block->priv->dev, type_data))
- return -EOPNOTSUPP;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return ocelot_setup_tc_cls_flower(type_data, cb_priv);
- case TC_SETUP_CLSMATCHALL:
- return 0;
+ return ocelot_cls_flower_stats(ocelot, port, f, ingress);
default:
return -EOPNOTSUPP;
}
}
-
-static struct ocelot_port_block*
-ocelot_port_block_create(struct ocelot_port_private *priv)
-{
- struct ocelot_port_block *port_block;
-
- port_block = kzalloc(sizeof(*port_block), GFP_KERNEL);
- if (!port_block)
- return NULL;
-
- port_block->priv = priv;
-
- return port_block;
-}
-
-static void ocelot_port_block_destroy(struct ocelot_port_block *block)
-{
- kfree(block);
-}
-
-static void ocelot_tc_block_unbind(void *cb_priv)
-{
- struct ocelot_port_block *port_block = cb_priv;
-
- ocelot_port_block_destroy(port_block);
-}
-
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
- struct flow_block_offload *f)
-{
- struct ocelot_port_block *port_block;
- struct flow_block_cb *block_cb;
- int ret;
-
- if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- return -EOPNOTSUPP;
-
- block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, priv);
- if (!block_cb) {
- port_block = ocelot_port_block_create(priv);
- if (!port_block)
- return -ENOMEM;
-
- block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
- priv, port_block,
- ocelot_tc_block_unbind);
- if (IS_ERR(block_cb)) {
- ret = PTR_ERR(block_cb);
- goto err_cb_register;
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, f->driver_block_list);
- } else {
- port_block = flow_block_cb_priv(block_cb);
- }
-
- flow_block_cb_incref(block_cb);
- return 0;
-
-err_cb_register:
- ocelot_port_block_destroy(port_block);
-
- return ret;
-}
-
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
- struct flow_block_offload *f)
-{
- struct flow_block_cb *block_cb;
-
- block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, priv);
- if (!block_cb)
- return;
-
- if (!flow_block_cb_decref(block_cb)) {
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- }
-}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index faddce43f2e3..2e1d8e187332 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -4,6 +4,7 @@
* Copyright (c) 2019 Microsemi Corporation
*/
+#include <soc/mscc/ocelot.h>
#include "ocelot_police.h"
enum mscc_qos_rate_mode {
@@ -203,6 +204,7 @@ int ocelot_port_policer_add(struct ocelot *ocelot, int port,
return 0;
}
+EXPORT_SYMBOL(ocelot_port_policer_add);
int ocelot_port_policer_del(struct ocelot *ocelot, int port)
{
@@ -225,3 +227,28 @@ int ocelot_port_policer_del(struct ocelot *ocelot, int port)
return 0;
}
+EXPORT_SYMBOL(ocelot_port_policer_del);
+
+int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol)
+{
+ struct qos_policer_conf pp = { 0 };
+
+ if (!pol)
+ return -EINVAL;
+
+ pp.mode = MSCC_QOS_RATE_MODE_DATA;
+ pp.pir = pol->rate;
+ pp.pbs = pol->burst;
+
+ return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+}
+
+int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix)
+{
+ struct qos_policer_conf pp = { 0 };
+
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+
+ return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+}
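Both helpers funnel into qos_policer_conf_set() on port 0 with an explicit policer index: add programs a data-rate policer from the rule's rate/burst, del switches the slot back to MSCC_QOS_RATE_MODE_DISABLED. A minimal usage sketch with illustrative numbers — in the driver the index would come from the block->pol_lpr allocator:

static int ace_policer_example(struct ocelot *ocelot, u32 pol_ix)
{
	struct ocelot_policer pol = {
		.rate = 10000,	/* kbit/s */
		.burst = 10000,	/* bytes */
	};
	int err;

	err = ocelot_ace_policer_add(ocelot, pol_ix, &pol);
	if (err)
		return err;

	/* ... rule lifetime ... */

	return ocelot_ace_policer_del(ocelot, pol_ix);
}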
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index ae9509229463..792abd28010a 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -9,14 +9,9 @@
#include "ocelot.h"
-struct ocelot_policer {
- u32 rate; /* kilobit per second */
- u32 burst; /* bytes */
-};
+int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol);
-int ocelot_port_policer_add(struct ocelot *ocelot, int port,
- struct ocelot_policer *pol);
-
-int ocelot_port_policer_del(struct ocelot *ocelot, int port);
+int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix);
#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index b88b5899b227..7d4fd1b6adda 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
ocelot->shared_queue_sz = 224 * 1024;
+ ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
ret = ocelot_regfields_init(ocelot, ocelot_regfields);
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index a4f7fbd76507..d326e231f0ad 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -4,8 +4,8 @@
* Copyright (c) 2019 Microsemi Corporation
*/
+#include <soc/mscc/ocelot.h>
#include "ocelot_tc.h"
-#include "ocelot_police.h"
#include "ocelot_ace.h"
#include <net/pkt_cls.h>
@@ -20,9 +20,6 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
int port = priv->chip_port;
int err;
- netdev_dbg(priv->dev, "%s: port %u command %d cookie %lu\n",
- __func__, port, f->command, f->cookie);
-
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
return -EOPNOTSUPP;
@@ -99,17 +96,10 @@ static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSMATCHALL:
- netdev_dbg(priv->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
- ingress ? "ingress" : "egress");
-
return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
case TC_SETUP_CLSFLOWER:
- return 0;
+ return ocelot_setup_tc_cls_flower(priv, type_data, ingress);
default:
- netdev_dbg(priv->dev, "tc_block_cb: type %d %s\n",
- type,
- ingress ? "ingress" : "egress");
-
return -EOPNOTSUPP;
}
}
@@ -137,10 +127,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- int err;
-
- netdev_dbg(priv->dev, "tc_block command %d, binder_type %d\n",
- f->command, f->binder_type);
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = ocelot_setup_tc_block_cb_ig;
@@ -162,11 +148,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
- err = ocelot_setup_tc_block_flower_bind(priv, f);
- if (err < 0) {
- flow_block_cb_free(block_cb);
- return err;
- }
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
@@ -175,7 +156,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
if (!block_cb)
return -ENOENT;
- ocelot_setup_tc_block_flower_unbind(priv, f);
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
deleted file mode 100644
index e22eac1da783..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_vcap.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
- * Microsemi Ocelot Switch driver
- * Copyright (c) 2019 Microsemi Corporation
- */
-
-#ifndef _OCELOT_VCAP_H_
-#define _OCELOT_VCAP_H_
-
-/* =================================================================
- * VCAP Common
- * =================================================================
- */
-
-/* VCAP Type-Group values */
-#define VCAP_TG_NONE 0 /* Entry is invalid */
-#define VCAP_TG_FULL 1 /* Full entry */
-#define VCAP_TG_HALF 2 /* Half entry */
-#define VCAP_TG_QUARTER 3 /* Quarter entry */
-
-/* =================================================================
- * VCAP IS2
- * =================================================================
- */
-
-#define VCAP_IS2_CNT 64
-#define VCAP_IS2_ENTRY_WIDTH 376
-#define VCAP_IS2_ACTION_WIDTH 99
-#define VCAP_PORT_CNT 11
-
-/* IS2 half key types */
-#define IS2_TYPE_ETYPE 0
-#define IS2_TYPE_LLC 1
-#define IS2_TYPE_SNAP 2
-#define IS2_TYPE_ARP 3
-#define IS2_TYPE_IP_UDP_TCP 4
-#define IS2_TYPE_IP_OTHER 5
-#define IS2_TYPE_IPV6 6
-#define IS2_TYPE_OAM 7
-#define IS2_TYPE_SMAC_SIP6 8
-#define IS2_TYPE_ANY 100 /* Pseudo type */
-
-/* IS2 half key type mask for matching any IP */
-#define IS2_TYPE_MASK_IP_ANY 0xe
-
-/* IS2 action types */
-#define IS2_ACTION_TYPE_NORMAL 0
-#define IS2_ACTION_TYPE_SMAC_SIP 1
-
-/* IS2 MASK_MODE values */
-#define IS2_ACT_MASK_MODE_NONE 0
-#define IS2_ACT_MASK_MODE_FILTER 1
-#define IS2_ACT_MASK_MODE_POLICY 2
-#define IS2_ACT_MASK_MODE_REDIR 3
-
-/* IS2 REW_OP values */
-#define IS2_ACT_REW_OP_NONE 0
-#define IS2_ACT_REW_OP_PTP_ONE 2
-#define IS2_ACT_REW_OP_PTP_TWO 3
-#define IS2_ACT_REW_OP_SPECIAL 8
-#define IS2_ACT_REW_OP_PTP_ORG 9
-#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3))
-#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3))
-#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5))
-#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7)
-
-#define VCAP_PORT_WIDTH 4
-
-/* IS2 quarter key - SMAC_SIP4 */
-#define IS2_QKO_IGR_PORT 0
-#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH
-#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT)
-#define IS2_QKL_L2_SMAC 48
-#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC)
-#define IS2_QKL_L3_IP4_SIP 32
-
-/* IS2 half key - common */
-#define IS2_HKO_TYPE 0
-#define IS2_HKL_TYPE 4
-#define IS2_HKO_FIRST (IS2_HKO_TYPE + IS2_HKL_TYPE)
-#define IS2_HKL_FIRST 1
-#define IS2_HKO_PAG (IS2_HKO_FIRST + IS2_HKL_FIRST)
-#define IS2_HKL_PAG 8
-#define IS2_HKO_IGR_PORT_MASK (IS2_HKO_PAG + IS2_HKL_PAG)
-#define IS2_HKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
-#define IS2_HKO_SERVICE_FRM (IS2_HKO_IGR_PORT_MASK + IS2_HKL_IGR_PORT_MASK)
-#define IS2_HKL_SERVICE_FRM 1
-#define IS2_HKO_HOST_MATCH (IS2_HKO_SERVICE_FRM + IS2_HKL_SERVICE_FRM)
-#define IS2_HKL_HOST_MATCH 1
-#define IS2_HKO_L2_MC (IS2_HKO_HOST_MATCH + IS2_HKL_HOST_MATCH)
-#define IS2_HKL_L2_MC 1
-#define IS2_HKO_L2_BC (IS2_HKO_L2_MC + IS2_HKL_L2_MC)
-#define IS2_HKL_L2_BC 1
-#define IS2_HKO_VLAN_TAGGED (IS2_HKO_L2_BC + IS2_HKL_L2_BC)
-#define IS2_HKL_VLAN_TAGGED 1
-#define IS2_HKO_VID (IS2_HKO_VLAN_TAGGED + IS2_HKL_VLAN_TAGGED)
-#define IS2_HKL_VID 12
-#define IS2_HKO_DEI (IS2_HKO_VID + IS2_HKL_VID)
-#define IS2_HKL_DEI 1
-#define IS2_HKO_PCP (IS2_HKO_DEI + IS2_HKL_DEI)
-#define IS2_HKL_PCP 3
-
-/* IS2 half key - MAC_ETYPE/MAC_LLC/MAC_SNAP/OAM common */
-#define IS2_HKO_L2_DMAC (IS2_HKO_PCP + IS2_HKL_PCP)
-#define IS2_HKL_L2_DMAC 48
-#define IS2_HKO_L2_SMAC (IS2_HKO_L2_DMAC + IS2_HKL_L2_DMAC)
-#define IS2_HKL_L2_SMAC 48
-
-/* IS2 half key - MAC_ETYPE */
-#define IS2_HKO_MAC_ETYPE_ETYPE (IS2_HKO_L2_SMAC + IS2_HKL_L2_SMAC)
-#define IS2_HKL_MAC_ETYPE_ETYPE 16
-#define IS2_HKO_MAC_ETYPE_L2_PAYLOAD \
- (IS2_HKO_MAC_ETYPE_ETYPE + IS2_HKL_MAC_ETYPE_ETYPE)
-#define IS2_HKL_MAC_ETYPE_L2_PAYLOAD 27
-
-/* IS2 half key - MAC_LLC */
-#define IS2_HKO_MAC_LLC_L2_LLC IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_MAC_LLC_L2_LLC 40
-
-/* IS2 half key - MAC_SNAP */
-#define IS2_HKO_MAC_SNAP_L2_SNAP IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_MAC_SNAP_L2_SNAP 40
-
-/* IS2 half key - ARP */
-#define IS2_HKO_MAC_ARP_L2_SMAC IS2_HKO_L2_DMAC
-#define IS2_HKL_MAC_ARP_L2_SMAC 48
-#define IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK \
- (IS2_HKO_MAC_ARP_L2_SMAC + IS2_HKL_MAC_ARP_L2_SMAC)
-#define IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK 1
-#define IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK \
- (IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK + IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK)
-#define IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK 1
-#define IS2_HKO_MAC_ARP_ARP_LEN_OK \
- (IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK + \
- IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK)
-#define IS2_HKL_MAC_ARP_ARP_LEN_OK 1
-#define IS2_HKO_MAC_ARP_ARP_TGT_MATCH \
- (IS2_HKO_MAC_ARP_ARP_LEN_OK + IS2_HKL_MAC_ARP_ARP_LEN_OK)
-#define IS2_HKL_MAC_ARP_ARP_TGT_MATCH 1
-#define IS2_HKO_MAC_ARP_ARP_SENDER_MATCH \
- (IS2_HKO_MAC_ARP_ARP_TGT_MATCH + IS2_HKL_MAC_ARP_ARP_TGT_MATCH)
-#define IS2_HKL_MAC_ARP_ARP_SENDER_MATCH 1
-#define IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN \
- (IS2_HKO_MAC_ARP_ARP_SENDER_MATCH + IS2_HKL_MAC_ARP_ARP_SENDER_MATCH)
-#define IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN 1
-#define IS2_HKO_MAC_ARP_ARP_OPCODE \
- (IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN + \
- IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN)
-#define IS2_HKL_MAC_ARP_ARP_OPCODE 2
-#define IS2_HKO_MAC_ARP_L3_IP4_DIP \
- (IS2_HKO_MAC_ARP_ARP_OPCODE + IS2_HKL_MAC_ARP_ARP_OPCODE)
-#define IS2_HKL_MAC_ARP_L3_IP4_DIP 32
-#define IS2_HKO_MAC_ARP_L3_IP4_SIP \
- (IS2_HKO_MAC_ARP_L3_IP4_DIP + IS2_HKL_MAC_ARP_L3_IP4_DIP)
-#define IS2_HKL_MAC_ARP_L3_IP4_SIP 32
-#define IS2_HKO_MAC_ARP_DIP_EQ_SIP \
- (IS2_HKO_MAC_ARP_L3_IP4_SIP + IS2_HKL_MAC_ARP_L3_IP4_SIP)
-#define IS2_HKL_MAC_ARP_DIP_EQ_SIP 1
-
-/* IS2 half key - IP4_TCP_UDP/IP4_OTHER common */
-#define IS2_HKO_IP4 IS2_HKO_L2_DMAC
-#define IS2_HKL_IP4 1
-#define IS2_HKO_L3_FRAGMENT (IS2_HKO_IP4 + IS2_HKL_IP4)
-#define IS2_HKL_L3_FRAGMENT 1
-#define IS2_HKO_L3_FRAG_OFS_GT0 (IS2_HKO_L3_FRAGMENT + IS2_HKL_L3_FRAGMENT)
-#define IS2_HKL_L3_FRAG_OFS_GT0 1
-#define IS2_HKO_L3_OPTIONS (IS2_HKO_L3_FRAG_OFS_GT0 + IS2_HKL_L3_FRAG_OFS_GT0)
-#define IS2_HKL_L3_OPTIONS 1
-#define IS2_HKO_L3_TTL_GT0 (IS2_HKO_L3_OPTIONS + IS2_HKL_L3_OPTIONS)
-#define IS2_HKL_L3_TTL_GT0 1
-#define IS2_HKO_L3_TOS (IS2_HKO_L3_TTL_GT0 + IS2_HKL_L3_TTL_GT0)
-#define IS2_HKL_L3_TOS 8
-#define IS2_HKO_L3_IP4_DIP (IS2_HKO_L3_TOS + IS2_HKL_L3_TOS)
-#define IS2_HKL_L3_IP4_DIP 32
-#define IS2_HKO_L3_IP4_SIP (IS2_HKO_L3_IP4_DIP + IS2_HKL_L3_IP4_DIP)
-#define IS2_HKL_L3_IP4_SIP 32
-#define IS2_HKO_DIP_EQ_SIP (IS2_HKO_L3_IP4_SIP + IS2_HKL_L3_IP4_SIP)
-#define IS2_HKL_DIP_EQ_SIP 1
-
-/* IS2 half key - IP4_TCP_UDP */
-#define IS2_HKO_IP4_TCP_UDP_TCP (IS2_HKO_DIP_EQ_SIP + IS2_HKL_DIP_EQ_SIP)
-#define IS2_HKL_IP4_TCP_UDP_TCP 1
-#define IS2_HKO_IP4_TCP_UDP_L4_DPORT \
- (IS2_HKO_IP4_TCP_UDP_TCP + IS2_HKL_IP4_TCP_UDP_TCP)
-#define IS2_HKL_IP4_TCP_UDP_L4_DPORT 16
-#define IS2_HKO_IP4_TCP_UDP_L4_SPORT \
- (IS2_HKO_IP4_TCP_UDP_L4_DPORT + IS2_HKL_IP4_TCP_UDP_L4_DPORT)
-#define IS2_HKL_IP4_TCP_UDP_L4_SPORT 16
-#define IS2_HKO_IP4_TCP_UDP_L4_RNG \
- (IS2_HKO_IP4_TCP_UDP_L4_SPORT + IS2_HKL_IP4_TCP_UDP_L4_SPORT)
-#define IS2_HKL_IP4_TCP_UDP_L4_RNG 8
-#define IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT \
- (IS2_HKO_IP4_TCP_UDP_L4_RNG + IS2_HKL_IP4_TCP_UDP_L4_RNG)
-#define IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT 1
-#define IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 \
- (IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT + \
- IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT)
-#define IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0 1
-#define IS2_HKO_IP4_TCP_UDP_L4_FIN \
- (IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 + IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0)
-#define IS2_HKL_IP4_TCP_UDP_L4_FIN 1
-#define IS2_HKO_IP4_TCP_UDP_L4_SYN \
- (IS2_HKO_IP4_TCP_UDP_L4_FIN + IS2_HKL_IP4_TCP_UDP_L4_FIN)
-#define IS2_HKL_IP4_TCP_UDP_L4_SYN 1
-#define IS2_HKO_IP4_TCP_UDP_L4_RST \
- (IS2_HKO_IP4_TCP_UDP_L4_SYN + IS2_HKL_IP4_TCP_UDP_L4_SYN)
-#define IS2_HKL_IP4_TCP_UDP_L4_RST 1
-#define IS2_HKO_IP4_TCP_UDP_L4_PSH \
- (IS2_HKO_IP4_TCP_UDP_L4_RST + IS2_HKL_IP4_TCP_UDP_L4_RST)
-#define IS2_HKL_IP4_TCP_UDP_L4_PSH 1
-#define IS2_HKO_IP4_TCP_UDP_L4_ACK \
- (IS2_HKO_IP4_TCP_UDP_L4_PSH + IS2_HKL_IP4_TCP_UDP_L4_PSH)
-#define IS2_HKL_IP4_TCP_UDP_L4_ACK 1
-#define IS2_HKO_IP4_TCP_UDP_L4_URG \
- (IS2_HKO_IP4_TCP_UDP_L4_ACK + IS2_HKL_IP4_TCP_UDP_L4_ACK)
-#define IS2_HKL_IP4_TCP_UDP_L4_URG 1
-#define IS2_HKO_IP4_TCP_UDP_L4_1588_DOM \
- (IS2_HKO_IP4_TCP_UDP_L4_URG + IS2_HKL_IP4_TCP_UDP_L4_URG)
-#define IS2_HKL_IP4_TCP_UDP_L4_1588_DOM 8
-#define IS2_HKO_IP4_TCP_UDP_L4_1588_VER \
- (IS2_HKO_IP4_TCP_UDP_L4_1588_DOM + IS2_HKL_IP4_TCP_UDP_L4_1588_DOM)
-#define IS2_HKL_IP4_TCP_UDP_L4_1588_VER 4
-
-/* IS2 half key - IP4_OTHER */
-#define IS2_HKO_IP4_OTHER_L3_PROTO IS2_HKO_IP4_TCP_UDP_TCP
-#define IS2_HKL_IP4_OTHER_L3_PROTO 8
-#define IS2_HKO_IP4_OTHER_L3_PAYLOAD \
- (IS2_HKO_IP4_OTHER_L3_PROTO + IS2_HKL_IP4_OTHER_L3_PROTO)
-#define IS2_HKL_IP4_OTHER_L3_PAYLOAD 56
-
-/* IS2 half key - IP6_STD */
-#define IS2_HKO_IP6_STD_L3_TTL_GT0 IS2_HKO_L2_DMAC
-#define IS2_HKL_IP6_STD_L3_TTL_GT0 1
-#define IS2_HKO_IP6_STD_L3_IP6_SIP \
- (IS2_HKO_IP6_STD_L3_TTL_GT0 + IS2_HKL_IP6_STD_L3_TTL_GT0)
-#define IS2_HKL_IP6_STD_L3_IP6_SIP 128
-#define IS2_HKO_IP6_STD_L3_PROTO \
- (IS2_HKO_IP6_STD_L3_IP6_SIP + IS2_HKL_IP6_STD_L3_IP6_SIP)
-#define IS2_HKL_IP6_STD_L3_PROTO 8
-
-/* IS2 half key - OAM */
-#define IS2_HKO_OAM_OAM_MEL_FLAGS IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_OAM_OAM_MEL_FLAGS 7
-#define IS2_HKO_OAM_OAM_VER \
- (IS2_HKO_OAM_OAM_MEL_FLAGS + IS2_HKL_OAM_OAM_MEL_FLAGS)
-#define IS2_HKL_OAM_OAM_VER 5
-#define IS2_HKO_OAM_OAM_OPCODE (IS2_HKO_OAM_OAM_VER + IS2_HKL_OAM_OAM_VER)
-#define IS2_HKL_OAM_OAM_OPCODE 8
-#define IS2_HKO_OAM_OAM_FLAGS (IS2_HKO_OAM_OAM_OPCODE + IS2_HKL_OAM_OAM_OPCODE)
-#define IS2_HKL_OAM_OAM_FLAGS 8
-#define IS2_HKO_OAM_OAM_MEPID (IS2_HKO_OAM_OAM_FLAGS + IS2_HKL_OAM_OAM_FLAGS)
-#define IS2_HKL_OAM_OAM_MEPID 16
-#define IS2_HKO_OAM_OAM_CCM_CNTS_EQ0 \
- (IS2_HKO_OAM_OAM_MEPID + IS2_HKL_OAM_OAM_MEPID)
-#define IS2_HKL_OAM_OAM_CCM_CNTS_EQ0 1
-
-/* IS2 half key - SMAC_SIP6 */
-#define IS2_HKO_SMAC_SIP6_IGR_PORT IS2_HKL_TYPE
-#define IS2_HKL_SMAC_SIP6_IGR_PORT VCAP_PORT_WIDTH
-#define IS2_HKO_SMAC_SIP6_L2_SMAC \
- (IS2_HKO_SMAC_SIP6_IGR_PORT + IS2_HKL_SMAC_SIP6_IGR_PORT)
-#define IS2_HKL_SMAC_SIP6_L2_SMAC 48
-#define IS2_HKO_SMAC_SIP6_L3_IP6_SIP \
- (IS2_HKO_SMAC_SIP6_L2_SMAC + IS2_HKL_SMAC_SIP6_L2_SMAC)
-#define IS2_HKL_SMAC_SIP6_L3_IP6_SIP 128
-
-/* IS2 full key - common */
-#define IS2_FKO_TYPE 0
-#define IS2_FKL_TYPE 2
-#define IS2_FKO_FIRST (IS2_FKO_TYPE + IS2_FKL_TYPE)
-#define IS2_FKL_FIRST 1
-#define IS2_FKO_PAG (IS2_FKO_FIRST + IS2_FKL_FIRST)
-#define IS2_FKL_PAG 8
-#define IS2_FKO_IGR_PORT_MASK (IS2_FKO_PAG + IS2_FKL_PAG)
-#define IS2_FKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
-#define IS2_FKO_SERVICE_FRM (IS2_FKO_IGR_PORT_MASK + IS2_FKL_IGR_PORT_MASK)
-#define IS2_FKL_SERVICE_FRM 1
-#define IS2_FKO_HOST_MATCH (IS2_FKO_SERVICE_FRM + IS2_FKL_SERVICE_FRM)
-#define IS2_FKL_HOST_MATCH 1
-#define IS2_FKO_L2_MC (IS2_FKO_HOST_MATCH + IS2_FKL_HOST_MATCH)
-#define IS2_FKL_L2_MC 1
-#define IS2_FKO_L2_BC (IS2_FKO_L2_MC + IS2_FKL_L2_MC)
-#define IS2_FKL_L2_BC 1
-#define IS2_FKO_VLAN_TAGGED (IS2_FKO_L2_BC + IS2_FKL_L2_BC)
-#define IS2_FKL_VLAN_TAGGED 1
-#define IS2_FKO_VID (IS2_FKO_VLAN_TAGGED + IS2_FKL_VLAN_TAGGED)
-#define IS2_FKL_VID 12
-#define IS2_FKO_DEI (IS2_FKO_VID + IS2_FKL_VID)
-#define IS2_FKL_DEI 1
-#define IS2_FKO_PCP (IS2_FKO_DEI + IS2_FKL_DEI)
-#define IS2_FKL_PCP 3
-
-/* IS2 full key - IP6_TCP_UDP/IP6_OTHER common */
-#define IS2_FKO_L3_TTL_GT0 (IS2_FKO_PCP + IS2_FKL_PCP)
-#define IS2_FKL_L3_TTL_GT0 1
-#define IS2_FKO_L3_TOS (IS2_FKO_L3_TTL_GT0 + IS2_FKL_L3_TTL_GT0)
-#define IS2_FKL_L3_TOS 8
-#define IS2_FKO_L3_IP6_DIP (IS2_FKO_L3_TOS + IS2_FKL_L3_TOS)
-#define IS2_FKL_L3_IP6_DIP 128
-#define IS2_FKO_L3_IP6_SIP (IS2_FKO_L3_IP6_DIP + IS2_FKL_L3_IP6_DIP)
-#define IS2_FKL_L3_IP6_SIP 128
-#define IS2_FKO_DIP_EQ_SIP (IS2_FKO_L3_IP6_SIP + IS2_FKL_L3_IP6_SIP)
-#define IS2_FKL_DIP_EQ_SIP 1
-
-/* IS2 full key - IP6_TCP_UDP */
-#define IS2_FKO_IP6_TCP_UDP_TCP (IS2_FKO_DIP_EQ_SIP + IS2_FKL_DIP_EQ_SIP)
-#define IS2_FKL_IP6_TCP_UDP_TCP 1
-#define IS2_FKO_IP6_TCP_UDP_L4_DPORT \
- (IS2_FKO_IP6_TCP_UDP_TCP + IS2_FKL_IP6_TCP_UDP_TCP)
-#define IS2_FKL_IP6_TCP_UDP_L4_DPORT 16
-#define IS2_FKO_IP6_TCP_UDP_L4_SPORT \
- (IS2_FKO_IP6_TCP_UDP_L4_DPORT + IS2_FKL_IP6_TCP_UDP_L4_DPORT)
-#define IS2_FKL_IP6_TCP_UDP_L4_SPORT 16
-#define IS2_FKO_IP6_TCP_UDP_L4_RNG \
- (IS2_FKO_IP6_TCP_UDP_L4_SPORT + IS2_FKL_IP6_TCP_UDP_L4_SPORT)
-#define IS2_FKL_IP6_TCP_UDP_L4_RNG 8
-#define IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT \
- (IS2_FKO_IP6_TCP_UDP_L4_RNG + IS2_FKL_IP6_TCP_UDP_L4_RNG)
-#define IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT 1
-#define IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 \
- (IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT + \
- IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT)
-#define IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0 1
-#define IS2_FKO_IP6_TCP_UDP_L4_FIN \
- (IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 + IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0)
-#define IS2_FKL_IP6_TCP_UDP_L4_FIN 1
-#define IS2_FKO_IP6_TCP_UDP_L4_SYN \
- (IS2_FKO_IP6_TCP_UDP_L4_FIN + IS2_FKL_IP6_TCP_UDP_L4_FIN)
-#define IS2_FKL_IP6_TCP_UDP_L4_SYN 1
-#define IS2_FKO_IP6_TCP_UDP_L4_RST \
- (IS2_FKO_IP6_TCP_UDP_L4_SYN + IS2_FKL_IP6_TCP_UDP_L4_SYN)
-#define IS2_FKL_IP6_TCP_UDP_L4_RST 1
-#define IS2_FKO_IP6_TCP_UDP_L4_PSH \
- (IS2_FKO_IP6_TCP_UDP_L4_RST + IS2_FKL_IP6_TCP_UDP_L4_RST)
-#define IS2_FKL_IP6_TCP_UDP_L4_PSH 1
-#define IS2_FKO_IP6_TCP_UDP_L4_ACK \
- (IS2_FKO_IP6_TCP_UDP_L4_PSH + IS2_FKL_IP6_TCP_UDP_L4_PSH)
-#define IS2_FKL_IP6_TCP_UDP_L4_ACK 1
-#define IS2_FKO_IP6_TCP_UDP_L4_URG \
- (IS2_FKO_IP6_TCP_UDP_L4_ACK + IS2_FKL_IP6_TCP_UDP_L4_ACK)
-#define IS2_FKL_IP6_TCP_UDP_L4_URG 1
-#define IS2_FKO_IP6_TCP_UDP_L4_1588_DOM \
- (IS2_FKO_IP6_TCP_UDP_L4_URG + IS2_FKL_IP6_TCP_UDP_L4_URG)
-#define IS2_FKL_IP6_TCP_UDP_L4_1588_DOM 8
-#define IS2_FKO_IP6_TCP_UDP_L4_1588_VER \
- (IS2_FKO_IP6_TCP_UDP_L4_1588_DOM + IS2_FKL_IP6_TCP_UDP_L4_1588_DOM)
-#define IS2_FKL_IP6_TCP_UDP_L4_1588_VER 4
-
-/* IS2 full key - IP6_OTHER */
-#define IS2_FKO_IP6_OTHER_L3_PROTO IS2_FKO_IP6_TCP_UDP_TCP
-#define IS2_FKL_IP6_OTHER_L3_PROTO 8
-#define IS2_FKO_IP6_OTHER_L3_PAYLOAD \
- (IS2_FKO_IP6_OTHER_L3_PROTO + IS2_FKL_IP6_OTHER_L3_PROTO)
-#define IS2_FKL_IP6_OTHER_L3_PAYLOAD 56
-
-/* IS2 full key - CUSTOM */
-#define IS2_FKO_CUSTOM_CUSTOM_TYPE IS2_FKO_L3_TTL_GT0
-#define IS2_FKL_CUSTOM_CUSTOM_TYPE 1
-#define IS2_FKO_CUSTOM_CUSTOM \
- (IS2_FKO_CUSTOM_CUSTOM_TYPE + IS2_FKL_CUSTOM_CUSTOM_TYPE)
-#define IS2_FKL_CUSTOM_CUSTOM 320
-
-/* IS2 action - BASE_TYPE */
-#define IS2_AO_HIT_ME_ONCE 0
-#define IS2_AL_HIT_ME_ONCE 1
-#define IS2_AO_CPU_COPY_ENA (IS2_AO_HIT_ME_ONCE + IS2_AL_HIT_ME_ONCE)
-#define IS2_AL_CPU_COPY_ENA 1
-#define IS2_AO_CPU_QU_NUM (IS2_AO_CPU_COPY_ENA + IS2_AL_CPU_COPY_ENA)
-#define IS2_AL_CPU_QU_NUM 3
-#define IS2_AO_MASK_MODE (IS2_AO_CPU_QU_NUM + IS2_AL_CPU_QU_NUM)
-#define IS2_AL_MASK_MODE 2
-#define IS2_AO_MIRROR_ENA (IS2_AO_MASK_MODE + IS2_AL_MASK_MODE)
-#define IS2_AL_MIRROR_ENA 1
-#define IS2_AO_LRN_DIS (IS2_AO_MIRROR_ENA + IS2_AL_MIRROR_ENA)
-#define IS2_AL_LRN_DIS 1
-#define IS2_AO_POLICE_ENA (IS2_AO_LRN_DIS + IS2_AL_LRN_DIS)
-#define IS2_AL_POLICE_ENA 1
-#define IS2_AO_POLICE_IDX (IS2_AO_POLICE_ENA + IS2_AL_POLICE_ENA)
-#define IS2_AL_POLICE_IDX 9
-#define IS2_AO_POLICE_VCAP_ONLY (IS2_AO_POLICE_IDX + IS2_AL_POLICE_IDX)
-#define IS2_AL_POLICE_VCAP_ONLY 1
-#define IS2_AO_PORT_MASK (IS2_AO_POLICE_VCAP_ONLY + IS2_AL_POLICE_VCAP_ONLY)
-#define IS2_AL_PORT_MASK VCAP_PORT_CNT
-#define IS2_AO_REW_OP (IS2_AO_PORT_MASK + IS2_AL_PORT_MASK)
-#define IS2_AL_REW_OP 9
-#define IS2_AO_LM_CNT_DIS (IS2_AO_REW_OP + IS2_AL_REW_OP)
-#define IS2_AL_LM_CNT_DIS 1
-#define IS2_AO_ISDX_ENA \
- (IS2_AO_LM_CNT_DIS + IS2_AL_LM_CNT_DIS + 1) /* Reserved bit */
-#define IS2_AL_ISDX_ENA 1
-#define IS2_AO_ACL_ID (IS2_AO_ISDX_ENA + IS2_AL_ISDX_ENA)
-#define IS2_AL_ACL_ID 6
-
-/* IS2 action - SMAC_SIP */
-#define IS2_AO_SMAC_SIP_CPU_COPY_ENA 0
-#define IS2_AL_SMAC_SIP_CPU_COPY_ENA 1
-#define IS2_AO_SMAC_SIP_CPU_QU_NUM 1
-#define IS2_AL_SMAC_SIP_CPU_QU_NUM 3
-#define IS2_AO_SMAC_SIP_FWD_KILL_ENA 4
-#define IS2_AL_SMAC_SIP_FWD_KILL_ENA 1
-#define IS2_AO_SMAC_SIP_HOST_MATCH 5
-#define IS2_AL_SMAC_SIP_HOST_MATCH 1
-
-#endif /* _OCELOT_VCAP_H_ */
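
Annotation: the header removed above encodes every IS2 key and action field as an offset/length macro pair, where each offset is the previous field's offset plus its length, so the whole bit layout is computed by the preprocessor and inserting or resizing one field shifts everything after it. A minimal standalone sketch of the pattern (field names hypothetical, not from the driver):

/* Each offset derives from the previous offset + length, so the
 * layout stays consistent when a field changes width.
 */
#define KEY_O_TYPE   0
#define KEY_L_TYPE   4
#define KEY_O_VID    (KEY_O_TYPE + KEY_L_TYPE)
#define KEY_L_VID    12
#define KEY_O_PCP    (KEY_O_VID + KEY_L_VID)
#define KEY_L_PCP    3

_Static_assert(KEY_O_PCP == 16, "PCP field must start at bit 16");
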
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 2ee0d0be113a..2616fd735aab 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1920,6 +1920,7 @@ myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
}
static const struct ethtool_ops myri10ge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = myri10ge_get_drvinfo,
.get_coalesce = myri10ge_get_coalesce,
.set_coalesce = myri10ge_set_coalesce,
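
Annotation: setting .supported_coalesce_params lets the ethtool core reject any coalesce field the driver cannot honor before the driver callback even runs, which is why the long hand-rolled -EOPNOTSUPP checks disappear from the nfp, nixge and ionic drivers in later hunks of this same diff. A sketch of the pattern, with hypothetical driver callbacks:

#include <linux/ethtool.h>

/* With this mask set, the ethtool core returns -EOPNOTSUPP for any
 * coalesce field outside rx_coalesce_usecs, so the driver no longer
 * needs to police the parameters itself.
 */
static const struct ethtool_ops example_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_coalesce = example_get_coalesce,	/* hypothetical */
	.set_coalesce = example_set_coalesce,	/* hypothetical */
};
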
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 51fa82b429a3..8b018ed37b1b 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -147,39 +147,12 @@ static int sonic_probe1(struct net_device *dev)
dev->dev_addr[i*2+1] = val >> 8;
}
- err = -ENOMEM;
-
- /* Initialize the device structure. */
-
lp->dma_bitmode = SONIC_BITMODE32;
- /* Allocate the entire chunk of memory for the descriptors.
- Note that this cannot cross a 64K boundary. */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL)
+ err = sonic_alloc_descriptors(dev);
+ if (err)
goto out;
- /* Now set up the pointers to point to the appropriate places */
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
dev->netdev_ops = &sonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
@@ -235,11 +208,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
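
Annotation: the relabelled error path above pairs each unwind step with the setup it reverses: once register_netdev() can fail after the descriptors exist, the unwind must free the coherent DMA block before releasing the MMIO region and the netdev. A reduced sketch of that ordering (desc_size stands in for the driver's size expression):

	err = sonic_alloc_descriptors(dev);
	if (err)
		goto out;

	err = register_netdev(dev);
	if (err)
		goto undo_probe1;
	return 0;

undo_probe1:
	/* Undo in strict reverse order of setup. */
	dma_free_coherent(lp->device, desc_size, lp->descriptors,
			  lp->descriptors_laddr);
	release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
out:
	free_netdev(dev);
	return err;
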
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0937fc2a928e..1b5559aacb38 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -114,17 +114,6 @@ static inline void bit_reverse_addr(unsigned char addr[6])
addr[i] = bitrev8(addr[i]);
}
-static irqreturn_t macsonic_interrupt(int irq, void *dev_id)
-{
- irqreturn_t result;
- unsigned long flags;
-
- local_irq_save(flags);
- result = sonic_interrupt(irq, dev_id);
- local_irq_restore(flags);
- return result;
-}
-
static int macsonic_open(struct net_device* dev)
{
int retval;
@@ -135,12 +124,12 @@ static int macsonic_open(struct net_device* dev)
dev->name, dev->irq);
goto err;
}
- /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes
- * in at priority level 3. However, we sometimes get the level 2 inter-
- * rupt as well, which must prevent re-entrance of the sonic handler.
+ /* Under the A/UX interrupt scheme, the onboard SONIC interrupt gets
+ * moved from level 2 to level 3. Unfortunately we still get some
+ * level 2 interrupts so register the handler for both.
*/
if (dev->irq == IRQ_AUTO_3) {
- retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0,
+ retval = request_irq(IRQ_NUBUS_9, sonic_interrupt, 0,
"sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
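
Annotation: dropping the macsonic_interrupt() wrapper is safe here presumably because sonic_interrupt() serializes on the driver's own lock in this series, so it no longer needs local interrupts masked around it and can be registered directly on both the level-3 autovector and the extra NuBus line. A sketch of sharing one re-entrancy-safe handler across two interrupt sources, using the names from the patch:

/* The same dev pointer lets sonic_interrupt() find its state from
 * either line; error handling elided for brevity.
 */
retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (!retval && dev->irq == IRQ_AUTO_3)
	retval = request_irq(IRQ_NUBUS_9, sonic_interrupt, 0,
			     "sonic", dev);
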
@@ -186,33 +175,10 @@ static const struct net_device_ops macsonic_netdev_ops = {
static int macsonic_init(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
+ int err = sonic_alloc_descriptors(dev);
- /* Allocate the entire chunk of memory for the descriptors.
- Note that this cannot cross a 64K boundary. */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL)
- return -ENOMEM;
-
- /* Now set up the pointers to point to the appropriate places */
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
+ if (err)
+ return err;
dev->netdev_ops = &macsonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 31be3ba66877..dd3605aa5f23 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -50,6 +50,42 @@ static void sonic_msg_init(struct net_device *dev)
netif_dbg(lp, drv, dev, "%s", version);
}
+static int sonic_alloc_descriptors(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ /* Allocate a chunk of memory for the descriptors. Note that this
+ * must not cross a 64K boundary. It is smaller than one page which
+ * means that page alignment is a sufficient condition.
+ */
+ lp->descriptors =
+ dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL);
+
+ if (!lp->descriptors)
+ return -ENOMEM;
+
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + SIZEOF_SONIC_CDA *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+
+ return 0;
+}
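
Annotation: the new helper consolidates the descriptor setup the three probe paths used to duplicate: one coherent allocation is carved into consecutive CDA, TDA, RDA and RRA sub-areas. The 64K argument in the comment holds because dma_alloc_coherent() returns at least page-aligned memory, 64 KiB boundaries fall on page boundaries, and a page-aligned block smaller than a page cannot straddle one. A compact standalone illustration of the carving arithmetic (sizes hypothetical):

#include <stddef.h>

enum { CDA_SZ = 64, TD_SZ = 32, NUM_TDS = 16, RD_SZ = 32, NUM_RDS = 16 };

/* Carve one contiguous block into consecutive sub-areas; each area
 * begins where the previous one ends, mirroring the lp->cda/tda/rda/
 * rra pointer setup above.
 */
static void carve(char *base, char **cda, char **tda, char **rda,
		  char **rra)
{
	*cda = base;
	*tda = *cda + CDA_SZ;
	*rda = *tda + TD_SZ * NUM_TDS;
	*rra = *rda + RD_SZ * NUM_RDS;
}
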
+
/*
* Open/initialize the SONIC controller.
*
@@ -264,7 +300,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
- entry = lp->next_tx;
+ entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
@@ -275,27 +311,26 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- wmb();
+ sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
+ sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));
+
+ netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
lp->tx_skb[entry] = skb;
- wmb();
- sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
- sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
lp->eol_tx = entry;
- lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
- if (lp->tx_skb[lp->next_tx] != NULL) {
+ entry = (entry + 1) & SONIC_TDS_MASK;
+ if (lp->tx_skb[entry]) {
/* The ring is full, the ISR has yet to process the next TD. */
netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
netif_stop_queue(dev);
/* after this packet, wait for ISR to free up some TDAs */
- } else netif_start_queue(dev);
-
- netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
-
- SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ }
spin_unlock_irqrestore(&lp->lock, flags);
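
Annotation: the rewritten transmit path derives the free slot from eol_tx instead of a separate next_tx counter, performs the EOL handover (set EOL on the new descriptor, clear it on the previous one) before ringing the TXP doorbell, and stops the queue only when the following slot still holds an unreclaimed skb. The ring-full test in isolation, using the patch's names:

/* The slot after the one just queued is free only once the ISR has
 * reclaimed its skb; otherwise the ring is full and the stack must
 * stop feeding packets until TX completion frees a descriptor.
 */
next = (entry + 1) & SONIC_TDS_MASK;
if (lp->tx_skb[next])
	netif_stop_queue(dev);
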
@@ -594,11 +629,6 @@ static void sonic_rx(struct net_device *dev)
if (rbe)
SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
- /*
- * If any worth-while packets have been received, netif_rx()
- * has done a mark_bh(NET_BH) for us and will work on them
- * when we get to the bottom-half routine.
- */
}
@@ -780,7 +810,7 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
- lp->cur_tx = lp->next_tx = 0;
+ lp->cur_tx = 0;
lp->eol_tx = SONIC_NUM_TDS - 1;
/*
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index e0e4cba6f6f6..3cbb62c860c8 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -321,7 +321,6 @@ struct sonic_local {
unsigned int cur_tx; /* first unacked transmit packet */
unsigned int eol_rx;
unsigned int eol_tx; /* last unacked transmit packet */
- unsigned int next_tx; /* next free TD */
int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
@@ -342,6 +341,7 @@ static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void sonic_msg_init(struct net_device *dev);
+static int sonic_alloc_descriptors(struct net_device *dev);
/* Internal inlines for reading/writing DMA buffers. Note that bus
size and endianness matter here, whereas they don't for registers,
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index e1b886e87a76..dda9ec7d9cee 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -167,47 +167,11 @@ static int __init sonic_probe1(struct net_device *dev)
dev->dev_addr[i*2+1] = val >> 8;
}
- /* Initialize the device structure. */
-
lp->dma_bitmode = SONIC_BITMODE32;
- /*
- * Allocate local private descriptor areas in uncached space.
- * The entire structure must be located within the same 64kb segment.
- * A simple way to ensure this is to allocate twice the
- * size of the structure -- given that the structure is
- * much less than 64 kB, at least one of the halves of
- * the allocated area will be contained entirely in 64 kB.
- * We also allocate extra space for a pointer to allow freeing
- * this structure later on (in xtsonic_cleanup_module()).
- */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL) {
- err = -ENOMEM;
+ err = sonic_alloc_descriptors(dev);
+ if (err)
goto out;
- }
-
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- /* get the virtual dma address */
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
dev->netdev_ops = &xtsonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 0ec6b8e8b549..67e62603fe3b 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5155,7 +5155,7 @@ static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
/* read mac entries from CAM */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
- u64 tmp64 = 0xffffffffffff0000ULL, val64;
+ u64 tmp64, val64;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* read mac addr */
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 9183b3e85d21..bdbf0726145e 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
eth_hw_addr_random(nn->dp.netdev);
+ nfp_nsp_close(nsp);
return;
}
@@ -332,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
goto err_free_alink;
alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
- if (!alink->prio_map)
+ if (!alink->prio_map) {
+ err = -ENOMEM;
goto err_free_alink;
+ }
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index a83a0ad5e27d..4268a7e0f344 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -104,14 +104,14 @@ struct cmsg_req_map_op {
__be32 tid;
__be32 count;
__be32 flags;
- u8 data[0];
+ u8 data[];
};
struct cmsg_reply_map_op {
struct cmsg_reply_map_simple reply_hdr;
__be32 count;
__be32 resv;
- u8 data[0];
+ u8 data[];
};
struct cmsg_bpf_event {
@@ -120,6 +120,6 @@ struct cmsg_bpf_event {
__be64 map_ptr;
__be32 data_size;
__be32 pkt_size;
- u8 data[0];
+ u8 data[];
};
#endif
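
Annotation: the [0] to [] changes here (and in the nfp headers below) convert GNU zero-length arrays into C99 flexible array members, which the compiler can bounds-check and which cannot legally appear anywhere but at the end of a struct. A standalone sketch of declaring and sizing one:

#include <stdlib.h>
#include <string.h>

struct msg {
	unsigned int len;
	unsigned char data[];	/* C99 flexible array member */
};

/* One allocation covers the header and the trailing payload. */
static struct msg *msg_new(const void *payload, unsigned int len)
{
	struct msg *m = malloc(sizeof(*m) + len);

	if (m) {
		m->len = len;
		memcpy(m->data, payload, len);
	}
	return m;
}

In-kernel code would size the allocation with struct_size(m, data, len) from <linux/overflow.h> to guard against integer overflow.
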
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c06600fb47ff..1c76e1592ca2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -1207,6 +1207,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
bool pkt_host = false;
u32 csum_updated = 0;
+ if (!flow_action_hw_stats_check(&flow->rule->action, extack,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ return -EOPNOTSUPP;
+
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
tun_type = NFP_FL_TUNNEL_NONE;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 9b50d76bbc09..bf516285510f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -587,7 +587,7 @@ struct nfp_flower_cmsg_mac_repr {
u8 info;
u8 nbi_port;
u8 phys_port;
- } ports[0];
+ } ports[];
};
#define NFP_FLOWER_CMSG_MAC_REPR_NBI GENMASK(1, 0)
@@ -619,7 +619,7 @@ struct nfp_flower_cmsg_merge_hint {
struct {
__be32 host_ctx;
__be64 host_cookie;
- } __packed flow[0];
+ } __packed flow[];
};
enum nfp_flower_cmsg_port_type {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 7ca5c1becfcf..6b60771ccb19 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1440,7 +1440,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
priv->stats[ctx_id].pkts += pkts;
priv->stats[ctx_id].bytes += bytes;
- max_t(u64, priv->stats[ctx_id].used, used);
+ priv->stats[ctx_id].used = max_t(u64, used,
+ priv->stats[ctx_id].used);
}
}
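
Annotation: the statistics change above fixes a silent bug: max_t() is a pure expression, so the old call computed the maximum of the two timestamps and discarded it, leaving the "used" field stale. Reduced to its essence:

/* Broken: the computed maximum is thrown away. */
max_t(u64, priv->stats[ctx_id].used, used);

/* Fixed: the result must be assigned back. */
priv->stats[ctx_id].used = max_t(u64, used, priv->stats[ctx_id].used);
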
@@ -1490,7 +1491,8 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
nfp_flower_update_merge_stats(app, nfp_flow);
flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
- priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
+ priv->stats[ctx_id].pkts, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
priv->stats[ctx_id].pkts = 0;
priv->stats[ctx_id].bytes = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 124a43dc136a..d18a830e4264 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -320,7 +320,8 @@ nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
spin_unlock_bh(&fl_priv->qos_stats_lock);
flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
- repr_priv->qos_table.last_update);
+ repr_priv->qos_table.last_update,
+ FLOW_ACTION_HW_STATS_DELAYED);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index c50fce42f473..07dbf4d72227 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -211,7 +211,7 @@ static const struct nfp_devlink_versions {
enum nfp_nsp_versions id;
const char *key;
} nfp_devlink_versions_nsp[] = {
- { NFP_VERSIONS_BUNDLE, "fw.bundle_id", },
+ { NFP_VERSIONS_BUNDLE, DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, },
{ NFP_VERSIONS_BSP, DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, },
{ NFP_VERSIONS_CPLD, "fw.cpld", },
{ NFP_VERSIONS_APP, DEVLINK_INFO_VERSION_GENERIC_FW_APP, },
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 5d5812fd9317..fa6b13a05941 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -42,7 +42,7 @@ struct nfp_shared_buf;
*/
struct nfp_dumpspec {
u32 size;
- u8 data[0];
+ u8 data[];
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index 769ceef09756..a614df095b08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -36,7 +36,7 @@ enum nfp_dumpspec_type {
struct nfp_dump_tl {
__be32 type;
__be32 length; /* chunk length to follow, aligned to 8 bytes */
- char data[0];
+ char data[];
};
/* NFP CPP parameters */
@@ -62,7 +62,7 @@ struct nfp_dumpspec_csr {
struct nfp_dumpspec_rtsym {
struct nfp_dump_tl tl;
- char rtsym[0];
+ char rtsym[];
};
/* header for register dumpable */
@@ -79,7 +79,7 @@ struct nfp_dump_rtsym {
struct nfp_dump_common_cpp cpp;
__be32 error; /* error code encountered while reading */
u8 padded_name_length; /* pad so data starts at 8 byte boundary */
- char rtsym[0];
+ char rtsym[];
/* after padded_name_length, there is dump_length data */
};
@@ -92,7 +92,7 @@ struct nfp_dump_error {
struct nfp_dump_tl tl;
__be32 error;
char padding[4];
- char spec[0];
+ char spec[];
};
/* to track state through debug size calculation TLV traversal */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index d648e32c0520..2779f1526d1e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1343,26 +1343,6 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
unsigned int factor;
- if (ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -EOPNOTSUPP;
-
/* Compute factor used to convert coalesce '_usecs' parameters to
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
@@ -1476,6 +1456,8 @@ static int nfp_net_set_channels(struct net_device *netdev,
}
static const struct ethtool_ops nfp_net_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index e0f13dfe1f39..48a74accbbd3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -18,7 +18,7 @@ struct nfp_port;
*/
struct nfp_reprs {
unsigned int num_reprs;
- struct net_device __rcu *reprs[0];
+ struct net_device __rcu *reprs[];
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 684e4e036c55..a486008eb80a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1247,19 +1247,16 @@ static void nfp6000_free(struct nfp_cpp *cpp)
static int nfp6000_read_serial(struct device *dev, u8 *serial)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int pos;
- u32 reg;
+ u64 dsn;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos) {
+ dsn = pci_get_dsn(pdev);
+ if (!dsn) {
dev_err(dev, "can't find PCIe Serial Number Capability\n");
return -EINVAL;
}
- pci_read_config_dword(pdev, pos + 4, &reg);
- put_unaligned_be16(reg >> 16, serial + 4);
- pci_read_config_dword(pdev, pos + 8, &reg);
- put_unaligned_be32(reg, serial);
+ put_unaligned_be32((u32)(dsn >> 32), serial);
+ put_unaligned_be16((u16)(dsn >> 16), serial + 4);
return 0;
}
@@ -1267,18 +1264,15 @@ static int nfp6000_read_serial(struct device *dev, u8 *serial)
static int nfp6000_get_interface(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int pos;
- u32 reg;
+ u64 dsn;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos) {
+ dsn = pci_get_dsn(pdev);
+ if (!dsn) {
dev_err(dev, "can't find PCIe Serial Number Capability\n");
return -EINVAL;
}
- pci_read_config_dword(pdev, pos + 4, &reg);
-
- return reg & 0xffff;
+ return dsn & 0xffff;
}
static const struct nfp_cpp_operations nfp6000_pcie_ops = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 1531c1870020..f5360bae6f75 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -183,7 +183,7 @@ struct nfp_eth_table {
bool is_split;
unsigned int fec_modes_supported;
- } ports[0];
+ } ports[];
};
struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 49c7987c2abd..2fdd0753b3af 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1019,27 +1019,6 @@ static int nixge_ethtools_set_coalesce(struct net_device *ndev,
return -EBUSY;
}
- if (ecoalesce->rx_coalesce_usecs ||
- ecoalesce->rx_coalesce_usecs_irq ||
- ecoalesce->rx_max_coalesced_frames_irq ||
- ecoalesce->tx_coalesce_usecs ||
- ecoalesce->tx_coalesce_usecs_irq ||
- ecoalesce->tx_max_coalesced_frames_irq ||
- ecoalesce->stats_block_coalesce_usecs ||
- ecoalesce->use_adaptive_rx_coalesce ||
- ecoalesce->use_adaptive_tx_coalesce ||
- ecoalesce->pkt_rate_low ||
- ecoalesce->rx_coalesce_usecs_low ||
- ecoalesce->rx_max_coalesced_frames_low ||
- ecoalesce->tx_coalesce_usecs_low ||
- ecoalesce->tx_max_coalesced_frames_low ||
- ecoalesce->pkt_rate_high ||
- ecoalesce->rx_coalesce_usecs_high ||
- ecoalesce->rx_max_coalesced_frames_high ||
- ecoalesce->tx_coalesce_usecs_high ||
- ecoalesce->tx_max_coalesced_frames_high ||
- ecoalesce->rate_sample_interval)
- return -EOPNOTSUPP;
if (ecoalesce->rx_max_coalesced_frames)
priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->tx_max_coalesced_frames)
@@ -1083,6 +1062,7 @@ static int nixge_ethtools_set_phys_id(struct net_device *ndev,
}
static const struct ethtool_ops nixge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = nixge_ethtools_get_drvinfo,
.get_coalesce = nixge_ethtools_get_coalesce,
.set_coalesce = nixge_ethtools_set_coalesce,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index bb106a32f416..23ccc0da2341 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,12 +12,12 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.20.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT 0x1004
#define DEVCMD_TIMEOUT 10
@@ -42,6 +42,7 @@ struct ionic {
struct dentry *dentry;
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
+ bool is_mgmt_nic;
struct ionic_identity ident;
struct list_head lifs;
struct ionic_lif *master_lif;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 448d7b23b2f7..60fc191a35e5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -15,6 +15,7 @@
static const struct pci_device_id ionic_id_table[] = {
{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) },
{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) },
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT) },
{ 0, } /* end of table */
};
MODULE_DEVICE_TABLE(pci, ionic_id_table);
@@ -37,6 +38,9 @@ int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs)
void ionic_bus_free_irq_vectors(struct ionic *ionic)
{
+ if (!ionic->nintrs)
+ return;
+
pci_free_irq_vectors(ionic->pdev);
}
@@ -221,6 +225,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, ionic);
mutex_init(&ionic->dev_cmd_lock);
+ ionic->is_mgmt_nic =
+ ent->device == PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT;
+
/* Query system for DMA addressing limitation for the device. */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
if (err) {
@@ -245,6 +252,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
+ if (!ionic->is_mgmt_nic)
+ pcie_print_link_status(pdev);
err = ionic_map_bars(ionic);
if (err)
@@ -346,6 +355,11 @@ err_out_reset:
ionic_reset(ionic);
err_out_teardown:
ionic_dev_teardown(ionic);
+ /* Don't fail the probe for these errors, keep
+ * the hw interface around for inspection
+ */
+ return 0;
+
err_out_unmap_bars:
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
@@ -369,11 +383,14 @@ static void ionic_remove(struct pci_dev *pdev)
if (!ionic)
return;
- ionic_devlink_unregister(ionic);
- ionic_lifs_unregister(ionic);
- ionic_lifs_deinit(ionic);
- ionic_lifs_free(ionic);
- ionic_bus_free_irq_vectors(ionic);
+ if (ionic->master_lif) {
+ ionic_devlink_unregister(ionic);
+ ionic_lifs_unregister(ionic);
+ ionic_lifs_deinit(ionic);
+ ionic_lifs_free(ionic);
+ ionic_bus_free_irq_vectors(ionic);
+ }
+
ionic_port_reset(ionic);
ionic_reset(ionic);
ionic_dev_teardown(ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index bc03cecf80cc..11621ccc1faf 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -170,8 +170,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
- debugfs_create_u8("done_color", 0400, cq_dentry,
- (u8 *)&cq->done_color);
+ debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops);
@@ -228,7 +227,13 @@ DEFINE_SHOW_ATTRIBUTE(netdev);
void ionic_debugfs_add_lif(struct ionic_lif *lif)
{
- lif->dentry = debugfs_create_dir(lif->name, lif->ionic->dentry);
+ struct dentry *lif_dentry;
+
+ lif_dentry = debugfs_create_dir(lif->name, lif->ionic->dentry);
+ if (IS_ERR_OR_NULL(lif_dentry))
+ return;
+ lif->dentry = lif_dentry;
+
debugfs_create_file("netdev", 0400, lif->dentry,
lif->netdev, &netdev_fops);
}
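
Annotation: two small hardening changes above: done_color is a bool, so the matching debugfs helper replaces a cast to u8 * that only worked by accident of sizeof(bool), and the lif directory dentry is validated before files are attached to it. A sketch of the guard, with the parent dentry assumed to exist:

struct dentry *dir;

dir = debugfs_create_dir("example", parent);	/* parent assumed */
if (IS_ERR_OR_NULL(dir))
	return;		/* debugfs absent, disabled, or create failed */

debugfs_create_bool("done_color", 0400, dir, &cq->done_color);
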
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 46107de5e6c3..f4ae40ae1e53 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -14,11 +14,15 @@
static void ionic_watchdog_cb(struct timer_list *t)
{
struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+ int hb;
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
- ionic_heartbeat_check(ionic);
+ hb = ionic_heartbeat_check(ionic);
+
+ if (hb >= 0 && ionic->master_lif)
+ ionic_link_status_check_request(ionic->master_lif);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -82,6 +86,7 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
+ idev->last_fw_status = 0xff;
timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
mod_timer(&ionic->watchdog_timer,
@@ -115,8 +120,43 @@ int ionic_heartbeat_check(struct ionic *ionic)
* fw_status != 0xff (bad PCI read)
*/
fw_status = ioread8(&idev->dev_info_regs->fw_status);
- if (fw_status == 0xff ||
- !(fw_status & IONIC_FW_STS_F_RUNNING))
+ if (fw_status != 0xff)
+ fw_status &= IONIC_FW_STS_F_RUNNING; /* use only the run bit */
+
+ /* is this a transition? */
+ if (fw_status != idev->last_fw_status &&
+ idev->last_fw_status != 0xff) {
+ struct ionic_lif *lif = ionic->master_lif;
+ bool trigger = false;
+
+ if (!fw_status || fw_status == 0xff) {
+ dev_info(ionic->dev, "FW stopped %u\n", fw_status);
+ if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ trigger = true;
+ } else {
+ dev_info(ionic->dev, "FW running %u\n", fw_status);
+ if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ trigger = true;
+ }
+
+ if (trigger) {
+ struct ionic_deferred_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ dev_err(ionic->dev, "%s OOM\n", __func__);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ if (fw_status & IONIC_FW_STS_F_RUNNING &&
+ fw_status != 0xff)
+ work->fw_status = 1;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
+ }
+ }
+ idev->last_fw_status = fw_status;
+
+ if (!fw_status || fw_status == 0xff)
return -ENXIO;
/* early FW has no heartbeat, else FW will return non-zero */
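
Annotation: the heartbeat check now edge-detects firmware state: it reduces fw_status to the RUNNING bit, compares against last_fw_status, and queues a deferred LIF reset only on an actual up/down transition, ignoring the 0xff "bad PCI read" sentinel (which also suppresses a spurious report on the very first poll). The detection pattern in isolation, as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

#define FW_BAD_READ 0xff	/* all-ones: the PCI read failed */

/* Return true exactly once per run-state change; *last carries the
 * previously observed state so repeated polls stay quiet.
 */
static bool fw_state_changed(uint8_t status, uint8_t *last)
{
	bool changed;

	if (status != FW_BAD_READ)
		status &= 1;	/* keep only the RUNNING bit (bit 0) */

	changed = (status != *last) && (*last != FW_BAD_READ);
	*last = status;
	return changed;
}
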
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 7838e342c4fd..587398b01997 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -132,6 +132,7 @@ struct ionic_dev {
unsigned long last_hb_time;
u32 last_hb;
+ u8 last_fw_status;
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index 6fb27dcc5787..273c889faaad 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -77,6 +77,10 @@ int ionic_devlink_register(struct ionic *ionic)
return err;
}
+ /* don't register the mgmt_nic as a port */
+ if (ionic->is_mgmt_nic)
+ return 0;
+
devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
0, false, 0, NULL, 0);
err = devlink_port_register(dl, &ionic->dl_port, 0);
@@ -93,6 +97,7 @@ void ionic_devlink_unregister(struct ionic *ionic)
{
struct devlink *dl = priv_to_devlink(ionic);
- devlink_port_unregister(&ionic->dl_port);
+ if (ionic->dl_port.registered)
+ devlink_port_unregister(&ionic->dl_port);
devlink_unregister(dl);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index f778fff034f5..6996229facfd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/sfp.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -86,7 +87,6 @@ static void ionic_get_drvinfo(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, IONIC_DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, ionic_bus_info(ionic),
@@ -412,28 +412,6 @@ static int ionic_set_coalesce(struct net_device *netdev,
unsigned int i;
u32 coal;
- if (coalesce->rx_max_coalesced_frames ||
- coalesce->rx_coalesce_usecs_irq ||
- coalesce->rx_max_coalesced_frames_irq ||
- coalesce->tx_max_coalesced_frames ||
- coalesce->tx_coalesce_usecs_irq ||
- coalesce->tx_max_coalesced_frames_irq ||
- coalesce->stats_block_coalesce_usecs ||
- coalesce->use_adaptive_rx_coalesce ||
- coalesce->use_adaptive_tx_coalesce ||
- coalesce->pkt_rate_low ||
- coalesce->rx_coalesce_usecs_low ||
- coalesce->rx_max_coalesced_frames_low ||
- coalesce->tx_coalesce_usecs_low ||
- coalesce->tx_max_coalesced_frames_low ||
- coalesce->pkt_rate_high ||
- coalesce->rx_coalesce_usecs_high ||
- coalesce->rx_max_coalesced_frames_high ||
- coalesce->tx_coalesce_usecs_high ||
- coalesce->tx_max_coalesced_frames_high ||
- coalesce->rate_sample_interval)
- return -EINVAL;
-
ident = &lif->ionic->ident;
if (ident->dev.intr_coal_div == 0) {
netdev_warn(netdev, "bad HW value in dev.intr_coal_div = %d\n",
@@ -462,7 +440,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (coal != lif->rx_coalesce_hw) {
lif->rx_coalesce_hw = coal;
- if (test_bit(IONIC_LIF_UP, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->rxqcqs[i].qcq;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
@@ -509,11 +487,11 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
- running = test_bit(IONIC_LIF_UP, lif->state);
+ running = test_bit(IONIC_LIF_F_UP, lif->state);
if (running)
ionic_stop(netdev);
@@ -522,7 +500,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
if (running)
ionic_open(netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return 0;
}
@@ -553,11 +531,11 @@ static int ionic_set_channels(struct net_device *netdev,
if (ch->combined_count == lif->nxqs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
- running = test_bit(IONIC_LIF_UP, lif->state);
+ running = test_bit(IONIC_LIF_F_UP, lif->state);
if (running)
ionic_stop(netdev);
@@ -565,7 +543,7 @@ static int ionic_set_channels(struct net_device *netdev,
if (running)
ionic_open(netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return 0;
}
@@ -575,7 +553,7 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
struct ionic_lif *lif = netdev_priv(netdev);
u32 priv_flags = 0;
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state))
+ if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
priv_flags |= PRIV_F_SW_DBG_STATS;
return priv_flags;
@@ -584,14 +562,10 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct ionic_lif *lif = netdev_priv(netdev);
- u32 flags = lif->flags;
- clear_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state);
+ clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
if (priv_flags & PRIV_F_SW_DBG_STATS)
- set_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state);
-
- if (flags != lif->flags)
- lif->flags = flags;
+ set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
return 0;
}
@@ -704,23 +678,27 @@ static int ionic_get_module_info(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_dev *idev = &lif->ionic->idev;
struct ionic_xcvr_status *xcvr;
+ struct sfp_eeprom_base *sfp;
xcvr = &idev->port_info->status.xcvr;
+ sfp = (struct sfp_eeprom_base *) xcvr->sprom;
/* report the module data type and length */
- switch (xcvr->sprom[0]) {
- case 0x03: /* SFP */
+ switch (sfp->phys_id) {
+ case SFF8024_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
break;
- case 0x0D: /* QSFP */
- case 0x11: /* QSFP28 */
+ case SFF8024_ID_QSFP_8436_8636:
+ case SFF8024_ID_QSFP28_8636:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
default:
netdev_info(netdev, "unknown xcvr type 0x%02x\n",
xcvr->sprom[0]);
+ modinfo->type = 0;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
break;
}
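
Annotation: the transceiver probe above now reads the EEPROM base through struct sfp_eeprom_base and matches the SFF-8024 identifier constants from <linux/sfp.h> instead of the raw 0x03/0x0D/0x11 magic numbers, and falls back to a safe SFF-8079 length for unknown modules. The mapping, condensed:

#include <linux/sfp.h>

/* Map the SFF-8024 identifier byte to an ethtool module format. */
switch (sfp->phys_id) {
case SFF8024_ID_SFP:			/* 0x03 */
	modinfo->type = ETH_MODULE_SFF_8079;
	break;
case SFF8024_ID_QSFP_8436_8636:		/* 0x0d */
case SFF8024_ID_QSFP28_8636:		/* 0x11 */
	modinfo->type = ETH_MODULE_SFF_8436;
	break;
}
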
@@ -784,6 +762,7 @@ static int ionic_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops ionic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 51adf5059834..ceeb7629e7a0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -4,8 +4,6 @@
#ifndef _IONIC_IF_H_
#define _IONIC_IF_H_
-#pragma pack(push, 1)
-
#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */
#define IONIC_DEV_INFO_VERSION 1
#define IONIC_IFNAMSIZ 16
@@ -366,7 +364,7 @@ union ionic_lif_config {
u8 rsvd2[2];
__le64 features;
__le32 queue_count[IONIC_QTYPE_MAX];
- };
+ } __packed;
__le32 words[64];
};
@@ -417,7 +415,7 @@ union ionic_lif_identity {
__le32 max_frame_size;
u8 rsvd2[106];
union ionic_lif_config config;
- } eth;
+ } __packed eth;
struct {
u8 version;
@@ -439,8 +437,8 @@ union ionic_lif_identity {
struct ionic_lif_logical_qtype rq_qtype;
struct ionic_lif_logical_qtype cq_qtype;
struct ionic_lif_logical_qtype eq_qtype;
- } rdma;
- };
+ } __packed rdma;
+ } __packed;
__le32 words[512];
};
@@ -526,7 +524,7 @@ struct ionic_q_init_cmd {
__le64 sg_ring_base;
__le32 eq_index;
u8 rsvd2[16];
-};
+} __packed;
/**
* struct ionic_q_init_comp - Queue init command completion
@@ -1095,7 +1093,7 @@ struct ionic_port_status {
u8 status;
u8 rsvd[51];
struct ionic_xcvr_status xcvr;
-};
+} __packed;
/**
* struct ionic_port_identify_cmd - Port identify command
@@ -1251,7 +1249,7 @@ struct ionic_port_getattr_comp {
u8 pause_type;
u8 loopback_mode;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1319,7 +1317,7 @@ struct ionic_dev_setattr_cmd {
char name[IONIC_IFNAMSIZ];
__le64 features;
u8 rsvd2[60];
- };
+ } __packed;
};
/**
@@ -1334,7 +1332,7 @@ struct ionic_dev_setattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1361,7 +1359,7 @@ struct ionic_dev_getattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1426,7 +1424,7 @@ struct ionic_lif_setattr_cmd {
} rss;
u8 stats_ctl;
u8 rsvd[60];
- };
+ } __packed;
};
/**
@@ -1444,7 +1442,7 @@ struct ionic_lif_setattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1483,7 +1481,7 @@ struct ionic_lif_getattr_comp {
u8 mac[6];
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1688,7 +1686,7 @@ struct ionic_vf_setattr_cmd {
u8 linkstate;
__le64 stats_pa;
u8 pad[60];
- };
+ } __packed;
};
struct ionic_vf_setattr_comp {
@@ -1726,7 +1724,7 @@ struct ionic_vf_getattr_comp {
u8 linkstate;
__le64 stats_pa;
u8 pad[11];
- };
+ } __packed;
u8 color;
};
@@ -2472,7 +2470,7 @@ union ionic_dev_cmd_regs {
union ionic_dev_cmd_comp comp;
u8 rsvd[48];
u32 data[478];
- };
+ } __packed;
u32 words[512];
};
@@ -2485,7 +2483,7 @@ union ionic_dev_regs {
struct {
union ionic_dev_info_regs info;
union ionic_dev_cmd_regs devcmd;
- };
+ } __packed;
__le32 words[1024];
};
@@ -2575,6 +2573,4 @@ struct ionic_identity {
union ionic_qos_identity qos;
};
-#pragma pack(pop)
-
#endif /* _IONIC_IF_H_ */
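
Annotation: ionic_if.h drops the file-wide #pragma pack(push, 1) in favour of per-struct __packed annotations, so only the structures that genuinely need byte packing pay for it and the header can no longer change the layout of anything textually included between the push and pop. A standalone illustration (the #define stands in for the kernel's definition in compiler_attributes.h):

#include <stdint.h>

#define __packed __attribute__((packed))

/* __packed removes padding for this struct only; neighbouring
 * declarations keep their natural alignment.
 */
struct wire_hdr {
	uint8_t  op;
	uint32_t len;	/* would sit at offset 4 without __packed */
} __packed;

_Static_assert(sizeof(struct wire_hdr) == 5, "no padding expected");
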
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 938e19ee0bcd..f8a9c1bcffc9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -21,6 +21,12 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
+static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
+static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
+static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
+
+static int ionic_start_queues(struct ionic_lif *lif);
+static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_deferred_work(struct work_struct *work)
{
@@ -50,6 +56,12 @@ static void ionic_lif_deferred_work(struct work_struct *work)
case IONIC_DW_TYPE_LINK_STATUS:
ionic_link_status_check(lif);
break;
+ case IONIC_DW_TYPE_LIF_RESET:
+ if (w->fw_status)
+ ionic_lif_handle_fw_up(lif);
+ else
+ ionic_lif_handle_fw_down(lif);
+ break;
default:
break;
}
@@ -58,8 +70,8 @@ static void ionic_lif_deferred_work(struct work_struct *work)
}
}
-static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
- struct ionic_deferred_work *work)
+void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+ struct ionic_deferred_work *work)
{
spin_lock_bh(&def->lock);
list_add_tail(&work->list, &def->list);
@@ -73,40 +85,47 @@ static void ionic_link_status_check(struct ionic_lif *lif)
u16 link_status;
bool link_up;
+ if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
+ return;
+
+ if (lif->ionic->is_mgmt_nic)
+ return;
+
link_status = le16_to_cpu(lif->info->status.link_status);
link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
- /* filter out the no-change cases */
- if (link_up == netif_carrier_ok(netdev))
- goto link_out;
-
if (link_up) {
- netdev_info(netdev, "Link up - %d Gbps\n",
- le32_to_cpu(lif->info->status.link_speed) / 1000);
+ if (!netif_carrier_ok(netdev)) {
+ u32 link_speed;
- if (test_bit(IONIC_LIF_UP, lif->state)) {
- netif_tx_wake_all_queues(lif->netdev);
+ ionic_port_identify(lif->ionic);
+ link_speed = le32_to_cpu(lif->info->status.link_speed);
+ netdev_info(netdev, "Link up - %d Gbps\n",
+ link_speed / 1000);
netif_carrier_on(netdev);
}
+
+ if (netif_running(lif->netdev))
+ ionic_start_queues(lif);
} else {
- netdev_info(netdev, "Link down\n");
+ if (netif_carrier_ok(netdev)) {
+ netdev_info(netdev, "Link down\n");
+ netif_carrier_off(netdev);
+ }
- /* carrier off first to avoid watchdog timeout */
- netif_carrier_off(netdev);
- if (test_bit(IONIC_LIF_UP, lif->state))
- netif_tx_stop_all_queues(netdev);
+ if (netif_running(lif->netdev))
+ ionic_stop_queues(lif);
}
-link_out:
- clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
+ clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
-static void ionic_link_status_check_request(struct ionic_lif *lif)
+void ionic_link_status_check_request(struct ionic_lif *lif)
{
struct ionic_deferred_work *work;
/* we only need one request outstanding at a time */
- if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state))
+ if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;
if (in_interrupt()) {
@@ -244,38 +263,19 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static void ionic_lif_quiesce(struct ionic_lif *lif)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.lif_setattr = {
- .opcode = IONIC_CMD_LIF_SETATTR,
- .attr = IONIC_LIF_ATTR_STATE,
- .index = lif->index,
- .state = IONIC_LIF_DISABLE
- },
- };
-
- ionic_adminq_post_wait(lif, &ctx);
-}
-
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
- struct device *dev = lif->ionic->dev;
if (!qcq)
return;
- ionic_debugfs_del_qcq(qcq);
-
if (!(qcq->flags & IONIC_QCQ_F_INITED))
return;
if (qcq->flags & IONIC_QCQ_F_INTR) {
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
- devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
netif_napi_del(&qcq->napi);
}
@@ -289,12 +289,18 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
if (!qcq)
return;
+ ionic_debugfs_del_qcq(qcq);
+
dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
qcq->base = NULL;
qcq->base_pa = 0;
- if (qcq->flags & IONIC_QCQ_F_INTR)
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector, NULL);
+ devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
+ qcq->intr.vector = 0;
ionic_intr_free(lif, qcq->intr.index);
+ }
devm_kfree(dev, qcq->cq.info);
qcq->cq.info = NULL;
@@ -318,19 +324,21 @@ static void ionic_qcqs_free(struct ionic_lif *lif)
lif->adminqcq = NULL;
}
- for (i = 0; i < lif->nxqs; i++)
- if (lif->rxqcqs[i].stats)
- devm_kfree(dev, lif->rxqcqs[i].stats);
-
- devm_kfree(dev, lif->rxqcqs);
- lif->rxqcqs = NULL;
-
- for (i = 0; i < lif->nxqs; i++)
- if (lif->txqcqs[i].stats)
- devm_kfree(dev, lif->txqcqs[i].stats);
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++)
+ if (lif->rxqcqs[i].stats)
+ devm_kfree(dev, lif->rxqcqs[i].stats);
+ devm_kfree(dev, lif->rxqcqs);
+ lif->rxqcqs = NULL;
+ }
- devm_kfree(dev, lif->txqcqs);
- lif->txqcqs = NULL;
+ if (lif->txqcqs) {
+ for (i = 0; i < lif->nxqs; i++)
+ if (lif->txqcqs[i].stats)
+ devm_kfree(dev, lif->txqcqs[i].stats);
+ devm_kfree(dev, lif->txqcqs);
+ lif->txqcqs = NULL;
+ }
}
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
@@ -424,8 +432,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
IONIC_INTR_MASK_SET);
- new->intr.cpu = new->intr.index % num_online_cpus();
- if (cpu_online(new->intr.cpu))
+ err = ionic_request_irq(lif, new);
+ if (err) {
+ netdev_warn(lif->netdev, "irq request failed %d\n", err);
+ goto err_out_free_intr;
+ }
+
+ new->intr.cpu = cpumask_local_spread(new->intr.index,
+ dev_to_node(dev));
+ if (new->intr.cpu != -1)
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
@@ -437,13 +452,13 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->cq.info) {
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
err = -ENOMEM;
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
@@ -451,7 +466,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM;
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
new->total_size = total_size;
@@ -477,8 +492,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
return 0;
+err_out_free_irq:
+ if (flags & IONIC_QCQ_F_INTR)
+ devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
- ionic_intr_free(lif, new->intr.index);
+ if (flags & IONIC_QCQ_F_INTR)
+ ionic_intr_free(lif, new->intr.index);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
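
Annotation: interrupt setup moves into qcq allocation above: the vector is requested up front, the affinity hint is spread across NUMA-local CPUs with cpumask_local_spread() instead of a bare modulo over online CPUs, and a dedicated err_out_free_irq label keeps the unwind symmetric with the new allocation order. A sketch of the spread (qcq_index, irq_vector and affinity_mask are assumed names, not the driver's exact fields):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* cpumask_local_spread() picks the i-th online CPU, preferring CPUs
 * on the given NUMA node, so successive queues land on distinct
 * node-local CPUs instead of clustering via a plain modulo.
 */
int cpu = cpumask_local_spread(qcq_index, dev_to_node(dev));

cpumask_set_cpu(cpu, &qcq->affinity_mask);
irq_set_affinity_hint(qcq->irq_vector, &qcq->affinity_mask);
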
@@ -500,6 +519,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
0, lif->kern_pid, &lif->adminqcq);
if (err)
return err;
+ ionic_debugfs_add_qcq(lif, lif->adminqcq);
if (lif->ionic->nnqs_per_lif) {
flags = IONIC_QCQ_F_NOTIFYQ;
@@ -510,6 +530,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
0, lif->kern_pid, &lif->notifyqcq);
if (err)
goto err_out_free_adminqcq;
+ ionic_debugfs_add_qcq(lif, lif->notifyqcq);
/* Let the notifyq ride on the adminq interrupt */
ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
@@ -594,6 +615,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ q->tail = q->info;
+ q->head = q->tail;
+ cq->tail = cq->info;
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -607,8 +632,6 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->flags |= IONIC_QCQ_F_INITED;
- ionic_debugfs_add_qcq(lif, qcq);
-
return 0;
}
@@ -641,6 +664,10 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ q->tail = q->info;
+ q->head = q->tail;
+ cq->tail = cq->info;
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -655,16 +682,8 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
NAPI_POLL_WEIGHT);
- err = ionic_request_irq(lif, qcq);
- if (err) {
- netif_napi_del(&qcq->napi);
- return err;
- }
-
qcq->flags |= IONIC_QCQ_F_INITED;
- ionic_debugfs_add_qcq(lif, qcq);
-
return 0;
}
@@ -672,6 +691,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
struct ionic_cq_info *cq_info)
{
union ionic_notifyq_comp *comp = cq_info->cq_desc;
+ struct ionic_deferred_work *work;
struct net_device *netdev;
struct ionic_queue *q;
struct ionic_lif *lif;
@@ -697,11 +717,13 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
ionic_link_status_check_request(lif);
break;
case IONIC_EVENT_RESET:
- netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
- eid);
- netdev_info(netdev, " reset_code=%d state=%d\n",
- comp->reset.reset_code,
- comp->reset.state);
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "%s OOM\n", __func__);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
break;
default:
netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
@@ -831,7 +853,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
err = ionic_adminq_post_wait(lif, &ctx);
- if (err)
+ if (err && err != -EEXIST)
return err;
return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
@@ -861,7 +883,7 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
spin_unlock_bh(&lif->rx_filters.lock);
err = ionic_adminq_post_wait(lif, &ctx);
- if (err)
+ if (err && err != -EEXIST)
return err;
netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
@@ -1093,6 +1115,7 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
IONIC_ETH_HW_VLAN_RX_STRIP |
IONIC_ETH_HW_VLAN_RX_FILTER;
+ u64 old_hw_features;
int err;
ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
@@ -1100,9 +1123,13 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
if (err)
return err;
+ old_hw_features = lif->hw_features;
lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
ctx.comp.lif_setattr.features);
+ if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
+ ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
+
if ((vlan_flags & features) &&
!(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
@@ -1149,6 +1176,10 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev_features_t features;
int err;
+ /* no netdev features on the management device */
+ if (lif->ionic->is_mgmt_nic)
+ return 0;
+
/* set up what we expect to support by default */
features = NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -1205,7 +1236,8 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev->hw_features |= netdev->hw_enc_features;
netdev->features |= netdev->hw_features;
- netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_UNICAST_FLT |
+ IFF_LIVE_ADDR_CHANGE;
return 0;
}
@@ -1356,13 +1388,15 @@ int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
.cmd.lif_setattr = {
.opcode = IONIC_CMD_LIF_SETATTR,
.attr = IONIC_LIF_ATTR_RSS,
- .rss.types = cpu_to_le16(types),
.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
},
};
unsigned int i, tbl_sz;
- lif->rss_types = types;
+ if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
+ lif->rss_types = types;
+ ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
+ }
if (key)
memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
@@ -1413,10 +1447,22 @@ static void ionic_lif_rss_deinit(struct ionic_lif *lif)
static void ionic_txrx_disable(struct ionic_lif *lif)
{
unsigned int i;
+ int err;
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_disable(lif->txqcqs[i].qcq);
- ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ if (lif->txqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ err = ionic_qcq_disable(lif->txqcqs[i].qcq);
+ if (err == -ETIMEDOUT)
+ break;
+ }
+ }
+
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ if (err == -ETIMEDOUT)
+ break;
+ }
}
}
@@ -1424,26 +1470,40 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
{
unsigned int i;
- for (i = 0; i < lif->nxqs; i++) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
- ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
+ if (lif->txqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
+ ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
+ ionic_tx_empty(&lif->txqcqs[i].qcq->q);
+ }
+ }
- ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
- ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
- ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+ ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
+ ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ }
}
+ lif->rx_mode = 0;
}
static void ionic_txrx_free(struct ionic_lif *lif)
{
unsigned int i;
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_free(lif, lif->txqcqs[i].qcq);
- lif->txqcqs[i].qcq = NULL;
+ if (lif->txqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_qcq_free(lif, lif->txqcqs[i].qcq);
+ lif->txqcqs[i].qcq = NULL;
+ }
+ }
- ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
- lif->rxqcqs[i].qcq = NULL;
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
+ lif->rxqcqs[i].qcq = NULL;
+ }
}
}
@@ -1465,6 +1525,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
goto err_out;
lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
+ ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
}
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
@@ -1485,6 +1546,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
lif->rx_coalesce_hw);
ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
lif->txqcqs[i].qcq);
+ ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
}
return 0;
@@ -1533,14 +1595,15 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int i, err;
for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_enable(lif->txqcqs[i].qcq);
+ ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
+ err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
if (err)
goto err_out;
- ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
- err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
+ err = ionic_qcq_enable(lif->txqcqs[i].qcq);
if (err) {
- ionic_qcq_disable(lif->txqcqs[i].qcq);
+ if (err != -ETIMEDOUT)
+ ionic_qcq_disable(lif->rxqcqs[i].qcq);
goto err_out;
}
}
@@ -1549,74 +1612,84 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out:
while (i--) {
- ionic_qcq_disable(lif->rxqcqs[i].qcq);
- ionic_qcq_disable(lif->txqcqs[i].qcq);
+ err = ionic_qcq_disable(lif->txqcqs[i].qcq);
+ if (err == -ETIMEDOUT)
+ break;
+ err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ if (err == -ETIMEDOUT)
+ break;
}
return err;
}
+static int ionic_start_queues(struct ionic_lif *lif)
+{
+ int err;
+
+ if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
+ return 0;
+
+ err = ionic_txrx_enable(lif);
+ if (err) {
+ clear_bit(IONIC_LIF_F_UP, lif->state);
+ return err;
+ }
+ netif_tx_wake_all_queues(lif->netdev);
+
+ return 0;
+}
+
int ionic_open(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
int err;
- netif_carrier_off(netdev);
-
err = ionic_txrx_alloc(lif);
if (err)
return err;
err = ionic_txrx_init(lif);
if (err)
- goto err_txrx_free;
-
- err = ionic_txrx_enable(lif);
- if (err)
- goto err_txrx_deinit;
-
- netif_set_real_num_tx_queues(netdev, lif->nxqs);
- netif_set_real_num_rx_queues(netdev, lif->nxqs);
-
- set_bit(IONIC_LIF_UP, lif->state);
+ goto err_out;
- ionic_link_status_check_request(lif);
- if (netif_carrier_ok(netdev))
- netif_tx_wake_all_queues(netdev);
+ /* don't start the queues until we have link */
+ if (netif_carrier_ok(netdev)) {
+ err = ionic_start_queues(lif);
+ if (err)
+ goto err_txrx_deinit;
+ }
return 0;
err_txrx_deinit:
ionic_txrx_deinit(lif);
-err_txrx_free:
+err_out:
ionic_txrx_free(lif);
return err;
}
+static void ionic_stop_queues(struct ionic_lif *lif)
+{
+ if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
+ return;
+
+ ionic_txrx_disable(lif);
+ netif_tx_disable(lif->netdev);
+}
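
The IONIC_LIF_F_UP handling above makes start/stop idempotent: test_and_set_bit() on the way up, test_and_clear_bit() on the way down, so a second or racing caller sees the bit already flipped and returns early. A minimal user-space sketch of the same pattern, using C11 atomics in place of the kernel bitops (names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool lif_up;

static int demo_start_queues(void)
{
	/* like test_and_set_bit(): returns the previous value */
	if (atomic_exchange(&lif_up, true))
		return 0;		/* already up, nothing to do */
	puts("queues enabled");
	return 0;
}

static void demo_stop_queues(void)
{
	/* like test_and_clear_bit() */
	if (!atomic_exchange(&lif_up, false))
		return;			/* already down */
	puts("queues disabled");
}

int main(void)
{
	demo_start_queues();
	demo_start_queues();	/* no-op: flag was already set */
	demo_stop_queues();
	demo_stop_queues();	/* no-op: flag was already clear */
	return 0;
}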
+
int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- int err = 0;
- if (!test_bit(IONIC_LIF_UP, lif->state)) {
- dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
- __func__, lif->name);
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
return 0;
- }
- dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
- clear_bit(IONIC_LIF_UP, lif->state);
- /* carrier off before disabling queues to avoid watchdog timeout */
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- netif_tx_disable(netdev);
-
- ionic_txrx_disable(lif);
- ionic_lif_quiesce(lif);
+ ionic_stop_queues(lif);
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
- return err;
+ return 0;
}
static int ionic_get_vf_config(struct net_device *netdev,
@@ -1871,7 +1944,7 @@ int ionic_reset_queues(struct ionic_lif *lif)
/* Put off the next watchdog timeout */
netif_trans_update(lif->netdev);
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
@@ -1881,7 +1954,7 @@ int ionic_reset_queues(struct ionic_lif *lif)
if (!err && running)
ionic_open(lif->netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return err;
}
@@ -1910,6 +1983,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
ionic_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 2 * HZ;
+ netif_carrier_off(netdev);
+
netdev->min_mtu = IONIC_MIN_MTU;
netdev->max_mtu = IONIC_MAX_MTU;
@@ -1944,6 +2019,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
goto err_out_free_netdev;
}
+ ionic_debugfs_add_lif(lif);
+
/* allocate queues */
err = ionic_qcqs_alloc(lif);
if (err)
@@ -2003,6 +2080,85 @@ static void ionic_lif_reset(struct ionic_lif *lif)
mutex_unlock(&lif->ionic->dev_cmd_lock);
}
+static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+
+ if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
+
+ dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
+
+ netif_device_detach(lif->netdev);
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
+ ionic_stop_queues(lif);
+ }
+
+ if (netif_running(lif->netdev)) {
+ ionic_txrx_deinit(lif);
+ ionic_txrx_free(lif);
+ }
+ ionic_lifs_deinit(ionic);
+ ionic_reset(ionic);
+ ionic_qcqs_free(lif);
+
+ dev_info(ionic->dev, "FW Down: LIFs stopped\n");
+}
+
+static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ int err;
+
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
+
+ dev_info(ionic->dev, "FW Up: restarting LIFs\n");
+
+ ionic_init_devinfo(ionic);
+ ionic_port_init(ionic);
+ err = ionic_qcqs_alloc(lif);
+ if (err)
+ goto err_out;
+
+ err = ionic_lifs_init(ionic);
+ if (err)
+ goto err_qcqs_free;
+
+ if (lif->registered)
+ ionic_lif_set_netdev_info(lif);
+
+ ionic_rx_filter_replay(lif);
+
+ if (netif_running(lif->netdev)) {
+ err = ionic_txrx_alloc(lif);
+ if (err)
+ goto err_lifs_deinit;
+
+ err = ionic_txrx_init(lif);
+ if (err)
+ goto err_txrx_free;
+ }
+
+ clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
+ ionic_link_status_check_request(lif);
+ netif_device_attach(lif->netdev);
+ dev_info(ionic->dev, "FW Up: LIFs restarted\n");
+
+ return;
+
+err_txrx_free:
+ ionic_txrx_free(lif);
+err_lifs_deinit:
+ ionic_lifs_deinit(ionic);
+err_qcqs_free:
+ ionic_qcqs_free(lif);
+err_out:
+ dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
+}
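
ionic_lif_handle_fw_up() tears down in strict reverse order on failure through the label ladder at the bottom. A hedged, self-contained sketch of that goto-unwind idiom; the two alloc helpers are stand-ins, not ionic functions:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int alloc_queues(void **p) { *p = malloc(64); return *p ? 0 : -ENOMEM; }
static int init_lifs(void **p)    { *p = malloc(64); return *p ? 0 : -ENOMEM; }

static int demo_fw_up(void)
{
	void *qcqs, *lifs;
	int err;

	err = alloc_queues(&qcqs);
	if (err)
		goto err_out;

	err = init_lifs(&lifs);
	if (err)
		goto err_qcqs_free;

	free(lifs);		/* success-path cleanup, demo only */
	free(qcqs);
	return 0;

	/* unwind in reverse order of construction */
err_qcqs_free:
	free(qcqs);
err_out:
	fprintf(stderr, "restart failed: %d\n", err);
	return err;
}

int main(void) { return demo_fw_up(); }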
+
static void ionic_lif_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
@@ -2015,7 +2171,8 @@ static void ionic_lif_free(struct ionic_lif *lif)
/* free queues */
ionic_qcqs_free(lif);
- ionic_lif_reset(lif);
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ ionic_lif_reset(lif);
/* free lif info */
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
@@ -2048,13 +2205,17 @@ void ionic_lifs_free(struct ionic *ionic)
static void ionic_lif_deinit(struct ionic_lif *lif)
{
- if (!test_bit(IONIC_LIF_INITED, lif->state))
+ if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
return;
- clear_bit(IONIC_LIF_INITED, lif->state);
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ cancel_work_sync(&lif->deferred.work);
+ cancel_work_sync(&lif->tx_timeout_work);
+ ionic_rx_filters_deinit(lif);
+ }
- ionic_rx_filters_deinit(lif);
- ionic_lif_rss_deinit(lif);
+ if (lif->netdev->features & NETIF_F_RXHASH)
+ ionic_lif_rss_deinit(lif);
napi_disable(&lif->adminqcq->napi);
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
@@ -2107,13 +2268,6 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
NAPI_POLL_WEIGHT);
- err = ionic_request_irq(lif, qcq);
- if (err) {
- netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
- netif_napi_del(&qcq->napi);
- return err;
- }
-
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR)
@@ -2122,8 +2276,6 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
qcq->flags |= IONIC_QCQ_F_INITED;
- ionic_debugfs_add_qcq(lif, qcq);
-
return 0;
}
@@ -2159,6 +2311,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
if (err)
return err;
+ lif->last_eid = 0;
q->hw_type = ctx.comp.q_init.hw_type;
q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
q->dbval = IONIC_DBELL_QID(q->hw_index);
@@ -2171,8 +2324,6 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
qcq->flags |= IONIC_QCQ_F_INITED;
- ionic_debugfs_add_qcq(lif, qcq);
-
return 0;
}
@@ -2193,24 +2344,34 @@ static int ionic_station_set(struct ionic_lif *lif)
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
-
+ netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
+ ctx.comp.lif_getattr.mac);
if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
return 0;
- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
- addr.sa_family = AF_INET;
- err = eth_prepare_mac_addr_change(netdev, &addr);
- if (err) {
- netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM\n",
- addr.sa_data);
- return 0;
- }
+ if (!is_zero_ether_addr(netdev->dev_addr)) {
+ /* If the netdev mac is non-zero and doesn't match the default
+ * device address, it was set by something earlier and we're
+ * likely here again after a fw-upgrade reset. We need to be
+ * sure the netdev mac is in our filter list.
+ */
+ if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
+ netdev->dev_addr))
+ ionic_lif_addr(lif, netdev->dev_addr, true);
+ } else {
+ /* Update the netdev mac with the device's mac */
+ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ addr.sa_family = AF_INET;
+ err = eth_prepare_mac_addr_change(netdev, &addr);
+ if (err) {
+ netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
+ addr.sa_data, err);
+ return 0;
+ }
- netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
- netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, false);
+ eth_commit_mac_addr_change(netdev, &addr);
+ }
- eth_commit_mac_addr_change(netdev, &addr);
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
ionic_lif_addr(lif, netdev->dev_addr, true);
@@ -2226,8 +2387,6 @@ static int ionic_lif_init(struct ionic_lif *lif)
int dbpage_num;
int err;
- ionic_debugfs_add_lif(lif);
-
mutex_lock(&lif->ionic->dev_cmd_lock);
ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
@@ -2277,9 +2436,11 @@ static int ionic_lif_init(struct ionic_lif *lif)
if (err)
goto err_out_notifyq_deinit;
- err = ionic_rx_filters_init(lif);
- if (err)
- goto err_out_notifyq_deinit;
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ err = ionic_rx_filters_init(lif);
+ if (err)
+ goto err_out_notifyq_deinit;
+ }
err = ionic_station_set(lif);
if (err)
@@ -2287,7 +2448,7 @@ static int ionic_lif_init(struct ionic_lif *lif)
lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
- set_bit(IONIC_LIF_INITED, lif->state);
+ set_bit(IONIC_LIF_F_INITED, lif->state);
INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
@@ -2375,6 +2536,12 @@ int ionic_lifs_register(struct ionic *ionic)
{
int err;
+ /* the netdev is not registered on the management device; it is
+ * only used as a vehicle for napi operations on the adminq
+ */
+ if (ionic->is_mgmt_nic)
+ return 0;
+
INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
ionic->nb.notifier_call = ionic_lif_notify;
@@ -2389,8 +2556,6 @@ int ionic_lifs_register(struct ionic *ionic)
dev_err(ionic->dev, "Cannot register net device, aborting\n");
return err;
}
-
- ionic_link_status_check_request(ionic->master_lif);
ionic->master_lif->registered = true;
return 0;
@@ -2408,9 +2573,8 @@ void ionic_lifs_unregister(struct ionic *ionic)
* current model, so don't bother searching the
* ionic->lif for candidates to unregister
*/
- cancel_work_sync(&ionic->master_lif->deferred.work);
- cancel_work_sync(&ionic->master_lif->tx_timeout_work);
- if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
+ if (ionic->master_lif &&
+ ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(ionic->master_lif->netdev);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 9c5a7dd45f9d..5d4ffda5c05f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -98,6 +98,7 @@ struct ionic_deferred_work {
union {
unsigned int rx_mode;
u8 addr[ETH_ALEN];
+ u8 fw_status;
};
};
@@ -121,14 +122,15 @@ struct ionic_lif_sw_stats {
};
enum ionic_lif_state_flags {
- IONIC_LIF_INITED,
- IONIC_LIF_SW_DEBUG_STATS,
- IONIC_LIF_UP,
- IONIC_LIF_LINK_CHECK_REQUESTED,
- IONIC_LIF_QUEUE_RESET,
+ IONIC_LIF_F_INITED,
+ IONIC_LIF_F_SW_DEBUG_STATS,
+ IONIC_LIF_F_UP,
+ IONIC_LIF_F_LINK_CHECK_REQUESTED,
+ IONIC_LIF_F_QUEUE_RESET,
+ IONIC_LIF_F_FW_RESET,
/* leave this as last */
- IONIC_LIF_STATE_SIZE
+ IONIC_LIF_F_STATE_SIZE
};
#define IONIC_LIF_NAME_MAX_SZ 32
@@ -136,7 +138,7 @@ struct ionic_lif {
char name[IONIC_LIF_NAME_MAX_SZ];
struct list_head list;
struct net_device *netdev;
- DECLARE_BITMAP(state, IONIC_LIF_STATE_SIZE);
+ DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE);
struct ionic *ionic;
bool registered;
unsigned int index;
@@ -179,7 +181,6 @@ struct ionic_lif {
u32 rx_coalesce_usecs; /* what the user asked for */
u32 rx_coalesce_hw; /* what the hw is using */
- u32 flags;
struct work_struct tx_timeout_work;
};
@@ -225,6 +226,9 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
return (units * div) / mult;
}
+void ionic_link_status_check_request(struct ionic_lif *lif);
+void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+ struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
void ionic_lifs_free(struct ionic *ionic);
void ionic_lifs_deinit(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index a8e3fb73b465..3344bc1f7671 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/utsname.h>
+#include <linux/vermagic.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -15,7 +16,6 @@
MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
MODULE_AUTHOR("Pensando Systems, Inc");
MODULE_LICENSE("GPL");
-MODULE_VERSION(IONIC_DRV_VERSION);
static const char *ionic_error_to_str(enum ionic_status_code code)
{
@@ -58,6 +58,8 @@ static const char *ionic_error_to_str(enum ionic_status_code code)
return "IONIC_RC_BAD_ADDR";
case IONIC_RC_DEV_CMD:
return "IONIC_RC_DEV_CMD";
+ case IONIC_RC_ENOSUPP:
+ return "IONIC_RC_ENOSUPP";
case IONIC_RC_ERROR:
return "IONIC_RC_ERROR";
case IONIC_RC_ERDMA:
@@ -76,6 +78,7 @@ static int ionic_error_to_errno(enum ionic_status_code code)
case IONIC_RC_EQTYPE:
case IONIC_RC_EQID:
case IONIC_RC_EINVAL:
+ case IONIC_RC_ENOSUPP:
return -EINVAL;
case IONIC_RC_EPERM:
return -EPERM;
@@ -240,11 +243,16 @@ static void ionic_adminq_cb(struct ionic_queue *q,
static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- struct ionic_queue *adminq = &lif->adminqcq->q;
+ struct ionic_queue *adminq;
int err = 0;
WARN_ON(in_interrupt());
+ if (!lif->adminqcq)
+ return -EIO;
+
+ adminq = &lif->adminqcq->q;
+
spin_lock(&lif->adminq_lock);
if (!ionic_q_has_space(adminq, 1)) {
err = -ENOSPC;
@@ -278,9 +286,11 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
err = ionic_adminq_post(lif, ctx);
if (err) {
- name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
- netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
- name, ctx->cmd.cmd.opcode, err);
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+ netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
+ name, ctx->cmd.cmd.opcode, err);
+ }
return err;
}
@@ -357,7 +367,10 @@ try_again:
done, duration / HZ, duration);
if (!done && hb) {
- ionic_dev_cmd_clean(ionic);
+ /* It is possible (but unlikely) that FW was busy and missed a
+ * heartbeat check but is still alive and will process this
+ * request, so don't clean the dev_cmd in this case.
+ */
dev_warn(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n",
ionic_opcode_to_str(opcode), opcode);
return -ENXIO;
@@ -414,7 +427,7 @@ int ionic_identify(struct ionic *ionic)
memset(ident, 0, sizeof(*ident));
ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX);
- strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION,
+ strncpy(ident->drv.driver_ver_str, UTS_RELEASE,
sizeof(ident->drv.driver_ver_str) - 1);
mutex_lock(&ionic->dev_cmd_lock);
@@ -496,16 +509,16 @@ int ionic_port_init(struct ionic *ionic)
size_t sz;
int err;
- if (idev->port_info)
- return 0;
-
- idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
- idev->port_info = dma_alloc_coherent(ionic->dev, idev->port_info_sz,
- &idev->port_info_pa,
- GFP_KERNEL);
if (!idev->port_info) {
- dev_err(ionic->dev, "Failed to allocate port info, aborting\n");
- return -ENOMEM;
+ idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
+ idev->port_info = dma_alloc_coherent(ionic->dev,
+ idev->port_info_sz,
+ &idev->port_info_pa,
+ GFP_KERNEL);
+ if (!idev->port_info) {
+ dev_err(ionic->dev, "Failed to allocate port info\n");
+ return -ENOMEM;
+ }
}
sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data));
@@ -558,8 +571,6 @@ int ionic_port_reset(struct ionic *ionic)
static int __init ionic_init_module(void)
{
- pr_info("%s %s, ver %s\n",
- IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION);
ionic_debugfs_create();
return ionic_bus_register_driver();
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 7a093f148ee5..80eeb7696e01 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
#include <linux/netdevice.h>
+#include <linux/dynamic_debug.h>
#include <linux/etherdevice.h>
#include "ionic.h"
@@ -17,17 +18,49 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
devm_kfree(dev, f);
}
-int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f)
+void ionic_rx_filter_replay(struct ionic_lif *lif)
{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .filter_id = cpu_to_le32(f->filter_id),
- },
- };
-
- return ionic_adminq_post_wait(lif, &ctx);
+ struct ionic_rx_filter_add_cmd *ac;
+ struct ionic_admin_ctx ctx;
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ unsigned int i;
+ int err;
+
+ ac = &ctx.cmd.rx_filter_add;
+
+ for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+ head = &lif->rx_filters.by_id[i];
+ hlist_for_each_entry_safe(f, tmp, head, by_id) {
+ ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
+ memcpy(ac, &f->cmd, sizeof(f->cmd));
+ dev_dbg(&lif->netdev->dev, "replay filter command:\n");
+ dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ &ctx.cmd, sizeof(ctx.cmd), true);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err) {
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
+ err,
+ le16_to_cpu(ac->vlan.vlan));
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
+ err, ac->mac.addr);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC_VLAN:
+ netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
+ err,
+ le16_to_cpu(ac->vlan.vlan),
+ ac->mac.addr);
+ break;
+ }
+ }
+ }
+ }
}
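
hlist_for_each_entry_safe() above caches the next node before the loop body runs, so a filter could be unlinked mid-walk without breaking iteration. The same idea in portable C with a hand-rolled singly linked list (types and names are illustrative):

#include <stdio.h>

struct filter {
	int id;
	struct filter *next;
};

static void replay_filters(struct filter *head)
{
	struct filter *f, *tmp;

	for (f = head; f; f = tmp) {
		tmp = f->next;	/* saved before acting on f */
		printf("replaying filter %d\n", f->id);
		/* f could be freed or unlinked here without losing tmp */
	}
}

int main(void)
{
	struct filter f2 = { 2, NULL };
	struct filter f1 = { 1, &f2 };

	replay_filters(&f1);
	return 0;
}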
int ionic_rx_filters_init(struct ionic_lif *lif)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
index b6aec9c19918..cf8f4c0a961c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -24,7 +24,7 @@ struct ionic_rx_filters {
};
void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f);
-int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f);
+void ionic_rx_filter_replay(struct ionic_lif *lif);
int ionic_rx_filters_init(struct ionic_lif *lif);
void ionic_rx_filters_deinit(struct ionic_lif *lif);
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index a1e9796a660a..8f2a8fb029f1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -118,8 +118,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
IONIC_NUM_TX_Q_STATS +
@@ -151,8 +151,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"txq_%d_%s",
@@ -190,8 +190,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"rxq_%d_cq_%s",
@@ -247,8 +247,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
txqcq = lif_to_txqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
@@ -281,8 +281,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
rxqcq = lif_to_rxqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index e452f4242ba0..d233b6e77b1e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -158,7 +158,7 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
}
/* no packet processing while resetting */
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
+ if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
stats->dropped++;
return;
}
@@ -593,6 +593,22 @@ void ionic_tx_flush(struct ionic_cq *cq)
work_done, 0);
}
+void ionic_tx_empty(struct ionic_queue *q)
+{
+ struct ionic_desc_info *desc_info;
+ int done = 0;
+
+ /* walk the not-yet-completed tx entries, if any */
+ while (q->head != q->tail) {
+ desc_info = q->tail;
+ q->tail = desc_info->next;
+ ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+ done++;
+ }
+}
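
ionic_tx_empty() drains descriptors from tail to head, reclaiming anything the hardware never completed. A simplified index-based ring showing the same drain; the real queue links desc_info nodes rather than using bare indices:

#include <stdio.h>

#define RING_SZ 8

struct demo_ring {
	int buf[RING_SZ];
	unsigned int head;	/* next slot the producer fills */
	unsigned int tail;	/* oldest slot not yet reclaimed */
};

static void demo_ring_empty(struct demo_ring *q)
{
	/* walk the not-yet-completed entries, if any */
	while (q->tail != q->head) {
		printf("reclaiming slot %u\n", q->tail % RING_SZ);
		q->tail++;
	}
}

int main(void)
{
	struct demo_ring q = { .head = 3, .tail = 0 };

	demo_ring_empty(&q);
	return 0;
}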
+
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
int err;
@@ -632,10 +648,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
return 0;
@@ -1026,7 +1039,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
int ndescs;
int err;
- if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) {
+ if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 53775c62c85a..71973e3c35a6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -9,6 +9,7 @@ void ionic_tx_flush(struct ionic_cq *cq);
void ionic_rx_fill(struct ionic_queue *q);
void ionic_rx_empty(struct ionic_queue *q);
+void ionic_tx_empty(struct ionic_queue *q);
int ionic_rx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 3dce769d83a1..86153660d245 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -1316,7 +1316,7 @@ struct netxen_minidump_template_hdr {
u32 driver_info_word4;
u32 saved_state_array[NX_DUMP_STATE_ARRAY_LEN];
u32 capture_size_array[NX_DUMP_CAP_SIZE_ARRAY_LEN];
- u32 rsvd[0];
+ u32 rsvd[];
};
/* Common Entry Header: Common to All Entry Types */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 6a2d91d58968..66f45fce90fa 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -748,24 +748,7 @@ static int netxen_set_intr_coalesce(struct net_device *netdev,
if (ethcoal->rx_coalesce_usecs > 0xffff ||
ethcoal->rx_max_coalesced_frames > 0xffff ||
ethcoal->tx_coalesce_usecs > 0xffff ||
- ethcoal->tx_max_coalesced_frames > 0xffff ||
- ethcoal->rx_coalesce_usecs_irq ||
- ethcoal->rx_max_coalesced_frames_irq ||
- ethcoal->tx_coalesce_usecs_irq ||
- ethcoal->tx_max_coalesced_frames_irq ||
- ethcoal->stats_block_coalesce_usecs ||
- ethcoal->use_adaptive_rx_coalesce ||
- ethcoal->use_adaptive_tx_coalesce ||
- ethcoal->pkt_rate_low ||
- ethcoal->rx_coalesce_usecs_low ||
- ethcoal->rx_max_coalesced_frames_low ||
- ethcoal->tx_coalesce_usecs_low ||
- ethcoal->tx_max_coalesced_frames_low ||
- ethcoal->pkt_rate_high ||
- ethcoal->rx_coalesce_usecs_high ||
- ethcoal->rx_max_coalesced_frames_high ||
- ethcoal->tx_coalesce_usecs_high ||
- ethcoal->tx_max_coalesced_frames_high)
+ ethcoal->tx_max_coalesced_frames > 0xffff)
return -EINVAL;
if (!ethcoal->rx_coalesce_usecs ||
@@ -923,6 +906,8 @@ netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
}
const struct ethtool_ops netxen_nic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = netxen_nic_get_drvinfo,
.get_regs_len = netxen_nic_get_regs_len,
.get_regs = netxen_nic_get_regs,
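
The deleted field-by-field checks above became redundant once drivers could declare .supported_coalesce_params: the ethtool core rejects any coalesce field outside the advertised mask before the driver callback runs. A sketch of the declaration pattern, kernel context assumed and the demo_ name illustrative:

#include <linux/ethtool.h>

static const struct ethtool_ops demo_ethtool_ops = {
	/* core returns -EOPNOTSUPP for any other coalesce parameter */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	/* the driver then validates only the ranges of fields it honors */
};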
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 03bdd2e26329..38a65b984e47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4691,26 +4691,20 @@ static void qed_chain_free_single(struct qed_dev *cdev,
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
- void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
- u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
- if (!pp_virt_addr_tbl)
+ if (!pp_addr_tbl)
return;
- if (!p_pbl_virt)
- goto out;
-
for (i = 0; i < page_cnt; i++) {
- if (!pp_virt_addr_tbl[i])
+ if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
break;
dma_free_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
- pp_virt_addr_tbl[i],
- *(dma_addr_t *)p_pbl_virt);
-
- p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ pp_addr_tbl[i].virt_addr,
+ pp_addr_tbl[i].dma_map);
}
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
@@ -4720,9 +4714,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
pbl_size,
p_chain->pbl_sp.p_virt_table,
p_chain->pbl_sp.p_phys_table);
-out:
- vfree(p_chain->pbl.pp_virt_addr_tbl);
- p_chain->pbl.pp_virt_addr_tbl = NULL;
+
+ vfree(p_chain->pbl.pp_addr_tbl);
+ p_chain->pbl.pp_addr_tbl = NULL;
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -4823,19 +4817,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
{
u32 page_cnt = p_chain->page_cnt, size, i;
dma_addr_t p_phys = 0, p_pbl_phys = 0;
- void **pp_virt_addr_tbl = NULL;
+ struct addr_tbl_entry *pp_addr_tbl;
u8 *p_pbl_virt = NULL;
void *p_virt = NULL;
- size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = vzalloc(size);
- if (!pp_virt_addr_tbl)
+ size = page_cnt * sizeof(*pp_addr_tbl);
+ pp_addr_tbl = vzalloc(size);
+ if (!pp_addr_tbl)
return -ENOMEM;
/* The allocation of the PBL table is done with its full size, since it
* is expected to be contiguous.
* qed_chain_init_pbl_mem() is called even in a case of an allocation
- * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * failure, since the address table was previously allocated, and it
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
@@ -4849,8 +4843,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
p_chain->b_external_pbl = true;
}
- qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
- pp_virt_addr_tbl);
+ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
if (!p_pbl_virt)
return -ENOMEM;
@@ -4869,7 +4862,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
/* Fill the PBL table with the physical address of the page */
*(dma_addr_t *)p_pbl_virt = p_phys;
/* Keep the virtual address of the page */
- p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+ p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
+ p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
}
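
Recording dma_map alongside virt_addr means teardown never has to read physical addresses back out of the PBL pages themselves, which is what let qed_chain_free_pbl() drop its p_pbl_virt walk above. A compact sketch of the paired-table idea outside the kernel; dma_free_coherent() appears only as a comment and the names are illustrative:

#include <stdlib.h>

struct addr_tbl_entry_demo {
	void *virt_addr;
	unsigned long long dma_map;	/* stand-in for dma_addr_t */
};

static void free_chain_pages(struct addr_tbl_entry_demo *tbl, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].virt_addr)
			break;		/* table is filled front-to-back */
		/* dma_free_coherent(dev, PAGE_SIZE,
		 *		     tbl[i].virt_addr, tbl[i].dma_map); */
		free(tbl[i].virt_addr);
	}
	free(tbl);
}

int main(void)
{
	struct addr_tbl_entry_demo *tbl = calloc(4, sizeof(*tbl));

	if (tbl)
		free_chain_pages(tbl, 4);  /* all-NULL table: breaks at i == 0 */
	return 0;
}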
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 1a5fc2ae351c..29810a1aa210 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -369,8 +369,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
- int rc = -EINVAL;
u16 rx_mode = 0;
+ int rc;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 2c189c637cca..96356e897c80 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1087,9 +1087,6 @@ static void qed_update_pf_params(struct qed_dev *cdev,
#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
#define QED_PERIODIC_DB_REC_INTERVAL \
msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
-#define QED_PERIODIC_DB_REC_WAIT_COUNT 10
-#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
- (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
enum qed_slowpath_wq_flag wq_flag,
@@ -1123,7 +1120,7 @@ void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
- int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;
+ int i;
if (IS_VF(cdev))
return;
@@ -1135,13 +1132,7 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev)
/* Stop queuing new delayed works */
cdev->hwfns[i].slowpath_wq_active = false;
- /* Wait until the last periodic doorbell recovery is executed */
- while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
- &cdev->hwfns[i].slowpath_task_flags) &&
- sleep_count--)
- msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);
-
- flush_workqueue(cdev->hwfns[i].slowpath_wq);
+ cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
destroy_workqueue(cdev->hwfns[i].slowpath_wq);
}
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8a426afb6a55..812c7766e096 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1566,7 +1566,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
- u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+ u16 sw_rx_index, len;
struct eth_fast_path_rx_reg_cqe *fp_cqe;
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
@@ -1596,17 +1596,6 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
continue;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
- /* Memory barrier to prevent the CPU from doing speculative
- * reads of CQE/BD before reading hw_comp_cons. If the CQE is
- * read before it is written by FW, then FW writes CQE and SB,
- * and then the CPU reads the hw_comp_cons, it will use an old
- * CQE.
- */
- rmb();
-
/* Get the CQE from the completion ring */
cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
@@ -2087,6 +2076,7 @@ err:
}
static const struct ethtool_ops qede_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
@@ -2133,6 +2123,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index d1ce4531d01a..fe72bb6c9455 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1746,7 +1746,8 @@ unlock:
}
static int qede_parse_actions(struct qede_dev *edev,
- struct flow_action *flow_action)
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
int i;
@@ -1756,6 +1757,9 @@ static int qede_parse_actions(struct qede_dev *edev,
return -EINVAL;
}
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -1970,7 +1974,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action))
+ if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -2038,7 +2042,7 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
return -EINVAL;
}
- if (qede_parse_actions(edev, flow_action))
+ if (qede_parse_actions(edev, flow_action, NULL))
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 374a4d4371f9..134611aa2c9a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -418,7 +418,7 @@ struct qlcnic_83xx_dump_template_hdr {
u32 saved_state[16];
u32 cap_sizes[8];
u32 ocm_wnd_reg[16];
- u32 rsvd[0];
+ u32 rsvd[];
};
struct qlcnic_82xx_dump_template_hdr {
@@ -436,7 +436,7 @@ struct qlcnic_82xx_dump_template_hdr {
u32 cap_sizes[8];
u32 rsvd[7];
u32 capabilities;
- u32 rsvd1[0];
+ u32 rsvd1[];
};
#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
@@ -740,7 +740,7 @@ struct qlcnic_hostrq_rx_ctx {
The following is packed:
- N hostrq_rds_rings
- N hostrq_sds_rings */
- char data[0];
+ char data[];
} __packed;
struct qlcnic_cardrsp_rds_ring{
@@ -769,7 +769,7 @@ struct qlcnic_cardrsp_rx_ctx {
The following is packed:
- N cardrsp_rds_rings
- N cardrs_sds_rings */
- char data[0];
+ char data[];
} __packed;
#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 2a533280b124..29b9c728a65e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
ahw->diag_cnt = 0;
ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
if (ret)
- goto fail_diag_irq;
+ goto fail_mbx_args;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
intrpt_id = ahw->intr_tbl[0].id;
@@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
done:
qlcnic_free_mbx_args(&cmd);
+
+fail_mbx_args:
qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_irq:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 75d83c3cbf27..5c2a3acf1e89 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1542,24 +1542,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
if (ethcoal->rx_coalesce_usecs > 0xffff ||
ethcoal->rx_max_coalesced_frames > 0xffff ||
ethcoal->tx_coalesce_usecs > 0xffff ||
- ethcoal->tx_max_coalesced_frames > 0xffff ||
- ethcoal->rx_coalesce_usecs_irq ||
- ethcoal->rx_max_coalesced_frames_irq ||
- ethcoal->tx_coalesce_usecs_irq ||
- ethcoal->tx_max_coalesced_frames_irq ||
- ethcoal->stats_block_coalesce_usecs ||
- ethcoal->use_adaptive_rx_coalesce ||
- ethcoal->use_adaptive_tx_coalesce ||
- ethcoal->pkt_rate_low ||
- ethcoal->rx_coalesce_usecs_low ||
- ethcoal->rx_max_coalesced_frames_low ||
- ethcoal->tx_coalesce_usecs_low ||
- ethcoal->tx_max_coalesced_frames_low ||
- ethcoal->pkt_rate_high ||
- ethcoal->rx_coalesce_usecs_high ||
- ethcoal->rx_max_coalesced_frames_high ||
- ethcoal->tx_coalesce_usecs_high ||
- ethcoal->tx_max_coalesced_frames_high)
+ ethcoal->tx_max_coalesced_frames > 0xffff)
return -EINVAL;
err = qlcnic_config_intr_coalesce(adapter, ethcoal);
@@ -1834,6 +1817,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
}
const struct ethtool_ops qlcnic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = qlcnic_get_drvinfo,
.get_regs_len = qlcnic_get_regs_len,
.get_regs = qlcnic_get_regs,
@@ -1865,6 +1850,8 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
};
const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = qlcnic_get_drvinfo,
.get_regs_len = qlcnic_get_regs_len,
.get_regs = qlcnic_get_regs,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index bebe38d74d66..251d4ac4af02 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1288,11 +1288,8 @@ static int emac_tso_csum(struct emac_adapter *adpt,
memset(tpd, 0, sizeof(*tpd));
memset(&extra_tpd, 0, sizeof(extra_tpd));
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
+
TPD_PKT_LEN_SET(&extra_tpd, skb->len);
TPD_LSO_SET(&extra_tpd, 1);
TPD_LSOV_SET(&extra_tpd, 1);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index fbf4cbcf1a65..40efe60eff8d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -57,7 +57,7 @@ static int rmnet_register_real_device(struct net_device *real_dev)
if (rmnet_is_real_dev_registered(real_dev))
return 0;
- port = kzalloc(sizeof(*port), GFP_ATOMIC);
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -122,13 +122,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
}
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev || !dev)
+ if (!real_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
+ }
- if (!data[IFLA_RMNET_MUX_ID])
- return -EINVAL;
-
- ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (!ep)
return -ENOMEM;
@@ -139,7 +138,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
goto err0;
port = rmnet_get_port_rtnl(real_dev);
- err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
+ err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep, extack);
if (err)
goto err1;
@@ -263,12 +262,16 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
{
u16 mux_id;
- if (!data || !data[IFLA_RMNET_MUX_ID])
+ if (!data || !data[IFLA_RMNET_MUX_ID]) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID not specified");
return -EINVAL;
+ }
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
+ if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid MUX ID");
return -ERANGE;
+ }
return 0;
}
@@ -279,7 +282,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
- struct rmnet_endpoint *ep;
struct rmnet_port *port;
u16 mux_id;
@@ -294,19 +296,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- if (rmnet_get_endpoint(port, mux_id)) {
- NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
- return -EINVAL;
- }
- ep = rmnet_get_endpoint(port, priv->mux_id);
- if (!ep)
- return -ENODEV;
- hlist_del_init_rcu(&ep->hlnode);
- hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+ if (mux_id != priv->mux_id) {
+ struct rmnet_endpoint *ep;
+
+ ep = rmnet_get_endpoint(port, priv->mux_id);
+ if (!ep)
+ return -ENODEV;
- ep->mux_id = mux_id;
- priv->mux_id = mux_id;
+ if (rmnet_get_endpoint(port, mux_id)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MUX ID already exists");
+ return -EINVAL;
+ }
+
+ hlist_del_init_rcu(&ep->hlnode);
+ hlist_add_head_rcu(&ep->hlnode,
+ &port->muxed_ep[mux_id]);
+
+ ep->mux_id = mux_id;
+ priv->mux_id = mux_id;
+ }
}
if (data[IFLA_RMNET_FLAGS]) {
@@ -406,14 +416,22 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
/* If there is more than one rmnet dev attached, it's probably being
* used for muxing. Skip the bridging in that case
*/
- if (port->nr_rmnet_devs > 1)
+ if (port->nr_rmnet_devs > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "more than one rmnet dev attached");
return -EINVAL;
+ }
- if (port->rmnet_mode != RMNET_EPMODE_VND)
+ if (port->rmnet_mode != RMNET_EPMODE_VND) {
+ NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
return -EINVAL;
+ }
+
+ if (rmnet_is_real_dev_registered(slave_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "slave cannot be another rmnet dev");
- if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
+ }
err = rmnet_register_real_device(slave_dev);
if (err)
@@ -475,4 +493,5 @@ static void __exit rmnet_exit(void)
module_init(rmnet_init)
module_exit(rmnet_exit)
+MODULE_ALIAS_RTNL_LINK("rmnet");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 26ad40f19c64..d58b51d277f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -212,6 +212,8 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->needs_free_netdev = true;
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
+ rmnet_dev->features |= NETIF_F_LLTX;
+
/* This perm addr will be used as interface identifier by IPv6 */
rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
eth_random_addr(rmnet_dev->perm_addr);
@@ -222,16 +224,17 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
- struct rmnet_endpoint *ep)
+ struct rmnet_endpoint *ep,
+ struct netlink_ext_ack *extack)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;
- if (ep->egress_dev)
- return -EINVAL;
-
- if (rmnet_get_endpoint(port, id))
+ if (rmnet_get_endpoint(port, id)) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
return -EBUSY;
+ }
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index 14d77c709d4a..4967f3461ed1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -11,7 +11,8 @@ int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
- struct rmnet_endpoint *ep);
+ struct rmnet_endpoint *ep,
+ struct netlink_ext_ack *extack);
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 791d99b9e1cf..c51b48dc3639 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -212,7 +212,6 @@ enum rtl_registers {
/* Unlimited maximum PCI burst. */
#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
- RxMissed = 0x4c,
Cfg9346 = 0x50,
Config0 = 0x51,
Config1 = 0x52,
@@ -576,6 +575,7 @@ struct rtl8169_tc_offsets {
__le64 tx_errors;
__le32 tx_multi_collision;
__le16 tx_aborted;
+ __le16 rx_missed;
};
enum rtl_flag {
@@ -689,6 +689,12 @@ static void rtl_unlock_config_regs(struct rtl8169_private *tp)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
+static void rtl_pci_commit(struct rtl8169_private *tp)
+{
+ /* Read an arbitrary register to commit a preceding PCI write */
+ RTL_R8(tp, ChipCmd);
+}
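
rtl_pci_commit() names a classic MMIO idiom: PCI writes are posted, so a read from the same device is the portable way to know a preceding write has reached the hardware. A generic kernel-style sketch; the register offset is made up:

#include <linux/io.h>

static inline void demo_mmio_commit(void __iomem *regs)
{
	writel(0x1, regs + 0x40);	/* posted: may still be in flight */
	(void)readl(regs + 0x40);	/* read-back forces the write out */
}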
+
static bool rtl_is_8125(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_60;
@@ -1044,6 +1050,13 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
+static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
+{
+ /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
+ *cmd |= 0x7f0 << 18;
+}
+
DECLARE_RTL_COND(rtl_eriar_cond)
{
return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
@@ -1052,9 +1065,12 @@ DECLARE_RTL_COND(rtl_eriar_cond)
static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
u32 val, int type)
{
+ u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
+
BUG_ON((addr & 3) || (mask == 0));
RTL_W32(tp, ERIDR, val);
- RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
@@ -1067,7 +1083,10 @@ static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
- RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
+ u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
+
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
RTL_R32(tp, ERIDR) : ~0;
@@ -1302,10 +1321,6 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
tp->irq_enabled = 0;
}
-#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
-#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
-#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
-
static void rtl_irq_enable(struct rtl8169_private *tp)
{
tp->irq_enabled = 1;
@@ -1319,18 +1334,13 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
rtl_irq_disable(tp);
rtl_ack_events(tp, 0xffffffff);
- /* PCI commit */
- RTL_R8(tp, ChipCmd);
+ rtl_pci_commit(tp);
}
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
- struct net_device *dev = tp->dev;
struct phy_device *phydev = tp->phydev;
- if (!netif_running(dev))
- return;
-
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (phydev->speed == SPEED_1000) {
@@ -1536,7 +1546,7 @@ static int rtl8169_set_features(struct net_device *dev,
}
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
rtl_unlock_work(tp);
@@ -1622,7 +1632,7 @@ static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
u32 cmd;
RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
- RTL_R32(tp, CounterAddrHigh);
+ rtl_pci_commit(tp);
cmd = (u64)paddr & DMA_BIT_MASK(32);
RTL_W32(tp, CounterAddrLow, cmd);
RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
@@ -1689,6 +1699,7 @@ static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
tp->tc_offset.tx_errors = counters->tx_errors;
tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
tp->tc_offset.tx_aborted = counters->tx_aborted;
+ tp->tc_offset.rx_missed = counters->rx_missed;
tp->tc_offset.inited = true;
return ret;
@@ -1946,7 +1957,7 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
rtl_unlock_work(tp);
@@ -2008,6 +2019,8 @@ out:
}
static const struct ethtool_ops rtl8169_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
@@ -2044,7 +2057,7 @@ static void rtl_enable_eee(struct rtl8169_private *tp)
phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
}
-static void rtl8169_get_mac_version(struct rtl8169_private *tp)
+static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{
/*
* The driver currently handles the 8168Bf and the 8168Be identically
@@ -2060,7 +2073,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
static const struct rtl_mac_info {
u16 mask;
u16 val;
- u16 mac_version;
+ enum mac_version ver;
} mac_info[] = {
/* 8125 family. */
{ 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
@@ -2127,6 +2140,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
+ /* RTL8401, reportedly works if treated as RTL8101e */
+ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
{ 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
{ 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
@@ -2147,22 +2162,22 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
const struct rtl_mac_info *p = mac_info;
- u16 reg = RTL_R32(tp, TxConfig) >> 20;
+ enum mac_version ver;
- while ((reg & p->mask) != p->val)
+ while ((xid & p->mask) != p->val)
p++;
- tp->mac_version = p->mac_version;
-
- if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
- } else if (!tp->supports_gmii) {
- if (tp->mac_version == RTL_GIGA_MAC_VER_42)
- tp->mac_version = RTL_GIGA_MAC_VER_43;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
- tp->mac_version = RTL_GIGA_MAC_VER_47;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
- tp->mac_version = RTL_GIGA_MAC_VER_48;
+ ver = p->ver;
+
+ if (ver != RTL_GIGA_MAC_NONE && !gmii) {
+ if (ver == RTL_GIGA_MAC_VER_42)
+ ver = RTL_GIGA_MAC_VER_43;
+ else if (ver == RTL_GIGA_MAC_VER_45)
+ ver = RTL_GIGA_MAC_VER_47;
+ else if (ver == RTL_GIGA_MAC_VER_46)
+ ver = RTL_GIGA_MAC_VER_48;
}
+
+ return ver;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -2228,8 +2243,8 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
- if (!test_and_set_bit(flag, tp->wk.flags))
- schedule_work(&tp->wk.work);
+ set_bit(flag, tp->wk.flags);
+ schedule_work(&tp->wk.work);
}
static void rtl8169_init_phy(struct rtl8169_private *tp)
@@ -2264,10 +2279,10 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
rtl_unlock_config_regs(tp);
RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
- RTL_R32(tp, MAC4);
+ rtl_pci_commit(tp);
RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
- RTL_R32(tp, MAC0);
+ rtl_pci_commit(tp);
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
rtl_rar_exgmac_set(tp, addr);
@@ -2471,66 +2486,52 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
-static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
+static void rtl_jumbo_config(struct rtl8169_private *tp)
{
- rtl_unlock_config_regs(tp);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168b_1_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168c_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
- r8168dp_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168e_hw_jumbo_enable(tp);
- break;
- default:
- break;
- }
- rtl_lock_config_regs(tp);
-}
+ bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
-static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
-{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
- r8168b_1_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168b_1_hw_jumbo_enable(tp);
+ } else {
+ r8168b_1_hw_jumbo_disable(tp);
+ }
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- r8168c_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168c_hw_jumbo_enable(tp);
+ } else {
+ r8168c_hw_jumbo_disable(tp);
+ }
break;
case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
- r8168dp_hw_jumbo_disable(tp);
+ if (jumbo)
+ r8168dp_hw_jumbo_enable(tp);
+ else
+ r8168dp_hw_jumbo_disable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- r8168e_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168e_hw_jumbo_enable(tp);
+ } else {
+ r8168e_hw_jumbo_disable(tp);
+ }
break;
default:
break;
}
rtl_lock_config_regs(tp);
- if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
pcie_set_readrq(tp->pci_dev, 4096);
}
-static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
-{
- if (mtu > ETH_DATA_LEN)
- rtl_hw_jumbo_enable(tp);
- else
- rtl_hw_jumbo_disable(tp);
-}
-
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
return RTL_R8(tp, ChipCmd) & CmdReset;
@@ -3838,9 +3839,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
static void rtl_hw_start_8169(struct rtl8169_private *tp)
{
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
-
RTL_W8(tp, EarlyTxThres, NoEarlyTx);
tp->cp_cmd |= PCIMulRW;
@@ -3853,8 +3851,6 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
rtl8169_set_magic_reg(tp, tp->mac_version);
- RTL_W32(tp, RxMissed, 0);
-
/* disable interrupt coalescing */
RTL_W16(tp, IntrMitigate, 0x0000);
}
@@ -3877,10 +3873,11 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
- rtl_jumbo_config(tp, tp->dev->mtu);
+ rtl_jumbo_config(tp);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
+
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
@@ -3892,10 +3889,9 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl_jumbo_config(tp, new_mtu);
-
dev->mtu = new_mtu;
netdev_update_features(dev);
+ rtl_jumbo_config(tp);
return 0;
}
@@ -3910,6 +3906,7 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+ desc->opts2 = 0;
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
@@ -3991,17 +3988,15 @@ static int rtl8169_init_ring(struct rtl8169_private *tp)
return rtl8169_rx_fill(tp);
}
-static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
- struct TxDesc *desc)
+static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry)
{
- unsigned int len = tx_skb->len;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ struct TxDesc *desc = tp->TxDescArray + entry;
- dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
-
- desc->opts1 = 0x00;
- desc->opts2 = 0x00;
- desc->addr = 0x00;
- tx_skb->len = 0;
+ dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len,
+ DMA_TO_DEVICE);
+ memset(desc, 0, sizeof(*desc));
+ memset(tx_skb, 0, sizeof(*tx_skb));
}
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
@@ -4017,12 +4012,9 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
if (len) {
struct sk_buff *skb = tx_skb->skb;
- rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
- tp->TxDescArray + entry);
- if (skb) {
+ rtl8169_unmap_tx_skb(tp, entry);
+ if (skb)
dev_consume_skb_any(skb);
- tx_skb->skb = NULL;
- }
}
}
}
@@ -4063,57 +4055,56 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry)
+static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
+ void *addr, unsigned int entry, bool desc_own)
{
- u32 status = opts0 | len;
+ struct TxDesc *txd = tp->TxDescArray + entry;
+ struct device *d = tp_to_dev(tp);
+ dma_addr_t mapping;
+ u32 opts1;
+ int ret;
+ mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(d, mapping);
+ if (unlikely(ret)) {
+ if (net_ratelimit())
+ netif_err(tp, drv, tp->dev, "Failed to map TX data!\n");
+ return ret;
+ }
+
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(opts[1]);
+
+ opts1 = opts[0] | len;
if (entry == NUM_TX_DESC - 1)
- status |= RingEnd;
+ opts1 |= RingEnd;
+ if (desc_own)
+ opts1 |= DescOwn;
+ txd->opts1 = cpu_to_le32(opts1);
- return cpu_to_le32(status);
+ tp->tx_skb[entry].len = len;
+
+ return 0;
}
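rtl8169_tx_map() folds the old rtl8169_get_txd_opts1() and the DMA mapping into one helper: opts[0] | len becomes opts1, the last ring slot gets RingEnd, and only fragment descriptors receive DescOwn immediately; the head descriptor is handed to the NIC last, in start_xmit, so the chip never sees a half-built chain. A toy version of just the flag composition (the bit positions are placeholders, not the driver's real values):

#include <stdio.h>
#include <stdint.h>

#define NUM_TX_DESC	64
#define DESC_OWN	(1u << 31)	/* placeholder bits; the driver */
#define RING_END	(1u << 30)	/* defines these in its own enum */

static uint32_t txd_opts1(uint32_t opts0, uint32_t len,
			  unsigned int entry, int desc_own)
{
	uint32_t v = opts0 | len;

	if (entry == NUM_TX_DESC - 1)
		v |= RING_END;
	if (desc_own)
		v |= DESC_OWN;
	return v;
}

int main(void)
{
	printf("frag in last slot: %#010x\n",
	       txd_opts1(0, 60, NUM_TX_DESC - 1, 1));
	printf("head in mid slot:  %#010x\n", txd_opts1(0, 60, 3, 0));
	return 0;
}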
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
- u32 *opts)
+ const u32 *opts, unsigned int entry)
{
struct skb_shared_info *info = skb_shinfo(skb);
- unsigned int cur_frag, entry;
- struct TxDesc *uninitialized_var(txd);
- struct device *d = tp_to_dev(tp);
+ unsigned int cur_frag;
- entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
const skb_frag_t *frag = info->frags + cur_frag;
- dma_addr_t mapping;
- u32 len;
- void *addr;
+ void *addr = skb_frag_address(frag);
+ u32 len = skb_frag_size(frag);
entry = (entry + 1) % NUM_TX_DESC;
- txd = tp->TxDescArray + entry;
- len = skb_frag_size(frag);
- addr = skb_frag_address(frag);
- mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, tp->dev,
- "Failed to map TX fragments DMA!\n");
+ if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true)))
goto err_out;
- }
-
- txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
- txd->opts2 = cpu_to_le32(opts[1]);
- txd->addr = cpu_to_le64(mapping);
-
- tp->tx_skb[entry].len = len;
- }
-
- if (cur_frag) {
- tp->tx_skb[entry].skb = skb;
- txd->opts1 |= cpu_to_le32(LastFrag);
}
- return cur_frag;
+ return 0;
err_out:
rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
@@ -4125,36 +4116,13 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
}
-/* msdn_giant_send_check()
- * According to the document of microsoft, the TCP Pseudo Header excludes the
- * packet length for IPv6 TCP large packets.
- */
-static int msdn_giant_send_check(struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct tcphdr *th;
- int ret;
-
- ret = skb_cow_head(skb, 0);
- if (ret)
- return ret;
-
- ipv6h = ipv6_hdr(skb);
- th = tcp_hdr(skb);
-
- th->check = 0;
- th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
-
- return ret;
-}
-
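The helper removed here (and the matching open-coded version deleted from the netsec driver further down) is superseded by tcp_v6_gso_csum_prep(), called in the hunk below. It performs the same computation the deleted comment described: seed th->check with the pseudo-header sum over the addresses and protocol while leaving the packet length out, so the hardware can complete the checksum per segment. A standalone illustration of that partial sum with example addresses (the kernel does this via ~tcp_v6_check(0, saddr, daddr, 0)):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* fold a 32-bit ones'-complement accumulator down to 16 bits */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	struct in6_addr saddr, daddr;
	const uint16_t *p;
	uint32_t sum = 0;
	int i;

	inet_pton(AF_INET6, "2001:db8::1", &saddr);
	inet_pton(AF_INET6, "2001:db8::2", &daddr);

	for (p = (const uint16_t *)&saddr, i = 0; i < 8; i++)
		sum += ntohs(p[i]);
	for (p = (const uint16_t *)&daddr, i = 0; i < 8; i++)
		sum += ntohs(p[i]);
	sum += 6;	/* IPPROTO_TCP; the length is deliberately left out */

	printf("pseudo-header seed: 0x%04x\n", csum_fold16(sum));
	return 0;
}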
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
{
u32 mss = skb_shinfo(skb)->gso_size;
if (mss) {
opts[0] |= TD_LSO;
- opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
+ opts[0] |= mss << TD0_MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb);
@@ -4180,9 +4148,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
break;
case htons(ETH_P_IPV6):
- if (msdn_giant_send_check(skb))
+ if (skb_cow_head(skb, 0))
return false;
+ tcp_v6_gso_csum_prep(skb);
opts[0] |= TD1_GTSENV6;
break;
@@ -4192,7 +4161,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
}
opts[0] |= transport_offset << GTTCPHO_SHIFT;
- opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
+ opts[1] |= mss << TD1_MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
@@ -4260,56 +4229,44 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ unsigned int frags = skb_shinfo(skb)->nr_frags;
struct rtl8169_private *tp = netdev_priv(dev);
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
- struct TxDesc *txd = tp->TxDescArray + entry;
- struct device *d = tp_to_dev(tp);
- dma_addr_t mapping;
- u32 opts[2], len;
- bool stop_queue;
- bool door_bell;
- int frags;
+ struct TxDesc *txd_first, *txd_last;
+ bool stop_queue, door_bell;
+ u32 opts[2];
+
+ txd_first = tp->TxDescArray + entry;
- if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
+ if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
- if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn))
goto err_stop_0;
opts[1] = rtl8169_tx_vlan_tag(skb);
- opts[0] = DescOwn;
+ opts[0] = 0;
- if (rtl_chip_supports_csum_v2(tp)) {
- if (!rtl8169_tso_csum_v2(tp, skb, opts))
- goto err_dma_0;
- } else {
+ if (!rtl_chip_supports_csum_v2(tp))
rtl8169_tso_csum_v1(skb, opts);
- }
-
- len = skb_headlen(skb);
- mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
+ else if (!rtl8169_tso_csum_v2(tp, skb, opts))
goto err_dma_0;
- }
- tp->tx_skb[entry].len = len;
- txd->addr = cpu_to_le64(mapping);
+ if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data,
+ entry, false)))
+ goto err_dma_0;
- frags = rtl8169_xmit_frags(tp, skb, opts);
- if (frags < 0)
- goto err_dma_1;
- else if (frags)
- opts[0] |= FirstFrag;
- else {
- opts[0] |= FirstFrag | LastFrag;
- tp->tx_skb[entry].skb = skb;
+ if (frags) {
+ if (rtl8169_xmit_frags(tp, skb, opts, entry))
+ goto err_dma_1;
+ entry = (entry + frags) % NUM_TX_DESC;
}
- txd->opts2 = cpu_to_le32(opts[1]);
+ txd_last = tp->TxDescArray + entry;
+ txd_last->opts1 |= cpu_to_le32(LastFrag);
+ tp->tx_skb[entry].skb = skb;
skb_tx_timestamp(skb);
@@ -4318,7 +4275,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
- txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
+ txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
/* Force all memory writes to complete before notifying device */
wmb();
@@ -4354,7 +4311,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
err_dma_1:
- rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ rtl8169_unmap_tx_skb(tp, entry);
err_dma_0:
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
@@ -4403,13 +4360,15 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
- u16 pci_status, pci_cmd;
+ int pci_status_errs;
+ u16 pci_cmd;
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- pci_read_config_word(pdev, PCI_STATUS, &pci_status);
- netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
- pci_cmd, pci_status);
+ pci_status_errs = pci_status_get_and_clear_errors(pdev);
+
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
+ pci_cmd, pci_status_errs);
/*
* The recovery sequence below admits a very elaborated explanation:
@@ -4426,11 +4385,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- pci_write_config_word(pdev, PCI_STATUS,
- pci_status & (PCI_STATUS_DETECTED_PARITY |
- PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
-
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
@@ -4441,33 +4395,24 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
dirty_tx = tp->dirty_tx;
smp_rmb();
- tx_left = tp->cur_tx - dirty_tx;
- while (tx_left > 0) {
+ for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) {
unsigned int entry = dirty_tx % NUM_TX_DESC;
- struct ring_info *tx_skb = tp->tx_skb + entry;
+ struct sk_buff *skb = tp->tx_skb[entry].skb;
u32 status;
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
- /* This barrier is needed to keep us from reading
- * any other fields out of the Tx descriptor until
- * we know the status of DescOwn
- */
- dma_rmb();
+ rtl8169_unmap_tx_skb(tp, entry);
- rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
- tp->TxDescArray + entry);
- if (tx_skb->skb) {
+ if (skb) {
pkts_compl++;
- bytes_compl += tx_skb->skb->len;
- napi_consume_skb(tx_skb->skb, budget);
- tx_skb->skb = NULL;
+ bytes_compl += skb->len;
+ napi_consume_skb(skb, budget);
}
dirty_tx++;
- tx_left--;
}
if (tp->dirty_tx != dirty_tx) {
@@ -4606,7 +4551,6 @@ process_pkt:
u64_stats_update_end(&tp->rx_stats.syncp);
}
release_descriptor:
- desc->opts2 = 0;
rtl8169_mark_to_asic(desc);
}
@@ -4636,8 +4580,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (unlikely(status & RxFIFOOver &&
tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(tp->dev);
- /* XXX - Hack alert. See rtl_task(). */
- set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
rtl_irq_disable(tp);
@@ -4650,31 +4593,17 @@ out:
static void rtl_task(struct work_struct *work)
{
- static const struct {
- int bitnr;
- void (*action)(struct rtl8169_private *);
- } rtl_work[] = {
- { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
- };
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
- struct net_device *dev = tp->dev;
- int i;
rtl_lock_work(tp);
- if (!netif_running(dev) ||
+ if (!netif_running(tp->dev) ||
!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
goto out_unlock;
- for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
- bool pending;
-
- pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
- if (pending)
- rtl_work[i].action(tp);
- }
-
+ if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags))
+ rtl_reset_work(tp);
out_unlock:
rtl_unlock_work(tp);
}
@@ -4697,17 +4626,6 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void rtl8169_rx_missed(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (tp->mac_version > RTL_GIGA_MAC_VER_06)
- return;
-
- dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff;
- RTL_W32(tp, RxMissed, 0);
-}
-
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
@@ -4757,12 +4675,6 @@ static void rtl8169_down(struct net_device *dev)
netif_stop_queue(dev);
rtl8169_hw_reset(tp);
- /*
- * At this point device interrupts can not be enabled in any function,
- * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
- * and napi is disabled (rtl8169_poll).
- */
- rtl8169_rx_missed(dev);
/* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_rcu();
@@ -4907,8 +4819,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_get_noresume(&pdev->dev);
- if (netif_running(dev) && pm_runtime_active(&pdev->dev))
- rtl8169_rx_missed(dev);
+ netdev_stats_to_stats64(stats, &dev->stats);
do {
start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
@@ -4922,15 +4833,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_bytes = tp->tx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
- stats->rx_dropped = dev->stats.rx_dropped;
- stats->tx_dropped = dev->stats.tx_dropped;
- stats->rx_length_errors = dev->stats.rx_length_errors;
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_crc_errors = dev->stats.rx_crc_errors;
- stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
- stats->rx_missed_errors = dev->stats.rx_missed_errors;
- stats->multicast = dev->stats.multicast;
-
/*
* Fetch additional counter values missing in stats collected by driver
* from tally counters.
@@ -4948,6 +4850,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
le32_to_cpu(tp->tc_offset.tx_multi_collision);
stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
le16_to_cpu(tp->tc_offset.tx_aborted);
+ stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
+ le16_to_cpu(tp->tc_offset.rx_missed);
pm_runtime_put_noidle(&pdev->dev);
}
@@ -5033,7 +4937,6 @@ static int rtl8169_runtime_suspend(struct device *device)
rtl8169_net_suspend(dev);
/* Update counters before going runtime suspend */
- rtl8169_rx_missed(dev);
rtl8169_update_counters(tp);
return 0;
@@ -5098,8 +5001,7 @@ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
pci_clear_master(tp->pci_dev);
RTL_W8(tp, ChipCmd, CmdRxEnb);
- /* PCI commit */
- RTL_R8(tp, ChipCmd);
+ rtl_pci_commit(tp);
break;
default:
break;
@@ -5173,7 +5075,7 @@ static const struct net_device_ops rtl_netdev_ops = {
static void rtl_set_irq_mask(struct rtl8169_private *tp)
{
- tp->irq_mask = RTL_EVENT_NAPI | LinkChg;
+ tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
@@ -5449,9 +5351,10 @@ done:
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
+ int jumbo_max, region, rc;
+ enum mac_version chipset;
struct net_device *dev;
- int chipset, region;
- int jumbo_max, rc;
+ u16 xid;
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
@@ -5509,10 +5412,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf;
+
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp);
- if (tp->mac_version == RTL_GIGA_MAC_NONE)
+ chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
+ if (chipset == RTL_GIGA_MAC_NONE) {
+ dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
return -ENODEV;
+ }
+
+ tp->mac_version = chipset;
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
@@ -5530,8 +5439,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- chipset = tp->mac_version;
-
rc = rtl_alloc_irq(tp);
if (rc < 0) {
dev_err(&pdev->dev, "Can't allocate interrupt\n");
@@ -5549,12 +5456,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -5571,26 +5474,26 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ if (rtl_chip_supports_csum_v2(tp))
+ dev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ dev->features |= dev->hw_features;
+
+ /* There have been a number of reports that using SG/TSO results in
+ * tx timeouts. However, for a lot of people SG/TSO works fine.
+ * Therefore disable both features by default, but allow users to
+ * enable them. Use at your own risk!
+ */
if (rtl_chip_supports_csum_v2(tp)) {
- dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
} else {
+ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
}
- /* RTL8168e-vl and one RTL8168c variant are known to have a
- * HW issue with TSO.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
- dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- }
-
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
@@ -5622,8 +5525,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mdio_unregister;
netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr,
- (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
+ rtl_chip_infos[chipset].name, dev->dev_addr, xid,
pci_irq_vector(pdev, 0));
if (jumbo_max)
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index e367e77c773b..b73f7d023e99 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -796,6 +796,11 @@ static void rtl8168g_disable_aldps(struct phy_device *phydev)
phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0);
}
+static void rtl8168g_enable_gphy_10m(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+}
+
static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev)
{
phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
@@ -904,8 +909,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp,
r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
- /* enable GPHY 10M */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+ rtl8168g_enable_gphy_10m(phydev);
/* SAR ADC performance */
phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
@@ -940,8 +944,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
- /* enable GPHY 10M */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+ rtl8168g_enable_gphy_10m(phydev);
ioffset = rtl8168h_2_get_adc_bias_ioffset(tp);
if (ioffset != 0xffff)
@@ -1063,8 +1066,7 @@ static void rtl8117_hw_phy_config(struct rtl8169_private *tp,
r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
- /* enable GPHY 10M */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+ rtl8168g_enable_gphy_10m(phydev);
r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
@@ -1171,7 +1173,7 @@ static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp,
phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+ rtl8168g_enable_gphy_10m(phydev);
rtl8125_config_eee_phy(phydev);
}
@@ -1236,7 +1238,7 @@ static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+ rtl8168g_enable_gphy_10m(phydev);
rtl8125_config_eee_phy(phydev);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 58ca126518a2..8ed73f44405d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -142,69 +142,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWALCR1] = 0x00b4,
};
-static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
- SH_ETH_OFFSET_DEFAULTS,
-
- [EDSR] = 0x0000,
- [EDMR] = 0x0400,
- [EDTRR] = 0x0408,
- [EDRRR] = 0x0410,
- [EESR] = 0x0428,
- [EESIPR] = 0x0430,
- [TDLAR] = 0x0010,
- [TDFAR] = 0x0014,
- [TDFXR] = 0x0018,
- [TDFFR] = 0x001c,
- [RDLAR] = 0x0030,
- [RDFAR] = 0x0034,
- [RDFXR] = 0x0038,
- [RDFFR] = 0x003c,
- [TRSCER] = 0x0438,
- [RMFCR] = 0x0440,
- [TFTR] = 0x0448,
- [FDR] = 0x0450,
- [RMCR] = 0x0458,
- [RPADIR] = 0x0460,
- [FCFTR] = 0x0468,
- [CSMR] = 0x04E4,
-
- [ECMR] = 0x0500,
- [RFLR] = 0x0508,
- [ECSR] = 0x0510,
- [ECSIPR] = 0x0518,
- [PIR] = 0x0520,
- [APR] = 0x0554,
- [MPR] = 0x0558,
- [PFTCR] = 0x055c,
- [PFRCR] = 0x0560,
- [TPAUSER] = 0x0564,
- [MAHR] = 0x05c0,
- [MALR] = 0x05c8,
- [CEFCR] = 0x0740,
- [FRECR] = 0x0748,
- [TSFRCR] = 0x0750,
- [TLFRCR] = 0x0758,
- [RFCR] = 0x0760,
- [MAFCR] = 0x0778,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWSLC] = 0x0038,
- [TSU_VTAG0] = 0x0058,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
- [TSU_ADRH0] = 0x0100,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008C,
-};
-
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -569,6 +506,9 @@ static void sh_eth_set_rate_gether(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (WARN_ON(!mdp->cd->gecmr))
+ return;
+
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, GECMR_10, GECMR);
@@ -590,7 +530,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
.chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex,
- .register_type = SH_ETH_REG_FAST_RZ,
+ .register_type = SH_ETH_REG_GIGABIT,
.edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD,
@@ -663,6 +603,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
@@ -788,6 +729,7 @@ static struct sh_eth_cpu_data r8a77980_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.nbst = 1,
@@ -957,6 +899,9 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (WARN_ON(!mdp->cd->gecmr))
+ return;
+
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, 0x00000000, GECMR);
@@ -1002,6 +947,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
@@ -1042,6 +988,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
@@ -1083,6 +1030,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
@@ -2140,11 +2088,13 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(EESR);
add_reg(EESIPR);
add_reg(TDLAR);
- add_reg(TDFAR);
+ if (!cd->no_xdfar)
+ add_reg(TDFAR);
add_reg(TDFXR);
add_reg(TDFFR);
add_reg(RDLAR);
- add_reg(RDFAR);
+ if (!cd->no_xdfar)
+ add_reg(RDFAR);
add_reg(RDFXR);
add_reg(RDFFR);
add_reg(TRSCER);
@@ -2179,21 +2129,26 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
if (cd->tpauser)
add_reg(TPAUSER);
add_reg(TPAUSECR);
- add_reg(GECMR);
+ if (cd->gecmr)
+ add_reg(GECMR);
if (cd->bculr)
add_reg(BCULR);
add_reg(MAHR);
add_reg(MALR);
- add_reg(TROCR);
- add_reg(CDCR);
- add_reg(LCCR);
- add_reg(CNDCR);
+ if (!cd->no_tx_cntrs) {
+ add_reg(TROCR);
+ add_reg(CDCR);
+ add_reg(LCCR);
+ add_reg(CNDCR);
+ }
add_reg(CEFCR);
add_reg(FRECR);
add_reg(TSFRCR);
add_reg(TLFRCR);
- add_reg(CERCR);
- add_reg(CEECR);
+ if (cd->cexcr) {
+ add_reg(CERCR);
+ add_reg(CEECR);
+ }
add_reg(MAFCR);
if (cd->rtrate)
add_reg(RTRATE);
@@ -3121,9 +3076,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
- case SH_ETH_REG_FAST_RZ:
- reg_offset = sh_eth_offset_fast_rz;
- break;
case SH_ETH_REG_FAST_RCAR:
reg_offset = sh_eth_offset_fast_rcar;
break;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 850726301e1c..c1b3751b12c4 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -145,7 +145,6 @@ enum {
enum {
SH_ETH_REG_GIGABIT,
- SH_ETH_REG_FAST_RZ,
SH_ETH_REG_FAST_RCAR,
SH_ETH_REG_FAST_SH4,
SH_ETH_REG_FAST_SH3_SH2
@@ -490,6 +489,7 @@ struct sh_eth_cpu_data {
unsigned apr:1; /* EtherC has APR */
unsigned mpr:1; /* EtherC has MPR */
unsigned tpauser:1; /* EtherC has TPAUSER */
+ unsigned gecmr:1; /* EtherC has GECMR */
unsigned bculr:1; /* EtherC has BCULR */
unsigned tsu:1; /* EtherC has TSU */
unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 466483c4ac67..21465cb3d60a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -476,6 +476,7 @@ static int sxgbe_get_regs_len(struct net_device *dev)
}
static const struct ethtool_ops sxgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = sxgbe_getdrvinfo,
.get_msglevel = sxgbe_getmsglevel,
.set_msglevel = sxgbe_setmsglevel,
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 14393767ef9f..4580b30caae1 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -685,10 +685,70 @@ reset_nic:
return rc ? rc : rc2;
}
-int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
- bool spoofchk)
+static int efx_ef10_sriov_set_privilege_mask(struct efx_nic *efx, int vf_i,
+ u32 mask, u32 value)
{
- return spoofchk ? -EOPNOTSUPP : 0;
+ MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
+ MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 old_mask, new_mask;
+ size_t outlen;
+ int rc;
+
+ EFX_WARN_ON_PARANOID((value & ~mask) != 0);
+
+ /* Get privilege mask */
+ MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, nic_data->pf_index,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf_i);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK,
+ pm_inbuf, sizeof(pm_inbuf),
+ pm_outbuf, sizeof(pm_outbuf), &outlen);
+
+ if (rc != 0)
+ return rc;
+ if (outlen != MC_CMD_PRIVILEGE_MASK_OUT_LEN)
+ return -EIO;
+
+ old_mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ new_mask = old_mask & ~mask;
+ new_mask |= value;
+
+ if (new_mask == old_mask)
+ return 0;
+
+ new_mask |= MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE;
+
+ /* Set privilege mask */
+ MCDI_SET_DWORD(pm_inbuf, PRIVILEGE_MASK_IN_NEW_MASK, new_mask);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK,
+ pm_inbuf, sizeof(pm_inbuf),
+ pm_outbuf, sizeof(pm_outbuf), &outlen);
+
+ if (rc != 0)
+ return rc;
+ if (outlen != MC_CMD_PRIVILEGE_MASK_OUT_LEN)
+ return -EIO;
+
+ return 0;
+}
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i, bool spoofchk)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* Can't enable spoofchk if firmware doesn't support it. */
+ if (!(nic_data->datapath_caps &
+ BIT(MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN)) &&
+ spoofchk)
+ return -EOPNOTSUPP;
+
+ return efx_ef10_sriov_set_privilege_mask(efx, vf_i,
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX,
+ spoofchk ? 0 : MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX);
}
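The new efx_ef10_sriov_set_privilege_mask() is a read-modify-write over MCDI: fetch the VF's current privilege mask, clear the bits covered by the mask, OR in the new value, and issue the second MC_CMD_PRIVILEGE_MASK call (with the DO_CHANGE flag set) only when something actually changed. set_vf_spoofchk() then maps "spoofchk on" to dropping the MAC_SPOOFING_TX privilege and "spoofchk off" to restoring it. The bit-level behaviour, with a made-up constant standing in for the MCDI definition:

#include <stdio.h>
#include <stdint.h>

#define GRP_MAC_SPOOFING_TX	0x0800u	/* stand-in for the MCDI constant */

/* same RMW as the MCDI helper above */
static uint32_t apply_mask(uint32_t old, uint32_t mask, uint32_t value)
{
	return (old & ~mask) | value;
}

int main(void)
{
	uint32_t priv = 0xffffu;	/* pretend the VF starts fully privileged */
	uint32_t on, off;

	on = apply_mask(priv, GRP_MAC_SPOOFING_TX, 0);
	off = apply_mask(on, GRP_MAC_SPOOFING_TX, GRP_MAC_SPOOFING_TX);
	printf("start %#06x -> spoofchk on %#06x -> off %#06x\n",
	       priv, on, off);
	return 0;
}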
int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4481f21a1f43..256807c28ff7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -113,7 +113,6 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 95395d67ea2d..66dcab140449 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -151,24 +151,6 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
-static inline void efx_filter_rfs_expire(struct work_struct *data)
-{
- struct delayed_work *dwork = to_delayed_work(data);
- struct efx_channel *channel;
- unsigned int time, quota;
-
- channel = container_of(dwork, struct efx_channel, filter_work);
- time = jiffies - channel->rfs_last_expiry;
- quota = channel->rfs_filter_count * time / (30 * HZ);
- if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
- channel->rfs_last_expiry += time;
- /* Ensure we do more work eventually even if NAPI poll is not happening */
- schedule_delayed_work(dwork, 30 * HZ);
-}
-#define efx_filter_rfs_enabled() 1
-#else
-static inline void efx_filter_rfs_expire(struct work_struct *data) {}
-#define efx_filter_rfs_enabled() 0
#endif
/* RSS contexts */
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 73d4e39b5b16..c492523b986c 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -485,6 +485,23 @@ void efx_remove_eventq(struct efx_channel *channel)
*
*************************************************************************/
+#ifdef CONFIG_RFS_ACCEL
+static void efx_filter_rfs_expire(struct work_struct *data)
+{
+ struct delayed_work *dwork = to_delayed_work(data);
+ struct efx_channel *channel;
+ unsigned int time, quota;
+
+ channel = container_of(dwork, struct efx_channel, filter_work);
+ time = jiffies - channel->rfs_last_expiry;
+ quota = channel->rfs_filter_count * time / (30 * HZ);
+ if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+ channel->rfs_last_expiry += time;
+ /* Ensure we do more work eventually even if NAPI poll is not happening */
+ schedule_delayed_work(dwork, 30 * HZ);
+}
+#endif
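The relocated worker and the gate added to efx_poll() below agree by construction: the worker computes quota = rfs_filter_count * time / (30 * HZ) and expires filters only when quota >= 20, while the poll path schedules the work exactly when rfs_filter_count * time >= 600 * HZ, which is the same inequality with the division multiplied out. A quick numeric check (HZ assumed to be 250 here; the equivalence holds for any value):

#include <stdio.h>

#define HZ 250	/* demo assumption; the kernel's HZ is a config choice */

int main(void)
{
	const unsigned long counts[] = { 10, 100, 1000 };
	const unsigned long secs[] = { 1, 10, 60 };
	unsigned int i, j;

	for (i = 0; i < 3; i++)
		for (j = 0; j < 3; j++) {
			unsigned long time = secs[j] * HZ;
			unsigned long quota = counts[i] * time / (30 * HZ);

			printf("count=%4lu t=%2lus: quota=%3lu worker=%d gate=%d\n",
			       counts[i], secs[j], quota,
			       quota >= 20, counts[i] * time >= 600UL * HZ);
		}
	return 0;
}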
+
/* Allocate and initialise a channel structure. */
struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
@@ -1167,6 +1184,9 @@ static int efx_poll(struct napi_struct *napi, int budget)
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
struct efx_nic *efx = channel->efx;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int time;
+#endif
int spent;
netif_vdbg(efx, intr, efx->net_dev,
@@ -1186,7 +1206,10 @@ static int efx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_RFS_ACCEL
/* Perhaps expire some ARFS filters */
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ time = jiffies - channel->rfs_last_expiry;
+ /* Would our quota be >= 20? */
+ if (channel->rfs_filter_count * time >= 600 * HZ)
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index b0d76bc19673..1799ff9a45d9 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -200,11 +200,11 @@ void efx_link_status_changed(struct efx_nic *efx)
unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
{
/* The maximum MTU that we can fit in a single page, allowing for
- * framing, overhead and XDP headroom.
+ * framing, overhead and XDP headroom + tailroom.
*/
int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
efx->rx_prefix_size + efx->type->rx_buffer_padding +
- efx->rx_ip_align + XDP_PACKET_HEADROOM;
+ efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM;
return PAGE_SIZE - overhead;
}
@@ -302,8 +302,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
- efx->rx_ip_align + efx->rx_dma_len);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + EFX_XDP_HEADROOM +
+ efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM);
+
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 993b5769525b..04e88d05e8ff 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -232,9 +232,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
bool adaptive, rx_may_override_tx;
int rc;
- if (coalesce->use_adaptive_tx_coalesce)
- return -EINVAL;
-
efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
if (coalesce->rx_coalesce_usecs != rx_usecs)
@@ -582,6 +579,7 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXFH: {
struct efx_rss_context *ctx = &efx->rss_context;
+ __u64 data;
mutex_lock(&efx->rss_lock);
if (info->flow_type & FLOW_RSS && info->rss_context) {
@@ -591,35 +589,38 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
goto out_unlock;
}
}
- info->data = 0;
+
+ data = 0;
if (!efx_rss_active(ctx)) /* No RSS */
- goto out_unlock;
+ goto out_setdata_unlock;
+
switch (info->flow_type & ~FLOW_RSS) {
case UDP_V4_FLOW:
- if (ctx->rx_hash_udp_4tuple)
- /* fall through */
- case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
case UDP_V6_FLOW:
if (ctx->rx_hash_udp_4tuple)
- /* fall through */
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
case TCP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
case IPV6_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
+ data = RXH_IP_SRC | RXH_IP_DST;
break;
default:
break;
}
+out_setdata_unlock:
+ info->data = data;
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
@@ -1134,6 +1135,9 @@ static int efx_ethtool_set_fecparam(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_regs_len = efx_ethtool_get_regs_len,
.get_regs = efx_ethtool_get_regs,
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 08bd6a321918..db90d94e24c9 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -603,9 +603,6 @@ static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
bool adaptive, rx_may_override_tx;
int rc;
- if (coalesce->use_adaptive_tx_coalesce)
- return -EINVAL;
-
ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
if (coalesce->rx_coalesce_usecs != rx_usecs)
@@ -1311,6 +1308,9 @@ static int ef4_ethtool_get_module_info(struct net_device *net_dev,
}
const struct ethtool_ops ef4_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = ef4_ethtool_get_drvinfo,
.get_regs_len = ef4_ethtool_get_regs_len,
.get_regs = ef4_ethtool_get_regs,
diff --git a/drivers/net/ethernet/sfc/falcon/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
index 605f486fa675..729a05c1b0cf 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
@@ -88,11 +88,11 @@ static int ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
struct falcon_board *board = falcon_board(efx);
- struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
+ struct i2c_client *client = i2c_new_client_device(&board->i2c_adap, info);
int rc;
- if (!client)
- return -EIO;
+ if (IS_ERR(client))
+ return PTR_ERR(client);
/* Read-to-clear alarm/interrupt status */
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a49ea2e719b6..a529ff395ead 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -288,7 +288,7 @@ struct ef4_rx_buffer {
struct ef4_rx_page_state {
dma_addr_t dma_addr;
- unsigned int __pad[0] ____cacheline_aligned;
+ unsigned int __pad[] ____cacheline_aligned;
};
/**
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8164f0edcbf0..b084e623b5f4 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,6 +91,12 @@
#define EFX_RX_BUF_ALIGNMENT 4
#endif
+/* Non-standard XDP_PACKET_HEADROOM and tailroom to satisfy XDP_REDIRECT and
+ * still fit two standard MTU size packets into a single 4K page.
+ */
+#define EFX_XDP_HEADROOM 128
+#define EFX_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
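The numbers behind the comment: the generic XDP_PACKET_HEADROOM is 256 bytes, and together with the tailroom (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), roughly 320 bytes on 64-bit builds) that pushes a ~1.5 KB MTU buffer past 2 KB, so only one buffer fits per 4 KB page. Halving the headroom to 128 keeps the per-buffer step under 2 KB. A back-of-envelope check with assumed sizes (none of these values are read from kernel headers):

#include <stdio.h>

int main(void)
{
	const int page = 4096;
	const int frame = 1536;		/* ~1500-byte MTU frame, rounded up */
	const int tailroom = 320;	/* approx aligned skb_shared_info */
	const int headrooms[] = { 256, 128 };	/* generic XDP vs EFX_XDP_HEADROOM */
	int i;

	for (i = 0; i < 2; i++) {
		int step = headrooms[i] + frame + tailroom;

		printf("headroom %3d -> step %4d -> %d buffer(s) per page\n",
		       headrooms[i], step, page / step);
	}
	return 0;
}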
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
struct hwtstamp_config;
@@ -333,7 +339,7 @@ struct efx_rx_buffer {
struct efx_rx_page_state {
dma_addr_t dma_addr;
- unsigned int __pad[0] ____cacheline_aligned;
+ unsigned int __pad[] ____cacheline_aligned;
};
/**
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a2042f16babc..260352d97d9d 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -302,7 +302,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
efx->rx_prefix_size);
xdp.data = *ehp;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
/* No support yet for XDP metadata */
xdp_set_data_meta_invalid(&xdp);
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index ee8beb87bdc1..e10c23833515 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -412,10 +412,10 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
+ EFX_XDP_HEADROOM;
rx_buf->page = page;
rx_buf->page_offset = page_offset + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
+ EFX_XDP_HEADROOM;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
@@ -433,7 +433,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
void efx_rx_config_page_split(struct efx_nic *efx)
{
efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
- XDP_PACKET_HEADROOM,
+ EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 8aafc54a4684..19b58563cb78 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -287,9 +287,8 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
return PTR_ERR(segments);
dev_consume_skb_any(skb);
- skb = segments;
- skb_list_walk_safe(skb, skb, next) {
+ skb_list_walk_safe(segments, skb, next) {
skb_mark_not_on_list(skb);
efx_enqueue_skb(tx_queue, skb);
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index db6b2988e632..6646eba9f57f 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -582,40 +582,23 @@ static void ioc3_timer(struct timer_list *t)
/* Try to find a PHY. There is no apparent relation between the MII addresses
* in the SGI documentation and what we find in reality, so we simply probe
- * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
- * onboard IOC3s has the special oddity that probing doesn't seem to find it
- * yet the interface seems to work fine, so if probing fails we for now will
- * simply default to PHY 31 instead of bailing out.
+ * for the PHY.
*/
static int ioc3_mii_init(struct ioc3_private *ip)
{
- int ioc3_phy_workaround = 1;
- int i, found = 0, res = 0;
u16 word;
+ int i;
for (i = 0; i < 32; i++) {
word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);
if (word != 0xffff && word != 0x0000) {
- found = 1;
- break; /* Found a PHY */
- }
- }
-
- if (!found) {
- if (ioc3_phy_workaround) {
- i = 31;
- } else {
- ip->mii.phy_id = -1;
- res = -ENODEV;
- goto out;
+ ip->mii.phy_id = i;
+ return 0;
}
}
-
- ip->mii.phy_id = i;
-
-out:
- return res;
+ ip->mii.phy_id = -1;
+ return -ENODEV;
}
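The simplified probe treats a PHYSID1 read of all-ones or all-zeroes as an empty MII address (a floating or tied-down bus) and takes the first address that returns anything else. A self-contained imitation with a fake bus (the PHY address and ID value are invented for the demo):

#include <stdio.h>
#include <stdint.h>

/* fake MII bus: one PHY at address 5 */
static uint16_t physid1(int addr)
{
	return addr == 5 ? 0x0141 : 0xffff;
}

static int find_phy(void)
{
	int i;

	for (i = 0; i < 32; i++) {
		uint16_t word = physid1(i);

		if (word != 0xffff && word != 0x0000)
			return i;
	}
	return -1;	/* no PHY found */
}

int main(void)
{
	printf("phy at address %d\n", find_phy());
	return 0;
}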
static void ioc3_mii_start(struct ioc3_private *ip)
@@ -865,14 +848,14 @@ static int ioc3eth_probe(struct platform_device *pdev)
ip = netdev_priv(dev);
ip->dma_dev = pdev->dev.parent;
ip->regs = devm_platform_ioremap_resource(pdev, 0);
- if (!ip->regs) {
- err = -ENOMEM;
+ if (IS_ERR(ip->regs)) {
+ err = PTR_ERR(ip->regs);
goto out_free;
}
ip->ssram = devm_platform_ioremap_resource(pdev, 1);
- if (!ip->ssram) {
- err = -ENOMEM;
+ if (IS_ERR(ip->ssram)) {
+ err = PTR_ERR(ip->ssram);
goto out_free;
}
diff --git a/drivers/net/ethernet/sgi/meth.h b/drivers/net/ethernet/sgi/meth.h
index 5b145c6bad60..2ba15c263e8b 100644
--- a/drivers/net/ethernet/sgi/meth.h
+++ b/drivers/net/ethernet/sgi/meth.h
@@ -1,19 +1,3 @@
-
-/*
- * snull.h -- definitions for the network module
- *
- * Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet
- * Copyright (C) 2001 O'Reilly & Associates
- *
- * The source code in this file can be freely used, adapted,
- * and redistributed in source or binary form, so long as an
- * acknowledgment appears in derived source files. The citation
- * should list that the code comes from the book "Linux Device
- * Drivers" by Alessandro Rubini and Jonathan Corbet, published
- * by O'Reilly & Associates. No warranty is attached;
- * we cannot take responsibility for errors or fitness for use.
- */
-
/* version dependencies have been confined to a separate file */
/* Tunable parameters */
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 49a6a9167af4..fc168f85e7af 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2493,20 +2493,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_init(dev);
if (retval < 0)
- goto out_disable_resources;
+ goto out_init_fail;
netif_carrier_off(dev);
retval = smsc911x_mii_init(pdev, dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
- goto out_disable_resources;
+ goto out_init_fail;
}
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_disable_resources;
+ goto out_init_fail;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
@@ -2547,9 +2547,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
return 0;
-out_disable_resources:
+out_init_fail:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+out_disable_resources:
(void)smsc911x_disable_resources(pdev);
out_enable_resources_fail:
smsc911x_free_resources(pdev);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index e8224b543dfc..a5a0fb60193a 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -589,6 +589,8 @@ static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
}
static const struct ethtool_ops netsec_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = netsec_et_get_drvinfo,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -896,9 +898,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
if (ret != NETSEC_XDP_TX)
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -906,9 +908,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
}
break;
default:
@@ -919,9 +921,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len, true);
break;
}
@@ -1020,8 +1021,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
* cache state. Since we paid the allocation cost if
* building an skb fails try to put the page into cache
*/
- __page_pool_put_page(dring->page_pool, page,
- pkt_len, true);
+ page_pool_put_page(dring->page_pool, page, pkt_len,
+ true);
netif_err(priv, drv, priv->ndev,
"rx failed to build skb\n");
break;
@@ -1148,11 +1149,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
~tcp_v4_check(0, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0);
} else {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
tx_ctrl.tcp_seg_offload_flag = true;
@@ -1199,7 +1196,7 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
if (id == NETSEC_RING_RX) {
struct page *page = virt_to_page(desc->addr);
- page_pool_put_page(dring->page_pool, page, false);
+ page_pool_put_full_page(dring->page_pool, page, false);
} else if (id == NETSEC_RING_TX) {
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 338e25a6374e..b46f8d2ae6d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,6 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
+ select MDIO_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
@@ -197,6 +198,15 @@ config DWMAC_SUN8I
EMAC ethernet controller.
endif
+config DWMAC_INTEL
+ tristate "Intel GMAC support"
+ default X86
+ depends on X86 && STMMAC_ETH && PCI
+ depends on COMMON_CLK
+ ---help---
+ This selects the Intel platform specific bus support for the
+ stmmac driver. This driver is used for Intel Quark/EHL/TGL.
+
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c59926d96bcc..5a6f265bc540 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -30,5 +30,6 @@ obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 487099092693..6208a68a331d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
+#include <linux/mdio-xpcs.h>
#include <linux/module.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
@@ -33,6 +34,11 @@
#define DWMAC_CORE_5_00 0x50
#define DWMAC_CORE_5_10 0x51
#define DWXGMAC_CORE_2_10 0x21
+#define DWXLGMAC_CORE_2_00 0x20
+
+/* Device ID */
+#define DWXGMAC_ID 0x76
+#define DWXLGMAC_ID 0x27
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
@@ -425,6 +431,12 @@ struct mac_link {
u32 speed5000;
u32 speed10000;
} xgmii;
+ struct {
+ u32 speed25000;
+ u32 speed40000;
+ u32 speed50000;
+ u32 speed100000;
+ } xlgmii;
};
struct mii_regs {
@@ -446,6 +458,8 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
+ const struct mdio_xpcs_ops *xpcs;
+ struct mdio_xpcs_args xpcs_args;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
@@ -456,6 +470,9 @@ struct mac_device_info {
unsigned int pcs;
unsigned int pmt;
unsigned int ps;
+ unsigned int xlgmac;
+ unsigned int num_vlan;
+ u32 vlan_filter[32];
};
struct stmmac_rx_routing {
@@ -467,6 +484,7 @@ int dwmac100_setup(struct stmmac_priv *priv);
int dwmac1000_setup(struct stmmac_priv *priv);
int dwmac4_setup(struct stmmac_priv *priv);
int dwxgmac2_setup(struct stmmac_priv *priv);
+int dwxlgmac2_setup(struct stmmac_priv *priv);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
new file mode 100644
index 000000000000..2e4aaedb93f5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include "dwmac-intel.h"
+#include "stmmac.h"
+
+struct intel_priv_data {
+ int mdio_adhoc_addr; /* MDIO address for SerDes, etc. */
+};
+
+/* This struct is used to associate the PCI function of the MAC controller on
+ * a board, discovered via DMI, with the address of the PHY connected to that
+ * MAC. A negative address means the MAC controller is not connected to a
+ * PHY.
+ */
+struct stmmac_pci_func_data {
+ unsigned int func;
+ int phy_addr;
+};
+
+struct stmmac_pci_dmi_data {
+ const struct stmmac_pci_func_data *func;
+ size_t nfuncs;
+};
+
+struct stmmac_pci_info {
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
+
+static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
+ const struct dmi_system_id *dmi_list)
+{
+ const struct stmmac_pci_func_data *func_data;
+ const struct stmmac_pci_dmi_data *dmi_data;
+ const struct dmi_system_id *dmi_id;
+ int func = PCI_FUNC(pdev->devfn);
+ size_t n;
+
+ dmi_id = dmi_first_match(dmi_list);
+ if (!dmi_id)
+ return -ENODEV;
+
+ dmi_data = dmi_id->driver_data;
+ func_data = dmi_data->func;
+
+ for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
+ if (func_data->func == func)
+ return func_data->phy_addr;
+
+ return -ENODEV;
+}
+
+static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 mask, u32 val)
+{
+ unsigned int retries = 10;
+ int val_rd;
+
+ do {
+ val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
+ if ((val_rd & mask) == (val & mask))
+ return 0;
+ udelay(POLL_DELAY_US);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
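serdes_status_poll() above is a bounded poll: re-read the register until (value & mask) matches the expected bits, pause between attempts, and give up after a fixed retry budget (the driver returns -ETIMEDOUT). A self-contained imitation with a fake register; names and delays here are illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

/* fake register that reports ready on the third read */
static uint32_t fake_mdio_read(void)
{
	static int reads;

	return ++reads >= 3 ? 0x1 : 0x0;
}

static int poll_for(uint32_t mask, uint32_t val,
		    unsigned int retries, unsigned int delay_us)
{
	do {
		if ((fake_mdio_read() & mask) == (val & mask))
			return 0;
		usleep(delay_us);
	} while (--retries);
	return -1;	/* timed out */
}

int main(void)
{
	printf("poll result: %s\n",
	       poll_for(0x1, 0x1, 10, 10) == 0 ? "ready" : "timeout");
	return 0;
}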
+
+static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
+{
+ struct intel_priv_data *intel_priv = priv_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int serdes_phy_addr = 0;
+ u32 data = 0;
+
+ if (!intel_priv->mdio_adhoc_addr)
+ return 0;
+
+ serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+
+ /* assert clk_req */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data |= SERDES_PLL_CLK;
+
+ mdiobus_write(priv->mii, serdes_phy_addr,
+ SERDES_GCR0, data);
+
+ /* check for clk_ack assertion */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_PLL_CLK,
+ SERDES_PLL_CLK);
+
+ if (data) {
+ dev_err(priv->device, "Serdes PLL clk request timeout\n");
+ return data;
+ }
+
+ /* assert lane reset */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data |= SERDES_RST;
+
+ mdiobus_write(priv->mii, serdes_phy_addr,
+ SERDES_GCR0, data);
+
+ /* check for assert lane reset reflection */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_RST,
+ SERDES_RST);
+
+ if (data) {
+ dev_err(priv->device, "Serdes assert lane reset timeout\n");
+ return data;
+ }
+
+ /* move power state to P0 */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data &= ~SERDES_PWR_ST_MASK;
+ data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
+
+ mdiobus_write(priv->mii, serdes_phy_addr,
+ SERDES_GCR0, data);
+
+ /* Check for P0 state */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_PWR_ST_MASK,
+ SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
+
+ if (data) {
+ dev_err(priv->device, "Serdes power state P0 timeout.\n");
+ return data;
+ }
+
+ return 0;
+}
+
+static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int serdes_phy_addr = 0;
+ u32 data = 0;
+
+ if (!intel_priv->mdio_adhoc_addr)
+ return;
+
+ serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+
+ /* move power state to P3 */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data &= ~SERDES_PWR_ST_MASK;
+ data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
+
+ mdiobus_write(priv->mii, serdes_phy_addr,
+ SERDES_GCR0, data);
+
+ /* Check for P3 state */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_PWR_ST_MASK,
+ SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
+
+ if (data) {
+ dev_err(priv->device, "Serdes power state P3 timeout\n");
+ return;
+ }
+
+ /* de-assert clk_req */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data &= ~SERDES_PLL_CLK;
+
+ mdiobus_write(priv->mii, serdes_phy_addr,
+ SERDES_GCR0, data);
+
+ /* check for clk_ack de-assert */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_PLL_CLK,
+ (u32)~SERDES_PLL_CLK);
+
+ if (data) {
+ dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
+ return;
+ }
+
+ /* de-assert lane reset */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data &= ~SERDES_RST;
+
+ mdiobus_write(priv->mii, serdes_phy_addr,
+ SERDES_GCR0, data);
+
+ /* check for de-assert lane reset reflection */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_RST,
+ (u32)~SERDES_RST);
+
+ if (data) {
+ dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
+ return;
+ }
+}
+
+static void common_default_data(struct plat_stmmacenet_data *plat)
+{
+ plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
+
+ plat->mdio_bus_data->needs_reset = true;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[0].use_prio = false;
+ plat->rx_queues_cfg[0].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
+}
+
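+/* Worked example for the clk_csr value above: clk_csr = 2 selects the
+ * 20-35MHz CSR clock range, so with e.g. a 25 MHz csr clock MDC runs at
+ * 25 MHz / 16 ~ 1.56 MHz, comfortably under the 2.5 MHz MDIO limit (the
+ * actual csr clock is board specific; 25 MHz is illustrative only).
+ */
+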
+static int intel_mgbe_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int i;
+
+ plat->clk_csr = 5;
+ plat->has_gmac = 0;
+ plat->has_gmac4 = 1;
+ plat->force_sf_dma_mode = 0;
+ plat->tso_en = 1;
+
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+ plat->rx_queues_cfg[i].chan = i;
+
+ /* Disable Priority config by default */
+ plat->rx_queues_cfg[i].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[i].pkt_route = 0x0;
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[i].use_prio = false;
+ }
+
+ /* FIFO size is 4096 bytes for 1 tx/rx queue */
+ plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
+ plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
+
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ plat->tx_queues_cfg[0].weight = 0x09;
+ plat->tx_queues_cfg[1].weight = 0x0A;
+ plat->tx_queues_cfg[2].weight = 0x0B;
+ plat->tx_queues_cfg[3].weight = 0x0C;
+ plat->tx_queues_cfg[4].weight = 0x0D;
+ plat->tx_queues_cfg[5].weight = 0x0E;
+ plat->tx_queues_cfg[6].weight = 0x0F;
+ plat->tx_queues_cfg[7].weight = 0x10;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 0;
+ plat->dma_cfg->mixed_burst = 0;
+ plat->dma_cfg->aal = 0;
+
+ plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
+ GFP_KERNEL);
+ if (!plat->axi)
+ return -ENOMEM;
+
+ plat->axi->axi_lpi_en = 0;
+ plat->axi->axi_xit_frm = 0;
+ plat->axi->axi_wr_osr_lmt = 1;
+ plat->axi->axi_rd_osr_lmt = 1;
+ plat->axi->axi_blen[0] = 4;
+ plat->axi->axi_blen[1] = 8;
+ plat->axi->axi_blen[2] = 16;
+
+ plat->ptp_max_adj = plat->clk_ptp_rate;
+
+ /* Set system clock */
+ plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
+ "stmmac-clk", NULL, 0,
+ plat->clk_ptp_rate);
+
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ return 0;
+}
+
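+/* The FIFO sizing above is linear in the queue count: the 8-queue EHL
+ * configuration below therefore gets 8 * 4096 = 32 KiB per direction,
+ * and the 6-Rx/4-Tx TGL configuration gets 24 KiB Rx and 16 KiB Tx.
+ */
+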
+static int ehl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ehl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
+ .setup = ehl_sgmii_data,
+};
+
+static int ehl_rgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
+ .setup = ehl_rgmii_data,
+};
+
+static int ehl_pse0_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 2;
+ plat->phy_addr = 1;
+ return ehl_common_data(pdev, plat);
+}
+
+static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+ return ehl_pse0_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse0_rgmii1g_pci_info = {
+ .setup = ehl_pse0_rgmii1g_data,
+};
+
+static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ return ehl_pse0_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse0_sgmii1g_pci_info = {
+ .setup = ehl_pse0_sgmii1g_data,
+};
+
+static int ehl_pse1_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 3;
+ plat->phy_addr = 1;
+ return ehl_common_data(pdev, plat);
+}
+
+static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+ return ehl_pse1_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse1_rgmii1g_pci_info = {
+ .setup = ehl_pse1_rgmii1g_data,
+};
+
+static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ return ehl_pse1_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse1_sgmii1g_pci_info = {
+ .setup = ehl_pse1_sgmii1g_data,
+};
+
+static int tgl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tgl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
+ .setup = tgl_sgmii_data,
+};
+
+static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
+ .func = galileo_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
+};
+
+static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .func = 7,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
+ .func = iot2040_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
+};
+
+static const struct dmi_system_id quark_pci_dmi[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+	 * The asset tag "6ES7647-0AA00-0YA2" is used only by the IOT2020,
+	 * which has a single PCI network device, while the other asset
+	 * tags belong to the IOT2040, which has two.
+	 */
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-0YA2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ },
+ .driver_data = (void *)&iot2040_stmmac_dmi_data,
+ },
+ {}
+};
+
+static int quark_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ /* Set common default data first */
+ common_default_data(plat);
+
+	/* Refuse to load the driver and to register the net device if the
+	 * MAC controller does not connect to any PHY interface.
+	 */
+ ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
+ if (ret < 0) {
+		/* Return the error to the caller on DMI-enabled boards. */
+ if (dmi_get_system_info(DMI_BOARD_NAME))
+ return ret;
+
+		/* Galileo boards with old firmware don't support DMI. We always
+		 * use 1 as the PHY address here, so at least the first MAC
+		 * controller found gets probed.
+		 */
+ ret = 1;
+ }
+
+ plat->bus_id = pci_dev_id(pdev);
+ plat->phy_addr = ret;
+ plat->phy_interface = PHY_INTERFACE_MODE_RMII;
+
+ plat->dma_cfg->pbl = 16;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 1;
+ /* AXI (TODO) */
+
+ return 0;
+}
+
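+/* pci_dev_id() packs bus number and devfn into one value, (bus << 8) |
+ * devfn, so e.g. a MAC at PCI address 00:14.6 gets bus_id 0x00a6 (the
+ * address is illustrative only); this keeps bus_id unique per function.
+ */
+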
+static const struct stmmac_pci_info quark_pci_info = {
+ .setup = quark_default_data,
+};
+
+/**
+ * intel_eth_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to the table of device IDs
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by another driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID
+ * table matches the device. The probe function returns zero when the driver
+ * chooses to take "ownership" of the device, or a negative error code
+ * otherwise.
+ */
+static int intel_eth_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
+ struct intel_priv_data *intel_priv;
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res;
+ int i;
+ int ret;
+
+ intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv),
+ GFP_KERNEL);
+ if (!intel_priv)
+ return -ENOMEM;
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
+ GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+ __func__);
+ return ret;
+ }
+
+ /* Get the base address of device */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
+ if (ret)
+ return ret;
+ break;
+ }
+
+ pci_set_master(pdev);
+
+ plat->bsp_priv = intel_priv;
+ intel_priv->mdio_adhoc_addr = 0x15;
+
+ ret = info->setup(pdev, plat);
+ if (ret)
+ return ret;
+
+ pci_enable_msi(pdev);
+
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[i];
+ res.wol_irq = pdev->irq;
+ res.irq = pdev->irq;
+
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
+}
+
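+/* Note on the BAR loop above: pcim_iomap_regions() is the managed
+ * variant, so the mapping of the first non-empty BAR (the loop breaks
+ * after one hit) is released automatically on driver detach, and the
+ * same index i later selects that mapping from pcim_iomap_table().
+ */
+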
+/**
+ * intel_eth_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main driver removal routine to free
+ * the net resources and then releases the PCI resources.
+ */
+static void intel_eth_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int i;
+
+ stmmac_dvr_remove(&pdev->dev);
+
+ if (priv->plat->stmmac_clk)
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+
+ pci_disable_device(pdev);
+}
+
+static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int __maybe_unused intel_eth_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
+ intel_eth_pci_resume);
+
+#define PCI_DEVICE_ID_INTEL_QUARK_ID 0x0937
+#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID 0x4b30
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID 0x4b31
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID 0x4b32
+/* The Intel(R) Programmable Services Engine (Intel(R) PSE) consists of
+ * 2 MACs, named PSE0 and PSE1.
+ */
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID 0x4ba0
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID 0x4ba1
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID 0x4ba2
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
+
+static const struct pci_device_id intel_eth_pci_id_table[] = {
+ { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID,
+ &ehl_pse0_rgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID,
+ &ehl_pse0_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID,
+ &ehl_pse0_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID,
+ &ehl_pse1_rgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID,
+ &ehl_pse1_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID,
+ &ehl_pse1_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_pci_info) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
+
+static struct pci_driver intel_eth_pci_driver = {
+ .name = "intel-eth-pci",
+ .id_table = intel_eth_pci_id_table,
+ .probe = intel_eth_pci_probe,
+ .remove = intel_eth_pci_remove,
+ .driver = {
+ .pm = &intel_eth_pm_ops,
+ },
+};
+
+module_pci_driver(intel_eth_pci_driver);
+
+MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
new file mode 100644
index 000000000000..e723096c0b15
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020, Intel Corporation
+ * DWMAC Intel header file
+ */
+
+#ifndef __DWMAC_INTEL_H__
+#define __DWMAC_INTEL_H__
+
+#define POLL_DELAY_US 8
+
+/* SERDES Register */
+#define SERDES_GSR0 0x5 /* Global Status Reg0 */
+#define SERDES_GCR0 0xb /* Global Configuration Reg0 */
+
+/* SERDES defines */
+#define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */
+#define SERDES_RST BIT(2) /* Serdes Reset */
+#define SERDES_PWR_ST_MASK	GENMASK(6, 4)	/* Serdes Power state */
+#define SERDES_PWR_ST_SHIFT 4
+#define SERDES_PWR_ST_P0 0x0
+#define SERDES_PWR_ST_P3 0x3
+
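+/* Field usage sketch (illustrative only): the power state occupies
+ * GCR0/GSR0 bits 6:4, so requesting P3 and reading the state back are:
+ *
+ *	gcr0 = (gcr0 & ~SERDES_PWR_ST_MASK) |
+ *	       (SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
+ *	state = (gsr0 & SERDES_PWR_ST_MASK) >> SERDES_PWR_ST_SHIFT;
+ */
+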
+#endif /* __DWMAC_INTEL_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 6ae13dc19510..02102c781a8c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -319,6 +319,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
/* Enable PTP clock */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+ default:
+ /* We don't get here; the switch above will have errored out */
+ unreachable();
+ }
regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 0e2fa14f1423..a3934ca6a043 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -119,6 +119,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
{ .div = 5, .val = 5, },
{ .div = 6, .val = 6, },
{ .div = 7, .val = 7, },
+ { /* end of array */ }
};
clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e0a5fe83d8e0..bfc4a92f1d92 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -75,6 +75,11 @@ struct ethqos_emac_por {
unsigned int value;
};
+struct ethqos_emac_driver_data {
+ const struct ethqos_emac_por *por;
+ unsigned int num_por;
+};
+
struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
@@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
{ .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
};
+static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
+ .por = emac_v2_3_0_por,
+ .num_por = ARRAY_SIZE(emac_v2_3_0_por),
+};
+
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
unsigned int val;
@@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
+ const struct ethqos_emac_driver_data *data;
struct qcom_ethqos *ethqos;
struct resource *res;
int ret;
@@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
goto err_mem;
}
- ethqos->por = of_device_get_match_data(&pdev->dev);
+ data = of_device_get_match_data(&pdev->dev);
+ ethqos->por = data->por;
+ ethqos->num_por = data->num_por;
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
if (IS_ERR(ethqos->rgmii_clk)) {
@@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_ethqos_match[] = {
- { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
+ { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index e0212d2fc2a1..70d41783329d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -241,6 +241,8 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
*val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
break;
case PHY_INTERFACE_MODE_MII:
@@ -289,16 +291,19 @@ static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac)
phymode == PHY_INTERFACE_MODE_MII ||
phymode == PHY_INTERFACE_MODE_GMII ||
phymode == PHY_INTERFACE_MODE_SGMII) {
- ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
&module);
module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
module);
- } else {
- ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
}
+ if (dwmac->f2h_ptp_ref_clk)
+ ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
+ else
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK <<
+ (reg_shift / 2));
+
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
/* Deassert reset for the phy configuration to be sampled by
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 9b7be996d07b..b2dc99289687 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -304,7 +304,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
/* Get ETH_CLK clocks */
dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck");
if (IS_ERR(dwmac->clk_eth_ck)) {
- dev_warn(dev, "No phy clock provided...\n");
+ dev_info(dev, "No phy clock provided...\n");
dwmac->clk_eth_ck = NULL;
}
@@ -324,7 +324,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
*/
- dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
+ dwmac->irq_pwr_wakeup = platform_get_irq_byname_optional(pdev,
"stm32_pwr_wakeup");
if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 7d40760e9ba8..0e1ca2cba3c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -150,6 +150,8 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
plat_dat->init = sun7i_gmac_init;
plat_dat->exit = sun7i_gmac_exit;
plat_dat->fix_mac_speed = sun7i_fix_speed;
+ plat_dat->tx_fifo_size = 4096;
+ plat_dat->rx_fifo_size = 16384;
ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 542784300620..efc6ec1b8027 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -207,7 +207,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
reg++;
}
- while (reg <= perfect_addr_number) {
+ while (reg < perfect_addr_number) {
writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
writel(0, ioaddr + GMAC_ADDR_LOW(reg));
reg++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index af50af27550b..28cac28253b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -18,6 +18,7 @@
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
#define GMAC_VLAN_TAG 0x00000050
+#define GMAC_VLAN_TAG_DATA 0x00000054
#define GMAC_VLAN_HASH_TABLE 0x00000058
#define GMAC_RX_FLOW_CTRL 0x00000090
#define GMAC_VLAN_INCL 0x00000060
@@ -90,6 +91,29 @@
#define GMAC_VLAN_VLC GENMASK(17, 16)
#define GMAC_VLAN_VLC_SHIFT 16
+/* MAC VLAN Tag */
+#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
+#define GMAC_VLAN_TAG_ETV BIT(16)
+
+/* MAC VLAN Tag Control */
+#define GMAC_VLAN_TAG_CTRL_OB BIT(0)
+#define GMAC_VLAN_TAG_CTRL_CT BIT(1)
+#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
+#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2
+#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
+#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21
+#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24)
+
+#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+
+/* MAC VLAN Tag Data/Filter */
+#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0)
+#define GMAC_VLAN_TAG_DATA_VEN BIT(16)
+#define GMAC_VLAN_TAG_DATA_ETV BIT(17)
+
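+/* Composing a filter entry from these bits (sketch only): an enabled,
+ * perfect-match entry for VID 42 would be
+ *
+ *	u32 entry = GMAC_VLAN_TAG_DATA_VEN | GMAC_VLAN_TAG_DATA_ETV | 42;
+ *
+ * which is exactly how dwmac4_add_hw_vlan_rx_fltr() builds its value.
+ */
+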
/* MAC RX Queue Enable */
#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
@@ -248,6 +272,7 @@ enum power_event {
#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11)
#define GMAC_HW_FEAT_FRPSEL BIT(10)
#define GMAC_HW_FEAT_DVLAN BIT(5)
+#define GMAC_HW_FEAT_NRVF GENMASK(2, 0)
/* MAC HW ADDR regs */
#define GMAC_HI_DCS GENMASK(18, 16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index dc09d2131e40..39692d15d80c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -394,6 +394,156 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
+static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 val;
+
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ val &= ~GMAC_VLAN_TAG_VID;
+ val |= GMAC_VLAN_TAG_ETV | vid;
+
+ writel(val, ioaddr + GMAC_VLAN_TAG);
+}
+
+static int dwmac4_write_vlan_filter(struct net_device *dev,
+ struct mac_device_info *hw,
+ u8 index, u32 data)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int i, timeout = 10;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+ return -EINVAL;
+
+ writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
+
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
+ GMAC_VLAN_TAG_CTRL_CT |
+ GMAC_VLAN_TAG_CTRL_OB);
+ val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
+
+ writel(val, ioaddr + GMAC_VLAN_TAG);
+
+ for (i = 0; i < timeout; i++) {
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ if (!(val & GMAC_VLAN_TAG_CTRL_OB))
+ return 0;
+ udelay(1);
+ }
+
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+
+ return -EBUSY;
+}
+
+static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int index = -1;
+ u32 val = 0;
+ int i, ret;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ /* For single VLAN filter, VID 0 means VLAN promiscuous */
+ if (vid == 0) {
+ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
+ return -EPERM;
+ }
+
+ if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
+ netdev_err(dev, "Only single VLAN ID supported\n");
+ return -EPERM;
+ }
+
+ hw->vlan_filter[0] = vid;
+ dwmac4_write_single_vlan(dev, vid);
+
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
+
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] == val)
+ return 0;
+ else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
+ index = i;
+ }
+
+ if (index == -1) {
+ netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
+ hw->num_vlan);
+ return -EPERM;
+ }
+
+ ret = dwmac4_write_vlan_filter(dev, hw, index, val);
+
+ if (!ret)
+ hw->vlan_filter[index] = val;
+
+ return ret;
+}
+
+static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int i, ret = 0;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+ dwmac4_write_single_vlan(dev, 0);
+ }
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
+ ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
+
+ if (!ret)
+ hw->vlan_filter[i] = 0;
+ else
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i];
+ dwmac4_write_vlan_filter(dev, hw, i, val);
+ }
+ }
+}
+
static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
@@ -469,6 +619,10 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
}
}
+ /* VLAN filtering */
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ value |= GMAC_PACKET_FILTER_VTFE;
+
writel(value, ioaddr + GMAC_PACKET_FILTER);
}
@@ -947,6 +1101,9 @@ const struct stmmac_ops dwmac4_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};
const struct stmmac_ops dwmac410_ops = {
@@ -987,6 +1144,9 @@ const struct stmmac_ops dwmac410_ops = {
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
.fpe_configure = dwmac5_fpe_configure,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1032,8 +1192,42 @@ const struct stmmac_ops dwmac510_ops = {
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
.fpe_configure = dwmac5_fpe_configure,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};
+static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
+{
+ u32 val, num_vlan;
+
+ val = readl(ioaddr + GMAC_HW_FEATURE3);
+ switch (val & GMAC_HW_FEAT_NRVF) {
+ case 0:
+ num_vlan = 1;
+ break;
+ case 1:
+ num_vlan = 4;
+ break;
+ case 2:
+ num_vlan = 8;
+ break;
+ case 3:
+ num_vlan = 16;
+ break;
+ case 4:
+ num_vlan = 24;
+ break;
+ case 5:
+ num_vlan = 32;
+ break;
+ default:
+ num_vlan = 1;
+ }
+
+ return num_vlan;
+}
+
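+/* The switch above maps the 3-bit NRVF field to {1, 4, 8, 16, 24, 32}
+ * Rx VLAN filters; an equivalent table form (sketch only) would be:
+ *
+ *	static const u8 nrvf_to_vlan[] = { 1, 4, 8, 16, 24, 32 };
+ *	u32 nrvf = val & GMAC_HW_FEAT_NRVF;
+ *
+ *	num_vlan = nrvf < ARRAY_SIZE(nrvf_to_vlan) ?
+ *		   nrvf_to_vlan[nrvf] : 1;
+ */
+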
int dwmac4_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1062,6 +1256,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(20, 16);
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
+ mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9becca280074..6e30d7eb4983 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -6,6 +6,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/delay.h>
#include "common.h"
#include "dwmac4_dma.h"
@@ -14,22 +15,14 @@
int dwmac4_dma_reset(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
- int limit;
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
- break;
- mdelay(10);
- }
-
- if (limit < 0)
- return -EBUSY;
- return 0;
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 100000);
}
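+/* readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from
+ * <linux/iopoll.h> re-reads the register into val until cond holds,
+ * sleeping ~10 ms between reads and returning -ETIMEDOUT after 100 ms,
+ * matching the 10 x 10 ms budget of the open-coded loop it replaces.
+ */
+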
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 494c859b4ade..67ba67ed0cb9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -624,7 +624,7 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
total_offset += offset;
}
- total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000;
+ total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
total_ctr += total_offset;
ctr_low = do_div(total_ctr, 1000000000);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 688d36095333..cb87d31a99df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -16,19 +16,14 @@
int dwmac_dma_reset(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
- int err;
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
10000, 100000);
- if (err)
- return -EBUSY;
-
- return 0;
}
/* CSR1 enables the transmit DMA to check for new descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 67b754a56288..ad4df9bddcf3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -9,6 +9,7 @@
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_ptp.h"
+#include "dwxlgmac2.h"
#include "dwxgmac2.h"
static void dwxgmac2_core_init(struct mac_device_info *hw,
@@ -576,8 +577,13 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
value |= XGMAC_VLAN_EDVLP;
value |= XGMAC_VLAN_ESVL;
value |= XGMAC_VLAN_DOVLTC;
+ } else {
+ value &= ~XGMAC_VLAN_EDVLP;
+ value &= ~XGMAC_VLAN_ESVL;
+ value &= ~XGMAC_VLAN_DOVLTC;
}
+ value &= ~XGMAC_VLAN_VID;
writel(value, ioaddr + XGMAC_VLAN_TAG);
} else if (perfect_match) {
u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
@@ -588,13 +594,19 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
value = readl(ioaddr + XGMAC_VLAN_TAG);
+ value &= ~XGMAC_VLAN_VTHM;
value |= XGMAC_VLAN_ETV;
if (is_double) {
value |= XGMAC_VLAN_EDVLP;
value |= XGMAC_VLAN_ESVL;
value |= XGMAC_VLAN_DOVLTC;
+ } else {
+ value &= ~XGMAC_VLAN_EDVLP;
+ value &= ~XGMAC_VLAN_ESVL;
+ value &= ~XGMAC_VLAN_DOVLTC;
}
+ value &= ~XGMAC_VLAN_VID;
writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
} else {
u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
@@ -1485,6 +1497,67 @@ const struct stmmac_ops dwxgmac210_ops = {
.fpe_configure = dwxgmac3_fpe_configure,
};
+static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
+ writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
+}
+
+const struct stmmac_ops dwxlgmac2_ops = {
+ .core_init = dwxgmac2_core_init,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxlgmac2_rx_queue_enable,
+ .rx_queue_prio = dwxgmac2_rx_queue_prio,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
+ .rx_queue_routing = NULL,
+ .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
+ .config_cbs = dwxgmac2_config_cbs,
+ .dump_regs = dwxgmac2_dump_regs,
+ .host_irq_status = dwxgmac2_host_irq_status,
+ .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
+ .flow_ctrl = dwxgmac2_flow_ctrl,
+ .pmt = dwxgmac2_pmt,
+ .set_umac_addr = dwxgmac2_set_umac_addr,
+ .get_umac_addr = dwxgmac2_get_umac_addr,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
+ .pcs_ctrl_ane = NULL,
+ .pcs_rane = NULL,
+ .pcs_get_adv_lp = NULL,
+ .debug = NULL,
+ .set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
+ .set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
+ .config_l3_filter = dwxgmac2_config_l3_filter,
+ .config_l4_filter = dwxgmac2_config_l4_filter,
+ .set_arp_offload = dwxgmac2_set_arp_offload,
+ .est_configure = dwxgmac3_est_configure,
+ .fpe_configure = dwxgmac3_fpe_configure,
+};
+
int dwxgmac2_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1521,3 +1594,40 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
return 0;
}
+
+int dwxlgmac2_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tXLGMAC\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->link.duplex = 0;
+ mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
+ mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
+ mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
+ mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
+ mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
+ mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
+ mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
+ mac->link.speed_mask = XLGMAC_CONFIG_SS;
+
+ mac->mii.addr = XGMAC_MDIO_ADDR;
+ mac->mii.data = XGMAC_MDIO_DATA;
+ mac->mii.addr_shift = 16;
+ mac->mii.addr_mask = GENMASK(20, 16);
+ mac->mii.reg_shift = 0;
+ mac->mii.reg_mask = GENMASK(15, 0);
+ mac->mii.clk_csr_shift = 19;
+ mac->mii.clk_csr_mask = GENMASK(21, 19);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h
new file mode 100644
index 000000000000..726090d49221
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XLGMAC definitions.
+ */
+
+#ifndef __STMMAC_DWXLGMAC2_H__
+#define __STMMAC_DWXLGMAC2_H__
+
+/* MAC Registers */
+#define XLGMAC_CONFIG_SS GENMASK(30, 28)
+#define XLGMAC_CONFIG_SS_SHIFT 28
+#define XLGMAC_CONFIG_SS_40G (0x0 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_25G (0x1 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_50G (0x2 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_100G (0x3 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_10G (0x4 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_2500 (0x6 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_1000 (0x7 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_RXQ_ENABLE_CTRL0 0x00000140
+
+#endif /* __STMMAC_DWXLGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 3af2e5015245..bb7114f970f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -23,6 +23,18 @@ static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
return reg & GENMASK(7, 0);
}
+static u32 stmmac_get_dev_id(struct stmmac_priv *priv, u32 id_reg)
+{
+ u32 reg = readl(priv->ioaddr + id_reg);
+
+ if (!reg) {
+ dev_info(priv->device, "Version ID not available\n");
+ return 0x0;
+ }
+
+ return (reg & GENMASK(15, 8)) >> 8;
+}
+
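+/* Layout note: in the GMAC4_VERSION register, bits 7:0 carry the
+ * Synopsys core version (read by stmmac_get_id() above) while bits 15:8
+ * carry the device ID read here; the latter is what lets the hwif table
+ * below tell an XGMAC core apart from an XLGMAC one.
+ */
+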
static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -69,11 +81,18 @@ static int stmmac_dwmac4_quirks(struct stmmac_priv *priv)
return 0;
}
+static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv)
+{
+ priv->hw->xlgmac = true;
+ return 0;
+}
+
static const struct stmmac_hwif_entry {
bool gmac;
bool gmac4;
bool xgmac;
u32 min_id;
+ u32 dev_id;
const struct stmmac_regs_off regs;
const void *desc;
const void *dma;
@@ -199,6 +218,7 @@ static const struct stmmac_hwif_entry {
.gmac4 = false,
.xgmac = true,
.min_id = DWXGMAC_CORE_2_10,
+ .dev_id = DWXGMAC_ID,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
@@ -212,6 +232,25 @@ static const struct stmmac_hwif_entry {
.mmc = &dwxgmac_mmc_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = false,
+ .xgmac = true,
+ .min_id = DWXLGMAC_CORE_2_00,
+ .dev_id = DWXLGMAC_ID,
+ .regs = {
+ .ptp_off = PTP_XGMAC_OFFSET,
+ .mmc_off = MMC_XGMAC_OFFSET,
+ },
+ .desc = &dwxgmac210_desc_ops,
+ .dma = &dwxgmac210_dma_ops,
+ .mac = &dwxlgmac2_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = &dwmac510_tc_ops,
+ .mmc = &dwxgmac_mmc_ops,
+ .setup = dwxlgmac2_setup,
+ .quirks = stmmac_dwxlgmac_quirks,
},
};
@@ -223,13 +262,15 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
const struct stmmac_hwif_entry *entry;
struct mac_device_info *mac;
bool needs_setup = true;
+ u32 id, dev_id = 0;
int i, ret;
- u32 id;
if (needs_gmac) {
id = stmmac_get_id(priv, GMAC_VERSION);
} else if (needs_gmac4 || needs_xgmac) {
id = stmmac_get_id(priv, GMAC4_VERSION);
+ if (needs_xgmac)
+ dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION);
} else {
id = 0;
}
@@ -267,6 +308,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
/* Use synopsys_id var because some setups can override this */
if (priv->synopsys_id < entry->min_id)
continue;
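+		/* dev_id ^ entry->dev_id is non-zero exactly when the two
+		 * IDs differ, so this skips entries for the other
+		 * XGMAC/XLGMAC device variant.
+		 */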
+ if (needs_xgmac && (dev_id ^ entry->dev_id))
+ continue;
/* Only use generic HW helpers if needed */
mac->desc = mac->desc ? : entry->desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index df63b0367aff..ffe2d63389b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -369,6 +369,14 @@ struct stmmac_ops {
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
__le16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
/* Source Address Insertion / Replacement */
@@ -461,6 +469,12 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
#define stmmac_enable_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
+#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
+#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
#define stmmac_sarc_configure(__priv, __args...) \
@@ -577,6 +591,18 @@ struct stmmac_mmc_ops {
#define stmmac_mmc_read(__priv, __args...) \
stmmac_do_void_callback(__priv, mmc, read, __args)
+/* XPCS callbacks */
+#define stmmac_xpcs_validate(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, validate, __args)
+#define stmmac_xpcs_config(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, config, __args)
+#define stmmac_xpcs_get_state(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, get_state, __args)
+#define stmmac_xpcs_link_up(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, link_up, __args)
+#define stmmac_xpcs_probe(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, probe, __args)
+
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
@@ -593,6 +619,7 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index b29603ec744c..eae11c585025 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -732,20 +732,6 @@ static int stmmac_set_coalesce(struct net_device *dev,
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int rx_riwt;
- /* Check not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
- (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
- (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
- return -EOPNOTSUPP;
-
if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) {
rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
@@ -914,6 +900,8 @@ static int stmmac_set_tunable(struct net_device *dev,
}
static const struct ethtool_ops stmmac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_msglevel = stmmac_ethtool_getmsglevel,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 020159622559..d291612eeafb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -10,6 +10,7 @@
*******************************************************************************/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/delay.h>
#include "common.h"
#include "stmmac_ptp.h"
@@ -26,12 +27,16 @@ static void config_sub_second_increment(void __iomem *ioaddr,
unsigned long data;
u32 reg_value;
- /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
- * formula = (1/ptp_clock) * 1000000000
- * where ptp_clock is 50MHz if fine method is used to update system
+	/* For GMAC3.x and 4.x versions, in "fine adjustment mode", set the
+	 * sub-second increment to twice the number of nanoseconds of a clock
+	 * cycle. The caller's calculation of default_addend will set it to
+	 * mid-range = 2^31 when the remainder of this division is zero, which
+	 * makes the accumulator overflow once every 2 ptp_clock cycles, each
+	 * time adding twice the number of nanoseconds of a clock cycle:
+	 * 2000000000ULL / ptp_clock.
+	 */
*/
if (value & PTP_TCR_TSCFUPDT)
- data = (1000000000ULL / 50000000);
+ data = (2000000000ULL / ptp_clock);
else
data = (1000000000ULL / ptp_clock);
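+	/* Worked example (assuming ptp_clock = 50 MHz): one cycle is 20 ns,
+	 * so fine mode sets data = 2000000000 / 50000000 = 40 ns; with
+	 * default_addend = 2^31 the 32-bit accumulator overflows every 2
+	 * cycles, i.e. every 40 ns, and each overflow adds the 40 ns
+	 * increment, so the timestamp counter tracks real time exactly.
+	 */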
@@ -53,7 +58,6 @@ static void config_sub_second_increment(void __iomem *ioaddr,
static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
{
- int limit;
u32 value;
writel(sec, ioaddr + PTP_STSUR);
@@ -64,16 +68,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
writel(value, ioaddr + PTP_TCR);
/* wait for present system time initialize to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSINIT),
+ 10000, 100000);
}
static int config_addend(void __iomem *ioaddr, u32 addend)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7da18c9afa01..7e9cbfd23530 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -630,7 +630,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
- ts_event_en = PTP_TCR_TSEVNTENA;
+ if (priv->synopsys_id != DWMAC_CORE_5_10)
+ ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -849,6 +850,38 @@ static void stmmac_validate(struct phylink_config *config,
phylink_set(mac_supported, 10000baseKX4_Full);
phylink_set(mac_supported, 10000baseKR_Full);
}
+ if (!max_speed || (max_speed >= 25000)) {
+ phylink_set(mac_supported, 25000baseCR_Full);
+ phylink_set(mac_supported, 25000baseKR_Full);
+ phylink_set(mac_supported, 25000baseSR_Full);
+ }
+ if (!max_speed || (max_speed >= 40000)) {
+ phylink_set(mac_supported, 40000baseKR4_Full);
+ phylink_set(mac_supported, 40000baseCR4_Full);
+ phylink_set(mac_supported, 40000baseSR4_Full);
+ phylink_set(mac_supported, 40000baseLR4_Full);
+ }
+ if (!max_speed || (max_speed >= 50000)) {
+ phylink_set(mac_supported, 50000baseCR2_Full);
+ phylink_set(mac_supported, 50000baseKR2_Full);
+ phylink_set(mac_supported, 50000baseSR2_Full);
+ phylink_set(mac_supported, 50000baseKR_Full);
+ phylink_set(mac_supported, 50000baseSR_Full);
+ phylink_set(mac_supported, 50000baseCR_Full);
+ phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
+ phylink_set(mac_supported, 50000baseDR_Full);
+ }
+ if (!max_speed || (max_speed >= 100000)) {
+ phylink_set(mac_supported, 100000baseKR4_Full);
+ phylink_set(mac_supported, 100000baseSR4_Full);
+ phylink_set(mac_supported, 100000baseCR4_Full);
+ phylink_set(mac_supported, 100000baseLR4_ER4_Full);
+ phylink_set(mac_supported, 100000baseKR2_Full);
+ phylink_set(mac_supported, 100000baseSR2_Full);
+ phylink_set(mac_supported, 100000baseCR2_Full);
+ phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
+ phylink_set(mac_supported, 100000baseDR2_Full);
+ }
}
/* Half-Duplex can only work with single queue */
@@ -858,33 +891,65 @@ static void stmmac_validate(struct phylink_config *config,
phylink_set(mask, 1000baseT_Half);
}
- bitmap_and(supported, supported, mac_supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_andnot(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mac_supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_andnot(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mac_supported);
+ linkmode_andnot(supported, supported, mask);
+
+ linkmode_and(state->advertising, state->advertising, mac_supported);
+ linkmode_andnot(state->advertising, state->advertising, mask);
+
+ /* If PCS is supported, check which modes it supports. */
+ stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}
static void stmmac_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
state->link = 0;
+ stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
+}
+
+static void stmmac_mac_an_restart(struct phylink_config *config)
+{
+ /* Not Supported */
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ priv->eee_active = false;
+ stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 ctrl;
+ stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
+
ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl &= ~priv->hw->link.speed_mask;
- if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
- switch (state->speed) {
+ if (interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (speed) {
case SPEED_10000:
ctrl |= priv->hw->link.xgmii.speed10000;
break;
@@ -897,8 +962,34 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
default:
return;
}
+ } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
+ switch (speed) {
+ case SPEED_100000:
+ ctrl |= priv->hw->link.xlgmii.speed100000;
+ break;
+ case SPEED_50000:
+ ctrl |= priv->hw->link.xlgmii.speed50000;
+ break;
+ case SPEED_40000:
+ ctrl |= priv->hw->link.xlgmii.speed40000;
+ break;
+ case SPEED_25000:
+ ctrl |= priv->hw->link.xlgmii.speed25000;
+ break;
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ default:
+ return;
+ }
} else {
- switch (state->speed) {
+ switch (speed) {
case SPEED_2500:
ctrl |= priv->hw->link.speed2500;
break;
@@ -916,44 +1007,21 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
}
}
- priv->speed = state->speed;
+ priv->speed = speed;
if (priv->plat->fix_mac_speed)
- priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
- if (!state->duplex)
+ if (!duplex)
ctrl &= ~priv->hw->link.duplex;
else
ctrl |= priv->hw->link.duplex;
/* Flow Control operation */
- if (state->pause)
- stmmac_mac_flow_ctrl(priv, state->duplex);
+ if (tx_pause && rx_pause)
+ stmmac_mac_flow_ctrl(priv, duplex);
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
-}
-
-static void stmmac_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
-}
-
-static void stmmac_mac_link_down(struct phylink_config *config,
- unsigned int mode, phy_interface_t interface)
-{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
-
- stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
-}
-
-static void stmmac_mac_link_up(struct phylink_config *config,
- unsigned int mode, phy_interface_t interface,
- struct phy_device *phy)
-{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
@@ -1043,6 +1111,10 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.pcs_poll = true;
+
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
@@ -1251,11 +1323,11 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
- page_pool_put_page(rx_q->page_pool, buf->page, false);
+ page_pool_put_full_page(rx_q->page_pool, buf->page, false);
buf->page = NULL;
if (buf->sec_page)
- page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
+ page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
buf->sec_page = NULL;
}
@@ -2687,7 +2759,8 @@ static int stmmac_open(struct net_device *dev)
int ret;
if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI) {
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
+ priv->hw->xpcs == NULL) {
ret = stmmac_init_phy(dev);
if (ret) {
netdev_err(priv->dev,
@@ -3988,7 +4061,7 @@ static int stmmac_set_features(struct net_device *netdev,
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
- * @dev_id: to pass the net device pointer.
+ * @dev_id: to pass the net device pointer (must be valid).
* Description: this is the main driver interrupt service routine.
* It can call:
* o DMA service routine (to manage incoming frame reception and transmission
@@ -4012,11 +4085,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -4494,18 +4562,32 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
return ret;
}
- return ret;
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
bool is_double = false;
+ int ret;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
clear_bit(vid, priv->active_vlans);
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ return ret;
+ }
+
return stmmac_vlan_update(priv, is_double);
}
@@ -4900,12 +4982,22 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(ndev,
+ priv->plat->bsp_priv);
+
+ if (ret < 0)
+ goto error_serdes_powerup;
+ }
+
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
return ret;
+error_serdes_powerup:
+ unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
error_phy_setup:
@@ -4943,6 +5035,9 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_stop_all_dma(priv);
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
@@ -4995,6 +5090,9 @@ int stmmac_suspend(struct device *dev)
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device)) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
@@ -5057,6 +5155,7 @@ int stmmac_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
if (!netif_running(ndev))
return 0;
@@ -5084,7 +5183,13 @@ int stmmac_resume(struct device *dev)
stmmac_mdio_reset(priv->mii);
}
- netif_device_attach(ndev);
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(ndev,
+ priv->plat->bsp_priv);
+
+ if (ret < 0)
+ return ret;
+ }
mutex_lock(&priv->lock);
@@ -5096,6 +5201,8 @@ int stmmac_resume(struct device *dev)
stmmac_init_coalesce(priv);
stmmac_set_rx_mode(ndev);
+ stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+
stmmac_enable_all_queues(priv);
stmmac_start_all_queues(priv);
@@ -5110,6 +5217,8 @@ int stmmac_resume(struct device *dev)
phylink_mac_change(priv->phylink, true);
+ netif_device_attach(ndev);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index cfe5d8b73142..b2a707e2ef43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -382,6 +382,14 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
+ if (mdio_bus_data->has_xpcs) {
+ priv->hw->xpcs = mdio_xpcs_get_ops();
+ if (!priv->hw->xpcs) {
+ err = -ENODEV;
+ goto bus_register_fail;
+ }
+ }
+
if (mdio_bus_data->needs_reset)
new_bus->reset = &stmmac_mdio_reset;
@@ -433,6 +441,25 @@ int stmmac_mdio_register(struct net_device *ndev)
found = 1;
}
+ /* Try to probe the XPCS by scanning all addresses. */
+ if (priv->hw->xpcs) {
+ struct mdio_xpcs_args *xpcs = &priv->hw->xpcs_args;
+ int ret, mode = priv->plat->phy_interface;
+ max_addr = PHY_MAX_ADDR;
+
+ xpcs->bus = new_bus;
+
+ for (addr = 0; addr < max_addr; addr++) {
+ xpcs->addr = addr;
+
+ ret = stmmac_xpcs_probe(priv, xpcs, mode);
+ if (!ret) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
if (!found && !mdio_node) {
dev_warn(dev, "No PHY found\n");
mdiobus_unregister(new_bus);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index fe2c9fa6a71c..3fb21f7ac9fb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -15,49 +15,10 @@
#include "stmmac.h"
-/*
- * This struct is used to associate PCI Function of MAC controller on a board,
- * discovered via DMI, with the address of PHY connected to the MAC. The
- * negative value of the address means that MAC controller is not connected
- * with PHY.
- */
-struct stmmac_pci_func_data {
- unsigned int func;
- int phy_addr;
-};
-
-struct stmmac_pci_dmi_data {
- const struct stmmac_pci_func_data *func;
- size_t nfuncs;
-};
-
struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
-static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
- const struct dmi_system_id *dmi_list)
-{
- const struct stmmac_pci_func_data *func_data;
- const struct stmmac_pci_dmi_data *dmi_data;
- const struct dmi_system_id *dmi_id;
- int func = PCI_FUNC(pdev->devfn);
- size_t n;
-
- dmi_id = dmi_first_match(dmi_list);
- if (!dmi_id)
- return -ENODEV;
-
- dmi_data = dmi_id->driver_data;
- func_data = dmi_data->func;
-
- for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
- if (func_data->func == func)
- return func_data->phy_addr;
-
- return -ENODEV;
-}
-
static void common_default_data(struct plat_stmmacenet_data *plat)
{
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
@@ -108,272 +69,6 @@ static const struct stmmac_pci_info stmmac_pci_info = {
.setup = stmmac_default_data,
};
-static int intel_mgbe_common_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- int i;
-
- plat->clk_csr = 5;
- plat->has_gmac = 0;
- plat->has_gmac4 = 1;
- plat->force_sf_dma_mode = 0;
- plat->tso_en = 1;
-
- plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
-
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].chan = i;
-
- /* Disable Priority config by default */
- plat->rx_queues_cfg[i].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- }
-
- for (i = 0; i < plat->tx_queues_to_use; i++) {
- plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[i].use_prio = false;
- }
-
- /* FIFO size is 4096 bytes for 1 tx/rx queue */
- plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
- plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
-
- plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
- plat->tx_queues_cfg[0].weight = 0x09;
- plat->tx_queues_cfg[1].weight = 0x0A;
- plat->tx_queues_cfg[2].weight = 0x0B;
- plat->tx_queues_cfg[3].weight = 0x0C;
- plat->tx_queues_cfg[4].weight = 0x0D;
- plat->tx_queues_cfg[5].weight = 0x0E;
- plat->tx_queues_cfg[6].weight = 0x0F;
- plat->tx_queues_cfg[7].weight = 0x10;
-
- plat->dma_cfg->pbl = 32;
- plat->dma_cfg->pblx8 = true;
- plat->dma_cfg->fixed_burst = 0;
- plat->dma_cfg->mixed_burst = 0;
- plat->dma_cfg->aal = 0;
-
- plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
- GFP_KERNEL);
- if (!plat->axi)
- return -ENOMEM;
-
- plat->axi->axi_lpi_en = 0;
- plat->axi->axi_xit_frm = 0;
- plat->axi->axi_wr_osr_lmt = 1;
- plat->axi->axi_rd_osr_lmt = 1;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
-
- plat->ptp_max_adj = plat->clk_ptp_rate;
-
- /* Set system clock */
- plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
- "stmmac-clk", NULL, 0,
- plat->clk_ptp_rate);
-
- if (IS_ERR(plat->stmmac_clk)) {
- dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
- plat->stmmac_clk = NULL;
- }
- clk_prepare_enable(plat->stmmac_clk);
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- return 0;
-}
-
-static int ehl_common_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- int ret;
-
- plat->rx_queues_to_use = 8;
- plat->tx_queues_to_use = 8;
- plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int ehl_sgmii_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- plat->bus_id = 1;
- plat->phy_addr = 0;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
-
- return ehl_common_data(pdev, plat);
-}
-
-static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
- .setup = ehl_sgmii_data,
-};
-
-static int ehl_rgmii_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- plat->bus_id = 1;
- plat->phy_addr = 0;
- plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
-
- return ehl_common_data(pdev, plat);
-}
-
-static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
- .setup = ehl_rgmii_data,
-};
-
-static int tgl_common_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- int ret;
-
- plat->rx_queues_to_use = 6;
- plat->tx_queues_to_use = 4;
- plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int tgl_sgmii_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- plat->bus_id = 1;
- plat->phy_addr = 0;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- return tgl_common_data(pdev, plat);
-}
-
-static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
- .setup = tgl_sgmii_data,
-};
-
-static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
- {
- .func = 6,
- .phy_addr = 1,
- },
-};
-
-static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
- .func = galileo_stmmac_func_data,
- .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
-};
-
-static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
- {
- .func = 6,
- .phy_addr = 1,
- },
- {
- .func = 7,
- .phy_addr = 1,
- },
-};
-
-static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
- .func = iot2040_stmmac_func_data,
- .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
-};
-
-static const struct dmi_system_id quark_pci_dmi[] = {
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
- },
- .driver_data = (void *)&galileo_stmmac_dmi_data,
- },
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
- },
- .driver_data = (void *)&galileo_stmmac_dmi_data,
- },
- /*
- * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
- * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
- * has only one pci network device while other asset tags are
- * for IOT2040 which has two.
- */
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-0YA2"),
- },
- .driver_data = (void *)&galileo_stmmac_dmi_data,
- },
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- },
- .driver_data = (void *)&iot2040_stmmac_dmi_data,
- },
- {}
-};
-
-static int quark_default_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- int ret;
-
- /* Set common default data first */
- common_default_data(plat);
-
- /*
- * Refuse to load the driver and register net device if MAC controller
- * does not connect to any PHY interface.
- */
- ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
- if (ret < 0) {
- /* Return error to the caller on DMI enabled boards. */
- if (dmi_get_system_info(DMI_BOARD_NAME))
- return ret;
-
- /*
- * Galileo boards with old firmware don't support DMI. We always
- * use 1 here as PHY address, so at least the first found MAC
- * controller would be probed.
- */
- ret = 1;
- }
-
- plat->bus_id = pci_dev_id(pdev);
- plat->phy_addr = ret;
- plat->phy_interface = PHY_INTERFACE_MODE_RMII;
-
- plat->dma_cfg->pbl = 16;
- plat->dma_cfg->pblx8 = true;
- plat->dma_cfg->fixed_burst = 1;
- /* AXI (TODO) */
-
- return 0;
-}
-
-static const struct stmmac_pci_info quark_pci_info = {
- .setup = quark_default_data,
-};
-
static int snps_gmac5_default_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
@@ -579,28 +274,15 @@ static int __maybe_unused stmmac_pci_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
/* synthetic ID, no official vendor */
-#define PCI_VENDOR_ID_STMMAC 0x700
-
-#define STMMAC_QUARK_ID 0x0937
-#define STMMAC_DEVICE_ID 0x1108
-#define STMMAC_EHL_RGMII1G_ID 0x4b30
-#define STMMAC_EHL_SGMII1G_ID 0x4b31
-#define STMMAC_TGL_SGMII1G_ID 0xa0ac
-#define STMMAC_GMAC5_ID 0x7102
-
-#define STMMAC_DEVICE(vendor_id, dev_id, info) { \
- PCI_VDEVICE(vendor_id, dev_id), \
- .driver_data = (kernel_ulong_t)&info \
- }
+#define PCI_VENDOR_ID_STMMAC 0x0700
+
+#define PCI_DEVICE_ID_STMMAC_STMMAC 0x1108
+#define PCI_DEVICE_ID_SYNOPSYS_GMAC5_ID 0x7102
static const struct pci_device_id stmmac_id_table[] = {
- STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
- STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, tgl_sgmii1g_pci_info),
- STMMAC_DEVICE(SYNOPSYS, STMMAC_GMAC5_ID, snps_gmac5_pci_info),
+ { PCI_DEVICE_DATA(STMMAC, STMMAC, &stmmac_pci_info) },
+ { PCI_DEVICE_DATA(STMICRO, MAC, &stmmac_pci_info) },
+ { PCI_DEVICE_DATA(SYNOPSYS, GMAC5_ID, &snps_gmac5_pci_info) },
{}
};
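The PCI_DEVICE_DATA() entries rely on token pasting into the canonical PCI_VENDOR_ID_*/PCI_DEVICE_ID_* namespace, which is why the local ID macros are renamed in this hunk. Per include/linux/pci.h the macro is roughly:

 #define PCI_DEVICE_DATA(vend, dev, data) \
  .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
  .driver_data = (kernel_ulong_t)(data)

so PCI_DEVICE_DATA(STMMAC, STMMAC, &stmmac_pci_info) matches PCI_VENDOR_ID_STMMAC/PCI_DEVICE_ID_STMMAC_STMMAC and stashes the info pointer in driver_data.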
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 13fafd905db8..bcda49dcf619 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -588,7 +588,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
if (IS_ERR(plat->clk_ptp_ref)) {
plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
plat->clk_ptp_ref = NULL;
- dev_warn(&pdev->dev, "PTP uses main clock\n");
+ dev_info(&pdev->dev, "PTP uses main clock\n");
} else {
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
@@ -645,8 +645,6 @@ EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
{
- struct resource *res;
-
memset(stmmac_res, 0, sizeof(*stmmac_res));
/* Get IRQ information early to have an ability to ask for deferred
@@ -680,8 +678,7 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
+ stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);
return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 2aba2673d6c3..e6696495f126 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -380,7 +380,7 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
int ret;
if (!priv->dev->phydev)
- return -EBUSY;
+ return -EOPNOTSUPP;
ret = phy_loopback(priv->dev->phydev, true);
if (ret)
@@ -1387,6 +1387,7 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
cls->rule = rule;
rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
rule->action.num_entries = 1;
attr.dst = priv->dev->dev_addr;
@@ -1515,6 +1516,7 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
cls->rule = rule;
rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
rule->action.num_entries = 1;
attr.dst = priv->dev->dev_addr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 7a01dee2f9a8..3d747846f482 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -367,7 +367,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
static int tc_parse_flow_actions(struct stmmac_priv *priv,
struct flow_action *action,
- struct stmmac_flow_entry *entry)
+ struct stmmac_flow_entry *entry,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry *act;
int i;
@@ -375,6 +376,9 @@ static int tc_parse_flow_actions(struct stmmac_priv *priv,
if (!flow_action_has_entries(action))
return -EINVAL;
+ if (!flow_action_basic_hw_stats_check(action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -530,7 +534,8 @@ static int tc_add_flow(struct stmmac_priv *priv,
return -ENOENT;
}
- ret = tc_parse_flow_actions(priv, &rule->action, entry);
+ ret = tc_parse_flow_actions(priv, &rule->action, entry,
+ cls->common.extack);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6ec9163e232c..f1c8615ab6f0 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1716,34 +1716,26 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
pr_cont("\n");
if (stat & PCI_ERR_OTHER) {
- u16 cfg;
+ int pci_errs;
/* Interrogate PCI config space for the
* true cause.
*/
- pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
- netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
- if (cfg & PCI_STATUS_PARITY)
+ pci_errs = pci_status_get_and_clear_errors(cp->pdev);
+
+ netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
+ if (pci_errs & PCI_STATUS_PARITY)
netdev_err(dev, "PCI parity error detected\n");
- if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
netdev_err(dev, "PCI target abort\n");
- if (cfg & PCI_STATUS_REC_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
netdev_err(dev, "PCI master acks target abort\n");
- if (cfg & PCI_STATUS_REC_MASTER_ABORT)
+ if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
netdev_err(dev, "PCI master abort\n");
- if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
+ if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
netdev_err(dev, "PCI system error SERR#\n");
- if (cfg & PCI_STATUS_DETECTED_PARITY)
+ if (pci_errs & PCI_STATUS_DETECTED_PARITY)
netdev_err(dev, "PCI parity error\n");
-
- /* Write the error bits back to clear them. */
- cfg &= (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY);
- pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
}
/* For all PCI errors, we should reset the chip. */
@@ -4971,7 +4963,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
cas_cacheline_size)) {
dev_err(&pdev->dev, "Could not set PCI cache "
"line size\n");
- goto err_write_cacheline;
+ goto err_out_free_res;
}
}
#endif
@@ -5144,7 +5136,6 @@ err_out_iounmap:
err_out_free_res:
pci_release_regions(pdev);
-err_write_cacheline:
/* Try to restore it in case the error occurred after we
* set it.
*/
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 8358064fbd48..2d392a7b179a 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -545,37 +545,25 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
}
if (pci_estat & GREG_PCIESTAT_OTHER) {
- u16 pci_cfg_stat;
+ int pci_errs;
/* Interrogate PCI config space for the
* true cause.
*/
- pci_read_config_word(gp->pdev, PCI_STATUS,
- &pci_cfg_stat);
- netdev_err(dev, "Read PCI cfg space status [%04x]\n",
- pci_cfg_stat);
- if (pci_cfg_stat & PCI_STATUS_PARITY)
+ pci_errs = pci_status_get_and_clear_errors(gp->pdev);
+ netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
+ if (pci_errs & PCI_STATUS_PARITY)
netdev_err(dev, "PCI parity error detected\n");
- if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
netdev_err(dev, "PCI target abort\n");
- if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
netdev_err(dev, "PCI master acks target abort\n");
- if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
+ if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
netdev_err(dev, "PCI master abort\n");
- if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
+ if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
netdev_err(dev, "PCI system error SERR#\n");
- if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
+ if (pci_errs & PCI_STATUS_DETECTED_PARITY)
netdev_err(dev, "PCI parity error\n");
-
- /* Write the error bits back to clear them. */
- pci_cfg_stat &= (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY);
- pci_write_config_word(gp->pdev,
- PCI_STATUS, pci_cfg_stat);
}
/* For all PCI errors, we should reset the chip. */
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
index fde722136869..bc198eadfcab 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
@@ -151,7 +151,6 @@ static int xlgmac_ethtool_get_coalesce(struct net_device *netdev,
{
struct xlgmac_pdata *pdata = netdev_priv(netdev);
- memset(ec, 0, sizeof(struct ethtool_coalesce));
ec->rx_coalesce_usecs = pdata->rx_usecs;
ec->rx_max_coalesced_frames = pdata->rx_frames;
ec->tx_max_coalesced_frames = pdata->tx_frames;
@@ -167,20 +166,6 @@ static int xlgmac_ethtool_set_coalesce(struct net_device *netdev,
unsigned int rx_frames, rx_riwt, rx_usecs;
unsigned int tx_frames;
- /* Check for not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) || (ec->rx_max_coalesced_frames_irq) ||
- (ec->tx_coalesce_usecs) || (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
- (ec->stats_block_coalesce_usecs) || (ec->pkt_rate_low) ||
- (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
- (ec->rx_max_coalesced_frames_low) || (ec->rx_coalesce_usecs_low) ||
- (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) ||
- (ec->pkt_rate_high) || (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_max_coalesced_frames_high) ||
- (ec->rate_sample_interval))
- return -EOPNOTSUPP;
-
rx_usecs = ec->rx_coalesce_usecs;
rx_riwt = hw_ops->usec_to_riwt(pdata, rx_usecs);
rx_frames = ec->rx_max_coalesced_frames;
@@ -257,6 +242,8 @@ static void xlgmac_ethtool_get_ethtool_stats(struct net_device *netdev,
}
static const struct ethtool_ops xlgmac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xlgmac_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = xlgmac_ethtool_get_msglevel,
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 0f8a924fc60c..40a2ce0ca808 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2373,6 +2373,8 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
static void bdx_set_ethtool_ops(struct net_device *netdev)
{
static const struct ethtool_ops bdx_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = bdx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_coalesce = bdx_get_coalesce,
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
index 5fc03c8eba0c..909e7296cecf 100644
--- a/drivers/net/ethernet/tehuti/tehuti.h
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -330,7 +330,7 @@ struct txd_desc {
u16 length;
u32 va_lo;
u32 va_hi;
- struct pbl pbl[0]; /* Fragments */
+ struct pbl pbl[]; /* Fragments */
} __packed;
/* Register region size */
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index bf98e0fa7d8b..62f809b67469 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TI
bool "Texas Instruments (TI) devices"
default y
- depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
+ depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -31,7 +31,7 @@ config TI_DAVINCI_EMAC
config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
select PHYLIB
---help---
This driver supports TI's DaVinci MDIO module.
@@ -49,10 +49,12 @@ config TI_CPSW_PHY_SEL
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on TI_CPTS || !TI_CPTS
select TI_DAVINCI_MDIO
select MFD_SYSCON
select PAGE_POOL
select REGMAP
+ imply PHY_TI_GMII_SEL
---help---
This driver supports TI's CPSW Ethernet Switch.
@@ -63,6 +65,7 @@ config TI_CPSW_SWITCHDEV
tristate "TI CPSW Switch Support with switchdev"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
depends on NET_SWITCHDEV
+ depends on TI_CPTS || !TI_CPTS
select PAGE_POOL
select TI_DAVINCI_MDIO
select MFD_SYSCON
@@ -76,29 +79,37 @@ config TI_CPSW_SWITCHDEV
will be called cpsw_new.
config TI_CPTS
- bool "TI Common Platform Time Sync (CPTS) Support"
- depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST
+ tristate "TI Common Platform Time Sync (CPTS) Support"
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on COMMON_CLK
- depends on POSIX_TIMERS
+ depends on PTP_1588_CLOCK
---help---
This driver supports the Common Platform Time Sync unit of
the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
driver offers a PTP Hardware Clock.
-config TI_CPTS_MOD
- tristate
- depends on TI_CPTS
- default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
- select NET_PTP_CLASSIFY
- imply PTP_1588_CLOCK
- default m
+config TI_K3_AM65_CPSW_NUSS
+ tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ select TI_DAVINCI_MDIO
+ imply PHY_TI_GMII_SEL
+ help
+ This driver supports TI K3 AM654/J721E CPSW2G Ethernet SubSystem.
+ The two-port Gigabit Ethernet MAC (MCU_CPSW0) subsystem provides
+ Ethernet packet communication for the device: One Ethernet port
+ (port 1) with selectable RGMII and RMII interfaces and an internal
+ Communications Port Programming Interface (CPPI) port (port 0).
+
+ To compile this driver as a module, choose M here: the module
+ will be called ti-am65-cpsw-nuss.
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
select TI_DAVINCI_MDIO
depends on OF
depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
+ depends on TI_CPTS || !TI_CPTS
---help---
This driver supports TI's Keystone NETCP Core.
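The repeated "depends on TI_CPTS || !TI_CPTS" lines are the standard Kconfig idiom for an optional dependency: the expression is always satisfied, but it clamps the dependent tristate so that TI_CPTS=m forces the consumers to be at most =m as well; a built-in (=y) driver calling into a modular CPTS would otherwise fail to link.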
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index ecf776ad8689..cb26a9d21869 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
-obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
+obj-$(CONFIG_TI_CPTS) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o
@@ -23,3 +23,6 @@ obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
keystone_netcp-y := netcp_core.o cpsw_ale.o
obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
+
+obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
new file mode 100644
index 000000000000..c3502aa15ea0
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver ethtool ops
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "am65-cpsw-nuss.h"
+#include "cpsw_ale.h"
+
+#define AM65_CPSW_REGDUMP_VER 0x1
+
+enum {
+ AM65_CPSW_REGDUMP_MOD_NUSS = 1,
+ AM65_CPSW_REGDUMP_MOD_RGMII_STATUS = 2,
+ AM65_CPSW_REGDUMP_MOD_MDIO = 3,
+ AM65_CPSW_REGDUMP_MOD_CPSW = 4,
+ AM65_CPSW_REGDUMP_MOD_CPSW_P0 = 5,
+ AM65_CPSW_REGDUMP_MOD_CPSW_P1 = 6,
+ AM65_CPSW_REGDUMP_MOD_CPSW_CPTS = 7,
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE = 8,
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL = 9,
+ AM65_CPSW_REGDUMP_MOD_LAST,
+};
+
+/**
+ * struct am65_cpsw_regdump_hdr - regdump record header
+ *
+ * @module_id: CPSW module ID
+ * @len: CPSW module registers space length in bytes, header included
+ */
+struct am65_cpsw_regdump_hdr {
+ u32 module_id;
+ u32 len;
+};
+
+/**
+ * struct am65_cpsw_regdump_item - regdump module description
+ *
+ * @hdr: CPSW module header
+ * @start_ofs: CPSW module registers start addr
+ * @end_ofs: CPSW module registers end addr
+ *
+ * The registers dump is provided in the following format:
+ * u32 : module ID
+ * u32 : dump length
+ * u32[..len]: registers values
+ */
+struct am65_cpsw_regdump_item {
+ struct am65_cpsw_regdump_hdr hdr;
+ u32 start_ofs;
+ u32 end_ofs;
+};
+
+#define AM65_CPSW_REGDUMP_REC(mod, start, end) { \
+ .hdr.module_id = (mod), \
+ .hdr.len = (((u32 *)(end)) - ((u32 *)(start)) + 1) * sizeof(u32) * 2 + \
+ sizeof(struct am65_cpsw_regdump_hdr), \
+ .start_ofs = (start), \
+ .end_ofs = end, \
+}
+
+static const struct am65_cpsw_regdump_item am65_cpsw_regdump[] = {
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_NUSS, 0x0, 0x1c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_RGMII_STATUS, 0x30, 0x4c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_MDIO, 0xf00, 0xffc),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW, 0x20000, 0x2011c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_P0, 0x21000, 0x21320),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_P1, 0x22000, 0x223a4),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_CPTS,
+ 0x3d000, 0x3d048),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_ALE, 0x3e000, 0x3e13c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL, 0, 0),
+};
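Given the record layout documented above, and hdr.len being a byte count that includes the header, a register dump can be walked record by record. A minimal user-space sketch, assuming buf/nwords come from an ethtool register-dump request and that every record honours the documented layout:

 #include <stddef.h>
 #include <stdint.h>
 #include <stdio.h>

 static void walk_regdump(const uint32_t *buf, size_t nwords)
 {
  size_t pos = 0;

  while (pos + 2 <= nwords) {
   uint32_t module_id = buf[pos]; /* AM65_CPSW_REGDUMP_MOD_* */
   uint32_t len = buf[pos + 1]; /* bytes, header included */

   printf("module %u: %u bytes\n", module_id, len);
   if (len < 8 || len % 4)
    break; /* malformed record */
   pos += len / 4; /* two header words plus payload */
  }
 }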
+
+struct am65_cpsw_stats_regs {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames; /* slave */
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors; /* slave */
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames; /* slave */
+ u32 rx_undersized_frames;
+ u32 rx_fragments; /* slave */
+ u32 ale_drop;
+ u32 ale_overrun_drop;
+ u32 rx_octets;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames; /* slave */
+ u32 tx_deferred_frames; /* slave */
+ u32 tx_collision_frames; /* slave */
+ u32 tx_single_coll_frames; /* slave */
+ u32 tx_mult_coll_frames; /* slave */
+ u32 tx_excessive_collisions; /* slave */
+ u32 tx_late_collisions; /* slave */
+ u32 rx_ipg_error; /* slave 10G only */
+ u32 tx_carrier_sense_errors; /* slave */
+ u32 tx_octets;
+ u32 tx_64B_frames;
+ u32 tx_65_to_127B_frames;
+ u32 tx_128_to_255B_frames;
+ u32 tx_256_to_511B_frames;
+ u32 tx_512_to_1023B_frames;
+ u32 tx_1024B_frames;
+ u32 net_octets;
+ u32 rx_bottom_fifo_drop;
+ u32 rx_port_mask_drop;
+ u32 rx_top_fifo_drop;
+ u32 ale_rate_limit_drop;
+ u32 ale_vid_ingress_drop;
+ u32 ale_da_eq_sa_drop;
+ u32 ale_block_drop; /* K3 */
+ u32 ale_secure_drop; /* K3 */
+ u32 ale_auth_drop; /* K3 */
+ u32 ale_unknown_ucast;
+ u32 ale_unknown_ucast_bytes;
+ u32 ale_unknown_mcast;
+ u32 ale_unknown_mcast_bytes;
+ u32 ale_unknown_bcast;
+ u32 ale_unknown_bcast_bytes;
+ u32 ale_pol_match;
+ u32 ale_pol_match_red;
+ u32 ale_pol_match_yellow;
+ u32 ale_mcast_sa_drop; /* K3 */
+ u32 ale_dual_vlan_drop; /* K3 */
+ u32 ale_len_err_drop; /* K3 */
+ u32 ale_ip_next_hdr_drop; /* K3 */
+ u32 ale_ipv4_frag_drop; /* K3 */
+ u32 __rsvd_1[24];
+ u32 iet_rx_assembly_err; /* K3 slave */
+ u32 iet_rx_assembly_ok; /* K3 slave */
+ u32 iet_rx_smd_err; /* K3 slave */
+ u32 iet_rx_frag; /* K3 slave */
+ u32 iet_tx_hold; /* K3 slave */
+ u32 iet_tx_frag; /* K3 slave */
+ u32 __rsvd_2[9];
+ u32 tx_mem_protect_err;
+ /* following NU only */
+ u32 tx_pri0;
+ u32 tx_pri1;
+ u32 tx_pri2;
+ u32 tx_pri3;
+ u32 tx_pri4;
+ u32 tx_pri5;
+ u32 tx_pri6;
+ u32 tx_pri7;
+ u32 tx_pri0_bcnt;
+ u32 tx_pri1_bcnt;
+ u32 tx_pri2_bcnt;
+ u32 tx_pri3_bcnt;
+ u32 tx_pri4_bcnt;
+ u32 tx_pri5_bcnt;
+ u32 tx_pri6_bcnt;
+ u32 tx_pri7_bcnt;
+ u32 tx_pri0_drop;
+ u32 tx_pri1_drop;
+ u32 tx_pri2_drop;
+ u32 tx_pri3_drop;
+ u32 tx_pri4_drop;
+ u32 tx_pri5_drop;
+ u32 tx_pri6_drop;
+ u32 tx_pri7_drop;
+ u32 tx_pri0_drop_bcnt;
+ u32 tx_pri1_drop_bcnt;
+ u32 tx_pri2_drop_bcnt;
+ u32 tx_pri3_drop_bcnt;
+ u32 tx_pri4_drop_bcnt;
+ u32 tx_pri5_drop_bcnt;
+ u32 tx_pri6_drop_bcnt;
+ u32 tx_pri7_drop_bcnt;
+};
+
+struct am65_cpsw_ethtool_stat {
+ char desc[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define AM65_CPSW_STATS(prefix, field) \
+{ \
+ #prefix#field, \
+ offsetof(struct am65_cpsw_stats_regs, field) \
+}
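As a concrete expansion, AM65_CPSW_STATS(p0_, rx_good_frames) stringizes and pastes its arguments into

 { "p0_rx_good_frames", offsetof(struct am65_cpsw_stats_regs, rx_good_frames) }

so the ethtool string and the hardware register offset stay in sync by construction; the host table below simply prefixes every field with p0_ while the slave table uses an empty prefix.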
+
+static const struct am65_cpsw_ethtool_stat am65_host_stats[] = {
+ AM65_CPSW_STATS(p0_, rx_good_frames),
+ AM65_CPSW_STATS(p0_, rx_broadcast_frames),
+ AM65_CPSW_STATS(p0_, rx_multicast_frames),
+ AM65_CPSW_STATS(p0_, rx_crc_errors),
+ AM65_CPSW_STATS(p0_, rx_oversized_frames),
+ AM65_CPSW_STATS(p0_, rx_undersized_frames),
+ AM65_CPSW_STATS(p0_, ale_drop),
+ AM65_CPSW_STATS(p0_, ale_overrun_drop),
+ AM65_CPSW_STATS(p0_, rx_octets),
+ AM65_CPSW_STATS(p0_, tx_good_frames),
+ AM65_CPSW_STATS(p0_, tx_broadcast_frames),
+ AM65_CPSW_STATS(p0_, tx_multicast_frames),
+ AM65_CPSW_STATS(p0_, tx_octets),
+ AM65_CPSW_STATS(p0_, tx_64B_frames),
+ AM65_CPSW_STATS(p0_, tx_65_to_127B_frames),
+ AM65_CPSW_STATS(p0_, tx_128_to_255B_frames),
+ AM65_CPSW_STATS(p0_, tx_256_to_511B_frames),
+ AM65_CPSW_STATS(p0_, tx_512_to_1023B_frames),
+ AM65_CPSW_STATS(p0_, tx_1024B_frames),
+ AM65_CPSW_STATS(p0_, net_octets),
+ AM65_CPSW_STATS(p0_, rx_bottom_fifo_drop),
+ AM65_CPSW_STATS(p0_, rx_port_mask_drop),
+ AM65_CPSW_STATS(p0_, rx_top_fifo_drop),
+ AM65_CPSW_STATS(p0_, ale_rate_limit_drop),
+ AM65_CPSW_STATS(p0_, ale_vid_ingress_drop),
+ AM65_CPSW_STATS(p0_, ale_da_eq_sa_drop),
+ AM65_CPSW_STATS(p0_, ale_block_drop),
+ AM65_CPSW_STATS(p0_, ale_secure_drop),
+ AM65_CPSW_STATS(p0_, ale_auth_drop),
+ AM65_CPSW_STATS(p0_, ale_unknown_ucast),
+ AM65_CPSW_STATS(p0_, ale_unknown_ucast_bytes),
+ AM65_CPSW_STATS(p0_, ale_unknown_mcast),
+ AM65_CPSW_STATS(p0_, ale_unknown_mcast_bytes),
+ AM65_CPSW_STATS(p0_, ale_unknown_bcast),
+ AM65_CPSW_STATS(p0_, ale_unknown_bcast_bytes),
+ AM65_CPSW_STATS(p0_, ale_pol_match),
+ AM65_CPSW_STATS(p0_, ale_pol_match_red),
+ AM65_CPSW_STATS(p0_, ale_pol_match_yellow),
+ AM65_CPSW_STATS(p0_, ale_mcast_sa_drop),
+ AM65_CPSW_STATS(p0_, ale_dual_vlan_drop),
+ AM65_CPSW_STATS(p0_, ale_len_err_drop),
+ AM65_CPSW_STATS(p0_, ale_ip_next_hdr_drop),
+ AM65_CPSW_STATS(p0_, ale_ipv4_frag_drop),
+ AM65_CPSW_STATS(p0_, tx_mem_protect_err),
+ AM65_CPSW_STATS(p0_, tx_pri0),
+ AM65_CPSW_STATS(p0_, tx_pri1),
+ AM65_CPSW_STATS(p0_, tx_pri2),
+ AM65_CPSW_STATS(p0_, tx_pri3),
+ AM65_CPSW_STATS(p0_, tx_pri4),
+ AM65_CPSW_STATS(p0_, tx_pri5),
+ AM65_CPSW_STATS(p0_, tx_pri6),
+ AM65_CPSW_STATS(p0_, tx_pri7),
+ AM65_CPSW_STATS(p0_, tx_pri0_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri1_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri2_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri3_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri4_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri5_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri6_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri7_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri0_drop),
+ AM65_CPSW_STATS(p0_, tx_pri1_drop),
+ AM65_CPSW_STATS(p0_, tx_pri2_drop),
+ AM65_CPSW_STATS(p0_, tx_pri3_drop),
+ AM65_CPSW_STATS(p0_, tx_pri4_drop),
+ AM65_CPSW_STATS(p0_, tx_pri5_drop),
+ AM65_CPSW_STATS(p0_, tx_pri6_drop),
+ AM65_CPSW_STATS(p0_, tx_pri7_drop),
+ AM65_CPSW_STATS(p0_, tx_pri0_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri1_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri2_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri3_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri4_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri5_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri6_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri7_drop_bcnt),
+};
+
+static const struct am65_cpsw_ethtool_stat am65_slave_stats[] = {
+ AM65_CPSW_STATS(, rx_good_frames),
+ AM65_CPSW_STATS(, rx_broadcast_frames),
+ AM65_CPSW_STATS(, rx_multicast_frames),
+ AM65_CPSW_STATS(, rx_pause_frames),
+ AM65_CPSW_STATS(, rx_crc_errors),
+ AM65_CPSW_STATS(, rx_align_code_errors),
+ AM65_CPSW_STATS(, rx_oversized_frames),
+ AM65_CPSW_STATS(, rx_jabber_frames),
+ AM65_CPSW_STATS(, rx_undersized_frames),
+ AM65_CPSW_STATS(, rx_fragments),
+ AM65_CPSW_STATS(, ale_drop),
+ AM65_CPSW_STATS(, ale_overrun_drop),
+ AM65_CPSW_STATS(, rx_octets),
+ AM65_CPSW_STATS(, tx_good_frames),
+ AM65_CPSW_STATS(, tx_broadcast_frames),
+ AM65_CPSW_STATS(, tx_multicast_frames),
+ AM65_CPSW_STATS(, tx_pause_frames),
+ AM65_CPSW_STATS(, tx_deferred_frames),
+ AM65_CPSW_STATS(, tx_collision_frames),
+ AM65_CPSW_STATS(, tx_single_coll_frames),
+ AM65_CPSW_STATS(, tx_mult_coll_frames),
+ AM65_CPSW_STATS(, tx_excessive_collisions),
+ AM65_CPSW_STATS(, tx_late_collisions),
+ AM65_CPSW_STATS(, rx_ipg_error),
+ AM65_CPSW_STATS(, tx_carrier_sense_errors),
+ AM65_CPSW_STATS(, tx_octets),
+ AM65_CPSW_STATS(, tx_64B_frames),
+ AM65_CPSW_STATS(, tx_65_to_127B_frames),
+ AM65_CPSW_STATS(, tx_128_to_255B_frames),
+ AM65_CPSW_STATS(, tx_256_to_511B_frames),
+ AM65_CPSW_STATS(, tx_512_to_1023B_frames),
+ AM65_CPSW_STATS(, tx_1024B_frames),
+ AM65_CPSW_STATS(, net_octets),
+ AM65_CPSW_STATS(, rx_bottom_fifo_drop),
+ AM65_CPSW_STATS(, rx_port_mask_drop),
+ AM65_CPSW_STATS(, rx_top_fifo_drop),
+ AM65_CPSW_STATS(, ale_rate_limit_drop),
+ AM65_CPSW_STATS(, ale_vid_ingress_drop),
+ AM65_CPSW_STATS(, ale_da_eq_sa_drop),
+ AM65_CPSW_STATS(, ale_block_drop),
+ AM65_CPSW_STATS(, ale_secure_drop),
+ AM65_CPSW_STATS(, ale_auth_drop),
+ AM65_CPSW_STATS(, ale_unknown_ucast),
+ AM65_CPSW_STATS(, ale_unknown_ucast_bytes),
+ AM65_CPSW_STATS(, ale_unknown_mcast),
+ AM65_CPSW_STATS(, ale_unknown_mcast_bytes),
+ AM65_CPSW_STATS(, ale_unknown_bcast),
+ AM65_CPSW_STATS(, ale_unknown_bcast_bytes),
+ AM65_CPSW_STATS(, ale_pol_match),
+ AM65_CPSW_STATS(, ale_pol_match_red),
+ AM65_CPSW_STATS(, ale_pol_match_yellow),
+ AM65_CPSW_STATS(, ale_mcast_sa_drop),
+ AM65_CPSW_STATS(, ale_dual_vlan_drop),
+ AM65_CPSW_STATS(, ale_len_err_drop),
+ AM65_CPSW_STATS(, ale_ip_next_hdr_drop),
+ AM65_CPSW_STATS(, ale_ipv4_frag_drop),
+ AM65_CPSW_STATS(, iet_rx_assembly_err),
+ AM65_CPSW_STATS(, iet_rx_assembly_ok),
+ AM65_CPSW_STATS(, iet_rx_smd_err),
+ AM65_CPSW_STATS(, iet_rx_frag),
+ AM65_CPSW_STATS(, iet_tx_hold),
+ AM65_CPSW_STATS(, iet_tx_frag),
+ AM65_CPSW_STATS(, tx_mem_protect_err),
+ AM65_CPSW_STATS(, tx_pri0),
+ AM65_CPSW_STATS(, tx_pri1),
+ AM65_CPSW_STATS(, tx_pri2),
+ AM65_CPSW_STATS(, tx_pri3),
+ AM65_CPSW_STATS(, tx_pri4),
+ AM65_CPSW_STATS(, tx_pri5),
+ AM65_CPSW_STATS(, tx_pri6),
+ AM65_CPSW_STATS(, tx_pri7),
+ AM65_CPSW_STATS(, tx_pri0_bcnt),
+ AM65_CPSW_STATS(, tx_pri1_bcnt),
+ AM65_CPSW_STATS(, tx_pri2_bcnt),
+ AM65_CPSW_STATS(, tx_pri3_bcnt),
+ AM65_CPSW_STATS(, tx_pri4_bcnt),
+ AM65_CPSW_STATS(, tx_pri5_bcnt),
+ AM65_CPSW_STATS(, tx_pri6_bcnt),
+ AM65_CPSW_STATS(, tx_pri7_bcnt),
+ AM65_CPSW_STATS(, tx_pri0_drop),
+ AM65_CPSW_STATS(, tx_pri1_drop),
+ AM65_CPSW_STATS(, tx_pri2_drop),
+ AM65_CPSW_STATS(, tx_pri3_drop),
+ AM65_CPSW_STATS(, tx_pri4_drop),
+ AM65_CPSW_STATS(, tx_pri5_drop),
+ AM65_CPSW_STATS(, tx_pri6_drop),
+ AM65_CPSW_STATS(, tx_pri7_drop),
+ AM65_CPSW_STATS(, tx_pri0_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri1_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri2_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri3_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri4_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri5_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri6_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri7_drop_bcnt),
+};
+
+/* Ethtool priv_flags */
+static const char am65_cpsw_ethtool_priv_flags[][ETH_GSTRING_LEN] = {
+#define AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN BIT(0)
+ "p0-rx-ptype-rrobin",
+};
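For illustration (the interface name is an assumption), the flag surfaces through the generic ethtool private-flags interface and ends up in am65_cpsw_set_ethtool_priv_flags() below:

 # ethtool --show-priv-flags eth0
 # ethtool --set-priv-flags eth0 p0-rx-ptype-rrobin on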
+
+static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ dev_err(common->dev, "ethtool begin failed %d\n", ret);
+ pm_runtime_put_noidle(common->dev);
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int ret;
+
+ ret = pm_runtime_put(common->dev);
+ if (ret < 0 && ret != -EBUSY)
+ dev_err(common->dev, "ethtool complete failed %d\n", ret);
+}
+
+static void am65_cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ strlcpy(info->driver, dev_driver_string(common->dev),
+ sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
+}
+
+static u32 am65_cpsw_get_msglevel(struct net_device *ndev)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void am65_cpsw_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+static void am65_cpsw_get_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
+ ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
+ ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+ ch->tx_count = common->tx_ch_num;
+}
+
+static int am65_cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ if (!chs->rx_count || !chs->tx_count)
+ return -EINVAL;
+
+ /* The number of TX queues can only be changed while the
+ * interface is down.
+ */
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ am65_cpsw_nuss_remove_tx_chns(common);
+
+ return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+}
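Because of the netif_running() check, the TX channel count can only be changed while the interface is down; a plausible user-space sequence (interface name assumed) is:

 # ip link set eth0 down
 # ethtool -L eth0 tx 4
 # ip link set eth0 up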
+
+static void am65_cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ /* ring resizing is not supported; report the fixed descriptor counts */
+ ering->tx_pending = common->tx_chns[0].descs_num;
+ ering->rx_pending = common->rx_chns.descs_num;
+}
+
+static void am65_cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = slave->rx_pause ? true : false;
+ pause->tx_pause = slave->tx_pause ? true : false;
+}
+
+static int am65_cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy)
+ return -EINVAL;
+
+ if (!phy_validate_pause(slave->phy, pause))
+ return -EINVAL;
+
+ slave->rx_pause = pause->rx_pause ? true : false;
+ slave->tx_pause = pause->tx_pause ? true : false;
+
+ phy_set_asym_pause(slave->phy, slave->rx_pause, slave->tx_pause);
+
+ return 0;
+}
+
+static void am65_cpsw_get_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (slave->phy)
+ phy_ethtool_get_wol(slave->phy, wol);
+}
+
+static int am65_cpsw_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_set_wol(slave->phy, wol);
+}
+
+static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy)
+ return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(slave->phy, ecmd);
+ return 0;
+}
+
+static int
+am65_cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy || phy_is_pseudo_fixed_link(slave->phy))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_ksettings_set(slave->phy, ecmd);
+}
+
+static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy || phy_is_pseudo_fixed_link(slave->phy))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_get_eee(slave->phy, edata);
+}
+
+static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy || phy_is_pseudo_fixed_link(slave->phy))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_set_eee(slave->phy, edata);
+}
+
+static int am65_cpsw_nway_reset(struct net_device *ndev)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy || phy_is_pseudo_fixed_link(slave->phy))
+ return -EOPNOTSUPP;
+
+ return phy_restart_aneg(slave->phy);
+}
+
+static int am65_cpsw_get_regs_len(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 i, regdump_len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
+ if (am65_cpsw_regdump[i].hdr.module_id ==
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
+ regdump_len += sizeof(struct am65_cpsw_regdump_hdr);
+ regdump_len += common->ale->params.ale_entries *
+ ALE_ENTRY_WORDS * sizeof(u32);
+ continue;
+ }
+ regdump_len += am65_cpsw_regdump[i].hdr.len;
+ }
+
+ return regdump_len;
+}
+
+static void am65_cpsw_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 i, j, pos, *reg = p;
+
+ /* report the regdump format version */
+ regs->version = AM65_CPSW_REGDUMP_VER;
+
+ pos = 0;
+ for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
+ reg[pos++] = am65_cpsw_regdump[i].hdr.module_id;
+
+ if (am65_cpsw_regdump[i].hdr.module_id ==
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
+ u32 ale_tbl_len = common->ale->params.ale_entries *
+ ALE_ENTRY_WORDS * sizeof(u32) +
+ sizeof(struct am65_cpsw_regdump_hdr);
+ reg[pos++] = ale_tbl_len;
+ cpsw_ale_dump(common->ale, &reg[pos]);
+ pos += ale_tbl_len;
+ continue;
+ }
+
+ reg[pos++] = am65_cpsw_regdump[i].hdr.len;
+
+ j = am65_cpsw_regdump[i].start_ofs;
+ do {
+ reg[pos++] = j;
+ reg[pos++] = readl_relaxed(common->ss_base + j);
+ j += sizeof(u32);
+ } while (j <= am65_cpsw_regdump[i].end_ofs);
+ }
+}
+
+static int am65_cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(am65_host_stats) +
+ ARRAY_SIZE(am65_slave_stats);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(am65_cpsw_ethtool_priv_flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void am65_cpsw_get_strings(struct net_device *ndev,
+ u32 stringset, u8 *data)
+{
+ const struct am65_cpsw_ethtool_stat *hw_stats;
+ u32 i, num_stats;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(am65_host_stats);
+ hw_stats = am65_host_stats;
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, hw_stats[i].desc, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ num_stats = ARRAY_SIZE(am65_slave_stats);
+ hw_stats = am65_slave_stats;
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, hw_stats[i].desc, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ num_stats = ARRAY_SIZE(am65_cpsw_ethtool_priv_flags);
+
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, am65_cpsw_ethtool_priv_flags[i],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ const struct am65_cpsw_ethtool_stat *hw_stats;
+ struct am65_cpsw_host *host_p;
+ struct am65_cpsw_port *port;
+ u32 i, num_stats;
+
+ host_p = am65_common_get_host(common);
+ port = am65_ndev_to_port(ndev);
+ num_stats = ARRAY_SIZE(am65_host_stats);
+ hw_stats = am65_host_stats;
+ for (i = 0; i < num_stats; i++)
+ *data++ = readl_relaxed(host_p->stat_base +
+ hw_stats[i].offset);
+
+ num_stats = ARRAY_SIZE(am65_slave_stats);
+ hw_stats = am65_slave_stats;
+ for (i = 0; i < num_stats; i++)
+ *data++ = readl_relaxed(port->stat_base +
+ hw_stats[i].offset);
+}
+
+static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 priv_flags = 0;
+
+ if (common->pf_p0_rx_ptype_rrobin)
+ priv_flags |= AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN;
+
+ return priv_flags;
+}
+
+static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ common->pf_p0_rx_ptype_rrobin =
+ !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+ am65_cpsw_nuss_set_p0_ptype(common);
+
+ return 0;
+}
+
+const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
+ .begin = am65_cpsw_ethtool_op_begin,
+ .complete = am65_cpsw_ethtool_op_complete,
+ .get_drvinfo = am65_cpsw_get_drvinfo,
+ .get_msglevel = am65_cpsw_get_msglevel,
+ .set_msglevel = am65_cpsw_set_msglevel,
+ .get_channels = am65_cpsw_get_channels,
+ .set_channels = am65_cpsw_set_channels,
+ .get_ringparam = am65_cpsw_get_ringparam,
+ .get_regs_len = am65_cpsw_get_regs_len,
+ .get_regs = am65_cpsw_get_regs,
+ .get_sset_count = am65_cpsw_get_sset_count,
+ .get_strings = am65_cpsw_get_strings,
+ .get_ethtool_stats = am65_cpsw_get_ethtool_stats,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
+ .set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
+
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = am65_cpsw_get_link_ksettings,
+ .set_link_ksettings = am65_cpsw_set_link_ksettings,
+ .get_pauseparam = am65_cpsw_get_pauseparam,
+ .set_pauseparam = am65_cpsw_set_pauseparam,
+ .get_wol = am65_cpsw_get_wol,
+ .set_wol = am65_cpsw_set_wol,
+ .get_eee = am65_cpsw_get_eee,
+ .set_eee = am65_cpsw_set_eee,
+ .nway_reset = am65_cpsw_nway_reset,
+};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
new file mode 100644
index 000000000000..88f52a2f85b3
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -0,0 +1,1967 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include "cpsw_ale.h"
+#include "cpsw_sl.h"
+#include "am65-cpsw-nuss.h"
+#include "k3-cppi-desc-pool.h"
+
+#define AM65_CPSW_SS_BASE 0x0
+#define AM65_CPSW_SGMII_BASE 0x100
+#define AM65_CPSW_XGMII_BASE 0x2100
+#define AM65_CPSW_CPSW_NU_BASE 0x20000
+#define AM65_CPSW_NU_PORTS_BASE 0x1000
+#define AM65_CPSW_NU_STATS_BASE 0x1a000
+#define AM65_CPSW_NU_ALE_BASE 0x1e000
+#define AM65_CPSW_NU_CPTS_BASE 0x1d000
+
+#define AM65_CPSW_NU_PORTS_OFFSET 0x1000
+#define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
+
+#define AM65_CPSW_MAX_PORTS 8
+
+#define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
+#define AM65_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_REG_STAT_PORT_EN 0x014
+#define AM65_CPSW_REG_PTYPE 0x018
+
+#define AM65_CPSW_P0_REG_CTL 0x004
+#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET 0x008
+
+#define AM65_CPSW_PORT_REG_PRI_CTL 0x01c
+#define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020
+#define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024
+
+#define AM65_CPSW_PORTN_REG_SA_L 0x308
+#define AM65_CPSW_PORTN_REG_SA_H 0x30c
+#define AM65_CPSW_PORTN_REG_TS_CTL 0x310
+#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG 0x314
+#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
+#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
+
+#define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
+#define AM65_CPSW_CTL_P0_ENABLE BIT(2)
+#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
+#define AM65_CPSW_CTL_P0_RX_PAD BIT(14)
+
+/* AM65_CPSW_P0_REG_CTL */
+#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN BIT(0)
+
+/* AM65_CPSW_PORT_REG_PRI_CTL */
+#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
+
+/* AM65_CPSW_PN_TS_CTL register fields */
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4)
+#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5)
+#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN BIT(6)
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN BIT(7)
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN BIT(10)
+#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
+#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
+
+/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
+#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
+
+/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 BIT(16)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 BIT(17)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 BIT(18)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 BIT(19)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 BIT(20)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 BIT(21)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 BIT(22)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
+
+#define AM65_CPSW_TS_TX_ANX_ALL_EN \
+ (AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN | \
+ AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
+ AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
+
+#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
+/* Number of TX/RX descriptors */
+#define AM65_CPSW_MAX_TX_DESC 500
+#define AM65_CPSW_MAX_RX_DESC 500
+
+#define AM65_CPSW_NAV_PS_DATA_SIZE 16
+#define AM65_CPSW_NAV_SW_DATA_SIZE 16
+
+#define AM65_CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
+ const u8 *dev_addr)
+{
+ u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
+ (dev_addr[2] << 16) | (dev_addr[3] << 24);
+ u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
+
+ writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
+ writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
+}
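Worked example for the packing above: with dev_addr = 00:11:22:33:44:55 the two writes produce

 mac_hi = 0x33221100 -> AM65_CPSW_PORTN_REG_SA_H
 mac_lo = 0x00005544 -> AM65_CPSW_PORTN_REG_SA_L

i.e. the first four octets fill SA_H from the least-significant byte upward and the remaining two fill the low half-word of SA_L.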
+
+static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
+{
+ cpsw_sl_reset(port->slave.mac_sl, 100);
+ /* Max length register has to be restored after MAC SL reset */
+ writel(AM65_CPSW_MAX_PACKET_SIZE,
+ port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
+}
+
+static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
+{
+ common->nuss_ver = readl(common->ss_base);
+ common->cpsw_ver = readl(common->cpsw_base);
+ dev_info(common->dev,
+ "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u\n",
+ common->nuss_ver,
+ common->cpsw_ver,
+ common->port_num + 1);
+}
+
+void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct phy_device *phy = port->slave.phy;
+ u32 mac_control = 0;
+
+ if (!phy)
+ return;
+
+ if (phy->link) {
+ mac_control = CPSW_SL_CTL_GMII_EN;
+
+ if (phy->speed == 1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (phy->speed == 10 && phy_interface_is_rgmii(phy))
+ /* Can be used with in-band mode only */
+ mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (phy->duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* RGMII speed is 100M if !CPSW_SL_CTL_GIG */
+
+ /* rx_pause/tx_pause */
+ if (port->slave.rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (port->slave.tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ netif_tx_wake_all_queues(ndev);
+ } else {
+ int tmo;
+ /* disable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
+ tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n",
+ cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
+ tmo);
+
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ netif_tx_stop_all_queues(ndev);
+ }
+
+ phy_print_status(phy);
+}
+
+static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 port_mask, unreg_mcast = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ if (!vid)
+ unreg_mcast = port_mask;
+ dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
+ ret = cpsw_ale_add_vlan(common->ale, vid, port_mask,
+ unreg_mcast, port_mask, 0);
+
+ pm_runtime_put(common->dev);
+ return ret;
+}
+
+static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(common->ale, vid, 0);
+
+ pm_runtime_put(common->dev);
+ return ret;
+}
+
+static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port,
+ bool promisc)
+{
+ struct am65_cpsw_common *common = port->common;
+
+ if (promisc) {
+ /* Enable promiscuous mode */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY_CAF, 1);
+ dev_dbg(common->dev, "promisc enabled\n");
+ } else {
+ /* Disable promiscuous mode */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY_CAF, 0);
+ dev_dbg(common->dev, "promisc disabled\n");
+ }
+}
+
+static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 port_mask;
+ bool promisc;
+
+ promisc = !!(ndev->flags & IFF_PROMISC);
+ am65_cpsw_slave_set_promisc_2g(port, promisc);
+
+ if (promisc)
+ return;
+
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+ port_mask = ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+ if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ /* program multicast address list into ALE register */
+ netdev_for_each_mc_addr(ha, ndev) {
+ cpsw_ale_add_mcast(common->ale, ha->addr,
+ port_mask, 0, 0, 0);
+ }
+ }
+}
+
+static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
+ unsigned int txqueue)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned long trans_start;
+
+ netif_txq = netdev_get_tx_queue(ndev, txqueue);
+ tx_chn = &common->tx_chns[txqueue];
+ trans_start = netif_txq->trans_start;
+
+ netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
+ txqueue,
+ netif_tx_queue_stopped(netif_txq),
+ jiffies_to_msecs(jiffies - trans_start),
+ dql_avail(&netif_txq->dql),
+ k3_cppi_desc_pool_avail(tx_chn->desc_pool));
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* try to recover if it was stopped by us */
+ txq_trans_update(netif_txq);
+ netif_tx_wake_queue(netif_txq);
+ }
+}
+
+static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
+ struct sk_buff *skb)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct cppi5_host_desc_t *desc_rx;
+ struct device *dev = common->dev;
+ u32 pkt_len = skb_tailroom(skb);
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ void *swdata;
+
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ dev_err(dev, "Failed to map rx skb buffer\n");
+ return -EINVAL;
+ }
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ *((void **)swdata) = skb;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+}
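+
+/* RX buffer flow, summarized from the code above: every free descriptor
+ * pushed to the ring carries its skb pointer in the swdata area, so the
+ * completion paths (am65_cpsw_nuss_rx_packets() and
+ * am65_cpsw_nuss_rx_cleanup()) recover the skb from the descriptor
+ * alone, with no separate lookup table.
+ */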
+
+void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ u32 val, pri_map;
+
+ /* P0 set Receive Priority Type */
+ val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
+ /* Enet Port FIFOs work in fixed priority mode only, so
+ * reset P0_Rx_Pri_Map so that all packets go to Enet FIFO 0
+ */
+ pri_map = 0x0;
+ } else {
+ val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
+ /* restore P0_Rx_Pri_Map */
+ pri_map = 0x76543210;
+ }
+
+ writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
+ writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
+}
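+
+/* Note on the values above: pri_map 0x76543210 is one nibble per
+ * priority, mapping packet priority N to FIFO N; writing 0x0 instead
+ * funnels every priority into FIFO 0, which is what round-robin mode
+ * requires.
+ */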
+
+static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
+ netdev_features_t features)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ int port_idx, i, ret;
+ struct sk_buff *skb;
+ u32 val, port_mask;
+
+ if (common->usage_count)
+ return 0;
+
+ /* Control register */
+ writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
+ AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
+ common->cpsw_base + AM65_CPSW_REG_CTL);
+ /* Max length register */
+ writel(AM65_CPSW_MAX_PACKET_SIZE,
+ host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
+ /* set base flow_id */
+ writel(common->rx_flow_id_base,
+ host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
+ /* enable tx checksum offload */
+ if (features & NETIF_F_HW_CSUM)
+ writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
+ host_p->port_base + AM65_CPSW_P0_REG_CTL);
+
+ am65_cpsw_nuss_set_p0_ptype(common);
+
+ /* enable statistics */
+ val = BIT(HOST_PORT_NUM);
+ for (port_idx = 0; port_idx < common->port_num; port_idx++) {
+ struct am65_cpsw_port *port = &common->ports[port_idx];
+
+ if (!port->disabled)
+ val |= BIT(port->port_id);
+ }
+ writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+
+ /* disable priority elevation */
+ writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
+
+ cpsw_ale_start(common->ale);
+
+ /* limit to one RX flow only */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_DEFAULT_THREAD_ID, 0);
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_DEFAULT_THREAD_ENABLE, 1);
+ if (AM65_CPSW_IS_CPSW2G(common))
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_PORT_NOLEARN, 1);
+ /* switch to vlan aware mode */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ /* default vlan cfg: create mask based on enabled ports */
+ port_mask = GENMASK(common->port_num, 0) &
+ ~common->disabled_ports_mask;
+
+ cpsw_ale_add_vlan(common->ale, 0, port_mask,
+ port_mask, port_mask,
+ port_mask & ~ALE_PORT_HOST);
+
+ for (i = 0; i < common->rx_chns.descs_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(NULL,
+ AM65_CPSW_MAX_PACKET_SIZE,
+ GFP_KERNEL);
+ if (!skb) {
+ dev_err(common->dev, "cannot allocate skb\n");
+ return -ENOMEM;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, skb);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit skb to channel rx, error %d\n",
+ ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ kmemleak_not_leak(skb);
+ }
+ k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
+ if (ret)
+ return ret;
+ napi_enable(&common->tx_chns[i].napi_tx);
+ }
+
+ napi_enable(&common->napi_rx);
+
+ dev_dbg(common->dev, "cpsw_nuss started\n");
+ return 0;
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+
+static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
+{
+ int i;
+
+ if (common->usage_count != 1)
+ return 0;
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, common->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
+
+ for (i = 0; i < common->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
+
+ i = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "tx timeout\n");
+ for (i = 0; i < common->tx_ch_num; i++)
+ napi_disable(&common->tx_chns[i].napi_tx);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
+ &common->tx_chns[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+ }
+
+ k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+ napi_disable(&common->napi_rx);
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
+ &common->rx_chns,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+
+ cpsw_ale_stop(common->ale);
+
+ writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
+ writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+
+ dev_dbg(common->dev, "cpsw_nuss stopped\n");
+ return 0;
+}
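+
+/* Shutdown ordering, summarized from the code above: all TX channels
+ * are torn down first and up to 1s is spent waiting on tdown_complete,
+ * which the TX completion path signals once tdown_cnt drops to zero;
+ * the RX channel is torn down next, the rings are then reset and
+ * drained via the cleanup callbacks, and finally the ALE is stopped
+ * and the control/stat-enable registers are cleared.
+ */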
+
+static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret;
+
+ if (port->slave.phy)
+ phy_stop(port->slave.phy);
+
+ netif_tx_stop_all_queues(ndev);
+
+ if (port->slave.phy) {
+ phy_disconnect(port->slave.phy);
+ port->slave.phy = NULL;
+ }
+
+ ret = am65_cpsw_nuss_common_stop(common);
+ if (ret)
+ return ret;
+
+ common->usage_count--;
+ pm_runtime_put(common->dev);
+ return 0;
+}
+
+static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 port_mask;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
+ if (ret) {
+ dev_err(common->dev, "cannot set real number of tx queues\n");
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ if (ret) {
+ dev_err(common->dev, "cannot set real number of rx queues\n");
+ return ret;
+ }
+
+ for (i = 0; i < common->tx_ch_num; i++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
+
+ ret = am65_cpsw_nuss_common_open(common, ndev->features);
+ if (ret)
+ return ret;
+
+ common->usage_count++;
+
+ am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
+
+ if (port->slave.mac_only)
+ /* enable mac-only mode on port */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY, 1);
+ if (AM65_CPSW_IS_CPSW2G(common))
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_NOLEARN, 1);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE, 0);
+ cpsw_ale_add_mcast(common->ale, ndev->broadcast,
+ port_mask, 0, 0, ALE_MCAST_FWD_2);
+
+ /* mac_sl should be configured via phy-link interface */
+ am65_cpsw_sl_ctl_reset(port);
+
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
+ port->slave.phy_if);
+ if (ret)
+ goto error_cleanup;
+
+ if (port->slave.phy_node) {
+ port->slave.phy = of_phy_connect(ndev,
+ port->slave.phy_node,
+ &am65_cpsw_nuss_adjust_link,
+ 0, port->slave.phy_if);
+ if (!port->slave.phy) {
+ dev_err(common->dev, "phy %pOF not found on slave %d\n",
+ port->slave.phy_node,
+ port->port_id);
+ ret = -ENODEV;
+ goto error_cleanup;
+ }
+ }
+
+ phy_attached_info(port->slave.phy);
+ phy_start(port->slave.phy);
+
+ return 0;
+
+error_cleanup:
+ am65_cpsw_nuss_ndo_slave_stop(ndev);
+ return ret;
+}
+
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+
+ dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
+/* RX psdata[2] word format - checksum information */
+#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
+#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
+#define AM65_CPSW_RX_PSD_IS_FRAGMENT BIT(17)
+#define AM65_CPSW_RX_PSD_IS_TCP BIT(18)
+#define AM65_CPSW_RX_PSD_IPV6_VALID BIT(19)
+#define AM65_CPSW_RX_PSD_IPV4_VALID BIT(20)
+
+static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
+{
+ /* HW can verify the checksum of IPv4/IPv6 TCP/UDP packets.
+ * The csum information is provided in the psdata[2] word:
+ * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates a csum error
+ * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
+ * bits - indicate an IPv4/IPv6 packet
+ * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates a fragmented packet
+ * AM65_CPSW_RX_PSD_CSUM_ADD holds 0xFFFF for non-fragmented packets,
+ * or the csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
+ */
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return;
+
+ if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
+ AM65_CPSW_RX_PSD_IPV4_VALID)) &&
+ !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
+ /* csum for fragmented packets is unsupported */
+ if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
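+
+/* Worked example: csum_info with only AM65_CPSW_RX_PSD_IPV4_VALID set
+ * (CSUM_ERR and IS_FRAGMENT clear) marks a verified, non-fragmented
+ * IPv4 packet and the skb is flagged CHECKSUM_UNNECESSARY; any other
+ * combination keeps the CHECKSUM_NONE default, leaving verification to
+ * the stack.
+ */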
+
+static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
+ u32 flow_idx)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_rx;
+ struct device *dev = common->dev;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ struct am65_cpsw_port *port;
+ struct net_device *ndev;
+ void **swdata;
+ u32 *psdata;
+ int ret = 0;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ dev_err(dev, "RX: pop chn fail %d\n", ret);
+ return ret;
+ }
+
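+ /* Descriptors are at least pointer-aligned, so bit 0 of the address
+ * returned here never belongs to a real descriptor; the K3 UDMA glue
+ * layer (by assumed convention) sets it to flag a flow teardown event.
+ */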
+ if (desc_dma & 0x1) {
+ dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
+ return 0;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
+ __func__, flow_idx, &desc_dma);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
+ port = am65_common_get_port(common, port_id);
+ ndev = port->ndev;
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ csum_info = psdata[2];
+ dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
+
+ dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
+ if (new_skb) {
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ am65_cpsw_nuss_rx_csum(skb, csum_info);
+ napi_gro_receive(&common->napi_rx, skb);
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+ kmemleak_not_leak(new_skb);
+ } else {
+ ndev->stats.rx_dropped++;
+ new_skb = skb;
+ }
+
+ if (netif_dormant(ndev)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_dropped++;
+ return 0;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, new_skb);
+ if (WARN_ON(ret < 0)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
+ int flow = AM65_CPSW_MAX_RX_FLOWS;
+ int cur_budget, ret;
+ int num_rx = 0;
+
+ /* process every flow */
+ while (flow--) {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = am65_cpsw_nuss_rx_packets(common, flow);
+ if (ret)
+ break;
+ num_rx++;
+ }
+
+ if (num_rx >= budget)
+ break;
+ }
+
+ dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
+
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
+ enable_irq(common->rx_chns.irq);
+
+ return num_rx;
+}
+
+static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
+ struct device *dev,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+
+ dma_unmap_single(dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+
+ dma_unmap_page(dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
+ int chn, unsigned int budget)
+{
+ struct cppi5_host_desc_t *desc_tx;
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned int total_bytes = 0;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+ void **swdata;
+
+ tx_chn = &common->tx_chns[chn];
+
+ while (true) {
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ if (desc_dma & 0x1) {
+ if (atomic_dec_and_test(&common->tdown_cnt))
+ complete(&common->tdown_complete);
+ break;
+ }
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);
+
+ ndev = skb->dev;
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* Check whether the queue is stopped due to stalled tx dma;
+ * if so, wake the queue since free tx descriptors are now
+ * available
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+
+ __netif_tx_unlock(netif_txq);
+ }
+ dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+ return num_tx;
+}
+
+static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
+ int num_tx;
+
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
+ budget);
+ num_tx = min(num_tx, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ enable_irq(tx_chn->irq);
+ }
+
+ return num_tx;
+}
+
+static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
+{
+ struct am65_cpsw_common *common = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&common->napi_rx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&tx_chn->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
+static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ dma_addr_t desc_dma, buf_dma;
+ int ret, q_idx, i;
+ void **swdata;
+ u32 *psdata;
+ u32 pkt_len;
+
+ /* padding enabled in hw */
+ pkt_len = skb_headlen(skb);
+
+ q_idx = skb_get_queue_mapping(skb);
+ dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
+
+ tx_chn = &common->tx_chns[q_idx];
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(dev, skb->data, pkt_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ dev_err(dev, "Failed to map tx skb buffer\n");
+ ndev->stats.tx_errors++;
+ goto err_free_skb;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ dev_dbg(dev, "Failed to allocate descriptor\n");
+ dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ goto busy_stop_q;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
+ cppi5_hdesc_set_pkttype(first_desc, 0x7);
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
+
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *(swdata) = skb;
+ psdata = cppi5_hdesc_get_psdata(first_desc);
+
+ /* HW csum offload if enabled */
+ psdata[2] = 0;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ unsigned int cs_start, cs_offset;
+
+ cs_start = skb_transport_offset(skb);
+ cs_offset = cs_start + skb->csum_offset;
+ /* HW counts bytes starting from 1 */
+ psdata[2] = ((cs_offset + 1) << 24) |
+ ((cs_start + 1) << 16) | (skb->len - cs_start);
+ dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
+ }
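+ /* Worked example of the encoding above (illustrative values): a
+ * TCP/IPv4 frame has cs_start = 34 (14 byte Ethernet + 20 byte IP
+ * header) and skb->csum_offset = 16, so cs_offset = 50 and psdata[2]
+ * packs (51 << 24) | (35 << 16) | (skb->len - 34), following the
+ * hardware's 1-based byte numbering.
+ */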
+
+ if (!skb_is_nonlinear(skb))
+ goto done_tx;
+
+ dev_dbg(dev, "fragmented SKB\n");
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ dev_err(dev, "Failed to allocate descriptor\n");
+ goto busy_free_descs;
+ }
+
+ buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ dev_err(dev, "Failed to map tx skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ndev->stats.tx_errors++;
+ goto err_free_descs;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON(pkt_len != skb->len);
+
+done_tx:
+ skb_tx_timestamp(skb);
+
+ /* report bql before sending packet */
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ dev_err(dev, "can't push desc %d\n", ret);
+ /* inform bql */
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ ndev->stats.tx_errors++;
+ goto err_free_descs;
+ }
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that the queue stop is visible to other CPUs */
+ smp_mb__after_atomic();
+ dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
+
+ /* re-check after the barrier to close the SMP race */
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS) {
+ netif_tx_wake_queue(netif_txq);
+ dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
+ }
+ }
+
+ return NETDEV_TX_OK;
+
+err_free_descs:
+ am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+err_free_skb:
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+busy_free_descs:
+ am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+busy_stop_q:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+
+static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
+ void *addr)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct sockaddr *sockaddr = (struct sockaddr *)addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
+ HOST_PORT_NUM, 0, 0);
+ cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
+ HOST_PORT_NUM, ALE_SECURE, 0);
+
+ am65_cpsw_port_set_sl_mac(port, addr);
+ eth_commit_mac_addr_change(ndev, sockaddr);
+
+ pm_runtime_put(common->dev);
+
+ return 0;
+}
+
+static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
+ struct ifreq *req, int cmd)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!port->slave.phy)
+ return -EOPNOTSUPP;
+
+ return phy_mii_ioctl(port->slave.phy, req, cmd);
+}
+
+static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct am65_cpsw_ndev_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+}
+
+static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ netdev_features_t changes = features ^ ndev->features;
+ struct am65_cpsw_host *host_p;
+
+ host_p = am65_common_get_host(common);
+
+ if (changes & NETIF_F_HW_CSUM) {
+ bool enable = !!(features & NETIF_F_HW_CSUM);
+
+ dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n",
+ enable ? "ON" : "OFF");
+ if (enable)
+ writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
+ host_p->port_base + AM65_CPSW_P0_REG_CTL);
+ else
+ writel(0,
+ host_p->port_base + AM65_CPSW_P0_REG_CTL);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
+ .ndo_open = am65_cpsw_nuss_ndo_slave_open,
+ .ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
+ .ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
+ .ndo_set_rx_mode = am65_cpsw_nuss_ndo_slave_set_rx_mode,
+ .ndo_get_stats64 = am65_cpsw_nuss_ndo_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = am65_cpsw_nuss_ndo_slave_set_mac_address,
+ .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout,
+ .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
+ .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
+ .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
+ .ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
+};
+
+static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *common = port->common;
+
+ if (!port->disabled)
+ return;
+
+ common->disabled_ports_mask |= BIT(port->port_id);
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_reset(port->slave.mac_sl, 100);
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+}
+
+static void am65_cpsw_nuss_free_tx_chns(void *data)
+{
+ struct am65_cpsw_common *common = data;
+ int i;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
+void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ int i;
+
+ devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ if (tx_chn->irq)
+ devm_free_irq(dev, tx_chn->irq, tx_chn);
+
+ netif_napi_del(&tx_chn->napi_tx);
+
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
+static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
+{
+ u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
+ struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
+ struct device *dev = common->dev;
+ struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0
+ };
+ u32 hdesc_size;
+ int i, ret = 0;
+
+ hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
+ AM65_CPSW_NAV_SW_DATA_SIZE);
+
+ tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+ tx_cfg.tx_cfg.size = max_desc_num;
+ tx_cfg.txcq_cfg.size = max_desc_num;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ snprintf(tx_chn->tx_chn_name,
+ sizeof(tx_chn->tx_chn_name), "tx%d", i);
+
+ tx_chn->common = common;
+ tx_chn->id = i;
+ tx_chn->descs_num = max_desc_num;
+ tx_chn->desc_pool =
+ k3_cppi_desc_pool_create_name(dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ dev_err(dev, "Failed to create poll %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->tx_chn =
+ k3_udma_glue_request_tx_chn(dev,
+ tx_chn->tx_chn_name,
+ &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = PTR_ERR(tx_chn->tx_chn);
+ dev_err(dev, "Failed to request tx dma channel %d\n",
+ ret);
+ goto err;
+ }
+
+ tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (tx_chn->irq <= 0) {
+ dev_err(dev, "Failed to get tx dma irq %d\n",
+ tx_chn->irq);
+ ret = -ENXIO;
+ goto err;
+ }
+
+ snprintf(tx_chn->tx_chn_name,
+ sizeof(tx_chn->tx_chn_name), "%s-tx%d",
+ dev_name(dev), tx_chn->id);
+ }
+
+err:
+ i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+ if (i) {
+ dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
+ return i;
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_nuss_free_rx_chns(void *data)
+{
+ struct am65_cpsw_common *common = data;
+ struct am65_cpsw_rx_chn *rx_chn;
+
+ rx_chn = &common->rx_chns;
+
+ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+
+ if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+}
+
+static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
+ u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
+ struct device *dev = common->dev;
+ u32 hdesc_size;
+ u32 fdqring_id;
+ int i, ret = 0;
+
+ hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
+ AM65_CPSW_NAV_SW_DATA_SIZE);
+
+ rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+ rx_cfg.flow_id_base = common->rx_flow_id_base;
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
+ rx_chn->descs_num,
+ hdesc_size, "rx");
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ dev_err(dev, "Failed to create rx poll %d\n", ret);
+ goto err;
+ }
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = PTR_ERR(rx_chn->rx_chn);
+ dev_err(dev, "Failed to request rx dma channel %d\n", ret);
+ goto err;
+ }
+
+ common->rx_flow_id_base =
+ k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
+
+ fdqring_id = K3_RINGACC_RING_ID_ANY;
+ for (i = 0; i < rx_cfg.flow_id_num; i++) {
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .flags = K3_RINGACC_RING_SHARED,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel =
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ i, &rx_flow_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
+ goto err;
+ }
+ if (!i)
+ fdqring_id =
+ k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+
+ rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+
+ if (rx_chn->irq <= 0) {
+ dev_err(dev, "Failed to get rx dma irq %d\n",
+ rx_chn->irq);
+ ret = -ENXIO;
+ goto err;
+ }
+ }
+
+err:
+ i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+ if (i) {
+ dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
+ return i;
+ }
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+
+ host_p->common = common;
+ host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
+ host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
+
+ return 0;
+}
+
+static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
+ int slave, u8 *mac_addr)
+{
+ u32 mac_lo, mac_hi, offset;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
+ &offset);
+ if (ret)
+ return ret;
+
+ regmap_read(syscon, offset, &mac_lo);
+ regmap_read(syscon, offset + 4, &mac_hi);
+
+ mac_addr[0] = (mac_hi >> 8) & 0xff;
+ mac_addr[1] = mac_hi & 0xff;
+ mac_addr[2] = (mac_lo >> 24) & 0xff;
+ mac_addr[3] = (mac_lo >> 16) & 0xff;
+ mac_addr[4] = (mac_lo >> 8) & 0xff;
+ mac_addr[5] = mac_lo & 0xff;
+
+ return 0;
+}
+
+static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
+{
+ struct device_node *node, *port_np;
+ struct device *dev = common->dev;
+ int ret;
+
+ node = of_get_child_by_name(dev->of_node, "ethernet-ports");
+ if (!node)
+ return -ENOENT;
+
+ for_each_child_of_node(node, port_np) {
+ struct am65_cpsw_port *port;
+ const void *mac_addr;
+ u32 port_id;
+
+ /* skip anything that is not a slave port node */
+ if (strcmp(port_np->name, "port"))
+ continue;
+
+ ret = of_property_read_u32(port_np, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ port_np, ret);
+ return ret;
+ }
+
+ if (!port_id || port_id > common->port_num) {
+ dev_err(dev, "%pOF has invalid port_id %u %s\n",
+ port_np, port_id, port_np->name);
+ return -EINVAL;
+ }
+
+ port = am65_common_get_port(common, port_id);
+ port->port_id = port_id;
+ port->common = common;
+ port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
+ AM65_CPSW_NU_PORTS_OFFSET * (port_id);
+ port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
+ (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
+ port->name = of_get_property(port_np, "label", NULL);
+
+ port->disabled = !of_device_is_available(port_np);
+ if (port->disabled)
+ continue;
+
+ port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
+ if (IS_ERR(port->slave.ifphy)) {
+ ret = PTR_ERR(port->slave.ifphy);
+ dev_err(dev, "%pOF error retrieving port phy: %d\n",
+ port_np, ret);
+ return ret;
+ }
+
+ port->slave.mac_only =
+ of_property_read_bool(port_np, "ti,mac-only");
+
+ /* get phy/link info */
+ if (of_phy_is_fixed_link(port_np)) {
+ ret = of_phy_register_fixed_link(port_np);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
+ port_np, ret);
+ return ret;
+ }
+ port->slave.phy_node = of_node_get(port_np);
+ } else {
+ port->slave.phy_node =
+ of_parse_phandle(port_np, "phy-handle", 0);
+ }
+
+ if (!port->slave.phy_node) {
+ dev_err(dev,
+ "slave[%d] no phy found\n", port_id);
+ return -ENODEV;
+ }
+
+ ret = of_get_phy_mode(port_np, &port->slave.phy_if);
+ if (ret) {
+ dev_err(dev, "%pOF read phy-mode err %d\n",
+ port_np, ret);
+ return ret;
+ }
+
+ port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
+ if (IS_ERR(port->slave.mac_sl))
+ return PTR_ERR(port->slave.mac_sl);
+
+ mac_addr = of_get_mac_address(port_np);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(port->slave.mac_addr, mac_addr);
+ } else if (am65_cpsw_am654_get_efuse_macid(port_np,
+ port->port_id,
+ port->slave.mac_addr) ||
+ !is_valid_ether_addr(port->slave.mac_addr)) {
+ random_ether_addr(port->slave.mac_addr);
+ dev_err(dev, "Use random MAC address\n");
+ }
+ }
+ of_node_put(node);
+
+ return 0;
+}
+
+static void am65_cpsw_pcpu_stats_free(void *data)
+{
+ struct am65_cpsw_ndev_stats __percpu *stats = data;
+
+ free_percpu(stats);
+}
+
+static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct device *dev = common->dev;
+ struct am65_cpsw_port *port;
+ int ret;
+
+ port = am65_common_get_port(common, 1);
+
+ /* alloc netdev */
+ port->ndev = devm_alloc_etherdev_mqs(common->dev,
+ sizeof(struct am65_cpsw_ndev_priv),
+ AM65_CPSW_MAX_TX_QUEUES,
+ AM65_CPSW_MAX_RX_QUEUES);
+ if (!port->ndev) {
+ dev_err(dev, "error allocating slave net_device %u\n",
+ port->port_id);
+ return -ENOMEM;
+ }
+
+ ndev_priv = netdev_priv(port->ndev);
+ ndev_priv->port = port;
+ ndev_priv->msg_enable = AM65_CPSW_DEBUG;
+ SET_NETDEV_DEV(port->ndev, dev);
+
+ ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr);
+
+ port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
+ port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
+ port->ndev->hw_features = NETIF_F_SG |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM;
+ port->ndev->features = port->ndev->hw_features |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ port->ndev->vlan_features |= NETIF_F_SG;
+ port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g;
+ port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
+
+ /* Disable TX checksum offload by default due to HW bug */
+ if (common->pdata->quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
+ port->ndev->features &= ~NETIF_F_HW_CSUM;
+
+ ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
+ if (!ndev_priv->stats)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
+ ndev_priv->stats);
+ if (ret) {
+ dev_err(dev, "Failed to add percpu stat free action %d\n", ret);
+ return ret;
+ }
+
+ netif_napi_add(port->ndev, &common->napi_rx,
+ am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
+
+ common->pf_p0_rx_ptype_rrobin = false;
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_port *port;
+ int i, ret = 0;
+
+ port = am65_common_get_port(common, 1);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ netif_tx_napi_add(port->ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
+
+ ret = devm_request_irq(dev, tx_chn->irq,
+ am65_cpsw_nuss_tx_irq,
+ IRQF_TRIGGER_HIGH,
+ tx_chn->tx_chn_name, tx_chn);
+ if (ret) {
+ dev_err(dev, "failure requesting tx%u irq %u, %d\n",
+ tx_chn->id, tx_chn->irq, ret);
+ goto err;
+ }
+ }
+
+err:
+ return ret;
+}
+
+static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_port *port;
+ int ret = 0;
+
+ port = am65_common_get_port(common, 1);
+ ret = am65_cpsw_nuss_ndev_add_napi_2g(common);
+ if (ret)
+ goto err;
+
+ ret = devm_request_irq(dev, common->rx_chns.irq,
+ am65_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "failure requesting rx irq %u, %d\n",
+ common->rx_chns.irq, ret);
+ goto err;
+ }
+
+ ret = register_netdev(port->ndev);
+ if (ret)
+ dev_err(dev, "error registering slave net device %d\n", ret);
+
+ /* can't auto-unregister ndev using devm_add_action() due to
+ * the devres release sequence of the driver core for DMA
+ */
+err:
+ return ret;
+}
+
+int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
+{
+ int ret;
+
+ common->tx_ch_num = num_tx;
+ ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+
+ return am65_cpsw_nuss_ndev_add_napi_2g(common);
+}
+
+static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->ndev)
+ unregister_netdev(port->ndev);
+ }
+}
+
+static const struct am65_cpsw_pdata am65x_sr1_0 = {
+ .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+};
+
+static const struct am65_cpsw_pdata j721e_sr1_0 = {
+ .quirks = 0,
+};
+
+static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
+ { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0 },
+ { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_sr1_0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
+
+static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+{
+ struct cpsw_ale_params ale_params;
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct am65_cpsw_common *common;
+ struct device_node *node;
+ struct resource *res;
+ int ret, i;
+
+ common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
+ if (!common)
+ return -ENOMEM;
+ common->dev = dev;
+
+ of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
+ if (!of_id)
+ return -EINVAL;
+ common->pdata = of_id->data;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
+ common->ss_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(common->ss_base))
+ return PTR_ERR(common->ss_base);
+ common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
+
+ node = of_get_child_by_name(dev->of_node, "ethernet-ports");
+ if (!node)
+ return -ENOENT;
+ common->port_num = of_get_child_count(node);
+ if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
+ return -ENOENT;
+ of_node_put(node);
+
+ if (common->port_num != 1)
+ return -EOPNOTSUPP;
+
+ common->rx_flow_id_base = -1;
+ init_completion(&common->tdown_complete);
+ common->tx_ch_num = 1;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "error setting dma mask: %d\n", ret);
+ return ret;
+ }
+
+ common->ports = devm_kcalloc(dev, common->port_num,
+ sizeof(*common->ports),
+ GFP_KERNEL);
+ if (!common->ports)
+ return -ENOMEM;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ /* We do not want to force this, as in some cases the node may not have children */
+ if (ret)
+ dev_warn(dev, "populating child nodes err:%d\n", ret);
+
+ am65_cpsw_nuss_get_ver(common);
+
+ /* init tx channels */
+ ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ goto err_of_clear;
+ ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ goto err_of_clear;
+
+ ret = am65_cpsw_nuss_init_host_p(common);
+ if (ret)
+ goto err_of_clear;
+
+ ret = am65_cpsw_nuss_init_slave_ports(common);
+ if (ret)
+ goto err_of_clear;
+
+ /* init common data */
+ ale_params.dev = dev;
+ ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
+ ale_params.ale_entries = 0;
+ ale_params.ale_ports = common->port_num + 1;
+ ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
+ ale_params.nu_switch_ale = true;
+
+ common->ale = cpsw_ale_create(&ale_params);
+ if (IS_ERR(common->ale)) {
+ dev_err(dev, "error initializing ale engine\n");
+ ret = PTR_ERR(common->ale);
+ goto err_of_clear;
+ }
+
+ /* init ports */
+ for (i = 0; i < common->port_num; i++)
+ am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
+
+ dev_set_drvdata(dev, common);
+
+ ret = am65_cpsw_nuss_init_ndev_2g(common);
+ if (ret)
+ goto err_of_clear;
+
+ ret = am65_cpsw_nuss_ndev_reg_2g(common);
+ if (ret)
+ goto err_of_clear;
+
+ pm_runtime_put(dev);
+ return 0;
+
+err_of_clear:
+ of_platform_depopulate(dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct am65_cpsw_common *common;
+ int ret;
+
+ common = dev_get_drvdata(dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
+
+ /* must unregister ndevs here because the driver core's release_driver
+ * routine calls dma_deconfigure(dev) before devres_release_all(dev)
+ */
+ am65_cpsw_nuss_cleanup_ndev(common);
+
+ of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver am65_cpsw_nuss_driver = {
+ .driver = {
+ .name = AM65_CPSW_DRV_NAME,
+ .of_match_table = am65_cpsw_nuss_of_mtable,
+ },
+ .probe = am65_cpsw_nuss_probe,
+ .remove = am65_cpsw_nuss_remove,
+};
+
+module_platform_driver(am65_cpsw_nuss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
new file mode 100644
index 000000000000..41ae5b4c7931
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#ifndef AM65_CPSW_NUSS_H_
+#define AM65_CPSW_NUSS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#define HOST_PORT_NUM 0
+
+#define AM65_CPSW_MAX_TX_QUEUES 8
+#define AM65_CPSW_MAX_RX_QUEUES 1
+#define AM65_CPSW_MAX_RX_FLOWS 1
+
+struct am65_cpsw_slave_data {
+ bool mac_only;
+ struct cpsw_sl *mac_sl;
+ struct device_node *phy_node;
+ struct phy_device *phy;
+ phy_interface_t phy_if;
+ struct phy *ifphy;
+ bool rx_pause;
+ bool tx_pause;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct am65_cpsw_port {
+ struct am65_cpsw_common *common;
+ struct net_device *ndev;
+ const char *name;
+ u32 port_id;
+ void __iomem *port_base;
+ void __iomem *stat_base;
+ bool disabled;
+ struct am65_cpsw_slave_data slave;
+};
+
+struct am65_cpsw_host {
+ struct am65_cpsw_common *common;
+ void __iomem *port_base;
+ void __iomem *stat_base;
+};
+
+struct am65_cpsw_tx_chn {
+ struct napi_struct napi_tx;
+ struct am65_cpsw_common *common;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int irq;
+ u32 id;
+ u32 descs_num;
+ char tx_chn_name[128];
+};
+
+struct am65_cpsw_rx_chn {
+ struct device *dev;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_rx_channel *rx_chn;
+ u32 descs_num;
+ int irq;
+};
+
+#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
+
+struct am65_cpsw_pdata {
+ u32 quirks;
+};
+
+struct am65_cpsw_common {
+ struct device *dev;
+ const struct am65_cpsw_pdata *pdata;
+
+ void __iomem *ss_base;
+ void __iomem *cpsw_base;
+
+ u32 port_num;
+ struct am65_cpsw_host host;
+ struct am65_cpsw_port *ports;
+ u32 disabled_ports_mask;
+
+ int usage_count; /* number of opened ports */
+ struct cpsw_ale *ale;
+ int tx_ch_num;
+ u32 rx_flow_id_base;
+
+ struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
+ struct completion tdown_complete;
+ atomic_t tdown_cnt;
+
+ struct am65_cpsw_rx_chn rx_chns;
+ struct napi_struct napi_rx;
+
+ u32 nuss_ver;
+ u32 cpsw_ver;
+
+ bool pf_p0_rx_ptype_rrobin;
+};
+
+struct am65_cpsw_ndev_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+struct am65_cpsw_ndev_priv {
+ u32 msg_enable;
+ struct am65_cpsw_port *port;
+ struct am65_cpsw_ndev_stats __percpu *stats;
+};
+
+#define am65_ndev_to_priv(ndev) \
+ ((struct am65_cpsw_ndev_priv *)netdev_priv(ndev))
+#define am65_ndev_to_port(ndev) (am65_ndev_to_priv(ndev)->port)
+#define am65_ndev_to_common(ndev) (am65_ndev_to_port(ndev)->common)
+#define am65_ndev_to_slave(ndev) (&am65_ndev_to_port(ndev)->slave)
+
+#define am65_common_get_host(common) (&(common)->host)
+#define am65_common_get_port(common, id) (&(common)->ports[(id) - 1])
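+/* Note: port IDs are 1-based (0 is the host port), while the
+ * common->ports[] array is 0-based, hence the (id) - 1 above.
+ */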
+
+#define am65_cpsw_napi_to_common(pnapi) \
+ container_of(pnapi, struct am65_cpsw_common, napi_rx)
+#define am65_cpsw_napi_to_tx_chn(pnapi) \
+ container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx)
+
+#define AM65_CPSW_DRV_NAME "am65-cpsw-nuss"
+
+#define AM65_CPSW_IS_CPSW2G(common) ((common)->port_num == 1)
+
+extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
+
+void am65_cpsw_nuss_adjust_link(struct net_device *ndev);
+void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
+void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
+int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+
+#endif /* AM65_CPSW_NUSS_H_ */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 6ae4a72e6f43..ffeb8633e530 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1211,6 +1211,7 @@ static int cpsw_set_channels(struct net_device *ndev,
}
static const struct ethtool_ops cpsw_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
@@ -1752,11 +1753,15 @@ static int cpsw_suspend(struct device *dev)
struct cpsw_common *cpsw = dev_get_drvdata(dev);
int i;
+ rtnl_lock();
+
for (i = 0; i < cpsw->data.slaves; i++)
if (cpsw->slaves[i].ndev)
if (netif_running(cpsw->slaves[i].ndev))
cpsw_ndo_stop(cpsw->slaves[i].ndev);
+ rtnl_unlock();
+
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index ecdbde539eb7..8dc6be11b2ff 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -44,6 +44,8 @@
#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
+#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
+
#define ALE_TABLE_WRITE BIT(31)
#define ALE_TYPE_FREE 0
@@ -122,6 +124,8 @@ DEFINE_ALE_FIELD(mcast, 40, 1)
DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3)
DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3)
+#define NU_VLAN_UNREG_MCAST_IDX 1
+
/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
@@ -455,6 +459,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
ale->vlan_field_bits);
} else {
+ cpsw_ale_set_vlan_unreg_mcast_idx(ale_entry,
+ NU_VLAN_UNREG_MCAST_IDX);
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
}
cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
@@ -775,6 +781,22 @@ static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 1,
},
+ [ALE_PORT_MACONLY] = {
+ .name = "mac_only_port_mode",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 11,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_MACONLY_CAF] = {
+ .name = "mac_only_port_caf",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 13,
+ .port_shift = 0,
+ .bits = 1,
+ },
[ALE_PORT_MCAST_LIMIT] = {
.name = "mcast_limit",
.offset = ALE_PORTCTL,
@@ -823,6 +845,22 @@ static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 6,
},
+ [ALE_DEFAULT_THREAD_ID] = {
+ .name = "default_thread_id",
+ .offset = AM65_CPSW_ALE_THREAD_DEF_REG,
+ .port_offset = 0,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_DEFAULT_THREAD_ENABLE] = {
+ .name = "default_thread_id_enable",
+ .offset = AM65_CPSW_ALE_THREAD_DEF_REG,
+ .port_offset = 0,
+ .shift = 15,
+ .port_shift = 0,
+ .bits = 1,
+ },
};
int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
@@ -917,7 +955,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
if (!ale)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ale->p0_untag_vid_mask =
devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 70d0955c2652..6a3cb6898728 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -62,8 +62,12 @@ enum cpsw_ale_control {
ALE_PORT_UNKNOWN_MCAST_FLOOD,
ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
ALE_PORT_UNTAGGED_EGRESS,
+ ALE_PORT_MACONLY,
+ ALE_PORT_MACONLY_CAF,
ALE_PORT_BCAST_LIMIT,
ALE_PORT_MCAST_LIMIT,
+ ALE_DEFAULT_THREAD_ID,
+ ALE_DEFAULT_THREAD_ENABLE,
ALE_NUM_CONTROLS,
};
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 71215db7934b..9209e613257d 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1175,6 +1175,7 @@ static int cpsw_set_channels(struct net_device *ndev,
}
static const struct ethtool_ops cpsw_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 97a058ca60ac..d0b6c418a870 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -490,9 +490,9 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
cpsw->ale = cpsw_ale_create(&ale_params);
- if (!cpsw->ale) {
+ if (IS_ERR(cpsw->ale)) {
dev_err(dev, "error initializing ale engine\n");
- return -ENODEV;
+ return PTR_ERR(cpsw->ale);
}
dma_params.dev = dev;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 75d4e16c692b..de282531f68b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -481,6 +481,7 @@ static int emac_set_coalesce(struct net_device *ndev,
* Ethtool support for EMAC adapter
*/
static const struct ethtool_ops ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = emac_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_coalesce = emac_get_coalesce,
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
new file mode 100644
index 000000000000..ad7cfc1316ce
--- /dev/null
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI K3 CPPI5 descriptors pool API
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+
+#include "k3-cppi-desc-pool.h"
+
+struct k3_cppi_desc_pool {
+ struct device *dev;
+ dma_addr_t dma_addr;
+ void *cpumem; /* dma_alloc map */
+ size_t desc_size;
+ size_t mem_size;
+ size_t num_desc;
+ struct gen_pool *gen_pool;
+};
+
+void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
+{
+ if (!pool)
+ return;
+
+ WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
+ "k3_knav_desc_pool size %zu != avail %zu",
+ gen_pool_size(pool->gen_pool),
+ gen_pool_avail(pool->gen_pool));
+ if (pool->cpumem)
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
+ pool->dma_addr);
+
+ gen_pool_destroy(pool->gen_pool); /* frees pool->name */
+}
+
+struct k3_cppi_desc_pool *
+k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
+ size_t desc_size,
+ const char *name)
+{
+ struct k3_cppi_desc_pool *pool;
+ const char *pool_name = NULL;
+ int ret = -ENOMEM;
+
+ pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(ret);
+
+ pool->dev = dev;
+ pool->desc_size = roundup_pow_of_two(desc_size);
+ pool->num_desc = size;
+ pool->mem_size = pool->num_desc * pool->desc_size;
+
+ pool_name = kstrdup_const(name ? name : dev_name(pool->dev),
+ GFP_KERNEL);
+ if (!pool_name)
+ return ERR_PTR(-ENOMEM);
+
+ pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1);
+	if (!pool->gen_pool) {	/* gen_pool_create() returns NULL, not ERR_PTR() */
+		ret = -ENOMEM;
+ dev_err(pool->dev, "pool create failed %d\n", ret);
+ kfree_const(pool_name);
+ goto gen_pool_create_fail;
+ }
+
+ pool->gen_pool->name = pool_name;
+
+ pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size,
+ &pool->dma_addr, GFP_KERNEL);
+
+ if (!pool->cpumem)
+ goto dma_alloc_fail;
+
+ ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->cpumem,
+ (phys_addr_t)pool->dma_addr, pool->mem_size,
+ -1);
+ if (ret < 0) {
+ dev_err(pool->dev, "pool add failed %d\n", ret);
+ goto gen_pool_add_virt_fail;
+ }
+
+ return pool;
+
+gen_pool_add_virt_fail:
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
+ pool->dma_addr);
+dma_alloc_fail:
+ gen_pool_destroy(pool->gen_pool); /* frees pool->name */
+gen_pool_create_fail:
+ devm_kfree(pool->dev, pool);
+ return ERR_PTR(ret);
+}
+
+dma_addr_t k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool,
+ void *addr)
+{
+ return addr ? pool->dma_addr + (addr - pool->cpumem) : 0;
+}
+
+void *k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->cpumem + (dma - pool->dma_addr) : NULL;
+}
+
+void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool)
+{
+ return (void *)gen_pool_alloc(pool->gen_pool, pool->desc_size);
+}
+
+void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr)
+{
+ gen_pool_free(pool->gen_pool, (unsigned long)addr, pool->desc_size);
+}
+
+size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool)
+{
+ return gen_pool_avail(pool->gen_pool) / pool->desc_size;
+}
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
new file mode 100644
index 000000000000..a7e3fa5e7b62
--- /dev/null
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* TI K3 CPPI5 descriptors pool
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_CPPI_DESC_POOL_H_
+#define K3_CPPI_DESC_POOL_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct k3_cppi_desc_pool;
+
+void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool);
+struct k3_cppi_desc_pool *
+k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
+ size_t desc_size,
+ const char *name);
+#define k3_cppi_desc_pool_create(dev, size, desc_size) \
+ k3_cppi_desc_pool_create_name(dev, size, desc_size, NULL)
+dma_addr_t
+k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool, void *addr);
+void *
+k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma);
+void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool);
+void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr);
+size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool);
+
+#endif /* K3_CPPI_DESC_POOL_H_ */
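
The new pool is one physically contiguous dma_alloc_coherent() region carved into equal, power-of-two-sized descriptors by a genalloc gen_pool, which is why virt2dma/dma2virt reduce to pure offset arithmetic against the region base. A hedged usage sketch (struct my_hw_desc and the surrounding error handling are illustrative only):

	struct k3_cppi_desc_pool *pool;
	dma_addr_t desc_dma;
	void *desc;

	pool = k3_cppi_desc_pool_create(dev, 512, sizeof(struct my_hw_desc));
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	desc = k3_cppi_desc_pool_alloc(pool);			/* CPU view */
	desc_dma = k3_cppi_desc_pool_virt2dma(pool, desc);	/* device view */
	/* ... queue desc_dma to the DMA engine, then reclaim ... */
	k3_cppi_desc_pool_free(pool, desc);
	k3_cppi_desc_pool_destroy(pool);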
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index fb36115e9c51..fdbae734acce 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3704,9 +3704,9 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ale_params.nu_switch_ale = true;
}
gbe_dev->ale = cpsw_ale_create(&ale_params);
- if (!gbe_dev->ale) {
+ if (IS_ERR(gbe_dev->ale)) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");
- ret = -ENODEV;
+ ret = PTR_ERR(gbe_dev->ale);
goto free_sec_ports;
} else {
dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 805903dbddcc..68f324ed4eaf 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -308,7 +308,7 @@ struct gelic_port {
struct gelic_card *card;
struct net_device *netdev;
enum gelic_port_type type;
- long priv[0]; /* long for alignment */
+ long priv[]; /* long for alignment */
};
static inline struct gelic_card *port_to_card(struct gelic_port *p)
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index 4041d946b649..1f203d1ae8db 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -158,7 +158,7 @@ struct gelic_eurus_scan_info {
__be32 reserved2;
__be32 reserved3;
__be32 reserved4;
- u8 elements[0]; /* ie */
+ u8 elements[]; /* ie */
} __packed;
/* the hypervisor returns bbs up to 16 */
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index c0c68cbc898c..05b1a0736835 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -470,7 +470,7 @@ struct spider_net_card {
struct spider_net_extra_stats spider_stats;
/* Must be last item in struct */
- struct spider_net_descr darray[0];
+ struct spider_net_descr darray[];
};
#endif
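
The ps3_gelic, spider_net and tc35815 hunks above are part of the treewide conversion from zero-length arrays to C99 flexible array members: `type name[];` tells the compiler (and fortify/bounds checkers) that the trailing array has no declared size, and sizeof the struct excludes it. The usual allocation pattern pairs it with struct_size(); a sketch with a hypothetical struct:

	struct pkt {
		u32 len;
		u8 data[];		/* was: u8 data[0]; */
	};

	/* overflow-checked: sizeof(*p) + n * sizeof(p->data[0]) */
	p = kzalloc(struct_size(p, data, n), GFP_KERNEL);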
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 3fd43d30b20d..6bcda20ed7e7 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -367,7 +367,7 @@ struct TxFD {
struct RxFD {
struct FDesc fd;
- struct BDesc bd[0]; /* variable length */
+ struct BDesc bd[]; /* variable length */
};
struct FrFD {
@@ -643,7 +643,7 @@ static int tc_mii_probe(struct net_device *dev)
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
}
- linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_andnot(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);
lp->link = 0;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4b556b74541a..713dbc04b25b 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3648,6 +3648,8 @@ static void velocity_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops velocity_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = velocity_get_drvinfo,
.get_wol = velocity_ethtool_get_wol,
.set_wol = velocity_ethtool_set_wol,
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 6304ebd8b5c6..0810af8193cb 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -32,7 +32,6 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
- depends on PPC || MICROBLAZE || X86 || COMPILE_TEST
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 53fb8141f1a6..4a73127e10a6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -369,18 +369,20 @@ struct temac_local {
/* Buffer descriptors */
struct cdmac_bd *tx_bd_v;
dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
struct cdmac_bd *rx_bd_v;
dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
int tx_bd_ci;
- int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
int rx_bd_tail;
/* DMA channel control setup */
- u32 tx_chnl_ctrl;
- u32 rx_chnl_ctrl;
+ u8 coalesce_count_tx;
+ u8 coalesce_delay_tx;
u8 coalesce_count_rx;
+ u8 coalesce_delay_rx;
struct delayed_work restart_work;
};
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9461acec6f70..3e313e71ae36 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -58,8 +58,11 @@
#include "ll_temac.h"
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096
/* ---------------------------------------------------------------------
* Low level register access functions
@@ -301,7 +304,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
/* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i])
break;
else {
@@ -312,12 +315,12 @@ static void temac_dma_bd_release(struct net_device *ndev)
}
if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- lp->rx_bd_v, lp->rx_bd_p);
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v, lp->rx_bd_p);
if (lp->tx_bd_v)
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- lp->tx_bd_v, lp->tx_bd_p);
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v, lp->tx_bd_p);
}
/**
@@ -330,33 +333,33 @@ static int temac_dma_bd_init(struct net_device *ndev)
dma_addr_t skb_dma_addr;
int i;
- lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
- GFP_KERNEL);
+ lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
+ sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb)
goto out;
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
- + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
+ + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
- + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
+ + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
@@ -376,21 +379,22 @@ static int temac_dma_bd_init(struct net_device *ndev)
}
/* Configure DMA channel (irq setup) */
- lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+ lp->dma_out(lp, TX_CHNL_CTRL,
+ lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
- lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+ lp->dma_out(lp, RX_CHNL_CTRL,
+ lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
CHNL_CTRL_IRQ_IOE |
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
/* Init descriptor indexes */
lp->tx_bd_ci = 0;
- lp->tx_bd_next = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- lp->rx_bd_tail = RX_BD_NUM - 1;
+ lp->rx_bd_tail = lp->rx_bd_num - 1;
/* Enable RX DMA transfers */
wmb();
@@ -785,7 +789,7 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
lp->tx_bd_ci++;
- if (lp->tx_bd_ci >= TX_BD_NUM)
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
@@ -811,7 +815,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
return NETDEV_TX_BUSY;
tail++;
- if (tail >= TX_BD_NUM)
+ if (tail >= lp->tx_bd_num)
tail = 0;
cur_p = &lp->tx_bd_v[tail];
@@ -826,14 +830,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
- dma_addr_t start_p, tail_p, skb_dma_addr;
+ dma_addr_t tail_p, skb_dma_addr;
int ii;
unsigned long num_frag;
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
frag = &skb_shinfo(skb)->frags[0];
- start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag + 1)) {
@@ -876,7 +879,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= TX_BD_NUM)
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -886,7 +889,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
if (--lp->tx_bd_tail < 0)
- lp->tx_bd_tail = TX_BD_NUM - 1;
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
while (--ii >= 0) {
--frag;
@@ -895,7 +898,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_size(frag),
DMA_TO_DEVICE);
if (--lp->tx_bd_tail < 0)
- lp->tx_bd_tail = TX_BD_NUM - 1;
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
}
dma_unmap_single(ndev->dev.parent,
@@ -914,7 +917,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
- if (lp->tx_bd_tail >= TX_BD_NUM)
+ if (lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
skb_tx_timestamp(skb);
@@ -934,7 +937,7 @@ static int ll_temac_recv_buffers_available(struct temac_local *lp)
return 0;
available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
if (available <= 0)
- available += RX_BD_NUM;
+ available += lp->rx_bd_num;
return available;
}
@@ -1003,7 +1006,7 @@ static void ll_temac_recv(struct net_device *ndev)
ndev->stats.rx_bytes += length;
rx_bd = lp->rx_bd_ci;
- if (++lp->rx_bd_ci >= RX_BD_NUM)
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
} while (rx_bd != lp->rx_bd_tail);
@@ -1034,7 +1037,7 @@ static void ll_temac_recv(struct net_device *ndev)
dma_addr_t skb_dma_addr;
rx_bd = lp->rx_bd_tail + 1;
- if (rx_bd >= RX_BD_NUM)
+ if (rx_bd >= lp->rx_bd_num)
rx_bd = 0;
bd = &lp->rx_bd_v[rx_bd];
@@ -1250,13 +1253,96 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
-/* ethtool support */
+/* ---------------------------------------------------------------------
+ * ethtool support
+ */
+
+static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ ering->rx_max_pending = RX_BD_NUM_MAX;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_BD_NUM_MAX;
+ ering->rx_pending = lp->rx_bd_num;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = lp->tx_bd_num;
+}
+
+static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
+ ering->rx_mini_pending ||
+ ering->rx_jumbo_pending ||
+	    ering->tx_pending > TX_BD_NUM_MAX)
+ return -EINVAL;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ lp->rx_bd_num = ering->rx_pending;
+ lp->tx_bd_num = ering->tx_pending;
+ return 0;
+}
+
+static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
+ ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
+ return 0;
+}
+
+static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
+ return -EFAULT;
+ }
+
+ if (ec->rx_max_coalesced_frames)
+ lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
+ if (ec->tx_max_coalesced_frames)
+ lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
+ /* With typical LocalLink clock speed of 200 MHz and
+ * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
+ */
+ if (ec->rx_coalesce_usecs)
+ lp->coalesce_delay_rx =
+ min(255U, (ec->rx_coalesce_usecs * 100) / 512);
+ if (ec->tx_coalesce_usecs)
+ lp->coalesce_delay_tx =
+ min(255U, (ec->tx_coalesce_usecs * 100) / 512);
+
+ return 0;
+}
+
static const struct ethtool_ops temac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ringparam = ll_temac_ethtools_get_ringparam,
+ .set_ringparam = ll_temac_ethtools_set_ringparam,
+ .get_coalesce = ll_temac_ethtools_get_coalesce,
+ .set_coalesce = ll_temac_ethtools_set_coalesce,
};
static int temac_probe(struct platform_device *pdev)
@@ -1300,6 +1386,8 @@ static int temac_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
spin_lock_init(&lp->rx_lock);
INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
@@ -1364,6 +1452,14 @@ static int temac_probe(struct platform_device *pdev)
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
+ /* Defaults for IRQ delay/coalescing setup. These are
+ * configuration values, so they do not belong in the device tree.
+ */
+ lp->coalesce_delay_tx = 0x10;
+ lp->coalesce_count_tx = 0x22;
+ lp->coalesce_delay_rx = 0xff;
+ lp->coalesce_count_rx = 0x07;
+
/* Setup LocalLink DMA */
if (temac_np) {
/* Find the DMA node, map the DMA registers, and
@@ -1402,14 +1498,6 @@ static int temac_probe(struct platform_device *pdev)
lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
- /* Use defaults for IRQ delay/coalescing setup. These
- * are configuration values, so does not belong in
- * device-tree.
- */
- lp->tx_chnl_ctrl = 0x10220000;
- lp->rx_chnl_ctrl = 0xff070000;
- lp->coalesce_count_rx = 0x07;
-
/* Finished with the DMA node; drop the reference */
of_node_put(dma_np);
} else if (pdata) {
@@ -1435,18 +1523,13 @@ static int temac_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq(pdev, 1);
/* IRQ delay/coalescing setup */
- if (pdata->tx_irq_timeout || pdata->tx_irq_count)
- lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
- (pdata->tx_irq_count << 16);
- else
- lp->tx_chnl_ctrl = 0x10220000;
+ if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
+ lp->coalesce_delay_tx = pdata->tx_irq_timeout;
+ lp->coalesce_count_tx = pdata->tx_irq_count;
+ }
if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
- lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
- (pdata->rx_irq_count << 16);
+ lp->coalesce_delay_rx = pdata->rx_irq_timeout;
lp->coalesce_count_rx = pdata->rx_irq_count;
- } else {
- lp->rx_chnl_ctrl = 0xff070000;
- lp->coalesce_count_rx = 0x07;
}
}
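
The ll_temac coalesce conversion above works in hardware delay counts of 5.12 us each: with C_PRESCALAR=1023 the timer ticks every 1024 clocks, and 1024 / 200 MHz = 5.12 us, which is where the 512/100 scaling factor comes from. A worked example of the round trip:

	/* request: ec->rx_coalesce_usecs = 100
	 *   count  = 100 * 100 / 512 = 19       (8-bit field, clamped at 255)
	 *   actual = 19 * 512 / 100  = 97 us    (one tick = 5.12 us)
	 * maximum representable delay: 255 * 5.12 us ~= 1306 us
	 */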
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 2dacfc85b3ba..fbaf3c987d9c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -161,17 +161,11 @@
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
+#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
-#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
-/* MII Mgmt Interrupt Pending register offset */
-#define XAE_MDIO_MIP_OFFSET 0x00000620
-/* MII Management Interrupt Enable register offset */
-#define XAE_MDIO_MIE_OFFSET 0x00000640
-/* MII Management Interrupt Clear register offset. */
-#define XAE_MDIO_MIC_OFFSET 0x00000660
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
@@ -335,6 +329,7 @@
#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
+#define XAE_FEATURE_DMA_64BIT (1 << 4)
#define XAE_NO_CSUM_OFFLOAD 0
@@ -347,9 +342,9 @@
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
- * @reserved1: Reserved and not used
+ * @next_msb: MM2S/S2MM Next Descriptor Pointer (high 32 bits)
* @phys: MM2S/S2MM Buffer Address
- * @reserved2: Reserved and not used
+ * @phys_msb: MM2S/S2MM Buffer Address (high 32 bits)
* @reserved3: Reserved and not used
* @reserved4: Reserved and not used
* @cntrl: MM2S/S2MM Control value
@@ -362,9 +357,9 @@
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
- u32 reserved1;
+ u32 next_msb; /* high 32 bits for IP >= v7.1, reserved on older IP */
u32 phys;
- u32 reserved2;
+ u32 phys_msb; /* for IP >= v7.1, reserved for older IP */
u32 reserved3;
u32 reserved4;
u32 cntrl;
@@ -435,7 +430,7 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
- struct tasklet_struct dma_err_tasklet;
+ struct work_struct dma_err_task;
int tx_irq;
int rx_irq;
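
Reusing the formerly reserved descriptor words as next_msb/phys_msb keeps the layout compatible with 32-bit-only DMA IP, where those words are RES0 and writing zero is harmless. The helpers added in xilinx_axienet_main.c below split and recombine the address; note the double 16-bit shift, which sidesteps an undefined-behaviour 32-bit shift when dma_addr_t is itself only 32 bits wide:

	desc->phys     = lower_32_bits(addr);
	desc->phys_msb = upper_32_bits(addr);	/* RES0 on pre-v7.1 IP */

	addr = desc->phys |
	       (((dma_addr_t)desc->phys_msb << 16) << 16);	/* defined even for 32-bit dma_addr_t */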
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 20746b801959..fa5dc2993520 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -147,6 +147,34 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
iowrite32(value, lp->dma_regs + reg);
}
+static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
+}
+
+static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+ struct axidma_bd *desc)
+{
+ desc->phys = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ desc->phys_msb = upper_32_bits(addr);
+}
+
+static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
+ struct axidma_bd *desc)
+{
+ dma_addr_t ret = desc->phys;
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
+
+ return ret;
+}
+
/**
* axienet_dma_bd_release - Release buffer descriptor rings
* @ndev: Pointer to the net_device structure
@@ -160,24 +188,41 @@ static void axienet_dma_bd_release(struct net_device *ndev)
int i;
struct axienet_local *lp = netdev_priv(ndev);
+ /* If we end up here, tx_bd_v must have been DMA allocated. */
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v,
+ lp->tx_bd_p);
+
+ if (!lp->rx_bd_v)
+ return;
+
for (i = 0; i < lp->rx_bd_num; i++) {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
+ dma_addr_t phys;
+
+ /* A NULL skb means this descriptor has not been initialised
+ * at all.
+ */
+ if (!lp->rx_bd_v[i].skb)
+ break;
+
dev_kfree_skb(lp->rx_bd_v[i].skb);
- }
- if (lp->rx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
- lp->rx_bd_v,
- lp->rx_bd_p);
- }
- if (lp->tx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
- lp->tx_bd_v,
- lp->tx_bd_p);
+ /* For each descriptor, we programmed cntrl with the (non-zero)
+ * descriptor size, after it had been successfully allocated.
+ * So a non-zero value in there means we need to unmap it.
+ */
+ if (lp->rx_bd_v[i].cntrl) {
+ phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
+ dma_unmap_single(ndev->dev.parent, phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ }
}
+
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v,
+ lp->rx_bd_p);
}
/**
@@ -207,7 +252,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
- goto out;
+ return -ENOMEM;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
@@ -216,25 +261,37 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;
for (i = 0; i < lp->tx_bd_num; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % lp->tx_bd_num);
+ dma_addr_t addr = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+
+ lp->tx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
}
for (i = 0; i < lp->rx_bd_num; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
- ((i + 1) % lp->rx_bd_num);
+ dma_addr_t addr;
+
+ addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+ lp->rx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
lp->rx_bd_v[i].skb = skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ addr = dma_map_single(ndev->dev.parent, skb->data,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, addr)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out;
+ }
+ desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
+
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
@@ -267,18 +324,18 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -437,9 +494,10 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp)
+static int __axienet_device_reset(struct axienet_local *lp)
{
u32 timeout;
+
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
@@ -455,9 +513,11 @@ static void __axienet_device_reset(struct axienet_local *lp)
if (--timeout == 0) {
netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
__func__);
- break;
+ return -ETIMEDOUT;
}
}
+
+ return 0;
}
/**
@@ -470,13 +530,17 @@ static void __axienet_device_reset(struct axienet_local *lp)
* are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
+ * Returns 0 on success or a negative error number otherwise.
*/
-static void axienet_device_reset(struct net_device *ndev)
+static int axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
- __axienet_device_reset(lp);
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ return ret;
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -491,9 +555,11 @@ static void axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_JUMBO;
}
- if (axienet_dma_bd_init(ndev)) {
+ ret = axienet_dma_bd_init(ndev);
+ if (ret) {
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
+ return ret;
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -518,36 +584,54 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_setoptions(ndev, lp->options);
netif_trans_update(ndev);
+
+ return 0;
}
/**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
- * Axi DMA Tx channel.
+ * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
* @ndev: Pointer to the net_device structure
+ * @first_bd: Index of first descriptor to clean up
+ * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
+ * @sizep: Pointer to a u32 filled with the total sum of all bytes
+ * in all cleaned-up descriptors. Ignored if NULL.
*
- * This function is invoked from the Axi DMA Tx isr to notify the completion
- * of transmit operation. It clears fields in the corresponding Tx BDs and
- * unmaps the corresponding buffer so that CPU can regain ownership of the
- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
- * required.
+ * Called either after a successful transmit operation, or after an error
+ * when setting up the chain.
+ * Returns the number of descriptors handled.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
+ int nr_bds, u32 *sizep)
{
- u32 size = 0;
- u32 packets = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- unsigned int status = 0;
-
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
- while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
- if (cur_p->skb)
+ int max_bds = nr_bds;
+ unsigned int status;
+ dma_addr_t phys;
+ int i;
+
+ if (max_bds == -1)
+ max_bds = lp->tx_bd_num;
+
+ for (i = 0; i < max_bds; i++) {
+ cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+ status = cur_p->status;
+
+ /* If no number is given, clean up *all* descriptors that have
+ * been completed by the MAC.
+ */
+ if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ break;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
dev_consume_skb_irq(cur_p->skb);
- /*cur_p->phys = 0;*/
+
+ cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
@@ -555,15 +639,36 @@ static void axienet_start_xmit_done(struct net_device *ndev)
cur_p->status = 0;
cur_p->skb = NULL;
- size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
- packets++;
-
- if (++lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
+ if (sizep)
+ *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
+ return i;
+}
+
+/**
+ * axienet_start_xmit_done - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is invoked from the Axi DMA Tx isr to notify the completion
+ * of transmit operation. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffer so that CPU can regain ownership of the
+ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static void axienet_start_xmit_done(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 packets = 0;
+ u32 size = 0;
+
+ packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
+
+ lp->tx_bd_ci += packets;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci -= lp->tx_bd_num;
+
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
@@ -617,9 +722,10 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_start_off;
u32 csum_index_off;
skb_frag_t *frag;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, phys;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
+ u32 orig_tail_ptr = lp->tx_bd_tail;
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -655,19 +761,37 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
+ phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ phys = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+ NULL);
+ lp->tx_bd_tail = orig_tail_ptr;
+
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_frag_size(frag);
}
@@ -676,7 +800,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
- axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
@@ -706,10 +830,12 @@ static void axienet_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ dma_addr_t phys;
+
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- lp->max_frm_size,
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
DMA_FROM_DEVICE);
skb = cur_p->skb;
@@ -745,9 +871,17 @@ static void axienet_recv(struct net_device *ndev)
if (!new_skb)
return;
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "RX DMA mapping error\n");
+ dev_kfree_skb(new_skb);
+ return;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
+
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
cur_p->skb = new_skb;
@@ -761,7 +895,7 @@ static void axienet_recv(struct net_device *ndev)
ndev->stats.rx_bytes += size;
if (tail_p)
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
/**
@@ -791,7 +925,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
(lp->tx_bd_v[lp->tx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -806,7 +941,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
+ schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
@@ -840,7 +975,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
(lp->rx_bd_v[lp->rx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -855,7 +991,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
+ schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
@@ -891,7 +1027,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
return IRQ_HANDLED;
}
-static void axienet_dma_err_handler(unsigned long data);
+static void axienet_dma_err_handler(struct work_struct *work);
/**
* axienet_open - Driver open routine.
@@ -921,8 +1057,9 @@ static int axienet_open(struct net_device *ndev)
*/
mutex_lock(&lp->mii_bus->mdio_lock);
axienet_mdio_disable(lp);
- axienet_device_reset(ndev);
- ret = axienet_mdio_enable(lp);
+ ret = axienet_device_reset(ndev);
+ if (ret == 0)
+ ret = axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
if (ret < 0)
return ret;
@@ -935,9 +1072,8 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
- /* Enable tasklets for Axi DMA error handling */
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
+ /* Enable worker thread for Axi DMA error handling */
+ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
@@ -966,7 +1102,7 @@ err_rx_irq:
err_tx_irq:
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- tasklet_kill(&lp->dma_err_tasklet);
+ cancel_work_sync(&lp->dma_err_task);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -1025,7 +1161,7 @@ static int axienet_stop(struct net_device *ndev)
axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
- tasklet_kill(&lp->dma_err_tasklet);
+ cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
@@ -1083,6 +1219,16 @@ static void axienet_poll_controller(struct net_device *ndev)
}
#endif
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phylink_mii_ioctl(lp->phylink, rq, cmd);
+}
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
@@ -1090,6 +1236,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
@@ -1170,10 +1317,6 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
- data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
- data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
- data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
- data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
@@ -1309,27 +1452,6 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EFAULT;
}
- if ((ecoalesce->rx_coalesce_usecs) ||
- (ecoalesce->rx_coalesce_usecs_irq) ||
- (ecoalesce->rx_max_coalesced_frames_irq) ||
- (ecoalesce->tx_coalesce_usecs) ||
- (ecoalesce->tx_coalesce_usecs_irq) ||
- (ecoalesce->tx_max_coalesced_frames_irq) ||
- (ecoalesce->stats_block_coalesce_usecs) ||
- (ecoalesce->use_adaptive_rx_coalesce) ||
- (ecoalesce->use_adaptive_tx_coalesce) ||
- (ecoalesce->pkt_rate_low) ||
- (ecoalesce->rx_coalesce_usecs_low) ||
- (ecoalesce->rx_max_coalesced_frames_low) ||
- (ecoalesce->tx_coalesce_usecs_low) ||
- (ecoalesce->tx_max_coalesced_frames_low) ||
- (ecoalesce->pkt_rate_high) ||
- (ecoalesce->rx_coalesce_usecs_high) ||
- (ecoalesce->rx_max_coalesced_frames_high) ||
- (ecoalesce->tx_coalesce_usecs_high) ||
- (ecoalesce->tx_max_coalesced_frames_high) ||
- (ecoalesce->rate_sample_interval))
- return -EOPNOTSUPP;
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->tx_max_coalesced_frames)
@@ -1357,6 +1479,7 @@ axienet_ethtools_set_link_ksettings(struct net_device *ndev,
}
static const struct ethtool_ops axienet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1441,6 +1564,22 @@ static void axienet_mac_an_restart(struct phylink_config *config)
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
u32 emmc_reg, fcc_reg;
@@ -1448,7 +1587,7 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
- switch (state->speed) {
+ switch (speed) {
case SPEED_1000:
emmc_reg |= XAE_EMMC_LINKSPD_1000;
break;
@@ -1467,32 +1606,17 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (state->pause & MLO_PAUSE_TX)
+ if (tx_pause)
fcc_reg |= XAE_FCC_FCTX_MASK;
else
fcc_reg &= ~XAE_FCC_FCTX_MASK;
- if (state->pause & MLO_PAUSE_RX)
+ if (rx_pause)
fcc_reg |= XAE_FCC_FCRX_MASK;
else
fcc_reg &= ~XAE_FCC_FCRX_MASK;
axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}
-static void axienet_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- /* nothing meaningful to do */
-}
-
-static void axienet_mac_link_up(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy)
-{
- /* nothing meaningful to do */
-}
-
static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = axienet_validate,
.mac_pcs_get_state = axienet_mac_pcs_get_state,
@@ -1503,17 +1627,18 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
};
/**
- * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
- * @data: Data passed
+ * axienet_dma_err_handler - Work queue task for Axi DMA Error
+ * @work: pointer to work_struct
*
* Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
* Tx/Rx BDs.
*/
-static void axienet_dma_err_handler(unsigned long data)
+static void axienet_dma_err_handler(struct work_struct *work)
{
u32 axienet_status;
u32 cr, i;
- struct axienet_local *lp = (struct axienet_local *) data;
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ dma_err_task);
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
@@ -1533,14 +1658,18 @@ static void axienet_dma_err_handler(unsigned long data)
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
- if (cur_p->phys)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ if (cur_p->cntrl) {
+ dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+
+ dma_unmap_single(ndev->dev.parent, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
+ }
if (cur_p->skb)
dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
+ cur_p->phys_msb = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1594,18 +1723,18 @@ static void axienet_dma_err_handler(unsigned long data)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -1651,6 +1780,7 @@ static int axienet_probe(struct platform_device *pdev)
struct net_device *ndev;
const void *mac_addr;
struct resource *ethres;
+ int addr_width = 32;
u32 value;
ndev = alloc_etherdev(sizeof(*lp));
@@ -1781,7 +1911,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- lp->eth_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
} else {
/* Check for these resources directly on the Ethernet node. */
struct resource *res = platform_get_resource(pdev,
@@ -1789,7 +1919,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
- lp->eth_irq = platform_get_irq(pdev, 2);
+ lp->eth_irq = platform_get_irq_optional(pdev, 2);
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
@@ -1802,6 +1932,36 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
+ /* Autodetect the need for 64-bit DMA pointers.
+ * When the IP is configured for a bus width bigger than 32 bits,
+ * writing the MSB registers is mandatory, even if they are all 0.
+ * We can detect this case by writing all 1's to one such register
+ * and see if that sticks: when the IP is configured for 32 bits
+ * only, those registers are RES0.
+ * Those MSB registers were introduced in IP v7.1, which we check first.
+ */
+ if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+ void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
+
+ iowrite32(0x0, desc);
+ if (ioread32(desc) == 0) { /* sanity check */
+ iowrite32(0xffffffff, desc);
+ if (ioread32(desc) > 0) {
+ lp->features |= XAE_FEATURE_DMA_64BIT;
+ addr_width = 64;
+ dev_info(&pdev->dev,
+ "autodetected 64-bit DMA range\n");
+ }
+ iowrite32(0x0, desc);
+ }
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto free_netdev;
+ }
+
/* Check for Ethernet core IRQ (optional) */
if (lp->eth_irq <= 0)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
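
The 64-bit autodetection in axienet_probe() relies on a classic probe-a-register idiom: an MSB descriptor register is RES0 when the IP is configured for 32-bit addressing, so a written pattern only "sticks" on 64-bit-capable hardware. Condensed (sanity read and the v7.1 ID gate omitted):

	iowrite32(0xffffffff, desc);
	if (ioread32(desc))		/* non-zero readback => MSB register exists */
		lp->features |= XAE_FEATURE_DMA_64BIT;
	iowrite32(0x0, desc);		/* leave the register clean either way */

Only then is dma_set_mask_and_coherent() called with the matching 32- or 64-bit mask, so buffer allocations never outrun what the descriptors can address.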
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 269596c15133..2e5202923510 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1387,6 +1387,8 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
return -ENODEV;
regs_phys = res->start;
port->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->regs))
+ return PTR_ERR(port->regs);
switch (port->id) {
case IXP4XX_ETH_NPEA:
diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index 9c8aa3a95463..cc9ac572423e 100644
--- a/drivers/net/fddi/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
@@ -20,7 +20,7 @@
#include "h/supern_2.h"
#include "h/skfbiinc.h"
#include <linux/bitrev.h>
-#include <linux/pci_regs.h>
+#include <linux/pci.h>
#ifndef lint
static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
@@ -112,7 +112,7 @@ static void card_start(struct s_smc *smc)
*/
outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_ON) ; /* enable for writes */
word = inpw(PCI_C(PCI_STATUS)) ;
- outpw(PCI_C(PCI_STATUS), word | PCI_ERRBITS) ;
+ outpw(PCI_C(PCI_STATUS), word | PCI_STATUS_ERROR_BITS);
outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_OFF) ; /* disable writes */
/*
diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h
index 480795681719..ccee00b71dbc 100644
--- a/drivers/net/fddi/skfp/h/skfbi.h
+++ b/drivers/net/fddi/skfp/h/skfbi.h
@@ -33,11 +33,6 @@
*/
#define I2C_ADDR_VPD 0xA0 /* I2C address for the VPD EEPROM */
-
-#define PCI_ERRBITS (PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_PARITY)
-
-
-
/*
* Control Register File:
* Bank 0
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 09f279c0182b..6b461be1820b 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1207,7 +1207,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);
if (df < 0 || df > GENEVE_DF_MAX) {
- NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF],
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF],
"Invalid DF attribute");
return -EINVAL;
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 672cd2caf2fb..21640a035d7d 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1169,11 +1169,11 @@ out_unlock:
static struct genl_family gtp_genl_family;
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
- u32 type, struct pdp_ctx *pctx)
+ int flags, u32 type, struct pdp_ctx *pctx)
{
void *genlh;
- genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+ genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
type);
if (genlh == NULL)
goto nlmsg_failure;
@@ -1227,8 +1227,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
goto err_unlock;
}
- err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
- info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+ err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
+ 0, info->nlhdr->nlmsg_type, pctx);
if (err < 0)
goto err_unlock_free;
@@ -1271,6 +1271,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
gtp_genl_fill_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
cb->nlh->nlmsg_type, pctx)) {
cb->args[0] = i;
cb->args[1] = j;
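
The gtp change threads a flags argument through gtp_genl_fill_info() because netlink dump replies are framed differently from unicast .doit replies: every message in a dump must carry NLM_F_MULTI so userspace keeps reading until the terminating NLMSG_DONE, while a single get reply is sent with flags 0. In short:

	/* .doit: one self-contained reply */
	gtp_genl_fill_info(skb, portid, seq, 0, type, pctx);

	/* .dumpit: part of a multipart stream ending in NLMSG_DONE */
	gtp_genl_fill_info(skb, portid, seq, NLM_F_MULTI, type, pctx);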
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index fbea6f232819..e2ad3c2e8df5 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -127,7 +127,8 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
{
struct bpqdev *bpq;
- list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
+ list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list,
+ lockdep_rtnl_is_held()) {
if (bpq->ethdev == dev)
return bpq->axdev;
}
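
The extra lockdep_rtnl_is_held() argument uses the optional lockdep condition that list_for_each_entry_rcu() accepts: iteration is then recognised as safe either inside an RCU read-side critical section or, as here, while the writer-side lock (RTNL) is held, instead of tripping a false-positive RCU-lockdep splat. The pattern:

	/* OK under rcu_read_lock() *or* when the update-side lock is held */
	list_for_each_entry_rcu(pos, &head, list, lockdep_rtnl_is_held()) {
		/* ... */
	}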
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1b320bcf150a..ca68aa1df801 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -388,10 +388,11 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->recv_section_size = resp->sections[0].sub_alloc_size;
net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
- /* Setup receive completion ring */
- net_device->recv_completion_cnt
- = round_up(net_device->recv_section_cnt + 1,
- PAGE_SIZE / sizeof(u64));
+ /* Setup receive completion ring.
+ * Add 1 to the recv_section_cnt because at least one entry in a
+ * ring buffer has to be empty.
+ */
+ net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
ret = netvsc_alloc_recv_comp_ring(net_device, 0);
if (ret)
goto cleanup;
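
The "+ 1" follows the standard ring-buffer occupancy rule: if head == tail means "empty", a ring of N slots can hold at most N - 1 entries, i.e.

	capacity(N) = N - 1   =>   N = recv_section_cnt + 1

so queuing recv_section_cnt completions needs exactly recv_section_cnt + 1 slots; the old rounding up to a multiple of PAGE_SIZE / sizeof(u64) only inflated the ring without changing that invariant.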
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2c0a24c606fc..ebcfbae05690 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -638,10 +638,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
} else {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
@@ -710,7 +707,8 @@ no_memory:
goto drop;
}
-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
return netvsc_xmit(skb, ndev, false);
}
@@ -1143,23 +1141,6 @@ out:
return ret;
}
-static bool
-netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
-{
- struct ethtool_link_ksettings diff1 = *cmd;
- struct ethtool_link_ksettings diff2 = {};
-
- diff1.base.speed = 0;
- diff1.base.duplex = 0;
- /* advertising and cmd are usually set */
- ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
- diff1.base.cmd = 0;
- /* We set port to PORT_OTHER */
- diff2.base.port = PORT_OTHER;
-
- return !memcmp(&diff1, &diff2, sizeof(diff1));
-}
-
static void netvsc_init_settings(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
@@ -1176,6 +1157,12 @@ static int netvsc_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
+ struct net_device *vf_netdev;
+
+ vf_netdev = rtnl_dereference(ndc->vf_netdev);
+
+ if (vf_netdev)
+ return __ethtool_get_link_ksettings(vf_netdev, cmd);
cmd->base.speed = ndc->speed;
cmd->base.duplex = ndc->duplex;
@@ -1188,18 +1175,18 @@ static int netvsc_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
- u32 speed;
+ struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
- speed = cmd->base.speed;
- if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->base.duplex) ||
- !netvsc_validate_ethtool_ss_cmd(cmd))
- return -EINVAL;
+ if (vf_netdev) {
+ if (!vf_netdev->ethtool_ops->set_link_ksettings)
+ return -EOPNOTSUPP;
- ndc->speed = speed;
- ndc->duplex = cmd->base.duplex;
+ return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
+ cmd);
+ }
- return 0;
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &ndc->speed, &ndc->duplex);
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 430c93786153..e04c3b60cae7 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -946,7 +946,8 @@ static int ca8210_spi_transfer(
cas_ctl->transfer.bits_per_word = 0; /* Use device setting */
cas_ctl->transfer.tx_buf = cas_ctl->tx_buf;
cas_ctl->transfer.rx_buf = cas_ctl->tx_in_buf;
- cas_ctl->transfer.delay_usecs = 0;
+ cas_ctl->transfer.delay.value = 0;
+ cas_ctl->transfer.delay.unit = SPI_DELAY_UNIT_USECS;
cas_ctl->transfer.cs_change = 0;
cas_ctl->transfer.len = sizeof(struct mac_message);
cas_ctl->msg.complete = ca8210_spi_transfer_complete;
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
new file mode 100644
index 000000000000..9f0d2a93379c
--- /dev/null
+++ b/drivers/net/ipa/Kconfig
@@ -0,0 +1,19 @@
+config QCOM_IPA
+ tristate "Qualcomm IPA support"
+ depends on ARCH_QCOM && 64BIT && NET
+ depends on QCOM_Q6V5_MSS
+ select QCOM_QMI_HELPERS
+ select QCOM_MDT_LOADER
+ help
+ Choose Y or M here to include support for the Qualcomm
+ IP Accelerator (IPA), a hardware block present in some
+ Qualcomm SoCs. The IPA is a programmable protocol processor
+ that is capable of generic hardware handling of IP packets,
+ including routing, filtering, and NAT. Currently the IPA
+ driver supports only basic transport of network traffic
+ between the AP and modem, on the Qualcomm SDM845 SoC.
+
+ Note that if selected, the selection type must match that
+ of QCOM_Q6V5_COMMON (Y or M).
+
+ If unsure, say N.
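+
+# For example, a typical SDM845 build pairs CONFIG_QCOM_Q6V5_MSS=m with
+# CONFIG_QCOM_IPA=m; a built-in (y) IPA cannot link against a modular
+# (m) Q6V5 stack.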
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
new file mode 100644
index 000000000000..afe5df1e6eee
--- /dev/null
+++ b/drivers/net/ipa/Makefile
@@ -0,0 +1,12 @@
+# Un-comment the next line if you want to validate configuration data
+#ccflags-y += -DIPA_VALIDATE
+
+obj-$(CONFIG_QCOM_IPA) += ipa.o
+
+ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
+ ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
+ ipa_gsi.o ipa_smp2p.o ipa_uc.o \
+ ipa_endpoint.o ipa_cmd.o ipa_modem.o \
+ ipa_qmi.o ipa_qmi_msg.o
+
+ipa-y += ipa_data-sdm845.o ipa_data-sc7180.o
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
new file mode 100644
index 000000000000..8d9ca1c335e8
--- /dev/null
+++ b/drivers/net/ipa/gsi.c
@@ -0,0 +1,2063 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "gsi.h"
+#include "gsi_reg.h"
+#include "gsi_private.h"
+#include "gsi_trans.h"
+#include "ipa_gsi.h"
+#include "ipa_data.h"
+
+/**
+ * DOC: The IPA Generic Software Interface
+ *
+ * The generic software interface (GSI) is an integral component of the IPA,
+ * providing a well-defined communication layer between the AP subsystem
+ * and the IPA core. The modem uses the GSI layer as well.
+ *
+ * -------- ---------
+ * | | | |
+ * | AP +<---. .----+ Modem |
+ * | +--. | | .->+ |
+ * | | | | | | | |
+ * -------- | | | | ---------
+ * v | v |
+ * --+-+---+-+--
+ * | GSI |
+ * |-----------|
+ * | |
+ * | IPA |
+ * | |
+ * -------------
+ *
+ * In the above diagram, the AP and Modem represent "execution environments"
+ * (EEs), which are independent operating environments that use the IPA for
+ * data transfer.
+ *
+ * Each EE uses a set of unidirectional GSI "channels," which allow transfer
+ * of data to or from the IPA. A channel is implemented as a ring buffer,
+ * with a DRAM-resident array of "transfer elements" (TREs) available to
+ * describe transfers to or from other EEs through the IPA. A transfer
+ * element can also contain an immediate command, requesting the IPA perform
+ * actions other than data transfer.
+ *
+ * Each TRE refers to a block of data--also located in DRAM. After writing one
+ * or more TREs to a channel, the writer (either the IPA or an EE) writes a
+ * doorbell register to inform the receiving side how many elements have
+ * been written.
+ *
+ * Each channel has a GSI "event ring" associated with it. An event ring
+ * is implemented very much like a channel ring, but is always directed from
+ * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
+ * events by adding an entry to the event ring associated with the channel.
+ * The GSI then writes its doorbell for the event ring, causing the target
+ * EE to be interrupted. Each entry in an event ring contains a pointer
+ * to the channel TRE whose completion the event represents.
+ *
+ * Each TRE in a channel ring has a set of flags. One flag indicates whether
+ * the completion of the transfer operation generates an entry (and possibly
+ * an interrupt) in the channel's event ring. Other flags allow transfer
+ * elements to be chained together, forming a single logical transaction.
+ * TRE flags are used to control whether and when interrupts are generated
+ * to signal completion of channel transfers.
+ *
+ * Elements in channel and event rings are completed (or consumed) strictly
+ * in order. Completion of one entry implies the completion of all preceding
+ * entries. A single completion interrupt can therefore communicate the
+ * completion of many transfers.
+ *
+ * Note that all GSI registers are little-endian, which is the assumed
+ * endianness of I/O space accesses. The accessor functions perform byte
+ * swapping if needed (i.e., for a big endian CPU).
+ */
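+
+/* As an illustrative summary (not a hardware specification), a typical
+ * AP transmit operation proceeds as follows:
+ *
+ *   1. The AP fills one or more TREs in a channel ring and rings the
+ *      channel doorbell with the index of the first unused element.
+ *   2. The IPA consumes the TREs, transferring or processing the data
+ *      blocks they describe.
+ *   3. If a TRE requests it, the GSI appends a completion entry to the
+ *      channel's event ring, rings the event ring doorbell, and raises
+ *      an IEOB interrupt on the AP.
+ *   4. The AP reads the event, locates the completed TRE through the
+ *      event's xfer_ptr field, and returns the event to the hardware.
+ */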
+
+/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
+#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
+
+#define GSI_CMD_TIMEOUT 5 /* seconds */
+
+#define GSI_CHANNEL_STOP_RX_RETRIES 10
+
+#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
+#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
+
+#define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */
+
+/* An entry in an event ring */
+struct gsi_event {
+ __le64 xfer_ptr;
+ __le16 len;
+ u8 reserved1;
+ u8 code;
+ __le16 reserved2;
+ u8 type;
+ u8 chid;
+};
+
+/* Hardware values from the error log register error code field */
+enum gsi_err_code {
+ GSI_INVALID_TRE_ERR = 0x1,
+ GSI_OUT_OF_BUFFERS_ERR = 0x2,
+ GSI_OUT_OF_RESOURCES_ERR = 0x3,
+ GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
+ GSI_EVT_RING_EMPTY_ERR = 0x5,
+ GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
+ GSI_HWO_1_ERR = 0x8,
+};
+
+/* Hardware values from the error log register error type field */
+enum gsi_err_type {
+ GSI_ERR_TYPE_GLOB = 0x1,
+ GSI_ERR_TYPE_CHAN = 0x2,
+ GSI_ERR_TYPE_EVT = 0x3,
+};
+
+/* Hardware values used when programming an event ring */
+enum gsi_evt_chtype {
+ GSI_EVT_CHTYPE_MHI_EV = 0x0,
+ GSI_EVT_CHTYPE_XHCI_EV = 0x1,
+ GSI_EVT_CHTYPE_GPI_EV = 0x2,
+ GSI_EVT_CHTYPE_XDCI_EV = 0x3,
+};
+
+/* Hardware values used when programming a channel */
+enum gsi_channel_protocol {
+ GSI_CHANNEL_PROTOCOL_MHI = 0x0,
+ GSI_CHANNEL_PROTOCOL_XHCI = 0x1,
+ GSI_CHANNEL_PROTOCOL_GPI = 0x2,
+ GSI_CHANNEL_PROTOCOL_XDCI = 0x3,
+};
+
+/* Hardware values representing an event ring immediate command opcode */
+enum gsi_evt_cmd_opcode {
+ GSI_EVT_ALLOCATE = 0x0,
+ GSI_EVT_RESET = 0x9,
+ GSI_EVT_DE_ALLOC = 0xa,
+};
+
+/* Hardware values representing a generic immediate command opcode */
+enum gsi_generic_cmd_opcode {
+ GSI_GENERIC_HALT_CHANNEL = 0x1,
+ GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+};
+
+/* Hardware values representing a channel immediate command opcode */
+enum gsi_ch_cmd_opcode {
+ GSI_CH_ALLOCATE = 0x0,
+ GSI_CH_START = 0x1,
+ GSI_CH_STOP = 0x2,
+ GSI_CH_RESET = 0x9,
+ GSI_CH_DE_ALLOC = 0xa,
+};
+
+/**
+ * struct gsi_channel_scratch_gpi - GPI protocol scratch register
+ * @max_outstanding_tre:
+ * Defines the maximum number of outstanding TREs allowed in a single
+ * transaction on a channel, expressed in bytes. This determines the
+ * amount of prefetch performed by the hardware. We configure this to
+ * equal the size of the TLV FIFO for the channel.
+ * @outstanding_threshold:
+ * Defines the threshold (in bytes) determining when the sequencer
+ * should update the channel doorbell. We configure this to equal
+ * the size of two TREs.
+ */
+struct gsi_channel_scratch_gpi {
+ u64 reserved1;
+ u16 reserved2;
+ u16 max_outstanding_tre;
+ u16 reserved3;
+ u16 outstanding_threshold;
+};
+
+/**
+ * union gsi_channel_scratch - channel scratch configuration area
+ *
+ * The exact interpretation of this register is protocol-specific.
+ * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
+ */
+union gsi_channel_scratch {
+ struct gsi_channel_scratch_gpi gpi;
+ struct {
+ u32 word1;
+ u32 word2;
+ u32 word3;
+ u32 word4;
+ } data;
+};
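+
+/* Example values (illustrative, assuming 16-byte ring elements and a
+ * maximum transaction size of 8 TREs): gsi_channel_program() would set
+ * max_outstanding_tre = 8 * 16 = 128 bytes and outstanding_threshold =
+ * 2 * 16 = 32 bytes.
+ */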
+
+/* Check things that can be validated at build time. */
+static void gsi_validate_build(void)
+{
+ /* This is used as a divisor */
+ BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
+
+ /* Code assumes the size of channel and event ring element are
+ * the same (and fixed). Make sure the size of an event ring
+ * element is what's expected.
+ */
+ BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
+
+ /* Hardware requires a 2^n ring size. We ensure the number of
+ * elements in an event ring is a power of 2 elsewhere; this
+ * ensures the elements themselves meet the requirement.
+ */
+ BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
+
+ /* The channel element size must fit in this field */
+ BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
+
+ /* The event ring element size must fit in this field */
+ BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
+}
+
+/* Return the channel id associated with a given channel */
+static u32 gsi_channel_id(struct gsi_channel *channel)
+{
+ return channel - &channel->gsi->channel[0];
+}
+
+static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ gsi->event_enable_bitmap |= BIT(evt_ring_id);
+ val = gsi->event_enable_bitmap;
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+}
+
+static void gsi_isr_ieob_clear(struct gsi *gsi, u32 mask)
+{
+ iowrite32(mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
+}
+
+static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
+ val = gsi->event_enable_bitmap;
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+}
+
+/* Enable all GSI interrupt types */
+static void gsi_irq_enable(struct gsi *gsi)
+{
+ u32 val;
+
+ /* We don't use inter-EE channel or event interrupts */
+ val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
+ val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
+ val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
+ iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+
+ val = GENMASK(gsi->channel_count - 1, 0);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+ val = GENMASK(gsi->evt_ring_count - 1, 0);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+ /* Each IEOB interrupt is enabled (later) as needed by channels */
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ val = GSI_CNTXT_GLOB_IRQ_ALL;
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
+ /* Never enable GSI_BREAK_POINT */
+ val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
+ iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+}
+
+/* Disable all GSI interrupt types */
+static void gsi_irq_disable(struct gsi *gsi)
+{
+ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+}
+
+/* Return the virtual address associated with a ring index */
+void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
+}
+
+/* Return the 32-bit DMA address associated with a ring index */
+static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
+{
+ return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
+}
+
+/* Return the ring index of a 32-bit ring offset */
+static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
+{
+ return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
+}
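+
+/* Worked example (illustrative): with 16-byte ring elements and
+ * ring->addr = 0x40001000, gsi_ring_addr(ring, 3) yields 0x40001030,
+ * and gsi_ring_index(ring, 0x40001030) recovers index 3. Neither
+ * function wraps the index; callers apply "% ring->count" where needed.
+ */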
+
+/* Issue a GSI command by writing a value to a register, then wait for
+ * completion to be signaled. Returns true if the command completes
+ * or false if it times out.
+ */
+static bool
+gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+{
+ reinit_completion(completion);
+
+ iowrite32(val, gsi->virt + reg);
+
+ return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
+}
+
+/* Return the hardware's notion of the current state of an event ring */
+static enum gsi_evt_ring_state
+gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+
+ return u32_get_bits(val, EV_CHSTATE_FMASK);
+}
+
+/* Issue an event ring command and wait for it to complete */
+static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ struct completion *completion = &evt_ring->completion;
+ u32 val;
+
+ val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
+ val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
+
+ if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "GSI command %u to event ring %u timed out (state is %u)\n",
+ opcode, evt_ring_id, evt_ring->state);
+
+ return -ETIMEDOUT;
+}
+
+/* Allocate an event ring in NOT_ALLOCATED state */
+static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ int ret;
+
+ /* Get initial event ring state */
+ evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
+
+ if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return -EINVAL;
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
+ evt_ring->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Reset a GSI event ring in ALLOCATED or ERROR state. */
+static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ enum gsi_evt_ring_state state = evt_ring->state;
+ int ret;
+
+ if (state != GSI_EVT_RING_STATE_ALLOCATED &&
+ state != GSI_EVT_RING_STATE_ERROR) {
+ dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
+ evt_ring->state);
+ return;
+ }
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
+ dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
+ evt_ring->state);
+}
+
+/* Issue a hardware de-allocation request for an allocated event ring */
+static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ int ret;
+
+ if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
+ evt_ring->state);
+ return;
+ }
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
+ evt_ring->state);
+}
+
+/* Return the hardware's notion of the current state of a channel */
+static enum gsi_channel_state
+gsi_channel_state(struct gsi *gsi, u32 channel_id)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+
+ return u32_get_bits(val, CHSTATE_FMASK);
+}
+
+/* Issue a channel command and wait for it to complete */
+static int
+gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
+{
+ struct completion *completion = &channel->completion;
+ u32 channel_id = gsi_channel_id(channel);
+ u32 val;
+
+ val = u32_encode_bits(channel_id, CH_CHID_FMASK);
+ val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
+
+ if (gsi_command(channel->gsi, GSI_CH_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(channel->gsi->dev, "GSI command %u to channel %u timed out (state is %u)\n",
+ opcode, channel_id, channel->state);
+
+ return -ETIMEDOUT;
+}
+
+/* Allocate GSI channel in NOT_ALLOCATED state */
+static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ /* Get initial channel state */
+ channel->state = gsi_channel_state(gsi, channel_id);
+
+ if (channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
+ channel->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Start an ALLOCATED channel */
+static int gsi_channel_start_command(struct gsi_channel *channel)
+{
+ enum gsi_channel_state state = channel->state;
+ int ret;
+
+ if (state != GSI_CHANNEL_STATE_ALLOCATED &&
+ state != GSI_CHANNEL_STATE_STOPPED)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_START);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_STARTED) {
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) after start\n",
+ channel->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Stop a GSI channel in STARTED state */
+static int gsi_channel_stop_command(struct gsi_channel *channel)
+{
+ enum gsi_channel_state state = channel->state;
+ int ret;
+
+ if (state != GSI_CHANNEL_STATE_STARTED &&
+ state != GSI_CHANNEL_STATE_STOP_IN_PROC)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_STOP);
+ if (ret || channel->state == GSI_CHANNEL_STATE_STOPPED)
+ return ret;
+
+ /* We may have to try again if stop is in progress */
+ if (channel->state == GSI_CHANNEL_STATE_STOP_IN_PROC)
+ return -EAGAIN;
+
+ dev_err(channel->gsi->dev, "bad channel state (%u) after stop\n",
+ channel->state);
+
+ return -EIO;
+}
+
+/* Reset a GSI channel in ALLOCATED or ERROR state. */
+static void gsi_channel_reset_command(struct gsi_channel *channel)
+{
+ int ret;
+
+ msleep(1); /* A short delay is required before a RESET command */
+
+ if (channel->state != GSI_CHANNEL_STATE_STOPPED &&
+ channel->state != GSI_CHANNEL_STATE_ERROR) {
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) before reset\n",
+ channel->state);
+ return;
+ }
+
+ ret = gsi_channel_command(channel, GSI_CH_RESET);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED)
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) after reset\n",
+ channel->state);
+}
+
+/* Deallocate an ALLOCATED GSI channel */
+static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ if (channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad channel state (%u) before dealloc\n",
+ channel->state);
+ return;
+ }
+
+ ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ dev_err(gsi->dev, "bad channel state (%u) after dealloc\n",
+ channel->state);
+}
+
+/* Ring an event ring doorbell, reporting the last entry processed by the AP.
+ * The index argument (modulo the ring count) is the first unfilled entry, so
+ * we supply one less than that with the doorbell. Update the event ring
+ * index field with the value provided.
+ */
+static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
+{
+ struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
+ u32 val;
+
+ ring->index = index; /* Next unused entry */
+
+ /* Note: index *must* be used modulo the ring count here */
+ val = gsi_ring_addr(ring, (index - 1) % ring->count);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
+}
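+
+/* Worked example (illustrative): with ring->count = 8, an index of 5
+ * (the first unfilled entry) writes the address of slot (5 - 1) % 8 = 4,
+ * the last filled entry. An index of 0 wraps correctly: (0 - 1) is
+ * computed in u32, and 0xffffffff % 8 = 7, the final slot in the ring.
+ */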
+
+/* Program an event ring for use */
+static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
+ u32 val;
+
+ val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
+ val |= EV_INTYPE_FMASK;
+ val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+
+ val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
+
+ /* The context 2 and 3 registers store the low-order and
+ * high-order 32 bits of the address of the event ring,
+ * respectively.
+ */
+ val = evt_ring->ring.addr & GENMASK(31, 0);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
+
+ val = evt_ring->ring.addr >> 32;
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
+
+ /* Enable interrupt moderation by setting the moderation delay */
+ val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
+ val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
+
+ /* No MSI write data, and MSI address high and low address is 0 */
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
+
+ /* We don't need to get event read pointer updates */
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
+
+ /* Finally, tell the hardware we've completed event 0 (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
+}
+
+/* Return the last (most recent) transaction completed on a channel. */
+static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct gsi_trans *trans;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ if (!list_empty(&trans_info->complete))
+ trans = list_last_entry(&trans_info->complete,
+ struct gsi_trans, links);
+ else if (!list_empty(&trans_info->polled))
+ trans = list_last_entry(&trans_info->polled,
+ struct gsi_trans, links);
+ else
+ trans = NULL;
+
+ /* Caller will wait for this, so take a reference */
+ if (trans)
+ refcount_inc(&trans->refcount);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ return trans;
+}
+
+/* Wait for transaction activity on a channel to complete */
+static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
+{
+ struct gsi_trans *trans;
+
+ /* Get the last transaction, and wait for it to complete */
+ trans = gsi_channel_trans_last(channel);
+ if (trans) {
+ wait_for_completion(&trans->completion);
+ gsi_trans_free(trans);
+ }
+}
+
+/* Stop channel activity. Transactions may not be allocated until thawed. */
+static void gsi_channel_freeze(struct gsi_channel *channel)
+{
+ gsi_channel_trans_quiesce(channel);
+
+ napi_disable(&channel->napi);
+
+ gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
+}
+
+/* Allow transactions to be used on the channel again. */
+static void gsi_channel_thaw(struct gsi_channel *channel)
+{
+ gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
+
+ napi_enable(&channel->napi);
+}
+
+/* Program a channel for use */
+static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
+{
+ size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
+ u32 channel_id = gsi_channel_id(channel);
+ union gsi_channel_scratch scr = { };
+ struct gsi_channel_scratch_gpi *gpi;
+ struct gsi *gsi = channel->gsi;
+ u32 wrr_weight = 0;
+ u32 val;
+
+ /* Arbitrarily pick TRE 0 as the first channel element to use */
+ channel->tre_ring.index = 0;
+
+ /* We program all channels to use GPI protocol */
+ val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
+ if (channel->toward_ipa)
+ val |= CHTYPE_DIR_FMASK;
+ val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
+ val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+
+ val = u32_encode_bits(size, R_LENGTH_FMASK);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
+
+ /* The context 2 and 3 registers store the low-order and
+ * high-order 32 bits of the address of the channel ring,
+ * respectively.
+ */
+ val = channel->tre_ring.addr & GENMASK(31, 0);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
+
+ val = channel->tre_ring.addr >> 32;
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
+
+ /* Command channel gets low weighted round-robin priority */
+ if (channel->command)
+ wrr_weight = field_max(WRR_WEIGHT_FMASK);
+ val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
+
+ /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
+
+ /* Enable the doorbell engine if requested */
+ if (doorbell)
+ val |= USE_DB_ENG_FMASK;
+
+ if (!channel->use_prefetch)
+ val |= USE_ESCAPE_BUF_ONLY_FMASK;
+
+ iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
+
+ /* Now update the scratch registers for GPI protocol */
+ gpi = &scr.gpi;
+ gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
+ GSI_RING_ELEMENT_SIZE;
+ gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
+
+ val = scr.data.word1;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
+
+ val = scr.data.word2;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
+
+ val = scr.data.word3;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
+
+ /* We must preserve the upper 16 bits of the last scratch register.
+ * The next sequence assumes those bits remain unchanged between the
+ * read and the write.
+ */
+ val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+ val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+
+ /* All done! */
+}
+
+static void gsi_channel_deprogram(struct gsi_channel *channel)
+{
+ /* Nothing to do */
+}
+
+/* Start an allocated GSI channel */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+ int ret;
+
+ mutex_lock(&gsi->mutex);
+
+ ret = gsi_channel_start_command(channel);
+
+ mutex_unlock(&gsi->mutex);
+
+ /* Clear the channel's event ring interrupt in case it's pending */
+ gsi_isr_ieob_clear(gsi, BIT(evt_ring_id));
+
+ gsi_channel_thaw(channel);
+
+ return ret;
+}
+
+/* Stop a started channel */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 retries;
+ int ret;
+
+ gsi_channel_freeze(channel);
+
+ /* Channel could have entered STOPPED state since last call if the
+ * STOP command timed out. We won't stop a channel if stopping it
+ * was successful previously (so we still want the freeze above).
+ */
+ if (channel->state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
+
+ /* RX channels might require a little time to enter STOPPED state */
+ retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
+
+ mutex_lock(&gsi->mutex);
+
+ do {
+ ret = gsi_channel_stop_command(channel);
+ if (ret != -EAGAIN)
+ break;
+ msleep(1);
+ } while (retries--);
+
+ mutex_unlock(&gsi->mutex);
+
+ /* Thaw the channel if we need to retry (or on error) */
+ if (ret)
+ gsi_channel_thaw(channel);
+
+ return ret;
+}
+
+/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ mutex_lock(&gsi->mutex);
+
+ /* Due to a hardware quirk we need to reset RX channels twice. */
+ gsi_channel_reset_command(channel);
+ if (!channel->toward_ipa)
+ gsi_channel_reset_command(channel);
+
+ gsi_channel_program(channel, db_enable);
+ gsi_channel_trans_cancel_pending(channel);
+
+ mutex_unlock(&gsi->mutex);
+}
+
+/* Stop a STARTED channel for suspend (using stop if requested) */
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ if (stop)
+ return gsi_channel_stop(gsi, channel_id);
+
+ gsi_channel_freeze(channel);
+
+ return 0;
+}
+
+/* Resume a suspended channel (starting will be requested if STOPPED) */
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ if (start)
+ return gsi_channel_start(gsi, channel_id);
+
+ gsi_channel_thaw(channel);
+
+ return 0;
+}
+
+/**
+ * gsi_channel_tx_queued() - Report queued TX transfers for a channel
+ * @channel: Channel for which to report
+ *
+ * Report to the network stack the number of bytes and transactions that
+ * have been queued to hardware since last call. This and the next function
+ * supply information used by the network stack for throttling.
+ *
+ * For each channel we track the number of transactions used and bytes of
+ * data those transactions represent. We also track what those values are
+ * each time this function is called. Subtracting the two tells us
+ * the number of bytes and transactions that have been added between
+ * successive calls.
+ *
+ * Calling this each time we ring the channel doorbell allows us to
+ * provide accurate information to the network stack about how much
+ * work we've given the hardware at any point in time.
+ */
+void gsi_channel_tx_queued(struct gsi_channel *channel)
+{
+ u32 trans_count;
+ u32 byte_count;
+
+ byte_count = channel->byte_count - channel->queued_byte_count;
+ trans_count = channel->trans_count - channel->queued_trans_count;
+ channel->queued_byte_count = channel->byte_count;
+ channel->queued_trans_count = channel->trans_count;
+
+ ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
+ trans_count, byte_count);
+}
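+
+/* For example (illustrative): if byte_count has advanced from a
+ * queued_byte_count snapshot of 3800 to 5000 and trans_count from 37
+ * to 40, this call reports 3 transactions and 1200 bytes to the stack,
+ * then updates both snapshots for the next call.
+ */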
+
+/**
+ * gsi_channel_tx_update() - Report completed TX transfers
+ * @channel: Channel that has completed transmitting packets
+ * @trans: Last transaction known to be complete
+ *
+ * Compute the number of transactions and bytes that have been transferred
+ * over a TX channel since the given transaction was committed. Report this
+ * information to the network stack.
+ *
+ * At the time a transaction is committed, we record its channel's
+ * committed transaction and byte counts *in the transaction*.
+ * Completions are signaled by the hardware with an interrupt, and
+ * we can determine the latest completed transaction at that time.
+ *
+ * The difference between the byte/transaction count recorded in
+ * the transaction and the count last time we recorded a completion
+ * tells us exactly how much data has been transferred between
+ * completions.
+ *
+ * Calling this each time we learn of a newly-completed transaction
+ * allows us to provide accurate information to the network stack
+ * about how much work has been completed by the hardware at a given
+ * point in time.
+ */
+static void
+gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
+{
+ u64 byte_count = trans->byte_count + trans->len;
+ u64 trans_count = trans->trans_count + 1;
+
+ byte_count -= channel->compl_byte_count;
+ channel->compl_byte_count += byte_count;
+ trans_count -= channel->compl_trans_count;
+ channel->compl_trans_count += trans_count;
+
+ ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
+ trans_count, byte_count);
+}
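+
+/* For example (illustrative): if the channel had transferred 5900 bytes
+ * when this transaction was committed, the transaction itself carries
+ * 100 bytes, and compl_byte_count is 5200, then 6000 - 5200 = 800 bytes
+ * are reported as newly completed.
+ */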
+
+/* Channel control interrupt handler */
+static void gsi_isr_chan_ctrl(struct gsi *gsi)
+{
+ u32 channel_mask;
+
+ channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
+ iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
+ while (channel_mask) {
+ u32 channel_id = __ffs(channel_mask);
+ struct gsi_channel *channel;
+
+ channel_mask ^= BIT(channel_id);
+
+ channel = &gsi->channel[channel_id];
+ channel->state = gsi_channel_state(gsi, channel_id);
+
+ complete(&channel->completion);
+ }
+}
+
+/* Event ring control interrupt handler */
+static void gsi_isr_evt_ctrl(struct gsi *gsi)
+{
+ u32 event_mask;
+
+ event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
+ iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
+ while (event_mask) {
+ u32 evt_ring_id = __ffs(event_mask);
+ struct gsi_evt_ring *evt_ring;
+
+ event_mask ^= BIT(evt_ring_id);
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
+
+ complete(&evt_ring->completion);
+ }
+}
+
+/* Global channel error interrupt handler */
+static void
+gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
+{
+ if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
+ complete(&gsi->channel[channel_id].completion);
+ return;
+ }
+
+ /* Report, but otherwise ignore all other error codes */
+ dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
+ channel_id, err_ee, code);
+}
+
+/* Global event error interrupt handler */
+static void
+gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
+{
+ if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ u32 channel_id = gsi_channel_id(evt_ring->channel);
+
+ complete(&evt_ring->completion);
+ dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
+ channel_id);
+ return;
+ }
+
+ /* Report, but otherwise ignore all other error codes */
+ dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
+ evt_ring_id, err_ee, code);
+}
+
+/* Global error interrupt handler */
+static void gsi_isr_glob_err(struct gsi *gsi)
+{
+ enum gsi_err_type type;
+ enum gsi_err_code code;
+ u32 which;
+ u32 val;
+ u32 ee;
+
+ /* Get the logged error, then reinitialize the log */
+ val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
+ iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+ iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
+
+ ee = u32_get_bits(val, ERR_EE_FMASK);
+ which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
+ type = u32_get_bits(val, ERR_TYPE_FMASK);
+ code = u32_get_bits(val, ERR_CODE_FMASK);
+
+ if (type == GSI_ERR_TYPE_CHAN)
+ gsi_isr_glob_chan_err(gsi, ee, which, code);
+ else if (type == GSI_ERR_TYPE_EVT)
+ gsi_isr_glob_evt_err(gsi, ee, which, code);
+ else /* type GSI_ERR_TYPE_GLOB should be fatal */
+ dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
+}
+
+/* Generic EE interrupt handler */
+static void gsi_isr_gp_int1(struct gsi *gsi)
+{
+ u32 result;
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+ result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
+ if (result != GENERIC_EE_SUCCESS_FVAL)
+ dev_err(gsi->dev, "global INT1 generic result %u\n", result);
+
+ complete(&gsi->completion);
+}
+
+/* Inter-EE interrupt handler */
+static void gsi_isr_glob_ee(struct gsi *gsi)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
+
+ if (val & ERROR_INT_FMASK)
+ gsi_isr_glob_err(gsi);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
+
+ val &= ~ERROR_INT_FMASK;
+
+ if (val & EN_GP_INT1_FMASK) {
+ val ^= EN_GP_INT1_FMASK;
+ gsi_isr_gp_int1(gsi);
+ }
+
+ if (val)
+ dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
+}
+
+/* I/O completion interrupt event */
+static void gsi_isr_ieob(struct gsi *gsi)
+{
+ u32 event_mask;
+
+ event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ gsi_isr_ieob_clear(gsi, event_mask);
+
+ while (event_mask) {
+ u32 evt_ring_id = __ffs(event_mask);
+
+ event_mask ^= BIT(evt_ring_id);
+
+ gsi_irq_ieob_disable(gsi, evt_ring_id);
+ napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
+ }
+}
+
+/* General event interrupts represent serious problems, so report them */
+static void gsi_isr_general(struct gsi *gsi)
+{
+ struct device *dev = gsi->dev;
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
+ iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
+
+ if (val)
+ dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+}
+
+/**
+ * gsi_isr() - Top level GSI interrupt service routine
+ * @irq: Interrupt number (ignored)
+ * @dev_id: GSI pointer supplied to request_irq()
+ *
+ * This is the main handler function registered for the GSI IRQ. Each type
+ * of interrupt has a separate handler function that is called from here.
+ */
+static irqreturn_t gsi_isr(int irq, void *dev_id)
+{
+ struct gsi *gsi = dev_id;
+ u32 intr_mask;
+ u32 cnt = 0;
+
+ while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
+ /* intr_mask contains bitmask of pending GSI interrupts */
+ do {
+ u32 gsi_intr = BIT(__ffs(intr_mask));
+
+ intr_mask ^= gsi_intr;
+
+ switch (gsi_intr) {
+ case CH_CTRL_FMASK:
+ gsi_isr_chan_ctrl(gsi);
+ break;
+ case EV_CTRL_FMASK:
+ gsi_isr_evt_ctrl(gsi);
+ break;
+ case GLOB_EE_FMASK:
+ gsi_isr_glob_ee(gsi);
+ break;
+ case IEOB_FMASK:
+ gsi_isr_ieob(gsi);
+ break;
+ case GENERAL_FMASK:
+ gsi_isr_general(gsi);
+ break;
+ default:
+ dev_err(gsi->dev,
+ "%s: unrecognized type 0x%08x\n",
+ __func__, gsi_intr);
+ break;
+ }
+ } while (intr_mask);
+
+ if (++cnt > GSI_ISR_MAX_ITER) {
+ dev_err(gsi->dev, "interrupt flood\n");
+ break;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Return the transaction associated with a transfer completion event */
+static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
+ struct gsi_event *event)
+{
+ u32 tre_offset;
+ u32 tre_index;
+
+ /* Event xfer_ptr records the TRE it's associated with */
+ tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
+ tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
+
+ return gsi_channel_trans_mapped(channel, tre_index);
+}
+
+/**
+ * gsi_evt_ring_rx_update() - Record lengths of received data
+ * @evt_ring: Event ring associated with channel that received packets
+ * @index: Event index in ring reported by hardware
+ *
+ * Events for RX channels contain the actual number of bytes received into
+ * the buffer. Every event has a transaction associated with it, and here
+ * we update transactions to record their actual received lengths.
+ *
+ * This function is called whenever we learn that the GSI hardware has filled
+ * new events since the last time we checked. The ring's index field
+ * identifies the first entry in need of processing. The index provided is the
+ * first *unfilled* event in the ring (following the last filled one).
+ *
+ * Events are sequential within the event ring, and transactions are
+ * sequential within the transaction pool.
+ *
+ * Note that @index always refers to an element *within* the event ring.
+ */
+static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+{
+ struct gsi_channel *channel = evt_ring->channel;
+ struct gsi_ring *ring = &evt_ring->ring;
+ struct gsi_trans_info *trans_info;
+ struct gsi_event *event_done;
+ struct gsi_event *event;
+ struct gsi_trans *trans;
+ u32 byte_count = 0;
+ u32 old_index;
+ u32 event_avail;
+
+ trans_info = &channel->trans_info;
+
+ /* We'll start with the oldest un-processed event. RX channels
+ * replenish receive buffers in single-TRE transactions, so we
+ * can just map that event to its transaction. Transactions
+ * associated with completion events are consecutive.
+ */
+ old_index = ring->index;
+ event = gsi_ring_virt(ring, old_index);
+ trans = gsi_event_trans(channel, event);
+
+ /* Compute the number of events to process before we wrap,
+ * and determine when we'll be done processing events.
+ */
+ event_avail = ring->count - old_index % ring->count;
+ event_done = gsi_ring_virt(ring, index);
+ do {
+ trans->len = __le16_to_cpu(event->len);
+ byte_count += trans->len;
+
+ /* Move on to the next event and transaction */
+ if (--event_avail)
+ event++;
+ else
+ event = gsi_ring_virt(ring, 0);
+ trans = gsi_trans_pool_next(&trans_info->pool, trans);
+ } while (event != event_done);
+
+ /* We record RX bytes when they are received */
+ channel->byte_count += byte_count;
+ channel->trans_count++;
+}
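+
+/* Worked example (illustrative): with ring->count = 8, old_index = 6,
+ * and a hardware-reported index of 2, event_avail starts at 8 - 6 = 2.
+ * Slots 6 and 7 are processed in place, the event pointer then wraps to
+ * slot 0, and the loop stops after slot 1--four events in total.
+ */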
+
+/* Initialize a ring, including allocating DMA memory for its entries */
+static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
+{
+ size_t size = count * GSI_RING_ELEMENT_SIZE;
+ struct device *dev = gsi->dev;
+ dma_addr_t addr;
+
+ /* Hardware requires a 2^n ring size, with alignment equal to size */
+ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (ring->virt && addr % size) {
+ dma_free_coherent(dev, size, ring->virt, addr);
+ dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
+ size);
+ return -EINVAL; /* Not a good error value, but distinct */
+ } else if (!ring->virt) {
+ return -ENOMEM;
+ }
+ ring->addr = addr;
+ ring->count = count;
+
+ return 0;
+}
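+
+/* For example (illustrative): a 256-element ring with 16-byte elements
+ * occupies 4096 bytes, so its DMA address must be 4096-byte aligned;
+ * an allocation at any other address is freed and rejected above.
+ */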
+
+/* Free a previously-allocated ring */
+static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
+{
+ size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
+
+ dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
+}
+
+/* Allocate an available event ring id */
+static int gsi_evt_ring_id_alloc(struct gsi *gsi)
+{
+ u32 evt_ring_id;
+
+ if (gsi->event_bitmap == ~0U) {
+ dev_err(gsi->dev, "event rings exhausted\n");
+ return -ENOSPC;
+ }
+
+ evt_ring_id = ffz(gsi->event_bitmap);
+ gsi->event_bitmap |= BIT(evt_ring_id);
+
+ return (int)evt_ring_id;
+}
+
+/* Free a previously-allocated event ring id */
+static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi->event_bitmap &= ~BIT(evt_ring_id);
+}
+
+/* Ring a channel doorbell, reporting the first un-filled entry */
+void gsi_channel_doorbell(struct gsi_channel *channel)
+{
+ struct gsi_ring *tre_ring = &channel->tre_ring;
+ u32 channel_id = gsi_channel_id(channel);
+ struct gsi *gsi = channel->gsi;
+ u32 val;
+
+ /* Note: index *must* be used modulo the ring count here */
+ val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
+ iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
+}
+
+/* Consult hardware, move any newly completed transactions to completed list */
+static void gsi_channel_update(struct gsi_channel *channel)
+{
+ u32 evt_ring_id = channel->evt_ring_id;
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+ struct gsi_trans *trans;
+ struct gsi_ring *ring;
+ u32 offset;
+ u32 index;
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ ring = &evt_ring->ring;
+
+ /* See if there's anything new to process; if not, we're done. Note
+ * that index always refers to an entry *within* the event ring.
+ */
+ offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
+ index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
+ if (index == ring->index % ring->count)
+ return;
+
+ /* Get the transaction for the latest completed event. Take a
+ * reference to keep it from completing before we give the events
+ * for this and previous transactions back to the hardware.
+ */
+ trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
+ refcount_inc(&trans->refcount);
+
+ /* For RX channels, update each completed transaction with the number
+ * of bytes that were actually received. For TX channels, report
+ * the number of transactions and bytes this completion represents
+ * up the network stack.
+ */
+ if (channel->toward_ipa)
+ gsi_channel_tx_update(channel, trans);
+ else
+ gsi_evt_ring_rx_update(evt_ring, index);
+
+ gsi_trans_move_complete(trans);
+
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
+
+ gsi_trans_free(trans);
+}
+
+/**
+ * gsi_channel_poll_one() - Return a single completed transaction on a channel
+ * @channel: Channel to be polled
+ *
+ * Return: Transaction pointer, or null if none are available
+ *
+ * This function returns the first entry on a channel's completed transaction
+ * list. If that list is empty, the hardware is consulted to determine
+ * whether any new transactions have completed. If so, they're moved to the
+ * completed list and the new first entry is returned. If there are no more
+ * completed transactions, a null pointer is returned.
+ */
+static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
+{
+ struct gsi_trans *trans;
+
+ /* Get the first transaction from the completed list */
+ trans = gsi_channel_trans_complete(channel);
+ if (!trans) {
+ /* List is empty; see if there's more to do */
+ gsi_channel_update(channel);
+ trans = gsi_channel_trans_complete(channel);
+ }
+
+ if (trans)
+ gsi_trans_move_polled(trans);
+
+ return trans;
+}
+
+/**
+ * gsi_channel_poll() - NAPI poll function for a channel
+ * @napi: NAPI structure for the channel
+ * @budget: Budget supplied by NAPI core
+ *
+ * Return: Number of items polled (<= budget)
+ *
+ * Single transactions completed by hardware are polled until either
+ * the budget is exhausted, or there are no more. Each transaction
+ * polled is passed to gsi_trans_complete(), to perform remaining
+ * completion processing and retire/free the transaction.
+ */
+static int gsi_channel_poll(struct napi_struct *napi, int budget)
+{
+ struct gsi_channel *channel;
+ int count = 0;
+
+ channel = container_of(napi, struct gsi_channel, napi);
+ while (count < budget) {
+ struct gsi_trans *trans;
+
+ count++;
+ trans = gsi_channel_poll_one(channel);
+ if (!trans)
+ break;
+ gsi_trans_complete(trans);
+ }
+
+ if (count < budget) {
+ napi_complete(&channel->napi);
+ gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
+ }
+
+ return count;
+}
+
+/* The event bitmap represents which event ids are available for allocation.
+ * Set bits are not available, clear bits can be used. This function
+ * initializes the map so all events supported by the hardware are available,
+ * then precludes any reserved events from being allocated.
+ */
+static u32 gsi_event_bitmap_init(u32 evt_ring_max)
+{
+ u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
+
+ event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
+
+ return event_bitmap;
+}
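+
+/* Worked example (illustrative): for evt_ring_max = 20, the effective
+ * 32-bit map has bits 20..31 set (ids the hardware lacks) plus bits
+ * 10..16 (reserved for MHI), so ffz() hands out ids 0..9, then 17..19.
+ */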
+
+/* Setup function for event rings */
+static void gsi_evt_ring_setup(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+/* Inverse of gsi_evt_ring_setup() */
+static void gsi_evt_ring_teardown(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+/* Setup function for a single channel */
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
+ bool db_enable)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+ int ret;
+
+ if (!channel->gsi)
+ return 0; /* Ignore uninitialized channels */
+
+ ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
+ if (ret)
+ return ret;
+
+ gsi_evt_ring_program(gsi, evt_ring_id);
+
+ ret = gsi_channel_alloc_command(gsi, channel_id);
+ if (ret)
+ goto err_evt_ring_de_alloc;
+
+ gsi_channel_program(channel, db_enable);
+
+ if (channel->toward_ipa)
+ netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll, NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll, NAPI_POLL_WEIGHT);
+
+ return 0;
+
+err_evt_ring_de_alloc:
+ /* We've done nothing with the event ring yet so don't reset */
+ gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_setup_one() */
+static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+
+ if (!channel->gsi)
+ return; /* Ignore uninitialized channels */
+
+ netif_napi_del(&channel->napi);
+
+ gsi_channel_deprogram(channel);
+ gsi_channel_de_alloc_command(gsi, channel_id);
+ gsi_evt_ring_reset_command(gsi, evt_ring_id);
+ gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
+}
+
+static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
+ enum gsi_generic_cmd_opcode opcode)
+{
+ struct completion *completion = &gsi->completion;
+ u32 val;
+
+ /* First zero the result code field */
+ val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+ val &= ~GENERIC_EE_RESULT_FMASK;
+ iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+
+ /* Now issue the command */
+ val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
+ val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
+ val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+
+ if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
+ opcode, channel_id);
+
+ return -ETIMEDOUT;
+}
+
+static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
+{
+ return gsi_generic_command(gsi, channel_id,
+ GSI_GENERIC_ALLOCATE_CHANNEL);
+}
+
+static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
+{
+ int ret;
+
+ ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
+ if (ret)
+ dev_err(gsi->dev, "error %d halting modem channel %u\n",
+ ret, channel_id);
+}
+
+/* Setup function for channels */
+static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
+{
+ u32 channel_id = 0;
+ u32 mask;
+ int ret;
+
+ gsi_evt_ring_setup(gsi);
+ gsi_irq_enable(gsi);
+
+ mutex_lock(&gsi->mutex);
+
+ do {
+ ret = gsi_channel_setup_one(gsi, channel_id, db_enable);
+ if (ret)
+ goto err_unwind;
+ } while (++channel_id < gsi->channel_count);
+
+ /* Make sure no channels were defined that hardware does not support */
+ while (channel_id < GSI_CHANNEL_COUNT_MAX) {
+ struct gsi_channel *channel = &gsi->channel[channel_id++];
+
+ if (!channel->gsi)
+ continue; /* Ignore uninitialized channels */
+
+ dev_err(gsi->dev, "channel %u not supported by hardware\n",
+ channel_id - 1);
+ channel_id = gsi->channel_count;
+ goto err_unwind;
+ }
+
+ /* Allocate modem channels if necessary */
+ mask = gsi->modem_channel_bitmap;
+ while (mask) {
+ u32 modem_channel_id = __ffs(mask);
+
+ ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
+ if (ret)
+ goto err_unwind_modem;
+
+ /* Clear bit from mask only after success (for unwind) */
+ mask ^= BIT(modem_channel_id);
+ }
+
+ mutex_unlock(&gsi->mutex);
+
+ return 0;
+
+err_unwind_modem:
+ /* Compute which modem channels need to be deallocated */
+ mask ^= gsi->modem_channel_bitmap;
+ while (mask) {
+ u32 channel_id = __fls(mask);
+
+ mask ^= BIT(channel_id);
+
+ gsi_modem_channel_halt(gsi, channel_id);
+ }
+
+err_unwind:
+ while (channel_id--)
+ gsi_channel_teardown_one(gsi, channel_id);
+
+ mutex_unlock(&gsi->mutex);
+
+ gsi_irq_disable(gsi);
+ gsi_evt_ring_teardown(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_setup() */
+static void gsi_channel_teardown(struct gsi *gsi)
+{
+ u32 mask = gsi->modem_channel_bitmap;
+ u32 channel_id;
+
+ mutex_lock(&gsi->mutex);
+
+ while (mask) {
+ u32 channel_id = __fls(mask);
+
+ mask ^= BIT(channel_id);
+
+ gsi_modem_channel_halt(gsi, channel_id);
+ }
+
+ channel_id = gsi->channel_count - 1;
+ do
+ gsi_channel_teardown_one(gsi, channel_id);
+ while (channel_id--);
+
+ mutex_unlock(&gsi->mutex);
+
+ gsi_irq_disable(gsi);
+ gsi_evt_ring_teardown(gsi);
+}
+
+/* Setup function for GSI. GSI firmware must be loaded and initialized */
+int gsi_setup(struct gsi *gsi, bool db_enable)
+{
+ u32 val;
+
+ /* Here is where we first touch the GSI hardware */
+ val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
+ if (!(val & ENABLED_FMASK)) {
+ dev_err(gsi->dev, "GSI has not been enabled\n");
+ return -EIO;
+ }
+
+ val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+
+ gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ if (!gsi->channel_count) {
+ dev_err(gsi->dev, "GSI reports zero channels supported\n");
+ return -EINVAL;
+ }
+ if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
+ dev_warn(gsi->dev,
+ "limiting to %u channels (hardware supports %u)\n",
+ GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
+ gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
+ }
+
+ gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ if (!gsi->evt_ring_count) {
+ dev_err(gsi->dev, "GSI reports zero event rings supported\n");
+ return -EINVAL;
+ }
+ if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
+ dev_warn(gsi->dev,
+ "limiting to %u event rings (hardware supports %u)\n",
+ GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
+ gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
+ }
+
+ /* Initialize the error log */
+ iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+
+ /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
+ iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
+
+ return gsi_channel_setup(gsi, db_enable);
+}
+
+/* Inverse of gsi_setup() */
+void gsi_teardown(struct gsi *gsi)
+{
+ gsi_channel_teardown(gsi);
+}
+
+/* Initialize a channel's event ring */
+static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
+{
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+ int ret;
+
+ ret = gsi_evt_ring_id_alloc(gsi);
+ if (ret < 0)
+ return ret;
+ channel->evt_ring_id = ret;
+
+ evt_ring = &gsi->evt_ring[channel->evt_ring_id];
+ evt_ring->channel = channel;
+
+ ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
+ if (!ret)
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
+ ret, gsi_channel_id(channel));
+
+ gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_evt_ring_init() */
+static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
+{
+ u32 evt_ring_id = channel->evt_ring_id;
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ gsi_ring_free(gsi, &evt_ring->ring);
+ gsi_evt_ring_id_free(gsi, evt_ring_id);
+}
+
+/* Init function for event rings */
+static void gsi_evt_ring_init(struct gsi *gsi)
+{
+ u32 evt_ring_id = 0;
+
+ gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+ gsi->event_enable_bitmap = 0;
+ do
+ init_completion(&gsi->evt_ring[evt_ring_id].completion);
+ while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
+}
+
+/* Inverse of gsi_evt_ring_init() */
+static void gsi_evt_ring_exit(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+static bool gsi_channel_data_valid(struct gsi *gsi,
+ const struct ipa_gsi_endpoint_data *data)
+{
+#ifdef IPA_VALIDATION
+ u32 channel_id = data->channel_id;
+ struct device *dev = gsi->dev;
+
+ /* Make sure channel ids are in the range driver supports */
+ if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
+ dev_err(dev, "bad channel id %u (must be less than %u)\n",
+ channel_id, GSI_CHANNEL_COUNT_MAX);
+ return false;
+ }
+
+ if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
+ dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id);
+ return false;
+ }
+
+ if (!data->channel.tlv_count ||
+ data->channel.tlv_count > GSI_TLV_MAX) {
+ dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
+ channel_id, data->channel.tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ /* We have to allow at least one maximally-sized transaction to
+ * be outstanding (which would use tlv_count TREs). Given how
+ * gsi_channel_tre_max() is computed, tre_count has to be almost
+ * twice the TLV FIFO size to satisfy this requirement.
+ */
+ if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
+ dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
+ channel_id, data->channel.tlv_count,
+ data->channel.tre_count);
+ return false;
+ }
+
+ if (!is_power_of_2(data->channel.tre_count)) {
+ dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
+ channel_id, data->channel.tre_count);
+ return false;
+ }
+
+ if (!is_power_of_2(data->channel.event_count)) {
+ dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
+ channel_id, data->channel.event_count);
+ return false;
+ }
+#endif /* IPA_VALIDATION */
+
+ return true;
+}
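+
+/* For example (illustrative): a channel with tlv_count = 8 requires
+ * tre_count >= 2 * 8 - 1 = 15; because tre_count must also be a power
+ * of 2, the smallest valid configuration is tre_count = 16.
+ */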
+
+/* Init function for a single channel */
+static int gsi_channel_init_one(struct gsi *gsi,
+ const struct ipa_gsi_endpoint_data *data,
+ bool command, bool prefetch)
+{
+ struct gsi_channel *channel;
+ u32 tre_count;
+ int ret;
+
+ if (!gsi_channel_data_valid(gsi, data))
+ return -EINVAL;
+
+ /* Worst case we need an event for every outstanding TRE */
+ if (data->channel.tre_count > data->channel.event_count) {
+ tre_count = data->channel.event_count;
+ dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
+ data->channel_id, tre_count);
+ } else {
+ tre_count = data->channel.tre_count;
+ }
+
+ channel = &gsi->channel[data->channel_id];
+ memset(channel, 0, sizeof(*channel));
+
+ channel->gsi = gsi;
+ channel->toward_ipa = data->toward_ipa;
+ channel->command = command;
+ channel->use_prefetch = command && prefetch;
+ channel->tlv_count = data->channel.tlv_count;
+ channel->tre_count = tre_count;
+ channel->event_count = data->channel.event_count;
+ init_completion(&channel->completion);
+
+ ret = gsi_channel_evt_ring_init(channel);
+ if (ret)
+ goto err_clear_gsi;
+
+ ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
+ if (ret) {
+ dev_err(gsi->dev, "error %d allocating channel %u ring\n",
+ ret, data->channel_id);
+ goto err_channel_evt_ring_exit;
+ }
+
+ ret = gsi_channel_trans_init(gsi, data->channel_id);
+ if (ret)
+ goto err_ring_free;
+
+ if (command) {
+ u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
+
+ ret = ipa_cmd_pool_init(channel, tre_max);
+ }
+ if (!ret)
+ return 0; /* Success! */
+
+ gsi_channel_trans_exit(channel);
+err_ring_free:
+ gsi_ring_free(gsi, &channel->tre_ring);
+err_channel_evt_ring_exit:
+ gsi_channel_evt_ring_exit(channel);
+err_clear_gsi:
+ channel->gsi = NULL; /* Mark it not (fully) initialized */
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_init_one() */
+static void gsi_channel_exit_one(struct gsi_channel *channel)
+{
+ if (!channel->gsi)
+ return; /* Ignore uninitialized channels */
+
+ if (channel->command)
+ ipa_cmd_pool_exit(channel);
+ gsi_channel_trans_exit(channel);
+ gsi_ring_free(channel->gsi, &channel->tre_ring);
+ gsi_channel_evt_ring_exit(channel);
+}
+
+/* Init function for channels */
+static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
+ const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc)
+{
+ int ret = 0;
+ u32 i;
+
+ gsi_evt_ring_init(gsi);
+
+ /* The endpoint data array is indexed by endpoint name */
+ for (i = 0; i < count; i++) {
+ bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
+
+ if (ipa_gsi_endpoint_data_empty(&data[i]))
+ continue; /* Skip over empty slots */
+
+ /* Mark modem channels to be allocated (hardware workaround) */
+ if (data[i].ee_id == GSI_EE_MODEM) {
+ if (modem_alloc)
+ gsi->modem_channel_bitmap |=
+ BIT(data[i].channel_id);
+ continue;
+ }
+
+ ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
+ if (ret)
+ goto err_unwind;
+ }
+
+ return ret;
+
+err_unwind:
+ while (i--) {
+ if (ipa_gsi_endpoint_data_empty(&data[i]))
+ continue;
+ if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
+ gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
+ continue;
+ }
+ gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
+ }
+ gsi_evt_ring_exit(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_init() */
+static void gsi_channel_exit(struct gsi *gsi)
+{
+ u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
+
+ do
+ gsi_channel_exit_one(&gsi->channel[channel_id]);
+ while (channel_id--);
+ gsi->modem_channel_bitmap = 0;
+
+ gsi_evt_ring_exit(gsi);
+}
+
+/* Init function for GSI. GSI hardware does not need to be "ready" */
+int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
+ u32 count, const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc)
+{
+ struct resource *res;
+ resource_size_t size;
+ unsigned int irq;
+ int ret;
+
+ gsi_validate_build();
+
+ gsi->dev = &pdev->dev;
+
+ /* The GSI layer performs NAPI on all endpoints. NAPI requires a
+ * network device structure, but the GSI layer does not have one,
+ * so we must create a dummy network device for this purpose.
+ */
+ init_dummy_netdev(&gsi->dummy_dev);
+
+ /* Get the GSI IRQ and arrange for it to be able to wake the system */
+ ret = platform_get_irq_byname(pdev, "gsi");
+ if (ret <= 0) {
+ dev_err(gsi->dev,
+ "DT error %d getting \"gsi\" IRQ property\n", ret);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+ if (ret) {
+ dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
+ return ret;
+ }
+ gsi->irq = irq;
+
+ ret = enable_irq_wake(gsi->irq);
+ if (ret)
+ dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
+ gsi->irq_wake_enabled = !ret;
+
+ /* Get GSI memory range and map it */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
+ if (!res) {
+ dev_err(gsi->dev,
+ "DT error getting \"gsi\" memory property\n");
+ ret = -ENODEV;
+ goto err_disable_irq_wake;
+ }
+
+ size = resource_size(res);
+ if (res->start > U32_MAX || size > U32_MAX - res->start) {
+ dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
+ ret = -EINVAL;
+ goto err_disable_irq_wake;
+ }
+
+ gsi->virt = ioremap(res->start, size);
+ if (!gsi->virt) {
+ dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
+ ret = -ENOMEM;
+ goto err_disable_irq_wake;
+ }
+
+ ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
+ if (ret)
+ goto err_iounmap;
+
+ mutex_init(&gsi->mutex);
+ init_completion(&gsi->completion);
+
+ return 0;
+
+err_iounmap:
+ iounmap(gsi->virt);
+err_disable_irq_wake:
+ if (gsi->irq_wake_enabled)
+ (void)disable_irq_wake(gsi->irq);
+ free_irq(gsi->irq, gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_init() */
+void gsi_exit(struct gsi *gsi)
+{
+ mutex_destroy(&gsi->mutex);
+ gsi_channel_exit(gsi);
+ if (gsi->irq_wake_enabled)
+ (void)disable_irq_wake(gsi->irq);
+ free_irq(gsi->irq, gsi);
+ iounmap(gsi->virt);
+}
+
+/* The maximum number of outstanding TREs on a channel. This limits
+ * a channel's maximum number of transactions outstanding (worst case
+ * is one TRE per transaction).
+ *
+ * The absolute limit is the number of TREs in the channel's TRE ring,
+ * and in theory we should be able to use all of them. But in practice,
+ * doing that led to the hardware reporting exhaustion of event ring
+ * slots for writing completion information. So the hardware limit
+ * would be (tre_count - 1).
+ *
+ * We reduce it a bit further though. Transaction resource pools are
+ * sized to be a little larger than this maximum, to allow resource
+ * allocations to always be contiguous. The number of entries in a
+ * TRE ring buffer is a power of 2, and the extra resources in a pool
+ * tend to nearly double the memory allocated for it. Reducing the
+ * maximum number of outstanding TREs allows the number of entries in
+ * a pool to avoid crossing that power-of-2 boundary, and this can
+ * substantially reduce pool memory requirements. The number we
+ * reduce it by matches the number added in gsi_trans_pool_init().
+ */
+u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ /* Hardware limit is channel->tre_count - 1 */
+ return channel->tre_count - (channel->tlv_count - 1);
+}
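+
+/* For example (numbers illustrative only): with tre_count 256 and
+ * tlv_count 16, the limit above is 256 - 15 = 241. The scatterlist
+ * pool for the channel then holds 241 + 16 - 1 = 256 entries (see
+ * gsi_trans_pool_init()), exactly a power of 2; using the hardware
+ * limit of 255 would yield 270 entries, crossing that boundary.
+ */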
+
+/* Returns the maximum number of TREs in a single transaction for a channel */
+u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ return channel->tlv_count;
+}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
new file mode 100644
index 000000000000..0698ff1ae7a6
--- /dev/null
+++ b/drivers/net/ipa/gsi.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_H_
+#define _GSI_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+/* Maximum number of channels and event rings supported by the driver */
+#define GSI_CHANNEL_COUNT_MAX 17
+#define GSI_EVT_RING_COUNT_MAX 13
+
+/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
+#define GSI_TLV_MAX 64
+
+struct device;
+struct scatterlist;
+struct platform_device;
+
+struct gsi;
+struct gsi_trans;
+struct gsi_channel_data;
+struct ipa_gsi_endpoint_data;
+
+/* Execution environment IDs */
+enum gsi_ee_id {
+ GSI_EE_AP = 0,
+ GSI_EE_MODEM = 1,
+ GSI_EE_UC = 2,
+ GSI_EE_TZ = 3,
+};
+
+struct gsi_ring {
+ void *virt; /* ring array base address */
+ dma_addr_t addr; /* primarily low 32 bits used */
+ u32 count; /* number of elements in ring */
+
+ /* The ring index value indicates the next "open" entry in the ring.
+ *
+ * A channel ring consists of TRE entries filled by the AP and passed
+ * to the hardware for processing. For a channel ring, the ring index
+ * identifies the next unused entry to be filled by the AP.
+ *
+ * An event ring consists of event structures filled by the hardware
+ * and passed to the AP. For event rings, the ring index identifies
+ * the next ring entry that is not known to have been filled by the
+ * hardware.
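+ *
+ * The index is free-running; users reduce it modulo "count" to
+ * locate the ring element it refers to (see gsi_ring_virt()).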
+ */
+ u32 index;
+};
+
+/* Transactions use several resources that can be allocated dynamically
+ * but taken from a fixed-size pool. The number of elements required for
+ * the pool is limited by the total number of TREs that can be outstanding.
+ *
+ * If sufficient TREs are available to reserve for a transaction,
+ * allocation from these pools is guaranteed to succeed. Furthermore,
+ * these resources are implicitly freed whenever the TREs in the
+ * transaction they're associated with are released.
+ *
+ * The result of a pool allocation of multiple elements is always
+ * contiguous.
+ */
+struct gsi_trans_pool {
+ void *base; /* base address of element pool */
+ u32 count; /* # elements in the pool */
+ u32 free; /* next free element in pool (modulo) */
+ u32 size; /* size (bytes) of an element */
+ u32 max_alloc; /* max allocation request */
+ dma_addr_t addr; /* DMA address if DMA pool (or 0) */
+};
+
+struct gsi_trans_info {
+ atomic_t tre_avail; /* TREs available for allocation */
+ struct gsi_trans_pool pool; /* transaction pool */
+ struct gsi_trans_pool sg_pool; /* scatterlist pool */
+ struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
+ struct gsi_trans_pool info_pool;/* command information pool */
+ struct gsi_trans **map; /* TRE -> transaction map */
+
+ spinlock_t spinlock; /* protects updates to the lists */
+ struct list_head alloc; /* allocated, not committed */
+ struct list_head pending; /* committed, awaiting completion */
+ struct list_head complete; /* completed, awaiting poll */
+ struct list_head polled; /* returned by gsi_channel_poll_one() */
+};
+
+/* Hardware values signifying the state of a channel */
+enum gsi_channel_state {
+ GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
+ GSI_CHANNEL_STATE_ALLOCATED = 0x1,
+ GSI_CHANNEL_STATE_STARTED = 0x2,
+ GSI_CHANNEL_STATE_STOPPED = 0x3,
+ GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_ERROR = 0xf,
+};
+
+/* We only care about channels between IPA and AP */
+struct gsi_channel {
+ struct gsi *gsi;
+ bool toward_ipa;
+ bool command; /* AP command TX channel or not */
+ bool use_prefetch; /* use prefetch (else escape buf) */
+
+ u8 tlv_count; /* # entries in TLV FIFO */
+ u16 tre_count;
+ u16 event_count;
+
+ struct completion completion; /* signals channel state changes */
+ enum gsi_channel_state state;
+
+ struct gsi_ring tre_ring;
+ u32 evt_ring_id;
+
+ u64 byte_count; /* total # bytes transferred */
+ u64 trans_count; /* total # transactions */
+ /* The following counts are used only for TX endpoints */
+ u64 queued_byte_count; /* last reported queued byte count */
+ u64 queued_trans_count; /* ...and queued trans count */
+ u64 compl_byte_count; /* last reported completed byte count */
+ u64 compl_trans_count; /* ...and completed trans count */
+
+ struct gsi_trans_info trans_info;
+
+ struct napi_struct napi;
+};
+
+/* Hardware values signifying the state of an event ring */
+enum gsi_evt_ring_state {
+ GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
+ GSI_EVT_RING_STATE_ALLOCATED = 0x1,
+ GSI_EVT_RING_STATE_ERROR = 0xf,
+};
+
+struct gsi_evt_ring {
+ struct gsi_channel *channel;
+ struct completion completion; /* signals event ring state changes */
+ enum gsi_evt_ring_state state;
+ struct gsi_ring ring;
+};
+
+struct gsi {
+ struct device *dev; /* Same as IPA device */
+ struct net_device dummy_dev; /* needed for NAPI */
+ void __iomem *virt;
+ u32 irq;
+ bool irq_wake_enabled;
+ u32 channel_count;
+ u32 evt_ring_count;
+ struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+ struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
+ u32 event_bitmap;
+ u32 event_enable_bitmap;
+ u32 modem_channel_bitmap;
+ struct completion completion; /* for global EE commands */
+ struct mutex mutex; /* protects commands, programming */
+};
+
+/**
+ * gsi_setup() - Set up the GSI subsystem
+ * @gsi: Address of GSI structure embedded in an IPA structure
+ * @db_enable: Whether to use the GSI doorbell engine
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * Performs initialization that must wait until the GSI hardware is
+ * ready (including firmware loaded).
+ */
+int gsi_setup(struct gsi *gsi, bool db_enable);
+
+/**
+ * gsi_teardown() - Tear down GSI subsystem
+ * @gsi: GSI address previously passed to a successful gsi_setup() call
+ */
+void gsi_teardown(struct gsi *gsi);
+
+/**
+ * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
+ * @gsi: GSI pointer
+ * @channel_id: Channel whose limit is to be returned
+ *
+ * Return: The maximum number of TREs that can be outstanding on the channel
+ */
+u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
+ * @gsi: GSI pointer
+ * @channel_id: Channel whose limit is to be returned
+ *
+ * Return: The maximum TRE count per transaction on the channel
+ */
+u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_start() - Start an allocated GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to start
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_stop() - Stop a started GSI channel
+ * @gsi: GSI pointer returned by gsi_setup()
+ * @channel_id: Channel to stop
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_reset() - Reset an allocated GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to be reset
+ * @db_enable: Whether doorbell engine should be enabled
+ *
+ * Reset a channel and reconfigure it. The @db_enable flag indicates
+ * whether the doorbell engine will be enabled following reconfiguration.
+ *
+ * GSI hardware relinquishes ownership of all pending receive buffer
+ * transactions and they will complete with their cancelled flag set.
+ */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable);
+
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
+
+/**
+ * gsi_init() - Initialize the GSI subsystem
+ * @gsi: Address of GSI structure embedded in an IPA structure
+ * @pdev: IPA platform device
+ * @prefetch: Whether the command channel should use TLV prefetch
+ * @count: Number of entries in the @data array
+ * @data: Array of endpoint configuration data
+ * @modem_alloc: Whether modem channels should be marked for allocation
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * Early stage initialization of the GSI subsystem, performing tasks
+ * that can be done before the GSI hardware is ready to use.
+ */
+int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
+ u32 count, const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc);
+
+/**
+ * gsi_exit() - Exit the GSI subsystem
+ * @gsi: GSI address previously passed to a successful gsi_init() call
+ */
+void gsi_exit(struct gsi *gsi);
+
+#endif /* _GSI_H_ */
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
new file mode 100644
index 000000000000..b57d0198ebc1
--- /dev/null
+++ b/drivers/net/ipa/gsi_private.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_PRIVATE_H_
+#define _GSI_PRIVATE_H_
+
+/* === Only "gsi.c" and "gsi_trans.c" should include this file === */
+
+#include <linux/types.h>
+
+struct gsi_trans;
+struct gsi_ring;
+struct gsi_channel;
+
+#define GSI_RING_ELEMENT_SIZE 16 /* bytes */
+
+/* Return the entry that follows one provided in a transaction pool */
+void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
+
+/**
+ * gsi_trans_move_complete() - Mark a GSI transaction completed
+ * @trans: Transaction to mark completed
+ */
+void gsi_trans_move_complete(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_move_polled() - Mark a transaction polled
+ * @trans: Transaction to update
+ */
+void gsi_trans_move_polled(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_complete() - Complete a GSI transaction
+ * @trans: Transaction to complete
+ *
+ * Marks a transaction complete (including freeing it).
+ */
+void gsi_trans_complete(struct gsi_trans *trans);
+
+/**
+ * gsi_channel_trans_mapped() - Return a transaction mapped to a TRE index
+ * @channel: Channel associated with the transaction
+ * @index: Index of the TRE having a transaction
+ *
+ * Return: The GSI transaction pointer associated with the TRE index
+ */
+struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel,
+ u32 index);
+
+/**
+ * gsi_channel_trans_complete() - Return a channel's next completed transaction
+ * @channel: Channel whose next transaction is to be returned
+ *
+ * Return: The next completed transaction, or NULL if nothing new
+ */
+struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_trans_cancel_pending() - Cancel pending transactions
+ * @channel: Channel whose pending transactions should be cancelled
+ *
+ * Cancel all pending transactions on a channel. These are transactions
+ * that have been committed but not yet completed. This is required when
+ * the channel gets reset. At that time all pending transactions will be
+ * marked as cancelled.
+ *
+ * NOTE: Transactions already complete at the time of this call are
+ * unaffected.
+ */
+void gsi_channel_trans_cancel_pending(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_trans_init() - Initialize a channel's GSI transaction info
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ *
+ * Return: 0 if successful, or -ENOMEM on allocation failure
+ *
+ * Creates and sets up information for managing transactions on a channel
+ */
+int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_trans_exit() - Inverse of gsi_channel_trans_init()
+ * @channel: Channel whose transaction information is to be cleaned up
+ */
+void gsi_channel_trans_exit(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_doorbell() - Ring a channel's doorbell
+ * @channel: Channel whose doorbell should be rung
+ *
+ * Rings a channel's doorbell to inform the GSI hardware that new
+ * transactions (TREs, really) are available for it to process.
+ */
+void gsi_channel_doorbell(struct gsi_channel *channel);
+
+/**
+ * gsi_ring_virt() - Return virtual address for a ring entry
+ * @ring: Ring whose address is to be translated
+ * @index: Index (slot number) of entry
+ */
+void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
+
+/**
+ * gsi_channel_tx_queued() - Report the number of bytes queued to hardware
+ * @channel: Channel whose bytes have been queued
+ *
+ * This arranges for the number of transactions and bytes for
+ * transfer that have been queued to hardware to be reported. It
+ * passes this information up the network stack so it can be used to
+ * throttle transmissions.
+ */
+void gsi_channel_tx_queued(struct gsi_channel *channel);
+
+#endif /* _GSI_PRIVATE_H_ */
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
new file mode 100644
index 000000000000..acc9e744c67d
--- /dev/null
+++ b/drivers/net/ipa/gsi_reg.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_REG_H_
+#define _GSI_REG_H_
+
+/* === Only "gsi.c" should include this file === */
+
+#include <linux/bits.h>
+
+/**
+ * DOC: GSI Registers
+ *
+ * GSI registers are located within the "gsi" address space defined by Device
+ * Tree. The offset of each register within that space is specified by
+ * symbols defined below. The GSI address space is mapped to virtual memory
+ * space in gsi_init(). All GSI registers are 32 bits wide.
+ *
+ * Each register type is duplicated for a number of instances of something.
+ * For example, each GSI channel has its own set of registers defining its
+ * configuration. The offset to a channel's set of registers is computed
+ * based on a "base" offset plus an additional "stride" amount computed
+ * from the channel's ID. For such registers, the offset is computed by a
+ * function-like macro that takes a parameter used in the computation.
+ *
+ * The offset of a register dependent on execution environment is computed
+ * by a macro that is supplied a parameter "ee". The "ee" value is a member
+ * of the gsi_ee_id enumerated type.
+ *
+ * The offset of a channel register is computed by a macro that is supplied a
+ * parameter "ch". The "ch" value is a channel id whose maximum value is 30
+ * (though the actual limit is hardware-dependent).
+ *
+ * The offset of an event register is computed by a macro that is supplied a
+ * parameter "ev". The "ev" value is an event id whose maximum value is 15
+ * (though the actual limit is hardware-dependent).
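+ *
+ * As an example, GSI_EE_N_CH_C_CNTXT_0_OFFSET(5, GSI_EE_AP) expands
+ * to 0x0001c000 + 0x4000 * 0 + 0x80 * 5 = 0x0001c280 (channel ID 5
+ * chosen purely for illustration; GSI_EE_AP is 0).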
+ */
+
+#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
+ GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
+ (0x0000c018 + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
+ GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
+ (0x0000c01c + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
+ GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0000c028 + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
+ GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0000c02c + 0x1000 * (ee))
+
+#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
+ (0x0001c000 + 0x4000 * (ee) + 0x80 * (ch))
+#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
+#define CHTYPE_DIR_FMASK GENMASK(3, 3)
+#define EE_FMASK GENMASK(7, 4)
+#define CHID_FMASK GENMASK(12, 8)
+/* The next field is present for GSI v2.0 and above */
+#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
+#define ERINDEX_FMASK GENMASK(18, 14)
+#define CHSTATE_FMASK GENMASK(23, 20)
+#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
+
+#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
+ (0x0001c004 + 0x4000 * (ee) + 0x80 * (ch))
+#define R_LENGTH_FMASK GENMASK(15, 0)
+
+#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_2_OFFSET(ch, ee) \
+ (0x0001c008 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_3_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_3_OFFSET(ch, ee) \
+ (0x0001c00c + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_QOS_OFFSET(ch) \
+ GSI_EE_N_CH_C_QOS_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_QOS_OFFSET(ch, ee) \
+ (0x0001c05c + 0x4000 * (ee) + 0x80 * (ch))
+#define WRR_WEIGHT_FMASK GENMASK(3, 0)
+#define MAX_PREFETCH_FMASK GENMASK(8, 8)
+#define USE_DB_ENG_FMASK GENMASK(9, 9)
+/* The next field is present for GSI v2.0 and above */
+#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
+
+#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_0_OFFSET(ch, ee) \
+ (0x0001c060 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_1_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_1_OFFSET(ch, ee) \
+ (0x0001c064 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_2_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_2_OFFSET(ch, ee) \
+ (0x0001c068 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_3_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_3_OFFSET(ch, ee) \
+ (0x0001c06c + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \
+ (0x0001d000 + 0x4000 * (ee) + 0x80 * (ev))
+#define EV_CHTYPE_FMASK GENMASK(3, 0)
+#define EV_EE_FMASK GENMASK(7, 4)
+#define EV_EVCHID_FMASK GENMASK(15, 8)
+#define EV_INTYPE_FMASK GENMASK(16, 16)
+#define EV_CHSTATE_FMASK GENMASK(23, 20)
+#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \
+ (0x0001d004 + 0x4000 * (ee) + 0x80 * (ev))
+#define EV_R_LENGTH_FMASK GENMASK(15, 0)
+
+#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET(ev, ee) \
+ (0x0001d008 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET(ev, ee) \
+ (0x0001d00c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET(ev, ee) \
+ (0x0001d010 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET(ev, ee) \
+ (0x0001d020 + 0x4000 * (ee) + 0x80 * (ev))
+#define MODT_FMASK GENMASK(15, 0)
+#define MODC_FMASK GENMASK(23, 16)
+#define MOD_CNT_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET(ev, ee) \
+ (0x0001d024 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET(ev, ee) \
+ (0x0001d028 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET(ev, ee) \
+ (0x0001d02c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET(ev, ee) \
+ (0x0001d030 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET(ev, ee) \
+ (0x0001d034 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET(ev, ee) \
+ (0x0001d048 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET(ev, ee) \
+ (0x0001d04c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_DOORBELL_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_DOORBELL_0_OFFSET(ch, ee) \
+ (0x0001e000 + 0x4000 * (ee) + 0x08 * (ch))
+
+#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET(ev, ee) \
+ (0x0001e100 + 0x4000 * (ee) + 0x08 * (ev))
+
+#define GSI_GSI_STATUS_OFFSET \
+ GSI_EE_N_GSI_STATUS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GSI_STATUS_OFFSET(ee) \
+ (0x0001f000 + 0x4000 * (ee))
+#define ENABLED_FMASK GENMASK(0, 0)
+
+#define GSI_CH_CMD_OFFSET \
+ GSI_EE_N_CH_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CH_CMD_OFFSET(ee) \
+ (0x0001f008 + 0x4000 * (ee))
+#define CH_CHID_FMASK GENMASK(7, 0)
+#define CH_OPCODE_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_CMD_OFFSET \
+ GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
+ (0x0001f010 + 0x4000 * (ee))
+#define EV_CHID_FMASK GENMASK(7, 0)
+#define EV_OPCODE_FMASK GENMASK(31, 24)
+
+#define GSI_GENERIC_CMD_OFFSET \
+ GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
+ (0x0001f018 + 0x4000 * (ee))
+#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
+#define GENERIC_CHID_FMASK GENMASK(9, 5)
+#define GENERIC_EE_FMASK GENMASK(13, 10)
+
+#define GSI_GSI_HW_PARAM_2_OFFSET \
+ GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
+ (0x0001f040 + 0x4000 * (ee))
+#define IRAM_SIZE_FMASK GENMASK(2, 0)
+#define IRAM_SIZE_ONE_KB_FVAL 0
+#define IRAM_SIZE_TWO_KB_FVAL 1
+/* The next two values are available for GSI v2.0 and above */
+#define IRAM_SIZE_TWO_N_HALF_KB_FVAL 2
+#define IRAM_SIZE_THREE_KB_FVAL 3
+#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
+#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
+#define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
+#define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
+/* Fields below are present for GSI v2.0 and above */
+#define GSI_USE_SDMA_FMASK GENMASK(15, 15)
+#define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
+#define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
+#define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
+/* Fields below are present for GSI v2.2 and above */
+#define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
+#define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
+
+#define GSI_CNTXT_TYPE_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
+ (0x0001f080 + 0x4000 * (ee))
+#define CH_CTRL_FMASK GENMASK(0, 0)
+#define EV_CTRL_FMASK GENMASK(1, 1)
+#define GLOB_EE_FMASK GENMASK(2, 2)
+#define IEOB_FMASK GENMASK(3, 3)
+#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
+#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
+#define GENERAL_FMASK GENMASK(6, 6)
+
+#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
+ (0x0001f088 + 0x4000 * (ee))
+#define MSK_CH_CTRL_FMASK GENMASK(0, 0)
+#define MSK_EV_CTRL_FMASK GENMASK(1, 1)
+#define MSK_GLOB_EE_FMASK GENMASK(2, 2)
+#define MSK_IEOB_FMASK GENMASK(3, 3)
+#define MSK_INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
+#define MSK_INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
+#define MSK_GENERAL_FMASK GENMASK(6, 6)
+#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
+
+#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(ee) \
+ (0x0001f090 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(ee) \
+ (0x0001f094 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(ee) \
+ (0x0001f098 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
+ (0x0001f09c + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0a0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0a4 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(ee) \
+ (0x0001f0b0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(ee) \
+ (0x0001f0b8 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0c0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
+ (0x0001f100 + 0x4000 * (ee))
+#define ERROR_INT_FMASK GENMASK(0, 0)
+#define GP_INT1_FMASK GENMASK(1, 1)
+#define GP_INT2_FMASK GENMASK(2, 2)
+#define GP_INT3_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
+ (0x0001f108 + 0x4000 * (ee))
+#define EN_ERROR_INT_FMASK GENMASK(0, 0)
+#define EN_GP_INT1_FMASK GENMASK(1, 1)
+#define EN_GP_INT2_FMASK GENMASK(2, 2)
+#define EN_GP_INT3_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
+
+#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
+ (0x0001f110 + 0x4000 * (ee))
+#define CLR_ERROR_INT_FMASK GENMASK(0, 0)
+#define CLR_GP_INT1_FMASK GENMASK(1, 1)
+#define CLR_GP_INT2_FMASK GENMASK(2, 2)
+#define CLR_GP_INT3_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
+ (0x0001f118 + 0x4000 * (ee))
+#define BREAK_POINT_FMASK GENMASK(0, 0)
+#define BUS_ERROR_FMASK GENMASK(1, 1)
+#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
+ (0x0001f120 + 0x4000 * (ee))
+#define EN_BREAK_POINT_FMASK GENMASK(0, 0)
+#define EN_BUS_ERROR_FMASK GENMASK(1, 1)
+#define EN_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define EN_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
+
+#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
+ (0x0001f128 + 0x4000 * (ee))
+#define CLR_BREAK_POINT_FMASK GENMASK(0, 0)
+#define CLR_BUS_ERROR_FMASK GENMASK(1, 1)
+#define CLR_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define CLR_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_INTSET_OFFSET \
+ GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_INTSET_OFFSET(ee) \
+ (0x0001f180 + 0x4000 * (ee))
+#define INTYPE_FMASK GENMASK(0, 0)
+
+#define GSI_ERROR_LOG_OFFSET \
+ GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_ERROR_LOG_OFFSET(ee) \
+ (0x0001f200 + 0x4000 * (ee))
+#define ERR_ARG3_FMASK GENMASK(3, 0)
+#define ERR_ARG2_FMASK GENMASK(7, 4)
+#define ERR_ARG1_FMASK GENMASK(11, 8)
+#define ERR_CODE_FMASK GENMASK(15, 12)
+#define ERR_VIRT_IDX_FMASK GENMASK(23, 19)
+#define ERR_TYPE_FMASK GENMASK(27, 24)
+#define ERR_EE_FMASK GENMASK(31, 28)
+
+#define GSI_ERROR_LOG_CLR_OFFSET \
+ GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
+ (0x0001f210 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SCRATCH_0_OFFSET \
+ GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(ee) \
+ (0x0001f400 + 0x4000 * (ee))
+#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
+#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
+#define GENERIC_EE_SUCCESS_FVAL 1
+#define GENERIC_EE_INCORRECT_DIRECTION_FVAL 3
+#define GENERIC_EE_INCORRECT_CHANNEL_FVAL 5
+#define GENERIC_EE_NO_RESOURCES_FVAL 7
+#define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */
+#define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24)
+
+#endif /* _GSI_REG_H_ */
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
new file mode 100644
index 000000000000..bdbfeed359db
--- /dev/null
+++ b/drivers/net/ipa/gsi_trans.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_private.h"
+#include "gsi_trans.h"
+#include "ipa_gsi.h"
+#include "ipa_data.h"
+#include "ipa_cmd.h"
+
+/**
+ * DOC: GSI Transactions
+ *
+ * A GSI transaction abstracts the behavior of a GSI channel by representing
+ * everything about a related group of IPA commands in a single structure.
+ * (A "command" in this sense is either a data transfer or an IPA immediate
+ * command.) Most details of interaction with the GSI hardware are managed
+ * by the GSI transaction core, allowing users to simply describe commands
+ * to be performed. When a transaction has completed, a callback function
+ * (dependent on the type of endpoint associated with the channel) allows
+ * cleanup of resources associated with the transaction.
+ *
+ * To perform a command (or set of them), a user of the GSI transaction
+ * interface allocates a transaction, indicating the number of TREs required
+ * (one per command). If sufficient TREs are available, they are reserved
+ * for use in the transaction and the allocation succeeds. This way
+ * exhaustion of the available TREs in a channel ring is detected
+ * as early as possible. All resources required to complete a transaction
+ * are allocated at transaction allocation time.
+ *
+ * Commands performed as part of a transaction are represented in an array
+ * of Linux scatterlist structures. This array is allocated with the
+ * transaction, and its entries are initialized using standard scatterlist
+ * functions (such as sg_set_buf() or skb_to_sgvec()).
+ *
+ * Once a transaction's scatterlist structures have been initialized, the
+ * transaction is committed. The caller is responsible for mapping buffers
+ * for DMA if necessary, and this should be done *before* allocating
+ * the transaction. Between a successful allocation and commit of a
+ * transaction no errors should occur.
+ *
+ * Committing transfers ownership of the entire transaction to the GSI
+ * transaction core. The GSI transaction code formats the content of
+ * the scatterlist array into the channel ring buffer and informs the
+ * hardware that new TREs are available to process.
+ *
+ * The last TRE in each transaction is marked to interrupt the AP when the
+ * GSI hardware has completed it. Because transfers described by TREs are
+ * performed strictly in order, signaling the completion of just the last
+ * TRE in the transaction is sufficient to indicate the full transaction
+ * is complete.
+ *
+ * When a transaction is complete, ipa_gsi_trans_complete() is called by the
+ * GSI code into the IPA layer, allowing it to perform any final cleanup
+ * required before the transaction is freed.
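+ *
+ * A rough sketch of the sequence for a single skb transfer (error
+ * handling and the ring_db choice here are illustrative only):
+ *
+ *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
+ *	if (!trans)
+ *		return -EBUSY;
+ *	ret = gsi_trans_skb_add(trans, skb);
+ *	if (ret)
+ *		gsi_trans_free(trans);
+ *	else
+ *		gsi_trans_commit(trans, true);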
+ */
+
+/* Hardware values representing a transfer element type */
+enum gsi_tre_type {
+ GSI_RE_XFER = 0x2,
+ GSI_RE_IMMD_CMD = 0x3,
+};
+
+/* An entry in a channel ring */
+struct gsi_tre {
+ __le64 addr; /* DMA address */
+ __le16 len_opcode; /* length in bytes or enum IPA_CMD_* */
+ __le16 reserved;
+ __le32 flags; /* TRE_FLAGS_* */
+};
+
+/* gsi_tre->flags mask values (in CPU byte order) */
+#define TRE_FLAGS_CHAIN_FMASK GENMASK(0, 0)
+#define TRE_FLAGS_IEOB_FMASK GENMASK(8, 8)
+#define TRE_FLAGS_IEOT_FMASK GENMASK(9, 9)
+#define TRE_FLAGS_BEI_FMASK GENMASK(10, 10)
+#define TRE_FLAGS_TYPE_FMASK GENMASK(23, 16)
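+
+/* Example: the final TRE of a plain transfer (opcode IPA_CMD_NONE,
+ * no BEI) for a 64-byte buffer would have len_opcode cpu_to_le16(64)
+ * and flags cpu_to_le32(0x00020200): GSI_RE_XFER encoded in
+ * TRE_FLAGS_TYPE_FMASK plus TRE_FLAGS_IEOT_FMASK. (Values shown
+ * are illustrative.)
+ */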
+
+int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
+ u32 max_alloc)
+{
+ void *virt;
+
+#ifdef IPA_VALIDATE
+ if (!size || size % 8)
+ return -EINVAL;
+ if (count < max_alloc)
+ return -EINVAL;
+ if (!max_alloc)
+ return -EINVAL;
+#endif /* IPA_VALIDATE */
+
+ /* By allocating a few extra entries in our pool (one less
+ * than the maximum number that will be requested in a
+ * single allocation), we can always satisfy requests without
+ * ever worrying about straddling the end of the pool array.
+ * If there aren't enough entries starting at the free index,
+ * we just allocate free entries from the beginning of the pool.
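+ * For example (illustrative numbers), count 64 with max_alloc 8
+ * yields 71 entries, so an 8-entry request starting at any free
+ * index up to 63 fits without wrapping.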
+ */
+ virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ pool->base = virt;
+ /* If the allocator gave us any extra memory, use it */
+ pool->count = ksize(pool->base) / size;
+ pool->free = 0;
+ pool->max_alloc = max_alloc;
+ pool->size = size;
+ pool->addr = 0; /* Only used for DMA pools */
+
+ return 0;
+}
+
+void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
+{
+ kfree(pool->base);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/* Home-grown DMA pool. This way we can preallocate and use the tre_count
+ * to guarantee allocations will succeed. Even though we specify max_alloc
+ * (and it can be more than one), we only allow allocation of a single
+ * element from a DMA pool.
+ */
+int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size_t size, u32 count, u32 max_alloc)
+{
+ size_t total_size;
+ dma_addr_t addr;
+ void *virt;
+
+#ifdef IPA_VALIDATE
+ if (!size || size % 8)
+ return -EINVAL;
+ if (count < max_alloc)
+ return -EINVAL;
+ if (!max_alloc)
+ return -EINVAL;
+#endif /* IPA_VALIDATE */
+
+ /* Don't let allocations cross a power-of-two boundary */
+ size = __roundup_pow_of_two(size);
+ total_size = (count + max_alloc - 1) * size;
+
+ /* The allocator will give us a power-of-2 number of pages no
+ * matter what we ask for, so explicitly request that full
+ * amount; the extra space simply yields additional pool entries.
+ */
+ total_size = PAGE_SIZE << get_order(total_size);
+
+ virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ pool->base = virt;
+ pool->count = total_size / size;
+ pool->free = 0;
+ pool->size = size;
+ pool->max_alloc = max_alloc;
+ pool->addr = addr;
+
+ return 0;
+}
+
+void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
+{
+ dma_free_coherent(dev, pool->size, pool->base, pool->addr);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/* Allocate "count" entries and return the byte offset of the first */
+static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
+{
+ u32 offset;
+
+ /* assert(count > 0); */
+ /* assert(count <= pool->max_alloc); */
+
+ /* Allocate from beginning if wrap would occur */
+ if (count > pool->count - pool->free)
+ pool->free = 0;
+
+ offset = pool->free * pool->size;
+ pool->free += count;
+ memset(pool->base + offset, 0, count * pool->size);
+
+ return offset;
+}
+
+/* Allocate a contiguous block of zeroed entries from a pool */
+void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
+{
+ return pool->base + gsi_trans_pool_alloc_common(pool, count);
+}
+
+/* Allocate a single zeroed entry from a DMA pool */
+void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
+{
+ u32 offset = gsi_trans_pool_alloc_common(pool, 1);
+
+ *addr = pool->addr + offset;
+
+ return pool->base + offset;
+}
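+
+/* A minimal sketch of DMA pool use (element type left unspecified):
+ *
+ *	dma_addr_t addr;
+ *	void *virt;
+ *
+ *	virt = gsi_trans_pool_alloc_dma(&pool, &addr);
+ *
+ * The returned virtual address and addr then refer to the same
+ * (single) pool element; DMA pools allow only one element per
+ * allocation.
+ */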
+
+/* Return the pool element that immediately follows the one given.
+ * This only works if elements are allocated one at a time.
+ */
+void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
+{
+ void *end = pool->base + pool->count * pool->size;
+
+ /* assert(element >= pool->base); */
+ /* assert(element < end); */
+ /* assert(pool->max_alloc == 1); */
+ element += pool->size;
+
+ return element < end ? element : pool->base;
+}
+
+/* Map a given ring entry index to the transaction associated with it */
+static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
+ struct gsi_trans *trans)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ channel->trans_info.map[index % channel->tre_ring.count] = trans;
+}
+
+/* Return the transaction mapped to a given ring entry */
+struct gsi_trans *
+gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ return channel->trans_info.map[index % channel->tre_ring.count];
+}
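+
+/* For instance, with a 256-entry TRE ring, free-running indexes 4
+ * and 260 both map to slot 4 (numbers illustrative).
+ */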
+
+/* Return the oldest completed transaction for a channel (or null) */
+struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
+{
+ return list_first_entry_or_null(&channel->trans_info.complete,
+ struct gsi_trans, links);
+}
+
+/* Move a transaction from the allocated list to the pending list */
+static void gsi_trans_move_pending(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_move_tail(&trans->links, &trans_info->pending);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Move a transaction and all of its predecessors from the pending list
+ * to the completed list.
+ */
+void gsi_trans_move_complete(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct list_head list;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ /* Move this transaction and all predecessors to completed list */
+ list_cut_position(&list, &trans_info->pending, &trans->links);
+ list_splice_tail(&list, &trans_info->complete);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Move a transaction from the completed list to the polled list */
+void gsi_trans_move_polled(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_move_tail(&trans->links, &trans_info->polled);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Reserve some number of TREs on a channel. Returns true if successful */
+static bool
+gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
+{
+ int avail = atomic_read(&trans_info->tre_avail);
+ int new;
+
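+	/* On failure atomic_try_cmpxchg() updates "avail" with the
+	 * current value, so each pass recomputes "new" from a fresh
+	 * snapshot until the decrement succeeds or too few remain.
+	 */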
+ do {
+ new = avail - (int)tre_count;
+ if (unlikely(new < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));
+
+ return true;
+}
+
+/* Release previously-reserved TRE entries to a channel */
+static void
+gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
+{
+ atomic_add(tre_count, &trans_info->tre_avail);
+}
+
+/* Allocate a GSI transaction on a channel */
+struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ u32 tre_count,
+ enum dma_data_direction direction)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_trans_info *trans_info;
+ struct gsi_trans *trans;
+
+ /* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */
+
+ trans_info = &channel->trans_info;
+
+ /* We reserve the TREs now, but consume them at commit time.
+ * If there aren't enough available, we're done.
+ */
+ if (!gsi_trans_tre_reserve(trans_info, tre_count))
+ return NULL;
+
+ /* Allocate and initialize non-zero fields in the transaction */
+ trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
+ trans->gsi = gsi;
+ trans->channel_id = channel_id;
+ trans->tre_count = tre_count;
+ init_completion(&trans->completion);
+
+ /* Allocate the scatterlist and (if requested) info entries. */
+ trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
+ sg_init_marker(trans->sgl, tre_count);
+
+ trans->direction = direction;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_add_tail(&trans->links, &trans_info->alloc);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ refcount_set(&trans->refcount, 1);
+
+ return trans;
+}
+
+/* Free a previously-allocated transaction (used only in case of error) */
+void gsi_trans_free(struct gsi_trans *trans)
+{
+ struct gsi_trans_info *trans_info;
+
+ if (!refcount_dec_and_test(&trans->refcount))
+ return;
+
+ trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_del(&trans->links);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ ipa_gsi_trans_release(trans);
+
+ /* Releasing the reserved TREs implicitly frees the sgl[] and
+ * (if present) info[] arrays, plus the transaction itself.
+ */
+ gsi_trans_tre_release(trans_info, trans->tre_count);
+}
+
+/* Add an immediate command to a transaction */
+void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ dma_addr_t addr, enum dma_data_direction direction,
+ enum ipa_cmd_opcode opcode)
+{
+ struct ipa_cmd_info *info;
+ u32 which = trans->used++;
+ struct scatterlist *sg;
+
+ /* assert(which < trans->tre_count); */
+
+ /* Set the page information for the buffer. We also need to fill in
+ * the DMA address and length for the buffer (something dma_map_sg()
+ * normally does).
+ */
+ sg = &trans->sgl[which];
+
+ sg_set_buf(sg, buf, size);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = sg->length;
+
+ info = &trans->info[which];
+ info->opcode = opcode;
+ info->direction = direction;
+}
+
+/* Add a page transfer to a transaction. It will fill the only TRE. */
+int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
+ u32 offset)
+{
+ struct scatterlist *sg = &trans->sgl[0];
+ int ret;
+
+ /* assert(trans->tre_count == 1); */
+ /* assert(!trans->used); */
+
+ sg_set_page(sg, page, size, offset);
+ ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
+ if (!ret)
+ return -ENOMEM;
+
+ trans->used++; /* Transaction now owns the (DMA mapped) page */
+
+ return 0;
+}
+
+/* Add an SKB transfer to a transaction. No other TREs will be used. */
+int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
+{
+ struct scatterlist *sg = &trans->sgl[0];
+ u32 used;
+ int ret;
+
+ /* assert(trans->tre_count == 1); */
+ /* assert(!trans->used); */
+
+ /* skb->len will not be 0 (checked early) */
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (ret < 0)
+ return ret;
+ used = ret;
+
+ ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
+ if (!ret)
+ return -ENOMEM;
+
+ trans->used += used; /* Transaction now owns the (DMA mapped) skb */
+
+ return 0;
+}
+
+/* Compute the length/opcode value to use for a TRE */
+static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
+{
+ return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
+ : cpu_to_le16((u16)opcode);
+}
+
+/* Compute the flags value to use for a given TRE */
+static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
+{
+ enum gsi_tre_type tre_type;
+ u32 tre_flags;
+
+ tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
+ tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);
+
+ /* Last TRE contains interrupt flags */
+ if (last_tre) {
+ /* All transactions end in a transfer completion interrupt */
+ tre_flags |= TRE_FLAGS_IEOT_FMASK;
+ /* Don't interrupt when outbound commands are acknowledged */
+ if (bei)
+ tre_flags |= TRE_FLAGS_BEI_FMASK;
+ } else { /* All others indicate there's more to come */
+ tre_flags |= TRE_FLAGS_CHAIN_FMASK;
+ }
+
+ return cpu_to_le32(tre_flags);
+}
+
+static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
+ u32 len, bool last_tre, bool bei,
+ enum ipa_cmd_opcode opcode)
+{
+ struct gsi_tre tre;
+
+ tre.addr = cpu_to_le64(addr);
+ tre.len_opcode = gsi_tre_len_opcode(opcode, len);
+ tre.reserved = 0;
+ tre.flags = gsi_tre_flags(last_tre, bei, opcode);
+
+ /* ARM64 can write 16 bytes as a unit with a single instruction.
+ * Doing the assignment this way is an attempt to make that happen.
+ */
+ *dest_tre = tre;
+}
+
+/**
+ * __gsi_trans_commit() - Common GSI transaction commit code
+ * @trans: Transaction to commit
+ * @ring_db: Whether to tell the hardware about these queued transfers
+ *
+ * Formats channel ring TRE entries based on the content of the scatterlist.
+ * Maps a transaction pointer to the last ring entry used for the transaction,
+ * so it can be recovered when it completes. Moves the transaction to the
+ * pending list. Finally, updates the channel ring pointer and optionally
+ * rings the doorbell.
+ */
+static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_ring *ring = &channel->tre_ring;
+ enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
+ bool bei = channel->toward_ipa;
+ struct ipa_cmd_info *info;
+ struct gsi_tre *dest_tre;
+ struct scatterlist *sg;
+ u32 byte_count = 0;
+ u32 avail;
+ u32 i;
+
+ /* assert(trans->used > 0); */
+
+ /* Consume the entries. If we cross the end of the ring while
+ * filling them we'll switch to the beginning to finish.
+ * If there is no info array we're doing a simple data
+ * transfer request, whose opcode is IPA_CMD_NONE.
+ */
+ info = trans->info ? &trans->info[0] : NULL;
+ avail = ring->count - ring->index % ring->count;
+ dest_tre = gsi_ring_virt(ring, ring->index);
+ for_each_sg(trans->sgl, sg, trans->used, i) {
+ bool last_tre = i == trans->used - 1;
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 len = sg_dma_len(sg);
+
+ byte_count += len;
+ if (!avail--)
+ dest_tre = gsi_ring_virt(ring, 0);
+ if (info)
+ opcode = info++->opcode;
+
+ gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
+ dest_tre++;
+ }
+ ring->index += trans->used;
+
+ if (channel->toward_ipa) {
+ /* We record TX bytes when they are sent */
+ trans->len = byte_count;
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+ channel->trans_count++;
+ channel->byte_count += byte_count;
+ }
+
+ /* Associate the last TRE with the transaction */
+ gsi_channel_trans_map(channel, ring->index - 1, trans);
+
+ gsi_trans_move_pending(trans);
+
+ /* Ring doorbell if requested, or if all TREs are allocated */
+ if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
+ /* Report what we're handing off to hardware for TX channels */
+ if (channel->toward_ipa)
+ gsi_channel_tx_queued(channel);
+ gsi_channel_doorbell(channel);
+ }
+}
+
+/* Commit a GSI transaction */
+void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
+{
+ if (trans->used)
+ __gsi_trans_commit(trans, ring_db);
+ else
+ gsi_trans_free(trans);
+}
+
+/* Commit a GSI transaction and wait for it to complete */
+void gsi_trans_commit_wait(struct gsi_trans *trans)
+{
+ if (!trans->used)
+ goto out_trans_free;
+
+ refcount_inc(&trans->refcount);
+
+ __gsi_trans_commit(trans, true);
+
+ wait_for_completion(&trans->completion);
+
+out_trans_free:
+ gsi_trans_free(trans);
+}
+
+/* Commit a GSI transaction and wait for it to complete, with timeout */
+int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
+ unsigned long timeout)
+{
+ unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
+ unsigned long remaining = 1; /* In case of empty transaction */
+
+ if (!trans->used)
+ goto out_trans_free;
+
+ refcount_inc(&trans->refcount);
+
+ __gsi_trans_commit(trans, true);
+
+ remaining = wait_for_completion_timeout(&trans->completion,
+ timeout_jiffies);
+out_trans_free:
+ gsi_trans_free(trans);
+
+ return remaining ? 0 : -ETIMEDOUT;
+}
+
+/* Process the completion of a transaction; called while polling */
+void gsi_trans_complete(struct gsi_trans *trans)
+{
+ /* If the entire SGL was mapped when added, unmap it now */
+ if (trans->direction != DMA_NONE)
+ dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
+ trans->direction);
+
+ ipa_gsi_trans_complete(trans);
+
+ complete(&trans->completion);
+
+ gsi_trans_free(trans);
+}
+
+/* Cancel a channel's pending transactions */
+void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct gsi_trans *trans;
+ bool cancelled;
+
+ /* channel->gsi->mutex is held by caller */
+ spin_lock_bh(&trans_info->spinlock);
+
+ cancelled = !list_empty(&trans_info->pending);
+ list_for_each_entry(trans, &trans_info->pending, links)
+ trans->cancelled = true;
+
+ list_splice_tail_init(&trans_info->pending, &trans_info->complete);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ /* Schedule NAPI polling to complete the cancelled transactions */
+ if (cancelled)
+ napi_schedule(&channel->napi);
+}
+
+/* Issue a command to read a single byte from a channel */
+int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_trans_info *trans_info;
+ struct gsi_tre *dest_tre;
+
+ trans_info = &channel->trans_info;
+
+ /* First reserve the TRE, if possible */
+ if (!gsi_trans_tre_reserve(trans_info, 1))
+ return -EBUSY;
+
+ /* Now fill the reserved TRE and tell the hardware */
+
+ dest_tre = gsi_ring_virt(ring, ring->index);
+ gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
+
+ ring->index++;
+ gsi_channel_doorbell(channel);
+
+ return 0;
+}
+
+/* Mark a gsi_trans_read_byte() request done */
+void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ gsi_trans_tre_release(&channel->trans_info, 1);
+}
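+
+/* Illustrative pairing (not part of this patch): a caller of the
+ * single-byte read must later release the TRE it reserved:
+ *
+ *	ret = gsi_trans_read_byte(gsi, channel_id, addr);
+ *	...	(wait for the byte to arrive)
+ *	gsi_trans_read_byte_done(gsi, channel_id);
+ */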
+
+/* Initialize a channel's GSI transaction info */
+int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_trans_info *trans_info;
+ u32 tre_max;
+ int ret;
+
+ /* Ensure the size of a channel element is what's expected */
+ BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
+
+ /* The map array is used to determine what transaction is associated
+ * with a TRE that the hardware reports has completed. We need one
+ * map entry per TRE.
+ */
+ trans_info = &channel->trans_info;
+ trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
+ GFP_KERNEL);
+ if (!trans_info->map)
+ return -ENOMEM;
+
+ /* We can't use more TREs than there are available in the ring.
+ * This limits the number of transactions that can be outstanding.
+ * Worst case is one TRE per transaction (but we actually limit
+ * it to something a little less than that). We allocate resources
+ * for transactions (including transaction structures) based on
+ * this maximum number.
+ */
+ tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
+
+ /* Transactions are allocated one at a time. */
+ ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
+ tre_max, 1);
+ if (ret)
+ goto err_kfree;
+
+ /* A transaction uses a scatterlist array to represent the data
+ * transfers implemented by the transaction. Each scatterlist
+ * element is used to fill a single TRE when the transaction is
+ * committed. So we need as many scatterlist elements as the
+ * maximum number of TREs that can be outstanding.
+ *
+ * All TREs in a transaction must fit within the channel's TLV FIFO.
+ * A transaction on a channel can therefore allocate no more TREs
+ * than will fit in that FIFO.
+ */
+ ret = gsi_trans_pool_init(&trans_info->sg_pool,
+ sizeof(struct scatterlist),
+ tre_max, channel->tlv_count);
+ if (ret)
+ goto err_trans_pool_exit;
+
+ /* Finally, the tre_avail field is what ultimately limits the number
+ * of outstanding transactions and their resources. A transaction
+ * allocation succeeds only if the TREs available are sufficient for
+ * what the transaction might need. Transaction resource pools are
+ * sized based on the maximum number of outstanding TREs, so there
+ * will always be resources available if there are TREs available.
+ */
+ atomic_set(&trans_info->tre_avail, tre_max);
+
+ spin_lock_init(&trans_info->spinlock);
+ INIT_LIST_HEAD(&trans_info->alloc);
+ INIT_LIST_HEAD(&trans_info->pending);
+ INIT_LIST_HEAD(&trans_info->complete);
+ INIT_LIST_HEAD(&trans_info->polled);
+
+ return 0;
+
+err_trans_pool_exit:
+ gsi_trans_pool_exit(&trans_info->pool);
+err_kfree:
+ kfree(trans_info->map);
+
+ dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
+ ret, channel_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_trans_init() */
+void gsi_channel_trans_exit(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ gsi_trans_pool_exit(&trans_info->sg_pool);
+ gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->map);
+}
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
new file mode 100644
index 000000000000..1477fc15b30a
--- /dev/null
+++ b/drivers/net/ipa/gsi_trans.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _GSI_TRANS_H_
+#define _GSI_TRANS_H_
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+
+#include "ipa_cmd.h"
+
+struct scatterlist;
+struct device;
+struct sk_buff;
+
+struct gsi;
+struct gsi_trans;
+struct gsi_trans_pool;
+
+/**
+ * struct gsi_trans - a GSI transaction
+ *
+ * Most fields in this structure are for internal use by the transaction
+ * core code:
+ * @links: Links for channel transaction lists by state
+ * @gsi: GSI pointer
+ * @channel_id: Channel number transaction is associated with
+ * @cancelled: If set by the core code, transaction was cancelled
+ * @tre_count: Number of TREs reserved for this transaction
+ * @used: Number of TREs *used* (could be less than tre_count)
+ * @len: Total # of transfer bytes represented in sgl[] (set by core)
+ * @data: Preserved but not touched by the core transaction code
+ * @sgl: An array of scatter/gather entries managed by core code
+ * @info: Array of command information structures (command channel)
+ * @direction: DMA transfer direction (DMA_NONE for commands)
+ * @refcount: Reference count used for destruction
+ * @completion: Completed when the transaction completes
+ * @byte_count: TX channel byte count recorded when transaction committed
+ * @trans_count: Channel transaction count when committed (for BQL accounting)
+ *
+ * The sizes used for some fields in this structure were chosen to ensure
+ * the full structure size is no larger than 128 bytes.
+ */
+struct gsi_trans {
+ struct list_head links; /* gsi_channel lists */
+
+ struct gsi *gsi;
+ u8 channel_id;
+
+ bool cancelled; /* true if transaction was cancelled */
+
+ u8 tre_count; /* # TREs requested */
+ u8 used; /* # entries used in sgl[] */
+ u32 len; /* total # bytes across sgl[] */
+
+ void *data;
+ struct scatterlist *sgl;
+ struct ipa_cmd_info *info; /* array of entries, or null */
+ enum dma_data_direction direction;
+
+ refcount_t refcount;
+ struct completion completion;
+
+ u64 byte_count; /* channel byte_count when committed */
+ u64 trans_count; /* channel trans_count when committed */
+};
+
+/**
+ * gsi_trans_pool_init() - Initialize a pool of structures for transactions
+ * @pool: Pool pointer
+ * @size: Size of elements in the pool
+ * @count: Minimum number of elements in the pool
+ * @max_alloc: Maximum number of elements allocated at a time from pool
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
+ u32 max_alloc);
+
+/**
+ * gsi_trans_pool_alloc() - Allocate one or more elements from a pool
+ * @pool: Pool pointer
+ * @count: Number of elements to allocate from the pool
+ *
+ * @Return: Virtual address of element(s) allocated from the pool
+ */
+void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count);
+
+/**
+ * gsi_trans_pool_exit() - Inverse of gsi_trans_pool_init()
+ * @pool: Pool pointer
+ */
+void gsi_trans_pool_exit(struct gsi_trans_pool *pool);
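+
+/* Illustrative lifecycle sketch (not part of this interface): a pool is
+ * initialized once, elements are handed out as needed, and the whole pool
+ * is torn down when its channel goes away; there is no per-element free
+ * in this interface. Assuming a caller-defined struct foo:
+ *
+ *	ret = gsi_trans_pool_init(&pool, sizeof(struct foo), count, 1);
+ *	...
+ *	foo = gsi_trans_pool_alloc(&pool, 1);
+ *	...
+ *	gsi_trans_pool_exit(&pool);
+ */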
+
+/**
+ * gsi_trans_pool_init_dma() - Initialize a pool of DMA-able structures
+ * @dev: Device used for DMA
+ * @pool: Pool pointer
+ * @size: Size of elements in the pool
+ * @count: Minimum number of elements in the pool
+ * @max_alloc: Maximum number of elements allocated at a time from pool
+ *
+ * @Return: 0 if successful, or a negative error code
+ *
+ * Structures in this pool reside in DMA-coherent memory.
+ */
+int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size_t size, u32 count, u32 max_alloc);
+
+/**
+ * gsi_trans_pool_alloc_dma() - Allocate an element from a DMA pool
+ * @pool: DMA pool pointer
+ * @addr: DMA address "handle" associated with the allocation
+ *
+ * @Return: Virtual address of element allocated from the pool
+ *
+ * Only one element at a time may be allocated from a DMA pool.
+ */
+void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
+
+/**
+ * gsi_trans_pool_exit_dma() - Inverse of gsi_trans_pool_init_dma()
+ * @dev: Device used for DMA
+ * @pool: Pool pointer
+ */
+void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
+
+/**
+ * gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel the transaction is associated with
+ * @tre_count: Number of elements in the transaction
+ * @direction: DMA direction for entire SGL (or DMA_NONE)
+ *
+ * @Return: A GSI transaction structure, or a null pointer if all
+ * available transactions are in use
+ */
+struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ u32 tre_count,
+ enum dma_data_direction direction);
+
+/**
+ * gsi_trans_free() - Free a previously-allocated GSI transaction
+ * @trans: Transaction to be freed
+ */
+void gsi_trans_free(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_cmd_add() - Add an immediate command to a transaction
+ * @trans: Transaction
+ * @buf: Buffer pointer for command payload
+ * @size: Number of bytes in buffer
+ * @addr: DMA address for payload
+ * @direction: Direction of DMA transfer (or DMA_NONE if none required)
+ * @opcode: IPA immediate command opcode
+ */
+void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ dma_addr_t addr, enum dma_data_direction direction,
+ enum ipa_cmd_opcode opcode);
+
+/**
+ * gsi_trans_page_add() - Add a page transfer to a transaction
+ * @trans: Transaction
+ * @page: Page pointer
+ * @size: Number of bytes (starting at offset) to transfer
+ * @offset: Offset within page for start of transfer
+ */
+int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
+ u32 offset);
+
+/**
+ * gsi_trans_skb_add() - Add a socket transfer to a transaction
+ * @trans: Transaction
+ * @skb: Socket buffer for transfer (outbound)
+ *
+ * @Return: 0, or -EMSGSIZE if socket data won't fit in transaction.
+ */
+int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb);
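+
+/* Usage sketch (illustrative only): a TX path might map a linear socket
+ * buffer into a one-TRE transaction and commit it, ringing the doorbell
+ * immediately:
+ *
+ *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
+ *	if (!trans)
+ *		return -EBUSY;
+ *	if (!gsi_trans_skb_add(trans, skb))
+ *		gsi_trans_commit(trans, true);
+ */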
+
+/**
+ * gsi_trans_commit() - Commit a GSI transaction
+ * @trans: Transaction to commit
+ * @ring_db: Whether to tell the hardware about these queued transfers
+ */
+void gsi_trans_commit(struct gsi_trans *trans, bool ring_db);
+
+/**
+ * gsi_trans_commit_wait() - Commit a GSI transaction and wait for it
+ * to complete
+ * @trans: Transaction to commit
+ */
+void gsi_trans_commit_wait(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_commit_wait_timeout() - Commit a GSI transaction and wait for
+ * it to complete, with timeout
+ * @trans: Transaction to commit
+ * @timeout: Timeout period (in milliseconds)
+ *
+ * @Return: 0 if successful, or -ETIMEDOUT if the transaction timed out
+ */
+int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
+ unsigned long timeout);
+
+/**
+ * gsi_trans_read_byte() - Issue a single byte read TRE on a channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel on which to read a byte
+ * @addr: DMA address into which to transfer the one byte
+ *
+ * This is not a transaction operation at all. It's defined here because
+ * it needs to be done in coordination with other transaction activity.
+ */
+int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr);
+
+/**
+ * gsi_trans_read_byte_done() - Clean up after a single byte read TRE
+ * @gsi: GSI pointer
+ * @channel_id: Channel on which byte was read
+ *
+ * This function needs to be called to signal that the work related
+ * to reading a byte initiated by gsi_trans_read_byte() is complete.
+ */
+void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id);
+
+#endif /* _GSI_TRANS_H_ */
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
new file mode 100644
index 000000000000..23fb29889e5a
--- /dev/null
+++ b/drivers/net/ipa/ipa.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/pm_wakeup.h>
+
+#include "ipa_version.h"
+#include "gsi.h"
+#include "ipa_mem.h"
+#include "ipa_qmi.h"
+#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
+
+struct clk;
+struct icc_path;
+struct net_device;
+struct platform_device;
+
+struct ipa_clock;
+struct ipa_smp2p;
+struct ipa_interrupt;
+
+/**
+ * struct ipa - IPA information
+ * @gsi: Embedded GSI structure
+ * @version: IPA hardware version
+ * @pdev: Platform device
+ * @modem_rproc: Remoteproc handle for modem subsystem
+ * @smp2p: SMP2P information
+ * @clock: IPA clocking information
+ * @suspend_ref: Whether a clock reference preventing suspend has been taken
+ * @table_addr: DMA address of filter/route table content
+ * @table_virt: Virtual address of filter/route table content
+ * @interrupt: IPA Interrupt information
+ * @uc_loaded: true after the microcontroller has reported it's ready
+ * @reg_addr: DMA address used for IPA register access
+ * @reg_virt: Virtual address used for IPA register access
+ * @mem_addr: DMA address of IPA-local memory space
+ * @mem_virt: Virtual address of IPA-local memory space
+ * @mem_offset: Offset from @mem_virt used for access to IPA memory
+ * @mem_size: Total size (bytes) of memory at @mem_virt
+ * @mem: Array of IPA-local memory region descriptors
+ * @zero_addr: DMA address of preallocated zero-filled memory
+ * @zero_virt: Virtual address of preallocated zero-filled memory
+ * @zero_size: Size (bytes) of preallocated zero-filled memory
+ * @wakeup_source: Wakeup source information
+ * @available: Bit mask indicating endpoints hardware supports
+ * @filter_map: Bit mask indicating endpoints that support filtering
+ * @initialized: Bit mask indicating endpoints initialized
+ * @set_up: Bit mask indicating endpoints set up
+ * @enabled: Bit mask indicating endpoints enabled
+ * @endpoint: Array of endpoint information
+ * @channel_map: Mapping of GSI channel to IPA endpoint
+ * @name_map: Mapping of IPA endpoint name to IPA endpoint
+ * @setup_complete: Flag indicating whether setup stage has completed
+ * @modem_state: State of modem (stopped, running)
+ * @modem_netdev: Network device structure used for modem
+ * @qmi: QMI information
+ */
+struct ipa {
+ struct gsi gsi;
+ enum ipa_version version;
+ struct platform_device *pdev;
+ struct rproc *modem_rproc;
+ struct ipa_smp2p *smp2p;
+ struct ipa_clock *clock;
+ atomic_t suspend_ref;
+
+ dma_addr_t table_addr;
+ __le64 *table_virt;
+
+ struct ipa_interrupt *interrupt;
+ bool uc_loaded;
+
+ dma_addr_t reg_addr;
+ void __iomem *reg_virt;
+
+ dma_addr_t mem_addr;
+ void *mem_virt;
+ u32 mem_offset;
+ u32 mem_size;
+ const struct ipa_mem *mem;
+
+ dma_addr_t zero_addr;
+ void *zero_virt;
+ size_t zero_size;
+
+ struct wakeup_source *wakeup_source;
+
+ /* Bit masks indicating endpoint state */
+ u32 available; /* supported by hardware */
+ u32 filter_map;
+ u32 initialized;
+ u32 set_up;
+ u32 enabled;
+
+ struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
+ struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX];
+ struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT];
+
+ bool setup_complete;
+
+ atomic_t modem_state; /* enum ipa_modem_state */
+ struct net_device *modem_netdev;
+ struct ipa_qmi qmi;
+};
+
+/**
+ * ipa_setup() - Perform IPA setup
+ * @ipa: IPA pointer
+ *
+ * IPA initialization is broken into stages: init; config; and setup.
+ * (These have inverses exit, deconfig, and teardown.)
+ *
+ * Activities performed at the init stage can be done without requiring
+ * any access to IPA hardware. Activities performed at the config stage
+ * require the IPA clock to be running, because they involve access
+ * to IPA registers. The setup stage is performed only after the GSI
+ * hardware is ready (more on this below). The setup stage allows
+ * the AP to perform more complex initialization by issuing "immediate
+ * commands" using a special interface to the IPA.
+ *
+ * This function, @ipa_setup(), starts the setup stage.
+ *
+ * In order for the GSI hardware to be functional it needs firmware to be
+ * loaded (in addition to some other low-level initialization). This early
+ * GSI initialization can be done either by Trust Zone on the AP or by the
+ * modem.
+ *
+ * If it's done by Trust Zone, the AP loads the GSI firmware and supplies
+ * it to Trust Zone to verify and install. When this completes, if
+ * verification was successful, the GSI layer is ready and ipa_setup()
+ * implements the setup phase of initialization.
+ *
+ * If the modem performs early GSI initialization, the AP needs to know
+ * when this has occurred. An SMP2P interrupt is used for this purpose,
+ * and receipt of that interrupt triggers the call to ipa_setup().
+ */
+int ipa_setup(struct ipa *ipa);
+
+#endif /* _IPA_H_ */
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
new file mode 100644
index 000000000000..374491ea11cf
--- /dev/null
+++ b/drivers/net/ipa/ipa_clock.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_modem.h"
+
+/**
+ * DOC: IPA Clocking
+ *
+ * The "IPA Clock" manages both the IPA core clock and the interconnects
+ * (buses) the IPA depends on as a single logical entity. A reference count
+ * is incremented by "get" operations and decremented by "put" operations.
+ * Transitions of that count from 0 to 1 result in the clock and interconnects
+ * being enabled, and transitions of the count from 1 to 0 cause them to be
+ * disabled. We currently operate the core clock at a fixed clock rate, and
+ * all buses at a fixed average and peak bandwidth. As more advanced IPA
+ * features are enabled, we can make better use of clock and bus scaling.
+ *
+ * An IPA clock reference must be held for any access to IPA hardware.
+ */
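+
+/* Illustrative sketch (not part of this patch): any code touching IPA
+ * registers brackets the access with a get/put pair on the logical
+ * IPA clock:
+ *
+ *	ipa_clock_get(ipa);
+ *	... access IPA hardware ...
+ *	ipa_clock_put(ipa);
+ */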
+
+#define IPA_CORE_CLOCK_RATE (75UL * 1000 * 1000) /* Hz */
+
+/* Interconnect path bandwidths (each times 1000 bytes per second) */
+#define IPA_MEMORY_AVG (80 * 1000) /* 80 MBps */
+#define IPA_MEMORY_PEAK (600 * 1000)
+
+#define IPA_IMEM_AVG (80 * 1000)
+#define IPA_IMEM_PEAK (350 * 1000)
+
+#define IPA_CONFIG_AVG (40 * 1000)
+#define IPA_CONFIG_PEAK (40 * 1000)
+
+/**
+ * struct ipa_clock - IPA clocking information
+ * @count: Clocking reference count
+ * @mutex: Protects clock enable/disable
+ * @core: IPA core clock
+ * @memory_path: Memory interconnect
+ * @imem_path: Internal memory interconnect
+ * @config_path: Configuration space interconnect
+ */
+struct ipa_clock {
+ atomic_t count;
+ struct mutex mutex; /* protects clock enable/disable */
+ struct clk *core;
+ struct icc_path *memory_path;
+ struct icc_path *imem_path;
+ struct icc_path *config_path;
+};
+
+static struct icc_path *
+ipa_interconnect_init_one(struct device *dev, const char *name)
+{
+ struct icc_path *path;
+
+ path = of_icc_get(dev, name);
+ if (IS_ERR(path))
+ dev_err(dev, "error %ld getting memory interconnect\n",
+ PTR_ERR(path));
+
+ return path;
+}
+
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
+{
+ struct icc_path *path;
+
+ path = ipa_interconnect_init_one(dev, "memory");
+ if (IS_ERR(path))
+ goto err_return;
+ clock->memory_path = path;
+
+ path = ipa_interconnect_init_one(dev, "imem");
+ if (IS_ERR(path))
+ goto err_memory_path_put;
+ clock->imem_path = path;
+
+ path = ipa_interconnect_init_one(dev, "config");
+ if (IS_ERR(path))
+ goto err_imem_path_put;
+ clock->config_path = path;
+
+ return 0;
+
+err_imem_path_put:
+ icc_put(clock->imem_path);
+err_memory_path_put:
+ icc_put(clock->memory_path);
+err_return:
+ return PTR_ERR(path);
+}
+
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_clock *clock)
+{
+ icc_put(clock->config_path);
+ icc_put(clock->imem_path);
+ icc_put(clock->memory_path);
+}
+
+/* Currently we only use one bandwidth level, so just "enable" interconnects */
+static int ipa_interconnect_enable(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ ret = icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+ if (ret)
+ return ret;
+
+ ret = icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+ if (ret)
+ goto err_memory_path_disable;
+
+ ret = icc_set_bw(clock->config_path, IPA_CONFIG_AVG, IPA_CONFIG_PEAK);
+ if (ret)
+ goto err_imem_path_disable;
+
+ return 0;
+
+err_imem_path_disable:
+ (void)icc_set_bw(clock->imem_path, 0, 0);
+err_memory_path_disable:
+ (void)icc_set_bw(clock->memory_path, 0, 0);
+
+ return ret;
+}
+
+/* To disable an interconnect, we just set its bandwidth to 0 */
+static int ipa_interconnect_disable(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ ret = icc_set_bw(clock->memory_path, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = icc_set_bw(clock->imem_path, 0, 0);
+ if (ret)
+ goto err_memory_path_reenable;
+
+ ret = icc_set_bw(clock->config_path, 0, 0);
+ if (ret)
+ goto err_imem_path_reenable;
+
+ return 0;
+
+err_imem_path_reenable:
+ (void)icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+err_memory_path_reenable:
+ (void)icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+
+ return ret;
+}
+
+/* Turn on IPA clocks, including interconnects */
+static int ipa_clock_enable(struct ipa *ipa)
+{
+ int ret;
+
+ ret = ipa_interconnect_enable(ipa);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ipa->clock->core);
+ if (ret)
+ ipa_interconnect_disable(ipa);
+
+ return ret;
+}
+
+/* Inverse of ipa_clock_enable() */
+static void ipa_clock_disable(struct ipa *ipa)
+{
+ clk_disable_unprepare(ipa->clock->core);
+ (void)ipa_interconnect_disable(ipa);
+}
+
+/* Get an IPA clock reference, but only if the reference count is
+ * already non-zero. Returns true if the additional reference was
+ * added successfully, or false otherwise.
+ */
+bool ipa_clock_get_additional(struct ipa *ipa)
+{
+ return !!atomic_inc_not_zero(&ipa->clock->count);
+}
+
+/* Get an IPA clock reference. If the reference count is non-zero, it is
+ * incremented and return is immediate. Otherwise it is checked again
+ * under protection of the mutex, and if appropriate the clock (and
+ * interconnects) are enabled, and suspended endpoints (if any) are resumed
+ * before returning.
+ *
+ * Incrementing the reference count is intentionally deferred until
+ * after the clock is running and endpoints are resumed.
+ */
+void ipa_clock_get(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ /* If the clock is running, just bump the reference count */
+ if (ipa_clock_get_additional(ipa))
+ return;
+
+ /* Otherwise get the mutex and check again */
+ mutex_lock(&clock->mutex);
+
+ /* A reference might have been added before we got the mutex. */
+ if (ipa_clock_get_additional(ipa))
+ goto out_mutex_unlock;
+
+ ret = ipa_clock_enable(ipa);
+ if (ret) {
+ dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
+ goto out_mutex_unlock;
+ }
+
+ ipa_endpoint_resume(ipa);
+
+ atomic_inc(&clock->count);
+
+out_mutex_unlock:
+ mutex_unlock(&clock->mutex);
+}
+
+/* Attempt to remove an IPA clock reference. If this represents the last
+ * reference, suspend endpoints and disable the clock (and interconnects)
+ * under protection of a mutex.
+ */
+void ipa_clock_put(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+
+ /* If this is not the last reference there's nothing more to do */
+ if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex))
+ return;
+
+ ipa_endpoint_suspend(ipa);
+
+ ipa_clock_disable(ipa);
+
+ mutex_unlock(&clock->mutex);
+}
+
+/* Initialize IPA clocking */
+struct ipa_clock *ipa_clock_init(struct device *dev)
+{
+ struct ipa_clock *clock;
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(dev, "core");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "error %ld getting core clock\n", PTR_ERR(clk));
+ return ERR_CAST(clk);
+ }
+
+ ret = clk_set_rate(clk, IPA_CORE_CLOCK_RATE);
+ if (ret) {
+ dev_err(dev, "error %d setting core clock rate to %lu\n",
+ ret, IPA_CORE_CLOCK_RATE);
+ goto err_clk_put;
+ }
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ ret = -ENOMEM;
+ goto err_clk_put;
+ }
+ clock->core = clk;
+
+ ret = ipa_interconnect_init(clock, dev);
+ if (ret)
+ goto err_kfree;
+
+ mutex_init(&clock->mutex);
+ atomic_set(&clock->count, 0);
+
+ return clock;
+
+err_kfree:
+ kfree(clock);
+err_clk_put:
+ clk_put(clk);
+
+ return ERR_PTR(ret);
+}
+
+/* Inverse of ipa_clock_init() */
+void ipa_clock_exit(struct ipa_clock *clock)
+{
+ struct clk *clk = clock->core;
+
+ WARN_ON(atomic_read(&clock->count) != 0);
+ mutex_destroy(&clock->mutex);
+ ipa_interconnect_exit(clock);
+ kfree(clock);
+ clk_put(clk);
+}
diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h
new file mode 100644
index 000000000000..bc52b35e6bb2
--- /dev/null
+++ b/drivers/net/ipa/ipa_clock.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_CLOCK_H_
+#define _IPA_CLOCK_H_
+
+struct device;
+
+struct ipa;
+
+/**
+ * ipa_clock_init() - Initialize IPA clocking
+ * @dev: IPA device
+ *
+ * @Return: A pointer to an ipa_clock structure, or a pointer-coded error
+ */
+struct ipa_clock *ipa_clock_init(struct device *dev);
+
+/**
+ * ipa_clock_exit() - Inverse of ipa_clock_init()
+ * @clock: IPA clock pointer
+ */
+void ipa_clock_exit(struct ipa_clock *clock);
+
+/**
+ * ipa_clock_get() - Get an IPA clock reference
+ * @ipa: IPA pointer
+ *
+ * This call blocks if this is the first reference.
+ */
+void ipa_clock_get(struct ipa *ipa);
+
+/**
+ * ipa_clock_get_additional() - Get an IPA clock reference if not first
+ * @ipa: IPA pointer
+ *
+ * This returns immediately, and takes a reference only if it would not
+ * be the first one.
+ */
+bool ipa_clock_get_additional(struct ipa *ipa);
+
+/**
+ * ipa_clock_put() - Drop an IPA clock reference
+ * @ipa: IPA pointer
+ *
+ * This drops a clock reference. If the last reference is being dropped,
+ * the clock is stopped and RX endpoints are suspended. This call will
+ * not block unless the last reference is dropped.
+ */
+void ipa_clock_put(struct ipa *ipa);
+
+#endif /* _IPA_CLOCK_H_ */
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
new file mode 100644
index 000000000000..cee417181f98
--- /dev/null
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+
+/**
+ * DOC: IPA Immediate Commands
+ *
+ * The AP command TX endpoint is used to issue immediate commands to the IPA.
+ * An immediate command is generally used to request the IPA do something
+ * other than data transfer to another endpoint.
+ *
+ * Immediate commands are represented by GSI transactions just like other
+ * transfer requests, each represented by a single GSI TRE. Each immediate
+ * command has a well-defined format, having a payload of a known length.
+ * This allows the transfer element's length field to be used to hold an
+ * immediate command's opcode. The payload for a command resides in DRAM
+ * and is described by a single scatterlist entry in its transaction.
+ * Commands do not require a transaction completion callback. To commit
+ * an immediate command transaction, either gsi_trans_commit_wait() or
+ * gsi_trans_commit_wait_timeout() is used.
+ */
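+
+/* Illustrative sketch (not part of this patch): issuing one immediate
+ * command, assuming the caller already knows @offset and @size and has
+ * a DMA-able buffer at @addr:
+ *
+ *	trans = ipa_cmd_trans_alloc(ipa, 1);
+ *	if (!trans)
+ *		return -EBUSY;
+ *	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
+ *	gsi_trans_commit_wait(trans);
+ */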
+
+/* Some commands can wait until indicated pipeline stages are clear */
+enum pipeline_clear_options {
+ pipeline_clear_hps = 0,
+ pipeline_clear_src_grp = 1,
+ pipeline_clear_full = 2,
+};
+
+/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
+
+struct ipa_cmd_hw_ip_fltrt_init {
+ __le64 hash_rules_addr;
+ __le64 flags;
+ __le64 nhash_rules_addr;
+};
+
+/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
+#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK GENMASK_ULL(11, 0)
+#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK GENMASK_ULL(27, 12)
+#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK GENMASK_ULL(39, 28)
+#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK GENMASK_ULL(55, 40)
+
+/* IPA_CMD_HDR_INIT_LOCAL */
+
+struct ipa_cmd_hw_hdr_init_local {
+ __le64 hdr_table_addr;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
+#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK GENMASK(11, 0)
+#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK GENMASK(27, 12)
+
+/* IPA_CMD_REGISTER_WRITE */
+
+/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
+
+#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
+#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
+
+struct ipa_cmd_register_write {
+ __le16 flags; /* Unused/reserved for IPA v3.5.1 */
+ __le16 offset;
+ __le32 value;
+ __le32 value_mask;
+ __le32 clear_options; /* Unused/reserved for IPA v4.0+ */
+};
+
+/* Field masks for ipa_cmd_register_write structure fields */
+/* The next field is present for IPA v4.0 and above */
+#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK GENMASK(14, 11)
+/* The next field is present for IPA v3.5.1 only */
+#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK GENMASK(15, 15)
+
+/* The next field and its values are present for IPA v3.5.1 only */
+#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK GENMASK(1, 0)
+
+/* IPA_CMD_IP_PACKET_INIT */
+
+struct ipa_cmd_ip_packet_init {
+ u8 dest_endpoint;
+ u8 reserved[7];
+};
+
+/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
+#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
+
+/* IPA_CMD_DMA_TASK_32B_ADDR */
+
+/* This opcode gets modified with a DMA operation count */
+
+#define DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK GENMASK(15, 8)
+
+struct ipa_cmd_hw_dma_task_32b_addr {
+ __le16 flags;
+ __le16 size;
+ __le32 addr;
+ __le16 packet_size;
+ u8 reserved[6];
+};
+
+/* Field masks for ipa_cmd_hw_dma_task_32b_addr flags field */
+#define DMA_TASK_32B_ADDR_FLAGS_SW_RSVD_FMASK GENMASK(10, 0)
+#define DMA_TASK_32B_ADDR_FLAGS_CMPLT_FMASK GENMASK(11, 11)
+#define DMA_TASK_32B_ADDR_FLAGS_EOF_FMASK GENMASK(12, 12)
+#define DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK GENMASK(13, 13)
+#define DMA_TASK_32B_ADDR_FLAGS_LOCK_FMASK GENMASK(14, 14)
+#define DMA_TASK_32B_ADDR_FLAGS_UNLOCK_FMASK GENMASK(15, 15)
+
+/* IPA_CMD_DMA_SHARED_MEM */
+
+/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
+
+#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
+#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
+
+struct ipa_cmd_hw_dma_mem_mem {
+ __le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
+ __le16 size;
+ __le16 local_addr;
+ __le16 flags;
+ __le64 system_addr;
+};
+
+/* Flag allowing atomic clear of target region after reading data (v4.0+) */
+#define DMA_SHARED_MEM_CLEAR_AFTER_READ GENMASK(15, 15)
+
+/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
+#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK GENMASK(0, 0)
+/* The next two fields are present for IPA v3.5.1 only. */
+#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK GENMASK(1, 1)
+#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK GENMASK(3, 2)
+
+/* IPA_CMD_IP_PACKET_TAG_STATUS */
+
+struct ipa_cmd_ip_packet_tag_status {
+ __le64 tag;
+};
+
+#define IP_PACKET_TAG_STATUS_TAG_FMASK GENMASK_ULL(63, 16)
+
+/* Immediate command payload */
+union ipa_cmd_payload {
+ struct ipa_cmd_hw_ip_fltrt_init table_init;
+ struct ipa_cmd_hw_hdr_init_local hdr_init_local;
+ struct ipa_cmd_register_write register_write;
+ struct ipa_cmd_ip_packet_init ip_packet_init;
+ struct ipa_cmd_hw_dma_task_32b_addr dma_task_32b_addr;
+ struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
+ struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
+};
+
+static void ipa_cmd_validate_build(void)
+{
+ /* The sizes of a filter and route tables need to fit into fields
+ * in the ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
+ * might not be used, non-hashed and hashed tables have the same
+ * maximum size. IPv4 and IPv6 filter tables have the same number
+ * of entries, and IPv4 and IPv6 route tables have the same number
+ * of entries.
+ */
+#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
+#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
+ BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
+ BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
+#undef TABLE_COUNT_MAX
+#undef TABLE_SIZE
+}
+
+#ifdef IPA_VALIDATE
+
+/* Validate a memory region holding a table */
+bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route, bool ipv6, bool hashed)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+
+ offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
+ : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ if (mem->offset > offset_max ||
+ ipa->mem_offset > offset_max - mem->offset) {
+ dev_err(dev, "IPv%c %s%s table region offset too large "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter",
+ ipa->mem_offset, mem->offset, offset_max);
+ return false;
+ }
+
+ if (mem->offset > ipa->mem_size ||
+ mem->size > ipa->mem_size - mem->offset) {
+ dev_err(dev, "IPv%c %s%s table region out of range "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter",
+ mem->offset, mem->size, ipa->mem_size);
+ return false;
+ }
+
+ return true;
+}
+
+/* Validate the memory region that holds headers */
+static bool ipa_cmd_header_valid(struct ipa *ipa)
+{
+ const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+ u32 size_max;
+ u32 size;
+
+ offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ if (mem->offset > offset_max ||
+ ipa->mem_offset > offset_max - mem->offset) {
+ dev_err(dev, "header table region offset too large "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipa->mem_offset, mem->offset, offset_max);
+ return false;
+ }
+
+ size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+ size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
+ size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) {
+ dev_err(dev, "header table region out of range "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ mem->offset, size, ipa->mem_size);
+ return false;
+ }
+
+ return true;
+}
+
+/* Indicate whether an offset can be used with a register_write command */
+static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
+ const char *name, u32 offset)
+{
+ struct ipa_cmd_register_write *payload;
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+ u32 bit_count;
+
+ /* The maximum offset in a register_write immediate command depends
+ * on the version of IPA. IPA v3.5.1 supports a 16-bit offset, but
+ * newer versions allow some additional high-order bits.
+ */
+ bit_count = BITS_PER_BYTE * sizeof(payload->offset);
+ if (ipa->version != IPA_VERSION_3_5_1)
+ bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ BUILD_BUG_ON(bit_count > 32);
+ offset_max = ~0U >> (32 - bit_count);
+
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
+ dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
+ ipa->mem_offset + offset, offset_max);
+ return false;
+ }
+
+ return true;
+}
+
+/* Check whether offsets passed to register_write are valid */
+static bool ipa_cmd_register_write_valid(struct ipa *ipa)
+{
+ const char *name;
+ u32 offset;
+
+ offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ name = "filter/route hash flush";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
+ name = "maximal endpoint status";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+
+ return true;
+}
+
+bool ipa_cmd_data_valid(struct ipa *ipa)
+{
+ if (!ipa_cmd_header_valid(ipa))
+ return false;
+
+ if (!ipa_cmd_register_write_valid(ipa))
+ return false;
+
+ return true;
+}
+
+#endif /* IPA_VALIDATE */
+
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct device *dev = channel->gsi->dev;
+ int ret;
+
+ /* This is as good a place as any to validate build constants */
+ ipa_cmd_validate_build();
+
+ /* Even though command payloads are allocated one at a time,
+ * a single transaction can require up to tlv_count of them,
+ * so we treat them as if that many can be allocated at once.
+ */
+ ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
+ sizeof(union ipa_cmd_payload),
+ tre_max, channel->tlv_count);
+ if (ret)
+ return ret;
+
+ /* Each TRE needs a command info structure */
+ ret = gsi_trans_pool_init(&trans_info->info_pool,
+ sizeof(struct ipa_cmd_info),
+ tre_max, channel->tlv_count);
+ if (ret)
+ gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
+
+ return ret;
+}
+
+void ipa_cmd_pool_exit(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct device *dev = channel->gsi->dev;
+
+ gsi_trans_pool_exit(&trans_info->info_pool);
+ gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
+}
+
+static union ipa_cmd_payload *
+ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
+{
+ struct gsi_trans_info *trans_info;
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;
+
+ return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
+}
+
+/* If hash_size is 0, hash_offset and hash_addr are ignored. */
+void ipa_cmd_table_init_add(struct gsi_trans *trans,
+ enum ipa_cmd_opcode opcode, u16 size, u32 offset,
+ dma_addr_t addr, u16 hash_size, u32 hash_offset,
+ dma_addr_t hash_addr)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_hw_ip_fltrt_init *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u64 val;
+
+ /* Record the non-hash table offset and size */
+ offset += ipa->mem_offset;
+ val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+
+ /* The hash table offset and address are zero if its size is 0 */
+ if (hash_size) {
+ /* Record the hash table offset and size */
+ hash_offset += ipa->mem_offset;
+ val |= u64_encode_bits(hash_offset,
+ IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
+ val |= u64_encode_bits(hash_size,
+ IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
+ }
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->table_init;
+
+ /* Fill in all offsets and sizes and the non-hash table address */
+ if (hash_size)
+ payload->hash_rules_addr = cpu_to_le64(hash_addr);
+ payload->flags = cpu_to_le64(val);
+ payload->nhash_rules_addr = cpu_to_le64(addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Initialize header space in IPA-local memory */
+void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_hw_hdr_init_local *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u32 flags;
+
+ offset += ipa->mem_offset;
+
+ /* With this command we tell the IPA where in its local memory the
+ * header tables reside. The content of the buffer provided is
+ * also written via DMA into that space. The IPA hardware owns
+ * the table, but the AP must initialize it.
+ */
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->hdr_init_local;
+
+ payload->hdr_table_addr = cpu_to_le64(addr);
+ flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+ flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ payload->flags = cpu_to_le32(flags);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
+ u32 mask, bool clear_full)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct ipa_cmd_register_write *payload;
+ union ipa_cmd_payload *cmd_payload;
+ u32 opcode = IPA_CMD_REGISTER_WRITE;
+ dma_addr_t payload_addr;
+ u32 clear_option;
+ u32 options;
+ u16 flags;
+
+ /* pipeline_clear_src_grp is not used */
+ clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ u16 offset_high;
+ u32 val;
+
+ /* Opcode encodes pipeline clear options */
+ /* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
+ val = u16_encode_bits(clear_option,
+ REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
+ opcode |= val;
+
+ /* Extract the high 4 bits from the offset */
+ offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
+ offset &= (1 << 16) - 1;
+
+ /* Encode the high 4 bits into the flags field */
+ flags = u16_encode_bits(offset_high,
+ REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ options = 0; /* reserved */
+
+ } else {
+ flags = 0; /* SKIP_CLEAR flag is always 0 */
+ options = u16_encode_bits(clear_option,
+ REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
+ }
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->register_write;
+
+ payload->flags = cpu_to_le16(flags);
+ payload->offset = cpu_to_le16((u16)offset);
+ payload->value = cpu_to_le32(value);
+ payload->value_mask = cpu_to_le32(mask);
+ payload->clear_options = cpu_to_le32(options);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ DMA_NONE, opcode);
+}
+
+/* Skip IP packet processing on the next data transfer on a TX channel */
+static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_ip_packet_init *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+
+ /* assert(endpoint_id <
+ field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->ip_packet_init;
+
+ payload->dest_endpoint = u8_encode_bits(endpoint_id,
+ IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Use a 32-bit DMA command to zero a block of memory */
+void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
+ dma_addr_t addr, bool toward_ipa)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_DMA_TASK_32B_ADDR;
+ struct ipa_cmd_hw_dma_task_32b_addr *payload;
+ union ipa_cmd_payload *cmd_payload;
+ enum dma_data_direction direction;
+ dma_addr_t payload_addr;
+ u16 flags;
+
+ /* assert(addr <= U32_MAX); */
+ addr &= GENMASK_ULL(31, 0);
+
+ /* The opcode encodes the number of DMA operations in the high byte */
+ opcode |= u16_encode_bits(1, DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK);
+
+ direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ /* complete: 0 = don't interrupt; eof: 0 = don't assert eot */
+ flags = DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK;
+ /* lock: 0 = don't lock endpoint; unlock: 0 = don't unlock */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->dma_task_32b_addr;
+
+ payload->flags = cpu_to_le16(flags);
+ payload->size = cpu_to_le16(size);
+ payload->addr = cpu_to_le32((u32)addr);
+ payload->packet_size = cpu_to_le16(size);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Use a DMA command to read or write a block of IPA-resident memory */
+void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr, bool toward_ipa)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
+ struct ipa_cmd_hw_dma_mem_mem *payload;
+ union ipa_cmd_payload *cmd_payload;
+ enum dma_data_direction direction;
+ dma_addr_t payload_addr;
+ u16 flags;
+
+ /* size and offset must fit in 16 bit fields */
+ /* assert(size > 0 && size <= U16_MAX); */
+ /* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
+
+ offset += ipa->mem_offset;
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->dma_shared_mem;
+
+ /* payload->clear_after_read was reserved prior to IPA v4.0. It's
+ * never needed for current code, so it's 0 regardless of version.
+ */
+ payload->size = cpu_to_le16(size);
+ payload->local_addr = cpu_to_le16(offset);
+ /* payload->flags:
+ * direction: 0 = write to IPA, 1 = read from IPA
+ * Starting at v4.0 these are reserved; either way, all zero:
+ * pipeline clear: 0 = wait for pipeline clear (don't skip)
+ * clear_options: 0 = pipeline_clear_hps
+ * Instead, for v4.0+ these are encoded in the opcode. But again
+ * since both values are 0 we won't bother OR'ing them in.
+ */
+ flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
+ payload->flags = cpu_to_le16(flags);
+ payload->system_addr = cpu_to_le64(addr);
+
+ direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_ip_packet_tag_status *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+
+ /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->ip_packet_tag_status;
+
+ payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Issue a small command TX data transfer */
+static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
+ union ipa_cmd_payload *payload;
+ dma_addr_t payload_addr;
+
+ /* assert(size <= sizeof(*payload)); */
+
+ /* Just transfer a zero-filled payload structure */
+ payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+void ipa_cmd_tag_process_add(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+
+ ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+ ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
+ ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
+ ipa_cmd_transfer_add(trans, 4);
+}
+
+/* Returns the number of commands required for the tag process */
+u32 ipa_cmd_tag_process_count(void)
+{
+ return 4;
+}
+
+static struct ipa_cmd_info *
+ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
+{
+ struct gsi_channel *channel;
+
+ channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
+
+ return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
+}
+
+/* Allocate a transaction for the command TX endpoint */
+struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
+{
+ struct ipa_endpoint *endpoint;
+ struct gsi_trans *trans;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+
+ trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
+ tre_count, DMA_NONE);
+ if (trans)
+ trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
+
+ return trans;
+}
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
new file mode 100644
index 000000000000..4917525b3a47
--- /dev/null
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_CMD_H_
+#define _IPA_CMD_H_
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+struct sk_buff;
+struct scatterlist;
+
+struct ipa;
+struct ipa_mem;
+struct gsi_trans;
+struct gsi_channel;
+
+/**
+ * enum ipa_cmd_opcode: IPA immediate commands
+ *
+ * All immediate commands are issued using the AP command TX endpoint.
+ * The numeric values here are the opcodes for IPA v3.5.1 hardware.
+ *
+ * IPA_CMD_NONE is a special (invalid) value that's used to indicate
+ * a request is *not* an immediate command.
+ */
+enum ipa_cmd_opcode {
+ IPA_CMD_NONE = 0,
+ IPA_CMD_IP_V4_FILTER_INIT = 3,
+ IPA_CMD_IP_V6_FILTER_INIT = 4,
+ IPA_CMD_IP_V4_ROUTING_INIT = 7,
+ IPA_CMD_IP_V6_ROUTING_INIT = 8,
+ IPA_CMD_HDR_INIT_LOCAL = 9,
+ IPA_CMD_REGISTER_WRITE = 12,
+ IPA_CMD_IP_PACKET_INIT = 16,
+ IPA_CMD_DMA_TASK_32B_ADDR = 17,
+ IPA_CMD_DMA_SHARED_MEM = 19,
+ IPA_CMD_IP_PACKET_TAG_STATUS = 20,
+};
+
+/**
+ * struct ipa_cmd_info - information needed for an IPA immediate command
+ *
+ * @opcode: The command opcode.
+ * @direction: Direction of data transfer for DMA commands
+ */
+struct ipa_cmd_info {
+ enum ipa_cmd_opcode opcode;
+ enum dma_data_direction direction;
+};
+
+
+#ifdef IPA_VALIDATE
+
+/**
+ * ipa_cmd_table_valid() - Validate a memory region holding a table
+ * @ipa: - IPA pointer
+ * @mem: - IPA memory region descriptor
+ * @route: - Whether the region holds a route or filter table
+ * @ipv6: - Whether the table is for IPv6 or IPv4
+ * @hashed: - Whether the table is hashed or non-hashed
+ *
+ * @Return: true if region is valid, false otherwise
+ */
+bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route, bool ipv6, bool hashed);
+
+/**
+ * ipa_cmd_data_valid() - Validate command-related configuration
+ * @ipa: - IPA pointer
+ *
+ * @Return: true if assumptions required for command are valid
+ */
+bool ipa_cmd_data_valid(struct ipa *ipa);
+
+#else /* !IPA_VALIDATE */
+
+static inline bool ipa_cmd_table_valid(struct ipa *ipa,
+ const struct ipa_mem *mem, bool route,
+ bool ipv6, bool hashed)
+{
+ return true;
+}
+
+static inline bool ipa_cmd_data_valid(struct ipa *ipa)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/**
+ * ipa_cmd_pool_init() - initialize command channel pools
+ * @channel: AP->IPA command TX GSI channel pointer
+ * @tre_count: Number of pool elements to allocate
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_count);
+
+/**
+ * ipa_cmd_pool_exit() - Inverse of ipa_cmd_pool_init()
+ * @channel: AP->IPA command TX GSI channel pointer
+ */
+void ipa_cmd_pool_exit(struct gsi_channel *channel);
+
+/**
+ * ipa_cmd_table_init_add() - Add table init command to a transaction
+ * @trans: GSI transaction
+ * @opcode: IPA immediate command opcode
+ * @size: Size of non-hashed routing table memory
+ * @offset: Offset in IPA shared memory of non-hashed routing table memory
+ * @addr: DMA address of non-hashed table data to write
+ * @hash_size: Size of hashed routing table memory
+ * @hash_offset: Offset in IPA shared memory of hashed routing table memory
+ * @hash_addr: DMA address of hashed table data to write
+ *
+ * If hash_size is 0, hash_offset and hash_addr are ignored.
+ */
+void ipa_cmd_table_init_add(struct gsi_trans *trans, enum ipa_cmd_opcode opcode,
+ u16 size, u32 offset, dma_addr_t addr,
+ u16 hash_size, u32 hash_offset,
+ dma_addr_t hash_addr);
+
+/**
+ * ipa_cmd_hdr_init_local_add() - Add a header init command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of header memory in IPA local space
+ * @size: Size of header memory
+ * @addr: DMA address of buffer to be written from
+ *
+ * Defines and fills the location in IPA memory to use for headers.
+ */
+void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr);
+
+/**
+ * ipa_cmd_register_write_add() - Add a register write command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of register to be written
+ * @value: Value to be written
+ * @mask: Mask of bits in register to update with bits from value
+ * @clear_full: Pipeline clear option; true means full pipeline clear
+ */
+void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
+ u32 mask, bool clear_full);
+
+/**
+ * ipa_cmd_dma_task_32b_addr_add() - Add a 32-bit DMA command to a transaction
+ * @trans: GSI transaction
+ * @size: Number of bytes of memory to be transferred
+ * @addr: DMA address of buffer to be read into or written from
+ * @toward_ipa: true means write to IPA memory; false means read
+ */
+void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
+ dma_addr_t addr, bool toward_ipa);
+
+/**
+ * ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of IPA memory to be read or written
+ * @size: Number of bytes of memory to be transferred
+ * @addr: DMA address of buffer to be read into or written from
+ * @toward_ipa: true means write to IPA memory; false means read
+ */
+void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
+ u16 size, dma_addr_t addr, bool toward_ipa);
+
+/**
+ * ipa_cmd_tag_process_add() - Add IPA tag process commands to a transaction
+ * @trans: GSI transaction
+ */
+void ipa_cmd_tag_process_add(struct gsi_trans *trans);
+
+/**
+ * ipa_cmd_tag_process_count() - Number of commands in a tag process
+ *
+ * @Return: The number of elements to allocate in a transaction
+ * to hold tag process commands
+ */
+u32 ipa_cmd_tag_process_count(void);
+
+/**
+ * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
+ * @ipa: IPA pointer
+ * @tre_count: Number of elements in the transaction
+ *
+ * @Return: A GSI transaction structure, or a null pointer if all
+ * available transactions are in use
+ */
+struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
+
+#endif /* _IPA_CMD_H_ */
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
new file mode 100644
index 000000000000..042b5fc3c135
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2019-2020 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/* Endpoint configuration for the SC7180 SoC. */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 1,
+ .endpoint_id = 6,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_DMA_ONLY,
+ .config = {
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 6,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 0,
+ .endpoint_id = 1,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .seq_type =
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 3,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 6,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_COMMAND_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 1,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ },
+ [IPA_ENDPOINT_MODEM_LAN_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 3,
+ .endpoint_id = 13,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 4,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 10,
+ .toward_ipa = false,
+ },
+};
+
+/* For the SC7180, resource groups are allocated this way:
+ * group 0: UL_DL
+ */
+static const struct ipa_resource_src ipa_resource_src[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ .limits[0] = {
+ .min = 3,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ .limits[0] = {
+ .min = 3,
+ .max = 3,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ .limits[0] = {
+ .min = 10,
+ .max = 10,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ .limits[0] = {
+ .min = 1,
+ .max = 1,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+ .limits[0] = {
+ .min = 5,
+ .max = 5,
+ },
+ },
+};
+
+static const struct ipa_resource_dst ipa_resource_dst[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ .limits[0] = {
+ .min = 3,
+ .max = 3,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+ .limits[0] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+};
+
+/* Resource configuration for the SC7180 SoC. */
+static const struct ipa_resource_data ipa_resource_data = {
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region configuration for the SC7180 SoC. */
+static const struct ipa_mem ipa_mem_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0290,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0310,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0318,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0398,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x03a0,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0420,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0428,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x04a8,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x05e8,
+ .size = 0x0000,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x05f0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x07f0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_PDN_CONFIG] = {
+ .offset = 0x09f8,
+ .size = 0x0050,
+ .canary_count = 2,
+ },
+ [IPA_MEM_STATS_QUOTA] = {
+ .offset = 0x0a50,
+ .size = 0x0060,
+ .canary_count = 2,
+ },
+ [IPA_MEM_STATS_TETHERING] = {
+ .offset = 0x0ab0,
+ .size = 0x0140,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_DROP] = {
+ .offset = 0x0bf0,
+ .size = 0,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x0bf0,
+ .size = 0x140c,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x2000,
+ .size = 0,
+ .canary_count = 1,
+ },
+};
+
+/* Configuration data for the SC7180 SoC. */
+const struct ipa_data ipa_data_sc7180 = {
+ .version = IPA_VERSION_4_2,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_count = ARRAY_SIZE(ipa_mem_data),
+ .mem_data = ipa_mem_data,
+};
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
new file mode 100644
index 000000000000..0d9c36e1e806
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/* Endpoint configuration for the SDM845 SoC. */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 4,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_DMA_ONLY,
+ .config = {
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 3,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .seq_type =
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ .delay = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 10,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_COMMAND_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 1,
+ .endpoint_id = 4,
+ .toward_ipa = true,
+ },
+ [IPA_ENDPOINT_MODEM_LAN_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 3,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_LAN_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 3,
+ .endpoint_id = 13,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 4,
+ .endpoint_id = 6,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 12,
+ .toward_ipa = false,
+ },
+};
+
+/* For the SDM845, resource groups are allocated this way:
+ * group 0: LWA_DL
+ * group 1: UL_DL
+ */
+static const struct ipa_resource_src ipa_resource_src[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ .limits[0] = {
+ .min = 1,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ .limits[0] = {
+ .min = 10,
+ .max = 10,
+ },
+ .limits[1] = {
+ .min = 10,
+ .max = 10,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ .limits[0] = {
+ .min = 12,
+ .max = 12,
+ },
+ .limits[1] = {
+ .min = 14,
+ .max = 14,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ .limits[0] = {
+ .min = 0,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 0,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+ .limits[0] = {
+ .min = 14,
+ .max = 14,
+ },
+ .limits[1] = {
+ .min = 20,
+ .max = 20,
+ },
+ },
+};
+
+static const struct ipa_resource_dst ipa_resource_dst[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ .limits[0] = {
+ .min = 4,
+ .max = 4,
+ },
+ .limits[1] = {
+ .min = 4,
+ .max = 4,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+ .limits[0] = {
+ .min = 2,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+};
+
+/* Resource configuration for the SDM845 SoC. */
+static const struct ipa_resource_data ipa_resource_data = {
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region configuration for the SDM845 SoC. */
+static const struct ipa_mem ipa_mem_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x0688,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x07c8,
+ .size = 0x0000,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x07d0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x09d0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x0bd8,
+ .size = 0x1024,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x1c00,
+ .size = 0x0400,
+ .canary_count = 1,
+ },
+};
+
+/* Configuration data for the SDM845 SoC. */
+const struct ipa_data ipa_data_sdm845 = {
+ .version = IPA_VERSION_3_5_1,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_count = ARRAY_SIZE(ipa_mem_data),
+ .mem_data = ipa_mem_data,
+};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
new file mode 100644
index 000000000000..7110de2de817
--- /dev/null
+++ b/drivers/net/ipa/ipa_data.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_DATA_H_
+#define _IPA_DATA_H_
+
+#include <linux/types.h>
+
+#include "ipa_version.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/**
+ * DOC: IPA/GSI Configuration Data
+ *
+ * Boot-time configuration data is used to define the configuration of the
+ * IPA and GSI resources to use for a given platform. This data is supplied
+ * via the Device Tree match table, associated with a particular compatible
+ * string. The data defines information about resources, endpoints, and
+ * channels.
+ *
+ * Resources are data structures used internally by the IPA hardware. The
+ * configuration data defines the number (or limits of the number) of various
+ * types of these resources.
+ *
+ * Endpoint configuration data defines properties of both IPA endpoints and
+ * GSI channels. A channel is a GSI construct, and represents a single
+ * communication path between the IPA and a particular execution environment
+ * (EE), such as the AP or Modem. Each EE has a set of channels associated
+ * with it, and each channel has an ID unique for that EE. For the most part
+ * the only GSI channels of concern to this driver belong to the AP.
+ *
+ * An endpoint is an IPA construct representing a single channel anywhere
+ * in the system. An IPA endpoint ID maps directly to an (EE, channel_id)
+ * pair. Generally, this driver is concerned only with endpoints
+ * associated with the AP; however, this will change when support
+ * for routing (etc.) is added. IPA endpoint and GSI channel
+ * configuration data are defined
+ * together, establishing the endpoint_id->(EE, channel_id) mapping.
+ *
+ * Endpoint configuration data consists of three parts: properties that
+ * are common to IPA and GSI (EE ID, channel ID, endpoint ID, and direction);
+ * properties associated with the GSI channel; and properties associated with
+ * the IPA endpoint.
+ */
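+
+/* Illustrative sketch only (not part of this header): the DOC text
+ * above says the configuration data is supplied via the Device Tree
+ * match table. In the driver's probe code that association would
+ * typically look like the following, where the exact compatible
+ * strings shown are assumptions:
+ *
+ *	static const struct of_device_id ipa_match[] = {
+ *		{ .compatible = "qcom,sdm845-ipa", .data = &ipa_data_sdm845 },
+ *		{ .compatible = "qcom,sc7180-ipa", .data = &ipa_data_sc7180 },
+ *		{ },
+ *	};
+ *
+ * of_device_get_match_data() then yields the right struct ipa_data
+ * at probe time.
+ */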
+
+/* The maximum value returned by ipa_resource_group_count() */
+#define IPA_RESOURCE_GROUP_COUNT 4
+
+/**
+ * struct gsi_channel_data - GSI channel configuration data
+ * @tre_count: number of TREs in the channel ring
+ * @event_count: number of slots in the associated event ring
+ * @tlv_count: number of entries in channel's TLV FIFO
+ *
+ * A GSI channel is a unidirectional means of transferring data to or
+ * from (and through) the IPA. A GSI channel has a ring buffer made
+ * up of "transfer elements" (TREs) that specify individual data transfers
+ * or IPA immediate commands. TREs are filled by the AP, and control
+ * is passed to IPA hardware by writing the last written element
+ * into a doorbell register.
+ *
+ * When data transfer commands have completed, the GSI generates an
+ * event (a structure of data) and optionally signals the AP with
+ * an interrupt. Event structures are implemented by another ring
+ * buffer, directed toward the AP from the IPA.
+ *
+ * The input to a GSI channel is a FIFO of type/length/value (TLV)
+ * elements, and the size of this FIFO limits the number of TREs
+ * that can be included in a single transaction.
+ */
+struct gsi_channel_data {
+ u16 tre_count;
+ u16 event_count;
+ u8 tlv_count;
+};
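+
+/* A minimal sketch (the helper name is hypothetical, not part of
+ * this header) of how @tlv_count bounds a transaction: a single
+ * transaction may not use more TREs than fit in the channel's TLV
+ * FIFO.
+ *
+ *	static inline bool
+ *	gsi_channel_data_trans_fits(const struct gsi_channel_data *data,
+ *				    u32 tre_count)
+ *	{
+ *		return tre_count <= data->tlv_count;
+ *	}
+ */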
+
+/**
+ * struct ipa_endpoint_tx_data - configuration data for TX endpoints
+ * @status_endpoint: endpoint to which status elements are sent
+ * @delay: whether endpoint starts in delay mode
+ *
+ * Delay mode prevents a TX endpoint from transmitting anything, even if
+ * commands have been presented to the hardware. Once the endpoint exits
+ * delay mode, queued transfer commands are sent.
+ *
+ * The @status_endpoint is only valid if the endpoint's @status_enable
+ * flag is set.
+ */
+struct ipa_endpoint_tx_data {
+ enum ipa_endpoint_name status_endpoint;
+ bool delay;
+};
+
+/**
+ * struct ipa_endpoint_rx_data - configuration data for RX endpoints
+ * @pad_align: power-of-2 boundary to which packet payload is aligned
+ * @aggr_close_eof: whether aggregation closes on end-of-frame
+ *
+ * With each packet it transfers, the IPA hardware can perform certain
+ * transformations of its packet data. One of these is adding pad bytes
+ * to the end of the packet data so the result ends on a power-of-2 boundary.
+ *
+ * It is also able to aggregate multiple packets into a single receive buffer.
+ * Aggregation is "open" while a buffer is being filled, and "closes" when
+ * certain criteria are met. One of those criteria is the sender indicating
+ * a "frame" consisting of several transfers has ended.
+ */
+struct ipa_endpoint_rx_data {
+ u32 pad_align;
+ bool aggr_close_eof;
+};
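+
+/* Worked example: the SC7180 and SDM845 configuration data in this
+ * patch use .pad_align = ilog2(sizeof(u32)), i.e. 2, which (assuming
+ * the hardware's pad-to-alignment field takes a log2 value) requests
+ * padding of the payload to a 4-byte (1 << 2) boundary.
+ */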
+
+/**
+ * struct ipa_endpoint_config_data - IPA endpoint hardware configuration
+ * @checksum: whether checksum offload is enabled
+ * @qmap: whether endpoint uses QMAP protocol
+ * @aggregation: whether endpoint supports aggregation
+ * @status_enable: whether endpoint uses status elements
+ * @dma_mode: whether endpoint operates in DMA mode
+ * @dma_endpoint: peer endpoint, if operating in DMA mode
+ * @tx: TX-specific endpoint information (see above)
+ * @rx: RX-specific endpoint information (see above)
+ */
+struct ipa_endpoint_config_data {
+ bool checksum;
+ bool qmap;
+ bool aggregation;
+ bool status_enable;
+ bool dma_mode;
+ enum ipa_endpoint_name dma_endpoint;
+ union {
+ struct ipa_endpoint_tx_data tx;
+ struct ipa_endpoint_rx_data rx;
+ };
+};
+
+/**
+ * struct ipa_endpoint_data - IPA endpoint configuration data
+ * @filter_support: whether endpoint supports filtering
+ * @seq_type: hardware sequencer type used for endpoint
+ * @config: hardware configuration (see above)
+ *
+ * Not all endpoints support the IPA filtering capability. A filter table
+ * defines the filters to apply for those endpoints that support it. The
+ * AP is responsible for initializing this table, and it must include entries
+ * for non-AP endpoints. For this reason we define *all* endpoints used
+ * in the system, and indicate whether they support filtering.
+ *
+ * The remaining endpoint configuration data applies only to AP endpoints.
+ * The IPA hardware is implemented by sequencers, and the AP must program
+ * the type(s) of these sequencers at initialization time. The remaining
+ * endpoint configuration data is defined above.
+ */
+struct ipa_endpoint_data {
+ bool filter_support;
+ /* The next two are specified only for AP endpoints */
+ enum ipa_seq_type seq_type;
+ struct ipa_endpoint_config_data config;
+};
+
+/**
+ * struct ipa_gsi_endpoint_data - GSI channel/IPA endpoint data
+ * @ee_id: GSI execution environment ID
+ * @channel_id: GSI channel ID
+ * @endpoint_id: IPA endpoint ID
+ * @toward_ipa: direction of data transfer
+ * @channel: GSI channel configuration data (see above)
+ * @endpoint: IPA endpoint configuration data (see above)
+ */
+struct ipa_gsi_endpoint_data {
+ u8 ee_id; /* enum gsi_ee_id */
+ u8 channel_id;
+ u8 endpoint_id;
+ bool toward_ipa;
+
+ struct gsi_channel_data channel;
+ struct ipa_endpoint_data endpoint;
+};
+
+/** enum ipa_resource_type_src - source resource types */
+enum ipa_resource_type_src {
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+};
+
+/** enum ipa_resource_type_dst - destination resource types */
+enum ipa_resource_type_dst {
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/**
+ * struct ipa_resource_limits - minimum and maximum resource counts
+ * @min: minimum number of resources of a given type
+ * @max: maximum number of resources of a given type
+ */
+struct ipa_resource_limits {
+ u32 min;
+ u32 max;
+};
+
+/**
+ * struct ipa_resource_src - source endpoint group resource usage
+ * @type: source group resource type
+ * @limits: array of limits to use for each resource group
+ */
+struct ipa_resource_src {
+ enum ipa_resource_type_src type;
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+};
+
+/**
+ * struct ipa_resource_dst - destination endpoint group resource usage
+ * @type: destination group resource type
+ * @limits: array of limits to use for each resource group
+ */
+struct ipa_resource_dst {
+ enum ipa_resource_type_dst type;
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+};
+
+/**
+ * struct ipa_resource_data - IPA resource configuration data
+ * @resource_src_count: number of entries in the resource_src array
+ * @resource_src: source endpoint group resources
+ * @resource_dst_count: number of entries in the resource_dst array
+ * @resource_dst: destination endpoint group resources
+ *
+ * In order to manage quality of service between endpoints, certain resources
+ * required for operation are allocated to groups of endpoints. Generally
+ * this information is invisible to the AP, but the AP is responsible for
+ * programming it at initialization time, so we specify it here.
+ */
+struct ipa_resource_data {
+ u32 resource_src_count;
+ const struct ipa_resource_src *resource_src;
+ u32 resource_dst_count;
+ const struct ipa_resource_dst *resource_dst;
+};
+
+/**
+ * struct ipa_mem_data - IPA-local memory region description
+ * @offset: offset in IPA memory space to base of the region
+ * @size: size in bytes of the region
+ * @canary_count: number of 32-bit "canary" values that precede region
+ */
+struct ipa_mem_data {
+ u32 offset;
+ u16 size;
+ u16 canary_count;
+};
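+
+/* A hedged consistency sketch (hypothetical helper, not part of this
+ * header): canary values occupy the 32-bit words immediately before
+ * @offset, so a region must start far enough into IPA memory to
+ * leave room for them.
+ *
+ *	static inline bool
+ *	ipa_mem_data_valid(const struct ipa_mem_data *mem)
+ *	{
+ *		return mem->offset >= mem->canary_count * sizeof(u32);
+ *	}
+ */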
+
+/**
+ * struct ipa_data - combined IPA/GSI configuration data
+ * @version: IPA hardware version
+ * @endpoint_count: number of entries in endpoint_data array
+ * @endpoint_data: IPA endpoint/GSI channel data
+ * @resource_data: IPA resource configuration data
+ * @mem_count: number of entries in mem_data array
+ * @mem_data: IPA-local shared memory region data
+ */
+struct ipa_data {
+ enum ipa_version version;
+ u32 endpoint_count; /* # entries in endpoint_data[] */
+ const struct ipa_gsi_endpoint_data *endpoint_data;
+ const struct ipa_resource_data *resource_data;
+ u32 mem_count; /* # entries in mem_data[] */
+ const struct ipa_mem *mem_data;
+};
+
+extern const struct ipa_data ipa_data_sdm845;
+extern const struct ipa_data ipa_data_sc7180;
+
+#endif /* _IPA_DATA_H_ */
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
new file mode 100644
index 000000000000..a21534f1462f
--- /dev/null
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -0,0 +1,1703 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/if_rmnet.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+#include "ipa_modem.h"
+#include "ipa_table.h"
+#include "ipa_gsi.h"
+
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+
+#define IPA_REPLENISH_BATCH 16
+
+/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
+#define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */
+
+/* The amount of RX buffer space consumed by standard skb overhead */
+#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
+
+#define IPA_ENDPOINT_STOP_RX_RETRIES 10
+#define IPA_ENDPOINT_STOP_RX_SIZE 1 /* bytes */
+
+#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
+#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
+
+#define ENDPOINT_STOP_DMA_TIMEOUT 15 /* milliseconds */
+
+/** enum ipa_status_opcode - status element opcode hardware values */
+enum ipa_status_opcode {
+ IPA_STATUS_OPCODE_PACKET = 0x01,
+ IPA_STATUS_OPCODE_NEW_FRAG_RULE = 0x02,
+ IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
+ IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
+ IPA_STATUS_OPCODE_LOG = 0x10,
+ IPA_STATUS_OPCODE_DCMP = 0x20,
+ IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
+};
+
+/** enum ipa_status_exception - status element exception type */
+enum ipa_status_exception {
+ /* 0 means no exception */
+ IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
+ IPA_STATUS_EXCEPTION_IPTYPE = 0x04,
+ IPA_STATUS_EXCEPTION_PACKET_LENGTH = 0x08,
+ IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
+ IPA_STATUS_EXCEPTION_SW_FILT = 0x20,
+	/* The meaning of the next value depends on the IP version */
+ IPA_STATUS_EXCEPTION_NAT = 0x40, /* IPv4 */
+ IPA_STATUS_EXCEPTION_IPV6CT = IPA_STATUS_EXCEPTION_NAT,
+};
+
+/* Status element provided by hardware */
+struct ipa_status {
+ u8 opcode; /* enum ipa_status_opcode */
+ u8 exception; /* enum ipa_status_exception */
+ __le16 mask;
+ __le16 pkt_len;
+ u8 endp_src_idx;
+ u8 endp_dst_idx;
+ __le32 metadata;
+ __le32 flags1;
+ __le64 flags2;
+ __le32 flags3;
+ __le32 flags4;
+};
+
+/* Field masks for struct ipa_status structure fields */
+
+#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
+
+#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
+
+#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK GENMASK(0, 0)
+#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK GENMASK(1, 1)
+#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK GENMASK(2, 2)
+#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK GENMASK(3, 3)
+#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK GENMASK(13, 4)
+#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK GENMASK(14, 14)
+#define IPA_STATUS_FLAGS1_RT_HASH_FMASK GENMASK(15, 15)
+#define IPA_STATUS_FLAGS1_UCP_FMASK GENMASK(16, 16)
+#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK GENMASK(21, 17)
+#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
+
+#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK GENMASK_ULL(0, 0)
+#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK GENMASK_ULL(13, 1)
+#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK GENMASK_ULL(15, 14)
+#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK GENMASK_ULL(63, 16)
+
+#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK GENMASK(7, 0)
+#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK GENMASK(31, 8)
+
+#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK GENMASK(0, 0)
+#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK GENMASK(10, 1)
+#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK GENMASK(11, 11)
+#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK GENMASK(15, 12)
+#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK GENMASK(31, 16)
+
+#ifdef IPA_VALIDATE
+
+static void ipa_endpoint_validate_build(void)
+{
+ /* The aggregation byte limit defines the point at which an
+ * aggregation window will close. It is programmed into the
+ * IPA hardware as a number of KB. We don't use "hard byte
+ * limit" aggregation, which means that we need to supply
+ * enough space in a receive buffer to hold a complete MTU
+ * plus normal skb overhead *after* that aggregation byte
+ * limit has been crossed.
+ *
+ * This check just ensures we don't define a receive buffer
+ * size that would exceed what we can represent in the field
+ * that is used to program its size.
+ */
+ BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
+ field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
+ IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
+
+ /* I honestly don't know where this requirement comes from. But
+ * it holds, and if we someday need to loosen the constraint we
+ * can try to track it down.
+ */
+ BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
+}
+
+static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *all_data,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct ipa_gsi_endpoint_data *other_data;
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_endpoint_name other_name;
+
+ if (ipa_gsi_endpoint_data_empty(data))
+ return true;
+
+ if (!data->toward_ipa) {
+ if (data->endpoint.filter_support) {
+ dev_err(dev, "filtering not supported for "
+ "RX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+
+ return true; /* Nothing more to check for RX */
+ }
+
+ if (data->endpoint.config.status_enable) {
+ other_name = data->endpoint.config.tx.status_endpoint;
+ if (other_name >= count) {
+ dev_err(dev, "status endpoint name %u out of range "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ /* Status endpoint must be defined... */
+ other_data = &all_data[other_name];
+ if (ipa_gsi_endpoint_data_empty(other_data)) {
+ dev_err(dev, "DMA endpoint name %u undefined "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ /* ...and has to be an RX endpoint... */
+ if (other_data->toward_ipa) {
+ dev_err(dev,
+ "status endpoint for endpoint %u not RX\n",
+ data->endpoint_id);
+ return false;
+ }
+
+ /* ...and if it's to be an AP endpoint... */
+ if (other_data->ee_id == GSI_EE_AP) {
+ /* ...make sure it has status enabled. */
+ if (!other_data->endpoint.config.status_enable) {
+ dev_err(dev,
+ "status not enabled for endpoint %u\n",
+ other_data->endpoint_id);
+ return false;
+ }
+ }
+ }
+
+ if (data->endpoint.config.dma_mode) {
+ other_name = data->endpoint.config.dma_endpoint;
+ if (other_name >= count) {
+ dev_err(dev, "DMA endpoint name %u out of range "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ other_data = &all_data[other_name];
+ if (ipa_gsi_endpoint_data_empty(other_data)) {
+ dev_err(dev, "DMA endpoint name %u undefined "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct ipa_gsi_endpoint_data *dp = data;
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_endpoint_name name;
+
+ ipa_endpoint_validate_build();
+
+ if (count > IPA_ENDPOINT_COUNT) {
+ dev_err(dev, "too many endpoints specified (%u > %u)\n",
+ count, IPA_ENDPOINT_COUNT);
+ return false;
+ }
+
+ /* Make sure needed endpoints have defined data */
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
+ dev_err(dev, "command TX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
+ dev_err(dev, "LAN RX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
+ dev_err(dev, "AP->modem TX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
+ dev_err(dev, "AP<-modem RX endpoint not defined\n");
+ return false;
+ }
+
+ for (name = 0; name < count; name++, dp++)
+ if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
+ return false;
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/* Allocate a transaction to use on a non-command endpoint */
+static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
+ u32 tre_count)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 channel_id = endpoint->channel_id;
+ enum dma_data_direction direction;
+
+ direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
+}
+
+/* suspend_delay represents suspend for RX, delay for TX endpoints.
+ * Note that suspend is not supported starting with IPA v4.0.
+ */
+static int
+ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
+{
+ u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 mask;
+ u32 val;
+
+	/* assert(ipa->version == IPA_VERSION_3_5_1); */
+ mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
+
+ val = ioread32(ipa->reg_virt + offset);
+ if (suspend_delay == !!(val & mask))
+ return -EALREADY; /* Already set to desired state */
+
+ val ^= mask;
+ iowrite32(val, ipa->reg_virt + offset);
+
+ return 0;
+}
+
+/* Enable or disable delay or suspend mode on all modem endpoints */
+void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
+{
+ bool support_suspend;
+ u32 endpoint_id;
+
+ /* DELAY mode doesn't work right on IPA v4.2 */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ /* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
+ support_suspend = ipa->version == IPA_VERSION_3_5_1;
+
+ for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
+
+ if (endpoint->ee_id != GSI_EE_MODEM)
+ continue;
+
+ /* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
+ if (endpoint->toward_ipa || support_suspend)
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ }
+}
+
+/* Reset all modem endpoints to use the default exception endpoint */
+int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+ struct gsi_trans *trans;
+ u32 count;
+
+ /* We need one command per modem TX endpoint. We can get an upper
+ * bound on that by assuming all initialized endpoints are modem->IPA.
+ * That won't happen, and we could be more precise, but this is fine
+	 * for now. We need to end the transaction with a "tag process."
+ */
+ count = hweight32(initialized) + ipa_cmd_tag_process_count();
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction to reset modem exception endpoints\n");
+ return -EBUSY;
+ }
+
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+ struct ipa_endpoint *endpoint;
+ u32 offset;
+
+ initialized ^= BIT(endpoint_id);
+
+ /* We only reset modem TX endpoints */
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
+ continue;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+
+ /* Value written is 0, and all bits are updated. That
+ * means status is disabled on the endpoint, and as a
+ * result all other fields in the register are ignored.
+ */
+ ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
+ }
+
+ ipa_cmd_tag_process_add(trans);
+
+ /* XXX This should have a 1 second timeout */
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ /* FRAG_OFFLOAD_EN is 0 */
+ if (endpoint->data->checksum) {
+ if (endpoint->toward_ipa) {
+ u32 checksum_offset;
+
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
+ CS_OFFLOAD_EN_FMASK);
+ /* Checksum header offset is in 4-byte units */
+ checksum_offset = sizeof(struct rmnet_map_header);
+ checksum_offset /= sizeof(u32);
+ val |= u32_encode_bits(checksum_offset,
+ CS_METADATA_HDR_OFFSET_FMASK);
+ } else {
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
+ CS_OFFLOAD_EN_FMASK);
+ }
+ } else {
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
+ CS_OFFLOAD_EN_FMASK);
+ }
+ /* CS_GEN_QMB_MASTER_SEL is 0 */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ if (endpoint->data->qmap) {
+ size_t header_size = sizeof(struct rmnet_map_header);
+
+ if (endpoint->toward_ipa && endpoint->data->checksum)
+ header_size += sizeof(struct rmnet_map_ul_csum_header);
+
+ val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
+ /* metadata is the 4 byte rmnet_map header itself */
+ val |= HDR_OFST_METADATA_VALID_FMASK;
+ val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
+ /* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
+ if (!endpoint->toward_ipa) {
+ u32 size_offset = offsetof(struct rmnet_map_header,
+ pkt_len);
+
+ val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
+ val |= u32_encode_bits(size_offset,
+ HDR_OFST_PKT_SIZE_FMASK);
+ }
+ /* HDR_A5_MUX is 0 */
+ /* HDR_LEN_INC_DEAGG_HDR is 0 */
+ /* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
+ }
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
+ u32 pad_align = endpoint->data->rx.pad_align;
+ u32 val = 0;
+
+ val |= HDR_ENDIANNESS_FMASK; /* big endian */
+ val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
+ /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+ /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ if (!endpoint->toward_ipa)
+ val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/**
+ * ipa_rmnet_mux_id_metadata_mask() - Mask selecting only the mux_id
+ *
+ * Generate a metadata mask value that will select only the mux_id
+ * field in an rmnet_map header structure. The mux_id is at offset
+ * 1 byte from the beginning of the structure, but the metadata
+ * value is treated as a 4-byte unit. So this mask must be computed
+ * with endianness in mind. Note that ipa_endpoint_init_hdr_metadata_mask()
+ * will convert this value to the proper byte order.
+ *
+ * Marked __always_inline because this is really computing a
+ * constant value.
+ */
+static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
+{
+ size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
+ u32 mux_id_mask = 0;
+ u8 *bytes;
+
+ bytes = (u8 *)&mux_id_mask;
+ bytes[mux_id_offset] = 0xff; /* mux_id is 1 byte */
+
+ return cpu_to_be32(mux_id_mask);
+}
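+
+/* Worked example of the computation above: mux_id is at offset 1 in
+ * struct rmnet_map_header, so on a little-endian CPU bytes[1] = 0xff
+ * yields mux_id_mask == 0x0000ff00 in host byte order; cpu_to_be32()
+ * then produces the big-endian value the hardware expects.
+ */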
+
+static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 val = 0;
+ u32 offset;
+
+ offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
+
+ if (!endpoint->toward_ipa && endpoint->data->qmap)
+ val = ipa_rmnet_mux_id_metadata_mask();
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
+ u32 val;
+
+ if (endpoint->toward_ipa && endpoint->data->dma_mode) {
+ enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
+ u32 dma_endpoint_id;
+
+ dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
+
+ val = u32_encode_bits(IPA_DMA, MODE_FMASK);
+ val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
+ } else {
+ val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
+ }
+ /* Other bitfields unspecified (and 0) */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/* Compute the aggregation size value to use for a given buffer size */
+static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
+{
+ /* We don't use "hard byte limit" aggregation, so we define the
+ * aggregation limit such that our buffer has enough space *after*
+ * that limit to receive a full MTU of data, plus overhead.
+ */
+ rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+
+ return rx_buffer_size / SZ_1K;
+}
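+
+/* Example arithmetic (approximate; IPA_MTU and the skb overhead are
+ * defined elsewhere): with an 8192-byte receive buffer, a 1500-byte
+ * MTU and a few hundred bytes of overhead, roughly 6400 bytes remain,
+ * so the aggregation byte limit programmed would be about
+ * 6400 / SZ_1K == 6 (KB).
+ */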
+
+static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ if (endpoint->data->aggregation) {
+ if (!endpoint->toward_ipa) {
+ u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ u32 limit;
+
+ val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
+ val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+ val |= u32_encode_bits(aggr_size,
+ AGGR_BYTE_LIMIT_FMASK);
+ limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
+ val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
+ AGGR_TIME_LIMIT_FMASK);
+ val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
+ if (endpoint->data->rx.aggr_close_eof)
+ val |= AGGR_SW_EOF_ACTIVE_FMASK;
+ /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
+ } else {
+ val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
+ AGGR_EN_FMASK);
+ val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
+ /* other fields ignored */
+ }
+ /* AGGR_FORCE_CLOSE is 0 */
+ } else {
+ val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
+ /* other fields ignored */
+ }
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/* A return value of 0 indicates an error */
+static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+{
+ u32 scale;
+ u32 base;
+ u32 val;
+
+ if (!microseconds)
+ return 0; /* invalid delay */
+
+ /* Timer is represented in units of clock ticks. */
+ if (ipa->version < IPA_VERSION_4_2)
+ return microseconds; /* XXX Needs to be computed */
+
+ /* IPA v4.2 represents the tick count as base * scale */
+ scale = 1; /* XXX Needs to be computed */
+ if (scale > field_max(SCALE_FMASK))
+ return 0; /* scale too big */
+
+ base = DIV_ROUND_CLOSEST(microseconds, scale);
+ if (base > field_max(BASE_VALUE_FMASK))
+ return 0; /* microseconds too big */
+
+ val = u32_encode_bits(scale, SCALE_FMASK);
+ val |= u32_encode_bits(base, BASE_VALUE_FMASK);
+
+ return val;
+}
+
+static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
+ u32 microseconds)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ u32 offset;
+ u32 val;
+
+ /* XXX We'll fix this when the register definition is clear */
+ if (microseconds) {
+ struct device *dev = &ipa->pdev->dev;
+
+ dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
+ endpoint_id);
+ microseconds = 0;
+ }
+
+ if (microseconds) {
+ val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
+ if (!val)
+ return -EINVAL;
+ } else {
+ val = 0; /* timeout is immediate */
+ }
+ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
+ iowrite32(val, ipa->reg_virt + offset);
+
+ return 0;
+}
+
+static void
+ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 offset;
+ u32 val;
+
+ val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
+ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
+{
+ u32 i;
+
+ for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[i];
+
+ if (endpoint->ee_id != GSI_EE_MODEM)
+ continue;
+
+ (void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
+ ipa_endpoint_init_hol_block_enable(endpoint, true);
+ }
+}
+
+static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ /* DEAGGR_HDR_LEN is 0 */
+ /* PACKET_OFFSET_VALID is 0 */
+ /* PACKET_OFFSET_LOCATION is ignored (not valid) */
+ /* MAX_PACKET_LEN is 0 (not enforced) */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
+ u32 seq_type = endpoint->seq_type;
+ u32 val = 0;
+
+ val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
+ val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
+ /* HPS_REP_SEQ_TYPE is 0 */
+ /* DPS_REP_SEQ_TYPE is 0 */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
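+
+/* Worked example of the nibble split above: a seq_type value of 0x21
+ * would program HPS sequencer type 0x1 (low nibble) and DPS sequencer
+ * type 0x2 (bits 4-7); the specific value is illustrative only.
+ */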
+
+/**
+ * ipa_endpoint_skb_tx() - Transmit a socket buffer
+ * @endpoint: Endpoint pointer
+ * @skb: Socket buffer to send
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
+{
+ struct gsi_trans *trans;
+ u32 nr_frags;
+ int ret;
+
+ /* Make sure source endpoint's TLV FIFO has enough entries to
+ * hold the linear portion of the skb and all its fragments.
+ * If not, see if we can linearize it before giving up.
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (1 + nr_frags > endpoint->trans_tre_max) {
+ if (skb_linearize(skb))
+ return -E2BIG;
+ nr_frags = 0;
+ }
+
+ trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
+ if (!trans)
+ return -EBUSY;
+
+ ret = gsi_trans_skb_add(trans, skb);
+ if (ret)
+ goto err_trans_free;
+ trans->data = skb; /* transaction owns skb now */
+
+ gsi_trans_commit(trans, !netdev_xmit_more());
+
+ return 0;
+
+err_trans_free:
+ gsi_trans_free(trans);
+
+ return -ENOMEM;
+}
+
+static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ u32 val = 0;
+ u32 offset;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+
+ if (endpoint->data->status_enable) {
+ val |= STATUS_EN_FMASK;
+ if (endpoint->toward_ipa) {
+ enum ipa_endpoint_name name;
+ u32 status_endpoint_id;
+
+ name = endpoint->data->tx.status_endpoint;
+ status_endpoint_id = ipa->name_map[name]->endpoint_id;
+
+ val |= u32_encode_bits(status_endpoint_id,
+ STATUS_ENDP_FMASK);
+ }
+ /* STATUS_LOCATION is 0 (status element precedes packet) */
+ /* The next field is present for IPA v4.0 and above */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 */
+ }
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+{
+ struct gsi_trans *trans;
+ bool doorbell = false;
+ struct page *page;
+ u32 offset;
+ u32 len;
+ int ret;
+
+ page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+ if (!page)
+ return -ENOMEM;
+
+ trans = ipa_endpoint_trans_alloc(endpoint, 1);
+ if (!trans)
+ goto err_free_pages;
+
+ /* Offset the buffer to make space for skb headroom */
+ offset = NET_SKB_PAD;
+ len = IPA_RX_BUFFER_SIZE - offset;
+
+ ret = gsi_trans_page_add(trans, page, len, offset);
+ if (ret)
+ goto err_trans_free;
+ trans->data = page; /* transaction owns page now */
+
+ if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
+ doorbell = true;
+ endpoint->replenish_ready = 0;
+ }
+
+ gsi_trans_commit(trans, doorbell);
+
+ return 0;
+
+err_trans_free:
+ gsi_trans_free(trans);
+err_free_pages:
+ __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+
+ return -ENOMEM;
+}
+
+/**
+ * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * @endpoint: Endpoint to be replenished
+ * @count: Number of buffers to add to the replenish backlog
+ *
+ * Allocate RX packet wrapper structures with maximal socket buffers
+ * for an endpoint. These are supplied to the hardware, which fills
+ * them with incoming data.
+ */
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+{
+ struct gsi *gsi;
+ u32 backlog;
+
+ if (!endpoint->replenish_enabled) {
+ if (count)
+ atomic_add(count, &endpoint->replenish_saved);
+ return;
+ }
+
+ while (atomic_dec_not_zero(&endpoint->replenish_backlog))
+ if (ipa_endpoint_replenish_one(endpoint))
+ goto try_again_later;
+ if (count)
+ atomic_add(count, &endpoint->replenish_backlog);
+
+ return;
+
+try_again_later:
+ /* The last one didn't succeed, so fix the backlog */
+ backlog = atomic_inc_return(&endpoint->replenish_backlog);
+
+ if (count)
+ atomic_add(count, &endpoint->replenish_backlog);
+
+ /* Whenever a receive buffer transaction completes we'll try to
+ * replenish again. It's unlikely, but if we fail to supply even
+ * one buffer, nothing will trigger another replenish attempt.
+ * Receive buffer transactions use one TRE, so schedule work to
+ * try replenishing again if our backlog is *all* available TREs.
+ */
+ gsi = &endpoint->ipa->gsi;
+ if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+ schedule_delayed_work(&endpoint->replenish_work,
+ msecs_to_jiffies(1));
+}
+
+static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 max_backlog;
+ u32 saved;
+
+ endpoint->replenish_enabled = true;
+ while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
+ atomic_add(saved, &endpoint->replenish_backlog);
+
+ /* Start replenishing if hardware currently has no buffers */
+ max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
+ if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
+ ipa_endpoint_replenish(endpoint, 0);
+}
+
+static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
+{
+ u32 backlog;
+
+ endpoint->replenish_enabled = false;
+ while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
+ atomic_add(backlog, &endpoint->replenish_saved);
+}
+
+static void ipa_endpoint_replenish_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
+
+ ipa_endpoint_replenish(endpoint, 0);
+}
+
+static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
+ void *data, u32 len, u32 extra)
+{
+ struct sk_buff *skb;
+
+ skb = __dev_alloc_skb(len, GFP_ATOMIC);
+ if (skb) {
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
+ }
+
+ /* Now receive it, or drop it if there's no netdev */
+ if (endpoint->netdev)
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+ else if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
+ struct page *page, u32 len)
+{
+ struct sk_buff *skb;
+
+ /* Nothing to do if there's no netdev */
+ if (!endpoint->netdev)
+ return false;
+
+ /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
+ skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+ if (skb) {
+ /* Reserve the headroom and account for the data */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_put(skb, len);
+ }
+
+ /* Receive the buffer (or record drop if unable to build it) */
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+
+ return skb != NULL;
+}
+
+/* The format of a packet status element is the same for several status
+ * types (opcodes). The NEW_FRAG_RULE, LOG, DCMP (decompression) types
+ * aren't currently supported.
+ */
+static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
+{
+ switch (opcode) {
+ case IPA_STATUS_OPCODE_PACKET:
+ case IPA_STATUS_OPCODE_DROPPED_PACKET:
+ case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
+ case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
+{
+ u32 endpoint_id;
+
+ if (!ipa_status_format_packet(status->opcode))
+ return true;
+ if (!status->pkt_len)
+ return true;
+ endpoint_id = u32_get_bits(status->endp_dst_idx,
+ IPA_STATUS_DST_IDX_FMASK);
+ if (endpoint_id != endpoint->endpoint_id)
+ return true;
+
+ return false; /* Don't skip this packet, process it */
+}
+
+/* Return whether the status indicates the packet should be dropped */
+static bool ipa_status_drop_packet(const struct ipa_status *status)
+{
+ u32 val;
+
+ /* Deaggregation exceptions we drop; others we consume */
+ if (status->exception)
+ return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
+
+ /* Drop the packet if it fails to match a routing rule; otherwise no */
+ val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+
+ return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+}
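+
+/* Note on the sentinel above: IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK is
+ * GENMASK(31, 22), a 10-bit field, so field_max() is 0x3ff (1023);
+ * an all-ones rule ID is how the hardware reports a routing miss.
+ */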
+
+static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
+ struct page *page, u32 total_len)
+{
+ void *data = page_address(page) + NET_SKB_PAD;
+ u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+ u32 resid = total_len;
+
+ while (resid) {
+ const struct ipa_status *status = data;
+ u32 align;
+ u32 len;
+
+ if (resid < sizeof(*status)) {
+ dev_err(&endpoint->ipa->pdev->dev,
+ "short message (%u bytes < %zu byte status)\n",
+ resid, sizeof(*status));
+ break;
+ }
+
+ /* Skip over status packets that lack packet data */
+ if (ipa_endpoint_status_skip(endpoint, status)) {
+ data += sizeof(*status);
+ resid -= sizeof(*status);
+ continue;
+ }
+
+ /* Compute the amount of buffer space consumed by the
+ * packet, including the status element. If the hardware
+ * is configured to pad packet data to an aligned boundary,
+		 * account for that. And if checksum offload is enabled,
+ * a trailer containing computed checksum information will
+ * be appended.
+ */
+ align = endpoint->data->rx.pad_align ? : 1;
+ len = le16_to_cpu(status->pkt_len);
+ len = sizeof(*status) + ALIGN(len, align);
+ if (endpoint->data->checksum)
+ len += sizeof(struct rmnet_map_dl_csum_trailer);
+
+ /* Charge the new packet with a proportional fraction of
+ * the unused space in the original receive buffer.
+ * XXX Charge a proportion of the *whole* receive buffer?
+ */
+ if (!ipa_status_drop_packet(status)) {
+ u32 extra = unused * len / total_len;
+ void *data2 = data + sizeof(*status);
+ u32 len2 = le16_to_cpu(status->pkt_len);
+
+ /* Client receives only packet data (no status) */
+ ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
+ }
+
+ /* Consume status and the full packet it describes */
+ data += len;
+ resid -= len;
+ }
+}
+
+/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
+static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+}
+
+/* Complete transaction initiated in ipa_endpoint_replenish_one() */
+static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ struct page *page;
+
+ ipa_endpoint_replenish(endpoint, 1);
+
+ if (trans->cancelled)
+ return;
+
+ /* Parse or build a socket buffer using the actual received length */
+ page = trans->data;
+ if (endpoint->data->status_enable)
+ ipa_endpoint_status_parse(endpoint, page, trans->len);
+ else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
+ trans->data = NULL; /* Pages have been consumed */
+}
+
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ if (endpoint->toward_ipa)
+ ipa_endpoint_tx_complete(endpoint, trans);
+ else
+ ipa_endpoint_rx_complete(endpoint, trans);
+}
+
+void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ if (endpoint->toward_ipa) {
+ struct ipa *ipa = endpoint->ipa;
+
+ /* Nothing to do for command transactions */
+ if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
+ struct sk_buff *skb = trans->data;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ struct page *page = trans->data;
+
+ if (page)
+ __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ }
+}
+
+void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
+{
+ u32 val;
+
+ /* ROUTE_DIS is 0 */
+ val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
+ val |= ROUTE_DEF_HDR_TABLE_FMASK;
+ val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
+ val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
+ val |= ROUTE_DEF_RETAIN_HDR_FMASK;
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
+}
+
+void ipa_endpoint_default_route_clear(struct ipa *ipa)
+{
+ ipa_endpoint_default_route_set(ipa, 0);
+}
+
+static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 offset;
+ u32 val;
+
+ /* assert(mask & ipa->available); */
+ offset = ipa_reg_state_aggr_active_offset(ipa->version);
+ val = ioread32(ipa->reg_virt + offset);
+
+ return !!(val & mask);
+}
+
+static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+
+ /* assert(mask & ipa->available); */
+ iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+}
+
+/**
+ * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
+ * @endpoint: Endpoint to be reset
+ *
+ * If aggregation is active on an RX endpoint when a reset is performed
+ * on its underlying GSI channel, a special sequence of actions must be
+ * taken to ensure the IPA pipeline is properly cleared.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct ipa *ipa = endpoint->ipa;
+ bool endpoint_suspended = false;
+ struct gsi *gsi = &ipa->gsi;
+ dma_addr_t addr;
+ bool db_enable;
+ u32 retries;
+ u32 len = 1;
+ void *virt;
+ int ret;
+
+ virt = kzalloc(len, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ /* Force close aggregation before issuing the reset */
+ ipa_endpoint_force_close(endpoint);
+
+ /* Reset and reconfigure the channel with the doorbell engine
+ * disabled. Then poll until we know aggregation is no longer
+ * active. We'll re-enable the doorbell (if appropriate) when
+ * we reset again below.
+ */
+ gsi_channel_reset(gsi, endpoint->channel_id, false);
+
+ /* Make sure the channel isn't suspended */
+ if (endpoint->ipa->version == IPA_VERSION_3_5_1)
+ if (!ipa_endpoint_init_ctrl(endpoint, false))
+ endpoint_suspended = true;
+
+ /* Start channel and do a 1 byte read */
+ ret = gsi_channel_start(gsi, endpoint->channel_id);
+ if (ret)
+ goto out_suspend_again;
+
+ ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
+ if (ret)
+ goto err_endpoint_stop;
+
+ /* Wait for aggregation to be closed on the channel */
+ retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
+ do {
+ if (!ipa_endpoint_aggr_active(endpoint))
+ break;
+ msleep(1);
+ } while (retries--);
+
+ /* Check one last time */
+ if (ipa_endpoint_aggr_active(endpoint))
+ dev_err(dev, "endpoint %u still active during reset\n",
+ endpoint->endpoint_id);
+
+ gsi_trans_read_byte_done(gsi, endpoint->channel_id);
+
+ ret = ipa_endpoint_stop(endpoint);
+ if (ret)
+ goto out_suspend_again;
+
+	/* Finally, reset and reconfigure the channel again (re-enabling
+	 * the doorbell engine if appropriate). Sleep for 1 millisecond to
+ * complete the channel reset sequence. Finish by suspending the
+ * channel again (if necessary).
+ */
+ db_enable = ipa->version == IPA_VERSION_3_5_1;
+ gsi_channel_reset(gsi, endpoint->channel_id, db_enable);
+
+ msleep(1);
+
+ goto out_suspend_again;
+
+err_endpoint_stop:
+ ipa_endpoint_stop(endpoint);
+out_suspend_again:
+ if (endpoint_suspended)
+ (void)ipa_endpoint_init_ctrl(endpoint, true);
+ dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
+out_kfree:
+ kfree(virt);
+
+ return ret;
+}
+
+static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
+{
+ u32 channel_id = endpoint->channel_id;
+ struct ipa *ipa = endpoint->ipa;
+ bool db_enable;
+ bool special;
+ int ret = 0;
+
+ /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
+ * is active, we need to handle things specially to recover.
+ * All other cases just need to reset the underlying GSI channel.
+ *
+ * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
+ */
+ db_enable = ipa->version == IPA_VERSION_3_5_1;
+ special = !endpoint->toward_ipa && endpoint->data->aggregation;
+ if (special && ipa_endpoint_aggr_active(endpoint))
+ ret = ipa_endpoint_reset_rx_aggr(endpoint);
+ else
+ gsi_channel_reset(&ipa->gsi, channel_id, db_enable);
+
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d resetting channel %u for endpoint %u\n",
+ ret, endpoint->channel_id, endpoint->endpoint_id);
+}
+
+static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
+{
+ u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
+ struct gsi_trans *trans;
+ dma_addr_t addr;
+ int ret;
+
+ trans = ipa_cmd_trans_alloc(ipa, 1);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for RX endpoint STOP workaround\n");
+ return -EBUSY;
+ }
+
+ /* Read into the highest part of the zero memory area */
+ addr = ipa->zero_addr + ipa->zero_size - size;
+
+ ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);
+
+ ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
+ if (ret)
+ gsi_trans_free(trans);
+
+ return ret;
+}
+
+/**
+ * ipa_endpoint_stop() - Stop a GSI channel in IPA
+ * @endpoint: Endpoint whose GSI channel should be stopped
+ *
+ * This function implements the sequence used to stop a GSI channel
+ * in IPA. It returns once the channel is in the STOP state.
+ *
+ * Return: 0 on success, or a negative error code otherwise
+ */
+int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
+{
+	int retries = IPA_ENDPOINT_STOP_RX_RETRIES;
+ int ret;
+
+ do {
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
+ if (ret != -EAGAIN || endpoint->toward_ipa)
+ break;
+
+ /* For IPA v3.5.1, send a DMA read task and check again */
+ if (ipa->version == IPA_VERSION_3_5_1) {
+ ret = ipa_endpoint_stop_rx_dma(ipa);
+ if (ret)
+ break;
+ }
+
+ msleep(1);
+	} while (retries-- > 0);
+
+	/* retries is negative only if every attempt was exhausted */
+	return retries >= 0 ? ret : -EIO;
+}
+
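+/* A hedged caller sketch (illustrative only, not driver code):
+ * ipa_endpoint_stop() hides the RX retry and DMA-workaround logic,
+ * so a caller only needs to examine the final result:
+ *
+ *	ret = ipa_endpoint_stop(endpoint);
+ *	if (ret)
+ *		dev_err(dev, "error %d stopping endpoint\n", ret);
+ */
+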
+static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ int ret;
+
+ if (endpoint->toward_ipa) {
+ bool delay_mode = endpoint->data->tx.delay;
+
+ ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
+	/* Endpoint is expected not to be in delay mode already */
+ if (!ret != delay_mode) {
+ dev_warn(dev,
+ "TX endpoint %u was %sin delay mode\n",
+ endpoint->endpoint_id,
+ delay_mode ? "already " : "");
+ }
+ ipa_endpoint_init_hdr_ext(endpoint);
+ ipa_endpoint_init_aggr(endpoint);
+ ipa_endpoint_init_deaggr(endpoint);
+ ipa_endpoint_init_seq(endpoint);
+ } else {
+ if (endpoint->ipa->version == IPA_VERSION_3_5_1) {
+ if (!ipa_endpoint_init_ctrl(endpoint, false))
+ dev_warn(dev,
+ "RX endpoint %u was suspended\n",
+ endpoint->endpoint_id);
+ }
+ ipa_endpoint_init_hdr_ext(endpoint);
+ ipa_endpoint_init_aggr(endpoint);
+ }
+ ipa_endpoint_init_cfg(endpoint);
+ ipa_endpoint_init_hdr(endpoint);
+ ipa_endpoint_init_hdr_metadata_mask(endpoint);
+ ipa_endpoint_init_mode(endpoint);
+ ipa_endpoint_status(endpoint);
+}
+
+int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+ int ret;
+
+ ret = gsi_channel_start(gsi, endpoint->channel_id);
+ if (ret) {
+ dev_err(&ipa->pdev->dev,
+ "error %d starting %cX channel %u for endpoint %u\n",
+ ret, endpoint->toward_ipa ? 'T' : 'R',
+ endpoint->channel_id, endpoint->endpoint_id);
+ return ret;
+ }
+
+ if (!endpoint->toward_ipa) {
+ ipa_interrupt_suspend_enable(ipa->interrupt,
+ endpoint->endpoint_id);
+ ipa_endpoint_replenish_enable(endpoint);
+ }
+
+ ipa->enabled |= BIT(endpoint->endpoint_id);
+
+ return 0;
+}
+
+void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & mask))
+ return;
+
+ endpoint->ipa->enabled ^= mask;
+
+ if (!endpoint->toward_ipa) {
+ ipa_endpoint_replenish_disable(endpoint);
+ ipa_interrupt_suspend_disable(ipa->interrupt,
+ endpoint->endpoint_id);
+ }
+
+ /* Note that if stop fails, the channel's state is not well-defined */
+ ret = ipa_endpoint_stop(endpoint);
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d attempting to stop endpoint %u\n", ret,
+ endpoint->endpoint_id);
+}
+
+/**
+ * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
+ * @endpoint: Endpoint on which to emulate a suspend interrupt
+ *
+ * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
+ * with an open aggregation frame. This is to work around a hardware
+ * issue in IPA version 3.5.1 where the suspend interrupt will not be
+ * generated when it should be.
+ */
+static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+
+ /* assert(ipa->version == IPA_VERSION_3_5_1); */
+
+ if (!endpoint->data->aggregation)
+ return;
+
+ /* Nothing to do if the endpoint doesn't have aggregation open */
+ if (!ipa_endpoint_aggr_active(endpoint))
+ return;
+
+ /* Force close aggregation */
+ ipa_endpoint_force_close(endpoint);
+
+ ipa_interrupt_simulate_suspend(ipa->interrupt);
+}
+
+void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ bool stop_channel;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ return;
+
+ if (!endpoint->toward_ipa)
+ ipa_endpoint_replenish_disable(endpoint);
+
+ /* IPA v3.5.1 doesn't use channel stop for suspend */
+ stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
+ if (!endpoint->toward_ipa && !stop_channel) {
+ /* Due to a hardware bug, a client suspended with an open
+ * aggregation frame will not generate a SUSPEND IPA
+ * interrupt. We work around this by force-closing the
+ * aggregation frame, then simulating the arrival of such
+ * an interrupt.
+ */
+ WARN_ON(ipa_endpoint_init_ctrl(endpoint, true));
+ ipa_endpoint_suspend_aggr(endpoint);
+ }
+
+ ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
+ if (ret)
+ dev_err(dev, "error %d suspending channel %u\n", ret,
+ endpoint->channel_id);
+}
+
+void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ bool start_channel;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ return;
+
+ /* IPA v3.5.1 doesn't use channel start for resume */
+ start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
+ if (!endpoint->toward_ipa && !start_channel)
+ WARN_ON(ipa_endpoint_init_ctrl(endpoint, false));
+
+ ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
+ if (ret)
+ dev_err(dev, "error %d resuming channel %u\n", ret,
+ endpoint->channel_id);
+ else if (!endpoint->toward_ipa)
+ ipa_endpoint_replenish_enable(endpoint);
+}
+
+void ipa_endpoint_suspend(struct ipa *ipa)
+{
+ if (ipa->modem_netdev)
+ ipa_modem_suspend(ipa->modem_netdev);
+
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+}
+
+void ipa_endpoint_resume(struct ipa *ipa)
+{
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+
+ if (ipa->modem_netdev)
+ ipa_modem_resume(ipa->modem_netdev);
+}
+
+static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 channel_id = endpoint->channel_id;
+
+ /* Only AP endpoints get set up */
+ if (endpoint->ee_id != GSI_EE_AP)
+ return;
+
+ endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
+ if (!endpoint->toward_ipa) {
+ /* RX transactions require a single TRE, so the maximum
+ * backlog is the same as the maximum outstanding TREs.
+ */
+ endpoint->replenish_enabled = false;
+ atomic_set(&endpoint->replenish_saved,
+ gsi_channel_tre_max(gsi, endpoint->channel_id));
+ atomic_set(&endpoint->replenish_backlog, 0);
+ INIT_DELAYED_WORK(&endpoint->replenish_work,
+ ipa_endpoint_replenish_work);
+ }
+
+ ipa_endpoint_program(endpoint);
+
+ endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
+}
+
+static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
+{
+ endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
+
+ if (!endpoint->toward_ipa)
+ cancel_delayed_work_sync(&endpoint->replenish_work);
+
+ ipa_endpoint_reset(endpoint);
+}
+
+void ipa_endpoint_setup(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+
+ ipa->set_up = 0;
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+
+ initialized ^= BIT(endpoint_id);
+
+ ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
+ }
+}
+
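+/* The setup loop above and the teardown loop below use the same
+ * bitmask-walk idiom: setup visits set bits low-to-high with __ffs(),
+ * teardown high-to-low with __fls().  A minimal sketch of the pattern
+ * (illustrative only; do_something() is hypothetical):
+ *
+ *	u32 mask = ipa->initialized;
+ *
+ *	while (mask) {
+ *		u32 id = __ffs(mask);	// position of lowest set bit
+ *
+ *		mask ^= BIT(id);	// clear that bit
+ *		do_something(id);	// visit endpoints in id order
+ *	}
+ */
+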
+void ipa_endpoint_teardown(struct ipa *ipa)
+{
+ u32 set_up = ipa->set_up;
+
+ while (set_up) {
+ u32 endpoint_id = __fls(set_up);
+
+ set_up ^= BIT(endpoint_id);
+
+ ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
+ }
+ ipa->set_up = 0;
+}
+
+int ipa_endpoint_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 initialized;
+ u32 rx_base;
+ u32 rx_mask;
+ u32 tx_mask;
+ int ret = 0;
+ u32 max;
+ u32 val;
+
+ /* Find out about the endpoints supplied by the hardware, and ensure
+ * the highest one doesn't exceed the number we support.
+ */
+ val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
+
+ /* Our RX is an IPA producer */
+ rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
+ max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
+ if (max > IPA_ENDPOINT_MAX) {
+ dev_err(dev, "too many endpoints (%u > %u)\n",
+ max, IPA_ENDPOINT_MAX);
+ return -EINVAL;
+ }
+ rx_mask = GENMASK(max - 1, rx_base);
+
+ /* Our TX is an IPA consumer */
+ max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
+ tx_mask = GENMASK(max - 1, 0);
+
+ ipa->available = rx_mask | tx_mask;
+
+ /* Check for initialized endpoints not supported by the hardware */
+ if (ipa->initialized & ~ipa->available) {
+ dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
+ ipa->initialized & ~ipa->available);
+ ret = -EINVAL; /* Report other errors too */
+ }
+
+ initialized = ipa->initialized;
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+ struct ipa_endpoint *endpoint;
+
+ initialized ^= BIT(endpoint_id);
+
+ /* Make sure it's pointing in the right direction */
+ endpoint = &ipa->endpoint[endpoint_id];
+ if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
+ dev_err(dev, "endpoint id %u wrong direction\n",
+ endpoint_id);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
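+/* Worked example for the mask computation in ipa_endpoint_config()
+ * (the values are hypothetical): if rx_base is 8 and the hardware
+ * reports 4 producer pipes, max is 12 and rx_mask = GENMASK(11, 8)
+ * = 0x00000f00.  If it reports 6 consumer pipes, tx_mask =
+ * GENMASK(5, 0) = 0x0000003f, so available = 0x00000f3f.
+ */
+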
+void ipa_endpoint_deconfig(struct ipa *ipa)
+{
+ ipa->available = 0; /* Nothing more to do */
+}
+
+static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ struct ipa_endpoint *endpoint;
+
+ endpoint = &ipa->endpoint[data->endpoint_id];
+
+ if (data->ee_id == GSI_EE_AP)
+ ipa->channel_map[data->channel_id] = endpoint;
+ ipa->name_map[name] = endpoint;
+
+ endpoint->ipa = ipa;
+ endpoint->ee_id = data->ee_id;
+ endpoint->seq_type = data->endpoint.seq_type;
+ endpoint->channel_id = data->channel_id;
+ endpoint->endpoint_id = data->endpoint_id;
+ endpoint->toward_ipa = data->toward_ipa;
+ endpoint->data = &data->endpoint.config;
+
+ ipa->initialized |= BIT(endpoint->endpoint_id);
+}
+
+void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
+{
+ endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
+
+ memset(endpoint, 0, sizeof(*endpoint));
+}
+
+void ipa_endpoint_exit(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+
+ while (initialized) {
+ u32 endpoint_id = __fls(initialized);
+
+ initialized ^= BIT(endpoint_id);
+
+ ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
+ }
+ memset(ipa->name_map, 0, sizeof(ipa->name_map));
+ memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
+}
+
+/* Returns a bitmask of endpoints that support filtering, or 0 on error */
+u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ enum ipa_endpoint_name name;
+ u32 filter_map;
+
+ if (!ipa_endpoint_data_valid(ipa, count, data))
+ return 0; /* Error */
+
+ ipa->initialized = 0;
+
+ filter_map = 0;
+ for (name = 0; name < count; name++, data++) {
+ if (ipa_gsi_endpoint_data_empty(data))
+ continue; /* Skip over empty slots */
+
+ ipa_endpoint_init_one(ipa, name, data);
+
+ if (data->endpoint.filter_support)
+ filter_map |= BIT(data->endpoint_id);
+ }
+
+ if (!ipa_filter_map_valid(ipa, filter_map))
+ goto err_endpoint_exit;
+
+ return filter_map; /* Non-zero bitmask */
+
+err_endpoint_exit:
+ ipa_endpoint_exit(ipa);
+
+ return 0; /* Error */
+}
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
new file mode 100644
index 000000000000..4b336a1f759d
--- /dev/null
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_ENDPOINT_H_
+#define _IPA_ENDPOINT_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+
+#include "gsi.h"
+#include "ipa_reg.h"
+
+struct net_device;
+struct sk_buff;
+
+struct ipa;
+struct ipa_gsi_endpoint_data;
+
+/* Non-zero granularity of counter used to implement aggregation timeout */
+#define IPA_AGGR_GRANULARITY 500 /* microseconds */
+
+#define IPA_MTU ETH_DATA_LEN
+
+enum ipa_endpoint_name {
+ IPA_ENDPOINT_AP_MODEM_TX = 0,
+ IPA_ENDPOINT_MODEM_LAN_TX,
+ IPA_ENDPOINT_MODEM_COMMAND_TX,
+ IPA_ENDPOINT_AP_COMMAND_TX,
+ IPA_ENDPOINT_MODEM_AP_TX,
+ IPA_ENDPOINT_AP_LAN_RX,
+ IPA_ENDPOINT_AP_MODEM_RX,
+ IPA_ENDPOINT_MODEM_AP_RX,
+ IPA_ENDPOINT_MODEM_LAN_RX,
+ IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
+};
+
+#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
+
+/**
+ * struct ipa_endpoint - IPA endpoint information
+ * @ipa: IPA pointer
+ * @seq_type: Sequencer type used by the endpoint
+ * @ee_id: Execution environment the endpoint is associated with
+ * @channel_id: The endpoint's GSI channel
+ * @endpoint_id: The IPA endpoint number
+ * @toward_ipa: True for TX endpoints, false for RX endpoints
+ * @data: Endpoint configuration data
+ * @trans_tre_max: Maximum descriptors per transaction
+ * @evt_ring_id: The endpoint's GSI channel event ring
+ */
+struct ipa_endpoint {
+ struct ipa *ipa;
+ enum ipa_seq_type seq_type;
+ enum gsi_ee_id ee_id;
+ u32 channel_id;
+ u32 endpoint_id;
+ bool toward_ipa;
+ const struct ipa_endpoint_config_data *data;
+
+ u32 trans_tre_max; /* maximum descriptors per transaction */
+ u32 evt_ring_id;
+
+ /* Net device this endpoint is associated with, if any */
+ struct net_device *netdev;
+
+ /* Receive buffer replenishing for RX endpoints */
+ bool replenish_enabled;
+ u32 replenish_ready;
+ atomic_t replenish_saved;
+ atomic_t replenish_backlog;
+ struct delayed_work replenish_work; /* global wq */
+};
+
+void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa);
+
+void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable);
+
+int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
+
+int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
+
+int ipa_endpoint_stop(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint);
+
+int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
+void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint);
+void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_suspend(struct ipa *ipa);
+void ipa_endpoint_resume(struct ipa *ipa);
+
+void ipa_endpoint_setup(struct ipa *ipa);
+void ipa_endpoint_teardown(struct ipa *ipa);
+
+int ipa_endpoint_config(struct ipa *ipa);
+void ipa_endpoint_deconfig(struct ipa *ipa);
+
+void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
+void ipa_endpoint_default_route_clear(struct ipa *ipa);
+
+u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
+void ipa_endpoint_exit(struct ipa *ipa);
+
+void ipa_endpoint_trans_complete(struct ipa_endpoint *ipa,
+ struct gsi_trans *trans);
+void ipa_endpoint_trans_release(struct ipa_endpoint *ipa,
+ struct gsi_trans *trans);
+
+#endif /* _IPA_ENDPOINT_H_ */
diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c
new file mode 100644
index 000000000000..dc4a5c2196ae
--- /dev/null
+++ b/drivers/net/ipa/ipa_gsi.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_data.h"
+
+void ipa_gsi_trans_complete(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+
+ ipa_endpoint_trans_complete(ipa->channel_map[trans->channel_id], trans);
+}
+
+void ipa_gsi_trans_release(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+
+ ipa_endpoint_trans_release(ipa->channel_map[trans->channel_id], trans);
+}
+
+void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count)
+{
+ struct ipa *ipa = container_of(gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->channel_map[channel_id];
+ if (endpoint->netdev)
+ netdev_sent_queue(endpoint->netdev, byte_count);
+}
+
+void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count)
+{
+ struct ipa *ipa = container_of(gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->channel_map[channel_id];
+ if (endpoint->netdev)
+ netdev_completed_queue(endpoint->netdev, count, byte_count);
+}
+
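+/* The two hooks above pair netdev_sent_queue() with
+ * netdev_completed_queue(), which is the byte queue limits (BQL)
+ * pattern.  A minimal sketch, assuming a hypothetical driver TX path:
+ *
+ *	// when a packet is queued to hardware
+ *	netdev_sent_queue(ndev, skb->len);
+ *
+ *	// when the hardware reports packets done
+ *	netdev_completed_queue(ndev, pkts_done, bytes_done);
+ */
+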
+/* Indicate whether an endpoint config data entry is "empty" */
+bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data)
+{
+ return data->ee_id == GSI_EE_AP && !data->channel.tlv_count;
+}
diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h
new file mode 100644
index 000000000000..3cf18600c68e
--- /dev/null
+++ b/drivers/net/ipa/ipa_gsi.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_GSI_H_
+#define _IPA_GSI_H_
+
+#include <linux/types.h>
+
+struct gsi_trans;
+
+/**
+ * ipa_gsi_trans_complete() - GSI transaction completion callback
+ * @trans: Transaction that has completed
+ *
+ * This is called from the GSI layer to notify the IPA layer that a
+ * transaction has completed.
+ */
+void ipa_gsi_trans_complete(struct gsi_trans *trans);
+
+/**
+ * ipa_gsi_trans_release() - GSI transaction release callback
+ * @trans: Transaction whose resources should be freed
+ *
+ * This is called from the GSI layer to notify the IPA layer that a
+ * transaction is about to be freed, so any resources associated
+ * with it should be released.
+ */
+void ipa_gsi_trans_release(struct gsi_trans *trans);
+
+/**
+ * ipa_gsi_channel_tx_queued() - GSI queued to hardware notification
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ * @count: Number of transactions queued
+ * @byte_count: Number of bytes to transfer represented by transactions
+ *
+ * This is called from the GSI layer to notify the IPA layer that some
+ * number of transactions have been queued to hardware for execution.
+ */
+void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count);
+
+/**
+ * ipa_gsi_channel_tx_completed() - GSI transaction completed notification
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ * @count: Number of transactions completed since last report
+ * @byte_count: Number of bytes transferred represented by transactions
+ *
+ * This is called from the GSI layer to notify the IPA layer that the hardware
+ * has reported the completion of some number of transactions.
+ */
+void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count);
+
+bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data);
+
+#endif /* _IPA_GSI_H_ */
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
new file mode 100644
index 000000000000..90353987c45f
--- /dev/null
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+/**
+ * DOC: IPA Interrupts
+ *
+ * The IPA has an interrupt line distinct from the interrupt used by the GSI
+ * code. Whereas GSI interrupts are generally related to channel events (like
+ * transfer completions), IPA interrupts signal other events specific to
+ * the IPA itself. Some of the IPA interrupts come from a microcontroller
+ * embedded in the IPA. Each IPA interrupt type can be both masked and
+ * acknowledged independently of the others.
+ *
+ * Two of the IPA interrupts are initiated by the microcontroller. A third
+ * can be generated to signal the need for a wakeup/resume when an IPA
+ * endpoint has been suspended. There are other IPA events, but at this
+ * time only these three are supported.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_reg.h"
+#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
+
+/**
+ * struct ipa_interrupt - IPA interrupt information
+ * @ipa: IPA pointer
+ * @irq: Linux IRQ number used for IPA interrupts
+ * @enabled: Mask indicating which interrupts are enabled
+ * @handler: Array of handlers indexed by IPA interrupt ID
+ */
+struct ipa_interrupt {
+ struct ipa *ipa;
+ u32 irq;
+ u32 enabled;
+ ipa_irq_handler_t handler[IPA_IRQ_COUNT];
+};
+
+/* Returns true if the interrupt type is associated with the microcontroller */
+static bool ipa_interrupt_uc(struct ipa_interrupt *interrupt, u32 irq_id)
+{
+ return irq_id == IPA_IRQ_UC_0 || irq_id == IPA_IRQ_UC_1;
+}
+
+/* Process a particular interrupt type that has been received */
+static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
+{
+ bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask = BIT(irq_id);
+
+ /* For microcontroller interrupts, clear the interrupt right away,
+ * "to avoid clearing unhandled interrupts."
+ */
+ if (uc_irq)
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+
+ if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id])
+ interrupt->handler[irq_id](interrupt->ipa, irq_id);
+
+ /* Clearing the SUSPEND_TX interrupt also clears the register
+ * that tells us which suspended endpoint(s) caused the interrupt,
+ * so defer clearing until after the handler has been called.
+ */
+ if (!uc_irq)
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+}
+
+/* Process all IPA interrupt types that have been signaled */
+static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 enabled = interrupt->enabled;
+ u32 mask;
+
+ /* The status register indicates which conditions are present,
+ * including conditions whose interrupt is not enabled. Handle
+ * only the enabled ones.
+ */
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ while ((mask &= enabled)) {
+ do {
+ u32 irq_id = __ffs(mask);
+
+ mask ^= BIT(irq_id);
+
+ ipa_interrupt_process(interrupt, irq_id);
+ } while (mask);
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ }
+}
+
+/* Threaded part of the IPA IRQ handler */
+static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
+{
+ struct ipa_interrupt *interrupt = dev_id;
+
+ ipa_clock_get(interrupt->ipa);
+
+ ipa_interrupt_process_all(interrupt);
+
+ ipa_clock_put(interrupt->ipa);
+
+ return IRQ_HANDLED;
+}
+
+/* Hard part (i.e., the "real" IRQ handler) of the IPA IRQ handler */
+static irqreturn_t ipa_isr(int irq, void *dev_id)
+{
+ struct ipa_interrupt *interrupt = dev_id;
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask;
+
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ if (mask & interrupt->enabled)
+ return IRQ_WAKE_THREAD;
+
+ /* Nothing in the mask was supposed to cause an interrupt */
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+
+ dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n",
+ __func__, mask);
+
+ return IRQ_HANDLED;
+}
+
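+/* The hard handler above runs in interrupt context and only decides
+ * whether the threaded handler should run; ipa_isr_thread() does the
+ * real work.  The two are paired in ipa_interrupt_setup() below via:
+ *
+ *	request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT,
+ *			     "ipa", interrupt);
+ */
+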
+/* Common function used to enable/disable TX_SUSPEND for an endpoint */
+static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
+ u32 endpoint_id, bool enable)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask = BIT(endpoint_id);
+ u32 val;
+
+ /* assert(mask & ipa->available); */
+ val = ioread32(ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ if (enable)
+ val |= mask;
+ else
+ val &= ~mask;
+ iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+}
+
+/* Enable TX_SUSPEND for an endpoint */
+void
+ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt, u32 endpoint_id)
+{
+ ipa_interrupt_suspend_control(interrupt, endpoint_id, true);
+}
+
+/* Disable TX_SUSPEND for an endpoint */
+void
+ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
+{
+ ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
+}
+
+/* Clear the suspend interrupt for all endpoints that signaled it */
+void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 val;
+
+ val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_CLR_OFFSET);
+}
+
+/* Simulate arrival of an IPA TX_SUSPEND interrupt */
+void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
+{
+ ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
+}
+
+/* Add a handler for an IPA interrupt */
+void ipa_interrupt_add(struct ipa_interrupt *interrupt,
+ enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
+{
+ struct ipa *ipa = interrupt->ipa;
+
+ /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ interrupt->handler[ipa_irq] = handler;
+
+ /* Update the IPA interrupt mask to enable it */
+ interrupt->enabled |= BIT(ipa_irq);
+ iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+}
+
+/* Remove the handler for an IPA interrupt type */
+void
+ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
+{
+ struct ipa *ipa = interrupt->ipa;
+
+ /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ /* Update the IPA interrupt mask to disable it */
+ interrupt->enabled &= ~BIT(ipa_irq);
+ iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+
+ interrupt->handler[ipa_irq] = NULL;
+}
+
+/* Set up the IPA interrupt framework */
+struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct ipa_interrupt *interrupt;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(ipa->pdev, "ipa");
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
+ ret);
+ return ERR_PTR(ret ? : -EINVAL);
+ }
+ irq = ret;
+
+ interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
+ if (!interrupt)
+ return ERR_PTR(-ENOMEM);
+ interrupt->ipa = ipa;
+ interrupt->irq = irq;
+
+ /* Start with all IPA interrupts disabled */
+ iowrite32(0, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+
+ ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT,
+ "ipa", interrupt);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
+ goto err_kfree;
+ }
+
+ return interrupt;
+
+err_kfree:
+ kfree(interrupt);
+
+ return ERR_PTR(ret);
+}
+
+/* Tear down the IPA interrupt framework */
+void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
+{
+ free_irq(interrupt->irq, interrupt);
+ kfree(interrupt);
+}
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
new file mode 100644
index 000000000000..d4f4c1c9f0b1
--- /dev/null
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_INTERRUPT_H_
+#define _IPA_INTERRUPT_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct ipa;
+struct ipa_interrupt;
+
+/**
+ * enum ipa_irq_id - IPA interrupt type
+ * @IPA_IRQ_UC_0: Microcontroller event interrupt
+ * @IPA_IRQ_UC_1: Microcontroller response interrupt
+ * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
+ *
+ * The data ready interrupt is signaled if data has arrived that is destined
+ * for an AP RX endpoint whose underlying GSI channel is suspended/stopped.
+ */
+enum ipa_irq_id {
+ IPA_IRQ_UC_0 = 2,
+ IPA_IRQ_UC_1 = 3,
+ IPA_IRQ_TX_SUSPEND = 14,
+ IPA_IRQ_COUNT, /* Number of interrupt types (not an index) */
+};
+
+/**
+ * typedef ipa_irq_handler_t - IPA interrupt handler function type
+ * @ipa: IPA pointer
+ * @irq_id: interrupt type
+ *
+ * Callback function registered by ipa_interrupt_add() to handle a specific
+ * IPA interrupt type
+ */
+typedef void (*ipa_irq_handler_t)(struct ipa *ipa, enum ipa_irq_id irq_id);
+
+/**
+ * ipa_interrupt_add() - Register a handler for an IPA interrupt type
+ * @interrupt: IPA interrupt structure
+ * @irq_id: IPA interrupt type
+ * @handler: Handler function for the interrupt
+ *
+ * Add a handler for an IPA interrupt and enable it. IPA interrupt
+ * handlers are run in threaded interrupt context, so are allowed to
+ * block.
+ */
+void ipa_interrupt_add(struct ipa_interrupt *interrupt, enum ipa_irq_id irq_id,
+ ipa_irq_handler_t handler);
+
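+/* A usage sketch (the handler name and body are hypothetical):
+ *
+ *	static void my_uc_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+ *	{
+ *		dev_info(&ipa->pdev->dev, "IPA interrupt %u\n", irq_id);
+ *	}
+ *
+ *	ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, my_uc_handler);
+ *	...
+ *	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ */
+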
+/**
+ * ipa_interrupt_remove() - Remove the handler for an IPA interrupt type
+ * @interrupt: IPA interrupt structure
+ * @irq_id: IPA interrupt type
+ *
+ * Remove an IPA interrupt handler and disable it.
+ */
+void ipa_interrupt_remove(struct ipa_interrupt *interrupt,
+ enum ipa_irq_id irq_id);
+
+/**
+ * ipa_interrupt_suspend_enable() - Enable TX_SUSPEND for an endpoint
+ * @interrupt: IPA interrupt structure
+ * @endpoint_id: Endpoint whose interrupt should be enabled
+ *
+ * Note: The "TX" in the name is from the perspective of the IPA hardware.
+ * A TX_SUSPEND interrupt arrives on an AP RX endpoint when packet data can't
+ * be delivered to the endpoint because it is suspended (or its underlying
+ * channel is stopped).
+ */
+void ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt,
+ u32 endpoint_id);
+
+/**
+ * ipa_interrupt_suspend_disable() - Disable TX_SUSPEND for an endpoint
+ * @interrupt: IPA interrupt structure
+ * @endpoint_id: Endpoint whose interrupt should be disabled
+ */
+void ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt,
+ u32 endpoint_id);
+
+/**
+ * ipa_interrupt_suspend_clear_all() - Clear all suspend interrupts
+ * @interrupt: IPA interrupt structure
+ *
+ * Clear the TX_SUSPEND interrupt for all endpoints that signaled it.
+ */
+void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
+
+/**
+ * ipa_interrupt_simulate_suspend() - Simulate TX_SUSPEND IPA interrupt
+ * @interrupt: IPA interrupt structure
+ *
+ * This calls the TX_SUSPEND interrupt handler, as if such an interrupt
+ * had been signaled. This is needed to work around a hardware quirk
+ * that occurs if aggregation is active on an endpoint when its underlying
+ * channel is suspended.
+ */
+void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
+
+/**
+ * ipa_interrupt_setup() - Set up the IPA interrupt framework
+ * @ipa: IPA pointer
+ *
+ * Return: Pointer to the IPA interrupt structure, or a pointer-coded error
+ */
+struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_teardown() - Tear down the IPA interrupt framework
+ * @interrupt: IPA interrupt structure
+ */
+void ipa_interrupt_teardown(struct ipa_interrupt *interrupt);
+
+#endif /* _IPA_INTERRUPT_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
new file mode 100644
index 000000000000..28998dcce3d2
--- /dev/null
+++ b/drivers/net/ipa/ipa_main.c
@@ -0,0 +1,953 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/remoteproc.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_cmd.h"
+#include "ipa_reg.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "ipa_modem.h"
+#include "ipa_uc.h"
+#include "ipa_interrupt.h"
+#include "gsi_trans.h"
+
+/**
+ * DOC: The IP Accelerator
+ *
+ * This driver supports the Qualcomm IP Accelerator (IPA), which is a
+ * networking component found in many Qualcomm SoCs. The IPA is connected
+ * to the application processor (AP), but is also connected to (and
+ * partially controlled by) other "execution environments" (EEs), such as
+ * a modem.
+ *
+ * The IPA is the conduit between the AP and the modem that carries network
+ * traffic. This driver presents a network interface representing the
+ * connection of the modem to external (e.g. LTE) networks.
+ *
+ * The IPA provides protocol checksum calculation, offloading this work
+ * from the AP. The IPA offers additional functionality, including routing,
+ * filtering, and NAT support, but that more advanced functionality is not
+ * currently supported. Despite that, some resources--including routing
+ * tables and filter tables--are defined in this driver because they must
+ * be initialized even when the advanced hardware features are not used.
+ *
+ * There are two distinct layers that implement the IPA hardware, and this
+ * is reflected in the organization of the driver. The generic software
+ * interface (GSI) is an integral component of the IPA, providing a
+ * well-defined communication layer between the AP subsystem and the IPA
+ * core. The GSI implements a set of "channels" used for communication
+ * between the AP and the IPA.
+ *
+ * The IPA layer uses GSI channels to implement its "endpoints". And while
+ * a GSI channel carries data between the AP and the IPA, a pair of IPA
+ * endpoints is used to carry traffic between two EEs. Specifically, the main
+ * modem network interface is implemented by two pairs of endpoints: a TX
+ * endpoint on the AP coupled with an RX endpoint on the modem; and another
+ * RX endpoint on the AP receiving data from a TX endpoint on the modem.
+ */
+
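+/* A simplified picture of the main modem network interface described
+ * above (two endpoint pairs, each backed by a GSI channel):
+ *
+ *	AP TX endpoint  --->  IPA  --->  modem RX endpoint
+ *	AP RX endpoint  <---  IPA  <---  modem TX endpoint
+ */
+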
+/* The name of the GSI firmware file relative to /lib/firmware */
+#define IPA_FWS_PATH "ipa_fws.mdt"
+#define IPA_PAS_ID 15
+
+/**
+ * ipa_suspend_handler() - Handle the suspend IPA interrupt
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt type (unused)
+ *
+ * When in suspended state, the IPA can trigger a resume by sending a SUSPEND
+ * IPA interrupt.
+ */
+static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+	/* Take a single clock reference to prevent suspend.  All
+ * endpoints will be resumed as a result. This reference will
+ * be dropped when we get a power management suspend request.
+ */
+ if (!atomic_xchg(&ipa->suspend_ref, 1))
+ ipa_clock_get(ipa);
+
+ /* Acknowledge/clear the suspend interrupt on all endpoints */
+ ipa_interrupt_suspend_clear_all(ipa->interrupt);
+}
+
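+/* The atomic_xchg() above implements a take-at-most-once reference:
+ * only the caller that changes suspend_ref from 0 to 1 takes the clock
+ * reference.  The same pattern in isolation (sketch, not driver code):
+ *
+ *	if (!atomic_xchg(&flag, 1))	// returns the old value
+ *		take_reference();	// runs once until flag is cleared
+ */
+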
+/**
+ * ipa_setup() - Set up IPA hardware
+ * @ipa: IPA pointer
+ *
+ * Perform initialization that requires issuing immediate commands on
+ * the command TX endpoint. If the modem is doing GSI firmware load
+ * and initialization, this function will be called when an SMP2P
+ * interrupt has been signaled by the modem. Otherwise it will be
+ * called from ipa_probe() after GSI firmware has been successfully
+ * loaded, authenticated, and started by Trust Zone.
+ */
+int ipa_setup(struct ipa *ipa)
+{
+ struct ipa_endpoint *exception_endpoint;
+ struct ipa_endpoint *command_endpoint;
+ int ret;
+
+ /* IPA v4.0 and above don't use the doorbell engine. */
+ ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
+ if (ret)
+ return ret;
+
+ ipa->interrupt = ipa_interrupt_setup(ipa);
+ if (IS_ERR(ipa->interrupt)) {
+ ret = PTR_ERR(ipa->interrupt);
+ goto err_gsi_teardown;
+ }
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
+ ipa_suspend_handler);
+
+ ipa_uc_setup(ipa);
+
+ ipa_endpoint_setup(ipa);
+
+	/* We need to use the AP command TX endpoint to perform other
+	 * initialization, so we enable it first.
+	 */
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ ret = ipa_endpoint_enable_one(command_endpoint);
+ if (ret)
+ goto err_endpoint_teardown;
+
+ ret = ipa_mem_setup(ipa);
+ if (ret)
+ goto err_command_disable;
+
+ ret = ipa_table_setup(ipa);
+ if (ret)
+ goto err_mem_teardown;
+
+ /* Enable the exception handling endpoint, and tell the hardware
+ * to use it by default.
+ */
+ exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ret = ipa_endpoint_enable_one(exception_endpoint);
+ if (ret)
+ goto err_table_teardown;
+
+ ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
+
+ /* We're all set. Now prepare for communication with the modem */
+ ret = ipa_modem_setup(ipa);
+ if (ret)
+ goto err_default_route_clear;
+
+ ipa->setup_complete = true;
+
+ dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n");
+
+ return 0;
+
+err_default_route_clear:
+ ipa_endpoint_default_route_clear(ipa);
+ ipa_endpoint_disable_one(exception_endpoint);
+err_table_teardown:
+ ipa_table_teardown(ipa);
+err_mem_teardown:
+ ipa_mem_teardown(ipa);
+err_command_disable:
+ ipa_endpoint_disable_one(command_endpoint);
+err_endpoint_teardown:
+ ipa_endpoint_teardown(ipa);
+ ipa_uc_teardown(ipa);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_teardown(ipa->interrupt);
+err_gsi_teardown:
+ gsi_teardown(&ipa->gsi);
+
+ return ret;
+}
+
+/**
+ * ipa_teardown() - Inverse of ipa_setup()
+ * @ipa: IPA pointer
+ */
+static void ipa_teardown(struct ipa *ipa)
+{
+ struct ipa_endpoint *exception_endpoint;
+ struct ipa_endpoint *command_endpoint;
+
+ ipa_modem_teardown(ipa);
+ ipa_endpoint_default_route_clear(ipa);
+ exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ipa_endpoint_disable_one(exception_endpoint);
+ ipa_table_teardown(ipa);
+ ipa_mem_teardown(ipa);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ ipa_endpoint_disable_one(command_endpoint);
+ ipa_endpoint_teardown(ipa);
+ ipa_uc_teardown(ipa);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_teardown(ipa->interrupt);
+ gsi_teardown(&ipa->gsi);
+}
+
+/* Configure QMB Core Master Port selection */
+static void ipa_hardware_config_comp(struct ipa *ipa)
+{
+ u32 val;
+
+ /* Nothing to configure for IPA v3.5.1 */
+ if (ipa->version == IPA_VERSION_3_5_1)
+ return;
+
+ val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+
+ if (ipa->version == IPA_VERSION_4_0) {
+ val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
+ val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
+ val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
+ } else {
+ val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ }
+
+ val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
+ val |= GSI_MULTI_INORDER_WR_DIS_FMASK;
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+}
+
+/* Configure DDR and PCIe max read/write QSB values */
+static void ipa_hardware_config_qsb(struct ipa *ipa)
+{
+ u32 val;
+
+ /* QMB_0 represents DDR; QMB_1 represents PCIe (not present in 4.2) */
+ val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK);
+ if (ipa->version == IPA_VERSION_4_2)
+ val |= u32_encode_bits(0, GEN_QMB_1_MAX_WRITES_FMASK);
+ else
+ val |= u32_encode_bits(4, GEN_QMB_1_MAX_WRITES_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
+
+ if (ipa->version == IPA_VERSION_3_5_1) {
+ val = u32_encode_bits(8, GEN_QMB_0_MAX_READS_FMASK);
+ val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
+ } else {
+ val = u32_encode_bits(12, GEN_QMB_0_MAX_READS_FMASK);
+ if (ipa->version == IPA_VERSION_4_2)
+ val |= u32_encode_bits(0, GEN_QMB_1_MAX_READS_FMASK);
+ else
+ val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
+ /* GEN_QMB_0_MAX_READS_BEATS is 0 */
+ /* GEN_QMB_1_MAX_READS_BEATS is 0 */
+ }
+ iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
+}
+
+static void ipa_idle_indication_cfg(struct ipa *ipa,
+ u32 enter_idle_debounce_thresh,
+ bool const_non_idle_enable)
+{
+ u32 offset;
+ u32 val;
+
+ val = u32_encode_bits(enter_idle_debounce_thresh,
+ ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
+ if (const_non_idle_enable)
+ val |= CONST_NON_IDLE_ENABLE_FMASK;
+
+ offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/**
+ * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
+ * @ipa: IPA pointer
+ *
+ * Configures when the IPA signals it is idle to the global clock
+ * controller, which can respond by scaling down the clock to
+ * save power.
+ */
+static void ipa_hardware_dcd_config(struct ipa *ipa)
+{
+ /* Recommended values for IPA 3.5 according to IPA HPG */
+ ipa_idle_indication_cfg(ipa, 256, false);
+}
+
+static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
+{
+ /* Power-on reset values */
+ ipa_idle_indication_cfg(ipa, 0, true);
+}
+
+/**
+ * ipa_hardware_config() - Primitive hardware initialization
+ * @ipa: IPA pointer
+ */
+static void ipa_hardware_config(struct ipa *ipa)
+{
+ u32 granularity;
+ u32 val;
+
+ /* Fill in backward-compatibility register, based on version */
+ val = ipa_reg_bcr_val(ipa->version);
+ iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ /* Enable open global clocks (hardware workaround) */
+ val = GLOBAL_FMASK;
+ val |= GLOBAL_2X_CLK_FMASK;
+ iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
+
+ /* Disable PA mask to allow HOLB drop (hardware workaround) */
+ val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+ val &= ~PA_MASK_EN;
+ iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+ }
+
+ ipa_hardware_config_comp(ipa);
+
+ /* Configure system bus limits */
+ ipa_hardware_config_qsb(ipa);
+
+ /* Configure aggregation granularity */
+ val = ioread32(ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+ granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ val = u32_encode_bits(granularity, AGGR_GRANULARITY);
+ iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+
+ /* Disable hashed IPv4 and IPv6 routing and filtering for IPA v4.2 */
+ if (ipa->version == IPA_VERSION_4_2)
+ iowrite32(0, ipa->reg_virt + IPA_REG_FILT_ROUT_HASH_EN_OFFSET);
+
+ /* Enable dynamic clock division */
+ ipa_hardware_dcd_config(ipa);
+}
+
+/**
+ * ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
+ * @ipa: IPA pointer
+ *
+ * This restores the power-on reset values (even if they aren't different)
+ */
+static void ipa_hardware_deconfig(struct ipa *ipa)
+{
+ /* Mostly we just leave things as we set them. */
+ ipa_hardware_dcd_deconfig(ipa);
+}
+
+#ifdef IPA_VALIDATE
+
+/* Number of IPA resource groups used, based on version (see IPA_RESOURCE_GROUP_COUNT) */
+static int ipa_resource_group_count(struct ipa *ipa)
+{
+ switch (ipa->version) {
+ case IPA_VERSION_3_5_1:
+ return 3;
+
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static bool ipa_resource_limits_valid(struct ipa *ipa,
+ const struct ipa_resource_data *data)
+{
+ u32 group_count = ipa_resource_group_count(ipa);
+ u32 i;
+ u32 j;
+
+ if (!group_count)
+ return false;
+
+ /* Return an error if a non-zero resource group limit is specified
+ * for a resource not supported by hardware.
+ */
+ for (i = 0; i < data->resource_src_count; i++) {
+ const struct ipa_resource_src *resource;
+
+ resource = &data->resource_src[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ for (i = 0; i < data->resource_dst_count; i++) {
+ const struct ipa_resource_dst *resource;
+
+ resource = &data->resource_dst[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_resource_limits_valid(struct ipa *ipa,
+ const struct ipa_resource_data *data)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+static void
+ipa_resource_config_common(struct ipa *ipa, u32 offset,
+ const struct ipa_resource_limits *xlimits,
+ const struct ipa_resource_limits *ylimits)
+{
+ u32 val;
+
+ val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
+ val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
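+/* Each resource register packs the {min,max} limits for two resource
+ * groups.  As a hypothetical example, X limits {min 1, max 63} for one
+ * group and Y limits {min 1, max 63} for the next are encoded into the
+ * four bit fields of one value and written with a single iowrite32().
+ */
+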
+static void ipa_resource_config_src_01(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
+{
+ u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[0], &resource->limits[1]);
+}
+
+static void ipa_resource_config_src_23(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
+{
+ u32 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[2], &resource->limits[3]);
+}
+
+static void ipa_resource_config_dst_01(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
+{
+ u32 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[0], &resource->limits[1]);
+}
+
+static void ipa_resource_config_dst_23(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
+{
+ u32 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[2], &resource->limits[3]);
+}
+
+static int
+ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
+{
+ u32 i;
+
+ if (!ipa_resource_limits_valid(ipa, data))
+ return -EINVAL;
+
+ for (i = 0; i < data->resource_src_count; i++) {
+ ipa_resource_config_src_01(ipa, &data->resource_src[i]);
+ ipa_resource_config_src_23(ipa, &data->resource_src[i]);
+ }
+
+ for (i = 0; i < data->resource_dst_count; i++) {
+ ipa_resource_config_dst_01(ipa, &data->resource_dst[i]);
+ ipa_resource_config_dst_23(ipa, &data->resource_dst[i]);
+ }
+
+ return 0;
+}
+
+static void ipa_resource_deconfig(struct ipa *ipa)
+{
+ /* Nothing to do */
+}
+
+/**
+ * ipa_config() - Configure IPA hardware
+ * @ipa: IPA pointer
+ * @data: IPA configuration data
+ *
+ * Perform initialization requiring IPA clock to be enabled.
+ */
+static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
+{
+ int ret;
+
+ /* Get a clock reference to allow initialization. This reference
+ * is held after initialization completes, and won't get dropped
+ * unless/until a system suspend request arrives.
+ */
+ atomic_set(&ipa->suspend_ref, 1);
+ ipa_clock_get(ipa);
+
+ ipa_hardware_config(ipa);
+
+ ret = ipa_endpoint_config(ipa);
+ if (ret)
+ goto err_hardware_deconfig;
+
+ ret = ipa_mem_config(ipa);
+ if (ret)
+ goto err_endpoint_deconfig;
+
+ ipa_table_config(ipa);
+
+ /* Assign resource limitation to each group */
+ ret = ipa_resource_config(ipa, data->resource_data);
+ if (ret)
+ goto err_table_deconfig;
+
+ ret = ipa_modem_config(ipa);
+ if (ret)
+ goto err_resource_deconfig;
+
+ return 0;
+
+err_resource_deconfig:
+ ipa_resource_deconfig(ipa);
+err_table_deconfig:
+ ipa_table_deconfig(ipa);
+ ipa_mem_deconfig(ipa);
+err_endpoint_deconfig:
+ ipa_endpoint_deconfig(ipa);
+err_hardware_deconfig:
+ ipa_hardware_deconfig(ipa);
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+
+ return ret;
+}
+
+/**
+ * ipa_deconfig() - Inverse of ipa_config()
+ * @ipa: IPA pointer
+ */
+static void ipa_deconfig(struct ipa *ipa)
+{
+ ipa_modem_deconfig(ipa);
+ ipa_resource_deconfig(ipa);
+ ipa_table_deconfig(ipa);
+ ipa_mem_deconfig(ipa);
+ ipa_endpoint_deconfig(ipa);
+ ipa_hardware_deconfig(ipa);
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+}
+
+static int ipa_firmware_load(struct device *dev)
+{
+ const struct firmware *fw;
+ struct device_node *node;
+ struct resource res;
+ phys_addr_t phys;
+ ssize_t size;
+ void *virt;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(dev, "DT error getting \"memory-region\" property\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "error %d getting \"memory-region\" resource\n",
+ ret);
+ return ret;
+ }
+
+ ret = request_firmware(&fw, IPA_FWS_PATH, dev);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\"\n", ret, IPA_FWS_PATH);
+ return ret;
+ }
+
+ phys = res.start;
+ size = (size_t)resource_size(&res);
+ virt = memremap(phys, size, MEMREMAP_WC);
+ if (!virt) {
+ dev_err(dev, "unable to remap firmware memory\n");
+ ret = -ENOMEM;
+ goto out_release_firmware;
+ }
+
+ ret = qcom_mdt_load(dev, fw, IPA_FWS_PATH, IPA_PAS_ID,
+ virt, phys, size, NULL);
+ if (ret)
+ dev_err(dev, "error %d loading \"%s\"\n", ret, IPA_FWS_PATH);
+ else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
+ dev_err(dev, "error %d authenticating \"%s\"\n", ret,
+ IPA_FWS_PATH);
+
+ memunmap(virt);
+out_release_firmware:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static const struct of_device_id ipa_match[] = {
+ {
+ .compatible = "qcom,sdm845-ipa",
+ .data = &ipa_data_sdm845,
+ },
+ {
+ .compatible = "qcom,sc7180-ipa",
+ .data = &ipa_data_sc7180,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ipa_match);
+
+static phandle of_property_read_phandle(const struct device_node *np,
+ const char *name)
+{
+ struct property *prop;
+ int len = 0;
+
+ prop = of_find_property(np, name, &len);
+ if (!prop || len != sizeof(__be32))
+ return 0;
+
+ return be32_to_cpup(prop->value);
+}
+
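+/* A usage sketch (mirrors the call in ipa_probe() below):
+ *
+ *	phandle ph;
+ *
+ *	ph = of_property_read_phandle(dev->of_node, "modem-remoteproc");
+ *	if (!ph)
+ *		return -EINVAL;	// property missing or malformed
+ */
+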
+/* Check things that can be validated at build time.  This just
+ * groups these things together so that BUILD_BUG_ON() calls don't
+ * clutter the rest of the code.
+ */
+static void ipa_validate_build(void)
+{
+#ifdef IPA_VALIDATE
+ /* We assume we're working on 64-bit hardware */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT));
+
+ /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
+ BUILD_BUG_ON(GSI_EE_AP != 0);
+
+ /* There's no point if we have no channels or event rings */
+ BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
+ BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);
+
+ /* GSI hardware design limits */
+ BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
+ BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);
+
+ /* The number of TREs in a transaction is limited by the channel's
+ * TLV FIFO size. A transaction structure uses 8-bit fields
+	 * to represent the number of TREs it has allocated and used.
+ */
+ BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
+
+ /* Exceeding 128 bytes makes the transaction pool *much* larger */
+ BUILD_BUG_ON(sizeof(struct gsi_trans) > 128);
+
+ /* This is used as a divisor */
+ BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
+#endif /* IPA_VALIDATE */
+}
+
+/**
+ * ipa_probe() - IPA platform driver probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: 0 if successful, or a negative error code (possibly
+ * EPROBE_DEFER)
+ *
+ * This is the main entry point for the IPA driver. Initialization proceeds
+ * in several stages:
+ * - The "init" stage involves activities that can be initialized without
+ * access to the IPA hardware.
+ * - The "config" stage requires the IPA clock to be active so IPA registers
+ * can be accessed, but does not require the use of IPA immediate commands.
+ * - The "setup" stage uses IPA immediate commands, and so requires the GSI
+ * layer to be initialized.
+ *
+ * A Boolean Device Tree "modem-init" property determines whether GSI
+ * initialization will be performed by the AP (Trust Zone) or the modem.
+ * If the AP does GSI initialization, the setup phase is entered after
+ * this has completed successfully. Otherwise the modem initializes
+ * the GSI layer and signals it has finished by sending an SMP2P interrupt
+ * to the AP; this triggers the start of IPA setup.
+ */
+static int ipa_probe(struct platform_device *pdev)
+{
+ struct wakeup_source *wakeup_source;
+ struct device *dev = &pdev->dev;
+ const struct ipa_data *data;
+ struct ipa_clock *clock;
+ struct rproc *rproc;
+ bool modem_alloc;
+ bool modem_init;
+ struct ipa *ipa;
+ phandle phandle;
+ bool prefetch;
+ int ret;
+
+ ipa_validate_build();
+
+ /* If we need Trust Zone, make sure it's available */
+ modem_init = of_property_read_bool(dev->of_node, "modem-init");
+ if (!modem_init)
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ /* We rely on remoteproc to tell us about modem state changes */
+ phandle = of_property_read_phandle(dev->of_node, "modem-remoteproc");
+ if (!phandle) {
+ dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
+ return -EINVAL;
+ }
+
+ rproc = rproc_get_by_phandle(phandle);
+ if (!rproc)
+ return -EPROBE_DEFER;
+
+ /* The clock and interconnects might not be ready when we're
+	 * probed, so this might return -EPROBE_DEFER.
+ */
+ clock = ipa_clock_init(dev);
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ goto err_rproc_put;
+ }
+
+ /* No more EPROBE_DEFER. Get our configuration data */
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ /* This is really IPA_VALIDATE (should never happen) */
+ dev_err(dev, "matched hardware not supported\n");
+ ret = -ENOTSUPP;
+ goto err_clock_exit;
+ }
+
+ /* Create a wakeup source. */
+ wakeup_source = wakeup_source_register(dev, "ipa");
+ if (!wakeup_source) {
+ /* The most likely reason for failure is memory exhaustion */
+ ret = -ENOMEM;
+ goto err_clock_exit;
+ }
+
+ /* Allocate and initialize the IPA structure */
+ ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
+ if (!ipa) {
+ ret = -ENOMEM;
+ goto err_wakeup_source_unregister;
+ }
+
+ ipa->pdev = pdev;
+ dev_set_drvdata(dev, ipa);
+ ipa->modem_rproc = rproc;
+ ipa->clock = clock;
+ atomic_set(&ipa->suspend_ref, 0);
+ ipa->wakeup_source = wakeup_source;
+ ipa->version = data->version;
+
+ ret = ipa_reg_init(ipa);
+ if (ret)
+ goto err_kfree_ipa;
+
+ ret = ipa_mem_init(ipa, data->mem_count, data->mem_data);
+ if (ret)
+ goto err_reg_exit;
+
+ /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */
+ prefetch = ipa->version != IPA_VERSION_3_5_1;
+ /* IPA v4.2 requires the AP to allocate channels for the modem */
+ modem_alloc = ipa->version == IPA_VERSION_4_2;
+
+ ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count,
+ data->endpoint_data, modem_alloc);
+ if (ret)
+ goto err_mem_exit;
+
+	/* Result is a non-zero mask of endpoints that support filtering */
+ ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
+ data->endpoint_data);
+ if (!ipa->filter_map) {
+ ret = -EINVAL;
+ goto err_gsi_exit;
+ }
+
+ ret = ipa_table_init(ipa);
+ if (ret)
+ goto err_endpoint_exit;
+
+ ret = ipa_modem_init(ipa, modem_init);
+ if (ret)
+ goto err_table_exit;
+
+ ret = ipa_config(ipa, data);
+ if (ret)
+ goto err_modem_exit;
+
+	dev_info(dev, "IPA driver initialized\n");
+
+ /* If the modem is doing early initialization, it will trigger a
+	 * call to ipa_setup() when it has finished.  In that case
+ * we're done here.
+ */
+ if (modem_init)
+ return 0;
+
+ /* Otherwise we need to load the firmware and have Trust Zone validate
+ * and install it. If that succeeds we can proceed with setup.
+ */
+ ret = ipa_firmware_load(dev);
+ if (ret)
+ goto err_deconfig;
+
+ ret = ipa_setup(ipa);
+ if (ret)
+ goto err_deconfig;
+
+ return 0;
+
+err_deconfig:
+ ipa_deconfig(ipa);
+err_modem_exit:
+ ipa_modem_exit(ipa);
+err_table_exit:
+ ipa_table_exit(ipa);
+err_endpoint_exit:
+ ipa_endpoint_exit(ipa);
+err_gsi_exit:
+ gsi_exit(&ipa->gsi);
+err_mem_exit:
+ ipa_mem_exit(ipa);
+err_reg_exit:
+ ipa_reg_exit(ipa);
+err_kfree_ipa:
+ kfree(ipa);
+err_wakeup_source_unregister:
+ wakeup_source_unregister(wakeup_source);
+err_clock_exit:
+ ipa_clock_exit(clock);
+err_rproc_put:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int ipa_remove(struct platform_device *pdev)
+{
+ struct ipa *ipa = dev_get_drvdata(&pdev->dev);
+ struct rproc *rproc = ipa->modem_rproc;
+ struct ipa_clock *clock = ipa->clock;
+ struct wakeup_source *wakeup_source;
+ int ret;
+
+ wakeup_source = ipa->wakeup_source;
+
+ if (ipa->setup_complete) {
+ ret = ipa_modem_stop(ipa);
+ if (ret)
+ return ret;
+
+ ipa_teardown(ipa);
+ }
+
+ ipa_deconfig(ipa);
+ ipa_modem_exit(ipa);
+ ipa_table_exit(ipa);
+ ipa_endpoint_exit(ipa);
+ gsi_exit(&ipa->gsi);
+ ipa_mem_exit(ipa);
+ ipa_reg_exit(ipa);
+ kfree(ipa);
+ wakeup_source_unregister(wakeup_source);
+ ipa_clock_exit(clock);
+ rproc_put(rproc);
+
+ return 0;
+}
+
+/**
+ * ipa_suspend() - Power management system suspend callback
+ * @dev: IPA device structure
+ *
+ * Return: Zero
+ *
+ * Called by the PM framework when a system suspend operation is invoked.
+ */
+static int ipa_suspend(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+
+ return 0;
+}
+
+/**
+ * ipa_resume() - Power management system resume callback
+ * @dev: IPA device structure
+ *
+ * Return: Always returns zero
+ *
+ * Called by the PM framework when a system resume operation is invoked.
+ */
+static int ipa_resume(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ /* This clock reference will keep the IPA out of suspend
+ * until we get a power management suspend request.
+ */
+ atomic_set(&ipa->suspend_ref, 1);
+ ipa_clock_get(ipa);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipa_pm_ops = {
+ .suspend_noirq = ipa_suspend,
+ .resume_noirq = ipa_resume,
+};
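+
+/* Note that both callbacks are wired to the "noirq" phase of system
+ * suspend and resume, so they run after device interrupts have been
+ * disabled. An equivalent declaration could use the helper macro from
+ * <linux/pm.h> (shown for comparison only; the macro also wires the
+ * hibernate callbacks):
+ *
+ *    static const struct dev_pm_ops ipa_pm_ops = {
+ *        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(ipa_suspend, ipa_resume)
+ *    };
+ */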
+
+static struct platform_driver ipa_driver = {
+ .probe = ipa_probe,
+ .remove = ipa_remove,
+ .driver = {
+ .name = "ipa",
+ .pm = &ipa_pm_ops,
+ .of_match_table = ipa_match,
+ },
+};
+
+module_platform_driver(ipa_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
new file mode 100644
index 000000000000..42d2c29d9f0c
--- /dev/null
+++ b/drivers/net/ipa/ipa_mem.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+#include "ipa_data.h"
+#include "ipa_table.h"
+#include "gsi_trans.h"
+
+/* "Canary" value placed between memory regions to detect overflow */
+#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
+
+/* Add an immediate command to a transaction that zeroes a memory region */
+static void
+ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t addr = ipa->zero_addr;
+
+ if (!mem->size)
+ return;
+
+ ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
+}
+
+/**
+ * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
+ * @ipa: IPA pointer
+ *
+ * Set up the shared memory regions in IPA local memory. This involves
+ * zero-filling memory regions, and in the case of header memory, telling
+ * the IPA where it's located.
+ *
+ * This function performs the initial setup of this memory. If the modem
+ * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
+ *
+ * The AP informs the modem where its portions of memory are located
+ * in a QMI exchange that occurs at modem startup.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_setup(struct ipa *ipa)
+{
+ dma_addr_t addr = ipa->zero_addr;
+ struct gsi_trans *trans;
+ u32 offset;
+ u16 size;
+
+ /* Get a transaction to define the header memory region and to zero
+ * the processing context and modem memory regions.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+ return -EBUSY;
+ }
+
+ /* Initialize IPA-local header memory. The modem and AP header
+ * regions are contiguous, and initialized together.
+ */
+ offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
+ size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
+ size += ipa->mem[IPA_MEM_AP_HEADER].size;
+
+ ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+
+ gsi_trans_commit_wait(trans);
+
+ /* Tell the hardware where the processing context area is located */
+ iowrite32(ipa->mem_offset + offset,
+ ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);
+
+ return 0;
+}
+
+void ipa_mem_teardown(struct ipa *ipa)
+{
+ /* Nothing to do */
+}
+
+#ifdef IPA_VALIDATE
+
+static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ const struct ipa_mem *mem = &ipa->mem[mem_id];
+ struct device *dev = &ipa->pdev->dev;
+ u16 size_multiple;
+
+ /* Other than modem memory, sizes must be a multiple of 8 */
+ size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
+ if (mem->size % size_multiple)
+ dev_err(dev, "region %u size not a multiple of %u bytes\n",
+ mem_id, size_multiple);
+ else if (mem->offset % 8)
+ dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
+ else if (mem->offset < mem->canary_count * sizeof(__le32))
+ dev_err(dev, "region %u offset too small for %hu canaries\n",
+ mem_id, mem->canary_count);
+ else if (mem->offset + mem->size > ipa->mem_size)
+ dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
+ mem_id, ipa->mem_size);
+ else
+ return true;
+
+ return false;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/**
+ * ipa_mem_config() - Configure IPA shared memory
+ * @ipa: IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id;
+ dma_addr_t addr;
+ u32 mem_size;
+ void *virt;
+ u32 val;
+
+ /* Check the advertised location and size of the shared memory area */
+ val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
+
+ /* The fields in the register are in 8 byte units */
+ ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
+ /* Make sure the end is within the region's mapped space */
+ mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
+
+ /* If the sizes don't match, issue a warning */
+ if (ipa->mem_offset + mem_size > ipa->mem_size) {
+ dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
+ mem_size);
+ } else if (ipa->mem_offset + mem_size < ipa->mem_size) {
+ dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
+ mem_size);
+ ipa->mem_size = mem_size;
+ }
+
+ /* Prealloc DMA memory for zeroing regions */
+ virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+ ipa->zero_addr = addr;
+ ipa->zero_virt = virt;
+ ipa->zero_size = IPA_MEM_MAX;
+
+ /* Verify each defined memory region is valid, and if indicated
+ * for the region, write "canary" values in the space prior to
+ * the region's base address.
+ */
+ for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
+ const struct ipa_mem *mem = &ipa->mem[mem_id];
+ u16 canary_count;
+ __le32 *canary;
+
+ /* Validate all regions (even undefined ones) */
+ if (!ipa_mem_valid(ipa, mem_id))
+ goto err_dma_free;
+
+ /* Skip over undefined regions */
+ if (!mem->offset && !mem->size)
+ continue;
+
+ canary_count = mem->canary_count;
+ if (!canary_count)
+ continue;
+
+ /* Write canary values in the space before the region */
+ canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
+ do
+ *--canary = IPA_MEM_CANARY_VAL;
+ while (--canary_count);
+ }
+
+ /* Make sure filter and route table memory regions are valid */
+ if (!ipa_table_valid(ipa))
+ goto err_dma_free;
+
+ /* Validate memory-related properties relevant to immediate commands */
+ if (!ipa_cmd_data_valid(ipa))
+ goto err_dma_free;
+
+ /* Verify the microcontroller ring alignment (0 is OK too) */
+ if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
+ dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
+ goto err_dma_free;
+ }
+
+ return 0;
+
+err_dma_free:
+ dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);
+
+ return -EINVAL;
+}
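+
+/* The canary loop in ipa_mem_config() writes 32-bit values downward
+ * from a region's base address, so the canaries occupy the bytes
+ * immediately below the region's offset. Distilled into a sketch
+ * (the driver open-codes this; the count is known to be non-zero):
+ *
+ *    static void example_write_canaries(__le32 *base, u16 count)
+ *    {
+ *        __le32 *canary = base;    // region base; canaries lie below
+ *
+ *        while (count--)
+ *            *--canary = IPA_MEM_CANARY_VAL;
+ *    }
+ */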
+
+/* Inverse of ipa_mem_config() */
+void ipa_mem_deconfig(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+
+ dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
+ ipa->zero_size = 0;
+ ipa->zero_virt = NULL;
+ ipa->zero_addr = 0;
+}
+
+/**
+ * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
+ * @ipa: IPA pointer
+ *
+ * Zero regions of IPA-local memory used by the modem. These are configured
+ * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
+ * restarts via SSR we need to re-initialize them. A QMI message tells the
+ * modem where to find the regions of IPA local memory it needs to know
+ * about (including these).
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_zero_modem(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+
+ /* Get a transaction to zero the modem memory, modem header,
+ * and modem processing context regions.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 3);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction to zero modem memory\n");
+ return -EBUSY;
+ }
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/* Perform memory region-related initialization */
+int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct resource *res;
+ int ret;
+
+ if (count > IPA_MEM_COUNT) {
+ dev_err(dev, "to many memory regions (%u > %u)\n",
+ count, IPA_MEM_COUNT);
+ return -EINVAL;
+ }
+
+ ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(dev, "error %d setting DMA mask\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
+ "ipa-shared");
+ if (!res) {
+ dev_err(dev,
+ "DT error getting \"ipa-shared\" memory property\n");
+ return -ENODEV;
+ }
+
+ ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
+ if (!ipa->mem_virt) {
+ dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
+ return -ENOMEM;
+ }
+
+ ipa->mem_addr = res->start;
+ ipa->mem_size = resource_size(res);
+
+ /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
+ ipa->mem = mem;
+
+ return 0;
+}
+
+/* Inverse of ipa_mem_init() */
+void ipa_mem_exit(struct ipa *ipa)
+{
+ memunmap(ipa->mem_virt);
+}
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
new file mode 100644
index 000000000000..065cb499ebe5
--- /dev/null
+++ b/drivers/net/ipa/ipa_mem.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_MEM_H_
+#define _IPA_MEM_H_
+
+struct ipa;
+
+/**
+ * DOC: IPA Local Memory
+ *
+ * The IPA has a block of shared memory, divided into regions used for
+ * specific purposes.
+ *
+ * The regions within the shared block are bounded by an offset (relative to
+ * the "ipa-shared" memory range) and size found in the IPA_SHARED_MEM_SIZE
+ * register.
+ *
+ * Each region is optionally preceded by one or more 32-bit "canary" values.
+ * These are meant to detect out-of-range writes (if they become corrupted).
+ * A given region (such as a filter or routing table) has the same number
+ * of canaries for all IPA hardware versions. Still, the number used is
+ * defined in the config data, allowing for generic handling of regions.
+ *
+ * The set of memory regions is defined in configuration data. They are
+ * subject to these constraints:
+ * - a zero offset and zero size represents an undefined region
+ * - a region's offset is defined to be *past* all "canary" values
+ * - offset must be large enough to account for all canaries
+ * - a region's size may be zero, but may still have canaries
+ * - all offsets must be 8-byte aligned
+ * - most sizes must be a multiple of 8
+ * - modem memory size must be a multiple of 4
+ * - the microcontroller ring offset must be a multiple of 1024
+ */
+
+/* The maximum allowed size for any memory region */
+#define IPA_MEM_MAX (2 * PAGE_SIZE)
+
+/* IPA-resident memory region ids */
+enum ipa_mem_id {
+ IPA_MEM_UC_SHARED, /* 0 canaries */
+ IPA_MEM_UC_INFO, /* 0 canaries */
+ IPA_MEM_V4_FILTER_HASHED, /* 2 canaries */
+ IPA_MEM_V4_FILTER, /* 2 canaries */
+ IPA_MEM_V6_FILTER_HASHED, /* 2 canaries */
+ IPA_MEM_V6_FILTER, /* 2 canaries */
+ IPA_MEM_V4_ROUTE_HASHED, /* 2 canaries */
+ IPA_MEM_V4_ROUTE, /* 2 canaries */
+ IPA_MEM_V6_ROUTE_HASHED, /* 2 canaries */
+ IPA_MEM_V6_ROUTE, /* 2 canaries */
+ IPA_MEM_MODEM_HEADER, /* 2 canaries */
+ IPA_MEM_AP_HEADER, /* 0 canaries */
+ IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */
+ IPA_MEM_AP_PROC_CTX, /* 0 canaries */
+ IPA_MEM_PDN_CONFIG, /* 2 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_QUOTA, /* 2 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_MODEM, /* 0 canaries */
+ IPA_MEM_UC_EVENT_RING, /* 1 canary */
+ IPA_MEM_COUNT, /* Number of regions (not an index) */
+};
+
+/**
+ * struct ipa_mem - IPA local memory region description
+ * @offset: offset in IPA memory space to base of the region
+ * @size: size in bytes of the region
+ * @canary_count: number of 32-bit "canary" values that precede the region
+ */
+struct ipa_mem {
+ u32 offset;
+ u16 size;
+ u16 canary_count;
+};
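+
+/* A hypothetical configuration entry, to make the fields concrete:
+ * a 512-byte region at offset 0x07d0 preceded by two canaries, which
+ * occupy the eight bytes at 0x07c8..0x07cf. The values are
+ * illustrative only, not taken from real platform data:
+ *
+ *    [IPA_MEM_V4_FILTER] = {
+ *        .offset        = 0x07d0,
+ *        .size          = 0x0200,
+ *        .canary_count  = 2,
+ *    },
+ */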
+
+int ipa_mem_config(struct ipa *ipa);
+void ipa_mem_deconfig(struct ipa *ipa);
+
+int ipa_mem_setup(struct ipa *ipa);
+void ipa_mem_teardown(struct ipa *ipa);
+
+int ipa_mem_zero_modem(struct ipa *ipa);
+
+int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem);
+void ipa_mem_exit(struct ipa *ipa);
+
+#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
new file mode 100644
index 000000000000..ed10818dd99f
--- /dev/null
+++ b/drivers/net/ipa/ipa_modem.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_rmnet.h>
+#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_mem.h"
+#include "ipa_modem.h"
+#include "ipa_smp2p.h"
+#include "ipa_qmi.h"
+
+#define IPA_NETDEV_NAME "rmnet_ipa%d"
+#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
+#define IPA_NETDEV_TIMEOUT 10 /* seconds */
+
+enum ipa_modem_state {
+ IPA_MODEM_STATE_STOPPED = 0,
+ IPA_MODEM_STATE_STARTING,
+ IPA_MODEM_STATE_RUNNING,
+ IPA_MODEM_STATE_STOPPING,
+};
+
+/** struct ipa_priv - IPA network device private data */
+struct ipa_priv {
+ struct ipa *ipa;
+};
+
+/** ipa_open() - Opens the modem network interface */
+static int ipa_open(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+ int ret;
+
+ ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ if (ret)
+ return ret;
+ ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ if (ret)
+ goto err_disable_tx;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_disable_tx:
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+
+ return ret;
+}
+
+/** ipa_stop() - Stops the modem network interface. */
+static int ipa_stop(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ netif_stop_queue(netdev);
+
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+
+ return 0;
+}
+
+/** ipa_start_xmit() - Transmits an skb.
+ * @skb: skb to be transmitted
+ * @netdev: network device
+ *
+ * Return codes:
+ * NETDEV_TX_OK: Success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ */
+static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct net_device_stats *stats = &netdev->stats;
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa_endpoint *endpoint;
+ struct ipa *ipa = priv->ipa;
+ u32 skb_len = skb->len;
+ int ret;
+
+ if (!skb_len)
+ goto err_drop_skb;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
+ if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
+ goto err_drop_skb;
+
+ ret = ipa_endpoint_skb_tx(endpoint, skb);
+ if (ret) {
+ if (ret != -E2BIG)
+ return NETDEV_TX_BUSY;
+ goto err_drop_skb;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += skb_len;
+
+ return NETDEV_TX_OK;
+
+err_drop_skb:
+ dev_kfree_skb_any(skb);
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &netdev->stats;
+
+ if (skb) {
+ skb->dev = netdev;
+ skb->protocol = htons(ETH_P_MAP);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ (void)netif_receive_skb(skb);
+ } else {
+ stats->rx_dropped++;
+ }
+}
+
+static const struct net_device_ops ipa_modem_ops = {
+ .ndo_open = ipa_open,
+ .ndo_stop = ipa_stop,
+ .ndo_start_xmit = ipa_start_xmit,
+};
+
+/** ipa_modem_netdev_setup() - netdev setup function for the modem */
+static void ipa_modem_netdev_setup(struct net_device *netdev)
+{
+ netdev->netdev_ops = &ipa_modem_ops;
+ ether_setup(netdev);
+ /* No header ops (override value set by ether_setup()) */
+ netdev->header_ops = NULL;
+ netdev->type = ARPHRD_RAWIP;
+ netdev->hard_header_len = 0;
+ netdev->max_mtu = IPA_MTU;
+ netdev->mtu = netdev->max_mtu;
+ netdev->addr_len = 0;
+ netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ /* The endpoint is configured for QMAP */
+ netdev->needed_headroom = sizeof(struct rmnet_map_header);
+ netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
+ netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
+ netdev->hw_features = NETIF_F_SG;
+}
+
+/** ipa_modem_suspend() - suspend callback
+ * @netdev: Network device
+ *
+ * Suspend the modem's endpoints.
+ */
+void ipa_modem_suspend(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ netif_stop_queue(netdev);
+
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+}
+
+/** ipa_modem_resume() - resume callback
+ * @netdev: Network device
+ *
+ * Resume the modem's endpoints.
+ */
+void ipa_modem_resume(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+
+ netif_wake_queue(netdev);
+}
+
+int ipa_modem_start(struct ipa *ipa)
+{
+ enum ipa_modem_state state;
+ struct net_device *netdev;
+ struct ipa_priv *priv;
+ int ret;
+
+ /* Only attempt to start the modem if it's stopped */
+ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
+ IPA_MODEM_STATE_STARTING);
+
+ /* Silently ignore attempts when running, or when changing state */
+ if (state != IPA_MODEM_STATE_STOPPED)
+ return 0;
+
+ netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
+ NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
+ if (!netdev) {
+ ret = -ENOMEM;
+ goto out_set_state;
+ }
+
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+
+ priv = netdev_priv(netdev);
+ priv->ipa = ipa;
+
+ ret = register_netdev(netdev);
+ if (ret)
+ free_netdev(netdev);
+ else
+ ipa->modem_netdev = netdev;
+
+out_set_state:
+ if (ret)
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ else
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
+ smp_mb__after_atomic();
+
+ return ret;
+}
+
+int ipa_modem_stop(struct ipa *ipa)
+{
+ struct net_device *netdev = ipa->modem_netdev;
+ enum ipa_modem_state state;
+ int ret;
+
+ /* Only attempt to stop the modem if it's running */
+ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
+ IPA_MODEM_STATE_STOPPING);
+
+ /* Silently ignore attempts when already stopped */
+ if (state == IPA_MODEM_STATE_STOPPED)
+ return 0;
+
+ /* If we're somewhere between stopped and starting, we're busy */
+ if (state != IPA_MODEM_STATE_RUNNING)
+ return -EBUSY;
+
+ /* Prevent the modem from triggering a call to ipa_setup() */
+ ipa_smp2p_disable(ipa);
+
+ if (netdev) {
+ /* Stop the queue and disable the endpoints if it's open */
+ ret = ipa_stop(netdev);
+ if (ret)
+ goto out_set_state;
+
+ ipa->modem_netdev = NULL;
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ } else {
+ ret = 0;
+ }
+
+out_set_state:
+ if (ret)
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
+ else
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ smp_mb__after_atomic();
+
+ return ret;
+}
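+
+/* Both ipa_modem_start() and ipa_modem_stop() guard their state
+ * transitions with the same cmpxchg pattern, which can be read as a
+ * "try to move from state A to state B" primitive:
+ *
+ *    static bool example_try_transition(atomic_t *state, int from, int to)
+ *    {
+ *        // Succeeds only if *state was exactly "from"; any other
+ *        // value means a transition is in progress or already done.
+ *        return atomic_cmpxchg(state, from, to) == from;
+ *    }
+ */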
+
+/* Treat a "clean" modem stop the same as a crash */
+static void ipa_modem_crashed(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ int ret;
+
+ ipa_endpoint_modem_pause_all(ipa, true);
+
+ ipa_endpoint_modem_hol_block_clear_all(ipa);
+
+ ipa_table_reset(ipa, true);
+
+ ret = ipa_table_hash_flush(ipa);
+ if (ret)
+ dev_err(dev, "error %d flushing hash caches\n", ret);
+
+ ret = ipa_endpoint_modem_exception_reset_all(ipa);
+ if (ret)
+ dev_err(dev, "error %d resetting exception endpoint\n", ret);
+
+ ipa_endpoint_modem_pause_all(ipa, false);
+
+ ret = ipa_modem_stop(ipa);
+ if (ret)
+ dev_err(dev, "error %d stopping modem\n", ret);
+
+ /* Now prepare for the next modem boot */
+ ret = ipa_mem_zero_modem(ipa);
+ if (ret)
+ dev_err(dev, "error %d zeroing modem memory regions\n", ret);
+}
+
+static void ipa_modem_notify(void *data, enum qcom_rproc_event event)
+{
+ struct ipa *ipa = data;
+ struct device *dev;
+
+ dev = &ipa->pdev->dev;
+ switch (event) {
+ case MODEM_STARTING:
+ dev_info(dev, "received modem starting event\n");
+ ipa_smp2p_notify_reset(ipa);
+ break;
+
+ case MODEM_RUNNING:
+ dev_info(dev, "received modem running event\n");
+ break;
+
+ case MODEM_STOPPING:
+ case MODEM_CRASHED:
+ dev_info(dev, "received modem %s event\n",
+ event == MODEM_STOPPING ? "stopping"
+ : "crashed");
+ if (ipa->setup_complete)
+ ipa_modem_crashed(ipa);
+ break;
+
+ case MODEM_OFFLINE:
+ dev_info(dev, "received modem offline event\n");
+ break;
+
+ case MODEM_REMOVING:
+ dev_info(dev, "received modem stopping event\n");
+ break;
+
+ default:
+ dev_err(&ipa->pdev->dev, "unrecognized event %u\n", event);
+ break;
+ }
+}
+
+int ipa_modem_init(struct ipa *ipa, bool modem_init)
+{
+ return ipa_smp2p_init(ipa, modem_init);
+}
+
+void ipa_modem_exit(struct ipa *ipa)
+{
+ ipa_smp2p_exit(ipa);
+}
+
+int ipa_modem_config(struct ipa *ipa)
+{
+ return qcom_register_ipa_notify(ipa->modem_rproc, ipa_modem_notify,
+ ipa);
+}
+
+void ipa_modem_deconfig(struct ipa *ipa)
+{
+ qcom_deregister_ipa_notify(ipa->modem_rproc);
+}
+
+int ipa_modem_setup(struct ipa *ipa)
+{
+ return ipa_qmi_setup(ipa);
+}
+
+void ipa_modem_teardown(struct ipa *ipa)
+{
+ ipa_qmi_teardown(ipa);
+}
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
new file mode 100644
index 000000000000..2de3e216d1d4
--- /dev/null
+++ b/drivers/net/ipa/ipa_modem.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_MODEM_H_
+#define _IPA_MODEM_H_
+
+struct ipa;
+struct ipa_endpoint;
+struct net_device;
+struct sk_buff;
+
+int ipa_modem_start(struct ipa *ipa);
+int ipa_modem_stop(struct ipa *ipa);
+
+void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb);
+
+void ipa_modem_suspend(struct net_device *netdev);
+void ipa_modem_resume(struct net_device *netdev);
+
+int ipa_modem_init(struct ipa *ipa, bool modem_init);
+void ipa_modem_exit(struct ipa *ipa);
+
+int ipa_modem_config(struct ipa *ipa);
+void ipa_modem_deconfig(struct ipa *ipa);
+
+int ipa_modem_setup(struct ipa *ipa);
+void ipa_modem_teardown(struct ipa *ipa);
+
+#endif /* _IPA_MODEM_H_ */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
new file mode 100644
index 000000000000..5090f0f923ad
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/qrtr.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "ipa_modem.h"
+#include "ipa_qmi_msg.h"
+
+/**
+ * DOC: AP/Modem QMI Handshake
+ *
+ * The AP and modem perform a "handshake" at initialization time to ensure
+ * both sides know when everything is ready to begin operating. The AP
+ * driver (this code) uses two QMI handles (endpoints) for this: a client
+ * using a service on the modem, and a server to handle modem requests (and
+ * to supply an indication message from the AP). Once the handshake is
+ * complete, the AP and modem may begin IPA operation. This occurs
+ * only when the AP IPA driver, modem IPA driver, and IPA microcontroller
+ * are ready.
+ *
+ * The QMI service on the modem expects to receive an INIT_DRIVER request from
+ * the AP, which contains parameters used by the modem during initialization.
+ * The AP sends this request as soon as it knows the modem-side service
+ * is available. The modem responds to this request, and if this response
+ * contains a success result, the AP knows the modem IPA driver is ready.
+ *
+ * The modem is responsible for loading firmware on the IPA microcontroller.
+ * This occurs only during the initial modem boot. The modem sends a
+ * separate DRIVER_INIT_COMPLETE request to the AP to report that the
+ * microcontroller is ready. The AP may assume the microcontroller is
+ * ready and remain so (even if the modem reboots) once it has received
+ * and responded to this request.
+ *
+ * There is one final exchange involved in the handshake. It is required
+ * on the initial modem boot, but optional (though typical in practice) on
+ * subsequent boots. The modem expects to receive a final INIT_COMPLETE
+ * indication message from the AP when it is about to begin its normal
+ * operation. The AP will only send this message after it has received
+ * and responded to an INDICATION_REGISTER request from the modem.
+ *
+ * So in summary:
+ * - Whenever the AP learns the modem has booted and its IPA QMI service
+ * is available, it sends an INIT_DRIVER request to the modem. The
+ * modem supplies a success response when it is ready to operate.
+ * - On the initial boot, the modem sets up the IPA microcontroller, and
+ * sends a DRIVER_INIT_COMPLETE request to the AP when this is done.
+ * - When the modem is ready to receive an INIT_COMPLETE indication from
+ * the AP, it sends an INDICATION_REGISTER request to the AP.
+ * - On the initial modem boot, everything is ready when:
+ * - AP has received a success response from its INIT_DRIVER request
+ * - AP has responded to a DRIVER_INIT_COMPLETE request
+ * - AP has responded to an INDICATION_REGISTER request from the modem
+ * - AP has sent an INIT_COMPLETE indication to the modem
+ * - On subsequent modem boots, everything is ready when:
+ * - AP has received a success response from its INIT_DRIVER request
+ * - AP has responded to a DRIVER_INIT_COMPLETE request
+ * - The INDICATION_REGISTER request and INIT_COMPLETE indication are
+ * optional for non-initial modem boots, and have no bearing on the
+ * determination of when things are "ready"
+ */
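+
+/* The readiness rule described above, distilled into a predicate (a
+ * sketch mirroring ipa_qmi_ready() below; the field names are those
+ * of struct ipa_qmi):
+ *
+ *    static bool example_qmi_all_ready(const struct ipa_qmi *qmi)
+ *    {
+ *        if (!qmi->modem_ready || !qmi->uc_ready)
+ *            return false;
+ *
+ *        // Only the initial boot also requires the indication
+ *        return !qmi->initial_boot || qmi->indication_sent;
+ *    }
+ */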
+
+#define IPA_HOST_SERVICE_SVC_ID 0x31
+#define IPA_HOST_SVC_VERS 1
+#define IPA_HOST_SERVICE_INS_ID 1
+
+#define IPA_MODEM_SERVICE_SVC_ID 0x31
+#define IPA_MODEM_SERVICE_INS_ID 2
+#define IPA_MODEM_SVC_VERS 1
+
+#define QMI_INIT_DRIVER_TIMEOUT 60000 /* A minute in milliseconds */
+
+/* Send an INIT_COMPLETE indication message to the modem */
+static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ struct qmi_handle *qmi = &ipa_qmi->server_handle;
+ struct sockaddr_qrtr *sq = &ipa_qmi->modem_sq;
+ struct ipa_init_complete_ind ind = { };
+ int ret;
+
+ ind.status.result = QMI_RESULT_SUCCESS_V01;
+ ind.status.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_indication(qmi, sq, IPA_QMI_INIT_COMPLETE,
+ IPA_QMI_INIT_COMPLETE_IND_SZ,
+ ipa_init_complete_ind_ei, &ind);
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d sending init complete indication\n", ret);
+ else
+ ipa_qmi->indication_sent = true;
+}
+
+/* If requested (and not already sent) send the INIT_COMPLETE indication */
+static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
+{
+ if (!ipa_qmi->indication_requested)
+ return;
+
+ if (ipa_qmi->indication_sent)
+ return;
+
+ ipa_server_init_complete(ipa_qmi);
+}
+
+/* Determine whether everything is ready to start normal operation.
+ * We know everything (else) is ready when we know the IPA driver on
+ * the modem is ready, and the microcontroller is ready.
+ *
+ * When the modem boots (or reboots), the handshake sequence starts
+ * with the AP sending the modem an INIT_DRIVER request. Within
+ * that request, the uc_loaded flag will be zero (false) for an
+ * initial boot, non-zero (true) for a subsequent (SSR) boot.
+ */
+static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ int ret;
+
+ /* We aren't ready until the modem and microcontroller are */
+ if (!ipa_qmi->modem_ready || !ipa_qmi->uc_ready)
+ return;
+
+ /* Send the indication message if it was requested */
+ ipa_qmi_indication(ipa_qmi);
+
+ /* The initial boot requires us to send the indication. */
+ if (ipa_qmi->initial_boot) {
+ if (!ipa_qmi->indication_sent)
+ return;
+
+ /* The initial modem boot completed successfully */
+ ipa_qmi->initial_boot = false;
+ }
+
+ /* We're ready. Start up normal operation */
+ ret = ipa_modem_start(ipa);
+ if (ret)
+ dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+}
+
+/* All QMI clients from the modem node are gone (modem shut down or crashed). */
+static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node)
+{
+ struct ipa_qmi *ipa_qmi;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+
+ /* The modem client and server go away at the same time */
+ memset(&ipa_qmi->modem_sq, 0, sizeof(ipa_qmi->modem_sq));
+
+ /* initial_boot doesn't change when modem reboots */
+ /* uc_ready doesn't change when modem reboots */
+ ipa_qmi->modem_ready = false;
+ ipa_qmi->indication_requested = false;
+ ipa_qmi->indication_sent = false;
+}
+
+static struct qmi_ops ipa_server_ops = {
+ .bye = ipa_server_bye,
+};
+
+/* Callback function to handle an INDICATION_REGISTER request message from the
+ * modem. This informs the AP that the modem is now ready to receive the
+ * INIT_COMPLETE indication message.
+ */
+static void ipa_server_indication_register(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ipa_indication_register_rsp rsp = { };
+ struct ipa_qmi *ipa_qmi;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, IPA_QMI_INDICATION_REGISTER,
+ IPA_QMI_INDICATION_REGISTER_RSP_SZ,
+ ipa_indication_register_rsp_ei, &rsp);
+ if (!ret) {
+ ipa_qmi->indication_requested = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error %d sending register indication response\n", ret);
+ }
+}
+
+/* Respond to a DRIVER_INIT_COMPLETE request message from the modem. */
+static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ipa_driver_init_complete_rsp rsp = { };
+ struct ipa_qmi *ipa_qmi;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, IPA_QMI_DRIVER_INIT_COMPLETE,
+ IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ,
+ ipa_driver_init_complete_rsp_ei, &rsp);
+ if (!ret) {
+ ipa_qmi->uc_ready = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error %d sending init complete response\n", ret);
+ }
+}
+
+/* The server handles two request message types sent by the modem. */
+static struct qmi_msg_handler ipa_server_msg_handlers[] = {
+ {
+ .type = QMI_REQUEST,
+ .msg_id = IPA_QMI_INDICATION_REGISTER,
+ .ei = ipa_indication_register_req_ei,
+ .decoded_size = IPA_QMI_INDICATION_REGISTER_REQ_SZ,
+ .fn = ipa_server_indication_register,
+ },
+ {
+ .type = QMI_REQUEST,
+ .msg_id = IPA_QMI_DRIVER_INIT_COMPLETE,
+ .ei = ipa_driver_init_complete_req_ei,
+ .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
+ .fn = ipa_server_driver_init_complete,
+ },
+};
+
+/* Handle an INIT_DRIVER response message from the modem. */
+static void ipa_client_init_driver(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *decoded)
+{
+ txn->result = 0; /* IPA_QMI_INIT_DRIVER request was successful */
+ complete(&txn->completion);
+}
+
+/* The client handles one response message type sent by the modem. */
+static struct qmi_msg_handler ipa_client_msg_handlers[] = {
+ {
+ .type = QMI_RESPONSE,
+ .msg_id = IPA_QMI_INIT_DRIVER,
+ .ei = ipa_init_modem_driver_rsp_ei,
+ .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ,
+ .fn = ipa_client_init_driver,
+ },
+};
+
+/* Return a pointer to an init modem driver request structure, which contains
+ * configuration parameters for the modem. The modem may be started multiple
+ * times, but generally these parameters don't change so we can reuse the
+ * request structure once it's initialized. The only exception is the
+ * skip_uc_load field, which will be set only after the microcontroller has
+ * reported it has completed its initialization.
+ */
+static const struct ipa_init_modem_driver_req *
+init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ static struct ipa_init_modem_driver_req req;
+ const struct ipa_mem *mem;
+
+ /* The microcontroller is initialized on the first boot */
+ req.skip_uc_load_valid = 1;
+ req.skip_uc_load = ipa->uc_loaded ? 1 : 0;
+
+ /* We only have to initialize most of it once */
+ if (req.platform_type_valid)
+ return &req;
+
+ req.platform_type_valid = 1;
+ req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
+
+ mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ if (mem->size) {
+ req.hdr_tbl_info_valid = 1;
+ req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+ req.v4_route_tbl_info_valid = 1;
+ req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+ req.v6_route_tbl_info_valid = 1;
+ req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER];
+ req.v4_filter_tbl_start_valid = 1;
+ req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
+
+ mem = &ipa->mem[IPA_MEM_V6_FILTER];
+ req.v6_filter_tbl_start_valid = 1;
+ req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
+
+ mem = &ipa->mem[IPA_MEM_MODEM];
+ if (mem->size) {
+ req.modem_mem_info_valid = 1;
+ req.modem_mem_info.start = ipa->mem_offset + mem->offset;
+ req.modem_mem_info.size = mem->size;
+ }
+
+ req.ctrl_comm_dest_end_pt_valid = 1;
+ req.ctrl_comm_dest_end_pt =
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->endpoint_id;
+
+ /* skip_uc_load_valid and skip_uc_load are set above */
+
+ mem = &ipa->mem[IPA_MEM_MODEM_PROC_CTX];
+ if (mem->size) {
+ req.hdr_proc_ctx_tbl_info_valid = 1;
+ req.hdr_proc_ctx_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.hdr_proc_ctx_tbl_info.end =
+ req.hdr_proc_ctx_tbl_info.start + mem->size - 1;
+ }
+
+ /* Nothing to report for the compression table (zip_tbl_info) */
+
+ mem = &ipa->mem[IPA_MEM_V4_ROUTE_HASHED];
+ if (mem->size) {
+ req.v4_hash_route_tbl_info_valid = 1;
+ req.v4_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.v4_hash_route_tbl_info.count =
+ mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+ if (mem->size) {
+ req.v6_hash_route_tbl_info_valid = 1;
+ req.v6_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.v6_hash_route_tbl_info.count =
+ mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+ if (mem->size) {
+ req.v4_hash_filter_tbl_start_valid = 1;
+ req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V6_FILTER_HASHED];
+ if (mem->size) {
+ req.v6_hash_filter_tbl_start_valid = 1;
+ req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
+ }
+
+ /* The stats fields below are only valid for IPA v4.0 and above */
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ mem = &ipa->mem[IPA_MEM_STATS_QUOTA];
+ if (mem->size) {
+ req.hw_stats_quota_base_addr_valid = 1;
+ req.hw_stats_quota_base_addr =
+ ipa->mem_offset + mem->offset;
+ req.hw_stats_quota_size_valid = 1;
+ req.hw_stats_quota_size = ipa->mem_offset + mem->size;
+ }
+
+ mem = &ipa->mem[IPA_MEM_STATS_DROP];
+ if (mem->size) {
+ req.hw_stats_drop_base_addr_valid = 1;
+ req.hw_stats_drop_base_addr =
+ ipa->mem_offset + mem->offset;
+ req.hw_stats_drop_size_valid = 1;
+ req.hw_stats_drop_size = ipa->mem_offset + mem->size;
+ }
+ }
+
+ return &req;
+}
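+
+/* The function above uses a one-time-initialization idiom: the request
+ * lives in a function-local static, one field (skip_uc_load) is
+ * refreshed on every call, and an always-set valid flag
+ * (platform_type_valid) doubles as the initialized-once marker. This
+ * is safe here only because the function is called from a single work
+ * item; the idiom is not safe for concurrent callers in general.
+ */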
+
+/* Send an INIT_DRIVER request to the modem, and wait for it to complete. */
+static void ipa_client_init_driver_work(struct work_struct *work)
+{
+ unsigned long timeout = msecs_to_jiffies(QMI_INIT_DRIVER_TIMEOUT);
+ const struct ipa_init_modem_driver_req *req;
+ struct ipa_qmi *ipa_qmi;
+ struct qmi_handle *qmi;
+ struct qmi_txn txn;
+ struct device *dev;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work);
+ qmi = &ipa_qmi->client_handle,
+
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+ dev = &ipa->pdev->dev;
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error %d preparing init driver request\n", ret);
+ return;
+ }
+
+ /* Send the request, and if successful wait for its response */
+ req = init_modem_driver_req(ipa_qmi);
+ ret = qmi_send_request(qmi, &ipa_qmi->modem_sq, &txn,
+ IPA_QMI_INIT_DRIVER, IPA_QMI_INIT_DRIVER_REQ_SZ,
+ ipa_init_modem_driver_req_ei, req);
+ if (ret)
+ dev_err(dev, "error %d sending init driver request\n", ret);
+ else if ((ret = qmi_txn_wait(&txn, timeout)))
+ dev_err(dev, "error %d awaiting init driver response\n", ret);
+
+ if (!ret) {
+ ipa_qmi->modem_ready = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ /* If any error occurs we need to cancel the transaction */
+ qmi_txn_cancel(&txn);
+ }
+}
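+
+/* The QMI transaction lifecycle used above, in outline (error paths
+ * elided; after a successful qmi_txn_init(), any later failure must
+ * be followed by qmi_txn_cancel(), as the code above does):
+ *
+ *    qmi_txn_init(qmi, &txn, NULL, NULL);
+ *    qmi_send_request(qmi, sq, &txn, msg_id, len, ei, req);
+ *    ret = qmi_txn_wait(&txn, timeout);
+ */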
+
+/* The modem server is now available. We will send an INIT_DRIVER request
+ * to the modem, but can't wait for it to complete in this callback thread.
+ * Schedule a worker on the global workqueue to do that for us.
+ */
+static int
+ipa_client_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct ipa_qmi *ipa_qmi;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, client_handle);
+
+ ipa_qmi->modem_sq.sq_family = AF_QIPCRTR;
+ ipa_qmi->modem_sq.sq_node = svc->node;
+ ipa_qmi->modem_sq.sq_port = svc->port;
+
+ schedule_work(&ipa_qmi->init_driver_work);
+
+ return 0;
+}
+
+static struct qmi_ops ipa_client_ops = {
+ .new_server = ipa_client_new_server,
+};
+
+/* This is called by ipa_setup(). We can be informed via remoteproc that
+ * the modem has shut down, in which case this function will be called
+ * again to prepare for it coming back up again.
+ */
+int ipa_qmi_setup(struct ipa *ipa)
+{
+ struct ipa_qmi *ipa_qmi = &ipa->qmi;
+ int ret;
+
+ ipa_qmi->initial_boot = true;
+
+ /* The server handle is used to handle the DRIVER_INIT_COMPLETE
+ * request on the first modem boot. It also receives the
+ * INDICATION_REGISTER request on the first boot and (optionally)
+ * subsequent boots. The INIT_COMPLETE indication message is
+ * sent over the server handle if requested.
+ */
+ ret = qmi_handle_init(&ipa_qmi->server_handle,
+ IPA_QMI_SERVER_MAX_RCV_SZ, &ipa_server_ops,
+ ipa_server_msg_handlers);
+ if (ret)
+ return ret;
+
+ ret = qmi_add_server(&ipa_qmi->server_handle, IPA_HOST_SERVICE_SVC_ID,
+ IPA_HOST_SVC_VERS, IPA_HOST_SERVICE_INS_ID);
+ if (ret)
+ goto err_server_handle_release;
+
+ /* The client handle is only used for sending an INIT_DRIVER request
+ * to the modem, and receiving its response message.
+ */
+ ret = qmi_handle_init(&ipa_qmi->client_handle,
+ IPA_QMI_CLIENT_MAX_RCV_SZ, &ipa_client_ops,
+ ipa_client_msg_handlers);
+ if (ret)
+ goto err_server_handle_release;
+
+ /* We need this ready before the service lookup is added */
+ INIT_WORK(&ipa_qmi->init_driver_work, ipa_client_init_driver_work);
+
+ ret = qmi_add_lookup(&ipa_qmi->client_handle, IPA_MODEM_SERVICE_SVC_ID,
+ IPA_MODEM_SVC_VERS, IPA_MODEM_SERVICE_INS_ID);
+ if (ret)
+ goto err_client_handle_release;
+
+ return 0;
+
+err_client_handle_release:
+ /* Releasing the handle also removes registered lookups */
+ qmi_handle_release(&ipa_qmi->client_handle);
+ memset(&ipa_qmi->client_handle, 0, sizeof(ipa_qmi->client_handle));
+err_server_handle_release:
+ /* Releasing the handle also removes registered services */
+ qmi_handle_release(&ipa_qmi->server_handle);
+ memset(&ipa_qmi->server_handle, 0, sizeof(ipa_qmi->server_handle));
+
+ return ret;
+}
+
+void ipa_qmi_teardown(struct ipa *ipa)
+{
+ cancel_work_sync(&ipa->qmi.init_driver_work);
+
+ qmi_handle_release(&ipa->qmi.client_handle);
+ memset(&ipa->qmi.client_handle, 0, sizeof(ipa->qmi.client_handle));
+
+ qmi_handle_release(&ipa->qmi.server_handle);
+ memset(&ipa->qmi.server_handle, 0, sizeof(ipa->qmi.server_handle));
+}
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
new file mode 100644
index 000000000000..3993687593d0
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_QMI_H_
+#define _IPA_QMI_H_
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+struct ipa;
+
+/**
+ * struct ipa_qmi - QMI state associated with an IPA
+ * @client_handle: handle used to send QMI requests to the modem
+ * @server_handle: handle used to service QMI requests from the modem
+ * @modem_sq: QRTR socket address of the modem QMI server
+ * @init_driver_work: work structure used to send the INIT_DRIVER request
+ * @initial_boot: true until the first modem boot has completed
+ * @uc_ready: true once the microcontroller has reported it is ready
+ * @modem_ready: true once the modem IPA driver has reported it is ready
+ * @indication_requested: modem has requested the INIT_COMPLETE indication
+ * @indication_sent: the INIT_COMPLETE indication has been sent to the modem
+ */
+struct ipa_qmi {
+ struct qmi_handle client_handle;
+ struct qmi_handle server_handle;
+
+ /* Information used for the client handle */
+ struct sockaddr_qrtr modem_sq;
+ struct work_struct init_driver_work;
+
+ /* Flags used in negotiating readiness */
+ bool initial_boot;
+ bool uc_ready;
+ bool modem_ready;
+ bool indication_requested;
+ bool indication_sent;
+};
+
+int ipa_qmi_setup(struct ipa *ipa);
+void ipa_qmi_teardown(struct ipa *ipa);
+
+#endif /* !_IPA_QMI_H_ */
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
new file mode 100644
index 000000000000..03a1d0e55964
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#include <linux/stddef.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa_qmi_msg.h"
+
+/* QMI message structure definition for struct ipa_indication_register_req */
+struct qmi_elem_info ipa_indication_register_req_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ master_driver_init_complete_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_indication_register_req,
+ master_driver_init_complete_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ master_driver_init_complete),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_indication_register_req,
+ master_driver_init_complete),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ data_usage_quota_reached_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ data_usage_quota_reached_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ data_usage_quota_reached),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ data_usage_quota_reached),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_indication_register_rsp */
+struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_indication_register_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_driver_init_complete_req */
+struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_driver_init_complete_req,
+ status),
+ .tlv_type = 0x01,
+ .offset = offsetof(struct ipa_driver_init_complete_req,
+ status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_driver_init_complete_rsp */
+struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_driver_init_complete_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_driver_init_complete_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_complete_ind */
+struct qmi_elem_info ipa_init_complete_ind_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_complete_ind,
+ status),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_init_complete_ind,
+ status),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_bounds */
+struct qmi_elem_info ipa_mem_bounds_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_bounds, start),
+ .offset = offsetof(struct ipa_mem_bounds, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_bounds, end),
+ .offset = offsetof(struct ipa_mem_bounds, end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_array */
+struct qmi_elem_info ipa_mem_array_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_array, start),
+ .offset = offsetof(struct ipa_mem_array, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_array, count),
+ .offset = offsetof(struct ipa_mem_array, count),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_range */
+struct qmi_elem_info ipa_mem_range_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_range, start),
+ .offset = offsetof(struct ipa_mem_range, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_range, size),
+ .offset = offsetof(struct ipa_mem_range, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
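+
+/* Each optional member in the request below is encoded as a pair of
+ * table entries sharing one TLV type: a QMI_OPT_FLAG entry for the
+ * "_valid" flag, then an entry for the value itself. A hypothetical
+ * minimal pairing (the struct and table are illustrative only, not
+ * part of the IPA messages):
+ *
+ *    struct example_req {
+ *        u8 foo_valid;
+ *        u32 foo;
+ *    };
+ *
+ *    static struct qmi_elem_info example_req_ei[] = {
+ *        {
+ *            .data_type = QMI_OPT_FLAG,
+ *            .elem_len  = 1,
+ *            .elem_size = sizeof_field(struct example_req, foo_valid),
+ *            .tlv_type  = 0x10,
+ *            .offset    = offsetof(struct example_req, foo_valid),
+ *        }, {
+ *            .data_type = QMI_UNSIGNED_4_BYTE,
+ *            .elem_len  = 1,
+ *            .elem_size = sizeof_field(struct example_req, foo),
+ *            .tlv_type  = 0x10,
+ *            .offset    = offsetof(struct example_req, foo),
+ *        }, {
+ *            .data_type = QMI_EOTI,
+ *        },
+ *    };
+ */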
+
+/* QMI message structure definition for struct ipa_init_modem_driver_req */
+struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ platform_type_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ platform_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ platform_type),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ platform_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_tbl_info_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_tbl_info),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info_valid),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start_valid),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start_valid),
+ .tlv_type = 0x15,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start),
+ .tlv_type = 0x15,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ modem_mem_info_valid),
+ .tlv_type = 0x16,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ modem_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ modem_mem_info),
+ .tlv_type = 0x16,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ modem_mem_info),
+ .ei_array = ipa_mem_range_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt_valid),
+ .tlv_type = 0x17,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt),
+ .tlv_type = 0x17,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ skip_uc_load_valid),
+ .tlv_type = 0x18,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ skip_uc_load_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ skip_uc_load),
+ .tlv_type = 0x18,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ skip_uc_load),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info_valid),
+ .tlv_type = 0x19,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info),
+ .tlv_type = 0x19,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ zip_tbl_info_valid),
+ .tlv_type = 0x1a,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ zip_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ zip_tbl_info),
+ .tlv_type = 0x1a,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ zip_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info_valid),
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info_valid),
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start_valid),
+ .tlv_type = 0x1d,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start),
+ .tlv_type = 0x1d,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start_valid),
+ .tlv_type = 0x1e,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start),
+ .tlv_type = 0x1e,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr_valid),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size_valid),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size_valid),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_modem_driver_rsp */
+struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ default_end_pt_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ default_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ default_end_pt),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ default_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
new file mode 100644
index 000000000000..cfac456cea0c
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_QMI_MSG_H_
+#define _IPA_QMI_MSG_H_
+
+/* === Only "ipa_qmi.c" and "ipa_qmi_msg.c" should include this file === */
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+/* Request/response/indication QMI message ids used for IPA. Receiving
+ * end issues a response for requests; indications require no response.
+ */
+#define IPA_QMI_INDICATION_REGISTER 0x20 /* modem -> AP request */
+#define IPA_QMI_INIT_DRIVER 0x21 /* AP -> modem request */
+#define IPA_QMI_INIT_COMPLETE 0x22 /* AP -> modem indication */
+#define IPA_QMI_DRIVER_INIT_COMPLETE 0x35 /* modem -> AP request */
+
+/* The maximum size required for message types. These sizes include
+ * the message data, along with type (1 byte) and length (2 byte)
+ * information for each field. The qmi_send_*() interfaces require
+ * the message size to be provided.
+ */
+#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 12 /* -> server handle */
+#define IPA_QMI_INDICATION_REGISTER_RSP_SZ 7 /* <- server handle */
+#define IPA_QMI_INIT_DRIVER_REQ_SZ 162 /* client handle -> */
+#define IPA_QMI_INIT_DRIVER_RSP_SZ 25 /* client handle <- */
+#define IPA_QMI_INIT_COMPLETE_IND_SZ 7 /* <- server handle */
+#define IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ 4 /* -> server handle */
+#define IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ 7 /* <- server handle */
+
+/* Maximum size of messages we expect the AP to receive (max of above) */
+#define IPA_QMI_SERVER_MAX_RCV_SZ 8
+#define IPA_QMI_CLIENT_MAX_RCV_SZ 25
+
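+/* As an illustrative sketch (not the driver's actual code; the qmi handle,
+ * socket, transaction, and request variables are hypothetical), an
+ * IPA_QMI_INIT_DRIVER request would be sent with something like:
+ *
+ *	ret = qmi_send_request(qmi, &sq, &txn, IPA_QMI_INIT_DRIVER,
+ *			       IPA_QMI_INIT_DRIVER_REQ_SZ,
+ *			       ipa_init_modem_driver_req_ei, &req);
+ */
+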
+/* Request message for the IPA_QMI_INDICATION_REGISTER request */
+struct ipa_indication_register_req {
+ u8 master_driver_init_complete_valid;
+ u8 master_driver_init_complete;
+ u8 data_usage_quota_reached_valid;
+ u8 data_usage_quota_reached;
+ u8 ipa_mhi_ready_ind_valid;
+ u8 ipa_mhi_ready_ind;
+};
+
+/* The response to an IPA_QMI_INDICATION_REGISTER request consists only of
+ * a standard QMI response.
+ */
+struct ipa_indication_register_rsp {
+ struct qmi_response_type_v01 rsp;
+};
+
+/* Request message for the IPA_QMI_DRIVER_INIT_COMPLETE request */
+struct ipa_driver_init_complete_req {
+ u8 status;
+};
+
+/* The response to an IPA_QMI_DRIVER_INIT_COMPLETE request consists only
+ * of a standard QMI response.
+ */
+struct ipa_driver_init_complete_rsp {
+ struct qmi_response_type_v01 rsp;
+};
+
+/* The message for the IPA_QMI_INIT_COMPLETE_IND indication consists
+ * only of a standard QMI response.
+ */
+struct ipa_init_complete_ind {
+ struct qmi_response_type_v01 status;
+};
+
+/* The AP tells the modem its platform type. We assume Android. */
+enum ipa_platform_type {
+ IPA_QMI_PLATFORM_TYPE_INVALID = 0, /* Invalid */
+ IPA_QMI_PLATFORM_TYPE_TN = 1, /* Data card */
+ IPA_QMI_PLATFORM_TYPE_LE = 2, /* Data router */
+ IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 3, /* Android MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 4, /* Windows MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
+};
+
+/* This defines the start and end offset of a range of memory. Both
+ * fields are offsets relative to the start of IPA shared memory.
+ * The end value is the last addressable byte *within* the range.
+ */
+struct ipa_mem_bounds {
+ u32 start;
+ u32 end;
+};
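+
+/* For example, a 0x80-byte region at offset 0x100 is described by an
+ * ipa_mem_bounds of { .start = 0x100, .end = 0x17f }.
+ */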
+
+/* This defines the location and size of an array. The start value
+ * is an offset relative to the start of IPA shared memory. The
+ * size of the array is implied by the number of entries (the entry
+ * size is assumed to be known).
+ */
+struct ipa_mem_array {
+ u32 start;
+ u32 count;
+};
+
+/* This defines the location and size of a range of memory. The
+ * start is an offset relative to the start of IPA shared memory.
+ * This differs from the ipa_mem_bounds structure in that the size
+ * (in bytes) of the memory region is specified rather than the
+ * offset of its last byte.
+ */
+struct ipa_mem_range {
+ u32 start;
+ u32 size;
+};
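+
+/* The same example region (0x80 bytes at offset 0x100) expressed as an
+ * ipa_mem_range would be { .start = 0x100, .size = 0x80 }.
+ */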
+
+/* The message for the IPA_QMI_INIT_DRIVER request contains information
+ * from the AP that affects modem initialization.
+ */
+struct ipa_init_modem_driver_req {
+ u8 platform_type_valid;
+ u32 platform_type; /* enum ipa_platform_type */
+
+ /* Modem header table information. This defines the IPA shared
+ * memory in which the modem may insert header table entries.
+ */
+ u8 hdr_tbl_info_valid;
+ struct ipa_mem_bounds hdr_tbl_info;
+
+ /* Routing table information. These define the location and size of
+ * non-hashable IPv4 and IPv6 route tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_route_tbl_info_valid;
+ struct ipa_mem_array v4_route_tbl_info;
+ u8 v6_route_tbl_info_valid;
+ struct ipa_mem_array v6_route_tbl_info;
+
+ /* Filter table information. These define the location of the
+ * non-hashable IPv4 and IPv6 filter tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_filter_tbl_start_valid;
+ u32 v4_filter_tbl_start;
+ u8 v6_filter_tbl_start_valid;
+ u32 v6_filter_tbl_start;
+
+ /* Modem memory information. This defines the location and
+ * size of memory available for the modem to use.
+ */
+ u8 modem_mem_info_valid;
+ struct ipa_mem_range modem_mem_info;
+
+ /* This defines the destination endpoint on the AP to which
+ * the modem driver can send control commands. Must be less
+ * than ipa_endpoint_max().
+ */
+ u8 ctrl_comm_dest_end_pt_valid;
+ u32 ctrl_comm_dest_end_pt;
+
+ /* This defines whether the modem should load the microcontroller
+ * or not. It is unnecessary to reload it if the modem is being
+ * restarted.
+ *
+ * NOTE: this field is named "is_ssr_bootup" elsewhere.
+ */
+ u8 skip_uc_load_valid;
+ u8 skip_uc_load;
+
+ /* Processing context memory information. This defines the memory in
+ * which the modem may insert header processing context table entries.
+ */
+ u8 hdr_proc_ctx_tbl_info_valid;
+ struct ipa_mem_bounds hdr_proc_ctx_tbl_info;
+
+ /* Compression command memory information. This defines the memory
+ * in which the modem may insert compression/decompression commands.
+ */
+ u8 zip_tbl_info_valid;
+ struct ipa_mem_bounds zip_tbl_info;
+
+ /* Routing table information. These define the location and size
+ * of hashable IPv4 and IPv6 route tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_route_tbl_info_valid;
+ struct ipa_mem_array v4_hash_route_tbl_info;
+ u8 v6_hash_route_tbl_info_valid;
+ struct ipa_mem_array v6_hash_route_tbl_info;
+
+ /* Filter table information. These define the location of the
+ * hashable IPv4 and IPv6 filter tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_filter_tbl_start_valid;
+ u32 v4_hash_filter_tbl_start;
+ u8 v6_hash_filter_tbl_start_valid;
+ u32 v6_hash_filter_tbl_start;
+
+ /* Statistics information. These define the base address and size
+ * of the quota and drop statistics regions. (IPA v4.0 and above)
+ */
+ u8 hw_stats_quota_base_addr_valid;
+ u32 hw_stats_quota_base_addr;
+ u8 hw_stats_quota_size_valid;
+ u32 hw_stats_quota_size;
+ u8 hw_stats_drop_base_addr_valid;
+ u32 hw_stats_drop_base_addr;
+ u8 hw_stats_drop_size_valid;
+ u32 hw_stats_drop_size;
+};
+
+/* The response to an IPA_QMI_INIT_DRIVER request begins with a standard
+ * QMI response, but contains other information as well. Currently we
+ * simply wait for the INIT_DRIVER transaction to complete and
+ * ignore any other data that might be returned.
+ */
+struct ipa_init_modem_driver_rsp {
+ struct qmi_response_type_v01 rsp;
+
+ /* This defines the destination endpoint on the modem to which
+ * the AP driver can send control commands. Must be less than
+ * ipa_endpoint_max().
+ */
+ u8 ctrl_comm_dest_end_pt_valid;
+ u32 ctrl_comm_dest_end_pt;
+
+ /* This defines the default endpoint. The AP driver is not
+ * required to configure the hardware with this value. Must
+ * be less than ipa_endpoint_max().
+ */
+ u8 default_end_pt_valid;
+ u32 default_end_pt;
+
+ /* This defines whether a second handshake is required to complete
+ * initialization.
+ */
+ u8 modem_driver_init_pending_valid;
+ u8 modem_driver_init_pending;
+};
+
+/* Message structure definitions defined in "ipa_qmi_msg.c" */
+extern struct qmi_elem_info ipa_indication_register_req_ei[];
+extern struct qmi_elem_info ipa_indication_register_rsp_ei[];
+extern struct qmi_elem_info ipa_driver_init_complete_req_ei[];
+extern struct qmi_elem_info ipa_driver_init_complete_rsp_ei[];
+extern struct qmi_elem_info ipa_init_complete_ind_ei[];
+extern struct qmi_elem_info ipa_mem_bounds_ei[];
+extern struct qmi_elem_info ipa_mem_array_ei[];
+extern struct qmi_elem_info ipa_mem_range_ei[];
+extern struct qmi_elem_info ipa_init_modem_driver_req_ei[];
+extern struct qmi_elem_info ipa_init_modem_driver_rsp_ei[];
+
+#endif /* !_IPA_QMI_MSG_H_ */
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
new file mode 100644
index 000000000000..e6147a1cd787
--- /dev/null
+++ b/drivers/net/ipa/ipa_reg.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/io.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+
+int ipa_reg_init(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct resource *res;
+
+ /* Setup IPA register memory */
+ res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
+ "ipa-reg");
+ if (!res) {
+ dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
+ return -ENODEV;
+ }
+
+ ipa->reg_virt = ioremap(res->start, resource_size(res));
+ if (!ipa->reg_virt) {
+ dev_err(dev, "unable to remap \"ipa-reg\" memory\n");
+ return -ENOMEM;
+ }
+ ipa->reg_addr = res->start;
+
+ return 0;
+}
+
+void ipa_reg_exit(struct ipa *ipa)
+{
+ iounmap(ipa->reg_virt);
+}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
new file mode 100644
index 000000000000..3b8106aa277a
--- /dev/null
+++ b/drivers/net/ipa/ipa_reg.h
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_REG_H_
+#define _IPA_REG_H_
+
+#include <linux/bitfield.h>
+
+#include "ipa_version.h"
+
+struct ipa;
+
+/**
+ * DOC: IPA Registers
+ *
+ * IPA registers are located within the "ipa-reg" address space defined by
+ * Device Tree. The offset of each register within that space is specified
+ * by symbols defined below. The address space is mapped to virtual memory
+ * space in ipa_reg_init(). All IPA registers are 32 bits wide.
+ *
+ * Certain register types are duplicated for a number of instances of
+ * something. For example, each IPA endpoint has a set of registers
+ * defining its configuration. The offset to an endpoint's set of registers
+ * is computed from a "base" offset, plus the endpoint's ID multiplied
+ * by a "stride" value for the register. For such registers, the offset is
+ * computed by a function-like macro that takes a parameter used in the
+ * computation.
+ *
+ * Some register offsets depend on execution environment. For these an "ee"
+ * parameter is supplied to the offset macro. The "ee" value is a member of
+ * the gsi_ee enumerated type.
+ *
+ * The offset of a register dependent on endpoint id is computed by a macro
+ * that is supplied a parameter "ep". The "ep" value is assumed to be less
+ * than the maximum endpoint value for the current hardware, and that will
+ * not exceed IPA_ENDPOINT_MAX.
+ *
+ * The offset of registers related to filter and route tables is computed
+ * by a macro that is supplied a parameter "er". The "er" represents an
+ * endpoint ID for filters, or a route ID for routes. For filters, the
+ * endpoint ID must be less than IPA_ENDPOINT_MAX, but is further restricted
+ * because not all endpoints support filtering. For routes, the route ID
+ * must be less than IPA_ROUTE_MAX.
+ *
+ * The offset of registers related to resource types is computed by a macro
+ * that is supplied a parameter "rt". The "rt" represents a resource type,
+ * which is a member of the ipa_resource_type_src enumerated type for
+ * source endpoint resources or the ipa_resource_type_dst enumerated type
+ * for destination endpoint resources.
+ *
+ * Some registers encode multiple fields within them. For these, each field
+ * has a symbol below defining a field mask that encodes both the position
+ * and width of the field within its register.
+ *
+ * In some cases, different versions of IPA hardware use different offset or
+ * field mask values. In such cases an inline function is used rather
+ * than a macro to define the offset or field mask to use.
+ *
+ * Finally, some registers hold bitmasks representing endpoints. In such
+ * cases the @available field in the @ipa structure defines the "full" set
+ * of valid bits for the register.
+ */
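+
+/* For example (illustrative only, not from the driver), encoding the mode
+ * field of endpoint 5's INIT_MODE register (defined below) and writing it
+ * might look like:
+ *
+ *	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(5);
+ *	u32 val = u32_encode_bits(IPA_DMA, MODE_FMASK);
+ *
+ *	iowrite32(val, ipa->reg_virt + offset);
+ */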
+
+#define IPA_REG_ENABLED_PIPES_OFFSET 0x00000038
+
+#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
+#define ENABLE_FMASK GENMASK(0, 0)
+#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
+#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
+#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
+#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
+#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
+#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
+#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
+#define GSI_MULTI_INORDER_WR_DIS_FMASK GENMASK(8, 8)
+#define GEN_QMB_0_MULTI_INORDER_RD_DIS_FMASK GENMASK(9, 9)
+#define GEN_QMB_1_MULTI_INORDER_RD_DIS_FMASK GENMASK(10, 10)
+#define GEN_QMB_0_MULTI_INORDER_WR_DIS_FMASK GENMASK(11, 11)
+#define GEN_QMB_1_MULTI_INORDER_WR_DIS_FMASK GENMASK(12, 12)
+#define GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS_FMASK GENMASK(13, 13)
+#define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14)
+#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
+#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
+#define IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_FMASK GENMASK(20, 17)
+
+#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
+#define RX_FMASK GENMASK(0, 0)
+#define PROC_FMASK GENMASK(1, 1)
+#define TX_WRAPPER_FMASK GENMASK(2, 2)
+#define MISC_FMASK GENMASK(3, 3)
+#define RAM_ARB_FMASK GENMASK(4, 4)
+#define FTCH_HPS_FMASK GENMASK(5, 5)
+#define FTCH_DPS_FMASK GENMASK(6, 6)
+#define HPS_FMASK GENMASK(7, 7)
+#define DPS_FMASK GENMASK(8, 8)
+#define RX_HPS_CMDQS_FMASK GENMASK(9, 9)
+#define HPS_DPS_CMDQS_FMASK GENMASK(10, 10)
+#define DPS_TX_CMDQS_FMASK GENMASK(11, 11)
+#define RSRC_MNGR_FMASK GENMASK(12, 12)
+#define CTX_HANDLER_FMASK GENMASK(13, 13)
+#define ACK_MNGR_FMASK GENMASK(14, 14)
+#define D_DCPH_FMASK GENMASK(15, 15)
+#define H_DCPH_FMASK GENMASK(16, 16)
+#define DCMP_FMASK GENMASK(17, 17)
+#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
+#define TX_0_FMASK GENMASK(19, 19)
+#define TX_1_FMASK GENMASK(20, 20)
+#define FNR_FMASK GENMASK(21, 21)
+#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
+#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
+#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
+#define QMB_FMASK GENMASK(25, 25)
+#define WEIGHT_ARB_FMASK GENMASK(26, 26)
+#define GSI_IF_FMASK GENMASK(27, 27)
+#define GLOBAL_FMASK GENMASK(28, 28)
+#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
+
+#define IPA_REG_ROUTE_OFFSET 0x00000048
+#define ROUTE_DIS_FMASK GENMASK(0, 0)
+#define ROUTE_DEF_PIPE_FMASK GENMASK(5, 1)
+#define ROUTE_DEF_HDR_TABLE_FMASK GENMASK(6, 6)
+#define ROUTE_DEF_HDR_OFST_FMASK GENMASK(16, 7)
+#define ROUTE_FRAG_DEF_PIPE_FMASK GENMASK(21, 17)
+#define ROUTE_DEF_RETAIN_HDR_FMASK GENMASK(24, 24)
+
+#define IPA_REG_SHARED_MEM_SIZE_OFFSET 0x00000054
+#define SHARED_MEM_SIZE_FMASK GENMASK(15, 0)
+#define SHARED_MEM_BADDR_FMASK GENMASK(31, 16)
+
+#define IPA_REG_QSB_MAX_WRITES_OFFSET 0x00000074
+#define GEN_QMB_0_MAX_WRITES_FMASK GENMASK(3, 0)
+#define GEN_QMB_1_MAX_WRITES_FMASK GENMASK(7, 4)
+
+#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
+#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
+#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
+/* The next two fields are present for IPA v4.0 and above */
+#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
+#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
+
+static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000010c;
+
+ return 0x000000b4;
+}
+/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
+
+/* The next register is present for IPA v4.2 and above */
+#define IPA_REG_FILT_ROUT_HASH_EN_OFFSET 0x00000148
+#define IPV6_ROUTER_HASH_EN GENMASK(0, 0)
+#define IPV6_FILTER_HASH_EN GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_EN GENMASK(8, 8)
+#define IPV4_FILTER_HASH_EN GENMASK(12, 12)
+
+static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000090;
+
+ return 0x000014c;
+}
+
+#define IPV6_ROUTER_HASH_FLUSH GENMASK(0, 0)
+#define IPV6_FILTER_HASH_FLUSH GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_FLUSH GENMASK(8, 8)
+#define IPV4_FILTER_HASH_FLUSH GENMASK(12, 12)
+
+#define IPA_REG_BCR_OFFSET 0x000001d0
+#define BCR_CMDQ_L_LACK_ONE_ENTRY BIT(0)
+#define BCR_TX_NOT_USING_BRESP BIT(1)
+#define BCR_SUSPEND_L2_IRQ BIT(3)
+#define BCR_HOLB_DROP_L2_IRQ BIT(4)
+#define BCR_DUAL_TX BIT(5)
+
+/* Backward compatibility register value to use for each version */
+static inline u32 ipa_reg_bcr_val(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_TX_NOT_USING_BRESP |
+ BCR_SUSPEND_L2_IRQ | BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+
+ if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1)
+ return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_SUSPEND_L2_IRQ |
+ BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+
+ return 0x00000000;
+}
+
+#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
+
+#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
+/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
+
+#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
+#define AGGR_GRANULARITY GENMASK(8, 4)
+/* Compute the value to use in the AGGR_GRANULARITY field representing
+ * the given number of microseconds (up to 1 millisecond).
+ * x = (32 * usec) / 1000 - 1
+ */
+static inline u32 ipa_aggr_granularity_val(u32 microseconds)
+{
+ /* assert(microseconds >= 16); (?) */
+ /* assert(microseconds <= 1015); */
+
+ return DIV_ROUND_CLOSEST(32 * microseconds, 1000) - 1;
+}
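+
+/* For example, 500 microseconds yields a field value of
+ * DIV_ROUND_CLOSEST(32 * 500, 1000) - 1, which is 15.
+ */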
+
+#define IPA_REG_TX_CFG_OFFSET 0x000001fc
+/* The first three fields are present for IPA v3.5.1 only */
+#define TX0_PREFETCH_DISABLE GENMASK(0, 0)
+#define TX1_PREFETCH_DISABLE GENMASK(1, 1)
+#define PREFETCH_ALMOST_EMPTY_SIZE GENMASK(4, 2)
+/* The next fields are present for IPA v4.0 and above */
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX0 GENMASK(5, 2)
+#define DMAW_SCND_OUTSD_PRED_THRESHOLD GENMASK(9, 6)
+#define DMAW_SCND_OUTSD_PRED_EN GENMASK(10, 10)
+#define DMAW_MAX_BEATS_256_DIS GENMASK(11, 11)
+#define PA_MASK_EN GENMASK(12, 12)
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX1 GENMASK(16, 13)
+/* The last two fields are present for IPA v4.2 and above */
+#define SSPND_PA_NO_START_STATE GENMASK(18, 18)
+#define SSPND_PA_NO_BQ_STATE GENMASK(19, 19)
+
+#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
+#define BAM_MAX_PIPES_FMASK GENMASK(4, 0)
+#define BAM_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
+#define BAM_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
+#define BAM_PROD_LOWEST_FMASK GENMASK(27, 24)
+
+static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_4_2)
+ return 0x00000240;
+
+ return 0x00000220;
+}
+
+#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
+#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
+
+#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000400 + 0x0020 * (rt))
+#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000404 + 0x0020 * (rt))
+#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000408 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000500 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000504 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000508 + 0x0020 * (rt))
+#define X_MIN_LIM_FMASK GENMASK(5, 0)
+#define X_MAX_LIM_FMASK GENMASK(13, 8)
+#define Y_MIN_LIM_FMASK GENMASK(21, 16)
+#define Y_MAX_LIM_FMASK GENMASK(29, 24)
+
+#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
+ (0x00000800 + 0x0070 * (ep))
+#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
+#define ENDP_DELAY_FMASK GENMASK(1, 1)
+
+#define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \
+ (0x00000808 + 0x0070 * (ep))
+#define FRAG_OFFLOAD_EN_FMASK GENMASK(0, 0)
+#define CS_OFFLOAD_EN_FMASK GENMASK(2, 1)
+#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
+#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+
+#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
+ (0x00000810 + 0x0070 * (ep))
+#define HDR_LEN_FMASK GENMASK(5, 0)
+#define HDR_OFST_METADATA_VALID_FMASK GENMASK(6, 6)
+#define HDR_OFST_METADATA_FMASK GENMASK(12, 7)
+#define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13)
+#define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19)
+#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
+#define HDR_A5_MUX_FMASK GENMASK(26, 26)
+#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
+#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
+
+#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
+ (0x00000814 + 0x0070 * (ep))
+#define HDR_ENDIANNESS_FMASK GENMASK(0, 0)
+#define HDR_TOTAL_LEN_OR_PAD_VALID_FMASK GENMASK(1, 1)
+#define HDR_TOTAL_LEN_OR_PAD_FMASK GENMASK(2, 2)
+#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
+#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
+#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
+
+#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(ep) \
+ (0x00000818 + 0x0070 * (ep))
+
+#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(ep) \
+ (0x00000820 + 0x0070 * (ep))
+#define MODE_FMASK GENMASK(2, 0)
+#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
+#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
+#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
+#define PAD_EN_FMASK GENMASK(29, 29)
+#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
+
+#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
+ (0x00000824 + 0x0070 * (ep))
+#define AGGR_EN_FMASK GENMASK(1, 0)
+#define AGGR_TYPE_FMASK GENMASK(4, 2)
+#define AGGR_BYTE_LIMIT_FMASK GENMASK(9, 5)
+#define AGGR_TIME_LIMIT_FMASK GENMASK(14, 10)
+#define AGGR_PKT_LIMIT_FMASK GENMASK(20, 15)
+#define AGGR_SW_EOF_ACTIVE_FMASK GENMASK(21, 21)
+#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22)
+#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24)
+
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(ep) \
+ (0x0000082c + 0x0070 * (ep))
+#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
+
+/* The next register is valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(ep) \
+ (0x00000830 + 0x0070 * (ep))
+/* The next fields are present for IPA v4.2 only */
+#define BASE_VALUE_FMASK GENMASK(4, 0)
+#define SCALE_FMASK GENMASK(12, 8)
+
+#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(ep) \
+ (0x00000834 + 0x0070 * (ep))
+#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
+#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
+#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
+#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
+
+#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
+ (0x00000838 + 0x0070 * (ep))
+#define RSRC_GRP_FMASK GENMASK(1, 0)
+
+#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(ep) \
+ (0x0000083c + 0x0070 * (ep))
+#define HPS_SEQ_TYPE_FMASK GENMASK(3, 0)
+#define DPS_SEQ_TYPE_FMASK GENMASK(7, 4)
+#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
+#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12)
+
+#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
+ (0x00000840 + 0x0070 * (ep))
+#define STATUS_EN_FMASK GENMASK(0, 0)
+#define STATUS_ENDP_FMASK GENMASK(5, 1)
+#define STATUS_LOCATION_FMASK GENMASK(8, 8)
+/* The next field is present for IPA v4.0 and above */
+#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
+
+/* "er" is either an endpoint id (for filters) or a route id (for routes) */
+#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
+ (0x0000085c + 0x0070 * (er))
+#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
+#define FILTER_HASH_MSK_SRC_IP_FMASK GENMASK(1, 1)
+#define FILTER_HASH_MSK_DST_IP_FMASK GENMASK(2, 2)
+#define FILTER_HASH_MSK_SRC_PORT_FMASK GENMASK(3, 3)
+#define FILTER_HASH_MSK_DST_PORT_FMASK GENMASK(4, 4)
+#define FILTER_HASH_MSK_PROTOCOL_FMASK GENMASK(5, 5)
+#define FILTER_HASH_MSK_METADATA_FMASK GENMASK(6, 6)
+#define IPA_REG_ENDP_FILTER_HASH_MSK_ALL GENMASK(6, 0)
+
+#define ROUTER_HASH_MSK_SRC_ID_FMASK GENMASK(16, 16)
+#define ROUTER_HASH_MSK_SRC_IP_FMASK GENMASK(17, 17)
+#define ROUTER_HASH_MSK_DST_IP_FMASK GENMASK(18, 18)
+#define ROUTER_HASH_MSK_SRC_PORT_FMASK GENMASK(19, 19)
+#define ROUTER_HASH_MSK_DST_PORT_FMASK GENMASK(20, 20)
+#define ROUTER_HASH_MSK_PROTOCOL_FMASK GENMASK(21, 21)
+#define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22)
+#define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16)
+
+#define IPA_REG_IRQ_STTS_OFFSET \
+ IPA_REG_IRQ_STTS_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_STTS_EE_N_OFFSET(ee) \
+ (0x00003008 + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_EN_OFFSET \
+ IPA_REG_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_EN_EE_N_OFFSET(ee) \
+ (0x0000300c + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_CLR_OFFSET \
+ IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \
+ (0x00003010 + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_UC_OFFSET \
+ IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \
+ (0x0000301c + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \
+ IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \
+ (0x00003030 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_INFO register */
+
+#define IPA_REG_SUSPEND_IRQ_EN_OFFSET \
+ IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(ee) \
+ (0x00003034 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_IRQ_EN register */
+
+#define IPA_REG_SUSPEND_IRQ_CLR_OFFSET \
+ IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(ee) \
+ (0x00003038 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_IRQ_CLR register */
+
+/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
+enum ipa_cs_offload_en {
+ IPA_CS_OFFLOAD_NONE = 0,
+ IPA_CS_OFFLOAD_UL = 1,
+ IPA_CS_OFFLOAD_DL = 2,
+ IPA_CS_RSVD
+};
+
+/** enum ipa_aggr_en - aggregation type field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_en {
+ IPA_BYPASS_AGGR = 0,
+ IPA_ENABLE_AGGR = 1,
+ IPA_ENABLE_DEAGGR = 2,
+};
+
+/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0,
+ IPA_HDLC = 1,
+ IPA_TLP = 2,
+ IPA_RNDIS = 3,
+ IPA_GENERIC = 4,
+ IPA_COALESCE = 5,
+ IPA_QCMAP = 6,
+};
+
+/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
+enum ipa_mode {
+ IPA_BASIC = 0,
+ IPA_ENABLE_FRAMING_HDLC = 1,
+ IPA_ENABLE_DEFRAMING_HDLC = 2,
+ IPA_DMA = 3,
+};
+
+/**
+ * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N
+ * @IPA_SEQ_DMA_ONLY: only DMA is performed
+ * @IPA_SEQ_PKT_PROCESS_NO_DEC_UCP:
+ * packet processing + no decipher + microcontroller (Ethernet Bridging)
+ * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
+ * second packet processing pass + no decipher + microcontroller
+ * @IPA_SEQ_DMA_DEC: DMA + cipher/decipher
+ * @IPA_SEQ_DMA_COMP_DECOMP: DMA + compression/decompression
+ * @IPA_SEQ_INVALID: invalid sequencer type
+ *
+ * The values defined here are broken into 4-bit nibbles that are written
+ * into fields of the INIT_SEQ_N endpoint registers.
+ */
+enum ipa_seq_type {
+ IPA_SEQ_DMA_ONLY = 0x0000,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_UCP = 0x0002,
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
+ IPA_SEQ_DMA_DEC = 0x0011,
+ IPA_SEQ_DMA_COMP_DECOMP = 0x0020,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
+ IPA_SEQ_INVALID = 0xffff,
+};
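+
+/* For example, IPA_SEQ_DMA_DEC (0x0011) places 0x1 in both the
+ * HPS_SEQ_TYPE and DPS_SEQ_TYPE fields of an INIT_SEQ_N register.
+ */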
+
+int ipa_reg_init(struct ipa *ipa);
+void ipa_reg_exit(struct ipa *ipa);
+
+#endif /* _IPA_REG_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
new file mode 100644
index 000000000000..a5f7a79a1923
--- /dev/null
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "ipa_smp2p.h"
+#include "ipa.h"
+#include "ipa_uc.h"
+#include "ipa_clock.h"
+
+/**
+ * DOC: IPA SMP2P communication with the modem
+ *
+ * SMP2P is a primitive communication mechanism available between the AP and
+ * the modem. The IPA driver uses this for two purposes: to enable the modem
+ * to state that the GSI hardware is ready to use; and to communicate the
+ * state of the IPA clock in the event of a crash.
+ *
+ * GSI needs to have early initialization completed before it can be used.
+ * This initialization is done either by Trust Zone or by the modem. In the
+ * latter case, the modem uses an SMP2P interrupt to tell the AP IPA driver
+ * when the GSI is ready to use.
+ *
+ * The modem is also able to inquire about the current state of the IPA
+ * clock by triggering another SMP2P interrupt to the AP. We communicate
+ * whether the clock is enabled using two SMP2P state bits--one to
+ * indicate the clock state (on or off), and a second to indicate the
+ * clock state bit is valid. The modem will poll the valid bit until it
+ * is set, and at that time records whether the AP has the IPA clock enabled.
+ *
+ * Finally, if the AP kernel panics, we update the SMP2P state bits even if
+ * we never receive an interrupt from the modem requesting this.
+ */
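+
+/* An illustrative view (not code) of the clock-state handshake as the
+ * modem sees it:
+ *
+ *	valid bit == 0		modem polls; no answer yet
+ *	enabled bit written	AP records the clock state first
+ *	valid bit == 1		modem reads the now-valid enabled bit
+ */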
+
+/**
+ * struct ipa_smp2p - IPA SMP2P information
+ * @ipa: IPA pointer
+ * @valid_state: SMEM state indicating enabled state is valid
+ * @enabled_state: SMEM state to indicate clock is enabled
+ * @valid_bit: Valid bit in 32-bit SMEM state mask
+ * @enabled_bit: Enabled bit in 32-bit SMEM state mask
+ * @clock_query_irq: IPA interrupt triggered by modem for clock query
+ * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready
+ * @clock_on: Whether IPA clock is on
+ * @notified: Whether modem has been notified of clock state
+ * @disabled: Whether setup ready interrupt handling is disabled
+ * @mutex: Mutex protecting ready-interrupt/shutdown interlock
+ * @panic_notifier: Panic notifier structure
+ */
+struct ipa_smp2p {
+ struct ipa *ipa;
+ struct qcom_smem_state *valid_state;
+ struct qcom_smem_state *enabled_state;
+ u32 valid_bit;
+ u32 enabled_bit;
+ u32 clock_query_irq;
+ u32 setup_ready_irq;
+ bool clock_on;
+ bool notified;
+ bool disabled;
+ struct mutex mutex;
+ struct notifier_block panic_notifier;
+};
+
+/**
+ * ipa_smp2p_notify() - use SMP2P to tell modem about IPA clock state
+ * @smp2p: SMP2P information
+ *
+ * This is called either when the modem has requested it (by triggering
+ * the modem clock query IPA interrupt) or whenever the AP is shutting down
+ * (via a panic notifier). It sets the two SMP2P state bits--one saying
+ * whether the IPA clock is running, and the other indicating the first bit
+ * is valid.
+ */
+static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
+{
+ u32 value;
+ u32 mask;
+
+ if (smp2p->notified)
+ return;
+
+ smp2p->clock_on = ipa_clock_get_additional(smp2p->ipa);
+
+ /* Signal whether the clock is enabled */
+ mask = BIT(smp2p->enabled_bit);
+ value = smp2p->clock_on ? mask : 0;
+ qcom_smem_state_update_bits(smp2p->enabled_state, mask, value);
+
+ /* Now indicate that the enabled flag is valid */
+ mask = BIT(smp2p->valid_bit);
+ value = mask;
+ qcom_smem_state_update_bits(smp2p->valid_state, mask, value);
+
+ smp2p->notified = true;
+}
+
+/* Threaded IRQ handler for modem "ipa-clock-query" SMP2P interrupt */
+static irqreturn_t ipa_smp2p_modem_clk_query_isr(int irq, void *dev_id)
+{
+ struct ipa_smp2p *smp2p = dev_id;
+
+ ipa_smp2p_notify(smp2p);
+
+ return IRQ_HANDLED;
+}
+
+static int ipa_smp2p_panic_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ipa_smp2p *smp2p;
+
+ smp2p = container_of(nb, struct ipa_smp2p, panic_notifier);
+
+ ipa_smp2p_notify(smp2p);
+
+ if (smp2p->clock_on)
+ ipa_uc_panic_notifier(smp2p->ipa);
+
+ return NOTIFY_DONE;
+}
+
+static int ipa_smp2p_panic_notifier_register(struct ipa_smp2p *smp2p)
+{
+ /* IPA panic handler needs to run before modem shuts down */
+ smp2p->panic_notifier.notifier_call = ipa_smp2p_panic_notifier;
+ smp2p->panic_notifier.priority = INT_MAX; /* Do it early */
+
+ return atomic_notifier_chain_register(&panic_notifier_list,
+ &smp2p->panic_notifier);
+}
+
+static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &smp2p->panic_notifier);
+}
+
+/* Threaded IRQ handler for modem "ipa-setup-ready" SMP2P interrupt */
+static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
+{
+ struct ipa_smp2p *smp2p = dev_id;
+
+ mutex_lock(&smp2p->mutex);
+
+ if (!smp2p->disabled) {
+ int ret;
+
+ ret = ipa_setup(smp2p->ipa);
+ if (ret)
+ dev_err(&smp2p->ipa->pdev->dev,
+ "error %d from ipa_setup()\n", ret);
+ smp2p->disabled = true;
+ }
+
+ mutex_unlock(&smp2p->mutex);
+
+ return IRQ_HANDLED;
+}
+
+/* Initialize SMP2P interrupts */
+static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
+ irq_handler_t handler)
+{
+ struct device *dev = &smp2p->ipa->pdev->dev;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"%s\" IRQ property\n",
+ ret, name);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_threaded_irq(irq, NULL, handler, 0, name, smp2p);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\" IRQ\n", ret, name);
+ return ret;
+ }
+
+ return irq;
+}
+
+static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
+{
+ free_irq(irq, smp2p);
+}
+
+/* Drop the clock reference if it was taken in ipa_smp2p_notify() */
+static void ipa_smp2p_clock_release(struct ipa *ipa)
+{
+ if (!ipa->smp2p->clock_on)
+ return;
+
+ ipa_clock_put(ipa);
+ ipa->smp2p->clock_on = false;
+}
+
+/* Initialize the IPA SMP2P subsystem */
+int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
+{
+ struct qcom_smem_state *enabled_state;
+ struct device *dev = &ipa->pdev->dev;
+ struct qcom_smem_state *valid_state;
+ struct ipa_smp2p *smp2p;
+ u32 enabled_bit;
+ u32 valid_bit;
+ int ret;
+
+ valid_state = qcom_smem_state_get(dev, "ipa-clock-enabled-valid",
+ &valid_bit);
+ if (IS_ERR(valid_state))
+ return PTR_ERR(valid_state);
+ if (valid_bit >= 32) /* BITS_PER_U32 */
+ return -EINVAL;
+
+ enabled_state = qcom_smem_state_get(dev, "ipa-clock-enabled",
+ &enabled_bit);
+ if (IS_ERR(enabled_state))
+ return PTR_ERR(enabled_state);
+ if (enabled_bit >= 32) /* BITS_PER_U32 */
+ return -EINVAL;
+
+ smp2p = kzalloc(sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->ipa = ipa;
+
+ /* These fields are needed by the clock query interrupt
+ * handler, so initialize them now.
+ */
+ mutex_init(&smp2p->mutex);
+ smp2p->valid_state = valid_state;
+ smp2p->valid_bit = valid_bit;
+ smp2p->enabled_state = enabled_state;
+ smp2p->enabled_bit = enabled_bit;
+
+ /* We have enough information saved to handle notifications */
+ ipa->smp2p = smp2p;
+
+ ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
+ ipa_smp2p_modem_clk_query_isr);
+ if (ret < 0)
+ goto err_null_smp2p;
+ smp2p->clock_query_irq = ret;
+
+ ret = ipa_smp2p_panic_notifier_register(smp2p);
+ if (ret)
+ goto err_irq_exit;
+
+ if (modem_init) {
+ /* Result will be non-zero (negative for error) */
+ ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
+ ipa_smp2p_modem_setup_ready_isr);
+ if (ret < 0)
+ goto err_notifier_unregister;
+ smp2p->setup_ready_irq = ret;
+ }
+
+ return 0;
+
+err_notifier_unregister:
+ ipa_smp2p_panic_notifier_unregister(smp2p);
+err_irq_exit:
+ ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
+err_null_smp2p:
+ ipa->smp2p = NULL;
+ mutex_destroy(&smp2p->mutex);
+ kfree(smp2p);
+
+ return ret;
+}
+
+void ipa_smp2p_exit(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+
+ if (smp2p->setup_ready_irq)
+ ipa_smp2p_irq_exit(smp2p, smp2p->setup_ready_irq);
+ ipa_smp2p_panic_notifier_unregister(smp2p);
+ ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
+ /* We won't get notified any more; drop clock reference (if any) */
+ ipa_smp2p_clock_release(ipa);
+ ipa->smp2p = NULL;
+ mutex_destroy(&smp2p->mutex);
+ kfree(smp2p);
+}
+
+void ipa_smp2p_disable(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+
+ if (!smp2p->setup_ready_irq)
+ return;
+
+ mutex_lock(&smp2p->mutex);
+
+ smp2p->disabled = true;
+
+ mutex_unlock(&smp2p->mutex);
+}
+
+/* Reset state tracking whether we have notified the modem */
+void ipa_smp2p_notify_reset(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+ u32 mask;
+
+ if (!smp2p->notified)
+ return;
+
+ ipa_smp2p_clock_release(ipa);
+
+ /* Reset the clock enabled valid flag */
+ mask = BIT(smp2p->valid_bit);
+ qcom_smem_state_update_bits(smp2p->valid_state, mask, 0);
+
+ /* Mark the clock disabled for good measure... */
+ mask = BIT(smp2p->enabled_bit);
+ qcom_smem_state_update_bits(smp2p->enabled_state, mask, 0);
+
+ smp2p->notified = false;
+}
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
new file mode 100644
index 000000000000..1f65cdc9d406
--- /dev/null
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_SMP2P_H_
+#define _IPA_SMP2P_H_
+
+#include <linux/types.h>
+
+struct ipa;
+
+/**
+ * ipa_smp2p_init() - Initialize the IPA SMP2P subsystem
+ * @ipa: IPA pointer
+ * @modem_init: Whether the modem is responsible for GSI initialization
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
+
+/**
+ * ipa_smp2p_exit() - Inverse of ipa_smp2p_init()
+ * @ipa: IPA pointer
+ */
+void ipa_smp2p_exit(struct ipa *ipa);
+
+/**
+ * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling
+ * @ipa: IPA pointer
+ *
+ * Prevent handling of the "setup ready" interrupt from the modem.
+ * This is used before initiating shutdown of the driver.
+ */
+void ipa_smp2p_disable(struct ipa *ipa);
+
+/**
+ * ipa_smp2p_notify_reset() - Reset modem notification state
+ * @ipa: IPA pointer
+ *
+ * If the modem crashes it queries the IPA clock state. In cleaning
+ * up after such a crash this is used to reset some state maintained
+ * for managing this notification.
+ */
+void ipa_smp2p_notify_reset(struct ipa *ipa);
+
+#endif /* _IPA_SMP2P_H_ */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
new file mode 100644
index 000000000000..9df2a3e78c98
--- /dev/null
+++ b/drivers/net/ipa/ipa_table.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/build_bug.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include "ipa.h"
+#include "ipa_version.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_reg.h"
+#include "ipa_mem.h"
+#include "ipa_cmd.h"
+#include "gsi.h"
+#include "gsi_trans.h"
+
+/**
+ * DOC: IPA Filter and Route Tables
+ *
+ * The IPA has tables defined in its local shared memory that define filter
+ * and routing rules. Each entry in these tables contains a 64-bit DMA
+ * address that refers to DRAM (system memory) containing a rule definition.
+ * A rule consists of a contiguous block of 32-bit values terminated with
+ * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
+ * represents "no filtering" or "no routing," and is the reset value for
+ * filter or route table rules. Separate tables (both filter and route)
+ * used for IPv4 and IPv6. Additionally, there can be hashed filter or
+ * route tables, which are used when a hash of message metadata matches.
+ * Hashed operation is not supported by all IPA hardware.
+ *
+ * Each filter rule is associated with an AP or modem TX endpoint, though
+ * not all TX endpoints support filtering. The first 64-bit entry in a
+ * filter table is a bitmap indicating which endpoints have entries in
+ * the table. The low-order bit (bit 0) in this bitmap represents a
+ * special global filter, which applies to all traffic. This is not
+ * used in the current code. Bit 1, if set, indicates that there is an
+ * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
+ * table. Bit 2, if set, indicates there is an entry for endpoint 1,
+ * and so on. Space is set aside in IPA local memory to hold as many
+ * filter table entries as might be required, but typically they are not
+ * all used.
+ *
+ * The AP initializes all entries in a filter table to refer to a "zero"
+ * entry. Once initialized the modem and AP update the entries for
+ * endpoints they "own" directly. Currently the AP does not use the
+ * IPA filtering functionality.
+ *
+ * IPA Filter Table
+ * ----------------------
+ * endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5)
+ * |--------------------|
+ * 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
+ * |--------------------|
+ * 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
+ * |--------------------|
+ * (unused) | | (Unused space in filter table)
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * (unused) | | (Unused space in filter table)
+ * ----------------------
+ *
+ * The set of available route rules is divided about equally between the AP
+ * and modem. The AP initializes all entries in a route table to refer to
+ * a "zero entry". Once initialized, the modem and AP are responsible for
+ * updating their own entries. All entries in a route table are usable,
+ * though the AP currently does not use the IPA routing functionality.
+ *
+ * IPA Route Table
+ * ----------------------
+ * 1st modem route | 0x0001234500001100 | DMA address for first route rule
+ * |--------------------|
+ * 2nd modem route | 0x0001234500001140 | DMA address for second route rule
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * Last modem route| 0x0001234500002280 | DMA address for Nth route rule
+ * |--------------------|
+ * 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1)
+ * |--------------------|
+ * 2nd AP route | 0x0001234500001140 | DMA address for next route rule
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * Last AP route | 0x0001234500002280 | DMA address for last route rule
+ * ----------------------
+ */
+
+/* IPA hardware constrains filter and route table alignment */
+#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */
+
+/* Assignment of route table entries to the modem and AP */
+#define IPA_ROUTE_MODEM_MIN 0
+#define IPA_ROUTE_MODEM_COUNT 8
+
+#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
+#define IPA_ROUTE_AP_COUNT \
+ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
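+
+/* If IPA_ROUTE_COUNT_MAX were (say) 15, the modem would own route table
+ * entries 0 through 7 and the AP entries 8 through 14.
+ */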
+
+/* Filter or route rules consist of a set of 32-bit values followed by a
+ * 32-bit all-zero rule list terminator. The "zero rule" is simply an
+ * all-zero rule followed by the list terminator.
+ */
+#define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
+
+#ifdef IPA_VALIDATE
+
+/* Check things that can be validated at build time. */
+static void ipa_table_validate_build(void)
+{
+ /* IPA hardware accesses memory 128 bytes at a time. Addresses
+ * referred to by entries in filter and route tables must be
+ * aligned on 128-byte boundaries. The only rule address
+ * ever used is the "zero rule", and it's aligned at the base
+ * of a coherent DMA allocation.
+ */
+ BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);
+
+ /* Filter and route tables contain DMA addresses that refer to
+ * filter or route rules. We use a fixed constant to represent
+ * the size of either type of table entry. Code in ipa_table_init()
+ * uses a pointer to __le64 to initialize table entries.
+ */
+ BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
+ BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));
+
+ /* A "zero rule" is used to represent no filtering or no routing.
+ * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
+ * assumes that it can be written using a pointer to __le64.
+ */
+ BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
+
+ /* Impose a practical limit on the number of routes */
+ BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
+ /* The modem must be allotted at least one route table entry */
+ BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
+ /* But it can't have more than what is available */
+ BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
+}
+
+static bool
+ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const struct ipa_mem *mem;
+ u32 size;
+
+ if (route) {
+ if (ipv6)
+ mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
+ : &ipa->mem[IPA_MEM_V6_ROUTE];
+ else
+ mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
+ : &ipa->mem[IPA_MEM_V4_ROUTE];
+ size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
+ } else {
+ if (ipv6)
+ mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
+ : &ipa->mem[IPA_MEM_V6_FILTER];
+ else
+ mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
+ : &ipa->mem[IPA_MEM_V4_FILTER];
+ size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
+ }
+
+ if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
+ return false;
+
+ /* mem->size >= size is sufficient, but we'll demand more */
+ if (mem->size == size)
+ return true;
+
+ /* Hashed table regions can be zero size if hashing is not supported */
+ if (hashed && !mem->size)
+ return true;
+
+ dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter", mem->size, size);
+
+ return false;
+}
+
+/* Verify the filter and route table memory regions are the expected size */
+bool ipa_table_valid(struct ipa *ipa)
+{
+ bool valid = true;
+
+ valid = valid && ipa_table_valid_one(ipa, false, false, false);
+ valid = valid && ipa_table_valid_one(ipa, false, false, true);
+ valid = valid && ipa_table_valid_one(ipa, false, true, false);
+ valid = valid && ipa_table_valid_one(ipa, false, true, true);
+ valid = valid && ipa_table_valid_one(ipa, true, false, false);
+ valid = valid && ipa_table_valid_one(ipa, true, false, true);
+ valid = valid && ipa_table_valid_one(ipa, true, true, false);
+ valid = valid && ipa_table_valid_one(ipa, true, true, true);
+
+ return valid;
+}
+
+bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 count;
+
+ if (!filter_map) {
+ dev_err(dev, "at least one filtering endpoint is required\n");
+
+ return false;
+ }
+
+ count = hweight32(filter_map);
+ if (count > IPA_FILTER_COUNT_MAX) {
+ dev_err(dev, "too many filtering endpoints (%u, max %u)\n",
+ count, IPA_FILTER_COUNT_MAX);
+
+ return false;
+ }
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+static void ipa_table_validate_build(void)
+{
+}
+
+#endif /* !IPA_VALIDATE */
+
+/* Zero entry count means no table, so just return a 0 address */
+static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
+{
+ u32 skip;
+
+ if (!count)
+ return 0;
+
+/* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */
+
+ /* Skip over the zero rule and possibly the filter mask */
+ skip = filter_mask ? 1 : 2;
+
+ return ipa->table_addr + skip * sizeof(*ipa->table_virt);
+}
+
+static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
+ u16 first, u16 count, const struct ipa_mem *mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t addr;
+ u32 offset;
+ u16 size;
+
+ /* Nothing to do if the table memory region is empty */
+ if (!mem->size)
+ return;
+
+ if (filter)
+ first++; /* skip over bitmap */
+
+ offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
+ size = count * IPA_TABLE_ENTRY_SIZE;
+ addr = ipa_table_addr(ipa, false, count);
+
+ ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
+}
+
+/* Reset entries in a single filter table belonging to either the AP or
+ * modem to refer to the zero entry. The memory region supplied will be
+ * one of the IPv4 or IPv6, hashed or non-hashed, filter tables.
+ */
+static int
+ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
+{
+ u32 ep_mask = ipa->filter_map;
+ u32 count = hweight32(ep_mask);
+ struct gsi_trans *trans;
+ enum gsi_ee_id ee_id;
+
+ if (!mem->size)
+ return 0;
+
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for %s filter reset\n",
+ modem ? "modem" : "AP");
+ return -EBUSY;
+ }
+
+ ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
+ while (ep_mask) {
+ u32 endpoint_id = __ffs(ep_mask);
+ struct ipa_endpoint *endpoint;
+
+ ep_mask ^= BIT(endpoint_id);
+
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->ee_id != ee_id)
+ continue;
+
+ ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
+ }
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/* Theoretically, each filter table could have more filter slots to
+ * update than the maximum number of commands in a transaction. So
+ * we do each table separately.
+ */
+static int ipa_filter_reset(struct ipa *ipa, bool modem)
+{
+ int ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
+ if (ret)
+ return ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
+ modem);
+ if (ret)
+ return ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
+ if (ret)
+ return ret;
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
+ modem);
+
+ return ret;
+}
+
+/* The AP routes and modem routes are each contiguous within the
+ * table. We can update each table with a single command, and we
+ * won't exceed the per-transaction command limit.
+ */
+static int ipa_route_reset(struct ipa *ipa, bool modem)
+{
+ struct gsi_trans *trans;
+ u16 first;
+ u16 count;
+
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for %s route reset\n",
+ modem ? "modem" : "AP");
+ return -EBUSY;
+ }
+
+ if (modem) {
+ first = IPA_ROUTE_MODEM_MIN;
+ count = IPA_ROUTE_MODEM_COUNT;
+ } else {
+ first = IPA_ROUTE_AP_MIN;
+ count = IPA_ROUTE_AP_COUNT;
+ }
+
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V4_ROUTE]);
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V6_ROUTE]);
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+void ipa_table_reset(struct ipa *ipa, bool modem)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const char *ee_name;
+ int ret;
+
+ ee_name = modem ? "modem" : "AP";
+
+ /* Report errors, but reset filter and route tables */
+ ret = ipa_filter_reset(ipa, modem);
+ if (ret)
+ dev_err(dev, "error %d resetting filter table for %s\n",
+ ret, ee_name);
+
+ ret = ipa_route_reset(ipa, modem);
+ if (ret)
+ dev_err(dev, "error %d resetting route table for %s\n",
+ ret, ee_name);
+}
+
+int ipa_table_hash_flush(struct ipa *ipa)
+{
+ u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ struct gsi_trans *trans;
+ u32 val;
+
+ /* IPA version 4.2 does not support hashed tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return 0;
+
+ trans = ipa_cmd_trans_alloc(ipa, 1);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+ return -EBUSY;
+ }
+
+ val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH;
+ val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH;
+
+ ipa_cmd_register_write_add(trans, offset, val, val, false);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
+ enum ipa_cmd_opcode opcode,
+ const struct ipa_mem *mem,
+ const struct ipa_mem *hash_mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t hash_addr;
+ dma_addr_t addr;
+ u16 hash_count;
+ u16 hash_size;
+ u16 count;
+ u16 size;
+
+ /* The number of filtering endpoints determines number of entries
+ * in the filter table. The hashed and non-hashed filter table
+ * will have the same number of entries. The size of the route
+ * table region determines the number of entries it has.
+ */
+ if (filter) {
+ count = hweight32(ipa->filter_map);
+ hash_count = hash_mem->size ? count : 0;
+ } else {
+ count = mem->size / IPA_TABLE_ENTRY_SIZE;
+ hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+ size = count * IPA_TABLE_ENTRY_SIZE;
+ hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;
+
+ addr = ipa_table_addr(ipa, filter, count);
+ hash_addr = ipa_table_addr(ipa, filter, hash_count);
+
+ ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
+ hash_size, hash_mem->offset, hash_addr);
+}
+
+int ipa_table_setup(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+ return -EBUSY;
+ }
+
+ ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
+ &ipa->mem[IPA_MEM_V4_ROUTE],
+ &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+
+ ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
+ &ipa->mem[IPA_MEM_V6_ROUTE],
+ &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+
+ ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
+ &ipa->mem[IPA_MEM_V4_FILTER],
+ &ipa->mem[IPA_MEM_V4_FILTER_HASHED]);
+
+ ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
+ &ipa->mem[IPA_MEM_V6_FILTER],
+ &ipa->mem[IPA_MEM_V6_FILTER_HASHED]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+void ipa_table_teardown(struct ipa *ipa)
+{
+ /* Nothing to do (XXX maybe reset the tables?) */
+}
+
+/**
+ * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
+ * @endpoint: Endpoint whose filter hash tuple should be zeroed
+ *
+ * Endpoint must be for the AP (not modem) and support filtering. Updates
+ * the filter hash values without changing route ones.
+ */
+static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 offset;
+ u32 val;
+
+ offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);
+
+ val = ioread32(endpoint->ipa->reg_virt + offset);
+
+ /* Zero all filter-related fields, preserving the rest */
+ u32_replace_bits(val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_filter_config(struct ipa *ipa, bool modem)
+{
+ enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
+ u32 ep_mask = ipa->filter_map;
+
+ /* IPA version 4.2 has no hashed filter tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ while (ep_mask) {
+ u32 endpoint_id = __ffs(ep_mask);
+ struct ipa_endpoint *endpoint;
+
+ ep_mask ^= BIT(endpoint_id);
+
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->ee_id == ee_id)
+ ipa_filter_tuple_zero(endpoint);
+ }
+}
+
+static void ipa_filter_deconfig(struct ipa *ipa, bool modem)
+{
+ /* Nothing to do */
+}
+
+static bool ipa_route_id_modem(u32 route_id)
+{
+ return route_id >= IPA_ROUTE_MODEM_MIN &&
+ route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
+}
+
+/**
+ * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
+ * @ipa: IPA pointer
+ * @route_id: Route table entry whose hash tuple should be zeroed
+ *
+ * Updates the route hash values without changing filter ones.
+ */
+static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
+{
+ u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
+ u32 val;
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ /* Zero all route-related fields, preserving the rest */
+ u32_replace_bits(val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_route_config(struct ipa *ipa, bool modem)
+{
+ u32 route_id;
+
+ /* IPA version 4.2 has no hashed route tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
+ if (ipa_route_id_modem(route_id) == modem)
+ ipa_route_tuple_zero(ipa, route_id);
+}
+
+static void ipa_route_deconfig(struct ipa *ipa, bool modem)
+{
+ /* Nothing to do */
+}
+
+void ipa_table_config(struct ipa *ipa)
+{
+ ipa_filter_config(ipa, false);
+ ipa_filter_config(ipa, true);
+ ipa_route_config(ipa, false);
+ ipa_route_config(ipa, true);
+}
+
+void ipa_table_deconfig(struct ipa *ipa)
+{
+ ipa_route_deconfig(ipa, true);
+ ipa_route_deconfig(ipa, false);
+ ipa_filter_deconfig(ipa, true);
+ ipa_filter_deconfig(ipa, false);
+}
+
+/*
+ * Allocate a coherent DMA buffer containing initialized filter and
+ * route table data. This is used when initializing or resetting the
+ * IPA filter or route tables.
+ *
+ * The first entry in a filter table contains a bitmap indicating which
+ * endpoints contain entries in the table. In addition to that first entry,
+ * there are at most IPA_FILTER_COUNT_MAX entries that follow. Filter table
+ * entries are 64 bits wide, and (other than the bitmap) contain the DMA
+ * address of a filter rule. A "zero rule" indicates no filtering, and
+ * consists of 64 bits of zeroes. When a filter table is initialized (or
+ * reset) its entries are made to refer to the zero rule.
+ *
+ * Each entry in a route table is the DMA address of a routing rule. For
+ * routing there is also a 64-bit "zero rule" that means no routing, and
+ * when a route table is initialized or reset, its entries are made to refer
+ * to the zero rule. The zero rule is shared for route and filter tables.
+ *
+ * Note that the IPA hardware requires a filter or route rule address to be
+ * aligned on a 128-byte boundary. The coherent DMA buffer we allocate here
+ * has a minimum alignment, and we place the zero rule at the base of that
+ * allocated space. In ipa_table_init() we verify that the minimum DMA
+ * allocation alignment meets our requirement.
+ *
+ *         +-------------------+
+ *     --> |     zero rule     |
+ *    /    |-------------------|
+ *    |    |    filter mask    |
+ *    |\   |-------------------|
+ *    | ---- zero rule address | \
+ *    |\   |-------------------|  |
+ *    | ---- zero rule address |  | IPA_FILTER_COUNT_MAX
+ *    |    |-------------------|   > or IPA_ROUTE_COUNT_MAX,
+ *    |           ...          |  | whichever is greater
+ *     \   |-------------------|  |
+ *      ---- zero rule address | /
+ *         +-------------------+
+ */
+int ipa_table_init(struct ipa *ipa)
+{
+ u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
+ struct device *dev = &ipa->pdev->dev;
+ dma_addr_t addr;
+ __le64 le_addr;
+ __le64 *virt;
+ size_t size;
+
+ ipa_table_validate_build();
+
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+ virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ ipa->table_virt = virt;
+ ipa->table_addr = addr;
+
+ /* First slot is the zero rule */
+ *virt++ = 0;
+
+ /* Next is the filter table bitmap. The "soft" bitmap value
+ * must be converted to the hardware representation by shifting
+ * it left one position. (Bit 0 represents global filtering,
+ * which is possible but not used.)
+ */
+ *virt++ = cpu_to_le64((u64)ipa->filter_map << 1);
+
+ /* All the rest contain the DMA address of the zero rule */
+ le_addr = cpu_to_le64(addr);
+ while (count--)
+ *virt++ = le_addr;
+
+ return 0;
+}
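To make the sizing and bitmap arithmetic above concrete, here is a minimal user-space sketch (illustrative only; the constants mirror ipa_table.h, and IPA_ZERO_RULE_SIZE is assumed to be 8 bytes, one 64-bit zero rule):

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE	sizeof(uint64_t)	/* IPA_TABLE_ENTRY_SIZE */
#define FILTER_MAX	14U			/* IPA_FILTER_COUNT_MAX */
#define ROUTE_MAX	15U			/* IPA_ROUTE_COUNT_MAX */
#define ZERO_RULE_SIZE	8U			/* assumed IPA_ZERO_RULE_SIZE */

int main(void)
{
	uint32_t count = FILTER_MAX > ROUTE_MAX ? FILTER_MAX : ROUTE_MAX;
	size_t size = ZERO_RULE_SIZE + (1 + count) * ENTRY_SIZE;
	uint32_t filter_map = 0x6;	/* endpoints 1 and 2 do filtering */

	printf("buffer size = %zu bytes\n", size);		/* 8 + 16 * 8 = 136 */
	printf("hw filter bitmap = %#x\n", filter_map << 1);	/* 0x6 -> 0xc */

	return 0;
}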
+
+void ipa_table_exit(struct ipa *ipa)
+{
+ u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
+ struct device *dev = &ipa->pdev->dev;
+ size_t size;
+
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+
+ dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
+ ipa->table_addr = 0;
+ ipa->table_virt = NULL;
+}
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
new file mode 100644
index 000000000000..64ea0221441a
--- /dev/null
+++ b/drivers/net/ipa/ipa_table.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_TABLE_H_
+#define _IPA_TABLE_H_
+
+#include <linux/types.h>
+
+struct ipa;
+
+/* The size of a filter or route table entry */
+#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */
+
+/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
+#define IPA_FILTER_COUNT_MAX 14
+
+/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
+#define IPA_ROUTE_COUNT_MAX 15
+
+#ifdef IPA_VALIDATE
+
+/**
+ * ipa_table_valid() - Validate route and filter table memory regions
+ * @ipa: IPA pointer
+ *
+ * Return: true if all regions are valid, false otherwise
+ */
+bool ipa_table_valid(struct ipa *ipa);
+
+/**
+ * ipa_filter_map_valid() - Validate a filter table endpoint bitmap
+ * @ipa: IPA pointer
+ * @filter_mask: Filter table endpoint bitmap to check
+ *
+ * Return: true if the bitmap is valid, false otherwise
+ */
+bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
+
+#else /* !IPA_VALIDATE */
+
+static inline bool ipa_table_valid(struct ipa *ipa)
+{
+ return true;
+}
+
+static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/**
+ * ipa_table_reset() - Reset filter and route tables entries to "none"
+ * @ipa: IPA pointer
+ * @modem: Whether to reset modem or AP entries
+ */
+void ipa_table_reset(struct ipa *ipa, bool modem);
+
+/**
+ * ipa_table_hash_flush() - Synchronize hashed filter and route updates
+ * @ipa: IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_table_hash_flush(struct ipa *ipa);
+
+/**
+ * ipa_table_setup() - Set up filter and route tables
+ * @ipa: IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_table_setup(struct ipa *ipa);
+
+/**
+ * ipa_table_teardown() - Inverse of ipa_table_setup()
+ * @ipa: IPA pointer
+ */
+void ipa_table_teardown(struct ipa *ipa);
+
+/**
+ * ipa_table_config() - Configure filter and route tables
+ * @ipa: IPA pointer
+ */
+void ipa_table_config(struct ipa *ipa);
+
+/**
+ * ipa_table_deconfig() - Inverse of ipa_table_config()
+ * @ipa: IPA pointer
+ */
+void ipa_table_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_table_init() - Do early initialization of filter and route tables
+ * @ipa: IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_table_init(struct ipa *ipa);
+
+/**
+ * ipa_table_exit() - Inverse of ipa_table_init()
+ * @ipa: IPA pointer
+ */
+void ipa_table_exit(struct ipa *ipa);
+
+#endif /* _IPA_TABLE_H_ */
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
new file mode 100644
index 000000000000..a1f8db00d55a
--- /dev/null
+++ b/drivers/net/ipa/ipa_uc.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_uc.h"
+
+/**
+ * DOC: The IPA embedded microcontroller
+ *
+ * The IPA incorporates a microcontroller that is able to do some additional
+ * handling/offloading of network activity. The current code makes
+ * essentially no use of the microcontroller, but the microcontroller
+ * still requires some initialization, and it must be notified when the
+ * AP crashes.
+ *
+ * The microcontroller can generate two interrupts to the AP. One interrupt
+ * is used to indicate that a response to a request from the AP is available.
+ * The other is used to notify the AP of the occurrence of an event. In
+ * addition, the AP can interrupt the microcontroller by writing a register.
+ *
+ * A 128-byte block of structured memory within the IPA SRAM is used together
+ * with these interrupts to implement the communication interface between the
+ * AP and the IPA microcontroller. Each side writes data to the shared area
+ * before interrupting its peer, which will read the written data in response
+ * to the interrupt. Some information found in the shared area is currently
+ * unused. All remaining space in the shared area is reserved, and must not
+ * be read or written by the AP.
+ */
+/* Supports hardware interface version 0x2000 */
+
+/* Offset relative to the base of the IPA shared address space of the
+ * shared region used for communication with the microcontroller. The
+ * region is 128 bytes in size, but only the first 40 bytes are used.
+ */
+#define IPA_MEM_UC_OFFSET 0x0000
+
+/* Delay to allow the microcontroller to save state when crashing */
+#define IPA_SEND_DELAY 100 /* microseconds */
+
+/**
+ * struct ipa_uc_mem_area - AP/microcontroller shared memory area
+ * @command: command code (AP->microcontroller)
+ * @command_param: low 32 bits of command parameter (AP->microcontroller)
+ * @command_param_hi: high 32 bits of command parameter (AP->microcontroller)
+ *
+ * @response: response code (microcontroller->AP)
+ * @response_param: response parameter (microcontroller->AP)
+ *
+ * @event: event code (microcontroller->AP)
+ * @event_param: event parameter (microcontroller->AP)
+ *
+ * @first_error_address: address of first error-source on SNOC
+ * @hw_state: state of hardware (including error type information)
+ * @warning_counter: counter of non-fatal hardware errors
+ * @interface_version: hardware-reported interface version
+ */
+struct ipa_uc_mem_area {
+ u8 command; /* enum ipa_uc_command */
+ u8 reserved0[3];
+ __le32 command_param;
+ __le32 command_param_hi;
+ u8 response; /* enum ipa_uc_response */
+ u8 reserved1[3];
+ __le32 response_param;
+ u8 event; /* enum ipa_uc_event */
+ u8 reserved2[3];
+
+ __le32 event_param;
+ __le32 first_error_address;
+ u8 hw_state;
+ u8 warning_counter;
+ __le16 reserved3;
+ __le16 interface_version;
+ __le16 reserved4;
+};
+
+/** enum ipa_uc_command - commands from the AP to the microcontroller */
+enum ipa_uc_command {
+ IPA_UC_COMMAND_NO_OP = 0,
+ IPA_UC_COMMAND_UPDATE_FLAGS = 1,
+ IPA_UC_COMMAND_DEBUG_RUN_TEST = 2,
+ IPA_UC_COMMAND_DEBUG_GET_INFO = 3,
+ IPA_UC_COMMAND_ERR_FATAL = 4,
+ IPA_UC_COMMAND_CLK_GATE = 5,
+ IPA_UC_COMMAND_CLK_UNGATE = 6,
+ IPA_UC_COMMAND_MEMCPY = 7,
+ IPA_UC_COMMAND_RESET_PIPE = 8,
+ IPA_UC_COMMAND_REG_WRITE = 9,
+ IPA_UC_COMMAND_GSI_CH_EMPTY = 10,
+};
+
+/** enum ipa_uc_response - microcontroller response codes */
+enum ipa_uc_response {
+ IPA_UC_RESPONSE_NO_OP = 0,
+ IPA_UC_RESPONSE_INIT_COMPLETED = 1,
+ IPA_UC_RESPONSE_CMD_COMPLETED = 2,
+ IPA_UC_RESPONSE_DEBUG_GET_INFO = 3,
+};
+
+/** enum ipa_uc_event - common cpu events reported by the microcontroller */
+enum ipa_uc_event {
+ IPA_UC_EVENT_NO_OP = 0,
+ IPA_UC_EVENT_ERROR = 1,
+ IPA_UC_EVENT_LOG_INFO = 2,
+};
+
+static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
+{
+ u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
+
+ return ipa->mem_virt + offset;
+}
+
+/* Microcontroller event IPA interrupt handler */
+static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ struct device *dev = &ipa->pdev->dev;
+
+ if (shared->event == IPA_UC_EVENT_ERROR)
+ dev_err(dev, "microcontroller error event\n");
+ else
+ dev_err(dev, "unsupported microcontroller event %hhu\n",
+ shared->event);
+}
+
+/* Microcontroller response IPA interrupt handler */
+static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+
+ /* An INIT_COMPLETED response message is sent to the AP by the
+ * microcontroller when it is operational. Other than this, the AP
+ * should only receive responses from the microcontroller when it has
+ * sent it a request message.
+ *
+ * We can drop the clock reference taken in ipa_uc_init() once we
+ * know the microcontroller has finished its initialization.
+ */
+ switch (shared->response) {
+ case IPA_UC_RESPONSE_INIT_COMPLETED:
+ ipa->uc_loaded = true;
+ ipa_clock_put(ipa);
+ break;
+ default:
+ dev_warn(&ipa->pdev->dev,
+ "unsupported microcontroller response %hhu\n",
+ shared->response);
+ break;
+ }
+}
+
+/* ipa_uc_setup() - Set up the microcontroller */
+void ipa_uc_setup(struct ipa *ipa)
+{
+ /* The microcontroller needs the IPA clock running until it has
+ * completed its initialization. It signals this by sending an
+ * INIT_COMPLETED response message to the AP. This could occur after
+ * we have finished doing the rest of the IPA initialization, so we
+ * need to take an extra "proxy" reference, and hold it until we've
+ * received that signal. (This reference is dropped in
+ * ipa_uc_response_hdlr(), above.)
+ */
+ ipa_clock_get(ipa);
+
+ ipa->uc_loaded = false;
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler);
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr);
+}
+
+/* Inverse of ipa_uc_setup() */
+void ipa_uc_teardown(struct ipa *ipa)
+{
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ if (!ipa->uc_loaded)
+ ipa_clock_put(ipa);
+}
+
+/* Send a command to the microcontroller */
+static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+
+ shared->command = command;
+ shared->command_param = cpu_to_le32(command_param);
+ shared->command_param_hi = 0;
+ shared->response = 0;
+ shared->response_param = 0;
+
+ iowrite32(1, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
+}
+
+/* Tell the microcontroller the AP is shutting down */
+void ipa_uc_panic_notifier(struct ipa *ipa)
+{
+ if (!ipa->uc_loaded)
+ return;
+
+ send_uc_command(ipa, IPA_UC_COMMAND_ERR_FATAL, 0);
+
+ /* give uc enough time to save state */
+ udelay(IPA_SEND_DELAY);
+}
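For context, a minimal sketch of how ipa_uc_panic_notifier() might be hooked into the kernel's panic notifier chain; the ipa_panic_* names below are hypothetical (the real hookup lives elsewhere in the driver), and at this kernel vintage panic_notifier_list is declared in <linux/kernel.h>:

#include <linux/kernel.h>	/* panic_notifier_list */
#include <linux/notifier.h>

#include "ipa.h"
#include "ipa_uc.h"

static struct ipa *ipa_panic_ctx;	/* hypothetical */

static int ipa_panic_notify(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	ipa_uc_panic_notifier(ipa_panic_ctx);

	return NOTIFY_DONE;
}

static struct notifier_block ipa_panic_nb = {
	.notifier_call = ipa_panic_notify,
};

static void ipa_panic_setup(struct ipa *ipa)
{
	ipa_panic_ctx = ipa;
	atomic_notifier_chain_register(&panic_notifier_list, &ipa_panic_nb);
}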
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
new file mode 100644
index 000000000000..e8510899a3f0
--- /dev/null
+++ b/drivers/net/ipa/ipa_uc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_UC_H_
+#define _IPA_UC_H_
+
+struct ipa;
+
+/**
+ * ipa_uc_setup() - set up the IPA microcontroller subsystem
+ * @ipa: IPA pointer
+ */
+void ipa_uc_setup(struct ipa *ipa);
+
+/**
+ * ipa_uc_teardown() - inverse of ipa_uc_setup()
+ * @ipa: IPA pointer
+ */
+void ipa_uc_teardown(struct ipa *ipa);
+
+/**
+ * ipa_uc_panic_notifier() - notify the microcontroller of a system crash
+ * @ipa: IPA pointer
+ *
+ * Notifier function called when the system crashes, to inform the
+ * microcontroller of the event.
+ */
+void ipa_uc_panic_notifier(struct ipa *ipa);
+
+#endif /* _IPA_UC_H_ */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
new file mode 100644
index 000000000000..85449df0f512
--- /dev/null
+++ b/drivers/net/ipa/ipa_version.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_VERSION_H_
+#define _IPA_VERSION_H_
+
+/**
+ * enum ipa_version
+ *
+ * Defines the version of IPA (and GSI) hardware present on the platform.
+ * It seems this might be better defined elsewhere, but having it here gets
+ * it where it's needed.
+ */
+enum ipa_version {
+ IPA_VERSION_3_5_1, /* GSI version 1.3.0 */
+ IPA_VERSION_4_0, /* GSI version 2.0 */
+ IPA_VERSION_4_1, /* GSI version 2.1 */
+ IPA_VERSION_4_2, /* GSI version 2.2 */
+};
+
+#endif /* _IPA_VERSION_H_ */
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 92bc2b2df660..d0d31cb99180 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
+#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
#include <uapi/linux/if_macsec.h>
@@ -69,6 +70,16 @@ struct macsec_eth_header {
sc; \
sc = rtnl_dereference(sc->next))
+#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
+
+struct gcm_iv_xpn {
+ union {
+ u8 short_secure_channel_id[4];
+ ssci_t ssci;
+ };
+ __be64 pn;
+} __packed;
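pn_same_half() compares only the top bit of two 32-bit packet numbers, i.e. whether both fall in the same half of the 2^32 PN space; the receive path uses a half change to detect that the 32-bit PN wrapped. A small stand-alone illustration (plain C, not driver code):

#include <assert.h>
#include <stdint.h>

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

int main(void)
{
	/* Both below 2^31: same (lower) half */
	assert(pn_same_half(0x00000001u, 0x7fffffffu));

	/* One each side of 2^31: a small PN following a large one
	 * suggests the 32-bit PN wrapped rather than went backward.
	 */
	assert(!pn_same_half(0x80000000u, 0x7fffffffu));

	return 0;
}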
+
struct gcm_iv {
union {
u8 secure_channel_id[8];
@@ -77,17 +88,6 @@ struct gcm_iv {
__be32 pn;
};
-struct macsec_dev_stats {
- __u64 OutPktsUntagged;
- __u64 InPktsUntagged;
- __u64 OutPktsTooLong;
- __u64 InPktsNoTag;
- __u64 InPktsBadTag;
- __u64 InPktsUnknownSCI;
- __u64 InPktsNoSCI;
- __u64 InPktsOverrun;
-};
-
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
struct pcpu_secy_stats {
@@ -230,11 +230,13 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
+#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32
#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
+#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
@@ -326,7 +328,8 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
 /* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
- if (macsec->offload == MACSEC_OFFLOAD_PHY)
+ if (macsec->offload == MACSEC_OFFLOAD_MAC ||
+ macsec->offload == MACSEC_OFFLOAD_PHY)
return true;
return false;
@@ -342,6 +345,9 @@ static bool macsec_check_offload(enum macsec_offload offload,
if (offload == MACSEC_OFFLOAD_PHY)
return macsec->real_dev->phydev &&
macsec->real_dev->phydev->macsec_ops;
+ else if (offload == MACSEC_OFFLOAD_MAC)
+ return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
+ macsec->real_dev->macsec_ops;
return false;
}
@@ -356,9 +362,14 @@ static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
if (offload == MACSEC_OFFLOAD_PHY)
ctx->phydev = macsec->real_dev->phydev;
+ else if (offload == MACSEC_OFFLOAD_MAC)
+ ctx->netdev = macsec->real_dev;
}
- return macsec->real_dev->phydev->macsec_ops;
+ if (offload == MACSEC_OFFLOAD_PHY)
+ return macsec->real_dev->phydev->macsec_ops;
+ else
+ return macsec->real_dev->macsec_ops;
}
/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
@@ -373,8 +384,8 @@ static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
return __macsec_get_ops(macsec->offload, macsec, ctx);
}
-/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
-static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
+static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
int len = skb->len - 2 * ETH_ALEN;
@@ -399,8 +410,8 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
if (h->unused)
return false;
- /* rx.pn != 0 (figure 10-5) */
- if (!h->packet_number)
+ /* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
+ if (!h->packet_number && !xpn)
return false;
/* length check, f) g) h) i) */
@@ -412,6 +423,15 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
+static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
+ salt_t salt)
+{
+ struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
+
+ gcm_iv->ssci = ssci ^ salt.ssci;
+ gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
+}
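The 12-byte XPN IV built above is the 4-byte SSCI followed by the 8-byte big-endian PN, each XORed with the corresponding bytes of the per-SA salt (per IEEE 802.1AEbw). A hedged stand-alone rendering of the same construction, using glibc's htobe32()/htobe64():

#include <endian.h>	/* htobe32(), htobe64() (glibc) */
#include <stdint.h>
#include <string.h>

static void fill_iv_xpn(uint8_t iv[12], uint32_t ssci, uint64_t pn,
			const uint8_t salt[12])
{
	uint32_t ssci_be = htobe32(ssci);
	uint64_t pn_be = htobe64(pn);
	size_t i;

	memcpy(iv, &ssci_be, sizeof(ssci_be));
	memcpy(iv + 4, &pn_be, sizeof(pn_be));

	/* XOR the whole 12 bytes with the salt, as the driver does */
	for (i = 0; i < 12; i++)
		iv[i] ^= salt[i];
}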
+
static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
@@ -447,14 +467,19 @@ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
-static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
+ struct macsec_secy *secy)
{
- u32 pn;
+ pn_t pn;
spin_lock_bh(&tx_sa->lock);
- pn = tx_sa->next_pn;
- tx_sa->next_pn++;
+ pn = tx_sa->next_pn_halves;
+ if (secy->xpn)
+ tx_sa->next_pn++;
+ else
+ tx_sa->next_pn_halves.lower++;
+
if (tx_sa->next_pn == 0)
__macsec_pn_wrapped(secy, tx_sa);
spin_unlock_bh(&tx_sa->lock);
@@ -569,7 +594,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
bool sci_present;
- u32 pn;
+ pn_t pn;
secy = &macsec->secy;
tx_sc = &secy->tx_sc;
@@ -611,12 +636,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
- if (pn == 0) {
+ if (pn.full64 == 0) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
- macsec_fill_sectag(hh, secy, pn, sci_present);
+ macsec_fill_sectag(hh, secy, pn.lower, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
@@ -647,7 +672,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-ENOMEM);
}
- macsec_fill_iv(iv, secy->sci, pn);
+ if (secy->xpn)
+ macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
+ else
+ macsec_fill_iv(iv, secy->sci, pn.lower);
sg_init_table(sg, ret);
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -699,13 +727,14 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u32 lowest_pn = 0;
spin_lock(&rx_sa->lock);
- if (rx_sa->next_pn >= secy->replay_window)
- lowest_pn = rx_sa->next_pn - secy->replay_window;
+ if (rx_sa->next_pn_halves.lower >= secy->replay_window)
+ lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
/* Now perform replay protection check again
* (see IEEE 802.1AE-2006 figure 10-5)
*/
- if (secy->replay_protect && pn < lowest_pn) {
+ if (secy->replay_protect && pn < lowest_pn &&
+ (!secy->xpn || pn_same_half(pn, lowest_pn))) {
spin_unlock(&rx_sa->lock);
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
@@ -754,8 +783,15 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
}
u64_stats_update_end(&rxsc_stats->syncp);
- if (pn >= rx_sa->next_pn)
- rx_sa->next_pn = pn + 1;
+ // Instead of "pn >=" - to support pn overflow in xpn
+ if (pn + 1 > rx_sa->next_pn_halves.lower) {
+ rx_sa->next_pn_halves.lower = pn + 1;
+ } else if (secy->xpn &&
+ !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
+ rx_sa->next_pn_halves.upper++;
+ rx_sa->next_pn_halves.lower = pn + 1;
+ }
+
spin_unlock(&rx_sa->lock);
}
@@ -842,6 +878,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
unsigned char *iv;
struct aead_request *req;
struct macsec_eth_header *hdr;
+ u32 hdr_pn;
u16 icv_len = secy->icv_len;
macsec_skb_cb(skb)->valid = false;
@@ -861,7 +898,21 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
}
hdr = (struct macsec_eth_header *)skb->data;
- macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
+ hdr_pn = ntohl(hdr->packet_number);
+
+ if (secy->xpn) {
+ pn_t recovered_pn = rx_sa->next_pn_halves;
+
+ recovered_pn.lower = hdr_pn;
+ if (hdr_pn < rx_sa->next_pn_halves.lower &&
+ !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
+ recovered_pn.upper++;
+
+ macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
+ rx_sa->key.salt);
+ } else {
+ macsec_fill_iv(iv, sci, hdr_pn);
+ }
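The recovery above can be checked by hand: take the receive SA's next expected PN, substitute the header's 32-bit value as the lower half, and bump the upper half when the header PN appears to have wrapped. A small stand-alone sketch of that arithmetic (the values are made up):

#include <assert.h>
#include <stdint.h>

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

int main(void)
{
	/* Next expected PN is 2:fffffff0; the SecTAG carries 0x00000003 */
	uint32_t next_lower = 0xfffffff0, next_upper = 2;
	uint32_t hdr_pn = 0x00000003;
	uint32_t upper = next_upper;

	if (hdr_pn < next_lower && !pn_same_half(hdr_pn, next_lower))
		upper++;	/* the 32-bit half wrapped */

	/* Recovered 64-bit PN is 3:00000003 */
	assert((((uint64_t)upper << 32) | hdr_pn) == 0x0000000300000003ull);

	return 0;
}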
sg_init_table(sg, ret);
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -944,22 +995,53 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
/* Deliver to the uncontrolled port by default */
enum rx_handler_result ret = RX_HANDLER_PASS;
+ struct ethhdr *hdr = eth_hdr(skb);
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
- /* 10.6 If the management control validateFrames is not
- * Strict, frames without a SecTAG are received, counted, and
- * delivered to the Controlled Port
- */
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
+ struct net_device *ndev = macsec->secy.netdev;
- if (!macsec_is_offloaded(macsec) &&
- macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ /* If h/w offloading is enabled, HW decodes frames and strips
+ * the SecTAG, so we have to deduce which port to deliver to.
+ */
+ if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
+ if (ether_addr_equal_64bits(hdr->h_dest,
+ ndev->dev_addr)) {
+ /* exact match, divert skb to this port */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ } else if (is_multicast_ether_addr_64bits(
+ hdr->h_dest)) {
+ /* multicast frame, deliver on this port too */
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ break;
+
+ nskb->dev = ndev;
+ if (ether_addr_equal_64bits(hdr->h_dest,
+ ndev->broadcast))
+ nskb->pkt_type = PACKET_BROADCAST;
+ else
+ nskb->pkt_type = PACKET_MULTICAST;
+
+ netif_rx(nskb);
+ }
+ continue;
+ }
+
+ /* 10.6 If the management control validateFrames is not
+ * Strict, frames without a SecTAG are received, counted, and
+ * delivered to the Controlled Port
+ */
+ if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
@@ -971,19 +1053,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
if (!nskb)
break;
- nskb->dev = macsec->secy.netdev;
+ nskb->dev = ndev;
if (netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
}
-
- if (netif_running(macsec->secy.netdev) &&
- macsec_is_offloaded(macsec)) {
- ret = RX_HANDLER_EXACT;
- goto out;
- }
}
out:
@@ -1002,7 +1078,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
sci_t sci;
- u32 pn;
+ u32 hdr_pn;
bool cbit;
struct pcpu_rx_sc_stats *rxsc_stats;
struct pcpu_secy_stats *secy_stats;
@@ -1073,7 +1149,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
secy_stats = this_cpu_ptr(macsec->stats);
rxsc_stats = this_cpu_ptr(rx_sc->stats);
- if (!macsec_validate_skb(skb, secy->icv_len)) {
+ if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
@@ -1105,13 +1181,16 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
}
/* First, PN check to avoid decrypting obviously wrong packets */
- pn = ntohl(hdr->packet_number);
+ hdr_pn = ntohl(hdr->packet_number);
if (secy->replay_protect) {
bool late;
spin_lock(&rx_sa->lock);
- late = rx_sa->next_pn >= secy->replay_window &&
- pn < (rx_sa->next_pn - secy->replay_window);
+ late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
+ hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
+
+ if (secy->xpn)
+ late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
spin_unlock(&rx_sa->lock);
if (late) {
@@ -1140,7 +1219,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
}
- if (!macsec_post_decrypt(skb, secy, pn))
+ if (!macsec_post_decrypt(skb, secy, hdr_pn))
goto drop;
deliver:
@@ -1226,7 +1305,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return tfm;
@@ -1258,6 +1338,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
return PTR_ERR(rx_sa->key.tfm);
}
+ rx_sa->ssci = MACSEC_UNDEF_SSCI;
rx_sa->active = false;
rx_sa->next_pn = 1;
refcount_set(&rx_sa->refcnt, 1);
@@ -1356,6 +1437,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
return PTR_ERR(tx_sa->key.tfm);
}
+ tx_sa->ssci = MACSEC_UNDEF_SSCI;
tx_sa->active = false;
refcount_set(&tx_sa->refcnt, 1);
spin_lock_init(&tx_sa->lock);
@@ -1388,6 +1470,11 @@ static struct net_device *get_dev_from_nl(struct net *net,
return dev;
}
+static enum macsec_offload nla_get_offload(const struct nlattr *nla)
+{
+ return (__force enum macsec_offload)nla_get_u8(nla);
+}
+
static sci_t nla_get_sci(const struct nlattr *nla)
{
return (__force sci_t)nla_get_u64(nla);
@@ -1399,6 +1486,16 @@ static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
+static ssci_t nla_get_ssci(const struct nlattr *nla)
+{
+ return (__force ssci_t)nla_get_u32(nla);
+}
+
+static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
+{
+ return nla_put_u32(skb, attrtype, (__force u32)value);
+}
+
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
struct nlattr **attrs,
struct nlattr **tb_sa,
@@ -1514,11 +1611,14 @@ static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
+ [MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
.len = MACSEC_KEYID_LEN, },
[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
.len = MACSEC_MAX_KEY_LEN, },
+ [MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
+ [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
+ .len = MACSEC_SALT_LEN, },
};
static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
@@ -1591,7 +1691,8 @@ static bool validate_add_rxsa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (attrs[MACSEC_SA_ATTR_PN] &&
+ *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1613,6 +1714,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_rx_sc *rx_sc;
struct macsec_rx_sa *rx_sa;
unsigned char assoc_num;
+ int pn_len;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
int err;
@@ -1645,6 +1747,29 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (secy->xpn) {
+ if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
+ pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
+ MACSEC_SALT_LEN);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+ }
+
rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
if (rx_sa) {
rtnl_unlock();
@@ -1667,7 +1792,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -1689,6 +1814,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
+ ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
MACSEC_KEYID_LEN);
@@ -1697,6 +1823,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
+ if (secy->xpn) {
+ rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
@@ -1730,6 +1862,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct nlattr **attrs = info->attrs;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ struct macsec_secy *secy;
bool was_active;
int ret;
@@ -1749,6 +1882,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(dev);
}
+ secy = &macsec_priv(dev)->secy;
sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
rx_sc = create_rx_sc(dev, sci);
@@ -1772,6 +1906,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
if (ret)
@@ -1821,6 +1956,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
unsigned char assoc_num;
+ int pn_len;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
bool was_operational;
int err;
@@ -1853,6 +1989,29 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (secy->xpn) {
+ if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
+ pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
+ MACSEC_SALT_LEN);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+ }
+
tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
if (tx_sa) {
rtnl_unlock();
@@ -1874,7 +2033,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
}
spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -1897,6 +2056,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
+ ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
MACSEC_KEYID_LEN);
@@ -1905,6 +2065,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
+ if (secy->xpn) {
+ tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
@@ -1966,6 +2132,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
if (ret)
@@ -2031,6 +2198,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
if (ret)
goto cleanup;
@@ -2089,6 +2257,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_txsa, &ctx);
if (ret)
@@ -2111,7 +2280,9 @@ static bool validate_upd_sa(struct nlattr **attrs)
{
if (!attrs[MACSEC_SA_ATTR_AN] ||
attrs[MACSEC_SA_ATTR_KEY] ||
- attrs[MACSEC_SA_ATTR_KEYID])
+ attrs[MACSEC_SA_ATTR_KEYID] ||
+ attrs[MACSEC_SA_ATTR_SSCI] ||
+ attrs[MACSEC_SA_ATTR_SALT])
return false;
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
@@ -2138,9 +2309,11 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
u8 assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
bool was_operational, was_active;
- u32 prev_pn = 0;
+ pn_t prev_pn;
int ret = 0;
+ prev_pn.full64 = 0;
+
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2159,9 +2332,19 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
}
if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ int pn_len;
+
+ pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
spin_lock_bh(&tx_sa->lock);
- prev_pn = tx_sa->next_pn;
- tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ prev_pn = tx_sa->next_pn_halves;
+ tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
}
@@ -2186,6 +2369,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
if (ret)
@@ -2199,7 +2383,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
cleanup:
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = prev_pn;
+ tx_sa->next_pn_halves = prev_pn;
spin_unlock_bh(&tx_sa->lock);
}
tx_sa->active = was_active;
@@ -2219,9 +2403,11 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
bool was_active;
- u32 prev_pn = 0;
+ pn_t prev_pn;
int ret = 0;
+ prev_pn.full64 = 0;
+
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2243,9 +2429,19 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
}
if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ int pn_len;
+
+ pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
spin_lock_bh(&rx_sa->lock);
- prev_pn = rx_sa->next_pn;
- rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ prev_pn = rx_sa->next_pn_halves;
+ rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -2266,6 +2462,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
if (ret)
@@ -2278,7 +2475,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
cleanup:
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = prev_pn;
+ rx_sa->next_pn_halves = prev_pn;
spin_unlock_bh(&rx_sa->lock);
}
rx_sa->active = was_active;
@@ -2336,6 +2533,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
if (ret)
@@ -2375,11 +2573,10 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
enum macsec_offload offload, prev_offload;
int (*func)(struct macsec_context *ctx);
struct nlattr **attrs = info->attrs;
- struct net_device *dev, *loop_dev;
+ struct net_device *dev;
const struct macsec_ops *ops;
struct macsec_context ctx;
struct macsec_dev *macsec;
- struct net *loop_net;
int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -2398,6 +2595,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(dev);
macsec = macsec_priv(dev);
+ if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
+ return -EINVAL;
+
offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
if (macsec->offload == offload)
return 0;
@@ -2407,28 +2607,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
!macsec_check_offload(offload, macsec))
return -EOPNOTSUPP;
- if (offload == MACSEC_OFFLOAD_OFF)
- goto skip_limitation;
-
- /* Check the physical interface isn't offloading another interface
- * first.
- */
- for_each_net(loop_net) {
- for_each_netdev(loop_net, loop_dev) {
- struct macsec_dev *priv;
-
- if (!netif_is_macsec(loop_dev))
- continue;
-
- priv = macsec_priv(loop_dev);
-
- if (priv->real_dev == macsec->real_dev &&
- priv->offload != MACSEC_OFFLOAD_OFF)
- return -EBUSY;
- }
- }
-
-skip_limitation:
/* Check if the net device is busy. */
if (netif_running(dev))
return -EBUSY;
@@ -2463,6 +2641,11 @@ skip_limitation:
if (ret)
goto rollback;
+ /* Force features update, since they are different for SW MACSec and
+ * HW offloading cases.
+ */
+ netdev_update_features(dev);
+
rtnl_unlock();
return 0;
@@ -2473,207 +2656,309 @@ rollback:
return ret;
}
-static int copy_tx_sa_stats(struct sk_buff *skb,
- struct macsec_tx_sa_stats __percpu *pstats)
+static void get_tx_sa_stats(struct net_device *dev, int an,
+ struct macsec_tx_sa *tx_sa,
+ struct macsec_tx_sa_stats *sum)
{
- struct macsec_tx_sa_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.sa.assoc_num = an;
+ ctx.sa.tx_sa = tx_sa;
+ ctx.stats.tx_sa_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
+ }
+ return;
+ }
+
for_each_possible_cpu(cpu) {
- const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+ const struct macsec_tx_sa_stats *stats =
+ per_cpu_ptr(tx_sa->stats, cpu);
- sum.OutPktsProtected += stats->OutPktsProtected;
- sum.OutPktsEncrypted += stats->OutPktsEncrypted;
+ sum->OutPktsProtected += stats->OutPktsProtected;
+ sum->OutPktsEncrypted += stats->OutPktsEncrypted;
}
+}
- if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
+static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
+{
+ if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
+ sum->OutPktsProtected) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+ sum->OutPktsEncrypted))
return -EMSGSIZE;
return 0;
}
-static noinline_for_stack int
-copy_rx_sa_stats(struct sk_buff *skb,
- struct macsec_rx_sa_stats __percpu *pstats)
+static void get_rx_sa_stats(struct net_device *dev,
+ struct macsec_rx_sc *rx_sc, int an,
+ struct macsec_rx_sa *rx_sa,
+ struct macsec_rx_sa_stats *sum)
{
- struct macsec_rx_sa_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
- for_each_possible_cpu(cpu) {
- const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
- sum.InPktsOK += stats->InPktsOK;
- sum.InPktsInvalid += stats->InPktsInvalid;
- sum.InPktsNotValid += stats->InPktsNotValid;
- sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
- sum.InPktsUnusedSA += stats->InPktsUnusedSA;
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.sa.assoc_num = an;
+ ctx.sa.rx_sa = rx_sa;
+ ctx.stats.rx_sa_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ ctx.rx_sc = rx_sc;
+ macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
+ }
+ return;
}
- if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+ for_each_possible_cpu(cpu) {
+ const struct macsec_rx_sa_stats *stats =
+ per_cpu_ptr(rx_sa->stats, cpu);
+
+ sum->InPktsOK += stats->InPktsOK;
+ sum->InPktsInvalid += stats->InPktsInvalid;
+ sum->InPktsNotValid += stats->InPktsNotValid;
+ sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
+ sum->InPktsUnusedSA += stats->InPktsUnusedSA;
+ }
+}
+
+static int copy_rx_sa_stats(struct sk_buff *skb,
+ struct macsec_rx_sa_stats *sum)
+{
+ if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
+ sum->InPktsInvalid) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
+ sum->InPktsNotValid) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+ sum->InPktsNotUsingSA) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
+ sum->InPktsUnusedSA))
return -EMSGSIZE;
return 0;
}
-static noinline_for_stack int
-copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
+static void get_rx_sc_stats(struct net_device *dev,
+ struct macsec_rx_sc *rx_sc,
+ struct macsec_rx_sc_stats *sum)
{
- struct macsec_rx_sc_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.stats.rx_sc_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ ctx.rx_sc = rx_sc;
+ macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
+ }
+ return;
+ }
+
for_each_possible_cpu(cpu) {
const struct pcpu_rx_sc_stats *stats;
struct macsec_rx_sc_stats tmp;
unsigned int start;
- stats = per_cpu_ptr(pstats, cpu);
+ stats = per_cpu_ptr(rx_sc->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- sum.InOctetsValidated += tmp.InOctetsValidated;
- sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
- sum.InPktsUnchecked += tmp.InPktsUnchecked;
- sum.InPktsDelayed += tmp.InPktsDelayed;
- sum.InPktsOK += tmp.InPktsOK;
- sum.InPktsInvalid += tmp.InPktsInvalid;
- sum.InPktsLate += tmp.InPktsLate;
- sum.InPktsNotValid += tmp.InPktsNotValid;
- sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
- sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
+ sum->InOctetsValidated += tmp.InOctetsValidated;
+ sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
+ sum->InPktsUnchecked += tmp.InPktsUnchecked;
+ sum->InPktsDelayed += tmp.InPktsDelayed;
+ sum->InPktsOK += tmp.InPktsOK;
+ sum->InPktsInvalid += tmp.InPktsInvalid;
+ sum->InPktsLate += tmp.InPktsLate;
+ sum->InPktsNotValid += tmp.InPktsNotValid;
+ sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
+ sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
}
+}
+static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
+{
if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
- sum.InOctetsValidated,
+ sum->InOctetsValidated,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
- sum.InOctetsDecrypted,
+ sum->InOctetsDecrypted,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
- sum.InPktsUnchecked,
+ sum->InPktsUnchecked,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
- sum.InPktsDelayed,
+ sum->InPktsDelayed,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
- sum.InPktsOK,
+ sum->InPktsOK,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
- sum.InPktsInvalid,
+ sum->InPktsInvalid,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
- sum.InPktsLate,
+ sum->InPktsLate,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
- sum.InPktsNotValid,
+ sum->InPktsNotValid,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
- sum.InPktsNotUsingSA,
+ sum->InPktsNotUsingSA,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
- sum.InPktsUnusedSA,
+ sum->InPktsUnusedSA,
MACSEC_RXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
-static noinline_for_stack int
-copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
+static void get_tx_sc_stats(struct net_device *dev,
+ struct macsec_tx_sc_stats *sum)
{
- struct macsec_tx_sc_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.stats.tx_sc_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
+ }
+ return;
+ }
+
for_each_possible_cpu(cpu) {
const struct pcpu_tx_sc_stats *stats;
struct macsec_tx_sc_stats tmp;
unsigned int start;
- stats = per_cpu_ptr(pstats, cpu);
+ stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- sum.OutPktsProtected += tmp.OutPktsProtected;
- sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
- sum.OutOctetsProtected += tmp.OutOctetsProtected;
- sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
+ sum->OutPktsProtected += tmp.OutPktsProtected;
+ sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
+ sum->OutOctetsProtected += tmp.OutOctetsProtected;
+ sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
}
+}
+static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
+{
if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
- sum.OutPktsProtected,
+ sum->OutPktsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
- sum.OutPktsEncrypted,
+ sum->OutPktsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
- sum.OutOctetsProtected,
+ sum->OutOctetsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
- sum.OutOctetsEncrypted,
+ sum->OutOctetsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
-static noinline_for_stack int
-copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
+static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
- struct macsec_dev_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.stats.dev_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ macsec_offload(ops->mdo_get_dev_stats, &ctx);
+ }
+ return;
+ }
+
for_each_possible_cpu(cpu) {
const struct pcpu_secy_stats *stats;
struct macsec_dev_stats tmp;
unsigned int start;
- stats = per_cpu_ptr(pstats, cpu);
+ stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- sum.OutPktsUntagged += tmp.OutPktsUntagged;
- sum.InPktsUntagged += tmp.InPktsUntagged;
- sum.OutPktsTooLong += tmp.OutPktsTooLong;
- sum.InPktsNoTag += tmp.InPktsNoTag;
- sum.InPktsBadTag += tmp.InPktsBadTag;
- sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
- sum.InPktsNoSCI += tmp.InPktsNoSCI;
- sum.InPktsOverrun += tmp.InPktsOverrun;
+ sum->OutPktsUntagged += tmp.OutPktsUntagged;
+ sum->InPktsUntagged += tmp.InPktsUntagged;
+ sum->OutPktsTooLong += tmp.OutPktsTooLong;
+ sum->InPktsNoTag += tmp.InPktsNoTag;
+ sum->InPktsBadTag += tmp.InPktsBadTag;
+ sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
+ sum->InPktsNoSCI += tmp.InPktsNoSCI;
+ sum->InPktsOverrun += tmp.InPktsOverrun;
}
+}
+static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
+{
if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
- sum.OutPktsUntagged,
+ sum->OutPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
- sum.InPktsUntagged,
+ sum->InPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
- sum.OutPktsTooLong,
+ sum->OutPktsTooLong,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
- sum.InPktsNoTag,
+ sum->InPktsNoTag,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
- sum.InPktsBadTag,
+ sum->InPktsBadTag,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
- sum.InPktsUnknownSCI,
+ sum->InPktsUnknownSCI,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
- sum.InPktsNoSCI,
+ sum->InPktsNoSCI,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
- sum.InPktsOverrun,
+ sum->InPktsOverrun,
MACSEC_SECY_STATS_ATTR_PAD))
return -EMSGSIZE;
@@ -2692,10 +2977,10 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
switch (secy->key_len) {
case MACSEC_GCM_AES_128_SAK_LEN:
- csid = MACSEC_DEFAULT_CIPHER_ID;
+ csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
break;
case MACSEC_GCM_AES_256_SAK_LEN:
- csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
break;
default:
goto cancel;
@@ -2734,7 +3019,12 @@ static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct macsec_tx_sc_stats tx_sc_stats = {0, };
+ struct macsec_tx_sa_stats tx_sa_stats = {0, };
+ struct macsec_rx_sc_stats rx_sc_stats = {0, };
+ struct macsec_rx_sa_stats rx_sa_stats = {0, };
struct macsec_dev *macsec = netdev_priv(dev);
+ struct macsec_dev_stats dev_stats = {0, };
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *txsa_list, *rxsc_list;
struct macsec_rx_sc *rx_sc;
@@ -2765,7 +3055,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
if (!attr)
goto nla_put_failure;
- if (copy_tx_sc_stats(skb, tx_sc->stats)) {
+
+ get_tx_sc_stats(dev, &tx_sc_stats);
+ if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
nla_nest_cancel(skb, attr);
goto nla_put_failure;
}
@@ -2774,7 +3066,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
if (!attr)
goto nla_put_failure;
- if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
+ get_secy_stats(dev, &dev_stats);
+ if (copy_secy_stats(skb, &dev_stats)) {
nla_nest_cancel(skb, attr);
goto nla_put_failure;
}
@@ -2786,6 +3079,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
struct nlattr *txsa_nest;
+ u64 pn;
+ int pn_len;
if (!tx_sa)
continue;
@@ -2796,22 +3091,15 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
- if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
- nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
- nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
- nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
- nla_nest_cancel(skb, txsa_nest);
- nla_nest_cancel(skb, txsa_list);
- goto nla_put_failure;
- }
-
attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
- if (copy_tx_sa_stats(skb, tx_sa->stats)) {
+ memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
+ get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
+ if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
@@ -2819,6 +3107,24 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
+ if (secy->xpn) {
+ pn = tx_sa->next_pn;
+ pn_len = MACSEC_XPN_PN_LEN;
+ } else {
+ pn = tx_sa->next_pn_halves.lower;
+ pn_len = MACSEC_DEFAULT_PN_LEN;
+ }
+
+ if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
+ nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
+ nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
+ (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
+ nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
+ nla_nest_cancel(skb, txsa_nest);
+ nla_nest_cancel(skb, txsa_list);
+ goto nla_put_failure;
+ }
+
nla_nest_end(skb, txsa_nest);
}
nla_nest_end(skb, txsa_list);
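
The tx SA hunk above now sizes MACSEC_SA_ATTR_PN by cipher mode: an XPN SA carries a 64-bit packet number, a legacy SA only the lower 32 bits. A sketch of that selection, assuming the next_pn/next_pn_halves union implied by the diff; the *_PN_LEN constants are the driver-local defines used above:

#include <net/macsec.h>
#include <net/netlink.h>

/* Pick the PN payload width for the netlink attribute. */
static int put_sa_pn(struct sk_buff *skb, const struct macsec_tx_sa *tx_sa,
		     bool xpn)
{
	u64 pn;
	int pn_len;

	if (xpn) {
		pn = tx_sa->next_pn;			/* full 64-bit PN */
		pn_len = MACSEC_XPN_PN_LEN;		/* 8 bytes */
	} else {
		pn = tx_sa->next_pn_halves.lower;	/* low 32 bits only */
		pn_len = MACSEC_DEFAULT_PN_LEN;		/* 4 bytes */
	}

	return nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn);
}
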
@@ -2852,7 +3158,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
}
- if (copy_rx_sc_stats(skb, rx_sc->stats)) {
+ memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
+ get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
+ if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
@@ -2871,6 +3179,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
struct nlattr *rxsa_nest;
+ u64 pn;
+ int pn_len;
if (!rx_sa)
continue;
@@ -2891,7 +3201,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
}
- if (copy_rx_sa_stats(skb, rx_sa->stats)) {
+ memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
+ get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
+ if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest);
@@ -2900,9 +3212,18 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
+ if (secy->xpn) {
+ pn = rx_sa->next_pn;
+ pn_len = MACSEC_XPN_PN_LEN;
+ } else {
+ pn = rx_sa->next_pn_halves.lower;
+ pn_len = MACSEC_DEFAULT_PN_LEN;
+ }
+
if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
- nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
+ nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
+ (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
nla_nest_cancel(skb, rxsa_nest);
nla_nest_cancel(skb, rxsc_nest);
@@ -3092,9 +3413,16 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return ret;
}
-#define MACSEC_FEATURES \
+#define SW_MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+/* If h/w offloading is enabled, use real device features, save for:
+ * VLAN_FEATURES - they require additional ops
+ * HW_MACSEC - no reason to report it
+ */
+#define REAL_DEV_FEATURES(dev) \
+ ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
+
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3111,8 +3439,12 @@ static int macsec_dev_init(struct net_device *dev)
return err;
}
- dev->features = real_dev->features & MACSEC_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ if (macsec_is_offloaded(macsec)) {
+ dev->features = REAL_DEV_FEATURES(real_dev);
+ } else {
+ dev->features = real_dev->features & SW_MACSEC_FEATURES;
+ dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ }
dev->needed_headroom = real_dev->needed_headroom +
MACSEC_NEEDED_HEADROOM;
@@ -3141,7 +3473,10 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
- features &= (real_dev->features & MACSEC_FEATURES) |
+ if (macsec_is_offloaded(macsec))
+ return REAL_DEV_FEATURES(real_dev);
+
+ features &= (real_dev->features & SW_MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
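
Taken together, the two hunks above define one policy: a software macsec device advertises a safe feature subset plus software GSO, while an offloaded one mirrors the lower device minus VLAN features and NETIF_F_HW_MACSEC itself. An illustrative helper (not in the patch) condensing that policy:

#include <linux/netdevice.h>

static netdev_features_t macsec_pick_features(const struct net_device *real_dev,
					      bool offloaded)
{
	if (offloaded)	/* REAL_DEV_FEATURES() above */
		return real_dev->features &
		       ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC);

	/* SW_MACSEC_FEATURES plus the software-path extras */
	return (real_dev->features &
		(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)) |
	       NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
}
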
@@ -3181,6 +3516,7 @@ static int macsec_dev_open(struct net_device *dev)
goto clear_allmulti;
}
+ ctx.secy = &macsec->secy;
err = macsec_offload(ops->mdo_dev_open, &ctx);
if (err)
goto clear_allmulti;
@@ -3212,8 +3548,10 @@ static int macsec_dev_stop(struct net_device *dev)
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
- if (ops)
+ if (ops) {
+ ctx.secy = &macsec->secy;
macsec_offload(ops->mdo_dev_stop, &ctx);
+ }
}
dev_mc_unsync(real_dev, dev);
@@ -3446,9 +3784,19 @@ static int macsec_changelink_common(struct net_device *dev,
case MACSEC_CIPHER_ID_GCM_AES_128:
case MACSEC_DEFAULT_CIPHER_ID:
secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+ secy->xpn = false;
break;
case MACSEC_CIPHER_ID_GCM_AES_256:
secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+ secy->xpn = false;
+ break;
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
+ secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+ secy->xpn = true;
+ break;
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
+ secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+ secy->xpn = true;
break;
default:
return -EINVAL;
@@ -3463,7 +3811,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
- struct macsec_tx_sa tx_sc;
+ struct macsec_tx_sc tx_sc;
struct macsec_secy secy;
int ret;
@@ -3638,6 +3986,7 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
secy->protect_frames = true;
secy->replay_protect = false;
+ secy->xpn = DEFAULT_XPN;
secy->sci = sci;
secy->tx_sc.active = true;
@@ -3655,11 +4004,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
+ rx_handler_func_t *rx_handler;
+ u8 icv_len = DEFAULT_ICV_LEN;
struct net_device *real_dev;
- int err;
+ int err, mtu;
sci_t sci;
- u8 icv_len = DEFAULT_ICV_LEN;
- rx_handler_func_t *rx_handler;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -3673,12 +4022,24 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
macsec->real_dev = real_dev;
- /* MACsec offloading is off by default */
- macsec->offload = MACSEC_OFFLOAD_OFF;
+ if (data && data[IFLA_MACSEC_OFFLOAD])
+ macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
+ else
+ /* MACsec offloading is off by default */
+ macsec->offload = MACSEC_OFFLOAD_OFF;
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (macsec->offload != MACSEC_OFFLOAD_OFF &&
+ !macsec_check_offload(macsec->offload, macsec))
+ return -EOPNOTSUPP;
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
- dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
+ mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
+ if (mtu < 0)
+ dev->mtu = 0;
+ else
+ dev->mtu = mtu;
rx_handler = rtnl_dereference(real_dev->rx_handler);
if (rx_handler && rx_handler != macsec_handle_frame)
@@ -3717,6 +4078,20 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
goto del_dev;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ err = macsec_offload(ops->mdo_add_secy, &ctx);
+ if (err)
+ goto del_dev;
+ }
+ }
+
err = register_macsec_dev(real_dev, dev);
if (err < 0)
goto del_dev;
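
The mdo_add_secy block above repeats a pattern used throughout this patch: resolve the offload ops, fill the shared macsec_context, then call one mdo_* hook through macsec_offload(). A condensed sketch of that pattern, reusing the driver's own helpers:

static int macsec_notify_add_secy(struct macsec_dev *macsec)
{
	const struct macsec_ops *ops;
	struct macsec_context ctx;

	if (!macsec_is_offloaded(macsec))
		return 0;	/* software path: nothing to program */

	ops = macsec_get_ops(macsec, &ctx);
	if (!ops)
		return 0;	/* no provider bound, treated as non-fatal */

	ctx.secy = &macsec->secy;
	return macsec_offload(ops->mdo_add_secy, &ctx);
}
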
@@ -3769,6 +4144,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
switch (csid) {
case MACSEC_CIPHER_ID_GCM_AES_128:
case MACSEC_CIPHER_ID_GCM_AES_256:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
case MACSEC_DEFAULT_CIPHER_ID:
if (icv_len < MACSEC_MIN_ICV_LEN ||
icv_len > MACSEC_STD_ICV_LEN)
@@ -3842,10 +4219,10 @@ static int macsec_fill_info(struct sk_buff *skb,
switch (secy->key_len) {
case MACSEC_GCM_AES_128_SAK_LEN:
- csid = MACSEC_DEFAULT_CIPHER_ID;
+ csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
break;
case MACSEC_GCM_AES_256_SAK_LEN:
- csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
break;
default:
goto nla_put_failure;
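
Both csid switches above (nla_put_secy() and macsec_fill_info()) apply the same mapping: the SAK length selects 128- vs 256-bit GCM-AES, and the xpn flag upgrades it to the XPN variant. The mapping as one illustrative helper, using the UAPI constants from the hunks:

static u64 macsec_csid(u16 key_len, bool xpn)
{
	switch (key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		return xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128
			   : MACSEC_DEFAULT_CIPHER_ID;
	case MACSEC_GCM_AES_256_SAK_LEN:
		return xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256
			   : MACSEC_CIPHER_ID_GCM_AES_256;
	default:
		return 0;	/* callers bail out via goto on this */
	}
}
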
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index e7289d67268f..0482adc9916b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1704,7 +1704,7 @@ static int macvlan_device_event(struct notifier_block *unused,
struct macvlan_dev,
list);
- if (macvlan_sync_address(vlan->dev, dev->dev_addr))
+ if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr))
return NOTIFY_BAD;
break;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index d7706a0346f2..dc3ff0e20944 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -28,6 +28,7 @@
#include <linux/workqueue.h>
#include <net/devlink.h>
#include <net/ip.h>
+#include <net/flow_offload.h>
#include <uapi/linux/devlink.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/udp.h>
@@ -38,24 +39,48 @@ static struct dentry *nsim_dev_ddir;
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
+static int
+nsim_dev_take_snapshot(struct devlink *devlink, struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ void *dummy_data;
+
+ dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL);
+ if (!dummy_data)
+ return -ENOMEM;
+
+ get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
+
+ *data = dummy_data;
+
+ return 0;
+}
+
static ssize_t nsim_dev_take_snapshot_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev *nsim_dev = file->private_data;
- void *dummy_data;
+ struct devlink *devlink;
+ u8 *dummy_data;
int err;
u32 id;
- dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL);
- if (!dummy_data)
- return -ENOMEM;
+ devlink = priv_to_devlink(nsim_dev);
- get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
+ err = nsim_dev_take_snapshot(devlink, NULL, &dummy_data);
+ if (err)
+ return err;
- id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev));
+ err = devlink_region_snapshot_id_get(devlink, &id);
+ if (err) {
+ pr_err("Failed to get snapshot id\n");
+ kfree(dummy_data);
+ return err;
+ }
err = devlink_region_snapshot_create(nsim_dev->dummy_region,
- dummy_data, id, kfree);
+ dummy_data, id);
+ devlink_region_snapshot_id_put(devlink, id);
if (err) {
pr_err("Failed to create region snapshot\n");
kfree(dummy_data);
@@ -71,6 +96,98 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
.llseek = generic_file_llseek,
};
+static ssize_t nsim_dev_trap_fa_cookie_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ struct flow_action_cookie *fa_cookie;
+ unsigned int buf_len;
+ ssize_t ret;
+ char *buf;
+
+ spin_lock(&nsim_dev->fa_cookie_lock);
+ fa_cookie = nsim_dev->fa_cookie;
+ if (!fa_cookie) {
+ ret = -EINVAL;
+ goto errout;
+ }
+ buf_len = fa_cookie->cookie_len * 2;
+ buf = kmalloc(buf_len, GFP_ATOMIC);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto errout;
+ }
+ bin2hex(buf, fa_cookie->cookie, fa_cookie->cookie_len);
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+
+ ret = simple_read_from_buffer(data, count, ppos, buf, buf_len);
+
+ kfree(buf);
+ return ret;
+
+errout:
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+ return ret;
+}
+
+static ssize_t nsim_dev_trap_fa_cookie_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ struct flow_action_cookie *fa_cookie;
+ size_t cookie_len;
+ ssize_t ret;
+ char *buf;
+
+ if (*ppos != 0)
+ return -EINVAL;
+ cookie_len = (count - 1) / 2;
+ if ((count - 1) % 2)
+ return -EINVAL;
+ buf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_write_to_buffer(buf, count, ppos, data, count);
+ if (ret < 0)
+ goto free_buf;
+
+ fa_cookie = kmalloc(sizeof(*fa_cookie) + cookie_len,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!fa_cookie) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ fa_cookie->cookie_len = cookie_len;
+ ret = hex2bin(fa_cookie->cookie, buf, cookie_len);
+ if (ret)
+ goto free_fa_cookie;
+ kfree(buf);
+
+ spin_lock(&nsim_dev->fa_cookie_lock);
+ kfree(nsim_dev->fa_cookie);
+ nsim_dev->fa_cookie = fa_cookie;
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+
+ return count;
+
+free_fa_cookie:
+ kfree(fa_cookie);
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
+ .open = simple_open,
+ .read = nsim_dev_trap_fa_cookie_read,
+ .write = nsim_dev_trap_fa_cookie_write,
+ .llseek = generic_file_llseek,
+};
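
The write handler above accepts an even-length hex string (plus trailing newline) and stores it as a binary flow-action cookie via hex2bin(); the read side renders it back with bin2hex(). A self-contained sketch of that round trip, with made-up buffers:

#include <linux/kernel.h>

static int fa_cookie_hex_demo(void)
{
	const char hex_in[] = "ab12";	/* 4 hex chars -> 2 raw bytes */
	u8 raw[2];
	char hex_out[4];

	if (hex2bin(raw, hex_in, sizeof(raw)))	/* non-zero on bad input */
		return -EINVAL;

	bin2hex(hex_out, raw, sizeof(raw));	/* note: no NUL appended */
	return 0;
}
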
+
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
char dev_ddir_name[sizeof(DRV_NAME) + 10];
@@ -97,6 +214,17 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
&nsim_dev->dont_allow_reload);
debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
&nsim_dev->fail_reload);
+ debugfs_create_file("trap_flow_action_cookie", 0600, nsim_dev->ddir,
+ nsim_dev, &nsim_dev_trap_fa_cookie_fops);
+ debugfs_create_bool("fail_trap_group_set", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_group_set);
+ debugfs_create_bool("fail_trap_policer_set", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_policer_set);
+ debugfs_create_bool("fail_trap_policer_counter_get", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_policer_counter_get);
return 0;
}
@@ -245,11 +373,17 @@ static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
#define NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX 16
+static const struct devlink_region_ops dummy_region_ops = {
+ .name = "dummy",
+ .destructor = &kfree,
+ .snapshot = nsim_dev_take_snapshot,
+};
+
static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev,
struct devlink *devlink)
{
nsim_dev->dummy_region =
- devlink_region_create(devlink, "dummy",
+ devlink_region_create(devlink, &dummy_region_ops,
NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
NSIM_DEV_DUMMY_REGION_SIZE);
return PTR_ERR_OR_ZERO(nsim_dev->dummy_region);
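
This hunk tracks a devlink API change: the region name and destructor, plus an optional on-demand .snapshot callback, now live in a const devlink_region_ops passed to devlink_region_create(). A minimal sketch of such an ops struct; the "demo" name and size are made up:

#include <net/devlink.h>
#include <linux/slab.h>

#define DEMO_REGION_SIZE 256

static int demo_snapshot(struct devlink *devlink,
			 struct netlink_ext_ack *extack, u8 **data)
{
	u8 *buf = kzalloc(DEMO_REGION_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	*data = buf;	/* freed by devlink through ops->destructor */
	return 0;
}

static const struct devlink_region_ops demo_region_ops = {
	.name = "demo",
	.destructor = &kfree,
	.snapshot = demo_snapshot,
};
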
@@ -268,6 +402,7 @@ struct nsim_trap_item {
struct nsim_trap_data {
struct delayed_work trap_report_dw;
struct nsim_trap_item *trap_items_arr;
+ u64 *trap_policers_cnt_arr;
struct nsim_dev *nsim_dev;
spinlock_t trap_lock; /* Protects trap_items_arr */
};
@@ -286,18 +421,47 @@ enum {
#define NSIM_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
+#define NSIM_TRAP_DROP_EXT(_id, _group_id, _metadata) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ NSIM_TRAP_METADATA | (_metadata))
#define NSIM_TRAP_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
#define NSIM_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, NSIM_TRAP_ID_##_id, \
NSIM_TRAP_NAME_##_id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
+#define NSIM_DEV_TRAP_POLICER_MIN_RATE 1
+#define NSIM_DEV_TRAP_POLICER_MAX_RATE 8000
+#define NSIM_DEV_TRAP_POLICER_MIN_BURST 8
+#define NSIM_DEV_TRAP_POLICER_MAX_BURST 65536
+
+#define NSIM_TRAP_POLICER(_id, _rate, _burst) \
+ DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
+ NSIM_DEV_TRAP_POLICER_MAX_RATE, \
+ NSIM_DEV_TRAP_POLICER_MIN_RATE, \
+ NSIM_DEV_TRAP_POLICER_MAX_BURST, \
+ NSIM_DEV_TRAP_POLICER_MIN_BURST)
+
+static const struct devlink_trap_policer nsim_trap_policers_arr[] = {
+ NSIM_TRAP_POLICER(1, 1000, 128),
+ NSIM_TRAP_POLICER(2, 2000, 256),
+ NSIM_TRAP_POLICER(3, 3000, 512),
+};
+
+static const struct devlink_trap_group nsim_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 2),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 3),
+};
+
static const struct devlink_trap nsim_traps_arr[] = {
NSIM_TRAP_DROP(SMAC_MC, L2_DROPS),
NSIM_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
@@ -309,6 +473,10 @@ static const struct devlink_trap nsim_traps_arr[] = {
NSIM_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
NSIM_TRAP_DROP(TAIL_DROP, BUFFER_DROPS),
+ NSIM_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ NSIM_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
};
#define NSIM_TRAP_L4_DATA_LEN 100
@@ -366,8 +534,13 @@ static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port)
spin_lock(&nsim_trap_data->trap_lock);
for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
+ struct flow_action_cookie *fa_cookie = NULL;
struct nsim_trap_item *nsim_trap_item;
struct sk_buff *skb;
+ bool has_fa_cookie;
+
+ has_fa_cookie = nsim_traps_arr[i].metadata_cap &
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE;
nsim_trap_item = &nsim_trap_data->trap_items_arr[i];
if (nsim_trap_item->action == DEVLINK_TRAP_ACTION_DROP)
@@ -383,10 +556,12 @@ static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port)
* softIRQs to prevent lockdep from complaining about
* "incosistent lock state".
*/
- local_bh_disable();
+
+ spin_lock_bh(&nsim_dev->fa_cookie_lock);
+ fa_cookie = has_fa_cookie ? nsim_dev->fa_cookie : NULL;
devlink_trap_report(devlink, skb, nsim_trap_item->trap_ctx,
- &nsim_dev_port->devlink_port);
- local_bh_enable();
+ &nsim_dev_port->devlink_port, fa_cookie);
+ spin_unlock_bh(&nsim_dev->fa_cookie_lock);
consume_skb(skb);
}
spin_unlock(&nsim_trap_data->trap_lock);
@@ -422,6 +597,7 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
static int nsim_dev_traps_init(struct devlink *devlink)
{
+ size_t policers_count = ARRAY_SIZE(nsim_trap_policers_arr);
struct nsim_dev *nsim_dev = devlink_priv(devlink);
struct nsim_trap_data *nsim_trap_data;
int err;
@@ -438,6 +614,14 @@ static int nsim_dev_traps_init(struct devlink *devlink)
goto err_trap_data_free;
}
+ nsim_trap_data->trap_policers_cnt_arr = kcalloc(policers_count,
+ sizeof(u64),
+ GFP_KERNEL);
+ if (!nsim_trap_data->trap_policers_cnt_arr) {
+ err = -ENOMEM;
+ goto err_trap_items_free;
+ }
+
/* The lock is used to protect the action state of the registered
* traps. The value is written by the user and read in delayed work when
* iterating over all the traps.
@@ -446,10 +630,20 @@ static int nsim_dev_traps_init(struct devlink *devlink)
nsim_trap_data->nsim_dev = nsim_dev;
nsim_dev->trap_data = nsim_trap_data;
+ err = devlink_trap_policers_register(devlink, nsim_trap_policers_arr,
+ policers_count);
+ if (err)
+ goto err_trap_policers_cnt_free;
+
+ err = devlink_trap_groups_register(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+ if (err)
+ goto err_trap_policers_unregister;
+
err = devlink_traps_register(devlink, nsim_traps_arr,
ARRAY_SIZE(nsim_traps_arr), NULL);
if (err)
- goto err_trap_items_free;
+ goto err_trap_groups_unregister;
INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
@@ -458,6 +652,14 @@ static int nsim_dev_traps_init(struct devlink *devlink)
return 0;
+err_trap_groups_unregister:
+ devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+err_trap_policers_unregister:
+ devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
+err_trap_policers_cnt_free:
+ kfree(nsim_trap_data->trap_policers_cnt_arr);
err_trap_items_free:
kfree(nsim_trap_data->trap_items_arr);
err_trap_data_free:
@@ -472,6 +674,11 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
devlink_traps_unregister(devlink, nsim_traps_arr,
ARRAY_SIZE(nsim_traps_arr));
+ devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+ devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
+ kfree(nsim_dev->trap_data->trap_policers_cnt_arr);
kfree(nsim_dev->trap_data->trap_items_arr);
kfree(nsim_dev->trap_data);
}
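
The init/exit pair above follows the usual dependency rule: policers are registered before the groups that reference them, groups before traps, and both the error ladder and the exit path unwind in exact reverse. A generic standalone sketch of the shape (all names hypothetical):

static int register_a(void) { return 0; }	/* e.g. policers */
static int register_b(void) { return 0; }	/* e.g. groups, reference a */
static int register_c(void) { return 0; }	/* e.g. traps, reference b */
static void unregister_a(void) { }
static void unregister_b(void) { }

static int demo_init(void)
{
	int err;

	err = register_a();
	if (err)
		return err;

	err = register_b();
	if (err)
		goto err_unregister_a;

	err = register_c();
	if (err)
		goto err_unregister_b;

	return 0;

err_unregister_b:
	unregister_b();
err_unregister_a:
	unregister_a();
	return err;
}
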
@@ -610,6 +817,52 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink,
return 0;
}
+static int
+nsim_dev_devlink_trap_group_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->fail_trap_group_set)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_policer_set(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->fail_trap_policer_set) {
+ NL_SET_ERR_MSG_MOD(extack, "User setup the operation to fail for testing purposes");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ u64 *cnt;
+
+ if (nsim_dev->fail_trap_policer_counter_get)
+ return -EINVAL;
+
+ cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1];
+ *p_drops = (*cnt)++;
+
+ return 0;
+}
+
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
@@ -617,6 +870,9 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
.flash_update = nsim_dev_flash_update,
.trap_init = nsim_dev_devlink_trap_init,
.trap_action_set = nsim_dev_devlink_trap_action_set,
+ .trap_group_set = nsim_dev_devlink_trap_group_set,
+ .trap_policer_set = nsim_dev_devlink_trap_policer_set,
+ .trap_policer_counter_get = nsim_dev_devlink_trap_policer_counter_get,
};
#define NSIM_DEV_MAX_MACS_DEFAULT 32
@@ -780,6 +1036,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->fw_update_status = true;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
+ spin_lock_init(&nsim_dev->fa_cookie_lock);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index ba8d9ad60feb..62958b238d50 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -271,14 +271,14 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
health->empty_reporter =
devlink_health_reporter_create(devlink,
&nsim_dev_empty_reporter_ops,
- 0, false, health);
+ 0, health);
if (IS_ERR(health->empty_reporter))
return PTR_ERR(health->empty_reporter);
health->dummy_reporter =
devlink_health_reporter_create(devlink,
&nsim_dev_dummy_reporter_ops,
- 0, false, health);
+ 0, health);
if (IS_ERR(health->dummy_reporter)) {
err = PTR_ERR(health->dummy_reporter);
goto err_empty_reporter_destroy;
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 2eb7b0dc1594..4ded54a21e1e 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -178,6 +178,11 @@ struct nsim_dev {
bool fail_reload;
struct devlink_region *dummy_region;
struct nsim_dev_health health;
+ struct flow_action_cookie *fa_cookie;
+ spinlock_t fa_cookie_lock; /* protects fa_cookie */
+ bool fail_trap_group_set;
+ bool fail_trap_policer_set;
+ bool fail_trap_policer_counter_get;
};
static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9dabe03a668c..3fa33d27eeba 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -157,6 +157,14 @@ config MDIO_I2C
This is library mode.
+config MDIO_IPQ8064
+ tristate "Qualcomm IPQ8064 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ depends on MFD_SYSCON
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the IPQ8064 SoC.
+
config MDIO_MOXART
tristate "MOXA ART MDIO interface support"
depends on ARCH_MOXART || COMPILE_TEST
@@ -171,6 +179,13 @@ config MDIO_MSCC_MIIM
This driver supports the MIIM (MDIO) interface found in the network
switches of the Microsemi SoCs
+config MDIO_MVUSB
+ tristate "Marvell USB to MDIO Adapter"
+ depends on USB
+ help
+ This driver supports a USB to MDIO converter present on development
+ boards for Marvell's Link Street family of Ethernet switches.
+
config MDIO_OCTEON
tristate "Octeon and some ThunderX SOCs MDIO buses"
depends on (64BIT && OF_MDIO) || COMPILE_TEST
@@ -206,6 +221,12 @@ config MDIO_XGENE
This module provides a driver for the MDIO busses found in the
APM X-Gene SoCs.
+config MDIO_XPCS
+ tristate "Synopsys DesignWare XPCS controller"
+ help
+ This module provides helper functions for Synopsys DesignWare XPCS
+ controllers.
+
endif
endif
@@ -326,8 +347,8 @@ config BROADCOM_PHY
BCM5481, BCM54810 and BCM5482 PHYs.
config BCM84881_PHY
- bool "Broadcom BCM84881 PHY"
- depends on PHYLIB=y
+ tristate "Broadcom BCM84881 PHY"
+ depends on PHYLIB
---help---
Support the Broadcom BCM84881 PHY.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fe5badf13b65..2f5c7093a65b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux PHY drivers and MDIO bus drivers
-libphy-y := phy.o phy-c45.o phy-core.o phy_device.o
+libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
+ linkmode.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_MDIO_DEVICE
@@ -36,12 +37,15 @@ obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
+obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+obj-$(CONFIG_MDIO_XPCS) += mdio-xpcs.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
@@ -86,7 +90,7 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
-obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
+obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 31927b2c7d5a..41e7c1432497 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -290,17 +290,6 @@ static int aqr_read_status(struct phy_device *phydev)
return genphy_c45_read_status(phydev);
}
-static int aqr107_read_downshift_event(struct phy_device *phydev)
-{
- int val;
-
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS1);
- if (val < 0)
- return val;
-
- return !!(val & MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT);
-}
-
static int aqr107_read_rate(struct phy_device *phydev)
{
int val;
@@ -377,13 +366,7 @@ static int aqr107_read_status(struct phy_device *phydev)
break;
}
- val = aqr107_read_downshift_event(phydev);
- if (val <= 0)
- return val;
-
- phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
-
- /* Read downshifted rate from vendor register */
+ /* Read possibly downshifted rate from vendor register */
return aqr107_read_rate(phydev);
}
@@ -451,16 +434,11 @@ static int aqr107_set_tunable(struct phy_device *phydev,
*/
static int aqr107_wait_reset_complete(struct phy_device *phydev)
{
- int val, retries = 100;
-
- do {
- val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
- if (val < 0)
- return val;
- msleep(20);
- } while (!val && --retries);
+ int val;
- return val ? 0 : -ETIMEDOUT;
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_FW_ID, val, val != 0,
+ 20000, 2000000, false);
}
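
This conversion (and the bcm84881 one further down) replaces an open-coded poll loop with phy_read_mmd_poll_timeout(), which folds read errors, the success condition and the timeout into one call. A sketch of the resulting shape, using the driver's VEND1_GLOBAL_FW_ID register:

#include <linux/phy.h>

static int demo_wait_fw_ready(struct phy_device *phydev)
{
	int val;

	/* Poll every 20ms for up to 2s until the register reads non-zero;
	 * returns 0, a read error, or -ETIMEDOUT.
	 */
	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_GLOBAL_FW_ID, val, val != 0,
					 20000, 2000000, false);
}
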
static void aqr107_chip_info(struct phy_device *phydev)
@@ -506,9 +484,6 @@ static int aqr107_config_init(struct phy_device *phydev)
if (!ret)
aqr107_chip_info(phydev);
- /* ensure that a latched downshift event is cleared */
- aqr107_read_downshift_event(phydev);
-
return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
}
@@ -533,9 +508,6 @@ static int aqcs109_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* ensure that a latched downshift event is cleared */
- aqr107_read_downshift_event(phydev);
-
return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 481cf48c9b9e..31f731e6df72 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -425,8 +425,8 @@ static int at803x_parse_dt(struct phy_device *phydev)
*/
if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
- priv->clk_25m_reg &= ~AT8035_CLK_OUT_MASK;
- priv->clk_25m_mask &= ~AT8035_CLK_OUT_MASK;
+ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
}
}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index e0d3310957ff..e77b274a09fd 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -423,6 +423,28 @@ int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init);
+int bcm_phy_enable_jumbo(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL);
+ if (ret < 0)
+ return ret;
+
+ /* Enable extended length packet reception */
+ ret = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
+ ret | MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the elastic FIFO to raise the transmission limit from
+ * 4.5KB to 10KB, at the expense of an additional 16 ns in propagation
+ * latency.
+ */
+ return phy_set_bits(phydev, MII_BCM54XX_ECR, MII_BCM54XX_ECR_FIFOE);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_enable_jumbo);
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index c86fb9d1240c..129df819be8c 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -65,5 +65,6 @@ void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
struct ethtool_stats *stats, u64 *data);
void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
+int bcm_phy_enable_jumbo(struct phy_device *phydev);
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index af8eabe7a6d4..692048d86ab1 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -181,6 +181,10 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
if (ret)
return ret;
+ ret = bcm_phy_enable_jumbo(phydev);
+ if (ret)
+ return ret;
+
ret = bcm_phy_downshift_get(phydev, &count);
if (ret)
return ret;
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
index 14d55a77eb28..9717a1626f3f 100644
--- a/drivers/net/phy/bcm84881.c
+++ b/drivers/net/phy/bcm84881.c
@@ -22,30 +22,11 @@ enum {
static int bcm84881_wait_init(struct phy_device *phydev)
{
- unsigned int tries = 20;
- int ret, val;
-
- do {
- val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
- if (val < 0) {
- ret = val;
- break;
- }
- if (!(val & MDIO_CTRL1_RESET)) {
- ret = 0;
- break;
- }
- if (!--tries) {
- ret = -ETIMEDOUT;
- break;
- }
- msleep(100);
- } while (1);
+ int val;
- if (ret)
- phydev_err(phydev, "%s failed: %d\n", __func__, ret);
-
- return ret;
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ val, !(val & MDIO_CTRL1_RESET),
+ 100000, 2000000, false);
}
static int bcm84881_config_init(struct phy_device *phydev)
@@ -174,9 +155,6 @@ static int bcm84881_read_status(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
phydev->link = false;
- if (!phydev->link)
- return 0;
-
linkmode_zero(phydev->lp_advertising);
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
@@ -184,6 +162,9 @@ static int bcm84881_read_status(struct phy_device *phydev)
phydev->asym_pause = 0;
phydev->mdix = 0;
+ if (!phydev->link)
+ return 0;
+
if (phydev->autoneg_complete) {
val = genphy_c45_read_lpa(phydev);
if (val < 0)
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index a62229a8b1a4..d14d91b759b7 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -194,7 +194,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
/* Abort if we are using an untested phy. */
if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810)
return;
val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
@@ -224,8 +225,12 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
else
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
- if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
- val |= BCM54XX_SHD_SCR3_TRDDAPD;
+ if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
+ if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810)
+ val |= BCM54810_SHD_SCR3_TRDDAPD;
+ else
+ val |= BCM54XX_SHD_SCR3_TRDDAPD;
+ }
if (orig != val)
bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
@@ -272,10 +277,7 @@ static int bcm54xx_config_init(struct phy_device *phydev)
(phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
bcm_phy_write_shadow(phydev, BCM54XX_SHD_RGMII_MODE, 0);
- if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
- (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
- (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
- bcm54xx_adjust_rxrefclk(phydev);
+ bcm54xx_adjust_rxrefclk(phydev);
if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
err = bcm54210e_config_init(phydev);
@@ -315,6 +317,20 @@ static int bcm54xx_config_init(struct phy_device *phydev)
return 0;
}
+static int bcm54xx_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Writes to registers other than BMCR would be ignored
+ * unless we clear the PDOWN bit first
+ */
+ ret = genphy_resume(phydev);
+ if (ret < 0)
+ return ret;
+
+ return bcm54xx_config_init(phydev);
+}
+
static int bcm5482_config_init(struct phy_device *phydev)
{
int err, reg;
@@ -708,6 +724,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ac72a324fcd1..ecbd5e0d685c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -628,7 +628,7 @@ static void recalibrate(struct dp83640_clock *clock)
u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
trigger = CAL_TRIGGER;
- cal_gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
+ cal_gpio = 1 + ptp_find_pin_unlocked(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
if (cal_gpio < 1) {
pr_err("PHY calibration pin not available - PHY is not calibrated.");
return;
@@ -1120,7 +1120,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
goto out;
}
dp83640_clock_init(clock, bus);
- list_add_tail(&phyter_clocks, &clock->list);
+ list_add_tail(&clock->list, &phyter_clocks);
out:
mutex_unlock(&phyter_clocks_lock);
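
The one-line dp83640 fix above corrects swapped arguments: list_add_tail() takes the new entry first and the list head second, so the old call threaded the global head onto the clock instead of the other way around. A minimal sketch of the correct usage:

#include <linux/list.h>

struct demo_clock {
	struct list_head list;
};

static LIST_HEAD(demo_clocks);

static void demo_clock_register(struct demo_clock *clock)
{
	/* New entry first, list head second. */
	list_add_tail(&clock->list, &demo_clocks);
}
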
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index fe9aa3ad52a7..1dd19d0cb269 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -137,19 +137,18 @@ static int dp83822_set_wol(struct phy_device *phydev,
value &= ~DP83822_WOL_SECURE_ON;
}
- value |= (DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
- DP83822_WOL_CLR_INDICATION);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
- value);
+ /* Clear any pending WoL interrupt */
+ phy_read(phydev, MII_DP83822_MISR2);
+
+ value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
+ DP83822_WOL_CLR_INDICATION;
+
+ return phy_write_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_WOL_CFG, value);
} else {
- value = phy_read_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_WOL_CFG);
- value &= ~DP83822_WOL_EN;
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
- value);
+ return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_WOL_CFG, DP83822_WOL_EN);
}
-
- return 0;
}
static void dp83822_get_wol(struct phy_device *phydev,
@@ -258,12 +257,11 @@ static int dp83822_config_intr(struct phy_device *phydev)
static int dp83822_config_init(struct phy_device *phydev)
{
- int value;
-
- value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN;
+ int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
+ DP83822_WOL_SECURE_ON;
- return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
- value);
+ return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_WOL_CFG, value);
}
static int dp83822_phy_reset(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 9a07ad137c2e..b55e3c0403ed 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -21,6 +22,7 @@
#define DP83867_DEVADDR 0x1f
#define MII_DP83867_PHYCTRL 0x10
+#define MII_DP83867_PHYSTS 0x11
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
#define DP83867_CFG2 0x14
@@ -120,6 +122,24 @@
#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+/* PHY STS bits */
+#define DP83867_PHYSTS_1000 BIT(15)
+#define DP83867_PHYSTS_100 BIT(14)
+#define DP83867_PHYSTS_DUPLEX BIT(13)
+#define DP83867_PHYSTS_LINK BIT(10)
+
+/* CFG2 bits */
+#define DP83867_DOWNSHIFT_EN (BIT(8) | BIT(9))
+#define DP83867_DOWNSHIFT_ATTEMPT_MASK (BIT(10) | BIT(11))
+#define DP83867_DOWNSHIFT_1_COUNT_VAL 0
+#define DP83867_DOWNSHIFT_2_COUNT_VAL 1
+#define DP83867_DOWNSHIFT_4_COUNT_VAL 2
+#define DP83867_DOWNSHIFT_8_COUNT_VAL 3
+#define DP83867_DOWNSHIFT_1_COUNT 1
+#define DP83867_DOWNSHIFT_2_COUNT 2
+#define DP83867_DOWNSHIFT_4_COUNT 4
+#define DP83867_DOWNSHIFT_8_COUNT 8
+
/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)
#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
@@ -292,6 +312,126 @@ static int dp83867_config_intr(struct phy_device *phydev)
return phy_write(phydev, MII_DP83867_MICR, micr_status);
}
+static int dp83867_read_status(struct phy_device *phydev)
+{
+ int status = phy_read(phydev, MII_DP83867_PHYSTS);
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (status < 0)
+ return status;
+
+ if (status & DP83867_PHYSTS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (status & DP83867_PHYSTS_1000)
+ phydev->speed = SPEED_1000;
+ else if (status & DP83867_PHYSTS_100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ return 0;
+}
+
+static int dp83867_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable, count;
+
+ val = phy_read(phydev, DP83867_CFG2);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(DP83867_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(DP83867_DOWNSHIFT_ATTEMPT_MASK, val);
+
+ switch (cnt) {
+ case DP83867_DOWNSHIFT_1_COUNT_VAL:
+ count = DP83867_DOWNSHIFT_1_COUNT;
+ break;
+ case DP83867_DOWNSHIFT_2_COUNT_VAL:
+ count = DP83867_DOWNSHIFT_2_COUNT;
+ break;
+ case DP83867_DOWNSHIFT_4_COUNT_VAL:
+ count = DP83867_DOWNSHIFT_4_COUNT;
+ break;
+ case DP83867_DOWNSHIFT_8_COUNT_VAL:
+ count = DP83867_DOWNSHIFT_8_COUNT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *data = enable ? count : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val, count;
+
+ if (cnt > DP83867_DOWNSHIFT_8_COUNT)
+ return -E2BIG;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, DP83867_CFG2,
+ DP83867_DOWNSHIFT_EN);
+
+ switch (cnt) {
+ case DP83867_DOWNSHIFT_1_COUNT:
+ count = DP83867_DOWNSHIFT_1_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_2_COUNT:
+ count = DP83867_DOWNSHIFT_2_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_4_COUNT:
+ count = DP83867_DOWNSHIFT_4_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_8_COUNT:
+ count = DP83867_DOWNSHIFT_8_COUNT_VAL;
+ break;
+ default:
+ phydev_err(phydev,
+ "Downshift count must be 1, 2, 4 or 8\n");
+ return -EINVAL;
+ }
+
+ val = DP83867_DOWNSHIFT_EN;
+ val |= FIELD_PREP(DP83867_DOWNSHIFT_ATTEMPT_MASK, count);
+
+ return phy_modify(phydev, DP83867_CFG2,
+ DP83867_DOWNSHIFT_EN | DP83867_DOWNSHIFT_ATTEMPT_MASK,
+ val);
+}
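
The downshift tunables above lean on linux/bitfield.h: because the mask constant encodes both position and width, FIELD_PREP()/FIELD_GET() do the shifting. A tiny sketch with a hypothetical two-bit field at bits 11:10:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_ATTEMPT_MASK	(BIT(10) | BIT(11))	/* two-bit field */

static u16 demo_encode(u16 reg, unsigned int code)
{
	reg &= ~DEMO_ATTEMPT_MASK;
	reg |= FIELD_PREP(DEMO_ATTEMPT_MASK, code);	/* code in 0..3 */
	return reg;
}

static unsigned int demo_decode(u16 reg)
{
	return FIELD_GET(DEMO_ATTEMPT_MASK, reg);	/* back to 0..3 */
}
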
+
+static int dp83867_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return dp83867_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83867_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return dp83867_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int dp83867_config_port_mirroring(struct phy_device *phydev)
{
struct dp83867_private *dp83867 =
@@ -472,6 +612,12 @@ static int dp83867_config_init(struct phy_device *phydev)
int ret, val, bs;
u16 delay;
+ /* Force speed optimization for the PHY even if it is strapped */
+ ret = phy_modify(phydev, DP83867_CFG2, DP83867_DOWNSHIFT_EN,
+ DP83867_DOWNSHIFT_EN);
+ if (ret)
+ return ret;
+
ret = dp83867_verify_rgmii_cfg(phydev);
if (ret)
return ret;
@@ -674,6 +820,10 @@ static struct phy_driver dp83867_driver[] = {
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
+ .read_status = dp83867_read_status,
+ .get_tunable = dp83867_get_tunable,
+ .set_tunable = dp83867_set_tunable,
+
.get_wol = dp83867_get_wol,
.set_wol = dp83867_set_wol,
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 06f08832ebcd..d73725312c7c 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -139,16 +139,19 @@ static int dp83811_set_wol(struct phy_device *phydev,
value &= ~DP83811_WOL_SECURE_ON;
}
- value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
- DP83811_WOL_CLR_INDICATION);
- phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
- value);
+ /* Clear any pending WoL interrupt */
+ phy_read(phydev, MII_DP83811_INT_STAT1);
+
+ value |= DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
+ DP83811_WOL_CLR_INDICATION;
+
+ return phy_write_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_WOL_CFG, value);
} else {
- phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
- DP83811_WOL_EN);
+ return phy_clear_bits_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_WOL_CFG, DP83811_WOL_EN);
}
- return 0;
}
static void dp83811_get_wol(struct phy_device *phydev,
@@ -292,8 +295,8 @@ static int dp83811_config_init(struct phy_device *phydev)
value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN;
- return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
- value);
+ return phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+ value);
}
static int dp83811_phy_reset(struct phy_device *phydev)
diff --git a/drivers/net/phy/linkmode.c b/drivers/net/phy/linkmode.c
new file mode 100644
index 000000000000..f60560fe3499
--- /dev/null
+++ b/drivers/net/phy/linkmode.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/linkmode.h>
+
+/**
+ * linkmode_resolve_pause - resolve the allowable pause modes
+ * @local_adv: local advertisement in ethtool format
+ * @partner_adv: partner advertisement in ethtool format
+ * @tx_pause: pointer to bool to indicate whether transmit pause should be
+ * enabled.
+ * @rx_pause: pointer to bool to indicate whether receive pause should be
+ * enabled.
+ *
+ * Flow control is resolved according to our and the link partner's
+ * advertisements using the following drawn from the 802.3 specs:
+ * Local device Link partner
+ * Pause AsymDir Pause AsymDir Result
+ * 0 X 0 X Disabled
+ * 0 1 1 0 Disabled
+ * 0 1 1 1 TX
+ * 1 0 0 X Disabled
+ * 1 X 1 X TX+RX
+ * 1 1 0 1 RX
+ */
+void linkmode_resolve_pause(const unsigned long *local_adv,
+ const unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(m);
+
+ linkmode_and(m, local_adv, partner_adv);
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, m)) {
+ *tx_pause = true;
+ *rx_pause = true;
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, m)) {
+ *tx_pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ partner_adv);
+ *rx_pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ local_adv);
+ } else {
+ *tx_pause = false;
+ *rx_pause = false;
+ }
+}
+EXPORT_SYMBOL_GPL(linkmode_resolve_pause);
+
+/**
+ * linkmode_set_pause - set the pause mode advertisement
+ * @advertisement: advertisement in ethtool format
+ * @tx: boolean from ethtool struct ethtool_pauseparam tx_pause member
+ * @rx: boolean from ethtool struct ethtool_pauseparam rx_pause member
+ *
+ * Configure the advertised Pause and Asym_Pause bits according to the
+ * capabilities provided in @tx and @rx.
+ *
+ * We convert as follows:
+ * tx rx Pause AsymDir
+ * 0 0 0 0
+ * 0 1 1 1
+ * 1 0 0 1
+ * 1 1 1 0
+ *
+ * Note: this translation from ethtool tx/rx notation to the advertisement
+ * is actually very problematic. Here are some examples:
+ *
+ * For tx=0 rx=1, meaning transmit is unsupported, receive is supported:
+ *
+ * Local device Link partner
+ * Pause AsymDir Pause AsymDir Result
+ * 1 1 1 0 TX + RX - but we have no TX support.
+ * 1 1 0 1 Only this gives RX only
+ *
+ * For tx=1 rx=1, meaning we have the capability to transmit and receive
+ * pause frames:
+ *
+ * Local device Link partner
+ * Pause AsymDir Pause AsymDir Result
+ * 1 0 0 1 Disabled - but since we do support tx and rx,
+ * this should resolve to RX only.
+ *
+ * Hence, asking for:
+ * rx=1 tx=0 gives Pause+AsymDir advertisement, but we may end up
+ * resolving to tx+rx pause or only rx pause depending on
+ * the partner's advertisement.
+ * rx=0 tx=1 gives AsymDir only, which will only give tx pause if
+ * the partner's advertisement allows it.
+ * rx=1 tx=1 gives Pause only, which will only allow tx+rx pause
+ * if the other end also advertises Pause.
+ */
+void linkmode_set_pause(unsigned long *advertisement, bool tx, bool rx)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertisement, rx);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertisement,
+ rx ^ tx);
+}
+EXPORT_SYMBOL_GPL(linkmode_set_pause);
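
A worked example of the resolution table documented above; purely illustrative, not part of the file. If both ends advertise symmetric pause (tx=1 rx=1, i.e. the Pause bit), the helper enables pause in both directions:

#include <linux/linkmode.h>

static void demo_resolve_pause(void)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(local) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(partner) = { 0, };
	bool tx_pause, rx_pause;

	linkmode_set_pause(local, true, true);		/* sets Pause only */
	linkmode_set_pause(partner, true, true);

	linkmode_resolve_pause(local, partner, &tx_pause, &rx_pause);
	/* Both masks carry Pause, so tx_pause and rx_pause are true. */
}
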
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 9a8badafea8a..7fc8e10c5f33 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -867,21 +867,6 @@ static int m88e1011_set_tunable(struct phy_device *phydev,
}
}
-static void m88e1011_link_change_notify(struct phy_device *phydev)
-{
- int status;
-
- if (phydev->state != PHY_RUNNING)
- return;
-
- /* we may be on fiber page currently */
- status = phy_read_paged(phydev, MII_MARVELL_COPPER_PAGE,
- MII_M1011_PHY_SSR);
-
- if (status > 0 && status & MII_M1011_PHY_SSR_DOWNSHIFT)
- phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
-}
-
static int m88e1116r_config_init(struct phy_device *phydev)
{
int err;
@@ -1278,6 +1263,30 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
int lpa;
int err;
+ if (!(status & MII_M1011_PHY_STATUS_RESOLVED)) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ switch (status & MII_M1011_PHY_STATUS_SPD_MASK) {
+ case MII_M1011_PHY_STATUS_1000:
+ phydev->speed = SPEED_1000;
+ break;
+
+ case MII_M1011_PHY_STATUS_100:
+ phydev->speed = SPEED_100;
+ break;
+
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
+
if (!fiber) {
err = genphy_read_lpa(phydev);
if (err < 0)
@@ -1306,28 +1315,6 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
}
- if (!(status & MII_M1011_PHY_STATUS_RESOLVED))
- return 0;
-
- if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- switch (status & MII_M1011_PHY_STATUS_SPD_MASK) {
- case MII_M1011_PHY_STATUS_1000:
- phydev->speed = SPEED_1000;
- break;
-
- case MII_M1011_PHY_STATUS_100:
- phydev->speed = SPEED_100;
- break;
-
- default:
- phydev->speed = SPEED_10;
- break;
- }
-
return 0;
}
@@ -2201,7 +2188,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1111,
@@ -2223,7 +2209,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1111_get_tunable,
.set_tunable = m88e1111_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1118,
@@ -2264,7 +2249,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1318S,
@@ -2308,7 +2292,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1111_get_tunable,
.set_tunable = m88e1111_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1149R,
@@ -2364,7 +2347,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1510,
@@ -2390,7 +2372,6 @@ static struct phy_driver marvell_drivers[] = {
.set_loopback = genphy_loopback,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
@@ -2413,7 +2394,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -2436,7 +2416,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E3016,
@@ -2479,7 +2458,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 64c9f3bba2cd..1f1a01c98e44 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -23,6 +23,7 @@
* link takes priority and the other port is completely locked out.
*/
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
@@ -32,6 +33,8 @@
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
enum {
+ MV_PMA_FW_VER0 = 0xc011,
+ MV_PMA_FW_VER1 = 0xc012,
MV_PMA_BOOT = 0xc050,
MV_PMA_BOOT_FATAL = BIT(0),
@@ -39,10 +42,32 @@ enum {
MV_PCS_BASE_R = 0x1000,
MV_PCS_1000BASEX = 0x2000,
- MV_PCS_PAIRSWAP = 0x8182,
- MV_PCS_PAIRSWAP_MASK = 0x0003,
- MV_PCS_PAIRSWAP_AB = 0x0002,
- MV_PCS_PAIRSWAP_NONE = 0x0003,
+ MV_PCS_CSCR1 = 0x8000,
+ MV_PCS_CSCR1_ED_MASK = 0x0300,
+ MV_PCS_CSCR1_ED_OFF = 0x0000,
+ MV_PCS_CSCR1_ED_RX = 0x0200,
+ MV_PCS_CSCR1_ED_NLP = 0x0300,
+ MV_PCS_CSCR1_MDIX_MASK = 0x0060,
+ MV_PCS_CSCR1_MDIX_MDI = 0x0000,
+ MV_PCS_CSCR1_MDIX_MDIX = 0x0020,
+ MV_PCS_CSCR1_MDIX_AUTO = 0x0060,
+
+ MV_PCS_CSSR1 = 0x8008,
+ MV_PCS_CSSR1_SPD1_MASK = 0xc000,
+ MV_PCS_CSSR1_SPD1_SPD2 = 0xc000,
+ MV_PCS_CSSR1_SPD1_1000 = 0x8000,
+ MV_PCS_CSSR1_SPD1_100 = 0x4000,
+ MV_PCS_CSSR1_SPD1_10 = 0x0000,
+ MV_PCS_CSSR1_DUPLEX_FULL = BIT(13),
+ MV_PCS_CSSR1_RESOLVED = BIT(11),
+ MV_PCS_CSSR1_MDIX = BIT(6),
+ MV_PCS_CSSR1_SPD2_MASK = 0x000c,
+ MV_PCS_CSSR1_SPD2_5000 = 0x0008,
+ MV_PCS_CSSR1_SPD2_2500 = 0x0004,
+ MV_PCS_CSSR1_SPD2_10000 = 0x0000,
+
+ /* Temperature read register (88E2110 only) */
+ MV_PCS_TEMP = 0x8042,
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
@@ -53,7 +78,9 @@ enum {
/* Vendor2 MMD registers */
MV_V2_PORT_CTRL = 0xf001,
- MV_V2_PORT_CTRL_PWRDOWN = 0x0800,
+ MV_V2_PORT_CTRL_SWRST = BIT(15),
+ MV_V2_PORT_CTRL_PWRDOWN = BIT(11),
+ /* Temperature control/read registers (88X3310 only) */
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
MV_V2_TEMP_CTRL_SAMPLE = 0x0000,
@@ -63,6 +90,8 @@ enum {
};
struct mv3310_priv {
+ u32 firmware_ver;
+
struct device *hwmon_dev;
char *hwmon_name;
};
@@ -79,6 +108,24 @@ static umode_t mv3310_hwmon_is_visible(const void *data,
return 0;
}
+static int mv3310_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+ return phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
+}
+
+static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+ return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP);
+}
+
+static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+ if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310)
+ return mv3310_hwmon_read_temp_reg(phydev);
+ else /* MARVELL_PHY_ID_88E2110 */
+ return mv2110_hwmon_read_temp_reg(phydev);
+}
+
static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *value)
{
@@ -91,7 +138,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
}
if (type == hwmon_temp && attr == hwmon_temp_input) {
- temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
+ temp = mv10g_hwmon_read_temp_reg(phydev);
if (temp < 0)
return temp;
@@ -144,6 +191,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
u16 val;
int ret;
+ if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310)
+ return 0;
+
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP,
MV_V2_TEMP_UNKNOWN);
if (ret < 0)
@@ -207,6 +257,96 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
}
#endif
+static int mv3310_power_down(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+}
+
+static int mv3310_power_up(struct phy_device *phydev)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+ int ret;
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+
+ if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310 ||
+ priv->firmware_ver < 0x00030000)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_SWRST);
+}
+
+static int mv3310_reset(struct phy_device *phydev, u32 unit)
+{
+ int val, err;
+
+ err = phy_modify_mmd(phydev, MDIO_MMD_PCS, unit + MDIO_CTRL1,
+ MDIO_CTRL1_RESET, MDIO_CTRL1_RESET);
+ if (err < 0)
+ return err;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PCS,
+ unit + MDIO_CTRL1, val,
+ !(val & MDIO_CTRL1_RESET),
+ 5000, 100000, true);
+}
+
+static int mv3310_get_edpd(struct phy_device *phydev, u16 *edpd)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1);
+ if (val < 0)
+ return val;
+
+ switch (val & MV_PCS_CSCR1_ED_MASK) {
+ case MV_PCS_CSCR1_ED_NLP:
+ *edpd = 1000;
+ break;
+ case MV_PCS_CSCR1_ED_RX:
+ *edpd = ETHTOOL_PHY_EDPD_NO_TX;
+ break;
+ default:
+ *edpd = ETHTOOL_PHY_EDPD_DISABLE;
+ break;
+ }
+ return 0;
+}
+
+static int mv3310_set_edpd(struct phy_device *phydev, u16 edpd)
+{
+ u16 val;
+ int err;
+
+ switch (edpd) {
+ case 1000:
+ case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS:
+ val = MV_PCS_CSCR1_ED_NLP;
+ break;
+
+ case ETHTOOL_PHY_EDPD_NO_TX:
+ val = MV_PCS_CSCR1_ED_RX;
+ break;
+
+ case ETHTOOL_PHY_EDPD_DISABLE:
+ val = MV_PCS_CSCR1_ED_OFF;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ err = phy_modify_mmd_changed(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1,
+ MV_PCS_CSCR1_ED_MASK, val);
+ if (err > 0)
+ err = mv3310_reset(phydev, MV_PCS_BASE_T);
+
+ return err;
+}
+
static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
@@ -255,6 +395,27 @@ static int mv3310_probe(struct phy_device *phydev)
dev_set_drvdata(&phydev->mdio.dev, priv);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_FW_VER0);
+ if (ret < 0)
+ return ret;
+
+ priv->firmware_ver = ret << 16;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_FW_VER1);
+ if (ret < 0)
+ return ret;
+
+ priv->firmware_ver |= ret;
+
+ phydev_info(phydev, "Firmware version %u.%u.%u.%u\n",
+ priv->firmware_ver >> 24, (priv->firmware_ver >> 16) & 255,
+ (priv->firmware_ver >> 8) & 255, priv->firmware_ver & 255);
+
+ /* Powering down the port when not in use saves about 600mW */
+ ret = mv3310_power_down(phydev);
+ if (ret)
+ return ret;
+
ret = mv3310_hwmon_probe(phydev);
if (ret)
return ret;
@@ -264,16 +425,14 @@ static int mv3310_probe(struct phy_device *phydev)
static int mv3310_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
- MV_V2_PORT_CTRL_PWRDOWN);
+ return mv3310_power_down(phydev);
}
static int mv3310_resume(struct phy_device *phydev)
{
int ret;
- ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
- MV_V2_PORT_CTRL_PWRDOWN);
+ ret = mv3310_power_up(phydev);
if (ret)
return ret;
@@ -299,6 +458,8 @@ static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev)
static int mv3310_config_init(struct phy_device *phydev)
{
+ int err;
+
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
@@ -307,7 +468,15 @@ static int mv3310_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
- return 0;
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ /* Power up so reset works */
+ err = mv3310_power_up(phydev);
+ if (err)
+ return err;
+
+ /* Enable EDPD mode - saving 600mW */
+ return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
}
static int mv3310_get_features(struct phy_device *phydev)
@@ -336,14 +505,42 @@ static int mv3310_get_features(struct phy_device *phydev)
return 0;
}
+static int mv3310_config_mdix(struct phy_device *phydev)
+{
+ u16 val;
+ int err;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI_AUTO:
+ val = MV_PCS_CSCR1_MDIX_AUTO;
+ break;
+ case ETH_TP_MDI_X:
+ val = MV_PCS_CSCR1_MDIX_MDIX;
+ break;
+ case ETH_TP_MDI:
+ val = MV_PCS_CSCR1_MDIX_MDI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = phy_modify_mmd_changed(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1,
+ MV_PCS_CSCR1_MDIX_MASK, val);
+ if (err > 0)
+ err = mv3310_reset(phydev, MV_PCS_BASE_T);
+
+ return err;
+}
+
static int mv3310_config_aneg(struct phy_device *phydev)
{
bool changed = false;
u16 reg;
int ret;
- /* We don't support manual MDI control */
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ ret = mv3310_config_mdix(phydev);
+ if (ret < 0)
+ return ret;
if (phydev->autoneg == AUTONEG_DISABLE)
return genphy_c45_pma_setup_forced(phydev);
@@ -413,35 +610,18 @@ static void mv3310_update_interface(struct phy_device *phydev)
}
/* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
-static int mv3310_read_10gbr_status(struct phy_device *phydev)
+static int mv3310_read_status_10gbaser(struct phy_device *phydev)
{
phydev->link = 1;
phydev->speed = SPEED_10000;
phydev->duplex = DUPLEX_FULL;
- mv3310_update_interface(phydev);
-
return 0;
}
-static int mv3310_read_status(struct phy_device *phydev)
+static int mv3310_read_status_copper(struct phy_device *phydev)
{
- int val;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- linkmode_zero(phydev->lp_advertising);
- phydev->link = 0;
- phydev->pause = 0;
- phydev->asym_pause = 0;
- phydev->mdix = 0;
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
- if (val < 0)
- return val;
-
- if (val & MDIO_STAT1_LSTATUS)
- return mv3310_read_10gbr_status(phydev);
+ int cssr1, speed, val;
val = genphy_c45_read_link(phydev);
if (val < 0)
@@ -451,6 +631,52 @@ static int mv3310_read_status(struct phy_device *phydev)
if (val < 0)
return val;
+ cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
+ if (cssr1 < 0)
+ return cssr1;
+
+ /* If the link settings are not resolved, mark the link down */
+ if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ /* Read the copper link settings */
+ speed = cssr1 & MV_PCS_CSSR1_SPD1_MASK;
+ if (speed == MV_PCS_CSSR1_SPD1_SPD2)
+ speed |= cssr1 & MV_PCS_CSSR1_SPD2_MASK;
+
+ switch (speed) {
+ case MV_PCS_CSSR1_SPD1_SPD2 | MV_PCS_CSSR1_SPD2_10000:
+ phydev->speed = SPEED_10000;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_SPD2 | MV_PCS_CSSR1_SPD2_5000:
+ phydev->speed = SPEED_5000;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_SPD2 | MV_PCS_CSSR1_SPD2_2500:
+ phydev->speed = SPEED_2500;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_1000:
+ phydev->speed = SPEED_1000;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_100:
+ phydev->speed = SPEED_100;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_10:
+ phydev->speed = SPEED_10;
+ break;
+ }
+
+ phydev->duplex = cssr1 & MV_PCS_CSSR1_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ phydev->mdix = cssr1 & MV_PCS_CSSR1_MDIX ?
+ ETH_TP_MDI_X : ETH_TP_MDI;
+
if (val & MDIO_AN_STAT1_COMPLETE) {
val = genphy_c45_read_lpa(phydev);
if (val < 0)
@@ -463,43 +689,64 @@ static int mv3310_read_status(struct phy_device *phydev)
mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
- if (phydev->autoneg == AUTONEG_ENABLE)
- phy_resolve_aneg_linkmode(phydev);
+ /* Update the pause status */
+ phy_resolve_aneg_pause(phydev);
}
- if (phydev->autoneg != AUTONEG_ENABLE) {
- val = genphy_c45_read_pma(phydev);
- if (val < 0)
- return val;
- }
+ return 0;
+}
- if (phydev->speed == SPEED_10000) {
- val = genphy_c45_read_mdix(phydev);
- if (val < 0)
- return val;
- } else {
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP);
- if (val < 0)
- return val;
+static int mv3310_read_status(struct phy_device *phydev)
+{
+ int err, val;
- switch (val & MV_PCS_PAIRSWAP_MASK) {
- case MV_PCS_PAIRSWAP_AB:
- phydev->mdix = ETH_TP_MDI_X;
- break;
- case MV_PCS_PAIRSWAP_NONE:
- phydev->mdix = ETH_TP_MDI;
- break;
- default:
- phydev->mdix = ETH_TP_MDI_INVALID;
- break;
- }
- }
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ linkmode_zero(phydev->lp_advertising);
+ phydev->link = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->mdix = ETH_TP_MDI_INVALID;
- mv3310_update_interface(phydev);
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_STAT1_LSTATUS)
+ err = mv3310_read_status_10gbaser(phydev);
+ else
+ err = mv3310_read_status_copper(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->link)
+ mv3310_update_interface(phydev);
return 0;
}
+static int mv3310_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD:
+ return mv3310_get_edpd(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv3310_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD:
+ return mv3310_set_edpd(phydev, *(u16 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
@@ -514,6 +761,8 @@ static struct phy_driver mv3310_drivers[] = {
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
.read_status = mv3310_read_status,
+ .get_tunable = mv3310_get_tunable,
+ .set_tunable = mv3310_set_tunable,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
@@ -527,6 +776,8 @@ static struct phy_driver mv3310_drivers[] = {
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
.read_status = mv3310_read_status,
+ .get_tunable = mv3310_get_tunable,
+ .set_tunable = mv3310_set_tunable,
},
};
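Editorial note: both driver entries above gain ethtool PHY-tunable plumbing.
A minimal sketch (not part of the patch) of driving the new EDPD tunable from
kernel code through phylib's generic helpers, which dispatch to
mv3310_set_tunable()/mv3310_get_tunable():

	static int example_edpd_roundtrip(struct phy_device *phydev)
	{
		struct ethtool_tunable tuna = {
			.id      = ETHTOOL_PHY_EDPD,
			.type_id = ETHTOOL_TUNABLE_U16,
			.len     = sizeof(u16),
		};
		u16 edpd = ETHTOOL_PHY_EDPD_DFLT_TX_MSECS;
		int err;

		err = phy_ethtool_set_tunable(phydev, &tuna, &edpd);
		if (err)
			return err;

		/* reads back 1000 (ms) on this PHY, per mv3310_get_edpd() */
		return phy_ethtool_get_tunable(phydev, &tuna, &edpd);
	}

From userspace the same path should be reachable through ethtool's
--set-phy-tunable/--get-phy-tunable options with the energy-detect-power-down
tunable, on ethtool builds that support PHY tunables.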
diff --git a/drivers/net/phy/mdio-ipq8064.c b/drivers/net/phy/mdio-ipq8064.c
new file mode 100644
index 000000000000..1bd18857e1c5
--- /dev/null
+++ b/drivers/net/phy/mdio-ipq8064.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Qualcomm IPQ8064 MDIO interface driver
+ *
+ * Copyright (C) 2019 Christian Lamparter <chunkeey@gmail.com>
+ * Copyright (C) 2020 Ansuel Smith <ansuelsmth@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+
+/* MII address register definitions */
+#define MII_ADDR_REG_ADDR 0x10
+#define MII_BUSY BIT(0)
+#define MII_WRITE BIT(1)
+#define MII_CLKRANGE_60_100M (0 << 2)
+#define MII_CLKRANGE_100_150M (1 << 2)
+#define MII_CLKRANGE_20_35M (2 << 2)
+#define MII_CLKRANGE_35_60M (3 << 2)
+#define MII_CLKRANGE_150_250M (4 << 2)
+#define MII_CLKRANGE_250_300M (5 << 2)
+#define MII_CLKRANGE_MASK GENMASK(4, 2)
+#define MII_REG_SHIFT 6
+#define MII_REG_MASK GENMASK(10, 6)
+#define MII_ADDR_SHIFT 11
+#define MII_ADDR_MASK GENMASK(15, 11)
+
+#define MII_DATA_REG_ADDR 0x14
+
+#define MII_MDIO_DELAY_USEC (1000)
+#define MII_MDIO_RETRY_MSEC (10)
+
+struct ipq8064_mdio {
+ struct regmap *base; /* NSS_GMAC0_BASE */
+};
+
+static int
+ipq8064_mdio_wait_busy(struct ipq8064_mdio *priv)
+{
+ u32 busy;
+
+ return regmap_read_poll_timeout(priv->base, MII_ADDR_REG_ADDR, busy,
+ !(busy & MII_BUSY), MII_MDIO_DELAY_USEC,
+ MII_MDIO_RETRY_MSEC * USEC_PER_MSEC);
+}
+
+static int
+ipq8064_mdio_read(struct mii_bus *bus, int phy_addr, int reg_offset)
+{
+ u32 miiaddr = MII_BUSY | MII_CLKRANGE_250_300M;
+ struct ipq8064_mdio *priv = bus->priv;
+ u32 ret_val;
+ int err;
+
+ /* Reject clause 45 */
+ if (reg_offset & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
+ ((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
+
+ regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr);
+ usleep_range(8, 10);
+
+ err = ipq8064_mdio_wait_busy(priv);
+ if (err)
+ return err;
+
+ regmap_read(priv->base, MII_DATA_REG_ADDR, &ret_val);
+ return (int)ret_val;
+}
+
+static int
+ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data)
+{
+ u32 miiaddr = MII_WRITE | MII_BUSY | MII_CLKRANGE_250_300M;
+ struct ipq8064_mdio *priv = bus->priv;
+
+ /* Reject clause 45 */
+ if (reg_offset & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ regmap_write(priv->base, MII_DATA_REG_ADDR, data);
+
+ miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
+ ((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
+
+ regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr);
+ usleep_range(8, 10);
+
+ return ipq8064_mdio_wait_busy(priv);
+}
+
+static int
+ipq8064_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ipq8064_mdio *priv;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "ipq8064_mdio_bus";
+ bus->read = ipq8064_mdio_read;
+ bus->write = ipq8064_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+
+ priv = bus->priv;
+ priv->base = device_node_to_regmap(np);
+ if (IS_ERR(priv->base)) {
+ if (priv->base == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ dev_err(&pdev->dev, "error getting device regmap, error=%pe\n",
+ priv->base);
+ return PTR_ERR(priv->base);
+ }
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, bus);
+ return 0;
+}
+
+static int
+ipq8064_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+
+ return 0;
+}
+
+static const struct of_device_id ipq8064_mdio_dt_ids[] = {
+ { .compatible = "qcom,ipq8064-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq8064_mdio_dt_ids);
+
+static struct platform_driver ipq8064_mdio_driver = {
+ .probe = ipq8064_mdio_probe,
+ .remove = ipq8064_mdio_remove,
+ .driver = {
+ .name = "ipq8064-mdio",
+ .of_match_table = ipq8064_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(ipq8064_mdio_driver);
+
+MODULE_DESCRIPTION("Qualcomm IPQ8064 MDIO interface driver");
+MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
+MODULE_AUTHOR("Ansuel Smith <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
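Editorial note: both accessors pack the PHY and register numbers into a
single MII address word before kicking the busy-wait. A consumer sketch (not
part of the patch) once of_mdiobus_register() has succeeded; for PHY address
1 and MII_BMSR (register 1), ipq8064_mdio_read() composes miiaddr as
MII_BUSY | MII_CLKRANGE_250_300M | (1 << MII_ADDR_SHIFT) |
(1 << MII_REG_SHIFT):

	/* "bus" is the mii_bus registered in ipq8064_mdio_probe() */
	int val = mdiobus_read(bus, 1, MII_BMSR);
	if (val < 0)
		return val;	/* e.g. -ETIMEDOUT from the busy-wait */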
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index aad6809ebe39..42fb5f166136 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -10,6 +10,7 @@
#include <linux/phy.h>
#include <linux/mdio-mux.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
#define MDIO_RATE_ADJ_INT_OFFSET 0x004
@@ -78,18 +79,11 @@ static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
{
- unsigned int timeout = 1000; /* loop for 1s */
u32 val;
- do {
- val = readl(base + MDIO_STAT_OFFSET);
- if ((val & MDIO_STAT_DONE) == result)
- return 0;
-
- usleep_range(1000, 2000);
- } while (timeout--);
-
- return -ETIMEDOUT;
+ return readl_poll_timeout(base + MDIO_STAT_OFFSET, val,
+ (val & MDIO_STAT_DONE) == result,
+ 2000, 1000000);
}
/* start_miim_ops- Program and start MDIO transaction over mdio bus.
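Editorial note: the hunk above trades the open-coded 1 s polling loop for
readl_poll_timeout() with a 2 ms sleep between reads and a 1 s timeout. As a
rough sketch of the semantics (the real macro in <linux/iopoll.h> also
re-reads the register once after the deadline before giving up):

	ktime_t deadline = ktime_add_us(ktime_get(), 1000000);

	for (;;) {
		val = readl(base + MDIO_STAT_OFFSET);
		if ((val & MDIO_STAT_DONE) == result)
			return 0;
		if (ktime_after(ktime_get(), deadline))
			return -ETIMEDOUT;
		usleep_range(2000 >> 2, 2000);	/* sleep_us/4 .. sleep_us */
	}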
diff --git a/drivers/net/phy/mdio-mvusb.c b/drivers/net/phy/mdio-mvusb.c
new file mode 100644
index 000000000000..d5eabddfdf51
--- /dev/null
+++ b/drivers/net/phy/mdio-mvusb.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/usb.h>
+
+#define USB_MARVELL_VID 0x1286
+
+static const struct usb_device_id mvusb_mdio_table[] = {
+ { USB_DEVICE(USB_MARVELL_VID, 0x1fa4) },
+
+ {}
+};
+MODULE_DEVICE_TABLE(usb, mvusb_mdio_table);
+
+enum {
+ MVUSB_CMD_PREAMBLE0,
+ MVUSB_CMD_PREAMBLE1,
+ MVUSB_CMD_ADDR,
+ MVUSB_CMD_VAL,
+};
+
+struct mvusb_mdio {
+ struct usb_device *udev;
+ struct mii_bus *mdio;
+
+ __le16 buf[4];
+};
+
+static int mvusb_mdio_read(struct mii_bus *mdio, int dev, int reg)
+{
+ struct mvusb_mdio *mvusb = mdio->priv;
+ int err, alen;
+
+ if (dev & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0xa400 | (dev << 5) | reg);
+
+ err = usb_bulk_msg(mvusb->udev, usb_sndbulkpipe(mvusb->udev, 2),
+ mvusb->buf, 6, &alen, 100);
+ if (err)
+ return err;
+
+ err = usb_bulk_msg(mvusb->udev, usb_rcvbulkpipe(mvusb->udev, 6),
+ &mvusb->buf[MVUSB_CMD_VAL], 2, &alen, 100);
+ if (err)
+ return err;
+
+ return le16_to_cpu(mvusb->buf[MVUSB_CMD_VAL]);
+}
+
+static int mvusb_mdio_write(struct mii_bus *mdio, int dev, int reg, u16 val)
+{
+ struct mvusb_mdio *mvusb = mdio->priv;
+ int alen;
+
+ if (dev & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0x8000 | (dev << 5) | reg);
+ mvusb->buf[MVUSB_CMD_VAL] = cpu_to_le16(val);
+
+ return usb_bulk_msg(mvusb->udev, usb_sndbulkpipe(mvusb->udev, 2),
+ mvusb->buf, 8, &alen, 100);
+}
+
+static int mvusb_mdio_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct device *dev = &interface->dev;
+ struct mvusb_mdio *mvusb;
+ struct mii_bus *mdio;
+
+ mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
+ if (!mdio)
+ return -ENOMEM;
+
+ mvusb = mdio->priv;
+ mvusb->mdio = mdio;
+ mvusb->udev = usb_get_dev(interface_to_usbdev(interface));
+
+ /* Reverse-engineered from USB packet captures; no idea what these mean. */
+ mvusb->buf[MVUSB_CMD_PREAMBLE0] = cpu_to_le16(0xe800);
+ mvusb->buf[MVUSB_CMD_PREAMBLE1] = cpu_to_le16(0x0001);
+
+ snprintf(mdio->id, MII_BUS_ID_SIZE, "mvusb-%s", dev_name(dev));
+ mdio->name = mdio->id;
+ mdio->parent = dev;
+ mdio->read = mvusb_mdio_read;
+ mdio->write = mvusb_mdio_write;
+
+ usb_set_intfdata(interface, mvusb);
+ return of_mdiobus_register(mdio, dev->of_node);
+}
+
+static void mvusb_mdio_disconnect(struct usb_interface *interface)
+{
+ struct mvusb_mdio *mvusb = usb_get_intfdata(interface);
+ struct usb_device *udev = mvusb->udev;
+
+ mdiobus_unregister(mvusb->mdio);
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(udev);
+}
+
+static struct usb_driver mvusb_mdio_driver = {
+ .name = "mvusb_mdio",
+ .id_table = mvusb_mdio_table,
+ .probe = mvusb_mdio_probe,
+ .disconnect = mvusb_mdio_disconnect,
+};
+
+module_usb_driver(mvusb_mdio_driver);
+
+MODULE_AUTHOR("Tobias Waldekranz <tobias@waldekranz.com>");
+MODULE_DESCRIPTION("Marvell USB MDIO Adapter");
+MODULE_LICENSE("GPL");
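Editorial note: the adapter speaks a fixed four-word little-endian command
block, reverse-engineered from captures. A sketch (not part of the patch) of
the wire format implied by mvusb_mdio_read()/mvusb_mdio_write(); field
meanings are inferred, and the preamble words remain opaque:

	struct mvusb_cmd {
		__le16 preamble0;	/* always 0xe800 */
		__le16 preamble1;	/* always 0x0001 */
		__le16 addr;		/* opcode | (dev << 5) | reg;
					 * 0xa400 = read, 0x8000 = write */
		__le16 val;		/* write data; unused on the read path */
	};

A read sends the first three words (6 bytes) out on endpoint 2 and takes the
2-byte result back in on endpoint 6; a write sends the whole 8-byte block.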
diff --git a/drivers/net/phy/mdio-xpcs.c b/drivers/net/phy/mdio-xpcs.c
new file mode 100644
index 000000000000..0d66a8ba7eb6
--- /dev/null
+++ b/drivers/net/phy/mdio-xpcs.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ *
+ * Author: Jose Abreu <Jose.Abreu@synopsys.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/mdio.h>
+#include <linux/mdio-xpcs.h>
+#include <linux/phylink.h>
+#include <linux/workqueue.h>
+
+#define SYNOPSYS_XPCS_USXGMII_ID 0x7996ced0
+#define SYNOPSYS_XPCS_10GKR_ID 0x7996ced0
+#define SYNOPSYS_XPCS_XLGMII_ID 0x7996ced0
+#define SYNOPSYS_XPCS_MASK 0xffffffff
+
+/* Vendor regs access */
+#define DW_VENDOR BIT(15)
+
+/* VR_XS_PCS */
+#define DW_USXGMII_RST BIT(10)
+#define DW_USXGMII_EN BIT(9)
+#define DW_VR_XS_PCS_DIG_STS 0x0010
+#define DW_RXFIFO_ERR GENMASK(6, 5)
+
+/* SR_MII */
+#define DW_USXGMII_FULL BIT(8)
+#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5))
+#define DW_USXGMII_10000 (BIT(13) | BIT(6))
+#define DW_USXGMII_5000 (BIT(13) | BIT(5))
+#define DW_USXGMII_2500 (BIT(5))
+#define DW_USXGMII_1000 (BIT(6))
+#define DW_USXGMII_100 (BIT(13))
+#define DW_USXGMII_10 (0)
+
+/* SR_AN */
+#define DW_SR_AN_ADV1 0x10
+#define DW_SR_AN_ADV2 0x11
+#define DW_SR_AN_ADV3 0x12
+#define DW_SR_AN_LP_ABL1 0x13
+#define DW_SR_AN_LP_ABL2 0x14
+#define DW_SR_AN_LP_ABL3 0x15
+
+/* Clause 73 Defines */
+/* AN_LP_ABL1 */
+#define DW_C73_PAUSE BIT(10)
+#define DW_C73_ASYM_PAUSE BIT(11)
+#define DW_C73_AN_ADV_SF 0x1
+/* AN_LP_ABL2 */
+#define DW_C73_1000KX BIT(5)
+#define DW_C73_10000KX4 BIT(6)
+#define DW_C73_10000KR BIT(7)
+/* AN_LP_ABL3 */
+#define DW_C73_2500KX BIT(0)
+#define DW_C73_5000KR BIT(1)
+
+static const int xpcs_usxgmii_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_10gkr_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_xlgmii_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const phy_interface_t xpcs_usxgmii_interfaces[] = {
+ PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_MAX,
+};
+
+static const phy_interface_t xpcs_10gkr_interfaces[] = {
+ PHY_INTERFACE_MODE_10GKR,
+ PHY_INTERFACE_MODE_MAX,
+};
+
+static const phy_interface_t xpcs_xlgmii_interfaces[] = {
+ PHY_INTERFACE_MODE_XLGMII,
+ PHY_INTERFACE_MODE_MAX,
+};
+
+static struct xpcs_id {
+ u32 id;
+ u32 mask;
+ const int *supported;
+ const phy_interface_t *interface;
+} xpcs_id_list[] = {
+ {
+ .id = SYNOPSYS_XPCS_USXGMII_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_usxgmii_features,
+ .interface = xpcs_usxgmii_interfaces,
+ }, {
+ .id = SYNOPSYS_XPCS_10GKR_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_10gkr_features,
+ .interface = xpcs_10gkr_interfaces,
+ }, {
+ .id = SYNOPSYS_XPCS_XLGMII_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_xlgmii_features,
+ .interface = xpcs_xlgmii_interfaces,
+ },
+};
+
+static int xpcs_read(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+{
+ u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+
+ return mdiobus_read(xpcs->bus, xpcs->addr, reg_addr);
+}
+
+static int xpcs_write(struct mdio_xpcs_args *xpcs, int dev, u32 reg, u16 val)
+{
+ u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+
+ return mdiobus_write(xpcs->bus, xpcs->addr, reg_addr, val);
+}
+
+static int xpcs_read_vendor(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+{
+ return xpcs_read(xpcs, dev, DW_VENDOR | reg);
+}
+
+static int xpcs_write_vendor(struct mdio_xpcs_args *xpcs, int dev, int reg,
+ u16 val)
+{
+ return xpcs_write(xpcs, dev, DW_VENDOR | reg, val);
+}
+
+static int xpcs_read_vpcs(struct mdio_xpcs_args *xpcs, int reg)
+{
+ return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg);
+}
+
+static int xpcs_write_vpcs(struct mdio_xpcs_args *xpcs, int reg, u16 val)
+{
+ return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val);
+}
+
+static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev)
+{
+ /* Poll until the reset bit clears (12 retries of 50 ms each = 0.6 s) */
+ unsigned int retries = 12;
+ int ret;
+
+ do {
+ msleep(50);
+ ret = xpcs_read(xpcs, dev, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+ } while (ret & MDIO_CTRL1_RESET && --retries);
+
+ return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0;
+}
+
+static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev)
+{
+ int ret;
+
+ ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_poll_reset(xpcs, dev);
+}
+
+#define xpcs_warn(__xpcs, __state, __args...) \
+({ \
+ if ((__state)->link) \
+ dev_warn(&(__xpcs)->bus->dev, ##__args); \
+})
+
+static int xpcs_read_fault(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_STAT1_FAULT) {
+ xpcs_warn(xpcs, state, "Link fault condition detected!\n");
+ return -EFAULT;
+ }
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_STAT2_RXFAULT)
+ xpcs_warn(xpcs, state, "Receiver fault detected!\n");
+ if (ret & MDIO_STAT2_TXFAULT)
+ xpcs_warn(xpcs, state, "Transmitter fault detected!\n");
+
+ ret = xpcs_read_vendor(xpcs, MDIO_MMD_PCS, DW_VR_XS_PCS_DIG_STS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_RXFIFO_ERR) {
+ xpcs_warn(xpcs, state, "FIFO fault condition detected!\n");
+ return -EFAULT;
+ }
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_PCS_10GBRT_STAT1_BLKLK))
+ xpcs_warn(xpcs, state, "Link is not locked!\n");
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_PCS_10GBRT_STAT2_ERR) {
+ xpcs_warn(xpcs, state, "Link has errors!\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int xpcs_read_link(struct mdio_xpcs_args *xpcs, bool an)
+{
+ bool link = true;
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_STAT1_LSTATUS))
+ link = false;
+
+ if (an) {
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_STAT1_LSTATUS))
+ link = false;
+ }
+
+ return link;
+}
+
+static int xpcs_get_max_usxgmii_speed(const unsigned long *supported)
+{
+ int max = SPEED_UNKNOWN;
+
+ if (phylink_test(supported, 1000baseKX_Full))
+ max = SPEED_1000;
+ if (phylink_test(supported, 2500baseX_Full))
+ max = SPEED_2500;
+ if (phylink_test(supported, 10000baseKX4_Full))
+ max = SPEED_10000;
+ if (phylink_test(supported, 10000baseKR_Full))
+ max = SPEED_10000;
+
+ return max;
+}
+
+static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed)
+{
+ int ret, speed_sel;
+
+ switch (speed) {
+ case SPEED_10:
+ speed_sel = DW_USXGMII_10;
+ break;
+ case SPEED_100:
+ speed_sel = DW_USXGMII_100;
+ break;
+ case SPEED_1000:
+ speed_sel = DW_USXGMII_1000;
+ break;
+ case SPEED_2500:
+ speed_sel = DW_USXGMII_2500;
+ break;
+ case SPEED_5000:
+ speed_sel = DW_USXGMII_5000;
+ break;
+ case SPEED_10000:
+ speed_sel = DW_USXGMII_10000;
+ break;
+ default:
+ /* Unsupported speed */
+ return -EINVAL;
+ }
+
+ ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DW_USXGMII_SS_MASK;
+ ret |= speed_sel | DW_USXGMII_FULL;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST);
+}
+
+static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
+{
+ int ret, adv;
+
+ /* By default, in USXGMII mode XPCS operates at 10G baud and
+ * replicates data to achieve lower speeds. Hence, in this
+ * default configuration we need to advertise all supported
+ * modes and not only the ones we want to use.
+ */
+
+ /* SR_AN_ADV3 */
+ adv = 0;
+ if (phylink_test(xpcs->supported, 2500baseX_Full))
+ adv |= DW_C73_2500KX;
+
+ /* TODO: 5000baseKR */
+
+ ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV3, adv);
+ if (ret < 0)
+ return ret;
+
+ /* SR_AN_ADV2 */
+ adv = 0;
+ if (phylink_test(xpcs->supported, 1000baseKX_Full))
+ adv |= DW_C73_1000KX;
+ if (phylink_test(xpcs->supported, 10000baseKX4_Full))
+ adv |= DW_C73_10000KX4;
+ if (phylink_test(xpcs->supported, 10000baseKR_Full))
+ adv |= DW_C73_10000KR;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV2, adv);
+ if (ret < 0)
+ return ret;
+
+ /* SR_AN_ADV1 */
+ adv = DW_C73_AN_ADV_SF;
+ if (phylink_test(xpcs->supported, Pause))
+ adv |= DW_C73_PAUSE;
+ if (phylink_test(xpcs->supported, Asym_Pause))
+ adv |= DW_C73_ASYM_PAUSE;
+
+ return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv);
+}
+
+static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs)
+{
+ int ret;
+
+ ret = xpcs_config_aneg_c73(xpcs);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
+
+ return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret);
+}
+
+static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_AN_STAT1_COMPLETE) {
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1);
+ if (ret < 0)
+ return ret;
+
+ /* Check if Aneg outcome is valid */
+ if (!(ret & DW_C73_AN_ADV_SF)) {
+ xpcs_config_aneg(xpcs);
+ return 0;
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int xpcs_read_lpa(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_AN_STAT1_LPABLE)) {
+ phylink_clear(state->lp_advertising, Autoneg);
+ return 0;
+ }
+
+ phylink_set(state->lp_advertising, Autoneg);
+
+ /* Clause 73 outcome */
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL3);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_2500KX)
+ phylink_set(state->lp_advertising, 2500baseX_Full);
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_1000KX)
+ phylink_set(state->lp_advertising, 1000baseKX_Full);
+ if (ret & DW_C73_10000KX4)
+ phylink_set(state->lp_advertising, 10000baseKX4_Full);
+ if (ret & DW_C73_10000KR)
+ phylink_set(state->lp_advertising, 10000baseKR_Full);
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_PAUSE)
+ phylink_set(state->lp_advertising, Pause);
+ if (ret & DW_C73_ASYM_PAUSE)
+ phylink_set(state->lp_advertising, Asym_Pause);
+
+ linkmode_and(state->lp_advertising, state->lp_advertising,
+ state->advertising);
+ return 0;
+}
+
+static void xpcs_resolve_lpa(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising);
+
+ state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->speed = max_speed;
+ state->duplex = DUPLEX_FULL;
+}
+
+static int xpcs_get_max_xlgmii_speed(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ unsigned long *adv = state->advertising;
+ int speed = SPEED_UNKNOWN;
+ int bit;
+
+ for_each_set_bit(bit, adv, __ETHTOOL_LINK_MODE_MASK_NBITS) {
+ int new_speed = SPEED_UNKNOWN;
+
+ switch (bit) {
+ case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
+ case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
+ case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
+ new_speed = SPEED_25000;
+ break;
+ case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
+ new_speed = SPEED_40000;
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseDR_Full_BIT:
+ new_speed = SPEED_50000;
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT:
+ new_speed = SPEED_100000;
+ break;
+ default:
+ continue;
+ }
+
+ if (new_speed > speed)
+ speed = new_speed;
+ }
+
+ return speed;
+}
+
+static void xpcs_resolve_pma(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->duplex = DUPLEX_FULL;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ state->speed = SPEED_10000;
+ break;
+ case PHY_INTERFACE_MODE_XLGMII:
+ state->speed = xpcs_get_max_xlgmii_speed(xpcs, state);
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int xpcs_validate(struct mdio_xpcs_args *xpcs,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ linkmode_and(supported, supported, xpcs->supported);
+ linkmode_and(state->advertising, state->advertising, xpcs->supported);
+ return 0;
+}
+
+static int xpcs_config(struct mdio_xpcs_args *xpcs,
+ const struct phylink_link_state *state)
+{
+ int ret;
+
+ if (state->an_enabled) {
+ ret = xpcs_config_aneg(xpcs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ /* Link needs to be read first ... */
+ state->link = xpcs_read_link(xpcs, state->an_enabled) > 0 ? 1 : 0;
+
+ /* ... and then we check the faults. */
+ ret = xpcs_read_fault(xpcs, state);
+ if (ret) {
+ ret = xpcs_soft_reset(xpcs, MDIO_MMD_PCS);
+ if (ret)
+ return ret;
+
+ state->link = 0;
+
+ return xpcs_config(xpcs, state);
+ }
+
+ if (state->an_enabled && xpcs_aneg_done(xpcs, state)) {
+ state->an_complete = true;
+ xpcs_read_lpa(xpcs, state);
+ xpcs_resolve_lpa(xpcs, state);
+ } else if (state->an_enabled) {
+ state->link = 0;
+ } else if (state->link) {
+ xpcs_resolve_pma(xpcs, state);
+ }
+
+ return 0;
+}
+
+static int xpcs_link_up(struct mdio_xpcs_args *xpcs, int speed,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_USXGMII)
+ return xpcs_config_usxgmii(xpcs, speed);
+
+ return 0;
+}
+
+static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
+{
+ int ret;
+ u32 id;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1);
+ if (ret < 0)
+ return 0xffffffff;
+
+ id = ret << 16;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID2);
+ if (ret < 0)
+ return 0xffffffff;
+
+ return id | ret;
+}
+
+static bool xpcs_check_features(struct mdio_xpcs_args *xpcs,
+ struct xpcs_id *match,
+ phy_interface_t interface)
+{
+ int i;
+
+ for (i = 0; match->interface[i] != PHY_INTERFACE_MODE_MAX; i++) {
+ if (match->interface[i] == interface)
+ break;
+ }
+
+ if (match->interface[i] == PHY_INTERFACE_MODE_MAX)
+ return false;
+
+ for (i = 0; match->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(match->supported[i], xpcs->supported);
+
+ return true;
+}
+
+static int xpcs_probe(struct mdio_xpcs_args *xpcs, phy_interface_t interface)
+{
+ u32 xpcs_id = xpcs_get_id(xpcs);
+ struct xpcs_id *match = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) {
+ struct xpcs_id *entry = &xpcs_id_list[i];
+
+ if ((xpcs_id & entry->mask) == entry->id) {
+ match = entry;
+
+ if (xpcs_check_features(xpcs, match, interface))
+ return xpcs_soft_reset(xpcs, MDIO_MMD_PCS);
+ }
+ }
+
+ return -ENODEV;
+}
+
+static struct mdio_xpcs_ops xpcs_ops = {
+ .validate = xpcs_validate,
+ .config = xpcs_config,
+ .get_state = xpcs_get_state,
+ .link_up = xpcs_link_up,
+ .probe = xpcs_probe,
+};
+
+struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
+{
+ return &xpcs_ops;
+}
+EXPORT_SYMBOL_GPL(mdio_xpcs_get_ops);
+
+MODULE_LICENSE("GPL v2");
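Editorial note: the ops table is the only interface this helper library
exposes. A minimal consumer sketch (not part of the patch; the args struct
and accessor come from <linux/mdio-xpcs.h>, while "bus" and "addr" stand in
for a MAC driver's MDIO bus and PCS address):

	static int example_attach_xpcs(struct mdio_xpcs_args *xpcs,
				       struct mii_bus *bus, int addr)
	{
		struct mdio_xpcs_ops *ops = mdio_xpcs_get_ops();

		xpcs->bus  = bus;
		xpcs->addr = addr;

		/* Matches a Synopsys ID, fills xpcs->supported and
		 * soft-resets the PCS; -ENODEV if nothing matches.
		 */
		return ops->probe(xpcs, PHY_INTERFACE_MODE_USXGMII);
	}

After a successful probe, phylink-facing code calls ops->config(),
ops->get_state() and ops->link_up() as defined above.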
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 9bb9f37f21dc..7a4eb3f2cb74 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -462,6 +462,23 @@ static struct class mdio_bus_class = {
.dev_groups = mdio_bus_groups,
};
+/**
+ * mdio_find_bus - Given the name of an mdiobus, find the mii_bus.
+ * @mdio_name: The name of an mdiobus.
+ *
+ * Returns a reference to the mii_bus, or NULL if none found. The
+ * embedded struct device will have its reference count incremented,
+ * and must be released with put_device() once the bus is finished with.
+ */
+struct mii_bus *mdio_find_bus(const char *mdio_name)
+{
+ struct device *d;
+
+ d = class_find_device_by_name(&mdio_bus_class, mdio_name);
+ return d ? to_mii_bus(d) : NULL;
+}
+EXPORT_SYMBOL(mdio_find_bus);
+
#if IS_ENABLED(CONFIG_OF_MDIO)
/**
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
@@ -808,6 +825,38 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
EXPORT_SYMBOL(__mdiobus_write);
/**
+ * __mdiobus_modify_changed - Unlocked version of the mdiobus_modify function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Read the register, modify it, and if the value changed, write it back
+ * to the device. Returns 1 if the register was changed, 0 if it already
+ * held the desired value, and a negative errno on error.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set)
+{
+ int new, ret;
+
+ ret = __mdiobus_read(bus, addr, regnum);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ ret = __mdiobus_write(bus, addr, regnum, new);
+
+ return ret < 0 ? ret : 1;
+}
+EXPORT_SYMBOL_GPL(__mdiobus_modify_changed);
+
+/**
* mdiobus_read_nested - Nested version of the mdiobus_read function
* @bus: the mii_bus struct
* @addr: the phy address
@@ -824,7 +873,8 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- BUG_ON(in_interrupt());
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
retval = __mdiobus_read(bus, addr, regnum);
@@ -848,7 +898,8 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- BUG_ON(in_interrupt());
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
mutex_lock(&bus->mdio_lock);
retval = __mdiobus_read(bus, addr, regnum);
@@ -876,7 +927,8 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- BUG_ON(in_interrupt());
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
err = __mdiobus_write(bus, addr, regnum, val);
@@ -901,7 +953,8 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- BUG_ON(in_interrupt());
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
mutex_lock(&bus->mdio_lock);
err = __mdiobus_write(bus, addr, regnum, val);
@@ -912,6 +965,30 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
EXPORT_SYMBOL(mdiobus_write);
/**
+ * mdiobus_modify - Convenience function for modifying a given mdio device
+ * register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
+{
+ int err;
+
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err < 0 ? err : 0;
+}
+EXPORT_SYMBOL_GPL(mdiobus_modify);
+
+/**
* mdio_bus_match - determine if given MDIO driver supports the given
* MDIO device
* @dev: target MDIO device
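Editorial note on the two modify helpers added above, with a usage sketch
(not part of the patch; MII_BMCR and BMCR_PDOWN from <linux/mii.h> are just
example register/bit choices):

	int ret;

	/* Unlocked variant: the caller must hold bus->mdio_lock. Returns
	 * 1 if the register held a different value and was rewritten,
	 * 0 if no write was needed, or a negative errno on failure.
	 */
	ret = __mdiobus_modify_changed(bus, addr, MII_BMCR, BMCR_PDOWN, 0);

	/* Locked wrapper: takes bus->mdio_lock itself and folds the
	 * "changed" indication away, returning 0 or a negative errno.
	 */
	ret = mdiobus_modify(bus, addr, MII_BMCR, BMCR_PDOWN, 0);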
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 63dedec0433d..3a4d83fa52dc 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -25,6 +25,7 @@
#include <linux/micrel_phy.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/delay.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -704,6 +705,50 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
return phy_write_mmd(phydev, 2, reg, newval);
}
+#define KSZ9131RN_MMD_COMMON_CTRL_REG 2
+#define KSZ9131RN_RXC_DLL_CTRL 76
+#define KSZ9131RN_TXC_DLL_CTRL 77
+#define KSZ9131RN_DLL_CTRL_BYPASS BIT_MASK(12)
+#define KSZ9131RN_DLL_ENABLE_DELAY 0
+#define KSZ9131RN_DLL_DISABLE_DELAY BIT(12)
+
+static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
+{
+ u16 rxcdll_val, txcdll_val;
+ int ret;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
+ txcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
+ txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ txcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
+ break;
+ default:
+ return 0;
+ }
+
+ ret = phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ KSZ9131RN_RXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ rxcdll_val);
+ if (ret < 0)
+ return ret;
+
+ return phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ KSZ9131RN_TXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ txcdll_val);
+}
+
static int ksz9131_config_init(struct phy_device *phydev)
{
const struct device *dev = &phydev->mdio.dev;
@@ -730,6 +775,12 @@ static int ksz9131_config_init(struct phy_device *phydev)
if (!of_node)
return 0;
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = ksz9131_config_rgmii_delay(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
ret = ksz9131_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_CLK_PAD_SKEW, 5,
clk_skews, 2);
@@ -902,6 +953,12 @@ static int kszphy_resume(struct phy_device *phydev)
genphy_resume(phydev);
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
ret = kszphy_config_reset(phydev);
if (ret)
return ret;
@@ -1147,7 +1204,7 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
- .read_status = ksz9031_read_status,
+ .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 001def4509c2..fed3e395f18e 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -3,9 +3,21 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/phy.h>
+/* External Register Control Register */
+#define LAN87XX_EXT_REG_CTL (0x14)
+#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
+#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
+
+/* External Register Read Data Register */
+#define LAN87XX_EXT_REG_RD_DATA (0x15)
+
+/* External Register Write Data Register */
+#define LAN87XX_EXT_REG_WR_DATA (0x16)
+
/* Interrupt Source Register */
#define LAN87XX_INTERRUPT_SOURCE (0x18)
@@ -14,9 +26,160 @@
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
+/* Indirect PHY access attributes: access mode and register bank */
+#define PHYACC_ATTR_MODE_READ 0
+#define PHYACC_ATTR_MODE_WRITE 1
+#define PHYACC_ATTR_MODE_MODIFY 2
+
+#define PHYACC_ATTR_BANK_SMI 0
+#define PHYACC_ATTR_BANK_MISC 1
+#define PHYACC_ATTR_BANK_PCS 2
+#define PHYACC_ATTR_BANK_AFE 3
+#define PHYACC_ATTR_BANK_MAX 7
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
+struct access_ereg_val {
+ u8 mode;
+ u8 bank;
+ u8 offset;
+ u16 val;
+ u16 mask;
+};
+
+static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
+ u8 offset, u16 val)
+{
+ u16 ereg = 0;
+ int rc = 0;
+
+ if (mode > PHYACC_ATTR_MODE_WRITE || bank > PHYACC_ATTR_BANK_MAX)
+ return -EINVAL;
+
+ if (bank == PHYACC_ATTR_BANK_SMI) {
+ if (mode == PHYACC_ATTR_MODE_WRITE)
+ rc = phy_write(phydev, offset, val);
+ else
+ rc = phy_read(phydev, offset);
+ return rc;
+ }
+
+ if (mode == PHYACC_ATTR_MODE_WRITE) {
+ ereg = LAN87XX_EXT_REG_CTL_WR_CTL;
+ rc = phy_write(phydev, LAN87XX_EXT_REG_WR_DATA, val);
+ if (rc < 0)
+ return rc;
+ } else {
+ ereg = LAN87XX_EXT_REG_CTL_RD_CTL;
+ }
+
+ ereg |= (bank << 8) | offset;
+
+ rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
+ if (rc < 0)
+ return rc;
+
+ if (mode == PHYACC_ATTR_MODE_READ)
+ rc = phy_read(phydev, LAN87XX_EXT_REG_RD_DATA);
+
+ return rc;
+}
+
+static int access_ereg_modify_changed(struct phy_device *phydev,
+ u8 bank, u8 offset, u16 val, u16 mask)
+{
+ int new = 0, rc = 0;
+
+ if (bank > PHYACC_ATTR_BANK_MAX)
+ return -EINVAL;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, bank, offset, val);
+ if (rc < 0)
+ return rc;
+
+ new = val | (rc & (mask ^ 0xFFFF));
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, bank, offset, new);
+
+ return rc;
+}
+
+static int lan87xx_phy_init(struct phy_device *phydev)
+{
+ static const struct access_ereg_val init[] = {
+ /* TX Amplitude = 5 */
+ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
+ 0x000A, 0x001E},
+ /* Clear SMI interrupts */
+ {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
+ 0, 0},
+ /* Clear MISC interrupts */
+ {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
+ 0, 0},
+ /* Turn on TC10 Ring Oscillator (ROSC) */
+ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
+ 0x0020, 0x0020},
+ /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
+ 0x283C, 0},
+ /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
+ 0x274F, 0},
+ /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
+ * and Wake_In to wake PHY
+ */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
+ 0x80A7, 0},
+ /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
+ * to 128 uS
+ */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
+ 0xF110, 0},
+ /* Enable HW Init */
+ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
+ 0x0100, 0x0100},
+ };
+ int rc, i;
+
+ /* Start manual initialization procedures in Managed Mode */
+ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
+ 0x1a, 0x0000, 0x0100);
+ if (rc < 0)
+ return rc;
+
+ /* Soft Reset the SMI block */
+ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
+ 0x00, 0x8000, 0x8000);
+ if (rc < 0)
+ return rc;
+
+ /* Check to see if the self-clearing bit is cleared */
+ usleep_range(1000, 2000);
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_SMI, 0x00, 0);
+ if (rc < 0)
+ return rc;
+ if ((rc & 0x8000) != 0)
+ return -ETIMEDOUT;
+
+ /* PHY Initialization */
+ for (i = 0; i < ARRAY_SIZE(init); i++) {
+ if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
+ rc = access_ereg_modify_changed(phydev, init[i].bank,
+ init[i].offset,
+ init[i].val,
+ init[i].mask);
+ } else {
+ rc = access_ereg(phydev, init[i].mode, init[i].bank,
+ init[i].offset, init[i].val);
+ }
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
static int lan87xx_phy_config_intr(struct phy_device *phydev)
{
int rc, val = 0;
@@ -40,6 +203,13 @@ static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
+static int lan87xx_config_init(struct phy_device *phydev)
+{
+ int rc = lan87xx_phy_init(phydev);
+
+ return rc < 0 ? rc : 0;
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
.phy_id = 0x0007c150,
@@ -48,6 +218,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
+ .config_init = lan87xx_config_init,
.config_aneg = genphy_config_aneg,
.ack_interrupt = lan87xx_phy_ack_interrupt,
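Editorial note: everything except the SMI bank goes through the indirect
"external register" window (registers 0x14-0x16) that this patch adds. A
sketch (not part of the patch) of reading offset 0x08 of the MISC bank with
the new helper, mirroring the "Clear MISC interrupts" step of the init table:

	int rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
			     PHYACC_ATTR_BANK_MISC, 0x08, 0);
	if (rc < 0)
		return rc;
	/* rc now holds the value latched into LAN87XX_EXT_REG_RD_DATA
	 * (register 0x15) by the read command written to 0x14.
	 */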
diff --git a/drivers/net/phy/mscc/Makefile b/drivers/net/phy/mscc/Makefile
new file mode 100644
index 000000000000..10af42cd9839
--- /dev/null
+++ b/drivers/net/phy/mscc/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for MSCC networking PHY driver
+
+obj-$(CONFIG_MICROSEMI_PHY) := mscc.o
+mscc-objs := mscc_main.o
+
+ifdef CONFIG_MACSEC
+mscc-objs += mscc_macsec.o
+endif
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
new file mode 100644
index 000000000000..414e3b31bb1f
--- /dev/null
+++ b/drivers/net/phy/mscc/mscc.h
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for Microsemi VSC85xx PHYs
+ *
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#ifndef _MSCC_PHY_H_
+#define _MSCC_PHY_H_
+
+#if IS_ENABLED(CONFIG_MACSEC)
+#include "mscc_macsec.h"
+#endif
+
+enum rgmii_clock_delay {
+ RGMII_CLK_DELAY_0_2_NS = 0,
+ RGMII_CLK_DELAY_0_8_NS = 1,
+ RGMII_CLK_DELAY_1_1_NS = 2,
+ RGMII_CLK_DELAY_1_7_NS = 3,
+ RGMII_CLK_DELAY_2_0_NS = 4,
+ RGMII_CLK_DELAY_2_3_NS = 5,
+ RGMII_CLK_DELAY_2_6_NS = 6,
+ RGMII_CLK_DELAY_3_4_NS = 7
+};
+
+/* Microsemi VSC85xx PHY registers */
+/* IEEE 802. Std Registers */
+#define MSCC_PHY_BYPASS_CONTROL 18
+#define DISABLE_HP_AUTO_MDIX_MASK 0x0080
+#define DISABLE_PAIR_SWAP_CORR_MASK 0x0020
+#define DISABLE_POLARITY_CORR_MASK 0x0010
+#define PARALLEL_DET_IGNORE_ADVERTISED 0x0008
+
+#define MSCC_PHY_EXT_CNTL_STATUS 22
+#define SMI_BROADCAST_WR_EN 0x0001
+
+#define MSCC_PHY_ERR_RX_CNT 19
+#define MSCC_PHY_ERR_FALSE_CARRIER_CNT 20
+#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT 21
+#define ERR_CNT_MASK GENMASK(7, 0)
+
+#define MSCC_PHY_EXT_PHY_CNTL_1 23
+#define MAC_IF_SELECTION_MASK 0x1800
+#define MAC_IF_SELECTION_GMII 0
+#define MAC_IF_SELECTION_RMII 1
+#define MAC_IF_SELECTION_RGMII 2
+#define MAC_IF_SELECTION_POS 11
+#define VSC8584_MAC_IF_SELECTION_MASK 0x1000
+#define VSC8584_MAC_IF_SELECTION_SGMII 0
+#define VSC8584_MAC_IF_SELECTION_1000BASEX 1
+#define VSC8584_MAC_IF_SELECTION_POS 12
+#define FAR_END_LOOPBACK_MODE_MASK 0x0008
+#define MEDIA_OP_MODE_MASK 0x0700
+#define MEDIA_OP_MODE_COPPER 0
+#define MEDIA_OP_MODE_SERDES 1
+#define MEDIA_OP_MODE_1000BASEX 2
+#define MEDIA_OP_MODE_100BASEFX 3
+#define MEDIA_OP_MODE_AMS_COPPER_SERDES 5
+#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX 6
+#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX 7
+#define MEDIA_OP_MODE_POS 8
+
+#define MSCC_PHY_EXT_PHY_CNTL_2 24
+
+#define MII_VSC85XX_INT_MASK 25
+#define MII_VSC85XX_INT_MASK_MDINT BIT(15)
+#define MII_VSC85XX_INT_MASK_LINK_CHG BIT(13)
+#define MII_VSC85XX_INT_MASK_WOL BIT(6)
+#define MII_VSC85XX_INT_MASK_EXT BIT(5)
+#define MII_VSC85XX_INT_STATUS 26
+
+#define MII_VSC85XX_INT_MASK_MASK (MII_VSC85XX_INT_MASK_MDINT | \
+ MII_VSC85XX_INT_MASK_LINK_CHG | \
+ MII_VSC85XX_INT_MASK_EXT)
+
+#define MSCC_PHY_WOL_MAC_CONTROL 27
+#define EDGE_RATE_CNTL_POS 5
+#define EDGE_RATE_CNTL_MASK 0x00E0
+
+#define MSCC_PHY_DEV_AUX_CNTL 28
+#define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000
+
+#define MSCC_PHY_LED_MODE_SEL 29
+#define LED_MODE_SEL_POS(x) ((x) * 4)
+#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
+#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
+
+#define MSCC_EXT_PAGE_CSR_CNTL_17 17
+#define MSCC_EXT_PAGE_CSR_CNTL_18 18
+
+#define MSCC_EXT_PAGE_CSR_CNTL_19 19
+#define MSCC_PHY_CSR_CNTL_19_REG_ADDR(x) (x)
+#define MSCC_PHY_CSR_CNTL_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_CSR_CNTL_19_READ BIT(14)
+#define MSCC_PHY_CSR_CNTL_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_CSR_CNTL_20 20
+#define MSCC_PHY_CSR_CNTL_20_TARGET(x) (x)
+
+#define PHY_MCB_TARGET 0x07
+#define PHY_MCB_S6G_WRITE BIT(31)
+#define PHY_MCB_S6G_READ BIT(30)
+
+#define PHY_S6G_PLL5G_CFG0 0x06
+#define PHY_S6G_LCPLL_CFG 0x11
+#define PHY_S6G_PLL_CFG 0x2b
+#define PHY_S6G_COMMON_CFG 0x2c
+#define PHY_S6G_GPC_CFG 0x2e
+#define PHY_S6G_MISC_CFG 0x3b
+#define PHY_MCB_S6G_CFG 0x3f
+#define PHY_S6G_DFT_CFG2 0x3e
+#define PHY_S6G_PLL_STATUS 0x31
+#define PHY_S6G_IB_STATUS0 0x2f
+
+#define PHY_S6G_SYS_RST_POS 31
+#define PHY_S6G_ENA_LANE_POS 18
+#define PHY_S6G_ENA_LOOP_POS 8
+#define PHY_S6G_QRATE_POS 6
+#define PHY_S6G_IF_MODE_POS 4
+#define PHY_S6G_PLL_ENA_OFFS_POS 21
+#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
+#define PHY_S6G_PLL_FSM_ENA_POS 7
+
+#define MSCC_EXT_PAGE_ACCESS 31
+#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
+#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
+#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
+#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
+#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
+#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
+#define MSCC_PHY_PAGE_MACSEC MSCC_PHY_PAGE_EXTENDED_4
+/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
+ * in the same package.
+ */
+#define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */
+#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */
+#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */
+
+/* Extended Page 1 Registers */
+#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18
+#define VALID_CRC_CNT_CRC_MASK GENMASK(13, 0)
+
+#define MSCC_PHY_EXT_MODE_CNTL 19
+#define FORCE_MDI_CROSSOVER_MASK 0x000C
+#define FORCE_MDI_CROSSOVER_MDIX 0x000C
+#define FORCE_MDI_CROSSOVER_MDI 0x0008
+
+#define MSCC_PHY_ACTIPHY_CNTL 20
+#define PHY_ADDR_REVERSED 0x0200
+#define DOWNSHIFT_CNTL_MASK 0x001C
+#define DOWNSHIFT_EN 0x0010
+#define DOWNSHIFT_CNTL_POS 2
+
+#define MSCC_PHY_EXT_PHY_CNTL_4 23
+#define PHY_CNTL_4_ADDR_POS 11
+
+#define MSCC_PHY_VERIPHY_CNTL_2 25
+
+#define MSCC_PHY_VERIPHY_CNTL_3 26
+
+/* Extended Page 2 Registers */
+#define MSCC_PHY_CU_PMD_TX_CNTL 16
+
+/* RGMII setting controls at address 18E2, for VSC8572 and similar */
+#define VSC8572_RGMII_CNTL 18
+#define VSC8572_RGMII_RX_DELAY_MASK 0x000E
+#define VSC8572_RGMII_TX_DELAY_MASK 0x0070
+
+/* RGMII controls at address 20E2, for VSC8502 and similar */
+#define VSC8502_RGMII_CNTL 20
+#define VSC8502_RGMII_RX_DELAY_MASK 0x0070
+#define VSC8502_RGMII_TX_DELAY_MASK 0x0007
+
+#define MSCC_PHY_WOL_LOWER_MAC_ADDR 21
+#define MSCC_PHY_WOL_MID_MAC_ADDR 22
+#define MSCC_PHY_WOL_UPPER_MAC_ADDR 23
+#define MSCC_PHY_WOL_LOWER_PASSWD 24
+#define MSCC_PHY_WOL_MID_PASSWD 25
+#define MSCC_PHY_WOL_UPPER_PASSWD 26
+
+#define MSCC_PHY_WOL_MAC_CONTROL 27
+#define SECURE_ON_ENABLE 0x8000
+#define SECURE_ON_PASSWD_LEN_4 0x4000
+
+#define MSCC_PHY_EXTENDED_INT 28
+#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
+
+/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_TX_VALID_CNT 21
+#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
+#define MSCC_PHY_SERDES_RX_VALID_CNT 28
+#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT 29
+
+/* Extended page GPIO Registers */
+#define MSCC_DW8051_CNTL_STATUS 0
+#define MICRO_NSOFT_RESET 0x8000
+#define RUN_FROM_INT_ROM 0x4000
+#define AUTOINC_ADDR 0x2000
+#define PATCH_RAM_CLK 0x1000
+#define MICRO_PATCH_EN 0x0080
+#define DW8051_CLK_EN 0x0010
+#define MICRO_CLK_EN 0x0008
+#define MICRO_CLK_DIVIDE(x) ((x) >> 1)
+#define MSCC_DW8051_VLD_MASK 0xf1ff
+
+/* x Address in range 1-4 */
+#define MSCC_TRAP_ROM_ADDR(x) ((x) * 2 + 1)
+#define MSCC_PATCH_RAM_ADDR(x) (((x) + 1) * 2)
+#define MSCC_INT_MEM_ADDR 11
+
+#define MSCC_INT_MEM_CNTL 12
+#define READ_SFR 0x6000
+#define READ_PRAM 0x4000
+#define READ_ROM 0x2000
+#define READ_RAM 0x0000
+#define INT_MEM_WRITE_EN 0x1000
+#define EN_PATCH_RAM_TRAP_ADDR(x) (0x0100 << ((x) - 1))
+#define INT_MEM_DATA_M 0x00ff
+#define INT_MEM_DATA(x) (INT_MEM_DATA_M & (x))
+
+#define MSCC_PHY_PROC_CMD 18
+#define PROC_CMD_NCOMPLETED 0x8000
+#define PROC_CMD_FAILED 0x4000
+#define PROC_CMD_SGMII_PORT(x) ((x) << 8)
+#define PROC_CMD_FIBER_PORT(x) (0x0100 << (x) % 4)
+#define PROC_CMD_QSGMII_PORT 0x0c00
+#define PROC_CMD_RST_CONF_PORT 0x0080
+#define PROC_CMD_RECONF_PORT 0x0000
+#define PROC_CMD_READ_MOD_WRITE_PORT 0x0040
+#define PROC_CMD_WRITE 0x0040
+#define PROC_CMD_READ 0x0000
+#define PROC_CMD_FIBER_DISABLE 0x0020
+#define PROC_CMD_FIBER_100BASE_FX 0x0010
+#define PROC_CMD_FIBER_1000BASE_X 0x0000
+#define PROC_CMD_SGMII_MAC 0x0030
+#define PROC_CMD_QSGMII_MAC 0x0020
+#define PROC_CMD_NO_MAC_CONF 0x0000
+#define PROC_CMD_1588_DEFAULT_INIT 0x0010
+#define PROC_CMD_NOP 0x000f
+#define PROC_CMD_PHY_INIT 0x000a
+#define PROC_CMD_CRC16 0x0008
+#define PROC_CMD_FIBER_MEDIA_CONF 0x0001
+#define PROC_CMD_MCB_ACCESS_MAC_CONF 0x0000
+#define PROC_CMD_NCOMPLETED_TIMEOUT_MS 500
+
+#define MSCC_PHY_MAC_CFG_FASTLINK 19
+#define MAC_CFG_MASK 0xc000
+#define MAC_CFG_SGMII 0x0000
+#define MAC_CFG_QSGMII 0x4000
+#define MAC_CFG_RGMII 0x8000
+
+/* Test page Registers */
+#define MSCC_PHY_TEST_PAGE_5 5
+#define MSCC_PHY_TEST_PAGE_8 8
+#define MSCC_PHY_TEST_PAGE_9 9
+#define MSCC_PHY_TEST_PAGE_20 20
+#define MSCC_PHY_TEST_PAGE_24 24
+
+/* Token ring page Registers */
+#define MSCC_PHY_TR_CNTL 16
+#define TR_WRITE 0x8000
+#define TR_ADDR(x) (0x7fff & (x))
+#define MSCC_PHY_TR_LSB 17
+#define MSCC_PHY_TR_MSB 18
+
+/* Microsemi PHY IDs
+ * The code assumes the lowest nibble is 0
+ */
+#define PHY_ID_VSC8502 0x00070630
+#define PHY_ID_VSC8504 0x000704c0
+#define PHY_ID_VSC8514 0x00070670
+#define PHY_ID_VSC8530 0x00070560
+#define PHY_ID_VSC8531 0x00070570
+#define PHY_ID_VSC8540 0x00070760
+#define PHY_ID_VSC8541 0x00070770
+#define PHY_ID_VSC8552 0x000704e0
+#define PHY_ID_VSC856X 0x000707e0
+#define PHY_ID_VSC8572 0x000704d0
+#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8575 0x000707d0
+#define PHY_ID_VSC8582 0x000707b0
+#define PHY_ID_VSC8584 0x000707c0
+
+#define MSCC_VDDMAC_1500 1500
+#define MSCC_VDDMAC_1800 1800
+#define MSCC_VDDMAC_2500 2500
+#define MSCC_VDDMAC_3300 3300
+
+#define DOWNSHIFT_COUNT_MAX 5
+
+#define MAX_LEDS 4
+
+#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
+ BIT(VSC8531_LINK_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_100_ACTIVITY) | \
+ BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \
+ BIT(VSC8531_DUPLEX_COLLISION) | \
+ BIT(VSC8531_COLLISION) | \
+ BIT(VSC8531_ACTIVITY) | \
+ BIT(VSC8584_100FX_1000X_ACTIVITY) | \
+ BIT(VSC8531_AUTONEG_FAULT) | \
+ BIT(VSC8531_SERIAL_MODE) | \
+ BIT(VSC8531_FORCE_LED_OFF) | \
+ BIT(VSC8531_FORCE_LED_ON))
+
+#define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
+ BIT(VSC8531_LINK_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_100_ACTIVITY) | \
+ BIT(VSC8531_DUPLEX_COLLISION) | \
+ BIT(VSC8531_COLLISION) | \
+ BIT(VSC8531_ACTIVITY) | \
+ BIT(VSC8531_AUTONEG_FAULT) | \
+ BIT(VSC8531_SERIAL_MODE) | \
+ BIT(VSC8531_FORCE_LED_OFF) | \
+ BIT(VSC8531_FORCE_LED_ON))
+
+#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin"
+#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
+#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
+
+#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin"
+#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
+#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8
+
+#define VSC8584_REVB 0x0001
+#define MSCC_DEV_REV_MASK GENMASK(3, 0)
+
+struct reg_val {
+ u16 reg;
+ u32 val;
+};
+
+struct vsc85xx_hw_stat {
+ const char *string;
+ u8 reg;
+ u16 page;
+ u16 mask;
+};
+
+struct vsc8531_private {
+ int rate_magic;
+ u16 supp_led_modes;
+ u32 leds_mode[MAX_LEDS];
+ u8 nleds;
+ const struct vsc85xx_hw_stat *hw_stats;
+ u64 *stats;
+ int nstats;
+ bool pkg_init;
+ /* PHY address within the package. */
+ u8 addr;
+ /* For multiple-port PHYs, the MDIO address of the base PHY in the
+ * package.
+ */
+ unsigned int base_addr;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec fields:
+ * - One SecY per device (enforced at the s/w implementation level)
+ * - macsec_flows: list of h/w flows
+ * - ingr_flows: bitmap of ingress flows
+ * - egr_flows: bitmap of egress flows
+ */
+ struct macsec_secy *secy;
+ struct list_head macsec_flows;
+ unsigned long ingr_flows;
+ unsigned long egr_flows;
+#endif
+};
+
+#ifdef CONFIG_OF_MDIO
+struct vsc8531_edge_rate_table {
+ u32 vddmac;
+ u32 slowdown[8];
+};
+#endif /* CONFIG_OF_MDIO */
+
+#if IS_ENABLED(CONFIG_MACSEC)
+int vsc8584_macsec_init(struct phy_device *phydev);
+void vsc8584_handle_macsec_interrupt(struct phy_device *phydev);
+void vsc8584_config_macsec_intr(struct phy_device *phydev);
+#else
+static inline int vsc8584_macsec_init(struct phy_device *phydev)
+{
+ return 0;
+}
+static inline void vsc8584_handle_macsec_interrupt(struct phy_device *phydev)
+{
+}
+static inline void vsc8584_config_macsec_intr(struct phy_device *phydev)
+{
+}
+#endif
+
+#endif /* _MSCC_PHY_H_ */
diff --git a/drivers/net/phy/mscc_fc_buffer.h b/drivers/net/phy/mscc/mscc_fc_buffer.h
index 7e9c0e877895..3803e826c37d 100644
--- a/drivers/net/phy/mscc_fc_buffer.h
+++ b/drivers/net/phy/mscc/mscc_fc_buffer.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
- * Microsemi Ocelot Switch driver
+ * Driver for Microsemi VSC85xx PHYs
*
* Copyright (C) 2019 Microsemi Corporation
*/
-#ifndef _MSCC_OCELOT_FC_BUFFER_H_
-#define _MSCC_OCELOT_FC_BUFFER_H_
+#ifndef _MSCC_PHY_FC_BUFFER_H_
+#define _MSCC_PHY_FC_BUFFER_H_
#define MSCC_FCBUF_ENA_CFG 0x00
#define MSCC_FCBUF_MODE_CFG 0x01
@@ -61,4 +61,4 @@
#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(x) ((x) << 16)
#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH_M GENMASK(31, 16)
-#endif
+#endif /* _MSCC_PHY_FC_BUFFER_H_ */
diff --git a/drivers/net/phy/mscc_mac.h b/drivers/net/phy/mscc/mscc_mac.h
index 9420ee5175a6..59b6837c60b3 100644
--- a/drivers/net/phy/mscc_mac.h
+++ b/drivers/net/phy/mscc/mscc_mac.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
- * Microsemi Ocelot Switch driver
+ * Driver for Microsemi VSC85xx PHYs
*
* Copyright (c) 2017 Microsemi Corporation
*/
-#ifndef _MSCC_OCELOT_LINE_MAC_H_
-#define _MSCC_OCELOT_LINE_MAC_H_
+#ifndef _MSCC_PHY_LINE_MAC_H_
+#define _MSCC_PHY_LINE_MAC_H_
#define MSCC_MAC_CFG_ENA_CFG 0x00
#define MSCC_MAC_CFG_MODE_CFG 0x01
@@ -152,8 +152,8 @@
#define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0)
#define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4)
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x)
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0)
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x)
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0)
-#endif /* _MSCC_OCELOT_LINE_MAC_H_ */
+#endif /* _MSCC_PHY_LINE_MAC_H_ */
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
new file mode 100644
index 000000000000..b4d3dc4068e2
--- /dev/null
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -0,0 +1,1055 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Driver for Microsemi VSC85xx PHYs
+ *
+ * Author: Nagaraju Lakkaraju
+ * License: Dual MIT/GPL
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#include <linux/phy.h>
+#include <dt-bindings/net/mscc-phy-vsc8531.h>
+
+#include <crypto/skcipher.h>
+
+#include <net/macsec.h>
+
+#include "mscc.h"
+#include "mscc_mac.h"
+#include "mscc_macsec.h"
+#include "mscc_fc_buffer.h"
+
+static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg)
+{
+ u32 val, val_l = 0, val_h = 0;
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
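+ /* 32-bit CSRs are accessed indirectly: register 20 of the MACsec page
+ * selects the target block, register 19 carries the command and
+ * address, and registers 17/18 hold the data as two 16-bit halves.
+ */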
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if (bank >> 2 == 0x1)
+ /* non-MACsec access */
+ bank &= 0x3;
+ else
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
+ MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
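+ /* Poll until the hardware reports completion by setting the CMD bit,
+ * or the timeout expires.
+ */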
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+ val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
+ val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+
+ return (val_h << 16) | val_l;
+}
+
+static void vsc8584_macsec_phy_write(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg, u32 val)
+{
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
+ bank &= 0x3;
+ else
+ /* MACsec access */
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+}
+
+static void vsc8584_macsec_classification(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ /* enable VLAN tag parsing */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
+ MSCC_MS_SAM_CP_TAG_PARSE_STAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
+ enum macsec_bank bank,
+ bool block)
+{
+ u32 port = (bank == MACSEC_INGR) ?
+ MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
+ u32 action = MSCC_MS_FLOW_BYPASS;
+
+ if (block)
+ action = MSCC_MS_FLOW_DROP;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
+ /* KaY tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
+ /* KaY tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
+}
+
+static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+
+ if (bank != MACSEC_INGR)
+ return;
+
+ /* Set default rules to pass unmatched frames */
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MS_PARAMS2_IG_CC_CONTROL);
+ val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
+ MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
+ val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_block_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
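+ /* Hold the MACsec block in s/w reset, with bypass enabled, while it
+ * is being configured.
+ */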
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_SW_RST |
+ MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);
+
+ /* Take the MACsec block out of s/w reset and enable clocks */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
+ bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
+ MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
+ MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));
+
+ /* Clear the counters */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Enable octet increment mode */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
+ MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Set the MTU */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);
+
+ for (i = 0; i < 8; i++)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);
+
+ if (bank == MACSEC_EGR) {
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
+ val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
+ MSCC_MS_FC_CFG_FCBUF_ENA |
+ MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
+ MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
+ MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
+ MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
+ }
+
+ vsc8584_macsec_classification(phydev, bank);
+ vsc8584_macsec_flow_default_action(phydev, bank, false);
+ vsc8584_macsec_integrity_checks(phydev, bank);
+
+ /* Enable the MACsec block */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
+}
+
+static void vsc8584_macsec_mac_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ /* Clear host & line stats */
+ for (i = 0; i < 36; i++)
+ vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
+ val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
+ val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
+ val |= 0xffff;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
+ if (bank == HOST_MAC)
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
+ else
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
+ (bank == HOST_MAC ?
+ MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
+ val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
+ val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
+ val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
+ MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
+ val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
+ MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_RX_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_ENA);
+}
+
+/* Must be called with mdio_lock taken */
+static int __vsc8584_macsec_init(struct phy_device *phydev)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank proc_bank;
+ u32 val;
+
+ vsc8584_macsec_block_init(phydev, MACSEC_INGR);
+ vsc8584_macsec_block_init(phydev, MACSEC_EGR);
+ vsc8584_macsec_mac_init(phydev, HOST_MAC);
+ vsc8584_macsec_mac_init(phydev, LINE_MAC);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_FC_READ_THRESH_CFG,
+ MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
+ MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
+ val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
+ MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
+ MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG);
+ val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
+ val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
+ val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
+
+ proc_bank = (priv->addr < 2) ? PROC_0 : PROC_2;
+
+ val = vsc8584_macsec_phy_read(phydev, proc_bank,
+ MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL);
+ val &= ~MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
+ val |= MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
+ vsc8584_macsec_phy_write(phydev, proc_bank,
+ MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
+
+ return 0;
+}
+
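+/* Program the SA matching (SAM) entry for a flow: the match criteria (tag
+ * state, association number, SCI, ethertype, priority) and the action to
+ * apply to matching packets.
+ */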
+static void vsc8584_macsec_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ u32 val, match = 0, mask = 0, action = 0, idx = flow->index;
+
+ if (flow->match.tagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
+ if (flow->match.untagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;
+
+ if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
+ match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
+ mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
+ }
+
+ if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
+ match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
+ mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
+ MSCC_MS_SAM_MASK_SCI_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
+ lower_32_bits(flow->rx_sa->sc->sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
+ upper_32_bits(flow->rx_sa->sc->sci));
+ }
+
+ if (flow->match.etype) {
+ mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
+ MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
+ }
+
+ match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);
+
+ /* Action for matching packets */
+ if (flow->action.drop)
+ action = MSCC_MS_FLOW_DROP;
+ else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
+ action = MSCC_MS_FLOW_BYPASS;
+ else
+ action = (bank == MACSEC_INGR) ?
+ MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;
+
+ val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
+ MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);
+
+ if (action == MSCC_MS_FLOW_BYPASS)
+ goto write_ctrl;
+
+ if (bank == MACSEC_INGR) {
+ if (priv->secy->replay_protect)
+ val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
+ if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
+ else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
+ } else if (bank == MACSEC_EGR) {
+ if (priv->secy->protect_frames)
+ val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
+ if (priv->secy->tx_sc.encrypt)
+ val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
+ if (priv->secy->tx_sc.send_sci)
+ val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
+ }
+
+write_ctrl:
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
+ enum macsec_bank bank)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
+ if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
+ return pos;
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
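+ /* Do not enable the flow if its SA exists but is not active yet */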
+ if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
+ (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
+ return;
+
+ /* Enable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));
+
+ /* Set in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ /* Disable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));
+
+ /* Clear in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
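+/* Flow context IDs must be unique across both banks; offset ingress indices
+ * past the egress range to guarantee it.
+ */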
+static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
+{
+ if (flow->bank == MACSEC_INGR)
+ return flow->index + MSCC_MS_MAX_FLOWS;
+
+ return flow->index;
+}
+
+/* Derive the hash authentication key from the AES cipher key */
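+/* Per the GCM specification, the hash subkey is the encryption of an
+ * all-zero block under the cipher key, computed here with ecb(aes).
+ */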
+static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
+ u16 key_len, u8 hkey[16])
+{
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ struct skcipher_request *req = NULL;
+ struct scatterlist src, dst;
+ DECLARE_CRYPTO_WAIT(wait);
+ u32 input[4] = {0};
+ int ret;
+
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
+ &wait);
+ ret = crypto_skcipher_setkey(tfm, key, key_len);
+ if (ret < 0)
+ goto out;
+
+ sg_init_one(&src, input, 16);
+ sg_init_one(&dst, hkey, 16);
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+
+ ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return ret;
+}
+
+static int vsc8584_macsec_transformation(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ int i, ret, index = flow->index;
+ u32 rec = 0, control = 0;
+ u8 hkey[16];
+ sci_t sci;
+
+ ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
+ if (ret)
+ return ret;
+
+ switch (priv->secy->key_len) {
+ case 16:
+ control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_128);
+ break;
+ case 32:
+ control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_256);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ control |= (bank == MACSEC_EGR) ?
+ (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
+ (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);
+
+ control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
+ CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
+ CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
+ CONTROL_AUTH_ALG(AUTH_ALG_AES_GHAS) | CONTROL_CONTEXT_ID;
+
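+ /* Transform record layout, in 32-bit words: control word, context ID,
+ * cipher key, 4-word hash key, sequence number, replay window mask
+ * (ingress only), then the SCI; the rest of the record is zeroed.
+ */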
+ /* Set the control word */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ control);
+
+ /* Set the context ID. Must be unique. */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ vsc8584_macsec_flow_context_id(flow));
+
+ /* Set the encryption/decryption key */
+ for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)flow->key)[i]);
+
+ /* Set the authentication key */
+ for (i = 0; i < 4; i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)hkey)[i]);
+
+ /* Initial sequence number */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ bank == MACSEC_INGR ?
+ flow->rx_sa->next_pn : flow->tx_sa->next_pn);
+
+ if (bank == MACSEC_INGR)
+ /* Set the mask (replay window size) */
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ priv->secy->replay_window);
+
+ /* Set the input vectors */
+ sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ lower_32_bits(sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ upper_32_bits(sci));
+
+ while (rec < 20)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ 0);
+
+ flow->has_transformation = true;
+ return 0;
+}
+
+static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
+ enum macsec_bank bank)
+{
+ unsigned long *bitmap = bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+ struct macsec_flow *flow;
+ int index;
+
+ index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);
+
+ if (index == MSCC_MS_MAX_FLOWS)
+ return ERR_PTR(-ENOMEM);
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(index, bitmap);
+ flow->index = index;
+ flow->bank = bank;
+ flow->priority = 8;
+ flow->assoc_num = -1;
+
+ list_add_tail(&flow->list, &priv->macsec_flows);
+ return flow;
+}
+
+static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
+ struct macsec_flow *flow)
+{
+ unsigned long *bitmap = flow->bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+
+ list_del(&flow->list);
+ clear_bit(flow->index, bitmap);
+ kfree(flow);
+}
+
+static int vsc8584_macsec_add_flow(struct phy_device *phydev,
+ struct macsec_flow *flow, bool update)
+{
+ int ret;
+
+ flow->port = MSCC_MS_PORT_CONTROLLED;
+ vsc8584_macsec_flow(phydev, flow);
+
+ if (update)
+ return 0;
+
+ ret = vsc8584_macsec_transformation(phydev, flow);
+ if (ret) {
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_default_flows(struct phy_device *phydev)
+{
+ struct macsec_flow *flow;
+
+ /* Add a rule to let the MKA traffic go through, ingress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_UNCONTROLLED;
+ flow->match.tagged = 1;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ /* Add a rule to let the MKA traffic go through, egress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_COMMON;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ return 0;
+}
+
+static void vsc8584_macsec_del_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ vsc8584_macsec_flow_disable(phydev, flow);
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+}
+
+static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->rx_sa = ctx->sa.rx_sa;
+
+ /* Always match tagged packets on ingress */
+ flow->match.tagged = 1;
+ flow->match.sci = 1;
+
+ if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
+static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->tx_sa = ctx->sa.tx_sa;
+
+ /* Always match untagged packets on egress */
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
+static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_secy *secy = ctx->secy;
+
+ if (ctx->prepare) {
+ if (priv->secy)
+ return -EEXIST;
+
+ return 0;
+ }
+
+ priv->secy = secy;
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+
+ return vsc8584_macsec_default_flows(ctx->phydev);
+}
+
+static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);
+
+ priv->secy = NULL;
+ return 0;
+}
+
+static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
+{
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_secy(ctx);
+ return vsc8584_macsec_add_secy(ctx);
+}
+
+static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ if (flow->bank == MACSEC_INGR && flow->rx_sa &&
+ flow->rx_sa->sc->sci == ctx->rx_sc->sci)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_txsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_txsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static struct macsec_ops vsc8584_macsec_ops = {
+ .mdo_dev_open = vsc8584_macsec_dev_open,
+ .mdo_dev_stop = vsc8584_macsec_dev_stop,
+ .mdo_add_secy = vsc8584_macsec_add_secy,
+ .mdo_upd_secy = vsc8584_macsec_upd_secy,
+ .mdo_del_secy = vsc8584_macsec_del_secy,
+ .mdo_add_rxsc = vsc8584_macsec_add_rxsc,
+ .mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
+ .mdo_del_rxsc = vsc8584_macsec_del_rxsc,
+ .mdo_add_rxsa = vsc8584_macsec_add_rxsa,
+ .mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
+ .mdo_del_rxsa = vsc8584_macsec_del_rxsa,
+ .mdo_add_txsa = vsc8584_macsec_add_txsa,
+ .mdo_upd_txsa = vsc8584_macsec_upd_txsa,
+ .mdo_del_txsa = vsc8584_macsec_del_txsa,
+};
+
+int vsc8584_macsec_init(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
+ INIT_LIST_HEAD(&vsc8531->macsec_flows);
+ vsc8531->secy = NULL;
+
+ phydev->macsec_ops = &vsc8584_macsec_ops;
+
+ return __vsc8584_macsec_init(phydev);
+ }
+
+ return 0;
+}
+
+void vsc8584_handle_macsec_interrupt(struct phy_device *phydev)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow, *tmp;
+ u32 cause, rec;
+
+ /* Check MACsec PN rollover */
+ cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_INTR_CTRL_STATUS);
+ cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
+ if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
+ return;
+
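+ /* The sequence number sits after the control word, context ID, cipher
+ * key and 4-word hash key in the transform record, i.e. at word
+ * 6 + key_len / 4.
+ */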
+ rec = 6 + priv->secy->key_len / sizeof(u32);
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ u32 val;
+
+ if (flow->bank != MACSEC_EGR || !flow->has_transformation)
+ continue;
+
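+ /* A sequence number of 0xffffffff means the egress PN is about to
+ * wrap; disable the flow and let the core rotate the SA.
+ */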
+ val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_XFORM_REC(flow->index, rec));
+ if (val == 0xffffffff) {
+ vsc8584_macsec_flow_disable(phydev, flow);
+ macsec_pn_wrapped(priv->secy, flow->tx_sa);
+ return;
+ }
+ }
+}
+
+void vsc8584_config_macsec_intr(struct phy_device *phydev)
+{
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
+ phy_write(phydev, MSCC_PHY_EXTENDED_INT, MSCC_PHY_EXTENDED_INT_MS_EGR);
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR, MSCC_MS_AIC_CTRL, 0xf);
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR, MSCC_MS_INTR_CTRL_STATUS,
+ MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
+}
diff --git a/drivers/net/phy/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h
index d9ab6aba7482..d751f2946b79 100644
--- a/drivers/net/phy/mscc_macsec.h
+++ b/drivers/net/phy/mscc/mscc_macsec.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
- * Microsemi Ocelot Switch driver
+ * Driver for Microsemi VSC85xx PHYs
*
* Copyright (c) 2018 Microsemi Corporation
*/
-#ifndef _MSCC_OCELOT_MACSEC_H_
-#define _MSCC_OCELOT_MACSEC_H_
+#ifndef _MSCC_PHY_MACSEC_H_
+#define _MSCC_PHY_MACSEC_H_
+
+#include <net/macsec.h>
#define MSCC_MS_MAX_FLOWS 16
@@ -58,6 +60,63 @@ enum mscc_macsec_validate_levels {
MSCC_MS_VALIDATE_STRICT = 2,
};
+enum macsec_bank {
+ FC_BUFFER = 0x04,
+ HOST_MAC = 0x05,
+ LINE_MAC = 0x06,
+ PROC_0 = 0x0e,
+ PROC_2 = 0x0f,
+ MACSEC_INGR = 0x38,
+ MACSEC_EGR = 0x3c,
+};
+
+struct macsec_flow {
+ struct list_head list;
+ enum mscc_macsec_destination_ports port;
+ enum macsec_bank bank;
+ u32 index;
+ int assoc_num;
+ bool has_transformation;
+
+ /* Highest takes precedence [0..15] */
+ u8 priority;
+
+ u8 key[MACSEC_KEYID_LEN];
+
+ union {
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_tx_sa *tx_sa;
+ };
+
+ /* Matching */
+ struct {
+ u8 sci:1;
+ u8 tagged:1;
+ u8 untagged:1;
+ u8 etype:1;
+ } match;
+
+ u16 etype;
+
+ /* Action */
+ struct {
+ u8 bypass:1;
+ u8 drop:1;
+ } action;
+};
+
+#define MSCC_EXT_PAGE_MACSEC_17 17
+#define MSCC_EXT_PAGE_MACSEC_18 18
+
+#define MSCC_EXT_PAGE_MACSEC_19 19
+#define MSCC_PHY_MACSEC_19_REG_ADDR(x) (x)
+#define MSCC_PHY_MACSEC_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_MACSEC_19_READ BIT(14)
+#define MSCC_PHY_MACSEC_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_MACSEC_20 20
+#define MSCC_PHY_MACSEC_20_TARGET(x) (x)
+
#define MSCC_MS_XFORM_REC(x, y) (((x) << 5) + (y))
#define MSCC_MS_ENA_CFG 0x800
#define MSCC_MS_FC_CFG 0x804
@@ -263,4 +322,4 @@ enum mscc_macsec_validate_levels {
#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M GENMASK(31, 16)
#define MACSEC_INTR_CTRL_STATUS_ROLLOVER BIT(5)
-#endif
+#endif /* _MSCC_PHY_MACSEC_H_ */
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc/mscc_main.c
index f686f40f6bdc..c8aa6d905d8e 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -18,355 +18,7 @@
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
-#include <linux/scatterlist.h>
-#include <crypto/skcipher.h>
-
-#if IS_ENABLED(CONFIG_MACSEC)
-#include <net/macsec.h>
-#endif
-
-#include "mscc_macsec.h"
-#include "mscc_mac.h"
-#include "mscc_fc_buffer.h"
-
-enum rgmii_rx_clock_delay {
- RGMII_RX_CLK_DELAY_0_2_NS = 0,
- RGMII_RX_CLK_DELAY_0_8_NS = 1,
- RGMII_RX_CLK_DELAY_1_1_NS = 2,
- RGMII_RX_CLK_DELAY_1_7_NS = 3,
- RGMII_RX_CLK_DELAY_2_0_NS = 4,
- RGMII_RX_CLK_DELAY_2_3_NS = 5,
- RGMII_RX_CLK_DELAY_2_6_NS = 6,
- RGMII_RX_CLK_DELAY_3_4_NS = 7
-};
-
-/* Microsemi VSC85xx PHY registers */
-/* IEEE 802. Std Registers */
-#define MSCC_PHY_BYPASS_CONTROL 18
-#define DISABLE_HP_AUTO_MDIX_MASK 0x0080
-#define DISABLE_PAIR_SWAP_CORR_MASK 0x0020
-#define DISABLE_POLARITY_CORR_MASK 0x0010
-#define PARALLEL_DET_IGNORE_ADVERTISED 0x0008
-
-#define MSCC_PHY_EXT_CNTL_STATUS 22
-#define SMI_BROADCAST_WR_EN 0x0001
-
-#define MSCC_PHY_ERR_RX_CNT 19
-#define MSCC_PHY_ERR_FALSE_CARRIER_CNT 20
-#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT 21
-#define ERR_CNT_MASK GENMASK(7, 0)
-
-#define MSCC_PHY_EXT_PHY_CNTL_1 23
-#define MAC_IF_SELECTION_MASK 0x1800
-#define MAC_IF_SELECTION_GMII 0
-#define MAC_IF_SELECTION_RMII 1
-#define MAC_IF_SELECTION_RGMII 2
-#define MAC_IF_SELECTION_POS 11
-#define VSC8584_MAC_IF_SELECTION_MASK 0x1000
-#define VSC8584_MAC_IF_SELECTION_SGMII 0
-#define VSC8584_MAC_IF_SELECTION_1000BASEX 1
-#define VSC8584_MAC_IF_SELECTION_POS 12
-#define FAR_END_LOOPBACK_MODE_MASK 0x0008
-#define MEDIA_OP_MODE_MASK 0x0700
-#define MEDIA_OP_MODE_COPPER 0
-#define MEDIA_OP_MODE_SERDES 1
-#define MEDIA_OP_MODE_1000BASEX 2
-#define MEDIA_OP_MODE_100BASEFX 3
-#define MEDIA_OP_MODE_AMS_COPPER_SERDES 5
-#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX 6
-#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX 7
-#define MEDIA_OP_MODE_POS 8
-
-#define MSCC_PHY_EXT_PHY_CNTL_2 24
-
-#define MII_VSC85XX_INT_MASK 25
-#define MII_VSC85XX_INT_MASK_MASK 0xa020
-#define MII_VSC85XX_INT_MASK_WOL 0x0040
-#define MII_VSC85XX_INT_STATUS 26
-
-#define MSCC_PHY_WOL_MAC_CONTROL 27
-#define EDGE_RATE_CNTL_POS 5
-#define EDGE_RATE_CNTL_MASK 0x00E0
-
-#define MSCC_PHY_DEV_AUX_CNTL 28
-#define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000
-
-#define MSCC_PHY_LED_MODE_SEL 29
-#define LED_MODE_SEL_POS(x) ((x) * 4)
-#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
-#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
-
-#define MSCC_EXT_PAGE_CSR_CNTL_17 17
-#define MSCC_EXT_PAGE_CSR_CNTL_18 18
-
-#define MSCC_EXT_PAGE_CSR_CNTL_19 19
-#define MSCC_PHY_CSR_CNTL_19_REG_ADDR(x) (x)
-#define MSCC_PHY_CSR_CNTL_19_TARGET(x) ((x) << 12)
-#define MSCC_PHY_CSR_CNTL_19_READ BIT(14)
-#define MSCC_PHY_CSR_CNTL_19_CMD BIT(15)
-
-#define MSCC_EXT_PAGE_CSR_CNTL_20 20
-#define MSCC_PHY_CSR_CNTL_20_TARGET(x) (x)
-
-#define PHY_MCB_TARGET 0x07
-#define PHY_MCB_S6G_WRITE BIT(31)
-#define PHY_MCB_S6G_READ BIT(30)
-
-#define PHY_S6G_PLL5G_CFG0 0x06
-#define PHY_S6G_LCPLL_CFG 0x11
-#define PHY_S6G_PLL_CFG 0x2b
-#define PHY_S6G_COMMON_CFG 0x2c
-#define PHY_S6G_GPC_CFG 0x2e
-#define PHY_S6G_MISC_CFG 0x3b
-#define PHY_MCB_S6G_CFG 0x3f
-#define PHY_S6G_DFT_CFG2 0x3e
-#define PHY_S6G_PLL_STATUS 0x31
-#define PHY_S6G_IB_STATUS0 0x2f
-
-#define PHY_S6G_SYS_RST_POS 31
-#define PHY_S6G_ENA_LANE_POS 18
-#define PHY_S6G_ENA_LOOP_POS 8
-#define PHY_S6G_QRATE_POS 6
-#define PHY_S6G_IF_MODE_POS 4
-#define PHY_S6G_PLL_ENA_OFFS_POS 21
-#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
-#define PHY_S6G_PLL_FSM_ENA_POS 7
-
-#define MSCC_EXT_PAGE_MACSEC_17 17
-#define MSCC_EXT_PAGE_MACSEC_18 18
-
-#define MSCC_EXT_PAGE_MACSEC_19 19
-#define MSCC_PHY_MACSEC_19_REG_ADDR(x) (x)
-#define MSCC_PHY_MACSEC_19_TARGET(x) ((x) << 12)
-#define MSCC_PHY_MACSEC_19_READ BIT(14)
-#define MSCC_PHY_MACSEC_19_CMD BIT(15)
-
-#define MSCC_EXT_PAGE_MACSEC_20 20
-#define MSCC_PHY_MACSEC_20_TARGET(x) (x)
-enum macsec_bank {
- FC_BUFFER = 0x04,
- HOST_MAC = 0x05,
- LINE_MAC = 0x06,
- IP_1588 = 0x0e,
- MACSEC_INGR = 0x38,
- MACSEC_EGR = 0x3c,
-};
-
-#define MSCC_EXT_PAGE_ACCESS 31
-#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
-#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
-#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
-#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
-#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
-#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
-#define MSCC_PHY_PAGE_MACSEC MSCC_PHY_PAGE_EXTENDED_4
-/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
- * in the same package.
- */
-#define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */
-#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */
-#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */
-
-/* Extended Page 1 Registers */
-#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18
-#define VALID_CRC_CNT_CRC_MASK GENMASK(13, 0)
-
-#define MSCC_PHY_EXT_MODE_CNTL 19
-#define FORCE_MDI_CROSSOVER_MASK 0x000C
-#define FORCE_MDI_CROSSOVER_MDIX 0x000C
-#define FORCE_MDI_CROSSOVER_MDI 0x0008
-
-#define MSCC_PHY_ACTIPHY_CNTL 20
-#define PHY_ADDR_REVERSED 0x0200
-#define DOWNSHIFT_CNTL_MASK 0x001C
-#define DOWNSHIFT_EN 0x0010
-#define DOWNSHIFT_CNTL_POS 2
-
-#define MSCC_PHY_EXT_PHY_CNTL_4 23
-#define PHY_CNTL_4_ADDR_POS 11
-
-#define MSCC_PHY_VERIPHY_CNTL_2 25
-
-#define MSCC_PHY_VERIPHY_CNTL_3 26
-
-/* Extended Page 2 Registers */
-#define MSCC_PHY_CU_PMD_TX_CNTL 16
-
-#define MSCC_PHY_RGMII_CNTL 20
-#define RGMII_RX_CLK_DELAY_MASK 0x0070
-#define RGMII_RX_CLK_DELAY_POS 4
-
-#define MSCC_PHY_WOL_LOWER_MAC_ADDR 21
-#define MSCC_PHY_WOL_MID_MAC_ADDR 22
-#define MSCC_PHY_WOL_UPPER_MAC_ADDR 23
-#define MSCC_PHY_WOL_LOWER_PASSWD 24
-#define MSCC_PHY_WOL_MID_PASSWD 25
-#define MSCC_PHY_WOL_UPPER_PASSWD 26
-
-#define MSCC_PHY_WOL_MAC_CONTROL 27
-#define SECURE_ON_ENABLE 0x8000
-#define SECURE_ON_PASSWD_LEN_4 0x4000
-
-#define MSCC_PHY_EXTENDED_INT 28
-#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
-
-/* Extended Page 3 Registers */
-#define MSCC_PHY_SERDES_TX_VALID_CNT 21
-#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
-#define MSCC_PHY_SERDES_RX_VALID_CNT 28
-#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT 29
-
-/* Extended page GPIO Registers */
-#define MSCC_DW8051_CNTL_STATUS 0
-#define MICRO_NSOFT_RESET 0x8000
-#define RUN_FROM_INT_ROM 0x4000
-#define AUTOINC_ADDR 0x2000
-#define PATCH_RAM_CLK 0x1000
-#define MICRO_PATCH_EN 0x0080
-#define DW8051_CLK_EN 0x0010
-#define MICRO_CLK_EN 0x0008
-#define MICRO_CLK_DIVIDE(x) ((x) >> 1)
-#define MSCC_DW8051_VLD_MASK 0xf1ff
-
-/* x Address in range 1-4 */
-#define MSCC_TRAP_ROM_ADDR(x) ((x) * 2 + 1)
-#define MSCC_PATCH_RAM_ADDR(x) (((x) + 1) * 2)
-#define MSCC_INT_MEM_ADDR 11
-
-#define MSCC_INT_MEM_CNTL 12
-#define READ_SFR 0x6000
-#define READ_PRAM 0x4000
-#define READ_ROM 0x2000
-#define READ_RAM 0x0000
-#define INT_MEM_WRITE_EN 0x1000
-#define EN_PATCH_RAM_TRAP_ADDR(x) (0x0100 << ((x) - 1))
-#define INT_MEM_DATA_M 0x00ff
-#define INT_MEM_DATA(x) (INT_MEM_DATA_M & (x))
-
-#define MSCC_PHY_PROC_CMD 18
-#define PROC_CMD_NCOMPLETED 0x8000
-#define PROC_CMD_FAILED 0x4000
-#define PROC_CMD_SGMII_PORT(x) ((x) << 8)
-#define PROC_CMD_FIBER_PORT(x) (0x0100 << (x) % 4)
-#define PROC_CMD_QSGMII_PORT 0x0c00
-#define PROC_CMD_RST_CONF_PORT 0x0080
-#define PROC_CMD_RECONF_PORT 0x0000
-#define PROC_CMD_READ_MOD_WRITE_PORT 0x0040
-#define PROC_CMD_WRITE 0x0040
-#define PROC_CMD_READ 0x0000
-#define PROC_CMD_FIBER_DISABLE 0x0020
-#define PROC_CMD_FIBER_100BASE_FX 0x0010
-#define PROC_CMD_FIBER_1000BASE_X 0x0000
-#define PROC_CMD_SGMII_MAC 0x0030
-#define PROC_CMD_QSGMII_MAC 0x0020
-#define PROC_CMD_NO_MAC_CONF 0x0000
-#define PROC_CMD_1588_DEFAULT_INIT 0x0010
-#define PROC_CMD_NOP 0x000f
-#define PROC_CMD_PHY_INIT 0x000a
-#define PROC_CMD_CRC16 0x0008
-#define PROC_CMD_FIBER_MEDIA_CONF 0x0001
-#define PROC_CMD_MCB_ACCESS_MAC_CONF 0x0000
-#define PROC_CMD_NCOMPLETED_TIMEOUT_MS 500
-
-#define MSCC_PHY_MAC_CFG_FASTLINK 19
-#define MAC_CFG_MASK 0xc000
-#define MAC_CFG_SGMII 0x0000
-#define MAC_CFG_QSGMII 0x4000
-
-/* Test page Registers */
-#define MSCC_PHY_TEST_PAGE_5 5
-#define MSCC_PHY_TEST_PAGE_8 8
-#define MSCC_PHY_TEST_PAGE_9 9
-#define MSCC_PHY_TEST_PAGE_20 20
-#define MSCC_PHY_TEST_PAGE_24 24
-
-/* Token ring page Registers */
-#define MSCC_PHY_TR_CNTL 16
-#define TR_WRITE 0x8000
-#define TR_ADDR(x) (0x7fff & (x))
-#define MSCC_PHY_TR_LSB 17
-#define MSCC_PHY_TR_MSB 18
-
-/* Microsemi PHY ID's
- * Code assumes lowest nibble is 0
- */
-#define PHY_ID_VSC8504 0x000704c0
-#define PHY_ID_VSC8514 0x00070670
-#define PHY_ID_VSC8530 0x00070560
-#define PHY_ID_VSC8531 0x00070570
-#define PHY_ID_VSC8540 0x00070760
-#define PHY_ID_VSC8541 0x00070770
-#define PHY_ID_VSC8552 0x000704e0
-#define PHY_ID_VSC856X 0x000707e0
-#define PHY_ID_VSC8572 0x000704d0
-#define PHY_ID_VSC8574 0x000704a0
-#define PHY_ID_VSC8575 0x000707d0
-#define PHY_ID_VSC8582 0x000707b0
-#define PHY_ID_VSC8584 0x000707c0
-
-#define MSCC_VDDMAC_1500 1500
-#define MSCC_VDDMAC_1800 1800
-#define MSCC_VDDMAC_2500 2500
-#define MSCC_VDDMAC_3300 3300
-
-#define DOWNSHIFT_COUNT_MAX 5
-
-#define MAX_LEDS 4
-
-#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
- BIT(VSC8531_LINK_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_100_ACTIVITY) | \
- BIT(VSC8531_LINK_10_ACTIVITY) | \
- BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_10_100_ACTIVITY) | \
- BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \
- BIT(VSC8531_DUPLEX_COLLISION) | \
- BIT(VSC8531_COLLISION) | \
- BIT(VSC8531_ACTIVITY) | \
- BIT(VSC8584_100FX_1000X_ACTIVITY) | \
- BIT(VSC8531_AUTONEG_FAULT) | \
- BIT(VSC8531_SERIAL_MODE) | \
- BIT(VSC8531_FORCE_LED_OFF) | \
- BIT(VSC8531_FORCE_LED_ON))
-
-#define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
- BIT(VSC8531_LINK_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_100_ACTIVITY) | \
- BIT(VSC8531_LINK_10_ACTIVITY) | \
- BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_10_100_ACTIVITY) | \
- BIT(VSC8531_DUPLEX_COLLISION) | \
- BIT(VSC8531_COLLISION) | \
- BIT(VSC8531_ACTIVITY) | \
- BIT(VSC8531_AUTONEG_FAULT) | \
- BIT(VSC8531_SERIAL_MODE) | \
- BIT(VSC8531_FORCE_LED_OFF) | \
- BIT(VSC8531_FORCE_LED_ON))
-
-#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin"
-#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
-#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
-
-#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin"
-#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
-#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8
-
-#define VSC8584_REVB 0x0001
-#define MSCC_DEV_REV_MASK GENMASK(3, 0)
-
-struct reg_val {
- u16 reg;
- u32 val;
-};
-
-struct vsc85xx_hw_stat {
- const char *string;
- u8 reg;
- u16 page;
- u16 mask;
-};
+#include "mscc.h"
static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
{
@@ -446,85 +98,14 @@ static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
},
};
-#if IS_ENABLED(CONFIG_MACSEC)
-struct macsec_flow {
- struct list_head list;
- enum mscc_macsec_destination_ports port;
- enum macsec_bank bank;
- u32 index;
- int assoc_num;
- bool has_transformation;
-
- /* Highest takes precedence [0..15] */
- u8 priority;
-
- u8 key[MACSEC_KEYID_LEN];
-
- union {
- struct macsec_rx_sa *rx_sa;
- struct macsec_tx_sa *tx_sa;
- };
-
- /* Matching */
- struct {
- u8 sci:1;
- u8 tagged:1;
- u8 untagged:1;
- u8 etype:1;
- } match;
-
- u16 etype;
-
- /* Action */
- struct {
- u8 bypass:1;
- u8 drop:1;
- } action;
-
-};
-#endif
-
-struct vsc8531_private {
- int rate_magic;
- u16 supp_led_modes;
- u32 leds_mode[MAX_LEDS];
- u8 nleds;
- const struct vsc85xx_hw_stat *hw_stats;
- u64 *stats;
- int nstats;
- bool pkg_init;
- /* For multiple port PHYs; the MDIO address of the base PHY in the
- * package.
- */
- unsigned int base_addr;
-
-#if IS_ENABLED(CONFIG_MACSEC)
- /* MACsec fields:
- * - One SecY per device (enforced at the s/w implementation level)
- * - macsec_flows: list of h/w flows
- * - ingr_flows: bitmap of ingress flows
- * - egr_flows: bitmap of egress flows
- */
- struct macsec_secy *secy;
- struct list_head macsec_flows;
- unsigned long ingr_flows;
- unsigned long egr_flows;
-#endif
-};
-
#ifdef CONFIG_OF_MDIO
-struct vsc8531_edge_rate_table {
- u32 vddmac;
- u32 slowdown[8];
-};
-
static const struct vsc8531_edge_rate_table edge_table[] = {
{MSCC_VDDMAC_3300, { 0, 2, 4, 7, 10, 17, 29, 53} },
{MSCC_VDDMAC_2500, { 0, 3, 6, 10, 14, 23, 37, 63} },
{MSCC_VDDMAC_1800, { 0, 5, 9, 16, 23, 35, 52, 76} },
{MSCC_VDDMAC_1500, { 0, 6, 14, 21, 29, 42, 58, 77} },
};
-#endif /* CONFIG_OF_MDIO */
+#endif
static int vsc85xx_phy_read_page(struct phy_device *phydev)
{
@@ -910,6 +491,9 @@ static int vsc85xx_mac_if_set(struct phy_device *phydev,
reg_val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
reg_val &= ~(MAC_IF_SELECTION_MASK);
switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
reg_val |= (MAC_IF_SELECTION_RGMII << MAC_IF_SELECTION_POS);
break;
@@ -936,18 +520,34 @@ out_unlock:
return rc;
}
-static int vsc85xx_default_config(struct phy_device *phydev)
+/* Set the RGMII RX and TX clock skews individually, according to the PHY
+ * interface type, to:
+ * * 0.2 ns (their default and lowest hardware value) if delays should
+ * not be enabled
+ * * 2.0 ns (which causes the data to be sampled exactly halfway between
+ * clock transitions at 1000 Mbps) if delays should be enabled
+ */
+static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
+ u16 rgmii_rx_delay_mask,
+ u16 rgmii_tx_delay_mask)
{
+ u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
+ u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
+ u16 reg_val = 0;
int rc;
- u16 reg_val;
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->lock);
- reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_rx_delay_pos;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
- MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK,
+ rgmii_cntl,
+ rgmii_rx_delay_mask | rgmii_tx_delay_mask,
reg_val);
mutex_unlock(&phydev->lock);
@@ -955,6 +555,23 @@ static int vsc85xx_default_config(struct phy_device *phydev)
return rc;
}
+static int vsc85xx_default_config(struct phy_device *phydev)
+{
+ int rc;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ if (phy_interface_mode_is_rgmii(phydev->interface)) {
+ rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL,
+ VSC8502_RGMII_RX_DELAY_MASK,
+ VSC8502_RGMII_TX_DELAY_MASK);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static int vsc85xx_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data)
{
@@ -1670,978 +1287,6 @@ out:
return ret;
}
-#if IS_ENABLED(CONFIG_MACSEC)
-static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
- enum macsec_bank bank, u32 reg)
-{
- u32 val, val_l = 0, val_h = 0;
- unsigned long deadline;
- int rc;
-
- rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
- if (rc < 0)
- goto failed;
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
- MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
-
- if (bank >> 2 == 0x1)
- /* non-MACsec access */
- bank &= 0x3;
- else
- bank = 0;
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
- MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
- MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
- MSCC_PHY_MACSEC_19_TARGET(bank));
-
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
- } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
-
- val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
- val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);
-
-failed:
- phy_restore_page(phydev, rc, rc);
-
- return (val_h << 16) | val_l;
-}
-
-static void vsc8584_macsec_phy_write(struct phy_device *phydev,
- enum macsec_bank bank, u32 reg, u32 val)
-{
- unsigned long deadline;
- int rc;
-
- rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
- if (rc < 0)
- goto failed;
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
- MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
-
- if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
- bank &= 0x3;
- else
- /* MACsec access */
- bank = 0;
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
- MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
- MSCC_PHY_MACSEC_19_TARGET(bank));
-
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
- } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
-
-failed:
- phy_restore_page(phydev, rc, rc);
-}
-
-static void vsc8584_macsec_classification(struct phy_device *phydev,
- enum macsec_bank bank)
-{
- /* enable VLAN tag parsing */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
- MSCC_MS_SAM_CP_TAG_PARSE_STAG |
- MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
- MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
-}
-
-static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
- enum macsec_bank bank,
- bool block)
-{
- u32 port = (bank == MACSEC_INGR) ?
- MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
- u32 action = MSCC_MS_FLOW_BYPASS;
-
- if (block)
- action = MSCC_MS_FLOW_DROP;
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
- /* MACsec untagged */
- MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
- /* MACsec tagged */
- MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
- /* Bad tag */
- MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
- /* Kay tag */
- MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
- /* MACsec untagged */
- MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
- /* MACsec tagged */
- MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
- /* Bad tag */
- MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
- /* Kay tag */
- MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
-}
-
-static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
- enum macsec_bank bank)
-{
- u32 val;
-
- if (bank != MACSEC_INGR)
- return;
-
- /* Set default rules to pass unmatched frames */
- val = vsc8584_macsec_phy_read(phydev, bank,
- MSCC_MS_PARAMS2_IG_CC_CONTROL);
- val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
- MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
- val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
- MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
- MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
- MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
-}
-
-static void vsc8584_macsec_block_init(struct phy_device *phydev,
- enum macsec_bank bank)
-{
- u32 val;
- int i;
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
- MSCC_MS_ENA_CFG_SW_RST |
- MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);
-
- /* Set the MACsec block out of s/w reset and enable clocks */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
- MSCC_MS_ENA_CFG_CLK_ENA);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
- bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
- MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
- MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));
-
- /* Clear the counters */
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
- val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
-
- /* Enable octet increment mode */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
- MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);
-
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
- val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
-
- /* Set the MTU */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
- MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
- MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);
-
- for (i = 0; i < 8; i++)
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
- MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
- MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);
-
- if (bank == MACSEC_EGR) {
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
- val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
- MSCC_MS_FC_CFG_FCBUF_ENA |
- MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
- MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
- MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
- MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
- }
-
- vsc8584_macsec_classification(phydev, bank);
- vsc8584_macsec_flow_default_action(phydev, bank, false);
- vsc8584_macsec_integrity_checks(phydev, bank);
-
- /* Enable the MACsec block */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
- MSCC_MS_ENA_CFG_CLK_ENA |
- MSCC_MS_ENA_CFG_MACSEC_ENA |
- MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
-}
-
-static void vsc8584_macsec_mac_init(struct phy_device *phydev,
- enum macsec_bank bank)
-{
- u32 val;
- int i;
-
- /* Clear host & line stats */
- for (i = 0; i < 36; i++)
- vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);
-
- val = vsc8584_macsec_phy_read(phydev, bank,
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
- val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
- val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);
-
- val = vsc8584_macsec_phy_read(phydev, bank,
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
- val |= 0xffff;
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);
-
- val = vsc8584_macsec_phy_read(phydev, bank,
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
- if (bank == HOST_MAC)
- val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
- else
- val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
- MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
- MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
- MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
- MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
- MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
- (bank == HOST_MAC ?
- MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
-
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
- val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);
-
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
- val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
- val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
- MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
- MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
- MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
- MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);
-
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
- val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
- MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
- MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
- MSCC_MAC_CFG_ENA_CFG_RX_ENA |
- MSCC_MAC_CFG_ENA_CFG_TX_ENA);
-}
-
-/* Must be called with mdio_lock taken */
-static int vsc8584_macsec_init(struct phy_device *phydev)
-{
- u32 val;
-
- vsc8584_macsec_block_init(phydev, MACSEC_INGR);
- vsc8584_macsec_block_init(phydev, MACSEC_EGR);
- vsc8584_macsec_mac_init(phydev, HOST_MAC);
- vsc8584_macsec_mac_init(phydev, LINE_MAC);
-
- vsc8584_macsec_phy_write(phydev, FC_BUFFER,
- MSCC_FCBUF_FC_READ_THRESH_CFG,
- MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
- MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));
-
- val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
- val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
- MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
- MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
- vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);
-
- vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
- MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
- MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));
-
- val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
- MSCC_FCBUF_TX_DATA_QUEUE_CFG);
- val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
- MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
- val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
- MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
- vsc8584_macsec_phy_write(phydev, FC_BUFFER,
- MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);
-
- val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
- val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
- vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
-
- val = vsc8584_macsec_phy_read(phydev, IP_1588,
- MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
- val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
- val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
- vsc8584_macsec_phy_write(phydev, IP_1588,
- MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
-
- return 0;
-}
-
-static void vsc8584_macsec_flow(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- struct vsc8531_private *priv = phydev->priv;
- enum macsec_bank bank = flow->bank;
- u32 val, match = 0, mask = 0, action = 0, idx = flow->index;
-
- if (flow->match.tagged)
- match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
- if (flow->match.untagged)
- match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;
-
- if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
- match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
- mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
- }
-
- if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
- match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
- mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
- MSCC_MS_SAM_MASK_SCI_MASK;
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
- lower_32_bits(flow->rx_sa->sc->sci));
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
- upper_32_bits(flow->rx_sa->sc->sci));
- }
-
- if (flow->match.etype) {
- mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
- MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
- }
-
- match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);
-
- /* Action for matching packets */
- if (flow->action.drop)
- action = MSCC_MS_FLOW_DROP;
- else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
- action = MSCC_MS_FLOW_BYPASS;
- else
- action = (bank == MACSEC_INGR) ?
- MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;
-
- val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
- MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);
-
- if (action == MSCC_MS_FLOW_BYPASS)
- goto write_ctrl;
-
- if (bank == MACSEC_INGR) {
- if (priv->secy->replay_protect)
- val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
- if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
- val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
- else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
- val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
- } else if (bank == MACSEC_EGR) {
- if (priv->secy->protect_frames)
- val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
- if (priv->secy->tx_sc.encrypt)
- val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
- if (priv->secy->tx_sc.send_sci)
- val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
- }
-
-write_ctrl:
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
-}
-
-static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
- enum macsec_bank bank)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *pos, *tmp;
-
- list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
- if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
- return pos;
-
- return ERR_PTR(-ENOENT);
-}
-
-static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- enum macsec_bank bank = flow->bank;
- u32 val, idx = flow->index;
-
- if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
- (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
- return;
-
- /* Enable */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));
-
- /* Set in-use */
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
- val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
-}
-
-static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- enum macsec_bank bank = flow->bank;
- u32 val, idx = flow->index;
-
- /* Disable */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));
-
- /* Clear in-use */
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
- val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
-}
-
-static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
-{
- if (flow->bank == MACSEC_INGR)
- return flow->index + MSCC_MS_MAX_FLOWS;
-
- return flow->index;
-}
-
-/* Derive the AES key to get a key for the hash authentication */
-static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
- u16 key_len, u8 hkey[16])
-{
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- struct skcipher_request *req = NULL;
- struct scatterlist src, dst;
- DECLARE_CRYPTO_WAIT(wait);
- u32 input[4] = {0};
- int ret;
-
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- req = skcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- ret = -ENOMEM;
- goto out;
- }
-
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
- &wait);
- ret = crypto_skcipher_setkey(tfm, key, key_len);
- if (ret < 0)
- goto out;
-
- sg_init_one(&src, input, 16);
- sg_init_one(&dst, hkey, 16);
- skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
-
- ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
-
-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return ret;
-}
-
-static int vsc8584_macsec_transformation(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- struct vsc8531_private *priv = phydev->priv;
- enum macsec_bank bank = flow->bank;
- int i, ret, index = flow->index;
- u32 rec = 0, control = 0;
- u8 hkey[16];
- sci_t sci;
-
- ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
- if (ret)
- return ret;
-
- switch (priv->secy->key_len) {
- case 16:
- control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_128);
- break;
- case 32:
- control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_256);
- break;
- default:
- return -EINVAL;
- }
-
- control |= (bank == MACSEC_EGR) ?
- (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
- (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);
-
- control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
- CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
- CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
- CONTROL_AUTH_ALG(AUTH_ALG_AES_GHAS) | CONTROL_CONTEXT_ID;
-
- /* Set the control word */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- control);
-
- /* Set the context ID. Must be unique. */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- vsc8584_macsec_flow_context_id(flow));
-
- /* Set the encryption/decryption key */
- for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MS_XFORM_REC(index, rec++),
- ((u32 *)flow->key)[i]);
-
- /* Set the authentication key */
- for (i = 0; i < 4; i++)
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MS_XFORM_REC(index, rec++),
- ((u32 *)hkey)[i]);
-
- /* Initial sequence number */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- bank == MACSEC_INGR ?
- flow->rx_sa->next_pn : flow->tx_sa->next_pn);
-
- if (bank == MACSEC_INGR)
- /* Set the mask (replay window size) */
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MS_XFORM_REC(index, rec++),
- priv->secy->replay_window);
-
- /* Set the input vectors */
- sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- lower_32_bits(sci));
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- upper_32_bits(sci));
-
- while (rec < 20)
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- 0);
-
- flow->has_transformation = true;
- return 0;
-}
-
-static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
- enum macsec_bank bank)
-{
- unsigned long *bitmap = bank == MACSEC_INGR ?
- &priv->ingr_flows : &priv->egr_flows;
- struct macsec_flow *flow;
- int index;
-
- index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);
-
- if (index == MSCC_MS_MAX_FLOWS)
- return ERR_PTR(-ENOMEM);
-
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
- if (!flow)
- return ERR_PTR(-ENOMEM);
-
- set_bit(index, bitmap);
- flow->index = index;
- flow->bank = bank;
- flow->priority = 8;
- flow->assoc_num = -1;
-
- list_add_tail(&flow->list, &priv->macsec_flows);
- return flow;
-}
-
-static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
- struct macsec_flow *flow)
-{
- unsigned long *bitmap = flow->bank == MACSEC_INGR ?
- &priv->ingr_flows : &priv->egr_flows;
-
- list_del(&flow->list);
- clear_bit(flow->index, bitmap);
- kfree(flow);
-}
-
-static int vsc8584_macsec_add_flow(struct phy_device *phydev,
- struct macsec_flow *flow, bool update)
-{
- int ret;
-
- flow->port = MSCC_MS_PORT_CONTROLLED;
- vsc8584_macsec_flow(phydev, flow);
-
- if (update)
- return 0;
-
- ret = vsc8584_macsec_transformation(phydev, flow);
- if (ret) {
- vsc8584_macsec_free_flow(phydev->priv, flow);
- return ret;
- }
-
- return 0;
-}
-
-static int vsc8584_macsec_default_flows(struct phy_device *phydev)
-{
- struct macsec_flow *flow;
-
- /* Add a rule to let the MKA traffic go through, ingress */
- flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- flow->priority = 15;
- flow->port = MSCC_MS_PORT_UNCONTROLLED;
- flow->match.tagged = 1;
- flow->match.untagged = 1;
- flow->match.etype = 1;
- flow->etype = ETH_P_PAE;
- flow->action.bypass = 1;
-
- vsc8584_macsec_flow(phydev, flow);
- vsc8584_macsec_flow_enable(phydev, flow);
-
- /* Add a rule to let the MKA traffic go through, egress */
- flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- flow->priority = 15;
- flow->port = MSCC_MS_PORT_COMMON;
- flow->match.untagged = 1;
- flow->match.etype = 1;
- flow->etype = ETH_P_PAE;
- flow->action.bypass = 1;
-
- vsc8584_macsec_flow(phydev, flow);
- vsc8584_macsec_flow_enable(phydev, flow);
-
- return 0;
-}
-
-static void vsc8584_macsec_del_flow(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- vsc8584_macsec_flow_disable(phydev, flow);
- vsc8584_macsec_free_flow(phydev->priv, flow);
-}
-
-static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
- struct macsec_flow *flow, bool update)
-{
- struct phy_device *phydev = ctx->phydev;
- struct vsc8531_private *priv = phydev->priv;
-
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
- flow->assoc_num = ctx->sa.assoc_num;
- flow->rx_sa = ctx->sa.rx_sa;
-
- /* Always match tagged packets on ingress */
- flow->match.tagged = 1;
- flow->match.sci = 1;
-
- if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
- flow->match.untagged = 1;
-
- return vsc8584_macsec_add_flow(phydev, flow, update);
-}
-
-static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
- struct macsec_flow *flow, bool update)
-{
- struct phy_device *phydev = ctx->phydev;
- struct vsc8531_private *priv = phydev->priv;
-
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
- flow->assoc_num = ctx->sa.assoc_num;
- flow->tx_sa = ctx->sa.tx_sa;
-
- /* Always match untagged packets on egress */
- flow->match.untagged = 1;
-
- return vsc8584_macsec_add_flow(phydev, flow, update);
-}
-
-static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *flow, *tmp;
-
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
-
- return 0;
-}
-
-static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *flow, *tmp;
-
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
-
- return 0;
-}
-
-static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_secy *secy = ctx->secy;
-
- if (ctx->prepare) {
- if (priv->secy)
- return -EEXIST;
-
- return 0;
- }
-
- priv->secy = secy;
-
- vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
- secy->validate_frames != MACSEC_VALIDATE_DISABLED);
- vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
- secy->validate_frames != MACSEC_VALIDATE_DISABLED);
-
- return vsc8584_macsec_default_flows(ctx->phydev);
-}
-
-static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *flow, *tmp;
-
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
- vsc8584_macsec_del_flow(ctx->phydev, flow);
-
- vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
- vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);
-
- priv->secy = NULL;
- return 0;
-}
-
-static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
-{
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- vsc8584_macsec_del_secy(ctx);
- return vsc8584_macsec_add_secy(ctx);
-}
-
-static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
-{
- /* Nothing to do */
- return 0;
-}
-
-static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
-{
- return -EOPNOTSUPP;
-}
-
-static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *flow, *tmp;
-
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
- if (flow->bank == MACSEC_INGR && flow->rx_sa &&
- flow->rx_sa->sc->sci == ctx->rx_sc->sci)
- vsc8584_macsec_del_flow(ctx->phydev, flow);
- }
-
- return 0;
-}
-
-static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_rxsa(ctx, flow, false);
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow;
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
-
- return __vsc8584_macsec_add_rxsa(ctx, flow, true);
- }
-
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow;
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
-
- if (IS_ERR(flow))
- return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
-
- vsc8584_macsec_del_flow(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_txsa(ctx, flow, false);
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow;
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
-
- return __vsc8584_macsec_add_txsa(ctx, flow, true);
- }
-
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow;
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
-
- if (IS_ERR(flow))
- return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
-
- vsc8584_macsec_del_flow(ctx->phydev, flow);
- return 0;
-}
-
-static struct macsec_ops vsc8584_macsec_ops = {
- .mdo_dev_open = vsc8584_macsec_dev_open,
- .mdo_dev_stop = vsc8584_macsec_dev_stop,
- .mdo_add_secy = vsc8584_macsec_add_secy,
- .mdo_upd_secy = vsc8584_macsec_upd_secy,
- .mdo_del_secy = vsc8584_macsec_del_secy,
- .mdo_add_rxsc = vsc8584_macsec_add_rxsc,
- .mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
- .mdo_del_rxsc = vsc8584_macsec_del_rxsc,
- .mdo_add_rxsa = vsc8584_macsec_add_rxsa,
- .mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
- .mdo_del_rxsa = vsc8584_macsec_del_rxsa,
- .mdo_add_txsa = vsc8584_macsec_add_txsa,
- .mdo_upd_txsa = vsc8584_macsec_upd_txsa,
- .mdo_del_txsa = vsc8584_macsec_del_txsa,
-};
-#endif /* CONFIG_MACSEC */
-
/* Check if one PHY has already done the init of the parts common to all PHYs
* in the Quad PHY package.
*/
@@ -2702,6 +1347,8 @@ static int vsc8584_config_init(struct phy_device *phydev)
else
vsc8531->base_addr = phydev->mdio.addr - addr;
+ vsc8531->addr = addr;
+
/* Some parts of the init sequence are identical for every PHY in the
* package. Some parts are modifying the GPIO register bank which is a
* set of registers that are affecting all PHYs, a few resetting the
@@ -2751,27 +1398,35 @@ static int vsc8584_config_init(struct phy_device *phydev)
val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
val &= ~MAC_CFG_MASK;
- if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) {
val |= MAC_CFG_QSGMII;
- else
+ } else if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val |= MAC_CFG_SGMII;
+ } else if (phy_interface_is_rgmii(phydev)) {
+ val |= MAC_CFG_RGMII;
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
if (ret)
goto err;
- val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
- PROC_CMD_READ_MOD_WRITE_PORT;
- if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
- val |= PROC_CMD_QSGMII_MAC;
- else
- val |= PROC_CMD_SGMII_MAC;
+ if (!phy_interface_is_rgmii(phydev)) {
+ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT;
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ val |= PROC_CMD_QSGMII_MAC;
+ else
+ val |= PROC_CMD_SGMII_MAC;
- ret = vsc8584_cmd(phydev, val);
- if (ret)
- goto err;
+ ret = vsc8584_cmd(phydev, val);
+ if (ret)
+ goto err;
- usleep_range(10000, 20000);
+ usleep_range(10000, 20000);
+ }
/* Disable SerDes for 100Base-FX */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
@@ -2791,31 +1446,27 @@ static int vsc8584_config_init(struct phy_device *phydev)
mutex_unlock(&phydev->mdio.bus->mdio_lock);
-#if IS_ENABLED(CONFIG_MACSEC)
- /* MACsec */
- switch (phydev->phy_id & phydev->drv->phy_id_mask) {
- case PHY_ID_VSC856X:
- case PHY_ID_VSC8575:
- case PHY_ID_VSC8582:
- case PHY_ID_VSC8584:
- INIT_LIST_HEAD(&vsc8531->macsec_flows);
- vsc8531->secy = NULL;
-
- phydev->macsec_ops = &vsc8584_macsec_ops;
-
- ret = vsc8584_macsec_init(phydev);
- if (ret)
- goto err;
- }
-#endif
+ ret = vsc8584_macsec_init(phydev);
+ if (ret)
+ return ret;
phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK);
- val |= MEDIA_OP_MODE_COPPER | (VSC8584_MAC_IF_SELECTION_SGMII <<
- VSC8584_MAC_IF_SELECTION_POS);
+ val |= (MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS) |
+ (VSC8584_MAC_IF_SELECTION_SGMII << VSC8584_MAC_IF_SELECTION_POS);
ret = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, val);
+ if (ret)
+ return ret;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL,
+ VSC8572_RGMII_RX_DELAY_MASK,
+ VSC8572_RGMII_TX_DELAY_MASK);
+ if (ret)
+ return ret;
+ }
ret = genphy_soft_reset(phydev);
if (ret)
@@ -2834,41 +1485,21 @@ err:
return ret;
}
-static int vsc8584_handle_interrupt(struct phy_device *phydev)
+static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev)
{
-#if IS_ENABLED(CONFIG_MACSEC)
- struct vsc8531_private *priv = phydev->priv;
- struct macsec_flow *flow, *tmp;
- u32 cause, rec;
+ int irq_status;
- /* Check MACsec PN rollover */
- cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
- MSCC_MS_INTR_CTRL_STATUS);
- cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
- if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
- goto skip_rollover;
+ irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (irq_status < 0 || !(irq_status & MII_VSC85XX_INT_MASK_MASK))
+ return IRQ_NONE;
- rec = 6 + priv->secy->key_len / sizeof(u32);
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
- u32 val;
+ if (irq_status & MII_VSC85XX_INT_MASK_EXT)
+ vsc8584_handle_macsec_interrupt(phydev);
- if (flow->bank != MACSEC_EGR || !flow->has_transformation)
- continue;
+ if (irq_status & MII_VSC85XX_INT_MASK_LINK_CHG)
+ phy_mac_interrupt(phydev);
- val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
- MSCC_MS_XFORM_REC(flow->index, rec));
- if (val == 0xffffffff) {
- vsc8584_macsec_flow_disable(phydev, flow);
- macsec_pn_wrapped(priv->secy, flow->tx_sa);
- break;
- }
- }
-
-skip_rollover:
-#endif
-
- phy_mac_interrupt(phydev);
- return 0;
+ return IRQ_HANDLED;
}
static int vsc85xx_config_init(struct phy_device *phydev)
@@ -3142,6 +1773,8 @@ static int vsc8514_config_init(struct phy_device *phydev)
else
vsc8531->base_addr = phydev->mdio.addr - addr;
+ vsc8531->addr = addr;
+
/* Some parts of the init sequence are identical for every PHY in the
* package. Some parts are modifying the GPIO register bank which is a
* set of registers that are affecting all PHYs, a few resetting the
@@ -3276,7 +1909,7 @@ static int vsc8514_config_init(struct phy_device *phydev)
return ret;
ret = phy_modify(phydev, MSCC_PHY_EXT_PHY_CNTL_1, MEDIA_OP_MODE_MASK,
- MEDIA_OP_MODE_COPPER);
+ MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS);
if (ret)
return ret;
@@ -3314,20 +1947,8 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
-#if IS_ENABLED(CONFIG_MACSEC)
- phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
- MSCC_PHY_PAGE_EXTENDED_2);
- phy_write(phydev, MSCC_PHY_EXTENDED_INT,
- MSCC_PHY_EXTENDED_INT_MS_EGR);
- phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
- MSCC_PHY_PAGE_STANDARD);
-
- vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
- MSCC_MS_AIC_CTRL, 0xf);
- vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
- MSCC_MS_INTR_CTRL_STATUS,
- MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
-#endif
+ vsc8584_config_macsec_intr(phydev);
+
rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
MII_VSC85XX_INT_MASK_MASK);
} else {
@@ -3475,6 +2096,30 @@ static int vsc85xx_probe(struct phy_device *phydev)
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8502,
+ .name = "Microsemi GE VSC8502 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_BASIC_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8504,
.name = "Microsemi GE VSC8504 SyncE",
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index b705d0bd798b..47caae770ffc 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -72,20 +72,10 @@ static struct tja11xx_phy_stats tja11xx_hw_stats[] = {
static int tja11xx_check(struct phy_device *phydev, u8 reg, u16 mask, u16 set)
{
- int i, ret;
-
- for (i = 0; i < 200; i++) {
- ret = phy_read(phydev, reg);
- if (ret < 0)
- return ret;
-
- if ((ret & mask) == set)
- return 0;
-
- usleep_range(100, 150);
- }
+ int val;
- return -ETIMEDOUT;
+ return phy_read_poll_timeout(phydev, reg, val, (val & mask) == set,
+ 150, 30000, false);
}
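
phy_read_poll_timeout() absorbs the open-coded retry loop: it re-reads the register every sleep_us microseconds until the condition holds or timeout_us elapses, returning 0 on success and a negative errno (a read failure or -ETIMEDOUT) otherwise; the final argument selects whether to sleep once before the first read. A minimal sketch of another use, built only on standard MII definitions from <linux/mii.h> (myphy_wait_aneg_done is a hypothetical helper):

    /* Wait up to 500 ms, polling every 10 ms, for autoneg to finish. */
    static int myphy_wait_aneg_done(struct phy_device *phydev)
    {
            int val;

            return phy_read_poll_timeout(phydev, MII_BMSR, val,
                                         val & BMSR_ANEGCOMPLETE,
                                         10000, 500000, false);
    }
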
static int phy_modify_check(struct phy_device *phydev, u8 reg,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index dd2e23fb67c0..67ba47ae5284 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -239,9 +239,10 @@ int genphy_c45_read_link(struct phy_device *phydev)
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
- * in polling mode to detect such short link drops.
+ * in polling mode to detect such short link drops except
+ * when the link was already down.
*/
- if (!phy_polling_mode(phydev)) {
+ if (!phy_polling_mode(phydev) || !phydev->link) {
val = phy_read_mmd(phydev, devad, MDIO_STAT1);
if (val < 0)
return val;
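
The latch works because the standard makes the link-status bit sticky-low: the first read returns (and clears) the latched value, the second returns the live state. A minimal sketch of the idiom this hunk tunes:

    int val;

    val = phy_read_mmd(phydev, devad, MDIO_STAT1);  /* latched, clears */
    if (val < 0)
            return val;
    val = phy_read_mmd(phydev, devad, MDIO_STAT1);  /* current state */

Skipping the first read in polling mode (as above, unless the link was already down) keeps a momentary drop visible for one poll interval.
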
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index a4d2d59fceca..66b8c61ca74c 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,7 +8,7 @@
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 74,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 75,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -329,6 +329,44 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
+/**
+ * phy_check_downshift - check whether downshift occurred
+ * @phydev: The phy_device struct
+ *
+ * Check whether a downshift to a lower speed occurred. If so, warn the user.
+ * Prerequisite for detecting downshift is that the PHY driver implements
+ * the read_status callback and sets phydev->speed to the actual link speed.
+ */
+void phy_check_downshift(struct phy_device *phydev)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ int i, speed = SPEED_UNKNOWN;
+
+ phydev->downshifted_rate = 0;
+
+ if (phydev->autoneg == AUTONEG_DISABLE ||
+ phydev->speed == SPEED_UNKNOWN)
+ return;
+
+ linkmode_and(common, phydev->lp_advertising, phydev->advertising);
+
+ for (i = 0; i < ARRAY_SIZE(settings); i++)
+ if (test_bit(settings[i].bit, common)) {
+ speed = settings[i].speed;
+ break;
+ }
+
+ if (speed == SPEED_UNKNOWN || phydev->speed >= speed)
+ return;
+
+ phydev_warn(phydev, "Downshift occurred from negotiated speed %s to actual speed %s, check cabling!\n",
+ phy_speed_to_str(speed), phy_speed_to_str(phydev->speed));
+
+ phydev->downshifted_rate = 1;
+}
+EXPORT_SYMBOL_GPL(phy_check_downshift);
+
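
The core calls phy_check_downshift() on link-up (see the phy.c hunk below), so a driver only has to ensure its read_status callback reports the real line speed. A minimal sketch, with a hypothetical myphy_read_status:

    static int myphy_read_status(struct phy_device *phydev)
    {
            int ret;

            ret = genphy_read_status(phydev);   /* fills phydev->speed */
            if (ret)
                    return ret;

            /* optionally refine phydev->speed from vendor registers */
            return 0;
    }
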
static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
@@ -489,37 +527,6 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
EXPORT_SYMBOL(phy_write_mmd);
/**
- * __phy_modify_changed() - Convenience function for modifying a PHY register
- * @phydev: a pointer to a &struct phy_device
- * @regnum: register number
- * @mask: bit mask of bits to clear
- * @set: bit mask of bits to set
- *
- * Unlocked helper function which allows a PHY register to be modified as
- * new register value = (old register value & ~mask) | set
- *
- * Returns negative errno, 0 if there was no change, and 1 in case of change
- */
-int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
- u16 set)
-{
- int new, ret;
-
- ret = __phy_read(phydev, regnum);
- if (ret < 0)
- return ret;
-
- new = (ret & ~mask) | set;
- if (new == ret)
- return 0;
-
- ret = __phy_write(phydev, regnum, new);
-
- return ret < 0 ? ret : 1;
-}
-EXPORT_SYMBOL_GPL(__phy_modify_changed);
-
-/**
* phy_modify_changed - Function for modifying a PHY register
* @phydev: the phy_device struct
* @regnum: register number to modify
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 355bfdef48d2..20ca6418f7bc 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -96,9 +96,10 @@ void phy_print_status(struct phy_device *phydev)
{
if (phydev->link) {
netdev_info(phydev->attached_dev,
- "Link is Up - %s/%s - flow control %s\n",
+ "Link is Up - %s/%s %s- flow control %s\n",
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
+ phydev->downshifted_rate ? "(downshifted) " : "",
phy_pause_str(phydev));
} else {
netdev_info(phydev->attached_dev, "Link is Down\n");
@@ -507,6 +508,7 @@ static int phy_check_link_status(struct phy_device *phydev)
return err;
if (phydev->link && phydev->state != PHY_RUNNING) {
+ phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
phy_link_up(phydev);
} else if (!phydev->link && phydev->state != PHY_NOLINK) {
@@ -715,26 +717,24 @@ static int phy_disable_interrupts(struct phy_device *phydev)
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
+ struct phy_driver *drv = phydev->drv;
- if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
+ if (drv->handle_interrupt)
+ return drv->handle_interrupt(phydev);
+
+ if (drv->did_interrupt && !drv->did_interrupt(phydev))
return IRQ_NONE;
- if (phydev->drv->handle_interrupt) {
- if (phydev->drv->handle_interrupt(phydev))
- goto phy_err;
- } else {
- /* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev);
- }
+ /* reschedule state queue work to run as soon as possible */
+ phy_trigger_machine(phydev);
/* did_interrupt() may have cleared the interrupt already */
- if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev))
- goto phy_err;
- return IRQ_HANDLED;
+ if (!drv->did_interrupt && phy_clear_interrupt(phydev)) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
-phy_err:
- phy_error(phydev);
- return IRQ_NONE;
+ return IRQ_HANDLED;
}
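
Under the reworked handler, a driver's .handle_interrupt owns the whole IRQ path and returns an irqreturn_t itself, as vsc8584_handle_interrupt does above. A minimal sketch with hypothetical MYPHY_* register names:

    static irqreturn_t myphy_handle_interrupt(struct phy_device *phydev)
    {
            int irq_status;

            irq_status = phy_read(phydev, MYPHY_INT_STATUS); /* read acks */
            if (irq_status < 0 || !(irq_status & MYPHY_INT_LINK_CHG))
                    return IRQ_NONE;

            /* run the phylib state machine as soon as possible */
            phy_trigger_machine(phydev);
            return IRQ_HANDLED;
    }
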
/**
@@ -1132,9 +1132,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
/* Restart autonegotiation so the new modes get sent to the
* link partner.
*/
- ret = phy_restart_aneg(phydev);
- if (ret < 0)
- return ret;
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = phy_restart_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 28e3c5c0e3c3..697c74deb222 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1059,18 +1059,12 @@ EXPORT_SYMBOL(phy_disconnect);
static int phy_poll_reset(struct phy_device *phydev)
{
/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
- unsigned int retries = 12;
- int ret;
-
- do {
- msleep(50);
- ret = phy_read(phydev, MII_BMCR);
- if (ret < 0)
- return ret;
- } while (ret & BMCR_RESET && --retries);
- if (ret & BMCR_RESET)
- return -ETIMEDOUT;
+ int ret, val;
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
/* Some chips (smsc911x) may still need up to another 1ms after the
* BMCR_RESET bit is cleared before they are usable.
*/
@@ -1239,7 +1233,7 @@ int phy_sfp_probe(struct phy_device *phydev,
const struct sfp_upstream_ops *ops)
{
struct sfp_bus *bus;
- int ret;
+ int ret = 0;
if (phydev->mdio.dev.fwnode) {
bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode);
@@ -1251,7 +1245,7 @@ int phy_sfp_probe(struct phy_device *phydev,
ret = sfp_bus_add_upstream(bus, phydev, ops);
sfp_bus_put(bus);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL(phy_sfp_probe);
@@ -1525,23 +1519,22 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
- struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
- struct net_device *netdev = phydev->attached_dev;
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
- int ret = 0;
+ struct net_device *netdev = phydev->attached_dev;
+ struct phy_driver *phydrv = phydev->drv;
+ int ret;
/* If the device has WOL enabled, we cannot suspend the PHY */
phy_ethtool_get_wol(phydev, &wol);
if (wol.wolopts || (netdev && netdev->wol_enabled))
return -EBUSY;
- if (phydev->drv && phydrv->suspend)
- ret = phydrv->suspend(phydev);
-
- if (ret)
- return ret;
+ if (!phydrv || !phydrv->suspend)
+ return 0;
- phydev->suspended = true;
+ ret = phydrv->suspend(phydev);
+ if (!ret)
+ phydev->suspended = true;
return ret;
}
@@ -1549,18 +1542,17 @@ EXPORT_SYMBOL(phy_suspend);
int __phy_resume(struct phy_device *phydev)
{
- struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
- int ret = 0;
+ struct phy_driver *phydrv = phydev->drv;
+ int ret;
WARN_ON(!mutex_is_locked(&phydev->lock));
- if (phydev->drv && phydrv->resume)
- ret = phydrv->resume(phydev);
-
- if (ret)
- return ret;
+ if (!phydrv || !phydrv->resume)
+ return 0;
- phydev->suspended = false;
+ ret = phydrv->resume(phydev);
+ if (!ret)
+ phydev->suspended = false;
return ret;
}
@@ -1935,9 +1927,10 @@ int genphy_update_link(struct phy_device *phydev)
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
- * in polling mode to detect such short link drops.
+ * in polling mode to detect such short link drops except
+ * when the link was already down.
*/
- if (!phy_polling_mode(phydev)) {
+ if (!phy_polling_mode(phydev) || !phydev->link) {
status = phy_read(phydev, MII_BMSR);
if (status < 0)
return status;
@@ -2366,22 +2359,7 @@ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx)
__ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv);
linkmode_copy(oldadv, phydev->advertising);
-
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
-
- if (rx) {
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->advertising);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
- }
-
- if (tx)
- linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
+ linkmode_set_pause(phydev->advertising, tx, rx);
if (!linkmode_equal(oldadv, phydev->advertising) &&
phydev->autoneg)
@@ -2414,6 +2392,32 @@ bool phy_validate_pause(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_validate_pause);
+/**
+ * phy_get_pause - resolve negotiated pause modes
+ * @phydev: phy_device struct
+ * @tx_pause: pointer to bool to indicate whether transmit pause should be
+ * enabled.
+ * @rx_pause: pointer to bool to indicate whether receive pause should be
+ * enabled.
+ *
+ * Resolve and return the flow control modes according to the negotiation
+ * result. This includes checking that we are operating in full duplex mode.
+ * See linkmode_resolve_pause() for further details.
+ */
+void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause)
+{
+ if (phydev->duplex != DUPLEX_FULL) {
+ *tx_pause = false;
+ *rx_pause = false;
+ return;
+ }
+
+ return linkmode_resolve_pause(phydev->advertising,
+ phydev->lp_advertising,
+ tx_pause, rx_pause);
+}
+EXPORT_SYMBOL(phy_get_pause);
+
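
A sketch of the intended MAC-side use, with a hypothetical mymac_adjust_link: once the PHY reports link-up, resolve the negotiated pause modes and program the MAC's flow-control registers from the two booleans:

    static void mymac_adjust_link(struct net_device *ndev)
    {
            struct phy_device *phydev = ndev->phydev;
            bool tx_pause, rx_pause;

            if (!phydev->link)
                    return;

            phy_get_pause(phydev, &tx_pause, &rx_pause);
            /* ...write tx_pause/rx_pause into the MAC's FC config... */
    }
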
static bool phy_drv_supports_irq(struct phy_driver *phydrv)
{
return phydrv->config_intr && phydrv->ack_interrupt;
@@ -2571,6 +2575,7 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.probe = phy_probe;
new_driver->mdiodrv.driver.remove = phy_remove;
new_driver->mdiodrv.driver.owner = owner;
+ new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
retval = driver_register(&new_driver->mdiodrv.driver);
if (retval) {
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 6e66b8e77ec7..34ca12aec61b 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -40,7 +40,8 @@ enum {
struct phylink {
/* private: */
struct net_device *netdev;
- const struct phylink_mac_ops *ops;
+ const struct phylink_mac_ops *mac_ops;
+ const struct phylink_pcs_ops *pcs_ops;
struct phylink_config *config;
struct device *dev;
unsigned int old_link_state:1;
@@ -154,7 +155,7 @@ static const char *phylink_an_mode_str(unsigned int mode)
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
- pl->ops->validate(pl->config, supported, state);
+ pl->mac_ops->validate(pl->config, supported, state);
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
@@ -181,9 +182,11 @@ static int phylink_parse_fixedlink(struct phylink *pl,
/* We treat the "pause" and "asym-pause" terminology as
* defining the link partner's ability. */
if (fwnode_property_read_bool(fixed_node, "pause"))
- pl->link_config.pause |= MLO_PAUSE_SYM;
+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ pl->link_config.lp_advertising);
if (fwnode_property_read_bool(fixed_node, "asym-pause"))
- pl->link_config.pause |= MLO_PAUSE_ASYM;
+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ pl->link_config.lp_advertising);
if (ret == 0) {
desc = fwnode_gpiod_get_index(fixed_node, "link", 0,
@@ -215,9 +218,11 @@ static int phylink_parse_fixedlink(struct phylink *pl,
DUPLEX_FULL : DUPLEX_HALF;
pl->link_config.speed = prop[2];
if (prop[3])
- pl->link_config.pause |= MLO_PAUSE_SYM;
+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ pl->link_config.lp_advertising);
if (prop[4])
- pl->link_config.pause |= MLO_PAUSE_ASYM;
+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ pl->link_config.lp_advertising);
}
}
@@ -308,11 +313,13 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 1000baseT_Half);
phylink_set(pl->supported, 1000baseT_Full);
phylink_set(pl->supported, 1000baseX_Full);
+ phylink_set(pl->supported, 1000baseKX_Full);
phylink_set(pl->supported, 2500baseT_Full);
phylink_set(pl->supported, 2500baseX_Full);
phylink_set(pl->supported, 5000baseT_Full);
phylink_set(pl->supported, 10000baseT_Full);
phylink_set(pl->supported, 10000baseKR_Full);
+ phylink_set(pl->supported, 10000baseKX4_Full);
phylink_set(pl->supported, 10000baseCR_Full);
phylink_set(pl->supported, 10000baseSR_Full);
phylink_set(pl->supported, 10000baseLR_Full);
@@ -320,6 +327,33 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 10000baseER_Full);
break;
+ case PHY_INTERFACE_MODE_XLGMII:
+ phylink_set(pl->supported, 25000baseCR_Full);
+ phylink_set(pl->supported, 25000baseKR_Full);
+ phylink_set(pl->supported, 25000baseSR_Full);
+ phylink_set(pl->supported, 40000baseKR4_Full);
+ phylink_set(pl->supported, 40000baseCR4_Full);
+ phylink_set(pl->supported, 40000baseSR4_Full);
+ phylink_set(pl->supported, 40000baseLR4_Full);
+ phylink_set(pl->supported, 50000baseCR2_Full);
+ phylink_set(pl->supported, 50000baseKR2_Full);
+ phylink_set(pl->supported, 50000baseSR2_Full);
+ phylink_set(pl->supported, 50000baseKR_Full);
+ phylink_set(pl->supported, 50000baseSR_Full);
+ phylink_set(pl->supported, 50000baseCR_Full);
+ phylink_set(pl->supported, 50000baseLR_ER_FR_Full);
+ phylink_set(pl->supported, 50000baseDR_Full);
+ phylink_set(pl->supported, 100000baseKR4_Full);
+ phylink_set(pl->supported, 100000baseSR4_Full);
+ phylink_set(pl->supported, 100000baseCR4_Full);
+ phylink_set(pl->supported, 100000baseLR4_ER4_Full);
+ phylink_set(pl->supported, 100000baseKR2_Full);
+ phylink_set(pl->supported, 100000baseSR2_Full);
+ phylink_set(pl->supported, 100000baseCR2_Full);
+ phylink_set(pl->supported, 100000baseLR2_ER2_FR2_Full);
+ phylink_set(pl->supported, 100000baseDR2_Full);
+ break;
+
default:
phylink_err(pl,
"incorrect link mode %s for in-band status\n",
@@ -334,11 +368,42 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
"failed to validate link configuration for in-band status\n");
return -EINVAL;
}
+
+ /* Check if MAC/PCS also supports Autoneg. */
+ pl->link_config.an_enabled = phylink_test(pl->supported, Autoneg);
}
return 0;
}
+static void phylink_apply_manual_flow(struct phylink *pl,
+ struct phylink_link_state *state)
+{
+ /* If autoneg is disabled, pause AN is also disabled */
+ if (!state->an_enabled)
+ state->pause &= ~MLO_PAUSE_AN;
+
+ /* Manual configuration of pause modes */
+ if (!(pl->link_config.pause & MLO_PAUSE_AN))
+ state->pause = pl->link_config.pause;
+}
+
+static void phylink_resolve_flow(struct phylink_link_state *state)
+{
+ bool tx_pause, rx_pause;
+
+ state->pause = MLO_PAUSE_NONE;
+ if (state->duplex == DUPLEX_FULL) {
+ linkmode_resolve_pause(state->advertising,
+ state->lp_advertising,
+ &tx_pause, &rx_pause);
+ if (tx_pause)
+ state->pause |= MLO_PAUSE_TX;
+ if (rx_pause)
+ state->pause |= MLO_PAUSE_RX;
+ }
+}
+
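
linkmode_resolve_pause() encapsulates the 802.3 resolution that the comment removed further down tabulated (Pause/AsymDir of both ends yielding TX+RX, TX, or RX). A standalone sketch of a call, assuming two link-mode masks are at hand:

    bool tx_pause, rx_pause;

    /* local advertisement vs. link partner advertisement */
    linkmode_resolve_pause(state->advertising, state->lp_advertising,
                           &tx_pause, &rx_pause);
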
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
@@ -351,7 +416,7 @@ static void phylink_mac_config(struct phylink *pl,
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
- pl->ops->mac_config(pl->config, pl->cur_link_an_mode, state);
+ pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, state);
}
static void phylink_mac_config_up(struct phylink *pl,
@@ -361,11 +426,32 @@ static void phylink_mac_config_up(struct phylink *pl,
phylink_mac_config(pl, state);
}
-static void phylink_mac_an_restart(struct phylink *pl)
+static void phylink_mac_pcs_an_restart(struct phylink *pl)
{
if (pl->link_config.an_enabled &&
- phy_interface_mode_is_8023z(pl->link_config.interface))
- pl->ops->mac_an_restart(pl->config);
+ phy_interface_mode_is_8023z(pl->link_config.interface)) {
+ if (pl->pcs_ops)
+ pl->pcs_ops->pcs_an_restart(pl->config);
+ else
+ pl->mac_ops->mac_an_restart(pl->config);
+ }
+}
+
+static void phylink_pcs_config(struct phylink *pl, bool force_restart,
+ const struct phylink_link_state *state)
+{
+ bool restart = force_restart;
+
+ if (pl->pcs_ops && pl->pcs_ops->pcs_config(pl->config,
+ pl->cur_link_an_mode,
+ state->interface,
+ state->advertising))
+ restart = true;
+
+ phylink_mac_config(pl, state);
+
+ if (restart)
+ phylink_mac_pcs_an_restart(pl);
}
static void phylink_mac_pcs_get_state(struct phylink *pl,
@@ -381,55 +467,54 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->an_complete = 0;
state->link = 1;
- pl->ops->mac_pcs_get_state(pl->config, state);
+ if (pl->pcs_ops)
+ pl->pcs_ops->pcs_get_state(pl->config, state);
+ else
+ pl->mac_ops->mac_pcs_get_state(pl->config, state);
}
/* The fixed state is... fixed except for the link state,
* which may be determined by a GPIO or a callback.
*/
-static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
+static void phylink_get_fixed_state(struct phylink *pl,
+ struct phylink_link_state *state)
{
*state = pl->link_config;
if (pl->get_fixed_state)
pl->get_fixed_state(pl->netdev, state);
else if (pl->link_gpio)
state->link = !!gpiod_get_value_cansleep(pl->link_gpio);
+
+ phylink_resolve_flow(state);
}
-/* Flow control is resolved according to our and the link partners
- * advertisements using the following drawn from the 802.3 specs:
- * Local device Link partner
- * Pause AsymDir Pause AsymDir Result
- * 1 X 1 X TX+RX
- * 0 1 1 1 TX
- * 1 1 0 1 RX
- */
-static void phylink_resolve_flow(struct phylink *pl,
- struct phylink_link_state *state)
+static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
{
- int new_pause = 0;
+ struct phylink_link_state link_state;
- if (pl->link_config.pause & MLO_PAUSE_AN) {
- int pause = 0;
+ switch (pl->cur_link_an_mode) {
+ case MLO_AN_PHY:
+ link_state = pl->phy_state;
+ break;
- if (phylink_test(pl->link_config.advertising, Pause))
- pause |= MLO_PAUSE_SYM;
- if (phylink_test(pl->link_config.advertising, Asym_Pause))
- pause |= MLO_PAUSE_ASYM;
+ case MLO_AN_FIXED:
+ phylink_get_fixed_state(pl, &link_state);
+ break;
- pause &= state->pause;
+ case MLO_AN_INBAND:
+ link_state = pl->link_config;
+ if (link_state.interface == PHY_INTERFACE_MODE_SGMII)
+ link_state.pause = MLO_PAUSE_NONE;
+ break;
- if (pause & MLO_PAUSE_SYM)
- new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
- else if (pause & MLO_PAUSE_ASYM)
- new_pause = state->pause & MLO_PAUSE_SYM ?
- MLO_PAUSE_TX : MLO_PAUSE_RX;
- } else {
- new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
+ default: /* can't happen */
+ return;
}
- state->pause &= ~MLO_PAUSE_TXRX_MASK;
- state->pause |= new_pause;
+ link_state.link = false;
+
+ phylink_apply_manual_flow(pl, &link_state);
+ phylink_pcs_config(pl, force_restart, &link_state);
}
static const char *phylink_pause_to_str(int pause)
@@ -446,14 +531,23 @@ static const char *phylink_pause_to_str(int pause)
}
}
-static void phylink_mac_link_up(struct phylink *pl,
- struct phylink_link_state link_state)
+static void phylink_link_up(struct phylink *pl,
+ struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
pl->cur_interface = link_state.interface;
- pl->ops->mac_link_up(pl->config, pl->cur_link_an_mode,
- pl->cur_interface, pl->phydev);
+
+ if (pl->pcs_ops && pl->pcs_ops->pcs_link_up)
+ pl->pcs_ops->pcs_link_up(pl->config, pl->cur_link_an_mode,
+ pl->cur_interface,
+ link_state.speed, link_state.duplex);
+
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev,
+ pl->cur_link_an_mode, pl->cur_interface,
+ link_state.speed, link_state.duplex,
+ !!(link_state.pause & MLO_PAUSE_TX),
+ !!(link_state.pause & MLO_PAUSE_RX));
if (ndev)
netif_carrier_on(ndev);
@@ -465,14 +559,14 @@ static void phylink_mac_link_up(struct phylink *pl,
phylink_pause_to_str(link_state.pause));
}
-static void phylink_mac_link_down(struct phylink *pl)
+static void phylink_link_down(struct phylink *pl)
{
struct net_device *ndev = pl->netdev;
if (ndev)
netif_carrier_off(ndev);
- pl->ops->mac_link_down(pl->config, pl->cur_link_an_mode,
- pl->cur_interface);
+ pl->mac_ops->mac_link_down(pl->config, pl->cur_link_an_mode,
+ pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
@@ -493,7 +587,7 @@ static void phylink_resolve(struct work_struct *w)
switch (pl->cur_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
- phylink_resolve_flow(pl, &link_state);
+ phylink_apply_manual_flow(pl, &link_state);
phylink_mac_config_up(pl, &link_state);
break;
@@ -515,10 +609,12 @@ static void phylink_resolve(struct work_struct *w)
link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with
- * the pause mode bits. */
- link_state.pause |= pl->phy_state.pause;
- phylink_resolve_flow(pl, &link_state);
+ * the PHY flow control bits. */
+ link_state.pause = pl->phy_state.pause;
+ phylink_apply_manual_flow(pl, &link_state);
phylink_mac_config(pl, &link_state);
+ } else {
+ phylink_apply_manual_flow(pl, &link_state);
}
break;
}
@@ -532,9 +628,9 @@ static void phylink_resolve(struct work_struct *w)
if (link_changed) {
pl->old_link_state = link_state.link;
if (!link_state.link)
- phylink_mac_link_down(pl);
+ phylink_link_down(pl);
else
- phylink_mac_link_up(pl, link_state);
+ phylink_link_up(pl, link_state);
}
if (!link_state.link && pl->mac_link_dropped) {
pl->mac_link_dropped = false;
@@ -601,7 +697,7 @@ static int phylink_register_sfp(struct phylink *pl,
* @fwnode: a pointer to a &struct fwnode_handle describing the network
* interface
* @iface: the desired link mode defined by &typedef phy_interface_t
- * @ops: a pointer to a &struct phylink_mac_ops for the MAC.
+ * @mac_ops: a pointer to a &struct phylink_mac_ops for the MAC.
*
* Create a new phylink instance, and parse the link parameters found in @fwnode.
* This will parse in-band modes, fixed-link or SFP configuration.
@@ -614,7 +710,7 @@ static int phylink_register_sfp(struct phylink *pl,
struct phylink *phylink_create(struct phylink_config *config,
struct fwnode_handle *fwnode,
phy_interface_t iface,
- const struct phylink_mac_ops *ops)
+ const struct phylink_mac_ops *mac_ops)
{
struct phylink *pl;
int ret;
@@ -647,7 +743,7 @@ struct phylink *phylink_create(struct phylink_config *config,
pl->link_config.speed = SPEED_UNKNOWN;
pl->link_config.duplex = DUPLEX_UNKNOWN;
pl->link_config.an_enabled = true;
- pl->ops = ops;
+ pl->mac_ops = mac_ops;
__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
timer_setup(&pl->link_poll, phylink_fixed_poll, 0);
@@ -681,6 +777,12 @@ struct phylink *phylink_create(struct phylink_config *config,
}
EXPORT_SYMBOL_GPL(phylink_create);
+void phylink_add_pcs(struct phylink *pl, const struct phylink_pcs_ops *ops)
+{
+ pl->pcs_ops = ops;
+}
+EXPORT_SYMBOL_GPL(phylink_add_pcs);
+
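+/* An illustrative sketch (hypothetical foo_* driver; the ops member
+ * names are inferred from the pcs_ops calls above and from
+ * phylink_pcs_config()/phylink_mac_pcs_an_restart()): a MAC with a
+ * separate PCS registers its operations right after creating the
+ * phylink instance.
+ *
+ *	static const struct phylink_pcs_ops foo_pcs_ops = {
+ *		.pcs_get_state	= foo_pcs_get_state,
+ *		.pcs_config	= foo_pcs_config,
+ *		.pcs_an_restart	= foo_pcs_an_restart,
+ *		.pcs_link_up	= foo_pcs_link_up,
+ *	};
+ *
+ *	pl = phylink_create(&priv->phylink_config, fwnode, phy_mode,
+ *			    &foo_mac_ops);
+ *	if (IS_ERR(pl))
+ *		return PTR_ERR(pl);
+ *	phylink_add_pcs(pl, &foo_pcs_ops);
+ */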
/**
* phylink_destroy() - cleanup and destroy the phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -705,15 +807,18 @@ static void phylink_phy_change(struct phy_device *phydev, bool up,
bool do_carrier)
{
struct phylink *pl = phydev->phylink;
+ bool tx_pause, rx_pause;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
mutex_lock(&pl->state_mutex);
pl->phy_state.speed = phydev->speed;
pl->phy_state.duplex = phydev->duplex;
pl->phy_state.pause = MLO_PAUSE_NONE;
- if (phydev->pause)
- pl->phy_state.pause |= MLO_PAUSE_SYM;
- if (phydev->asym_pause)
- pl->phy_state.pause |= MLO_PAUSE_ASYM;
+ if (tx_pause)
+ pl->phy_state.pause |= MLO_PAUSE_TX;
+ if (rx_pause)
+ pl->phy_state.pause |= MLO_PAUSE_RX;
pl->phy_state.interface = phydev->interface;
pl->phy_state.link = up;
mutex_unlock(&pl->state_mutex);
@@ -783,6 +888,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
mutex_lock(&pl->state_mutex);
pl->phydev = phy;
pl->phy_state.interface = interface;
+ pl->phy_state.pause = MLO_PAUSE_NONE;
+ pl->phy_state.speed = SPEED_UNKNOWN;
+ pl->phy_state.duplex = DUPLEX_UNKNOWN;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
@@ -1011,15 +1119,12 @@ void phylink_start(struct phylink *pl)
/* Apply the link configuration to the MAC when starting. This allows
* a fixed-link to start with the correct parameters, and also
* ensures that we set the appropriate advertisement for Serdes links.
- */
- phylink_resolve_flow(pl, &pl->link_config);
- phylink_mac_config(pl, &pl->link_config);
-
- /* Restart autonegotiation if using 802.3z to ensure that the link
+ *
+ * Restart autonegotiation if using 802.3z to ensure that the link
* parameters are properly negotiated. This is necessary for DSA
* switches using 802.3z negotiation to ensure they see our modes.
*/
- phylink_mac_an_restart(pl);
+ phylink_mac_initial_config(pl, true);
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
@@ -1316,8 +1421,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* advertisement; the only thing we have is the pause
* modes which can only come from a PHY.
*/
- phylink_mac_config(pl, &pl->link_config);
- phylink_mac_an_restart(pl);
+ phylink_pcs_config(pl, true, &pl->link_config);
}
mutex_unlock(&pl->state_mutex);
}
@@ -1345,7 +1449,7 @@ int phylink_ethtool_nway_reset(struct phylink *pl)
if (pl->phydev)
ret = phy_restart_aneg(pl->phydev);
- phylink_mac_an_restart(pl);
+ phylink_mac_pcs_an_restart(pl);
return ret;
}
@@ -1379,6 +1483,9 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
ASSERT_RTNL();
+ if (pl->cur_link_an_mode == MLO_AN_FIXED)
+ return -EOPNOTSUPP;
+
if (!phylink_test(pl->supported, Pause) &&
!phylink_test(pl->supported, Asym_Pause))
return -EOPNOTSUPP;
@@ -1387,8 +1494,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
!pause->autoneg && pause->rx_pause != pause->tx_pause)
return -EINVAL;
- config->pause &= ~(MLO_PAUSE_AN | MLO_PAUSE_TXRX_MASK);
-
+ mutex_lock(&pl->state_mutex);
+ config->pause = 0;
if (pause->autoneg)
config->pause |= MLO_PAUSE_AN;
if (pause->rx_pause)
@@ -1396,6 +1503,22 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
if (pause->tx_pause)
config->pause |= MLO_PAUSE_TX;
+ /*
+ * See the comments for linkmode_set_pause(), wrt the deficiencies
+ * with the current implementation. A solution to this issue would
+ * be:
+ * ethtool Local device
+ * rx tx Pause AsymDir
+ * 0 0 0 0
+ * 1 0 1 1
+ * 0 1 0 1
+ * 1 1 1 1
+ * and then use the ethtool rx/tx enablement status to mask the
+ * rx/tx pause resolution.
+ */
+ linkmode_set_pause(config->advertising, pause->tx_pause,
+ pause->rx_pause);
+
/* If we have a PHY, phylib will call our link state function if the
* mode has changed, which will trigger a resolve and update the MAC
* configuration.
@@ -1405,19 +1528,9 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
pause->tx_pause);
} else if (!test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state)) {
- switch (pl->cur_link_an_mode) {
- case MLO_AN_FIXED:
- /* Should we allow fixed links to change against the config? */
- phylink_resolve_flow(pl, config);
- phylink_mac_config(pl, config);
- break;
-
- case MLO_AN_INBAND:
- phylink_mac_config(pl, config);
- phylink_mac_an_restart(pl);
- break;
- }
+ phylink_pcs_config(pl, true, &pl->link_config);
}
+ mutex_unlock(&pl->state_mutex);
return 0;
}
@@ -1509,13 +1622,14 @@ static int phylink_mii_emul_read(unsigned int reg,
struct phylink_link_state *state)
{
struct fixed_phy_status fs;
+ unsigned long *lpa = state->lp_advertising;
int val;
fs.link = state->link;
fs.speed = state->speed;
fs.duplex = state->duplex;
- fs.pause = state->pause & MLO_PAUSE_SYM;
- fs.asym_pause = state->pause & MLO_PAUSE_ASYM;
+ fs.pause = test_bit(ETHTOOL_LINK_MODE_Pause_BIT, lpa);
+ fs.asym_pause = test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, lpa);
val = swphy_read_reg(reg, &fs);
if (reg == MII_BMSR) {
@@ -1820,7 +1934,7 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state))
- phylink_mac_config(pl, &pl->link_config);
+ phylink_mac_initial_config(pl, false);
return ret;
}
@@ -1987,4 +2101,242 @@ void phylink_helper_basex_speed(struct phylink_link_state *state)
}
EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
+static void phylink_decode_c37_word(struct phylink_link_state *state,
+ uint16_t config_reg, int speed)
+{
+ bool tx_pause, rx_pause;
+ int fd_bit;
+
+ if (speed == SPEED_2500)
+ fd_bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT;
+ else
+ fd_bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT;
+
+ mii_lpa_mod_linkmode_x(state->lp_advertising, config_reg, fd_bit);
+
+ if (linkmode_test_bit(fd_bit, state->advertising) &&
+ linkmode_test_bit(fd_bit, state->lp_advertising)) {
+ state->speed = speed;
+ state->duplex = DUPLEX_FULL;
+ } else {
+ /* negotiation failure */
+ state->link = false;
+ }
+
+ linkmode_resolve_pause(state->advertising, state->lp_advertising,
+ &tx_pause, &rx_pause);
+
+ if (tx_pause)
+ state->pause |= MLO_PAUSE_TX;
+ if (rx_pause)
+ state->pause |= MLO_PAUSE_RX;
+}
+
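+/* For reference, linkmode_resolve_pause() implements the standard
+ * 802.3 annex 28B.3 resolution that the removed phylink_resolve_flow()
+ * open-coded:
+ *
+ *	Local device	Link partner
+ *	Pause AsymDir	Pause AsymDir	Result
+ *	  1	X	  1	X	TX+RX
+ *	  0	1	  1	1	TX
+ *	  1	1	  0	1	RX
+ */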
+static void phylink_decode_sgmii_word(struct phylink_link_state *state,
+ uint16_t config_reg)
+{
+ if (!(config_reg & LPA_SGMII_LINK)) {
+ state->link = false;
+ return;
+ }
+
+ switch (config_reg & LPA_SGMII_SPD_MASK) {
+ case LPA_SGMII_10:
+ state->speed = SPEED_10;
+ break;
+ case LPA_SGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case LPA_SGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->link = false;
+ return;
+ }
+ if (config_reg & LPA_SGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+}
+
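+/* Worked example, assuming the usual Cisco SGMII code word layout
+ * (link in bit 15, duplex in bit 12, speed in bits 11:10): a link
+ * partner word of 0x9800 decodes to link up, 1000 Mb/s, full duplex,
+ * while 0x0000 simply reports link down.
+ */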
+/**
+ * phylink_mii_c22_pcs_get_state() - read the MAC PCS state
+ * @pcs: a pointer to a &struct mdio_device.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 22 register set for
+ * clause 37 negotiation and/or SGMII control.
+ *
+ * Read the MAC PCS state from the MII device @pcs and
+ * parse the Clause 37 or Cisco SGMII link partner negotiation word into
+ * the phylink @state structure. This is suitable to be directly plugged
+ * into the mac_pcs_get_state() member of the struct phylink_mac_ops
+ * structure.
+ */
+void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int bmsr, lpa;
+
+ bmsr = mdiobus_read(bus, addr, MII_BMSR);
+ lpa = mdiobus_read(bus, addr, MII_LPA);
+ if (bmsr < 0 || lpa < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(bmsr & BMSR_LSTATUS);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+ if (!state->link)
+ return;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_decode_c37_word(state, lpa, SPEED_1000);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ phylink_decode_c37_word(state, lpa, SPEED_2500);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ phylink_decode_sgmii_word(state, lpa);
+ break;
+
+ default:
+ state->link = false;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state);
+
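+/* An illustrative sketch (hypothetical foo_* driver, not part of this
+ * patch): since this helper takes the PCS &struct mdio_device rather
+ * than a &struct phylink_config, a driver would wrap it, e.g.:
+ *
+ *	static void foo_pcs_get_state(struct phylink_config *config,
+ *				      struct phylink_link_state *state)
+ *	{
+ *		struct foo_priv *priv = container_of(config,
+ *					struct foo_priv, phylink_config);
+ *
+ *		phylink_mii_c22_pcs_get_state(priv->pcs_mdiodev, state);
+ *	}
+ */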
+/**
+ * phylink_mii_c22_pcs_set_advertisement() - configure the clause 37 PCS
+ * advertisement
+ * @pcs: a pointer to a &struct mdio_device.
+ * @interface: the PHY interface mode being configured
+ * @advertising: the ethtool advertisement mask
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 22 register set for
+ * clause 37 negotiation and/or SGMII control.
+ *
+ * Configure the clause 37 PCS advertisement as specified by @advertising. This
+ * does not trigger a renegotiation; phylink will do that via the
+ * mac_an_restart() method of the struct phylink_mac_ops structure.
+ *
+ * Returns negative error code on failure to configure the advertisement,
+ * zero if no change has been made, or one if the advertisement has changed.
+ */
+int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
+ phy_interface_t interface,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int val, ret;
+ u16 adv;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ adv = ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ val = mdiobus_read(bus, addr, MII_ADVERTISE);
+ if (val < 0)
+ return val;
+
+ if (val == adv)
+ return 0;
+
+ ret = mdiobus_write(bus, addr, MII_ADVERTISE, adv);
+ if (ret < 0)
+ return ret;
+
+ return 1;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ val = mdiobus_read(bus, addr, MII_ADVERTISE);
+ if (val < 0)
+ return val;
+
+ if (val == 0x0001)
+ return 0;
+
+ ret = mdiobus_write(bus, addr, MII_ADVERTISE, 0x0001);
+ if (ret < 0)
+ return ret;
+
+ return 1;
+
+ default:
+ /* Nothing to do for other modes */
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_set_advertisement);
+
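+/* Note on the SGMII case above: 0x0001 is believed to be the fixed
+ * tx_config word a MAC presents per the Cisco SGMII specification
+ * (software sets only bit 0; the acknowledge bit is handled by the
+ * autonegotiation state machine), which is why any other value is
+ * rewritten.
+ */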
+/**
+ * phylink_mii_c22_pcs_an_restart() - restart 802.3z autonegotiation
+ * @pcs: a pointer to a &struct mdio_device.
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 22 register set for
+ * clause 37 negotiation.
+ *
+ * Restart the clause 37 negotiation with the link partner. This is
+ * suitable to be directly plugged into the mac_an_restart() member
+ * of the struct phylink_mac_ops structure.
+ */
+void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs)
+{
+ struct mii_bus *bus = pcs->bus;
+ int val, addr = pcs->addr;
+
+ val = mdiobus_read(bus, addr, MII_BMCR);
+ if (val >= 0) {
+ val |= BMCR_ANRESTART;
+
+ mdiobus_write(bus, addr, MII_BMCR, val);
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_an_restart);
+
+#define C45_ADDR(d, a)	(MII_ADDR_C45 | (d) << 16 | (a))
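+/**
+ * phylink_mii_c45_pcs_get_state() - read the clause 45 MAC PCS state
+ * @pcs: a pointer to a &struct mdio_device.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 45 register set,
+ * reading the PCS link status from MDIO_STAT1 and filling in the
+ * speed/duplex for the interface modes handled below (currently
+ * 10GBASE-R only).
+ */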
+void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int stat;
+
+ stat = mdiobus_read(bus, addr, C45_ADDR(MDIO_MMD_PCS, MDIO_STAT1));
+ if (stat < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(stat & MDIO_STAT1_LSTATUS);
+ if (!state->link)
+ return;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ state->speed = SPEED_10000;
+ state->duplex = DUPLEX_FULL;
+ break;
+
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state);
+
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f5fa2fff3ddc..2d99e9de6ee1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -49,6 +49,8 @@
#define RTL_LPADV_5000FULL BIT(6)
#define RTL_LPADV_2500FULL BIT(5)
+#define RTLGEN_SPEED_MASK 0x0630
+
#define RTL_GENERIC_PHYID 0x001cc800
MODULE_DESCRIPTION("Realtek PHY driver");
@@ -309,6 +311,55 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
return ret;
}
+/* get actual speed to cover the downshift case */
+static int rtlgen_get_speed(struct phy_device *phydev)
+{
+ int val;
+
+ if (!phydev->link)
+ return 0;
+
+ val = phy_read_paged(phydev, 0xa43, 0x12);
+ if (val < 0)
+ return val;
+
+ switch (val & RTLGEN_SPEED_MASK) {
+ case 0x0000:
+ phydev->speed = SPEED_10;
+ break;
+ case 0x0010:
+ phydev->speed = SPEED_100;
+ break;
+ case 0x0020:
+ phydev->speed = SPEED_1000;
+ break;
+ case 0x0200:
+ phydev->speed = SPEED_10000;
+ break;
+ case 0x0210:
+ phydev->speed = SPEED_2500;
+ break;
+ case 0x0220:
+ phydev->speed = SPEED_5000;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int rtlgen_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ return rtlgen_get_speed(phydev);
+}
+
static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret;
@@ -429,6 +480,8 @@ static int rtl8125_config_aneg(struct phy_device *phydev)
static int rtl8125_read_status(struct phy_device *phydev)
{
+ int ret;
+
if (phydev->autoneg == AUTONEG_ENABLE) {
int lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
@@ -443,7 +496,11 @@ static int rtl8125_read_status(struct phy_device *phydev)
phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL);
}
- return genphy_read_status(phydev);
+ ret = genphy_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ return rtlgen_get_speed(phydev);
}
static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
@@ -550,6 +607,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.name = "Generic FE-GE Realtek PHY",
.match_phy_device = rtlgen_match_phy_device,
+ .read_status = rtlgen_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index b73298250793..93da7d3d0954 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -112,8 +112,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
int err = genphy_read_status(phydev);
if (!phydev->link && priv->energy_enable) {
- int i;
-
/* Disable EDPD to wake up PHY */
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
@@ -125,15 +123,11 @@ static int lan87xx_read_status(struct phy_device *phydev)
return rc;
/* Wait max 640 ms to detect energy */
- for (i = 0; i < 64; i++) {
- /* Sleep to allow link test pulses to be sent */
- msleep(10);
- rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
- if (rc < 0)
- return rc;
- if (rc & MII_LAN83C185_ENERGYON)
- break;
- }
+ phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc,
+ rc & MII_LAN83C185_ENERGYON, 10000,
+ 640000, true);
+ if (rc < 0)
+ return rc;
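+	/* The helper above polls MII_LAN83C185_CTRL_STATUS every 10 ms
+	 * for up to 640 ms, sleeping before the first read so link test
+	 * pulses can be sent, matching the removed open-coded loop.
+	 */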
/* Re-enable EDPD */
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index d760a36db28c..beedaad08255 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -490,6 +490,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
goto out;
+ if (skb->pkt_type != PACKET_HOST)
+ goto abort;
+
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto abort;
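+/* The added check restricts discovery handling to frames unicast to
+ * this host; PADT frames seen only via promiscuous reception
+ * (PACKET_OTHERHOST) must not be able to tear down local sessions.
+ */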
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index babb01888b78..f81fb0b13a94 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -456,11 +456,8 @@ static void slip_write_wakeup(struct tty_struct *tty)
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
- if (!sl)
- goto out;
-
- schedule_work(&sl->tx_work);
-out:
+ if (sl)
+ schedule_work(&sl->tx_work);
rcu_read_unlock();
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 4004f98e50d9..04845a4017f9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -468,6 +468,9 @@ static const struct team_mode *team_mode_get(const char *kind)
struct team_mode_item *mitem;
const struct team_mode *mode = NULL;
+ if (!try_module_get(THIS_MODULE))
+ return NULL;
+
spin_lock(&mode_list_lock);
mitem = __find_mode(kind);
if (!mitem) {
@@ -483,6 +486,7 @@ static const struct team_mode *team_mode_get(const char *kind)
}
spin_unlock(&mode_list_lock);
+ module_put(THIS_MODULE);
return mode;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 650c937ed56b..44889eba1dbc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -75,35 +75,6 @@
static void tun_default_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd);
-/* Uncomment to enable debugging */
-/* #define TUN_DEBUG 1 */
-
-#ifdef TUN_DEBUG
-static int debug;
-
-#define tun_debug(level, tun, fmt, args...) \
-do { \
- if (tun->debug) \
- netdev_printk(level, tun->dev, fmt, ##args); \
-} while (0)
-#define DBG1(level, fmt, args...) \
-do { \
- if (debug == 2) \
- printk(level fmt, ##args); \
-} while (0)
-#else
-#define tun_debug(level, tun, fmt, args...) \
-do { \
- if (0) \
- netdev_printk(level, tun->dev, fmt, ##args); \
-} while (0)
-#define DBG1(level, fmt, args...) \
-do { \
- if (0) \
- printk(level fmt, ##args); \
-} while (0)
-#endif
-
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */
@@ -225,9 +196,7 @@ struct tun_struct {
struct sock_fprog fprog;
/* protected by rtnl lock */
bool filter_attached;
-#ifdef TUN_DEBUG
- int debug;
-#endif
+ u32 msg_enable;
spinlock_t lock;
struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
struct timer_list flow_gc_timer;
@@ -423,8 +392,9 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (e) {
- tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
- rxhash, queue_index);
+ netif_info(tun, tx_queued, tun->dev,
+ "create flow: hash %u index %u\n",
+ rxhash, queue_index);
e->updated = jiffies;
e->rxhash = rxhash;
e->rps_rxhash = 0;
@@ -438,8 +408,8 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
- tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
- e->rxhash, e->queue_index);
+ netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
+ e->rxhash, e->queue_index);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
--tun->flow_count;
@@ -485,8 +455,6 @@ static void tun_flow_cleanup(struct timer_list *t)
unsigned long count = 0;
int i;
- tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
-
spin_lock(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
@@ -546,8 +514,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
rcu_read_unlock();
}
-/**
- * Save the hash received in the stack receive path and update the
+/* Save the hash received in the stack receive path and update the
* flow_hash table accordingly.
*/
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
@@ -1076,9 +1043,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (!rcu_dereference(tun->steering_prog))
tun_automq_xmit(tun, skb);
- tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
-
- BUG_ON(!tfile);
+ netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
@@ -1435,8 +1400,6 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
sk = tfile->socket.sk;
- tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
-
poll_wait(file, sk_sleep(sk), wait);
if (!ptr_ring_empty(&tfile->tx_ring))
@@ -1715,8 +1678,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
alloc_frag->offset += buflen;
}
err = tun_xdp_act(tun, xdp_prog, &xdp, act);
- if (err < 0)
- goto err_xdp;
+ if (err < 0) {
+ if (act == XDP_REDIRECT || act == XDP_TX)
+ put_page(alloc_frag->page);
+ goto out;
+ }
+
if (err == XDP_REDIRECT)
xdp_do_flush();
if (err != XDP_PASS)
@@ -1730,8 +1697,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
-err_xdp:
- put_page(alloc_frag->page);
out:
rcu_read_unlock();
local_bh_enable();
@@ -1923,6 +1888,7 @@ drop:
skb_reset_network_header(skb);
skb_probe_transport_header(skb);
+ skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
struct bpf_prog *xdp_prog;
@@ -2207,8 +2173,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
ssize_t ret;
int err;
- tun_debug(KERN_INFO, tun, "tun_do_read\n");
-
if (!iov_iter_count(to)) {
tun_ptr_free(ptr);
return 0;
@@ -2496,6 +2460,7 @@ build:
skb->protocol = eth_type_trans(skb, tun->dev);
skb_reset_network_header(skb);
skb_probe_transport_header(skb);
+ skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
err = do_xdp_generic(xdp_prog, skb);
@@ -2507,7 +2472,6 @@ build:
!tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
- skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
/* No need for get_cpu_ptr() here since this function is
@@ -2853,8 +2817,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
netif_carrier_on(tun->dev);
- tun_debug(KERN_INFO, tun, "tun_set_iff\n");
-
/* Make sure persistent devices do not get stuck in
* xoff state.
*/
@@ -2885,8 +2847,6 @@ err_free_dev:
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
- tun_debug(KERN_INFO, tun, "tun_get_iff\n");
-
strcpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = tun_flags(tun);
@@ -3110,7 +3070,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (!tun)
goto unlock;
- tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
+ netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
net = dev_net(tun->dev);
ret = 0;
@@ -3131,8 +3091,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
/* Disable/Enable checksum */
/* [unimplemented] */
- tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
- arg ? "disabled" : "enabled");
+ netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
+ arg ? "disabled" : "enabled");
break;
case TUNSETPERSIST:
@@ -3150,8 +3110,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
do_notify = true;
}
- tun_debug(KERN_INFO, tun, "persist %s\n",
- arg ? "enabled" : "disabled");
+ netif_info(tun, drv, tun->dev, "persist %s\n",
+ arg ? "enabled" : "disabled");
break;
case TUNSETOWNER:
@@ -3163,8 +3123,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
}
tun->owner = owner;
do_notify = true;
- tun_debug(KERN_INFO, tun, "owner set to %u\n",
- from_kuid(&init_user_ns, tun->owner));
+ netif_info(tun, drv, tun->dev, "owner set to %u\n",
+ from_kuid(&init_user_ns, tun->owner));
break;
case TUNSETGROUP:
@@ -3176,29 +3136,28 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
}
tun->group = group;
do_notify = true;
- tun_debug(KERN_INFO, tun, "group set to %u\n",
- from_kgid(&init_user_ns, tun->group));
+ netif_info(tun, drv, tun->dev, "group set to %u\n",
+ from_kgid(&init_user_ns, tun->group));
break;
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
- tun_debug(KERN_INFO, tun,
- "Linktype set failed because interface is up\n");
+ netif_info(tun, drv, tun->dev,
+ "Linktype set failed because interface is up\n");
ret = -EBUSY;
} else {
tun->dev->type = (int) arg;
- tun_debug(KERN_INFO, tun, "linktype set to %d\n",
- tun->dev->type);
+ netif_info(tun, drv, tun->dev, "linktype set to %d\n",
+ tun->dev->type);
ret = 0;
}
break;
-#ifdef TUN_DEBUG
case TUNSETDEBUG:
- tun->debug = arg;
+ tun->msg_enable = (u32)arg;
break;
-#endif
+
case TUNSETOFFLOAD:
ret = set_offload(tun, arg);
break;
@@ -3221,9 +3180,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case SIOCSIFHWADDR:
/* Set hw address */
- tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
- ifr.ifr_hwaddr.sa_data);
-
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
break;
@@ -3418,8 +3374,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
struct net *net = current->nsproxy->net_ns;
struct tun_file *tfile;
- DBG1(KERN_INFO, "tunX: tun_chr_open\n");
-
tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
&tun_proto, 0);
if (!tfile)
@@ -3559,20 +3513,16 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
static u32 tun_get_msglevel(struct net_device *dev)
{
-#ifdef TUN_DEBUG
struct tun_struct *tun = netdev_priv(dev);
- return tun->debug;
-#else
- return -EOPNOTSUPP;
-#endif
+
+ return tun->msg_enable;
}
static void tun_set_msglevel(struct net_device *dev, u32 value)
{
-#ifdef TUN_DEBUG
struct tun_struct *tun = netdev_priv(dev);
- tun->debug = value;
-#endif
+
+ tun->msg_enable = value;
}
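+/* With the driver-private TUN_DEBUG knob gone, the netif_info() sites
+ * above honour the standard NETIF_MSG_* classes and can be toggled
+ * from userspace, e.g. (illustrative invocation):
+ *
+ *	ethtool -s tun0 msglvl drv on tx_queued on
+ */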
static int tun_get_coalesce(struct net_device *dev,
@@ -3599,6 +3549,7 @@ static int tun_set_coalesce(struct net_device *dev,
}
static const struct ethtool_ops tun_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index bcabd39d136a..9bdbd7b472a0 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -36,7 +36,7 @@ struct usbpn_dev {
spinlock_t rx_lock;
struct sk_buff *rx_skb;
- struct urb *urbs[0];
+ struct urb *urbs[];
};
static void tx_complete(struct urb *req);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 0cdb2ce47645..a657943c9f01 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -815,14 +815,21 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
- /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index c2c82e6391b4..8929669b5e6d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -175,7 +175,11 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
u32 val, max, min;
/* clamp new_tx to sane values */
- min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ if (ctx->is_ndp16)
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ else
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
+
max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
/* some devices set dwNtbOutMaxSize too low for the above default */
@@ -307,10 +311,17 @@ static ssize_t ndp_to_end_store(struct device *d, struct device_attribute *attr
if (enable == (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
return len;
- if (enable && !ctx->delayed_ndp16) {
- ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
- if (!ctx->delayed_ndp16)
- return -ENOMEM;
+ if (enable) {
+ if (ctx->is_ndp16 && !ctx->delayed_ndp16) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ return -ENOMEM;
+ }
+ if (!ctx->is_ndp16 && !ctx->delayed_ndp32) {
+ ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp32)
+ return -ENOMEM;
+ }
}
/* flush pending data before changing flag */
@@ -512,6 +523,9 @@ static int cdc_ncm_init(struct usbnet *dev)
dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
}
+ /* use ndp16 by default */
+ ctx->is_ndp16 = 1;
+
/* set NTB format, if both formats are supported.
*
* "The host shall only send this command while the NCM Data
@@ -519,14 +533,27 @@ static int cdc_ncm_init(struct usbnet *dev)
*/
if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
USB_CDC_NCM_NTB32_SUPPORTED) {
- dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
- if (err < 0)
+ if (ctx->drvflags & CDC_NCM_FLAG_PREFER_NTB32) {
+ ctx->is_ndp16 = 0;
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 32-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB32_FORMAT,
+ iface_no, NULL, 0);
+ } else {
+ ctx->is_ndp16 = 1;
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+ }
+ if (err < 0) {
+ ctx->is_ndp16 = 1;
dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
+ }
}
/* set initial device values */
@@ -549,7 +576,10 @@ static int cdc_ncm_init(struct usbnet *dev)
ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
/* set up maximum NDP size */
- ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
+ if (ctx->is_ndp16)
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
+ else
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp32) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe32);
/* initial coalescing timer interval */
ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
@@ -734,7 +764,10 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
ctx->tx_curr_skb = NULL;
}
- kfree(ctx->delayed_ndp16);
+ if (ctx->is_ndp16)
+ kfree(ctx->delayed_ndp16);
+ else
+ kfree(ctx->delayed_ndp32);
kfree(ctx);
}
@@ -772,10 +805,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
- int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
- __le16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -879,32 +910,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
- /*
- * Some Huawei devices have been observed to come out of reset in NDP32 mode.
- * Let's check if this is the case, and set the device to NDP16 mode again if
- * needed.
- */
- if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
- err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, iface_no, &curr_ntb_format, 2);
- if (err < 0) {
- goto error2;
- }
-
- if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
- dev_info(&intf->dev, "resetting NTB format to 16-bit");
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
-
- if (err < 0)
- goto error2;
- }
- }
-
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
@@ -929,9 +934,15 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* Allocate the delayed NDP if needed. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
- ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
- if (!ctx->delayed_ndp16)
- goto error2;
+ if (ctx->is_ndp16) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ goto error2;
+ } else {
+ ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp32)
+ goto error2;
+ }
dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
}
@@ -1055,7 +1066,7 @@ static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remai
/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
* allocating a new one within skb
*/
-static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
{
struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
@@ -1110,12 +1121,73 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
return ndp16;
}
+static struct usb_cdc_ncm_ndp32 *cdc_ncm_ndp32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+{
+ struct usb_cdc_ncm_ndp32 *ndp32 = NULL;
+ struct usb_cdc_ncm_nth32 *nth32 = (void *)skb->data;
+ size_t ndpoffset = le32_to_cpu(nth32->dwNdpIndex);
+
+ /* If NDP should be moved to the end of the NCM package, we can't follow the
+ * NTH32 header as we would normally do. NDP isn't written to the SKB yet, and
+ * the dwNdpIndex field in the header is not yet consistent with reality; it is fixed up later.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ if (ctx->delayed_ndp32->dwSignature == sign)
+ return ctx->delayed_ndp32;
+
+ /* We can only push a single NDP to the end. Return
+ * NULL to send what we've already got and queue this
+ * skb for later.
+ */
+ else if (ctx->delayed_ndp32->dwSignature)
+ return NULL;
+ }
+
+ /* follow the chain of NDPs, looking for a match */
+ while (ndpoffset) {
+ ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb->data + ndpoffset);
+ if (ndp32->dwSignature == sign)
+ return ndp32;
+ ndpoffset = le32_to_cpu(ndp32->dwNextNdpIndex);
+ }
+
+ /* align new NDP */
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
+
+ /* verify that there is room for the NDP and the datagram (reserve) */
+ if ((ctx->tx_curr_size - skb->len - reserve) < ctx->max_ndp_size)
+ return NULL;
+
+ /* link to it */
+ if (ndp32)
+ ndp32->dwNextNdpIndex = cpu_to_le32(skb->len);
+ else
+ nth32->dwNdpIndex = cpu_to_le32(skb->len);
+
+ /* push a new empty NDP */
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ ndp32 = skb_put_zero(skb, ctx->max_ndp_size);
+ else
+ ndp32 = ctx->delayed_ndp32;
+
+ ndp32->dwSignature = sign;
+ ndp32->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp32) + sizeof(struct usb_cdc_ncm_dpe32));
+ return ndp32;
+}
+
struct sk_buff *
cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- struct usb_cdc_ncm_nth16 *nth16;
- struct usb_cdc_ncm_ndp16 *ndp16;
+ union {
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_nth32 *nth32;
+ } nth;
+ union {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ } ndp;
struct sk_buff *skb_out;
u16 n = 0, index, ndplen;
u8 ready2send = 0;
@@ -1179,11 +1251,19 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
ctx->tx_low_mem_val--;
}
- /* fill out the initial 16-bit NTB header */
- nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
- nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
- nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
- nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ if (ctx->is_ndp16) {
+ /* fill out the initial 16-bit NTB header */
+ nth.nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
+ nth.nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth.nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth.nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ } else {
+ /* fill out the initial 32-bit NTB header */
+ nth.nth32 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth32));
+ nth.nth32->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH32_SIGN);
+ nth.nth32->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth32));
+ nth.nth32->wSequence = cpu_to_le16(ctx->tx_seq++);
+ }
/* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
@@ -1205,13 +1285,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* get the appropriate NDP for this skb */
- ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+ if (ctx->is_ndp16)
+ ndp.ndp16 = cdc_ncm_ndp16(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+ else
+ ndp.ndp32 = cdc_ncm_ndp32(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
/* align beginning of next frame */
cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_curr_size);
/* check if we had enough room left for both NDP and frame */
- if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
+ if ((ctx->is_ndp16 && !ndp.ndp16) || (!ctx->is_ndp16 && !ndp.ndp32) ||
+ skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -1233,13 +1317,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* calculate frame number within this NDP */
- ndplen = le16_to_cpu(ndp16->wLength);
- index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
+ if (ctx->is_ndp16) {
+ ndplen = le16_to_cpu(ndp.ndp16->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
+
+ /* OK, add this skb */
+ ndp.ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
+ ndp.ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
+ ndp.ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ } else {
+ ndplen = le16_to_cpu(ndp.ndp32->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp32)) / sizeof(struct usb_cdc_ncm_dpe32) - 1;
- /* OK, add this skb */
- ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
- ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
- ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ ndp.ndp32->dpe32[index].dwDatagramLength = cpu_to_le32(skb->len);
+ ndp.ndp32->dpe32[index].dwDatagramIndex = cpu_to_le32(skb_out->len);
+ ndp.ndp32->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe32));
+ }
skb_put_data(skb_out, skb->data, skb->len);
ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
dev_kfree_skb_any(skb);
@@ -1286,13 +1379,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* If requested, put NDP at end of frame. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
- nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
- nth16->wNdpIndex = cpu_to_le16(skb_out->len);
- skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+ if (ctx->is_ndp16) {
+ nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth.nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+
+ /* Zero out delayed NDP - signature checking will naturally fail. */
+ ndp.ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ } else {
+ nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth.nth32->dwNdpIndex = cpu_to_le32(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp32, ctx->max_ndp_size);
- /* Zero out delayed NDP - signature checking will naturally fail. */
- ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ ndp.ndp32 = memset(ctx->delayed_ndp32, 0, ctx->max_ndp_size);
+ }
}
/* If collected data size is less than or equal to ctx->min_tx_pkt
@@ -1314,8 +1416,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* set final frame length */
- nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- nth16->wBlockLength = cpu_to_le16(skb_out->len);
+ if (ctx->is_ndp16) {
+ nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ nth.nth16->wBlockLength = cpu_to_le16(skb_out->len);
+ } else {
+ nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
+ nth.nth32->dwBlockLength = cpu_to_le32(skb_out->len);
+ }
/* return skb */
ctx->tx_curr_skb = NULL;
@@ -1398,7 +1505,12 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+
+ if (ctx->is_ndp16)
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+ else
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN));
+
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -1459,6 +1571,54 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
+int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
+{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
+ struct usb_cdc_ncm_nth32 *nth32;
+ int len;
+ int ret = -EINVAL;
+
+ if (ctx == NULL)
+ goto error;
+
+ if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth32) +
+ sizeof(struct usb_cdc_ncm_ndp32))) {
+ netif_dbg(dev, rx_err, dev->net, "frame too short\n");
+ goto error;
+ }
+
+ nth32 = (struct usb_cdc_ncm_nth32 *)skb_in->data;
+
+ if (nth32->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH32_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid NTH32 signature <%#010x>\n",
+ le32_to_cpu(nth32->dwSignature));
+ goto error;
+ }
+
+ len = le32_to_cpu(nth32->dwBlockLength);
+ if (len > ctx->rx_max) {
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NTB block length %u/%u\n", len,
+ ctx->rx_max);
+ goto error;
+ }
+
+ if ((ctx->rx_seq + 1) != le16_to_cpu(nth32->wSequence) &&
+ (ctx->rx_seq || le16_to_cpu(nth32->wSequence)) &&
+ !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth32->wSequence))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth32->wSequence));
+ }
+ ctx->rx_seq = le16_to_cpu(nth32->wSequence);
+
+ ret = le32_to_cpu(nth32->dwNdpIndex);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth32);
+
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
@@ -1495,6 +1655,42 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
+/* verify NDP header and return number of datagrams, or negative error */
+int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset)
+{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ int ret = -EINVAL;
+
+ if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp32)) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
+ ndpoffset);
+ goto error;
+ }
+ ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
+
+ if (le16_to_cpu(ndp32->wLength) < USB_CDC_NCM_NDP32_LENGTH_MIN) {
+ netif_dbg(dev, rx_err, dev->net, "invalid DPT32 length <%u>\n",
+ le16_to_cpu(ndp32->wLength));
+ goto error;
+ }
+
+ ret = ((le16_to_cpu(ndp32->wLength) -
+ sizeof(struct usb_cdc_ncm_ndp32)) /
+ sizeof(struct usb_cdc_ncm_dpe32));
+ ret--; /* we process NDP entries except for the last one */
+
+ if ((sizeof(struct usb_cdc_ncm_ndp32) +
+ ret * (sizeof(struct usb_cdc_ncm_dpe32))) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
+ ret = -EINVAL;
+ }
+
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp32);
+
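+/* Worked example for the datagram count above, assuming the usual
+ * structure sizes (16-byte NDP32 header, 8-byte DPE32 entries): an NDP
+ * with wLength == 32 holds two entries, and after discarding the
+ * mandatory NULL terminator entry the function returns one datagram.
+ */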
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
@@ -1503,34 +1699,66 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
int nframes;
int x;
int offset;
- struct usb_cdc_ncm_ndp16 *ndp16;
- struct usb_cdc_ncm_dpe16 *dpe16;
+ union {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ } ndp;
+ union {
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ struct usb_cdc_ncm_dpe32 *dpe32;
+ } dpe;
+
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
u32 payload = 0;
- ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ if (ctx->is_ndp16)
+ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ else
+ ndpoffset = cdc_ncm_rx_verify_nth32(ctx, skb_in);
+
if (ndpoffset < 0)
goto error;
next_ndp:
- nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
- if (nframes < 0)
- goto error;
+ if (ctx->is_ndp16) {
+ nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
- ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+ ndp.ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
- if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
- netif_dbg(dev, rx_err, dev->net,
- "invalid DPT16 signature <%#010x>\n",
- le32_to_cpu(ndp16->dwSignature));
- goto err_ndp;
+ if (ndp.ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT16 signature <%#010x>\n",
+ le32_to_cpu(ndp.ndp16->dwSignature));
+ goto err_ndp;
+ }
+ dpe.dpe16 = ndp.ndp16->dpe16;
+ } else {
+ nframes = cdc_ncm_rx_verify_ndp32(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
+
+ ndp.ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
+
+ if (ndp.ndp32->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT32 signature <%#010x>\n",
+ le32_to_cpu(ndp.ndp32->dwSignature));
+ goto err_ndp;
+ }
+ dpe.dpe32 = ndp.ndp32->dpe32;
}
- dpe16 = ndp16->dpe16;
- for (x = 0; x < nframes; x++, dpe16++) {
- offset = le16_to_cpu(dpe16->wDatagramIndex);
- len = le16_to_cpu(dpe16->wDatagramLength);
+ for (x = 0; x < nframes; x++) {
+ if (ctx->is_ndp16) {
+ offset = le16_to_cpu(dpe.dpe16->wDatagramIndex);
+ len = le16_to_cpu(dpe.dpe16->wDatagramLength);
+ } else {
+ offset = le32_to_cpu(dpe.dpe32->dwDatagramIndex);
+ len = le32_to_cpu(dpe.dpe32->dwDatagramLength);
+ }
/*
* CDC NCM ch. 3.7
@@ -1561,10 +1789,19 @@ next_ndp:
usbnet_skb_return(dev, skb);
payload += len; /* count payload bytes in this NTB */
}
+
+ if (ctx->is_ndp16)
+ dpe.dpe16++;
+ else
+ dpe.dpe32++;
}
err_ndp:
/* are there more NDPs to process? */
- ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ if (ctx->is_ndp16)
+ ndpoffset = le16_to_cpu(ndp.ndp16->wNextNdpIndex);
+ else
+ ndpoffset = le32_to_cpu(ndp.ndp32->dwNextNdpIndex);
+
if (ndpoffset && loopcount--)
goto next_ndp;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 417e42c9fd03..bb8c34d746ab 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2659,7 +2659,7 @@ static struct hso_device *hso_create_bulk_serial_device(
if (!
(serial->out_endp =
hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
- dev_err(&interface->dev, "Failed to find BULK IN ep\n");
+ dev_err(&interface->dev, "Failed to find BULK OUT ep\n");
goto exit2;
}
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index e15a472c6a54..099d84827004 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -77,11 +77,11 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
- /* Additionally, it has been reported that some Huawei E3372H devices, with
- * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
- * needing to be set to the NTB16 one again.
+ /* For many Huawei devices the NTB32 mode is the default and the best mode
+ * they work with. Huawei E5785 and E5885 devices refuse to work in NTB16 mode at all.
*/
- drvflags |= CDC_NCM_FLAG_RESET_NTB16;
+ drvflags |= CDC_NCM_FLAG_PREFER_NTB32;
+
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 8783e2ab3ec0..0ef7e1f443e3 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -54,6 +54,7 @@ static const char driver_name[] = "pegasus";
#undef PEGASUS_WRITE_EEPROM
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
+#define CARRIER_CHECK_DELAY (2 * HZ)
static bool loopback;
static bool mii_mode;
@@ -1089,17 +1090,12 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg81, 2);
}
-
-static int pegasus_count;
-static struct workqueue_struct *pegasus_workqueue;
-#define CARRIER_CHECK_DELAY (2 * HZ)
-
static void check_carrier(struct work_struct *work)
{
pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
set_carrier(pegasus->net);
if (!(pegasus->flags & PEGASUS_UNPLUG)) {
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
}
}
@@ -1120,18 +1116,6 @@ static int pegasus_blacklisted(struct usb_device *udev)
return 0;
}
-/* we rely on probe() and remove() being serialized so we
- * don't need extra locking on pegasus_count.
- */
-static void pegasus_dec_workqueue(void)
-{
- pegasus_count--;
- if (pegasus_count == 0) {
- destroy_workqueue(pegasus_workqueue);
- pegasus_workqueue = NULL;
- }
-}
-
static int pegasus_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -1144,14 +1128,6 @@ static int pegasus_probe(struct usb_interface *intf,
if (pegasus_blacklisted(dev))
return -ENODEV;
- if (pegasus_count == 0) {
- pegasus_workqueue = alloc_workqueue("pegasus", WQ_MEM_RECLAIM,
- 0);
- if (!pegasus_workqueue)
- return -ENOMEM;
- }
- pegasus_count++;
-
net = alloc_etherdev(sizeof(struct pegasus));
if (!net)
goto out;
@@ -1209,7 +1185,7 @@ static int pegasus_probe(struct usb_interface *intf,
res = register_netdev(net);
if (res)
goto out3;
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
dev_info(&intf->dev, "%s, %s, %pM\n", net->name,
usb_dev_id[dev_index].name, net->dev_addr);
@@ -1222,7 +1198,6 @@ out2:
out1:
free_netdev(net);
out:
- pegasus_dec_workqueue();
return res;
}
@@ -1237,7 +1212,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
}
pegasus->flags |= PEGASUS_UNPLUG;
- cancel_delayed_work(&pegasus->carrier_check);
+ cancel_delayed_work_sync(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
@@ -1246,7 +1221,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
pegasus->rx_skb = NULL;
}
free_netdev(pegasus->net);
- pegasus_dec_workqueue();
}
static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
@@ -1254,7 +1228,7 @@ static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
struct pegasus *pegasus = usb_get_intfdata(intf);
netif_device_detach(pegasus->net);
- cancel_delayed_work(&pegasus->carrier_check);
+ cancel_delayed_work_sync(&pegasus->carrier_check);
if (netif_running(pegasus->net)) {
usb_kill_urb(pegasus->rx_urb);
usb_kill_urb(pegasus->intr_urb);
@@ -1276,7 +1250,7 @@ static int pegasus_resume(struct usb_interface *intf)
pegasus->intr_urb->actual_length = 0;
intr_callback(pegasus->intr_urb);
}
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
return 0;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6c738a271257..4a2c7355be63 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1324,6 +1324,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
@@ -1359,6 +1360,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 95b19ce96513..c8c873a613b6 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -891,7 +891,7 @@ struct fw_block {
struct fw_header {
u8 checksum[32];
char version[RTL_VER_SIZE];
- struct fw_block blocks[0];
+ struct fw_block blocks[];
} __packed;
/**
@@ -930,7 +930,7 @@ struct fw_mac {
__le32 reserved;
__le16 fw_ver_reg;
u8 fw_ver_data;
- char info[0];
+ char info[];
} __packed;
/**
@@ -982,7 +982,7 @@ struct fw_phy_nc {
__le16 bp_start;
__le16 bp_num;
__le16 bp[4];
- char info[0];
+ char info[];
} __packed;
enum rtl_fw_type {
@@ -1948,29 +1948,6 @@ drop:
}
}
-/* msdn_giant_send_check()
- * According to the document of microsoft, the TCP Pseudo Header excludes the
- * packet length for IPv6 TCP large packets.
- */
-static int msdn_giant_send_check(struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct tcphdr *th;
- int ret;
-
- ret = skb_cow_head(skb, 0);
- if (ret)
- return ret;
-
- ipv6h = ipv6_hdr(skb);
- th = tcp_hdr(skb);
-
- th->check = 0;
- th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
-
- return ret;
-}
-
static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
{
if (skb_vlan_tag_present(skb)) {
@@ -2016,10 +1993,11 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
break;
case htons(ETH_P_IPV6):
- if (msdn_giant_send_check(skb)) {
+ if (skb_cow_head(skb, 0)) {
ret = TX_CSUM_TSO;
goto unavailable;
}
+ tcp_v6_gso_csum_prep(skb);
opts1 |= GTSENDV6;
break;
@@ -6375,6 +6353,7 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
}
static const struct ethtool_ops ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = rtl8152_get_drvinfo,
.get_link = ethtool_op_get_link,
.nway_reset = rtl8152_nway_reset,
@@ -6901,6 +6880,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
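
The r8152 hunks drop the driver's open-coded msdn_giant_send_check() in favor of the common tcp_v6_gso_csum_prep() helper, which seeds the TCP checksum with the IPv6 pseudo-header sum (payload length excluded) exactly as the removed function did; only the skb_cow_head() step stays with the caller. A sketch of the resulting IPv6 TSO preparation, assuming the transmit path has already classified the packet as IPv6 TCP:

    #include <linux/skbuff.h>
    #include <net/ip6_checksum.h>

    static int prep_ipv6_tso(struct sk_buff *skb)
    {
        int ret = skb_cow_head(skb, 0);   /* headers must be writable */

        if (ret)
            return ret;
        /* Seeds th->check from the IPv6 pseudo-header, payload length
         * excluded, as TSO-capable hardware expects. */
        tcp_v6_gso_csum_prep(skb);
        return 0;
    }
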
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d4cbb9e8c63f..aece0e5eec8c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -34,16 +34,23 @@
#define VETH_RING_SIZE 256
#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
-/* Separating two types of XDP xmit */
-#define VETH_XDP_TX BIT(0)
-#define VETH_XDP_REDIR BIT(1)
-
#define VETH_XDP_TX_BULK_SIZE 16
+struct veth_stats {
+ u64 rx_drops;
+ /* xdp */
+ u64 xdp_packets;
+ u64 xdp_bytes;
+ u64 xdp_redirect;
+ u64 xdp_drops;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+ u64 peer_tq_xdp_xmit;
+ u64 peer_tq_xdp_xmit_err;
+};
+
struct veth_rq_stats {
- u64 xdp_packets;
- u64 xdp_bytes;
- u64 xdp_drops;
+ struct veth_stats vs;
struct u64_stats_sync syncp;
};
@@ -80,16 +87,27 @@ struct veth_q_stat_desc {
size_t offset;
};
-#define VETH_RQ_STAT(m) offsetof(struct veth_rq_stats, m)
+#define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)
static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
{ "xdp_packets", VETH_RQ_STAT(xdp_packets) },
{ "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
+ { "drops", VETH_RQ_STAT(rx_drops) },
+ { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
{ "xdp_drops", VETH_RQ_STAT(xdp_drops) },
+ { "xdp_tx", VETH_RQ_STAT(xdp_tx) },
+ { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
};
#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
+static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
+ { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) },
+ { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
+};
+
+#define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc)
+
static struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
@@ -124,11 +142,19 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
snprintf(p, ETH_GSTRING_LEN,
- "rx_queue_%u_%.11s",
+ "rx_queue_%u_%.18s",
i, veth_rq_stats_desc[j].desc);
p += ETH_GSTRING_LEN;
}
}
+ for (i = 0; i < dev->real_num_tx_queues; i++) {
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "tx_queue_%u_%.18s",
+ i, veth_tq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
@@ -138,7 +164,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(ethtool_stats_keys) +
- VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
+ VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
+ VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
default:
return -EOPNOTSUPP;
}
@@ -147,7 +174,7 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
static void veth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct veth_priv *priv = netdev_priv(dev);
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
int i, j, idx;
@@ -155,7 +182,7 @@ static void veth_get_ethtool_stats(struct net_device *dev,
idx = 1;
for (i = 0; i < dev->real_num_rx_queues; i++) {
const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
- const void *stats_base = (void *)rq_stats;
+ const void *stats_base = (void *)&rq_stats->vs;
unsigned int start;
size_t offset;
@@ -168,6 +195,26 @@ static void veth_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
idx += VETH_RQ_STATS_LEN;
}
+
+ if (!peer)
+ return;
+
+ rcv_priv = netdev_priv(peer);
+ for (i = 0; i < peer->real_num_rx_queues; i++) {
+ const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
+ const void *base = (void *)&rq_stats->vs;
+ unsigned int start, tx_idx = idx;
+ size_t offset;
+
+ tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
+ do {
+ start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+ offset = veth_tq_stats_desc[j].offset;
+ data[tx_idx + j] += *(u64 *)(base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+ }
}
static const struct ethtool_ops veth_ethtool_ops = {
@@ -283,28 +330,34 @@ static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
return atomic64_read(&priv->dropped);
}
-static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
+static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int i;
+ result->peer_tq_xdp_xmit_err = 0;
result->xdp_packets = 0;
+ result->xdp_tx_err = 0;
result->xdp_bytes = 0;
- result->xdp_drops = 0;
+ result->rx_drops = 0;
for (i = 0; i < dev->num_rx_queues; i++) {
+ u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
struct veth_rq_stats *stats = &priv->rq[i].stats;
- u64 packets, bytes, drops;
unsigned int start;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->xdp_packets;
- bytes = stats->xdp_bytes;
- drops = stats->xdp_drops;
+ peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
+ xdp_tx_err = stats->vs.xdp_tx_err;
+ packets = stats->vs.xdp_packets;
+ bytes = stats->vs.xdp_bytes;
+ drops = stats->vs.rx_drops;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
+ result->xdp_tx_err += xdp_tx_err;
result->xdp_packets += packets;
result->xdp_bytes += bytes;
- result->xdp_drops += drops;
+ result->rx_drops += drops;
}
}
@@ -313,7 +366,7 @@ static void veth_get_stats64(struct net_device *dev,
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct veth_rq_stats rx;
+ struct veth_stats rx;
u64 packets, bytes;
tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
@@ -321,7 +374,8 @@ static void veth_get_stats64(struct net_device *dev,
tot->tx_packets = packets;
veth_stats_rx(&rx, dev);
- tot->rx_dropped = rx.xdp_drops;
+ tot->tx_dropped += rx.xdp_tx_err;
+ tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
tot->rx_bytes = rx.xdp_bytes;
tot->rx_packets = rx.xdp_packets;
@@ -333,6 +387,8 @@ static void veth_get_stats64(struct net_device *dev,
tot->rx_packets += packets;
veth_stats_rx(&rx, peer);
+ tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
+ tot->rx_dropped += rx.xdp_tx_err;
tot->tx_bytes += rx.xdp_bytes;
tot->tx_packets += rx.xdp_packets;
}
@@ -369,25 +425,22 @@ static int veth_select_rxq(struct net_device *dev)
}
static int veth_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames, u32 flags)
+ struct xdp_frame **frames,
+ u32 flags, bool ndo_xmit)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ int i, ret = -ENXIO, drops = 0;
struct net_device *rcv;
- int i, ret, drops = n;
unsigned int max_len;
struct veth_rq *rq;
- rcu_read_lock();
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
- ret = -EINVAL;
- goto drop;
- }
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+ rcu_read_lock();
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv)) {
- ret = -ENXIO;
- goto drop;
- }
+ if (unlikely(!rcv))
+ goto out;
rcv_priv = netdev_priv(rcv);
rq = &rcv_priv->rq[veth_select_rxq(rcv)];
@@ -395,12 +448,9 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
* side. This means an XDP program is loaded on the peer and the peer
* device is up.
*/
- if (!rcu_access_pointer(rq->xdp_prog)) {
- ret = -ENXIO;
- goto drop;
- }
+ if (!rcu_access_pointer(rq->xdp_prog))
+ goto out;
- drops = 0;
max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
spin_lock(&rq->xdp_ring.producer_lock);
@@ -419,59 +469,80 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- if (likely(!drops)) {
- rcu_read_unlock();
- return n;
+ ret = n - drops;
+ if (ndo_xmit) {
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.vs.peer_tq_xdp_xmit += n - drops;
+ rq->stats.vs.peer_tq_xdp_xmit_err += drops;
+ u64_stats_update_end(&rq->stats.syncp);
}
- ret = n - drops;
-drop:
+out:
rcu_read_unlock();
- atomic64_add(drops, &priv->dropped);
return ret;
}
-static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ int err;
+
+ err = veth_xdp_xmit(dev, n, frames, flags, true);
+ if (err < 0) {
+ struct veth_priv *priv = netdev_priv(dev);
+
+ atomic64_add(n, &priv->dropped);
+ }
+
+ return err;
+}
+
+static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
int sent, i, err = 0;
- sent = veth_xdp_xmit(dev, bq->count, bq->q, 0);
+ sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
if (sent < 0) {
err = sent;
sent = 0;
for (i = 0; i < bq->count; i++)
xdp_return_frame(bq->q[i]);
}
- trace_xdp_bulk_tx(dev, sent, bq->count - sent, err);
+ trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
+
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.vs.xdp_tx += sent;
+ rq->stats.vs.xdp_tx_err += bq->count - sent;
+ u64_stats_update_end(&rq->stats.syncp);
bq->count = 0;
}
-static void veth_xdp_flush(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
- struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
struct net_device *rcv;
- struct veth_rq *rq;
+ struct veth_rq *rcv_rq;
rcu_read_lock();
- veth_xdp_flush_bq(dev, bq);
+ veth_xdp_flush_bq(rq, bq);
rcv = rcu_dereference(priv->peer);
if (unlikely(!rcv))
goto out;
rcv_priv = netdev_priv(rcv);
- rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+ rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
/* xdp_ring is initialized on receive side? */
- if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
+ if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
goto out;
- __veth_xdp_flush(rq);
+ __veth_xdp_flush(rcv_rq);
out:
rcu_read_unlock();
}
-static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
+static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
struct veth_xdp_tx_bq *bq)
{
struct xdp_frame *frame = convert_to_xdp_frame(xdp);
@@ -480,7 +551,7 @@ static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
return -EOVERFLOW;
if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
- veth_xdp_flush_bq(dev, bq);
+ veth_xdp_flush_bq(rq, bq);
bq->q[bq->count++] = frame;
@@ -489,8 +560,8 @@ static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
struct xdp_frame *frame,
- unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
void *hard_start = frame->data - frame->headroom;
void *head = hard_start - sizeof(struct xdp_frame);
@@ -523,12 +594,13 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
orig_frame = *frame;
xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
- if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+ if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_TX;
+ stats->xdp_tx++;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
@@ -537,9 +609,10 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
xdp.rxq->mem = frame->mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
frame = &orig_frame;
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_REDIR;
+ stats->xdp_redirect++;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -549,6 +622,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
trace_xdp_exception(rq->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
+ stats->xdp_drops++;
goto err_xdp;
}
}
@@ -558,6 +632,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
skb = veth_build_skb(head, headroom, len, 0);
if (!skb) {
xdp_return_frame(frame);
+ stats->rx_drops++;
goto err;
}
@@ -573,9 +648,10 @@ xdp_xmit:
return NULL;
}
-static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
- unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
+ struct sk_buff *skb,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
u32 pktlen, headroom, act, metalen;
void *orig_data, *orig_data_end;
@@ -651,20 +727,23 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
get_page(virt_to_page(xdp.data));
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
- if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+ if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_TX;
+ stats->xdp_tx++;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
get_page(virt_to_page(xdp.data));
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
- if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
+ if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ stats->rx_drops++;
goto err_xdp;
- *xdp_xmit |= VETH_XDP_REDIR;
+ }
+ stats->xdp_redirect++;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -674,7 +753,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
trace_xdp_exception(rq->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
- goto drop;
+ stats->xdp_drops++;
+ goto xdp_drop;
}
rcu_read_unlock();
@@ -696,6 +776,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
out:
return skb;
drop:
+ stats->rx_drops++;
+xdp_drop:
rcu_read_unlock();
kfree_skb(skb);
return NULL;
@@ -706,14 +788,14 @@ xdp_xmit:
return NULL;
}
-static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+static int veth_xdp_rcv(struct veth_rq *rq, int budget,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
- int i, done = 0, drops = 0, bytes = 0;
+ int i, done = 0;
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
- unsigned int xdp_xmit_one = 0;
struct sk_buff *skb;
if (!ptr)
@@ -722,27 +804,26 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
if (veth_is_xdp_frame(ptr)) {
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
- bytes += frame->len;
- skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one, bq);
+ stats->xdp_bytes += frame->len;
+ skb = veth_xdp_rcv_one(rq, frame, bq, stats);
} else {
skb = ptr;
- bytes += skb->len;
- skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one, bq);
+ stats->xdp_bytes += skb->len;
+ skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
}
- *xdp_xmit |= xdp_xmit_one;
if (skb)
napi_gro_receive(&rq->xdp_napi, skb);
- else if (!xdp_xmit_one)
- drops++;
done++;
}
u64_stats_update_begin(&rq->stats.syncp);
- rq->stats.xdp_packets += done;
- rq->stats.xdp_bytes += bytes;
- rq->stats.xdp_drops += drops;
+ rq->stats.vs.xdp_redirect += stats->xdp_redirect;
+ rq->stats.vs.xdp_bytes += stats->xdp_bytes;
+ rq->stats.vs.xdp_drops += stats->xdp_drops;
+ rq->stats.vs.rx_drops += stats->rx_drops;
+ rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
return done;
@@ -752,14 +833,14 @@ static int veth_poll(struct napi_struct *napi, int budget)
{
struct veth_rq *rq =
container_of(napi, struct veth_rq, xdp_napi);
- unsigned int xdp_xmit = 0;
+ struct veth_stats stats = {};
struct veth_xdp_tx_bq bq;
int done;
bq.count = 0;
xdp_set_return_frame_no_direct();
- done = veth_xdp_rcv(rq, budget, &xdp_xmit, &bq);
+ done = veth_xdp_rcv(rq, budget, &bq, &stats);
if (done < budget && napi_complete_done(napi, done)) {
/* Write rx_notify_masked before reading ptr_ring */
@@ -770,9 +851,9 @@ static int veth_poll(struct napi_struct *napi, int budget)
}
}
- if (xdp_xmit & VETH_XDP_TX)
- veth_xdp_flush(rq->dev, &bq);
- if (xdp_xmit & VETH_XDP_REDIR)
+ if (stats.xdp_tx > 0)
+ veth_xdp_flush(rq, &bq);
+ if (stats.xdp_redirect > 0)
xdp_do_flush();
xdp_clear_return_frame_no_direct();
@@ -1158,7 +1239,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
- .ndo_xdp_xmit = veth_xdp_xmit,
+ .ndo_xdp_xmit = veth_ndo_xdp_xmit,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
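
The veth rework folds all per-ring counters into one struct veth_stats so a single offsetof-based descriptor table can drive both the rx and the new tx ethtool strings, while readers keep the usual u64_stats seqcount retry loop. A self-contained sketch of that reader pattern, using a hypothetical mirror of the patched struct:

    #include <linux/u64_stats_sync.h>

    struct rq_stats_like {            /* stands in for struct veth_rq_stats */
        u64 xdp_packets;
        struct u64_stats_sync syncp;
    };

    /* Snapshot a per-queue counter; retry if a writer raced with the read. */
    static u64 read_xdp_packets(struct rq_stats_like *s)
    {
        unsigned int start;
        u64 val;

        do {
            start = u64_stats_fetch_begin_irq(&s->syncp);
            val = s->xdp_packets;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));

        return val;
    }
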
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2fe7a3188282..ce07f52d89e7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -371,7 +371,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
unsigned int len, unsigned int truesize,
- bool hdr_valid)
+ bool hdr_valid, unsigned int metasize)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -393,6 +393,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
+ /* hdr_valid means no XDP, so we can copy the vnet header */
if (hdr_valid)
memcpy(hdr, p, hdr_len);
@@ -405,6 +406,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
copy = skb_tailroom(skb);
skb_put_data(skb, p, copy);
+ if (metasize) {
+ __skb_pull(skb, metasize);
+ skb_metadata_set(skb, metasize);
+ }
+
len -= copy;
offset += copy;
@@ -450,10 +456,6 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct virtio_net_hdr_mrg_rxbuf *hdr;
int err;
- /* virtqueue want to use data area in-front of packet */
- if (unlikely(xdpf->metasize > 0))
- return -EOPNOTSUPP;
-
if (unlikely(xdpf->headroom < vi->hdr_len))
return -EOVERFLOW;
@@ -644,6 +646,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
unsigned int delta = 0;
struct page *xdp_page;
int err;
+ unsigned int metasize = 0;
len -= vi->hdr_len;
stats->bytes += len;
@@ -683,8 +686,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
xdp.data = xdp.data_hard_start + xdp_headroom;
- xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -695,6 +698,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
/* Recalculate length in case bpf program changed it */
delta = orig_data - xdp.data;
len = xdp.data_end - xdp.data;
+ metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
stats->xdp_tx++;
@@ -735,10 +739,13 @@ static struct sk_buff *receive_small(struct net_device *dev,
}
skb_reserve(skb, headroom - delta);
skb_put(skb, len);
- if (!delta) {
+ if (!xdp_prog) {
buf += header_offset;
memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
- } /* keep zeroed vnet hdr since packet was changed by bpf */
+ } /* keep zeroed vnet hdr since XDP is loaded */
+
+ if (metasize)
+ skb_metadata_set(skb, metasize);
err:
return skb;
@@ -760,8 +767,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
- struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
- PAGE_SIZE, true);
+ struct sk_buff *skb =
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -793,6 +800,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
unsigned int truesize;
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
int err;
+ unsigned int metasize = 0;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
@@ -839,8 +847,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
data = page_address(xdp_page) + offset;
xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp.data = data + vi->hdr_len;
- xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + (len - vi->hdr_len);
+ xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -848,24 +856,27 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
switch (act) {
case XDP_PASS:
+ metasize = xdp.data - xdp.data_meta;
+
/* recalculate offset to account for any header
- * adjustments. Note other cases do not build an
- * skb and avoid using offset
+ * adjustments, minus the metasize, so that page_to_skb()
+ * copies the metadata as well. Note other cases do not
+ * build an skb and avoid using offset
*/
- offset = xdp.data -
- page_address(xdp_page) - vi->hdr_len;
+ offset = xdp.data - page_address(xdp_page) -
+ vi->hdr_len - metasize;
- /* recalculate len if xdp.data or xdp.data_end were
- * adjusted
+ /* recalculate len if xdp.data, xdp.data_end or
+ * xdp.data_meta were adjusted
*/
- len = xdp.data_end - xdp.data + vi->hdr_len;
+ len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
put_page(page);
- head_skb = page_to_skb(vi, rq, xdp_page,
- offset, len,
- PAGE_SIZE, false);
+ head_skb = page_to_skb(vi, rq, xdp_page, offset,
+ len, PAGE_SIZE, false,
+ metasize);
return head_skb;
}
break;
@@ -921,7 +932,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_skb;
}
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
+ metasize);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -1231,9 +1243,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
break;
} while (rq->vq->num_free);
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
- u64_stats_update_begin(&rq->stats.syncp);
+ unsigned long flags;
+
+ flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
rq->stats.kicks++;
- u64_stats_update_end(&rq->stats.syncp);
+ u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
}
return !oom;
@@ -2166,48 +2180,13 @@ static void virtnet_get_channels(struct net_device *dev,
channels->other_count = 0;
}
-/* Check if the user is trying to change anything besides speed/duplex */
-static bool
-virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
-{
- struct ethtool_link_ksettings diff1 = *cmd;
- struct ethtool_link_ksettings diff2 = {};
-
- /* cmd is always set so we need to clear it, validate the port type
- * and also without autonegotiation we can ignore advertising
- */
- diff1.base.speed = 0;
- diff2.base.port = PORT_OTHER;
- ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
- diff1.base.duplex = 0;
- diff1.base.cmd = 0;
- diff1.base.link_mode_masks_nwords = 0;
-
- return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
- bitmap_empty(diff1.link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS) &&
- bitmap_empty(diff1.link_modes.advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS) &&
- bitmap_empty(diff1.link_modes.lp_advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
static int virtnet_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct virtnet_info *vi = netdev_priv(dev);
- u32 speed;
-
- speed = cmd->base.speed;
- /* don't allow custom speed and duplex */
- if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->base.duplex) ||
- !virtnet_validate_ethtool_cmd(cmd))
- return -EINVAL;
- vi->speed = speed;
- vi->duplex = cmd->base.duplex;
- return 0;
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &vi->speed, &vi->duplex);
}
static int virtnet_get_link_ksettings(struct net_device *dev,
@@ -2225,23 +2204,14 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_SCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
int i, napi_weight;
- if (ec->tx_max_coalesced_frames > 1)
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
return -EINVAL;
- ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
-
- /* disallow changes to fields not explicitly tested above */
- if (memcmp(ec, &ec_default, sizeof(ec_default)))
- return -EINVAL;
-
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -2296,6 +2266,7 @@ static void virtnet_update_settings(struct virtnet_info *vi)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
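
virtio_net now supports XDP metadata: instead of marking it invalid with xdp_set_data_meta_invalid(), the driver starts data_meta equal to data, lets the program grow the region with bpf_xdp_adjust_meta(), and carries the resulting size into the skb. A sketch of the driver-side fixup, assuming the skb payload was built starting at data_meta:

    #include <linux/skbuff.h>
    #include <net/xdp.h>

    static void fixup_meta(struct xdp_buff *xdp, struct sk_buff *skb)
    {
        /* Metadata, if the program added any, occupies [data_meta, data). */
        unsigned int metasize = xdp->data - xdp->data_meta;

        if (metasize) {
            __skb_pull(skb, metasize);        /* point skb->data at the payload */
            skb_metadata_set(skb, metasize);  /* record it for the stack */
        }
    }
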
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 18f152fa0068..722cb054a5cd 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -942,10 +942,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
IPPROTO_TCP, 0);
} else if (ctx->ipv6) {
- struct ipv6hdr *iph = ipv6_hdr(skb);
-
- tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
- IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
}
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 1e4b9ba70983..6528940ce5f3 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -780,27 +780,6 @@ vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
if (!VMXNET3_VERSION_GE_3(adapter))
return -EOPNOTSUPP;
- if (ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval) {
- return -EINVAL;
- }
-
if ((ec->rx_coalesce_usecs == 0) &&
(ec->use_adaptive_rx_coalesce == 0) &&
(ec->tx_max_coalesced_frames == 0) &&
@@ -891,6 +870,9 @@ done:
}
static const struct ethtool_ops vmxnet3_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
.get_regs = vmxnet3_get_regs,
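
The long field-by-field rejection block above disappears because the ethtool core now pre-validates coalesce requests against ops->supported_coalesce_params and returns -EOPNOTSUPP for anything else, so a driver only declares what it handles. A minimal sketch with hypothetical handlers:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int my_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
    {
        ec->rx_coalesce_usecs = 64;   /* hypothetical current setting */
        return 0;
    }

    static int my_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
    {
        /* Only fields named in supported_coalesce_params can be set here;
         * the core has already rejected every other nonzero field. */
        return 0;
    }

    static const struct ethtool_ops my_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
        .get_coalesce = my_get_coalesce,
        .set_coalesce = my_set_coalesce,
    };
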
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index b8228f50bc94..56f8aab46f89 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -188,8 +188,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
fl6.flowi6_proto = iph->nexthdr;
fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst == dst_null)
+ dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
+ if (IS_ERR(dst) || dst == dst_null)
goto err;
skb_dst_drop(skb);
@@ -474,7 +474,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
return skb;
- if (qdisc_tx_is_default(vrf_dev))
+ if (qdisc_tx_is_default(vrf_dev) ||
+ IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return vrf_ip6_out_direct(vrf_dev, sk, skb);
return vrf_ip6_out_redirect(vrf_dev, skb);
@@ -504,7 +505,7 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
static int vrf_rt6_create(struct net_device *dev)
{
- int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
+ int flags = DST_NOPOLICY | DST_NOXFRM;
struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
struct rt6_info *rt6;
@@ -686,7 +687,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
- if (qdisc_tx_is_default(vrf_dev))
+ if (qdisc_tx_is_default(vrf_dev) ||
+ IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return vrf_ip_out_direct(vrf_dev, sk, skb);
return vrf_ip_out_redirect(vrf_dev, skb);
@@ -739,7 +741,7 @@ static int vrf_rtable_create(struct net_device *dev)
return -ENOMEM;
/* create a dst for routing packets out through a VRF device */
- rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
+ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1);
if (!rth)
return -ENOMEM;
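
The vrf IPv6 output fix matters because the two lookup helpers fail differently: ip6_route_output() always returns a dst (errors are encoded in dst->error, hence the old dst_null comparison), while ip6_dst_lookup_flow() can return an ERR_PTR(), which must be tested before the dst is dereferenced. Sketch, assuming fl6 is already populated:

    #include <linux/err.h>
    #include <net/ipv6.h>

    static struct dst_entry *lookup_v6_dst(struct net *net, struct flowi6 *fl6)
    {
        struct dst_entry *dst = ip6_dst_lookup_flow(net, NULL, fl6, NULL);

        if (IS_ERR(dst))    /* error pointer, not a real dst */
            return NULL;
        return dst;         /* caller now owns a reference */
    }
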
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 45308b3350cf..a5b415fed11e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3144,7 +3144,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
if (id >= VXLAN_N_VID) {
- NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID],
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
"VXLAN ID must be lower than 16777216");
return -ERANGE;
}
@@ -3155,7 +3155,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
if (ntohs(p->high) < ntohs(p->low)) {
- NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
"Invalid source port range");
return -EINVAL;
}
@@ -3165,7 +3165,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
if (df < 0 || df > VXLAN_DF_MAX) {
- NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF],
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF],
"Invalid DF attribute");
return -EINVAL;
}
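
All three vxlan fixes are the same bug: the values being validated come from data[] (the IFLA_INFO_DATA attributes), so attaching the extack message to tb[] pointed at an unrelated, usually absent attribute. A tiny sketch of the corrected pattern:

    #include <linux/if_link.h>
    #include <net/netlink.h>
    #include <net/vxlan.h>

    static int validate_vni(struct nlattr *data[], struct netlink_ext_ack *extack)
    {
        u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

        if (id >= VXLAN_N_VID) {
            /* Blame the attribute that actually failed validation. */
            NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
                                "VXLAN ID must be lower than 16777216");
            return -ERANGE;
        }
        return 0;
    }
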
diff --git a/drivers/net/wan/.gitignore b/drivers/net/wan/.gitignore
index dae3ea6bb18c..247bfbf10912 100644
--- a/drivers/net/wan/.gitignore
+++ b/drivers/net/wan/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
wanxlfw.inc
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4530840e15ef..dbc0e3f7a3e2 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -200,7 +200,7 @@ config WANXL_BUILD_FIRMWARE
depends on WANXL && !PREVENT_FIRMWARE_BUILD
help
Allows you to rebuild firmware run by the QUICC processor.
- It requires as68k, ld68k and hexdump programs.
+ It requires an m68k toolchain and the hexdump program.
You should never need this option, say N.
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 701f5d2fe3b6..380271a011e4 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -40,21 +40,30 @@ $(obj)/wanxl.o: $(obj)/wanxlfw.inc
ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
ifeq ($(ARCH),m68k)
- AS68K = $(AS)
- LD68K = $(LD)
+ M68KCC = $(CC)
+ M68KLD = $(LD)
else
- AS68K = as68k
- LD68K = ld68k
+ M68KCC = $(CROSS_COMPILE_M68K)gcc
+ M68KLD = $(CROSS_COMPILE_M68K)ld
endif
-quiet_cmd_build_wanxlfw = BLD FW $@
- cmd_build_wanxlfw = \
- $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
- $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
- hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
- rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
+quiet_cmd_build_wanxlfw = BLDFW $@
+ cmd_build_wanxlfw = hexdump -ve '"\n" 16/1 "0x%02X,"' $< | \
+ sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' > $@
-$(obj)/wanxlfw.inc: $(src)/wanxlfw.S
- $(call if_changed_dep,build_wanxlfw)
-targets += wanxlfw.inc
+$(obj)/wanxlfw.inc: $(obj)/wanxlfw.bin FORCE
+ $(call if_changed,build_wanxlfw)
+
+quiet_cmd_m68kld_bin_o = M68KLD $@
+ cmd_m68kld_bin_o = $(M68KLD) --oformat binary -Ttext 0x1000 $< -o $@
+
+$(obj)/wanxlfw.bin: $(obj)/wanxlfw.o FORCE
+ $(call if_changed,m68kld_bin_o)
+
+quiet_cmd_m68kas_o_S = M68KAS $@
+ cmd_m68kas_o_S = $(M68KCC) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi -c -o $@ $<
+
+$(obj)/wanxlfw.o: $(src)/wanxlfw.S FORCE
+ $(call if_changed_dep,m68kas_o_S)
endif
+targets += wanxlfw.inc wanxlfw.bin wanxlfw.o
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
index 47b8e36f97ab..5f43568a9715 100644
--- a/drivers/net/wan/farsync.h
+++ b/drivers/net/wan/farsync.h
@@ -65,7 +65,7 @@
struct fstioc_write {
unsigned int size;
unsigned int offset;
- unsigned char data[0];
+ unsigned char data[];
};
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 23f93f1c815d..499f7cd19a4a 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -78,7 +78,7 @@ struct card {
struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
struct card_status *status; /* shared between host and card */
dma_addr_t status_address;
- struct port ports[0]; /* 1 - 4 port structures follow */
+ struct port ports[]; /* 1 - 4 port structures follow */
};
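
Several structures in this series (the r8152 firmware headers, farsync, wanxl here, adm8211 below) move from the GCC zero-length-array extension `[0]` to a C99 flexible array member `[]`. The layout is identical, but the compiler can now diagnose accidental sizeof or assignment misuse. Allocation sketch with a hypothetical struct, using the overflow-safe struct_size() helper:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct card_like {
        int nports;
        struct { int id; } ports[];   /* flexible array member, was ports[0] */
    };

    static struct card_like *alloc_card(int nports)
    {
        /* struct_size() = header plus nports trailing elements, overflow-checked */
        struct card_like *c = kzalloc(struct_size(c, ports, nports), GFP_KERNEL);

        if (c)
            c->nports = nports;
        return c;
    }
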
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index f66c0f8f6f4a..ecb3fccca603 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -740,9 +740,6 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
static
int i2400m_bm_buf_alloc(struct i2400m *i2400m)
{
- int result;
-
- result = -ENOMEM;
i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
if (i2400m->bm_cmd_buf == NULL)
goto error_bm_cmd_kzalloc;
@@ -754,7 +751,7 @@ int i2400m_bm_buf_alloc(struct i2400m *i2400m)
error_bm_ack_buf_kzalloc:
kfree(i2400m->bm_cmd_buf);
error_bm_cmd_kzalloc:
- return result;
+ return -ENOMEM;
}
@@ -843,7 +840,7 @@ EXPORT_SYMBOL_GPL(i2400m_reset);
*/
int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
{
- int result = -ENODEV;
+ int result;
struct device *dev = i2400m_dev(i2400m);
struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 529ebca1e9e1..1f7709d24f35 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -354,6 +354,7 @@ out:
usb_autopm_put_interface(i2400mu->usb_iface);
d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n",
i2400m, ack, ack_size, (long) result);
+ usb_put_urb(&notif_urb);
return result;
error_exceeded:
diff --git a/drivers/net/wireguard/messages.h b/drivers/net/wireguard/messages.h
index b8a7b9ce32ba..208da72673fc 100644
--- a/drivers/net/wireguard/messages.h
+++ b/drivers/net/wireguard/messages.h
@@ -32,7 +32,7 @@ enum cookie_values {
};
enum counter_values {
- COUNTER_BITS_TOTAL = 2048,
+ COUNTER_BITS_TOTAL = 8192,
COUNTER_REDUNDANT_BITS = BITS_PER_LONG,
COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS
};
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index 708dc61c974f..626433690abb 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -104,6 +104,7 @@ static struct noise_keypair *keypair_create(struct wg_peer *peer)
if (unlikely(!keypair))
return NULL;
+ spin_lock_init(&keypair->receiving_counter.lock);
keypair->internal_id = atomic64_inc_return(&keypair_counter);
keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
keypair->entry.peer = peer;
@@ -358,25 +359,16 @@ out:
memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
}
-static void symmetric_key_init(struct noise_symmetric_key *key)
-{
- spin_lock_init(&key->counter.receive.lock);
- atomic64_set(&key->counter.counter, 0);
- memset(key->counter.receive.backtrack, 0,
- sizeof(key->counter.receive.backtrack));
- key->birthdate = ktime_get_coarse_boottime_ns();
- key->is_valid = true;
-}
-
static void derive_keys(struct noise_symmetric_key *first_dst,
struct noise_symmetric_key *second_dst,
const u8 chaining_key[NOISE_HASH_LEN])
{
+ u64 birthdate = ktime_get_coarse_boottime_ns();
kdf(first_dst->key, second_dst->key, NULL, NULL,
NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
chaining_key);
- symmetric_key_init(first_dst);
- symmetric_key_init(second_dst);
+ first_dst->birthdate = second_dst->birthdate = birthdate;
+ first_dst->is_valid = second_dst->is_valid = true;
}
static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
@@ -715,6 +707,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
u8 e[NOISE_PUBLIC_KEY_LEN];
u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
u8 static_private[NOISE_PUBLIC_KEY_LEN];
+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
down_read(&wg->static_identity.lock);
@@ -733,6 +726,8 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
memcpy(ephemeral_private, handshake->ephemeral_private,
NOISE_PUBLIC_KEY_LEN);
+ memcpy(preshared_key, handshake->preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
up_read(&handshake->lock);
if (state != HANDSHAKE_CREATED_INITIATION)
@@ -750,7 +745,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
goto fail;
/* psk */
- mix_psk(chaining_key, hash, key, handshake->preshared_key);
+ mix_psk(chaining_key, hash, key, preshared_key);
/* {} */
if (!message_decrypt(NULL, src->encrypted_nothing,
@@ -783,6 +778,7 @@ out:
memzero_explicit(chaining_key, NOISE_HASH_LEN);
memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
+ memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN);
up_read(&wg->static_identity.lock);
return ret_peer;
}
diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h
index f532d59d3f19..c527253dba80 100644
--- a/drivers/net/wireguard/noise.h
+++ b/drivers/net/wireguard/noise.h
@@ -15,18 +15,14 @@
#include <linux/mutex.h>
#include <linux/kref.h>
-union noise_counter {
- struct {
- u64 counter;
- unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
- spinlock_t lock;
- } receive;
- atomic64_t counter;
+struct noise_replay_counter {
+ u64 counter;
+ spinlock_t lock;
+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
};
struct noise_symmetric_key {
u8 key[NOISE_SYMMETRIC_KEY_LEN];
- union noise_counter counter;
u64 birthdate;
bool is_valid;
};
@@ -34,7 +30,9 @@ struct noise_symmetric_key {
struct noise_keypair {
struct index_hashtable_entry entry;
struct noise_symmetric_key sending;
+ atomic64_t sending_counter;
struct noise_symmetric_key receiving;
+ struct noise_replay_counter receiving_counter;
__le32 remote_index;
bool i_am_the_initiator;
struct kref refcount;
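
The old union noise_counter overlaid the atomic send counter and the receive replay window inside each struct noise_symmetric_key, so the same memory meant two different things depending on direction. After this split the keypair owns one atomic64_t for transmit and one locked replay window for receive, and the two can no longer alias. The transmit-side access pattern, per the patched code (types from noise.h above):

    #include <linux/atomic.h>

    /* Transmit: each packet claims the next nonce atomically. */
    static u64 next_nonce(struct noise_keypair *keypair)
    {
        return atomic64_inc_return(&keypair->sending_counter) - 1;
    }

The receive side goes through counter_validate(&keypair->receiving_counter, nonce) in receive.c, which serializes on receiving_counter.lock, now initialized once in keypair_create().
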
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 5c964fcb994e..71b8e80b58e1 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
if (multicore) {
queue->worker = wg_packet_percpu_multicore_worker_alloc(
function, queue);
- if (!queue->worker)
+ if (!queue->worker) {
+ ptr_ring_cleanup(&queue->ring, NULL);
return -ENOMEM;
+ }
} else {
INIT_WORK(&queue->work, function);
}
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 3432232afe06..c58df439dbbe 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -87,12 +87,20 @@ static inline bool wg_check_packet_protocol(struct sk_buff *skb)
return real_protocol && skb->protocol == real_protocol;
}
-static inline void wg_reset_packet(struct sk_buff *skb)
+static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
+ u8 l4_hash = skb->l4_hash;
+ u8 sw_hash = skb->sw_hash;
+ u32 hash = skb->hash;
skb_scrub_packet(skb, true);
memset(&skb->headers_start, 0,
offsetof(struct sk_buff, headers_end) -
offsetof(struct sk_buff, headers_start));
+ if (encapsulating) {
+ skb->l4_hash = l4_hash;
+ skb->sw_hash = sw_hash;
+ skb->hash = hash;
+ }
skb->queue_mapping = 0;
skb->nohdr = 0;
skb->peeked = 0;
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index da3b782ab7d3..91438144e4f7 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -226,40 +226,39 @@ void wg_packet_handshake_receive_worker(struct work_struct *work)
static void keep_key_fresh(struct wg_peer *peer)
{
struct noise_keypair *keypair;
- bool send = false;
+ bool send;
if (peer->sent_lastminute_handshake)
return;
rcu_read_lock_bh();
keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
- keypair->i_am_the_initiator &&
- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
- REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
- send = true;
+ send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+ keypair->i_am_the_initiator &&
+ wg_birthdate_has_expired(keypair->sending.birthdate,
+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
rcu_read_unlock_bh();
- if (send) {
+ if (unlikely(send)) {
peer->sent_lastminute_handshake = true;
wg_packet_send_queued_handshake_initiation(peer, false);
}
}
-static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
+static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
struct scatterlist sg[MAX_SKB_FRAGS + 8];
struct sk_buff *trailer;
unsigned int offset;
int num_frags;
- if (unlikely(!key))
+ if (unlikely(!keypair))
return false;
- if (unlikely(!READ_ONCE(key->is_valid) ||
- wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
- key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
- WRITE_ONCE(key->is_valid, false);
+ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
+ wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
+ keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
+ WRITE_ONCE(keypair->receiving.is_valid, false);
return false;
}
@@ -284,7 +283,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
PACKET_CB(skb)->nonce,
- key->key))
+ keypair->receiving.key))
return false;
/* Another ugly situation of pushing and pulling the header so as to
@@ -299,41 +298,41 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
}
/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
-static bool counter_validate(union noise_counter *counter, u64 their_counter)
+static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter)
{
unsigned long index, index_current, top, i;
bool ret = false;
- spin_lock_bh(&counter->receive.lock);
+ spin_lock_bh(&counter->lock);
- if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 ||
+ if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
their_counter >= REJECT_AFTER_MESSAGES))
goto out;
++their_counter;
if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
- counter->receive.counter))
+ counter->counter))
goto out;
index = their_counter >> ilog2(BITS_PER_LONG);
- if (likely(their_counter > counter->receive.counter)) {
- index_current = counter->receive.counter >> ilog2(BITS_PER_LONG);
+ if (likely(their_counter > counter->counter)) {
+ index_current = counter->counter >> ilog2(BITS_PER_LONG);
top = min_t(unsigned long, index - index_current,
COUNTER_BITS_TOTAL / BITS_PER_LONG);
for (i = 1; i <= top; ++i)
- counter->receive.backtrack[(i + index_current) &
+ counter->backtrack[(i + index_current) &
((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
- counter->receive.counter = their_counter;
+ counter->counter = their_counter;
}
index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
- &counter->receive.backtrack[index]);
+ &counter->backtrack[index]);
out:
- spin_unlock_bh(&counter->receive.lock);
+ spin_unlock_bh(&counter->lock);
return ret;
}
@@ -393,13 +392,11 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
len = ntohs(ip_hdr(skb)->tot_len);
if (unlikely(len < sizeof(struct iphdr)))
goto dishonest_packet_size;
- if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
- IP_ECN_set_ce(ip_hdr(skb));
+ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
len = ntohs(ipv6_hdr(skb)->payload_len) +
sizeof(struct ipv6hdr);
- if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
- IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
} else {
goto dishonest_packet_type;
}
@@ -475,19 +472,19 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
if (unlikely(state != PACKET_STATE_CRYPTED))
goto next;
- if (unlikely(!counter_validate(&keypair->receiving.counter,
+ if (unlikely(!counter_validate(&keypair->receiving_counter,
PACKET_CB(skb)->nonce))) {
net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
peer->device->dev->name,
PACKET_CB(skb)->nonce,
- keypair->receiving.counter.receive.counter);
+ keypair->receiving_counter.counter);
goto next;
}
if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
goto next;
- wg_reset_packet(skb);
+ wg_reset_packet(skb, false);
wg_packet_consume_data_done(peer, skb, &endpoint);
free = false;
@@ -514,10 +511,12 @@ void wg_packet_decrypt_worker(struct work_struct *work)
struct sk_buff *skb;
while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
- enum packet_state state = likely(decrypt_packet(skb,
- &PACKET_CB(skb)->keypair->receiving)) ?
+ enum packet_state state =
+ likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
wg_queue_enqueue_per_peer_napi(skb, state);
+ if (need_resched())
+ cond_resched();
}
}
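
Both wireguard packet workers (decrypt above, encrypt in send.c below) now offer the scheduler a chance between items; on a non-preemptible kernel a long ptr_ring drain in a kworker could otherwise stall other work. The need_resched() guard keeps the common case to a single flag test, since cond_resched() is already a no-op when no reschedule is pending. Sketch with a hypothetical per-item handler:

    #include <linux/ptr_ring.h>
    #include <linux/sched.h>
    #include <linux/skbuff.h>

    static void process_one(struct sk_buff *skb)
    {
        consume_skb(skb);               /* hypothetical per-item work */
    }

    static void drain_ring(struct ptr_ring *ring)
    {
        struct sk_buff *skb;

        while ((skb = ptr_ring_consume_bh(ring)) != NULL) {
            process_one(skb);
            if (need_resched())         /* yield between items on long drains */
                cond_resched();
        }
    }
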
diff --git a/drivers/net/wireguard/selftest/counter.c b/drivers/net/wireguard/selftest/counter.c
index f4fbb9072ed7..ec3c156bf91b 100644
--- a/drivers/net/wireguard/selftest/counter.c
+++ b/drivers/net/wireguard/selftest/counter.c
@@ -6,18 +6,24 @@
#ifdef DEBUG
bool __init wg_packet_counter_selftest(void)
{
+ struct noise_replay_counter *counter;
unsigned int test_num = 0, i;
- union noise_counter counter;
bool success = true;
-#define T_INIT do { \
- memset(&counter, 0, sizeof(union noise_counter)); \
- spin_lock_init(&counter.receive.lock); \
+ counter = kmalloc(sizeof(*counter), GFP_KERNEL);
+ if (unlikely(!counter)) {
+ pr_err("nonce counter self-test malloc: FAIL\n");
+ return false;
+ }
+
+#define T_INIT do { \
+ memset(counter, 0, sizeof(*counter)); \
+ spin_lock_init(&counter->lock); \
} while (0)
#define T_LIM (COUNTER_WINDOW_SIZE + 1)
#define T(n, v) do { \
++test_num; \
- if (counter_validate(&counter, n) != (v)) { \
+ if (counter_validate(counter, n) != (v)) { \
pr_err("nonce counter self-test %u: FAIL\n", \
test_num); \
success = false; \
@@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(void)
if (success)
pr_info("nonce counter self-tests: pass\n");
+ kfree(counter);
return success;
}
#endif
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
index bcd6462e4540..007cd4457c5f 100644
--- a/drivers/net/wireguard/selftest/ratelimiter.c
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void)
enum { TRIALS_BEFORE_GIVING_UP = 5000 };
bool success = false;
int test = 0, trials;
- struct sk_buff *skb4, *skb6;
+ struct sk_buff *skb4, *skb6 = NULL;
struct iphdr *hdr4;
- struct ipv6hdr *hdr6;
+ struct ipv6hdr *hdr6 = NULL;
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 7348c10cbae3..f74b9341ab0f 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -124,20 +124,17 @@ void wg_packet_send_handshake_cookie(struct wg_device *wg,
static void keep_key_fresh(struct wg_peer *peer)
{
struct noise_keypair *keypair;
- bool send = false;
+ bool send;
rcu_read_lock_bh();
keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
- (unlikely(atomic64_read(&keypair->sending.counter.counter) >
- REKEY_AFTER_MESSAGES) ||
- (keypair->i_am_the_initiator &&
- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
- REKEY_AFTER_TIME)))))
- send = true;
+ send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+ (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES ||
+ (keypair->i_am_the_initiator &&
+ wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
rcu_read_unlock_bh();
- if (send)
+ if (unlikely(send))
wg_packet_send_queued_handshake_initiation(peer, false);
}
@@ -170,6 +167,11 @@ static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
struct sk_buff *trailer;
int num_frags;
+ /* Force hash calculation before encryption so that flow analysis is
+ * consistent over the inner packet.
+ */
+ skb_get_hash(skb);
+
/* Calculate lengths. */
padding_len = calculate_skb_padding(skb);
trailer_len = padding_len + noise_encrypted_len(0);
@@ -281,6 +283,8 @@ void wg_packet_tx_worker(struct work_struct *work)
wg_noise_keypair_put(keypair, false);
wg_peer_put(peer);
+ if (need_resched())
+ cond_resched();
}
}
@@ -296,7 +300,7 @@ void wg_packet_encrypt_worker(struct work_struct *work)
skb_list_walk_safe(first, skb, next) {
if (likely(encrypt_packet(skb,
PACKET_CB(first)->keypair))) {
- wg_reset_packet(skb);
+ wg_reset_packet(skb, true);
} else {
state = PACKET_STATE_DEAD;
break;
@@ -304,7 +308,8 @@ void wg_packet_encrypt_worker(struct work_struct *work)
}
wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
state);
-
+ if (need_resched())
+ cond_resched();
}
}
@@ -344,7 +349,6 @@ void wg_packet_purge_staged_packets(struct wg_peer *peer)
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
- struct noise_symmetric_key *key;
struct noise_keypair *keypair;
struct sk_buff_head packets;
struct sk_buff *skb;
@@ -364,10 +368,9 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
rcu_read_unlock_bh();
if (unlikely(!keypair))
goto out_nokey;
- key = &keypair->sending;
- if (unlikely(!READ_ONCE(key->is_valid)))
+ if (unlikely(!READ_ONCE(keypair->sending.is_valid)))
goto out_nokey;
- if (unlikely(wg_birthdate_has_expired(key->birthdate,
+ if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
REJECT_AFTER_TIME)))
goto out_invalid;
@@ -382,7 +385,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
*/
PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
PACKET_CB(skb)->nonce =
- atomic64_inc_return(&key->counter.counter) - 1;
+ atomic64_inc_return(&keypair->sending_counter) - 1;
if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
goto out_invalid;
}
@@ -394,7 +397,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
return;
out_invalid:
- WRITE_ONCE(key->is_valid, false);
+ WRITE_ONCE(keypair->sending.is_valid, false);
out_nokey:
wg_noise_keypair_put(keypair, false);
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index b0d6541582d3..f9018027fc13 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
goto err;
- } else if (unlikely(rt->dst.dev == skb->dev)) {
- ip_rt_put(rt);
- ret = -ELOOP;
- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
- wg->dev->name, &endpoint->addr);
- goto err;
}
if (cache)
dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
goto err;
- } else if (unlikely(dst->dev == skb->dev)) {
- dst_release(dst);
- ret = -ELOOP;
- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
- wg->dev->name, &endpoint->addr);
- goto err;
}
if (cache)
dst_cache_set_ip6(cache, dst, &fl.saddr);
diff --git a/drivers/net/wireless/admtek/adm8211.h b/drivers/net/wireless/admtek/adm8211.h
index 2c55c629de28..095625ecb8ff 100644
--- a/drivers/net/wireless/admtek/adm8211.h
+++ b/drivers/net/wireless/admtek/adm8211.h
@@ -531,7 +531,7 @@ struct adm8211_eeprom {
u8 lpf_cutoff[14]; /* 0x62 */
u8 lnags_threshold[14]; /* 0x70 */
__le16 checksum; /* 0x7E */
- u8 cis_data[0]; /* 0x80, 384 bytes */
+ u8 cis_data[]; /* 0x80, 384 bytes */
} __packed;
struct adm8211_priv {
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index ed87bc00f2aa..342a7e58018a 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -459,7 +459,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ar_ahb->mem_len = resource_size(res);
ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE,
- ATH10K_GCC_REG_SIZE);
+ ATH10K_GCC_REG_SIZE);
if (!ar_ahb->gcc_mem) {
ath10k_err(ar, "gcc mem ioremap error\n");
ret = -ENOMEM;
@@ -467,7 +467,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
}
ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE,
- ATH10K_TCSR_REG_SIZE);
+ ATH10K_TCSR_REG_SIZE);
if (!ar_ahb->tcsr_mem) {
ath10k_err(ar, "tcsr mem ioremap error\n");
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 5ec16ce19b69..f26cc6989dad 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -541,6 +541,33 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
},
{
+ .id = QCA9377_HW_1_1_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_SDIO,
+ .name = "qca9377 hw1.1 sdio",
+ .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 19,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA9377_HW_1_0_FW_DIR,
+ .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9377_BOARD_DATA_SZ,
+ .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_QCA9377_HL_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .uart_pin_workaround = true,
+ },
+ {
.id = QCA4019_HW_1_0_DEV_VERSION,
.dev_id = 0,
.bus = ATH10K_BUS_AHB,
@@ -874,6 +901,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
return -ENODATA;
}
+ if (ar->id.bmi_ids_valid) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot already acquired valid otp board id, skip download, board_id %d chip_id %d\n",
+ ar->id.bmi_board_id, ar->id.bmi_chip_id);
+ goto skip_otp_download;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot upload otp to 0x%x len %zd for board id\n",
address, ar->normal_mode_fw.fw_file.otp_len);
@@ -921,6 +955,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
ar->id.bmi_board_id = board_id;
ar->id.bmi_chip_id = chip_id;
+skip_otp_download:
+
return 0;
}
@@ -1052,11 +1088,11 @@ static int ath10k_download_fw(struct ath10k *ar)
}
memset(&latency_qos, 0, sizeof(latency_qos));
- pm_qos_add_request(&latency_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&latency_qos, 0);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
- pm_qos_remove_request(&latency_qos);
+ cpu_latency_qos_remove_request(&latency_qos);
return ret;
}
@@ -2119,6 +2155,40 @@ done:
return 0;
}
+static void ath10k_core_fetch_btcoex_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ u8 coex_support = 0;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ goto out;
+
+ ret = of_property_read_u8(node, "qcom,coexist-support", &coex_support);
+ if (ret) {
+ ar->coex_support = true;
+ goto out;
+ }
+
+ if (coex_support) {
+ ar->coex_support = true;
+ } else {
+ ar->coex_support = false;
+ ar->coex_gpio_pin = -1;
+ goto out;
+ }
+
+ ret = of_property_read_u32(node, "qcom,coexist-gpio-pin",
+ &ar->coex_gpio_pin);
+ if (ret)
+ ar->coex_gpio_pin = -1;
+
+out:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot coex_support %d coex_gpio_pin %d\n",
+ ar->coex_support, ar->coex_gpio_pin);
+}
+
static int ath10k_init_uart(struct ath10k *ar)
{
int ret;
@@ -2696,14 +2766,22 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+ ath10k_core_fetch_btcoex_dt(ar);
+
/* 10.4 firmware supports BT-Coex without reloading firmware
* via pdev param. To support Bluetooth coexistence pdev param,
* WMI_COEX_GPIO_SUPPORT of extended resource config should be
* enabled always.
+ *
+ * We can still enable BTCOEX if the firmware has the support,
+ * even though the btcoex support value is
+ * ATH10K_DT_BTCOEX_NOT_FOUND
*/
+
if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
- ar->running_fw->fw_file.fw_features))
+ ar->running_fw->fw_file.fw_features) &&
+ ar->coex_support)
val |= WMI_10_4_COEX_GPIO_SUPPORT;
if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
@@ -2863,6 +2941,8 @@ void ath10k_core_stop(struct ath10k *ar)
ath10k_htt_tx_stop(&ar->htt);
ath10k_htt_rx_free(&ar->htt);
ath10k_wmi_detach(ar);
+
+ ar->id.bmi_ids_valid = false;
}
EXPORT_SYMBOL(ath10k_core_stop);
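For readers of the core.c hunks above, the new ath10k_core_fetch_btcoex_dt() helper implements a tri-state devicetree lookup; a sketch of the possible outcomes, derived only from the code shown (property names as read by the patch):

/* Sketch, not part of the patch:
 *
 *   "qcom,coexist-support" absent  -> coex_support = true (keep old behaviour,
 *                                     firmware capability still decides)
 *   "qcom,coexist-support" = <0>   -> coex_support = false, coex_gpio_pin = -1
 *   "qcom,coexist-support" = <1>   -> coex_support = true, coex_gpio_pin taken
 *                                     from "qcom,coexist-gpio-pin" (-1 if absent)
 */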
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 5101bf2b5b15..bd8ef576c590 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -119,6 +119,7 @@ struct ath10k_skb_cb {
u16 airtime_est;
struct ieee80211_vif *vif;
struct ieee80211_txq *txq;
+ u32 ucast_cipher;
} __packed;
struct ath10k_skb_rxcb {
@@ -504,6 +505,7 @@ struct ath10k_sta {
struct work_struct update_wk;
u64 rx_duration;
struct ath10k_htt_tx_stats *tx_stats;
+ u32 ucast_cipher;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
@@ -1222,6 +1224,9 @@ struct ath10k {
struct ath10k_bus_params bus_param;
struct completion peer_delete_done;
+ bool coex_support;
+ int coex_gpio_pin;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index e000677ac516..f811e6940fb0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1978,6 +1978,9 @@ static ssize_t ath10k_write_btcoex(struct file *file,
if (strtobool(buf, &val) != 0)
return -EINVAL;
+ if (!ar->coex_support)
+ return -EOPNOTSUPP;
+
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON &&
@@ -2370,9 +2373,6 @@ static ssize_t ath10k_write_warm_hw_reset(struct file *file,
goto exit;
}
- if (!(test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)))
- ath10k_warn(ar, "wmi service for reset chip is not available\n");
-
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset,
WMI_RST_MODE_WARM_RESET);
@@ -2647,8 +2647,10 @@ int ath10k_debug_register(struct ath10k *ar)
ar->debug.debugfs_phy, ar,
&fops_tpc_stats_final);
- debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
- &fops_warm_hw_reset);
+ if (test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map))
+ debugfs_create_file("warm_hw_reset", 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_warm_hw_reset);
debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ps_state_enable);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 38a5814cf345..f883f2a724dd 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2744,7 +2744,8 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
continue;
}
- tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0);
+ tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
+ IEEE80211_QOS_CTL_TID_MASK;
tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a182c0944cc7..e9d12ea708b6 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1163,6 +1163,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int len = 0;
int msdu_id = -1;
int res;
+ const u8 *peer_addr;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
len += sizeof(cmd->hdr);
@@ -1178,7 +1179,16 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ peer_addr = hdr->addr1;
+ if (is_multicast_ether_addr(peer_addr)) {
+ skb_put(msdu, sizeof(struct ieee80211_mmie_16));
+ } else {
+ if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
+ skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
+ skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
+ else
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
}
txdesc = ath10k_htc_alloc_skb(ar, len);
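The tail room reserved above varies with the cipher; for reference, the sizes behind the three branches (from include/linux/ieee80211.h, summarized here as a sketch):

/* Tail reserved for protected robust management frames:
 *
 *   multicast (BIP MMIE):   sizeof(struct ieee80211_mmie_16)  (26 bytes)
 *   unicast GCMP/GCMP-256:  IEEE80211_GCMP_MIC_LEN            (16 bytes)
 *   unicast otherwise:      IEEE80211_CCMP_MIC_LEN            (8 bytes)
 */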
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 2451e0fb8ee5..57c58af64a57 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1131,6 +1131,7 @@ static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp)
const struct ath10k_hw_ops qca988x_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 775fd62fb92d..970c736ac6bb 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -774,6 +774,9 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_HL_TLV_AST_SKID_LIMIT 16
#define TARGET_HL_TLV_NUM_WDS_ENTRIES 2
+/* Target specific defines for QCA9377 high latency firmware */
+#define TARGET_QCA9377_HL_NUM_PEERS 15
+
/* Diagnostic Window */
#define CE_DIAG_PIPE 7
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 7fee35ff966b..2d03b8dd3b8c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -258,6 +258,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM];
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
@@ -3576,6 +3577,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_txq *txq,
+ struct ieee80211_sta *sta,
struct sk_buff *skb, u16 airtime)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -3583,6 +3585,7 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool is_data = ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_data_qos(hdr->frame_control);
+ struct ath10k_sta *arsta;
cb->flags = 0;
if (!ath10k_tx_h_use_hwcrypto(vif, skb))
@@ -3607,6 +3610,12 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
cb->vif = vif;
cb->txq = txq;
cb->airtime_est = airtime;
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ spin_lock_bh(&ar->data_lock);
+ cb->ucast_cipher = arsta->ucast_cipher;
+ spin_unlock_bh(&ar->data_lock);
+ }
}
bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
@@ -4078,7 +4087,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
}
airtime = ath10k_mac_update_airtime(ar, txq, skb);
- ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime);
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
skb_len = skb->len;
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
@@ -4348,7 +4357,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
u16 airtime;
airtime = ath10k_mac_update_airtime(ar, txq, skb);
- ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime);
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
@@ -4982,7 +4991,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
param = ar->wmi.pdev_param->enable_btcoex;
if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
- ar->running_fw->fw_file.fw_features)) {
+ ar->running_fw->fw_file.fw_features) &&
+ ar->coex_support) {
ret = ath10k_wmi_pdev_set_param(ar, param, 0);
if (ret) {
ath10k_warn(ar,
@@ -5103,7 +5113,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
- if (arvif->txpower <= 0)
+ /* txpower not initialized yet? */
+ if (arvif->txpower == INT_MIN)
continue;
if (txpower == -1)
@@ -6195,6 +6206,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta;
struct ath10k_peer *peer;
const u8 *peer_addr;
bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
@@ -6219,12 +6231,17 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&ar->conf_mutex);
- if (sta)
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
peer_addr = sta->addr;
- else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ spin_lock_bh(&ar->data_lock);
+ arsta->ucast_cipher = key->cipher;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
peer_addr = vif->bss_conf.bssid;
- else
+ } else {
peer_addr = vif->addr;
+ }
key->hw_key_idx = key->keyidx;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index e5316b911e1d..1f709b65c29b 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -694,7 +694,7 @@ static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
- if (pkt->act_len > pkt->alloc_len ) {
+ if (pkt->act_len > pkt->alloc_len) {
ret = -EINVAL;
goto err;
}
@@ -953,8 +953,11 @@ static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
*/
ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
irq_proc_reg, sizeof(*irq_proc_reg));
- if (ret)
+ if (ret) {
+ ath10k_warn(ar, "failed to read int status, starting recovery\n");
+ queue_work(ar->workqueue, &ar->restart_work);
goto out;
+ }
/* Update only those registers that are enabled */
*host_int_status = irq_proc_reg->host_int_status &
@@ -1647,23 +1650,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
size_t buf_len)
{
int ret;
+ void *mem;
+
+ mem = kzalloc(buf_len, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
/* set window register to start read cycle */
ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
if (ret) {
ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
- return ret;
+ goto out;
}
/* read the data */
- ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
+ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
if (ret) {
ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
ret);
- return ret;
+ goto out;
}
- return 0;
+ memcpy(buf, mem, buf_len);
+
+out:
+ kfree(mem);
+
+ return ret;
}
static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
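A plausible reading of the bounce buffer introduced above (the hunk itself does not state the motivation): SDIO hosts commonly DMA into the read buffer, and callers may hand in stack or otherwise non-DMA-safe memory, so the read goes through a kzalloc'd buffer first:

/* Pattern (sketch, assuming buf may not be DMA-safe):
 *
 *   mem = kzalloc(buf_len, GFP_KERNEL);       // DMA-safe bounce buffer
 *   ath10k_sdio_read(ar, ..., mem, buf_len);  // device reads land here
 *   memcpy(buf, mem, buf_len);                // hand the result to the caller
 *   kfree(mem);
 */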
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 61885d4d662c..2ea77bb880b1 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1926,6 +1926,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
u32 vdev_id;
u32 buf_len = msdu->len;
u16 fc;
+ const u8 *peer_addr;
hdr = (struct ieee80211_hdr *)msdu->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -1946,8 +1947,20 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- len += IEEE80211_CCMP_MIC_LEN;
- buf_len += IEEE80211_CCMP_MIC_LEN;
+ peer_addr = hdr->addr1;
+ if (is_multicast_ether_addr(peer_addr)) {
+ len += sizeof(struct ieee80211_mmie_16);
+ buf_len += sizeof(struct ieee80211_mmie_16);
+ } else {
+ if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
+ cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
+ len += IEEE80211_GCMP_MIC_LEN;
+ buf_len += IEEE80211_GCMP_MIC_LEN;
+ } else {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+ }
}
len = round_up(len, 4);
@@ -8787,7 +8800,7 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
cmd->host_platform_config = __cpu_to_le32(type);
cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
- cmd->wlan_gpio_priority = __cpu_to_le32(-1);
+ cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin);
cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 972d53d77654..6df415778374 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -371,6 +371,11 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
WMI_10_4_SERVICE_REPORT_AIRTIME,
WMI_10_4_SERVICE_TX_PWR_PER_PEER,
+ WMI_10_4_SERVICE_FETCH_PEER_TX_PN,
+ WMI_10_4_SERVICE_MULTIPLE_VDEV_RESTART,
+ WMI_10_4_SERVICE_ENHANCED_RADIO_COUNTERS,
+ WMI_10_4_SERVICE_QINQ_SUPPORT,
+ WMI_10_4_SERVICE_RESET_CHIP,
};
static inline char *wmi_service_name(enum wmi_service service_id)
@@ -827,6 +832,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_REPORT_AIRTIME, len);
SVCMAP(WMI_10_4_SERVICE_TX_PWR_PER_PEER,
WMI_SERVICE_TX_PWR_PER_PEER, len);
+ SVCMAP(WMI_10_4_SERVICE_RESET_CHIP,
+ WMI_SERVICE_RESET_CHIP, len);
}
#undef SVCMAP
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index c88e16d4022b..738f99090d83 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -3,6 +3,7 @@ config ATH11K
tristate "Qualcomm Technologies 802.11ax chipset support"
depends on MAC80211 && HAS_DMA
depends on REMOTEPROC
+ depends on CRYPTO_MICHAEL_MIC
depends on ARCH_QCOM || COMPILE_TEST
select ATH_COMMON
select QCOM_QMI_HELPERS
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 2761d07d938e..fe7736e53583 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -20,6 +20,7 @@ ath11k-y += core.o \
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
+ath11k-$(CONFIG_THERMAL) += thermal.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index e7e3e64c07aa..59342d2797ca 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -458,7 +458,6 @@ static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
- struct sk_buff *skb;
int i;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
@@ -468,9 +467,6 @@ static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
-
- while ((skb = __skb_dequeue(&irq_grp->pending_q)))
- dev_kfree_skb_any(skb);
}
}
@@ -681,6 +677,9 @@ static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
tasklet_schedule(&ce_pipe->intr_tq);
@@ -712,6 +711,9 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
ath11k_ahb_ext_grp_disable(irq_grp);
napi_schedule(&irq_grp->napi);
@@ -734,7 +736,6 @@ static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
- __skb_queue_head_init(&irq_grp->pending_q);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
if (ath11k_tx_ring_mask[i] & BIT(j)) {
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index e355dfda48bf..688f357e6eaf 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -161,6 +161,7 @@ struct ath11k_ce_pipe {
struct ath11k_ce_ring *src_ring;
struct ath11k_ce_ring *dest_ring;
struct ath11k_ce_ring *status_ring;
+ u64 timestamp;
};
struct ath11k_ce {
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 9e823056e673..bf5657d2ae18 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -392,11 +392,19 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
goto err_mac_unregister;
}
+ ret = ath11k_thermal_register(ab);
+ if (ret) {
+ ath11k_err(ab, "could not register thermal device: %d\n",
+ ret);
+ goto err_dp_pdev_free;
+ }
+
return 0;
+err_dp_pdev_free:
+ ath11k_dp_pdev_free(ab);
err_mac_unregister:
ath11k_mac_unregister(ab);
-
err_pdev_debug:
ath11k_debug_pdev_destroy(ab);
@@ -405,6 +413,7 @@ err_pdev_debug:
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
+ ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
ath11k_ahb_ext_irq_disable(ab);
ath11k_dp_pdev_free(ab);
@@ -569,6 +578,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
int ret;
mutex_lock(&ab->core_lock);
+ ath11k_thermal_unregister(ab);
ath11k_ahb_ext_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_ahb_stop(ab);
@@ -607,6 +617,7 @@ void ath11k_core_halt(struct ath11k *ar)
lockdep_assert_held(&ar->conf_mutex);
ar->num_created_vdevs = 0;
+ ar->allocated_vdev_map = 0;
ath11k_mac_scan_finish(ar);
ath11k_mac_peer_cleanup_all(ar);
@@ -644,6 +655,7 @@ static void ath11k_core_restart(struct work_struct *work)
complete(&ar->install_key_done);
complete(&ar->vdev_setup_done);
complete(&ar->bss_survey_done);
+ complete(&ar->thermal.wmi_sync);
wake_up(&ar->dp.tx_empty_waitq);
idr_for_each(&ar->txmgmt_idr,
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 25cdcf71d0c4..6e7b8ecd09a6 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -20,6 +20,7 @@
#include "hw.h"
#include "hal_rx.h"
#include "reg.h"
+#include "thermal.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -76,6 +77,8 @@ struct ath11k_skb_rxcb {
u8 err_code;
u8 mac_id;
u8 unmapped;
+ u8 is_frag;
+ u8 tid;
};
enum ath11k_hw_rev {
@@ -108,12 +111,9 @@ struct ath11k_ext_irq_grp {
u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
u32 num_irq;
u32 grp_id;
+ u64 timestamp;
struct napi_struct napi;
struct net_device napi_ndev;
- /* Queue of pending packets, not expected to be accessed concurrently
- * to avoid locking overhead.
- */
- struct sk_buff_head pending_q;
};
#define HEHANDLE_CAP_PHYINFO_SIZE 3
@@ -243,6 +243,8 @@ struct ath11k_rx_peer_stats {
u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
u64 rx_duration;
+ u64 dcm_count;
+ u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX];
};
#define ATH11K_HE_MCS_NUM 12
@@ -329,9 +331,9 @@ struct ath11k_sta {
u32 bw;
u32 nss;
u32 smps;
+ enum hal_pn_type pn_type;
struct work_struct update_wk;
- struct ieee80211_tx_info tx_info;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
@@ -486,6 +488,7 @@ struct ath11k {
int max_num_peers;
u32 num_started_vdevs;
u32 num_created_vdevs;
+ unsigned long long allocated_vdev_map;
struct idr txmgmt_idr;
/* protects txmgmt_idr data */
@@ -524,6 +527,7 @@ struct ath11k {
struct ath11k_debug debug;
#endif
bool dfs_block_radar_events;
+ struct ath11k_thermal thermal;
};
struct ath11k_band_cap {
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index 8e8d5588b541..97e7306c506d 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -53,6 +53,8 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
/* keep this last */
ATH11K_DBG_HTT_NUM_EXT_STATS,
@@ -68,12 +70,19 @@ struct debug_htt_stats_req {
u8 buf[0];
};
-#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+struct ath_pktlog_hdr {
+ u16 flags;
+ u16 missed_cnt;
+ u16 log_type;
+ u16 size;
+ u32 timestamp;
+ u32 type_specific_data;
+ u8 payload[0];
+};
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
-#define ATH11K_HTT_PKTLOG_MAX_SIZE 2048
-
enum ath11k_pktlog_filter {
ATH11K_PKTLOG_RX = 0x000000001,
ATH11K_PKTLOG_TX = 0x000000002,
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
index 9939e909628f..5db0c27de475 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
@@ -22,7 +22,7 @@
do { \
int index = 0; u8 i; \
for (i = 0; i < len; i++) { \
- index += snprintf(out + index, HTT_MAX_STRING_LEN - index, \
+ index += scnprintf(out + index, HTT_MAX_STRING_LEN - index, \
" %u:%u,", i, arr[i]); \
if (index < 0 || index >= HTT_MAX_STRING_LEN) \
break; \
@@ -46,7 +46,7 @@ static inline void htt_print_stats_string_tlv(const void *tag_buf,
len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:");
for (i = 0; i < tag_len; i++) {
- index += snprintf(&data[index],
+ index += scnprintf(&data[index],
HTT_MAX_STRING_LEN - index,
"%.*s", 4, (char *)&(htt_stats_buf->data[i]));
if (index >= HTT_MAX_STRING_LEN)
@@ -3097,7 +3097,7 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
index = 0;
for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
- index += snprintf(&rx_pilot_evm_db[j][index],
+ index += scnprintf(&rx_pilot_evm_db[j][index],
HTT_MAX_STRING_LEN - index,
" %u:%d,",
i,
@@ -3109,7 +3109,7 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
index = 0;
memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
- index += snprintf(&str_buf[index],
+ index += scnprintf(&str_buf[index],
HTT_MAX_STRING_LEN - index,
" %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]);
len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf);
@@ -3217,7 +3217,7 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
index = 0;
memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
- index += snprintf(&str_buf[index],
+ index += scnprintf(&str_buf[index],
HTT_MAX_STRING_LEN - index,
" %u:%d,",
i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
@@ -3232,7 +3232,7 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
index = 0;
memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
- index += snprintf(&str_buf[index],
+ index += scnprintf(&str_buf[index],
HTT_MAX_STRING_LEN - index,
" %u:%d,",
i,
@@ -3854,6 +3854,47 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
stats_req->buf_len = len;
}
+static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
+ u8 *data)
+{
+ struct debug_htt_stats_req *stats_req =
+ (struct debug_htt_stats_req *)data;
+ struct htt_ring_backpressure_stats_tlv *htt_stats_buf =
+ (struct htt_ring_backpressure_stats_tlv *)tag_buf;
+ int i;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
+ htt_stats_buf->pdev_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_head_idx = %u",
+ htt_stats_buf->current_head_idx);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_tail_idx = %u",
+ htt_stats_buf->current_tail_idx);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_htt_msgs_sent = %u",
+ htt_stats_buf->num_htt_msgs_sent);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "backpressure_time_ms = %u",
+ htt_stats_buf->backpressure_time_ms);
+
+ for (i = 0; i < 5; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "backpressure_hist_%u = %u",
+ i + 1, htt_stats_buf->backpressure_hist[i]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "============================");
+
+ if (len >= buf_len) {
+ buf[buf_len - 1] = 0;
+ stats_req->buf_len = buf_len - 1;
+ } else {
+ buf[len] = 0;
+ stats_req->buf_len = len;
+ }
+}
+
static inline void htt_htt_stats_debug_dump(const u32 *tag_buf,
struct debug_htt_stats_req *stats_req)
{
@@ -4246,6 +4287,9 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
case HTT_STATS_PDEV_OBSS_PD_TAG:
htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
break;
+ case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
+ htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
+ break;
default:
break;
}
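The snprintf()-to-scnprintf() conversions in this file guard against a subtle overflow: the two functions return different things. A minimal sketch of the failure mode being avoided:

/* snprintf() returns the length that *would* have been written, so after
 * truncation a running index can overshoot the buffer and make the next
 * call write out of bounds; scnprintf() returns the bytes actually
 * stored (excluding the NUL), keeping the index bounded.
 */
char buf[8];
int n1 = snprintf(buf, sizeof(buf), "0123456789");  /* n1 == 10, truncated */
int n2 = scnprintf(buf, sizeof(buf), "0123456789"); /* n2 == 7 */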
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
index 4bdb62dd7b8d..23a6baa9e95a 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
@@ -100,6 +100,8 @@ enum htt_tlv_tag_t {
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
+ HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_RING_BACKPRESSURE_STATS_TAG = 90,
HTT_STATS_MAX_TAG,
};
@@ -1659,4 +1661,30 @@ struct htt_pdev_obss_pd_stats_tlv {
};
void ath11k_debug_htt_stats_init(struct ath11k *ar);
+
+struct htt_ring_backpressure_stats_tlv {
+ u32 pdev_id;
+ u32 current_head_idx;
+ u32 current_tail_idx;
+ u32 num_htt_msgs_sent;
+ /* Time in milliseconds for which the ring has been in
+ * its current backpressure condition
+ */
+ u32 backpressure_time_ms;
+ /* backpressure_hist - histogram of how often the ring stayed
+ * continuously in the backpressure state for a given duration:
+ * index 0: 100 - 200ms, index 1: 200 - 300ms, index 2: 300 - 400ms,
+ * index 3: 400 - 500ms, index 4: beyond 500ms.
+ */
+ u32 backpressure_hist[5];
+};
+
#endif
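To make the bucket boundaries above concrete, a hypothetical helper mapping a continuous backpressure duration to its histogram slot (illustrative only; in practice firmware fills this histogram and the host merely prints it):

/* Hypothetical, for illustration only. */
static int backpressure_hist_idx(u32 duration_ms)
{
	if (duration_ms < 100)
		return -1;                /* below the first bucket */
	if (duration_ms >= 500)
		return 4;                 /* open-ended last bucket */
	return duration_ms / 100 - 1;     /* 100-200ms -> 0, ..., 400-500ms -> 3 */
}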
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 743760c9bcae..389dac219238 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -24,7 +24,7 @@ ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
tx_stats = arsta->tx_stats;
gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
mcs = txrate->mcs;
- bw = txrate->bw;
+ bw = ath11k_mac_mac80211_bw_to_ath11k_bw(txrate->bw);
nss = txrate->nss - 1;
#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
@@ -136,7 +136,7 @@ void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate;
- u8 rate_idx;
+ u8 rate_idx = 0;
int ret;
u8 mcs;
@@ -219,6 +219,9 @@ static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
const int size = 2 * 4096;
char *buf;
+ if (!arsta->tx_stats)
+ return -ENOENT;
+
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -379,6 +382,13 @@ static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ",
rx_stats->rx_duration);
+ len += scnprintf(buf + len, size - len,
+ "\nDCM: %llu\nRU: 26 %llu 52: %llu 106: %llu 242: %llu 484: %llu 996: %llu\n",
+ rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
+ rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
+ rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
+ rx_stats->ru_alloc_cnt[5]);
+
len += scnprintf(buf + len, size - len, "\n");
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index b112825a52ed..50350f77b309 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
+#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
@@ -33,14 +34,16 @@ void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
}
ath11k_peer_rx_tid_cleanup(ar, peer);
+ crypto_free_shash(peer->tfm_mmic);
spin_unlock_bh(&ab->base_lock);
}
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
u32 reo_dest;
- int ret;
+ int ret = 0, tid;
/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
reo_dest = ar->dp.mac_id + 1;
@@ -54,24 +57,42 @@ int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
return ret;
}
- ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id,
- HAL_DESC_REO_NON_QOS_TID, 1, 0);
- if (ret) {
- ath11k_warn(ab, "failed to setup rxd tid queue for non-qos tid %d\n",
- ret);
- return ret;
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
+ HAL_PN_TYPE_NONE);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
+ tid, ret);
+ goto peer_clean;
+ }
}
- ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, 0, 1, 0);
+ ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
if (ret) {
- ath11k_warn(ab, "failed to setup rxd tid queue for tid 0 %d\n",
- ret);
+ ath11k_warn(ab, "failed to setup rx defrag context\n");
return ret;
}
/* TODO: Setup other peer specific resource used in data path */
return 0;
+
+peer_clean:
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to del rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (; tid >= 0; tid--)
+ ath11k_peer_rx_tid_delete(ar, peer, tid);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
}
void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
@@ -197,6 +218,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
+ u32 ring_hash_map;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
@@ -284,7 +306,21 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
goto err;
}
- ath11k_hal_reo_hw_setup(ab);
+ /* When hash based routing of rx packets is enabled, 32 entries
+ * mapping the hash values to the rings are configured. Each hash
+ * entry uses three bits to map to a particular ring. The ring
+ * mapping is 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW
+ * and 7:Not used.
+ */
+ ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 3 |
+ HAL_HASH_ROUTING_RING_SW3 << 6 |
+ HAL_HASH_ROUTING_RING_SW4 << 9 |
+ HAL_HASH_ROUTING_RING_SW1 << 12 |
+ HAL_HASH_ROUTING_RING_SW2 << 15 |
+ HAL_HASH_ROUTING_RING_SW3 << 18 |
+ HAL_HASH_ROUTING_RING_SW4 << 21;
+
+ ath11k_hal_reo_hw_setup(ab, ring_hash_map);
return 0;
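Given the 3-bit packing above, a hypothetical decode of which destination a hash selects (only 8 entries fit in the 32-bit value; the remaining hash entries presumably repeat this pattern):

/* Hypothetical helper, not part of the patch. */
static u8 reo_dest_for_hash(u32 ring_hash_map, u32 hash)
{
	/* hash & 0x7 picks one of the 8 packed entries; each entry is a
	 * 3-bit ring id (0:TCL, 1-4:SW1-SW4, 5:Release, 6:FW, 7:unused).
	 */
	return (ring_hash_map >> ((hash & 0x7) * 3)) & 0x7;
}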
@@ -614,17 +650,13 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
}
if (ath11k_rx_ring_mask[grp_id]) {
- for (i = 0; i < ab->num_radios; i++) {
- if (ath11k_rx_ring_mask[grp_id] & BIT(i)) {
- work_done = ath11k_dp_process_rx(ab, i, napi,
- &irq_grp->pending_q,
- budget);
- budget -= work_done;
- tot_work_done += work_done;
- }
- if (budget <= 0)
- goto done;
- }
+ i = fls(ath11k_rx_ring_mask[grp_id]) - 1;
+ work_done = ath11k_dp_process_rx(ab, i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
}
if (rx_mon_status_ring_mask[grp_id]) {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 6ef5be4201b2..551f9c9fb847 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -22,6 +22,18 @@ struct dp_rx_tid {
u32 size;
u32 ba_win_sz;
bool active;
+
+ /* Info related to rx fragments */
+ u32 cur_sn;
+ u16 last_frag_no;
+ u16 rx_frag_bitmap;
+
+ struct sk_buff_head rx_frags;
+ struct hal_reo_dest_ring *dst_ring_desc;
+
+ /* Timer info related to fragments */
+ struct timer_list frag_timer;
+ struct ath11k_base *ab;
};
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
@@ -128,7 +140,6 @@ struct ath11k_pdev_dp {
u32 mac_id;
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
- struct dp_srng reo_dst_ring;
struct dp_rxdma_ring rx_refill_buf_ring;
struct dp_srng rxdma_err_dst_ring;
struct dp_srng rxdma_mon_dst_ring;
@@ -148,7 +159,7 @@ struct ath11k_pdev_dp {
#define DP_AVG_MPDUS_PER_TID_MAX 128
#define DP_AVG_MSDUS_PER_MPDU 4
-#define DP_RX_HASH_ENABLE 0 /* Disable hash based Rx steering */
+#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
#define DP_BA_WIN_SZ_MAX 256
@@ -168,7 +179,7 @@ struct ath11k_pdev_dp {
#define DP_RX_RELEASE_RING_SIZE 1024
#define DP_REO_EXCEPTION_RING_SIZE 128
#define DP_REO_CMD_RING_SIZE 128
-#define DP_REO_STATUS_RING_SIZE 256
+#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
#define DP_RXDMA_REFILL_RING_SIZE 2048
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
@@ -206,6 +217,7 @@ struct ath11k_dp {
struct dp_srng reo_except_ring;
struct dp_srng reo_cmd_ring;
struct dp_srng reo_status_ring;
+ struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
@@ -924,6 +936,7 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x1f,
HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+ HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
};
#define HTT_TARGET_VERSION_MAJOR 3
@@ -972,6 +985,13 @@ struct htt_resp_msg {
};
} __packed;
+#define HTT_BACKPRESSURE_EVENT_PDEV_ID_M GENMASK(15, 8)
+#define HTT_BACKPRESSURE_EVENT_RING_TYPE_M GENMASK(23, 16)
+#define HTT_BACKPRESSURE_EVENT_RING_ID_M GENMASK(31, 24)
+
+#define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0)
+#define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16)
+
/* ppdu stats
*
* @details
@@ -1066,6 +1086,13 @@ struct htt_ppdu_stats_common {
u16 bw_mhz;
} __packed;
+enum htt_ppdu_stats_gi {
+ HTT_PPDU_STATS_SGI_0_8_US,
+ HTT_PPDU_STATS_SGI_0_4_US,
+ HTT_PPDU_STATS_SGI_1_6_US,
+ HTT_PPDU_STATS_SGI_3_2_US,
+};
+
#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
@@ -1094,6 +1121,8 @@ struct htt_ppdu_stats_common {
FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M, _val)
#define HTT_USR_RATE_GI(_val) \
FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M, _val)
+#define HTT_USR_RATE_DCM(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M, _val)
#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index b08da839b7d9..f74a0e74bf3e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -4,6 +4,9 @@
*/
#include <linux/ieee80211.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
@@ -13,6 +16,8 @@
#include "dp_tx.h"
#include "peer.h"
+#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) /* in jiffies despite the _MS suffix */
+
static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
return desc->hdr_status;
@@ -28,22 +33,56 @@ static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_des
__le32_to_cpu(desc->mpdu_start.info2));
}
-static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE,
- __le32_to_cpu(desc->mpdu_start.info5));
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->msdu_start.info2));
}
-static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
{
- return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
- __le32_to_cpu(desc->attention.info2));
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->msdu_start.info2));
}
-static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
{
- return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU,
- __le32_to_cpu(desc->attention.info1));
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->mpdu_start.info1));
+}
+
+static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->mpdu_start.info1));
+}
+
+static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ return ieee80211_has_morefrags(hdr->frame_control);
+}
+
+static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+}
+
+static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->mpdu_start.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
+ __le32_to_cpu(desc->attention.info2));
}
static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
@@ -137,6 +176,17 @@ static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
return hweight8(mimo_ss_bitmap);
}
+static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID,
+ __le32_to_cpu(desc->mpdu_start.info2));
+}
+
+static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->mpdu_start.sw_peer_id);
+}
+
static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
@@ -402,32 +452,25 @@ static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
- struct ath11k_pdev_dp *dp;
- struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
int i;
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- dp = &ar->dp;
- ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring);
- }
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
- struct ath11k *ar;
- struct ath11k_pdev_dp *dp;
+ struct ath11k_dp *dp = &ab->dp;
int ret;
int i;
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- dp = &ar->dp;
- ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST,
- dp->mac_id, dp->mac_id,
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
+ HAL_REO_DST, i, 0,
DP_REO_DST_RING_SIZE);
if (ret) {
- ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
+ ath11k_warn(ab, "failed to setup reo_dst_ring\n");
goto err_reo_cleanup;
}
}
@@ -633,8 +676,8 @@ free_desc:
kfree(rx_tid->vaddr);
}
-static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
- struct ath11k_peer *peer, u8 tid)
+void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid)
{
struct ath11k_hal_reo_cmd cmd = {0};
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
@@ -661,12 +704,75 @@ static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
rx_tid->active = false;
}
+static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
+ u32 *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ u32 *desc;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
+ action);
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
+{
+ struct ath11k_base *ab = rx_tid->ab;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rx_tid->dst_ring_desc) {
+ if (rel_link_desc)
+ ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ kfree(rx_tid->dst_ring_desc);
+ rx_tid->dst_ring_desc = NULL;
+ }
+
+ rx_tid->cur_sn = 0;
+ rx_tid->last_frag_no = 0;
+ rx_tid->rx_frag_bitmap = 0;
+ __skb_queue_purge(&rx_tid->rx_frags);
+}
+
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
+ struct dp_rx_tid *rx_tid;
int i;
- for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
ath11k_peer_rx_tid_delete(ar, peer, i);
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+ }
}
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
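The unlock/relock around del_timer_sync() in the cleanup above deserves a note: del_timer_sync() waits for a running timer handler to finish, so it must not be called under a lock that the fragment-timer handler itself takes (presumably ab->base_lock here), or the two could deadlock. The pattern, in sketch form:

/* Sketch: never wait for a timer handler while holding a lock the
 * handler acquires.
 *
 *   spin_unlock_bh(&ab->base_lock);
 *   del_timer_sync(&rx_tid->frag_timer);   // may spin until the handler exits
 *   spin_lock_bh(&ab->base_lock);
 */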
@@ -732,7 +838,8 @@ unlock_exit:
}
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
- u8 tid, u32 ba_win_sz, u16 ssn)
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
@@ -793,7 +900,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
- ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);
+ ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
DMA_BIDIRECTIONAL);
@@ -837,7 +945,7 @@ int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
params->tid, params->buf_size,
- params->ssn);
+ params->ssn, arsta->pn_type);
if (ret)
ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
@@ -890,8 +998,80 @@ int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
return ret;
}
-static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
- u16 peer_id)
+int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u8 tid;
+ int ret = 0;
+
+ /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
+ * We use mac80211 PN/TSC replay check functionality for bcast/mcast
+ * for now.
+ */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return 0;
+
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
+ HAL_REO_CMD_UPD0_PN_SIZE |
+ HAL_REO_CMD_UPD0_PN_VALID |
+ HAL_REO_CMD_UPD0_PN_CHECK |
+ HAL_REO_CMD_UPD0_SVLD;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (key_cmd == SET_KEY) {
+ cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
+ cmd.pn_size = 48;
+ }
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ continue;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, NULL);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
+ tid, ret);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ return ret;
+}
+
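On cmd.pn_size = 48 above: the ciphers handled in the switch all carry a 48-bit per-frame counter, so a single size covers every case (hedged summary; multicast/broadcast PN checking stays in mac80211 per the comment at the top of the function):

/* PN widths for the ciphers handled above (sketch):
 *   TKIP TSC:          48 bits (TSC0..TSC5)
 *   CCMP/CCMP-256 PN:  48 bits (PN0..PN5)
 *   GCMP/GCMP-256 PN:  48 bits (PN0..PN5)
 * hence cmd.pn_size = 48 for all of them.
 */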
+static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
{
int i;
@@ -1028,23 +1208,23 @@ int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
return 0;
}
-static u32 ath11k_bw_to_mac80211_bwflags(u8 bw)
+static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
{
- u32 bwflags = 0;
+ u32 ret = 0;
- switch (bw) {
- case ATH11K_BW_40:
- bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH;
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
break;
- case ATH11K_BW_80:
- bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH;
+ case RX_MSDU_START_SGI_1_6_US:
+ ret = NL80211_RATE_INFO_HE_GI_1_6;
break;
- case ATH11K_BW_160:
- bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH;
+ case RX_MSDU_START_SGI_3_2_US:
+ ret = NL80211_RATE_INFO_HE_GI_3_2;
break;
}
- return bwflags;
+ return ret;
}
static void
@@ -1056,12 +1236,11 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
struct ieee80211_sta *sta;
struct ath11k_sta *arsta;
struct htt_ppdu_stats_user_rate *user_rate;
- struct ieee80211_chanctx_conf *conf = NULL;
struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
struct htt_ppdu_stats_common *common = &ppdu_stats->common;
int ret;
- u8 flags, mcs, nss, bw, sgi, rate_idx = 0;
+ u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
u32 succ_bytes = 0;
u16 rate = 0, succ_pkts = 0;
u32 tx_duration = 0;
@@ -1096,18 +1275,29 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
/* Note: If host configured fixed rates and in some other special
* cases, the broadcast/management frames are sent in different rates.
* Firmware rate's control to be skipped for this?
*/
- if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) {
+ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
+ ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
+ return;
+ }
+
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
return;
}
- if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) {
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
mcs, nss);
return;
@@ -1136,60 +1326,42 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
arsta = (struct ath11k_sta *)sta->drv_priv;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
- memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
switch (flags) {
case WMI_RATE_PREAMBLE_OFDM:
arsta->txrate.legacy = rate;
- if (arsta->arvif && arsta->arvif->vif)
- conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
- if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
- arsta->tx_info.status.rates[0].idx = rate_idx - 4;
break;
case WMI_RATE_PREAMBLE_CCK:
arsta->txrate.legacy = rate;
- arsta->tx_info.status.rates[0].idx = rate_idx;
- if (mcs > ATH11K_HW_RATE_CCK_LP_1M &&
- mcs <= ATH11K_HW_RATE_CCK_SP_2M)
- arsta->tx_info.status.rates[0].flags |=
- IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
break;
case WMI_RATE_PREAMBLE_HT:
arsta->txrate.mcs = mcs + 8 * (nss - 1);
- arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
- arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
- if (sgi) {
+ if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- arsta->tx_info.status.rates[0].flags |=
- IEEE80211_TX_RC_SHORT_GI;
- }
break;
case WMI_RATE_PREAMBLE_VHT:
arsta->txrate.mcs = mcs;
- ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss);
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
- arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
- if (sgi) {
+ if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- arsta->tx_info.status.rates[0].flags |=
- IEEE80211_TX_RC_SHORT_GI;
- }
+ break;
+ case WMI_RATE_PREAMBLE_HE:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_dcm = dcm;
+ arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
+ arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
+ (user_rate->ru_end -
+ user_rate->ru_start) + 1);
break;
}
arsta->txrate.nss = nss;
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
- arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw);
arsta->tx_duration += tx_duration;
memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
- if (succ_pkts) {
- arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
- arsta->tx_info.status.rates[0].count = 1;
- ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
- }
-
/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
* So skip peer stats update for mgmt packets.
*/
@@ -1308,18 +1480,10 @@ exit:
static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
+ struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
struct ath11k *ar;
- u32 len;
u8 pdev_id;
- len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr);
- if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) {
- ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n",
- len,
- ATH11K_HTT_PKTLOG_MAX_SIZE);
- return;
- }
-
pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
@@ -1327,7 +1491,30 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
return;
}
- trace_ath11k_htt_pktlog(ar, data->payload, len);
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size);
+}
+
+static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ u32 *data = (u32 *)skb->data;
+ u8 pdev_id, ring_type, ring_id;
+ u16 hp, tp;
+ u32 backpressure_time;
+
+ pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
+ ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
+ ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
+ ++data;
+
+ hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
+ tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
+ ++data;
+
+ backpressure_time = *data;
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
+ pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
}
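The handler above consumes three 32-bit words; reconstructing the layout from the FIELD_GET masks (the low byte of word 0 is presumably the HTT message type, which the dispatcher has already checked):

/* HTT backpressure event layout as parsed above (3 x u32):
 *
 *   word0: [15:8] pdev_id   [23:16] ring_type   [31:24] ring_id
 *   word1: [15:0] head ptr (hp)   [31:16] tail ptr (tp)
 *   word2: backpressure time in milliseconds
 */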
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
@@ -1379,6 +1566,9 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
case HTT_T2H_MSG_TYPE_PKTLOG:
ath11k_htt_pktlog(ab, skb);
break;
+ case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
+ ath11k_htt_backpressure_event_handler(ab, skb);
+ break;
default:
ath11k_warn(ab, "htt event %d not handled\n", type);
break;
@@ -1490,88 +1680,6 @@ static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_
return NULL;
}
-static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar,
- struct sk_buff_head *msdu_list,
- struct sk_buff_head *amsdu_list)
-{
- struct sk_buff *msdu = skb_peek(msdu_list);
- struct sk_buff *last_buf;
- struct ath11k_skb_rxcb *rxcb;
- struct ieee80211_hdr *hdr;
- struct hal_rx_desc *rx_desc, *lrx_desc;
- u16 msdu_len;
- u8 l3_pad_bytes;
- u8 *hdr_status;
- int ret;
-
- if (!msdu)
- return -ENOENT;
-
- rx_desc = (struct hal_rx_desc *)msdu->data;
- hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
- hdr = (struct ieee80211_hdr *)hdr_status;
- /* Process only data frames */
- if (!ieee80211_is_data(hdr->frame_control)) {
- __skb_unlink(msdu, msdu_list);
- dev_kfree_skb_any(msdu);
- return -EINVAL;
- }
-
- do {
- __skb_unlink(msdu, msdu_list);
- last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
- if (!last_buf) {
- ath11k_warn(ar->ab,
- "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
- ret = -EIO;
- goto free_out;
- }
-
- rx_desc = (struct hal_rx_desc *)msdu->data;
- lrx_desc = (struct hal_rx_desc *)last_buf->data;
-
- if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
- ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
- ret = -EIO;
- goto free_out;
- }
-
- rxcb = ATH11K_SKB_RXCB(msdu);
- rxcb->rx_desc = rx_desc;
- msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
- l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
-
- if (!rxcb->is_continuation) {
- skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
- } else {
- ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
- msdu, last_buf,
- l3_pad_bytes, msdu_len);
- if (ret) {
- ath11k_warn(ar->ab,
- "failed to coalesce msdu rx buffer%d\n", ret);
- goto free_out;
- }
- }
- __skb_queue_tail(amsdu_list, msdu);
-
- /* Should we also consider msdu_cnt from mpdu_meta while
- * preparing amsdu list?
- */
- if (rxcb->is_last_msdu)
- break;
- } while ((msdu = skb_peek(msdu_list)) != NULL);
-
- return 0;
-
-free_out:
- dev_kfree_skb_any(msdu);
- __skb_queue_purge(amsdu_list);
-
- return ret;
-}
-
static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
@@ -1670,20 +1778,53 @@ static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status)
{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
struct ieee80211_hdr *hdr;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
+ u16 qos_ctl = 0;
+ u8 *qos;
- /* pull decapped header and copy SA & DA */
+ /* copy SA & DA and pull decapped header */
hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
- /* push original 802.11 header */
- hdr = (struct ieee80211_hdr *)first_hdr;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ if (rxcb->is_first_msdu) {
+ /* original 802.11 header is valid for the first msdu
+ * hence we can reuse the same header
+ */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /* Each A-MSDU subframe will be reported as a separate MSDU,
+ * so strip the A-MSDU bit from QoS Ctl.
+ */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+ } else {
+ /* Rebuild qos header if this is a middle/last msdu */
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+
+ /* Reset the order bit as the HT_Control header is stripped */
+ hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
+
+ qos_ctl = rxcb->tid;
+
+ if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc))
+ qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+
+ /* TODO Add other QoS ctl fields when required */
+
+ /* copy decap header before overwriting for reuse below */
+ memcpy(decap_hdr, (u8 *)hdr, hdr_len);
+ }
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
memcpy(skb_push(msdu,
@@ -1692,6 +1833,14 @@ static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
ath11k_dp_rx_crypto_param_len(ar, enctype));
}
+ if (!rxcb->is_first_msdu) {
+ memcpy(skb_push(msdu,
+ IEEE80211_QOS_CTL_LEN), &qos_ctl,
+ IEEE80211_QOS_CTL_LEN);
+ memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
+ return;
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
@@ -1846,7 +1995,7 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
u8 decap;
first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
- decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc);
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc);
switch (decap) {
case DP_RX_DECAP_TYPE_NATIVE_WIFI:
@@ -1858,6 +2007,7 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
decrypted);
break;
case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ /* TODO undecap support for middle/last msdu's of amsdu */
ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
enctype, status);
break;
@@ -1868,42 +2018,41 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
}
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
- struct sk_buff_head *amsdu_list,
+ struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
- struct ieee80211_hdr *hdr;
+ bool fill_crypto_hdr, mcast;
enum hal_encrypt_type enctype;
- struct sk_buff *last_msdu;
- struct sk_buff *msdu;
- struct ath11k_skb_rxcb *last_rxcb;
- bool is_decrypted;
+ bool is_decrypted = false;
+ struct ieee80211_hdr *hdr;
+ struct ath11k_peer *peer;
u32 err_bitmap;
- u8 *qos;
- if (skb_queue_empty(amsdu_list))
- return;
+ hdr = (struct ieee80211_hdr *)msdu->data;
- hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc);
+ /* PN for multicast packets will be checked in mac80211 */
- /* Each A-MSDU subframe will use the original header as the base and be
- * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
- */
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos = ieee80211_get_qos_ctl(hdr);
- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- }
+ mcast = is_multicast_ether_addr(hdr->addr1);
+ fill_crypto_hdr = mcast;
is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
- enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
- /* Some attention flags are valid only in the last MSDU. */
- last_msdu = skb_peek_tail(amsdu_list);
- last_rxcb = ATH11K_SKB_RXCB(last_msdu);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
+ if (peer) {
+ if (mcast)
+ enctype = peer->sec_type_grp;
+ else
+ enctype = peer->sec_type;
+ } else {
+ enctype = HAL_ENCRYPT_TYPE_OPEN;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
- err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
- /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact */
rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
@@ -1912,19 +2061,29 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
if (err_bitmap & DP_RX_MPDU_ERR_FCS)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
- if (is_decrypted)
- rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
- RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED;
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
- skb_queue_walk(amsdu_list, msdu) {
- ath11k_dp_rx_h_csum_offload(msdu);
- ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
- enctype, rx_status, is_decrypted);
+ if (fill_crypto_hdr)
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ rx_status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_PN_VALIDATED;
}
+
+ ath11k_dp_rx_h_csum_offload(msdu);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+
+ if (!is_decrypted || fill_crypto_hdr)
+ return;
+
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
@@ -1988,6 +2147,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
}
rx_status->encoding = RX_ENC_HE;
rx_status->nss = nss;
+ rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
break;
}
@@ -2013,9 +2173,13 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
} else if (channel_num >= 36 && channel_num <= 173) {
rx_status->band = NL80211_BAND_5GHZ;
} else {
- ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
- channel_num);
- return;
+ spin_lock_bh(&ar->data_lock);
+ rx_status->band = ar->rx_channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+ spin_unlock_bh(&ar->data_lock);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(struct hal_rx_desc));
}
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
@@ -2024,29 +2188,6 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
-static void ath11k_dp_rx_process_amsdu(struct ath11k *ar,
- struct sk_buff_head *amsdu_list,
- struct ieee80211_rx_status *rx_status)
-{
- struct sk_buff *first;
- struct ath11k_skb_rxcb *rxcb;
- struct hal_rx_desc *rx_desc;
- bool first_mpdu;
-
- if (skb_queue_empty(amsdu_list))
- return;
-
- first = skb_peek(amsdu_list);
- rxcb = ATH11K_SKB_RXCB(first);
- rx_desc = rxcb->rx_desc;
-
- first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc);
- if (first_mpdu)
- ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
-
- ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status);
-}
-
static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
size_t size)
{
@@ -2113,55 +2254,115 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
}
-static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar,
- struct sk_buff_head *amsdu_list,
- struct ieee80211_rx_status *rxs)
+static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
{
- struct sk_buff *msdu;
- struct sk_buff *first_subframe;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ struct ieee80211_rx_status rx_status = {0};
struct ieee80211_rx_status *status;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *last_buf;
+ u8 l3_pad_bytes;
+ u16 msdu_len;
+ int ret;
- first_subframe = skb_peek(amsdu_list);
+ last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath11k_warn(ar->ab,
+ "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
+ ret = -EIO;
+ goto free_out;
+ }
- skb_queue_walk(amsdu_list, msdu) {
- /* Setup per-MSDU flags */
- if (skb_queue_empty(amsdu_list))
- rxs->flag &= ~RX_FLAG_AMSDU_MORE;
- else
- rxs->flag |= RX_FLAG_AMSDU_MORE;
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+ if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
+ ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
- if (msdu == first_subframe) {
- first_subframe = NULL;
- rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
- } else {
- rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+ } else if (!rxcb->is_continuation) {
+ if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ ret = -EINVAL;
+ ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
+ goto free_out;
+ }
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
+ } else {
+ ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to coalesce msdu rx buffer%d\n", ret);
+ goto free_out;
}
- rxs->flag |= RX_FLAG_SKIP_MONITOR;
-
- status = IEEE80211_SKB_RXCB(msdu);
- *status = *rxs;
}
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+
+ /* Process only data frames */
+ if (!ieee80211_is_data(hdr->frame_control))
+ return -EINVAL;
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
+ ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);
+
+ rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ *status = rx_status;
+ return 0;
+
+free_out:
+ return ret;
}
-static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
- struct napi_struct *napi,
- struct sk_buff_head *pending_q,
- int *quota, u8 mac_id)
+static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *msdu_list,
+ int *quota, int ring_id)
{
- struct ath11k *ar;
+ struct ath11k_skb_rxcb *rxcb;
struct sk_buff *msdu;
- struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+ u8 mac_id;
+ int ret;
- if (skb_queue_empty(pending_q))
+ if (skb_queue_empty(msdu_list))
return;
- ar = ab->pdevs[mac_id].ar;
-
rcu_read_lock();
- pdev = rcu_dereference(ab->pdevs_active[mac_id]);
- while (*quota && (msdu = __skb_dequeue(pending_q))) {
- if (!pdev) {
+ while (*quota && (msdu = __skb_dequeue(msdu_list))) {
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ mac_id = rxcb->mac_id;
+ ar = ab->pdevs[mac_id].ar;
+ if (!rcu_dereference(ab->pdevs_active[mac_id])) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
continue;
}
@@ -2169,46 +2370,31 @@ static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
(*quota)--;
}
+
rcu_read_unlock();
}
-int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, struct sk_buff_head *pending_q,
- int budget)
+int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ struct napi_struct *napi, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ieee80211_rx_status *rx_status = &dp->rx_status;
- struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
- struct hal_srng *srng;
- struct sk_buff *msdu;
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ int num_buffs_reaped[MAX_RADIOS] = {0};
struct sk_buff_head msdu_list;
- struct sk_buff_head amsdu_list;
struct ath11k_skb_rxcb *rxcb;
- u32 *rx_desc;
- int buf_id;
- int num_buffs_reaped = 0;
+ int total_msdu_reaped = 0;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
int quota = budget;
- int ret;
bool done = false;
-
- /* Process any pending packets from the previous napi poll.
- * Note: All msdu's in this pending_q corresponds to the same mac id
- * due to pdev based reo dest mapping and also since each irq group id
- * maps to specific reo dest ring.
- */
- ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
- mac_id);
-
- /* If all quota is exhausted by processing the pending_q,
- * Wait for the next napi poll to reap the new info
- */
- if (!quota)
- goto exit;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ u32 *rx_desc;
+ int i;
__skb_queue_head_init(&msdu_list);
- srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];
+ srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
@@ -2224,6 +2410,10 @@ try_again:
desc->buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
if (!msdu) {
@@ -2241,15 +2431,15 @@ try_again:
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[mac_id]++;
+ total_msdu_reaped++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
desc->info0);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
- /* TODO: Check if the msdu can be sent up for processing */
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
+ ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
continue;
}
@@ -2260,19 +2450,12 @@ try_again:
rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
+ rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
+ desc->info0);
+
__skb_queue_tail(&msdu_list, msdu);
- /* Stop reaping from the ring once quota is exhausted
- * and we've received all msdu's in the the AMSDU. The
- * additional msdu's reaped in excess of quota here would
- * be pushed into the pending queue to be processed during
- * the next napi poll.
- * Note: More profiling can be done to see the impact on
- * pending_q and throughput during various traffic & density
- * and how use of budget instead of remaining quota affects it.
- */
- if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
- !rxcb->is_continuation) {
+ if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
done = true;
break;
}
@@ -2293,58 +2476,23 @@ try_again:
spin_unlock_bh(&srng->lock);
- if (!num_buffs_reaped)
+ if (!total_msdu_reaped)
goto exit;
- /* Should we reschedule it later if we are not able to replenish all
- * the buffers?
- */
- ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
-
- rcu_read_lock();
- if (!rcu_dereference(ab->pdevs_active[mac_id])) {
- __skb_queue_purge(&msdu_list);
- goto rcu_unlock;
- }
-
- if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
- __skb_queue_purge(&msdu_list);
- goto rcu_unlock;
- }
-
- while (!skb_queue_empty(&msdu_list)) {
- __skb_queue_head_init(&amsdu_list);
- ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list);
- if (ret) {
- if (ret == -EIO) {
- ath11k_err(ab, "rx ring got corrupted %d\n", ret);
- __skb_queue_purge(&msdu_list);
- /* Should stop processing any more rx in
- * future from this ring?
- */
- goto rcu_unlock;
- }
-
- /* A-MSDU retrieval got failed due to non-fatal condition,
- * continue processing with the next msdu.
- */
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
continue;
- }
- ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status);
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
- ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status);
- skb_queue_splice_tail(&amsdu_list, pending_q);
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
}
- while (quota && (msdu = __skb_dequeue(pending_q))) {
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
- quota--;
- }
+ ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
+ &quota, ring_id);
-rcu_unlock:
- rcu_read_unlock();
exit:
return budget - quota;
}
@@ -2411,6 +2559,8 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+ rx_stats->dcm_count += ppdu_info->dcm;
+ rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
arsta->rssi_comb = ppdu_info->rssi_comb;
rx_stats->rx_duration += ppdu_info->rx_duration;
@@ -2681,99 +2831,563 @@ exit:
return num_buffs_reaped;
}
-static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
- u32 *link_desc,
- enum hal_wbm_rel_bm_act action)
+static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
- struct ath11k_dp *dp = &ab->dp;
- struct hal_srng *srng;
- u32 *desc;
- int ret = 0;
+ struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
- srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+ spin_lock_bh(&rx_tid->ab->base_lock);
+ if (rx_tid->last_frag_no &&
+ rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+ return;
+ }
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+}
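
The completeness test above compares the accumulated fragment bitmap against GENMASK(last_frag_no, 0), i.e. a mask with one bit per expected fragment number. A minimal standalone sketch of the same check, assuming a 64-bit unsigned long and redefining GENMASK locally (the kernel macro from linux/bits.h behaves the same for these arguments):

    #include <stdio.h>

    #define GENMASK(h, l) ((~0UL << (l)) & (~0UL >> (64 - 1 - (h))))

    int main(void)
    {
            unsigned long rx_frag_bitmap = 0;
            int last_frag_no = 3; /* fragment 3 arrived with More Fragments = 0 */

            rx_frag_bitmap |= 1UL << 0;
            rx_frag_bitmap |= 1UL << 1;
            rx_frag_bitmap |= 1UL << 3;

            /* 0b1011 != 0b1111: fragment 2 is still missing, keep waiting */
            printf("%d\n", rx_frag_bitmap == GENMASK(last_frag_no, 0));

            rx_frag_bitmap |= 1UL << 2;

            /* 0b1111 == 0b1111: all fragments present, defragment now */
            printf("%d\n", rx_frag_bitmap == GENMASK(last_frag_no, 0));
            return 0;
    }
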
- spin_lock_bh(&srng->lock);
+int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct crypto_shash *tfm;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ int i;
- ath11k_hal_srng_access_begin(ab, srng);
+ tfm = crypto_alloc_shash("michael_mic", 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
- desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
- if (!desc) {
- ret = -ENOBUFS;
- goto exit;
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
}
- ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
- action);
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+ rx_tid->ab = ab;
+ timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
+ skb_queue_head_init(&rx_tid->rx_frags);
+ }
-exit:
- ath11k_hal_srng_access_end(ab, srng);
+ peer->tfm_mmic = tfm;
+ spin_unlock_bh(&ab->base_lock);
- spin_unlock_bh(&srng->lock);
+ return 0;
+}
+
+static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
+ struct ieee80211_hdr *hdr, u8 *data,
+ size_t data_len, u8 *mic)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ u8 mic_hdr[16] = {0};
+ u8 tid = 0;
+ int ret;
+
+ if (!tfm)
+ return -EINVAL;
+
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, key, 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out;
+
+ /* TKIP MIC header */
+ memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ mic_hdr[12] = tid;
+
+ ret = crypto_shash_update(desc, mic_hdr, 16);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(desc, data, data_len);
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(desc, mic);
+out:
+ shash_desc_zero(desc);
return ret;
}
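
For reference, the 16-byte block hashed first above is the TKIP Michael pseudo-header: DA, SA, the priority (TID) byte, and three reserved zero bytes. A hedged sketch of just that layout, with the crypto_shash plumbing left out (the helper name is illustrative, not driver API):

    #include <string.h>
    #include <stdint.h>

    #define ETH_ALEN 6

    /* Byte offsets mirror the mic_hdr construction in the driver above. */
    static void build_michael_hdr(uint8_t mic_hdr[16], const uint8_t *da,
                                  const uint8_t *sa, uint8_t tid)
    {
            memset(mic_hdr, 0, 16);
            memcpy(mic_hdr, da, ETH_ALEN);            /* bytes 0..5:  DA */
            memcpy(mic_hdr + ETH_ALEN, sa, ETH_ALEN); /* bytes 6..11: SA */
            mic_hdr[12] = tid;                        /* byte 12: priority */
            /* bytes 13..15 stay zero (reserved) */
    }
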
-static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
- struct sk_buff *msdu,
- struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
+ struct sk_buff *msdu)
+{
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
+ struct ieee80211_key_conf *key_conf;
+ struct ieee80211_hdr *hdr;
+ u8 mic[IEEE80211_CCMP_MIC_LEN];
+ int head_len, tail_len, ret;
+ size_t data_len;
+ u32 hdr_len;
+ u8 *key, *data;
+ u8 key_idx;
+
+ if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
+ return 0;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN;
+ tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ key_idx = peer->ucast_keyidx;
+ else
+ key_idx = peer->mcast_keyidx;
+
+ key_conf = peer->keys[key_idx];
+
+ data = msdu->data + head_len;
+ data_len = msdu->len - head_len - tail_len;
+ key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
+
+ ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
+ if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
+ goto mic_fail;
+
+ return 0;
+
+mic_fail:
+ (ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
+ (ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;
+
+ rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
+ ieee80211_rx(ar->hw, msdu);
+ return -EINVAL;
+}
+
+static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype, u32 flags)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+ if (!flags)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
+
+ if (flags & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ if (flags & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+
+ if (flags & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len,
+ (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
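The RX_FLAG_IV_STRIPPED branch above removes the IV by sliding the 802.11 header forward over the crypto parameters and then pulling the skb, rather than copying the (typically larger) payload down. A minimal sketch of that idiom on a plain buffer, with names chosen for illustration only:

    #include <string.h>
    #include <stddef.h>

    /* Buffer layout: [rx desc][802.11 hdr][IV / crypto params][payload] */
    static unsigned char *strip_iv(unsigned char *buf, size_t desc_len,
                                   size_t hdr_len, size_t iv_len)
    {
            /* move the header right, over the IV bytes */
            memmove(buf + desc_len + iv_len, buf + desc_len, hdr_len);

            /* the skb_pull() equivalent: packet data now starts later */
            return buf + iv_len;
    }
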
+static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ struct sk_buff **defrag_skb)
{
- u8 rx_channel;
+ struct hal_rx_desc *rx_desc;
+ struct sk_buff *skb, *first_frag, *last_frag;
+ struct ieee80211_hdr *hdr;
enum hal_encrypt_type enctype;
- bool is_decrypted;
- u32 err_bitmap;
+ bool is_decrypted = false;
+ int msdu_len = 0;
+ int extra_space;
+ u32 flags;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ last_frag = skb_peek_tail(&rx_tid->rx_frags);
+
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ flags = 0;
+ rx_desc = (struct hal_rx_desc *)skb->data;
+ hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN)
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+
+ if (is_decrypted) {
+ if (skb != first_frag)
+ flags |= RX_FLAG_IV_STRIPPED;
+ if (skb != last_frag)
+ flags |= RX_FLAG_ICV_STRIPPED |
+ RX_FLAG_MIC_STRIPPED;
+ }
- is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
- enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
- err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
+ /* RX fragments are always raw packets */
+ if (skb != last_frag)
+ skb_trim(skb, skb->len - FCS_LEN);
+ ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
- if (err_bitmap & DP_RX_MPDU_ERR_FCS)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (skb != first_frag)
+ skb_pull(skb, HAL_RX_DESC_SIZE +
+ ieee80211_hdrlen(hdr->frame_control));
+ msdu_len += skb->len;
+ }
- if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
+ if (extra_space > 0 &&
+ (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
+ return -ENOMEM;
- rx_status->encoding = RX_ENC_LEGACY;
- rx_status->bw = RATE_INFO_BW_20;
+ __skb_unlink(first_frag, &rx_tid->rx_frags);
+ while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
+ skb_put_data(first_frag, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
- rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE);
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
+ ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
- rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+ if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
+ first_frag = NULL;
- if (rx_channel >= 1 && rx_channel <= 14) {
- rx_status->band = NL80211_BAND_2GHZ;
- } else if (rx_channel >= 36 && rx_channel <= 173) {
- rx_status->band = NL80211_BAND_5GHZ;
- } else {
- ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
- rx_channel);
+ *defrag_skb = first_frag;
+ return 0;
+}
+
+static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
+ struct sk_buff *defrag_skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
+ struct hal_reo_entrance_ring *reo_ent_ring;
+ struct hal_reo_dest_ring *reo_dest_ring;
+ struct dp_link_desc_bank *link_desc_banks;
+ struct hal_rx_msdu_link *msdu_link;
+ struct hal_rx_msdu_details *msdu0;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ u32 desc_bank, msdu_info, mpdu_info;
+ u32 dst_idx, cookie;
+ u32 *msdu_len_offset;
+ int ret, buf_id;
+
+ link_desc_banks = ab->dp.link_desc_banks;
+ reo_dest_ring = rx_tid->dst_ring_desc;
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
+ msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr));
+ msdu0 = &msdu_link->msdu_link[0];
+ dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
+ memset(msdu0, 0, sizeof(*msdu0));
+
+ msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
+ defrag_skb->len - HAL_RX_DESC_SIZE) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
+ msdu0->rx_msdu_info.info0 = msdu_info;
+
+ /* change msdu len in hal rx desc */
+ msdu_len_offset = (u32 *)&rx_desc->msdu_start;
+ *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH);
+ *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE;
+
+ paddr = dma_map_single(ab->dev, defrag_skb->data,
+ defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ return -ENOMEM;
+
+ spin_lock_bh(&rx_refill_ring->idr_lock);
+ buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
+ rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_refill_ring->idr_lock);
+ if (buf_id < 0) {
+ ret = -ENOMEM;
+ goto err_unmap_dma;
+ }
+
+ ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);
+
+ /* Fill mpdu details into reo entrance ring */
+ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ reo_ent_ring = (struct hal_reo_entrance_ring *)
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_ent_ring) {
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ ret = -ENOSPC;
+ goto err_free_idr;
+ }
+ memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
+ ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
+
+ mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
+
+ reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
+ reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
+ reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
+ reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
+ FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
+ reo_dest_ring->info0)) |
+ FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+err_free_idr:
+ spin_lock_bh(&rx_refill_ring->idr_lock);
+ idr_remove(&rx_refill_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_refill_ring->idr_lock);
+err_unmap_dma:
+ dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_FROM_DEVICE);
+ return ret;
+}
+
+static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b)
+{
+ int frag1, frag2;
+
+ frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a);
+ frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b);
+
+ return frag1 - frag2;
+}
+
+static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
+ struct sk_buff *cur_frag)
+{
+ struct sk_buff *skb;
+ int cmp;
+
+ skb_queue_walk(frag_list, skb) {
+ cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag);
+ if (cmp < 0)
+ continue;
+ __skb_queue_before(frag_list, skb, cur_frag);
return;
}
+ __skb_queue_tail(frag_list, cur_frag);
+}
- rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
- rx_status->band);
- ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
- /* Rx fragments are received in raw mode */
- skb_trim(msdu, msdu->len - FCS_LEN);
+ hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control);
- if (is_decrypted) {
- rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
- skb_trim(msdu, msdu->len -
- ath11k_dp_rx_crypto_mic_len(ar, enctype));
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+
+ return pn;
+}
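
The byte picks above follow the CCMP/GCMP extended IV layout, in which bytes 2 and 3 of the 8-byte header carry reserved and key-ID/ExtIV bits rather than PN material. An annotated standalone sketch of the same 48-bit reassembly:

    #include <stdint.h>

    static uint64_t ccmp_hdr_to_pn(const uint8_t ehdr[8])
    {
            uint64_t pn;

            pn  = (uint64_t)ehdr[0];        /* PN0 */
            pn |= (uint64_t)ehdr[1] << 8;   /* PN1 */
            /* ehdr[2] is reserved; ehdr[3] holds key ID + ExtIV */
            pn |= (uint64_t)ehdr[4] << 16;  /* PN2 */
            pn |= (uint64_t)ehdr[5] << 24;  /* PN3 */
            pn |= (uint64_t)ehdr[6] << 32;  /* PN4 */
            pn |= (uint64_t)ehdr[7] << 40;  /* PN5 */

            return pn;
    }
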
+
+static bool
+ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
+{
+ enum hal_encrypt_type encrypt_type;
+ struct sk_buff *first_frag, *skb;
+ struct hal_rx_desc *desc;
+ u64 last_pn;
+ u64 cur_pn;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ desc = (struct hal_rx_desc *)first_frag->data;
+
+ encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc);
+ if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
+ return true;
+
+ last_pn = ath11k_dp_rx_h_get_pn(first_frag);
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ if (skb == first_frag)
+ continue;
+
+ cur_pn = ath11k_dp_rx_h_get_pn(skb);
+ if (cur_pn != last_pn + 1)
+ return false;
+ last_pn = cur_pn;
+ }
+ return true;
+}
+
+static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u32 *ring_desc)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ struct sk_buff *defrag_skb = NULL;
+ u32 peer_id;
+ u16 seqno, frag_no;
+ u8 tid;
+ int ret = 0;
+ bool more_frags;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc);
+ tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc);
+ seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc);
+ frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu);
+ more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu);
+
+ if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) ||
+ !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) ||
+ tid > IEEE80211_NUM_TIDS)
+ return -EINVAL;
+
+ /* We received an unfragmented packet in the REO exception
+ * ring; this shouldn't happen, as such packets typically
+ * come from the reo2sw srngs.
+ */
+ if (WARN_ON_ONCE(!frag_no && !more_frags))
+ return -EINVAL;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
+ peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+ skb_queue_empty(&rx_tid->rx_frags)) {
+ /* Flush stored fragments and start a new sequence */
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ rx_tid->cur_sn = seqno;
+ }
+
+ if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
+ /* Fragment already present */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (frag_no > __fls(rx_tid->rx_frag_bitmap))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu);
+
+ rx_tid->rx_frag_bitmap |= BIT(frag_no);
+ if (!more_frags)
+ rx_tid->last_frag_no = frag_no;
+
+ if (frag_no == 0) {
+ rx_tid->dst_ring_desc = kmemdup(ring_desc,
+ sizeof(*rx_tid->dst_ring_desc),
+ GFP_ATOMIC);
+ if (!rx_tid->dst_ring_desc) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ ath11k_dp_rx_link_desc_return(ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
+
+ if (!rx_tid->last_frag_no ||
+ rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
+ mod_timer(&rx_tid->frag_timer, jiffies +
+ ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
+ goto out_unlock;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer)
+ goto err_frags_cleanup;
+
+ if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
+ goto err_frags_cleanup;
+
+ if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
+ goto err_frags_cleanup;
+
+ if (!defrag_skb)
+ goto err_frags_cleanup;
+
+ if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
+ goto err_frags_cleanup;
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, false);
+ goto out_unlock;
+
+err_frags_cleanup:
+ dev_kfree_skb_any(defrag_skb);
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+out_unlock:
+ spin_unlock_bh(&ab->base_lock);
+ return ret;
}
static int
-ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
- int buf_id, bool frag)
+ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
- struct ieee80211_rx_status rx_status = {0};
struct sk_buff *msdu;
struct ath11k_skb_rxcb *rxcb;
- struct ieee80211_rx_status *status;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
@@ -2794,10 +3408,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- if (!frag) {
- /* Process only rx fragments below, and drop
- * msdu's indicated due to error reasons.
- */
+ if (drop) {
dev_kfree_skb_any(msdu);
return 0;
}
@@ -2816,16 +3427,12 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
rx_desc = (struct hal_rx_desc *)msdu->data;
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE);
-
- ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status);
-
- status = IEEE80211_SKB_RXCB(msdu);
-
- *status = rx_status;
-
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
+ dev_kfree_skb_any(msdu);
+ ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
exit:
rcu_read_unlock();
return 0;
@@ -2850,6 +3457,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
dma_addr_t paddr;
u32 *desc;
bool is_frag;
+ u8 drop = 0;
tot_n_bufs_reaped = 0;
quota = budget;
@@ -2891,9 +3499,15 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
- /* Return the link desc back to wbm idle list */
- ath11k_dp_rx_link_desc_return(ab, desc,
- HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ /* Process only rx fragments with one msdu per link desc below, and drop
+ * msdu's indicated due to error reasons.
+ */
+ if (!is_frag || num_msdus > 1) {
+ drop = 1;
+ /* Return the link desc back to wbm idle list */
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
for (i = 0; i < num_msdus; i++) {
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
@@ -2904,8 +3518,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ar = ab->pdevs[mac_id].ar;
- if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
- is_frag)) {
+ if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
n_bufs_reaped[mac_id]++;
tot_n_bufs_reaped++;
}
@@ -2966,7 +3579,6 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status,
struct sk_buff_head *msdu_list)
{
- struct sk_buff_head amsdu_list;
u16 msdu_len;
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
@@ -2974,7 +3586,7 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
- if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) {
/* First buffer will be freed by the caller, so deduct it's length */
msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
@@ -2997,24 +3609,25 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
* This error can show up both in a REO destination or WBM release ring.
*/
- __skb_queue_head_init(&amsdu_list);
-
rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
- l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
-
- if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
- return -EINVAL;
+ if (rxcb->is_frag) {
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+ } else {
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
- skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+ if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+ }
ath11k_dp_rx_h_ppdu(ar, desc, status);
- __skb_queue_tail(&amsdu_list, msdu);
+ ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
- ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);
+ rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc);
/* Please note that the caller will have access to the msdu and will
* complete rx with mac80211. There is no need to clean up amsdu_list.
@@ -3037,6 +3650,13 @@ static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
drop = true;
break;
+ case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
+ /* TODO: Do not drop PN failed packets in the driver;
+ * instead, such packets should be dropped in mac80211
+ * after the replay counters have been incremented.
+ */
+
+ /* fall through */
default:
/* TODO: Review other errors and process them to mac80211
* as appropriate.
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index eec5deaa59ad..88bbcae14e34 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -9,6 +9,8 @@
#include "rx_desc.h"
#include "debug.h"
+#define DP_MAX_NWIFI_HDR_LEN 30
+
#define DP_RX_MPDU_ERR_FCS BIT(0)
#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
@@ -43,9 +45,16 @@ int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
struct ieee80211_ampdu_params *params);
int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
struct ieee80211_ampdu_params *params);
+int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key);
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
+void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid);
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
- u8 tid, u32 ba_win_sz, u16 ssn);
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type);
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
struct sk_buff *skb);
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab);
@@ -60,7 +69,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
int budget);
int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, struct sk_buff_head *pending_q,
+ struct napi_struct *napi,
int budget);
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
@@ -82,5 +91,6 @@ int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
gfp_t gfp);
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
+int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id);
#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 6d7d33761caf..7aac4b0eea0c 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -7,6 +7,7 @@
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"
+#include "peer.h"
/* NOTE: Any of the mapped ring id value must not exceed DP_TCL_NUM_RING_MAX */
static const u8
@@ -46,7 +47,7 @@ static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}
-static enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
+enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index b58ac11c2747..9e40c4bdd674 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -877,23 +877,32 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
/* For LMAC rings, ring pointer updates are done through FW and
* hence written to a shared memory location that is read by FW
*/
- if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
- else
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ }
} else {
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
ath11k_ahb_write32(ab,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem,
srng->u.src_ring.hp);
} else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
ath11k_ahb_write32(ab,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem,
srng->u.dst_ring.tp);
}
}
+
+ srng->timestamp = jiffies;
}
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
@@ -1017,6 +1026,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
params->intr_batch_cntr_thres_entries;
srng->intr_timer_thres_us = params->intr_timer_thres_us;
srng->flags = params->flags;
+ srng->initialized = 1;
spin_lock_init(&srng->lock);
for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
@@ -1122,3 +1132,55 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
}
+
+void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
+{
+ struct hal_srng *srng;
+ struct ath11k_ext_irq_grp *irq_grp;
+ struct ath11k_ce_pipe *ce_pipe;
+ int i;
+
+ ath11k_err(ab, "Last interrupt received for each CE:\n");
+ for (i = 0; i < CE_COUNT; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
+ i, ce_pipe->pipe_num,
+ jiffies_to_msecs(jiffies - ce_pipe->timestamp));
+ }
+
+ ath11k_err(ab, "\nLast interrupt received for each group:\n");
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ ath11k_err(ab, "group_id %d %ums before\n",
+ irq_grp->grp_id,
+ jiffies_to_msecs(jiffies - irq_grp->timestamp));
+ }
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
+ srng = &ab->hal.srng_list[i];
+
+ if (!srng->initialized)
+ continue;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_err(ab,
+ "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.src_ring.hp,
+ srng->u.src_ring.reap_hp,
+ *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
+ srng->u.src_ring.last_tp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ else if (srng->ring_dir == HAL_SRNG_DIR_DST)
+ ath11k_err(ab,
+ "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.dst_ring.tp,
+ *srng->u.dst_ring.hp_addr,
+ srng->u.dst_ring.cached_hp,
+ srng->u.dst_ring.last_hp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 5b13ccdf39e4..7722822a0456 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -96,6 +96,8 @@ struct ath11k_base;
/* REO2SW(x) R0 ring configuration address */
#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004
+#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
#define HAL_REO1_RING_BASE_LSB 0x0000029c
@@ -529,6 +531,8 @@ struct hal_srng {
*/
u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+ u64 timestamp;
+
/* Source or Destination ring */
enum hal_srng_dir ring_dir;
@@ -554,6 +558,9 @@ struct hal_srng {
/* max transfer size */
u16 max_buffer_length;
+
+ /* head pointer at access end */
+ u32 last_hp;
} dst_ring;
struct {
@@ -577,6 +584,9 @@ struct hal_srng {
/* Low threshold - in number of ring entries */
u32 low_threshold;
+
+ /* tail pointer at access end */
+ u32 last_tp;
} src_ring;
} u;
};
@@ -717,6 +727,14 @@ enum hal_ce_desc {
HAL_CE_DESC_DST_STATUS,
};
+#define HAL_HASH_ROUTING_RING_TCL 0
+#define HAL_HASH_ROUTING_RING_SW1 1
+#define HAL_HASH_ROUTING_RING_SW2 2
+#define HAL_HASH_ROUTING_RING_SW3 3
+#define HAL_HASH_ROUTING_RING_SW4 4
+#define HAL_HASH_ROUTING_RING_REL 5
+#define HAL_HASH_ROUTING_RING_FW 6
+
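These selectors feed the ring_hash_map value that ath11k_hal_reo_hw_setup() writes to the HAL_REO1_DEST_RING_CTRL_IX_* registers later in this patch. A hedged sketch of one plausible packing, four bits per hash slot; the eight-slot layout and the SW1..SW4 round-robin order are assumptions made for illustration, not taken from this diff:

    /* Demo copies of the selector values defined above. */
    #define HAL_HASH_ROUTING_RING_SW1 1
    #define HAL_HASH_ROUTING_RING_SW2 2
    #define HAL_HASH_ROUTING_RING_SW3 3
    #define HAL_HASH_ROUTING_RING_SW4 4

    static unsigned int reo_hash_map_demo(void)
    {
            static const unsigned int dest[8] = {
                    HAL_HASH_ROUTING_RING_SW1, HAL_HASH_ROUTING_RING_SW2,
                    HAL_HASH_ROUTING_RING_SW3, HAL_HASH_ROUTING_RING_SW4,
                    HAL_HASH_ROUTING_RING_SW1, HAL_HASH_ROUTING_RING_SW2,
                    HAL_HASH_ROUTING_RING_SW3, HAL_HASH_ROUTING_RING_SW4,
            };
            unsigned int map = 0;
            int i;

            for (i = 0; i < 8; i++)
                    map |= dest[i] << (i * 4);

            return map;     /* candidate ring_hash_map value */
    }
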
struct hal_reo_status_header {
u16 cmd_num;
enum hal_reo_cmd_status cmd_status;
@@ -847,10 +865,10 @@ struct ath11k_hal {
u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
- u32 start_seqtype);
+ u32 start_seq, enum hal_pn_type type);
void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
struct hal_srng *srng);
-void ath11k_hal_reo_hw_setup(struct ath11k_base *ab);
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map);
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
struct hal_wbm_idle_scatter_list *sbuf,
u32 nsbufs, u32 tot_link_desc,
@@ -893,5 +911,6 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
struct hal_srng_params *params);
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 9e0f8064e427..f277c9434a25 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -694,7 +694,7 @@ u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
}
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
- u32 start_seq)
+ u32 start_seq, enum hal_pn_type type)
{
struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
struct hal_rx_reo_queue_ext *ext_desc;
@@ -723,6 +723,18 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
ba_window_size - 1);
+ switch (type) {
+ case HAL_PN_TYPE_NONE:
+ case HAL_PN_TYPE_WAPI_EVEN:
+ case HAL_PN_TYPE_WAPI_UNEVEN:
+ break;
+ case HAL_PN_TYPE_WPA:
+ qdesc->info0 |=
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
+ HAL_RX_REO_QUEUE_PN_SIZE_48);
+ break;
+ }
/* TODO: Set Ignore ampdu flags based on BA window size and/or
* AMPDU capabilities
@@ -787,7 +799,7 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
-void ath11k_hal_reo_hw_setup(struct ath11k_base *ab)
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
@@ -809,6 +821,19 @@ void ath11k_hal_reo_hw_setup(struct ath11k_base *ab)
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
}
static enum hal_rx_mon_status
@@ -1001,6 +1026,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
}
ppdu_info->nss = nsts + 1;
+ ppdu_info->dcm = dcm;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
@@ -1038,9 +1064,15 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
break;
}
case HAL_PHYRX_HE_SIG_B1_MU: {
- /* TODO: Check if resource unit(RU) allocation stats
- * are required
- */
+ struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
+ (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
+ u16 ru_tones;
+
+ info0 = __le32_to_cpu(he_sig_b1_mu->info0);
+
+ ru_tones = FIELD_GET(HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION,
+ info0);
+ ppdu_info->ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index bb022c781c48..e863e4abfcc1 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -99,6 +99,8 @@ struct hal_rx_mon_ppdu_info {
u8 beamformed;
u8 rssi_comb;
u8 tid;
+ u8 dcm;
+ u8 ru_alloc;
u8 reception_type;
u64 rx_duration;
};
@@ -325,6 +327,34 @@ enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct sk_buff *skb);
+
+static inline u32 ath11k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+ u32 ret = 0;
+
+ switch (ru_tones) {
+ case RU_26:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ case RU_52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case RU_106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case RU_242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case RU_484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case RU_996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ }
+ return ret;
+}
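
A usage note, hedged: unrecognized tone counts leave ret at 0, which aliases the first nl80211 RU value (presumably NL80211_RATE_INFO_HE_RU_ALLOC_26). For example, a 106-tone allocation parsed from HE-SIG-B1 maps to NL80211_RATE_INFO_HE_RU_ALLOC_106 and indexes the ru_alloc_cnt[] peer statistic updated earlier in this patch.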
+
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index dd39333ec0ea..9973477ae373 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -62,6 +62,7 @@
#define TARGET_RX_BATCHMODE 1
#define ATH11K_HW_MAX_QUEUES 4
+#define ATH11K_QUEUE_LEN 4096
#define ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 6640662f5ede..9f8bc19cc5ae 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -178,6 +178,22 @@ u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
return ret;
}
+enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw)
+{
+ switch (bw) {
+ case RATE_INFO_BW_20:
+ return ATH11K_BW_20;
+ case RATE_INFO_BW_40:
+ return ATH11K_BW_40;
+ case RATE_INFO_BW_80:
+ return ATH11K_BW_80;
+ case RATE_INFO_BW_160:
+ return ATH11K_BW_160;
+ default:
+ return ATH11K_BW_20;
+ }
+}
+
int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate)
{
@@ -369,8 +385,10 @@ struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id)
flags,
ath11k_get_arvif_iter,
&arvif_iter);
- if (!arvif_iter.arvif)
+ if (!arvif_iter.arvif) {
+ ath11k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
return NULL;
+ }
return arvif_iter.arvif;
}
@@ -398,14 +416,12 @@ struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id)
{
int i;
struct ath11k_pdev *pdev;
- struct ath11k_vif *arvif;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
- arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
- if (arvif)
- return arvif->ar;
+ if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
+ return pdev->ar;
}
}
@@ -1940,6 +1956,31 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
&info->he_obss_pd);
+ if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
+ ar, arvif->vdev_id, info->he_bss_color.color,
+ ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS,
+ !info->he_bss_color.disabled);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar,
+ arvif->vdev_id,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
+ ar, arvif->vdev_id, 0,
+ ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS, 1);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -2309,6 +2350,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
const u8 *peer_addr;
int ret = 0;
u32 flags = 0;
@@ -2366,15 +2408,53 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto exit;
}
+ ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
+ if (ret) {
+ ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret);
+ goto exit;
+ }
+
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
- if (peer && cmd == SET_KEY)
+ if (peer && cmd == SET_KEY) {
peer->keys[key->keyidx] = key;
- else if (peer && cmd == DISABLE_KEY)
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ peer->ucast_keyidx = key->keyidx;
+ peer->sec_type = ath11k_dp_tx_get_encrypt_type(key->cipher);
+ } else {
+ peer->mcast_keyidx = key->keyidx;
+ peer->sec_type_grp = ath11k_dp_tx_get_encrypt_type(key->cipher);
+ }
+ } else if (peer && cmd == DISABLE_KEY) {
peer->keys[key->keyidx] = NULL;
- else if (!peer)
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ peer->ucast_keyidx = 0;
+ else
+ peer->mcast_keyidx = 0;
+ } else if (!peer)
/* impossible unless FW goes crazy */
ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (cmd == SET_KEY)
+ arsta->pn_type = HAL_PN_TYPE_WPA;
+ else
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ default:
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ }
+ }
+
spin_unlock_bh(&ab->base_lock);
exit:
@@ -2786,6 +2866,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k_peer *peer;
int ret = 0;
/* cancel must be done outside the mutex to avoid deadlock */
@@ -2818,6 +2899,17 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
ath11k_mac_dec_num_stations(arvif, sta);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
@@ -3874,6 +3966,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
ar->num_peers = 0;
+ ar->allocated_vdev_map = 0;
/* Configure monitor status ring with default rx_filter to get rx status
* such as rssi, rx_duration.
@@ -3885,6 +3978,9 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
goto err;
}
+ /* Configure the hash seed for hash-based reo dest ring selection */
+ ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+
mutex_unlock(&ar->conf_mutex);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
@@ -4112,8 +4208,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
ar->num_created_vdevs++;
-
+ ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+
spin_lock_bh(&ar->data_lock);
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
@@ -4227,6 +4324,7 @@ err_peer_del:
err_vdev_del:
ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->num_created_vdevs--;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << arvif->vdev_id;
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
@@ -4263,7 +4361,6 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
arvif->vdev_id);
- ab->free_vdev_map |= 1LL << (arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
@@ -4281,6 +4378,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
ar->num_created_vdevs--;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ab->free_vdev_map |= 1LL << (arvif->vdev_id);
ath11k_peer_cleanup(ar, arvif->vdev_id);
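
/* A minimal sketch of the 64-bit vdev bookkeeping the hunks above add: one
 * bit per vdev id in allocated_vdev_map/free_vdev_map, with the 1LL (here
 * 1ULL) literal forcing a 64-bit shift so ids >= 32 stay valid. Stand-alone
 * user-space C; only the bit manipulation mirrors the driver.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t allocated_vdev_map = 0;
	uint64_t free_vdev_map = ~0ULL;
	int vdev_id = 40;	/* a plain 32-bit "1 <<" would overflow here */

	/* add_interface path */
	allocated_vdev_map |= 1ULL << vdev_id;
	free_vdev_map &= ~(1ULL << vdev_id);

	/* remove_interface / error path */
	allocated_vdev_map &= ~(1ULL << vdev_id);
	free_vdev_map |= 1ULL << vdev_id;

	printf("allocated=%#llx free=%#llx\n",
	       (unsigned long long)allocated_vdev_map,
	       (unsigned long long)free_vdev_map);
	return 0;
}
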
@@ -5699,6 +5798,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(ar->hw, USES_RSS);
}
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
@@ -5730,6 +5830,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
+ ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
@@ -5873,6 +5974,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
init_completion(&ar->bss_survey_done);
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
+ init_completion(&ar->thermal.wmi_sync);
+
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index f286531cdd78..0607479774a9 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -144,4 +144,6 @@ void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
+enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 4bf1dfa498b6..f43deacc01bd 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -228,6 +228,9 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer->sta = sta;
arvif->ast_hash = peer->ast_hash;
+ peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index 9a40d1f6ccd9..ccca1523a6ea 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -17,6 +17,15 @@ struct ath11k_peer {
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+
+ /* Info used in MMIC verification of
+ * RX fragments
+ */
+ struct crypto_shash *tfm_mmic;
+ u8 mcast_keyidx;
+ u8 ucast_keyidx;
+ u16 sec_type;
+ u16 sec_type_grp;
};
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 2377895a58ec..c00a99ad8dbc 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2365,6 +2365,7 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH11K_QMI_EVENT_FW_READY:
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ ath11k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index a5aff801f17f..1c4264637a41 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -342,7 +342,7 @@ struct rx_attention {
#define RX_MPDU_START_INFO0_PROTO_VER_ERR BIT(12)
#define RX_MPDU_START_INFO0_AST_LOOKUP_VALID BIT(13)
-#define RX_MPDU_START_INFO1_MPDU_CTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO1_MPDU_FCTRL_VALID BIT(0)
#define RX_MPDU_START_INFO1_MPDU_DUR_VALID BIT(1)
#define RX_MPDU_START_INFO1_MAC_ADDR1_VALID BIT(2)
#define RX_MPDU_START_INFO1_MAC_ADDR2_VALID BIT(3)
@@ -1209,4 +1209,12 @@ struct hal_rx_desc {
u8 msdu_payload[0];
} __packed;
+#define HAL_RX_RU_ALLOC_TYPE_MAX 6
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+
#endif /* ATH11K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
new file mode 100644
index 000000000000..259dddbda2c7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+
+static int
+ath11k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ATH11K_THERMAL_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+ath11k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct ath11k *ar = cdev->devdata;
+
+ mutex_lock(&ar->conf_mutex);
+ *state = ar->thermal.throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
+{
+ struct ath11k *ar = cdev->devdata;
+ int ret;
+
+ if (throttle_state > ATH11K_THERMAL_THROTTLE_MAX) {
+ ath11k_warn(ar->ab, "throttle state %ld is exceeding the limit %d\n",
+ throttle_state, ATH11K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_thermal_set_throttling(ar, throttle_state);
+ if (ret == 0)
+ ar->thermal.throttle_state = throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct thermal_cooling_device_ops ath11k_thermal_ops = {
+ .get_max_state = ath11k_thermal_get_max_throttle_state,
+ .get_cur_state = ath11k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath11k_thermal_set_cur_throttle_state,
+};
+
+static ssize_t ath11k_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ath11k *ar = dev_get_drvdata(dev);
+ int ret, temperature;
+ unsigned long time_left;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Can't get temperature when the card is off */
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ reinit_completion(&ar->thermal.wmi_sync);
+ ret = ath11k_wmi_send_pdev_temperature_cmd(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to read temperature %d\n", ret);
+ goto out;
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH11K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_warn(ar->ab, "failed to synchronize thermal read\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ temperature = ar->thermal.temperature;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* display in millidegree Celsius */
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->thermal.temperature = temperature;
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->thermal.wmi_sync);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath11k_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *ath11k_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ath11k_hwmon);
+
+int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
+{
+ struct ath11k_base *sc = ar->ab;
+ struct thermal_mitigation_params param;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return 0;
+
+ memset(&param, 0, sizeof(param));
+ param.pdev_id = ar->pdev->pdev_id;
+ param.enable = throttle_state ? 1 : 0;
+ param.dc = ATH11K_THERMAL_DEFAULT_DUTY_CYCLE;
+ param.dc_per_event = 0xFFFFFFFF;
+
+ param.levelconf[0].tmplwm = ATH11K_THERMAL_TEMP_LOW_MARK;
+ param.levelconf[0].tmphwm = ATH11K_THERMAL_TEMP_HIGH_MARK;
+ param.levelconf[0].dcoffpercent = throttle_state;
+ param.levelconf[0].priority = 0; /* disable all data tx queues */
+
+ ret = ath11k_wmi_send_thermal_mitigation_param_cmd(ar, &param);
+ if (ret) {
+ ath11k_warn(sc, "failed to send thermal mitigation duty cycle %u ret %d\n",
+ throttle_state, ret);
+ }
+
+ return ret;
+}
+
+int ath11k_thermal_register(struct ath11k_base *sc)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i, ret;
+
+ for (i = 0; i < sc->num_radios; i++) {
+ pdev = &sc->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ cdev = thermal_cooling_device_register("ath11k_thermal", ar,
+ &ath11k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath11k_err(sc, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ return -EINVAL;
+ }
+
+ ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath11k_err(sc, "failed to create cooling device symlink\n");
+ goto err_thermal_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ continue;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&ar->hw->wiphy->dev,
+ "ath11k_hwmon", ar,
+ ath11k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath11k_err(ar->ab, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_thermal_destroy;
+ }
+ }
+
+ return 0;
+
+err_thermal_destroy:
+ ath11k_thermal_unregister(sc);
+ return ret;
+}
+
+void ath11k_thermal_unregister(struct ath11k_base *sc)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < sc->num_radios; i++) {
+ pdev = &sc->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ sysfs_remove_link(&ar->hw->wiphy->dev.kobj, "cooling_device");
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+ }
+}
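
/* A user-space analogue of the completion handshake used by
 * ath11k_thermal_show_temp() above: re-arm the completion, fire the request,
 * block with a timeout, and let the event side complete it. Names below
 * (temp_event, done) are hypothetical; only the pattern mirrors the driver.
 * Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;		/* the "completion" flag */
static int temperature;		/* filled by the event side */

static void *temp_event(void *arg)	/* stands in for the WMI event handler */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	temperature = 42;
	done = 1;			/* complete(&ar->thermal.wmi_sync) */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct timespec ts;

	pthread_mutex_lock(&lock);
	done = 0;			/* reinit_completion() */
	pthread_mutex_unlock(&lock);

	pthread_create(&tid, NULL, temp_event, NULL);	/* "send the WMI cmd" */

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 5;			/* ATH11K_THERMAL_SYNC_TIMEOUT_HZ */

	pthread_mutex_lock(&lock);
	while (!done) {
		if (pthread_cond_timedwait(&cond, &lock, &ts)) {
			pthread_mutex_unlock(&lock);
			fprintf(stderr, "timed out\n");	/* -ETIMEDOUT path */
			return 1;
		}
	}
	printf("%d\n", temperature * 1000);	/* millidegree Celsius */
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}
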
diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
new file mode 100644
index 000000000000..f9af55f3682d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/thermal.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_THERMAL_
+#define _ATH11K_THERMAL_
+
+#define ATH11K_THERMAL_TEMP_LOW_MARK -100
+#define ATH11K_THERMAL_TEMP_HIGH_MARK 150
+#define ATH11K_THERMAL_THROTTLE_MAX 100
+#define ATH11K_THERMAL_DEFAULT_DUTY_CYCLE 100
+#define ATH11K_HWMON_NAME_LEN 15
+#define ATH11K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+
+struct ath11k_thermal {
+ struct thermal_cooling_device *cdev;
+ struct completion wmi_sync;
+
+ /* protected by conf_mutex */
+ u32 throttle_state;
+ /* temperature value in degrees Celsius,
+ * protected by data_lock
+ */
+ int temperature;
+};
+
+#if IS_REACHABLE(CONFIG_THERMAL)
+int ath11k_thermal_register(struct ath11k_base *sc);
+void ath11k_thermal_unregister(struct ath11k_base *sc);
+int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state);
+void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature);
+#else
+static inline int ath11k_thermal_register(struct ath11k_base *sc)
+{
+ return 0;
+}
+
+static inline void ath11k_thermal_unregister(struct ath11k_base *sc)
+{
+}
+
+static inline int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
+{
+ return 0;
+}
+
+static inline void ath11k_thermal_event_temperature(struct ath11k *ar,
+ int temperature)
+{
+}
+
+#endif
+#endif /* _ATH11K_THERMAL_ */
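
/* The #else stubs above follow the common kernel pattern: callers invoke
 * ath11k_thermal_*() unconditionally, and when CONFIG_THERMAL is not
 * reachable the static inline no-ops compile away. A self-contained
 * illustration with a stand-in IS_REACHABLE (the real macro in
 * <linux/kconfig.h> inspects Kconfig symbols):
 */
#include <stdio.h>

#define CONFIG_THERMAL 0	/* pretend thermal support is compiled out */
#define IS_REACHABLE(x) (x)	/* hypothetical stand-in for the kernel macro */

#if IS_REACHABLE(CONFIG_THERMAL)
static int thermal_register(void) { puts("registered"); return 0; }
#else
static inline int thermal_register(void) { return 0; }	/* no-op stub */
#endif

int main(void)
{
	return thermal_register();	/* always safe to call */
}
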
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index a9b301ceb24b..e7ce36966d6a 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1471,6 +1471,34 @@ int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_get_pdev_temperature_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
+
+ return ret;
+}
+
int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
u32 vdev_id, u32 bcn_ctrl_op)
{
@@ -2442,6 +2470,70 @@ out:
return ret;
}
+int
+ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
+ struct thermal_mitigation_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_therm_throt_config_request_cmd *cmd;
+ struct wmi_therm_throt_level_config_info *lvl_conf;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int i, ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->enable = param->enable;
+ cmd->dc = param->dc;
+ cmd->dc_per_event = param->dc_per_event;
+ cmd->therm_throt_levels = THERMAL_LEVELS;
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN,
+ (THERMAL_LEVELS *
+ sizeof(struct wmi_therm_throt_level_config_info)));
+
+ lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
+ sizeof(*cmd) +
+ TLV_HDR_SIZE);
+ for (i = 0; i < THERMAL_LEVELS; i++) {
+ lvl_conf->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE);
+
+ lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
+ lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
+ lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
+ lvl_conf->prio = param->levelconf[i].priority;
+ lvl_conf++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
+ ar->pdev->pdev_id, param->enable, param->dc,
+ param->dc_per_event, THERMAL_LEVELS);
+
+ return ret;
+}
+
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
@@ -2615,6 +2707,84 @@ ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
return ret;
}
+int
+ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_color_collision_cfg_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION :
+ ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE;
+ cmd->current_bss_color = bss_color;
+ cmd->detection_period_ms = period;
+ cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS;
+ cmd->free_slot_expiry_time_ms = 0;
+ cmd->flags = 0;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
+ cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
+ cmd->detection_period_ms, cmd->scan_period_ms);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
+ bool enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_bss_color_change_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable ? 1 : 0;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi_send_bss_color_change_enable id %d enable %d\n",
+ cmd->vdev_id, cmd->enable);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_DIeABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
struct wmi_host_pdev_band_to_mac *band_to_mac)
@@ -2825,6 +2995,41 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
return ret;
}
+int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar,
+ int pdev_id)
+{
+ struct ath11k_wmi_pdev_lro_config_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ get_random_bytes(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE);
+ get_random_bytes(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send lro cfg req wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
{
unsigned long time_left;
@@ -4168,6 +4373,31 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
return 0;
}
+static int
+ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_pdev_temperature_event *ev)
+{
+ const struct wmi_pdev_temperature_event *_ev;
+ const void **tb;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ _ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+ if (!_ev) {
+ ath11k_warn(ab, "failed to fetch pdev temp ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *ev = *_ev;
+
+ kfree(tb);
+ return 0;
+}
+
size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
{
struct ath11k_fw_stats_vdev *i;
@@ -5345,15 +5575,18 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
"peer assoc conf ev vdev id %d macaddr %pM\n",
peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+ rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
peer_assoc_conf.vdev_id);
+ rcu_read_unlock();
return;
}
complete(&ar->peer_assoc_done);
+ rcu_read_unlock();
}
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -5511,6 +5744,30 @@ exit:
kfree(tb);
}
+static void
+ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ struct wmi_pdev_temperature_event ev = {0};
+
+ if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ ath11k_warn(ab, "failed to extract pdev temperature event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+ return;
+ }
+
+ ath11k_thermal_event_temperature(ar, ev.temp);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -5588,6 +5845,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
break;
+ case WMI_PDEV_TEMPERATURE_EVENTID:
+ ath11k_wmi_pdev_temperature_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_VDEV_DELETE_RESP_EVENTID:
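
/* Every WMI command above packs a 32-bit TLV header with FIELD_PREP before
 * the payload. A user-space sketch of that layout, assuming -- as in
 * ath11k's wmi.h -- that WMI_TLV_TAG occupies bits 31:16 and WMI_TLV_LEN
 * bits 15:0; FIELD_PREP is re-implemented here with a GCC/Clang builtin,
 * and the tag value is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define TLV_TAG_MASK 0xffff0000u
#define TLV_LEN_MASK 0x0000ffffu
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t tlv_hdr_size = 4;	/* TLV_HDR_SIZE: the header itself */
	uint32_t cmd_size = 12;		/* e.g. a three-u32 command */
	uint32_t tag = 0x1ce;		/* hypothetical tag value */
	uint32_t hdr;

	/* len counts the bytes after the header, hence "- TLV_HDR_SIZE" */
	hdr = FIELD_PREP(TLV_TAG_MASK, tag) |
	      FIELD_PREP(TLV_LEN_MASK, cmd_size - tlv_hdr_size);

	printf("tlv_header=0x%08x tag=0x%x len=%u\n",
	       hdr, hdr >> 16, hdr & 0xffff);
	return 0;
}
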
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 1fde15c762ad..510f9c6bc1d7 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -442,6 +442,10 @@ enum wmi_tlv_cmd_id {
WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_THERM_THROT_SET_CONF_CMDID,
+ WMI_RUNTIME_DPD_RECAL_CMDID,
+ WMI_GET_TPC_POWER_CMDID,
+ WMI_IDLE_TRIGGER_MONITOR_CMDID,
WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
WMI_GPIO_OUTPUT_CMDID,
WMI_TXBF_CMDID,
@@ -484,6 +488,7 @@ enum wmi_tlv_cmd_id {
WMI_SAR_LIMITS_CMDID,
WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
WMI_LPI_START_SCAN_CMDID,
WMI_LPI_STOP_SCAN_CMDID,
@@ -3300,6 +3305,12 @@ struct wmi_request_stats_cmd {
u32 pdev_id;
} __packed;
+struct wmi_get_pdev_temperature_cmd {
+ u32 tlv_header;
+ u32 param;
+ u32 pdev_id;
+} __packed;
+
#define WMI_BEACON_TX_BUFFER_SIZE 512
struct wmi_bcn_tmpl_cmd {
@@ -3605,6 +3616,39 @@ struct wmi_init_country_cmd {
} cc_info;
} __packed;
+#define THERMAL_LEVELS 1
+struct tt_level_config {
+ u32 tmplwm;
+ u32 tmphwm;
+ u32 dcoffpercent;
+ u32 priority;
+};
+
+struct thermal_mitigation_params {
+ u32 pdev_id;
+ u32 enable;
+ u32 dc;
+ u32 dc_per_event;
+ struct tt_level_config levelconf[THERMAL_LEVELS];
+};
+
+struct wmi_therm_throt_config_request_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 dc;
+ u32 dc_per_event;
+ u32 therm_throt_levels;
+} __packed;
+
+struct wmi_therm_throt_level_config_info {
+ u32 tlv_header;
+ u32 temp_lwm;
+ u32 temp_hwm;
+ u32 dc_off_percent;
+ u32 prio;
+} __packed;
+
struct wmi_pdev_pktlog_filter_info {
u32 tlv_header;
struct wmi_mac_addr peer_macaddr;
@@ -4095,6 +4139,12 @@ struct wmi_pdev_radar_ev {
s32 sidx;
} __packed;
+struct wmi_pdev_temperature_event {
+ /* temperature value in degrees Celsius */
+ s32 temp;
+ u32 pdev_id;
+} __packed;
+
#define WMI_RX_STATUS_OK 0x00
#define WMI_RX_STATUS_ERR_CRC 0x01
#define WMI_RX_STATUS_ERR_DECRYPT 0x08
@@ -4571,6 +4621,42 @@ struct wmi_obss_spatial_reuse_params_cmd {
u32 vdev_id;
} __packed;
+#define ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
+#define ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
+#define ATH11K_OBSS_COLOR_COLLISION_DETECTION 1
+
+#define ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS 10000
+#define ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS 5000
+
+struct wmi_obss_color_collision_cfg_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 flags;
+ u32 evt_type;
+ u32 current_bss_color;
+ u32 detection_period_ms;
+ u32 scan_period_ms;
+ u32 free_slot_expiry_time_ms;
+} __packed;
+
+struct wmi_bss_color_change_enable_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enable;
+} __packed;
+
+#define ATH11K_IPV4_TH_SEED_SIZE 5
+#define ATH11K_IPV6_TH_SEED_SIZE 11
+
+struct ath11k_wmi_pdev_lro_config_cmd {
+ u32 tlv_header;
+ u32 lro_enable;
+ u32 res;
+ u32 th_4[ATH11K_IPV4_TH_SEED_SIZE];
+ u32 th_6[ATH11K_IPV6_TH_SEED_SIZE];
+ u32 pdev_id;
+} __packed;
+
struct target_resource_config {
u32 num_vdevs;
u32 num_peers;
@@ -4726,6 +4812,7 @@ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
enum wmi_bss_chan_info_req_type type);
int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
struct stats_request_params *param);
+int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar);
int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
u8 peer_addr[ETH_ALEN],
struct peer_flush_params *param);
@@ -4740,6 +4827,9 @@ int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
int
ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
struct wmi_init_country_params init_cc_param);
+int
+ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
+ struct thermal_mitigation_params *param);
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter);
int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar);
int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable);
@@ -4761,4 +4851,10 @@ int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
+int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable);
+int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
+ bool enable);
+int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id);
#endif
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 802f8f87773a..96010d4b00e7 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -54,7 +54,7 @@ config ATH5K_TRACER
config ATH5K_AHB
bool "Atheros 5xxx AHB bus support"
- depends on ATH25
+ depends on ATH25 && ATH5K
---help---
This adds support for WiSoC type chipsets of the 5xxx Atheros
family.
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 94f70047d3fc..2eaba1ccab20 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -201,35 +201,35 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
u64 tsf;
v = ath5k_hw_reg_read(ah, AR5K_BEACON);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
"AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
(v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
"AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
"AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));
v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER0 (TBTT)", v, v);
v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER1 (DMA)", v, v >> 3);
v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER2 (SWBA)", v, v >> 3);
v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER3 (ATIM)", v, v);
tsf = ath5k_hw_get_tsf64(ah);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"TSF\t\t0x%016llx\tTU: %08x\n",
(unsigned long long)tsf, TSF_TO_TU(tsf));
@@ -320,16 +320,16 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
unsigned int len = 0;
unsigned int i;
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
ah->debug.level & dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
}
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
ah->debug.level == dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
@@ -383,60 +383,60 @@ static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
unsigned int i;
unsigned int v;
- len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
ah->ah_ant_mode);
- len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
ah->ah_def_ant);
- len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
ah->ah_tx_ant);
- len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"[antenna %d]\t%d\t%d\n",
i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]);
}
- len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);
v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
(v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
(v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
(v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);
if (len > sizeof(buf))
@@ -495,36 +495,36 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
unsigned int len = 0;
u32 filt = ath5k_hw_get_rx_filter(ah);
- len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
ah->bssidmask);
- len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
+ len += scnprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
filt);
if (filt & AR5K_RX_FILTER_UCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
if (filt & AR5K_RX_FILTER_MCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
if (filt & AR5K_RX_FILTER_BCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
if (filt & AR5K_RX_FILTER_CONTROL)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (filt & AR5K_RX_FILTER_BEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
if (filt & AR5K_RX_FILTER_PROM)
- len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
if (filt & AR5K_RX_FILTER_XRPOLL)
- len += snprintf(buf + len, sizeof(buf) - len, " XRPOLL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " XRPOLL");
if (filt & AR5K_RX_FILTER_PROBEREQ)
- len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (filt & AR5K_RX_FILTER_PHYERR_5212)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5212");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR-5212");
if (filt & AR5K_RX_FILTER_RADARERR_5212)
- len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5212");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RADARERR-5212");
if (filt & AR5K_RX_FILTER_PHYERR_5211)
snprintf(buf + len, sizeof(buf) - len, " PHYERR-5211");
if (filt & AR5K_RX_FILTER_RADARERR_5211)
- len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");
- len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
ath_opmode_to_string(ah->opmode), ah->opmode);
if (len > sizeof(buf))
@@ -551,65 +551,65 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
unsigned int len = 0;
int i;
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"RX\n---------------------\n");
- len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
st->rxerr_crc,
st->rx_all_count > 0 ?
st->rxerr_crc * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n",
st->rxerr_phy,
st->rx_all_count > 0 ?
st->rxerr_phy * 100 / st->rx_all_count : 0);
for (i = 0; i < 32; i++) {
if (st->rxerr_phy_code[i])
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
" phy_err[%u]\t%u\n",
i, st->rxerr_phy_code[i]);
}
- len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
st->rxerr_fifo,
st->rx_all_count > 0 ?
st->rxerr_fifo * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
st->rxerr_decrypt,
st->rx_all_count > 0 ?
st->rxerr_decrypt * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
st->rxerr_mic,
st->rx_all_count > 0 ?
st->rxerr_mic * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
st->rxerr_proc,
st->rx_all_count > 0 ?
st->rxerr_proc * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
st->rxerr_jumbo,
st->rx_all_count > 0 ?
st->rxerr_jumbo * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
st->rx_all_count);
- len += snprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
st->rx_bytes_count);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nTX\n---------------------\n");
- len += snprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
st->txerr_retry,
st->tx_all_count > 0 ?
st->txerr_retry * 100 / st->tx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
st->txerr_fifo,
st->tx_all_count > 0 ?
st->txerr_fifo * 100 / st->tx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
st->txerr_filt,
st->tx_all_count > 0 ?
st->txerr_filt * 100 / st->tx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
st->tx_all_count);
- len += snprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
st->tx_bytes_count);
if (len > sizeof(buf))
@@ -670,56 +670,56 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
char buf[700];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"HW has PHY error counters:\t%s\n",
ah->ah_capabilities.cap_has_phyerr_counters ?
"yes" : "no");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"HW max spur immunity level:\t%d\n",
as->max_spur_level);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nANI state\n--------------------------------------------\n");
- len += snprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t");
+ len += scnprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t");
switch (as->ani_mode) {
case ATH5K_ANI_MODE_OFF:
- len += snprintf(buf + len, sizeof(buf) - len, "OFF\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "OFF\n");
break;
case ATH5K_ANI_MODE_MANUAL_LOW:
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"MANUAL LOW\n");
break;
case ATH5K_ANI_MODE_MANUAL_HIGH:
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"MANUAL HIGH\n");
break;
case ATH5K_ANI_MODE_AUTO:
- len += snprintf(buf + len, sizeof(buf) - len, "AUTO\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "AUTO\n");
break;
default:
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"??? (not good)\n");
break;
}
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"noise immunity level:\t\t%d\n",
as->noise_imm_level);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"spur immunity level:\t\t%d\n",
as->spur_level);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"firstep level:\t\t\t%d\n",
as->firstep_level);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"OFDM weak signal detection:\t%s\n",
as->ofdm_weak_sig ? "on" : "off");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"CCK weak signal detection:\t%s\n",
as->cck_weak_sig ? "on" : "off");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nMIB INTERRUPTS:\t\t%u\n",
st->mib_intr);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"beacon RSSI average:\t%d\n",
(int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
@@ -728,35 +728,35 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
_struct.cycles > 0 ? \
_struct._field * 100 / _struct.cycles : 0
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt tx\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, tx_frame));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt rx\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, rx_frame));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt busy\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, rx_busy));
#undef CC_PRINT
- len += snprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n",
as->last_cc.cycles);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"listen time\t\t%d\tlast: %d\n",
as->listen_time, as->last_listen);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
as->ofdm_errors, as->last_ofdm_errors,
as->sum_ofdm_errors);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"CCK errors\t\t%u\tlast: %u\tsum: %u\n",
as->cck_errors, as->last_cck_errors,
as->sum_cck_errors);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
@@ -836,13 +836,13 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
struct ath5k_buf *bf, *bf0;
int i, n;
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"available txbuffers: %d\n", ah->txbuf_len);
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
txq = &ah->txqs[i];
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%02d: %ssetup\n", i, txq->setup ? "" : "not ");
if (!txq->setup)
@@ -854,9 +854,9 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
n++;
spin_unlock_bh(&txq->lock);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
" len: %d bufs: %d\n", txq->txq_len, n);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
" stuck: %d\n", txq->txq_stuck);
}
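
/* Why the sweep above converts snprintf to scnprintf for the "len +="
 * accumulation: snprintf returns the length that *would* have been written,
 * so on truncation len can run past the buffer and the next "buf + len"
 * write lands out of bounds. scnprintf is kernel-only; this user-space
 * stand-in shows the differing return values.
 */
#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;	/* bytes actually stored */
	return i;
}

int main(void)
{
	char buf[8];

	printf("snprintf:  %d\n", snprintf(buf, sizeof(buf), "0123456789"));
	printf("scnprintf: %d\n", scnprintf(buf, sizeof(buf), "0123456789"));
	return 0;	/* prints 10, then 7 */
}
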
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 0548aa3702e3..457e9b0d21ca 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1196,6 +1196,9 @@ static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
int *power = data;
+ if (vif->bss_conf.txpower == INT_MIN)
+ return;
+
if (*power < vif->bss_conf.txpower)
*power = vif->bss_conf.txpower;
}
@@ -1457,6 +1460,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
}
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
+ ath9k_set_txpower(sc, NULL);
+
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 31e7b108279c..e60d4737fc6e 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2095,10 +2095,13 @@ static void setup_frame_info(struct ieee80211_hw *hw,
if (tx_info->control.vif) {
struct ieee80211_vif *vif = tx_info->control.vif;
-
+ if (vif->bss_conf.txpower == INT_MIN)
+ goto nonvifpower;
txpower = 2 * vif->bss_conf.txpower;
} else {
- struct ath_softc *sc = hw->priv;
+ struct ath_softc *sc;
+ nonvifpower:
+ sc = hw->priv;
txpower = sc->cur_chan->cur_txpower;
}
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index a9b6dc17e408..19009aafc4e1 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -45,7 +45,7 @@
#include "cmd.h"
#define ADD(buf, off, max, fmt, args...) \
- off += snprintf(&buf[off], max - off, fmt, ##args);
+ off += scnprintf(&buf[off], max - off, fmt, ##args);
struct carl9170_debugfs_fops {
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index fef10886ca4a..e481674485c2 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -95,6 +95,7 @@ out_bad:
}
static const struct ethtool_ops wil_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = cfg80211_get_drvinfo,
.get_coalesce = wil_ethtoolops_get_coalesce,
.set_coalesce = wil_ethtoolops_set_coalesce,
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.h b/drivers/net/wireless/atmel/at76c50x-usb.h
index f56863403b05..746e64dfd8aa 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.h
+++ b/drivers/net/wireless/atmel/at76c50x-usb.h
@@ -151,7 +151,7 @@ struct at76_command {
u8 cmd;
u8 reserved;
__le16 size;
- u8 data[0];
+ u8 data[];
} __packed;
/* Length of Atmel-specific Rx header before 802.11 frame */
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index 1325727a74ed..dc1819ca52ac 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -51,7 +51,7 @@ struct b43_dfs_file *fops_to_dfs_file(struct b43_wldev *dev,
#define fappend(fmt, x...) \
do { \
if (bufsize - count) \
- count += snprintf(buf + count, \
+ count += scnprintf(buf + count, \
bufsize - count, \
fmt , ##x); \
else \
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index 082aab8353b8..fa133dfb2ecb 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -54,7 +54,7 @@ struct b43legacy_dfs_file * fops_to_dfs_file(struct b43legacy_wldev *dev,
#define fappend(fmt, x...) \
do { \
if (bufsize - count) \
- count += snprintf(buf + count, \
+ count += scnprintf(buf + count, \
bufsize - count, \
fmt , ##x); \
else \
diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
index 9312c1dd3417..eec087ca30e6 100644
--- a/drivers/net/wireless/broadcom/b43legacy/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
@@ -25,13 +25,15 @@
static int get_integer(const char *buf, size_t count)
{
char tmp[10 + 1] = { 0 };
- int ret = -EINVAL;
+ int ret = -EINVAL, res;
if (count == 0)
goto out;
count = min_t(size_t, count, 10);
memcpy(tmp, buf, count);
- ret = simple_strtol(tmp, NULL, 10);
+ ret = kstrtoint(tmp, 10, &res);
+ if (!ret)
+ return res;
out:
return ret;
}
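
/* The kstrtoint conversion above rejects input with trailing garbage, where
 * simple_strtol silently stopped at the first non-digit. A user-space parser
 * with approximately the same contract -- strict_atoi is a hypothetical
 * name; kstrtoint likewise tolerates one trailing newline, which sysfs
 * writes usually carry:
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int strict_atoi(const char *s, int *res)
{
	char *end;
	long v;

	errno = 0;
	v = strtol(s, &end, 10);
	if (end == s || errno == ERANGE || v < INT_MIN || v > INT_MAX)
		return -EINVAL;
	if (*end == '\n')	/* allow a single trailing newline */
		end++;
	if (*end != '\0')
		return -EINVAL;	/* "42abc" and friends: reject */
	*res = (int)v;
	return 0;
}

int main(void)
{
	int v;

	printf("\"42\\n\"  -> %d\n", strict_atoi("42\n", &v) ? -1 : v);
	printf("\"42abc\" -> %s\n", strict_atoi("42abc", &v) ? "EINVAL" : "ok");
	return 0;
}
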
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index a2328d3eee03..2ba165330038 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2953,7 +2953,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
bphy_err(drvr, "Bss info is larger than buffer. Discarding\n");
- return 0;
+ return -EINVAL;
}
if (!bi->ctl_ch) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 282d0bc14e8e..a3a257089696 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -723,6 +723,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
return 0x200000;
case BRCM_CC_4359_CHIP_ID:
return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
+ case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 23627c953a5e..436f501be937 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -729,9 +729,18 @@ static int brcmf_net_mon_stop(struct net_device *ndev)
return err;
}
+static netdev_tx_t brcmf_net_mon_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
static const struct net_device_ops brcmf_netdev_ops_mon = {
.ndo_open = brcmf_net_mon_open,
.ndo_stop = brcmf_net_mon_stop,
+ .ndo_start_xmit = brcmf_net_mon_start_xmit,
};
int brcmf_net_mon_attach(struct brcmf_if *ifp)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index 3347439543bb..46c66415b4a6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -60,7 +60,7 @@ struct brcmf_fw_request {
u16 bus_nr;
u32 n_items;
const char *board_type;
- struct brcmf_fw_item items[0];
+ struct brcmf_fw_item items[];
};
struct brcmf_fw_name {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 79c8a858b6d6..a5cced2c89ac 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -32,7 +32,7 @@ struct brcmf_fweh_queue_item {
u8 ifaddr[ETH_ALEN];
struct brcmf_event_msg_be emsg;
u32 datalen;
- u8 data[0];
+ u8 data[];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 5e1a11c07551..8cc52935fd41 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -404,7 +404,7 @@ struct brcmf_fws_mac_descriptor {
u8 traffic_lastreported_bmp;
};
-#define BRCMF_FWS_HANGER_MAXITEMS 1024
+#define BRCMF_FWS_HANGER_MAXITEMS 3072
/**
* enum brcmf_fws_hanger_item_state - state of hanger item.
@@ -2145,8 +2145,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
brcmf_fws_schedule_deq(fws);
} else {
- bphy_err(drvr, "drop skb: no hanger slot\n");
- brcmf_txfinalize(ifp, skb, false);
+ bphy_err(drvr, "no hanger slot available\n");
rc = -ENOMEM;
}
brcmf_fws_unlock(fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 5105f62767fb..39381cbde89e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -52,6 +52,7 @@ BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
+BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
@@ -70,6 +71,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
@@ -2105,6 +2107,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index f9047db6a11d..3a08252f1a53 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1938,6 +1938,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
+ brcmf_sdio_rxfail(bus, true, true);
+ sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 575ed19e9195..ac5463838fcf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -164,7 +164,6 @@ struct brcmf_usbdev_info {
struct urb *bulk_urb; /* used for FW download */
- bool wowl_enabled;
struct brcmf_mp_device *settings;
};
@@ -312,27 +311,43 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
int err = 0;
int timeout = 0;
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ struct usb_interface *intf = to_usb_interface(dev);
brcmf_dbg(USB, "Enter\n");
- if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
- return -EIO;
- if (test_and_set_bit(0, &devinfo->ctl_op))
- return -EIO;
+ err = usb_autopm_get_interface(intf);
+ if (err)
+ goto out;
+
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+ err = -EIO;
+ goto fail;
+ }
+
+ if (test_and_set_bit(0, &devinfo->ctl_op)) {
+ err = -EIO;
+ goto fail;
+ }
devinfo->ctl_completed = false;
err = brcmf_usb_send_ctl(devinfo, buf, len);
if (err) {
brcmf_err("fail %d bytes: %d\n", err, len);
clear_bit(0, &devinfo->ctl_op);
- return err;
+ goto fail;
}
timeout = brcmf_usb_ioctl_resp_wait(devinfo);
- clear_bit(0, &devinfo->ctl_op);
if (!timeout) {
brcmf_err("Txctl wait timed out\n");
+ usb_kill_urb(devinfo->ctl_urb);
err = -EIO;
+ goto fail;
}
+ clear_bit(0, &devinfo->ctl_op);
+
+fail:
+ usb_autopm_put_interface(intf);
+out:
return err;
}
@@ -341,32 +356,46 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
int err = 0;
int timeout = 0;
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ struct usb_interface *intf = to_usb_interface(dev);
brcmf_dbg(USB, "Enter\n");
- if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
- return -EIO;
- if (test_and_set_bit(0, &devinfo->ctl_op))
- return -EIO;
+ err = usb_autopm_get_interface(intf);
+ if (err)
+ goto out;
+
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+ err = -EIO;
+ goto fail;
+ }
+
+ if (test_and_set_bit(0, &devinfo->ctl_op)) {
+ err = -EIO;
+ goto fail;
+ }
devinfo->ctl_completed = false;
err = brcmf_usb_recv_ctl(devinfo, buf, len);
if (err) {
brcmf_err("fail %d bytes: %d\n", err, len);
clear_bit(0, &devinfo->ctl_op);
- return err;
+ goto fail;
}
timeout = brcmf_usb_ioctl_resp_wait(devinfo);
err = devinfo->ctl_urb_status;
- clear_bit(0, &devinfo->ctl_op);
if (!timeout) {
brcmf_err("rxctl wait timed out\n");
+ usb_kill_urb(devinfo->ctl_urb);
err = -EIO;
+ goto fail;
}
+ clear_bit(0, &devinfo->ctl_op);
+fail:
+ usb_autopm_put_interface(intf);
if (!err)
return devinfo->ctl_urb_actual_length;
- else
- return err;
+out:
+ return err;
}
static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
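
Both control-packet paths above gain the same shape: take a USB runtime-PM
reference before touching the device (resuming it if it is autosuspended),
do the transfer, and drop the reference on every exit path so the interface
may autosuspend again; they also now kill the control URB on timeout before
failing. A minimal sketch of the bracket pattern, with the transfer helper
hypothetical:

    #include <linux/usb.h>

    static int example_transfer(struct usb_interface *intf)
    {
            return 0;       /* placeholder for the real control I/O */
    }

    static int example_do_ctl(struct usb_interface *intf)
    {
            int err;

            err = usb_autopm_get_interface(intf);   /* resume if needed */
            if (err)
                    return err;

            err = example_transfer(intf);

            usb_autopm_put_interface(intf);         /* allow autosuspend */
            return err;
    }
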
@@ -500,10 +529,12 @@ static void brcmf_usb_rx_complete(struct urb *urb)
return;
}
- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP ||
+ devinfo->bus_pub.state == BRCMFMAC_USB_STATE_SLEEP) {
skb_put(skb, urb->actual_length);
brcmf_rx_frame(devinfo->dev, skb, true);
brcmf_usb_rx_refill(devinfo, req);
+ usb_mark_last_busy(urb->dev);
} else {
brcmu_pkt_buf_free_skb(skb);
brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
@@ -587,6 +618,11 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
struct brcmf_usbreq *req;
int ret;
unsigned long flags;
+ struct usb_interface *intf = to_usb_interface(dev);
+
+ ret = usb_autopm_get_interface(intf);
+ if (ret)
+ goto out;
brcmf_dbg(USB, "Enter, skb=%p\n", skb);
if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
@@ -625,9 +661,10 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
devinfo->tx_flowblock = true;
}
spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
- return 0;
fail:
+ usb_autopm_put_interface(intf);
+out:
return ret;
}
@@ -991,20 +1028,32 @@ static int
brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
{
int err;
+ struct usb_interface *intf;
brcmf_dbg(USB, "Enter\n");
- if (devinfo == NULL)
- return -ENODEV;
+ if (!devinfo) {
+ err = -ENODEV;
+ goto out;
+ }
if (!devinfo->image) {
brcmf_err("No firmware!\n");
- return -ENOENT;
+ err = -ENOENT;
+ goto out;
}
+ intf = to_usb_interface(devinfo->dev);
+ err = usb_autopm_get_interface(intf);
+ if (err)
+ goto out;
+
err = brcmf_usb_dlstart(devinfo,
(u8 *)devinfo->image, devinfo->image_len);
if (err == 0)
err = brcmf_usb_dlrun(devinfo);
+
+ usb_autopm_put_interface(intf);
+out:
return err;
}
@@ -1105,18 +1154,6 @@ error:
return NULL;
}
-static void brcmf_usb_wowl_config(struct device *dev, bool enabled)
-{
- struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
-
- brcmf_dbg(USB, "Configuring WOWL, enabled=%d\n", enabled);
- devinfo->wowl_enabled = enabled;
- if (enabled)
- device_set_wakeup_enable(devinfo->dev, true);
- else
- device_set_wakeup_enable(devinfo->dev, false);
-}
-
static
int brcmf_usb_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
{
@@ -1143,7 +1180,6 @@ static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
- .wowl_config = brcmf_usb_wowl_config,
.get_fwname = brcmf_usb_get_fwname,
};
@@ -1332,6 +1368,8 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, devinfo);
+ intf->needs_remote_wakeup = 1;
+
/* Check that the device supports only one configuration */
if (usb->descriptor.bNumConfigurations != 1) {
brcmf_err("Number of configurations: %d not supported\n",
@@ -1445,12 +1483,8 @@ static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
brcmf_dbg(USB, "Enter\n");
devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
- if (devinfo->wowl_enabled) {
- brcmf_cancel_all_urbs(devinfo);
- } else {
- brcmf_detach(&usb->dev);
- brcmf_free(&usb->dev);
- }
+ brcmf_cancel_all_urbs(devinfo);
+ device_set_wakeup_enable(devinfo->dev, true);
return 0;
}
@@ -1463,22 +1497,10 @@ static int brcmf_usb_resume(struct usb_interface *intf)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
brcmf_dbg(USB, "Enter\n");
- if (!devinfo->wowl_enabled) {
- int err;
-
- err = brcmf_alloc(&usb->dev, devinfo->settings);
- if (err)
- return err;
-
- err = brcmf_attach(devinfo->dev);
- if (err) {
- brcmf_free(devinfo->dev);
- return err;
- }
- }
devinfo->bus_pub.state = BRCMFMAC_USB_STATE_UP;
brcmf_usb_rx_fill_all(devinfo);
+ device_set_wakeup_enable(devinfo->dev, false);
return 0;
}
@@ -1535,6 +1557,7 @@ static struct usb_driver brcmf_usbdrvr = {
.suspend = brcmf_usb_suspend,
.resume = brcmf_usb_resume,
.reset_resume = brcmf_usb_reset_resume,
+ .supports_autosuspend = true,
.disable_hub_initiated_lpm = 1,
};
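
The remaining usb.c hunks wire the driver into USB autosuspend as a
replacement for the WOWL-specific suspend handling: probe sets
needs_remote_wakeup so the core arms remote wakeup before suspending, the
suspend/resume callbacks keep the brcmf core attached and merely park and
refill the URBs while toggling device_set_wakeup_enable(), and the driver
declares supports_autosuspend. A sketch of the opt-in knobs; everything
except the two autosuspend fields is illustrative:

    #include <linux/usb.h>

    static int example_probe(struct usb_interface *intf,
                             const struct usb_device_id *id)
    {
            intf->needs_remote_wakeup = 1;  /* arm remote wakeup */
            return 0;
    }

    static struct usb_driver example_usb_driver = {
            .name                 = "example",
            .probe                = example_probe,
            .supports_autosuspend = 1,      /* enable runtime autosuspend */
    };
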
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index d1037b6ef2d6..c6c4be05159d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -44,6 +44,7 @@
#define BRCM_CC_4358_CHIP_ID 0x4358
#define BRCM_CC_4359_CHIP_ID 0x4359
#define BRCM_CC_43602_CHIP_ID 43602
+#define BRCM_CC_4364_CHIP_ID 0x4364
#define BRCM_CC_4365_CHIP_ID 0x4365
#define BRCM_CC_4366_CHIP_ID 0x4366
#define BRCM_CC_43664_CHIP_ID 43664
@@ -74,6 +75,7 @@
#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
#define BRCM_PCIE_43602_RAW_DEVICE_ID 43602
+#define BRCM_PCIE_4364_DEVICE_ID 0x4464
#define BRCM_PCIE_4365_DEVICE_ID 0x43ca
#define BRCM_PCIE_4365_2G_DEVICE_ID 0x43cb
#define BRCM_PCIE_4365_5G_DEVICE_ID 0x43cc
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 536cd729c086..97ea6e2035e6 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -629,30 +629,30 @@ static char *snprint_line(char *buf, size_t count,
int out, i, j, l;
char c;
- out = snprintf(buf, count, "%08X", ofs);
+ out = scnprintf(buf, count, "%08X", ofs);
for (l = 0, i = 0; i < 2; i++) {
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (j = 0; j < 8 && l < len; j++, l++)
- out += snprintf(buf + out, count - out, "%02X ",
+ out += scnprintf(buf + out, count - out, "%02X ",
data[(i * 8 + j)]);
for (; j < 8; j++)
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
}
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (l = 0, i = 0; i < 2; i++) {
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (j = 0; j < 8 && l < len; j++, l++) {
c = data[(i * 8 + j)];
if (!isascii(c) || !isprint(c))
c = '.';
- out += snprintf(buf + out, count - out, "%c", c);
+ out += scnprintf(buf + out, count - out, "%c", c);
}
for (; j < 8; j++)
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
}
return buf;
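
The snprintf() to scnprintf() conversion here (repeated below in ipw2200.c
and the libipw files) is not cosmetic: snprintf() returns the length the
output would have had without truncation, so accumulating it with
"out += snprintf(buf + out, count - out, ...)" can push out past count,
after which count - out underflows to a huge size_t and the next call can
write past the buffer. scnprintf() returns the number of characters
actually written, which keeps the running offset inside the buffer. A
user-space approximation (scnprintf is a kernel helper, emulated here for
illustration):

    #include <stdarg.h>
    #include <stdio.h>

    /* User-space stand-in for the kernel's scnprintf(): returns the
     * number of characters actually written, never size or more. */
    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
            va_list args;
            int i;

            if (size == 0)
                    return 0;
            va_start(args, fmt);
            i = vsnprintf(buf, size, fmt, args);
            va_end(args);
            return i >= (int)size ? (int)size - 1 : i;
    }

    int main(void)
    {
            char buf[8];

            /* snprintf reports 10 even though only 7 chars + NUL fit, so
             * a chained "out += ..." would already be out of bounds */
            printf("snprintf: %d\n",
                   snprintf(buf, sizeof(buf), "0123456789"));

            /* the scnprintf-style return stays within the buffer: 7 */
            printf("scnprintf: %d\n",
                   my_scnprintf(buf, sizeof(buf), "0123456789"));
            return 0;
    }
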
@@ -1730,7 +1730,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
/* the ipw2100 hardware really doesn't want power management delays
* longer than 175usec
*/
- pm_qos_update_request(&ipw2100_pm_qos_req, 175);
+ cpu_latency_qos_update_request(&ipw2100_pm_qos_req, 175);
/* If the interrupt is enabled, turn it off... */
spin_lock_irqsave(&priv->low_lock, flags);
@@ -1875,7 +1875,8 @@ static void ipw2100_down(struct ipw2100_priv *priv)
ipw2100_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->low_lock, flags);
- pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&ipw2100_pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
/* We have to signal any supplicant if we are disassociating */
if (associated)
@@ -6566,8 +6567,7 @@ static int __init ipw2100_init(void)
printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
- pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
ret = pci_register_driver(&ipw2100_pci_driver);
if (ret)
@@ -6594,7 +6594,7 @@ static void __exit ipw2100_exit(void)
&driver_attr_debug_level);
#endif
pci_unregister_driver(&ipw2100_pci_driver);
- pm_qos_remove_request(&ipw2100_pm_qos_req);
+ cpu_latency_qos_remove_request(&ipw2100_pm_qos_req);
}
module_init(ipw2100_init);
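
The PM QoS hunks in this file are part of the kernel-wide switch from the
generic pm_qos_*_request(..., PM_QOS_CPU_DMA_LATENCY, ...) calls to the
dedicated CPU latency API, which drops the class argument. An in-kernel
sketch of the new request lifecycle (kernel context only; the 175 usec
bound is the one this driver uses):

    #include <linux/pm_qos.h>

    static struct pm_qos_request example_qos_req;

    static void example_init(void)
    {
            /* register with no constraint */
            cpu_latency_qos_add_request(&example_qos_req,
                                        PM_QOS_DEFAULT_VALUE);
    }

    static void example_hw_up(void)
    {
            /* bound CPU wakeup latency while the hardware is sensitive */
            cpu_latency_qos_update_request(&example_qos_req, 175);
    }

    static void example_exit(void)
    {
            cpu_latency_qos_remove_request(&example_qos_req);
    }
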
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 5ef6f87a48ac..60b5e08dd6df 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -223,30 +223,30 @@ static int snprint_line(char *buf, size_t count,
int out, i, j, l;
char c;
- out = snprintf(buf, count, "%08X", ofs);
+ out = scnprintf(buf, count, "%08X", ofs);
for (l = 0, i = 0; i < 2; i++) {
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (j = 0; j < 8 && l < len; j++, l++)
- out += snprintf(buf + out, count - out, "%02X ",
+ out += scnprintf(buf + out, count - out, "%02X ",
data[(i * 8 + j)]);
for (; j < 8; j++)
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
}
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (l = 0, i = 0; i < 2; i++) {
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (j = 0; j < 8 && l < len; j++, l++) {
c = data[(i * 8 + j)];
if (!isascii(c) || !isprint(c))
c = '.';
- out += snprintf(buf + out, count - out, "%c", c);
+ out += scnprintf(buf + out, count - out, "%c", c);
}
for (; j < 8; j++)
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
}
return out;
@@ -1279,12 +1279,12 @@ static ssize_t show_event_log(struct device *d,
log_len = log_size / sizeof(*log);
ipw_capture_event_log(priv, log_len, log);
- len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
for (i = 0; i < log_len; i++)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n%08X%08X%08X",
log[i].time, log[i].event, log[i].data);
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
kfree(log);
return len;
}
@@ -1298,13 +1298,13 @@ static ssize_t show_error(struct device *d,
u32 len = 0, i;
if (!priv->error)
return 0;
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"%08lX%08X%08X%08X",
priv->error->jiffies,
priv->error->status,
priv->error->config, priv->error->elem_len);
for (i = 0; i < priv->error->elem_len; i++)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n%08X%08X%08X%08X%08X%08X%08X",
priv->error->elem[i].time,
priv->error->elem[i].desc,
@@ -1314,15 +1314,15 @@ static ssize_t show_error(struct device *d,
priv->error->elem[i].link2,
priv->error->elem[i].data);
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n%08X", priv->error->log_len);
for (i = 0; i < priv->error->log_len; i++)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n%08X%08X%08X",
priv->error->log[i].time,
priv->error->log[i].event,
priv->error->log[i].data);
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -1350,7 +1350,7 @@ static ssize_t show_cmd_log(struct device *d,
(i != priv->cmdlog_pos) && (len < PAGE_SIZE);
i = (i + 1) % priv->cmdlog_len) {
len +=
- snprintf(buf + len, PAGE_SIZE - len,
+ scnprintf(buf + len, PAGE_SIZE - len,
"\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
priv->cmdlog[i].cmd.len);
@@ -1358,9 +1358,9 @@ static ssize_t show_cmd_log(struct device *d,
snprintk_buf(buf + len, PAGE_SIZE - len,
(u8 *) priv->cmdlog[i].cmd.param,
priv->cmdlog[i].cmd.len);
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
}
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -9608,24 +9608,24 @@ static int ipw_wx_get_powermode(struct net_device *dev,
int level = IPW_POWER_LEVEL(priv->power_mode);
char *p = extra;
- p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
+ p += scnprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
switch (level) {
case IPW_POWER_AC:
- p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
+ p += scnprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
break;
case IPW_POWER_BATTERY:
- p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
+ p += scnprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
break;
default:
- p += snprintf(p, MAX_WX_STRING - (p - extra),
+ p += scnprintf(p, MAX_WX_STRING - (p - extra),
"(Timeout %dms, Period %dms)",
timeout_duration[level - 1] / 1000,
period_duration[level - 1] / 1000);
}
if (!(priv->power_mode & IPW_POWER_ENABLED))
- p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
+ p += scnprintf(p, MAX_WX_STRING - (p - extra), " OFF");
wrqu->data.length = p - extra + 1;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 0cb36d1b983a..5a2a723e480b 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -1156,7 +1156,7 @@ static int libipw_parse_info_param(struct libipw_info_element
for (i = 0; i < network->rates_len; i++) {
network->rates[i] = info_element->data[i];
#ifdef CONFIG_LIBIPW_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates[i]);
#endif
@@ -1183,7 +1183,7 @@ static int libipw_parse_info_param(struct libipw_info_element
for (i = 0; i < network->rates_ex_len; i++) {
network->rates_ex[i] = info_element->data[i];
#ifdef CONFIG_LIBIPW_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates_ex[i]);
#endif
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index 3d558b47168b..a0cf78c418ac 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -213,7 +213,7 @@ static char *libipw_translate_scan(struct libipw_device *ieee,
* for given network. */
iwe.cmd = IWEVCUSTOM;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %ums ago",
elapsed_jiffies_msecs(network->last_scanned));
iwe.u.data.length = p - custom;
@@ -223,18 +223,18 @@ static char *libipw_translate_scan(struct libipw_device *ieee,
/* Add spectrum management information */
iwe.cmd = -1;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: ");
if (libipw_get_channel_flags(ieee, network->channel) &
LIBIPW_CH_INVALID) {
iwe.cmd = IWEVCUSTOM;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID ");
}
if (libipw_get_channel_flags(ieee, network->channel) &
LIBIPW_CH_RADAR_DETECT) {
iwe.cmd = IWEVCUSTOM;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS ");
}
if (iwe.cmd == IWEVCUSTOM) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 206b43b9dff8..9167c3d2711d 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -228,9 +228,7 @@ il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
static int
il3945_remove_static_key(struct il_priv *il)
{
- int ret = -EOPNOTSUPP;
-
- return ret;
+ return -EOPNOTSUPP;
}
static int
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 6209f85a71dd..0af9e997c9f6 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -374,7 +374,7 @@ out:
}
static void *
-il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+il3945_rs_alloc(struct ieee80211_hw *hw)
{
return hw->priv;
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 7c6e2c863497..0a02d8aca320 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2474,7 +2474,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
}
static void *
-il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+il4965_rs_alloc(struct ieee80211_hw *hw)
{
return hw->priv;
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index 34d0579132ce..fc8fa5818de7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -416,7 +416,6 @@ il4965_set_ucode_ptrs(struct il_priv *il)
{
dma_addr_t pinst;
dma_addr_t pdata;
- int ret = 0;
/* bits 35:4 for 4965 */
pinst = il->ucode_code.p_addr >> 4;
@@ -433,7 +432,7 @@ il4965_set_ucode_ptrs(struct il_priv *il)
il->ucode_code.len | BSM_DRAM_INST_LOAD);
D_INFO("Runtime uCode pointers are set.\n");
- return ret;
+ return 0;
}
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 355af47c5f73..bc49cdd819df 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -57,7 +57,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 52
+#define IWL_22000_UCODE_API_MAX 53
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -76,10 +76,8 @@
#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
-#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-"
@@ -98,14 +96,10 @@
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_MODULE_FIRMWARE(api) \
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
@@ -235,6 +229,38 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
+const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+ .integrated = true,
+ .xtal_latency = 5000,
+};
+
+const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+ .integrated = true,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+};
+
+const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+};
+
/*
* If the device doesn't support HE, no need to have that many buffers.
* 22000 devices can split multiple frames into a single RB, so fewer are
@@ -246,6 +272,64 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
#define IWL_NUM_RBDS_22000_HE 2048
#define IWL_NUM_RBDS_AX210_HE 4096
+/*
+ * All JF radio modules are part of the 9000 series, but the MAC part
+ * looks more like 22000. That's why this device is here, but called
+ * 9560 nevertheless.
+ */
+const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg = {
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
+};
+
+const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg = {
+ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
+};
+
+const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg = {
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
+};
+
+const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg = {
+ .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
+};
+
+const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .bisr_workaround = 1,
+};
+
+const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
+
+const char iwl_ax200_killer_1650w_name[] =
+ "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
+const char iwl_ax200_killer_1650x_name[] =
+ "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)";
+
const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
@@ -354,35 +438,6 @@ const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
};
const struct iwl_cfg iwl_ax200_cfg_cc = {
- .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
- .fw_name_pre = IWL_CC_A_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .trans.bisr_workaround = 1,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650x_2ax_cfg = {
- .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
- .fw_name_pre = IWL_CC_A_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .trans.bisr_workaround = 1,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650w_2ax_cfg = {
- .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -391,188 +446,9 @@ const struct iwl_cfg killer1650w_2ax_cfg = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .trans.bisr_workaround = 1,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-/*
- * All JF radio modules are part of the 9000 series, but the MAC part
- * looks more like 22000. That's why this device is here, but called
- * 9560 nevertheless.
- */
-const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9461",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9462",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9461",
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9462",
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560",
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
@@ -625,32 +501,6 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-const struct iwl_cfg iwl22000_2ax_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_B_FW_PRE,
@@ -664,19 +514,6 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_A0_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
@@ -725,9 +562,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 379ea788e424..f84b8e5d3f0b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -123,8 +123,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
#define IWL_DEVICE_9000 \
.ucode_api_max = IWL9000_UCODE_API_MAX, \
.ucode_api_min = IWL9000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_9000, \
- .trans.base_params = &iwl9000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = 10, \
.non_shared_ant = ANT_B, \
@@ -137,11 +135,9 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.thermal_params = &iwl9000_tt_params, \
.apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
.num_rbds = 512, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
- .trans.rf_id = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
.min_umac_error_event_table = 0x800000, \
@@ -178,173 +174,54 @@ const struct iwl_cfg_trans_params iwl9000_trans_cfg = {
.rf_id = true,
};
-const struct iwl_cfg iwl9160_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9160",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
-const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
-
-const struct iwl_cfg iwl9260_2ac_160_cfg = {
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9260_killer_2ac_cfg = {
- .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9270_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9270",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9460_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9460_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
+const struct iwl_cfg_trans_params iwl9560_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
.integrated = true,
- .soc_latency = 5000,
+ .xtal_latency = 5000,
};
-const struct iwl_cfg iwl9461_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
+const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
.integrated = true,
- .soc_latency = 5000,
+ .xtal_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
-const struct iwl_cfg iwl9462_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
+const char iwl9162_name[] = "Intel(R) Wireless-AC 9162";
+const char iwl9260_name[] = "Intel(R) Wireless-AC 9260";
+const char iwl9260_1_name[] = "Intel(R) Wireless-AC 9260-1";
+const char iwl9270_name[] = "Intel(R) Wireless-AC 9270";
+const char iwl9461_name[] = "Intel(R) Wireless-AC 9461";
+const char iwl9462_name[] = "Intel(R) Wireless-AC 9462";
+const char iwl9560_name[] = "Intel(R) Wireless-AC 9560";
+const char iwl9162_160_name[] = "Intel(R) Wireless-AC 9162 160MHz";
+const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
+const char iwl9270_160_name[] = "Intel(R) Wireless-AC 9270 160MHz";
+const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz";
+const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz";
+const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
-const struct iwl_cfg iwl9560_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
+const char iwl9260_killer_1550_name[] =
+ "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)";
+const char iwl9560_killer_1550i_name[] =
+ "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
+const char iwl9560_killer_1550s_name[] =
+ "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
-const struct iwl_cfg iwl9560_2ac_160_cfg = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
+const struct iwl_cfg iwl9260_2ac_cfg = {
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9560_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_soc = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
-
-const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
- .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
-
-const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
- .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
-
-const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
- .name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
- .name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
- .name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
- .name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
- .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
- .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
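
Taken together, the 22000.c and 9000.c rewrites above implement one
refactor: the many near-duplicate struct iwl_cfg entries that differed only
in marketing name or transport details are split into a shared
struct iwl_cfg_trans_params, a small set of nameless struct iwl_cfg bodies,
and standalone device-name strings, so the PCI match tables can combine the
three independently. Following only the fields visible in this diff, a
hypothetical new SKU would now be declared roughly as:

    /* Hypothetical example mirroring the pattern in this patch; the
     * *_example identifiers are not real iwlwifi symbols. */
    const struct iwl_cfg_trans_params iwl_example_trans_cfg = {
            .device_family = IWL_DEVICE_FAMILY_9000,
            .base_params = &iwl9000_base_params,
            .mq_rx_supported = true,
            .rf_id = true,
    };

    const char iwl_example_name[] = "Intel(R) Wireless-AC Example";

    const struct iwl_cfg iwl_example_cfg = {
            .fw_name_pre = IWL9000_FW_PRE,
            IWL_DEVICE_9000,        /* shared fields; no .name anymore */
    };
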
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index 226165db7dfd..dac809df7f1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -3019,7 +3019,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
cpu_to_le16(priv->lib->bt_params->agg_time_limit);
}
-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *rs_alloc(struct ieee80211_hw *hw)
{
return hw->priv;
}
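
This is the same one-line change already seen in 3945-rs.c and 4965-rs.c:
mac80211's rate_control_ops.alloc callback no longer takes a debugfs
dentry, so drivers that ignored the parameter simply drop it. A sketch of
the resulting wiring (the ops structure and field names are mac80211's; the
example implementation is hypothetical):

    #include <net/mac80211.h>

    static void *example_rs_alloc(struct ieee80211_hw *hw)
    {
            return hw->priv;        /* per-device rate-control state */
    }

    static const struct rate_control_ops example_rs_ops = {
            .name  = "example-rs",
            .alloc = example_rs_alloc,
            /* .free, .tx_status, .get_rate, ... unchanged */
    };
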
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index ba2aff3af0fe..e3a33388be70 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -296,9 +296,14 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
if (!prof->enabled) {
IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
profs[i]);
- /* if one of the profiles is disabled, we fail all */
- return -ENOENT;
+ /*
+ * if one of the profiles is disabled, we
+ * ignore all of them and return 1 to
+ * differentiate disabled from other failures.
+ */
+ return 1;
}
+
IWL_DEBUG_INFO(fwrt,
"SAR EWRD: chain %d profile index %d\n",
i, profs[i]);
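
With this change iwl_sar_select_profile() reserves negative returns for
real failures and returns 1 when a requested profile exists but is
disabled, so callers can treat "disabled" as a clean no-op instead of an
error. A sketch of the call-site contract, with the wrapper functions
hypothetical and only the return-value convention taken from the patch:

    static int example_apply_sar(struct iwl_fw_runtime *fwrt)
    {
            int ret = example_select_profiles(fwrt); /* wraps the call above */

            if (ret < 0)
                    return ret;     /* genuine failure */
            if (ret > 0)
                    return 0;       /* profile disabled: skip SAR, no error */
            return example_send_sar_table(fwrt);     /* hypothetical send */
    }
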
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 22dff2c92d4f..4f46f3ed8794 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -5,9 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,9 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -647,6 +647,11 @@ enum iwl_system_subcmd_ids {
SHARED_MEM_CFG_CMD = 0x0,
/**
+ * @SOC_CONFIGURATION_CMD: &struct iwl_soc_configuration_cmd
+ */
+ SOC_CONFIGURATION_CMD = 0x01,
+
+ /**
* @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
*/
INIT_EXTENDED_CFG_CMD = 0x03,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index a0d6802c2715..0214e553d5ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -427,6 +427,9 @@ struct iwl_tof_range_req_ap_entry_v2 {
* Default algo type is ML.
* @IWL_INITIATOR_AP_FLAGS_MCSI_REPORT: Send the MCSI for each FTM frame to the
* driver.
+ * @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non-trigger-based flow
+ * @IWL_INITIATOR_AP_FLAGS_TB: Use trigger-based flow
+ * @IWL_INITIATOR_AP_FLAGS_SECURED: request secured measurement
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@@ -436,6 +439,9 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ALGO_LR = BIT(5),
IWL_INITIATOR_AP_FLAGS_ALGO_FFT = BIT(6),
IWL_INITIATOR_AP_FLAGS_MCSI_REPORT = BIT(8),
+ IWL_INITIATOR_AP_FLAGS_NON_TB = BIT(9),
+ IWL_INITIATOR_AP_FLAGS_TB = BIT(10),
+ IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 1b2b5fa56e19..3d770f406c38 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -95,6 +95,7 @@ struct iwl_ssid_ie {
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
#define IWL_SCAN_MAX_PROFILES 11
+#define IWL_SCAN_MAX_PROFILES_V2 8
#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
#define SCAN_NUM_BAND_PROBE_DATA_V_1 2
#define SCAN_NUM_BAND_PROBE_DATA_V_2 3
@@ -160,8 +161,7 @@ struct iwl_scan_offload_profile {
} __packed;
/**
- * struct iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
- * @profiles: profiles to search for match
+ * struct iwl_scan_offload_profile_cfg_data - scan offload profile config data
* @blacklist_len: length of blacklist
* @num_profiles: num of profiles in the list
* @match_notify: clients waiting for match found notification
@@ -170,8 +170,7 @@ struct iwl_scan_offload_profile {
* @any_beacon_notify: clients waiting for match notification without match
* @reserved: reserved
*/
-struct iwl_scan_offload_profile_cfg {
- struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+struct iwl_scan_offload_profile_cfg_data {
u8 blacklist_len;
u8 num_profiles;
u8 match_notify;
@@ -182,6 +181,26 @@ struct iwl_scan_offload_profile_cfg {
} __packed;
/**
+ * struct iwl_scan_offload_profile_cfg_v1 - scan offload profiles config (v1)
+ * @profiles: profiles to search for match
+ * @data: the rest of the data for profile_cfg
+ */
+struct iwl_scan_offload_profile_cfg_v1 {
+ struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+ struct iwl_scan_offload_profile_cfg_data data;
+} __packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1-2 */
+
+/**
+ * struct iwl_scan_offload_profile_cfg - scan offload profiles config
+ * @profiles: profiles to search for match
+ * @data: the rest of the data for profile_cfg
+ */
+struct iwl_scan_offload_profile_cfg {
+ struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES_V2];
+ struct iwl_scan_offload_profile_cfg_data data;
+} __packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_3 */
+
+/**
* struct iwl_scan_schedule_lmac - schedule of scan offload
* @delay: delay between iterations, in seconds.
* @iterations: num of scan iterations
@@ -702,13 +721,16 @@ struct iwl_scan_channel_cfg_umac {
u8 channel_num;
u8 iter_count;
__le16 iter_interval;
- } v1; /* SCAN_CHANNEL_CFG_S_VER1 */
+ } v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */
struct {
u8 channel_num;
u8 band;
u8 iter_count;
u8 iter_interval;
- } v2; /* SCAN_CHANNEL_CFG_S_VER2 */
+ } v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2
+ * SCAN_CHANNEL_CONFIG_API_S_VER_3
+ * SCAN_CHANNEL_CONFIG_API_S_VER_4
+ */
};
} __packed;
@@ -943,6 +965,25 @@ struct iwl_scan_channel_params_v4 {
u8 adwell_ch_override_bitmap[16];
} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_4 also
SCAN_CHANNEL_PARAMS_API_S_VER_5 */
+
+/**
+ * struct iwl_scan_channel_params_v6
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @n_aps_override: override the number of APs the FW uses to calculate dwell
+ * time when adaptive dwell is used.
+ * Channel k will use n_aps_override[i] when BIT(20 + i) is set in
+ * channel_config[k].flags
+ * @channel_config: array of explicit channel configurations
+ * for 2.4 GHz and 5.2 GHz bands
+ */
+struct iwl_scan_channel_params_v6 {
+ u8 flags;
+ u8 count;
+ u8 n_aps_override[2];
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */
+
/**
* struct iwl_scan_general_params_v10
* @flags: &enum iwl_umac_scan_flags
@@ -1024,6 +1065,20 @@ struct iwl_scan_req_params_v13 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
/**
+ * struct iwl_scan_req_params_v14
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v6
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v4
+ */
+struct iwl_scan_req_params_v14 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v6 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v4 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_14 */
+
+/**
* struct iwl_scan_req_umac_v12
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
@@ -1048,6 +1103,18 @@ struct iwl_scan_req_umac_v13 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_13 */
/**
+ * struct iwl_scan_req_umac_v14
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v14 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v14 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_14 */
+
+/**
* struct iwl_umac_scan_abort
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
@@ -1121,7 +1188,7 @@ struct iwl_scan_offload_profiles_query_v1 {
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
- struct iwl_scan_offload_profile_match_v1 matches[IWL_SCAN_MAX_PROFILES];
+ struct iwl_scan_offload_profile_match_v1 matches[0];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
/**
@@ -1165,7 +1232,7 @@ struct iwl_scan_offload_profiles_query {
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
- struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+ struct iwl_scan_offload_profile_match matches[0];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */
/**
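
Shrinking matches[IWL_SCAN_MAX_PROFILES] to matches[0] in both
query-response structs turns the trailing array into a flexible array
member: the struct no longer hard-codes room for eleven entries, and the
notification is sized by however many profiles the running firmware
supports (11 for v1/v2, 8 for the new v3). A generic user-space
illustration of the size arithmetic ([0] is the older kernel idiom for what
C99 spells []):

    #include <stdio.h>
    #include <stdlib.h>

    struct match {
            unsigned int ssid_bitmap;
    };

    struct query_resp {
            unsigned int matched_profiles;
            struct match matches[];         /* flexible array member */
    };

    int main(void)
    {
            size_t n = 8;   /* e.g. IWL_SCAN_MAX_PROFILES_V2 */
            struct query_resp *resp =
                    malloc(sizeof(*resp) + n * sizeof(resp->matches[0]));

            if (!resp)
                    return 1;
            /* sizeof(*resp) counts only the fixed header, not the array */
            printf("header %zu bytes, %zu entries add %zu bytes\n",
                   sizeof(*resp), n, n * sizeof(resp->matches[0]));
            free(resp);
            return 0;
    }
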
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
new file mode 100644
index 000000000000..aadca78e9846
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_soc_h__
+#define __iwl_fw_api_soc_h__
+
+#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
+#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
+
+/**
+ * struct iwl_soc_configuration_cmd - Set device stabilization latency
+ *
+ * @flags: soc settings flags. In VER_1, we can only set the DISCRETE
+ * flag, because the FW treats the whole value as an integer. In
+ * VER_2, we can set the bits independently.
+ * @latency: time for SOC to ensure stable power & XTAL
+ */
+struct iwl_soc_configuration_cmd {
+ __le32 flags;
+ __le32 latency;
+} __packed; /*
+ * SOC_CONFIGURATION_CMD_S_VER_1 (see description above)
+ * SOC_CONFIGURATION_CMD_S_VER_2
+ */
+
+#endif /* __iwl_fw_api_soc_h__ */
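
Together with the SOC_CONFIGURATION_CMD id added to commands.h above, this
header lets the driver report the SoC power/XTAL stabilization latency to
firmware that advertises the new IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT
capability (see file.h below). A sketch of building and sending the
payload; iwl_mvm_send_cmd_pdu() and WIDE_ID() follow the mvm conventions,
and the surrounding function is hypothetical:

    static int example_send_soc_cmd(struct iwl_mvm *mvm, u32 xtal_latency)
    {
            struct iwl_soc_configuration_cmd cmd = {
                    .flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE),
                    .latency = cpu_to_le32(xtal_latency),
            };

            return iwl_mvm_send_cmd_pdu(mvm,
                                        WIDE_ID(SYSTEM_GROUP,
                                                SOC_CONFIGURATION_CMD),
                                        0, sizeof(cmd), &cmd);
    }
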
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 73196cbc7fbe..75d958bab0e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -8,7 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -99,7 +99,7 @@ enum iwl_mvm_dqa_txq {
IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
- IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+ IWL_MVM_DQA_MAX_DATA_QUEUE = 30,
};
enum iwl_mvm_tx_fifo {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 8796ab8f2a5f..14ac7153a3e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1108,6 +1108,38 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
+ int i;
+
+ /* we shouldn't get here if the trans doesn't have read_config32 */
+ if (WARN_ON_ONCE(!trans->ops->read_config32))
+ return -EOPNOTSUPP;
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ int ret;
+ u32 tmp;
+
+ ret = trans->ops->read_config32(trans, addr + i, &tmp);
+ if (ret < 0)
+ return ret;
+
+ *val++ = cpu_to_le32(tmp);
+ }
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
@@ -1701,13 +1733,7 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
- struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
- u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id), size;
-
- fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
- if (le32_to_cpu(fw_mon_cfg->buf_location) !=
- IWL_FW_INI_LOCATION_SRAM_PATH)
- return 0;
+ u32 size;
size = le32_to_cpu(reg->internal_buffer.size);
if (!size)
@@ -2048,7 +2074,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_range = iwl_dump_ini_csr_iter,
},
[IWL_FW_INI_REGION_DRAM_IMR] = {},
- [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {},
+ [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_config_iter,
+ },
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 1554f5fdd483..35f42e529a6d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -5,11 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,11 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -369,6 +365,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
* @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
+ * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting
+ * stabilization latency for SoCs.
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -437,6 +435,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
/* set 1 */
+ IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37,
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index f8c6ed823bc5..da0d90e2b537 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -69,8 +69,6 @@
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
-#define IWL_FW_DBG_DOMAIN IWL_TRANS_FW_DBG_DOMAIN(fwrt->trans)
-
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index be6a2bf9ce74..d5d984d7ce83 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -287,27 +287,36 @@ struct iwl_pwr_tx_backoff {
/**
* struct iwl_cfg_trans - information needed to start the trans
*
- * These values cannot be changed when multiple configs are used for a
- * single PCI ID, because they are needed before the HW REV or RFID
- * can be read.
+ * These values are specific to the device ID and do not change when
+ * multiple configs are used for a single device ID. The values are
+ * used, among other things, to boot the NIC so that the HW REV or
+ * RFID can be read before deciding the remaining parameters to use.
*
* @base_params: pointer to basic parameters
* @csr: csr flags and addresses that are different across devices
* @device_family: the device family
* @umac_prph_offset: offset to add to UMAC periphery address
+ * @xtal_latency: power up latency to get the xtal stabilized
+ * @extra_phy_cfg_flags: extra configuration flags to pass to the PHY
* @rf_id: need to read rf_id to determine the firmware image
* @use_tfh: use TFH
* @gen2: 22000 and on transport operation
* @mq_rx_supported: multi-queue rx support
+ * @integrated: discrete or integrated
+ * @low_latency_xtal: use the low latency xtal if supported
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
enum iwl_device_family device_family;
u32 umac_prph_offset;
+ u32 xtal_latency;
+ u32 extra_phy_cfg_flags;
u32 rf_id:1,
use_tfh:1,
gen2:1,
mq_rx_supported:1,
+ integrated:1,
+ low_latency_xtal:1,
bisr_workaround:1;
};
@@ -374,7 +383,6 @@ struct iwl_fw_mon_regs {
* @smem_offset: offset from which the SMEM begins
* @smem_len: the length of SMEM
* @vht_mu_mimo_supported: VHT MU-MIMO support
- * @integrated: discrete or integrated
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
* @d3_debug_data_base_addr: base address where D3 debug data is stored
@@ -413,7 +421,6 @@ struct iwl_cfg {
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
- u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
u32 rx_with_siso_diversity:1,
@@ -427,7 +434,6 @@ struct iwl_cfg {
disable_dummy_notification:1,
apmg_not_supported:1,
vht_mu_mimo_supported:1,
- integrated:1,
cdb:1,
dbgc_supported:1,
uhb_supported:1;
@@ -442,7 +448,6 @@ struct iwl_cfg {
u8 ucode_api_min;
u16 num_rbds;
u32 min_umac_error_event_table;
- u32 extra_phy_cfg_flags;
u32 d3_debug_data_base_addr;
u32 d3_debug_data_length;
u32 min_txq_size;
@@ -454,9 +459,43 @@ struct iwl_cfg {
#define IWL_CFG_ANY (~0)
+#define IWL_CFG_MAC_TYPE_PU 0x31
+#define IWL_CFG_MAC_TYPE_PNJ 0x32
+#define IWL_CFG_MAC_TYPE_TH 0x32
+#define IWL_CFG_MAC_TYPE_QU 0x33
+#define IWL_CFG_MAC_TYPE_QUZ 0x35
+#define IWL_CFG_MAC_TYPE_QNJ 0x36
+
+#define IWL_CFG_RF_TYPE_TH 0x105
+#define IWL_CFG_RF_TYPE_TH1 0x108
+#define IWL_CFG_RF_TYPE_JF2 0x105
+#define IWL_CFG_RF_TYPE_JF1 0x108
+
+#define IWL_CFG_RF_ID_TH 0x1
+#define IWL_CFG_RF_ID_TH1 0x1
+#define IWL_CFG_RF_ID_JF 0x3
+#define IWL_CFG_RF_ID_JF1 0x6
+#define IWL_CFG_RF_ID_JF1_DIV 0xA
+
+#define IWL_CFG_NO_160 0x0
+#define IWL_CFG_160 0x1
+
+#define IWL_CFG_CORES_BT 0x0
+#define IWL_CFG_CORES_BT_GNSS 0x5
+
+#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
+#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
+#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
+
struct iwl_dev_info {
u16 device;
u16 subdevice;
+ u16 mac_type;
+ u16 rf_type;
+ u8 mac_step;
+ u8 rf_id;
+ u8 no_160;
+ u8 cores;
const struct iwl_cfg *cfg;
const char *name;
};
@@ -465,8 +504,31 @@ struct iwl_dev_info {
* This list declares the config structures for all devices.
*/
extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
+extern const char iwl9162_name[];
+extern const char iwl9260_name[];
+extern const char iwl9260_1_name[];
+extern const char iwl9270_name[];
+extern const char iwl9461_name[];
+extern const char iwl9462_name[];
+extern const char iwl9560_name[];
+extern const char iwl9162_160_name[];
extern const char iwl9260_160_name[];
+extern const char iwl9270_160_name[];
+extern const char iwl9461_160_name[];
+extern const char iwl9462_160_name[];
extern const char iwl9560_160_name[];
+extern const char iwl9260_killer_1550_name[];
+extern const char iwl9560_killer_1550i_name[];
+extern const char iwl9560_killer_1550s_name[];
+extern const char iwl_ax200_name[];
+extern const char iwl_ax200_killer_1650w_name[];
+extern const char iwl_ax200_killer_1650x_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
@@ -533,38 +595,15 @@ extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
-extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9260_2ac_160_cfg;
-extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
-extern const struct iwl_cfg iwl9270_2ac_cfg;
-extern const struct iwl_cfg iwl9460_2ac_cfg;
-extern const struct iwl_cfg iwl9560_2ac_cfg;
-extern const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+extern const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg;
+extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg;
+extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg;
+extern const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
-extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
-extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
-extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
@@ -578,22 +617,8 @@ extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
-extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0;
-extern const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0;
-extern const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0;
-extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
-extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
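To make the new subdevice matching concrete, here is a worked decode; a minimal sketch, assuming the field layout implied by the masks above (bits 7:4 carry the RF ID, bit 9 the no-160 flag, bits 12:10 the cores field):

/* Sketch: decode the matcher fields from a PCI subsystem device ID */
static void example_decode_subdevice(u16 subdevice)
{
	u8 rf_id  = IWL_SUBDEVICE_RF_ID(subdevice);	/* bits 7:4 */
	u8 no_160 = IWL_SUBDEVICE_NO_160(subdevice);	/* bit 9 */
	u8 cores  = IWL_SUBDEVICE_CORES(subdevice);	/* bits 12:10 */

	/*
	 * Example: subdevice 0x1551 decodes to rf_id = 0x5, no_160 = 0,
	 * cores = 0x5 (IWL_CFG_CORES_BT_GNSS). A struct iwl_dev_info
	 * entry matches when each field equals the table value or the
	 * table holds IWL_CFG_ANY.
	 */
	pr_info("rf_id=%#x no_160=%u cores=%#x\n", rf_id, no_160, cores);
}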
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 4208e720f6e6..bf2f00b89214 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -236,6 +236,12 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EINVAL;
}
+ if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
+ !trans->ops->read_config32) {
+ IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
+ return -EOPNOTSUPP;
+ }
+
active_reg = &trans->dbg.active_regions[id];
if (*active_reg) {
IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 0481796f75bc..eeb750bdbda1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1467,7 +1467,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
kmemdup(pieces->dbg_conf_tlv[i],
pieces->dbg_conf_tlv_len[i],
GFP_KERNEL);
- if (!pieces->dbg_conf_tlv[i])
+ if (!drv->fw.dbg.conf_tlv[i])
goto out_free_fw;
}
}
@@ -1715,6 +1715,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
+ .enable_ini = true,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1837,7 +1838,7 @@ MODULE_PARM_DESC(uapsd_disable,
module_param_named(enable_ini, iwlwifi_mod_params.enable_ini,
bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_ini,
- "Enable debug INI TLV FW debug infrastructure (default: 0");
+ "Enable debug INI TLV FW debug infrastructure (default: true");
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index bab0999f002c..ccf0bc16465d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -532,8 +532,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
- IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
- IEEE80211_HE_MAC_CAP2_ACK_EN,
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
@@ -617,8 +616,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
- IEEE80211_HE_MAC_CAP2_BSR |
- IEEE80211_HE_MAC_CAP2_ACK_EN,
+ IEEE80211_HE_MAC_CAP2_BSR,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
@@ -684,7 +682,9 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
},
};
-static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
+static void iwl_init_he_hw_capab(struct iwl_trans *trans,
+ struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
u8 tx_chains, u8 rx_chains)
{
sband->iftype_data = iwl_he_capa;
@@ -728,7 +728,7 @@ static void iwl_init_sbands(struct iwl_trans *trans,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
- iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
@@ -743,7 +743,7 @@ static void iwl_init_sbands(struct iwl_trans *trans,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
- iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 7b3b1f4c99b4..bba527b339b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -544,6 +544,8 @@ struct iwl_trans_rxq_dma_data {
* @read_mem: read device's SRAM in DWORD
* @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
* will be zeroed.
+ * @read_config32: read a u32 value from the device's config space at
+ * the given offset.
* @configure: configure parameters required by the transport layer from
* the op_mode. May be called several times before start_fw, can't be
* called after that.
@@ -614,6 +616,7 @@ struct iwl_trans_ops {
void *buf, int dwords);
int (*write_mem)(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords);
+ int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
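Because the dump code reaches read_config32 only through the ops table, any transport may provide it; a PCIe transport can forward it to the PCI core almost verbatim. A minimal sketch, with the function name and the trans_pcie->pci_dev access assumed from the PCIe transport layout:

/* Sketch: one plausible PCIe backing for .read_config32 (names assumed) */
static int example_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
				      u32 *val)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* pci_read_config_dword() returns 0 on success */
	return pci_read_config_dword(trans_pcie->pci_dev, ofs, val);
}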
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 22a32eb10f01..122ca7624073 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1709,6 +1709,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
};
int ret, len;
size_t query_len, matches_len;
+ int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
@@ -1720,11 +1721,11 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
query_len = sizeof(struct iwl_scan_offload_profiles_query);
matches_len = sizeof(struct iwl_scan_offload_profile_match) *
- IWL_SCAN_MAX_PROFILES;
+ max_profiles;
} else {
query_len = sizeof(struct iwl_scan_offload_profiles_query_v1);
matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) *
- IWL_SCAN_MAX_PROFILES;
+ max_profiles;
}
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 190cf15b825c..3beef8d077b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -461,6 +461,8 @@ static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
lq_sta->last_rate_n_flags);
+ if (desc < bufsz - 1)
+ buff[desc++] = '\n';
mutex_unlock(&mvm->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
@@ -1013,6 +1015,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
(int)(ARRAY_SIZE(stats->last_rates) - i));
pos += rs_pretty_print_rate(pos, endpos - pos,
stats->last_rates[idx]);
+ if (pos < endpos - 1)
+ *pos++ = '\n';
}
spin_unlock_bh(&mvm->drv_stats_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 6e1ea921c299..9e21f5e5d364 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -278,6 +278,10 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
return -EINVAL;
}
+ /* non-EDCA-based measurement must use HE preamble */
+ if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
+ *format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
+
*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
@@ -349,6 +353,11 @@ iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
FTM_PUT_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
FTM_PUT_FLAG(ALGO_FFT);
+
+ if (peer->ftm.trigger_based)
+ FTM_PUT_FLAG(TB);
+ else if (peer->ftm.non_trigger_based)
+ FTM_PUT_FLAG(NON_TB);
}
static int
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index e3eb812e0248..05a06f88db6c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -73,6 +73,7 @@
#include "fw/api/datapath.h"
#include "fw/api/phy.h"
#include "fw/api/config.h"
+#include "fw/api/soc.h"
#include "fw/api/alive.h"
#include "fw/api/binding.h"
#include "fw/api/cmdhdr.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 98263cd37944..e67c452fa92c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,6 +87,36 @@ struct iwl_mvm_alive_data {
u32 scd_base_addr;
};
+/* set device type and latency */
+static int iwl_set_soc_latency(struct iwl_mvm *mvm)
+{
+ struct iwl_soc_configuration_cmd cmd = {};
+ int ret;
+
+ /*
+ * In VER_1 of this command, the discrete value is considered
+ * an integer; in VER_2, it's a bitmask. Since we have only 2
+ * values in VER_1, this is backwards-compatible with VER_2,
+ * as long as we don't set any other bits.
+ */
+ if (!mvm->trans->trans_cfg->integrated)
+ cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
+
+ if (iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC) >= 2 &&
+ (mvm->trans->trans_cfg->low_latency_xtal))
+ cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
+
+ cmd.latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SOC_CONFIGURATION_CMD,
+ SYSTEM_GROUP, 0), 0,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to set soc latency: %d\n", ret);
+ return ret;
+}
+
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
@@ -544,7 +572,8 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
/* set flags extra PHY configuration flags from the device's cfg */
- phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);
+ phy_cfg_cmd.phy_cfg |=
+ cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);
phy_cfg_cmd.calib_control.event_trigger =
mvm->fw->default_calib[ucode_type].event_trigger;
@@ -698,6 +727,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
struct iwl_dev_tx_power_cmd_v4 v4;
} cmd;
+ int ret;
u16 len = 0;
cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
@@ -712,9 +742,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
len = sizeof(cmd.v4.v3);
- if (iwl_sar_select_profile(&mvm->fwrt, cmd.v5.v3.per_chain_restriction,
- prof_a, prof_b))
- return -ENOENT;
+ ret = iwl_sar_select_profile(&mvm->fwrt,
+ cmd.v5.v3.per_chain_restriction,
+ prof_a, prof_b);
+
+ /* return on error or if the profile is disabled (positive number) */
+ if (ret)
+ return ret;
+
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@@ -1005,16 +1040,7 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
ret);
- ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
- /*
- * If we don't have profile 0 from BIOS, just skip it. This
- * means that SAR Geo will not be enabled either, even if we
- * have other valid profiles.
- */
- if (ret == -ENOENT)
- return 1;
-
- return ret;
+ return iwl_mvm_sar_select_profile(mvm, 1, 1);
}
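The net effect of the two hunks above is that iwl_mvm_sar_select_profile() now forwards the full tri-state result of iwl_sar_select_profile() instead of collapsing it: negative is a hard error, 0 means the per-chain limits were sent, and a positive value means the profile exists but is disabled. A caller sketch (shape illustrative only):

	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
	if (ret < 0)
		return ret;	/* hard failure, e.g. -ENOENT: no table */
	if (ret > 0)
		return 1;	/* profile disabled: skip geo SAR, not fatal */
	/* ret == 0: per-chain limits were sent to the firmware */

The iwl_mvm_up() hunk further below keys off exactly this, falling back to the WGDS sanity check only when the basic table was truly absent (-ENOENT).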
static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
@@ -1110,6 +1136,13 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
+ ret = iwl_set_soc_latency(mvm);
+ if (ret)
+ goto error;
+ }
+
/* Init RSS configuration */
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
ret = iwl_configure_rxq(mvm);
@@ -1236,7 +1269,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0) {
ret = iwl_mvm_sar_geo_init(mvm);
- } else if (ret > 0 && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
+ } else if (ret == -ENOENT && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 02df603b6400..7aa1350b093e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -193,6 +193,8 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.non_asap = 1,
.request_lci = 1,
.request_civicloc = 1,
+ .trigger_based = 1,
+ .non_trigger_based = 1,
.max_bursts_exponent = -1, /* all supported */
.max_ftms_per_burst = 0, /* no limits */
.bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
@@ -201,7 +203,8 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
BIT(NL80211_CHAN_WIDTH_80),
.preambles = BIT(NL80211_PREAMBLE_LEGACY) |
BIT(NL80211_PREAMBLE_HT) |
- BIT(NL80211_PREAMBLE_VHT),
+ BIT(NL80211_PREAMBLE_VHT) |
+ BIT(NL80211_PREAMBLE_HE),
},
};
@@ -614,7 +617,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_sched_scan_reqs = 1;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw);
/* we create the 802.11 header and zero length SSID IE. */
hw->wiphy->max_sched_scan_ie_len =
SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
@@ -702,7 +705,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
- mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
+ mvm->wowlan.max_nd_match_sets =
+ iwl_umac_scan_get_max_profiles(mvm->fw);
hw->wiphy->wowlan = &mvm->wowlan;
}
#endif
@@ -2019,7 +2023,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
.sta_id = sta_id,
.tid_limit = IWL_MAX_TID_COUNT,
- .bss_color = vif->bss_conf.bss_color,
+ .bss_color = vif->bss_conf.he_bss_color.color,
.htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d6ecc2d2bf21..afcf2b98a9cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -2147,4 +2147,11 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
iwl_mvm_get_ctrl_pos(chandef));
}
+static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
+{
+ u8 ver = iwl_mvm_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
+ return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
+ IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
+}
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index ca99a9c4f70e..15d11fb72aca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -416,8 +416,7 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
return IEEE80211_MAX_MPDU_LEN_VHT_7991;
default:
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
- }
-
+ }
} else if (ht_cap->ht_supported) {
if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 1a990ed9c3ca..00e7fdbaeb7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -3665,7 +3665,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
}
-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *rs_alloc(struct ieee80211_hw *hw)
{
return hw->priv;
}
@@ -3697,7 +3697,7 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
!(rate & RATE_MCS_HE_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
- return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+ return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
rs_pretty_ant(ant),
index == IWL_RATE_INVALID ? "BAD" :
iwl_rate_mcs[index].mbps);
@@ -3740,7 +3740,7 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
}
return scnprintf(buf, bufsz,
- "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s",
rate, type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
@@ -3888,6 +3888,8 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += scnprintf(buff + desc, bufsz - desc,
" rate[%d] 0x%X ", i, r);
desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
+ if (desc < bufsz - 1)
+ buff[desc++] = '\n';
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 5ee33c8ae9d2..77b8def26edb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -566,6 +566,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_mvm_stat_data {
struct iwl_mvm *mvm;
+ __le32 flags;
__le32 mac_id;
u8 beacon_filter_average_energy;
void *general;
@@ -606,6 +607,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
-general->beacon_average_energy[vif_id];
}
+ /* make sure that beacon statistics don't go backwards with TCM
+ * request to clear statistics
+ */
+ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ mvmvif->beacon_stats.accu_num_beacons +=
+ mvmvif->beacon_stats.num_beacons;
+
if (mvmvif->id != id)
return;
@@ -763,6 +771,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
flags = stats->flag;
}
+ data.flags = flags;
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
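The accumulation added above keeps the per-vif beacon count monotonic across firmware statistics clears; conceptually (a sketch of the bookkeeping, not the actual driver structs):

	/* bank the running count before the fw counter restarts at zero */
	if (flags & IWL_STATISTICS_REPLY_FLG_CLEAR)
		accu_num_beacons += num_beacons;

	/* the externally visible value never goes backwards */
	total_beacons = accu_num_beacons + num_beacons;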
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 3b263c81bcae..7a6ad1ff7055 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -91,8 +91,14 @@
#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
/* number of scan channels */
#define IWL_SCAN_NUM_CHANNELS 112
-/* adaptive dwell default number of APs override */
-#define IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE 10
+/* adaptive dwell number of APs override mask for p2p friendly GO */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
+/* adaptive dwell number of APs override mask for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
+/* adaptive dwell number of APs override for p2p friendly GO channels */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
+/* adaptive dwell number of APs override for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
@@ -588,11 +594,15 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
{
struct iwl_scan_offload_profile *profile;
- struct iwl_scan_offload_profile_cfg *profile_cfg;
+ struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1;
struct iwl_scan_offload_blacklist *blacklist;
+ struct iwl_scan_offload_profile_cfg_data *data;
+ int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
+ int profile_cfg_size = sizeof(*data) +
+ sizeof(*profile) * max_profiles;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
- .len[1] = sizeof(*profile_cfg),
+ .len[1] = profile_cfg_size,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
};
@@ -600,7 +610,7 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
int i;
int ret;
- if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+ if (WARN_ON(req->n_match_sets > max_profiles))
return -EIO;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
@@ -612,27 +622,37 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
if (!blacklist)
return -ENOMEM;
- profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
- if (!profile_cfg) {
+ profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL);
+ if (!profile_cfg_v1) {
ret = -ENOMEM;
goto free_blacklist;
}
cmd.data[0] = blacklist;
cmd.len[0] = sizeof(*blacklist) * blacklist_len;
- cmd.data[1] = profile_cfg;
+ cmd.data[1] = profile_cfg_v1;
+
+ /* if max_profiles is MAX_PROFILES_V2, we have the new API */
+ if (max_profiles == IWL_SCAN_MAX_PROFILES_V2) {
+ struct iwl_scan_offload_profile_cfg *profile_cfg =
+ (struct iwl_scan_offload_profile_cfg *)profile_cfg_v1;
+
+ data = &profile_cfg->data;
+ } else {
+ data = &profile_cfg_v1->data;
+ }
/* No blacklist configuration */
+ data->num_profiles = req->n_match_sets;
+ data->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ data->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ data->match_notify = SCAN_CLIENT_SCHED_SCAN;
- profile_cfg->num_profiles = req->n_match_sets;
- profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
- profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
- profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
- profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
+ data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
for (i = 0; i < req->n_match_sets; i++) {
- profile = &profile_cfg->profiles[i];
+ profile = &profile_cfg_v1->profiles[i];
profile->ssid_index = i;
/* Support any cipher and auth algorithm */
profile->unicast_cipher = 0xff;
@@ -645,7 +665,7 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
ret = iwl_mvm_send_cmd(mvm, &cmd);
- kfree(profile_cfg);
+ kfree(profile_cfg_v1);
free_blacklist:
kfree(blacklist);
@@ -1529,14 +1549,19 @@ static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
return -EINVAL;
}
+static const u8 p2p_go_friendly_chs[] = {
+ 36, 40, 44, 48, 149, 153, 157, 161, 165,
+};
+
+static const u8 social_chs[] = {
+ 1, 6, 11
+};
+
static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
u8 ch_id, u8 band, u8 *ch_bitmap,
size_t bitmap_n_entries)
{
int i;
- static const u8 p2p_go_friendly_chs[] = {
- 36, 40, 44, 48, 149, 153, 157, 161, 165,
- };
if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
return;
@@ -1561,6 +1586,35 @@ static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
}
}
+static u32 iwl_mvm_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
+{
+ int i;
+ u32 flags = 0;
+
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (p2p_go_friendly_chs[i] == ch_id) {
+ flags |= IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+ break;
+ }
+ }
+
+ if (flags)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(social_chs); i++) {
+ if (social_chs[i] == ch_id) {
+ flags |= IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
+ break;
+ }
+ }
+
+out:
+ return flags;
+}
+
static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
@@ -1615,6 +1669,30 @@ iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
}
}
+static void
+iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v6 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type)
+{
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
+ u32 n_aps_flag =
+ iwl_mvm_scan_ch_n_aps_flag(vif_type,
+ channels[i]->hw_value);
+
+ cfg->flags = cpu_to_le32(flags | n_aps_flag);
+ cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+ }
+}
+
static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -1915,7 +1993,7 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
{
cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
cp->count = params->n_channels;
- cp->num_of_aps_override = IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE;
+ cp->num_of_aps_override = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
params->n_channels,
@@ -1923,6 +2001,23 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
vif->type);
}
+static void
+iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v6 *cp,
+ u32 channel_cfg_flags)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+ cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+ cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+ iwl_mvm_umac_scan_cfg_channels_v6(mvm, params->channels, cp,
+ params->n_channels,
+ channel_cfg_flags,
+ vif->type);
+}
static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
@@ -1990,6 +2085,40 @@ static int iwl_mvm_scan_umac_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v14 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v14 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+ u32 bitmap_ssid = 0;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
+ &bitmap_ssid);
+ iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif,
+ &scan_p->channel_params, bitmap_ssid);
+
+ return 0;
+}
+
static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
{
return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
@@ -2105,6 +2234,7 @@ struct iwl_scan_umac_handler {
static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
+ IWL_SCAN_UMAC_HANDLER(14),
IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12),
};
@@ -2463,6 +2593,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
static int iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
}
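The v1/v2 juggling in iwl_mvm_config_sched_scan_profiles() works because the two command layouts are assumed to differ only in the length of the leading profiles[] array, with the scalar data block trailing it: indexing profiles[] through the v1 pointer is valid for either version, and only the data block needs the conditional cast. An abridged sketch of the assumed layouts (the real definitions live in fw/api/scan.h):

struct iwl_scan_offload_profile_cfg_v1 {
	struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
	struct iwl_scan_offload_profile_cfg_data data;
} __packed;

struct iwl_scan_offload_profile_cfg {
	struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES_V2];
	struct iwl_scan_offload_profile_cfg_data data;
} __packed;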
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 9da0dae78510..368b9d117f73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -162,7 +162,9 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
* capabilities of the AP station, and choose the watermark accordingly.
*/
if (sta) {
- if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ if (sta->ht_cap.ht_supported ||
+ sta->vht_cap.vht_supported ||
+ sta->he_cap.has_he) {
switch (sta->rx_nss) {
case 1:
watermark = SF_W_MARK_SISO;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 64ef3f3ba23b..56ae72debb96 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -722,6 +722,11 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
lockdep_assert_held(&mvm->mutex);
+ if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
+ "max queue %d >= num_of_queues (%d)", maxq,
+ mvm->trans->trans_cfg->base_params->num_of_queues))
+ maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
+
/* This should not be hit with new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -ENOSPC;
@@ -1164,9 +1169,9 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
inactive_tid_bitmap,
&unshare_queues,
&changetid_queues);
- if (ret >= 0 && free_queue < 0) {
+ if (ret && free_queue < 0) {
queue_owner = sta;
- free_queue = ret;
+ free_queue = i;
}
/* only unlock sta lock - we still need the queue info lock */
spin_unlock_bh(&mvmsta->lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 01f248ba8fec..9d5b1e51b50d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -129,6 +129,18 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
+ switch (trans_pcie->rx_buf_size) {
+ case IWL_AMSDU_DEF:
+ return -EINVAL;
+ case IWL_AMSDU_2K:
+ break;
+ case IWL_AMSDU_4K:
+ case IWL_AMSDU_8K:
+ case IWL_AMSDU_12K:
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ break;
+ }
+
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
&trans_pcie->prph_scratch_dma_addr,
@@ -143,10 +155,8 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
- control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
- IWL_PRPH_SCRATCH_MTR_MODE |
- (IWL_PRPH_MTR_FORMAT_256B &
- IWL_PRPH_SCRATCH_MTR_FORMAT);
+ control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
+ control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index f441b20e1642..29971c25dba4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -513,442 +513,26 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-
- {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
+ {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
-
- {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
-
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
-
- {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
-
- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)},
-
-/* 22000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x2074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x4244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr)},
-
- {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
+/* Qu devices */
+ {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+ {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+ {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x2720, PCI_ANY_ID, iwl_qnj_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)},
{IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
@@ -971,31 +555,398 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
-#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
- { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
- .name = _name }
+#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
+ _rf_id, _no_160, _cores, _cfg, _name) \
+ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+ .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \
+ .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
+ .mac_step = _mac_step }
+
+#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
+ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
+ _cfg, _name)
static const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
- IWL_DEV_INFO(0x2526, 0x0010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x4010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x4018, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x401C, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x8010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0xA014, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0xE010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0xE014, iwl9260_2ac_160_cfg, iwl9260_160_name),
-
- IWL_DEV_INFO(0x2526, 0x0030, iwl9560_2ac_160_cfg, iwl9560_160_name),
- IWL_DEV_INFO(0x2526, 0x0038, iwl9560_2ac_160_cfg, iwl9560_160_name),
- IWL_DEV_INFO(0x2526, 0x003C, iwl9560_2ac_160_cfg, iwl9560_160_name),
- IWL_DEV_INFO(0x2526, 0x4030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+/* 9000 */
+ IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
+ IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+ IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+ IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+ IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+ IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+ IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+
+ IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
+
+/* AX200 */
+ IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name),
+ IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
+ IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
+
+/* Qu with Hr */
+ IWL_DEV_INFO(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+
+ IWL_DEV_INFO(0x3DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+
+ IWL_DEV_INFO(0x2720, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0040, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0044, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0078, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x007C, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0310, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0A10, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x1080, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x1651, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x1652, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x2074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x4070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x4244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9560_name),
+
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9461_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_CORES_BT_GNSS,
+ iwl9260_2ac_cfg, iwl9270_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS,
+ iwl9260_2ac_cfg, iwl9270_name),
+
+ _IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9162_160_name),
+ _IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9162_name),
+
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9260_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9260_name),
+
+ /* Qu with Jf */
+ /* Qu B step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
+
+ /* Qu C step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
+
+ /* QuZ */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
+
+ /* QnJ */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
#endif /* CONFIG_IWLMVM */
};
@@ -1031,16 +982,34 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* the trans_cfg should never change, so set it now */
iwl_trans->trans_cfg = trans;
+ iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
+
for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
-
- if ((dev_info->device == IWL_CFG_ANY ||
+ if ((dev_info->device == (u16)IWL_CFG_ANY ||
dev_info->device == pdev->device) &&
- (dev_info->subdevice == IWL_CFG_ANY ||
- dev_info->subdevice == pdev->subsystem_device)) {
+ (dev_info->subdevice == (u16)IWL_CFG_ANY ||
+ dev_info->subdevice == pdev->subsystem_device) &&
+ (dev_info->mac_type == (u16)IWL_CFG_ANY ||
+ dev_info->mac_type ==
+ CSR_HW_REV_TYPE(iwl_trans->hw_rev)) &&
+ (dev_info->mac_step == (u8)IWL_CFG_ANY ||
+ dev_info->mac_step ==
+ CSR_HW_REV_STEP(iwl_trans->hw_rev)) &&
+ (dev_info->rf_type == (u16)IWL_CFG_ANY ||
+ dev_info->rf_type ==
+ CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id)) &&
+ (dev_info->rf_id == (u8)IWL_CFG_ANY ||
+ dev_info->rf_id ==
+ IWL_SUBDEVICE_RF_ID(pdev->subsystem_device)) &&
+ (dev_info->no_160 == (u8)IWL_CFG_ANY ||
+ dev_info->no_160 ==
+ IWL_SUBDEVICE_NO_160(pdev->subsystem_device)) &&
+ (dev_info->cores == (u8)IWL_CFG_ANY ||
+ dev_info->cores ==
+ IWL_SUBDEVICE_CORES(pdev->subsystem_device))) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
- goto found;
}
}
@@ -1062,8 +1031,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
iwl_trans->cfg = cfg_7265d;
- iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
-
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
@@ -1092,9 +1059,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- iwl_trans->cfg = &iwl22000_2ax_cfg_jf;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
return -EINVAL;
@@ -1103,59 +1067,35 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id));
return -EINVAL;
}
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
- u32 hw_status;
-
- hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
- if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP)
- iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
- else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
- CSR_HW_RF_STEP(iwl_trans->hw_rf_id) ==
- SILICON_A_STEP)
- iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
}
/*
* This is a hack to switch from Qu B0 to Qu C0. We need to
- * do this for all cfgs that use Qu B0. All this code is in
- * urgent need for a refactor, but for now this is the easiest
- * thing to do to support Qu C-step.
+ * do this for all cfgs that use Qu B0, except for those using
+ * Jf, which have already been moved to the new table. The
+ * rest must be removed once we convert Qu with Hr as well.
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
- if (cfg == &iwl_ax101_cfg_qu_hr)
+ if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
- else if (cfg == &iwl_ax201_cfg_qu_hr)
+ else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
- else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
- else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
- else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
- else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
- else if (cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
- else if (cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
}
/* same thing for QuZ... */
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- if (cfg == &iwl_ax101_cfg_qu_hr)
- cfg = &iwl_ax101_cfg_quz_hr;
- else if (cfg == &iwl_ax201_cfg_qu_hr)
- cfg = &iwl_ax201_cfg_quz_hr;
- else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
- else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
- else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
- else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+ if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
+ else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
+ else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr;
+ else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr;
}
#endif
@@ -1166,7 +1106,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!iwl_trans->cfg)
iwl_trans->cfg = cfg;
-found:
/* if we don't have a name yet, copy name from the old cfg */
if (!iwl_trans->name)
iwl_trans->name = iwl_trans->cfg->name;
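The hunks above collapse hundreds of exact (device, subdevice) PCI entries into a small wildcard-driven table, and the probe loop drops its early "goto found" so every matching row is applied in order, letting a later row override an earlier one. A minimal standalone sketch of that lookup, assuming IWL_CFG_ANY is an all-ones sentinel (spelled CFG_ANY below), shows why each comparison truncates the sentinel to the field's width first:

	#include <stdint.h>
	#include <stdio.h>

	#define CFG_ANY (~0)	/* assumption: all-ones sentinel like IWL_CFG_ANY */

	struct dev_info {
		uint16_t device;	/* holds 0xFFFF after truncation of CFG_ANY */
		uint16_t subdevice;
		const char *name;
	};

	static const struct dev_info table[] = {
		{ 0x2723, (uint16_t)CFG_ANY, "generic row" },
		{ 0x2723, 0x1653,            "specific row" },
	};

	static const char *lookup(uint16_t dev, uint16_t sub)
	{
		const char *name = NULL;
		size_t i;

		/* like the probe loop with "goto found" removed: every
		 * matching row is applied, so a later row overrides */
		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			const struct dev_info *d = &table[i];

			/* truncate the sentinel to the field width before
			 * comparing; a u16 field promotes to 0x0000FFFF and
			 * would never equal the untruncated 0xFFFFFFFF */
			if ((d->device == (uint16_t)CFG_ANY || d->device == dev) &&
			    (d->subdevice == (uint16_t)CFG_ANY || d->subdevice == sub))
				name = d->name;
		}
		return name;
	}

	int main(void)
	{
		printf("%s\n", lookup(0x2723, 0x1653));	/* "specific row" */
		printf("%s\n", lookup(0x2723, 0x4080));	/* "generic row" */
		return 0;
	}

The second lookup falls through to the generic row because no specific subdevice matches, which is exactly the behavior the catch-all IWL_CFG_ANY entries rely on.
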
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 72f144c3a46e..595e6873d56e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -718,7 +718,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 427fcea5cb2d..8c29071cb415 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1043,7 +1043,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
RFH_GEN_CFG_SERVICE_DMA_SNOOP |
RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
- trans->cfg->integrated ?
+ trans->trans_cfg->integrated ?
RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
RFH_GEN_CFG_RB_CHUNK_SIZE_128));
/* Enable the relevant rx queues */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 38d8fe21690a..e4cbd8daa7c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1817,7 +1817,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
iwl_trans_pcie_sw_reset(trans);
if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
- trans->cfg->integrated) {
+ trans->trans_cfg->integrated) {
err = iwl_pcie_gen2_force_power_gating(trans);
if (err)
return err;
@@ -2206,6 +2206,13 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
return ret;
}
+static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
+{
+ return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
+ ofs, val);
+}
+
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs,
bool freeze)
@@ -3380,6 +3387,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.write_prph = iwl_trans_pcie_write_prph, \
.read_mem = iwl_trans_pcie_read_mem, \
.write_mem = iwl_trans_pcie_write_mem, \
+ .read_config32 = iwl_trans_pcie_read_config32, \
.configure = iwl_trans_pcie_configure, \
.set_pmi = iwl_trans_pcie_set_pmi, \
.sw_reset = iwl_trans_pcie_sw_reset, \
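The new .read_config32 op is deliberately thin: pci_read_config_dword() already returns 0 on success or a PCIBIOS_* error code, and exposing it through the ops table lets bus-independent layers read PCI config space without reaching into the PCIe transport. A hedged sketch of a caller, assuming an iwl_trans_read_config32() dispatch wrapper accompanies the op (the wrapper itself is not part of this hunk):

	/* hypothetical caller; offset 0x0 of PCI config space holds the
	 * vendor ID (low 16 bits) and device ID (high 16 bits) */
	static int demo_read_ids(struct iwl_trans *trans, u32 *ids)
	{
		return iwl_trans_read_config32(trans, 0x0, ids);
	}
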
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 86fc00167817..9664dbc70ef1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1418,6 +1418,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
iwl_pcie_gen2_txq_unmap(trans, queue);
+ iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
+ trans_pcie->txq[queue] = NULL;
+
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}
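The tx-gen2 hunk now releases the queue's memory when a dynamic TX queue is torn down and clears the table slot in the same breath; nulling the pointer after freeing is the standard guard against a later double free or use-after-free through a stale slot. The same idiom in miniature, as a runnable userspace demo:

	#include <stdlib.h>

	struct txq { int id; };

	int main(void)
	{
		struct txq *slots[4] = { 0 };	/* all slots start NULL */

		slots[2] = malloc(sizeof(*slots[2]));
		/* teardown: release the object, then clear the slot so any
		 * later lookup or second teardown sees NULL, not a dangling
		 * pointer */
		free(slots[2]);
		slots[2] = NULL;
		free(slots[2]);	/* safe: freeing NULL is a no-op */
		return 0;
	}
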
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 2f69a6469fe7..4582d418ba4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1287,7 +1287,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
* need to be reclaimed. As result, some free space forms. If there is
* enough free space (> low mark), wake the stack that feeds us.
*/
-void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
diff --git a/drivers/net/wireless/intersil/hostap/hostap_common.h b/drivers/net/wireless/intersil/hostap/hostap_common.h
index 22543538239b..dd29a8e8d349 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_common.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_common.h
@@ -322,7 +322,7 @@ struct prism2_download_param {
u32 addr; /* wlan card address */
u32 len;
void __user *ptr; /* pointer to data in user space */
- } data[0];
+ } data[];
};
#define PRISM2_MAX_DOWNLOAD_AREA_LEN 131072
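This hunk, like several below in the orinoco and p54 files, replaces the pre-C99 zero-length-array idiom "data[0]" with a true C99 flexible array member "data[]". Both occupy no space in sizeof and both mark trailing variable-length storage, but the [] form lets the compiler and bounds-checking tooling see the intent and reject misuse (sizeof on the member, or placing it anywhere but last). A runnable illustration:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct record {
		uint32_t addr;
		uint16_t len;
		char data[];	/* C99 flexible array member, was "char data[0]" */
	};

	int main(void)
	{
		const char payload[] = "plug data";
		/* trailing storage is allocated past the struct; kernel code
		 * would typically use struct_size(rec, data, n) here */
		struct record *rec = malloc(sizeof(*rec) + sizeof(payload));

		if (!rec)
			return 1;
		rec->len = sizeof(payload);
		memcpy(rec->data, payload, sizeof(payload));
		/* the member itself contributes nothing to the struct size */
		printf("sizeof(struct record) = %zu, len = %u\n",
		       sizeof(struct record), (unsigned)rec->len);
		free(rec);
		return 0;
	}
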
diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c
index 8722000b6c27..7c6a5a6d1d45 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_download.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_download.c
@@ -232,11 +232,11 @@ static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *
return ret;
}
-static const struct file_operations prism2_download_aux_dump_proc_fops = {
- .open = prism2_download_aux_dump_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
+static const struct proc_ops prism2_download_aux_dump_proc_ops = {
+ .proc_open = prism2_download_aux_dump_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
};
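This conversion is part of the kernel-wide migration of /proc handlers from struct file_operations to the leaner struct proc_ops introduced in Linux 5.6, whose members carry a proc_ prefix. A generic sketch of a seq_file-backed entry registered the new way (kernel context; not hostap-specific):

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "hello from procfs\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct proc_ops demo_proc_ops = {
		.proc_open	= demo_open,
		.proc_read	= seq_read,
		.proc_lseek	= seq_lseek,
		.proc_release	= single_release,
	};

	/* registration, e.g. from module init:
	 *	proc_create("demo", 0444, NULL, &demo_proc_ops);
	 */
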
diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
index 487883fbb58c..dd2603d9b5d3 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
@@ -615,7 +615,7 @@ struct prism2_download_data {
u32 addr; /* wlan card address */
u32 len;
u8 *data; /* allocated data */
- } data[0];
+ } data[];
};
diff --git a/drivers/net/wireless/intersil/orinoco/fw.c b/drivers/net/wireless/intersil/orinoco/fw.c
index 400a35217644..015af782881b 100644
--- a/drivers/net/wireless/intersil/orinoco/fw.c
+++ b/drivers/net/wireless/intersil/orinoco/fw.c
@@ -49,7 +49,7 @@ struct orinoco_fw_header {
__le32 pdr_offset; /* Offset to PDR data from eof header */
__le32 pri_offset; /* Offset to primary plug data */
__le32 compat_offset; /* Offset to compatibility data*/
- char signature[0]; /* FW signature length headersize-20 */
+ char signature[]; /* FW signature length headersize-20 */
} __packed;
/* Check the range of various header entries. Return a pointer to a
diff --git a/drivers/net/wireless/intersil/orinoco/hermes.h b/drivers/net/wireless/intersil/orinoco/hermes.h
index 121fdd8e5da2..9f668185b7d2 100644
--- a/drivers/net/wireless/intersil/orinoco/hermes.h
+++ b/drivers/net/wireless/intersil/orinoco/hermes.h
@@ -341,7 +341,7 @@ struct agere_ext_scan_info {
__le64 timestamp;
__le16 beacon_interval;
__le16 capabilities;
- u8 data[0];
+ u8 data[];
} __packed;
#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
diff --git a/drivers/net/wireless/intersil/orinoco/hermes_dld.c b/drivers/net/wireless/intersil/orinoco/hermes_dld.c
index 4a10b7aca043..dbeadfcfefe2 100644
--- a/drivers/net/wireless/intersil/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/intersil/orinoco/hermes_dld.c
@@ -64,7 +64,7 @@
struct dblock {
__le32 addr; /* adapter address where to write the block */
__le16 len; /* length of the data only, in bytes */
- char data[0]; /* data to be written */
+ char data[]; /* data to be written */
} __packed;
/*
@@ -76,7 +76,7 @@ struct pdr {
__le32 id; /* record ID */
__le32 addr; /* adapter address where to write the data */
__le32 len; /* expected length of the data, in bytes */
- char next[0]; /* next PDR starts here */
+ char next[]; /* next PDR starts here */
} __packed;
/*
@@ -87,7 +87,7 @@ struct pdr {
struct pdi {
__le16 len; /* length of ID and data, in words */
__le16 id; /* record ID */
- char data[0]; /* plug data */
+ char data[]; /* plug data */
} __packed;
/*** FW data block access functions ***/
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index e753f43e0162..651c676b5506 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -202,7 +202,7 @@ struct ezusb_packet {
__le16 crc; /* CRC up to here */
__le16 hermes_len;
__le16 hermes_rid;
- u8 data[0];
+ u8 data[];
} __packed;
/* Table of devices that work or may work with this driver */
@@ -365,17 +365,6 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
return ctx;
}
-
-/* Hopefully the real complete_all will soon be exported, in the mean
- * while this should work. */
-static inline void ezusb_complete_all(struct completion *comp)
-{
- complete(comp);
- complete(comp);
- complete(comp);
- complete(comp);
-}
-
static void ezusb_ctx_complete(struct request_context *ctx)
{
struct ezusb_priv *upriv = ctx->upriv;
@@ -409,7 +398,7 @@ static void ezusb_ctx_complete(struct request_context *ctx)
netif_wake_queue(dev);
}
- ezusb_complete_all(&ctx->done);
+ complete_all(&ctx->done);
ezusb_request_context_put(ctx);
break;
@@ -419,7 +408,7 @@ static void ezusb_ctx_complete(struct request_context *ctx)
/* This is normal, as all request contexts get flushed
* when the device is disconnected */
err("Called, CTX not terminating, but device gone");
- ezusb_complete_all(&ctx->done);
+ complete_all(&ctx->done);
ezusb_request_context_put(ctx);
break;
}
@@ -690,11 +679,11 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
* get the chance to run themselves. So we make sure
* that we don't sleep for ever */
int msecs = DEF_TIMEOUT * (1000 / HZ);
- while (!ctx->done.done && msecs--)
+
+ while (!try_wait_for_completion(&ctx->done) && msecs--)
udelay(1000);
} else {
- wait_event_interruptible(ctx->done.wait,
- ctx->done.done);
+ wait_for_completion(&ctx->done);
}
break;
default:
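The orinoco_usb hunks retire the local ezusb_complete_all() workaround (complete() called four times) in favor of the real complete_all(), and stop peeking at the completion's private done/wait fields by using try_wait_for_completion() for the busy-wait path and wait_for_completion() for the sleeping one. A condensed sketch of the resulting pattern (kernel context assumed, names illustrative):

	#include <linux/completion.h>
	#include <linux/delay.h>

	static DECLARE_COMPLETION(demo_done);

	/* producer: wake every waiter, not just one */
	static void demo_signal(void)
	{
		complete_all(&demo_done);
	}

	/* consumer that must not sleep: poll through the official helper
	 * instead of reading demo_done.done directly */
	static bool demo_poll(void)
	{
		int msecs = 1000;

		while (!try_wait_for_completion(&demo_done) && msecs--)
			udelay(1000);
		return msecs >= 0;	/* false on timeout */
	}

	/* consumer in process context: just sleep until signalled */
	static void demo_wait(void)
	{
		wait_for_completion(&demo_done);
	}
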
diff --git a/drivers/net/wireless/intersil/p54/eeprom.h b/drivers/net/wireless/intersil/p54/eeprom.h
index b8f46883a292..1d0aaf54389a 100644
--- a/drivers/net/wireless/intersil/p54/eeprom.h
+++ b/drivers/net/wireless/intersil/p54/eeprom.h
@@ -24,7 +24,7 @@
struct pda_entry {
__le16 len; /* includes both code and data */
__le16 code;
- u8 data[0];
+ u8 data[];
} __packed;
struct eeprom_pda_wrap {
@@ -32,7 +32,7 @@ struct eeprom_pda_wrap {
__le16 pad;
__le16 len;
__le32 arm_opcode;
- u8 data[0];
+ u8 data[];
} __packed;
struct p54_iq_autocal_entry {
@@ -87,7 +87,7 @@ struct pda_pa_curve_data {
u8 channels;
u8 points_per_channel;
u8 padding;
- u8 data[0];
+ u8 data[];
} __packed;
struct pda_rssi_cal_ext_entry {
@@ -119,7 +119,7 @@ struct pda_custom_wrapper {
__le16 entry_size;
__le16 offset;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/*
diff --git a/drivers/net/wireless/intersil/p54/lmac.h b/drivers/net/wireless/intersil/p54/lmac.h
index e00761536cfc..8adde6ba35ab 100644
--- a/drivers/net/wireless/intersil/p54/lmac.h
+++ b/drivers/net/wireless/intersil/p54/lmac.h
@@ -81,7 +81,7 @@ struct p54_hdr {
__le16 type; /* enum p54_control_frame_types */
u8 rts_tries;
u8 tries;
- u8 data[0];
+ u8 data[];
} __packed;
#define GET_REQ_ID(skb) \
@@ -176,7 +176,7 @@ struct p54_rx_data {
u8 rssi_raw;
__le32 tsf32;
__le32 unalloc0;
- u8 align[0];
+ u8 align[];
} __packed;
enum p54_trap_type {
@@ -267,7 +267,7 @@ struct p54_tx_data {
} __packed normal;
} __packed;
u8 unalloc2[2];
- u8 align[0];
+ u8 align[];
} __packed;
/* unit is ms */
diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
index 0a9c1a19380f..3356ea708d81 100644
--- a/drivers/net/wireless/intersil/p54/p54.h
+++ b/drivers/net/wireless/intersil/p54/p54.h
@@ -126,7 +126,7 @@ struct p54_cal_database {
size_t entry_size;
size_t offset;
size_t len;
- u8 data[0];
+ u8 data[];
};
#define EEPROM_READBACK_LEN 0x3fc
diff --git a/drivers/net/wireless/intersil/prism54/oid_mgt.c b/drivers/net/wireless/intersil/prism54/oid_mgt.c
index 5705ad925a51..9fd307ca4b6d 100644
--- a/drivers/net/wireless/intersil/prism54/oid_mgt.c
+++ b/drivers/net/wireless/intersil/prism54/oid_mgt.c
@@ -780,17 +780,17 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
{
switch (isl_oid[n].flags & OID_FLAG_TYPE) {
case OID_TYPE_U32:
- return snprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
+ return scnprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
case OID_TYPE_BUFFER:{
struct obj_buffer *buff = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"size=%u\naddr=0x%X\n", buff->size,
buff->addr);
}
break;
case OID_TYPE_BSS:{
struct obj_bss *bss = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"age=%u\nchannel=%u\n"
"capinfo=0x%X\nrates=0x%X\n"
"basic_rates=0x%X\n", bss->age,
@@ -801,9 +801,9 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
case OID_TYPE_BSSLIST:{
struct obj_bsslist *list = r->ptr;
int i, k;
- k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
+ k = scnprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
for (i = 0; i < list->nr; i++)
- k += snprintf(str + k, PRIV_STR_SIZE - k,
+ k += scnprintf(str + k, PRIV_STR_SIZE - k,
"bss[%u] :\nage=%u\nchannel=%u\n"
"capinfo=0x%X\nrates=0x%X\n"
"basic_rates=0x%X\n",
@@ -819,23 +819,23 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
struct obj_frequencies *freq = r->ptr;
int i, t;
printk("nr : %u\n", freq->nr);
- t = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr);
+ t = scnprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr);
for (i = 0; i < freq->nr; i++)
- t += snprintf(str + t, PRIV_STR_SIZE - t,
+ t += scnprintf(str + t, PRIV_STR_SIZE - t,
"mhz[%u]=%u\n", i, freq->mhz[i]);
return t;
}
break;
case OID_TYPE_MLME:{
struct obj_mlme *mlme = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"id=0x%X\nstate=0x%X\ncode=0x%X\n",
mlme->id, mlme->state, mlme->code);
}
break;
case OID_TYPE_MLMEEX:{
struct obj_mlmeex *mlme = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"id=0x%X\nstate=0x%X\n"
"code=0x%X\nsize=0x%X\n", mlme->id,
mlme->state, mlme->code, mlme->size);
@@ -843,7 +843,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
break;
case OID_TYPE_ATTACH:{
struct obj_attachment *attach = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"id=%d\nsize=%d\n",
attach->id,
attach->size);
@@ -851,7 +851,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
break;
case OID_TYPE_SSID:{
struct obj_ssid *ssid = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"length=%u\noctets=%.*s\n",
ssid->length, ssid->length,
ssid->octets);
@@ -860,13 +860,13 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
case OID_TYPE_KEY:{
struct obj_key *key = r->ptr;
int t, i;
- t = snprintf(str, PRIV_STR_SIZE,
+ t = scnprintf(str, PRIV_STR_SIZE,
"type=0x%X\nlength=0x%X\nkey=0x",
key->type, key->length);
for (i = 0; i < key->length; i++)
- t += snprintf(str + t, PRIV_STR_SIZE - t,
+ t += scnprintf(str + t, PRIV_STR_SIZE - t,
"%02X:", key->key[i]);
- t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
+ t += scnprintf(str + t, PRIV_STR_SIZE - t, "\n");
return t;
}
break;
@@ -874,11 +874,11 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
case OID_TYPE_ADDR:{
unsigned char *buff = r->ptr;
int t, i;
- t = snprintf(str, PRIV_STR_SIZE, "hex data=");
+ t = scnprintf(str, PRIV_STR_SIZE, "hex data=");
for (i = 0; i < isl_oid[n].size; i++)
- t += snprintf(str + t, PRIV_STR_SIZE - t,
+ t += scnprintf(str + t, PRIV_STR_SIZE - t,
"%02X:", buff[i]);
- t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
+ t += scnprintf(str + t, PRIV_STR_SIZE - t, "\n");
return t;
}
break;
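The swaps above matter because snprintf() returns the length the output would have had, not what was actually stored, so the accumulating "t += snprintf(str + t, PRIV_STR_SIZE - t, ...)" pattern can push the offset past the end of the buffer once it fills; the kernel's scnprintf() returns the bytes actually written (excluding the terminating NUL), keeping the offset bounded. A small userspace demonstration of the return-value difference:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];
		int t = snprintf(buf, sizeof(buf), "%s", "0123456789");

		/* snprintf reports the would-be length (10), although only
		 * 7 chars + NUL fit; "buf + t" now points past the buffer */
		printf("ret=%d buf=\"%s\"\n", t, buf);

		/* the kernel-only scnprintf() would return 7 here, so the
		 * accumulated offset stays inside the buffer on every pass */
		return 0;
	}
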
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 03738107fd10..0528d4cb4d37 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*/
/*
@@ -33,6 +33,9 @@
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
#include <linux/nospec.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
#include "mac80211_hwsim.h"
#define WARN_QUEUE 100
@@ -300,14 +303,12 @@ static struct net_device *hwsim_mon; /* global monitor netdev */
.band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_freq), \
- .max_power = 20, \
}
#define CHAN5G(_freq) { \
.band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_freq), \
- .max_power = 20, \
}
static const struct ieee80211_channel hwsim_channels_2ghz[] = {
@@ -615,14 +616,14 @@ static const struct genl_multicast_group hwsim_mcgrps[] = {
/* MAC80211_HWSIM netlink policy */
static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
- [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
- [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
+ [HWSIM_ATTR_ADDR_RECEIVER] = NLA_POLICY_ETH_ADDR_COMPAT,
+ [HWSIM_ATTR_ADDR_TRANSMITTER] = NLA_POLICY_ETH_ADDR_COMPAT,
[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[HWSIM_ATTR_FLAGS] = { .type = NLA_U32 },
[HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 },
[HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 },
- [HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC,
+ [HWSIM_ATTR_TX_INFO] = { .type = NLA_BINARY,
.len = IEEE80211_TX_MAX_RATES *
sizeof(struct hwsim_tx_rate)},
[HWSIM_ATTR_COOKIE] = { .type = NLA_U64 },
@@ -632,15 +633,61 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
[HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_USE_CHANCTX] = { .type = NLA_FLAG },
[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG },
[HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING },
[HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG },
[HWSIM_ATTR_FREQ] = { .type = NLA_U32 },
- [HWSIM_ATTR_PERM_ADDR] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
+ [HWSIM_ATTR_TX_INFO_FLAGS] = { .type = NLA_BINARY },
+ [HWSIM_ATTR_PERM_ADDR] = NLA_POLICY_ETH_ADDR_COMPAT,
[HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 },
[HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY },
};
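The two address attributes above move from untyped NLA_UNSPEC entries with a fixed length to a dedicated helper; NLA_POLICY_ETH_ADDR_COMPAT in <net/netlink.h> is, as of this series, an exact-ETH_ALEN-length policy that warns rather than rejects when older userspace sends a padded attribute, and HWSIM_ATTR_TX_INFO gains a proper NLA_BINARY type. A minimal sketch of declaring such a policy for a hypothetical DEMO_ATTR_MAC attribute:

#include <net/netlink.h>
#include <linux/if_ether.h>

enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_MAC,
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	/* exactly ETH_ALEN bytes; longer payloads warn instead of -EINVAL */
	[DEMO_ATTR_MAC] = NLA_POLICY_ETH_ADDR_COMPAT,
};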
+#if IS_REACHABLE(CONFIG_VIRTIO)
+
+/* MAC80211_HWSIM virtio queues */
+static struct virtqueue *hwsim_vqs[HWSIM_NUM_VQS];
+static bool hwsim_virtio_enabled;
+static spinlock_t hwsim_virtio_lock;
+
+static void hwsim_virtio_rx_work(struct work_struct *work);
+static DECLARE_WORK(hwsim_virtio_rx, hwsim_virtio_rx_work);
+
+static int hwsim_tx_virtio(struct mac80211_hwsim_data *data,
+ struct sk_buff *skb)
+{
+ struct scatterlist sg[1];
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ if (!hwsim_virtio_enabled) {
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ sg_init_one(sg, skb->head, skb_end_offset(skb));
+ err = virtqueue_add_outbuf(hwsim_vqs[HWSIM_VQ_TX], sg, 1, skb,
+ GFP_ATOMIC);
+ if (err)
+ goto out_free;
+ virtqueue_kick(hwsim_vqs[HWSIM_VQ_TX]);
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+ return 0;
+
+out_free:
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+ nlmsg_free(skb);
+ return err;
+}
+#else
+/* cause a linker error if this ends up being needed */
+extern int hwsim_tx_virtio(struct mac80211_hwsim_data *data,
+ struct sk_buff *skb);
+#define hwsim_virtio_enabled false
+#endif
+
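Two idioms in the block above are easy to miss: IS_REACHABLE(CONFIG_VIRTIO) compiles the virtio path in only when the virtio core can actually be linked against (built in, or a module when this driver is also modular), and the #else branch pairs an undefined extern with a false constant, so callers guarded by hwsim_virtio_enabled are discarded as dead code while any unguarded call fails at link time instead of silently misbehaving. A generic sketch of the pattern, using hypothetical CONFIG_FOO/foo_send()/foo_enabled names:

#include <linux/skbuff.h>

#if IS_REACHABLE(CONFIG_FOO)
static bool foo_enabled;

static int foo_send(struct sk_buff *skb)
{
	return 0;	/* the real transmit path would live here */
}
#else
/* never defined anywhere: a call surviving optimization is a link error */
extern int foo_send(struct sk_buff *skb);
#define foo_enabled false	/* makes "if (foo_enabled)" paths dead code */
#endif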
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan);
@@ -1140,8 +1187,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
genlmsg_end(skb, msg_head);
- if (hwsim_unicast_netgroup(data, skb, dst_portid))
- goto err_free_txskb;
+
+ if (hwsim_virtio_enabled) {
+ if (hwsim_tx_virtio(data, skb))
+ goto err_free_txskb;
+ } else {
+ if (hwsim_unicast_netgroup(data, skb, dst_portid))
+ goto err_free_txskb;
+ }
/* Enqueue the packet */
skb_queue_tail(&data->pending, my_skb);
@@ -1443,7 +1496,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
/* wmediumd mode check */
_portid = READ_ONCE(data->wmediumd);
- if (_portid)
+ if (_portid || hwsim_virtio_enabled)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
/* NO wmediumd detected, perfect medium simulation */
@@ -1549,7 +1602,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, chan);
- if (_pid)
+ if (_pid || hwsim_virtio_enabled)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
@@ -1595,6 +1648,11 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
+ while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL) {
+ mac80211_hwsim_tx_frame(hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
+ }
+
if (vif->csa_active && ieee80211_csa_is_complete(vif))
ieee80211_csa_finish(vif);
}
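The loop added above drains mac80211's broadcast/multicast power-save buffer after the beacon: while any associated station sleeps, mac80211 holds such frames and hands them back one at a time through ieee80211_get_buffered_bc() until it returns NULL, each to be sent on the beaconing channel. A minimal sketch of that contract, with demo_flush_bc() and its tx callback as hypothetical stand-ins:

#include <net/mac80211.h>

static void demo_flush_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_channel *chan,
			  void (*tx)(struct ieee80211_hw *hw,
				     struct sk_buff *skb,
				     struct ieee80211_channel *chan))
{
	struct sk_buff *skb;

	/* frames buffered while stations were in power save */
	while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL)
		tx(hw, skb, chan);
}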
@@ -2925,11 +2983,15 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_UAPSD |
@@ -2940,6 +3002,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION);
hw->wiphy->interface_modes = param->iftypes;
@@ -3285,11 +3348,14 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;
- if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup)
- goto out;
+ if (!hwsim_virtio_enabled) {
+ if (hwsim_net_get_netgroup(genl_info_net(info)) !=
+ data2->netgroup)
+ goto out;
- if (info->snd_portid != data2->wmediumd)
- goto out;
+ if (info->snd_portid != data2->wmediumd)
+ goto out;
+ }
/* look for the skb matching the cookie passed back from user */
skb_queue_walk_safe(&data2->pending, skb, tmp) {
@@ -3379,11 +3445,14 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;
- if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup)
- goto out;
+ if (!hwsim_virtio_enabled) {
+ if (hwsim_net_get_netgroup(genl_info_net(info)) !=
+ data2->netgroup)
+ goto out;
- if (info->snd_portid != data2->wmediumd)
- goto out;
+ if (info->snd_portid != data2->wmediumd)
+ goto out;
+ }
/* check if radio is configured properly */
@@ -3600,9 +3669,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
}
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ GFP_KERNEL);
if (!hwname)
return -ENOMEM;
param.hwname = hwname;
@@ -3622,9 +3691,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
} else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ GFP_KERNEL);
if (!hwname)
return -ENOMEM;
} else
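Both hunks replace kasprintf(GFP_KERNEL, "%.*s", ...) with kstrndup(): a netlink string attribute is length-delimited and not guaranteed to be NUL-terminated, and kstrndup() expresses "copy at most nla_len() bytes and terminate" directly rather than routing the bound through a format string. A minimal sketch, with demo_copy_name() as a hypothetical helper:

#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

static char *demo_copy_name(const struct nlattr *attr)
{
	/* bounded copy of a possibly non-NUL-terminated attribute */
	return kstrndup(nla_data(attr), nla_len(attr), GFP_KERNEL);
}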
@@ -3924,6 +3993,229 @@ static void hwsim_exit_netlink(void)
genl_unregister_family(&hwsim_genl_family);
}
+#if IS_REACHABLE(CONFIG_VIRTIO)
+static void hwsim_virtio_tx_done(struct virtqueue *vq)
+{
+ unsigned int len;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ while ((skb = virtqueue_get_buf(vq, &len)))
+ nlmsg_free(skb);
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+}
+
+static int hwsim_virtio_handle_cmd(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *gnlh;
+ struct nlattr *tb[HWSIM_ATTR_MAX + 1];
+ struct genl_info info = {};
+ int err;
+
+ nlh = nlmsg_hdr(skb);
+ gnlh = nlmsg_data(nlh);
+ err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
+ hwsim_genl_policy, NULL);
+ if (err) {
+ pr_err_ratelimited("hwsim: genlmsg_parse returned %d\n", err);
+ return err;
+ }
+
+ info.attrs = tb;
+
+ switch (gnlh->cmd) {
+ case HWSIM_CMD_FRAME:
+ hwsim_cloned_frame_received_nl(skb, &info);
+ break;
+ case HWSIM_CMD_TX_INFO_FRAME:
+ hwsim_tx_info_frame_received_nl(skb, &info);
+ break;
+ default:
+ pr_err_ratelimited("hwsim: invalid cmd: %d\n", gnlh->cmd);
+ return -EPROTO;
+ }
+ return 0;
+}
+
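hwsim_virtio_handle_cmd() above relies on each virtio buffer carrying a complete generic netlink message (nlmsghdr, genlmsghdr, attributes), which lets the same hwsim_genl_policy validate traffic from the virtio transport exactly as on the real netlink socket. A minimal sketch of that reuse, with demo_parse() as a hypothetical wrapper:

#include <net/genetlink.h>

static int demo_parse(struct sk_buff *skb, const struct genl_family *family,
		      const struct nla_policy *policy, int maxattr,
		      struct nlattr **tb)
{
	/* validate attributes as the netlink receive path would */
	return genlmsg_parse(nlmsg_hdr(skb), family, tb, maxattr,
			     policy, NULL);
}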
+static void hwsim_virtio_rx_work(struct work_struct *work)
+{
+ struct virtqueue *vq;
+ unsigned int len;
+ struct sk_buff *skb;
+ struct scatterlist sg[1];
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ if (!hwsim_virtio_enabled)
+ goto out_unlock;
+
+ skb = virtqueue_get_buf(hwsim_vqs[HWSIM_VQ_RX], &len);
+ if (!skb)
+ goto out_unlock;
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+
+ skb->data = skb->head;
+ skb_set_tail_pointer(skb, len);
+ hwsim_virtio_handle_cmd(skb);
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ if (!hwsim_virtio_enabled) {
+ nlmsg_free(skb);
+ goto out_unlock;
+ }
+ vq = hwsim_vqs[HWSIM_VQ_RX];
+ sg_init_one(sg, skb->head, skb_end_offset(skb));
+ err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_ATOMIC);
+ if (WARN(err, "virtqueue_add_inbuf returned %d\n", err))
+ nlmsg_free(skb);
+ else
+ virtqueue_kick(vq);
+ schedule_work(&hwsim_virtio_rx);
+
+out_unlock:
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+}
+
+static void hwsim_virtio_rx_done(struct virtqueue *vq)
+{
+ schedule_work(&hwsim_virtio_rx);
+}
+
+static int init_vqs(struct virtio_device *vdev)
+{
+ vq_callback_t *callbacks[HWSIM_NUM_VQS] = {
+ [HWSIM_VQ_TX] = hwsim_virtio_tx_done,
+ [HWSIM_VQ_RX] = hwsim_virtio_rx_done,
+ };
+ const char *names[HWSIM_NUM_VQS] = {
+ [HWSIM_VQ_TX] = "tx",
+ [HWSIM_VQ_RX] = "rx",
+ };
+
+ return virtio_find_vqs(vdev, HWSIM_NUM_VQS,
+ hwsim_vqs, callbacks, names, NULL);
+}
+
+static int fill_vq(struct virtqueue *vq)
+{
+ int i, err;
+ struct sk_buff *skb;
+ struct scatterlist sg[1];
+
+ for (i = 0; i < virtqueue_get_vring_size(vq); i++) {
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ sg_init_one(sg, skb->head, skb_end_offset(skb));
+ err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL);
+ if (err) {
+ nlmsg_free(skb);
+ return err;
+ }
+ }
+ virtqueue_kick(vq);
+ return 0;
+}
+
+static void remove_vqs(struct virtio_device *vdev)
+{
+ int i;
+
+ vdev->config->reset(vdev);
+
+ for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) {
+ struct virtqueue *vq = hwsim_vqs[i];
+ struct sk_buff *skb;
+
+ while ((skb = virtqueue_detach_unused_buf(vq)))
+ nlmsg_free(skb);
+ }
+
+ vdev->config->del_vqs(vdev);
+}
+
+static int hwsim_virtio_probe(struct virtio_device *vdev)
+{
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ if (hwsim_virtio_enabled) {
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+ return -EEXIST;
+ }
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+
+ err = init_vqs(vdev);
+ if (err)
+ return err;
+
+ err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]);
+ if (err)
+ goto out_remove;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ hwsim_virtio_enabled = true;
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+
+ schedule_work(&hwsim_virtio_rx);
+ return 0;
+
+out_remove:
+ remove_vqs(vdev);
+ return err;
+}
+
+static void hwsim_virtio_remove(struct virtio_device *vdev)
+{
+ hwsim_virtio_enabled = false;
+
+ cancel_work_sync(&hwsim_virtio_rx);
+
+ remove_vqs(vdev);
+}
+
+/* MAC80211_HWSIM virtio device id table */
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_MAC80211_HWSIM, VIRTIO_DEV_ANY_ID },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_hwsim = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = hwsim_virtio_probe,
+ .remove = hwsim_virtio_remove,
+};
+
+static int hwsim_register_virtio_driver(void)
+{
+ spin_lock_init(&hwsim_virtio_lock);
+
+ return register_virtio_driver(&virtio_hwsim);
+}
+
+static void hwsim_unregister_virtio_driver(void)
+{
+ unregister_virtio_driver(&virtio_hwsim);
+}
+#else
+static inline int hwsim_register_virtio_driver(void)
+{
+ return 0;
+}
+
+static inline void hwsim_unregister_virtio_driver(void)
+{
+}
+#endif
+
static int __init init_mac80211_hwsim(void)
{
int i, err;
@@ -3952,10 +4244,14 @@ static int __init init_mac80211_hwsim(void)
if (err)
goto out_unregister_driver;
+ err = hwsim_register_virtio_driver();
+ if (err)
+ goto out_exit_netlink;
+
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
- goto out_exit_netlink;
+ goto out_exit_virtio;
}
for (i = 0; i < radios; i++) {
@@ -4067,6 +4363,8 @@ out_free_mon:
free_netdev(hwsim_mon);
out_free_radios:
mac80211_hwsim_free();
+out_exit_virtio:
+ hwsim_unregister_virtio_driver();
out_exit_netlink:
hwsim_exit_netlink();
out_unregister_driver:
@@ -4083,6 +4381,7 @@ static void __exit exit_mac80211_hwsim(void)
{
pr_debug("mac80211_hwsim: unregister radios\n");
+ hwsim_unregister_virtio_driver();
hwsim_exit_netlink();
mac80211_hwsim_free();
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index a85bc7c5c030..28ade92adcb4 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -3,6 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+ * Copyright (C) 2020 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -245,4 +246,24 @@ struct hwsim_tx_rate_flag {
s8 idx;
u16 flags;
} __packed;
+
+/**
+ * DOC: Frame transmission support over virtio
+ *
+ * Frame transmission is also supported over virtio to allow communication
+ * with external entities.
+ */
+
+/**
+ * enum hwsim_vqs - queues for virtio frame transmission
+ *
+ * @HWSIM_VQ_TX: send frames to external entity
+ * @HWSIM_VQ_RX: receive frames and transmission info reports
+ * @HWSIM_NUM_VQS: enum limit
+ */
+enum hwsim_vqs {
+ HWSIM_VQ_TX,
+ HWSIM_VQ_RX,
+ HWSIM_NUM_VQS,
+};
#endif /* __MAC80211_HWSIM_H */
diff --git a/drivers/net/wireless/marvell/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h
index a4fc3f79bb17..dfa22468b14a 100644
--- a/drivers/net/wireless/marvell/libertas/host.h
+++ b/drivers/net/wireless/marvell/libertas/host.h
@@ -461,7 +461,7 @@ struct cmd_ds_802_11_scan {
uint8_t bsstype;
uint8_t bssid[ETH_ALEN];
- uint8_t tlvbuffer[0];
+ uint8_t tlvbuffer[];
} __packed;
struct cmd_ds_802_11_scan_rsp {
@@ -469,7 +469,7 @@ struct cmd_ds_802_11_scan_rsp {
__le16 bssdescriptsize;
uint8_t nr_sets;
- uint8_t bssdesc_and_tlvbuffer[0];
+ uint8_t bssdesc_and_tlvbuffer[];
} __packed;
struct cmd_ds_802_11_get_log {
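The [0] to [] conversions here (and in the mwifiex headers below) replace the old GNU zero-length-array idiom with a C99 flexible array member, which lets the compiler and fortify checks reason about the trailing buffer and pairs naturally with struct_size() for overflow-safe allocation. A minimal sketch with a hypothetical demo_tlv structure:

#include <linux/slab.h>
#include <linux/overflow.h>

struct demo_tlv {
	__le16 len;
	u8 data[];	/* C99 flexible array member, was "u8 data[0];" */
};

static struct demo_tlv *demo_alloc(u16 n)
{
	/* struct_size(): sizeof(*t) + n * sizeof(t->data[0]), with
	 * overflow checking */
	struct demo_tlv *t = kzalloc(struct_size(t, data, n), GFP_KERNEL);

	if (t)
		t->len = cpu_to_le16(n);
	return t;
}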
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 30f1025ecb9b..acf61b93b782 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -103,7 +103,7 @@ MODULE_FIRMWARE("sd8688.bin");
struct if_sdio_packet {
struct if_sdio_packet *next;
u16 nb;
- u8 buffer[0] __attribute__((aligned(4)));
+ u8 buffer[] __aligned(4);
};
struct if_sdio_card {
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index d07fe82c557e..cd9f8ecf171f 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -35,7 +35,7 @@
struct if_spi_packet {
struct list_head list;
u16 blen;
- u8 buffer[0] __attribute__((aligned(4)));
+ u8 buffer[] __aligned(4);
};
struct if_spi_card {
@@ -235,8 +235,9 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
spi_message_add_tail(&dummy_trans, &m);
} else {
/* Busy-wait while the SPU fills the FIFO */
- reg_trans.delay_usecs =
+ reg_trans.delay.value =
DIV_ROUND_UP((100 + (delay * 10)), 1000);
+ reg_trans.delay.unit = SPI_DELAY_UNIT_USECS;
}
/* read in data */
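The spu_read() change tracks the SPI core's move from the scalar delay_usecs field to struct spi_delay, a (value, unit) pair that can also express delays in other units such as nanoseconds. A minimal sketch of the new convention, with demo_set_delay() as a hypothetical helper:

#include <linux/spi/spi.h>

static void demo_set_delay(struct spi_transfer *xfer, u16 usecs)
{
	/* was: xfer->delay_usecs = usecs; */
	xfer->delay.value = usecs;
	xfer->delay.unit = SPI_DELAY_UNIT_USECS;
}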
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.h b/drivers/net/wireless/marvell/libertas/if_usb.h
index 8dc14bec3e16..7d0daeb33c3f 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.h
+++ b/drivers/net/wireless/marvell/libertas/if_usb.h
@@ -91,7 +91,7 @@ struct fwheader {
struct fwdata {
struct fwheader hdr;
__le32 seqnum;
- uint8_t data[0];
+ uint8_t data[];
};
/* fwsyncheader */
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.h b/drivers/net/wireless/marvell/libertas_tf/if_usb.h
index 585ad36f9055..f6dd7373b09e 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.h
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.h
@@ -81,7 +81,7 @@ struct fwheader {
struct fwdata {
struct fwheader hdr;
__le32 seqnum;
- uint8_t data[0];
+ uint8_t data[];
};
/** fwsyncheader */
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.c b/drivers/net/wireless/marvell/mwifiex/11ac.c
index 59d23fb2365f..756f019ef28a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.c
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11ac
+ * NXP Wireless LAN device driver: 802.11ac
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.h b/drivers/net/wireless/marvell/mwifiex/11ac.h
index 1ca92c7a8a4a..29e83468cf3f 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.h
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11ac
+ * NXP Wireless LAN device driver: 802.11ac
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 238accfe4f41..d2ee6469e67b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11h
+ * NXP Wireless LAN device driver: 802.11h
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index e435f801bc91..6696bce56178 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n
+ * NXP Wireless LAN device driver: 802.11n
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index 33268ce2cd82..83a88eecbda6 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n
+ * NXP Wireless LAN device driver: 802.11n
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 088612438530..46f41dbcf30d 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n Aggregation
+ * NXP Wireless LAN device driver: 802.11n Aggregation
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
index 8279b159da7c..382c1265c441 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n Aggregation
+ * NXP Wireless LAN device driver: 802.11n Aggregation
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 05a3c61ac603..0bdafe9f66db 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
+ * NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
index 22d991f514c8..465f244b3636 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
+ * NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index d89684168500..1566d2197906 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: CFG80211
+ * NXP Wireless LAN device driver: CFG80211
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
@@ -3052,7 +3052,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
- dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
+ dev->needed_headroom = MWIFIEX_MIN_DATA_HEADER_LEN;
dev->ethtool_ops = &mwifiex_ethtool_ops;
mdev_priv = netdev_priv(dev);
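The mwifiex_add_virtual_intf() change stops inflating hard_header_len, which is meant to describe the actual link-layer header, and instead sets needed_headroom, which only asks the stack to reserve extra bytes ahead of each packet for the firmware's TX descriptor without changing the visible header size. A minimal sketch of the distinction, with demo_setup() and FW_HDR_LEN as hypothetical names:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define FW_HDR_LEN 32	/* hypothetical firmware descriptor size */

static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);			/* hard_header_len = ETH_HLEN */
	dev->needed_headroom = FW_HDR_LEN;	/* extra room, not header size */
}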
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.h b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
index 908367857d58..530a63f13f14 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: CFG80211
+ * NXP Wireless LAN device driver: CFG80211
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index f1522fb1c1e8..fb91ecfc5546 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: Channel, Frequence and Power
+ * NXP Wireless LAN device driver: Channel, Frequency and Power
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index e8788c35a453..7e4b8cd52605 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: commands and events
+ * NXP Wireless LAN device driver: commands and events
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 8ab114cf3467..dded92db1f37 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: debugfs
+ * NXP Wireless LAN device driver: debugfs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index 46696ea0b23e..6bd23c9b1eed 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: generic data structures and APIs
+ * NXP Wireless LAN device driver: generic data structures and APIs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/ethtool.c b/drivers/net/wireless/marvell/mwifiex/ethtool.c
index 58400c69ab26..9bdad3f59039 100644
--- a/drivers/net/wireless/marvell/mwifiex/ethtool.c
+++ b/drivers/net/wireless/marvell/mwifiex/ethtool.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: ethtool
+ * NXP Wireless LAN device driver: ethtool
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 1fb76d2f5d3f..a415d73a73e6 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: Firmware specific macros & structures
+ * NXP Wireless LAN device driver: Firmware specific macros & structures
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
@@ -846,7 +846,7 @@ struct mwifiex_ie_types_random_mac {
struct mwifiex_ietypes_chanstats {
struct mwifiex_ie_types_header header;
- struct mwifiex_fw_chan_stats chanstats[0];
+ struct mwifiex_fw_chan_stats chanstats[];
} __packed;
struct mwifiex_ie_types_wildcard_ssid_params {
@@ -1082,7 +1082,7 @@ struct host_cmd_ds_get_hw_spec {
__le32 reserved_6;
__le32 dot_11ac_dev_cap;
__le32 dot_11ac_mcs_support;
- u8 tlvs[0];
+ u8 tlvs[];
} __packed;
struct host_cmd_ds_802_11_rssi_info {
@@ -1140,7 +1140,7 @@ struct ieee_types_assoc_rsp {
__le16 cap_info_bitmap;
__le16 status_code;
__le16 a_id;
- u8 ie_buffer[0];
+ u8 ie_buffer[];
} __packed;
struct host_cmd_ds_802_11_associate_rsp {
@@ -1455,7 +1455,7 @@ struct host_cmd_ds_chan_rpt_event {
__le32 result;
__le64 start_tsf;
__le32 duration;
- u8 tlvbuf[0];
+ u8 tlvbuf[];
} __packed;
struct host_cmd_sdio_sp_rx_aggr_cfg {
@@ -1625,7 +1625,7 @@ struct host_cmd_ds_802_11_bg_scan_config {
__le32 reserved2;
__le32 report_condition;
__le16 reserved3;
- u8 tlv[0];
+ u8 tlv[];
} __packed;
struct host_cmd_ds_802_11_bg_scan_query {
@@ -1720,7 +1720,7 @@ struct mwifiex_ie_types_sta_info {
struct host_cmd_ds_sta_list {
__le16 sta_count;
- u8 tlv[0];
+ u8 tlv[];
} __packed;
struct mwifiex_ie_types_pwr_capability {
@@ -1743,7 +1743,7 @@ struct mwifiex_ie_types_wmm_param_set {
struct mwifiex_ie_types_mgmt_frame {
struct mwifiex_ie_types_header header;
__le16 frame_control;
- u8 frame_contents[0];
+ u8 frame_contents[];
};
struct mwifiex_ie_types_wmm_queue_status {
@@ -1861,7 +1861,7 @@ struct mwifiex_ie_types_2040bssco {
struct mwifiex_ie_types_extcap {
struct mwifiex_ie_types_header header;
- u8 ext_capab[0];
+ u8 ext_capab[];
} __packed;
struct host_cmd_ds_mem_access {
@@ -1918,12 +1918,12 @@ struct mwifiex_assoc_event {
__le16 frame_control;
__le16 cap_info;
__le16 listen_interval;
- u8 data[0];
+ u8 data[];
} __packed;
struct host_cmd_ds_sys_config {
__le16 action;
- u8 tlv[0];
+ u8 tlv[];
};
struct host_cmd_11ac_vht_cfg {
@@ -1956,7 +1956,7 @@ struct host_cmd_tlv_gwk_cipher {
struct host_cmd_tlv_passphrase {
struct mwifiex_ie_types_header header;
- u8 passphrase[0];
+ u8 passphrase[];
} __packed;
struct host_cmd_tlv_wep_key {
@@ -1978,12 +1978,12 @@ struct host_cmd_tlv_encrypt_protocol {
struct host_cmd_tlv_ssid {
struct mwifiex_ie_types_header header;
- u8 ssid[0];
+ u8 ssid[];
} __packed;
struct host_cmd_tlv_rates {
struct mwifiex_ie_types_header header;
- u8 rates[0];
+ u8 rates[];
} __packed;
struct mwifiex_ie_types_bssid_list {
@@ -2100,13 +2100,13 @@ struct mwifiex_fw_mef_entry {
u8 mode;
u8 action;
__le16 exprsize;
- u8 expr[0];
+ u8 expr[];
} __packed;
struct host_cmd_ds_mef_cfg {
__le32 criteria;
__le16 num_entries;
- struct mwifiex_fw_mef_entry mef_entry[0];
+ struct mwifiex_fw_mef_entry mef_entry[];
} __packed;
#define CONNECTION_TYPE_INFRA 0
@@ -2169,7 +2169,7 @@ struct mwifiex_radar_det_event {
struct mwifiex_ie_types_multi_chan_info {
struct mwifiex_ie_types_header header;
__le16 status;
- u8 tlv_buffer[0];
+ u8 tlv_buffer[];
} __packed;
struct mwifiex_ie_types_mc_group_info {
@@ -2185,7 +2185,7 @@ struct mwifiex_ie_types_mc_group_info {
u8 usb_ep_num;
} hid_num;
u8 intf_num;
- u8 bss_type_numlist[0];
+ u8 bss_type_numlist[];
} __packed;
struct meas_rpt_map {
@@ -2250,13 +2250,13 @@ struct coalesce_receive_filt_rule {
u8 num_of_fields;
u8 pkt_type;
__le16 max_coalescing_delay;
- struct coalesce_filt_field_param params[0];
+ struct coalesce_filt_field_param params[];
} __packed;
struct host_cmd_ds_coalesce_cfg {
__le16 action;
__le16 num_of_rules;
- struct coalesce_receive_filt_rule rule[0];
+ struct coalesce_receive_filt_rule rule[];
} __packed;
struct host_cmd_ds_multi_chan_policy {
@@ -2295,7 +2295,7 @@ struct host_cmd_ds_pkt_aggr_ctrl {
struct host_cmd_ds_sta_configure {
__le16 action;
- u8 tlv_buffer[0];
+ u8 tlv_buffer[];
} __packed;
struct host_cmd_ds_command {
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 580387f9f12a..811abe963af2 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -1,11 +1,11 @@
/*
- * Marvell Wireless LAN device driver: management IE handling- setting and
+ * NXP Wireless LAN device driver: management IE handling - setting and
* deleting IE.
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 1aa93e7e9835..82d69bc3aaaf 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: HW/FW Initialization
+ * NXP Wireless LAN device driver: HW/FW Initialization
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 0dd592ea6e83..3db449efa167 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: ioctl data structures & APIs
+ * NXP Wireless LAN device driver: ioctl data structures & APIs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index d87aeff70cef..5934f7147547 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: association and ad-hoc start/join
+ * NXP Wireless LAN device driver: association and ad-hoc start/join
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 7d94695e7961..529099137644 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: major functions
+ * NXP Wireless LAN device driver: major functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index fa5634af40f7..afaffc325452 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: major data structures and prototypes
+ * NXP Wireless LAN device driver: major data structures and prototypes
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index fc1706d0647d..87b4ccca4b9a 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: PCIE specific handling
+ * NXP Wireless LAN device driver: PCIE specific handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index f7ce9b6db6b4..fc59b522f670 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -3,10 +3,10 @@
* @brief This file contains definitions for PCI-E interface.
* driver.
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index a7968a84aaf8..ff932627a46c 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: scan ioctl and command handling
+ * NXP Wireless LAN device driver: scan ioctl and command handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index fec38b6e86ff..6a2dcb01caf4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: SDIO specific handling
+ * NXP Wireless LAN device driver: SDIO specific handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index f672bdf52cc1..71cd8629b28e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: SDIO specific definitions
+ * NXP Wireless LAN device driver: SDIO specific definitions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 4ed10cf82f9a..0bd93f26bd7f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station command handling
+ * NXP Wireless LAN device driver: station command handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 20c206da0631..f21660149f58 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station command response handling
+ * NXP Wireless LAN device driver: station command response handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 5fdffb114913..bc79ca4cb803 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station event handling
+ * NXP Wireless LAN device driver: station event handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index fbfa0b15d0c8..653f9e094256 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: functions for station ioctl
+ * NXP Wireless LAN device driver: functions for station ioctl
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 52a2ce2e78b0..0d2adf887900 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station RX data handling
+ * NXP Wireless LAN device driver: station RX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 37c24b95e642..241305377e20 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station TX data handling
+ * NXP Wireless LAN device driver: station TX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index f8f282ce39bd..97bb87c3676b 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -1,9 +1,10 @@
-/* Marvell Wireless LAN device driver: TDLS handling
+/*
+ * NXP Wireless LAN device driver: TDLS handling
*
- * Copyright (C) 2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available on the worldwide web at
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index e3c1446dd847..a8479b879382 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: generic TX/RX data handling
+ * NXP Wireless LAN device driver: generic TX/RX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 0939a8c8f3ab..b48a85d791f6 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP specific command handling
+ * NXP Wireless LAN device driver: AP specific command handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 86bfa1b9ef9d..9121447e2701 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP event handling
+ * NXP Wireless LAN device driver: AP event handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 354b09c5e8dc..77c8595f84f8 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP TX and RX data handling
+ * NXP Wireless LAN device driver: AP TX and RX data handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index c2365eeb7016..6f3cfde4654c 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: USB specific handling
+ * NXP Wireless LAN device driver: USB specific handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index 37abd228a84f..d822ec15b7e6 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -1,10 +1,10 @@
/*
* This file contains definitions for mwifiex USB interface driver.
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 3b0d31827681..de89a1e710b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: utility functions
+ * NXP Wireless LAN device driver: utility functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h
index 7cafcecd7b85..44aa80eb7827 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.h
+++ b/drivers/net/wireless/marvell/mwifiex/util.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: utility functions
+ * NXP Wireless LAN device driver: utility functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 132f9e8ed68c..a06fff199ea3 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: WMM
+ * NXP Wireless LAN device driver: WMM
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h
index 38f09762bd2f..04d7da95e307 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.h
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: WMM
+ * NXP Wireless LAN device driver: WMM
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index d55f229abeea..47fb4b3ea004 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -592,7 +592,7 @@ struct mwl8k_cmd_pkt {
__u8 seq_num;
__u8 macid;
__le16 result;
- char payload[0];
+ char payload[];
} __packed;
/*
@@ -806,7 +806,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
struct mwl8k_dma_data {
__le16 fwlen;
struct ieee80211_hdr wh;
- char data[0];
+ char data[];
} __packed;
/* Routines to add/remove DMA header from skb. */
@@ -2955,7 +2955,7 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
struct mwl8k_cmd_set_beacon {
struct mwl8k_cmd_pkt header;
__le16 beacon_len;
- __u8 beacon[0];
+ __u8 beacon[];
};
static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
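The three mwl8k hunks above are part of the treewide conversion from GCC's zero-length-array extension (member[0]) to C99 flexible array members (member[]). Both forms exclude the trailing member from sizeof(), but the C99 form lets compilers and static checkers diagnose misuse, such as a flexible member that is not the last field. A minimal standalone sketch (not mwl8k code) of the allocation pattern these structs rely on:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cmd_pkt {
            unsigned short code;
            unsigned short length;
            char payload[];         /* flexible array member, must be last */
    };

    int main(void)
    {
            const char msg[] = "beacon";
            /* one allocation covers the fixed header plus the payload */
            struct cmd_pkt *pkt = malloc(sizeof(*pkt) + sizeof(msg));

            if (!pkt)
                    return 1;
            pkt->code = 0x1;
            pkt->length = sizeof(msg);
            memcpy(pkt->payload, msg, sizeof(msg));
            printf("header %zu bytes, payload %u bytes\n",
                   sizeof(*pkt), pkt->length);
            free(pkt);
            return 0;
    }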
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 99bbc74acda8..d7a1ddc9e407 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o airtime.o
+ tx.o agg-rx.o mcu.o
mt76-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 59c187898132..f77f03530259 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -4,7 +4,13 @@
*/
#include "mt76.h"
-#define REORDER_TIMEOUT (HZ / 10)
+static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
+{
+ /* Voice traffic (AC_VO) currently always runs without aggregation,
+ * so it needs no special handling. AC_BE/AC_BK use TIDs 0-3; just
+ * check for anything else and use a smaller timeout for it. */
+ return HZ / (tidno >= 4 ? 25 : 10);
+}
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
@@ -71,7 +77,8 @@ mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
nframes--;
status = (struct mt76_rx_status *)skb->cb;
if (!time_after(jiffies,
- status->reorder_time + REORDER_TIMEOUT))
+ status->reorder_time +
+ mt76_aggr_tid_to_timeo(tid->num)))
continue;
mt76_rx_aggr_release_frames(tid, frames, status->seqno);
@@ -101,7 +108,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
if (nframes)
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
- REORDER_TIMEOUT);
+ mt76_aggr_tid_to_timeo(tid->num));
mt76_rx_complete(dev, &frames, NULL);
rcu_read_unlock();
@@ -225,7 +232,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
mt76_rx_aggr_release_head(tid, frames);
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
- REORDER_TIMEOUT);
+ mt76_aggr_tid_to_timeo(tid->num));
out:
spin_unlock_bh(&tid->lock);
@@ -245,6 +252,7 @@ int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
tid->dev = dev;
tid->head = ssn;
tid->size = size;
+ tid->num = tidno;
INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
spin_lock_init(&tid->lock);
@@ -268,6 +276,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
if (!skb)
continue;
+ tid->reorder_buf[i] = NULL;
tid->nframes--;
dev_kfree_skb(skb);
}
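The fixed REORDER_TIMEOUT is replaced by a per-TID lookup: best-effort/background TIDs (0-3) keep the old 100 ms reorder budget, while higher TIDs, which carry latency-sensitive traffic, are released after 40 ms. A user-space sketch of the mapping, with HZ assumed to be 100 purely for illustration:

    #include <stdio.h>

    #define HZ 100  /* assumed tick rate, for illustration only */

    static unsigned long aggr_tid_to_timeo(unsigned char tidno)
    {
            /* TIDs 0-3: HZ / 10 (100 ms); TIDs 4-7: HZ / 25 (40 ms) */
            return HZ / (tidno >= 4 ? 25 : 10);
    }

    int main(void)
    {
            for (unsigned char tid = 0; tid < 8; tid++)
                    printf("tid %u -> %lu jiffies (%lu ms)\n", tid,
                           aggr_tid_to_timeo(tid),
                           aggr_tid_to_timeo(tid) * 1000 / HZ);
            return 0;
    }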
diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c
deleted file mode 100644
index a4a785467748..000000000000
--- a/drivers/net/wireless/mediatek/mt76/airtime.c
+++ /dev/null
@@ -1,326 +0,0 @@
-// SPDX-License-Identifier: ISC
-/*
- * Copyright (C) 2019 Felix Fietkau <nbd@nbd.name>
- */
-
-#include "mt76.h"
-
-#define AVG_PKT_SIZE 1024
-
-/* Number of bits for an average sized packet */
-#define MCS_NBITS (AVG_PKT_SIZE << 3)
-
-/* Number of symbols for a packet with (bps) bits per symbol */
-#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
-
-/* Transmission time (1024 usec) for a packet containing (syms) * symbols */
-#define MCS_SYMBOL_TIME(sgi, syms) \
- (sgi ? \
- ((syms) * 18 * 1024 + 4 * 1024) / 5 : /* syms * 3.6 us */ \
- ((syms) * 1024) << 2 /* syms * 4 us */ \
- )
-
-/* Transmit duration for the raw data part of an average sized packet */
-#define MCS_DURATION(streams, sgi, bps) \
- MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
-
-#define BW_20 0
-#define BW_40 1
-#define BW_80 2
-
-/*
- * Define group sort order: HT40 -> SGI -> #streams
- */
-#define MT_MAX_STREAMS 4
-#define MT_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
-#define MT_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */
-
-#define MT_HT_GROUPS_NB (MT_MAX_STREAMS * \
- MT_HT_STREAM_GROUPS)
-#define MT_VHT_GROUPS_NB (MT_MAX_STREAMS * \
- MT_VHT_STREAM_GROUPS)
-#define MT_GROUPS_NB (MT_HT_GROUPS_NB + \
- MT_VHT_GROUPS_NB)
-
-#define MT_HT_GROUP_0 0
-#define MT_VHT_GROUP_0 (MT_HT_GROUP_0 + MT_HT_GROUPS_NB)
-
-#define MCS_GROUP_RATES 10
-
-#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
- MT_HT_GROUP_0 + \
- MT_MAX_STREAMS * 2 * _ht40 + \
- MT_MAX_STREAMS * _sgi + \
- _streams - 1
-
-#define _MAX(a, b) (((a)>(b))?(a):(b))
-
-#define GROUP_SHIFT(duration) \
- _MAX(0, 16 - __builtin_clz(duration))
-
-/* MCS rate information for an MCS group */
-#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
- [HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \
- .shift = _s, \
- .duration = { \
- MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
- } \
-}
-
-#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
- GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
-
-#define MCS_GROUP(_streams, _sgi, _ht40) \
- __MCS_GROUP(_streams, _sgi, _ht40, \
- MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
-
-#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
- (MT_VHT_GROUP_0 + \
- MT_MAX_STREAMS * 2 * (_bw) + \
- MT_MAX_STREAMS * (_sgi) + \
- (_streams) - 1)
-
-#define BW2VBPS(_bw, r3, r2, r1) \
- (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
-
-#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
- [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
- .shift = _s, \
- .duration = { \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 117, 54, 26)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 234, 108, 52)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 351, 162, 78)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 468, 216, 104)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 702, 324, 156)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 936, 432, 208)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1560, 720, 346)) >> _s \
- } \
-}
-
-#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
- GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 117, 54, 26)))
-
-#define VHT_GROUP(_streams, _sgi, _bw) \
- __VHT_GROUP(_streams, _sgi, _bw, \
- VHT_GROUP_SHIFT(_streams, _sgi, _bw))
-
-struct mcs_group {
- u8 shift;
- u16 duration[MCS_GROUP_RATES];
-};
-
-static const struct mcs_group airtime_mcs_groups[] = {
- MCS_GROUP(1, 0, BW_20),
- MCS_GROUP(2, 0, BW_20),
- MCS_GROUP(3, 0, BW_20),
- MCS_GROUP(4, 0, BW_20),
-
- MCS_GROUP(1, 1, BW_20),
- MCS_GROUP(2, 1, BW_20),
- MCS_GROUP(3, 1, BW_20),
- MCS_GROUP(4, 1, BW_20),
-
- MCS_GROUP(1, 0, BW_40),
- MCS_GROUP(2, 0, BW_40),
- MCS_GROUP(3, 0, BW_40),
- MCS_GROUP(4, 0, BW_40),
-
- MCS_GROUP(1, 1, BW_40),
- MCS_GROUP(2, 1, BW_40),
- MCS_GROUP(3, 1, BW_40),
- MCS_GROUP(4, 1, BW_40),
-
- VHT_GROUP(1, 0, BW_20),
- VHT_GROUP(2, 0, BW_20),
- VHT_GROUP(3, 0, BW_20),
- VHT_GROUP(4, 0, BW_20),
-
- VHT_GROUP(1, 1, BW_20),
- VHT_GROUP(2, 1, BW_20),
- VHT_GROUP(3, 1, BW_20),
- VHT_GROUP(4, 1, BW_20),
-
- VHT_GROUP(1, 0, BW_40),
- VHT_GROUP(2, 0, BW_40),
- VHT_GROUP(3, 0, BW_40),
- VHT_GROUP(4, 0, BW_40),
-
- VHT_GROUP(1, 1, BW_40),
- VHT_GROUP(2, 1, BW_40),
- VHT_GROUP(3, 1, BW_40),
- VHT_GROUP(4, 1, BW_40),
-
- VHT_GROUP(1, 0, BW_80),
- VHT_GROUP(2, 0, BW_80),
- VHT_GROUP(3, 0, BW_80),
- VHT_GROUP(4, 0, BW_80),
-
- VHT_GROUP(1, 1, BW_80),
- VHT_GROUP(2, 1, BW_80),
- VHT_GROUP(3, 1, BW_80),
- VHT_GROUP(4, 1, BW_80),
-};
-
-static u32
-mt76_calc_legacy_rate_duration(const struct ieee80211_rate *rate, bool short_pre,
- int len)
-{
- u32 duration;
-
- switch (rate->hw_value >> 8) {
- case MT_PHY_TYPE_CCK:
- duration = 144 + 48; /* preamble + PLCP */
- if (short_pre)
- duration >>= 1;
-
- duration += 10; /* SIFS */
- break;
- case MT_PHY_TYPE_OFDM:
- duration = 20 + 16; /* preamble + SIFS */
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- len <<= 3;
- duration += (len * 10) / rate->bitrate;
-
- return duration;
-}
-
-u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
- int len)
-{
- struct ieee80211_supported_band *sband;
- const struct ieee80211_rate *rate;
- bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
- bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
- int bw, streams;
- u32 duration;
- int group, idx;
-
- switch (status->bw) {
- case RATE_INFO_BW_20:
- bw = BW_20;
- break;
- case RATE_INFO_BW_40:
- bw = BW_40;
- break;
- case RATE_INFO_BW_80:
- bw = BW_80;
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- switch (status->encoding) {
- case RX_ENC_LEGACY:
- if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
- return 0;
-
- sband = dev->hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx >= sband->n_bitrates)
- return 0;
-
- rate = &sband->bitrates[status->rate_idx];
-
- return mt76_calc_legacy_rate_duration(rate, sp, len);
- case RX_ENC_VHT:
- streams = status->nss;
- idx = status->rate_idx;
- group = VHT_GROUP_IDX(streams, sgi, bw);
- break;
- case RX_ENC_HT:
- streams = ((status->rate_idx >> 3) & 3) + 1;
- idx = status->rate_idx & 7;
- group = HT_GROUP_IDX(streams, sgi, bw);
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- if (WARN_ON_ONCE(streams > 4))
- return 0;
-
- duration = airtime_mcs_groups[group].duration[idx];
- duration <<= airtime_mcs_groups[group].shift;
- duration *= len;
- duration /= AVG_PKT_SIZE;
- duration /= 1024;
-
- duration += 36 + (streams << 2);
-
- return duration;
-}
-
-u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
- int len)
-{
- struct mt76_rx_status stat = {
- .band = info->band,
- };
- u32 duration = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
- struct ieee80211_tx_rate *rate = &info->status.rates[i];
- u32 cur_duration;
-
- if (rate->idx < 0 || !rate->count)
- break;
-
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_80;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_40;
- else
- stat.bw = RATE_INFO_BW_20;
-
- stat.enc_flags = 0;
- if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- stat.rate_idx = rate->idx;
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- stat.encoding = RX_ENC_VHT;
- stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
- stat.nss = ieee80211_rate_get_vht_nss(rate);
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- stat.encoding = RX_ENC_HT;
- } else {
- stat.encoding = RX_ENC_LEGACY;
- }
-
- cur_duration = mt76_calc_rx_airtime(dev, &stat, len);
- duration += cur_duration * rate->count;
- }
-
- return duration;
-}
-EXPORT_SYMBOL_GPL(mt76_calc_tx_airtime);
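airtime.c (and its Makefile entry above) can be dropped because mac80211 now provides an equivalent helper; the mac80211.c hunk further down switches mt76_airtime_report() over to ieee80211_calc_rx_airtime(). For reference, the removed macros estimated the on-air time of an average 1024-byte frame. A worked example for HT MCS0, one stream, 20 MHz, long guard interval (illustrative only):

    #include <stdio.h>

    #define AVG_PKT_SIZE    1024
    #define MCS_NBITS       (AVG_PKT_SIZE << 3)            /* 8192 bits */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define MCS_NSYMS(bps)  DIV_ROUND_UP(MCS_NBITS, (bps))

    int main(void)
    {
            unsigned int syms = MCS_NSYMS(26);      /* MCS0: 26 bits/symbol -> 316 symbols */
            /* long GI: 4 us per OFDM symbol; the original kept the result
             * scaled by 1024, plain microseconds are printed here */
            unsigned int dur_us = syms * 4;

            printf("%u symbols, ~%u us on air\n", syms, dur_us);
            /* sanity check: 8192 bits / 6.5 Mbit/s = ~1260 us */
            return 0;
    }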
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 1847f55e199b..75e659774e07 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -132,6 +132,11 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
writel(q->ndesc, &q->regs->ring_size);
q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
writel(q->head, &q->regs->cpu_idx);
}
@@ -141,7 +146,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
- unsigned int n_swq_queued[4] = {};
+ unsigned int n_swq_queued[8] = {};
unsigned int n_queued = 0;
bool wake = false;
int i, last;
@@ -178,15 +183,25 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
spin_lock_bh(&q->lock);
q->queued -= n_queued;
- for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+ for (i = 0; i < 4; i++) {
if (!n_swq_queued[i])
continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i];
}
- if (flush)
+ /* ext PHY */
+ for (i = 0; i < 4; i++) {
+ if (!n_swq_queued[4 + i])
+ continue;
+
+ dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
+ }
+
+ if (flush) {
mt76_dma_sync_idx(dev, q);
+ mt76_dma_kick_queue(dev, q);
+ }
wake = wake && q->stopped &&
qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
@@ -238,7 +253,9 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
if (!q->queued)
return NULL;
- if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ if (flush)
+ q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
return NULL;
q->tail = (q->tail + 1) % q->ndesc;
@@ -247,12 +264,6 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
-static void
-mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
-{
- writel(q->head, &q->regs->cpu_idx);
-}
-
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
@@ -261,10 +272,13 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct mt76_queue_buf buf;
dma_addr_t addr;
+ if (q->queued + 1 >= q->ndesc - 1)
+ goto error;
+
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
- return -ENOMEM;
+ goto error;
buf.addr = addr;
buf.len = skb->len;
@@ -275,6 +289,10 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
spin_unlock_bh(&q->lock);
return 0;
+
+error:
+ dev_kfree_skb(skb);
+ return -ENOMEM;
}
static int
@@ -286,6 +304,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct ieee80211_hw *hw;
int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
@@ -295,7 +314,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
t = mt76_get_txwi(dev);
if (!t) {
- ieee80211_free_txskb(dev->hw, skb);
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
return -ENOMEM;
}
txwi = mt76_get_txwi_ptr(dev, t);
@@ -427,7 +447,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
int i;
for (i = 0; i < q->ndesc; i++)
- q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
@@ -531,6 +551,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
dev = container_of(napi->dev, struct mt76_dev, napi_dev);
qid = napi - dev->napi;
+ local_bh_disable();
rcu_read_lock();
do {
@@ -540,6 +561,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
} while (cur && done < budget);
rcu_read_unlock();
+ local_bh_enable();
if (done < budget && napi_complete(napi))
dev->drv->rx_poll_complete(dev, qid);
@@ -558,7 +580,6 @@ mt76_dma_init(struct mt76_dev *dev)
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
- skb_queue_head_init(&dev->rx_skb[i]);
napi_enable(&dev->napi[i]);
}
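Two behavioural changes in this dma.c hunk are worth noting: on a flush the driver now force-sets MT_DMA_CTL_DMA_DONE so every pending descriptor can be reaped (and the queue is re-kicked after the index sync), and mt76_dma_tx_queue_skb_raw() frees the skb itself on error instead of leaking it. A simplified sketch of the flush-aware dequeue check, using assumed types that model only the descriptor ownership bit:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DMA_DONE (1u << 31)

    struct desc { uint32_t ctrl; };

    static int dequeue_idx(struct desc *d, int idx, bool flush)
    {
            if (flush)
                    d[idx].ctrl |= DMA_DONE;        /* pretend hw completed it */
            else if (!(d[idx].ctrl & DMA_DONE))
                    return -1;                      /* still owned by hardware */
            return idx;
    }

    int main(void)
    {
            struct desc ring[2] = { { .ctrl = DMA_DONE }, { .ctrl = 0 } };

            printf("normal: idx0=%d idx1=%d\n",
                   dequeue_idx(ring, 0, false), dequeue_idx(ring, 1, false));
            printf("flush:  idx1=%d\n", dequeue_idx(ring, 1, true));
            return 0;
    }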
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 804224e81103..c236e303ccfd 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -64,6 +64,16 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
goto out_put_node;
}
+ if (of_property_read_bool(dev->dev->of_node, "big-endian")) {
+ u8 *data = (u8 *)dev->eeprom.data;
+ int i;
+
+ /* convert EEPROM data to little endian */
+ for (i = 0; i < round_down(len, 2); i += 2)
+ put_unaligned_le16(get_unaligned_be16(&data[i]),
+ &data[i]);
+ }
+
out_put_node:
of_node_put(np);
return ret;
@@ -77,13 +87,11 @@ mt76_eeprom_override(struct mt76_dev *dev)
{
#ifdef CONFIG_OF
struct device_node *np = dev->dev->of_node;
- const u8 *mac;
-
- if (!np)
- return;
+ const u8 *mac = NULL;
- mac = of_get_mac_address(np);
- if (!IS_ERR(mac))
+ if (np)
+ mac = of_get_mac_address(np);
+ if (!IS_ERR_OR_NULL(mac))
ether_addr_copy(dev->macaddr, mac);
#endif
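The new "big-endian" OF property handler swaps each 16-bit EEPROM word in place so the rest of the driver can keep parsing the image as little endian; round_down(len, 2) leaves a trailing odd byte untouched. A standalone sketch of the same transform:

    #include <stdint.h>
    #include <stdio.h>

    static void eeprom_swap_words(uint8_t *data, int len)
    {
            /* skip a trailing odd byte, as round_down(len, 2) does */
            for (int i = 0; i + 1 < len; i += 2) {
                    uint8_t tmp = data[i];

                    data[i] = data[i + 1];
                    data[i + 1] = tmp;
            }
    }

    int main(void)
    {
            uint8_t eeprom[] = { 0x12, 0x34, 0xab, 0xcd, 0xff };

            eeprom_swap_words(eeprom, sizeof(eeprom));
            for (unsigned int i = 0; i < sizeof(eeprom); i++)
                    printf("%02x ", eeprom[i]);
            printf("\n");   /* 34 12 cd ab ff */
            return 0;
    }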
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 96018fd65779..f44f99184c10 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -121,7 +121,7 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
bool vht)
{
struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
- int i, nstream = hweight8(dev->antenna_mask);
+ int i, nstream = hweight8(dev->phy.antenna_mask);
struct ieee80211_sta_vht_cap *vht_cap;
u16 mcs_map = 0;
@@ -156,9 +156,9 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
{
if (dev->cap.has_2ghz)
- mt76_init_stream_cap(dev, &dev->sband_2g.sband, false);
+ mt76_init_stream_cap(dev, &dev->phy.sband_2g.sband, false);
if (dev->cap.has_5ghz)
- mt76_init_stream_cap(dev, &dev->sband_5g.sband, vht);
+ mt76_init_stream_cap(dev, &dev->phy.sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
@@ -187,8 +187,6 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
sband->n_channels = n_chan;
sband->bitrates = rates;
sband->n_bitrates = n_rates;
- dev->chandef.chan = &sband->channels[0];
- dev->chan_state = &msband->chan[0];
ht_cap = &sband->ht_cap;
ht_cap->ht_supported = true;
@@ -223,9 +221,9 @@ static int
mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
int n_rates)
{
- dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->phy.sband_2g.sband;
- return mt76_init_sband(dev, &dev->sband_2g,
+ return mt76_init_sband(dev, &dev->phy.sband_2g,
mt76_channels_2ghz,
ARRAY_SIZE(mt76_channels_2ghz),
rates, n_rates, false);
@@ -235,18 +233,19 @@ static int
mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
int n_rates, bool vht)
{
- dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;
+ dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->phy.sband_5g.sband;
- return mt76_init_sband(dev, &dev->sband_5g,
+ return mt76_init_sband(dev, &dev->phy.sband_5g,
mt76_channels_5ghz,
ARRAY_SIZE(mt76_channels_5ghz),
rates, n_rates, vht);
}
static void
-mt76_check_sband(struct mt76_dev *dev, int band)
+mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
+ enum nl80211_band band)
{
- struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+ struct ieee80211_supported_band *sband = &msband->sband;
bool found = false;
int i;
@@ -261,20 +260,145 @@ mt76_check_sband(struct mt76_dev *dev, int band)
break;
}
- if (found)
+ if (found) {
+ phy->chandef.chan = &sband->channels[0];
+ phy->chan_state = &msband->chan[0];
return;
+ }
sband->n_channels = 0;
- dev->hw->wiphy->bands[band] = NULL;
+ phy->hw->wiphy->bands[band] = NULL;
}
+static void
+mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
+{
+ struct wiphy *wiphy = hw->wiphy;
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
+
+ wiphy->available_antennas_tx = dev->phy.antenna_mask;
+ wiphy->available_antennas_rx = dev->phy.antenna_mask;
+
+ hw->txq_data_size = sizeof(struct mt76_txq);
+
+ if (!hw->max_tx_fragments)
+ hw->max_tx_fragments = 16;
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
+}
+
+struct mt76_phy *
+mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
+ const struct ieee80211_ops *ops)
+{
+ struct ieee80211_hw *hw;
+ struct mt76_phy *phy;
+ unsigned int phy_size, chan_size;
+ unsigned int size_2g, size_5g;
+ void *priv;
+
+ phy_size = ALIGN(sizeof(*phy), 8);
+ chan_size = sizeof(dev->phy.sband_2g.chan[0]);
+ size_2g = ALIGN(ARRAY_SIZE(mt76_channels_2ghz) * chan_size, 8);
+ size_5g = ALIGN(ARRAY_SIZE(mt76_channels_5ghz) * chan_size, 8);
+
+ size += phy_size + size_2g + size_5g;
+ hw = ieee80211_alloc_hw(size, ops);
+ if (!hw)
+ return NULL;
+
+ phy = hw->priv;
+ phy->dev = dev;
+ phy->hw = hw;
+
+ mt76_phy_init(dev, hw);
+
+ priv = hw->priv + phy_size;
+
+ phy->sband_2g = dev->phy.sband_2g;
+ phy->sband_2g.chan = priv;
+ priv += size_2g;
+
+ phy->sband_5g = dev->phy.sband_5g;
+ phy->sband_5g.chan = priv;
+ priv += size_5g;
+
+ phy->priv = priv;
+
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
+
+ mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
+ mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(mt76_alloc_phy);
+
+int
+mt76_register_phy(struct mt76_phy *phy)
+{
+ int ret;
+
+ ret = ieee80211_register_hw(phy->hw);
+ if (ret)
+ return ret;
+
+ phy->dev->phy2 = phy;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_register_phy);
+
+void
+mt76_unregister_phy(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ dev->phy2 = NULL;
+ mt76_tx_status_check(dev, NULL, true);
+ ieee80211_unregister_hw(phy->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_phy);
+
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops)
{
struct ieee80211_hw *hw;
+ struct mt76_phy *phy;
struct mt76_dev *dev;
+ int i;
hw = ieee80211_alloc_hw(size, ops);
if (!hw)
@@ -285,6 +409,10 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
dev->dev = pdev;
dev->drv = drv_ops;
+ phy = &dev->phy;
+ phy->dev = dev;
+ phy->hw = hw;
+
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
@@ -292,6 +420,15 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
init_waitqueue_head(&dev->tx_wait);
skb_queue_head_init(&dev->status_list);
+ skb_queue_head_init(&dev->mcu.res_q);
+ init_waitqueue_head(&dev->mcu.wait);
+ mutex_init(&dev->mcu.mutex);
+
+ INIT_LIST_HEAD(&dev->txwi_cache);
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
+ skb_queue_head_init(&dev->rx_skb[i]);
+
tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
return dev;
@@ -302,51 +439,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates)
{
struct ieee80211_hw *hw = dev->hw;
- struct wiphy *wiphy = hw->wiphy;
+ struct mt76_phy *phy = &dev->phy;
int ret;
dev_set_drvdata(dev->dev, dev);
-
- INIT_LIST_HEAD(&dev->txwi_cache);
-
- SET_IEEE80211_DEV(hw, dev->dev);
- SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
-
- wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
-
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
-
- wiphy->available_antennas_tx = dev->antenna_mask;
- wiphy->available_antennas_rx = dev->antenna_mask;
-
- hw->txq_data_size = sizeof(struct mt76_txq);
- hw->max_tx_fragments = 16;
-
- ieee80211_hw_set(hw, SIGNAL_DBM);
- ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
- ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
- ieee80211_hw_set(hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
- ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
- ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
- ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
- ieee80211_hw_set(hw, TX_AMSDU);
- ieee80211_hw_set(hw, TX_FRAG_LIST);
- ieee80211_hw_set(hw, MFP_CAPABLE);
- ieee80211_hw_set(hw, AP_LINK_PS);
- ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
+ mt76_phy_init(dev, hw);
if (dev->cap.has_2ghz) {
ret = mt76_init_sband_2g(dev, rates, n_rates);
@@ -360,9 +457,9 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
return ret;
}
- wiphy_read_of_freq_limits(dev->hw->wiphy);
- mt76_check_sband(dev, NL80211_BAND_2GHZ);
- mt76_check_sband(dev, NL80211_BAND_5GHZ);
+ wiphy_read_of_freq_limits(hw->wiphy);
+ mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
+ mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
ret = mt76_led_init(dev);
@@ -394,7 +491,10 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
- if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
dev_kfree_skb(skb);
return;
}
@@ -403,13 +503,16 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_rx);
-bool mt76_has_tx_pending(struct mt76_dev *dev)
+bool mt76_has_tx_pending(struct mt76_phy *phy)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_queue *q;
- int i;
+ int i, offset;
- for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- q = dev->q_tx[i].q;
+ offset = __MT_TXQ_MAX * (phy != &dev->phy);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ q = dev->q_tx[offset + i].q;
if (q && q->queued)
return true;
}
@@ -419,37 +522,45 @@ bool mt76_has_tx_pending(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
static struct mt76_channel_state *
-mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
struct mt76_sband *msband;
int idx;
if (c->band == NL80211_BAND_2GHZ)
- msband = &dev->sband_2g;
+ msband = &phy->sband_2g;
else
- msband = &dev->sband_5g;
+ msband = &phy->sband_5g;
idx = c - &msband->sband.channels[0];
return &msband->chan[idx];
}
+static void
+mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
+{
+ struct mt76_channel_state *state = phy->chan_state;
+
+ state->cc_active += ktime_to_us(ktime_sub(time,
+ phy->survey_time));
+ phy->survey_time = time;
+}
+
void mt76_update_survey(struct mt76_dev *dev)
{
- struct mt76_channel_state *state = dev->chan_state;
ktime_t cur_time;
- if (!test_bit(MT76_STATE_RUNNING, &dev->state))
- return;
-
if (dev->drv->update_survey)
dev->drv->update_survey(dev);
cur_time = ktime_get_boottime();
- state->cc_active += ktime_to_us(ktime_sub(cur_time,
- dev->survey_time));
- dev->survey_time = cur_time;
+ mt76_update_survey_active_time(&dev->phy, cur_time);
+ if (dev->phy2)
+ mt76_update_survey_active_time(dev->phy2, cur_time);
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
+ struct mt76_channel_state *state = dev->phy.chan_state;
+
spin_lock_bh(&dev->cc_lock);
state->cc_bss_rx += dev->cur_cc_bss_rx;
dev->cur_cc_bss_rx = 0;
@@ -458,31 +569,33 @@ void mt76_update_survey(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-void mt76_set_channel(struct mt76_dev *dev)
+void mt76_set_channel(struct mt76_phy *phy)
{
- struct ieee80211_hw *hw = dev->hw;
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_hw *hw = phy->hw;
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
- wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);
+ wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(dev);
- dev->chandef = *chandef;
- dev->chan_state = mt76_channel_state(dev, chandef->chan);
+ phy->chandef = *chandef;
+ phy->chan_state = mt76_channel_state(phy, chandef->chan);
if (!offchannel)
- dev->main_chan = chandef->chan;
+ phy->main_chan = chandef->chan;
- if (chandef->chan != dev->main_chan)
- memset(dev->chan_state, 0, sizeof(*dev->chan_state));
+ if (chandef->chan != phy->main_chan)
+ memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
struct mt76_sband *sband;
struct ieee80211_channel *chan;
struct mt76_channel_state *state;
@@ -492,10 +605,10 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
if (idx == 0 && dev->drv->update_survey)
mt76_update_survey(dev);
- sband = &dev->sband_2g;
+ sband = &phy->sband_2g;
if (idx >= sband->sband.n_channels) {
idx -= sband->sband.n_channels;
- sband = &dev->sband_5g;
+ sband = &phy->sband_5g;
}
if (idx >= sband->sband.n_channels) {
@@ -504,13 +617,16 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
}
chan = &sband->sband.channels[idx];
- state = mt76_channel_state(dev, chan);
+ state = mt76_channel_state(phy, chan);
memset(survey, 0, sizeof(*survey));
survey->channel = chan;
survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
survey->filled |= dev->drv->survey_flags;
- if (chan == dev->main_chan) {
+ if (state->noise)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ if (chan == phy->main_chan) {
survey->filled |= SURVEY_INFO_IN_USE;
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
@@ -520,6 +636,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
survey->time_busy = div_u64(state->cc_busy, 1000);
survey->time_rx = div_u64(state->cc_rx, 1000);
survey->time = div_u64(state->cc_active, 1000);
+ survey->noise = state->noise;
spin_lock_bh(&dev->cc_lock);
survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
@@ -555,8 +672,12 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
-static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+static void
+mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_hw **hw,
+ struct ieee80211_sta **sta)
{
+
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct mt76_rx_status mstat;
@@ -581,7 +702,8 @@ static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
memcpy(status->chain_signal, mstat.chain_signal,
sizeof(mstat.chain_signal));
- return wcid_to_sta(mstat.wcid);
+ *sta = wcid_to_sta(mstat.wcid);
+ *hw = mt76_phy_hw(dev, mstat.ext_phy);
}
static int
@@ -628,10 +750,18 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
int len)
{
struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_rx_status info = {
+ .enc_flags = status->enc_flags,
+ .rate_idx = status->rate_idx,
+ .encoding = status->encoding,
+ .band = status->band,
+ .nss = status->nss,
+ .bw = status->bw,
+ };
struct ieee80211_sta *sta;
u32 airtime;
- airtime = mt76_calc_rx_airtime(dev, status, len);
+ airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
spin_lock(&dev->cc_lock);
dev->cur_cc_bss_rx += airtime;
spin_unlock(&dev->cc_lock);
@@ -707,12 +837,14 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw;
struct mt76_wcid *wcid = status->wcid;
bool ps;
int i;
+ hw = mt76_phy_hw(dev, status->ext_phy);
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
- sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
+ sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
if (sta)
wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
}
@@ -770,7 +902,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
if (!skb_queue_empty(&mtxq->retry_q))
- ieee80211_schedule_txq(dev->hw, sta->txq[i]);
+ ieee80211_schedule_txq(hw, sta->txq[i]);
}
}
@@ -778,6 +910,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi)
{
struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw;
struct sk_buff *skb;
spin_lock(&dev->rx_lock);
@@ -787,8 +920,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
continue;
}
- sta = mt76_rx_convert(skb);
- ieee80211_rx_napi(dev->hw, sta, skb, napi);
+ mt76_rx_convert(dev, skb, &hw, &sta);
+ ieee80211_rx_napi(hw, sta, skb, napi);
}
spin_unlock(&dev->rx_lock);
}
@@ -812,7 +945,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, bool ext_phy)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int ret;
@@ -837,6 +970,9 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
}
ewma_signal_init(&wcid->rssi);
+ if (ext_phy)
+ mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
+ wcid->ext_phy = ext_phy;
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
out:
@@ -851,9 +987,6 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int i, idx = wcid->idx;
- rcu_assign_pointer(dev->wcid[idx], NULL);
- synchronize_rcu();
-
for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
mt76_rx_aggr_stop(dev, wcid, i);
@@ -863,7 +996,8 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
mt76_tx_status_check(dev, wcid, true);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
- mt76_wcid_free(dev->wcid_mask, idx);
+ mt76_wcid_mask_clear(dev->wcid_mask, idx);
+ mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
@@ -881,11 +1015,13 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
- return mt76_sta_add(dev, vif, sta);
+ return mt76_sta_add(dev, vif, sta, ext_phy);
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
@@ -900,30 +1036,27 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
+void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm)
{
- struct mt76_dev *dev = hw->priv;
- int n_chains = hweight8(dev->antenna_mask);
+ struct mt76_phy *phy = hw->priv;
+ int n_chains = hweight8(phy->antenna_mask);
+ int delta = mt76_tx_power_nss_delta(n_chains);
- *dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
-
- /* convert from per-chain power to combined
- * output power
- */
- switch (n_chains) {
- case 4:
- *dbm += 6;
- break;
- case 3:
- *dbm += 4;
- break;
- case 2:
- *dbm += 3;
- break;
- default:
- break;
- }
+ *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
return 0;
}
@@ -1005,11 +1138,11 @@ int mt76_get_rate(struct mt76_dev *dev,
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
- if (sband == &dev->sband_5g.sband)
+ if (sband == &dev->phy.sband_5g.sband)
return 0;
idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->sband_2g.sband) {
+ } else if (sband == &dev->phy.sband_2g.sband) {
offset = 4;
}
@@ -1025,27 +1158,28 @@ EXPORT_SYMBOL_GPL(mt76_get_rate);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
- set_bit(MT76_SCANNING, &dev->state);
+ set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
- clear_bit(MT76_SCANNING, &dev->state);
+ clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
mutex_lock(&dev->mutex);
- *tx_ant = dev->antenna_mask;
- *rx_ant = dev->antenna_mask;
+ *tx_ant = phy->antenna_mask;
+ *rx_ant = phy->antenna_mask;
mutex_unlock(&dev->mutex);
return 0;
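The open-coded switch in mt76_get_txpower() is replaced by mt76_tx_power_nss_delta() (defined in the mt76.h hunk below). Power is tracked in 0.5 dB units, so the delta table { 0, 6, 9, 12 } adds 0/3/4.5/6 dB for 1-4 chains, approximating the exact combining gain 10*log10(n) = 0/3.01/4.77/6.02 dB. A small worked example of the conversion:

    #include <stdio.h>

    static const unsigned char nss_delta[4] = { 0, 6, 9, 12 }; /* 0.5 dB units */

    int main(void)
    {
            int txpower_half_db = 34;       /* 17 dBm per chain, in 0.5 dB units */

            for (int chains = 1; chains <= 4; chains++) {
                    /* DIV_ROUND_UP(x, 2) == (x + 1) / 2 */
                    int dbm = (txpower_half_db + nss_delta[chains - 1] + 1) / 2;

                    printf("%d chain(s): ~%d dBm combined\n", chains, dbm);
            }
            return 0;       /* 17, 20, 22, 23 dBm */
    }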
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index 2a976688804d..4048f446e3ee 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -9,14 +9,16 @@ struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
int data_len, int tail_len)
{
+ int length = head_len + data_len + tail_len;
struct sk_buff *skb;
- skb = alloc_skb(head_len + data_len + tail_len,
- GFP_KERNEL);
+ skb = alloc_skb(length, GFP_KERNEL);
if (!skb)
return NULL;
+ memset(skb->head, 0, length);
skb_reserve(skb, head_len);
+
if (data && data_len)
skb_put_data(skb, data, data_len);
@@ -24,7 +26,6 @@ mt76_mcu_msg_alloc(const void *data, int head_len,
}
EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
-/* mmio */
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires)
{
@@ -34,16 +35,17 @@ struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
return NULL;
timeout = expires - jiffies;
- wait_event_timeout(dev->mmio.mcu.wait,
- !skb_queue_empty(&dev->mmio.mcu.res_q),
+ wait_event_timeout(dev->mcu.wait,
+ (!skb_queue_empty(&dev->mcu.res_q) ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)),
timeout);
- return skb_dequeue(&dev->mmio.mcu.res_q);
+ return skb_dequeue(&dev->mcu.res_q);
}
EXPORT_SYMBOL_GPL(mt76_mcu_get_response);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
{
- skb_queue_tail(&dev->mmio.mcu.res_q, skb);
- wake_up(&dev->mmio.mcu.wait);
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
}
EXPORT_SYMBOL_GPL(mt76_mcu_rx_event);
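mt76_mcu_msg_alloc() now zeroes the whole allocation before reserving headroom, so a command header prepended later starts from a clean slate, and the response wait also wakes on MT76_MCU_RESET so stuck firmware cannot pin the caller for the full timeout. A minimal model of the alloc/reserve pattern using plain buffers instead of skbs (all names here are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct msg {
            unsigned char *buf;     /* start of allocation */
            unsigned char *data;    /* cursor past reserved headroom */
            int len;
    };

    static int msg_alloc(struct msg *m, const void *data, int head_len,
                         int data_len, int tail_len)
    {
            int length = head_len + data_len + tail_len;

            m->buf = calloc(1, length);     /* zeroed, like the memset above */
            if (!m->buf)
                    return -1;
            m->data = m->buf + head_len;    /* skb_reserve() equivalent */
            m->len = 0;
            if (data && data_len) {
                    memcpy(m->data, data, data_len);        /* skb_put_data() */
                    m->len = data_len;
            }
            return 0;
    }

    int main(void)
    {
            struct msg m;

            if (msg_alloc(&m, "cmd", 16, 3, 4))
                    return 1;
            free(m.buf);
            return 0;
    }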
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 1c974df1fe25..7ead6620bb8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -94,9 +94,6 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
dev->bus = &mt76_mmio_ops;
dev->mmio.regs = regs;
- skb_queue_head_init(&dev->mmio.mcu.res_q);
- init_waitqueue_head(&dev->mmio.mcu.wait);
spin_lock_init(&dev->mmio.irq_lock);
- mutex_init(&dev->mmio.mcu.mutex);
}
EXPORT_SYMBOL_GPL(mt76_mmio_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index fb077760347a..8e4759bc8f59 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
#define MT_SKB_HEAD_LEN 128
struct mt76_dev;
+struct mt76_phy;
struct mt76_wcid;
struct mt76_reg_pair {
@@ -138,6 +139,8 @@ struct mt76_sw_queue {
struct mt76_mcu_ops {
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
+ int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *rp, int len);
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
@@ -177,6 +180,9 @@ enum mt76_wcid_flags {
#define MT76_N_WCIDS 128
+/* stored in ieee80211_tx_info::hw_queue */
+#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
+
DECLARE_EWMA(signal, 10, 8);
#define MT_WCID_TX_INFO_RATE GENMASK(15, 0)
@@ -196,6 +202,7 @@ struct mt76_wcid {
u8 hw_key_idx;
u8 sta:1;
+ u8 ext_phy:1;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
@@ -237,6 +244,8 @@ struct mt76_rx_tid {
u8 size;
u8 nframes;
+ u8 num;
+
u8 started:1, stopped:1, timer_pending:1;
struct sk_buff *reorder_buf[];
@@ -267,6 +276,7 @@ enum {
MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
MT76_RESET,
+ MT76_MCU_RESET,
MT76_REMOVED,
MT76_READING_STATS,
};
@@ -279,6 +289,7 @@ struct mt76_hw_cap {
#define MT_DRV_TXWI_NO_FREE BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS BIT(1)
#define MT_DRV_SW_RX_AIRTIME BIT(2)
+#define MT_DRV_RX_DMA_HDR BIT(3)
struct mt76_driver_ops {
u32 drv_flags;
@@ -321,6 +332,8 @@ struct mt76_channel_state {
u64 cc_rx;
u64 cc_bss_rx;
u64 cc_tx;
+
+ s8 noise;
};
struct mt76_sband {
@@ -350,12 +363,15 @@ struct mt76_rate_power {
enum mt_vendor_req {
MT_VEND_DEV_MODE = 0x1,
MT_VEND_WRITE = 0x2,
+ MT_VEND_POWER_ON = 0x4,
MT_VEND_MULTI_WRITE = 0x6,
MT_VEND_MULTI_READ = 0x7,
MT_VEND_READ_EEPROM = 0x9,
MT_VEND_WRITE_FCE = 0x42,
MT_VEND_WRITE_CFG = 0x46,
MT_VEND_READ_CFG = 0x47,
+ MT_VEND_READ_EXT = 0x63,
+ MT_VEND_WRITE_EXT = 0x66,
};
enum mt76u_in_ep {
@@ -374,20 +390,26 @@ enum mt76u_out_ep {
__MT_EP_OUT_MAX,
};
+struct mt76_mcu {
+ struct mutex mutex;
+ u32 msg_seq;
+
+ struct sk_buff_head res_q;
+ wait_queue_head_t wait;
+};
+
#define MT_TX_SG_MAX_SIZE 8
-#define MT_RX_SG_MAX_SIZE 1
+#define MT_RX_SG_MAX_SIZE 4
#define MT_NUM_TX_ENTRIES 256
#define MT_NUM_RX_ENTRIES 128
#define MCU_RESP_URB_SIZE 1024
struct mt76_usb {
struct mutex usb_ctrl_mtx;
- union {
- u8 data[32];
- __le32 reg_val;
- };
+ u8 *data;
+ u16 data_len;
struct tasklet_struct rx_tasklet;
- struct workqueue_struct *stat_wq;
+ struct workqueue_struct *wq;
struct work_struct stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
@@ -395,10 +417,7 @@ struct mt76_usb {
bool sg_en;
struct mt76u_mcu {
- struct mutex mutex;
u8 *data;
- u32 msg_seq;
-
/* multiple reads */
struct mt76_reg_pair *rp;
int rp_len;
@@ -408,14 +427,6 @@ struct mt76_usb {
};
struct mt76_mmio {
- struct mt76e_mcu {
- struct mutex mutex;
-
- wait_queue_head_t wait;
- struct sk_buff_head res_q;
-
- u32 msg_seq;
- } mcu;
void __iomem *regs;
spinlock_t irq_lock;
u32 irqmask;
@@ -433,6 +444,7 @@ struct mt76_rx_status {
u8 iv[6];
+ u8 ext_phy:1;
u8 aggr:1;
u8 tid;
u16 seqno;
@@ -449,12 +461,33 @@ struct mt76_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
-struct mt76_dev {
+struct mt76_phy {
struct ieee80211_hw *hw;
+ struct mt76_dev *dev;
+ void *priv;
+
+ unsigned long state;
+
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
+ ktime_t survey_time;
+
+ struct mt76_sband sband_2g;
+ struct mt76_sband sband_5g;
+
+ int txpower_cur;
+ u8 antenna_mask;
+};
+
+struct mt76_dev {
+ struct mt76_phy phy; /* must be first */
+
+ struct mt76_phy *phy2;
+
+ struct ieee80211_hw *hw;
+
spinlock_t lock;
spinlock_t cc_lock;
@@ -471,14 +504,15 @@ struct mt76_dev {
const struct mt76_mcu_ops *mcu_ops;
struct device *dev;
+ struct mt76_mcu mcu;
+
struct net_device napi_dev;
spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
- u32 ampdu_ref;
struct list_head txwi_cache;
- struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
+ struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
@@ -491,32 +525,25 @@ struct mt76_dev {
struct sk_buff_head status_list;
unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
+ unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
u8 macaddr[ETH_ALEN];
u32 rev;
- unsigned long state;
u32 aggr_stats[32];
- u8 antenna_mask;
- u16 chainmask;
-
struct tasklet_struct pre_tbtt_tasklet;
int beacon_int;
u8 beacon_mask;
- struct mt76_sband sband_2g;
- struct mt76_sband sband_5g;
struct debugfs_blob_wrapper eeprom;
struct debugfs_blob_wrapper otp;
struct mt76_hw_cap cap;
struct mt76_rate_power rate_power;
- int txpower_conf;
- int txpower_cur;
enum nl80211_dfs_regions region;
@@ -529,8 +556,6 @@ struct mt76_dev {
u8 csa_complete;
- ktime_t survey_time;
-
u32 rxfilter;
union {
@@ -565,7 +590,9 @@ enum mt76_phy_type {
#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
+
#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
+#define __mt76_mcu_skb_send_msg(dev, ...) (dev)->mcu_ops->mcu_skb_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
@@ -581,7 +608,17 @@ enum mt76_phy_type {
#define __mt76_rmw_field(_dev, _reg, _field, _val) \
__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
-#define mt76_hw(dev) (dev)->mt76.hw
+#define mt76_hw(dev) (dev)->mphy.hw
+
+static inline struct ieee80211_hw *
+mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
+{
+ if (wcid <= MT76_N_WCIDS &&
+ mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
+ return dev->phy2->hw;
+
+ return dev->phy.hw;
+}
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
int timeout);
@@ -624,6 +661,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
+void mt76_unregister_phy(struct mt76_phy *phy);
+
+struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
+ const struct ieee80211_ops *ops);
+int mt76_register_phy(struct mt76_phy *phy);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
@@ -633,6 +675,20 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+static inline struct mt76_phy *
+mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
+{
+ if (phy_ext && dev->phy2)
+ return dev->phy2;
+ return &dev->phy;
+}
+
+static inline struct ieee80211_hw *
+mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
+{
+ return mt76_dev_phy(dev, phy_ext)->hw;
+}
+
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@@ -701,24 +757,31 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
return pktid >= MT_PACKET_ID_FIRST;
}
+static inline u8 mt76_tx_power_nss_delta(u8 nss)
+{
+ static const u8 nss_delta[4] = { 0, 6, 9, 12 };
+
+ return nss_delta[nss - 1];
+}
+
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
-void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool send_bar);
-void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
-void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
+void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
-bool mt76_has_tx_pending(struct mt76_dev *dev);
-void mt76_set_channel(struct mt76_dev *dev);
+bool mt76_has_tx_pending(struct mt76_phy *phy);
+void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
@@ -752,8 +815,10 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm);
@@ -771,10 +836,22 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
- int len);
/* internal */
+static inline struct ieee80211_hw *
+mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw = dev->phy.hw;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
+ hw = dev->phy2->hw;
+
+ info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;
+
+ return hw;
+}
+
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
@@ -783,8 +860,6 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
-u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
- int len);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -804,7 +879,7 @@ static inline u8 q2ep(u8 qid)
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
- int timeout)
+ int timeout, int ep)
{
struct usb_interface *uintf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(uintf);
@@ -812,20 +887,23 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
unsigned int pipe;
if (actual_len)
- pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
+ pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
else
- pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
+int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
-int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ bool ext);
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
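[Editor's note] The mt76_tx_power_nss_delta() helper added above encodes the combined-output gain of transmitting over several spatial streams. A minimal standalone sketch (not driver code), assuming the table is in 0.5 dB units, as the DIV_ROUND_UP(target_power + delta, 2) consumer in mt7615_init_txpower() suggests:

#include <stdio.h>
#include <stdint.h>

static uint8_t tx_power_nss_delta(uint8_t nss)
{
	static const uint8_t nss_delta[4] = { 0, 6, 9, 12 };

	return nss_delta[nss - 1];	/* callers guarantee 1 <= nss <= 4 */
}

int main(void)
{
	for (int nss = 1; nss <= 4; nss++)
		printf("%dSS: +%.1f dB combined-output gain\n",
		       nss, tx_power_nss_delta(nss) / 2.0);
	return 0;
}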
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
index e5af4f3389cc..60a996b63c0c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
#include "mt7603.h"
+#include "../trace.h"
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
@@ -17,9 +18,11 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
intr &= dev->mt76.mmio.irqmask;
if (intr & MT_INT_MAC_IRQ3) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index 47c85a9fac28..cc7c788abedd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -121,4 +121,8 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
mt7603_reset_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7603_radio_read);
+ debugfs_create_u8("sensitivity_limit", 0600, dir,
+ &dev->sensitivity_limit);
+ debugfs_create_bool("dynamic_sensitivity", 0600, dir,
+ &dev->dynamic_sensitivity);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index a6ab73060aad..a08b85281170 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -30,6 +30,16 @@ mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
+ static const u8 tid_to_ac[8] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+ };
__le32 *txd = (__le32 *)skb->data;
struct ieee80211_hdr *hdr;
struct ieee80211_sta *sta;
@@ -38,7 +48,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
void *priv;
int idx;
u32 val;
- u8 tid;
+ u8 tid = 0;
if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
@@ -56,15 +66,16 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
priv = msta = container_of(wcid, struct mt7603_sta, wcid);
val = le32_to_cpu(txd[0]);
- skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
-
val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
txd[0] = cpu_to_le32(val);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
- tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TAG1D_MASK;
+ skb_set_queue_mapping(skb, tid_to_ac[tid]);
ieee80211_sta_set_buffered(sta, tid, true);
spin_lock_bh(&dev->ps_lock);
@@ -210,7 +221,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
- MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+ MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
return ret;
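[Editor's note] The loopback fix above stops reading a TID out of non-QoS frames (they now default to TID 0) and derives the queue mapping from the 802.1d TID-to-AC table instead of the TXD queue index. A standalone sketch of that mapping; the 0x7 mask value for IEEE80211_QOS_CTL_TAG1D_MASK (low three bits of the QoS control field) is an assumption here:

#include <stdio.h>
#include <stdint.h>

#define QOS_CTL_TAG1D_MASK 0x7	/* assumed: low 3 bits carry the TID */

/* same ordering as the tid_to_ac[] table in mt7603_rx_loopback_skb() */
static const char *tid_to_ac_name[8] = {
	"BE", "BK", "BK", "BE", "VI", "VI", "VO", "VO"
};

int main(void)
{
	/* e.g. a QoS-data frame carrying TID 5 in its QoS control field */
	uint16_t qos_ctl = 0x0005;
	uint8_t tid = qos_ctl & QOS_CTL_TAG1D_MASK;

	printf("TID %u -> AC_%s\n", tid, tid_to_ac_name[tid]);
	return 0;
}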
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 0696dbf28c5b..f641a8b56b39 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -113,7 +113,7 @@ mt7603_dma_sched_init(struct mt7603_dev *dev)
static void
mt7603_phy_init(struct mt7603_dev *dev)
{
- int rx_chains = dev->mt76.antenna_mask;
+ int rx_chains = dev->mphy.antenna_mask;
int tx_chains = hweight8(rx_chains) - 1;
mt76_rmw(dev, MT_WF_RMAC_RMCR,
@@ -284,7 +284,7 @@ mt7603_init_hardware(struct mt7603_dev *dev)
mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850);
mt7603_mac_dma_start(dev);
dev->rxfilter = mt76_rr(dev, MT_WF_RFCR);
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
for (i = 0; i < MT7603_WTBL_SIZE; i++) {
mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE |
@@ -363,9 +363,9 @@ static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
mt76);
u32 val, addr;
- val = MT_LED_STATUS_DURATION(0xffff) |
- MT_LED_STATUS_OFF(delay_off) |
- MT_LED_STATUS_ON(delay_on);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
mt76_wr(dev, addr, val);
@@ -493,12 +493,12 @@ mt7603_init_txpower(struct mt7603_dev *dev,
target_power += max_offset;
dev->tx_power_limit = target_power;
- dev->mt76.txpower_cur = target_power;
+ dev->mphy.txpower_cur = target_power;
target_power = DIV_ROUND_UP(target_power, 2);
/* add 3 dBm for 2SS devices (combined output) */
- if (dev->mt76.antenna_mask & BIT(1))
+ if (dev->mphy.antenna_mask & BIT(1))
target_power += 3;
for (i = 0; i < sband->n_channels; i++) {
@@ -535,11 +535,13 @@ int mt7603_register_device(struct mt7603_dev *dev)
(unsigned long)dev);
/* Check for 7688, which only has 1SS */
- dev->mt76.antenna_mask = 3;
+ dev->mphy.antenna_mask = 3;
if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
- dev->mt76.antenna_mask = 1;
+ dev->mphy.antenna_mask = 1;
dev->slottime = 9;
+ dev->sensitivity_limit = 28;
+ dev->dynamic_sensitivity = true;
ret = mt7603_init_hardware(dev);
if (ret)
@@ -557,6 +559,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -564,7 +567,6 @@ int mt7603_register_device(struct mt7603_dev *dev)
dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
}
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->reg_notifier = mt7603_regd_notifier;
ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
@@ -573,7 +575,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
return ret;
mt7603_init_debugfs(dev);
- mt7603_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt7603_init_txpower(dev, &dev->mphy.sband_2g.sband);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 812d081ad943..39b7c5d6e6cd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -4,6 +4,7 @@
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"
+#include "../trace.h"
#define MT_PSE_PAGE_SIZE 128
@@ -53,7 +54,7 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev)
int sifs;
u32 val;
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
sifs = 16;
else
sifs = 10;
@@ -456,7 +457,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev)
return;
spin_lock_bh(&dev->mt76.cc_lock);
- dev->mt76.chan_state->cc_tx += total_airtime;
+ dev->mphy.chan_state->cc_tx += total_airtime;
spin_unlock_bh(&dev->mt76.cc_lock);
}
@@ -502,7 +503,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
memset(status, 0, sizeof(*status));
i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
- sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
+ sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
i >>= 1;
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
@@ -531,12 +532,12 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
/* all subframes of an A-MPDU have the same timestamp */
if (dev->rx_ampdu_ts != rxd[12]) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
}
dev->rx_ampdu_ts = rxd[12];
- status->ampdu_ref = dev->mt76.ampdu_ref;
+ status->ampdu_ref = dev->ampdu_ref;
}
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
@@ -609,7 +610,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->rate_idx = i;
- status->chains = dev->mt76.antenna_mask;
+ status->chains = dev->mphy.antenna_mask;
status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
dev->rssi_offset[0];
status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
@@ -668,7 +669,7 @@ mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
*bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = dev->mphy.chandef.chan->band;
u16 val;
nss = 1;
@@ -1156,10 +1157,10 @@ out:
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &dev->mphy.sband_5g.sband;
else
- sband = &dev->mt76.sband_2g.sband;
+ sband = &dev->mphy.sband_2g.sband;
final_rate &= GENMASK(5, 0);
final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
cck);
@@ -1193,6 +1194,8 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
if (pid < MT_PACKET_ID_FIRST)
return false;
+ trace_mac_txdone(mdev, sta->wcid.idx, pid);
+
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
if (skb) {
@@ -1389,10 +1392,10 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
int i;
ieee80211_stop_queues(dev->mt76.hw);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
/* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
tasklet_disable(&dev->mt76.tx_tasklet);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
@@ -1426,7 +1429,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt7603_pse_client_reset(dev);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
@@ -1439,7 +1442,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt7603_irq_enable(dev, mask);
skip_dma_reset:
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.tx_tasklet);
@@ -1456,7 +1459,7 @@ skip_dma_reset:
napi_schedule(&dev->mt76.napi[1]);
ieee80211_wake_queues(dev->mt76.hw);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
@@ -1574,7 +1577,7 @@ void mt7603_update_channel(struct mt76_dev *mdev)
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt76_channel_state *state;
- state = mdev->chan_state;
+ state = mdev->phy.chan_state;
state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}
@@ -1724,6 +1727,9 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
int min_signal;
u32 val;
+ if (!dev->dynamic_sensitivity)
+ return;
+
val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);
@@ -1737,7 +1743,7 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
mt7603_cca_stats_reset(dev);
- min_signal = mt76_get_min_avg_rssi(&dev->mt76);
+ min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!min_signal) {
dev->sensitivity = 0;
dev->last_cca_adj = jiffies;
@@ -1747,7 +1753,8 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
min_signal -= 15;
false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
- if (false_cca > 600) {
+ if (false_cca > 600 &&
+ dev->sensitivity < -100 + dev->sensitivity_limit) {
if (!dev->sensitivity)
dev->sensitivity = -92;
else
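[Editor's note] The hunk above (truncated here) gates the dynamic sensitivity adjustment on the new dynamic_sensitivity flag and caps it at -100 dBm + sensitivity_limit, i.e. -72 dBm with the default limit of 28 set in mt7603_register_device(). A simplified standalone model of the clamp; the step size in the elided else branch is an assumption:

#include <stdbool.h>
#include <stdio.h>

struct model {
	bool dynamic_sensitivity;
	int sensitivity;	/* dBm; 0 means "hardware default" */
	int sensitivity_limit;	/* 28 -> ceiling of -100 + 28 = -72 dBm */
};

static void false_cca_adjust(struct model *m, int false_cca)
{
	if (!m->dynamic_sensitivity)
		return;

	if (false_cca > 600 &&
	    m->sensitivity < -100 + m->sensitivity_limit) {
		if (!m->sensitivity)
			m->sensitivity = -92;	/* initial threshold */
		else
			m->sensitivity += 2;	/* step size: assumption */
	}
}

int main(void)
{
	struct model m = { true, 0, 28 };

	for (int i = 0; i < 15; i++)
		false_cca_adjust(&m, 700);
	printf("settled at %d dBm\n", m.sensitivity);	/* clamped at -72 */
	return 0;
}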
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 962e2822d19f..26cb711b465f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -15,8 +15,8 @@ mt7603_start(struct ieee80211_hw *hw)
mt7603_mac_reset_counters(dev);
mt7603_mac_start(dev);
- dev->mt76.survey_time = ktime_get_boottime();
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ dev->mphy.survey_time = ktime_get_boottime();
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt7603_mac_work(&dev->mt76.mac_work.work);
return 0;
@@ -27,7 +27,7 @@ mt7603_stop(struct ieee80211_hw *hw)
{
struct mt7603_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
cancel_delayed_work_sync(&dev->mt76.mac_work);
mt7603_mac_stop(dev);
}
@@ -143,16 +143,16 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
mt7603_beacon_set_timer(dev, -1, 0);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt7603_mac_stop(dev);
if (def->width == NL80211_CHAN_WIDTH_40)
bw = MT_BW_40;
- dev->mt76.chandef = *def;
+ dev->mphy.chandef = *def;
mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
ret = mt7603_mcu_set_channel(dev);
if (ret) {
@@ -176,9 +176,9 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt7603_mac_set_timing(dev);
mt7603_mac_start(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
@@ -187,10 +187,10 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
mt76_set(dev, MT_MIB_CTL,
MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
- mt76_rr(dev, MT_MIB_STAT_PSCCA);
+ mt76_rr(dev, MT_MIB_STAT_CCA);
mt7603_cca_stats_reset(dev);
- dev->mt76.survey_time = ktime_get_boottime();
+ dev->mphy.survey_time = ktime_get_boottime();
mt7603_init_edcca(dev);
@@ -642,7 +642,7 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct mt7603_dev *dev = hw->priv;
- dev->coverage_class = coverage_class;
+ dev->coverage_class = max_t(s16, coverage_class, 0);
mt7603_mac_set_timing(dev);
}
@@ -667,7 +667,7 @@ static void mt7603_tx(struct ieee80211_hw *hw,
wcid = &mvif->sta.wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(&dev->mphy, control->sta, wcid, skb);
}
const struct ieee80211_ops mt7603_ops = {
@@ -680,6 +680,7 @@ const struct ieee80211_ops mt7603_ops = {
.configure_filter = mt7603_configure_filter,
.bss_info_changed = mt7603_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7603_set_key,
.conf_tx = mt7603_conf_tx,
.sw_scan_start = mt76_sw_scan,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 02b2bd60d04d..77985d81c447 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -22,12 +22,11 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
struct mt7603_mcu_txd *txd;
u8 seq;
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen);
- memset(txd, 0, hdrlen);
txd->len = cpu_to_le16(skb->len);
if (cmd == -MCU_CMD_FW_SCATTER)
@@ -67,7 +66,7 @@ mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&mdev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mcu.mutex);
ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
if (ret)
@@ -97,7 +96,7 @@ mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
}
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
@@ -277,7 +276,7 @@ int mt7603_mcu_init(struct mt7603_dev *dev)
void mt7603_mcu_exit(struct mt7603_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
}
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
@@ -397,7 +396,7 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
u8 temp_comp_power[17];
u8 reserved;
} req = {
- .center_channel = dev->mt76.chandef.chan->hw_value,
+ .center_channel = dev->mphy.chandef.chan->hw_value,
#define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n]
.tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1),
.temp_comp = EEP_VAL(MT_EE_NIC_CONF_1),
@@ -430,9 +429,9 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
struct ieee80211_hw *hw = mt76_hw(dev);
- int n_chains = hweight8(dev->mt76.antenna_mask);
+ int n_chains = hweight8(dev->mphy.antenna_mask);
struct {
u8 control_chan;
u8 center_chan;
@@ -452,7 +451,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
s8 tx_power;
int i, ret;
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
+ if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_40) {
req.bw = MT_BW_40;
if (chandef->center_freq1 > chandef->chan->center_freq)
req.center_chan += 2;
@@ -461,11 +460,11 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
}
tx_power = hw->conf.power_level * 2;
- if (dev->mt76.antenna_mask == 3)
+ if (dev->mphy.antenna_mask == 3)
tx_power -= 6;
tx_power = min(tx_power, dev->tx_power_limit);
- dev->mt76.txpower_cur = tx_power;
+ dev->mphy.txpower_cur = tx_power;
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
req.txpower[i] = tx_power;
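[Editor's note] The sequence-number handling moved above (from dev->mmio.mcu to the common dev->mcu struct) wraps a 4-bit counter and skips 0, presumably because seq 0 is reserved for messages that expect no response. A standalone sketch of the pattern:

#include <stdio.h>
#include <stdint.h>

static uint8_t next_seq(uint8_t *msg_seq)
{
	uint8_t seq = ++(*msg_seq) & 0xf;

	if (!seq)	/* skip the reserved value 0 on wrap-around */
		seq = ++(*msg_seq) & 0xf;
	return seq;
}

int main(void)
{
	uint8_t counter = 14;

	for (int i = 0; i < 4; i++)
		printf("seq=%u\n", next_seq(&counter)); /* 15, 1, 2, 3 */
	return 0;
}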
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index ab54b0612e98..7fadf094e9be 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -15,6 +15,7 @@
#define MT7603_RATE_RETRY 2
+#define MT7603_MCU_RX_RING_SIZE 64
#define MT7603_RX_RING_SIZE 128
#define MT7603_FIRMWARE_E1 "mt7603_e1.bin"
@@ -98,7 +99,10 @@ enum mt7603_reset_cause {
};
struct mt7603_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
const struct mt76_bus_ops *bus_ops;
@@ -115,6 +119,7 @@ struct mt7603_dev {
u32 false_cca_ofdm, false_cca_cck;
unsigned long last_cca_adj;
+ u32 ampdu_ref;
__le32 rx_ampdu_ts;
u8 rssi_offset[3];
@@ -137,7 +142,9 @@ struct mt7603_dev {
u8 ed_strict_mode;
u8 ed_strong_signal;
+ bool dynamic_sensitivity;
s8 sensitivity;
+ u8 sensitivity_limit;
u8 beacon_check;
u8 tx_hang_check;
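[Editor's note] The union at the top of struct mt7603_dev is the key layout trick of this series: it assumes struct mt76_dev itself embeds its primary struct mt76_phy at offset 0, so dev->mphy.* and dev->mt76.* alias the same storage. A standalone sketch of the idea, with hypothetical types standing in for the mt76 structs:

#include <assert.h>
#include <stddef.h>

struct phy { int state; };
struct dev { struct phy phy; int mcu_state; };	/* phy must stay first */

struct drv_dev {
	union {			/* must be first */
		struct dev dev;
		struct phy mphy;
	};
	int drv_private;
};

int main(void)
{
	/* the aliasing only works if both members sit at offset 0 */
	static_assert(offsetof(struct drv_dev, dev) == 0, "layout");
	static_assert(offsetof(struct dev, phy) == 0, "layout");

	struct drv_dev d = { .dev = { .phy = { .state = 7 } } };

	assert(d.mphy.state == 7);	/* same field, shorter path */
	return 0;
}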
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index 6e23ed3dfdff..6741e6907194 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -585,18 +585,9 @@ enum {
#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
-#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v) (((_v) << \
- __ffs(MT_LED_STATUS_OFF_MASK)) & \
- MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v) (((_v) << \
- __ffs(MT_LED_STATUS_ON_MASK)) & \
- MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 0)
-#define MT_LED_STATUS_DURATION(_v) (((_v) << \
- __ffs(MT_LED_STATUS_DURATION_MASK)) &\
- MT_LED_STATUS_DURATION_MASK)
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
#define MT_CLIENT_BASE_PHYS_ADDR 0x800c0000
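[Editor's note] The open-coded shift-and-mask macros are replaced by bare GENMASK() field definitions so callers can use FIELD_PREP(), as the LED code in init.c now does. A userspace sketch of the equivalence, with GENMASK/FIELD_PREP re-implemented here purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define LED_STATUS_OFF		GENMASK(31, 24)
#define LED_STATUS_ON		GENMASK(23, 16)
#define LED_STATUS_DURATION	GENMASK(15, 0)

int main(void)
{
	/* same result the removed MT_LED_STATUS_*(_v) macros produced */
	uint32_t val = FIELD_PREP(LED_STATUS_DURATION, 0xffff) |
		       FIELD_PREP(LED_STATUS_OFF, 0x10) |
		       FIELD_PREP(LED_STATUS_ON, 0x20);

	printf("0x%08x\n", val);	/* 0x1020ffff */
	return 0;
}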
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
index 4cabba9aa2ea..6afd4aea67ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -11,3 +11,14 @@ config MT7615E
MU-MIMO up to 4 users/group and 160MHz channels.
To compile this driver as a module, choose M here.
+
+config MT7622_WMAC
+ bool "MT7622 (SoC) WMAC support"
+ depends on MT7615E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ default y
+ help
+ This adds support for the built-in WMAC on MT7622 SoC devices,
+ which has the same feature set as MT7615 but is limited to
+ 2.4 GHz only.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
index 5aaac69849d6..5c6a220ed7e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -2,5 +2,8 @@
obj-$(CONFIG_MT7615E) += mt7615e.o
-mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
- debugfs.o
+CFLAGS_trace.o := -I$(src)
+
+mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o mmio.o \
+ debugfs.o trace.o
+mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
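[Editor's note] CFLAGS_trace.o := -I$(src) is the usual kernel idiom that lets the generated tracepoint header be included relative to the driver directory when building trace.o. The trace.c itself is not shown in this hunk; a typical counterpart would be a sketch along these lines, reusing the mt7615_trace.h header that mac.c includes below:

// SPDX-License-Identifier: ISC
/* Hypothetical trace.c: defining CREATE_TRACE_POINTS exactly once turns
 * the TRACE_EVENT() declarations in the header into definitions.
 */
#define CREATE_TRACE_POINTS
#include "mt7615_trace.h"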
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index f6b75f832e6a..b4d0795154e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -7,6 +7,9 @@ mt7615_radar_pattern_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
return mt7615_mcu_rdd_send_pattern(dev);
}
@@ -18,6 +21,9 @@ mt7615_scs_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
mt7615_mac_set_scs(dev, val);
return 0;
@@ -37,6 +43,84 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
mt7615_scs_set, "%lld\n");
static int
+mt7615_dbdc_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ if (val)
+ mt7615_register_ext_phy(dev);
+ else
+ mt7615_unregister_ext_phy(dev);
+
+ return 0;
+}
+
+static int
+mt7615_dbdc_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = !!mt7615_ext_phy(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7615_dbdc_get,
+ mt7615_dbdc_set, "%lld\n");
+
+static int
+mt7615_fw_debug_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ dev->fw_debug = val;
+ mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
+
+ return 0;
+}
+
+static int
+mt7615_fw_debug_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = dev->fw_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7615_fw_debug_get,
+ mt7615_fw_debug_set, "%lld\n");
+
+static int
+mt7615_reset_test_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+ struct sk_buff *skb;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ skb = alloc_skb(1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, 1);
+ mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_reset_test, NULL,
+ mt7615_reset_test_set, "%lld\n");
+
+static int
mt7615_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt7615_dev *dev = file->private;
@@ -74,15 +158,28 @@ static const struct file_operations fops_ampdu_stat = {
.release = single_release,
};
+static void
+mt7615_radio_read_phy(struct mt7615_phy *phy, struct seq_file *s)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!phy)
+ return;
+
+ seq_printf(s, "Radio %d sensitivity: ofdm=%d cck=%d\n", ext_phy,
+ phy->ofdm_sensitivity, phy->cck_sensitivity);
+ seq_printf(s, "Radio %d false CCA: ofdm=%d cck=%d\n", ext_phy,
+ phy->false_cca_ofdm, phy->false_cca_cck);
+}
+
static int
mt7615_radio_read(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
- seq_printf(s, "Sensitivity: ofdm=%d cck=%d\n",
- dev->ofdm_sensitivity, dev->cck_sensitivity);
- seq_printf(s, "False CCA: ofdm=%d cck=%d\n",
- dev->false_cca_ofdm, dev->false_cca_cck);
+ mt7615_radio_read_phy(&dev->phy, s);
+ mt7615_radio_read_phy(mt7615_ext_phy(dev), s);
return 0;
}
@@ -92,6 +189,9 @@ static int mt7615_read_temperature(struct seq_file *s, void *data)
struct mt7615_dev *dev = dev_get_drvdata(s->private);
int temp;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
/* cpu */
temp = mt7615_mcu_get_temperature(dev, 0);
seq_printf(s, "Temperature: %d\n", temp);
@@ -164,12 +264,18 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
if (!dir)
return -ENOMEM;
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
- mt7615_queues_read);
+ if (is_mt7615(&dev->mt76))
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt7615_queues_read);
+ else
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt76_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7615_queues_acq);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
+ debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
+ debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7615_radio_read);
debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
@@ -184,6 +290,8 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
&dev->radar_pattern.power);
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_pattern);
+ debugfs_create_file("reset_test", 0200, dir, dev,
+ &fops_reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
mt7615_read_temperature);
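[Editor's note] All the new knobs above follow the same DEFINE_DEBUGFS_ATTRIBUTE get/set pattern, with mt7615_wait_for_mcu_init() guarding against touching hardware before firmware is up. A minimal template in the same style; the names here are hypothetical:

static int mt7615_example_set(void *data, u64 val)
{
	struct mt7615_dev *dev = data;

	if (!mt7615_wait_for_mcu_init(dev))
		return 0;	/* MCU never came up: silently ignore */

	/* ... program the hardware using val ... */
	return 0;
}

static int mt7615_example_get(void *data, u64 *val)
{
	*val = 0;		/* ... read back the current state ... */
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_example, mt7615_example_get,
			 mt7615_example_set, "%lld\n");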
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 285d4f1d6178..b19f208e3d54 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -12,47 +12,85 @@
#include "mac.h"
static int
-mt7615_init_tx_queues(struct mt7615_dev *dev, int n_desc)
+mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
+ int idx, int n_desc)
{
- struct mt76_sw_queue *q;
struct mt76_queue *hwq;
- int err, i;
+ int err;
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
if (!hwq)
return -ENOMEM;
- err = mt76_queue_alloc(dev, hwq, 0, n_desc, 0, MT_TX_RING_BASE);
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
if (err < 0)
return err;
- for (i = 0; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
- }
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
return 0;
}
static int
-mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
{
- struct mt76_queue *hwq;
- int err;
+ static const u8 wmm_queue_map[] = {
+ MT7622_TXQ_AC0,
+ MT7622_TXQ_AC1,
+ MT7622_TXQ_AC2,
+ MT7622_TXQ_AC3,
+ };
+ int ret;
+ int i;
- hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
- if (!hwq)
- return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[i],
+ wmm_queue_map[i],
+ MT7615_TX_RING_SIZE / 2);
+ if (ret)
+ return ret;
+ }
- err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
- if (err < 0)
- return err;
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
+ if (ret)
+ return ret;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
+ return ret;
+}
+
+static int
+mt7615_init_tx_queues(struct mt7615_dev *dev)
+{
+ struct mt76_sw_queue *q;
+ int ret, i;
+
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ MT7615_TXQ_FWDL,
+ MT7615_TX_FWDL_RING_SIZE);
+ if (ret)
+ return ret;
+
+ if (!is_mt7615(&dev->mt76))
+ return mt7622_init_tx_queues_multi(dev);
+
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[0], 0,
+ MT7615_TX_RING_SIZE);
+ if (ret)
+ return ret;
+ for (i = 1; i < MT_TXQ_MCU; i++) {
+ q = &dev->mt76.q_tx[i];
+ INIT_LIST_HEAD(&q->swq);
+ q->q = dev->mt76.q_tx[0].q;
+ }
+
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7615_TXQ_MCU,
+ MT7615_TX_MCU_RING_SIZE);
+ if (ret)
+ return ret;
return 0;
}
@@ -63,8 +101,12 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *rxd = (__le32 *)skb->data;
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
+ u16 flag;
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
+ type = PKT_TYPE_NORMAL_MCU;
switch (type) {
case PKT_TYPE_TXS:
@@ -78,6 +120,7 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
break;
+ case PKT_TYPE_NORMAL_MCU:
case PKT_TYPE_NORMAL:
if (!mt7615_mac_fill_rx(dev, skb)) {
mt76_rx(&dev->mt76, q, skb);
@@ -90,25 +133,32 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
+static void
+mt7615_tx_cleanup(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+ if (is_mt7615(&dev->mt76)) {
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ mt76_queue_tx_cleanup(dev, i, false);
+ }
+}
+
static int mt7615_poll_tx(struct napi_struct *napi, int budget)
{
- static const u8 queue_map[] = {
- MT_TXQ_MCU,
- MT_TXQ_BE
- };
struct mt7615_dev *dev;
- int i;
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
- for (i = 0; i < ARRAY_SIZE(queue_map); i++)
- mt76_queue_tx_cleanup(dev, queue_map[i], false);
+ mt7615_tx_cleanup(dev);
if (napi_complete_done(napi, 0))
mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
- for (i = 0; i < ARRAY_SIZE(queue_map); i++)
- mt76_queue_tx_cleanup(dev, queue_map[i], false);
+ mt7615_tx_cleanup(dev);
mt7615_mac_sta_poll(dev);
@@ -117,8 +167,68 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
+static void mt7622_dma_sched_init(struct mt7615_dev *dev)
+{
+ u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE);
+ int i;
+
+ mt76_rmw(dev, reg + MT_DMASHDL_PKT_MAX_SIZE,
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ for (i = 0; i <= 5; i++)
+ mt76_wr(dev, reg + MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x10) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(0), 0x42104210);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(1), 0x42104210);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(2), 0x5);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(3), 0);
+
+ mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET0, 0x6012345f);
+ mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET1, 0xedcba987);
+}
+
+static void mt7663_dma_sched_init(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ /* enable refill control group 0, 1, 2, 4, 5 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffc80000);
+ /* enable group 0, 1, 2, 4, 5, 15 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x70068037);
+
+ /* each group min quota must be larger than PLE_PKT_MAX_SIZE_NUM */
+ for (i = 0; i < 5; i++)
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(5)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x40));
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(15)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x20) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x20));
+
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x00050005);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(3)), 0);
+ /* map ALTX0 and ALTX1 QIDs to group 5 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6012345f);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
+}
+
int mt7615_dma_init(struct mt7615_dev *dev)
{
+ int rx_ring_size = MT7615_RX_RING_SIZE;
int ret;
mt76_dma_attach(&dev->mt76);
@@ -126,7 +236,6 @@ int mt7615_dma_init(struct mt7615_dev *dev)
mt76_wr(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
- MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY |
MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
@@ -141,28 +250,22 @@ int mt7615_dma_init(struct mt7615_dev *dev)
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
- mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
- mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
- mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
- mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
- mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
- mt76_set(dev, 0x7158, BIT(16));
- mt76_clear(dev, 0x7000, BIT(23));
- mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
-
- ret = mt7615_init_tx_queues(dev, MT7615_TX_RING_SIZE);
- if (ret)
- return ret;
+ if (is_mt7615(&dev->mt76)) {
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
+ mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
+ mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
+ mt76_set(dev, 0x7158, BIT(16));
+ mt76_clear(dev, 0x7000, BIT(23));
+ }
- ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7615_TXQ_MCU,
- MT7615_TX_MCU_RING_SIZE);
- if (ret)
- return ret;
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
- ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
- MT7615_TXQ_FWDL,
- MT7615_TX_FWDL_RING_SIZE);
+ ret = mt7615_init_tx_queues(dev);
if (ret)
return ret;
@@ -173,9 +276,11 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret)
return ret;
+ if (!is_mt7615(&dev->mt76))
+ rx_ring_size /= 2;
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
- MT7615_RX_RING_SIZE, MT_RX_BUF_SIZE,
- MT_RX_RING_BASE);
+ rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
if (ret)
return ret;
@@ -199,7 +304,14 @@ int mt7615_dma_init(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN);
/* enable interrupts for TX/RX rings */
- mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
+
+ if (is_mt7622(&dev->mt76))
+ mt7622_dma_sched_init(dev);
+
+ if (is_mt7663(&dev->mt76))
+ mt7663_dma_sched_init(dev);
return 0;
}
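[Editor's note] Both DMA scheduler init paths above pack a per-group page quota into a single GROUP_QUOTA register via FIELD_PREP. A standalone sketch of the packing; the exact bit positions of MT_DMASHDL_GROUP_QUOTA_MIN/MAX are an assumption here (min in the low half, max in the high half):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define GROUP_QUOTA_MIN	GENMASK(11, 0)	/* assumed field layout */
#define GROUP_QUOTA_MAX	GENMASK(27, 16)

int main(void)
{
	/* e.g. group 5 on mt7663: min = max = 0x40 pages */
	uint32_t val = FIELD_PREP(GROUP_QUOTA_MIN, 0x40) |
		       FIELD_PREP(GROUP_QUOTA_MAX, 0x40);

	printf("0x%08x\n", val);	/* 0x00400040 */
	return 0;
}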
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 17e277bf39e0..dfa9a08b896d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -84,17 +84,30 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
switch (val) {
case 0x7615:
+ case 0x7622:
return 0;
default:
return -EINVAL;
}
}
-static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
+static void
+mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
{
- u8 *eeprom = dev->mt76.eeprom.data;
- u8 tx_mask, rx_mask, max_nss;
- u32 val;
+ u8 val, *eeprom = dev->mt76.eeprom.data;
+
+ if (is_mt7663(&dev->mt76)) {
+ /* dual band */
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+ return;
+ }
+
+ if (is_mt7622(&dev->mt76)) {
+ /* 2GHz only */
+ dev->mt76.cap.has_2ghz = true;
+ return;
+ }
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
eeprom[MT_EE_WIFI_CONF]);
@@ -110,23 +123,34 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
dev->mt76.cap.has_5ghz = true;
break;
}
+}
+
+static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 tx_mask;
- /* read tx-rx mask from eeprom */
- val = mt76_rr(dev, MT_TOP_STRAP_STA);
- max_nss = val & MT_TOP_3NSS ? 3 : 4;
+ mt7615_eeprom_parse_hw_band_cap(dev);
- rx_mask = FIELD_GET(MT_EE_NIC_CONF_RX_MASK,
- eeprom[MT_EE_NIC_CONF_0]);
- if (!rx_mask || rx_mask > max_nss)
- rx_mask = max_nss;
+ if (is_mt7663(&dev->mt76)) {
+ tx_mask = 2;
+ } else {
+ u8 max_nss;
+ u32 val;
- tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
- eeprom[MT_EE_NIC_CONF_0]);
- if (!tx_mask || tx_mask > max_nss)
- tx_mask = max_nss;
+ /* read tx-rx mask from eeprom */
+ val = mt76_rr(dev, MT_TOP_STRAP_STA);
+ max_nss = val & MT_TOP_3NSS ? 3 : 4;
- dev->mt76.chainmask = tx_mask << 8 | rx_mask;
- dev->mt76.antenna_mask = BIT(tx_mask) - 1;
+ tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
+ eeprom[MT_EE_NIC_CONF_0]);
+ if (!tx_mask || tx_mask > max_nss)
+ tx_mask = max_nss;
+ }
+
+ dev->chainmask = BIT(tx_mask) - 1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
}
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
@@ -209,6 +233,38 @@ static void mt7615_apply_cal_free_data(struct mt7615_dev *dev)
eeprom[ical_nocheck[i]] = otp[ical_nocheck[i]];
}
+static void mt7622_apply_cal_free_data(struct mt7615_dev *dev)
+{
+ static const u16 ical[] = {
+ 0x53, 0x54, 0x55, 0x56, 0xf4, 0xf7, 0x144, 0x156, 0x15b
+ };
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 *otp = dev->mt76.otp.data;
+ int i;
+
+ if (!otp)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ical); i++) {
+ if (!otp[ical[i]])
+ continue;
+
+ eeprom[ical[i]] = otp[ical[i]];
+ }
+}
+
+static void mt7615_cal_free_data(struct mt7615_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ mt7622_apply_cal_free_data(dev);
+ break;
+ case 0x7615:
+ mt7615_apply_cal_free_data(dev);
+ break;
+ }
+}
+
int mt7615_eeprom_init(struct mt7615_dev *dev)
{
int ret;
@@ -222,7 +278,7 @@ int mt7615_eeprom_init(struct mt7615_dev *dev)
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
MT7615_EEPROM_SIZE);
else
- mt7615_apply_cal_free_data(dev);
+ mt7615_cal_free_data(dev);
mt7615_eeprom_parse_hw_cap(dev);
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
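[Editor's note] The reworked capability parsing above derives the chain mask directly from the number of TX chains: dev->chainmask = BIT(tx_mask) - 1, with the antenna mask mirroring it. A tiny standalone worked example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* e.g. mt7663, where tx_mask is fixed to 2 in the code above */
	uint8_t tx_mask = 2;
	uint8_t chainmask = (1u << tx_mask) - 1;	/* BIT(tx_mask) - 1 */

	/* 0x3: chains 0 and 1 enabled; antenna mask mirrors it */
	printf("chainmask=0x%x antennas=%d\n", chainmask,
	       __builtin_popcount(chainmask));
	return 0;
}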
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
index c3bc69ac210e..8a2a64b7fcd3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -18,10 +18,13 @@ enum mt7615_eeprom_field {
MT_EE_TX1_5G_G0_TARGET_POWER = 0x098,
MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2,
MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3,
+ MT7663_EE_TX0_2G_TARGET_POWER = 0x123,
MT_EE_TX2_5G_G0_TARGET_POWER = 0x142,
MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a,
- __MT_EE_MAX = 0x3bf
+ MT7615_EE_MAX = 0x3bf,
+ MT7622_EE_MAX = 0x3db,
+ MT7663_EE_MAX = 0x400,
};
#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 553bd4d988f7..03b1e56534d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -4,6 +4,7 @@
* Author: Roy Luo <royluo@google.com>
* Ryder Lee <ryder.lee@mediatek.com>
* Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include <linux/etherdevice.h>
@@ -13,55 +14,35 @@
static void mt7615_phy_init(struct mt7615_dev *dev)
{
- /* disable band 0 rf low power beacon mode */
- mt76_rmw(dev, MT_WF_PHY_WF2_RFCTRL0, MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN,
- MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+ /* disable rf low power beacon mode */
+ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
}
-static void mt7615_mac_init(struct mt7615_dev *dev)
+static void
+mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
{
u32 val, mask, set;
- int i;
- /* enable band 0/1 clk */
- mt76_set(dev, MT_CFG_CCR,
- MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN |
- MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN);
-
- val = mt76_rmw(dev, MT_TMAC_TRCR0,
- MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL,
- FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) |
- FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0));
- mt76_wr(dev, MT_TMAC_TRCR1, val);
-
- val = MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE |
- FIELD_PREP(MT_AGG_ACR_CFEND_RATE, 0x49) | /* 24M */
- FIELD_PREP(MT_AGG_ACR_BAR_RATE, 0x4b); /* 6M */
- mt76_wr(dev, MT_AGG_ACR0, val);
- mt76_wr(dev, MT_AGG_ACR1, val);
-
- mt76_rmw_field(dev, MT_TMAC_CTCR0,
- MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
- mt76_rmw_field(dev, MT_TMAC_CTCR0,
- MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
- mt76_rmw(dev, MT_TMAC_CTCR0,
- MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
- MT_TMAC_CTCR0_INS_DDLMT_EN,
- MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
- MT_TMAC_CTCR0_INS_DDLMT_EN);
-
- mt7615_mcu_set_rts_thresh(dev, 0x92b);
- mt7615_mac_set_scs(dev, true);
+ if (!chain)
+ val = MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN;
+ else
+ val = MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN;
- mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
- MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
+ /* enable band 0/1 clk */
+ mt76_set(dev, MT_CFG_CCR, val);
- mt7615_mcu_init_mac(dev);
+ mt76_rmw(dev, MT_TMAC_TRCR(chain),
+ MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL,
+ FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) |
+ FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0));
- mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
- FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
+ mt76_wr(dev, MT_AGG_ACR(chain),
+ MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE |
+ FIELD_PREP(MT_AGG_ACR_CFEND_RATE, MT7615_CFEND_RATE_DEFAULT) |
+ FIELD_PREP(MT_AGG_ACR_BAR_RATE, MT7615_BAR_RATE_DEFAULT));
- mt76_wr(dev, MT_AGG_ARUCR,
+ mt76_wr(dev, MT_AGG_ARUCR(chain),
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
@@ -71,7 +52,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
- mt76_wr(dev, MT_AGG_ARDCR,
+ mt76_wr(dev, MT_AGG_ARDCR(chain),
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7615_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7615_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
@@ -81,12 +62,6 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
- mt76_wr(dev, MT_AGG_ARCR,
- (FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
- MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
- FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
- FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
-
mask = MT_DMA_RCFR0_MCU_RX_MGMT |
MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
MT_DMA_RCFR0_MCU_RX_CTL_BAR |
@@ -95,8 +70,36 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_DMA_RCFR0_RX_DROPPED_MCAST;
set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
- mt76_rmw(dev, MT_DMA_BN0RCFR0, mask, set);
- mt76_rmw(dev, MT_DMA_BN1RCFR0, mask, set);
+ mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set);
+}
+
+static void mt7615_mac_init(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt7615_init_mac_chain(dev, 0);
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
+ mt76_rmw(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mt7615_mcu_set_rts_thresh(&dev->phy, 0x92b);
+ mt7615_mac_set_scs(dev, true);
+
+ mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
+ MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
+
+ mt76_wr(dev, MT_AGG_ARCR,
+ FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+ MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+ FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+ FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4));
for (i = 0; i < MT7615_WTBL_SIZE; i++)
mt7615_mac_wtbl_update(dev, i,
@@ -104,6 +107,39 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN);
+
+ /* disable hdr translation and hw AMSDU */
+ mt76_wr(dev, MT_DMA_DCR0,
+ FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) |
+ MT_DMA_DCR0_RX_VEC_DROP);
+ if (is_mt7663(&dev->mt76)) {
+ mt76_wr(dev, MT_CSR(0x010), 0x8208);
+ mt76_wr(dev, 0x44064, 0x2000000);
+ mt76_wr(dev, MT_WF_AGG(0x160), 0x5c341c02);
+ mt76_wr(dev, MT_WF_AGG(0x164), 0x70708040);
+ } else {
+ mt7615_init_mac_chain(dev, 1);
+ }
+}
+
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
+{
+ flush_work(&dev->mcu_work);
+
+ return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+}
+
+static void mt7615_init_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev = container_of(work, struct mt7615_dev, mcu_work);
+
+ if (mt7615_mcu_init(dev))
+ return;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
@@ -112,6 +148,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+ INIT_WORK(&dev->mcu_work, mt7615_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
@@ -123,17 +160,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
if (ret)
return ret;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
-
- ret = mt7615_mcu_init(dev);
- if (ret)
- return ret;
-
- mt7615_mcu_set_eeprom(dev);
- mt7615_mac_init(dev);
- mt7615_phy_init(dev);
- mt7615_mcu_ctrl_pm_state(dev, 0);
- mt7615_mcu_del_wtbl_all(dev);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
@@ -200,12 +227,65 @@ static const struct ieee80211_iface_combination if_comb[] = {
};
static void
+mt7615_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7615_dev *dev;
+ struct mt76_dev *mt76;
+ u32 val, addr;
+
+ mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
+ dev = container_of(mt76, struct mt7615_dev, mt76);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ addr = mt7615_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+}
+
+static int
+mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7615_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+
+static void
+mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7615_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7615_led_set_config(led_cdev, 0xff, 0);
+}
+
+static void
mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband)
{
- int i, n_chains = hweight8(dev->mt76.antenna_mask), target_chains;
+ int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains;
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
+ int delta = mt76_tx_power_nss_delta(n_chains);
target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < sband->n_channels; i++) {
@@ -220,21 +300,7 @@ mt7615_init_txpower(struct mt7615_dev *dev,
target_power = max(target_power, eep[index]);
}
- target_power = DIV_ROUND_UP(target_power, 2);
- switch (n_chains) {
- case 4:
- target_power += 6;
- break;
- case 3:
- target_power += 4;
- break;
- case 2:
- target_power += 3;
- break;
- default:
- break;
- }
-
+ target_power = DIV_ROUND_UP(target_power + delta, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
chan->orig_mpwr = target_power;
@@ -246,74 +312,190 @@ mt7615_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct mt7615_dev *dev = hw->priv;
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
-
- if (request->dfs_region == dev->mt76.region)
- return;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7615_phy *phy = mphy->priv;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
dev->mt76.region = request->dfs_region;
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
- mt7615_dfs_stop_radar_detector(dev);
- if (request->dfs_region == NL80211_DFS_UNSET)
- mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- else
- mt7615_dfs_start_radar_detector(dev);
+ mt7615_dfs_init_radar_detector(phy);
}
-int mt7615_register_device(struct mt7615_dev *dev)
+static void
+mt7615_init_wiphy(struct ieee80211_hw *hw)
{
- struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct wiphy *wiphy = hw->wiphy;
- int ret;
-
- INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
- INIT_LIST_HEAD(&dev->sta_poll_list);
- spin_lock_init(&dev->sta_poll_lock);
-
- ret = mt7615_init_hardware(dev);
- if (ret)
- return ret;
hw->queues = 4;
hw->max_rates = 3;
hw->max_report_rates = 7;
hw->max_rate_tries = 11;
+ phy->slottime = 9;
+
hw->sta_data_size = sizeof(struct mt7615_sta);
hw->vif_data_size = sizeof(struct mt7615_vif);
wiphy->iface_combinations = if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7615_regd_notifier;
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
- dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.vht_cap.cap |=
+ if (is_mt7615(&phy->dev->mt76))
+ hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+ else
+ hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+}
+
+static void
+mt7615_cap_dbdc_enable(struct mt7615_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap &=
+ ~(IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
+ if (dev->chainmask == 0xf)
+ dev->mphy.antenna_mask = dev->chainmask >> 2;
+ else
+ dev->mphy.antenna_mask = dev->chainmask >> 1;
+ dev->phy.chainmask = dev->mphy.antenna_mask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
+ mt76_set_stream_caps(&dev->mt76, true);
+}
+
+static void
+mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
- dev->dfs_state = -1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask;
+ mt76_set_stream_caps(&dev->mt76, true);
+}
+
+int mt7615_register_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy;
+ int ret;
+
+ if (!is_mt7615(&dev->mt76))
+ return -EOPNOTSUPP;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return -EINVAL;
+
+ if (phy)
+ return 0;
+
+ mt7615_cap_dbdc_enable(dev);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops);
+ if (!mphy)
+ return -ENOMEM;
+
+ phy = mphy->priv;
+ phy->dev = dev;
+ phy->mt76 = mphy;
+ phy->chainmask = dev->chainmask & ~dev->phy.chainmask;
+ mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
+ mt7615_init_wiphy(mphy->hw);
+
+ /*
+ * Make the secondary PHY MAC address locally administered, without
+ * colliding with the usual multi-vif MAC address derivation scheme
+ */
+ mphy->hw->wiphy->perm_addr[0] |= 2;
+ mphy->hw->wiphy->perm_addr[0] ^= BIT(7);
+
+ /* second phy can only handle 5 GHz */
+ mphy->sband_2g.sband.n_channels = 0;
+ mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+
+ /* The second interface does not get any packets unless it has a vif */
+ ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF);
+
+ ret = mt76_register_phy(mphy);
+ if (ret)
+ ieee80211_free_hw(mphy->hw);
+
+ return ret;
+}
+
+void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy = dev->mt76.phy2;
+
+ if (!phy)
+ return;
+
+ mt7615_cap_dbdc_disable(dev);
+ mt76_unregister_phy(mphy);
+ ieee80211_free_hw(mphy->hw);
+}
+
+void mt7615_init_device(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+ init_waitqueue_head(&dev->reset_wait);
+ INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
+
+ mt7615_init_wiphy(hw);
+ dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ mt7615_cap_dbdc_disable(dev);
+ dev->phy.dfs_state = -1;
+}
+
+int mt7615_register_device(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt7615_init_device(dev);
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
+ }
+
+ ret = mt7622_wmac_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_hardware(dev);
+ if (ret)
+ return ret;
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (ret)
return ret;
- mt7615_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt7615_init_txpower(dev, &dev->mt76.sband_5g.sband);
-
- hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+ ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
+ mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
return mt7615_init_debugfs(dev);
}
@@ -321,10 +503,15 @@ int mt7615_register_device(struct mt7615_dev *dev)
void mt7615_unregister_device(struct mt7615_dev *dev)
{
struct mt76_txwi_cache *txwi;
+ bool mcu_running;
int id;
+ mcu_running = mt7615_wait_for_mcu_init(dev);
+
+ mt7615_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
- mt7615_mcu_exit(dev);
+ if (mcu_running)
+ mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
spin_lock_bh(&dev->token_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index c77adc5d2552..a27a6d164009 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -10,13 +10,50 @@
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
+#include "../trace.h"
#include "../dma.h"
+#include "mt7615_trace.h"
#include "mac.h"
-static inline s8 to_rssi(u32 field, u32 rxv)
-{
- return (FIELD_GET(field, rxv) - 220) / 2;
-}
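+/* RCPI is reported in 0.5 dB units with a +220 offset, so this yields
+ * the RSSI in dBm (rcpi / 2 - 110)
+ */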
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
+static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
+ [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
+ [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 },
+ [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 },
+ [13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 },
+ [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 },
+ },
+};
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
u8 idx, bool unicast)
@@ -49,34 +86,116 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
mt76_rr(dev, MT_TX_AGG_CNT(i));
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
-
- /* TODO: add DBDC support */
+ dev->mt76.phy.survey_time = ktime_get_boottime();
+ if (dev->mt76.phy2)
+ dev->mt76.phy2->survey_time = ktime_get_boottime();
/* reset airtime counters */
mt76_rr(dev, MT_MIB_SDR9(0));
+ mt76_rr(dev, MT_MIB_SDR9(1));
+
mt76_rr(dev, MT_MIB_SDR36(0));
+ mt76_rr(dev, MT_MIB_SDR36(1));
+
mt76_rr(dev, MT_MIB_SDR37(0));
+ mt76_rr(dev, MT_MIB_SDR37(1));
+
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
+void mt7615_mac_set_timing(struct mt7615_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
+ int sifs, offset;
+
+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ)
+ sifs = 16;
+ else
+ sifs = 10;
+
+ if (ext_phy) {
+ coverage_class = max_t(s16, dev->phy.coverage_class,
+ coverage_class);
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ } else {
+ struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);
+
+ if (phy_ext)
+ coverage_class = max_t(s16, phy_ext->coverage_class,
+ coverage_class);
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
+ }
+ udelay(1);
+
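+ /* each coverage class step adds 3 us to the PLCP/CCA timeouts */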
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+ mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);
+
+ mt76_wr(dev, MT_TMAC_ICR(ext_phy),
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (phy->slottime < 20)
+ val = MT7615_CFEND_RATE_DEFAULT;
+ else
+ val = MT7615_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
+ if (ext_phy)
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ else
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
+
+}
+
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_phy *phy = &dev->phy;
+ struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
__le32 *rxd = (__le32 *)skb->data;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
+ __le32 rxd12 = rxd[12];
bool unicast, remove_pad, insert_ccmp_hdr = false;
+ int phy_idx;
int i, idx;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return -EINVAL;
+ u8 chfreq;
memset(status, 0, sizeof(*status));
+ chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
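+ /* match the RXD channel number against each PHY; -1 leaves it
+ * ambiguous, to be resolved below from the per-chain noise values
+ */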
+ if (!phy2)
+ phy_idx = 0;
+ else if (phy2->chfreq == phy->chfreq)
+ phy_idx = -1;
+ else if (phy->chfreq == chfreq)
+ phy_idx = 0;
+ else if (phy2->chfreq == chfreq)
+ phy_idx = 1;
+ else
+ phy_idx = -1;
+
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
@@ -91,14 +210,6 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
spin_unlock_bh(&dev->sta_poll_lock);
}
- /* TODO: properly support DBDC */
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
- if (status->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
- else
- sband = &dev->mt76.sband_2g.sband;
-
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -112,28 +223,11 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
- if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
- MT_RXD2_NORMAL_NON_AMPDU))) {
- status->flag |= RX_FLAG_AMPDU_DETAILS;
-
- /* all subframes of an A-MPDU have the same timestamp */
- if (dev->rx_ampdu_ts != rxd[12]) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
- }
- dev->rx_ampdu_ts = rxd[12];
-
- status->ampdu_ref = dev->mt76.ampdu_ref;
- }
-
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
return -EINVAL;
- if (!sband->channels)
- return -EINVAL;
-
rxd += 4;
if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
rxd += 4;
@@ -166,6 +260,59 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
}
if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg5 = le32_to_cpu(rxd[5]);
+
+ /*
+ * If both PHYs are on the same channel and we don't have a WCID,
+ * we need to figure out which PHY this packet was received on.
+ * On the primary PHY, the noise value for the chains belonging to the
+ * second PHY will be set to the noise value of the last packet from
+ * that PHY.
+ */
+ if (phy_idx < 0) {
+ int first_chain = ffs(phy2->chainmask) - 1;
+
+ phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
+ }
+ }
+
+ if (phy_idx == 1 && phy2) {
+ mphy = dev->mt76.phy2;
+ phy = phy2;
+ status->ext_phy = true;
+ }
+
+ if (chfreq != phy->chfreq)
+ return -EINVAL;
+
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != rxd12) {
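+ /* keep ampdu_ref non-zero when the counter wraps around */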
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = rxd12;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
u32 rxdg0 = le32_to_cpu(rxd[0]);
u32 rxdg1 = le32_to_cpu(rxd[1]);
u32 rxdg3 = le32_to_cpu(rxd[3]);
@@ -218,14 +365,14 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- status->chains = dev->mt76.antenna_mask;
+ status->chains = mphy->antenna_mask;
status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
status->signal = status->chain_signal[0];
- for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
+ for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
if (!(status->chains & BIT(i)))
continue;
@@ -274,13 +421,20 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
if (e->skb == DMA_DUMMY_DATA) {
struct mt76_txwi_cache *t;
struct mt7615_dev *dev;
- struct mt7615_txp *txp;
+ struct mt7615_txp_common *txp;
+ u16 token;
dev = container_of(mdev, struct mt7615_dev, mt76);
txp = mt7615_txwi_to_txp(mdev, e->txwi);
+ if (is_mt7615(&dev->mt76))
+ token = le16_to_cpu(txp->fw.token);
+ else
+ token = le16_to_cpu(txp->hw.msdu_id[0]) &
+ ~MT_MSDU_ID_VALID;
+
spin_lock_bh(&dev->token_lock);
- t = idr_remove(&dev->token, le16_to_cpu(txp->token));
+ t = idr_remove(&dev->token, token);
spin_unlock_bh(&dev->token_lock);
e->skb = t ? t->skb : NULL;
}
@@ -291,6 +445,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+ struct mt76_phy *mphy,
const struct ieee80211_tx_rate *rate,
bool stbc, u8 *bw)
{
@@ -319,11 +474,11 @@ mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
*bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = mphy->chandef.chan->band;
u16 val;
nss = 1;
- r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
@@ -348,13 +503,15 @@ mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key)
+ struct ieee80211_key_conf *key, bool beacon)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->mphy;
+ bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
int tx_count = 8;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
__le16 fc = hdr->frame_control;
@@ -374,6 +531,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
tx_count = msta->rate_count;
}
+ if (ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
@@ -381,11 +541,17 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
skb_get_queue_mapping(skb);
p_fmt = MT_TX_TYPE_CT;
- } else if (ieee80211_is_beacon(fc)) {
- q_idx = MT_LMAC_BCN0;
+ } else if (beacon) {
+ if (ext_phy)
+ q_idx = MT_LMAC_BCN1;
+ else
+ q_idx = MT_LMAC_BCN0;
p_fmt = MT_TX_TYPE_FW;
} else {
- q_idx = MT_LMAC_ALTX0;
+ if (ext_phy)
+ q_idx = MT_LMAC_ALTX1;
+ else
+ q_idx = MT_LMAC_ALTX0;
p_fmt = MT_TX_TYPE_CT;
}
@@ -431,7 +597,8 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
u8 bw;
- u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
+ u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
+ &bw);
txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
@@ -486,21 +653,59 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
return 0;
}
-void mt7615_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *t)
+static void
+mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
{
- struct mt7615_txp *txp;
int i;
- txp = mt7615_txwi_to_txp(dev, t);
for (i = 1; i < txp->nbuf; i++)
dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}
-static u32 mt7615_mac_wtbl_addr(int wcid)
+static void
+mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
{
- return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
+ struct mt7615_txp_ptr *ptr = &txp->ptr[i];
+ bool last;
+ u16 len;
+
+ len = le16_to_cpu(ptr->len0);
+ last = len & MT_TXD_LEN_MSDU_LAST;
+ len &= ~MT_TXD_LEN_MSDU_LAST;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+
+ len = le16_to_cpu(ptr->len1);
+ last = len & MT_TXD_LEN_MSDU_LAST;
+ len &= ~MT_TXD_LEN_MSDU_LAST;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+ }
+}
+
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7615_txp_common *txp;
+
+ txp = mt7615_txwi_to_txp(dev, t);
+ if (is_mt7615(dev))
+ mt7615_txp_skb_unmap_fw(dev, &txp->fw);
+ else
+ mt7615_txp_skb_unmap_hw(dev, &txp->hw);
+}
+
+static u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
+{
+ return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
}
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
@@ -546,7 +751,7 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
- addr = mt7615_mac_wtbl_addr(msta->wcid.idx) + 19 * 4;
+ addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
for (i = 0; i < 4; i++, addr += 8) {
u32 tx_last = msta->airtime_ac[i];
@@ -588,13 +793,15 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
rcu_read_unlock();
}
-void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates)
{
+ struct mt7615_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
struct ieee80211_tx_rate *ref;
int wcid = sta->wcid.idx;
- u32 addr = mt7615_mac_wtbl_addr(wcid);
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid);
bool stbc = false;
int n_rates = sta->n_rates;
u8 bw, bw_prev, bw_idx = 0;
@@ -649,11 +856,12 @@ void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
}
}
- val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+ val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
bw_prev = bw;
if (probe_rate) {
- probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+ probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
+ stbc, &bw);
if (bw)
bw_idx = 1;
else
@@ -662,19 +870,19 @@ void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
probe_val = val[0];
}
- val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+ val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
if (bw_prev) {
bw_idx = 3;
bw_prev = bw;
}
- val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+ val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
if (bw_prev) {
bw_idx = 5;
bw_prev = bw;
}
- val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+ val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
if (bw_prev)
bw_idx = 7;
@@ -758,7 +966,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd)
{
- u32 addr = mt7615_mac_wtbl_addr(wcid->idx) + 30 * 4;
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
u8 data[32] = {};
if (key->keylen > sizeof(data))
@@ -796,7 +1004,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher, int keyidx,
enum set_key_cmd cmd)
{
- u32 addr = mt7615_mac_wtbl_addr(wcid->idx), w0, w1;
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
return -ETIMEDOUT;
@@ -832,7 +1040,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd)
{
- u32 addr = mt7615_mac_wtbl_addr(wcid->idx);
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
if (cmd == SET_KEY) {
if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
@@ -884,39 +1092,51 @@ out:
return err;
}
-int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info)
+static void
+mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_vif *vif = info->control.vif;
- int i, pid, id, nbuf = tx_info->nbuf - 1;
- u8 *txwi = (u8 *)txwi_ptr;
- struct mt76_txwi_cache *t;
- struct mt7615_txp *txp;
+ struct mt7615_hw_txp *txp = txp_ptr;
+ struct mt7615_txp_ptr *ptr = &txp->ptr[0];
+ int nbuf = tx_info->nbuf - 1;
+ int i;
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
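+
+ /* each txp_ptr entry carries two buffers: buf0/len0 and buf1/len1 */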
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
- spin_lock_bh(&dev->mt76.lock);
- mt7615_mac_set_rates(dev, msta, &info->control.rates[0],
- msta->rates);
- msta->rate_probe = true;
- spin_unlock_bh(&dev->mt76.lock);
+ for (i = 0; i < nbuf; i++) {
+ u32 addr = tx_info->buf[i + 1].addr;
+ u16 len = tx_info->buf[i + 1].len;
+
+ if (i == nbuf - 1)
+ len |= MT_TXD_LEN_MSDU_LAST |
+ MT_TXD_LEN_AMSDU_LAST;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
}
+}
- mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
- pid, key);
+static void
+mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7615_fw_txp *txp = txp_ptr;
+ int nbuf = tx_info->nbuf - 1;
+ int i;
- txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
@@ -924,6 +1144,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp->nbuf = nbuf;
/* pass partial skb header to fw */
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
tx_info->buf[1].len = MT_CT_PARSE_LEN;
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
@@ -941,6 +1162,42 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp->bss_idx = mvif->idx;
}
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+}
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ int pid, id;
+ u8 *txwi = (u8 *)txwi_ptr;
+ struct mt76_txwi_cache *t;
+ void *txp;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ struct mt7615_phy *phy = &dev->phy;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
+ phy = mdev->phy2->priv;
+
+ spin_lock_bh(&dev->mt76.lock);
+ mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
+ msta->rates);
+ msta->rate_probe = true;
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
@@ -950,8 +1207,16 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
- txp->token = cpu_to_le16(id);
- txp->rept_wds_wcid = 0xff;
+ mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
+ pid, key, false);
+
+ txp = txwi + MT_TXD_SIZE;
+ memset(txp, 0, sizeof(struct mt7615_txp_common));
+ if (is_mt7615(&dev->mt76))
+ mt7615_write_fw_txp(dev, tx_info, txp, id);
+ else
+ mt7615_write_hw_txp(dev, tx_info, txp, id);
+
tx_info->skb = DMA_DUMMY_DATA;
return 0;
@@ -962,6 +1227,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
{
struct ieee80211_supported_band *sband;
struct mt7615_rate_set *rs;
+ struct mt76_phy *mphy;
int first_idx = 0, last_idx;
int i, idx, count;
bool fixed_rate, ack_timeout;
@@ -1019,7 +1285,12 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
spin_lock_bh(&dev->mt76.lock);
if (sta->rate_probe) {
- mt7615_mac_set_rates(dev, sta, NULL, sta->rates);
+ struct mt7615_phy *phy = &dev->phy;
+
+ if (sta->wcid.ext_phy && dev->mt76.phy2)
+ phy = dev->mt76.phy2->priv;
+
+ mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
sta->rate_probe = false;
}
spin_unlock_bh(&dev->mt76.lock);
@@ -1059,10 +1330,14 @@ out:
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
+ mphy = &dev->mphy;
+ if (sta->wcid.ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
else
- sband = &dev->mt76.sband_2g.sband;
+ sband = &mphy->sband_2g.sband;
final_rate &= MT_TX_RATE_IDX;
final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
cck);
@@ -1105,6 +1380,8 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
if (pid < MT_PACKET_ID_FIRST)
return false;
+ trace_mac_txdone(mdev, sta->wcid.idx, pid);
+
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
if (skb) {
@@ -1128,6 +1405,7 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
struct ieee80211_sta *sta = NULL;
struct mt7615_sta *msta = NULL;
struct mt76_wcid *wcid;
+ struct mt76_phy *mphy = &dev->mt76.phy;
__le32 *txs_data = data;
u32 txs;
u8 wcidx;
@@ -1164,111 +1442,160 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
if (wcidx >= MT7615_WTBL_STA || !sta)
goto out;
+ if (wcid->ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
if (mt7615_fill_txs(dev, msta, &info, txs_data))
- ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+ ieee80211_tx_status_noskb(mphy->hw, sta, &info);
out:
rcu_read_unlock();
}
-void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void
+mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
+
+ trace_mac_tx_free(dev, token);
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ return;
+
+ mt7615_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ mt76_tx_complete_skb(mdev, txwi->skb);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+}
+
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
u8 i, count;
count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
- for (i = 0; i < count; i++) {
- spin_lock_bh(&dev->token_lock);
- txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
- spin_unlock_bh(&dev->token_lock);
-
- if (!txwi)
- continue;
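+ /* MT7615 reports 16-bit tokens; newer chips pack 32-bit entries */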
+ if (is_mt7615(&dev->mt76)) {
+ __le16 *token = &free->token[0];
- mt7615_txp_skb_unmap(mdev, txwi);
- if (txwi->skb) {
- mt76_tx_complete_skb(mdev, txwi->skb);
- txwi->skb = NULL;
- }
+ for (i = 0; i < count; i++)
+ mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
+ } else {
+ __le32 *token = (__le32 *)&free->token[0];
- mt76_put_txwi(mdev, txwi);
+ for (i = 0; i < count; i++)
+ mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
+
dev_kfree_skb(skb);
}
static void
-mt7615_mac_set_default_sensitivity(struct mt7615_dev *dev)
+mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
- mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_OFDM_MASK,
- MT_WF_PHY_B0_PD_OFDM(0x13c));
- mt76_rmw(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
- MT_WF_PHY_B1_PD_OFDM_MASK,
- MT_WF_PHY_B1_PD_OFDM(0x13c));
-
- mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
- MT_WF_PHY_B0_PD_CCK_MASK,
- MT_WF_PHY_B0_PD_CCK(0x92));
- mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
- MT_WF_PHY_B1_PD_CCK_MASK,
- MT_WF_PHY_B1_PD_CCK(0x92));
-
- dev->ofdm_sensitivity = -98;
- dev->cck_sensitivity = -110;
- dev->last_cca_adj = jiffies;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+
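+ /* 0x13c == -98 * 2 + 512 and 0x92 == -110 + 256: the register
+ * encodings of the default OFDM and CCK sensitivity thresholds
+ */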
+ mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(ext_phy),
+ MT_WF_PHY_PD_OFDM(ext_phy, 0x13c));
+ mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, 0x92));
+
+ phy->ofdm_sensitivity = -98;
+ phy->cck_sensitivity = -110;
+ phy->last_cca_adj = jiffies;
}
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable)
{
+ struct mt7615_phy *ext_phy;
+
mutex_lock(&dev->mt76.mutex);
if (dev->scs_en == enable)
goto out;
+ if (is_mt7663(&dev->mt76))
+ goto out;
+
if (enable) {
- /* DBDC not supported */
- mt76_set(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_BLK);
+ mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(0),
+ MT_WF_PHY_PD_BLK(0));
+ mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(1),
+ MT_WF_PHY_PD_BLK(1));
if (is_mt7622(&dev->mt76)) {
mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8);
mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7);
}
} else {
- mt76_clear(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_BLK);
- mt76_clear(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
- MT_WF_PHY_B1_PD_BLK);
+ mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(0),
+ MT_WF_PHY_PD_BLK(0));
+ mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(1),
+ MT_WF_PHY_PD_BLK(1));
}
- mt7615_mac_set_default_sensitivity(dev);
+ mt7615_mac_set_default_sensitivity(&dev->phy);
+ ext_phy = mt7615_ext_phy(dev);
+ if (ext_phy)
+ mt7615_mac_set_default_sensitivity(ext_phy);
+
dev->scs_en = enable;
out:
mutex_unlock(&dev->mt76.mutex);
}
-void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev)
+void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
- mt76_clear(dev, MT_WF_PHY_R0_B0_PHYMUX_5, GENMASK(22, 20));
- mt76_set(dev, MT_WF_PHY_R0_B0_PHYMUX_5, BIT(22) | BIT(20));
+ u32 rxtd;
+
+ if (is_mt7663(&dev->mt76))
+ return;
+
+ if (ext_phy)
+ rxtd = MT_WF_PHY_RXTD2(10);
+ else
+ rxtd = MT_WF_PHY_RXTD(12);
+
+ mt76_set(dev, rxtd, BIT(18) | BIT(29));
+ mt76_set(dev, MT_WF_PHY_R0_PHYMUX_5(ext_phy), 0x5 << 12);
+}
+
+void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
+
+ mt76_clear(dev, reg, GENMASK(22, 20));
+ mt76_set(dev, reg, BIT(22) | BIT(20));
}
static void
-mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
+mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
u32 rts_err_rate, bool ofdm)
{
- int false_cca = ofdm ? dev->false_cca_ofdm : dev->false_cca_cck;
+ struct mt7615_dev *dev = phy->dev;
+ int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
+ bool ext_phy = phy != &dev->phy;
u16 def_th = ofdm ? -98 : -110;
bool update = false;
s8 *sensitivity;
int signal;
- sensitivity = ofdm ? &dev->ofdm_sensitivity : &dev->cck_sensitivity;
- signal = mt76_get_min_avg_rssi(&dev->mt76);
+ sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
+ signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
if (!signal) {
- mt7615_mac_set_default_sensitivity(dev);
+ mt7615_mac_set_default_sensitivity(phy);
return;
}
@@ -1303,99 +1630,155 @@ mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
u16 val;
if (ofdm) {
- /* DBDC not supported */
val = *sensitivity * 2 + 512;
- mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_OFDM_MASK,
- MT_WF_PHY_B0_PD_OFDM(val));
+ mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(ext_phy),
+ MT_WF_PHY_PD_OFDM(ext_phy, val));
} else {
val = *sensitivity + 256;
- mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
- MT_WF_PHY_B0_PD_CCK_MASK,
- MT_WF_PHY_B0_PD_CCK(val));
- mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
- MT_WF_PHY_B1_PD_CCK_MASK,
- MT_WF_PHY_B1_PD_CCK(val));
+ mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, val));
}
- dev->last_cca_adj = jiffies;
+ phy->last_cca_adj = jiffies;
}
}
static void
-mt7615_mac_scs_check(struct mt7615_dev *dev)
+mt7615_mac_scs_check(struct mt7615_phy *phy)
{
- u32 val, rts_cnt = 0, rts_retries_cnt = 0, rts_err_rate = 0;
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ u32 val, rts_err_rate = 0;
u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
- int i;
+ bool ext_phy = phy != &dev->phy;
if (!dev->scs_en)
return;
- for (i = 0; i < 4; i++) {
- u32 data;
-
- val = mt76_rr(dev, MT_MIB_MB_SDR0(i));
- data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- if (data > rts_retries_cnt) {
- rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- rts_retries_cnt = data;
- }
- }
-
- val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS0);
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
- val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS5);
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
- dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
- dev->false_cca_cck = pd_cck - mdrdy_cck;
- mt7615_mac_cca_stats_reset(dev);
+ phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
+ phy->false_cca_cck = pd_cck - mdrdy_cck;
+ mt7615_mac_cca_stats_reset(phy);
- if (rts_cnt + rts_retries_cnt)
- rts_err_rate = MT_FRAC(rts_retries_cnt,
- rts_cnt + rts_retries_cnt);
+ if (mib->rts_cnt + mib->rts_retries_cnt)
+ rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
+ mib->rts_cnt + mib->rts_retries_cnt);
/* cck */
- mt7615_mac_adjust_sensitivity(dev, rts_err_rate, false);
+ mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
/* ofdm */
- mt7615_mac_adjust_sensitivity(dev, rts_err_rate, true);
+ mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);
- if (time_after(jiffies, dev->last_cca_adj + 10 * HZ))
- mt7615_mac_set_default_sensitivity(dev);
+ if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
+ mt7615_mac_set_default_sensitivity(phy);
}
-void mt7615_update_channel(struct mt76_dev *mdev)
+static u8
+mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
+ u32 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);
+ u32 val, sum = 0, n = 0;
+ int i;
+
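+ /* read the per-bin sample counters and return the count-weighted
+ * average of the bin noise powers (values are in -dBm)
+ */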
+ for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
+ val = mt76_rr(dev, reg);
+ sum += val * nf_power[i];
+ n += val;
+ }
+
+ if (!n)
+ return 0;
+
+ return sum / n;
+}
+
+static void
+mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
+{
+ struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
+ struct mt7615_phy *phy = mphy->priv;
struct mt76_channel_state *state;
u64 busy_time, tx_time, rx_time, obss_time;
+ u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
+ int nf;
- /* TODO: add DBDC support */
- busy_time = mt76_get_field(dev, MT_MIB_SDR9(0),
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
MT_MIB_SDR9_BUSY_MASK);
- tx_time = mt76_get_field(dev, MT_MIB_SDR36(0),
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
MT_MIB_SDR36_TXTIME_MASK);
- rx_time = mt76_get_field(dev, MT_MIB_SDR37(0),
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
MT_MIB_SDR37_RXTIME_MASK);
- obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_TIME5,
- MT_MIB_OBSSTIME_MASK);
+ obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);
+
+ nf = mt7615_phy_get_nf(dev, idx);
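+ /* phy->noise is the running average scaled by 16; new samples are
+ * mixed in with 1/16 weight
+ */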
+ if (!phy->noise)
+ phy->noise = nf << 4;
+ else if (nf)
+ phy->noise += nf - (phy->noise >> 4);
- state = mdev->chan_state;
+ state = mphy->chan_state;
state->cc_busy += busy_time;
state->cc_tx += tx_time;
state->cc_rx += rx_time + obss_time;
state->cc_bss_rx += rx_time;
+ state->noise = -(phy->noise >> 4);
+}
+
+void mt7615_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_phy_update_channel(&mdev->phy, 0);
+ if (mdev->phy2)
+ mt7615_phy_update_channel(mdev->phy2, 1);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
+static void
+mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ bool ext_phy = phy != &dev->phy;
+ int i;
+
+ memset(mib, 0, sizeof(*mib));
+
+ mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+
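+ /* keep the highest ACK-failure and RTS-retry counts seen across the
+ * four hardware counter sets
+ */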
+ for (i = 0; i < 4; i++) {
+ u32 data, val, val2;
+
+ val = mt76_get_field(dev, MT_MIB_MB_SDR1(ext_phy, i),
+ MT_MIB_ACK_FAIL_COUNT_MASK);
+ if (val > mib->ack_fail_cnt)
+ mib->ack_fail_cnt = val;
+
+ val2 = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+ data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val2);
+ if (data > mib->rts_retries_cnt) {
+ mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val2);
+ mib->rts_retries_cnt = data;
+ }
+ }
+}
+
void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_dev *dev;
+ struct mt7615_phy *ext_phy;
int i, idx;
dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
@@ -1404,7 +1787,15 @@ void mt7615_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
mt76_update_survey(&dev->mt76);
if (++dev->mac_work_count == 5) {
- mt7615_mac_scs_check(dev);
+ ext_phy = mt7615_ext_phy(dev);
+
+ mt7615_mac_update_mib_stats(&dev->phy);
+ mt7615_mac_scs_check(&dev->phy);
+ if (ext_phy) {
+ mt7615_mac_update_mib_stats(ext_phy);
+ mt7615_mac_scs_check(ext_phy);
+ }
+
dev->mac_work_count = 0;
}
@@ -1421,21 +1812,140 @@ void mt7615_mac_work(struct work_struct *work)
MT7615_WATCHDOG_TIME);
}
-int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev)
+static bool
+mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
- int err;
+ bool ret;
- err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7615_RESET_TIMEOUT);
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
- if (chandef->width == NL80211_CHAN_WIDTH_160 ||
- chandef->width == NL80211_CHAN_WIDTH_80P80)
- err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD1,
- MT_RX_SEL0, 0);
- return err;
+}
+{
+ struct ieee80211_hw *hw = priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+
+ mt7615_mcu_add_beacon(dev, hw, vif, vif->bss_conf.enable_beacon);
+}
+
+static void
+mt7615_update_beacons(struct mt7615_dev *dev)
+{
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.hw);
+
+ if (!dev->mt76.phy2)
+ return;
+
+ ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.phy2->hw);
+}
+
+static void
+mt7615_dma_reset(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_queue_rx_reset(dev, i);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+}
+
+void mt7615_mac_reset_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, reset_work);
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (dev->mt76.phy2)
+ ieee80211_stop_queues(dev->mt76.phy2->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (dev->mt76.phy2)
+ mt76_txq_schedule_all(dev->mt76.phy2);
+
+ tasklet_disable(&dev->mt76.tx_tasklet);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
+
+ if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7615_dma_reset(dev);
+
+ mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ tasklet_enable(&dev->mt76.tx_tasklet);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (dev->mt76.phy2)
+ ieee80211_wake_queues(dev->mt76.phy2->hw);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt7615_update_beacons(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+}
+
+static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+
+ if (phy->rdd_state & BIT(0))
+ mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ if (phy->rdd_state & BIT(1))
+ mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
}
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
@@ -1450,61 +1960,112 @@ static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
MT_RX_SEL0, 1);
}
-int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev)
+static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int err;
/* start CAC */
- err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, MT_HW_RDD0,
- MT_RX_SEL0, 0);
+ err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
if (err < 0)
return err;
- /* TODO: DBDC support */
-
- err = mt7615_dfs_start_rdd(dev, MT_HW_RDD0);
+ err = mt7615_dfs_start_rdd(dev, ext_phy);
if (err < 0)
return err;
+ phy->rdd_state |= BIT(ext_phy);
+
if (chandef->width == NL80211_CHAN_WIDTH_160 ||
chandef->width == NL80211_CHAN_WIDTH_80P80) {
- err = mt7615_dfs_start_rdd(dev, MT_HW_RDD1);
+ err = mt7615_dfs_start_rdd(dev, 1);
if (err < 0)
return err;
+
+ phy->rdd_state |= BIT(1);
}
return 0;
}
-int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev)
+static int
+mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
+{
+ const struct mt7615_dfs_radar_spec *radar_specs;
+ struct mt7615_dev *dev = phy->dev;
+ int err, i;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs;
+ err = mt7615_mcu_set_fcc5_lpn(dev, 8);
+ if (err < 0)
+ return err;
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs;
+ break;
+ case NL80211_DFS_JP:
+ radar_specs = &jp_radar_specs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
+ err = mt7615_mcu_set_radar_th(dev, i,
+ &radar_specs->radar_pattern[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
+}
+
+int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int err;
- if (dev->mt76.region == NL80211_DFS_UNSET)
+ if (dev->mt76.region == NL80211_DFS_UNSET) {
+ phy->dfs_state = -1;
+ if (phy->rdd_state)
+ goto stop;
+
return 0;
+ }
- if (test_bit(MT76_SCANNING, &dev->mt76.state))
+ if (test_bit(MT76_SCANNING, &phy->mt76->state))
return 0;
- if (dev->dfs_state == chandef->chan->dfs_state)
+ if (phy->dfs_state == chandef->chan->dfs_state)
return 0;
- dev->dfs_state = chandef->chan->dfs_state;
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0) {
+ phy->dfs_state = -1;
+ goto stop;
+ }
+
+ phy->dfs_state = chandef->chan->dfs_state;
if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7615_dfs_start_radar_detector(dev);
- else
- return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- } else {
- err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START,
- MT_HW_RDD0, MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ return mt7615_dfs_start_radar_detector(phy);
- return mt7615_dfs_stop_radar_detector(dev);
+ return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
+ MT_RX_SEL0, 0);
}
+
+stop:
+ err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ mt7615_dfs_stop_radar_detector(phy);
+ return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index 38695d4f92e2..e0b89257db90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -8,6 +8,7 @@
#define MT_CT_DMA_BUF_NUM 2
#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_FLAG GENMASK(19, 16)
#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
@@ -26,7 +27,8 @@ enum rx_pkt_type {
PKT_TYPE_RX_TMR,
PKT_TYPE_RETRIEVE,
PKT_TYPE_TXRX_NOTIFY,
- PKT_TYPE_RX_EVENT
+ PKT_TYPE_RX_EVENT,
+ PKT_TYPE_NORMAL_MCU,
};
#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
@@ -103,6 +105,11 @@ enum rx_pkt_type {
#define MT_RXV4_RCPI1 GENMASK(15, 8)
#define MT_RXV4_RCPI0 GENMASK(7, 0)
+#define MT_RXV6_NF3 GENMASK(31, 24)
+#define MT_RXV6_NF2 GENMASK(23, 16)
+#define MT_RXV6_NF1 GENMASK(15, 8)
+#define MT_RXV6_NF0 GENMASK(7, 0)
+
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
@@ -126,6 +133,10 @@ enum tx_pkt_queue_idx {
MT_LMAC_BMC0,
MT_LMAC_BCN0,
MT_LMAC_PSMP0,
+ MT_LMAC_ALTX1,
+ MT_LMAC_BMC1,
+ MT_LMAC_BCN1,
+ MT_LMAC_PSMP1,
};
enum tx_port_idx {
@@ -220,8 +231,15 @@ enum tx_phy_bandwidth {
#define MT_TXD6_FIXED_BW BIT(2)
#define MT_TXD6_BW GENMASK(1, 0)
+/* MT7663 DW7 HW-AMSDU */
+#define MT_TXD7_HW_AMSDU_CAP BIT(30)
#define MT_TXD7_TYPE GENMASK(21, 20)
#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+#define MT_TXD7_SPE_IDX GENMASK(15, 11)
+#define MT_TXD7_SPE_IDX_SLE BIT(10)
+
+#define MT_TXD8_L_TYPE GENMASK(5, 4)
+#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
#define MT_TX_RATE_STBC BIT(11)
#define MT_TX_RATE_NSS GENMASK(10, 9)
@@ -229,8 +247,27 @@ enum tx_phy_bandwidth {
#define MT_TX_RATE_IDX GENMASK(5, 0)
#define MT_TXP_MAX_BUF_NUM 6
+#define MT_HW_TXP_MAX_MSDU_NUM 4
+#define MT_HW_TXP_MAX_BUF_NUM 4
+
+#define MT_MSDU_ID_VALID BIT(15)
+
+#define MT_TXD_LEN_MSDU_LAST BIT(14)
+#define MT_TXD_LEN_AMSDU_LAST BIT(15)
-struct mt7615_txp {
+struct mt7615_txp_ptr {
+ __le32 buf0;
+ __le16 len0;
+ __le16 len1;
+ __le32 buf1;
+} __packed __aligned(4);
+
+struct mt7615_hw_txp {
+ __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
+ struct mt7615_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
+} __packed __aligned(4);
+
+struct mt7615_fw_txp {
__le16 flags;
__le16 token;
u8 bss_idx;
@@ -239,7 +276,14 @@ struct mt7615_txp {
u8 nbuf;
__le32 buf[MT_TXP_MAX_BUF_NUM];
__le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed;
+} __packed __aligned(4);
+
+struct mt7615_txp_common {
+ union {
+ struct mt7615_fw_txp fw;
+ struct mt7615_hw_txp hw;
+ };
+};
struct mt7615_tx_free {
__le16 rx_byte_cnt;
@@ -247,7 +291,7 @@ struct mt7615_tx_free {
u8 txd_cnt;
u8 rsv[3];
__le16 token[];
-} __packed;
+} __packed __aligned(4);
#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
@@ -302,6 +346,38 @@ struct mt7615_tx_free {
#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8)
#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0)
+struct mt7615_dfs_pulse {
+ u32 max_width; /* us */
+ int max_pwr; /* dBm */
+ int min_pwr; /* dBm */
+ u32 min_stgr_pri; /* us */
+ u32 max_stgr_pri; /* us */
+ u32 min_cr_pri; /* us */
+ u32 max_cr_pri; /* us */
+};
+
+struct mt7615_dfs_pattern {
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u8 max_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+};
+
+struct mt7615_dfs_radar_spec {
+ struct mt7615_dfs_pulse pulse_th;
+ struct mt7615_dfs_pattern radar_pattern[16];
+};
+
enum mt7615_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
@@ -317,7 +393,7 @@ enum mt7615_cipher_type {
MT_CIPHER_GCMP_256,
};
-static inline struct mt7615_txp *
+static inline struct mt7615_txp_common *
mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
u8 *txwi;
@@ -327,7 +403,7 @@ mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
txwi = mt76_get_txwi_ptr(dev, t);
- return (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+ return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE);
}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 070b03403894..6586176c29af 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -11,27 +11,85 @@
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7615.h"
+#include "mcu.h"
+
+static bool mt7615_dev_running(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return true;
+
+ phy = mt7615_ext_phy(dev);
+
+ return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+}
static int mt7615_start(struct ieee80211_hw *hw)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool running;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return -EIO;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ running = mt7615_dev_running(dev);
+
+ if (!running) {
+ mt7615_mcu_set_pm(dev, 0, 0);
+ mt7615_mcu_set_mac_enable(dev, 0, true);
+ mt7615_mac_enable_nf(dev, 0);
+ }
+
+ if (phy != &dev->phy) {
+ mt7615_mcu_set_pm(dev, 1, 0);
+ mt7615_mcu_set_mac_enable(dev, 1, true);
+ mt7615_mac_enable_nf(dev, 1);
+ }
+
+ mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (running)
+ goto out;
mt7615_mac_reset_counters(dev);
- dev->mt76.survey_time = ktime_get_boottime();
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT7615_WATCHDOG_TIME);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
return 0;
}
static void mt7615_stop(struct ieee80211_hw *hw)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ mutex_lock(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (phy != &dev->phy) {
+ mt7615_mcu_set_pm(dev, 1, 1);
+ mt7615_mcu_set_mac_enable(dev, 1, false);
+ }
+
+ if (!mt7615_dev_running(dev)) {
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+
+ mt7615_mcu_set_pm(dev, 0, 1);
+ mt7615_mcu_set_mac_enable(dev, 0, false);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
}
static int get_omac_idx(enum nl80211_iftype type, u32 mask)
@@ -39,6 +97,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
int i;
switch (type) {
+ case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
@@ -70,8 +129,10 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt76_txq *mtxq;
+ bool ext_phy = phy != &dev->phy;
int idx, ret = 0;
mutex_lock(&dev->mt76.mutex);
@@ -89,28 +150,38 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
}
mvif->omac_idx = idx;
- /* TODO: DBDC support. Use band 0 for now */
- mvif->band_idx = 0;
- mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
+ mvif->band_idx = ext_phy;
+ if (mt7615_ext_phy(dev))
+ mvif->wmm_idx = ext_phy * (MT7615_MAX_WMM_SETS / 2) +
+ mvif->idx % (MT7615_MAX_WMM_SETS / 2);
+ else
+ mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
- ret = mt7615_mcu_set_dev_info(dev, vif, 1);
+ ret = mt7615_mcu_add_dev_info(dev, vif, true);
if (ret)
goto out;
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
+ phy->omac_mask |= BIT(mvif->omac_idx);
+
+ mt7615_mcu_set_dbdc(dev);
+
idx = MT7615_WTBL_RESERVED - mvif->idx;
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.ext_phy = mvif->band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
- mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ mt76_txq_init(&dev->mt76, vif->txq);
+ }
out:
mutex_unlock(&dev->mt76.mutex);
@@ -123,19 +194,22 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = &mvif->sta;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
int idx = msta->wcid.idx;
/* TODO: disable beacon for the bss */
- mt7615_mcu_set_dev_info(dev, vif, 0);
+ mt7615_mcu_add_dev_info(dev, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mt76_txq_remove(&dev->mt76, vif->txq);
+ if (vif->txq)
+ mt76_txq_remove(&dev->mt76, vif->txq);
mutex_lock(&dev->mt76.mutex);
dev->vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
+ phy->omac_mask &= ~BIT(mvif->omac_idx);
mutex_unlock(&dev->mt76.mutex);
spin_lock_bh(&dev->sta_poll_lock);
@@ -144,34 +218,38 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&dev->sta_poll_lock);
}
-static int mt7615_set_channel(struct mt7615_dev *dev)
+static int mt7615_set_channel(struct mt7615_phy *phy)
{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int ret;
cancel_delayed_work_sync(&dev->mt76.mac_work);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &phy->mt76->state);
- mt7615_dfs_check_channel(dev);
+ phy->dfs_state = -1;
+ mt76_set_channel(phy->mt76);
- mt76_set_channel(&dev->mt76);
-
- ret = mt7615_mcu_set_channel(dev);
+ ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
if (ret)
goto out;
- ret = mt7615_dfs_init_radar_detector(dev);
- mt7615_mac_cca_stats_reset(dev);
- dev->mt76.survey_time = ktime_get_boottime();
+ mt7615_mac_set_timing(phy);
+ ret = mt7615_dfs_init_radar_detector(phy);
+ mt7615_mac_cca_stats_reset(phy);
+ mt7615_mcu_set_sku_en(phy, true);
mt7615_mac_reset_counters(dev);
+ phy->noise = 0;
+ phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
out:
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &phy->mt76->state);
mutex_unlock(&dev->mt76.mutex);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(phy->mt76);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT7615_WATCHDOG_TIME);
return ret;
@@ -181,7 +259,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
@@ -230,27 +308,27 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool band = phy != &dev->phy;
int ret = 0;
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER)) {
ieee80211_stop_queues(hw);
- ret = mt7615_set_channel(dev);
+ ret = mt7615_set_channel(phy);
ieee80211_wake_queues(hw);
}
mutex_lock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_POWER)
- ret = mt7615_mcu_set_tx_power(dev);
-
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
- dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
else
- dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
- mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
}
mutex_unlock(&dev->mt76.mutex);
@@ -263,7 +341,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
@@ -275,7 +353,10 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool band = phy != &dev->phy;
+
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@@ -285,21 +366,21 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
- dev->mt76.rxfilter &= ~(_hw); \
- dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ phy->rxfilter &= ~(_hw); \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
- dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
@@ -313,12 +394,12 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_NDPA);
*total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
if (*total_flags & FIF_CONTROL)
- mt76_clear(dev, MT_WF_RFCR1, ctl_flags);
+ mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
else
- mt76_set(dev, MT_WF_RFCR1, ctl_flags);
+ mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
}
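The MT76_FILTER() macro above sets a hardware drop bit exactly when its FIF_* flag is absent, multiplying the mask by a 0/1 boolean instead of branching. A minimal userspace sketch of the pattern; the flag and drop-bit values are illustrative, not the real register layout:

#include <stdint.h>
#include <stdio.h>

#define FIF_PROBE_REQ 0x1  /* illustrative mac80211-style flag */
#define DROP_PROBEREQ 0x10 /* illustrative hardware drop bit */

static uint32_t apply_filter(uint32_t rxfilter, uint32_t flags)
{
	rxfilter &= ~(uint32_t)DROP_PROBEREQ;
	/* re-arm the drop bit only when the flag is NOT requested */
	rxfilter |= !(flags & FIF_PROBE_REQ) * DROP_PROBEREQ;
	return rxfilter;
}

int main(void)
{
	printf("pass probereq: %#x\n", (unsigned)apply_filter(0, FIF_PROBE_REQ));
	printf("drop probereq: %#x\n", (unsigned)apply_filter(0, 0));
	return 0;
}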
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
@@ -326,24 +407,32 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *info,
u32 changed)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_ASSOC)
- mt7615_mcu_set_bss_info(dev, vif, info->assoc);
+ mt7615_mcu_add_bss_info(dev, vif, info->assoc);
- /* TODO: update beacon content
- * BSS_CHANGED_BEACON
- */
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7615_mac_set_timing(phy);
+ }
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- mt7615_mcu_set_bss_info(dev, vif, info->enable_beacon);
- mt7615_mcu_wtbl_bmc(dev, vif, info->enable_beacon);
- mt7615_mcu_set_sta_rec_bmc(dev, vif, info->enable_beacon);
- mt7615_mcu_set_bcn(dev, vif, info->enable_beacon);
+ mt7615_mcu_add_bss_info(dev, vif, info->enable_beacon);
+ mt7615_mcu_sta_add(dev, vif, NULL, info->enable_beacon);
}
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon);
+
mutex_unlock(&dev->mt76.mutex);
}
@@ -352,15 +441,15 @@ mt7615_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7615_mcu_set_bcn(dev, vif, true);
+ mt7615_mcu_add_beacon(dev, hw, vif, true);
mutex_unlock(&dev->mt76.mutex);
}
-int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
@@ -375,33 +464,23 @@ int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
+ msta->wcid.ext_phy = mvif->band_idx;
+
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- mt7615_mcu_add_wtbl(dev, vif, sta);
- mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
+ mt7615_mcu_sta_add(dev, vif, sta, true);
return 0;
}
-void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-
- if (sta->ht_cap.ht_supported)
- mt7615_mcu_set_ht_cap(dev, vif, sta);
-}
-
-void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
- mt7615_mcu_del_wtbl(dev, sta);
-
+ mt7615_mcu_sta_add(dev, vif, sta, false);
mt7615_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -415,7 +494,8 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
@@ -430,7 +510,7 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
break;
}
msta->n_rates = i;
- mt7615_mac_set_rates(dev, msta, NULL, msta->rates);
+ mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
msta->rate_probe = false;
spin_unlock_bh(&dev->mt76.lock);
}
@@ -439,7 +519,8 @@ static void mt7615_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
@@ -458,15 +539,16 @@ static void mt7615_tx(struct ieee80211_hw *hw,
wcid = &mvif->sta.wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(mphy, control->sta, wcid, skb);
}
static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
mutex_lock(&dev->mt76.mutex);
- mt7615_mcu_set_rts_thresh(dev, val);
+ mt7615_mcu_set_rts_thresh(phy, val);
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -477,7 +559,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
enum ieee80211_ampdu_mlme_action action = params->action;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct ieee80211_sta *sta = params->sta;
struct ieee80211_txq *txq = sta->txq[params->tid];
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
@@ -496,21 +578,21 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
params->buf_size);
- mt7615_mcu_set_rx_ba(dev, params, 1);
+ mt7615_mcu_add_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt7615_mcu_set_rx_ba(dev, params, 0);
+ mt7615_mcu_add_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7615_mcu_set_tx_ba(dev, params, 1);
+ mt7615_mcu_add_tx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- mt7615_mcu_set_tx_ba(dev, params, 0);
+ mt7615_mcu_add_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
@@ -518,7 +600,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- mt7615_mcu_set_tx_ba(dev, params, 0);
+ mt7615_mcu_add_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -527,6 +609,98 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return ret;
}
+static int
+mt7615_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7615_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7615_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mib_stats *mib = &phy->mib;
+
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ return 0;
+}
+
+static u64
+mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return tsf.t64;
+}
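mt7615_get_tsf() latches the running timer with MT_LPON_T0CR_MODE and then assembles the 64-bit TSF from two 32-bit register reads through a union. A standalone sketch of that assembly, assuming a little-endian host as the driver does, with the register reads stubbed out:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_uttr0(void) { return 0xdeadbeef; } /* low word stub */
static uint32_t read_uttr1(void) { return 0x00000001; } /* high word stub */

int main(void)
{
	union {
		uint64_t t64;
		uint32_t t32[2];
	} tsf;

	tsf.t32[0] = read_uttr0(); /* low 32 bits land first on LE */
	tsf.t32[1] = read_uttr1();
	printf("tsf = %#llx\n", (unsigned long long)tsf.t64); /* 0x1deadbeef */
	return 0;
}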
+
+static void
+mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7615_mac_set_timing(phy);
+}
+
+static int
+mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
+ if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+ tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->mt76->antenna_mask = tx_ant;
+ if (ext_phy) {
+ if (dev->chainmask == 0xf)
+ tx_ant <<= 2;
+ else
+ tx_ant <<= 1;
+ }
+ phy->chainmask = tx_ant;
+
+ mt76_set_stream_caps(&dev->mt76, true);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
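The (BIT(hweight8(tx_ant)) - 1) != tx_ant test above checks that the requested antenna mask selects chains 0..n-1 with no holes. The same contiguity test as a tiny sketch, using the GCC/Clang __builtin_popcount as a stand-in for hweight8:

#include <stdio.h>

static int is_contiguous(unsigned int mask)
{
	/* a gapless mask starting at bit 0 equals (1 << popcount) - 1 */
	return mask && ((1u << __builtin_popcount(mask)) - 1) == mask;
}

int main(void)
{
	printf("0x3: %d\n", is_contiguous(0x3)); /* chains 0-1 -> 1 */
	printf("0x5: %d\n", is_contiguous(0x5)); /* hole at bit 1 -> 0 */
	return 0;
}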
+
const struct ieee80211_ops mt7615_ops = {
.tx = mt7615_tx,
.start = mt7615_start,
@@ -537,7 +711,9 @@ const struct ieee80211_ops mt7615_ops = {
.conf_tx = mt7615_conf_tx,
.configure_filter = mt7615_configure_filter,
.bss_info_changed = mt7615_bss_info_changed,
- .sta_state = mt76_sta_state,
+ .sta_add = mt7615_sta_add,
+ .sta_remove = mt7615_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7615_set_key,
.ampdu_action = mt7615_ampdu_action,
.set_rts_threshold = mt7615_set_rts_threshold,
@@ -548,6 +724,38 @@ const struct ieee80211_ops mt7615_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.channel_switch_beacon = mt7615_channel_switch_beacon,
+ .get_stats = mt7615_get_stats,
+ .get_tsf = mt7615_get_tsf,
.get_survey = mt76_get_survey,
.get_antenna = mt76_get_antenna,
+ .set_antenna = mt7615_set_antenna,
+ .set_coverage_class = mt7615_set_coverage_class,
};
+
+static int __init mt7615_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7615_pci_driver);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
+ ret = platform_driver_register(&mt7622_wmac_driver);
+ if (ret)
+ pci_unregister_driver(&mt7615_pci_driver);
+ }
+
+ return ret;
+}
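mt7615_init() registers two drivers and rolls the first back if the second fails, so a partial load never sticks. A hedged sketch of that register-then-unwind shape, with stub functions standing in for the PCI and platform driver calls:

#include <stdio.h>

static int register_a(void) { return 0; }
static void unregister_a(void) { puts("rolled back a"); }
static int register_b(void) { return -1; } /* simulate failure */

static int init(void)
{
	int ret = register_a();

	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		unregister_a(); /* undo the first registration */

	return ret;
}

int main(void)
{
	printf("init: %d\n", init());
	return 0;
}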
+
+static void __exit mt7615_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT7622_WMAC))
+ platform_driver_unregister(&mt7622_wmac_driver);
+ pci_unregister_driver(&mt7615_pci_driver);
+}
+
+module_init(mt7615_init);
+module_exit(mt7615_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index f229c9ce9f65..610cfa918c7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -29,7 +29,37 @@ struct mt7615_fw_trailer {
__le32 len;
} __packed;
-#define MCU_PATCH_ADDRESS 0x80000
+#define FW_V3_COMMON_TAILER_SIZE 36
+#define FW_V3_REGION_TAILER_SIZE 40
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_DLYCAL BIT(1)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+struct mt7663_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 reserv[2];
+ char fw_ver[10];
+ char build_date[15];
+ __le32 crc;
+} __packed;
+
+struct mt7663_fw_buf {
+ __le32 crc;
+ __le32 d_img_size;
+ __le32 block_size;
+ u8 rsv[4];
+ __le32 img_dest_addr;
+ __le32 img_size;
+ u8 feature_set;
+};
+
+#define MT7615_PATCH_ADDRESS 0x80000
+#define MT7622_PATCH_ADDRESS 0x9c000
+#define MT7663_PATCH_ADDRESS 0xdc000
#define N9_REGION_NUM 2
#define CR4_REGION_NUM 1
@@ -43,29 +73,32 @@ struct mt7615_fw_trailer {
#define DL_MODE_KEY_IDX GENMASK(2, 1)
#define DL_MODE_RESET_SEC_IV BIT(3)
#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_VALID_RAM_ENTRY BIT(5)
#define DL_MODE_NEED_RSP BIT(31)
#define FW_START_OVERRIDE BIT(0)
#define FW_START_WORKING_PDA_CR4 BIT(2)
-static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
- int cmd, int *wait_seq)
+void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
{
+ int txd_len, mcu_cmd = cmd & MCU_CMD_MASK;
+ struct mt7615_uni_txd *uni_txd;
struct mt7615_mcu_txd *mcu_txd;
u8 seq, q_idx, pkt_fmt;
- enum mt76_txq_id qid;
- u32 val;
__le32 *txd;
+ u32 val;
- seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (!seq)
- seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ if (wait_seq)
+ *wait_seq = seq;
- mcu_txd = (struct mt7615_mcu_txd *)skb_push(skb,
- sizeof(struct mt7615_mcu_txd));
- memset(mcu_txd, 0, sizeof(struct mt7615_mcu_txd));
+ txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd = (__le32 *)skb_push(skb, txd_len);
- if (cmd != -MCU_CMD_FW_SCATTER) {
+ if (cmd != MCU_CMD_FW_SCATTER) {
q_idx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
} else {
@@ -73,8 +106,6 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
pkt_fmt = MT_TX_TYPE_FW;
}
- txd = mcu_txd->txd;
-
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) |
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
@@ -85,26 +116,43 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt);
txd[1] = cpu_to_le32(val);
+ if (cmd & MCU_UNI_PREFIX) {
+ uni_txd = (struct mt7615_uni_txd *)txd;
+ uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ uni_txd->cid = cpu_to_le16(mcu_cmd);
+ uni_txd->s2d_index = MCU_S2D_H2N;
+ uni_txd->pkt_type = MCU_PKT_ID;
+ uni_txd->seq = seq;
+
+ return;
+ }
+
+ mcu_txd = (struct mt7615_mcu_txd *)txd;
mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx));
+ mcu_txd->s2d_index = MCU_S2D_H2N;
mcu_txd->pkt_type = MCU_PKT_ID;
mcu_txd->seq = seq;
- if (cmd < 0) {
+ if (cmd & MCU_FW_PREFIX) {
mcu_txd->set_query = MCU_Q_NA;
- mcu_txd->cid = -cmd;
+ mcu_txd->cid = mcu_cmd;
} else {
mcu_txd->cid = MCU_CMD_EXT_CID;
mcu_txd->set_query = MCU_Q_SET;
mcu_txd->ext_cid = cmd;
mcu_txd->ext_cid_ack = 1;
}
- mcu_txd->s2d_index = MCU_S2D_H2N;
+}
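mt7615_mcu_fill_msg() draws sequence numbers from a 4-bit counter and skips 0, since a zero sequence cannot be matched against a response. A standalone sketch of the allocator:

#include <stdio.h>

static unsigned int msg_seq;

static unsigned int next_seq(void)
{
	unsigned int seq = ++msg_seq & 0xf;

	if (!seq) /* 0 means "nothing to wait for"; never hand it out */
		seq = ++msg_seq & 0xf;
	return seq;
}

int main(void)
{
	for (int i = 0; i < 17; i++)
		printf("%u ", next_seq());
	printf("\n"); /* 1..15 then wraps back to 1; 0 never appears */
	return 0;
}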
- if (wait_seq)
- *wait_seq = seq;
+static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ enum mt76_txq_id qid;
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state))
+ mt7615_mcu_fill_msg(dev, skb, cmd, wait_seq);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
qid = MT_TXQ_MCU;
else
qid = MT_TXQ_FWDL;
@@ -123,7 +171,7 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
return -EAGAIN;
switch (cmd) {
- case -MCU_CMD_PATCH_SEM_CONTROL:
+ case MCU_CMD_PATCH_SEM_CONTROL:
skb_pull(skb, sizeof(*rxd) - 4);
ret = *skb->data;
break;
@@ -139,32 +187,18 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
return ret;
}
-static int
-mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
- int len, bool wait_resp)
+int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- unsigned long expires = jiffies + 10 * HZ;
+ unsigned long expires = jiffies + 20 * HZ;
struct sk_buff *skb;
- int ret, seq;
-
- skb = mt7615_mcu_msg_alloc(data, len);
- if (!skb)
- return -ENOMEM;
-
- mutex_lock(&mdev->mmio.mcu.mutex);
-
- ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
- if (ret)
- goto out;
+ int ret = 0;
- while (wait_resp) {
- skb = mt76_mcu_get_response(mdev, expires);
+ while (true) {
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
if (!skb) {
- dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
- cmd, seq);
- ret = -ETIMEDOUT;
- break;
+ dev_err(dev->mt76.dev, "Message %ld (seq %d) timeout\n",
+ cmd & MCU_CMD_MASK, seq);
+ return -ETIMEDOUT;
}
ret = mt7615_mcu_parse_response(dev, cmd, skb, seq);
@@ -172,12 +206,44 @@ mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
break;
}
+ return ret;
+}
+
+static int
+mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ int ret, seq;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt7615_mcu_wait_response(dev, cmd, seq);
+
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
+static int
+mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
+{
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp);
+}
+
static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
@@ -186,20 +252,59 @@ mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
+mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_mcu_rdd_report *r;
+
+ r = (struct mt7615_mcu_rdd_report *)skb->data;
+
+ if (r->idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_radar_detected(mphy->hw);
+ dev->hw_pattern++;
+}
+
+static void
+mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+ const char *data = (char *)&rxd[1];
+ const char *type;
+
+ switch (rxd->s2d_index) {
+ case 0:
+ type = "N9";
+ break;
+ case 2:
+ type = "CR4";
+ break;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data);
+}
+
+static void
mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
switch (rxd->ext_eid) {
case MCU_EXT_EVENT_RDD_REPORT:
- ieee80211_radar_detected(dev->mt76.hw);
- dev->hw_pattern++;
+ mt7615_mcu_rx_radar_detected(dev, skb);
break;
case MCU_EXT_EVENT_CSA_NOTIFY:
ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_mcu_csa_finish, dev);
break;
+ case MCU_EXT_EVENT_FW_LOG_2_HOST:
+ mt7615_mcu_rx_log_message(dev, skb);
+ break;
default:
break;
}
@@ -247,10 +352,1095 @@ static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
.mode = cpu_to_le32(mode),
};
- return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_add_dev(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 band_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ .band_idx = mvif->band_idx,
+ },
+ };
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
+}
+
+static int
+mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct ieee80211_mutable_offsets offs;
+ struct ieee80211_tx_info *info;
+ struct req {
+ u8 omac_idx;
+ u8 enable;
+ u8 wlan_idx;
+ u8 band_idx;
+ u8 pkt_type;
+ u8 need_pre_tbtt_int;
+ __le16 csa_ie_pos;
+ __le16 pkt_len;
+ __le16 tim_ie_pos;
+ u8 pkt[512];
+ u8 csa_cnt;
+ /* bss color change */
+ u8 bcc_cnt;
+ __le16 bcc_ie_pos;
+ } __packed req = {
+ .omac_idx = mvif->omac_idx,
+ .enable = enable,
+ .wlan_idx = wcid->idx,
+ .band_idx = mvif->band_idx,
+ };
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (mvif->band_idx) {
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ }
+
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
+ 0, NULL, true);
+ memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+ if (offs.csa_counter_offs[0]) {
+ u16 csa_offs;
+
+ csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ req.csa_ie_pos = cpu_to_le16(csa_offs);
+ req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
+ }
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD,
+ &req, sizeof(req), true);
+}
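mt7615_mcu_add_beacon_offload() copies the beacon template behind a prepended TX descriptor, so the payload must fit in the fixed 512-byte pkt[] minus MT_TXD_SIZE, and every IE offset reported by mac80211 shifts by that descriptor. A sketch of the accounting; TXD_SIZE and the offsets are made-up stand-ins:

#include <stdio.h>

#define PKT_MAX  512
#define TXD_SIZE 32 /* made-up stand-in for MT_TXD_SIZE */

int main(void)
{
	unsigned int bcn_len = 300, tim_offset = 56; /* fabricated values */

	if (bcn_len > PKT_MAX - TXD_SIZE) {
		puts("Beacon size limit exceeded");
		return 1;
	}
	/* every mac80211 IE offset shifts by the prepended descriptor */
	printf("pkt_len=%u tim_ie_pos=%u\n",
	       TXD_SIZE + bcn_len, TXD_SIZE + tim_offset);
	return 0;
}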
+
+static int
+mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
+{
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 rsv[3];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = state ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
+ &req, sizeof(req), true);
+}
+
+static struct sk_buff *
+mt7615_mcu_alloc_sta_req(struct mt7615_vif *mvif, struct mt7615_sta *msta)
+{
+ struct sta_req_hdr hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx = msta ? msta->wcid.idx : 0,
+ .muar_idx = msta ? mvif->omac_idx : 0,
+ .is_tlv_append = 1,
+ };
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_msg_alloc(NULL, MT7615_STA_UPDATE_MAX_SIZE);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ return skb;
+}
+
+static struct wtbl_req_hdr *
+mt7615_mcu_alloc_wtbl_req(struct mt7615_sta *msta, int cmd,
+ void *sta_wtbl, struct sk_buff **skb)
+{
+ struct tlv *sta_hdr = sta_wtbl;
+ struct wtbl_req_hdr hdr = {
+ .wlan_idx = msta->wcid.idx,
+ .operation = cmd,
+ };
+ struct sk_buff *nskb = *skb;
+
+ if (!nskb) {
+ nskb = mt7615_mcu_msg_alloc(NULL, MT7615_WTBL_UPDATE_BA_SIZE);
+ if (!nskb)
+ return ERR_PTR(-ENOMEM);
+
+ *skb = nskb;
+ }
+
+ if (sta_hdr)
+ sta_hdr->len = cpu_to_le16(sizeof(hdr));
+
+ return skb_put_data(nskb, &hdr, sizeof(hdr));
+}
+
+static struct tlv *
+mt7615_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ void *sta_ntlv, void *sta_wtbl)
+{
+ struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
+ struct tlv *sta_hdr = sta_wtbl;
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(len),
+ };
+ u16 ntlv;
+
+ ptlv = skb_put(skb, len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+ ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
+
+ if (sta_hdr) {
+ u16 size = le16_to_cpu(sta_hdr->len);
+
+ sta_hdr->len = cpu_to_le16(size + len);
+ }
+
+ return ptlv;
+}
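mt7615_mcu_add_nested_tlv() does double bookkeeping: it bumps the tlv_num of the header it is counted under and, when nested, grows the enclosing container's len (in the driver these can be the WTBL request header and the STA_REC_WTBL tlv, respectively). A simplified userspace sketch, ignoring endianness conversion:

#include <stdint.h>
#include <stdio.h>

struct tlv { uint16_t tag, len; };
struct hdr { uint16_t tlv_num; };

static uint8_t buf[256];
static size_t used;

/* append a TLV: count it in h, and grow the container it nests in */
static struct tlv *add_tlv(struct hdr *h, struct tlv *container,
			   uint16_t tag, uint16_t len)
{
	struct tlv *t = (struct tlv *)(buf + used);

	used += len;
	t->tag = tag;
	t->len = len;
	h->tlv_num++;
	if (container)
		container->len += len;
	return t;
}

int main(void)
{
	struct hdr h = { 0 };
	struct tlv *wtbl = add_tlv(&h, NULL, 1, sizeof(struct tlv));

	add_tlv(&h, wtbl, 2, 16); /* a BA tlv nested inside the wtbl tlv */
	printf("tlv_num=%u wtbl len=%u\n",
	       (unsigned)h.tlv_num, (unsigned)wtbl->len);
	return 0;
}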
+
+static struct tlv *
+mt7615_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
+{
+ return mt7615_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
+}
+
+static int
+mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct bss_info_basic *bss;
+ u8 wlan_idx = mvif->sta.wcid.idx;
+ u32 type = NETWORK_INFRA;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (enable) {
+ struct ieee80211_sta *sta;
+ struct mt7615_sta *msta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7615_sta *)sta->drv_priv;
+ wlan_idx = msta->wcid.idx;
+ rcu_read_unlock();
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ bss = (struct bss_info_basic *)tlv;
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->network_type = cpu_to_le32(type);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->bmc_tx_wlan_idx = wlan_idx;
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+
+ return 0;
+}
+
+static void
+mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+ u8 idx;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ omac = (struct bss_info_omac *)tlv;
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = idx;
+}
+
+/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (4096us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+static void
+mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
+{
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+
+static void
+mt7615_mcu_sta_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct sta_rec_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
+
+ ba = (struct sta_rec_ba *)tlv;
+ ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
+ ba->winsize = cpu_to_le16(params->buf_size);
+ ba->ssn = cpu_to_le16(params->ssn);
+ ba->ba_en = enable << params->tid;
+ ba->amsdu = params->amsdu;
+ ba->tid = params->tid;
+}
+
+static void
+mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct sta_rec_basic *basic;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
+
+ basic = (struct sta_rec_basic *)tlv;
+ basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
+
+ if (enable) {
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = CONN_STATE_PORT_SECURE;
+ } else {
+ basic->conn_state = CONN_STATE_DISCONNECT;
+ }
+
+ if (!sta) {
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
+ eth_broadcast_addr(basic->peer_addr);
+ return;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ break;
+ case NL80211_IFTYPE_STATION:
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
+ basic->aid = cpu_to_le16(sta->aid);
+ basic->qos = sta->wme;
+}
+
+static void
+mt7615_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct tlv *tlv;
+
+ if (sta->ht_cap.ht_supported) {
+ struct sta_rec_ht *ht;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ }
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *vht;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ }
+}
+
+static void
+mt7615_mcu_wtbl_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct wtbl_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
+ wtbl_tlv, sta_wtbl);
+
+ ba = (struct wtbl_ba *)tlv;
+ ba->tid = params->tid;
+
+ if (tx) {
+ ba->ba_type = MT_BA_TYPE_ORIGINATOR;
+ ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
+ ba->ba_winsize = cpu_to_le16(params->buf_size);
+ ba->ba_en = enable;
+ } else {
+ memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
+ ba->ba_type = MT_BA_TYPE_RECIPIENT;
+ ba->rst_ba_tid = params->tid;
+ ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ ba->rst_ba_sb = 1;
+ }
+
+ if (enable && tx) {
+ u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+ int i;
+
+ for (i = 7; i > 0; i--) {
+ if (params->buf_size >= ba_range[i])
+ break;
+ }
+ ba->ba_winsize_idx = i;
+ }
+}
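The descending scan above maps a negotiated block-ack window onto the hardware's table of supported sizes, picking the largest entry that does not exceed it. The same lookup as a standalone function:

#include <stdio.h>

static int ba_winsize_idx(unsigned int buf_size)
{
	static const unsigned char ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
	int i;

	for (i = 7; i > 0; i--)
		if (buf_size >= ba_range[i])
			break;
	return i; /* index 0 is the floor even for tiny windows */
}

int main(void)
{
	printf("win 64 -> idx %d\n", ba_winsize_idx(64)); /* 7 */
	printf("win 20 -> idx %d\n", ba_winsize_idx(20)); /* 2: entry 12 */
	printf("win  2 -> idx %d\n", ba_winsize_idx(2));  /* 0 */
	return 0;
}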
+
+static void
+mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_generic *generic;
+ struct wtbl_rx *rx;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
+ wtbl_tlv, sta_wtbl);
+
+ generic = (struct wtbl_generic *)tlv;
+
+ if (sta) {
+ memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
+ generic->partial_aid = cpu_to_le16(sta->aid);
+ generic->muar_idx = mvif->omac_idx;
+ generic->qos = sta->wme;
+ } else {
+ eth_broadcast_addr(generic->peer_addr);
+ generic->muar_idx = 0xe;
+ }
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
+ wtbl_tlv, sta_wtbl);
+
+ rx = (struct wtbl_rx *)tlv;
+ rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
+ rx->rca2 = 1;
+ rx->rv = 1;
+}
+
+static void
+mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct tlv *tlv;
+ u32 flags = 0;
+
+ if (sta->ht_cap.ht_supported) {
+ struct wtbl_ht *ht;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
+ wtbl_tlv, sta_wtbl);
+ ht = (struct wtbl_ht *)tlv;
+ ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ ht->af = sta->ht_cap.ampdu_factor;
+ ht->mm = sta->ht_cap.ampdu_density;
+ ht->ht = 1;
+
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ flags |= MT_WTBL_W5_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ flags |= MT_WTBL_W5_SHORT_GI_40;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *vht;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
+ wtbl_tlv, sta_wtbl);
+ vht = (struct wtbl_vht *)tlv;
+ vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
+ vht->vht = 1;
+
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ flags |= MT_WTBL_W5_SHORT_GI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ flags |= MT_WTBL_W5_SHORT_GI_160;
+ }
+
+ /* wtbl smps */
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
+ struct wtbl_smps *smps;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
+ wtbl_tlv, sta_wtbl);
+ smps = (struct wtbl_smps *)tlv;
+ smps->smps = 1;
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ /* sgi */
+ u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
+ MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
+ struct wtbl_raw *raw;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_RAW_DATA,
+ sizeof(*raw), wtbl_tlv,
+ sta_wtbl);
+ raw = (struct wtbl_raw *)tlv;
+ raw->val = cpu_to_le32(flags);
+ raw->msk = cpu_to_le32(~msk);
+ raw->wtbl_idx = 1;
+ raw->dw = 5;
+ }
+}
+
+static int
+mt7615_mcu_add_bss(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, NULL);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ if (enable)
+ mt7615_mcu_bss_omac_tlv(skb, vif);
+
+ mt7615_mcu_bss_basic_tlv(skb, vif, enable);
+
+ if (enable && mvif->omac_idx > EXT_BSSID_START)
+ mt7615_mcu_bss_ext_tlv(skb, mvif);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
+static int
+mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+ int err;
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, NULL, wtbl_hdr);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_WTBL_UPDATE, true);
+ if (err < 0)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+ if (err < 0 || !enable)
+ return err;
+
+ skb = NULL;
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, NULL, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_WTBL_UPDATE, true);
+}
+
+static int
+mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct sk_buff *skb, *sskb, *wskb = NULL;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt7615_sta *msta;
+ int cmd, err;
+
+ msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
+
+ sskb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(sskb))
+ return PTR_ERR(sskb);
+
+ mt7615_mcu_sta_basic_tlv(sskb, vif, sta, enable);
+ if (enable && sta)
+ mt7615_mcu_sta_ht_tlv(sskb, sta);
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_RESET_AND_SET, NULL,
+ &wskb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ if (enable) {
+ mt7615_mcu_wtbl_generic_tlv(wskb, vif, sta, NULL, wtbl_hdr);
+ if (sta)
+ mt7615_mcu_wtbl_ht_tlv(wskb, sta, NULL, wtbl_hdr);
+ }
+
+ cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
+ skb = enable ? wskb : sskb;
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+ if (err < 0)
+ return err;
+
+ cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE;
+ skb = enable ? sskb : wskb;
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+}
+
+static const struct mt7615_mcu_ops wtbl_update_ops = {
+ .add_beacon_offload = mt7615_mcu_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_add_dev,
+ .add_bss_info = mt7615_mcu_add_bss,
+ .add_tx_ba = mt7615_mcu_wtbl_tx_ba,
+ .add_rx_ba = mt7615_mcu_wtbl_rx_ba,
+ .sta_add = mt7615_mcu_wtbl_sta_add,
+};
+
+static int
+mt7615_mcu_sta_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, tx);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_sta_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7615_mcu_sta_ba(dev, params, enable, true);
+}
+
+static int
+mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7615_mcu_sta_ba(dev, params, enable, false);
+}
+
+static int
+mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable, int cmd)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt7615_sta *msta;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ if (enable && sta)
+ mt7615_mcu_sta_ht_tlv(skb, sta);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_RESET_AND_SET,
+ sta_wtbl, &skb);
+ if (enable) {
+ mt7615_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+ if (sta)
+ mt7615_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+}
+
+static int
+mt7615_mcu_add_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ return mt7615_mcu_add_sta_cmd(dev, vif, sta, enable,
+ MCU_EXT_CMD_STA_REC_UPDATE);
+}
+
+static const struct mt7615_mcu_ops sta_update_ops = {
+ .add_beacon_offload = mt7615_mcu_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_add_dev,
+ .add_bss_info = mt7615_mcu_add_bss,
+ .add_tx_ba = mt7615_mcu_sta_tx_ba,
+ .add_rx_ba = mt7615_mcu_sta_rx_ba,
+ .sta_add = mt7615_mcu_add_sta,
+};
+
+static int
+mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 pad;
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 pad;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ },
+ };
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
+}
+
+static int
+mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
+{
+ return 0;
+}
+
+static int
+mt7615_mcu_uni_add_bss(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct basic_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 omac_idx;
+ u8 hw_bss_idx;
+ u8 band_idx;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 wmm_idx;
+ u8 bssid[ETH_ALEN];
+ __le16 bmc_tx_wlan_idx;
+ __le16 bcn_interval;
+ u8 dtim_period;
+ u8 phymode;
+ __le16 sta_idx;
+ u8 nonht_basic_phy;
+ u8 pad[3];
+ } __packed basic;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct basic_tlv)),
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ .dtim_period = vif->bss_conf.dtim_period,
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .wmm_idx = mvif->wmm_idx,
+ .active = enable,
+ },
+ };
+ u8 idx, tx_wlan_idx = 0;
+
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ req.basic.hw_bss_idx = idx;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ tx_wlan_idx = mvif->sta.wcid.idx;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (enable) {
+ struct ieee80211_sta *sta;
+ struct mt7615_sta *msta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7615_sta *)sta->drv_priv;
+ tx_wlan_idx = msta->wcid.idx;
+ rcu_read_unlock();
+ }
+ req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ req.basic.bmc_tx_wlan_idx = cpu_to_le16(tx_wlan_idx);
+ req.basic.sta_idx = cpu_to_le16(tx_wlan_idx);
+ req.basic.conn_state = !enable;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct ieee80211_mutable_offsets offs;
+ struct {
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bcn_content_tlv {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ie_pos;
+ __le16 csa_ie_pos;
+ __le16 bcc_ie_pos;
+ /* 0: enable beacon offload
+ * 1: disable beacon offload
+ * 2: update probe response offload
+ */
+ u8 enable;
+ /* 0: legacy format (TXD + payload)
+ * 1: only cap field IE
+ */
+ u8 type;
+ __le16 pkt_len;
+ u8 pkt[512];
+ } __packed beacon_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .beacon_tlv = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
+ .len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
+ .enable = enable,
+ },
+ };
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "beacon size limit exceed\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
+ wcid, NULL, 0, NULL, true);
+ memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+
+ if (offs.csa_counter_offs[0]) {
+ u16 csa_offs;
+
+ csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
+ }
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
&req, sizeof(req), true);
}
+static int
+mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, sta_wtbl,
+ wtbl_hdr);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+ if (err < 0)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+ if (err < 0 || !enable)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, sta_wtbl,
+ wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_uni_add_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ return mt7615_mcu_add_sta_cmd(dev, vif, sta, enable,
+ MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
+static const struct mt7615_mcu_ops uni_update_ops = {
+ .add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_uni_add_dev,
+ .add_bss_info = mt7615_mcu_uni_add_bss,
+ .add_tx_ba = mt7615_mcu_uni_tx_ba,
+ .add_rx_ba = mt7615_mcu_uni_rx_ba,
+ .sta_add = mt7615_mcu_uni_add_sta,
+};
+
static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
int len)
{
@@ -260,14 +1450,16 @@ static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd),
len);
- ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_SCATTER,
data, cur_len, false);
if (ret)
break;
data += cur_len;
len -= cur_len;
- mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+
+ if (mt76_is_mmio(&dev->mt76))
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
}
return ret;
@@ -284,13 +1476,13 @@ static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
.addr = cpu_to_le32(addr),
};
- return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_START_REQ,
&req, sizeof(req), true);
}
static int mt7615_mcu_restart(struct mt76_dev *dev)
{
- return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ, NULL,
+ return __mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL,
0, true);
}
@@ -302,7 +1494,7 @@ static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
.op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
};
- return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_SEM_CONTROL,
&req, sizeof(req), true);
}
@@ -315,23 +1507,59 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
.check_crc = 0,
};
- return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_FINISH_REQ,
&req, sizeof(req), true);
}
+static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+{
+ if (!is_mt7622(&dev->mt76))
+ return;
+
+ regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
+ MT_INFRACFG_MISC_AP2CONN_WAKE,
+ !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
+}
+
static int mt7615_driver_own(struct mt7615_dev *dev)
{
- mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_DRV_OWN);
- if (!mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
- MT_CFG_LPCR_HOST_FW_OWN, 0, 500)) {
+ u32 addr;
+
+ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+
+ mt7622_trigger_hif_int(dev, true);
+ if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
+ mt7622_trigger_hif_int(dev, false);
return 0;
}
-static int mt7615_load_patch(struct mt7615_dev *dev)
+static int mt7615_firmware_own(struct mt7615_dev *dev)
+{
+ u32 addr;
+
+ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ mt7622_trigger_hif_int(dev, true);
+
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
+
+ if (is_mt7622(&dev->mt76) &&
+ !mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
+ MT_CFG_LPCR_HOST_FW_OWN,
+ MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
+ dev_err(dev->mt76.dev, "Timeout for firmware own\n");
+ return -EIO;
+ }
+ mt7622_trigger_hif_int(dev, false);
+
+ return 0;
+}
+
+static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
{
const struct mt7615_patch_hdr *hdr;
const struct firmware *fw = NULL;
@@ -348,7 +1576,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
return -EAGAIN;
}
- ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev);
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
goto out;
@@ -365,8 +1593,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
len = fw->size - sizeof(*hdr);
- ret = mt7615_mcu_init_download(dev, MCU_PATCH_ADDRESS, len,
- DL_MODE_NEED_RSP);
+ ret = mt7615_mcu_init_download(dev, addr, len, DL_MODE_NEED_RSP);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
@@ -444,13 +1671,13 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
return 0;
}
-static int mt7615_load_ram(struct mt7615_dev *dev)
+static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
{
const struct mt7615_fw_trailer *hdr;
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev);
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
return ret;
@@ -477,9 +1704,30 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
goto out;
}
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+ if (!strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) {
+ dev->fw_ver = MT7615_FIRMWARE_V2;
+ dev->mcu_ops = &sta_update_ops;
+ } else {
+ dev->fw_ver = MT7615_FIRMWARE_V1;
+ dev->mcu_ops = &wtbl_update_ops;
+ }
+
+out:
release_firmware(fw);
+ return ret;
+}
- ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev);
+static int mt7615_load_cr4(struct mt7615_dev *dev, const char *name)
+{
+ const struct mt7615_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
return ret;
@@ -500,8 +1748,10 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
goto out;
ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4);
- if (ret)
+ if (ret) {
dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
+ goto out;
+ }
out:
release_firmware(fw);
@@ -509,6 +1759,17 @@ out:
return ret;
}
+static int mt7615_load_ram(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt7615_load_n9(dev, MT7615_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ return mt7615_load_cr4(dev, MT7615_FIRMWARE_CR4);
+}
+
static int mt7615_load_firmware(struct mt7615_dev *dev)
{
int ret;
@@ -521,7 +1782,7 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
return -EIO;
}
- ret = mt7615_load_patch(dev);
+ ret = mt7615_load_patch(dev, MT7615_PATCH_ADDRESS, MT7615_ROM_PATCH);
if (ret)
return ret;
@@ -536,7 +1797,164 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
return -EIO;
}
- mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ return 0;
+}
+
+static int mt7622_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+ u32 val;
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ val = mt76_get_field(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE);
+ if (val != FW_STATE_FW_DOWNLOAD) {
+ dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev, MT7622_PATCH_ADDRESS, MT7622_ROM_PATCH);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_n9(dev, MT7622_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE,
+ FIELD_PREP(MT_TOP_OFF_RSV_FW_STATE,
+ FW_STATE_NORMAL_TRX), 1500)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ return 0;
+}
+
+int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
+{
+ struct {
+ u8 ctrl_val;
+ u8 pad[3];
+ } data = {
+ .ctrl_val = ctrl
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST,
+ &data, sizeof(data), true);
+}
+
+static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
+{
+ u32 offset = 0, override_addr = 0, flag = 0;
+ const struct mt7663_fw_trailer *hdr;
+ const struct mt7663_fw_buf *buf;
+ const struct firmware *fw;
+ const u8 *base_addr;
+ int i, ret;
+
+ ret = request_firmware(&fw, name, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < FW_V3_COMMON_TAILER_SIZE) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7663_fw_trailer *)(fw->data + fw->size -
+ FW_V3_COMMON_TAILER_SIZE);
+
+ dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+ dev_info(dev->mt76.dev, "Region number: 0x%x\n", hdr->n_region);
+
+ base_addr = fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE;
+ for (i = 0; i < hdr->n_region; i++) {
+ u32 shift = (hdr->n_region - i) * FW_V3_REGION_TAILER_SIZE;
+ u32 len, addr, mode;
+
+ dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i);
+
+ buf = (const struct mt7663_fw_buf *)(base_addr - shift);
+ mode = mt7615_mcu_gen_dl_mode(buf->feature_set, false);
+ addr = le32_to_cpu(buf->img_dest_addr);
+ len = le32_to_cpu(buf->img_size);
+
+ ret = mt7615_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware\n");
+ goto out;
+ }
+
+ offset += le32_to_cpu(buf->img_size);
+ if (buf->feature_set & DL_MODE_VALID_RAM_ENTRY) {
+ override_addr = le32_to_cpu(buf->img_dest_addr);
+ dev_info(dev->mt76.dev, "Region %d, override_addr = 0x%08x\n",
+ i, override_addr);
+ }
+ }
+
+ if (is_mt7663(&dev->mt76)) {
+ flag |= FW_START_DLYCAL;
+ if (override_addr)
+ flag |= FW_START_OVERRIDE;
+
+ dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n",
+ override_addr, flag);
+ }
+
+ ret = mt7615_mcu_start_firmware(dev, override_addr, flag);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
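mt7663_load_n9() walks per-region descriptors stacked in front of the common trailer at the end of the image: region i starts at end - COMMON - (n_region - i) * REGION. A sketch of that address arithmetic, with the FW_V3_* sizes mirrored as plain constants and a fabricated image:

#include <stdint.h>
#include <stdio.h>

#define COMMON_TAILER 36 /* mirrors FW_V3_COMMON_TAILER_SIZE */
#define REGION_TAILER 40 /* mirrors FW_V3_REGION_TAILER_SIZE */

int main(void)
{
	uint8_t fw[1024] = { 0 }; /* fabricated image: payload + trailers */
	int n_region = 2;
	const uint8_t *base = fw + sizeof(fw) - COMMON_TAILER;

	for (int i = 0; i < n_region; i++) {
		/* earlier regions sit deeper in front of the common trailer */
		const uint8_t *desc = base - (n_region - i) * REGION_TAILER;

		printf("region %d descriptor at offset %td\n", i, desc - fw);
	}
	return 0;
}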
+
+static int mt7663_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+ if (ret) {
+ dev_dbg(dev->mt76.dev, "Firmware is already downloaded\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, MT7663_ROM_PATCH);
+ if (ret)
+ return ret;
+
+ dev->fw_ver = MT7615_FIRMWARE_V3;
+ dev->mcu_ops = &uni_update_ops;
+
+ ret = mt7663_load_n9(dev, MT7663_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
+ MT_TOP_MISC2_FW_N9_RDY, 1500)) {
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC,
+ MT7663_TOP_MISC2_FW_STATE);
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
dev_dbg(dev->mt76.dev, "Firmware init done\n");
@@ -546,6 +1964,7 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
int mt7615_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7615_mcu_ops = {
+ .mcu_skb_send_msg = mt7615_mcu_send_message,
.mcu_send_msg = mt7615_mcu_msg_send,
.mcu_restart = mt7615_mcu_restart,
};
@@ -557,11 +1976,24 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
if (ret)
return ret;
- ret = mt7615_load_firmware(dev);
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ ret = mt7622_load_firmware(dev);
+ break;
+ case 0x7663:
+ ret = mt7663_load_firmware(dev);
+ break;
+ default:
+ ret = mt7615_load_firmware(dev);
+ break;
+ }
if (ret)
return ret;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt7615_mcu_fw_log_2_host(dev, 0);
return 0;
}
@@ -569,55 +2001,70 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
- skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+ mt7615_firmware_own(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
}
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
{
struct {
u8 buffer_mode;
- u8 pad;
- u16 len;
+ u8 content_format;
+ __le16 len;
} __packed req_hdr = {
.buffer_mode = 1,
- .len = __MT_EE_MAX - MT_EE_NIC_CONF_0,
};
- int ret, len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
- u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ struct sk_buff *skb;
+ int eep_len, offset;
- req = kzalloc(len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ eep_len = MT7622_EE_MAX - MT_EE_NIC_CONF_0;
+ offset = MT_EE_NIC_CONF_0;
+ break;
+ case 0x7663:
+ eep_len = MT7663_EE_MAX - MT_EE_CHIP_ID;
+ req_hdr.content_format = 1;
+ offset = MT_EE_CHIP_ID;
+ break;
+ default:
+ eep_len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+ offset = MT_EE_NIC_CONF_0;
+ break;
+ }
- memcpy(req, &req_hdr, sizeof(req_hdr));
- memcpy(req + sizeof(req_hdr), eep + MT_EE_NIC_CONF_0,
- __MT_EE_MAX - MT_EE_NIC_CONF_0);
+ req_hdr.len = cpu_to_le16(eep_len);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
- req, len, true);
- kfree(req);
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + eep_len);
+ if (!skb)
+ return -ENOMEM;
- return ret;
+ skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+ skb_put_data(skb, eep + offset, eep_len);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE, true);
}
-int mt7615_mcu_init_mac(struct mt7615_dev *dev)
+int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable)
{
struct {
u8 enable;
u8 band;
u8 rsv[2];
} __packed req = {
- .enable = 1,
- .band = 0,
+ .enable = enable,
+ .band = band,
};
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
&req, sizeof(req), true);
}
-int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
+int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val)
{
+ struct mt7615_dev *dev = phy->dev;
struct {
u8 prot_idx;
u8 band;
@@ -626,7 +2073,7 @@ int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
__le32 pkt_thresh;
} __packed req = {
.prot_idx = 1,
- .band = 0,
+ .band = phy != &dev->phy,
.len_thresh = cpu_to_le32(val),
.pkt_thresh = cpu_to_le32(0x2),
};
@@ -672,329 +2119,62 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
&req, sizeof(req), true);
}
-int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
+int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 rsv[3];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = 0,
+ struct mt7615_phy *ext_phy = mt7615_ext_phy(dev);
+ struct dbdc_entry {
+ u8 type;
+ u8 index;
+ u8 band;
+ u8 _rsv;
};
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct {
- struct req_hdr {
- u8 omac_idx;
- u8 band_idx;
- __le16 tlv_num;
- u8 is_tlv_append;
- u8 rsv[3];
- } __packed hdr;
- struct req_tlv {
- __le16 tag;
- __le16 len;
- u8 active;
- u8 band_idx;
- u8 omac_addr[ETH_ALEN];
- } __packed tlv;
- } data = {
- .hdr = {
- .omac_idx = mvif->omac_idx,
- .band_idx = mvif->band_idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- },
- .tlv = {
- .tag = cpu_to_le16(DEV_INFO_ACTIVE),
- .len = cpu_to_le16(sizeof(struct req_tlv)),
- .active = enable,
- .band_idx = mvif->band_idx,
- },
+ u8 enable;
+ u8 num;
+ u8 _rsv[2];
+ struct dbdc_entry entry[64];
+ } req = {
+ .enable = !!ext_phy,
};
+ int i;
- memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
- &data, sizeof(data), true);
-}
-
-static void
-mt7615_mcu_bss_info_omac_header(struct mt7615_vif *mvif, u8 *data,
- u32 conn_type)
-{
- struct bss_info_omac *hdr = (struct bss_info_omac *)data;
- u8 idx;
-
- idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
- hdr->tag = cpu_to_le16(BSS_INFO_OMAC);
- hdr->len = cpu_to_le16(sizeof(struct bss_info_omac));
- hdr->hw_bss_idx = idx;
- hdr->omac_idx = mvif->omac_idx;
- hdr->band_idx = mvif->band_idx;
- hdr->conn_type = cpu_to_le32(conn_type);
-}
-
-static void
-mt7615_mcu_bss_info_basic_header(struct ieee80211_vif *vif, u8 *data,
- u32 net_type, u8 tx_wlan_idx,
- bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct bss_info_basic *hdr = (struct bss_info_basic *)data;
-
- hdr->tag = cpu_to_le16(BSS_INFO_BASIC);
- hdr->len = cpu_to_le16(sizeof(struct bss_info_basic));
- hdr->network_type = cpu_to_le32(net_type);
- hdr->active = enable;
- hdr->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- memcpy(hdr->bssid, vif->bss_conf.bssid, ETH_ALEN);
- hdr->wmm_idx = mvif->wmm_idx;
- hdr->dtim_period = vif->bss_conf.dtim_period;
- hdr->bmc_tx_wlan_idx = tx_wlan_idx;
-}
-
-static void
-mt7615_mcu_bss_info_ext_header(struct mt7615_vif *mvif, u8 *data)
-{
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
- struct bss_info_ext_bss *hdr = (struct bss_info_ext_bss *)data;
- int ext_bss_idx, tsf_offset;
-
- ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- hdr->tag = cpu_to_le16(BSS_INFO_EXT_BSS);
- hdr->len = cpu_to_le16(sizeof(struct bss_info_ext_bss));
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- hdr->mbss_tsf_offset = cpu_to_le32(tsf_offset);
-}
-
-int mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, int en)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct req_hdr {
- u8 bss_idx;
- u8 rsv0;
- __le16 tlv_num;
- u8 is_tlv_append;
- u8 rsv1[3];
- } __packed;
- int len = sizeof(struct req_hdr) + sizeof(struct bss_info_basic);
- int ret, i, features = BIT(BSS_INFO_BASIC), ntlv = 1;
- u32 conn_type = 0, net_type = NETWORK_INFRA;
- u8 *buf, *data, tx_wlan_idx = 0;
- struct req_hdr *hdr;
-
- if (en) {
- len += sizeof(struct bss_info_omac);
- features |= BIT(BSS_INFO_OMAC);
- if (mvif->omac_idx > EXT_BSSID_START) {
- len += sizeof(struct bss_info_ext_bss);
- features |= BIT(BSS_INFO_EXT_BSS);
- ntlv++;
- }
- ntlv++;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- tx_wlan_idx = mvif->sta.wcid.idx;
- conn_type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION: {
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (en) {
- struct ieee80211_sta *sta;
- struct mt7615_sta *msta;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- msta = (struct mt7615_sta *)sta->drv_priv;
- tx_wlan_idx = msta->wcid.idx;
- rcu_read_unlock();
- }
- conn_type = CONNECTION_INFRA_STA;
- break;
- }
- case NL80211_IFTYPE_ADHOC:
- conn_type = CONNECTION_IBSS_ADHOC;
- tx_wlan_idx = mvif->sta.wcid.idx;
- net_type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!ext_phy)
+ goto out;
- hdr = (struct req_hdr *)buf;
- hdr->bss_idx = mvif->idx;
- hdr->tlv_num = cpu_to_le16(ntlv);
- hdr->is_tlv_append = 1;
+#define ADD_DBDC_ENTRY(_type, _idx, _band) \
+ do { \
+ req.entry[req.num].type = _type; \
+ req.entry[req.num].index = _idx; \
+ req.entry[req.num++].band = _band; \
+ } while (0)
- data = buf + sizeof(*hdr);
- for (i = 0; i < BSS_INFO_MAX_NUM; i++) {
- int tag = ffs(features & BIT(i)) - 1;
+ for (i = 0; i < 4; i++) {
+ bool band = !!(ext_phy->omac_mask & BIT(i));
- switch (tag) {
- case BSS_INFO_OMAC:
- mt7615_mcu_bss_info_omac_header(mvif, data,
- conn_type);
- data += sizeof(struct bss_info_omac);
- break;
- case BSS_INFO_BASIC:
- mt7615_mcu_bss_info_basic_header(vif, data, net_type,
- tx_wlan_idx, en);
- data += sizeof(struct bss_info_basic);
- break;
- case BSS_INFO_EXT_BSS:
- mt7615_mcu_bss_info_ext_header(mvif, data);
- data += sizeof(struct bss_info_ext_bss);
- break;
- default:
- break;
- }
+ ADD_DBDC_ENTRY(DBDC_TYPE_BSS, i, band);
}
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BSS_INFO_UPDATE,
- buf, len, true);
- kfree(buf);
-
- return ret;
-}
-
-static int
-mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev,
- struct mt7615_vif *mvif)
-{
- struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_generic g_wtbl;
- struct wtbl_rx rx_wtbl;
- } req = {
- .hdr = {
- .wlan_idx = mvif->sta.wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- .tlv_num = cpu_to_le16(2),
- },
- .g_wtbl = {
- .tag = cpu_to_le16(WTBL_GENERIC),
- .len = cpu_to_le16(sizeof(struct wtbl_generic)),
- .muar_idx = 0xe,
- },
- .rx_wtbl = {
- .tag = cpu_to_le16(WTBL_RX),
- .len = cpu_to_le16(sizeof(struct wtbl_rx)),
- .rca1 = 1,
- .rca2 = 1,
- .rv = 1,
- },
- };
- eth_broadcast_addr(req.g_wtbl.peer_addr);
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ for (i = 0; i < 14; i++) {
+ bool band = !!(ext_phy->omac_mask & BIT(0x11 + i));
- if (!enable) {
- struct wtbl_req_hdr req = {
- .wlan_idx = mvif->sta.wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- };
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
+ ADD_DBDC_ENTRY(DBDC_TYPE_MBSS, i, band);
}
- return mt7615_mcu_add_wtbl_bmc(dev, mvif);
-}
+ ADD_DBDC_ENTRY(DBDC_TYPE_MU, 0, 1);
-int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_generic g_wtbl;
- struct wtbl_rx rx_wtbl;
- } req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- .tlv_num = cpu_to_le16(2),
- },
- .g_wtbl = {
- .tag = cpu_to_le16(WTBL_GENERIC),
- .len = cpu_to_le16(sizeof(struct wtbl_generic)),
- .muar_idx = mvif->omac_idx,
- .qos = sta->wme,
- .partial_aid = cpu_to_le16(sta->aid),
- },
- .rx_wtbl = {
- .tag = cpu_to_le16(WTBL_RX),
- .len = cpu_to_le16(sizeof(struct wtbl_rx)),
- .rca1 = vif->type != NL80211_IFTYPE_AP,
- .rca2 = 1,
- .rv = 1,
- },
- };
- memcpy(req.g_wtbl.peer_addr, sta->addr, ETH_ALEN);
+ for (i = 0; i < 3; i++)
+ ADD_DBDC_ENTRY(DBDC_TYPE_BF, i, 1);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
-}
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 0, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 1, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 2, 1);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 3, 1);
-int mt7615_mcu_del_wtbl(struct mt7615_dev *dev,
- struct ieee80211_sta *sta)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_req_hdr req = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- };
+ ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 0, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+out:
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DBDC_CTRL,
&req, sizeof(req), true);
}
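/* Worked example (sketch): if the second phy owns only HW_BSSID_1
 * (ext_phy->omac_mask == BIT(1)), the table built above is roughly:
 *
 *   BSS  0,2,3 -> band 0     BSS  1    -> band 1
 *   MBSS 0..13 -> band 0     (ext BSSIDs are tracked from bit 0x11 up)
 *   MU   0     -> band 1     BF   0..2 -> band 1
 *   WMM  0,1   -> band 0     WMM  2,3  -> band 1
 *   MGMT 0     -> band 0     MGMT 1    -> band 1
 */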
@@ -1008,238 +2188,72 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
-int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool en)
+int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
+ enum mt7615_rdd_cmd cmd, u8 index,
+ u8 rx_sel, u8 val)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct {
- struct sta_req_hdr hdr;
- struct sta_rec_basic basic;
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
} req = {
- .hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = mvif->sta.wcid.idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- .muar_idx = mvif->omac_idx,
- },
- .basic = {
- .tag = cpu_to_le16(STA_REC_BASIC),
- .len = cpu_to_le16(sizeof(struct sta_rec_basic)),
- .conn_type = cpu_to_le32(CONNECTION_INFRA_BC),
- },
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
};
- eth_broadcast_addr(req.basic.peer_addr);
-
- if (en) {
- req.basic.conn_state = CONN_STATE_PORT_SECURE;
- req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
- EXTRA_INFO_NEW);
- } else {
- req.basic.conn_state = CONN_STATE_DISCONNECT;
- req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
- }
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
&req, sizeof(req), true);
}
-int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool en)
+int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
-
struct {
- struct sta_req_hdr hdr;
- struct sta_rec_basic basic;
+ u16 tag;
+ u16 min_lpn;
} req = {
- .hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = msta->wcid.idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- .muar_idx = mvif->omac_idx,
- },
- .basic = {
- .tag = cpu_to_le16(STA_REC_BASIC),
- .len = cpu_to_le16(sizeof(struct sta_rec_basic)),
- .qos = sta->wme,
- .aid = cpu_to_le16(sta->aid),
- },
+ .tag = 0x1,
+ .min_lpn = val,
};
- memcpy(req.basic.peer_addr, sta->addr, ETH_ALEN);
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
- break;
- case NL80211_IFTYPE_STATION:
- req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
- break;
- case NL80211_IFTYPE_ADHOC:
- req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- if (en) {
- req.basic.conn_state = CONN_STATE_PORT_SECURE;
- req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
- EXTRA_INFO_NEW);
- } else {
- req.basic.conn_state = CONN_STATE_DISCONNECT;
- req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
- }
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
&req, sizeof(req), true);
}
-int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en)
+int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
+ const struct mt7615_dfs_pulse *pulse)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt76_wcid *wcid = &dev->mt76.global_wcid;
- struct ieee80211_mutable_offsets offs;
- struct req {
- u8 omac_idx;
- u8 enable;
- u8 wlan_idx;
- u8 band_idx;
- u8 pkt_type;
- u8 need_pre_tbtt_int;
- __le16 csa_ie_pos;
- __le16 pkt_len;
- __le16 tim_ie_pos;
- u8 pkt[512];
- u8 csa_cnt;
- /* bss color change */
- u8 bcc_cnt;
- __le16 bcc_ie_pos;
- } __packed req = {
- .omac_idx = mvif->omac_idx,
- .enable = en,
- .wlan_idx = wcid->idx,
- .band_idx = mvif->band_idx,
- };
- struct sk_buff *skb;
-
- skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
- if (!skb)
- return -EINVAL;
-
- if (skb->len > 512 - MT_TXD_SIZE) {
- dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-
- mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
- 0, NULL);
- memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
- req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
- req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) {
- u16 csa_offs;
-
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
- req.csa_ie_pos = cpu_to_le16(csa_offs);
- req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
- }
- dev_kfree_skb(skb);
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_set_tx_power(struct mt7615_dev *dev)
-{
- int i, ret, n_chains = hweight8(dev->mt76.antenna_mask);
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
- int freq = chandef->center_freq1, len, target_chains;
- u8 *req, *data, *eep = (u8 *)dev->mt76.eeprom.data;
- enum nl80211_band band = chandef->chan->band;
- struct ieee80211_hw *hw = mt76_hw(dev);
struct {
- u8 center_chan;
- u8 dbdc_idx;
- u8 band;
- u8 rsv;
- } __packed req_hdr = {
- .center_chan = ieee80211_frequency_to_channel(freq),
- .band = band,
+ u16 tag;
+ struct mt7615_dfs_pulse pulse;
+ } req = {
+ .tag = 0x3,
};
- s8 tx_power;
-
- len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
- req = kzalloc(len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
- memcpy(req, &req_hdr, sizeof(req_hdr));
- data = req + sizeof(req_hdr);
- memcpy(data, eep + MT_EE_NIC_CONF_0,
- __MT_EE_MAX - MT_EE_NIC_CONF_0);
+ memcpy(&req.pulse, pulse, sizeof(*pulse));
- tx_power = hw->conf.power_level * 2;
- switch (n_chains) {
- case 4:
- tx_power -= 12;
- break;
- case 3:
- tx_power -= 8;
- break;
- case 2:
- tx_power -= 6;
- break;
- default:
- break;
- }
- tx_power = max_t(s8, tx_power, 0);
- dev->mt76.txpower_cur = tx_power;
-
- target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
- for (i = 0; i < target_chains; i++) {
- int index = -MT_EE_NIC_CONF_0;
-
- ret = mt7615_eeprom_get_power_index(dev, chandef->chan, i);
- if (ret < 0)
- goto out;
-
- index += ret;
- data[index] = min_t(u8, data[index], tx_power);
- }
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
- req, len, true);
-out:
- kfree(req);
-
- return ret;
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
}
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
+int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
+ const struct mt7615_dfs_pattern *pattern)
{
struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
+ u16 tag;
+ u16 radar_type;
+ struct mt7615_dfs_pattern pattern;
} req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
+ .tag = 0x2,
+ .radar_type = index,
};
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
+ memcpy(&req.pattern, pattern, sizeof(*pattern));
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
&req, sizeof(req), true);
}
@@ -1274,9 +2288,35 @@ int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
&req, sizeof(req), false);
}
-int mt7615_mcu_set_channel(struct mt7615_dev *dev)
+static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ int n_chains = hweight8(mphy->antenna_mask);
+ int tx_power;
+ int i;
+
+ tx_power = hw->conf.power_level * 2 -
+ mt76_tx_power_nss_delta(n_chains);
+ mphy->txpower_cur = tx_power;
+
+ for (i = 0; i < MT_SKU_1SS_DELTA; i++)
+ sku[i] = tx_power;
+
+ for (i = 0; i < 4; i++) {
+ int delta = 0;
+
+ if (i < n_chains - 1)
+ delta = mt76_tx_power_nss_delta(n_chains) -
+ mt76_tx_power_nss_delta(i + 1);
+ sku[MT_SKU_1SS_DELTA + i] = delta;
+ }
+}
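/* Worked example (assuming mt76_tx_power_nss_delta() returns
 * { 0, 6, 9, 12 } half-dB steps for 1..4 chains, as in the mt76 core
 * at this point): with 4 chains and hw->conf.power_level = 20 dBm,
 *
 *   tx_power              = 20 * 2 - 12 = 28   (14 dBm per chain)
 *   sku[MT_SKU_1SS_DELTA] = 12 - 0 = 12        (1SS rates get +6 dB)
 *   sku[MT_SKU_2SS_DELTA] = 12 - 6 = 6
 *   sku[MT_SKU_3SS_DELTA] = 12 - 9 = 3
 *   sku[MT_SKU_4SS_DELTA] = 0
 */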
+
+int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
struct {
u8 control_chan;
@@ -1299,11 +2339,10 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
} req = {
.control_chan = chandef->chan->hw_value,
.center_chan = ieee80211_frequency_to_channel(freq1),
- .tx_streams = (dev->mt76.chainmask >> 8) & 0xf,
- .rx_streams_mask = dev->mt76.antenna_mask,
+ .tx_streams = hweight8(phy->mt76->antenna_mask),
+ .rx_streams_mask = phy->chainmask,
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
- int ret;
if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
@@ -1313,7 +2352,9 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
else
req.switch_reason = CH_SWITCH_NORMAL;
- switch (dev->mt76.chandef.width) {
+ req.band_idx = phy != &dev->phy;
+
+ switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
req.bw = CMD_CBW_40MHZ;
break;
@@ -1338,278 +2379,39 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
req.bw = CMD_CBW_20MHZ;
break;
}
- memset(req.txpower_sku, 0x3f, 49);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
- &req, sizeof(req), true);
- if (ret)
- return ret;
+ mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RX_PATH,
- &req, sizeof(req), true);
+ return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
-int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sta_req_hdr *sta_hdr;
- struct wtbl_raw *wtbl_raw;
- struct sta_rec_ht *sta_ht;
- struct wtbl_ht *wtbl_ht;
- int buf_len, ret, ntlv = 2;
- u32 msk, val = 0;
- u8 *buf;
-
- buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- wtbl_hdr = (struct wtbl_req_hdr *)buf;
- wtbl_hdr->wlan_idx = msta->wcid.idx;
- wtbl_hdr->operation = WTBL_SET;
- buf_len = sizeof(*wtbl_hdr);
-
- /* ht basic */
- wtbl_ht = (struct wtbl_ht *)(buf + buf_len);
- wtbl_ht->tag = cpu_to_le16(WTBL_HT);
- wtbl_ht->len = cpu_to_le16(sizeof(*wtbl_ht));
- wtbl_ht->ht = 1;
- wtbl_ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
- wtbl_ht->af = sta->ht_cap.ampdu_factor;
- wtbl_ht->mm = sta->ht_cap.ampdu_density;
- buf_len += sizeof(*wtbl_ht);
-
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
- val |= MT_WTBL_W5_SHORT_GI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
- val |= MT_WTBL_W5_SHORT_GI_40;
-
- /* vht basic */
- if (sta->vht_cap.vht_supported) {
- struct wtbl_vht *wtbl_vht;
-
- wtbl_vht = (struct wtbl_vht *)(buf + buf_len);
- buf_len += sizeof(*wtbl_vht);
- wtbl_vht->tag = cpu_to_le16(WTBL_VHT);
- wtbl_vht->len = cpu_to_le16(sizeof(*wtbl_vht));
- wtbl_vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
- wtbl_vht->vht = 1;
- ntlv++;
-
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
- val |= MT_WTBL_W5_SHORT_GI_80;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
- val |= MT_WTBL_W5_SHORT_GI_160;
- }
-
- /* smps */
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
- struct wtbl_smps *wtbl_smps;
-
- wtbl_smps = (struct wtbl_smps *)(buf + buf_len);
- buf_len += sizeof(*wtbl_smps);
- wtbl_smps->tag = cpu_to_le16(WTBL_SMPS);
- wtbl_smps->len = cpu_to_le16(sizeof(*wtbl_smps));
- wtbl_smps->smps = 1;
- ntlv++;
- }
-
- /* sgi */
- msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
- MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
-
- wtbl_raw = (struct wtbl_raw *)(buf + buf_len);
- buf_len += sizeof(*wtbl_raw);
- wtbl_raw->tag = cpu_to_le16(WTBL_RAW_DATA);
- wtbl_raw->len = cpu_to_le16(sizeof(*wtbl_raw));
- wtbl_raw->wtbl_idx = 1;
- wtbl_raw->dw = 5;
- wtbl_raw->msk = cpu_to_le32(~msk);
- wtbl_raw->val = cpu_to_le32(val);
-
- wtbl_hdr->tlv_num = cpu_to_le16(ntlv);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- buf, buf_len, true);
- if (ret)
- goto out;
-
- memset(buf, 0, MT7615_WTBL_UPDATE_MAX_SIZE);
-
- sta_hdr = (struct sta_req_hdr *)buf;
- sta_hdr->bss_idx = mvif->idx;
- sta_hdr->wlan_idx = msta->wcid.idx;
- sta_hdr->is_tlv_append = 1;
- ntlv = sta->vht_cap.vht_supported ? 2 : 1;
- sta_hdr->tlv_num = cpu_to_le16(ntlv);
- sta_hdr->muar_idx = mvif->omac_idx;
- buf_len = sizeof(*sta_hdr);
-
- sta_ht = (struct sta_rec_ht *)(buf + buf_len);
- sta_ht->tag = cpu_to_le16(STA_REC_HT);
- sta_ht->len = cpu_to_le16(sizeof(*sta_ht));
- sta_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
- buf_len += sizeof(*sta_ht);
-
- if (sta->vht_cap.vht_supported) {
- struct sta_rec_vht *sta_vht;
-
- sta_vht = (struct sta_rec_vht *)(buf + buf_len);
- buf_len += sizeof(*sta_vht);
- sta_vht->tag = cpu_to_le16(STA_REC_VHT);
- sta_vht->len = cpu_to_le16(sizeof(*sta_vht));
- sta_vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- sta_vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- sta_vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
- }
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- buf, buf_len, true);
-out:
- kfree(buf);
-
- return ret;
-}
-
-int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool add)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
- struct mt7615_vif *mvif = msta->vif;
- struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_ba ba;
- } wtbl_req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_SET,
- .tlv_num = cpu_to_le16(1),
- },
- .ba = {
- .tag = cpu_to_le16(WTBL_BA),
- .len = cpu_to_le16(sizeof(struct wtbl_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_ORIGINATOR,
- .sn = add ? cpu_to_le16(params->ssn) : 0,
- .ba_en = add,
- },
- };
- struct {
- struct sta_req_hdr hdr;
- struct sta_rec_ba ba;
- } sta_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = msta->wcid.idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- .muar_idx = mvif->omac_idx,
- },
- .ba = {
- .tag = cpu_to_le16(STA_REC_BA),
- .len = cpu_to_le16(sizeof(struct sta_rec_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_ORIGINATOR,
- .amsdu = params->amsdu,
- .ba_en = add << params->tid,
- .ssn = cpu_to_le16(params->ssn),
- .winsize = cpu_to_le16(params->buf_size),
- },
- };
- int ret;
-
- if (add) {
- u8 idx, ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
-
- for (idx = 7; idx > 0; idx--) {
- if (params->buf_size >= ba_range[idx])
- break;
- }
-
- wtbl_req.ba.ba_winsize_idx = idx;
- }
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &wtbl_req, sizeof(wtbl_req), true);
- if (ret)
- return ret;
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- &sta_req, sizeof(sta_req), true);
-}
-
-int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool add)
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
{
- struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
- struct mt7615_vif *mvif = msta->vif;
struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_ba ba;
- } wtbl_req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_SET,
- .tlv_num = cpu_to_le16(1),
- },
- .ba = {
- .tag = cpu_to_le16(WTBL_BA),
- .len = cpu_to_le16(sizeof(struct wtbl_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_RECIPIENT,
- .rst_ba_tid = params->tid,
- .rst_ba_sel = RST_BA_MAC_TID_MATCH,
- .rst_ba_sb = 1,
- },
- };
- struct {
- struct sta_req_hdr hdr;
- struct sta_rec_ba ba;
- } sta_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = msta->wcid.idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- .muar_idx = mvif->omac_idx,
- },
- .ba = {
- .tag = cpu_to_le16(STA_REC_BA),
- .len = cpu_to_le16(sizeof(struct sta_rec_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_RECIPIENT,
- .amsdu = params->amsdu,
- .ba_en = add << params->tid,
- .ssn = cpu_to_le16(params->ssn),
- .winsize = cpu_to_le16(params->buf_size),
- },
+ u8 action;
+ u8 rsv[3];
+ } req = {
+ .action = index,
};
- int ret;
-
- memcpy(wtbl_req.ba.peer_addr, params->sta->addr, ETH_ALEN);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- &sta_req, sizeof(sta_req), true);
- if (ret || !add)
- return ret;
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &wtbl_req, sizeof(wtbl_req), true);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
+ sizeof(req), true);
}
-int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
+int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
{
+ struct mt7615_dev *dev = phy->dev;
struct {
- u8 action;
- u8 rsv[3];
+ u8 format_id;
+ u8 sku_enable;
+ u8 band_idx;
+ u8 rsv;
} req = {
- .action = index,
+ .format_id = 0,
+ .band_idx = phy != &dev->phy,
+ .sku_enable = enable,
};
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
sizeof(req), true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 1fd7dffa6eef..d1f7391472fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -23,6 +23,57 @@ struct mt7615_mcu_txd {
u32 reserved[5];
} __packed __aligned(4);
+/**
+ * struct mt7615_uni_txd - mcu command descriptor for firmware v3
+ * @txd: hardware descriptor
+ * @len: total length not including txd
+ * @cid: command identifier
+ * @pkt_type: must be 0xa0 (command packet in long format)
+ * @frag_n: fragment number
+ * @seq: sequence number
+ * @checksum: 0 means there is no checksum
+ * @s2d_index: index for command source and destination
+ * Definition | value | note
+ * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM
+ * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM
+ * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA
+ * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM
+ *
+ * @option: command option
+ * BIT[0]: UNI_CMD_OPT_BIT_ACK
+ * set to 1 to request a fw reply;
+ * if UNI_CMD_OPT_BIT_ACK and UNI_CMD_OPT_BIT_SET_QUERY are both
+ * set, the mcu firmware will send a response event with EID = 0x01
+ * (UNI_EVENT_ID_CMD_RESULT) to the host.
+ * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD
+ * 0: original command
+ * 1: unified command
+ * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY
+ * 0: QUERY command
+ * 1: SET command
+ */
+struct mt7615_uni_txd {
+ __le32 txd[8];
+
+ /* DW1 */
+ __le16 len;
+ __le16 cid;
+
+ /* DW2 */
+ u8 reserved;
+ u8 pkt_type;
+ u8 frag_n;
+ u8 seq;
+
+ /* DW3 */
+ __le16 checksum;
+ u8 s2d_index;
+ u8 option;
+
+ /* DW4 */
+ u8 reserved2[4];
+} __packed __aligned(4);
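/* A minimal sketch (not part of the patch) of how the fields documented
 * above could be filled for a unified SET command that requests an ack;
 * the helper name is hypothetical and the option values follow the
 * BIT[] layout described in the comment:
 */

static void mt7663_uni_txd_fill_sketch(struct mt7615_uni_txd *txd,
				       u16 cid, u16 body_len, u8 seq)
{
	/* total length excludes the 8 hardware txd dwords */
	txd->len = cpu_to_le16(sizeof(*txd) - sizeof(txd->txd) + body_len);
	txd->cid = cpu_to_le16(cid);
	txd->pkt_type = 0xa0;	/* long-format command packet */
	txd->frag_n = 0;
	txd->seq = seq;
	txd->checksum = 0;	/* 0: no checksum */
	txd->s2d_index = 0;	/* CMD_S2D_IDX_H2N: host to WM */
	txd->option = BIT(0) |	/* UNI_CMD_OPT_BIT_ACK: request reply */
		      BIT(1) |	/* UNI_CMD_OPT_BIT_UNI_CMD: unified */
		      BIT(2);	/* UNI_CMD_OPT_BIT_SET_QUERY: SET */
}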
+
/* event table */
enum {
MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
@@ -45,6 +96,62 @@ enum {
MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
};
+enum {
+ MT_SKU_CCK_1_2 = 0,
+ MT_SKU_CCK_55_11,
+ MT_SKU_OFDM_6_9,
+ MT_SKU_OFDM_12_18,
+ MT_SKU_OFDM_24_36,
+ MT_SKU_OFDM_48,
+ MT_SKU_OFDM_54,
+ MT_SKU_HT20_0_8,
+ MT_SKU_HT20_32,
+ MT_SKU_HT20_1_2_9_10,
+ MT_SKU_HT20_3_4_11_12,
+ MT_SKU_HT20_5_13,
+ MT_SKU_HT20_6_14,
+ MT_SKU_HT20_7_15,
+ MT_SKU_HT40_0_8,
+ MT_SKU_HT40_32,
+ MT_SKU_HT40_1_2_9_10,
+ MT_SKU_HT40_3_4_11_12,
+ MT_SKU_HT40_5_13,
+ MT_SKU_HT40_6_14,
+ MT_SKU_HT40_7_15,
+ MT_SKU_VHT20_0,
+ MT_SKU_VHT20_1_2,
+ MT_SKU_VHT20_3_4,
+ MT_SKU_VHT20_5_6,
+ MT_SKU_VHT20_7,
+ MT_SKU_VHT20_8,
+ MT_SKU_VHT20_9,
+ MT_SKU_VHT40_0,
+ MT_SKU_VHT40_1_2,
+ MT_SKU_VHT40_3_4,
+ MT_SKU_VHT40_5_6,
+ MT_SKU_VHT40_7,
+ MT_SKU_VHT40_8,
+ MT_SKU_VHT40_9,
+ MT_SKU_VHT80_0,
+ MT_SKU_VHT80_1_2,
+ MT_SKU_VHT80_3_4,
+ MT_SKU_VHT80_5_6,
+ MT_SKU_VHT80_7,
+ MT_SKU_VHT80_8,
+ MT_SKU_VHT80_9,
+ MT_SKU_VHT160_0,
+ MT_SKU_VHT160_1_2,
+ MT_SKU_VHT160_3_4,
+ MT_SKU_VHT160_5_6,
+ MT_SKU_VHT160_7,
+ MT_SKU_VHT160_8,
+ MT_SKU_VHT160_9,
+ MT_SKU_1SS_DELTA,
+ MT_SKU_2SS_DELTA,
+ MT_SKU_3SS_DELTA,
+ MT_SKU_4SS_DELTA,
+};
+
struct mt7615_mcu_rxd {
__le32 rxd[4];
@@ -60,6 +167,52 @@ struct mt7615_mcu_rxd {
u8 s2d_index;
};
+struct mt7615_mcu_rdd_report {
+ struct mt7615_mcu_rxd rxd;
+
+ u8 idx;
+ u8 long_detected;
+ u8 constant_prf_detected;
+ u8 staggered_prf_detected;
+ u8 radar_type_idx;
+ u8 periodic_pulse_num;
+ u8 long_pulse_num;
+ u8 hw_pulse_num;
+
+ u8 out_lpn;
+ u8 out_spn;
+ u8 out_crpn;
+ u8 out_crpw;
+ u8 out_crbn;
+ u8 out_stgpn;
+ u8 out_stgpw;
+
+ u8 _rsv[2];
+
+ __le32 out_pri_const;
+ __le32 out_pri_stg[3];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ } long_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ } periodic_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 sc_pass;
+ u8 sw_reset;
+ } hw_pulse[32];
+};
+
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
#define MCU_PKT_ID 0xa0
@@ -77,22 +230,27 @@ enum {
MCU_S2D_H2CN
};
+#define MCU_FW_PREFIX BIT(31)
+#define MCU_UNI_PREFIX BIT(30)
+#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX)
+
enum {
- MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
- MCU_CMD_FW_START_REQ = 0x02,
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
+ MCU_CMD_FW_START_REQ = MCU_FW_PREFIX | 0x02,
MCU_CMD_INIT_ACCESS_REG = 0x3,
MCU_CMD_PATCH_START_REQ = 0x05,
- MCU_CMD_PATCH_FINISH_REQ = 0x07,
- MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_PATCH_FINISH_REQ = MCU_FW_PREFIX | 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = MCU_FW_PREFIX | 0x10,
MCU_CMD_EXT_CID = 0xED,
- MCU_CMD_FW_SCATTER = 0xEE,
- MCU_CMD_RESTART_DL_REQ = 0xEF,
+ MCU_CMD_FW_SCATTER = MCU_FW_PREFIX | 0xEE,
+ MCU_CMD_RESTART_DL_REQ = MCU_FW_PREFIX | 0xEF,
};
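/* Sketch: MCU_FW_PREFIX and MCU_UNI_PREFIX sit in the top two bits,
 * well clear of any 8-bit firmware command id, so they can route a
 * command in software and be stripped before transmission. A
 * hypothetical helper:
 */

static u32 mcu_cmd_to_cid(int cmd)
{
	/* only the low bits ever reach the firmware */
	return cmd & MCU_CMD_MASK;
}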
enum {
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
@@ -102,13 +260,33 @@ enum {
MCU_EXT_CMD_WTBL_UPDATE = 0x32,
MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_DBDC_CTRL = 0x45,
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_SET_RDD_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
};
enum {
+ MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01,
+ MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02,
+ MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03,
+};
+
+#define MCU_CMD_ACK BIT(0)
+#define MCU_CMD_UNI BIT(1)
+#define MCU_CMD_QUERY BIT(2)
+
+#define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | MCU_CMD_QUERY)
+
+enum {
+ UNI_BSS_INFO_BASIC = 0,
+ UNI_BSS_INFO_BCN_CONTENT = 7,
+};
+
+enum {
PATCH_SEM_RELEASE = 0x0,
PATCH_SEM_GET = 0x1
};
@@ -156,6 +334,23 @@ enum {
DEV_INFO_MAX_NUM
};
+enum {
+ DBDC_TYPE_WMM,
+ DBDC_TYPE_MGMT,
+ DBDC_TYPE_BSS,
+ DBDC_TYPE_MBSS,
+ DBDC_TYPE_REPEATER,
+ DBDC_TYPE_MU,
+ DBDC_TYPE_BF,
+ DBDC_TYPE_PTA,
+ __DBDC_TYPE_MAX,
+};
+
+struct tlv {
+ __le16 tag;
+ __le16 len;
+} __packed;
+
struct bss_info_omac {
__le16 tag;
__le16 len;
@@ -365,18 +560,28 @@ struct wtbl_raw {
__le32 val;
} __packed;
-#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_generic) + \
- sizeof(struct wtbl_rx) + \
- sizeof(struct wtbl_ht) + \
- sizeof(struct wtbl_vht) + \
- sizeof(struct wtbl_tx_ps) + \
- sizeof(struct wtbl_hdr_trans) + \
- sizeof(struct wtbl_ba) + \
- sizeof(struct wtbl_bf) + \
- sizeof(struct wtbl_smps) + \
- sizeof(struct wtbl_pn) + \
- sizeof(struct wtbl_spe))
+#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_tx_ps) + \
+ sizeof(struct wtbl_hdr_trans) +\
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_bf) + \
+ sizeof(struct wtbl_smps) + \
+ sizeof(struct wtbl_pn) + \
+ sizeof(struct wtbl_spe))
+
+#define MT7615_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_vht) + \
+ sizeof(struct tlv) + \
+ MT7615_WTBL_UPDATE_MAX_SIZE)
+
+#define MT7615_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_ba))
enum {
WTBL_GENERIC,
@@ -399,6 +604,11 @@ enum {
WTBL_MAX_NUM
};
+struct sta_ntlv_hdr {
+ u8 rsv[2];
+ __le16 tlv_num;
+} __packed;
+
struct sta_req_hdr {
u8 bss_idx;
u8 wlan_idx;
@@ -408,6 +618,15 @@ struct sta_req_hdr {
u8 rsv[2];
} __packed;
+struct sta_rec_state {
+ __le16 tag;
+ __le16 len;
+ u8 state;
+ __le32 flags;
+ u8 vhtop;
+ u8 pad[2];
+} __packed;
+
struct sta_rec_basic {
__le16 tag;
__le16 len;
@@ -447,10 +666,6 @@ struct sta_rec_ba {
__le16 winsize;
} __packed;
-#define MT7615_STA_REC_UPDATE_MAX_SIZE (sizeof(struct sta_rec_basic) + \
- sizeof(struct sta_rec_ht) + \
- sizeof(struct sta_rec_vht))
-
enum {
STA_REC_BASIC,
STA_REC_RA,
@@ -459,11 +674,12 @@ enum {
STA_REC_BF,
STA_REC_AMSDU, /* for CR4 */
STA_REC_BA,
- STA_REC_RED, /* not used */
+ STA_REC_STATE,
STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
STA_REC_HT,
STA_REC_VHT,
STA_REC_APPS,
+ STA_REC_WTBL = 13,
STA_REC_MAX_NUM
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
new file mode 100644
index 000000000000..d2eff5442824
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -0,0 +1,174 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt7615.h"
+#include "regs.h"
+#include "mac.h"
+#include "../trace.h"
+
+const u32 mt7615e_reg_map[] = {
+ [MT_TOP_CFG_BASE] = 0x01000,
+ [MT_HW_BASE] = 0x01000,
+ [MT_PCIE_REMAP_2] = 0x02504,
+ [MT_ARB_BASE] = 0x20c00,
+ [MT_HIF_BASE] = 0x04000,
+ [MT_CSR_BASE] = 0x07000,
+ [MT_PHY_BASE] = 0x10000,
+ [MT_CFG_BASE] = 0x20200,
+ [MT_AGG_BASE] = 0x20a00,
+ [MT_TMAC_BASE] = 0x21000,
+ [MT_RMAC_BASE] = 0x21200,
+ [MT_DMA_BASE] = 0x21800,
+ [MT_WTBL_BASE_ON] = 0x23000,
+ [MT_WTBL_BASE_OFF] = 0x23400,
+ [MT_LPON_BASE] = 0x24200,
+ [MT_MIB_BASE] = 0x24800,
+ [MT_WTBL_BASE_ADDR] = 0x30000,
+ [MT_PCIE_REMAP_BASE2] = 0x80000,
+ [MT_TOP_MISC_BASE] = 0xc0000,
+ [MT_EFUSE_ADDR_BASE] = 0x81070000,
+};
+
+const u32 mt7663e_reg_map[] = {
+ [MT_TOP_CFG_BASE] = 0x01000,
+ [MT_HW_BASE] = 0x02000,
+ [MT_DMA_SHDL_BASE] = 0x06000,
+ [MT_PCIE_REMAP_2] = 0x0700c,
+ [MT_ARB_BASE] = 0x20c00,
+ [MT_HIF_BASE] = 0x04000,
+ [MT_CSR_BASE] = 0x07000,
+ [MT_PHY_BASE] = 0x10000,
+ [MT_CFG_BASE] = 0x20000,
+ [MT_AGG_BASE] = 0x22000,
+ [MT_TMAC_BASE] = 0x24000,
+ [MT_RMAC_BASE] = 0x25000,
+ [MT_DMA_BASE] = 0x27000,
+ [MT_WTBL_BASE_ON] = 0x29000,
+ [MT_WTBL_BASE_OFF] = 0x29800,
+ [MT_LPON_BASE] = 0x2b000,
+ [MT_MIB_BASE] = 0x2d000,
+ [MT_WTBL_BASE_ADDR] = 0x30000,
+ [MT_PCIE_REMAP_BASE2] = 0x90000,
+ [MT_TOP_MISC_BASE] = 0xc0000,
+ [MT_EFUSE_ADDR_BASE] = 0x78011000,
+};
+
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+ u32 base, offset;
+
+ if (is_mt7663(&dev->mt76)) {
+ base = addr & MT7663_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT7663_MCU_PCIE_REMAP_2_OFFSET;
+ } else {
+ base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+ }
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
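/* Usage sketch (hypothetical helper): registers outside the statically
 * mapped ranges are reached through the remap window; the window base
 * is shared device state, so callers must not race the remap:
 */

static u32 mt7615_rr_remapped(struct mt7615_dev *dev, u32 addr)
{
	return mt76_rr(dev, mt7615_reg_map(dev, addr));
}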
+
+static void
+mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7615_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ napi_schedule(&dev->mt76.tx_napi);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
+ int irq, const u32 *map)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
+ .drv_flags = MT_DRV_TXWI_NO_FREE,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7615_tx_prepare_skb,
+ .tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_skb = mt7615_queue_rx_skb,
+ .rx_poll_complete = mt7615_rx_poll_complete,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7615_mac_sta_add,
+ .sta_remove = mt7615_mac_sta_remove,
+ .update_survey = mt7615_update_channel,
+ };
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7615_ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
+
+ dev->reg_map = map;
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ if (is_mt7663(mdev))
+ mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
+
+ ret = mt7615_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
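/* Likely call site (sketch, inferred from the pci.c changes below):
 *
 *	map = id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map;
 *	return mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
 *				 pdev->irq, map);
 */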
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 21486831172c..676ca622c35a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -6,6 +6,7 @@
#include <linux/interrupt.h>
#include <linux/ktime.h>
+#include <linux/regmap.h>
#include "../mt76.h"
#include "regs.h"
@@ -17,9 +18,11 @@
MT7615_MAX_INTERFACES)
#define MT7615_WATCHDOG_TIME (HZ / 10)
+#define MT7615_RESET_TIMEOUT (30 * HZ)
#define MT7615_RATE_RETRY 2
#define MT7615_TX_RING_SIZE 1024
+#define MT7615_TX_MGMT_RING_SIZE 128
#define MT7615_TX_MCU_RING_SIZE 128
#define MT7615_TX_FWDL_RING_SIZE 128
@@ -30,14 +33,34 @@
#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin"
#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin"
+#define MT7622_FIRMWARE_N9 "mediatek/mt7622_n9.bin"
+#define MT7622_ROM_PATCH "mediatek/mt7622_rom_patch.bin"
+
+#define MT7615_FIRMWARE_V1 1
+#define MT7615_FIRMWARE_V2 2
+#define MT7615_FIRMWARE_V3 3
+
+#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_v3.bin"
+#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin"
+
#define MT7615_EEPROM_SIZE 1024
#define MT7615_TOKEN_SIZE 4096
#define MT_FRAC_SCALE 12
#define MT_FRAC(val, div) (((val) << MT_FRAC_SCALE) / (div))
+#define MT_CHFREQ_VALID BIT(7)
+#define MT_CHFREQ_DBDC_IDX BIT(6)
+#define MT_CHFREQ_SEQ GENMASK(5, 0)
+
+#define MT7615_BAR_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+
struct mt7615_vif;
struct mt7615_sta;
+struct mt7615_dfs_pulse;
+struct mt7615_dfs_pattern;
enum mt7615_hw_txq_id {
MT7615_TXQ_MAIN,
@@ -46,6 +69,16 @@ enum mt7615_hw_txq_id {
MT7615_TXQ_FWDL,
};
+enum mt7622_hw_txq_id {
+ MT7622_TXQ_AC0,
+ MT7622_TXQ_AC1,
+ MT7622_TXQ_AC2,
+ MT7622_TXQ_FWDL = MT7615_TXQ_FWDL,
+ MT7622_TXQ_AC3,
+ MT7622_TXQ_MGMT,
+ MT7622_TXQ_MCU = 15,
+};
+
struct mt7615_rate_set {
struct ieee80211_tx_rate probe_rate;
struct ieee80211_tx_rate rates[4];
@@ -79,12 +112,90 @@ struct mt7615_vif {
struct mt7615_sta sta;
};
+struct mib_stats {
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+};
+
+struct mt7615_phy {
+ struct mt76_phy *mt76;
+ struct mt7615_dev *dev;
+
+ u32 rxfilter;
+ u32 omac_mask;
+
+ u16 noise;
+
+ unsigned long last_cca_adj;
+ int false_cca_ofdm, false_cca_cck;
+ s8 ofdm_sensitivity;
+ s8 cck_sensitivity;
+
+ u16 chainmask;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ u8 chfreq;
+ u8 rdd_state;
+ int dfs_state;
+
+ __le32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mib_stats mib;
+};
+
+#define mt7615_mcu_add_tx_ba(dev, ...) (dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__)
+#define mt7615_mcu_add_rx_ba(dev, ...) (dev)->mcu_ops->add_rx_ba((dev), __VA_ARGS__)
+#define mt7615_mcu_sta_add(dev, ...) (dev)->mcu_ops->sta_add((dev), __VA_ARGS__)
+#define mt7615_mcu_add_dev_info(dev, ...) (dev)->mcu_ops->add_dev_info((dev), __VA_ARGS__)
+#define mt7615_mcu_add_bss_info(dev, ...) (dev)->mcu_ops->add_bss_info((dev), __VA_ARGS__)
+#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
+#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
+struct mt7615_mcu_ops {
+ int (*add_tx_ba)(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+ int (*add_rx_ba)(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+ int (*sta_add)(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+ int (*add_dev_info)(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
+ int (*add_bss_info)(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+ int (*add_beacon_offload)(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, bool enable);
+ int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
+};
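/* Usage note (sketch): callers stay firmware-agnostic. mcu.c installs
 * the ops table at init time (e.g. uni_update_ops once v3 firmware is
 * detected on mt7663), after which
 *
 *	mt7615_mcu_sta_add(dev, vif, sta, true);
 *
 * expands to dev->mcu_ops->sta_add(dev, vif, sta, true). */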
+
struct mt7615_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ struct mt7615_phy phy;
u32 vif_mask;
u32 omac_mask;
- __le32 rx_ampdu_ts;
+ u16 chainmask;
+
+ const struct mt7615_mcu_ops *mcu_ops;
+ struct regmap *infracfg;
+ const u32 *reg_map;
+
+ struct work_struct mcu_work;
+
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ u32 reset_state;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
@@ -96,17 +207,15 @@ struct mt7615_dev {
s16 power;
} radar_pattern;
u32 hw_pattern;
- int dfs_state;
- int false_cca_ofdm, false_cca_cck;
- unsigned long last_cca_adj;
u8 mac_work_count;
- s8 ofdm_sensitivity;
- s8 cck_sensitivity;
bool scs_en;
+ bool fw_debug;
spinlock_t token_lock;
struct idr token;
+
+ u8 fw_ver;
};
enum {
@@ -135,11 +244,6 @@ enum {
};
enum {
- MT_HW_RDD0,
- MT_HW_RDD1,
-};
-
-enum {
MT_RX_SEL0,
MT_RX_SEL1,
};
@@ -158,13 +262,57 @@ enum mt7615_rdd_cmd {
RDD_RESUME_BF,
};
+static inline struct mt7615_phy *
+mt7615_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7615_dev *
+mt7615_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7615_dev, mt76);
+}
+
+static inline struct mt7615_phy *
+mt7615_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt76_phy *phy = dev->mt76.phy2;
+
+ if (!phy)
+ return NULL;
+
+ return phy->priv;
+}
+
extern const struct ieee80211_ops mt7615_ops;
+extern const u32 mt7615e_reg_map[__MT_BASE_MAX];
+extern const u32 mt7663e_reg_map[__MT_BASE_MAX];
extern struct pci_driver mt7615_pci_driver;
+extern struct platform_driver mt7622_wmac_driver;
+#ifdef CONFIG_MT7622_WMAC
+int mt7622_wmac_init(struct mt7615_dev *dev);
+#else
+static inline int mt7622_wmac_init(struct mt7615_dev *dev)
+{
+ return 0;
+}
+#endif
+
+int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
+ int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+void mt7615_init_device(struct mt7615_dev *dev);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
+int mt7615_register_ext_phy(struct mt7615_dev *dev);
+void mt7615_unregister_ext_phy(struct mt7615_dev *dev);
int mt7615_eeprom_init(struct mt7615_dev *dev);
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
@@ -172,58 +320,37 @@ int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
int mt7615_dma_init(struct mt7615_dev *dev);
void mt7615_dma_cleanup(struct mt7615_dev *dev);
int mt7615_mcu_init(struct mt7615_dev *dev);
-int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable);
-int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en);
-void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
-int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- bool enable);
-int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_sta *sta);
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
-int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool en);
-int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool en);
-int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en);
-int mt7615_mcu_set_channel(struct mt7615_dev *dev);
+int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
-int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool add);
-int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool add);
-int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
enum mt7615_rdd_cmd cmd, u8 index,
u8 rx_sel, u8 val);
-int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev);
-int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev);
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
+int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
static inline bool is_mt7622(struct mt76_dev *dev)
{
+ if (!IS_ENABLED(CONFIG_MT7622_WMAC))
+ return false;
+
return mt76_chip(dev) == 0x7622;
}
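/* Note: the IS_ENABLED() test above is a compile-time constant, so when
 * CONFIG_MT7622_WMAC is disabled the compiler can discard every
 * mt7622-only branch guarded by is_mt7622(). */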
-static inline void mt7615_dfs_check_channel(struct mt7615_dev *dev)
+static inline bool is_mt7615(struct mt76_dev *dev)
{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- u32 freq = dev->mt76.chandef.chan->center_freq;
- struct ieee80211_hw *hw = mt76_hw(dev);
+ return mt76_chip(dev) == 0x7615;
+}
- if (hw->conf.chandef.chan->center_freq != freq ||
- hw->conf.chandef.width != width)
- dev->dfs_state = -1;
+static inline bool is_mt7663(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7663;
}
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
@@ -239,27 +366,32 @@ static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
void mt7615_update_channel(struct mt76_dev *mdev);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
void mt7615_mac_reset_counters(struct mt7615_dev *dev);
-void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev);
+void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable);
+void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key);
+ struct ieee80211_key_conf *key, bool beacon);
+void mt7615_mac_set_timing(struct mt7615_phy *phy);
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
+void mt7615_mac_reset_work(struct work_struct *work);
+int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
+int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
-int mt7615_mcu_init_mac(struct mt7615_dev *dev);
-int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
-int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
+int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
+int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val);
int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
-int mt7615_mcu_set_tx_power(struct mt7615_dev *dev);
void mt7615_mcu_exit(struct mt7615_dev *dev);
+void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq);
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -272,17 +404,20 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
-int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void mt7615_mac_work(struct work_struct *work);
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
struct mt76_txwi_cache *txwi);
-int mt76_dfs_start_rdd(struct mt7615_dev *dev, bool force);
-int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev);
+int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
+int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
+ const struct mt7615_dfs_pulse *pulse);
+int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
+ const struct mt7615_dfs_pattern *pattern);
+int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
+int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
int mt7615_init_debugfs(struct mt7615_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
new file mode 100644
index 000000000000..d3eb49d83b98
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#if !defined(__MT7615_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7615_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7615.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7615
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define TOKEN_ENTRY __field(u16, token)
+#define TOKEN_ASSIGN __entry->token = token
+#define TOKEN_PR_FMT " %d"
+#define TOKEN_PR_ARG __entry->token
+
+DECLARE_EVENT_CLASS(dev_token,
+ TP_PROTO(struct mt7615_dev *dev, u16 token),
+ TP_ARGS(dev, token),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TOKEN_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ TOKEN_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TOKEN_PR_FMT,
+ DEV_PR_ARG, TOKEN_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_token, mac_tx_free,
+ TP_PROTO(struct mt7615_dev *dev, u16 token),
+ TP_ARGS(dev, token)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt7615_trace
+
+#include <trace/define_trace.h>
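
The header above follows the kernel's standard tracepoint idiom: DECLARE_EVENT_CLASS() defines the record layout once, DEFINE_EVENT() stamps out the concrete mac_tx_free event from that class, and the TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE redefinitions let <trace/define_trace.h> re-read this same file to emit the registration boilerplate. A minimal sketch of the call side -- the wrapper function is invented for illustration, only trace_mac_tx_free() comes from the DEFINE_EVENT() above, and the call compiles down to a disabled static branch when the event is off:

	/* hypothetical caller, kernel context assumed */
	static void example_report_tx_free(struct mt7615_dev *dev, u16 token)
	{
		trace_mac_tx_free(dev, token);	/* records wiphy name + token */
	}
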
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 1eb1eb659c3f..c8d0f893a47f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -10,84 +10,17 @@
#include <linux/pci.h>
#include "mt7615.h"
-#include "mac.h"
static const struct pci_device_id mt7615_pci_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7615) },
+ { PCI_DEVICE(0x14c3, 0x7663) },
{ },
};
-u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
-{
- u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
- u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
-
- mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
-
- return MT_PCIE_REMAP_BASE_2 + offset;
-}
-
-static void
-mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-
- mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
-}
-
-static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
-{
- struct mt7615_dev *dev = dev_instance;
- u32 intr;
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
- return IRQ_NONE;
-
- intr &= dev->mt76.mmio.irqmask;
-
- if (intr & MT_INT_TX_DONE_ALL) {
- mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
- napi_schedule(&dev->mt76.tx_napi);
- }
-
- if (intr & MT_INT_RX_DONE(0)) {
- mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
- napi_schedule(&dev->mt76.napi[0]);
- }
-
- if (intr & MT_INT_RX_DONE(1)) {
- mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
- napi_schedule(&dev->mt76.napi[1]);
- }
-
- return IRQ_HANDLED;
-}
-
static int mt7615_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static const struct mt76_driver_ops drv_ops = {
- /* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE,
- .survey_flags = SURVEY_INFO_TIME_TX |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_BSS_RX,
- .tx_prepare_skb = mt7615_tx_prepare_skb,
- .tx_complete_skb = mt7615_tx_complete_skb,
- .rx_skb = mt7615_queue_rx_skb,
- .rx_poll_complete = mt7615_rx_poll_complete,
- .sta_ps = mt7615_sta_ps,
- .sta_add = mt7615_sta_add,
- .sta_assoc = mt7615_sta_assoc,
- .sta_remove = mt7615_sta_remove,
- .update_survey = mt7615_update_channel,
- };
- struct mt7615_dev *dev;
- struct mt76_dev *mdev;
+ const u32 *map;
int ret;
ret = pcim_enable_device(pdev);
@@ -104,31 +37,9 @@ static int mt7615_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
- &drv_ops);
- if (!mdev)
- return -ENOMEM;
-
- dev = container_of(mdev, struct mt7615_dev, mt76);
- mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
-
- mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
- (mt76_rr(dev, MT_HW_REV) & 0xff);
- dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
-
- ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (ret)
- goto error;
-
- ret = mt7615_register_device(dev);
- if (ret)
- goto error;
-
- return 0;
-error:
- ieee80211_free_hw(mt76_hw(dev));
- return ret;
+ map = id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map;
+ return mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ pdev->irq, map);
}
static void mt7615_pci_remove(struct pci_dev *pdev)
@@ -146,10 +57,9 @@ struct pci_driver mt7615_pci_driver = {
.remove = mt7615_pci_remove,
};
-module_pci_driver(mt7615_pci_driver);
-
MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
MODULE_FIRMWARE(MT7615_ROM_PATCH);
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_ROM_PATCH);
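
The probe path is now table-driven: the interrupt handler, mt76_driver_ops and device registration move into shared mmio code, and the PCI glue only selects a per-chip register map by device ID before calling mt7615_mmio_probe(). A standalone sketch of that dispatch, with the map contents invented (the real arrays are indexed by enum mt7615_reg_base from regs.h below):

	#include <stdint.h>

	/* example values only; the real tables live in the driver */
	static const uint32_t mt7615e_map_example[] = { 0x00000, 0x01000 };
	static const uint32_t mt7663e_map_example[] = { 0x10000, 0x11000 };

	static const uint32_t *example_pick_reg_map(uint16_t pci_device_id)
	{
		/* mirrors: id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map */
		return pci_device_id == 0x7663 ? mt7663e_map_example
					       : mt7615e_map_example;
	}
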
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 61a4aa9ac6e6..1e0d95b917e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -4,13 +4,46 @@
#ifndef __MT7615_REGS_H
#define __MT7615_REGS_H
-#define MT_HW_REV 0x1000
-#define MT_HW_CHIPID 0x1008
-#define MT_TOP_STRAP_STA 0x1010
+enum mt7615_reg_base {
+ MT_TOP_CFG_BASE,
+ MT_HW_BASE,
+ MT_DMA_SHDL_BASE,
+ MT_PCIE_REMAP_2,
+ MT_ARB_BASE,
+ MT_HIF_BASE,
+ MT_CSR_BASE,
+ MT_PHY_BASE,
+ MT_CFG_BASE,
+ MT_AGG_BASE,
+ MT_TMAC_BASE,
+ MT_RMAC_BASE,
+ MT_DMA_BASE,
+ MT_WTBL_BASE_ON,
+ MT_WTBL_BASE_OFF,
+ MT_LPON_BASE,
+ MT_MIB_BASE,
+ MT_WTBL_BASE_ADDR,
+ MT_PCIE_REMAP_BASE2,
+ MT_TOP_MISC_BASE,
+ MT_EFUSE_ADDR_BASE,
+ __MT_BASE_MAX,
+};
+
+#define MT_HW_INFO_BASE ((dev)->reg_map[MT_HW_BASE])
+#define MT_HW_INFO(ofs) (MT_HW_INFO_BASE + (ofs))
+#define MT_HW_REV MT_HW_INFO(0x000)
+#define MT_HW_CHIPID MT_HW_INFO(0x008)
+#define MT_TOP_STRAP_STA MT_HW_INFO(0x010)
#define MT_TOP_3NSS BIT(24)
-#define MT_TOP_MISC2 0x1134
+
+#define MT_TOP_OFF_RSV 0x1128
+#define MT_TOP_OFF_RSV_FW_STATE GENMASK(18, 16)
+
+#define MT_TOP_MISC2 ((dev)->reg_map[MT_TOP_CFG_BASE] + 0x134)
#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
+#define MT7663_TOP_MISC2_FW_STATE GENMASK(3, 1)
+
#define MT_MCU_BASE 0x2000
#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
@@ -19,26 +52,39 @@
#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
#define MT_PCIE_REMAP_BASE_1 0x40000
-#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504)
+#define MT_MCU_PCIE_REMAP_2 ((dev)->reg_map[MT_PCIE_REMAP_2])
#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
-#define MT_PCIE_REMAP_BASE_2 0x80000
+#define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2])
+
+#define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs))
+
+#define MT7663_MCU_PCIE_REMAP_2_OFFSET GENMASK(15, 0)
+#define MT7663_MCU_PCIE_REMAP_2_BASE GENMASK(31, 16)
-#define MT_HIF_BASE 0x4000
-#define MT_HIF(ofs) (MT_HIF_BASE + (ofs))
+#define MT_HIF2_BASE 0xf0000
+#define MT_HIF2(ofs) (MT_HIF2_BASE + (ofs))
+#define MT_PCIE_IRQ_ENABLE MT_HIF2(0x188)
#define MT_CFG_LPCR_HOST MT_HIF(0x1f0)
#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_MCU_INT_EVENT MT_HIF(0x1f8)
+#define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_PDMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
#define MT_INT_SOURCE_CSR MT_HIF(0x200)
#define MT_INT_MASK_CSR MT_HIF(0x204)
#define MT_DELAY_INT_CFG MT_HIF(0x210)
#define MT_INT_RX_DONE(_n) BIT(_n)
#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
-#define MT_INT_TX_DONE_ALL GENMASK(7, 4)
+#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_MCU_CMD BIT(30)
#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
@@ -49,6 +95,7 @@
#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0 BIT(9)
+#define MT_WPDMA_GLO_CFG_BYPASS_TX_SCH BIT(9) /* MT7622 */
#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21 GENMASK(23, 22)
@@ -58,6 +105,22 @@
#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
+#define MT_WPDMA_MEM_RNG_ERR MT_HIF(0x224)
+
+#define MT_MCU_CMD MT_HIF(0x234)
+#define MT_MCU_CMD_CLEAR_FW_OWN BIT(0)
+#define MT_MCU_CMD_STOP_PDMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_PDMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_LMAC_ERROR BIT(24)
+#define MT_MCU_CMD_PSE_ERROR BIT(25)
+#define MT_MCU_CMD_PLE_ERROR BIT(26)
+#define MT_MCU_CMD_PDMA_ERROR BIT(27)
+#define MT_MCU_CMD_PCIE_ERROR BIT(28)
+#define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24))
+
#define MT_TX_RING_BASE MT_HIF(0x300)
#define MT_RX_RING_BASE MT_HIF(0x400)
@@ -67,6 +130,9 @@
#define MT_WPDMA_ABT_CFG MT_HIF(0x530)
#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534)
+#define MT_CSR(ofs) ((dev)->reg_map[MT_CSR_BASE] + (ofs))
+#define MT_CONN_HIF_ON_LPCTL MT_CSR(0x000)
+
#define MT_PLE_BASE 0x8000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
@@ -78,41 +144,40 @@
#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
((n) << 2))
-#define MT_WF_PHY_BASE 0x10000
+#define MT_WF_PHY_BASE ((dev)->reg_map[MT_PHY_BASE])
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
-#define MT_WF_PHY_WF2_RFCTRL0 MT_WF_PHY(0x1900)
+#define MT_WF_PHY_WF2_RFCTRL0(n) MT_WF_PHY(0x1900 + (n) * 0x400)
#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
-#define MT_WF_PHY_R0_B0_PHYMUX_5 MT_WF_PHY(0x0614)
+#define MT_WF_PHY_R0_PHYMUX_5(_phy) MT_WF_PHY(0x0614 + ((_phy) << 9))
-#define MT_WF_PHY_R0_B0_PHYCTRL_STS0 MT_WF_PHY(0x020c)
+#define MT_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x020c + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
-#define MT_WF_PHY_R0_B0_PHYCTRL_STS5 MT_WF_PHY(0x0220)
+#define MT_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0220 + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
-#define MT_WF_PHY_B0_MIN_PRI_PWR MT_WF_PHY(0x229c)
-#define MT_WF_PHY_B0_PD_OFDM_MASK GENMASK(28, 20)
-#define MT_WF_PHY_B0_PD_OFDM(v) ((v) << 20)
-#define MT_WF_PHY_B0_PD_BLK BIT(19)
+#define MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x084 : 0x229c)
+#define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \
+ GENMASK(28, 20))
+#define MT_WF_PHY_PD_OFDM(_phy, v) ((v) << ((_phy) ? 16 : 20))
+#define MT_WF_PHY_PD_BLK(_phy) ((_phy) ? BIT(25) : BIT(19))
-#define MT_WF_PHY_B1_MIN_PRI_PWR MT_WF_PHY(0x084)
-#define MT_WF_PHY_B1_PD_OFDM_MASK GENMASK(24, 16)
-#define MT_WF_PHY_B1_PD_OFDM(v) ((v) << 16)
-#define MT_WF_PHY_B1_PD_BLK BIT(25)
+#define MT_WF_PHY_RXTD_BASE MT_WF_PHY(0x2200)
+#define MT_WF_PHY_RXTD(_n) (MT_WF_PHY_RXTD_BASE + ((_n) << 2))
-#define MT_WF_PHY_B0_RXTD_CCK_PD MT_WF_PHY(0x2310)
-#define MT_WF_PHY_B0_PD_CCK_MASK GENMASK(8, 1)
-#define MT_WF_PHY_B0_PD_CCK(v) ((v) << 1)
+#define MT_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2314 : 0x2310)
+#define MT_WF_PHY_PD_CCK_MASK(_phy) ((_phy) ? GENMASK(31, 24) : \
+ GENMASK(8, 1))
+#define MT_WF_PHY_PD_CCK(_phy, v) ((v) << ((_phy) ? 24 : 1))
-#define MT_WF_PHY_B1_RXTD_CCK_PD MT_WF_PHY(0x2314)
-#define MT_WF_PHY_B1_PD_CCK_MASK GENMASK(31, 24)
-#define MT_WF_PHY_B1_PD_CCK(v) ((v) << 24)
+#define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00)
+#define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2))
-#define MT_WF_CFG_BASE 0x20200
+#define MT_WF_CFG_BASE ((dev)->reg_map[MT_CFG_BASE])
#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs))
#define MT_CFG_CCR MT_WF_CFG(0x000)
@@ -121,7 +186,7 @@
#define MT_CFG_CCR_MAC_D1_2X_GC_EN BIT(30)
#define MT_CFG_CCR_MAC_D0_2X_GC_EN BIT(31)
-#define MT_WF_AGG_BASE 0x20a00
+#define MT_WF_AGG_BASE ((dev)->reg_map[MT_AGG_BASE])
#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
#define MT_AGG_ARCR MT_WF_AGG(0x010)
@@ -131,8 +196,8 @@
#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
-#define MT_AGG_ARUCR MT_WF_AGG(0x018)
-#define MT_AGG_ARDCR MT_WF_AGG(0x01c)
+#define MT_AGG_ARUCR(_band) MT_WF_AGG(0x018 + (_band) * 0x100)
+#define MT_AGG_ARDCR(_band) MT_WF_AGG(0x01c + (_band) * 0x100)
#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
@@ -142,8 +207,7 @@
#define MT_AGG_ASRCR1 MT_WF_AGG(0x064)
#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
-#define MT_AGG_ACR0 MT_WF_AGG(0x070)
-#define MT_AGG_ACR1 MT_WF_AGG(0x170)
+#define MT_AGG_ACR(_band) MT_WF_AGG(0x070 + (_band) * 0x100)
#define MT_AGG_ACR_NO_BA_RULE BIT(0)
#define MT_AGG_ACR_NO_BA_AR_RULE BIT(1)
#define MT_AGG_ACR_PKT_TIME_EN BIT(2)
@@ -153,24 +217,43 @@
#define MT_AGG_SCR MT_WF_AGG(0x0fc)
#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3)
-#define MT_WF_TMAC_BASE 0x21000
+#define MT_WF_ARB_BASE ((dev)->reg_map[MT_ARB_BASE])
+#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs))
+
+#define MT_ARB_SCR MT_WF_ARB(0x080)
+#define MT_ARB_SCR_TX0_DISABLE BIT(8)
+#define MT_ARB_SCR_RX0_DISABLE BIT(9)
+#define MT_ARB_SCR_TX1_DISABLE BIT(10)
+#define MT_ARB_SCR_RX1_DISABLE BIT(11)
+
+#define MT_WF_TMAC_BASE ((dev)->reg_map[MT_TMAC_BASE])
#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
-#define MT_TMAC_TRCR0 MT_WF_TMAC(0x09c)
-#define MT_TMAC_TRCR1 MT_WF_TMAC(0x070)
+#define MT_TMAC_CDTR MT_WF_TMAC(0x090)
+#define MT_TMAC_ODTR MT_WF_TMAC(0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_TRCR(_band) MT_WF_TMAC((_band) ? 0x070 : 0x09c)
#define MT_TMAC_TRCR_CCA_SEL GENMASK(31, 30)
#define MT_TMAC_TRCR_SEC_CCA_SEL GENMASK(29, 28)
+#define MT_TMAC_ICR(_band) MT_WF_TMAC((_band) ? 0x074 : 0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
#define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4)
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12)
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
-#define MT_WF_RMAC_BASE 0x21200
+#define MT_WF_RMAC_BASE ((dev)->reg_map[MT_RMAC_BASE])
#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
-#define MT_WF_RFCR MT_WF_RMAC(0x000)
+#define MT_WF_RFCR(_band) MT_WF_RMAC((_band) ? 0x100 : 0x000)
#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
#define MT_WF_RFCR_DROP_VERSION BIT(3)
@@ -193,13 +276,15 @@
#define MT_WF_RFCR_DROP_NDPA BIT(20)
#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
-#define MT_WF_RFCR1 MT_WF_RMAC(0x004)
+#define MT_WF_RFCR1(_band) MT_WF_RMAC((_band) ? 0x104 : 0x004)
#define MT_WF_RFCR1_DROP_ACK BIT(4)
#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
#define MT_WF_RFCR1_DROP_BA BIT(6)
#define MT_WF_RFCR1_DROP_CFEND BIT(7)
#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+#define MT_CHFREQ(_band) MT_WF_RMAC((_band) ? 0x130 : 0x030)
+
#define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4)
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
@@ -207,17 +292,17 @@
#define MT_WF_RMAC_MIB_AIRTIME0 MT_WF_RMAC(0x0380)
#define MT_WF_RMAC_MIB_TIME5 MT_WF_RMAC(0x03d8)
+#define MT_WF_RMAC_MIB_TIME6 MT_WF_RMAC(0x03dc)
#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
-#define MT_WF_DMA_BASE 0x21800
+#define MT_WF_DMA_BASE ((dev)->reg_map[MT_DMA_BASE])
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
-#define MT_DMA_BN0RCFR0 MT_WF_DMA(0x070)
-#define MT_DMA_BN1RCFR0 MT_WF_DMA(0x0b0)
+#define MT_DMA_RCFR0(_band) MT_WF_DMA(0x070 + (_band) * 0x40)
#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3)
#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4)
@@ -225,10 +310,10 @@
#define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24)
#define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26)
-#define MT_WTBL_BASE 0x30000
+#define MT_WTBL_BASE(dev) ((dev)->reg_map[MT_WTBL_BASE_ADDR])
#define MT_WTBL_ENTRY_SIZE 256
-#define MT_WTBL_OFF_BASE 0x23400
+#define MT_WTBL_OFF_BASE ((dev)->reg_map[MT_WTBL_BASE_OFF])
#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
#define MT_WTBL_W0_KEY_IDX GENMASK(24, 23)
@@ -245,7 +330,11 @@
#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_ON_BASE 0x23000
+#define MT_TOP_MISC(ofs) ((dev)->reg_map[MT_TOP_MISC_BASE] + (ofs))
+#define MT_CONN_ON_MISC MT_TOP_MISC(0x1140)
+#define MT_TOP_MISC2_FW_N9_RDY BIT(2)
+
+#define MT_WTBL_ON_BASE ((dev)->reg_map[MT_WTBL_BASE_ON])
#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
#define MT_WTBL_RICR0 MT_WTBL_ON(0x010)
@@ -281,8 +370,7 @@
#define MT_WTBL_W27_CC_BW_SEL GENMASK(6, 5)
-#define MT_LPON_BASE 0x24200
-#define MT_LPON(_n) (MT_LPON_BASE + (_n))
+#define MT_LPON(_n) ((dev)->reg_map[MT_LPON_BASE] + (_n))
#define MT_LPON_T0CR MT_LPON(0x010)
#define MT_LPON_T0CR_MODE GENMASK(1, 0)
@@ -290,13 +378,13 @@
#define MT_LPON_UTTR0 MT_LPON(0x018)
#define MT_LPON_UTTR1 MT_LPON(0x01c)
-#define MT_WF_MIB_BASE 0x24800
+#define MT_WF_MIB_BASE ((dev)->reg_map[MT_MIB_BASE])
#define MT_WF_MIB(ofs) (MT_WF_MIB_BASE + (ofs))
#define MT_MIB_M0_MISC_CR MT_WF_MIB(0x00c)
-#define MT_MIB_MB_SDR0(n) MT_WF_MIB(0x100 + ((n) << 4))
-#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
-#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR3(n) MT_WF_MIB(0x014 + ((n) << 9))
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
#define MT_MIB_SDR9(n) MT_WF_MIB(0x02c + ((n) << 9))
#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
@@ -309,9 +397,59 @@
#define MT_MIB_SDR37(n) MT_WF_MIB(0x09c + ((n) << 9))
#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(0x100 + ((_band) << 9) + \
+ ((n) << 4))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(0x104 + ((_band) << 9) + \
+ ((n) << 4))
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+
#define MT_TX_AGG_CNT(n) MT_WF_MIB(0xa8 + ((n) << 2))
-#define MT_EFUSE_BASE 0x81070000
+#define MT_DMA_SHDL(ofs) ((dev)->reg_map[MT_DMA_SHDL_BASE] + (ofs))
+
+#define MT_DMASHDL_BASE 0x5000a000
+#define MT_DMASHDL_OPTIONAL 0x008
+#define MT_DMASHDL_PAGE 0x00c
+
+#define MT_DMASHDL_REFILL 0x010
+
+#define MT_DMASHDL_PKT_MAX_SIZE 0x01c
+#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0)
+#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16)
+
+#define MT_DMASHDL_GROUP_QUOTA(_n) (0x020 + ((_n) << 2))
+#define MT_DMASHDL_GROUP_QUOTA_MIN GENMASK(11, 0)
+#define MT_DMASHDL_GROUP_QUOTA_MAX GENMASK(27, 16)
+
+#define MT_DMASHDL_SCHED_SET0 0x0b0
+#define MT_DMASHDL_SCHED_SET1 0x0b4
+
+#define MT_DMASHDL_Q_MAP(_n) (0x0d0 + ((_n) << 2))
+#define MT_DMASHDL_Q_MAP_MASK GENMASK(3, 0)
+#define MT_DMASHDL_Q_MAP_SHIFT(_n) (4 * ((_n) % 8))
+
+#define MT_LED_BASE_PHYS 0x80024000
+#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n))
+
+#define MT_LED_CTRL MT_LED_PHYS(0x00)
+
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+
+#define MT_EFUSE_BASE ((dev)->reg_map[MT_EFUSE_ADDR_BASE])
#define MT_EFUSE_BASE_CTRL 0x000
#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
@@ -328,4 +466,8 @@
#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
+/* INFRACFG host register range on MT7622 */
+#define MT_INFRACFG_MISC 0x700
+#define MT_INFRACFG_MISC_AP2CONN_WAKE BIT(1)
+
#endif
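
The central change in this header is indirection: fixed bases such as 0x21000 become lookups through (dev)->reg_map[], so one set of macros serves the MT7615, MT7622 and MT7663 layouts. Note that the macros expand a bare `dev`, so they only compile where a local variable of that name is in scope -- an implicit but deliberate convention. A reduced standalone sketch of the pattern, with all names and values invented:

	#include <stdint.h>

	enum example_reg_base { EX_HW_BASE, EX_CFG_BASE, __EX_BASE_MAX };

	struct example_dev {
		const uint32_t *reg_map;	/* indexed by enum example_reg_base */
	};

	/* like MT_HW_INFO(ofs): the base is resolved per device at runtime */
	#define EX_HW_INFO(ofs) ((dev)->reg_map[EX_HW_BASE] + (ofs))

	static uint32_t example_hw_rev_addr(struct example_dev *dev)
	{
		return EX_HW_INFO(0x000);	/* MT_HW_REV equivalent */
	}
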
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
new file mode 100644
index 000000000000..43aa49706c66
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include "mt7615.h"
+
+int mt7622_wmac_init(struct mt7615_dev *dev)
+{
+ struct device_node *np = dev->mt76.dev->of_node;
+
+ if (!is_mt7622(&dev->mt76))
+ return 0;
+
+ dev->infracfg = syscon_regmap_lookup_by_phandle(np, "mediatek,infracfg");
+ if (IS_ERR(dev->infracfg)) {
+ dev_err(dev->mt76.dev, "Cannot find infracfg controller\n");
+ return PTR_ERR(dev->infracfg);
+ }
+
+ return 0;
+}
+
+static int mt7622_wmac_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *mem_base;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get device IRQ\n");
+ return irq;
+ }
+
+ mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ return mt7615_mmio_probe(&pdev->dev, mem_base, irq, mt7615e_reg_map);
+}
+
+static int mt7622_wmac_remove(struct platform_device *pdev)
+{
+ struct mt7615_dev *dev = platform_get_drvdata(pdev);
+
+ mt7615_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt7622_wmac_of_match[] = {
+ { .compatible = "mediatek,mt7622-wmac" },
+ {},
+};
+
+struct platform_driver mt7622_wmac_driver = {
+ .driver = {
+ .name = "mt7622-wmac",
+ .of_match_table = mt7622_wmac_of_match,
+ },
+ .probe = mt7622_wmac_probe,
+ .remove = mt7622_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7622_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7622_ROM_PATCH);
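
mt7622_wmac_init() resolves the INFRACFG block as a syscon regmap by phandle instead of ioremapping it, so the register range can be shared with other consumers. A minimal sketch of the same lookup pattern; apart from the "mediatek,infracfg" property taken from the diff, the helper and its error policy are invented:

	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/regmap.h>

	/* hypothetical helper, kernel context assumed */
	static struct regmap *example_get_infracfg(struct device_node *np)
	{
		struct regmap *map;

		map = syscon_regmap_lookup_by_phandle(np, "mediatek,infracfg");
		if (IS_ERR(map))
			return NULL;	/* treat a missing phandle as absent */

		return map;
	}
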
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/trace.c b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c
new file mode 100644
index 000000000000..6c02d5aff68f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt7615_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index d1405528b504..9087607b621e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -109,7 +109,7 @@ static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
s8 val, lna_5g[3], lna_2g;
u16 rssi_offset;
@@ -129,7 +129,7 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
u8 val;
if (chandef->width == NL80211_CHAN_WIDTH_80) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 388b54cded1b..57f8d56737eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -264,12 +264,12 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
if (dev->mt76.cap.has_5ghz) {
/* overwrite unsupported features */
- mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
- mt76x0_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x0_vht_cap_mask(&dev->mphy.sband_5g.sband);
+ mt76x0_init_txpower(dev, &dev->mphy.sband_5g.sband);
}
if (dev->mt76.cap.has_2ghz)
- mt76x0_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x0_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt76x02_init_debugfs(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index b2ccf50512dc..700ae9c12f1d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -16,7 +16,7 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x0_phy_set_channel(dev, chandef);
mt76x02_mac_cc_reset(dev);
@@ -28,7 +28,7 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
}
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
@@ -44,9 +44,9 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
mt76x0_phy_set_txpower(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index e2974e0ae1fc..0b520ae08d01 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -20,7 +20,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -47,7 +47,7 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76x0e_stop_hw(dev);
}
@@ -67,6 +67,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
@@ -124,7 +125,7 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
return 0;
}
@@ -195,7 +196,7 @@ error:
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
- clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
index 038187b390ce..007c762c6db1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -126,7 +126,7 @@ int mt76x0e_mcu_init(struct mt76x02_dev *dev)
if (err < 0)
return err;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 2ecd45f8af90..09f34deb6ba1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -23,7 +23,7 @@ mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
int ret = 0;
u8 bank, reg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -ENODEV;
bank = MT_RF_BANK(offset);
@@ -62,7 +62,7 @@ static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
u32 val;
u8 bank, reg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -ENODEV;
bank = MT_RF_BANK(offset);
@@ -109,7 +109,7 @@ mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
};
WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
- &dev->mt76.state));
+ &dev->mphy.state));
return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
} else {
return mt76x0_rf_csr_wr(dev, offset, val);
@@ -127,7 +127,7 @@ static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset)
};
WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
- &dev->mt76.state));
+ &dev->mphy.state));
ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
val = pair.value;
} else {
@@ -502,7 +502,7 @@ mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
static void mt76x0_phy_tssi_dc_calibrate(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 val;
if (chan->band == NL80211_BAND_5GHZ)
@@ -543,7 +543,7 @@ static int
mt76x0_phy_tssi_adc_calibrate(struct mt76x02_dev *dev, s16 *ltssi,
u8 *info)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 val;
val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
@@ -696,7 +696,7 @@ mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
s8 target_power, s8 target_pa_power,
s16 ltssi)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int tssi_target = target_power << 12, tssi_slope;
int tssi_offset, tssi_db, ret;
u32 data;
@@ -844,12 +844,12 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
struct mt76_rate_power *t = &dev->mt76.rate_power;
s8 info;
- mt76x0_get_tx_power_per_rate(dev, dev->mt76.chandef.chan, t);
- mt76x0_get_power_info(dev, dev->mt76.chandef.chan, &info);
+ mt76x0_get_tx_power_per_rate(dev, dev->mphy.chandef.chan, t);
+ mt76x0_get_power_info(dev, dev->mphy.chandef.chan, &info);
mt76x02_add_rate_power_offset(t, info);
- mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
- dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
+ mt76x02_limit_rate_power(t, dev->txpower_conf);
+ dev->mphy.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info);
dev->target_power = info;
@@ -858,7 +858,7 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
u32 val, tx_alc, reg_val;
@@ -933,7 +933,7 @@ void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
};
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
int ch_group_index, freq, freq1;
u8 channel;
u32 val;
@@ -1037,7 +1037,7 @@ static void mt76x0_phy_temp_sensor(struct mt76x02_dev *dev)
if (abs(val - dev->cal.temp_vco) > 20) {
mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
- dev->mt76.chandef.chan->hw_value);
+ dev->mphy.chandef.chan->hw_value);
dev->cal.temp_vco = val;
}
if (abs(val - dev->cal.temp) > 30) {
@@ -1057,7 +1057,7 @@ static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
- if ((dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
+ if ((dev->mphy.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
!is_mt7630(dev))
mt76x02_phy_dfs_adjust_agc(dev);
}
@@ -1069,7 +1069,7 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
u8 gain_delta;
int low_gain;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
@@ -1155,7 +1155,6 @@ static void mt76x0_rf_patch_reg_array(struct mt76x02_dev *dev,
static void mt76x0_phy_rf_init(struct mt76x02_dev *dev)
{
int i;
- u8 val;
mt76x0_rf_patch_reg_array(dev, mt76x0_rf_central_tab,
ARRAY_SIZE(mt76x0_rf_central_tab));
@@ -1188,7 +1187,7 @@ static void mt76x0_phy_rf_init(struct mt76x02_dev *dev)
*/
mt76x0_rf_wr(dev, MT_RF(0, 22),
min_t(u8, dev->cal.rx.freq_offset, 0xbf));
- val = mt76x0_rf_rr(dev, MT_RF(0, 22));
+ mt76x0_rf_rr(dev, MT_RF(0, 22));
/* Reset procedure DAC during power-up:
* - set B0.R73<7>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 65ba9fc6ea0b..5535b9c0632f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -71,7 +71,7 @@ static void mt76x0_init_usb_dma(struct mt76x02_dev *dev)
static void mt76x0u_cleanup(struct mt76x02_dev *dev)
{
- clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x0_chip_onoff(dev, false, false);
mt76u_queues_deinit(&dev->mt76);
}
@@ -80,13 +80,13 @@ static void mt76x0u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mt76.mac_work);
mt76u_stop_tx(&dev->mt76);
mt76x02u_exit_beacon_config(dev);
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return;
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
@@ -112,7 +112,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -126,6 +126,7 @@ static const struct ieee80211_ops mt76x0u_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
@@ -172,8 +173,14 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
static int mt76x0u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = dev->mt76.hw;
+ struct mt76_usb *usb = &dev->mt76.usb;
int err;
+ usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
+
err = mt76u_alloc_queues(&dev->mt76);
if (err < 0)
goto out_err;
@@ -182,17 +189,13 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto out_err;
+ /* check hw scatter-gather support in order to enable A-MSDU */
+ hw->max_tx_fragments = usb->sg_en ? MT_TX_SG_MAX_SIZE : 1;
err = mt76x0_register_device(dev);
if (err < 0)
goto out_err;
- /* check hw sg support in order to enable AMSDU */
- if (dev->mt76.usb.sg_en)
- hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
- else
- hw->max_tx_fragments = 1;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
return 0;
@@ -240,7 +243,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
mt76x02u_init_mcu(mdev);
- ret = mt76u_init(mdev, usb_intf);
+ ret = mt76u_init(mdev, usb_intf, false);
if (ret)
goto err;
@@ -283,7 +286,7 @@ err:
static void mt76x0_disconnect(struct usb_interface *usb_intf)
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
- bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
if (!initialized)
return;
@@ -304,7 +307,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
mt76u_stop_rx(&dev->mt76);
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
mt76x0_chip_onoff(dev, false, false);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index 888a930a5e08..45502fd4693f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -168,7 +168,7 @@ int mt76x0u_mcu_init(struct mt76x02_dev *dev)
if (ret < 0)
return ret;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 0ca0bbfe8769..23040c193ca5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -70,18 +70,23 @@ struct mt76x02_beacon_ops {
(dev)->beacon_ops->pre_tbtt_enable(dev, enable)
struct mt76x02_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
struct mac_address macaddr_list[8];
struct mutex phy_mutex;
u16 vif_mask;
+ u16 chainmask;
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
spinlock_t txstatus_fifo_lock;
u32 tx_airtime;
+ u32 ampdu_ref;
struct sk_buff *rx_head;
@@ -93,8 +98,7 @@ struct mt76x02_dev {
const struct mt76x02_beacon_ops *beacon_ops;
- struct sk_buff *beacons[8];
- u8 beacon_data_mask;
+ u8 beacon_data_count;
u8 tbtt_count;
@@ -104,13 +108,14 @@ struct mt76x02_dev {
struct mt76x02_calibration cal;
+ int txpower_conf;
s8 target_power;
s8 target_power_delta[2];
bool enable_tpc;
bool no_2ghz;
- u8 coverage_class;
+ s16 coverage_class;
u8 slottime;
struct mt76x02_dfs_pattern_detector dfs_pd;
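
Replacing the lone mt76 member with an anonymous union of struct mt76_dev and struct mt76_phy lets driver code view the same leading storage either as the device or as its primary phy, which is why the state and chandef accesses throughout this series migrate from dev->mt76 to dev->mphy. The aliasing is only well-defined because the mt76 core places struct mt76_phy at the start of struct mt76_dev. A standalone sketch of the trick with invented types:

	#include <assert.h>

	struct core_phy { unsigned long state; };
	struct core_dev { struct core_phy phy; int other; };	/* phy must be first */

	struct driver_dev {
		union {			/* must be first */
			struct core_dev core;
			struct core_phy phy;
		};
	};

	int main(void)
	{
		struct driver_dev d = { 0 };

		/* both views name the same bytes */
		assert((void *)&d.core.phy == (void *)&d.phy);
		return 0;
	}
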
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 4209209ac940..5d034cec191b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -26,78 +26,40 @@ static int
mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
{
int beacon_len = dev->beacon_ops->slot_size;
- struct mt76x02_txwi txwi;
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
- mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
-
- mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
- offset += sizeof(txwi);
-
- mt76_wr_copy(dev, offset, skb->data, skb->len);
- return 0;
-}
-
-static int
-__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
- struct sk_buff *skb)
-{
- int beacon_len = dev->beacon_ops->slot_size;
- int beacon_addr = MT_BEACON_BASE + (beacon_len * bcn_idx);
- int ret = 0;
- int i;
-
- /* Prevent corrupt transmissions during update */
- mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+ /* USB devices already reserve enough skb headroom for the txwi, so
+ * writing it in place avoids a slow extra copy over USB.
+ */
+ if (mt76_is_usb(&dev->mt76)) {
+ struct mt76x02_txwi *txwi;
- if (skb) {
- ret = mt76x02_write_beacon(dev, beacon_addr, skb);
- if (!ret)
- dev->beacon_data_mask |= BIT(bcn_idx);
+ txwi = (struct mt76x02_txwi *)(skb->data - sizeof(*txwi));
+ mt76x02_mac_write_txwi(dev, txwi, skb, NULL, NULL, skb->len);
+ skb_push(skb, sizeof(*txwi));
} else {
- dev->beacon_data_mask &= ~BIT(bcn_idx);
- for (i = 0; i < beacon_len; i += 4)
- mt76_wr(dev, beacon_addr + i, 0);
- }
+ struct mt76x02_txwi txwi;
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+ }
- return ret;
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
}
-int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb)
+void mt76x02_mac_set_beacon(struct mt76x02_dev *dev,
+ struct sk_buff *skb)
{
- bool force_update = false;
- int bcn_idx = 0;
- int i;
+ int bcn_len = dev->beacon_ops->slot_size;
+ int bcn_addr = MT_BEACON_BASE + (bcn_len * dev->beacon_data_count);
- for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
- if (vif_idx == i) {
- force_update = !!dev->beacons[i] ^ !!skb;
- dev_kfree_skb(dev->beacons[i]);
- dev->beacons[i] = skb;
- __mt76x02_mac_set_beacon(dev, bcn_idx, skb);
- } else if (force_update && dev->beacons[i]) {
- __mt76x02_mac_set_beacon(dev, bcn_idx,
- dev->beacons[i]);
- }
-
- bcn_idx += !!dev->beacons[i];
- }
-
- for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
- if (!(dev->beacon_data_mask & BIT(i)))
- break;
-
- __mt76x02_mac_set_beacon(dev, i, NULL);
- }
-
- mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
- bcn_idx - 1);
- return 0;
+ if (!mt76x02_write_beacon(dev, bcn_addr, skb))
+ dev->beacon_data_count++;
+ dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
@@ -116,7 +78,6 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
dev->mt76.beacon_mask |= BIT(mvif->idx);
} else {
dev->mt76.beacon_mask &= ~BIT(mvif->idx);
- mt76x02_mac_set_beacon(dev, mvif->idx, NULL);
}
if (!!old_mask == !!dev->mt76.beacon_mask)
@@ -182,7 +143,7 @@ mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!skb)
return;
- mt76x02_mac_set_beacon(dev, mvif->idx, skb);
+ mt76x02_mac_set_beacon(dev, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);
@@ -241,17 +202,11 @@ EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);
void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
{
- int i;
-
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX));
mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
-
- for (i = 0; i < 8; i++)
- mt76x02_mac_set_beacon(dev, i, NULL);
-
mt76x02_set_beacon_offsets(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
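
Two points in the rewrite above are worth spelling out: beacon slots are no longer statically tied to vif indices (they are refilled sequentially each beat, counted by beacon_data_count), and on USB the txwi is written into the skb's reserved headroom so the descriptor and frame go out in one copy. A standalone sketch of that headroom trick with reduced types; the 16-byte descriptor is invented:

	#include <assert.h>
	#include <string.h>

	struct txwi { unsigned char desc[16]; };	/* stand-in descriptor */

	int main(void)
	{
		unsigned char buf[64];
		unsigned char *data = buf + sizeof(struct txwi);	/* headroom */
		size_t len = 8;

		memset(data, 0xab, len);		/* "frame body" */

		/* write the descriptor just below data, then "skb_push" it */
		struct txwi *t = (struct txwi *)(data - sizeof(*t));
		memset(t->desc, 0, sizeof(t->desc));
		data -= sizeof(*t);
		len += sizeof(*t);

		assert(data == buf && len == 24);	/* one contiguous buffer */
		return 0;
	}
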
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 5dec33ed8527..ff6a9e4daac0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -307,8 +307,8 @@ static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
pulse->period <= 100100);
break;
case NL80211_DFS_JP:
- if (dev->mt76.chandef.chan->center_freq >= 5250 &&
- dev->mt76.chandef.chan->center_freq <= 5350) {
+ if (dev->mphy.chandef.chan->center_freq >= 5250 &&
+ dev->mphy.chandef.chan->center_freq <= 5350) {
/* JPW53 */
if (pulse->w1 <= 130)
ret = (pulse->period >= 28360 &&
@@ -616,7 +616,7 @@ static void mt76x02_dfs_tasklet(unsigned long arg)
u32 engine_mask;
int i;
- if (test_bit(MT76_SCANNING, &dev->mt76.state))
+ if (test_bit(MT76_SCANNING, &dev->mphy.state))
goto out;
if (time_is_before_jiffies(dfs_pd->last_sw_check +
@@ -702,7 +702,7 @@ static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
u8 i, shift;
u32 data;
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_40:
shift = MT_DFS_NUM_ENGINES;
break;
@@ -722,8 +722,8 @@ static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
radar_specs = &etsi_radar_specs[shift];
break;
case NL80211_DFS_JP:
- if (dev->mt76.chandef.chan->center_freq >= 5250 &&
- dev->mt76.chandef.chan->center_freq <= 5350)
+ if (dev->mphy.chandef.chan->center_freq >= 5250 &&
+ dev->mphy.chandef.chan->center_freq <= 5350)
radar_specs = &jp_w53_radar_specs[shift];
else
radar_specs = &jp_w56_radar_specs[shift];
@@ -822,7 +822,7 @@ EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->mt76.region != NL80211_DFS_UNSET) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 4460548f346a..8b072277ea10 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -6,6 +6,7 @@
#include "mt76x02.h"
#include "mt76x02_trace.h"
+#include "trace.h"
void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
{
@@ -200,7 +201,7 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = dev->mphy.chandef.chan->band;
u16 val;
r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
@@ -344,7 +345,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
u16 txwi_flags = 0;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
+ u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
memset(txwi, 0, sizeof(*txwi));
@@ -386,7 +387,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
- txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
max_txpwr_adj);
txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
@@ -487,17 +488,17 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
first_rate |= st->pktid & MT_PKTID_RATE;
mt76x02_mac_process_tx_rate(&rate[0], first_rate,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
} else if (rate[0].idx < 0) {
if (!msta)
return;
mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
}
mt76x02_mac_process_tx_rate(&last_rate, st->rate,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
retry--;
@@ -630,7 +631,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (!len)
goto out;
- duration = mt76_calc_tx_airtime(&dev->mt76, &info, len);
+ duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);
spin_lock_bh(&dev->mt76.cc_lock);
dev->tx_airtime += duration;
@@ -679,7 +680,7 @@ mt76x02_mac_process_rate(struct mt76x02_dev *dev,
status->rate_idx = idx;
break;
case MT_PHY_TYPE_VHT: {
- u8 n_rxstream = dev->mt76.chainmask & 0xf;
+ u8 n_rxstream = dev->chainmask & 0xf;
status->encoding = RX_ENC_VHT;
status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
@@ -741,6 +742,8 @@ void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+ /* enable 7 additional beacon slots and control them via the bypass mask */
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);
for (i = 0; i < 16; i++)
mt76x02_mac_set_bssid(dev, i, null_addr);
@@ -769,13 +772,13 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
u16 rate = le16_to_cpu(rxwi->rate);
u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
+ int pad_len = 0, nstreams = dev->chainmask & 0xf;
s8 signal;
u8 pn_len;
u8 wcid;
int len;
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
return -EINVAL;
if (rxinfo & MT_RXINFO_L2PAD)
@@ -824,7 +827,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
if (rxinfo & MT_RXINFO_AMPDU) {
status->flag |= RX_FLAG_AMPDU_DETAILS;
- status->ampdu_ref = dev->mt76.ampdu_ref;
+ status->ampdu_ref = dev->ampdu_ref;
/*
* When receiving an A-MPDU subframe and RSSI info is not valid,
@@ -832,8 +835,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
* are coming. The last one will have valid RSSI info
*/
if (rxinfo & MT_RXINFO_RSSI) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
}
}
@@ -853,8 +856,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
signal = max_t(s8, signal, status->chain_signal[1]);
}
status->signal = signal;
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
+ status->freq = dev->mphy.chandef.chan->center_freq;
+ status->band = dev->mphy.chandef.chan->band;
status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
@@ -868,7 +871,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
u8 update = 1;
bool ret;
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
return;
trace_mac_txstat_poll(dev);
@@ -908,7 +911,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
txwi = (struct mt76x02_txwi *)txwi_ptr;
- trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+ trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
mt76_tx_complete_skb(mdev, e->skb);
}
@@ -1018,7 +1021,7 @@ void mt76x02_update_channel(struct mt76_dev *mdev)
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
- state = mdev->chan_state;
+ state = mdev->phy.chan_state;
state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
spin_lock_bh(&dev->mt76.cc_lock);
@@ -1074,7 +1077,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev)
dev->ed_silent = 0;
if (dev->ed_monitor) {
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
@@ -1184,7 +1187,7 @@ void mt76x02_mac_work(struct work_struct *work)
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
{
- dev->mt76.survey_time = ktime_get_boottime();
+ dev->mphy.survey_time = ktime_get_boottime();
mt76_wr(dev, MT_CH_TIME_CFG,
MT_CH_TIME_CFG_TIMER_EN |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 7d946aa77182..c70d17b2290c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -152,7 +152,7 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
int i;
for (i = 0; i < 500; i++) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return false;
switch (dev->bus->rr(dev, MAC_CSR0)) {
@@ -201,8 +201,7 @@ void mt76x02_mac_work(struct work_struct *work);
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
-int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb);
+void mt76x02_mac_set_beacon(struct mt76x02_dev *dev, struct sk_buff *skb);
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
struct ieee80211_vif *vif, bool enable);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 6274b6a24b07..5664749ad6c1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -24,11 +24,11 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&mdev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mcu.mutex);
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
tx_info = MT_MCU_MSG_TYPE_CMD |
FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
@@ -65,7 +65,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
}
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
@@ -141,7 +141,7 @@ int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
- while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
+ while ((skb = skb_dequeue(&dev->mt76.mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 4e2371c926d8..7dcc5d342e9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -9,7 +9,7 @@
#include "mt76x02.h"
#include "mt76x02_mcu.h"
-#include "mt76x02_trace.h"
+#include "trace.h"
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
@@ -24,10 +24,17 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
mt76x02_resync_beacon_timer(dev);
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ dev->beacon_data_count = 0;
+
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
+ mt76_wr(dev, MT_BCN_BYPASS_MASK,
+ 0xff00 | ~(0xff00 >> dev->beacon_data_count));
+
mt76_csa_check(&dev->mt76);
if (dev->mt76.csa_complete)
@@ -151,7 +158,7 @@ static void mt76x02_tx_tasklet(unsigned long data)
mt76x02_mac_poll_tx_status(dev, false);
mt76x02_process_tx_status_fifo(dev);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
@@ -261,10 +268,10 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
- trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
intr &= dev->mt76.mmio.irqmask;
@@ -402,7 +409,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
lockdep_assert_held(&dev->mt76.mutex);
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
rcu_read_lock();
ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
@@ -420,6 +427,8 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
if (!wcid)
continue;
+ rcu_assign_pointer(dev->mt76.wcid[i], NULL);
+
priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
@@ -441,7 +450,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
int i;
ieee80211_stop_queues(dev->mt76.hw);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->mt76.tx_tasklet);
@@ -452,6 +461,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_lock(&dev->mt76.mutex);
+ dev->mcu_timeout = 0;
if (restart)
mt76x02_reset_state(dev);
@@ -476,7 +486,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (restart)
mt76_mcu_restart(dev);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
@@ -496,7 +506,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_unlock(&dev->mt76.mutex);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
tasklet_enable(&dev->mt76.tx_tasklet);
napi_enable(&dev->mt76.tx_napi);
@@ -514,7 +524,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
ieee80211_restart_hw(dev->mt76.hw);
} else {
ieee80211_wake_queues(dev->mt76.hw);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
}
@@ -535,10 +545,6 @@ static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
restart:
mt76x02_watchdog_reset(dev);
- mutex_lock(&dev->mt76.mmio.mcu.mutex);
- dev->mcu_timeout = 0;
- mutex_unlock(&dev->mt76.mmio.mcu.mutex);
-
dev->tx_hang_reset++;
dev->tx_hang_check = 0;
memset(dev->mt76.tx_dma_idx, 0xff,
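
The new pre-TBTT sequence in this file first bypasses every beacon slot (mask 0xffff), rewrites the beacons while beacon_data_count increments, then re-enables exactly that many slots with 0xff00 | ~(0xff00 >> count); each cleared bit in the low byte is one slot the hardware will transmit. A standalone expansion of that expression, truncated to the register's low 16 bits:

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int count = 0; count <= 8; count++) {
			/* mirrors: 0xff00 | ~(0xff00 >> beacon_data_count) */
			unsigned int mask = (0xff00 | ~(0xff00u >> count)) & 0xffff;
			printf("beacon_data_count=%u -> bypass mask 0x%04x\n",
			       count, mask);
		}
		return 0;
	}
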
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index d7334267b530..aaadc15ea83c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -16,7 +16,7 @@ void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~BIT(4);
- switch (dev->mt76.chainmask & 0xf) {
+ switch (dev->chainmask & 0xf) {
case 2:
val |= BIT(3);
break;
@@ -35,7 +35,7 @@ void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
{
int txpath;
- txpath = (dev->mt76.chainmask >> 8) & 0xf;
+ txpath = (dev->chainmask >> 8) & 0xf;
switch (txpath) {
case 2:
mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index fc2e41006a0d..1def25bf735a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -11,7 +11,7 @@
static inline int
mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
{
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_80:
return -62;
case NL80211_CHAN_WIDTH_40:
@@ -24,7 +24,7 @@ mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
static inline int
mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
{
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_80:
return -76;
case NL80211_CHAN_WIDTH_40:
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 21c0f351fa09..3e722276b5c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -235,15 +235,9 @@
#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
#define MT_LED_S1_BASE 0x0780
#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
-#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
- MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
- MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
-#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
- MT_LED_STATUS_DURATION_MASK)
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 8)
#define MT_FCE_PSE_CTRL 0x0800
#define MT_FCE_PARAMETERS 0x0804
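The helper macros dropped here open-coded the shift-and-mask that FIELD_PREP() from <linux/bitfield.h> already provides; the mt76x02_util.c hunk further down switches the only caller over. A sketch of the idiom, with made-up field names:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EX_LED_STATUS_OFF	GENMASK(31, 24)
#define EX_LED_STATUS_ON	GENMASK(23, 16)
#define EX_LED_STATUS_DURATION	GENMASK(15, 8)

static u32 ex_led_compose(u8 delay_on, u8 delay_off)
{
	/* FIELD_PREP() shifts each value into its field and masks it,
	 * replacing the old ((_v) << __ffs(mask)) & mask helpers.
	 */
	return FIELD_PREP(EX_LED_STATUS_DURATION, 0xff) |
	       FIELD_PREP(EX_LED_STATUS_OFF, delay_off) |
	       FIELD_PREP(EX_LED_STATUS_ON, delay_on);
}

static u8 ex_led_delay_on(u32 val)
{
	return FIELD_GET(EX_LED_STATUS_ON, val);	/* and extract a field back out */
}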
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
index 61ecaf0fe065..6a98092e996b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
@@ -20,7 +20,6 @@
#define DEV_PR_ARG __entry->wiphy_name
#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
-#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
#define TXID_PR_FMT " [%d:%d]"
#define TXID_PR_ARG __entry->wcid, __entry->pktid
@@ -36,28 +35,6 @@ DECLARE_EVENT_CLASS(dev_evt,
TP_printk(DEV_PR_FMT, DEV_PR_ARG)
);
-DECLARE_EVENT_CLASS(dev_txid_evt,
- TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
- TP_ARGS(dev, wcid, pktid),
- TP_STRUCT__entry(
- DEV_ENTRY
- TXID_ENTRY
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- TXID_ASSIGN;
- ),
- TP_printk(
- DEV_PR_FMT TXID_PR_FMT,
- DEV_PR_ARG, TXID_PR_ARG
- )
-);
-
-DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
- TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
- TP_ARGS(dev, wcid, pktid)
-);
-
DEFINE_EVENT(dev_evt, mac_txstat_poll,
TP_PROTO(struct mt76x02_dev *dev),
TP_ARGS(dev)
@@ -100,29 +77,6 @@ TRACE_EVENT(mac_txstat_fetch,
)
);
-TRACE_EVENT(dev_irq,
- TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask),
-
- TP_ARGS(dev, val, mask),
-
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, val)
- __field(u32, mask)
- ),
-
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->val = val;
- __entry->mask = mask;
- ),
-
- TP_printk(
- DEV_PR_FMT " %08x & %08x",
- DEV_PR_ARG, __entry->val, __entry->mask
- )
-);
-
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 13825f642087..96fdf423a348 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -28,7 +28,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
wcid = &mvif->group_wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(&dev->mphy, control->sta, wcid, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx);
@@ -39,7 +39,6 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
void *rxwi = skb->data;
if (q == MT_RXQ_MCU) {
- /* this is used just by mmio code */
mt76_mcu_rx_event(&dev->mt76, skb);
return;
}
@@ -74,7 +73,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
} else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
+ enum nl80211_band band = dev->mphy.chandef.chan->band;
if (band == NL80211_BAND_2GHZ) {
const struct ieee80211_rate *r;
@@ -96,7 +95,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
- txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
txpwr -= (dev->target_power + dev->target_power_delta[0]);
txpwr = min_t(s8, txpwr, max_txpwr_adj);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index d03d3c8e296c..0180b6200b17 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -46,8 +46,7 @@ EXPORT_SYMBOL_GPL(mt76x02u_mac_start);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
- struct sk_buff *iter, *last = skb;
- u32 info, pad;
+ u32 info;
/* Buffer layout:
* | 4B | xfer len | pad | 4B |
@@ -57,28 +56,8 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
*/
info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
- put_unaligned_le32(info, skb_push(skb, sizeof(info)));
- /* Add zero pad of 4 - 7 bytes */
- pad = round_up(skb->len, 4) + 4 - skb->len;
-
- /* First packet of a A-MSDU burst keeps track of the whole burst
- * length, need to update length of it and the last packet.
- */
- skb_walk_frags(skb, iter) {
- last = iter;
- if (!iter->next) {
- skb->data_len += pad;
- skb->len += pad;
- break;
- }
- }
-
- if (skb_pad(last, pad))
- return -ENOMEM;
- __skb_put(last, pad);
-
- return 0;
+ return mt76u_skb_dma_info(skb, info);
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
@@ -198,7 +177,7 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
container_of(work, struct mt76x02_dev, pre_tbtt_work);
struct beacon_bc_data data = {};
struct sk_buff *skb;
- int i, nbeacons;
+ int nbeacons;
if (!dev->mt76.beacon_mask)
return;
@@ -208,17 +187,30 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
mt76x02_resync_beacon_timer(dev);
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ dev->beacon_data_count = 0;
+
ieee80211_iterate_active_interfaces(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
+ mt76_csa_check(&dev->mt76);
+
+ if (dev->mt76.csa_complete) {
+ mt76_csa_finish(&dev->mt76);
+ goto out;
+ }
+
nbeacons = hweight8(dev->mt76.beacon_mask);
mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);
- for (i = nbeacons; i < N_BCN_SLOTS; i++) {
- skb = __skb_dequeue(&data.q);
- mt76x02_mac_set_beacon(dev, i, skb);
- }
+ while ((skb = __skb_dequeue(&data.q)) != NULL)
+ mt76x02_mac_set_beacon(dev, skb);
+
+out:
+ mt76_wr(dev, MT_BCN_BYPASS_MASK,
+ 0xff00 | ~(0xff00 >> dev->beacon_data_count));
mt76x02u_restart_pre_tbtt_timer(dev);
}
@@ -244,20 +236,11 @@ static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
{
- int i;
-
if (WARN_ON_ONCE(!dev->mt76.beacon_int))
return;
- if (en) {
+ if (en)
mt76x02u_start_pre_tbtt_timer(dev);
- } else {
- /* Timer is already stopped, only clean up
- * PS buffered frames if any.
- */
- for (i = 0; i < N_BCN_SLOTS; i++)
- mt76x02_mac_set_beacon(dev, i, NULL);
- }
}
void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
@@ -280,7 +263,7 @@ EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);
void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
{
- if (!test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (!test_bit(MT76_REMOVED, &dev->mphy.state))
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
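The bypass-mask write in the pre-TBTT hunk above packs the number of prepared beacons into one expression: 0xff00 >> n shifts n ones into the top of the low byte, the inversion clears exactly those n bits, and OR-ing 0xff00 keeps the high byte set, so only the n highest beacon slots leave bypass mode. A standalone arithmetic check — the slot-to-bit mapping is the driver's, only the expression itself is exercised here:

#include <stdio.h>

int main(void)
{
	for (unsigned int n = 0; n <= 4; n++) {
		unsigned int mask = 0xff00 | ~(0xff00u >> n);
		printf("%u beacons -> %08x\n", n, mask);
	}
	return 0;
}

/* prints:
 * 0 beacons -> ffffffff	(every slot bypassed)
 * 1 beacons -> ffffff7f	(bit 7 clear: one active slot)
 * 2 beacons -> ffffff3f	(bits 7-6 clear)
 * 3 beacons -> ffffff1f
 * 4 beacons -> ffffff0f
 */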
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index a993cd7e9948..843b86560ed4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -55,7 +55,8 @@ static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
u32 rxfce;
for (i = 0; i < 5; i++) {
- ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300);
+ ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len,
+ 300, MT_EP_IN_CMD_RESP);
if (ret == -ETIMEDOUT)
continue;
if (ret)
@@ -82,18 +83,17 @@ static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
- struct mt76_usb *usb = &dev->usb;
- int ret;
u8 seq = 0;
u32 info;
+ int ret;
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return 0;
if (wait_resp) {
- seq = ++usb->mcu.msg_seq & 0xf;
+ seq = ++dev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++usb->mcu.msg_seq & 0xf;
+ seq = ++dev->mcu.msg_seq & 0xf;
}
info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
@@ -103,7 +103,8 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
if (ret)
return ret;
- ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
+ ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
+ MT_EP_OUT_INBAND_CMD);
if (ret)
return ret;
@@ -119,7 +120,6 @@ static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp)
{
- struct mt76_usb *usb = &dev->usb;
struct sk_buff *skb;
int err;
@@ -127,9 +127,9 @@ mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
return err;
}
@@ -143,9 +143,8 @@ static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *data, int n)
{
- const int CMD_RANDOM_WRITE = 12;
const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
- struct mt76_usb *usb = &dev->usb;
+ const int CMD_RANDOM_WRITE = 12;
struct sk_buff *skb;
int cnt, i, ret;
@@ -164,9 +163,9 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
skb_put_le32(skb, data[i].value);
}
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
if (ret)
return ret;
@@ -200,7 +199,7 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
skb_put_le32(skb, data[i].value);
}
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
usb->mcu.rp = data;
usb->mcu.rp_len = n;
@@ -211,7 +210,7 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
usb->mcu.rp = NULL;
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
return ret;
}
@@ -248,7 +247,8 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
data_len = MT_CMD_HDR_LEN + len + sizeof(info);
- err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000);
+ err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000,
+ MT_EP_OUT_INBAND_CMD);
if (err) {
dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 0960fc56b672..b7a120b0856d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -96,9 +96,9 @@ mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
mt76);
u32 val;
- val = MT_LED_STATUS_DURATION(0xff) |
- MT_LED_STATUS_OFF(delay_off) |
- MT_LED_STATUS_ON(delay_on);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
@@ -166,7 +166,6 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
wiphy->reg_notifier = mt76x02_regd_notifier;
wiphy->iface_combinations = mt76x02_if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -182,21 +181,22 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->vif_data_size = sizeof(struct mt76x02_vif);
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
dev->mt76.global_wcid.idx = 255;
dev->mt76.global_wcid.hw_key_idx = -1;
dev->slottime = 9;
if (is_mt76x2(dev)) {
- dev->mt76.sband_2g.sband.ht_cap.cap |=
+ dev->mphy.sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |=
+ dev->mphy.sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.chainmask = 0x202;
- dev->mt76.antenna_mask = 3;
+ dev->chainmask = 0x202;
+ dev->mphy.antenna_mask = 3;
} else {
- dev->mt76.chainmask = 0x101;
- dev->mt76.antenna_mask = 1;
+ dev->chainmask = 0x101;
+ dev->mphy.antenna_mask = 1;
}
}
EXPORT_SYMBOL_GPL(mt76x02_init_device);
@@ -325,7 +325,9 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_STATION)
idx += 8;
- if (dev->vif_mask & BIT(idx))
+ /* vif is already set or idx is 8 for AP/Mesh/... */
+ if (dev->vif_mask & BIT(idx) ||
+ (vif->type != NL80211_IFTYPE_STATION && idx > 7))
return -EBUSY;
dev->vif_mask |= BIT(idx);
@@ -545,7 +547,7 @@ void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
- dev->coverage_class = coverage_class;
+ dev->coverage_class = max_t(s16, coverage_class, 0);
mt76x02_set_tx_ackto(dev);
mutex_unlock(&dev->mt76.mutex);
}
@@ -602,7 +604,7 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_SCANNING, &dev->mt76.state);
+ clear_bit(MT76_SCANNING, &dev->mphy.state);
if (dev->cal.gain_init_done) {
/* Restore AGC gain and resume calibration after scanning. */
dev->cal.low_gain = -1;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
index 7b2b187fbf47..caf089538c11 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
@@ -13,5 +13,3 @@ mt76x2e-y := \
mt76x2u-y := \
usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
usb_phy.o
-
-CFLAGS_pci_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 9f91556c7f38..4a748a6f0ce2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -248,7 +248,7 @@ mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
u8 lna;
@@ -455,7 +455,7 @@ EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
{
- enum nl80211_band band = dev->mt76.chandef.chan->band;
+ enum nl80211_band band = dev->mphy.chandef.chan->band;
u16 val, slope;
u8 bounds;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
index 4dcf6518cb0d..3755632e6494 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -53,7 +53,7 @@ mt76x2_has_ext_lna(struct mt76x02_dev *dev)
{
u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ)
return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
else
return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index 79e583eb066b..a92a479aebaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -82,7 +82,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_PBF_SYS_CTRL, 0x00080c00 },
{ MT_PBF_CFG, 0x1efebcff },
{ MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
{ MT_MAX_LEN_CFG, 0x003e3f00 },
{ MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
{ MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index 76d8cd37d4de..9635c04ce032 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -29,7 +29,7 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
.idx = channel,
.scan = scan,
.bw = bw,
- .chainmask = cpu_to_le16(dev->mt76.chainmask),
+ .chainmask = cpu_to_le16(dev->chainmask),
};
/* first set the channel without the extension channel info */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index 41680c420cda..eca95b7f64d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -30,7 +30,7 @@ static inline bool is_mt7612(struct mt76x02_dev *dev)
static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
return ((chan->flags & IEEE80211_CHAN_RADAR) &&
chan->dfs_state != NL80211_DFS_AVAILABLE);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 33fcec9179b2..c69579e5f647 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -239,7 +239,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
if (ret)
return ret;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x02_mac_start(dev);
ret = mt76x2_mcu_init(dev);
@@ -289,8 +289,8 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
goto fail;
mt76x02_init_debugfs(dev);
- mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_5g.sband);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index cfe8905ce73f..105e5b99b3f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -22,7 +22,7 @@ mt76x2_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
MT_WATCHDOG_TIME);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -31,7 +31,7 @@ mt76x2_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76x2_stop_hardware(dev);
}
@@ -45,9 +45,9 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x2_mac_stop(dev, true);
ret = mt76x2_phy_set_channel(dev, chandef);
@@ -57,13 +57,13 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
return ret;
}
@@ -86,14 +86,14 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
/* convert to per-chain power for 2x2 devices */
- dev->mt76.txpower_conf -= 6;
+ dev->txpower_conf -= 6;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
mt76x2_phy_set_txpower(dev);
- mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf);
+ mt76x02_tx_set_txpwr_auto(dev, dev->txpower_conf);
}
}
@@ -124,8 +124,8 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
mutex_lock(&dev->mt76.mutex);
- dev->mt76.chainmask = (tx_ant == 3) ? 0x202 : 0x101;
- dev->mt76.antenna_mask = tx_ant;
+ dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101;
+ dev->mphy.antenna_mask = tx_ant;
mt76_set_stream_caps(&dev->mt76, true);
mt76x2_phy_set_antenna(dev);
@@ -145,6 +145,7 @@ const struct ieee80211_ops mt76x2_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index 23f35bf8d47b..8831337df23e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -12,7 +12,7 @@
static bool
mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 flag = 0;
if (!mt76x2_tssi_enabled(dev))
@@ -35,7 +35,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
static void
mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
if (dev->cal.channel_cal_done)
@@ -74,7 +74,7 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~(BIT(4) | BIT(1));
- switch (dev->mt76.antenna_mask) {
+ switch (dev->mphy.antenna_mask) {
case 1:
/* disable mac DAC control */
mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
@@ -118,7 +118,7 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *chan = chandef->chan;
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
enum nl80211_band band = chan->band;
u8 channel;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index edbab4fa7f6e..ed2dcb05d614 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -136,8 +136,8 @@ mt76x2_get_min_rate_power(struct mt76_rate_power *r)
void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ enum nl80211_chan_width width = dev->mphy.chandef.width;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x2_tx_power_info txp;
int txp_0, txp_1, delta = 0;
struct mt76_rate_power t = {};
@@ -152,8 +152,8 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
mt76x2_get_rate_power(dev, &t, chan);
mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
- mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
- dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
+ mt76x02_limit_rate_power(&t, dev->txpower_conf);
+ dev->mphy.txpower_cur = mt76x02_get_max_rate_power(&t);
base_power = mt76x2_get_min_rate_power(&t);
delta = base_power - txp.target_power;
@@ -202,7 +202,7 @@ EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x2_tx_power_info txp;
struct mt76x2_tssi_comp t = {};
@@ -252,12 +252,12 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
val = 0x1836 << 16;
if (!mt76x2_has_ext_lna(dev) &&
- dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ dev->mphy.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
if (mt76x2_has_ext_lna(dev) &&
- dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
- dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+ dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ &&
+ dev->mphy.chandef.width < NL80211_CHAN_WIDTH_40)
val = 0x0f36 << 16;
val |= 0xf8;
@@ -267,7 +267,7 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
mt76_wr(dev, MT_BBP(AGC, 9),
val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
- if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+ if (dev->mphy.chandef.chan->flags & IEEE80211_CHAN_RADAR)
mt76x02_phy_dfs_adjust_agc(dev);
}
@@ -280,7 +280,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
@@ -297,7 +297,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
return;
}
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
+ if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_80) {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
if (low_gain == 2)
@@ -315,11 +315,11 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
low_gain_delta = 14;
agc_37 = 0x2121262c;
- if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ)
agc_35 = 0x11111516;
else if (low_gain == 2)
agc_35 = agc_37 = 0x08080808;
- else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ else if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_80)
agc_35 = 0x10101014;
else
agc_35 = 0x11111116;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index b64ad816cc25..eafa283ca699 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -54,7 +54,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
mt76x02u_init_mcu(mdev);
- err = mt76u_init(mdev, intf);
+ err = mt76u_init(mdev, intf, false);
if (err < 0)
goto err;
@@ -86,7 +86,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct ieee80211_hw *hw = mt76_hw(dev);
- set_bit(MT76_REMOVED, &dev->mt76.state);
+ set_bit(MT76_REMOVED, &dev->mphy.state);
ieee80211_unregister_hw(hw);
mt76x2u_cleanup(dev);
mt76u_deinit(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 2910068f4e79..ffc2deba29ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -190,6 +190,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_usb *usb = &dev->mt76.usb;
int err;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
@@ -199,6 +200,11 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
+ usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
+
err = mt76u_alloc_queues(&dev->mt76);
if (err < 0)
goto fail;
@@ -207,22 +213,18 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto fail;
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (err)
goto fail;
- /* check hw sg support in order to enable AMSDU */
- if (dev->mt76.usb.sg_en)
- hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
- else
- hw->max_tx_fragments = 1;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x02_init_debugfs(dev);
- mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_5g.sband);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index 59cbe826188a..eaa622833f85 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -98,7 +98,7 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev)
bool stopped = false;
u32 rts_cfg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -EIO;
rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 9e97204841f5..bab4e6e1904e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -17,7 +17,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -26,7 +26,7 @@ static void mt76x2u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76u_stop_tx(&dev->mt76);
mt76x2u_stop_hw(dev);
}
@@ -41,9 +41,9 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76x02_pre_tbtt_enable(dev, false);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x2_mac_stop(dev, false);
@@ -52,11 +52,11 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
return err;
}
@@ -78,12 +78,12 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
/* convert to per-chain power for 2x2 devices */
- dev->mt76.txpower_conf -= 6;
+ dev->txpower_conf -= 6;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
mt76x2_phy_set_txpower(dev);
}
@@ -105,6 +105,7 @@ const struct ieee80211_ops mt76x2u_ops = {
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.ampdu_action = mt76x02_ampdu_action,
.config = mt76x2u_config,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index b1381f9df992..a04a98f5ce1e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -10,7 +10,7 @@
static void
mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
if (dev->cal.channel_cal_done)
@@ -82,7 +82,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
};
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
struct ieee80211_channel *chan = chandef->chan;
u8 channel = chan->hw_value, bw, bw_index;
int ch_group_index, freq, freq1, ret;
@@ -185,7 +185,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
struct ieee80211_channel *chan;
u32 flag = 0;
- chan = dev->mt76.chandef.chan;
+ chan = dev->mphy.chandef.chan;
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
if (mt76x02_ext_pa_enabled(dev, chan->band))
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
index ed3df3c8b4b3..f199fcd2a63d 100644
--- a/drivers/net/wireless/mediatek/mt76/trace.c
+++ b/drivers/net/wireless/mediatek/mt76/trace.c
@@ -9,4 +9,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(mac_txdone);
+EXPORT_TRACEPOINT_SYMBOL_GPL(dev_irq);
+
#endif
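Since dev_irq and mac_txdone are now instantiated once in the core mt76 module but fired from mt76x02-lib (see the trace_dev_irq() call in the mt76x02_mmio.c hunk near the top), the tracepoint symbols must be exported across the module boundary. The split, sketched with an illustrative header name:

/* core module: instantiate the tracepoints exactly once and export them */
#define CREATE_TRACE_POINTS
#include "example_trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(dev_irq);

/* any dependent module: include the header only, then fire the event;
 * the trace_dev_irq() call resolves against the exported symbol.
 */
#include "example_trace.h"

static void example_handle_irq(struct mt76_dev *mdev, u32 intr, u32 mask)
{
	trace_dev_irq(mdev, intr, mask);
}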
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
index 0b3e635da868..c3d0ef8e2890 100644
--- a/drivers/net/wireless/mediatek/mt76/trace.h
+++ b/drivers/net/wireless/mediatek/mt76/trace.h
@@ -14,7 +14,7 @@
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
-#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+#define DEVICE_ASSIGN strlcpy(__entry->wiphy_name, \
wiphy_name(dev->hw->wiphy), MAXNAME)
#define DEV_PR_FMT "%s"
#define DEV_PR_ARG __entry->wiphy_name
@@ -24,6 +24,11 @@
#define REG_PR_FMT " %04x=%08x"
#define REG_PR_ARG __entry->reg, __entry->val
+#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT " [%d:%d]"
+#define TXID_PR_ARG __entry->wcid, __entry->pktid
+
DECLARE_EVENT_CLASS(dev_reg_evt,
TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val),
@@ -32,7 +37,7 @@ DECLARE_EVENT_CLASS(dev_reg_evt,
REG_ENTRY
),
TP_fast_assign(
- DEV_ASSIGN;
+ DEVICE_ASSIGN;
REG_ASSIGN;
),
TP_printk(
@@ -51,6 +56,51 @@ DEFINE_EVENT(dev_reg_evt, reg_wr,
TP_ARGS(dev, reg, val)
);
+TRACE_EVENT(dev_irq,
+ TP_PROTO(struct mt76_dev *dev, u32 val, u32 mask),
+
+ TP_ARGS(dev, val, mask),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, val)
+ __field(u32, mask)
+ ),
+
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ __entry->val = val;
+ __entry->mask = mask;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT " %08x & %08x",
+ DEV_PR_ARG, __entry->val, __entry->mask
+ )
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ ),
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ TXID_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT,
+ DEV_PR_ARG, TXID_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone,
+ TP_PROTO(struct mt76_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 7ee91d946882..eff522dbda34 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -109,13 +109,17 @@ void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
__releases(&dev->status_list.unlock)
{
+ struct ieee80211_hw *hw;
struct sk_buff *skb;
spin_unlock_bh(&dev->status_list.lock);
__release(&dev->status_list.unlock);
- while ((skb = __skb_dequeue(list)) != NULL)
- ieee80211_tx_status(dev->hw, skb);
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_tx_status(hw, skb);
+ }
+
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
@@ -231,10 +235,12 @@ EXPORT_SYMBOL_GPL(mt76_tx_status_check);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
+ struct ieee80211_hw *hw;
struct sk_buff_head list;
if (!skb->prev) {
- ieee80211_free_txskb(dev->hw, skb);
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
@@ -245,13 +251,15 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
void
-mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt76_queue *q;
int qid = skb_get_queue_mapping(skb);
+ bool ext_phy = phy != &dev->phy;
if (WARN_ON(qid >= MT_TXQ_PSD)) {
qid = MT_TXQ_BE;
@@ -275,6 +283,9 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
mt76_check_agg_ssn(mtxq, skb);
}
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
q = dev->q_tx[qid].q;
spin_lock_bh(&q->lock);
@@ -282,7 +293,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8 && !q->stopped) {
- ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+ ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -291,9 +302,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_tx);
static struct sk_buff *
-mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_tx_info *info;
+ bool ext_phy = phy != &phy->dev->phy;
struct sk_buff *skb;
skb = skb_dequeue(&mtxq->retry_q);
@@ -306,10 +319,14 @@ mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
return skb;
}
- skb = ieee80211_tx_dequeue(dev->hw, txq);
+ skb = ieee80211_tx_dequeue(phy->hw, txq);
if (!skb)
return NULL;
+ info = IEEE80211_SKB_CB(skb);
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
return skb;
}
@@ -335,7 +352,8 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
bool more_data)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
struct sk_buff *last_skb = NULL;
struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
int i;
@@ -350,7 +368,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
continue;
do {
- skb = mt76_txq_dequeue(dev, mtxq, true);
+ skb = mt76_txq_dequeue(phy, mtxq, true);
if (!skb)
break;
@@ -377,9 +395,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
+mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
struct mt76_txq *mtxq)
{
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
@@ -395,7 +414,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
return 0;
- skb = mt76_txq_dequeue(dev, mtxq, false);
+ skb = mt76_txq_dequeue(phy, mtxq, false);
if (!skb)
return 0;
@@ -423,10 +442,10 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
if (probe)
break;
- if (test_bit(MT76_RESET, &dev->state))
+ if (test_bit(MT76_RESET, &phy->state))
return -EBUSY;
- skb = mt76_txq_dequeue(dev, mtxq, false);
+ skb = mt76_txq_dequeue(phy, mtxq, false);
if (!skb)
break;
@@ -464,8 +483,9 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
}
static int
-mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
+mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *hwq = sq->q;
struct ieee80211_txq *txq;
@@ -478,12 +498,12 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
if (sq->swq_queued >= 4)
break;
- if (test_bit(MT76_RESET, &dev->state)) {
+ if (test_bit(MT76_RESET, &phy->state)) {
ret = -EBUSY;
break;
}
- txq = ieee80211_next_txq(dev->hw, qid);
+ txq = ieee80211_next_txq(phy->hw, qid);
if (!txq)
break;
@@ -505,8 +525,8 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
spin_lock_bh(&hwq->lock);
}
- ret += mt76_txq_send_burst(dev, sq, mtxq);
- ieee80211_return_txq(dev->hw, txq,
+ ret += mt76_txq_send_burst(phy, sq, mtxq);
+ ieee80211_return_txq(phy->hw, txq,
!skb_queue_empty(&mtxq->retry_q));
}
spin_unlock_bh(&hwq->lock);
@@ -514,8 +534,9 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
return ret;
}
-void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_sw_queue *sq = &dev->q_tx[qid];
int len;
@@ -528,21 +549,21 @@ void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
rcu_read_lock();
do {
- ieee80211_txq_schedule_start(dev->hw, qid);
- len = mt76_txq_schedule_list(dev, qid);
- ieee80211_txq_schedule_end(dev->hw, qid);
+ ieee80211_txq_schedule_start(phy->hw, qid);
+ len = mt76_txq_schedule_list(phy, qid);
+ ieee80211_txq_schedule_end(phy->hw, qid);
} while (len > 0);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
-void mt76_txq_schedule_all(struct mt76_dev *dev)
+void mt76_txq_schedule_all(struct mt76_phy *phy)
{
int i;
for (i = 0; i <= MT_TXQ_BK; i++)
- mt76_txq_schedule(dev, i);
+ mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
@@ -550,7 +571,9 @@ void mt76_tx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
- mt76_txq_schedule_all(dev);
+ mt76_txq_schedule_all(&dev->phy);
+ if (dev->phy2)
+ mt76_txq_schedule_all(dev->phy2);
}
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -578,9 +601,10 @@ EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
- if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+ if (!test_bit(MT76_STATE_RUNNING, &phy->state))
return;
tasklet_schedule(&dev->tx_tasklet);
@@ -589,6 +613,7 @@ EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
+ struct ieee80211_hw *hw;
struct mt76_txq *mtxq;
struct sk_buff *skb;
@@ -597,8 +622,10 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
mtxq = (struct mt76_txq *)txq->drv_priv;
- while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
- ieee80211_free_txskb(dev->hw, skb);
+ while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
+ }
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
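tx.c now tags frames submitted through a secondary phy with MT_TX_HW_QUEUE_EXT_PHY in info->hw_queue and recovers the matching ieee80211_hw on the completion side through mt76_tx_status_get_hw(). That helper is not part of this diff; inferred from its call sites above, it plausibly reduces to:

/* sketch only -- the real helper lives in mt76.h, outside this diff */
static inline struct ieee80211_hw *
example_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	/* frames queued via the second phy carry this bit (set in mt76_tx
	 * and mt76_txq_dequeue above)
	 */
	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	return hw;
}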
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index d6d47081e281..a981da6c35a5 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -29,13 +29,13 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
: usb_sndctrlpipe(udev, 0);
for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return -EIO;
ret = usb_control_msg(udev, pipe, req, req_type, val,
offset, buf, len, MT_VEND_REQ_TOUT_MS);
if (ret == -ENODEV)
- set_bit(MT76_REMOVED, &dev->state);
+ set_bit(MT76_REMOVED, &dev->phy.state);
if (ret >= 0 || ret == -ENODEV)
return ret;
usleep_range(5000, 10000);
@@ -62,12 +62,25 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
u32 data = ~0;
- u16 offset;
int ret;
+
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ addr >> 16, addr, usb->data,
+ sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
u8 req;
switch (addr & MT_VEND_TYPE_MASK) {
@@ -81,16 +94,8 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
req = MT_VEND_MULTI_READ;
break;
}
- offset = addr & ~MT_VEND_TYPE_MASK;
-
- ret = __mt76u_vendor_request(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR,
- 0, offset, &usb->reg_val, sizeof(__le32));
- if (ret == sizeof(__le32))
- data = le32_to_cpu(usb->reg_val);
- trace_usb_reg_rr(dev, addr, data);
- return data;
+ return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
@@ -104,10 +109,32 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
+ u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
- u16 offset;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ addr >> 16, addr, usb->data,
+ sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
u8 req;
switch (addr & MT_VEND_TYPE_MASK) {
@@ -118,13 +145,7 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
req = MT_VEND_MULTI_WRITE;
break;
}
- offset = addr & ~MT_VEND_TYPE_MASK;
-
- usb->reg_val = cpu_to_le32(val);
- __mt76u_vendor_request(dev, req,
- USB_DIR_OUT | USB_TYPE_VENDOR, 0,
- offset, &usb->reg_val, sizeof(__le32));
- trace_usb_reg_wr(dev, addr, val);
+ ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -134,6 +155,13 @@ static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
+static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
{
@@ -145,22 +173,94 @@ static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
return val;
}
+static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
struct mt76_usb *usb = &dev->usb;
- const u32 *val = data;
- int i, ret;
+ const u8 *val = data;
+ int ret;
+ int current_batch_size;
+ int i = 0;
+
+ /* Assure that always a multiple of 4 bytes are copied,
+ * otherwise beacons can be corrupted.
+ * See: "mt76: round up length on mt76_wr_copy"
+ * Commit 850e8f6fbd5d0003b0
+ */
+ len = round_up(len, 4);
mutex_lock(&usb->usb_ctrl_mtx);
- for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
- put_unaligned(val[i], (u32 *)usb->data);
+ while (i < len) {
+ current_batch_size = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, current_batch_size);
ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
USB_DIR_OUT | USB_TYPE_VENDOR,
- 0, offset + i * 4, usb->data,
- sizeof(u32));
+ 0, offset + i, usb->data,
+ current_batch_size);
+ if (ret < 0)
+ break;
+
+ i += current_batch_size;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
if (ret < 0)
break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void
+mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int i = 0, batch_len, ret;
+ u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ memcpy(val + i, usb->data, batch_len);
+ i += batch_len;
}
mutex_unlock(&usb->usb_ctrl_mtx);
}
@@ -200,7 +300,7 @@ static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *data, int n)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
else
return mt76u_req_wr_rp(dev, base, data, n);
@@ -227,7 +327,7 @@ static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *data, int n)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
else
return mt76u_req_rd_rp(dev, base, data, n);
@@ -306,11 +406,12 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
}
static int
-mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
+mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
+ struct urb *urb, int nsgs, gfp_t gfp)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
- if (dev->usb.sg_en)
+ if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
urb->transfer_buffer_length = q->buf_size;
@@ -334,23 +435,25 @@ mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
usb_init_urb(e->urb);
- if (dev->usb.sg_en)
+ if (dev->usb.sg_en && sg_max_size > 0)
e->urb->sg = (struct scatterlist *)(e->urb + 1);
return 0;
}
static int
-mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
+mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
{
- int err;
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ int err, sg_size;
- err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
+ sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
+ err = mt76u_urb_alloc(dev, e, sg_size);
if (err)
return err;
- return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
- GFP_KERNEL);
+ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
@@ -386,10 +489,9 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
urb->context = context;
}
-static inline struct urb *
-mt76u_get_next_rx_entry(struct mt76_dev *dev)
+static struct urb *
+mt76u_get_next_rx_entry(struct mt76_queue *q)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb = NULL;
unsigned long flags;
@@ -404,14 +506,17 @@ mt76u_get_next_rx_entry(struct mt76_dev *dev)
return urb;
}
-static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+static int
+mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
+ u32 data_len)
{
u16 dma_len, min_len;
dma_len = get_unaligned_le16(data);
- min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
- MT_FCE_INFO_LEN;
+ if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
+ return dma_len;
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
if (data_len < min_len || !dma_len ||
dma_len + MT_DMA_HDR_LEN > data_len ||
(dma_len & 0x3))
@@ -420,11 +525,14 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
}
static struct sk_buff *
-mt76u_build_rx_skb(void *data, int len, int buf_size)
+mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
+ int len, int buf_size)
{
+ int head_room, drv_flags = dev->drv->drv_flags;
struct sk_buff *skb;
- if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
struct page *page;
/* slow path, not enough space for data and
@@ -434,8 +542,8 @@ mt76u_build_rx_skb(void *data, int len, int buf_size)
if (!skb)
return NULL;
- skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
- data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
+ skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
+ data += head_room + MT_SKB_HEAD_LEN;
page = virt_to_head_page(data);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, data - page_address(page),
@@ -449,30 +557,31 @@ mt76u_build_rx_skb(void *data, int len, int buf_size)
if (!skb)
return NULL;
- skb_reserve(skb, MT_DMA_HDR_LEN);
+ skb_reserve(skb, head_room);
__skb_put(skb, len);
return skb;
}
static int
-mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
+ int buf_size)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
- int len, nsgs = 1;
+ int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
struct sk_buff *skb;
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
return 0;
- len = mt76u_get_rx_entry_len(data, urb->actual_length);
+ len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
if (len < 0)
return 0;
- data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
- skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ data_len = min_t(int, len, data_len - head_room);
+ skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
if (!skb)
return 0;
@@ -481,8 +590,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
sg_page(&urb->sg[nsgs]),
- urb->sg[nsgs].offset,
- data_len, q->buf_size);
+ urb->sg[nsgs].offset, data_len,
+ buf_size);
len -= data_len;
nsgs++;
}
@@ -493,8 +602,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
static void mt76u_complete_rx(struct urb *urb)
{
- struct mt76_dev *dev = urb->context;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue *q = urb->context;
unsigned long flags;
trace_rx_urb(dev, urb);
@@ -524,50 +633,69 @@ out:
}
static int
-mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
+mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ struct urb *urb)
{
- mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
- mt76u_complete_rx, dev);
+ int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
+
+ mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
+ mt76u_complete_rx, &dev->q_rx[qid]);
trace_submit_urb(dev, urb);
return usb_submit_urb(urb, GFP_ATOMIC);
}
-static void mt76u_rx_tasklet(unsigned long data)
+static void
+mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
+ int qid = q - &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb;
int err, count;
- rcu_read_lock();
-
while (true) {
- urb = mt76u_get_next_rx_entry(dev);
+ urb = mt76u_get_next_rx_entry(q);
if (!urb)
break;
- count = mt76u_process_rx_entry(dev, urb);
+ count = mt76u_process_rx_entry(dev, urb, q->buf_size);
if (count > 0) {
- err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
+ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
if (err < 0)
break;
}
- mt76u_submit_rx_buf(dev, urb);
+ mt76u_submit_rx_buf(dev, qid, urb);
}
- mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+ if (qid == MT_RXQ_MAIN)
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue *q;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+ if (!q->ndesc)
+ continue;
+ mt76u_process_rx_queue(dev, q);
+ }
rcu_read_unlock();
}
-static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+static int
+mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q = &dev->q_rx[qid];
unsigned long flags;
int i, err = 0;
spin_lock_irqsave(&q->lock, flags);
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
+ err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
if (err < 0)
break;
}
@@ -578,16 +706,12 @@ static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
return err;
}
-static int mt76u_alloc_rx(struct mt76_dev *dev)
+static int
+mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
- struct mt76_usb *usb = &dev->usb;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q = &dev->q_rx[qid];
int i, err;
- usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
- if (!usb->mcu.data)
- return -ENOMEM;
-
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -599,17 +723,23 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
q->buf_size = PAGE_SIZE;
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
+ err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
if (err < 0)
return err;
}
- return mt76u_submit_rx_buffers(dev);
+ return mt76u_submit_rx_buffers(dev, qid);
}
-static void mt76u_free_rx(struct mt76_dev *dev)
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
+{
+ return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
+
+static void
+mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct page *page;
int i;
@@ -624,13 +754,33 @@ static void mt76u_free_rx(struct mt76_dev *dev)
memset(&q->rx_page, 0, sizeof(q->rx_page));
}
-void mt76u_stop_rx(struct mt76_dev *dev)
+static void mt76u_free_rx(struct mt76_dev *dev)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q;
int i;
- for (i = 0; i < q->ndesc; i++)
- usb_poison_urb(q->entry[i].urb);
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+ if (!q->ndesc)
+ continue;
+
+ mt76u_free_rx_queue(dev, q);
+ }
+}
+
+void mt76u_stop_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+ if (!q->ndesc)
+ continue;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_poison_urb(q->entry[j].urb);
+ }
tasklet_kill(&dev->usb.rx_tasklet);
}
@@ -638,13 +788,24 @@ EXPORT_SYMBOL_GPL(mt76u_stop_rx);
int mt76u_resume_rx(struct mt76_dev *dev)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i;
+ struct mt76_queue *q;
+ int i, j, err;
- for (i = 0; i < q->ndesc; i++)
- usb_unpoison_urb(q->entry[i].urb);
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+
+ if (!q->ndesc)
+ continue;
- return mt76u_submit_rx_buffers(dev);
+ for (j = 0; j < q->ndesc; j++)
+ usb_unpoison_urb(q->entry[j].urb);
+
+ err = mt76u_submit_rx_buffers(dev, i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
@@ -694,10 +855,11 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_unlock_bh(&q->lock);
- mt76_txq_schedule(dev, i);
+ mt76_txq_schedule(&dev->phy, i);
- if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
- queue_work(dev->usb.stat_wq, &dev->usb.stat_work);
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->usb.wq, &dev->usb.stat_work);
if (wake)
ieee80211_wake_queue(dev->hw, i);
}
@@ -714,7 +876,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
dev = container_of(usb, struct mt76_dev, usb);
while (true) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
break;
if (!dev->drv->tx_status_data(dev, &update))
@@ -722,10 +884,10 @@ static void mt76u_tx_status_data(struct work_struct *work)
count++;
}
- if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
- queue_work(usb->stat_wq, &usb->stat_work);
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+ queue_work(usb->wq, &usb->stat_work);
else
- clear_bit(MT76_READING_STATS, &dev->state);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
}
static void mt76u_complete_tx(struct urb *urb)
@@ -759,6 +921,35 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
return urb->num_sgs;
}
+int mt76u_skb_dma_info(struct sk_buff *skb, u32 info)
+{
+ struct sk_buff *iter, *last = skb;
+ u32 pad;
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ /* Add zero pad of 4 - 7 bytes */
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+
+ /* The first packet of an A-MSDU burst keeps track of the whole burst
+ * length; update both its length and that of the last packet.
+ */
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (skb_pad(last, pad))
+ return -ENOMEM;
+ __skb_put(last, pad);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
+
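/*
 * Editor's aside (illustrative, not part of the patch): the pad computed
 * in mt76u_skb_dma_info() is round_up(len, 4) + 4 - len, i.e. always
 * 4..7 bytes, so the frame stays 4-byte aligned and a non-empty zero pad
 * always follows the DMA info word. A stand-alone check of the
 * arithmetic, using a generic round_up stand-in for the kernel macro:
 */
#include <stdio.h>

#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	for (unsigned int len = 60; len <= 67; len++)
		printf("len=%u pad=%u\n", len, round_up(len, 4) + 4 - len);
	return 0;	/* prints pads 4 7 6 5 4 7 6 5 */
}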
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
@@ -806,7 +997,7 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
if (err == -ENODEV)
- set_bit(MT76_REMOVED, &dev->state);
+ set_bit(MT76_REMOVED, &dev->phy.state);
else
dev_err(dev->dev, "tx urb submit failed:%d\n",
err);
@@ -816,6 +1007,14 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
}
}
+static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
+{
+ if (mt76_chip(dev) == 0x7663)
+ return ac ^ 0x3;
+
+ return mt76_ac_to_hwq(ac);
+}
+
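/*
 * Editor's aside (illustrative, not part of the patch): "ac ^ 0x3" just
 * reverses a two-bit index, so on the 0x7663 chip the four ACs land on
 * hardware queues 0->3, 1->2, 2->1, 3->0 instead of the mapping that
 * mt76_ac_to_hwq() provides for other chips.
 */
#include <stdio.h>

int main(void)
{
	for (unsigned int ac = 0; ac < 4; ac++)
		printf("ac=%u hwq=%u\n", ac, ac ^ 0x3);
	return 0;
}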
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
struct mt76_queue *q;
@@ -834,7 +1033,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
return -ENOMEM;
spin_lock_init(&q->lock);
- q->hw_idx = mt76_ac_to_hwq(i);
+ q->hw_idx = mt76u_ac_to_hwq(dev, i);
dev->q_tx[i].q = q;
q->entry = devm_kcalloc(dev->dev,
@@ -872,7 +1071,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
struct mt76_queue *q;
int i, j, ret;
- ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev),
+ ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
HZ / 5);
if (!ret) {
dev_err(dev->dev, "timed out waiting for pending tx\n");
@@ -905,7 +1104,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
}
cancel_work_sync(&dev->usb.stat_work);
- clear_bit(MT76_READING_STATS, &dev->state);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
mt76_tx_status_check(dev, NULL, true);
}
@@ -925,7 +1124,7 @@ int mt76u_alloc_queues(struct mt76_dev *dev)
{
int err;
- err = mt76u_alloc_rx(dev);
+ err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
if (err < 0)
return err;
@@ -938,31 +1137,48 @@ static const struct mt76_queue_ops usb_queue_ops = {
.kick = mt76u_tx_kick,
};
+void mt76u_deinit(struct mt76_dev *dev)
+{
+ if (dev->usb.wq) {
+ destroy_workqueue(dev->usb.wq);
+ dev->usb.wq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76u_deinit);
+
int mt76u_init(struct mt76_dev *dev,
- struct usb_interface *intf)
+ struct usb_interface *intf, bool ext)
{
- static const struct mt76_bus_ops mt76u_ops = {
- .rr = mt76u_rr,
- .wr = mt76u_wr,
- .rmw = mt76u_rmw,
- .write_copy = mt76u_copy,
+ static struct mt76_bus_ops mt76u_ops = {
+ .read_copy = mt76u_read_copy_ext,
.wr_rp = mt76u_wr_rp,
.rd_rp = mt76u_rd_rp,
.type = MT76_BUS_USB,
};
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
+ int err = -ENOMEM;
+
+ mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
+ mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
+ mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
+ mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
- skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
- usb->stat_wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
- if (!usb->stat_wq)
+ usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
+ if (!usb->wq)
return -ENOMEM;
- mutex_init(&usb->mcu.mutex);
+ usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+ if (usb->data_len < 32)
+ usb->data_len = 32;
+
+ usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
+ if (!usb->data)
+ goto error;
mutex_init(&usb->usb_ctrl_mtx);
dev->bus = &mt76u_ops;
@@ -972,18 +1188,17 @@ int mt76u_init(struct mt76_dev *dev,
usb->sg_en = mt76u_check_sg(dev);
- return mt76u_set_endpoints(intf, usb);
-}
-EXPORT_SYMBOL_GPL(mt76u_init);
+ err = mt76u_set_endpoints(intf, usb);
+ if (err < 0)
+ goto error;
-void mt76u_deinit(struct mt76_dev *dev)
-{
- if (dev->usb.stat_wq) {
- destroy_workqueue(dev->usb.stat_wq);
- dev->usb.stat_wq = NULL;
- }
+ return 0;
+
+error:
+ mt76u_deinit(dev);
+ return err;
}
-EXPORT_SYMBOL_GPL(mt76u_deinit);
+EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 23d1e1da78b2..8c60c450125a 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -64,7 +64,7 @@ int mt76_wcid_alloc(unsigned long *mask, int size)
}
EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev)
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
{
struct mt76_wcid *wcid;
int i, j, min_rssi = 0;
@@ -75,14 +75,18 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev)
for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
unsigned long mask = dev->wcid_mask[i];
+ unsigned long phy_mask = dev->wcid_phy_mask[i];
if (!mask)
continue;
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1, phy_mask >>= 1) {
if (!(mask & 1))
continue;
+ if (!!(phy_mask & 1) != ext_phy)
+ continue;
+
wcid = rcu_dereference(dev->wcid[j]);
if (!wcid)
continue;
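/*
 * Editor's note (illustration): dev->wcid_mask and dev->wcid_phy_mask
 * are parallel bitmaps shifted in lockstep above; the first marks
 * allocated wcid slots, the second records which phy owns each slot, so
 * "!!(phy_mask & 1) != ext_phy" skips stations associated with the
 * other phy when computing the per-phy minimum average RSSI.
 */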
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
index fe3479c8e561..48a71e7479e5 100644
--- a/drivers/net/wireless/mediatek/mt76/util.h
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -16,8 +16,20 @@
int mt76_wcid_alloc(unsigned long *mask, int size);
+static inline bool
+mt76_wcid_mask_test(unsigned long *mask, int idx)
+{
+ return mask[idx / BITS_PER_LONG] & BIT(idx % BITS_PER_LONG);
+}
+
+static inline void
+mt76_wcid_mask_set(unsigned long *mask, int idx)
+{
+ mask[idx / BITS_PER_LONG] |= BIT(idx % BITS_PER_LONG);
+}
+
static inline void
-mt76_wcid_free(unsigned long *mask, int idx)
+mt76_wcid_mask_clear(unsigned long *mask, int idx)
{
mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
}
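/*
 * Editor's aside (stand-alone sketch, not part of the patch): the three
 * helpers above are a plain bitmap over an unsigned long array. BIT()
 * and BITS_PER_LONG are redefined here so the snippet builds in
 * userspace.
 */
#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BIT(n)		(1UL << (n))

int main(void)
{
	unsigned long mask[2] = { 0, 0 };	/* 128 slots on 64-bit */
	int idx = 70;

	mask[idx / BITS_PER_LONG] |= BIT(idx % BITS_PER_LONG);	/* set */
	assert(mask[idx / BITS_PER_LONG] & BIT(idx % BITS_PER_LONG));
	mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);	/* clear */
	assert(!(mask[idx / BITS_PER_LONG] & BIT(idx % BITS_PER_LONG)));
	return 0;
}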
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 8849faa5bc10..8be17106008d 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -60,7 +60,8 @@ qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
BIT(IEEE80211_STYPE_AUTH >> 4),
},
[NL80211_IFTYPE_AP] = {
- .tx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
@@ -101,6 +102,21 @@ qtnf_validate_iface_combinations(struct wiphy *wiphy,
ret = cfg80211_check_combinations(wiphy, &params);
+ if (ret)
+ return ret;
+
+ /* Check repeater interface combination: primary VIF should be STA only.
+ * STA (primary) + AP (secondary) is OK.
+ * AP (primary) + STA (secondary) is not supported.
+ */
+ vif = qtnf_mac_get_base_vif(mac);
+ if (vif && vif->wdev.iftype == NL80211_IFTYPE_AP &&
+ vif != change_vif && new_type == NL80211_IFTYPE_STATION) {
+ ret = -EINVAL;
+ pr_err("MAC%u invalid combination: AP as primary repeater interface is not supported\n",
+ mac->macid);
+ }
+
return ret;
}
@@ -248,7 +264,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
goto error_del_vif;
}
- if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret) {
unregister_netdevice(vif->netdev);
@@ -679,10 +695,8 @@ qtnf_external_auth(struct wiphy *wiphy, struct net_device *dev,
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
- if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
-
- if (!ether_addr_equal(vif->bssid, auth->bssid))
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
+ !ether_addr_equal(vif->bssid, auth->bssid))
pr_warn("unexpected bssid: %pM", auth->bssid);
ret = qtnf_cmd_send_external_auth(vif, auth);
@@ -739,7 +753,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_supported_band *sband;
const struct cfg80211_chan_def *chandef = &wdev->chandef;
struct ieee80211_channel *chan;
- struct qtnf_chan_stats stats;
int ret;
sband = wiphy->bands[NL80211_BAND_2GHZ];
@@ -755,49 +768,16 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
chan = &sband->channels[idx];
- memset(&stats, 0, sizeof(stats));
-
survey->channel = chan;
survey->filled = 0x0;
- if (chandef->chan) {
- if (chan->hw_value == chandef->chan->hw_value)
- survey->filled = SURVEY_INFO_IN_USE;
- }
-
- ret = qtnf_cmd_get_chan_stats(mac, chan->hw_value, &stats);
- switch (ret) {
- case 0:
- if (unlikely(stats.chan_num != chan->hw_value)) {
- pr_err("received stats for channel %d instead of %d\n",
- stats.chan_num, chan->hw_value);
- ret = -EINVAL;
- break;
- }
+ if (chan == chandef->chan)
+ survey->filled = SURVEY_INFO_IN_USE;
- survey->filled |= SURVEY_INFO_TIME |
- SURVEY_INFO_TIME_SCAN |
- SURVEY_INFO_TIME_BUSY |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_TX |
- SURVEY_INFO_NOISE_DBM;
-
- survey->time_scan = stats.cca_try;
- survey->time = stats.cca_try;
- survey->time_tx = stats.cca_tx;
- survey->time_rx = stats.cca_rx;
- survey->time_busy = stats.cca_busy;
- survey->noise = stats.chan_noise;
- break;
- case -ENOENT:
- pr_debug("no stats for channel %u\n", chan->hw_value);
- ret = 0;
- break;
- default:
+ ret = qtnf_cmd_get_chan_stats(mac, chan->center_freq, survey);
+ if (ret)
pr_debug("failed to get chan(%d) stats from card\n",
chan->hw_value);
- break;
- }
return ret;
}
@@ -943,6 +923,26 @@ static int qtnf_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
return ret;
}
+static int qtnf_update_owe_info(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_owe_info *owe_info)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_AP)
+ return -EOPNOTSUPP;
+
+ ret = qtnf_cmd_send_update_owe(vif, owe_info);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to update owe info\n",
+ vif->mac->macid, vif->vifid);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
#ifdef CONFIG_PM
static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
{
@@ -1039,6 +1039,7 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.set_power_mgmt = qtnf_set_power_mgmt,
.get_tx_power = qtnf_get_tx_power,
.set_tx_power = qtnf_set_tx_power,
+ .update_owe_info = qtnf_update_owe_info,
#ifdef CONFIG_PM
.suspend = qtnf_suspend,
.resume = qtnf_resume,
@@ -1075,22 +1076,26 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
}
}
-struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
+struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus,
+ struct platform_device *pdev)
{
struct wiphy *wiphy;
if (qtnf_dfs_offload_get() &&
- (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
+ qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_DFS_OFFLOAD))
qtn_cfg80211_ops.start_radar_detection = NULL;
- if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
+ if (!qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_PWR_MGMT))
qtn_cfg80211_ops.set_power_mgmt = NULL;
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
if (!wiphy)
return NULL;
- set_wiphy_dev(wiphy, bus->dev);
+ if (pdev)
+ set_wiphy_dev(wiphy, &pdev->dev);
+ else
+ set_wiphy_dev(wiphy, bus->dev);
return wiphy;
}
@@ -1142,7 +1147,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->coverage_class = macinfo->coverage_class;
wiphy->max_scan_ssids =
- (hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
+ (macinfo->max_scan_ssids) ? macinfo->max_scan_ssids : 1;
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
@@ -1166,10 +1171,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (qtnf_dfs_offload_get() &&
- (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
+ qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_DFS_OFFLOAD))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SCAN_DWELL))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
@@ -1185,16 +1190,16 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
ether_addr_copy(wiphy->perm_addr, mac->macaddr);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_STA_INACT_TIMEOUT))
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR))
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
- if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN))
+ if (!qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_OBSS_SCAN))
wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SAE)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SAE))
wiphy->features |= NL80211_FEATURE_SAE;
#ifdef CONFIG_PM
@@ -1205,7 +1210,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
regdomain_is_known = isalpha(mac->rd->alpha2[0]) &&
isalpha(mac->rd->alpha2[1]);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_REG_UPDATE)) {
wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
if (mac->rd->alpha2[0] == '9' && mac->rd->alpha2[1] == '9') {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index d0d7ec8794c4..f40d8c3c3d9e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -11,11 +11,11 @@
#include "bus.h"
#include "commands.h"
+/* Let the device select the best values for the current conditions */
#define QTNF_SCAN_TIME_AUTO 0
-/* Let device itself to select best values for current conditions */
-#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT QTNF_SCAN_TIME_AUTO
-#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT QTNF_SCAN_TIME_AUTO
+#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT 90
+#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT 100
#define QTNF_SCAN_SAMPLE_DURATION_DEFAULT QTNF_SCAN_TIME_AUTO
static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp,
@@ -175,7 +175,8 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
{
struct qlink_tlv_ie_set *tlv;
- tlv = (struct qlink_tlv_ie_set *)skb_put(cmd_skb, sizeof(*tlv) + len);
+ tlv = (struct qlink_tlv_ie_set *)skb_put(cmd_skb, sizeof(*tlv) +
+ round_up(len, QLINK_ALIGN));
tlv->hdr.type = cpu_to_le16(QTN_TLV_ID_IE_SET);
tlv->hdr.len = cpu_to_le16(len + sizeof(*tlv) - sizeof(tlv->hdr));
tlv->type = frame_type;
@@ -190,20 +191,24 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
{
unsigned int len = sizeof(struct qlink_cmd_start_ap);
- len += s->ssid_len;
- len += s->beacon.head_len;
- len += s->beacon.tail_len;
- len += s->beacon.beacon_ies_len;
- len += s->beacon.proberesp_ies_len;
- len += s->beacon.assocresp_ies_len;
- len += s->beacon.probe_resp_len;
+ len += round_up(s->ssid_len, QLINK_ALIGN);
+ len += round_up(s->beacon.head_len, QLINK_ALIGN);
+ len += round_up(s->beacon.tail_len, QLINK_ALIGN);
+ len += round_up(s->beacon.beacon_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.proberesp_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.assocresp_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.probe_resp_len, QLINK_ALIGN);
if (cfg80211_chandef_valid(&s->chandef))
len += sizeof(struct qlink_tlv_chandef);
- if (s->acl)
+ if (s->acl) {
+ unsigned int acl_len = struct_size(s->acl, mac_addrs,
+ s->acl->n_acl_entries);
+
len += sizeof(struct qlink_tlv_hdr) +
- struct_size(s->acl, mac_addrs, s->acl->n_acl_entries);
+ round_up(acl_len, QLINK_ALIGN);
+ }
if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
pr_err("VIF%u.%u: can not fit AP settings: %u\n",
@@ -315,7 +320,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
if (s->ht_cap) {
struct qlink_tlv_hdr *tlv = (struct qlink_tlv_hdr *)
- skb_put(cmd_skb, sizeof(*tlv) + sizeof(*s->ht_cap));
+ skb_put(cmd_skb, sizeof(*tlv) +
+ round_up(sizeof(*s->ht_cap), QLINK_ALIGN));
tlv->type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
tlv->len = cpu_to_le16(sizeof(*s->ht_cap));
@@ -339,7 +345,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
size_t acl_size = struct_size(s->acl, mac_addrs,
s->acl->n_acl_entries);
struct qlink_tlv_hdr *tlv =
- skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+ skb_put(cmd_skb,
+ sizeof(*tlv) + round_up(acl_size, QLINK_ALIGN));
tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
tlv->len = cpu_to_le16(acl_size);
@@ -581,10 +588,10 @@ qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst,
}
static void
-qtnf_cmd_sta_info_parse(struct station_info *sinfo,
- const struct qlink_tlv_hdr *tlv,
+qtnf_cmd_sta_info_parse(struct station_info *sinfo, const u8 *data,
size_t resp_size)
{
+ const struct qlink_tlv_hdr *tlv;
const struct qlink_sta_stats *stats = NULL;
const u8 *map = NULL;
unsigned int map_len = 0;
@@ -595,11 +602,11 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
(qtnf_utils_is_bit_set(map, bitn, map_len) && \
(offsetofend(struct qlink_sta_stats, stat_name) <= stats_len))
- while (resp_size >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, data, resp_size) {
tlv_len = le16_to_cpu(tlv->len);
switch (le16_to_cpu(tlv->type)) {
- case QTN_TLV_ID_STA_STATS_MAP:
+ case QTN_TLV_ID_BITMAP:
map_len = tlv_len;
map = tlv->val;
break;
@@ -610,9 +617,11 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
default:
break;
}
+ }
- resp_size -= tlv_len + sizeof(*tlv);
- tlv = (const struct qlink_tlv_hdr *)(tlv->val + tlv_len);
+ if (!qlink_tlv_parsing_ok(tlv, data, resp_size)) {
+ pr_err("Malformed TLV buffer\n");
+ return;
}
if (!map || !stats)
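/*
 * Editor's sketch of the qlink_for_each_tlv()/qlink_tlv_parsing_ok()
 * idiom adopted above (an assumption about its semantics, not the code
 * from qlink_util.h): advance a cursor only while a complete header plus
 * QLINK_ALIGN-padded payload fits, then report success iff the cursor
 * stopped exactly at the end of the buffer.
 */
static bool qlink_tlv_walk_ok_sketch(const u8 *data, size_t len)
{
	const u8 *p = data, *end = data + len;

	while (p + sizeof(struct qlink_tlv_hdr) <= end) {
		const struct qlink_tlv_hdr *tlv = (const void *)p;
		size_t step = sizeof(*tlv) +
			      round_up(le16_to_cpu(tlv->len), QLINK_ALIGN);

		if (p + step > end)
			break;		/* truncated/malformed trailing TLV */
		p += step;		/* a real walker visits tlv here */
	}
	return p == end;
}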
@@ -736,9 +745,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
goto out;
}
- qtnf_cmd_sta_info_parse(sinfo,
- (const struct qlink_tlv_hdr *)resp->info,
- var_resp_len);
+ qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len);
out:
qtnf_bus_unlock(vif->mac->bus);
@@ -895,31 +902,21 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
const char *uboot_ver = NULL;
u32 hw_ver = 0;
u16 tlv_type;
- u16 tlv_value_len;
+ u16 tlv_len;
hwinfo->num_mac = resp->num_mac;
hwinfo->mac_bitmap = resp->mac_bitmap;
hwinfo->fw_ver = le32_to_cpu(resp->fw_ver);
- hwinfo->ql_proto_ver = le16_to_cpu(resp->ql_proto_ver);
hwinfo->total_tx_chain = resp->total_tx_chain;
hwinfo->total_rx_chain = resp->total_rx_chain;
- hwinfo->hw_capab = le32_to_cpu(resp->hw_capab);
bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
plat_id = le32_to_cpu(resp->plat_id);
hw_ver = le32_to_cpu(resp->hw_ver);
- tlv = (const struct qlink_tlv_hdr *)resp->info;
-
- while (info_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, resp->info, info_len) {
tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
-
- if (tlv_value_len + sizeof(*tlv) > info_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
+ tlv_len = le16_to_cpu(tlv->len);
switch (tlv_type) {
case QTN_TLV_ID_BUILD_NAME:
@@ -943,36 +940,43 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
case QTN_TLV_ID_UBOOT_VER:
uboot_ver = (const void *)tlv->val;
break;
- case QTN_TLV_ID_MAX_SCAN_SSIDS:
- hwinfo->max_scan_ssids = *tlv->val;
+ case QTN_TLV_ID_BITMAP:
+ memcpy(hwinfo->hw_capab, tlv->val,
+ min(sizeof(hwinfo->hw_capab), (size_t)tlv_len));
break;
default:
break;
}
+ }
+
+ if (!qlink_tlv_parsing_ok(tlv, resp->info, info_len)) {
+ pr_err("Malformed TLV buffer\n");
+ return -EINVAL;
+ }
- info_len -= tlv_value_len + sizeof(*tlv);
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- pr_info("fw_version=%d, MACs map %#x, chains Tx=%u Rx=%u, capab=0x%x\n",
- hwinfo->fw_ver, hwinfo->mac_bitmap,
- hwinfo->total_tx_chain, hwinfo->total_rx_chain,
- hwinfo->hw_capab);
-
- pr_info("\nBuild name: %s" \
- "\nBuild revision: %s" \
- "\nBuild type: %s" \
- "\nBuild label: %s" \
- "\nBuild timestamp: %lu" \
- "\nPlatform ID: %lu" \
- "\nHardware ID: %s" \
- "\nCalibration version: %s" \
- "\nU-Boot version: %s" \
- "\nHardware version: 0x%08x\n",
+ pr_info("\nBuild name: %s\n"
+ "Build revision: %s\n"
+ "Build type: %s\n"
+ "Build label: %s\n"
+ "Build timestamp: %lu\n"
+ "Platform ID: %lu\n"
+ "Hardware ID: %s\n"
+ "Calibration version: %s\n"
+ "U-Boot version: %s\n"
+ "Hardware version: 0x%08x\n"
+ "Qlink ver: %u.%u\n"
+ "MACs map: %#x\n"
+ "Chains Rx-Tx: %ux%u\n"
+ "FW version: 0x%x\n",
bld_name, bld_rev, bld_type, bld_label,
(unsigned long)bld_tmstamp,
(unsigned long)plat_id,
- hw_id, calibration_ver, uboot_ver, hw_ver);
+ hw_id, calibration_ver, uboot_ver, hw_ver,
+ QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver),
+ QLINK_VER_MINOR(bus->hw_info.ql_proto_ver),
+ hwinfo->mac_bitmap,
+ hwinfo->total_rx_chain, hwinfo->total_tx_chain,
+ hwinfo->fw_ver);
strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
hwinfo->hw_version = hw_ver;
@@ -1016,18 +1020,15 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const struct qlink_resp_get_mac_info *resp,
size_t tlv_buf_size)
{
- const u8 *tlv_buf = resp->var_info;
- struct ieee80211_iface_combination *comb = NULL;
+ struct ieee80211_iface_combination *comb = mac->macinfo.if_comb;
size_t n_comb = 0;
struct ieee80211_iface_limit *limits;
- const struct qlink_iface_comb_num *comb_num;
const struct qlink_iface_limit_record *rec;
const struct qlink_iface_limit *lim;
const struct qlink_wowlan_capab_data *wowlan;
u16 rec_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
u8 *ext_capa = NULL;
u8 *ext_capa_mask = NULL;
@@ -1066,44 +1067,11 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
break;
}
- tlv = (const struct qlink_tlv_hdr *)tlv_buf;
- while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, resp->var_info, tlv_buf_size) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > tlv_buf_size) {
- pr_warn("MAC%u: malformed TLV 0x%.2X; LEN: %u\n",
- mac->macid, tlv_type, tlv_value_len);
- return -EINVAL;
- }
switch (tlv_type) {
- case QTN_TLV_ID_NUM_IFACE_COMB:
- if (tlv_value_len != sizeof(*comb_num))
- return -EINVAL;
-
- comb_num = (void *)tlv->val;
-
- /* free earlier iface comb memory */
- qtnf_mac_iface_comb_free(mac);
-
- mac->macinfo.n_if_comb =
- le32_to_cpu(comb_num->iface_comb_num);
-
- mac->macinfo.if_comb =
- kcalloc(mac->macinfo.n_if_comb,
- sizeof(*mac->macinfo.if_comb),
- GFP_KERNEL);
-
- if (!mac->macinfo.if_comb)
- return -ENOMEM;
-
- comb = mac->macinfo.if_comb;
-
- pr_debug("MAC%u: %zu iface combinations\n",
- mac->macid, mac->macinfo.n_if_comb);
-
- break;
case QTN_TLV_ID_IFACE_LIMIT:
if (unlikely(!comb)) {
pr_warn("MAC%u: no combinations advertised\n",
@@ -1207,14 +1175,10 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
mac->macid, tlv_type);
break;
}
-
- tlv_buf_size -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (tlv_buf_size) {
- pr_warn("MAC%u: malformed TLV buf; bytes left: %zu\n",
- mac->macid, tlv_buf_size);
+ if (!qlink_tlv_parsing_ok(tlv, resp->var_info, tlv_buf_size)) {
+ pr_err("Malformed TLV buffer\n");
return -EINVAL;
}
@@ -1260,13 +1224,15 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
return 0;
}
-static void
+static int
qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
const struct qlink_resp_get_mac_info *resp_info)
{
struct qtnf_mac_info *mac_info;
struct qtnf_vif *vif;
+ qtnf_mac_iface_comb_free(mac);
+
mac_info = &mac->macinfo;
mac_info->bands_cap = resp_info->bands_cap;
@@ -1285,12 +1251,28 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
mac_info->radar_detect_widths =
qlink_chan_width_mask_to_nl(le16_to_cpu(
resp_info->radar_detect_widths));
- mac_info->max_acl_mac_addrs = le32_to_cpu(resp_info->max_acl_mac_addrs);
+ mac_info->max_acl_mac_addrs = le16_to_cpu(resp_info->max_acl_mac_addrs);
+ mac_info->frag_thr = le32_to_cpu(resp_info->frag_threshold);
+ mac_info->rts_thr = le32_to_cpu(resp_info->rts_threshold);
+ mac_info->sretry_limit = resp_info->retry_short;
+ mac_info->lretry_limit = resp_info->retry_long;
+ mac_info->coverage_class = resp_info->coverage_class;
+ mac_info->max_scan_ssids = resp_info->max_scan_ssids;
memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask,
sizeof(mac_info->ht_cap_mod_mask));
memcpy(&mac_info->vht_cap_mod_mask, &resp_info->vht_cap_mod_mask,
sizeof(mac_info->vht_cap_mod_mask));
+
+ mac_info->n_if_comb = resp_info->n_iface_combinations;
+ mac_info->if_comb = kcalloc(mac->macinfo.n_if_comb,
+ sizeof(*mac->macinfo.if_comb),
+ GFP_KERNEL);
+
+ if (!mac->macinfo.if_comb)
+ return -ENOMEM;
+
+ return 0;
}
static void qtnf_cmd_resp_band_fill_htcap(const u8 *info,
@@ -1389,7 +1371,6 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
size_t payload_len)
{
u16 tlv_type;
- size_t tlv_len;
size_t tlv_dlen;
const struct qlink_tlv_hdr *tlv;
const struct qlink_channel *qchan;
@@ -1424,24 +1405,15 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
return -ENOMEM;
}
- tlv = (struct qlink_tlv_hdr *)resp->info;
-
- while (payload_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, resp->info, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_dlen = le16_to_cpu(tlv->len);
- tlv_len = tlv_dlen + sizeof(*tlv);
-
- if (tlv_len > payload_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %zu\n",
- tlv_type, tlv_len);
- goto error_ret;
- }
switch (tlv_type) {
case QTN_TLV_ID_CHANNEL:
if (unlikely(tlv_dlen != sizeof(*qchan))) {
pr_err("invalid channel TLV len %zu\n",
- tlv_len);
+ tlv_dlen);
goto error_ret;
}
@@ -1544,13 +1516,10 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
pr_warn("unknown TLV type: %#x\n", tlv_type);
break;
}
-
- payload_len -= tlv_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_dlen);
}
- if (payload_len) {
- pr_err("malformed TLV buf; bytes left: %zu\n", payload_len);
+ if (!qlink_tlv_parsing_ok(tlv, resp->info, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
goto error_ret;
}
@@ -1570,128 +1539,6 @@ error_ret:
return ret;
}
-static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac,
- const u8 *payload, size_t payload_len)
-{
- struct qtnf_mac_info *mac_info;
- struct qlink_tlv_frag_rts_thr *phy_thr;
- struct qlink_tlv_rlimit *limit;
- struct qlink_tlv_cclass *class;
- u16 tlv_type;
- u16 tlv_value_len;
- size_t tlv_full_len;
- const struct qlink_tlv_hdr *tlv;
-
- mac_info = &mac->macinfo;
-
- tlv = (struct qlink_tlv_hdr *)payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
- tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len) {
- pr_warn("MAC%u: malformed TLV 0x%.2X; LEN: %u\n",
- mac->macid, tlv_type, tlv_value_len);
- return -EINVAL;
- }
-
- switch (tlv_type) {
- case QTN_TLV_ID_FRAG_THRESH:
- phy_thr = (void *)tlv;
- mac_info->frag_thr = le32_to_cpu(phy_thr->thr);
- break;
- case QTN_TLV_ID_RTS_THRESH:
- phy_thr = (void *)tlv;
- mac_info->rts_thr = le32_to_cpu(phy_thr->thr);
- break;
- case QTN_TLV_ID_SRETRY_LIMIT:
- limit = (void *)tlv;
- mac_info->sretry_limit = limit->rlimit;
- break;
- case QTN_TLV_ID_LRETRY_LIMIT:
- limit = (void *)tlv;
- mac_info->lretry_limit = limit->rlimit;
- break;
- case QTN_TLV_ID_COVERAGE_CLASS:
- class = (void *)tlv;
- mac_info->coverage_class = class->cclass;
- break;
- default:
- pr_err("MAC%u: Unknown TLV type: %#x\n", mac->macid,
- le16_to_cpu(tlv->type));
- break;
- }
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- if (payload_len) {
- pr_warn("MAC%u: malformed TLV buf; bytes left: %zu\n",
- mac->macid, payload_len);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-qtnf_cmd_resp_proc_chan_stat_info(struct qtnf_chan_stats *stats,
- const u8 *payload, size_t payload_len)
-{
- struct qlink_chan_stats *qlink_stats;
- const struct qlink_tlv_hdr *tlv;
- size_t tlv_full_len;
- u16 tlv_value_len;
- u16 tlv_type;
-
- tlv = (struct qlink_tlv_hdr *)payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
- tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > payload_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
- switch (tlv_type) {
- case QTN_TLV_ID_CHANNEL_STATS:
- if (unlikely(tlv_value_len != sizeof(*qlink_stats))) {
- pr_err("invalid CHANNEL_STATS entry size\n");
- return -EINVAL;
- }
-
- qlink_stats = (void *)tlv->val;
-
- stats->chan_num = le32_to_cpu(qlink_stats->chan_num);
- stats->cca_tx = le32_to_cpu(qlink_stats->cca_tx);
- stats->cca_rx = le32_to_cpu(qlink_stats->cca_rx);
- stats->cca_busy = le32_to_cpu(qlink_stats->cca_busy);
- stats->cca_try = le32_to_cpu(qlink_stats->cca_try);
- stats->chan_noise = qlink_stats->chan_noise;
-
- pr_debug("chan(%u) try(%u) busy(%u) noise(%d)\n",
- stats->chan_num, stats->cca_try,
- stats->cca_busy, stats->chan_noise);
- break;
- default:
- pr_warn("Unknown TLV type: %#x\n",
- le16_to_cpu(tlv->type));
- }
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- if (payload_len) {
- pr_warn("malformed TLV buf; bytes left: %zu\n", payload_len);
- return -EINVAL;
- }
-
- return 0;
-}
-
int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
@@ -1712,7 +1559,10 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
goto out;
resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
- qtnf_cmd_resp_proc_mac_info(mac, resp);
+ ret = qtnf_cmd_resp_proc_mac_info(mac, resp);
+ if (ret)
+ goto out;
+
ret = qtnf_parse_variable_mac_info(mac, resp, var_data_len);
out:
@@ -1793,35 +1643,6 @@ out:
return ret;
}
-int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
-{
- struct sk_buff *cmd_skb, *resp_skb = NULL;
- struct qlink_resp_phy_params *resp;
- size_t response_size = 0;
- int ret = 0;
-
- cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
- QLINK_CMD_PHY_PARAMS_GET,
- sizeof(struct qlink_cmd));
- if (!cmd_skb)
- return -ENOMEM;
-
- qtnf_bus_lock(mac->bus);
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
- sizeof(*resp), &response_size);
- if (ret)
- goto out;
-
- resp = (struct qlink_resp_phy_params *)resp_skb->data;
- ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size);
-
-out:
- qtnf_bus_unlock(mac->bus);
- consume_skb(resp_skb);
-
- return ret;
-}
-
int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
@@ -1843,16 +1664,16 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_RTS_THRESH,
wiphy->rts_threshold);
if (changed & WIPHY_PARAM_COVERAGE_CLASS)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
- wiphy->coverage_class);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
+ wiphy->coverage_class);
if (changed & WIPHY_PARAM_RETRY_LONG)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT,
- wiphy->retry_long);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT,
+ wiphy->retry_long);
if (changed & WIPHY_PARAM_RETRY_SHORT)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT,
- wiphy->retry_short);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT,
+ wiphy->retry_short);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
if (ret)
@@ -1866,23 +1687,35 @@ out:
int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
{
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_resp_init_fw *resp;
+ struct qlink_cmd_init_fw *cmd;
struct sk_buff *cmd_skb;
- int ret = 0;
+ size_t info_len = 0;
+ int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_FW_INIT,
- sizeof(struct qlink_cmd));
+ sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
+ cmd = (struct qlink_cmd_init_fw *)cmd_skb->data;
+ cmd->qlink_proto_ver = cpu_to_le32(QLINK_PROTO_VER);
+
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb);
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), &info_len);
+ qtnf_bus_unlock(bus);
+
if (ret)
goto out;
-out:
- qtnf_bus_unlock(bus);
+ resp = (struct qlink_resp_init_fw *)resp_skb->data;
+ bus->hw_info.ql_proto_ver = le32_to_cpu(resp->qlink_proto_ver);
+out:
+ consume_skb(resp_skb);
return ret;
}
@@ -2178,108 +2011,90 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
}
-static void qtnf_cmd_scan_set_dwell(struct qtnf_wmac *mac,
- struct sk_buff *cmd_skb)
+int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct cfg80211_scan_request *scan_req = mac->scan_req;
- u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
u16 dwell_passive = QTNF_SCAN_DWELL_PASSIVE_DEFAULT;
- u16 duration = QTNF_SCAN_SAMPLE_DURATION_DEFAULT;
-
- if (scan_req->duration) {
- dwell_active = scan_req->duration;
- dwell_passive = scan_req->duration;
- }
-
- pr_debug("MAC%u: %s scan dwell active=%u, passive=%u, duration=%u\n",
- mac->macid,
- scan_req->duration_mandatory ? "mandatory" : "max",
- dwell_active, dwell_passive, duration);
-
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_DWELL_ACTIVE,
- dwell_active);
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_DWELL_PASSIVE,
- dwell_passive);
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_SAMPLE_DURATION,
- duration);
-}
-
-int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
-{
- struct sk_buff *cmd_skb;
+ u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
+ struct wireless_dev *wdev = scan_req->wdev;
struct ieee80211_channel *sc;
- struct cfg80211_scan_request *scan_req = mac->scan_req;
- int n_channels;
- int count = 0;
+ struct qlink_cmd_scan *cmd;
+ struct sk_buff *cmd_skb;
+ int n_channels = 0;
+ u64 flags = 0;
+ int count;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
- sizeof(struct qlink_cmd));
+ sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
+ cmd = (struct qlink_cmd_scan *)cmd_skb->data;
- if (scan_req->n_ssids != 0) {
- while (count < scan_req->n_ssids) {
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID,
- scan_req->ssids[count].ssid,
- scan_req->ssids[count].ssid_len);
- count++;
- }
+ if (scan_req->duration) {
+ dwell_active = scan_req->duration;
+ dwell_passive = scan_req->duration;
+ } else if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ wdev->current_bss) {
+ /* let device select dwell based on traffic conditions */
+ dwell_active = QTNF_SCAN_TIME_AUTO;
+ dwell_passive = QTNF_SCAN_TIME_AUTO;
+ }
+
+ cmd->n_ssids = cpu_to_le16(scan_req->n_ssids);
+ for (count = 0; count < scan_req->n_ssids; ++count) {
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID,
+ scan_req->ssids[count].ssid,
+ scan_req->ssids[count].ssid_len);
}
if (scan_req->ie_len != 0)
qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_PROBE_REQ,
scan_req->ie, scan_req->ie_len);
- if (scan_req->n_channels) {
- n_channels = scan_req->n_channels;
- count = 0;
-
- while (n_channels != 0) {
- sc = scan_req->channels[count];
- if (sc->flags & IEEE80211_CHAN_DISABLED) {
- n_channels--;
- continue;
- }
+ for (count = 0; count < scan_req->n_channels; ++count) {
+ sc = scan_req->channels[count];
+ if (sc->flags & IEEE80211_CHAN_DISABLED)
+ continue;
- pr_debug("MAC%u: scan chan=%d, freq=%d, flags=%#x\n",
- mac->macid, sc->hw_value, sc->center_freq,
- sc->flags);
+ pr_debug("[MAC%u] scan chan=%d, freq=%d, flags=%#x\n",
+ mac->macid, sc->hw_value, sc->center_freq,
+ sc->flags);
- qtnf_cmd_channel_tlv_add(cmd_skb, sc);
- n_channels--;
- count++;
- }
+ qtnf_cmd_channel_tlv_add(cmd_skb, sc);
+ ++n_channels;
}
- qtnf_cmd_scan_set_dwell(mac, cmd_skb);
+ if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH)
+ flags |= QLINK_SCAN_FLAG_FLUSH;
+
+ if (scan_req->duration_mandatory)
+ flags |= QLINK_SCAN_FLAG_DURATION_MANDATORY;
+
+ cmd->n_channels = cpu_to_le16(n_channels);
+ cmd->active_dwell = cpu_to_le16(dwell_active);
+ cmd->passive_dwell = cpu_to_le16(dwell_passive);
+ cmd->sample_duration = cpu_to_le16(QTNF_SCAN_SAMPLE_DURATION_DEFAULT);
+ cmd->flags = cpu_to_le64(flags);
+
+ pr_debug("[MAC%u] %s scan dwell active=%u passive=%u duration=%u\n",
+ mac->macid,
+ scan_req->duration_mandatory ? "mandatory" : "max",
+ dwell_active, dwell_passive,
+ QTNF_SCAN_SAMPLE_DURATION_DEFAULT);
if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
+ pr_debug("[MAC%u] scan with random addr=%pM, mask=%pM\n",
mac->macid,
scan_req->mac_addr, scan_req->mac_addr_mask);
-
qtnf_cmd_randmac_tlv_add(cmd_skb, scan_req->mac_addr,
scan_req->mac_addr_mask);
}
- if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH) {
- pr_debug("MAC%u: flush cache before scan\n", mac->macid);
-
- qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
- }
-
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(mac->bus);
return ret;
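/*
 * Editor's summary of the dwell selection above (illustration): an
 * explicit scan_req->duration always wins for both dwell values; an
 * already-associated STA falls back to QTNF_SCAN_TIME_AUTO so the
 * firmware can trade scan time against live traffic; otherwise the
 * fixed active/passive defaults (90/100) are sent.
 */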
@@ -2396,7 +2211,7 @@ int qtnf_cmd_send_external_auth(struct qtnf_vif *vif,
cmd = (struct qlink_cmd_external_auth *)cmd_skb->data;
- ether_addr_copy(cmd->bssid, auth->bssid);
+ ether_addr_copy(cmd->peer, auth->bssid);
cmd->status = cpu_to_le16(auth->status);
qtnf_bus_lock(vif->mac->bus);
@@ -2552,8 +2367,91 @@ int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
return ret;
}
-int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
- struct qtnf_chan_stats *stats)
+static int
+qtnf_cmd_resp_proc_chan_stat_info(struct survey_info *survey,
+ const u8 *payload, size_t payload_len)
+{
+ const struct qlink_chan_stats *stats = NULL;
+ const struct qlink_tlv_hdr *tlv;
+ u16 tlv_value_len;
+ u16 tlv_type;
+ const u8 *map = NULL;
+ unsigned int map_len = 0;
+ unsigned int stats_len = 0;
+
+ qlink_for_each_tlv(tlv, payload, payload_len) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+
+ switch (tlv_type) {
+ case QTN_TLV_ID_BITMAP:
+ map = tlv->val;
+ map_len = tlv_value_len;
+ break;
+ case QTN_TLV_ID_CHANNEL_STATS:
+ stats = (struct qlink_chan_stats *)tlv->val;
+ stats_len = tlv_value_len;
+ break;
+ default:
+ pr_info("Unknown TLV type: %#x\n", tlv_type);
+ break;
+ }
+ }
+
+ if (!qlink_tlv_parsing_ok(tlv, payload, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
+ return -EINVAL;
+ }
+
+ if (!map || !stats)
+ return 0;
+
+#define qtnf_chan_stat_avail(stat_name, bitn) \
+ (qtnf_utils_is_bit_set(map, bitn, map_len) && \
+ (offsetofend(struct qlink_chan_stats, stat_name) <= stats_len))
+
+ if (qtnf_chan_stat_avail(time_on, QLINK_CHAN_STAT_TIME_ON)) {
+ survey->filled |= SURVEY_INFO_TIME;
+ survey->time = le64_to_cpu(stats->time_on);
+ }
+
+ if (qtnf_chan_stat_avail(time_tx, QLINK_CHAN_STAT_TIME_TX)) {
+ survey->filled |= SURVEY_INFO_TIME_TX;
+ survey->time_tx = le64_to_cpu(stats->time_tx);
+ }
+
+ if (qtnf_chan_stat_avail(time_rx, QLINK_CHAN_STAT_TIME_RX)) {
+ survey->filled |= SURVEY_INFO_TIME_RX;
+ survey->time_rx = le64_to_cpu(stats->time_rx);
+ }
+
+ if (qtnf_chan_stat_avail(cca_busy, QLINK_CHAN_STAT_CCA_BUSY)) {
+ survey->filled |= SURVEY_INFO_TIME_BUSY;
+ survey->time_busy = le64_to_cpu(stats->cca_busy);
+ }
+
+ if (qtnf_chan_stat_avail(cca_busy_ext, QLINK_CHAN_STAT_CCA_BUSY_EXT)) {
+ survey->filled |= SURVEY_INFO_TIME_EXT_BUSY;
+ survey->time_ext_busy = le64_to_cpu(stats->cca_busy_ext);
+ }
+
+ if (qtnf_chan_stat_avail(time_scan, QLINK_CHAN_STAT_TIME_SCAN)) {
+ survey->filled |= SURVEY_INFO_TIME_SCAN;
+ survey->time_scan = le64_to_cpu(stats->time_scan);
+ }
+
+ if (qtnf_chan_stat_avail(chan_noise, QLINK_CHAN_STAT_CHAN_NOISE)) {
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+ survey->noise = stats->chan_noise;
+ }
+
+#undef qtnf_chan_stat_avail
+
+ return 0;
+}
+
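/*
 * Editor's aside (illustration, not part of the patch): a stats field is
 * consumed above only when the firmware both set its bit in the bitmap
 * TLV and sent a stats TLV long enough to contain the field, which keeps
 * older or truncated replies safe. The offsetofend() length check
 * expands to roughly:
 */
#define offsetofend_sketch(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))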
+int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u32 chan_freq,
+ struct survey_info *survey)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_get_chan_stats *cmd;
@@ -2567,22 +2465,30 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
-
cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data;
- cmd->channel = cpu_to_le16(channel);
+ cmd->channel_freq = cpu_to_le32(chan_freq);
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &var_data_len);
+ qtnf_bus_unlock(mac->bus);
+
if (ret)
goto out;
resp = (struct qlink_resp_get_chan_stats *)resp_skb->data;
- ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info,
+
+ if (le32_to_cpu(resp->chan_freq) != chan_freq) {
+ pr_err("[MAC%u] channel stats freq %u != requested %u\n",
+ mac->macid, le32_to_cpu(resp->chan_freq), chan_freq);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = qtnf_cmd_resp_proc_chan_stat_info(survey, resp->info,
var_data_len);
out:
- qtnf_bus_unlock(mac->bus);
consume_skb(resp_skb);
return ret;
@@ -2595,6 +2501,7 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct qlink_cmd_chan_switch *cmd;
struct sk_buff *cmd_skb;
int ret;
+ u64 flags = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
QLINK_CMD_CHAN_SWITCH,
@@ -2602,19 +2509,19 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
+ if (params->radar_required)
+ flags |= QLINK_CHAN_SW_RADAR_REQUIRED;
+
+ if (params->block_tx)
+ flags |= QLINK_CHAN_SW_BLOCK_TX;
cmd = (struct qlink_cmd_chan_switch *)cmd_skb->data;
- cmd->channel = cpu_to_le16(params->chandef.chan->hw_value);
- cmd->radar_required = params->radar_required;
- cmd->block_tx = params->block_tx;
+ qlink_chandef_cfg2q(&params->chandef, &cmd->channel);
+ cmd->flags = cpu_to_le64(flags);
cmd->beacon_count = params->count;
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(mac->bus);
return ret;
@@ -2695,7 +2602,7 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
if (!cmd_skb)
return -ENOMEM;
- tlv = skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+ tlv = skb_put(cmd_skb, sizeof(*tlv) + round_up(acl_size, QLINK_ALIGN));
tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
tlv->len = cpu_to_le16(acl_size);
qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
@@ -2884,3 +2791,39 @@ int qtnf_cmd_netdev_changeupper(const struct qtnf_vif *vif, int br_domain)
return ret;
}
+
+int qtnf_cmd_send_update_owe(struct qtnf_vif *vif,
+ struct cfg80211_update_owe_info *owe)
+{
+ struct qlink_cmd_update_owe *cmd;
+ struct sk_buff *cmd_skb;
+ int ret;
+
+ if (sizeof(*cmd) + owe->ie_len > QTNF_MAX_CMD_BUF_SIZE) {
+ pr_warn("VIF%u.%u: OWE update IEs too big: %zu\n",
+ vif->mac->macid, vif->vifid, owe->ie_len);
+ return -E2BIG;
+ }
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_UPDATE_OWE,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_update_owe *)cmd_skb->data;
+ ether_addr_copy(cmd->peer, owe->peer);
+ cmd->status = cpu_to_le16(owe->status);
+ if (owe->ie_len && owe->ie)
+ qtnf_cmd_skb_put_buffer(cmd_skb, owe->ie, owe->ie_len);
+
+ qtnf_bus_lock(vif->mac->bus);
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
+ goto out;
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index ab273257b078..72ad6ae5c750 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -59,8 +59,8 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
bool up);
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
bool slave_radar, bool dfs_offload);
-int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
- struct qtnf_chan_stats *stats);
+int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u32 chan_freq,
+ struct survey_info *survey);
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct cfg80211_csa_settings *params);
int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef);
@@ -76,5 +76,7 @@ int qtnf_cmd_set_tx_power(const struct qtnf_vif *vif,
int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
const struct cfg80211_wowlan *wowl);
int qtnf_cmd_netdev_changeupper(const struct qtnf_vif *vif, int br_domain);
+int qtnf_cmd_send_update_owe(struct qtnf_vif *vif,
+ struct cfg80211_update_owe_info *owe);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 4320180f8c07..eea777f8acea 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/if_ether.h>
+#include <linux/nospec.h>
#include "core.h"
#include "bus.h"
@@ -41,11 +42,12 @@ struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
{
struct qtnf_wmac *mac = NULL;
- if (unlikely(macid >= QTNF_MAX_MAC)) {
+ if (macid >= QTNF_MAX_MAC) {
pr_err("invalid MAC index %u\n", macid);
return NULL;
}
+ macid = array_index_nospec(macid, QTNF_MAX_MAC);
mac = bus->mac[macid];
if (unlikely(!mac)) {
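/*
 * Editor's note (illustration): array_index_nospec() clamps the already
 * bounds-checked index under speculative execution, closing the
 * Spectre-v1 window where bus->mac[macid] could otherwise be loaded
 * speculatively with an out-of-range macid:
 *
 *	if (idx >= size)
 *		return NULL;			// architectural check
 *	idx = array_index_nospec(idx, size);	// speculation clamp
 *	use(array[idx]);
 */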
@@ -429,18 +431,28 @@ static void qtnf_vif_send_data_high_pri(struct work_struct *work)
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
unsigned int macid)
{
+ struct platform_device *pdev = NULL;
+ struct qtnf_wmac *mac;
struct qtnf_vif *vif;
struct wiphy *wiphy;
- struct qtnf_wmac *mac;
unsigned int i;
- wiphy = qtnf_wiphy_allocate(bus);
+ if (bus->hw_info.num_mac > 1) {
+ pdev = platform_device_register_data(bus->dev,
+ dev_name(bus->dev),
+ macid, NULL, 0);
+ if (IS_ERR(pdev))
+ return ERR_PTR(-EINVAL);
+ }
+
+ wiphy = qtnf_wiphy_allocate(bus, pdev);
if (!wiphy)
return ERR_PTR(-ENOMEM);
mac = wiphy_priv(wiphy);
mac->macid = macid;
+ mac->pdev = pdev;
mac->bus = bus;
mutex_init(&mac->mac_lock);
INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
@@ -491,19 +503,18 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = &vif->wdev;
ether_addr_copy(dev->dev_addr, vif->mac_addr);
- SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
dev->tx_queue_len = 100;
dev->ethtool_ops = &qtnf_ethtool_ops;
- if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)
+ if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE))
dev->needed_tailroom = sizeof(struct qtnf_frame_meta_info);
qdev_vif = netdev_priv(dev);
*((void **)qdev_vif) = vif;
- SET_NETDEV_DEV(dev, mac->bus->dev);
+ SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
ret = register_netdevice(dev);
if (ret) {
@@ -559,6 +570,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
wiphy->bands[band] = NULL;
}
+ platform_device_unregister(mac->pdev);
qtnf_mac_iface_comb_free(mac);
qtnf_mac_ext_caps_free(mac);
kfree(mac->macinfo.wowlan);
@@ -585,16 +597,6 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
return PTR_ERR(mac);
}
- ret = qtnf_cmd_get_mac_info(mac);
- if (ret) {
- pr_err("MAC%u: failed to get info\n", macid);
- goto error;
- }
-
- /* Use MAC address of the first active radio as a unique device ID */
- if (is_zero_ether_addr(mac->bus->hw_id))
- ether_addr_copy(mac->bus->hw_id, mac->macaddr);
-
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not ready\n", macid);
@@ -609,12 +611,16 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
- ret = qtnf_cmd_send_get_phy_params(mac);
+ ret = qtnf_cmd_get_mac_info(mac);
if (ret) {
- pr_err("MAC%u: failed to get PHY settings\n", macid);
+ pr_err("MAC%u: failed to get MAC info\n", macid);
goto error_del_vif;
}
+ /* Use MAC address of the first active radio as a unique device ID */
+ if (is_zero_ether_addr(mac->bus->hw_id))
+ ether_addr_copy(mac->bus->hw_id, mac->macaddr);
+
ret = qtnf_mac_init_bands(mac);
if (ret) {
pr_err("MAC%u: failed to init bands\n", macid);
@@ -639,7 +645,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error_del_vif;
}
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ if (qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret)
goto error;
@@ -705,7 +711,8 @@ static int qtnf_core_netdevice_event(struct notifier_block *nb,
info->linking ? "add" : "del");
if (IS_ENABLED(CONFIG_NET_SWITCHDEV) &&
- (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)) {
+ qtnf_hwcap_is_set(&bus->hw_info,
+ QLINK_HW_CAPAB_HW_BRIDGE)) {
if (info->linking)
br_domain = brdev->ifindex;
else
@@ -756,6 +763,15 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
+ if (QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver) !=
+ QLINK_PROTO_VER_MAJOR) {
+ pr_err("qlink driver vs FW version mismatch: %u vs %u\n",
+ QLINK_PROTO_VER_MAJOR,
+ QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver));
+ ret = -EPROTONOSUPPORT;
+ goto error;
+ }
+
bus->fw_state = QTNF_FW_STATE_ACTIVE;
ret = qtnf_cmd_get_hw_info(bus);
if (ret) {
@@ -763,14 +779,7 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
- if (bus->hw_info.ql_proto_ver != QLINK_PROTO_VER) {
- pr_err("qlink version mismatch %u != %u\n",
- QLINK_PROTO_VER, bus->hw_info.ql_proto_ver);
- ret = -EPROTONOSUPPORT;
- goto error;
- }
-
- if ((bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) &&
+ if (qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE) &&
bus->bus_ops->data_tx_use_meta_set)
bus->bus_ops->data_tx_use_meta_set(bus, true);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index d715e1cd0006..269ce12cf8bf 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -20,9 +20,11 @@
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/platform_device.h>
#include "qlink.h"
#include "trans.h"
+#include "qlink_util.h"
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
@@ -83,7 +85,8 @@ struct qtnf_mac_info {
u8 sretry_limit;
u8 coverage_class;
u8 radar_detect_widths;
- u32 max_acl_mac_addrs;
+ u8 max_scan_ssids;
+ u16 max_acl_mac_addrs;
struct ieee80211_ht_cap ht_cap_mod_mask;
struct ieee80211_vht_cap vht_cap_mod_mask;
struct ieee80211_iface_combination *if_comb;
@@ -94,15 +97,6 @@ struct qtnf_mac_info {
struct wiphy_wowlan_support *wowlan;
};
-struct qtnf_chan_stats {
- u32 chan_num;
- u32 cca_tx;
- u32 cca_rx;
- u32 cca_busy;
- u32 cca_try;
- s8 chan_noise;
-};
-
struct qtnf_wmac {
u8 macid;
u8 wiphy_registered;
@@ -114,19 +108,19 @@ struct qtnf_wmac {
struct mutex mac_lock; /* lock during wmac specific ops */
struct delayed_work scan_timeout;
struct ieee80211_regdomain *rd;
+ struct platform_device *pdev;
};
struct qtnf_hw_info {
- u16 ql_proto_ver;
+ u32 ql_proto_ver;
u8 num_mac;
u8 mac_bitmap;
u32 fw_ver;
- u32 hw_capab;
u8 total_tx_chain;
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
- u8 max_scan_ssids;
+ u8 hw_capab[QLINK_HW_CAPAB_NUM / BITS_PER_BYTE + 1];
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
@@ -135,12 +129,12 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
bool qtnf_slave_radar_get(void);
bool qtnf_dfs_offload_get(void);
-struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
+struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus,
+ struct platform_device *pdev);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
void qtnf_main_work_queue(struct work_struct *work);
int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
-int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
@@ -160,4 +154,11 @@ static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
return *((void **)netdev_priv(dev));
}
+static inline bool qtnf_hwcap_is_set(const struct qtnf_hw_info *info,
+ unsigned int bit)
+{
+ return qtnf_utils_is_bit_set(info->hw_capab, bit,
+ sizeof(info->hw_capab));
+}
+
#endif /* _QTN_FMAC_CORE_H_ */
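Note that hw_capab is now a byte array sized to hold QLINK_HW_CAPAB_NUM bits, and qtnf_hwcap_is_set() delegates to qtnf_utils_is_bit_set(), whose body is not part of this diff. A plausible shape for such a bounds-checked bit test over a byte-array bitmap (an assumption for illustration, not the driver's actual helper):

/* Assumed shape of a bounds-checked bit test over a byte-array bitmap;
 * qtnf_utils_is_bit_set() itself is not shown in this diff. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool bitmap_bit_is_set(const uint8_t *map, unsigned int bit,
                              size_t map_len)
{
        unsigned int byte = bit / 8;

        if (byte >= map_len)    /* out-of-range bits read as unset */
                return false;

        return map[byte] & (1u << (bit % 8));
}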
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 51af93bdf06e..c775c177933b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include "cfg80211.h"
#include "core.h"
@@ -25,7 +26,6 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
int ret = 0;
@@ -58,23 +58,17 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
sinfo->generation = vif->generation;
payload_len = len - sizeof(*sta_assoc);
- tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
- while (payload_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, sta_assoc->ies, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len) {
- ret = -EINVAL;
- goto out;
- }
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set)) {
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr))) {
ret = -EINVAL;
goto out;
}
@@ -88,12 +82,10 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
sinfo->assoc_req_ies_len = ie_len;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len) {
+ if (!qlink_tlv_parsing_ok(tlv, sta_assoc->ies, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
ret = -EINVAL;
goto out;
}
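The new minimum-length check accounts for tlv->len counting only value bytes, while struct qlink_tlv_ie_set embeds its own TLV header; the required value size is therefore the struct size minus the header size. A standalone sketch of that arithmetic, using placeholder layouts rather than the real qlink definitions:

/* Placeholder layouts for illustration, not the real qlink structs. */
#include <stdint.h>

struct tlv_hdr {
        uint16_t type;
        uint16_t len;           /* counts value bytes only */
        uint8_t val[];
};

struct tlv_ie_set {
        struct tlv_hdr hdr;     /* header sits inside the struct ... */
        uint8_t type;
        uint8_t flags;
        uint8_t rsvd[2];
        uint8_t ie_data[];
};

static int ie_set_value_len_ok(uint16_t tlv_value_len)
{
        /* ... so subtract it before comparing against tlv->len */
        return tlv_value_len >=
               sizeof(struct tlv_ie_set) - sizeof(struct tlv_hdr);
}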
@@ -153,7 +145,6 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
const u8 *rsp_ies = NULL;
size_t rsp_ies_len = 0;
@@ -235,24 +226,17 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
}
payload_len = len - sizeof(*join_info);
- tlv = (struct qlink_tlv_hdr *)join_info->ies;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, join_info->ies, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (payload_len < tlv_full_len) {
- pr_warn("invalid %u TLV\n", tlv_type);
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto done;
- }
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set)) {
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr))) {
pr_warn("invalid IE_SET TLV\n");
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto done;
@@ -275,15 +259,10 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
break;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len)
- pr_warn("VIF%u.%u: unexpected remaining payload: %zu\n",
- vif->mac->macid, vif->vifid, payload_len);
-
+ if (!qlink_tlv_parsing_ok(tlv, join_info->ies, payload_len))
+ pr_warn("Malformed TLV buffer\n");
done:
cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, rsp_ies,
rsp_ies_len, status, GFP_KERNEL);
@@ -368,7 +347,6 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
const u8 *ies = NULL;
size_t ies_len = 0;
@@ -387,21 +365,17 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
}
payload_len = len - sizeof(*sr);
- tlv = (struct qlink_tlv_hdr *)sr->payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, sr->payload, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len)
- return -EINVAL;
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set))
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr)))
return -EINVAL;
ie_set = (const struct qlink_tlv_ie_set *)tlv;
@@ -424,12 +398,9 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
ies_len = ie_len;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len)
+ if (!qlink_tlv_parsing_ok(tlv, sr->payload, payload_len))
return -EINVAL;
bss = cfg80211_inform_bss(wiphy, channel, frame_type,
@@ -607,9 +578,9 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
ether_addr_copy(auth.bssid, ev->bssid);
auth.action = ev->action;
- pr_info("%s: external auth bss=%pM action=%u akm=%u\n",
- vif->netdev->name, auth.bssid, auth.action,
- auth.key_mgmt_suite);
+ pr_debug("%s: external SAE processing: bss=%pM action=%u akm=%u\n",
+ vif->netdev->name, auth.bssid, auth.action,
+ auth.key_mgmt_suite);
ret = cfg80211_external_auth_request(vif->netdev, &auth, GFP_KERNEL);
if (ret)
@@ -654,6 +625,50 @@ qtnf_event_handle_mic_failure(struct qtnf_vif *vif,
return 0;
}
+static int
+qtnf_event_handle_update_owe(struct qtnf_vif *vif,
+ const struct qlink_event_update_owe *owe_ev,
+ u16 len)
+{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ struct cfg80211_update_owe_info owe_info = {};
+ const u16 ie_len = len - sizeof(*owe_ev);
+ u8 *ie;
+
+ if (len < sizeof(*owe_ev)) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ vif->mac->macid, vif->vifid, len,
+ sizeof(struct qlink_event_update_owe));
+ return -EINVAL;
+ }
+
+ if (!wiphy->registered || !vif->netdev)
+ return 0;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_AP) {
+ pr_err("VIF%u.%u: UPDATE_OWE event when not in AP mode\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ ie = kzalloc(ie_len, GFP_KERNEL);
+ if (!ie)
+ return -ENOMEM;
+
+ memcpy(owe_info.peer, owe_ev->peer, ETH_ALEN);
+ memcpy(ie, owe_ev->ies, ie_len);
+ owe_info.ie_len = ie_len;
+ owe_info.ie = ie;
+
+ pr_info("%s: external OWE processing: peer=%pM\n",
+ vif->netdev->name, owe_ev->peer);
+
+ cfg80211_update_owe_info_event(vif->netdev, &owe_info, GFP_KERNEL);
+ kfree(ie);
+
+ return 0;
+}
+
static int qtnf_event_parse(struct qtnf_wmac *mac,
const struct sk_buff *event_skb)
{
@@ -662,18 +677,20 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
int ret = -1;
u16 event_id;
u16 event_len;
+ u8 vifid;
event = (const struct qlink_event *)event_skb->data;
event_id = le16_to_cpu(event->event_id);
event_len = le16_to_cpu(event->mhdr.len);
- if (likely(event->vifid < QTNF_MAX_INTF)) {
- vif = &mac->iflist[event->vifid];
- } else {
+ if (event->vifid >= QTNF_MAX_INTF) {
pr_err("invalid vif(%u)\n", event->vifid);
return -EINVAL;
}
+ vifid = array_index_nospec(event->vifid, QTNF_MAX_INTF);
+ vif = &mac->iflist[vifid];
+
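array_index_nospec() (from <linux/nospec.h>) clamps the just-validated index so it cannot exceed the bound even under speculative execution; the bounds check alone does not stop a Spectre-v1 gadget. The canonical pattern, as a minimal kernel-style sketch (NR_ITEMS and items are placeholders):

/* Canonical Spectre-v1 hardening: validate, then clamp before use. */
if (idx >= NR_ITEMS)
        return -EINVAL;
idx = array_index_nospec(idx, NR_ITEMS);
item = &items[idx];     /* safe even if the branch is mispredicted */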
switch (event_id) {
case QLINK_EVENT_STA_ASSOCIATED:
ret = qtnf_event_handle_sta_assoc(mac, vif, (const void *)event,
@@ -720,6 +737,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
ret = qtnf_event_handle_mic_failure(vif, (const void *)event,
event_len);
break;
+ case QLINK_EVENT_UPDATE_OWE:
+ ret = qtnf_event_handle_update_owe(vif, (const void *)event,
+ event_len);
+ break;
default:
pr_warn("unknown event type: %x\n", event_id);
break;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 8e0d8018208a..dbb241106d8a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -593,7 +593,7 @@ static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
priv->tx_bd_w_index = i;
tx_done:
- if (ret && skb) {
+ if (ret) {
pr_err_ratelimited("drop skb\n");
if (skb->dev)
skb->dev->stats.tx_dropped++;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index b2edb03819d1..4d22a54c034f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -6,7 +6,20 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 16
+#define QLINK_PROTO_VER_MAJOR_M 0xFFFF
+#define QLINK_PROTO_VER_MAJOR_S 16
+#define QLINK_PROTO_VER_MINOR_M 0xFFFF
+#define QLINK_VER_MINOR(_ver) ((_ver) & QLINK_PROTO_VER_MINOR_M)
+#define QLINK_VER_MAJOR(_ver) \
+ (((_ver) >> QLINK_PROTO_VER_MAJOR_S) & QLINK_PROTO_VER_MAJOR_M)
+#define QLINK_VER(_maj, _min) (((_maj) << QLINK_PROTO_VER_MAJOR_S) | (_min))
+
+#define QLINK_PROTO_VER_MAJOR 18
+#define QLINK_PROTO_VER_MINOR 1
+#define QLINK_PROTO_VER \
+ QLINK_VER(QLINK_PROTO_VER_MAJOR, QLINK_PROTO_VER_MINOR)
+
+#define QLINK_ALIGN 4
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
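With this change the protocol version packs the major number into the upper 16 bits and the minor into the lower 16, and (per the qtnf_core_attach hunk above) only a major mismatch aborts attach. A minimal userspace sketch of the packing, with local names standing in for the QLINK_* macros:

/* Minimal userspace sketch of the version packing above; names are
 * local to this example, values mirror the QLINK_* macros. */
#include <stdint.h>
#include <stdio.h>

#define VER_MAJOR_S 16
#define VER_MAJOR_M 0xFFFF
#define VER_MINOR_M 0xFFFF
#define VER(maj, min) (((maj) << VER_MAJOR_S) | (min))

int main(void)
{
        uint32_t fw_ver = VER(18, 3);   /* hypothetical firmware: 18.3 */
        uint32_t drv_major = 18;        /* QLINK_PROTO_VER_MAJOR */

        /* Only the major number must match; minors may differ. */
        if (((fw_ver >> VER_MAJOR_S) & VER_MAJOR_M) != drv_major) {
                fprintf(stderr, "qlink major mismatch\n");
                return 1;
        }
        printf("compatible: %u.%u\n",
               (unsigned)((fw_ver >> VER_MAJOR_S) & VER_MAJOR_M),
               (unsigned)(fw_ver & VER_MINOR_M));
        return 0;
}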
@@ -62,15 +75,24 @@ struct qlink_msg_header {
* @QLINK_HW_CAPAB_HW_BRIDGE: device has hardware switch capabilities.
*/
enum qlink_hw_capab {
- QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
- QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
- QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
- QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
- QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
- QLINK_HW_CAPAB_OBSS_SCAN = BIT(5),
- QLINK_HW_CAPAB_SCAN_DWELL = BIT(6),
- QLINK_HW_CAPAB_SAE = BIT(8),
- QLINK_HW_CAPAB_HW_BRIDGE = BIT(9),
+ QLINK_HW_CAPAB_REG_UPDATE = 0,
+ QLINK_HW_CAPAB_STA_INACT_TIMEOUT,
+ QLINK_HW_CAPAB_DFS_OFFLOAD,
+ QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR,
+ QLINK_HW_CAPAB_PWR_MGMT,
+ QLINK_HW_CAPAB_OBSS_SCAN,
+ QLINK_HW_CAPAB_SCAN_DWELL,
+ QLINK_HW_CAPAB_SAE,
+ QLINK_HW_CAPAB_HW_BRIDGE,
+ QLINK_HW_CAPAB_NUM
+};
+
+/**
+ * enum qlink_driver_capab - host driver capabilities.
+ *
+ */
+enum qlink_driver_capab {
+ QLINK_DRV_CAPAB_NUM = 0
};
enum qlink_iface_type {
@@ -164,7 +186,7 @@ struct qlink_chandef {
__le16 center_freq1;
__le16 center_freq2;
u8 width;
- u8 rsvd;
+ u8 rsvd[3];
} __packed;
#define QLINK_MAX_NR_CIPHER_SUITES 5
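The widened rsvd arrays added throughout this patch pad each wire structure out to the new 4-byte QLINK_ALIGN boundary. One way to enforce that invariant at build time (an illustrative check, not something this patch adds):

/* Illustrative compile-time checks, not part of this patch. */
BUILD_BUG_ON(sizeof(struct qlink_chandef) % QLINK_ALIGN != 0);
BUILD_BUG_ON(sizeof(struct qlink_cmd) % QLINK_ALIGN != 0);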
@@ -269,7 +291,6 @@ enum qlink_cmd_type {
QLINK_CMD_REGISTER_MGMT = 0x0003,
QLINK_CMD_SEND_FRAME = 0x0004,
QLINK_CMD_MGMT_SET_APPIE = 0x0005,
- QLINK_CMD_PHY_PARAMS_GET = 0x0011,
QLINK_CMD_PHY_PARAMS_SET = 0x0012,
QLINK_CMD_GET_HW_INFO = 0x0013,
QLINK_CMD_MAC_INFO = 0x0014,
@@ -301,6 +322,7 @@ enum qlink_cmd_type {
QLINK_CMD_WOWLAN_SET = 0x0063,
QLINK_CMD_EXTERNAL_AUTH = 0x0066,
QLINK_CMD_TXPWR = 0x0067,
+ QLINK_CMD_UPDATE_OWE = 0x0068,
};
/**
@@ -321,9 +343,26 @@ struct qlink_cmd {
struct qlink_msg_header mhdr;
__le16 cmd_id;
__le16 seq_num;
- u8 rsvd[2];
u8 macid;
u8 vifid;
+ u8 rsvd[2];
+} __packed;
+
+/**
+ * struct qlink_cmd_init_fw - data for QLINK_CMD_FW_INIT
+ *
+ * Initialize firmware based on the specified host configuration. This is
+ * the first command sent to the wifi card, and its fixed part should never
+ * be changed; any additions must be done by appending TLVs.
+ * If the wifi card cannot operate with the specified parameters, it
+ * returns an error.
+ *
+ * @qlink_proto_ver: QLINK protocol version used by host driver.
+ */
+struct qlink_cmd_init_fw {
+ struct qlink_cmd chdr;
+ __le32 qlink_proto_ver;
+ u8 var_info[0];
} __packed;
/**
@@ -368,6 +407,7 @@ struct qlink_cmd_mgmt_frame_register {
struct qlink_cmd chdr;
__le16 frame_type;
u8 do_register;
+ u8 rsvd[1];
} __packed;
/**
@@ -405,6 +445,7 @@ struct qlink_cmd_frame_tx {
struct qlink_cmd_get_sta_info {
struct qlink_cmd chdr;
u8 sta_addr[ETH_ALEN];
+ u8 rsvd[2];
} __packed;
/**
@@ -424,6 +465,7 @@ struct qlink_cmd_add_key {
u8 addr[ETH_ALEN];
__le32 cipher;
__le16 vlanid;
+ u8 rsvd[2];
u8 key_data[0];
} __packed;
@@ -453,6 +495,7 @@ struct qlink_cmd_set_def_key {
u8 key_index;
u8 unicast;
u8 multicast;
+ u8 rsvd[1];
} __packed;
/**
@@ -463,6 +506,7 @@ struct qlink_cmd_set_def_key {
struct qlink_cmd_set_def_mgmt_key {
struct qlink_cmd chdr;
u8 key_index;
+ u8 rsvd[3];
} __packed;
/**
@@ -479,6 +523,7 @@ struct qlink_cmd_change_sta {
__le16 if_type;
__le16 vlanid;
u8 sta_addr[ETH_ALEN];
+ u8 rsvd[2];
} __packed;
/**
@@ -489,8 +534,9 @@ struct qlink_cmd_change_sta {
struct qlink_cmd_del_sta {
struct qlink_cmd chdr;
__le16 reason_code;
- u8 subtype;
u8 sta_addr[ETH_ALEN];
+ u8 subtype;
+ u8 rsvd[3];
} __packed;
enum qlink_sta_connect_flags {
@@ -544,7 +590,7 @@ struct qlink_cmd_connect {
*/
struct qlink_cmd_external_auth {
struct qlink_cmd chdr;
- u8 bssid[ETH_ALEN];
+ u8 peer[ETH_ALEN];
__le16 status;
u8 payload[0];
} __packed;
@@ -557,6 +603,7 @@ struct qlink_cmd_external_auth {
struct qlink_cmd_disconnect {
struct qlink_cmd chdr;
__le16 reason;
+ u8 rsvd[2];
} __packed;
/**
@@ -568,6 +615,7 @@ struct qlink_cmd_disconnect {
struct qlink_cmd_updown {
struct qlink_cmd chdr;
u8 if_up;
+ u8 rsvd[3];
} __packed;
/**
@@ -591,16 +639,17 @@ enum qlink_band {
struct qlink_cmd_band_info_get {
struct qlink_cmd chdr;
u8 band;
+ u8 rsvd[3];
} __packed;
/**
* struct qlink_cmd_get_chan_stats - data for QLINK_CMD_CHAN_STATS command
*
- * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J
+ * @channel_freq: channel center frequency
*/
struct qlink_cmd_get_chan_stats {
struct qlink_cmd chdr;
- __le16 channel;
+ __le32 channel_freq;
} __packed;
/**
@@ -653,19 +702,33 @@ struct qlink_cmd_reg_notify {
} __packed;
/**
+ * enum qlink_chan_sw_flags - channel switch control flags
+ *
+ * @QLINK_CHAN_SW_RADAR_REQUIRED: whether radar detection is required on a new
+ * channel.
+ * @QLINK_CHAN_SW_BLOCK_TX: whether transmissions should be blocked while
+ * changing a channel.
+ */
+enum qlink_chan_sw_flags {
+ QLINK_CHAN_SW_RADAR_REQUIRED = BIT(0),
+ QLINK_CHAN_SW_BLOCK_TX = BIT(1),
+};
+
+/**
* struct qlink_cmd_chan_switch - data for QLINK_CMD_CHAN_SWITCH command
*
- * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J
- * @radar_required: whether radar detection is required on the new channel
- * @block_tx: whether transmissions should be blocked while changing
+ * @channel: channel to switch to.
+ * @flags: flags to control channel switch, bitmap of &enum qlink_chan_sw_flags.
* @beacon_count: number of beacons until switch
*/
struct qlink_cmd_chan_switch {
struct qlink_cmd chdr;
- __le16 channel;
- u8 radar_required;
- u8 block_tx;
+ struct qlink_chandef channel;
+ __le64 flags;
+ __le32 n_counter_offsets_beacon;
+ __le32 n_counter_offsets_presp;
u8 beacon_count;
+ u8 rsvd[3];
} __packed;
/**
@@ -769,6 +832,7 @@ struct qlink_cmd_pm_set {
struct qlink_cmd chdr;
__le32 pm_standby_timer;
u8 pm_mode;
+ u8 rsvd[3];
} __packed;
/**
@@ -857,6 +921,60 @@ struct qlink_cmd_ndev_changeupper {
u8 rsvd[1];
} __packed;
+/**
+ * enum qlink_scan_flags - scan request control flags
+ *
+ * Scan flags are used to control QLINK_CMD_SCAN behavior.
+ *
+ * @QLINK_SCAN_FLAG_FLUSH: flush cache before scanning.
+ */
+enum qlink_scan_flags {
+ QLINK_SCAN_FLAG_FLUSH = BIT(0),
+ QLINK_SCAN_FLAG_DURATION_MANDATORY = BIT(1),
+};
+
+/**
+ * struct qlink_cmd_scan - data for QLINK_CMD_SCAN command
+ *
+ * @flags: scan flags, a bitmap of &enum qlink_scan_flags.
+ * @n_ssids: number of WLAN_EID_SSID TLVs expected in variable portion of the
+ * command.
+ * @n_channels: number of QTN_TLV_ID_CHANNEL TLVs expected in variable payload.
+ * @active_dwell: time spent on a single channel for an active scan.
+ * @passive_dwell: time spent on a single channel for a passive scan.
+ * @sample_duration: total duration of sampling a single channel during a scan
+ * including off-channel dwell time and operating channel time.
+ * @bssid: specific BSSID to scan for or a broadcast BSSID.
+ * @scan_width: channel width to use, one of &enum qlink_channel_width.
+ */
+struct qlink_cmd_scan {
+ struct qlink_cmd chdr;
+ __le64 flags;
+ __le16 n_ssids;
+ __le16 n_channels;
+ __le16 active_dwell;
+ __le16 passive_dwell;
+ __le16 sample_duration;
+ u8 bssid[ETH_ALEN];
+ u8 scan_width;
+ u8 rsvd[3];
+ u8 var_info[0];
+} __packed;
+
+/**
+ * struct qlink_cmd_update_owe - data for QLINK_CMD_UPDATE_OWE_INFO command
+ *
+ * @peer: MAC of the peer device for which OWE processing has been completed
+ * @status: OWE external processing status code
+ * @ies: IEs for the peer constructed by the user space
+ */
+struct qlink_cmd_update_owe {
+ struct qlink_cmd chdr;
+ u8 peer[ETH_ALEN];
+ __le16 status;
+ u8 ies[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -896,6 +1014,16 @@ struct qlink_resp {
} __packed;
/**
+ * struct qlink_resp_init_fw - response for QLINK_CMD_FW_INIT
+ *
+ * @qlink_proto_ver: QLINK protocol version used by wifi card firmware.
+ */
+struct qlink_resp_init_fw {
+ struct qlink_resp rhdr;
+ __le32 qlink_proto_ver;
+} __packed;
+
+/**
* enum qlink_dfs_regions - regulatory DFS regions
*
* Corresponds to &enum nl80211_dfs_regions.
@@ -919,6 +1047,7 @@ enum qlink_dfs_regions {
* @num_rx_chain: Number of receive chains used by WMAC.
* @vht_cap_mod_mask: mask specifying which VHT capabilities can be altered.
* @ht_cap_mod_mask: mask specifying which HT capabilities can be altered.
+ * @max_scan_ssids: maximum number of SSIDs the device can scan for in any scan.
* @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band.
* @max_ap_assoc_sta: Maximum number of associations supported by WMAC.
* @radar_detect_widths: bitmask of channels BW for which WMAC can detect radar.
@@ -935,14 +1064,48 @@ struct qlink_resp_get_mac_info {
u8 num_rx_chain;
struct ieee80211_vht_cap vht_cap_mod_mask;
struct ieee80211_ht_cap ht_cap_mod_mask;
+
__le16 max_ap_assoc_sta;
+ __le32 hw_version;
+ __le32 probe_resp_offload;
+ __le32 bss_select_support;
+ __le16 n_addresses;
__le16 radar_detect_widths;
- __le32 max_acl_mac_addrs;
+ __le16 max_remain_on_channel_duration;
+ __le16 max_acl_mac_addrs;
+
+ __le32 frag_threshold;
+ __le32 rts_threshold;
+ u8 retry_short;
+ u8 retry_long;
+ u8 coverage_class;
+
+ u8 max_scan_ssids;
+ u8 max_sched_scan_reqs;
+ u8 max_sched_scan_ssids;
+ u8 max_match_sets;
+ u8 max_adj_channel_rssi_comp;
+
+ __le16 max_scan_ie_len;
+ __le16 max_sched_scan_ie_len;
+ __le32 max_sched_scan_plans;
+ __le32 max_sched_scan_plan_interval;
+ __le32 max_sched_scan_plan_iterations;
+
+ u8 n_cipher_suites;
+ u8 n_akm_suites;
+ u8 max_num_pmkids;
+ u8 num_iftype_ext_capab;
+ u8 extended_capabilities_len;
+ u8 max_data_retry_count;
+ u8 n_iface_combinations;
+ u8 max_num_csa_counters;
+
u8 bands_cap;
u8 alpha2[2];
u8 n_reg_rules;
u8 dfs_region;
- u8 rsvd[1];
+ u8 rsvd[3];
u8 var_info[0];
} __packed;
@@ -952,8 +1115,6 @@ struct qlink_resp_get_mac_info {
* Description of wireless hardware capabilities and features.
*
* @fw_ver: wireless hardware firmware version.
- * @hw_capab: Bitmap of capabilities supported by firmware.
- * @ql_proto_ver: Version of QLINK protocol used by firmware.
* @num_mac: Number of separate physical radio devices provided by hardware.
* @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware.
* @total_tx_chains: total number of transmit chains used by device.
@@ -963,11 +1124,9 @@ struct qlink_resp_get_mac_info {
struct qlink_resp_get_hw_info {
struct qlink_resp rhdr;
__le32 fw_ver;
- __le32 hw_capab;
__le32 bld_tmstamp;
__le32 plat_id;
__le32 hw_ver;
- __le16 ql_proto_ver;
u8 num_mac;
u8 mac_bitmap;
u8 total_tx_chain;
@@ -1001,8 +1160,6 @@ enum qlink_sta_info_rate_flags {
*
* Response data containing statistics for specified STA.
*
- * @filled: a bitmask of &enum qlink_sta_info, specifies which info in response
- * is valid.
* @sta_addr: MAC address of STA the response carries statistic for.
* @info: variable statistics for specified STA.
*/
@@ -1031,22 +1188,14 @@ struct qlink_resp_band_info_get {
} __packed;
/**
- * struct qlink_resp_phy_params - response for QLINK_CMD_PHY_PARAMS_GET command
- *
- * @info: variable-length array of PHY params.
- */
-struct qlink_resp_phy_params {
- struct qlink_resp rhdr;
- u8 info[0];
-} __packed;
-
-/**
* struct qlink_resp_get_chan_stats - response for QLINK_CMD_CHAN_STATS cmd
*
+ * @chan_freq: center frequency for a channel the report is sent for.
* @info: variable-length channel info.
*/
struct qlink_resp_get_chan_stats {
- struct qlink_cmd rhdr;
+ struct qlink_resp rhdr;
+ __le32 chan_freq;
u8 info[0];
} __packed;
@@ -1088,6 +1237,7 @@ enum qlink_event_type {
QLINK_EVENT_RADAR = 0x0029,
QLINK_EVENT_EXTERNAL_AUTH = 0x0030,
QLINK_EVENT_MIC_FAILURE = 0x0031,
+ QLINK_EVENT_UPDATE_OWE = 0x0032,
};
/**
@@ -1158,6 +1308,7 @@ struct qlink_event_bss_join {
struct qlink_event_bss_leave {
struct qlink_event ehdr;
__le16 reason;
+ u8 rsvd[2];
} __packed;
/**
@@ -1274,10 +1425,10 @@ struct qlink_event_radar {
*/
struct qlink_event_external_auth {
struct qlink_event ehdr;
+ __le32 akm_suite;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
u8 bssid[ETH_ALEN];
- __le32 akm_suite;
+ u8 ssid_len;
u8 action;
} __packed;
@@ -1295,26 +1446,36 @@ struct qlink_event_mic_failure {
u8 pairwise;
} __packed;
+/**
+ * struct qlink_event_update_owe - data for QLINK_EVENT_UPDATE_OWE event
+ *
+ * @peer: MAC addr of the peer device for which OWE processing needs to be done
+ * @ies: IEs from the peer
+ */
+struct qlink_event_update_owe {
+ struct qlink_event ehdr;
+ u8 peer[ETH_ALEN];
+ u8 rsvd[2];
+ u8 ies[0];
+} __packed;
+
/* QLINK TLVs (Type-Length Values) definitions
*/
/**
* enum qlink_tlv_id - list of TLVs that Qlink messages can carry
*
- * @QTN_TLV_ID_STA_STATS_MAP: a bitmap of &enum qlink_sta_info, used to
- * indicate which statistic carried in QTN_TLV_ID_STA_STATS is valid.
+ * @QTN_TLV_ID_BITMAP: data representing a bitmap that is used together with
+ * other TLVs:
+ * &enum qlink_sta_info used to indicate which statistic carried in
+ * QTN_TLV_ID_STA_STATS is valid.
+ * &enum qlink_hw_capab listing wireless card capabilities.
+ * &enum qlink_driver_capab listing driver/host system capabilities.
+ * &enum qlink_chan_stat used to indicate which statistic carried in
+ * QTN_TLV_ID_CHANNEL_STATS is valid.
* @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
* &struct qlink_sta_stats. Valid values are marked as such in a bitmap
- * carried by QTN_TLV_ID_STA_STATS_MAP.
- * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
- * for in any given scan.
- * @QTN_TLV_ID_SCAN_DWELL_ACTIVE: time spent on a single channel for an active
- * scan.
- * @QTN_TLV_ID_SCAN_DWELL_PASSIVE: time spent on a single channel for a passive
- * scan.
- * @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel
- * during a scan including off-channel dwell time and operating channel
- * time.
+ * carried by QTN_TLV_ID_BITMAP.
* @QTN_TLV_ID_IFTYPE_DATA: supported band data.
*/
enum qlink_tlv_id {
@@ -1325,11 +1486,10 @@ enum qlink_tlv_id {
QTN_TLV_ID_REG_RULE = 0x0207,
QTN_TLV_ID_CHANNEL = 0x020F,
QTN_TLV_ID_CHANDEF = 0x0210,
- QTN_TLV_ID_STA_STATS_MAP = 0x0211,
+ QTN_TLV_ID_BITMAP = 0x0211,
QTN_TLV_ID_STA_STATS = 0x0212,
QTN_TLV_ID_COVERAGE_CLASS = 0x0213,
QTN_TLV_ID_IFACE_LIMIT = 0x0214,
- QTN_TLV_ID_NUM_IFACE_COMB = 0x0215,
QTN_TLV_ID_CHANNEL_STATS = 0x0216,
QTN_TLV_ID_KEY = 0x0302,
QTN_TLV_ID_SEQ = 0x0303,
@@ -1344,13 +1504,8 @@ enum qlink_tlv_id {
QTN_TLV_ID_CALIBRATION_VER = 0x0406,
QTN_TLV_ID_UBOOT_VER = 0x0407,
QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408,
- QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
- QTN_TLV_ID_SCAN_FLUSH = 0x0412,
- QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413,
- QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416,
- QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417,
QTN_TLV_ID_IFTYPE_DATA = 0x0418,
};
@@ -1360,10 +1515,6 @@ struct qlink_tlv_hdr {
u8 val[0];
} __packed;
-struct qlink_iface_comb_num {
- __le32 iface_comb_num;
-} __packed;
-
struct qlink_iface_limit {
__le16 max_num;
__le16 type;
@@ -1378,21 +1529,6 @@ struct qlink_iface_limit_record {
#define QLINK_RSSI_OFFSET 120
-struct qlink_tlv_frag_rts_thr {
- struct qlink_tlv_hdr hdr;
- __le32 thr;
-} __packed;
-
-struct qlink_tlv_rlimit {
- struct qlink_tlv_hdr hdr;
- u8 rlimit;
-} __packed;
-
-struct qlink_tlv_cclass {
- struct qlink_tlv_hdr hdr;
- u8 cclass;
-} __packed;
-
/**
* enum qlink_reg_rule_flags - regulatory rule flags
*
@@ -1510,6 +1646,7 @@ struct qlink_tlv_ie_set {
struct qlink_tlv_hdr hdr;
u8 type;
u8 flags;
+ u8 rsvd[2];
u8 ie_data[0];
} __packed;
@@ -1522,6 +1659,7 @@ struct qlink_tlv_ie_set {
struct qlink_tlv_ext_ie {
struct qlink_tlv_hdr hdr;
u8 eid_ext;
+ u8 rsvd[3];
u8 ie_data[0];
} __packed;
@@ -1546,13 +1684,57 @@ struct qlink_tlv_iftype_data {
struct qlink_sband_iftype_data iftype_data[0];
} __packed;
+/**
+ * enum qlink_chan_stat - channel statistics bitmap
+ *
+ * Used to indicate which statistics values in &struct qlink_chan_stats
+ * are valid. Individual values are used to fill a bitmap carried in a
+ * payload of QTN_TLV_ID_BITMAP.
+ *
+ * @QLINK_CHAN_STAT_TIME_ON: time_on value is valid.
+ * @QLINK_CHAN_STAT_TIME_TX: time_tx value is valid.
+ * @QLINK_CHAN_STAT_TIME_RX: time_rx value is valid.
+ * @QLINK_CHAN_STAT_CCA_BUSY: cca_busy value is valid.
+ * @QLINK_CHAN_STAT_CCA_BUSY_EXT: cca_busy_ext value is valid.
+ * @QLINK_CHAN_STAT_TIME_SCAN: time_scan value is valid.
+ * @QLINK_CHAN_STAT_CHAN_NOISE: chan_noise value is valid.
+ */
+enum qlink_chan_stat {
+ QLINK_CHAN_STAT_TIME_ON,
+ QLINK_CHAN_STAT_TIME_TX,
+ QLINK_CHAN_STAT_TIME_RX,
+ QLINK_CHAN_STAT_CCA_BUSY,
+ QLINK_CHAN_STAT_CCA_BUSY_EXT,
+ QLINK_CHAN_STAT_TIME_SCAN,
+ QLINK_CHAN_STAT_CHAN_NOISE,
+ QLINK_CHAN_STAT_NUM,
+};
+
+/**
+ * struct qlink_chan_stats - data for QTN_TLV_ID_CHANNEL_STATS
+ *
+ * Carries per-channel statistics. Not all fields may be filled with
+ * valid values. Valid fields should be indicated as such using a bitmap of
+ * &enum qlink_chan_stat. Bitmap is carried separately in a payload of
+ * QTN_TLV_ID_BITMAP.
+ *
+ * @time_on: amount of time radio operated on that channel.
+ * @time_tx: amount of time radio spent transmitting on the channel.
+ * @time_rx: amount of time radio spent receiving on the channel.
+ * @cca_busy: amount of time the primary channel was busy.
+ * @cca_busy_ext: amount of time the secondary channel was busy.
+ * @time_scan: amount of time radio spent scanning on the channel.
+ * @chan_noise: channel noise.
+ */
struct qlink_chan_stats {
- __le32 chan_num;
- __le32 cca_tx;
- __le32 cca_rx;
- __le32 cca_busy;
- __le32 cca_try;
+ __le64 time_on;
+ __le64 time_tx;
+ __le64 time_rx;
+ __le64 cca_busy;
+ __le64 cca_busy_ext;
+ __le64 time_scan;
s8 chan_noise;
+ u8 rsvd[3];
} __packed;
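Because validity is signalled out of band in a QTN_TLV_ID_BITMAP payload, a consumer has to test the corresponding &enum qlink_chan_stat bit before trusting any field. A kernel-style sketch, where bitmap_bit_is_set() is a stand-in helper rather than a function from this patch:

/* Sketch: gate each statistic on its validity bit. bitmap_bit_is_set()
 * is a stand-in helper; bmap/bmap_len come from QTN_TLV_ID_BITMAP. */
static void chan_stats_to_survey(struct survey_info *survey,
                                 const struct qlink_chan_stats *stats,
                                 const u8 *bmap, size_t bmap_len)
{
        if (bitmap_bit_is_set(bmap, QLINK_CHAN_STAT_TIME_ON, bmap_len)) {
                survey->filled |= SURVEY_INFO_TIME;
                survey->time = le64_to_cpu(stats->time_on);
        }
        if (bitmap_bit_is_set(bmap, QLINK_CHAN_STAT_CHAN_NOISE, bmap_len)) {
                survey->filled |= SURVEY_INFO_NOISE_DBM;
                survey->noise = stats->chan_noise;
        }
}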
/**
@@ -1560,7 +1742,7 @@ struct qlink_chan_stats {
*
* Used to indicate which statistics values in &struct qlink_sta_stats
* are valid. Individual values are used to fill a bitmap carried in a
- * payload of QTN_TLV_ID_STA_STATS_MAP.
+ * payload of QTN_TLV_ID_BITMAP.
*
* @QLINK_STA_INFO_CONNECTED_TIME: connected_time value is valid.
* @QLINK_STA_INFO_INACTIVE_TIME: inactive_time value is valid.
@@ -1624,7 +1806,7 @@ struct qlink_sta_info_rate {
* Carries statistics of a STA. Not all fields may be filled with
* valid values. Valid fields should be indicated as such using a bitmap of
* &enum qlink_sta_info. Bitmap is carried separately in a payload of
- * QTN_TLV_ID_STA_STATS_MAP.
+ * QTN_TLV_ID_BITMAP.
*/
struct qlink_sta_stats {
__le64 rx_bytes;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index 1a972bce7b8b..30b60d6ae546 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -124,6 +124,8 @@ void qlink_chandef_q2cfg(struct wiphy *wiphy,
chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
chdef->width = qlink_chanwidth_to_nl(qch->width);
+ chdef->edmg.bw_config = 0;
+ chdef->edmg.channels = 0;
}
void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index f873beed2ae7..230a10a41c7a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -20,42 +20,14 @@ static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
u16 tlv_id, const u8 arr[],
size_t arr_len)
{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + arr_len);
+ struct qlink_tlv_hdr *hdr;
+ hdr = skb_put(skb, sizeof(*hdr) + round_up(arr_len, QLINK_ALIGN));
hdr->type = cpu_to_le16(tlv_id);
hdr->len = cpu_to_le16(arr_len);
memcpy(hdr->val, arr, arr_len);
}
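Note the asymmetry: skb space is reserved for the length rounded up to QLINK_ALIGN, while hdr->len still advertises the raw arr_len; receivers advance by the rounded-up length (see qlink_for_each_tlv below), so the padding bytes are silently skipped. The arithmetic, as a tiny userspace sketch:

/* Padding arithmetic used above: reserve round_up(len, 4) bytes,
 * advertise the raw length. Standalone sketch, stdlib only. */
#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

int main(void)
{
        unsigned int arr_len = 6;

        printf("hdr->len = %u, bytes reserved = %u, pad = %u\n",
               arr_len, ALIGN4(arr_len), ALIGN4(arr_len) - arr_len);
        return 0;       /* prints: hdr->len = 6, bytes reserved = 8, pad = 2 */
}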
-static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr));
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(0);
-}
-
-static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id,
- u8 value)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(sizeof(value));
- *hdr->val = value;
-}
-
-static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb,
- u16 tlv_id, u16 value)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
- __le16 tmp = cpu_to_le16(value);
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(sizeof(value));
- memcpy(hdr->val, &tmp, sizeof(tmp));
-}
-
static inline void qtnf_cmd_skb_put_tlv_u32(struct sk_buff *skb,
u16 tlv_id, u32 value)
{
@@ -85,4 +57,17 @@ u32 qlink_utils_chflags_cfg2q(u32 cfgflags);
void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
const struct qlink_tlv_reg_rule *tlv_rule);
+#define qlink_for_each_tlv(_tlv, _start, _datalen) \
+ for (_tlv = (const struct qlink_tlv_hdr *)(_start); \
+ (const u8 *)(_start) + (_datalen) - (const u8 *)_tlv >= \
+ (int)sizeof(*_tlv) && \
+ (const u8 *)(_start) + (_datalen) - (const u8 *)_tlv >= \
+ (int)sizeof(*_tlv) + le16_to_cpu(_tlv->len); \
+ _tlv = (const struct qlink_tlv_hdr *)(_tlv->val + \
+ round_up(le16_to_cpu(_tlv->len), QLINK_ALIGN)))
+
+#define qlink_tlv_parsing_ok(_tlv_last, _start, _datalen) \
+ ((const u8 *)(_tlv_last) == \
+ (const u8 *)(_start) + round_up(_datalen, QLINK_ALIGN))
+
#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
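A minimal consumer of these two macros might look as follows; tlv_buf and tlv_buf_len are hypothetical, and the loop body only names one TLV type for illustration. The loop runs while at least a full header and its aligned payload remain, and qlink_tlv_parsing_ok() confirms the cursor landed exactly at the aligned end of the buffer:

/* Kernel-style sketch; tlv_buf/tlv_buf_len are hypothetical. */
const struct qlink_tlv_hdr *tlv;

qlink_for_each_tlv(tlv, tlv_buf, tlv_buf_len) {
        switch (le16_to_cpu(tlv->type)) {
        case QTN_TLV_ID_CHANNEL:
                /* le16_to_cpu(tlv->len) value bytes start at tlv->val */
                break;
        default:
                break;
        }
}

if (!qlink_tlv_parsing_ok(tlv, tlv_buf, tlv_buf_len))
        return -EINVAL; /* truncated or misaligned trailing TLV */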
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index 668444f6bf07..2b0f332043d7 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -570,7 +570,7 @@ struct phy_header {
};
struct ray_rx_msg {
struct mac_header mac;
- UCHAR var[0];
+ UCHAR var[];
};
struct tx_msg {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 6598c8d786ea..d6d1be4169e5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -627,7 +627,7 @@ struct rtl8xxxu_firmware_header {
u32 reserved4;
u32 reserved5;
- u8 data[0];
+ u8 data[];
};
/*
@@ -1133,6 +1133,15 @@ enum bt_mp_oper_opcode_8723b {
BT_MP_OP_ENABLE_CFO_TRACKING = 0x24,
};
+enum rtl8xxxu_bw_mode {
+ RTL8XXXU_CHANNEL_WIDTH_20 = 0,
+ RTL8XXXU_CHANNEL_WIDTH_40 = 1,
+ RTL8XXXU_CHANNEL_WIDTH_80 = 2,
+ RTL8XXXU_CHANNEL_WIDTH_160 = 3,
+ RTL8XXXU_CHANNEL_WIDTH_80_80 = 4,
+ RTL8XXXU_CHANNEL_WIDTH_MAX = 5,
+};
+
struct rtl8723bu_c2h {
u8 id;
u8 seq;
@@ -1174,13 +1183,16 @@ struct rtl8723bu_c2h {
} __packed bt_info;
struct {
u8 rate:7;
- u8 dummy0_0:1;
+ u8 sgi:1;
u8 macid;
u8 ldpc:1;
u8 txbf:1;
u8 noisy_state:1;
u8 dummy2_0:5;
u8 dummy3_0;
+ u8 dummy4_0;
+ u8 dummy5_0;
+ u8 bw;
} __packed ra_report;
};
};
@@ -1260,6 +1272,12 @@ struct rtl8xxxu_btcoex {
#define RTL8XXXU_SNR_THRESH_HIGH 50
#define RTL8XXXU_SNR_THRESH_LOW 20
+struct rtl8xxxu_ra_report {
+ struct rate_info txrate;
+ u32 bit_rate;
+ u8 desc_rate;
+};
+
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
struct usb_device *udev;
@@ -1375,6 +1393,7 @@ struct rtl8xxxu_priv {
struct sk_buff_head c2hcmd_queue;
spinlock_t c2hcmd_lock;
struct rtl8xxxu_btcoex bt_coex;
+ struct rtl8xxxu_ra_report ra_report;
};
struct rtl8xxxu_rx_urb {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 54a1a4ea107b..19efae462a24 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4328,7 +4328,7 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
u32 ramask, u8 rateid, int sgi)
{
struct h2c_cmd h2c;
- u8 bw = 0;
+ u8 bw = RTL8XXXU_CHANNEL_WIDTH_20;
memset(&h2c, 0, sizeof(struct h2c_cmd));
@@ -4816,8 +4816,8 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
rate = tx_rate->hw_value;
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
- dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
- __func__, rate, cpu_to_le16(tx_desc->pkt_size));
+ dev_info(dev, "%s: TX rate: %d, pkt size %u\n",
+ __func__, rate, le16_to_cpu(tx_desc->pkt_size));
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
@@ -4889,8 +4889,8 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
rate = tx_rate->hw_value;
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
- dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
- __func__, rate, cpu_to_le16(tx_desc40->pkt_size));
+ dev_info(dev, "%s: TX rate: %d, pkt size %u\n",
+ __func__, rate, le16_to_cpu(tx_desc40->pkt_size));
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
@@ -5389,6 +5389,35 @@ void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
}
}
+static struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = {
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+{
+ if (rate <= DESC_RATE_54M)
+ return;
+
+ if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
+ if (rate < DESC_RATE_MCS8)
+ *nss = 1;
+ else
+ *nss = 2;
+ *mcs = rate - DESC_RATE_MCS0;
+ }
+}
+
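The mapping relies on DESC_RATE_MCS0..DESC_RATE_MCS15 being consecutive hardware rate indices: the MCS index is the offset from DESC_RATE_MCS0, and NSS is 1 for MCS0-7 and 2 for MCS8-15. A worked example under that assumption:

/* Worked example (assumes the DESC_RATE_MCSn values are consecutive,
 * which the helper above relies on). */
u8 mcs = 0, nss = 0;

rtl8xxxu_desc_to_mcsrate(DESC_RATE_MCS9, &mcs, &nss);
/* now mcs == 9 and nss == 2: second spatial stream, HT MCS 9 */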
static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
{
struct rtl8xxxu_priv *priv;
@@ -5397,9 +5426,14 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
unsigned long flags;
u8 bt_info = 0;
struct rtl8xxxu_btcoex *btcoex;
+ struct rtl8xxxu_ra_report *rarpt;
+ u8 rate, sgi, bw;
+ u32 bit_rate;
+ u8 mcs = 0, nss = 0;
priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
btcoex = &priv->bt_coex;
+ rarpt = &priv->ra_report;
if (priv->rf_paths > 1)
goto out;
@@ -5422,6 +5456,34 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
}
rtl8723bu_handle_bt_info(priv);
break;
+ case C2H_8723B_RA_REPORT:
+ rarpt->txrate.flags = 0;
+ rate = c2h->ra_report.rate;
+ sgi = c2h->ra_report.sgi;
+ bw = c2h->ra_report.bw;
+
+ if (rate < DESC_RATE_MCS0) {
+ rarpt->txrate.legacy =
+ rtl8xxxu_legacy_ratetable[rate].bitrate;
+ } else {
+ rtl8xxxu_desc_to_mcsrate(rate, &mcs, &nss);
+ rarpt->txrate.flags |= RATE_INFO_FLAGS_MCS;
+
+ rarpt->txrate.mcs = mcs;
+ rarpt->txrate.nss = nss;
+
+ if (sgi) {
+ rarpt->txrate.flags |=
+ RATE_INFO_FLAGS_SHORT_GI;
+ }
+
+ if (bw == RATE_INFO_BW_20)
+ rarpt->txrate.bw |= RATE_INFO_BW_20;
+ }
+ bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
+ rarpt->bit_rate = bit_rate;
+ rarpt->desc_rate = rate;
+ break;
default:
break;
}
@@ -5465,7 +5527,7 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
case C2H_8723B_RA_REPORT:
dev_dbg(dev,
"C2H RA RPT: rate %02x, unk %i, macid %02x, noise %i\n",
- c2h->ra_report.rate, c2h->ra_report.dummy0_0,
+ c2h->ra_report.rate, c2h->ra_report.sgi,
c2h->ra_report.macid, c2h->ra_report.noisy_state);
break;
default:
@@ -6069,6 +6131,16 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
+static void
+rtl8xxxu_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct station_info *sinfo)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ sinfo->txrate = priv->ra_report.txrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
static u8 rtl8xxxu_signal_to_snr(int signal)
{
if (signal < RTL8XXXU_NOISE_FLOOR_MIN)
@@ -6371,6 +6443,7 @@ static const struct ieee80211_ops rtl8xxxu_ops = {
.sw_scan_complete = rtl8xxxu_sw_scan_complete,
.set_key = rtl8xxxu_set_key,
.ampdu_action = rtl8xxxu_ampdu_action,
+ .sta_statistics = rtl8xxxu_sta_statistics,
};
static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 0c7d74902d33..4b5ea0ec9109 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -261,7 +261,7 @@ static void rtl_rate_update(void *ppriv,
{
}
-static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *rtl_rate_alloc(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
return rtlpriv;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 1cff9f07c9e9..13421cf2d201 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1051,13 +1051,13 @@ struct rtl_hdr_3addr {
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtl_info_element {
u8 id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct rtl_probe_rsp {
@@ -1068,7 +1068,7 @@ struct rtl_probe_rsp {
/*SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN
*/
- struct rtl_info_element info_element[0];
+ struct rtl_info_element info_element[];
} __packed;
/*LED related.*/
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index fda771d23f71..b6d1d71f4d30 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -41,7 +41,6 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta_vht_cap *ic_vht_cap;
const u8 *bssid = bss_conf->bssid;
u32 sound_dim;
- u8 bfee_role = RTW_BFEE_NONE;
u8 i;
if (!(chip->band & RTW_BAND_5G))
@@ -67,7 +66,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
ether_addr_copy(bfee->mac_addr, bssid);
- bfee_role = RTW_BFEE_MU;
+ bfee->role = RTW_BFEE_MU;
bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
bfee->aid = bss_conf->aid;
bfinfo->bfer_mu_cnt++;
@@ -85,7 +84,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
ether_addr_copy(bfee->mac_addr, bssid);
- bfee_role = RTW_BFEE_SU;
+ bfee->role = RTW_BFEE_SU;
bfee->sound_dim = (u8)sound_dim;
bfee->g_id = 0;
bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
@@ -102,7 +101,6 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
out_unlock:
- bfee->role = bfee_role;
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 4dfb2ec395ee..567372fb4e12 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -1904,6 +1904,9 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
lockdep_assert_held(&rtwdev->mutex);
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ return;
+
coex_dm->reason = reason;
/* update wifi_link_info_ext variable */
@@ -2500,3 +2503,495 @@ void rtw_coex_defreeze_work(struct work_struct *work)
rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
mutex_unlock(&rtwdev->mutex);
}
+
+#ifdef CONFIG_RTW88_DEBUGFS
+#define INFO_SIZE 80
+
+#define case_BTINFO(src) \
+ case COEX_BTINFO_SRC_##src: return #src
+
+static const char *rtw_coex_get_bt_info_src_string(u8 bt_info_src)
+{
+ switch (bt_info_src) {
+ case_BTINFO(WL_FW);
+ case_BTINFO(BT_RSP);
+ case_BTINFO(BT_ACT);
+ default:
+ return "Unknown";
+ }
+}
+
+#define case_RSN(src) \
+ case COEX_RSN_##src: return #src
+
+static const char *rtw_coex_get_reason_string(u8 reason)
+{
+ switch (reason) {
+ case_RSN(2GSCANSTART);
+ case_RSN(5GSCANSTART);
+ case_RSN(SCANFINISH);
+ case_RSN(2GSWITCHBAND);
+ case_RSN(5GSWITCHBAND);
+ case_RSN(2GCONSTART);
+ case_RSN(5GCONSTART);
+ case_RSN(2GCONFINISH);
+ case_RSN(5GCONFINISH);
+ case_RSN(2GMEDIA);
+ case_RSN(5GMEDIA);
+ case_RSN(MEDIADISCON);
+ case_RSN(BTINFO);
+ case_RSN(LPS);
+ case_RSN(WLSTATUS);
+ default:
+ return "Unknown";
+ }
+}
+
+static int rtw_coex_addr_info(struct rtw_dev *rtwdev,
+ const struct rtw_reg_domain *reg,
+ char addr_info[], int n)
+{
+ const char *rf_prefix = "";
+ const char *sep = n == 0 ? "" : "/ ";
+ int ffs, fls;
+ int max_fls;
+
+ if (INFO_SIZE - n <= 0)
+ return 0;
+
+ switch (reg->domain) {
+ case RTW_REG_DOMAIN_MAC32:
+ max_fls = 31;
+ break;
+ case RTW_REG_DOMAIN_MAC16:
+ max_fls = 15;
+ break;
+ case RTW_REG_DOMAIN_MAC8:
+ max_fls = 7;
+ break;
+ case RTW_REG_DOMAIN_RF_A:
+ case RTW_REG_DOMAIN_RF_B:
+ rf_prefix = "RF_";
+ max_fls = 19;
+ break;
+ default:
+ return 0;
+ }
+
+ ffs = __ffs(reg->mask);
+ fls = __fls(reg->mask);
+
+ if (ffs == 0 && fls == max_fls)
+ return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x",
+ sep, rf_prefix, reg->addr);
+ else if (ffs == fls)
+ return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x[%d]",
+ sep, rf_prefix, reg->addr, ffs);
+ else
+ return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x[%d:%d]",
+ sep, rf_prefix, reg->addr, fls, ffs);
+}
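The three output formats come from where the mask's lowest and highest set bits fall: a mask covering the whole register prints bare, a single bit prints addr[bit], and a run of bits prints addr[hi:lo]. A standalone sketch of the bit-range extraction, using ffs() and a GCC/Clang builtin in place of the kernel's __ffs/__fls:

/* Userspace sketch of the __ffs/__fls range extraction above. */
#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int mask = 0x00000ff0;
        int lo = ffs(mask) - 1;                 /* lowest set bit: 4 */
        int hi = 31 - __builtin_clz(mask);      /* highest set bit: 11 */

        printf("0x6c0[%d:%d]\n", hi, lo);       /* prints 0x6c0[11:4] */
        return 0;
}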
+
+static int rtw_coex_val_info(struct rtw_dev *rtwdev,
+ const struct rtw_reg_domain *reg,
+ char val_info[], int n)
+{
+ const char *sep = n == 0 ? "" : "/ ";
+ u8 rf_path;
+
+ if (INFO_SIZE - n <= 0)
+ return 0;
+
+ switch (reg->domain) {
+ case RTW_REG_DOMAIN_MAC32:
+ return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep,
+ rtw_read32_mask(rtwdev, reg->addr, reg->mask));
+ case RTW_REG_DOMAIN_MAC16:
+ return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep,
+ rtw_read16_mask(rtwdev, reg->addr, reg->mask));
+ case RTW_REG_DOMAIN_MAC8:
+ return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep,
+ rtw_read8_mask(rtwdev, reg->addr, reg->mask));
+ case RTW_REG_DOMAIN_RF_A:
+ rf_path = RF_PATH_A;
+ break;
+ case RTW_REG_DOMAIN_RF_B:
+ rf_path = RF_PATH_B;
+ break;
+ default:
+ return 0;
+ }
+
+ /* only RF go through here */
+ return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep,
+ rtw_read_rf(rtwdev, rf_path, reg->addr, reg->mask));
+}
+
+static void rtw_coex_set_coexinfo_hw(struct rtw_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_reg_domain *reg;
+ char addr_info[INFO_SIZE];
+ int n_addr = 0;
+ char val_info[INFO_SIZE];
+ int n_val = 0;
+ int i;
+
+ for (i = 0; i < chip->coex_info_hw_regs_num; i++) {
+ reg = &chip->coex_info_hw_regs[i];
+
+ n_addr += rtw_coex_addr_info(rtwdev, reg, addr_info, n_addr);
+ n_val += rtw_coex_val_info(rtwdev, reg, val_info, n_val);
+
+ if (reg->domain == RTW_REG_DOMAIN_NL) {
+ seq_printf(m, "%-40s = %s\n", addr_info, val_info);
+ n_addr = 0;
+ n_val = 0;
+ }
+ }
+
+ if (n_addr != 0 && n_val != 0)
+ seq_printf(m, "%-40s = %s\n", addr_info, val_info);
+}
+
+static bool rtw_coex_get_bt_reg(struct rtw_dev *rtwdev,
+ u8 type, u16 addr, u16 *val)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ __le16 le_addr;
+ u8 *payload;
+
+ le_addr = cpu_to_le16(addr);
+ req.op_code = BT_MP_INFO_OP_READ_REG;
+ req.para1 = type;
+ req.para2 = le16_get_bits(le_addr, GENMASK(7, 0));
+ req.para3 = le16_get_bits(le_addr, GENMASK(15, 8));
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb) {
+ *val = 0xeaea;
+ return false;
+ }
+
+ payload = get_payload_from_coex_resp(skb);
+ *val = GET_COEX_RESP_BT_REG_VAL(payload);
+
+ return true;
+}
+
+static bool rtw_coex_get_bt_patch_version(struct rtw_dev *rtwdev,
+ u32 *patch_version)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ u8 *payload;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_PATCH_VER;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ payload = get_payload_from_coex_resp(skb);
+ *patch_version = GET_COEX_RESP_BT_PATCH_VER(payload);
+ ret = true;
+
+out:
+ return ret;
+}
+
+static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev,
+ u32 *supported_version)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ u8 *payload;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_SUPP_VER;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ payload = get_payload_from_coex_resp(skb);
+ *supported_version = GET_COEX_RESP_BT_SUPP_VER(payload);
+ ret = true;
+
+out:
+ return ret;
+}
+
+static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev,
+ u32 *supported_feature)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ u8 *payload;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_SUPP_FEAT;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ payload = get_payload_from_coex_resp(skb);
+ *supported_feature = GET_COEX_RESP_BT_SUPP_FEAT(payload);
+ ret = true;
+
+out:
+ return ret;
+}
+
+struct rtw_coex_sta_stat_iter_data {
+ struct rtw_vif *rtwvif;
+ struct seq_file *file;
+};
+
+static void rtw_coex_sta_stat_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_coex_sta_stat_iter_data *sta_iter_data = data;
+ struct rtw_vif *rtwvif = sta_iter_data->rtwvif;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct seq_file *m = sta_iter_data->file;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ u8 rssi;
+
+ if (si->vif != vif)
+ return;
+
+ rssi = ewma_rssi_read(&si->avg_rssi);
+ seq_printf(m, "\tPeer %3d\n", si->mac_id);
+ seq_printf(m, "\t\t%-24s = %d\n", "RSSI", rssi);
+ seq_printf(m, "\t\t%-24s = %d\n", "BW mode", si->bw_mode);
+}
+
+struct rtw_coex_vif_stat_iter_data {
+ struct rtw_dev *rtwdev;
+ struct seq_file *file;
+};
+
+static void rtw_coex_vif_stat_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_coex_vif_stat_iter_data *vif_iter_data = data;
+ struct rtw_coex_sta_stat_iter_data sta_iter_data;
+ struct rtw_dev *rtwdev = vif_iter_data->rtwdev;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct seq_file *m = vif_iter_data->file;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ seq_printf(m, "Iface on Port (%d)\n", rtwvif->port);
+ seq_printf(m, "\t%-32s = %d\n",
+ "Beacon interval", bss_conf->beacon_int);
+ seq_printf(m, "\t%-32s = %d\n",
+ "Network Type", rtwvif->net_type);
+
+ sta_iter_data.rtwvif = rtwvif;
+ sta_iter_data.file = m;
+ rtw_iterate_stas_atomic(rtwdev, rtw_coex_sta_stat_iter,
+ &sta_iter_data);
+}
+
+void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_coex_vif_stat_iter_data vif_iter_data;
+ u8 reason = coex_dm->reason;
+ u8 sys_lte;
+ u16 score_board_WB, score_board_BW;
+ u32 wl_reg_6c0, wl_reg_6c4, wl_reg_6c8, wl_reg_778, wl_reg_6cc;
+ u32 lte_coex, bt_coex;
+ u32 bt_hi_pri, bt_lo_pri;
+ int i;
+
+ score_board_BW = rtw_coex_read_scbd(rtwdev);
+ score_board_WB = coex_stat->score_board;
+ wl_reg_6c0 = rtw_read32(rtwdev, 0x6c0);
+ wl_reg_6c4 = rtw_read32(rtwdev, 0x6c4);
+ wl_reg_6c8 = rtw_read32(rtwdev, 0x6c8);
+ wl_reg_6cc = rtw_read32(rtwdev, 0x6cc);
+ wl_reg_778 = rtw_read32(rtwdev, 0x778);
+ bt_hi_pri = rtw_read32(rtwdev, 0x770);
+ bt_lo_pri = rtw_read32(rtwdev, 0x774);
+ rtw_write8(rtwdev, 0x76e, 0xc);
+ sys_lte = rtw_read8(rtwdev, 0x73);
+ lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38);
+ bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54);
+
+ if (!coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) {
+ rtw_coex_get_bt_supported_version(rtwdev,
+ &coex_stat->bt_supported_version);
+ rtw_coex_get_bt_patch_version(rtwdev, &coex_stat->patch_ver);
+ rtw_coex_get_bt_supported_feature(rtwdev,
+ &coex_stat->bt_supported_feature);
+ rtw_coex_get_bt_reg(rtwdev, 3, 0xae, &coex_stat->bt_reg_vendor_ae);
+ rtw_coex_get_bt_reg(rtwdev, 3, 0xac, &coex_stat->bt_reg_vendor_ac);
+
+ if (coex_stat->patch_ver != 0)
+ coex_stat->bt_mailbox_reply = true;
+ }
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tBT Coexist info %x\n", chip->id);
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %s/ %d\n",
+ "Mech/ RFE",
+ efuse->share_ant ? "Shared" : "Non-Shared",
+ efuse->rfe_option);
+ seq_printf(m, "%-40s = %08x/ 0x%02x/ 0x%08x %s\n",
+ "Coex Ver/ BT Dez/ BT Rpt",
+ chip->coex_para_ver, chip->bt_desired_ver,
+ coex_stat->bt_supported_version,
+ coex_stat->bt_disabled ? "(BT disabled)" :
+ coex_stat->bt_supported_version >= chip->bt_desired_ver ?
+ "(Match)" : "(Mismatch)");
+ seq_printf(m, "%-40s = %s/ %u/ %d\n",
+ "Role/ RoleSwCnt/ IgnWL/ Feature",
+ coex_stat->bt_slave ? "Slave" : "Master",
+ coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH],
+ coex_dm->ignore_wl_act);
+ seq_printf(m, "%-40s = %u.%u/ 0x%x/ %c\n",
+ "WL FW/ BT FW/ KT",
+ fw->version, fw->sub_version,
+ coex_stat->patch_ver, coex_stat->kt_ver + 65);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ ch-(%u)\n",
+ "AFH Map",
+ coex_dm->wl_ch_info[0], coex_dm->wl_ch_info[1],
+ coex_dm->wl_ch_info[2], hal->current_channel);
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tBT Status\n");
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %s/ %ddBm/ %u/ %u\n",
+ "BT status/ rssi/ retry/ pop",
+ coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE ? "non-conn" :
+ coex_dm->bt_status == COEX_BTSTATUS_CON_IDLE ? "conn-idle" : "busy",
+ coex_stat->bt_rssi - 100,
+ coex_stat->cnt_bt[COEX_CNT_BT_RETRY],
+ coex_stat->cnt_bt[COEX_CNT_BT_POPEVENT]);
+ seq_printf(m, "%-40s = %s%s%s%s%s (multi-link %d)\n",
+ "Profiles",
+ coex_stat->bt_a2dp_exist ? (coex_stat->bt_a2dp_sink ?
+ "A2DP sink," : "A2DP,") : "",
+ coex_stat->bt_hfp_exist ? "HFP," : "",
+ coex_stat->bt_hid_exist ?
+ (coex_stat->bt_ble_exist ? "HID(RCU)," :
+ coex_stat->bt_hid_slot >= 2 ? "HID(4/18)" :
+ "HID(2/18),") : "",
+ coex_stat->bt_pan_exist ? coex_stat->bt_opp_exist ?
+ "OPP," : "PAN," : "",
+ coex_stat->bt_ble_voice ? "Voice," : "",
+ coex_stat->bt_multi_link);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ 0x%08x\n",
+ "Reinit/ Relink/ IgnWl/ Feature",
+ coex_stat->cnt_bt[COEX_CNT_BT_REINIT],
+ coex_stat->cnt_bt[COEX_CNT_BT_SETUPLINK],
+ coex_stat->cnt_bt[COEX_CNT_BT_IGNWLANACT],
+ coex_stat->bt_supported_feature);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n",
+ "Page/ Inq/ iqk/ iqk fail",
+ coex_stat->cnt_bt[COEX_CNT_BT_PAGE],
+ coex_stat->cnt_bt[COEX_CNT_BT_INQ],
+ coex_stat->cnt_bt[COEX_CNT_BT_IQK],
+ coex_stat->cnt_bt[COEX_CNT_BT_IQKFAIL]);
+ seq_printf(m, "%-40s = 0x%04x/ 0x%04x/ 0x%04x/ 0x%04x\n",
+ "0xae/ 0xac/ score board (W->B)/ (B->W)",
+ coex_stat->bt_reg_vendor_ae,
+ coex_stat->bt_reg_vendor_ac,
+ score_board_WB, score_board_BW);
+ seq_printf(m, "%-40s = %u/%u, %u/%u\n",
+ "Hi-Pri TX/RX, Lo-Pri TX/RX",
+ bt_hi_pri & 0xffff, bt_hi_pri >> 16,
+ bt_lo_pri & 0xffff, bt_lo_pri >> 16);
+ for (i = 0; i < COEX_BTINFO_SRC_BT_IQK; i++)
+ seq_printf(m, "%-40s = %7ph\n",
+ rtw_coex_get_bt_info_src_string(i),
+ coex_stat->bt_info_c2h[i]);
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tWiFi Status\n");
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %d\n",
+ "Scanning", test_bit(RTW_FLAG_SCANNING, rtwdev->flags));
+ seq_printf(m, "%-40s = %u/ TX %d Mbps/ RX %d Mbps\n",
+ "G_busy/ TX/ RX",
+ coex_stat->wl_gl_busy,
+ rtwdev->stats.tx_throughput, rtwdev->stats.rx_throughput);
+ seq_printf(m, "%-40s = %u/ %u/ %u\n",
+ "IPS/ Low Power/ PS mode",
+ test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags),
+ test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags),
+ rtwdev->lps_conf.mode);
+
+ vif_iter_data.rtwdev = rtwdev;
+ vif_iter_data.file = m;
+ rtw_iterate_vifs_atomic(rtwdev, rtw_coex_vif_stat_iter, &vif_iter_data);
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tMechanism\n");
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %5ph (case-%d)\n",
+ "TDMA",
+ coex_dm->ps_tdma_para, coex_dm->cur_ps_tdma);
+ seq_printf(m, "%-40s = %d\n",
+ "Timer base", coex_stat->tdma_timer_base);
+ seq_printf(m, "%-40s = %d/ 0x%08x/ 0x%08x/ 0x%08x\n",
+ "Table/ 0x6c0/ 0x6c4/ 0x6c8",
+ coex_dm->cur_table, wl_reg_6c0, wl_reg_6c4, wl_reg_6c8);
+ seq_printf(m, "%-40s = 0x%08x/ 0x%08x/ reason (%s)\n",
+ "0x778/ 0x6cc/ Reason",
+ wl_reg_778, wl_reg_6cc, rtw_coex_get_reason_string(reason));
+ seq_printf(m, "%-40s = %u/ %u/ %u/ %u/ %u\n",
+ "Null All/ Retry/ Ack/ BT Empty/ BT Late",
+ coex_stat->wl_fw_dbg_info[1], coex_stat->wl_fw_dbg_info[2],
+ coex_stat->wl_fw_dbg_info[3], coex_stat->wl_fw_dbg_info[4],
+ coex_stat->wl_fw_dbg_info[5]);
+ seq_printf(m, "%-40s = %u/ %u/ %s/ %u\n",
+ "Cnt TDMA Toggle/ Lk 5ms/ Lk 5ms on/ FW",
+ coex_stat->wl_fw_dbg_info[6],
+ coex_stat->wl_fw_dbg_info[7],
+ coex_stat->wl_slot_extend ? "Yes" : "No",
+ coex_stat->cnt_wl[COEX_CNT_WL_FW_NOTIFY]);
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tHW setting\n");
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %s/ %s\n",
+ "LTE Coex/ Path Owner",
+ lte_coex & BIT(7) ? "ON" : "OFF",
+ sys_lte & BIT(2) ? "WL" : "BT");
+ seq_printf(m, "%-40s = RF:%s_BB:%s/ RF:%s_BB:%s/ %s\n",
+ "GNT_WL_CTRL/ GNT_BT_CTRL/ Dbg",
+ lte_coex & BIT(12) ? "SW" : "HW",
+ lte_coex & BIT(8) ? "SW" : "HW",
+ lte_coex & BIT(14) ? "SW" : "HW",
+ lte_coex & BIT(10) ? "SW" : "HW",
+ sys_lte & BIT(3) ? "On" : "Off");
+ seq_printf(m, "%-40s = %lu/ %lu\n",
+ "GNT_WL/ GNT_BT",
+ (bt_coex & BIT(2)) >> 2, (bt_coex & BIT(3)) >> 3);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n",
+ "CRC OK CCK/ OFDM/ HT/ VHT",
+ dm_info->cck_ok_cnt, dm_info->ofdm_ok_cnt,
+ dm_info->ht_ok_cnt, dm_info->vht_ok_cnt);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n",
+ "CRC ERR CCK/ OFDM/ HT/ VHT",
+ dm_info->cck_err_cnt, dm_info->ofdm_err_cnt,
+ dm_info->ht_err_cnt, dm_info->vht_err_cnt);
+ seq_printf(m, "%-40s = %s/ %s/ %s/ %u\n",
+ "HiPr/ Locking/ Locked/ Noisy",
+ coex_stat->wl_hi_pri_task1 ? "Y" : "N",
+ coex_stat->wl_cck_lock ? "Y" : "N",
+ coex_stat->wl_cck_lock_ever ? "Y" : "N",
+ coex_stat->wl_noisy_level);
+
+ rtw_coex_set_coexinfo_hw(rtwdev, m);
+}
+#endif /* CONFIG_RTW88_DEBUGFS */
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
index 008d1af5996b..4c3a01968f5e 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.h
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -46,6 +46,14 @@
(__rssi__ == COEX_RSSI_STATE_LOW || \
__rssi__ == COEX_RSSI_STATE_STAY_LOW ? true : false); })
+#define GET_COEX_RESP_BT_SUPP_VER(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 32))
+#define GET_COEX_RESP_BT_SUPP_FEAT(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 24))
+#define GET_COEX_RESP_BT_PATCH_VER(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(55, 24))
+#define GET_COEX_RESP_BT_REG_VAL(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 24))
#define GET_COEX_RESP_BT_SCAN_TYPE(payload) \
le64_get_bits(*((__le64 *)(payload)), GENMASK(31, 24))
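Each accessor reads the first eight payload bytes as a little-endian 64-bit word and extracts a bit field; the patch version, for instance, occupies bits 55:24. A userspace equivalent of that extraction (the function name is local to this example):

/* Userspace equivalent of GET_COEX_RESP_BT_PATCH_VER above: load the
 * first 8 payload bytes little-endian, then take bits 55:24. */
#include <stdint.h>

static uint32_t coex_resp_patch_ver(const uint8_t *payload)
{
        uint64_t v = 0;
        unsigned int i;

        for (i = 0; i < 8; i++)         /* little-endian load */
                v |= (uint64_t)payload[i] << (8 * i);

        return (uint32_t)((v >> 24) & 0xffffffffULL);
}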
@@ -367,4 +375,6 @@ void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev);
+void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 5a181e01ebef..b4964306de61 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -5,6 +5,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "main.h"
+#include "coex.h"
#include "sec.h"
#include "fw.h"
#include "debug.h"
@@ -691,6 +692,56 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
dm_info->ht_ok_cnt, dm_info->ht_err_cnt);
seq_printf(m, " * VHT cnt (ok, err) = (%u, %u)\n",
dm_info->vht_ok_cnt, dm_info->vht_err_cnt);
+
+ return 0;
+}
+
+static int rtw_debugfs_get_coex_info(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+
+ rtw_coex_display_coex_info(rtwdev, m);
+
+ return 0;
+}
+
+static ssize_t rtw_debugfs_set_coex_enable(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_coex *coex = &rtwdev->coex;
+ char tmp[32 + 1];
+ bool enable;
+ int ret;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+
+ ret = kstrtobool(tmp, &enable);
+ if (ret) {
+ rtw_warn(rtwdev, "invalid arguments\n");
+ return ret;
+ }
+
+ mutex_lock(&rtwdev->mutex);
+ coex->stop_dm = !enable;
+ mutex_unlock(&rtwdev->mutex);
+
+ return count;
+}
+
+static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ seq_printf(m, "coex mechanism %s\n",
+ coex->stop_dm ? "disabled" : "enabled");
+
return 0;
}
@@ -784,6 +835,15 @@ static struct rtw_debugfs_priv rtw_debug_priv_phy_info = {
.cb_read = rtw_debugfs_get_phy_info,
};
+static struct rtw_debugfs_priv rtw_debug_priv_coex_enable = {
+ .cb_write = rtw_debugfs_set_coex_enable,
+ .cb_read = rtw_debugfs_get_coex_enable,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_coex_info = {
+ .cb_read = rtw_debugfs_get_coex_info,
+};
+
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
rtw_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -814,6 +874,8 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_rw(dump_cam);
rtw_debugfs_add_rw(rsvd_page);
rtw_debugfs_add_r(phy_info);
+ rtw_debugfs_add_r(coex_info);
+ rtw_debugfs_add_rw(coex_enable);
rtw_debugfs_add_r(mac_0);
rtw_debugfs_add_r(mac_1);
rtw_debugfs_add_r(mac_2);
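The read-only entries above ultimately render through the seq_file interface. The driver's own open/fops wrappers are outside this hunk, so the sketch below shows only the stock single_open() plumbing such a callback builds on; the fops name is illustrative:

    static int sketch_coex_info_open(struct inode *inode, struct file *filp)
    {
            return single_open(filp, rtw_debugfs_get_coex_info,
                               inode->i_private);
    }

    static const struct file_operations sketch_coex_info_fops = {
            .owner   = THIS_MODULE,
            .open    = sketch_coex_info_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };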
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 243441453ead..05c430b3489c 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -136,6 +136,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
mutex_lock(&rtwdev->mutex);
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ goto unlock;
+
switch (c2h->id) {
case C2H_BT_INFO:
rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
@@ -153,6 +156,7 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
break;
}
+unlock:
mutex_unlock(&rtwdev->mutex);
}
@@ -579,7 +583,7 @@ static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
struct rtw_rsvd_page *rsvd_pkt;
u8 location = 0;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
if (type == rsvd_pkt->type)
location = rsvd_pkt->page;
}
@@ -632,7 +636,7 @@ u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
struct rtw_rsvd_page *rsvd_pkt;
u8 location = 0;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
if (rsvd_pkt->type != RSVD_PROBE_REQ)
continue;
if ((!ssid && !rsvd_pkt->ssid) ||
@@ -649,7 +653,7 @@ u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
struct rtw_rsvd_page *rsvd_pkt;
u16 size = 0;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
if (rsvd_pkt->type != RSVD_PROBE_REQ)
continue;
if ((!ssid && !rsvd_pkt->ssid) ||
@@ -686,25 +690,6 @@ void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-static struct sk_buff *
-rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct sk_buff *skb_new;
-
- if (vif->type != NL80211_IFTYPE_AP &&
- vif->type != NL80211_IFTYPE_ADHOC &&
- !ieee80211_vif_is_mesh(vif)) {
- skb_new = alloc_skb(1, GFP_KERNEL);
- if (!skb_new)
- return NULL;
- skb_put(skb_new, 1);
- } else {
- skb_new = ieee80211_beacon_get(hw, vif);
- }
-
- return skb_new;
-}
-
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
@@ -745,7 +730,7 @@ static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
if (!loc) {
rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
- kfree(skb);
+ kfree_skb(skb);
return NULL;
}
nlo_hdr->location[i] = loc;
@@ -819,8 +804,7 @@ static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
return skb;
}
-static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -850,15 +834,31 @@ static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
}
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
struct rtw_rsvd_page *rsvd_pkt)
{
+ struct ieee80211_vif *vif;
+ struct rtw_vif *rtwvif;
struct sk_buff *skb_new;
struct cfg80211_ssid *ssid;
+ if (rsvd_pkt->type == RSVD_DUMMY) {
+ skb_new = alloc_skb(1, GFP_KERNEL);
+ if (!skb_new)
+ return NULL;
+
+ skb_put(skb_new, 1);
+ return skb_new;
+ }
+
+ rtwvif = rsvd_pkt->rtwvif;
+ if (!rtwvif)
+ return NULL;
+
+ vif = rtwvif_to_vif(rtwvif);
+
switch (rsvd_pkt->type) {
case RSVD_BEACON:
- skb_new = rtw_beacon_get(hw, vif);
+ skb_new = ieee80211_beacon_get(hw, vif);
break;
case RSVD_PS_POLL:
skb_new = ieee80211_pspoll_get(hw, vif);
@@ -876,7 +876,7 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
skb_new = rtw_lps_pg_dpk_get(hw);
break;
case RSVD_LPS_PG_INFO:
- skb_new = rtw_lps_pg_info_get(hw, vif);
+ skb_new = rtw_lps_pg_info_get(hw);
break;
case RSVD_PROBE_REQ:
ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
@@ -945,6 +945,8 @@ static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
if (!rsvd_pkt)
return NULL;
+ INIT_LIST_HEAD(&rsvd_pkt->vif_list);
+ INIT_LIST_HEAD(&rsvd_pkt->build_list);
rsvd_pkt->type = type;
rsvd_pkt->add_txdesc = txdesc;
@@ -952,51 +954,124 @@ static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
}
static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
struct rtw_rsvd_page *rsvd_pkt)
{
lockdep_assert_held(&rtwdev->mutex);
- list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+
+ list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
}
-void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
- bool txdesc)
+static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ enum rtw_rsvd_packet_type type,
+ bool txdesc)
{
struct rtw_rsvd_page *rsvd_pkt;
rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
- if (!rsvd_pkt)
+ if (!rsvd_pkt) {
+ rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
return;
+ }
- rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
+ rsvd_pkt->rtwvif = rtwvif;
+ rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}
-void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
- struct cfg80211_ssid *ssid)
+static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ struct cfg80211_ssid *ssid)
{
struct rtw_rsvd_page *rsvd_pkt;
rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
- if (!rsvd_pkt)
+ if (!rsvd_pkt) {
+ rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
return;
+ }
+ rsvd_pkt->rtwvif = rtwvif;
rsvd_pkt->ssid = ssid;
- rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
+ rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}
-void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
+void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
{
struct rtw_rsvd_page *rsvd_pkt, *tmp;
lockdep_assert_held(&rtwdev->mutex);
- list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
- if (rsvd_pkt->type == RSVD_BEACON)
- continue;
- list_del(&rsvd_pkt->list);
+ /* remove all of the rsvd pages for the vif */
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
+ vif_list) {
+ list_del(&rsvd_pkt->vif_list);
+ if (!list_empty(&rsvd_pkt->build_list))
+ list_del(&rsvd_pkt->build_list);
kfree(rsvd_pkt);
}
}
+void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC &&
+ vif->type != NL80211_IFTYPE_MESH_POINT) {
+ rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
+ vif->type);
+ return;
+ }
+
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
+}
+
+void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
+ struct cfg80211_ssid *ssid;
+ int i;
+
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
+ vif->type);
+ return;
+ }
+
+ for (i = 0; i < rtw_pno_req->match_set_cnt; i++) {
+ ssid = &rtw_pno_req->match_sets[i].ssid;
+ rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
+ }
+
+ rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
+}
+
+void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
+ vif->type);
+ return;
+ }
+
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
+}
+
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size)
{
@@ -1060,8 +1135,72 @@ static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}
-static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
- struct ieee80211_vif *vif, u32 *size)
+static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
+{
+ struct rtw_rsvd_page *rsvd_pkt, *tmp;
+
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
+ build_list) {
+ list_del_init(&rsvd_pkt->build_list);
+
+ /* Only the dummy rsvd page is freed here; the others are
+ * freed when their vif is removed
+ */
+ if (rsvd_pkt->type == RSVD_DUMMY)
+ kfree(rsvd_pkt);
+ }
+}
+
+static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
+ if (rsvd_pkt->type == RSVD_BEACON)
+ list_add(&rsvd_pkt->build_list,
+ &rtwdev->rsvd_page_list);
+ else
+ list_add_tail(&rsvd_pkt->build_list,
+ &rtwdev->rsvd_page_list);
+ }
+}
+
+static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ __rtw_build_rsvd_page_reset(rtwdev);
+
+ /* gather rsvd page from vifs */
+ rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);
+
+ rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
+ struct rtw_rsvd_page, build_list);
+ if (!rsvd_pkt) {
+ WARN(1, "Should not have an empty reserved page\n");
+ return -EINVAL;
+ }
+
+ /* the first rsvd page should be a beacon; otherwise, add a dummy one */
+ if (rsvd_pkt->type != RSVD_BEACON) {
+ struct rtw_rsvd_page *dummy_pkt;
+
+ dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
+ if (!dummy_pkt) {
+ rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
+ return -ENOMEM;
+ }
+
+ list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
+ }
+
+ return 0;
+}
+
+static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -1071,13 +1210,21 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
u8 total_page = 0;
u8 page_size, page_margin, tx_desc_sz;
u8 *buf;
+ int ret;
page_size = chip->page_size;
tx_desc_sz = chip->tx_pkt_desc_sz;
page_margin = page_size - tx_desc_sz;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
- iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt);
+ ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev,
+ "failed to build rsvd page from vifs, ret %d\n", ret);
+ return NULL;
+ }
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
+ iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
if (!iter) {
rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
@@ -1101,7 +1248,8 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
* is smaller than the actual size of the whole rsvd_page
*/
if (total_page == 0) {
- if (rsvd_pkt->type != RSVD_BEACON) {
+ if (rsvd_pkt->type != RSVD_BEACON &&
+ rsvd_pkt->type != RSVD_DUMMY) {
rtw_err(rtwdev, "first page should be a beacon\n");
goto release_skb;
}
@@ -1129,7 +1277,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
* And that rsvd_pkt does not require tx_desc because when it goes
* through TX path, the TX path will generate one for it.
*/
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
page, buf, rsvd_pkt);
if (page == 0)
@@ -1145,7 +1293,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
return buf;
release_skb:
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
kfree_skb(rsvd_pkt->skb);
rsvd_pkt->skb = NULL;
}
@@ -1153,18 +1301,31 @@ release_skb:
return NULL;
}
-static int
-rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+static int rtw_download_beacon(struct rtw_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_rsvd_page *rsvd_pkt;
struct sk_buff *skb;
int ret = 0;
- skb = rtw_beacon_get(hw, vif);
+ rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
+ struct rtw_rsvd_page, build_list);
+ if (!rsvd_pkt) {
+ rtw_err(rtwdev, "failed to get rsvd page from build list\n");
+ return -ENOENT;
+ }
+
+ if (rsvd_pkt->type != RSVD_BEACON &&
+ rsvd_pkt->type != RSVD_DUMMY) {
+ rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
+ rsvd_pkt->type);
+ return -EINVAL;
+ }
+
+ skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
if (!skb) {
rtw_err(rtwdev, "failed to get beacon skb\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
@@ -1173,17 +1334,16 @@ rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
dev_kfree_skb(skb);
-out:
return ret;
}
-int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
{
u8 *buf;
u32 size;
int ret;
- buf = rtw_build_rsvd_page(rtwdev, vif, &size);
+ buf = rtw_build_rsvd_page(rtwdev, &size);
if (!buf) {
rtw_err(rtwdev, "failed to build rsvd page pkt\n");
return -ENOMEM;
@@ -1200,7 +1360,7 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
* the beacon again to replace the TX desc header, and we will get
* a correct tx_desc for the beacon in the rsvd page.
*/
- ret = rtw_download_beacon(rtwdev, vif);
+ ret = rtw_download_beacon(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to download beacon\n");
goto free;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index ccd27bd45775..cdd244857048 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -36,11 +36,12 @@ enum rtw_c2h_cmd_id_ext {
struct rtw_c2h_cmd {
u8 id;
u8 seq;
- u8 payload[0];
+ u8 payload[];
} __packed;
enum rtw_rsvd_packet_type {
RSVD_BEACON,
+ RSVD_DUMMY,
RSVD_PS_POLL,
RSVD_PROBE_RESP,
RSVD_NULL,
@@ -98,7 +99,13 @@ struct rtw_lps_pg_info_hdr {
} __packed;
struct rtw_rsvd_page {
- struct list_head list;
+ /* associated with each vif */
+ struct list_head vif_list;
+ struct rtw_vif *rtwvif;
+
+ /* associated when building the rsvd page */
+ struct list_head build_list;
+
struct sk_buff *skb;
enum rtw_rsvd_packet_type type;
u8 page;
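A single rtw_rsvd_page can now sit on two lists at once: vif_list ties it to the vif that owns it (and controls its lifetime), while build_list is a transient, beacon-first ordering assembled only while composing the reserved-page buffer. A sketch of the two views, with the list heads as declared here:

    struct rtw_rsvd_page *rsvd_pkt;

    /* per-vif view: every page this vif registered */
    list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list)
            pr_debug("vif rsvd page type %d\n", rsvd_pkt->type);

    /* build view: the flattened ordering used for the download */
    list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list)
            pr_debug("build rsvd page at page %u\n", rsvd_pkt->page);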
@@ -502,15 +509,17 @@ void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
-void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
- bool txdesc);
-void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
- struct cfg80211_ssid *ssid);
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size);
-void rtw_reset_rsvd_page(struct rtw_dev *rtwdev);
-int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev,
- struct ieee80211_vif *vif);
+void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif);
+void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif);
+void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif);
+void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif);
+int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev);
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf);
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 85a81a578fd5..2cba327e6218 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -7,9 +7,10 @@
/* ops for PCI, USB and SDIO */
struct rtw_hci_ops {
- int (*tx)(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb);
+ int (*tx_write)(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb);
+ void (*tx_kick_off)(struct rtw_dev *rtwdev);
int (*setup)(struct rtw_dev *rtwdev);
int (*start)(struct rtw_dev *rtwdev);
void (*stop)(struct rtw_dev *rtwdev);
@@ -28,11 +29,16 @@ struct rtw_hci_ops {
void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
};
-static inline int rtw_hci_tx(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb)
+static inline int rtw_hci_tx_write(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
{
- return rtwdev->hci.ops->tx(rtwdev, pkt_info, skb);
+ return rtwdev->hci.ops->tx_write(rtwdev, pkt_info, skb);
+}
+
+static inline void rtw_hci_tx_kick_off(struct rtw_dev *rtwdev)
+{
+ return rtwdev->hci.ops->tx_kick_off(rtwdev);
}
static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
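Splitting tx into tx_write and tx_kick_off lets a caller queue several frames and ring the doorbell once instead of once per frame. A hypothetical batching caller (the helper name and frame source are illustrative; a real caller would also fill pkt_info):

    static void sketch_tx_burst(struct rtw_dev *rtwdev,
                                struct sk_buff_head *frames)
    {
            struct sk_buff *skb;

            while ((skb = skb_dequeue(frames))) {
                    struct rtw_tx_pkt_info pkt_info = {0};

                    if (rtw_hci_tx_write(rtwdev, &pkt_info, skb))
                            dev_kfree_skb_any(skb);
            }

            /* one kick-off flushes everything queued above */
            rtw_hci_tx_kick_off(rtwdev);
    }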
@@ -193,6 +199,32 @@ rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
return ret;
}
+static inline u16
+rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw_read16(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
+static inline u8
+rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw_read8(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
static inline void
rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
{
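The 16-bit and 8-bit variants mirror rtw_read32_mask(): __ffs() locates the lowest set bit of the mask, so the caller receives the field already shifted down to bit 0. An arbitrary example (register address and mask chosen for illustration only):

    /* read a 2-bit field occupying bits 5:4 of a byte-wide register */
    u8 sel = rtw_read8_mask(rtwdev, 0x4c6, GENMASK(5, 4));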
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index cadf0abbe16b..7b245779ff90 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -17,10 +17,10 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
txsc20 = primary_ch_idx;
if (bw == RTW_CHANNEL_WIDTH_80) {
- if (txsc20 == 1 || txsc20 == 3)
- txsc40 = 9;
+ if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
+ txsc40 = RTW_SC_40_UPPER;
else
- txsc40 = 10;
+ txsc40 = RTW_SC_40_LOWER;
}
rtw_write8(rtwdev, REG_DATA_SC,
BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
@@ -101,7 +101,7 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
}
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
- struct rtw_pwr_seq_cmd *cmd)
+ const struct rtw_pwr_seq_cmd *cmd)
{
u8 value;
u8 flag = 0;
@@ -139,9 +139,10 @@ static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
}
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
- u8 cut_mask, struct rtw_pwr_seq_cmd *cmd)
+ u8 cut_mask,
+ const struct rtw_pwr_seq_cmd *cmd)
{
- struct rtw_pwr_seq_cmd *cur_cmd;
+ const struct rtw_pwr_seq_cmd *cur_cmd;
u32 offset;
u8 value;
@@ -183,13 +184,13 @@ static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
}
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
- struct rtw_pwr_seq_cmd **cmd_seq)
+ const struct rtw_pwr_seq_cmd **cmd_seq)
{
u8 cut_mask;
u8 intf_mask;
u8 cut;
u32 idx = 0;
- struct rtw_pwr_seq_cmd *cmd;
+ const struct rtw_pwr_seq_cmd *cmd;
int ret;
cut = rtwdev->hal.cut_version;
@@ -223,7 +224,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_pwr_seq_cmd **pwr_seq;
+ const struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
@@ -705,7 +706,7 @@ dlfw_fail:
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
- struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
+ const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
u32 prio_queues = 0;
if (queues & BIT(IEEE80211_AC_VO))
@@ -793,7 +794,7 @@ void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_rqpn *rqpn = NULL;
+ const struct rtw_rqpn *rqpn = NULL;
u16 txdma_pq_map = 0;
switch (rtw_hci_type(rtwdev)) {
@@ -882,7 +883,7 @@ static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_page_table *pg_tbl = NULL;
+ const struct rtw_page_table *pg_tbl = NULL;
u16 pubq_num;
int ret;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 6fc33e11d08c..d7d02e4c0184 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -75,15 +75,12 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
rtw_leave_lps_deep(rtwdev);
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- if (hw->conf.flags & IEEE80211_CONF_IDLE) {
- rtw_enter_ips(rtwdev);
- } else {
- ret = rtw_leave_ips(rtwdev);
- if (ret) {
- rtw_err(rtwdev, "failed to leave idle state\n");
- goto out;
- }
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ !(hw->conf.flags & IEEE80211_CONF_IDLE)) {
+ ret = rtw_leave_ips(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave idle state\n");
+ goto out;
}
}
@@ -99,6 +96,10 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (hw->conf.flags & IEEE80211_CONF_IDLE))
+ rtw_enter_ips(rtwdev);
+
out:
mutex_unlock(&rtwdev->mutex);
return ret;
@@ -160,6 +161,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
rtw_txq_init(rtwdev, vif->txq);
+ INIT_LIST_HEAD(&rtwvif->rsvd_page_list);
mutex_lock(&rtwdev->mutex);
@@ -168,18 +170,24 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ rtw_add_rsvd_page_bcn(rtwdev, rtwvif);
net_type = RTW_NET_AP_MODE;
bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT;
break;
case NL80211_IFTYPE_ADHOC:
+ rtw_add_rsvd_page_bcn(rtwdev, rtwvif);
net_type = RTW_NET_AD_HOC;
bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT;
break;
case NL80211_IFTYPE_STATION:
- default:
+ rtw_add_rsvd_page_sta(rtwdev, rtwvif);
net_type = RTW_NET_NO_LINK;
bcn_ctrl = BIT_EN_BCN_FUNCTION;
break;
+ default:
+ WARN_ON(1);
+ mutex_unlock(&rtwdev->mutex);
+ return -EINVAL;
}
ether_addr_copy(rtwvif->mac_addr, vif->addr);
@@ -210,6 +218,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
rtw_leave_lps_deep(rtwdev);
rtw_txq_cleanup(rtwdev, vif->txq);
+ rtw_remove_rsvd_page(rtwdev, rtwvif);
eth_zero_addr(rtwvif->mac_addr);
config |= PORT_SET_MAC_ADDR;
@@ -341,12 +350,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
chip->ops->phy_calibration(rtwdev);
rtwvif->aid = conf->aid;
- rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
- rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
- rtw_fw_download_rsvd_page(rtwdev, vif);
+ rtw_fw_download_rsvd_page(rtwdev);
rtw_send_rsvd_page_h2c(rtwdev);
rtw_coex_media_status_notify(rtwdev, conf->assoc);
if (rtw_bf_support)
@@ -355,7 +359,6 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_leave_lps(rtwdev);
net_type = RTW_NET_NO_LINK;
rtwvif->aid = 0;
- rtw_reset_rsvd_page(rtwdev);
rtw_bf_disassoc(rtwdev, vif, conf);
}
@@ -370,7 +373,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_BEACON)
- rtw_fw_download_rsvd_page(rtwdev, vif);
+ rtw_fw_download_rsvd_page(rtwdev);
if (changed & BSS_CHANGED_MU_GROUPS) {
struct rtw_chip_info *chip = rtwdev->chip;
@@ -514,6 +517,9 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
/* suppress error messages */
return -EOPNOTSUPP;
default:
@@ -552,7 +558,7 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
/* download new cam settings for PG to backup */
if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
- rtw_fw_download_rsvd_page(rtwdev, vif);
+ rtw_fw_download_rsvd_page(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
@@ -592,6 +598,20 @@ static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
+static bool rtw_ops_can_aggregate_in_amsdu(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ /* we don't want to enable TX AMSDU on 2.4 GHz */
+ if (hal->current_band_type == RTW_BAND_2G)
+ return false;
+
+ return true;
+}
+
static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
@@ -787,6 +807,7 @@ const struct ieee80211_ops rtw_ops = {
.sta_remove = rtw_ops_sta_remove,
.set_key = rtw_ops_set_key,
.ampdu_action = rtw_ops_ampdu_action,
+ .can_aggregate_in_amsdu = rtw_ops_can_aggregate_in_amsdu,
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
.mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 2845d2838f7b..7640e97706f5 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -317,15 +317,15 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW_CHANNEL_WIDTH_20;
- primary_chan_idx = 0;
+ primary_chan_idx = RTW_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW_CHANNEL_WIDTH_40;
if (primary_freq > center_freq) {
- primary_chan_idx = 1;
+ primary_chan_idx = RTW_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = 2;
+ primary_chan_idx = RTW_SC_20_LOWER;
center_chan += 2;
}
break;
@@ -333,10 +333,10 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
bandwidth = RTW_CHANNEL_WIDTH_80;
if (primary_freq > center_freq) {
if (primary_freq - center_freq == 10) {
- primary_chan_idx = 1;
+ primary_chan_idx = RTW_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = 3;
+ primary_chan_idx = RTW_SC_20_UPMOST;
center_chan -= 6;
}
/* assign the center channel used
@@ -345,10 +345,10 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4;
} else {
if (center_freq - primary_freq == 10) {
- primary_chan_idx = 2;
+ primary_chan_idx = RTW_SC_20_LOWER;
center_chan += 2;
} else {
- primary_chan_idx = 4;
+ primary_chan_idx = RTW_SC_20_LOWEST;
center_chan += 6;
}
/* assign the center channel used
@@ -909,11 +909,16 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
clear_bit(RTW_FLAG_RUNNING, rtwdev->flags);
clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
+ mutex_unlock(&rtwdev->mutex);
+
+ cancel_work_sync(&rtwdev->c2h_work);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
cancel_delayed_work_sync(&coex->bt_relink_work);
cancel_delayed_work_sync(&coex->bt_reenable_work);
cancel_delayed_work_sync(&coex->defreeze_work);
+ mutex_lock(&rtwdev->mutex);
+
rtw_power_off(rtwdev);
}
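Dropping rtwdev->mutex around the cancel_*_sync() calls avoids a deadlock: the C2H worker itself takes rtwdev->mutex (see the fw.c hunk above), so cancelling it synchronously while holding the mutex could wait forever. In sketch form:

    /* worker (fw.c):                      stop path (here):
     *   mutex_lock(&rtwdev->mutex);         holds rtwdev->mutex
     *   ... handle C2H event ...            cancel_work_sync(&c2h_work)
     *   mutex_unlock(&rtwdev->mutex);         -> waits for the worker,
     *                                            which waits for the mutex
     */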
@@ -1113,7 +1118,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
}
hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1);
- hal->fab_version = BIT_GET_VENDOR_ID(hal->chip_version) >> 2;
hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version);
hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 0 : 1;
if (hal->chip_version & BIT_RF_TYPE_ID) {
@@ -1128,11 +1132,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
hal->antenna_rx = BB_PATH_A;
}
- if (hal->fab_version == 2)
- hal->fab_version = 1;
- else if (hal->fab_version == 1)
- hal->fab_version = 2;
-
efuse->physical_size = chip->phy_efuse_size;
efuse->logical_size = chip->log_efuse_size;
efuse->protect_size = chip->ptct_efuse_size;
@@ -1395,10 +1394,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
else
rtwdev->lps_conf.deep_mode = rtw_fw_lps_deep_mode;
- mutex_lock(&rtwdev->mutex);
- rtw_add_rsvd_page(rtwdev, RSVD_BEACON, false);
- mutex_unlock(&rtwdev->mutex);
-
rtw_stats_init(rtwdev);
/* default rx filter setting */
@@ -1441,8 +1436,9 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
skb_queue_purge(&rtwdev->tx_report.queue);
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
- list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
- list_del(&rsvd_pkt->list);
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
+ build_list) {
+ list_del(&rsvd_pkt->build_list);
kfree(rsvd_pkt);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index f334d201bfb5..c6b590fdb573 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -99,6 +99,16 @@ enum rtw_bandwidth {
RTW_CHANNEL_WIDTH_10 = 6,
};
+enum rtw_sc_offset {
+ RTW_SC_DONT_CARE = 0,
+ RTW_SC_20_UPPER = 1,
+ RTW_SC_20_LOWER = 2,
+ RTW_SC_20_UPMOST = 3,
+ RTW_SC_20_LOWEST = 4,
+ RTW_SC_40_UPPER = 9,
+ RTW_SC_40_LOWER = 10,
+};
+
enum rtw_net_type {
RTW_NET_NO_LINK = 0,
RTW_NET_AD_HOC = 1,
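These named indices replace the magic numbers previously scattered through mac.c and main.c. For an 80 MHz channel, the 40 MHz sideband index follows directly from the 20 MHz one, as the mac.c hunk below computes:

    u8 txsc20 = primary_ch_idx;
    u8 txsc40 = (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST) ?
                RTW_SC_40_UPPER : RTW_SC_40_LOWER;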
@@ -505,6 +515,18 @@ struct rtw_hw_reg {
u32 mask;
};
+struct rtw_reg_domain {
+ u32 addr;
+ u32 mask;
+#define RTW_REG_DOMAIN_MAC32 0
+#define RTW_REG_DOMAIN_MAC16 1
+#define RTW_REG_DOMAIN_MAC8 2
+#define RTW_REG_DOMAIN_RF_A 3
+#define RTW_REG_DOMAIN_RF_B 4
+#define RTW_REG_DOMAIN_NL 0xFF
+ u8 domain;
+};
+
struct rtw_backup_info {
u8 len;
u32 reg;
@@ -552,6 +574,9 @@ struct rtw_tx_pkt_info {
bool short_gi;
bool report;
bool rts;
+ bool dis_qselseq;
+ bool en_hwseq;
+ u8 hw_ssn_sel;
};
struct rtw_rx_pkt_stat {
@@ -742,6 +767,7 @@ struct rtw_vif {
u8 bssid[ETH_ALEN];
u8 port;
u8 bcn_ctrl;
+ struct list_head rsvd_page_list;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
const struct rtw_vif_port *conf;
@@ -948,10 +974,10 @@ struct rtw_wow_param {
};
struct rtw_intf_phy_para_table {
- struct rtw_intf_phy_para *usb2_para;
- struct rtw_intf_phy_para *usb3_para;
- struct rtw_intf_phy_para *gen1_para;
- struct rtw_intf_phy_para *gen2_para;
+ const struct rtw_intf_phy_para *usb2_para;
+ const struct rtw_intf_phy_para *usb3_para;
+ const struct rtw_intf_phy_para *gen1_para;
+ const struct rtw_intf_phy_para *gen2_para;
u8 n_usb2_para;
u8 n_usb3_para;
u8 n_gen1_para;
@@ -1048,13 +1074,13 @@ struct rtw_chip_info {
/* init values */
u8 sys_func_en;
- struct rtw_pwr_seq_cmd **pwr_on_seq;
- struct rtw_pwr_seq_cmd **pwr_off_seq;
- struct rtw_rqpn *rqpn_table;
- struct rtw_page_table *page_table;
- struct rtw_intf_phy_para_table *intf_table;
+ const struct rtw_pwr_seq_cmd **pwr_on_seq;
+ const struct rtw_pwr_seq_cmd **pwr_off_seq;
+ const struct rtw_rqpn *rqpn_table;
+ const struct rtw_page_table *page_table;
+ const struct rtw_intf_phy_para_table *intf_table;
- struct rtw_hw_reg *dig;
+ const struct rtw_hw_reg *dig;
u32 rf_base_addr[2];
u32 rf_sipi_addr[2];
@@ -1096,6 +1122,7 @@ struct rtw_chip_info {
u8 bt_afh_span_bw40;
u8 afh_5g_num;
u8 wl_rf_para_num;
+ u8 coex_info_hw_regs_num;
const u8 *bt_rssi_step;
const u8 *wl_rssi_step;
const struct coex_table_para *table_nsant;
@@ -1105,6 +1132,7 @@ struct rtw_chip_info {
const struct coex_rf_para *wl_rf_para_tx;
const struct coex_rf_para *wl_rf_para_rx;
const struct coex_5g_afh_map *afh_5g;
+ const struct rtw_reg_domain *coex_info_hw_regs;
};
enum rtw_coex_bt_state_cnt {
@@ -1151,6 +1179,7 @@ struct rtw_coex_rfe {
struct rtw_coex_dm {
bool cur_ps_tdma_on;
bool cur_wl_rx_low_gain_en;
+ bool ignore_wl_act;
u8 reason;
u8 bt_rssi_state[4];
@@ -1231,6 +1260,9 @@ struct rtw_coex_stat {
u32 bt_supported_version;
u32 bt_supported_feature;
+ u32 patch_ver;
+ u16 bt_reg_vendor_ae;
+ u16 bt_reg_vendor_ac;
s8 bt_rssi;
u8 kt_ver;
u8 gnt_workaround_state;
@@ -1500,7 +1532,7 @@ struct rtw_fifo_conf {
u16 rsvd_cpu_instr_addr;
u16 rsvd_fw_txbuf_addr;
u16 rsvd_csibuf_addr;
- struct rtw_rqpn *rqpn;
+ const struct rtw_rqpn *rqpn;
};
struct rtw_fw_state {
@@ -1517,7 +1549,6 @@ struct rtw_hal {
u32 rcr;
u32 chip_version;
- u8 fab_version;
u8 cut_version;
u8 mp_chip;
u8 oem_id;
@@ -1631,7 +1662,7 @@ struct rtw_dev {
struct rtw_wow_param wow;
/* hci related data, must be last */
- u8 priv[0] __aligned(sizeof(void *));
+ u8 priv[] __aligned(sizeof(void *));
};
#include "hci.h"
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 1fbc14c149ec..1af87eb2e53a 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -186,6 +186,11 @@ static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
dma_addr_t dma;
u8 *head;
+ if (len > TRX_BD_IDX_MASK) {
+ rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
+ return -EINVAL;
+ }
+
head = pci_zalloc_consistent(pdev, ring_sz, &dma);
if (!head) {
rtw_err(rtwdev, "failed to allocate tx ring\n");
@@ -259,6 +264,11 @@ static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
int i, allocated;
int ret = 0;
+ if (len > TRX_BD_IDX_MASK) {
+ rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
+ return -EINVAL;
+ }
+
head = pci_zalloc_consistent(pdev, ring_sz, &dma);
if (!head) {
rtw_err(rtwdev, "failed to allocate rx ring\n");
@@ -382,6 +392,7 @@ static int rtw_pci_init(struct rtw_dev *rtwdev)
rtwpci->irq_mask[3] = IMR_H2CDOK |
0;
spin_lock_init(&rtwpci->irq_lock);
+ spin_lock_init(&rtwpci->hwirq_lock);
ret = rtw_pci_init_trx_ring(rtwdev);
return ret;
@@ -404,56 +415,56 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
+ rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
/* reset read/write point */
@@ -472,19 +483,35 @@ static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
+
rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
rtwpci->irq_enabled = true;
+
+ spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
+
+ if (!rtwpci->irq_enabled)
+ goto out;
+
rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
rtwpci->irq_enabled = false;
+
+out:
+ spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
@@ -520,11 +547,10 @@ static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
static int rtw_pci_start(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- unsigned long flags;
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
rtw_pci_enable_interrupt(rtwdev, rtwpci);
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ spin_unlock_bh(&rtwpci->irq_lock);
return 0;
}
@@ -532,12 +558,11 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- unsigned long flags;
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
rtw_pci_disable_interrupt(rtwdev, rtwpci);
rtw_pci_dma_release(rtwdev, rtwpci);
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ spin_unlock_bh(&rtwpci->irq_lock);
}
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
@@ -590,9 +615,8 @@ static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- unsigned long flags;
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
rtw_pci_deep_ps_enter(rtwdev);
@@ -600,7 +624,7 @@ static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
rtw_pci_deep_ps_leave(rtwdev);
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ spin_unlock_bh(&rtwpci->irq_lock);
}
static u8 ac_to_hwq[] = {
@@ -667,9 +691,34 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}
-static int rtw_pci_xmit(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb, u8 queue)
+static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *ring;
+ u32 bd_idx;
+
+ ring = &rtwpci->tx_rings[queue];
+ bd_idx = rtw_pci_tx_queue_idx_addr[queue];
+
+ spin_lock_bh(&rtwpci->irq_lock);
+ rtw_pci_deep_ps_leave(rtwdev);
+ rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
+ spin_unlock_bh(&rtwpci->irq_lock);
+}
+
+static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ u8 queue;
+
+ for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
+ if (test_and_clear_bit(queue, rtwpci->tx_queued))
+ rtw_pci_tx_kick_off_queue(rtwdev, queue);
+}
+
+static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb, u8 queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -682,8 +731,6 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
u32 psb_len;
u8 *pkt_desc;
struct rtw_pci_tx_buffer_desc *buf_desc;
- u32 bd_idx;
- unsigned long flags;
ring = &rtwpci->tx_rings[queue];
@@ -720,25 +767,20 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
tx_data->dma = dma;
tx_data->sn = pkt_info->sn;
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
- rtw_pci_deep_ps_leave(rtwdev);
skb_queue_tail(&ring->queue, skb);
- /* kick off tx queue */
- if (queue != RTW_TX_QUEUE_BCN) {
- if (++ring->r.wp >= ring->r.len)
- ring->r.wp = 0;
- bd_idx = rtw_pci_tx_queue_idx_addr[queue];
- rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
- } else {
- u32 reg_bcn_work;
-
- reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
- reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
- rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
- }
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ if (queue == RTW_TX_QUEUE_BCN)
+ goto out_unlock;
+
+ /* update the write index; kick it off later */
+ set_bit(queue, rtwpci->tx_queued);
+ if (++ring->r.wp >= ring->r.len)
+ ring->r.wp = 0;
+
+out_unlock:
+ spin_unlock_bh(&rtwpci->irq_lock);
return 0;
}
@@ -747,56 +789,59 @@ static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
u32 size)
{
struct sk_buff *skb;
- struct rtw_tx_pkt_info pkt_info;
- u32 tx_pkt_desc_sz;
- u32 length;
+ struct rtw_tx_pkt_info pkt_info = {0};
+ u8 reg_bcn_work;
+ int ret;
- tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
- length = size + tx_pkt_desc_sz;
- skb = dev_alloc_skb(length);
+ skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
if (!skb)
return -ENOMEM;
- skb_reserve(skb, tx_pkt_desc_sz);
- memcpy((u8 *)skb_put(skb, size), buf, size);
- memset(&pkt_info, 0, sizeof(pkt_info));
- pkt_info.tx_pkt_size = size;
- pkt_info.offset = tx_pkt_desc_sz;
+ ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write rsvd page data\n");
+ return ret;
+ }
+
+ /* reserved pages go through the beacon queue */
+ reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
+ reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
+ rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
- return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
+ return 0;
}
static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
struct sk_buff *skb;
- struct rtw_tx_pkt_info pkt_info;
- u32 tx_pkt_desc_sz;
- u32 length;
+ struct rtw_tx_pkt_info pkt_info = {0};
+ int ret;
- tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
- length = size + tx_pkt_desc_sz;
- skb = dev_alloc_skb(length);
+ skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
if (!skb)
return -ENOMEM;
- skb_reserve(skb, tx_pkt_desc_sz);
- memcpy((u8 *)skb_put(skb, size), buf, size);
- memset(&pkt_info, 0, sizeof(pkt_info));
- pkt_info.tx_pkt_size = size;
+ ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write h2c data\n");
+ return ret;
+ }
+
+ rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
- return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
+ return 0;
}
-static int rtw_pci_tx(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb)
+static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *ring;
u8 queue = rtw_hw_queue_mapping(skb);
int ret;
- ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
+ ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
if (ret)
return ret;
@@ -827,7 +872,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
bd_idx = rtw_read32(rtwdev, bd_idx_addr);
cur_rp = bd_idx >> 16;
- cur_rp &= 0xfff;
+ cur_rp &= TRX_BD_IDX_MASK;
if (cur_rp >= ring->r.rp)
count = cur_rp - ring->r.rp;
else
@@ -901,7 +946,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
cur_wp = tmp >> 16;
- cur_wp &= 0xfff;
+ cur_wp &= TRX_BD_IDX_MASK;
if (cur_wp >= ring->r.wp)
count = cur_wp - ring->r.wp;
else
@@ -961,6 +1006,10 @@ next_rp:
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci, u32 *irq_status)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
+
irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
@@ -970,6 +1019,8 @@ static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
+
+ spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
@@ -977,10 +1028,6 @@ static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
struct rtw_dev *rtwdev = dev;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- spin_lock(&rtwpci->irq_lock);
- if (!rtwpci->irq_enabled)
- goto out;
-
/* disable RTW PCI interrupt to avoid more interrupts before the end of
* thread function
*
@@ -990,8 +1037,6 @@ static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
* a new HISR flag is set.
*/
rtw_pci_disable_interrupt(rtwdev, rtwpci);
-out:
- spin_unlock(&rtwpci->irq_lock);
return IRQ_WAKE_THREAD;
}
@@ -1000,10 +1045,9 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
struct rtw_dev *rtwdev = dev;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- unsigned long flags;
u32 irq_status[4];
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
if (irq_status[0] & IMR_MGNTDOK)
@@ -1025,7 +1069,7 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
/* all of the jobs for this interrupt have been done */
rtw_pci_enable_interrupt(rtwdev, rtwpci);
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ spin_unlock_bh(&rtwpci->irq_lock);
return IRQ_HANDLED;
}
@@ -1197,11 +1241,18 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
u16 link_ctrl;
int ret;
+ /* RTL8822CE has REFCLK auto-calibration enabled, so it does not
+ * need extra clock delay to cover the REFCLK timing gap.
+ */
+ if (chip->id == RTW_CHIP_TYPE_8822C)
+ rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
+
/* Though there is standard PCIE configuration space to set the
* link control register, but by Realtek's design, driver should
* check if host supports CLKREQ/ASPM to enable the HW module.
@@ -1248,7 +1299,7 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_intf_phy_para *para;
+ const struct rtw_intf_phy_para *para;
u16 cut;
u16 value;
u16 offset;
@@ -1287,22 +1338,17 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
rtw_pci_link_cfg(rtwdev);
}
-#ifdef CONFIG_PM
-static int rtw_pci_suspend(struct device *dev)
+static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
return 0;
}
-static int rtw_pci_resume(struct device *dev)
+static int __maybe_unused rtw_pci_resume(struct device *dev)
{
return 0;
}
static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
-#define RTW_PM_OPS (&rtw_pm_ops)
-#else
-#define RTW_PM_OPS NULL
-#endif
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
@@ -1364,7 +1410,8 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
}
static struct rtw_hci_ops rtw_pci_ops = {
- .tx = rtw_pci_tx,
+ .tx_write = rtw_pci_tx_write,
+ .tx_kick_off = rtw_pci_tx_kick_off,
.setup = rtw_pci_setup,
.start = rtw_pci_start,
.stop = rtw_pci_stop,
@@ -1530,7 +1577,7 @@ static struct pci_driver rtw_pci_driver = {
.id_table = rtw_pci_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
- .driver.pm = RTW_PM_OPS,
+ .driver.pm = &rtw_pm_ops,
};
module_pci_driver(rtw_pci_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 1580cfc57361..3ac4fb328d31 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -39,6 +39,7 @@
#define RTK_PCIE_LINK_CFG 0x0719
#define BIT_CLKREQ_SW_EN BIT(4)
#define BIT_L1_SW_EN BIT(3)
+#define RTK_PCIE_CLKDLY_CTRL 0x0725
#define BIT_PCI_BCNQ_FLAG BIT(4)
#define RTK_PCI_TXBD_DESA_BCNQ 0x308
@@ -51,6 +52,8 @@
#define RTK_PCI_TXBD_DESA_HI0Q 0x340
#define RTK_PCI_RXBD_DESA_MPDUQ 0x338
+#define TRX_BD_IDX_MASK GENMASK(11, 0)
+
/* BCNQ is specialized for rsvd page, does not need to specify a number */
#define RTK_PCI_TXBD_NUM_H2CQ 0x1328
#define RTK_PCI_TXBD_NUM_MGMTQ 0x380
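TRX_BD_IDX_MASK names the 12-bit ring-index field that pci.c previously wrote as the bare literal 0xfff. A compile-time check (placement hypothetical) makes the equivalence explicit:

    BUILD_BUG_ON(TRX_BD_IDX_MASK != 0xfff); /* GENMASK(11, 0) == 0xfff */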
@@ -197,12 +200,15 @@ struct rtw_pci_rx_ring {
struct rtw_pci {
struct pci_dev *pdev;
- /* used for pci interrupt */
+ /* Used for PCI interrupt. */
+ spinlock_t hwirq_lock;
+ /* Used for PCI TX queueing. */
spinlock_t irq_lock;
u32 irq_mask[4];
bool irq_enabled;
u16 rx_tag;
+ DECLARE_BITMAP(tx_queued, RTK_MAX_TX_QUEUE_NUM);
struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
u16 link_ctrl;
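The split leaves two locks with distinct rules: hwirq_lock must be taken irqsave because the hard-IRQ handler races with it on the HIMR/HISR registers, while irq_lock becomes a BH lock that only serializes the TX path against the threaded handler. A sketch of each side, mirroring pci.c above:

    /* register side: may race with the hard-IRQ handler */
    spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
    rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
    spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);

    /* queueing side: process/BH context only */
    spin_lock_bh(&rtwpci->irq_lock);
    set_bit(queue, rtwpci->tx_queued);
    spin_unlock_bh(&rtwpci->irq_lock);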
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index eea9d888fbf1..8793dd22188f 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -749,20 +749,10 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
direct_addr = base_addr[rf_path] + (addr << 2);
mask &= RFREG_MASK;
- if (addr == RF_CFGCH) {
- rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
- rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
- }
-
rtw_write32_mask(rtwdev, direct_addr, mask, data);
udelay(1);
- if (addr == RF_CFGCH) {
- rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
- rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
- }
-
return true;
}
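The PI-mode toggling around RF_CFGCH writes is dropped here; its chip-specific replacement appears in the rtw8822c.c hunk below, where rtw8822c_rstb_3wire() gates the 3-wire interface across the channel-register update instead. A sketch of the intended sequence (the re-enable after the writes is assumed to follow past the end of this excerpt):

    rtw8822c_rstb_3wire(rtwdev, false);   /* hold 3-wire while RF18 changes */
    rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_reg18);
    rtw8822c_rstb_3wire(rtwdev, true);    /* assumed: release and latch */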
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 4bc14b1a6340..4dd7d4143b04 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -645,7 +645,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
break;
case RTW_CHANNEL_WIDTH_40:
- if (primary_ch_idx == 1)
+ if (primary_ch_idx == RTW_SC_20_UPPER)
rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
else
rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
@@ -1543,7 +1543,7 @@ static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
rtw_warn(rtwdev, "wrong bfee role\n");
}
-static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1581,7 +1581,7 @@ static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
{0x0012,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -1714,7 +1714,7 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
{0x0003,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1787,7 +1787,7 @@ static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1905,26 +1905,26 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
trans_carddis_to_cardemu_8822b,
trans_cardemu_to_act_8822b,
NULL
};
-static struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
trans_act_to_cardemu_8822b,
trans_cardemu_to_carddis_8822b,
NULL
};
-static struct rtw_intf_phy_para usb2_param_8822b[] = {
+static const struct rtw_intf_phy_para usb2_param_8822b[] = {
{0xFFFF, 0x00,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para usb3_param_8822b[] = {
+static const struct rtw_intf_phy_para usb3_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_D,
@@ -1935,7 +1935,7 @@ static struct rtw_intf_phy_para usb3_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
+static const struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
@@ -1982,7 +1982,7 @@ static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
+static const struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
@@ -2029,7 +2029,7 @@ static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para_table phy_para_table_8822b = {
+static const struct rtw_intf_phy_para_table phy_para_table_8822b = {
.usb2_para = usb2_param_8822b,
.usb3_para = usb3_param_8822b,
.gen1_para = pcie_gen1_param_8822b,
@@ -2046,12 +2046,12 @@ static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
[5] = RTW_DEF_RFE(8822b, 5, 5),
};
-static struct rtw_hw_reg rtw8822b_dig[] = {
+static const struct rtw_hw_reg rtw8822b_dig[] = {
[0] = { .addr = 0xc50, .mask = 0x7f },
[1] = { .addr = 0xe50, .mask = 0x7f },
};
-static struct rtw_page_table page_table_8822b[] = {
+static const struct rtw_page_table page_table_8822b[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
{64, 64, 0, 0, 1},
@@ -2059,7 +2059,7 @@ static struct rtw_page_table page_table_8822b[] = {
{64, 64, 64, 64, 1},
};
-static struct rtw_rqpn rqpn_table_8822b[] = {
+static const struct rtw_rqpn rqpn_table_8822b[] = {
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
@@ -2371,6 +2371,33 @@ static const struct rtw_pwr_track_tbl rtw8822b_rtw_pwr_track_tbl = {
.pwrtrk_2g_ccka_p = rtw8822b_pwrtrk_2g_cck_a_p,
};
+static const struct rtw_reg_domain coex_info_hw_regs_8822b[] = {
+ {0xcb0, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0xcb4, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0xcba, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0xcbd, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0xc58, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0xcbd, BIT(0), RTW_REG_DOMAIN_MAC8},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0x454, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x4c, BIT(24) | BIT(23), RTW_REG_DOMAIN_MAC32},
+ {0x64, BIT(0), RTW_REG_DOMAIN_MAC8},
+ {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8},
+ {0x40, BIT(5), RTW_REG_DOMAIN_MAC8},
+ {0x1, RFREG_MASK, RTW_REG_DOMAIN_RF_B},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x953, BIT(1), RTW_REG_DOMAIN_MAC8},
+ {0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+};
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -2439,6 +2466,9 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.bt_afh_span_bw40 = 0x36,
.afh_5g_num = ARRAY_SIZE(afh_5g_8822b),
.afh_5g = afh_5g_8822b,
+
+ .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822b),
+ .coex_info_hw_regs = coex_info_hw_regs_8822b,
};
EXPORT_SYMBOL(rtw8822b_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 3865097696d4..dc07e6be38e8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -1289,6 +1289,17 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
return 0;
}
+static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable)
+{
+ if (enable) {
+ rtw_write32_mask(rtwdev, REG_RSTB, BIT_RSTB_3WIRE, 0x1);
+ rtw_write32_mask(rtwdev, REG_ANAPAR_A, BIT_ANAPAR_UPDATE, 0x1);
+ rtw_write32_mask(rtwdev, REG_ANAPAR_B, BIT_ANAPAR_UPDATE, 0x1);
+ } else {
+ rtw_write32_mask(rtwdev, REG_RSTB, BIT_RSTB_3WIRE, 0x0);
+ }
+}
+
static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
{
#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
@@ -1337,6 +1348,8 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
break;
}
+ rtw8822c_rstb_3wire(rtwdev, false);
+
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x01);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, 0x1f, 0x12);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, 0xfffff, rf_rxbb);
@@ -1349,6 +1362,8 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_reg18);
rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_reg18);
+
+ rtw8822c_rstb_3wire(rtwdev, true);
}
static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
@@ -1482,7 +1497,7 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
break;
case RTW_CHANNEL_WIDTH_40:
rtw_write32_mask(rtwdev, REG_CCKSB, BIT(4),
- (primary_ch_idx == 1 ? 1 : 0));
+ (primary_ch_idx == RTW_SC_20_UPPER ? 1 : 0));
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x5);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
@@ -3399,7 +3414,7 @@ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
-static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -3442,7 +3457,7 @@ static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
{0x0000,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
@@ -3544,6 +3559,11 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x1064,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -3551,7 +3571,7 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
{0x0093,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -3614,7 +3634,7 @@ static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -3677,47 +3697,47 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
trans_carddis_to_cardemu_8822c,
trans_cardemu_to_act_8822c,
NULL
};
-static struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
trans_act_to_cardemu_8822c,
trans_cardemu_to_carddis_8822c,
NULL
};
-static struct rtw_intf_phy_para usb2_param_8822c[] = {
+static const struct rtw_intf_phy_para usb2_param_8822c[] = {
{0xFFFF, 0x00,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para usb3_param_8822c[] = {
+static const struct rtw_intf_phy_para usb3_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
+static const struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
+static const struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para_table phy_para_table_8822c = {
+static const struct rtw_intf_phy_para_table phy_para_table_8822c = {
.usb2_para = usb2_param_8822c,
.usb3_para = usb3_param_8822c,
.gen1_para = pcie_gen1_param_8822c,
@@ -3734,12 +3754,12 @@ static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
[2] = RTW_DEF_RFE(8822c, 0, 0),
};
-static struct rtw_hw_reg rtw8822c_dig[] = {
+static const struct rtw_hw_reg rtw8822c_dig[] = {
[0] = { .addr = 0x1d70, .mask = 0x7f },
[1] = { .addr = 0x1d70, .mask = 0x7f00 },
};
-static struct rtw_page_table page_table_8822c[] = {
+static const struct rtw_page_table page_table_8822c[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
{64, 64, 0, 0, 1},
@@ -3747,7 +3767,7 @@ static struct rtw_page_table page_table_8822c[] = {
{64, 64, 64, 64, 1},
};
-static struct rtw_rqpn rqpn_table_8822c[] = {
+static const struct rtw_rqpn rqpn_table_8822c[] = {
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
@@ -4072,6 +4092,31 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = {
};
#endif
+static const struct rtw_reg_domain coex_info_hw_regs_8822c[] = {
+ {0x1860, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0x4160, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0x1c32, BIT(6), RTW_REG_DOMAIN_MAC8},
+ {0x1c38, BIT(28), RTW_REG_DOMAIN_MAC32},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0x454, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x4c, BIT(24) | BIT(23), RTW_REG_DOMAIN_MAC32},
+ {0x64, BIT(0), RTW_REG_DOMAIN_MAC8},
+ {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8},
+ {0x40, BIT(5), RTW_REG_DOMAIN_MAC8},
+ {0x1, RFREG_MASK, RTW_REG_DOMAIN_RF_B},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x953, BIT(1), RTW_REG_DOMAIN_MAC8},
+ {0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+};
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -4108,7 +4153,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.agc_tbl = &rtw8822c_agc_tbl,
.bb_tbl = &rtw8822c_bb_tbl,
.rfk_init_tbl = &rtw8822c_array_mp_cal_init_tbl,
- .rf_tbl = {&rtw8822c_rf_a_tbl, &rtw8822c_rf_b_tbl},
+ .rf_tbl = {&rtw8822c_rf_b_tbl, &rtw8822c_rf_a_tbl},
.rfe_defs = rtw8822c_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
.en_dis_dpd = true,
@@ -4148,6 +4193,9 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.bt_afh_span_bw40 = 0x36,
.afh_5g_num = ARRAY_SIZE(afh_5g_8822c),
.afh_5g = afh_5g_8822c,
+
+ .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822c),
+ .coex_info_hw_regs = coex_info_hw_regs_8822c,
};
EXPORT_SYMBOL(rtw8822c_hw_spec);
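
The rtw8822c_rstb_3wire() helper added above gates the baseband-to-RF 3-wire interface around direct RF register writes: the interface is parked before the channel/bandwidth RF writes and re-enabled afterwards together with the per-path analog-parameter update strobes (BIT_ANAPAR_UPDATE). A minimal sketch of that ordering, assuming only the names visible in the hunks above (the bulk of the RF LUT writes is elided):

	static void example_channel_rf_update(struct rtw_dev *rtwdev, u32 rf_reg18)
	{
		/* Park the 3-wire interface so direct RF writes are undisturbed. */
		rtw8822c_rstb_3wire(rtwdev, false);

		/* ... per-path RF LUT writes via rtw_write_rf() go here ... */
		rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_reg18);
		rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_reg18);

		/* Re-enable 3-wire and strobe the A/B analog-parameter updates. */
		rtw8822c_rstb_3wire(rtwdev, true);
	}
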
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index abd9f300bedd..dfd8662a0c0e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -190,6 +190,8 @@ const struct rtw_table name ## _tbl = { \
#define BIT_3WIRE_TX_EN BIT(0)
#define BIT_3WIRE_RX_EN BIT(1)
#define BIT_3WIRE_PI_ON BIT(28)
+#define REG_ANAPAR_A 0x1830
+#define BIT_ANAPAR_UPDATE BIT(29)
#define REG_RXAGCCTL0 0x18ac
#define BITS_RXAGC_CCK GENMASK(15, 12)
#define BITS_RXAGC_OFDM GENMASK(8, 4)
@@ -223,6 +225,8 @@ const struct rtw_table name ## _tbl = { \
#define BIT_CCK_BLK_EN BIT(1)
#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
#define REG_CCAMSK 0x1c80
+#define REG_RSTB 0x1c90
+#define BIT_RSTB_3WIRE BIT(8)
#define REG_RX_BREAK 0x1d2c
#define BIT_COM_RX_GCK_EN BIT(31)
#define REG_RXFNCTL 0x1d30
@@ -243,6 +247,7 @@ const struct rtw_table name ## _tbl = { \
#define REG_OFDM_TXCNT 0x2de0
#define REG_ORITXCODE2 0x4100
#define REG_3WIRE2 0x410c
+#define REG_ANAPAR_B 0x4130
#define REG_RXAGCCTL 0x41ac
#define REG_DCKB_I_0 0x41bc
#define REG_DCKB_I_1 0x41c0
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 24c39c60c99a..60989987f67b 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -6,6 +6,7 @@
#include "tx.h"
#include "fw.h"
#include "ps.h"
+#include "debug.h"
static
void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
@@ -57,6 +58,9 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
+ SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq);
+ SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq);
+ SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
@@ -220,20 +224,22 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct ieee80211_tx_control *control,
+ struct ieee80211_sta *sta,
struct sk_buff *skb)
{
pkt_info->use_rate = true;
pkt_info->rate_id = 6;
pkt_info->dis_rate_fallback = true;
+ pkt_info->dis_qselseq = true;
+ pkt_info->en_hwseq = true;
+ pkt_info->hw_ssn_sel = 0;
}
static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct ieee80211_tx_control *control,
+ struct ieee80211_sta *sta,
struct sk_buff *skb)
{
- struct ieee80211_sta *sta = control->sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct rtw_sta_info *si;
@@ -292,7 +298,7 @@ out:
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct ieee80211_tx_control *control,
+ struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -304,15 +310,15 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
u8 sec_type = 0;
bool bmc;
- if (control->sta) {
- si = (struct rtw_sta_info *)control->sta->drv_priv;
+ if (sta) {
+ si = (struct rtw_sta_info *)sta->drv_priv;
vif = si->vif;
}
if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
- rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, control, skb);
+ rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, sta, skb);
else if (ieee80211_is_data(fc))
- rtw_tx_data_pkt_info_update(rtwdev, pkt_info, control, skb);
+ rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb);
if (info->control.hw_key) {
struct ieee80211_key_conf *key = info->control.hw_key;
@@ -368,15 +374,74 @@ void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
pkt_info->ls = true;
}
+struct sk_buff *
+rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *buf, u32 size)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb;
+ u32 tx_pkt_desc_sz;
+ u32 length;
+
+ tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
+ length = size + tx_pkt_desc_sz;
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to alloc write data rsvd page skb\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, tx_pkt_desc_sz);
+ skb_put_data(skb, buf, size);
+ pkt_info->tx_pkt_size = size;
+ pkt_info->offset = tx_pkt_desc_sz;
+
+ return skb;
+}
+EXPORT_SYMBOL(rtw_tx_write_data_rsvd_page_get);
+
+struct sk_buff *
+rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *buf, u32 size)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb;
+ u32 tx_pkt_desc_sz;
+ u32 length;
+
+ tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
+ length = size + tx_pkt_desc_sz;
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to alloc write data h2c skb\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, tx_pkt_desc_sz);
+ skb_put_data(skb, buf, size);
+ pkt_info->tx_pkt_size = size;
+
+ return skb;
+}
+EXPORT_SYMBOL(rtw_tx_write_data_h2c_get);
+
void rtw_tx(struct rtw_dev *rtwdev,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct rtw_tx_pkt_info pkt_info = {0};
+ int ret;
- rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
- if (rtw_hci_tx(rtwdev, &pkt_info, skb))
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, control->sta, skb);
+ ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write TX skb to HCI\n");
goto out;
+ }
+
+ rtw_hci_tx_kick_off(rtwdev);
return;
@@ -416,38 +481,62 @@ static void rtw_txq_check_agg(struct rtw_dev *rtwdev,
ieee80211_queue_work(rtwdev->hw, &rtwdev->ba_work);
}
-static bool rtw_txq_dequeue(struct rtw_dev *rtwdev,
- struct rtw_txq *rtwtxq)
+static int rtw_txq_push_skb(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq,
+ struct sk_buff *skb)
{
struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
- struct ieee80211_tx_control control;
- struct sk_buff *skb;
-
- skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
- if (!skb)
- return false;
+ struct rtw_tx_pkt_info pkt_info = {0};
+ int ret;
rtw_txq_check_agg(rtwdev, rtwtxq, skb);
- control.sta = txq->sta;
- rtw_tx(rtwdev, &control, skb);
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, txq->sta, skb);
+ ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write TX skb to HCI\n");
+ return ret;
+ }
rtwtxq->last_push = jiffies;
- return true;
+ return 0;
+}
+
+static struct sk_buff *rtw_txq_dequeue(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq)
+{
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+ struct sk_buff *skb;
+
+ skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
+ if (!skb)
+ return NULL;
+
+ return skb;
}
static void rtw_txq_push(struct rtw_dev *rtwdev,
struct rtw_txq *rtwtxq,
unsigned long frames)
{
+ struct sk_buff *skb;
+ int ret;
int i;
rcu_read_lock();
- for (i = 0; i < frames; i++)
- if (!rtw_txq_dequeue(rtwdev, rtwtxq))
+ for (i = 0; i < frames; i++) {
+ skb = rtw_txq_dequeue(rtwdev, rtwtxq);
+ if (!skb)
break;
+ ret = rtw_txq_push_skb(rtwdev, rtwtxq, skb);
+ if (ret) {
+ rtw_err(rtwdev, "failed to pusk skb, ret %d\n", ret);
+ break;
+ }
+ }
+
rcu_read_unlock();
}
@@ -469,6 +558,8 @@ void rtw_tx_tasklet(unsigned long data)
list_del_init(&rtwtxq->list);
}
+ rtw_hci_tx_kick_off(rtwdev);
+
spin_unlock_bh(&rtwdev->txq_lock);
}
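
The tx.c refactor above splits the old single-shot rtw_hci_tx() into rtw_hci_tx_write(), which queues one frame's descriptor, and rtw_hci_tx_kick_off(), which starts DMA once for everything queued. A hedged sketch of the resulting batched pattern, using only names from the hunks above:

	struct sk_buff *skb;
	int i;

	for (i = 0; i < frames; i++) {
		skb = rtw_txq_dequeue(rtwdev, rtwtxq);
		if (!skb)
			break;
		if (rtw_txq_push_skb(rtwdev, rtwtxq, skb))
			break;			/* rtw_hci_tx_write() failed */
	}
	rtw_hci_tx_kick_off(rtwdev);		/* one kick per batch, not per frame */
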
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 9ca4f74a501b..b973de0f4dc0 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -53,6 +53,12 @@
le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(19))
#define SET_TX_DESC_SW_DEFINE(tx_desc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x06, value, GENMASK(11, 0))
+#define SET_TX_DESC_DISQSELSEQ(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, BIT(31))
+#define SET_TX_DESC_EN_HWSEQ(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x08, value, BIT(15))
+#define SET_TX_DESC_HW_SSN_SEL(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(7, 6))
enum rtw_tx_desc_queue_select {
TX_DESC_QSEL_TID0 = 0,
@@ -85,7 +91,7 @@ void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
void rtw_tx_tasklet(unsigned long data);
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct ieee80211_tx_control *control,
+ struct ieee80211_sta *sta,
struct sk_buff *skb);
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
@@ -93,5 +99,13 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb);
+struct sk_buff *
+rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *buf, u32 size);
+struct sk_buff *
+rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *buf, u32 size);
#endif
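
The SET_TX_DESC_* macros above are thin wrappers around le32p_replace_bits() from <linux/bitfield.h>, which read-modify-writes only the masked field of one little-endian dword in the descriptor. A small illustration (the descriptor length and field values are placeholders):

	#include <linux/bitfield.h>

	__le32 txdesc[12] = { 0 };	/* hypothetical descriptor buffer */

	le32p_replace_bits(&txdesc[0], 1, BIT(31));		/* DISQSELSEQ */
	le32p_replace_bits(&txdesc[8], 1, BIT(15));		/* EN_HWSEQ */
	le32p_replace_bits(&txdesc[3], 2, GENMASK(7, 6));	/* HW_SSN_SEL */
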
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
index 4820dca958dd..2fcdf70a3a77 100644
--- a/drivers/net/wireless/realtek/rtw88/wow.c
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -431,50 +431,39 @@ static void rtw_wow_fw_media_status(struct rtw_dev *rtwdev, bool connect)
rtw_iterate_stas_atomic(rtwdev, rtw_wow_fw_media_status_iter, &data);
}
-static void rtw_wow_config_pno_rsvd_page(struct rtw_dev *rtwdev)
+static void rtw_wow_config_pno_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
{
- struct rtw_wow_param *rtw_wow = &rtwdev->wow;
- struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
- struct cfg80211_ssid *ssid;
- int i;
-
- for (i = 0 ; i < rtw_pno_req->match_set_cnt; i++) {
- ssid = &rtw_pno_req->match_sets[i].ssid;
- rtw_add_rsvd_page_probe_req(rtwdev, ssid);
- }
- rtw_add_rsvd_page_probe_req(rtwdev, NULL);
- rtw_add_rsvd_page(rtwdev, RSVD_NLO_INFO, false);
- rtw_add_rsvd_page(rtwdev, RSVD_CH_INFO, true);
+ rtw_add_rsvd_page_pno(rtwdev, rtwvif);
}
-static void rtw_wow_config_linked_rsvd_page(struct rtw_dev *rtwdev)
+static void rtw_wow_config_linked_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
{
- rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
- rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
+ rtw_add_rsvd_page_sta(rtwdev, rtwvif);
}
-static void rtw_wow_config_rsvd_page(struct rtw_dev *rtwdev)
+static void rtw_wow_config_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
{
- rtw_reset_rsvd_page(rtwdev);
+ rtw_remove_rsvd_page(rtwdev, rtwvif);
if (rtw_wow_mgd_linked(rtwdev)) {
- rtw_wow_config_linked_rsvd_page(rtwdev);
+ rtw_wow_config_linked_rsvd_page(rtwdev, rtwvif);
} else if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags) &&
rtw_wow_no_link(rtwdev)) {
- rtw_wow_config_pno_rsvd_page(rtwdev);
+ rtw_wow_config_pno_rsvd_page(rtwdev, rtwvif);
}
}
static int rtw_wow_dl_fw_rsvd_page(struct rtw_dev *rtwdev)
{
struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
- rtw_wow_config_rsvd_page(rtwdev);
+ rtw_wow_config_rsvd_page(rtwdev, rtwvif);
- return rtw_fw_download_rsvd_page(rtwdev, wow_vif);
+ return rtw_fw_download_rsvd_page(rtwdev);
}
static int rtw_wow_swap_fw(struct rtw_dev *rtwdev, enum rtw_fw_type type)
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 1bebba4e8527..5d6143a55187 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1468,12 +1468,15 @@ static void rsi_shutdown(struct device *dev)
struct rsi_91x_sdiodev *sdev =
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
struct ieee80211_hw *hw = adapter->hw;
- struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
rsi_dbg(ERR_ZONE, "SDIO Bus shutdown =====>\n");
- if (rsi_config_wowlan(adapter, wowlan))
- rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ if (hw) {
+ struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+
+ if (rsi_config_wowlan(adapter, wowlan))
+ rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ }
if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 &&
adapter->priv->bt_adapter) {
diff --git a/drivers/net/wireless/st/cw1200/wsm.h b/drivers/net/wireless/st/cw1200/wsm.h
index ddea57f8c8ab..1ffa47994bb9 100644
--- a/drivers/net/wireless/st/cw1200/wsm.h
+++ b/drivers/net/wireless/st/cw1200/wsm.h
@@ -1623,7 +1623,7 @@ struct wsm_p2p_device_info {
u8 local_devname[D11_MAX_SSID_LEN];
u8 reserved2[3];
u8 num_secdev_supported;
- struct wsm_p2p_device_type secdevs[0];
+ struct wsm_p2p_device_type secdevs[];
} __packed;
/* 4.36 SetWCDMABand - WO */
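
The secdevs[0] to secdevs[] change above (and the matching conversions in the wl1251, wlcore, wl3501, zd1211rw and NFC headers below) replaces the GNU zero-length-array idiom with a C99 flexible array member, which compilers and fortify checks understand. Allocation is unchanged in spirit; a hedged sketch using struct_size() from <linux/overflow.h>, where num_secdev is a placeholder count:

	struct wsm_p2p_device_info *info;

	info = kzalloc(struct_size(info, secdevs, num_secdev), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->num_secdev_supported = num_secdev;
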
diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index 1c1a591c6055..e5874186f9d7 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
@@ -90,7 +90,7 @@ struct wl1251_cmd_header {
u16 id;
u16 status;
/* payload */
- u8 data[0];
+ u8 data[];
} __packed;
struct wl1251_command {
@@ -281,7 +281,7 @@ struct wl1251_cmd_packet_template {
struct wl1251_cmd_header header;
__le16 size;
- u8 data[0];
+ u8 data[];
} __packed;
#define TIM_ELE_ID 5
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 94569cd695c8..c9a4e9a43400 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -15,9 +15,7 @@
#include <linux/wl12xx.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
-#include <linux/gpio.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include "wl1251.h"
@@ -160,15 +158,6 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
int ret;
if (enable) {
- /*
- * Power is controlled by runtime PM, but we still call board
- * callback in case it wants to do any additional setup,
- * for example enabling clock buffer for the module.
- */
- if (gpio_is_valid(wl->power_gpio))
- gpio_set_value(wl->power_gpio, true);
-
-
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0) {
pm_runtime_put_sync(&func->dev);
@@ -186,9 +175,6 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
ret = pm_runtime_put_sync(&func->dev);
if (ret < 0)
goto out;
-
- if (gpio_is_valid(wl->power_gpio))
- gpio_set_value(wl->power_gpio, false);
}
out:
@@ -241,31 +227,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl1251_board_data = wl1251_get_platform_data();
if (!IS_ERR(wl1251_board_data)) {
- wl->power_gpio = wl1251_board_data->power_gpio;
wl->irq = wl1251_board_data->irq;
wl->use_eeprom = wl1251_board_data->use_eeprom;
} else if (np) {
- wl->use_eeprom = of_property_read_bool(np,
- "ti,wl1251-has-eeprom");
- wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
wl->irq = of_irq_get(np, 0);
-
- if (wl->power_gpio == -EPROBE_DEFER ||
- wl->irq == -EPROBE_DEFER) {
+ if (wl->irq == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto disable;
}
}
- if (gpio_is_valid(wl->power_gpio)) {
- ret = devm_gpio_request(&func->dev, wl->power_gpio,
- "wl1251 power");
- if (ret) {
- wl1251_error("Failed to request gpio: %d\n", ret);
- goto disable;
- }
- }
-
if (wl->irq) {
irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
diff --git a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
index 7fabe702c4cc..7e28fe435b43 100644
--- a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
@@ -65,7 +65,7 @@ struct ieee80211_header {
u8 sa[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct wl12xx_ie_header {
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index a265fba0cb4c..c725f5855c13 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -938,7 +938,7 @@ struct acx_rx_filter_cfg {
u8 action;
u8 num_fields;
- u8 fields[0];
+ u8 fields[];
} __packed;
struct acx_roaming_stats {
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
index 14b367e98dce..24a2dfcb41ea 100644
--- a/drivers/net/wireless/ti/wlcore/boot.h
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -26,7 +26,7 @@ struct wl1271_static_data {
u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
u32 hw_version;
u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
- u8 priv[0];
+ u8 priv[];
};
/* number of times we try to read the INIT interrupt */
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index bfad7b5a1ac6..f2609d5b6bf7 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -209,7 +209,7 @@ struct wl1271_cmd_header {
__le16 id;
__le16 status;
/* payload */
- u8 data[0];
+ u8 data[];
} __packed;
#define WL1271_CMD_MAX_PARAMS 572
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 6116383ee248..31be425f2332 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1150,7 +1150,7 @@ struct wlcore_conf {
struct wlcore_conf_file {
struct wlcore_conf_header header;
struct wlcore_conf core;
- u8 priv[0];
+ u8 priv[];
} __packed;
#endif
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
index 181be725eff8..1dd7ecc11f86 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
@@ -66,7 +66,7 @@ struct ieee80211_header {
u8 sa[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct wl12xx_ie_header {
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 01305ba2d3aa..c878097f0dda 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -436,10 +436,18 @@ static int virt_wifi_net_device_stop(struct net_device *dev)
return 0;
}
+static int virt_wifi_net_device_get_iflink(const struct net_device *dev)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+
+ return priv->lowerdev->ifindex;
+}
+
static const struct net_device_ops virt_wifi_ops = {
.ndo_start_xmit = virt_wifi_start_xmit,
- .ndo_open = virt_wifi_net_device_open,
- .ndo_stop = virt_wifi_net_device_stop,
+ .ndo_open = virt_wifi_net_device_open,
+ .ndo_stop = virt_wifi_net_device_stop,
+ .ndo_get_iflink = virt_wifi_net_device_get_iflink,
};
/* Invoked as part of rtnl lock release. */
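
Wiring up .ndo_get_iflink above lets the net core report the backing device; a sketch of the effect, where virt_dev is a hypothetical virt_wifi netdev:

	/* Returns priv->lowerdev->ifindex instead of the default self-index. */
	int iflink = dev_get_iflink(virt_dev);
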
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index efdce9ae36ea..b446cb369557 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -231,7 +231,7 @@ struct iw_mgmt_info_element {
u8 id; /* one of enum iw_mgmt_info_element_ids,
but sizeof(enum) > sizeof(u8) :-( */
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct iw_mgmt_essid_pset {
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.h b/drivers/net/wireless/zydas/zd1211rw/zd_usb.h
index a52ee323a142..8f03b09a602c 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.h
@@ -69,7 +69,7 @@ enum control_requests {
struct usb_req_read_regs {
__le16 id;
- __le16 addr[0];
+ __le16 addr[];
} __packed;
struct reg_data {
@@ -79,7 +79,7 @@ struct reg_data {
struct usb_req_write_regs {
__le16 id;
- struct reg_data reg_writes[0];
+ struct reg_data reg_writes[];
} __packed;
enum {
@@ -95,7 +95,7 @@ struct usb_req_rfwrite {
/* 2: other (default) */
__le16 bits;
/* RF2595: 24 */
- __le16 bit_values[0];
+ __le16 bit_values[];
/* (ZD_CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
} __packed;
@@ -118,7 +118,7 @@ struct usb_int_header {
struct usb_int_regs {
struct usb_int_header hdr;
- struct reg_data regs[0];
+ struct reg_data regs[];
} __packed;
struct usb_int_retry_fail {
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index ed2123129e0e..4dc7bd7e02b6 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -76,7 +76,7 @@ static u8 nci_core_get_config_otp_ram_version[5] = {
struct nci_core_get_config_rsp {
u8 status;
u8 count;
- u8 data[0];
+ u8 data[];
};
static int fdp_nci_create_conn(struct nci_dev *ndev)
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 60acdfd1cb8c..0b9ca6d20ffa 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -66,7 +66,7 @@ struct st21nfca_atr_req {
u8 bsi;
u8 bri;
u8 ppi;
- u8 gbi[0];
+ u8 gbi[];
} __packed;
struct st21nfca_atr_res {
@@ -79,7 +79,7 @@ struct st21nfca_atr_res {
u8 bri;
u8 to;
u8 ppi;
- u8 gbi[0];
+ u8 gbi[];
} __packed;
struct st21nfca_psl_req {
@@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev,
memcpy(atr_res->gbi, atr_req->gbi, gb_len);
r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi,
gb_len);
- if (r < 0)
+ if (r < 0) {
+ kfree_skb(skb);
return r;
+ }
}
info->dep_info.curr_nfc_dep_pni = 0;
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index e52b300b2f5b..9e310e1ad4d0 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -195,26 +195,117 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return 0;
}
-static int amd_link_is_up(struct amd_ntb_dev *ndev)
+static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
{
- if (!ndev->peer_sta)
- return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);
+ struct pci_dev *pdev = NULL;
+ struct pci_dev *pci_swds = NULL;
+ struct pci_dev *pci_swus = NULL;
+ u32 stat;
+ int rc;
- if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
- ndev->peer_sta = 0;
- return 1;
+ if (ndev->ntb.topo == NTB_TOPO_SEC) {
+ /* Locate the pointer to Downstream Switch for this device */
+ pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
+ if (pci_swds) {
+ /*
+ * Locate the pointer to Upstream Switch for
+ * the Downstream Switch.
+ */
+ pci_swus = pci_upstream_bridge(pci_swds);
+ if (pci_swus) {
+ rc = pcie_capability_read_dword(pci_swus,
+ PCI_EXP_LNKCTL,
+ &stat);
+ if (rc)
+ return 0;
+ } else {
+ return 0;
+ }
+ } else {
+ return 0;
+ }
+ } else if (ndev->ntb.topo == NTB_TOPO_PRI) {
+ /*
+ * For NTB primary, we simply read the Link Status and control
+ * register of the NTB device itself.
+ */
+ pdev = ndev->ntb.pdev;
+ rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
+ if (rc)
+ return 0;
+ } else {
+ /* Catch all for everything else */
+ return 0;
}
- /* If peer_sta is reset or D0 event, the ISR has
- * started a timer to check link status of hardware.
- * So here just clear status bit. And if peer_sta is
- * D3 or PME_TO, D0/reset event will be happened when
- * system wakeup/poweron, so do nothing here.
+ ndev->lnk_sta = stat;
+
+ return 1;
+}
+
+static int amd_link_is_up(struct amd_ntb_dev *ndev)
+{
+ int ret;
+
+ /*
+ * We consider the link to be up under two conditions:
+ *
+ * - When a link-up event is received. This is indicated by
+ * AMD_LINK_UP_EVENT set in peer_sta.
+ * - When the drivers on both sides of the link have been loaded.
+ * This is indicated by bit 1 being set in the peer
+ * SIDEINFO register.
+ *
+ * This function should return 1 when the latter of the above
+ * two conditions is true.
+ *
+ * Now consider the sequence of events - Link-Up event occurs,
+ * then the peer side driver loads. In this case, we would have
+ * received LINK_UP event and bit 1 of peer SIDEINFO is also
+ * set. What happens now if the link goes down? Bit 1 of
+ * peer SIDEINFO remains set, but LINK_DOWN bit is set in
+ * peer_sta. So we should return 0 from this function. Not only
+ * that, we clear bit 1 of peer SIDEINFO to 0, since the peer
+ * side driver did not even get a chance to clear it before
+ * the link went down. This can happen in the case of a surprise
+ * link removal.
+ *
+ * LINK_UP event will always occur before the peer side driver
+ * gets loaded the very first time. So there can be a case when
+ * the LINK_UP event has occurred, but the peer side driver hasn't
+ * yet loaded. We return 0 in that case.
+ *
+ * There is also a special case when the primary side driver is
+ * unloaded and then loaded again. Since there is no change in
+ * the status of NTB secondary in this case, there is no Link-Up
+ * or Link-Down notification received. We recognize this condition
+ * with peer_sta being set to 0.
+ *
+ * If bit 1 of peer SIDEINFO register is not set, then we
+ * simply return 0 irrespective of the link up or down status
+ * set in peer_sta.
*/
- if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
- ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
- else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
- ndev->peer_sta = 0;
+ ret = amd_poll_link(ndev);
+ if (ret) {
+ /*
+ * We need to check the below only for NTB primary. For NTB
+ * secondary, simply checking the result of PSIDE_INFO
+ * register will suffice.
+ */
+ if (ndev->ntb.topo == NTB_TOPO_PRI) {
+ if ((ndev->peer_sta & AMD_LINK_UP_EVENT) ||
+ (ndev->peer_sta == 0))
+ return ret;
+ else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) {
+ /* Clear peer sideinfo register */
+ amd_clear_side_info_reg(ndev, true);
+
+ return 0;
+ }
+ } else { /* NTB_TOPO_SEC */
+ return ret;
+ }
+ }
return 0;
}
@@ -253,7 +344,6 @@ static int amd_ntb_link_enable(struct ntb_dev *ntb,
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
- u32 ntb_ctl;
/* Enable event interrupt */
ndev->int_mask &= ~AMD_EVENT_INTMASK;
@@ -263,10 +353,6 @@ static int amd_ntb_link_enable(struct ntb_dev *ntb,
return -EINVAL;
dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
- ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
- ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
- writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
-
return 0;
}
@@ -274,7 +360,6 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
- u32 ntb_ctl;
/* Disable event interrupt */
ndev->int_mask |= AMD_EVENT_INTMASK;
@@ -284,10 +369,6 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb)
return -EINVAL;
dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
- ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
- ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
- writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
-
return 0;
}
@@ -493,8 +574,6 @@ static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
reg = readl(mmio + AMD_SMUACK_OFFSET);
reg |= bit;
writel(reg, mmio + AMD_SMUACK_OFFSET);
-
- ndev->peer_sta |= bit;
}
static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
@@ -512,10 +591,16 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
status &= AMD_EVENT_INTMASK;
switch (status) {
case AMD_PEER_FLUSH_EVENT:
+ ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
dev_info(dev, "Flush is done.\n");
break;
case AMD_PEER_RESET_EVENT:
- amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);
+ case AMD_LINK_DOWN_EVENT:
+ ndev->peer_sta |= status;
+ if (status == AMD_LINK_DOWN_EVENT)
+ ndev->peer_sta &= ~AMD_LINK_UP_EVENT;
+
+ amd_ack_smu(ndev, status);
/* link down first */
ntb_link_event(&ndev->ntb);
@@ -526,7 +611,12 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
case AMD_PEER_D3_EVENT:
case AMD_PEER_PMETO_EVENT:
case AMD_LINK_UP_EVENT:
- case AMD_LINK_DOWN_EVENT:
+ ndev->peer_sta |= status;
+ if (status == AMD_LINK_UP_EVENT)
+ ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
+ else if (status == AMD_PEER_D3_EVENT)
+ ndev->peer_sta &= ~AMD_PEER_D0_EVENT;
+
amd_ack_smu(ndev, status);
/* link down */
@@ -540,6 +630,8 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
if (status & 0x1)
dev_info(dev, "Wakeup is done.\n");
+ ndev->peer_sta |= AMD_PEER_D0_EVENT;
+ ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
amd_ack_smu(ndev, AMD_PEER_D0_EVENT);
/* start a timer to poll link status */
@@ -550,6 +642,39 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
dev_info(dev, "event status = 0x%x.\n", status);
break;
}
+
+ /* Clear the interrupt status */
+ writel(status, mmio + AMD_INTSTAT_OFFSET);
+}
+
+static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec)
+{
+ struct device *dev = &ndev->ntb.pdev->dev;
+ u64 status;
+
+ status = amd_ntb_db_read(&ndev->ntb);
+
+ dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec);
+
+ /*
+ * Since the highest-order DB bit was reserved for signaling the
+ * peer of a special event, it is the only status bit we need to
+ * be concerned with here.
+ */
+ if (status & BIT(ndev->db_last_bit)) {
+ ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit));
+ /* send link down event notification */
+ ntb_link_event(&ndev->ntb);
+
+ /*
+ * Reaching here means the peer has signalled the special event
+ * indicating that its driver has been unloaded. Since there is
+ * a chance that the peer will load its driver again later, we
+ * schedule the link-polling routine.
+ */
+ schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
+ }
}
static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
@@ -559,8 +684,10 @@ static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
amd_handle_event(ndev, vec);
- if (vec < AMD_DB_CNT)
+ if (vec < AMD_DB_CNT) {
+ amd_handle_db_event(ndev, vec);
ntb_db_event(&ndev->ntb, vec);
+ }
return IRQ_HANDLED;
}
@@ -842,26 +969,18 @@ static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
static int amd_poll_link(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->peer_mmio;
- u32 reg, stat;
- int rc;
+ u32 reg;
reg = readl(mmio + AMD_SIDEINFO_OFFSET);
- reg &= NTB_LIN_STA_ACTIVE_BIT;
+ reg &= AMD_SIDE_READY;
dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);
- if (reg == ndev->cntl_sta)
- return 0;
-
ndev->cntl_sta = reg;
- rc = pci_read_config_dword(ndev->ntb.pdev,
- AMD_LINK_STATUS_OFFSET, &stat);
- if (rc)
- return 0;
- ndev->lnk_sta = stat;
+ amd_ntb_get_link_status(ndev);
- return 1;
+ return ndev->cntl_sta;
}
static void amd_link_hb(struct work_struct *work)
@@ -880,11 +999,16 @@ static int amd_init_isr(struct amd_ntb_dev *ndev)
return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
}
-static void amd_init_side_info(struct amd_ntb_dev *ndev)
+static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
- void __iomem *mmio = ndev->self_mmio;
+ void __iomem *mmio = NULL;
unsigned int reg;
+ if (peer)
+ mmio = ndev->peer_mmio;
+ else
+ mmio = ndev->self_mmio;
+
reg = readl(mmio + AMD_SIDEINFO_OFFSET);
if (!(reg & AMD_SIDE_READY)) {
reg |= AMD_SIDE_READY;
@@ -892,11 +1016,16 @@ static void amd_init_side_info(struct amd_ntb_dev *ndev)
}
}
-static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
- void __iomem *mmio = ndev->self_mmio;
+ void __iomem *mmio = NULL;
unsigned int reg;
+ if (peer)
+ mmio = ndev->peer_mmio;
+ else
+ mmio = ndev->self_mmio;
+
reg = readl(mmio + AMD_SIDEINFO_OFFSET);
if (reg & AMD_SIDE_READY) {
reg &= ~AMD_SIDE_READY;
@@ -905,6 +1034,30 @@ static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
}
}
+static void amd_init_side_info(struct amd_ntb_dev *ndev)
+{
+ void __iomem *mmio = ndev->self_mmio;
+ u32 ntb_ctl;
+
+ amd_set_side_info_reg(ndev, false);
+
+ ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+ ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
+ writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+}
+
+static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
+{
+ void __iomem *mmio = ndev->self_mmio;
+ u32 ntb_ctl;
+
+ amd_clear_side_info_reg(ndev, false);
+
+ ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
+ ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
+ writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
+}
+
static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->self_mmio;
@@ -935,8 +1088,6 @@ static int amd_init_ntb(struct amd_ntb_dev *ndev)
return -EINVAL;
}
- ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
-
/* Mask event interrupts */
writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
@@ -957,6 +1108,7 @@ static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
static int amd_init_dev(struct amd_ntb_dev *ndev)
{
+ void __iomem *mmio = ndev->self_mmio;
struct pci_dev *pdev;
int rc = 0;
@@ -977,6 +1129,25 @@ static int amd_init_dev(struct amd_ntb_dev *ndev)
}
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+ /*
+ * We reserve the highest-order bit of the DB register; it will be
+ * used to notify the peer when the driver on this side is being
+ * unloaded.
+ */
+ ndev->db_last_bit =
+ find_last_bit((unsigned long *)&ndev->db_valid_mask,
+ hweight64(ndev->db_valid_mask));
+ writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET);
+ /*
+ * Since there is now one less bit to account for, the DB count
+ * and DB mask should be adjusted accordingly.
+ */
+ ndev->db_count -= 1;
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+ /* Enable Link-Up and Link-Down event interrupts */
+ ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT);
+ writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
return 0;
}
@@ -1111,9 +1282,31 @@ static void amd_ntb_pci_remove(struct pci_dev *pdev)
{
struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
+ /*
+ * Clear the READY bit in SIDEINFO register before sending DB event
+ * to the peer. This will make sure that when the peer handles the
+ * DB event, it correctly reads this bit as being 0.
+ */
+ amd_deinit_side_info(ndev);
+ ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
+ amd_deinit_dev(ndev);
+ amd_ntb_deinit_pci(ndev);
+ kfree(ndev);
+}
+
+static void amd_ntb_pci_shutdown(struct pci_dev *pdev)
+{
+ struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+ /* Send link down notification */
+ ntb_link_event(&ndev->ntb);
+
amd_deinit_side_info(ndev);
+ ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
+ ntb_unregister_device(&ndev->ntb);
+ ndev_deinit_debugfs(ndev);
amd_deinit_dev(ndev);
amd_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -1149,6 +1342,7 @@ static struct pci_driver amd_ntb_pci_driver = {
.id_table = amd_ntb_pci_tbl,
.probe = amd_ntb_pci_probe,
.remove = amd_ntb_pci_remove,
+ .shutdown = amd_ntb_pci_shutdown,
};
static int __init amd_ntb_pci_driver_init(void)
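
The long comment in amd_link_is_up() above reduces, for the primary side, to a small decision table (SIDEINFO ready meaning the peer driver has set its READY bit):

	/*
	 *  peer SIDEINFO ready | peer_sta                | link up?
	 *  --------------------+-------------------------+---------
	 *  no                  | anything                | no
	 *  yes                 | AMD_LINK_UP_EVENT or 0  | yes
	 *  yes                 | AMD_LINK_DOWN_EVENT     | no, and the peer
	 *                      |                         | SIDEINFO bit is cleared
	 */
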
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h
index 139a307147bc..5f337b1572a0 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.h
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -53,11 +53,8 @@
#include <linux/pci.h>
#define AMD_LINK_HB_TIMEOUT msecs_to_jiffies(1000)
-#define AMD_LINK_STATUS_OFFSET 0x68
-#define NTB_LIN_STA_ACTIVE_BIT 0x00000002
#define NTB_LNK_STA_SPEED_MASK 0x000F0000
#define NTB_LNK_STA_WIDTH_MASK 0x03F00000
-#define NTB_LNK_STA_ACTIVE(x) (!!((x) & NTB_LIN_STA_ACTIVE_BIT))
#define NTB_LNK_STA_SPEED(x) (((x) & NTB_LNK_STA_SPEED_MASK) >> 16)
#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 20)
@@ -196,6 +193,7 @@ struct amd_ntb_dev {
u64 db_valid_mask;
u64 db_mask;
+ u64 db_last_bit;
u32 int_mask;
struct msix_entry *msix;
@@ -218,4 +216,8 @@ struct amd_ntb_dev {
#define ntb_ndev(__ntb) container_of(__ntb, struct amd_ntb_dev, ntb)
#define hb_ndev(__work) container_of(__work, struct amd_ntb_dev, hb_timer.work)
+static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer);
+static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer);
+static int amd_poll_link(struct amd_ntb_dev *ndev);
+
#endif
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index dcf234680535..edae52384b8a 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2674,8 +2674,8 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
ret = pci_enable_pcie_error_reporting(pdev);
if (ret != 0)
dev_warn(&pdev->dev, "PCIe AER capability disabled\n");
- else /* Cleanup uncorrectable error status before getting to init */
- pci_cleanup_aer_uncorrect_error_status(pdev);
+ else /* Cleanup nonfatal error status before getting to init */
+ pci_aer_clear_nonfatal_status(pdev);
/* First enable the PCI device */
ret = pcim_enable_device(pdev);
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 86ffa716eaf2..4c6eb61a6ac6 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -285,7 +285,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
if (widx >= switchtec_ntb_mw_count(ntb, pidx))
return -EINVAL;
- if (xlate_pos < 12)
+ if (size != 0 && xlate_pos < 12)
return -EINVAL;
if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 00a5d5764993..e6d1f5b298f3 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -481,70 +481,70 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
return -ENOMEM;
out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"\nNTB QP stats:\n\n");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_bytes - \t%llu\n", qp->rx_bytes);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_pkts - \t%llu\n", qp->rx_pkts);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_memcpy - \t%llu\n", qp->rx_memcpy);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_async - \t%llu\n", qp->rx_async);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_ring_empty - %llu\n", qp->rx_ring_empty);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_err_ver - \t%llu\n", qp->rx_err_ver);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_buff - \t0x%p\n", qp->rx_buff);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_index - \t%u\n", qp->rx_index);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_max_entry - \t%u\n", qp->rx_max_entry);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_bytes - \t%llu\n", qp->tx_bytes);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_pkts - \t%llu\n", qp->tx_pkts);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_memcpy - \t%llu\n", qp->tx_memcpy);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_async - \t%llu\n", qp->tx_async);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_ring_full - \t%llu\n", qp->tx_ring_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_mw - \t0x%p\n", qp->tx_mw);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_index (H) - \t%u\n", qp->tx_index);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"RRI (T) - \t%u\n",
qp->remote_rx_info->entry);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_max_entry - \t%u\n", qp->tx_max_entry);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"free tx - \t%u\n",
ntb_transport_tx_free_entry(qp));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"\n");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Using TX DMA - \t%s\n",
qp->tx_dma_chan ? "Yes" : "No");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Using RX DMA - \t%s\n",
qp->rx_dma_chan ? "Yes" : "No");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"QP Link - \t%s\n",
qp->link_is_up ? "Up" : "Down");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"\n");
if (out_offset > out_count)
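
The wholesale snprintf() to scnprintf() conversion above matters because snprintf() returns the length the output would have had, while scnprintf() returns the bytes actually stored. With chained offset arithmetic, the former can push the offset past the buffer; a minimal illustration:

	char buf[8];
	int off = 0;

	off += snprintf(buf + off, sizeof(buf) - off, "0123456789");
	/* off == 10 > sizeof(buf): the next call's size argument wraps to a
	 * huge size_t and buf + off points past the allocation. */

	off = scnprintf(buf, sizeof(buf), "0123456789");
	/* off == 7: bytes actually stored, excluding the terminating NUL. */
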
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index e9b7c2dfc730..972f6d984f6d 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -149,7 +149,8 @@ struct perf_peer {
u64 outbuf_xlat;
resource_size_t outbuf_size;
void __iomem *outbuf;
-
+ phys_addr_t out_phys_addr;
+ dma_addr_t dma_dst_addr;
/* Inbound MW params */
dma_addr_t inbuf_xlat;
resource_size_t inbuf_size;
@@ -782,6 +783,10 @@ static int perf_copy_chunk(struct perf_thread *pthr,
struct dmaengine_unmap_data *unmap;
struct device *dma_dev;
int try = 0, ret = 0;
+ struct perf_peer *peer = pthr->perf->test_peer;
+ void __iomem *vbase;
+ void __iomem *dst_vaddr;
+ dma_addr_t dst_dma_addr;
if (!use_dma) {
memcpy_toio(dst, src, len);
@@ -794,6 +799,10 @@ static int perf_copy_chunk(struct perf_thread *pthr,
offset_in_page(dst), len))
return -EIO;
+ vbase = peer->outbuf;
+ dst_vaddr = dst;
+ dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);
+
unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;
@@ -807,8 +816,7 @@ static int perf_copy_chunk(struct perf_thread *pthr,
}
unmap->to_cnt = 1;
- unmap->addr[1] = dma_map_page(dma_dev, virt_to_page(dst),
- offset_in_page(dst), len, DMA_FROM_DEVICE);
+ unmap->addr[1] = dst_dma_addr;
if (dma_mapping_error(dma_dev, unmap->addr[1])) {
ret = -EIO;
goto err_free_resource;
@@ -865,6 +873,7 @@ static int perf_init_test(struct perf_thread *pthr)
{
struct perf_ctx *perf = pthr->perf;
dma_cap_mask_t dma_mask;
+ struct perf_peer *peer = pthr->perf->test_peer;
pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
dev_to_node(&perf->ntb->dev));
@@ -882,15 +891,33 @@ static int perf_init_test(struct perf_thread *pthr)
if (!pthr->dma_chan) {
dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
pthr->tidx);
- atomic_dec(&perf->tsync);
- wake_up(&perf->twait);
- kfree(pthr->src);
- return -ENODEV;
+ goto err_free;
}
+ peer->dma_dst_addr =
+ dma_map_resource(pthr->dma_chan->device->dev,
+ peer->out_phys_addr, peer->outbuf_size,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(pthr->dma_chan->device->dev,
+ peer->dma_dst_addr)) {
+ dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n",
+ pthr->tidx);
+ peer->dma_dst_addr = 0;
+ dma_release_channel(pthr->dma_chan);
+ goto err_free;
+ }
+ dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n",
+ pthr->tidx,
+ &peer->out_phys_addr,
+ &peer->dma_dst_addr);
atomic_set(&pthr->dma_sync, 0);
-
return 0;
+
+err_free:
+ atomic_dec(&perf->tsync);
+ wake_up(&perf->twait);
+ kfree(pthr->src);
+ return -ENODEV;
}
static int perf_run_test(struct perf_thread *pthr)
@@ -978,8 +1005,13 @@ static void perf_clear_test(struct perf_thread *pthr)
* We call it anyway just to be sure of the transfers completion.
*/
(void)dmaengine_terminate_sync(pthr->dma_chan);
-
- dma_release_channel(pthr->dma_chan);
+ if (pthr->perf->test_peer->dma_dst_addr)
+ dma_unmap_resource(pthr->dma_chan->device->dev,
+ pthr->perf->test_peer->dma_dst_addr,
+ pthr->perf->test_peer->outbuf_size,
+ DMA_FROM_DEVICE, 0);
+ if (pthr->dma_chan)
+ dma_release_channel(pthr->dma_chan);
no_dma_notify:
atomic_dec(&perf->tsync);
@@ -1195,6 +1227,9 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
"\tOut buffer addr 0x%pK\n", peer->outbuf);
pos += scnprintf(buf + pos, buf_size - pos,
+ "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
+
+ pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buffer size %pa\n", &peer->outbuf_size);
pos += scnprintf(buf + pos, buf_size - pos,
@@ -1388,6 +1423,8 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
if (!peer->outbuf)
return -ENOMEM;
+ peer->out_phys_addr = phys_addr;
+
if (max_mw_size && peer->outbuf_size > max_mw_size) {
peer->outbuf_size = max_mw_size;
dev_warn(&peer->perf->ntb->dev,
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index d592c0ffbd19..69da758fe64c 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -678,19 +678,19 @@ static ssize_t tool_mw_trans_read(struct file *filep, char __user *ubuf,
&inmw->dma_base);
off += scnprintf(buf + off, buf_size - off,
- "Window Size \t%pa[p]\n",
+ "Window Size \t%pap\n",
&inmw->size);
off += scnprintf(buf + off, buf_size - off,
- "Alignment \t%pa[p]\n",
+ "Alignment \t%pap\n",
&addr_align);
off += scnprintf(buf + off, buf_size - off,
- "Size Alignment \t%pa[p]\n",
+ "Size Alignment \t%pap\n",
&size_align);
off += scnprintf(buf + off, buf_size - off,
- "Size Max \t%pa[p]\n",
+ "Size Max \t%pap\n",
&size_max);
ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
@@ -907,16 +907,16 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep, char __user *ubuf,
"Virtual address \t0x%pK\n", outmw->io_base);
off += scnprintf(buf + off, buf_size - off,
- "Phys Address \t%pa[p]\n", &map_base);
+ "Phys Address \t%pap\n", &map_base);
off += scnprintf(buf + off, buf_size - off,
- "Mapping Size \t%pa[p]\n", &map_size);
+ "Mapping Size \t%pap\n", &map_size);
off += scnprintf(buf + off, buf_size - off,
"Translation Address \t0x%016llx\n", outmw->tr_base);
off += scnprintf(buf + off, buf_size - off,
- "Window Size \t%pa[p]\n", &outmw->size);
+ "Window Size \t%pap\n", &outmw->size);
ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
kfree(buf);
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 677d6f45b5c4..43751fab9d36 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -249,13 +249,12 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
- q = blk_alloc_queue(GFP_KERNEL);
+ q = blk_alloc_queue(nd_blk_make_request, NUMA_NO_NODE);
if (!q)
return -ENOMEM;
if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
return -ENOMEM;
- blk_queue_make_request(q, nd_blk_make_request);
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0d04ea3d9fd7..3b09419218d6 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1521,7 +1521,7 @@ static int btt_blk_init(struct btt *btt)
struct nd_namespace_common *ndns = nd_btt->ndns;
/* create a new disk and request queue for btt */
- btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
+ btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE);
if (!btt->btt_queue)
return -ENOMEM;
@@ -1540,7 +1540,6 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->queue->backing_dev_info->capabilities |=
BDI_CAP_SYNCHRONOUS_IO;
- blk_queue_make_request(btt->btt_queue, btt_make_request);
blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index a8b515968569..09087c38fabd 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -1042,8 +1042,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
return -EFAULT;
}
- if (!desc || (desc->out_num + desc->in_num == 0) ||
- !test_bit(cmd, &cmd_mask))
+ if (!desc ||
+ (desc->out_num + desc->in_num == 0) ||
+ cmd > ND_CMD_CALL ||
+ !test_bit(cmd, &cmd_mask))
return -ENOTTY;
/* fail write commands (when read-only) */
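
The added `cmd > ND_CMD_CALL` test bounds the ioctl command number before it is used as a bit index; test_bit() does no range checking of its own, so an oversized `cmd` would read past the mask word. A sketch of the safe pattern, as a hypothetical helper (nd_cmd_ok is not a real kernel function):

    static bool nd_cmd_ok(unsigned int cmd, const unsigned long *mask)
    {
            /* bound the index first; test_bit() does not range-check */
            if (cmd > ND_CMD_CALL)
                    return false;
            return test_bit(cmd, mask);
    }
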
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 64776ed15bb3..7d4ddc4d9322 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -99,7 +99,7 @@ static int nvdimm_probe(struct device *dev)
if (ndd->ns_current >= 0) {
rc = nd_label_reserve_dpa(ndd);
if (rc == 0)
- nvdimm_set_aliasing(dev);
+ nvdimm_set_labeling(dev);
}
nvdimm_bus_unlock(dev);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 94ea6dba6b4f..b7b77e8d9027 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -32,7 +32,7 @@ int nvdimm_check_config_data(struct device *dev)
if (!nvdimm->cmd_mask ||
!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
- if (test_bit(NDD_ALIASING, &nvdimm->flags))
+ if (test_bit(NDD_LABELING, &nvdimm->flags))
return -ENXIO;
else
return -ENOTTY;
@@ -173,11 +173,11 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
return rc;
}
-void nvdimm_set_aliasing(struct device *dev)
+void nvdimm_set_labeling(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- set_bit(NDD_ALIASING, &nvdimm->flags);
+ set_bit(NDD_LABELING, &nvdimm->flags);
}
void nvdimm_set_locked(struct device *dev)
@@ -312,8 +312,9 @@ static ssize_t flags_show(struct device *dev,
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- return sprintf(buf, "%s%s\n",
+ return sprintf(buf, "%s%s%s\n",
test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
+ test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);
@@ -562,6 +563,21 @@ int nvdimm_security_freeze(struct nvdimm *nvdimm)
return rc;
}
+static unsigned long dpa_align(struct nd_region *nd_region)
+{
+ struct device *dev = &nd_region->dev;
+
+ if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
+ "bus lock required for capacity provision\n"))
+ return 0;
+ if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
+ % nd_region->ndr_mappings,
+ "invalid region align %#lx mappings: %d\n",
+ nd_region->align, nd_region->ndr_mappings))
+ return 0;
+ return nd_region->align / nd_region->ndr_mappings;
+}
+
int alias_dpa_busy(struct device *dev, void *data)
{
resource_size_t map_end, blk_start, new;
@@ -570,6 +586,7 @@ int alias_dpa_busy(struct device *dev, void *data)
struct nd_region *nd_region;
struct nvdimm_drvdata *ndd;
struct resource *res;
+ unsigned long align;
int i;
if (!is_memory(dev))
@@ -607,13 +624,21 @@ int alias_dpa_busy(struct device *dev, void *data)
* Find the free dpa from the end of the last pmem allocation to
* the end of the interleave-set mapping.
*/
+ align = dpa_align(nd_region);
+ if (!align)
+ return 0;
+
for_each_dpa_resource(ndd, res) {
+ resource_size_t start, end;
+
if (strncmp(res->name, "pmem", 4) != 0)
continue;
- if ((res->start >= blk_start && res->start < map_end)
- || (res->end >= blk_start
- && res->end <= map_end)) {
- new = max(blk_start, min(map_end + 1, res->end + 1));
+
+ start = ALIGN_DOWN(res->start, align);
+ end = ALIGN(res->end + 1, align) - 1;
+ if ((start >= blk_start && start < map_end)
+ || (end >= blk_start && end <= map_end)) {
+ new = max(blk_start, min(map_end, end) + 1);
if (new != blk_start) {
blk_start = new;
goto retry;
@@ -653,6 +678,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
.res = NULL,
};
struct resource *res;
+ unsigned long align;
if (!ndd)
return 0;
@@ -660,10 +686,20 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
/* now account for busy blk allocations in unaliased dpa */
+ align = dpa_align(nd_region);
+ if (!align)
+ return 0;
for_each_dpa_resource(ndd, res) {
+ resource_size_t start, end, size;
+
if (strncmp(res->name, "blk", 3) != 0)
continue;
- info.available -= resource_size(res);
+ start = ALIGN_DOWN(res->start, align);
+ end = ALIGN(res->end + 1, align) - 1;
+ size = end - start + 1;
+ if (size >= info.available)
+ return 0;
+ info.available -= size;
}
return info.available;
@@ -682,19 +718,31 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
struct nvdimm_bus *nvdimm_bus;
resource_size_t max = 0;
struct resource *res;
+ unsigned long align;
/* if a dimm is disabled the available capacity is zero */
if (!ndd)
return 0;
+ align = dpa_align(nd_region);
+ if (!align)
+ return 0;
+
nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
return 0;
for_each_dpa_resource(ndd, res) {
+ resource_size_t start, end;
+
if (strcmp(res->name, "pmem-reserve") != 0)
continue;
- if (resource_size(res) > max)
- max = resource_size(res);
+ /* trim free space relative to current alignment setting */
+ start = ALIGN(res->start, align);
+ end = ALIGN_DOWN(res->end + 1, align) - 1;
+ if (end < start)
+ continue;
+ if (end - start + 1 > max)
+ max = end - start + 1;
}
release_free_pmem(nvdimm_bus, nd_mapping);
return max;
@@ -722,24 +770,33 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
const char *reason;
+ unsigned long align;
if (!ndd)
return 0;
+ align = dpa_align(nd_region);
+ if (!align)
+ return 0;
+
map_start = nd_mapping->start;
map_end = map_start + nd_mapping->size - 1;
blk_start = max(map_start, map_end + 1 - *overlap);
for_each_dpa_resource(ndd, res) {
- if (res->start >= map_start && res->start < map_end) {
+ resource_size_t start, end;
+
+ start = ALIGN_DOWN(res->start, align);
+ end = ALIGN(res->end + 1, align) - 1;
+ if (start >= map_start && start < map_end) {
if (strncmp(res->name, "blk", 3) == 0)
blk_start = min(blk_start,
- max(map_start, res->start));
- else if (res->end > map_end) {
+ max(map_start, start));
+ else if (end > map_end) {
reason = "misaligned to iset";
goto err;
} else
- busy += resource_size(res);
- } else if (res->end >= map_start && res->end <= map_end) {
+ busy += end - start + 1;
+ } else if (end >= map_start && end <= map_end) {
if (strncmp(res->name, "blk", 3) == 0) {
/*
* If a BLK allocation overlaps the start of
@@ -748,8 +805,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
*/
blk_start = map_start;
} else
- busy += resource_size(res);
- } else if (map_start > res->start && map_start < res->end) {
+ busy += end - start + 1;
+ } else if (map_start > start && map_start < end) {
/* total eclipse of the mapping */
busy += nd_mapping->size;
blk_start = map_start;
@@ -759,7 +816,7 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
*overlap = map_end + 1 - blk_start;
available = blk_start - map_start;
if (busy < available)
- return available - busy;
+ return ALIGN_DOWN(available - busy, align);
return 0;
err:
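
The ALIGN_DOWN()/ALIGN() pair that recurs in the dimm_devs.c hunks widens each busy range outward to `align` boundaries, so a range that only partially covers an aligned block still consumes the whole block for accounting purposes. A sketch of the idiom (valid only for a power-of-two `align`, which dpa_align() is expected to return):

    resource_size_t start = ALIGN_DOWN(res->start, align);  /* round down */
    resource_size_t end = ALIGN(res->end + 1, align) - 1;   /* round up */
    resource_size_t size = end - start + 1;                 /* whole blocks */
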
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index e02f60ad6c99..4cd18be9d0e9 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -7,6 +7,7 @@
#include <linux/memory_hotplug.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
+#include <linux/numa.h>
static int e820_pmem_remove(struct platform_device *pdev)
{
@@ -16,27 +17,16 @@ static int e820_pmem_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_MEMORY_HOTPLUG
-static int e820_range_to_nid(resource_size_t addr)
-{
- return memory_add_physaddr_to_nid(addr);
-}
-#else
-static int e820_range_to_nid(resource_size_t addr)
-{
- return NUMA_NO_NODE;
-}
-#endif
-
static int e820_register_one(struct resource *res, void *data)
{
struct nd_region_desc ndr_desc;
struct nvdimm_bus *nvdimm_bus = data;
+ int nid = phys_to_target_node(res->start);
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.res = res;
- ndr_desc.numa_node = e820_range_to_nid(res->start);
- ndr_desc.target_node = ndr_desc.numa_node;
+ ndr_desc.numa_node = numa_map_to_online_node(nid);
+ ndr_desc.target_node = nid;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
return -ENXIO;
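
The e820 change above separates the firmware-described target node from the node used for allocations: phys_to_target_node() may name a memoryless or offline node, so numa_map_to_online_node() picks the closest online one for numa_node while target_node keeps the raw value. Annotated sketch of the same two assignments:

    int nid = phys_to_target_node(res->start);

    ndr_desc.target_node = nid;                        /* may be offline */
    ndr_desc.numa_node = numa_map_to_online_node(nid); /* always online */
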
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 4c7b775c2811..956b6d1bd8cc 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -62,7 +62,7 @@ struct nd_namespace_index {
__le16 major;
__le16 minor;
__le64 checksum;
- u8 free[0];
+ u8 free[];
};
/**
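
The `free[0]` to `free[]` change (repeated for flush_wpq[] and mapping[] in nd.h below) converts a GCC zero-length array into a C99 flexible array member; the layout is identical, but the compiler and fortified memory helpers can then diagnose misuse. Allocation keeps the trailing-storage pattern, sketched here with a hypothetical struct:

    struct tail {
            u32 count;
            u8 data[];              /* flexible array member; must be last */
    };

    static struct tail *tail_alloc(u32 count)
    {
            struct tail *t;

            /* struct_size(): sizeof(*t) + count * sizeof(t->data[0]) */
            t = kzalloc(struct_size(t, data, count), GFP_KERNEL);
            if (t)
                    t->count = count;
            return t;
    }
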
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 032dc61725ff..ae155e860fdc 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -10,6 +10,7 @@
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
+#include "pfn.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
@@ -541,6 +542,11 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
{
bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
+ unsigned long align;
+
+ align = nd_region->align / nd_region->ndr_mappings;
+ valid->start = ALIGN(valid->start, align);
+ valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;
if (valid->start >= valid->end)
goto invalid;
@@ -980,10 +986,10 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
return -ENXIO;
}
- div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder);
+ div_u64_rem(val, nd_region->align, &remainder);
if (remainder) {
dev_dbg(dev, "%llu is not %ldK aligned\n", val,
- (PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K);
+ nd_region->align / SZ_1K);
return -EINVAL;
}
@@ -1739,6 +1745,22 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
return ERR_PTR(-ENODEV);
}
+ /*
+ * Note: alignment validation for fsdax and devdax mode
+ * namespaces happens in nd_pfn_validate(), where infoblock
+ * padding parameters can be applied.
+ */
+ if (pmem_should_map_pages(dev)) {
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct resource *res = &nsio->res;
+
+ if (!IS_ALIGNED(res->start | (res->end + 1),
+ memremap_compat_align())) {
+ dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ }
+
if (is_namespace_pmem(&ndns->dev)) {
struct nd_namespace_pmem *nspm;
@@ -2521,7 +2543,7 @@ static int init_active_labels(struct nd_region *nd_region)
if (!ndd) {
if (test_bit(NDD_LOCKED, &nvdimm->flags))
/* fail, label data may be unreadable */;
- else if (test_bit(NDD_ALIASING, &nvdimm->flags))
+ else if (test_bit(NDD_LABELING, &nvdimm->flags))
/* fail, labels needed to disambiguate dpa */;
else
return 0;
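
In the namespace_devs.c hunk above, `IS_ALIGNED(res->start | (res->end + 1), memremap_compat_align())` folds two alignment tests into one: for a power-of-two alignment a, (x | y) & (a - 1) == 0 exactly when both x and y are aligned. A sketch of the equivalence:

    unsigned long a = memremap_compat_align();      /* power of two */
    bool both = IS_ALIGNED(res->start, a) && IS_ALIGNED(res->end + 1, a);
    bool ored = IS_ALIGNED(res->start | (res->end + 1), a);
    /* both == ored: OR-ing can only set low bits, never clear them */
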
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index c9f6a5b5253a..85dbb2a322b9 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -39,7 +39,7 @@ struct nd_region_data {
int ns_count;
int ns_active;
unsigned int hints_shift;
- void __iomem *flush_wpq[0];
+ void __iomem *flush_wpq[];
};
static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
@@ -146,6 +146,7 @@ struct nd_region {
struct device *btt_seed;
struct device *pfn_seed;
struct device *dax_seed;
+ unsigned long align;
u16 ndr_mappings;
u64 ndr_size;
u64 ndr_start;
@@ -156,7 +157,7 @@ struct nd_region {
struct nd_interleave_set *nd_set;
struct nd_percpu_lane __percpu *lane;
int (*flush)(struct nd_region *nd_region, struct bio *bio);
- struct nd_mapping mapping[0];
+ struct nd_mapping mapping[];
};
struct nd_blk_region {
@@ -252,7 +253,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len);
-void nvdimm_set_aliasing(struct device *dev);
+void nvdimm_set_labeling(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 8224d1431ea9..6826a274a1f1 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -62,8 +62,10 @@ static int of_pmem_region_probe(struct platform_device *pdev)
if (is_volatile)
region = nvdimm_volatile_region_create(bus, &ndr_desc);
- else
+ else {
+ set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
region = nvdimm_pmem_region_create(bus, &ndr_desc);
+ }
if (!region)
dev_warn(&pdev->dev, "Unable to register region %pR from %pOF\n",
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index acb19517f678..37cb1b8a2a39 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -24,6 +24,18 @@ struct nd_pfn_sb {
__le64 npfns;
__le32 mode;
/* minor-version-1 additions for section alignment */
+ /**
+ * @start_pad: Deprecated attribute to pad start-misaligned namespaces
+ *
+ * start_pad is deprecated because the original definition did
+ * not comprehend that dataoff is relative to the base address
+ * of the namespace, not the start_pad-adjusted base. The result
+ * is that the dax path is broken, but the block-I/O path is
+ * not. The kernel will no longer create namespaces using start
+ * padding, but it still supports block-I/O for legacy
+ * configurations, mainly to allow a backup, reconfigure, and
+ * restore flow that repairs dax operation.
+ */
__le32 start_pad;
__le32 end_trunc;
/* minor-version-2 record the base alignment of the mapping */
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index b94f7a7e94b8..34db557dbad1 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -446,6 +446,7 @@ static bool nd_supported_alignment(unsigned long align)
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
+ struct resource *res;
enum nd_pfn_mode mode;
struct nd_namespace_io *nsio;
unsigned long align, start_pad;
@@ -561,14 +562,14 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
nd_pfn->align, align, nd_pfn->mode,
mode);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
if (align > nvdimm_namespace_capacity(ndns)) {
dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
align, nvdimm_namespace_capacity(ndns));
- return -EINVAL;
+ return -EOPNOTSUPP;
}
/*
@@ -578,18 +579,31 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
* established.
*/
nsio = to_nd_namespace_io(&ndns->dev);
- if (offset >= resource_size(&nsio->res)) {
+ res = &nsio->res;
+ if (offset >= resource_size(res)) {
dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
dev_name(&ndns->dev));
- return -EBUSY;
+ return -EOPNOTSUPP;
}
- if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
+ if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev,
"bad offset: %#llx dax disabled align: %#lx\n",
offset, align);
- return -ENXIO;
+ return -EOPNOTSUPP;
+ }
+
+ if (!IS_ALIGNED(res->start + le32_to_cpu(pfn_sb->start_pad),
+ memremap_compat_align())) {
+ dev_err(&nd_pfn->dev, "resource start misaligned\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!IS_ALIGNED(res->end + 1 - le32_to_cpu(pfn_sb->end_trunc),
+ memremap_compat_align())) {
+ dev_err(&nd_pfn->dev, "resource end misaligned\n");
+ return -EOPNOTSUPP;
}
return 0;
@@ -750,7 +764,19 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
start = nsio->res.start;
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
- align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
+ align = max(nd_pfn->align, memremap_compat_align());
+
+ /*
+ * When @start is misaligned, fail namespace creation. See
+ * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
+ * an option.
+ */
+ if (!IS_ALIGNED(start, memremap_compat_align())) {
+ dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
+ dev_name(&ndns->dev), &start,
+ memremap_compat_align());
+ return -EINVAL;
+ }
end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 4eae441f86c9..2df6994acf83 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -136,9 +136,25 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
return BLK_STS_OK;
}
-static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, unsigned int op,
- sector_t sector)
+static blk_status_t pmem_do_read(struct pmem_device *pmem,
+ struct page *page, unsigned int page_off,
+ sector_t sector, unsigned int len)
+{
+ blk_status_t rc;
+ phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+ void *pmem_addr = pmem->virt_addr + pmem_off;
+
+ if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
+ return BLK_STS_IOERR;
+
+ rc = read_pmem(page, page_off, pmem_addr, len);
+ flush_dcache_page(page);
+ return rc;
+}
+
+static blk_status_t pmem_do_write(struct pmem_device *pmem,
+ struct page *page, unsigned int page_off,
+ sector_t sector, unsigned int len)
{
blk_status_t rc = BLK_STS_OK;
bool bad_pmem = false;
@@ -148,34 +164,25 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (!op_is_write(op)) {
- if (unlikely(bad_pmem))
- rc = BLK_STS_IOERR;
- else {
- rc = read_pmem(page, off, pmem_addr, len);
- flush_dcache_page(page);
- }
- } else {
- /*
- * Note that we write the data both before and after
- * clearing poison. The write before clear poison
- * handles situations where the latest written data is
- * preserved and the clear poison operation simply marks
- * the address range as valid without changing the data.
- * In this case application software can assume that an
- * interrupted write will either return the new good
- * data or an error.
- *
- * However, if pmem_clear_poison() leaves the data in an
- * indeterminate state we need to perform the write
- * after clear poison.
- */
- flush_dcache_page(page);
- write_pmem(pmem_addr, page, off, len);
- if (unlikely(bad_pmem)) {
- rc = pmem_clear_poison(pmem, pmem_off, len);
- write_pmem(pmem_addr, page, off, len);
- }
+ /*
+ * Note that we write the data both before and after
+ * clearing poison. The write before clear poison
+ * handles situations where the latest written data is
+ * preserved and the clear poison operation simply marks
+ * the address range as valid without changing the data.
+ * In this case application software can assume that an
+ * interrupted write will either return the new good
+ * data or an error.
+ *
+ * However, if pmem_clear_poison() leaves the data in an
+ * indeterminate state we need to perform the write
+ * after clear poison.
+ */
+ flush_dcache_page(page);
+ write_pmem(pmem_addr, page, page_off, len);
+ if (unlikely(bad_pmem)) {
+ rc = pmem_clear_poison(pmem, pmem_off, len);
+ write_pmem(pmem_addr, page, page_off, len);
}
return rc;
@@ -197,8 +204,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
- rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, bio_op(bio), iter.bi_sector);
+ if (op_is_write(bio_op(bio)))
+ rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
+ iter.bi_sector, bvec.bv_len);
+ else
+ rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
+ iter.bi_sector, bvec.bv_len);
if (rc) {
bio->bi_status = rc;
break;
@@ -223,9 +234,12 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct pmem_device *pmem = bdev->bd_queue->queuedata;
blk_status_t rc;
- rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
- 0, op, sector);
-
+ if (op_is_write(op))
+ rc = pmem_do_write(pmem, page, 0, sector,
+ hpage_nr_pages(page) * PAGE_SIZE);
+ else
+ rc = pmem_do_read(pmem, page, 0, sector,
+ hpage_nr_pages(page) * PAGE_SIZE);
/*
* The ->rw_page interface is subtle and tricky. The core
* retries on any error, so we can only invoke page_endio() in
@@ -268,6 +282,16 @@ static const struct block_device_operations pmem_fops = {
.revalidate_disk = nvdimm_revalidate_disk,
};
+static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ struct pmem_device *pmem = dax_get_private(dax_dev);
+
+ return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
+ PFN_PHYS(pgoff) >> SECTOR_SHIFT,
+ PAGE_SIZE));
+}
+
static long pmem_dax_direct_access(struct dax_device *dax_dev,
pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
@@ -299,6 +323,7 @@ static const struct dax_operations pmem_dax_ops = {
.dax_supported = generic_fsdax_supported,
.copy_from_iter = pmem_copy_from_iter,
.copy_to_iter = pmem_copy_to_iter,
+ .zero_page_range = pmem_dax_zero_page_range,
};
static const struct attribute_group *pmem_attribute_groups[] = {
@@ -395,7 +420,7 @@ static int pmem_attach_disk(struct device *dev,
return -EBUSY;
}
- q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
+ q = blk_alloc_queue(pmem_make_request, dev_to_node(dev));
if (!q)
return -ENOMEM;
@@ -433,7 +458,6 @@ static int pmem_attach_disk(struct device *dev,
pmem->virt_addr = addr;
blk_queue_write_cache(q, true, fua);
- blk_queue_make_request(q, pmem_make_request);
blk_queue_physical_block_size(q, PAGE_SIZE);
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX);
@@ -462,9 +486,9 @@ static int pmem_attach_disk(struct device *dev,
if (is_nvdimm_sync(nd_region))
flags = DAXDEV_F_SYNC;
dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
- if (!dax_dev) {
+ if (IS_ERR(dax_dev)) {
put_disk(disk);
- return -ENOMEM;
+ return PTR_ERR(dax_dev);
}
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
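
The new zero_page_range op above reuses the regular write path with the kernel's shared zero page; the only subtlety is converting a dax page offset into the 512-byte sector that pmem_do_write() expects. A sketch of the unit conversion (SECTOR_SHIFT is 9):

    /* page offset -> byte offset -> 512-byte sector */
    sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
    /* equivalently: ((sector_t)pgoff << PAGE_SHIFT) >> 9 */
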
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a19e535830d9..ccbb5b43b8b2 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -195,16 +195,16 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
int nd_region_to_nstype(struct nd_region *nd_region)
{
if (is_memory(&nd_region->dev)) {
- u16 i, alias;
+ u16 i, label;
- for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
+ for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
- if (test_bit(NDD_ALIASING, &nvdimm->flags))
- alias++;
+ if (test_bit(NDD_LABELING, &nvdimm->flags))
+ label++;
}
- if (alias)
+ if (label)
return ND_DEVICE_NAMESPACE_PMEM;
else
return ND_DEVICE_NAMESPACE_IO;
@@ -216,21 +216,25 @@ int nd_region_to_nstype(struct nd_region *nd_region)
}
EXPORT_SYMBOL(nd_region_to_nstype);
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static unsigned long long region_size(struct nd_region *nd_region)
{
- struct nd_region *nd_region = to_nd_region(dev);
- unsigned long long size = 0;
-
- if (is_memory(dev)) {
- size = nd_region->ndr_size;
+ if (is_memory(&nd_region->dev)) {
+ return nd_region->ndr_size;
} else if (nd_region->ndr_mappings == 1) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
- size = nd_mapping->size;
+ return nd_mapping->size;
}
- return sprintf(buf, "%llu\n", size);
+ return 0;
+}
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);
@@ -529,6 +533,54 @@ static ssize_t read_only_store(struct device *dev,
}
static DEVICE_ATTR_RW(read_only);
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ return sprintf(buf, "%#lx\n", nd_region->align);
+}
+
+static ssize_t align_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ unsigned long val, dpa;
+ u32 remainder;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (!nd_region->ndr_mappings)
+ return -ENXIO;
+
+ /*
+ * Ensure space-align is evenly divisible by the region
+ * interleave-width because the kernel typically has no facility
+ * to determine which DIMM(s) (dimm-physical-addresses) would
+ * contribute to the tail capacity in system-physical-address
+ * space for the namespace.
+ */
+ dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
+ if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
+ || val > region_size(nd_region) || remainder)
+ return -EINVAL;
+
+ /*
+ * Given that space allocation consults this value multiple
+ * times, ensure it does not change for the duration of the
+ * allocation.
+ */
+ nvdimm_bus_lock(dev);
+ nd_region->align = val;
+ nvdimm_bus_unlock(dev);
+
+ return len;
+}
+static DEVICE_ATTR_RW(align);
+
static ssize_t region_badblocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -571,6 +623,7 @@ static DEVICE_ATTR_RO(persistence_domain);
static struct attribute *nd_region_attributes[] = {
&dev_attr_size.attr,
+ &dev_attr_align.attr,
&dev_attr_nstype.attr,
&dev_attr_mappings.attr,
&dev_attr_btt_seed.attr,
@@ -626,6 +679,19 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return a->mode;
}
+ if (a == &dev_attr_align.attr) {
+ int i;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ if (test_bit(NDD_LABELING, &nvdimm->flags))
+ return a->mode;
+ }
+ return 0;
+ }
+
if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
return a->mode;
@@ -935,6 +1001,41 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
}
EXPORT_SYMBOL(nd_region_release_lane);
+/*
+ * PowerPC requires this alignment for memremap_pages(). All other archs
+ * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
+ */
+#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M
+
+static unsigned long default_align(struct nd_region *nd_region)
+{
+ unsigned long align;
+ int i, mappings;
+ u32 remainder;
+
+ if (is_nd_blk(&nd_region->dev))
+ align = PAGE_SIZE;
+ else
+ align = MEMREMAP_COMPAT_ALIGN_MAX;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
+ align = MEMREMAP_COMPAT_ALIGN_MAX;
+ break;
+ }
+ }
+
+ mappings = max_t(u16, 1, nd_region->ndr_mappings);
+ div_u64_rem(align, mappings, &remainder);
+ if (remainder)
+ align *= mappings;
+
+ return align;
+}
+
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc,
const struct device_type *dev_type, const char *caller)
@@ -1039,6 +1140,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
dev->of_node = ndr_desc->of_node;
nd_region->ndr_size = resource_size(ndr_desc->res);
nd_region->ndr_start = ndr_desc->res->start;
+ nd_region->align = default_align(nd_region);
if (ndr_desc->flush)
nd_region->flush = ndr_desc->flush;
else
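
align_store() above accepts a value only if it divides evenly across the interleave set into a power-of-two, at-least-PAGE_SIZE per-DIMM share that does not exceed the region size. A worked example with hypothetical numbers (4-way interleave, 4K pages):

    u32 rem;
    u64 dpa = div_u64_rem(SZ_16M, 4, &rem);
    /* dpa == SZ_4M: power of two, >= PAGE_SIZE, rem == 0 -> accepted */

    dpa = div_u64_rem(SZ_16M + SZ_4K, 4, &rem);
    /* dpa == SZ_4M + SZ_1K: not a power of two -> -EINVAL */
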
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index b9358db83e96..9c17ed32be64 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -32,8 +32,6 @@ config NVME_HWMON
a hardware monitoring device will be created for each NVMe drive
in the system.
- If unsure, say N.
-
config NVME_FABRICS
tristate
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a4d8c90ee7cc..f3c037f5a9ba 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -6,6 +6,7 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
@@ -171,7 +172,6 @@ static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
nvme_remove_namespaces(ctrl);
ctrl->ops->delete_ctrl(ctrl);
nvme_uninit_ctrl(ctrl);
- nvme_put_ctrl(ctrl);
}
static void nvme_delete_ctrl_work(struct work_struct *work)
@@ -192,21 +192,16 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
-static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
+static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
- int ret = 0;
-
/*
* Keep a reference until nvme_do_delete_ctrl() complete,
* since ->delete_ctrl can free the controller.
*/
nvme_get_ctrl(ctrl);
- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
- ret = -EBUSY;
- if (!ret)
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
nvme_do_delete_ctrl(ctrl);
nvme_put_ctrl(ctrl);
- return ret;
}
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
@@ -291,11 +286,8 @@ void nvme_complete_rq(struct request *req)
nvme_req(req)->ctrl->comp_seen = true;
if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
- if ((req->cmd_flags & REQ_NVME_MPATH) &&
- blk_path_error(status)) {
- nvme_failover_req(req);
+ if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
return;
- }
if (!blk_queue_dying(req->q)) {
nvme_retry_req(req);
@@ -1055,6 +1047,43 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
return error;
}
+static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
+ struct nvme_ns_id_desc *cur)
+{
+ const char *warn_str = "ctrl returned bogus length:";
+ void *data = cur;
+
+ switch (cur->nidt) {
+ case NVME_NIDT_EUI64:
+ if (cur->nidl != NVME_NIDT_EUI64_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
+ return NVME_NIDT_EUI64_LEN;
+ case NVME_NIDT_NGUID:
+ if (cur->nidl != NVME_NIDT_NGUID_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
+ return NVME_NIDT_NGUID_LEN;
+ case NVME_NIDT_UUID:
+ if (cur->nidl != NVME_NIDT_UUID_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ uuid_copy(&ids->uuid, data + sizeof(*cur));
+ return NVME_NIDT_UUID_LEN;
+ default:
+ /* Skip unknown types */
+ return cur->nidl;
+ }
+}
+
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_ns_ids *ids)
{
@@ -1074,8 +1103,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
NVME_IDENTIFY_DATA_SIZE);
- if (status)
+ if (status) {
+ dev_warn(ctrl->device,
+ "Identify Descriptors failed (%d)\n", status);
+ /*
+ * Don't treat an error as fatal, as we potentially already
+ * have an NGUID or EUI-64.
+ */
+ if (status > 0 && !(status & NVME_SC_DNR))
+ status = 0;
goto free_data;
+ }
for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
struct nvme_ns_id_desc *cur = data + pos;
@@ -1083,42 +1121,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (cur->nidl == 0)
break;
- switch (cur->nidt) {
- case NVME_NIDT_EUI64:
- if (cur->nidl != NVME_NIDT_EUI64_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_EUI64_LEN;
- memcpy(ids->eui64, data + pos + sizeof(*cur), len);
- break;
- case NVME_NIDT_NGUID:
- if (cur->nidl != NVME_NIDT_NGUID_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_NGUID_LEN;
- memcpy(ids->nguid, data + pos + sizeof(*cur), len);
- break;
- case NVME_NIDT_UUID:
- if (cur->nidl != NVME_NIDT_UUID_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_UUID_LEN;
- uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
- break;
- default:
- /* Skip unknown types */
- len = cur->nidl;
- break;
- }
+ len = nvme_process_ns_desc(ctrl, ids, cur);
+ if (len < 0)
+ goto free_data;
len += sizeof(*cur);
}
@@ -1248,6 +1253,18 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
queue_work(nvme_wq, &ctrl->async_event_work);
}
+/*
+ * Convert integer values from ioctl structures to user pointers, silently
+ * ignoring the upper bits in the compat case to match behaviour of 32-bit
+ * kernels.
+ */
+static void __user *nvme_to_user_ptr(uintptr_t ptrval)
+{
+ if (in_compat_syscall())
+ ptrval = (compat_uptr_t)ptrval;
+ return (void __user *)ptrval;
+}
+
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_user_io io;
@@ -1271,7 +1288,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
length = (io.nblocks + 1) << ns->lba_shift;
meta_len = (io.nblocks + 1) * ns->ms;
- metadata = (void __user *)(uintptr_t)io.metadata;
+ metadata = nvme_to_user_ptr(io.metadata);
if (ns->ext) {
length += meta_len;
@@ -1294,7 +1311,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.appmask = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c,
- (void __user *)(uintptr_t)io.addr, length,
+ nvme_to_user_ptr(io.addr), length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
@@ -1414,9 +1431,9 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- (void __user *)(uintptr_t)cmd.metadata,
- cmd.metadata_len, 0, &result, timeout);
+ nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
+ 0, &result, timeout);
nvme_passthru_end(ctrl, effects);
if (status >= 0) {
@@ -1461,8 +1478,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
+ nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout);
nvme_passthru_end(ctrl, effects);
@@ -1584,6 +1601,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
+#ifdef CONFIG_COMPAT
+struct nvme_user_io32 {
+ __u8 opcode;
+ __u8 flags;
+ __u16 control;
+ __u16 nblocks;
+ __u16 rsvd;
+ __u64 metadata;
+ __u64 addr;
+ __u64 slba;
+ __u32 dsmgmt;
+ __u32 reftag;
+ __u16 apptag;
+ __u16 appmask;
+} __attribute__((__packed__));
+
+#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
+
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ /*
+ * NVME_IOCTL_SUBMIT_IO32 covers the difference in the
+ * NVME_IOCTL_SUBMIT_IO number between 32-bit programs and a
+ * 64-bit kernel: sizeof(struct nvme_user_io), which is used to
+ * define NVME_IOCTL_SUBMIT_IO, differs between 32-bit and
+ * 64-bit compilers. NVME_IOCTL_SUBMIT_IO32 lets a 64-bit kernel
+ * handle NVME_IOCTL_SUBMIT_IO issued from 32-bit programs. All
+ * other ioctl numbers are the same on 32 bit and 64 bit, so they
+ * need no special handling.
+ */
+ if (cmd == NVME_IOCTL_SUBMIT_IO32)
+ return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
+
+ return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
@@ -1721,26 +1779,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
- int ret = 0;
-
memset(ids, 0, sizeof(*ids));
if (ctrl->vs >= NVME_VS(1, 1, 0))
memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0))
memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
- if (ctrl->vs >= NVME_VS(1, 3, 0)) {
- /* Don't treat error as fatal we potentially
- * already have a NGUID or EUI-64
- */
- ret = nvme_identify_ns_descs(ctrl, nsid, ids);
- if (ret)
- dev_warn(ctrl->device,
- "Identify Descriptors failed (%d)\n", ret);
- if (ret > 0)
- ret = 0;
- }
- return ret;
+ if (ctrl->vs >= NVME_VS(1, 3, 0))
+ return nvme_identify_ns_descs(ctrl, nsid, ids);
+ return 0;
}
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
@@ -1810,7 +1857,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
ns->lba_shift > PAGE_SHIFT)
capacity = 0;
- set_capacity(disk, capacity);
+ set_capacity_revalidate_and_notify(disk, capacity, false);
nvme_config_discard(disk, ns);
nvme_config_write_zeroes(disk, ns);
@@ -1850,6 +1897,13 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
+ struct backing_dev_info *info =
+ ns->head->disk->queue->backing_dev_info;
+
+ info->capabilities |= BDI_CAP_STABLE_WRITES;
+ }
+
revalidate_disk(ns->head->disk);
}
#endif
@@ -2027,7 +2081,7 @@ EXPORT_SYMBOL_GPL(nvme_sec_submit);
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -2055,7 +2109,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
.getgeo = nvme_getgeo,
.pr_ops = &nvme_pr_ops,
};
@@ -2074,13 +2128,13 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
if ((csts & NVME_CSTS_RDY) == bit)
break;
- msleep(100);
+ usleep_range(1000, 2000);
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
dev_err(ctrl->device,
- "Device not ready; aborting %s\n", enabled ?
- "initialisation" : "reset");
+ "Device not ready; aborting %s, CSTS=0x%x\n",
+ enabled ? "initialisation" : "reset", csts);
return -ENODEV;
}
}
@@ -2591,8 +2645,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
lockdep_assert_held(&nvme_subsystems_lock);
list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
- if (tmp->state == NVME_CTRL_DELETING ||
- tmp->state == NVME_CTRL_DEAD)
+ if (nvme_state_terminal(tmp))
continue;
if (tmp->cntlid == ctrl->cntlid) {
@@ -3193,6 +3246,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ /* Can't delete non-created controllers */
+ if (!ctrl->created)
+ return -EBUSY;
+
if (device_remove_file_self(dev, attr))
nvme_delete_ctrl_sync(ctrl);
return count;
@@ -3242,6 +3299,26 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
+static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
+}
+static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
+
+static ssize_t nvme_sysfs_show_hostid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
+}
+static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
+
static ssize_t nvme_sysfs_show_address(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3267,6 +3344,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_numa_node.attr,
&dev_attr_queue_count.attr,
&dev_attr_sqsize.attr,
+ &dev_attr_hostnqn.attr,
+ &dev_attr_hostid.attr,
NULL
};
@@ -3280,6 +3359,10 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
return 0;
+ if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_hostid.attr && !ctrl->opts)
+ return 0;
return a->mode;
}
@@ -3294,7 +3377,7 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
NULL,
};
-static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
+static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
unsigned nsid)
{
struct nvme_ns_head *h;
@@ -3327,7 +3410,8 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_id_ns *id)
+ unsigned nsid, struct nvme_id_ns *id,
+ struct nvme_ns_ids *ids)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3350,12 +3434,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
goto out_ida_remove;
head->subsys = ctrl->subsys;
head->ns_id = nsid;
+ head->ids = *ids;
kref_init(&head->ref);
- ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
- if (ret)
- goto out_cleanup_srcu;
-
ret = __nvme_check_ids(ctrl->subsys, head);
if (ret) {
dev_err(ctrl->device,
@@ -3390,24 +3471,23 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
struct nvme_ctrl *ctrl = ns->ctrl;
bool is_shared = id->nmic & (1 << 0);
struct nvme_ns_head *head = NULL;
+ struct nvme_ns_ids ids;
int ret = 0;
+ ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
+ if (ret)
+ goto out;
+
mutex_lock(&ctrl->subsys->lock);
if (is_shared)
- head = __nvme_find_ns_head(ctrl->subsys, nsid);
+ head = nvme_find_ns_head(ctrl->subsys, nsid);
if (!head) {
- head = nvme_alloc_ns_head(ctrl, nsid, id);
+ head = nvme_alloc_ns_head(ctrl, nsid, id, &ids);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
} else {
- struct nvme_ns_ids ids;
-
- ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
- if (ret)
- goto out_unlock;
-
if (!nvme_ns_ids_equal(&head->ids, &ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
@@ -3422,6 +3502,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
out_unlock:
mutex_unlock(&ctrl->subsys->lock);
+out:
if (ret > 0)
ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
@@ -3480,7 +3561,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
return 0;
}
-static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
struct gendisk *disk;
@@ -3490,13 +3571,11 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- return -ENOMEM;
+ return;
ns->queue = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ns->queue)) {
- ret = PTR_ERR(ns->queue);
+ if (IS_ERR(ns->queue))
goto out_free_ns;
- }
if (ctrl->opts && ctrl->opts->data_digest)
ns->queue->backing_dev_info->capabilities
@@ -3519,10 +3598,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (ret)
goto out_free_queue;
- if (id->ncap == 0) {
- ret = -EINVAL;
+ if (id->ncap == 0) /* no namespace (legacy quirk) */
goto out_free_id;
- }
ret = nvme_init_ns_head(ns, nsid, id);
if (ret)
@@ -3531,10 +3608,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_set_disk_name(disk_name, ns, ctrl, &flags);
disk = alloc_disk_node(0, node);
- if (!disk) {
- ret = -ENOMEM;
+ if (!disk)
goto out_unlink_ns;
- }
disk->fops = &nvme_fops;
disk->private_data = ns;
@@ -3565,8 +3640,10 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
kfree(id);
- return 0;
+ return;
out_put_disk:
+ /* prevent double queue cleanup */
+ ns->disk->queue = NULL;
put_disk(ns->disk);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
@@ -3579,9 +3656,6 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_cleanup_queue(ns->queue);
out_free_ns:
kfree(ns);
- if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
- return ret;
}
static void nvme_ns_remove(struct nvme_ns *ns)
@@ -3987,6 +4061,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
}
+ ctrl->created = true;
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
@@ -3995,6 +4070,7 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
+ nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
@@ -4077,6 +4153,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
if (ret)
goto out_release_instance;
+ nvme_get_ctrl(ctrl);
cdev_init(&ctrl->cdev, &nvme_dev_fops);
ctrl->cdev.owner = ops->module;
ret = cdev_device_add(&ctrl->cdev, ctrl->device);
@@ -4095,6 +4172,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
return 0;
out_free_name:
+ nvme_put_ctrl(ctrl);
kfree_const(ctrl->device->kobj.name);
out_release_instance:
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
@@ -4299,6 +4377,7 @@ static void __exit nvme_core_exit(void)
destroy_workqueue(nvme_delete_wq);
destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
+ ida_destroy(&nvme_instance_ida);
}
MODULE_LICENSE("GPL");
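
For context on the compat ioctl hunk above: _IOW() bakes sizeof(type) into the ioctl number, so the number itself depends on how the compiler lays out struct nvme_user_io. An illustration (the 48/44 sizes below are for x86, where a 32-bit compiler aligns __u64 to 4 bytes while a 64-bit compiler uses 8):

    /* _IOW() encodes sizeof(type) into the ioctl number: */
    #define NVME_IOCTL_SUBMIT_IO  _IOW('N', 0x42, struct nvme_user_io)
    /*
     * x86-64 build: sizeof == 48 (tail padded to an 8-byte boundary)
     * 32-bit x86 build: sizeof == 44 (__u64 aligned to 4 bytes)
     * The two builds therefore produce different ioctl numbers; the
     * packed nvme_user_io32 reproduces the 32-bit layout so a 64-bit
     * kernel can recognize the number 32-bit programs issue.
     */
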
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 74b8818ac9a1..2a6c8190eeb7 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -105,14 +105,14 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
int len = 0;
if (ctrl->opts->mask & NVMF_OPT_TRADDR)
- len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
+ len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
- len += snprintf(buf + len, size - len, "%strsvcid=%s",
+ len += scnprintf(buf + len, size - len, "%strsvcid=%s",
(len) ? "," : "", ctrl->opts->trsvcid);
if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
- len += snprintf(buf + len, size - len, "%shost_traddr=%s",
+ len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
(len) ? "," : "", ctrl->opts->host_traddr);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "\n");
return len;
}
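
The snprintf() to scnprintf() switch above matters when output is accumulated: snprintf() returns the length that would have been written, so `len` can run past `size` and make `size - len` negative (which the next call sees as a huge size_t); scnprintf() returns the bytes actually stored. Sketch:

    char buf[8];
    int len;

    len = snprintf(buf, sizeof(buf), "0123456789");
    /* len == 10: the would-be length, larger than the buffer; a
     * follow-up snprintf(buf + len, size - len, ...) underflows */

    len = scnprintf(buf, sizeof(buf), "0123456789");
    /* len == 7: bytes actually stored (NUL excluded), safe to chain */
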
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5a70ac395d53..7dfc4a2ecf1e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
!template->ls_req || !template->fcp_io ||
!template->ls_abort || !template->fcp_abort ||
!template->max_hw_queues || !template->max_sgl_segments ||
- !template->max_dif_sgl_segments || !template->dma_boundary ||
- !template->module) {
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
ret = -EINVAL;
goto out_reghost_failed;
}
@@ -2016,7 +2015,6 @@ nvme_fc_ctrl_free(struct kref *ref)
{
struct nvme_fc_ctrl *ctrl =
container_of(ref, struct nvme_fc_ctrl, ref);
- struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;
if (ctrl->ctrl.tagset) {
@@ -2043,7 +2041,6 @@ nvme_fc_ctrl_free(struct kref *ref)
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
- module_put(lport->ops->module);
}
static void
@@ -3074,15 +3071,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
- if (!try_module_get(lport->ops->module)) {
- ret = -EUNATCH;
- goto out_free_ctrl;
- }
-
idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
- goto out_mod_put;
+ goto out_free_ctrl;
}
ctrl->ctrl.opts = opts;
@@ -3181,10 +3173,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto fail_ctrl;
}
- nvme_get_ctrl(&ctrl->ctrl);
-
if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
- nvme_put_ctrl(&ctrl->ctrl);
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to schedule initial connect\n",
ctrl->cnum);
@@ -3235,8 +3224,6 @@ out_free_queues:
out_free_ida:
put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
-out_mod_put:
- module_put(lport->ops->module);
out_free_ctrl:
kfree(ctrl);
out_fail:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a11900cf3a36..54603bd3e02d 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -64,17 +64,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
}
}
-void nvme_failover_req(struct request *req)
+bool nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
u16 status = nvme_req(req)->status;
unsigned long flags;
- spin_lock_irqsave(&ns->head->requeue_lock, flags);
- blk_steal_bios(&ns->head->requeue_list, req);
- spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
-
switch (status & 0x7ff) {
case NVME_SC_ANA_TRANSITION:
case NVME_SC_ANA_INACCESSIBLE:
@@ -103,15 +98,17 @@ void nvme_failover_req(struct request *req)
nvme_mpath_clear_current_path(ns);
break;
default:
- /*
- * Reset the controller for any non-ANA error as we don't know
- * what caused the error.
- */
- nvme_reset_ctrl(ns->ctrl);
- break;
+ /* This was a non-ANA error so follow the normal error path. */
+ return false;
}
+ spin_lock_irqsave(&ns->head->requeue_lock, flags);
+ blk_steal_bios(&ns->head->requeue_list, req);
+ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+ blk_mq_end_request(req, 0);
+
kblockd_schedule_work(&ns->head->requeue_work);
+ return true;
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
@@ -377,11 +374,10 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
return 0;
- q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
+ q = blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node);
if (!q)
goto out;
q->queuedata = head;
- blk_queue_make_request(q, nvme_ns_head_make_request);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(q, 512);
@@ -514,7 +510,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
- down_write(&ctrl->namespaces_rwsem);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
unsigned nsid = le32_to_cpu(desc->nsids[n]);
@@ -525,7 +521,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (++n == nr_nsids)
break;
}
- up_write(&ctrl->namespaces_rwsem);
+ up_read(&ctrl->namespaces_rwsem);
return 0;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1024fec7914c..2e04a36296d9 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -259,6 +259,7 @@ struct nvme_ctrl {
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
+ bool created;
#ifdef CONFIG_NVME_MULTIPATH
/* asymmetric namespace access: */
@@ -550,7 +551,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
-void nvme_failover_req(struct request *req);
+bool nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
@@ -599,8 +600,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}
-static inline void nvme_failover_req(struct request *req)
+static inline bool nvme_failover_req(struct request *req)
{
+ return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d3f23d6254e4..cc46e250fcac 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -971,39 +971,34 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
nvme_end_request(req, cqe->status, cqe->result);
}
-static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
-{
- while (start != end) {
- nvme_handle_cqe(nvmeq, start);
- if (++start == nvmeq->q_depth)
- start = 0;
- }
-}
-
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
- if (nvmeq->cq_head == nvmeq->q_depth - 1) {
+ u16 tmp = nvmeq->cq_head + 1;
+
+ if (tmp == nvmeq->q_depth) {
nvmeq->cq_head = 0;
- nvmeq->cq_phase = !nvmeq->cq_phase;
+ nvmeq->cq_phase ^= 1;
} else {
- nvmeq->cq_head++;
+ nvmeq->cq_head = tmp;
}
}
-static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
- u16 *end, unsigned int tag)
+static inline int nvme_process_cq(struct nvme_queue *nvmeq)
{
int found = 0;
- *start = nvmeq->cq_head;
while (nvme_cqe_pending(nvmeq)) {
- if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
- found++;
+ found++;
+ /*
+ * The load-load control dependency between the phase bit and
+ * the rest of the CQE requires a full read memory barrier.
+ */
+ dma_rmb();
+ nvme_handle_cqe(nvmeq, nvmeq->cq_head);
nvme_update_cq_head(nvmeq);
}
- *end = nvmeq->cq_head;
- if (*start != *end)
+ if (found)
nvme_ring_cq_doorbell(nvmeq);
return found;
}
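
For context on the head/phase arithmetic above: the completion queue is a ring, and the controller inverts the phase bit it writes on each pass, so an entry is new only while bit 0 of its status matches the queue's expected phase; the expectation flips whenever the head wraps. A condensed sketch with hypothetical names (cqe, head, depth, phase, handle_cqe):

    while ((le16_to_cpu(cqe[head].status) & 1) == phase) {
            dma_rmb();              /* order phase check vs. CQE reads */
            handle_cqe(&cqe[head]);
            if (++head == depth) {  /* wrap: flip expected phase */
                    head = 0;
                    phase ^= 1;
            }
    }
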
@@ -1012,21 +1007,16 @@ static irqreturn_t nvme_irq(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
irqreturn_t ret = IRQ_NONE;
- u16 start, end;
/*
* The rmb/wmb pair ensures we see all updates from a previous run of
* the irq handler, even if that was on another CPU.
*/
rmb();
- nvme_process_cq(nvmeq, &start, &end, -1);
+ if (nvme_process_cq(nvmeq))
+ ret = IRQ_HANDLED;
wmb();
- if (start != end) {
- nvme_complete_cqes(nvmeq, start, end);
- return IRQ_HANDLED;
- }
-
return ret;
}
@@ -1039,46 +1029,30 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
}
/*
- * Poll for completions any queue, including those not dedicated to polling.
+ * Poll for completions for any interrupt-driven queue.
* Can be called from any context.
*/
-static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
+static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
- u16 start, end;
- int found;
- /*
- * For a poll queue we need to protect against the polling thread
- * using the CQ lock. For normal interrupt driven threads we have
- * to disable the interrupt to avoid racing with it.
- */
- if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
- spin_lock(&nvmeq->cq_poll_lock);
- found = nvme_process_cq(nvmeq, &start, &end, tag);
- spin_unlock(&nvmeq->cq_poll_lock);
- } else {
- disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
- found = nvme_process_cq(nvmeq, &start, &end, tag);
- enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
- }
+ WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
- nvme_complete_cqes(nvmeq, start, end);
- return found;
+ disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ nvme_process_cq(nvmeq);
+ enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}
static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
struct nvme_queue *nvmeq = hctx->driver_data;
- u16 start, end;
bool found;
if (!nvme_cqe_pending(nvmeq))
return 0;
spin_lock(&nvmeq->cq_poll_lock);
- found = nvme_process_cq(nvmeq, &start, &end, -1);
- nvme_complete_cqes(nvmeq, start, end);
+ found = nvme_process_cq(nvmeq);
spin_unlock(&nvmeq->cq_poll_lock);
return found;
@@ -1255,7 +1229,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
/*
* Did we miss an interrupt?
*/
- if (nvme_poll_irqdisable(nvmeq, req->tag)) {
+ if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
+ nvme_poll(req->mq_hctx);
+ else
+ nvme_poll_irqdisable(nvmeq);
+
+ if (blk_mq_request_completed(req)) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, completion polled\n",
req->tag, nvmeq->qid);
@@ -1398,23 +1377,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
else
nvme_disable_ctrl(&dev->ctrl);
- nvme_poll_irqdisable(nvmeq, -1);
+ nvme_poll_irqdisable(nvmeq);
}
/*
* Called only on a device that has been disabled and after all other threads
- * that can check this device's completion queues have synced. This is the
- * last chance for the driver to see a natural completion before
- * nvme_cancel_request() terminates all incomplete requests.
+ * that can check this device's completion queues have synced, except
+ * nvme_poll(). This is the last chance for the driver to see a natural
+ * completion before nvme_cancel_request() terminates all incomplete requests.
*/
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
- u16 start, end;
int i;
for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
- nvme_process_cq(&dev->queues[i], &start, &end, -1);
- nvme_complete_cqes(&dev->queues[i], start, end);
+ spin_lock(&dev->queues[i].cq_poll_lock);
+ nvme_process_cq(&dev->queues[i]);
+ spin_unlock(&dev->queues[i].cq_poll_lock);
}
}
@@ -2503,13 +2482,13 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
struct nvme_dev *dev = to_nvme_dev(ctrl);
nvme_dbbuf_dma_free(dev);
- put_device(dev->dev);
nvme_free_tagset(dev);
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
- kfree(dev->queues);
free_opal_dev(dev->ctrl.opal_dev);
mempool_destroy(dev->iod_mempool);
+ put_device(dev->dev);
+ kfree(dev->queues);
kfree(dev);
}
@@ -2689,7 +2668,7 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
- return snprintf(buf, size, "%s", dev_name(&pdev->dev));
+ return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -2835,7 +2814,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
nvme_reset_ctrl(&dev->ctrl);
- nvme_get_ctrl(&dev->ctrl);
async_schedule(nvme_async_probe, dev);
return 0;
@@ -2907,10 +2885,9 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- nvme_uninit_ctrl(&dev->ctrl);
nvme_release_prp_pools(dev);
nvme_dev_unmap(dev);
- nvme_put_ctrl(&dev->ctrl);
+ nvme_uninit_ctrl(&dev->ctrl);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0fe08c4dfd2f..cac8a930396a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -142,14 +142,6 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
-/* XXX: really should move to a generic header sooner or later.. */
-static inline void put_unaligned_le24(u32 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
-}
-
static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
return queue - queue->ctrl->queues;
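
For reference, userspace equivalents of the 24-bit little-endian helper this hunk drops from the driver (its XXX comment is resolved: the kernel copy is presumably now provided by a generic unaligned-access header), together with its read-side counterpart:

#include <stdint.h>
#include <stdio.h>

static inline void put_unaligned_le24(uint32_t val, uint8_t *p)
{
	p[0] = val;		/* least significant byte first */
	p[1] = val >> 8;
	p[2] = val >> 16;
}

static inline uint32_t get_unaligned_le24(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16;
}

int main(void)
{
	uint8_t buf[3];

	put_unaligned_le24(0x123456, buf);
	printf("round trip: 0x%06x\n", (unsigned)get_unaligned_le24(buf));
	return 0;
}
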
@@ -1024,8 +1016,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
if (!changed) {
- /* state change failure is ok if we're in DELETING state */
+ /*
+ * state change failure is ok if we're in DELETING state,
+ * but not during creation of a new controller, where it
+ * would indicate a race with the teardown flow.
+ */
WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
}
@@ -1345,7 +1342,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
int ret;
sge->addr = qe->dma;
- sge->length = sizeof(struct nvme_command),
+ sge->length = sizeof(struct nvme_command);
sge->lkey = queue->device->pd->local_dma_lkey;
wr.next = NULL;
@@ -2045,8 +2042,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
- nvme_get_ctrl(&ctrl->ctrl);
-
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
mutex_unlock(&nvme_rdma_ctrl_mutex);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 49d4373b84eb..c15a92163c1f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -20,6 +20,16 @@
struct nvme_tcp_queue;
+/* Define the socket priority to use for connections where it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+ * A non-zero value is sufficient to indicate general consideration of any
+ * possible optimization. Making it a module param allows for alternative
+ * values that may be unique for some NIC implementations.
+ */
+static int so_priority;
+module_param(so_priority, int, 0644);
+MODULE_PARM_DESC(so_priority, "nvme tcp socket optimization priority");
+
enum nvme_tcp_send_state {
NVME_TCP_SEND_CMD_PDU = 0,
NVME_TCP_SEND_H2C_PDU,
@@ -164,16 +174,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
struct request *rq;
- unsigned int bytes;
if (unlikely(nvme_tcp_async_req(req)))
return false; /* async events don't have a request */
rq = blk_mq_rq_from_pdu(req);
- bytes = blk_rq_payload_bytes(rq);
- return rq_data_dir(rq) == WRITE && bytes &&
- bytes <= nvme_tcp_inline_data_size(req->queue);
+ return rq_data_dir(rq) == WRITE && req->data_len &&
+ req->data_len <= nvme_tcp_inline_data_size(req->queue);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -1017,8 +1025,15 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
if (req->state == NVME_TCP_SEND_DDGST)
ret = nvme_tcp_try_send_ddgst(req);
done:
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN) {
ret = 0;
+ } else if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to send request %d\n", ret);
+ if (ret != -EPIPE && ret != -ECONNRESET)
+ nvme_tcp_fail_request(queue->request);
+ nvme_tcp_done_send_req(queue);
+ }
return ret;
}
@@ -1049,25 +1064,16 @@ static void nvme_tcp_io_work(struct work_struct *w)
int result;
result = nvme_tcp_try_send(queue);
- if (result > 0) {
+ if (result > 0)
pending = true;
- } else if (unlikely(result < 0)) {
- dev_err(queue->ctrl->ctrl.device,
- "failed to send request %d\n", result);
-
- /*
- * Fail the request unless peer closed the connection,
- * in which case error recovery flow will complete all.
- */
- if ((result != -EPIPE) && (result != -ECONNRESET))
- nvme_tcp_fail_request(queue->request);
- nvme_tcp_done_send_req(queue);
- return;
- }
+ else if (unlikely(result < 0))
+ break;
result = nvme_tcp_try_recv(queue);
if (result > 0)
pending = true;
+ else if (unlikely(result < 0))
+ return;
if (!pending)
return;
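
A toy model of the restructured io_work() loop above (names, budget, and return codes are invented for illustration): with the error handling moved into the send path itself, the loop simply breaks on failure instead of failing the request here.

#include <stdbool.h>
#include <stdio.h>

static int budget = 3;

static int try_send(void)
{
	/* On a real error this path would already have failed the
	 * request and torn down the send context before returning <0. */
	return budget-- > 0 ? 1 : 0;
}

static int try_recv(void)
{
	return 0;	/* nothing received in this sketch */
}

static void io_work(void)
{
	for (;;) {
		bool pending = false;
		int result;

		result = try_send();
		if (result > 0)
			pending = true;
		else if (result < 0)
			break;	/* try_send() already handled the failure */

		result = try_recv();
		if (result > 0)
			pending = true;
		else if (result < 0)
			return;

		if (!pending)
			return;
	}
}

int main(void)
{
	io_work();
	puts("io_work drained its budget");
	return 0;
}
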
@@ -1248,13 +1254,67 @@ free_icreq:
return ret;
}
+static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
+{
+ return nvme_tcp_queue_id(queue) == 0;
+}
+
+static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
+}
+
+static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ !nvme_tcp_default_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ];
+}
+
+static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ !nvme_tcp_default_queue(queue) &&
+ !nvme_tcp_read_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ] +
+ ctrl->io_queues[HCTX_TYPE_POLL];
+}
+
+static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+ int n = 0;
+
+ if (nvme_tcp_default_queue(queue))
+ n = qid - 1;
+ else if (nvme_tcp_read_queue(queue))
+ n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ else if (nvme_tcp_poll_queue(queue))
+ n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
+ ctrl->io_queues[HCTX_TYPE_READ] - 1;
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+}
+
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
int qid, size_t queue_size)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
struct linger sol = { .l_onoff = 1, .l_linger = 0 };
- int ret, opt, rcv_pdu_size, n;
+ int ret, opt, rcv_pdu_size;
queue->ctrl = ctrl;
INIT_LIST_HEAD(&queue->send_list);
@@ -1309,6 +1369,17 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
goto err_sock;
}
+ if (so_priority > 0) {
+ ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY,
+ (char *)&so_priority, sizeof(so_priority));
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "failed to set SO_PRIORITY sock opt, ret %d\n",
+ ret);
+ goto err_sock;
+ }
+ }
+
/* Set socket type of service */
if (nctrl->opts->tos >= 0) {
opt = nctrl->opts->tos;
@@ -1322,11 +1393,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
}
queue->sock->sk->sk_allocation = GFP_ATOMIC;
- if (!qid)
- n = 0;
- else
- n = (qid - 1) % num_online_cpus();
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ nvme_tcp_set_queue_io_cpu(queue);
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
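
A standalone recomputation of the new per-class io_cpu assignment (queue counts here are made-up example values; cpumask_next_wrap() is approximated with a modulo): each class of queue gets its own zero-based index, so default, read, and poll queues all start from the first CPU instead of sharing one global sequence.

#include <stdio.h>

enum { DEFAULT_QS = 4, READ_QS = 2, POLL_QS = 2, NCPUS = 4 };

static int io_cpu_for_qid(int qid)
{
	int n = 0;

	if (qid == 0)				/* admin queue */
		n = 0;
	else if (qid <= DEFAULT_QS)		/* default queues */
		n = qid - 1;
	else if (qid <= DEFAULT_QS + READ_QS)	/* read queues */
		n = qid - DEFAULT_QS - 1;
	else					/* poll queues */
		n = qid - DEFAULT_QS - READ_QS - 1;

	return n % NCPUS;	/* stand-in for cpumask_next_wrap() */
}

int main(void)
{
	for (int qid = 0; qid <= DEFAULT_QS + READ_QS + POLL_QS; qid++)
		printf("qid %d -> cpu %d\n", qid, io_cpu_for_qid(qid));
	return 0;
}
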
@@ -1861,8 +1928,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
}
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
- /* state change failure is ok if we're in DELETING state */
+ /*
+ * state change failure is ok if we're in DELETING state,
+ * but not during creation of a new controller, where it
+ * would indicate a race with the teardown flow.
+ */
WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
}
@@ -2090,7 +2162,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
c->common.flags |= NVME_CMD_SGL_METABUF;
- if (rq_data_dir(rq) == WRITE && req->data_len &&
+ if (!blk_rq_nr_phys_segments(rq))
+ nvme_tcp_set_sg_null(c);
+ else if (rq_data_dir(rq) == WRITE &&
req->data_len <= nvme_tcp_inline_data_size(queue))
nvme_tcp_set_sg_inline(queue, c, req->data_len);
else
@@ -2117,7 +2191,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_sent = 0;
req->pdu_len = 0;
req->pdu_sent = 0;
- req->data_len = blk_rq_payload_bytes(rq);
+ req->data_len = blk_rq_nr_phys_segments(rq) ?
+ blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
if (rq_data_dir(rq) == WRITE &&
@@ -2224,6 +2299,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ return 0;
+
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
nvme_tcp_try_recv(queue);
@@ -2359,8 +2437,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
- nvme_get_ctrl(&ctrl->ctrl);
-
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
mutex_unlock(&nvme_tcp_ctrl_mutex);
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 72a7e41f3018..9d6f75cfa77c 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
+#include <linux/part_stat.h>
#include <generated/utsrelease.h>
#include <asm/unaligned.h>
@@ -322,12 +323,25 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}
+static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
+ struct nvmet_subsys *subsys)
+{
+ const char *model = NVMET_DEFAULT_CTRL_MODEL;
+ struct nvmet_subsys_model *subsys_model;
+
+ rcu_read_lock();
+ subsys_model = rcu_dereference(subsys->model);
+ if (subsys_model)
+ model = subsys_model->number;
+ memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
+ rcu_read_unlock();
+}
+
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
u16 status = 0;
- const char model[] = "Linux";
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
@@ -342,7 +356,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
memset(id->sn, ' ', sizeof(id->sn));
bin2hex(id->sn, &ctrl->subsys->serial,
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
- memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ nvmet_id_set_model_number(id, ctrl->subsys);
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
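
A userspace model of the memcpy_and_pad() call used above, showing how the model string is space-padded into the fixed-width Identify MN field (the 40-byte size mirrors the spec-defined field; the helper body is a plain-C approximation of the kernel's):

#include <stdio.h>
#include <string.h>

static void memcpy_and_pad(void *dst, size_t dst_len,
			   const void *src, size_t src_len, int pad)
{
	if (src_len > dst_len)
		src_len = dst_len;
	memcpy(dst, src, src_len);
	memset((char *)dst + src_len, pad, dst_len - src_len);
}

int main(void)
{
	char mn[40];
	const char *model = "Linux";	/* NVMET_DEFAULT_CTRL_MODEL */

	memcpy_and_pad(mn, sizeof(mn), model, strlen(model), ' ');
	printf("mn = \"%.40s\"\n", mn);	/* "Linux" plus 35 trailing spaces */
	return 0;
}
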
@@ -356,8 +370,12 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* we support multiple ports, multiple hosts and ANA: */
id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
- /* no limit on data transfer sizes for now */
- id->mdts = 0;
+ /* Limit MDTS according to transport capability */
+ if (ctrl->ops->get_mdts)
+ id->mdts = ctrl->ops->get_mdts(ctrl);
+ else
+ id->mdts = 0;
+
id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);
@@ -720,13 +738,22 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
u16 status = 0;
+ u16 nsqr;
+ u16 ncqr;
if (!nvmet_check_data_len(req, 0))
return;
switch (cdw10 & 0xff) {
case NVME_FEAT_NUM_QUEUES:
+ ncqr = (cdw11 >> 16) & 0xffff;
+ nsqr = cdw11 & 0xffff;
+ if (ncqr == 0xffff || nsqr == 0xffff) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
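
A worked example of the new CDW11 validation (values are illustrative): NCQR and NSQR are 0's-based 16-bit fields, so a raw value of 0xffff would mean 65536 queues, is reserved by the spec, and must be rejected with Invalid Field.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cdw11 = 0x0003ffffu;	/* NCQR = 3, NSQR = 0xffff */
	uint16_t ncqr = (cdw11 >> 16) & 0xffff;
	uint16_t nsqr = cdw11 & 0xffff;

	if (ncqr == 0xffff || nsqr == 0xffff)
		printf("reject: NVME_SC_INVALID_FIELD | NVME_SC_DNR\n");
	else
		printf("requested %u CQs / %u SQs (0's based)\n",
		       ncqr + 1, nsqr + 1);
	return 0;
}
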
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 98613a45bd3b..58cabd7b6fc5 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -395,14 +395,12 @@ static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
struct nvmet_subsys *subsys = ns->subsys;
int ret = 0;
-
mutex_lock(&subsys->lock);
if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}
-
if (uuid_parse(page, &ns->uuid))
ret = -EINVAL;
@@ -815,10 +813,10 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
(int)NVME_MAJOR(subsys->ver),
(int)NVME_MINOR(subsys->ver),
(int)NVME_TERTIARY(subsys->ver));
- else
- return snprintf(page, PAGE_SIZE, "%d.%d\n",
- (int)NVME_MAJOR(subsys->ver),
- (int)NVME_MINOR(subsys->ver));
+
+ return snprintf(page, PAGE_SIZE, "%d.%d\n",
+ (int)NVME_MAJOR(subsys->ver),
+ (int)NVME_MINOR(subsys->ver));
}
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
@@ -828,7 +826,6 @@ static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
int major, minor, tertiary = 0;
int ret;
-
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
if (ret != 2 && ret != 3)
return -EINVAL;
@@ -852,20 +849,151 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
const char *page, size_t count)
{
- struct nvmet_subsys *subsys = to_subsys(item);
+ u64 serial;
+
+ if (sscanf(page, "%llx\n", &serial) != 1)
+ return -EINVAL;
down_write(&nvmet_config_sem);
- sscanf(page, "%llx\n", &subsys->serial);
+ to_subsys(item)->serial = serial;
up_write(&nvmet_config_sem);
return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
+static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 cntlid_min;
+
+ if (sscanf(page, "%hu\n", &cntlid_min) != 1)
+ return -EINVAL;
+
+ if (cntlid_min == 0)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (cntlid_min >= to_subsys(item)->cntlid_max)
+ goto out_unlock;
+ to_subsys(item)->cntlid_min = cntlid_min;
+ up_write(&nvmet_config_sem);
+ return cnt;
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
+
+static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 cntlid_max;
+
+ if (sscanf(page, "%hu\n", &cntlid_max) != 1)
+ return -EINVAL;
+
+ if (cntlid_max == 0)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (cntlid_max <= to_subsys(item)->cntlid_min)
+ goto out_unlock;
+ to_subsys(item)->cntlid_max = cntlid_max;
+ up_write(&nvmet_config_sem);
+ return cnt;
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
+
+static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ struct nvmet_subsys_model *subsys_model;
+ char *model = NVMET_DEFAULT_CTRL_MODEL;
+ int ret;
+
+ rcu_read_lock();
+ subsys_model = rcu_dereference(subsys->model);
+ if (subsys_model)
+ model = subsys_model->number;
+ ret = snprintf(page, PAGE_SIZE, "%s\n", model);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/* See Section 1.5 of NVMe 1.4 */
+static bool nvmet_is_ascii(const char c)
+{
+ return c >= 0x20 && c <= 0x7e;
+}
+
+static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ struct nvmet_subsys_model *new_model;
+ char *new_model_number;
+ int pos = 0, len;
+
+ len = strcspn(page, "\n");
+ if (!len)
+ return -EINVAL;
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos]))
+ return -EINVAL;
+ }
+
+ new_model_number = kstrndup(page, len, GFP_KERNEL);
+ if (!new_model_number)
+ return -ENOMEM;
+
+ new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL);
+ if (!new_model) {
+ kfree(new_model_number);
+ return -ENOMEM;
+ }
+ memcpy(new_model->number, new_model_number, len);
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ new_model = rcu_replace_pointer(subsys->model, new_model,
+ mutex_is_locked(&subsys->lock));
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ kfree_rcu(new_model, rcuhead);
+
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_attr_version,
&nvmet_subsys_attr_attr_serial,
+ &nvmet_subsys_attr_attr_cntlid_min,
+ &nvmet_subsys_attr_attr_cntlid_max,
+ &nvmet_subsys_attr_attr_model,
NULL,
};
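
A heavily simplified, single-threaded stand-in for the rcu_replace_pointer()/kfree_rcu() publish-and-retire pattern used in attr_model_store above, built on a C11 atomic exchange; real RCU defers the free across a grace period, which this sketch deliberately skips.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct model { char number[32]; };

static _Atomic(struct model *) current_model;

static void set_model(const char *s)
{
	struct model *new = calloc(1, sizeof(*new));
	struct model *old;

	if (!new)
		return;
	strncpy(new->number, s, sizeof(new->number) - 1);
	old = atomic_exchange(&current_model, new);	/* publish */
	free(old);	/* kfree_rcu() in the kernel, after a grace period */
}

int main(void)
{
	set_model("Linux");
	set_model("Frobnicator 9000");	/* hypothetical replacement string */
	printf("model: %s\n", atomic_load(&current_model)->number);
	free(atomic_load(&current_model));
	return 0;
}
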
@@ -970,12 +1098,19 @@ static struct configfs_attribute *nvmet_referral_attrs[] = {
NULL,
};
-static void nvmet_referral_release(struct config_item *item)
+static void nvmet_referral_notify(struct config_group *group,
+ struct config_item *item)
{
struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
struct nvmet_port *port = to_nvmet_port(item);
nvmet_referral_disable(parent, port);
+}
+
+static void nvmet_referral_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
kfree(port);
}
@@ -1006,6 +1141,7 @@ static struct config_group *nvmet_referral_make(
static struct configfs_group_operations nvmet_referral_group_ops = {
.make_group = nvmet_referral_make,
+ .disconnect_notify = nvmet_referral_notify,
};
static const struct config_item_type nvmet_referrals_type = {
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 576de773b4db..b685f99d56a1 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1289,8 +1289,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!ctrl->sqs)
goto out_free_cqs;
+ if (subsys->cntlid_min > subsys->cntlid_max)
+ goto out_free_cqs;
+
ret = ida_simple_get(&cntlid_ida,
- NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+ subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
@@ -1438,7 +1441,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
kfree(subsys);
return ERR_PTR(-ENOMEM);
}
-
+ subsys->cntlid_min = NVME_CNTLID_MIN;
+ subsys->cntlid_max = NVME_CNTLID_MAX;
kref_init(&subsys->ref);
mutex_init(&subsys->lock);
@@ -1457,6 +1461,7 @@ static void nvmet_subsys_free(struct kref *ref)
WARN_ON_ONCE(!list_empty(&subsys->namespaces));
kfree(subsys->subsysnqn);
+ kfree_rcu(subsys->model, rcuhead);
kfree(subsys);
}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index a0db6371b43e..a8ceb7721640 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -684,7 +684,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
disconnect = atomic_xchg(&queue->connected, 0);
spin_lock_irqsave(&queue->qlock, flags);
- /* about outstanding io's */
+ /* abort outstanding io's */
for (i = 0; i < queue->sqsize; fod++, i++) {
if (fod->active) {
spin_lock(&fod->flock);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1c50af6219f3..f69ce66e2d44 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -198,10 +198,13 @@ struct fcloop_lport_priv {
};
struct fcloop_rport {
- struct nvme_fc_remote_port *remoteport;
- struct nvmet_fc_target_port *targetport;
- struct fcloop_nport *nport;
- struct fcloop_lport *lport;
+ struct nvme_fc_remote_port *remoteport;
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
};
struct fcloop_tport {
@@ -224,11 +227,10 @@ struct fcloop_nport {
};
struct fcloop_lsreq {
- struct fcloop_tport *tport;
struct nvmefc_ls_req *lsreq;
- struct work_struct work;
struct nvmefc_tgt_ls_req tgt_ls_req;
int status;
+ struct list_head ls_list; /* fcloop_rport->ls_list */
};
struct fcloop_rscn {
@@ -292,21 +294,32 @@ fcloop_delete_queue(struct nvme_fc_local_port *localport,
{
}
-
-/*
- * Transmit of LS RSP done (e.g. buffers all set). call back up
- * initiator "done" flows.
- */
static void
-fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+fcloop_rport_lsrqst_work(struct work_struct *work)
{
- struct fcloop_lsreq *tls_req =
- container_of(work, struct fcloop_lsreq, work);
- struct fcloop_tport *tport = tls_req->tport;
- struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport =
+ container_of(work, struct fcloop_rport, ls_work);
+ struct fcloop_lsreq *tls_req;
- if (!tport || tport->remoteport)
- lsreq->done(lsreq, tls_req->status);
+ spin_lock(&rport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&rport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&rport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * The callee may free the memory containing tls_req;
+ * do not reference tls_req or lsreq after this point.
+ */
+
+ spin_lock(&rport->lock);
+ }
+ spin_unlock(&rport->lock);
}
static int
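
A compact userspace model of the drain loop in fcloop_rport_lsrqst_work() (a pthread mutex stands in for the spinlock, a plain singly linked list for list_head): the lock is dropped around each callback precisely because the callee may free the entry.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lsreq {
	struct lsreq *next;
	int status;
};

struct rport {
	pthread_mutex_t lock;
	struct lsreq *head;
};

static void ls_work(struct rport *rp)
{
	pthread_mutex_lock(&rp->lock);
	for (;;) {
		struct lsreq *req = rp->head;

		if (!req)
			break;
		rp->head = req->next;
		pthread_mutex_unlock(&rp->lock);

		printf("done(status=%d)\n", req->status);
		free(req);	/* callee may free: never touch req again */

		pthread_mutex_lock(&rp->lock);
	}
	pthread_mutex_unlock(&rp->lock);
}

int main(void)
{
	struct rport rp = { .lock = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < 3; i++) {
		struct lsreq *req = malloc(sizeof(*req));

		if (!req)
			break;
		req->status = -i;
		req->next = rp.head;	/* LIFO push, for brevity */
		rp.head = req;
	}
	ls_work(&rp);
	return 0;
}
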
@@ -319,17 +332,18 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
int ret = 0;
tls_req->lsreq = lsreq;
- INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+ INIT_LIST_HEAD(&tls_req->ls_list);
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
- tls_req->tport = NULL;
- schedule_work(&tls_req->work);
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+ schedule_work(&rport->ls_work);
return ret;
}
tls_req->status = 0;
- tls_req->tport = rport->targetport->private;
ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
lsreq->rqstaddr, lsreq->rqstlen);
@@ -337,18 +351,28 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
}
static int
-fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
+fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
struct nvmefc_tgt_ls_req *tgt_lsreq)
{
struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_tport *tport = targetport->private;
+ struct nvme_fc_remote_port *remoteport = tport->remoteport;
+ struct fcloop_rport *rport;
memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
((lsreq->rsplen < tgt_lsreq->rsplen) ?
lsreq->rsplen : tgt_lsreq->rsplen));
+
tgt_lsreq->done(tgt_lsreq);
- schedule_work(&tls_req->work);
+ if (remoteport) {
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+ schedule_work(&rport->ls_work);
+ }
return 0;
}
@@ -834,6 +858,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct fcloop_rport *rport = remoteport->private;
+ flush_work(&rport->ls_work);
fcloop_nport_put(rport->nport);
}
@@ -850,7 +875,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
- .module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
@@ -1136,6 +1160,9 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->nport = nport;
rport->lport = nport->lport;
nport->rport = rport;
+ spin_lock_init(&rport->lock);
+ INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
+ INIT_LIST_HEAD(&rport->ls_list);
return count;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 4df4ebde208a..0d54e730cbf2 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -485,7 +485,6 @@ out_destroy_admin:
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
}
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
@@ -618,8 +617,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device,
"new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
- nvme_get_ctrl(&ctrl->ctrl);
-
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index eda28b22a2c8..421dff3ea143 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -23,6 +23,7 @@
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
+#define NVMET_DEFAULT_CTRL_MODEL "Linux"
/*
* Supported optional AENs:
@@ -202,6 +203,11 @@ struct nvmet_ctrl {
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
};
+struct nvmet_subsys_model {
+ struct rcu_head rcuhead;
+ char number[];
+};
+
struct nvmet_subsys {
enum nvme_subsys_type type;
@@ -211,6 +217,8 @@ struct nvmet_subsys {
struct list_head namespaces;
unsigned int nr_namespaces;
unsigned int max_nsid;
+ u16 cntlid_min;
+ u16 cntlid_max;
struct list_head ctrls;
@@ -227,6 +235,8 @@ struct nvmet_subsys {
struct config_group namespaces_group;
struct config_group allowed_hosts_group;
+
+ struct nvmet_subsys_model __rcu *model;
};
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
@@ -279,6 +289,7 @@ struct nvmet_fabrics_ops {
struct nvmet_port *port, char *traddr);
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
+ u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC 8
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 37d262a65877..fd47de0e4e4e 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -31,6 +31,9 @@
#define NVMET_RDMA_MAX_INLINE_SGE 4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
+/* Assume mpsmin == device_page_size == 4KB */
+#define NVMET_RDMA_MAX_MDTS 8
+
struct nvmet_rdma_cmd {
struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
struct ib_cqe cqe;
@@ -75,6 +78,7 @@ enum nvmet_rdma_queue_state {
struct nvmet_rdma_queue {
struct rdma_cm_id *cm_id;
+ struct ib_qp *qp;
struct nvmet_port *port;
struct ib_cq *cq;
atomic_t sq_wr_avail;
@@ -102,6 +106,13 @@ struct nvmet_rdma_queue {
struct list_head queue_list;
};
+struct nvmet_rdma_port {
+ struct nvmet_port *nport;
+ struct sockaddr_storage addr;
+ struct rdma_cm_id *cm_id;
+ struct delayed_work repair_work;
+};
+
struct nvmet_rdma_device {
struct ib_device *device;
struct ib_pd *pd;
@@ -143,12 +154,6 @@ static int num_pages(int len)
return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}
-/* XXX: really should move to a generic header sooner or later.. */
-static inline u32 get_unaligned_le24(const u8 *p)
-{
- return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
-}
-
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
return nvme_is_write(rsp->req.cmd) &&
@@ -464,7 +469,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
if (ndev->srq)
ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
else
- ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+ ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
if (unlikely(ret))
pr_err("post_recv cmd failed\n");
@@ -503,7 +508,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
if (rsp->n_rdma) {
- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
queue->cm_id->port_num, rsp->req.sg,
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
}
@@ -587,7 +592,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(rsp->n_rdma <= 0);
atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
queue->cm_id->port_num, rsp->req.sg,
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
rsp->n_rdma = 0;
@@ -742,7 +747,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
}
if (nvmet_rdma_need_data_in(rsp)) {
- if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+ if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
@@ -914,7 +919,8 @@ static void nvmet_rdma_free_dev(struct kref *ref)
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
- struct nvmet_port *port = cm_id->context;
+ struct nvmet_rdma_port *port = cm_id->context;
+ struct nvmet_port *nport = port->nport;
struct nvmet_rdma_device *ndev;
int inline_page_count;
int inline_sge_count;
@@ -931,17 +937,17 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
if (!ndev)
goto out_err;
- inline_page_count = num_pages(port->inline_data_size);
+ inline_page_count = num_pages(nport->inline_data_size);
inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
cm_id->device->attrs.max_recv_sge) - 1;
if (inline_page_count > inline_sge_count) {
pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
- port->inline_data_size, cm_id->device->name,
+ nport->inline_data_size, cm_id->device->name,
inline_sge_count * PAGE_SIZE);
- port->inline_data_size = inline_sge_count * PAGE_SIZE;
+ nport->inline_data_size = inline_sge_count * PAGE_SIZE;
inline_page_count = inline_sge_count;
}
- ndev->inline_data_size = port->inline_data_size;
+ ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -975,7 +981,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
struct ib_qp_init_attr qp_attr;
struct nvmet_rdma_device *ndev = queue->dev;
- int comp_vector, nr_cqe, ret, i;
+ int comp_vector, nr_cqe, ret, i, factor;
/*
* Spread the io queues across completion vectors,
@@ -1008,7 +1014,9 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
qp_attr.qp_type = IB_QPT_RC;
/* +1 for drain */
qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
- qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
+ factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
+ 1 << NVMET_RDMA_MAX_MDTS);
+ qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
ndev->device->attrs.max_send_sge);
@@ -1025,6 +1033,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
pr_err("failed to create_qp ret= %d\n", ret);
goto err_destroy_cq;
}
+ queue->qp = queue->cm_id->qp;
atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
@@ -1053,11 +1062,10 @@ err_destroy_cq:
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
- struct ib_qp *qp = queue->cm_id->qp;
-
- ib_drain_qp(qp);
- rdma_destroy_id(queue->cm_id);
- ib_destroy_qp(qp);
+ ib_drain_qp(queue->qp);
+ if (queue->cm_id)
+ rdma_destroy_id(queue->cm_id);
+ ib_destroy_qp(queue->qp);
ib_free_cq(queue->cq);
}
@@ -1267,6 +1275,7 @@ static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
+ struct nvmet_rdma_port *port = cm_id->context;
struct nvmet_rdma_device *ndev;
struct nvmet_rdma_queue *queue;
int ret = -EINVAL;
@@ -1282,7 +1291,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ret = -ENOMEM;
goto put_device;
}
- queue->port = cm_id->context;
+ queue->port = port->nport;
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
@@ -1291,9 +1300,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- schedule_work(&queue->release_work);
- /* Destroying rdma_cm id is not needed here */
- return 0;
+ /*
+ * Don't destroy the cm_id in free path, as we implicitly
+ * destroy the cm_id here with non-zero ret code.
+ */
+ queue->cm_id = NULL;
+ goto free_queue;
}
mutex_lock(&nvmet_rdma_queue_mutex);
@@ -1302,6 +1314,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
return 0;
+free_queue:
+ nvmet_rdma_free_queue(queue);
put_device:
kref_put(&ndev->ref, nvmet_rdma_free_dev);
@@ -1407,7 +1421,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue)
{
- struct nvmet_port *port;
+ struct nvmet_rdma_port *port;
if (queue) {
/*
@@ -1426,7 +1440,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
* cm_id destroy. use atomic xchg to make sure
* we don't compete with remove_port.
*/
- if (xchg(&port->priv, NULL) != cm_id)
+ if (xchg(&port->cm_id, NULL) != cm_id)
return 0;
/*
@@ -1457,6 +1471,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
nvmet_rdma_queue_established(queue);
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
+ if (!queue) {
+ struct nvmet_rdma_port *port = cm_id->context;
+
+ schedule_delayed_work(&port->repair_work, 0);
+ break;
+ }
+ /* FALLTHROUGH */
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
nvmet_rdma_queue_disconnect(queue);
@@ -1499,42 +1520,19 @@ restart:
mutex_unlock(&nvmet_rdma_queue_mutex);
}
-static int nvmet_rdma_add_port(struct nvmet_port *port)
+static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
- struct rdma_cm_id *cm_id;
- struct sockaddr_storage addr = { };
- __kernel_sa_family_t af;
- int ret;
-
- switch (port->disc_addr.adrfam) {
- case NVMF_ADDR_FAMILY_IP4:
- af = AF_INET;
- break;
- case NVMF_ADDR_FAMILY_IP6:
- af = AF_INET6;
- break;
- default:
- pr_err("address family %d not supported\n",
- port->disc_addr.adrfam);
- return -EINVAL;
- }
+ struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
- if (port->inline_data_size < 0) {
- port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
- } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
- pr_warn("inline_data_size %u is too large, reducing to %u\n",
- port->inline_data_size,
- NVMET_RDMA_MAX_INLINE_DATA_SIZE);
- port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
- }
+ if (cm_id)
+ rdma_destroy_id(cm_id);
+}
- ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
- port->disc_addr.trsvcid, &addr);
- if (ret) {
- pr_err("malformed ip/port passed: %s:%s\n",
- port->disc_addr.traddr, port->disc_addr.trsvcid);
- return ret;
- }
+static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
+{
+ struct sockaddr *addr = (struct sockaddr *)&port->addr;
+ struct rdma_cm_id *cm_id;
+ int ret;
cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
RDMA_PS_TCP, IB_QPT_RC);
@@ -1553,23 +1551,19 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
goto out_destroy_id;
}
- ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
+ ret = rdma_bind_addr(cm_id, addr);
if (ret) {
- pr_err("binding CM ID to %pISpcs failed (%d)\n",
- (struct sockaddr *)&addr, ret);
+ pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
}
ret = rdma_listen(cm_id, 128);
if (ret) {
- pr_err("listening to %pISpcs failed (%d)\n",
- (struct sockaddr *)&addr, ret);
+ pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
}
- pr_info("enabling port %d (%pISpcs)\n",
- le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
- port->priv = cm_id;
+ port->cm_id = cm_id;
return 0;
out_destroy_id:
@@ -1577,18 +1571,92 @@ out_destroy_id:
return ret;
}
-static void nvmet_rdma_remove_port(struct nvmet_port *port)
+static void nvmet_rdma_repair_port_work(struct work_struct *w)
{
- struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
+ struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
+ struct nvmet_rdma_port, repair_work);
+ int ret;
- if (cm_id)
- rdma_destroy_id(cm_id);
+ nvmet_rdma_disable_port(port);
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ schedule_delayed_work(&port->repair_work, 5 * HZ);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port;
+ __kernel_sa_family_t af;
+ int ret;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ nport->priv = port;
+ port->nport = nport;
+ INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
+
+ switch (nport->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ af = AF_INET;
+ break;
+ case NVMF_ADDR_FAMILY_IP6:
+ af = AF_INET6;
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ nport->disc_addr.adrfam);
+ ret = -EINVAL;
+ goto out_free_port;
+ }
+
+ if (nport->inline_data_size < 0) {
+ nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+ } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+ pr_warn("inline_data_size %u is too large, reducing to %u\n",
+ nport->inline_data_size,
+ NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+ nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+ }
+
+ ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+ nport->disc_addr.trsvcid, &port->addr);
+ if (ret) {
+ pr_err("malformed ip/port passed: %s:%s\n",
+ nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+ goto out_free_port;
+ }
+
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ goto out_free_port;
+
+ pr_info("enabling port %d (%pISpcs)\n",
+ le16_to_cpu(nport->disc_addr.portid),
+ (struct sockaddr *)&port->addr);
+
+ return 0;
+
+out_free_port:
+ kfree(port);
+ return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port = nport->priv;
+
+ cancel_delayed_work_sync(&port->repair_work);
+ nvmet_rdma_disable_port(port);
+ kfree(port);
}
static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
- struct nvmet_port *port, char *traddr)
+ struct nvmet_port *nport, char *traddr)
{
- struct rdma_cm_id *cm_id = port->priv;
+ struct nvmet_rdma_port *port = nport->priv;
+ struct rdma_cm_id *cm_id = port->cm_id;
if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
struct nvmet_rdma_rsp *rsp =
@@ -1598,10 +1666,15 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
sprintf(traddr, "%pISc", addr);
} else {
- memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
}
+static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
+{
+ return NVMET_RDMA_MAX_MDTS;
+}
+
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
@@ -1612,6 +1685,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.queue_response = nvmet_rdma_queue_response,
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
+ .get_mdts = nvmet_rdma_get_mdts,
};
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
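
A toy model of the port-repair flow added above (the kernel version reschedules its delayed work item rather than looping, and waits 5*HZ between attempts; the failure count here is contrived):

#include <stdio.h>
#include <unistd.h>

static int attempts;

static int enable_port(void)
{
	return ++attempts < 3 ? -1 : 0;	/* fail twice, then succeed */
}

static void repair_port_work(void)
{
	/* disable_port() would tear down the stale listener here */
	while (enable_port()) {
		fprintf(stderr, "rebind failed, retrying\n");
		sleep(1);	/* schedule_delayed_work(..., 5 * HZ) */
	}
	printf("port repaired after %d attempts\n", attempts);
}

int main(void)
{
	repair_port_work();
	return 0;
}
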
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 5bb5342b8d0c..f0da04e960f4 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -19,6 +19,16 @@
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+/* Define the socket priority to use for connections where it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+ * A non-zero value is sufficient to indicate general consideration of any
+ * possible optimization. Making it a module param allows for alternative
+ * values that may be unique for some NIC implementations.
+ */
+static int so_priority;
+module_param(so_priority, int, 0644);
+MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimization priority");
+
#define NVMET_TCP_RECV_BUDGET 8
#define NVMET_TCP_SEND_BUDGET 8
#define NVMET_TCP_IO_WORK_BUDGET 64
@@ -622,7 +632,7 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
return 1;
}
-static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
struct nvmet_tcp_queue *queue = cmd->queue;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
@@ -632,6 +642,9 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
};
int ret;
+ if (!last_in_batch && cmd->queue->send_list_len)
+ msg.msg_flags |= MSG_MORE;
+
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (unlikely(ret <= 0))
return ret;
@@ -672,7 +685,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
}
if (cmd->state == NVMET_TCP_SEND_DDGST) {
- ret = nvmet_try_send_ddgst(cmd);
+ ret = nvmet_try_send_ddgst(cmd, last_in_batch);
if (ret <= 0)
goto done_send;
}
@@ -794,7 +807,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
icresp->hdr.pdo = 0;
icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
- icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */
+ icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
icresp->cpda = 0;
if (queue->hdr_digest)
icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -1439,6 +1452,13 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
if (ret)
return ret;
+ if (so_priority > 0) {
+ ret = kernel_setsockopt(sock, SOL_SOCKET, SO_PRIORITY,
+ (char *)&so_priority, sizeof(so_priority));
+ if (ret)
+ return ret;
+ }
+
/* Set socket type of service */
if (inet->rcv_tos > 0) {
int tos = inet->rcv_tos;
@@ -1628,6 +1648,15 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
goto err_sock;
}
+ if (so_priority > 0) {
+ ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_PRIORITY,
+ (char *)&so_priority, sizeof(so_priority));
+ if (ret) {
+ pr_err("failed to set SO_PRIORITY sock opt %d\n", ret);
+ goto err_sock;
+ }
+ }
+
ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
sizeof(port->addr));
if (ret) {
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 35efab1ba8d9..d7b7f6d688e7 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -55,6 +55,18 @@ config NVMEM_IMX_OCOTP_SCU
This is a driver for the SCU On-Chip OTP Controller (OCOTP)
available on i.MX8 SoCs.
+config JZ4780_EFUSE
+ tristate "JZ4780 EFUSE Memory Support"
+ depends on MACH_INGENIC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ select REGMAP_MMIO
+ help
+ Say Y here to include support for JZ4780 efuse memory found on
+ all JZ4780 SoC-based devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called nvmem_jz4780_efuse.
+
config NVMEM_LPC18XX_EEPROM
tristate "NXP LPC18XX EEPROM Memory Support"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 6b466cd1427b..a7c377218341 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -6,9 +6,6 @@
obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o
-obj-$(CONFIG_NVMEM_SYSFS) += nvmem_sysfs.o
-nvmem_sysfs-y := nvmem-sysfs.o
-
# Devices
obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
nvmem-bcm-ocotp-y := bcm-ocotp.o
@@ -18,6 +15,8 @@ obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
nvmem-imx-ocotp-y := imx-ocotp.o
obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o
nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
+obj-$(CONFIG_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
+nvmem_jz4780_efuse-y := jz4780-efuse.o
obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index ef326f243f36..05c6ae4b0b97 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -18,7 +18,31 @@
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>
-#include "nvmem.h"
+
+struct nvmem_device {
+ struct module *owner;
+ struct device dev;
+ int stride;
+ int word_size;
+ int id;
+ struct kref refcnt;
+ size_t size;
+ bool read_only;
+ bool root_only;
+ int flags;
+ enum nvmem_type type;
+ struct bin_attribute eeprom;
+ struct device *base_dev;
+ struct list_head cells;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ struct gpio_desc *wp_gpio;
+ void *priv;
+};
+
+#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+
+#define FLAG_COMPAT BIT(0)
struct nvmem_cell {
const char *name;
@@ -42,6 +66,250 @@ static LIST_HEAD(nvmem_lookup_list);
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
+#ifdef CONFIG_NVMEM_SYSFS
+static const char * const nvmem_type_str[] = {
+ [NVMEM_TYPE_UNKNOWN] = "Unknown",
+ [NVMEM_TYPE_EEPROM] = "EEPROM",
+ [NVMEM_TYPE_OTP] = "OTP",
+ [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key eeprom_lock_key;
+#endif
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *nvmem_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+
+static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int rc;
+
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = container_of(kobj, struct device, kobj);
+ nvmem = to_nvmem_device(dev);
+
+ /* Stop the user from reading */
+ if (pos >= nvmem->size)
+ return 0;
+
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ if (!nvmem->reg_read)
+ return -EPERM;
+
+ rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int rc;
+
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = container_of(kobj, struct device, kobj);
+ nvmem = to_nvmem_device(dev);
+
+ /* Stop the user from writing */
+ if (pos >= nvmem->size)
+ return -EFBIG;
+
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ if (!nvmem->reg_write)
+ return -EPERM;
+
+ rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+ umode_t mode = 0400;
+
+ if (!nvmem->root_only)
+ mode |= 0044;
+
+ if (!nvmem->read_only)
+ mode |= 0200;
+
+ if (!nvmem->reg_write)
+ mode &= ~0200;
+
+ if (!nvmem->reg_read)
+ mode &= ~0444;
+
+ return mode;
+}
+
+/* default read/write permissions */
+static struct bin_attribute bin_attr_rw_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0644,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_attributes[] = {
+ &bin_attr_rw_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_group = {
+ .bin_attrs = nvmem_bin_attributes,
+ .attrs = nvmem_attrs,
+ .is_bin_visible = nvmem_bin_attr_is_visible,
+};
+
+static const struct attribute_group *nvmem_dev_groups[] = {
+ &nvmem_bin_group,
+ NULL,
+};
+
+/* read only permission */
+static struct bin_attribute bin_attr_ro_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0444,
+ },
+ .read = bin_attr_nvmem_read,
+};
+
+/* default read/write permissions, root only */
+static struct bin_attribute bin_attr_rw_root_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0600,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+/* read only permission, root only */
+static struct bin_attribute bin_attr_ro_root_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0400,
+ },
+ .read = bin_attr_nvmem_read,
+};
+
+/*
+ * nvmem_sysfs_setup_compat() - Create an additional binary entry in
+ * the driver's sysfs directory, to be backwards compatible with the
+ * older drivers/misc/eeprom drivers.
+ */
+static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ int rval;
+
+ if (!config->compat)
+ return 0;
+
+ if (!config->base_dev)
+ return -EINVAL;
+
+ if (nvmem->read_only) {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_ro_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_ro_nvmem;
+ } else {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_rw_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_rw_nvmem;
+ }
+ nvmem->eeprom.attr.name = "eeprom";
+ nvmem->eeprom.size = nvmem->size;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ nvmem->eeprom.attr.key = &eeprom_lock_key;
+#endif
+ nvmem->eeprom.private = &nvmem->dev;
+ nvmem->base_dev = config->base_dev;
+
+ rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
+ if (rval) {
+ dev_err(&nvmem->dev,
+ "Failed to create eeprom binary file %d\n", rval);
+ return rval;
+ }
+
+ nvmem->flags |= FLAG_COMPAT;
+
+ return 0;
+}
+
+static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ if (config->compat)
+ device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+}
+
+#else /* CONFIG_NVMEM_SYSFS */
+
+static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ return -ENOSYS;
+}
+static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+}
+
+#endif /* CONFIG_NVMEM_SYSFS */
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
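
A standalone recomputation of nvmem_bin_attr_is_visible() from the hunk above, handy for sanity-checking the resulting permission bits: start from 0400, widen for non-root-only and writable devices, then mask off whatever the provider's reg_read/reg_write hooks can't actually back.

#include <stdbool.h>
#include <stdio.h>

static unsigned int nvmem_mode(bool root_only, bool read_only,
			       bool has_read, bool has_write)
{
	unsigned int mode = 0400;

	if (!root_only)
		mode |= 0044;
	if (!read_only)
		mode |= 0200;
	if (!has_write)
		mode &= ~0200;
	if (!has_read)
		mode &= ~0444;
	return mode;
}

int main(void)
{
	printf("rw, world-readable: %o\n", nvmem_mode(false, false, true, true));
	printf("ro, root only:      %o\n", nvmem_mode(true, true, true, false));
	printf("write-only device:  %o\n", nvmem_mode(false, false, false, true));
	return 0;
}
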
@@ -72,6 +340,7 @@ static void nvmem_release(struct device *dev)
struct nvmem_device *nvmem = to_nvmem_device(dev);
ida_simple_remove(&nvmem_ida, nvmem->id);
+ gpiod_put(nvmem->wp_gpio);
kfree(nvmem);
}
@@ -338,6 +607,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (!config->dev)
return ERR_PTR(-EINVAL);
+ if (!config->reg_read && !config->reg_write)
+ return ERR_PTR(-EINVAL);
+
nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
if (!nvmem)
return ERR_PTR(-ENOMEM);
@@ -347,14 +619,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
kfree(nvmem);
return ERR_PTR(rval);
}
+
if (config->wp_gpio)
nvmem->wp_gpio = config->wp_gpio;
else
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
- if (IS_ERR(nvmem->wp_gpio))
- return ERR_CAST(nvmem->wp_gpio);
-
+ if (IS_ERR(nvmem->wp_gpio)) {
+ ida_simple_remove(&nvmem_ida, nvmem->id);
+ rval = PTR_ERR(nvmem->wp_gpio);
+ kfree(nvmem);
+ return ERR_PTR(rval);
+ }
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
@@ -369,6 +645,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->dev.type = &nvmem_provider_type;
nvmem->dev.bus = &nvmem_bus_type;
nvmem->dev.parent = config->dev;
+ nvmem->root_only = config->root_only;
nvmem->priv = config->priv;
nvmem->type = config->type;
nvmem->reg_read = config->reg_read;
@@ -387,13 +664,13 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
- nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);
-
- device_initialize(&nvmem->dev);
+#ifdef CONFIG_NVMEM_SYSFS
+ nvmem->dev.groups = nvmem_dev_groups;
+#endif
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
- rval = device_add(&nvmem->dev);
+ rval = device_register(&nvmem->dev);
if (rval)
goto err_put_device;
@@ -447,8 +724,7 @@ static void nvmem_device_release(struct kref *kref)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
nvmem_device_remove_all_cells(nvmem);
- device_del(&nvmem->dev);
- put_device(&nvmem->dev);
+ device_unregister(&nvmem->dev);
}
/**
@@ -1088,16 +1364,8 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
-/**
- * nvmem_cell_read_u16() - Read a cell value as an u16
- *
- * @dev: Device that requests the nvmem cell.
- * @cell_id: Name of nvmem cell to read.
- * @val: pointer to output value.
- *
- * Return: 0 on success or negative errno.
- */
-int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
+static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
+ void *val, size_t count)
{
struct nvmem_cell *cell;
void *buf;
@@ -1112,17 +1380,31 @@ int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
nvmem_cell_put(cell);
return PTR_ERR(buf);
}
- if (len != sizeof(*val)) {
+ if (len != count) {
kfree(buf);
nvmem_cell_put(cell);
return -EINVAL;
}
- memcpy(val, buf, sizeof(*val));
+ memcpy(val, buf, count);
kfree(buf);
nvmem_cell_put(cell);
return 0;
}
+
+/**
+ * nvmem_cell_read_u16() - Read a cell value as an u16
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
/**
@@ -1136,33 +1418,26 @@ EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
*/
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
- struct nvmem_cell *cell;
- void *buf;
- size_t len;
-
- cell = nvmem_cell_get(dev, cell_id);
- if (IS_ERR(cell))
- return PTR_ERR(cell);
-
- buf = nvmem_cell_read(cell, &len);
- if (IS_ERR(buf)) {
- nvmem_cell_put(cell);
- return PTR_ERR(buf);
- }
- if (len != sizeof(*val)) {
- kfree(buf);
- nvmem_cell_put(cell);
- return -EINVAL;
- }
- memcpy(val, buf, sizeof(*val));
-
- kfree(buf);
- nvmem_cell_put(cell);
- return 0;
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
/**
+ * nvmem_cell_read_u64() - Read a cell value as a u64
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
+
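+/*
+ * Example usage (editor's sketch, not part of this patch): a consumer
+ * driver holding a hypothetical "calibration" nvmem cell could read it
+ * as a u32 like so:
+ *
+ *	u32 calib;
+ *	int err = nvmem_cell_read_u32(dev, "calibration", &calib);
+ *
+ *	if (err)
+ *		return err;
+ */
+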
+/**
* nvmem_device_cell_read() - Read a given nvmem device and cell
*
* @nvmem: nvmem device to read from.
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 4ba9cc8f76df..50bea2aadc1b 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -44,6 +44,11 @@
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
+#define IMX_OCOTP_BM_CTRL_ADDR_8MP 0x000001FF
+#define IMX_OCOTP_BM_CTRL_BUSY_8MP 0x00000200
+#define IMX_OCOTP_BM_CTRL_ERROR_8MP 0x00000400
+#define IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP 0x00000800
+
#define IMX_OCOTP_BM_CTRL_DEFAULT \
{ \
.bm_addr = IMX_OCOTP_BM_CTRL_ADDR, \
@@ -52,6 +57,14 @@
.bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\
}
+#define IMX_OCOTP_BM_CTRL_8MP \
+ { \
+ .bm_addr = IMX_OCOTP_BM_CTRL_ADDR_8MP, \
+ .bm_busy = IMX_OCOTP_BM_CTRL_BUSY_8MP, \
+ .bm_error = IMX_OCOTP_BM_CTRL_ERROR_8MP, \
+ .bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP,\
+ }
+
#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */
#define TIMING_STROBE_READ_NS 37 /* Min time before read */
#define TIMING_RELAX_NS 17
@@ -193,9 +206,9 @@ read_end:
static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
{
- unsigned long clk_rate = 0;
+ unsigned long clk_rate;
unsigned long strobe_read, relax, strobe_prog;
- u32 timing = 0;
+ u32 timing;
/* 47.3.1.3.1
* Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
@@ -245,9 +258,9 @@ static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv)
{
- unsigned long clk_rate = 0;
+ unsigned long clk_rate;
u64 fsource, strobe_prog;
- u32 timing = 0;
+ u32 timing;
/* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1
* 6.4.3.3
@@ -520,6 +533,13 @@ static const struct ocotp_params imx8mn_params = {
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
+static const struct ocotp_params imx8mp_params = {
+ .nregs = 384,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_8MP,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
@@ -532,6 +552,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
{ .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params },
{ .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params },
+ { .compatible = "fsl,imx8mp-ocotp", .data = &imx8mp_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/jz4780-efuse.c b/drivers/nvmem/jz4780-efuse.c
new file mode 100644
index 000000000000..512e1872ba36
--- /dev/null
+++ b/drivers/nvmem/jz4780-efuse.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * JZ4780 EFUSE Memory Support driver
+ *
+ * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+ * Copyright (c) 2020 H. Nikolaus Schaller <hns@goldelico.com>
+ */
+
+/*
+ * Currently supports the JZ4780 efuse, which has 8K programmable bits.
+ * The efuse is separated into seven segments as shown below:
+ *
+ * -----------------------------------------------------------------------
+ * | 64 bit | 128 bit | 128 bit | 3520 bit | 8 bit | 2296 bit | 2048 bit |
+ * -----------------------------------------------------------------------
+ *
+ * The ROM itself is accessed using a 9-bit address line and an 8-word-wide
+ * bus which reads/writes based on strobes. The strobe is configured in the
+ * config register and is based on the number of bus-clock cycles.
+ *
+ * The driver supports reads only, as the fuses are programmed in the factory.
+ */
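+
+/*
+ * Example (editor's note; the exact device name below is an assumption,
+ * since the nvmem core appends an instance id to the config name): the
+ * fuse contents can be dumped from userspace via the nvmem sysfs
+ * interface, e.g.
+ *
+ *	hexdump /sys/bus/nvmem/devices/jz4780-efuse0/nvmem
+ */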
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/timer.h>
+
+#define JZ_EFUCTRL (0x0) /* Control Register */
+#define JZ_EFUCFG (0x4) /* Configure Register */
+#define JZ_EFUSTATE (0x8) /* Status Register */
+#define JZ_EFUDATA(n) (0xC + (n) * 4)
+
+/* We read 32-byte chunks to avoid complexity in the driver. */
+#define JZ_EFU_READ_SIZE 32
+
+#define EFUCTRL_ADDR_MASK 0x3FF
+#define EFUCTRL_ADDR_SHIFT 21
+#define EFUCTRL_LEN_MASK 0x1F
+#define EFUCTRL_LEN_SHIFT 16
+#define EFUCTRL_PG_EN BIT(15)
+#define EFUCTRL_WR_EN BIT(1)
+#define EFUCTRL_RD_EN BIT(0)
+
+#define EFUCFG_INT_EN BIT(31)
+#define EFUCFG_RD_ADJ_MASK 0xF
+#define EFUCFG_RD_ADJ_SHIFT 20
+#define EFUCFG_RD_STR_MASK 0xF
+#define EFUCFG_RD_STR_SHIFT 16
+#define EFUCFG_WR_ADJ_MASK 0xF
+#define EFUCFG_WR_ADJ_SHIFT 12
+#define EFUCFG_WR_STR_MASK 0xFFF
+#define EFUCFG_WR_STR_SHIFT 0
+
+#define EFUSTATE_WR_DONE BIT(1)
+#define EFUSTATE_RD_DONE BIT(0)
+
+struct jz4780_efuse {
+ struct device *dev;
+ struct regmap *map;
+ struct clk *clk;
+};
+
+/* main entry point */
+static int jz4780_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct jz4780_efuse *efuse = context;
+
+ while (bytes > 0) {
+ size_t start = offset & ~(JZ_EFU_READ_SIZE - 1);
+ size_t chunk = min(bytes, (start + JZ_EFU_READ_SIZE)
+ - offset);
+ char buf[JZ_EFU_READ_SIZE];
+ unsigned int tmp;
+ u32 ctrl;
+ int ret;
+
+ ctrl = (start << EFUCTRL_ADDR_SHIFT)
+ | ((JZ_EFU_READ_SIZE - 1) << EFUCTRL_LEN_SHIFT)
+ | EFUCTRL_RD_EN;
+
+ regmap_update_bits(efuse->map, JZ_EFUCTRL,
+ (EFUCTRL_ADDR_MASK << EFUCTRL_ADDR_SHIFT) |
+ (EFUCTRL_LEN_MASK << EFUCTRL_LEN_SHIFT) |
+ EFUCTRL_PG_EN | EFUCTRL_WR_EN |
+ EFUCTRL_RD_EN,
+ ctrl);
+
+ ret = regmap_read_poll_timeout(efuse->map, JZ_EFUSTATE,
+ tmp, tmp & EFUSTATE_RD_DONE,
+ 1 * MSEC_PER_SEC,
+ 50 * MSEC_PER_SEC);
+ if (ret < 0) {
+ dev_err(efuse->dev, "Timed out while reading efuse data\n");
+ return ret;
+ }
+
+ ret = regmap_bulk_read(efuse->map, JZ_EFUDATA(0),
+ buf, JZ_EFU_READ_SIZE / sizeof(u32));
+ if (ret < 0)
+ return ret;
+
+ memcpy(val, &buf[offset - start], chunk);
+
+ val += chunk;
+ offset += chunk;
+ bytes -= chunk;
+ }
+
+ return 0;
+}
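+
+/*
+ * Worked example of the chunking above (editor's illustration): for
+ * offset = 0x2a and bytes = 0x10, start = 0x2a & ~31 = 0x20 and
+ * chunk = min(0x10, 0x40 - 0x2a) = 0x10, so one 32-byte block is read
+ * at 0x20 and the 16 requested bytes are copied from buf[0x0a] onward.
+ */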
+
+static struct nvmem_config jz4780_efuse_nvmem_config = {
+ .name = "jz4780-efuse",
+ .size = 1024,
+ .word_size = 1,
+ .stride = 1,
+ .owner = THIS_MODULE,
+ .reg_read = jz4780_efuse_read,
+};
+
+static const struct regmap_config jz4780_efuse_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = JZ_EFUDATA(7),
+};
+
+static void clk_disable_unprepare_helper(void *clock)
+{
+ clk_disable_unprepare(clock);
+}
+
+static int jz4780_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ struct jz4780_efuse *efuse;
+ struct nvmem_config cfg;
+ unsigned long clk_rate;
+ unsigned long rd_adj;
+ unsigned long rd_strobe;
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ int ret;
+
+ efuse = devm_kzalloc(dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ efuse->map = devm_regmap_init_mmio(dev, regs,
+ &jz4780_efuse_regmap_config);
+ if (IS_ERR(efuse->map))
+ return PTR_ERR(efuse->map);
+
+ efuse->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(efuse->clk))
+ return PTR_ERR(efuse->clk);
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ clk_disable_unprepare_helper,
+ efuse->clk);
+ if (ret < 0)
+ return ret;
+
+ clk_rate = clk_get_rate(efuse->clk);
+
+ efuse->dev = dev;
+
+ /*
+ * rd_adj and rd_strobe are 4-bit values
+ * conditions:
+ * bus clk_period * (rd_adj + 1) > 6.5ns
+ * bus clk_period * (rd_adj + 5 + rd_strobe) > 35ns
+ * i.e. rd_adj >= 6.5ns / clk_period
+ * i.e. rd_strobe >= 35 ns / clk_period - 5 - rd_adj + 1
+ * constants:
+ * 1 / 6.5ns == 153846154 Hz
+ * 1 / 35ns == 28571429 Hz
+ */
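+
+	/*
+	 * For example (editor's illustration), with a 250 MHz bus clock
+	 * (clk_period = 4 ns): rd_adj = 250000000 / 153846154 = 1 and
+	 * rd_strobe = 250000000 / 28571429 - 5 - 1 + 1 = 3, which
+	 * satisfies 4 ns * (1 + 1) = 8 ns > 6.5 ns and
+	 * 4 ns * (1 + 5 + 3) = 36 ns > 35 ns.
+	 */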
+
+ rd_adj = clk_rate / 153846154;
+ rd_strobe = clk_rate / 28571429 - 5 - rd_adj + 1;
+
+ if (rd_adj > EFUCFG_RD_ADJ_MASK ||
+ rd_strobe > EFUCFG_RD_STR_MASK) {
+ dev_err(&pdev->dev, "Cannot set clock configuration\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(efuse->map, JZ_EFUCFG,
+ (EFUCFG_RD_ADJ_MASK << EFUCFG_RD_ADJ_SHIFT) |
+ (EFUCFG_RD_STR_MASK << EFUCFG_RD_STR_SHIFT),
+ (rd_adj << EFUCFG_RD_ADJ_SHIFT) |
+ (rd_strobe << EFUCFG_RD_STR_SHIFT));
+
+ cfg = jz4780_efuse_nvmem_config;
+ cfg.dev = &pdev->dev;
+ cfg.priv = efuse;
+
+ nvmem = devm_nvmem_register(dev, &cfg);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ return 0;
+}
+
+static const struct of_device_id jz4780_efuse_match[] = {
+ { .compatible = "ingenic,jz4780-efuse" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jz4780_efuse_match);
+
+static struct platform_driver jz4780_efuse_driver = {
+ .probe = jz4780_efuse_probe,
+ .driver = {
+ .name = "jz4780-efuse",
+ .of_match_table = jz4780_efuse_match,
+ },
+};
+module_platform_driver(jz4780_efuse_driver);
+
+MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>");
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4780 efuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 8e4898dec002..588ab56d75b7 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -130,6 +130,11 @@ static const struct of_device_id mxs_ocotp_match[] = {
};
MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
+static void mxs_ocotp_action(void *data)
+{
+ clk_unprepare(data);
+}
+
static int mxs_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -160,39 +165,26 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
return ret;
}
+ ret = devm_add_action_or_reset(&pdev->dev, mxs_ocotp_action, otp->clk);
+ if (ret)
+ return ret;
+
data = match->data;
ocotp_config.size = data->size;
ocotp_config.priv = otp;
ocotp_config.dev = dev;
otp->nvmem = devm_nvmem_register(dev, &ocotp_config);
- if (IS_ERR(otp->nvmem)) {
- ret = PTR_ERR(otp->nvmem);
- goto err_clk;
- }
+ if (IS_ERR(otp->nvmem))
+ return PTR_ERR(otp->nvmem);
platform_set_drvdata(pdev, otp);
return 0;
-
-err_clk:
- clk_unprepare(otp->clk);
-
- return ret;
-}
-
-static int mxs_ocotp_remove(struct platform_device *pdev)
-{
- struct mxs_ocotp *otp = platform_get_drvdata(pdev);
-
- clk_unprepare(otp->clk);
-
- return 0;
}
static struct platform_driver mxs_ocotp_driver = {
.probe = mxs_ocotp_probe,
- .remove = mxs_ocotp_remove,
.driver = {
.name = "mxs-ocotp",
.of_match_table = mxs_ocotp_match,
diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
deleted file mode 100644
index 9e0c429cd08a..000000000000
--- a/drivers/nvmem/nvmem-sysfs.c
+++ /dev/null
@@ -1,263 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019, Linaro Limited
- */
-#include "nvmem.h"
-
-static const char * const nvmem_type_str[] = {
- [NVMEM_TYPE_UNKNOWN] = "Unknown",
- [NVMEM_TYPE_EEPROM] = "EEPROM",
- [NVMEM_TYPE_OTP] = "OTP",
- [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key eeprom_lock_key;
-#endif
-
-static ssize_t type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvmem_device *nvmem = to_nvmem_device(dev);
-
- return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
-}
-
-static DEVICE_ATTR_RO(type);
-
-static struct attribute *nvmem_attrs[] = {
- &dev_attr_type.attr,
- NULL,
-};
-
-static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t pos, size_t count)
-{
- struct device *dev;
- struct nvmem_device *nvmem;
- int rc;
-
- if (attr->private)
- dev = attr->private;
- else
- dev = container_of(kobj, struct device, kobj);
- nvmem = to_nvmem_device(dev);
-
- /* Stop the user from reading */
- if (pos >= nvmem->size)
- return 0;
-
- if (count < nvmem->word_size)
- return -EINVAL;
-
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
- count = round_down(count, nvmem->word_size);
-
- rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
-
- if (rc)
- return rc;
-
- return count;
-}
-
-static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t pos, size_t count)
-{
- struct device *dev;
- struct nvmem_device *nvmem;
- int rc;
-
- if (attr->private)
- dev = attr->private;
- else
- dev = container_of(kobj, struct device, kobj);
- nvmem = to_nvmem_device(dev);
-
- /* Stop the user from writing */
- if (pos >= nvmem->size)
- return -EFBIG;
-
- if (count < nvmem->word_size)
- return -EINVAL;
-
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
- count = round_down(count, nvmem->word_size);
-
- rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
-
- if (rc)
- return rc;
-
- return count;
-}
-
-/* default read/write permissions */
-static struct bin_attribute bin_attr_rw_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0644,
- },
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
-};
-
-static struct bin_attribute *nvmem_bin_rw_attributes[] = {
- &bin_attr_rw_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_rw_group = {
- .bin_attrs = nvmem_bin_rw_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_rw_dev_groups[] = {
- &nvmem_bin_rw_group,
- NULL,
-};
-
-/* read only permission */
-static struct bin_attribute bin_attr_ro_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0444,
- },
- .read = bin_attr_nvmem_read,
-};
-
-static struct bin_attribute *nvmem_bin_ro_attributes[] = {
- &bin_attr_ro_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_ro_group = {
- .bin_attrs = nvmem_bin_ro_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_ro_dev_groups[] = {
- &nvmem_bin_ro_group,
- NULL,
-};
-
-/* default read/write permissions, root only */
-static struct bin_attribute bin_attr_rw_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0600,
- },
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
-};
-
-static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
- &bin_attr_rw_root_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_rw_root_group = {
- .bin_attrs = nvmem_bin_rw_root_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
- &nvmem_bin_rw_root_group,
- NULL,
-};
-
-/* read only permission, root only */
-static struct bin_attribute bin_attr_ro_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0400,
- },
- .read = bin_attr_nvmem_read,
-};
-
-static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
- &bin_attr_ro_root_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_ro_root_group = {
- .bin_attrs = nvmem_bin_ro_root_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
- &nvmem_bin_ro_root_group,
- NULL,
-};
-
-const struct attribute_group **nvmem_sysfs_get_groups(
- struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- if (config->root_only)
- return nvmem->read_only ?
- nvmem_ro_root_dev_groups :
- nvmem_rw_root_dev_groups;
-
- return nvmem->read_only ? nvmem_ro_dev_groups : nvmem_rw_dev_groups;
-}
-
-/*
- * nvmem_setup_compat() - Create an additional binary entry in
- * drivers sys directory, to be backwards compatible with the older
- * drivers/misc/eeprom drivers.
- */
-int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- int rval;
-
- if (!config->compat)
- return 0;
-
- if (!config->base_dev)
- return -EINVAL;
-
- if (nvmem->read_only) {
- if (config->root_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_ro_nvmem;
- } else {
- if (config->root_only)
- nvmem->eeprom = bin_attr_rw_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_nvmem;
- }
- nvmem->eeprom.attr.name = "eeprom";
- nvmem->eeprom.size = nvmem->size;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- nvmem->eeprom.attr.key = &eeprom_lock_key;
-#endif
- nvmem->eeprom.private = &nvmem->dev;
- nvmem->base_dev = config->base_dev;
-
- rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
- if (rval) {
- dev_err(&nvmem->dev,
- "Failed to create eeprom binary file %d\n", rval);
- return rval;
- }
-
- nvmem->flags |= FLAG_COMPAT;
-
- return 0;
-}
-
-void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- if (config->compat)
- device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
-}
diff --git a/drivers/nvmem/nvmem.h b/drivers/nvmem/nvmem.h
deleted file mode 100644
index be0d66d75c8a..000000000000
--- a/drivers/nvmem/nvmem.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _DRIVERS_NVMEM_H
-#define _DRIVERS_NVMEM_H
-
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/nvmem-consumer.h>
-#include <linux/nvmem-provider.h>
-#include <linux/gpio/consumer.h>
-
-struct nvmem_device {
- struct module *owner;
- struct device dev;
- int stride;
- int word_size;
- int id;
- struct kref refcnt;
- size_t size;
- bool read_only;
- int flags;
- enum nvmem_type type;
- struct bin_attribute eeprom;
- struct device *base_dev;
- struct list_head cells;
- nvmem_reg_read_t reg_read;
- nvmem_reg_write_t reg_write;
- struct gpio_desc *wp_gpio;
- void *priv;
-};
-
-#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
-#define FLAG_COMPAT BIT(0)
-
-#ifdef CONFIG_NVMEM_SYSFS
-const struct attribute_group **nvmem_sysfs_get_groups(
- struct nvmem_device *nvmem,
- const struct nvmem_config *config);
-int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config);
-void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config);
-#else
-static inline const struct attribute_group **nvmem_sysfs_get_groups(
- struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- return NULL;
-}
-
-static inline int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- return -ENOSYS;
-}
-static inline void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
-}
-#endif /* CONFIG_NVMEM_SYSFS */
-
-#endif /* _DRIVERS_NVMEM_H */
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
index 2f1e0fbd1901..925feb21d5ad 100644
--- a/drivers/nvmem/sprd-efuse.c
+++ b/drivers/nvmem/sprd-efuse.c
@@ -217,12 +217,14 @@ static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
* Enable the auto-check function to validate if the programming is
* successful.
*/
- sprd_efuse_set_auto_check(efuse, true);
+ if (lock)
+ sprd_efuse_set_auto_check(efuse, true);
writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
/* Disable auto-check and data double after programming */
- sprd_efuse_set_auto_check(efuse, false);
+ if (lock)
+ sprd_efuse_set_auto_check(efuse, false);
sprd_efuse_set_data_double(efuse, false);
/*
@@ -237,9 +239,9 @@ static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
writel(SPRD_EFUSE_ERR_CLR_MASK,
efuse->base + SPRD_EFUSE_ERR_CLR);
ret = -EBUSY;
- } else {
+ } else if (lock) {
sprd_efuse_set_prog_lock(efuse, lock);
- writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+ writel(0, efuse->base + SPRD_EFUSE_MEM(blk));
sprd_efuse_set_prog_lock(efuse, false);
}
@@ -322,6 +324,8 @@ unlock:
static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
{
struct sprd_efuse *efuse = context;
+ bool blk_double = efuse->data->blk_double;
+ bool lock;
int ret;
ret = sprd_efuse_lock(efuse);
@@ -332,7 +336,20 @@ static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
if (ret)
goto unlock;
- ret = sprd_efuse_raw_prog(efuse, offset, false, false, val);
+ /*
+ * If the number of bytes to write equals the block width, the
+ * whole block will be programmed. In this case, lock the block
+ * so that it cannot be programmed again.
+ *
+ * If the block is only partially programmed, leave it unlocked
+ * so that it can be programmed again.
+ */
+ if (bytes < SPRD_EFUSE_BLOCK_WIDTH)
+ lock = false;
+ else
+ lock = true;
+
+ ret = sprd_efuse_raw_prog(efuse, offset, blk_double, lock, val);
clk_disable_unprepare(efuse->clk);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index e8a39c3ec4d4..8eea3f6e29a4 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -100,6 +100,28 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
return IORESOURCE_MEM;
}
+static unsigned int of_bus_pci_get_flags(const __be32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = be32_to_cpup(addr);
+
+ if (!IS_ENABLED(CONFIG_PCI))
+ return 0;
+
+ switch((w >> 24) & 0x03) {
+ case 0x01:
+ flags |= IORESOURCE_IO;
+ break;
+ case 0x02: /* 32 bits */
+ case 0x03: /* 64 bits */
+ flags |= IORESOURCE_MEM;
+ break;
+ }
+ if (w & 0x40000000)
+ flags |= IORESOURCE_PREFETCH;
+ return flags;
+}
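+
+/*
+ * For example (editor's illustration), a PCI address cell of 0x42000000
+ * decodes as ((w >> 24) & 0x03) == 0x02, i.e. 32-bit memory space, with
+ * the prefetchable bit 0x40000000 set, so the flags returned are
+ * IORESOURCE_MEM | IORESOURCE_PREFETCH.
+ */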
+
#ifdef CONFIG_PCI
/*
* PCI bus specific translator
@@ -125,25 +147,6 @@ static void of_bus_pci_count_cells(struct device_node *np,
*sizec = 2;
}
-static unsigned int of_bus_pci_get_flags(const __be32 *addr)
-{
- unsigned int flags = 0;
- u32 w = be32_to_cpup(addr);
-
- switch((w >> 24) & 0x03) {
- case 0x01:
- flags |= IORESOURCE_IO;
- break;
- case 0x02: /* 32 bits */
- case 0x03: /* 64 bits */
- flags |= IORESOURCE_MEM;
- break;
- }
- if (w & 0x40000000)
- flags |= IORESOURCE_PREFETCH;
- return flags;
-}
-
static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
int pna)
{
@@ -234,93 +237,6 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-static int parser_init(struct of_pci_range_parser *parser,
- struct device_node *node, const char *name)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
- parser->dma = !strcmp(name, "dma-ranges");
-
- parser->range = of_get_property(node, name, &rlen);
- if (parser->range == NULL)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
-
- return 0;
-}
-
-int of_pci_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- return parser_init(parser, node, "ranges");
-}
-EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
-
-int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- return parser_init(parser, node, "dma-ranges");
-}
-EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
-
-struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
- struct of_pci_range *range)
-{
- const int na = 3, ns = 2;
-
- if (!range)
- return NULL;
-
- if (!parser->range || parser->range + parser->np > parser->end)
- return NULL;
-
- range->pci_space = be32_to_cpup(parser->range);
- range->flags = of_bus_pci_get_flags(parser->range);
- range->pci_addr = of_read_number(parser->range + 1, ns);
- if (parser->dma)
- range->cpu_addr = of_translate_dma_address(parser->node,
- parser->range + na);
- else
- range->cpu_addr = of_translate_address(parser->node,
- parser->range + na);
- range->size = of_read_number(parser->range + parser->pna + na, ns);
-
- parser->range += parser->np;
-
- /* Now consume following elements while they are contiguous */
- while (parser->range + parser->np <= parser->end) {
- u32 flags;
- u64 pci_addr, cpu_addr, size;
-
- flags = of_bus_pci_get_flags(parser->range);
- pci_addr = of_read_number(parser->range + 1, ns);
- if (parser->dma)
- cpu_addr = of_translate_dma_address(parser->node,
- parser->range + na);
- else
- cpu_addr = of_translate_address(parser->node,
- parser->range + na);
- size = of_read_number(parser->range + parser->pna + na, ns);
-
- if (flags != range->flags)
- break;
- if (pci_addr != range->pci_addr + range->size ||
- cpu_addr != range->cpu_addr + range->size)
- break;
-
- range->size += size;
- parser->range += parser->np;
- }
-
- return range;
-}
-EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
-
/*
* of_pci_range_to_resource - Create a resource from an of_pci_range
* @range: the PCI range that describes the resource
@@ -775,6 +691,101 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
+static int parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node, const char *name)
+{
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->na = of_bus_n_addr_cells(node);
+ parser->ns = of_bus_n_size_cells(node);
+ parser->dma = !strcmp(name, "dma-ranges");
+
+ parser->range = of_get_property(node, name, &rlen);
+ if (parser->range == NULL)
+ return -ENOENT;
+
+ parser->end = parser->range + rlen / sizeof(__be32);
+
+ return 0;
+}
+
+int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return parser_init(parser, node, "ranges");
+}
+EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
+
+int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return parser_init(parser, node, "dma-ranges");
+}
+EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
+#define of_dma_range_parser_init of_pci_dma_range_parser_init
+
+struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
+ struct of_pci_range *range)
+{
+ int na = parser->na;
+ int ns = parser->ns;
+ int np = parser->pna + na + ns;
+
+ if (!range)
+ return NULL;
+
+ if (!parser->range || parser->range + np > parser->end)
+ return NULL;
+
+ if (parser->na == 3)
+ range->flags = of_bus_pci_get_flags(parser->range);
+ else
+ range->flags = 0;
+
+ range->pci_addr = of_read_number(parser->range, na);
+
+ if (parser->dma)
+ range->cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ range->cpu_addr = of_translate_address(parser->node,
+ parser->range + na);
+ range->size = of_read_number(parser->range + parser->pna + na, ns);
+
+ parser->range += np;
+
+ /* Now consume following elements while they are contiguous */
+ while (parser->range + np <= parser->end) {
+ u32 flags = 0;
+ u64 pci_addr, cpu_addr, size;
+
+ if (parser->na == 3)
+ flags = of_bus_pci_get_flags(parser->range);
+ pci_addr = of_read_number(parser->range, na);
+ if (parser->dma)
+ cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ cpu_addr = of_translate_address(parser->node,
+ parser->range + na);
+ size = of_read_number(parser->range + parser->pna + na, ns);
+
+ if (flags != range->flags)
+ break;
+ if (pci_addr != range->pci_addr + range->size ||
+ cpu_addr != range->cpu_addr + range->size)
+ break;
+
+ range->size += size;
+ parser->range += np;
+ }
+
+ return range;
+}
+EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
+
static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
u64 size)
{
@@ -928,10 +939,12 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
{
struct device_node *node = of_node_get(np);
const __be32 *ranges = NULL;
- int len, naddr, nsize, pna;
+ int len;
int ret = 0;
bool found_dma_ranges = false;
- u64 dmaaddr;
+ struct of_range_parser parser;
+ struct of_range range;
+ u64 dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
while (node) {
ranges = of_get_property(node, "dma-ranges", &len);
@@ -956,32 +969,38 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
goto out;
}
- naddr = of_bus_n_addr_cells(node);
- nsize = of_bus_n_size_cells(node);
- pna = of_n_addr_cells(node);
- if ((len / sizeof(__be32)) % (pna + naddr + nsize)) {
- ret = -EINVAL;
- goto out;
+ of_dma_range_parser_init(&parser, node);
+
+ for_each_of_range(&parser, &range) {
+ pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
+ range.bus_addr, range.cpu_addr, range.size);
+
+ if (dma_offset && range.cpu_addr - range.bus_addr != dma_offset) {
+ pr_warn("Can't handle multiple dma-ranges with different offsets on node(%pOF)\n", node);
+ /* Don't error out as we'd break some existing DTs */
+ continue;
+ }
+ dma_offset = range.cpu_addr - range.bus_addr;
+
+ /* Take lower and upper limits */
+ if (range.bus_addr < dma_start)
+ dma_start = range.bus_addr;
+ if (range.bus_addr + range.size > dma_end)
+ dma_end = range.bus_addr + range.size;
}
- /* dma-ranges format:
- * DMA addr : naddr cells
- * CPU addr : pna cells
- * size : nsize cells
- */
- dmaaddr = of_read_number(ranges, naddr);
- *paddr = of_translate_dma_address(node, ranges + naddr);
- if (*paddr == OF_BAD_ADDR) {
- pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
- dmaaddr, np);
+ if (dma_start >= dma_end) {
ret = -EINVAL;
+ pr_debug("Invalid DMA ranges configuration on node(%pOF)\n",
+ node);
goto out;
}
- *dma_addr = dmaaddr;
- *size = of_read_number(ranges + naddr + pna, nsize);
+ *dma_addr = dma_start;
+ *size = dma_end - dma_start;
+ *paddr = dma_start + dma_offset;
- pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
+ pr_debug("final: dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
*dma_addr, *paddr, *size);
out:
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 207863c151a5..edc682249c00 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -24,7 +24,7 @@ struct alias_prop {
const char *alias;
struct device_node *np;
int id;
- char stem[0];
+ char stem[];
};
#if defined(CONFIG_SPARC)
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 6bd610ee2cd7..1a84bc0d5fa8 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/memblock.h>
-#define MAX_RESERVED_REGIONS 32
+#define MAX_RESERVED_REGIONS 64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index c9219fddf44b..50bbe0edf538 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -261,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop(
of_property_set_flag(new_prop, OF_DYNAMIC);
+ kfree(target_path);
+
return new_prop;
err_free_new_prop:
diff --git a/drivers/of/property.c b/drivers/of/property.c
index e851c57a15b0..b4916dcc9e72 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1204,6 +1204,8 @@ DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
+DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
@@ -1226,6 +1228,8 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_io_channels, },
{ .parse_prop = parse_interrupt_parent, },
{ .parse_prop = parse_dmas, },
+ { .parse_prop = parse_power_domains, },
+ { .parse_prop = parse_hwlocks, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
@@ -1262,7 +1266,7 @@ static int of_link_property(struct device *dev, struct device_node *con_np,
u32 dl_flags;
if (dev->of_node == con_np)
- dl_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ dl_flags = fw_devlink_get_flags();
else
dl_flags = DL_FLAG_SYNC_STATE_ONLY;
@@ -1299,15 +1303,9 @@ static int of_link_to_suppliers(struct device *dev,
return ret;
}
-static bool of_devlink;
-core_param(of_devlink, of_devlink, bool, 0);
-
static int of_fwnode_add_links(const struct fwnode_handle *fwnode,
struct device *dev)
{
- if (!of_devlink)
- return 0;
-
if (unlikely(!is_of_node(fwnode)))
return 0;
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 83c766233181..b278ab4338ce 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -321,8 +321,11 @@ int of_resolve_phandles(struct device_node *overlay)
err = of_property_read_string(tree_symbols,
prop->name, &refpath);
- if (err)
+ if (err) {
+ pr_err("node label '%s' not found in live devicetree symbols table\n",
+ prop->name);
goto out;
+ }
refnode = of_find_node_by_path(refpath);
if (!refnode) {
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 9b6807065827..009f4045c8e4 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -21,7 +21,13 @@ obj-$(CONFIG_OF_OVERLAY) += overlay.dtb.o \
overlay_bad_add_dup_prop.dtb.o \
overlay_bad_phandle.dtb.o \
overlay_bad_symbol.dtb.o \
- overlay_base.dtb.o
+ overlay_base.dtb.o \
+ overlay_gpio_01.dtb.o \
+ overlay_gpio_02a.dtb.o \
+ overlay_gpio_02b.dtb.o \
+ overlay_gpio_03.dtb.o \
+ overlay_gpio_04a.dtb.o \
+ overlay_gpio_04b.dtb.o
# enable creation of __symbols__ node
DTC_FLAGS_overlay += -@
diff --git a/drivers/of/unittest-data/overlay_bad_add_dup_prop.dts b/drivers/of/unittest-data/overlay_bad_add_dup_prop.dts
index c190da54f175..6327d1ffb963 100644
--- a/drivers/of/unittest-data/overlay_bad_add_dup_prop.dts
+++ b/drivers/of/unittest-data/overlay_bad_add_dup_prop.dts
@@ -3,22 +3,37 @@
/plugin/;
/*
- * &electric_1/motor-1 and &spin_ctrl_1 are the same node:
- * /testcase-data-2/substation@100/motor-1
+ * &electric_1/motor-1/electric and &spin_ctrl_1/electric are the same node:
+ * /testcase-data-2/substation@100/motor-1/electric
*
* Thus the property "rpm_avail" in each fragment will
* result in an attempt to update the same property twice.
* This will result in an error and the overlay apply
* will fail.
+ *
+ * The previous version of this test did not include the extra
+ * level of node 'electric'. That resulted in the 'rpm_avail'
+ * property being located in the pre-existing node 'motor-1'.
+ * Modifying a property results in a WARNING that a memory leak
+ * will occur if the overlay is removed. Since the overlay apply
+ * fails, the memory leak does actually occur, and kmemleak will
+ * further report the memory leak if CONFIG_DEBUG_KMEMLEAK is
+ * enabled. Adding the overlay node 'electric' avoids the
+ * memory leak and thus people who use kmemleak will not
+ * have to debug this non-problem again.
*/
&electric_1 {
motor-1 {
- rpm_avail = < 100 >;
+ electric {
+ rpm_avail = < 100 >;
+ };
};
};
&spin_ctrl_1 {
- rpm_avail = < 100 200 >;
+ electric {
+ rpm_avail = < 100 200 >;
+ };
};
diff --git a/drivers/of/unittest-data/overlay_gpio_01.dts b/drivers/of/unittest-data/overlay_gpio_01.dts
new file mode 100644
index 000000000000..699ff104ae10
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_01.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@0 {
+ compatible = "unittest-gpio";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B";
+
+ line-b {
+ gpio-hog;
+ gpios = <2 0>;
+ input;
+ line-name = "line-B-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_02a.dts b/drivers/of/unittest-data/overlay_gpio_02a.dts
new file mode 100644
index 000000000000..ec59aff6ed47
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_02a.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@2 {
+ compatible = "unittest-gpio";
+ reg = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B";
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_02b.dts b/drivers/of/unittest-data/overlay_gpio_02b.dts
new file mode 100644
index 000000000000..43ce111d41ce
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_02b.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@2 {
+ line-a {
+ gpio-hog;
+ gpios = <1 0>;
+ input;
+ line-name = "line-A-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_03.dts b/drivers/of/unittest-data/overlay_gpio_03.dts
new file mode 100644
index 000000000000..6e0312340a1b
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_03.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@3 {
+ compatible = "unittest-gpio";
+ reg = <3>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B", "line-C", "line-D";
+
+ line-d {
+ gpio-hog;
+ gpios = <4 0>;
+ input;
+ line-name = "line-D-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_04a.dts b/drivers/of/unittest-data/overlay_gpio_04a.dts
new file mode 100644
index 000000000000..7b1e04ebfa7a
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_04a.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@4 {
+ compatible = "unittest-gpio";
+ reg = <4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B", "line-C", "line-D";
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_04b.dts b/drivers/of/unittest-data/overlay_gpio_04b.dts
new file mode 100644
index 000000000000..a14e95c6699a
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_04b.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@4 {
+ line-c {
+ gpio-hog;
+ gpios = <3 0>;
+ input;
+ line-name = "line-C-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 68b87587b2ef..398de04fd19c 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -24,6 +24,7 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/gpio/driver.h>
#include <linux/bitops.h>
@@ -46,6 +47,20 @@ static struct unittest_results {
failed; \
})
+/*
+ * Expected message may have a message level other than KERN_INFO.
+ * Print the expected message only if the current loglevel will allow
+ * the actual message to print.
+ *
+ * Do not use EXPECT_BEGIN() or EXPECT_END() for messages generated by
+ * pr_debug().
+ */
+#define EXPECT_BEGIN(level, fmt, ...) \
+ printk(level pr_fmt("EXPECT \\ : ") fmt, ##__VA_ARGS__)
+
+#define EXPECT_END(level, fmt, ...) \
+ printk(level pr_fmt("EXPECT / : ") fmt, ##__VA_ARGS__)
+
static void __init of_unittest_find_node_by_name(void)
{
struct device_node *np;
@@ -444,29 +459,77 @@ static void __init of_unittest_parse_phandle_with_args(void)
/* Check for missing cells property */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
+
rc = of_parse_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing", 0, &args);
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
+
rc = of_count_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing");
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for bad phandle in list */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
+
rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells", 0, &args);
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
+
rc = of_count_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells");
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for incorrectly formed argument list */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+
rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells", 1, &args);
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+
rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells");
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -577,20 +640,41 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
/* Check for missing cells,map,mask property */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: could not get #phandle-missing-cells for /testcase-data/phandle-tests/provider1");
+
rc = of_parse_phandle_with_args_map(np, "phandle-list",
"phandle-missing", 0, &args);
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: could not get #phandle-missing-cells for /testcase-data/phandle-tests/provider1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for bad phandle in list */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
+
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle",
"phandle", 0, &args);
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for incorrectly formed argument list */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
"phandle", 1, &args);
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -777,6 +861,10 @@ static void __init of_unittest_changeset(void)
unittest(!of_changeset_revert(&chgset), "revert failed\n");
of_changeset_destroy(&chgset);
+
+ of_node_put(n1);
+ of_node_put(n2);
+ of_node_put(n21);
#endif
}
@@ -1121,7 +1209,15 @@ static void __init of_unittest_platform_populate(void)
np = of_find_node_by_path("/testcase-data/testcase-device2");
pdev = of_find_device_by_node(np);
unittest(pdev, "device 2 creation failed\n");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "platform testcase-data:testcase-device2: IRQ index 0 not found");
+
irq = platform_get_irq(pdev, 0);
+
+ EXPECT_END(KERN_INFO,
+ "platform testcase-data:testcase-device2: IRQ index 0 not found");
+
unittest(irq < 0 && irq != -EPROBE_DEFER,
"device parsing error failed - %d\n", irq);
}
@@ -1151,10 +1247,13 @@ static void __init of_unittest_platform_populate(void)
of_platform_populate(np, match, NULL, &test_bus->dev);
for_each_child_of_node(np, child) {
- for_each_child_of_node(child, grandchild)
- unittest(of_find_device_by_node(grandchild),
+ for_each_child_of_node(child, grandchild) {
+ pdev = of_find_device_by_node(grandchild);
+ unittest(pdev,
"Could not create device for node '%pOFn'\n",
grandchild);
+ of_dev_put(pdev);
+ }
}
of_platform_depopulate(&test_bus->dev);
@@ -1325,6 +1424,9 @@ static int __init unittest_data_add(void)
return 0;
}
+ EXPECT_BEGIN(KERN_INFO,
+ "Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
+
/* attach the sub-tree to live tree */
np = unittest_data_node->child;
while (np) {
@@ -1335,6 +1437,9 @@ static int __init unittest_data_add(void)
np = next;
}
+ EXPECT_END(KERN_INFO,
+ "Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
+
of_overlay_mutex_unlock();
return 0;
@@ -1410,6 +1515,249 @@ static int of_path_platform_device_exists(const char *path)
return pdev != NULL;
}
+#ifdef CONFIG_OF_GPIO
+
+struct unittest_gpio_dev {
+ struct gpio_chip chip;
+};
+
+static int unittest_gpio_chip_request_count;
+static int unittest_gpio_probe_count;
+static int unittest_gpio_probe_pass_count;
+
+static int unittest_gpio_chip_request(struct gpio_chip *chip, unsigned int offset)
+{
+ unittest_gpio_chip_request_count++;
+
+ pr_debug("%s(): %s %d %d\n", __func__, chip->label, offset,
+ unittest_gpio_chip_request_count);
+ return 0;
+}
+
+static int unittest_gpio_probe(struct platform_device *pdev)
+{
+ struct unittest_gpio_dev *devptr;
+ int ret;
+
+ unittest_gpio_probe_count++;
+
+ devptr = kzalloc(sizeof(*devptr), GFP_KERNEL);
+ if (!devptr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, devptr);
+
+ devptr->chip.of_node = pdev->dev.of_node;
+ devptr->chip.label = "of-unittest-gpio";
+ devptr->chip.base = -1; /* dynamic allocation */
+ devptr->chip.ngpio = 5;
+ devptr->chip.request = unittest_gpio_chip_request;
+
+ ret = gpiochip_add_data(&devptr->chip, NULL);
+
+ unittest(!ret,
+ "gpiochip_add_data() for node @%pOF failed, ret = %d\n", devptr->chip.of_node, ret);
+
+ if (!ret)
+ unittest_gpio_probe_pass_count++;
+ return ret;
+}
+
+static int unittest_gpio_remove(struct platform_device *pdev)
+{
+ struct unittest_gpio_dev *gdev = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+
+ dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
+
+ if (!gdev)
+ return -EINVAL;
+
+ if (gdev->chip.base != -1)
+ gpiochip_remove(&gdev->chip);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(gdev);
+
+ return 0;
+}
+
+static const struct of_device_id unittest_gpio_id[] = {
+ { .compatible = "unittest-gpio", },
+ {}
+};
+
+static struct platform_driver unittest_gpio_driver = {
+ .probe = unittest_gpio_probe,
+ .remove = unittest_gpio_remove,
+ .driver = {
+ .name = "unittest-gpio",
+ .of_match_table = of_match_ptr(unittest_gpio_id),
+ },
+};
+
+static void __init of_unittest_overlay_gpio(void)
+{
+ int chip_request_count;
+ int probe_pass_count;
+ int ret;
+
+ /*
+ * tests: apply overlays before registering driver
+ * Similar to installing a driver as a module, the
+ * driver is registered after applying the overlays.
+ *
+ * The overlays are applied by overlay_data_apply()
+ * instead of of_unittest_apply_overlay() so that they
+ * will not be tracked. Thus they will not be removed
+ * by of_unittest_destroy_tracked_overlays().
+ *
+ * - apply overlay_gpio_01
+ * - apply overlay_gpio_02a
+ * - apply overlay_gpio_02b
+ * - register driver
+ *
+ * registering the driver will result in:
+ * - a probe and gpio hog processing for overlay_gpio_01
+ * - a probe for overlay_gpio_02a
+ * - gpio hog processing for overlay_gpio_02b
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ /*
+ * overlay_gpio_01 contains gpio node and child gpio hog node
+ * overlay_gpio_02a contains gpio node
+ * overlay_gpio_02b contains child gpio hog node
+ */
+
+ unittest(overlay_data_apply("overlay_gpio_01", NULL),
+ "Adding overlay 'overlay_gpio_01' failed\n");
+
+ unittest(overlay_data_apply("overlay_gpio_02a", NULL),
+ "Adding overlay 'overlay_gpio_02a' failed\n");
+
+ unittest(overlay_data_apply("overlay_gpio_02b", NULL),
+ "Adding overlay 'overlay_gpio_02b' failed\n");
+
+ /*
+ * messages are the result of the probes, after the
+ * driver is registered
+ */
+
+ EXPECT_BEGIN(KERN_INFO,
+ "GPIO line <<int>> (line-B-input) hogged as input\n");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "GPIO line <<int>> (line-A-input) hogged as input\n");
+
+ ret = platform_driver_register(&unittest_gpio_driver);
+ if (unittest(ret == 0, "could not register unittest gpio driver\n"))
+ return;
+
+ EXPECT_END(KERN_INFO,
+ "GPIO line <<int>> (line-A-input) hogged as input\n");
+ EXPECT_END(KERN_INFO,
+ "GPIO line <<int>> (line-B-input) hogged as input\n");
+
+ unittest(probe_pass_count + 2 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ unittest(chip_request_count + 2 == unittest_gpio_chip_request_count,
+ "unittest_gpio_chip_request() called %d times (expected 1 time)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+
+ /*
+ * tests: apply overlays after registering driver
+ *
+ * Similar to a driver built-in to the kernel, the
+ * driver is registered before applying the overlays.
+ *
+ * overlay_gpio_03 contains gpio node and child gpio hog node
+ *
+ * - apply overlay_gpio_03
+ *
+ * applying the overlay will result in:
+ * - a probe and gpio hog processing.
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "GPIO line <<int>> (line-D-input) hogged as input\n");
+
+ /* overlay_gpio_03 contains gpio node and child gpio hog node */
+
+ unittest(overlay_data_apply("overlay_gpio_03", NULL),
+ "Adding overlay 'overlay_gpio_03' failed\n");
+
+ EXPECT_END(KERN_INFO,
+ "GPIO line <<int>> (line-D-input) hogged as input\n");
+
+ unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
+ "unittest_gpio_chip_request() called %d times (expected 1 time)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+
+ /*
+ * overlay_gpio_04a contains gpio node
+ *
+ * - apply overlay_gpio_04a
+ *
+ * applying the overlay will result in:
+ * - a probe for overlay_gpio_04a
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ /* overlay_gpio_04a contains gpio node */
+
+ unittest(overlay_data_apply("overlay_gpio_04a", NULL),
+ "Adding overlay 'overlay_gpio_04a' failed\n");
+
+ unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ /*
+ * overlay_gpio_04b contains child gpio hog node
+ *
+ * - apply overlay_gpio_04b
+ *
+ * applying the overlay will result in:
+ * - gpio hog processing for overlay_gpio_04b
+ */
+
+ EXPECT_BEGIN(KERN_INFO,
+ "GPIO line <<int>> (line-C-input) hogged as input\n");
+
+ /* overlay_gpio_04b contains child gpio hog node */
+
+ unittest(overlay_data_apply("overlay_gpio_04b", NULL),
+ "Adding overlay 'overlay_gpio_04b' failed\n");
+
+ EXPECT_END(KERN_INFO,
+ "GPIO line <<int>> (line-C-input) hogged as input\n");
+
+ unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
+ "unittest_gpio_chip_request() called %d times (expected 1 time)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+}
+
+#else
+
+static void __init of_unittest_overlay_gpio(void)
+{
+ /* skip tests */
+}
+
+#endif
+
#if IS_BUILTIN(CONFIG_I2C)
/* get the i2c client device instantiated at the path */
@@ -1511,19 +1859,27 @@ static const char *overlay_name_from_nr(int nr)
static const char *bus_path = "/testcase-data/overlay-node/test-bus";
-/* it is guaranteed that overlay ids are assigned in sequence */
+/* FIXME: it is NOT guaranteed that overlay ids are assigned in sequence */
+
#define MAX_UNITTEST_OVERLAYS 256
static unsigned long overlay_id_bits[BITS_TO_LONGS(MAX_UNITTEST_OVERLAYS)];
static int overlay_first_id = -1;
+static long of_unittest_overlay_tracked(int id)
+{
+ if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
+ return 0;
+ return overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id);
+}
+
static void of_unittest_track_overlay(int id)
{
if (overlay_first_id < 0)
overlay_first_id = id;
id -= overlay_first_id;
- /* we shouldn't need that many */
- BUG_ON(id >= MAX_UNITTEST_OVERLAYS);
+ if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
+ return;
overlay_id_bits[BIT_WORD(id)] |= BIT_MASK(id);
}
@@ -1532,7 +1888,8 @@ static void of_unittest_untrack_overlay(int id)
if (overlay_first_id < 0)
return;
id -= overlay_first_id;
- BUG_ON(id >= MAX_UNITTEST_OVERLAYS);
+ if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
+ return;
overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
}
@@ -1548,7 +1905,7 @@ static void of_unittest_destroy_tracked_overlays(void)
defers = 0;
/* remove in reverse order */
for (id = MAX_UNITTEST_OVERLAYS - 1; id >= 0; id--) {
- if (!(overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id)))
+ if (!of_unittest_overlay_tracked(id))
continue;
ovcs_id = id + overlay_first_id;
@@ -1565,7 +1922,7 @@ static void of_unittest_destroy_tracked_overlays(void)
continue;
}
- overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
+ of_unittest_untrack_overlay(id);
}
} while (defers > 0);
}
@@ -1626,7 +1983,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
- int ret, ovcs_id;
+ int ret, ovcs_id, save_id;
/* unittest device must be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
@@ -1654,6 +2011,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
return -EINVAL;
}
+ save_id = ovcs_id;
ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "%s failed to be destroyed @\"%s\"\n",
@@ -1661,6 +2019,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
unittest_path(unittest_nr, ovtype));
return ret;
}
+ of_unittest_untrack_overlay(save_id);
/* unittest device must be again in before state */
if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) {
@@ -1677,8 +2036,18 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
/* test activation of device */
static void __init of_unittest_overlay_0(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest0/status");
+
/* device should enable */
- if (of_unittest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY))
+ ret = of_unittest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest0/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 0);
@@ -1687,28 +2056,58 @@ static void __init of_unittest_overlay_0(void)
/* test deactivation of device */
static void __init of_unittest_overlay_1(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest1/status");
+
/* device should disable */
- if (of_unittest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY))
+ ret = of_unittest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest1/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 1);
+
}
/* test activation of device */
static void __init of_unittest_overlay_2(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest2/status");
+
/* device should enable */
- if (of_unittest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY))
- return;
+ ret = of_unittest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY);
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest2/status");
+
+ if (ret)
+ return;
unittest(1, "overlay test %d passed\n", 2);
}
/* test deactivation of device */
static void __init of_unittest_overlay_3(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest3/status");
+
/* device should disable */
- if (of_unittest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY))
+ ret = of_unittest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest3/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 3);
@@ -1727,8 +2126,18 @@ static void __init of_unittest_overlay_4(void)
/* test overlay apply/revert sequence */
static void __init of_unittest_overlay_5(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest5/status");
+
/* device should disable */
- if (of_unittest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY))
+ ret = of_unittest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest5/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 5);
@@ -1742,6 +2151,8 @@ static void __init of_unittest_overlay_6(void)
int before = 0, after = 1;
const char *overlay_name;
+ int ret;
+
/* unittest device must be in before state */
for (i = 0; i < 2; i++) {
if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY)
@@ -1756,18 +2167,41 @@ static void __init of_unittest_overlay_6(void)
}
/* apply the overlays */
- for (i = 0; i < 2; i++) {
- overlay_name = overlay_name_from_nr(overlay_nr + i);
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status");
+
+ overlay_name = overlay_name_from_nr(overlay_nr + 0);
- if (!overlay_data_apply(overlay_name, &ovcs_id)) {
- unittest(0, "could not apply overlay \"%s\"\n",
- overlay_name);
+ ret = overlay_data_apply(overlay_name, &ovcs_id);
+
+ if (!ret) {
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
+ return;
+ }
+ ov_id[0] = ovcs_id;
+ of_unittest_track_overlay(ov_id[0]);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status");
+
+ overlay_name = overlay_name_from_nr(overlay_nr + 1);
+
+ ret = overlay_data_apply(overlay_name, &ovcs_id);
+
+ if (!ret) {
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
- }
- ov_id[i] = ovcs_id;
- of_unittest_track_overlay(ov_id[i]);
}
+ ov_id[1] = ovcs_id;
+ of_unittest_track_overlay(ov_id[1]);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status");
+
for (i = 0; i < 2; i++) {
/* unittest device must be in after state */
@@ -1808,6 +2242,7 @@ static void __init of_unittest_overlay_6(void)
}
unittest(1, "overlay test %d passed\n", 6);
+
}
/* test overlay application in sequence */
@@ -1816,26 +2251,65 @@ static void __init of_unittest_overlay_8(void)
int i, ov_id[2], ovcs_id;
int overlay_nr = 8, unittest_nr = 8;
const char *overlay_name;
+ int ret;
/* we don't care about device state in this test */
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/status");
+
+ overlay_name = overlay_name_from_nr(overlay_nr + 0);
+
+ ret = overlay_data_apply(overlay_name, &ovcs_id);
+ if (!ret)
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/status");
+
+ if (!ret)
+ return;
+
+ ov_id[0] = ovcs_id;
+ of_unittest_track_overlay(ov_id[0]);
+
+ overlay_name = overlay_name_from_nr(overlay_nr + 1);
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/property-foo");
+
/* apply the overlays */
- for (i = 0; i < 2; i++) {
+ ret = overlay_data_apply(overlay_name, &ovcs_id);
- overlay_name = overlay_name_from_nr(overlay_nr + i);
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/property-foo");
- if (!overlay_data_apply(overlay_name, &ovcs_id)) {
- unittest(0, "could not apply overlay \"%s\"\n",
- overlay_name);
- return;
- }
- ov_id[i] = ovcs_id;
- of_unittest_track_overlay(ov_id[i]);
+ if (!ret) {
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
+ return;
}
+ ov_id[1] = ovcs_id;
+ of_unittest_track_overlay(ov_id[1]);
+
/* now try to remove first overlay (it should fail) */
ovcs_id = ov_id[0];
- if (!of_overlay_remove(&ovcs_id)) {
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: overlay #6 is not topmost");
+
+ ret = of_overlay_remove(&ovcs_id);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: overlay #6 is not topmost");
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
+
+ if (!ret) {
unittest(0, "%s was destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr + 0),
unittest_path(unittest_nr,
@@ -1867,6 +2341,7 @@ static void __init of_unittest_overlay_10(void)
/* device should disable */
ret = of_unittest_apply_overlay_check(10, 10, 0, 1, PDEV_OVERLAY);
+
if (unittest(ret == 0,
"overlay test %d failed; overlay application\n", 10))
return;
@@ -1890,6 +2365,7 @@ static void __init of_unittest_overlay_11(void)
/* device should disable */
ret = of_unittest_apply_revert_overlay_check(11, 11, 0, 1,
PDEV_OVERLAY);
+
unittest(ret == 0, "overlay test %d failed; overlay apply\n", 11);
}
@@ -2120,12 +2596,21 @@ static int of_unittest_overlay_i2c_init(void)
return ret;
ret = platform_driver_register(&unittest_i2c_bus_driver);
+
if (unittest(ret == 0,
"could not register unittest i2c bus driver\n"))
return ret;
#if IS_BUILTIN(CONFIG_I2C_MUX)
+
+ EXPECT_BEGIN(KERN_INFO,
+ "i2c i2c-1: Added multiplexed i2c bus 2");
+
ret = i2c_add_driver(&unittest_i2c_mux_driver);
+
+ EXPECT_END(KERN_INFO,
+ "i2c i2c-1: Added multiplexed i2c bus 2");
+
if (unittest(ret == 0,
"could not register unittest i2c mux driver\n"))
return ret;
@@ -2145,8 +2630,18 @@ static void of_unittest_overlay_i2c_cleanup(void)
static void __init of_unittest_overlay_i2c_12(void)
{
+ int ret;
+
/* device should enable */
- if (of_unittest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY))
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12/status");
+
+ ret = of_unittest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 12);
@@ -2155,8 +2650,18 @@ static void __init of_unittest_overlay_i2c_12(void)
/* test deactivation of device */
static void __init of_unittest_overlay_i2c_13(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13/status");
+
/* device should disable */
- if (of_unittest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY))
+ ret = of_unittest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 13);
@@ -2169,8 +2674,18 @@ static void of_unittest_overlay_i2c_14(void)
static void __init of_unittest_overlay_i2c_15(void)
{
+ int ret;
+
/* device should enable */
- if (of_unittest_apply_overlay_check(15, 15, 0, 1, I2C_OVERLAY))
+ EXPECT_BEGIN(KERN_INFO,
+ "i2c i2c-1: Added multiplexed i2c bus 3");
+
+ ret = of_unittest_apply_overlay_check(15, 15, 0, 1, I2C_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "i2c i2c-1: Added multiplexed i2c bus 3");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 15);
@@ -2242,6 +2757,8 @@ static void __init of_unittest_overlay(void)
of_unittest_overlay_i2c_cleanup();
#endif
+ of_unittest_overlay_gpio();
+
of_unittest_destroy_tracked_overlays();
out:
@@ -2295,6 +2812,12 @@ OVERLAY_INFO_EXTERN(overlay_11);
OVERLAY_INFO_EXTERN(overlay_12);
OVERLAY_INFO_EXTERN(overlay_13);
OVERLAY_INFO_EXTERN(overlay_15);
+OVERLAY_INFO_EXTERN(overlay_gpio_01);
+OVERLAY_INFO_EXTERN(overlay_gpio_02a);
+OVERLAY_INFO_EXTERN(overlay_gpio_02b);
+OVERLAY_INFO_EXTERN(overlay_gpio_03);
+OVERLAY_INFO_EXTERN(overlay_gpio_04a);
+OVERLAY_INFO_EXTERN(overlay_gpio_04b);
OVERLAY_INFO_EXTERN(overlay_bad_add_dup_node);
OVERLAY_INFO_EXTERN(overlay_bad_add_dup_prop);
OVERLAY_INFO_EXTERN(overlay_bad_phandle);
@@ -2319,6 +2842,12 @@ static struct overlay_info overlays[] = {
OVERLAY_INFO(overlay_12, 0),
OVERLAY_INFO(overlay_13, 0),
OVERLAY_INFO(overlay_15, 0),
+ OVERLAY_INFO(overlay_gpio_01, 0),
+ OVERLAY_INFO(overlay_gpio_02a, 0),
+ OVERLAY_INFO(overlay_gpio_02b, 0),
+ OVERLAY_INFO(overlay_gpio_03, 0),
+ OVERLAY_INFO(overlay_gpio_04a, 0),
+ OVERLAY_INFO(overlay_gpio_04b, 0),
OVERLAY_INFO(overlay_bad_add_dup_node, -EINVAL),
OVERLAY_INFO(overlay_bad_add_dup_prop, -EINVAL),
OVERLAY_INFO(overlay_bad_phandle, -EINVAL),
@@ -2470,6 +2999,7 @@ static __init void of_unittest_overlay_high_level(void)
struct device_node *overlay_base_symbols;
struct device_node **pprev;
struct property *prop;
+ int ret;
if (!overlay_base_root) {
unittest(0, "overlay_base_root not initialized\n");
@@ -2564,8 +3094,11 @@ static __init void of_unittest_overlay_high_level(void)
goto err_unlock;
}
if (__of_add_property(of_symbols, new_prop)) {
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
/* "name" auto-generated by unflatten */
- if (!strcmp(new_prop->name, "name"))
+ if (!strcmp(prop->name, "name"))
continue;
unittest(0, "duplicate property '%s' in overlay_base node __symbols__",
prop->name);
@@ -2584,15 +3117,86 @@ static __init void of_unittest_overlay_high_level(void)
/* now do the normal overlay usage test */
- unittest(overlay_data_apply("overlay", NULL),
- "Adding overlay 'overlay' failed\n");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/status");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/status");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@30/incline-up");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@40/incline-up");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/status");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/color");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/rate");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/hvac_2");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_left");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_right");
+
+ ret = overlay_data_apply("overlay", NULL);
+
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_right");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_left");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/hvac_2");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/rate");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/color");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/status");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@40/incline-up");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@30/incline-up");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/status");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/status");
+
+ unittest(ret, "Adding overlay 'overlay' failed\n");
+
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/controller/name");
unittest(overlay_data_apply("overlay_bad_add_dup_node", NULL),
"Adding overlay 'overlay_bad_add_dup_node' failed\n");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/controller/name");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller");
+
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/electric");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/rpm_avail");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/name");
+
unittest(overlay_data_apply("overlay_bad_add_dup_prop", NULL),
"Adding overlay 'overlay_bad_add_dup_prop' failed\n");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/name");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/rpm_avail");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/electric");
+
unittest(overlay_data_apply("overlay_bad_phandle", NULL),
"Adding overlay 'overlay_bad_phandle' failed\n");
@@ -2616,6 +3220,8 @@ static int __init of_unittest(void)
struct device_node *np;
int res;
+ pr_info("start of unittest - you will see error messages\n");
+
/* adding data for unittest */
if (IS_ENABLED(CONFIG_UML))
@@ -2634,7 +3240,6 @@ static int __init of_unittest(void)
}
of_node_put(np);
- pr_info("start of unittest - you will see error messages\n");
of_unittest_check_tree_linkage();
of_unittest_check_phandles();
of_unittest_find_node_by_name();
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index ba43e6a3dc0a..e4f01e7771a2 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -819,6 +819,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
if (unlikely(!target_freq)) {
if (opp_table->required_opp_tables) {
ret = _set_required_opps(dev, opp_table, NULL);
+ } else if (!_get_opp_count(opp_table)) {
+ return 0;
} else {
dev_err(dev, "target frequency can't be 0\n");
ret = -EINVAL;
@@ -849,6 +851,18 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
goto put_opp_table;
}
+ /*
+ * For IO devices which require an OPP on some platforms/SoCs
+ * while just needing to scale the clock on some others,
+ * we look for empty OPP tables with just a clock handle and
+ * scale only the clk. This makes dev_pm_opp_set_rate()
+ * equivalent to a clk_set_rate().
+ */
+ if (!_get_opp_count(opp_table)) {
+ ret = _generic_set_opp_clk_only(dev, clk, freq);
+ goto put_opp_table;
+ }
+
temp_freq = old_freq;
old_opp = _find_freq_ceil(opp_table, &temp_freq);
if (IS_ERR(old_opp)) {
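
With the two hunks above, an empty OPP table (a clock handle but no OPP entries) no longer fails: a zero target_freq now returns success, and a real frequency falls through to _generic_set_opp_clk_only(), making dev_pm_opp_set_rate() behave as a plain clk_set_rate() for such devices. A consumer-side sketch, assuming a driver device whose DT may or may not carry an operating-points table:

#include <linux/pm_opp.h>

/*
 * Sketch only: scale_io_device() is a hypothetical helper. With a
 * populated OPP table this resolves regulators/required-opps; with an
 * empty table it now degenerates to clk_set_rate() on the device clock.
 */
static int scale_io_device(struct device *dev, unsigned long hz)
{
        return dev_pm_opp_set_rate(dev, hz);
}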
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 9d00a24277aa..f96e5eaee87e 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -243,11 +243,6 @@ static irqreturn_t dummy_irq2_handler(int _, void *dev)
return IRQ_HANDLED;
}
-static struct irqaction irq2_action = {
- .handler = dummy_irq2_handler,
- .name = "cascade",
-};
-
static void init_eisa_pic(void)
{
unsigned long flags;
@@ -335,7 +330,8 @@ static int __init eisa_probe(struct parisc_device *dev)
}
/* Reserve IRQ2 */
- setup_irq(2, &irq2_action);
+ if (request_irq(2, dummy_irq2_handler, 0, "cascade", NULL))
+ pr_err("Failed to request irq 2 (cascade)\n");
for (i = 0; i < 16; i++) {
irq_set_chip_and_handler(i, &eisa_interrupt_type,
handle_simple_irq);
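
setup_irq() with a file-scope struct irqaction predates the memory allocators being usable this early and is being removed tree-wide; request_irq() allocates the irqaction itself and takes the handler, flags, name and cookie directly. A minimal sketch of the replacement pattern (the handler body mirrors this driver's dummy one):

#include <linux/interrupt.h>

static irqreturn_t cascade_handler(int irq, void *dev_id)
{
        /* The cascade line never carries a real event; just ack it. */
        return IRQ_HANDLED;
}

static int reserve_cascade_irq(void)
{
        /* NULL dev_id is fine for a non-shared IRQ; free_irq(2, NULL) undoes it. */
        return request_irq(2, cascade_handler, 0, "cascade", NULL);
}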
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 3ef0bb281e7c..390e92f2d8d1 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -366,6 +366,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_enable_pasid);
/**
* pci_disable_pasid - Disable the PASID capability
@@ -390,6 +391,7 @@ void pci_disable_pasid(struct pci_dev *pdev)
pdev->pasid_enabled = 0;
}
+EXPORT_SYMBOL_GPL(pci_disable_pasid);
/**
* pci_restore_pasid_state - Restore PASID capabilities
@@ -441,6 +443,7 @@ int pci_pasid_features(struct pci_dev *pdev)
return supported;
}
+EXPORT_SYMBOL_GPL(pci_pasid_features);
#define PASID_NUMBER_SHIFT 8
#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
@@ -469,4 +472,5 @@ int pci_max_pasids(struct pci_dev *pdev)
return (1 << supported);
}
+EXPORT_SYMBOL_GPL(pci_max_pasids);
#endif /* CONFIG_PCI_PASID */
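
The new EXPORT_SYMBOL_GPL() annotations make the PASID helpers callable from modular IOMMU and device drivers. A hedged sketch of the usual call sequence from a module (demo_* name is illustrative):

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Sketch: enable PASID with whatever optional capabilities the device has. */
static int demo_enable_pasid(struct pci_dev *pdev)
{
        int features = pci_pasid_features(pdev);  /* cap bits, or -EINVAL */

        if (features < 0)
                return features;

        return pci_enable_pasid(pdev, features);
}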
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 20bf00f587bd..91bfdb784829 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -213,16 +213,6 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
-config PCIE_MOBIVEIL
- bool "Mobiveil AXI PCIe controller"
- depends on ARCH_ZYNQMP || COMPILE_TEST
- depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
- help
- Say Y here if you want to enable support for the Mobiveil AXI PCIe
- Soft IP. It has up to 8 outbound and inbound windows
- for address translation and it is a PCIe Gen4 IP.
-
config PCIE_TANGO_SMP8759
bool "Tango SMP8759 PCIe controller (DANGEROUS)"
depends on ARCH_TANGO && PCI_MSI && OF
@@ -269,5 +259,6 @@ config PCI_HYPERV_INTERFACE
have a common interface with the Hyper-V PCI frontend driver.
source "drivers/pci/controller/dwc/Kconfig"
+source "drivers/pci/controller/mobiveil/Kconfig"
source "drivers/pci/controller/cadence/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 01b2502a5323..158c59771824 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -25,12 +25,12 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
-obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
+obj-y += mobiveil/
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 0830dfcfa43a..03dcaf65d159 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -248,14 +248,37 @@ config PCI_MESON
implement the driver.
config PCIE_TEGRA194
- tristate "NVIDIA Tegra194 (and later) PCIe controller"
+ tristate
+
+config PCIE_TEGRA194_HOST
+ tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode"
depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in host mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_TEGRA194_EP
+ tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
help
- Say Y here if you want support for DesignWare core based PCIe host
- controller found in NVIDIA Tegra194 SoC.
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in endpoint mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe controllers"
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 9bf7fa99b103..3b0e58f2de58 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -215,10 +215,6 @@ static int dra7xx_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
- .host_init = dra7xx_pcie_host_init,
-};
-
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -233,43 +229,77 @@ static const struct irq_domain_ops intx_domain_ops = {
.xlate = pci_irqd_intx_xlate,
};
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+ unsigned long val;
+ int pos, irq;
- if (!pcie_intc_node) {
- dev_err(dev, "No PCIe Intc node found\n");
- return -ENODEV;
- }
+ val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (index * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!val)
+ return 0;
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
- of_node_put(pcie_intc_node);
- if (!dra7xx->irq_domain) {
- dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return -ENODEV;
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
+ while (pos != MAX_MSI_IRQS_PER_CTRL) {
+ irq = irq_find_mapping(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
+ generic_handle_irq(irq);
+ pos++;
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
}
- return 0;
+ return 1;
}
-static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
- struct dra7xx_pcie *dra7xx = arg;
- struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret, i, count, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /*
+ * Need to make sure all MSI status bits read 0 before exiting.
+ * Else, new MSI IRQs are not registered by the wrapper. Have an
+ * upper bound for the loop and exit the handler in case of an IRQ
+ * flood to avoid locking up the system in interrupt context.
+ */
+ count = 0;
+ do {
+ ret = 0;
+
+ for (i = 0; i < num_ctrls; i++)
+ ret |= dra7xx_pcie_handle_msi(pp, i);
+ count++;
+ } while (ret && count <= 1000);
+
+ if (count > 1000)
+ dev_warn_ratelimited(pci->dev,
+ "Too many MSI IRQs to handle\n");
+}
+
+static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dra7xx_pcie *dra7xx;
+ struct dw_pcie *pci;
+ struct pcie_port *pp;
unsigned long reg;
u32 virq, bit;
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ pci = to_dw_pcie_from_pp(pp);
+ dra7xx = to_dra7xx_pcie(pci);
+
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
switch (reg) {
case MSI:
- dw_handle_msi_irq(pp);
+ dra7xx_pcie_handle_msi_irq(pp);
break;
case INTA:
case INTB:
@@ -283,9 +313,7 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
break;
}
- dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
-
- return IRQ_HANDLED;
+ chained_irq_exit(chip, desc);
}
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
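
The conversion above replaces a shared hard-IRQ handler with a chained flow handler: chained handlers run off the parent interrupt's flow, so they must wrap their work in chained_irq_enter()/chained_irq_exit() (which ack/eoi the parent chip as needed) and fetch their context with irq_desc_get_handler_data() instead of a dev_id argument. Reduced to its moving parts, the pattern looks like this (demo_* names are illustrative):

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>

static void demo_chained_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        void *ctx = irq_desc_get_handler_data(desc);   /* driver context */

        chained_irq_enter(chip, desc);

        /* Demux using ctx: read the status register, generic_handle_irq() each child. */
        (void)ctx;

        chained_irq_exit(chip, desc);
}

/* Installed with irq_set_chained_handler_and_data(parent_irq, demo_chained_handler, ctx). */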
@@ -347,6 +375,145 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
+ pp);
+ dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ of_node_put(pcie_intc_node);
+ if (!dra7xx->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u64 msi_target;
+
+ msi_target = (u64)pp->msi_data;
+
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+
+ msg->data = d->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)d->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
+ const struct cpumask *mask,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static void dra7xx_pcie_bottom_mask(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_mask[ctrl] |= BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
+ pp->irq_mask[ctrl]);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_mask[ctrl] &= ~BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
+ pp->irq_mask[ctrl]);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dra7xx_pcie_bottom_ack(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
+}
+
+static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
+ .name = "DRA7XX-PCI-MSI",
+ .irq_ack = dra7xx_pcie_bottom_ack,
+ .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
+ .irq_set_affinity = dra7xx_pcie_msi_set_affinity,
+ .irq_mask = dra7xx_pcie_bottom_mask,
+ .irq_unmask = dra7xx_pcie_bottom_unmask,
+};
+
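All three irq_chip callbacks above decompose the MSI hwirq identically: 32 vectors per DesignWare MSI controller block (MAX_MSI_IRQS_PER_CTRL) and 12 bytes between consecutive blocks' registers (MSI_REG_CTRL_BLOCK_SIZE: ENABLE, MASK and STATUS at 4 bytes each). A standalone check of the arithmetic:

#include <stdio.h>

/* Values as defined in pcie-designware.h. */
#define MAX_MSI_IRQS_PER_CTRL   32
#define MSI_REG_CTRL_BLOCK_SIZE 12

int main(void)
{
        unsigned int hwirq = 37;
        unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;  /* which controller block */
        unsigned int res  = ctrl * MSI_REG_CTRL_BLOCK_SIZE; /* register block offset */
        unsigned int bit  = hwirq % MAX_MSI_IRQS_PER_CTRL;  /* bit inside the block */

        printf("ctrl=%u res=%u bit=%u\n", ctrl, res, bit);  /* ctrl=1 res=12 bit=5 */
        return 0;
}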
+static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 ctrl, num_ctrls;
+
+ pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ pp->irq_mask[ctrl] = ~0;
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+
+ return dw_pcie_allocate_domains(pp);
+}
+
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+ .host_init = dra7xx_pcie_host_init,
+ .msi_host_init = dra7xx_pcie_msi_host_init,
+};
+
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -467,14 +634,6 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return pp->irq;
}
- ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "dra7-pcie-msi", dra7xx);
- if (ret) {
- dev_err(dev, "failed to request irq\n");
- return ret;
- }
-
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c8c702c494a2..790679fdfa48 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -959,6 +959,9 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
case PCI_EPC_IRQ_MSI:
dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
break;
+ case PCI_EPC_IRQ_MSIX:
+ dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ break;
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
return -EINVAL;
@@ -970,7 +973,7 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features ks_pcie_am654_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
+ .msix_capable = true,
.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
.bar_fixed_64bit = 1 << BAR_0,
.bar_fixed_size[2] = SZ_1M,
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 3772b02a5c55..3715dceca1bf 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -66,7 +66,6 @@
#define PORT_CLK_RATE 100000000UL
#define MAX_PAYLOAD_SIZE 256
#define MAX_READ_REQ_SIZE 256
-#define MESON_PCIE_PHY_POWERUP 0x1c
#define PCIE_RESET_DELAY 500
#define PCIE_SHARED_RESET 1
#define PCIE_NORMAL_RESET 0
@@ -81,26 +80,19 @@ enum pcie_data_rate {
struct meson_pcie_mem_res {
void __iomem *elbi_base;
void __iomem *cfg_base;
- void __iomem *phy_base;
};
struct meson_pcie_clk_res {
struct clk *clk;
- struct clk *mipi_gate;
struct clk *port_clk;
struct clk *general_clk;
};
struct meson_pcie_rc_reset {
- struct reset_control *phy;
struct reset_control *port;
struct reset_control *apb;
};
-struct meson_pcie_param {
- bool has_shared_phy;
-};
-
struct meson_pcie {
struct dw_pcie pci;
struct meson_pcie_mem_res mem_res;
@@ -108,7 +100,6 @@ struct meson_pcie {
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
struct phy *phy;
- const struct meson_pcie_param *param;
};
static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
@@ -130,13 +121,6 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
- if (!mp->param->has_shared_phy) {
- mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
- if (IS_ERR(mrst->phy))
- return PTR_ERR(mrst->phy);
- reset_control_deassert(mrst->phy);
- }
-
mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
if (IS_ERR(mrst->port))
return PTR_ERR(mrst->port);
@@ -162,22 +146,6 @@ static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
return devm_ioremap_resource(dev, res);
}
-static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
- struct meson_pcie *mp,
- const char *id)
-{
- struct device *dev = mp->pci.dev;
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
- if (!res) {
- dev_err(dev, "No REG resource %s\n", id);
- return ERR_PTR(-ENXIO);
- }
-
- return devm_ioremap(dev, res->start, resource_size(res));
-}
-
static int meson_pcie_get_mems(struct platform_device *pdev,
struct meson_pcie *mp)
{
@@ -189,14 +157,6 @@ static int meson_pcie_get_mems(struct platform_device *pdev,
if (IS_ERR(mp->mem_res.cfg_base))
return PTR_ERR(mp->mem_res.cfg_base);
- /* Meson AXG SoC has two PCI controllers use same phy register */
- if (!mp->param->has_shared_phy) {
- mp->mem_res.phy_base =
- meson_pcie_get_mem_shared(pdev, mp, "phy");
- if (IS_ERR(mp->mem_res.phy_base))
- return PTR_ERR(mp->mem_res.phy_base);
- }
-
return 0;
}
@@ -204,37 +164,33 @@ static int meson_pcie_power_on(struct meson_pcie *mp)
{
int ret = 0;
- if (mp->param->has_shared_phy) {
- ret = phy_init(mp->phy);
- if (ret)
- return ret;
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
- ret = phy_power_on(mp->phy);
- if (ret) {
- phy_exit(mp->phy);
- return ret;
- }
- } else
- writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
return 0;
}
+static void meson_pcie_power_off(struct meson_pcie *mp)
+{
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
+}
+
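With the AXG special-casing gone, the driver follows the standard generic-PHY pairing, which the new meson_pcie_power_off() helper makes explicit: phy_init()/phy_exit() bracket phy_power_on()/phy_power_off(), and a power-on failure unwinds the init. The canonical lifecycle, for reference (demo_* names are illustrative):

#include <linux/phy/phy.h>

static int demo_phy_up(struct phy *phy)
{
        int ret = phy_init(phy);

        if (ret)
                return ret;

        ret = phy_power_on(phy);
        if (ret)
                phy_exit(phy);          /* unwind init on failure */

        return ret;
}

static void demo_phy_down(struct phy *phy)
{
        phy_power_off(phy);
        phy_exit(phy);
}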
static int meson_pcie_reset(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
int ret = 0;
- if (mp->param->has_shared_phy) {
- ret = phy_reset(mp->phy);
- if (ret)
- return ret;
- } else {
- reset_control_assert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- reset_control_deassert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- }
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
reset_control_assert(mrst->port);
reset_control_assert(mrst->apb);
@@ -286,12 +242,6 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
- if (!mp->param->has_shared_phy) {
- res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
- if (IS_ERR(res->mipi_gate))
- return PTR_ERR(res->mipi_gate);
- }
-
res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
@@ -562,7 +512,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int meson_pcie_probe(struct platform_device *pdev)
{
- const struct meson_pcie_param *match_data;
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct meson_pcie *mp;
@@ -576,17 +525,10 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- match_data = of_device_get_match_data(dev);
- if (!match_data) {
- dev_err(dev, "failed to get match data\n");
- return -ENODEV;
- }
- mp->param = match_data;
-
- if (mp->param->has_shared_phy) {
- mp->phy = devm_phy_get(dev, "pcie");
- if (IS_ERR(mp->phy))
- return PTR_ERR(mp->phy);
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy)) {
+ dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy));
+ return PTR_ERR(mp->phy);
}
mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -636,30 +578,16 @@ static int meson_pcie_probe(struct platform_device *pdev)
return 0;
err_phy:
- if (mp->param->has_shared_phy) {
- phy_power_off(mp->phy);
- phy_exit(mp->phy);
- }
-
+ meson_pcie_power_off(mp);
return ret;
}
-static struct meson_pcie_param meson_pcie_axg_param = {
- .has_shared_phy = false,
-};
-
-static struct meson_pcie_param meson_pcie_g12a_param = {
- .has_shared_phy = true,
-};
-
static const struct of_device_id meson_pcie_of_match[] = {
{
.compatible = "amlogic,axg-pcie",
- .data = &meson_pcie_axg_param,
},
{
.compatible = "amlogic,g12a-pcie",
- .data = &meson_pcie_g12a_param,
},
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index cfeccd7e9fff..1cdcbd102ce8 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -18,6 +18,15 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
pci_epc_linkup(epc);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_init_notify(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
int flags)
@@ -125,6 +134,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
clear_bit(atu_index, ep->ib_window_map);
+ ep->epf_bar[bar] = NULL;
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
@@ -158,6 +168,7 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
dw_pcie_writel_dbi(pci, reg + 4, 0);
}
+ ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -269,7 +280,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+ enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -278,12 +290,22 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
if (!ep->msix_cap)
return -EINVAL;
+ dw_pcie_dbi_ro_wr_en(pci);
+
reg = ep->msix_cap + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= interrupts;
- dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, reg, val);
+
+ reg = ep->msix_cap + PCI_MSIX_TABLE;
+ val = offset | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
+ reg = ep->msix_cap + PCI_MSIX_PBA;
+ val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
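
PCI_MSIX_TABLE and PCI_MSIX_PBA each pack a 3-bit BAR indicator into the low bits with the 8-byte-aligned offset above it, which is why the code can OR offset and bir together directly; the PBA is laid out immediately after the table, whose size is interrupts * PCI_MSIX_ENTRY_SIZE (16 bytes per vector). A worked example of the encoding, with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

#define PCI_MSIX_ENTRY_SIZE     16      /* bytes per MSI-X table entry */

int main(void)
{
        uint32_t bir = 2;               /* table and PBA live in BAR 2 */
        uint32_t offset = 0x100;        /* 8-byte aligned: low 3 bits free for BIR */
        uint32_t interrupts = 8;        /* vectors advertised via QSIZE */

        uint32_t table_reg = offset | bir;
        uint32_t pba_reg = (offset + interrupts * PCI_MSIX_ENTRY_SIZE) | bir;

        printf("PCI_MSIX_TABLE=0x%x PCI_MSIX_PBA=0x%x\n", table_reg, pba_reg);
        /* prints: PCI_MSIX_TABLE=0x102 PCI_MSIX_PBA=0x182 */
        return 0;
}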
@@ -409,55 +431,41 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epf_msix_tbl *msix_tbl;
struct pci_epc *epc = ep->epc;
- u16 tbl_offset, bir;
- u32 bar_addr_upper, bar_addr_lower;
- u32 msg_addr_upper, msg_addr_lower;
+ struct pci_epf_bar *epf_bar;
u32 reg, msg_data, vec_ctrl;
- u64 tbl_addr, msg_addr, reg_u64;
- void __iomem *msix_tbl;
+ unsigned int aligned_offset;
+ u32 tbl_offset;
+ u64 msg_addr;
int ret;
+ u8 bir;
reg = ep->msix_cap + PCI_MSIX_TABLE;
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- reg = PCI_BASE_ADDRESS_0 + (4 * bir);
- bar_addr_upper = 0;
- bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
- reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
- if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
- bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
-
- tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
- tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
- tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
-
- msix_tbl = ioremap(ep->phys_base + tbl_addr,
- PCI_MSIX_ENTRY_SIZE);
- if (!msix_tbl)
- return -EINVAL;
-
- msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
- msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
- msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
- vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ epf_bar = ep->epf_bar[bir];
+ msix_tbl = epf_bar->addr;
+ msix_tbl = (struct pci_epf_msix_tbl *)((char *)msix_tbl + tbl_offset);
- iounmap(msix_tbl);
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+ vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
return -EPERM;
}
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ aligned_offset = msg_addr & (epc->mem->page_size - 1);
+ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem);
+ writel(msg_data, ep->msi_mem + aligned_offset);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
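
The added aligned_offset keeps the sub-page bits of the MSI-X message address: the outbound window used for the doorbell is page_size-granular, so the write has to land at the window base plus the address's offset within that page. With a 4 KiB window:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t page_size = 0x1000;    /* epc->mem->page_size, assumed 4 KiB */
        uint64_t msg_addr = 0xfee01004; /* example MSI-X message address */

        /* Sub-page offset that must be added back after mapping the window. */
        uint64_t aligned_offset = msg_addr & (page_size - 1);

        printf("aligned_offset = 0x%llx\n",
               (unsigned long long)aligned_offset);     /* prints: 0x4 */
        return 0;
}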
@@ -492,19 +500,54 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
return 0;
}
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int offset;
+ unsigned int nbars;
+ u8 hdr_type;
+ u32 reg;
int i;
+
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ dev_err(pci->dev,
+ "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+ hdr_type);
+ return -EIO;
+ }
+
+ ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+ ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
+
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
+ dw_pcie_setup(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
int ret;
- u32 reg;
void *addr;
- u8 hdr_type;
- unsigned int nbars;
- unsigned int offset;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ const struct pci_epc_features *epc_features;
if (!pci->dbi_base || !pci->dbi_base2) {
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
@@ -563,13 +606,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ep->ops->ep_init)
ep->ops->ep_init(ep);
- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
- if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
- dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
- hdr_type);
- return -EIO;
- }
-
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
@@ -587,23 +623,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
}
- ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
-
- ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
-
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
- dw_pcie_dbi_ro_wr_en(pci);
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
- dw_pcie_dbi_ro_wr_dis(pci);
+ if (ep->ops->get_features) {
+ epc_features = ep->ops->get_features(ep);
+ if (epc_features->core_init_notifier)
+ return 0;
}
- dw_pcie_setup(pci);
-
- return 0;
+ return dw_pcie_ep_init_complete(ep);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
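
This refactoring turns endpoint bring-up into two phases: dw_pcie_ep_init() keeps the pure software setup, while the DBI-touching pieces move into dw_pcie_ep_init_complete(), which is either called immediately (no core_init_notifier in the epc_features) or deferred until the controller driver knows the core is accessible, e.g. after PERST# deassert on Tegra194. A hedged sketch of the deferred path from a controller driver:

/*
 * Sketch: called from the controller driver once the DWC core is
 * powered and its DBI space is accessible (demo_* name is illustrative).
 */
static void demo_core_ready(struct dw_pcie_ep *ep)
{
        /* Program the DBI registers skipped by dw_pcie_ep_init(). */
        if (dw_pcie_ep_init_complete(ep))
                return;

        /* Let the pci_epf drivers run their CORE_INIT notifiers. */
        dw_pcie_ep_init_notify(ep);
}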
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index a22ea5982817..d6e1f397e6b0 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -233,6 +233,7 @@ struct dw_pcie_ep {
phys_addr_t msi_mem_phys;
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
struct dw_pcie_ops {
@@ -411,6 +412,8 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -428,6 +431,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
+static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+}
+
static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 5ea527a6bd9f..138e1a2d21cc 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1439,7 +1439,13 @@ static void qcom_fixup_class(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cbe95f0ea0ca..ae30a2fd3716 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -53,6 +54,7 @@
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
+#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
@@ -60,19 +62,26 @@
#define APPL_INTR_STATUS_L0 0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
+#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
+#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
#define APPL_INTR_EN_L1_0_0 0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
+#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)
#define APPL_INTR_STATUS_L1_0_0 0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
+#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
+#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)
#define APPL_INTR_STATUS_L1_1 0x2C
#define APPL_INTR_STATUS_L1_2 0x30
#define APPL_INTR_STATUS_L1_3 0x34
#define APPL_INTR_STATUS_L1_6 0x3C
#define APPL_INTR_STATUS_L1_7 0x40
+#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)
#define APPL_INTR_EN_L1_8_0 0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
@@ -103,8 +112,12 @@
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+#define APPL_MSI_CTRL_1 0xAC
+
#define APPL_MSI_CTRL_2 0xB0
+#define APPL_LEGACY_INTX 0xB8
+
#define APPL_LTR_MSG_1 0xC4
#define LTR_MSG_REQ BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT 16
@@ -205,6 +218,13 @@
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define MSIX_ADDR_MATCH_LOW_OFF 0x940
+#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
+#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)
+
+#define MSIX_ADDR_MATCH_HIGH_OFF 0x944
+#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)
+
#define PORT_LOGIC_MSIX_DOORBELL 0x948
#define CAP_SPCIE_CAP_OFF 0x154
@@ -223,6 +243,13 @@
#define GEN3_CORE_CLK_FREQ 250000000
#define GEN4_CORE_CLK_FREQ 500000000
+#define LTR_MSG_TIMEOUT (100 * 1000)
+
+#define PERST_DEBOUNCE_TIME (5 * 1000)
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_ENABLED 1
+
static const unsigned int pcie_gen_freq[] = {
GEN1_CORE_CLK_FREQ,
GEN2_CORE_CLK_FREQ,
@@ -260,6 +287,8 @@ struct tegra_pcie_dw {
struct dw_pcie pci;
struct tegra_bpmp *bpmp;
+ enum dw_pcie_device_mode mode;
+
bool supports_clkreq;
bool enable_cdm_check;
bool link_state;
@@ -283,6 +312,16 @@ struct tegra_pcie_dw {
struct phy **phys;
struct dentry *debugfs;
+
+ /* Endpoint mode specific */
+ struct gpio_desc *pex_rst_gpiod;
+ struct gpio_desc *pex_refclk_sel_gpiod;
+ unsigned int pex_rst_irq;
+ int ep_state;
+};
+
+struct tegra_pcie_dw_of_data {
+ enum dw_pcie_device_mode mode;
};
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@@ -339,8 +378,9 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
}
}
-static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
+static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
u32 val, tmp;
@@ -411,11 +451,121 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
return IRQ_HANDLED;
}
-static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+}
+
+static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, speed;
+
+ speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ PCI_EXP_LNKSTA_CLS;
+ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
- return tegra_pcie_rp_irq_handler(pcie);
+ /* If EP doesn't advertise L1SS, just return */
+ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
+ return IRQ_HANDLED;
+
+ /* Check if BME is set to '1' */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ if (val & PCI_COMMAND_MASTER) {
+ ktime_t timeout;
+
+ /* 110us for both snoop and no-snoop */
+ val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
+ val |= (val << LTR_MST_NO_SNOOP_SHIFT);
+ appl_writel(pcie, val, APPL_LTR_MSG_1);
+
+ /* Send LTR upstream */
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+
+ timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
+ for (;;) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
+ break;
+ if (ktime_after(ktime_get(), timeout))
+ break;
+ usleep_range(1000, 1100);
+ }
+ if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
+ dev_err(pcie->dev, "Failed to send LTR message\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
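The LTR wait above is the stock ktime polling idiom: take a deadline with ktime_add_us(), poll with a sleep in between, and break either when the bit clears or when ktime_after() reports the deadline has passed, then inspect the last-read value to tell success from timeout. The same idiom in isolation (the busy() callback stands in for the register read):

#include <linux/ktime.h>
#include <linux/delay.h>

/* Poll a "busy" predicate until it clears or timeout_us elapses. */
static bool demo_poll_clear(bool (*busy)(void), unsigned int timeout_us)
{
        ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);

        for (;;) {
                if (!busy())
                        return true;            /* condition cleared */
                if (ktime_after(ktime_get(), timeout))
                        return !busy();         /* one last look after timeout */
                usleep_range(1000, 1100);       /* sleepable context assumed */
        }
}

Where the polled location is a plain MMIO register, readl_poll_timeout() wraps the same loop; this driver uses it a few hunks below for the LTSSM state.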
+static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+ int spurious = 1;
+ u32 val, tmp;
+
+ val = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
+
+ if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ pex_ep_event_hot_rst_done(pcie);
+
+ if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ tmp = appl_readl(pcie, APPL_LINK_STATUS);
+ if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ dev_dbg(pcie->dev, "Link is up with Host\n");
+ dw_pcie_ep_linkup(ep);
+ }
+ }
+
+ spurious = 0;
+ }
+
+ if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
+
+ if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ return IRQ_WAKE_THREAD;
+
+ spurious = 0;
+ }
+
+ if (spurious) {
+ dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
+ val);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L0);
+ }
+
+ return IRQ_HANDLED;
}
static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -884,8 +1034,26 @@ static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
pp->num_vectors = MAX_MSI_IRQS;
}
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ enable_irq(pcie->pex_rst_irq);
+
+ return 0;
+}
+
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ disable_irq(pcie->pex_rst_irq);
+}
+
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
.link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
};
static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
@@ -986,6 +1154,40 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
pcie->enable_cdm_check =
of_property_read_bool(np, "snps,enable-cdm-check");
+ if (pcie->mode == DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* Endpoint mode specific DT entries */
+ pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie->pex_rst_gpiod)) {
+ int err = PTR_ERR(pcie->pex_rst_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get PERST GPIO: %d\n"),
+ err);
+ return err;
+ }
+
+ pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
+ "nvidia,refclk-select",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
+ int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
+ err);
+ pcie->pex_refclk_sel_gpiod = NULL;
+ }
+
return 0;
}
@@ -1017,6 +1219,34 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (enable) {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
+ req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
+ } else {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
+ req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
struct pcie_port *pp = &pcie->pci.pp;
@@ -1427,8 +1657,396 @@ fail_pm_get_sync:
return ret;
}
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_DISABLED)
+ return;
+
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (ret)
+ dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+
+ reset_control_assert(pcie->core_rst);
+
+ tegra_pcie_disable_phy(pcie);
+
+ reset_control_assert(pcie->core_apb_rst);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ pm_runtime_put_sync(pcie->dev);
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+ dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
+}
+
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = pcie->dev;
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ return;
+ }
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
+ goto fail_pll_init;
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk_enable;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
+ goto fail_core_apb_rst;
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Clear any stale interrupt statuses */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+ /* configure this core for EP mode operation */
+ val = appl_readl(pcie, APPL_DM_TYPE);
+ val &= ~APPL_DM_TYPE_MASK;
+ val |= APPL_DM_TYPE_EP;
+ appl_writel(pcie, val, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_SYS_PRE_DET_STATE;
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= APPL_CFG_MISC_SLV_EP_MODE;
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ appl_writel(pcie, pcie->atu_dma_res->start &
+ APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
+ val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+ reset_control_deassert(pcie->core_rst);
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ /* Configure N_FTS & FTS */
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
+ val &= ~(N_FTS_MASK << N_FTS_SHIFT);
+ val |= N_FTS_VAL << N_FTS_SHIFT;
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
+ val &= ~FTS_MASK;
+ val |= FTS_VAL;
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
+ /* Configure Max Speed from DT */
+ if (pcie->max_speed && pcie->max_speed != -EINVAL) {
+ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_SLS;
+ val |= pcie->max_speed;
+ dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
+ val);
+ }
+
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+ val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto fail_init_complete;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ pcie->ep_state = EP_STATE_ENABLED;
+ dev_dbg(dev, "Initialization of endpoint is completed\n");
+
+ return;
+
+fail_init_complete:
+ reset_control_assert(pcie->core_rst);
+ tegra_pcie_disable_phy(pcie);
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk_enable:
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ pm_runtime_put_sync(dev);
+}
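The error path above is the usual kernel unwind ladder: each failure label undoes exactly the steps that succeeded before it, in reverse order of setup. Reduced to its shape (all function names below are placeholders, not driver code):

static int demo_setup(void)
{
	int ret;

	ret = enable_a();
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = enable_b();
	if (ret)
		goto fail_b;

	ret = enable_c();
	if (ret)
		goto fail_c;

	return 0;

fail_c:
	disable_b();	/* undo in reverse order of setup */
fail_b:
	disable_a();
	return ret;
}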
+
+static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+
+ if (gpiod_get_value(pcie->pex_rst_gpiod))
+ pex_ep_event_pex_rst_assert(pcie);
+ else
+ pex_ep_event_pex_rst_deassert(pcie);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ /* Tegra194 supports only INTA */
+ if (irq > 1)
+ return -EINVAL;
+
+ appl_writel(pcie, 1, APPL_LEGACY_INTX);
+ usleep_range(1000, 2000);
+ appl_writel(pcie, 0, APPL_LEGACY_INTX);
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ if (unlikely(irq > 31))
+ return -EINVAL;
+
+ appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+
+ writel(irq, ep->msi_mem);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSI:
+ return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSIX:
+ return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
+
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features tegra_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = false,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[0] = SZ_1M,
+};
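For reference, the bitmasks above decode as follows (BAR_n and BIT() per the kernel's usual definitions):

/* reserved_bar: BIT(BAR_2) | BIT(BAR_3) | BIT(BAR_4) | BIT(BAR_5) = 0x3c,
 * i.e. BARs 2..5 are not available to endpoint function drivers.
 * bar_fixed_64bit: BIT(BAR_0) = 0x01, i.e. BAR 0 must be a 64-bit BAR,
 * with its size fixed at bar_fixed_size[0] = SZ_1M. */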
+
+static const struct pci_epc_features*
+tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &tegra_pcie_epc_features;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .raise_irq = tegra_pcie_ep_raise_irq,
+ .get_features = tegra_pcie_ep_get_features,
+};
+
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pcie->dev;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ char *name;
+ int ret;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+ ep->page_size = SZ_64K;
+
+ ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = gpiod_to_irq(pcie->pex_rst_gpiod);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
+ return ret;
+ }
+ pcie->pex_rst_irq = (unsigned int)ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PERST IRQ string\n");
+ return -ENOMEM;
+ }
+
+ irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+
+ ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
+ tegra_pcie_ep_pex_rst_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, (void *)pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PCIe EP work thread string\n");
+ return -ENOMEM;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
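Tying the pieces together, the PERST handling set up here interacts with the start_link/stop_link ops added earlier in the patch; the intended lifecycle, as far as it can be read from this code, is:

/*
 * tegra_pcie_config_ep()     request PERST IRQ; IRQ_NOAUTOEN keeps it masked
 * tegra_pcie_dw_start_link() enable_irq(pex_rst_irq)
 * PERST deasserted (0)       -> pex_ep_event_pex_rst_deassert(): bring up
 *                               clocks, PHY and DBI config, enable LTSSM
 * PERST asserted (1)         -> pex_ep_event_pex_rst_assert(): tear down
 * tegra_pcie_dw_stop_link()  disable_irq(pex_rst_irq)
 */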
+
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
+ const struct tegra_pcie_dw_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
struct tegra_pcie_dw *pcie;
@@ -1440,6 +2058,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
int ret;
u32 i;
+ data = of_device_get_match_data(dev);
+
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -1449,19 +2069,37 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pci->ops = &tegra_dw_pcie_ops;
pp = &pci->pp;
pcie->dev = &pdev->dev;
+ pcie->mode = (enum dw_pcie_device_mode)data->mode;
ret = tegra_pcie_dw_parse_dt(pcie);
if (ret < 0) {
- dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to parse device tree: %d\n"),
+ ret);
return ret;
}
ret = tegra_pcie_get_slot_regulators(pcie);
if (ret < 0) {
- dev_err(dev, "Failed to get slot regulators: %d\n", ret);
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to get slot regulators: %d\n"),
+ ret);
return ret;
}
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
+
pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
if (IS_ERR(pcie->pex_ctl_supply)) {
ret = PTR_ERR(pcie->pex_ctl_supply);
@@ -1557,24 +2195,49 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
- IRQF_SHARED, "tegra-pcie-intr", pcie);
- if (ret) {
- dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
- return ret;
- }
-
pcie->bpmp = tegra_bpmp_get(dev);
if (IS_ERR(pcie->bpmp))
return PTR_ERR(pcie->bpmp);
platform_set_drvdata(pdev, pcie);
- ret = tegra_pcie_config_rp(pcie);
- if (ret && ret != -ENOMEDIUM)
- goto fail;
- else
- return 0;
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
+ IRQF_SHARED, "tegra-pcie-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_rp(pcie);
+ if (ret && ret != -ENOMEDIUM)
+ goto fail;
+ else
+ return 0;
+ break;
+
+ case DW_PCIE_EP_TYPE:
+ ret = devm_request_threaded_irq(dev, pp->irq,
+ tegra_pcie_ep_hard_irq,
+ tegra_pcie_ep_irq_thread,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "tegra-pcie-ep-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_ep(pcie, pdev);
+ if (ret < 0)
+ goto fail;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
+ ret = -EINVAL;
+ }
fail:
tegra_bpmp_put(pcie->bpmp);
@@ -1593,6 +2256,8 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev)
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
tegra_bpmp_put(pcie->bpmp);
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
return 0;
}
@@ -1697,9 +2362,22 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
__deinit_controller(pcie);
}
+static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id tegra_pcie_dw_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
+ .data = &tegra_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra194-pcie-ep",
+ .data = &tegra_pcie_dw_ep_of_data,
},
{},
};
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
new file mode 100644
index 000000000000..a62d247018cf
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Mobiveil PCIe Core Support"
+ depends on PCI
+
+config PCIE_MOBIVEIL
+ bool
+
+config PCIE_MOBIVEIL_HOST
+ bool
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_MOBIVEIL
+
+config PCIE_MOBIVEIL_PLAT
+ bool "Mobiveil AXI PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want to enable support for the Mobiveil AXI PCIe
+ Soft IP, a PCIe Gen4 IP with up to 8 outbound and 8 inbound windows
+ for address translation.
+
+config PCIE_LAYERSCAPE_GEN4
+ bool "Freescale Layerscape PCIe Gen4 controller"
+ depends on PCI
+ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want PCIe Gen4 controller support on
+ Layerscape SoCs.
+endmenu
diff --git a/drivers/pci/controller/mobiveil/Makefile b/drivers/pci/controller/mobiveil/Makefile
new file mode 100644
index 000000000000..99d879de32d6
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
+obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
+obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
+obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie-layerscape-gen4.o
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
new file mode 100644
index 000000000000..a6d2190a6753
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe Gen4 host controller driver for NXP Layerscape SoCs
+ *
+ * Copyright 2019-2020 NXP
+ *
+ * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-mobiveil.h"
+
+/* LUT and PF control registers */
+#define PCIE_LUT_OFF 0x80000
+#define PCIE_PF_OFF 0xc0000
+#define PCIE_PF_INT_STAT 0x18
+#define PF_INT_STAT_PABRST BIT(31)
+
+#define PCIE_PF_DBG 0x7fc
+#define PF_DBG_LTSSM_MASK 0x3f
+#define PF_DBG_LTSSM_L0 0x2d /* L0 state */
+#define PF_DBG_WE BIT(31)
+#define PF_DBG_PABR BIT(27)
+
+#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
+
+struct ls_pcie_g4 {
+ struct mobiveil_pcie pci;
+ struct delayed_work dwork;
+ int irq;
+};
+
+static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
+{
+ return ioread32(pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
+}
+
+static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
+ u32 off, u32 val)
+{
+ iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
+}
+
+static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
+{
+ return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
+}
+
+static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
+ u32 off, u32 val)
+{
+ iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
+}
+
+static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
+{
+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
+ u32 state;
+
+ state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ state = state & PF_DBG_LTSSM_MASK;
+
+ if (state == PF_DBG_LTSSM_L0)
+ return 1;
+
+ return 0;
+}
+
+static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+
+ mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
+}
+
+static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ /* Clear the interrupt status */
+ mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT);
+
+ val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
+ PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
+ mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
+}
+
+static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ struct device *dev = &mv_pci->pdev->dev;
+ u32 val, act_stat;
+ int to = 100;
+
+ /* Poll for pab_csb_reset to set and PAB activity to clear */
+ do {
+ usleep_range(10, 15);
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
+ act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
+ } while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
+ if (to < 0) {
+ dev_err(dev, "Poll PABRST&PABACT timeout\n");
+ return -EIO;
+ }
+
+ /* clear PEX_RESET bit in PEX_PF0_DBG register */
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val |= PF_DBG_WE;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val |= PF_DBG_PABR;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val &= ~PF_DBG_WE;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ mobiveil_host_init(mv_pci, true);
+
+ to = 100;
+ while (!ls_pcie_g4_link_up(mv_pci) && to--)
+ usleep_range(200, 250);
+ if (to < 0) {
+ dev_err(dev, "PCIe link training timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
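The three PF_DBG writes above follow a write-enable bracket: PF_DBG appears to be write-protected, so the sequence is unlock, trigger, re-lock (a reading of the code, not documented register semantics):

/* 1) set PF_DBG_WE to allow writes to PF_DBG,
 * 2) set PF_DBG_PABR to trigger the PAB reset,
 * 3) clear PF_DBG_WE to write-protect PF_DBG again. */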
+
+static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
+{
+ struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & PAB_INTP_RESET) {
+ ls_pcie_g4_disable_interrupt(pcie);
+ schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
+ }
+
+ mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
+
+ return IRQ_HANDLED;
+}
+
+static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
+{
+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
+ struct platform_device *pdev = mv_pci->pdev;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "intr");
+ if (pcie->irq < 0) {
+ dev_err(dev, "Can't get 'intr' IRQ, errno = %d\n", pcie->irq);
+ return pcie->irq;
+ }
+ ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr,
+ IRQF_SHARED, pdev->name, pcie);
+ if (ret) {
+ dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ls_pcie_g4_reset(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u16 ctrl;
+
+ ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
+
+ if (!ls_pcie_g4_reinit_hw(pcie))
+ return;
+
+ ls_pcie_g4_enable_interrupt(pcie);
+}
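Putting the ISR, the delayed work, and the re-init helper together, the hot-reset recovery flow implemented above is:

/*
 * ls_pcie_g4_isr():    PAB_INTP_RESET seen -> disable PAB interrupts,
 *                      schedule_delayed_work(&pcie->dwork, ~1 ms)
 * ls_pcie_g4_reset():  clear PCI_BRIDGE_CTL_BUS_RESET, re-run
 *                      mobiveil_host_init(mv_pci, true) via
 *                      ls_pcie_g4_reinit_hw(), wait for link L0,
 *                      then re-enable interrupts on success
 */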
+
+static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
+ .interrupt_init = ls_pcie_g4_interrupt_init,
+};
+
+static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
+ .link_up = ls_pcie_g4_link_up,
+};
+
+static int __init ls_pcie_g4_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct mobiveil_pcie *mv_pci;
+ struct ls_pcie_g4 *pcie;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!of_parse_phandle(np, "msi-parent", 0)) {
+ dev_err(dev, "Failed to find msi-parent\n");
+ return -EINVAL;
+ }
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ mv_pci = &pcie->pci;
+
+ mv_pci->pdev = pdev;
+ mv_pci->ops = &ls_pcie_g4_pab_ops;
+ mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
+ mv_pci->rp.bridge = bridge;
+
+ platform_set_drvdata(pdev, pcie);
+
+ INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
+
+ ret = mobiveil_pcie_host_probe(mv_pci);
+ if (ret) {
+ dev_err(dev, "Fail to probe\n");
+ return ret;
+ }
+
+ ls_pcie_g4_enable_interrupt(pcie);
+
+ return 0;
+}
+
+static const struct of_device_id ls_pcie_g4_of_match[] = {
+ { .compatible = "fsl,lx2160a-pcie", },
+ { },
+};
+
+static struct platform_driver ls_pcie_g4_driver = {
+ .driver = {
+ .name = "layerscape-pcie-gen4",
+ .of_match_table = ls_pcie_g4_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 3a696ca45bfa..a94be264240f 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -3,10 +3,12 @@
* PCIe host controller driver for Mobiveil PCIe Host controller
*
* Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019-2020 NXP
+ *
* Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
*/
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -23,274 +25,22 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "../pci.h"
-
-/* register offsets and bit positions */
-
-/*
- * translation tables are grouped into windows, each window registers are
- * grouped into blocks of 4 or 16 registers each
- */
-#define PAB_REG_BLOCK_SIZE 16
-#define PAB_EXT_REG_BLOCK_SIZE 4
-
-#define PAB_REG_ADDR(offset, win) \
- (offset + (win * PAB_REG_BLOCK_SIZE))
-#define PAB_EXT_REG_ADDR(offset, win) \
- (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
-
-#define LTSSM_STATUS 0x0404
-#define LTSSM_STATUS_L0_MASK 0x3f
-#define LTSSM_STATUS_L0 0x2d
-
-#define PAB_CTRL 0x0808
-#define AMBA_PIO_ENABLE_SHIFT 0
-#define PEX_PIO_ENABLE_SHIFT 1
-#define PAGE_SEL_SHIFT 13
-#define PAGE_SEL_MASK 0x3f
-#define PAGE_LO_MASK 0x3ff
-#define PAGE_SEL_OFFSET_SHIFT 10
-
-#define PAB_AXI_PIO_CTRL 0x0840
-#define APIO_EN_MASK 0xf
-
-#define PAB_PEX_PIO_CTRL 0x08c0
-#define PIO_ENABLE_SHIFT 0
-
-#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
-#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
-#define PAB_INTP_INTX_MASK 0x01e0
-#define PAB_INTP_MSI_MASK 0x8
-
-#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
-#define WIN_ENABLE_SHIFT 0
-#define WIN_TYPE_SHIFT 1
-#define WIN_TYPE_MASK 0x3
-#define WIN_SIZE_MASK 0xfffffc00
-
-#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
-
-#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
-#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
-#define AXI_WINDOW_ALIGN_MASK 3
-
-#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
-#define PAB_BUS_SHIFT 24
-#define PAB_DEVICE_SHIFT 19
-#define PAB_FUNCTION_SHIFT 16
-
-#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
-#define PAB_INTP_AXI_PIO_CLASS 0x474
-
-#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
-#define AMAP_CTRL_EN_SHIFT 0
-#define AMAP_CTRL_TYPE_SHIFT 1
-#define AMAP_CTRL_TYPE_MASK 3
-
-#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
-#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
-#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
-#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
-#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
-
-/* starting offset of INTX bits in status register */
-#define PAB_INTX_START 5
-
-/* supported number of MSI interrupts */
-#define PCI_NUM_MSI 16
-
-/* MSI registers */
-#define MSI_BASE_LO_OFFSET 0x04
-#define MSI_BASE_HI_OFFSET 0x08
-#define MSI_SIZE_OFFSET 0x0c
-#define MSI_ENABLE_OFFSET 0x14
-#define MSI_STATUS_OFFSET 0x18
-#define MSI_DATA_OFFSET 0x20
-#define MSI_ADDR_L_OFFSET 0x24
-#define MSI_ADDR_H_OFFSET 0x28
-
-/* outbound and inbound window definitions */
-#define WIN_NUM_0 0
-#define WIN_NUM_1 1
-#define CFG_WINDOW_TYPE 0
-#define IO_WINDOW_TYPE 1
-#define MEM_WINDOW_TYPE 2
-#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
-#define MAX_PIO_WINDOWS 8
-
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_MIN 90000
-#define LINK_WAIT_MAX 100000
-
-#define PAGED_ADDR_BNDRY 0xc00
-#define OFFSET_TO_PAGE_ADDR(off) \
- ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
-#define OFFSET_TO_PAGE_IDX(off) \
- ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
-
-struct mobiveil_msi { /* MSI information */
- struct mutex lock; /* protect bitmap variable */
- struct irq_domain *msi_domain;
- struct irq_domain *dev_domain;
- phys_addr_t msi_pages_phys;
- int num_of_vectors;
- DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
-};
-
-struct mobiveil_pcie {
- struct platform_device *pdev;
- void __iomem *config_axi_slave_base; /* endpoint config base */
- void __iomem *csr_axi_slave_base; /* root port config base */
- void __iomem *apb_csr_base; /* MSI register base */
- phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
- struct irq_domain *intx_domain;
- raw_spinlock_t intx_mask_lock;
- int irq;
- int apio_wins;
- int ppio_wins;
- int ob_wins_configured; /* configured outbound windows */
- int ib_wins_configured; /* configured inbound windows */
- struct resource *ob_io_res;
- char root_bus_nr;
- struct mobiveil_msi msi;
-};
-
-/*
- * mobiveil_pcie_sel_page - routine to access paged register
- *
- * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
- * for this scheme to work extracted higher 6 bits of the offset will be
- * written to pg_sel field of PAB_CTRL register and rest of the lower 10
- * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
- */
-static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
-{
- u32 val;
-
- val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
- val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
- val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
-
- writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
-}
-
-static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
-{
- if (off < PAGED_ADDR_BNDRY) {
- /* For directly accessed registers, clear the pg_sel field */
- mobiveil_pcie_sel_page(pcie, 0);
- return pcie->csr_axi_slave_base + off;
- }
-
- mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
- return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
-}
-
-static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
-{
- if ((uintptr_t)addr & (size - 1)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- switch (size) {
- case 4:
- *val = readl(addr);
- break;
- case 2:
- *val = readw(addr);
- break;
- case 1:
- *val = readb(addr);
- break;
- default:
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
-{
- if ((uintptr_t)addr & (size - 1))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- switch (size) {
- case 4:
- writel(val, addr);
- break;
- case 2:
- writew(val, addr);
- break;
- case 1:
- writeb(val, addr);
- break;
- default:
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
-{
- void *addr;
- u32 val;
- int ret;
-
- addr = mobiveil_pcie_comp_addr(pcie, off);
-
- ret = mobiveil_pcie_read(addr, size, &val);
- if (ret)
- dev_err(&pcie->pdev->dev, "read CSR address failed\n");
-
- return val;
-}
-
-static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
- size_t size)
-{
- void *addr;
- int ret;
-
- addr = mobiveil_pcie_comp_addr(pcie, off);
-
- ret = mobiveil_pcie_write(addr, size, val);
- if (ret)
- dev_err(&pcie->pdev->dev, "write CSR address failed\n");
-}
-
-static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
-{
- return mobiveil_csr_read(pcie, off, 0x4);
-}
-
-static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
-{
- mobiveil_csr_write(pcie, val, off, 0x4);
-}
-
-static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
-{
- return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
- LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
-}
+#include "pcie-mobiveil.h"
static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
struct mobiveil_pcie *pcie = bus->sysdata;
+ struct mobiveil_root_port *rp = &pcie->rp;
/* Only one device down on each root port */
- if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+ if ((bus->number == rp->root_bus_nr) && (devfn > 0))
return false;
/*
* Do not read more than one device on the bus directly
* attached to RC
*/
- if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
+ if ((bus->primary == rp->root_bus_nr) && (PCI_SLOT(devfn) > 0))
return false;
return true;
@@ -304,13 +54,14 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct mobiveil_pcie *pcie = bus->sysdata;
+ struct mobiveil_root_port *rp = &pcie->rp;
u32 value;
if (!mobiveil_pcie_valid_device(bus, devfn))
return NULL;
/* RC config access */
- if (bus->number == pcie->root_bus_nr)
+ if (bus->number == rp->root_bus_nr)
return pcie->csr_axi_slave_base + where;
/*
@@ -325,7 +76,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
- return pcie->config_axi_slave_base + where;
+ return rp->config_axi_slave_base + where;
}
static struct pci_ops mobiveil_pcie_ops = {
@@ -339,7 +90,8 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
struct device *dev = &pcie->pdev->dev;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct mobiveil_msi *msi = &rp->msi;
u32 msi_data, msi_addr_lo, msi_addr_hi;
u32 intr_status, msi_status;
unsigned long shifted_status;
@@ -365,7 +117,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
shifted_status >>= PAB_INTX_START;
do {
for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
- virq = irq_find_mapping(pcie->intx_domain,
+ virq = irq_find_mapping(rp->intx_domain,
bit + 1);
if (virq)
generic_handle_irq(virq);
@@ -424,15 +176,16 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
struct device *dev = &pcie->pdev->dev;
struct platform_device *pdev = pcie->pdev;
struct device_node *node = dev->of_node;
+ struct mobiveil_root_port *rp = &pcie->rp;
struct resource *res;
/* map config resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"config_axi_slave");
- pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie->config_axi_slave_base))
- return PTR_ERR(pcie->config_axi_slave_base);
- pcie->ob_io_res = res;
+ rp->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(rp->config_axi_slave_base))
+ return PTR_ERR(rp->config_axi_slave_base);
+ rp->ob_io_res = res;
/* map csr resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -442,12 +195,6 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
return PTR_ERR(pcie->csr_axi_slave_base);
pcie->pcie_reg_base = res->start;
- /* map MSI config resource */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
- pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie->apb_csr_base))
- return PTR_ERR(pcie->apb_csr_base);
-
/* read the number of windows requested */
if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
pcie->apio_wins = MAX_PIO_WINDOWS;
@@ -455,118 +202,15 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
pcie->ppio_wins = MAX_PIO_WINDOWS;
- pcie->irq = platform_get_irq(pdev, 0);
- if (pcie->irq <= 0) {
- dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
- return -ENODEV;
- }
-
return 0;
}
-static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
- u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
-{
- u32 value;
- u64 size64 = ~(size - 1);
-
- if (win_num >= pcie->ppio_wins) {
- dev_err(&pcie->pdev->dev,
- "ERROR: max inbound windows reached !\n");
- return;
- }
-
- value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
- value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
- value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
- (lower_32_bits(size64) & WIN_SIZE_MASK);
- mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
-
- mobiveil_csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
-
- mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
- PAB_PEX_AMAP_AXI_WIN(win_num));
- mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
-
- mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_L(win_num));
- mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_H(win_num));
-
- pcie->ib_wins_configured++;
-}
-
-/*
- * routine to program the outbound windows
- */
-static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
- u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
-{
- u32 value;
- u64 size64 = ~(size - 1);
-
- if (win_num >= pcie->apio_wins) {
- dev_err(&pcie->pdev->dev,
- "ERROR: max outbound windows reached !\n");
- return;
- }
-
- /*
- * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
- * to 4 KB in PAB_AXI_AMAP_CTRL register
- */
- value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
- value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
- value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
- (lower_32_bits(size64) & WIN_SIZE_MASK);
- mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
-
- mobiveil_csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_AXI_AMAP_SIZE(win_num));
-
- /*
- * program AXI window base with appropriate value in
- * PAB_AXI_AMAP_AXI_WIN0 register
- */
- mobiveil_csr_writel(pcie,
- lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
- mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
-
- mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
- mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
-
- pcie->ob_wins_configured++;
-}
-
-static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
-{
- int retries;
-
- /* check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (mobiveil_pcie_link_up(pcie))
- return 0;
-
- usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
- }
-
- dev_err(&pcie->pdev->dev, "link never came up\n");
-
- return -ETIMEDOUT;
-}
-
static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
phys_addr_t msg_addr = pcie->pcie_reg_base;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
- pcie->msi.num_of_vectors = PCI_NUM_MSI;
+ msi->num_of_vectors = PCI_NUM_MSI;
msi->msi_pages_phys = (phys_addr_t)msg_addr;
writel_relaxed(lower_32_bits(msg_addr),
@@ -577,17 +221,23 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}
-static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct pci_host_bridge *bridge = rp->bridge;
u32 value, pab_ctrl, type;
struct resource_entry *win;
- /* setup bus numbers */
- value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
- value &= 0xff000000;
- value |= 0x00ff0100;
- mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ pcie->ib_wins_configured = 0;
+ pcie->ob_wins_configured = 0;
+
+ if (!reinit) {
+ /* setup bus numbers */
+ value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
+ value &= 0xff000000;
+ value |= 0x00ff0100;
+ mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ }
/*
* program Bus Master Enable Bit in Command Register in PAB Config
@@ -605,9 +255,6 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
- mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
-
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
@@ -629,8 +276,8 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
*/
/* config outbound translation window */
- program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
- CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
+ program_ob_windows(pcie, WIN_NUM_0, rp->ob_io_res->start, 0,
+ CFG_WINDOW_TYPE, resource_size(rp->ob_io_res));
/* memory inbound translation window */
program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
@@ -657,9 +304,6 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
value |= (PCI_CLASS_BRIDGE_PCI << 16);
mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
- /* setup MSI hardware registers */
- mobiveil_pcie_enable_msi(pcie);
-
return 0;
}
@@ -667,32 +311,36 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
{
struct irq_desc *desc = irq_to_desc(data->irq);
struct mobiveil_pcie *pcie;
+ struct mobiveil_root_port *rp;
unsigned long flags;
u32 mask, shifted_val;
pcie = irq_desc_get_chip_data(desc);
+ rp = &pcie->rp;
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
- raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val &= ~mask;
mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
- raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}
static void mobiveil_unmask_intx_irq(struct irq_data *data)
{
struct irq_desc *desc = irq_to_desc(data->irq);
struct mobiveil_pcie *pcie;
+ struct mobiveil_root_port *rp;
unsigned long flags;
u32 shifted_val, mask;
pcie = irq_desc_get_chip_data(desc);
+ rp = &pcie->rp;
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
- raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val |= mask;
mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
- raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}
static struct irq_chip intx_irq_chip = {
@@ -760,7 +408,7 @@ static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
unsigned int nr_irqs, void *args)
{
struct mobiveil_pcie *pcie = domain->host_data;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
unsigned long bit;
WARN_ON(nr_irqs != 1);
@@ -787,7 +435,7 @@ static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_lock(&msi->lock);
@@ -808,9 +456,9 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
- mutex_init(&pcie->msi.lock);
+ mutex_init(&msi->lock);
msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
&msi_domain_ops, pcie);
if (!msi->dev_domain) {
@@ -834,18 +482,19 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct device_node *node = dev->of_node;
+ struct mobiveil_root_port *rp = &pcie->rp;
int ret;
/* setup INTx */
- pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
- if (!pcie->intx_domain) {
+ if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
}
- raw_spin_lock_init(&pcie->intx_mask_lock);
+ raw_spin_lock_init(&rp->intx_mask_lock);
/* setup MSI */
ret = mobiveil_allocate_msi_domains(pcie);
@@ -855,23 +504,74 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
return 0;
}
-static int mobiveil_pcie_probe(struct platform_device *pdev)
+static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
{
- struct mobiveil_pcie *pcie;
- struct pci_bus *bus;
- struct pci_bus *child;
- struct pci_host_bridge *bridge;
+ struct platform_device *pdev = pcie->pdev;
struct device *dev = &pdev->dev;
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct resource *res;
int ret;
- /* allocate the PCIe port */
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
- if (!bridge)
- return -ENOMEM;
+ /* map MSI config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->apb_csr_base))
+ return PTR_ERR(pcie->apb_csr_base);
- pcie = pci_host_bridge_priv(bridge);
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
- pcie->pdev = pdev;
+ rp->irq = platform_get_irq(pdev, 0);
+ if (rp->irq <= 0) {
+ dev_err(dev, "failed to map IRQ: %d\n", rp->irq);
+ return -ENODEV;
+ }
+
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return ret;
+ }
+
+ irq_set_chained_handler_and_data(rp->irq, mobiveil_pcie_isr, pcie);
+
+ /* Enable interrupts */
+ mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
+
+ return 0;
+}
+
+static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
+{
+ struct mobiveil_root_port *rp = &pcie->rp;
+
+ if (rp->ops->interrupt_init)
+ return rp->ops->interrupt_init(pcie);
+
+ return mobiveil_pcie_integrated_interrupt_init(pcie);
+}
+
+static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie)
+{
+ u32 header_type;
+
+ header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE);
+ header_type &= 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
+{
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct pci_host_bridge *bridge = rp->bridge;
+ struct device *dev = &pcie->pdev->dev;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ int ret;
ret = mobiveil_pcie_parse_dt(pcie);
if (ret) {
@@ -879,6 +579,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return ret;
}
+ if (!mobiveil_pcie_is_bridge(pcie))
+ return -ENODEV;
+
/* parse the host bridge base addresses from the device tree file */
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
@@ -891,25 +594,22 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
* configure all inbound and outbound windows and prepare the RC for
* config access
*/
- ret = mobiveil_host_init(pcie);
+ ret = mobiveil_host_init(pcie, false);
if (ret) {
dev_err(dev, "Failed to initialize host\n");
return ret;
}
- /* initialize the IRQ domains */
- ret = mobiveil_pcie_init_irq_domain(pcie);
+ ret = mobiveil_pcie_interrupt_init(pcie);
if (ret) {
- dev_err(dev, "Failed creating IRQ Domain\n");
+ dev_err(dev, "Interrupt init failed\n");
return ret;
}
- irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
-
/* Initialize bridge */
bridge->dev.parent = dev;
bridge->sysdata = pcie;
- bridge->busnr = pcie->root_bus_nr;
+ bridge->busnr = rp->root_bus_nr;
bridge->ops = &mobiveil_pcie_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
@@ -934,25 +634,3 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return 0;
}
-
-static const struct of_device_id mobiveil_pcie_of_match[] = {
- {.compatible = "mbvl,gpex40-pcie",},
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
-
-static struct platform_driver mobiveil_pcie_driver = {
- .probe = mobiveil_pcie_probe,
- .driver = {
- .name = "mobiveil-pcie",
- .of_match_table = mobiveil_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-builtin_platform_driver(mobiveil_pcie_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
-MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
new file mode 100644
index 000000000000..f6fcd95c2bf5
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pcie-mobiveil.h"
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+ struct mobiveil_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+
+ /* allocate the PCIe port */
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->rp.bridge = bridge;
+
+ pcie->pdev = pdev;
+
+ return mobiveil_pcie_host_probe(pcie);
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+ {.compatible = "mbvl,gpex40-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+ .probe = mobiveil_pcie_probe,
+ .driver = {
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
new file mode 100644
index 000000000000..62ecbaeb0a60
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-mobiveil.h"
+
+/*
+ * mobiveil_pcie_sel_page - routine to access paged register
+ *
+ * Registers whose address is greater than PAGED_ADDR_BNDRY (0xc00) are paged;
+ * for this scheme to work, the upper 6 bits of the offset are written to the
+ * pg_sel field of the PAB_CTRL register, and the lower 10 bits, OR'd with
+ * PAGED_ADDR_BNDRY, are used as the offset of the register.
+ */
+static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
+{
+ u32 val;
+
+ val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
+ val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
+ val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
+
+ writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
+}
+
+static void __iomem *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie,
+ u32 off)
+{
+ if (off < PAGED_ADDR_BNDRY) {
+ /* For directly accessed registers, clear the pg_sel field */
+ mobiveil_pcie_sel_page(pcie, 0);
+ return pcie->csr_axi_slave_base + off;
+ }
+
+ mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
+ return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
+}
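As a worked example of the paging scheme (using the macros from pcie-mobiveil.h, added further below), take PAB_PEX_AMAP_CTRL(0) = 0x4ba0, which lies above PAGED_ADDR_BNDRY:

u32 off = 0x4ba0;
u8  idx = (off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK; /* 0x4ba0 >> 10 = 0x12 */
u32 pg  = (off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY;        /* 0x3a0 | 0xc00 = 0xfa0 */
/* so page 0x12 is selected via PAB_CTRL and the register is then
 * accessed at csr_axi_slave_base + 0xfa0 */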
+
+static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+ if ((uintptr_t)addr & (size - 1)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ switch (size) {
+ case 4:
+ *val = readl(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ case 1:
+ *val = readb(addr);
+ break;
+ default:
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
+{
+ if ((uintptr_t)addr & (size - 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ switch (size) {
+ case 4:
+ writel(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ case 1:
+ writeb(val, addr);
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+{
+ void __iomem *addr;
+ u32 val;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_read(addr, size, &val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "read CSR address failed\n");
+
+ return val;
+}
+
+void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size)
+{
+ void __iomem *addr;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_write(addr, size, val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "write CSR address failed\n");
+}
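The size-specific accessors used throughout the driver, such as mobiveil_csr_readl()/readw()/readb() and their write counterparts, are not visible in this hunk; judging by the code removed from the old single-file driver earlier in this patch, they are thin wrappers along these lines (a sketch, presumably living in pcie-mobiveil.h):

static inline u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
{
	return mobiveil_csr_read(pcie, off, 0x4);
}

static inline u16 mobiveil_csr_readw(struct mobiveil_pcie *pcie, u32 off)
{
	return mobiveil_csr_read(pcie, off, 0x2);
}

static inline void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
{
	mobiveil_csr_write(pcie, val, off, 0x4);
}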
+
+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+ if (pcie->ops->link_up)
+ return pcie->ops->link_up(pcie);
+
+ return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+ u32 value;
+ u64 size64 = ~(size - 1);
+
+ if (win_num >= pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max inbound windows reached !\n");
+ return;
+ }
+
+ value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+ PAB_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ib_wins_configured++;
+}
+
+/*
+ * routine to program the outbound windows
+ */
+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+ u32 value;
+ u64 size64 = ~(size - 1);
+
+ if (win_num >= pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max outbound windows reached !\n");
+ return;
+ }
+
+ /*
+ * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
+ * to 4 KB in PAB_AXI_AMAP_CTRL register
+ */
+ value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+ /*
+ * program AXI window base with appropriate value in
+ * PAB_AXI_AMAP_AXI_WIN0 register
+ */
+ mobiveil_csr_writel(pcie,
+ lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ob_wins_configured++;
+}
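Both window programmers encode the window size as a mask rather than a byte count: size64 = ~(size - 1) is valid for any power-of-two size, with the low word going into the AMAP_CTRL register and the high word into the extended size register. For example, for a 1 MiB window:

u64 size   = SZ_1M;
u64 size64 = ~(size - 1);                        /* 0xffffffff'fff00000 */
u32 lo = lower_32_bits(size64) & WIN_SIZE_MASK;  /* 0xfff00000 -> AMAP_CTRL */
u32 hi = upper_32_bits(size64);                  /* 0xffffffff -> EXT size reg */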
+
+int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (mobiveil_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ }
+
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+
+ return -ETIMEDOUT;
+}
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
new file mode 100644
index 000000000000..767e36a8522d
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#ifndef _PCIE_MOBIVEIL_H
+#define _PCIE_MOBIVEIL_H
+
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include "../../pci.h"
+
+/* register offsets and bit positions */
+
+/*
+ * Translation tables are grouped into windows; each window's registers
+ * are grouped into blocks of 4 or 16 registers each.
+ */
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
+
+#define PAB_REG_ADDR(offset, win) \
+ ((offset) + ((win) * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) \
+ ((offset) + ((win) * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
+
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_OFFSET_SHIFT 10
+
+#define PAB_ACTIVITY_STAT 0x81c
+
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
+
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
+
+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_RESET BIT(1)
+#define PAB_INTP_MSI BIT(3)
+#define PAB_INTP_INTA BIT(5)
+#define PAB_INTP_INTB BIT(6)
+#define PAB_INTP_INTC BIT(7)
+#define PAB_INTP_INTD BIT(8)
+#define PAB_INTP_PCIE_UE BIT(9)
+#define PAB_INTP_IE_PMREDI BIT(29)
+#define PAB_INTP_IE_EC BIT(30)
+#define PAB_INTP_MSI_MASK PAB_INTP_MSI
+#define PAB_INTP_INTX_MASK (PAB_INTP_INTA | PAB_INTP_INTB |\
+ PAB_INTP_INTC | PAB_INTP_INTD)
+
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+#define WIN_TYPE_MASK 0x3
+#define WIN_SIZE_MASK 0xfffffc00
+
+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
+#define AXI_WINDOW_ALIGN_MASK 3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS 0x474
+
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+#define AMAP_CTRL_TYPE_MASK 3
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START 5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI 16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+#define PAGED_ADDR_BNDRY 0xc00
+#define OFFSET_TO_PAGE_ADDR(off) \
+ (((off) & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
+#define OFFSET_TO_PAGE_IDX(off) \
+ (((off) >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
+
+struct mobiveil_msi { /* MSI information */
+ struct mutex lock; /* protect bitmap variable */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ phys_addr_t msi_pages_phys;
+ int num_of_vectors;
+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie;
+
+struct mobiveil_rp_ops {
+ int (*interrupt_init)(struct mobiveil_pcie *pcie);
+};
+
+struct mobiveil_root_port {
+ char root_bus_nr;
+ void __iomem *config_axi_slave_base; /* endpoint config base */
+ struct resource *ob_io_res;
+ struct mobiveil_rp_ops *ops;
+ int irq;
+ raw_spinlock_t intx_mask_lock;
+ struct irq_domain *intx_domain;
+ struct mobiveil_msi msi;
+ struct pci_host_bridge *bridge;
+};
+
+struct mobiveil_pab_ops {
+ int (*link_up)(struct mobiveil_pcie *pcie);
+};
+
+struct mobiveil_pcie {
+ struct platform_device *pdev;
+ void __iomem *csr_axi_slave_base; /* root port config base */
+ void __iomem *apb_csr_base; /* MSI register base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
+ int apio_wins;
+ int ppio_wins;
+ int ob_wins_configured; /* configured outbound windows */
+ int ib_wins_configured; /* configured inbound windows */
+ const struct mobiveil_pab_ops *ops;
+ struct mobiveil_root_port rp;
+};
+
+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit);
+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
+int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size);
+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size);
+u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
+void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size);
+
+static inline u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x4);
+}
+
+static inline u16 mobiveil_csr_readw(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x2);
+}
+
+static inline u8 mobiveil_csr_readb(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x1);
+}
+
+static inline void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x4);
+}
+
+static inline void mobiveil_csr_writew(struct mobiveil_pcie *pcie, u16 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x2);
+}
+
+static inline void mobiveil_csr_writeb(struct mobiveil_pcie *pcie, u8 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x1);
+}
+
+#endif /* _PCIE_MOBIVEIL_H */
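
The OFFSET_TO_PAGE_* macros above implement the paged CSR scheme behind mobiveil_csr_read()/write(): offsets at or above PAGED_ADDR_BNDRY (0xc00) are reached through a 1 KB window, with the page index latched via PAB_CTRL and the low 10 bits selecting the address inside the page. Below is a standalone sketch of the arithmetic using the header's own macros; the sample offset and the main() harness are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SEL_MASK		0x3f
#define PAGE_LO_MASK		0x3ff
#define PAGE_SEL_OFFSET_SHIFT	10
#define PAGED_ADDR_BNDRY	0xc00

#define OFFSET_TO_PAGE_ADDR(off) \
	(((off) & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
#define OFFSET_TO_PAGE_IDX(off) \
	(((off) >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)

int main(void)
{
	uint32_t off = 0x4ba0;	/* e.g. PAB_PEX_AMAP_CTRL(0) */

	/* -> page idx 0x12, in-page addr 0xfa0 */
	printf("page idx %#x, in-page addr %#x\n",
	       (unsigned)OFFSET_TO_PAGE_IDX(off),
	       (unsigned)OFFSET_TO_PAGE_ADDR(off));
	return 0;
}
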
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 9977abff92fc..e15022ff63e3 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -63,6 +63,7 @@
enum pci_protocol_version_t {
PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
+ PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */
};
#define CPU_AFFINITY_ALL -1ULL
@@ -72,6 +73,7 @@ enum pci_protocol_version_t {
* first.
*/
static enum pci_protocol_version_t pci_protocol_versions[] = {
+ PCI_PROTOCOL_VERSION_1_3,
PCI_PROTOCOL_VERSION_1_2,
PCI_PROTOCOL_VERSION_1_1,
};
@@ -119,6 +121,7 @@ enum pci_message_type {
PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
+ PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
PCI_MESSAGE_MAXIMUM
};
@@ -164,6 +167,26 @@ struct pci_function_description {
u32 ser; /* serial number */
} __packed;
+enum pci_device_description_flags {
+ HV_PCI_DEVICE_FLAG_NONE = 0x0,
+ HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1,
+};
+
+struct pci_function_description2 {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+ u32 flags;
+ u16 virtual_numa_node;
+ u16 reserved;
+} __packed;
+
/**
* struct hv_msi_desc
* @vector: IDT entry
@@ -260,7 +283,7 @@ struct pci_packet {
int resp_packet_size);
void *compl_ctxt;
- struct pci_message message[0];
+ struct pci_message message[];
};
/*
@@ -296,7 +319,13 @@ struct pci_bus_d0_entry {
struct pci_bus_relations {
struct pci_incoming_message incoming;
u32 device_count;
- struct pci_function_description func[0];
+ struct pci_function_description func[];
+} __packed;
+
+struct pci_bus_relations2 {
+ struct pci_incoming_message incoming;
+ u32 device_count;
+ struct pci_function_description2 func[];
} __packed;
struct pci_q_res_req_response {
@@ -407,42 +436,6 @@ struct pci_eject_response {
static int pci_ring_size = (4 * PAGE_SIZE);
/*
- * Definitions or interrupt steering hypercall.
- */
-#define HV_PARTITION_ID_SELF ((u64)-1)
-#define HVCALL_RETARGET_INTERRUPT 0x7e
-
-struct hv_interrupt_entry {
- u32 source; /* 1 for MSI(-X) */
- u32 reserved1;
- u32 address;
- u32 data;
-};
-
-/*
- * flags for hv_device_interrupt_target.flags
- */
-#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
-#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
-
-struct hv_device_interrupt_target {
- u32 vector;
- u32 flags;
- union {
- u64 vp_mask;
- struct hv_vpset vp_set;
- };
-};
-
-struct retarget_msi_interrupt {
- u64 partition_id; /* use "self" */
- u64 device_id;
- struct hv_interrupt_entry int_entry;
- u64 reserved2;
- struct hv_device_interrupt_target int_target;
-} __packed __aligned(8);
-
-/*
* Driver specific state.
*/
@@ -488,7 +481,7 @@ struct hv_pcibus_device {
struct workqueue_struct *wq;
/* hypercall arg, must not cross page boundary */
- struct retarget_msi_interrupt retarget_msi_interrupt_params;
+ struct hv_retarget_device_interrupt retarget_msi_interrupt_params;
/*
* Don't put anything here: retarget_msi_interrupt_params must be last
@@ -505,10 +498,24 @@ struct hv_dr_work {
struct hv_pcibus_device *bus;
};
+struct hv_pcidev_description {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+ u32 flags;
+ u16 virtual_numa_node;
+};
+
struct hv_dr_state {
struct list_head list_entry;
u32 device_count;
- struct pci_function_description func[0];
+ struct hv_pcidev_description func[];
};
enum hv_pcichild_state {
@@ -525,7 +532,7 @@ struct hv_pci_dev {
refcount_t refs;
enum hv_pcichild_state state;
struct pci_slot *pci_slot;
- struct pci_function_description desc;
+ struct hv_pcidev_description desc;
bool reported_missing;
struct hv_pcibus_device *hbus;
struct work_struct wrk;
@@ -1184,7 +1191,7 @@ static void hv_irq_unmask(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct irq_cfg *cfg = irqd_cfg(data);
- struct retarget_msi_interrupt *params;
+ struct hv_retarget_device_interrupt *params;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
cpumask_var_t tmp;
@@ -1206,8 +1213,7 @@ static void hv_irq_unmask(struct irq_data *data)
memset(params, 0, sizeof(*params));
params->partition_id = HV_PARTITION_ID_SELF;
params->int_entry.source = 1; /* MSI(-X) */
- params->int_entry.address = msi_desc->msg.address_lo;
- params->int_entry.data = msi_desc->msg.data;
+ hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
@@ -1401,6 +1407,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
break;
case PCI_PROTOCOL_VERSION_1_2:
+ case PCI_PROTOCOL_VERSION_1_3:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
@@ -1799,6 +1806,27 @@ static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
}
}
+/*
+ * Set NUMA node for the devices on the bus
+ */
+static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *bus = hbus->pci_bus;
+ struct hv_pci_dev *hv_dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
+ if (!hv_dev)
+ continue;
+
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
+ set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+
+ put_pcichild(hv_dev);
+ }
+}
+
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@@ -1821,6 +1849,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
+ hv_pci_assign_numa_node(hbus);
pci_bus_assign_resources(hbus->pci_bus);
hv_pci_assign_slots(hbus);
pci_bus_add_devices(hbus->pci_bus);
@@ -1877,7 +1906,7 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
* Return: Pointer to the new tracking struct
*/
static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
- struct pci_function_description *desc)
+ struct hv_pcidev_description *desc)
{
struct hv_pci_dev *hpdev;
struct pci_child_message *res_req;
@@ -1988,7 +2017,7 @@ static void pci_devices_present_work(struct work_struct *work)
{
u32 child_no;
bool found;
- struct pci_function_description *new_desc;
+ struct hv_pcidev_description *new_desc;
struct hv_pci_dev *hpdev;
struct hv_pcibus_device *hbus;
struct list_head removed;
@@ -2089,6 +2118,7 @@ static void pci_devices_present_work(struct work_struct *work)
*/
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
+ hv_pci_assign_numa_node(hbus);
hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
break;
@@ -2107,17 +2137,15 @@ static void pci_devices_present_work(struct work_struct *work)
}
/**
- * hv_pci_devices_present() - Handles list of new children
+ * hv_pci_start_relations_work() - Queue work to start device discovery
* @hbus: Root PCI bus, as understood by this driver
- * @relations: Packet from host listing children
+ * @dr: The list of children returned from host
*
- * This function is invoked whenever a new list of devices for
- * this bus appears.
+ * Return: 0 on success, -errno on failure
*/
-static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
- struct pci_bus_relations *relations)
+static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
+ struct hv_dr_state *dr)
{
- struct hv_dr_state *dr;
struct hv_dr_work *dr_wrk;
unsigned long flags;
bool pending_dr;
@@ -2125,29 +2153,15 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
if (hbus->state == hv_pcibus_removing) {
dev_info(&hbus->hdev->device,
"PCI VMBus BUS_RELATIONS: ignored\n");
- return;
+ return -ENOENT;
}
dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
if (!dr_wrk)
- return;
-
- dr = kzalloc(offsetof(struct hv_dr_state, func) +
- (sizeof(struct pci_function_description) *
- (relations->device_count)), GFP_NOWAIT);
- if (!dr) {
- kfree(dr_wrk);
- return;
- }
+ return -ENOMEM;
INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
dr_wrk->bus = hbus;
- dr->device_count = relations->device_count;
- if (dr->device_count != 0) {
- memcpy(dr->func, relations->func,
- sizeof(struct pci_function_description) *
- dr->device_count);
- }
spin_lock_irqsave(&hbus->device_list_lock, flags);
/*
@@ -2165,6 +2179,87 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
get_hvpcibus(hbus);
queue_work(hbus->wq, &dr_wrk->wrk);
}
+
+ return 0;
+}
+
+/**
+ * hv_pci_devices_present() - Handle list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * Process a new list of devices on the bus. The list of devices is
+ * discovered by the VSP and sent to us via the VSP message
+ * PCI_BUS_RELATIONS whenever a new list of devices for this bus appears.
+ */
+static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations *relations)
+{
+ struct hv_dr_state *dr;
+ int i;
+
+ dr = kzalloc(offsetof(struct hv_dr_state, func) +
+ (sizeof(struct hv_pcidev_description) *
+ (relations->device_count)), GFP_NOWAIT);
+
+ if (!dr)
+ return;
+
+ dr->device_count = relations->device_count;
+ for (i = 0; i < dr->device_count; i++) {
+ dr->func[i].v_id = relations->func[i].v_id;
+ dr->func[i].d_id = relations->func[i].d_id;
+ dr->func[i].rev = relations->func[i].rev;
+ dr->func[i].prog_intf = relations->func[i].prog_intf;
+ dr->func[i].subclass = relations->func[i].subclass;
+ dr->func[i].base_class = relations->func[i].base_class;
+ dr->func[i].subsystem_id = relations->func[i].subsystem_id;
+ dr->func[i].win_slot = relations->func[i].win_slot;
+ dr->func[i].ser = relations->func[i].ser;
+ }
+
+ if (hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
+}
+
+/**
+ * hv_pci_devices_present2() - Handle list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * This function is the v2 version of hv_pci_devices_present()
+ */
+static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations2 *relations)
+{
+ struct hv_dr_state *dr;
+ int i;
+
+ dr = kzalloc(offsetof(struct hv_dr_state, func) +
+ (sizeof(struct hv_pcidev_description) *
+ (relations->device_count)), GFP_NOWAIT);
+
+ if (!dr)
+ return;
+
+ dr->device_count = relations->device_count;
+ for (i = 0; i < dr->device_count; i++) {
+ dr->func[i].v_id = relations->func[i].v_id;
+ dr->func[i].d_id = relations->func[i].d_id;
+ dr->func[i].rev = relations->func[i].rev;
+ dr->func[i].prog_intf = relations->func[i].prog_intf;
+ dr->func[i].subclass = relations->func[i].subclass;
+ dr->func[i].base_class = relations->func[i].base_class;
+ dr->func[i].subsystem_id = relations->func[i].subsystem_id;
+ dr->func[i].win_slot = relations->func[i].win_slot;
+ dr->func[i].ser = relations->func[i].ser;
+ dr->func[i].flags = relations->func[i].flags;
+ dr->func[i].virtual_numa_node =
+ relations->func[i].virtual_numa_node;
+ }
+
+ if (hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
}
/**
@@ -2280,6 +2375,7 @@ static void hv_pci_onchannelcallback(void *context)
struct pci_response *response;
struct pci_incoming_message *new_message;
struct pci_bus_relations *bus_rel;
+ struct pci_bus_relations2 *bus_rel2;
struct pci_dev_inval_block *inval;
struct pci_dev_incoming *dev_message;
struct hv_pci_dev *hpdev;
@@ -2347,6 +2443,21 @@ static void hv_pci_onchannelcallback(void *context)
hv_pci_devices_present(hbus, bus_rel);
break;
+ case PCI_BUS_RELATIONS2:
+
+ bus_rel2 = (struct pci_bus_relations2 *)buffer;
+ if (bytes_recvd <
+ offsetof(struct pci_bus_relations2, func) +
+ (sizeof(struct pci_function_description2) *
+ (bus_rel2->device_count))) {
+ dev_err(&hbus->hdev->device,
+ "bus relations v2 too small\n");
+ break;
+ }
+
+ hv_pci_devices_present2(hbus, bus_rel2);
+ break;
+
case PCI_EJECT:
dev_message = (struct pci_dev_incoming *)buffer;
@@ -2922,7 +3033,7 @@ static int hv_pci_probe(struct hv_device *hdev,
* positive by using kmemleak_alloc() and kmemleak_free() to ask
* kmemleak to track and scan the hbus buffer.
*/
- hbus = (struct hv_pcibus_device *)kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+ hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hbus)
return -ENOMEM;
hbus->state = hv_pcibus_init;
@@ -3058,7 +3169,7 @@ destroy_wq:
free_dom:
hv_put_dom_num(hbus->sysdata.domain);
free_bus:
- free_page((unsigned long)hbus);
+ kfree(hbus);
return ret;
}
@@ -3069,7 +3180,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
- struct pci_bus_relations relations;
+ struct hv_dr_state *dr;
struct hv_pci_compl comp_pkt;
int ret;
@@ -3082,8 +3193,9 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
if (!hibernating) {
/* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
+ dr = kzalloc(sizeof(*dr), GFP_KERNEL);
+ if (dr && hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
}
ret = hv_send_resources_released(hdev);
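
For reference, the PCI_BUS_RELATIONS2 handler added above refuses to trust device_count until the packet is at least large enough to hold that many descriptors. The userspace sketch below reproduces the same size check; the struct definitions are trimmed stand-ins for the patch's types, not the real ones.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pci_function_description2 {
	uint16_t v_id, d_id;
	uint8_t  rev, prog_intf, subclass, base_class;
	uint32_t subsystem_id;
	uint32_t win_slot;	/* stand-in for union win_slot_encoding */
	uint32_t ser, flags;
	uint16_t virtual_numa_node, reserved;
} __attribute__((packed));

struct pci_bus_relations2 {
	uint64_t incoming;	/* stand-in for struct pci_incoming_message */
	uint32_t device_count;
	struct pci_function_description2 func[];
} __attribute__((packed));

int main(void)
{
	uint32_t device_count = 3;
	size_t need = offsetof(struct pci_bus_relations2, func) +
		      sizeof(struct pci_function_description2) * device_count;

	/* bytes_recvd below this is rejected as "bus relations v2 too small" */
	printf("minimum bytes_recvd for %u devices: %zu\n", device_count, need);
	return 0;
}
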
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 0e03cef72840..3e64ba6a36a8 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -355,16 +355,6 @@ struct tegra_pcie {
int irq;
struct resource cs;
- struct resource io;
- struct resource pio;
- struct resource mem;
- struct resource prefetch;
- struct resource busn;
-
- struct {
- resource_size_t mem;
- resource_size_t io;
- } offset;
struct clk *pex_clk;
struct clk *afi_clk;
@@ -797,38 +787,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
-static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
-{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct list_head *windows = &host->windows;
- struct device *dev = pcie->dev;
- int err;
-
- pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
- pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
- pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
- pci_add_resource(windows, &pcie->busn);
-
- err = devm_request_pci_bus_resources(dev, windows);
- if (err < 0) {
- pci_free_resource_list(windows);
- return err;
- }
-
- pci_remap_iospace(&pcie->pio, pcie->io.start);
-
- return 0;
-}
-
-static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
-{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct list_head *windows = &host->windows;
-
- pci_unmap_iospace(&pcie->pio);
- pci_free_resource_list(windows);
-}
-
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
struct tegra_pcie *pcie = pdev->bus->sysdata;
@@ -909,36 +867,49 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
*/
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
- u32 fpci_bar, size, axi_address;
+ u32 size;
+ struct resource_entry *entry;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
/* Bar 0: type 1 extended configuration space */
size = resource_size(&pcie->cs);
afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
- /* Bar 1: downstream IO bar */
- fpci_bar = 0xfdfc0000;
- size = resource_size(&pcie->io);
- axi_address = pcie->io.start;
- afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
- afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
- afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
-
- /* Bar 2: prefetchable memory BAR */
- fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
- size = resource_size(&pcie->prefetch);
- axi_address = pcie->prefetch.start;
- afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
- afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
- afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
-
- /* Bar 3: non prefetchable memory BAR */
- fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
- size = resource_size(&pcie->mem);
- axi_address = pcie->mem.start;
- afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
- afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
- afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ u32 fpci_bar, axi_address;
+ struct resource *res = entry->res;
+
+ size = resource_size(res);
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ /* Bar 1: downstream IO bar */
+ fpci_bar = 0xfdfc0000;
+ axi_address = pci_pio_to_address(res->start);
+ afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
+ break;
+ case IORESOURCE_MEM:
+ fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
+ axi_address = res->start;
+
+ if (res->flags & IORESOURCE_PREFETCH) {
+ /* Bar 2: prefetchable memory BAR */
+ afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
+
+ } else {
+ /* Bar 3: non prefetchable memory BAR */
+ afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+ }
+ break;
+ }
+ }
/* NULL out the remaining BARs as they are not used */
afi_writel(pcie, 0, AFI_AXI_BAR4_START);
@@ -2157,76 +2128,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node, *port;
const struct tegra_pcie_soc *soc = pcie->soc;
- struct of_pci_range_parser parser;
- struct of_pci_range range;
u32 lanes = 0, mask = 0;
unsigned int lane = 0;
- struct resource res;
int err;
- if (of_pci_range_parser_init(&parser, np)) {
- dev_err(dev, "missing \"ranges\" property\n");
- return -EINVAL;
- }
-
- for_each_of_pci_range(&parser, &range) {
- err = of_pci_range_to_resource(&range, np, &res);
- if (err < 0)
- return err;
-
- switch (res.flags & IORESOURCE_TYPE_BITS) {
- case IORESOURCE_IO:
- /* Track the bus -> CPU I/O mapping offset. */
- pcie->offset.io = res.start - range.pci_addr;
-
- memcpy(&pcie->pio, &res, sizeof(res));
- pcie->pio.name = np->full_name;
-
- /*
- * The Tegra PCIe host bridge uses this to program the
- * mapping of the I/O space to the physical address,
- * so we override the .start and .end fields here that
- * of_pci_range_to_resource() converted to I/O space.
- * We also set the IORESOURCE_MEM type to clarify that
- * the resource is in the physical memory space.
- */
- pcie->io.start = range.cpu_addr;
- pcie->io.end = range.cpu_addr + range.size - 1;
- pcie->io.flags = IORESOURCE_MEM;
- pcie->io.name = "I/O";
-
- memcpy(&res, &pcie->io, sizeof(res));
- break;
-
- case IORESOURCE_MEM:
- /*
- * Track the bus -> CPU memory mapping offset. This
- * assumes that the prefetchable and non-prefetchable
- * regions will be the last of type IORESOURCE_MEM in
- * the ranges property.
- * */
- pcie->offset.mem = res.start - range.pci_addr;
-
- if (res.flags & IORESOURCE_PREFETCH) {
- memcpy(&pcie->prefetch, &res, sizeof(res));
- pcie->prefetch.name = "prefetchable";
- } else {
- memcpy(&pcie->mem, &res, sizeof(res));
- pcie->mem.name = "non-prefetchable";
- }
- break;
- }
- }
-
- err = of_pci_parse_bus_range(np, &pcie->busn);
- if (err < 0) {
- dev_err(dev, "failed to parse ranges property: %d\n", err);
- pcie->busn.name = np->name;
- pcie->busn.start = 0;
- pcie->busn.end = 0xff;
- pcie->busn.flags = IORESOURCE_BUS;
- }
-
/* parse root ports */
for_each_child_of_node(np, port) {
struct tegra_pcie_port *rp;
@@ -2766,6 +2671,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
struct pci_host_bridge *host;
struct tegra_pcie *pcie;
struct pci_bus *child;
+ struct resource *bus;
int err;
host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
@@ -2780,6 +2686,12 @@ static int tegra_pcie_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&pcie->ports);
pcie->dev = dev;
+ err = pci_parse_request_of_pci_ranges(dev, &host->windows, NULL, &bus);
+ if (err) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return err;
+ }
+
err = tegra_pcie_parse_dt(pcie);
if (err < 0)
return err;
@@ -2803,11 +2715,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
goto teardown_msi;
}
- err = tegra_pcie_request_resources(pcie);
- if (err)
- goto pm_runtime_put;
-
- host->busnr = pcie->busn.start;
+ host->busnr = bus->start;
host->dev.parent = &pdev->dev;
host->ops = &tegra_pcie_ops;
host->map_irq = tegra_pcie_map_irq;
@@ -2816,7 +2724,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = pci_scan_root_bus_bridge(host);
if (err < 0) {
dev_err(dev, "failed to register host: %d\n", err);
- goto free_resources;
+ goto pm_runtime_put;
}
pci_bus_size_bridges(host->bus);
@@ -2835,8 +2743,6 @@ static int tegra_pcie_probe(struct platform_device *pdev)
return 0;
-free_resources:
- tegra_pcie_free_resources(pcie);
pm_runtime_put:
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
@@ -2858,7 +2764,6 @@ static int tegra_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(host->bus);
pci_remove_root_bus(host->bus);
- tegra_pcie_free_resources(pcie);
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
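
The rewritten tegra_pcie_setup_translations() derives each FPCI BAR value from the bridge window it walks: bits [31:4] hold the window's CPU address shifted down by 12, and bit 0 marks the BAR valid. A small sketch of that encoding follows; the sample address is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the fpci_bar computation for memory windows above */
static uint32_t tegra_fpci_bar(uint64_t start)
{
	return (uint32_t)(((start >> 12) & 0x0fffffff) << 4) | 0x1;
}

int main(void)
{
	/* e.g. a non-prefetchable window at 0x20000000 -> 0x200001 */
	printf("AFI_FPCI_BAR3 = %#x\n", (unsigned)tegra_fpci_bar(0x20000000));
	return 0;
}
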
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 3a10e678c7f4..6d79d14527a6 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -824,8 +824,8 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
dev_info(dev, "link up, %s x%u %s\n",
- PCIE_SPEED2STR(cls + PCI_SPEED_133MHz_PCIX_533),
- nlw, ssc_good ? "(SSC)" : "(!SSC)");
+ pci_speed_string(pcie_link_speed[cls]), nlw,
+ ssc_good ? "(SSC)" : "(!SSC)");
/* PCIe->SCB endian mode for BAR */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
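
The new log line indexes pcie_link_speed[] with the Current Link Speed field from PCI_EXP_LNKSTA and prints it via pci_speed_string(). Below is a rough userspace stand-in for that lookup; the table only mirrors the standard PCIe CLS encoding and is not the kernel's array.

#include <stdio.h>

static const char *speed_str[] = {
	[1] = "2.5 GT/s", [2] = "5 GT/s", [3] = "8 GT/s", [4] = "16 GT/s",
};

int main(void)
{
	unsigned int cls = 3;	/* FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta) */
	unsigned int nlw = 4;	/* negotiated link width */

	printf("link up, %s x%u\n", speed_str[cls], nlw);	/* 8 GT/s x4 */
	return 0;
}
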
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 5d74f81ddfe4..60330f3e3751 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -8,6 +8,7 @@
#include <linux/crc32.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -39,6 +40,8 @@
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
+#define FLAG_USE_DMA BIT(0)
+
#define TIMER_RESOLUTION 1
static struct workqueue_struct *kpcitest_workqueue;
@@ -47,7 +50,11 @@ struct pci_epf_test {
void *reg[PCI_STD_NUM_BARS];
struct pci_epf *epf;
enum pci_barno test_reg_bar;
+ size_t msix_table_offset;
struct delayed_work cmd_handler;
+ struct dma_chan *dma_chan;
+ struct completion transfer_complete;
+ bool dma_supported;
const struct pci_epc_features *epc_features;
};
@@ -61,6 +68,7 @@ struct pci_epf_test_reg {
u32 checksum;
u32 irq_type;
u32 irq_number;
+ u32 flags;
} __packed;
static struct pci_epf_header test_header = {
@@ -72,13 +80,156 @@ static struct pci_epf_header test_header = {
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
+static void pci_epf_test_dma_callback(void *param)
+{
+ struct pci_epf_test *epf_test = param;
+
+ complete(&epf_test->transfer_complete);
+}
+
+/**
+ * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
+ * data between PCIe EP and remote PCIe RC
+ * @epf_test: the EPF test device that performs the data transfer operation
+ * @dma_dst: The destination address of the data transfer. It can be a physical
+ * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
+ * @dma_src: The source address of the data transfer. It can be a physical
+ * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
+ * @len: The size of the data transfer
+ *
+ * Function that uses dmaengine API to transfer data between PCIe EP and remote
+ * PCIe RC. The source and destination address can be a physical address given
+ * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
+ *
+ * The function returns '0' on success and a negative value on failure.
+ */
+static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len)
+{
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct dma_chan *chan = epf_test->dma_chan;
+ struct pci_epf *epf = epf_test->epf;
+ struct dma_async_tx_descriptor *tx;
+ struct device *dev = &epf->dev;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (IS_ERR_OR_NULL(chan)) {
+ dev_err(dev, "Invalid DMA memcpy channel\n");
+ return -EINVAL;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
+ if (!tx) {
+ dev_err(dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ tx->callback = pci_epf_test_dma_callback;
+ tx->callback_param = epf_test;
+ cookie = tx->tx_submit(tx);
+ reinit_completion(&epf_test->transfer_complete);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
+ return -EIO;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
+ if (ret < 0) {
+ dmaengine_terminate_sync(chan);
+ dev_err(dev, "DMA wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
+ * @epf_test: the EPF test device that performs data transfer operation
+ *
+ * Function to initialize the EPF test DMA channel. Returns 0 on success
+ * or a negative error code on failure.
+ */
+static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
+{
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct dma_chan *dma_chan;
+ dma_cap_mask_t mask;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dma_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get DMA channel\n");
+ return ret;
+ }
+ init_completion(&epf_test->transfer_complete);
+
+ epf_test->dma_chan = dma_chan;
+
+ return 0;
+}
+
+/**
+ * pci_epf_test_clean_dma_chan() - Function to clean up EPF test DMA channel
+ * @epf_test: the EPF test device that performs data transfer operation
+ *
+ * Helper to clean up the EPF test DMA channel.
+ */
+static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
+{
+ dma_release_channel(epf_test->dma_chan);
+ epf_test->dma_chan = NULL;
+}
+
+static void pci_epf_test_print_rate(const char *ops, u64 size,
+ struct timespec64 *start,
+ struct timespec64 *end, bool dma)
+{
+ struct timespec64 ts;
+ u64 rate, ns;
+
+ ts = timespec64_sub(*end, *start);
+
+ /* express both size (stored in 'rate') and time in nanoseconds */
+ ns = timespec64_to_ns(&ts);
+ rate = size * NSEC_PER_SEC;
+
+ /* Divide both size (stored in 'rate') and ns by a common factor */
+ while (ns > UINT_MAX) {
+ rate >>= 1;
+ ns >>= 1;
+ }
+
+ if (!ns)
+ return;
+
+ /* calculate the rate */
+ do_div(rate, (uint32_t)ns);
+
+ pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
+ "Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
+ (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
+}
+
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
int ret;
+ bool use_dma;
void __iomem *src_addr;
void __iomem *dst_addr;
phys_addr_t src_phys_addr;
phys_addr_t dst_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
@@ -117,8 +268,26 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_dst_addr;
}
- memcpy(dst_addr, src_addr, reg->size);
+ ktime_get_ts64(&start);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
+ ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
+ src_phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ } else {
+ memcpy(dst_addr, src_addr, reg->size);
+ }
+ ktime_get_ts64(&end);
+ pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
+err_map_addr:
pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
err_dst_addr:
@@ -140,10 +309,14 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
void __iomem *src_addr;
void *buf;
u32 crc32;
+ bool use_dma;
phys_addr_t phys_addr;
+ phys_addr_t dst_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dma_dev = epf->epc->dev.parent;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -169,12 +342,44 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err_map_addr;
}
- memcpy_fromio(buf, src_addr, reg->size);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_dma_map;
+ }
+
+ dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dst_phys_addr)) {
+ dev_err(dev, "Failed to map destination buffer addr\n");
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
+ phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
+ DMA_FROM_DEVICE);
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_fromio(buf, src_addr, reg->size);
+ ktime_get_ts64(&end);
+ }
+
+ pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);
crc32 = crc32_le(~0, buf, reg->size);
if (crc32 != reg->checksum)
ret = -EIO;
+err_dma_map:
kfree(buf);
err_map_addr:
@@ -192,10 +397,14 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
int ret;
void __iomem *dst_addr;
void *buf;
+ bool use_dma;
phys_addr_t phys_addr;
+ phys_addr_t src_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dma_dev = epf->epc->dev.parent;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -224,7 +433,38 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
get_random_bytes(buf, reg->size);
reg->checksum = crc32_le(~0, buf, reg->size);
- memcpy_toio(dst_addr, buf, reg->size);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
+ src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, src_phys_addr)) {
+ dev_err(dev, "Failed to map source buffer addr\n");
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test, phys_addr,
+ src_phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, src_phys_addr, reg->size,
+ DMA_TO_DEVICE);
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_toio(dst_addr, buf, reg->size);
+ ktime_get_ts64(&end);
+ }
+
+ pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);
/*
* wait 1ms in order for the write to complete. Without this delay L3
* error is observed in the host system.
*/
*/
usleep_range(1000, 2000);
+err_dma_map:
kfree(buf);
err_map_addr:
@@ -360,14 +601,6 @@ reset_handler:
msecs_to_jiffies(1));
}
-static void pci_epf_test_linkup(struct pci_epf *epf)
-{
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
-
- queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
- msecs_to_jiffies(1));
-}
-
static void pci_epf_test_unbind(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -376,6 +609,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
int bar;
cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epf_test_clean_dma_chan(epf_test);
pci_epc_stop(epc);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
@@ -424,11 +658,90 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
return 0;
}
+static int pci_epf_test_core_init(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epf_header *header = epf->header;
+ const struct pci_epc_features *epc_features;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ bool msix_capable = false;
+ bool msi_capable = true;
+ int ret;
+
+ epc_features = pci_epc_get_features(epc, epf->func_no);
+ if (epc_features) {
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+ }
+
+ ret = pci_epc_write_header(epc, epf->func_no, header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
+
+ ret = pci_epf_test_set_bar(epf);
+ if (ret)
+ return ret;
+
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ if (msix_capable) {
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
+ epf_test->test_reg_bar,
+ epf_test->msix_table_offset);
+ if (ret) {
+ dev_err(dev, "MSI-X configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ int ret;
+
+ switch (val) {
+ case CORE_INIT:
+ ret = pci_epf_test_core_init(epf);
+ if (ret)
+ return NOTIFY_BAD;
+ break;
+
+ case LINK_UP:
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+ break;
+
+ default:
+ dev_err(&epf->dev, "Invalid EPF test notifier event\n");
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_OK;
+}
+
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
struct pci_epf_bar *epf_bar;
+ size_t msix_table_size = 0;
+ size_t test_reg_bar_size;
+ size_t pba_size = 0;
+ bool msix_capable;
void *base;
int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
@@ -437,13 +750,25 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
epc_features = epf_test->epc_features;
- if (epc_features->bar_fixed_size[test_reg_bar])
+ test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
+
+ msix_capable = epc_features->msix_capable;
+ if (msix_capable) {
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
+ epf_test->msix_table_offset = test_reg_bar_size;
+ /* Align to QWORD (8 bytes) */
+ pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
+ }
+ test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
+
+ if (epc_features->bar_fixed_size[test_reg_bar]) {
+ if (test_reg_size > bar_size[test_reg_bar])
+ return -ENOMEM;
test_reg_size = bar_size[test_reg_bar];
- else
- test_reg_size = sizeof(struct pci_epf_test_reg);
+ }
- base = pci_epf_alloc_space(epf, test_reg_size,
- test_reg_bar, epc_features->align);
+ base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
+ epc_features->align);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -492,14 +817,11 @@ static int pci_epf_test_bind(struct pci_epf *epf)
{
int ret;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- struct pci_epf_header *header = epf->header;
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
- struct device *dev = &epf->dev;
bool linkup_notifier = false;
- bool msix_capable = false;
- bool msi_capable = true;
+ bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
@@ -507,8 +829,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
epc_features = pci_epc_get_features(epc, epf->func_no);
if (epc_features) {
linkup_notifier = epc_features->linkup_notifier;
- msix_capable = epc_features->msix_capable;
- msi_capable = epc_features->msi_capable;
+ core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
pci_epf_configure_bar(epf, epc_features);
}
@@ -516,38 +837,28 @@ static int pci_epf_test_bind(struct pci_epf *epf)
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
- ret = pci_epc_write_header(epc, epf->func_no, header);
- if (ret) {
- dev_err(dev, "Configuration header write failed\n");
- return ret;
- }
-
ret = pci_epf_test_alloc_space(epf);
if (ret)
return ret;
- ret = pci_epf_test_set_bar(epf);
- if (ret)
- return ret;
-
- if (msi_capable) {
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
- if (ret) {
- dev_err(dev, "MSI configuration failed\n");
+ if (!core_init_notifier) {
+ ret = pci_epf_test_core_init(epf);
+ if (ret)
return ret;
- }
}
- if (msix_capable) {
- ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
- if (ret) {
- dev_err(dev, "MSI-X configuration failed\n");
- return ret;
- }
- }
+ epf_test->dma_supported = true;
- if (!linkup_notifier)
+ ret = pci_epf_test_init_dma_chan(epf_test);
+ if (ret)
+ epf_test->dma_supported = false;
+
+ if (linkup_notifier) {
+ epf->nb.notifier_call = pci_epf_test_notifier;
+ pci_epc_register_notifier(epc, &epf->nb);
+ } else {
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+ }
return 0;
}
@@ -580,7 +891,6 @@ static int pci_epf_test_probe(struct pci_epf *epf)
static struct pci_epf_ops ops = {
.unbind = pci_epf_test_unbind,
.bind = pci_epf_test_bind,
- .linkup = pci_epf_test_linkup,
};
static struct pci_epf_driver test_driver = {
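
pci_epf_test_print_rate() above avoids a full 64-by-64 division by halving both the scaled byte count and the elapsed nanoseconds until the latter fits in 32 bits; the quotient is essentially unchanged because both operands shrink by the same factor. A worked userspace example follows, where UINT32_MAX stands in for the kernel's UINT_MAX and the sample numbers are made up.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t size = 1024 * 1024;	/* 1 MiB transferred      */
	uint64_t ns = 8200000000ULL;	/* ... in 8.2 seconds     */
	uint64_t rate = size * NSEC_PER_SEC;

	/* halve both until ns fits a 32-bit divisor */
	while (ns > UINT32_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	/* 1 MiB / 8.2 s is roughly 124 KB/s */
	printf("rate: %llu KB/s\n", (unsigned long long)(rate / ns / 1024));
	return 0;
}
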
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index d1288a0bd530..55edce50be96 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -29,7 +29,6 @@ struct pci_epc_group {
struct config_group group;
struct pci_epc *epc;
bool start;
- unsigned long function_num_map;
};
static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item)
@@ -58,6 +57,7 @@ static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
if (!start) {
pci_epc_stop(epc);
+ epc_group->start = 0;
return len;
}
@@ -89,37 +89,22 @@ static int pci_epc_epf_link(struct config_item *epc_item,
struct config_item *epf_item)
{
int ret;
- u32 func_no = 0;
struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
struct pci_epc *epc = epc_group->epc;
struct pci_epf *epf = epf_group->epf;
- func_no = find_first_zero_bit(&epc_group->function_num_map,
- BITS_PER_LONG);
- if (func_no >= BITS_PER_LONG)
- return -EINVAL;
-
- set_bit(func_no, &epc_group->function_num_map);
- epf->func_no = func_no;
-
ret = pci_epc_add_epf(epc, epf);
if (ret)
- goto err_add_epf;
+ return ret;
ret = pci_epf_bind(epf);
- if (ret)
- goto err_epf_bind;
+ if (ret) {
+ pci_epc_remove_epf(epc, epf);
+ return ret;
+ }
return 0;
-
-err_epf_bind:
- pci_epc_remove_epf(epc, epf);
-
-err_add_epf:
- clear_bit(func_no, &epc_group->function_num_map);
-
- return ret;
}
static void pci_epc_epf_unlink(struct config_item *epc_item,
@@ -134,7 +119,6 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,
epc = epc_group->epc;
epf = epf_group->epf;
- clear_bit(epf->func_no, &epc_group->function_num_map);
pci_epf_unbind(epf);
pci_epc_remove_epf(epc, epf);
}
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 2091508c1620..82ba0dc7f2f5 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -120,7 +120,6 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
u8 func_no)
{
const struct pci_epc_features *epc_features;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return NULL;
@@ -128,9 +127,9 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
if (!epc->ops->get_features)
return NULL;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc_features = epc->ops->get_features(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return epc_features;
}
@@ -144,14 +143,12 @@ EXPORT_SYMBOL_GPL(pci_epc_get_features);
*/
void pci_epc_stop(struct pci_epc *epc)
{
- unsigned long flags;
-
if (IS_ERR(epc) || !epc->ops->stop)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->stop(epc);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);
@@ -164,7 +161,6 @@ EXPORT_SYMBOL_GPL(pci_epc_stop);
int pci_epc_start(struct pci_epc *epc)
{
int ret;
- unsigned long flags;
if (IS_ERR(epc))
return -EINVAL;
@@ -172,9 +168,9 @@ int pci_epc_start(struct pci_epc *epc)
if (!epc->ops->start)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->start(epc);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -193,7 +189,6 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -201,9 +196,9 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
if (!epc->ops->raise_irq)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -219,7 +214,6 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
{
int interrupt;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
@@ -227,9 +221,9 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
if (!epc->ops->get_msi)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
interrupt = epc->ops->get_msi(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
if (interrupt < 0)
return 0;
@@ -252,7 +246,6 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
{
int ret;
u8 encode_int;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
interrupts > 32)
@@ -263,9 +256,9 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
encode_int = order_base_2(interrupts);
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->set_msi(epc, func_no, encode_int);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -281,7 +274,6 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
{
int interrupt;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
@@ -289,9 +281,9 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
if (!epc->ops->get_msix)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
interrupt = epc->ops->get_msix(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
if (interrupt < 0)
return 0;
@@ -305,13 +297,15 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
* @epc: the EPC device on which MSI-X has to be configured
* @func_no: the endpoint function number in the EPC device
* @interrupts: number of MSI-X interrupts required by the EPF
+ * @bir: BAR where the MSI-X table resides
+ * @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+ enum pci_barno bir, u32 offset)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
interrupts < 1 || interrupts > 2048)
@@ -320,9 +314,9 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
if (!epc->ops->set_msix)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
- ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_lock(&epc->lock);
+ ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -339,17 +333,15 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr)
{
- unsigned long flags;
-
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
if (!epc->ops->unmap_addr)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->unmap_addr(epc, func_no, phys_addr);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -367,7 +359,6 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -375,9 +366,9 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
if (!epc->ops->map_addr)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -394,8 +385,6 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
struct pci_epf_bar *epf_bar)
{
- unsigned long flags;
-
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
(epf_bar->barno == BAR_5 &&
epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
@@ -404,9 +393,9 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
if (!epc->ops->clear_bar)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->clear_bar(epc, func_no, epf_bar);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -422,7 +411,6 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
struct pci_epf_bar *epf_bar)
{
int ret;
- unsigned long irq_flags;
int flags = epf_bar->flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -437,9 +425,9 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
if (!epc->ops->set_bar)
return 0;
- spin_lock_irqsave(&epc->lock, irq_flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->set_bar(epc, func_no, epf_bar);
- spin_unlock_irqrestore(&epc->lock, irq_flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -460,7 +448,6 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *header)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -468,9 +455,9 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
if (!epc->ops->write_header)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->write_header(epc, func_no, header);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -487,7 +474,8 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
*/
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
{
- unsigned long flags;
+ u32 func_no;
+ int ret = 0;
if (epf->epc)
return -EBUSY;
@@ -495,16 +483,30 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
if (IS_ERR(epc))
return -EINVAL;
- if (epf->func_no > epc->max_functions - 1)
- return -EINVAL;
+ mutex_lock(&epc->lock);
+ func_no = find_first_zero_bit(&epc->function_num_map,
+ BITS_PER_LONG);
+ if (func_no >= BITS_PER_LONG) {
+ ret = -EINVAL;
+ goto ret;
+ }
+ if (func_no > epc->max_functions - 1) {
+ dev_err(&epc->dev, "Exceeding max supported Function Number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ set_bit(func_no, &epc->function_num_map);
+ epf->func_no = func_no;
epf->epc = epc;
- spin_lock_irqsave(&epc->lock, flags);
list_add_tail(&epf->list, &epc->pci_epf);
- spin_unlock_irqrestore(&epc->lock, flags);
- return 0;
+ret:
+ mutex_unlock(&epc->lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);
@@ -517,15 +519,14 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
*/
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
{
- unsigned long flags;
-
if (!epc || IS_ERR(epc) || !epf)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
+ clear_bit(epf->func_no, &epc->function_num_map);
list_del(&epf->list);
epf->epc = NULL;
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
@@ -539,20 +540,31 @@ EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
*/
void pci_epc_linkup(struct pci_epc *epc)
{
- unsigned long flags;
- struct pci_epf *epf;
-
if (!epc || IS_ERR(epc))
return;
- spin_lock_irqsave(&epc->lock, flags);
- list_for_each_entry(epf, &epc->pci_epf, list)
- pci_epf_linkup(epf);
- spin_unlock_irqrestore(&epc->lock, flags);
+ atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
+ * pci_epc_init_notify() - Notify the EPF device that the EPC device's core
+ * initialization is completed
+ * @epc: the EPC device whose core initialization is completed
+ *
+ * Invoke to notify the EPF device that the EPC device's core initialization
+ * is completed.
+ */
+void pci_epc_init_notify(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_init_notify);
+
+/**
* pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
@@ -610,8 +622,9 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
goto err_ret;
}
- spin_lock_init(&epc->lock);
+ mutex_init(&epc->lock);
INIT_LIST_HEAD(&epc->pci_epf);
+ ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);
device_initialize(&epc->dev);
epc->dev.class = pci_epc_class;
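With the spinlock replaced by a mutex and an atomic notifier head added, EPF drivers are expected to subscribe to the chain rather than receive direct linkup() callbacks. A hedged sketch of a subscriber (the my_epf_* names are hypothetical; CORE_INIT and LINK_UP are the events used above):

	static int my_epf_notifier(struct notifier_block *nb, unsigned long val,
				   void *data)
	{
		switch (val) {
		case CORE_INIT:	/* EPC core (re)initialized: reprogram BARs etc. */
			break;
		case LINK_UP:	/* link to the Root Complex established */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_epf_nb = {
		.notifier_call = my_epf_notifier,
	};

	/* in the EPF driver's bind(): */
	/* atomic_notifier_chain_register(&epc->notifier, &my_epf_nb); */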
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index d2b174ce15de..abfac1109a13 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
mem->page_size = page_size;
mem->pages = pages;
mem->size = size;
+ mutex_init(&mem->lock);
epc->mem = mem;
@@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
int pageno;
- void __iomem *virt_addr;
+ void __iomem *virt_addr = NULL;
struct pci_epc_mem *mem = epc->mem;
unsigned int page_shift = ilog2(mem->page_size);
int order;
@@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
if (pageno < 0)
- return NULL;
+ goto ret;
*phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
+ret:
+ mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
@@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
pageno = (phys_addr - mem->phys_base) >> page_shift;
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
+ mutex_unlock(&mem->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
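For context, the alloc/free pair above is typically driven from an EPF driver roughly like this (sketch, error handling trimmed; based on the pattern in pci-epf-test):

	phys_addr_t phys;
	void __iomem *addr;

	addr = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
	if (!addr)
		return -ENOMEM;

	/* map host memory at 'phys' via pci_epc_map_addr(), do the I/O ... */

	pci_epc_mem_free_addr(epc, phys, addr, SZ_4K);

With mem->lock in place, two EPF drivers calling this concurrently can no longer grab overlapping bitmap regions.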
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index fb1306de8f40..244e00f48c5c 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -21,26 +21,6 @@ static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
- * pci_epf_linkup() - Notify the function driver that EPC device has
- * established a connection with the Root Complex.
- * @epf: the EPF device bound to the EPC device which has established
- * the connection with the host
- *
- * Invoke to notify the function driver that EPC device has established
- * a connection with the Root Complex.
- */
-void pci_epf_linkup(struct pci_epf *epf)
-{
- if (!epf->driver) {
- dev_WARN(&epf->dev, "epf device not bound to driver\n");
- return;
- }
-
- epf->driver->ops->linkup(epf);
-}
-EXPORT_SYMBOL_GPL(pci_epf_linkup);
-
-/**
* pci_epf_unbind() - Notify the function driver that the binding between the
* EPF device and EPC device has been lost
* @epf: the EPF device which has lost the binding with the EPC device
@@ -55,7 +35,9 @@ void pci_epf_unbind(struct pci_epf *epf)
return;
}
+ mutex_lock(&epf->lock);
epf->driver->ops->unbind(epf);
+ mutex_unlock(&epf->lock);
module_put(epf->driver->owner);
}
EXPORT_SYMBOL_GPL(pci_epf_unbind);
@@ -69,6 +51,8 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ int ret;
+
if (!epf->driver) {
dev_WARN(&epf->dev, "epf device not bound to driver\n");
return -EINVAL;
@@ -77,7 +61,11 @@ int pci_epf_bind(struct pci_epf *epf)
if (!try_module_get(epf->driver->owner))
return -EAGAIN;
- return epf->driver->ops->bind(epf);
+ mutex_lock(&epf->lock);
+ ret = epf->driver->ops->bind(epf);
+ mutex_unlock(&epf->lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_bind);
@@ -99,6 +87,7 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
epf->bar[bar].phys_addr);
epf->bar[bar].phys_addr = 0;
+ epf->bar[bar].addr = NULL;
epf->bar[bar].size = 0;
epf->bar[bar].barno = 0;
epf->bar[bar].flags = 0;
@@ -135,6 +124,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
}
epf->bar[bar].phys_addr = phys_addr;
+ epf->bar[bar].addr = space;
epf->bar[bar].size = size;
epf->bar[bar].barno = bar;
epf->bar[bar].flags |= upper_32_bits(size) ?
@@ -214,7 +204,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
if (!driver->ops)
return -EINVAL;
- if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup)
+ if (!driver->ops->bind || !driver->ops->unbind)
return -EINVAL;
driver->driver.bus = &pci_epf_bus_type;
@@ -272,6 +262,7 @@ struct pci_epf *pci_epf_create(const char *name)
device_initialize(dev);
dev->bus = &pci_epf_bus_type;
dev->type = &pci_epf_type;
+ mutex_init(&epf->lock);
ret = dev_set_name(dev, "%s", name);
if (ret) {
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index aa61d4c219d7..ae44f46d1bf3 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -84,6 +84,7 @@ struct controller {
struct pcie_device *pcie;
u32 slot_cap; /* capabilities and quirks */
+ unsigned int inband_presence_disabled:1;
u16 slot_ctrl; /* control register access */
struct mutex ctrl_lock;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8a2cb1764386..53433b37e181 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -14,6 +14,7 @@
#define dev_fmt(fmt) "pciehp: " fmt
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/jiffies.h>
@@ -26,6 +27,24 @@
#include "../pci.h"
#include "pciehp.h"
+static const struct dmi_system_id inband_presence_disabled_dmi_table[] = {
+ /*
+ * Match all Dell systems, as some Dell systems have inband
+ * presence disabled on NVMe slots (but don't support the bit to
+ * report it). Setting inband presence disabled should have no
+ * negative effect, except on broken hotplug slots that never
+ * assert presence detect; those will still work, just with a
+ * bit of extra delay before being probed.
+ */
+ {
+ .ident = "Dell System",
+ .matches = {
+ DMI_MATCH(DMI_OEM_STRING, "Dell System"),
+ },
+ },
+ {}
+};
+
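Should another vendor need the same quirk, the table accepts additional entries; a hypothetical example (vendor string invented for illustration), placed before the terminating {} entry:

	{
		.ident = "Example Vendor System",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		},
	},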
static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
return ctrl->pcie->port;
@@ -252,6 +271,22 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
return found;
}
+static void pcie_wait_for_presence(struct pci_dev *pdev)
+{
+ int timeout = 1250;
+ u16 slot_status;
+
+ do {
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_PDS)
+ return;
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 0);
+
+ pci_info(pdev, "Timeout waiting for Presence Detect\n");
+}
+
int pciehp_check_link_status(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
@@ -261,6 +296,9 @@ int pciehp_check_link_status(struct controller *ctrl)
if (!pcie_wait_for_link(pdev, true))
return -1;
+ if (ctrl->inband_presence_disabled)
+ pcie_wait_for_presence(pdev);
+
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
@@ -527,7 +565,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
struct device *parent = pdev->dev.parent;
- u16 status, events;
+ u16 status, events = 0;
/*
* Interrupts only occur in D3hot or shallower and only if enabled
@@ -552,6 +590,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
}
}
+read_status:
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
if (status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
@@ -564,24 +603,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
* Slot Status contains plain status bits as well as event
* notification bits; right now we only want the event bits.
*/
- events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
- PCI_EXP_SLTSTA_DLLSC);
+ status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC;
/*
* If we've already reported a power fault, don't report it again
* until we've done something to handle it.
*/
if (ctrl->power_fault_detected)
- events &= ~PCI_EXP_SLTSTA_PFD;
+ status &= ~PCI_EXP_SLTSTA_PFD;
+ events |= status;
if (!events) {
if (parent)
pm_runtime_put(parent);
return IRQ_NONE;
}
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+ if (status) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+
+ /*
+ * In MSI mode, all event bits must be zero before the port
+ * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
+ * So re-read the Slot Status register in case a bit was set
+ * between read and write.
+ */
+ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
+ goto read_status;
+ }
+
ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
if (parent)
pm_runtime_put(parent);
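The goto-based flow above is easier to see flattened into a loop. A compressed sketch, where read_slot_status() and clear_slot_events() are hypothetical stand-ins for the pcie_capability_*() calls:

	u16 events = 0, status;

	do {
		status = read_slot_status() & EVENT_MASK;
		if (ctrl->power_fault_detected)
			status &= ~PCI_EXP_SLTSTA_PFD;
		events |= status;	/* accumulate across re-reads */
		if (status)
			clear_slot_events(status);	/* write-1-to-clear */
		/*
		 * MSI mode: loop until no event bit appeared between the
		 * read and the write (PCIe r5.0 sec 6.7.3.4), otherwise the
		 * port would never raise another interrupt.
		 */
	} while (status && pci_dev_msi_enabled(pdev) && !pciehp_poll_mode);

	/* 'events' then feeds the IRQ_WAKE_THREAD decision as before. */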
@@ -625,17 +677,15 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
ret = pciehp_isr(irq, dev_id);
enable_irq(irq);
- if (ret != IRQ_WAKE_THREAD) {
- pci_config_pm_runtime_put(pdev);
- return ret;
- }
+ if (ret != IRQ_WAKE_THREAD)
+ goto out;
}
synchronize_hardirq(irq);
events = atomic_xchg(&ctrl->pending_events, 0);
if (!events) {
- pci_config_pm_runtime_put(pdev);
- return IRQ_NONE;
+ ret = IRQ_NONE;
+ goto out;
}
/* Check Attention Button Pressed */
@@ -664,10 +714,12 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
pciehp_handle_presence_or_link_change(ctrl, events);
up_read(&ctrl->reset_lock);
+ ret = IRQ_HANDLED;
+out:
pci_config_pm_runtime_put(pdev);
ctrl->ist_running = false;
wake_up(&ctrl->requester);
- return IRQ_HANDLED;
+ return ret;
}
static int pciehp_poll(void *data)
@@ -848,7 +900,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
- u32 slot_cap, link_cap;
+ u32 slot_cap, slot_cap2, link_cap;
u8 poweron;
struct pci_dev *pdev = dev->port;
struct pci_bus *subordinate = pdev->subordinate;
@@ -883,6 +935,16 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
up_read(&pci_bus_sem);
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2);
+ if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) {
+ pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE,
+ PCI_EXP_SLTCTL_IBPD_DISABLE);
+ ctrl->inband_presence_disabled = 1;
+ }
+
+ if (dmi_first_match(inband_presence_disabled_dmi_table))
+ ctrl->inband_presence_disabled = 1;
+
/* Check if Data Link Layer Link Active Reporting is implemented */
pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
@@ -892,7 +954,7 @@ struct controller *pcie_init(struct pcie_device *dev)
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
- ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
@@ -903,6 +965,7 @@ struct controller *pcie_init(struct pcie_device *dev)
FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
+ FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD),
FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 977946e4e613..c5eb509c72f0 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -140,7 +140,7 @@ static void dlpar_pci_add_bus(struct device_node *dn)
struct pci_controller *phb = pdn->phb;
struct pci_dev *dev = NULL;
- eeh_add_device_tree_early(pdn);
+ pseries_eeh_init_edev_recursive(pdn);
/* Add EADS device to PHB bus, adding new entry to bus->devices */
dev = of_create_pci_dev(dn, phb->bus, pdn->devfn);
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index e408e4021cee..6504869efabc 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -288,11 +288,10 @@ EXPORT_SYMBOL_GPL(rpaphp_check_drc_props);
static int is_php_type(char *drc_type)
{
- unsigned long value;
char *endptr;
/* PCI Hotplug nodes have an integer for drc_type */
- value = simple_strtoul(drc_type, &endptr, 10);
+ simple_strtoul(drc_type, &endptr, 10);
if (endptr == drc_type)
return 0;
@@ -494,6 +493,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return retval;
if (state == PRESENT) {
+ pseries_eeh_init_edev_recursive(PCI_DN(slot->dn));
+
pci_lock_rescan_remove();
pci_hp_add_devices(slot->bus);
pci_unlock_rescan_remove();
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index beca61badeea..c380bdacd146 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -95,8 +95,10 @@ int rpaphp_enable_slot(struct slot *slot)
return -EINVAL;
}
- if (list_empty(&bus->devices))
+ if (list_empty(&bus->devices)) {
+ pseries_eeh_init_edev_recursive(PCI_DN(slot->dn));
pci_hp_add_devices(bus);
+ }
if (!list_empty(&bus->devices)) {
slot->state = CONFIGURED;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 30ee72268790..39295d88f670 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -19,7 +19,6 @@
#include <asm/sclp.h>
#define SLOT_NAME_SIZE 10
-static LIST_HEAD(s390_hotplug_slot_list);
static int zpci_fn_configured(enum zpci_state state)
{
@@ -27,97 +26,86 @@ static int zpci_fn_configured(enum zpci_state state)
state == ZPCI_FN_STATE_ONLINE;
}
-/*
- * struct slot - slot information for each *physical* slot
- */
-struct slot {
- struct list_head slot_list;
- struct hotplug_slot hotplug_slot;
- struct zpci_dev *zdev;
-};
-
-static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+static inline int zdev_configure(struct zpci_dev *zdev)
{
- return container_of(hotplug_slot, struct slot, hotplug_slot);
-}
-
-static inline int slot_configure(struct slot *slot)
-{
- int ret = sclp_pci_configure(slot->zdev->fid);
+ int ret = sclp_pci_configure(zdev->fid);
- zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+ zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, ret);
if (!ret)
- slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
return ret;
}
-static inline int slot_deconfigure(struct slot *slot)
+static inline int zdev_deconfigure(struct zpci_dev *zdev)
{
- int ret = sclp_pci_deconfigure(slot->zdev->fid);
+ int ret = sclp_pci_deconfigure(zdev->fid);
- zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
if (!ret)
- slot->zdev->state = ZPCI_FN_STATE_STANDBY;
+ zdev->state = ZPCI_FN_STATE_STANDBY;
return ret;
}
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = to_slot(hotplug_slot);
+ struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
+ hotplug_slot);
int rc;
- if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
+ if (zdev->state != ZPCI_FN_STATE_STANDBY)
return -EIO;
- rc = slot_configure(slot);
+ rc = zdev_configure(zdev);
if (rc)
return rc;
- rc = zpci_enable_device(slot->zdev);
+ rc = zpci_enable_device(zdev);
if (rc)
goto out_deconfigure;
- pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN);
+ pci_scan_slot(zdev->bus, ZPCI_DEVFN);
pci_lock_rescan_remove();
- pci_bus_add_devices(slot->zdev->bus);
+ pci_bus_add_devices(zdev->bus);
pci_unlock_rescan_remove();
return rc;
out_deconfigure:
- slot_deconfigure(slot);
+ zdev_deconfigure(zdev);
return rc;
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = to_slot(hotplug_slot);
+ struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
+ hotplug_slot);
struct pci_dev *pdev;
int rc;
- if (!zpci_fn_configured(slot->zdev->state))
+ if (!zpci_fn_configured(zdev->state))
return -EIO;
- pdev = pci_get_slot(slot->zdev->bus, ZPCI_DEVFN);
+ pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
if (pdev) {
pci_stop_and_remove_bus_device_locked(pdev);
pci_dev_put(pdev);
}
- rc = zpci_disable_device(slot->zdev);
+ rc = zpci_disable_device(zdev);
if (rc)
return rc;
- return slot_deconfigure(slot);
+ return zdev_deconfigure(zdev);
}
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = to_slot(hotplug_slot);
+ struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
+ hotplug_slot);
- switch (slot->zdev->state) {
+ switch (zdev->state) {
case ZPCI_FN_STATE_STANDBY:
*value = 0;
break;
@@ -145,44 +133,15 @@ static const struct hotplug_slot_ops s390_hotplug_slot_ops = {
int zpci_init_slot(struct zpci_dev *zdev)
{
char name[SLOT_NAME_SIZE];
- struct slot *slot;
- int rc;
- if (!zdev)
- return 0;
-
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- goto error;
-
- slot->zdev = zdev;
- slot->hotplug_slot.ops = &s390_hotplug_slot_ops;
+ zdev->hotplug_slot.ops = &s390_hotplug_slot_ops;
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
- rc = pci_hp_register(&slot->hotplug_slot, zdev->bus,
- ZPCI_DEVFN, name);
- if (rc)
- goto error_reg;
-
- list_add(&slot->slot_list, &s390_hotplug_slot_list);
- return 0;
-
-error_reg:
- kfree(slot);
-error:
- return -ENOMEM;
+ return pci_hp_register(&zdev->hotplug_slot, zdev->bus,
+ ZPCI_DEVFN, name);
}
void zpci_exit_slot(struct zpci_dev *zdev)
{
- struct slot *slot, *next;
-
- list_for_each_entry_safe(slot, next, &s390_hotplug_slot_list,
- slot_list) {
- if (slot->zdev != zdev)
- continue;
- list_del(&slot->slot_list);
- pci_hp_deregister(&slot->hotplug_slot);
- kfree(slot);
- }
+ pci_hp_deregister(&zdev->hotplug_slot);
}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 9a8a38384121..b73b10bce0df 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -291,6 +291,9 @@ static const struct pci_p2pdma_whitelist_entry {
{PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE},
/* Intel SkyLake-E */
{PCI_VENDOR_ID_INTEL, 0x2030, 0},
+ {PCI_VENDOR_ID_INTEL, 0x2031, 0},
+ {PCI_VENDOR_ID_INTEL, 0x2032, 0},
+ {PCI_VENDOR_ID_INTEL, 0x2033, 0},
{PCI_VENDOR_ID_INTEL, 0x2020, 0},
{}
};
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0c02d500158f..d21969fba6ab 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -439,7 +439,7 @@ enum hpx_type3_dev_type {
static u16 hpx3_device_type(struct pci_dev *dev)
{
u16 pcie_type = pci_pcie_type(dev);
- const int pcie_to_hpx3_type[] = {
+ static const int pcie_to_hpx3_type[] = {
[PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
[PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
[PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
@@ -1241,6 +1241,7 @@ static void pci_acpi_setup(struct device *dev)
pci_acpi_optimize_delay(pci_dev, adev->handle);
pci_acpi_set_untrusted(pci_dev);
+ pci_acpi_add_edr_notifier(pci_dev);
pci_acpi_add_pm_notifier(adev, pci_dev);
if (!adev->wakeup.flags.valid)
@@ -1268,6 +1269,7 @@ static void pci_acpi_cleanup(struct device *dev)
if (!adev)
return;
+ pci_acpi_remove_edr_notifier(pci_dev);
pci_acpi_remove_pm_notifier(adev);
if (adev->wakeup.flags.valid) {
acpi_device_power_remove_dependent(adev, dev);
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index fffa77093c08..4f4f54bc732e 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -50,12 +50,7 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
(PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ |
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16),
.rsvd = GENMASK(15, 10) | ((BIT(6) | GENMASK(3, 0)) << 16),
- .w1c = (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY) << 16,
+ .w1c = PCI_STATUS_ERROR_BITS << 16,
},
[PCI_CLASS_REVISION / 4] = { .ro = ~0 },
@@ -100,12 +95,7 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
PCI_STATUS_DEVSEL_MASK) << 16) |
GENMASK(11, 8) | GENMASK(3, 0)),
- .w1c = (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY) << 16,
+ .w1c = PCI_STATUS_ERROR_BITS << 16,
.rsvd = ((BIT(6) | GENMASK(4, 0)) << 16),
},
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 30fbe2ea6eab..aafd58da3a89 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -55,15 +55,13 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = {
.need_resume = mid_pci_need_resume,
};
-#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
-
/*
* This table should be in sync with the one in
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SALTWELL_MID),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
{}
};
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 13f766db0684..6d78df981d41 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -156,7 +156,8 @@ static ssize_t max_link_speed_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%s\n", PCIE_SPEED2STR(pcie_get_speed_cap(pdev)));
+ return sprintf(buf, "%s\n",
+ pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);
@@ -175,33 +176,15 @@ static ssize_t current_link_speed_show(struct device *dev,
struct pci_dev *pci_dev = to_pci_dev(dev);
u16 linkstat;
int err;
- const char *speed;
+ enum pci_bus_speed speed;
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
if (err)
return -EINVAL;
- switch (linkstat & PCI_EXP_LNKSTA_CLS) {
- case PCI_EXP_LNKSTA_CLS_32_0GB:
- speed = "32 GT/s";
- break;
- case PCI_EXP_LNKSTA_CLS_16_0GB:
- speed = "16 GT/s";
- break;
- case PCI_EXP_LNKSTA_CLS_8_0GB:
- speed = "8 GT/s";
- break;
- case PCI_EXP_LNKSTA_CLS_5_0GB:
- speed = "5 GT/s";
- break;
- case PCI_EXP_LNKSTA_CLS_2_5GB:
- speed = "2.5 GT/s";
- break;
- default:
- speed = "Unknown speed";
- }
+ speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
- return sprintf(buf, "%s\n", speed);
+ return sprintf(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);
@@ -464,7 +447,8 @@ static ssize_t dev_rescan_store(struct device *dev,
}
return count;
}
-static DEVICE_ATTR_WO(dev_rescan);
+static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
+ dev_rescan_store);
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -501,7 +485,8 @@ static ssize_t bus_rescan_store(struct device *dev,
}
return count;
}
-static DEVICE_ATTR_WO(bus_rescan);
+static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
+ bus_rescan_store);
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d828ca835a98..595fcf59843f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -173,6 +173,29 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
+/**
+ * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
+ * @pdev: the PCI device
+ *
+ * Returns error bits set in PCI_STATUS and clears them.
+ */
+int pci_status_get_and_clear_errors(struct pci_dev *pdev)
+{
+ u16 status;
+ int ret;
+
+ ret = pci_read_config_word(pdev, PCI_STATUS, &status);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return -EIO;
+
+ status &= PCI_STATUS_ERROR_BITS;
+ if (status)
+ pci_write_config_word(pdev, PCI_STATUS, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
+
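A sketch of the intended caller pattern (hypothetical driver code): read the sticky error bits once, clear them in the same pass, and act on what was set:

	int err = pci_status_get_and_clear_errors(pdev);

	if (err < 0)
		return err;	/* config space read failed */
	if (err & PCI_STATUS_REC_MASTER_ABORT)
		dev_warn(&pdev->dev, "master abort observed\n");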
#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
@@ -557,6 +580,40 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+/**
+ * pci_get_dsn - Read and return the 8-byte Device Serial Number
+ * @dev: PCI device to query
+ *
+ * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
+ * Number.
+ *
+ * Returns the DSN, or zero if the capability does not exist.
+ */
+u64 pci_get_dsn(struct pci_dev *dev)
+{
+ u32 dword;
+ u64 dsn;
+ int pos;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
+ if (!pos)
+ return 0;
+
+ /*
+ * The Device Serial Number is two dwords offset 4 bytes from the
+ * capability position. The specification says that the first dword is
+ * the lower half, and the second dword is the upper half.
+ */
+ pos += 4;
+ pci_read_config_dword(dev, pos, &dword);
+ dsn = (u64)dword;
+ pci_read_config_dword(dev, pos + 4, &dword);
+ dsn |= ((u64)dword) << 32;
+
+ return dsn;
+}
+EXPORT_SYMBOL_GPL(pci_get_dsn);
+
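Usage is a single call; for example (sketch), logging the serial number when one is present:

	u64 dsn = pci_get_dsn(pdev);

	if (dsn)
		pci_info(pdev, "Device Serial Number: %016llx\n", dsn);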
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
int rc, ttl = PCI_FIND_CAP_TTL;
@@ -1503,7 +1560,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_rebar_state(dev);
pci_restore_dpc_state(dev);
- pci_cleanup_aer_error_status_regs(dev);
+ pci_aer_clear_status(dev);
pci_restore_aer_state(dev);
pci_restore_config_space(dev);
@@ -5784,19 +5841,10 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
* where only 2.5 GT/s and 5.0 GT/s speeds were defined.
*/
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
- if (lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
- return PCIE_SPEED_32_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
- return PCIE_SPEED_16_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
- return PCIE_SPEED_8_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
- return PCIE_SPEED_5_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
- return PCIE_SPEED_2_5GT;
- return PCI_SPEED_UNKNOWN;
- }
+
+ /* PCIe r3.0-compliant */
+ if (lnkcap2)
+ return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
@@ -5872,14 +5920,14 @@ void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
if (bw_avail >= bw_cap && verbose)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
bw_cap / 1000, bw_cap % 1000,
- PCIE_SPEED2STR(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap);
else if (bw_avail < bw_cap)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
bw_avail / 1000, bw_avail % 1000,
- PCIE_SPEED2STR(speed), width,
+ pci_speed_string(speed), width,
limiting_dev ? pci_name(limiting_dev) : "<unknown>",
bw_cap / 1000, bw_cap % 1000,
- PCIE_SPEED2STR(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap);
}
/**
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6394e7746fb5..6d3f75867106 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -292,22 +292,25 @@ void pci_disable_bridge_window(struct pci_dev *dev);
struct pci_bus *pci_bus_get(struct pci_bus *bus);
void pci_bus_put(struct pci_bus *bus);
-/* PCIe link information */
-#define PCIE_SPEED2STR(speed) \
- ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \
- (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \
- (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \
- (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \
- "Unknown speed")
+/* PCIe link information from Link Capabilities 2 */
+#define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \
+ ((lnkcap2) & PCI_EXP_LNKCAP2_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
+ PCI_SPEED_UNKNOWN)
/* PCIe speed to Mb/s reduced by encoding overhead */
#define PCIE_SPEED2MBS_ENC(speed) \
- ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
+ ((speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \
+ (speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
(speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \
(speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \
(speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
0)
+const char *pci_speed_string(enum pci_bus_speed speed);
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
@@ -448,9 +451,13 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#ifdef CONFIG_PCIE_DPC
void pci_save_dpc_state(struct pci_dev *dev);
void pci_restore_dpc_state(struct pci_dev *dev);
+void pci_dpc_init(struct pci_dev *pdev);
+void dpc_process_error(struct pci_dev *pdev);
+pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
#else
static inline void pci_save_dpc_state(struct pci_dev *dev) {}
static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
+static inline void pci_dpc_init(struct pci_dev *pdev) {}
#endif
#ifdef CONFIG_PCI_ATS
@@ -547,8 +554,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
#endif
/* PCI error reporting and recovery */
-void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
- u32 service);
+pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ enum pci_channel_state state,
+ pci_ers_result_t (*reset_link)(struct pci_dev *pdev));
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
@@ -651,12 +659,16 @@ void pci_aer_exit(struct pci_dev *dev);
extern const struct attribute_group aer_stats_attr_group;
void pci_aer_clear_fatal_status(struct pci_dev *dev);
void pci_aer_clear_device_status(struct pci_dev *dev);
+int pci_aer_clear_status(struct pci_dev *dev);
+int pci_aer_raw_clear_status(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
static inline void pci_aer_init(struct pci_dev *d) { }
static inline void pci_aer_exit(struct pci_dev *d) { }
static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
+static inline int pci_aer_clear_status(struct pci_dev *dev) { return -EINVAL; }
+static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL; }
#endif
#ifdef CONFIG_ACPI
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 6e3c04b46fb1..66386811cfde 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -34,6 +34,7 @@ config PCIEAER
config PCIEAER_INJECT
tristate "PCI Express error injection support"
depends on PCIEAER
+ select GENERIC_IRQ_INJECTION
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector.
@@ -140,3 +141,13 @@ config PCIE_BW
This enables PCI Express Bandwidth Change Notification. If
you know link width or rate changes occur only to correct
unreliable links, you may answer Y.
+
+config PCIE_EDR
+ bool "PCI Express Error Disconnect Recover support"
+ depends on PCIE_DPC && ACPI
+ help
+ This option adds Error Disconnect Recover support as specified
+ in the Downstream Port Containment Related Enhancements ECN to
+ the PCI Firmware Specification r3.2. Enable this if you want to
+ support hybrid DPC model which uses both firmware and OS to
+ implement DPC.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index efb9d2e71e9e..68da9280ff11 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
obj-$(CONFIG_PCIE_PTM) += ptm.o
obj-$(CONFIG_PCIE_BW) += bw_notification.o
+obj-$(CONFIG_PCIE_EDR) += edr.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 4a818b07a1af..f4274d301235 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -102,6 +102,7 @@ struct aer_stats {
#define ERR_UNCOR_ID(d) (d >> 16)
static int pcie_aer_disable;
+static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
void pci_no_aer(void)
{
@@ -376,7 +377,7 @@ void pci_aer_clear_device_status(struct pci_dev *dev)
pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
-int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
int pos;
u32 status, sev;
@@ -397,7 +398,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+EXPORT_SYMBOL_GPL(pci_aer_clear_nonfatal_status);
void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
@@ -419,7 +420,16 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
-int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+/**
+ * pci_aer_raw_clear_status - Clear AER error registers.
+ * @dev: the PCI device
+ *
+ * Clear AER error status registers unconditionally, regardless of
+ * whether they're owned by firmware or the OS.
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_aer_raw_clear_status(struct pci_dev *dev)
{
int pos;
u32 status;
@@ -432,9 +442,6 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
if (!pos)
return -EIO;
- if (pcie_aer_get_firmware_first(dev))
- return -EIO;
-
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
@@ -450,6 +457,14 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
+int pci_aer_clear_status(struct pci_dev *dev)
+{
+ if (pcie_aer_get_firmware_first(dev))
+ return -EIO;
+
+ return pci_aer_raw_clear_status(dev);
+}
+
void pci_save_aer_state(struct pci_dev *dev)
{
struct pci_cap_saved_state *save_state;
@@ -515,7 +530,7 @@ void pci_aer_init(struct pci_dev *dev)
n = pcie_cap_has_rtctl(dev) ? 5 : 4;
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
- pci_cleanup_aer_error_status_regs(dev);
+ pci_aer_clear_status(dev);
}
void pci_aer_exit(struct pci_dev *dev)
@@ -1053,11 +1068,9 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
info->status);
pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
- pcie_do_recovery(dev, pci_channel_io_normal,
- PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
else if (info->severity == AER_FATAL)
- pcie_do_recovery(dev, pci_channel_io_frozen,
- PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(dev, pci_channel_io_frozen, aer_root_reset);
pci_dev_put(dev);
}
@@ -1094,10 +1107,10 @@ static void aer_recover_work_func(struct work_struct *work)
cper_print_aer(pdev, entry.severity, entry.regs);
if (entry.severity == AER_NONFATAL)
pcie_do_recovery(pdev, pci_channel_io_normal,
- PCIE_PORT_SERVICE_AER);
+ aer_root_reset);
else if (entry.severity == AER_FATAL)
pcie_do_recovery(pdev, pci_channel_io_frozen,
- PCIE_PORT_SERVICE_AER);
+ aer_root_reset);
pci_dev_put(pdev);
}
}
@@ -1501,7 +1514,6 @@ static struct pcie_port_service_driver aerdriver = {
.probe = aer_probe,
.remove = aer_remove,
- .reset_link = aer_root_reset,
};
/**
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 6988fe7389b9..21cc3d3387f7 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -468,9 +468,7 @@ static int aer_inject(struct aer_error_inj *einj)
}
pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
- local_irq_disable();
- generic_handle_irq(edev->irq);
- local_irq_enable();
+ ret = irq_inject_interrupt(edev->irq);
} else {
pci_err(rpdev, "AER device not found\n");
ret = -ENODEV;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 0dcd44308228..2378ed692534 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -273,7 +273,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
}
if (consistent)
return;
- pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n");
+ pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
}
/* Configure downstream component, all functions */
@@ -747,9 +747,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
/* Enable what we need to enable */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index e06f42f58d3d..762170423fdd 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -17,13 +17,6 @@
#include "portdrv.h"
#include "../pci.h"
-struct dpc_dev {
- struct pcie_device *dev;
- u16 cap_pos;
- bool rp_extensions;
- u8 rp_log_size;
-};
-
static const char * const rp_pio_error_string[] = {
"Configuration Request received UR Completion", /* Bit Position 0 */
"Configuration Request received CA Completion", /* Bit Position 1 */
@@ -46,63 +39,42 @@ static const char * const rp_pio_error_string[] = {
"Memory Request Completion Timeout", /* Bit Position 18 */
};
-static struct dpc_dev *to_dpc_dev(struct pci_dev *dev)
-{
- struct device *device;
-
- device = pcie_port_find_device(dev, PCIE_PORT_SERVICE_DPC);
- if (!device)
- return NULL;
- return get_service_data(to_pcie_device(device));
-}
-
void pci_save_dpc_state(struct pci_dev *dev)
{
- struct dpc_dev *dpc;
struct pci_cap_saved_state *save_state;
u16 *cap;
if (!pci_is_pcie(dev))
return;
- dpc = to_dpc_dev(dev);
- if (!dpc)
- return;
-
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
if (!save_state)
return;
cap = (u16 *)&save_state->cap.data[0];
- pci_read_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, cap);
+ pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap);
}
void pci_restore_dpc_state(struct pci_dev *dev)
{
- struct dpc_dev *dpc;
struct pci_cap_saved_state *save_state;
u16 *cap;
if (!pci_is_pcie(dev))
return;
- dpc = to_dpc_dev(dev);
- if (!dpc)
- return;
-
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
if (!save_state)
return;
cap = (u16 *)&save_state->cap.data[0];
- pci_write_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, *cap);
+ pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}
-static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
+static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
unsigned long timeout = jiffies + HZ;
- struct pci_dev *pdev = dpc->dev->port;
- u16 cap = dpc->cap_pos, status;
+ u16 cap = pdev->dpc_cap, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
while (status & PCI_EXP_DPC_RP_BUSY &&
@@ -117,17 +89,15 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
return 0;
}
-static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
+pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
- struct dpc_dev *dpc;
u16 cap;
/*
* DPC disables the Link automatically in hardware, so it has
* already been reset by the time we get here.
*/
- dpc = to_dpc_dev(pdev);
- cap = dpc->cap_pos;
+ cap = pdev->dpc_cap;
/*
* Wait until the Link is inactive, then clear DPC Trigger Status
@@ -135,7 +105,7 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
*/
pcie_wait_for_link(pdev, false);
- if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
+ if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
return PCI_ERS_RESULT_DISCONNECT;
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
@@ -147,10 +117,9 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
+static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
- struct pci_dev *pdev = dpc->dev->port;
- u16 cap = dpc->cap_pos, dpc_status, first_error;
+ u16 cap = pdev->dpc_cap, dpc_status, first_error;
u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
int i;
@@ -175,7 +144,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
first_error == i ? " (First)" : "");
}
- if (dpc->rp_log_size < 4)
+ if (pdev->dpc_rp_log_size < 4)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
&dw0);
@@ -188,12 +157,12 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
dw0, dw1, dw2, dw3);
- if (dpc->rp_log_size < 5)
+ if (pdev->dpc_rp_log_size < 5)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
- for (i = 0; i < dpc->rp_log_size - 5; i++) {
+ for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
pci_read_config_dword(pdev,
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
@@ -224,12 +193,10 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
return 1;
}
-static irqreturn_t dpc_handler(int irq, void *context)
+void dpc_process_error(struct pci_dev *pdev)
{
+ u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
struct aer_err_info info;
- struct dpc_dev *dpc = context;
- struct pci_dev *pdev = dpc->dev->port;
- u16 cap = dpc->cap_pos, status, source, reason, ext_reason;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
@@ -248,27 +215,33 @@ static irqreturn_t dpc_handler(int irq, void *context)
"reserved error");
/* show RP PIO error detail information */
- if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
- dpc_process_rp_pio_error(dpc);
+ if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0)
+ dpc_process_rp_pio_error(pdev);
else if (reason == 0 &&
dpc_get_aer_uncorrect_severity(pdev, &info) &&
aer_get_device_error_info(pdev, &info)) {
aer_print_error(pdev, &info);
- pci_cleanup_aer_uncorrect_error_status(pdev);
+ pci_aer_clear_nonfatal_status(pdev);
pci_aer_clear_fatal_status(pdev);
}
+}
+
+static irqreturn_t dpc_handler(int irq, void *context)
+{
+ struct pci_dev *pdev = context;
+
+ dpc_process_error(pdev);
/* We configure DPC so it only triggers on ERR_FATAL */
- pcie_do_recovery(pdev, pci_channel_io_frozen, PCIE_PORT_SERVICE_DPC);
+ pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);
return IRQ_HANDLED;
}
static irqreturn_t dpc_irq(int irq, void *context)
{
- struct dpc_dev *dpc = (struct dpc_dev *)context;
- struct pci_dev *pdev = dpc->dev->port;
- u16 cap = dpc->cap_pos, status;
+ struct pci_dev *pdev = context;
+ u16 cap = pdev->dpc_cap, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
@@ -282,10 +255,30 @@ static irqreturn_t dpc_irq(int irq, void *context)
return IRQ_HANDLED;
}
+void pci_dpc_init(struct pci_dev *pdev)
+{
+ u16 cap;
+
+ pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
+ if (!pdev->dpc_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
+ if (!(cap & PCI_EXP_DPC_CAP_RP_EXT))
+ return;
+
+ pdev->dpc_rp_extensions = true;
+ pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
+ if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ pci_err(pdev, "RP PIO log size %u is invalid\n",
+ pdev->dpc_rp_log_size);
+ pdev->dpc_rp_log_size = 0;
+ }
+}
+
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
static int dpc_probe(struct pcie_device *dev)
{
- struct dpc_dev *dpc;
struct pci_dev *pdev = dev->port;
struct device *device = &dev->device;
int status;
@@ -294,43 +287,25 @@ static int dpc_probe(struct pcie_device *dev)
if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
- dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL);
- if (!dpc)
- return -ENOMEM;
-
- dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
- dpc->dev = dev;
- set_service_data(dev, dpc);
-
status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
dpc_handler, IRQF_SHARED,
- "pcie-dpc", dpc);
+ "pcie-dpc", pdev);
if (status) {
pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
status);
return status;
}
- pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
- pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
-
- dpc->rp_extensions = (cap & PCI_EXP_DPC_CAP_RP_EXT);
- if (dpc->rp_extensions) {
- dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
- if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) {
- pci_err(pdev, "RP PIO log size %u is invalid\n",
- dpc->rp_log_size);
- dpc->rp_log_size = 0;
- }
- }
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
- pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
+ pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
- FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
+ FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size,
FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
@@ -339,13 +314,12 @@ static int dpc_probe(struct pcie_device *dev)
static void dpc_remove(struct pcie_device *dev)
{
- struct dpc_dev *dpc = get_service_data(dev);
struct pci_dev *pdev = dev->port;
u16 ctl;
- pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
- pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
+ pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
}
static struct pcie_port_service_driver dpcdriver = {
@@ -354,7 +328,6 @@ static struct pcie_port_service_driver dpcdriver = {
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
.remove = dpc_remove,
- .reset_link = dpc_reset_link,
};
int __init pcie_dpc_init(void)
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
new file mode 100644
index 000000000000..594622a6cb16
--- /dev/null
+++ b/drivers/pci/pcie/edr.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Error Disconnect Recover support
+ * Author: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+ *
+ * Copyright (C) 2020 Intel Corp.
+ */
+
+#define dev_fmt(fmt) "EDR: " fmt
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+
+#include "portdrv.h"
+#include "../pci.h"
+
+#define EDR_PORT_DPC_ENABLE_DSM 0x0C
+#define EDR_PORT_LOCATE_DSM 0x0D
+#define EDR_OST_SUCCESS 0x80
+#define EDR_OST_FAILED 0x81
+
+/*
+ * _DSM wrapper function to enable/disable DPC
+ * @pdev : PCI device structure
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int acpi_enable_dpc(struct pci_dev *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ union acpi_object *obj, argv4, req;
+ int status = 0;
+
+ /*
+ * Behavior when calling unsupported _DSM functions is undefined,
+ * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ */
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ 1ULL << EDR_PORT_DPC_ENABLE_DSM))
+ return 0;
+
+ req.type = ACPI_TYPE_INTEGER;
+ req.integer.value = 1;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &req;
+
+ /*
+ * Per Downstream Port Containment Related Enhancements ECN to PCI
+ * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+ * optional. Return success if it's not implemented.
+ */
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ EDR_PORT_DPC_ENABLE_DSM, &argv4);
+ if (!obj)
+ return 0;
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ pci_err(pdev, FW_BUG "Enable DPC _DSM returned non integer\n");
+ status = -EIO;
+ }
+
+ if (obj->integer.value != 1) {
+ pci_err(pdev, "Enable DPC _DSM failed to enable DPC\n");
+ status = -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return status;
+}
+
+/*
+ * _DSM wrapper function to locate DPC port
+ * @pdev : Device which received EDR event
+ *
+ * Returns pci_dev or NULL. Caller is responsible for dropping a reference
+ * on the returned pci_dev with pci_dev_put().
+ */
+static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ union acpi_object *obj;
+ u16 port;
+
+ /*
+ * Behavior when calling unsupported _DSM functions is undefined,
+ * so check whether EDR_PORT_LOCATE_DSM is supported.
+ */
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ 1ULL << EDR_PORT_LOCATE_DSM))
+ return pci_dev_get(pdev);
+
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ EDR_PORT_LOCATE_DSM, NULL);
+ if (!obj)
+ return pci_dev_get(pdev);
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ ACPI_FREE(obj);
+ pci_err(pdev, FW_BUG "Locate Port _DSM returned non integer\n");
+ return NULL;
+ }
+
+ /*
+ * Firmware returns the DPC port BDF details in the following format:
+ * 15:8 = bus
+ * 7:3 = device
+ * 2:0 = function
+ */
+ port = obj->integer.value;
+
+ ACPI_FREE(obj);
+
+ return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ PCI_BUS_NUM(port), port & 0xff);
+}
+
+/*
+ * _OST wrapper function to let firmware know the status of EDR event
+ * @pdev : Device used to send _OST
+ * @edev : Device which experienced EDR event
+ * @status : Status of EDR event
+ */
+static int acpi_send_edr_status(struct pci_dev *pdev, struct pci_dev *edev,
+ u16 status)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ u32 ost_status;
+
+ pci_dbg(pdev, "Status for %s: %#x\n", pci_name(edev), status);
+
+ ost_status = PCI_DEVID(edev->bus->number, edev->devfn) << 16;
+ ost_status |= status;
+
+ status = acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_DISCONNECT_RECOVER,
+ ost_status, NULL);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void edr_handle_event(acpi_handle handle, u32 event, void *data)
+{
+ struct pci_dev *pdev = data, *edev;
+ pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT;
+ u16 status;
+
+ pci_info(pdev, "ACPI event %#x received\n", event);
+
+ if (event != ACPI_NOTIFY_DISCONNECT_RECOVER)
+ return;
+
+ /* Locate the port which issued EDR event */
+ edev = acpi_dpc_port_get(pdev);
+ if (!edev) {
+ pci_err(pdev, "Firmware failed to locate DPC port\n");
+ return;
+ }
+
+ pci_dbg(pdev, "Reported EDR dev: %s\n", pci_name(edev));
+
+ /* If port does not support DPC, just send the OST */
+ if (!edev->dpc_cap) {
+ pci_err(edev, FW_BUG "This device doesn't support DPC\n");
+ goto send_ost;
+ }
+
+ /* Check if there is a valid DPC trigger */
+ pci_read_config_word(edev, edev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
+ if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
+ pci_err(edev, "Invalid DPC trigger %#010x\n", status);
+ goto send_ost;
+ }
+
+ dpc_process_error(edev);
+ pci_aer_raw_clear_status(edev);
+
+ /*
+ * Irrespective of whether the DPC event is triggered by ERR_FATAL
+ * or ERR_NONFATAL, since the link is already down, use the FATAL
+ * error recovery path for both cases.
+ */
+ estate = pcie_do_recovery(edev, pci_channel_io_frozen, dpc_reset_link);
+
+send_ost:
+
+ /*
+ * If recovery is successful, send _OST(0xF, BDF << 16 | 0x80)
+ * to firmware. If not successful, send _OST(0xF, BDF << 16 | 0x81).
+ */
+ if (estate == PCI_ERS_RESULT_RECOVERED) {
+ pci_dbg(edev, "DPC port successfully recovered\n");
+ acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
+ } else {
+ pci_dbg(edev, "DPC port recovery failed\n");
+ acpi_send_edr_status(pdev, edev, EDR_OST_FAILED);
+ }
+
+ pci_dev_put(edev);
+}
+
+void pci_acpi_add_edr_notifier(struct pci_dev *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ acpi_status status;
+
+ if (!adev) {
+ pci_dbg(pdev, "No valid ACPI node, skipping EDR init\n");
+ return;
+ }
+
+ status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ edr_handle_event, pdev);
+ if (ACPI_FAILURE(status)) {
+ pci_err(pdev, "Failed to install notify handler\n");
+ return;
+ }
+
+ if (acpi_enable_dpc(pdev))
+ acpi_remove_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ edr_handle_event);
+ else
+ pci_dbg(pdev, "Notify handler installed\n");
+}
+
+void pci_acpi_remove_edr_notifier(struct pci_dev *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+
+ if (!adev)
+ return;
+
+ acpi_remove_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ edr_handle_event);
+ pci_dbg(pdev, "Notify handler removed\n");
+}
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 01dfc8bb7ca0..14bb8f54723e 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -146,49 +146,9 @@ out:
return 0;
}
-/**
- * default_reset_link - default reset function
- * @dev: pointer to pci_dev data structure
- *
- * Invoked when performing link reset on a Downstream Port or a
- * Root Port with no aer driver.
- */
-static pci_ers_result_t default_reset_link(struct pci_dev *dev)
-{
- int rc;
-
- rc = pci_bus_error_reset(dev);
- pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
- return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
-{
- pci_ers_result_t status;
- struct pcie_port_service_driver *driver = NULL;
-
- driver = pcie_port_find_service(dev, service);
- if (driver && driver->reset_link) {
- status = driver->reset_link(dev);
- } else if (pcie_downstream_port(dev)) {
- status = default_reset_link(dev);
- } else {
- pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
- pci_name(dev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- if (status != PCI_ERS_RESULT_RECOVERED) {
- pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
- pci_name(dev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return status;
-}
-
-void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
- u32 service)
+pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ enum pci_channel_state state,
+ pci_ers_result_t (*reset_link)(struct pci_dev *pdev))
{
pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
struct pci_bus *bus;
@@ -203,14 +163,16 @@ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
bus = dev->subordinate;
pci_dbg(dev, "broadcast error_detected message\n");
- if (state == pci_channel_io_frozen)
+ if (state == pci_channel_io_frozen) {
pci_walk_bus(bus, report_frozen_detected, &status);
- else
+ status = reset_link(dev);
+ if (status != PCI_ERS_RESULT_RECOVERED) {
+ pci_warn(dev, "link reset failed\n");
+ goto failed;
+ }
+ } else {
pci_walk_bus(bus, report_normal_detected, &status);
-
- if (state == pci_channel_io_frozen &&
- reset_link(dev, service) != PCI_ERS_RESULT_RECOVERED)
- goto failed;
+ }
if (status == PCI_ERS_RESULT_CAN_RECOVER) {
status = PCI_ERS_RESULT_RECOVERED;
@@ -236,13 +198,15 @@ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
pci_walk_bus(bus, report_resume, &status);
pci_aer_clear_device_status(dev);
- pci_cleanup_aer_uncorrect_error_status(dev);
+ pci_aer_clear_nonfatal_status(dev);
pci_info(dev, "device recovery successful\n");
- return;
+ return status;
failed:
pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
pci_info(dev, "device recovery failed\n");
+
+ return status;
}
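Under the new signature, each error source passes its own link-reset routine; dpc_reset_link() and aer_root_reset() are the in-tree callbacks. A hypothetical service needing only a plain secondary bus reset could supply something like this (sketch, mirroring the removed default_reset_link()):

	static pci_ers_result_t my_reset_link(struct pci_dev *pdev)
	{
		return pci_bus_error_reset(pdev) ? PCI_ERS_RESULT_DISCONNECT :
						   PCI_ERS_RESULT_RECOVERED;
	}

	/* ... pcie_do_recovery(port, pci_channel_io_frozen, my_reset_link); */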
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 1e673619b101..64b5e081cdb2 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -92,9 +92,6 @@ struct pcie_port_service_driver {
/* Device driver may resume normal operations */
void (*error_resume)(struct pci_dev *dev);
- /* Link Reset Capability - AER service driver specific */
- pci_ers_result_t (*reset_link)(struct pci_dev *dev);
-
int port_type; /* Type of the port this driver can handle */
u32 service; /* Port service this device represents */
@@ -161,7 +158,5 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
}
#endif
-struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
- u32 service);
struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 5075cb9e850c..50a9522ab07d 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -459,27 +459,6 @@ static int find_service_iter(struct device *device, void *data)
}
/**
- * pcie_port_find_service - find the service driver
- * @dev: PCI Express port the service is associated with
- * @service: Service to find
- *
- * Find PCI Express port service driver associated with given service
- */
-struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
- u32 service)
-{
- struct pcie_port_service_driver *drv;
- struct portdrv_service_data pdrvs;
-
- pdrvs.drv = NULL;
- pdrvs.service = service;
- device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
-
- drv = pdrvs.drv;
- return drv;
-}
-
-/**
* pcie_port_find_device - find the struct device
* @dev: PCI Express port the service is associated with
* @service: For the service to find
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 512cb4312ddd..77b8a145c39b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -598,6 +598,7 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
bridge->native_ltr = 1;
+ bridge->native_dpc = 1;
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -640,6 +641,7 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge)
}
EXPORT_SYMBOL(pci_free_host_bridge);
+/* Indexed by PCI_X_SSTATUS_FREQ (secondary bus mode and frequency) */
static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCI_SPEED_66MHz_PCIX, /* 1 */
@@ -659,6 +661,7 @@ static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_133MHz_PCIX_533 /* F */
};
+/* Indexed by PCI_EXP_LNKCAP_SLS, PCI_EXP_LNKSTA_CLS */
const unsigned char pcie_link_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCIE_SPEED_2_5GT, /* 1 */
@@ -677,6 +680,44 @@ const unsigned char pcie_link_speed[] = {
PCI_SPEED_UNKNOWN, /* E */
PCI_SPEED_UNKNOWN /* F */
};
+EXPORT_SYMBOL_GPL(pcie_link_speed);
+
+const char *pci_speed_string(enum pci_bus_speed speed)
+{
+ /* Indexed by the pci_bus_speed enum */
+ static const char *speed_strings[] = {
+ "33 MHz PCI", /* 0x00 */
+ "66 MHz PCI", /* 0x01 */
+ "66 MHz PCI-X", /* 0x02 */
+ "100 MHz PCI-X", /* 0x03 */
+ "133 MHz PCI-X", /* 0x04 */
+ NULL, /* 0x05 */
+ NULL, /* 0x06 */
+ NULL, /* 0x07 */
+ NULL, /* 0x08 */
+ "66 MHz PCI-X 266", /* 0x09 */
+ "100 MHz PCI-X 266", /* 0x0a */
+ "133 MHz PCI-X 266", /* 0x0b */
+ "Unknown AGP", /* 0x0c */
+ "1x AGP", /* 0x0d */
+ "2x AGP", /* 0x0e */
+ "4x AGP", /* 0x0f */
+ "8x AGP", /* 0x10 */
+ "66 MHz PCI-X 533", /* 0x11 */
+ "100 MHz PCI-X 533", /* 0x12 */
+ "133 MHz PCI-X 533", /* 0x13 */
+ "2.5 GT/s PCIe", /* 0x14 */
+ "5.0 GT/s PCIe", /* 0x15 */
+ "8.0 GT/s PCIe", /* 0x16 */
+ "16.0 GT/s PCIe", /* 0x17 */
+ "32.0 GT/s PCIe", /* 0x18 */
+ };
+
+ if (speed < ARRAY_SIZE(speed_strings))
+ return speed_strings[speed];
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(pci_speed_string);
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
@@ -2329,6 +2370,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_enable_acs(dev); /* Enable ACS P2P upstream forwarding */
pci_ptm_init(dev); /* Precision Time Measurement */
pci_aer_init(dev); /* Advanced Error Reporting */
+ pci_dpc_init(dev); /* Downstream Port Containment */
pcie_report_downtraining(dev);
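
[Editor's note] pci_speed_string() above pairs with the newly exported
pcie_link_speed[] table, which is indexed by the PCI_EXP_LNKSTA_CLS field. A
hedged usage sketch (pdev is assumed to be a PCIe device):

	/* Sketch only: decode the current link speed into a printable string. */
	u16 linksta;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &linksta);
	pci_info(pdev, "link speed: %s\n",
		 pci_speed_string(pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS]));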
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 29f473ebf20f..ca9ed5774eb1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1970,26 +1970,92 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk
/*
* IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no
* 300641-004US, section 5.7.3.
+ *
+ * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003.
+ * Core IO on Xeon E5 v2, see Intel order no 329188-003.
+ * Core IO on Xeon E7 v2, see Intel order no 329595-002.
+ * Core IO on Xeon E5 v3, see Intel order no 330784-003.
+ * Core IO on Xeon E7 v3, see Intel order no 332315-001US.
+ * Core IO on Xeon E5 v4, see Intel order no 333810-002US.
+ * Core IO on Xeon E7 v4, see Intel order no 332315-001US.
+ * Core IO on Xeon D-1500, see Intel order no 332051-001.
+ * Core IO on Xeon Scalable, see Intel order no 610950.
*/
-#define INTEL_6300_IOAPIC_ABAR 0x40
+#define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */
#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
+#define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */
+#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25)
+
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
u16 pci_config_word;
+ u32 pci_config_dword;
if (noioapicquirk)
return;
- pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
- pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
- pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
-
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_ESB_10:
+ pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
+ &pci_config_word);
+ pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
+ pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
+ pci_config_word);
+ break;
+ case 0x3c28: /* Xeon E5 1600/2600/4600 */
+ case 0x0e28: /* Xeon E5/E7 V2 */
+ case 0x2f28: /* Xeon E5/E7 V3,V4 */
+ case 0x6f28: /* Xeon D-1500 */
+ case 0x2034: /* Xeon Scalable Family */
+ pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
+ &pci_config_dword);
+ pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
+ pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
+ pci_config_dword);
+ break;
+ default:
+ return;
+ }
pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+/*
+ * Device 29 Func 5 Device IDs of IO-APICs that contain the ABAR
+ * (APIC1 Alternate Base Address Register)
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
+ quirk_disable_intel_boot_interrupt);
+
+/*
+ * Device 5 Func 0 Device IDs of Core IO modules/hubs
+ * containing Coherent Interface Protocol Interrupt Control
+ *
+ * Device IDs obtained from the volume 2 datasheets of the
+ * families listed above.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
+ quirk_disable_intel_boot_interrupt);
/* Disable boot interrupts on HT-1000 */
#define BC_HT1000_FEATURE_REG 0x64
@@ -4400,6 +4466,29 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
}
/*
+ * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability.
+ * However, the implementation can block peer-to-peer transactions between
+ * them and provide ACS-like functionality.
+ */
+static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ if (!pci_is_pcie(dev) ||
+ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ return -ENOTTY;
+
+ switch (dev->device) {
+ case 0x0710 ... 0x071e:
+ case 0x0721:
+ case 0x0723 ... 0x0732:
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+
+ return false;
+}
+
+/*
* Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
@@ -4701,6 +4790,12 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
+ /* Zhaoxin multi-function devices */
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
+ /* Zhaoxin Root/Downstream Ports */
+ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
{ 0 }
};
@@ -5461,3 +5556,21 @@ out_disable:
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
PCI_CLASS_DISPLAY_VGA, 8,
quirk_reset_lenovo_thinkpad_p50_nvgpu);
+
+/*
+ * Device [1b21:2142]
+ * When in D0, PME# doesn't get asserted when plugging in a USB 3.0 device.
+ */
+static void pci_fixup_no_d0_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# does not work under D0, disabling it\n");
+ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
+
+static void apex_pci_fixup_class(struct pci_dev *pdev)
+{
+ pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
+ PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
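
[Editor's note] The Zhaoxin quirk above leans on the GCC/Clang case-range
extension, which the kernel uses routinely: case 0x0710 ... 0x071e: matches
every device ID in the closed interval. A sketch of the idiom in isolation:

	/* Sketch only: a case range is shorthand for listing each value. */
	switch (dev->device) {
	case 0x0710 ... 0x071e:		/* == case 0x0710: case 0x0711: ... */
	case 0x0721:
		return true;
	default:
		return false;
	}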
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 137bf0cee897..8fc9a4e911e3 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
pci_disable_rom(pdev);
}
EXPORT_SYMBOL(pci_unmap_rom);
-
-/**
- * pci_platform_rom - provides a pointer to any ROM image provided by the
- * platform
- * @pdev: pointer to pci device struct
- * @size: pointer to receive size of pci window over ROM
- */
-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
-{
- if (pdev->rom && pdev->romlen) {
- *size = pdev->romlen;
- return phys_to_virt((phys_addr_t)pdev->rom);
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(pci_platform_rom);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f2461bf9243d..bbcef1a053ab 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -846,7 +846,7 @@ static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
* Per spec, I/O windows are 4K-aligned, but some bridges have
* an extension to support 1K alignment.
*/
- if (bus->self->io_window_1k)
+ if (bus->self && bus->self->io_window_1k)
align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
else
align = PCI_P2P_DEFAULT_IO_ALIGN;
@@ -920,7 +920,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
calculate_iosize(size, min_size, size1, add_size, children_add_size,
resource_size(b_res), min_align);
if (!size0 && !size1) {
- if (b_res->start || b_res->end)
+ if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
b_res->flags = 0;
@@ -930,7 +930,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
b_res->start = min_align;
b_res->end = b_res->start + size0 - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
- if (size1 > size0 && realloc_head) {
+ if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
@@ -1073,7 +1073,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
calculate_memsize(size, min_size, add_size, children_add_size,
resource_size(b_res), add_align);
if (!size0 && !size1) {
- if (b_res->start || b_res->end)
+ if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
b_res->flags = 0;
@@ -1082,7 +1082,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->start = min_align;
b_res->end = size0 + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
- if (size1 > size0 && realloc_head) {
+ if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
b_res, &bus->busn_res,
@@ -1196,8 +1196,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
unsigned long mask, prefmask, type2 = 0, type3 = 0;
resource_size_t additional_io_size = 0, additional_mmio_size = 0,
additional_mmio_pref_size = 0;
- struct resource *b_res;
- int ret;
+ struct resource *pref;
+ struct pci_host_bridge *host;
+ int hdr_type, i, ret;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -1217,10 +1218,20 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
}
/* The root bus? */
- if (pci_is_root_bus(bus))
- return;
+ if (pci_is_root_bus(bus)) {
+ host = to_pci_host_bridge(bus->bridge);
+ if (!host->size_windows)
+ return;
+ pci_bus_for_each_resource(bus, pref, i)
+ if (pref && (pref->flags & IORESOURCE_PREFETCH))
+ break;
+ hdr_type = -1; /* Intentionally invalid - not a PCI device. */
+ } else {
+ pref = &bus->self->resource[PCI_BRIDGE_RESOURCES + 2];
+ hdr_type = bus->self->hdr_type;
+ }
- switch (bus->self->hdr_type) {
+ switch (hdr_type) {
case PCI_HEADER_TYPE_CARDBUS:
/* Don't size CardBuses yet */
break;
@@ -1242,10 +1253,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
* the size required to put all 64-bit prefetchable
* resources in it.
*/
- b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES];
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (b_res[2].flags & IORESOURCE_MEM_64) {
+ if (pref && (pref->flags & IORESOURCE_MEM_64)) {
prefmask |= IORESOURCE_MEM_64;
ret = pbus_size_mem(bus, prefmask, prefmask,
prefmask, prefmask,
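
[Editor's note] The bus->self checks added above exist because this sizing
path can now run for root buses (when the host bridge sets size_windows). A
root bus is created by its host bridge rather than by a PCI-to-PCI bridge, so
bus->self is NULL there, and every dereference on the path has to be guarded.
A sketch restating the pattern:

	/* Sketch only: bus->self is NULL on a root bus, so guard it. */
	struct pci_dev *bridge = bus->self;
	resource_size_t align;

	if (bridge && bridge->io_window_1k)
		align = PCI_P2P_DEFAULT_IO_ALIGN_1K;	/* 1K bridge extension */
	else
		align = PCI_P2P_DEFAULT_IO_ALIGN;	/* 4K per the PCI spec */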
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index ae4aa0e1f2f4..cc386ef2fa12 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,45 +49,9 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
slot->number);
}
-/* these strings match up with the values in pci_bus_speed */
-static const char *pci_bus_speed_strings[] = {
- "33 MHz PCI", /* 0x00 */
- "66 MHz PCI", /* 0x01 */
- "66 MHz PCI-X", /* 0x02 */
- "100 MHz PCI-X", /* 0x03 */
- "133 MHz PCI-X", /* 0x04 */
- NULL, /* 0x05 */
- NULL, /* 0x06 */
- NULL, /* 0x07 */
- NULL, /* 0x08 */
- "66 MHz PCI-X 266", /* 0x09 */
- "100 MHz PCI-X 266", /* 0x0a */
- "133 MHz PCI-X 266", /* 0x0b */
- "Unknown AGP", /* 0x0c */
- "1x AGP", /* 0x0d */
- "2x AGP", /* 0x0e */
- "4x AGP", /* 0x0f */
- "8x AGP", /* 0x10 */
- "66 MHz PCI-X 533", /* 0x11 */
- "100 MHz PCI-X 533", /* 0x12 */
- "133 MHz PCI-X 533", /* 0x13 */
- "2.5 GT/s PCIe", /* 0x14 */
- "5.0 GT/s PCIe", /* 0x15 */
- "8.0 GT/s PCIe", /* 0x16 */
- "16.0 GT/s PCIe", /* 0x17 */
- "32.0 GT/s PCIe", /* 0x18 */
-};
-
static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
{
- const char *speed_string;
-
- if (speed < ARRAY_SIZE(pci_bus_speed_strings))
- speed_string = pci_bus_speed_strings[speed];
- else
- speed_string = "Unknown";
-
- return sprintf(buf, "%s\n", speed_string);
+ return sprintf(buf, "%s\n", pci_speed_string(speed));
}
static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf)
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index a823b4b8ef8a..e69cac84b605 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -52,10 +52,11 @@ struct switchtec_user {
enum mrpc_state state;
- struct completion comp;
+ wait_queue_head_t cmd_comp;
struct kref kref;
struct list_head list;
+ bool cmd_done;
u32 cmd;
u32 status;
u32 return_code;
@@ -77,7 +78,7 @@ static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
stuser->stdev = stdev;
kref_init(&stuser->kref);
INIT_LIST_HEAD(&stuser->list);
- init_completion(&stuser->comp);
+ init_waitqueue_head(&stuser->cmd_comp);
stuser->event_cnt = atomic_read(&stdev->event_cnt);
dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
@@ -175,7 +176,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
kref_get(&stuser->kref);
stuser->read_len = sizeof(stuser->data);
stuser_set_state(stuser, MRPC_QUEUED);
- init_completion(&stuser->comp);
+ stuser->cmd_done = false;
list_add_tail(&stuser->list, &stdev->mrpc_queue);
mrpc_cmd_submit(stdev);
@@ -222,7 +223,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
stuser->read_len);
out:
- complete_all(&stuser->comp);
+ stuser->cmd_done = true;
+ wake_up_interruptible(&stuser->cmd_comp);
list_del_init(&stuser->list);
stuser_put(stuser);
stdev->mrpc_busy = 0;
@@ -529,10 +531,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
mutex_unlock(&stdev->mrpc_mutex);
if (filp->f_flags & O_NONBLOCK) {
- if (!try_wait_for_completion(&stuser->comp))
+ if (!stuser->cmd_done)
return -EAGAIN;
} else {
- rc = wait_for_completion_interruptible(&stuser->comp);
+ rc = wait_event_interruptible(stuser->cmd_comp,
+ stuser->cmd_done);
if (rc < 0)
return rc;
}
@@ -580,7 +583,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
struct switchtec_dev *stdev = stuser->stdev;
__poll_t ret = 0;
- poll_wait(filp, &stuser->comp.wait, wait);
+ poll_wait(filp, &stuser->cmd_comp, wait);
poll_wait(filp, &stdev->event_wq, wait);
if (lock_mutex_and_test_alive(stdev))
@@ -588,7 +591,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
mutex_unlock(&stdev->mrpc_mutex);
- if (try_wait_for_completion(&stuser->comp))
+ if (stuser->cmd_done)
ret |= EPOLLIN | EPOLLRDNORM;
if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
@@ -1272,7 +1275,8 @@ static void stdev_kill(struct switchtec_dev *stdev)
/* Wake up and kill any users waiting on an MRPC request */
list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
- complete_all(&stuser->comp);
+ stuser->cmd_done = true;
+ wake_up_interruptible(&stuser->cmd_comp);
list_del_init(&stuser->list);
stuser_put(stuser);
}
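
[Editor's note] The switchtec conversion above replaces a struct completion
with an open-coded done flag plus wait queue. The motivation is poll support:
a wait_queue_head_t can be passed to poll_wait() directly, whereas reaching
into a completion's internal wait queue (the old &stuser->comp.wait) was never
a sanctioned interface. The price is that producers must publish the flag
before waking. A minimal sketch of the pattern:

	/* Sketch only: flag + waitqueue standing in for a completion. */
	static wait_queue_head_t waitq;	/* init_waitqueue_head() at setup */
	static bool done;

	static void producer(void)
	{
		done = true;			/* publish state first */
		wake_up_interruptible(&waitq);	/* then wake any sleeper */
	}

	static int consumer(void)
	{
		return wait_event_interruptible(waitq, done);
	}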
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 33c9b6ea7364..fb9b17fa0fb5 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -40,7 +40,7 @@ struct cis_cache_entry {
unsigned int addr;
unsigned int len;
unsigned int attr;
- unsigned char cache[0];
+ unsigned char cache[];
};
struct pccard_resource_ops {
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 0a04eb04f3a2..d3ef5534991e 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -329,7 +329,7 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
static struct platform_driver omap_cf_driver = {
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
},
.remove = __exit_p(omap_cf_remove),
};
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 9e6922c08ef6..3b05760e69d6 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -1076,7 +1076,7 @@ static ssize_t show_io_db(struct device *dev,
for (p = data->io_db.next; p != &data->io_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
@@ -1133,7 +1133,7 @@ static ssize_t show_mem_db(struct device *dev,
p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
@@ -1142,7 +1142,7 @@ static ssize_t show_mem_db(struct device *dev,
for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
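
[Editor's note] The snprintf()->scnprintf() swaps here and in the following
hunks fix a subtle sysfs pattern: snprintf() returns the length the output
would have needed, so accumulating its return value can push the offset past
PAGE_SIZE once truncation starts, while scnprintf() returns the bytes actually
stored. A sketch of the difference:

	/* Sketch only: return values on truncation. */
	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "0123456789");	/* n == 10, truncated */
	n = scnprintf(buf, sizeof(buf), "0123456789");	/* n == 7, as stored  */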
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index e2e8729afd9d..784ada5b8c4f 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -14,7 +14,7 @@
#include <asm/mach-types.h>
#include <mach/simpad.h>
#include "sa1100_generic.h"
-
+
static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
@@ -66,7 +66,7 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
break;
- case 33:
+ case 33:
simpad_clear_cs3_bit(VCC_3V_EN|EN1);
simpad_set_cs3_bit(VCC_5V_EN|EN0);
break;
@@ -95,7 +95,7 @@ static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
simpad_set_cs3_bit(PCMCIA_RESET);
}
-static struct pcmcia_low_level simpad_pcmcia_ops = {
+static struct pcmcia_low_level simpad_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = simpad_pcmcia_hw_init,
.hw_shutdown = simpad_pcmcia_hw_shutdown,
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index b7f993f1bbd0..222e81c79365 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -88,7 +88,7 @@ struct soc_pcmcia_socket {
struct skt_dev_info {
int nskt;
- struct soc_pcmcia_socket skt[0];
+ struct soc_pcmcia_socket skt[];
};
struct pcmcia_state {
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 49b1c6a1bdbe..bf6529b0b5b0 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -180,12 +180,12 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri
for (i = 0; i < 0x24; i += 4) {
unsigned val;
if (!(i & 15))
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
val = cb_readl(socket, i);
- offset += snprintf(buf + offset, PAGE_SIZE - offset, " %08x", val);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %08x", val);
}
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:");
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:");
for (i = 0; i < 0x45; i++) {
unsigned char val;
if (!(i & 7)) {
@@ -193,10 +193,10 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri
memcpy(buf + offset, " -", 2);
offset += 2;
} else
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
}
val = exca_readb(socket, i);
- offset += snprintf(buf + offset, PAGE_SIZE - offset, " %02x", val);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %02x", val);
}
buf[offset++] = '\n';
return offset;
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index fea354d6fb29..d50edef91f59 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -328,15 +328,15 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
struct arm_ccn_pmu_event, attr);
ssize_t res;
- res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
+ res = scnprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
if (event->event)
- res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
+ res += scnprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
event->event);
if (event->def)
- res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
+ res += scnprintf(buf + res, PAGE_SIZE - res, ",%s",
event->def);
if (event->mask)
- res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
+ res += scnprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
event->mask);
/* Arguments required by an event */
@@ -344,25 +344,25 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
case CCN_TYPE_CYCLES:
break;
case CCN_TYPE_XP:
- res += snprintf(buf + res, PAGE_SIZE - res,
+ res += scnprintf(buf + res, PAGE_SIZE - res,
",xp=?,vc=?");
if (event->event == CCN_EVENT_WATCHPOINT)
- res += snprintf(buf + res, PAGE_SIZE - res,
+ res += scnprintf(buf + res, PAGE_SIZE - res,
",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
else
- res += snprintf(buf + res, PAGE_SIZE - res,
+ res += scnprintf(buf + res, PAGE_SIZE - res,
",bus=?");
break;
case CCN_TYPE_MN:
- res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
+ res += scnprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
break;
default:
- res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
+ res += scnprintf(buf + res, PAGE_SIZE - res, ",node=?");
break;
}
- res += snprintf(buf + res, PAGE_SIZE - res, "\n");
+ res += scnprintf(buf + res, PAGE_SIZE - res, "\n");
return res;
}
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 4e4984a55cd1..b72c04852599 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -831,7 +831,7 @@ static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
* parts and give userspace a fighting chance of getting some
* useful data out of it.
*/
- if (!nr_pages || (snapshot && (nr_pages & 1)))
+ if (snapshot && (nr_pages & 1))
return NULL;
if (cpu == -1)
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index af774ac2b934..71801e30d601 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -59,3 +59,25 @@ config PHY_MESON_G12A_USB3_PCIE
Enable this to support the Meson USB3 + PCIE Combo PHY found
in Meson G12A SoCs.
If unsure, say N.
+
+config PHY_MESON_AXG_PCIE
+ tristate "Meson AXG PCIE PHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+	  Enable this to support the Meson PCIE PHY found
+	  in Meson AXG SoCs.
+ If unsure, say N.
+
+config PHY_MESON_AXG_MIPI_PCIE_ANALOG
+ tristate "Meson AXG MIPI + PCIE analog PHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Enable this to support the Meson MIPI + PCIE analog PHY
+ found in Meson AXG SoCs.
+ If unsure, say N.
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index 11d1c42ac2be..e2baa133f7af 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
-obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
-obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
-obj-$(CONFIG_PHY_MESON_GXL_USB3) += phy-meson-gxl-usb3.o
-obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o
+obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
+obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
+obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
+obj-$(CONFIG_PHY_MESON_GXL_USB3) += phy-meson-gxl-usb3.o
+obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o
+obj-$(CONFIG_PHY_MESON_AXG_PCIE) += phy-meson-axg-pcie.o
+obj-$(CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG) += phy-meson-axg-mipi-pcie-analog.o
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
new file mode 100644
index 000000000000..1431cbf885e1
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Amlogic AXG MIPI + PCIE analog PHY driver
+ *
+ * Copyright (C) 2019 Remi Pommarel <repk@triplefau.lt>
+ */
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
+
+#define HHI_MIPI_CNTL0 0x00
+#define HHI_MIPI_CNTL0_COMMON_BLOCK GENMASK(31, 28)
+#define HHI_MIPI_CNTL0_ENABLE BIT(29)
+#define HHI_MIPI_CNTL0_BANDGAP BIT(26)
+#define HHI_MIPI_CNTL0_DECODE_TO_RTERM GENMASK(15, 12)
+#define HHI_MIPI_CNTL0_OUTPUT_EN BIT(3)
+
+#define HHI_MIPI_CNTL1 0x01
+#define HHI_MIPI_CNTL1_CH0_CML_PDR_EN BIT(12)
+#define HHI_MIPI_CNTL1_LP_ABILITY GENMASK(5, 4)
+#define HHI_MIPI_CNTL1_LP_RESISTER BIT(3)
+#define HHI_MIPI_CNTL1_INPUT_SETTING BIT(2)
+#define HHI_MIPI_CNTL1_INPUT_SEL BIT(1)
+#define HHI_MIPI_CNTL1_PRBS7_EN BIT(0)
+
+#define HHI_MIPI_CNTL2 0x02
+#define HHI_MIPI_CNTL2_CH_PU GENMASK(31, 25)
+#define HHI_MIPI_CNTL2_CH_CTL GENMASK(24, 19)
+#define HHI_MIPI_CNTL2_CH0_DIGDR_EN BIT(18)
+#define HHI_MIPI_CNTL2_CH_DIGDR_EN BIT(17)
+#define HHI_MIPI_CNTL2_LPULPS_EN BIT(16)
+#define HHI_MIPI_CNTL2_CH_EN(n) BIT(15 - (n))
+#define HHI_MIPI_CNTL2_CH0_LP_CTL GENMASK(10, 1)
+
+struct phy_axg_mipi_pcie_analog_priv {
+ struct phy *phy;
+ unsigned int mode;
+ struct regmap *regmap;
+};
+
+static const struct regmap_config phy_axg_mipi_pcie_analog_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = HHI_MIPI_CNTL2,
+};
+
+static int phy_axg_mipi_pcie_analog_power_on(struct phy *phy)
+{
+ struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy);
+
+ /* MIPI not supported yet */
+ if (priv->mode != PHY_TYPE_PCIE)
+ return -EINVAL;
+
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+ HHI_MIPI_CNTL0_BANDGAP, HHI_MIPI_CNTL0_BANDGAP);
+
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+ HHI_MIPI_CNTL0_ENABLE, HHI_MIPI_CNTL0_ENABLE);
+ return 0;
+}
+
+static int phy_axg_mipi_pcie_analog_power_off(struct phy *phy)
+{
+ struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy);
+
+ /* MIPI not supported yet */
+ if (priv->mode != PHY_TYPE_PCIE)
+ return -EINVAL;
+
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+ HHI_MIPI_CNTL0_BANDGAP, 0);
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+ HHI_MIPI_CNTL0_ENABLE, 0);
+ return 0;
+}
+
+static int phy_axg_mipi_pcie_analog_init(struct phy *phy)
+{
+ return 0;
+}
+
+static int phy_axg_mipi_pcie_analog_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static const struct phy_ops phy_axg_mipi_pcie_analog_ops = {
+ .init = phy_axg_mipi_pcie_analog_init,
+ .exit = phy_axg_mipi_pcie_analog_exit,
+ .power_on = phy_axg_mipi_pcie_analog_power_on,
+ .power_off = phy_axg_mipi_pcie_analog_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *phy_axg_mipi_pcie_analog_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct phy_axg_mipi_pcie_analog_priv *priv = dev_get_drvdata(dev);
+ unsigned int mode;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mode = args->args[0];
+
+ /* MIPI mode is not supported yet */
+ if (mode != PHY_TYPE_PCIE) {
+ dev_err(dev, "invalid phy mode select argument\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv->mode = mode;
+ return priv->phy;
+}
+
+static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_axg_mipi_pcie_analog_priv *priv;
+ struct device_node *np = dev->of_node;
+ struct regmap *map;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to get regmap base\n");
+ return PTR_ERR(base);
+ }
+
+ map = devm_regmap_init_mmio(dev, base,
+ &phy_axg_mipi_pcie_analog_regmap_conf);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to get HHI regmap\n");
+ return PTR_ERR(map);
+ }
+ priv->regmap = map;
+
+ priv->phy = devm_phy_create(dev, np, &phy_axg_mipi_pcie_analog_ops);
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to create PHY\n");
+ return ret;
+ }
+
+ phy_set_drvdata(priv->phy, priv);
+ dev_set_drvdata(dev, priv);
+
+ phy = devm_of_phy_provider_register(dev,
+ phy_axg_mipi_pcie_analog_xlate);
+
+ return PTR_ERR_OR_ZERO(phy);
+}
+
+static const struct of_device_id phy_axg_mipi_pcie_analog_of_match[] = {
+ {
+ .compatible = "amlogic,axg-mipi-pcie-analog-phy",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_axg_mipi_pcie_analog_of_match);
+
+static struct platform_driver phy_axg_mipi_pcie_analog_driver = {
+ .probe = phy_axg_mipi_pcie_analog_probe,
+ .driver = {
+ .name = "phy-axg-mipi-pcie-analog",
+ .of_match_table = phy_axg_mipi_pcie_analog_of_match,
+ },
+};
+module_platform_driver(phy_axg_mipi_pcie_analog_driver);
+
+MODULE_AUTHOR("Remi Pommarel <repk@triplefau.lt>");
+MODULE_DESCRIPTION("Amlogic AXG MIPI + PCIE analog PHY driver");
+MODULE_LICENSE("GPL v2");
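
[Editor's note] The new analog PHY implements only PCIe mode; MIPI is rejected
both in the xlate callback and in the power handlers. From a consumer, the
usual generic-PHY sequence applies. A hedged sketch (the "pcie" lookup name is
illustrative, not defined by this patch):

	/* Sketch only: generic PHY consumer flow against this provider. */
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "pcie");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* phy_axg_mipi_pcie_analog_init() */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* sets BANDGAP, then ENABLE */
	if (ret) {
		phy_exit(phy);
		return ret;
	}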
diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c
new file mode 100644
index 000000000000..377ed0dcd0d9
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Amlogic AXG PCIE PHY driver
+ *
+ * Copyright (C) 2020 Remi Pommarel <repk@triplefau.lt>
+ */
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/bitfield.h>
+#include <dt-bindings/phy/phy.h>
+
+#define MESON_PCIE_REG0 0x00
+#define MESON_PCIE_COMMON_CLK BIT(4)
+#define MESON_PCIE_PORT_SEL GENMASK(3, 2)
+#define MESON_PCIE_CLK BIT(1)
+#define MESON_PCIE_POWERDOWN BIT(0)
+
+#define MESON_PCIE_TWO_X1 FIELD_PREP(MESON_PCIE_PORT_SEL, 0x3)
+#define MESON_PCIE_COMMON_REF_CLK FIELD_PREP(MESON_PCIE_COMMON_CLK, 0x1)
+#define MESON_PCIE_PHY_INIT (MESON_PCIE_TWO_X1 | \
+ MESON_PCIE_COMMON_REF_CLK)
+#define MESON_PCIE_RESET_DELAY 500
+
+struct phy_axg_pcie_priv {
+ struct phy *phy;
+ struct phy *analog;
+ struct regmap *regmap;
+ struct reset_control *reset;
+};
+
+static const struct regmap_config phy_axg_pcie_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MESON_PCIE_REG0,
+};
+
+static int phy_axg_pcie_power_on(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_power_on(priv->analog);
+ if (ret != 0)
+ return ret;
+
+ regmap_update_bits(priv->regmap, MESON_PCIE_REG0,
+ MESON_PCIE_POWERDOWN, 0);
+ return 0;
+}
+
+static int phy_axg_pcie_power_off(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_power_off(priv->analog);
+ if (ret != 0)
+ return ret;
+
+ regmap_update_bits(priv->regmap, MESON_PCIE_REG0,
+ MESON_PCIE_POWERDOWN, 1);
+ return 0;
+}
+
+static int phy_axg_pcie_init(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_init(priv->analog);
+ if (ret != 0)
+ return ret;
+
+ regmap_write(priv->regmap, MESON_PCIE_REG0, MESON_PCIE_PHY_INIT);
+ return reset_control_reset(priv->reset);
+}
+
+static int phy_axg_pcie_exit(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_exit(priv->analog);
+ if (ret != 0)
+ return ret;
+
+ return reset_control_reset(priv->reset);
+}
+
+static int phy_axg_pcie_reset(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ ret = phy_reset(priv->analog);
+ if (ret != 0)
+ goto out;
+
+ ret = reset_control_assert(priv->reset);
+ if (ret != 0)
+ goto out;
+ udelay(MESON_PCIE_RESET_DELAY);
+
+ ret = reset_control_deassert(priv->reset);
+ if (ret != 0)
+ goto out;
+ udelay(MESON_PCIE_RESET_DELAY);
+
+out:
+ return ret;
+}
+
+static const struct phy_ops phy_axg_pcie_ops = {
+ .init = phy_axg_pcie_init,
+ .exit = phy_axg_pcie_exit,
+ .power_on = phy_axg_pcie_power_on,
+ .power_off = phy_axg_pcie_power_off,
+ .reset = phy_axg_pcie_reset,
+ .owner = THIS_MODULE,
+};
+
+static int phy_axg_pcie_probe(struct platform_device *pdev)
+{
+ struct phy_provider *pphy;
+ struct device *dev = &pdev->dev;
+ struct phy_axg_pcie_priv *priv;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phy = devm_phy_create(dev, np, &phy_axg_pcie_ops);
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to create PHY\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, base,
+ &phy_axg_pcie_regmap_conf);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->reset = devm_reset_control_array_get(dev, false, false);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
+
+ priv->analog = devm_phy_get(dev, "analog");
+ if (IS_ERR(priv->analog))
+ return PTR_ERR(priv->analog);
+
+ phy_set_drvdata(priv->phy, priv);
+ dev_set_drvdata(dev, priv);
+ pphy = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(pphy);
+}
+
+static const struct of_device_id phy_axg_pcie_of_match[] = {
+ {
+ .compatible = "amlogic,axg-pcie-phy",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_axg_pcie_of_match);
+
+static struct platform_driver phy_axg_pcie_driver = {
+ .probe = phy_axg_pcie_probe,
+ .driver = {
+ .name = "phy-axg-pcie",
+ .of_match_table = phy_axg_pcie_of_match,
+ },
+};
+module_platform_driver(phy_axg_pcie_driver);
+
+MODULE_AUTHOR("Remi Pommarel <repk@triplefau.lt>");
+MODULE_DESCRIPTION("Amlogic AXG PCIE PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
index 9065ffc85eb4..b26e30e1afaf 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
@@ -66,7 +66,7 @@
#define PHY_CTRL_R14 0x38
#define PHY_CTRL_R14_I_RDP_EN BIT(0)
#define PHY_CTRL_R14_I_RPU_SW1_EN BIT(1)
- #define PHY_CTRL_R14_I_RPU_SW2_EN GENMASK(2, 3)
+ #define PHY_CTRL_R14_I_RPU_SW2_EN GENMASK(3, 2)
#define PHY_CTRL_R14_PG_RSTN BIT(4)
#define PHY_CTRL_R14_I_C2L_DATA_16_8 BIT(5)
#define PHY_CTRL_R14_I_C2L_ASSERT_SINGLE_EN_ZERO BIT(6)
@@ -146,11 +146,17 @@
#define RESET_COMPLETE_TIME 1000
#define PLL_RESET_COMPLETE_TIME 100
+enum meson_soc_id {
+ MESON_SOC_G12A = 0,
+ MESON_SOC_A1,
+};
+
struct phy_meson_g12a_usb2_priv {
struct device *dev;
struct regmap *regmap;
struct clk *clk;
struct reset_control *reset;
+ int soc_id;
};
static const struct regmap_config phy_meson_g12a_usb2_regmap_conf = {
@@ -164,6 +170,7 @@ static int phy_meson_g12a_usb2_init(struct phy *phy)
{
struct phy_meson_g12a_usb2_priv *priv = phy_get_drvdata(phy);
int ret;
+ unsigned int value;
ret = reset_control_reset(priv->reset);
if (ret)
@@ -192,18 +199,22 @@ static int phy_meson_g12a_usb2_init(struct phy *phy)
FIELD_PREP(PHY_CTRL_R17_MPLL_FILTER_PVT2, 2) |
FIELD_PREP(PHY_CTRL_R17_MPLL_FILTER_PVT1, 9));
- regmap_write(priv->regmap, PHY_CTRL_R18,
- FIELD_PREP(PHY_CTRL_R18_MPLL_LKW_SEL, 1) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_LK_W, 9) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_LK_S, 0x27) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_PFD_GAIN, 1) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_ROU, 7) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_DATA_SEL, 3) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_BIAS_ADJ, 1) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_BB_MODE, 0) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_ALPHA, 3) |
- FIELD_PREP(PHY_CTRL_R18_MPLL_ADJ_LDO, 1) |
- PHY_CTRL_R18_MPLL_ACG_RANGE);
+ value = FIELD_PREP(PHY_CTRL_R18_MPLL_LKW_SEL, 1) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_LK_W, 9) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_LK_S, 0x27) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_PFD_GAIN, 1) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_ROU, 7) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_DATA_SEL, 3) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_BIAS_ADJ, 1) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_BB_MODE, 0) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_ALPHA, 3) |
+ FIELD_PREP(PHY_CTRL_R18_MPLL_ADJ_LDO, 1) |
+ PHY_CTRL_R18_MPLL_ACG_RANGE;
+
+ if (priv->soc_id == MESON_SOC_A1)
+ value |= PHY_CTRL_R18_MPLL_DCO_CLK_SEL;
+
+ regmap_write(priv->regmap, PHY_CTRL_R18, value);
udelay(PLL_RESET_COMPLETE_TIME);
@@ -227,13 +238,24 @@ static int phy_meson_g12a_usb2_init(struct phy *phy)
FIELD_PREP(PHY_CTRL_R20_USB2_BGR_VREF_4_0, 0) |
FIELD_PREP(PHY_CTRL_R20_USB2_BGR_DBG_1_0, 0));
- regmap_write(priv->regmap, PHY_CTRL_R4,
- FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_7_0, 0xf) |
- FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_15_8, 0xf) |
- FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_23_16, 0xf) |
- PHY_CTRL_R4_TEST_BYPASS_MODE_EN |
- FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_1_0, 0) |
- FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_3_2, 0));
+ if (priv->soc_id == MESON_SOC_G12A)
+ regmap_write(priv->regmap, PHY_CTRL_R4,
+ FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_7_0, 0xf) |
+ FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_15_8, 0xf) |
+ FIELD_PREP(PHY_CTRL_R4_CALIB_CODE_23_16, 0xf) |
+ PHY_CTRL_R4_TEST_BYPASS_MODE_EN |
+ FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_1_0, 0) |
+ FIELD_PREP(PHY_CTRL_R4_I_C2L_BIAS_TRIM_3_2, 0));
+ else if (priv->soc_id == MESON_SOC_A1) {
+ regmap_write(priv->regmap, PHY_CTRL_R21,
+ PHY_CTRL_R21_USB2_CAL_ACK_EN |
+ PHY_CTRL_R21_USB2_TX_STRG_PD |
+ FIELD_PREP(PHY_CTRL_R21_USB2_OTG_ACA_TRIM_1_0, 2));
+
+ /* Analog Settings */
+ regmap_write(priv->regmap, PHY_CTRL_R13,
+ FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7));
+ }
/* Tuning Disconnect Threshold */
regmap_write(priv->regmap, PHY_CTRL_R3,
@@ -241,11 +263,13 @@ static int phy_meson_g12a_usb2_init(struct phy *phy)
FIELD_PREP(PHY_CTRL_R3_HSDIC_REF, 1) |
FIELD_PREP(PHY_CTRL_R3_DISC_THRESH, 3));
- /* Analog Settings */
- regmap_write(priv->regmap, PHY_CTRL_R14, 0);
- regmap_write(priv->regmap, PHY_CTRL_R13,
- PHY_CTRL_R13_UPDATE_PMA_SIGNALS |
- FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7));
+ if (priv->soc_id == MESON_SOC_G12A) {
+ /* Analog Settings */
+ regmap_write(priv->regmap, PHY_CTRL_R14, 0);
+ regmap_write(priv->regmap, PHY_CTRL_R13,
+ PHY_CTRL_R13_UPDATE_PMA_SIGNALS |
+ FIELD_PREP(PHY_CTRL_R13_MIN_COUNT_FOR_SYNC_DET, 7));
+ }
return 0;
}
@@ -286,6 +310,8 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
+ priv->soc_id = (enum meson_soc_id)of_device_get_match_data(&pdev->dev);
+
priv->regmap = devm_regmap_init_mmio(dev, base,
&phy_meson_g12a_usb2_regmap_conf);
if (IS_ERR(priv->regmap))
@@ -321,8 +347,15 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev)
}
static const struct of_device_id phy_meson_g12a_usb2_of_match[] = {
- { .compatible = "amlogic,g12a-usb2-phy", },
- { },
+ {
+ .compatible = "amlogic,g12a-usb2-phy",
+ .data = (void *)MESON_SOC_G12A,
+ },
+ {
+ .compatible = "amlogic,a1-usb2-phy",
+ .data = (void *)MESON_SOC_A1,
+ },
+ { /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, phy_meson_g12a_usb2_of_match);
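
[Editor's note] The SoC variant above reaches probe through the OF match
table: .data carries the enum value disguised as a pointer, and probe casts it
back. A sketch of the round trip (some configurations prefer an intermediate
(uintptr_t) cast to avoid pointer-width warnings):

	/* Sketch only: passing an enum through OF match data. */
	static const struct of_device_id ids[] = {
		{ .compatible = "amlogic,a1-usb2-phy",
		  .data = (void *)MESON_SOC_A1 },
		{ /* sentinel */ }
	};

	/* ...and in probe(): */
	priv->soc_id = (enum meson_soc_id)(uintptr_t)
			of_device_get_match_data(&pdev->dev);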
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
index b2db916db64b..459545871608 100644
--- a/drivers/phy/cadence/Kconfig
+++ b/drivers/phy/cadence/Kconfig
@@ -3,13 +3,13 @@
# Phy drivers for Cadence PHYs
#
-config PHY_CADENCE_DP
- tristate "Cadence MHDP DisplayPort PHY driver"
+config PHY_CADENCE_TORRENT
+ tristate "Cadence Torrent PHY driver"
depends on OF
depends on HAS_IOMEM
select GENERIC_PHY
help
- Support for Cadence MHDP DisplayPort PHY.
+ Support for Cadence Torrent PHY.
config PHY_CADENCE_DPHY
tristate "Cadence D-PHY Support"
diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile
index 8f89560f1711..6a7ffc6ea599 100644
--- a/drivers/phy/cadence/Makefile
+++ b/drivers/phy/cadence/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PHY_CADENCE_DP) += phy-cadence-dp.o
+obj-$(CONFIG_PHY_CADENCE_TORRENT) += phy-cadence-torrent.o
obj-$(CONFIG_PHY_CADENCE_DPHY) += cdns-dphy.o
obj-$(CONFIG_PHY_CADENCE_SIERRA) += phy-cadence-sierra.o
diff --git a/drivers/phy/cadence/phy-cadence-dp.c b/drivers/phy/cadence/phy-cadence-dp.c
deleted file mode 100644
index bc10cb264b7a..000000000000
--- a/drivers/phy/cadence/phy-cadence-dp.c
+++ /dev/null
@@ -1,541 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Cadence MHDP DisplayPort SD0801 PHY driver.
- *
- * Copyright 2018 Cadence Design Systems, Inc.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-
-#define DEFAULT_NUM_LANES 2
-#define MAX_NUM_LANES 4
-#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
-
-#define POLL_TIMEOUT_US 2000
-#define LANE_MASK 0x7
-
-/*
- * register offsets from DPTX PHY register block base (i.e MHDP
- * register base + 0x30a00)
- */
-#define PHY_AUX_CONFIG 0x00
-#define PHY_AUX_CTRL 0x04
-#define PHY_RESET 0x20
-#define PHY_PMA_XCVR_PLLCLK_EN 0x24
-#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
-#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
-#define PHY_POWER_STATE_LN_0 0x0000
-#define PHY_POWER_STATE_LN_1 0x0008
-#define PHY_POWER_STATE_LN_2 0x0010
-#define PHY_POWER_STATE_LN_3 0x0018
-#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
-#define PHY_PMA_CMN_READY 0x34
-#define PHY_PMA_XCVR_TX_VMARGIN 0x38
-#define PHY_PMA_XCVR_TX_DEEMPH 0x3c
-
-/*
- * register offsets from SD0801 PHY register block base (i.e MHDP
- * register base + 0x500000)
- */
-#define CMN_SSM_BANDGAP_TMR 0x00084
-#define CMN_SSM_BIAS_TMR 0x00088
-#define CMN_PLLSM0_PLLPRE_TMR 0x000a8
-#define CMN_PLLSM0_PLLLOCK_TMR 0x000b0
-#define CMN_PLLSM1_PLLPRE_TMR 0x000c8
-#define CMN_PLLSM1_PLLLOCK_TMR 0x000d0
-#define CMN_BGCAL_INIT_TMR 0x00190
-#define CMN_BGCAL_ITER_TMR 0x00194
-#define CMN_IBCAL_INIT_TMR 0x001d0
-#define CMN_PLL0_VCOCAL_INIT_TMR 0x00210
-#define CMN_PLL0_VCOCAL_ITER_TMR 0x00214
-#define CMN_PLL0_VCOCAL_REFTIM_START 0x00218
-#define CMN_PLL0_VCOCAL_PLLCNT_START 0x00220
-#define CMN_PLL0_INTDIV_M0 0x00240
-#define CMN_PLL0_FRACDIVL_M0 0x00244
-#define CMN_PLL0_FRACDIVH_M0 0x00248
-#define CMN_PLL0_HIGH_THR_M0 0x0024c
-#define CMN_PLL0_DSM_DIAG_M0 0x00250
-#define CMN_PLL0_LOCK_PLLCNT_START 0x00278
-#define CMN_PLL1_VCOCAL_INIT_TMR 0x00310
-#define CMN_PLL1_VCOCAL_ITER_TMR 0x00314
-#define CMN_PLL1_DSM_DIAG_M0 0x00350
-#define CMN_TXPUCAL_INIT_TMR 0x00410
-#define CMN_TXPUCAL_ITER_TMR 0x00414
-#define CMN_TXPDCAL_INIT_TMR 0x00430
-#define CMN_TXPDCAL_ITER_TMR 0x00434
-#define CMN_RXCAL_INIT_TMR 0x00450
-#define CMN_RXCAL_ITER_TMR 0x00454
-#define CMN_SD_CAL_INIT_TMR 0x00490
-#define CMN_SD_CAL_ITER_TMR 0x00494
-#define CMN_SD_CAL_REFTIM_START 0x00498
-#define CMN_SD_CAL_PLLCNT_START 0x004a0
-#define CMN_PDIAG_PLL0_CTRL_M0 0x00680
-#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x00684
-#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x00690
-#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x00694
-#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x00698
-#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x006d0
-#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x006d4
-#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x00704
-#define XCVR_DIAG_PLLDRC_CTRL 0x10394
-#define XCVR_DIAG_HSCLK_SEL 0x10398
-#define XCVR_DIAG_HSCLK_DIV 0x1039c
-#define TX_PSC_A0 0x10400
-#define TX_PSC_A1 0x10404
-#define TX_PSC_A2 0x10408
-#define TX_PSC_A3 0x1040c
-#define RX_PSC_A0 0x20000
-#define RX_PSC_A1 0x20004
-#define RX_PSC_A2 0x20008
-#define RX_PSC_A3 0x2000c
-#define PHY_PLL_CFG 0x30038
-
-struct cdns_dp_phy {
- void __iomem *base; /* DPTX registers base */
- void __iomem *sd_base; /* SD0801 registers base */
- u32 num_lanes; /* Number of lanes to use */
- u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
- struct device *dev;
-};
-
-static int cdns_dp_phy_init(struct phy *phy);
-static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
- unsigned int lane);
-static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy);
-static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
- unsigned int offset,
- unsigned char start_bit,
- unsigned char num_bits,
- unsigned int val);
-
-static const struct phy_ops cdns_dp_phy_ops = {
- .init = cdns_dp_phy_init,
- .owner = THIS_MODULE,
-};
-
-static int cdns_dp_phy_init(struct phy *phy)
-{
- unsigned char lane_bits;
-
- struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
-
- writel(0x0003, cdns_phy->base + PHY_AUX_CTRL); /* enable AUX */
-
- /* PHY PMA registers configuration function */
- cdns_dp_phy_pma_cfg(cdns_phy);
-
- /*
- * Set lines power state to A0
- * Set lines pll clk enable to 0
- */
-
- cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ,
- PHY_POWER_STATE_LN_0, 6, 0x0000);
-
- if (cdns_phy->num_lanes >= 2) {
- cdns_dp_phy_write_field(cdns_phy,
- PHY_PMA_XCVR_POWER_STATE_REQ,
- PHY_POWER_STATE_LN_1, 6, 0x0000);
-
- if (cdns_phy->num_lanes == 4) {
- cdns_dp_phy_write_field(cdns_phy,
- PHY_PMA_XCVR_POWER_STATE_REQ,
- PHY_POWER_STATE_LN_2, 6, 0);
- cdns_dp_phy_write_field(cdns_phy,
- PHY_PMA_XCVR_POWER_STATE_REQ,
- PHY_POWER_STATE_LN_3, 6, 0);
- }
- }
-
- cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
- 0, 1, 0x0000);
-
- if (cdns_phy->num_lanes >= 2) {
- cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
- 1, 1, 0x0000);
- if (cdns_phy->num_lanes == 4) {
- cdns_dp_phy_write_field(cdns_phy,
- PHY_PMA_XCVR_PLLCLK_EN,
- 2, 1, 0x0000);
- cdns_dp_phy_write_field(cdns_phy,
- PHY_PMA_XCVR_PLLCLK_EN,
- 3, 1, 0x0000);
- }
- }
-
- /*
- * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
- * used lanes
- */
- lane_bits = (1 << cdns_phy->num_lanes) - 1;
- writel(((0xF & ~lane_bits) << 4) | (0xF & lane_bits),
- cdns_phy->base + PHY_RESET);
-
- /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
- writel(0x0001, cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN);
-
- /* PHY PMA registers configuration functions */
- cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy);
- cdns_dp_phy_pma_cmn_rate(cdns_phy);
-
- /* take out of reset */
- cdns_dp_phy_write_field(cdns_phy, PHY_RESET, 8, 1, 1);
- cdns_dp_phy_wait_pma_cmn_ready(cdns_phy);
- cdns_dp_phy_run(cdns_phy);
-
- return 0;
-}
-
-static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy)
-{
- unsigned int reg;
- int ret;
-
- ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_CMN_READY, reg,
- reg & 1, 0, 500);
- if (ret == -ETIMEDOUT)
- dev_err(cdns_phy->dev,
- "timeout waiting for PMA common ready\n");
-}
-
-static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy)
-{
- unsigned int i;
-
- /* PMA common configuration */
- cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
-
- /* PMA lane configuration to deal with multi-link operation */
- for (i = 0; i < cdns_phy->num_lanes; i++)
- cdns_dp_phy_pma_lane_cfg(cdns_phy, i);
-}
-
-static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
-{
- /* refclock registers - assumes 25 MHz refclock */
- writel(0x0019, cdns_phy->sd_base + CMN_SSM_BIAS_TMR);
- writel(0x0032, cdns_phy->sd_base + CMN_PLLSM0_PLLPRE_TMR);
- writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM0_PLLLOCK_TMR);
- writel(0x0032, cdns_phy->sd_base + CMN_PLLSM1_PLLPRE_TMR);
- writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM1_PLLLOCK_TMR);
- writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_INIT_TMR);
- writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_ITER_TMR);
- writel(0x0019, cdns_phy->sd_base + CMN_IBCAL_INIT_TMR);
- writel(0x001E, cdns_phy->sd_base + CMN_TXPUCAL_INIT_TMR);
- writel(0x0006, cdns_phy->sd_base + CMN_TXPUCAL_ITER_TMR);
- writel(0x001E, cdns_phy->sd_base + CMN_TXPDCAL_INIT_TMR);
- writel(0x0006, cdns_phy->sd_base + CMN_TXPDCAL_ITER_TMR);
- writel(0x02EE, cdns_phy->sd_base + CMN_RXCAL_INIT_TMR);
- writel(0x0006, cdns_phy->sd_base + CMN_RXCAL_ITER_TMR);
- writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_INIT_TMR);
- writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_ITER_TMR);
- writel(0x000E, cdns_phy->sd_base + CMN_SD_CAL_REFTIM_START);
- writel(0x012B, cdns_phy->sd_base + CMN_SD_CAL_PLLCNT_START);
- /* PLL registers */
- writel(0x0409, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_PADJ_M0);
- writel(0x1001, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_IADJ_M0);
- writel(0x0F08, cdns_phy->sd_base + CMN_PDIAG_PLL0_FILT_PADJ_M0);
- writel(0x0004, cdns_phy->sd_base + CMN_PLL0_DSM_DIAG_M0);
- writel(0x00FA, cdns_phy->sd_base + CMN_PLL0_VCOCAL_INIT_TMR);
- writel(0x0004, cdns_phy->sd_base + CMN_PLL0_VCOCAL_ITER_TMR);
- writel(0x00FA, cdns_phy->sd_base + CMN_PLL1_VCOCAL_INIT_TMR);
- writel(0x0004, cdns_phy->sd_base + CMN_PLL1_VCOCAL_ITER_TMR);
- writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_REFTIM_START);
-}
-
-static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
-{
- /* Assumes 25 MHz refclock */
- switch (cdns_phy->max_bit_rate) {
- /* Setting VCO for 10.8GHz */
- case 2700:
- case 5400:
- writel(0x01B0, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
- writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
- writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
- writel(0x0120, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
- break;
- /* Setting VCO for 9.72GHz */
- case 2430:
- case 3240:
- writel(0x0184, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
- writel(0xCCCD, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
- writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
- writel(0x0104, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
- break;
- /* Setting VCO for 8.64GHz */
- case 2160:
- case 4320:
- writel(0x0159, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
- writel(0x999A, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
- writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
- writel(0x00E7, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
- break;
- /* Setting VCO for 8.1GHz */
- case 8100:
- writel(0x0144, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
- writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
- writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
- writel(0x00D8, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
- break;
- }
-
- writel(0x0002, cdns_phy->sd_base + CMN_PDIAG_PLL0_CTRL_M0);
- writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_PLLCNT_START);
-}
-
-static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy)
-{
- unsigned int clk_sel_val = 0;
- unsigned int hsclk_div_val = 0;
- unsigned int i;
-
- /* 16'h0000 for single DP link configuration */
- writel(0x0000, cdns_phy->sd_base + PHY_PLL_CFG);
-
- switch (cdns_phy->max_bit_rate) {
- case 1620:
- clk_sel_val = 0x0f01;
- hsclk_div_val = 2;
- break;
- case 2160:
- case 2430:
- case 2700:
- clk_sel_val = 0x0701;
- hsclk_div_val = 1;
- break;
- case 3240:
- clk_sel_val = 0x0b00;
- hsclk_div_val = 2;
- break;
- case 4320:
- case 5400:
- clk_sel_val = 0x0301;
- hsclk_div_val = 0;
- break;
- case 8100:
- clk_sel_val = 0x0200;
- hsclk_div_val = 0;
- break;
- }
-
- writel(clk_sel_val, cdns_phy->sd_base + CMN_PDIAG_PLL0_CLK_SEL_M0);
-
- /* PMA lane configuration to deal with multi-link operation */
- for (i = 0; i < cdns_phy->num_lanes; i++) {
- writel(hsclk_div_val,
- cdns_phy->sd_base + (XCVR_DIAG_HSCLK_DIV | (i<<11)));
- }
-}
-
-static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
- unsigned int lane)
-{
- unsigned int lane_bits = (lane & LANE_MASK) << 11;
-
- /* Writing Tx/Rx Power State Controllers registers */
- writel(0x00FB, cdns_phy->sd_base + (TX_PSC_A0 | lane_bits));
- writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A2 | lane_bits));
- writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A3 | lane_bits));
- writel(0x0000, cdns_phy->sd_base + (RX_PSC_A0 | lane_bits));
- writel(0x0000, cdns_phy->sd_base + (RX_PSC_A2 | lane_bits));
- writel(0x0000, cdns_phy->sd_base + (RX_PSC_A3 | lane_bits));
-
- writel(0x0001, cdns_phy->sd_base + (XCVR_DIAG_PLLDRC_CTRL | lane_bits));
- writel(0x0000, cdns_phy->sd_base + (XCVR_DIAG_HSCLK_SEL | lane_bits));
-}
-
-static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
-{
- unsigned int read_val;
- u32 write_val1 = 0;
- u32 write_val2 = 0;
- u32 mask = 0;
- int ret;
-
- /*
- * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the
- * master lane
- */
- ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN_ACK,
- read_val, read_val & 1, 0, POLL_TIMEOUT_US);
- if (ret == -ETIMEDOUT)
- dev_err(cdns_phy->dev,
- "timeout waiting for link PLL clock enable ack\n");
-
- ndelay(100);
-
- switch (cdns_phy->num_lanes) {
-
- case 1: /* lane 0 */
- write_val1 = 0x00000004;
- write_val2 = 0x00000001;
- mask = 0x0000003f;
- break;
- case 2: /* lane 0-1 */
- write_val1 = 0x00000404;
- write_val2 = 0x00000101;
- mask = 0x00003f3f;
- break;
- case 4: /* lane 0-3 */
- write_val1 = 0x04040404;
- write_val2 = 0x01010101;
- mask = 0x3f3f3f3f;
- break;
- }
-
- writel(write_val1, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
-
- ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK,
- read_val, (read_val & mask) == write_val1, 0,
- POLL_TIMEOUT_US);
- if (ret == -ETIMEDOUT)
- dev_err(cdns_phy->dev,
- "timeout waiting for link power state ack\n");
-
- writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
- ndelay(100);
-
- writel(write_val2, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
-
- ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK,
- read_val, (read_val & mask) == write_val2, 0,
- POLL_TIMEOUT_US);
- if (ret == -ETIMEDOUT)
- dev_err(cdns_phy->dev,
- "timeout waiting for link power state ack\n");
-
- writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
- ndelay(100);
-}
-
-static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
- unsigned int offset,
- unsigned char start_bit,
- unsigned char num_bits,
- unsigned int val)
-{
- unsigned int read_val;
-
- read_val = readl(cdns_phy->base + offset);
- writel(((val << start_bit) | (read_val & ~(((1 << num_bits) - 1) <<
- start_bit))), cdns_phy->base + offset);
-}
-
-static int cdns_dp_phy_probe(struct platform_device *pdev)
-{
- struct resource *regs;
- struct cdns_dp_phy *cdns_phy;
- struct device *dev = &pdev->dev;
- struct phy_provider *phy_provider;
- struct phy *phy;
- int err;
-
- cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL);
- if (!cdns_phy)
- return -ENOMEM;
-
- cdns_phy->dev = &pdev->dev;
-
- phy = devm_phy_create(dev, NULL, &cdns_dp_phy_ops);
- if (IS_ERR(phy)) {
- dev_err(dev, "failed to create DisplayPort PHY\n");
- return PTR_ERR(phy);
- }
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cdns_phy->base = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(cdns_phy->base))
- return PTR_ERR(cdns_phy->base);
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(cdns_phy->sd_base))
- return PTR_ERR(cdns_phy->sd_base);
-
- err = device_property_read_u32(dev, "num_lanes",
- &(cdns_phy->num_lanes));
- if (err)
- cdns_phy->num_lanes = DEFAULT_NUM_LANES;
-
- switch (cdns_phy->num_lanes) {
- case 1:
- case 2:
- case 4:
- /* valid number of lanes */
- break;
- default:
- dev_err(dev, "unsupported number of lanes: %d\n",
- cdns_phy->num_lanes);
- return -EINVAL;
- }
-
- err = device_property_read_u32(dev, "max_bit_rate",
- &(cdns_phy->max_bit_rate));
- if (err)
- cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE;
-
- switch (cdns_phy->max_bit_rate) {
- case 2160:
- case 2430:
- case 2700:
- case 3240:
- case 4320:
- case 5400:
- case 8100:
- /* valid bit rate */
- break;
- default:
- dev_err(dev, "unsupported max bit rate: %dMbps\n",
- cdns_phy->max_bit_rate);
- return -EINVAL;
- }
-
- phy_set_drvdata(phy, cdns_phy);
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-
- dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n",
- cdns_phy->num_lanes,
- cdns_phy->max_bit_rate / 1000,
- cdns_phy->max_bit_rate % 1000);
-
- return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static const struct of_device_id cdns_dp_phy_of_match[] = {
- {
- .compatible = "cdns,dp-phy"
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, cdns_dp_phy_of_match);
-
-static struct platform_driver cdns_dp_phy_driver = {
- .probe = cdns_dp_phy_probe,
- .driver = {
- .name = "cdns-dp-phy",
- .of_match_table = cdns_dp_phy_of_match,
- }
-};
-module_platform_driver(cdns_dp_phy_driver);
-
-MODULE_AUTHOR("Cadence Design Systems, Inc.");
-MODULE_DESCRIPTION("Cadence MHDP PHY driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
new file mode 100644
index 000000000000..7116127358ee
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -0,0 +1,1944 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cadence Torrent SD0801 PHY driver.
+ *
+ * Copyright 2018 Cadence Design Systems, Inc.
+ *
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#define REF_CLK_19_2MHz 19200000
+#define REF_CLK_25MHz 25000000
+
+#define DEFAULT_NUM_LANES 4
+#define MAX_NUM_LANES 4
+#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
+
+#define POLL_TIMEOUT_US 5000
+
+#define TORRENT_COMMON_CDB_OFFSET 0x0
+
+#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0x4000 << (block_offset)) + \
+ (((ln) << 9) << (reg_offset)))
+
+#define TORRENT_RX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0x8000 << (block_offset)) + \
+ (((ln) << 9) << (reg_offset)))
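+
+/*
+ * As an illustration of the two macros above (shift values assumed, not taken
+ * from a datasheet): with block_offset = 0 and reg_offset = 0, the TX lane 2
+ * CDB starts at 0x4000 + (2 << 9) = 0x4400 and the RX CDB for the same lane
+ * at 0x8000 + (2 << 9) = 0x8400.
+ */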
+
+#define TORRENT_PHY_PCS_COMMON_OFFSET(block_offset) \
+ (0xC000 << (block_offset))
+
+#define TORRENT_PHY_PMA_COMMON_OFFSET(block_offset) \
+ (0xE000 << (block_offset))
+
+#define TORRENT_DPTX_PHY_OFFSET 0x0
+
+/*
+ * register offsets from DPTX PHY register block base (i.e. MHDP
+ * register base + 0x30a00)
+ */
+#define PHY_AUX_CTRL 0x04
+#define PHY_RESET 0x20
+#define PMA_TX_ELEC_IDLE_MASK 0xF0U
+#define PMA_TX_ELEC_IDLE_SHIFT 4
+#define PHY_L00_RESET_N_MASK 0x01U
+#define PHY_PMA_XCVR_PLLCLK_EN 0x24
+#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
+#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
+#define PHY_POWER_STATE_LN_0 0x0000
+#define PHY_POWER_STATE_LN_1 0x0008
+#define PHY_POWER_STATE_LN_2 0x0010
+#define PHY_POWER_STATE_LN_3 0x0018
+#define PMA_XCVR_POWER_STATE_REQ_LN_MASK 0x3FU
+#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
+#define PHY_PMA_CMN_READY 0x34
+
+/*
+ * register offsets from SD0801 PHY register block base (i.e. MHDP
+ * register base + 0x500000)
+ */
+#define CMN_SSM_BANDGAP_TMR 0x0021U
+#define CMN_SSM_BIAS_TMR 0x0022U
+#define CMN_PLLSM0_PLLPRE_TMR 0x002AU
+#define CMN_PLLSM0_PLLLOCK_TMR 0x002CU
+#define CMN_PLLSM1_PLLPRE_TMR 0x0032U
+#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U
+#define CMN_BGCAL_INIT_TMR 0x0064U
+#define CMN_BGCAL_ITER_TMR 0x0065U
+#define CMN_IBCAL_INIT_TMR 0x0074U
+#define CMN_PLL0_VCOCAL_TCTRL 0x0082U
+#define CMN_PLL0_VCOCAL_INIT_TMR 0x0084U
+#define CMN_PLL0_VCOCAL_ITER_TMR 0x0085U
+#define CMN_PLL0_VCOCAL_REFTIM_START 0x0086U
+#define CMN_PLL0_VCOCAL_PLLCNT_START 0x0088U
+#define CMN_PLL0_INTDIV_M0 0x0090U
+#define CMN_PLL0_FRACDIVL_M0 0x0091U
+#define CMN_PLL0_FRACDIVH_M0 0x0092U
+#define CMN_PLL0_HIGH_THR_M0 0x0093U
+#define CMN_PLL0_DSM_DIAG_M0 0x0094U
+#define CMN_PLL0_SS_CTRL1_M0 0x0098U
+#define CMN_PLL0_SS_CTRL2_M0 0x0099U
+#define CMN_PLL0_SS_CTRL3_M0 0x009AU
+#define CMN_PLL0_SS_CTRL4_M0 0x009BU
+#define CMN_PLL0_LOCK_REFCNT_START 0x009CU
+#define CMN_PLL0_LOCK_PLLCNT_START 0x009EU
+#define CMN_PLL0_LOCK_PLLCNT_THR 0x009FU
+#define CMN_PLL1_VCOCAL_TCTRL 0x00C2U
+#define CMN_PLL1_VCOCAL_INIT_TMR 0x00C4U
+#define CMN_PLL1_VCOCAL_ITER_TMR 0x00C5U
+#define CMN_PLL1_VCOCAL_REFTIM_START 0x00C6U
+#define CMN_PLL1_VCOCAL_PLLCNT_START 0x00C8U
+#define CMN_PLL1_INTDIV_M0 0x00D0U
+#define CMN_PLL1_FRACDIVL_M0 0x00D1U
+#define CMN_PLL1_FRACDIVH_M0 0x00D2U
+#define CMN_PLL1_HIGH_THR_M0 0x00D3U
+#define CMN_PLL1_DSM_DIAG_M0 0x00D4U
+#define CMN_PLL1_SS_CTRL1_M0 0x00D8U
+#define CMN_PLL1_SS_CTRL2_M0 0x00D9U
+#define CMN_PLL1_SS_CTRL3_M0 0x00DAU
+#define CMN_PLL1_SS_CTRL4_M0 0x00DBU
+#define CMN_PLL1_LOCK_REFCNT_START 0x00DCU
+#define CMN_PLL1_LOCK_PLLCNT_START 0x00DEU
+#define CMN_PLL1_LOCK_PLLCNT_THR 0x00DFU
+#define CMN_TXPUCAL_INIT_TMR 0x0104U
+#define CMN_TXPUCAL_ITER_TMR 0x0105U
+#define CMN_TXPDCAL_INIT_TMR 0x010CU
+#define CMN_TXPDCAL_ITER_TMR 0x010DU
+#define CMN_RXCAL_INIT_TMR 0x0114U
+#define CMN_RXCAL_ITER_TMR 0x0115U
+#define CMN_SD_CAL_INIT_TMR 0x0124U
+#define CMN_SD_CAL_ITER_TMR 0x0125U
+#define CMN_SD_CAL_REFTIM_START 0x0126U
+#define CMN_SD_CAL_PLLCNT_START 0x0128U
+#define CMN_PDIAG_PLL0_CTRL_M0 0x01A0U
+#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x01A1U
+#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x01A4U
+#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x01A5U
+#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x01A6U
+#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x01B4U
+#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x01B5U
+#define CMN_PDIAG_PLL1_CTRL_M0 0x01C0U
+#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x01C1U
+#define CMN_PDIAG_PLL1_CP_PADJ_M0 0x01C4U
+#define CMN_PDIAG_PLL1_CP_IADJ_M0 0x01C5U
+#define CMN_PDIAG_PLL1_FILT_PADJ_M0 0x01C6U
+
+/* PMA TX Lane registers */
+#define TX_TXCC_CTRL 0x0040U
+#define TX_TXCC_CPOST_MULT_00 0x004CU
+#define TX_TXCC_MGNFS_MULT_000 0x0050U
+#define DRV_DIAG_TX_DRV 0x00C6U
+#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U
+#define XCVR_DIAG_HSCLK_SEL 0x00E6U
+#define XCVR_DIAG_HSCLK_DIV 0x00E7U
+#define XCVR_DIAG_BIDI_CTRL 0x00EAU
+#define TX_PSC_A0 0x0100U
+#define TX_PSC_A2 0x0102U
+#define TX_PSC_A3 0x0103U
+#define TX_RCVDET_ST_TMR 0x0123U
+#define TX_DIAG_ACYA 0x01E7U
+#define TX_DIAG_ACYA_HBDC_MASK 0x0001U
+
+/* PMA RX Lane registers */
+#define RX_PSC_A0 0x0000U
+#define RX_PSC_A2 0x0002U
+#define RX_PSC_A3 0x0003U
+#define RX_PSC_CAL 0x0006U
+#define RX_REE_GCSM1_CTRL 0x0108U
+#define RX_REE_GCSM2_CTRL 0x0110U
+#define RX_REE_PERGCSM_CTRL 0x0118U
+
+/* PHY PCS common registers */
+#define PHY_PLL_CFG 0x000EU
+
+/* PHY PMA common registers */
+#define PHY_PMA_CMN_CTRL2 0x0001U
+#define PHY_PMA_PLL_RAW_CTRL 0x0003U
+
+static const struct reg_field phy_pll_cfg =
+ REG_FIELD(PHY_PLL_CFG, 0, 1);
+
+static const struct reg_field phy_pma_cmn_ctrl_2 =
+ REG_FIELD(PHY_PMA_CMN_CTRL2, 0, 7);
+
+static const struct reg_field phy_pma_pll_raw_ctrl =
+ REG_FIELD(PHY_PMA_PLL_RAW_CTRL, 0, 1);
+
+static const struct reg_field phy_reset_ctrl =
+ REG_FIELD(PHY_RESET, 8, 8);
+
+static const struct of_device_id cdns_torrent_phy_of_match[];
+
+struct cdns_torrent_inst {
+ struct phy *phy;
+ u32 mlane;
+ u32 phy_type;
+ u32 num_lanes;
+ struct reset_control *lnk_rst;
+};
+
+struct cdns_torrent_phy {
+ void __iomem *base; /* DPTX registers base */
+ void __iomem *sd_base; /* SD0801 registers base */
+ u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
+ struct reset_control *phy_rst;
+ struct device *dev;
+ struct clk *clk;
+ unsigned long ref_clk_rate;
+ struct cdns_torrent_inst phys[MAX_NUM_LANES];
+ int nsubnodes;
+ struct regmap *regmap;
+ struct regmap *regmap_common_cdb;
+ struct regmap *regmap_phy_pcs_common_cdb;
+ struct regmap *regmap_phy_pma_common_cdb;
+ struct regmap *regmap_tx_lane_cdb[MAX_NUM_LANES];
+ struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES];
+ struct regmap *regmap_dptx_phy_reg;
+ struct regmap_field *phy_pll_cfg;
+ struct regmap_field *phy_pma_cmn_ctrl_2;
+ struct regmap_field *phy_pma_pll_raw_ctrl;
+ struct regmap_field *phy_reset_ctrl;
+};
+
+enum phy_powerstate {
+ POWERSTATE_A0 = 0,
+ /* Powerstate A1 is unused */
+ POWERSTATE_A2 = 2,
+ POWERSTATE_A3 = 3,
+};
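+
+/*
+ * A sketch of how these states are driven below: the driver requests a state
+ * by writing one 6-bit field per lane into PHY_PMA_XCVR_POWER_STATE_REQ
+ * (0x01 for A0, 0x04 for A2, 0x08 for A3) and then polls
+ * PHY_PMA_XCVR_POWER_STATE_ACK; see cdns_torrent_dp_set_power_state().
+ */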
+
+static int cdns_torrent_dp_init(struct phy *phy);
+static int cdns_torrent_dp_exit(struct phy *phy);
+static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
+ u32 num_lanes);
+static
+int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy);
+static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst);
+static
+void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy);
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc);
+static
+void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy);
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc);
+static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy,
+ unsigned int lane);
+static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, u32 num_lanes);
+static int cdns_torrent_dp_configure(struct phy *phy,
+ union phy_configure_opts *opts);
+static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+ u32 num_lanes,
+ enum phy_powerstate powerstate);
+static int cdns_torrent_phy_on(struct phy *phy);
+static int cdns_torrent_phy_off(struct phy *phy);
+
+static const struct phy_ops cdns_torrent_phy_ops = {
+ .init = cdns_torrent_dp_init,
+ .exit = cdns_torrent_dp_exit,
+ .configure = cdns_torrent_dp_configure,
+ .power_on = cdns_torrent_phy_on,
+ .power_off = cdns_torrent_phy_off,
+ .owner = THIS_MODULE,
+};
+
+struct cdns_torrent_data {
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+};
+
+struct cdns_regmap_cdb_context {
+ struct device *dev;
+ void __iomem *base;
+ u8 reg_offset_shift;
+};
+
+static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ writew(val, ctx->base + offset);
+
+ return 0;
+}
+
+static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ *val = readw(ctx->base + offset);
+ return 0;
+}
+
+static int cdns_regmap_dptx_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg;
+
+ writel(val, ctx->base + offset);
+
+ return 0;
+}
+
+static int cdns_regmap_dptx_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg;
+
+ *val = readl(ctx->base + offset);
+ return 0;
+}
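+
+/*
+ * Note on the two accessor pairs above: the SD0801 CDB registers are accessed
+ * as 16-bit quantities with readw()/writew() at byte offset
+ * (reg << reg_offset_shift), while the DPTX PHY registers are plain 32-bit
+ * readl()/writel() accesses at their raw offsets.
+ */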
+
+#define TORRENT_TX_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "torrent_tx_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+#define TORRENT_RX_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "torrent_rx_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static struct regmap_config cdns_torrent_tx_lane_cdb_config[] = {
+ TORRENT_TX_LANE_CDB_REGMAP_CONF("0"),
+ TORRENT_TX_LANE_CDB_REGMAP_CONF("1"),
+ TORRENT_TX_LANE_CDB_REGMAP_CONF("2"),
+ TORRENT_TX_LANE_CDB_REGMAP_CONF("3"),
+};
+
+static struct regmap_config cdns_torrent_rx_lane_cdb_config[] = {
+ TORRENT_RX_LANE_CDB_REGMAP_CONF("0"),
+ TORRENT_RX_LANE_CDB_REGMAP_CONF("1"),
+ TORRENT_RX_LANE_CDB_REGMAP_CONF("2"),
+ TORRENT_RX_LANE_CDB_REGMAP_CONF("3"),
+};
+
+static struct regmap_config cdns_torrent_common_cdb_config = {
+ .name = "torrent_common_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = {
+ .name = "torrent_phy_pcs_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = {
+ .name = "torrent_phy_pma_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static struct regmap_config cdns_torrent_dptx_phy_config = {
+ .name = "torrent_dptx_phy",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_dptx_write,
+ .reg_read = cdns_regmap_dptx_read,
+};
+
+/* PHY mmr access functions */
+
+static void cdns_torrent_phy_write(struct regmap *regmap, u32 offset, u32 val)
+{
+ regmap_write(regmap, offset, val);
+}
+
+static u32 cdns_torrent_phy_read(struct regmap *regmap, u32 offset)
+{
+ unsigned int val;
+
+ regmap_read(regmap, offset, &val);
+ return val;
+}
+
+/* DPTX mmr access functions */
+
+static void cdns_torrent_dp_write(struct regmap *regmap, u32 offset, u32 val)
+{
+ regmap_write(regmap, offset, val);
+}
+
+static u32 cdns_torrent_dp_read(struct regmap *regmap, u32 offset)
+{
+ u32 val;
+
+ regmap_read(regmap, offset, &val);
+ return val;
+}
+
+/*
+ * Structure used to store values of PHY registers for voltage-related
+ * coefficients, for a particular voltage swing and pre-emphasis level. Values
+ * are shared across all physical lanes.
+ */
+struct coefficients {
+ /* Value of DRV_DIAG_TX_DRV register to use */
+ u16 diag_tx_drv;
+ /* Value of TX_TXCC_MGNFS_MULT_000 register to use */
+ u16 mgnfs_mult;
+ /* Value of TX_TXCC_CPOST_MULT_00 register to use */
+ u16 cpost_mult;
+};
+
+/*
+ * The array consists of values of the voltage-related registers for the SD0801
+ * PHY. A value of 0xFFFF is a placeholder for an invalid combination and will
+ * never be used.
+ */
+static const struct coefficients vltg_coeff[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x002A,
+ .cpost_mult = 0x0000},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F,
+ .cpost_mult = 0x0014},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0012,
+ .cpost_mult = 0x0020},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
+ .cpost_mult = 0x002A}
+ },
+
+ /* voltage swing 1, pre-emphasis 0->3 */
+ { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F,
+ .cpost_mult = 0x0000},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013,
+ .cpost_mult = 0x0012},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
+ .cpost_mult = 0x001F},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF}
+ },
+
+ /* voltage swing 2, pre-emphasis 0->3 */
+ { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013,
+ .cpost_mult = 0x0000},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
+ .cpost_mult = 0x0013},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF}
+ },
+
+ /* voltage swing 3, pre-emphasis 0->3 */
+ { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
+ .cpost_mult = 0x0000},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF}
+ }
+};
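+
+/*
+ * Lookup example for the table above: voltage swing 1 with pre-emphasis
+ * level 2, i.e. vltg_coeff[1][2], yields DRV_DIAG_TX_DRV = 0x0003,
+ * TX_TXCC_MGNFS_MULT_000 = 0x0000 and TX_TXCC_CPOST_MULT_00 = 0x001F.
+ * Combinations where swing + pre-emphasis would exceed 3 hold the 0xFFFF
+ * placeholder.
+ */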
+
+/*
+ * Enable or disable PLL for selected lanes.
+ */
+static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp,
+ bool enable)
+{
+ u32 rd_val;
+	int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+	/*
+	 * Used to determine which bits to check for or enable in the
+	 * PHY_PMA_XCVR_PLLCLK_EN register.
+	 */
+ u32 pll_bits;
+ /* Used to enable or disable lanes. */
+ u32 pll_val;
+
+	/*
+	 * Select the PLL enable bits (also used as the ack mask) based on the
+	 * enabled lane count.
+	 */
+	switch (dp->lanes) {
+	/* lane 0 */
+	case 1:
+		pll_bits = 0x00000001;
+		break;
+	/* lanes 0-1 */
+	case 2:
+		pll_bits = 0x00000003;
+		break;
+	/* lanes 0-3, all */
+	default:
+		pll_bits = 0x0000000F;
+		break;
+	}
+
+ if (enable)
+ pll_val = pll_bits;
+ else
+ pll_val = 0x00000000;
+
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_val);
+
+ /* Wait for acknowledgment from PHY. */
+ ret = regmap_read_poll_timeout(regmap,
+ PHY_PMA_XCVR_PLLCLK_EN_ACK,
+ rd_val,
+ (rd_val & pll_bits) == pll_val,
+ 0, POLL_TIMEOUT_US);
+ ndelay(100);
+ return ret;
+}
+
+/*
+ * Perform the register operations related to setting the link rate, once the
+ * power state is set and the PLL disable request has been processed.
+ */
+static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp)
+{
+	int ret;
+ u32 read_val;
+
+ /* Disable the cmn_pll0_en before re-programming the new data rate. */
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0);
+
+ /*
+ * Wait for PLL ready de-assertion.
+ * For PLL0 - PHY_PMA_CMN_CTRL2[2] == 1
+ */
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 2) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ ndelay(200);
+
+ /* DP Rate Change - VCO Output settings. */
+ if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) {
+ /* PMA common configuration 19.2MHz */
+ cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate,
+ dp->ssc);
+ cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy);
+ } else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) {
+ /* PMA common configuration 25MHz */
+ cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate,
+ dp->ssc);
+ cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy);
+ }
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);
+
+ /* Enable the cmn_pll0_en. */
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x3);
+
+ /*
+ * Wait for PLL ready assertion.
+ * For PLL0 - PHY_PMA_CMN_CTRL2[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ (read_val & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ return ret;
+}
+
+/*
+ * Verify that the parameters used to configure the PHY are correct.
+ */
+static int cdns_torrent_dp_verify_config(struct cdns_torrent_inst *inst,
+ struct phy_configure_opts_dp *dp)
+{
+ u8 i;
+
+	/* If a link rate change is requested, verify that it is supported. */
+ if (dp->set_rate) {
+ switch (dp->link_rate) {
+ case 1620:
+ case 2160:
+ case 2430:
+ case 2700:
+ case 3240:
+ case 4320:
+ case 5400:
+ case 8100:
+ /* valid bit rate */
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Verify lane count. */
+ switch (dp->lanes) {
+ case 1:
+ case 2:
+ case 4:
+ /* valid lane count. */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Check against actual number of PHY's lanes. */
+ if (dp->lanes > inst->num_lanes)
+ return -EINVAL;
+
+ /*
+ * If changing voltages is required, check swing and pre-emphasis
+ * levels, per-lane.
+ */
+ if (dp->set_voltages) {
+ /* Lane count verified previously. */
+ for (i = 0; i < dp->lanes; i++) {
+ if (dp->voltage[i] > 3 || dp->pre[i] > 3)
+ return -EINVAL;
+
+ /* Sum of voltage swing and pre-emphasis levels cannot
+ * exceed 3.
+ */
+ if (dp->voltage[i] + dp->pre[i] > 3)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
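+
+/*
+ * For instance, a request for voltage swing 3 with pre-emphasis 1 is rejected
+ * by the check above (3 + 1 > 3), matching the 0xFFFF placeholder for that
+ * combination in vltg_coeff[].
+ */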
+
+/* Set power state A0 and PLL clock enable to 0 on enabled lanes. */
+static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
+ u32 num_lanes)
+{
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u32 pwr_state = cdns_torrent_dp_read(regmap,
+ PHY_PMA_XCVR_POWER_STATE_REQ);
+ u32 pll_clk_en = cdns_torrent_dp_read(regmap,
+ PHY_PMA_XCVR_PLLCLK_EN);
+
+ /* Lane 0 is always enabled. */
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
+ PHY_POWER_STATE_LN_0);
+ pll_clk_en &= ~0x01U;
+
+ if (num_lanes > 1) {
+ /* lane 1 */
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
+ PHY_POWER_STATE_LN_1);
+ pll_clk_en &= ~(0x01U << 1);
+ }
+
+ if (num_lanes > 2) {
+ /* lanes 2 and 3 */
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
+ PHY_POWER_STATE_LN_2);
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
+ PHY_POWER_STATE_LN_3);
+ pll_clk_en &= ~(0x01U << 2);
+ pll_clk_en &= ~(0x01U << 3);
+ }
+
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, pwr_state);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_clk_en);
+}
+
+/* Configure lane count as required. */
+static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp)
+{
+ u32 value;
+	int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u8 lane_mask = (1 << dp->lanes) - 1;
+
+ value = cdns_torrent_dp_read(regmap, PHY_RESET);
+ /* clear pma_tx_elec_idle_ln_* bits. */
+ value &= ~PMA_TX_ELEC_IDLE_MASK;
+ /* Assert pma_tx_elec_idle_ln_* for disabled lanes. */
+ value |= ((~lane_mask) << PMA_TX_ELEC_IDLE_SHIFT) &
+ PMA_TX_ELEC_IDLE_MASK;
+ cdns_torrent_dp_write(regmap, PHY_RESET, value);
+
+ /* reset the link by asserting phy_l00_reset_n low */
+ cdns_torrent_dp_write(regmap, PHY_RESET,
+ value & (~PHY_L00_RESET_N_MASK));
+
+ /*
+ * Assert lane reset on unused lanes and lane 0 so they remain in reset
+ * and powered down when re-enabling the link
+ */
+ value = (value & 0x0000FFF0) | (0x0000000E & lane_mask);
+ cdns_torrent_dp_write(regmap, PHY_RESET, value);
+
+ cdns_torrent_dp_set_a0_pll(cdns_phy, dp->lanes);
+
+	/* release phy_l0*_reset_n based on used lane count */
+ value = (value & 0x0000FFF0) | (0x0000000F & lane_mask);
+ cdns_torrent_dp_write(regmap, PHY_RESET, value);
+
+	/* Wait until the PHY is ready after releasing the PHY reset signal. */
+ ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
+ if (ret)
+ return ret;
+
+ ndelay(100);
+
+ /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+
+ ret = cdns_torrent_dp_run(cdns_phy, dp->lanes);
+
+ return ret;
+}
+
+/* Configure link rate as required. */
+static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp)
+{
+	int ret;
+
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ POWERSTATE_A3);
+ if (ret)
+ return ret;
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, false);
+ if (ret)
+ return ret;
+ ndelay(200);
+
+ ret = cdns_torrent_dp_configure_rate(cdns_phy, dp);
+ if (ret)
+ return ret;
+ ndelay(200);
+
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, true);
+ if (ret)
+ return ret;
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ POWERSTATE_A2);
+ if (ret)
+ return ret;
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ POWERSTATE_A0);
+ if (ret)
+ return ret;
+ ndelay(900);
+
+ return ret;
+}
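+
+/*
+ * In short, the rate change sequence above is: move the enabled lanes to A3,
+ * disable the PLL clocks, reprogram the VCO and divider registers for the new
+ * rate, re-enable the PLL clocks, and step back through A2 to A0.
+ */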
+
+/* Configure voltage swing and pre-emphasis for all enabled lanes. */
+static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp)
+{
+ u8 lane;
+ u16 val;
+
+ for (lane = 0; lane < dp->lanes; lane++) {
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_DIAG_ACYA);
+ /*
+ * Write 1 to register bit TX_DIAG_ACYA[0] to freeze the
+ * current state of the analog TX driver.
+ */
+ val |= TX_DIAG_ACYA_HBDC_MASK;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_DIAG_ACYA, val);
+
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_TXCC_CTRL, 0x08A4);
+ val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].diag_tx_drv;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ DRV_DIAG_TX_DRV, val);
+ val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].mgnfs_mult;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_TXCC_MGNFS_MULT_000,
+ val);
+ val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].cpost_mult;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_TXCC_CPOST_MULT_00,
+ val);
+
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_DIAG_ACYA);
+ /*
+ * Write 0 to register bit TX_DIAG_ACYA[0] to allow the state of
+ * analog TX driver to reflect the new programmed one.
+ */
+ val &= ~TX_DIAG_ACYA_HBDC_MASK;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_DIAG_ACYA, val);
+ }
+}
+
+static int cdns_torrent_dp_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ ret = cdns_torrent_dp_verify_config(inst, &opts->dp);
+ if (ret) {
+ dev_err(&phy->dev, "invalid params for phy configure\n");
+ return ret;
+ }
+
+ if (opts->dp.set_lanes) {
+ ret = cdns_torrent_dp_set_lanes(cdns_phy, &opts->dp);
+ if (ret) {
+ dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n");
+ return ret;
+ }
+ }
+
+ if (opts->dp.set_rate) {
+ ret = cdns_torrent_dp_set_rate(cdns_phy, &opts->dp);
+ if (ret) {
+ dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n");
+ return ret;
+ }
+ }
+
+ if (opts->dp.set_voltages)
+ cdns_torrent_dp_set_voltages(cdns_phy, &opts->dp);
+
+ return ret;
+}
+
+static int cdns_torrent_dp_init(struct phy *phy)
+{
+ unsigned char lane_bits;
+ int ret;
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ ret = clk_prepare_enable(cdns_phy->clk);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+ return ret;
+ }
+
+ cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
+	if (!cdns_phy->ref_clk_rate) {
+ dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
+ clk_disable_unprepare(cdns_phy->clk);
+ return -EINVAL;
+ }
+
+ switch (cdns_phy->ref_clk_rate) {
+ case REF_CLK_19_2MHz:
+ case REF_CLK_25MHz:
+ /* Valid Ref Clock Rate */
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
+		clk_disable_unprepare(cdns_phy->clk);
+		return -EINVAL;
+ }
+
+ cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */
+
+ /* PHY PMA registers configuration function */
+ cdns_torrent_dp_pma_cfg(cdns_phy, inst);
+
+ /*
+ * Set lines power state to A0
+ * Set lines pll clk enable to 0
+ */
+ cdns_torrent_dp_set_a0_pll(cdns_phy, inst->num_lanes);
+
+ /*
+ * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
+ * used lanes
+ */
+ lane_bits = (1 << inst->num_lanes) - 1;
+ cdns_torrent_dp_write(regmap, PHY_RESET,
+ ((0xF & ~lane_bits) << 4) | (0xF & lane_bits));
+
+ /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+
+ /* PHY PMA registers configuration functions */
+ /* Initialize PHY with max supported link rate, without SSC. */
+ if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz)
+ cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy,
+ cdns_phy->max_bit_rate,
+ false);
+ else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+ cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy,
+ cdns_phy->max_bit_rate,
+ false);
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate,
+ inst->num_lanes);
+
+ /* take out of reset */
+ regmap_field_write(cdns_phy->phy_reset_ctrl, 0x1);
+
+ cdns_torrent_phy_on(phy);
+
+ ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
+ if (ret)
+ return ret;
+
+ ret = cdns_torrent_dp_run(cdns_phy, inst->num_lanes);
+
+ return ret;
+}
+
+static int cdns_torrent_dp_exit(struct phy *phy)
+{
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+
+ clk_disable_unprepare(cdns_phy->clk);
+ return 0;
+}
+
+static
+int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
+{
+ unsigned int reg;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg,
+ reg & 1, 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT) {
+ dev_err(cdns_phy->dev,
+ "timeout waiting for PMA common ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst)
+{
+ unsigned int i;
+
+ if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz)
+ /* PMA common configuration 19.2MHz */
+ cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy);
+ else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+ /* PMA common configuration 25MHz */
+ cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy);
+
+ /* PMA lane configuration to deal with multi-link operation */
+ for (i = 0; i < inst->num_lanes; i++)
+ cdns_torrent_dp_pma_lane_cfg(cdns_phy, i);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* refclock registers - assumes 19.2 MHz refclock */
+ cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0014);
+ cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0027);
+ cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00A1);
+ cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0027);
+ cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00A1);
+ cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x0060);
+ cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x0060);
+ cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0014);
+ cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x0018);
+ cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0005);
+ cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x0018);
+ cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0005);
+ cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x0240);
+ cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0005);
+ cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000B);
+ cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x0137);
+
+ /* PLL registers */
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00C0);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00C0);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0260);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0260);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003);
+}
+
+/*
+ * Set registers responsible for enabling and configuring SSC, with second and
+ * third register values provided by parameters.
+ */
+static
+void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 ctrl2_val, u32 ctrl3_val)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
+	cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, ctrl2_val);
+	cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, ctrl3_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
+	cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, ctrl2_val);
+	cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, ctrl3_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* Assumes 19.2 MHz refclock */
+ switch (rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_INTDIV_M0, 0x0119);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_HIGH_THR_M0, 0x00BC);
+ cdns_torrent_phy_write(regmap,
+ CMN_PDIAG_PLL0_CTRL_M0, 0x0012);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_INTDIV_M0, 0x0119);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_HIGH_THR_M0, 0x00BC);
+ cdns_torrent_phy_write(regmap,
+ CMN_PDIAG_PLL1_CTRL_M0, 0x0012);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A,
+ 0x006A);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 1620:
+ case 2430:
+ case 3240:
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_INTDIV_M0, 0x01FA);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_HIGH_THR_M0, 0x0152);
+ cdns_torrent_phy_write(regmap,
+ CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_INTDIV_M0, 0x01FA);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_HIGH_THR_M0, 0x0152);
+ cdns_torrent_phy_write(regmap,
+ CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD,
+ 0x0069);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_INTDIV_M0, 0x01C2);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_HIGH_THR_M0, 0x012C);
+ cdns_torrent_phy_write(regmap,
+ CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_INTDIV_M0, 0x01C2);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_HIGH_THR_M0, 0x012C);
+ cdns_torrent_phy_write(regmap,
+ CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536,
+ 0x0069);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_INTDIV_M0, 0x01A5);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_FRACDIVL_M0, 0xE000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_HIGH_THR_M0, 0x011A);
+ cdns_torrent_phy_write(regmap,
+ CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_INTDIV_M0, 0x01A5);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_FRACDIVL_M0, 0xE000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_HIGH_THR_M0, 0x011A);
+ cdns_torrent_phy_write(regmap,
+ CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7,
+ 0x006A);
+ break;
+ }
+
+ if (ssc) {
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
+ } else {
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260);
+ /* Set reset register values to disable SSC */
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099);
+}
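+
+/*
+ * A worked example for the 19.2 MHz table above (feedback divider
+ * interpretation assumed): the 8.64 GHz setting programs INTDIV = 0x01C2 = 450
+ * with a zero fractional part, and 450 * 19.2 MHz = 8640 MHz; likewise the
+ * 8.1 GHz setting gives (0x01A5 + 0xE000/0x10000) * 19.2 MHz =
+ * 421.875 * 19.2 MHz = 8100 MHz.
+ */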
+
+static
+void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* refclock registers - assumes 25 MHz refclock */
+ cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0019);
+ cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0032);
+ cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00D1);
+ cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0032);
+ cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00D1);
+ cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x007D);
+ cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x007D);
+ cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0019);
+ cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x001E);
+ cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0006);
+ cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x001E);
+ cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0006);
+ cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x02EE);
+ cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0006);
+ cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000E);
+ cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x012B);
+
+ /* PLL registers */
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00FA);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00FA);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0317);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0317);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003);
+}
+
+/*
+ * Set registers responsible for enabling and configuring SSC, with second
+ * register value provided by a parameter.
+ */
+static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 ctrl2_val)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
+	cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, ctrl2_val);
+	cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x007F);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
+	cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, ctrl2_val);
+	cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x007F);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* Assumes 25 MHz refclock */
+ switch (rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 1620:
+ case 2430:
+ case 3240:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A);
+ break;
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+
+ if (ssc) {
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
+ } else {
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317);
+ /* Set reset register values to disable SSC */
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7);
+}
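+
+/*
+ * The same assumed relation holds for the 25 MHz table above: e.g. the
+ * 8.64 GHz setting programs (0x0159 + 0x999A/0x10000) * 25 MHz, i.e. roughly
+ * 345.6 * 25 MHz = 8640 MHz.
+ */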
+
+static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, u32 num_lanes)
+{
+ unsigned int clk_sel_val = 0;
+ unsigned int hsclk_div_val = 0;
+ unsigned int i;
+
+ /* 16'h0000 for single DP link configuration */
+ regmap_field_write(cdns_phy->phy_pll_cfg, 0x0);
+
+ switch (rate) {
+ case 1620:
+ clk_sel_val = 0x0f01;
+ hsclk_div_val = 2;
+ break;
+ case 2160:
+ case 2430:
+ case 2700:
+ clk_sel_val = 0x0701;
+ hsclk_div_val = 1;
+ break;
+ case 3240:
+ clk_sel_val = 0x0b00;
+ hsclk_div_val = 2;
+ break;
+ case 4320:
+ case 5400:
+ clk_sel_val = 0x0301;
+ hsclk_div_val = 0;
+ break;
+ case 8100:
+ clk_sel_val = 0x0200;
+ hsclk_div_val = 0;
+ break;
+ }
+
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
+
+ /* PMA lane configuration to deal with multi-link operation */
+ for (i = 0; i < num_lanes; i++)
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i],
+ XCVR_DIAG_HSCLK_DIV, hsclk_div_val);
+}
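+
+/*
+ * Note that the same clock select and HSCLK divider values are applied to
+ * both PLL0 and PLL1 and to every active lane, consistent with the
+ * single-link configuration selected via PHY_PLL_CFG above.
+ */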
+
+static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy,
+ unsigned int lane)
+{
+ /* Per lane, refclock-dependent receiver detection setting */
+ if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz)
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_RCVDET_ST_TMR, 0x0780);
+ else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_RCVDET_ST_TMR, 0x09C4);
+
+ /* Writing Tx/Rx Power State Controllers registers */
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_PSC_A0, 0x00FB);
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_PSC_A2, 0x04AA);
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_PSC_A3, 0x04AA);
+ cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
+ RX_PSC_A0, 0x0000);
+ cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
+ RX_PSC_A2, 0x0000);
+ cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
+ RX_PSC_A3, 0x0000);
+
+ cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
+ RX_PSC_CAL, 0x0000);
+
+ cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
+ RX_REE_GCSM1_CTRL, 0x0000);
+ cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
+ RX_REE_GCSM2_CTRL, 0x0000);
+ cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
+ RX_REE_PERGCSM_CTRL, 0x0000);
+
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ XCVR_DIAG_BIDI_CTRL, 0x000F);
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ XCVR_DIAG_PLLDRC_CTRL, 0x0001);
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ XCVR_DIAG_HSCLK_SEL, 0x0000);
+}
+
+static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+ u32 num_lanes,
+ enum phy_powerstate powerstate)
+{
+	/* Power state request value for a single lane (one byte per lane). */
+ u32 value_part;
+ u32 value;
+ u32 mask;
+ u32 read_val;
+	int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ switch (powerstate) {
+	case POWERSTATE_A0:
+ value_part = 0x01U;
+ break;
+	case POWERSTATE_A2:
+ value_part = 0x04U;
+ break;
+ default:
+ /* Powerstate A3 */
+ value_part = 0x08U;
+ break;
+ }
+
+	/*
+	 * Select the register value and mask based on the enabled lane
+	 * count.
+	 */
+	switch (num_lanes) {
+	/* lane 0 */
+	case 1:
+		value = value_part;
+		mask = 0x0000003FU;
+		break;
+	/* lanes 0-1 */
+	case 2:
+ value = (value_part
+ | (value_part << 8));
+ mask = 0x00003F3FU;
+ break;
+ /* lanes 0-3, all */
+ default:
+ value = (value_part
+ | (value_part << 8)
+ | (value_part << 16)
+ | (value_part << 24));
+ mask = 0x3F3F3F3FU;
+ break;
+ }
+
+ /* Set power state A<n>. */
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value);
+	/* Wait until the PHY acknowledges power state completion. */
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == value, 0,
+ POLL_TIMEOUT_US);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000);
+ ndelay(100);
+
+ return ret;
+}
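+
+/*
+ * Example: a two-lane request for A2 writes value_part = 0x04 into the lane 0
+ * and lane 1 fields, i.e. value = 0x0404 with mask = 0x00003F3F, then waits
+ * for the same pattern in PHY_PMA_XCVR_POWER_STATE_ACK.
+ */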
+
+static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
+{
+ unsigned int read_val;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ /*
+ * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the
+ * master lane
+ */
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK,
+ read_val, read_val & 1,
+ 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT) {
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link PLL clock enable ack\n");
+ return ret;
+ }
+
+ ndelay(100);
+
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
+ POWERSTATE_A2);
+ if (ret)
+ return ret;
+
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
+ POWERSTATE_A0);
+
+ return ret;
+}
+
+static int cdns_torrent_phy_on(struct phy *phy)
+{
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
+
+ /* Take the PHY lane group out of reset */
+ return reset_control_deassert(inst->lnk_rst);
+}
+
+static int cdns_torrent_phy_off(struct phy *phy)
+{
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ ret = reset_control_assert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
+
+ return reset_control_assert(inst->lnk_rst);
+}
+
+static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
+ u32 block_offset,
+ u8 reg_offset_shift,
+ const struct regmap_config *config)
+{
+ struct cdns_regmap_cdb_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+ ctx->base = base + block_offset;
+ ctx->reg_offset_shift = reg_offset_shift;
+
+ return devm_regmap_init(dev, NULL, ctx, config);
+}
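+
+/*
+ * cdns_regmap_init() is called once per register block below; each regmap
+ * keeps its own cdns_regmap_cdb_context, so the block offset is folded into
+ * ctx->base and the register stride into reg_offset_shift.
+ */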
+
+static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ struct regmap_field *field;
+ struct regmap *regmap;
+
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PLL_CFG reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pll_cfg = field;
+
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_2);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL2 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pma_cmn_ctrl_2 = field;
+
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pma_pll_raw_ctrl);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_PLL_RAW_CTRL reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pma_pll_raw_ctrl = field;
+
+ regmap = cdns_phy->regmap_dptx_phy_reg;
+ field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_RESET reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_reset_ctrl = field;
+
+ return 0;
+}
+
+static int cdns_regmap_init_torrent_dp(struct cdns_torrent_phy *cdns_phy,
+ void __iomem *sd_base,
+ void __iomem *base,
+ u8 block_offset_shift,
+ u8 reg_offset_shift)
+{
+ struct device *dev = cdns_phy->dev;
+ struct regmap *regmap;
+ u32 block_offset;
+ int i;
+
+ for (i = 0; i < MAX_NUM_LANES; i++) {
+ block_offset = TORRENT_TX_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_tx_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init tx lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_tx_lane_cdb[i] = regmap;
+
+ block_offset = TORRENT_RX_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_rx_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init rx lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_rx_lane_cdb[i] = regmap;
+ }
+
+ block_offset = TORRENT_COMMON_CDB_OFFSET;
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_common_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_common_cdb = regmap;
+
+ block_offset = TORRENT_PHY_PCS_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_phy_pcs_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_phy_pcs_common_cdb = regmap;
+
+ block_offset = TORRENT_PHY_PMA_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_phy_pma_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_phy_pma_common_cdb = regmap;
+
+ block_offset = TORRENT_DPTX_PHY_OFFSET;
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_dptx_phy_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init DPTX PHY regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_dptx_phy_reg = regmap;
+
+ return 0;
+}
+
+static int cdns_torrent_phy_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct cdns_torrent_phy *cdns_phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ const struct of_device_id *match;
+ struct cdns_torrent_data *data;
+ struct device_node *child;
+ int ret, subnodes, node = 0, i;
+
+ /* Get init data for this PHY */
+ match = of_match_device(cdns_torrent_phy_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_torrent_data *)match->data;
+
+ cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL);
+ if (!cdns_phy)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, cdns_phy);
+ cdns_phy->dev = dev;
+
+ cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
+ if (IS_ERR(cdns_phy->phy_rst)) {
+ dev_err(dev, "%s: failed to get reset\n",
+ dev->of_node->full_name);
+ return PTR_ERR(cdns_phy->phy_rst);
+ }
+
+ cdns_phy->clk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(cdns_phy->clk)) {
+ dev_err(dev, "phy ref clock not found\n");
+ return PTR_ERR(cdns_phy->clk);
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(cdns_phy->sd_base))
+ return PTR_ERR(cdns_phy->sd_base);
+
+ subnodes = of_get_available_child_count(dev->of_node);
+ if (subnodes == 0) {
+ dev_err(dev, "No available link subnodes found\n");
+ return -EINVAL;
+ } else if (subnodes != 1) {
+ dev_err(dev, "Driver supports only one link subnode.\n");
+ return -EINVAL;
+ }
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ struct phy *gphy;
+
+ cdns_phy->phys[node].lnk_rst =
+ of_reset_control_array_get_exclusive(child);
+ if (IS_ERR(cdns_phy->phys[node].lnk_rst)) {
+ dev_err(dev, "%s: failed to get reset\n",
+ child->full_name);
+ ret = PTR_ERR(cdns_phy->phys[node].lnk_rst);
+ goto put_lnk_rst;
+ }
+
+ if (of_property_read_u32(child, "reg",
+ &cdns_phy->phys[node].mlane)) {
+ dev_err(dev, "%s: No \"reg\"-property.\n",
+ child->full_name);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ if (cdns_phy->phys[node].mlane != 0) {
+ dev_err(dev,
+ "%s: Driver supports only lane-0 as master lane.\n",
+ child->full_name);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ if (of_property_read_u32(child, "cdns,phy-type",
+ &cdns_phy->phys[node].phy_type)) {
+ dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n",
+ child->full_name);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ cdns_phy->phys[node].num_lanes = DEFAULT_NUM_LANES;
+ of_property_read_u32(child, "cdns,num-lanes",
+ &cdns_phy->phys[node].num_lanes);
+
+ if (cdns_phy->phys[node].phy_type == PHY_TYPE_DP) {
+ switch (cdns_phy->phys[node].num_lanes) {
+ case 1:
+ case 2:
+ case 4:
+ /* valid number of lanes */
+ break;
+ default:
+ dev_err(dev, "unsupported number of lanes: %d\n",
+ cdns_phy->phys[node].num_lanes);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE;
+ of_property_read_u32(child, "cdns,max-bit-rate",
+ &cdns_phy->max_bit_rate);
+
+ switch (cdns_phy->max_bit_rate) {
+ case 1620:
+ case 2160:
+ case 2430:
+ case 2700:
+ case 3240:
+ case 4320:
+ case 5400:
+ case 8100:
+ /* valid bit rate */
+ break;
+ default:
+ dev_err(dev, "unsupported max bit rate: %dMbps\n",
+ cdns_phy->max_bit_rate);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ /* DPTX registers */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cdns_phy->base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(cdns_phy->base)) {
+ ret = PTR_ERR(cdns_phy->base);
+ goto put_child;
+ }
+
+ gphy = devm_phy_create(dev, child,
+ &cdns_torrent_phy_ops);
+ if (IS_ERR(gphy)) {
+ ret = PTR_ERR(gphy);
+ goto put_child;
+ }
+
+ dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n",
+ cdns_phy->phys[node].num_lanes,
+ cdns_phy->max_bit_rate / 1000,
+ cdns_phy->max_bit_rate % 1000);
+ } else {
+ dev_err(dev, "Driver supports only PHY_TYPE_DP\n");
+ ret = -ENOTSUPP;
+ goto put_child;
+ }
+ cdns_phy->phys[node].phy = gphy;
+ phy_set_drvdata(gphy, &cdns_phy->phys[node]);
+
+ node++;
+ }
+ cdns_phy->nsubnodes = node;
+
+ ret = cdns_regmap_init_torrent_dp(cdns_phy, cdns_phy->sd_base,
+ cdns_phy->base,
+ data->block_offset_shift,
+ data->reg_offset_shift);
+ if (ret)
+ goto put_lnk_rst;
+
+ ret = cdns_regfield_init(cdns_phy);
+ if (ret)
+ goto put_lnk_rst;
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto put_lnk_rst;
+ }
+
+ return 0;
+
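+/*
+ * If probing failed after the current child's link reset was acquired,
+ * bump 'node' so the cleanup loop below releases that reset as well.
+ */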
+put_child:
+ node++;
+put_lnk_rst:
+ for (i = 0; i < node; i++)
+ reset_control_put(cdns_phy->phys[i].lnk_rst);
+ of_node_put(child);
+ return ret;
+}
+
+static int cdns_torrent_phy_remove(struct platform_device *pdev)
+{
+ struct cdns_torrent_phy *cdns_phy = platform_get_drvdata(pdev);
+ int i;
+
+ reset_control_assert(cdns_phy->phy_rst);
+ for (i = 0; i < cdns_phy->nsubnodes; i++) {
+ reset_control_assert(cdns_phy->phys[i].lnk_rst);
+ reset_control_put(cdns_phy->phys[i].lnk_rst);
+ }
+
+ return 0;
+}
+
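+/*
+ * Per-platform address-map parameters: the block and register offset
+ * shifts describe how the Torrent register map is scaled, which differs
+ * between the reference integration and the TI J721E wrapper.
+ */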
+static const struct cdns_torrent_data cdns_map_torrent = {
+ .block_offset_shift = 0x2,
+ .reg_offset_shift = 0x2,
+};
+
+static const struct cdns_torrent_data ti_j721e_map_torrent = {
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+};
+
+static const struct of_device_id cdns_torrent_phy_of_match[] = {
+ {
+ .compatible = "cdns,torrent-phy",
+ .data = &cdns_map_torrent,
+ },
+ {
+ .compatible = "ti,j721e-serdes-10g",
+ .data = &ti_j721e_map_torrent,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match);
+
+static struct platform_driver cdns_torrent_phy_driver = {
+ .probe = cdns_torrent_phy_probe,
+ .remove = cdns_torrent_phy_remove,
+ .driver = {
+ .name = "cdns-torrent-phy",
+ .of_match_table = cdns_torrent_phy_of_match,
+ }
+};
+module_platform_driver(cdns_torrent_phy_driver);
+
+MODULE_AUTHOR("Cadence Design Systems, Inc.");
+MODULE_DESCRIPTION("Cadence Torrent PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index cb2ed3b25068..cdbcc49f7115 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -43,6 +43,8 @@
#define PA0_RG_USB20_INTR_EN BIT(5)
#define U3P_USBPHYACR1 0x004
+#define PA1_RG_INTR_CAL GENMASK(23, 19)
+#define PA1_RG_INTR_CAL_VAL(x) ((0x1f & (x)) << 19)
#define PA1_RG_VRT_SEL GENMASK(14, 12)
#define PA1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12)
#define PA1_RG_TERM_SEL GENMASK(10, 8)
@@ -60,6 +62,8 @@
#define U3P_USBPHYACR6 0x018
#define PA6_RG_U2_BC11_SW_EN BIT(23)
#define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20)
+#define PA6_RG_U2_DISCTH GENMASK(7, 4)
+#define PA6_RG_U2_DISCTH_VAL(x) ((0xf & (x)) << 4)
#define PA6_RG_U2_SQTH GENMASK(3, 0)
#define PA6_RG_U2_SQTH_VAL(x) (0xf & (x))
@@ -294,20 +298,21 @@ struct mtk_phy_instance {
struct u2phy_banks u2_banks;
struct u3phy_banks u3_banks;
};
- struct clk *ref_clk; /* reference clock of anolog phy */
+ struct clk *ref_clk; /* reference clock of (digital) phy */
+ struct clk *da_ref_clk; /* reference clock of analog phy */
u32 index;
u8 type;
int eye_src;
int eye_vrt;
int eye_term;
+ int intr;
+ int discth;
bool bc12_en;
};
struct mtk_tphy {
struct device *dev;
void __iomem *sif_base; /* only shared sif */
- /* deprecated, use @ref_clk instead in phy instance */
- struct clk *u3phya_ref; /* reference clock of usb3 anolog phy */
const struct mtk_phy_pdata *pdata;
struct mtk_phy_instance **phys;
int nphys;
@@ -850,9 +855,14 @@ static void phy_parse_property(struct mtk_tphy *tphy,
&instance->eye_vrt);
device_property_read_u32(dev, "mediatek,eye-term",
&instance->eye_term);
- dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d\n",
+ device_property_read_u32(dev, "mediatek,intr",
+ &instance->intr);
+ device_property_read_u32(dev, "mediatek,discth",
+ &instance->discth);
+ dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d, intr:%d, disc:%d\n",
instance->bc12_en, instance->eye_src,
- instance->eye_vrt, instance->eye_term);
+ instance->eye_vrt, instance->eye_term,
+ instance->intr, instance->discth);
}
static void u2_phy_props_set(struct mtk_tphy *tphy,
@@ -888,6 +898,20 @@ static void u2_phy_props_set(struct mtk_tphy *tphy,
tmp |= PA1_RG_TERM_SEL_VAL(instance->eye_term);
writel(tmp, com + U3P_USBPHYACR1);
}
+
+ if (instance->intr) {
+ tmp = readl(com + U3P_USBPHYACR1);
+ tmp &= ~PA1_RG_INTR_CAL;
+ tmp |= PA1_RG_INTR_CAL_VAL(instance->intr);
+ writel(tmp, com + U3P_USBPHYACR1);
+ }
+
+ if (instance->discth) {
+ tmp = readl(com + U3P_USBPHYACR6);
+ tmp &= ~PA6_RG_U2_DISCTH;
+ tmp |= PA6_RG_U2_DISCTH_VAL(instance->discth);
+ writel(tmp, com + U3P_USBPHYACR6);
+ }
}
static int mtk_phy_init(struct phy *phy)
@@ -896,15 +920,16 @@ static int mtk_phy_init(struct phy *phy)
struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent);
int ret;
- ret = clk_prepare_enable(tphy->u3phya_ref);
+ ret = clk_prepare_enable(instance->ref_clk);
if (ret) {
- dev_err(tphy->dev, "failed to enable u3phya_ref\n");
+ dev_err(tphy->dev, "failed to enable ref_clk\n");
return ret;
}
- ret = clk_prepare_enable(instance->ref_clk);
+ ret = clk_prepare_enable(instance->da_ref_clk);
if (ret) {
- dev_err(tphy->dev, "failed to enable ref_clk\n");
+ dev_err(tphy->dev, "failed to enable da_ref_clk\n");
+ clk_disable_unprepare(instance->ref_clk);
return ret;
}
@@ -967,7 +992,7 @@ static int mtk_phy_exit(struct phy *phy)
u2_phy_instance_exit(tphy, instance);
clk_disable_unprepare(instance->ref_clk);
- clk_disable_unprepare(tphy->u3phya_ref);
+ clk_disable_unprepare(instance->da_ref_clk);
return 0;
}
@@ -1102,11 +1127,6 @@ static int mtk_tphy_probe(struct platform_device *pdev)
}
}
- /* it's deprecated, make it optional for backward compatibility */
- tphy->u3phya_ref = devm_clk_get_optional(dev, "u3phya_ref");
- if (IS_ERR(tphy->u3phya_ref))
- return PTR_ERR(tphy->u3phya_ref);
-
tphy->src_ref_clk = U3P_REF_CLK;
tphy->src_coef = U3P_SLEW_RATE_COEF;
/* update parameters of slew rate calibrate if exist */
@@ -1153,16 +1173,20 @@ static int mtk_tphy_probe(struct platform_device *pdev)
phy_set_drvdata(phy, instance);
port++;
- /* if deprecated clock is provided, ignore instance's one */
- if (tphy->u3phya_ref)
- continue;
-
- instance->ref_clk = devm_clk_get(&phy->dev, "ref");
+ instance->ref_clk = devm_clk_get_optional(&phy->dev, "ref");
if (IS_ERR(instance->ref_clk)) {
dev_err(dev, "failed to get ref_clk(id-%d)\n", port);
retval = PTR_ERR(instance->ref_clk);
goto put_child;
}
+
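+ /*
+  * The analog part may use a separate reference; like "ref" above,
+  * it is optional so legacy device trees keep working.
+  */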
+ instance->da_ref_clk =
+ devm_clk_get_optional(&phy->dev, "da_ref");
+ if (IS_ERR(instance->da_ref_clk)) {
+ dev_err(dev, "failed to get da_ref_clk(id-%d)\n", port);
+ retval = PTR_ERR(instance->da_ref_clk);
+ goto put_child;
+ }
}
provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index e46824da29f6..98674ed094d9 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -91,3 +91,23 @@ config PHY_QCOM_USB_HSIC
select GENERIC_PHY
help
Support for the USB HSIC ULPI compliant PHY on QCOM chipsets.
+
+config PHY_QCOM_USB_HS_28NM
+ tristate "Qualcomm 28nm High-Speed PHY"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
+ select GENERIC_PHY
+ help
+ Enable this to support the Qualcomm Synopsys DesignWare Core 28nm
+ High-Speed PHY driver. This driver supports the High-Speed PHY,
+ which is usually paired with either the ChipIdea or Synopsys DWC3
+ USB IP on MSM SoCs.
+
+config PHY_QCOM_USB_SS
+ tristate "Qualcomm USB Super-Speed PHY driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
+ select GENERIC_PHY
+ help
+ Enable this to support the Super-Speed USB transceiver on various
+ Qualcomm chipsets.
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 283251d6a5d9..1f14aeacbd70 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -10,3 +10,5 @@ obj-$(CONFIG_PHY_QCOM_UFS_14NM) += phy-qcom-ufs-qmp-14nm.o
obj-$(CONFIG_PHY_QCOM_UFS_20NM) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
+obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o
+obj-$(CONFIG_PHY_QCOM_USB_SS) += phy-qcom-usb-ss.o
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 7db2a94f7a99..c190406246ab 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -121,6 +121,11 @@ enum qphy_reg_layout {
QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
};
+static const unsigned int msm8996_ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x168,
+};
+
static const unsigned int pciephy_regs_layout[] = {
[QPHY_COM_SW_RESET] = 0x400,
[QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
@@ -160,6 +165,18 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
};
+static const unsigned int sdm845_qmp_pciephy_regs_layout[] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x174,
+};
+
+static const unsigned int sdm845_qhp_pciephy_regs_layout[] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_STATUS] = 0x2ac,
+};
+
static const unsigned int sdm845_ufsphy_regs_layout[] = {
[QPHY_START_CTRL] = 0x00,
[QPHY_PCS_READY_STATUS] = 0x160,
@@ -331,6 +348,75 @@ static const struct qmp_phy_init_tbl msm8998_pcie_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03),
};
+static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+};
+
+static const struct qmp_phy_init_tbl msm8996_ufs_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x02),
+};
+
+static const struct qmp_phy_init_tbl msm8996_ufs_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
+};
+
static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
@@ -481,6 +567,229 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
QMP_PHY_INIT_CFG_L(QPHY_START_CTRL, 0x3),
};
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x007),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x7e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x15),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_01, 0x59),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x40),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_ENDPOINT_REFCLK_DRIVE, 0x04),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK_MSB, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PLL_LOCK_CHK_DLY_TIME, 0x73),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xbb),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG4, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qmp_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_CONFIG2, 0x52),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG2, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4, 0x1a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5, 0x06),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MISC_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL, 0x27),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2, 0x07),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN, 0x18),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_ENABLE1, 0xb0),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0, 0x8c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0, 0x20),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_RESTRIM_CTRL2, 0x05),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VCO_TUNE_MAP, 0x10),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CLK_SELECT, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_HSCLK_SEL1, 0x30),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORE_CLK_EN, 0x73),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL, 0x15),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_CMN_MODE, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV1, 0x22),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_VREGCLK_DIV2, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BGV_TRIM, 0x20),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_COM_BG_CTRL, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL0, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_TAP_EN, 0x0d),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TX_BAND_MODE, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_LANE_MODE, 0x1a),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PARALLEL_RATE, 0x2f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE0, 0x09),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE1, 0x09),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CML_CTRL_MODE2, 0x1b),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2, 0x07),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0, 0x31),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1, 0x31),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2, 0x03),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE, 0x02),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CGA_THRESH_DFE, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXENGINE_EN0, 0x12),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME, 0x25),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME, 0x05),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_GAIN, 0x26),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DFE_GAIN, 0x12),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_OFFSET_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PRE_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EQ_INTVAL, 0x15),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_EDAC_INITVAL, 0x28),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB0, 0x7f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_INITB1, 0x07),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RXEQ_CTRL, 0x70),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0, 0x8b),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2, 0x0a),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_BAND, 0x02),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0, 0x5c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1, 0x3e),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2, 0x3f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_ENABLES, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_CNTRL, 0xa0),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL, 0x08),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DCC_GAIN, 0x01),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_EN_SIGNAL, 0xc3),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL, 0x00),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0, 0xbc),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_TS0_TIMER, 0x7f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE, 0x15),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_DRVR_CTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET, 0x04),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_VGA_INITVAL, 0x20),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_L0_RSM_START, 0x01),
+};
+
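+/*
+ * The QHP lane block mixes TX and RX registers, all programmed through
+ * the table above, so the dedicated RX table is left empty.
+ */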
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_rx_tbl[] = {
+};
+
+static const struct qmp_phy_init_tbl sdm845_qhp_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG, 0x3f),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG, 0x50),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB, 0x19),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB, 0x07),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB, 0x17),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB, 0x09),
+ QMP_PHY_INIT_CFG(PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5, 0x9f),
+};
+
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -988,6 +1297,8 @@ struct qmp_phy_cfg {
int rx_tbl_num;
const struct qmp_phy_init_tbl *pcs_tbl;
int pcs_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_misc_tbl;
+ int pcs_misc_tbl_num;
/* clock ids to be requested */
const char * const *clk_list;
@@ -1122,10 +1433,18 @@ static const char * const msm8996_phy_clk_l[] = {
"aux", "cfg_ahb", "ref",
};
+static const char * const msm8996_ufs_phy_clk_l[] = {
+ "ref",
+};
+
static const char * const qmp_v3_phy_clk_l[] = {
"aux", "cfg_ahb", "ref", "com_aux",
};
+static const char * const sdm845_pciephy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "refgen",
+};
+
static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
@@ -1139,6 +1458,10 @@ static const char * const msm8996_usb3phy_reset_l[] = {
"phy", "common",
};
+static const char * const sdm845_pciephy_reset_l[] = {
+ "phy",
+};
+
/* list of regulators */
static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
@@ -1175,6 +1498,31 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg msm8996_ufs_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8996_ufs_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl),
+ .tx_tbl = msm8996_ufs_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_ufs_tx_tbl),
+ .rx_tbl = msm8996_ufs_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_ufs_rx_tbl),
+
+ .clk_list = msm8996_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_ufs_phy_clk_l),
+
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+
+ .regs = msm8996_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .no_pcs_sw_reset = true,
+};
+
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -1234,6 +1582,64 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.pwrdn_delay_max = 1005, /* us */
};
+static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sdm845_qmp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
+ .tx_tbl = sdm845_qmp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl),
+ .rx_tbl = sdm845_qmp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl),
+ .pcs_tbl = sdm845_qmp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl),
+ .pcs_misc_tbl = sdm845_qmp_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sdm845_qmp_pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sdm845_qhp_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
+ .tx_tbl = sdm845_qhp_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl),
+ .rx_tbl = sdm845_qhp_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl),
+ .pcs_tbl = sdm845_qhp_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sdm845_qhp_pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -1563,6 +1969,7 @@ static int qcom_qmp_phy_enable(struct phy *phy)
void __iomem *tx = qphy->tx;
void __iomem *rx = qphy->rx;
void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
void __iomem *dp_com = qmp->dp_com;
void __iomem *status;
unsigned int mask, val, ready;
@@ -1633,6 +2040,9 @@ static int qcom_qmp_phy_enable(struct phy *phy)
if (ret)
goto err_lane_rst;
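+ /*
+  * Not every PHY has a PCS_MISC block; for configs without a
+  * pcs_misc table this configure call is a no-op.
+  */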
+ qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
+ cfg->pcs_misc_tbl_num);
+
/*
* Pull out PHY from POWER DOWN state.
* This is active low enable signal to power-down PHY.
@@ -1967,7 +2377,7 @@ static const struct phy_ops qcom_qmp_phy_gen_ops = {
.owner = THIS_MODULE,
};
-static const struct phy_ops qcom_qmp_ufs_ops = {
+static const struct phy_ops qcom_qmp_pcie_ufs_ops = {
.power_on = qcom_qmp_phy_enable,
.power_off = qcom_qmp_phy_disable,
.set_mode = qcom_qmp_phy_set_mode,
@@ -2067,8 +2477,8 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
}
}
- if (qmp->cfg->type == PHY_TYPE_UFS)
- ops = &qcom_qmp_ufs_ops;
+ if (qmp->cfg->type == PHY_TYPE_UFS || qmp->cfg->type == PHY_TYPE_PCIE)
+ ops = &qcom_qmp_pcie_ufs_ops;
generic_phy = devm_phy_create(dev, np, ops);
if (IS_ERR(generic_phy)) {
@@ -2091,6 +2501,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,msm8996-qmp-pcie-phy",
.data = &msm8996_pciephy_cfg,
}, {
+ .compatible = "qcom,msm8996-qmp-ufs-phy",
+ .data = &msm8996_ufs_cfg,
+ }, {
.compatible = "qcom,msm8996-qmp-usb3-phy",
.data = &msm8996_usb3phy_cfg,
}, {
@@ -2103,6 +2516,12 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,ipq8074-qmp-pcie-phy",
.data = &ipq8074_pciephy_cfg,
}, {
+ .compatible = "qcom,sdm845-qhp-pcie-phy",
+ .data = &sdm845_qhp_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-pcie-phy",
+ .data = &sdm845_qmp_pciephy_cfg,
+ }, {
.compatible = "qcom,sdm845-qmp-usb3-phy",
.data = &qmp_v3_usb3phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 90f793c2293d..dece0e67704b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -409,4 +409,118 @@
#define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8
#define QPHY_V4_MULTI_LANE_CTRL1 0x1e0
+/* PCIE GEN3 COM registers */
+#define PCIE_GEN3_QHP_COM_SSC_EN_CENTER 0x14
+#define PCIE_GEN3_QHP_COM_SSC_PER1 0x20
+#define PCIE_GEN3_QHP_COM_SSC_PER2 0x24
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1 0x28
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2 0x2c
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE1_MODE1 0x34
+#define PCIE_GEN3_QHP_COM_SSC_STEP_SIZE2_MODE1 0x38
+#define PCIE_GEN3_QHP_COM_BIAS_EN_CKBUFLR_EN 0x54
+#define PCIE_GEN3_QHP_COM_CLK_ENABLE1 0x58
+#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE0 0x6c
+#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE0 0x70
+#define PCIE_GEN3_QHP_COM_LOCK_CMP1_MODE1 0x78
+#define PCIE_GEN3_QHP_COM_LOCK_CMP2_MODE1 0x7c
+#define PCIE_GEN3_QHP_COM_BGV_TRIM 0x98
+#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE0 0xb4
+#define PCIE_GEN3_QHP_COM_CP_CTRL_MODE1 0xb8
+#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE0 0xc0
+#define PCIE_GEN3_QHP_COM_PLL_RCTRL_MODE1 0xc4
+#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE0 0xcc
+#define PCIE_GEN3_QHP_COM_PLL_CCTRL_MODE1 0xd0
+#define PCIE_GEN3_QHP_COM_SYSCLK_EN_SEL 0xdc
+#define PCIE_GEN3_QHP_COM_RESTRIM_CTRL2 0xf0
+#define PCIE_GEN3_QHP_COM_LOCK_CMP_EN 0xf8
+#define PCIE_GEN3_QHP_COM_DEC_START_MODE0 0x100
+#define PCIE_GEN3_QHP_COM_DEC_START_MODE1 0x108
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE0 0x11c
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE0 0x120
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE0 0x124
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START1_MODE1 0x128
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START2_MODE1 0x12c
+#define PCIE_GEN3_QHP_COM_DIV_FRAC_START3_MODE1 0x130
+#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE0 0x150
+#define PCIE_GEN3_QHP_COM_INTEGLOOP_GAIN0_MODE1 0x158
+#define PCIE_GEN3_QHP_COM_VCO_TUNE_MAP 0x178
+#define PCIE_GEN3_QHP_COM_BG_CTRL 0x1c8
+#define PCIE_GEN3_QHP_COM_CLK_SELECT 0x1cc
+#define PCIE_GEN3_QHP_COM_HSCLK_SEL1 0x1d0
+#define PCIE_GEN3_QHP_COM_CORECLK_DIV 0x1e0
+#define PCIE_GEN3_QHP_COM_CORE_CLK_EN 0x1e8
+#define PCIE_GEN3_QHP_COM_CMN_CONFIG 0x1f0
+#define PCIE_GEN3_QHP_COM_SVS_MODE_CLK_SEL 0x1fc
+#define PCIE_GEN3_QHP_COM_CORECLK_DIV_MODE1 0x21c
+#define PCIE_GEN3_QHP_COM_CMN_MODE 0x224
+#define PCIE_GEN3_QHP_COM_VREGCLK_DIV1 0x228
+#define PCIE_GEN3_QHP_COM_VREGCLK_DIV2 0x22c
+
+/* PCIE GEN3 QHP Lane registers */
+#define PCIE_GEN3_QHP_L0_DRVR_CTRL0 0xc
+#define PCIE_GEN3_QHP_L0_DRVR_CTRL1 0x10
+#define PCIE_GEN3_QHP_L0_DRVR_CTRL2 0x14
+#define PCIE_GEN3_QHP_L0_DRVR_TAP_EN 0x18
+#define PCIE_GEN3_QHP_L0_TX_BAND_MODE 0x60
+#define PCIE_GEN3_QHP_L0_LANE_MODE 0x64
+#define PCIE_GEN3_QHP_L0_PARALLEL_RATE 0x7c
+#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE0 0xc0
+#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE1 0xc4
+#define PCIE_GEN3_QHP_L0_CML_CTRL_MODE2 0xc8
+#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE1 0xd0
+#define PCIE_GEN3_QHP_L0_PREAMP_CTRL_MODE2 0xd4
+#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE0 0xd8
+#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE1 0xdc
+#define PCIE_GEN3_QHP_L0_MIXER_CTRL_MODE2 0xe0
+#define PCIE_GEN3_QHP_L0_CTLE_THRESH_DFE 0xfc
+#define PCIE_GEN3_QHP_L0_CGA_THRESH_DFE 0x100
+#define PCIE_GEN3_QHP_L0_RXENGINE_EN0 0x108
+#define PCIE_GEN3_QHP_L0_CTLE_TRAIN_TIME 0x114
+#define PCIE_GEN3_QHP_L0_CTLE_DFE_OVRLP_TIME 0x118
+#define PCIE_GEN3_QHP_L0_DFE_REFRESH_TIME 0x11c
+#define PCIE_GEN3_QHP_L0_DFE_ENABLE_TIME 0x120
+#define PCIE_GEN3_QHP_L0_VGA_GAIN 0x124
+#define PCIE_GEN3_QHP_L0_DFE_GAIN 0x128
+#define PCIE_GEN3_QHP_L0_EQ_GAIN 0x130
+#define PCIE_GEN3_QHP_L0_OFFSET_GAIN 0x134
+#define PCIE_GEN3_QHP_L0_PRE_GAIN 0x138
+#define PCIE_GEN3_QHP_L0_VGA_INITVAL 0x13c
+#define PCIE_GEN3_QHP_L0_EQ_INTVAL 0x154
+#define PCIE_GEN3_QHP_L0_EDAC_INITVAL 0x160
+#define PCIE_GEN3_QHP_L0_RXEQ_INITB0 0x168
+#define PCIE_GEN3_QHP_L0_RXEQ_INITB1 0x16c
+#define PCIE_GEN3_QHP_L0_RCVRDONE_THRESH1 0x178
+#define PCIE_GEN3_QHP_L0_RXEQ_CTRL 0x180
+#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE0 0x184
+#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE1 0x188
+#define PCIE_GEN3_QHP_L0_UCDR_FO_GAIN_MODE2 0x18c
+#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE0 0x190
+#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE1 0x194
+#define PCIE_GEN3_QHP_L0_UCDR_SO_GAIN_MODE2 0x198
+#define PCIE_GEN3_QHP_L0_UCDR_SO_CONFIG 0x19c
+#define PCIE_GEN3_QHP_L0_RX_BAND 0x1a4
+#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE0 0x1c0
+#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE1 0x1c4
+#define PCIE_GEN3_QHP_L0_RX_RCVR_PATH1_MODE2 0x1c8
+#define PCIE_GEN3_QHP_L0_SIGDET_ENABLES 0x230
+#define PCIE_GEN3_QHP_L0_SIGDET_CNTRL 0x234
+#define PCIE_GEN3_QHP_L0_SIGDET_DEGLITCH_CNTRL 0x238
+#define PCIE_GEN3_QHP_L0_DCC_GAIN 0x2a4
+#define PCIE_GEN3_QHP_L0_RSM_START 0x2a8
+#define PCIE_GEN3_QHP_L0_RX_EN_SIGNAL 0x2ac
+#define PCIE_GEN3_QHP_L0_PSM_RX_EN_CAL 0x2b0
+#define PCIE_GEN3_QHP_L0_RX_MISC_CNTRL0 0x2b8
+#define PCIE_GEN3_QHP_L0_TS0_TIMER 0x2c0
+#define PCIE_GEN3_QHP_L0_DLL_HIGHDATARATE 0x2c4
+#define PCIE_GEN3_QHP_L0_RX_RESETCODE_OFFSET 0x2cc
+
+/* PCIE GEN3 PCS registers */
+#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M3P5DB 0x2c
+#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M3P5DB 0x40
+#define PCIE_GEN3_QHP_PHY_TXMGN_MAIN_V0_M6DB 0x54
+#define PCIE_GEN3_QHP_PHY_TXMGN_POST_V0_M6DB 0x68
+#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG 0x15c
+#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5 0x16c
+#define PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG 0x174
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index bf94a52d3087..393011a05b48 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
@@ -66,6 +66,14 @@
#define IMP_RES_OFFSET_MASK GENMASK(5, 0)
#define IMP_RES_OFFSET_SHIFT 0x0
+/* QUSB2PHY_PLL_BIAS_CONTROL_2 register bits */
+#define BIAS_CTRL2_RES_OFFSET_MASK GENMASK(5, 0)
+#define BIAS_CTRL2_RES_OFFSET_SHIFT 0x0
+
+/* QUSB2PHY_CHG_CONTROL_2 register bits */
+#define CHG_CTRL2_OFFSET_MASK GENMASK(5, 4)
+#define CHG_CTRL2_OFFSET_SHIFT 0x4
+
/* QUSB2PHY_PORT_TUNE1 register bits */
#define HSTX_TRIM_MASK GENMASK(7, 4)
#define HSTX_TRIM_SHIFT 0x4
@@ -73,6 +81,10 @@
#define PREEMPHASIS_EN_MASK GENMASK(1, 0)
#define PREEMPHASIS_EN_SHIFT 0x0
+/* QUSB2PHY_PORT_TUNE2 register bits */
+#define HSDISC_TRIM_MASK GENMASK(1, 0)
+#define HSDISC_TRIM_SHIFT 0x0
+
#define QUSB2PHY_PLL_ANALOG_CONTROLS_TWO 0x04
#define QUSB2PHY_PLL_CLOCK_INVERTERS 0x18c
#define QUSB2PHY_PLL_CMODE 0x2c
@@ -177,7 +189,7 @@ static const struct qusb2_phy_init_tbl msm8998_init_tbl[] = {
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_DIGITAL_TIMERS_TWO, 0x19),
};
-static const unsigned int sdm845_regs_layout[] = {
+static const unsigned int qusb2_v2_regs_layout[] = {
[QUSB2PHY_PLL_CORE_INPUT_OVERRIDE] = 0xa8,
[QUSB2PHY_PLL_STATUS] = 0x1a0,
[QUSB2PHY_PORT_TUNE1] = 0x240,
@@ -191,7 +203,7 @@ static const unsigned int sdm845_regs_layout[] = {
[QUSB2PHY_INTR_CTRL] = 0x230,
};
-static const struct qusb2_phy_init_tbl sdm845_init_tbl[] = {
+static const struct qusb2_phy_init_tbl qusb2_v2_init_tbl[] = {
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_ANALOG_CONTROLS_TWO, 0x03),
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CLOCK_INVERTERS, 0x7c),
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CMODE, 0x80),
@@ -258,10 +270,10 @@ static const struct qusb2_phy_cfg msm8998_phy_cfg = {
.update_tune1_with_efuse = true,
};
-static const struct qusb2_phy_cfg sdm845_phy_cfg = {
- .tbl = sdm845_init_tbl,
- .tbl_num = ARRAY_SIZE(sdm845_init_tbl),
- .regs = sdm845_regs_layout,
+static const struct qusb2_phy_cfg qusb2_v2_phy_cfg = {
+ .tbl = qusb2_v2_init_tbl,
+ .tbl_num = ARRAY_SIZE(qusb2_v2_init_tbl),
+ .regs = qusb2_v2_regs_layout,
.disable_ctrl = (PWR_CTRL1_VREF_SUPPLY_TRIM | PWR_CTRL1_CLAMP_N_EN |
POWER_DOWN),
@@ -277,6 +289,34 @@ static const char * const qusb2_phy_vreg_names[] = {
#define QUSB2_NUM_VREGS ARRAY_SIZE(qusb2_phy_vreg_names)
+/*
+ * struct override_param - structure holding a single qusb2 v2 phy override
+ * @override: true when the corresponding device tree property exists
+ * @value: value read from that property
+ */
+struct override_param {
+ bool override;
+ u8 value;
+};
+
+/* struct override_params - structure holding qusb2 v2 phy overriding params
+ * @imp_res_offset: rescode offset to be updated in IMP_CTRL1 register
+ * @hstx_trim: HSTX_TRIM to be updated in TUNE1 register
+ * @preemphasis: Amplitude Pre-Emphasis to be updated in TUNE1 register
+ * @preemphasis_width: half/full-width Pre-Emphasis updated via TUNE1
+ * @bias_ctrl: bias ctrl to be updated in BIAS_CONTROL_2 register
+ * @charge_ctrl: charge ctrl to be updated in CHG_CTRL2 register
+ * @hsdisc_trim: disconnect threshold to be updated in TUNE2 register
+ */
+struct override_params {
+ struct override_param imp_res_offset;
+ struct override_param hstx_trim;
+ struct override_param preemphasis;
+ struct override_param preemphasis_width;
+ struct override_param bias_ctrl;
+ struct override_param charge_ctrl;
+ struct override_param hsdisc_trim;
+};
+
/**
* struct qusb2_phy - structure holding qusb2 phy attributes
*
@@ -292,14 +332,7 @@ static const char * const qusb2_phy_vreg_names[] = {
* @tcsr: TCSR syscon register map
* @cell: nvmem cell containing phy tuning value
*
- * @override_imp_res_offset: PHY should use different rescode offset
- * @imp_res_offset_value: rescode offset to be updated in IMP_CTRL1 register
- * @override_hstx_trim: PHY should use different HSTX o/p current value
- * @hstx_trim_value: HSTX_TRIM value to be updated in TUNE1 register
- * @override_preemphasis: PHY should use different pre-amphasis amplitude
- * @preemphasis_level: Amplitude Pre-Emphasis to be updated in TUNE1 register
- * @override_preemphasis_width: PHY should use different pre-emphasis duration
- * @preemphasis_width: half/full-width Pre-Emphasis updated via TUNE1
+ * @overrides: pointer to structure for all overriding tuning params
*
* @cfg: phy config data
* @has_se_clk_scheme: indicate if PHY has single-ended ref clock scheme
@@ -319,14 +352,7 @@ struct qusb2_phy {
struct regmap *tcsr;
struct nvmem_cell *cell;
- bool override_imp_res_offset;
- u8 imp_res_offset_value;
- bool override_hstx_trim;
- u8 hstx_trim_value;
- bool override_preemphasis;
- u8 preemphasis_level;
- bool override_preemphasis_width;
- u8 preemphasis_width;
+ struct override_params overrides;
const struct qusb2_phy_cfg *cfg;
bool has_se_clk_scheme;
@@ -394,24 +420,35 @@ void qcom_qusb2_phy_configure(void __iomem *base,
static void qusb2_phy_override_phy_params(struct qusb2_phy *qphy)
{
const struct qusb2_phy_cfg *cfg = qphy->cfg;
+ struct override_params *or = &qphy->overrides;
- if (qphy->override_imp_res_offset)
+ if (or->imp_res_offset.override)
qusb2_write_mask(qphy->base, QUSB2PHY_IMP_CTRL1,
- qphy->imp_res_offset_value << IMP_RES_OFFSET_SHIFT,
+ or->imp_res_offset.value << IMP_RES_OFFSET_SHIFT,
IMP_RES_OFFSET_MASK);
- if (qphy->override_hstx_trim)
+ if (or->bias_ctrl.override)
+ qusb2_write_mask(qphy->base, QUSB2PHY_PLL_BIAS_CONTROL_2,
+ or->bias_ctrl.value << BIAS_CTRL2_RES_OFFSET_SHIFT,
+ BIAS_CTRL2_RES_OFFSET_MASK);
+
+ if (or->charge_ctrl.override)
+ qusb2_write_mask(qphy->base, QUSB2PHY_CHG_CTRL2,
+ or->charge_ctrl.value << CHG_CTRL2_OFFSET_SHIFT,
+ CHG_CTRL2_OFFSET_MASK);
+
+ if (or->hstx_trim.override)
qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
- qphy->hstx_trim_value << HSTX_TRIM_SHIFT,
+ or->hstx_trim.value << HSTX_TRIM_SHIFT,
HSTX_TRIM_MASK);
- if (qphy->override_preemphasis)
+ if (or->preemphasis.override)
qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
- qphy->preemphasis_level << PREEMPHASIS_EN_SHIFT,
+ or->preemphasis.value << PREEMPHASIS_EN_SHIFT,
PREEMPHASIS_EN_MASK);
- if (qphy->override_preemphasis_width) {
- if (qphy->preemphasis_width ==
+ if (or->preemphasis_width.override) {
+ if (or->preemphasis_width.value ==
QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT)
qusb2_setbits(qphy->base,
cfg->regs[QUSB2PHY_PORT_TUNE1],
@@ -421,6 +458,11 @@ static void qusb2_phy_override_phy_params(struct qusb2_phy *qphy)
cfg->regs[QUSB2PHY_PORT_TUNE1],
PREEMPH_WIDTH_HALF_BIT);
}
+
+ if (or->hsdisc_trim.override)
+ qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
+ or->hsdisc_trim.value << HSDISC_TRIM_SHIFT,
+ HSDISC_TRIM_MASK);
}
/*
@@ -774,8 +816,15 @@ static const struct of_device_id qusb2_phy_of_match_table[] = {
.compatible = "qcom,msm8998-qusb2-phy",
.data = &msm8998_phy_cfg,
}, {
+ /*
+ * Deprecated. Only here to support legacy device
+ * trees that didn't include "qcom,qusb2-v2-phy"
+ */
.compatible = "qcom,sdm845-qusb2-phy",
- .data = &sdm845_phy_cfg,
+ .data = &qusb2_v2_phy_cfg,
+ }, {
+ .compatible = "qcom,qusb2-v2-phy",
+ .data = &qusb2_v2_phy_cfg,
},
{ },
};
@@ -796,10 +845,12 @@ static int qusb2_phy_probe(struct platform_device *pdev)
int ret, i;
int num;
u32 value;
+ struct override_params *or;
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
if (!qphy)
return -ENOMEM;
+ or = &qphy->overrides;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
qphy->base = devm_ioremap_resource(dev, res);
@@ -864,26 +915,44 @@ static int qusb2_phy_probe(struct platform_device *pdev)
if (!of_property_read_u32(dev->of_node, "qcom,imp-res-offset-value",
&value)) {
- qphy->imp_res_offset_value = (u8)value;
- qphy->override_imp_res_offset = true;
+ or->imp_res_offset.value = (u8)value;
+ or->imp_res_offset.override = true;
+ }
+
+ if (!of_property_read_u32(dev->of_node, "qcom,bias-ctrl-value",
+ &value)) {
+ or->bias_ctrl.value = (u8)value;
+ or->bias_ctrl.override = true;
+ }
+
+ if (!of_property_read_u32(dev->of_node, "qcom,charge-ctrl-value",
+ &value)) {
+ or->charge_ctrl.value = (u8)value;
+ or->charge_ctrl.override = true;
}
if (!of_property_read_u32(dev->of_node, "qcom,hstx-trim-value",
&value)) {
- qphy->hstx_trim_value = (u8)value;
- qphy->override_hstx_trim = true;
+ or->hstx_trim.value = (u8)value;
+ or->hstx_trim.override = true;
}
if (!of_property_read_u32(dev->of_node, "qcom,preemphasis-level",
&value)) {
- qphy->preemphasis_level = (u8)value;
- qphy->override_preemphasis = true;
+ or->preemphasis.value = (u8)value;
+ or->preemphasis.override = true;
}
if (!of_property_read_u32(dev->of_node, "qcom,preemphasis-width",
&value)) {
- qphy->preemphasis_width = (u8)value;
- qphy->override_preemphasis_width = true;
+ or->preemphasis_width.value = (u8)value;
+ or->preemphasis_width.override = true;
+ }
+
+ if (!of_property_read_u32(dev->of_node, "qcom,hsdisc-trim-value",
+ &value)) {
+ or->hsdisc_trim.value = (u8)value;
+ or->hsdisc_trim.override = true;
}
pm_runtime_set_active(dev);
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
new file mode 100644
index 000000000000..a52a9bf13b75
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2009-2018, Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, Linaro Limited
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+/* PHY register and bit definitions */
+#define PHY_CTRL_COMMON0 0x078
+#define SIDDQ BIT(2)
+#define PHY_IRQ_CMD 0x0d0
+#define PHY_INTR_MASK0 0x0d4
+#define PHY_INTR_CLEAR0 0x0dc
+#define DPDM_MASK 0x1e
+#define DP_1_0 BIT(4)
+#define DP_0_1 BIT(3)
+#define DM_1_0 BIT(2)
+#define DM_0_1 BIT(1)
+
+enum hsphy_voltage {
+ VOL_NONE,
+ VOL_MIN,
+ VOL_MAX,
+ VOL_NUM,
+};
+
+enum hsphy_vreg {
+ VDD,
+ VDDA_1P8,
+ VDDA_3P3,
+ VREG_NUM,
+};
+
+struct hsphy_init_seq {
+ int offset;
+ int val;
+ int delay;
+};
+
+struct hsphy_data {
+ const struct hsphy_init_seq *init_seq;
+ unsigned int init_seq_num;
+};
+
+struct hsphy_priv {
+ void __iomem *base;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *phy_reset;
+ struct reset_control *por_reset;
+ struct regulator_bulk_data vregs[VREG_NUM];
+ const struct hsphy_data *data;
+ enum phy_mode mode;
+};
+
+static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct hsphy_priv *priv = phy_get_drvdata(phy);
+
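+ /*
+  * Only cache the requested mode here; it is consumed when the HV
+  * wakeup interrupts are armed in the power-off path.
+  */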
+ priv->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ priv->mode = mode;
+
+ return 0;
+}
+
+static void qcom_snps_hsphy_enable_hv_interrupts(struct hsphy_priv *priv)
+{
+ u32 val;
+
+ /* Clear any existing interrupts before enabling the interrupts */
+ val = readb(priv->base + PHY_INTR_CLEAR0);
+ val |= DPDM_MASK;
+ writeb(val, priv->base + PHY_INTR_CLEAR0);
+
+ writeb(0x0, priv->base + PHY_IRQ_CMD);
+ usleep_range(200, 220);
+ writeb(0x1, priv->base + PHY_IRQ_CMD);
+
+ /* Make sure the interrupts are cleared */
+ usleep_range(200, 220);
+
+ val = readb(priv->base + PHY_INTR_MASK0);
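+ /*
+  * Arm the wakeup sources for the idle line state of the current mode:
+  * HS/FS idles with D+ high, LS idles with D- high (reversed polarity),
+  * and with nothing connected either line going high signals an attach.
+  */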
+ switch (priv->mode) {
+ case PHY_MODE_USB_HOST_HS:
+ case PHY_MODE_USB_HOST_FS:
+ case PHY_MODE_USB_DEVICE_HS:
+ case PHY_MODE_USB_DEVICE_FS:
+ val |= DP_1_0 | DM_0_1;
+ break;
+ case PHY_MODE_USB_HOST_LS:
+ case PHY_MODE_USB_DEVICE_LS:
+ val |= DP_0_1 | DM_1_0;
+ break;
+ default:
+ /* No device connected */
+ val |= DP_0_1 | DM_0_1;
+ break;
+ }
+ writeb(val, priv->base + PHY_INTR_MASK0);
+}
+
+static void qcom_snps_hsphy_disable_hv_interrupts(struct hsphy_priv *priv)
+{
+ u32 val;
+
+ val = readb(priv->base + PHY_INTR_MASK0);
+ val &= ~DPDM_MASK;
+ writeb(val, priv->base + PHY_INTR_MASK0);
+
+ /* Clear any pending interrupts */
+ val = readb(priv->base + PHY_INTR_CLEAR0);
+ val |= DPDM_MASK;
+ writeb(val, priv->base + PHY_INTR_CLEAR0);
+
+ writeb(0x0, priv->base + PHY_IRQ_CMD);
+ usleep_range(200, 220);
+
+ writeb(0x1, priv->base + PHY_IRQ_CMD);
+ usleep_range(200, 220);
+}
+
+static void qcom_snps_hsphy_enter_retention(struct hsphy_priv *priv)
+{
+ u32 val;
+
+ val = readb(priv->base + PHY_CTRL_COMMON0);
+ val |= SIDDQ;
+ writeb(val, priv->base + PHY_CTRL_COMMON0);
+}
+
+static void qcom_snps_hsphy_exit_retention(struct hsphy_priv *priv)
+{
+ u32 val;
+
+ val = readb(priv->base + PHY_CTRL_COMMON0);
+ val &= ~SIDDQ;
+ writeb(val, priv->base + PHY_CTRL_COMMON0);
+}
+
+static int qcom_snps_hsphy_power_on(struct phy *phy)
+{
+ struct hsphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = regulator_bulk_enable(VREG_NUM, priv->vregs);
+ if (ret)
+ return ret;
+
+ qcom_snps_hsphy_disable_hv_interrupts(priv);
+ qcom_snps_hsphy_exit_retention(priv);
+
+ return 0;
+}
+
+static int qcom_snps_hsphy_power_off(struct phy *phy)
+{
+ struct hsphy_priv *priv = phy_get_drvdata(phy);
+
+ qcom_snps_hsphy_enter_retention(priv);
+ qcom_snps_hsphy_enable_hv_interrupts(priv);
+ regulator_bulk_disable(VREG_NUM, priv->vregs);
+
+ return 0;
+}
+
+static int qcom_snps_hsphy_reset(struct hsphy_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->phy_reset);
+ if (ret)
+ return ret;
+
+ usleep_range(10, 15);
+
+ ret = reset_control_deassert(priv->phy_reset);
+ if (ret)
+ return ret;
+
+ usleep_range(80, 100);
+
+ return 0;
+}
+
+static void qcom_snps_hsphy_init_sequence(struct hsphy_priv *priv)
+{
+ const struct hsphy_data *data = priv->data;
+ const struct hsphy_init_seq *seq;
+ int i;
+
+ /* Device match data is optional. */
+ if (!data)
+ return;
+
+ seq = data->init_seq;
+
+ for (i = 0; i < data->init_seq_num; i++, seq++) {
+ writeb(seq->val, priv->base + seq->offset);
+ if (seq->delay)
+ usleep_range(seq->delay, seq->delay + 10);
+ }
+}
+
+static int qcom_snps_hsphy_por_reset(struct hsphy_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->por_reset);
+ if (ret)
+ return ret;
+
+ /*
+ * The Femto PHY requires a POR reset in the following scenarios:
+ *
+ * 1. After overriding the parameter registers.
+ * 2. On exit from low power mode (PHY retention).
+ *
+ * Ensure that SIDDQ is cleared before bringing the PHY
+ * out of reset.
+ */
+ qcom_snps_hsphy_exit_retention(priv);
+
+ /*
+ * As per databook, 10 usec delay is required between
+ * PHY POR assert and de-assert.
+ */
+ usleep_range(10, 20);
+ ret = reset_control_deassert(priv->por_reset);
+ if (ret)
+ return ret;
+
+ /*
+ * As per databook, it takes 75 usec for PHY to stabilize
+ * after the reset.
+ */
+ usleep_range(80, 100);
+
+ return 0;
+}
+
+static int qcom_snps_hsphy_init(struct phy *phy)
+{
+ struct hsphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = qcom_snps_hsphy_reset(priv);
+ if (ret)
+ goto disable_clocks;
+
+ qcom_snps_hsphy_init_sequence(priv);
+
+ ret = qcom_snps_hsphy_por_reset(priv);
+ if (ret)
+ goto disable_clocks;
+
+ return 0;
+
+disable_clocks:
+ clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
+ return ret;
+}
+
+static int qcom_snps_hsphy_exit(struct phy *phy)
+{
+ struct hsphy_priv *priv = phy_get_drvdata(phy);
+
+ clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
+
+ return 0;
+}
+
+static const struct phy_ops qcom_snps_hsphy_ops = {
+ .init = qcom_snps_hsphy_init,
+ .exit = qcom_snps_hsphy_exit,
+ .power_on = qcom_snps_hsphy_power_on,
+ .power_off = qcom_snps_hsphy_power_off,
+ .set_mode = qcom_snps_hsphy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static const char * const qcom_snps_hsphy_clks[] = {
+ "ref",
+ "ahb",
+ "sleep",
+};
+
+static int qcom_snps_hsphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ struct hsphy_priv *priv;
+ struct phy *phy;
+ int ret;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->num_clks = ARRAY_SIZE(qcom_snps_hsphy_clks);
+ priv->clks = devm_kcalloc(dev, priv->num_clks, sizeof(*priv->clks),
+ GFP_KERNEL);
+ if (!priv->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->num_clks; i++)
+ priv->clks[i].id = qcom_snps_hsphy_clks[i];
+
+ ret = devm_clk_bulk_get(dev, priv->num_clks, priv->clks);
+ if (ret)
+ return ret;
+
+ priv->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(priv->phy_reset))
+ return PTR_ERR(priv->phy_reset);
+
+ priv->por_reset = devm_reset_control_get_exclusive(dev, "por");
+ if (IS_ERR(priv->por_reset))
+ return PTR_ERR(priv->por_reset);
+
+ priv->vregs[VDD].supply = "vdd";
+ priv->vregs[VDDA_1P8].supply = "vdda1p8";
+ priv->vregs[VDDA_3P3].supply = "vdda3p3";
+
+ ret = devm_regulator_bulk_get(dev, VREG_NUM, priv->vregs);
+ if (ret)
+ return ret;
+
+ /* Get device match data */
+ priv->data = device_get_match_data(dev);
+
+ phy = devm_phy_create(dev, dev->of_node, &qcom_snps_hsphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider))
+ return PTR_ERR(provider);
+
+ ret = regulator_set_load(priv->vregs[VDDA_1P8].consumer, 19000);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_set_load(priv->vregs[VDDA_3P3].consumer, 16000);
+ if (ret < 0)
+ goto unset_1p8_load;
+
+ return 0;
+
+unset_1p8_load:
+ regulator_set_load(priv->vregs[VDDA_1P8].consumer, 0);
+
+ return ret;
+}
+
+/*
+ * This macro defines one step of an initialization sequence. Each tuple
+ * programs 'val' into the PHY register at 'offset' and is followed by a
+ * 'delay' in microseconds.
+ */
+#define HSPHY_INIT_CFG(o, v, d) { .offset = o, .val = v, .delay = d, }
+
+static const struct hsphy_init_seq init_seq_femtophy[] = {
+ HSPHY_INIT_CFG(0xc0, 0x01, 0),
+ HSPHY_INIT_CFG(0xe8, 0x0d, 0),
+ HSPHY_INIT_CFG(0x74, 0x12, 0),
+ HSPHY_INIT_CFG(0x98, 0x63, 0),
+ HSPHY_INIT_CFG(0x9c, 0x03, 0),
+ HSPHY_INIT_CFG(0xa0, 0x1d, 0),
+ HSPHY_INIT_CFG(0xa4, 0x03, 0),
+ HSPHY_INIT_CFG(0x8c, 0x23, 0),
+ HSPHY_INIT_CFG(0x78, 0x08, 0),
+ HSPHY_INIT_CFG(0x7c, 0xdc, 0),
+ HSPHY_INIT_CFG(0x90, 0xe0, 20),
+ HSPHY_INIT_CFG(0x74, 0x10, 0),
+ HSPHY_INIT_CFG(0x90, 0x60, 0),
+};
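
For illustration, the first entry above, HSPHY_INIT_CFG(0xc0, 0x01, 0), expands to the designated initializer

	{ .offset = 0xc0, .val = 0x01, .delay = 0 },

and qcom_snps_hsphy_init_sequence() writes each such entry with writeb(). For entries with a non-zero delay, such as HSPHY_INIT_CFG(0x90, 0xe0, 20), the loop then sleeps via usleep_range(20, 30).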
+
+static const struct hsphy_data hsphy_data_femtophy = {
+ .init_seq = init_seq_femtophy,
+ .init_seq_num = ARRAY_SIZE(init_seq_femtophy),
+};
+
+static const struct of_device_id qcom_snps_hsphy_match[] = {
+ { .compatible = "qcom,usb-hs-28nm-femtophy", .data = &hsphy_data_femtophy, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_match);
+
+static struct platform_driver qcom_snps_hsphy_driver = {
+ .probe = qcom_snps_hsphy_probe,
+ .driver = {
+ .name = "qcom,usb-hs-28nm-phy",
+ .of_match_table = qcom_snps_hsphy_match,
+ },
+};
+module_platform_driver(qcom_snps_hsphy_driver);
+
+MODULE_DESCRIPTION("Qualcomm 28nm Hi-Speed USB PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-ss.c b/drivers/phy/qualcomm/phy-qcom-usb-ss.c
new file mode 100644
index 000000000000..a3a6d3ce7ea1
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-usb-ss.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2014,2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, Linaro Limited
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define PHY_CTRL0 0x6C
+#define PHY_CTRL1 0x70
+#define PHY_CTRL2 0x74
+#define PHY_CTRL4 0x7C
+
+/* PHY_CTRL bits */
+#define REF_PHY_EN BIT(0)
+#define LANE0_PWR_ON BIT(2)
+#define SWI_PCS_CLK_SEL BIT(4)
+#define TST_PWR_DOWN BIT(4)
+#define PHY_RESET BIT(7)
+
+#define NUM_BULK_CLKS 3
+#define NUM_BULK_REGS 2
+
+struct ssphy_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct reset_control *reset_com;
+ struct reset_control *reset_phy;
+ struct regulator_bulk_data regs[NUM_BULK_REGS];
+ struct clk_bulk_data clks[NUM_BULK_CLKS];
+ enum phy_mode mode;
+};
+
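+/* Read-modify-write helper: clear the 'mask' bits at 'addr', then set 'val'. */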
+static inline void qcom_ssphy_updatel(void __iomem *addr, u32 mask, u32 val)
+{
+ writel((readl(addr) & ~mask) | val, addr);
+}
+
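+/*
+ * Reset the PHY: use the external reset controllers when "com" is
+ * present, otherwise toggle the PHY_RESET bit in PHY_CTRL1 directly.
+ */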
+static int qcom_ssphy_do_reset(struct ssphy_priv *priv)
+{
+ int ret;
+
+ if (!priv->reset_com) {
+ qcom_ssphy_updatel(priv->base + PHY_CTRL1, PHY_RESET,
+ PHY_RESET);
+ usleep_range(10, 20);
+ qcom_ssphy_updatel(priv->base + PHY_CTRL1, PHY_RESET, 0);
+ } else {
+ ret = reset_control_assert(priv->reset_com);
+ if (ret) {
+ dev_err(priv->dev, "Failed to assert reset com\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(priv->reset_phy);
+ if (ret) {
+ dev_err(priv->dev, "Failed to assert reset phy\n");
+ return ret;
+ }
+
+ usleep_range(10, 20);
+
+ ret = reset_control_deassert(priv->reset_com);
+ if (ret) {
+ dev_err(priv->dev, "Failed to deassert reset com\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(priv->reset_phy);
+ if (ret) {
+ dev_err(priv->dev, "Failed to deassert reset phy\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_ssphy_power_on(struct phy *phy)
+{
+ struct ssphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = regulator_bulk_enable(NUM_BULK_REGS, priv->regs);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(NUM_BULK_CLKS, priv->clks);
+ if (ret)
+ goto err_disable_regulator;
+
+ ret = qcom_ssphy_do_reset(priv);
+ if (ret)
+ goto err_disable_clock;
+
+ writeb(SWI_PCS_CLK_SEL, priv->base + PHY_CTRL0);
+ qcom_ssphy_updatel(priv->base + PHY_CTRL4, LANE0_PWR_ON, LANE0_PWR_ON);
+ qcom_ssphy_updatel(priv->base + PHY_CTRL2, REF_PHY_EN, REF_PHY_EN);
+ qcom_ssphy_updatel(priv->base + PHY_CTRL4, TST_PWR_DOWN, 0);
+
+ return 0;
+err_disable_clock:
+ clk_bulk_disable_unprepare(NUM_BULK_CLKS, priv->clks);
+err_disable_regulator:
+ regulator_bulk_disable(NUM_BULK_REGS, priv->regs);
+
+ return ret;
+}
+
+static int qcom_ssphy_power_off(struct phy *phy)
+{
+ struct ssphy_priv *priv = phy_get_drvdata(phy);
+
+ qcom_ssphy_updatel(priv->base + PHY_CTRL4, LANE0_PWR_ON, 0);
+ qcom_ssphy_updatel(priv->base + PHY_CTRL2, REF_PHY_EN, 0);
+ qcom_ssphy_updatel(priv->base + PHY_CTRL4, TST_PWR_DOWN, TST_PWR_DOWN);
+
+ clk_bulk_disable_unprepare(NUM_BULK_CLKS, priv->clks);
+ regulator_bulk_disable(NUM_BULK_REGS, priv->regs);
+
+ return 0;
+}
+
+static int qcom_ssphy_init_clock(struct ssphy_priv *priv)
+{
+ priv->clks[0].id = "ref";
+ priv->clks[1].id = "ahb";
+ priv->clks[2].id = "pipe";
+
+ return devm_clk_bulk_get(priv->dev, NUM_BULK_CLKS, priv->clks);
+}
+
+static int qcom_ssphy_init_regulator(struct ssphy_priv *priv)
+{
+ int ret;
+
+ priv->regs[0].supply = "vdd";
+ priv->regs[1].supply = "vdda1p8";
+ ret = devm_regulator_bulk_get(priv->dev, NUM_BULK_REGS, priv->regs);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(priv->dev, "Failed to get regulators\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int qcom_ssphy_init_reset(struct ssphy_priv *priv)
+{
+ priv->reset_com = devm_reset_control_get_optional_exclusive(priv->dev, "com");
+ if (IS_ERR(priv->reset_com)) {
+ dev_err(priv->dev, "Failed to get reset control com\n");
+ return PTR_ERR(priv->reset_com);
+ }
+
+ if (priv->reset_com) {
+ /* if reset_com is present, reset_phy is no longer optional */
+ priv->reset_phy = devm_reset_control_get_exclusive(priv->dev, "phy");
+ if (IS_ERR(priv->reset_phy)) {
+ dev_err(priv->dev, "Failed to get reset control phy\n");
+ return PTR_ERR(priv->reset_phy);
+ }
+ }
+
+ return 0;
+}
+
+static const struct phy_ops qcom_ssphy_ops = {
+ .power_off = qcom_ssphy_power_off,
+ .power_on = qcom_ssphy_power_on,
+ .owner = THIS_MODULE,
+};
+
+static int qcom_ssphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ struct ssphy_priv *priv;
+ struct phy *phy;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct ssphy_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->mode = PHY_MODE_INVALID;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ ret = qcom_ssphy_init_clock(priv);
+ if (ret)
+ return ret;
+
+ ret = qcom_ssphy_init_reset(priv);
+ if (ret)
+ return ret;
+
+ ret = qcom_ssphy_init_regulator(priv);
+ if (ret)
+ return ret;
+
+ phy = devm_phy_create(dev, dev->of_node, &qcom_ssphy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Failed to create the SS phy\n");
+ return PTR_ERR(phy);
+ }
+
+ phy_set_drvdata(phy, priv);
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id qcom_ssphy_match[] = {
+ { .compatible = "qcom,usb-ss-28nm-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_ssphy_match);
+
+static struct platform_driver qcom_ssphy_driver = {
+ .probe = qcom_ssphy_probe,
+ .driver = {
+ .name = "qcom-usb-ssphy",
+ .of_match_table = qcom_ssphy_match,
+ },
+};
+module_platform_driver(qcom_ssphy_driver);
+
+MODULE_DESCRIPTION("Qualcomm SuperSpeed USB PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 680cc0c8825c..a84e9f027fc4 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -763,7 +763,7 @@ static void rockchip_chg_detect_work(struct work_struct *work)
/* put the controller in normal mode */
property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
- dev_info(&rport->phy->dev, "charger = %s\n",
+ dev_dbg(&rport->phy->dev, "charger = %s\n",
chg_to_string(rphy->chg_type));
return;
default:
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
index 93ffbd2940fa..e4adab375c73 100644
--- a/drivers/phy/socionext/phy-uniphier-pcie.c
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -19,6 +19,10 @@
#include <linux/resource.h>
/* PHY */
+#define PCL_PHY_CLKCTRL 0x0000
+#define PORT_SEL_MASK GENMASK(11, 9)
+#define PORT_SEL_1 FIELD_PREP(PORT_SEL_MASK, 1)
+
#define PCL_PHY_TEST_I 0x2000
#define PCL_PHY_TEST_O 0x2004
#define TESTI_DAT_MASK GENMASK(13, 6)
@@ -45,13 +49,14 @@
struct uniphier_pciephy_priv {
void __iomem *base;
struct device *dev;
- struct clk *clk;
- struct reset_control *rst;
+ struct clk *clk, *clk_gio;
+ struct reset_control *rst, *rst_gio;
const struct uniphier_pciephy_soc_data *data;
};
struct uniphier_pciephy_soc_data {
- bool has_syscon;
+ bool is_legacy;
+ void (*set_phymode)(struct regmap *regmap);
};
static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv,
@@ -111,16 +116,35 @@ static void uniphier_pciephy_deassert(struct uniphier_pciephy_priv *priv)
static int uniphier_pciephy_init(struct phy *phy)
{
struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
+ u32 val;
int ret;
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
- ret = reset_control_deassert(priv->rst);
+ ret = clk_prepare_enable(priv->clk_gio);
if (ret)
goto out_clk_disable;
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ /* support only 1 port */
+ val = readl(priv->base + PCL_PHY_CLKCTRL);
+ val &= ~PORT_SEL_MASK;
+ val |= PORT_SEL_1;
+ writel(val, priv->base + PCL_PHY_CLKCTRL);
+
+ /* legacy controller doesn't have phy_reset and parameters */
+ if (priv->data->is_legacy)
+ return 0;
+
uniphier_pciephy_set_param(priv, PCL_PHY_R00,
RX_EQ_ADJ_EN, RX_EQ_ADJ_EN);
uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ,
@@ -134,6 +158,10 @@ static int uniphier_pciephy_init(struct phy *phy)
return 0;
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_gio);
out_clk_disable:
clk_disable_unprepare(priv->clk);
@@ -144,8 +172,11 @@ static int uniphier_pciephy_exit(struct phy *phy)
{
struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
- uniphier_pciephy_assert(priv);
+ if (!priv->data->is_legacy)
+ uniphier_pciephy_assert(priv);
+ reset_control_assert(priv->rst_gio);
reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk_gio);
clk_disable_unprepare(priv->clk);
return 0;
@@ -163,7 +194,6 @@ static int uniphier_pciephy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct regmap *regmap;
- struct resource *res;
struct phy *phy;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -176,18 +206,36 @@ static int uniphier_pciephy_probe(struct platform_device *pdev)
priv->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
- priv->rst = devm_reset_control_get_shared(dev, NULL);
- if (IS_ERR(priv->rst))
- return PTR_ERR(priv->rst);
+ if (priv->data->is_legacy) {
+ priv->clk_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_gio))
+ return PTR_ERR(priv->clk_gio);
+
+ priv->rst_gio =
+ devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_gio))
+ return PTR_ERR(priv->rst_gio);
+
+ priv->clk = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+ } else {
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+ }
phy = devm_phy_create(dev, dev->of_node, &uniphier_pciephy_ops);
if (IS_ERR(phy))
@@ -195,9 +243,8 @@ static int uniphier_pciephy_probe(struct platform_device *pdev)
regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"socionext,syscon");
- if (!IS_ERR(regmap) && priv->data->has_syscon)
- regmap_update_bits(regmap, SG_USBPCIESEL,
- SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE);
+ if (!IS_ERR(regmap) && priv->data->set_phymode)
+ priv->data->set_phymode(regmap);
phy_set_drvdata(phy, priv);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -205,16 +252,31 @@ static int uniphier_pciephy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
+static void uniphier_pciephy_ld20_setmode(struct regmap *regmap)
+{
+ regmap_update_bits(regmap, SG_USBPCIESEL,
+ SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE);
+}
+
+static const struct uniphier_pciephy_soc_data uniphier_pro5_data = {
+ .is_legacy = true,
+};
+
static const struct uniphier_pciephy_soc_data uniphier_ld20_data = {
- .has_syscon = true,
+ .is_legacy = false,
+ .set_phymode = uniphier_pciephy_ld20_setmode,
};
static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = {
- .has_syscon = false,
+ .is_legacy = false,
};
static const struct of_device_id uniphier_pciephy_match[] = {
{
+ .compatible = "socionext,uniphier-pro5-pcie-phy",
+ .data = &uniphier_pro5_data,
+ },
+ {
.compatible = "socionext,uniphier-ld20-pcie-phy",
.data = &uniphier_ld20_data,
},
diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c
index 50f379fc4e06..a9bc74121f38 100644
--- a/drivers/phy/socionext/phy-uniphier-usb3hs.c
+++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c
@@ -41,10 +41,12 @@
#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
+#define RX_CHK_SYNC PHY_F(0, 5, 5) /* RX sync mode */
+#define RX_SYNC_SEL PHY_F(1, 1, 0) /* RX sync length */
#define LS_SLEW PHY_F(10, 6, 6) /* LS mode slew rate */
#define FS_LS_DRV PHY_F(10, 5, 5) /* FS/LS slew rate */
-#define MAX_PHY_PARAMS 2
+#define MAX_PHY_PARAMS 4
struct uniphier_u3hsphy_param {
struct {
@@ -66,13 +68,14 @@ struct uniphier_u3hsphy_trim_param {
struct uniphier_u3hsphy_priv {
struct device *dev;
void __iomem *base;
- struct clk *clk, *clk_parent, *clk_ext;
- struct reset_control *rst, *rst_parent;
+ struct clk *clk, *clk_parent, *clk_ext, *clk_parent_gio;
+ struct reset_control *rst, *rst_parent, *rst_parent_gio;
struct regulator *vbus;
const struct uniphier_u3hsphy_soc_data *data;
};
struct uniphier_u3hsphy_soc_data {
+ bool is_legacy;
int nparams;
const struct uniphier_u3hsphy_param param[MAX_PHY_PARAMS];
u32 config0;
@@ -256,11 +259,20 @@ static int uniphier_u3hsphy_init(struct phy *phy)
if (ret)
return ret;
- ret = reset_control_deassert(priv->rst_parent);
+ ret = clk_prepare_enable(priv->clk_parent_gio);
if (ret)
goto out_clk_disable;
- if (!priv->data->config0 && !priv->data->config1)
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_parent_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ if ((priv->data->is_legacy)
+ || (!priv->data->config0 && !priv->data->config1))
return 0;
config0 = priv->data->config0;
@@ -280,6 +292,8 @@ static int uniphier_u3hsphy_init(struct phy *phy)
out_rst_assert:
reset_control_assert(priv->rst_parent);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_parent_gio);
out_clk_disable:
clk_disable_unprepare(priv->clk_parent);
@@ -290,7 +304,9 @@ static int uniphier_u3hsphy_exit(struct phy *phy)
{
struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+ reset_control_assert(priv->rst_parent_gio);
reset_control_assert(priv->rst_parent);
+ clk_disable_unprepare(priv->clk_parent_gio);
clk_disable_unprepare(priv->clk_parent);
return 0;
@@ -309,7 +325,6 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_u3hsphy_priv *priv;
struct phy_provider *phy_provider;
- struct resource *res;
struct phy *phy;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -322,27 +337,38 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev)
priv->data->nparams > MAX_PHY_PARAMS))
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(dev, "phy");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ if (!priv->data->is_legacy) {
+ priv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->clk_ext = devm_clk_get_optional(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext))
+ return PTR_ERR(priv->clk_ext);
+
+ priv->rst = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ } else {
+ priv->clk_parent_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_parent_gio))
+ return PTR_ERR(priv->clk_parent_gio);
+
+ priv->rst_parent_gio =
+ devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_parent_gio))
+ return PTR_ERR(priv->rst_parent_gio);
+ }
priv->clk_parent = devm_clk_get(dev, "link");
if (IS_ERR(priv->clk_parent))
return PTR_ERR(priv->clk_parent);
- priv->clk_ext = devm_clk_get_optional(dev, "phy-ext");
- if (IS_ERR(priv->clk_ext))
- return PTR_ERR(priv->clk_ext);
-
- priv->rst = devm_reset_control_get_shared(dev, "phy");
- if (IS_ERR(priv->rst))
- return PTR_ERR(priv->rst);
-
priv->rst_parent = devm_reset_control_get_shared(dev, "link");
if (IS_ERR(priv->rst_parent))
return PTR_ERR(priv->rst_parent);
@@ -364,13 +390,26 @@ static int uniphier_u3hsphy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
-static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = {
+static const struct uniphier_u3hsphy_soc_data uniphier_pro5_data = {
+ .is_legacy = true,
.nparams = 0,
};
-static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = {
+static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = {
+ .is_legacy = false,
.nparams = 2,
.param = {
+ { RX_CHK_SYNC, 1 },
+ { RX_SYNC_SEL, 1 },
+ },
+};
+
+static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = {
+ .is_legacy = false,
+ .nparams = 4,
+ .param = {
+ { RX_CHK_SYNC, 1 },
+ { RX_SYNC_SEL, 1 },
{ LS_SLEW, 1 },
{ FS_LS_DRV, 1 },
},
@@ -380,7 +419,12 @@ static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = {
};
static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = {
- .nparams = 0,
+ .is_legacy = false,
+ .nparams = 2,
+ .param = {
+ { RX_CHK_SYNC, 1 },
+ { RX_SYNC_SEL, 1 },
+ },
.trim_func = uniphier_u3hsphy_trim_ld20,
.config0 = 0x92316680,
.config1 = 0x00000106,
@@ -388,6 +432,10 @@ static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = {
static const struct of_device_id uniphier_u3hsphy_match[] = {
{
+ .compatible = "socionext,uniphier-pro5-usb3-hsphy",
+ .data = &uniphier_pro5_data,
+ },
+ {
.compatible = "socionext,uniphier-pxs2-usb3-hsphy",
.data = &uniphier_pxs2_data,
},
diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c
index ec231e40ef2a..6700645bcbe6 100644
--- a/drivers/phy/socionext/phy-uniphier-usb3ss.c
+++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c
@@ -215,7 +215,6 @@ static int uniphier_u3ssphy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_u3ssphy_priv *priv;
struct phy_provider *phy_provider;
- struct resource *res;
struct phy *phy;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -228,8 +227,7 @@ static int uniphier_u3ssphy_probe(struct platform_device *pdev)
priv->data->nparams > MAX_PHY_PARAMS))
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -315,6 +313,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = {
.data = &uniphier_pro4_data,
},
{
+ .compatible = "socionext,uniphier-pro5-usb3-ssphy",
+ .data = &uniphier_pro4_data,
+ },
+ {
.compatible = "socionext,uniphier-pxs2-usb3-ssphy",
.data = &uniphier_pxs2_data,
},
diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig
index f9817c3ae85f..c591c958f1eb 100644
--- a/drivers/phy/tegra/Kconfig
+++ b/drivers/phy/tegra/Kconfig
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config PHY_TEGRA_XUSB
tristate "NVIDIA Tegra XUSB pad controller driver"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA && USB_SUPPORT
+ select USB_COMMON
+ select USB_CONN_GPIO
+ select USB_PHY
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile
index 320dd389f34d..89b84067cb4c 100644
--- a/drivers/phy/tegra/Makefile
+++ b/drivers/phy/tegra/Makefile
@@ -6,4 +6,5 @@ phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_124_SOC) += xusb-tegra124.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_186_SOC) += xusb-tegra186.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_194_SOC) += xusb-tegra186.o
obj-$(CONFIG_PHY_TEGRA194_P2U) += phy-tegra194-p2u.o
diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c
index 98d84920c676..db56c7fbe60b 100644
--- a/drivers/phy/tegra/xusb-tegra124.c
+++ b/drivers/phy/tegra/xusb-tegra124.c
@@ -1422,6 +1422,8 @@ tegra124_usb2_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra124_usb2_port_ops = {
+ .release = tegra_xusb_usb2_port_release,
+ .remove = tegra_xusb_usb2_port_remove,
.enable = tegra124_usb2_port_enable,
.disable = tegra124_usb2_port_disable,
.map = tegra124_usb2_port_map,
@@ -1443,6 +1445,7 @@ tegra124_ulpi_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra124_ulpi_port_ops = {
+ .release = tegra_xusb_ulpi_port_release,
.enable = tegra124_ulpi_port_enable,
.disable = tegra124_ulpi_port_disable,
.map = tegra124_ulpi_port_map,
@@ -1464,6 +1467,7 @@ tegra124_hsic_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra124_hsic_port_ops = {
+ .release = tegra_xusb_hsic_port_release,
.enable = tegra124_hsic_port_enable,
.disable = tegra124_hsic_port_disable,
.map = tegra124_hsic_port_map,
@@ -1647,6 +1651,8 @@ tegra124_usb3_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra124_usb3_port_ops = {
+ .release = tegra_xusb_usb3_port_release,
+ .remove = tegra_xusb_usb3_port_remove,
.enable = tegra124_usb3_port_enable,
.disable = tegra124_usb3_port_disable,
.map = tegra124_usb3_port_map,
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 84c27394c181..5d64f69b39a9 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -63,6 +63,10 @@
#define SSPX_ELPG_CLAMP_EN(x) BIT(0 + (x) * 3)
#define SSPX_ELPG_CLAMP_EN_EARLY(x) BIT(1 + (x) * 3)
#define SSPX_ELPG_VCORE_DOWN(x) BIT(2 + (x) * 3)
+#define XUSB_PADCTL_SS_PORT_CFG 0x2c
+#define PORTX_SPEED_SUPPORT_SHIFT(x) ((x) * 4)
+#define PORTX_SPEED_SUPPORT_MASK (0x3)
+#define PORT_SPEED_SUPPORT_GEN1 (0x0)
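+/* 2-bit speed-support field per port, at a 4-bit stride; 0 = Gen1 only */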
#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x88 + (x) * 0x40)
#define HS_CURR_LEVEL(x) ((x) & 0x3f)
@@ -301,6 +305,97 @@ static void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
tegra186_utmi_bias_pad_power_off(padctl);
}
+static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, USB2_VBUS_ID);
+
+ if (status) {
+ value |= VBUS_OVERRIDE;
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ } else {
+ value &= ~VBUS_OVERRIDE;
+ }
+
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ return 0;
+}
+
+static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, USB2_VBUS_ID);
+
+ if (status) {
+ if (value & VBUS_OVERRIDE) {
+ value &= ~VBUS_OVERRIDE;
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+ usleep_range(1000, 2000);
+
+ value = padctl_readl(padctl, USB2_VBUS_ID);
+ }
+
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_GROUNDED;
+ } else {
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ }
+
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ return 0;
+}
+
+static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb2_port *port = tegra_xusb_find_usb2_port(padctl,
+ lane->index);
+ int err = 0;
+
+ mutex_lock(&padctl->lock);
+
+ dev_dbg(&port->base.dev, "%s: mode %d", __func__, mode);
+
+ if (mode == PHY_MODE_USB_OTG) {
+ if (submode == USB_ROLE_HOST) {
+ tegra186_xusb_padctl_id_override(padctl, true);
+
+ err = regulator_enable(port->supply);
+ } else if (submode == USB_ROLE_DEVICE) {
+ tegra186_xusb_padctl_vbus_override(padctl, true);
+ } else if (submode == USB_ROLE_NONE) {
+ /*
+ * When the port is peripheral-only, or when the role
+ * transitions to USB_ROLE_NONE from USB_ROLE_DEVICE,
+ * the regulator is not enabled, so check before
+ * disabling it.
+ */
+ if (regulator_is_enabled(port->supply))
+ regulator_disable(port->supply);
+
+ tegra186_xusb_padctl_id_override(padctl, false);
+ tegra186_xusb_padctl_vbus_override(padctl, false);
+ }
+ }
+
+ mutex_unlock(&padctl->lock);
+
+ return err;
+}
+
static int tegra186_utmi_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
@@ -439,6 +534,7 @@ static const struct phy_ops utmi_phy_ops = {
.exit = tegra186_utmi_phy_exit,
.power_on = tegra186_utmi_phy_power_on,
.power_off = tegra186_utmi_phy_power_off,
+ .set_mode = tegra186_utmi_phy_set_mode,
.owner = THIS_MODULE,
};
@@ -503,19 +599,6 @@ static const char * const tegra186_usb2_functions[] = {
"xusb",
};
-static const struct tegra_xusb_lane_soc tegra186_usb2_lanes[] = {
- TEGRA186_LANE("usb2-0", 0, 0, 0, usb2),
- TEGRA186_LANE("usb2-1", 0, 0, 0, usb2),
- TEGRA186_LANE("usb2-2", 0, 0, 0, usb2),
-};
-
-static const struct tegra_xusb_pad_soc tegra186_usb2_pad = {
- .name = "usb2",
- .num_lanes = ARRAY_SIZE(tegra186_usb2_lanes),
- .lanes = tegra186_usb2_lanes,
- .ops = &tegra186_usb2_pad_ops,
-};
-
static int tegra186_usb2_port_enable(struct tegra_xusb_port *port)
{
return 0;
@@ -532,6 +615,8 @@ tegra186_usb2_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra186_usb2_port_ops = {
+ .release = tegra_xusb_usb2_port_release,
+ .remove = tegra_xusb_usb2_port_remove,
.enable = tegra186_usb2_port_enable,
.disable = tegra186_usb2_port_disable,
.map = tegra186_usb2_port_map,
@@ -591,6 +676,8 @@ tegra186_usb3_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra186_usb3_port_ops = {
+ .release = tegra_xusb_usb3_port_release,
+ .remove = tegra_xusb_usb3_port_remove,
.enable = tegra186_usb3_port_enable,
.disable = tegra186_usb3_port_disable,
.map = tegra186_usb3_port_map,
@@ -635,6 +722,15 @@ static int tegra186_usb3_phy_power_on(struct phy *phy)
padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_CAP);
+ if (padctl->soc->supports_gen2 && port->disable_gen2) {
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_CFG);
+ value &= ~(PORTX_SPEED_SUPPORT_MASK <<
+ PORTX_SPEED_SUPPORT_SHIFT(index));
+ value |= (PORT_SPEED_SUPPORT_GEN1 <<
+ PORTX_SPEED_SUPPORT_SHIFT(index));
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_CFG);
+ }
+
value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM_1);
value &= ~SSPX_ELPG_VCORE_DOWN(index);
padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM_1);
@@ -765,27 +861,6 @@ static const char * const tegra186_usb3_functions[] = {
"xusb",
};
-static const struct tegra_xusb_lane_soc tegra186_usb3_lanes[] = {
- TEGRA186_LANE("usb3-0", 0, 0, 0, usb3),
- TEGRA186_LANE("usb3-1", 0, 0, 0, usb3),
- TEGRA186_LANE("usb3-2", 0, 0, 0, usb3),
-};
-
-static const struct tegra_xusb_pad_soc tegra186_usb3_pad = {
- .name = "usb3",
- .num_lanes = ARRAY_SIZE(tegra186_usb3_lanes),
- .lanes = tegra186_usb3_lanes,
- .ops = &tegra186_usb3_pad_ops,
-};
-
-static const struct tegra_xusb_pad_soc * const tegra186_pads[] = {
- &tegra186_usb2_pad,
- &tegra186_usb3_pad,
-#if 0 /* TODO implement */
- &tegra186_hsic_pad,
-#endif
-};
-
static int
tegra186_xusb_read_fuse_calibration(struct tegra186_xusb_padctl *padctl)
{
@@ -802,7 +877,9 @@ tegra186_xusb_read_fuse_calibration(struct tegra186_xusb_padctl *padctl)
err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
if (err) {
- dev_err(dev, "failed to read calibration fuse: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to read calibration fuse: %d\n",
+ err);
return err;
}
@@ -857,34 +934,13 @@ static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
-static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
- bool status)
-{
- u32 value;
-
- dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
-
- value = padctl_readl(padctl, USB2_VBUS_ID);
-
- if (status) {
- value |= VBUS_OVERRIDE;
- value &= ~ID_OVERRIDE(~0);
- value |= ID_OVERRIDE_FLOATING;
- } else {
- value &= ~VBUS_OVERRIDE;
- }
-
- padctl_writel(padctl, value, USB2_VBUS_ID);
-
- return 0;
-}
-
static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.probe = tegra186_xusb_padctl_probe,
.remove = tegra186_xusb_padctl_remove,
.vbus_override = tegra186_xusb_padctl_vbus_override,
};
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
static const char * const tegra186_xusb_padctl_supply_names[] = {
"avdd-pll-erefeut",
"avdd-usb",
@@ -892,6 +948,40 @@ static const char * const tegra186_xusb_padctl_supply_names[] = {
"vddio-hsic",
};
+static const struct tegra_xusb_lane_soc tegra186_usb2_lanes[] = {
+ TEGRA186_LANE("usb2-0", 0, 0, 0, usb2),
+ TEGRA186_LANE("usb2-1", 0, 0, 0, usb2),
+ TEGRA186_LANE("usb2-2", 0, 0, 0, usb2),
+};
+
+static const struct tegra_xusb_pad_soc tegra186_usb2_pad = {
+ .name = "usb2",
+ .num_lanes = ARRAY_SIZE(tegra186_usb2_lanes),
+ .lanes = tegra186_usb2_lanes,
+ .ops = &tegra186_usb2_pad_ops,
+};
+
+static const struct tegra_xusb_lane_soc tegra186_usb3_lanes[] = {
+ TEGRA186_LANE("usb3-0", 0, 0, 0, usb3),
+ TEGRA186_LANE("usb3-1", 0, 0, 0, usb3),
+ TEGRA186_LANE("usb3-2", 0, 0, 0, usb3),
+};
+
+static const struct tegra_xusb_pad_soc tegra186_usb3_pad = {
+ .name = "usb3",
+ .num_lanes = ARRAY_SIZE(tegra186_usb3_lanes),
+ .lanes = tegra186_usb3_lanes,
+ .ops = &tegra186_usb3_pad_ops,
+};
+
+static const struct tegra_xusb_pad_soc * const tegra186_pads[] = {
+ &tegra186_usb2_pad,
+ &tegra186_usb3_pad,
+#if 0 /* TODO implement */
+ &tegra186_hsic_pad,
+#endif
+};
+
const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc = {
.num_pads = ARRAY_SIZE(tegra186_pads),
.pads = tegra186_pads,
@@ -916,6 +1006,67 @@ const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc = {
.num_supplies = ARRAY_SIZE(tegra186_xusb_padctl_supply_names),
};
EXPORT_SYMBOL_GPL(tegra186_xusb_padctl_soc);
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+static const char * const tegra194_xusb_padctl_supply_names[] = {
+ "avdd-usb",
+ "vclamp-usb",
+};
+
+static const struct tegra_xusb_lane_soc tegra194_usb2_lanes[] = {
+ TEGRA186_LANE("usb2-0", 0, 0, 0, usb2),
+ TEGRA186_LANE("usb2-1", 0, 0, 0, usb2),
+ TEGRA186_LANE("usb2-2", 0, 0, 0, usb2),
+ TEGRA186_LANE("usb2-3", 0, 0, 0, usb2),
+};
+
+static const struct tegra_xusb_pad_soc tegra194_usb2_pad = {
+ .name = "usb2",
+ .num_lanes = ARRAY_SIZE(tegra194_usb2_lanes),
+ .lanes = tegra194_usb2_lanes,
+ .ops = &tegra186_usb2_pad_ops,
+};
+
+static const struct tegra_xusb_lane_soc tegra194_usb3_lanes[] = {
+ TEGRA186_LANE("usb3-0", 0, 0, 0, usb3),
+ TEGRA186_LANE("usb3-1", 0, 0, 0, usb3),
+ TEGRA186_LANE("usb3-2", 0, 0, 0, usb3),
+ TEGRA186_LANE("usb3-3", 0, 0, 0, usb3),
+};
+
+static const struct tegra_xusb_pad_soc tegra194_usb3_pad = {
+ .name = "usb3",
+ .num_lanes = ARRAY_SIZE(tegra194_usb3_lanes),
+ .lanes = tegra194_usb3_lanes,
+ .ops = &tegra186_usb3_pad_ops,
+};
+
+static const struct tegra_xusb_pad_soc * const tegra194_pads[] = {
+ &tegra194_usb2_pad,
+ &tegra194_usb3_pad,
+};
+
+const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc = {
+ .num_pads = ARRAY_SIZE(tegra194_pads),
+ .pads = tegra194_pads,
+ .ports = {
+ .usb2 = {
+ .ops = &tegra186_usb2_port_ops,
+ .count = 4,
+ },
+ .usb3 = {
+ .ops = &tegra186_usb3_port_ops,
+ .count = 4,
+ },
+ },
+ .ops = &tegra186_xusb_padctl_ops,
+ .supply_names = tegra194_xusb_padctl_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
+ .supports_gen2 = true,
+};
+EXPORT_SYMBOL_GPL(tegra194_xusb_padctl_soc);
+#endif
MODULE_AUTHOR("JC Kuo <jckuo@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra186 XUSB Pad Controller driver");
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
index 394913bb2f20..66bd4613835b 100644
--- a/drivers/phy/tegra/xusb-tegra210.c
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -236,6 +236,7 @@
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT 18
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK 0xf
#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING 8
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_GROUNDED 0
struct tegra210_xusb_fuse_calibration {
u32 hs_curr_level[4];
@@ -935,6 +936,103 @@ static int tegra210_usb2_phy_exit(struct phy *phy)
return tegra210_xusb_padctl_disable(lane->pad->padctl);
}
+static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
+
+ if (status) {
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
+ } else {
+ value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
+
+ return 0;
+}
+
+static int tegra210_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
+
+ if (status) {
+ if (value & XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON) {
+ value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
+ usleep_range(1000, 2000);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
+ }
+
+ value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_GROUNDED <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
+ } else {
+ value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
+
+ return 0;
+}
+
+static int tegra210_usb2_phy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb2_port *port = tegra_xusb_find_usb2_port(padctl,
+ lane->index);
+ int err = 0;
+
+ mutex_lock(&padctl->lock);
+
+ dev_dbg(&port->base.dev, "%s: mode %d", __func__, mode);
+
+ if (mode == PHY_MODE_USB_OTG) {
+ if (submode == USB_ROLE_HOST) {
+ tegra210_xusb_padctl_id_override(padctl, true);
+
+ err = regulator_enable(port->supply);
+ } else if (submode == USB_ROLE_DEVICE) {
+ tegra210_xusb_padctl_vbus_override(padctl, true);
+ } else if (submode == USB_ROLE_NONE) {
+ /*
+ * When the port is peripheral-only, or when the role
+ * transitions to USB_ROLE_NONE from USB_ROLE_DEVICE,
+ * the regulator is not enabled, so check before
+ * disabling it.
+ */
+ if (regulator_is_enabled(port->supply))
+ regulator_disable(port->supply);
+
+ tegra210_xusb_padctl_id_override(padctl, false);
+ tegra210_xusb_padctl_vbus_override(padctl, false);
+ }
+ }
+
+ mutex_unlock(&padctl->lock);
+
+ return err;
+}
+
static int tegra210_usb2_phy_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
@@ -1048,9 +1146,11 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
padctl_writel(padctl, value,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
- err = regulator_enable(port->supply);
- if (err)
- return err;
+ if (port->supply && port->mode == USB_DR_MODE_HOST) {
+ err = regulator_enable(port->supply);
+ if (err)
+ return err;
+ }
mutex_lock(&padctl->lock);
@@ -1164,6 +1264,7 @@ static const struct phy_ops tegra210_usb2_phy_ops = {
.exit = tegra210_usb2_phy_exit,
.power_on = tegra210_usb2_phy_power_on,
.power_off = tegra210_usb2_phy_power_off,
+ .set_mode = tegra210_usb2_phy_set_mode,
.owner = THIS_MODULE,
};
@@ -1852,6 +1953,8 @@ tegra210_usb2_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra210_usb2_port_ops = {
+ .release = tegra_xusb_usb2_port_release,
+ .remove = tegra_xusb_usb2_port_remove,
.enable = tegra210_usb2_port_enable,
.disable = tegra210_usb2_port_disable,
.map = tegra210_usb2_port_map,
@@ -1873,6 +1976,7 @@ tegra210_hsic_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra210_hsic_port_ops = {
+ .release = tegra_xusb_hsic_port_release,
.enable = tegra210_hsic_port_enable,
.disable = tegra210_hsic_port_disable,
.map = tegra210_hsic_port_map,
@@ -2018,35 +2122,13 @@ tegra210_usb3_port_map(struct tegra_xusb_port *port)
}
static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = {
+ .release = tegra_xusb_usb3_port_release,
+ .remove = tegra_xusb_usb3_port_remove,
.enable = tegra210_usb3_port_enable,
.disable = tegra210_usb3_port_disable,
.map = tegra210_usb3_port_map,
};
-static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
- bool status)
-{
- u32 value;
-
- dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
-
- value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
-
- if (status) {
- value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
- value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
- XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
- value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
- XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
- } else {
- value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
- }
-
- padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
-
- return 0;
-}
-
static int tegra210_utmi_port_reset(struct phy *phy)
{
struct tegra_xusb_padctl *padctl;
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index f98ec3922c02..de4a46fe1763 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -66,6 +66,12 @@ static const struct of_device_id tegra_xusb_padctl_of_match[] = {
.data = &tegra186_xusb_padctl_soc,
},
#endif
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+ {
+ .compatible = "nvidia,tegra194-xusb-padctl",
+ .data = &tegra194_xusb_padctl_soc,
+ },
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
@@ -501,6 +507,10 @@ tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl, unsigned int index)
static void tegra_xusb_port_release(struct device *dev)
{
+ struct tegra_xusb_port *port = to_tegra_xusb_port(dev);
+
+ if (port->ops->release)
+ port->ops->release(port);
}
static struct device_type tegra_xusb_port_type = {
@@ -541,6 +551,16 @@ unregister:
static void tegra_xusb_port_unregister(struct tegra_xusb_port *port)
{
+ if (!IS_ERR_OR_NULL(port->usb_role_sw)) {
+ of_platform_depopulate(&port->dev);
+ usb_role_switch_unregister(port->usb_role_sw);
+ cancel_work_sync(&port->usb_phy_work);
+ usb_remove_phy(&port->usb_phy);
+ }
+
+ if (port->ops->remove)
+ port->ops->remove(port);
+
device_unregister(&port->dev);
}
@@ -551,11 +571,146 @@ static const char *const modes[] = {
[USB_DR_MODE_OTG] = "otg",
};
+static const char * const usb_roles[] = {
+ [USB_ROLE_NONE] = "none",
+ [USB_ROLE_HOST] = "host",
+ [USB_ROLE_DEVICE] = "device",
+};
+
+static enum usb_phy_events to_usb_phy_event(enum usb_role role)
+{
+ switch (role) {
+ case USB_ROLE_DEVICE:
+ return USB_EVENT_VBUS;
+
+ case USB_ROLE_HOST:
+ return USB_EVENT_ID;
+
+ default:
+ return USB_EVENT_NONE;
+ }
+}
+
+static void tegra_xusb_usb_phy_work(struct work_struct *work)
+{
+ struct tegra_xusb_port *port = container_of(work,
+ struct tegra_xusb_port,
+ usb_phy_work);
+ enum usb_role role = usb_role_switch_get_role(port->usb_role_sw);
+
+ usb_phy_set_event(&port->usb_phy, to_usb_phy_event(role));
+
+ dev_dbg(&port->dev, "%s(): calling notifier for role %s\n", __func__,
+ usb_roles[role]);
+
+ atomic_notifier_call_chain(&port->usb_phy.notifier, 0, &port->usb_phy);
+}
+
+static int tegra_xusb_role_sw_set(struct usb_role_switch *sw,
+ enum usb_role role)
+{
+ struct tegra_xusb_port *port = usb_role_switch_get_drvdata(sw);
+
+ dev_dbg(&port->dev, "%s(): role %s\n", __func__, usb_roles[role]);
+
+ schedule_work(&port->usb_phy_work);
+
+ return 0;
+}
+
+static int tegra_xusb_set_peripheral(struct usb_otg *otg,
+ struct usb_gadget *gadget)
+{
+ struct tegra_xusb_port *port = container_of(otg->usb_phy,
+ struct tegra_xusb_port,
+ usb_phy);
+
+ if (gadget != NULL)
+ schedule_work(&port->usb_phy_work);
+
+ return 0;
+}
+
+static int tegra_xusb_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+ struct tegra_xusb_port *port = container_of(otg->usb_phy,
+ struct tegra_xusb_port,
+ usb_phy);
+
+ if (host != NULL)
+ schedule_work(&port->usb_phy_work);
+
+ return 0;
+}
+
+
+static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_lane *lane;
+ struct usb_role_switch_desc role_sx_desc = {
+ .fwnode = dev_fwnode(&port->dev),
+ .set = tegra_xusb_role_sw_set,
+ };
+ int err = 0;
+
+ /*
+ * The USB role switch driver needs owner info from the parent
+ * driver. This is a suboptimal workaround. TODO: revisit this in
+ * a follow-up patch, once changes to the USB role switch driver
+ * make a cleaner solution possible.
+ */
+ port->dev.driver = devm_kzalloc(&port->dev,
+ sizeof(struct device_driver),
+ GFP_KERNEL);
+ port->dev.driver->owner = THIS_MODULE;
+
+ port->usb_role_sw = usb_role_switch_register(&port->dev,
+ &role_sx_desc);
+ if (IS_ERR(port->usb_role_sw)) {
+ err = PTR_ERR(port->usb_role_sw);
+ dev_err(&port->dev, "failed to register USB role switch: %d",
+ err);
+ return err;
+ }
+
+ INIT_WORK(&port->usb_phy_work, tegra_xusb_usb_phy_work);
+ usb_role_switch_set_drvdata(port->usb_role_sw, port);
+
+ port->usb_phy.otg = devm_kzalloc(&port->dev, sizeof(struct usb_otg),
+ GFP_KERNEL);
+ if (!port->usb_phy.otg)
+ return -ENOMEM;
+
+ lane = tegra_xusb_find_lane(port->padctl, "usb2", port->index);
+
+ /*
+ * Assign the PHY's device to the usb-phy device so that host and
+ * device drivers can use the phy reference to retrieve usb-phy
+ * details.
+ */
+ port->usb_phy.dev = &lane->pad->lanes[port->index]->dev;
+ port->usb_phy.dev->driver = port->padctl->dev->driver;
+ port->usb_phy.otg->usb_phy = &port->usb_phy;
+ port->usb_phy.otg->set_peripheral = tegra_xusb_set_peripheral;
+ port->usb_phy.otg->set_host = tegra_xusb_set_host;
+
+ err = usb_add_phy_dev(&port->usb_phy);
+ if (err < 0) {
+ dev_err(&port->dev, "Failed to add USB PHY: %d\n", err);
+ return err;
+ }
+
+ /* populate connector entry */
+ of_platform_populate(port->dev.of_node, NULL, NULL, &port->dev);
+
+ return err;
+}
+
static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
{
struct tegra_xusb_port *port = &usb2->base;
struct device_node *np = port->dev.of_node;
const char *mode;
+ int err;
usb2->internal = of_property_read_bool(np, "nvidia,internal");
@@ -572,7 +727,21 @@ static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
usb2->mode = USB_DR_MODE_HOST;
}
- usb2->supply = devm_regulator_get(&port->dev, "vbus");
+ /* usb-role-switch property is mandatory for OTG/Peripheral modes */
+ if (usb2->mode == USB_DR_MODE_PERIPHERAL ||
+ usb2->mode == USB_DR_MODE_OTG) {
+ if (of_property_read_bool(np, "usb-role-switch")) {
+ err = tegra_xusb_setup_usb_role_switch(port);
+ if (err < 0)
+ return err;
+ } else {
+ dev_err(&port->dev, "usb-role-switch not found for %s mode",
+ modes[usb2->mode]);
+ return -EINVAL;
+ }
+ }
+
+ usb2->supply = regulator_get(&port->dev, "vbus");
return PTR_ERR_OR_ZERO(usb2->supply);
}
@@ -591,7 +760,7 @@ static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
if (!np || !of_device_is_available(np))
goto out;
- usb2 = devm_kzalloc(padctl->dev, sizeof(*usb2), GFP_KERNEL);
+ usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
if (!usb2) {
err = -ENOMEM;
goto out;
@@ -622,6 +791,20 @@ out:
return err;
}
+void tegra_xusb_usb2_port_release(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_usb2_port *usb2 = to_usb2_port(port);
+
+ kfree(usb2);
+}
+
+void tegra_xusb_usb2_port_remove(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_usb2_port *usb2 = to_usb2_port(port);
+
+ regulator_put(usb2->supply);
+}
+
static int tegra_xusb_ulpi_port_parse_dt(struct tegra_xusb_ulpi_port *ulpi)
{
struct tegra_xusb_port *port = &ulpi->base;
@@ -643,7 +826,7 @@ static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
if (!np || !of_device_is_available(np))
goto out;
- ulpi = devm_kzalloc(padctl->dev, sizeof(*ulpi), GFP_KERNEL);
+ ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
if (!ulpi) {
err = -ENOMEM;
goto out;
@@ -674,6 +857,13 @@ out:
return err;
}
+void tegra_xusb_ulpi_port_release(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_ulpi_port *ulpi = to_ulpi_port(port);
+
+ kfree(ulpi);
+}
+
static int tegra_xusb_hsic_port_parse_dt(struct tegra_xusb_hsic_port *hsic)
{
/* XXX */
@@ -691,7 +881,7 @@ static int tegra_xusb_add_hsic_port(struct tegra_xusb_padctl *padctl,
if (!np || !of_device_is_available(np))
goto out;
- hsic = devm_kzalloc(padctl->dev, sizeof(*hsic), GFP_KERNEL);
+ hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
if (!hsic) {
err = -ENOMEM;
goto out;
@@ -722,10 +912,18 @@ out:
return err;
}
+void tegra_xusb_hsic_port_release(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_hsic_port *hsic = to_hsic_port(port);
+
+ kfree(hsic);
+}
+
static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3)
{
struct tegra_xusb_port *port = &usb3->base;
struct device_node *np = port->dev.of_node;
+ enum usb_device_speed maximum_speed;
u32 value;
int err;
@@ -739,7 +937,17 @@ static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3)
usb3->internal = of_property_read_bool(np, "nvidia,internal");
- usb3->supply = devm_regulator_get(&port->dev, "vbus");
+ if (device_property_present(&port->dev, "maximum-speed")) {
+ maximum_speed = usb_get_maximum_speed(&port->dev);
+ if (maximum_speed == USB_SPEED_SUPER)
+ usb3->disable_gen2 = true;
+ else if (maximum_speed == USB_SPEED_SUPER_PLUS)
+ usb3->disable_gen2 = false;
+ else
+ return -EINVAL;
+ }
+
+ usb3->supply = regulator_get(&port->dev, "vbus");
return PTR_ERR_OR_ZERO(usb3->supply);
}
@@ -759,7 +967,7 @@ static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl,
if (!np || !of_device_is_available(np))
goto out;
- usb3 = devm_kzalloc(padctl->dev, sizeof(*usb3), GFP_KERNEL);
+ usb3 = kzalloc(sizeof(*usb3), GFP_KERNEL);
if (!usb3) {
err = -ENOMEM;
goto out;
@@ -790,6 +998,20 @@ out:
return err;
}
+void tegra_xusb_usb3_port_release(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
+
+ kfree(usb3);
+}
+
+void tegra_xusb_usb3_port_remove(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
+
+ regulator_put(usb3->supply);
+}
+
static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_port *port, *tmp;
@@ -1001,7 +1223,13 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
err = tegra_xusb_setup_ports(padctl);
if (err) {
- dev_err(&pdev->dev, "failed to setup XUSB ports: %d\n", err);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ dev_fmt("failed to setup XUSB ports: %d\n"), err);
goto remove_pads;
}
@@ -1143,6 +1371,27 @@ int tegra_phy_xusb_utmi_port_reset(struct phy *phy)
}
EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
+int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ unsigned int port)
+{
+ struct tegra_xusb_usb2_port *usb2;
+ struct tegra_xusb_usb3_port *usb3;
+ int i;
+
+ usb2 = tegra_xusb_find_usb2_port(padctl, port);
+ if (!usb2)
+ return -EINVAL;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ usb3 = tegra_xusb_find_usb3_port(padctl, i);
+ if (usb3 && usb3->port == usb2->base.index)
+ return usb3->base.index;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
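
A minimal caller-side sketch of the new helper (the wrapper function is hypothetical, and the padctl handle is assumed to have been obtained elsewhere, e.g. via tegra_xusb_padctl_get()):

	/* Hypothetical caller: resolve the USB3 companion of a USB2 port. */
	static void example_log_companion(struct tegra_xusb_padctl *padctl,
					  unsigned int usb2_port)
	{
		int usb3 = tegra_xusb_padctl_get_usb3_companion(padctl, usb2_port);

		if (usb3 < 0)	/* -EINVAL or -ENODEV, per the helper above */
			pr_debug("usb2-%u: no usb3 companion (%d)\n", usb2_port, usb3);
		else
			pr_debug("usb2-%u: companion is usb3-%d\n", usb2_port, usb3);
	}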
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index da94fcce6307..ea35af747066 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -12,6 +12,7 @@
#include <linux/workqueue.h>
#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
/* legacy entry points for backwards-compatibility */
int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev);
@@ -266,9 +267,18 @@ struct tegra_xusb_port {
struct list_head list;
struct device dev;
+ struct usb_role_switch *usb_role_sw;
+ struct work_struct usb_phy_work;
+ struct usb_phy usb_phy;
+
const struct tegra_xusb_port_ops *ops;
};
+static inline struct tegra_xusb_port *to_tegra_xusb_port(struct device *dev)
+{
+ return container_of(dev, struct tegra_xusb_port, dev);
+}
+
struct tegra_xusb_lane_map {
unsigned int port;
const char *type;
@@ -303,6 +313,8 @@ to_usb2_port(struct tegra_xusb_port *port)
struct tegra_xusb_usb2_port *
tegra_xusb_find_usb2_port(struct tegra_xusb_padctl *padctl,
unsigned int index);
+void tegra_xusb_usb2_port_release(struct tegra_xusb_port *port);
+void tegra_xusb_usb2_port_remove(struct tegra_xusb_port *port);
struct tegra_xusb_ulpi_port {
struct tegra_xusb_port base;
@@ -317,6 +329,8 @@ to_ulpi_port(struct tegra_xusb_port *port)
return container_of(port, struct tegra_xusb_ulpi_port, base);
}
+void tegra_xusb_ulpi_port_release(struct tegra_xusb_port *port);
+
struct tegra_xusb_hsic_port {
struct tegra_xusb_port base;
};
@@ -327,12 +341,15 @@ to_hsic_port(struct tegra_xusb_port *port)
return container_of(port, struct tegra_xusb_hsic_port, base);
}
+void tegra_xusb_hsic_port_release(struct tegra_xusb_port *port);
+
struct tegra_xusb_usb3_port {
struct tegra_xusb_port base;
struct regulator *supply;
bool context_saved;
unsigned int port;
bool internal;
+ bool disable_gen2;
u32 tap1;
u32 amp;
@@ -349,8 +366,12 @@ to_usb3_port(struct tegra_xusb_port *port)
struct tegra_xusb_usb3_port *
tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl,
unsigned int index);
+void tegra_xusb_usb3_port_release(struct tegra_xusb_port *port);
+void tegra_xusb_usb3_port_remove(struct tegra_xusb_port *port);
struct tegra_xusb_port_ops {
+ void (*release)(struct tegra_xusb_port *port);
+ void (*remove)(struct tegra_xusb_port *port);
int (*enable)(struct tegra_xusb_port *port);
void (*disable)(struct tegra_xusb_port *port);
struct tegra_xusb_lane *(*map)(struct tegra_xusb_port *port);
@@ -392,6 +413,7 @@ struct tegra_xusb_padctl_soc {
const char * const *supply_names;
unsigned int num_supplies;
+ bool supports_gen2;
bool need_fake_usb3_port;
};
@@ -448,5 +470,8 @@ extern const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc;
#if defined(CONFIG_ARCH_TEGRA_186_SOC)
extern const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc;
#endif
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+extern const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc;
+#endif
#endif /* __PHY_TEGRA_XUSB_H */
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index 6dbe9d0b9ff3..15a3bcf32308 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -106,11 +106,8 @@ config TWL4030_USB
config PHY_TI_GMII_SEL
tristate
- default y if TI_CPSW=y || TI_CPSW_SWITCHDEV=y
- depends on TI_CPSW || TI_CPSW_SWITCHDEV || COMPILE_TEST
select GENERIC_PHY
select REGMAP
- default m
help
This driver supports configuring of the TI CPSW Port mode depending on
the Ethernet PHY connected to the CPSW Port.
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 1c536fc03c83..7edd5c3bc536 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -170,6 +170,21 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dm814 = {
.regfields = phy_gmii_sel_fields_am33xx,
};
+static const
+struct reg_field phy_gmii_sel_fields_am654[][PHY_GMII_SEL_LAST] = {
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4040, 0, 1),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD((~0), 0, 0),
+ [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0),
+ },
+};
+
+static const
+struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = {
+ .num_ports = 1,
+ .regfields = phy_gmii_sel_fields_am654,
+};
+
static const struct of_device_id phy_gmii_sel_id_table[] = {
{
.compatible = "ti,am3352-phy-gmii-sel",
@@ -187,6 +202,10 @@ static const struct of_device_id phy_gmii_sel_id_table[] = {
.compatible = "ti,dm814-phy-gmii-sel",
.data = &phy_gmii_sel_soc_dm814,
},
+ {
+ .compatible = "ti,am654-phy-gmii-sel",
+ .data = &phy_gmii_sel_soc_am654,
+ },
{}
};
MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index df0ef69dd474..834c59950d1c 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -126,6 +126,18 @@ config PINCTRL_DA850_PUPD
Driver for TI DA850/OMAP-L138/AM18XX pinconf. Used to control
pullup/pulldown pin groups.
+config PINCTRL_DA9062
+ tristate "Dialog Semiconductor DA9062 PMIC pinctrl and GPIO Support"
+ depends on MFD_DA9062
+ select GPIOLIB
+ help
+ The Dialog DA9062 PMIC provides multiple GPIOs that can be muxed for
+ different functions. This driver bundles a pinctrl driver to select the
+ function muxing and a GPIO driver to handle the pins when the GPIO
+ function is selected.
+
+ Say yes to enable pinctrl and GPIO support for the DA9062 PMIC.
+
config PINCTRL_DIGICOLOR
bool
depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST)
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 879f312bfb75..0b36a1cfca8a 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
+obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index 771d6fd50b45..f579a6593f37 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1125,316 +1125,316 @@ static const struct owl_pingroup s700_groups[] = {
};
static const char * const nor_groups[] = {
- "lcd0_d18",
- "i2s_d0",
- "i2s0_pcm0",
- "i2s1_pcm0",
- "i2s_d1",
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out0",
- "ks_out1",
- "ks_out2",
- "lcd0_d2",
- "lvds_ee_pn",
- "uart2_rx_tx",
- "spi0_i2c_pcm",
- "lvds_e_pn",
- "sd0_d0",
- "sd0_d1",
- "sd0_d2_d3",
- "sd1_d0_d3",
- "sd0_cmd",
- "sd1_cmd",
- "sens0_ckout",
- "sen0_pclk",
+ "lcd0_d18_mfp",
+ "i2s_d0_mfp",
+ "i2s0_pcm0_mfp",
+ "i2s1_pcm0_mfp",
+ "i2s_d1_mfp",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "lcd0_d2_mfp",
+ "lvds_ee_pn_mfp",
+ "uart2_rx_tx_mfp",
+ "spi0_i2c_pcm_mfp",
+ "lvds_e_pn_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd1_d0_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd1_cmd_mfp",
+ "sens0_ckout_mfp",
+ "sen0_pclk_mfp",
};
static const char * const eth_rmii_groups[] = {
- "rgmii_txd23",
- "rgmii_rxd2",
- "rgmii_rxd3",
- "rgmii_txd01",
- "rgmii_txd0",
- "rgmii_txd1",
- "rgmii_txen",
- "rgmii_rxen",
- "rgmii_rxd1",
- "rgmii_rxd0",
- "rgmii_ref_clk",
+ "rgmii_txd23_mfp",
+ "rgmii_rxd2_mfp",
+ "rgmii_rxd3_mfp",
+ "rgmii_txd01_mfp",
+ "rgmii_txd0_mfp",
+ "rgmii_txd1_mfp",
+ "rgmii_txen_mfp",
+ "rgmii_rxen_mfp",
+ "rgmii_rxd1_mfp",
+ "rgmii_rxd0_mfp",
+ "rgmii_ref_clk_mfp",
"eth_smi_dummy",
};
static const char * const eth_smii_groups[] = {
- "rgmii_txd0",
- "rgmii_txd1",
- "rgmii_rxd0",
- "rgmii_rxd1",
- "rgmii_ref_clk",
+ "rgmii_txd0_mfp",
+ "rgmii_txd1_mfp",
+ "rgmii_rxd0_mfp",
+ "rgmii_rxd1_mfp",
+ "rgmii_ref_clk_mfp",
"eth_smi_dummy",
};
static const char * const spi0_groups[] = {
- "dsi_dn0",
- "dsi_dp2",
- "dsi_dp0",
- "uart2_rx_tx",
- "spi0_i2c_pcm",
- "dsi_dn2",
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp0_mfp",
+ "uart2_rx_tx_mfp",
+ "spi0_i2c_pcm_mfp",
+ "dsi_dn2_mfp",
};
static const char * const spi1_groups[] = {
- "uart0_rx",
- "uart0_tx",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
"i2c0_mfp",
};
static const char * const spi2_groups[] = {
- "rgmii_txd01",
- "rgmii_txd0",
- "rgmii_txd1",
- "rgmii_ref_clk",
- "dnand_acle_ce0",
+ "rgmii_txd01_mfp",
+ "rgmii_txd0_mfp",
+ "rgmii_txd1_mfp",
+ "rgmii_ref_clk_mfp",
+ "dnand_acle_ce0_mfp",
};
static const char * const spi3_groups[] = {
- "rgmii_txen",
- "rgmii_rxen",
- "rgmii_rxd1",
- "rgmii_rxd0",
+ "rgmii_txen_mfp",
+ "rgmii_rxen_mfp",
+ "rgmii_rxd1_mfp",
+ "rgmii_rxd0_mfp",
};
static const char * const sens0_groups[] = {
- "csi_cn_cp",
- "sens0_ckout",
- "csi_dn_dp",
- "sen0_pclk",
+ "csi_cn_cp_mfp",
+ "sens0_ckout_mfp",
+ "csi_dn_dp_mfp",
+ "sen0_pclk_mfp",
};
static const char * const sens1_groups[] = {
- "lcd0_d18",
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out0",
- "ks_out1",
- "ks_out2",
- "sens0_ckout",
- "pcm1_in",
- "pcm1_clk",
- "pcm1_sync",
- "pcm1_out",
+ "lcd0_d18_mfp",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "sens0_ckout_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
};
static const char * const uart0_groups[] = {
- "uart2_rtsb",
- "uart2_ctsb",
- "uart0_rx",
- "uart0_tx",
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
};
static const char * const uart1_groups[] = {
- "sd0_d2_d3",
+ "sd0_d2_d3_mfp",
"i2c0_mfp",
};
static const char * const uart2_groups[] = {
- "rgmii_txen",
- "rgmii_rxen",
- "rgmii_rxd1",
- "rgmii_rxd0",
- "dsi_dn0",
- "dsi_dp2",
- "dsi_dp0",
- "uart2_rx_tx",
- "dsi_dn2",
- "uart2_rtsb",
- "uart2_ctsb",
- "sd0_d0",
- "sd0_d1",
- "sd0_d2_d3",
- "uart0_rx",
- "uart0_tx",
+ "rgmii_txen_mfp",
+ "rgmii_rxen_mfp",
+ "rgmii_rxd1_mfp",
+ "rgmii_rxd0_mfp",
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp0_mfp",
+ "uart2_rx_tx_mfp",
+ "dsi_dn2_mfp",
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
"i2c0_mfp",
"uart2_dummy"
};
static const char * const uart3_groups[] = {
- "rgmii_txd23",
- "rgmii_rxd2",
- "rgmii_rxd3",
- "uart3_rtsb",
- "uart3_ctsb",
+ "rgmii_txd23_mfp",
+ "rgmii_rxd2_mfp",
+ "rgmii_rxd3_mfp",
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
"uart3_dummy"
};
static const char * const uart4_groups[] = {
- "rgmii_txd01",
- "rgmii_ref_clk",
- "ks_out0",
- "ks_out1",
+ "rgmii_txd01_mfp",
+ "rgmii_ref_clk_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
};
static const char * const uart5_groups[] = {
- "rgmii_rxd1",
- "rgmii_rxd0",
- "ks_out0",
- "ks_out2",
- "uart3_rtsb",
- "uart3_ctsb",
- "sd0_d0",
- "sd0_d1",
+ "rgmii_rxd1_mfp",
+ "rgmii_rxd0_mfp",
+ "ks_out0_mfp",
+ "ks_out2_mfp",
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
};
static const char * const uart6_groups[] = {
- "rgmii_txd0",
- "rgmii_txd1",
+ "rgmii_txd0_mfp",
+ "rgmii_txd1_mfp",
};
static const char * const i2s0_groups[] = {
- "i2s_d0",
- "i2s_pcm1",
- "i2s0_pcm0",
+ "i2s_d0_mfp",
+ "i2s_pcm1_mfp",
+ "i2s0_pcm0_mfp",
};
static const char * const i2s1_groups[] = {
- "i2s1_pcm0",
- "i2s_d1",
+ "i2s1_pcm0_mfp",
+ "i2s_d1_mfp",
"i2s1_dummy",
- "spi0_i2c_pcm",
- "uart0_rx",
- "uart0_tx",
+ "spi0_i2c_pcm_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
};
static const char * const pcm1_groups[] = {
- "i2s_pcm1",
- "spi0_i2c_pcm",
- "uart0_rx",
- "uart0_tx",
- "pcm1_in",
- "pcm1_clk",
- "pcm1_sync",
- "pcm1_out",
+ "i2s_pcm1_mfp",
+ "spi0_i2c_pcm_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
};
static const char * const pcm0_groups[] = {
- "i2s0_pcm0",
- "i2s1_pcm0",
- "uart2_rx_tx",
- "spi0_i2c_pcm",
+ "i2s0_pcm0_mfp",
+ "i2s1_pcm0_mfp",
+ "uart2_rx_tx_mfp",
+ "spi0_i2c_pcm_mfp",
};
static const char * const ks_groups[] = {
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out0",
- "ks_out1",
- "ks_out2",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
};
static const char * const jtag_groups[] = {
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out1",
- "sd0_d0",
- "sd0_d2_d3",
- "sd0_cmd",
- "sd0_clk",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out1_mfp",
+ "sd0_d0_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
};
static const char * const pwm0_groups[] = {
- "rgmii_rxd2",
- "rgmii_txen",
- "ks_in2",
- "sen0_pclk",
+ "rgmii_rxd2_mfp",
+ "rgmii_txen_mfp",
+ "ks_in2_mfp",
+ "sen0_pclk_mfp",
};
static const char * const pwm1_groups[] = {
- "rgmii_rxen",
- "ks_in1",
- "ks_in3",
- "sens0_ckout",
+ "rgmii_rxen_mfp",
+ "ks_in1_mfp",
+ "ks_in3_mfp",
+ "sens0_ckout_mfp",
};
static const char * const pwm2_groups[] = {
- "lcd0_d18",
- "rgmii_rxd3",
- "rgmii_rxd1",
- "ks_out0",
- "ks_out2",
+ "lcd0_d18_mfp",
+ "rgmii_rxd3_mfp",
+ "rgmii_rxd1_mfp",
+ "ks_out0_mfp",
+ "ks_out2_mfp",
};
static const char * const pwm3_groups[] = {
- "rgmii_rxd0",
- "ks_out1",
- "lcd0_d2",
+ "rgmii_rxd0_mfp",
+ "ks_out1_mfp",
+ "lcd0_d2_mfp",
};
static const char * const pwm4_groups[] = {
- "lcd0_d18",
- "rgmii_txd01",
- "rgmii_txd0",
- "ks_in0",
- "pcm1_in",
- "nand_ceb3",
+ "lcd0_d18_mfp",
+ "rgmii_txd01_mfp",
+ "rgmii_txd0_mfp",
+ "ks_in0_mfp",
+ "pcm1_in_mfp",
+ "nand_ceb3_mfp",
};
static const char * const pwm5_groups[] = {
- "rgmii_txd1",
- "ks_in1",
- "pcm1_clk",
- "nand_ceb2",
+ "rgmii_txd1_mfp",
+ "ks_in1_mfp",
+ "pcm1_clk_mfp",
+ "nand_ceb2_mfp",
};
static const char * const p0_groups[] = {
- "ks_in2",
- "ks_in0",
+ "ks_in2_mfp",
+ "ks_in0_mfp",
};
static const char * const sd0_groups[] = {
- "ks_out0",
- "ks_out1",
- "ks_out2",
- "lcd0_d2",
- "dsi_dp3",
- "dsi_dp0",
- "sd0_d0",
- "sd0_d1",
- "sd0_d2_d3",
- "sd1_d0_d3",
- "sd0_cmd",
- "sd0_clk",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "lcd0_d2_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dp0_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd1_d0_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
};
static const char * const sd1_groups[] = {
- "dsi_dp2",
- "mfp1_16_14",
- "lcd0_d2",
- "mfp1_16_14_d17",
- "dsi_dp3",
- "dsi_dn3",
- "dsi_dnp1_cp_d2",
- "dsi_dnp1_cp_d17",
- "dsi_dn2",
- "sd1_d0_d3",
- "sd1_cmd",
+ "dsi_dp2_mfp",
+ "mfp1_16_14_mfp",
+ "lcd0_d2_mfp",
+ "mfp1_16_14_d17_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "dsi_dnp1_cp_d2_mfp",
+ "dsi_dnp1_cp_d17_mfp",
+ "dsi_dn2_mfp",
+ "sd1_d0_d3_mfp",
+ "sd1_cmd_mfp",
"sd1_dummy",
};
static const char * const sd2_groups[] = {
- "dnand_data_wr",
+ "dnand_data_wr_mfp",
};
static const char * const i2c0_groups[] = {
- "uart0_rx",
- "uart0_tx",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
"i2c0_mfp",
};
@@ -1448,85 +1448,85 @@ static const char * const i2c2_groups[] = {
};
static const char * const i2c3_groups[] = {
- "uart2_rx_tx",
- "pcm1_sync",
- "pcm1_out",
+ "uart2_rx_tx_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
};
static const char * const lvds_groups[] = {
- "lvds_o_pn",
- "lvds_ee_pn",
- "lvds_e_pn",
+ "lvds_o_pn_mfp",
+ "lvds_ee_pn_mfp",
+ "lvds_e_pn_mfp",
};
static const char * const bt_groups[] = {
- "i2s_pcm1",
- "i2s0_pcm0",
- "i2s1_pcm0",
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out0",
- "ks_out1",
- "ks_out2",
- "lvds_o_pn",
- "lvds_ee_pn",
- "pcm1_in",
- "pcm1_clk",
- "pcm1_sync",
- "pcm1_out",
+ "i2s_pcm1_mfp",
+ "i2s0_pcm0_mfp",
+ "i2s1_pcm0_mfp",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "lvds_o_pn_mfp",
+ "lvds_ee_pn_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
};
static const char * const lcd0_groups[] = {
- "lcd0_d18",
- "lcd0_d2",
- "mfp1_16_14_d17",
- "lvds_o_pn",
- "dsi_dp3",
- "dsi_dn3",
- "lvds_ee_pn",
- "dsi_dnp1_cp_d2",
- "dsi_dnp1_cp_d17",
- "lvds_e_pn",
+ "lcd0_d18_mfp",
+ "lcd0_d2_mfp",
+ "mfp1_16_14_d17_mfp",
+ "lvds_o_pn_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "lvds_ee_pn_mfp",
+ "dsi_dnp1_cp_d2_mfp",
+ "dsi_dnp1_cp_d17_mfp",
+ "lvds_e_pn_mfp",
};
static const char * const usb30_groups[] = {
- "ks_in1",
+ "ks_in1_mfp",
};
static const char * const clko_25m_groups[] = {
- "clko_25m",
+ "clko_25m_mfp",
};
static const char * const mipi_csi_groups[] = {
- "csi_cn_cp",
- "csi_dn_dp",
+ "csi_cn_cp_mfp",
+ "csi_dn_dp_mfp",
};
static const char * const dsi_groups[] = {
- "dsi_dn0",
- "dsi_dp2",
- "dsi_dp3",
- "dsi_dn3",
- "dsi_dp0",
- "dsi_dnp1_cp_d2",
- "dsi_dnp1_cp_d17",
- "dsi_dn2",
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "dsi_dp0_mfp",
+ "dsi_dnp1_cp_d2_mfp",
+ "dsi_dnp1_cp_d17_mfp",
+ "dsi_dn2_mfp",
"dsi_dummy",
};
static const char * const nand_groups[] = {
- "dnand_data_wr",
- "dnand_acle_ce0",
- "nand_ceb2",
- "nand_ceb3",
+ "dnand_data_wr_mfp",
+ "dnand_acle_ce0_mfp",
+ "nand_ceb2_mfp",
+ "nand_ceb3_mfp",
"nand_dummy",
};
static const char * const spdif_groups[] = {
- "uart0_tx",
+ "uart0_tx_mfp",
};
static const char * const sirq0_groups[] = {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0de1a3a96984..06bd2b70af3c 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -37,12 +37,10 @@
#define MODULE_NAME "pinctrl-bcm2835"
#define BCM2835_NUM_GPIOS 54
+#define BCM2711_NUM_GPIOS 58
#define BCM2835_NUM_BANKS 2
#define BCM2835_NUM_IRQS 3
-#define BCM2835_PIN_BITMAP_SZ \
- DIV_ROUND_UP(BCM2835_NUM_GPIOS, sizeof(unsigned long) * 8)
-
/* GPIO register offsets */
#define GPFSEL0 0x0 /* Function Select */
#define GPSET0 0x1c /* Pin Output Set */
@@ -81,10 +79,11 @@ struct bcm2835_pinctrl {
/* note: locking assumes each bank will have its own unsigned long */
unsigned long enabled_irq_map[BCM2835_NUM_BANKS];
- unsigned int irq_type[BCM2835_NUM_GPIOS];
+ unsigned int irq_type[BCM2711_NUM_GPIOS];
struct pinctrl_dev *pctl_dev;
struct gpio_chip gpio_chip;
+ struct pinctrl_desc pctl_desc;
struct pinctrl_gpio_range gpio_range;
raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
@@ -147,6 +146,10 @@ static struct pinctrl_pin_desc bcm2835_gpio_pins[] = {
BCM2835_GPIO_PIN(51),
BCM2835_GPIO_PIN(52),
BCM2835_GPIO_PIN(53),
+ BCM2835_GPIO_PIN(54),
+ BCM2835_GPIO_PIN(55),
+ BCM2835_GPIO_PIN(56),
+ BCM2835_GPIO_PIN(57),
};
/* one pin per group */
@@ -205,6 +208,10 @@ static const char * const bcm2835_gpio_groups[] = {
"gpio51",
"gpio52",
"gpio53",
+ "gpio54",
+ "gpio55",
+ "gpio56",
+ "gpio57",
};
enum bcm2835_fsel {
@@ -322,7 +329,10 @@ static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offse
if (fsel > BCM2835_FSEL_GPIO_OUT)
return -EINVAL;
- return (fsel == BCM2835_FSEL_GPIO_IN);
+ if (fsel == BCM2835_FSEL_GPIO_IN)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -355,6 +365,22 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.can_sleep = false,
};
+static const struct gpio_chip bcm2711_gpio_chip = {
+ .label = "pinctrl-bcm2711",
+ .owner = THIS_MODULE,
+ .request = gpiochip_generic_request,
+ .free = gpiochip_generic_free,
+ .direction_input = bcm2835_gpio_direction_input,
+ .direction_output = bcm2835_gpio_direction_output,
+ .get_direction = bcm2835_gpio_get_direction,
+ .get = bcm2835_gpio_get,
+ .set = bcm2835_gpio_set,
+ .set_config = gpiochip_generic_config,
+ .base = -1,
+ .ngpio = BCM2711_NUM_GPIOS,
+ .can_sleep = false,
+};
+
static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
unsigned int bank, u32 mask)
{
@@ -401,7 +427,7 @@ static void bcm2835_gpio_irq_handler(struct irq_desc *desc)
bcm2835_gpio_irq_handle_bank(pc, 0, 0xf0000000);
bcm2835_gpio_irq_handle_bank(pc, 1, 0x00003fff);
break;
- case 2: /* IRQ2 covers GPIOs 46-53 */
+ case 2: /* IRQ2 covers GPIOs 46-57 */
bcm2835_gpio_irq_handle_bank(pc, 1, 0x003fc000);
break;
}
@@ -620,7 +646,7 @@ static struct irq_chip bcm2835_gpio_irq_chip = {
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
{
- return ARRAY_SIZE(bcm2835_gpio_groups);
+ return BCM2835_NUM_GPIOS;
}
static const char *bcm2835_pctl_get_group_name(struct pinctrl_dev *pctldev,
@@ -778,7 +804,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
err = of_property_read_u32_index(np, "brcm,pins", i, &pin);
if (err)
goto out;
- if (pin >= ARRAY_SIZE(bcm2835_gpio_pins)) {
+ if (pin >= pc->pctl_desc.npins) {
dev_err(pc->dev, "%pOF: invalid brcm,pins value %d\n",
np, pin);
err = -EINVAL;
@@ -854,7 +880,7 @@ static int bcm2835_pmx_get_function_groups(struct pinctrl_dev *pctldev,
{
/* every pin can do every function */
*groups = bcm2835_gpio_groups;
- *num_groups = ARRAY_SIZE(bcm2835_gpio_groups);
+ *num_groups = BCM2835_NUM_GPIOS;
return 0;
}
@@ -1054,29 +1080,62 @@ static const struct pinconf_ops bcm2711_pinconf_ops = {
.pin_config_set = bcm2711_pinconf_set,
};
-static struct pinctrl_desc bcm2835_pinctrl_desc = {
+static const struct pinctrl_desc bcm2835_pinctrl_desc = {
.name = MODULE_NAME,
.pins = bcm2835_gpio_pins,
- .npins = ARRAY_SIZE(bcm2835_gpio_pins),
+ .npins = BCM2835_NUM_GPIOS,
.pctlops = &bcm2835_pctl_ops,
.pmxops = &bcm2835_pmx_ops,
.confops = &bcm2835_pinconf_ops,
.owner = THIS_MODULE,
};
-static struct pinctrl_gpio_range bcm2835_pinctrl_gpio_range = {
+static const struct pinctrl_desc bcm2711_pinctrl_desc = {
+ .name = "pinctrl-bcm2711",
+ .pins = bcm2835_gpio_pins,
+ .npins = BCM2711_NUM_GPIOS,
+ .pctlops = &bcm2835_pctl_ops,
+ .pmxops = &bcm2835_pmx_ops,
+ .confops = &bcm2711_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct pinctrl_gpio_range bcm2835_pinctrl_gpio_range = {
.name = MODULE_NAME,
.npins = BCM2835_NUM_GPIOS,
};
+static const struct pinctrl_gpio_range bcm2711_pinctrl_gpio_range = {
+ .name = "pinctrl-bcm2711",
+ .npins = BCM2711_NUM_GPIOS,
+};
+
+struct bcm_plat_data {
+ const struct gpio_chip *gpio_chip;
+ const struct pinctrl_desc *pctl_desc;
+ const struct pinctrl_gpio_range *gpio_range;
+};
+
+static const struct bcm_plat_data bcm2835_plat_data = {
+ .gpio_chip = &bcm2835_gpio_chip,
+ .pctl_desc = &bcm2835_pinctrl_desc,
+ .gpio_range = &bcm2835_pinctrl_gpio_range,
+};
+
+static const struct bcm_plat_data bcm2711_plat_data = {
+ .gpio_chip = &bcm2711_gpio_chip,
+ .pctl_desc = &bcm2711_pinctrl_desc,
+ .gpio_range = &bcm2711_pinctrl_gpio_range,
+};
+
static const struct of_device_id bcm2835_pinctrl_match[] = {
{
.compatible = "brcm,bcm2835-gpio",
- .data = &bcm2835_pinconf_ops,
+ .data = &bcm2835_plat_data,
},
{
.compatible = "brcm,bcm2711-gpio",
- .data = &bcm2711_pinconf_ops,
+ .data = &bcm2711_plat_data,
},
{}
};
@@ -1085,14 +1144,15 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ const struct bcm_plat_data *pdata;
struct bcm2835_pinctrl *pc;
struct gpio_irq_chip *girq;
struct resource iomem;
int err, i;
const struct of_device_id *match;
- BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_pins) != BCM2835_NUM_GPIOS);
- BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_groups) != BCM2835_NUM_GPIOS);
+ BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_pins) != BCM2711_NUM_GPIOS);
+ BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_groups) != BCM2711_NUM_GPIOS);
pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
@@ -1111,7 +1171,13 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(pc->base))
return PTR_ERR(pc->base);
- pc->gpio_chip = bcm2835_gpio_chip;
+ match = of_match_node(bcm2835_pinctrl_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+
+ pdata = match->data;
+
+ pc->gpio_chip = *pdata->gpio_chip;
pc->gpio_chip.parent = dev;
pc->gpio_chip.of_node = np;
@@ -1162,19 +1228,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
return err;
}
- match = of_match_node(bcm2835_pinctrl_match, pdev->dev.of_node);
- if (match) {
- bcm2835_pinctrl_desc.confops =
- (const struct pinconf_ops *)match->data;
- }
-
- pc->pctl_dev = devm_pinctrl_register(dev, &bcm2835_pinctrl_desc, pc);
+ pc->pctl_desc = *pdata->pctl_desc;
+ pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
if (IS_ERR(pc->pctl_dev)) {
gpiochip_remove(&pc->gpio_chip);
return PTR_ERR(pc->pctl_dev);
}
- pc->gpio_range = bcm2835_pinctrl_gpio_range;
+ pc->gpio_range = *pdata->gpio_range;
pc->gpio_range.base = pc->gpio_chip.base;
pc->gpio_range.gc = &pc->gpio_chip;
pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
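The probe rework above fixes a latent single-instance assumption: the old code patched .confops into the shared, writable bcm2835_pinctrl_desc, so two controllers of different variants would clobber each other's ops. The new scheme keeps one const template per variant and copies it into per-device state before registering. Condensed (names as added above; the lookup is equivalently expressible via of_device_get_match_data()):

    const struct bcm_plat_data *pdata = of_device_get_match_data(&pdev->dev);

    /* const templates stay shared; each device mutates only its own copy */
    pc->pctl_desc = *pdata->pctl_desc;
    pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
    pc->gpio_range = *pdata->gpio_range;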
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 25166217c3e0..a38f0d5f47ce 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -363,7 +363,10 @@ static int iproc_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
unsigned int offset = IPROC_GPIO_REG(gpio, IPROC_GPIO_OUT_EN_OFFSET);
unsigned int shift = IPROC_GPIO_SHIFT(gpio);
- return !(readl(chip->base + offset) & BIT(shift));
+ if (readl(chip->base + offset) & BIT(shift))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
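This is one instance of a conversion repeated throughout the series (bcm2835, mtk, armada-37xx, nomadik, amd, at91, axp209): get_direction callbacks stop returning a bare boolean and instead return the GPIO_LINE_DIRECTION_IN/OUT constants from <linux/gpio/driver.h>, which also removes the easy-to-misread negation of output-enable bits. The shape is the same everywhere; a sketch against a hypothetical direction register (struct foo_gpio and FOO_DIR_REG are illustrative):

    #include <linux/bits.h>
    #include <linux/gpio/driver.h>
    #include <linux/io.h>

    static int foo_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
    {
            struct foo_gpio *fg = gpiochip_get_data(chip);  /* hypothetical state */

            /* Assumed layout: a set bit means the line drives output */
            if (readl(fg->base + FOO_DIR_REG) & BIT(offset))
                    return GPIO_LINE_DIRECTION_OUT;

            return GPIO_LINE_DIRECTION_IN;
    }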
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index f23c55e22195..821242bb4b16 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -176,6 +176,7 @@ const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin)
return desc->name;
}
+EXPORT_SYMBOL_GPL(pin_get_name);
/* Deletes a range of pin descriptors */
static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 9357f7c46cf3..c6fe7d64c913 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -103,6 +103,7 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
{
return get_pinctrl_dev_from_of_node(np);
}
+EXPORT_SYMBOL_GPL(of_pinctrl_get);
static int dt_to_map_one_config(struct pinctrl *p,
struct pinctrl_dev *hog_pctldev,
@@ -127,11 +128,12 @@ static int dt_to_map_one_config(struct pinctrl *p,
np_pctldev = of_get_next_parent(np_pctldev);
if (!np_pctldev || of_node_is_root(np_pctldev)) {
of_node_put(np_pctldev);
+ ret = driver_deferred_probe_check_state(p->dev);
/* keep deferring if modules are enabled unless we've timed out */
- if (IS_ENABLED(CONFIG_MODULES) && !allow_default)
- return driver_deferred_probe_check_state_continue(p->dev);
-
- return driver_deferred_probe_check_state(p->dev);
+ if (IS_ENABLED(CONFIG_MODULES) && !allow_default &&
+ (ret == -ENODEV))
+ ret = -EPROBE_DEFER;
+ return ret;
}
/* If we're creating a hog we can use the passed pctldev */
if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
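driver_deferred_probe_check_state() returns -EPROBE_DEFER while deferral is still viable and -ENODEV once it is not (roughly: after the deferred-probe timeout), whereas the removed _continue variant deferred unconditionally. The replacement keeps deferring past that point only in the modular case, where a later modprobe may still provide the pin controller. Restated on its own (a reading aid for the hunk above, not new logic):

    int ret = driver_deferred_probe_check_state(p->dev);

    /* With modules enabled and no default state allowed, a module
     * loaded later can still provide this pinctrl, so keep waiting.
     */
    if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret == -ENODEV)
            ret = -EPROBE_DEFER;
    return ret;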
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index de775a85a51e..c784663b00ad 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -125,28 +125,28 @@ config PINCTRL_IMX7ULP
config PINCTRL_IMX8MM
bool "IMX8MM pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8mm pinctrl driver
config PINCTRL_IMX8MN
bool "IMX8MN pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8mn pinctrl driver
config PINCTRL_IMX8MP
bool "IMX8MP pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8mp pinctrl driver
config PINCTRL_IMX8MQ
bool "IMX8MQ pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8mq pinctrl driver
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index b409642f168d..9b821c9cbd16 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1286,6 +1286,7 @@ static const struct gpio_chip byt_gpio_chip = {
.direction_output = byt_gpio_direction_output,
.get = byt_gpio_get,
.set = byt_gpio_set,
+ .set_config = gpiochip_generic_config,
.dbg_show = byt_gpio_dbg_show,
};
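Wiring .set_config to gpiochip_generic_config() lets gpiolib forward per-line configuration to the pinctrl backend instead of returning -ENOTSUPP. A consumer-side sketch of what this enables, assuming the pin controller implements the matching pin-config parameter (dev and the "example" con_id are illustrative):

    #include <linux/gpio/consumer.h>

    struct gpio_desc *desc = gpiod_get(dev, "example", GPIOD_IN);

    if (!IS_ERR(desc))
            /* Routed through .set_config into the controller's config ops */
            gpiod_set_debounce(desc, 5000);  /* microseconds */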
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 4c74fdde576d..1093a6105d40 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1479,11 +1479,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long pending;
+ unsigned long flags;
u32 intr_line;
chained_irq_enter(chip, desc);
+ raw_spin_lock_irqsave(&chv_lock, flags);
pending = readl(pctrl->regs + CHV_INTSTAT);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
unsigned int irq, offset;
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 330c8f077b73..4d7a86a5a37b 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -15,17 +15,18 @@
#include "pinctrl-intel.h"
-#define SPT_PAD_OWN 0x020
-#define SPT_PADCFGLOCK 0x0a0
-#define SPT_HOSTSW_OWN 0x0d0
-#define SPT_GPI_IS 0x100
-#define SPT_GPI_IE 0x120
+#define SPT_PAD_OWN 0x020
+#define SPT_H_PADCFGLOCK 0x090
+#define SPT_LP_PADCFGLOCK 0x0a0
+#define SPT_HOSTSW_OWN 0x0d0
+#define SPT_GPI_IS 0x100
+#define SPT_GPI_IE 0x120
#define SPT_COMMUNITY(b, s, e) \
{ \
.barno = (b), \
.padown_offset = SPT_PAD_OWN, \
- .padcfglock_offset = SPT_PADCFGLOCK, \
+ .padcfglock_offset = SPT_LP_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
.is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
@@ -47,7 +48,7 @@
{ \
.barno = (b), \
.padown_offset = SPT_PAD_OWN, \
- .padcfglock_offset = SPT_PADCFGLOCK, \
+ .padcfglock_offset = SPT_H_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
.is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6765.c b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
index 32451e8693be..905dae8c3fd8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6765.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
@@ -1070,15 +1070,12 @@ static const struct mtk_pin_soc mt6765_data = {
.ngrps = ARRAY_SIZE(mtk_pins_mt6765),
.eint_hw = &mt6765_eint_hw,
.gpio_m = 0,
- .ies_present = true,
.base_names = mt6765_pinctrl_register_base_names,
.nbase_names = ARRAY_SIZE(mt6765_pinctrl_register_base_names),
- .bias_disable_set = mtk_pinconf_bias_disable_set,
- .bias_disable_get = mtk_pinconf_bias_disable_get,
- .bias_set = mtk_pinconf_bias_set,
- .bias_get = mtk_pinconf_bias_get,
- .drive_set = mtk_pinconf_drive_set_rev1,
- .drive_get = mtk_pinconf_drive_get_rev1,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_raw,
+ .drive_get = mtk_pinconf_drive_get_raw,
.adv_pull_get = mtk_pinconf_adv_pull_get,
.adv_pull_set = mtk_pinconf_adv_pull_set,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8183.c b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
index 9a74d5025be6..60318339b618 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8183.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
@@ -554,13 +554,10 @@ static const struct mtk_pin_soc mt8183_data = {
.ngrps = ARRAY_SIZE(mtk_pins_mt8183),
.eint_hw = &mt8183_eint_hw,
.gpio_m = 0,
- .ies_present = true,
.base_names = mt8183_pinctrl_register_base_names,
.nbase_names = ARRAY_SIZE(mt8183_pinctrl_register_base_names),
- .bias_disable_set = mtk_pinconf_bias_disable_set_rev1,
- .bias_disable_get = mtk_pinconf_bias_disable_get_rev1,
- .bias_set = mtk_pinconf_bias_set_rev1,
- .bias_get = mtk_pinconf_bias_get_rev1,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
.drive_set = mtk_pinconf_drive_set_rev1,
.drive_get = mtk_pinconf_drive_get_rev1,
.adv_pull_get = mtk_pinconf_adv_pull_get,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 20e1c890e73b..d3169a87e1b3 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -6,6 +6,7 @@
*
*/
+#include <dt-bindings/pinctrl/mt65xx.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
@@ -66,34 +67,44 @@ static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
int field, struct mtk_pin_field *pfd)
{
- const struct mtk_pin_field_calc *c, *e;
+ const struct mtk_pin_field_calc *c;
const struct mtk_pin_reg_calc *rc;
+ int start = 0, end, check;
+ bool found = false;
u32 bits;
if (hw->soc->reg_cal && hw->soc->reg_cal[field].range) {
rc = &hw->soc->reg_cal[field];
} else {
dev_dbg(hw->dev,
- "Not support field %d for pin %d (%s)\n",
- field, desc->number, desc->name);
+ "Not support field %d for this soc\n", field);
return -ENOTSUPP;
}
- c = rc->range;
- e = c + rc->nranges;
+ end = rc->nranges - 1;
- while (c < e) {
- if (desc->number >= c->s_pin && desc->number <= c->e_pin)
+ while (start <= end) {
+ check = (start + end) >> 1;
+ if (desc->number >= rc->range[check].s_pin
+ && desc->number <= rc->range[check].e_pin) {
+ found = true;
+ break;
+ } else if (start == end)
break;
- c++;
+ else if (desc->number < rc->range[check].s_pin)
+ end = check - 1;
+ else
+ start = check + 1;
}
- if (c >= e) {
+ if (!found) {
dev_dbg(hw->dev, "Not support field %d for pin = %d (%s)\n",
field, desc->number, desc->name);
return -ENOTSUPP;
}
+ c = rc->range + check;
+
if (c->i_base > hw->nbase - 1) {
dev_err(hw->dev,
"Invalid base for field %d for pin = %d (%s)\n",
@@ -182,6 +193,9 @@ int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
if (err)
return err;
+ if (value < 0 || value > pf.mask)
+ return -EINVAL;
+
if (!pf.next)
mtk_rmw(hw, pf.index, pf.offset, pf.mask << pf.bitpos,
(value & pf.mask) << pf.bitpos);
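The range lookup in mtk_hw_pin_field_lookup() above switches from a linear scan to a binary search, which is valid because rc->range is sorted by pin number and the ranges do not overlap. An equivalent, slightly simplified search over a reduced range type (types and names here are illustrative):

    struct pin_range { unsigned int s_pin, e_pin; };

    /* Returns the index of the range containing pin, or -1 if none.
     * Requires ranges[] sorted ascending and non-overlapping.
     */
    static int find_pin_range(const struct pin_range *ranges, int nranges,
                              unsigned int pin)
    {
            int start = 0, end = nranges - 1;

            while (start <= end) {
                    int mid = (start + end) >> 1;

                    if (pin >= ranges[mid].s_pin && pin <= ranges[mid].e_pin)
                            return mid;
                    if (pin < ranges[mid].s_pin)
                            end = mid - 1;
                    else
                            start = mid + 1;
            }
            return -1;
    }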
@@ -502,6 +516,226 @@ int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+/* Combo for the following pull register type:
+ * 1. PU + PD
+ * 2. PULLSEL + PULLEN
+ * 3. PUPD + R0 + R1
+ */
+static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ int err, pu, pd;
+
+ if (arg == MTK_DISABLE) {
+ pu = 0;
+ pd = 0;
+ } else if ((arg == MTK_ENABLE) && pullup) {
+ pu = 1;
+ pd = 0;
+ } else if ((arg == MTK_ENABLE) && !pullup) {
+ pu = 0;
+ pd = 1;
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu);
+ if (err)
+ goto out;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd);
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_set_pullsel_pullen(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ int err, enable;
+
+ if (arg == MTK_DISABLE)
+ enable = 0;
+ else if (arg == MTK_ENABLE)
+ enable = 1;
+ else {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN, enable);
+ if (err)
+ goto out;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, pullup);
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_set_pupd_r1_r0(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ int err, r0, r1;
+
+ if ((arg == MTK_DISABLE) || (arg == MTK_PUPD_SET_R1R0_00)) {
+ pullup = 0;
+ r0 = 0;
+ r1 = 0;
+ } else if (arg == MTK_PUPD_SET_R1R0_01) {
+ r0 = 1;
+ r1 = 0;
+ } else if (arg == MTK_PUPD_SET_R1R0_10) {
+ r0 = 0;
+ r1 = 1;
+ } else if (arg == MTK_PUPD_SET_R1R0_11) {
+ r0 = 1;
+ r1 = 1;
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* MTK HW PUPD bit: 1 for pull-down, 0 for pull-up */
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PUPD, !pullup);
+ if (err)
+ goto out;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R0, r0);
+ if (err)
+ goto out;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R1, r1);
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_get_pu_pd(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err, pu, pd;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PU, &pu);
+ if (err)
+ goto out;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &pd);
+ if (err)
+ goto out;
+
+ if (pu == 0 && pd == 0) {
+ *pullup = 0;
+ *enable = MTK_DISABLE;
+ } else if (pu == 1 && pd == 0) {
+ *pullup = 1;
+ *enable = MTK_ENABLE;
+ } else if (pu == 0 && pd == 1) {
+ *pullup = 0;
+ *enable = MTK_ENABLE;
+ } else
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_get_pullsel_pullen(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, pullup);
+ if (err)
+ goto out;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, enable);
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_get_pupd_r1_r0(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err, r0, r1;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PUPD, pullup);
+ if (err)
+ goto out;
+ /* MTK HW PUPD bit: 1 for pull-down, 0 for pull-up */
+ *pullup = !(*pullup);
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R0, &r0);
+ if (err)
+ goto out;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R1, &r1);
+ if (err)
+ goto out;
+
+ if ((r1 == 0) && (r0 == 0))
+ *enable = MTK_PUPD_SET_R1R0_00;
+ else if ((r1 == 0) && (r0 == 1))
+ *enable = MTK_PUPD_SET_R1R0_01;
+ else if ((r1 == 1) && (r0 == 0))
+ *enable = MTK_PUPD_SET_R1R0_10;
+ else if ((r1 == 1) && (r0 == 1))
+ *enable = MTK_PUPD_SET_R1R0_11;
+ else
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
+int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ int err;
+
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
+ if (!err)
+ goto out;
+
+ err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc, pullup, arg);
+ if (!err)
+ goto out;
+
+ err = mtk_pinconf_bias_set_pupd_r1_r0(hw, desc, pullup, arg);
+
+out:
+ return err;
+}
+
+int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err;
+
+ err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
+ if (!err)
+ goto out;
+
+ err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc, pullup, enable);
+ if (!err)
+ goto out;
+
+ err = mtk_pinconf_bias_get_pupd_r1_r0(hw, desc, pullup, enable);
+
+out:
+ return err;
+}
+
/* Revision 0 */
int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg)
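mtk_pinconf_bias_set_combo()/get_combo() work by attempted dispatch: each helper fails (typically with -ENOTSUPP from the register-field lookup) when the pin lacks that register scheme, so the first scheme the pin actually implements handles the request. An illustrative restructure of the same dispatch, not the driver's code:

    typedef int (*bias_set_fn)(struct mtk_pinctrl *hw,
                               const struct mtk_pin_desc *desc,
                               u32 pullup, u32 arg);

    static int bias_set_any(struct mtk_pinctrl *hw,
                            const struct mtk_pin_desc *desc,
                            u32 pullup, u32 arg)
    {
            static const bias_set_fn schemes[] = {
                    mtk_pinconf_bias_set_pu_pd,          /* PU + PD */
                    mtk_pinconf_bias_set_pullsel_pullen, /* PULLSEL + PULLEN */
                    mtk_pinconf_bias_set_pupd_r1_r0,     /* PUPD + R1 + R0 */
            };
            unsigned int i;
            int err = -ENOTSUPP;

            /* Stop at the first scheme the pin supports */
            for (i = 0; i < ARRAY_SIZE(schemes) && err; i++)
                    err = schemes[i](hw, desc, pullup, arg);

            return err;
    }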
@@ -593,6 +827,18 @@ int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+int mtk_pinconf_drive_set_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg)
+{
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV, arg);
+}
+
+int mtk_pinconf_drive_get_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val)
+{
+ return mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV, val);
+}
+
int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
u32 arg)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 1b7da42aa1d5..27df08736396 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -216,6 +216,11 @@ struct mtk_pin_soc {
int (*bias_get)(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup, int *res);
+ int (*bias_set_combo)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 pullup, u32 arg);
+ int (*bias_get_combo)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *pullup, u32 *arg);
+
int (*drive_set)(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg);
int (*drive_get)(struct mtk_pinctrl *hw,
@@ -277,6 +282,12 @@ int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
int *res);
+int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 enable);
+int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable);
int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg);
@@ -288,6 +299,11 @@ int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *val);
+int mtk_pinconf_drive_set_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_drive_get_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val);
+
int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
u32 arg);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 67f8444f7a0c..a02ad10ec6fa 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -804,7 +804,10 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
pctl->devdata->spec_dir_set(&reg_addr, offset);
regmap_read(pctl->regmap1, reg_addr, &read_val);
- return !(read_val & bit);
+ if (read_val & bit)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 923264d0e9ef..ee305f140400 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -78,123 +78,112 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
{
struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
u32 param = pinconf_to_config_param(*config);
- int val, val2, err, reg, ret = 1;
+ int pullup, err, reg, ret = 1;
const struct mtk_pin_desc *desc;
+ if (pin >= hw->soc->npins) {
+ err = -EINVAL;
+ goto out;
+ }
desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- if (hw->soc->bias_disable_get) {
- err = hw->soc->bias_disable_get(hw, desc, &ret);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
- break;
case PIN_CONFIG_BIAS_PULL_UP:
- if (hw->soc->bias_get) {
- err = hw->soc->bias_get(hw, desc, 1, &ret);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
- break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- if (hw->soc->bias_get) {
- err = hw->soc->bias_get(hw, desc, 0, &ret);
+ if (hw->soc->bias_get_combo) {
+ err = hw->soc->bias_get_combo(hw, desc, &pullup, &ret);
if (err)
- return err;
+ goto out;
+ if (param == PIN_CONFIG_BIAS_DISABLE) {
+ if (ret == MTK_PUPD_SET_R1R0_00)
+ ret = MTK_DISABLE;
+ } else if (param == PIN_CONFIG_BIAS_PULL_UP) {
+ /* When desire to get pull-up value, return
+ * error if current setting is pull-down
+ */
+ if (!pullup)
+ err = -EINVAL;
+ } else if (param == PIN_CONFIG_BIAS_PULL_DOWN) {
+ /* When desire to get pull-down value, return
+ * error if current setting is pull-up
+ */
+ if (pullup)
+ err = -EINVAL;
+ }
} else {
- return -ENOTSUPP;
+ err = -ENOTSUPP;
}
break;
case PIN_CONFIG_SLEW_RATE:
- err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &val);
- if (err)
- return err;
-
- if (!val)
- return -EINVAL;
-
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
break;
case PIN_CONFIG_INPUT_ENABLE:
case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
- return err;
-
- /* HW takes input mode as zero; output mode as non-zero */
- if ((val && param == PIN_CONFIG_INPUT_ENABLE) ||
- (!val && param == PIN_CONFIG_OUTPUT_ENABLE))
- return -EINVAL;
+ goto out;
+ /* CONFIG Current direction return value
+ * ------------- ----------------- ----------------------
+ * OUTPUT_ENABLE output 1 (= HW value)
+ * input 0 (= HW value)
+ * INPUT_ENABLE output 0 (= reverse HW value)
+ * input 1 (= reverse HW value)
+ */
+ if (param == PIN_CONFIG_INPUT_ENABLE)
+ ret = !ret;
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
- if (err)
- return err;
-
- err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &val2);
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
- return err;
+ goto out;
+ /* return error when in output mode
+ * because schmitt trigger only work in input mode
+ */
+ if (ret) {
+ err = -EINVAL;
+ goto out;
+ }
- if (val || !val2)
- return -EINVAL;
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- if (hw->soc->drive_get) {
+ if (hw->soc->drive_get)
err = hw->soc->drive_get(hw, desc, &ret);
- if (err)
- return err;
- } else {
+ else
err = -ENOTSUPP;
- }
break;
case MTK_PIN_CONFIG_TDSEL:
case MTK_PIN_CONFIG_RDSEL:
reg = (param == MTK_PIN_CONFIG_TDSEL) ?
PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
-
- err = mtk_hw_get_value(hw, desc, reg, &val);
- if (err)
- return err;
-
- ret = val;
-
+ err = mtk_hw_get_value(hw, desc, reg, &ret);
break;
case MTK_PIN_CONFIG_PU_ADV:
case MTK_PIN_CONFIG_PD_ADV:
if (hw->soc->adv_pull_get) {
- bool pullup;
-
pullup = param == MTK_PIN_CONFIG_PU_ADV;
err = hw->soc->adv_pull_get(hw, desc, pullup, &ret);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ } else
+ err = -ENOTSUPP;
break;
case MTK_PIN_CONFIG_DRV_ADV:
- if (hw->soc->adv_drive_get) {
+ if (hw->soc->adv_drive_get)
err = hw->soc->adv_drive_get(hw, desc, &ret);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ else
+ err = -ENOTSUPP;
break;
default:
- return -ENOTSUPP;
+ err = -ENOTSUPP;
}
- *config = pinconf_to_config_packed(param, ret);
+out:
+ if (!err)
+ *config = pinconf_to_config_packed(param, ret);
- return 0;
+ return err;
}
static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
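The mtk_pinconf_get() rewrite above converts the function to a single-exit shape: every case records err instead of returning early, and *config is packed only when err is zero, so callers never see a half-updated config on failure. Reduced to its skeleton (a pattern sketch, not the full function):

    int err = 0, ret = 1;

    switch (param) {
    case PIN_CONFIG_SLEW_RATE:
            err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
            break;
    default:
            err = -ENOTSUPP;
    }

    if (!err)
            *config = pinconf_to_config_packed(param, ret);
    return err;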
@@ -206,64 +195,55 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
int err = 0;
u32 reg;
+ if (pin >= hw->soc->npins) {
+ err = -EINVAL;
+ goto err;
+ }
desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
switch ((u32)param) {
case PIN_CONFIG_BIAS_DISABLE:
- if (hw->soc->bias_disable_set) {
- err = hw->soc->bias_disable_set(hw, desc);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ if (hw->soc->bias_set_combo)
+ err = hw->soc->bias_set_combo(hw, desc, 0, MTK_DISABLE);
+ else
+ err = -ENOTSUPP;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- if (hw->soc->bias_set) {
- err = hw->soc->bias_set(hw, desc, 1);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ if (hw->soc->bias_set_combo)
+ err = hw->soc->bias_set_combo(hw, desc, 1, arg);
+ else
+ err = -ENOTSUPP;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- if (hw->soc->bias_set) {
- err = hw->soc->bias_set(hw, desc, 0);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ if (hw->soc->bias_set_combo)
+ err = hw->soc->bias_set_combo(hw, desc, 0, arg);
+ else
+ err = -ENOTSUPP;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
MTK_DISABLE);
- if (err)
+ /* Keep set direction to consider the case that a GPIO pin
+ * does not have SMT control
+ */
+ if (err != -ENOTSUPP)
goto err;
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
MTK_OUTPUT);
- if (err)
- goto err;
break;
case PIN_CONFIG_INPUT_ENABLE:
- if (hw->soc->ies_present) {
- mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES,
- MTK_ENABLE);
- }
+ /* regard all non-zero value as enable */
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
+ if (err)
+ goto err;
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
MTK_INPUT);
- if (err)
- goto err;
break;
case PIN_CONFIG_SLEW_RATE:
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR,
- arg);
- if (err)
- goto err;
-
+ /* regard all non-zero value as enable */
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR, !!arg);
break;
case PIN_CONFIG_OUTPUT:
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
@@ -273,41 +253,29 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
arg);
- if (err)
- goto err;
break;
+ case PIN_CONFIG_INPUT_SCHMITT:
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
/* arg = 1: Input mode & SMT enable ;
* arg = 0: Output mode & SMT disable
*/
- arg = arg ? 2 : 1;
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- arg & 1);
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, !arg);
if (err)
goto err;
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
- !!(arg & 2));
- if (err)
- goto err;
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT, !!arg);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- if (hw->soc->drive_set) {
+ if (hw->soc->drive_set)
err = hw->soc->drive_set(hw, desc, arg);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ else
+ err = -ENOTSUPP;
break;
case MTK_PIN_CONFIG_TDSEL:
case MTK_PIN_CONFIG_RDSEL:
reg = (param == MTK_PIN_CONFIG_TDSEL) ?
PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
-
err = mtk_hw_set_value(hw, desc, reg, arg);
- if (err)
- goto err;
break;
case MTK_PIN_CONFIG_PU_ADV:
case MTK_PIN_CONFIG_PD_ADV:
@@ -317,20 +285,14 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
pullup = param == MTK_PIN_CONFIG_PU_ADV;
err = hw->soc->adv_pull_set(hw, desc, pullup,
arg);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ } else
+ err = -ENOTSUPP;
break;
case MTK_PIN_CONFIG_DRV_ADV:
- if (hw->soc->adv_drive_set) {
+ if (hw->soc->adv_drive_set)
err = hw->soc->adv_drive_set(hw, desc, arg);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ else
+ err = -ENOTSUPP;
break;
default:
err = -ENOTSUPP;
@@ -575,12 +537,120 @@ static int mtk_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
+static int mtk_hw_get_value_wrap(struct mtk_pinctrl *hw, unsigned int gpio, int field)
+{
+ const struct mtk_pin_desc *desc;
+ int value, err;
+
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+ err = mtk_hw_get_value(hw, desc, field, &value);
+ if (err)
+ return err;
+
+ return value;
+}
+
+#define mtk_pctrl_get_pinmux(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_MODE)
+
+#define mtk_pctrl_get_direction(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_DIR)
+
+#define mtk_pctrl_get_out(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_DO)
+
+#define mtk_pctrl_get_in(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_DI)
+
+#define mtk_pctrl_get_smt(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_SMT)
+
+#define mtk_pctrl_get_ies(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_IES)
+
+#define mtk_pctrl_get_driving(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_DRV)
+
+ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
+ unsigned int gpio, char *buf, unsigned int bufLen)
+{
+ int pinmux, pullup, pullen, len = 0, r1 = -1, r0 = -1;
+ const struct mtk_pin_desc *desc;
+
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+ pinmux = mtk_pctrl_get_pinmux(hw, gpio);
+ if (pinmux >= hw->soc->nfuncs)
+ pinmux -= hw->soc->nfuncs;
+
+ mtk_pinconf_bias_get_combo(hw, desc, &pullup, &pullen);
+ if (pullen == MTK_PUPD_SET_R1R0_00) {
+ pullen = 0;
+ r1 = 0;
+ r0 = 0;
+ } else if (pullen == MTK_PUPD_SET_R1R0_01) {
+ pullen = 1;
+ r1 = 0;
+ r0 = 1;
+ } else if (pullen == MTK_PUPD_SET_R1R0_10) {
+ pullen = 1;
+ r1 = 1;
+ r0 = 0;
+ } else if (pullen == MTK_PUPD_SET_R1R0_11) {
+ pullen = 1;
+ r1 = 1;
+ r0 = 1;
+ } else if (pullen != MTK_DISABLE && pullen != MTK_ENABLE) {
+ pullen = 0;
+ }
+ len += scnprintf(buf + len, bufLen - len,
+ "%03d: %1d%1d%1d%1d%02d%1d%1d%1d%1d",
+ gpio,
+ pinmux,
+ mtk_pctrl_get_direction(hw, gpio),
+ mtk_pctrl_get_out(hw, gpio),
+ mtk_pctrl_get_in(hw, gpio),
+ mtk_pctrl_get_driving(hw, gpio),
+ mtk_pctrl_get_smt(hw, gpio),
+ mtk_pctrl_get_ies(hw, gpio),
+ pullen,
+ pullup);
+
+ if (r1 != -1) {
+ len += scnprintf(buf + len, bufLen - len, " (%1d %1d)\n",
+ r1, r0);
+ } else {
+ len += scnprintf(buf + len, bufLen - len, "\n");
+ }
+
+ return len;
+}
+
+#define PIN_DBG_BUF_SZ 96
+static void mtk_pctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int gpio)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ char buf[PIN_DBG_BUF_SZ];
+
+ (void)mtk_pctrl_show_one_pin(hw, gpio, buf, PIN_DBG_BUF_SZ);
+
+ seq_printf(s, "%s", buf);
+}
+
static const struct pinctrl_ops mtk_pctlops = {
.dt_node_to_map = mtk_pctrl_dt_node_to_map,
.dt_free_map = pinctrl_utils_free_map,
.get_groups_count = mtk_pctrl_get_groups_count,
.get_group_name = mtk_pctrl_get_group_name,
.get_group_pins = mtk_pctrl_get_group_pins,
+ .pin_dbg_show = mtk_pctrl_dbg_show,
};
static int mtk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
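mtk_pctrl_show_one_pin() is exported through pinctrl-paris.h (see the header hunk below) so code outside the debugfs path can render the same one-line pin summary that .pin_dbg_show prints. A usage sketch:

    char buf[96];  /* matches PIN_DBG_BUF_SZ above */
    ssize_t n;

    /* One line per pin: mode, dir, out, in, drive, smt, ies, pull (+ R1/R0) */
    n = mtk_pctrl_show_one_pin(hw, 0, buf, sizeof(buf));
    if (n > 0)
            pr_info("%s", buf);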
@@ -677,6 +747,7 @@ static const struct pinconf_ops mtk_confops = {
.pin_config_get = mtk_pinconf_get,
.pin_config_group_get = mtk_pconf_group_get,
.pin_config_group_set = mtk_pconf_group_set,
+ .is_generic = true,
};
static struct pinctrl_desc mtk_desc = {
@@ -693,13 +764,19 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
const struct mtk_pin_desc *desc;
int value, err;
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &value);
if (err)
return err;
- return !value;
+ if (value)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
@@ -708,6 +785,9 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
const struct mtk_pin_desc *desc;
int value, err;
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
@@ -722,6 +802,9 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
struct mtk_pinctrl *hw = gpiochip_get_data(chip);
const struct mtk_pin_desc *desc;
+ if (gpio >= hw->soc->npins)
+ return;
+
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
@@ -729,12 +812,22 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
return pinctrl_gpio_direction_input(chip->base + gpio);
}
static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
int value)
{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
mtk_gpio_set(chip, gpio, value);
return pinctrl_gpio_direction_output(chip->base + gpio);
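Each gpio_chip callback in pinctrl-paris now validates the offset against hw->soc->npins before dereferencing hw->soc->pins[], turning an out-of-bounds read on a bogus line number into a clean -EINVAL. The guard is identical in every callback:

    struct mtk_pinctrl *hw = gpiochip_get_data(chip);

    if (gpio >= hw->soc->npins)
            return -EINVAL;  /* never index hw->soc->pins[] past its end */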
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.h b/drivers/pinctrl/mediatek/pinctrl-paris.h
index 3d43771074e6..afb7650fd25b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.h
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.h
@@ -60,6 +60,9 @@
int mtk_paris_pinctrl_probe(struct platform_device *pdev,
const struct mtk_pin_soc *soc);
+ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
+ unsigned int gpio, char *buf, unsigned int bufLen);
+
extern const struct dev_pm_ops mtk_paris_pinctrl_pm_ops;
#endif /* __PINCTRL_PARIS_H */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 926b9997159a..d130c635f74b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -231,10 +231,24 @@ static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+static const unsigned int tsin_a_d_valid_pins[] = { GPIOY_0 };
+static const unsigned int tsin_a_sop_pins[] = { GPIOY_1 };
+static const unsigned int tsin_a_clk_pins[] = { GPIOY_2 };
+static const unsigned int tsin_a_d0_pins[] = { GPIOY_3 };
+static const unsigned int tsin_a_dp_pins[] = {
+ GPIOY_4, GPIOY_5, GPIOY_6, GPIOY_7, GPIOY_8, GPIOY_9, GPIOY_10
+};
+
+static const unsigned int tsin_a_fail_pins[] = { GPIOY_11 };
static const unsigned int i2s_out_ch23_y_pins[] = { GPIOY_8 };
static const unsigned int i2s_out_ch45_y_pins[] = { GPIOY_9 };
static const unsigned int i2s_out_ch67_y_pins[] = { GPIOY_10 };
+static const unsigned int tsin_b_d_valid_pins[] = { GPIOX_6 };
+static const unsigned int tsin_b_sop_pins[] = { GPIOX_7 };
+static const unsigned int tsin_b_clk_pins[] = { GPIOX_8 };
+static const unsigned int tsin_b_d0_pins[] = { GPIOX_9 };
+
static const unsigned int spdif_out_y_pins[] = { GPIOY_12 };
static const unsigned int gen_clk_out_pins[] = { GPIOY_15 };
@@ -437,12 +451,22 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(pwm_a_x, 3, 17),
GROUP(pwm_e, 2, 30),
GROUP(pwm_f_x, 3, 18),
+ GROUP(tsin_b_d_valid, 3, 9),
+ GROUP(tsin_b_sop, 3, 8),
+ GROUP(tsin_b_clk, 3, 10),
+ GROUP(tsin_b_d0, 3, 7),
/* Bank Y */
GROUP(uart_cts_c, 1, 17),
GROUP(uart_rts_c, 1, 16),
GROUP(uart_tx_c, 1, 19),
GROUP(uart_rx_c, 1, 18),
+ GROUP(tsin_a_fail, 3, 3),
+ GROUP(tsin_a_d_valid, 3, 2),
+ GROUP(tsin_a_sop, 3, 1),
+ GROUP(tsin_a_clk, 3, 0),
+ GROUP(tsin_a_d0, 3, 4),
+ GROUP(tsin_a_dp, 3, 5),
GROUP(pwm_a_y, 1, 21),
GROUP(pwm_f_y, 1, 20),
GROUP(i2s_out_ch23_y, 1, 5),
@@ -601,6 +625,15 @@ static const char * const gpio_periphs_groups[] = {
"GPIOX_20", "GPIOX_21", "GPIOX_22",
};
+static const char * const tsin_a_groups[] = {
+ "tsin_a_clk", "tsin_a_sop", "tsin_a_d_valid", "tsin_a_d0",
+ "tsin_a_dp", "tsin_a_fail",
+};
+
+static const char * const tsin_b_groups[] = {
+ "tsin_b_clk", "tsin_b_sop", "tsin_b_d_valid", "tsin_b_d0",
+};
+
static const char * const emmc_groups[] = {
"emmc_nand_d07", "emmc_clk", "emmc_cmd", "emmc_ds",
};
@@ -792,6 +825,8 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(i2s_out),
FUNCTION(spdif_out),
FUNCTION(gen_clk_out),
+ FUNCTION(tsin_a),
+ FUNCTION(tsin_b),
};
static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 2ac921c83da9..32552d795bb2 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -241,6 +241,17 @@ static const unsigned int tsin_a_dp_pins[] = {
GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5, GPIODV_6, GPIODV_7,
};
+static const unsigned int tsin_b_clk_pins[] = { GPIOH_6 };
+static const unsigned int tsin_b_d0_pins[] = { GPIOH_7 };
+static const unsigned int tsin_b_sop_pins[] = { GPIOH_8 };
+static const unsigned int tsin_b_d_valid_pins[] = { GPIOH_9 };
+
+static const unsigned int tsin_b_fail_z4_pins[] = { GPIOZ_4 };
+static const unsigned int tsin_b_clk_z3_pins[] = { GPIOZ_3 };
+static const unsigned int tsin_b_d0_z2_pins[] = { GPIOZ_2 };
+static const unsigned int tsin_b_sop_z1_pins[] = { GPIOZ_1 };
+static const unsigned int tsin_b_d_valid_z0_pins[] = { GPIOZ_0 };
+
static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
MESON_PIN(GPIOAO_0),
MESON_PIN(GPIOAO_1),
@@ -438,6 +449,11 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(eth_txd1, 4, 12),
GROUP(eth_txd2, 4, 11),
GROUP(eth_txd3, 4, 10),
+ GROUP(tsin_b_fail_z4, 3, 15),
+ GROUP(tsin_b_clk_z3, 3, 16),
+ GROUP(tsin_b_d0_z2, 3, 17),
+ GROUP(tsin_b_sop_z1, 3, 18),
+ GROUP(tsin_b_d_valid_z0, 3, 19),
GROUP(pwm_c, 3, 20),
GROUP(i2s_out_ch23_z, 3, 26),
GROUP(i2s_out_ch45_z, 3, 25),
@@ -454,6 +470,10 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(i2s_out_lr_clk, 6, 24),
GROUP(i2s_out_ch01, 6, 23),
GROUP(spdif_out_h, 6, 28),
+ GROUP(tsin_b_d0, 6, 17),
+ GROUP(tsin_b_sop, 6, 18),
+ GROUP(tsin_b_d_valid, 6, 19),
+ GROUP(tsin_b_clk, 6, 20),
/* Bank DV */
GROUP(uart_tx_b, 2, 16),
@@ -689,6 +709,12 @@ static const char * const tsin_a_groups[] = {
"tsin_a_dp", "tsin_a_fail",
};
+static const char * const tsin_b_groups[] = {
+ "tsin_b_clk", "tsin_b_sop", "tsin_b_d_valid", "tsin_b_d0",
+ "tsin_b_clk_z3", "tsin_b_sop_z1", "tsin_b_d_valid_z0", "tsin_b_d0_z2",
+ "tsin_b_fail_z4",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -764,6 +790,7 @@ static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(spdif_out),
FUNCTION(eth_led),
FUNCTION(tsin_a),
+ FUNCTION(tsin_b),
};
static struct meson_pmx_func meson_gxl_aobus_functions[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 243fba254175..5f125bd6279d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -402,7 +403,10 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
mask = BIT(offset);
regmap_read(info->regmap, reg, &val);
- return !(val & mask);
+ if (val & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
@@ -738,14 +742,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
return ret;
}
- nr_irq_parent = platform_irq_count(pdev);
- if (nr_irq_parent < 0) {
- if (nr_irq_parent != -EPROBE_DEFER)
- dev_err(dev, "Couldn't determine irq count: %pe\n",
- ERR_PTR(nr_irq_parent));
- return nr_irq_parent;
- }
-
+ nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
@@ -782,7 +779,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
if (!girq->parents)
return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
- int irq = platform_get_irq(pdev, i);
+ int irq = irq_of_parse_and_map(np, i);
if (irq < 0)
continue;
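The armada-37xx hunk trades the platform-device IRQ helpers for direct of_irq accessors. One behavioral detail worth noting: irq_of_parse_and_map() returns 0, not a negative errno, when mapping fails, so failure checks must test for zero. A sketch of the of_irq idiom:

    #include <linux/of_irq.h>

    int i, nr = of_irq_count(np);  /* number of interrupts in the DT node */

    for (i = 0; i < nr; i++) {
            unsigned int virq = irq_of_parse_and_map(np, i);

            if (!virq)  /* 0 signals a failed mapping */
                    continue;
            /* register virq as a parent interrupt */
    }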
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 95f864dfdef4..ca7bbe4164c0 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -831,11 +831,14 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
clk_enable(nmk_chip->clk);
- dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+ dir = readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset);
clk_disable(nmk_chip->clk);
- return dir;
+ if (dir)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 9eb86309c70b..dfef471201f6 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -286,6 +286,7 @@ out:
kfree(cfg);
return ret;
}
+EXPORT_SYMBOL_GPL(pinconf_generic_parse_dt_config);
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 73aff6591de2..1fe62a35bb12 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -46,7 +46,10 @@ static int amd_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
pin_reg = readl(gpio_dev->base + offset * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- return !(pin_reg & BIT(OUTPUT_ENABLE_OFF));
+ if (pin_reg & BIT(OUTPUT_ENABLE_OFF))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 207f266e9cf2..52386ad29f28 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1414,7 +1414,10 @@ static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
u32 osr;
osr = readl_relaxed(pio + PIO_OSR);
- return !(osr & mask);
+ if (osr & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int at91_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index be5b645815e5..207cbae3a7bf 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -149,13 +149,16 @@ static int axp20x_gpio_get_direction(struct gpio_chip *chip,
* going to change the value soon anyway. Default to output.
*/
if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
/*
* The GPIO directions are the three lowest values.
* 2 is input, 0 and 1 are output
*/
- return val & 2;
+ if (val & 2)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
new file mode 100644
index 000000000000..1c08579f0198
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dialog DA9062 pinctrl and GPIO driver.
+ * Based on DA9055 GPIO driver.
+ *
+ * TODO:
+ * - add pinmux and pinctrl support (gpio alternate mode)
+ *
+ * Documents:
+ * [1] https://www.dialog-semiconductor.com/sites/default/files/da9062_datasheet_3v6.pdf
+ *
+ * Copyright (C) 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ */
+#include <linux/bits.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/gpio/driver.h>
+
+#include <linux/mfd/da9062/core.h>
+#include <linux/mfd/da9062/registers.h>
+
+/*
+ * We need this to get the gpio_desc from a <gpio_chip,offset> tuple and to
+ * decide whether the gpio is active low without a vendor-specific dt-binding.
+ */
+#include "../gpio/gpiolib.h"
+
+#define DA9062_TYPE(offset) (4 * (offset % 2))
+#define DA9062_PIN_SHIFT(offset) (4 * (offset % 2))
+#define DA9062_PIN_ALTERNATE 0x00 /* gpio alternate mode */
+#define DA9062_PIN_GPI 0x01 /* gpio in */
+#define DA9062_PIN_GPO_OD 0x02 /* gpio out open-drain */
+#define DA9062_PIN_GPO_PP 0x03 /* gpio out push-pull */
+#define DA9062_GPIO_NUM 5
+
+struct da9062_pctl {
+ struct da9062 *da9062;
+ struct gpio_chip gc;
+ unsigned int pin_config[DA9062_GPIO_NUM];
+};
+
+static int da9062_pctl_get_pin_mode(struct da9062_pctl *pctl,
+ unsigned int offset)
+{
+ struct regmap *regmap = pctl->da9062->regmap;
+ int ret, val;
+
+ ret = regmap_read(regmap, DA9062AA_GPIO_0_1 + (offset >> 1), &val);
+ if (ret < 0)
+ return ret;
+
+ val >>= DA9062_PIN_SHIFT(offset);
+ val &= DA9062AA_GPIO0_PIN_MASK;
+
+ return val;
+}
+
+static int da9062_pctl_set_pin_mode(struct da9062_pctl *pctl,
+ unsigned int offset, unsigned int mode_req)
+{
+ struct regmap *regmap = pctl->da9062->regmap;
+ unsigned int mode = mode_req;
+ unsigned int mask;
+ int ret;
+
+ mode &= DA9062AA_GPIO0_PIN_MASK;
+ mode <<= DA9062_PIN_SHIFT(offset);
+ mask = DA9062AA_GPIO0_PIN_MASK << DA9062_PIN_SHIFT(offset);
+
+ ret = regmap_update_bits(regmap, DA9062AA_GPIO_0_1 + (offset >> 1),
+ mask, mode);
+ if (!ret)
+ pctl->pin_config[offset] = mode_req;
+
+ return ret;
+}
+
+static int da9062_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct regmap *regmap = pctl->da9062->regmap;
+ int gpio_mode, val;
+ int ret;
+
+ gpio_mode = da9062_pctl_get_pin_mode(pctl, offset);
+ if (gpio_mode < 0)
+ return gpio_mode;
+
+ switch (gpio_mode) {
+ case DA9062_PIN_ALTERNATE:
+ return -ENOTSUPP;
+ case DA9062_PIN_GPI:
+ ret = regmap_read(regmap, DA9062AA_STATUS_B, &val);
+ if (ret < 0)
+ return ret;
+ break;
+ case DA9062_PIN_GPO_OD:
+ case DA9062_PIN_GPO_PP:
+ ret = regmap_read(regmap, DA9062AA_GPIO_MODE0_4, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return !!(val & BIT(offset));
+}
+
+static void da9062_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct regmap *regmap = pctl->da9062->regmap;
+
+ regmap_update_bits(regmap, DA9062AA_GPIO_MODE0_4, BIT(offset),
+ value << offset);
+}
+
+static int da9062_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ int gpio_mode;
+
+ gpio_mode = da9062_pctl_get_pin_mode(pctl, offset);
+ if (gpio_mode < 0)
+ return gpio_mode;
+
+ switch (gpio_mode) {
+ case DA9062_PIN_ALTERNATE:
+ return -ENOTSUPP;
+ case DA9062_PIN_GPI:
+ return GPIO_LINE_DIRECTION_IN;
+ case DA9062_PIN_GPO_OD:
+ case DA9062_PIN_GPO_PP:
+ return GPIO_LINE_DIRECTION_OUT;
+ }
+
+ return -EINVAL;
+}
+
+static int da9062_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct regmap *regmap = pctl->da9062->regmap;
+ struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
+ unsigned int gpi_type;
+ int ret;
+
+ ret = da9062_pctl_set_pin_mode(pctl, offset, DA9062_PIN_GPI);
+ if (ret)
+ return ret;
+
+ /*
+ * If the gpio is active low, mirror that in the hardware too. This
+ * does not affect gpio_get(), which reads and returns the raw gpio
+ * level, so the gpiolib active_low handling stays correct.
+ *
+ * 0 - active low, 1 - active high
+ */
+ gpi_type = !gpiod_is_active_low(desc);
+
+ return regmap_update_bits(regmap, DA9062AA_GPIO_0_1 + (offset >> 1),
+ DA9062AA_GPIO0_TYPE_MASK << DA9062_TYPE(offset),
+ gpi_type << DA9062_TYPE(offset));
+}
+
+static int da9062_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ unsigned int pin_config = pctl->pin_config[offset];
+ int ret;
+
+ ret = da9062_pctl_set_pin_mode(pctl, offset, pin_config);
+ if (ret)
+ return ret;
+
+ da9062_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int da9062_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct regmap *regmap = pctl->da9062->regmap;
+ int gpio_mode;
+
+ /*
+ * We need to meet the following restrictions [1, Figure 18]:
+ * - PIN_CONFIG_BIAS_PULL_DOWN -> only allowed if the pin is used as
+ * gpio input
+ * - PIN_CONFIG_BIAS_PULL_UP -> only allowed if the pin is used as
+ * gpio output open-drain.
+ */
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ return regmap_update_bits(regmap, DA9062AA_CONFIG_K,
+ BIT(offset), 0);
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ gpio_mode = da9062_pctl_get_pin_mode(pctl, offset);
+ if (gpio_mode < 0)
+ return -EINVAL;
+ else if (gpio_mode != DA9062_PIN_GPI)
+ return -ENOTSUPP;
+ return regmap_update_bits(regmap, DA9062AA_CONFIG_K,
+ BIT(offset), BIT(offset));
+ case PIN_CONFIG_BIAS_PULL_UP:
+ gpio_mode = da9062_pctl_get_pin_mode(pctl, offset);
+ if (gpio_mode < 0)
+ return -EINVAL;
+ else if (gpio_mode != DA9062_PIN_GPO_OD)
+ return -ENOTSUPP;
+ return regmap_update_bits(regmap, DA9062AA_CONFIG_K,
+ BIT(offset), BIT(offset));
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ return da9062_pctl_set_pin_mode(pctl, offset,
+ DA9062_PIN_GPO_OD);
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return da9062_pctl_set_pin_mode(pctl, offset,
+ DA9062_PIN_GPO_PP);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int da9062_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct da9062 *da9062 = pctl->da9062;
+
+ return regmap_irq_get_virq(da9062->regmap_irq,
+ DA9062_IRQ_GPI0 + offset);
+}
+
+static const struct gpio_chip reference_gc = {
+ .owner = THIS_MODULE,
+ .get = da9062_gpio_get,
+ .set = da9062_gpio_set,
+ .get_direction = da9062_gpio_get_direction,
+ .direction_input = da9062_gpio_direction_input,
+ .direction_output = da9062_gpio_direction_output,
+ .set_config = da9062_gpio_set_config,
+ .to_irq = da9062_gpio_to_irq,
+ .can_sleep = true,
+ .ngpio = DA9062_GPIO_NUM,
+ .base = -1,
+};
+
+static int da9062_pctl_probe(struct platform_device *pdev)
+{
+ struct device *parent = pdev->dev.parent;
+ struct da9062_pctl *pctl;
+ int i;
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ pctl->da9062 = dev_get_drvdata(parent);
+ if (!pctl->da9062)
+ return -EINVAL;
+
+ if (!device_property_present(parent, "gpio-controller"))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(pctl->pin_config); i++)
+ pctl->pin_config[i] = DA9062_PIN_GPO_PP;
+
+ /*
+ * Currently the driver supports only GPIO. Pinctrl/pinmux support
+ * can be added later if needed.
+ */
+ pctl->gc = reference_gc;
+ pctl->gc.label = dev_name(&pdev->dev);
+ pctl->gc.parent = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ pctl->gc.of_node = parent->of_node;
+#endif
+
+ platform_set_drvdata(pdev, pctl);
+
+ return devm_gpiochip_add_data(&pdev->dev, &pctl->gc, pctl);
+}
+
+static struct platform_driver da9062_pctl_driver = {
+ .probe = da9062_pctl_probe,
+ .driver = {
+ .name = "da9062-gpio",
+ },
+};
+module_platform_driver(da9062_pctl_driver);
+
+MODULE_AUTHOR("Marco Felsch <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("DA9062 PMIC pinctrl and GPIO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:da9062-gpio");
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 96f04d121ebd..e5dcf77fe43d 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
* Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ * Copyright (c) 2017, 2019 Paul Boddie <paul@boddie.org.uk>
*/
#include <linux/compiler.h>
@@ -900,6 +901,7 @@ static int jz4780_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, };
static int jz4780_i2c3_pins[] = { 0x6a, 0x6b, };
static int jz4780_i2c4_e_pins[] = { 0x8c, 0x8d, };
static int jz4780_i2c4_f_pins[] = { 0xb9, 0xb8, };
+static int jz4780_hdmi_ddc_pins[] = { 0xb9, 0xb8, };
static int jz4780_uart2_data_funcs[] = { 1, 1, };
static int jz4780_uart2_hwflow_funcs[] = { 1, 1, };
@@ -908,6 +910,7 @@ static int jz4780_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, };
static int jz4780_i2c3_funcs[] = { 1, 1, };
static int jz4780_i2c4_e_funcs[] = { 1, 1, };
static int jz4780_i2c4_f_funcs[] = { 1, 1, };
+static int jz4780_hdmi_ddc_funcs[] = { 0, 0, };
static const struct group_desc jz4780_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
@@ -950,6 +953,7 @@ static const struct group_desc jz4780_groups[] = {
INGENIC_PIN_GROUP("i2c3-data", jz4780_i2c3),
INGENIC_PIN_GROUP("i2c4-data-e", jz4780_i2c4_e),
INGENIC_PIN_GROUP("i2c4-data-f", jz4780_i2c4_f),
+ INGENIC_PIN_GROUP("hdmi-ddc", jz4780_hdmi_ddc),
INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit),
INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit),
{ "lcd-no-pins", },
@@ -982,6 +986,7 @@ static const char *jz4780_nemc_groups[] = {
static const char *jz4780_i2c3_groups[] = { "i2c3-data", };
static const char *jz4780_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", };
static const char *jz4780_cim_groups[] = { "cim-data", };
+static const char *jz4780_hdmi_ddc_groups[] = { "hdmi-ddc", };
static const struct function_desc jz4780_functions[] = {
{ "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
@@ -1014,6 +1019,8 @@ static const struct function_desc jz4780_functions[] = {
{ "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), },
{ "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
{ "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
+ { "hdmi-ddc", jz4780_hdmi_ddc_groups,
+ ARRAY_SIZE(jz4780_hdmi_ddc_groups), },
};
static const struct ingenic_chip_info jz4780_chip_info = {
@@ -1437,6 +1444,19 @@ static int x1830_mmc1_4bit_pins[] = { 0x45, 0x46, 0x47, };
static int x1830_i2c0_pins[] = { 0x0c, 0x0d, };
static int x1830_i2c1_pins[] = { 0x39, 0x3a, };
static int x1830_i2c2_pins[] = { 0x5b, 0x5c, };
+static int x1830_lcd_rgb_18bit_pins[] = {
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b,
+};
+static int x1830_lcd_slcd_8bit_pins[] = {
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x6c, 0x6d,
+ 0x69, 0x72, 0x73, 0x7b, 0x7a,
+};
+static int x1830_lcd_slcd_16bit_pins[] = {
+ 0x6e, 0x6f, 0x70, 0x71, 0x76, 0x77, 0x78, 0x79,
+};
static int x1830_pwm_pwm0_b_pins[] = { 0x31, };
static int x1830_pwm_pwm0_c_pins[] = { 0x4b, };
static int x1830_pwm_pwm1_b_pins[] = { 0x32, };
@@ -1486,6 +1506,16 @@ static int x1830_mmc1_4bit_funcs[] = { 0, 0, 0, };
static int x1830_i2c0_funcs[] = { 1, 1, };
static int x1830_i2c1_funcs[] = { 0, 0, };
static int x1830_i2c2_funcs[] = { 1, 1, };
+static int x1830_lcd_rgb_18bit_funcs[] = {
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+};
+static int x1830_lcd_slcd_8bit_funcs[] = {
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+};
+static int x1830_lcd_slcd_16bit_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, };
static int x1830_pwm_pwm0_b_funcs[] = { 0, };
static int x1830_pwm_pwm0_c_funcs[] = { 1, };
static int x1830_pwm_pwm1_b_funcs[] = { 0, };
@@ -1534,6 +1564,10 @@ static const struct group_desc x1830_groups[] = {
INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0),
INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1),
INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2),
+ INGENIC_PIN_GROUP("lcd-rgb-18bit", x1830_lcd_rgb_18bit),
+ INGENIC_PIN_GROUP("lcd-slcd-8bit", x1830_lcd_slcd_8bit),
+ INGENIC_PIN_GROUP("lcd-slcd-16bit", x1830_lcd_slcd_16bit),
+ { "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b),
INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c),
INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b),
@@ -1572,6 +1606,9 @@ static const char *x1830_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
static const char *x1830_i2c0_groups[] = { "i2c0-data", };
static const char *x1830_i2c1_groups[] = { "i2c1-data", };
static const char *x1830_i2c2_groups[] = { "i2c2-data", };
+static const char *x1830_lcd_groups[] = {
+ "lcd-rgb-18bit", "lcd-slcd-8bit", "lcd-slcd-16bit", "lcd-no-pins",
+};
static const char *x1830_pwm0_groups[] = { "pwm0-b", "pwm0-c", };
static const char *x1830_pwm1_groups[] = { "pwm1-b", "pwm1-c", };
static const char *x1830_pwm2_groups[] = { "pwm2-c-8", "pwm2-c-13", };
@@ -1593,6 +1630,7 @@ static const struct function_desc x1830_functions[] = {
{ "i2c0", x1830_i2c0_groups, ARRAY_SIZE(x1830_i2c0_groups), },
{ "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
{ "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
+ { "lcd", x1830_lcd_groups, ARRAY_SIZE(x1830_lcd_groups), },
{ "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
{ "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
{ "pwm2", x1830_pwm2_groups, ARRAY_SIZE(x1830_pwm2_groups), },
@@ -1916,13 +1954,19 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->info->version >= ID_JZ4760)
- return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
+ if (jzpc->info->version >= ID_JZ4760) {
+ if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+ return GPIO_LINE_DIRECTION_IN;
+ return GPIO_LINE_DIRECTION_OUT;
+ }
if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
- return true;
+ return GPIO_LINE_DIRECTION_IN;
+
+ if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_DIR))
+ return GPIO_LINE_DIRECTION_OUT;
- return !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_DIR);
+ return GPIO_LINE_DIRECTION_IN;
}
static const struct pinctrl_ops ingenic_pctlops = {
@@ -2158,7 +2202,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
default:
- unreachable();
+ /* unreachable */
+ break;
}
}
@@ -2278,11 +2323,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.direction_input = ingenic_gpio_direction_input;
jzgc->gc.direction_output = ingenic_gpio_direction_output;
jzgc->gc.get_direction = ingenic_gpio_get_direction;
-
- if (of_property_read_bool(node, "gpio-ranges")) {
- jzgc->gc.request = gpiochip_generic_request;
- jzgc->gc.free = gpiochip_generic_free;
- }
+ jzgc->gc.request = gpiochip_generic_request;
+ jzgc->gc.free = gpiochip_generic_free;
jzgc->irq = irq_of_parse_and_map(node, 0);
if (!jzgc->irq)
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index eb3dd0d46d6c..ed8eac6c1494 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -604,7 +604,10 @@ static int ocelot_gpio_get_direction(struct gpio_chip *chip,
regmap_read(info->map, REG(OCELOT_GPIO_OE, info, offset), &val);
- return !(val & BIT(offset % 32));
+ if (val & BIT(offset % 32))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int ocelot_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 674b7b5919df..5a312279b3c7 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -756,7 +756,10 @@ static int oxnas_gpio_get_direction(struct gpio_chip *chip,
struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
u32 mask = BIT(offset);
- return !(readl_relaxed(bank->reg_base + OUTPUT_EN) & mask);
+ if (readl_relaxed(bank->reg_base + OUTPUT_EN) & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int oxnas_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index e5d6d3f9753e..a6e2a4a4ca95 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -1990,7 +1990,10 @@ static int pic32_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
- return !!(readl(bank->reg_base + TRIS_REG) & BIT(offset));
+ if (readl(bank->reg_base + TRIS_REG) & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static void pic32_gpio_irq_ack(struct irq_data *data)
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index fa370c171cad..ec761ba2a2da 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1166,7 +1166,10 @@ static int pistachio_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct pistachio_gpio_bank *bank = gpiochip_get_data(chip);
- return !(gpio_readl(bank, GPIO_OUTPUT_EN) & BIT(offset));
+ if (gpio_readl(bank, GPIO_OUTPUT_EN) & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int pistachio_gpio_get(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index 26adbe9d6d42..cccbe072274e 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -184,7 +184,7 @@ static int rk805_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
/* default output */
if (!pci->pin_cfg[offset].dir_msk)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(pci->rk808->regmap,
pci->pin_cfg[offset].reg,
@@ -194,7 +194,10 @@ static int rk805_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
return ret;
}
- return !(val & pci->pin_cfg[offset].dir_msk);
+ if (val & pci->pin_cfg[offset].dir_msk)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static const struct gpio_chip rk805_gpio_chip = {
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index fc9a2a9959d9..098951346339 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -2549,7 +2549,10 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
clk_disable(bank->clk);
- return !(data & BIT(offset));
+ if (data & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
/*
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 617585be6a7d..da2d8365c690 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -777,7 +777,10 @@ static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
{
struct rza1_port *port = gpiochip_get_data(chip);
- return !!rza1_get_bit(port, RZA1_PM_REG, gpio);
+ if (rza1_get_bit(port, RZA1_PM_REG, gpio))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int rza1_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c
index a205964e839b..c5bf98c86b2b 100644
--- a/drivers/pinctrl/pinctrl-rza2.c
+++ b/drivers/pinctrl/pinctrl-rza2.c
@@ -135,10 +135,10 @@ static int rza2_chip_get_direction(struct gpio_chip *chip, unsigned int offset)
reg16 = (reg16 >> (pin * 2)) & RZA2_PDR_MASK;
if (reg16 == RZA2_PDR_OUTPUT)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
if (reg16 == RZA2_PDR_INPUT)
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
/*
* This GPIO controller has a default Hi-Z state that is not input or
@@ -146,7 +146,7 @@ static int rza2_chip_get_direction(struct gpio_chip *chip, unsigned int offset)
*/
rza2_pin_to_gpio(priv->base, offset, 1);
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int rza2_chip_direction_input(struct gpio_chip *chip,
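The rza2 hunk above also handles a third state: pins can come up Hi-Z, which is neither input nor output, so the driver forces them to input before reporting a direction. A sketch of that pattern, with hypothetical helper names:

	/* Controllers with a Hi-Z reset state cannot report it through
	 * .get_direction, which only knows IN and OUT; forcing Hi-Z pins
	 * to input first keeps the reported state truthful. The constant
	 * and helper below are hypothetical. */
	if (hw_state == EXAMPLE_HIZ) {
		example_set_input(priv, offset);
		return GPIO_LINE_DIRECTION_IN;
	}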
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 4f39a7945d01..7b8c7a0b13de 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -746,7 +746,10 @@ static int st_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
function = st_pctl_get_pin_function(&pc, offset);
if (function) {
st_pinconf_get_direction(&pc, offset, &config);
- return !ST_PINCONF_UNPACK_OE(config);
+ if (ST_PINCONF_UNPACK_OE(config))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
/*
@@ -758,7 +761,10 @@ static int st_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
direction |= ((value >> offset) & 0x1) << i;
}
- return (direction == ST_GPIO_DIRECTION_IN);
+ if (direction == ST_GPIO_DIRECTION_IN)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
/* Pinctrl Groups */
@@ -996,6 +1002,7 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
unsigned int function;
int offset = st_gpio_pin(pin_id);
char f[16];
+ int oe;
mutex_unlock(&pctldev->mutex);
pc = st_get_pio_control(pctldev, pin_id);
@@ -1008,10 +1015,11 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
else
snprintf(f, 5, "GPIO");
+ oe = st_gpio_get_direction(&pc_to_bank(pc)->gpio_chip, offset);
seq_printf(s, "[OE:%d,PU:%ld,OD:%ld]\t%s\n"
"\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
"de:%ld,rt-clk:%ld,rt-delay:%ld]",
- !st_gpio_get_direction(&pc_to_bank(pc)->gpio_chip, offset),
+ (oe == GPIO_LINE_DIRECTION_OUT),
ST_PINCONF_UNPACK_PU(config),
ST_PINCONF_UNPACK_OD(config),
f,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 16723797fa7c..60100b45f5e5 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -134,10 +134,14 @@ static int stmfx_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
ret = regmap_read(pctl->stmfx->map, reg, &val);
/*
* On stmfx, gpio pins direction is (0)input, (1)output.
- * .get_direction returns 0=out, 1=in
*/
+ if (ret)
+ return ret;
- return ret ? ret : !(val & mask);
+ if (val & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int stmfx_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
@@ -223,6 +227,13 @@ static int stmfx_pinconf_get(struct pinctrl_dev *pctldev,
dir = stmfx_gpio_get_direction(&pctl->gpio_chip, pin);
if (dir < 0)
return dir;
+
+ /*
+ * Currently gpiolib defines IN as 1 and OUT as 0, but do not rely
+ * on those numeric values in case they change in the future.
+ */
+ dir = (dir == GPIO_LINE_DIRECTION_IN) ? 1 : 0;
+
type = stmfx_pinconf_get_type(pctl, pin);
if (type < 0)
return type;
@@ -360,7 +371,7 @@ static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
if (val < 0)
return;
- if (!dir) {
+ if (dir == GPIO_LINE_DIRECTION_OUT) {
seq_printf(s, "output %s ", val ? "high" : "low");
if (type)
seq_printf(s, "open drain %s internal pull-up ",
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 566665931a04..6e74bd87d959 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -391,13 +391,16 @@ static int sx150x_gpio_get_direction(struct gpio_chip *chip,
int ret;
if (sx150x_pin_is_oscio(pctl, offset))
- return false;
+ return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(pctl->regmap, pctl->data->reg_dir, &value);
if (ret < 0)
return ret;
- return !!(value & BIT(offset));
+ if (value & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int sx150x_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -687,7 +690,7 @@ static int sx150x_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
- if (ret)
+ if (ret == GPIO_LINE_DIRECTION_IN)
return -EINVAL;
ret = sx150x_gpio_get(&pctl->gpio, pin);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 811af2f81c39..c5d4428f1f94 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -50,6 +50,16 @@ config PINCTRL_IPQ8074
Qualcomm Technologies Inc. IPQ8074 platform. Select this for
IPQ8074.
+config PINCTRL_IPQ6018
+ tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for
+ the Qualcomm Technologies Inc. TLMM block found on the
+ Qualcomm Technologies Inc. IPQ6018 platform. Select this for
+ IPQ6018.
+
config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index c2c2f9ad6827..d9e09045a776 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
obj-$(CONFIG_PINCTRL_IPQ8074) += pinctrl-ipq8074.o
+obj-$(CONFIG_PINCTRL_IPQ6018) += pinctrl-ipq6018.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
new file mode 100644
index 000000000000..38c33a778cb8
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+static const struct pinctrl_pin_desc ipq6018_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+
+enum ipq6018_functions {
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_audio0,
+ msm_mux_audio1,
+ msm_mux_audio2,
+ msm_mux_audio3,
+ msm_mux_audio_rxbclk,
+ msm_mux_audio_rxfsync,
+ msm_mux_audio_rxmclk,
+ msm_mux_audio_rxmclkin,
+ msm_mux_audio_txbclk,
+ msm_mux_audio_txfsync,
+ msm_mux_audio_txmclk,
+ msm_mux_audio_txmclkin,
+ msm_mux_blsp0_i2c,
+ msm_mux_blsp0_spi,
+ msm_mux_blsp0_uart,
+ msm_mux_blsp1_i2c,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp1_uart,
+ msm_mux_blsp2_i2c,
+ msm_mux_blsp2_spi,
+ msm_mux_blsp2_uart,
+ msm_mux_blsp3_i2c,
+ msm_mux_blsp3_spi,
+ msm_mux_blsp3_uart,
+ msm_mux_blsp4_i2c,
+ msm_mux_blsp4_spi,
+ msm_mux_blsp4_uart,
+ msm_mux_blsp5_i2c,
+ msm_mux_blsp5_uart,
+ msm_mux_burn0,
+ msm_mux_burn1,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_cxc0,
+ msm_mux_cxc1,
+ msm_mux_dbg_out,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gpio,
+ msm_mux_lpass_aud,
+ msm_mux_lpass_aud0,
+ msm_mux_lpass_aud1,
+ msm_mux_lpass_aud2,
+ msm_mux_lpass_pcm,
+ msm_mux_lpass_pdm,
+ msm_mux_mac00,
+ msm_mux_mac01,
+ msm_mux_mac10,
+ msm_mux_mac11,
+ msm_mux_mac12,
+ msm_mux_mac13,
+ msm_mux_mac20,
+ msm_mux_mac21,
+ msm_mux_mdc,
+ msm_mux_mdio,
+ msm_mux_pcie0_clk,
+ msm_mux_pcie0_rst,
+ msm_mux_pcie0_wake,
+ msm_mux_prng_rosc,
+ msm_mux_pta1_0,
+ msm_mux_pta1_1,
+ msm_mux_pta1_2,
+ msm_mux_pta2_0,
+ msm_mux_pta2_1,
+ msm_mux_pta2_2,
+ msm_mux_pwm00,
+ msm_mux_pwm01,
+ msm_mux_pwm02,
+ msm_mux_pwm03,
+ msm_mux_pwm04,
+ msm_mux_pwm10,
+ msm_mux_pwm11,
+ msm_mux_pwm12,
+ msm_mux_pwm13,
+ msm_mux_pwm14,
+ msm_mux_pwm20,
+ msm_mux_pwm21,
+ msm_mux_pwm22,
+ msm_mux_pwm23,
+ msm_mux_pwm24,
+ msm_mux_pwm30,
+ msm_mux_pwm31,
+ msm_mux_pwm32,
+ msm_mux_pwm33,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_qpic_pad,
+ msm_mux_rx0,
+ msm_mux_rx1,
+ msm_mux_rx_swrm,
+ msm_mux_rx_swrm0,
+ msm_mux_rx_swrm1,
+ msm_mux_sd_card,
+ msm_mux_sd_write,
+ msm_mux_tsens_max,
+ msm_mux_tx_swrm,
+ msm_mux_tx_swrm0,
+ msm_mux_tx_swrm1,
+ msm_mux_tx_swrm2,
+ msm_mux_wci20,
+ msm_mux_wci21,
+ msm_mux_wci22,
+ msm_mux_wci23,
+ msm_mux_wsa_swrm,
+ msm_mux__,
+};
+
+static const char * const blsp3_uart_groups[] = {
+ "gpio73", "gpio74", "gpio75", "gpio76",
+};
+
+static const char * const blsp3_i2c_groups[] = {
+ "gpio73", "gpio74",
+};
+
+static const char * const blsp3_spi_groups[] = {
+ "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", "gpio78", "gpio79",
+};
+
+static const char * const wci20_groups[] = {
+ "gpio0", "gpio2",
+};
+
+static const char * const qpic_pad_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio9", "gpio10",
+ "gpio11", "gpio17",
+};
+
+static const char * const burn0_groups[] = {
+ "gpio0",
+};
+
+static const char * const mac12_groups[] = {
+ "gpio1", "gpio11",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio1",
+};
+
+static const char * const burn1_groups[] = {
+ "gpio1",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio0",
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8", "gpio9",
+ "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15", "gpio16",
+ "gpio17",
+};
+
+static const char * const mac01_groups[] = {
+ "gpio3", "gpio4",
+};
+
+static const char * const mac21_groups[] = {
+ "gpio5", "gpio6",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio9",
+};
+
+static const char * const cxc0_groups[] = {
+ "gpio9", "gpio16",
+};
+
+static const char * const mac13_groups[] = {
+ "gpio9", "gpio16",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio9",
+};
+
+static const char * const wci22_groups[] = {
+ "gpio11", "gpio17",
+};
+
+static const char * const pwm00_groups[] = {
+ "gpio18",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio18",
+};
+
+static const char * const wci23_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const mac11_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const pwm10_groups[] = {
+ "gpio19",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio19",
+};
+
+static const char * const pwm20_groups[] = {
+ "gpio20",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio20",
+};
+
+static const char * const pwm30_groups[] = {
+ "gpio21",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio21",
+};
+
+static const char * const audio_txmclk_groups[] = {
+ "gpio22",
+};
+
+static const char * const audio_txmclkin_groups[] = {
+ "gpio22",
+};
+
+static const char * const pwm02_groups[] = {
+ "gpio22",
+};
+
+static const char * const tx_swrm0_groups[] = {
+ "gpio22",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio22",
+};
+
+static const char * const audio_txbclk_groups[] = {
+ "gpio23",
+};
+
+static const char * const pwm12_groups[] = {
+ "gpio23",
+};
+
+static const char * const wsa_swrm_groups[] = {
+ "gpio23", "gpio24",
+};
+
+static const char * const tx_swrm1_groups[] = {
+ "gpio23",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio23",
+};
+
+static const char * const audio_txfsync_groups[] = {
+ "gpio24",
+};
+
+static const char * const pwm22_groups[] = {
+ "gpio24",
+};
+
+static const char * const tx_swrm2_groups[] = {
+ "gpio24",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio24",
+};
+
+static const char * const audio0_groups[] = {
+ "gpio25", "gpio32",
+};
+
+static const char * const pwm32_groups[] = {
+ "gpio25",
+};
+
+static const char * const tx_swrm_groups[] = {
+ "gpio25",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio25",
+};
+
+static const char * const audio1_groups[] = {
+ "gpio26", "gpio33",
+};
+
+static const char * const pwm04_groups[] = {
+ "gpio26",
+};
+
+static const char * const audio2_groups[] = {
+ "gpio27",
+};
+
+static const char * const pwm14_groups[] = {
+ "gpio27",
+};
+
+static const char * const audio3_groups[] = {
+ "gpio28",
+};
+
+static const char * const pwm24_groups[] = {
+ "gpio28",
+};
+
+static const char * const audio_rxmclk_groups[] = {
+ "gpio29",
+};
+
+static const char * const audio_rxmclkin_groups[] = {
+ "gpio29",
+};
+
+static const char * const pwm03_groups[] = {
+ "gpio29",
+};
+
+static const char * const lpass_pdm_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const lpass_aud_groups[] = {
+ "gpio29",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio29",
+};
+
+static const char * const audio_rxbclk_groups[] = {
+ "gpio30",
+};
+
+static const char * const pwm13_groups[] = {
+ "gpio30",
+};
+
+static const char * const lpass_aud0_groups[] = {
+ "gpio30",
+};
+
+static const char * const rx_swrm_groups[] = {
+ "gpio30",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio30",
+};
+
+static const char * const audio_rxfsync_groups[] = {
+ "gpio31",
+};
+
+static const char * const pwm23_groups[] = {
+ "gpio31",
+};
+
+static const char * const lpass_aud1_groups[] = {
+ "gpio31",
+};
+
+static const char * const rx_swrm0_groups[] = {
+ "gpio31",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio31",
+};
+
+static const char * const pwm33_groups[] = {
+ "gpio32",
+};
+
+static const char * const lpass_aud2_groups[] = {
+ "gpio32",
+};
+
+static const char * const rx_swrm1_groups[] = {
+ "gpio32",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio32",
+};
+
+static const char * const lpass_pcm_groups[] = {
+ "gpio34", "gpio35", "gpio36", "gpio37",
+};
+
+static const char * const mac10_groups[] = {
+ "gpio34", "gpio35",
+};
+
+static const char * const mac00_groups[] = {
+ "gpio34", "gpio35",
+};
+
+static const char * const mac20_groups[] = {
+ "gpio36", "gpio37",
+};
+
+static const char * const blsp0_uart_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41",
+};
+
+static const char * const blsp0_i2c_groups[] = {
+ "gpio38", "gpio39",
+};
+
+static const char * const blsp0_spi_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41",
+};
+
+static const char * const blsp2_uart_groups[] = {
+ "gpio42", "gpio43", "gpio44", "gpio45",
+};
+
+static const char * const blsp2_i2c_groups[] = {
+ "gpio42", "gpio43",
+};
+
+static const char * const blsp2_spi_groups[] = {
+ "gpio42", "gpio43", "gpio44", "gpio45",
+};
+
+static const char * const blsp5_i2c_groups[] = {
+ "gpio46", "gpio47",
+};
+
+static const char * const blsp5_uart_groups[] = {
+ "gpio48", "gpio49",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio48",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio49",
+};
+
+static const char * const pwm01_groups[] = {
+ "gpio50",
+};
+
+static const char * const pta1_1_groups[] = {
+ "gpio51",
+};
+
+static const char * const pwm11_groups[] = {
+ "gpio51",
+};
+
+static const char * const rx1_groups[] = {
+ "gpio51",
+};
+
+static const char * const pta1_2_groups[] = {
+ "gpio52",
+};
+
+static const char * const pwm21_groups[] = {
+ "gpio52",
+};
+
+static const char * const pta1_0_groups[] = {
+ "gpio53",
+};
+
+static const char * const pwm31_groups[] = {
+ "gpio53",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio53",
+};
+
+static const char * const blsp4_uart_groups[] = {
+ "gpio55", "gpio56", "gpio57", "gpio58",
+};
+
+static const char * const blsp4_i2c_groups[] = {
+ "gpio55", "gpio56",
+};
+
+static const char * const blsp4_spi_groups[] = {
+ "gpio55", "gpio56", "gpio57", "gpio58",
+};
+
+static const char * const pcie0_clk_groups[] = {
+ "gpio59",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio59",
+};
+
+static const char * const pcie0_rst_groups[] = {
+ "gpio60",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio60",
+};
+
+static const char * const pcie0_wake_groups[] = {
+ "gpio61",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio61",
+};
+
+static const char * const sd_card_groups[] = {
+ "gpio62",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio63",
+};
+
+static const char * const rx0_groups[] = {
+ "gpio63",
+};
+
+static const char * const tsens_max_groups[] = {
+ "gpio63",
+};
+
+static const char * const mdc_groups[] = {
+ "gpio64",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79",
+};
+
+static const char * const mdio_groups[] = {
+ "gpio65",
+};
+
+static const char * const pta2_0_groups[] = {
+ "gpio66",
+};
+
+static const char * const wci21_groups[] = {
+ "gpio66", "gpio68",
+};
+
+static const char * const cxc1_groups[] = {
+ "gpio66", "gpio68",
+};
+
+static const char * const pta2_1_groups[] = {
+ "gpio67",
+};
+
+static const char * const pta2_2_groups[] = {
+ "gpio68",
+};
+
+static const char * const blsp1_uart_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72",
+};
+
+static const char * const blsp1_i2c_groups[] = {
+ "gpio69", "gpio70",
+};
+
+static const char * const blsp1_spi_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio69", "gpio71",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio70",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79",
+};
+
+static const struct msm_function ipq6018_functions[] = {
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(audio0),
+ FUNCTION(audio1),
+ FUNCTION(audio2),
+ FUNCTION(audio3),
+ FUNCTION(audio_rxbclk),
+ FUNCTION(audio_rxfsync),
+ FUNCTION(audio_rxmclk),
+ FUNCTION(audio_rxmclkin),
+ FUNCTION(audio_txbclk),
+ FUNCTION(audio_txfsync),
+ FUNCTION(audio_txmclk),
+ FUNCTION(audio_txmclkin),
+ FUNCTION(blsp0_i2c),
+ FUNCTION(blsp0_spi),
+ FUNCTION(blsp0_uart),
+ FUNCTION(blsp1_i2c),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp1_uart),
+ FUNCTION(blsp2_i2c),
+ FUNCTION(blsp2_spi),
+ FUNCTION(blsp2_uart),
+ FUNCTION(blsp3_i2c),
+ FUNCTION(blsp3_spi),
+ FUNCTION(blsp3_uart),
+ FUNCTION(blsp4_i2c),
+ FUNCTION(blsp4_spi),
+ FUNCTION(blsp4_uart),
+ FUNCTION(blsp5_i2c),
+ FUNCTION(blsp5_uart),
+ FUNCTION(burn0),
+ FUNCTION(burn1),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(cxc0),
+ FUNCTION(cxc1),
+ FUNCTION(dbg_out),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gcc_tlmm),
+ FUNCTION(gpio),
+ FUNCTION(lpass_aud),
+ FUNCTION(lpass_aud0),
+ FUNCTION(lpass_aud1),
+ FUNCTION(lpass_aud2),
+ FUNCTION(lpass_pcm),
+ FUNCTION(lpass_pdm),
+ FUNCTION(mac00),
+ FUNCTION(mac01),
+ FUNCTION(mac10),
+ FUNCTION(mac11),
+ FUNCTION(mac12),
+ FUNCTION(mac13),
+ FUNCTION(mac20),
+ FUNCTION(mac21),
+ FUNCTION(mdc),
+ FUNCTION(mdio),
+ FUNCTION(pcie0_clk),
+ FUNCTION(pcie0_rst),
+ FUNCTION(pcie0_wake),
+ FUNCTION(prng_rosc),
+ FUNCTION(pta1_0),
+ FUNCTION(pta1_1),
+ FUNCTION(pta1_2),
+ FUNCTION(pta2_0),
+ FUNCTION(pta2_1),
+ FUNCTION(pta2_2),
+ FUNCTION(pwm00),
+ FUNCTION(pwm01),
+ FUNCTION(pwm02),
+ FUNCTION(pwm03),
+ FUNCTION(pwm04),
+ FUNCTION(pwm10),
+ FUNCTION(pwm11),
+ FUNCTION(pwm12),
+ FUNCTION(pwm13),
+ FUNCTION(pwm14),
+ FUNCTION(pwm20),
+ FUNCTION(pwm21),
+ FUNCTION(pwm22),
+ FUNCTION(pwm23),
+ FUNCTION(pwm24),
+ FUNCTION(pwm30),
+ FUNCTION(pwm31),
+ FUNCTION(pwm32),
+ FUNCTION(pwm33),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(qpic_pad),
+ FUNCTION(rx0),
+ FUNCTION(rx1),
+ FUNCTION(rx_swrm),
+ FUNCTION(rx_swrm0),
+ FUNCTION(rx_swrm1),
+ FUNCTION(sd_card),
+ FUNCTION(sd_write),
+ FUNCTION(tsens_max),
+ FUNCTION(tx_swrm),
+ FUNCTION(tx_swrm0),
+ FUNCTION(tx_swrm1),
+ FUNCTION(tx_swrm2),
+ FUNCTION(wci20),
+ FUNCTION(wci21),
+ FUNCTION(wci22),
+ FUNCTION(wci23),
+ FUNCTION(wsa_swrm),
+};
+
+static const struct msm_pingroup ipq6018_groups[] = {
+ PINGROUP(0, qpic_pad, wci20, qdss_traceclk_b, _, burn0, _, _, _, _),
+ PINGROUP(1, qpic_pad, mac12, qdss_tracectl_b, _, burn1, _, _, _, _),
+ PINGROUP(2, qpic_pad, wci20, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(3, qpic_pad, mac01, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(4, qpic_pad, mac01, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(5, qpic_pad, mac21, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(6, qpic_pad, mac21, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(7, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(8, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(9, qpic_pad, atest_char, cxc0, mac13, dbg_out, qdss_tracedata_b, _, _, _),
+ PINGROUP(10, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(11, qpic_pad, wci22, mac12, qdss_tracedata_b, _, _, _, _, _),
+ PINGROUP(12, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(13, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(14, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(15, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(16, qpic_pad, cxc0, mac13, qdss_tracedata_b, _, _, _, _, _),
+ PINGROUP(17, qpic_pad, qdss_tracedata_b, wci22, _, _, _, _, _, _),
+ PINGROUP(18, pwm00, atest_char0, wci23, mac11, _, _, _, _, _),
+ PINGROUP(19, pwm10, atest_char1, wci23, mac11, _, _, _, _, _),
+ PINGROUP(20, pwm20, atest_char2, _, _, _, _, _, _, _),
+ PINGROUP(21, pwm30, atest_char3, _, _, _, _, _, _, _),
+ PINGROUP(22, audio_txmclk, audio_txmclkin, pwm02, tx_swrm0, _, qdss_cti_trig_out_b0, _, _, _),
+ PINGROUP(23, audio_txbclk, pwm12, wsa_swrm, tx_swrm1, _, qdss_cti_trig_in_b0, _, _, _),
+ PINGROUP(24, audio_txfsync, pwm22, wsa_swrm, tx_swrm2, _, qdss_cti_trig_out_b1, _, _, _),
+ PINGROUP(25, audio0, pwm32, tx_swrm, _, qdss_cti_trig_in_b1, _, _, _, _),
+ PINGROUP(26, audio1, pwm04, _, _, _, _, _, _, _),
+ PINGROUP(27, audio2, pwm14, _, _, _, _, _, _, _),
+ PINGROUP(28, audio3, pwm24, _, _, _, _, _, _, _),
+ PINGROUP(29, audio_rxmclk, audio_rxmclkin, pwm03, lpass_pdm, lpass_aud, qdss_cti_trig_in_a1, _, _, _),
+ PINGROUP(30, audio_rxbclk, pwm13, lpass_pdm, lpass_aud0, rx_swrm, _, qdss_cti_trig_out_a1, _, _),
+ PINGROUP(31, audio_rxfsync, pwm23, lpass_pdm, lpass_aud1, rx_swrm0, _, qdss_cti_trig_in_a0, _, _),
+ PINGROUP(32, audio0, pwm33, lpass_pdm, lpass_aud2, rx_swrm1, _, qdss_cti_trig_out_a0, _, _),
+ PINGROUP(33, audio1, _, _, _, _, _, _, _, _),
+ PINGROUP(34, lpass_pcm, mac10, mac00, _, _, _, _, _, _),
+ PINGROUP(35, lpass_pcm, mac10, mac00, _, _, _, _, _, _),
+ PINGROUP(36, lpass_pcm, mac20, _, _, _, _, _, _, _),
+ PINGROUP(37, lpass_pcm, mac20, _, _, _, _, _, _, _),
+ PINGROUP(38, blsp0_uart, blsp0_i2c, blsp0_spi, _, _, _, _, _, _),
+ PINGROUP(39, blsp0_uart, blsp0_i2c, blsp0_spi, _, _, _, _, _, _),
+ PINGROUP(40, blsp0_uart, blsp0_spi, _, _, _, _, _, _, _),
+ PINGROUP(41, blsp0_uart, blsp0_spi, _, _, _, _, _, _, _),
+ PINGROUP(42, blsp2_uart, blsp2_i2c, blsp2_spi, _, _, _, _, _, _),
+ PINGROUP(43, blsp2_uart, blsp2_i2c, blsp2_spi, _, _, _, _, _, _),
+ PINGROUP(44, blsp2_uart, blsp2_spi, _, _, _, _, _, _, _),
+ PINGROUP(45, blsp2_uart, blsp2_spi, _, _, _, _, _, _, _),
+ PINGROUP(46, blsp5_i2c, _, _, _, _, _, _, _, _),
+ PINGROUP(47, blsp5_i2c, _, _, _, _, _, _, _, _),
+ PINGROUP(48, blsp5_uart, _, qdss_traceclk_a, _, _, _, _, _, _),
+ PINGROUP(49, blsp5_uart, _, qdss_tracectl_a, _, _, _, _, _, _),
+ PINGROUP(50, pwm01, _, _, _, _, _, _, _, _),
+ PINGROUP(51, pta1_1, pwm11, _, rx1, _, _, _, _, _),
+ PINGROUP(52, pta1_2, pwm21, _, _, _, _, _, _, _),
+ PINGROUP(53, pta1_0, pwm31, prng_rosc, _, _, _, _, _, _),
+ PINGROUP(54, _, _, _, _, _, _, _, _, _),
+ PINGROUP(55, blsp4_uart, blsp4_i2c, blsp4_spi, _, _, _, _, _, _),
+ PINGROUP(56, blsp4_uart, blsp4_i2c, blsp4_spi, _, _, _, _, _, _),
+ PINGROUP(57, blsp4_uart, blsp4_spi, _, _, _, _, _, _, _),
+ PINGROUP(58, blsp4_uart, blsp4_spi, _, _, _, _, _, _, _),
+ PINGROUP(59, pcie0_clk, _, _, cri_trng0, _, _, _, _, _),
+ PINGROUP(60, pcie0_rst, _, _, cri_trng1, _, _, _, _, _),
+ PINGROUP(61, pcie0_wake, _, _, cri_trng, _, _, _, _, _),
+ PINGROUP(62, sd_card, _, _, _, _, _, _, _, _),
+ PINGROUP(63, sd_write, rx0, _, tsens_max, _, _, _, _, _),
+ PINGROUP(64, mdc, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(65, mdio, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(66, pta2_0, wci21, cxc1, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(67, pta2_1, qdss_tracedata_a, _, _, _, _, _, _, _),
+ PINGROUP(68, pta2_2, wci21, cxc1, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(69, blsp1_uart, blsp1_i2c, blsp1_spi, gcc_plltest, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(70, blsp1_uart, blsp1_i2c, blsp1_spi, gcc_tlmm, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(71, blsp1_uart, blsp1_spi, gcc_plltest, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(72, blsp1_uart, blsp1_spi, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(73, blsp3_uart, blsp3_i2c, blsp3_spi, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(74, blsp3_uart, blsp3_i2c, blsp3_spi, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(75, blsp3_uart, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(76, blsp3_uart, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(77, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(78, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(79, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _, _),
+};
+
+static const struct msm_pinctrl_soc_data ipq6018_pinctrl = {
+ .pins = ipq6018_pins,
+ .npins = ARRAY_SIZE(ipq6018_pins),
+ .functions = ipq6018_functions,
+ .nfunctions = ARRAY_SIZE(ipq6018_functions),
+ .groups = ipq6018_groups,
+ .ngroups = ARRAY_SIZE(ipq6018_groups),
+ .ngpios = 80,
+};
+
+static int ipq6018_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &ipq6018_pinctrl);
+}
+
+static const struct of_device_id ipq6018_pinctrl_of_match[] = {
+ { .compatible = "qcom,ipq6018-pinctrl", },
+ { },
+};
+
+static struct platform_driver ipq6018_pinctrl_driver = {
+ .driver = {
+ .name = "ipq6018-pinctrl",
+ .of_match_table = ipq6018_pinctrl_of_match,
+ },
+ .probe = ipq6018_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init ipq6018_pinctrl_init(void)
+{
+ return platform_driver_register(&ipq6018_pinctrl_driver);
+}
+arch_initcall(ipq6018_pinctrl_init);
+
+static void __exit ipq6018_pinctrl_exit(void)
+{
+ platform_driver_unregister(&ipq6018_pinctrl_driver);
+}
+module_exit(ipq6018_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI ipq6018 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, ipq6018_pinctrl_of_match);
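The PINGROUP() macro above lays every GPIO out in its own 4 KiB register tile. The offsets it produces for, say, gpio5 follow directly from REG_SIZE = 0x1000 (derived from the macro itself, not from a datasheet):

	ctl_reg         = 0x1000 * 5       = 0x5000
	io_reg          = 0x4 + 0x1000 * 5 = 0x5004
	intr_cfg_reg    = 0x8 + 0x1000 * 5 = 0x5008
	intr_status_reg = 0xc + 0x1000 * 5 = 0x500c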
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index c2fb1ddf2f22..ac717ee38416 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -299,7 +299,7 @@ static const char * const gpio_groups[] = {
};
static const char * const mdio_groups[] = {
- "gpio0", "gpio1", "gpio10", "gpio11",
+ "gpio0", "gpio1", "gpio2", "gpio10", "gpio11", "gpio66",
};
static const char * const mi2s_groups[] = {
@@ -403,8 +403,8 @@ static const char * const usb2_hsic_groups[] = {
};
static const char * const rgmii2_groups[] = {
- "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32",
- "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62",
+ "gpio2", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62", "gpio66",
};
static const char * const sata_groups[] = {
@@ -539,7 +539,7 @@ static const struct msm_function ipq8064_functions[] = {
static const struct msm_pingroup ipq8064_groups[] = {
PINGROUP(0, mdio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(1, mdio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(2, gsbi5_spi_cs3, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, gsbi5_spi_cs3, rgmii2, mdio, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(3, pcie1_rst, pcie1_prsnt, pdm, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(4, pcie1_pwren_n, pcie1_pwren, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(5, pcie1_clk_req, pcie1_pwrflt, NA, NA, NA, NA, NA, NA, NA, NA),
@@ -603,7 +603,7 @@ static const struct msm_pingroup ipq8064_groups[] = {
PINGROUP(63, pcie3_rst, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(65, pcie3_clk_req, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(66, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, rgmii2, mdio, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(67, usb2_hsic, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(68, usb2_hsic, NA, NA, NA, NA, NA, NA, NA, NA, NA),
SDC_PINGROUP(sdc3_clk, 0x204a, 14, 6),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 1a948c3f54b7..85858c1d56d0 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -22,6 +22,8 @@
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/log2.h>
+#include <linux/qcom_scm.h>
+#include <linux/io.h>
#include <linux/soc/qcom/irq.h>
@@ -60,6 +62,8 @@ struct msm_pinctrl {
struct irq_chip irq_chip;
int irq;
+ bool intr_target_use_scm;
+
raw_spinlock_t lock;
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
@@ -68,6 +72,7 @@ struct msm_pinctrl {
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
+ u32 phys_base[MAX_NR_TILES];
};
#define MSM_ACCESSOR(name) \
@@ -489,8 +494,8 @@ static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
val = msm_readl_ctl(pctrl, g);
- /* 0 = output, 1 = input */
- return val & BIT(g->oe_bit) ? 0 : 1;
+ return val & BIT(g->oe_bit) ? GPIO_LINE_DIRECTION_OUT :
+ GPIO_LINE_DIRECTION_IN;
}
static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -692,7 +697,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
pol = msm_readl_intr_cfg(pctrl, g);
pol ^= BIT(g->intr_polarity_bit);
- msm_writel_intr_cfg(val, pctrl, g);
+ msm_writel_intr_cfg(pol, pctrl, g);
val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit);
intstat = msm_readl_intr_status(pctrl, g);
@@ -882,11 +887,30 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
else
clear_bit(d->hwirq, pctrl->dual_edge_irqs);
- /* Route interrupts to application cpu */
- val = msm_readl_intr_target(pctrl, g);
- val &= ~(7 << g->intr_target_bit);
- val |= g->intr_target_kpss_val << g->intr_target_bit;
- msm_writel_intr_target(val, pctrl, g);
+ /*
+ * Route interrupts to the application CPU. With
+ * intr_target_use_scm set, interrupts are routed to the
+ * application CPU using SCM calls.
+ */
+ if (pctrl->intr_target_use_scm) {
+ u32 addr = pctrl->phys_base[0] + g->intr_target_reg;
+ int ret;
+
+ qcom_scm_io_readl(addr, &val);
+
+ val &= ~(7 << g->intr_target_bit);
+ val |= g->intr_target_kpss_val << g->intr_target_bit;
+
+ ret = qcom_scm_io_writel(addr, val);
+ if (ret)
+ dev_err(pctrl->dev,
+ "Failed routing %lu interrupt to Apps proc",
+ d->hwirq);
+ } else {
+ val = msm_readl_intr_target(pctrl, g);
+ val &= ~(7 << g->intr_target_bit);
+ val |= g->intr_target_kpss_val << g->intr_target_bit;
+ msm_writel_intr_target(val, pctrl, g);
+ }
/* Update configuration for gpio.
* RAW_STATUS_EN is left on for all gpio irqs. Due to the
@@ -1010,6 +1034,29 @@ static void msm_gpio_irq_relres(struct irq_data *d)
module_put(gc->owner);
}
+static int msm_gpio_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *dest, bool force)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return irq_chip_set_affinity_parent(d, dest, force);
+
+ return 0;
+}
+
+static int msm_gpio_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return irq_chip_set_vcpu_affinity_parent(d, vcpu_info);
+
+ return 0;
+}
+
static void msm_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1108,6 +1155,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
+ pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
+ pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
if (np) {
@@ -1240,6 +1289,9 @@ int msm_pinctrl_probe(struct platform_device *pdev,
pctrl->dev = &pdev->dev;
pctrl->soc = soc_data;
pctrl->chip = msm_gpio_template;
+ pctrl->intr_target_use_scm = of_device_is_compatible(
+ pctrl->dev->of_node,
+ "qcom,ipq8064-pinctrl");
raw_spin_lock_init(&pctrl->lock);
@@ -1252,9 +1304,12 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pctrl->regs[i]);
}
} else {
- pctrl->regs[0] = devm_platform_ioremap_resource(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctrl->regs[0] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pctrl->regs[0]))
return PTR_ERR(pctrl->regs[0]);
+
+ pctrl->phys_base[0] = res->start;
}
msm_pinctrl_setup_pm_reset(pctrl);
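
The SCM branch in the hunk above is an ordinary read-modify-write of the 3-bit interrupt-target field, only performed through secure-world I/O calls instead of direct MMIO. A minimal sketch of that update, modelled in plain C with stand-in accessors (the qcom_scm_io_readl()/qcom_scm_io_writel() pattern and the field layout come from the hunk; the register address and values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for qcom_scm_io_readl()/qcom_scm_io_writel(). */
static uint32_t fake_reg;
static int scm_io_readl(uint32_t addr, uint32_t *val)
{
	(void)addr;
	*val = fake_reg;
	return 0;
}
static int scm_io_writel(uint32_t addr, uint32_t val)
{
	(void)addr;
	fake_reg = val;
	return 0;
}

/* Rewrite the 3-bit interrupt-target field via SCM, as in the patch. */
static int route_intr(uint32_t addr, unsigned int target_bit, uint32_t kpss_val)
{
	uint32_t val;
	int ret;

	ret = scm_io_readl(addr, &val);
	if (ret)
		return ret;

	val &= ~(7u << target_bit);	/* clear the target field */
	val |= kpss_val << target_bit;	/* select the Apps CPU    */

	return scm_io_writel(addr, val);
}

int main(void)
{
	fake_reg = 7u << 5;			/* illustrative reset value */
	route_intr(0x1000, 5, 3);
	printf("reg = 0x%x\n", (unsigned)fake_reg);	/* prints 0x60 */
	return 0;
}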
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index cf0e0dc42b84..9552851b96f1 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -26,8 +26,8 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7792 if ARCH_R8A7792
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
- select PINCTRL_PFC_R8A77950 if ARCH_R8A77950 || ARCH_R8A7795
- select PINCTRL_PFC_R8A77951 if ARCH_R8A77951 || ARCH_R8A7795
+ select PINCTRL_PFC_R8A77950 if ARCH_R8A77950
+ select PINCTRL_PFC_R8A77951 if ARCH_R8A77951
select PINCTRL_PFC_R8A77960 if ARCH_R8A77960
select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 82209116955b..a2e19efa26e3 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -726,6 +726,27 @@ static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
#ifdef DEBUG
+#define SH_PFC_MAX_REGS 300
+#define SH_PFC_MAX_ENUMS 3000
+
+static unsigned int sh_pfc_errors __initdata;
+static unsigned int sh_pfc_warnings __initdata;
+static u32 *sh_pfc_regs __initdata;
+static u32 sh_pfc_num_regs __initdata;
+static u16 *sh_pfc_enums __initdata;
+static u32 sh_pfc_num_enums __initdata;
+
+#define sh_pfc_err(fmt, ...) \
+ do { \
+ pr_err("%s: " fmt, drvname, ##__VA_ARGS__); \
+ sh_pfc_errors++; \
+ } while (0)
+#define sh_pfc_warn(fmt, ...) \
+ do { \
+ pr_warn("%s: " fmt, drvname, ##__VA_ARGS__); \
+ sh_pfc_warnings++; \
+ } while (0)
+
static bool __init is0s(const u16 *enum_ids, unsigned int n)
{
unsigned int i;
@@ -737,77 +758,181 @@ static bool __init is0s(const u16 *enum_ids, unsigned int n)
return true;
}
-static unsigned int sh_pfc_errors __initdata = 0;
-static unsigned int sh_pfc_warnings __initdata = 0;
+static bool __init same_name(const char *a, const char *b)
+{
+ if (!a || !b)
+ return false;
+
+ return !strcmp(a, b);
+}
+
+static void __init sh_pfc_check_reg(const char *drvname, u32 reg)
+{
+ unsigned int i;
+
+ for (i = 0; i < sh_pfc_num_regs; i++)
+ if (reg == sh_pfc_regs[i]) {
+ sh_pfc_err("reg 0x%x conflict\n", reg);
+ return;
+ }
+
+ if (sh_pfc_num_regs == SH_PFC_MAX_REGS) {
+ pr_warn_once("%s: Please increase SH_PFC_MAX_REGS\n", drvname);
+ return;
+ }
+
+ sh_pfc_regs[sh_pfc_num_regs++] = reg;
+}
+
+static int __init sh_pfc_check_enum(const char *drvname, u16 enum_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sh_pfc_num_enums; i++) {
+ if (enum_id == sh_pfc_enums[i])
+ return -EINVAL;
+ }
+
+ if (sh_pfc_num_enums == SH_PFC_MAX_ENUMS) {
+ pr_warn_once("%s: Please increase SH_PFC_MAX_ENUMS\n", drvname);
+ return 0;
+ }
+
+ sh_pfc_enums[sh_pfc_num_enums++] = enum_id;
+ return 0;
+}
+
+static void __init sh_pfc_check_reg_enums(const char *drvname, u32 reg,
+ const u16 *enums, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ if (enums[i] && sh_pfc_check_enum(drvname, enums[i]))
+ sh_pfc_err("reg 0x%x enum_id %u conflict\n", reg,
+ enums[i]);
+ }
+}
+
+static void __init sh_pfc_check_pin(const struct sh_pfc_soc_info *info,
+ u32 reg, unsigned int pin)
+{
+ const char *drvname = info->name;
+ unsigned int i;
+
+ if (pin == SH_PFC_PIN_NONE)
+ return;
+
+ for (i = 0; i < info->nr_pins; i++) {
+ if (pin == info->pins[i].pin)
+ return;
+ }
+
+ sh_pfc_err("reg 0x%x: pin %u not found\n", reg, pin);
+}
static void __init sh_pfc_check_cfg_reg(const char *drvname,
const struct pinmux_cfg_reg *cfg_reg)
{
unsigned int i, n, rw, fw;
+ sh_pfc_check_reg(drvname, cfg_reg->reg);
+
if (cfg_reg->field_width) {
- /* Checked at build time */
- return;
+ n = cfg_reg->reg_width / cfg_reg->field_width;
+ /* Skip field checks (done at build time) */
+ goto check_enum_ids;
}
for (i = 0, n = 0, rw = 0; (fw = cfg_reg->var_field_width[i]); i++) {
- if (fw > 3 && is0s(&cfg_reg->enum_ids[n], 1 << fw)) {
- pr_warn("%s: reg 0x%x: reserved field [%u:%u] can be split to reduce table size\n",
- drvname, cfg_reg->reg, rw, rw + fw - 1);
- sh_pfc_warnings++;
- }
+ if (fw > 3 && is0s(&cfg_reg->enum_ids[n], 1 << fw))
+ sh_pfc_warn("reg 0x%x: reserved field [%u:%u] can be split to reduce table size\n",
+ cfg_reg->reg, rw, rw + fw - 1);
n += 1 << fw;
rw += fw;
}
- if (rw != cfg_reg->reg_width) {
- pr_err("%s: reg 0x%x: var_field_width declares %u instead of %u bits\n",
- drvname, cfg_reg->reg, rw, cfg_reg->reg_width);
- sh_pfc_errors++;
- }
+ if (rw != cfg_reg->reg_width)
+ sh_pfc_err("reg 0x%x: var_field_width declares %u instead of %u bits\n",
+ cfg_reg->reg, rw, cfg_reg->reg_width);
+
+ if (n != cfg_reg->nr_enum_ids)
+ sh_pfc_err("reg 0x%x: enum_ids[] has %u instead of %u values\n",
+ cfg_reg->reg, cfg_reg->nr_enum_ids, n);
+
+check_enum_ids:
+ sh_pfc_check_reg_enums(drvname, cfg_reg->reg, cfg_reg->enum_ids, n);
+}
+
+static void __init sh_pfc_check_drive_reg(const struct sh_pfc_soc_info *info,
+ const struct pinmux_drive_reg *drive)
+{
+ const char *drvname = info->name;
+ unsigned long seen = 0, mask;
+ unsigned int i;
+
+ sh_pfc_check_reg(info->name, drive->reg);
+ for (i = 0; i < ARRAY_SIZE(drive->fields); i++) {
+ const struct pinmux_drive_reg_field *field = &drive->fields[i];
+
+ if (!field->pin && !field->offset && !field->size)
+ continue;
+
+ mask = GENMASK(field->offset + field->size - 1, field->offset);
+ if (mask & seen)
+ sh_pfc_err("drive_reg 0x%x: field %u overlap\n",
+ drive->reg, i);
+ seen |= mask;
- if (n != cfg_reg->nr_enum_ids) {
- pr_err("%s: reg 0x%x: enum_ids[] has %u instead of %u values\n",
- drvname, cfg_reg->reg, cfg_reg->nr_enum_ids, n);
- sh_pfc_errors++;
+ sh_pfc_check_pin(info, drive->reg, field->pin);
}
}
+static void __init sh_pfc_check_bias_reg(const struct sh_pfc_soc_info *info,
+ const struct pinmux_bias_reg *bias)
+{
+ unsigned int i;
+
+ sh_pfc_check_reg(info->name, bias->puen);
+ if (bias->pud)
+ sh_pfc_check_reg(info->name, bias->pud);
+ for (i = 0; i < ARRAY_SIZE(bias->pins); i++)
+ sh_pfc_check_pin(info, bias->puen, bias->pins[i]);
+}
+
static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
{
- const struct sh_pfc_function *func;
const char *drvname = info->name;
unsigned int *refcnts;
unsigned int i, j, k;
pr_info("Checking %s\n", drvname);
+ sh_pfc_num_regs = 0;
+ sh_pfc_num_enums = 0;
/* Check pins */
for (i = 0; i < info->nr_pins; i++) {
+ const struct sh_pfc_pin *pin = &info->pins[i];
+
+ if (!pin->name) {
+ sh_pfc_err("empty pin %u\n", i);
+ continue;
+ }
for (j = 0; j < i; j++) {
- if (!strcmp(info->pins[i].name, info->pins[j].name)) {
- pr_err("%s: pin %s/%s: name conflict\n",
- drvname, info->pins[i].name,
- info->pins[j].name);
- sh_pfc_errors++;
- }
+ const struct sh_pfc_pin *pin2 = &info->pins[j];
- if (info->pins[i].pin != (u16)-1 &&
- info->pins[i].pin == info->pins[j].pin) {
- pr_err("%s: pin %s/%s: pin %u conflict\n",
- drvname, info->pins[i].name,
- info->pins[j].name, info->pins[i].pin);
- sh_pfc_errors++;
- }
+ if (same_name(pin->name, pin2->name))
+ sh_pfc_err("pin %s: name conflict\n",
+ pin->name);
- if (info->pins[i].enum_id &&
- info->pins[i].enum_id == info->pins[j].enum_id) {
- pr_err("%s: pin %s/%s: enum_id %u conflict\n",
- drvname, info->pins[i].name,
- info->pins[j].name,
- info->pins[i].enum_id);
- sh_pfc_errors++;
- }
+ if (pin->pin != (u16)-1 && pin->pin == pin2->pin)
+ sh_pfc_err("pin %s/%s: pin %u conflict\n",
+ pin->name, pin2->name, pin->pin);
+
+ if (pin->enum_id && pin->enum_id == pin2->enum_id)
+ sh_pfc_err("pin %s/%s: enum_id %u conflict\n",
+ pin->name, pin2->name,
+ pin->enum_id);
}
}
@@ -817,45 +942,49 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
return;
for (i = 0; i < info->nr_functions; i++) {
- func = &info->functions[i];
+ const struct sh_pfc_function *func = &info->functions[i];
+
if (!func->name) {
- pr_err("%s: empty function %u\n", drvname, i);
- sh_pfc_errors++;
+ sh_pfc_err("empty function %u\n", i);
continue;
}
+ for (j = 0; j < i; j++) {
+ if (same_name(func->name, info->functions[j].name))
+ sh_pfc_err("function %s: name conflict\n",
+ func->name);
+ }
for (j = 0; j < func->nr_groups; j++) {
for (k = 0; k < info->nr_groups; k++) {
- if (info->groups[k].name &&
- !strcmp(func->groups[j],
- info->groups[k].name)) {
+ if (same_name(func->groups[j],
+ info->groups[k].name)) {
refcnts[k]++;
break;
}
}
- if (k == info->nr_groups) {
- pr_err("%s: function %s: group %s not found\n",
- drvname, func->name, func->groups[j]);
- sh_pfc_errors++;
- }
+ if (k == info->nr_groups)
+ sh_pfc_err("function %s: group %s not found\n",
+ func->name, func->groups[j]);
}
}
for (i = 0; i < info->nr_groups; i++) {
- if (!info->groups[i].name) {
- pr_err("%s: empty group %u\n", drvname, i);
- sh_pfc_errors++;
+ const struct sh_pfc_pin_group *group = &info->groups[i];
+
+ if (!group->name) {
+ sh_pfc_err("empty group %u\n", i);
continue;
}
- if (!refcnts[i]) {
- pr_err("%s: orphan group %s\n", drvname,
- info->groups[i].name);
- sh_pfc_errors++;
- } else if (refcnts[i] > 1) {
- pr_warn("%s: group %s referenced by %u functions\n",
- drvname, info->groups[i].name, refcnts[i]);
- sh_pfc_warnings++;
+ for (j = 0; j < i; j++) {
+ if (same_name(group->name, info->groups[j].name))
+ sh_pfc_err("group %s: name conflict\n",
+ group->name);
}
+ if (!refcnts[i])
+ sh_pfc_err("orphan group %s\n", group->name);
+ else if (refcnts[i] > 1)
+ sh_pfc_warn("group %s referenced by %u functions\n",
+ group->name, refcnts[i]);
}
kfree(refcnts);
@@ -863,12 +992,62 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
/* Check config register descriptions */
for (i = 0; info->cfg_regs && info->cfg_regs[i].reg; i++)
sh_pfc_check_cfg_reg(drvname, &info->cfg_regs[i]);
+
+ /* Check drive strength registers */
+ for (i = 0; info->drive_regs && info->drive_regs[i].reg; i++)
+ sh_pfc_check_drive_reg(info, &info->drive_regs[i]);
+
+ /* Check bias registers */
+ for (i = 0; info->bias_regs && info->bias_regs[i].puen; i++)
+ sh_pfc_check_bias_reg(info, &info->bias_regs[i]);
+
+ /* Check ioctrl registers */
+ for (i = 0; info->ioctrl_regs && info->ioctrl_regs[i].reg; i++)
+ sh_pfc_check_reg(drvname, info->ioctrl_regs[i].reg);
+
+ /* Check data registers */
+ for (i = 0; info->data_regs && info->data_regs[i].reg; i++) {
+ sh_pfc_check_reg(drvname, info->data_regs[i].reg);
+ sh_pfc_check_reg_enums(drvname, info->data_regs[i].reg,
+ info->data_regs[i].enum_ids,
+ info->data_regs[i].reg_width);
+ }
+
+#ifdef CONFIG_PINCTRL_SH_FUNC_GPIO
+ /* Check function GPIOs */
+ for (i = 0; i < info->nr_func_gpios; i++) {
+ const struct pinmux_func *func = &info->func_gpios[i];
+
+ if (!func->name) {
+ sh_pfc_err("empty function gpio %u\n", i);
+ continue;
+ }
+ for (j = 0; j < i; j++) {
+ if (same_name(func->name, info->func_gpios[j].name))
+ sh_pfc_err("func_gpio %s: name conflict\n",
+ func->name);
+ }
+ if (sh_pfc_check_enum(drvname, func->enum_id))
+ sh_pfc_err("%s enum_id %u conflict\n", func->name,
+ func->enum_id);
+ }
+#endif
}
static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
{
unsigned int i;
+ sh_pfc_regs = kcalloc(SH_PFC_MAX_REGS, sizeof(*sh_pfc_regs),
+ GFP_KERNEL);
+ if (!sh_pfc_regs)
+ return;
+
+ sh_pfc_enums = kcalloc(SH_PFC_MAX_ENUMS, sizeof(*sh_pfc_enums),
+ GFP_KERNEL);
+ if (!sh_pfc_enums)
+ goto free_regs;
+
pr_warn("Checking builtin pinmux tables\n");
for (i = 0; pdrv->id_table[i].name[0]; i++)
@@ -881,6 +1060,10 @@ static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
pr_warn("Detected %u errors and %u warnings\n", sh_pfc_errors,
sh_pfc_warnings);
+
+ kfree(sh_pfc_enums);
+free_regs:
+ kfree(sh_pfc_regs);
}
#else /* !DEBUG */
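
The drive-register overlap test in sh_pfc_check_drive_reg() depends on GENMASK() covering exactly the bits of each field: for a field of size bits starting at offset, the highest bit is offset + size - 1, which is why the mask above uses the "- 1" form. A self-contained illustration, with GENMASK() re-derived locally (only the mask arithmetic mirrors the driver; the field values are made up):

#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * 8)
/* Local equivalent of the kernel's GENMASK(h, l): bits l..h set. */
#define GENMASK(h, l) \
	((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

struct field { unsigned int offset, size; };

int main(void)
{
	/* Two adjacent 2-bit fields: bits 0..1 and bits 2..3. */
	struct field f[] = { { 0, 2 }, { 2, 2 } };
	unsigned long seen = 0;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned long mask =
			GENMASK(f[i].offset + f[i].size - 1, f[i].offset);

		if (mask & seen)
			printf("field %u overlaps\n", i);
		seen |= mask;
	}

	/*
	 * With "offset + size" as the high bit the masks would be
	 * 0x7 and 0x1c, falsely reporting the adjacent fields as
	 * overlapping.
	 */
	return 0;
}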
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 8213e118aa40..9c6e931ae766 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -205,14 +205,11 @@ static int gpio_pin_to_irq(struct gpio_chip *gc, unsigned offset)
for (k = 0; gpios[k] >= 0; k++) {
if (gpios[k] == offset)
- goto found;
+ return pfc->irqs[i];
}
}
return 0;
-
-found:
- return pfc->irqs[i];
}
static int gpio_pin_setup(struct sh_pfc_chip *chip)
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index b1a9611f46b3..50df9e084414 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -352,7 +352,7 @@ struct atlas7_gpio_chip {
int nbank;
raw_spinlock_t lock;
struct gpio_chip chip;
- struct atlas7_gpio_bank banks[0];
+ struct atlas7_gpio_bank banks[];
};
/**
diff --git a/drivers/pinctrl/sprd/Kconfig b/drivers/pinctrl/sprd/Kconfig
index b6c5479b58fb..eef35d01b770 100644
--- a/drivers/pinctrl/sprd/Kconfig
+++ b/drivers/pinctrl/sprd/Kconfig
@@ -4,9 +4,7 @@
#
config PINCTRL_SPRD
- bool "Spreadtrum pinctrl driver"
- depends on OF
- depends on ARCH_SPRD || COMPILE_TEST
+ tristate
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -15,7 +13,9 @@ config PINCTRL_SPRD
Say Y here to enable Spreadtrum pinctrl driver
config PINCTRL_SPRD_SC9860
- bool "Spreadtrum SC9860 pinctrl driver"
- depends on PINCTRL_SPRD
+ tristate "Spreadtrum SC9860 pinctrl driver"
+ depends on OF
+ depends on ARCH_SPRD || COMPILE_TEST
+ select PINCTRL_SPRD
help
Say Y here to enable Spreadtrum SC9860 pinctrl driver
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 157712ab05a8..48cbf2a2837f 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -464,9 +464,15 @@ static int sprd_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin_id,
case PIN_CONFIG_INPUT_ENABLE:
arg = (reg >> SLEEP_INPUT_SHIFT) & SLEEP_INPUT_MASK;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_OUTPUT_ENABLE:
arg = reg & SLEEP_OUTPUT_MASK;
break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ if ((reg & SLEEP_OUTPUT) || (reg & SLEEP_INPUT))
+ return -EINVAL;
+
+ arg = 1;
+ break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg = (reg >> DRIVE_STRENGTH_SHIFT) &
DRIVE_STRENGTH_MASK;
@@ -635,13 +641,23 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
shift = SLEEP_INPUT_SHIFT;
}
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_OUTPUT_ENABLE:
if (is_sleep_config == true) {
- val |= SLEEP_OUTPUT;
+ if (arg > 0)
+ val |= SLEEP_OUTPUT;
+ else
+ val &= ~SLEEP_OUTPUT;
+
mask = SLEEP_OUTPUT_MASK;
shift = SLEEP_OUTPUT_SHIFT;
}
break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ if (is_sleep_config == true) {
+ val = shift = 0;
+ mask = SLEEP_OUTPUT | SLEEP_INPUT;
+ }
+ break;
case PIN_CONFIG_DRIVE_STRENGTH:
if (arg < 2 || arg > 60)
return -EINVAL;
@@ -1090,6 +1106,7 @@ int sprd_pinctrl_core_probe(struct platform_device *pdev,
return 0;
}
+EXPORT_SYMBOL_GPL(sprd_pinctrl_core_probe);
int sprd_pinctrl_remove(struct platform_device *pdev)
{
@@ -1098,6 +1115,7 @@ int sprd_pinctrl_remove(struct platform_device *pdev)
pinctrl_unregister(sprd_pctl->pctl);
return 0;
}
+EXPORT_SYMBOL_GPL(sprd_pinctrl_remove);
void sprd_pinctrl_shutdown(struct platform_device *pdev)
{
@@ -1112,6 +1130,7 @@ void sprd_pinctrl_shutdown(struct platform_device *pdev)
return;
pinctrl_select_state(pinctl, state);
}
+EXPORT_SYMBOL_GPL(sprd_pinctrl_shutdown);
MODULE_DESCRIPTION("SPREADTRUM Pin Controller Driver");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 2d5e0435af0a..a657cd829ce6 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -92,6 +92,7 @@ struct stm32_gpio_bank {
u32 bank_nr;
u32 bank_ioport_nr;
u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
+ u8 irq_type[STM32_GPIO_PINS_PER_BANK];
};
struct stm32_pinctrl {
@@ -283,9 +284,9 @@ static int stm32_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
stm32_pmx_get_mode(bank, pin, &mode, &alt);
if ((alt == 0) && (mode == 0))
- ret = 1;
+ ret = GPIO_LINE_DIRECTION_IN;
else if ((alt == 0) && (mode == 1))
- ret = 0;
+ ret = GPIO_LINE_DIRECTION_OUT;
else
ret = -EINVAL;
@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = {
.get_direction = stm32_gpio_get_direction,
};
+static void stm32_gpio_irq_trigger(struct irq_data *d)
+{
+ struct stm32_gpio_bank *bank = d->domain->host_data;
+ int level;
+
+ /* If it is a level interrupt type, retrigger it */
+ level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
+ if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
+ (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
+ irq_chip_retrigger_hierarchy(d);
+}
+
+static void stm32_gpio_irq_eoi(struct irq_data *d)
+{
+ irq_chip_eoi_parent(d);
+ stm32_gpio_irq_trigger(d);
+}
+
+static int stm32_gpio_set_type(struct irq_data *d, unsigned int type)
+{
+ struct stm32_gpio_bank *bank = d->domain->host_data;
+ u32 parent_type;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ parent_type = type;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ parent_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ parent_type = IRQ_TYPE_EDGE_FALLING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ bank->irq_type[d->hwirq] = type;
+
+ return irq_chip_set_type_parent(d, parent_type);
+}
+
static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
+static void stm32_gpio_irq_unmask(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ stm32_gpio_irq_trigger(d);
+}
+
static struct irq_chip stm32_gpio_irq_chip = {
.name = "stm32gpio",
- .irq_eoi = irq_chip_eoi_parent,
+ .irq_eoi = stm32_gpio_irq_eoi,
.irq_ack = irq_chip_ack_parent,
.irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_set_type = irq_chip_set_type_parent,
+ .irq_unmask = stm32_gpio_irq_unmask,
+ .irq_set_type = stm32_gpio_set_type,
.irq_set_wake = irq_chip_set_wake_parent,
.irq_request_resources = stm32_gpio_irq_request_resources,
.irq_release_resources = stm32_gpio_irq_release_resources,
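
The stm32 hunk emulates level-triggered GPIO interrupts on an EXTI parent that only supports edges: the requested level type is remembered in irq_type[], the parent is programmed with the matching edge, and on eoi/unmask the line is re-sampled and the interrupt retriggered while it still sits at the active level. A minimal model of that retrigger decision in plain C (gpio_get() and retrigger() are stand-ins, not the kernel API):

#include <stdio.h>

enum irq_type { IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_LEVEL_LOW };

static int line_level = 1;		/* simulated GPIO line */

static int gpio_get(void) { return line_level; }
static void retrigger(void) { printf("retriggered\n"); }

/* Mirror of stm32_gpio_irq_trigger(): refire while the level is active. */
static void irq_trigger(enum irq_type stored_type)
{
	int level = gpio_get();

	if ((level == 0 && stored_type == IRQ_TYPE_LEVEL_LOW) ||
	    (level == 1 && stored_type == IRQ_TYPE_LEVEL_HIGH))
		retrigger();
}

int main(void)
{
	irq_trigger(IRQ_TYPE_LEVEL_HIGH);	/* line high -> retriggered */
	line_level = 0;
	irq_trigger(IRQ_TYPE_LEVEL_HIGH);	/* line low  -> nothing     */
	return 0;
}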
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index b35c3245ab3f..8e792f8e2dc9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/export.h>
@@ -1058,6 +1059,14 @@ static void sunxi_pinctrl_irq_ack_unmask(struct irq_data *d)
sunxi_pinctrl_irq_unmask(d);
}
+static int sunxi_pinctrl_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ u8 bank = d->hwirq / IRQ_PER_BANK;
+
+ return irq_set_irq_wake(pctl->irq[bank], on);
+}
+
static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
.name = "sunxi_pio_edge",
.irq_ack = sunxi_pinctrl_irq_ack,
@@ -1066,7 +1075,8 @@ static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
.irq_request_resources = sunxi_pinctrl_irq_request_resources,
.irq_release_resources = sunxi_pinctrl_irq_release_resources,
.irq_set_type = sunxi_pinctrl_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .irq_set_wake = sunxi_pinctrl_irq_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
static struct irq_chip sunxi_pinctrl_level_irq_chip = {
@@ -1081,7 +1091,9 @@ static struct irq_chip sunxi_pinctrl_level_irq_chip = {
.irq_request_resources = sunxi_pinctrl_irq_request_resources,
.irq_release_resources = sunxi_pinctrl_irq_release_resources,
.irq_set_type = sunxi_pinctrl_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_EOI_THREADED |
+ .irq_set_wake = sunxi_pinctrl_irq_set_wake,
+ .flags = IRQCHIP_EOI_THREADED |
+ IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_EOI_IF_HANDLED,
};
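
The sunxi change replaces IRQCHIP_SKIP_SET_WAKE with a real wake handler that simply forwards the request to the upstream interrupt of the bank the pin belongs to. A rough model of that bank lookup in plain C (IRQ_PER_BANK matches the driver's constant name; the parent IRQ numbers are illustrative):

#include <stdio.h>

#define IRQ_PER_BANK 32

/* Illustrative upstream (parent) interrupt per IRQ bank. */
static const int parent_irq[4] = { 100, 101, 102, 103 };

/* Stand-in for irq_set_irq_wake(): just record the request. */
static int irq_set_irq_wake(int irq, unsigned int on)
{
	printf("parent irq %d wake %s\n", irq, on ? "on" : "off");
	return 0;
}

/* Mirror of sunxi_pinctrl_irq_set_wake(): forward to the bank parent. */
static int pin_irq_set_wake(unsigned long hwirq, unsigned int on)
{
	unsigned int bank = hwirq / IRQ_PER_BANK;

	return irq_set_irq_wake(parent_irq[bank], on);
}

int main(void)
{
	pin_irq_set_wake(37, 1);	/* hwirq 37 -> bank 1 -> irq 101 */
	return 0;
}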
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index cefbbb8d1a68..21661f6490d6 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -275,11 +275,57 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
+static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ const struct tegra_pingroup *group;
+ u32 value;
+
+ if (!pmx->soc->sfsel_in_mux)
+ return 0;
+
+ group = &pmx->soc->groups[offset];
+
+ if (group->mux_reg < 0 || group->sfsel_bit < 0)
+ return -EINVAL;
+
+ value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
+ value &= ~BIT(group->sfsel_bit);
+ pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
+
+ return 0;
+}
+
+static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ const struct tegra_pingroup *group;
+ u32 value;
+
+ if (!pmx->soc->sfsel_in_mux)
+ return;
+
+ group = &pmx->soc->groups[offset];
+
+ if (group->mux_reg < 0 || group->sfsel_bit < 0)
+ return;
+
+ value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
+ value |= BIT(group->sfsel_bit);
+ pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
+}
+
static const struct pinmux_ops tegra_pinmux_ops = {
.get_functions_count = tegra_pinctrl_get_funcs_count,
.get_function_name = tegra_pinctrl_get_func_name,
.get_function_groups = tegra_pinctrl_get_func_groups,
.set_mux = tegra_pinctrl_set_mux,
+ .gpio_request_enable = tegra_pinctrl_gpio_request_enable,
+ .gpio_disable_free = tegra_pinctrl_gpio_disable_free,
};
static int tegra_pinconf_reg(struct tegra_pmx *pmx,
@@ -689,12 +735,12 @@ const struct dev_pm_ops tegra_pinctrl_pm = {
.resume = &tegra_pinctrl_resume
};
-static bool gpio_node_has_range(const char *compatible)
+static bool tegra_pinctrl_gpio_node_has_range(struct tegra_pmx *pmx)
{
struct device_node *np;
bool has_prop = false;
- np = of_find_compatible_node(NULL, NULL, compatible);
+ np = of_find_compatible_node(NULL, NULL, pmx->soc->gpio_compatible);
if (!np)
return has_prop;
@@ -794,7 +840,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
tegra_pinctrl_clear_parked_bits(pmx);
- if (!gpio_node_has_range(pmx->soc->gpio_compatible))
+ if (pmx->soc->ngpios > 0 && !tegra_pinctrl_gpio_node_has_range(pmx))
pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
platform_set_drvdata(pdev, pmx);
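
The new gpio_request_enable/gpio_disable_free callbacks are a set/clear pair on the SFSEL bit: clearing it hands the pin to the GPIO controller when the pin is requested, setting it returns the pin to its special function when it is freed. Sketched over a fake mux register in plain C (the bit position mirrors the tegra194 table below; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define SFSEL_BIT	10	/* matches the .sfsel_bit = 10 entry */

static uint32_t mux_reg = BIT(SFSEL_BIT);	/* pin starts in SFIO mode */

static void gpio_request_enable(void)	/* hand the pin to the GPIO block */
{
	mux_reg &= ~BIT(SFSEL_BIT);
}

static void gpio_disable_free(void)	/* back to the special function */
{
	mux_reg |= BIT(SFSEL_BIT);
}

int main(void)
{
	gpio_request_enable();
	printf("sfsel=%d\n", !!(mux_reg & BIT(SFSEL_BIT)));	/* 0: GPIO */
	gpio_disable_free();
	printf("sfsel=%d\n", !!(mux_reg & BIT(SFSEL_BIT)));	/* 1: SFIO */
	return 0;
}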
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index 0fc82eea9cf1..fcad7f74c5a2 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -107,7 +107,8 @@ struct tegra_function {
* drvup, slwr, slwf, and drvtype parameters.
* @drv_bank: Drive fields register bank.
* @hsm_bit: High Speed Mode register bit.
- * @schmitt_bit: Scmitt register bit.
+ * @sfsel_bit: GPIO/SFIO selection register bit.
+ * @schmitt_bit: Schmitt register bit.
* @lpmd_bit: Low Power Mode register bit.
* @drvdn_bit: Drive Down register bit.
* @drvdn_width: Drive Down field width.
@@ -153,6 +154,7 @@ struct tegra_pingroup {
s32 ioreset_bit:6;
s32 rcv_sel_bit:6;
s32 hsm_bit:6;
+ s32 sfsel_bit:6;
s32 schmitt_bit:6;
s32 lpmd_bit:6;
s32 drvdn_bit:6;
@@ -192,6 +194,7 @@ struct tegra_pinctrl_soc_data {
bool hsm_in_mux;
bool schmitt_in_mux;
bool drvtype_in_mux;
+ bool sfsel_in_mux;
};
extern const struct dev_pm_ops tegra_pinctrl_pm;
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c
index daf44cf240c9..2e0b5f7bb095 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra194.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c
@@ -24,17 +24,14 @@
/* Define unique ID for each pins */
enum pin_id {
- TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0 = 256,
- TEGRA_PIN_PEX_L5_RST_N_PGG1 = 257,
- TEGRA_PIN_NUM_GPIOS = 258,
+ TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0,
+ TEGRA_PIN_PEX_L5_RST_N_PGG1,
};
/* Table for pin descriptor */
static const struct pinctrl_pin_desc tegra194_pins[] = {
- PINCTRL_PIN(TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0,
- "TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0"),
- PINCTRL_PIN(TEGRA_PIN_PEX_L5_RST_N_PGG1,
- "TEGRA_PIN_PEX_L5_RST_N_PGG1"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0, "PEX_L5_CLKREQ_N_PGG0"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L5_RST_N_PGG1, "PEX_L5_RST_N_PGG1"),
};
static const unsigned int pex_l5_clkreq_n_pgg0_pins[] = {
@@ -59,6 +56,7 @@ enum tegra_mux_dt {
{ \
.name = #lid, \
}
+
static struct tegra_function tegra194_functions[] = {
TEGRA_PIN_FUNCTION(rsvd0),
TEGRA_PIN_FUNCTION(rsvd1),
@@ -70,7 +68,7 @@ static struct tegra_function tegra194_functions[] = {
#define DRV_PINGROUP_ENTRY_Y(r, drvdn_b, drvdn_w, drvup_b, \
drvup_w, slwr_b, slwr_w, slwf_b, \
slwf_w, bank) \
- .drv_reg = ((r)), \
+ .drv_reg = ((r)), \
.drv_bank = bank, \
.drvdn_bit = drvdn_b, \
.drvdn_width = drvdn_w, \
@@ -89,7 +87,7 @@ static struct tegra_function tegra194_functions[] = {
.hsm_bit = -1, \
.mux_bank = bank, \
.mux_bit = 0, \
- .pupd_reg = ((r)), \
+ .pupd_reg = ((r)), \
.pupd_bank = bank, \
.pupd_bit = 2, \
.tri_reg = ((r)), \
@@ -97,6 +95,7 @@ static struct tegra_function tegra194_functions[] = {
.tri_bit = 4, \
.einput_bit = e_input, \
.odrain_bit = e_od, \
+ .sfsel_bit = 10, \
.schmitt_bit = schmitt_b, \
.drvtype_bit = 13, \
.drv_reg = -1, \
@@ -109,20 +108,20 @@ static struct tegra_function tegra194_functions[] = {
#define PINGROUP(pg_name, f0, f1, f2, f3, r, bank, pupd, e_lpbk, \
e_input, e_lpdr, e_od, schmitt_b, drvtype, io_rail) \
- { \
- .name = #pg_name, \
- .pins = pg_name##_pins, \
- .npins = ARRAY_SIZE(pg_name##_pins), \
- .funcs = { \
- TEGRA_MUX_##f0, \
- TEGRA_MUX_##f1, \
- TEGRA_MUX_##f2, \
- TEGRA_MUX_##f3, \
- }, \
- PIN_PINGROUP_ENTRY_Y(r, bank, pupd, e_lpbk, \
- e_input, e_od, \
- schmitt_b, drvtype), \
- drive_##pg_name, \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
+ }, \
+ PIN_PINGROUP_ENTRY_Y(r, bank, pupd, e_lpbk, \
+ e_input, e_od, \
+ schmitt_b, drvtype), \
+ drive_##pg_name, \
}
static const struct tegra_pingroup tegra194_groups[] = {
@@ -133,7 +132,6 @@ static const struct tegra_pingroup tegra194_groups[] = {
};
static const struct tegra_pinctrl_soc_data tegra194_pinctrl = {
- .ngpios = TEGRA_PIN_NUM_GPIOS,
.pins = tegra194_pins,
.npins = ARRAY_SIZE(tegra194_pins),
.functions = tegra194_functions,
@@ -143,6 +141,7 @@ static const struct tegra_pinctrl_soc_data tegra194_pinctrl = {
.hsm_in_mux = true,
.schmitt_in_mux = true,
.drvtype_in_mux = true,
+ .sfsel_in_mux = true,
};
static int tegra194_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 57babf31e320..ade348b49b31 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -29,7 +29,7 @@ struct uniphier_pinctrl_reg_region {
struct list_head node;
unsigned int base;
unsigned int nregs;
- u32 vals[0];
+ u32 vals[];
};
struct uniphier_pinctrl_priv {
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index ea910a18b4d7..65b97e240196 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -486,8 +486,10 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
u32 val;
val = readl_relaxed(data->base + reg_dir);
- /* Return 0 == output, 1 == input */
- return !(val & BIT(bit));
+ if (val & BIT(bit))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 5f57282a28da..03ea5129ed0c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -7,7 +7,7 @@ config MFD_CROS_EC
tristate "Platform support for Chrome hardware (transitional)"
select CHROME_PLATFORMS
select CROS_EC
- select CONFIG_MFD_CROS_EC_DEV
+ select MFD_CROS_EC_DEV
depends on X86 || ARM || ARM64 || COMPILE_TEST
help
This is a transitional Kconfig option and will be removed after
@@ -214,6 +214,17 @@ config CROS_EC_SYSFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_sysfs.
+config CROS_EC_TYPEC
+ tristate "ChromeOS EC Type-C Connector Control"
+ depends on MFD_CROS_EC_DEV && TYPEC
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for accessing Type C connector
+ information from the Chrome OS EC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cros_ec_typec.
+
config CROS_USBPD_LOGGER
tristate "Logging driver for USB PD charger"
depends on CHARGER_CROS_USBPD
@@ -226,6 +237,20 @@ config CROS_USBPD_LOGGER
To compile this driver as a module, choose M here: the
module will be called cros_usbpd_logger.
+config CROS_USBPD_NOTIFY
+ tristate "ChromeOS Type-C power delivery event notifier"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for Type-C PD event notifications
+ from the ChromeOS EC. On ACPI platforms this driver will bind to the
+ GOOG0003 ACPI device, and on platforms which don't have this device it
+ will get initialized on ECs which support the feature
+ EC_FEATURE_USB_PD.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_usbpd_notify.
+
source "drivers/platform/chrome/wilco_ec/Kconfig"
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index aacd5920d8a1..41baccba033f 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CROS_EC_ISHTP) += cros_ec_ishtp.o
obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o
obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o
+obj-$(CONFIG_CROS_EC_TYPEC) += cros_ec_typec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
@@ -19,8 +20,10 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
-obj-$(CONFIG_CROS_EC_SENSORHUB) += cros_ec_sensorhub.o
+cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
+obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
+obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o
obj-$(CONFIG_WILCO_EC) += wilco_ec/
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 4f3651fcd9fe..472a03daa869 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -103,7 +103,7 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
pr_debug("%d-%02x is probed at %02x\n",
adapter->nr, info->addr, dummy->addr);
i2c_unregister_device(dummy);
- client = i2c_new_device(adapter, info);
+ client = i2c_new_client_device(adapter, info);
}
}
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 6fc8f2c3ac51..3104680b7485 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -120,7 +120,7 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
/* For now, report failure to transition to S0ix with a warning. */
if (ret >= 0 && ec_dev->host_sleep_v1 &&
@@ -138,6 +138,24 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
return ret;
}
+static int cros_ec_ready_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_device *ec_dev = container_of(nb, struct cros_ec_device,
+ notifier_ready);
+ u32 host_event = cros_ec_get_host_event(ec_dev);
+
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_INTERFACE_READY)) {
+ mutex_lock(&ec_dev->lock);
+ cros_ec_query_all(ec_dev);
+ mutex_unlock(&ec_dev->lock);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
/**
* cros_ec_register() - Register a new ChromeOS EC, using the provided info.
* @ec_dev: Device to register.
@@ -237,6 +255,18 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
dev_dbg(ec_dev->dev, "Error %d clearing sleep event to ec",
err);
+ if (ec_dev->mkbp_event_supported) {
+ /*
+ * Register the notifier for EC_HOST_EVENT_INTERFACE_READY
+ * event.
+ */
+ ec_dev->notifier_ready.notifier_call = cros_ec_ready_event;
+ err = blocking_notifier_chain_register(&ec_dev->event_notifier,
+ &ec_dev->notifier_ready);
+ if (err)
+ return err;
+ }
+
dev_info(dev, "Chrome EC device registered\n");
return 0;
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index c65e70bc168d..e0bce869c49a 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -48,7 +48,7 @@ struct ec_event {
struct list_head node;
size_t size;
u8 event_type;
- u8 data[0];
+ u8 data[];
};
static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
@@ -301,7 +301,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
}
s_cmd->command += ec->cmd_offset;
- ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index b4c110c5fee0..b59180bff5a3 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -116,7 +116,7 @@ static int get_lightbar_version(struct cros_ec_dev *ec,
param = (struct ec_params_lightbar *)msg->data;
param->cmd = LIGHTBAR_CMD_VERSION;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
ret = 0;
goto exit;
@@ -193,15 +193,10 @@ static ssize_t brightness_store(struct device *dev,
if (ret)
goto exit;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto exit;
- }
-
ret = count;
exit:
kfree(msg);
@@ -258,13 +253,10 @@ static ssize_t led_rgb_store(struct device *dev, struct device_attribute *attr,
goto exit;
}
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS)
- goto exit;
-
i = 0;
ok = 1;
}
@@ -305,14 +297,13 @@ static ssize_t sequence_show(struct device *dev,
if (ret)
goto exit;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0)
- goto exit;
-
- if (msg->result != EC_RES_SUCCESS) {
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret == -EPROTO) {
ret = scnprintf(buf, PAGE_SIZE,
"ERROR: EC returned %d\n", msg->result);
goto exit;
+ } else if (ret < 0) {
+ goto exit;
}
resp = (struct ec_response_lightbar *)msg->data;
@@ -344,13 +335,10 @@ static int lb_send_empty_cmd(struct cros_ec_dev *ec, uint8_t cmd)
if (ret)
goto error;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto error;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto error;
- }
+
ret = 0;
error:
kfree(msg);
@@ -377,13 +365,10 @@ static int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable)
if (ret)
goto error;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto error;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto error;
- }
+
ret = 0;
error:
kfree(msg);
@@ -425,15 +410,10 @@ static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
if (ret)
goto exit;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto exit;
- }
-
ret = count;
exit:
kfree(msg);
@@ -487,13 +467,9 @@ static ssize_t program_store(struct device *dev, struct device_attribute *attr,
*/
msg->outsize = count + extra_bytes;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto exit;
- }
ret = count;
exit:
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 3cfa643f1d07..3e745e0fe092 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -553,7 +553,10 @@ EXPORT_SYMBOL(cros_ec_cmd_xfer);
* replied with success status. It's not necessary to check msg->result when
* using this function.
*
- * Return: The number of bytes transferred on success or negative error code.
+ * Return:
+ * >=0 - The number of bytes transferred
+ * -ENOTSUPP - Operation not supported
+ * -EPROTO - Protocol error
*/
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
@@ -563,6 +566,10 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret < 0) {
dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret);
+ } else if (msg->result == EC_RES_INVALID_VERSION) {
+ dev_dbg(ec_dev->dev, "Command invalid version (err:%d)\n",
+ msg->result);
+ return -ENOTSUPP;
} else if (msg->result != EC_RES_SUCCESS) {
dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result);
return -EPROTO;
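
With the split introduced above, callers of cros_ec_cmd_xfer_status() can distinguish an EC that does not implement the requested command version (-ENOTSUPP) from one that rejected the command (-EPROTO) without inspecting msg->result themselves. A hedged sketch of the intended caller pattern, with the transfer function mocked so the snippet runs standalone (the version-fallback policy is hypothetical, not taken from the patch):

#include <stdio.h>

#define ENOTSUPP 524	/* kernel-internal errno used by the hunk */

struct cros_ec_command { unsigned int version; };

/* Mock: pretend the EC rejects command version 1 as unsupported. */
static int cros_ec_cmd_xfer_status(void *ec_dev, struct cros_ec_command *msg)
{
	(void)ec_dev;
	return msg->version == 1 ? -ENOTSUPP : 4;	/* 4 bytes on v0 */
}

int main(void)
{
	struct cros_ec_command msg = { .version = 1 };
	int ret = cros_ec_cmd_xfer_status(NULL, &msg);

	if (ret == -ENOTSUPP) {		/* version not implemented by EC */
		msg.version = 0;	/* hypothetical v0 fallback       */
		ret = cros_ec_cmd_xfer_status(NULL, &msg);
	}
	if (ret < 0)
		return 1;		/* -EPROTO or transport error     */

	printf("received %d bytes\n", ret);
	return 0;
}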
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index dbc3f5523b83..7e8629e3db74 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -44,6 +44,8 @@ struct cros_ec_rpmsg {
struct completion xfer_ack;
struct work_struct host_event_work;
struct rpmsg_endpoint *ept;
+ bool has_pending_host_event;
+ bool probe_done;
};
/**
@@ -177,7 +179,14 @@ static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
memcpy(ec_dev->din, resp->data, len);
complete(&ec_rpmsg->xfer_ack);
} else if (resp->type == HOST_EVENT_MARK) {
- schedule_work(&ec_rpmsg->host_event_work);
+ /*
+ * If the host event is sent before cros_ec_register is
+ * finished, queue the host event.
+ */
+ if (ec_rpmsg->probe_done)
+ schedule_work(&ec_rpmsg->host_event_work);
+ else
+ ec_rpmsg->has_pending_host_event = true;
} else {
dev_warn(ec_dev->dev, "rpmsg received invalid type = %d",
resp->type);
@@ -240,6 +249,11 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
return ret;
}
+ ec_rpmsg->probe_done = true;
+
+ if (ec_rpmsg->has_pending_host_event)
+ schedule_work(&ec_rpmsg->host_event_work);
+
return 0;
}
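
The rpmsg fix is a small latch-until-ready pattern: host events that arrive before cros_ec_register() has finished set a pending flag instead of scheduling work, and probe drains the flag once registration completes. Modelled in plain C (schedule_work() is replaced by a print; like the driver, no locking is shown):

#include <stdbool.h>
#include <stdio.h>

static bool probe_done;
static bool has_pending_host_event;

/* Stand-in for schedule_work(&ec_rpmsg->host_event_work). */
static void schedule_host_event_work(void)
{
	printf("host event handled\n");
}

static void on_host_event(void)		/* rpmsg callback path */
{
	if (probe_done)
		schedule_host_event_work();
	else
		has_pending_host_event = true;	/* latch for later */
}

static void probe_finish(void)		/* tail of probe */
{
	probe_done = true;
	if (has_pending_host_event)
		schedule_host_event_work();	/* drain the latch */
}

int main(void)
{
	on_host_event();	/* before probe: latched            */
	probe_finish();		/* drains the pending event         */
	on_host_event();	/* after probe: handled immediately */
	return 0;
}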
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
index 79fefd3bb0fa..9c4af76a9956 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -50,42 +50,19 @@ static int cros_ec_sensorhub_register(struct device *dev,
struct cros_ec_sensorhub *sensorhub)
{
int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
+ struct cros_ec_command *msg = sensorhub->msg;
struct cros_ec_dev *ec = sensorhub->ec;
- struct ec_params_motion_sense *params;
- struct ec_response_motion_sense *resp;
- struct cros_ec_command *msg;
- int ret, i, sensor_num;
+ int ret, i;
char *name;
- sensor_num = cros_ec_get_sensor_count(ec);
- if (sensor_num < 0) {
- dev_err(dev,
- "Unable to retrieve sensor information (err:%d)\n",
- sensor_num);
- return sensor_num;
- }
-
- if (sensor_num == 0) {
- dev_err(dev, "Zero sensors reported.\n");
- return -EINVAL;
- }
-
- /* Prepare a message to send INFO command to each sensor. */
- msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
- GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
msg->version = 1;
- msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
- msg->outsize = sizeof(*params);
- msg->insize = sizeof(*resp);
- params = (struct ec_params_motion_sense *)msg->data;
- resp = (struct ec_response_motion_sense *)msg->data;
+ msg->insize = sizeof(struct ec_response_motion_sense);
+ msg->outsize = sizeof(struct ec_params_motion_sense);
- for (i = 0; i < sensor_num; i++) {
- params->cmd = MOTIONSENSE_CMD_INFO;
- params->info.sensor_num = i;
+ for (i = 0; i < sensorhub->sensor_num; i++) {
+ sensorhub->params->cmd = MOTIONSENSE_CMD_INFO;
+ sensorhub->params->info.sensor_num = i;
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
@@ -94,7 +71,7 @@ static int cros_ec_sensorhub_register(struct device *dev,
continue;
}
- switch (resp->info.type) {
+ switch (sensorhub->resp->info.type) {
case MOTIONSENSE_TYPE_ACCEL:
name = "cros-ec-accel";
break;
@@ -117,15 +94,16 @@ static int cros_ec_sensorhub_register(struct device *dev,
name = "cros-ec-activity";
break;
default:
- dev_warn(dev, "unknown type %d\n", resp->info.type);
+ dev_warn(dev, "unknown type %d\n",
+ sensorhub->resp->info.type);
continue;
}
ret = cros_ec_sensorhub_allocate_sensor(dev, name, i);
if (ret)
- goto error;
+ return ret;
- sensor_type[resp->info.type]++;
+ sensor_type[sensorhub->resp->info.type]++;
}
if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
@@ -137,42 +115,96 @@ static int cros_ec_sensorhub_register(struct device *dev,
"cros-ec-lid-angle",
0);
if (ret)
- goto error;
+ return ret;
}
- kfree(msg);
return 0;
-
-error:
- kfree(msg);
- return ret;
}
static int cros_ec_sensorhub_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
struct cros_ec_sensorhub *data;
- int ret;
- int i;
+ struct cros_ec_command *msg;
+ int ret, i, sensor_num;
+
+ msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) +
+ max((u16)sizeof(struct ec_params_motion_sense),
+ ec->ec_dev->max_response), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->ec = dev_get_drvdata(dev->parent);
+ mutex_init(&data->cmd_lock);
+
+ data->dev = dev;
+ data->ec = ec;
+ data->msg = msg;
+ data->params = (struct ec_params_motion_sense *)msg->data;
+ data->resp = (struct ec_response_motion_sense *)msg->data;
+
dev_set_drvdata(dev, data);
/* Check whether this EC is a sensor hub. */
- if (cros_ec_check_features(data->ec, EC_FEATURE_MOTION_SENSE)) {
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) {
+ sensor_num = cros_ec_get_sensor_count(ec);
+ if (sensor_num < 0) {
+ dev_err(dev,
+ "Unable to retrieve sensor information (err:%d)\n",
+ sensor_num);
+ return sensor_num;
+ }
+ if (sensor_num == 0) {
+ dev_err(dev, "Zero sensors reported.\n");
+ return -EINVAL;
+ }
+ data->sensor_num = sensor_num;
+
+ /*
+ * Prepare the ring handler before enumerating the
+ * sensors.
+ */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ ret = cros_ec_sensorhub_ring_allocate(data);
+ if (ret)
+ return ret;
+ }
+
+ /* Enumerate the sensors. */
ret = cros_ec_sensorhub_register(dev, data);
if (ret)
return ret;
+
+ /*
+ * When the EC does not have a FIFO, the sensors will query
+ * their data themselves via sysfs or a software trigger.
+ */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ ret = cros_ec_sensorhub_ring_add(data);
+ if (ret)
+ return ret;
+ /*
+ * The msg and its data are not under the control of the
+ * ring handler.
+ */
+ return devm_add_action_or_reset(dev,
+ cros_ec_sensorhub_ring_remove,
+ data);
+ }
+
} else {
/*
* If the device has sensors but does not claim to
* be a sensor hub, we are in legacy mode.
*/
- for (i = 0; i < 2; i++) {
+ data->sensor_num = 2;
+ for (i = 0; i < data->sensor_num; i++) {
ret = cros_ec_sensorhub_allocate_sensor(dev,
"cros-ec-accel-legacy", i);
if (ret)
@@ -180,12 +212,47 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
}
}
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * When the EC is suspending, we must stop sending interrupts, as
+ * we may use the same interrupt line for waking up the device.
+ * Tell the EC to stop sending non-interrupt events on the IIO ring.
+ */
+static int cros_ec_sensorhub_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_sensorhub *sensorhub = platform_get_drvdata(pdev);
+ struct cros_ec_dev *ec = sensorhub->ec;
+
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO))
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, false);
return 0;
}
+static int cros_ec_sensorhub_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_sensorhub *sensorhub = platform_get_drvdata(pdev);
+ struct cros_ec_dev *ec = sensorhub->ec;
+
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO))
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_sensorhub_pm_ops,
+ cros_ec_sensorhub_suspend,
+ cros_ec_sensorhub_resume);
+
static struct platform_driver cros_ec_sensorhub_driver = {
.driver = {
.name = DRV_NAME,
+ .pm = &cros_ec_sensorhub_pm_ops,
},
.probe = cros_ec_sensorhub_probe,
};
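
When the EC has a FIFO, the probe above finishes by returning devm_add_action_or_reset(), which registers cros_ec_sensorhub_ring_remove() as a device-managed cleanup: it runs automatically on unbind, and immediately if registration itself fails. A rough model of that contract in plain C (the devm machinery is reduced to a single stored callback):

#include <stdio.h>

/* One-slot model of the devm action list. */
static void (*devm_action)(void *);
static void *devm_data;

/* Register a cleanup; on registration failure it would run at once. */
static int devm_add_action_or_reset(void (*action)(void *), void *data)
{
	devm_action = action;
	devm_data = data;
	return 0;
}

static void device_unbind(void)		/* driver core teardown */
{
	if (devm_action)
		devm_action(devm_data);
}

static void ring_remove(void *data)
{
	printf("ring removed for %s\n", (const char *)data);
}

int main(void)
{
	devm_add_action_or_reset(ring_remove, "cros-ec-sensorhub");
	device_unbind();	/* cleanup runs without an explicit remove() */
	return 0;
}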
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
new file mode 100644
index 000000000000..24e48d96ed76
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Chrome OS EC Sensor hub FIFO.
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+
+/* Precision of fixed point for the m values from the filter */
+#define M_PRECISION BIT(23)
+
+/* Only activate the filter once we have at least this many elements. */
+#define TS_HISTORY_THRESHOLD 8
+
+/*
+ * If we don't have any history entries for this long, empty the filter to
+ * make sure there are no big discontinuities.
+ */
+#define TS_HISTORY_BORED_US 500000
+
+/* To measure by how much the filter is overshooting, if it happens. */
+#define FUTURE_TS_ANALYTICS_COUNT_MAX 100
+
+static inline int
+cros_sensorhub_send_sample(struct cros_ec_sensorhub *sensorhub,
+ struct cros_ec_sensors_ring_sample *sample)
+{
+ cros_ec_sensorhub_push_data_cb_t cb;
+ int id = sample->sensor_id;
+ struct iio_dev *indio_dev;
+
+ if (id >= sensorhub->sensor_num)
+ return -EINVAL;
+
+ cb = sensorhub->push_data[id].push_data_cb;
+ if (!cb)
+ return 0;
+
+ indio_dev = sensorhub->push_data[id].indio_dev;
+
+ if (sample->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH)
+ return 0;
+
+ return cb(indio_dev, sample->vector, sample->timestamp);
+}
+
+/**
+ * cros_ec_sensorhub_register_push_data() - register the callback to the hub.
+ *
+ * @sensorhub : Sensor Hub object
+ * @sensor_num : The sensor the caller is interested in.
+ * @indio_dev : The iio device to use when a sample arrives.
+ * @cb : The callback to call when a sample arrives.
+ *
+ * The callback cb will be used by cros_ec_sensorhub_ring to distribute events
+ * from the EC.
+ *
+ * Return: 0 when callback is registered.
+ * -EINVAL if the sensor number is invalid or the slot is already in use.
+ */
+int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t cb)
+{
+ if (sensor_num >= sensorhub->sensor_num)
+ return -EINVAL;
+ if (sensorhub->push_data[sensor_num].indio_dev)
+ return -EINVAL;
+
+ sensorhub->push_data[sensor_num].indio_dev = indio_dev;
+ sensorhub->push_data[sensor_num].push_data_cb = cb;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensorhub_register_push_data);
+
+void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num)
+{
+ sensorhub->push_data[sensor_num].indio_dev = NULL;
+ sensorhub->push_data[sensor_num].push_data_cb = NULL;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensorhub_unregister_push_data);
+
+/**
+ * cros_ec_sensorhub_ring_fifo_enable() - Enable or disable interrupt generation
+ * for FIFO events.
+ * @sensorhub: Sensor Hub object
+ * @on: true when events are requested.
+ *
+ * To be called before sleeping or when no one is listening.
+ *
+ * Return: 0 on success, or an error when we cannot communicate with the EC.
+ */
+int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
+ bool on)
+{
+ int ret, i;
+
+ mutex_lock(&sensorhub->cmd_lock);
+ if (sensorhub->tight_timestamps)
+ for (i = 0; i < sensorhub->sensor_num; i++)
+ sensorhub->batch_state[i].last_len = 0;
+
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INT_ENABLE;
+ sensorhub->params->fifo_int_enable.enable = on;
+
+ sensorhub->msg->outsize = sizeof(struct ec_params_motion_sense);
+ sensorhub->msg->insize = sizeof(struct ec_response_motion_sense);
+
+ ret = cros_ec_cmd_xfer_status(sensorhub->ec->ec_dev, sensorhub->msg);
+ mutex_unlock(&sensorhub->cmd_lock);
+
+ /* We expect to receive a payload of 4 bytes, ignore. */
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int cros_ec_sensor_ring_median_cmp(const void *pv1, const void *pv2)
+{
+ s64 v1 = *(s64 *)pv1;
+ s64 v2 = *(s64 *)pv2;
+
+ if (v1 > v2)
+ return 1;
+ else if (v1 < v2)
+ return -1;
+ else
+ return 0;
+}
+
+/*
+ * cros_ec_sensor_ring_median: Gets median of an array of numbers
+ *
+ * For now it's implemented using an O(n log n) sort and then returning
+ * the middle element. A more optimal method would be something like
+ * quickselect, but given that n = 64 we can probably live with it in the
+ * name of clarity.
+ *
+ * Warning: the input array gets modified (sorted)!
+ */
+static s64 cros_ec_sensor_ring_median(s64 *array, size_t length)
+{
+ sort(array, length, sizeof(s64), cros_ec_sensor_ring_median_cmp, NULL);
+ return array[length / 2];
+}
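+
+/*
+ * For example, sorting {7, 1, 9, 3, 20} in place yields {1, 3, 7, 9, 20}
+ * and the function returns array[2] = 7. For an even length such as 4 it
+ * returns the upper of the two middle elements (array[2]), which is close
+ * enough for the filtering below.
+ */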
+
+/*
+ * IRQ Timestamp Filtering
+ *
+ * Lower down in cros_ec_sensor_ring_process_event(), for each sensor event
+ * we have to calculate its timestamp in the AP timebase. There are 3 time
+ * points:
+ * a - EC timebase, sensor event
+ * b - EC timebase, IRQ
+ * c - AP timebase, IRQ
+ * a' - what we want: sensor event in AP timebase
+ *
+ * While a and b are recorded at accurate times (due to the EC real time
+ * nature); c is pretty untrustworthy, even though it's recorded the
+ * first thing in ec_irq_handler(). There is a very good chance we'll get
+ * added latency due to:
+ * other irqs
+ * ddrfreq
+ * cpuidle
+ *
+ * Normally a' = c - b + a, but if we do that naive math any jitter in c
+ * will get coupled in a', which we don't want. We want a function
+ * a' = cros_ec_sensor_ring_ts_filter(a) which will filter out outliers in c.
+ *
+ * Think of a graph of AP time (c) on the y axis vs EC time (b) on the x axis.
+ * The slope of the line won't be exactly 1, there will be some clock drift
+ * between the 2 chips for various reasons (mechanical stress, temperature,
+ * voltage). We need to extrapolate values for a future x, without trusting
+ * recent y values too much.
+ *
+ * We use a median filter for the slope, then another median filter for the
+ * y-intercept to calculate this function:
+ * dx[n] = x[n-1] - x[n]
+ * dy[n] = y[n-1] - y[n]
+ * m[n] = dy[n] / dx[n]
+ * median_m = median(m[n-k:n])
+ * error[i] = y[n-i] - median_m * x[n-i]
+ * median_error = median(error[:k])
+ * predicted_y = median_m * x + median_error
+ *
+ * Implementation differences from above:
+ * - Redefined y to actually be c - b; this gives us a lot more precision
+ * for the math. (c-b)/b variations are more obvious than c/b variations.
+ * - Since we don't have floating point, any operations involving slope are
+ * done using fixed point math (*M_PRECISION)
+ * - Since x and y grow with time, we keep zeroing the graph (relative to
+ * the last sample), this way math involving *x[n-i] will not overflow
+ * - EC timestamps are kept in us, it improves the slope calculation precision
+ */
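+
+/*
+ * A made-up numeric example of the scheme above: if two IRQs arrive
+ * dx = 10000 us apart in the EC timebase and the AP-EC offset y grows by
+ * dy = 5000 ns over that interval, the slope sample is m = dy/dx =
+ * 0.5 ns/us (stored as 0.5 * M_PRECISION). With median_m = 0.5 ns/us and
+ * median_error = 100 ns, a future EC timestamp x is predicted to have an
+ * offset of 0.5 * x + 100 ns, which cros_ec_sensor_ring_ts_filter() then
+ * turns back into an absolute AP timestamp.
+ */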
+
+/**
+ * cros_ec_sensor_ring_ts_filter_update() - Update filter history.
+ *
+ * @state: Filter information.
+ * @b: IRQ timestamp, EC timebase (us)
+ * @c: IRQ timestamp, AP timebase (ns)
+ *
+ * Given a new IRQ timestamp pair (EC and AP timebases), add it to the filter
+ * history.
+ */
+static void
+cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
+ *state,
+ s64 b, s64 c)
+{
+ s64 x, y;
+ s64 dx, dy;
+ s64 m; /* stored as *M_PRECISION */
+ s64 *m_history_copy = state->temp_buf;
+ s64 *error = state->temp_buf;
+ int i;
+
+ /* we trust b the most, that'll be our independent variable */
+ x = b;
+ /* y is the offset between AP and EC times, in ns */
+ y = c - b * 1000;
+
+ dx = (state->x_history[0] + state->x_offset) - x;
+ if (dx == 0)
+ return; /* we already have this irq in the history */
+ dy = (state->y_history[0] + state->y_offset) - y;
+ m = div64_s64(dy * M_PRECISION, dx);
+
+ /* Empty filter if we haven't seen any action in a while. */
+ if (-dx > TS_HISTORY_BORED_US)
+ state->history_len = 0;
+
+ /* Move everything over, also update offset to all absolute coords. */
+ for (i = state->history_len - 1; i >= 1; i--) {
+ state->x_history[i] = state->x_history[i - 1] + dx;
+ state->y_history[i] = state->y_history[i - 1] + dy;
+
+ state->m_history[i] = state->m_history[i - 1];
+ /*
+ * Also use the same loop to copy m_history for future
+ * median extraction.
+ */
+ m_history_copy[i] = state->m_history[i - 1];
+ }
+
+ /* Store x and y; remember that the offsets now refer to this sample. */
+ state->x_offset = x;
+ state->y_offset = y;
+ state->x_history[0] = 0;
+ state->y_history[0] = 0;
+
+ state->m_history[0] = m;
+ m_history_copy[0] = m;
+
+ if (state->history_len < CROS_EC_SENSORHUB_TS_HISTORY_SIZE)
+ state->history_len++;
+
+ /* Precalculate things for the filter. */
+ if (state->history_len > TS_HISTORY_THRESHOLD) {
+ state->median_m =
+ cros_ec_sensor_ring_median(m_history_copy,
+ state->history_len - 1);
+
+ /*
+ * Calculate y-intercepts as if median_m is the slope and
+ * points in the history are on the line. median_error will
+ * still be in the offset coordinate system.
+ */
+ for (i = 0; i < state->history_len; i++)
+ error[i] = state->y_history[i] -
+ div_s64(state->median_m * state->x_history[i],
+ M_PRECISION);
+ state->median_error =
+ cros_ec_sensor_ring_median(error, state->history_len);
+ } else {
+ state->median_m = 0;
+ state->median_error = 0;
+ }
+}
+
+/**
+ * cros_ec_sensor_ring_ts_filter() - Translate EC timebase timestamp to AP
+ * timebase
+ *
+ * @state: filter information.
+ * @x: any EC timestamp (us)
+ *
+ * cros_ec_sensor_ring_ts_filter(a) => a' event timestamp, AP timebase
+ * cros_ec_sensor_ring_ts_filter(b) => calculated timestamp when the EC IRQ
+ * should have happened on the AP, with low jitter
+ *
+ * Note: The filter will only activate once state->history_len goes
+ * over TS_HISTORY_THRESHOLD. Otherwise it'll just do the naive c - b + a
+ * transform.
+ *
+ * How to derive the formula, starting from:
+ * f(x) = median_m * x + median_error
+ * That's the calculated AP - EC offset (at the x point in time)
+ * Undo the coordinate system transform:
+ * f(x) = median_m * (x - x_offset) + median_error + y_offset
+ * Remember to undo the "y = c - b * 1000" modification:
+ * f(x) = median_m * (x - x_offset) + median_error + y_offset + x * 1000
+ *
+ * Return: timestamp in AP timebase (ns)
+ */
+static s64
+cros_ec_sensor_ring_ts_filter(struct cros_ec_sensors_ts_filter_state *state,
+ s64 x)
+{
+ return div_s64(state->median_m * (x - state->x_offset), M_PRECISION)
+ + state->median_error + state->y_offset + x * 1000;
+}
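+
+/*
+ * Note that while the filter has not activated yet (median_m and
+ * median_error are both 0, see cros_ec_sensor_ring_ts_filter_update()),
+ * this reduces to y_offset + x * 1000. Since y_offset is the last
+ * observed c - b * 1000, that is exactly the naive c - b + a transform
+ * described above, expressed in ns.
+ */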
+
+/*
+ * Since a and b were originally 32-bit values from the EC,
+ * they overflow relatively often; casting is not enough, so we need to
+ * add an offset.
+ */
+static void
+cros_ec_sensor_ring_fix_overflow(s64 *ts,
+ const s64 overflow_period,
+ struct cros_ec_sensors_ec_overflow_state
+ *state)
+{
+ s64 adjust;
+
+ *ts += state->offset;
+ if (abs(state->last - *ts) > (overflow_period / 2)) {
+ adjust = state->last > *ts ? overflow_period : -overflow_period;
+ state->offset += adjust;
+ *ts += adjust;
+ }
+ state->last = *ts;
+}
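+
+/*
+ * Example: with a 32-bit microsecond counter, overflow_period is 2^32 us
+ * (roughly 71.6 minutes). If the last adjusted timestamp was 0xFFFFF000
+ * and the next raw value is 0x00000100, the apparent jump of almost a
+ * full period makes the check above add overflow_period to the offset,
+ * so adjusted timestamps keep increasing monotonically across the wrap.
+ */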
+
+static void
+cros_ec_sensor_ring_check_for_past_timestamp(struct cros_ec_sensorhub
+ *sensorhub,
+ struct cros_ec_sensors_ring_sample
+ *sample)
+{
+ const u8 sensor_id = sample->sensor_id;
+
+ /* If this event is earlier than one we saw before... */
+ if (sensorhub->batch_state[sensor_id].newest_sensor_event >
+ sample->timestamp)
+ /* mark it for spreading. */
+ sample->timestamp =
+ sensorhub->batch_state[sensor_id].last_ts;
+ else
+ sensorhub->batch_state[sensor_id].newest_sensor_event =
+ sample->timestamp;
+}
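+
+/*
+ * For instance, if the newest event seen for a sensor was stamped 100ms
+ * and a new sample arrives stamped 90ms, the sample is rewound to the
+ * last batch timestamp so that the spreading code will interpolate a
+ * sane value for it.
+ */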
+
+/**
+ * cros_ec_sensor_ring_process_event() - Process one EC FIFO event
+ *
+ * @sensorhub: Sensor Hub object.
+ * @fifo_info: FIFO information from the EC (includes b point, EC timebase).
+ * @fifo_timestamp: EC IRQ, kernel timebase (aka c).
+ * @current_timestamp: calculated event timestamp, kernel timebase (aka a').
+ * @in: incoming FIFO event from EC (includes a point, EC timebase).
+ * @out: outgoing event to user space (includes a').
+ *
+ * Process one EC event, add it in the ring if necessary.
+ *
+ * Return: true if out event has been populated.
+ */
+static bool
+cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
+ const struct ec_response_motion_sense_fifo_info
+ *fifo_info,
+ const ktime_t fifo_timestamp,
+ ktime_t *current_timestamp,
+ struct ec_response_motion_sensor_data *in,
+ struct cros_ec_sensors_ring_sample *out)
+{
+ const s64 now = cros_ec_get_time_ns();
+ int axis, async_flags;
+
+ /* Do not populate the filter based on asynchronous events. */
+ async_flags = in->flags &
+ (MOTIONSENSE_SENSOR_FLAG_ODR | MOTIONSENSE_SENSOR_FLAG_FLUSH);
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP && !async_flags) {
+ s64 a = in->timestamp;
+ s64 b = fifo_info->timestamp;
+ s64 c = fifo_timestamp;
+
+ cros_ec_sensor_ring_fix_overflow(&a, 1LL << 32,
+ &sensorhub->overflow_a);
+ cros_ec_sensor_ring_fix_overflow(&b, 1LL << 32,
+ &sensorhub->overflow_b);
+
+ if (sensorhub->tight_timestamps) {
+ cros_ec_sensor_ring_ts_filter_update(
+ &sensorhub->filter, b, c);
+ *current_timestamp = cros_ec_sensor_ring_ts_filter(
+ &sensorhub->filter, a);
+ } else {
+ s64 new_timestamp;
+
+ /*
+ * Disable filtering since we might add more jitter
+ * if b is at a random point in time.
+ */
+ new_timestamp = fifo_timestamp -
+ fifo_info->timestamp * 1000 +
+ in->timestamp * 1000;
+ /*
+ * The timestamp can be stale if we had to use the fifo
+ * info timestamp.
+ */
+ if (new_timestamp - *current_timestamp > 0)
+ *current_timestamp = new_timestamp;
+ }
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
+ if (sensorhub->tight_timestamps) {
+ sensorhub->batch_state[in->sensor_num].last_len = 0;
+ sensorhub->batch_state[in->sensor_num].penul_len = 0;
+ }
+ /*
+ * An ODR change is only useful for the sensor_ring; it does not
+ * convey information to clients.
+ */
+ return false;
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
+ out->sensor_id = in->sensor_num;
+ out->timestamp = *current_timestamp;
+ out->flag = in->flags;
+ if (sensorhub->tight_timestamps)
+ sensorhub->batch_state[out->sensor_id].last_len = 0;
+ /*
+ * No other payload information provided with
+ * flush ack.
+ */
+ return true;
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP)
+ /* If we just have a timestamp, skip this entry. */
+ return false;
+
+ /* Regular sample */
+ out->sensor_id = in->sensor_num;
+ if (*current_timestamp - now > 0) {
+ /*
+ * This fix is needed to overcome the timestamp filter putting
+ * events in the future.
+ */
+ sensorhub->future_timestamp_total_ns +=
+ *current_timestamp - now;
+ if (++sensorhub->future_timestamp_count ==
+ FUTURE_TS_ANALYTICS_COUNT_MAX) {
+ s64 avg = div_s64(sensorhub->future_timestamp_total_ns,
+ sensorhub->future_timestamp_count);
+ dev_warn_ratelimited(sensorhub->dev,
+ "100 timestamps in the future, %lldns shaved on average\n",
+ avg);
+ sensorhub->future_timestamp_count = 0;
+ sensorhub->future_timestamp_total_ns = 0;
+ }
+ out->timestamp = now;
+ } else {
+ out->timestamp = *current_timestamp;
+ }
+
+ out->flag = in->flags;
+ for (axis = 0; axis < 3; axis++)
+ out->vector[axis] = in->data[axis];
+
+ if (sensorhub->tight_timestamps)
+ cros_ec_sensor_ring_check_for_past_timestamp(sensorhub, out);
+ return true;
+}
+
+/*
+ * cros_ec_sensor_ring_spread_add: Calculate proper timestamps then add to
+ * ringbuffer.
+ *
+ * This is the new spreading code; it assumes every sample's timestamp
+ * precedes the sample. Run if tight_timestamps == true.
+ *
+ * Sometimes the EC receives only one interrupt (hence timestamp) for
+ * a batch of samples. Only the first sample will have the correct
+ * timestamp. So we must interpolate the other samples.
+ * We use the previous batch timestamp and our current batch timestamp
+ * as a way to calculate period, then spread the samples evenly.
+ *
+ * s0 int, 0ms
+ * s1 int, 10ms
+ * s2 int, 20ms
+ * 30ms point goes by, no interrupt, previous one is still asserted
+ * downloading s2 and s3
+ * s3 sample, 20ms (incorrect timestamp)
+ * s4 int, 40ms
+ *
+ * The batches are [(s0), (s1), (s2, s3), (s4)]. Since the 3rd batch
+ * has 2 samples in them, we adjust the timestamp of s3.
+ * s2 - s1 = 10ms, so s3 must be s2 + 10ms => 30ms. If s1 had
+ * been part of a bigger batch, things would have gotten a little
+ * more complicated.
+ *
+ * Note: we also assume another sensor sample doesn't break up a batch
+ * in 2 or more partitions. Example, there can't ever be a sync sensor
+ * in between S2 and S3. This simplifies the following code.
+ */
+static void
+cros_ec_sensor_ring_spread_add(struct cros_ec_sensorhub *sensorhub,
+ unsigned long sensor_mask,
+ struct cros_ec_sensors_ring_sample *last_out)
+{
+ struct cros_ec_sensors_ring_sample *batch_start, *next_batch_start;
+ int id;
+
+ for_each_set_bit(id, &sensor_mask, sensorhub->sensor_num) {
+ for (batch_start = sensorhub->ring; batch_start < last_out;
+ batch_start = next_batch_start) {
+ /*
+ * For each batch (where all samples have the same
+ * timestamp).
+ */
+ int batch_len, sample_idx;
+ struct cros_ec_sensors_ring_sample *batch_end =
+ batch_start;
+ struct cros_ec_sensors_ring_sample *s;
+ s64 batch_timestamp = batch_start->timestamp;
+ s64 sample_period;
+
+ /*
+ * Skip over batches that start with the sensor types
+ * we're not looking at right now.
+ */
+ if (batch_start->sensor_id != id) {
+ next_batch_start = batch_start + 1;
+ continue;
+ }
+
+ /*
+ * Do not start a batch from a flush, as it happens
+ * asynchronously to the regular flow of events.
+ */
+ if (batch_start->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
+ cros_sensorhub_send_sample(sensorhub,
+ batch_start);
+ next_batch_start = batch_start + 1;
+ continue;
+ }
+
+ if (batch_start->timestamp <=
+ sensorhub->batch_state[id].last_ts) {
+ batch_timestamp =
+ sensorhub->batch_state[id].last_ts;
+ batch_len = sensorhub->batch_state[id].last_len;
+
+ sample_idx = batch_len;
+
+ sensorhub->batch_state[id].last_ts =
+ sensorhub->batch_state[id].penul_ts;
+ sensorhub->batch_state[id].last_len =
+ sensorhub->batch_state[id].penul_len;
+ } else {
+ /*
+ * Push the first sample in the batch to the
+ * kfifo; it's guaranteed to be correct, the
+ * rest will follow later on.
+ */
+ sample_idx = 1;
+ batch_len = 1;
+ cros_sensorhub_send_sample(sensorhub,
+ batch_start);
+ batch_start++;
+ }
+
+ /* Find all samples that have the same timestamp. */
+ for (s = batch_start; s < last_out; s++) {
+ if (s->sensor_id != id)
+ /*
+ * Skip over other sensor types that
+ * are interleaved, don't count them.
+ */
+ continue;
+ if (s->timestamp != batch_timestamp)
+ /* we discovered the next batch */
+ break;
+ if (s->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH)
+ /* break on flush packets */
+ break;
+ batch_end = s;
+ batch_len++;
+ }
+
+ if (batch_len == 1)
+ goto done_with_this_batch;
+
+ /* Can we calculate period? */
+ if (sensorhub->batch_state[id].last_len == 0) {
+ dev_warn(sensorhub->dev, "Sensor %d: lost %d samples when spreading\n",
+ id, batch_len - 1);
+ goto done_with_this_batch;
+ /*
+ * Note: we're dropping the rest of the samples
+ * in this batch since we have no idea where
+ * they're supposed to go without a period
+ * calculation.
+ */
+ }
+
+ sample_period = div_s64(batch_timestamp -
+ sensorhub->batch_state[id].last_ts,
+ sensorhub->batch_state[id].last_len);
+ dev_dbg(sensorhub->dev,
+ "Adjusting %d samples, sensor %d last_batch @%lld (%d samples) batch_timestamp=%lld => period=%lld\n",
+ batch_len, id,
+ sensorhub->batch_state[id].last_ts,
+ sensorhub->batch_state[id].last_len,
+ batch_timestamp,
+ sample_period);
+
+ /*
+ * Adjust timestamps of the samples then push them to
+ * kfifo.
+ */
+ for (s = batch_start; s <= batch_end; s++) {
+ if (s->sensor_id != id)
+ /*
+ * Skip over other sensor types that
+ * are interleaved, don't change them.
+ */
+ continue;
+
+ s->timestamp = batch_timestamp +
+ sample_period * sample_idx;
+ sample_idx++;
+
+ cros_sensorhub_send_sample(sensorhub, s);
+ }
+
+done_with_this_batch:
+ sensorhub->batch_state[id].penul_ts =
+ sensorhub->batch_state[id].last_ts;
+ sensorhub->batch_state[id].penul_len =
+ sensorhub->batch_state[id].last_len;
+
+ sensorhub->batch_state[id].last_ts =
+ batch_timestamp;
+ sensorhub->batch_state[id].last_len = batch_len;
+
+ next_batch_start = batch_end + 1;
+ }
+ }
+}
+
+/*
+ * cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then
+ * add to ringbuffer (legacy).
+ *
+ * Note: This assumes we're running old firmware, where every sample's timestamp
+ * is after the sample. Run if tight_timestamps == false.
+ *
+ * If there is a sample with a proper timestamp:
+ *
+ * timestamp | count
+ * -----------------
+ * older_unprocess_out --> TS1 | 1
+ * TS1 | 2
+ * out --> TS1 | 3
+ * next_out --> TS2 |
+ *
+ * We spread time for the samples [older_unprocess_out .. out]
+ * between TS1 and TS2: [TS1+1/4, TS1+2/4, TS1+3/4, TS2].
+ *
+ * If we reach the end of the samples, we compare with the
+ * current timestamp:
+ *
+ * older_unprocess_out --> TS1 | 1
+ * TS1 | 2
+ * out --> TS1 | 3
+ *
+ * We now have [TS1+1/3, TS1+2/3, current timestamp].
+ */
+static void
+cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
+ unsigned long sensor_mask,
+ s64 current_timestamp,
+ struct cros_ec_sensors_ring_sample
+ *last_out)
+{
+ struct cros_ec_sensors_ring_sample *out;
+ int i;
+
+ for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) {
+ s64 older_timestamp;
+ s64 timestamp;
+ struct cros_ec_sensors_ring_sample *older_unprocess_out =
+ sensorhub->ring;
+ struct cros_ec_sensors_ring_sample *next_out;
+ int count = 1;
+
+ for (out = sensorhub->ring; out < last_out; out = next_out) {
+ s64 time_period;
+
+ next_out = out + 1;
+ if (out->sensor_id != i)
+ continue;
+
+ /* Timestamp to start with */
+ older_timestamp = out->timestamp;
+
+ /* Find next sample. */
+ while (next_out < last_out && next_out->sensor_id != i)
+ next_out++;
+
+ if (next_out >= last_out) {
+ timestamp = current_timestamp;
+ } else {
+ timestamp = next_out->timestamp;
+ if (timestamp == older_timestamp) {
+ count++;
+ continue;
+ }
+ }
+
+ /*
+ * The next sample has a new timestamp, spread the
+ * unprocessed samples.
+ */
+ if (next_out < last_out)
+ count++;
+ time_period = div_s64(timestamp - older_timestamp,
+ count);
+
+ for (; older_unprocess_out <= out;
+ older_unprocess_out++) {
+ if (older_unprocess_out->sensor_id != i)
+ continue;
+ older_timestamp += time_period;
+ older_unprocess_out->timestamp =
+ older_timestamp;
+ }
+ count = 1;
+ /* The next_out sample has a valid timestamp, skip. */
+ next_out++;
+ older_unprocess_out = next_out;
+ }
+ }
+
+ /* Push the event into the kfifo */
+ for (out = sensorhub->ring; out < last_out; out++)
+ cros_sensorhub_send_sample(sensorhub, out);
+}
+
+/**
+ * cros_ec_sensorhub_ring_handler() - The trigger handler function
+ *
+ * @sensorhub: Sensor Hub object.
+ *
+ * Called by the notifier, process the EC sensor FIFO queue.
+ */
+static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub)
+{
+ struct ec_response_motion_sense_fifo_info *fifo_info =
+ sensorhub->fifo_info;
+ struct cros_ec_dev *ec = sensorhub->ec;
+ ktime_t fifo_timestamp, current_timestamp;
+ int i, j, number_data, ret;
+ unsigned long sensor_mask = 0;
+ struct ec_response_motion_sensor_data *in;
+ struct cros_ec_sensors_ring_sample *out, *last_out;
+
+ mutex_lock(&sensorhub->cmd_lock);
+
+ /* Get FIFO information if there are lost vectors. */
+ if (fifo_info->total_lost) {
+ int fifo_info_length =
+ sizeof(struct ec_response_motion_sense_fifo_info) +
+ sizeof(u16) * sensorhub->sensor_num;
+
+ /* Need to retrieve the number of lost vectors per sensor */
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
+ sensorhub->msg->outsize = 1;
+ sensorhub->msg->insize = fifo_info_length;
+
+ if (cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg) < 0)
+ goto error;
+
+ memcpy(fifo_info, &sensorhub->resp->fifo_info,
+ fifo_info_length);
+
+ /*
+ * Update the collection time; it will not be as precise as in
+ * the non-error case.
+ */
+ fifo_timestamp = cros_ec_get_time_ns();
+ } else {
+ fifo_timestamp = sensorhub->fifo_timestamp[
+ CROS_EC_SENSOR_NEW_TS];
+ }
+
+ if (fifo_info->count > sensorhub->fifo_size ||
+ fifo_info->size != sensorhub->fifo_size) {
+ dev_warn(sensorhub->dev,
+ "Mismatch EC data: count %d, size %d - expected %d\n",
+ fifo_info->count, fifo_info->size,
+ sensorhub->fifo_size);
+ goto error;
+ }
+
+ /* Copy elements in the main fifo */
+ current_timestamp = sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS];
+ out = sensorhub->ring;
+ for (i = 0; i < fifo_info->count; i += number_data) {
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_READ;
+ sensorhub->params->fifo_read.max_data_vector =
+ fifo_info->count - i;
+ sensorhub->msg->outsize =
+ sizeof(struct ec_params_motion_sense);
+ sensorhub->msg->insize =
+ sizeof(sensorhub->resp->fifo_read) +
+ sensorhub->params->fifo_read.max_data_vector *
+ sizeof(struct ec_response_motion_sensor_data);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg);
+ if (ret < 0) {
+ dev_warn(sensorhub->dev, "Fifo error: %d\n", ret);
+ break;
+ }
+ number_data = sensorhub->resp->fifo_read.number_data;
+ if (number_data == 0) {
+ dev_dbg(sensorhub->dev, "Unexpected empty FIFO\n");
+ break;
+ }
+ if (number_data > fifo_info->count - i) {
+ dev_warn(sensorhub->dev,
+ "Invalid EC data: too many entry received: %d, expected %d\n",
+ number_data, fifo_info->count - i);
+ break;
+ }
+ if (out + number_data >
+ sensorhub->ring + fifo_info->count) {
+ dev_warn(sensorhub->dev,
+ "Too many samples: %d (%zd data) to %d entries for expected %d entries\n",
+ i, out - sensorhub->ring, i + number_data,
+ fifo_info->count);
+ break;
+ }
+
+ for (in = sensorhub->resp->fifo_read.data, j = 0;
+ j < number_data; j++, in++) {
+ if (cros_ec_sensor_ring_process_event(
+ sensorhub, fifo_info,
+ fifo_timestamp,
+ &current_timestamp,
+ in, out)) {
+ sensor_mask |= BIT(in->sensor_num);
+ out++;
+ }
+ }
+ }
+ mutex_unlock(&sensorhub->cmd_lock);
+ last_out = out;
+
+ if (out == sensorhub->ring)
+ /* Unexpected empty FIFO. */
+ goto ring_handler_end;
+
+ /*
+ * Check if current_timestamp is ahead of the last sample. Normally,
+ * the EC appends a timestamp after the last sample, but if the AP
+ * is slow to respond to the IRQ, the EC may have added new samples.
+ * Use the FIFO info timestamp as last timestamp then.
+ */
+ if (!sensorhub->tight_timestamps &&
+ (last_out - 1)->timestamp == current_timestamp)
+ current_timestamp = fifo_timestamp;
+
+ /* Warn on lost samples. */
+ if (fifo_info->total_lost)
+ for (i = 0; i < sensorhub->sensor_num; i++) {
+ if (fifo_info->lost[i]) {
+ dev_warn_ratelimited(sensorhub->dev,
+ "Sensor %d: lost: %d out of %d\n",
+ i, fifo_info->lost[i],
+ fifo_info->total_lost);
+ if (sensorhub->tight_timestamps)
+ sensorhub->batch_state[i].last_len = 0;
+ }
+ }
+
+ /*
+ * Spread samples in case of batching, then add them to the
+ * ringbuffer.
+ */
+ if (sensorhub->tight_timestamps)
+ cros_ec_sensor_ring_spread_add(sensorhub, sensor_mask,
+ last_out);
+ else
+ cros_ec_sensor_ring_spread_add_legacy(sensorhub, sensor_mask,
+ current_timestamp,
+ last_out);
+
+ring_handler_end:
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = current_timestamp;
+ return;
+
+error:
+ mutex_unlock(&sensorhub->cmd_lock);
+}
+
+static int cros_ec_sensorhub_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_sensorhub *sensorhub;
+ struct cros_ec_device *ec_dev;
+
+ sensorhub = container_of(nb, struct cros_ec_sensorhub, notifier);
+ ec_dev = sensorhub->ec->ec_dev;
+
+ if (ec_dev->event_data.event_type != EC_MKBP_EVENT_SENSOR_FIFO)
+ return NOTIFY_DONE;
+
+ if (ec_dev->event_size != sizeof(ec_dev->event_data.data.sensor_fifo)) {
+ dev_warn(ec_dev->dev, "Invalid fifo info size\n");
+ return NOTIFY_DONE;
+ }
+
+ if (queued_during_suspend)
+ return NOTIFY_OK;
+
+ memcpy(sensorhub->fifo_info, &ec_dev->event_data.data.sensor_fifo.info,
+ sizeof(*sensorhub->fifo_info));
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_NEW_TS] =
+ ec_dev->last_event_time;
+ cros_ec_sensorhub_ring_handler(sensorhub);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * cros_ec_sensorhub_ring_allocate() - Prepare the FIFO functionality if the EC
+ * supports it.
+ *
+ * @sensorhub: Sensor Hub object.
+ *
+ * Return: 0 on success.
+ */
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub)
+{
+ int fifo_info_length =
+ sizeof(struct ec_response_motion_sense_fifo_info) +
+ sizeof(u16) * sensorhub->sensor_num;
+
+ /* Allocate the array for lost events. */
+ sensorhub->fifo_info = devm_kzalloc(sensorhub->dev, fifo_info_length,
+ GFP_KERNEL);
+ if (!sensorhub->fifo_info)
+ return -ENOMEM;
+
+ /*
+ * Allocate the callback area based on the number of sensors.
+ */
+ sensorhub->push_data = devm_kcalloc(sensorhub->dev,
+ sensorhub->sensor_num,
+ sizeof(*sensorhub->push_data),
+ GFP_KERNEL);
+ if (!sensorhub->push_data)
+ return -ENOMEM;
+
+ sensorhub->tight_timestamps = cros_ec_check_features(
+ sensorhub->ec,
+ EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
+
+ if (sensorhub->tight_timestamps) {
+ sensorhub->batch_state = devm_kcalloc(sensorhub->dev,
+ sensorhub->sensor_num,
+ sizeof(*sensorhub->batch_state),
+ GFP_KERNEL);
+ if (!sensorhub->batch_state)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC
+ * supports it.
+ *
+ * @sensorhub: Sensor Hub object.
+ *
+ * Return: 0 on success.
+ */
+int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
+{
+ struct cros_ec_dev *ec = sensorhub->ec;
+ int ret;
+ int fifo_info_length =
+ sizeof(struct ec_response_motion_sense_fifo_info) +
+ sizeof(u16) * sensorhub->sensor_num;
+
+ /* Retrieve FIFO information */
+ sensorhub->msg->version = 2;
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
+ sensorhub->msg->outsize = 1;
+ sensorhub->msg->insize = fifo_info_length;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Allocate the full fifo. We need to copy the whole FIFO to set
+ * timestamps properly.
+ */
+ sensorhub->fifo_size = sensorhub->resp->fifo_info.size;
+ sensorhub->ring = devm_kcalloc(sensorhub->dev, sensorhub->fifo_size,
+ sizeof(*sensorhub->ring), GFP_KERNEL);
+ if (!sensorhub->ring)
+ return -ENOMEM;
+
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] =
+ cros_ec_get_time_ns();
+
+ /* Register the notifier that will act as the interrupt top half. */
+ sensorhub->notifier.notifier_call = cros_ec_sensorhub_event;
+ ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier,
+ &sensorhub->notifier);
+ if (ret < 0)
+ return ret;
+
+ /* Start collecting samples. */
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true);
+}
+
+void cros_ec_sensorhub_ring_remove(void *arg)
+{
+ struct cros_ec_sensorhub *sensorhub = arg;
+ struct cros_ec_device *ec_dev = sensorhub->ec->ec_dev;
+
+ /* Disable the ring to prevent needless EC interrupts to the AP. */
+ cros_ec_sensorhub_ring_fifo_enable(sensorhub, false);
+ blocking_notifier_chain_unregister(&ec_dev->event_notifier,
+ &sensorhub->notifier);
+}
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 46786d2d679a..debea5c4c829 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -127,7 +127,8 @@ static int terminate_request(struct cros_ec_device *ec_dev)
*/
spi_message_init(&msg);
memset(&trans, 0, sizeof(trans));
- trans.delay_usecs = ec_spi->end_of_msg_delay;
+ trans.delay.value = ec_spi->end_of_msg_delay;
+ trans.delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&trans, &msg);
ret = spi_sync_locked(ec_spi->spi, &msg);
@@ -416,7 +417,8 @@ static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
spi_message_init(&msg);
if (ec_spi->start_of_msg_delay) {
memset(&trans_delay, 0, sizeof(trans_delay));
- trans_delay.delay_usecs = ec_spi->start_of_msg_delay;
+ trans_delay.delay.value = ec_spi->start_of_msg_delay;
+ trans_delay.delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&trans_delay, &msg);
}
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index 07dac97ad57c..d45ea5d5bfa4 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -149,14 +149,14 @@ static ssize_t version_show(struct device *dev,
/* Get build info. */
msg->command = EC_CMD_GET_BUILD_INFO + ec->cmd_offset;
msg->insize = EC_HOST_PARAM_SIZE;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0)
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Build info: XFER ERROR %d\n", ret);
- else if (msg->result != EC_RES_SUCCESS)
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret == -EPROTO) {
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: EC error %d\n", msg->result);
- else {
+ } else if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: XFER ERROR %d\n", ret);
+ } else {
msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: %s\n", msg->data);
@@ -165,14 +165,14 @@ static ssize_t version_show(struct device *dev,
/* Get chip info. */
msg->command = EC_CMD_GET_CHIP_INFO + ec->cmd_offset;
msg->insize = sizeof(*r_chip);
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0)
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Chip info: XFER ERROR %d\n", ret);
- else if (msg->result != EC_RES_SUCCESS)
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret == -EPROTO) {
count += scnprintf(buf + count, PAGE_SIZE - count,
"Chip info: EC error %d\n", msg->result);
- else {
+ } else if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip info: XFER ERROR %d\n", ret);
+ } else {
r_chip = (struct ec_response_get_chip_info *)msg->data;
r_chip->vendor[sizeof(r_chip->vendor) - 1] = '\0';
@@ -189,14 +189,14 @@ static ssize_t version_show(struct device *dev,
/* Get board version */
msg->command = EC_CMD_GET_BOARD_VERSION + ec->cmd_offset;
msg->insize = sizeof(*r_board);
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0)
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Board version: XFER ERROR %d\n", ret);
- else if (msg->result != EC_RES_SUCCESS)
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret == -EPROTO) {
count += scnprintf(buf + count, PAGE_SIZE - count,
"Board version: EC error %d\n", msg->result);
- else {
+ } else if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: XFER ERROR %d\n", ret);
+ } else {
r_board = (struct ec_response_board_version *)msg->data;
count += scnprintf(buf + count, PAGE_SIZE - count,
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
new file mode 100644
index 000000000000..874269c07073
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google LLC
+ *
+ * This driver provides the ability to view and manage Type C ports through the
+ * Chrome OS EC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/usb/typec.h>
+
+#define DRV_NAME "cros-ec-typec"
+
+/* Platform-specific data for the Chrome OS EC Type C controller. */
+struct cros_typec_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ int num_ports;
+ unsigned int cmd_ver;
+ /* Array of ports, indexed by port number. */
+ struct typec_port *ports[EC_USB_PD_MAX_PORTS];
+ /* Initial capabilities for each port. */
+ struct typec_capability *caps[EC_USB_PD_MAX_PORTS];
+};
+
+static int cros_typec_parse_port_props(struct typec_capability *cap,
+ struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ const char *buf;
+ int ret;
+
+ memset(cap, 0, sizeof(*cap));
+ ret = fwnode_property_read_string(fwnode, "power-role", &buf);
+ if (ret) {
+ dev_err(dev, "power-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->type = ret;
+
+ ret = fwnode_property_read_string(fwnode, "data-role", &buf);
+ if (ret) {
+ dev_err(dev, "data-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_data_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->data = ret;
+
+ ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
+ if (ret) {
+ dev_err(dev, "try-power-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->prefer_role = ret;
+
+ cap->fwnode = fwnode;
+
+ return 0;
+}
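+
+/*
+ * A hypothetical firmware node for one port could look like this in DT
+ * (property names as parsed above; the exact values are board specific):
+ *
+ *   connector@0 {
+ *           reg = <0>;
+ *           power-role = "dual";
+ *           data-role = "dual";
+ *           try-power-role = "source";
+ *   };
+ */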
+
+static int cros_typec_init_ports(struct cros_typec_data *typec)
+{
+ struct device *dev = typec->dev;
+ struct typec_capability *cap;
+ struct fwnode_handle *fwnode;
+ const char *port_prop;
+ int ret;
+ int i;
+ int nports;
+ u32 port_num = 0;
+
+ nports = device_get_child_node_count(dev);
+ if (nports == 0) {
+ dev_err(dev, "No port entries found.\n");
+ return -ENODEV;
+ }
+
+ if (nports > typec->num_ports) {
+ dev_err(dev, "More ports listed than can be supported.\n");
+ return -EINVAL;
+ }
+
+ /* DT uses "reg" to specify port number. */
+ port_prop = dev->of_node ? "reg" : "port-number";
+ device_for_each_child_node(dev, fwnode) {
+ if (fwnode_property_read_u32(fwnode, port_prop, &port_num)) {
+ ret = -EINVAL;
+ dev_err(dev, "No port-number for port, aborting.\n");
+ goto unregister_ports;
+ }
+
+ if (port_num >= typec->num_ports) {
+ dev_err(dev, "Invalid port number.\n");
+ ret = -EINVAL;
+ goto unregister_ports;
+ }
+
+ dev_dbg(dev, "Registering port %d\n", port_num);
+
+ cap = devm_kzalloc(dev, sizeof(*cap), GFP_KERNEL);
+ if (!cap) {
+ ret = -ENOMEM;
+ goto unregister_ports;
+ }
+
+ typec->caps[port_num] = cap;
+
+ ret = cros_typec_parse_port_props(cap, fwnode, dev);
+ if (ret < 0)
+ goto unregister_ports;
+
+ typec->ports[port_num] = typec_register_port(dev, cap);
+ if (IS_ERR(typec->ports[port_num])) {
+ dev_err(dev, "Failed to register port %d\n", port_num);
+ ret = PTR_ERR(typec->ports[port_num]);
+ goto unregister_ports;
+ }
+ }
+
+ return 0;
+
+unregister_ports:
+ for (i = 0; i < typec->num_ports; i++)
+ typec_unregister_port(typec->ports[i]);
+ return ret;
+}
+
+static int cros_typec_ec_command(struct cros_typec_data *typec,
+ unsigned int version,
+ unsigned int command,
+ void *outdata,
+ unsigned int outsize,
+ void *indata,
+ unsigned int insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = version;
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(typec->ec, msg);
+ if (ret >= 0 && insize)
+ memcpy(indata, msg->data, insize);
+
+ kfree(msg);
+ return ret;
+}
+
+static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control *resp)
+{
+ struct typec_port *port = typec->ports[port_num];
+ enum typec_orientation polarity;
+
+ if (!resp->enabled)
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+
+ typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_orientation(port, polarity);
+}
+
+static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control_v1 *resp)
+{
+ struct typec_port *port = typec->ports[port_num];
+ enum typec_orientation polarity;
+
+ if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+ typec_set_orientation(port, polarity);
+ typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ?
+ TYPEC_HOST : TYPEC_DEVICE);
+ typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ?
+ TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
+ TYPEC_SOURCE : TYPEC_SINK);
+}
+
+static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
+{
+ struct ec_params_usb_pd_control req;
+ struct ec_response_usb_pd_control_v1 resp;
+ int ret;
+
+ if (port_num < 0 || port_num >= typec->num_ports) {
+ dev_err(typec->dev, "cannot get status for invalid port %d\n",
+ port_num);
+ return -EINVAL;
+ }
+
+ req.port = port_num;
+ req.role = USB_PD_CTRL_ROLE_NO_CHANGE;
+ req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ req.swap = USB_PD_CTRL_SWAP_NONE;
+
+ ret = cros_typec_ec_command(typec, typec->cmd_ver,
+ EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled);
+ dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role);
+ dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
+ dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state);
+
+ if (typec->cmd_ver == 1)
+ cros_typec_set_port_params_v1(typec, port_num, &resp);
+ else
+ cros_typec_set_port_params_v0(typec, port_num,
+ (struct ec_response_usb_pd_control *) &resp);
+
+ return 0;
+}
+
+static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
+{
+ struct ec_params_get_cmd_versions_v1 req_v1;
+ struct ec_response_get_cmd_versions resp;
+ int ret;
+
+ /* We're interested in the PD control command version. */
+ req_v1.cmd = EC_CMD_USB_PD_CONTROL;
+ ret = cros_typec_ec_command(typec, 1, EC_CMD_GET_CMD_VERSIONS,
+ &req_v1, sizeof(req_v1), &resp,
+ sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (resp.version_mask & EC_VER_MASK(1))
+ typec->cmd_ver = 1;
+ else
+ typec->cmd_ver = 0;
+
+ dev_dbg(typec->dev, "PD Control has version mask 0x%hhx\n",
+ typec->cmd_ver);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_typec_acpi_id[] = {
+ { "GOOG0014", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cros_typec_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id cros_typec_of_match[] = {
+ { .compatible = "google,cros-ec-typec", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cros_typec_of_match);
+#endif
+
+static int cros_typec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_typec_data *typec;
+ struct ec_response_usb_pd_ports resp;
+ int ret, i;
+
+ typec = devm_kzalloc(dev, sizeof(*typec), GFP_KERNEL);
+ if (!typec)
+ return -ENOMEM;
+
+ typec->dev = dev;
+ typec->ec = dev_get_drvdata(pdev->dev.parent);
+ platform_set_drvdata(pdev, typec);
+
+ ret = cros_typec_get_cmd_version(typec);
+ if (ret < 0) {
+ dev_err(dev, "failed to get PD command version info\n");
+ return ret;
+ }
+
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ typec->num_ports = resp.num_ports;
+ if (typec->num_ports > EC_USB_PD_MAX_PORTS) {
+ dev_warn(typec->dev,
+ "Too many ports reported: %d, limiting to max: %d\n",
+ typec->num_ports, EC_USB_PD_MAX_PORTS);
+ typec->num_ports = EC_USB_PD_MAX_PORTS;
+ }
+
+ ret = cros_typec_init_ports(typec);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < typec->num_ports; i++) {
+ ret = cros_typec_port_update(typec, i);
+ if (ret < 0)
+ goto unregister_ports;
+ }
+
+ return 0;
+
+unregister_ports:
+ for (i = 0; i < typec->num_ports; i++)
+ if (typec->ports[i])
+ typec_unregister_port(typec->ports[i]);
+ return ret;
+}
+
+static struct platform_driver cros_typec_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = ACPI_PTR(cros_typec_acpi_id),
+ .of_match_table = of_match_ptr(cros_typec_of_match),
+ },
+ .probe = cros_typec_probe,
+};
+
+module_platform_driver(cros_typec_driver);
+
+MODULE_AUTHOR("Prashant Malani <pmalani@chromium.org>");
+MODULE_DESCRIPTION("Chrome OS EC Type C control");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 8edae465105c..46482d12cffe 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -40,7 +40,7 @@ static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
msg->outsize = para_sz;
msg->insize = resp_sz;
- err = cros_ec_cmd_xfer(ecdev, msg);
+ err = cros_ec_cmd_xfer_status(ecdev, msg);
if (err < 0) {
dev_err(dev, "Error sending read request: %d\n", err);
kfree(msg);
@@ -83,7 +83,7 @@ static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
msg->outsize = para_sz;
msg->insize = 0;
- err = cros_ec_cmd_xfer(ecdev, msg);
+ err = cros_ec_cmd_xfer_status(ecdev, msg);
if (err < 0) {
dev_err(dev, "Error sending write request: %d\n", err);
kfree(msg);
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
new file mode 100644
index 000000000000..7f36142ab12a
--- /dev/null
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google LLC
+ *
+ * This driver serves as the receiver of cros_ec PD host events.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "cros-usbpd-notify"
+#define DRV_NAME_PLAT_ACPI "cros-usbpd-notify-acpi"
+#define ACPI_DRV_NAME "GOOG0003"
+
+static BLOCKING_NOTIFIER_HEAD(cros_usbpd_notifier_list);
+
+struct cros_usbpd_notify_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ struct notifier_block nb;
+};
+
+/**
+ * cros_usbpd_register_notify - Register a notifier callback for PD events.
+ * @nb: Notifier block pointer to register
+ *
+ * On ACPI platforms this corresponds to host events on the ECPD
+ * "GOOG0003" ACPI device. On non-ACPI platforms this will filter mkbp events
+ * for USB PD events.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_usbpd_register_notify(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&cros_usbpd_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL_GPL(cros_usbpd_register_notify);
+
+/**
+ * cros_usbpd_unregister_notify - Unregister notifier callback for PD events.
+ * @nb: Notifier block pointer to unregister
+ *
+ * Unregister a notifier callback that was previously registered with
+ * cros_usbpd_register_notify().
+ */
+void cros_usbpd_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&cros_usbpd_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cros_usbpd_unregister_notify);
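+
+/*
+ * Illustrative consumer sketch (hypothetical, not part of this driver):
+ * fill in a notifier_block and register it; the PD host event status is
+ * passed to the callback as the "action" argument (0 on legacy devices):
+ *
+ *   static int my_pd_notify(struct notifier_block *nb, unsigned long event,
+ *                           void *data)
+ *   {
+ *           // React to the PD host event status in "event" here.
+ *           return NOTIFY_OK;
+ *   }
+ *
+ *   static struct notifier_block my_nb = { .notifier_call = my_pd_notify };
+ *
+ *   cros_usbpd_register_notify(&my_nb);
+ */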
+
+/**
+ * cros_ec_pd_command - Send a command to the EC.
+ *
+ * @ec_dev: EC device
+ * @command: EC command
+ * @outdata: EC command output data
+ * @outsize: Size of outdata
+ * @indata: EC command input data
+ * @insize: Size of indata
+ *
+ * Return: >= 0 on success, negative error number on failure.
+ */
+static int cros_ec_pd_command(struct cros_ec_device *ec_dev,
+ int command,
+ uint8_t *outdata,
+ int outsize,
+ uint8_t *indata,
+ int insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(insize, outsize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret < 0)
+ goto error;
+
+ if (insize)
+ memcpy(indata, msg->data, insize);
+error:
+ kfree(msg);
+ return ret;
+}
+
+static void cros_usbpd_get_event_and_notify(struct device *dev,
+ struct cros_ec_device *ec_dev)
+{
+ struct ec_response_host_event_status host_event_status;
+ u32 event = 0;
+ int ret;
+
+ /*
+ * We still send a 0 event out to older devices which don't
+ * have the updated device hierarchy.
+ */
+ if (!ec_dev) {
+ dev_dbg(dev,
+ "EC device inaccessible; sending 0 event status.\n");
+ goto send_notify;
+ }
+
+ /* Check for PD host events on EC. */
+ ret = cros_ec_pd_command(ec_dev, EC_CMD_PD_HOST_EVENT_STATUS,
+ NULL, 0,
+ (uint8_t *)&host_event_status,
+ sizeof(host_event_status));
+ if (ret < 0) {
+ dev_warn(dev, "Can't get host event status (err: %d)\n", ret);
+ goto send_notify;
+ }
+
+ event = host_event_status.status;
+
+send_notify:
+ blocking_notifier_call_chain(&cros_usbpd_notifier_list, event, NULL);
+}
+
+#ifdef CONFIG_ACPI
+
+static void cros_usbpd_notify_acpi(acpi_handle device, u32 event, void *data)
+{
+ struct cros_usbpd_notify_data *pdnotify = data;
+
+ cros_usbpd_get_event_and_notify(pdnotify->dev, pdnotify->ec);
+}
+
+static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev)
+{
+ struct cros_usbpd_notify_data *pdnotify;
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ struct cros_ec_device *ec_dev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(dev);
+
+ pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL);
+ if (!pdnotify)
+ return -ENOMEM;
+
+ /* Get the EC device pointer needed to talk to the EC. */
+ ec_dev = dev_get_drvdata(dev->parent);
+ if (!ec_dev) {
+ /*
+ * We continue even for older devices which don't have the
+ * correct device hierarchy, namely, GOOG0003 is a child
+ * of GOOG0004.
+ */
+ dev_warn(dev, "Couldn't get Chrome EC device pointer.\n");
+ }
+
+ pdnotify->dev = dev;
+ pdnotify->ec = ec_dev;
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_ALL_NOTIFY,
+ cros_usbpd_notify_acpi,
+ pdnotify);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(dev, "Failed to register notify handler %08x\n",
+ status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cros_usbpd_notify_remove_acpi(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY,
+ cros_usbpd_notify_acpi);
+
+ return 0;
+}
+
+static const struct acpi_device_id cros_usbpd_notify_acpi_device_ids[] = {
+ { ACPI_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_usbpd_notify_acpi_device_ids);
+
+static struct platform_driver cros_usbpd_notify_acpi_driver = {
+ .driver = {
+ .name = DRV_NAME_PLAT_ACPI,
+ .acpi_match_table = cros_usbpd_notify_acpi_device_ids,
+ },
+ .probe = cros_usbpd_notify_probe_acpi,
+ .remove = cros_usbpd_notify_remove_acpi,
+};
+
+#endif /* CONFIG_ACPI */
+
+static int cros_usbpd_notify_plat(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *data)
+{
+ struct cros_usbpd_notify_data *pdnotify = container_of(nb,
+ struct cros_usbpd_notify_data, nb);
+ struct cros_ec_device *ec_dev = (struct cros_ec_device *)data;
+ u32 host_event = cros_ec_get_host_event(ec_dev);
+
+ if (!host_event)
+ return NOTIFY_DONE;
+
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) {
+ cros_usbpd_get_event_and_notify(pdnotify->dev, ec_dev);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static int cros_usbpd_notify_probe_plat(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent);
+ struct cros_usbpd_notify_data *pdnotify;
+ int ret;
+
+ pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL);
+ if (!pdnotify)
+ return -ENOMEM;
+
+ pdnotify->dev = dev;
+ pdnotify->ec = ecdev->ec_dev;
+ pdnotify->nb.notifier_call = cros_usbpd_notify_plat;
+
+ dev_set_drvdata(dev, pdnotify);
+
+ ret = blocking_notifier_chain_register(&ecdev->ec_dev->event_notifier,
+ &pdnotify->nb);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register notifier\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_usbpd_notify_remove_plat(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent);
+ struct cros_usbpd_notify_data *pdnotify =
+ (struct cros_usbpd_notify_data *)dev_get_drvdata(dev);
+
+ blocking_notifier_chain_unregister(&ecdev->ec_dev->event_notifier,
+ &pdnotify->nb);
+
+ return 0;
+}
+
+static struct platform_driver cros_usbpd_notify_plat_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_usbpd_notify_probe_plat,
+ .remove = cros_usbpd_notify_remove_plat,
+};
+
+static int __init cros_usbpd_notify_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&cros_usbpd_notify_plat_driver);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_ACPI
+ platform_driver_register(&cros_usbpd_notify_acpi_driver);
+#endif
+ return 0;
+}
+
+static void __exit cros_usbpd_notify_exit(void)
+{
+#ifdef CONFIG_ACPI
+ platform_driver_unregister(&cros_usbpd_notify_acpi_driver);
+#endif
+ platform_driver_unregister(&cros_usbpd_notify_plat_driver);
+}
+
+module_init(cros_usbpd_notify_init);
+module_exit(cros_usbpd_notify_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS power delivery notifier device");
+MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index dba3d445623f..814518509739 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -79,7 +79,7 @@ static DEFINE_IDA(event_ida);
struct ec_event {
u16 size;
u16 type;
- u16 event[0];
+ u16 event[];
} __packed;
#define ec_event_num_words(ev) (ev->size - 1)
@@ -96,7 +96,7 @@ struct ec_event_queue {
int capacity;
int head;
int tail;
- struct ec_event *entries[0];
+ struct ec_event *entries[];
};
/* Maximum number of events to store in ec_event_queue */
diff --git a/drivers/platform/chrome/wilco_ec/properties.c b/drivers/platform/chrome/wilco_ec/properties.c
index 62f27610dd33..c2bf4c95c5d2 100644
--- a/drivers/platform/chrome/wilco_ec/properties.c
+++ b/drivers/platform/chrome/wilco_ec/properties.c
@@ -3,8 +3,11 @@
* Copyright 2019 Google LLC
*/
+#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/string.h>
+#include <linux/types.h>
#include <asm/unaligned.h>
/* Operation code; what the EC should do with the property */
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index f0d174b6bb21..3c587b4054a5 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -8,8 +8,12 @@
* See Documentation/ABI/testing/sysfs-platform-wilco-ec for more information.
*/
+#include <linux/device.h>
+#include <linux/kernel.h>
#include <linux/platform_data/wilco-ec.h>
+#include <linux/string.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
#define CMD_KB_CMOS 0x7C
#define SUB_CMD_KB_CMOS_AUTO_ON 0x03
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 587403c44598..0ad7ad8cf8e1 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -16,40 +16,103 @@ menuconfig X86_PLATFORM_DEVICES
if X86_PLATFORM_DEVICES
-config ACER_WMI
- tristate "Acer WMI Laptop Extras"
+config ACPI_WMI
+ tristate "WMI"
+ depends on ACPI
+ help
+ This driver adds support for the ACPI-WMI (Windows Management
+ Instrumentation) mapper device (PNP0C14) found on some systems.
+
+ ACPI-WMI is a proprietary extension to ACPI to expose parts of the
+ ACPI firmware to userspace - this is done through various vendor
+ defined methods and data blocks in a PNP0C14 device, which are then
+ made available for userspace to call.
+
+ The implementation of this in Linux currently only exposes this to
+ other kernel space drivers.
+
+ This driver is a required dependency to build the firmware specific
+ drivers needed on many machines, including Acer and HP laptops.
+
+ It is safe to enable this driver even if your DSDT doesn't define
+ any ACPI-WMI devices.
+
+config WMI_BMOF
+ tristate "WMI embedded Binary MOF driver"
+ depends on ACPI_WMI
+ default ACPI_WMI
+ ---help---
+ Say Y here if you want to be able to read a firmware-embedded
+ WMI Binary MOF data. Using this requires userspace tools and may be
+ rather tedious.
+
+ To compile this driver as a module, choose M here: the module will
+ be called wmi-bmof.
+
+config ALIENWARE_WMI
+ tristate "Alienware Special feature control"
depends on ACPI
+ depends on LEDS_CLASS
+ depends on NEW_LEDS
+ depends on ACPI_WMI
+ ---help---
+ This is a driver for controlling Alienware BIOS driven
+ features. It exposes an interface for controlling the AlienFX
+ zones on Alienware machines that don't contain a dedicated AlienFX
+ USB MCU such as the X51 and X51-R2.
+
+config HUAWEI_WMI
+ tristate "Huawei WMI laptop extras driver"
+ depends on ACPI_BATTERY
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
select NEW_LEDS
- depends on BACKLIGHT_CLASS_DEVICE
- depends on SERIO_I8042
- depends on INPUT
- depends on RFKILL || RFKILL = n
+ help
+ This driver provides support for Huawei WMI hotkeys, battery charge
+ control, fn-lock, mic-mute LED, and other extra features.
+
+ To compile this driver as a module, choose M here: the module
+ will be called huawei-wmi.
+
+config INTEL_WMI_THUNDERBOLT
+ tristate "Intel WMI thunderbolt force power driver"
depends on ACPI_WMI
- select INPUT_SPARSEKMAP
- # Acer WMI depends on ACPI_VIDEO when ACPI is enabled
- select ACPI_VIDEO if ACPI
---help---
- This is a driver for newer Acer (and Wistron) laptops. It adds
- wireless radio and bluetooth control, and on some laptops,
- exposes the mail LED and LCD backlight.
+ Say Y here if you want to be able to use the WMI interface on select
+ systems to force the power control of Intel Thunderbolt controllers.
+ This is useful for updating the firmware when devices are not plugged
+ into the controller.
- If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
- here.
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-thunderbolt.
-config ACER_WIRELESS
- tristate "Acer Wireless Radio Control Driver"
- depends on ACPI
- depends on INPUT
- ---help---
- The Acer Wireless Radio Control handles the airplane mode hotkey
- present on new Acer laptops.
+config MXM_WMI
+ tristate "WMI support for MXM Laptop Graphics"
+ depends on ACPI_WMI
+ ---help---
+ MXM is a standard for laptop graphics cards; the WMI interface
+ is required on machines with switchable NVIDIA graphics.
- Say Y or M here if you have an Acer notebook with an airplane mode
- hotkey.
+config PEAQ_WMI
+ tristate "PEAQ 2-in-1 WMI hotkey driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ help
+ Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
- If you choose to compile this driver as a module the module will be
- called acer-wireless.
+config XIAOMI_WMI
+ tristate "Xiaomi WMI key driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ help
+ Say Y here if you want to support WMI-based keys on Xiaomi notebooks.
+
+ To compile this driver as a module, choose M here: the module will
+ be called xiaomi-wmi.
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
@@ -72,17 +135,53 @@ config ACERHDF
If you have an Acer Aspire One netbook, say Y or M
here.
-config ALIENWARE_WMI
- tristate "Alienware Special feature control"
+config ACER_WIRELESS
+ tristate "Acer Wireless Radio Control Driver"
+ depends on ACPI
+ depends on INPUT
+ ---help---
+ The Acer Wireless Radio Control handles the airplane mode hotkey
+ present on new Acer laptops.
+
+ Say Y or M here if you have an Acer notebook with an airplane mode
+ hotkey.
+
+ If you choose to compile this driver as a module the module will be
+ called acer-wireless.
+
+config ACER_WMI
+ tristate "Acer WMI Laptop Extras"
depends on ACPI
- depends on LEDS_CLASS
- depends on NEW_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on SERIO_I8042
+ depends on INPUT
+ depends on RFKILL || RFKILL = n
depends on ACPI_WMI
+ select INPUT_SPARSEKMAP
+ # Acer WMI depends on ACPI_VIDEO when ACPI is enabled
+ select ACPI_VIDEO if ACPI
---help---
- This is a driver for controlling Alienware BIOS driven
- features. It exposes an interface for controlling the AlienFX
- zones on Alienware machines that don't contain a dedicated AlienFX
- USB MCU such as the X51 and X51-R2.
+ This is a driver for newer Acer (and Wistron) laptops. It adds
+ wireless radio and bluetooth control, and on some laptops,
+ exposes the mail LED and LCD backlight.
+
+ If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
+ here.
+
+config APPLE_GMUX
+ tristate "Apple Gmux Driver"
+ depends on ACPI && PCI
+ depends on PNP
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
+ depends on ACPI_VIDEO=n || ACPI_VIDEO
+ ---help---
+ This driver provides support for the gmux device found on many
+ Apple laptops, which controls the display mux for the hybrid
+ graphics as well as the backlight. Currently only backlight
+ control is supported by the driver.
config ASUS_LAPTOP
tristate "Asus Laptop Extras"
@@ -108,6 +207,91 @@ config ASUS_LAPTOP
If you have an ACPI-compatible ASUS laptop, say Y or M here.
+config ASUS_WIRELESS
+ tristate "Asus Wireless Radio Control Driver"
+ depends on ACPI
+ depends on INPUT
+ select NEW_LEDS
+ select LEDS_CLASS
+ ---help---
+ The Asus Wireless Radio Control handles the airplane mode hotkey
+ present on some Asus laptops.
+
+ Say Y or M here if you have an ASUS notebook with an airplane mode
+ hotkey.
+
+ If you choose to compile this driver as a module the module will be
+ called asus-wireless.
+
+config ASUS_WMI
+ tristate "ASUS WMI Driver"
+ depends on ACPI_WMI
+ depends on ACPI_BATTERY
+ depends on INPUT
+ depends on HWMON
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on RFKILL || RFKILL = n
+ depends on HOTPLUG_PCI
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select NEW_LEDS
+ ---help---
+ Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
+ Asus Notebooks).
+
+ To compile this driver as a module, choose M here: the module will
+ be called asus-wmi.
+
+config ASUS_NB_WMI
+ tristate "Asus Notebook WMI Driver"
+ depends on ASUS_WMI
+ depends on SERIO_I8042 || SERIO_I8042 = n
+ ---help---
+ This is a driver for newer Asus notebooks. It adds extra features
+ like wireless radio and bluetooth control, leds, hotkeys, backlight...
+
+ For more information, see
+ <file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
+
+ If you have an ACPI-WMI compatible Asus Notebook, say Y or M
+ here.
+
+config EEEPC_LAPTOP
+ tristate "Eee PC Hotkey Driver"
+ depends on ACPI
+ depends on INPUT
+ depends on RFKILL || RFKILL = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on HOTPLUG_PCI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select HWMON
+ select LEDS_CLASS
+ select NEW_LEDS
+ select INPUT_SPARSEKMAP
+ ---help---
+ This driver supports the Fn-Fx keys on Eee PC laptops.
+
+ It also gives access to some extra laptop functionalities like
+ Bluetooth, backlight and allows powering on/off some other
+ devices.
+
+ If you have an Eee PC laptop, say Y or M here. If this driver
+ doesn't work on your Eee PC, try eeepc-wmi instead.
+
+config EEEPC_WMI
+ tristate "Eee PC WMI Driver"
+ depends on ASUS_WMI
+ ---help---
+ This is a driver for newer Eee PC laptops. It adds extra features
+ like wireless radio and bluetooth control, leds, hotkeys, backlight...
+
+ For more information, see
+ <file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
+
+ If you have an ACPI-WMI compatible Eee PC laptop (>= 1000), say Y or M
+ here.
+
config DCDBAS
tristate "Dell Systems Management Base Driver"
depends on X86
@@ -183,6 +367,44 @@ config DELL_LAPTOP
This driver adds support for rfkill and backlight control to Dell
laptops (except for some models covered by the Compal driver).
+config DELL_RBTN
+ tristate "Dell Airplane Mode Switch driver"
+ depends on ACPI
+ depends on INPUT
+ depends on RFKILL
+ ---help---
+ Say Y here if you want to support the Dell Airplane Mode Switch ACPI
+ device on Dell laptops. It is sometimes named DELLABCE or DELRBTN.
+ This driver registers an rfkill device or an input hotkey device,
+ depending on the hardware type (HW switch slider or keyboard toggle
+ button). For rfkill devices it receives HW switch events and sets
+ the correct hard rfkill state.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-rbtn.
+
+config DELL_RBU
+ tristate "BIOS update support for DELL systems via sysfs"
+ depends on X86
+ select FW_LOADER
+ select FW_LOADER_USER_HELPER
+ help
+ Say M if you want to have the option of updating the BIOS for your
+ DELL system. Note that you need a supporting application (Dell
+ OpenManage or a Dell Update Package (DUP)) to communicate the new
+ image to the BIOS for the update to take effect.
+ See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
+
+config DELL_SMO8800
+ tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
+ depends on ACPI
+ ---help---
+ Say Y here if you want to support SMO88XX freefall devices
+ on Dell Latitude laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called dell-smo8800.
+
config DELL_WMI
tristate "Dell WMI notifications"
depends on ACPI_WMI
@@ -222,44 +444,13 @@ config DELL_WMI_LED
This adds support for the Latitude 2100 and similar
notebooks that have an external LED.
-config DELL_SMO8800
- tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
- depends on ACPI
- ---help---
- Say Y here if you want to support SMO88XX freefall devices
- on Dell Latitude laptops.
-
- To compile this driver as a module, choose M here: the module will
- be called dell-smo8800.
-
-config DELL_RBTN
- tristate "Dell Airplane Mode Switch driver"
- depends on ACPI
- depends on INPUT
+config AMILO_RFKILL
+ tristate "Fujitsu-Siemens Amilo rfkill support"
depends on RFKILL
+ depends on SERIO_I8042
---help---
- Say Y here if you want to support Dell Airplane Mode Switch ACPI
- device on Dell laptops. Sometimes it has names: DELLABCE or DELRBTN.
- This driver register rfkill device or input hotkey device depending
- on hardware type (hw switch slider or keyboard toggle button). For
- rfkill devices it receive HW switch events and set correct hard
- rfkill state.
-
- To compile this driver as a module, choose M here: the module will
- be called dell-rbtn.
-
-config DELL_RBU
- tristate "BIOS update support for DELL systems via sysfs"
- depends on X86
- select FW_LOADER
- select FW_LOADER_USER_HELPER
- help
- Say m if you want to have the option of updating the BIOS for your
- DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
- supporting application to communicate with the BIOS regarding the new
- image for the image update to take effect.
- See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
-
+ This is a driver for enabling wifi on some Fujitsu-Siemens Amilo
+ laptops.
config FUJITSU_LAPTOP
tristate "Fujitsu Laptop Extras"
@@ -297,14 +488,6 @@ config FUJITSU_TABLET
If you have a Fujitsu convertible or slate, say Y or M here.
-config AMILO_RFKILL
- tristate "Fujitsu-Siemens Amilo rfkill support"
- depends on RFKILL
- depends on SERIO_I8042
- ---help---
- This is a driver for enabling wifi on some Fujitsu-Siemens Amilo
- laptops.
-
config GPD_POCKET_FAN
tristate "GPD Pocket Fan Controller support"
depends on ACPI
@@ -317,15 +500,6 @@ config GPD_POCKET_FAN
of the CPU temperature. Say Y or M if the kernel may be used on a
GPD pocket.
-config TC1100_WMI
- tristate "HP Compaq TC1100 Tablet WMI Extras"
- depends on !X86_64
- depends on ACPI
- depends on ACPI_WMI
- ---help---
- This is a driver for the WMI extensions (wireless and bluetooth power
- control) of the HP Compaq TC1100 tablet.
-
config HP_ACCEL
tristate "HP laptop accelerometer"
depends on INPUT && ACPI
@@ -369,91 +543,30 @@ config HP_WMI
To compile this driver as a module, choose M here: the module will
be called hp-wmi.
-config LG_LAPTOP
- tristate "LG Laptop Extras"
+config TC1100_WMI
+ tristate "HP Compaq TC1100 Tablet WMI Extras"
+ depends on !X86_64
depends on ACPI
depends on ACPI_WMI
- depends on INPUT
- select INPUT_SPARSEKMAP
- select LEDS_CLASS
- help
- This driver adds support for hotkeys as well as control of keyboard
- backlight, battery maximum charge level and various other ACPI
- features.
-
- If you have an LG Gram laptop, say Y or M here.
-
-config MSI_LAPTOP
- tristate "MSI Laptop Extras"
- depends on ACPI
- depends on BACKLIGHT_CLASS_DEVICE
- depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on RFKILL
- depends on INPUT && SERIO_I8042
- select INPUT_SPARSEKMAP
- ---help---
- This is a driver for laptops built by MSI (MICRO-STAR
- INTERNATIONAL):
-
- MSI MegaBook S270 (MS-1013)
- Cytron/TCM/Medion/Tchibo MD96100/SAM2000
-
- It adds support for Bluetooth, WLAN and LCD brightness control.
-
- More information about this driver is available at
- <http://0pointer.de/lennart/tchibo.html>.
-
- If you have an MSI S270 laptop, say Y or M here.
-
-config PANASONIC_LAPTOP
- tristate "Panasonic Laptop Extras"
- depends on INPUT && ACPI
- depends on BACKLIGHT_CLASS_DEVICE
- select INPUT_SPARSEKMAP
---help---
- This driver adds support for access to backlight control and hotkeys
- on Panasonic Let's Note laptops.
-
- If you have a Panasonic Let's note laptop (such as the R1(N variant),
- R2, R3, R5, T2, W2 and Y2 series), say Y.
+ This is a driver for the WMI extensions (wireless and Bluetooth power
+ control) of the HP Compaq TC1100 tablet.
-config COMPAL_LAPTOP
- tristate "Compal (and others) Laptop Extras"
- depends on ACPI
- depends on BACKLIGHT_CLASS_DEVICE
- depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on RFKILL
- depends on HWMON
- depends on POWER_SUPPLY
+config IBM_RTL
+ tristate "Device driver to enable PRTL support"
+ depends on PCI
---help---
- This is a driver for laptops built by Compal, and some models by
- other brands (e.g. Dell, Toshiba).
-
- It adds support for rfkill, Bluetooth, WLAN, LCD brightness, hwmon
- and battery charging level control.
-
-config SONY_LAPTOP
- tristate "Sony Laptop Extras"
- depends on ACPI
- depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on BACKLIGHT_CLASS_DEVICE
- depends on INPUT
- depends on RFKILL
- ---help---
- This mini-driver drives the SNC and SPIC devices present in the ACPI
- BIOS of the Sony Vaio laptops.
-
- It gives access to some extra laptop functionalities like Bluetooth,
- screen brightness control, Fn keys and allows powering on/off some
- devices.
-
- Read <file:Documentation/admin-guide/laptops/sony-laptop.rst> for more information.
+ Enable support for IBM Premium Real Time Mode (PRTM).
+ This module will allow you to enter and exit PRTM in the BIOS via
+ sysfs on platforms that support this feature. A system in PRTM will
+ not receive CPU-generated SMIs for recoverable errors. Use of this
+ feature without proper support may void your hardware warranty.
-config SONYPI_COMPAT
- bool "Sonypi compatibility"
- depends on SONY_LAPTOP
- ---help---
- Build the sonypi driver compatibility code into the sony-laptop driver.
+ If the proper BIOS support is found the driver will load and create
+ /sys/devices/system/ibm_rtl/. The "state" variable will indicate
+ whether or not the BIOS is in PRTM.
+ state = 0 (BIOS SMIs on)
+ state = 1 (BIOS SMIs off)
config IDEAPAD_LAPTOP
tristate "Lenovo IdeaPad Laptop Extras"
@@ -468,17 +581,23 @@ config IDEAPAD_LAPTOP
This is a driver for Lenovo IdeaPad netbooks; it contains drivers for
rfkill switch, hotkey, fan control and backlight control.
-config SURFACE3_WMI
- tristate "Surface 3 WMI Driver"
- depends on ACPI_WMI
- depends on DMI
+config SENSORS_HDAPS
+ tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
depends on INPUT
- depends on SPI
- ---help---
- Say Y here if you have a Surface 3.
+ help
+ This driver provides support for the IBM Hard Drive Active Protection
+ System (hdaps), which provides an accelerometer and other misc. data.
+ ThinkPads starting with the R50, T41, and X40 are supported. The
+ accelerometer data is readable via sysfs.
- To compile this driver as a module, choose M here: the module will
- be called surface3-wmi.
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
+
+ If your ThinkPad is not recognized by the driver, please update to the
+ latest BIOS. This is especially the case for some R52 ThinkPads.
+
+ Say Y here if you have an applicable laptop and want to experience
+ the awesome power of hdaps.
config THINKPAD_ACPI
tristate "ThinkPad ACPI Laptop Extras"
@@ -619,23 +738,72 @@ config THINKPAD_ACPI_HOTKEY_POLL
If you are not sure, say Y here. The driver enables polling only if
it is strictly necessary to do so.
-config SENSORS_HDAPS
- tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
+config INTEL_ATOMISP2_PM
+ tristate "Intel AtomISP2 dummy / power-management driver"
+ depends on PCI && IOSF_MBI && PM
+ help
+ Power-management driver for Intel's Image Signal Processor found on
+ Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+ is to turn the ISP off (put it in D3) to save power and to allow
+ entering of S0ix modes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_pm.
+
+config INTEL_CHT_INT33FE
+ tristate "Intel Cherry Trail ACPI INT33FE Driver"
+ depends on X86 && ACPI && I2C && REGULATOR
+ depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
+ depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
+ depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
+ ---help---
+ This driver adds support for the INT33FE ACPI device found on
+ some Intel Cherry Trail devices.
+
+ There are two kinds of INT33FE ACPI device possible: one for hardware
+ with a USB Type-C connector and one for Micro-B. This driver supports both.
+
+ The INT33FE ACPI device has a CRS table with I2cSerialBusV2
+ resources for a Fuel Gauge Controller and (in the Type-C variant)
+ an FUSB302 USB Type-C Controller and a PI3USB30532 USB switch.
+ This driver instantiates i2c-clients for these, so that standard
+ i2c drivers for these chips can bind to them.
+
+ If you enable this driver, it is advised to also select
+ CONFIG_BATTERY_BQ27XXX=m or CONFIG_BATTERY_BQ27XXX_I2C=m for the
+ Micro-B device, and CONFIG_TYPEC_FUSB302=m and CONFIG_BATTERY_MAX17042=m
+ for the Type-C device.
+
+config INTEL_HID_EVENT
+ tristate "INTEL HID Event"
+ depends on ACPI
depends on INPUT
+ select INPUT_SPARSEKMAP
help
- This driver provides support for the IBM Hard Drive Active Protection
- System (hdaps), which provides an accelerometer and other misc. data.
- ThinkPads starting with the R50, T41, and X40 are supported. The
- accelerometer data is readable via sysfs.
+ This driver provides support for the Intel HID Event hotkey interface.
+ Some laptops require this driver for hotkey support.
- This driver also provides an absolute input class device, allowing
- the laptop to act as a pinball machine-esque joystick.
+ To compile this driver as a module, choose M here: the module will
+ be called intel_hid.
- If your ThinkPad is not recognized by the driver, please update to latest
- BIOS. This is especially the case for some R52 ThinkPads.
+config INTEL_INT0002_VGPIO
+ tristate "Intel ACPI INT0002 Virtual GPIO driver"
+ depends on GPIOLIB && ACPI
+ select GPIOLIB_IRQCHIP
+ ---help---
+ Some peripherals on Bay Trail and Cherry Trail platforms signal a
+ Power Management Event (PME) to the Power Management Controller (PMC)
+ to wake up the system. When this happens, software needs to explicitly
+ clear the PME bus 0 status bit in the GPE0a_STS register to avoid an
+ IRQ storm on IRQ 9.
- Say Y here if you have an applicable laptop and want to experience
- the awesome power of hdaps.
+ This is modelled in ACPI through the INT0002 ACPI device, which is
+ called a "Virtual GPIO controller" in ACPI because it defines the
+ event handler to call when the PME triggers through _AEI and _L02
+ methods as would be done for a real GPIO interrupt in ACPI.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_int0002_vgpio.
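
A minimal sketch of the ack sequence this help text describes is shown
below. The I/O port address and bit number are illustrative assumptions,
not values taken from this patch; the real driver defines its own
constants.

    #include <linux/bits.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define GPE0A_STS_PORT       0x420    /* assumed I/O port of GPE0a_STS  */
    #define GPE0A_PME_B0_STS_BIT BIT(13)  /* assumed "PME bus 0" status bit */

    static irqreturn_t pme_b0_irq(int irq, void *data)
    {
        u32 sts = inl(GPE0A_STS_PORT);

        if (!(sts & GPE0A_PME_B0_STS_BIT))
            return IRQ_NONE;    /* shared IRQ 9, not our event */

        /* GPE status bits are write-1-to-clear; acking ends the storm. */
        outl(GPE0A_PME_B0_STS_BIT, GPE0A_STS_PORT);
        return IRQ_HANDLED;
    }
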
config INTEL_MENLOW
tristate "Thermal Management driver for Intel menlow platform"
@@ -647,145 +815,80 @@ config INTEL_MENLOW
If unsure, say N.
-config EEEPC_LAPTOP
- tristate "Eee PC Hotkey Driver"
+config INTEL_OAKTRAIL
+ tristate "Intel Oaktrail Platform Extras"
depends on ACPI
- depends on INPUT
- depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on HOTPLUG_PCI
- depends on BACKLIGHT_CLASS_DEVICE
- select HWMON
- select LEDS_CLASS
- select NEW_LEDS
- select INPUT_SPARSEKMAP
+ depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
---help---
- This driver supports the Fn-Fx keys on Eee PC laptops.
-
- It also gives access to some extra laptop functionalities like
- Bluetooth, backlight and allows powering on/off some other
- devices.
-
- If you have an Eee PC laptop, say Y or M here. If this driver
- doesn't work on your Eee PC, try eeepc-wmi instead.
+ The Intel Oaktrail platform needs this driver to provide interfaces
+ to enable/disable the camera, WiFi, BT, etc. devices. If in doubt, say Y
+ here; it will only load on supported platforms.
-config ASUS_WMI
- tristate "ASUS WMI Driver"
- depends on ACPI_WMI
- depends on ACPI_BATTERY
+config INTEL_VBTN
+ tristate "INTEL VIRTUAL BUTTON"
+ depends on ACPI
depends on INPUT
- depends on HWMON
- depends on BACKLIGHT_CLASS_DEVICE
- depends on RFKILL || RFKILL = n
- depends on HOTPLUG_PCI
- depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_SPARSEKMAP
- select LEDS_CLASS
- select NEW_LEDS
- ---help---
- Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
- Asus Notebooks).
+ help
+ This driver provides support for the Intel Virtual Button interface.
+ Some laptops require this driver for power button support.
To compile this driver as a module, choose M here: the module will
- be called asus-wmi.
+ be called intel_vbtn.
-config ASUS_NB_WMI
- tristate "Asus Notebook WMI Driver"
- depends on ASUS_WMI
- depends on SERIO_I8042 || SERIO_I8042 = n
+config SURFACE3_WMI
+ tristate "Surface 3 WMI Driver"
+ depends on ACPI_WMI
+ depends on DMI
+ depends on INPUT
+ depends on SPI
---help---
- This is a driver for newer Asus notebooks. It adds extra features
- like wireless radio and bluetooth control, leds, hotkeys, backlight...
-
- For more information, see
- <file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
+ Say Y here if you have a Surface 3.
- If you have an ACPI-WMI compatible Asus Notebook, say Y or M
- here.
+ To compile this driver as a module, choose M here: the module will
+ be called surface3-wmi.
-config EEEPC_WMI
- tristate "Eee PC WMI Driver"
- depends on ASUS_WMI
+config SURFACE_3_BUTTON
+ tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
+ depends on ACPI && KEYBOARD_GPIO && I2C
---help---
- This is a driver for newer Eee PC laptops. It adds extra features
- like wireless radio and bluetooth control, leds, hotkeys, backlight...
-
- For more information, see
- <file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
+ This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
- If you have an ACPI-WMI compatible Eee PC laptop (>= 1000), say Y or M
- here.
+config SURFACE_3_POWER_OPREGION
+ tristate "Surface 3 battery platform operation region support"
+ depends on ACPI && I2C
+ help
+ This driver provides support for the ACPI operation
+ region of the Surface 3 battery platform driver.
-config ASUS_WIRELESS
- tristate "Asus Wireless Radio Control Driver"
- depends on ACPI
- depends on INPUT
- select NEW_LEDS
- select LEDS_CLASS
+config SURFACE_PRO3_BUTTON
+ tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
+ depends on ACPI && INPUT
---help---
- The Asus Wireless Radio Control handles the airplane mode hotkey
- present on some Asus laptops.
-
- Say Y or M here if you have an ASUS notebook with an airplane mode
- hotkey.
-
- If you choose to compile this driver as a module the module will be
- called asus-wireless.
+ This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
-config ACPI_WMI
- tristate "WMI"
+config MSI_LAPTOP
+ tristate "MSI Laptop Extras"
depends on ACPI
- help
- This driver adds support for the ACPI-WMI (Windows Management
- Instrumentation) mapper device (PNP0C14) found on some systems.
-
- ACPI-WMI is a proprietary extension to ACPI to expose parts of the
- ACPI firmware to userspace - this is done through various vendor
- defined methods and data blocks in a PNP0C14 device, which are then
- made available for userspace to call.
-
- The implementation of this in Linux currently only exposes this to
- other kernel space drivers.
-
- This driver is a required dependency to build the firmware specific
- drivers needed on many machines, including Acer and HP laptops.
-
- It is safe to enable this driver even if your DSDT doesn't define
- any ACPI-WMI devices.
-
-config WMI_BMOF
- tristate "WMI embedded Binary MOF driver"
- depends on ACPI_WMI
- default ACPI_WMI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on RFKILL
+ depends on INPUT && SERIO_I8042
+ select INPUT_SPARSEKMAP
---help---
- Say Y here if you want to be able to read a firmware-embedded
- WMI Binary MOF data. Using this requires userspace tools and may be
- rather tedious.
-
- To compile this driver as a module, choose M here: the module will
- be called wmi-bmof.
+ This is a driver for laptops built by MSI (MICRO-STAR
+ INTERNATIONAL):
-config INTEL_WMI_THUNDERBOLT
- tristate "Intel WMI thunderbolt force power driver"
- depends on ACPI_WMI
- ---help---
- Say Y here if you want to be able to use the WMI interface on select
- systems to force the power control of Intel Thunderbolt controllers.
- This is useful for updating the firmware when devices are not plugged
- into the controller.
+ MSI MegaBook S270 (MS-1013)
+ Cytron/TCM/Medion/Tchibo MD96100/SAM2000
- To compile this driver as a module, choose M here: the module will
- be called intel-wmi-thunderbolt.
+ It adds support for Bluetooth, WLAN and LCD brightness control.
-config XIAOMI_WMI
- tristate "Xiaomi WMI key driver"
- depends on ACPI_WMI
- depends on INPUT
- help
- Say Y here if you want to support WMI-based keys on Xiaomi notebooks.
+ More information about this driver is available at
+ <http://0pointer.de/lennart/tchibo.html>.
- To compile this driver as a module, choose M here: the module will
- be called xiaomi-wmi.
+ If you have an MSI S270 laptop, say Y or M here.
config MSI_WMI
tristate "MSI WMI extras"
@@ -800,24 +903,63 @@ config MSI_WMI
To compile this driver as a module, choose M here: the module will
be called msi-wmi.
-config PEAQ_WMI
- tristate "PEAQ 2-in-1 WMI hotkey driver"
- depends on ACPI_WMI
- depends on INPUT
+config XO15_EBOOK
+ tristate "OLPC XO-1.5 ebook switch"
+ depends on OLPC || COMPILE_TEST
+ depends on ACPI && INPUT
+ ---help---
+ Support for the ebook switch on the OLPC XO-1.5 laptop.
+
+ This switch is triggered as the screen is rotated and folded down to
+ convert the device into ebook form.
+
+config XO1_RFKILL
+ tristate "OLPC XO-1 software RF kill switch"
+ depends on OLPC || COMPILE_TEST
+ depends on RFKILL
+ ---help---
+ Support for enabling/disabling the WLAN interface on the OLPC XO-1
+ laptop.
+
+config PCENGINES_APU2
+ tristate "PC Engines APUv2/3 front button and LEDs driver"
+ depends on INPUT && INPUT_KEYBOARD && GPIOLIB
+ depends on LEDS_CLASS
+ select GPIO_AMD_FCH
+ select KEYBOARD_GPIO_POLLED
+ select LEDS_GPIO
help
- Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
+ This driver provides support for the front button and LEDs on
+ PC Engines APUv2/APUv3 boards.
-config TOPSTAR_LAPTOP
- tristate "Topstar Laptop Extras"
- depends on ACPI
- depends on INPUT
- select INPUT_SPARSEKMAP
+ To compile this driver as a module, choose M here: the module
+ will be called pcengines-apuv2.
+
+config SAMSUNG_LAPTOP
+ tristate "Samsung Laptop driver"
+ depends on RFKILL || RFKILL = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on BACKLIGHT_CLASS_DEVICE
select LEDS_CLASS
select NEW_LEDS
---help---
- This driver adds support for hotkeys found on Topstar laptops.
+ This module implements a driver for a wide range of Samsung
+ laptops. It offers control over the function keys, the wireless
+ LED, and the LCD backlight level.
- If you have a Topstar laptop, say Y or M here.
+ It may also provide some sysfs files described in
+ <file:Documentation/ABI/testing/sysfs-driver-samsung-laptop>
+
+ To compile this driver as a module, choose M here: the module
+ will be called samsung-laptop.
+
+config SAMSUNG_Q10
+ tristate "Samsung Q10 Extras"
+ depends on ACPI
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This driver provides support for backlight control on Samsung Q10
+ and related laptops, including Dell Latitude X200.
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
@@ -917,115 +1059,129 @@ config ACPI_CMPC
keys as input device, backlight device, tablet and accelerometer
devices.
-config INTEL_CHT_INT33FE
- tristate "Intel Cherry Trail ACPI INT33FE Driver"
- depends on X86 && ACPI && I2C && REGULATOR
- depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
- depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
- depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
+config COMPAL_LAPTOP
+ tristate "Compal (and others) Laptop Extras"
+ depends on ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on RFKILL
+ depends on HWMON
+ depends on POWER_SUPPLY
---help---
- This driver add support for the INT33FE ACPI device found on
- some Intel Cherry Trail devices.
+ This is a driver for laptops built by Compal, and some models by
+ other brands (e.g. Dell, Toshiba).
- There are two kinds of INT33FE ACPI device possible: for hardware
- with USB Type-C and Micro-B connectors. This driver supports both.
+ It adds support for rfkill, Bluetooth, WLAN, LCD brightness, hwmon
+ and battery charging level control.
- The INT33FE ACPI device has a CRS table with I2cSerialBusV2
- resources for Fuel Gauge Controller and (in the Type-C variant)
- FUSB302 USB Type-C Controller and PI3USB30532 USB switch.
- This driver instantiates i2c-clients for these, so that standard
- i2c drivers for these chips can bind to the them.
+config LG_LAPTOP
+ tristate "LG Laptop Extras"
+ depends on ACPI
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ help
+ This driver adds support for hotkeys as well as control of keyboard
+ backlight, battery maximum charge level and various other ACPI
+ features.
- If you enable this driver it is advised to also select
- CONFIG_BATTERY_BQ27XXX=m or CONFIG_BATTERY_BQ27XXX_I2C=m for Micro-B
- device and CONFIG_TYPEC_FUSB302=m and CONFIG_BATTERY_MAX17042=m
- for Type-C device.
+ If you have an LG Gram laptop, say Y or M here.
+config PANASONIC_LAPTOP
+ tristate "Panasonic Laptop Extras"
+ depends on INPUT && ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select INPUT_SPARSEKMAP
+ ---help---
+ This driver adds support for access to backlight control and hotkeys
+ on Panasonic Let's Note laptops.
-config INTEL_INT0002_VGPIO
- tristate "Intel ACPI INT0002 Virtual GPIO driver"
- depends on GPIOLIB && ACPI
- select GPIOLIB_IRQCHIP
+ If you have a Panasonic Let's Note laptop (such as the R1(N variant),
+ R2, R3, R5, T2, W2 and Y2 series), say Y.
+
+config SONY_LAPTOP
+ tristate "Sony Laptop Extras"
+ depends on ACPI
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ depends on RFKILL
---help---
- Some peripherals on Bay Trail and Cherry Trail platforms signal a
- Power Management Event (PME) to the Power Management Controller (PMC)
- to wakeup the system. When this happens software needs to explicitly
- clear the PME bus 0 status bit in the GPE0a_STS register to avoid an
- IRQ storm on IRQ 9.
+ This mini-driver drives the SNC and SPIC devices present in the ACPI
+ BIOS of the Sony Vaio laptops.
- This is modelled in ACPI through the INT0002 ACPI device, which is
- called a "Virtual GPIO controller" in ACPI because it defines the
- event handler to call when the PME triggers through _AEI and _L02
- methods as would be done for a real GPIO interrupt in ACPI.
+ It gives access to some extra laptop functionalities like Bluetooth,
+ screen brightness control, Fn keys and allows powering on/off some
+ devices.
- To compile this driver as a module, choose M here: the module will
- be called intel_int0002_vgpio.
+ Read <file:Documentation/admin-guide/laptops/sony-laptop.rst> for more information.
-config INTEL_HID_EVENT
- tristate "INTEL HID Event"
+config SONYPI_COMPAT
+ bool "Sonypi compatibility"
+ depends on SONY_LAPTOP
+ ---help---
+ Build the sonypi driver compatibility code into the sony-laptop driver.
+
+config SYSTEM76_ACPI
+ tristate "System76 ACPI Driver"
depends on ACPI
- depends on INPUT
- select INPUT_SPARSEKMAP
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
help
- This driver provides support for the Intel HID Event hotkey interface.
- Some laptops require this driver for hotkey support.
+ This is a driver for System76 laptops running open firmware. It adds
+ support for Fn-Fx key combinations, keyboard backlight, and airplane mode
+ LEDs.
- To compile this driver as a module, choose M here: the module will
- be called intel_hid.
+ If you have a System76 laptop running open firmware, say Y or M here.
-config INTEL_VBTN
- tristate "INTEL VIRTUAL BUTTON"
+config TOPSTAR_LAPTOP
+ tristate "Topstar Laptop Extras"
depends on ACPI
depends on INPUT
select INPUT_SPARSEKMAP
- help
- This driver provides support for the Intel Virtual Button interface.
- Some laptops require this driver for power button support.
-
- To compile this driver as a module, choose M here: the module will
- be called intel_vbtn.
-
-config INTEL_SCU_IPC
- bool "Intel SCU IPC Support"
- depends on X86_INTEL_MID
- default y
+ select LEDS_CLASS
+ select NEW_LEDS
---help---
- IPC is used to bridge the communications between kernel and SCU on
- some embedded Intel x86 platforms. This is not needed for PC-type
- machines.
+ This driver adds support for hotkeys found on Topstar laptops.
-config INTEL_SCU_IPC_UTIL
- tristate "Intel SCU IPC utility driver"
- depends on INTEL_SCU_IPC
- ---help---
- The IPC Util driver provides an interface with the SCU enabling
- low level access for debug work and updating the firmware. Say
- N unless you will be doing this on an Intel MID platform.
+ If you have a Topstar laptop, say Y or M here.
-config INTEL_MID_POWER_BUTTON
- tristate "power button driver for Intel MID platforms"
- depends on INTEL_SCU_IPC && INPUT
+config I2C_MULTI_INSTANTIATE
+ tristate "I2C multi instantiate pseudo device driver"
+ depends on I2C && ACPI
help
- This driver handles the power button on the Intel MID platforms.
+ Some ACPI-based systems list multiple i2c-devices in a single ACPI
+ firmware-node. This driver will instantiate separate i2c-clients
+ for each device in the firmware-node.
- If unsure, say N.
+ To compile this driver as a module, choose M here: the module
+ will be called i2c-multi-instantiate.
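
To illustrate what "instantiate separate i2c-clients for each device in
the firmware-node" means in practice, a rough sketch built on the ACPI
I2C helper follows; the chip name is a made-up placeholder and error
handling is trimmed.

    #include <linux/acpi.h>
    #include <linux/err.h>
    #include <linux/i2c.h>
    #include <linux/kernel.h>

    /* Sketch: create one i2c_client per I2cSerialBus resource index. */
    static int instantiate_clients(struct device *dev)
    {
        struct i2c_board_info board_info = {};
        struct i2c_client *client;
        int i;

        for (i = 0; ; i++) {
            /* hypothetical per-index name, for illustration only */
            snprintf(board_info.type, I2C_NAME_SIZE, "example-chip%d", i);
            client = i2c_acpi_new_device(dev, i, &board_info);
            if (IS_ERR(client))
                break;    /* no more I2cSerialBus resources */
        }
        return i ? 0 : -ENODEV;
    }
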
-config INTEL_MFLD_THERMAL
- tristate "Thermal driver for Intel Medfield platform"
- depends on MFD_INTEL_MSIC && THERMAL
- help
- Say Y here to enable thermal driver support for the Intel Medfield
- platform.
+config MLX_PLATFORM
+ tristate "Mellanox Technologies platform support"
+ depends on I2C && REGMAP
+ ---help---
+ This option enables system support for the Mellanox Technologies
+ platform. The Mellanox systems provide data center networking
+ solutions based on Virtual Protocol Interconnect (VPI) technology,
+ enabling seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
+ connections.
-config INTEL_IPS
- tristate "Intel Intelligent Power Sharing"
- depends on ACPI && PCI
+ If you have a Mellanox system, say Y or M here.
+
+config TOUCHSCREEN_DMI
+ bool "DMI based touchscreen configuration info"
+ depends on ACPI && DMI && I2C=y && TOUCHSCREEN_SILEAD
+ select EFI_EMBEDDED_FIRMWARE if EFI
---help---
- Intel Calpella platforms support dynamic power sharing between the
- CPU and GPU, maximizing performance in a given TDP. This driver,
- along with the CPU frequency and i915 drivers, provides that
- functionality. If in doubt, say Y here; it will only load on
- supported platforms.
+ Certain ACPI based tablets with e.g. Silead or Chipone touchscreens
+ do not have enough data in ACPI tables for the touchscreen driver to
+ handle the touchscreen properly, as OEMs expect the data to be baked
+ into the tablet model specific version of the driver shipped with
+ the OS-image for the device. This option supplies the missing info.
+ Enable this for x86 tablets with Silead or Chipone touchscreens.
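
The mechanism described above is a DMI match table whose driver_data
points at the device properties to attach. A hedged sketch follows; the
model strings and property values are invented, and the real tables live
in touchscreen_dmi.c.

    #include <linux/dmi.h>
    #include <linux/property.h>

    static const struct property_entry example_ts_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1080),
        PROPERTY_ENTRY_STRING("firmware-name", "example-ts.fw"),
        { }
    };

    static const struct dmi_system_id example_ts_dmi_table[] = {
        {
            .driver_data = (void *)example_ts_props,
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
            },
        },
        { }    /* terminating entry */
    };
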
config INTEL_IMR
bool "Intel Isolated Memory Region support"
@@ -1051,116 +1207,21 @@ config INTEL_IMR
If you are running on a Galileo/Quark say Y here.
-config INTEL_PMC_CORE
- tristate "Intel PMC Core driver"
- depends on PCI
- ---help---
- The Intel Platform Controller Hub for Intel Core SoCs provides access
- to Power Management Controller registers via a PCI interface. This
- driver can utilize debugging capabilities and supported features as
- exposed by the Power Management Controller.
-
- Supported features:
- - SLP_S0_RESIDENCY counter
- - PCH IP Power Gating status
- - LTR Ignore
- - MPHY/PLL gating status (Sunrisepoint PCH only)
-
-config IBM_RTL
- tristate "Device driver to enable PRTL support"
- depends on PCI
- ---help---
- Enable support for IBM Premium Real Time Mode (PRTM).
- This module will allow you the enter and exit PRTM in the BIOS via
- sysfs on platforms that support this feature. System in PRTM will
- not receive CPU-generated SMIs for recoverable errors. Use of this
- feature without proper support may void your hardware warranty.
-
- If the proper BIOS support is found the driver will load and create
- /sys/devices/system/ibm_rtl/. The "state" variable will indicate
- whether or not the BIOS is in PRTM.
- state = 0 (BIOS SMIs on)
- state = 1 (BIOS SMIs off)
-
-config XO1_RFKILL
- tristate "OLPC XO-1 software RF kill switch"
- depends on OLPC || COMPILE_TEST
- depends on RFKILL
- ---help---
- Support for enabling/disabling the WLAN interface on the OLPC XO-1
- laptop.
-
-config XO15_EBOOK
- tristate "OLPC XO-1.5 ebook switch"
- depends on OLPC || COMPILE_TEST
- depends on ACPI && INPUT
- ---help---
- Support for the ebook switch on the OLPC XO-1.5 laptop.
-
- This switch is triggered as the screen is rotated and folded down to
- convert the device into ebook form.
-
-config SAMSUNG_LAPTOP
- tristate "Samsung Laptop driver"
- depends on RFKILL || RFKILL = n
- depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on BACKLIGHT_CLASS_DEVICE
- select LEDS_CLASS
- select NEW_LEDS
- ---help---
- This module implements a driver for a wide range of different
- Samsung laptops. It offers control over the different
- function keys, wireless LED, LCD backlight level.
-
- It may also provide some sysfs files described in
- <file:Documentation/ABI/testing/sysfs-driver-samsung-laptop>
-
- To compile this driver as a module, choose M here: the module
- will be called samsung-laptop.
-
-config MXM_WMI
- tristate "WMI support for MXM Laptop Graphics"
- depends on ACPI_WMI
- ---help---
- MXM is a standard for laptop graphics cards, the WMI interface
- is required for switchable nvidia graphics machines
-
-config INTEL_OAKTRAIL
- tristate "Intel Oaktrail Platform Extras"
- depends on ACPI
- depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
- ---help---
- Intel Oaktrail platform need this driver to provide interfaces to
- enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y
- here; it will only load on supported platforms.
-
-config SAMSUNG_Q10
- tristate "Samsung Q10 Extras"
- depends on ACPI
- select BACKLIGHT_CLASS_DEVICE
- ---help---
- This driver provides support for backlight control on Samsung Q10
- and related laptops, including Dell Latitude X200.
-
-config APPLE_GMUX
- tristate "Apple Gmux Driver"
+config INTEL_IPS
+ tristate "Intel Intelligent Power Sharing"
depends on ACPI && PCI
- depends on PNP
- depends on BACKLIGHT_CLASS_DEVICE
- depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
- depends on ACPI_VIDEO=n || ACPI_VIDEO
---help---
- This driver provides support for the gmux device found on many
- Apple laptops, which controls the display mux for the hybrid
- graphics as well as the backlight. Currently only backlight
- control is supported by the driver.
+ Intel Calpella platforms support dynamic power sharing between the
+ CPU and GPU, maximizing performance in a given TDP. This driver,
+ along with the CPU frequency and i915 drivers, provides that
+ functionality. If in doubt, say Y here; it will only load on
+ supported platforms.
config INTEL_RST
tristate "Intel Rapid Start Technology Driver"
depends on ACPI
---help---
- This driver provides support for modifying paramaters on systems
+ This driver provides support for modifying parameters on systems
equipped with Intel's Rapid Start Technology. When put in an ACPI
sleep state, these devices will wake after either a configured
timeout or when the system battery reaches a critical state,
@@ -1182,62 +1243,7 @@ config INTEL_SMARTCONNECT
This driver checks to determine whether the device has Intel Smart
Connect enabled, and if so disables it.
-config INTEL_PMC_IPC
- tristate "Intel PMC IPC Driver"
- depends on ACPI && PCI
- ---help---
- This driver provides support for PMC control on some Intel platforms.
- The PMC is an ARC processor which defines IPC commands for communication
- with other entities in the CPU.
-
-config INTEL_BXTWC_PMIC_TMU
- tristate "Intel BXT Whiskey Cove TMU Driver"
- depends on REGMAP
- depends on INTEL_SOC_PMIC_BXTWC && INTEL_PMC_IPC
- ---help---
- Select this driver to use Intel BXT Whiskey Cove PMIC TMU feature.
- This driver enables the alarm wakeup functionality in the TMU unit
- of Whiskey Cove PMIC.
-
-config SURFACE_PRO3_BUTTON
- tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
- depends on ACPI && INPUT
- ---help---
- This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
-
-config SURFACE_3_BUTTON
- tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
- depends on ACPI && KEYBOARD_GPIO && I2C
- ---help---
- This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
-
-config INTEL_PUNIT_IPC
- tristate "Intel P-Unit IPC Driver"
- ---help---
- This driver provides support for Intel P-Unit Mailbox IPC mechanism,
- which is used to bridge the communications between kernel and P-Unit.
-
-config INTEL_TELEMETRY
- tristate "Intel SoC Telemetry Driver"
- depends on INTEL_PMC_IPC && INTEL_PUNIT_IPC && X86_64
- ---help---
- This driver provides interfaces to configure and use
- telemetry for INTEL SoC from APL onwards. It is also
- used to get various SoC events and parameters
- directly via debugfs files. Various tools may use
- this interface for SoC state monitoring.
-
-config MLX_PLATFORM
- tristate "Mellanox Technologies platform support"
- depends on I2C && REGMAP
- ---help---
- This option enables system support for the Mellanox Technologies
- platform. The Mellanox systems provide data center networking
- solutions based on Virtual Protocol Interconnect (VPI) technology
- enable seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
- connection.
-
- If you have a Mellanox system, say Y or M here.
+source "drivers/platform/x86/intel_speed_select_if/Kconfig"
config INTEL_TURBO_MAX_3
bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
@@ -1249,16 +1255,25 @@ config INTEL_TURBO_MAX_3
This driver is only required when the system is not using Hardware
P-States (HWP). In HWP mode, priority can be read from ACPI tables.
-config TOUCHSCREEN_DMI
- bool "DMI based touchscreen configuration info"
- depends on ACPI && DMI && I2C=y && TOUCHSCREEN_SILEAD
+config INTEL_UNCORE_FREQ_CONTROL
+ tristate "Intel Uncore frequency control driver"
+ depends on X86_64
+ help
+ This driver allows control of uncore frequency limits on
+ supported server platforms.
+ Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-uncore-frequency.
+
+config INTEL_BXTWC_PMIC_TMU
+ tristate "Intel BXT Whiskey Cove TMU Driver"
+ depends on REGMAP
+ depends on INTEL_SOC_PMIC_BXTWC && INTEL_PMC_IPC
---help---
- Certain ACPI based tablets with e.g. Silead or Chipone touchscreens
- do not have enough data in ACPI tables for the touchscreen driver to
- handle the touchscreen properly, as OEMs expect the data to be baked
- into the tablet model specific version of the driver shipped with the
- the OS-image for the device. This option supplies the missing info.
- Enable this for x86 tablets with Silead or Chipone touchscreens.
+ Select this driver to use Intel BXT Whiskey Cove PMIC TMU feature.
+ This driver enables the alarm wakeup functionality in the TMU unit
+ of Whiskey Cove PMIC.
config INTEL_CHTDC_TI_PWRBTN
tristate "Intel Cherry Trail Dollar Cove TI power button driver"
@@ -1271,6 +1286,21 @@ config INTEL_CHTDC_TI_PWRBTN
To compile this driver as a module, choose M here: the module
will be called intel_chtdc_ti_pwrbtn.
+config INTEL_MFLD_THERMAL
+ tristate "Thermal driver for Intel Medfield platform"
+ depends on MFD_INTEL_MSIC && THERMAL
+ help
+ Say Y here to enable thermal driver support for the Intel Medfield
+ platform.
+
+config INTEL_MID_POWER_BUTTON
+ tristate "power button driver for Intel MID platforms"
+ depends on INTEL_SCU_IPC && INPUT
+ help
+ This driver handles the power button on the Intel MID platforms.
+
+ If unsure, say N.
+
config INTEL_MRFLD_PWRBTN
tristate "Intel Merrifield Basin Cove power button driver"
depends on INTEL_SOC_PMIC_MRFLD
@@ -1282,85 +1312,61 @@ config INTEL_MRFLD_PWRBTN
To compile this driver as a module, choose M here: the module
will be called intel_mrfld_pwrbtn.
-config I2C_MULTI_INSTANTIATE
- tristate "I2C multi instantiate pseudo device driver"
- depends on I2C && ACPI
- help
- Some ACPI-based systems list multiple i2c-devices in a single ACPI
- firmware-node. This driver will instantiate separate i2c-clients
- for each device in the firmware-node.
-
- To compile this driver as a module, choose M here: the module
- will be called i2c-multi-instantiate.
-
-config INTEL_ATOMISP2_PM
- tristate "Intel AtomISP2 dummy / power-management driver"
- depends on PCI && IOSF_MBI && PM
- help
- Power-management driver for Intel's Image Signal Processor found on
- Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
- is to turn the ISP off (put it in D3) to save power and to allow
- entering of S0ix modes.
-
- To compile this driver as a module, choose M here: the module
- will be called intel_atomisp2_pm.
-
-config HUAWEI_WMI
- tristate "Huawei WMI laptop extras driver"
- depends on ACPI_BATTERY
- depends on ACPI_WMI
- depends on INPUT
- select INPUT_SPARSEKMAP
- select LEDS_CLASS
- select LEDS_TRIGGERS
- select LEDS_TRIGGER_AUDIO
- select NEW_LEDS
- help
- This driver provides support for Huawei WMI hotkeys, battery charge
- control, fn-lock, mic-mute LED, and other extra features.
-
- To compile this driver as a module, choose M here: the module
- will be called huawei-wmi.
-
-config PCENGINES_APU2
- tristate "PC Engines APUv2/3 front button and LEDs driver"
- depends on INPUT && INPUT_KEYBOARD && GPIOLIB
- depends on LEDS_CLASS
- select GPIO_AMD_FCH
- select KEYBOARD_GPIO_POLLED
- select LEDS_GPIO
- help
- This driver provides support for the front button and LEDs on
- PC Engines APUv2/APUv3 board.
+config INTEL_PMC_CORE
+ tristate "Intel PMC Core driver"
+ depends on PCI
+ ---help---
+ The Intel Platform Controller Hub for Intel Core SoCs provides access
+ to Power Management Controller registers via a PCI interface. This
+ driver can utilize debugging capabilities and supported features as
+ exposed by the Power Management Controller.
- To compile this driver as a module, choose M here: the module
- will be called pcengines-apuv2.
+ Supported features:
+ - SLP_S0_RESIDENCY counter
+ - PCH IP Power Gating status
+ - LTR Ignore
+ - MPHY/PLL gating status (Sunrisepoint PCH only)
-config INTEL_UNCORE_FREQ_CONTROL
- tristate "Intel Uncore frequency control driver"
- depends on X86_64
- help
- This driver allows control of uncore frequency limits on
- supported server platforms.
- Uncore frequency controls RING/LLC (last-level cache) clocks.
+config INTEL_PMC_IPC
+ tristate "Intel PMC IPC Driver"
+ depends on ACPI && PCI
+ ---help---
+ This driver provides support for PMC control on some Intel platforms.
+ The PMC is an ARC processor which defines IPC commands for communication
+ with other entities in the CPU.
- To compile this driver as a module, choose M here: the module
- will be called intel-uncore-frequency.
+config INTEL_PUNIT_IPC
+ tristate "Intel P-Unit IPC Driver"
+ ---help---
+ This driver provides support for Intel P-Unit Mailbox IPC mechanism,
+ which is used to bridge the communications between kernel and P-Unit.
-source "drivers/platform/x86/intel_speed_select_if/Kconfig"
+config INTEL_SCU_IPC
+ bool "Intel SCU IPC Support"
+ depends on X86_INTEL_MID
+ default y
+ ---help---
+ IPC is used to bridge the communications between kernel and SCU on
+ some embedded Intel x86 platforms. This is not needed for PC-type
+ machines.
-config SYSTEM76_ACPI
- tristate "System76 ACPI Driver"
- depends on ACPI
- select NEW_LEDS
- select LEDS_CLASS
- select LEDS_TRIGGERS
- help
- This is a driver for System76 laptops running open firmware. It adds
- support for Fn-Fx key combinations, keyboard backlight, and airplane mode
- LEDs.
+config INTEL_SCU_IPC_UTIL
+ tristate "Intel SCU IPC utility driver"
+ depends on INTEL_SCU_IPC
+ ---help---
+ The IPC Util driver provides an interface with the SCU enabling
+ low level access for debug work and updating the firmware. Say
+ N unless you will be doing this on an Intel MID platform.
- If you have a System76 laptop running open firmware, say Y or M here.
+config INTEL_TELEMETRY
+ tristate "Intel SoC Telemetry Driver"
+ depends on INTEL_PMC_IPC && INTEL_PUNIT_IPC && X86_64
+ ---help---
+ This driver provides interfaces to configure and use
+ telemetry for Intel SoCs from APL onwards. It is also
+ used to get various SoC events and parameters
+ directly via debugfs files. Various tools may use
+ this interface for SoC state monitoring.
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 3747b1f07cf1..53408d965874 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -3,106 +3,146 @@
# Makefile for linux/drivers/platform/x86
# x86 Platform-Specific Drivers
#
+
+# Windows Management Interface
+obj-$(CONFIG_ACPI_WMI) += wmi.o
+obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
+
+# WMI drivers
+obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
+obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
+obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
+obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
+obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
+obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
+
+# Acer
+obj-$(CONFIG_ACERHDF) += acerhdf.o
+obj-$(CONFIG_ACER_WIRELESS) += acer-wireless.o
+obj-$(CONFIG_ACER_WMI) += acer-wmi.o
+
+# Apple
+obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
+
+# ASUS
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
+obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o
obj-$(CONFIG_ASUS_WMI) += asus-wmi.o
obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
-obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
-obj-$(CONFIG_LG_LAPTOP) += lg-laptop.o
-obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
-obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
-obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
-obj-$(CONFIG_DCDBAS) += dcdbas.o
-obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
-dell-smbios-objs := dell-smbios-base.o
+
+# Dell
+obj-$(CONFIG_DCDBAS) += dcdbas.o
+obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
+dell-smbios-objs := dell-smbios-base.o
dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
-obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
-obj-$(CONFIG_DELL_WMI) += dell-wmi.o
+obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
+obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
+obj-$(CONFIG_DELL_RBU) += dell_rbu.o
+obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_WMI) += dell-wmi.o
obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
-obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
-obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
-obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
-obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
-obj-$(CONFIG_DELL_RBU) += dell_rbu.o
-obj-$(CONFIG_ACER_WMI) += acer-wmi.o
-obj-$(CONFIG_ACER_WIRELESS) += acer-wireless.o
-obj-$(CONFIG_ACERHDF) += acerhdf.o
+obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
+obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
+
+# Fujitsu
+obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
+obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
+obj-$(CONFIG_FUJITSU_TABLET) += fujitsu-tablet.o
+
+# GPD
+obj-$(CONFIG_GPD_POCKET_FAN) += gpd-pocket-fan.o
+
+# Hewlett Packard
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
obj-$(CONFIG_HP_WIRELESS) += hp-wireless.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
-obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
-obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
-obj-$(CONFIG_GPD_POCKET_FAN) += gpd-pocket-fan.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
-obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
+
+# IBM Thinkpad and Lenovo
+obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
-obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
-obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
-obj-$(CONFIG_FUJITSU_TABLET) += fujitsu-tablet.o
-obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
-obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
-obj-$(CONFIG_ACPI_WMI) += wmi.o
+obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
+
+# Intel
+obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
+obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
+intel_cht_int33fe-objs := intel_cht_int33fe_common.o \
+ intel_cht_int33fe_typec.o \
+ intel_cht_int33fe_microb.o
+obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
+obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
+obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
+obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
+
+# Microsoft
+obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
+obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o
+obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
+obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
+
+# MSI
+obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_MSI_WMI) += msi-wmi.o
-obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
-obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
-obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
-obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
-obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
-obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
-# toshiba_acpi must link after wmi to ensure that wmi devices are found
-# before toshiba_acpi initializes
-obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+
+# OLPC
+obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
+obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
+
+# PC Engines
+obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
+
+# Samsung
+obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
+obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
+
+# Toshiba
obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
obj-$(CONFIG_TOSHIBA_HAPS) += toshiba_haps.o
obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
-obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
-intel_cht_int33fe-objs := intel_cht_int33fe_common.o \
- intel_cht_int33fe_typec.o \
- intel_cht_int33fe_microb.o
-
-obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
-obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
-obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
-obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
-obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
-obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
-obj-$(CONFIG_INTEL_IPS) += intel_ips.o
-obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
-obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
-obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
-obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
-obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
-obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
-obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
-obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
-obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
-obj-$(CONFIG_INTEL_RST) += intel-rst.o
-obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
-obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
-obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
-obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
-obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
-obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o
-obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+# toshiba_acpi must link after wmi to ensure that wmi devices are found
+# before toshiba_acpi initializes
+obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+
+# Laptop drivers
+obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
+obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
+obj-$(CONFIG_LG_LAPTOP) += lg-laptop.o
+obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
+obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
+obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
+obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
+
+# Platform drivers
+obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
+obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
+obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
+
+# Intel uncore drivers
+obj-$(CONFIG_INTEL_IPS) += intel_ips.o
+obj-$(CONFIG_INTEL_RST) += intel-rst.o
+obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
+obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
+
+# Intel PMIC / PMC / P-Unit devices
obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
-obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
- intel_telemetry_pltdrv.o \
- intel_telemetry_debugfs.o
-obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o intel_pmc_core_pltdrv.o
-obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
-obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
-obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
+obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
-obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
-obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
-obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
-obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
-obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
-obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o intel_pmc_core_pltdrv.o
+obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
+obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
+obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
+ intel_telemetry_pltdrv.o \
+ intel_telemetry_debugfs.o
+obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 6f12747a359a..c4404d9c1de4 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -515,9 +515,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
.detect_quirks = asus_nb_wmi_quirks,
};
+static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = {
+ {
+ /*
+ * asus-nb-wmi adds no functionality. The T100TA has a detachable
+ * USB kbd, so no hotkeys and it has no WMI rfkill; and loading
+ * asus-nb-wmi causes the camera LED to turn on and _stay_ on.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ },
+ {
+ /* The Asus T200TA has the same issue as the T100TA */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ },
+ {} /* Terminating entry */
+};
static int __init asus_nb_wmi_init(void)
{
+ if (dmi_check_system(asus_nb_wmi_blacklist))
+ return -ENODEV;
+
return asus_wmi_register_driver(&asus_nb_wmi_driver);
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 612ef5526226..bb7c529d7d16 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -426,8 +426,11 @@ static int asus_wmi_battery_add(struct power_supply *battery)
{
/* The WMI method does not provide a way to specify a battery, so we
* just assume it is the first battery.
+ * Note: On some newer ASUS laptops (Zenbook UM431DA), the primary/first
+ * battery is named BATT.
*/
- if (strcmp(battery->desc->name, "BAT0") != 0)
+ if (strcmp(battery->desc->name, "BAT0") != 0 &&
+ strcmp(battery->desc->name, "BATT") != 0)
return -ENODEV;
if (device_create_file(&battery->dev,
@@ -1719,7 +1722,7 @@ static ssize_t fan_boost_mode_store(struct device *dev,
asus->fan_boost_mode = new_mode;
fan_boost_mode_write(asus);
- return result;
+ return count;
}
// Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
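
The return-value change above follows the sysfs convention: on success a
store callback must return the number of bytes it consumed, while a
successful kstrtou8() returns 0, which userspace could take as "nothing
written" and retry. A minimal sketch of the pattern, not this driver's
actual code:

    #include <linux/device.h>
    #include <linux/kernel.h>    /* kstrtou8() */

    static ssize_t example_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
        u8 value;
        int result = kstrtou8(buf, 10, &value);

        if (result < 0)
            return result;    /* propagate the parse error */

        /* ... program 'value' into the hardware here ... */

        return count;    /* success: the whole write was consumed */
    }
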
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 74e988f839e8..f8d3e3bd1bb5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -4,7 +4,7 @@
*
* Copyright (c) Red Hat <mjg@redhat.com>
* Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
*
* Based on documentation in the libsmbios package:
* Copyright (C) 2005-2014 Dell Inc.
@@ -2295,6 +2295,6 @@ module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index a6b856cd86bd..a89fad47ff13 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Dell Airplane Mode Switch driver
- Copyright (C) 2014-2015 Pali Rohár <pali.rohar@gmail.com>
+ Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
*/
@@ -495,5 +495,5 @@ MODULE_PARM_DESC(auto_remove_rfkill, "Automatically remove rfkill devices when "
"(default true)");
MODULE_DEVICE_TABLE(acpi, rbtn_ids);
MODULE_DESCRIPTION("Dell Airplane Mode Switch driver");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-rbtn.h b/drivers/platform/x86/dell-rbtn.h
index 0fdc81644458..5e030f926c58 100644
--- a/drivers/platform/x86/dell-rbtn.h
+++ b/drivers/platform/x86/dell-rbtn.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Dell Airplane Mode Switch driver
- Copyright (C) 2014-2015 Pali Rohár <pali.rohar@gmail.com>
+ Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
*/
diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
index fe59b0ebff31..2e2cd565926a 100644
--- a/drivers/platform/x86/dell-smbios-base.c
+++ b/drivers/platform/x86/dell-smbios-base.c
@@ -4,7 +4,7 @@
*
* Copyright (c) Red Hat <mjg@redhat.com>
* Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
*
* Based on documentation in the libsmbios package:
* Copyright (C) 2005-2014 Dell Inc.
@@ -645,7 +645,7 @@ module_exit(dell_smbios_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
index d6854d1c4119..97c52a839a3e 100644
--- a/drivers/platform/x86/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -4,7 +4,7 @@
*
* Copyright (c) Red Hat <mjg@redhat.com>
* Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
* Copyright (c) 2017 Dell Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
index a7ff9803f41a..75fa8ea0476d 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell-smbios.h
@@ -4,7 +4,7 @@
*
* Copyright (c) Red Hat <mjg@redhat.com>
* Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
*
* Based on documentation in the libsmbios package:
* Copyright (C) 2005-2014 Dell Inc.
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
index bfcc1d1b9b96..5d9304a7de1b 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -3,7 +3,7 @@
* dell-smo8800.c - Dell Latitude ACPI SMO88XX freefall sensor driver
*
* Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
- * Copyright (C) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2014 Pali Rohár <pali@kernel.org>
*
* This is loosely based on lis3lv02d driver.
*/
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
+#include <linux/fs.h>
struct smo8800_device {
u32 irq; /* acpi device irq */
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 6669db2555fb..86e8dd6a8b33 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -3,7 +3,7 @@
* Dell WMI hotkeys
*
* Copyright (C) 2008 Red Hat <mjg@redhat.com>
- * Copyright (C) 2014-2015 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
@@ -29,7 +29,7 @@
#include "dell-wmi-descriptor.h"
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell_rbu.c b/drivers/platform/x86/dell_rbu.c
index 7d5453326b43..03c3ff34bcf5 100644
--- a/drivers/platform/x86/dell_rbu.c
+++ b/drivers/platform/x86/dell_rbu.c
@@ -26,6 +26,9 @@
*
* See Documentation/admin-guide/dell_rbu.rst for more info.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -61,13 +64,11 @@ static struct _rbu_data {
static char image_type[MAX_IMAGE_LENGTH + 1] = "mono";
module_param_string(image_type, image_type, sizeof (image_type), 0);
-MODULE_PARM_DESC(image_type,
- "BIOS image type. choose- mono or packet or init");
+MODULE_PARM_DESC(image_type, "BIOS image type. choose- mono or packet or init");
static unsigned long allocation_floor = 0x100000;
module_param(allocation_floor, ulong, 0644);
-MODULE_PARM_DESC(allocation_floor,
- "Minimum address for allocations when using Packet mode");
+MODULE_PARM_DESC(allocation_floor, "Minimum address for allocations when using Packet mode");
struct packet_data {
struct list_head list;
@@ -100,10 +101,10 @@ static int create_packet(void *data, size_t length)
void *packet_data_temp_buf = NULL;
unsigned int idx = 0;
- pr_debug("create_packet: entry \n");
+ pr_debug("entry\n");
if (!rbu_data.packetsize) {
- pr_debug("create_packet: packetsize not specified\n");
+ pr_debug("packetsize not specified\n");
retval = -EINVAL;
goto out_noalloc;
}
@@ -113,9 +114,7 @@ static int create_packet(void *data, size_t length)
newpacket = kzalloc(sizeof (struct packet_data), GFP_KERNEL);
if (!newpacket) {
- printk(KERN_WARNING
- "dell_rbu:%s: failed to allocate new "
- "packet\n", __func__);
+ pr_warn("failed to allocate new packet\n");
retval = -ENOMEM;
spin_lock(&rbu_data.lock);
goto out_noalloc;
@@ -134,17 +133,12 @@ static int create_packet(void *data, size_t length)
* due to BIOS errata. This shouldn't be used for higher floors
* or you will run out of mem trying to allocate the array.
*/
- packet_array_size = max(
- (unsigned int)(allocation_floor / rbu_data.packetsize),
- (unsigned int)1);
+ packet_array_size = max_t(unsigned int, allocation_floor / rbu_data.packetsize, 1);
invalid_addr_packet_array = kcalloc(packet_array_size, sizeof(void *),
GFP_KERNEL);
if (!invalid_addr_packet_array) {
- printk(KERN_WARNING
- "dell_rbu:%s: failed to allocate "
- "invalid_addr_packet_array \n",
- __func__);
+ pr_warn("failed to allocate invalid_addr_packet_array\n");
retval = -ENOMEM;
spin_lock(&rbu_data.lock);
goto out_alloc_packet;
@@ -154,9 +148,7 @@ static int create_packet(void *data, size_t length)
packet_data_temp_buf = (unsigned char *)
__get_free_pages(GFP_KERNEL, ordernum);
if (!packet_data_temp_buf) {
- printk(KERN_WARNING
- "dell_rbu:%s: failed to allocate new "
- "packet\n", __func__);
+ pr_warn("failed to allocate new packet\n");
retval = -ENOMEM;
spin_lock(&rbu_data.lock);
goto out_alloc_packet_array;
@@ -164,7 +156,7 @@ static int create_packet(void *data, size_t length)
if ((unsigned long)virt_to_phys(packet_data_temp_buf)
< allocation_floor) {
- pr_debug("packet 0x%lx below floor at 0x%lx.\n",
+ pr_debug("packet 0x%lx below floor at 0x%lx\n",
(unsigned long)virt_to_phys(
packet_data_temp_buf),
allocation_floor);
@@ -181,7 +173,7 @@ static int create_packet(void *data, size_t length)
newpacket->data = packet_data_temp_buf;
- pr_debug("create_packet: newpacket at physical addr %lx\n",
+ pr_debug("newpacket at physical addr %lx\n",
(unsigned long)virt_to_phys(newpacket->data));
/* packets may not have fixed size */
@@ -195,16 +187,14 @@ static int create_packet(void *data, size_t length)
memcpy(newpacket->data, data, length);
- pr_debug("create_packet: exit \n");
+ pr_debug("exit\n");
out_alloc_packet_array:
/* always free packet array */
- for (;idx>0;idx--) {
- pr_debug("freeing unused packet below floor 0x%lx.\n",
- (unsigned long)virt_to_phys(
- invalid_addr_packet_array[idx-1]));
- free_pages((unsigned long)invalid_addr_packet_array[idx-1],
- ordernum);
+ while (idx--) {
+ pr_debug("freeing unused packet below floor 0x%lx\n",
+ (unsigned long)virt_to_phys(invalid_addr_packet_array[idx]));
+ free_pages((unsigned long)invalid_addr_packet_array[idx], ordernum);
}
kfree(invalid_addr_packet_array);
@@ -224,10 +214,9 @@ static int packetize_data(const u8 *data, size_t length)
int packet_length;
u8 *temp;
u8 *end = (u8 *) data + length;
- pr_debug("packetize_data: data length %zd\n", length);
+ pr_debug("data length %zd\n", length);
if (!rbu_data.packetsize) {
- printk(KERN_WARNING
- "dell_rbu: packetsize not specified\n");
+ pr_warn("packetsize not specified\n");
return -EIO;
}
@@ -255,15 +244,13 @@ static int packetize_data(const u8 *data, size_t length)
return rc;
}
-static int do_packet_read(char *data, struct list_head *ptemp_list,
+static int do_packet_read(char *data, struct packet_data *newpacket,
int length, int bytes_read, int *list_read_count)
{
void *ptemp_buf;
- struct packet_data *newpacket = NULL;
int bytes_copied = 0;
int j = 0;
- newpacket = list_entry(ptemp_list, struct packet_data, list);
*list_read_count += newpacket->length;
if (*list_read_count > bytes_read) {
@@ -291,7 +278,7 @@ static int do_packet_read(char *data, struct list_head *ptemp_list,
static int packet_read_list(char *data, size_t * pread_length)
{
- struct list_head *ptemp_list;
+ struct packet_data *newpacket;
int temp_count = 0;
int bytes_copied = 0;
int bytes_read = 0;
@@ -305,9 +292,8 @@ static int packet_read_list(char *data, size_t * pread_length)
remaining_bytes = *pread_length;
bytes_read = rbu_data.packet_read_count;
- ptemp_list = (&packet_data_head.list)->next;
- while (!list_empty(ptemp_list)) {
- bytes_copied = do_packet_read(pdest, ptemp_list,
+ list_for_each_entry(newpacket, (&packet_data_head.list)->next, list) {
+ bytes_copied = do_packet_read(pdest, newpacket,
remaining_bytes, bytes_read, &temp_count);
remaining_bytes -= bytes_copied;
bytes_read += bytes_copied;
@@ -318,8 +304,6 @@ static int packet_read_list(char *data, size_t * pread_length)
*/
if (remaining_bytes == 0)
break;
-
- ptemp_list = ptemp_list->next;
}
/*finally set the bytes read */
*pread_length = bytes_read - rbu_data.packet_read_count;
@@ -329,17 +313,11 @@ static int packet_read_list(char *data, size_t * pread_length)
static void packet_empty_list(void)
{
- struct list_head *ptemp_list;
- struct list_head *pnext_list;
- struct packet_data *newpacket;
+ struct packet_data *newpacket, *tmp;
+
+ list_for_each_entry_safe(newpacket, tmp, (&packet_data_head.list)->next, list) {
+ list_del(&newpacket->list);
- ptemp_list = (&packet_data_head.list)->next;
- while (!list_empty(ptemp_list)) {
- newpacket =
- list_entry(ptemp_list, struct packet_data, list);
- pnext_list = ptemp_list->next;
- list_del(ptemp_list);
- ptemp_list = pnext_list;
/*
* zero out the RBU packet memory before freeing
* to make sure there are no stale RBU packets left in memory
@@ -407,8 +385,7 @@ static int img_update_realloc(unsigned long size)
* check for corruption
*/
if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
- printk(KERN_ERR "dell_rbu:%s: corruption "
- "check failed\n", __func__);
+ pr_err("corruption check failed\n");
return -EINVAL;
}
/*
@@ -430,8 +407,7 @@ static int img_update_realloc(unsigned long size)
(unsigned char *)__get_free_pages(GFP_DMA32, ordernum);
spin_lock(&rbu_data.lock);
if (!image_update_buffer) {
- pr_debug("Not enough memory for image update:"
- "size = %ld\n", size);
+ pr_debug("Not enough memory for image update: size = %ld\n", size);
return -ENOMEM;
}
@@ -455,15 +431,14 @@ static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
/* check to see if we have something to return */
if (rbu_data.num_packets == 0) {
- pr_debug("read_packet_data: no packets written\n");
+ pr_debug("no packets written\n");
retval = -ENOMEM;
goto read_rbu_data_exit;
}
if (pos > rbu_data.imagesize) {
retval = 0;
- printk(KERN_WARNING "dell_rbu:read_packet_data: "
- "data underrun\n");
+ pr_warn("data underrun\n");
goto read_rbu_data_exit;
}
@@ -489,8 +464,7 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
/* check to see if we have something to return */
if ((rbu_data.image_update_buffer == NULL) ||
(rbu_data.bios_image_size == 0)) {
- pr_debug("read_rbu_data_mono: image_update_buffer %p ,"
- "bios_image_size %lu\n",
+ pr_debug("image_update_buffer %p, bios_image_size %lu\n",
rbu_data.image_update_buffer,
rbu_data.bios_image_size);
return -ENOMEM;
@@ -500,9 +474,9 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
rbu_data.image_update_buffer, rbu_data.bios_image_size);
}
-static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t pos, size_t count)
+static ssize_t data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
ssize_t ret_count = 0;
@@ -513,11 +487,12 @@ static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj,
else if (!strcmp(image_type, "packet"))
ret_count = read_packet_data(buffer, pos, count);
else
- pr_debug("read_rbu_data: invalid image type specified\n");
+ pr_debug("invalid image type specified\n");
spin_unlock(&rbu_data.lock);
return ret_count;
}
+static BIN_ATTR_RO(data, 0);
static void callbackfn_rbu(const struct firmware *fw, void *context)
{
@@ -548,15 +523,15 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
*/
packet_empty_list();
} else
- pr_debug("invalid image type specified.\n");
+ pr_debug("invalid image type specified\n");
spin_unlock(&rbu_data.lock);
out:
release_firmware(fw);
}
-static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t pos, size_t count)
+static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
int size = 0;
if (!pos)
@@ -564,9 +539,9 @@ static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj,
return size;
}
-static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t pos, size_t count)
+static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
int rc = count;
int req_firm_rc = 0;
@@ -602,9 +577,7 @@ static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
&rbu_device->dev, GFP_KERNEL, &context,
callbackfn_rbu);
if (req_firm_rc) {
- printk(KERN_ERR
- "dell_rbu:%s request_firmware_nowait"
- " failed %d\n", __func__, rc);
+ pr_err("request_firmware_nowait failed %d\n", rc);
rc = -EIO;
} else
rbu_data.entry_created = 1;
@@ -612,7 +585,7 @@ static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
spin_lock(&rbu_data.lock);
}
} else {
- printk(KERN_WARNING "dell_rbu: image_type is invalid\n");
+ pr_warn("image_type is invalid\n");
spin_unlock(&rbu_data.lock);
return -EINVAL;
}
@@ -624,10 +597,11 @@ static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
return rc;
}
+static BIN_ATTR_RW(image_type, 0);
-static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t pos, size_t count)
+static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
int size = 0;
if (!pos) {
@@ -638,9 +612,9 @@ static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj,
return size;
}
-static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t pos, size_t count)
+static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
unsigned long temp;
spin_lock(&rbu_data.lock);
@@ -652,22 +626,17 @@ static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj,
spin_unlock(&rbu_data.lock);
return count;
}
+static BIN_ATTR_RW(packet_size, 0);
-static struct bin_attribute rbu_data_attr = {
- .attr = {.name = "data", .mode = 0444},
- .read = read_rbu_data,
-};
-
-static struct bin_attribute rbu_image_type_attr = {
- .attr = {.name = "image_type", .mode = 0644},
- .read = read_rbu_image_type,
- .write = write_rbu_image_type,
+static struct bin_attribute *rbu_bin_attrs[] = {
+ &bin_attr_data,
+ &bin_attr_image_type,
+ &bin_attr_packet_size,
+ NULL
};
-static struct bin_attribute rbu_packet_size_attr = {
- .attr = {.name = "packet_size", .mode = 0644},
- .read = read_rbu_packet_size,
- .write = write_rbu_packet_size,
+static const struct attribute_group rbu_group = {
+ .bin_attrs = rbu_bin_attrs,
};
static int __init dcdrbu_init(void)
@@ -678,30 +647,17 @@ static int __init dcdrbu_init(void)
init_packet_head();
rbu_device = platform_device_register_simple("dell_rbu", -1, NULL, 0);
if (IS_ERR(rbu_device)) {
- printk(KERN_ERR
- "dell_rbu:%s:platform_device_register_simple "
- "failed\n", __func__);
+ pr_err("platform_device_register_simple failed\n");
return PTR_ERR(rbu_device);
}
- rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
+ rc = sysfs_create_group(&rbu_device->dev.kobj, &rbu_group);
if (rc)
goto out_devreg;
- rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
- if (rc)
- goto out_data;
- rc = sysfs_create_bin_file(&rbu_device->dev.kobj,
- &rbu_packet_size_attr);
- if (rc)
- goto out_imtype;
rbu_data.entry_created = 0;
return 0;
-out_imtype:
- sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
-out_data:
- sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
out_devreg:
platform_device_unregister(rbu_device);
return rc;
@@ -713,6 +669,7 @@ static __exit void dcdrbu_exit(void)
packet_empty_list();
img_update_free();
spin_unlock(&rbu_data.lock);
+ sysfs_remove_group(&rbu_device->dev.kobj, &rbu_group);
platform_device_unregister(rbu_device);
}
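
The dell_rbu conversion above trades three sysfs_create_bin_file() calls and their unwind labels for a single attribute group: BIN_ATTR_RO()/BIN_ATTR_RW() generate bin_attr_<name> objects wired to <name>_read/<name>_write by naming convention, and one sysfs_create_group()/sysfs_remove_group() pair registers and removes them all. A reduced sketch with a single read-only attribute (names illustrative):

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t data_read(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *attr, char *buf,
			 loff_t pos, size_t count)
{
	return 0; /* a real driver copies up to count bytes into buf */
}
static BIN_ATTR_RO(data, 0); /* emits bin_attr_data, mode 0444 */

static struct bin_attribute *example_bin_attrs[] = {
	&bin_attr_data,
	NULL
};

static const struct attribute_group example_group = {
	.bin_attrs = example_bin_attrs,
};

/* probe:  sysfs_create_group(&dev->kobj, &example_group);  */
/* remove: sysfs_remove_group(&dev->kobj, &example_group);  */
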
diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
index b471b86c28fe..5b516e4c2bfb 100644
--- a/drivers/platform/x86/gpd-pocket-fan.c
+++ b/drivers/platform/x86/gpd-pocket-fan.c
@@ -128,7 +128,7 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
- dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n",
+ dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n",
temp_limits[i]);
temp_limits[0] = TEMP_LIMIT0_DEFAULT;
temp_limits[1] = TEMP_LIMIT1_DEFAULT;
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index ffb8d5d1eb5f..6acc8457866e 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -28,7 +28,7 @@ struct i2c_inst_data {
struct i2c_multi_inst_data {
int num_clients;
- struct i2c_client *clients[0];
+ struct i2c_client *clients[];
};
static int i2c_multi_inst_count(struct acpi_resource *ares, void *data)
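
The clients[0] to clients[] change swaps the old GNU zero-length-array idiom for a C99 flexible array member, which lets the compiler and fortify checks see the array's real extent. Allocation pairs naturally with struct_size(), which computes the header plus n trailing elements without risking integer overflow. A sketch under those assumptions (names illustrative):

#include <linux/i2c.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_multi {
	int num_clients;
	struct i2c_client *clients[];	/* flexible array member */
};

static struct example_multi *example_alloc(int n)
{
	struct example_multi *multi;

	/* struct_size() = sizeof(*multi) + n * sizeof(multi->clients[0]) */
	multi = kzalloc(struct_size(multi, clients, n), GFP_KERNEL);
	if (!multi)
		return NULL;
	multi->num_clients = n;
	return multi;
}
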
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 43d590250228..cc7dd4d87cce 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -19,10 +19,11 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = {
- {"INT1051", 0},
{"INT33D5", 0},
+ {"INTC1051", 0},
{"", 0},
};
+MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
/* In theory, these are HID usages. */
static const struct key_entry intel_hid_keymap[] = {
@@ -541,7 +542,6 @@ static struct platform_driver intel_hid_pl_driver = {
.probe = intel_hid_probe,
.remove = intel_hid_remove,
};
-MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
/*
* Unfortunately, some laptops provide a _HID="INT33D5" device with
diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c
index 2b1a0734c3f8..12d5ab7e1f5d 100644
--- a/drivers/platform/x86/intel-uncore-frequency.c
+++ b/drivers/platform/x86/intel-uncore-frequency.c
@@ -38,6 +38,7 @@
*/
struct uncore_data {
struct kobject kobj;
+ struct completion kobj_unregister;
u64 stored_uncore_data;
u32 initial_min_freq_khz;
u32 initial_max_freq_khz;
@@ -52,7 +53,7 @@ static int uncore_max_entries __read_mostly;
/* Storage for uncore data for all instances */
static struct uncore_data *uncore_instances;
/* Root of the all uncore sysfs kobjs */
-struct kobject uncore_root_kobj;
+static struct kobject *uncore_root_kobj;
/* Stores the CPU mask of the target CPUs to use during uncore read/write */
static cpumask_t uncore_cpu_mask;
/* CPU online callback register instance */
@@ -97,6 +98,9 @@ static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
u64 cap;
int ret;
+ if (data->control_cpu < 0)
+ return -ENXIO;
+
ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
if (ret)
return ret;
@@ -116,6 +120,11 @@ static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
mutex_lock(&uncore_lock);
+ if (data->control_cpu < 0) {
+ ret = -ENXIO;
+ goto finish_write;
+ }
+
input /= UNCORE_FREQ_KHZ_MULTIPLIER;
if (!input || input > 0x7F) {
ret = -EINVAL;
@@ -217,15 +226,19 @@ static struct attribute *uncore_attrs[] = {
NULL
};
+static void uncore_sysfs_entry_release(struct kobject *kobj)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+
+ complete(&data->kobj_unregister);
+}
+
static struct kobj_type uncore_ktype = {
+ .release = uncore_sysfs_entry_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_attrs = uncore_attrs,
};
-static struct kobj_type uncore_root_ktype = {
- .sysfs_ops = &kobj_sysfs_ops,
-};
-
/* Caller provides protection */
static struct uncore_data *uncore_get_instance(unsigned int cpu)
{
@@ -263,8 +276,10 @@ static void uncore_add_die_entry(int cpu)
uncore_read_ratio(data, &data->initial_min_freq_khz,
&data->initial_max_freq_khz);
+ init_completion(&data->kobj_unregister);
+
ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
- &uncore_root_kobj, str);
+ uncore_root_kobj, str);
if (!ret) {
data->control_cpu = cpu;
data->valid = true;
@@ -273,18 +288,15 @@ static void uncore_add_die_entry(int cpu)
mutex_unlock(&uncore_lock);
}
-/* Last CPU in this die is offline, so remove sysfs entries */
+/* Last CPU in this die is offline, make control cpu invalid */
static void uncore_remove_die_entry(int cpu)
{
struct uncore_data *data;
mutex_lock(&uncore_lock);
data = uncore_get_instance(cpu);
- if (data) {
- kobject_put(&data->kobj);
+ if (data)
data->control_cpu = -1;
- data->valid = false;
- }
mutex_unlock(&uncore_lock);
}
@@ -358,15 +370,13 @@ static struct notifier_block uncore_pm_nb = {
.notifier_call = uncore_pm_notify,
};
-#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
-
static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
- ICPU(INTEL_FAM6_BROADWELL_G),
- ICPU(INTEL_FAM6_BROADWELL_X),
- ICPU(INTEL_FAM6_BROADWELL_D),
- ICPU(INTEL_FAM6_SKYLAKE_X),
- ICPU(INTEL_FAM6_ICELAKE_X),
- ICPU(INTEL_FAM6_ICELAKE_D),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
{}
};
@@ -386,11 +396,12 @@ static int __init intel_uncore_init(void)
if (!uncore_instances)
return -ENOMEM;
- ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype,
- &cpu_subsys.dev_root->kobj,
- "intel_uncore_frequency");
- if (ret)
+ uncore_root_kobj = kobject_create_and_add("intel_uncore_frequency",
+ &cpu_subsys.dev_root->kobj);
+ if (!uncore_root_kobj) {
+ ret = -ENOMEM;
goto err_free;
+ }
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"platform/x86/uncore-freq:online",
@@ -410,7 +421,7 @@ static int __init intel_uncore_init(void)
err_rem_state:
cpuhp_remove_state(uncore_hp_state);
err_rem_kobj:
- kobject_put(&uncore_root_kobj);
+ kobject_put(uncore_root_kobj);
err_free:
kfree(uncore_instances);
@@ -425,10 +436,12 @@ static void __exit intel_uncore_exit(void)
unregister_pm_notifier(&uncore_pm_nb);
cpuhp_remove_state(uncore_hp_state);
for (i = 0; i < uncore_max_entries; ++i) {
- if (uncore_instances[i].valid)
+ if (uncore_instances[i].valid) {
kobject_put(&uncore_instances[i].kobj);
+ wait_for_completion(&uncore_instances[i].kobj_unregister);
+ }
}
- kobject_put(&uncore_root_kobj);
+ kobject_put(uncore_root_kobj);
kfree(uncore_instances);
}
module_exit(intel_uncore_exit)
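
The completion added above implements the standard kobject teardown handshake: kobject_put() only drops a reference, and the embedded object may still be in use until the ktype's ->release() runs, so the owner must not free the backing storage until release signals it. Sketch of the pattern (illustrative names):

#include <linux/completion.h>
#include <linux/kobject.h>

struct example_data {
	struct kobject kobj;
	struct completion kobj_unregister;
};

static void example_release(struct kobject *kobj)
{
	struct example_data *data =
		container_of(kobj, struct example_data, kobj);

	/* last reference is gone; the waiter may free us now */
	complete(&data->kobj_unregister);
}

static struct kobj_type example_ktype = {
	.release = example_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* teardown:
 *	kobject_put(&data->kobj);
 *	wait_for_completion(&data->kobj_unregister);
 *	... only now free the memory containing data ...
 */
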
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index b74932307d69..b5880936d785 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -26,6 +26,7 @@ static const struct acpi_device_id intel_vbtn_ids[] = {
{"INT33D6", 0},
{"", 0},
};
+MODULE_DEVICE_TABLE(acpi, intel_vbtn_ids);
/* In theory, these are HID usages. */
static const struct key_entry intel_vbtn_keymap[] = {
@@ -239,7 +240,6 @@ static struct platform_driver intel_vbtn_pl_driver = {
.probe = intel_vbtn_probe,
.remove = intel_vbtn_remove,
};
-MODULE_DEVICE_TABLE(acpi, intel_vbtn_ids);
static acpi_status __init
check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index f14e2c5f9da5..289c6655d425 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static bool int0002_check_wake(void *data)
+{
+ u32 gpe_sts_reg;
+
+ gpe_sts_reg = inl(GPE0A_STS_PORT);
+ return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
+}
+
static struct irq_chip int0002_byt_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
@@ -148,8 +156,8 @@ static struct irq_chip int0002_cht_irqchip = {
};
static const struct x86_cpu_id int0002_cpu_ids[] = {
- INTEL_CPU_FAM6(ATOM_SILVERMONT, int0002_byt_irqchip), /* Valleyview, Bay Trail */
- INTEL_CPU_FAM6(ATOM_AIRMONT, int0002_cht_irqchip), /* Braswell, Cherry Trail */
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &int0002_byt_irqchip),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &int0002_cht_irqchip),
{}
};
@@ -220,6 +228,7 @@ static int int0002_probe(struct platform_device *pdev)
return ret;
}
+ acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
device_init_wakeup(dev, true);
return 0;
}
@@ -227,6 +236,7 @@ static int int0002_probe(struct platform_device *pdev)
static int int0002_remove(struct platform_device *pdev)
{
device_init_wakeup(&pdev->dev, false);
+ acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
return 0;
}
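
acpi_register_wakeup_handler(), introduced alongside these changes, lets a driver sharing a wake GPE tell the s2idle loop whether its device actually caused the wakeup; the callback simply samples hardware status and answers yes or no. A sketch of the contract, with a hypothetical status port and bit:

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/io.h>

#define EXAMPLE_STS_PORT 0x420		/* hypothetical status I/O port */
#define EXAMPLE_WAKE_STS BIT(13)	/* hypothetical wake status bit */

static bool example_check_wake(void *data)
{
	/* true means "my device raised this shared wake event" */
	return inl(EXAMPLE_STS_PORT) & EXAMPLE_WAKE_STS;
}

/* probe:  acpi_register_wakeup_handler(irq, example_check_wake, NULL);  */
/* remove: acpi_unregister_wakeup_handler(example_check_wake, NULL);     */
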
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 6f436836fe50..9c9f209c8a33 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -113,8 +113,8 @@ static const struct mid_pb_ddata mrfld_ddata = {
};
static const struct x86_cpu_id mid_pb_cpu_ids[] = {
- INTEL_CPU_FAM6(ATOM_SALTWELL_MID, mfld_ddata),
- INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, mrfld_ddata),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, &mfld_ddata),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &mrfld_ddata),
{}
};
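
The per-driver ICPU()/INTEL_CPU_FAM6() wrappers are replaced throughout this series by the central X86_MATCH_INTEL_FAM6_MODEL() macro; its second argument lands in x86_cpu_id.driver_data as a kernel_ulong_t and is recovered after x86_match_cpu(). A sketch of the consumer side (struct and names illustrative):

#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

struct example_ddata {
	int quirk;
};

static const struct example_ddata example_ddata = { .quirk = 1 };

static const struct x86_cpu_id example_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &example_ddata),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

static int __init example_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(example_cpu_ids);
	const struct example_ddata *ddata;

	if (!id)
		return -ENODEV;

	/* the macro's last argument comes back via driver_data */
	ddata = (const struct example_ddata *)id->driver_data;
	pr_info("quirk=%d\n", ddata->quirk);
	return 0;
}
module_init(example_init);
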
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 144faa8bad3d..7c8bdab078cf 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>
@@ -193,7 +194,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"Fuse", BIT(6)},
/*
* Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
- * Tiger Lake and Elkhart Lake.
+ * Tiger Lake, Elkhart Lake and Jasper Lake.
*/
{"SBR8", BIT(7)},
@@ -240,7 +241,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"HDA_PGD6", BIT(4)},
/*
* Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
- * Tiger Lake and ELkhart Lake.
+ * Tiger Lake, Elkhart Lake and Jasper Lake.
*/
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
@@ -254,7 +255,7 @@ static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
};
static const struct pmc_bit_map icl_pfear_map[] = {
- /* Ice Lake generation onwards only */
+ /* Ice Lake and Jasper Lake generation onwards only */
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -408,13 +409,157 @@ static const struct pmc_reg_map icl_reg_map = {
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
};
+static const struct pmc_bit_map tgl_lpm0_map[] = {
+ {"USB2PLL_OFF_STS", BIT(18)},
+ {"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)},
+ {"PCIe_Gen3PLL_OFF_STS", BIT(20)},
+ {"OPIOPLL_OFF_STS", BIT(21)},
+ {"OCPLL_OFF_STS", BIT(22)},
+ {"AudioPLL_OFF_STS", BIT(23)},
+ {"MIPIPLL_OFF_STS", BIT(24)},
+ {"Fast_XTAL_Osc_OFF_STS", BIT(25)},
+ {"AC_Ring_Osc_OFF_STS", BIT(26)},
+ {"MC_Ring_Osc_OFF_STS", BIT(27)},
+ {"SATAPLL_OFF_STS", BIT(29)},
+ {"XTAL_USB2PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_lpm1_map[] = {
+ {"SPI_PG_STS", BIT(2)},
+ {"xHCI_PG_STS", BIT(3)},
+ {"PCIe_Ctrller_A_PG_STS", BIT(4)},
+ {"PCIe_Ctrller_B_PG_STS", BIT(5)},
+ {"PCIe_Ctrller_C_PG_STS", BIT(6)},
+ {"GBE_PG_STS", BIT(7)},
+ {"SATA_PG_STS", BIT(8)},
+ {"HDA0_PG_STS", BIT(9)},
+ {"HDA1_PG_STS", BIT(10)},
+ {"HDA2_PG_STS", BIT(11)},
+ {"HDA3_PG_STS", BIT(12)},
+ {"PCIe_Ctrller_D_PG_STS", BIT(13)},
+ {"ISIO_PG_STS", BIT(14)},
+ {"SMB_PG_STS", BIT(16)},
+ {"ISH_PG_STS", BIT(17)},
+ {"ITH_PG_STS", BIT(19)},
+ {"SDX_PG_STS", BIT(20)},
+ {"xDCI_PG_STS", BIT(25)},
+ {"DCI_PG_STS", BIT(26)},
+ {"CSME0_PG_STS", BIT(27)},
+ {"CSME_KVM_PG_STS", BIT(28)},
+ {"CSME1_PG_STS", BIT(29)},
+ {"CSME_CLINK_PG_STS", BIT(30)},
+ {"CSME2_PG_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_lpm2_map[] = {
+ {"ADSP_D3_STS", BIT(0)},
+ {"SATA_D3_STS", BIT(1)},
+ {"xHCI0_D3_STS", BIT(2)},
+ {"xDCI1_D3_STS", BIT(5)},
+ {"SDX_D3_STS", BIT(6)},
+ {"EMMC_D3_STS", BIT(7)},
+ {"IS_D3_STS", BIT(8)},
+ {"THC0_D3_STS", BIT(9)},
+ {"THC1_D3_STS", BIT(10)},
+ {"GBE_D3_STS", BIT(11)},
+ {"GBE_TSN_D3_STS", BIT(12)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_lpm3_map[] = {
+ {"GPIO_COM0_VNN_REQ_STS", BIT(1)},
+ {"GPIO_COM1_VNN_REQ_STS", BIT(2)},
+ {"GPIO_COM2_VNN_REQ_STS", BIT(3)},
+ {"GPIO_COM3_VNN_REQ_STS", BIT(4)},
+ {"GPIO_COM4_VNN_REQ_STS", BIT(5)},
+ {"GPIO_COM5_VNN_REQ_STS", BIT(6)},
+ {"Audio_VNN_REQ_STS", BIT(7)},
+ {"ISH_VNN_REQ_STS", BIT(8)},
+ {"CNVI_VNN_REQ_STS", BIT(9)},
+ {"eSPI_VNN_REQ_STS", BIT(10)},
+ {"Display_VNN_REQ_STS", BIT(11)},
+ {"DTS_VNN_REQ_STS", BIT(12)},
+ {"SMBUS_VNN_REQ_STS", BIT(14)},
+ {"CSME_VNN_REQ_STS", BIT(15)},
+ {"SMLINK0_VNN_REQ_STS", BIT(16)},
+ {"SMLINK1_VNN_REQ_STS", BIT(17)},
+ {"CLINK_VNN_REQ_STS", BIT(20)},
+ {"DCI_VNN_REQ_STS", BIT(21)},
+ {"ITH_VNN_REQ_STS", BIT(22)},
+ {"CSME_VNN_REQ_STS", BIT(24)},
+ {"GBE_VNN_REQ_STS", BIT(25)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_lpm4_map[] = {
+ {"CPU_C10_REQ_STS_0", BIT(0)},
+ {"PCIe_LPM_En_REQ_STS_3", BIT(3)},
+ {"ITH_REQ_STS_5", BIT(5)},
+ {"CNVI_REQ_STS_6", BIT(6)},
+ {"ISH_REQ_STS_7", BIT(7)},
+ {"USB2_SUS_PG_Sys_REQ_STS_10", BIT(10)},
+ {"PCIe_Clk_REQ_STS_12", BIT(12)},
+ {"MPHY_Core_DL_REQ_STS_16", BIT(16)},
+ {"Break-even_En_REQ_STS_17", BIT(17)},
+ {"Auto-demo_En_REQ_STS_18", BIT(18)},
+ {"MPHY_SUS_REQ_STS_22", BIT(22)},
+ {"xDCI_attached_REQ_STS_24", BIT(24)},
+ {}
+};
+
+static const struct pmc_bit_map tgl_lpm5_map[] = {
+ {"LSX_Wake0_En_STS", BIT(0)},
+ {"LSX_Wake0_Pol_STS", BIT(1)},
+ {"LSX_Wake1_En_STS", BIT(2)},
+ {"LSX_Wake1_Pol_STS", BIT(3)},
+ {"LSX_Wake2_En_STS", BIT(4)},
+ {"LSX_Wake2_Pol_STS", BIT(5)},
+ {"LSX_Wake3_En_STS", BIT(6)},
+ {"LSX_Wake3_Pol_STS", BIT(7)},
+ {"LSX_Wake4_En_STS", BIT(8)},
+ {"LSX_Wake4_Pol_STS", BIT(9)},
+ {"LSX_Wake5_En_STS", BIT(10)},
+ {"LSX_Wake5_Pol_STS", BIT(11)},
+ {"LSX_Wake6_En_STS", BIT(12)},
+ {"LSX_Wake6_Pol_STS", BIT(13)},
+ {"LSX_Wake7_En_STS", BIT(14)},
+ {"LSX_Wake7_Pol_STS", BIT(15)},
+ {"Intel_Se_IO_Wake0_En_STS", BIT(16)},
+ {"Intel_Se_IO_Wake0_Pol_STS", BIT(17)},
+ {"Intel_Se_IO_Wake1_En_STS", BIT(18)},
+ {"Intel_Se_IO_Wake1_Pol_STS", BIT(19)},
+ {"Int_Timer_SS_Wake0_En_STS", BIT(20)},
+ {"Int_Timer_SS_Wake0_Pol_STS", BIT(21)},
+ {"Int_Timer_SS_Wake1_En_STS", BIT(22)},
+ {"Int_Timer_SS_Wake1_Pol_STS", BIT(23)},
+ {"Int_Timer_SS_Wake2_En_STS", BIT(24)},
+ {"Int_Timer_SS_Wake2_Pol_STS", BIT(25)},
+ {"Int_Timer_SS_Wake3_En_STS", BIT(26)},
+ {"Int_Timer_SS_Wake3_Pol_STS", BIT(27)},
+ {"Int_Timer_SS_Wake4_En_STS", BIT(28)},
+ {"Int_Timer_SS_Wake4_Pol_STS", BIT(29)},
+ {"Int_Timer_SS_Wake5_En_STS", BIT(30)},
+ {"Int_Timer_SS_Wake5_Pol_STS", BIT(31)},
+ {}
+};
+
+static const struct pmc_bit_map *tgl_lpm_maps[] = {
+ tgl_lpm0_map,
+ tgl_lpm1_map,
+ tgl_lpm2_map,
+ tgl_lpm3_map,
+ tgl_lpm4_map,
+ tgl_lpm5_map,
+ NULL
+};
+
static const struct pmc_reg_map tgl_reg_map = {
.pfear_sts = ext_tgl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
- .slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
.msr_sts = msr_map,
- .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
.regmap_length = CNP_PMC_MMIO_REG_LEN,
.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
@@ -422,6 +567,12 @@ static const struct pmc_reg_map tgl_reg_map = {
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+ .lpm_modes = tgl_lpm_modes,
+ .lpm_en_offset = TGL_LPM_EN_OFFSET,
+ .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = tgl_lpm_maps,
+ .lpm_status_offset = TGL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
};
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
@@ -463,7 +614,84 @@ static int pmc_core_check_read_lock_bit(void)
return value & BIT(pmcdev->map->pm_read_disable_bit);
}
-#if IS_ENABLED(CONFIG_DEBUG_FS)
+static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev,
+ struct seq_file *s)
+{
+ const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
+ const struct pmc_bit_map *map;
+ int offset = pmcdev->map->slps0_dbg_offset;
+ u32 data;
+
+ while (*maps) {
+ map = *maps;
+ data = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ while (map->name) {
+ if (dev)
+ dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ if (s)
+ seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ ++map;
+ }
+ ++maps;
+ }
+}
+
+static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
+{
+ int idx;
+
+ for (idx = 0; maps[idx]; idx++)
+ ;/* Nothing */
+
+ return idx;
+}
+
+static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
+ struct seq_file *s, u32 offset,
+ const char *str,
+ const struct pmc_bit_map **maps)
+{
+ int index, idx, len = 32, bit_mask, arr_size;
+ u32 *lpm_regs;
+
+ arr_size = pmc_core_lpm_get_arr_size(maps);
+ lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
+ if (!lpm_regs)
+ return;
+
+ for (index = 0; index < arr_size; index++) {
+ lpm_regs[index] = pmc_core_reg_read(pmcdev, offset);
+ offset += 4;
+ }
+
+ for (idx = 0; idx < arr_size; idx++) {
+ if (dev)
+ dev_dbg(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx,
+ lpm_regs[idx]);
+ if (s)
+ seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx,
+ lpm_regs[idx]);
+ for (index = 0; index < len && maps[idx][index].name; index++) {
+ bit_mask = maps[idx][index].bit_mask;
+ if (dev)
+ dev_dbg(dev, "%-30s %-30d\n",
+ maps[idx][index].name,
+ lpm_regs[idx] & bit_mask ? 1 : 0);
+ if (s)
+ seq_printf(s, "%-30s %-30d\n",
+ maps[idx][index].name,
+ lpm_regs[idx] & bit_mask ? 1 : 0);
+ }
+ }
+
+ kfree(lpm_regs);
+}
+
static bool slps0_dbg_latch;
static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
@@ -698,27 +926,11 @@ out_unlock:
static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
- const struct pmc_bit_map *map;
- int offset;
- u32 data;
pmc_core_slps0_dbg_latch(pmcdev, false);
- offset = pmcdev->map->slps0_dbg_offset;
- while (*maps) {
- map = *maps;
- data = pmc_core_reg_read(pmcdev, offset);
- offset += 4;
- while (map->name) {
- seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
- map->name,
- data & map->bit_mask ?
- "Yes" : "No");
- ++map;
- }
- ++maps;
- }
+ pmc_core_slps0_display(pmcdev, NULL, s);
pmc_core_slps0_dbg_latch(pmcdev, true);
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
@@ -794,6 +1006,51 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
+static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const char **lpm_modes = pmcdev->map->lpm_modes;
+ u32 offset = pmcdev->map->lpm_residency_offset;
+ u32 lpm_en;
+ int index;
+
+ lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
+ seq_printf(s, "status substate residency\n");
+ for (index = 0; lpm_modes[index]; index++) {
+ seq_printf(s, "%7s %7s %-15u\n",
+ BIT(index) & lpm_en ? "Enabled" : " ",
+ lpm_modes[index], pmc_core_reg_read(pmcdev, offset));
+ offset += 4;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
+
+static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ u32 offset = pmcdev->map->lpm_status_offset;
+
+ pmc_core_lpm_display(pmcdev, NULL, s, offset, "STATUS", maps);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
+
+static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ u32 offset = pmcdev->map->lpm_live_status_offset;
+
+ pmc_core_lpm_display(pmcdev, NULL, s, offset, "LIVE_STATUS", maps);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
+
static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
@@ -859,30 +1116,37 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
debugfs_create_bool("slp_s0_dbg_latch", 0644,
dir, &slps0_dbg_latch);
}
-}
-#else
-static inline void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
-{
-}
-static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
-{
+ if (pmcdev->map->lpm_en_offset) {
+ debugfs_create_file("substate_residencies", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_res_fops);
+ }
+
+ if (pmcdev->map->lpm_status_offset) {
+ debugfs_create_file("substate_status_registers", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_sts_regs_fops);
+ debugfs_create_file("substate_live_status_registers", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_l_sts_regs_fops);
+ }
}
-#endif /* CONFIG_DEBUG_FS */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- INTEL_CPU_FAM6(SKYLAKE_L, spt_reg_map),
- INTEL_CPU_FAM6(SKYLAKE, spt_reg_map),
- INTEL_CPU_FAM6(KABYLAKE_L, spt_reg_map),
- INTEL_CPU_FAM6(KABYLAKE, spt_reg_map),
- INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
- INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
- INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
- INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
- INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
- INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
- INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
- INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &spt_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &cnp_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map),
{}
};
@@ -986,13 +1250,11 @@ static int pmc_core_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
-static int pmc_core_suspend(struct device *dev)
+static __maybe_unused int pmc_core_suspend(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
@@ -1044,13 +1306,11 @@ static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
return false;
}
-static int pmc_core_resume(struct device *dev)
+static __maybe_unused int pmc_core_resume(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
- const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
- int offset = pmcdev->map->slps0_dbg_offset;
- const struct pmc_bit_map *map;
- u32 data;
+ const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+ int offset = pmcdev->map->lpm_status_offset;
if (!pmcdev->check_counters)
return 0;
@@ -1068,23 +1328,14 @@ static int pmc_core_resume(struct device *dev)
/* The real interesting case - S0ix failed - lets ask PMC why. */
dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
pmcdev->s0ix_counter);
- while (*maps) {
- map = *maps;
- data = pmc_core_reg_read(pmcdev, offset);
- offset += 4;
- while (map->name) {
- dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
- map->name,
- data & map->bit_mask ? "Yes" : "No");
- map++;
- }
- maps++;
- }
+ if (pmcdev->map->slps0_dbg_maps)
+ pmc_core_slps0_display(pmcdev, dev, NULL);
+ if (pmcdev->map->lpm_sts)
+ pmc_core_lpm_display(pmcdev, dev, NULL, offset, "STATUS", maps);
+
return 0;
}
-#endif
-
static const struct dev_pm_ops pmc_core_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};
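
Dropping the #ifdef CONFIG_PM_SLEEP block works because SET_LATE_SYSTEM_SLEEP_PM_OPS() expands to nothing when sleep support is compiled out; tagging the callbacks __maybe_unused silences the resulting defined-but-unused warnings while keeping them compile-checked in every configuration. The shape of the pattern (bodies illustrative):

#include <linux/device.h>
#include <linux/pm.h>

static __maybe_unused int example_suspend(struct device *dev)
{
	return 0; /* e.g. snapshot counters before sleep */
}

static __maybe_unused int example_resume(struct device *dev)
{
	return 0; /* e.g. compare counters, warn on failure */
}

static const struct dev_pm_ops example_pm_ops = {
	/* empty when CONFIG_PM_SLEEP=n; callbacks still get compiled */
	SET_LATE_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};
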
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index f1a0792b3f91..5eae55d80226 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -188,6 +188,28 @@ enum ppfear_regs {
#define TGL_NUM_IP_IGN_ALLOWED 22
+/*
+ * Tigerlake Power Management Controller register offsets
+ */
+#define TGL_LPM_EN_OFFSET 0x1C78
+#define TGL_LPM_RESIDENCY_OFFSET 0x1C80
+
+/* Tigerlake Low Power Mode debug registers */
+#define TGL_LPM_STATUS_OFFSET 0x1C3C
+#define TGL_LPM_LIVE_STATUS_OFFSET 0x1C5C
+
+const char *tgl_lpm_modes[] = {
+ "S0i2.0",
+ "S0i2.1",
+ "S0i2.2",
+ "S0i3.0",
+ "S0i3.1",
+ "S0i3.2",
+ "S0i3.3",
+ "S0i3.4",
+ NULL
+};
+
struct pmc_bit_map {
const char *name;
u32 bit_mask;
@@ -221,6 +243,7 @@ struct pmc_reg_map {
const struct pmc_bit_map **slps0_dbg_maps;
const struct pmc_bit_map *ltr_show_sts;
const struct pmc_bit_map *msr_sts;
+ const struct pmc_bit_map **lpm_sts;
const u32 slp_s0_offset;
const u32 ltr_ignore_offset;
const int regmap_length;
@@ -231,6 +254,12 @@ struct pmc_reg_map {
const u32 slps0_dbg_offset;
const u32 ltr_ignore_max;
const u32 pm_vric1_offset;
+ /* Low Power Mode registers */
+ const char **lpm_modes;
+ const u32 lpm_en_offset;
+ const u32 lpm_residency_offset;
+ const u32 lpm_status_offset;
+ const u32 lpm_live_status_offset;
};
/**
@@ -253,9 +282,7 @@ struct pmc_dev {
u32 base_addr;
void __iomem *regbase;
const struct pmc_reg_map *map;
-#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
-#endif /* CONFIG_DEBUG_FS */
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
index e1266f5c6359..731281855cc8 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
@@ -38,14 +38,14 @@ static struct platform_device pmc_core_device = {
* other list may grow, but this list should not.
*/
static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
- INTEL_CPU_FAM6(SKYLAKE_L, pmc_core_device),
- INTEL_CPU_FAM6(SKYLAKE, pmc_core_device),
- INTEL_CPU_FAM6(KABYLAKE_L, pmc_core_device),
- INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
- INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
- INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
- INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
- INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c
index 89b042aecef3..1b6eab071068 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c
@@ -160,10 +160,8 @@ static struct notifier_block isst_pm_nb = {
.notifier_call = isst_pm_notify,
};
-#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
-
static const struct x86_cpu_id isst_if_cpu_ids[] = {
- ICPU(INTEL_FAM6_SKYLAKE_X),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
index ad8c7c0df4d9..3584859fcc42 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
@@ -126,7 +126,7 @@ static void isst_if_remove(struct pci_dev *pdev)
struct isst_if_device *punit_dev;
punit_dev = pci_get_drvdata(pdev);
- isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+ isst_if_cdev_unregister(ISST_IF_DEV_MMIO);
mutex_destroy(&punit_dev->mutex);
}
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index 8e3fb55ac1ae..8a53d3b485b3 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -308,11 +308,10 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
};
static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
- INTEL_CPU_FAM6(ATOM_GOLDMONT, telem_apl_debugfs_conf),
- INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
{}
};
-
MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids);
static int telemetry_debugfs_check_evts(void)
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index c4c742bb23cf..987a24e3344e 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -67,9 +67,6 @@
#define TELEM_CLEAR_VERBOSITY_BITS(x) ((x) &= ~(BIT(27) | BIT(28)))
#define TELEM_SET_VERBOSITY_BITS(x, y) ((x) |= ((y) << 27))
-#define TELEM_CPU(model, data) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data }
-
enum telemetry_action {
TELEM_UPDATE = 0,
TELEM_ADD,
@@ -183,8 +180,8 @@ static struct telemetry_plt_config telem_glk_config = {
};
static const struct x86_cpu_id telemetry_cpu_ids[] = {
- TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
- TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_glk_config),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config),
{}
};
diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c
index 7b9cc841ab65..892140b62898 100644
--- a/drivers/platform/x86/intel_turbo_max_3.c
+++ b/drivers/platform/x86/intel_turbo_max_3.c
@@ -113,11 +113,9 @@ static int itmt_legacy_cpu_online(unsigned int cpu)
return 0;
}
-#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
-
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
- ICPU(INTEL_FAM6_BROADWELL_X),
- ICPU(INTEL_FAM6_SKYLAKE_X),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
{}
};
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 3e3c66dfec2e..ca684ed760d1 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -385,6 +385,14 @@ static const struct dmi_system_id critclk_systems[] = {
},
{
/* pmc_plt_clk* - are used for ethernet controllers */
+ .ident = "Lex 2I385SW",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "2I385SW"),
+ },
+ },
+ {
+ /* pmc_plt_clk* - are used for ethernet controllers */
.ident = "Beckhoff CB3163",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index fb088dd8529e..51309f7ceede 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -827,10 +827,10 @@ static ssize_t sony_nc_handles_show(struct device *dev,
int i;
for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
- len += snprintf(buffer + len, PAGE_SIZE - len, "0x%.4x ",
+ len += scnprintf(buffer + len, PAGE_SIZE - len, "0x%.4x ",
handles->cap[i]);
}
- len += snprintf(buffer + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buffer + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -2187,10 +2187,10 @@ static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
if (!cnt || (th_handle->profiles & cnt))
- idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
+ idx += scnprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
snc_thermal_profiles[cnt]);
}
- idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n");
+ idx += scnprintf(buffer + idx, PAGE_SIZE - idx, "\n");
return idx;
}
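
The snprintf() to scnprintf() swap closes a classic buffer-accumulation hazard: snprintf() returns the length the output would have had, so after one truncated print len can exceed PAGE_SIZE and the next PAGE_SIZE - len underflows to a huge size_t. scnprintf() returns the bytes actually stored, keeping the arithmetic safe. Minimal sketch:

#include <linux/kernel.h>

static size_t example_fill(char *buf, size_t size)
{
	size_t len = 0;
	int i;

	for (i = 0; i < 16; i++)
		/*
		 * scnprintf() reports what was actually written, so len
		 * never exceeds size and size - len cannot wrap around.
		 */
		len += scnprintf(buf + len, size - len, "0x%.4x ", i);

	return len;
}
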
diff --git a/drivers/platform/x86/surface3_power.c b/drivers/platform/x86/surface3_power.c
new file mode 100644
index 000000000000..cc4f9cba6856
--- /dev/null
+++ b/drivers/platform/x86/surface3_power.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Support for the power IC on the Surface 3 tablet.
+ *
+ * (C) Copyright 2016-2018 Red Hat, Inc
+ * (C) Copyright 2016-2018 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * (C) Copyright 2016 Stephen Just <stephenjust@gmail.com>
+ *
+ * This driver has been reverse-engineered by parsing the DSDT of the Surface 3
+ * and looking at the registers of the chips.
+ *
+ * The DSDT allowed us to find out that:
+ * - the driver is required for the ACPI BAT0 device to communicate with the
+ *   chip through an operation region.
+ * - the various defines for the operation region functions to communicate
+ *   with this driver
+ * - the DSM 3f99e367-6220-4955-8b0f-06ef2ae79412 allows triggering ACPI
+ *   events to BAT0 (the code is all available in the DSDT).
+ *
+ * Further findings regarding the 2 chips declared in the MSHW0011 are:
+ * - there are 2 chips declared:
+ * . 0x22 seems to control the ADP1 line status (and probably the charger)
+ * . 0x55 controls the battery directly
+ * - the battery chip uses an SMBus protocol (using plain SMBus allows
+ *   non-destructive commands):
+ *   . the commands/registers used are in the range 0x00..0x7F
+ *   . if bit 8 (0x80) is set in the SMBus command, the returned value is the
+ *     same as when it is not set; there is a high chance this bit is the
+ *     read/write bit
+ *   . the various registers' semantics have been deduced by observing
+ *     register dumps.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/freezer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <asm/unaligned.h>
+
+#define SURFACE_3_POLL_INTERVAL (2 * HZ)
+#define SURFACE_3_STRLEN 10
+
+struct mshw0011_data {
+ struct i2c_client *adp1;
+ struct i2c_client *bat0;
+ unsigned short notify_mask;
+ struct task_struct *poll_task;
+ bool kthread_running;
+
+ bool charging;
+ bool bat_charging;
+ u8 trip_point;
+ s32 full_capacity;
+};
+
+struct mshw0011_handler_data {
+ struct acpi_connection_info info;
+ struct i2c_client *client;
+};
+
+struct bix {
+ u32 revision;
+ u32 power_unit;
+ u32 design_capacity;
+ u32 last_full_charg_capacity;
+ u32 battery_technology;
+ u32 design_voltage;
+ u32 design_capacity_of_warning;
+ u32 design_capacity_of_low;
+ u32 cycle_count;
+ u32 measurement_accuracy;
+ u32 max_sampling_time;
+ u32 min_sampling_time;
+ u32 max_average_interval;
+ u32 min_average_interval;
+ u32 battery_capacity_granularity_1;
+ u32 battery_capacity_granularity_2;
+ char model[SURFACE_3_STRLEN];
+ char serial[SURFACE_3_STRLEN];
+ char type[SURFACE_3_STRLEN];
+ char OEM[SURFACE_3_STRLEN];
+} __packed;
+
+struct bst {
+ u32 battery_state;
+ s32 battery_present_rate;
+ u32 battery_remaining_capacity;
+ u32 battery_present_voltage;
+} __packed;
+
+struct gsb_command {
+ u8 arg0;
+ u8 arg1;
+ u8 arg2;
+} __packed;
+
+struct gsb_buffer {
+ u8 status;
+ u8 len;
+ u8 ret;
+ union {
+ struct gsb_command cmd;
+ struct bst bst;
+ struct bix bix;
+ } __packed;
+} __packed;
+
+#define ACPI_BATTERY_STATE_DISCHARGING BIT(0)
+#define ACPI_BATTERY_STATE_CHARGING BIT(1)
+#define ACPI_BATTERY_STATE_CRITICAL BIT(2)
+
+#define MSHW0011_CMD_DEST_BAT0 0x01
+#define MSHW0011_CMD_DEST_ADP1 0x03
+
+#define MSHW0011_CMD_BAT0_STA 0x01
+#define MSHW0011_CMD_BAT0_BIX 0x02
+#define MSHW0011_CMD_BAT0_BCT 0x03
+#define MSHW0011_CMD_BAT0_BTM 0x04
+#define MSHW0011_CMD_BAT0_BST 0x05
+#define MSHW0011_CMD_BAT0_BTP 0x06
+#define MSHW0011_CMD_ADP1_PSR 0x07
+#define MSHW0011_CMD_BAT0_PSOC 0x09
+#define MSHW0011_CMD_BAT0_PMAX 0x0a
+#define MSHW0011_CMD_BAT0_PSRC 0x0b
+#define MSHW0011_CMD_BAT0_CHGI 0x0c
+#define MSHW0011_CMD_BAT0_ARTG 0x0d
+
+#define MSHW0011_NOTIFY_GET_VERSION 0x00
+#define MSHW0011_NOTIFY_ADP1 0x01
+#define MSHW0011_NOTIFY_BAT0_BST 0x02
+#define MSHW0011_NOTIFY_BAT0_BIX 0x05
+
+#define MSHW0011_ADP1_REG_PSR 0x04
+
+#define MSHW0011_BAT0_REG_CAPACITY 0x0c
+#define MSHW0011_BAT0_REG_FULL_CHG_CAPACITY 0x0e
+#define MSHW0011_BAT0_REG_DESIGN_CAPACITY 0x40
+#define MSHW0011_BAT0_REG_VOLTAGE 0x08
+#define MSHW0011_BAT0_REG_RATE 0x14
+#define MSHW0011_BAT0_REG_OEM 0x45
+#define MSHW0011_BAT0_REG_TYPE 0x4e
+#define MSHW0011_BAT0_REG_SERIAL_NO 0x56
+#define MSHW0011_BAT0_REG_CYCLE_CNT 0x6e
+
+#define MSHW0011_EV_2_5_MASK GENMASK(8, 0)
+
+/* 3f99e367-6220-4955-8b0f-06ef2ae79412 */
+static const guid_t mshw0011_guid =
+ GUID_INIT(0x3F99E367, 0x6220, 0x4955, 0x8B, 0x0F, 0x06, 0xEF,
+ 0x2A, 0xE7, 0x94, 0x12);
+
+static int
+mshw0011_notify(struct mshw0011_data *cdata, u8 arg1, u8 arg2,
+ unsigned int *ret_value)
+{
+ union acpi_object *obj;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ unsigned int i;
+
+ handle = ACPI_HANDLE(&cdata->adp1->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return -ENODEV;
+
+ obj = acpi_evaluate_dsm_typed(handle, &mshw0011_guid, arg1, arg2, NULL,
+ ACPI_TYPE_BUFFER);
+ if (!obj) {
+ dev_err(&cdata->adp1->dev, "device _DSM execution failed\n");
+ return -ENODEV;
+ }
+
+ *ret_value = 0;
+ for (i = 0; i < obj->buffer.length; i++)
+ *ret_value |= obj->buffer.pointer[i] << (i * 8);
+
+ ACPI_FREE(obj);
+ return 0;
+}
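The loop above folds the _DSM reply buffer into a host integer least-significant byte first, so a two-byte reply of { 0xff, 0x01 } comes back as 0x01ff. The same fold in isolation, as a sketch (the helper name is made up, not part of the patch):

/* Little-endian fold over an ACPI reply buffer (illustrative only). */
static unsigned int mshw0011_le_fold(const u8 *buf, unsigned int len)
{
	unsigned int value = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		value |= buf[i] << (i * 8);	/* { 0xff, 0x01 } -> 0x01ff */

	return value;
}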
+
+static const struct bix default_bix = {
+ .revision = 0x00,
+ .power_unit = 0x01,
+ .design_capacity = 0x1dca,
+ .last_full_charg_capacity = 0x1dca,
+ .battery_technology = 0x01,
+ .design_voltage = 0x10df,
+ .design_capacity_of_warning = 0x8f,
+ .design_capacity_of_low = 0x47,
+ .cycle_count = 0xffffffff,
+ .measurement_accuracy = 0x00015f90,
+ .max_sampling_time = 0x03e8,
+ .min_sampling_time = 0x03e8,
+ .max_average_interval = 0x03e8,
+ .min_average_interval = 0x03e8,
+ .battery_capacity_granularity_1 = 0x45,
+ .battery_capacity_granularity_2 = 0x11,
+ .model = "P11G8M",
+ .serial = "",
+ .type = "LION",
+ .OEM = "",
+};
+
+static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
+{
+ struct i2c_client *client = cdata->bat0;
+ char buf[SURFACE_3_STRLEN];
+ int ret;
+
+ *bix = default_bix;
+
+ /* get design capacity */
+ ret = i2c_smbus_read_word_data(client,
+ MSHW0011_BAT0_REG_DESIGN_CAPACITY);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading design capacity: %d\n",
+ ret);
+ return ret;
+ }
+ bix->design_capacity = ret;
+
+ /* get last full charge capacity */
+ ret = i2c_smbus_read_word_data(client,
+ MSHW0011_BAT0_REG_FULL_CHG_CAPACITY);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Error reading last full charge capacity: %d\n", ret);
+ return ret;
+ }
+ bix->last_full_charg_capacity = ret;
+
+ /* get serial number */
+ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
+ sizeof(buf), buf);
+ if (ret != sizeof(buf)) {
+ dev_err(&client->dev, "Error reading serial no: %d\n", ret);
+ return ret;
+ }
+ snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
+
+ /* get cycle count */
+ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading cycle count: %d\n", ret);
+ return ret;
+ }
+ bix->cycle_count = ret;
+
+ /* get OEM name */
+ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_OEM,
+ 4, buf);
+ if (ret != 4) {
+ dev_err(&client->dev, "Error reading cycle count: %d\n", ret);
+ return ret;
+ }
+ snprintf(bix->OEM, ARRAY_SIZE(bix->OEM), "%3pE", buf);
+
+ return 0;
+}
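Every numeric field above comes from an SMBus word read, which returns either a negative errno or the 16-bit register value; a hypothetical helper capturing the pattern the function repeats (not part of the patch):

/* Hypothetical helper: one word-register read with the same error
 * convention as the calls in mshw0011_bix() above.
 */
static int mshw0011_read_word(struct i2c_client *client, u8 reg, u32 *out)
{
	int ret = i2c_smbus_read_word_data(client, reg);

	if (ret < 0)
		return ret;	/* negative errno from the SMBus core */

	*out = ret;		/* 16-bit register value */
	return 0;
}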
+
+static int mshw0011_bst(struct mshw0011_data *cdata, struct bst *bst)
+{
+ struct i2c_client *client = cdata->bat0;
+ int rate, capacity, voltage, state;
+ s16 tmp;
+
+ rate = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_RATE);
+ if (rate < 0)
+ return rate;
+
+ capacity = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CAPACITY);
+ if (capacity < 0)
+ return capacity;
+
+ voltage = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_VOLTAGE);
+ if (voltage < 0)
+ return voltage;
+
+ tmp = rate;
+ bst->battery_present_rate = abs((s32)tmp);
+
+ state = 0;
+ if ((s32)tmp > 0)
+ state |= ACPI_BATTERY_STATE_CHARGING;
+ else if ((s32)tmp < 0)
+ state |= ACPI_BATTERY_STATE_DISCHARGING;
+ bst->battery_state = state;
+
+ bst->battery_remaining_capacity = capacity;
+ bst->battery_present_voltage = voltage;
+
+ return 0;
+}
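The rate register is a two's-complement 16-bit value, which is why the word read is narrowed to s16 before the sign test: raw 0x00c8 means charging at 200 mA, raw 0xff38 discharging at 200 mA. A sketch of just that sign handling (values illustrative, helper name made up):

/* Sign convention used by mshw0011_bst() above (sketch). */
static u32 mshw0011_rate_magnitude(u16 raw, bool *charging)
{
	s16 rate = (s16)raw;	/* 0x00c8 -> +200, 0xff38 -> -200 */

	*charging = rate > 0;
	return abs((s32)rate);	/* magnitude reported in _BST */
}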
+
+static int mshw0011_adp_psr(struct mshw0011_data *cdata)
+{
+ return i2c_smbus_read_byte_data(cdata->adp1, MSHW0011_ADP1_REG_PSR);
+}
+
+static int mshw0011_isr(struct mshw0011_data *cdata)
+{
+ struct bst bst;
+ struct bix bix;
+ int ret;
+ bool status, bat_status;
+
+ ret = mshw0011_adp_psr(cdata);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ if (status != cdata->charging)
+ mshw0011_notify(cdata, cdata->notify_mask,
+ MSHW0011_NOTIFY_ADP1, &ret);
+
+ cdata->charging = status;
+
+ ret = mshw0011_bst(cdata, &bst);
+ if (ret < 0)
+ return ret;
+
+ bat_status = bst.battery_state;
+ if (bat_status != cdata->bat_charging)
+ mshw0011_notify(cdata, cdata->notify_mask,
+ MSHW0011_NOTIFY_BAT0_BST, &ret);
+
+ cdata->bat_charging = bat_status;
+
+ ret = mshw0011_bix(cdata, &bix);
+ if (ret < 0)
+ return ret;
+
+ if (bix.last_full_charg_capacity != cdata->full_capacity)
+ mshw0011_notify(cdata, cdata->notify_mask,
+ MSHW0011_NOTIFY_BAT0_BIX, &ret);
+
+ cdata->full_capacity = bix.last_full_charg_capacity;
+
+ return 0;
+}
+
+static int mshw0011_poll_task(void *data)
+{
+ struct mshw0011_data *cdata = data;
+ int ret = 0;
+
+ cdata->kthread_running = true;
+
+ set_freezable();
+
+ while (!kthread_should_stop()) {
+ schedule_timeout_interruptible(SURFACE_3_POLL_INTERVAL);
+ try_to_freeze();
+ ret = mshw0011_isr(cdata);
+ if (ret)
+ break;
+ }
+
+ cdata->kthread_running = false;
+ return ret;
+}
+
+static acpi_status
+mshw0011_space_handler(u32 function, acpi_physical_address command,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
+ struct mshw0011_handler_data *data = handler_context;
+ struct acpi_connection_info *info = &data->info;
+ struct acpi_resource_i2c_serialbus *sb;
+ struct i2c_client *client = data->client;
+ struct mshw0011_data *cdata = i2c_get_clientdata(client);
+ struct acpi_resource *ares;
+ u32 accessor_type = function >> 16;
+ acpi_status ret;
+ int status = 1;
+
+ ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
+ if (ACPI_FAILURE(ret))
+ return ret;
+
+ if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ if (gsb->cmd.arg0 == MSHW0011_CMD_DEST_ADP1 &&
+ gsb->cmd.arg1 == MSHW0011_CMD_ADP1_PSR) {
+ status = mshw0011_adp_psr(cdata);
+ if (status >= 0) {
+ ret = AE_OK;
+ goto out;
+ } else {
+ ret = AE_ERROR;
+ goto err;
+ }
+ }
+
+ if (gsb->cmd.arg0 != MSHW0011_CMD_DEST_BAT0) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ switch (gsb->cmd.arg1) {
+ case MSHW0011_CMD_BAT0_STA:
+ break;
+ case MSHW0011_CMD_BAT0_BIX:
+ ret = mshw0011_bix(cdata, &gsb->bix);
+ break;
+ case MSHW0011_CMD_BAT0_BTP:
+ cdata->trip_point = gsb->cmd.arg2;
+ break;
+ case MSHW0011_CMD_BAT0_BST:
+ ret = mshw0011_bst(cdata, &gsb->bst);
+ break;
+ default:
+ dev_info(&cdata->bat0->dev, "command(0x%02x) is not supported.\n", gsb->cmd.arg1);
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ out:
+ gsb->ret = status;
+ gsb->status = 0;
+
+ err:
+ ACPI_FREE(ares);
+ return ret;
+}
+
+static int mshw0011_install_space_handler(struct i2c_client *client)
+{
+ acpi_handle handle;
+ struct mshw0011_handler_data *data;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct mshw0011_handler_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ status = acpi_bus_attach_private_data(handle, (void *)data);
+ if (ACPI_FAILURE(status)) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ status = acpi_install_address_space_handler(handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &mshw0011_space_handler,
+ NULL,
+ data);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&client->dev, "Error installing i2c space handler\n");
+ acpi_bus_detach_private_data(handle);
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ acpi_walk_dep_device_list(handle);
+ return 0;
+}
+
+static void mshw0011_remove_space_handler(struct i2c_client *client)
+{
+ struct mshw0011_handler_data *data;
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle)
+ return;
+
+ acpi_remove_address_space_handler(handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &mshw0011_space_handler);
+
+ status = acpi_bus_get_private_data(handle, (void **)&data);
+ if (ACPI_SUCCESS(status))
+ kfree(data);
+
+ acpi_bus_detach_private_data(handle);
+}
+
+static int mshw0011_probe(struct i2c_client *client)
+{
+ struct i2c_board_info board_info;
+ struct device *dev = &client->dev;
+ struct i2c_client *bat0;
+ struct mshw0011_data *data;
+ int error, mask;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->adp1 = client;
+ i2c_set_clientdata(client, data);
+
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
+
+ bat0 = i2c_acpi_new_device(dev, 1, &board_info);
+ if (IS_ERR(bat0))
+ return PTR_ERR(bat0);
+
+ data->bat0 = bat0;
+ i2c_set_clientdata(bat0, data);
+
+ error = mshw0011_notify(data, 1, MSHW0011_NOTIFY_GET_VERSION, &mask);
+ if (error)
+ goto out_err;
+
+ data->notify_mask = mask == MSHW0011_EV_2_5_MASK;
+
+ data->poll_task = kthread_run(mshw0011_poll_task, data, "mshw0011_adp");
+ if (IS_ERR(data->poll_task)) {
+ error = PTR_ERR(data->poll_task);
+ dev_err(&client->dev, "Unable to run kthread err %d\n", error);
+ goto out_err;
+ }
+
+ error = mshw0011_install_space_handler(client);
+ if (error)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ if (data->kthread_running)
+ kthread_stop(data->poll_task);
+ i2c_unregister_device(data->bat0);
+ return error;
+}
+
+static int mshw0011_remove(struct i2c_client *client)
+{
+ struct mshw0011_data *cdata = i2c_get_clientdata(client);
+
+ mshw0011_remove_space_handler(client);
+
+ if (cdata->kthread_running)
+ kthread_stop(cdata->poll_task);
+
+ i2c_unregister_device(cdata->bat0);
+
+ return 0;
+}
+
+static const struct acpi_device_id mshw0011_acpi_match[] = {
+ { "MSHW0011", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, mshw0011_acpi_match);
+
+static struct i2c_driver mshw0011_driver = {
+ .probe_new = mshw0011_probe,
+ .remove = mshw0011_remove,
+ .driver = {
+ .name = "mshw0011",
+ .acpi_match_table = mshw0011_acpi_match,
+ },
+};
+module_i2c_driver(mshw0011_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_DESCRIPTION("mshw0011 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 8eaadbaf8ffa..0f704484ae1d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -9548,7 +9548,7 @@ static ssize_t tpacpi_battery_store(int what,
if (!battery_info.batteries[battery].start_support)
return -ENODEV;
/* valid values are [0, 99] */
- if (value < 0 || value > 99)
+ if (value > 99)
return -EINVAL;
if (value > battery_info.batteries[battery].charge_stop)
return -EINVAL;
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 93177e6e5ecd..6ec8923dec1a 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -11,12 +11,15 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/dmi.h>
+#include <linux/efi_embedded_fw.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/string.h>
struct ts_dmi_data {
+ /* The EFI embedded-fw code expects this to be the first member! */
+ struct efi_embedded_fw_desc embedded_fw;
const char *acpi_name;
const struct property_entry *properties;
};
@@ -64,6 +67,15 @@ static const struct property_entry chuwi_hi8_pro_props[] = {
};
static const struct ts_dmi_data chuwi_hi8_pro_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3680-chuwi-hi8-pro.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 39864,
+ .sha256 = { 0xc0, 0x88, 0xc5, 0xef, 0xd1, 0x70, 0x77, 0x59,
+ 0x4e, 0xe9, 0xc4, 0xd8, 0x2e, 0xcd, 0xbf, 0x95,
+ 0x32, 0xd9, 0x03, 0x28, 0x0d, 0x48, 0x9f, 0x92,
+ 0x35, 0x37, 0xf6, 0x8b, 0x2a, 0xe4, 0x73, 0xff },
+ },
.acpi_name = "MSSL1680:00",
.properties = chuwi_hi8_pro_props,
};
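The new embedded_fw member gives the EFI embedded-firmware code enough to locate the touchscreen firmware in boot-services memory: an 8-byte prefix to anchor the scan, the exact length, and a SHA-256 digest to confirm the match. Roughly (a sketch; the matcher and hash-helper names below are assumptions, the real logic lives in the efi_embedded_fw code, not in this file):

/* Illustrative matcher: does this memory segment hold the firmware
 * described by desc?
 */
static bool fw_desc_matches(const u8 *mem, size_t size,
			    const struct efi_embedded_fw_desc *desc)
{
	u8 digest[32];

	if (size < desc->length)
		return false;
	if (memcmp(mem, desc->prefix, sizeof(desc->prefix)) != 0)
		return false;

	compute_sha256(mem, desc->length, digest);	/* hypothetical helper */
	return memcmp(digest, desc->sha256, sizeof(digest)) == 0;
}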
@@ -120,6 +132,18 @@ static const struct ts_dmi_data chuwi_vi8_data = {
.properties = chuwi_vi8_props,
};
+static const struct ts_dmi_data chuwi_vi8_plus_data = {
+ .embedded_fw = {
+ .name = "chipone/icn8505-HAMP0002.fw",
+ .prefix = { 0xb0, 0x07, 0x00, 0x00, 0xe4, 0x07, 0x00, 0x00 },
+ .length = 35012,
+ .sha256 = { 0x93, 0xe5, 0x49, 0xe0, 0xb6, 0xa2, 0xb4, 0xb3,
+ 0x88, 0x96, 0x34, 0x97, 0x5e, 0xa8, 0x13, 0x78,
+ 0x72, 0x98, 0xb8, 0x29, 0xeb, 0x5c, 0xa7, 0xf1,
+ 0x25, 0x13, 0x43, 0xf4, 0x30, 0x7c, 0xfc, 0x7c },
+ },
+};
+
static const struct property_entry chuwi_vi10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
@@ -181,6 +205,15 @@ static const struct property_entry cube_iwork8_air_props[] = {
};
static const struct ts_dmi_data cube_iwork8_air_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3670-cube-iwork8-air.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 38808,
+ .sha256 = { 0xff, 0x62, 0x2d, 0xd1, 0x8a, 0x78, 0x04, 0x7b,
+ 0x33, 0x06, 0xb0, 0x4f, 0x7f, 0x02, 0x08, 0x9c,
+ 0x96, 0xd4, 0x9f, 0x04, 0xe1, 0x47, 0x25, 0x25,
+ 0x60, 0x77, 0x41, 0x33, 0xeb, 0x12, 0x82, 0xfc },
+ },
.acpi_name = "MSSL1680:00",
.properties = cube_iwork8_air_props,
};
@@ -387,6 +420,15 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
};
static const struct ts_dmi_data onda_v80_plus_v3_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3676-onda-v80-plus-v3.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 37224,
+ .sha256 = { 0x8f, 0xbd, 0x8f, 0x0c, 0x6b, 0xba, 0x5b, 0xf5,
+ 0xa3, 0xc7, 0xa3, 0xc0, 0x4f, 0xcd, 0xdf, 0x32,
+ 0xcc, 0xe4, 0x70, 0xd6, 0x46, 0x9c, 0xd7, 0xa7,
+ 0x4b, 0x82, 0x3f, 0xab, 0xc7, 0x90, 0xea, 0x23 },
+ },
.acpi_name = "MSSL1680:00",
.properties = onda_v80_plus_v3_props,
};
@@ -449,6 +491,15 @@ static const struct property_entry pipo_w2s_props[] = {
};
static const struct ts_dmi_data pipo_w2s_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-pipo-w2s.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 39072,
+ .sha256 = { 0xd0, 0x58, 0xc4, 0x7d, 0x55, 0x2d, 0x62, 0x18,
+ 0xd1, 0x6a, 0x71, 0x73, 0x0b, 0x3f, 0xbe, 0x60,
+ 0xbb, 0x45, 0x8c, 0x52, 0x27, 0xb7, 0x18, 0xf4,
+ 0x31, 0x00, 0x6a, 0x49, 0x76, 0xd8, 0x7c, 0xd3 },
+ },
.acpi_name = "MSSL1680:00",
.properties = pipo_w2s_props,
};
@@ -641,7 +692,7 @@ static const struct ts_dmi_data trekstor_surftab_wintron70_data = {
};
/* NOTE: Please keep this table sorted alphabetically */
-static const struct dmi_system_id touchscreen_dmi_table[] = {
+const struct dmi_system_id touchscreen_dmi_table[] = {
{
/* Chuwi Hi8 */
.driver_data = (void *)&chuwi_hi8_data,
@@ -704,6 +755,15 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Vi8 Plus (CWI519) */
+ .driver_data = (void *)&chuwi_vi8_plus_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "D2D3_Vi8A1"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
/* Chuwi Vi10 (CWI505) */
.driver_data = (void *)&chuwi_vi10_data,
.matches = {
@@ -1106,6 +1166,9 @@ static int __init ts_dmi_init(void)
return 0; /* Not an error */
ts_data = dmi_id->driver_data;
+ /* Some dmi table entries only provide an efi_embedded_fw_desc */
+ if (!ts_data->properties)
+ return 0;
error = bus_register_notifier(&i2c_bus_type, &ts_dmi_notifier);
if (error)
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index dc2e966a5c25..941739db7199 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -29,6 +29,7 @@
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/wmi.h>
+#include <linux/fs.h>
#include <uapi/linux/wmi.h>
ACPI_MODULE_NAME("wmi");
diff --git a/drivers/platform/x86/xiaomi-wmi.c b/drivers/platform/x86/xiaomi-wmi.c
index 601cbb282f54..54a2546bb93b 100644
--- a/drivers/platform/x86/xiaomi-wmi.c
+++ b/drivers/platform/x86/xiaomi-wmi.c
@@ -23,7 +23,7 @@ struct xiaomi_wmi {
unsigned int key_code;
};
-int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
+static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct xiaomi_wmi *data;
@@ -48,7 +48,7 @@ int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
return input_register_device(data->input_dev);
}
-void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
+static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
{
struct xiaomi_wmi *data;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 513efe8e7628..890380302080 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -248,7 +248,7 @@ config SYSCON_REBOOT_MODE
action according to the mode.
config POWER_RESET_SC27XX
- bool "Spreadtrum SC27xx PMIC power-off driver"
+ tristate "Spreadtrum SC27xx PMIC power-off driver"
depends on MFD_SC27XX_PMIC || COMPILE_TEST
help
This driver supports powering off a system through
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index d94e3267c3b6..3ff9d93a5226 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -35,6 +35,7 @@
#define AT91_RSTC_MR 0x08 /* Reset Controller Mode Register */
#define AT91_RSTC_URSTEN BIT(0) /* User Reset Enable */
+#define AT91_RSTC_URSTASYNC BIT(2) /* User Reset Asynchronous Control */
#define AT91_RSTC_URSTIEN BIT(4) /* User Reset Interrupt Enable */
#define AT91_RSTC_ERSTL GENMASK(11, 8) /* External Reset Length */
@@ -49,105 +50,63 @@ enum reset_type {
RESET_TYPE_ULP2 = 8,
};
-static void __iomem *at91_ramc_base[2], *at91_rstc_base;
-static struct clk *sclk;
+struct at91_reset {
+ void __iomem *rstc_base;
+ void __iomem *ramc_base[2];
+ struct clk *sclk;
+ struct notifier_block nb;
+ u32 args;
+ u32 ramc_lpr;
+};
/*
* unless the SDRAM is cleanly shutdown before we hit the
* reset register it can be left driving the data bus and
* killing the chance of a subsequent boot from NAND
*/
-static int at91sam9260_restart(struct notifier_block *this, unsigned long mode,
- void *cmd)
+static int at91_reset(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
- asm volatile(
- /* Align to cache lines */
- ".balign 32\n\t"
-
- /* Disable SDRAM accesses */
- "str %2, [%0, #" __stringify(AT91_SDRAMC_TR) "]\n\t"
-
- /* Power down SDRAM */
- "str %3, [%0, #" __stringify(AT91_SDRAMC_LPR) "]\n\t"
-
- /* Reset CPU */
- "str %4, [%1, #" __stringify(AT91_RSTC_CR) "]\n\t"
-
- "b .\n\t"
- :
- : "r" (at91_ramc_base[0]),
- "r" (at91_rstc_base),
- "r" (1),
- "r" cpu_to_le32(AT91_SDRAMC_LPCB_POWER_DOWN),
- "r" cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST));
+ struct at91_reset *reset = container_of(this, struct at91_reset, nb);
- return NOTIFY_DONE;
-}
-
-static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode,
- void *cmd)
-{
asm volatile(
- /*
- * Test wether we have a second RAM controller to care
- * about.
- *
- * First, test that we can dereference the virtual address.
- */
- "cmp %1, #0\n\t"
- "beq 1f\n\t"
-
- /* Then, test that the RAM controller is enabled */
- "ldr r0, [%1]\n\t"
- "cmp r0, #0\n\t"
-
/* Align to cache lines */
".balign 32\n\t"
/* Disable SDRAM0 accesses */
- "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+ " tst %0, #0\n\t"
+ " beq 1f\n\t"
+ " str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
/* Power down SDRAM0 */
- " str %4, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+ " str %4, [%0, %6]\n\t"
/* Disable SDRAM1 accesses */
+ "1: tst %1, #0\n\t"
+ " beq 2f\n\t"
" strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
/* Power down SDRAM1 */
- " strne %4, [%1, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+ " strne %4, [%1, %6]\n\t"
/* Reset CPU */
- " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
+ "2: str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
" b .\n\t"
:
- : "r" (at91_ramc_base[0]),
- "r" (at91_ramc_base[1]),
- "r" (at91_rstc_base),
+ : "r" (reset->ramc_base[0]),
+ "r" (reset->ramc_base[1]),
+ "r" (reset->rstc_base),
"r" (1),
"r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN),
- "r" cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST)
- : "r0");
-
- return NOTIFY_DONE;
-}
-
-static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
- void *cmd)
-{
- writel(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
- at91_rstc_base);
+ "r" (reset->args),
+ "r" (reset->ramc_lpr)
+ : "r4");
return NOTIFY_DONE;
}
-static int samx7_restart(struct notifier_block *this, unsigned long mode,
- void *cmd)
-{
- writel(AT91_RSTC_KEY | AT91_RSTC_PROCRST, at91_rstc_base);
- return NOTIFY_DONE;
-}
-
-static void __init at91_reset_status(struct platform_device *pdev)
+static void __init at91_reset_status(struct platform_device *pdev,
+ void __iomem *base)
{
const char *reason;
- u32 reg = readl(at91_rstc_base + AT91_RSTC_SR);
+ u32 reg = readl(base + AT91_RSTC_SR);
switch ((reg & AT91_RSTC_RSTTYP) >> 8) {
case RESET_TYPE_GENERAL:
@@ -183,42 +142,68 @@ static void __init at91_reset_status(struct platform_device *pdev)
}
static const struct of_device_id at91_ramc_of_match[] = {
- { .compatible = "atmel,at91sam9260-sdramc", },
- { .compatible = "atmel,at91sam9g45-ddramc", },
+ {
+ .compatible = "atmel,at91sam9260-sdramc",
+ .data = (void *)AT91_SDRAMC_LPR,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-ddramc",
+ .data = (void *)AT91_DDRSDRC_LPR,
+ },
{ /* sentinel */ }
};
static const struct of_device_id at91_reset_of_match[] = {
- { .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart },
- { .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
- { .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
- { .compatible = "atmel,samx7-rstc", .data = samx7_restart },
- { .compatible = "microchip,sam9x60-rstc", .data = samx7_restart },
+ {
+ .compatible = "atmel,at91sam9260-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
+ AT91_RSTC_PROCRST),
+ },
+ {
+ .compatible = "atmel,at91sam9g45-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
+ AT91_RSTC_PROCRST)
+ },
+ {
+ .compatible = "atmel,sama5d3-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
+ AT91_RSTC_PROCRST)
+ },
+ {
+ .compatible = "atmel,samx7-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ },
+ {
+ .compatible = "microchip,sam9x60-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_reset_of_match);
-static struct notifier_block at91_restart_nb = {
- .priority = 192,
-};
-
static int __init at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
+ struct at91_reset *reset;
struct device_node *np;
int ret, idx = 0;
- at91_rstc_base = of_iomap(pdev->dev.of_node, 0);
- if (!at91_rstc_base) {
+ reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->rstc_base = of_iomap(pdev->dev.of_node, 0);
+
return -ENODEV;
}
if (!of_device_is_compatible(pdev->dev.of_node, "atmel,sama5d3-rstc")) {
/* we need to shutdown the ddr controller, so get ramc base */
- for_each_matching_node(np, at91_ramc_of_match) {
- at91_ramc_base[idx] = of_iomap(np, 0);
- if (!at91_ramc_base[idx]) {
+ for_each_matching_node_and_match(np, at91_ramc_of_match, &match) {
+ reset->ramc_lpr = (u32)match->data;
+ reset->ramc_base[idx] = of_iomap(np, 0);
+ if (!reset->ramc_base[idx]) {
dev_err(&pdev->dev, "Could not map ram controller address\n");
of_node_put(np);
return -ENODEV;
@@ -228,33 +213,46 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
- at91_restart_nb.notifier_call = match->data;
+ reset->nb.notifier_call = at91_reset;
+ reset->nb.priority = 192;
+ reset->args = (u32)match->data;
- sclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sclk))
- return PTR_ERR(sclk);
+ reset->sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(reset->sclk))
+ return PTR_ERR(reset->sclk);
- ret = clk_prepare_enable(sclk);
+ ret = clk_prepare_enable(reset->sclk);
if (ret) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
return ret;
}
- ret = register_restart_handler(&at91_restart_nb);
+ platform_set_drvdata(pdev, reset);
+
+ if (of_device_is_compatible(pdev->dev.of_node, "microchip,sam9x60-rstc")) {
+ u32 val = readl(reset->rstc_base + AT91_RSTC_MR);
+
+ writel(AT91_RSTC_KEY | AT91_RSTC_URSTASYNC | val,
+ reset->rstc_base + AT91_RSTC_MR);
+ }
+
+ ret = register_restart_handler(&reset->nb);
if (ret) {
- clk_disable_unprepare(sclk);
+ clk_disable_unprepare(reset->sclk);
return ret;
}
- at91_reset_status(pdev);
+ at91_reset_status(pdev, reset->rstc_base);
return 0;
}
static int __exit at91_reset_remove(struct platform_device *pdev)
{
- unregister_restart_handler(&at91_restart_nb);
- clk_disable_unprepare(sclk);
+ struct at91_reset *reset = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&reset->nb);
+ clk_disable_unprepare(reset->sclk);
return 0;
}
diff --git a/drivers/power/reset/sc27xx-poweroff.c b/drivers/power/reset/sc27xx-poweroff.c
index 29fb08b8faa0..90287c31992c 100644
--- a/drivers/power/reset/sc27xx-poweroff.c
+++ b/drivers/power/reset/sc27xx-poweroff.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
@@ -13,6 +14,8 @@
#define SC27XX_PWR_PD_HW 0xc2c
#define SC27XX_PWR_OFF_EN BIT(0)
+#define SC27XX_SLP_CTRL 0xdf0
+#define SC27XX_LDO_XTL_EN BIT(3)
static struct regmap *regmap;
@@ -27,10 +30,13 @@ static struct regmap *regmap;
*/
static void sc27xx_poweroff_shutdown(void)
{
-#ifdef CONFIG_PM_SLEEP_SMP
- int cpu = smp_processor_id();
+#ifdef CONFIG_HOTPLUG_CPU
+ int cpu;
- freeze_secondary_cpus(cpu);
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id())
+ remove_cpu(cpu);
+ }
#endif
}
@@ -40,6 +46,9 @@ static struct syscore_ops poweroff_syscore_ops = {
static void sc27xx_poweroff_do_poweroff(void)
{
+ /* Disable the external subsys connection's power first */
+ regmap_write(regmap, SC27XX_SLP_CTRL, SC27XX_LDO_XTL_EN);
+
regmap_write(regmap, SC27XX_PWR_PD_HW, SC27XX_PWR_OFF_EN);
}
@@ -63,4 +72,8 @@ static struct platform_driver sc27xx_poweroff_driver = {
.name = "sc27xx-poweroff",
},
};
-builtin_platform_driver(sc27xx_poweroff_driver);
+module_platform_driver(sc27xx_poweroff_driver);
+
+MODULE_DESCRIPTION("Power off driver for SC27XX PMIC Device");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 9a5591ab90d0..f3424fdce341 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -480,7 +480,7 @@ config CHARGER_GPIO
called gpio-charger.
config CHARGER_MANAGER
- bool "Battery charger manager for multiple chargers"
+ tristate "Battery charger manager for multiple chargers"
depends on REGULATOR
select EXTCON
help
@@ -659,7 +659,7 @@ config CHARGER_RT9455
config CHARGER_CROS_USBPD
tristate "ChromeOS EC based USBPD charger"
- depends on CROS_EC
+ depends on CROS_USBPD_NOTIFY
default n
help
Say Y here to enable ChromeOS EC based USBPD charger
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index f69550d64f09..9469fe182d02 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -404,7 +404,7 @@ disable_otp:
}
/**
- * ab8500_power_supply_changed - a wrapper with local extentions for
+ * ab8500_power_supply_changed - a wrapper with local extensions for
* power_supply_changed
* @di: pointer to the ab8500_charger structure
* @psy: pointer to power_supply_that have changed.
@@ -683,7 +683,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
/*
* Platform only supports USB 2.0.
* This means that charging current from USB source
- * is maximum 500 mA. Every occurence of USB_STAT_*_HOST_*
+ * is maximum 500 mA. Every occurrence of USB_STAT_*_HOST_*
* should set USB_CH_IP_CUR_LVL_0P5.
*/
@@ -1379,13 +1379,13 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
/*
* Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
- * will be triggered everytime we enable the VDD ADC supply.
+ * will be triggered every time we enable the VDD ADC supply.
* This will turn off charging for a short while.
* It can be avoided by having the supply on when
* there is a charger enabled. Normally the VDD ADC supply
- * is enabled everytime a GPADC conversion is triggered. We will
- * force it to be enabled from this driver to have
- * the GPADC module independant of the AB8500 chargers
+ * is enabled every time a GPADC conversion is triggered.
+ * We will force it to be enabled from this driver to have
+ * the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_ac) {
ret = regulator_enable(di->regu);
@@ -1455,7 +1455,7 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
if (is_ab8500_1p1_or_earlier(di->parent)) {
/*
* For ABB revision 1.0 and 1.1 there is a bug in the
- * watchdog logic. That means we have to continously
+ * watchdog logic. That means we have to continuously
* kick the charger watchdog even when no charger is
* connected. This is only valid once the AC charger
* has been enabled. This is a bug that is not handled
@@ -1552,13 +1552,13 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
/*
* Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
- * will be triggered everytime we enable the VDD ADC supply.
+ * will be triggered every time we enable the VDD ADC supply.
* This will turn off charging for a short while.
* It can be avoided by having the supply on when
* there is a charger enabled. Normally the VDD ADC supply
- * is enabled everytime a GPADC conversion is triggered. We will
- * force it to be enabled from this driver to have
- * the GPADC module independant of the AB8500 chargers
+ * is enabled every time a GPADC conversion is triggered.
+ * We will force it to be enabled from this driver to have
+ * the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_usb) {
ret = regulator_enable(di->regu);
@@ -1582,7 +1582,10 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
return -ENXIO;
}
- /* ChVoltLevel: max voltage upto which battery can be charged */
+ /*
+ * ChVoltLevel: max voltage up to which battery can be
+ * charged
+ */
ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
if (ret) {
@@ -2014,7 +2017,7 @@ static void ab8500_charger_check_hw_failure_work(struct work_struct *work)
* Work queue function for kicking the charger watchdog.
*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
- * logic. That means we have to continously kick the charger
+ * logic. That means we have to continuously kick the charger
* watchdog even when no charger is connected. This is only
* valid once the AC charger has been enabled. This is
* a bug that is not handled by the algorithm and the
@@ -2262,7 +2265,7 @@ static void ab8500_charger_usb_link_status_work(struct work_struct *work)
* Some chargers that breaks the USB spec is
* identified as invalid by AB8500 and it refuse
* to start the charging process. but by jumping
- * thru a few hoops it can be forced to start.
+ * through a few hoops it can be forced to start.
*/
if (is_ab8500(di->parent))
ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
@@ -3214,7 +3217,7 @@ static int ab8500_charger_resume(struct platform_device *pdev)
/*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
- * logic. That means we have to continously kick the charger
+ * logic. That means we have to continuously kick the charger
* watchdog even when no charger is connected. This is only
* valid once the AC charger has been enabled. This is
* a bug that is not handled by the algorithm and the
@@ -3483,7 +3486,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
- * logic. That means we have to continously kick the charger
+ * logic. That means we have to continuously kick the charger
* watchdog even when no charger is connected. This is only
* valid once the AC charger has been enabled. This is
* a bug that is not handled by the algorithm and the
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 1bbba6bba673..cf4c67b2d235 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -21,6 +21,7 @@
#include <linux/property.h>
#include <linux/mfd/axp20x.h>
#include <linux/extcon.h>
+#include <linux/dmi.h>
#define PS_STAT_VBUS_TRIGGER BIT(0)
#define PS_STAT_BAT_CHRG_DIR BIT(2)
@@ -545,6 +546,49 @@ out:
return IRQ_HANDLED;
}
+/*
+ * The HP Pavilion x2 10 series comes in a number of variants:
+ * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D"
+ * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
+ * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4"
+ *
+ * The variants with the AXP288 PMIC are all kinds of special:
+ *
+ * 1. All variants use a Type-C connector which the AXP288 does not support, so
+ * when using a Type-C charger it is not recognized. Unlike most AXP288 devices,
+ * this model actually has mostly working ACPI AC / Battery code, the ACPI code
+ * "solves" this by simply setting the input_current_limit to 3A.
+ * There are still some issues with the ACPI code, so we use this native driver,
+ * and to solve the charging not working (500mA is not enough) issue we hardcode
+ * the 3A input_current_limit like the ACPI code does.
+ *
+ * 2. If no charger is connected the machine boots with the vbus-path disabled.
+ * Normally this is done when a 5V boost converter is active to avoid the PMIC
+ * trying to charge from the 5V boost converter's output. This is done when
+ * an OTG host cable is inserted and the ID pin on the micro-B receptacle is
+ * pulled low and the ID pin has an ACPI event handler associated with it
+ * which re-enables the vbus-path when the ID pin is pulled high when the
+ * OTG host cable is removed. The Type-C connector has no ID pin, there is
+ * no ID pin handler and there appears to be no 5V boost converter, so we
+ * end up not charging because the vbus-path is disabled, until we unplug
+ * the charger which automatically clears the vbus-path disable bit and then
+ * on the second plug-in of the adapter we start charging. To solve the not
+ * charging on first charger plugin we unconditionally enable the vbus-path at
+ * probe on this model, which is safe since there is no 5V boost converter.
+ */
+static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
+ {
+ /*
+ * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
+ * Trail model has "HP", so we only match on product_name.
+ */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ },
+ },
+ {} /* Terminating entry */
+};
+
static void axp288_charger_extcon_evt_worker(struct work_struct *work)
{
struct axp288_chrg_info *info =
@@ -568,7 +612,11 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
}
/* Determine cable/charger type */
- if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
+ if (dmi_check_system(axp288_hp_x2_dmi_ids)) {
+ /* See comment above axp288_hp_x2_dmi_ids declaration */
+ dev_dbg(&info->pdev->dev, "HP X2 with Type-C, setting inlmt to 3A\n");
+ current_limit = 3000000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n");
current_limit = 500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
@@ -685,6 +733,13 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
return ret;
}
+ if (dmi_check_system(axp288_hp_x2_dmi_ids)) {
+ /* See comment above axp288_hp_x2_dmi_ids declaration */
+ ret = axp288_charger_vbus_path_select(info, true);
+ if (ret < 0)
+ return ret;
+ }
+
/* Read current charge voltage and current limit */
ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val);
if (ret < 0) {
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index e1bc4e6e6f30..f40fa0e63b6e 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -706,14 +706,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
{
/* Intel Cherry Trail Compute Stick, Windows version */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
},
},
{
/* Intel Cherry Trail Compute Stick, version without an OS */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
},
},
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 532f6e4fcafb..a1f00ae1c180 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -2,7 +2,7 @@
/*
* bq2415x charger driver
*
- * Copyright (C) 2011-2013 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2011-2013 Pali Rohár <pali@kernel.org>
*
* Datasheets:
* http://www.ti.com/product/bq24150
@@ -1788,6 +1788,6 @@ static struct i2c_driver bq2415x_driver = {
};
module_i2c_driver(bq2415x_driver);
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("bq2415x charger driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 195c18c2f426..942c92127b6d 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
* Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
- * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2011 Pali Rohár <pali@kernel.org>
* Copyright (C) 2017 Liam Breck <kernel@networkimprov.net>
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
@@ -1885,7 +1885,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
if (IS_ERR(di->bat)) {
- dev_err(di->dev, "failed to register battery\n");
+ if (PTR_ERR(di->bat) == -EPROBE_DEFER)
+ dev_dbg(di->dev, "failed to register battery, deferring probe\n");
+ else
+ dev_err(di->dev, "failed to register battery\n");
return PTR_ERR(di->bat);
}
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 30c3d37511c9..2a45e84447fe 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
@@ -517,32 +518,21 @@ static int cros_usbpd_charger_property_is_writeable(struct power_supply *psy,
}
static int cros_usbpd_charger_ec_event(struct notifier_block *nb,
- unsigned long queued_during_suspend,
+ unsigned long host_event,
void *_notify)
{
- struct cros_ec_device *ec_device;
- struct charger_data *charger;
- u32 host_event;
+ struct charger_data *charger = container_of(nb, struct charger_data,
+ notifier);
- charger = container_of(nb, struct charger_data, notifier);
- ec_device = charger->ec_device;
-
- host_event = cros_ec_get_host_event(ec_device);
- if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) {
- cros_usbpd_charger_power_changed(charger->ports[0]->psy);
- return NOTIFY_OK;
- } else {
- return NOTIFY_DONE;
- }
+ cros_usbpd_charger_power_changed(charger->ports[0]->psy);
+ return NOTIFY_OK;
}
static void cros_usbpd_charger_unregister_notifier(void *data)
{
struct charger_data *charger = data;
- struct cros_ec_device *ec_device = charger->ec_device;
- blocking_notifier_chain_unregister(&ec_device->event_notifier,
- &charger->notifier);
+ cros_usbpd_unregister_notify(&charger->notifier);
}
static int cros_usbpd_charger_probe(struct platform_device *pd)
@@ -676,21 +666,17 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
goto fail;
}
- if (ec_device->mkbp_event_supported) {
- /* Get PD events from the EC */
- charger->notifier.notifier_call = cros_usbpd_charger_ec_event;
- ret = blocking_notifier_chain_register(
- &ec_device->event_notifier,
- &charger->notifier);
- if (ret < 0) {
- dev_warn(dev, "failed to register notifier\n");
- } else {
- ret = devm_add_action_or_reset(dev,
- cros_usbpd_charger_unregister_notifier,
- charger);
- if (ret < 0)
- goto fail;
- }
+ /* Get PD events from the EC */
+ charger->notifier.notifier_call = cros_usbpd_charger_ec_event;
+ ret = cros_usbpd_register_notify(&charger->notifier);
+ if (ret < 0) {
+ dev_warn(dev, "failed to register notifier\n");
+ } else {
+ ret = devm_add_action_or_reset(dev,
+ cros_usbpd_charger_unregister_notifier,
+ charger);
+ if (ret < 0)
+ goto fail;
}
return 0;
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 2748715c4c75..dd3d93dfe3eb 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -148,7 +148,8 @@ static int ingenic_battery_probe(struct platform_device *pdev)
bat->battery = devm_power_supply_register(dev, desc, &psy_cfg);
if (IS_ERR(bat->battery)) {
- dev_err(dev, "Unable to register battery\n");
+ if (PTR_ERR(bat->battery) != -EPROBE_DEFER)
+ dev_err(dev, "Unable to register battery\n");
return PTR_ERR(bat->battery);
}
diff --git a/drivers/power/supply/isp1704_charger.c b/drivers/power/supply/isp1704_charger.c
index 4812ac1ff2df..b6efc454e4f0 100644
--- a/drivers/power/supply/isp1704_charger.c
+++ b/drivers/power/supply/isp1704_charger.c
@@ -3,7 +3,7 @@
* ISP1704 USB Charger Detection driver
*
* Copyright (C) 2010 Nokia Corporation
- * Copyright (C) 2012 - 2013 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2012 - 2013 Pali Rohár <pali@kernel.org>
*/
#include <linux/kernel.h>
diff --git a/drivers/power/supply/rx51_battery.c b/drivers/power/supply/rx51_battery.c
index 8548b639ff2f..6e488ecf4dcb 100644
--- a/drivers/power/supply/rx51_battery.c
+++ b/drivers/power/supply/rx51_battery.c
@@ -2,7 +2,7 @@
/*
* Nokia RX-51 battery driver
*
- * Copyright (C) 2012 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2012 Pali Rohár <pali@kernel.org>
*/
#include <linux/module.h>
@@ -278,6 +278,6 @@ static struct platform_driver rx51_battery_driver = {
module_platform_driver(rx51_battery_driver);
MODULE_ALIAS("platform:rx51-battery");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Nokia RX-51 battery driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index 469c83fdaa8e..a7c8a8453db1 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -614,6 +614,17 @@ static int sc27xx_fgu_get_property(struct power_supply *psy,
val->intval = data->total_cap * 1000;
break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ ret = sc27xx_fgu_get_clbcnt(data, &value);
+ if (ret)
+ goto error;
+
+ value = DIV_ROUND_CLOSEST(value * 10,
+ 36 * SC27XX_FGU_SAMPLE_HZ);
+ val->intval = sc27xx_fgu_adc_to_current(data, value);
+
+ break;
+
default:
ret = -EINVAL;
break;
@@ -682,6 +693,7 @@ static enum power_supply_property sc27xx_fgu_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_CALIBRATE,
+ POWER_SUPPLY_PROP_CHARGE_NOW
};
static const struct power_supply_desc sc27xx_fgu_desc = {
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 648ab80288c9..1bc49b2e12e8 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -726,10 +726,10 @@ twl4030_bci_mode_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(modes); i++)
if (mode == i)
- len += snprintf(buf+len, PAGE_SIZE-len,
+ len += scnprintf(buf+len, PAGE_SIZE-len,
"[%s] ", modes[i]);
else
- len += snprintf(buf+len, PAGE_SIZE-len,
+ len += scnprintf(buf+len, PAGE_SIZE-len,
"%s ", modes[i]);
buf[len-1] = '\n';
return len;
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index cd1270614cc6..e9bbd3c42eef 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -67,7 +67,7 @@ struct idle_inject_device {
struct hrtimer timer;
unsigned int idle_duration_us;
unsigned int run_duration_us;
- unsigned long int cpumask[0];
+ unsigned long cpumask[];
};
static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 73257cf107d9..eb328655bc01 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -951,52 +951,51 @@ static const struct rapl_defaults rapl_defaults_cht = {
};
static const struct x86_cpu_id rapl_ids[] __initconst = {
- INTEL_CPU_FAM6(SANDYBRIDGE, rapl_defaults_core),
- INTEL_CPU_FAM6(SANDYBRIDGE_X, rapl_defaults_core),
-
- INTEL_CPU_FAM6(IVYBRIDGE, rapl_defaults_core),
- INTEL_CPU_FAM6(IVYBRIDGE_X, rapl_defaults_core),
-
- INTEL_CPU_FAM6(HASWELL, rapl_defaults_core),
- INTEL_CPU_FAM6(HASWELL_L, rapl_defaults_core),
- INTEL_CPU_FAM6(HASWELL_G, rapl_defaults_core),
- INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server),
-
- INTEL_CPU_FAM6(BROADWELL, rapl_defaults_core),
- INTEL_CPU_FAM6(BROADWELL_G, rapl_defaults_core),
- INTEL_CPU_FAM6(BROADWELL_D, rapl_defaults_core),
- INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server),
-
- INTEL_CPU_FAM6(SKYLAKE, rapl_defaults_core),
- INTEL_CPU_FAM6(SKYLAKE_L, rapl_defaults_core),
- INTEL_CPU_FAM6(SKYLAKE_X, rapl_defaults_hsw_server),
- INTEL_CPU_FAM6(KABYLAKE_L, rapl_defaults_core),
- INTEL_CPU_FAM6(KABYLAKE, rapl_defaults_core),
- INTEL_CPU_FAM6(CANNONLAKE_L, rapl_defaults_core),
- INTEL_CPU_FAM6(ICELAKE_L, rapl_defaults_core),
- INTEL_CPU_FAM6(ICELAKE, rapl_defaults_core),
- INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core),
- INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server),
- INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
- INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
- INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
- INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core),
-
- INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
- INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
- INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, rapl_defaults_tng),
- INTEL_CPU_FAM6(ATOM_AIRMONT_MID, rapl_defaults_ann),
- INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core),
- INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
- INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core),
- INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
- INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),
-
- INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
- INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
+ X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &rapl_defaults_core),
+
+ X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &rapl_defaults_core),
+
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &rapl_defaults_hsw_server),
+
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &rapl_defaults_hsw_server),
+
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &rapl_defaults_hsw_server),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &rapl_defaults_hsw_server),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &rapl_defaults_hsw_server),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),
+
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &rapl_defaults_tng),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &rapl_defaults_ann),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &rapl_defaults_core),
+
+ X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &rapl_defaults_hsw_server),
+ X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &rapl_defaults_hsw_server),
{}
};
-
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
/* Read once for all raw primitive data for domains */
diff --git a/drivers/ps3/sys-manager-core.c b/drivers/ps3/sys-manager-core.c
index 24709c572c0c..e061b7d0632b 100644
--- a/drivers/ps3/sys-manager-core.c
+++ b/drivers/ps3/sys-manager-core.c
@@ -31,7 +31,7 @@ void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops)
{
BUG_ON(!ops);
BUG_ON(!ops->dev);
- ps3_sys_manager_ops = ops ? *ops : ps3_sys_manager_ops;
+ ps3_sys_manager_ops = *ops;
}
EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops);
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 475c60dccaa4..86400c708150 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -115,6 +115,18 @@ config PTP_1588_CLOCK_KVM
To compile this driver as a module, choose M here: the module
will be called ptp_kvm.
+config PTP_1588_CLOCK_IDT82P33
+ tristate "IDT 82P33xxx PTP clock"
+ depends on PTP_1588_CLOCK && I2C
+ default n
+ help
+ This driver adds support for using the IDT 82P33xxx as a PTP
+ clock. This clock is only useful if your time stamping MAC
+ is connected to the IDT chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_idt82p33.
+
config PTP_1588_CLOCK_IDTCM
tristate "IDT CLOCKMATRIX as PTP clock"
depends on PTP_1588_CLOCK && I2C
@@ -127,4 +139,16 @@ config PTP_1588_CLOCK_IDTCM
To compile this driver as a module, choose M here: the module
will be called ptp_clockmatrix.
+config PTP_1588_CLOCK_VMW
+ tristate "VMware virtual PTP clock"
+ depends on ACPI && HYPERVISOR_GUEST && X86
+ depends on PTP_1588_CLOCK
+ help
+ This driver adds support for using VMware virtual precision
+ clock device as a PTP clock. This is only useful in virtual
+ machines running on VMware virtual infrastructure.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_vmw.
+
endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 8c830336f178..7aff75f745dc 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
+obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 9d72ab593f13..93d574faf1fe 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -175,7 +175,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
}
req.type = PTP_CLK_REQ_EXTTS;
enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_PEROUT_REQUEST:
@@ -206,7 +209,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
}
req.type = PTP_CLK_REQ_PEROUT;
enable = req.perout.period.sec || req.perout.period.nsec;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_ENABLE_PPS:
@@ -217,7 +223,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
return -EPERM;
req.type = PTP_CLK_REQ_PPS;
enable = arg ? 1 : 0;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_SYS_OFFSET_PRECISE:
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index ac1f2bf9e888..acabbe72e55e 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -348,7 +348,6 @@ int ptp_find_pin(struct ptp_clock *ptp,
struct ptp_pin_desc *pin = NULL;
int i;
- mutex_lock(&ptp->pincfg_mux);
for (i = 0; i < ptp->info->n_pins; i++) {
if (ptp->info->pin_config[i].func == func &&
ptp->info->pin_config[i].chan == chan) {
@@ -356,12 +355,26 @@ int ptp_find_pin(struct ptp_clock *ptp,
break;
}
}
- mutex_unlock(&ptp->pincfg_mux);
return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);
+int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ int result;
+
+ mutex_lock(&ptp->pincfg_mux);
+
+ result = ptp_find_pin(ptp, func, chan);
+
+ mutex_unlock(&ptp->pincfg_mux);
+
+ return result;
+}
+EXPORT_SYMBOL(ptp_find_pin_unlocked);
+
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
new file mode 100644
index 000000000000..b63ac240308b
--- /dev/null
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 Integrated Device Technology, Inc
+//
+
+#define pr_fmt(fmt) "IDT_82p33xxx: " fmt
+
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/bitops.h>
+
+#include "ptp_private.h"
+#include "ptp_idt82p33.h"
+
+MODULE_DESCRIPTION("Driver for IDT 82p33xxx clock devices");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/* Module Parameters */
+u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
+module_param(sync_tod_timeout, uint, 0);
+MODULE_PARM_DESC(sync_tod_timeout,
+"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)");
+
+u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
+module_param(phase_snap_threshold, uint, 0);
+MODULE_PARM_DESC(phase_snap_threshold,
+"threshold (150000ns by default) below which adjtime would ignore");
+
+static void idt82p33_byte_array_to_timespec(struct timespec64 *ts,
+ u8 buf[TOD_BYTE_COUNT])
+{
+ time64_t sec;
+ s32 nsec;
+ u8 i;
+
+ nsec = buf[3];
+ for (i = 0; i < 3; i++) {
+ nsec <<= 8;
+ nsec |= buf[2 - i];
+ }
+
+ sec = buf[9];
+ for (i = 0; i < 5; i++) {
+ sec <<= 8;
+ sec |= buf[8 - i];
+ }
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts,
+ u8 buf[TOD_BYTE_COUNT])
+{
+ time64_t sec;
+ s32 nsec;
+ u8 i;
+
+ nsec = ts->tv_nsec;
+ sec = ts->tv_sec;
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = nsec & 0xff;
+ nsec >>= 8;
+ }
+
+ for (i = 4; i < TOD_BYTE_COUNT; i++) {
+ buf[i] = sec & 0xff;
+ sec >>= 8;
+ }
+}
+
+static int idt82p33_xfer(struct idt82p33 *idt82p33,
+ unsigned char regaddr,
+ unsigned char *buf,
+ unsigned int count,
+ int write)
+{
+ struct i2c_client *client = idt82p33->client;
+ struct i2c_msg msg[2];
+ int cnt;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &regaddr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ cnt = i2c_transfer(client->adapter, msg, 2);
+ if (cnt < 0) {
+ dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
+ return cnt;
+ } else if (cnt != 2) {
+ dev_err(&client->dev,
+ "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
+{
+ int err;
+
+ if (idt82p33->page_offset == val)
+ return 0;
+
+ err = idt82p33_xfer(idt82p33, PAGE_ADDR, &val, sizeof(val), 1);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "failed to set page offset %d\n", val);
+ else
+ idt82p33->page_offset = val;
+
+ return err;
+}
+
+static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count, bool write)
+{
+ u8 offset, page;
+ int err;
+
+ page = _PAGE(regaddr);
+ offset = _OFFSET(regaddr);
+
+ err = idt82p33_page_offset(idt82p33, page);
+ if (err)
+ goto out;
+
+ err = idt82p33_xfer(idt82p33, offset, buf, count, write);
+out:
+ return err;
+}
+
+static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count)
+{
+ return idt82p33_rdwr(idt82p33, regaddr, buf, count, false);
+}
+
+static int idt82p33_write(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count)
+{
+ return idt82p33_rdwr(idt82p33, regaddr, buf, count, true);
+}
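/*
 * Editorial sketch (illustrative only): the helpers above give a
 * plain read-modify-write pattern over the paged register map.  For
 * a hypothetical register address REG:
 *
 *	u8 val;
 *
 *	err = idt82p33_read(idt82p33, REG, &val, sizeof(val));
 *	if (err)
 *		return err;
 *	val |= BIT(0);
 *	err = idt82p33_write(idt82p33, REG, &val, sizeof(val));
 *
 * idt82p33_rdwr() splits REG into page/offset via _PAGE()/_OFFSET()
 * and only rewrites PAGE_ADDR when the cached page_offset changes.
 */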
+
+static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
+ enum pll_mode mode)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 dpll_mode;
+ int err;
+
+ if (channel->pll_mode == mode)
+ return 0;
+
+ err = idt82p33_read(idt82p33, channel->dpll_mode_cnfg,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+
+ dpll_mode |= (mode << PLL_MODE_SHIFT);
+
+ err = idt82p33_write(idt82p33, channel->dpll_mode_cnfg,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ channel->pll_mode = dpll_mode;
+
+ return 0;
+}
+
+static int _idt82p33_gettime(struct idt82p33_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 trigger;
+ int err;
+
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idt82p33->calculate_overhead_flag)
+ idt82p33->start_time = ktime_get_raw();
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ idt82p33_byte_array_to_timespec(ts, buf);
+
+ return 0;
+}
+
+/*
+ * TOD Trigger:
+ * Bits[7:4] Write 0x9, MSB write
+ * Bits[3:0] Read 0x9, LSB read
+ */
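/*
 * Editorial sketch (illustrative only): with TOD_TRIGGER() from
 * ptp_idt82p33.h, both selectors used here are 9, so the value
 * written to the trigger register is
 *
 *	TOD_TRIGGER(9, 9) == (9 << 4) | 9 == 0x99
 *
 * i.e. writing the MSB of TOD_CNFG commits a new time-of-day and
 * reading the LSB of TOD_STS latches the current one.
 */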
+
+static int _idt82p33_settime(struct idt82p33_channel *channel,
+ struct timespec64 const *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 local_ts = *ts;
+ char buf[TOD_BYTE_COUNT];
+ s64 dynamic_overhead_ns;
+ unsigned char trigger;
+ int err;
+ u8 i;
+
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idt82p33->calculate_overhead_flag) {
+ dynamic_overhead_ns = ktime_to_ns(ktime_get_raw())
+ - ktime_to_ns(idt82p33->start_time);
+
+ timespec64_add_ns(&local_ts, dynamic_overhead_ns);
+
+ idt82p33->calculate_overhead_flag = 0;
+ }
+
+ idt82p33_timespec_to_byte_array(&local_ts, buf);
+
+ /*
+ * Store the new time value.
+ */
+ for (i = 0; i < TOD_BYTE_COUNT; i++) {
+ err = idt82p33_write(idt82p33, channel->dpll_tod_cnfg + i,
+ &buf[i], sizeof(buf[i]));
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int _idt82p33_adjtime(struct idt82p33_channel *channel, s64 delta_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 ts;
+ s64 now_ns;
+ int err;
+
+ idt82p33->calculate_overhead_flag = 1;
+
+ err = _idt82p33_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ now_ns = timespec64_to_ns(&ts);
+ now_ns += delta_ns + idt82p33->tod_write_overhead_ns;
+
+ ts = ns_to_timespec64(now_ns);
+
+ err = _idt82p33_settime(channel, &ts);
+
+ return err;
+}
+
+static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ unsigned char buf[5] = {0};
+ int neg_adj = 0;
+ int err, i;
+ s64 fcw;
+
+ if (scaled_ppm == channel->current_freq_ppb)
+ return 0;
+
+ /*
+ * Frequency Control Word unit is: 1.68 * 10^-10 ppm
+ *
+ * adjfreq:
+ * ppb * 10^9
+ * FCW = ----------
+ * 168
+ *
+ * adjfine:
+ * scaled_ppm * 5^12
+ * FCW = -------------
+ * 168 * 2^4
+ */
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ fcw = scaled_ppm * 244140625ULL;
+ fcw = div_u64(fcw, 2688);
+
+ if (neg_adj)
+ fcw = -fcw;
+
+ for (i = 0; i < 5; i++) {
+ buf[i] = fcw & 0xff;
+ fcw >>= 8;
+ }
+
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
+
+ if (err)
+ return err;
+
+ err = idt82p33_write(idt82p33, channel->dpll_freq_cnfg,
+ buf, sizeof(buf));
+
+ if (err == 0)
+ channel->current_freq_ppb = scaled_ppm;
+
+ return err;
+}
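/*
 * Editorial check of the constants above (not part of the patch):
 * 10^12 = 2^12 * 5^12, so
 *
 *	FCW = scaled_ppm * 10^12 / (168 * 2^16)
 *	    = scaled_ppm * 5^12 / (168 * 2^4)
 *	    = scaled_ppm * 244140625 / 2688
 *
 * For scaled_ppm = 65536 (exactly 1 ppm) this yields
 * FCW = 65536 * 244140625 / 2688 ~= 5952380952, which matches
 * 1 ppm / 1.68e-10 ppm-per-LSB ~= 5.95e9.
 */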
+
+static int idt82p33_measure_one_byte_write_overhead(
+ struct idt82p33_channel *channel, s64 *overhead_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ ktime_t start, stop;
+ s64 total_ns;
+ u8 trigger;
+ int err;
+ u8 i;
+
+ total_ns = 0;
+ *overhead_ns = 0;
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+ for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
+
+ start = ktime_get_raw();
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ stop = ktime_get_raw();
+
+ if (err)
+ return err;
+
+ total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
+ }
+
+ *overhead_ns = div_s64(total_ns, MAX_MEASURMENT_COUNT);
+
+ return err;
+}
+
+static int idt82p33_measure_tod_write_9_byte_overhead(
+ struct idt82p33_channel *channel)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 buf[TOD_BYTE_COUNT];
+ ktime_t start, stop;
+ s64 total_ns;
+ int err = 0;
+ u8 i, j;
+
+ total_ns = 0;
+ idt82p33->tod_write_overhead_ns = 0;
+
+ for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
+
+ start = ktime_get_raw();
+
+ /* Need one less byte for applicable overhead */
+ for (j = 0; j < (TOD_BYTE_COUNT - 1); j++) {
+ err = idt82p33_write(idt82p33,
+					channel->dpll_tod_cnfg + j,
+					&buf[j], sizeof(buf[j]));
+ if (err)
+ return err;
+ }
+
+ stop = ktime_get_raw();
+
+ total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
+ }
+
+ idt82p33->tod_write_overhead_ns = div_s64(total_ns,
+ MAX_MEASURMENT_COUNT);
+
+ return err;
+}
+
+static int idt82p33_measure_settime_gettime_gap_overhead(
+ struct idt82p33_channel *channel, s64 *overhead_ns)
+{
+ struct timespec64 ts1 = {0, 0};
+ struct timespec64 ts2;
+ int err;
+
+ *overhead_ns = 0;
+
+ err = _idt82p33_settime(channel, &ts1);
+
+ if (err)
+ return err;
+
+ err = _idt82p33_gettime(channel, &ts2);
+
+ if (!err)
+ *overhead_ns = timespec64_to_ns(&ts2) - timespec64_to_ns(&ts1);
+
+ return err;
+}
+
+static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
+{
+ s64 trailing_overhead_ns, one_byte_write_ns, gap_ns;
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ idt82p33->tod_write_overhead_ns = 0;
+
+ err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_one_byte_write_overhead(channel,
+ &one_byte_write_ns);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_tod_write_9_byte_overhead(channel);
+
+ if (err)
+ return err;
+
+ trailing_overhead_ns = gap_ns - (2 * one_byte_write_ns);
+
+ idt82p33->tod_write_overhead_ns -= trailing_overhead_ns;
+
+ return err;
+}
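/*
 * Editorial note (not part of the patch), one plausible reading of
 * the compensation above: gap_ns spans a full settime followed by a
 * gettime, and each of those begins with a one-byte trigger write,
 * so gap_ns - 2 * one_byte_write_ns isolates latency that trails the
 * measured 9-byte TOD store.  Subtracting it keeps only the overhead
 * a later _idt82p33_settime() actually needs to add to the new time.
 */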
+
+static int idt82p33_check_and_set_masks(struct idt82p33 *idt82p33,
+ u8 page,
+ u8 offset,
+ u8 val)
+{
+ int err = 0;
+
+ if (page == PLLMASK_ADDR_HI && offset == PLLMASK_ADDR_LO) {
+ if ((val & 0xfc) || !(val & 0x3)) {
+ dev_err(&idt82p33->client->dev,
+ "Invalid PLL mask 0x%hhx\n", val);
+ err = -EINVAL;
+ } else {
+ idt82p33->pll_mask = val;
+ }
+ } else if (page == PLL0_OUTMASK_ADDR_HI &&
+ offset == PLL0_OUTMASK_ADDR_LO) {
+ idt82p33->channel[0].output_mask = val;
+ } else if (page == PLL1_OUTMASK_ADDR_HI &&
+ offset == PLL1_OUTMASK_ADDR_LO) {
+ idt82p33->channel[1].output_mask = val;
+ }
+
+ return err;
+}
+
+static void idt82p33_display_masks(struct idt82p33 *idt82p33)
+{
+ u8 mask, i;
+
+ dev_info(&idt82p33->client->dev,
+ "pllmask = 0x%02x\n", idt82p33->pll_mask);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if (mask & idt82p33->pll_mask)
+ dev_info(&idt82p33->client->dev,
+ "PLL%d output_mask = 0x%04x\n",
+ i, idt82p33->channel[i].output_mask);
+ }
+}
+
+static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 sync_cnfg;
+ int err;
+
+ if (enable == channel->sync_tod_on) {
+ if (enable && sync_tod_timeout) {
+ mod_delayed_work(system_wq, &channel->sync_tod_work,
+ sync_tod_timeout * HZ);
+ }
+ return 0;
+ }
+
+ err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg,
+ &sync_cnfg, sizeof(sync_cnfg));
+ if (err)
+ return err;
+
+ sync_cnfg &= ~SYNC_TOD;
+
+ if (enable)
+ sync_cnfg |= SYNC_TOD;
+
+ err = idt82p33_write(idt82p33, channel->dpll_sync_cnfg,
+ &sync_cnfg, sizeof(sync_cnfg));
+ if (err)
+ return err;
+
+ channel->sync_tod_on = enable;
+
+ if (enable && sync_tod_timeout) {
+ mod_delayed_work(system_wq, &channel->sync_tod_work,
+ sync_tod_timeout * HZ);
+ }
+
+ return 0;
+}
+
+static void idt82p33_sync_tod_work_handler(struct work_struct *work)
+{
+ struct idt82p33_channel *channel =
+ container_of(work, struct idt82p33_channel, sync_tod_work.work);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ (void)idt82p33_sync_tod(channel, false);
+
+ mutex_unlock(&idt82p33->reg_lock);
+}
+
+static int idt82p33_pps_enable(struct idt82p33_channel *channel, bool enable)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 mask, outn, val;
+ int err;
+
+ mask = channel->output_mask;
+ outn = 0;
+
+ while (mask) {
+ if (mask & 0x1) {
+ err = idt82p33_read(idt82p33, OUT_MUX_CNFG(outn),
+ &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (enable)
+ val &= ~SQUELCH_ENABLE;
+ else
+ val |= SQUELCH_ENABLE;
+
+ err = idt82p33_write(idt82p33, OUT_MUX_CNFG(outn),
+ &val, sizeof(val));
+
+ if (err)
+ return err;
+ }
+ mask >>= 0x1;
+ outn++;
+ }
+
+ return 0;
+}
+
+static int idt82p33_enable_tod(struct idt82p33_channel *channel)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 ts = {0, 0};
+ int err;
+ u8 val;
+
+ val = 0;
+ err = idt82p33_write(idt82p33, channel->dpll_input_mode_cnfg,
+ &val, sizeof(val));
+ if (err)
+ return err;
+
+ err = idt82p33_pps_enable(channel, false);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_tod_write_overhead(channel);
+
+ if (err)
+ return err;
+
+ err = _idt82p33_settime(channel, &ts);
+
+ if (err)
+ return err;
+
+ return idt82p33_sync_tod(channel, true);
+}
+
+static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
+{
+ struct idt82p33_channel *channel;
+ u8 i;
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+
+ channel = &idt82p33->channel[i];
+
+ if (channel->ptp_clock) {
+ ptp_clock_unregister(channel->ptp_clock);
+ cancel_delayed_work_sync(&channel->sync_tod_work);
+ }
+ }
+}
+
+static int idt82p33_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ err = -EOPNOTSUPP;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ if (rq->type == PTP_CLK_REQ_PEROUT) {
+		if (!on)
+			err = idt82p33_pps_enable(channel, false);
+		/* Only accept a 1-PPS aligned to the second. */
+		else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+			 rq->perout.period.nsec)
+			err = -ERANGE;
+		else
+			err = idt82p33_pps_enable(channel, true);
+ }
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_adjfine(channel, scaled_ppm);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ if (abs(delta_ns) < phase_snap_threshold) {
+ mutex_unlock(&idt82p33->reg_lock);
+ return 0;
+ }
+
+ err = _idt82p33_adjtime(channel, delta_ns);
+
+ if (err) {
+ mutex_unlock(&idt82p33->reg_lock);
+ return err;
+ }
+
+ err = idt82p33_sync_tod(channel, true);
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_gettime(channel, ts);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_settime(channel, ts);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
+{
+ switch (index) {
+ case 0:
+ channel->dpll_tod_cnfg = DPLL1_TOD_CNFG;
+ channel->dpll_tod_trigger = DPLL1_TOD_TRIGGER;
+ channel->dpll_tod_sts = DPLL1_TOD_STS;
+ channel->dpll_mode_cnfg = DPLL1_OPERATING_MODE_CNFG;
+ channel->dpll_freq_cnfg = DPLL1_HOLDOVER_FREQ_CNFG;
+ channel->dpll_phase_cnfg = DPLL1_PHASE_OFFSET_CNFG;
+ channel->dpll_sync_cnfg = DPLL1_SYNC_EDGE_CNFG;
+ channel->dpll_input_mode_cnfg = DPLL1_INPUT_MODE_CNFG;
+ break;
+ case 1:
+ channel->dpll_tod_cnfg = DPLL2_TOD_CNFG;
+ channel->dpll_tod_trigger = DPLL2_TOD_TRIGGER;
+ channel->dpll_tod_sts = DPLL2_TOD_STS;
+ channel->dpll_mode_cnfg = DPLL2_OPERATING_MODE_CNFG;
+ channel->dpll_freq_cnfg = DPLL2_HOLDOVER_FREQ_CNFG;
+ channel->dpll_phase_cnfg = DPLL2_PHASE_OFFSET_CNFG;
+ channel->dpll_sync_cnfg = DPLL2_SYNC_EDGE_CNFG;
+ channel->dpll_input_mode_cnfg = DPLL2_INPUT_MODE_CNFG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ INIT_DELAYED_WORK(&channel->sync_tod_work,
+ idt82p33_sync_tod_work_handler);
+ channel->sync_tod_on = false;
+ channel->current_freq_ppb = 0;
+
+ return 0;
+}
+
+static void idt82p33_caps_init(struct ptp_clock_info *caps)
+{
+ caps->owner = THIS_MODULE;
+ caps->max_adj = 92000;
+ caps->adjfine = idt82p33_adjfine;
+ caps->adjtime = idt82p33_adjtime;
+ caps->gettime64 = idt82p33_gettime;
+ caps->settime64 = idt82p33_settime;
+ caps->enable = idt82p33_enable;
+}
+
+static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
+{
+ struct idt82p33_channel *channel;
+ int err;
+
+ if (!(index < MAX_PHC_PLL))
+ return -EINVAL;
+
+ channel = &idt82p33->channel[index];
+
+ err = idt82p33_channel_init(channel, index);
+ if (err)
+ return err;
+
+ channel->idt82p33 = idt82p33;
+
+ idt82p33_caps_init(&channel->caps);
+ snprintf(channel->caps.name, sizeof(channel->caps.name),
+ "IDT 82P33 PLL%u", index);
+ channel->caps.n_per_out = hweight8(channel->output_mask);
+
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
+ if (err)
+ return err;
+
+ err = idt82p33_enable_tod(channel);
+ if (err)
+ return err;
+
+ channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
+
+ if (IS_ERR(channel->ptp_clock)) {
+ err = PTR_ERR(channel->ptp_clock);
+ channel->ptp_clock = NULL;
+ return err;
+ }
+
+ if (!channel->ptp_clock)
+ return -ENOTSUPP;
+
+ dev_info(&idt82p33->client->dev, "PLL%d registered as ptp%d\n",
+ index, channel->ptp_clock->index);
+
+ return 0;
+}
+
+static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
+{
+ const struct firmware *fw;
+ struct idt82p33_fwrc *rec;
+ u8 loaddr, page, val;
+ int err;
+ s32 len;
+
+ dev_dbg(&idt82p33->client->dev,
+ "requesting firmware '%s'\n", FW_FILENAME);
+
+ err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev);
+
+ if (err)
+ return err;
+
+ dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idt82p33_fwrc *) fw->data;
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+
+ if (rec->reserved) {
+ dev_err(&idt82p33->client->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ val = rec->value;
+ loaddr = rec->loaddr;
+ page = rec->hiaddr;
+
+ rec++;
+
+ err = idt82p33_check_and_set_masks(idt82p33, page,
+ loaddr, val);
+ }
+
+ if (err == 0) {
+ /* maximum 8 pages */
+ if (page >= PAGE_NUM)
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if (((loaddr > 0x7b) && (loaddr <= 0x7f))
+ || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ continue;
+
+ err = idt82p33_write(idt82p33, _ADDR(page, loaddr),
+ &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ idt82p33_display_masks(idt82p33);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+
+static int idt82p33_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct idt82p33 *idt82p33;
+ int err;
+ u8 i;
+
+ (void)id;
+
+ idt82p33 = devm_kzalloc(&client->dev,
+ sizeof(struct idt82p33), GFP_KERNEL);
+ if (!idt82p33)
+ return -ENOMEM;
+
+ mutex_init(&idt82p33->reg_lock);
+
+ idt82p33->client = client;
+ idt82p33->page_offset = 0xff;
+ idt82p33->tod_write_overhead_ns = 0;
+ idt82p33->calculate_overhead_flag = 0;
+ idt82p33->pll_mask = DEFAULT_PLL_MASK;
+ idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
+ idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ err = idt82p33_load_firmware(idt82p33);
+
+ if (err)
+ dev_warn(&idt82p33->client->dev,
+ "loading firmware failed with %d\n", err);
+
+ if (idt82p33->pll_mask) {
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ if (idt82p33->pll_mask & (1 << i)) {
+ err = idt82p33_enable_channel(idt82p33, i);
+ if (err)
+ break;
+ }
+ }
+ } else {
+ dev_err(&idt82p33->client->dev,
+ "no PLLs flagged as PHCs, nothing to do\n");
+ err = -ENODEV;
+ }
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ if (err) {
+ idt82p33_ptp_clock_unregister_all(idt82p33);
+ return err;
+ }
+
+ i2c_set_clientdata(client, idt82p33);
+
+ return 0;
+}
+
+static int idt82p33_remove(struct i2c_client *client)
+{
+ struct idt82p33 *idt82p33 = i2c_get_clientdata(client);
+
+ idt82p33_ptp_clock_unregister_all(idt82p33);
+ mutex_destroy(&idt82p33->reg_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id idt82p33_dt_id[] = {
+ { .compatible = "idt,82p33810" },
+ { .compatible = "idt,82p33813" },
+ { .compatible = "idt,82p33814" },
+ { .compatible = "idt,82p33831" },
+ { .compatible = "idt,82p33910" },
+ { .compatible = "idt,82p33913" },
+ { .compatible = "idt,82p33914" },
+ { .compatible = "idt,82p33931" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idt82p33_dt_id);
+#endif
+
+static const struct i2c_device_id idt82p33_i2c_id[] = {
+ { "idt82p33810", },
+ { "idt82p33813", },
+ { "idt82p33814", },
+ { "idt82p33831", },
+ { "idt82p33910", },
+ { "idt82p33913", },
+ { "idt82p33914", },
+ { "idt82p33931", },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, idt82p33_i2c_id);
+
+static struct i2c_driver idt82p33_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(idt82p33_dt_id),
+ .name = "idt82p33",
+ },
+ .probe = idt82p33_probe,
+ .remove = idt82p33_remove,
+ .id_table = idt82p33_i2c_id,
+};
+
+module_i2c_driver(idt82p33_driver);
diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h
new file mode 100644
index 000000000000..9d46966d25f1
--- /dev/null
+++ b/drivers/ptp/ptp_idt82p33.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the IDT 82P33XXX family of clocks.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDT82P33_H
+#define PTP_IDT82P33_H
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+
+/* Register Map - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf */
+#define PAGE_NUM (8)
+#define _ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+#define _PAGE(addr) (((addr) >> 0x7) & 0x7)
+#define _OFFSET(addr) ((addr) & 0x7f)
+
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define OUT_MUX_CNFG(outn) _ADDR(0x6, (0xC * (outn)))
+
+#define PAGE_ADDR 0x7F
+/* Register Map end */
+
+/* Register definitions - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf*/
+#define TOD_TRIGGER(wr_trig, rd_trig) ((((wr_trig) & 0xf) << 4) | ((rd_trig) & 0xf))
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/* Register bit definitions end */
+#define FW_FILENAME "idt82p33xxx.bin"
+#define MAX_PHC_PLL (2)
+#define TOD_BYTE_COUNT (10)
+#define MAX_MEASURMENT_COUNT (5)
+#define SNAP_THRESHOLD_NS (150000)
+#define SYNC_TOD_TIMEOUT_SEC (5)
+
+#define PLLMASK_ADDR_HI 0xFF
+#define PLLMASK_ADDR_LO 0xA5
+
+#define PLL0_OUTMASK_ADDR_HI 0xFF
+#define PLL0_OUTMASK_ADDR_LO 0xB0
+
+#define PLL1_OUTMASK_ADDR_HI 0xFF
+#define PLL1_OUTMASK_ADDR_LO 0xB2
+
+#define PLL2_OUTMASK_ADDR_HI 0xFF
+#define PLL2_OUTMASK_ADDR_LO 0xB4
+
+#define PLL3_OUTMASK_ADDR_HI 0xFF
+#define PLL3_OUTMASK_ADDR_LO 0xB6
+
+#define DEFAULT_PLL_MASK (0x01)
+#define DEFAULT_OUTPUT_MASK_PLL0 (0xc0)
+#define DEFAULT_OUTPUT_MASK_PLL1 DEFAULT_OUTPUT_MASK_PLL0
+
+/* PTP Hardware Clock interface */
+struct idt82p33_channel {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct idt82p33 *idt82p33;
+ enum pll_mode pll_mode;
+ /* task to turn off SYNC_TOD bit after pps sync */
+ struct delayed_work sync_tod_work;
+ bool sync_tod_on;
+ s32 current_freq_ppb;
+ u8 output_mask;
+ u16 dpll_tod_cnfg;
+ u16 dpll_tod_trigger;
+ u16 dpll_tod_sts;
+ u16 dpll_mode_cnfg;
+ u16 dpll_freq_cnfg;
+ u16 dpll_phase_cnfg;
+ u16 dpll_sync_cnfg;
+ u16 dpll_input_mode_cnfg;
+};
+
+struct idt82p33 {
+ struct idt82p33_channel channel[MAX_PHC_PLL];
+ struct i2c_client *client;
+ u8 page_offset;
+ u8 pll_mask;
+ ktime_t start_time;
+ int calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
+ /* Protects I2C read/modify/write registers from concurrent access */
+ struct mutex reg_lock;
+};
+
+/* firmware interface */
+struct idt82p33_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
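/*
 * Editorial sketch (illustrative only): the firmware image is a flat
 * array of these 4-byte records.  A hypothetical record
 *
 *	{ .hiaddr = 0x01, .loaddr = 0x20, .value = 0x0a, .reserved = 0 }
 *
 * is applied by idt82p33_load_firmware() as a one-byte write of 0x0a
 * to _ADDR(0x01, 0x20), i.e. page 1, offset 0x20; records with a
 * non-zero reserved field abort the load with -EINVAL.
 */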
+
+/**
+ * @brief Maximum absolute value for write phase offset in femtoseconds
+ */
+#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
+
+/** @brief Phase offset resolution
+ *
+ * DPLL phase offset = 10^15 fs / ( System Clock * 2^13)
+ *                    = 10^15 fs / ( 1638400000 * 2^13)
+ * = 74.5058059692382 fs
+ */
+#define IDT_T0DPLL_PHASE_RESOL 74506
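/*
 * Editorial check (not part of the patch): 1638400000 * 2^13 ~=
 * 1.342e13, and 10^15 / 1.342e13 ~= 74.506 fs, so the constant
 * apparently stores the resolution in units of 10^-3 fs
 * (74506 == 74.506 fs).
 */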
+
+
+#endif /* PTP_IDT82P33_H */
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index dfda54cbd866..52d77db39829 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -400,8 +400,8 @@ static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
ines_write32(port, ts_stat_rx, ts_stat_rx);
ines_write32(port, ts_stat_tx, ts_stat_tx);
- port->rxts_enabled = ts_stat_rx == TS_ENABLE ? true : false;
- port->txts_enabled = ts_stat_tx == TS_ENABLE ? true : false;
+ port->rxts_enabled = ts_stat_rx == TS_ENABLE;
+ port->txts_enabled = ts_stat_tx == TS_ENABLE;
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index b27c46ebfc8f..c09c16be0edf 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -131,8 +131,7 @@ irqreturn_t ptp_qoriq_isr(int irq, void *priv)
struct ptp_qoriq *ptp_qoriq = priv;
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
struct ptp_clock_event event;
- u64 ns;
- u32 ack = 0, lo, hi, mask, val, irqs;
+ u32 ack = 0, mask, val, irqs;
spin_lock(&ptp_qoriq->lock);
@@ -153,32 +152,6 @@ irqreturn_t ptp_qoriq_isr(int irq, void *priv)
extts_clean_up(ptp_qoriq, 1, true);
}
- if (irqs & ALM2) {
- ack |= ALM2;
- if (ptp_qoriq->alarm_value) {
- event.type = PTP_CLOCK_ALARM;
- event.index = 0;
- event.timestamp = ptp_qoriq->alarm_value;
- ptp_clock_event(ptp_qoriq->clock, &event);
- }
- if (ptp_qoriq->alarm_interval) {
- ns = ptp_qoriq->alarm_value + ptp_qoriq->alarm_interval;
- hi = ns >> 32;
- lo = ns & 0xffffffff;
- ptp_qoriq->write(&regs->alarm_regs->tmr_alarm2_l, lo);
- ptp_qoriq->write(&regs->alarm_regs->tmr_alarm2_h, hi);
- ptp_qoriq->alarm_value = ns;
- } else {
- spin_lock(&ptp_qoriq->lock);
- mask = ptp_qoriq->read(&regs->ctrl_regs->tmr_temask);
- mask &= ~ALM2EN;
- ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, mask);
- spin_unlock(&ptp_qoriq->lock);
- ptp_qoriq->alarm_value = 0;
- ptp_qoriq->alarm_interval = 0;
- }
- }
-
if (irqs & PP1) {
ack |= PP1;
event.type = PTP_CLOCK_PPS;
diff --git a/drivers/ptp/ptp_vmw.c b/drivers/ptp/ptp_vmw.c
new file mode 100644
index 000000000000..5dca26e14bdc
--- /dev/null
+++ b/drivers/ptp/ptp_vmw.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright (C) 2020 VMware, Inc., Palo Alto, CA., USA
+ *
+ * PTP clock driver for VMware precision clock virtual device.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <asm/hypervisor.h>
+#include <asm/vmware.h>
+
+#define VMWARE_MAGIC 0x564D5868
+#define VMWARE_CMD_PCLK(nr) ((nr << 16) | 97)
+#define VMWARE_CMD_PCLK_GETTIME VMWARE_CMD_PCLK(0)
+
+static struct acpi_device *ptp_vmw_acpi_device;
+static struct ptp_clock *ptp_vmw_clock;
+
+
+static int ptp_vmw_pclk_read(u64 *ns)
+{
+ u32 ret, nsec_hi, nsec_lo, unused1, unused2, unused3;
+
+ asm volatile (VMWARE_HYPERCALL :
+ "=a"(ret), "=b"(nsec_hi), "=c"(nsec_lo), "=d"(unused1),
+ "=S"(unused2), "=D"(unused3) :
+ "a"(VMWARE_MAGIC), "b"(0),
+ "c"(VMWARE_CMD_PCLK_GETTIME), "d"(0) :
+ "memory");
+
+ if (ret == 0)
+ *ns = ((u64)nsec_hi << 32) | nsec_lo;
+ return ret;
+}
+
+/*
+ * PTP clock ops.
+ */
+
+static int ptp_vmw_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ u64 ns;
+
+ if (ptp_vmw_pclk_read(&ns) != 0)
+ return -EIO;
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+static int ptp_vmw_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_vmw_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ptp_vmw",
+ .max_adj = 0,
+ .adjtime = ptp_vmw_adjtime,
+ .adjfreq = ptp_vmw_adjfreq,
+ .gettime64 = ptp_vmw_gettime,
+ .settime64 = ptp_vmw_settime,
+ .enable = ptp_vmw_enable,
+};
+
+/*
+ * ACPI driver ops for VMware "precision clock" virtual device.
+ */
+
+static int ptp_vmw_acpi_add(struct acpi_device *device)
+{
+ ptp_vmw_clock = ptp_clock_register(&ptp_vmw_clock_info, NULL);
+ if (IS_ERR(ptp_vmw_clock)) {
+ pr_err("failed to register ptp clock\n");
+ return PTR_ERR(ptp_vmw_clock);
+ }
+
+ ptp_vmw_acpi_device = device;
+ return 0;
+}
+
+static int ptp_vmw_acpi_remove(struct acpi_device *device)
+{
+ ptp_clock_unregister(ptp_vmw_clock);
+ return 0;
+}
+
+static const struct acpi_device_id ptp_vmw_acpi_device_ids[] = {
+ { "VMW0005", 0 },
+ { "", 0 },
+};
+
+MODULE_DEVICE_TABLE(acpi, ptp_vmw_acpi_device_ids);
+
+static struct acpi_driver ptp_vmw_acpi_driver = {
+ .name = "ptp_vmw",
+ .ids = ptp_vmw_acpi_device_ids,
+ .ops = {
+ .add = ptp_vmw_acpi_add,
+ .remove = ptp_vmw_acpi_remove
+ },
+ .owner = THIS_MODULE
+};
+
+static int __init ptp_vmw_init(void)
+{
+ if (x86_hyper_type != X86_HYPER_VMWARE)
+		return -ENODEV;
+ return acpi_bus_register_driver(&ptp_vmw_acpi_driver);
+}
+
+static void __exit ptp_vmw_exit(void)
+{
+ acpi_bus_unregister_driver(&ptp_vmw_acpi_driver);
+}
+
+module_init(ptp_vmw_init);
+module_exit(ptp_vmw_exit);
+
+MODULE_DESCRIPTION("VMware virtual PTP clock driver");
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 30190beeb6e9..eebbc917ac97 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -33,6 +33,15 @@ config PWM_SYSFS
bool
default y if SYSFS
+config PWM_DEBUG
+ bool "PWM lowlevel drivers additional checks and debug messages"
+ depends on DEBUG_KERNEL
+ help
+	  This option enables additional checks that help lowlevel driver
+	  authors get their callbacks implemented correctly.
+	  It is expected to introduce some runtime overhead and diagnostic
+	  output to the kernel log, so only enable it while working on a driver.
+
config PWM_AB8500
tristate "AB8500 PWM support"
depends on AB8500_CORE && ARCH_U8500
@@ -44,7 +53,8 @@ config PWM_AB8500
config PWM_ATMEL
tristate "Atmel PWM support"
- depends on ARCH_AT91 && OF
+ depends on OF
+ depends on ARCH_AT91 || COMPILE_TEST
help
Generic PWM framework driver for Atmel SoC.
@@ -100,7 +110,7 @@ config PWM_BCM_KONA
config PWM_BCM2835
tristate "BCM2835 PWM support"
- depends on ARCH_BCM2835 || ARCH_BRCMSTB
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
help
PWM framework driver for BCM2835 controller (Raspberry Pi)
@@ -109,7 +119,7 @@ config PWM_BCM2835
config PWM_BERLIN
tristate "Marvell Berlin PWM support"
- depends on ARCH_BERLIN
+ depends on ARCH_BERLIN || COMPILE_TEST
help
PWM framework driver for Marvell Berlin SoCs.
@@ -118,7 +128,7 @@ config PWM_BERLIN
config PWM_BRCMSTB
tristate "Broadcom STB PWM support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
help
Generic PWM framework driver for the Broadcom Set-top-Box
SoCs (BCM7xxx).
@@ -152,7 +162,7 @@ config PWM_CROS_EC
config PWM_EP93XX
tristate "Cirrus Logic EP93xx PWM support"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
help
Generic PWM framework driver for Cirrus Logic EP93xx.
@@ -195,7 +205,7 @@ config PWM_IMG
config PWM_IMX1
tristate "i.MX1 PWM support"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Generic PWM framework driver for i.MX1 and i.MX21
@@ -204,7 +214,7 @@ config PWM_IMX1
config PWM_IMX27
tristate "i.MX27 PWM support"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Generic PWM framework driver for i.MX27 and later i.MX SoCs.
@@ -225,6 +235,8 @@ config PWM_IMX_TPM
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
depends on MACH_INGENIC
+ depends on COMMON_CLK
+ select MFD_SYSCON
help
Generic PWM framework driver for Ingenic JZ47xx based
machines.
@@ -244,7 +256,7 @@ config PWM_LP3943
config PWM_LPC18XX_SCT
tristate "LPC18xx/43xx PWM/SCT support"
- depends on ARCH_LPC18XX
+ depends on ARCH_LPC18XX || COMPILE_TEST
help
Generic PWM framework driver for NXP LPC18xx PWM/SCT which
supports 16 channels.
@@ -256,7 +268,7 @@ config PWM_LPC18XX_SCT
config PWM_LPC32XX
tristate "LPC32XX PWM support"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
help
Generic PWM framework driver for LPC32XX. The LPC32XX SOC has two
PWM controllers.
@@ -289,7 +301,8 @@ config PWM_LPSS_PLATFORM
config PWM_MESON
tristate "Amlogic Meson PWM driver"
- depends on ARCH_MESON
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
help
The platform driver for Amlogic Meson PWM controller.
@@ -318,7 +331,8 @@ config PWM_MEDIATEK
config PWM_MXS
tristate "Freescale MXS PWM support"
- depends on ARCH_MXS && OF
+ depends on OF
+ depends on ARCH_MXS || COMPILE_TEST
select STMP_DEVICE
help
Generic PWM framework driver for Freescale MXS.
@@ -357,7 +371,7 @@ config PWM_PUV3
config PWM_PXA
tristate "PXA PWM support"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
help
Generic PWM framework driver for PXA.
@@ -388,14 +402,14 @@ config PWM_RENESAS_TPU
config PWM_ROCKCHIP
tristate "Rockchip PWM support"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
help
Generic PWM framework driver for the PWM controller found on
Rockchip SoCs.
config PWM_SAMSUNG
tristate "Samsung PWM support"
- depends on PLAT_SAMSUNG || ARCH_EXYNOS
+ depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
help
Generic PWM framework driver for Samsung.
@@ -415,7 +429,7 @@ config PWM_SIFIVE
config PWM_SPEAR
tristate "STMicroelectronics SPEAr PWM support"
- depends on PLAT_SPEAR
+ depends on PLAT_SPEAR || COMPILE_TEST
depends on OF
help
Generic PWM framework driver for the PWM controller on ST
@@ -437,7 +451,7 @@ config PWM_SPRD
config PWM_STI
tristate "STiH4xx PWM support"
- depends on ARCH_STI
+ depends on ARCH_STI || COMPILE_TEST
depends on OF
help
Generic PWM framework driver for STiH4xx SoCs.
@@ -447,7 +461,7 @@ config PWM_STI
config PWM_STM32
tristate "STMicroelectronics STM32 PWM"
- depends on MFD_STM32_TIMERS
+ depends on MFD_STM32_TIMERS || COMPILE_TEST
help
Generic PWM framework driver for STM32 SoCs.
@@ -483,7 +497,7 @@ config PWM_SUN4I
config PWM_TEGRA
tristate "NVIDIA Tegra PWM support"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
help
Generic PWM framework driver for the PWFM controller found on NVIDIA
Tegra SoCs.
@@ -493,7 +507,7 @@ config PWM_TEGRA
config PWM_TIECAP
tristate "ECAP PWM support"
- depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
help
PWM driver support for the ECAP APWM controller found on TI SOCs
@@ -502,7 +516,7 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
- depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3 || COMPILE_TEST
help
PWM driver support for the EHRPWM controller found on TI SOCs
@@ -529,7 +543,7 @@ config PWM_TWL_LED
config PWM_VT8500
tristate "vt8500 PWM support"
- depends on ARCH_VT8500
+ depends on ARCH_VT8500 || COMPILE_TEST
help
Generic PWM framework driver for vt8500.
@@ -538,7 +552,7 @@ config PWM_VT8500
config PWM_ZX
tristate "ZTE ZX PWM support"
- depends on ARCH_ZX
+ depends on ARCH_ZX || COMPILE_TEST
help
Generic PWM framework driver for ZTE ZX family SoCs.
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 5a7f6598c05f..9973c442b455 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -120,6 +120,9 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
if (pwm->chip->ops->get_state) {
pwm->chip->ops->get_state(pwm->chip, pwm, &pwm->state);
trace_pwm_get(pwm, &pwm->state);
+
+		if (IS_ENABLED(CONFIG_PWM_DEBUG))
+ pwm->last = pwm->state;
}
set_bit(PWMF_REQUESTED, &pwm->flags);
@@ -232,17 +235,28 @@ void *pwm_get_chip_data(struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(pwm_get_chip_data);
-static bool pwm_ops_check(const struct pwm_ops *ops)
+static bool pwm_ops_check(const struct pwm_chip *chip)
{
+	const struct pwm_ops *ops = chip->ops;
+
/* driver supports legacy, non-atomic operation */
- if (ops->config && ops->enable && ops->disable)
- return true;
+ if (ops->config && ops->enable && ops->disable) {
+ if (IS_ENABLED(CONFIG_PWM_DEBUG))
+ dev_warn(chip->dev,
+ "Driver needs updating to atomic API\n");
- /* driver supports atomic operation */
- if (ops->apply)
return true;
+ }
- return false;
+ if (!ops->apply)
+ return false;
+
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && !ops->get_state)
+ dev_warn(chip->dev,
+ "Please implement the .get_state() callback\n");
+
+ return true;
}
/**
@@ -266,7 +280,7 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
if (!chip || !chip->dev || !chip->ops || !chip->npwm)
return -EINVAL;
- if (!pwm_ops_check(chip->ops))
+ if (!pwm_ops_check(chip))
return -EINVAL;
mutex_lock(&pwm_lock);
@@ -450,6 +464,107 @@ void pwm_free(struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(pwm_free);
+static void pwm_apply_state_debug(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct pwm_state *last = &pwm->last;
+ struct pwm_chip *chip = pwm->chip;
+ struct pwm_state s1, s2;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_PWM_DEBUG))
+ return;
+
+ /* No reasonable diagnosis possible without .get_state() */
+ if (!chip->ops->get_state)
+ return;
+
+ /*
+ * *state was just applied. Read out the hardware state and do some
+ * checks.
+ */
+
+ chip->ops->get_state(chip, pwm, &s1);
+ trace_pwm_get(pwm, &s1);
+
+ /*
+	 * The lowlevel driver either ignored .polarity (which is a bug) or,
+	 * as a best effort, inverted .polarity and adjusted .duty_cycle
+	 * accordingly. Undo this inversion and fixup for further tests.
+ */
+ if (s1.enabled && s1.polarity != state->polarity) {
+ s2.polarity = state->polarity;
+ s2.duty_cycle = s1.period - s1.duty_cycle;
+ s2.period = s1.period;
+ s2.enabled = s1.enabled;
+ } else {
+ s2 = s1;
+ }
+
+ if (s2.polarity != state->polarity &&
+ state->duty_cycle < state->period)
+ dev_warn(chip->dev, ".apply ignored .polarity\n");
+
+ if (state->enabled &&
+ last->polarity == state->polarity &&
+ last->period > s2.period &&
+ last->period <= state->period)
+ dev_warn(chip->dev,
+ ".apply didn't pick the best available period (requested: %u, applied: %u, possible: %u)\n",
+ state->period, s2.period, last->period);
+
+ if (state->enabled && state->period < s2.period)
+ dev_warn(chip->dev,
+ ".apply is supposed to round down period (requested: %u, applied: %u)\n",
+ state->period, s2.period);
+
+ if (state->enabled &&
+ last->polarity == state->polarity &&
+ last->period == s2.period &&
+ last->duty_cycle > s2.duty_cycle &&
+ last->duty_cycle <= state->duty_cycle)
+ dev_warn(chip->dev,
+ ".apply didn't pick the best available duty cycle (requested: %u/%u, applied: %u/%u, possible: %u/%u)\n",
+ state->duty_cycle, state->period,
+ s2.duty_cycle, s2.period,
+ last->duty_cycle, last->period);
+
+ if (state->enabled && state->duty_cycle < s2.duty_cycle)
+ dev_warn(chip->dev,
+ ".apply is supposed to round down duty_cycle (requested: %u/%u, applied: %u/%u)\n",
+ state->duty_cycle, state->period,
+ s2.duty_cycle, s2.period);
+
+ if (!state->enabled && s2.enabled && s2.duty_cycle > 0)
+ dev_warn(chip->dev,
+ "requested disabled, but yielded enabled with duty > 0");
+
+ /* reapply the state that the driver reported being configured. */
+ err = chip->ops->apply(chip, pwm, &s1);
+ if (err) {
+ *last = s1;
+ dev_err(chip->dev, "failed to reapply current setting\n");
+ return;
+ }
+
+ trace_pwm_apply(pwm, &s1);
+
+ chip->ops->get_state(chip, pwm, last);
+ trace_pwm_get(pwm, last);
+
+ /* reapplication of the current state should give an exact match */
+ if (s1.enabled != last->enabled ||
+ s1.polarity != last->polarity ||
+ (s1.enabled && s1.period != last->period) ||
+ (s1.enabled && s1.duty_cycle != last->duty_cycle)) {
+ dev_err(chip->dev,
+ ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d %u/%u)\n",
+ s1.enabled, s1.polarity, s1.duty_cycle, s1.period,
+ last->enabled, last->polarity, last->duty_cycle,
+ last->period);
+ }
+}
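/*
 * Editorial sketch (illustrative only): one concrete mistake these
 * checks catch is a hypothetical .apply that rounds the period *up*.
 * Requesting { .period = 1000, .enabled = true } against hardware
 * that can only produce 800 or 1200 ns, with a driver that picks
 * 1200, trips
 *
 *	".apply is supposed to round down period (requested: 1000,
 *	applied: 1200)"
 *
 * because state->period < s2.period after the readback.
 */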
+
/**
* pwm_apply_state() - atomically apply a new state to a PWM device
* @pwm: PWM device
@@ -480,6 +595,12 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
trace_pwm_apply(pwm, state);
pwm->state = *state;
+
+ /*
+	 * Only do this after pwm->state was applied, as some
+	 * implementations of .get_state() depend on it.
+ */
+ pwm_apply_state_debug(pwm, state);
} else {
/*
* FIXME: restore the initial state in case of error.
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 91e24f01b54e..d78f86f8e462 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
+ pc->chip.base = -1;
pc->chip.npwm = 2;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 9145f6160649..5f3d7f7e6aef 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -18,10 +18,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 35a7ac42269c..a6e40d4c485f 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -96,9 +95,8 @@ struct pwm_imx27_chip {
#define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip)
-static int pwm_imx27_clk_prepare_enable(struct pwm_chip *chip)
+static int pwm_imx27_clk_prepare_enable(struct pwm_imx27_chip *imx)
{
- struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
int ret;
ret = clk_prepare_enable(imx->clk_ipg);
@@ -114,10 +112,8 @@ static int pwm_imx27_clk_prepare_enable(struct pwm_chip *chip)
return 0;
}
-static void pwm_imx27_clk_disable_unprepare(struct pwm_chip *chip)
+static void pwm_imx27_clk_disable_unprepare(struct pwm_imx27_chip *imx)
{
- struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
-
clk_disable_unprepare(imx->clk_per);
clk_disable_unprepare(imx->clk_ipg);
}
@@ -130,7 +126,7 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
u64 tmp;
int ret;
- ret = pwm_imx27_clk_prepare_enable(chip);
+ ret = pwm_imx27_clk_prepare_enable(imx);
if (ret < 0)
return;
@@ -174,8 +170,7 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
tmp = NSEC_PER_SEC * (u64)(val);
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
- if (!state->enabled)
- pwm_imx27_clk_disable_unprepare(chip);
+ pwm_imx27_clk_disable_unprepare(imx);
}
static void pwm_imx27_sw_reset(struct pwm_chip *chip)
@@ -259,7 +254,7 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (cstate.enabled) {
pwm_imx27_wait_fifo_slot(chip, pwm);
} else {
- ret = pwm_imx27_clk_prepare_enable(chip);
+ ret = pwm_imx27_clk_prepare_enable(imx);
if (ret)
return ret;
@@ -289,8 +284,8 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
writel(cr, imx->mmio_base + MX3_PWMCR);
- if (!state->enabled && cstate.enabled)
- pwm_imx27_clk_disable_unprepare(chip);
+ if (!state->enabled)
+ pwm_imx27_clk_disable_unprepare(imx);
return 0;
}
@@ -310,6 +305,8 @@ MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids);
static int pwm_imx27_probe(struct platform_device *pdev)
{
struct pwm_imx27_chip *imx;
+ int ret;
+ u32 pwmcr;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (imx == NULL)
@@ -352,6 +349,15 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
+ ret = pwm_imx27_clk_prepare_enable(imx);
+ if (ret)
+ return ret;
+
+ /* keep clks on if pwm is running */
+ pwmcr = readl(imx->mmio_base + MX3_PWMCR);
+ if (!(pwmcr & MX3_PWMCR_EN))
+ pwm_imx27_clk_disable_unprepare(imx);
+
return pwmchip_add(&imx->chip);
}
@@ -361,8 +367,6 @@ static int pwm_imx27_remove(struct platform_device *pdev)
imx = platform_get_drvdata(pdev);
- pwm_imx27_clk_disable_unprepare(&imx->chip);
-
return pwmchip_remove(&imx->chip);
}
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 9d78cc21cb12..3cd5c054ad9a 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -13,18 +13,19 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
+#include <linux/mfd/ingenic-tcu.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
-
-#include <asm/mach-jz4740/timer.h>
+#include <linux/regmap.h>
#define NUM_PWM 8
struct jz4740_pwm_chip {
struct pwm_chip chip;
- struct clk *clk;
+ struct regmap *map;
};
static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
@@ -32,82 +33,134 @@ static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
return container_of(chip, struct jz4740_pwm_chip, chip);
}
+static bool jz4740_pwm_can_use_chn(struct jz4740_pwm_chip *jz,
+ unsigned int channel)
+{
+ /* Enable all TCU channels for PWM use by default except channels 0/1 */
+ u32 pwm_channels_mask = GENMASK(NUM_PWM - 1, 2);
+
+ device_property_read_u32(jz->chip.dev->parent,
+ "ingenic,pwm-channels-mask",
+ &pwm_channels_mask);
+
+ return !!(pwm_channels_mask & BIT(channel));
+}
+
static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- /*
- * Timers 0 and 1 are used for system tasks, so they are unavailable
- * for use as PWMs.
- */
- if (pwm->hwpwm < 2)
+ struct jz4740_pwm_chip *jz = to_jz4740(chip);
+ struct clk *clk;
+ char name[16];
+ int err;
+
+ if (!jz4740_pwm_can_use_chn(jz, pwm->hwpwm))
return -EBUSY;
- jz4740_timer_start(pwm->hwpwm);
+ snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
+
+ clk = clk_get(chip->dev, name);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(chip->dev, "Failed to get clock: %pe", clk);
+
+ return PTR_ERR(clk);
+ }
+
+ err = clk_prepare_enable(clk);
+ if (err < 0) {
+ clk_put(clk);
+ return err;
+ }
+
+ pwm_set_chip_data(pwm, clk);
return 0;
}
static void jz4740_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- jz4740_timer_set_ctrl(pwm->hwpwm, 0);
+ struct clk *clk = pwm_get_chip_data(pwm);
- jz4740_timer_stop(pwm->hwpwm);
+ clk_disable_unprepare(clk);
+ clk_put(clk);
}
static int jz4740_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- uint32_t ctrl = jz4740_timer_get_ctrl(pwm->pwm);
+ struct jz4740_pwm_chip *jz = to_jz4740(chip);
+
+ /* Enable PWM output */
+ regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_EN, TCU_TCSR_PWM_EN);
- ctrl |= JZ_TIMER_CTRL_PWM_ENABLE;
- jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
- jz4740_timer_enable(pwm->hwpwm);
+ /* Start counter */
+ regmap_write(jz->map, TCU_REG_TESR, BIT(pwm->hwpwm));
return 0;
}
static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- uint32_t ctrl = jz4740_timer_get_ctrl(pwm->hwpwm);
+ struct jz4740_pwm_chip *jz = to_jz4740(chip);
/*
* Set duty > period. This trick allows the TCU channels in TCU2 mode to
* properly return to their init level.
*/
- jz4740_timer_set_duty(pwm->hwpwm, 0xffff);
- jz4740_timer_set_period(pwm->hwpwm, 0x0);
+ regmap_write(jz->map, TCU_REG_TDHRc(pwm->hwpwm), 0xffff);
+ regmap_write(jz->map, TCU_REG_TDFRc(pwm->hwpwm), 0x0);
/*
* Disable PWM output.
* In TCU2 mode (channel 1/2 on JZ4750+), this must be done before the
* counter is stopped, while in TCU1 mode the order does not matter.
*/
- ctrl &= ~JZ_TIMER_CTRL_PWM_ENABLE;
- jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+ regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_EN, 0);
/* Stop counter */
- jz4740_timer_disable(pwm->hwpwm);
+ regmap_write(jz->map, TCU_REG_TECR, BIT(pwm->hwpwm));
}
static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip);
- unsigned long long tmp;
+ unsigned long long tmp = 0xffffull * NSEC_PER_SEC;
+ struct clk *clk = pwm_get_chip_data(pwm);
unsigned long period, duty;
- unsigned int prescaler = 0;
- uint16_t ctrl;
+ long rate;
+ int err;
- tmp = (unsigned long long)clk_get_rate(jz4740->clk) * state->period;
- do_div(tmp, 1000000000);
- period = tmp;
+ /*
+ * Limit the clock to a maximum rate that still gives us a period value
+ * which fits in 16 bits.
+ */
+ do_div(tmp, state->period);
- while (period > 0xffff && prescaler < 6) {
- period >>= 2;
- ++prescaler;
+ /*
+ * /!\ IMPORTANT NOTE:
+ * -------------------
+ * This code relies on the fact that clk_round_rate() will always round
+ * down, which is not a valid assumption given by the clk API, but only
+ * happens to be true with the clk drivers used for Ingenic SoCs.
+ *
+ * Right now, there is no alternative as the clk API does not have a
+ * round-down function (and won't have one for a while), but if it ever
+ * comes to light, a round-down function should be used instead.
+ */
+ rate = clk_round_rate(clk, tmp);
+ if (rate < 0) {
+ dev_err(chip->dev, "Unable to round rate: %ld", rate);
+ return rate;
}
- if (prescaler == 6)
- return -EINVAL;
+ /* Calculate period value */
+ tmp = (unsigned long long)rate * state->period;
+ do_div(tmp, NSEC_PER_SEC);
+ period = (unsigned long)tmp;
+ /* Calculate duty value */
tmp = (unsigned long long)period * state->duty_cycle;
do_div(tmp, state->period);
duty = period - tmp;
@@ -117,26 +170,38 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
jz4740_pwm_disable(chip, pwm);
- jz4740_timer_set_count(pwm->hwpwm, 0);
- jz4740_timer_set_duty(pwm->hwpwm, duty);
- jz4740_timer_set_period(pwm->hwpwm, period);
+ err = clk_set_rate(clk, rate);
+ if (err) {
+ dev_err(chip->dev, "Unable to set rate: %d", err);
+ return err;
+ }
+
+ /* Reset counter to 0 */
+ regmap_write(jz4740->map, TCU_REG_TCNTc(pwm->hwpwm), 0);
- ctrl = JZ_TIMER_CTRL_PRESCALER(prescaler) | JZ_TIMER_CTRL_SRC_EXT |
- JZ_TIMER_CTRL_PWM_ABBRUPT_SHUTDOWN;
+ /* Set duty */
+ regmap_write(jz4740->map, TCU_REG_TDHRc(pwm->hwpwm), duty);
- jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+ /* Set period */
+ regmap_write(jz4740->map, TCU_REG_TDFRc(pwm->hwpwm), period);
+ /* Set abrupt shutdown */
+ regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_SD, TCU_TCSR_PWM_SD);
+
+ /* Set polarity */
switch (state->polarity) {
case PWM_POLARITY_NORMAL:
- ctrl &= ~JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
+ regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_INITL_HIGH, 0);
break;
case PWM_POLARITY_INVERSED:
- ctrl |= JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
+ regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_INITL_HIGH,
+ TCU_TCSR_PWM_INITL_HIGH);
break;
}
- jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
-
if (state->enabled)
jz4740_pwm_enable(chip, pwm);
@@ -152,17 +217,20 @@ static const struct pwm_ops jz4740_pwm_ops = {
static int jz4740_pwm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct jz4740_pwm_chip *jz4740;
- jz4740 = devm_kzalloc(&pdev->dev, sizeof(*jz4740), GFP_KERNEL);
+ jz4740 = devm_kzalloc(dev, sizeof(*jz4740), GFP_KERNEL);
if (!jz4740)
return -ENOMEM;
- jz4740->clk = devm_clk_get(&pdev->dev, "ext");
- if (IS_ERR(jz4740->clk))
- return PTR_ERR(jz4740->clk);
+ jz4740->map = device_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(jz4740->map)) {
+ dev_err(dev, "regmap not found: %ld\n", PTR_ERR(jz4740->map));
+ return PTR_ERR(jz4740->map);
+ }
- jz4740->chip.dev = &pdev->dev;
+ jz4740->chip.dev = dev;
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = NUM_PWM;
jz4740->chip.base = -1;
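/*
 * Editorial worked example (not part of the patch), tracing the
 * apply() math above for state->period = 1000000 ns and
 * state->duty_cycle = 250000 ns (1 kHz, 25%):
 *
 *	max rate = 0xffff * 10^9 / 1000000 = 65535000 Hz
 *	clk_round_rate() might settle on e.g. 12 MHz, giving
 *	period = 12000000 * 1000000 / 10^9 = 12000 counts
 *	duty   = 12000 - 12000 * 250000 / 1000000 = 9000
 *
 * so TDFR gets 12000 and TDHR gets 9000 -- the driver stores period
 * minus the active counts in TDHR, consistent with the duty > period
 * trick used in jz4740_pwm_disable() to restore the init level.
 */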
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 6245bbdb6e6c..bd0d7336b898 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -136,7 +136,7 @@ static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
dev_err(dev, "failed to set parent %s for %s: %d\n",
__clk_get_name(channel->clk_parent),
__clk_get_name(channel->clk), err);
- return err;
+ return err;
}
}
@@ -163,7 +163,7 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
unsigned int duty, period, pre_div, cnt, duty_cnt;
- unsigned long fin_freq = -1;
+ unsigned long fin_freq;
duty = state->duty_cycle;
period = state->period;
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index f2e57fcf8f8b..7ce616923c52 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 88a3c5690fea..0d31833db2e2 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -10,7 +10,27 @@
*
* Description:
* This file is the core OMAP support for the generic, Linux
- * PWM driver / controller, using the OMAP's dual-mode timers.
+ * PWM driver / controller, using the OMAP's dual-mode timers
+ * with a timer counter that counts up. When it overflows, it is
+ * reloaded with the load value and the pwm output goes high.
+ * When the counter matches the match register, the output goes low.
+ * Reference Manual: http://www.ti.com/lit/ug/spruh73q/spruh73q.pdf
+ *
+ * Limitations:
+ * - When PWM is stopped, timer counter gets stopped immediately. This
+ * doesn't allow the current PWM period to complete and stops abruptly.
+ * - When PWM is running and changing both duty cycle and period,
+ * we cannot prevent in software that the output might produce
+ * a period with mixed settings. Especially when period/duty_cycle
+ * is updated while the pwm pin is high, the current pwm period/duty_cycle
+ * can get updated as below based on the current timer counter:
+ * - period for current cycle = current_period + new period
+ * - duty_cycle for current period = current period + new duty_cycle.
+ * - PWM OMAP DM timer cannot change the polarity when pwm is active. When
+ * user requests a change in polarity when in active state:
+ * - PWM is stopped abruptly (without completing the current cycle)
+ * - Polarity is changed
+ * - A fresh cycle is started.
*/
#include <linux/clk.h>
@@ -20,8 +40,8 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <clocksource/timer-ti-dm.h>
#include <linux/platform_data/dmtimer-omap.h>
-#include <linux/platform_data/pwm_omap_dmtimer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
@@ -31,10 +51,20 @@
#define DM_TIMER_LOAD_MIN 0xfffffffe
#define DM_TIMER_MAX 0xffffffff
+/**
+ * struct pwm_omap_dmtimer_chip - Structure representing a pwm chip
+ * corresponding to omap dmtimer.
+ * @chip: PWM chip structure representing PWM controller
+ * @mutex: Mutex to protect pwm apply state
+ * @dm_timer: Pointer to omap dm timer.
+ * @pdata: Pointer to omap dm timer ops.
+ * @dm_timer_pdev: Pointer to omap dm timer platform device
+ */
struct pwm_omap_dmtimer_chip {
struct pwm_chip chip;
+ /* Mutex to protect pwm apply state */
struct mutex mutex;
- pwm_omap_dmtimer *dm_timer;
+ struct omap_dm_timer *dm_timer;
const struct omap_dm_timer_ops *pdata;
struct platform_device *dm_timer_pdev;
};
@@ -45,11 +75,22 @@ to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
}
+/**
+ * pwm_omap_dmtimer_get_clock_cycles() - Get clock cycles in a time frame
+ * @clk_rate: pwm timer clock rate
+ * @ns: time frame in nano seconds.
+ *
+ * Return the number of clock cycles in a given period (in ns).
+ */
static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
{
return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
}
+/**
+ * pwm_omap_dmtimer_start() - Start the pwm omap dm timer in pwm mode
+ * @omap: Pointer to pwm omap dm timer chip
+ */
static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
{
/*
@@ -67,28 +108,46 @@ static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
omap->pdata->start(omap->dm_timer);
}
-static int pwm_omap_dmtimer_enable(struct pwm_chip *chip,
- struct pwm_device *pwm)
+/**
+ * pwm_omap_dmtimer_is_enabled() - Detect if the pwm is enabled.
+ * @omap: Pointer to pwm omap dm timer chip
+ *
+ * Return: true if the pwm is enabled, false otherwise.
+ */
+static bool pwm_omap_dmtimer_is_enabled(struct pwm_omap_dmtimer_chip *omap)
{
- struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+ u32 status;
- mutex_lock(&omap->mutex);
- pwm_omap_dmtimer_start(omap);
- mutex_unlock(&omap->mutex);
+ status = omap->pdata->get_pwm_status(omap->dm_timer);
- return 0;
+ return !!(status & OMAP_TIMER_CTRL_ST);
}
-static void pwm_omap_dmtimer_disable(struct pwm_chip *chip,
- struct pwm_device *pwm)
+/**
+ * pwm_omap_dmtimer_polarity() - Detect the polarity of pwm.
+ * @omap: Pointer to pwm omap dm timer chip
+ *
+ * Return: the polarity of the pwm.
+ */
+static int pwm_omap_dmtimer_polarity(struct pwm_omap_dmtimer_chip *omap)
{
- struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+ u32 status;
- mutex_lock(&omap->mutex);
- omap->pdata->stop(omap->dm_timer);
- mutex_unlock(&omap->mutex);
+ status = omap->pdata->get_pwm_status(omap->dm_timer);
+
+ return !!(status & OMAP_TIMER_CTRL_SCPWM);
}
+/**
+ * pwm_omap_dmtimer_config() - Update the configuration of pwm omap dm timer
+ * @chip: Pointer to PWM controller
+ * @pwm: Pointer to PWM channel
+ * @duty_ns: New duty cycle in nano seconds
+ * @period_ns: New period in nano seconds
+ *
+ * Return: 0 if the period/duty_cycle was changed successfully, an
+ * appropriate error code otherwise.
+ */
static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
struct pwm_device *pwm,
int duty_ns, int period_ns)
@@ -96,31 +155,26 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
u32 period_cycles, duty_cycles;
u32 load_value, match_value;
- struct clk *fclk;
unsigned long clk_rate;
- bool timer_active;
+ struct clk *fclk;
dev_dbg(chip->dev, "requested duty cycle: %d ns, period: %d ns\n",
duty_ns, period_ns);
- mutex_lock(&omap->mutex);
if (duty_ns == pwm_get_duty_cycle(pwm) &&
- period_ns == pwm_get_period(pwm)) {
- /* No change - don't cause any transients. */
- mutex_unlock(&omap->mutex);
+ period_ns == pwm_get_period(pwm))
return 0;
- }
fclk = omap->pdata->get_fclk(omap->dm_timer);
if (!fclk) {
dev_err(chip->dev, "invalid pmtimer fclk\n");
- goto err_einval;
+ return -EINVAL;
}
clk_rate = clk_get_rate(fclk);
if (!clk_rate) {
dev_err(chip->dev, "invalid pmtimer fclk rate\n");
- goto err_einval;
+ return -EINVAL;
}
dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
@@ -148,7 +202,7 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
dev_info(chip->dev,
"period %d ns too short for clock rate %lu Hz\n",
period_ns, clk_rate);
- goto err_einval;
+ return -EINVAL;
}
if (duty_cycles < 1) {
@@ -174,79 +228,103 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
load_value = (DM_TIMER_MAX - period_cycles) + 1;
match_value = load_value + duty_cycles - 1;
- /*
- * We MUST stop the associated dual-mode timer before attempting to
- * write its registers, but calls to omap_dm_timer_start/stop must
- * be balanced so check if timer is active before calling timer_stop.
- */
- timer_active = pm_runtime_active(&omap->dm_timer_pdev->dev);
- if (timer_active)
- omap->pdata->stop(omap->dm_timer);
-
- omap->pdata->set_load(omap->dm_timer, true, load_value);
+ omap->pdata->set_load(omap->dm_timer, load_value);
omap->pdata->set_match(omap->dm_timer, true, match_value);
dev_dbg(chip->dev, "load value: %#08x (%d), match value: %#08x (%d)\n",
load_value, load_value, match_value, match_value);
- omap->pdata->set_pwm(omap->dm_timer,
- pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED,
- true,
- PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE);
-
- /* If config was called while timer was running it must be reenabled. */
- if (timer_active)
- pwm_omap_dmtimer_start(omap);
+ return 0;
+}
- mutex_unlock(&omap->mutex);
+/**
+ * pwm_omap_dmtimer_set_polarity() - Changes the polarity of the pwm dm timer.
+ * @chip: Pointer to PWM controller
+ * @pwm: Pointer to PWM channel
+ * @polarity: New pwm polarity to be set
+ */
+static void pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+ bool enabled;
- return 0;
+ /* Disable the PWM before changing the polarity. */
+ enabled = pwm_omap_dmtimer_is_enabled(omap);
+ if (enabled)
+ omap->pdata->stop(omap->dm_timer);
-err_einval:
- mutex_unlock(&omap->mutex);
+ omap->pdata->set_pwm(omap->dm_timer,
+ polarity == PWM_POLARITY_INVERSED,
+ true, OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE,
+ true);
- return -EINVAL;
+ if (enabled)
+ pwm_omap_dmtimer_start(omap);
}
-static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
+/**
+ * pwm_omap_dmtimer_apply() - Changes the state of the pwm omap dm timer.
+ * @chip: Pointer to PWM controller
+ * @pwm: Pointer to PWM channel
+ * @state: New state to apply
+ *
+ * Return: 0 if the state was applied successfully, an appropriate error otherwise.
+ */
+static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+ int ret = 0;
- /*
- * PWM core will not call set_polarity while PWM is enabled so it's
- * safe to reconfigure the timer here without stopping it first.
- */
mutex_lock(&omap->mutex);
- omap->pdata->set_pwm(omap->dm_timer,
- polarity == PWM_POLARITY_INVERSED,
- true,
- PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE);
+
+ if (pwm_omap_dmtimer_is_enabled(omap) && !state->enabled) {
+ omap->pdata->stop(omap->dm_timer);
+ goto unlock_mutex;
+ }
+
+ if (pwm_omap_dmtimer_polarity(omap) != state->polarity)
+ pwm_omap_dmtimer_set_polarity(chip, pwm, state->polarity);
+
+ ret = pwm_omap_dmtimer_config(chip, pwm, state->duty_cycle,
+ state->period);
+ if (ret)
+ goto unlock_mutex;
+
+ if (!pwm_omap_dmtimer_is_enabled(omap) && state->enabled) {
+ omap->pdata->set_pwm(omap->dm_timer,
+ state->polarity == PWM_POLARITY_INVERSED,
+ true,
+ OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE,
+ true);
+ pwm_omap_dmtimer_start(omap);
+ }
+
+unlock_mutex:
mutex_unlock(&omap->mutex);
- return 0;
+ return ret;
}
static const struct pwm_ops pwm_omap_dmtimer_ops = {
- .enable = pwm_omap_dmtimer_enable,
- .disable = pwm_omap_dmtimer_disable,
- .config = pwm_omap_dmtimer_config,
- .set_polarity = pwm_omap_dmtimer_set_polarity,
+ .apply = pwm_omap_dmtimer_apply,
.owner = THIS_MODULE,
};
static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *timer;
- struct platform_device *timer_pdev;
- struct pwm_omap_dmtimer_chip *omap;
struct dmtimer_platform_data *timer_pdata;
const struct omap_dm_timer_ops *pdata;
- pwm_omap_dmtimer *dm_timer;
- u32 v;
+ struct platform_device *timer_pdev;
+ struct pwm_omap_dmtimer_chip *omap;
+ struct omap_dm_timer *dm_timer;
+ struct device_node *timer;
int ret = 0;
+ u32 v;
timer = of_parse_phandle(np, "ti,timers", 0);
if (!timer)
@@ -279,6 +357,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
!pdata->set_load ||
!pdata->set_match ||
!pdata->set_pwm ||
+ !pdata->get_pwm_status ||
!pdata->set_prescaler ||
!pdata->write_counter) {
dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index b07bdca3d510..76cd22bd6614 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/bitmap.h>
/*
* Because the PCA9685 has only one prescaler per chip, changing the period of
@@ -69,11 +70,11 @@
struct pca9685 {
struct pwm_chip chip;
struct regmap *regmap;
- int duty_ns;
int period_ns;
#if IS_ENABLED(CONFIG_GPIOLIB)
struct mutex lock;
struct gpio_chip gpio;
+ DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1);
#endif
};
@@ -83,51 +84,51 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip)
}
#if IS_ENABLED(CONFIG_GPIOLIB)
-static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
+static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm;
+ bool is_inuse;
mutex_lock(&pca->lock);
-
- pwm = &pca->chip.pwms[offset];
-
- if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) {
- mutex_unlock(&pca->lock);
- return -EBUSY;
+ if (pwm_idx >= PCA9685_MAXCHAN) {
+ /*
+ * "all LEDs" channel:
+ * pretend already in use if any of the PWMs are requested
+ */
+ if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) {
+ is_inuse = true;
+ goto out;
+ }
+ } else {
+ /*
+ * regular channel:
+ * pretend already in use if the "all LEDs" channel is requested
+ */
+ if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) {
+ is_inuse = true;
+ goto out;
+ }
}
-
- pwm_set_chip_data(pwm, (void *)1);
-
+ is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse);
+out:
mutex_unlock(&pca->lock);
- pm_runtime_get_sync(pca->chip.dev);
- return 0;
+ return is_inuse;
}
-static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm)
+static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
{
- bool is_gpio = false;
-
mutex_lock(&pca->lock);
+ clear_bit(pwm_idx, pca->pwms_inuse);
+ mutex_unlock(&pca->lock);
+}
- if (pwm->hwpwm >= PCA9685_MAXCHAN) {
- unsigned int i;
-
- /*
- * Check if any of the GPIOs are requested and in that case
- * prevent using the "all LEDs" channel.
- */
- for (i = 0; i < pca->gpio.ngpio; i++)
- if (gpiochip_is_requested(&pca->gpio, i)) {
- is_gpio = true;
- break;
- }
- } else if (pwm_get_chip_data(pwm)) {
- is_gpio = true;
- }
+static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct pca9685 *pca = gpiochip_get_data(gpio);
- mutex_unlock(&pca->lock);
- return is_gpio;
+ if (pca9685_pwm_test_and_set_inuse(pca, offset))
+ return -EBUSY;
+ pm_runtime_get_sync(pca->chip.dev);
+ return 0;
}
static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
@@ -162,13 +163,14 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
pca9685_pwm_gpio_set(gpio, offset, 0);
pm_runtime_put(pca->chip.dev);
+ pca9685_pwm_clear_inuse(pca, offset);
}
static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
/* Always out */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int pca9685_pwm_gpio_direction_input(struct gpio_chip *gpio,
@@ -213,12 +215,17 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
return devm_gpiochip_add_data(dev, &pca->gpio, pca);
}
#else
-static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca,
- struct pwm_device *pwm)
+static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca,
+ int pwm_idx)
{
return false;
}
+static inline void
+pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
+{
+}
+
static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
{
return 0;
@@ -272,8 +279,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
}
- pca->duty_ns = duty_ns;
-
if (duty_ns < 1) {
if (pwm->hwpwm >= PCA9685_MAXCHAN)
reg = PCA9685_ALL_LED_OFF_H;
@@ -402,7 +407,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
- if (pca9685_pwm_is_gpio(pca, pwm))
+ if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm))
return -EBUSY;
pm_runtime_get_sync(chip->dev);
@@ -411,8 +416,11 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct pca9685 *pca = to_pca(chip);
+
pca9685_pwm_disable(chip, pwm);
pm_runtime_put(chip->dev);
+ pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
static const struct pwm_ops pca9685_pwm_ops = {
@@ -449,7 +457,6 @@ static int pca9685_pwm_probe(struct i2c_client *client,
ret);
return ret;
}
- pca->duty_ns = 0;
pca->period_ns = PCA9685_DEFAULT_PERIOD;
i2c_set_clientdata(client, pca);
@@ -512,8 +519,7 @@ static int pca9685_pwm_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-static int pca9685_pwm_runtime_suspend(struct device *dev)
+static int __maybe_unused pca9685_pwm_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pca9685 *pca = i2c_get_clientdata(client);
@@ -522,7 +528,7 @@ static int pca9685_pwm_runtime_suspend(struct device *dev)
return 0;
}
-static int pca9685_pwm_runtime_resume(struct device *dev)
+static int __maybe_unused pca9685_pwm_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pca9685 *pca = i2c_get_clientdata(client);
@@ -530,7 +536,6 @@ static int pca9685_pwm_runtime_resume(struct device *dev)
pca9685_set_sleep_mode(pca, false);
return 0;
}
-#endif
static const struct i2c_device_id pca9685_id[] = {
{ "pca9685", 0 },
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 2685577b6dd4..7ab9eb6616d9 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -229,24 +229,28 @@ static int rcar_pwm_probe(struct platform_device *pdev)
rcar_pwm->chip.base = -1;
rcar_pwm->chip.npwm = 1;
+ pm_runtime_enable(&pdev->dev);
+
ret = pwmchip_add(&rcar_pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
- pm_runtime_enable(&pdev->dev);
-
return 0;
}
static int rcar_pwm_remove(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwmchip_remove(&rcar_pwm->chip);
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&rcar_pwm->chip);
+ return ret;
}
static const struct of_device_id rcar_pwm_of_table[] = {
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 4a855a21b782..81ad5a551455 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -415,16 +415,15 @@ static int tpu_probe(struct platform_device *pdev)
tpu->chip.base = -1;
tpu->chip.npwm = TPU_CHANNEL_MAX;
+ pm_runtime_enable(&pdev->dev);
+
ret = pwmchip_add(&tpu->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip\n");
+ pm_runtime_disable(&pdev->dev);
return ret;
}
- dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id);
-
- pm_runtime_enable(&pdev->dev);
-
return 0;
}
@@ -434,12 +433,10 @@ static int tpu_remove(struct platform_device *pdev)
int ret;
ret = pwmchip_remove(&tpu->chip);
- if (ret)
- return ret;
pm_runtime_disable(&pdev->dev);
- return 0;
+ return ret;
}
#ifdef CONFIG_OF
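Editor's note: both the pwm-rcar and pwm-renesas-tpu hunks above move pm_runtime_enable() ahead of pwmchip_add(), since the chip's callbacks can run as soon as it is registered, and add a disable on the failure path so the enable/disable calls stay balanced. A generic sketch of the resulting ordering follows; the driver name and the foo_pwm_chip type are hypothetical, and the rest of the chip setup is elided.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/slab.h>

struct foo_pwm_chip {
	struct pwm_chip chip;
};

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct foo_pwm_chip *foo;
	int ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;
	platform_set_drvdata(pdev, foo);

	pm_runtime_enable(&pdev->dev);		/* before the chip goes live */

	ret = pwmchip_add(&foo->chip);		/* callbacks may run from here on */
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);	/* keep enable/disable balanced */
		return ret;
	}

	return 0;
}

static int foo_pwm_remove(struct platform_device *pdev)
{
	struct foo_pwm_chip *foo = platform_get_drvdata(pdev);
	int ret;

	ret = pwmchip_remove(&foo->chip);	/* no callbacks after this */
	pm_runtime_disable(&pdev->dev);		/* always undo, even on error */
	return ret;
}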
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 3e3efa6c768f..5c677c563349 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -90,7 +90,6 @@ struct sun4i_pwm_chip {
spinlock_t ctrl_lock;
const struct sun4i_pwm_data *data;
unsigned long next_period[2];
- bool needs_delay[2];
};
static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
@@ -287,7 +286,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
usecs_to_jiffies(cstate.period / 1000 + 1);
- sun4i_pwm->needs_delay[pwm->hwpwm] = true;
if (state->polarity != PWM_POLARITY_NORMAL)
ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
@@ -298,7 +296,7 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled) {
ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
- } else if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
+ } else {
ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
}
@@ -310,15 +308,9 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled)
return 0;
- if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
- clk_disable_unprepare(sun4i_pwm->clk);
- return 0;
- }
-
/* We need a full period to elapse before disabling the channel. */
now = jiffies;
- if (sun4i_pwm->needs_delay[pwm->hwpwm] &&
- time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) {
+ if (time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) {
delay_us = jiffies_to_usecs(sun4i_pwm->next_period[pwm->hwpwm] -
now);
if ((delay_us / 500) > MAX_UDELAY_MS)
@@ -326,7 +318,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
else
usleep_range(delay_us, delay_us * 2);
}
- sun4i_pwm->needs_delay[pwm->hwpwm] = false;
spin_lock(&sun4i_pwm->ctrl_lock);
ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
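Editor's note: with needs_delay gone, the sun4i disable path above always waits until next_period (recorded at apply time) before clearing the enable and clock-gating bits, so the last programmed period completes. A userspace sketch of the jiffies arithmetic; HZ = 100 and the timestamps are assumptions for the example.

#include <stdio.h>

#define HZ 100	/* assumed tick rate for the example */

static unsigned long usecs_to_jiffies(unsigned long us)
{
	return (us * HZ + 999999) / 1000000;	/* round up (approximates the kernel helper) */
}

static unsigned long jiffies_to_usecs(unsigned long j)
{
	return j * (1000000 / HZ);
}

int main(void)
{
	unsigned long now = 1000;		/* "jiffies" when disable runs */
	unsigned long period_ns = 20000000;	/* 20 ms period, assumed */
	/* recorded at apply time (here: tick 999), as in the hunk above */
	unsigned long next_period = 999 + usecs_to_jiffies(period_ns / 1000 + 1);

	if (now < next_period)			/* time_before(now, next_period) */
		printf("sleep %lu us before gating the channel off\n",
		       jiffies_to_usecs(next_period - now));
	return 0;
}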
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index aa12fb3ed92e..d26ed8f579ff 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -282,9 +282,15 @@ static const struct tegra_pwm_soc tegra186_pwm_soc = {
.max_frequency = 102000000UL,
};
+static const struct tegra_pwm_soc tegra194_pwm_soc = {
+ .num_channels = 1,
+ .max_frequency = 408000000UL,
+};
+
static const struct of_device_id tegra_pwm_of_match[] = {
{ .compatible = "nvidia,tegra20-pwm", .data = &tegra20_pwm_soc },
{ .compatible = "nvidia,tegra186-pwm", .data = &tegra186_pwm_soc },
+ { .compatible = "nvidia,tegra194-pwm", .data = &tegra194_pwm_soc },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 8155f59ece38..10af330153b5 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -877,6 +877,11 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
rmcd_error("pinned %ld out of %ld pages",
pinned, nr_pages);
ret = -EFAULT;
+ /*
+ * Set nr_pages to mean "how many pages to unpin" in
+ * the error handler:
+ */
+ nr_pages = pinned;
goto err_pg;
}
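Editor's note: the fix above sets nr_pages to the number of pages actually pinned, so the error path unpins exactly that many instead of the full request. A sketch of the pattern follows; pin_user_pages_fast/unpin_user_pages are the contemporary API, and whether the driver uses this exact variant is not visible in the hunk.

#include <linux/mm.h>

static int pin_for_dma_example(unsigned long addr, int nr_pages,
			       struct page **page_list)
{
	int pinned, ret = 0;

	pinned = pin_user_pages_fast(addr, nr_pages, FOLL_WRITE, page_list);
	if (pinned == nr_pages)
		return 0;

	if (pinned < 0) {
		ret = pinned;
		nr_pages = 0;		/* nothing was pinned */
	} else {
		ret = -EFAULT;
		nr_pages = pinned;	/* unpin only what we actually got */
	}
	unpin_user_pages(page_list, nr_pages);
	return ret;
}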
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 074a2ef55943..f4b72cb098ef 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -107,6 +107,7 @@ config REGULATOR_AD5398
config REGULATOR_ANATOP
tristate "Freescale i.MX on-chip ANATOP LDO regulators"
+ depends on ARCH_MXC || COMPILE_TEST
depends on MFD_SYSCON
help
Say y here to support Freescale i.MX on-chip ANATOP LDOs
@@ -613,6 +614,16 @@ config REGULATOR_MCP16502
through the regulator interface. In addition it enables
suspend-to-ram/standby transition.
+config REGULATOR_MP5416
+ tristate "Monolithic MP5416 PMIC"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ Say y here to support the MP5416 PMIC. This enables support for
+ its four software-controllable buck and four LDO regulators.
+ Say M here if you want to include support for the regulator as a
+ module.
+
config REGULATOR_MP8859
tristate "MPS MP8859 regulator driver"
depends on I2C
@@ -624,6 +635,13 @@ config REGULATOR_MP8859
Say M here if you want to include support for the regulator as a
module. The module will be named "mp8859".
+config REGULATOR_MP886X
+ tristate "MPS MP8869 regulator driver"
+ depends on I2C && (OF || COMPILE_TEST)
+ select REGMAP_I2C
+ help
+ This driver supports the MP8867 and MP8869 voltage regulators.
+
config REGULATOR_MPQ7920
tristate "Monolithic MPQ7920 PMIC"
depends on I2C && OF
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index c0d6b96ebd78..6610ee001d9a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -78,7 +78,9 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
+obj-$(CONFIG_REGULATOR_MP5416) += mp5416.o
obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o
+obj-$(CONFIG_REGULATOR_MP886X) += mp886x.o
obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 754739d004e5..ca92b3de0e9c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -305,9 +305,13 @@ static int anatop_regulator_probe(struct platform_device *pdev)
/* register regulator */
rdev = devm_regulator_register(dev, rdesc, &config);
if (IS_ERR(rdev)) {
- dev_err(dev, "failed to register %s\n",
- rdesc->name);
- return PTR_ERR(rdev);
+ ret = PTR_ERR(rdev);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(dev, "failed to register %s, deferring...\n",
+ rdesc->name);
+ else
+ dev_err(dev, "failed to register %s\n", rdesc->name);
+ return ret;
}
platform_set_drvdata(pdev, rdev);
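Editor's note: this hunk, like the pwm-regulator hunk later in this series, demotes the failure message to dev_dbg when the error is -EPROBE_DEFER, since deferral is a routine outcome rather than a fault. The pattern generalizes to a small helper; the name report_probe_error is hypothetical.

#include <linux/device.h>
#include <linux/errno.h>

static int report_probe_error(struct device *dev, int err, const char *what)
{
	if (err == -EPROBE_DEFER)
		dev_dbg(dev, "%s not ready, deferring probe\n", what);
	else
		dev_err(dev, "failed to get %s: %d\n", what, err);
	return err;
}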
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 16f0c8570036..1e6eb5b1f8d8 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -381,8 +381,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
mask = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE_MASK |
AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN_MASK;
enable = (ramp > 0) ?
- AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN :
- !AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN;
+ AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN : 0;
break;
}
@@ -393,8 +392,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
mask = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE_MASK |
AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN_MASK;
enable = (ramp > 0) ?
- AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN :
- !AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN;
+ AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN : 0;
break;
}
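Editor's note: the old expression !AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN is the logical negation of a nonzero constant, which evaluates to 0, so the replacement with a literal 0 is behavior-preserving and states the intent (ramp disabled) directly. A tiny userspace demonstration of why logical NOT is the wrong operator for building register values:

#include <stdio.h>

#define EN_BIT 0x04

int main(void)
{
	/* prints "!EN_BIT = 0, ~EN_BIT & 0xff = 0xfb" */
	printf("!EN_BIT = %d, ~EN_BIT & 0xff = 0x%02x\n",
	       !EN_BIT, ~EN_BIT & 0xff);
	return 0;
}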
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d015d99cb59d..7486f6e4e613 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1849,7 +1849,6 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
{
struct regulator_dev *rdev;
struct regulator *regulator;
- const char *devname = dev ? dev_name(dev) : "deviceless";
struct device_link *link;
int ret;
@@ -1887,9 +1886,7 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
* enabled, even if it isn't hooked up, and just
* provide a dummy.
*/
- dev_warn(dev,
- "%s supply %s not found, using dummy regulator\n",
- devname, id);
+ dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
rdev = dummy_regulator_rdev;
get_device(&rdev->dev);
break;
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index d3ce0278bfbe..d8112f56e94e 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -73,7 +73,7 @@ struct da9062_regulators {
int irq_ldo_lim;
unsigned n_regulators;
/* Array size to be defined during init. Keep at end. */
- struct da9062_regulator regulator[0];
+ struct da9062_regulator regulator[];
};
/* Regulator operations */
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 2aceb3b7afc2..e1d6c8f6d40b 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -66,7 +66,7 @@ struct da9063_regulator_data {
};
struct da9063_regulators_pdata {
- unsigned n_regulators;
+ unsigned int n_regulators;
struct da9063_regulator_data *regulator_data;
};
@@ -100,6 +100,7 @@ struct da9063_regulator_info {
.desc.vsel_mask = DA9063_V##regl_name##_MASK, \
.desc.linear_min_sel = DA9063_V##regl_name##_BIAS, \
.sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_LDO_SL), \
+ .suspend = BFIELD(DA9063_REG_##regl_name##_CONT, DA9063_LDO_CONF), \
.suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_LDO_SL), \
.suspend_vsel_reg = DA9063_REG_V##regl_name##_B
@@ -124,6 +125,7 @@ struct da9063_regulator_info {
.desc.vsel_mask = DA9063_VBUCK_MASK, \
.desc.linear_min_sel = DA9063_VBUCK_BIAS, \
.sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_BUCK_SL), \
+ .suspend = BFIELD(DA9063_REG_##regl_name##_CONT, DA9063_BUCK_CONF), \
.suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_BUCK_SL), \
.suspend_vsel_reg = DA9063_REG_V##regl_name##_B, \
.mode = BFIELD(DA9063_REG_##regl_name##_CFG, DA9063_BUCK_MODE_MASK)
@@ -131,7 +133,7 @@ struct da9063_regulator_info {
/* Defines assignment of regulators info table to chip model */
struct da9063_dev_model {
const struct da9063_regulator_info *regulator_info;
- unsigned n_regulators;
+ unsigned int n_regulators;
enum da9063_type type;
};
@@ -150,9 +152,9 @@ struct da9063_regulator {
/* Encapsulates all information for the regulators driver */
struct da9063_regulators {
- unsigned n_regulators;
+ unsigned int n_regulators;
/* Array size to be defined during init. Keep at end. */
- struct da9063_regulator regulator[0];
+ struct da9063_regulator regulator[];
};
/* BUCK modes for DA9063 */
@@ -165,38 +167,46 @@ enum {
/* Regulator operations */
-/* Current limits array (in uA) for BCORE1, BCORE2, BPRO.
- Entry indexes corresponds to register values. */
+/*
+ * Current limits array (in uA) for BCORE1, BCORE2, BPRO.
+ * Entry indexes correspond to register values.
+ */
static const unsigned int da9063_buck_a_limits[] = {
500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000,
1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
};
-/* Current limits array (in uA) for BMEM, BIO, BPERI.
- Entry indexes corresponds to register values. */
+/*
+ * Current limits array (in uA) for BMEM, BIO, BPERI.
+ * Entry indexes correspond to register values.
+ */
static const unsigned int da9063_buck_b_limits[] = {
1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};
-/* Current limits array (in uA) for merged BCORE1 and BCORE2.
- Entry indexes corresponds to register values. */
+/*
+ * Current limits array (in uA) for merged BCORE1 and BCORE2.
+ * Entry indexes correspond to register values.
+ */
static const unsigned int da9063_bcores_merged_limits[] = {
1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000,
2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000
};
-/* Current limits array (in uA) for merged BMEM and BIO.
- Entry indexes corresponds to register values. */
+/*
+ * Current limits array (in uA) for merged BMEM and BIO.
+ * Entry indexes correspond to register values.
+ */
static const unsigned int da9063_bmem_bio_merged_limits[] = {
3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
};
-static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
+static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- unsigned val;
+ unsigned int val;
switch (mode) {
case REGULATOR_MODE_FAST:
@@ -221,10 +231,9 @@ static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
* There are 3 modes to map to: FAST, NORMAL, and STANDBY.
*/
-static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
+static unsigned int da9063_buck_get_mode(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- struct regmap_field *field;
unsigned int val;
int ret;
@@ -245,18 +254,7 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
}
- /* Detect current regulator state */
- ret = regmap_field_read(regl->suspend, &val);
- if (ret < 0)
- return 0;
-
- /* Read regulator mode from proper register, depending on state */
- if (val)
- field = regl->suspend_sleep;
- else
- field = regl->sleep;
-
- ret = regmap_field_read(field, &val);
+ ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
@@ -271,10 +269,10 @@ static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
* There are 2 modes to map to: NORMAL and STANDBY (sleep) for each state.
*/
-static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned mode)
+static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- unsigned val;
+ unsigned int val;
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -290,24 +288,12 @@ static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned mode)
return regmap_field_write(regl->sleep, val);
}
-static unsigned da9063_ldo_get_mode(struct regulator_dev *rdev)
+static unsigned int da9063_ldo_get_mode(struct regulator_dev *rdev)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- struct regmap_field *field;
int ret, val;
- /* Detect current regulator state */
- ret = regmap_field_read(regl->suspend, &val);
- if (ret < 0)
- return 0;
-
- /* Read regulator mode from proper register, depending on state */
- if (val)
- field = regl->suspend_sleep;
- else
- field = regl->sleep;
-
- ret = regmap_field_read(field, &val);
+ ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
@@ -383,7 +369,8 @@ static int da9063_suspend_disable(struct regulator_dev *rdev)
return regmap_field_write(regl->suspend, 0);
}
-static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, unsigned mode)
+static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev,
+ unsigned int mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
int val;
@@ -405,10 +392,11 @@ static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, unsigned mod
return regmap_field_write(regl->mode, val);
}
-static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev, unsigned mode)
+static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev,
+ unsigned int mode)
{
struct da9063_regulator *regl = rdev_get_drvdata(rdev);
- unsigned val;
+ unsigned int val;
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -465,42 +453,36 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
da9063_buck_a_limits,
DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BCORE1),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL),
},
{
DA9063_BUCK(DA9063, BCORE2, 300, 10, 1570,
da9063_buck_a_limits,
DA9063_REG_BUCK_ILIM_C, DA9063_BCORE2_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BCORE2),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE2_SEL),
},
{
DA9063_BUCK(DA9063, BPRO, 530, 10, 1800,
da9063_buck_a_limits,
DA9063_REG_BUCK_ILIM_B, DA9063_BPRO_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BPRO),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPRO_SEL),
},
{
DA9063_BUCK(DA9063, BMEM, 800, 20, 3340,
da9063_buck_b_limits,
DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BMEM),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL),
},
{
DA9063_BUCK(DA9063, BIO, 800, 20, 3340,
da9063_buck_b_limits,
DA9063_REG_BUCK_ILIM_A, DA9063_BIO_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BIO),
- .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VBIO_SEL),
},
{
DA9063_BUCK(DA9063, BPERI, 800, 20, 3340,
da9063_buck_b_limits,
DA9063_REG_BUCK_ILIM_B, DA9063_BPERI_ILIM_MASK),
DA9063_BUCK_COMMON_FIELDS(BPERI),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPERI_SEL),
},
{
DA9063_BUCK(DA9063, BCORES_MERGED, 300, 10, 1570,
@@ -508,7 +490,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK),
/* BCORES_MERGED uses the same register fields as BCORE1 */
DA9063_BUCK_COMMON_FIELDS(BCORE1),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL),
},
{
DA9063_BUCK(DA9063, BMEM_BIO_MERGED, 800, 20, 3340,
@@ -516,21 +497,17 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK),
/* BMEM_BIO_MERGED uses the same register fields as BMEM */
DA9063_BUCK_COMMON_FIELDS(BMEM),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL),
},
{
DA9063_LDO(DA9063, LDO3, 900, 20, 3440),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO3_SEL),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO3_LIM),
},
{
DA9063_LDO(DA9063, LDO7, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO7_CONT, DA9063_VLDO7_SEL),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO7_LIM),
},
{
DA9063_LDO(DA9063, LDO8, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO8_CONT, DA9063_VLDO8_SEL),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO8_LIM),
},
{
@@ -539,36 +516,29 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
},
{
DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO11_CONT, DA9063_VLDO11_SEL),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO11_LIM),
},
/* The following LDOs are present only on DA9063, not on DA9063L */
{
DA9063_LDO(DA9063, LDO1, 600, 20, 1860),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO1_SEL),
},
{
DA9063_LDO(DA9063, LDO2, 600, 20, 1860),
- .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO2_SEL),
},
{
DA9063_LDO(DA9063, LDO4, 900, 20, 3440),
- .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VLDO4_SEL),
.oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO4_LIM),
},
{
DA9063_LDO(DA9063, LDO5, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO5_CONT, DA9063_VLDO5_SEL),
},
{
DA9063_LDO(DA9063, LDO6, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO6_CONT, DA9063_VLDO6_SEL),
},
{
DA9063_LDO(DA9063, LDO10, 900, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO10_CONT, DA9063_VLDO10_SEL),
},
};
@@ -593,7 +563,7 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
struct da9063_regulators *regulators = data;
struct da9063 *hw = regulators->regulator[0].hw;
struct da9063_regulator *regl;
- int bits, i , ret;
+ int bits, i, ret;
ret = regmap_read(hw->regmap, DA9063_REG_STATUS_D, &bits);
if (ret < 0)
@@ -605,10 +575,10 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
continue;
if (BIT(regl->info->oc_event.lsb) & bits) {
- regulator_lock(regl->rdev);
+ regulator_lock(regl->rdev);
regulator_notifier_call_chain(regl->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
- regulator_unlock(regl->rdev);
+ regulator_unlock(regl->rdev);
}
}
@@ -833,7 +803,7 @@ static int da9063_regulator_probe(struct platform_device *pdev)
if (regl->info->suspend_sleep.reg) {
regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev,
- da9063->regmap, regl->info->suspend_sleep);
+ da9063->regmap, regl->info->suspend_sleep);
if (IS_ERR(regl->suspend_sleep))
return PTR_ERR(regl->suspend_sleep);
}
@@ -867,12 +837,10 @@ static int da9063_regulator_probe(struct platform_device *pdev)
NULL, da9063_ldo_lim_event,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"LDO_LIM", regulators);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "Failed to request LDO_LIM IRQ.\n");
- return ret;
- }
- return 0;
+ return ret;
}
static struct platform_driver da9063_regulator_driver = {
diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c
new file mode 100644
index 000000000000..67ce1b52a1a1
--- /dev/null
+++ b/drivers/regulator/mp5416.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// mp5416.c - regulator driver for mps mp5416
+//
+// Copyright 2020 Monolithic Power Systems, Inc
+//
+// Author: Saravanan Sekar <sravanhome@gmail.com>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/i2c.h>
+
+#define MP5416_REG_CTL0 0x00
+#define MP5416_REG_CTL1 0x01
+#define MP5416_REG_CTL2 0x02
+#define MP5416_REG_ILIM 0x03
+#define MP5416_REG_BUCK1 0x04
+#define MP5416_REG_BUCK2 0x05
+#define MP5416_REG_BUCK3 0x06
+#define MP5416_REG_BUCK4 0x07
+#define MP5416_REG_LDO1 0x08
+#define MP5416_REG_LDO2 0x09
+#define MP5416_REG_LDO3 0x0a
+#define MP5416_REG_LDO4 0x0b
+
+#define MP5416_REGULATOR_EN BIT(7)
+#define MP5416_MASK_VSET 0x7f
+#define MP5416_MASK_BUCK1_ILIM 0xc0
+#define MP5416_MASK_BUCK2_ILIM 0x0c
+#define MP5416_MASK_BUCK3_ILIM 0x30
+#define MP5416_MASK_BUCK4_ILIM 0x03
+#define MP5416_MASK_DVS_SLEWRATE 0xc0
+
+/* values in uV */
+#define MP5416_VOLT1_MIN 600000
+#define MP5416_VOLT1_MAX 2187500
+#define MP5416_VOLT1_STEP 12500
+#define MP5416_VOLT2_MIN 800000
+#define MP5416_VOLT2_MAX 3975000
+#define MP5416_VOLT2_STEP 25000
+
+#define MP5416_VOLT1_RANGE \
+ ((MP5416_VOLT1_MAX - MP5416_VOLT1_MIN)/MP5416_VOLT1_STEP + 1)
+#define MP5416_VOLT2_RANGE \
+ ((MP5416_VOLT2_MAX - MP5416_VOLT2_MIN)/MP5416_VOLT2_STEP + 1)
+
+#define MP5416BUCK(_name, _id, _ilim, _dreg, _dval, _vsel) \
+ [MP5416_BUCK ## _id] = { \
+ .id = MP5416_BUCK ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .ops = &mp5416_buck_ops, \
+ .min_uV = MP5416_VOLT ##_vsel## _MIN, \
+ .uV_step = MP5416_VOLT ##_vsel## _STEP, \
+ .n_voltages = MP5416_VOLT ##_vsel## _RANGE, \
+ .curr_table = _ilim, \
+ .n_current_limits = ARRAY_SIZE(_ilim), \
+ .csel_reg = MP5416_REG_ILIM, \
+ .csel_mask = MP5416_MASK_BUCK ## _id ##_ILIM, \
+ .vsel_reg = MP5416_REG_BUCK ## _id, \
+ .vsel_mask = MP5416_MASK_VSET, \
+ .enable_reg = MP5416_REG_BUCK ## _id, \
+ .enable_mask = MP5416_REGULATOR_EN, \
+ .active_discharge_on = _dval, \
+ .active_discharge_reg = _dreg, \
+ .active_discharge_mask = _dval, \
+ .owner = THIS_MODULE, \
+ }
+
+#define MP5416LDO(_name, _id, _dval) \
+ [MP5416_LDO ## _id] = { \
+ .id = MP5416_LDO ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .ops = &mp5416_ldo_ops, \
+ .min_uV = MP5416_VOLT2_MIN, \
+ .uV_step = MP5416_VOLT2_STEP, \
+ .n_voltages = MP5416_VOLT2_RANGE, \
+ .vsel_reg = MP5416_REG_LDO ##_id, \
+ .vsel_mask = MP5416_MASK_VSET, \
+ .enable_reg = MP5416_REG_LDO ##_id, \
+ .enable_mask = MP5416_REGULATOR_EN, \
+ .active_discharge_on = _dval, \
+ .active_discharge_reg = MP5416_REG_CTL2, \
+ .active_discharge_mask = _dval, \
+ .owner = THIS_MODULE, \
+ }
+
+enum mp5416_regulators {
+ MP5416_BUCK1,
+ MP5416_BUCK2,
+ MP5416_BUCK3,
+ MP5416_BUCK4,
+ MP5416_LDO1,
+ MP5416_LDO2,
+ MP5416_LDO3,
+ MP5416_LDO4,
+ MP5416_MAX_REGULATORS,
+};
+
+static const struct regmap_config mp5416_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0d,
+};
+
+/*
+ * Current limits array (in uA)
+ * ILIM1 & ILIM3
+ */
+static const unsigned int mp5416_I_limits1[] = {
+ 3800000, 4600000, 5600000, 6800000
+};
+
+/* ILIM2 & ILIM4 */
+static const unsigned int mp5416_I_limits2[] = {
+ 2200000, 3200000, 4200000, 5200000
+};
+
+static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
+
+static const struct regulator_ops mp5416_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops mp5416_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .set_ramp_delay = mp5416_set_ramp_delay,
+};
+
+static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
+ MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1),
+ MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 2),
+ MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1),
+ MP5416BUCK("buck4", 4, mp5416_I_limits2, MP5416_REG_CTL2, BIT(5), 2),
+ MP5416LDO("ldo1", 1, BIT(4)),
+ MP5416LDO("ldo2", 2, BIT(3)),
+ MP5416LDO("ldo3", 3, BIT(2)),
+ MP5416LDO("ldo4", 4, BIT(1)),
+};
+
+/*
+ * DVS ramp rate BUCK1 to BUCK4
+ * 00: 32mV/us
+ * 01: 16mV/us
+ * 10: 8mV/us
+ * 11: 4mV/us
+ */
+static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_val;
+
+ if (ramp_delay > 32000 || ramp_delay < 0)
+ return -EINVAL;
+
+ if (ramp_delay <= 4000)
+ ramp_val = 3;
+ else if (ramp_delay <= 8000)
+ ramp_val = 2;
+ else if (ramp_delay <= 16000)
+ ramp_val = 1;
+ else
+ ramp_val = 0;
+
+ return regmap_update_bits(rdev->regmap, MP5416_REG_CTL2,
+ MP5416_MASK_DVS_SLEWRATE, ramp_val << 6);
+}
+
+static int mp5416_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct regulator_config config = { NULL, };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i;
+
+ regmap = devm_regmap_init_i2c(client, &mp5416_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+
+ config.dev = dev;
+ config.regmap = regmap;
+
+ for (i = 0; i < MP5416_MAX_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev,
+ &mp5416_regulators_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator!\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mp5416_of_match[] = {
+ { .compatible = "mps,mp5416" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mp5416_of_match);
+
+static const struct i2c_device_id mp5416_id[] = {
+ { "mp5416", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mp5416_id);
+
+static struct i2c_driver mp5416_regulator_driver = {
+ .driver = {
+ .name = "mp5416",
+ .of_match_table = of_match_ptr(mp5416_of_match),
+ },
+ .probe_new = mp5416_i2c_probe,
+ .id_table = mp5416_id,
+};
+module_i2c_driver(mp5416_regulator_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MP5416 PMIC regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
index 1d26b506ee5b..6ed987648188 100644
--- a/drivers/regulator/mp8859.c
+++ b/drivers/regulator/mp8859.c
@@ -95,6 +95,7 @@ static const struct regulator_desc mp8859_regulators[] = {
.id = 0,
.type = REGULATOR_VOLTAGE,
.name = "mp8859_dcdc",
+ .supply_name = "vin",
.of_match = of_match_ptr("mp8859_dcdc"),
.n_voltages = VOL_MAX_IDX + 1,
.linear_ranges = mp8859_dcdc_ranges,
diff --git a/drivers/regulator/mp886x.c b/drivers/regulator/mp886x.c
new file mode 100644
index 000000000000..1786f7162019
--- /dev/null
+++ b/drivers/regulator/mp886x.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MP8867/MP8869 regulator driver
+//
+// Copyright (C) 2020 Synaptics Incorporated
+//
+// Author: Jisheng Zhang <jszhang@kernel.org>
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MP886X_VSEL 0x00
+#define MP886X_V_BOOT (1 << 7)
+#define MP886X_SYSCNTLREG1 0x01
+#define MP886X_MODE (1 << 0)
+#define MP886X_GO (1 << 6)
+#define MP886X_EN (1 << 7)
+
+struct mp886x_device_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_init_data *regulator;
+ struct gpio_desc *en_gpio;
+ u32 r[2];
+ unsigned int sel;
+};
+
+static int mp886x_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1,
+ MP886X_MODE, MP886X_MODE);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1,
+ MP886X_MODE, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned int mp886x_get_mode(struct regulator_dev *rdev)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MP886X_SYSCNTLREG1, &val);
+ if (ret < 0)
+ return ret;
+ if (val & MP886X_MODE)
+ return REGULATOR_MODE_FAST;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int mp8869_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
+{
+ int ret;
+
+ ret = regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1,
+ MP886X_GO, MP886X_GO);
+ if (ret < 0)
+ return ret;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+ return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ MP886X_V_BOOT | rdev->desc->vsel_mask, sel);
+}
+
+static inline unsigned int mp8869_scale(unsigned int uv, u32 r1, u32 r2)
+{
+ u32 tmp = uv * r1 / r2;
+
+ return uv + tmp;
+}
+
+static int mp8869_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct mp886x_device_info *di = rdev_get_drvdata(rdev);
+ int ret, uv;
+ unsigned int val;
+ bool fbloop;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+ if (ret)
+ return ret;
+
+ fbloop = val & MP886X_V_BOOT;
+ if (fbloop) {
+ uv = rdev->desc->min_uV;
+ uv = mp8869_scale(uv, di->r[0], di->r[1]);
+ return regulator_map_voltage_linear(rdev, uv, uv);
+ }
+
+ val &= rdev->desc->vsel_mask;
+ val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+ return val;
+}
+
+static const struct regulator_ops mp8869_regulator_ops = {
+ .set_voltage_sel = mp8869_set_voltage_sel,
+ .get_voltage_sel = mp8869_get_voltage_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .map_voltage = regulator_map_voltage_linear,
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = mp886x_set_mode,
+ .get_mode = mp886x_get_mode,
+};
+
+static int mp8867_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
+{
+ struct mp886x_device_info *di = rdev_get_drvdata(rdev);
+ int ret, delta;
+
+ ret = mp8869_set_voltage_sel(rdev, sel);
+ if (ret < 0)
+ return ret;
+
+ delta = di->sel - sel;
+ if (abs(delta) <= 5)
+ ret = regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1,
+ MP886X_GO, 0);
+ di->sel = sel;
+
+ return ret;
+}
+
+static int mp8867_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct mp886x_device_info *di = rdev_get_drvdata(rdev);
+ int ret, uv;
+ unsigned int val;
+ bool fbloop;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+ if (ret)
+ return ret;
+
+ fbloop = val & MP886X_V_BOOT;
+
+ val &= rdev->desc->vsel_mask;
+ val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+ if (fbloop) {
+ uv = regulator_list_voltage_linear(rdev, val);
+ uv = mp8869_scale(uv, di->r[0], di->r[1]);
+ return regulator_map_voltage_linear(rdev, uv, uv);
+ }
+
+ return val;
+}
+
+static const struct regulator_ops mp8867_regulator_ops = {
+ .set_voltage_sel = mp8867_set_voltage_sel,
+ .get_voltage_sel = mp8867_get_voltage_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .map_voltage = regulator_map_voltage_linear,
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = mp886x_set_mode,
+ .get_mode = mp886x_get_mode,
+};
+
+static int mp886x_regulator_register(struct mp886x_device_info *di,
+ struct regulator_config *config)
+{
+ struct regulator_desc *rdesc = &di->desc;
+ struct regulator_dev *rdev;
+
+ rdesc->name = "mp886x-reg";
+ rdesc->supply_name = "vin";
+ rdesc->ops = of_device_get_match_data(di->dev);
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->n_voltages = 128;
+ rdesc->enable_reg = MP886X_SYSCNTLREG1;
+ rdesc->enable_mask = MP886X_EN;
+ rdesc->min_uV = 600000;
+ rdesc->uV_step = 10000;
+ rdesc->vsel_reg = MP886X_VSEL;
+ rdesc->vsel_mask = 0x3f;
+ rdesc->owner = THIS_MODULE;
+
+ rdev = devm_regulator_register(di->dev, &di->desc, config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+ di->sel = rdesc->ops->get_voltage_sel(rdev);
+ return 0;
+}
+
+static const struct regmap_config mp886x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int mp886x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct mp886x_device_info *di;
+ struct regulator_config config = { };
+ struct regmap *regmap;
+ int ret;
+
+ di = devm_kzalloc(dev, sizeof(struct mp886x_device_info), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ di->regulator = of_get_regulator_init_data(dev, np, &di->desc);
+ if (!di->regulator) {
+ dev_err(dev, "Platform data not found!\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(np, "mps,fb-voltage-divider",
+ di->r, 2);
+ if (ret)
+ return ret;
+
+ di->en_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(di->en_gpio))
+ return PTR_ERR(di->en_gpio);
+
+ di->dev = dev;
+
+ regmap = devm_regmap_init_i2c(client, &mp886x_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+ i2c_set_clientdata(client, di);
+
+ config.dev = di->dev;
+ config.init_data = di->regulator;
+ config.regmap = regmap;
+ config.driver_data = di;
+ config.of_node = np;
+
+ ret = mp886x_regulator_register(di, &config);
+ if (ret < 0)
+ dev_err(dev, "Failed to register regulator!\n");
+ return ret;
+}
+
+static const struct of_device_id mp886x_dt_ids[] = {
+ {
+ .compatible = "mps,mp8867",
+ .data = &mp8867_regulator_ops
+ },
+ {
+ .compatible = "mps,mp8869",
+ .data = &mp8869_regulator_ops
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mp886x_dt_ids);
+
+static const struct i2c_device_id mp886x_id[] = {
+ { "mp886x", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mp886x_id);
+
+static struct i2c_driver mp886x_regulator_driver = {
+ .driver = {
+ .name = "mp886x-regulator",
+ .of_match_table = of_match_ptr(mp886x_dt_ids),
+ },
+ .probe = mp886x_i2c_probe,
+ .id_table = mp886x_id,
+};
+module_i2c_driver(mp886x_regulator_driver);
+
+MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
+MODULE_DESCRIPTION("MP886x regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index e74e11101fc1..638329bd0745 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -354,7 +354,11 @@ static int pwm_regulator_probe(struct platform_device *pdev)
drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(drvdata->pwm)) {
ret = PTR_ERR(drvdata->pwm);
- dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev,
+ "Failed to get PWM, deferring probe\n");
+ else
+ dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret);
return ret;
}
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 7407cd5a1b74..7fc97f23fcf4 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -925,12 +925,21 @@ static const struct rpm_regulator_data rpm_pm8921_regulators[] = {
{ }
};
+static const struct rpm_regulator_data rpm_smb208_regulators[] = {
+ { "s1a", QCOM_RPM_SMB208_S1a, &smb208_smps, "vin_s1a" },
+ { "s1b", QCOM_RPM_SMB208_S1b, &smb208_smps, "vin_s1b" },
+ { "s2a", QCOM_RPM_SMB208_S2a, &smb208_smps, "vin_s2a" },
+ { "s2b", QCOM_RPM_SMB208_S2b, &smb208_smps, "vin_s2b" },
+ { }
+};
+
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8018-regulators",
.data = &rpm_pm8018_regulators },
{ .compatible = "qcom,rpm-pm8058-regulators", .data = &rpm_pm8058_regulators },
{ .compatible = "qcom,rpm-pm8901-regulators", .data = &rpm_pm8901_regulators },
{ .compatible = "qcom,rpm-pm8921-regulators", .data = &rpm_pm8921_regulators },
+ { .compatible = "qcom,rpm-smb208-regulators", .data = &rpm_smb208_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index fff8d5fdef6a..fdde4195cefb 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -445,6 +445,44 @@ static const struct regulator_desc pm8994_lnldo = {
.ops = &rpm_smps_ldo_ops_fixed,
};
+static const struct regulator_desc pmi8994_ftsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000),
+ REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 350,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pmi8994_hfsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(350000, 0, 80, 12500),
+ REGULATOR_LINEAR_RANGE(700000, 81, 141, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 142,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pmi8994_bby = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(3000000, 0, 44, 50000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 45,
+ .ops = &rpm_bob_ops,
+};
+
+static const struct regulator_desc pmi8994_boost = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 31,
+ .ops = &rpm_smps_ldo_ops,
+};
+
static const struct regulator_desc pm8998_ftsmps = {
.linear_ranges = (struct regulator_linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
@@ -780,6 +818,14 @@ static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pmi8994_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" },
+ { "s2", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
+ { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8998_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8998_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8998_ftsmps, "vdd_s2" },
@@ -862,6 +908,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
{ .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
+ { .compatible = "qcom,rpm-pmi8994-regulators", .data = &rpm_pmi8994_regulators },
{ .compatible = "qcom,rpm-pmi8998-regulators", .data = &rpm_pmi8998_regulators },
{ .compatible = "qcom,rpm-pms405-regulators", .data = &rpm_pms405_regulators },
{}
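Editor's note: a quick check of the pmi8994 FTSMPS descriptor above: range one covers selectors 0..199 and range two covers 200..349, so n_voltages = 350 as declared; a selector in the second range maps as base + (sel - range_start) * step.

#include <stdio.h>

int main(void)
{
	int r1 = 199 - 0 + 1;		/* selectors in range one */
	int r2 = 349 - 200 + 1;		/* selectors in range two */

	printf("ftsmps selectors: %d\n", r1 + r2);	/* 350 */
	/* selector 210 -> 700000 + (210 - 200) * 10000 = 800000 uV */
	printf("sel 210 -> %d uV\n", 700000 + (210 - 200) * 10000);
	return 0;
}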
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index de3862c15fcc..fbaed079b299 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -35,7 +35,7 @@ config MTK_SCP
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
- depends on ARCH_OMAP4 || SOC_OMAP5
+ depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX
depends on OMAP_IOMMU
select MAILBOX
select OMAP2PLUS_MBOX
@@ -52,6 +52,18 @@ config OMAP_REMOTEPROC
It's safe to say N here if you're not interested in multimedia
offloading or just want a bare minimum kernel.
+config OMAP_REMOTEPROC_WATCHDOG
+ bool "OMAP remoteproc watchdog timer"
+ depends on OMAP_REMOTEPROC
+ default n
+ help
+ Say Y here to enable watchdog timer for remote processors.
+
+ This option controls the watchdog functionality for the remote
+ processors in OMAP. Dedicated OMAP DMTimers are used by the remote
+ processors and trigger the timer interrupt upon a watchdog
+ detection.
+
config WKUP_M3_RPROC
tristate "AMx3xx Wakeup M3 remoteproc support"
depends on SOC_AM33XX || SOC_AM43XX
@@ -128,6 +140,7 @@ config QCOM_Q6V5_MSS
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
+ select QCOM_Q6V5_IPA_NOTIFY
select QCOM_RPROC_COMMON
select QCOM_SCM
help
@@ -167,6 +180,9 @@ config QCOM_Q6V5_WCSS
Say y here to support the Qualcomm Peripheral Image Loader for the
Hexagon V5 based WCSS remote processors.
+config QCOM_Q6V5_IPA_NOTIFY
+ tristate
+
config QCOM_SYSMON
tristate "Qualcomm sysmon driver"
depends on RPMSG
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index e30a1b15fbac..0effd3825035 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
+obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY) += qcom_q6v5_ipa_notify.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 3e72b6f38d4b..8957ed271d20 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -186,7 +186,7 @@ static int imx_rproc_stop(struct rproc *rproc)
}
static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
- int len, u64 *sys)
+ size_t len, u64 *sys)
{
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
int i;
@@ -203,19 +203,19 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
}
}
- dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%x\n",
+ dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%zx\n",
da, len);
return -ENOENT;
}
-static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct imx_rproc *priv = rproc->priv;
void *va = NULL;
u64 sys;
int i;
- if (len <= 0)
+ if (len == 0)
return NULL;
/*
@@ -235,7 +235,8 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
}
}
- dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va);
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n",
+ da, len, va);
return va;
}
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 5c4658f00b3d..cd266163a65f 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -246,7 +246,7 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid)
* can be used either by the remoteproc core for loading (when using kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
-static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct keystone_rproc *ksproc = rproc->priv;
void __iomem *va = NULL;
@@ -255,7 +255,7 @@ static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
size_t size;
int i;
- if (len <= 0)
+ if (len == 0)
return NULL;
for (i = 0; i < ksproc->num_mems; i++) {
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index deb20096146a..0066c83636d0 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -68,7 +68,7 @@ struct mtk_scp {
wait_queue_head_t ack_wq;
void __iomem *cpu_addr;
- phys_addr_t phys_addr;
+ dma_addr_t dma_addr;
size_t dram_size;
struct rproc_subdev *rpmsg_subdev;
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 7ccdf64ff3ea..2bead57c9cf9 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -320,7 +320,7 @@ stop:
return ret;
}
-static void *scp_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
int offset;
@@ -330,7 +330,7 @@ static void *scp_da_to_va(struct rproc *rproc, u64 da, int len)
if (offset >= 0 && (offset + len) < scp->sram_size)
return (void __force *)scp->sram_base + offset;
} else {
- offset = da - scp->phys_addr;
+ offset = da - scp->dma_addr;
if (offset >= 0 && (offset + len) < scp->dram_size)
return (void __force *)scp->cpu_addr + offset;
}
@@ -451,7 +451,7 @@ static int scp_map_memory_region(struct mtk_scp *scp)
/* Reserved SCP code size */
scp->dram_size = MAX_CODE_SIZE;
scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
- &scp->phys_addr, GFP_KERNEL);
+ &scp->dma_addr, GFP_KERNEL);
if (!scp->cpu_addr)
return -ENOMEM;
@@ -461,7 +461,7 @@ static int scp_map_memory_region(struct mtk_scp *scp)
static void scp_unmap_memory_region(struct mtk_scp *scp)
{
dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
- scp->phys_addr);
+ scp->dma_addr);
of_reserved_mem_device_release(scp->dev);
}
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 6398194075aa..6955fab0a78b 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -2,7 +2,7 @@
/*
* OMAP Remote Processor driver
*
- * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011-2020 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <ohad@wizery.com>
@@ -15,31 +15,466 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk/ti.h>
#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include <linux/mailbox_client.h>
+#include <linux/omap-iommu.h>
#include <linux/omap-mailbox.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+#include <clocksource/timer-ti-dm.h>
-#include <linux/platform_data/remoteproc-omap.h>
+#include <linux/platform_data/dmtimer-omap.h>
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
+/* default auto-suspend delay (ms) */
+#define DEFAULT_AUTOSUSPEND_DELAY 10000
+
+/**
+ * struct omap_rproc_boot_data - boot data structure for the DSP omap rprocs
+ * @syscon: regmap handle for the system control configuration module
+ * @boot_reg: boot register offset within the @syscon regmap
+ * @boot_reg_shift: bit-field shift required for the boot address value in
+ * @boot_reg
+ */
+struct omap_rproc_boot_data {
+ struct regmap *syscon;
+ unsigned int boot_reg;
+ unsigned int boot_reg_shift;
+};
+
+/**
+ * struct omap_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: bus address used to access the memory region
+ * @dev_addr: device address of the memory region from DSP view
+ * @size: size of the memory region
+ */
+struct omap_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct omap_rproc_timer - data structure for a timer used by an OMAP rproc
+ * @odt: timer pointer
+ * @timer_ops: OMAP dmtimer ops for @odt timer
+ * @irq: timer irq
+ */
+struct omap_rproc_timer {
+ struct omap_dm_timer *odt;
+ const struct omap_dm_timer_ops *timer_ops;
+ int irq;
+};
+
/**
* struct omap_rproc - omap remote processor state
* @mbox: mailbox channel handle
* @client: mailbox client to request the mailbox channel
+ * @boot_data: boot data structure for setting processor boot address
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @num_timers: number of rproc timer(s)
+ * @num_wd_timers: number of rproc watchdog timers
+ * @timers: timer(s) info used by rproc
+ * @autosuspend_delay: auto-suspend delay value to be used for runtime pm
+ * @need_resume: if true a resume is needed in the system resume callback
* @rproc: rproc handle
+ * @reset: reset handle
+ * @pm_comp: completion primitive to sync for suspend response
+ * @fck: functional clock for the remoteproc
+ * @suspend_acked: state machine flag to store the suspend request ack
*/
struct omap_rproc {
struct mbox_chan *mbox;
struct mbox_client client;
+ struct omap_rproc_boot_data *boot_data;
+ struct omap_rproc_mem *mem;
+ int num_mems;
+ int num_timers;
+ int num_wd_timers;
+ struct omap_rproc_timer *timers;
+ int autosuspend_delay;
+ bool need_resume;
struct rproc *rproc;
+ struct reset_control *reset;
+ struct completion pm_comp;
+ struct clk *fck;
+ bool suspend_acked;
};
/**
+ * struct omap_rproc_mem_data - memory definitions for an omap remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct omap_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct omap_rproc_dev_data - device data for the omap remote processor
+ * @device_name: device name of the remote processor
+ * @mems: memory definitions for this remote processor
+ */
+struct omap_rproc_dev_data {
+ const char *device_name;
+ const struct omap_rproc_mem_data *mems;
+};
+
+/**
+ * omap_rproc_request_timer() - request a timer for a remoteproc
+ * @dev: device requesting the timer
+ * @np: device node pointer to the desired timer
+ * @timer: handle to a struct omap_rproc_timer to return the timer handle
+ *
+ * This helper function is used primarily to request a timer associated with
+ * a remoteproc. The returned handle is stored in the .odt field of the
+ * @timer structure passed in, and is used to invoke other timer specific
+ * ops (like starting a timer either during device initialization or during
+ * a resume operation, or for stopping/freeing a timer).
+ *
+ * Return: 0 on success, otherwise an appropriate failure
+ */
+static int omap_rproc_request_timer(struct device *dev, struct device_node *np,
+ struct omap_rproc_timer *timer)
+{
+ int ret;
+
+ timer->odt = timer->timer_ops->request_by_node(np);
+ if (!timer->odt) {
+ dev_err(dev, "request for timer node %p failed\n", np);
+ return -EBUSY;
+ }
+
+ ret = timer->timer_ops->set_source(timer->odt, OMAP_TIMER_SRC_SYS_CLK);
+ if (ret) {
+ dev_err(dev, "error setting OMAP_TIMER_SRC_SYS_CLK as source for timer node %p\n",
+ np);
+ timer->timer_ops->free(timer->odt);
+ return ret;
+ }
+
+ /* clean counter, remoteproc code will set the value */
+ timer->timer_ops->set_load(timer->odt, 0);
+
+ return 0;
+}
+
+/**
+ * omap_rproc_start_timer() - start a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used to start a timer associated with a remoteproc,
+ * obtained using the request_timer ops. The helper function needs to be
+ * invoked by the driver to start the timer (during device initialization)
+ * or to just resume the timer.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_start_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->start(timer->odt);
+}
+
+/**
+ * omap_rproc_stop_timer() - stop a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used to disable a timer associated with a
+ * remoteproc, and needs to be called either during a device shutdown
+ * or suspend operation. The separate helper function allows the driver
+ * to just stop a timer without having to release the timer during a
+ * suspend operation.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_stop_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->stop(timer->odt);
+}
+
+/**
+ * omap_rproc_release_timer() - release a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used primarily to release a timer associated
+ * with a remoteproc. The dmtimer will be available for other clients to
+ * use once released.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_release_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->free(timer->odt);
+}
+
+/**
+ * omap_rproc_get_timer_irq() - get the irq for a timer
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This function is used to get the irq associated with a watchdog timer. The
+ * function is called by the OMAP remoteproc driver to register an interrupt
+ * handler to handle watchdog events on the remote processor.
+ *
+ * Return: irq id on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->get_irq(timer->odt);
+}
+
+/**
+ * omap_rproc_ack_timer_irq() - acknowledge a timer irq
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This function is used to clear the irq associated with a watchdog timer.
+ * The function is called by the OMAP remoteproc upon a watchdog event on the
+ * remote processor to clear the interrupt status of the watchdog timer.
+ */
+static inline void omap_rproc_ack_timer_irq(struct omap_rproc_timer *timer)
+{
+ timer->timer_ops->write_status(timer->odt, OMAP_TIMER_INT_OVERFLOW);
+}
+
+/**
+ * omap_rproc_watchdog_isr() - Watchdog ISR handler for remoteproc device
+ * @irq: IRQ number associated with a watchdog timer
+ * @data: IRQ handler data
+ *
+ * This ISR routine executes the necessary low-level code to
+ * acknowledge a watchdog timer interrupt. There can be multiple watchdog
+ * timers associated with a rproc (like IPUs which have 2 watchdog timers,
+ * one per Cortex M3/M4 core), so a lookup has to be performed to identify
+ * the timer whose interrupt should be acknowledged.
+ *
+ * The function also invokes rproc_report_crash to report the watchdog event
+ * to the remoteproc driver core, to trigger a recovery.
+ *
+ * Return: IRQ_HANDLED on success, otherwise IRQ_NONE
+ */
+static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
+{
+ struct rproc *rproc = data;
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc_timer *timers = oproc->timers;
+ struct omap_rproc_timer *wd_timer = NULL;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+ int i;
+
+ for (i = oproc->num_timers; i < num_timers; i++) {
+ if (timers[i].irq > 0 && irq == timers[i].irq) {
+ wd_timer = &timers[i];
+ break;
+ }
+ }
+
+ if (!wd_timer) {
+ dev_err(dev, "invalid timer\n");
+ return IRQ_NONE;
+ }
+
+ omap_rproc_ack_timer_irq(wd_timer);
+
+ rproc_report_crash(rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * omap_rproc_enable_timers() - enable the timers for a remoteproc
+ * @rproc: handle of a remote processor
+ * @configure: boolean flag used to acquire and configure the timer handle
+ *
+ * This function is used primarily to enable the timers associated with
+ * a remoteproc. The configure flag is provided to allow the driver
+ * to either acquire and start a timer (during device initialization) or
+ * to just start a timer (during a resume operation).
+ *
+ * Return: 0 on success, otherwise an appropriate failure
+ */
+static int omap_rproc_enable_timers(struct rproc *rproc, bool configure)
+{
+ int i;
+ int ret = 0;
+ struct platform_device *tpdev;
+ struct dmtimer_platform_data *tpdata;
+ const struct omap_dm_timer_ops *timer_ops;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_timer *timers = oproc->timers;
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = NULL;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+
+ if (!num_timers)
+ return 0;
+
+ if (!configure)
+ goto start_timers;
+
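+ /*
+ * Timers are listed in DT as plain phandles, e.g. with hypothetical
+ * labels: ti,timers = <&timer5>; ti,watchdog-timers = <&timer7>,
+ * <&timer8>;. Tick timers occupy the first num_timers entries of
+ * timers[], with the watchdog timers following them.
+ */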
+ for (i = 0; i < num_timers; i++) {
+ if (i < oproc->num_timers)
+ np = of_parse_phandle(dev->of_node, "ti,timers", i);
+ else
+ np = of_parse_phandle(dev->of_node,
+ "ti,watchdog-timers",
+ (i - oproc->num_timers));
+ if (!np) {
+ ret = -ENXIO;
+ dev_err(dev, "device node lookup for timer at index %d failed: %d\n",
+ i < oproc->num_timers ? i :
+ i - oproc->num_timers, ret);
+ goto free_timers;
+ }
+
+ tpdev = of_find_device_by_node(np);
+ if (!tpdev) {
+ ret = -ENODEV;
+ dev_err(dev, "could not get timer platform device\n");
+ goto put_node;
+ }
+
+ tpdata = dev_get_platdata(&tpdev->dev);
+ put_device(&tpdev->dev);
+ if (!tpdata) {
+ ret = -EINVAL;
+ dev_err(dev, "dmtimer pdata structure NULL\n");
+ goto put_node;
+ }
+
+ timer_ops = tpdata->timer_ops;
+ if (!timer_ops || !timer_ops->request_by_node ||
+ !timer_ops->set_source || !timer_ops->set_load ||
+ !timer_ops->free || !timer_ops->start ||
+ !timer_ops->stop || !timer_ops->get_irq ||
+ !timer_ops->write_status) {
+ ret = -EINVAL;
+ dev_err(dev, "device does not have required timer ops\n");
+ goto put_node;
+ }
+
+ timers[i].irq = -1;
+ timers[i].timer_ops = timer_ops;
+ ret = omap_rproc_request_timer(dev, np, &timers[i]);
+ if (ret) {
+ dev_err(dev, "request for timer %p failed: %d\n", np,
+ ret);
+ goto put_node;
+ }
+ of_node_put(np);
+
+ if (i >= oproc->num_timers) {
+ timers[i].irq = omap_rproc_get_timer_irq(&timers[i]);
+ if (timers[i].irq < 0) {
+ dev_err(dev, "get_irq for timer %p failed: %d\n",
+ np, timers[i].irq);
+ ret = -EBUSY;
+ goto free_timers;
+ }
+
+ ret = request_irq(timers[i].irq,
+ omap_rproc_watchdog_isr, IRQF_SHARED,
+ "rproc-wdt", rproc);
+ if (ret) {
+ dev_err(dev, "error requesting irq for timer %p\n",
+ np);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ goto free_timers;
+ }
+ }
+ }
+
+start_timers:
+ for (i = 0; i < num_timers; i++) {
+ ret = omap_rproc_start_timer(&timers[i]);
+ if (ret) {
+ dev_err(dev, "start timer %p failed failed: %d\n", np,
+ ret);
+ break;
+ }
+ }
+ if (ret) {
+ while (i >= 0) {
+ omap_rproc_stop_timer(&timers[i]);
+ i--;
+ }
+ goto put_node;
+ }
+ return 0;
+
+put_node:
+ if (configure)
+ of_node_put(np);
+free_timers:
+ while (i--) {
+ if (i >= oproc->num_timers)
+ free_irq(timers[i].irq, rproc);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ }
+
+ return ret;
+}
+
+/**
+ * omap_rproc_disable_timers() - disable the timers for a remoteproc
+ * @rproc: handle of a remote processor
+ * @configure: boolean flag used to release the timer handle
+ *
+ * This function is used primarily to disable the timers associated with
+ * a remoteproc. The configure flag is provided to allow the driver
+ * to either stop and release a timer (during device shutdown) or to just
+ * stop a timer (during a suspend operation).
+ *
+ * Return: 0 on success or no timers
+ */
+static int omap_rproc_disable_timers(struct rproc *rproc, bool configure)
+{
+ int i;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_timer *timers = oproc->timers;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+
+ if (!num_timers)
+ return 0;
+
+ for (i = 0; i < num_timers; i++) {
+ omap_rproc_stop_timer(&timers[i]);
+ if (configure) {
+ if (i >= oproc->num_timers)
+ free_irq(timers[i].irq, rproc);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
* omap_rproc_mbox_callback() - inbound mailbox message handler
* @client: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
@@ -65,13 +500,29 @@ static void omap_rproc_mbox_callback(struct mbox_client *client, void *data)
switch (msg) {
case RP_MBOX_CRASH:
- /* just log this for now. later, we'll also do recovery */
+ /*
+ * remoteproc detected an exception, notify the rproc core.
+ * The remoteproc core will handle the recovery.
+ */
dev_err(dev, "omap rproc %s crashed\n", name);
+ rproc_report_crash(oproc->rproc, RPROC_FATAL_ERROR);
break;
case RP_MBOX_ECHO_REPLY:
dev_info(dev, "received echo reply from %s\n", name);
break;
+ case RP_MBOX_SUSPEND_ACK:
+ /* Fall through */
+ case RP_MBOX_SUSPEND_CANCEL:
+ oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK;
+ complete(&oproc->pm_comp);
+ break;
default:
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > oproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
/* msg contains the index of the triggered vring */
if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
@@ -85,11 +536,52 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
struct device *dev = rproc->dev.parent;
int ret;
+ /* wake up the rproc before kicking it */
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0)) {
+ dev_err(dev, "pm_runtime_get_sync() failed during kick, ret = %d\n",
+ ret);
+ pm_runtime_put_noidle(dev);
+ return;
+ }
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(oproc->mbox, (void *)vqid);
if (ret < 0)
dev_err(dev, "failed to send mailbox message, status = %d\n",
ret);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+/**
+ * omap_rproc_write_dsp_boot_addr() - set boot address for DSP remote processor
+ * @rproc: handle of a remote processor
+ *
+ * Set boot address for a supported DSP remote processor.
+ *
+ * Return: 0 on success, or -EINVAL if boot address is not aligned properly
+ */
+static int omap_rproc_write_dsp_boot_addr(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_boot_data *bdata = oproc->boot_data;
+ u32 offset = bdata->boot_reg;
+ u32 value;
+ u32 mask;
+
+ if (rproc->bootaddr & (SZ_1K - 1)) {
+ dev_err(dev, "invalid boot address 0x%llx, must be aligned on a 1KB boundary\n",
+ rproc->bootaddr);
+ return -EINVAL;
+ }
+
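+ /*
+ * Only the address bits above the 1KB boundary are programmed into
+ * the boot register; e.g. with a hypothetical shift of 10, a boot
+ * address of 0x95800000 would be written as 0x95800000 >> 10 =
+ * 0x256000 within the masked field.
+ */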
+ value = rproc->bootaddr >> bdata->boot_reg_shift;
+ mask = ~(SZ_1K - 1) >> bdata->boot_reg_shift;
+
+ return regmap_update_bits(bdata->syscon, offset, mask, value);
}
/*
@@ -103,13 +595,14 @@ static int omap_rproc_start(struct rproc *rproc)
{
struct omap_rproc *oproc = rproc->priv;
struct device *dev = rproc->dev.parent;
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
int ret;
struct mbox_client *client = &oproc->client;
- if (pdata->set_bootaddr)
- pdata->set_bootaddr(rproc->bootaddr);
+ if (oproc->boot_data) {
+ ret = omap_rproc_write_dsp_boot_addr(rproc);
+ if (ret)
+ return ret;
+ }
client->dev = dev;
client->tx_done = NULL;
@@ -117,7 +610,7 @@ static int omap_rproc_start(struct rproc *rproc)
client->tx_block = false;
client->knows_txdone = false;
- oproc->mbox = omap_mbox_request_channel(client, pdata->mbox_name);
+ oproc->mbox = mbox_request_channel(client, 0);
if (IS_ERR(oproc->mbox)) {
ret = -EBUSY;
dev_err(dev, "mbox_request_channel failed: %ld\n",
@@ -138,14 +631,34 @@ static int omap_rproc_start(struct rproc *rproc)
goto put_mbox;
}
- ret = pdata->device_enable(pdev);
+ ret = omap_rproc_enable_timers(rproc, true);
if (ret) {
- dev_err(dev, "omap_device_enable failed: %d\n", ret);
+ dev_err(dev, "omap_rproc_enable_timers failed: %d\n", ret);
goto put_mbox;
}
+ ret = reset_control_deassert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset control deassert failed: %d\n", ret);
+ goto disable_timers;
+ }
+
+ /*
+ * remote processor is up, so update the runtime pm status and
+ * enable the auto-suspend. The device usage count is incremented
+ * manually for balancing it for auto-suspend
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
+disable_timers:
+ omap_rproc_disable_timers(rproc, true);
put_mbox:
mbox_free_channel(oproc->mbox);
return ret;
@@ -155,32 +668,638 @@ put_mbox:
static int omap_rproc_stop(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
struct omap_rproc *oproc = rproc->priv;
int ret;
- ret = pdata->device_shutdown(pdev);
- if (ret)
+ /*
+ * cancel any possible scheduled runtime suspend by incrementing
+ * the device usage count, and resuming the device. The remoteproc
+ * also needs to be woken up if suspended, so that the remoteproc
+ * OS does not continue to remember any context that it has saved,
+ * and to avoid potential issues in misidentifying a subsequent
+ * device reboot as a power restore boot
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
return ret;
+ }
+
+ ret = reset_control_assert(oproc->reset);
+ if (ret)
+ goto out;
+
+ ret = omap_rproc_disable_timers(rproc, true);
+ if (ret)
+ goto enable_device;
mbox_free_channel(oproc->mbox);
+ /*
+ * update the runtime pm states and status now that the remoteproc
+ * has stopped
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+
return 0;
+
+enable_device:
+ reset_control_deassert(oproc->reset);
+out:
+ /* schedule the next auto-suspend */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+/**
+ * omap_rproc_da_to_va() - internal memory translation helper
+ * @rproc: remote processor to apply the address translation for
+ * @da: device address to translate
+ * @len: length of the memory buffer
+ *
+ * Custom function implementing the rproc .da_to_va ops to provide address
+ * translation (device address to kernel virtual address) for internal RAMs
+ * present in a DSP or IPU device. The translated addresses can be used
+ * either by the remoteproc core for loading, or by any rpmsg bus drivers.
+ *
+ * Return: translated virtual address in kernel memory space on success,
+ * or NULL on failure.
+ */
+static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ int i;
+ u32 offset;
+
+ if (len <= 0)
+ return NULL;
+
+ if (!oproc->num_mems)
+ return NULL;
+
+ for (i = 0; i < oproc->num_mems; i++) {
+ if (da >= oproc->mem[i].dev_addr && da + len <=
+ oproc->mem[i].dev_addr + oproc->mem[i].size) {
+ offset = da - oproc->mem[i].dev_addr;
+ /* __force to make sparse happy with type conversion */
+ return (__force void *)(oproc->mem[i].cpu_addr +
+ offset);
+ }
+ }
+
+ return NULL;
}
static const struct rproc_ops omap_rproc_ops = {
.start = omap_rproc_start,
.stop = omap_rproc_stop,
.kick = omap_rproc_kick,
+ .da_to_va = omap_rproc_da_to_va,
+};
+
+#ifdef CONFIG_PM
+static bool _is_rproc_in_standby(struct omap_rproc *oproc)
+{
+ return ti_clk_is_in_standby(oproc->fck);
+}
+
+/* 1 sec is long enough to let the remoteproc side suspend the device */
+#define DEF_SUSPEND_TIMEOUT 1000
+static int _omap_rproc_suspend(struct rproc *rproc, bool auto_suspend)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ unsigned long to = msecs_to_jiffies(DEF_SUSPEND_TIMEOUT);
+ unsigned long ta = jiffies + to;
+ u32 suspend_msg = auto_suspend ?
+ RP_MBOX_SUSPEND_AUTO : RP_MBOX_SUSPEND_SYSTEM;
+ int ret;
+
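+ /*
+ * Ask the remote side to suspend itself; the mailbox callback sets
+ * suspend_acked and completes pm_comp when the remote processor
+ * answers with RP_MBOX_SUSPEND_ACK or RP_MBOX_SUSPEND_CANCEL.
+ */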
+ reinit_completion(&oproc->pm_comp);
+ oproc->suspend_acked = false;
+ ret = mbox_send_message(oproc->mbox, (void *)suspend_msg);
+ if (ret < 0) {
+ dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&oproc->pm_comp, to);
+ if (!oproc->suspend_acked)
+ return -EBUSY;
+
+ /*
+ * The remoteproc side is returning the ACK message before saving the
+ * context, because the context saving is performed within a SYS/BIOS
+ * function, and it cannot have any inter-dependencies against the IPC
+ * layer. Also, as the SYS/BIOS needs to properly preserve the processor
+ * register set, sending this ACK or signalling the completion of the
+ * context save through a shared memory variable can never be the
+ * absolute last thing to be executed on the remoteproc side, and the
+ * MPU cannot use the ACK message as a sync point to put the remoteproc
+ * into reset. The only way to ensure that the remote processor has
+ * completed saving the context is to check that the module has reached
+ * STANDBY state (after saving the context, the SYS/BIOS executes the
+ * appropriate target-specific WFI instruction causing the module to
+ * enter STANDBY).
+ */
+ while (!_is_rproc_in_standby(oproc)) {
+ if (time_after(jiffies, ta))
+ return -ETIME;
+ schedule();
+ }
+
+ ret = reset_control_assert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset assert during suspend failed %d\n", ret);
+ return ret;
+ }
+
+ ret = omap_rproc_disable_timers(rproc, false);
+ if (ret) {
+ dev_err(dev, "disabling timers during suspend failed %d\n",
+ ret);
+ goto enable_device;
+ }
+
+ /*
+ * IOMMUs would have to be disabled specifically for runtime suspend.
+ * They are handled automatically through System PM callbacks for
+ * regular system suspend
+ */
+ if (auto_suspend) {
+ ret = omap_iommu_domain_deactivate(rproc->domain);
+ if (ret) {
+ dev_err(dev, "iommu domain deactivate failed %d\n",
+ ret);
+ goto enable_timers;
+ }
+ }
+
+ return 0;
+
+enable_timers:
+ /* ignore errors on the re-enable path */
+ omap_rproc_enable_timers(rproc, false);
+enable_device:
+ reset_control_deassert(oproc->reset);
+ return ret;
+}
+
+static int _omap_rproc_resume(struct rproc *rproc, bool auto_suspend)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ /*
+ * IOMMUs would have to be enabled specifically for runtime resume.
+ * They would have been already enabled automatically through System
+ * PM callbacks for regular system resume
+ */
+ if (auto_suspend) {
+ ret = omap_iommu_domain_activate(rproc->domain);
+ if (ret) {
+ dev_err(dev, "omap_iommu activate failed %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* boot address could be lost after suspend, so restore it */
+ if (oproc->boot_data) {
+ ret = omap_rproc_write_dsp_boot_addr(rproc);
+ if (ret) {
+ dev_err(dev, "boot address restore failed %d\n", ret);
+ goto suspend_iommu;
+ }
+ }
+
+ ret = omap_rproc_enable_timers(rproc, false);
+ if (ret) {
+ dev_err(dev, "enabling timers during resume failed %d\n", ret);
+ goto suspend_iommu;
+ }
+
+ ret = reset_control_deassert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset deassert during resume failed %d\n", ret);
+ goto disable_timers;
+ }
+
+ return 0;
+
+disable_timers:
+ omap_rproc_disable_timers(rproc, false);
+suspend_iommu:
+ if (auto_suspend)
+ omap_iommu_domain_deactivate(rproc->domain);
+out:
+ return ret;
+}
+
+static int __maybe_unused omap_rproc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret = 0;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (rproc->state == RPROC_SUSPENDED)
+ goto out;
+
+ if (rproc->state != RPROC_RUNNING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = _omap_rproc_suspend(rproc, false);
+ if (ret) {
+ dev_err(dev, "suspend failed %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * remoteproc is running at the time of system suspend, so remember
+ * it so as to wake it up during system resume
+ */
+ oproc->need_resume = true;
+ rproc->state = RPROC_SUSPENDED;
+
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int __maybe_unused omap_rproc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret = 0;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (rproc->state != RPROC_SUSPENDED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * remoteproc was auto-suspended at the time of system suspend,
+ * so no need to wake-up the processor (leave it in suspended
+ * state, will be woken up during a subsequent runtime_resume)
+ */
+ if (!oproc->need_resume)
+ goto out;
+
+ ret = _omap_rproc_resume(rproc, false);
+ if (ret) {
+ dev_err(dev, "resume failed %d\n", ret);
+ goto out;
+ }
+
+ oproc->need_resume = false;
+ rproc->state = RPROC_RUNNING;
+
+ pm_runtime_mark_last_busy(dev);
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int omap_rproc_runtime_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_CRASHED) {
+ dev_dbg(dev, "rproc cannot be runtime suspended when crashed!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (WARN_ON(rproc->state != RPROC_RUNNING)) {
+ dev_err(dev, "rproc cannot be runtime suspended when not running!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * do not even attempt suspend if the remote processor is not
+ * idled for runtime auto-suspend
+ */
+ if (!_is_rproc_in_standby(oproc)) {
+ ret = -EBUSY;
+ goto abort;
+ }
+
+ ret = _omap_rproc_suspend(rproc, true);
+ if (ret)
+ goto abort;
+
+ rproc->state = RPROC_SUSPENDED;
+ mutex_unlock(&rproc->lock);
+ return 0;
+
+abort:
+ pm_runtime_mark_last_busy(dev);
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int omap_rproc_runtime_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&rproc->lock);
+ if (WARN_ON(rproc->state != RPROC_SUSPENDED)) {
+ dev_err(dev, "rproc cannot be runtime resumed if not suspended! state=%d\n",
+ rproc->state);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = _omap_rproc_resume(rproc, true);
+ if (ret) {
+ dev_err(dev, "runtime resume failed %d\n", ret);
+ goto out;
+ }
+
+ rproc->state = RPROC_RUNNING;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct omap_rproc_mem_data ipu_mems[] = {
+ { .name = "l2ram", .dev_addr = 0x20000000 },
+ { },
+};
+
+static const struct omap_rproc_mem_data dra7_dsp_mems[] = {
+ { .name = "l2ram", .dev_addr = 0x800000 },
+ { .name = "l1pram", .dev_addr = 0xe00000 },
+ { .name = "l1dram", .dev_addr = 0xf00000 },
+ { },
+};
+
+static const struct omap_rproc_dev_data omap4_dsp_dev_data = {
+ .device_name = "dsp",
+};
+
+static const struct omap_rproc_dev_data omap4_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
};
+static const struct omap_rproc_dev_data omap5_dsp_dev_data = {
+ .device_name = "dsp",
+};
+
+static const struct omap_rproc_dev_data omap5_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct omap_rproc_dev_data dra7_dsp_dev_data = {
+ .device_name = "dsp",
+ .mems = dra7_dsp_mems,
+};
+
+static const struct omap_rproc_dev_data dra7_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct of_device_id omap_rproc_of_match[] = {
+ {
+ .compatible = "ti,omap4-dsp",
+ .data = &omap4_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,omap4-ipu",
+ .data = &omap4_ipu_dev_data,
+ },
+ {
+ .compatible = "ti,omap5-dsp",
+ .data = &omap5_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,omap5-ipu",
+ .data = &omap5_ipu_dev_data,
+ },
+ {
+ .compatible = "ti,dra7-dsp",
+ .data = &dra7_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,dra7-ipu",
+ .data = &dra7_ipu_dev_data,
+ },
+ {
+ /* end */
+ },
+};
+MODULE_DEVICE_TABLE(of, omap_rproc_of_match);
+
+static const char *omap_rproc_get_firmware(struct platform_device *pdev)
+{
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fw_name;
+}
+
+static int omap_rproc_get_boot_data(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc = rproc->priv;
+ const struct omap_rproc_dev_data *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ if (!of_property_read_bool(np, "ti,bootreg"))
+ return 0;
+
+ oproc->boot_data = devm_kzalloc(&pdev->dev, sizeof(*oproc->boot_data),
+ GFP_KERNEL);
+ if (!oproc->boot_data)
+ return -ENOMEM;
+
+ oproc->boot_data->syscon =
+ syscon_regmap_lookup_by_phandle(np, "ti,bootreg");
+ if (IS_ERR(oproc->boot_data->syscon)) {
+ ret = PTR_ERR(oproc->boot_data->syscon);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "ti,bootreg", 1,
+ &oproc->boot_data->boot_reg)) {
+ dev_err(&pdev->dev, "couldn't get the boot register\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_index(np, "ti,bootreg", 2,
+ &oproc->boot_data->boot_reg_shift);
+
+ return 0;
+}
+
+static int omap_rproc_of_get_internal_memories(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = &pdev->dev;
+ const struct omap_rproc_dev_data *data;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ if (!data->mems)
+ return 0;
+
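+ /* each "reg" entry is assumed to be an <addr size> pair of u32 cells */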
+ num_mems = of_property_count_elems_of_size(dev->of_node, "reg",
+ sizeof(u32)) / 2;
+
+ oproc->mem = devm_kcalloc(dev, num_mems, sizeof(*oproc->mem),
+ GFP_KERNEL);
+ if (!oproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; data->mems[i].name; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "no memory defined for %s\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ oproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(oproc->mem[i].cpu_addr)) {
+ dev_err(dev, "failed to parse and map %s memory\n",
+ data->mems[i].name);
+ return PTR_ERR(oproc->mem[i].cpu_addr);
+ }
+ oproc->mem[i].bus_addr = res->start;
+ oproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ oproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %pK da 0x%x\n",
+ data->mems[i].name, &oproc->mem[i].bus_addr,
+ oproc->mem[i].size, oproc->mem[i].cpu_addr,
+ oproc->mem[i].dev_addr);
+ }
+ oproc->num_mems = num_mems;
+
+ return 0;
+}
+
+#ifdef CONFIG_OMAP_REMOTEPROC_WATCHDOG
+static int omap_rproc_count_wdog_timers(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_count_phandle_with_args(np, "ti,watchdog-timers", NULL);
+ if (ret <= 0) {
+ dev_dbg(dev, "device does not have watchdog timers, status = %d\n",
+ ret);
+ ret = 0;
+ }
+
+ return ret;
+}
+#else
+static int omap_rproc_count_wdog_timers(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static int omap_rproc_of_get_timers(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = &pdev->dev;
+ int num_timers;
+
+ /*
+ * Timer nodes are referenced in client nodes as plain phandles (with
+ * no argument cells), so count them by passing a NULL cells name
+ */
+ oproc->num_timers = of_count_phandle_with_args(np, "ti,timers", NULL);
+ if (oproc->num_timers <= 0) {
+ dev_dbg(dev, "device does not have timers, status = %d\n",
+ oproc->num_timers);
+ oproc->num_timers = 0;
+ }
+
+ oproc->num_wd_timers = omap_rproc_count_wdog_timers(dev);
+
+ num_timers = oproc->num_timers + oproc->num_wd_timers;
+ if (num_timers) {
+ oproc->timers = devm_kcalloc(dev, num_timers,
+ sizeof(*oproc->timers),
+ GFP_KERNEL);
+ if (!oproc->timers)
+ return -ENOMEM;
+
+ dev_dbg(dev, "device has %d tick timers and %d watchdog timers\n",
+ oproc->num_timers, oproc->num_wd_timers);
+ }
+
+ return 0;
+}
+
static int omap_rproc_probe(struct platform_device *pdev)
{
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc;
struct rproc *rproc;
+ const char *firmware;
int ret;
+ struct reset_control *reset;
+
+ if (!np) {
+ dev_err(&pdev->dev, "only DT-based devices are supported\n");
+ return -ENODEV;
+ }
+
+ reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ firmware = omap_rproc_get_firmware(pdev);
+ if (IS_ERR(firmware))
+ return PTR_ERR(firmware);
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -188,24 +1307,60 @@ static int omap_rproc_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, pdata->name, &omap_rproc_ops,
- pdata->firmware, sizeof(*oproc));
+ rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
+ firmware, sizeof(*oproc));
if (!rproc)
return -ENOMEM;
oproc = rproc->priv;
oproc->rproc = rproc;
+ oproc->reset = reset;
/* All existing OMAP IPU and DSP processors have an MMU */
rproc->has_iommu = true;
+ ret = omap_rproc_of_get_internal_memories(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = omap_rproc_get_boot_data(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = omap_rproc_of_get_timers(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ init_completion(&oproc->pm_comp);
+ oproc->autosuspend_delay = DEFAULT_AUTOSUSPEND_DELAY;
+
+ of_property_read_u32(pdev->dev.of_node, "ti,autosuspend-delay-ms",
+ &oproc->autosuspend_delay);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, oproc->autosuspend_delay);
+
+ oproc->fck = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(oproc->fck)) {
+ ret = PTR_ERR(oproc->fck);
+ goto free_rproc;
+ }
+
+ ret = of_reserved_mem_device_init(&pdev->dev);
+ if (ret) {
+ dev_warn(&pdev->dev, "device does not have specific CMA pool.\n");
+ dev_warn(&pdev->dev, "Typically this should be provided,\n");
+ dev_warn(&pdev->dev, "only omit if you know what you are doing.\n");
+ }
+
platform_set_drvdata(pdev, rproc);
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto release_mem;
return 0;
+release_mem:
+ of_reserved_mem_device_release(&pdev->dev);
free_rproc:
rproc_free(rproc);
return ret;
@@ -217,15 +1372,24 @@ static int omap_rproc_remove(struct platform_device *pdev)
rproc_del(rproc);
rproc_free(rproc);
+ of_reserved_mem_device_release(&pdev->dev);
return 0;
}
+static const struct dev_pm_ops omap_rproc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_rproc_suspend, omap_rproc_resume)
+ SET_RUNTIME_PM_OPS(omap_rproc_runtime_suspend,
+ omap_rproc_runtime_resume, NULL)
+};
+
static struct platform_driver omap_rproc_driver = {
.probe = omap_rproc_probe,
.remove = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
+ .pm = &omap_rproc_pm_ops,
+ .of_match_table = omap_rproc_of_match,
},
};
diff --git a/drivers/remoteproc/omap_remoteproc.h b/drivers/remoteproc/omap_remoteproc.h
index f6d2036d383d..828e13256c02 100644
--- a/drivers/remoteproc/omap_remoteproc.h
+++ b/drivers/remoteproc/omap_remoteproc.h
@@ -1,35 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Remote processor messaging
*
- * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011-2020 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Texas Instruments nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _OMAP_RPMSG_H
@@ -56,6 +31,22 @@
*
* @RP_MBOX_ABORT_REQUEST: a "please crash" request, used for testing the
* recovery mechanism (to some extent).
+ *
+ * @RP_MBOX_SUSPEND_AUTO: auto suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_SYSTEM: system suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_ACK: successful response from remote processor for a
+ * suspend request
+ *
+ * @RP_MBOX_SUSPEND_CANCEL: a cancel suspend response from a remote processor
+ * on a suspend request
+ *
+ * Any new message definitions should be introduced here.
+ *
+ * @RP_MBOX_END_MSG: indicates the end of known/defined messages from the
+ * remote core; this should be the last definition.
+ *
*/
enum omap_rp_mbox_messages {
RP_MBOX_READY = 0xFFFFFF00,
@@ -64,6 +55,11 @@ enum omap_rp_mbox_messages {
RP_MBOX_ECHO_REQUEST = 0xFFFFFF03,
RP_MBOX_ECHO_REPLY = 0xFFFFFF04,
RP_MBOX_ABORT_REQUEST = 0xFFFFFF05,
+ RP_MBOX_SUSPEND_AUTO = 0xFFFFFF10,
+ RP_MBOX_SUSPEND_SYSTEM = 0xFFFFFF11,
+ RP_MBOX_SUSPEND_ACK = 0xFFFFFF12,
+ RP_MBOX_SUSPEND_CANCEL = 0xFFFFFF13,
+ RP_MBOX_END_MSG = 0xFFFFFF14,
};
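+
+/*
+ * Note: mailbox payloads below RP_MBOX_READY are interpreted by the host as
+ * virtqueue indices rather than control messages; see the mailbox callback
+ * in omap_remoteproc.c for the filtering logic.
+ */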
#endif /* _OMAP_RPMSG_H */
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index cb0f4a0be032..111a442c993c 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -15,6 +15,8 @@
#include <linux/remoteproc.h>
#include "qcom_q6v5.h"
+#define Q6V5_PANIC_DELAY_MS 200
+
/**
* qcom_q6v5_prepare() - reinitialize the qcom_q6v5 context before start
* @q6v5: reference to qcom_q6v5 context to be reinitialized
@@ -163,6 +165,24 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
EXPORT_SYMBOL_GPL(qcom_q6v5_request_stop);
/**
+ * qcom_q6v5_panic() - panic handler to invoke a stop on the remote
+ * @q6v5: reference to qcom_q6v5 context
+ *
+ * Set the stop bit and sleep in order to allow the remote processor to flush
+ * its caches etc. for post-mortem debugging.
+ *
+ * Return: 200ms
+ */
+unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5)
+{
+ qcom_smem_state_update_bits(q6v5->state,
+ BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
+
+ return Q6V5_PANIC_DELAY_MS;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_panic);
+
+/**
* qcom_q6v5_init() - initializer of the q6v5 common struct
* @q6v5: handle to be initialized
* @pdev: platform_device reference for acquiring resources
diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h
index 7ac92c1e0f49..c4ed887c1499 100644
--- a/drivers/remoteproc/qcom_q6v5.h
+++ b/drivers/remoteproc/qcom_q6v5.h
@@ -42,5 +42,6 @@ int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5);
int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5);
int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5);
int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout);
+unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5);
#endif
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index e953886b2eb7..24a3db961d5e 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -270,7 +270,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
@@ -282,12 +282,20 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
return adsp->mem_region + offset;
}
+static unsigned long adsp_panic(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = rproc->priv;
+
+ return qcom_q6v5_panic(&adsp->q6v5);
+}
+
static const struct rproc_ops adsp_ops = {
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = adsp_load,
+ .panic = adsp_panic,
};
static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids)
diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
new file mode 100644
index 000000000000..e1c10a128bfd
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Qualcomm IPA notification subdev support
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+
+static void
+ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event)
+{
+ struct qcom_rproc_ipa_notify *ipa_notify;
+ qcom_ipa_notify_t notify;
+
+ ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev);
+ notify = ipa_notify->notify;
+ if (notify)
+ notify(ipa_notify->data, event);
+}
+
+static int ipa_notify_prepare(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_STARTING);
+
+ return 0;
+}
+
+static int ipa_notify_start(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_RUNNING);
+
+ return 0;
+}
+
+static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed)
+
+{
+ ipa_notify_common(subdev, crashed ? MODEM_CRASHED : MODEM_STOPPING);
+}
+
+static void ipa_notify_unprepare(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_OFFLINE);
+}
+
+static void ipa_notify_removing(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_REMOVING);
+}
+
+/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */
+void qcom_add_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify)
+{
+ ipa_notify->notify = NULL;
+ ipa_notify->data = NULL;
+ ipa_notify->subdev.prepare = ipa_notify_prepare;
+ ipa_notify->subdev.start = ipa_notify_start;
+ ipa_notify->subdev.stop = ipa_notify_stop;
+ ipa_notify->subdev.unprepare = ipa_notify_unprepare;
+
+ rproc_add_subdev(rproc, &ipa_notify->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev);
+
+/* Remove the IPA notification subdevice */
+void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify)
+{
+ struct rproc_subdev *subdev = &ipa_notify->subdev;
+
+ ipa_notify_removing(subdev);
+
+ rproc_remove_subdev(rproc, subdev);
+ ipa_notify->notify = NULL; /* Make it obvious */
+}
+EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev");
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index a1cc9cbe038f..5475d4f808a8 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
+#include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
@@ -201,6 +202,7 @@ struct q6v5 {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
+ struct qcom_rproc_ipa_notify ipa_notify_subdev;
struct qcom_sysmon *sysmon;
bool need_mem_protection;
bool has_alt_reset;
@@ -365,7 +367,7 @@ unroll_pd_votes:
}
return ret;
-};
+}
static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
@@ -379,23 +381,33 @@ static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
}
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
- bool remote_owner, phys_addr_t addr,
+ bool local, bool remote, phys_addr_t addr,
size_t size)
{
- struct qcom_scm_vmperm next;
+ struct qcom_scm_vmperm next[2];
+ int perms = 0;
if (!qproc->need_mem_protection)
return 0;
- if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
- return 0;
- if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
+
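+ /* nothing to do if the requested ownership already matches the current one */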
+ if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
+ remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
return 0;
- next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
- next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
+ if (local) {
+ next[perms].vmid = QCOM_SCM_VMID_HLOS;
+ next[perms].perm = QCOM_SCM_PERM_RWX;
+ perms++;
+ }
+
+ if (remote) {
+ next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
+ next[perms].perm = QCOM_SCM_PERM_RW;
+ perms++;
+ }
return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
- current_perm, &next, 1);
+ current_perm, next, perms);
}
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
@@ -801,7 +813,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
- ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
+ ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
+ phys, size);
if (ret) {
dev_err(qproc->dev,
"assigning Q6 access to metadata failed: %d\n", ret);
@@ -819,7 +832,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
/* Metadata authentication done, remove modem access */
- xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
+ xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
+ phys, size);
if (xferop_ret)
dev_warn(qproc->dev,
"mdt buffer not reclaimed system may become unstable\n");
@@ -906,7 +920,7 @@ static int q6v5_mba_load(struct q6v5 *qproc)
}
/* Assign MBA image access in DDR to q6 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
qproc->mba_phys, qproc->mba_size);
if (ret) {
dev_err(qproc->dev,
@@ -943,8 +957,8 @@ halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
reclaim_mba:
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
- qproc->mba_phys,
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ false, qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret) {
dev_err(qproc->dev,
@@ -1001,11 +1015,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
}
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
- false, qproc->mpss_phys,
- qproc->mpss_size);
- WARN_ON(ret);
-
q6v5_reset_assert(qproc);
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
@@ -1019,7 +1028,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
/* In case of failure or coredump scenario where reclaiming MBA memory
* could not happen reclaim it here.
*/
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
qproc->mba_phys,
qproc->mba_size);
WARN_ON(ret);
@@ -1035,6 +1044,23 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
}
}
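+/*
+ * Re-load and boot the MBA image. This is used on the coredump path when the
+ * MBA is not already loaded, so that MPSS memory can be unlocked and copied.
+ */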
+static int q6v5_reload_mba(struct rproc *rproc)
+{
+ struct q6v5 *qproc = rproc->priv;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, rproc->firmware, qproc->dev);
+ if (ret < 0)
+ return ret;
+
+ q6v5_load(rproc, fw);
+ ret = q6v5_mba_load(qproc);
+ release_firmware(fw);
+
+ return ret;
+}
+
static int q6v5_mpss_load(struct q6v5 *qproc)
{
const struct elf32_phdr *phdrs;
@@ -1046,6 +1072,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
phys_addr_t boot_addr;
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
+ u32 code_length;
bool relocate = false;
char *fw_name;
size_t fw_name_len;
@@ -1095,6 +1122,24 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
}
+ /*
+ * In case of a modem subsystem restart on secure devices, the modem
+ * memory can be reclaimed only after the MBA is loaded. For a modem
+ * cold boot this will be a no-op.
+ */
+ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
+ qproc->mpss_phys, qproc->mpss_size);
+
+ /* Share ownership between Linux and MSS, during segment loading */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mpss memory failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto release_firmware;
+ }
+
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
qproc->mpss_reloc = mpss_reloc;
/* Load firmware segments */
@@ -1143,10 +1188,25 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
phdr->p_memsz - phdr->p_filesz);
}
size += phdr->p_memsz;
+
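+ /*
+ * Updating the code length register after each loaded segment lets
+ * the MBA authenticate the MPSS image incrementally; the start
+ * address and the LOAD_READY command are programmed once, before
+ * the first length update.
+ */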
+ code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ if (!code_length) {
+ boot_addr = relocate ? qproc->mpss_phys : min_addr;
+ writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+ }
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
+ ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
+ if (ret < 0) {
+ dev_err(qproc->dev, "MPSS authentication failed: %d\n",
+ ret);
+ goto release_firmware;
+ }
}
/* Transfer ownership of modem ddr region to q6 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
qproc->mpss_phys, qproc->mpss_size);
if (ret) {
dev_err(qproc->dev,
@@ -1155,11 +1215,6 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
- boot_addr = relocate ? qproc->mpss_phys : min_addr;
- writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
- writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
- writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
-
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "MPSS authentication timed out\n");
@@ -1184,8 +1239,16 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
/* Unlock mba before copying segments */
- if (!qproc->dump_mba_loaded)
- ret = q6v5_mba_load(qproc);
+ if (!qproc->dump_mba_loaded) {
+ ret = q6v5_reload_mba(rproc);
+ if (!ret) {
+ /* Reset ownership back to Linux to copy segments */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ true, false,
+ qproc->mpss_phys,
+ qproc->mpss_size);
+ }
+ }
if (!ptr || ret)
memset(dest, 0xff, segment->size);
@@ -1196,8 +1259,14 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
/* Reclaim mba after copying segments */
if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
- if (qproc->dump_mba_loaded)
+ if (qproc->dump_mba_loaded) {
+ /* Try to reset ownership back to Q6 */
+ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ false, true,
+ qproc->mpss_phys,
+ qproc->mpss_size);
q6v5_mba_reclaim(qproc);
+ }
}
}
@@ -1223,8 +1292,8 @@ static int q6v5_start(struct rproc *rproc)
goto reclaim_mpss;
}
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
- qproc->mba_phys,
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ false, qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret)
dev_err(qproc->dev,
@@ -1237,10 +1306,6 @@ static int q6v5_start(struct rproc *rproc)
return 0;
reclaim_mpss:
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
- false, qproc->mpss_phys,
- qproc->mpss_size);
- WARN_ON(xfermemop_ret);
q6v5_mba_reclaim(qproc);
return ret;
@@ -1262,7 +1327,7 @@ static int q6v5_stop(struct rproc *rproc)
return 0;
}
-static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *q6v5_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct q6v5 *qproc = rproc->priv;
int offset;
@@ -1462,7 +1527,7 @@ unroll_attach:
dev_pm_domain_detach(devs[i], false);
return ret;
-};
+}
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
@@ -1540,6 +1605,39 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
return 0;
}
+#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
+
+/* Register IPA notification function */
+int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
+ void *data)
+{
+ struct qcom_rproc_ipa_notify *ipa_notify;
+ struct q6v5 *qproc = rproc->priv;
+
+ if (!notify)
+ return -EINVAL;
+
+ ipa_notify = &qproc->ipa_notify_subdev;
+ if (ipa_notify->notify)
+ return -EBUSY;
+
+ ipa_notify->notify = notify;
+ ipa_notify->data = data;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
+
+/* Deregister IPA notification function */
+void qcom_deregister_ipa_notify(struct rproc *rproc)
+{
+ struct q6v5 *qproc = rproc->priv;
+
+ qproc->ipa_notify_subdev.notify = NULL;
+}
+EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
+#endif /* IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
+
static int q6v5_probe(struct platform_device *pdev)
{
const struct rproc_hexagon_res *desc;
@@ -1577,7 +1675,7 @@ static int q6v5_probe(struct platform_device *pdev)
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1, &qproc->hexagon_mdt_image);
if (ret < 0 && ret != -EINVAL)
- return ret;
+ goto free_rproc;
platform_set_drvdata(pdev, qproc);
@@ -1664,18 +1762,26 @@ static int q6v5_probe(struct platform_device *pdev)
qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
+ qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
ret = PTR_ERR(qproc->sysmon);
- goto detach_proxy_pds;
+ goto remove_subdevs;
}
ret = rproc_add(rproc);
if (ret)
- goto detach_proxy_pds;
+ goto remove_sysmon_subdev;
return 0;
+remove_sysmon_subdev:
+ qcom_remove_sysmon_subdev(qproc->sysmon);
+remove_subdevs:
+ qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
+ qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+ qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
@@ -1689,18 +1795,20 @@ free_rproc:
static int q6v5_remove(struct platform_device *pdev)
{
struct q6v5 *qproc = platform_get_drvdata(pdev);
+ struct rproc *rproc = qproc->rproc;
- rproc_del(qproc->rproc);
+ rproc_del(rproc);
qcom_remove_sysmon_subdev(qproc->sysmon);
- qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
- qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
- qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
+ qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
+ qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+ qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
- q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+ q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
- rproc_free(qproc->rproc);
+ rproc_free(rproc);
return 0;
}
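
[Editor's note] The hunks above change q6v5_xfer_mem_ownership() to take a (local, remote) flag pair instead of a single direction, so the MPSS region can be Linux-only, shared between Linux and the modem while segments are copied, or handed fully to the modem afterwards. Below is a minimal sketch of how such a helper can map onto qcom_scm_assign_mem(); the xfer_mem_ownership() wrapper itself is invented for illustration, while the VMID/permission constants and the SCM call come from <linux/qcom_scm.h>.

#include <linux/qcom_scm.h>

/* Illustrative only: build the new-owner list from the two flags. */
static int xfer_mem_ownership(unsigned int *curr_perm, bool local, bool remote,
			      phys_addr_t addr, size_t size)
{
	struct qcom_scm_vmperm next[2];
	int n = 0;

	if (local) {		/* keep (or restore) HLOS access */
		next[n].vmid = QCOM_SCM_VMID_HLOS;
		next[n].perm = QCOM_SCM_PERM_RWX;
		n++;
	}
	if (remote) {		/* grant the modem subsystem access */
		next[n].vmid = QCOM_SCM_VMID_MSS_MSA;
		next[n].perm = QCOM_SCM_PERM_RW;
		n++;
	}

	/* curr_perm (the current-owner bitmask) is updated on success */
	return qcom_scm_assign_mem(addr, size, curr_perm, next, n);
}

Loading calls the real helper with (true, true) to share the region while segments are copied, then with (false, true) once loading is done to give the modem exclusive ownership — matching the two calls in the hunks above.
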
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index edf9d0e1a235..7a63efb85405 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -222,7 +222,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
@@ -234,12 +234,20 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
return adsp->mem_region + offset;
}
+static unsigned long adsp_panic(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+
+ return qcom_q6v5_panic(&adsp->q6v5);
+}
+
static const struct rproc_ops adsp_ops = {
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = adsp_load,
+ .panic = adsp_panic,
};
static int adsp_init_clock(struct qcom_adsp *adsp)
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index f93e1e4a1cc0..f1924b740a10 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -406,7 +406,7 @@ static int q6v5_wcss_stop(struct rproc *rproc)
return 0;
}
-static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct q6v5_wcss *wcss = rproc->priv;
int offset;
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index dc135754bb9c..0c7afd038f0d 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -287,7 +287,7 @@ static int wcnss_stop(struct rproc *rproc)
return ret;
}
-static void *wcnss_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
int offset;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 097f33e4f1f3..e12a54e67588 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -26,6 +27,7 @@
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/devcoredump.h>
+#include <linux/rculist.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
#include <linux/idr.h>
@@ -38,11 +40,13 @@
#include <linux/platform_device.h>
#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
+static struct notifier_block rproc_panic_nb;
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
void *, int offset, int avail);
@@ -185,7 +189,7 @@ EXPORT_SYMBOL(rproc_va_to_pa);
* here the output of the DMA API for the carveouts, which should be more
* correct.
*/
-void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct rproc_mem_entry *carveout;
void *ptr = NULL;
@@ -224,7 +228,8 @@ EXPORT_SYMBOL(rproc_da_to_va);
/**
* rproc_find_carveout_by_name() - lookup the carveout region by a name
* @rproc: handle of a remote processor
- * @name,..: carveout name to find (standard printf format)
+ * @name: carveout name to find (format string)
+ * @...: optional parameters matching @name string
*
* Platform driver has the capability to register some pre-allocated carveout
* (physically contiguous memory regions) before rproc firmware loading and
@@ -318,8 +323,9 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
struct device *dev = &rproc->dev;
struct rproc_vring *rvring = &rvdev->vring[i];
struct fw_rsc_vdev *rsc;
- int ret, size, notifyid;
+ int ret, notifyid;
struct rproc_mem_entry *mem;
+ size_t size;
/* actual size of vring (in bytes) */
size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
@@ -445,6 +451,7 @@ static void rproc_rvdev_release(struct device *dev)
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
* @rsc: the vring resource descriptor
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* This resource entry requests the host to statically register a virtio
@@ -587,6 +594,7 @@ void rproc_vdev_release(struct kref *ref)
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
* @rsc: the trace resource descriptor
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* In case the remote processor dumps trace logs into memory,
@@ -652,6 +660,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* rproc_handle_devmem() - handle devmem resource entry
* @rproc: remote processor handle
* @rsc: the devmem resource entry
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* Remote processors commonly need to access certain on-chip peripherals.
@@ -746,11 +755,12 @@ static int rproc_alloc_carveout(struct rproc *rproc,
va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
if (!va) {
dev_err(dev->parent,
- "failed to allocate dma memory: len 0x%x\n", mem->len);
+ "failed to allocate dma memory: len 0x%zx\n",
+ mem->len);
return -ENOMEM;
}
- dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
+ dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
va, &dma, mem->len);
if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
@@ -853,6 +863,7 @@ static int rproc_release_carveout(struct rproc *rproc,
* rproc_handle_carveout() - handle phys contig memory allocation requests
* @rproc: rproc handle
* @rsc: the resource entry
+ * @offset: offset of the resource entry
* @avail: size of available data (for image validation)
*
* This function will handle firmware requests for allocation of physically
@@ -957,7 +968,7 @@ EXPORT_SYMBOL(rproc_add_carveout);
*/
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
- void *va, dma_addr_t dma, int len, u32 da,
+ void *va, dma_addr_t dma, size_t len, u32 da,
int (*alloc)(struct rproc *, struct rproc_mem_entry *),
int (*release)(struct rproc *, struct rproc_mem_entry *),
const char *name, ...)
@@ -999,7 +1010,7 @@ EXPORT_SYMBOL(rproc_mem_entry_init);
* provided by client.
*/
struct rproc_mem_entry *
-rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...)
{
struct rproc_mem_entry *mem;
@@ -1022,7 +1033,7 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
-/**
+/*
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
*/
@@ -1270,7 +1281,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
if (unmapped != entry->len) {
/* nothing much to do besides complaining */
- dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
+ dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
unmapped);
}
@@ -1564,20 +1575,21 @@ EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
static void rproc_coredump(struct rproc *rproc)
{
struct rproc_dump_segment *segment;
- struct elf32_phdr *phdr;
- struct elf32_hdr *ehdr;
+ void *phdr;
+ void *ehdr;
size_t data_size;
size_t offset;
void *data;
void *ptr;
+ u8 class = rproc->elf_class;
int phnum = 0;
if (list_empty(&rproc->dump_segments))
return;
- data_size = sizeof(*ehdr);
+ data_size = elf_size_of_hdr(class);
list_for_each_entry(segment, &rproc->dump_segments, node) {
- data_size += sizeof(*phdr) + segment->size;
+ data_size += elf_size_of_phdr(class) + segment->size;
phnum++;
}
@@ -1588,33 +1600,33 @@ static void rproc_coredump(struct rproc *rproc)
ehdr = data;
- memset(ehdr, 0, sizeof(*ehdr));
- memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
- ehdr->e_ident[EI_CLASS] = ELFCLASS32;
- ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
- ehdr->e_ident[EI_VERSION] = EV_CURRENT;
- ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
- ehdr->e_type = ET_CORE;
- ehdr->e_machine = EM_NONE;
- ehdr->e_version = EV_CURRENT;
- ehdr->e_entry = rproc->bootaddr;
- ehdr->e_phoff = sizeof(*ehdr);
- ehdr->e_ehsize = sizeof(*ehdr);
- ehdr->e_phentsize = sizeof(*phdr);
- ehdr->e_phnum = phnum;
-
- phdr = data + ehdr->e_phoff;
- offset = ehdr->e_phoff + sizeof(*phdr) * ehdr->e_phnum;
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, EM_NONE);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
+ elf_hdr_set_e_phnum(class, ehdr, phnum);
+
+ phdr = data + elf_hdr_get_e_phoff(class, ehdr);
+ offset = elf_hdr_get_e_phoff(class, ehdr);
+ offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
+
list_for_each_entry(segment, &rproc->dump_segments, node) {
- memset(phdr, 0, sizeof(*phdr));
- phdr->p_type = PT_LOAD;
- phdr->p_offset = offset;
- phdr->p_vaddr = segment->da;
- phdr->p_paddr = segment->da;
- phdr->p_filesz = segment->size;
- phdr->p_memsz = segment->size;
- phdr->p_flags = PF_R | PF_W | PF_X;
- phdr->p_align = 0;
+ memset(phdr, 0, elf_size_of_phdr(class));
+ elf_phdr_set_p_type(class, phdr, PT_LOAD);
+ elf_phdr_set_p_offset(class, phdr, offset);
+ elf_phdr_set_p_vaddr(class, phdr, segment->da);
+ elf_phdr_set_p_paddr(class, phdr, segment->da);
+ elf_phdr_set_p_filesz(class, phdr, segment->size);
+ elf_phdr_set_p_memsz(class, phdr, segment->size);
+ elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
+ elf_phdr_set_p_align(class, phdr, 0);
if (segment->dump) {
segment->dump(rproc, segment, data + offset);
@@ -1630,8 +1642,8 @@ static void rproc_coredump(struct rproc *rproc)
}
}
- offset += phdr->p_filesz;
- phdr++;
+ offset += elf_phdr_get_p_filesz(class, phdr);
+ phdr += elf_size_of_phdr(class);
}
dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
@@ -1653,12 +1665,16 @@ int rproc_trigger_recovery(struct rproc *rproc)
struct device *dev = &rproc->dev;
int ret;
- dev_err(dev, "recovering %s\n", rproc->name);
-
ret = mutex_lock_interruptible(&rproc->lock);
if (ret)
return ret;
+ /* State could have changed before we got the mutex */
+ if (rproc->state != RPROC_CRASHED)
+ goto unlock_mutex;
+
+ dev_err(dev, "recovering %s\n", rproc->name);
+
ret = rproc_stop(rproc, true);
if (ret)
goto unlock_mutex;
@@ -1685,6 +1701,7 @@ unlock_mutex:
/**
* rproc_crash_handler_work() - handle a crash
+ * @work: work treating the crash
*
* This function needs to handle everything related to a crash, like cpu
* registers and stack dump, information to help to debug the fatal error, etc.
@@ -1854,8 +1871,8 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
if (!np)
return NULL;
- mutex_lock(&rproc_list_mutex);
- list_for_each_entry(r, &rproc_list, node) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(r, &rproc_list, node) {
if (r->dev.parent && r->dev.parent->of_node == np) {
/* prevent underlying implementation from being removed */
if (!try_module_get(r->dev.parent->driver->owner)) {
@@ -1868,7 +1885,7 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
break;
}
}
- mutex_unlock(&rproc_list_mutex);
+ rcu_read_unlock();
of_node_put(np);
@@ -1925,7 +1942,7 @@ int rproc_add(struct rproc *rproc)
/* expose to rproc_get_by_phandle users */
mutex_lock(&rproc_list_mutex);
- list_add(&rproc->node, &rproc_list);
+ list_add_rcu(&rproc->node, &rproc_list);
mutex_unlock(&rproc_list_mutex);
return 0;
@@ -2029,6 +2046,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->name = name;
rproc->priv = &rproc[1];
rproc->auto_boot = true;
+ rproc->elf_class = ELFCLASS32;
device_initialize(&rproc->dev);
rproc->dev.parent = dev;
@@ -2053,7 +2071,8 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->ops->load = rproc_elf_load_segments;
rproc->ops->parse_fw = rproc_elf_load_rsc_table;
rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
- rproc->ops->sanity_check = rproc_elf_sanity_check;
+ if (!rproc->ops->sanity_check)
+ rproc->ops->sanity_check = rproc_elf32_sanity_check;
rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
}
@@ -2140,9 +2159,12 @@ int rproc_del(struct rproc *rproc)
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
- list_del(&rproc->node);
+ list_del_rcu(&rproc->node);
mutex_unlock(&rproc_list_mutex);
+ /* Ensure that no readers of rproc_list are still active */
+ synchronize_rcu();
+
device_del(&rproc->dev);
return 0;
@@ -2216,10 +2238,50 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
}
EXPORT_SYMBOL(rproc_report_crash);
+static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ unsigned int longest = 0;
+ struct rproc *rproc;
+ unsigned int d;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rproc, &rproc_list, node) {
+ if (!rproc->ops->panic || rproc->state != RPROC_RUNNING)
+ continue;
+
+ d = rproc->ops->panic(rproc);
+ longest = max(longest, d);
+ }
+ rcu_read_unlock();
+
+ /*
+ * Delay for the longest requested duration before returning. This can
+ * be used by the remoteproc drivers to give the remote processor time
+ * to perform any requested operations (such as flush caches), when
+ * it's not possible to signal the Linux side due to the panic.
+ */
+ mdelay(longest);
+
+ return NOTIFY_DONE;
+}
+
+static void __init rproc_init_panic(void)
+{
+ rproc_panic_nb.notifier_call = rproc_panic_handler;
+ atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
+}
+
+static void __exit rproc_exit_panic(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
+}
+
static int __init remoteproc_init(void)
{
rproc_init_sysfs();
rproc_init_debugfs();
+ rproc_init_panic();
return 0;
}
@@ -2229,6 +2291,7 @@ static void __exit remoteproc_exit(void)
{
ida_destroy(&rproc_dev_index);
+ rproc_exit_panic();
rproc_exit_debugfs();
rproc_exit_sysfs();
}
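
[Editor's note] A driver opts into the new panic path simply by filling in the .panic operation; rproc_panic_handler() above then runs it from the atomic panic notifier chain and mdelay()s for the longest returned value. A minimal, hypothetical driver-side sketch (my_rproc and its doorbell register are invented for illustration):

#include <linux/io.h>
#include <linux/remoteproc.h>

struct my_rproc {
	void __iomem *crash_doorbell;	/* assumed MMIO doorbell */
};

/* Runs in panic context: no sleeping, no locks that may already be held. */
static unsigned long my_rproc_panic(struct rproc *rproc)
{
	struct my_rproc *priv = rproc->priv;

	writel(1, priv->crash_doorbell);	/* tell the firmware to flush */

	return 200;	/* ms for remoteproc_core to mdelay() before reboot */
}

static const struct rproc_ops my_rproc_ops = {
	.panic = my_rproc_panic,
	/* .start, .stop, .da_to_va, ... as usual */
};
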
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index dd93cf04e17f..d734cadb16e3 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -138,16 +138,16 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
buf[count - 1] = '\0';
if (!strncmp(buf, "enabled", count)) {
+ /* change the flag and begin the recovery process if needed */
rproc->recovery_disabled = false;
- /* if rproc has crashed, trigger recovery */
- if (rproc->state == RPROC_CRASHED)
- rproc_trigger_recovery(rproc);
+ rproc_trigger_recovery(rproc);
} else if (!strncmp(buf, "disabled", count)) {
rproc->recovery_disabled = true;
} else if (!strncmp(buf, "recover", count)) {
- /* if rproc has crashed, trigger recovery */
- if (rproc->state == RPROC_CRASHED)
- rproc_trigger_recovery(rproc);
+ /* begin the recovery process without changing the flag */
+ rproc_trigger_recovery(rproc);
+ } else {
+ return -EINVAL;
}
return count;
@@ -293,7 +293,7 @@ static int rproc_carveouts_show(struct seq_file *seq, void *p)
seq_printf(seq, "\tVirtual address: %pK\n", carveout->va);
seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
- seq_printf(seq, "\tLength: 0x%x Bytes\n\n", carveout->len);
+ seq_printf(seq, "\tLength: 0x%zx Bytes\n\n", carveout->len);
}
return 0;
@@ -349,7 +349,7 @@ void rproc_create_debug_dir(struct rproc *rproc)
debugfs_create_file("name", 0400, rproc->dbg_dir,
rproc, &rproc_name_ops);
- debugfs_create_file("recovery", 0400, rproc->dbg_dir,
+ debugfs_create_file("recovery", 0600, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
debugfs_create_file("crash", 0200, rproc->dbg_dir,
rproc, &rproc_crash_ops);
diff --git a/drivers/remoteproc/remoteproc_elf_helpers.h b/drivers/remoteproc/remoteproc_elf_helpers.h
new file mode 100644
index 000000000000..4b6be7b6bf4d
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_elf_helpers.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Remote processor ELF helper defines
+ *
+ * Copyright (C) 2020 Kalray, Inc.
+ */
+
+#ifndef REMOTEPROC_ELF_LOADER_H
+#define REMOTEPROC_ELF_LOADER_H
+
+#include <linux/elf.h>
+#include <linux/types.h>
+
+/**
+ * fw_elf_get_class - Get elf class
+ * @fw: the ELF firmware image
+ *
+ * Note that we use an elf32_hdr to access the class since the start of the
+ * struct is the same for both ELF classes.
+ *
+ * Return: elf class of the firmware
+ */
+static inline u8 fw_elf_get_class(const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
+
+ return ehdr->e_ident[EI_CLASS];
+}
+
+static inline void elf_hdr_init_ident(struct elf32_hdr *hdr, u8 class)
+{
+ memcpy(hdr->e_ident, ELFMAG, SELFMAG);
+ hdr->e_ident[EI_CLASS] = class;
+ hdr->e_ident[EI_DATA] = ELFDATA2LSB;
+ hdr->e_ident[EI_VERSION] = EV_CURRENT;
+ hdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+}
+
+/* Generate getter and setter for a specific elf struct/field */
+#define ELF_GEN_FIELD_GET_SET(__s, __field, __type) \
+static inline __type elf_##__s##_get_##__field(u8 class, const void *arg) \
+{ \
+ if (class == ELFCLASS32) \
+ return (__type) ((const struct elf32_##__s *) arg)->__field; \
+ else \
+ return (__type) ((const struct elf64_##__s *) arg)->__field; \
+} \
+static inline void elf_##__s##_set_##__field(u8 class, void *arg, \
+ __type value) \
+{ \
+ if (class == ELFCLASS32) \
+ ((struct elf32_##__s *) arg)->__field = (__type) value; \
+ else \
+ ((struct elf64_##__s *) arg)->__field = (__type) value; \
+}
+
+ELF_GEN_FIELD_GET_SET(hdr, e_entry, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_phnum, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_shnum, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_phoff, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_shoff, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_shstrndx, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_machine, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_type, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_version, u32)
+ELF_GEN_FIELD_GET_SET(hdr, e_ehsize, u32)
+ELF_GEN_FIELD_GET_SET(hdr, e_phentsize, u16)
+
+ELF_GEN_FIELD_GET_SET(phdr, p_paddr, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_vaddr, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_filesz, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_memsz, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_type, u32)
+ELF_GEN_FIELD_GET_SET(phdr, p_offset, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_flags, u32)
+ELF_GEN_FIELD_GET_SET(phdr, p_align, u64)
+
+ELF_GEN_FIELD_GET_SET(shdr, sh_size, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_offset, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_name, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_addr, u64)
+
+#define ELF_STRUCT_SIZE(__s) \
+static inline unsigned long elf_size_of_##__s(u8 class) \
+{ \
+ if (class == ELFCLASS32)\
+ return sizeof(struct elf32_##__s); \
+ else \
+ return sizeof(struct elf64_##__s); \
+}
+
+ELF_STRUCT_SIZE(shdr)
+ELF_STRUCT_SIZE(phdr)
+ELF_STRUCT_SIZE(hdr)
+
+#endif /* REMOTEPROC_ELF_LOADER_H */
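
[Editor's note] With these macros, callers never touch elf32_/elf64_ structs directly: they fetch the class byte once and pass it to the generated accessors. A small sketch of the resulting pattern, mirroring what rproc_elf_get_boot_addr() becomes in the loader diff below:

#include <linux/firmware.h>
#include "remoteproc_elf_helpers.h"

/* One code path serves both ELF classes: dispatch happens at run time
 * on the EI_CLASS byte instead of at compile time on the struct type. */
static u64 image_entry_point(const struct firmware *fw)
{
	u8 class = fw_elf_get_class(fw);

	return elf_hdr_get_e_entry(class, fw->data);
}
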
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index 606aae166eba..16e2c496fd45 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -23,20 +23,29 @@
#include <linux/elf.h>
#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
/**
- * rproc_elf_sanity_check() - Sanity Check ELF firmware image
+ * rproc_elf_sanity_check() - Sanity Check for ELF32/ELF64 firmware image
* @rproc: the remote processor handle
* @fw: the ELF firmware image
*
- * Make sure this fw image is sane.
+ * Make sure this fw image is sane (i.e. a correct ELF32/ELF64 file).
*/
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
const char *name = rproc->firmware;
struct device *dev = &rproc->dev;
+ /*
+ * ELF files begin with the same structure, so to simplify header
+ * parsing we can use the elf32_hdr layout for both elf64 and
+ * elf32.
+ */
struct elf32_hdr *ehdr;
+ u32 elf_shdr_get_size;
+ u64 phoff, shoff;
char class;
+ u16 phnum;
if (!fw) {
dev_err(dev, "failed to load %s\n", name);
@@ -50,13 +59,22 @@ int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
ehdr = (struct elf32_hdr *)fw->data;
- /* We only support ELF32 at this point */
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
class = ehdr->e_ident[EI_CLASS];
- if (class != ELFCLASS32) {
+ if (class != ELFCLASS32 && class != ELFCLASS64) {
dev_err(dev, "Unsupported class: %d\n", class);
return -EINVAL;
}
+ if (class == ELFCLASS64 && fw->size < sizeof(struct elf64_hdr)) {
+ dev_err(dev, "elf64 header is too small\n");
+ return -EINVAL;
+ }
+
/* We assume the firmware has the same endianness as the host */
# ifdef __LITTLE_ENDIAN
if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
@@ -67,31 +85,55 @@ int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
return -EINVAL;
}
- if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
- dev_err(dev, "Image is too small\n");
- return -EINVAL;
- }
+ phoff = elf_hdr_get_e_phoff(class, fw->data);
+ shoff = elf_hdr_get_e_shoff(class, fw->data);
+ phnum = elf_hdr_get_e_phnum(class, fw->data);
+ elf_shdr_get_size = elf_size_of_shdr(class);
- if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
- dev_err(dev, "Image is corrupted (bad magic)\n");
+ if (fw->size < shoff + elf_shdr_get_size) {
+ dev_err(dev, "Image is too small\n");
return -EINVAL;
}
- if (ehdr->e_phnum == 0) {
+ if (phnum == 0) {
dev_err(dev, "No loadable segments\n");
return -EINVAL;
}
- if (ehdr->e_phoff > fw->size) {
+ if (phoff > fw->size) {
dev_err(dev, "Firmware size is too small\n");
return -EINVAL;
}
+ dev_dbg(dev, "Firmware is an elf%d file\n",
+ class == ELFCLASS32 ? 32 : 64);
+
return 0;
}
EXPORT_SYMBOL(rproc_elf_sanity_check);
/**
+ * rproc_elf32_sanity_check() - Sanity Check ELF32 firmware image
+ * @rproc: the remote processor handle
+ * @fw: the ELF32 firmware image
+ *
+ * Make sure this fw image is sane.
+ */
+int rproc_elf32_sanity_check(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret = rproc_elf_sanity_check(rproc, fw);
+
+ if (ret)
+ return ret;
+
+ if (fw_elf_get_class(fw) == ELFCLASS32)
+ return 0;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(rproc_elf32_sanity_check);
+
+/**
* rproc_elf_get_boot_addr() - Get rproc's boot address.
* @rproc: the remote processor handle
* @fw: the ELF firmware image
@@ -102,11 +144,9 @@ EXPORT_SYMBOL(rproc_elf_sanity_check);
* Note that the boot address is not a configurable property of all remote
* processors. Some will always boot at a specific hard-coded address.
*/
-u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
- struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
-
- return ehdr->e_entry;
+ return elf_hdr_get_e_entry(fw_elf_get_class(fw), fw->data);
}
EXPORT_SYMBOL(rproc_elf_get_boot_addr);
@@ -137,53 +177,65 @@ EXPORT_SYMBOL(rproc_elf_get_boot_addr);
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
- struct elf32_hdr *ehdr;
- struct elf32_phdr *phdr;
+ const void *ehdr, *phdr;
int i, ret = 0;
+ u16 phnum;
const u8 *elf_data = fw->data;
+ u8 class = fw_elf_get_class(fw);
+ u32 elf_phdr_get_size = elf_size_of_phdr(class);
- ehdr = (struct elf32_hdr *)elf_data;
- phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+ ehdr = elf_data;
+ phnum = elf_hdr_get_e_phnum(class, ehdr);
+ phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
/* go through the available ELF segments */
- for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
- u32 da = phdr->p_paddr;
- u32 memsz = phdr->p_memsz;
- u32 filesz = phdr->p_filesz;
- u32 offset = phdr->p_offset;
+ for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
+ u64 da = elf_phdr_get_p_paddr(class, phdr);
+ u64 memsz = elf_phdr_get_p_memsz(class, phdr);
+ u64 filesz = elf_phdr_get_p_filesz(class, phdr);
+ u64 offset = elf_phdr_get_p_offset(class, phdr);
+ u32 type = elf_phdr_get_p_type(class, phdr);
void *ptr;
- if (phdr->p_type != PT_LOAD)
+ if (type != PT_LOAD)
continue;
- dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
- phdr->p_type, da, memsz, filesz);
+ dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
+ type, da, memsz, filesz);
if (filesz > memsz) {
- dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
- dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
offset + filesz, fw->size);
ret = -EINVAL;
break;
}
+ if (!rproc_u64_fit_in_size_t(memsz)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ memsz);
+ ret = -EOVERFLOW;
+ break;
+ }
+
/* grab the kernel address for this device address */
ptr = rproc_da_to_va(rproc, da, memsz);
if (!ptr) {
- dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
+ memsz);
ret = -EINVAL;
break;
}
/* put the segment where the remote processor expects it */
- if (phdr->p_filesz)
- memcpy(ptr, elf_data + phdr->p_offset, filesz);
+ if (filesz)
+ memcpy(ptr, elf_data + offset, filesz);
/*
* Zero out remaining memory for this segment.
@@ -196,28 +248,42 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
memset(ptr + filesz, 0, memsz - filesz);
}
+ if (ret == 0)
+ rproc->elf_class = class;
+
return ret;
}
EXPORT_SYMBOL(rproc_elf_load_segments);
-static struct elf32_shdr *
-find_table(struct device *dev, struct elf32_hdr *ehdr, size_t fw_size)
+static const void *
+find_table(struct device *dev, const struct firmware *fw)
{
- struct elf32_shdr *shdr;
+ const void *shdr, *name_table_shdr;
int i;
const char *name_table;
struct resource_table *table = NULL;
- const u8 *elf_data = (void *)ehdr;
+ const u8 *elf_data = (void *)fw->data;
+ u8 class = fw_elf_get_class(fw);
+ size_t fw_size = fw->size;
+ const void *ehdr = elf_data;
+ u16 shnum = elf_hdr_get_e_shnum(class, ehdr);
+ u32 elf_shdr_get_size = elf_size_of_shdr(class);
+ u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
/* look for the resource table and handle it */
- shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
- name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
-
- for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
- u32 size = shdr->sh_size;
- u32 offset = shdr->sh_offset;
-
- if (strcmp(name_table + shdr->sh_name, ".resource_table"))
+ /* First, get the section header according to the elf class */
+ shdr = elf_data + elf_hdr_get_e_shoff(class, ehdr);
+ /* Compute name table section header entry in shdr array */
+ name_table_shdr = shdr + (shstrndx * elf_shdr_get_size);
+ /* Finally, compute the name table section address in elf */
+ name_table = elf_data + elf_shdr_get_sh_offset(class, name_table_shdr);
+
+ for (i = 0; i < shnum; i++, shdr += elf_shdr_get_size) {
+ u64 size = elf_shdr_get_sh_size(class, shdr);
+ u64 offset = elf_shdr_get_sh_offset(class, shdr);
+ u32 name = elf_shdr_get_sh_name(class, shdr);
+
+ if (strcmp(name_table + name, ".resource_table"))
continue;
table = (struct resource_table *)(elf_data + offset);
@@ -270,21 +336,21 @@ find_table(struct device *dev, struct elf32_hdr *ehdr, size_t fw_size)
*/
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw)
{
- struct elf32_hdr *ehdr;
- struct elf32_shdr *shdr;
+ const void *shdr;
struct device *dev = &rproc->dev;
struct resource_table *table = NULL;
const u8 *elf_data = fw->data;
size_t tablesz;
+ u8 class = fw_elf_get_class(fw);
+ u64 sh_offset;
- ehdr = (struct elf32_hdr *)elf_data;
-
- shdr = find_table(dev, ehdr, fw->size);
+ shdr = find_table(dev, fw);
if (!shdr)
return -EINVAL;
- table = (struct resource_table *)(elf_data + shdr->sh_offset);
- tablesz = shdr->sh_size;
+ sh_offset = elf_shdr_get_sh_offset(class, shdr);
+ table = (struct resource_table *)(elf_data + sh_offset);
+ tablesz = elf_shdr_get_sh_size(class, shdr);
/*
* Create a copy of the resource table. When a virtio device starts
@@ -317,13 +383,24 @@ EXPORT_SYMBOL(rproc_elf_load_rsc_table);
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw)
{
- struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
- struct elf32_shdr *shdr;
+ const void *shdr;
+ u64 sh_addr, sh_size;
+ u8 class = fw_elf_get_class(fw);
+ struct device *dev = &rproc->dev;
- shdr = find_table(&rproc->dev, ehdr, fw->size);
+ shdr = find_table(&rproc->dev, fw);
if (!shdr)
return NULL;
- return rproc_da_to_va(rproc, shdr->sh_addr, shdr->sh_size);
+ sh_addr = elf_shdr_get_sh_addr(class, shdr);
+ sh_size = elf_shdr_get_sh_size(class, shdr);
+
+ if (!rproc_u64_fit_in_size_t(sh_size)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ sh_size);
+ return NULL;
+ }
+
+ return rproc_da_to_va(rproc, sh_addr, sh_size);
}
EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 493ef9262411..b389dc79da81 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -50,12 +50,13 @@ void rproc_exit_sysfs(void);
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
-void *rproc_da_to_va(struct rproc *rproc, u64 da, int len);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
+int rproc_elf32_sanity_check(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw);
-u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
+u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw);
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
@@ -73,7 +74,7 @@ int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
}
static inline
-u32 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+u64 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
if (rproc->ops->get_boot_addr)
return rproc->ops->get_boot_addr(rproc, fw);
@@ -119,4 +120,13 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
return NULL;
}
+static inline
+bool rproc_u64_fit_in_size_t(u64 val)
+{
+ if (sizeof(size_t) == sizeof(u64))
+ return true;
+
+ return (val <= (size_t) -1);
+}
+
#endif /* REMOTEPROC_INTERNAL_H */
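
[Editor's note] rproc_u64_fit_in_size_t() exists because elf64 sizes are u64 while rproc_da_to_va() and memcpy() take size_t, which is only 32 bits on 32-bit hosts. A sketch of the guard pattern the loader adopts (checked_da_to_va() is a name invented for this example):

#include "remoteproc_internal.h"

/* On a 64-bit host the check is always true and compiles away; on a
 * 32-bit host it rejects sizes that would silently truncate. */
static void *checked_da_to_va(struct rproc *rproc, u64 da, u64 memsz)
{
	if (!rproc_u64_fit_in_size_t(memsz))
		return NULL;

	return rproc_da_to_va(rproc, da, memsz);
}
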
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 7f8536b73295..52b871327b55 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -4,6 +4,7 @@
*/
#include <linux/remoteproc.h>
+#include <linux/slab.h>
#include "remoteproc_internal.h"
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 8c07cb2ca8ba..e61d738d9b47 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -320,6 +320,7 @@ static void rproc_virtio_dev_release(struct device *dev)
/**
* rproc_add_virtio_dev() - register an rproc-induced virtio device
* @rvdev: the remote vdev
+ * @id: the device type identification (used to match it with a driver).
*
* This function registers a virtio device. This vdev's parent is
* the rproc device.
@@ -334,6 +335,13 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
struct rproc_mem_entry *mem;
int ret;
+ if (rproc->ops->kick == NULL) {
+ ret = -EINVAL;
+ dev_err(dev, ".kick method not defined for %s",
+ rproc->name);
+ goto out;
+ }
+
/* Try to find dedicated vdev buffer carveout */
mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
if (mem) {
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index ee13d23b43a9..a6cbfa452764 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -190,7 +190,7 @@ static int st_rproc_start(struct rproc *rproc)
}
}
- dev_info(&rproc->dev, "Started from 0x%x\n", rproc->bootaddr);
+ dev_info(&rproc->dev, "Started from 0x%llx\n", rproc->bootaddr);
return 0;
@@ -233,7 +233,7 @@ static const struct rproc_ops st_rproc_ops = {
.parse_fw = st_rproc_parse_fw,
.load = rproc_elf_load_segments,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
- .sanity_check = rproc_elf_sanity_check,
+ .sanity_check = rproc_elf32_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 04492fead3c8..3cca8b65a8db 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -174,7 +174,7 @@ static int slim_rproc_stop(struct rproc *rproc)
return 0;
}
-static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
void *va = NULL;
@@ -191,7 +191,7 @@ static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
}
}
- dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%pK\n",
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n",
da, len, va);
return va;
@@ -203,7 +203,7 @@ static const struct rproc_ops slim_rproc_ops = {
.da_to_va = slim_rproc_da_to_va,
.get_boot_addr = rproc_elf_get_boot_addr,
.load = rproc_elf_load_segments,
- .sanity_check = rproc_elf_sanity_check,
+ .sanity_check = rproc_elf32_sanity_check,
};
/**
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index a18f88044111..0bdd56f02f18 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -19,6 +19,7 @@
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include <linux/workqueue.h>
#include "remoteproc_internal.h"
@@ -505,7 +506,7 @@ static struct rproc_ops st_rproc_ops = {
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
- .sanity_check = rproc_elf_sanity_check,
+ .sanity_check = rproc_elf32_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
@@ -602,7 +603,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds);
if (err)
- dev_warn(dev, "failed to get pdds\n");
+ dev_info(dev, "failed to get pdds\n");
rproc->auto_boot = of_property_read_bool(np, "st,auto-boot");
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 3984e585c847..b9349d684258 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -80,14 +80,14 @@ static int wkup_m3_rproc_stop(struct rproc *rproc)
return 0;
}
-static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct wkup_m3_rproc *wkupm3 = rproc->priv;
void *va = NULL;
int i;
u32 offset;
- if (len <= 0)
+ if (len == 0)
return NULL;
for (i = 0; i < WKUPM3_MEM_MAX; i++) {
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
index 232aa4e40133..83f2b8804ee9 100644
--- a/drivers/rpmsg/mtk_rpmsg.c
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -8,6 +8,7 @@
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/rpmsg/mtk_rpmsg.h>
+#include <linux/slab.h>
#include <linux/workqueue.h>
#include "rpmsg_internal.h"
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 8e503881d9d6..ec873f09c763 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -41,9 +41,6 @@ config RTC_HCTOSYS_DEVICE
device should record time in UTC, since the kernel won't do
timezone correction.
- The driver for this RTC device must be loaded before late_initcall
- functions run, so it must usually be statically linked.
-
This clock should be battery-backed, so that it reads the correct
time when the system boots from a power-off state. Otherwise, your
system will need an external clock source (like an NTP server).
@@ -241,6 +238,7 @@ config RTC_DRV_AS3722
config RTC_DRV_DS1307
tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057"
select REGMAP_I2C
+ select WATCHDOG_CORE if WATCHDOG
help
If you say yes here you get support for various compatible RTC
chips (often with battery backup) connected with I2C. This driver
@@ -592,6 +590,16 @@ config RTC_DRV_RC5T583
This driver can also be built as a module. If so, the module
will be called rtc-rc5t583.
+config RTC_DRV_RC5T619
+ tristate "RICOH RC5T619 RTC driver"
+ depends on MFD_RN5T618
+ help
+ If you say yes here you get support for the RTC on the
+ RICOH RC5T619 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rc5t619.
+
config RTC_DRV_S35390A
tristate "Seiko Instruments S-35390A"
select BITREVERSE
@@ -1335,7 +1343,7 @@ config RTC_DRV_IMXDI
config RTC_DRV_FSL_FTM_ALARM
tristate "Freescale FlexTimer alarm timer"
- depends on ARCH_LAYERSCAPE || SOC_LS1021A
+ depends on ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
help
For the FlexTimer in LS1012A, LS1021A, LS1028A, LS1043A, LS1046A,
LS1088A, LS208xA, we can use FTM as the wakeup source.
@@ -1762,6 +1770,7 @@ config RTC_DRV_MXC_V2
config RTC_DRV_SNVS
tristate "Freescale SNVS RTC support"
select REGMAP_MMIO
+ depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
depends on OF
help
@@ -1807,6 +1816,16 @@ config RTC_DRV_MOXART
This driver can also be built as a module. If so, the module
will be called rtc-moxart
+config RTC_DRV_MT2712
+ tristate "MediaTek MT2712 SoC based RTC"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for the real-time clock built into the
+ MediaTek MT2712 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-mt2712.
+
config RTC_DRV_MT6397
tristate "MediaTek PMIC based RTC"
depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 4ac8f19fb631..0721752c6ed4 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -6,16 +6,11 @@
ccflags-$(CONFIG_RTC_DEBUG) := -DDEBUG
obj-$(CONFIG_RTC_LIB) += lib.o
-obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
obj-$(CONFIG_RTC_SYSTOHC) += systohc.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o
rtc-core-y := class.o interface.o
-ifdef CONFIG_RTC_DRV_EFI
-rtc-core-y += rtc-efi-platform.o
-endif
-
rtc-core-$(CONFIG_RTC_NVMEM) += nvmem.o
rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o
@@ -110,6 +105,7 @@ obj-$(CONFIG_RTC_DRV_MESON) += rtc-meson.o
obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
+obj-$(CONFIG_RTC_DRV_MT2712) += rtc-mt2712.o
obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o
obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
@@ -137,6 +133,7 @@ obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RC5T583) += rtc-rc5t583.o
+obj-$(CONFIG_RTC_DRV_RC5T619) += rtc-rc5t619.o
obj-$(CONFIG_RTC_DRV_RK808) += rtc-rk808.o
obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 9458e6d6686a..7c88d190c51f 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -34,6 +34,50 @@ static void rtc_device_release(struct device *dev)
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
/* Result of the last RTC to system clock attempt. */
int rtc_hctosys_ret = -ENODEV;
+
+/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
+ * whether it stores the closest value or the value with partial
+ * seconds truncated. However, it is important that we use it to store
+ * the truncated value. This is because otherwise it is necessary,
+ * in an rtc sync function, to read both xtime.tv_sec and
+ * xtime.tv_nsec. On some processors (e.g. ARM), an atomic read
+ * of >32 bits is not possible. So storing the closest value would
+ * slow down the sync API. So here we have the truncated value and
+ * the best guess is to add 0.5s.
+ */
+
+static void rtc_hctosys(struct rtc_device *rtc)
+{
+ int err;
+ struct rtc_time tm;
+ struct timespec64 tv64 = {
+ .tv_nsec = NSEC_PER_SEC >> 1,
+ };
+
+ err = rtc_read_time(rtc, &tm);
+ if (err) {
+ dev_err(rtc->dev.parent,
+ "hctosys: unable to read the hardware clock\n");
+ goto err_read;
+ }
+
+ tv64.tv_sec = rtc_tm_to_time64(&tm);
+
+#if BITS_PER_LONG == 32
+ if (tv64.tv_sec > INT_MAX) {
+ err = -ERANGE;
+ goto err_read;
+ }
+#endif
+
+ err = do_settimeofday64(&tv64);
+
+ dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n",
+ &tm, (long long)tv64.tv_sec);
+
+err_read:
+ rtc_hctosys_ret = err;
+}
#endif
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
@@ -375,6 +419,11 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
dev_info(rtc->dev.parent, "registered as %s\n",
dev_name(&rtc->dev));
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+ if (!strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE))
+ rtc_hctosys(rtc);
+#endif
+
return 0;
}
EXPORT_SYMBOL_GPL(__rtc_register_device);
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
deleted file mode 100644
index a74d0d890600..000000000000
--- a/drivers/rtc/hctosys.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * RTC subsystem, initialize system time on startup
- *
- * Copyright (C) 2005 Tower Technologies
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/rtc.h>
-
-/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
- * whether it stores the most close value or the value with partial
- * seconds truncated. However, it is important that we use it to store
- * the truncated value. This is because otherwise it is necessary,
- * in an rtc sync function, to read both xtime.tv_sec and
- * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read
- * of >32bits is not possible. So storing the most close value would
- * slow down the sync API. So here we have the truncated value and
- * the best guess is to add 0.5s.
- */
-
-static int __init rtc_hctosys(void)
-{
- int err = -ENODEV;
- struct rtc_time tm;
- struct timespec64 tv64 = {
- .tv_nsec = NSEC_PER_SEC >> 1,
- };
- struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
-
- if (!rtc) {
- pr_info("unable to open rtc device (%s)\n",
- CONFIG_RTC_HCTOSYS_DEVICE);
- goto err_open;
- }
-
- err = rtc_read_time(rtc, &tm);
- if (err) {
- dev_err(rtc->dev.parent,
- "hctosys: unable to read the hardware clock\n");
- goto err_read;
- }
-
- tv64.tv_sec = rtc_tm_to_time64(&tm);
-
-#if BITS_PER_LONG == 32
- if (tv64.tv_sec > INT_MAX) {
- err = -ERANGE;
- goto err_read;
- }
-#endif
-
- err = do_settimeofday64(&tv64);
-
- dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n",
- &tm, (long long)tv64.tv_sec);
-
-err_read:
- rtc_class_close(rtc);
-
-err_open:
- rtc_hctosys_ret = err;
-
- return err;
-}
-
-late_initcall(rtc_hctosys);
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 4743b16a8d84..cc9b14ef90f1 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -28,7 +28,6 @@ struct pm860x_rtc_info {
int irq;
int vrtc;
- int (*sync)(unsigned int ticks);
};
#define REG_VRTC_MEAS1 0x7D
@@ -76,33 +75,6 @@ static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-/*
- * Calculate the next alarm time given the requested alarm time mask
- * and the current time.
- */
-static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
- struct rtc_time *alrm)
-{
- unsigned long next_time;
- unsigned long now_time;
-
- next->tm_year = now->tm_year;
- next->tm_mon = now->tm_mon;
- next->tm_mday = now->tm_mday;
- next->tm_hour = alrm->tm_hour;
- next->tm_min = alrm->tm_min;
- next->tm_sec = alrm->tm_sec;
-
- rtc_tm_to_time(now, &now_time);
- rtc_tm_to_time(next, &next_time);
-
- if (next_time < now_time) {
- /* Advance one day */
- next_time += 60 * 60 * 24;
- rtc_time_to_tm(next_time, next);
- }
-}
-
static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
@@ -123,7 +95,7 @@ static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
- rtc_time_to_tm(ticks, tm);
+ rtc_time64_to_tm(ticks, tm);
return 0;
}
@@ -140,7 +112,7 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
1900 + tm->tm_year);
return -EINVAL;
}
- rtc_tm_to_time(tm, &ticks);
+ ticks = rtc_tm_to_time64(tm);
/* load 32-bit read-only counter */
pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
@@ -155,8 +127,6 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF);
pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF);
- if (info->sync)
- info->sync(ticks);
return 0;
}
@@ -180,7 +150,7 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
- rtc_time_to_tm(ticks, &alrm->time);
+ rtc_time64_to_tm(ticks, &alrm->time);
ret = pm860x_reg_read(info->i2c, PM8607_RTC1);
alrm->enabled = (ret & ALARM_EN) ? 1 : 0;
alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 1 : 0;
@@ -190,7 +160,6 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
- struct rtc_time now_tm, alarm_tm;
unsigned long ticks, base, data;
unsigned char buf[8];
int mask;
@@ -203,18 +172,7 @@ static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
(buf[5] << 8) | buf[7];
- /* load 32-bit read-only counter */
- pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
- data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
- (buf[1] << 8) | buf[0];
- ticks = base + data;
- dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
- base, data, ticks);
-
- rtc_time_to_tm(ticks, &now_tm);
- rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
- /* get new ticks for alarm in 24 hours */
- rtc_tm_to_time(&alarm_tm, &ticks);
+ ticks = rtc_tm_to_time64(&alrm->time);
data = ticks - base;
buf[0] = data & 0xff;
@@ -309,20 +267,15 @@ static int pm860x_rtc_dt_init(struct platform_device *pdev,
return 0;
}
#else
-#define pm860x_rtc_dt_init(x, y) (-1)
+#define pm860x_rtc_dt_init(x, y) do { } while (0)
#endif
static int pm860x_rtc_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_rtc_pdata *pdata = NULL;
struct pm860x_rtc_info *info;
- struct rtc_time tm;
- unsigned long ticks = 0;
int ret;
- pdata = dev_get_platdata(&pdev->dev);
-
info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_rtc_info),
GFP_KERNEL);
if (!info)
@@ -336,6 +289,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, info);
+ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(info->rtc_dev))
+ return PTR_ERR(info->rtc_dev);
+
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
rtc_update_handler, IRQF_ONESHOT, "rtc",
info);
@@ -351,39 +308,14 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA);
pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA);
- ret = pm860x_rtc_read_time(&pdev->dev, &tm);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to read initial time.\n");
- return ret;
- }
- if ((tm.tm_year < 70) || (tm.tm_year > 138)) {
- tm.tm_year = 70;
- tm.tm_mon = 0;
- tm.tm_mday = 1;
- tm.tm_hour = 0;
- tm.tm_min = 0;
- tm.tm_sec = 0;
- ret = pm860x_rtc_set_time(&pdev->dev, &tm);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to set initial time.\n");
- return ret;
- }
- }
- rtc_tm_to_time(&tm, &ticks);
- if (pm860x_rtc_dt_init(pdev, info)) {
- if (pdata && pdata->sync) {
- pdata->sync(ticks);
- info->sync = pdata->sync;
- }
- }
+ pm860x_rtc_dt_init(pdev, info);
+
+ info->rtc_dev->ops = &pm860x_rtc_ops;
+ info->rtc_dev->range_max = U32_MAX;
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc",
- &pm860x_rtc_ops, THIS_MODULE);
- ret = PTR_ERR(info->rtc_dev);
- if (IS_ERR(info->rtc_dev)) {
- dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ ret = rtc_register_device(info->rtc_dev);
+ if (ret)
return ret;
- }
/*
* enable internal XO instead of internal 3.25MHz clock since it can
@@ -393,12 +325,6 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
#ifdef VRTC_CALIBRATION
/* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */
- if (pm860x_rtc_dt_init(pdev, info)) {
- if (pdata && pdata->vrtc)
- info->vrtc = pdata->vrtc & 0x3;
- else
- info->vrtc = 1;
- }
pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC);
/* calibrate VRTC */
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 8492ffed4ca6..3d60f3283f11 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -100,7 +100,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
secs = secs / COUNTS_PER_SEC;
secs = secs + (mins * 60);
- rtc_time_to_tm(secs, tm);
+ rtc_time64_to_tm(secs, tm);
return 0;
}
@@ -110,7 +110,7 @@ static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
unsigned long no_secs, no_mins, secs = 0;
- rtc_tm_to_time(tm, &secs);
+ secs = rtc_tm_to_time64(tm);
no_mins = secs / 60;
@@ -168,7 +168,7 @@ static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
secs = mins * 60;
- rtc_time_to_tm(secs, &alarm->time);
+ rtc_time64_to_tm(secs, &alarm->time);
return 0;
}
@@ -188,7 +188,7 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct rtc_time curtm;
/* Get the number of seconds since 1970 */
- rtc_tm_to_time(&alarm->time, &secs);
+ secs = rtc_tm_to_time64(&alarm->time);
/*
* Check whether alarm is set less than 1min.
@@ -196,7 +196,7 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
* return -EINVAL, so UIE EMUL can take it up, in case of UIE_ON
*/
ab8500_rtc_read_time(dev, &curtm); /* Read current time */
- rtc_tm_to_time(&curtm, &cursec);
+ cursec = rtc_tm_to_time64(&curtm);
if ((secs - cursec) < 59) {
dev_dbg(dev, "Alarm less than 1 minute not supported\r\n");
return -EINVAL;
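
[Editor's note] The rtc_time_to_tm()/rtc_tm_to_time() pair being removed above is the deprecated 32-bit API; the time64 replacements take and return time64_t and therefore keep working past 2038. A quick sketch of the round trip — these two helpers are exact inverses:

#include <linux/bug.h>
#include <linux/rtc.h>

static void time64_round_trip_demo(void)
{
	struct rtc_time tm;
	time64_t secs = 86400;		/* 1970-01-02 00:00:00 UTC */

	rtc_time64_to_tm(secs, &tm);	/* tm_year = 70, tm_mday = 2, ... */
	WARN_ON(rtc_tm_to_time64(&tm) != secs);
}
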
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index 7c5530c71285..791bebcb6f47 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -34,7 +34,7 @@ static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm)
t = alchemy_rdsys(AU1000_SYS_TOYREAD);
- rtc_time_to_tm(t, tm);
+ rtc_time64_to_tm(t, tm);
return 0;
}
@@ -43,7 +43,7 @@ static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned long t;
- rtc_tm_to_time(tm, &t);
+ t = rtc_tm_to_time64(tm);
alchemy_wrsys(t, AU1000_SYS_TOYWRITE);
@@ -65,17 +65,13 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtcdev;
unsigned long t;
- int ret;
t = alchemy_rdsys(AU1000_SYS_CNTRCTRL);
if (!(t & CNTR_OK)) {
dev_err(&pdev->dev, "counters not working; aborting.\n");
- ret = -ENODEV;
- goto out_err;
+ return -ENODEV;
}
- ret = -ETIMEDOUT;
-
/* set counter0 tickrate to 1Hz if necessary */
if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767) {
/* wait until hardware gives access to TRIM register */
@@ -88,7 +84,7 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
* counters are unusable.
*/
dev_err(&pdev->dev, "timeout waiting for access\n");
- goto out_err;
+ return -ETIMEDOUT;
}
/* set 1Hz TOY tick rate */
@@ -99,19 +95,16 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S)
msleep(1);
- rtcdev = devm_rtc_device_register(&pdev->dev, "rtc-au1xxx",
- &au1xtoy_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtcdev)) {
- ret = PTR_ERR(rtcdev);
- goto out_err;
- }
+ rtcdev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdev))
+ return PTR_ERR(rtcdev);
- platform_set_drvdata(pdev, rtcdev);
+ rtcdev->ops = &au1xtoy_rtc_ops;
+ rtcdev->range_max = U32_MAX;
- return 0;
+ platform_set_drvdata(pdev, rtcdev);
-out_err:
- return ret;
+ return rtc_register_device(rtcdev);
}
static struct platform_driver au1xrtc_driver = {
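
[Editorial note: the other recurring change in this series is replacing devm_rtc_device_register() with devm_rtc_allocate_device() plus rtc_register_device(), which lets a driver set its ops and valid time range before the device becomes visible to userspace. A minimal sketch of the pattern, with a hypothetical ops table and assuming a 32-bit seconds counter as in the driver above:]

#include <linux/platform_device.h>
#include <linux/rtc.h>

static const struct rtc_class_ops example_rtc_ops;	/* hypothetical */

static int example_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* configure before registration */
	rtc->ops = &example_rtc_ops;
	rtc->range_max = U32_MAX;	/* 32-bit seconds counter */

	platform_set_drvdata(pdev, rtc);

	/* unregistration is handled by the devm allocation above */
	return rtc_register_device(rtc);
}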
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index bbbb1f07c91f..4492b770422c 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -542,10 +542,8 @@ static int bd70528_probe(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, irq_name);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq\n");
+ if (irq < 0)
return irq;
- }
platform_set_drvdata(pdev, bd_rtc);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index b795fe4cbd2e..bcc96ab7793f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -649,10 +649,11 @@ static struct cmos_rtc cmos_rtc;
static irqreturn_t cmos_interrupt(int irq, void *p)
{
+ unsigned long flags;
u8 irqstat;
u8 rtc_control;
- spin_lock(&rtc_lock);
+ spin_lock_irqsave(&rtc_lock, flags);
/* When the HPET interrupt handler calls us, the interrupt
* status is passed as arg1 instead of the irq number. But
@@ -686,7 +687,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
}
- spin_unlock(&rtc_lock);
+ spin_unlock_irqrestore(&rtc_lock, flags);
if (is_intr(irqstat)) {
rtc_update_irq(p, 1, irqstat);
@@ -1345,7 +1346,7 @@ static const struct pnp_device_id rtc_ids[] = {
MODULE_DEVICE_TABLE(pnp, rtc_ids);
static struct pnp_driver cmos_pnp_driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
.id_table = rtc_ids,
.probe = cmos_pnp_probe,
.remove = cmos_pnp_remove,
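
[Editorial note: the cmos_interrupt() change above swaps spin_lock() for spin_lock_irqsave() because the handler can be entered from contexts where interrupts are not already disabled (e.g. the HPET path mentioned in the comment). A minimal sketch of the distinction, under a hypothetical lock:]

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_handler(void)
{
	unsigned long flags;

	/*
	 * spin_lock() assumes local interrupts are already disabled;
	 * spin_lock_irqsave() records the current interrupt state and
	 * disables IRQs, so the same code is safe whether it runs from
	 * hard-IRQ context or from a caller with interrupts enabled.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... touch shared hardware registers ... */
	spin_unlock_irqrestore(&example_lock, flags);
}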
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index 6a3b70fd7e1f..a603f1f21125 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -56,14 +56,14 @@ static void cpcap2rtc_time(struct rtc_time *rtc, struct cpcap_time *cpcap)
tod = (cpcap->tod1 & TOD1_MASK) | ((cpcap->tod2 & TOD2_MASK) << 8);
time = tod + ((cpcap->day & DAY_MASK) * SECS_PER_DAY);
- rtc_time_to_tm(time, rtc);
+ rtc_time64_to_tm(time, rtc);
}
static void rtc2cpcap_time(struct cpcap_time *cpcap, struct rtc_time *rtc)
{
unsigned long time;
- rtc_tm_to_time(rtc, &time);
+ time = rtc_tm_to_time64(rtc);
cpcap->day = time / SECS_PER_DAY;
time %= SECS_PER_DAY;
@@ -256,12 +256,13 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
return -ENODEV;
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = devm_rtc_device_register(dev, "cpcap_rtc",
- &cpcap_rtc_ops, THIS_MODULE);
-
+ rtc->rtc_dev = devm_rtc_allocate_device(dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
+ rtc->rtc_dev->ops = &cpcap_rtc_ops;
+ rtc->rtc_dev->range_max = (1 << 14) * SECS_PER_DAY - 1;
+
err = cpcap_get_vendor(dev, rtc->regmap, &rtc->vendor);
if (err)
return err;
@@ -298,7 +299,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
/* ignore error and continue without wakeup support */
}
- return 0;
+ return rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id cpcap_rtc_of_match[] = {
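
[Editorial note: the cpcap range_max above follows from the hardware layout, a 14-bit day counter plus a seconds-of-day value. A worked check of the arithmetic, using the driver's SECS_PER_DAY:]

#include <linux/time64.h>

#define SECS_PER_DAY	86400
/* 14-bit day field: days 0..16383, up to the last second of the last day */
#define CPCAP_RANGE_MAX	((1 << 14) * (time64_t)SECS_PER_DAY - 1)
/* = 16384 * 86400 - 1 = 1415577599 seconds of representable range */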
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 204eb7cf1aa4..58de10da37b1 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -103,13 +103,11 @@ static int da9052_set_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm)
int ret;
uint8_t v[3];
- ret = rtc_tm_to_time(rtc_tm, &alm_time);
- if (ret != 0)
- return ret;
+ alm_time = rtc_tm_to_time64(rtc_tm);
if (rtc_tm->tm_sec > 0) {
alm_time += 60 - rtc_tm->tm_sec;
- rtc_time_to_tm(alm_time, rtc_tm);
+ rtc_time64_to_tm(alm_time, rtc_tm);
}
BUG_ON(rtc_tm->tm_sec); /* it will cause repeated irqs if not zero */
@@ -298,12 +296,18 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc_err(rtc, "Failed to disable TICKS: %d\n", ret);
device_init_wakeup(&pdev->dev, true);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &da9052_rtc_ops, THIS_MODULE);
-
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc))
return PTR_ERR(rtc->rtc);
+ rtc->rtc->ops = &da9052_rtc_ops;
+ rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->rtc->range_max = RTC_TIMESTAMP_END_2063;
+
+ ret = rtc_register_device(rtc->rtc);
+ if (ret)
+ return ret;
+
ret = da9052_request_irq(rtc->da9052, DA9052_IRQ_ALARM, "ALM",
da9052_rtc_irq, rtc);
if (ret != 0) {
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 390b7351e0fe..73f87a17cdf3 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -227,7 +227,7 @@ davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
return ret;
}
-static int convertfromdays(u16 days, struct rtc_time *tm)
+static void convertfromdays(u16 days, struct rtc_time *tm)
{
int tmp_days, year, mon;
@@ -250,24 +250,17 @@ static int convertfromdays(u16 days, struct rtc_time *tm)
break;
}
}
- return 0;
}
-static int convert2days(u16 *days, struct rtc_time *tm)
+static void convert2days(u16 *days, struct rtc_time *tm)
{
int i;
*days = 0;
- /* epoch == 1900 */
- if (tm->tm_year < 100 || tm->tm_year > 199)
- return -EINVAL;
-
for (i = 2000; i < 1900 + tm->tm_year; i++)
*days += rtc_year_days(1, 12, i);
*days += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year);
-
- return 0;
}
static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -300,8 +293,7 @@ static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
days <<= 8;
days |= day0;
- if (convertfromdays(days, tm) < 0)
- return -EINVAL;
+ convertfromdays(days, tm);
return 0;
}
@@ -313,8 +305,7 @@ static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm)
u8 rtc_cctrl;
unsigned long flags;
- if (convert2days(&days, tm) < 0)
- return -EINVAL;
+ convert2days(&days, tm);
spin_lock_irqsave(&davinci_rtc_lock, flags);
@@ -396,8 +387,7 @@ static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
days <<= 8;
days |= day0;
- if (convertfromdays(days, &alm->time) < 0)
- return -EINVAL;
+ convertfromdays(days, &alm->time);
alm->pending = !!(rtcss_read(davinci_rtc,
PRTCSS_RTC_CCTRL) &
@@ -413,29 +403,7 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
unsigned long flags;
u16 days;
- if (alm->time.tm_mday <= 0 && alm->time.tm_mon < 0
- && alm->time.tm_year < 0) {
- struct rtc_time tm;
- unsigned long now, then;
-
- davinci_rtc_read_time(dev, &tm);
- rtc_tm_to_time(&tm, &now);
-
- alm->time.tm_mday = tm.tm_mday;
- alm->time.tm_mon = tm.tm_mon;
- alm->time.tm_year = tm.tm_year;
- rtc_tm_to_time(&alm->time, &then);
-
- if (then < now) {
- rtc_time_to_tm(now + 24 * 60 * 60, &tm);
- alm->time.tm_mday = tm.tm_mday;
- alm->time.tm_mon = tm.tm_mon;
- alm->time.tm_year = tm.tm_year;
- }
- }
-
- if (convert2days(&days, &alm->time) < 0)
- return -EINVAL;
+ convert2days(&days, &alm->time);
spin_lock_irqsave(&davinci_rtc_lock, flags);
@@ -485,13 +453,13 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, davinci_rtc);
- davinci_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &davinci_rtc_ops, THIS_MODULE);
- if (IS_ERR(davinci_rtc->rtc)) {
- dev_err(dev, "unable to register RTC device, err %d\n",
- ret);
+ davinci_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(davinci_rtc->rtc))
return PTR_ERR(davinci_rtc->rtc);
- }
+
+ davinci_rtc->rtc->ops = &davinci_rtc_ops;
+ davinci_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ davinci_rtc->rtc->range_max = RTC_TIMESTAMP_BEGIN_2000 + (1 << 16) * 86400ULL - 1;
rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
@@ -516,7 +484,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
- return 0;
+ return rtc_register_device(davinci_rtc->rtc);
}
static int __exit davinci_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 4420fbf2f8fe..a3d790889eea 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -325,17 +325,13 @@ static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
u8 buf[1 + DS1305_ALM_LEN];
/* convert desired alarm to time_t */
- status = rtc_tm_to_time(&alm->time, &later);
- if (status < 0)
- return status;
+ later = rtc_tm_to_time64(&alm->time);
/* Read current time as time_t */
status = ds1305_get_time(dev, &tm);
if (status < 0)
return status;
- status = rtc_tm_to_time(&tm, &now);
- if (status < 0)
- return status;
+ now = rtc_tm_to_time64(&tm);
/* make sure alarm fires within the next 24 hours */
if (later <= now)
@@ -694,6 +690,8 @@ static int ds1305_probe(struct spi_device *spi)
return PTR_ERR(ds1305->rtc);
ds1305->rtc->ops = &ds1305_ops;
+ ds1305->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ ds1305->rtc->range_max = RTC_TIMESTAMP_END_2099;
ds1305_nvmem_cfg.priv = ds1305;
ds1305->rtc->nvram_old_abi = true;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 1f7e8aefc1eb..49702942bb08 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -22,6 +22,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
+#include <linux/watchdog.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -144,6 +145,15 @@ enum ds_type {
# define M41TXX_BIT_CALIB_SIGN BIT(5)
# define M41TXX_M_CALIBRATION GENMASK(4, 0)
+#define DS1388_REG_WDOG_HUN_SECS 0x08
+#define DS1388_REG_WDOG_SECS 0x09
+#define DS1388_REG_FLAG 0x0b
+# define DS1388_BIT_WF BIT(6)
+# define DS1388_BIT_OSF BIT(7)
+#define DS1388_REG_CONTROL 0x0c
+# define DS1388_BIT_RST BIT(0)
+# define DS1388_BIT_WDE BIT(1)
+
/* negative offset step is -2.034ppm */
#define M41TXX_NEG_OFFSET_STEP_PPB 2034
/* positive offset step is +4.068ppm */
@@ -252,6 +262,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
if (tmp & DS1340_BIT_OSF)
return -EINVAL;
break;
+ case ds_1388:
+ ret = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &tmp);
+ if (ret)
+ return ret;
+ if (tmp & DS1388_BIT_OSF)
+ return -EINVAL;
+ break;
case mcp794xx:
if (!(tmp & MCP794XX_BIT_ST))
return -EINVAL;
@@ -845,6 +862,72 @@ static int m41txx_rtc_set_offset(struct device *dev, long offset)
ctrl_reg);
}
+#ifdef CONFIG_WATCHDOG_CORE
+static int ds1388_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
+ u8 regs[2];
+ int ret;
+
+ ret = regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
+ DS1388_BIT_WF, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
+ DS1388_BIT_WDE | DS1388_BIT_RST, 0);
+ if (ret)
+ return ret;
+
+	/*
+	 * Watchdog timeouts are measured in seconds, so ignore the
+	 * hundredths-of-seconds field.
+	 */
+ regs[0] = 0;
+ regs[1] = bin2bcd(wdt_dev->timeout);
+
+ ret = regmap_bulk_write(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
+ sizeof(regs));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
+ DS1388_BIT_WDE | DS1388_BIT_RST,
+ DS1388_BIT_WDE | DS1388_BIT_RST);
+}
+
+static int ds1388_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
+
+ return regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
+ DS1388_BIT_WDE | DS1388_BIT_RST, 0);
+}
+
+static int ds1388_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
+ u8 regs[2];
+
+ return regmap_bulk_read(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
+ sizeof(regs));
+}
+
+static int ds1388_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int val)
+{
+ struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
+ u8 regs[2];
+
+ wdt_dev->timeout = val;
+ regs[0] = 0;
+ regs[1] = bin2bcd(wdt_dev->timeout);
+
+ return regmap_bulk_write(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
+ sizeof(regs));
+}
+#endif
+
static const struct rtc_class_ops rx8130_rtc_ops = {
.read_time = ds1307_get_time,
.set_time = ds1307_set_time,
@@ -1567,6 +1650,48 @@ static void ds1307_clks_register(struct ds1307 *ds1307)
#endif /* CONFIG_COMMON_CLK */
+#ifdef CONFIG_WATCHDOG_CORE
+static const struct watchdog_info ds1388_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "DS1388 watchdog",
+};
+
+static const struct watchdog_ops ds1388_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = ds1388_wdt_start,
+ .stop = ds1388_wdt_stop,
+ .ping = ds1388_wdt_ping,
+ .set_timeout = ds1388_wdt_set_timeout,
+
+};
+
+static void ds1307_wdt_register(struct ds1307 *ds1307)
+{
+ struct watchdog_device *wdt;
+
+ if (ds1307->type != ds_1388)
+ return;
+
+ wdt = devm_kzalloc(ds1307->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return;
+
+ wdt->info = &ds1388_wdt_info;
+ wdt->ops = &ds1388_wdt_ops;
+ wdt->timeout = 99;
+ wdt->max_timeout = 99;
+ wdt->min_timeout = 1;
+
+ watchdog_init_timeout(wdt, 0, ds1307->dev);
+ watchdog_set_drvdata(wdt, ds1307);
+ devm_watchdog_register_device(ds1307->dev, wdt);
+}
+#else
+static void ds1307_wdt_register(struct ds1307 *ds1307)
+{
+}
+#endif /* CONFIG_WATCHDOG_CORE */
+
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -1856,6 +1981,7 @@ static int ds1307_probe(struct i2c_client *client,
ds1307_hwmon_register(ds1307);
ds1307_clks_register(ds1307);
+ ds1307_wdt_register(ds1307);
return 0;
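
[Editorial note: the DS1388 watchdog support above follows the standard watchdog-core registration pattern: fill in a struct watchdog_device, seed the timeout, attach driver data, and register with the devm helper. A minimal sketch of that pattern, parameterised on a hypothetical ops/info pair; the 1..99 s limits mirror the two BCD countdown registers used above:]

#include <linux/device.h>
#include <linux/watchdog.h>

static int example_register_wdt(struct device *dev, void *drvdata,
				const struct watchdog_ops *ops,
				const struct watchdog_info *info)
{
	struct watchdog_device *wdt;

	wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
	if (!wdt)
		return -ENOMEM;

	wdt->info = info;
	wdt->ops = ops;
	wdt->min_timeout = 1;
	wdt->max_timeout = 99;
	wdt->timeout = 99;

	/* honours the "timeout-sec" firmware property when present */
	watchdog_init_timeout(wdt, 0, dev);
	watchdog_set_drvdata(wdt, drvdata);

	return devm_watchdog_register_device(dev, wdt);
}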
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 6e9ddcd03992..9c51a12cf70f 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -164,7 +164,7 @@ static int ds1374_read_time(struct device *dev, struct rtc_time *time)
ret = ds1374_read_rtc(client, &itime, DS1374_REG_TOD0, 4);
if (!ret)
- rtc_time_to_tm(itime, time);
+ rtc_time64_to_tm(itime, time);
return ret;
}
@@ -172,9 +172,8 @@ static int ds1374_read_time(struct device *dev, struct rtc_time *time)
static int ds1374_set_time(struct device *dev, struct rtc_time *time)
{
struct i2c_client *client = to_i2c_client(dev);
- unsigned long itime;
+ unsigned long itime = rtc_tm_to_time64(time);
- rtc_tm_to_time(time, &itime);
return ds1374_write_rtc(client, itime, DS1374_REG_TOD0, 4);
}
@@ -212,7 +211,7 @@ static int ds1374_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (ret)
goto out;
- rtc_time_to_tm(now + cur_alarm, &alarm->time);
+ rtc_time64_to_tm(now + cur_alarm, &alarm->time);
alarm->enabled = !!(cr & DS1374_REG_CR_WACE);
alarm->pending = !!(sr & DS1374_REG_SR_AF);
@@ -237,8 +236,8 @@ static int ds1374_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (ret < 0)
return ret;
- rtc_tm_to_time(&alarm->time, &new_alarm);
- rtc_tm_to_time(&now, &itime);
+ new_alarm = rtc_tm_to_time64(&alarm->time);
+ itime = rtc_tm_to_time64(&now);
/* This can happen due to races, in addition to dates that are
* truly in the past. To avoid requiring the caller to check for
@@ -620,6 +619,10 @@ static int ds1374_probe(struct i2c_client *client,
if (!ds1374)
return -ENOMEM;
+ ds1374->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(ds1374->rtc))
+ return PTR_ERR(ds1374->rtc);
+
ds1374->client = client;
i2c_set_clientdata(client, ds1374);
@@ -641,12 +644,12 @@ static int ds1374_probe(struct i2c_client *client,
device_set_wakeup_capable(&client->dev, 1);
}
- ds1374->rtc = devm_rtc_device_register(&client->dev, client->name,
- &ds1374_rtc_ops, THIS_MODULE);
- if (IS_ERR(ds1374->rtc)) {
- dev_err(&client->dev, "unable to register the class device\n");
- return PTR_ERR(ds1374->rtc);
- }
+ ds1374->rtc->ops = &ds1374_rtc_ops;
+ ds1374->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(ds1374->rtc);
+ if (ret)
+ return ret;
#ifdef CONFIG_RTC_DRV_DS1374_WDT
save_client = client;
diff --git a/drivers/rtc/rtc-efi-platform.c b/drivers/rtc/rtc-efi-platform.c
deleted file mode 100644
index 6c037dc4e3dc..000000000000
--- a/drivers/rtc/rtc-efi-platform.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Moved from arch/ia64/kernel/time.c
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger <davidm@hpl.hp.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- * Copyright (C) 1999-2000 VA Linux Systems
- * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/efi.h>
-#include <linux/platform_device.h>
-
-static struct platform_device rtc_efi_dev = {
- .name = "rtc-efi",
- .id = -1,
-};
-
-static int __init rtc_init(void)
-{
- if (efi_enabled(EFI_RUNTIME_SERVICES))
- if (platform_device_register(&rtc_efi_dev) < 0)
- pr_err("unable to register rtc device...\n");
-
- /* not necessarily an error */
- return 0;
-}
-module_init(rtc_init);
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 9e6e994cce99..756af62b0486 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -20,6 +20,7 @@
#include <linux/fsl/ftm.h>
#include <linux/rtc.h>
#include <linux/time.h>
+#include <linux/acpi.h>
#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
@@ -151,6 +152,8 @@ static irqreturn_t ftm_rtc_alarm_interrupt(int irq, void *dev)
{
struct ftm_rtc *rtc = dev;
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
ftm_irq_acknowledge(rtc);
ftm_irq_disable(rtc);
ftm_clean_alarm(rtc);
@@ -242,7 +245,6 @@ static const struct rtc_class_ops ftm_rtc_ops = {
static int ftm_rtc_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
int irq;
int ret;
struct ftm_rtc *rtc;
@@ -265,10 +267,10 @@ static int ftm_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc->base);
}
- irq = irq_of_parse_and_map(np, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "unable to get IRQ from DT, %d\n", irq);
- return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "can't get irq number\n");
+ return irq;
}
ret = devm_request_irq(&pdev->dev, irq, ftm_rtc_alarm_interrupt,
@@ -278,7 +280,9 @@ static int ftm_rtc_probe(struct platform_device *pdev)
return ret;
}
- rtc->big_endian = of_property_read_bool(np, "big-endian");
+ rtc->big_endian =
+ device_property_read_bool(&pdev->dev, "big-endian");
+
rtc->alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV;
rtc->rtc_dev->ops = &ftm_rtc_ops;
@@ -305,11 +309,18 @@ static const struct of_device_id ftm_rtc_match[] = {
{ },
};
+static const struct acpi_device_id ftm_imx_acpi_ids[] = {
+ {"NXP0011",},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ftm_imx_acpi_ids);
+
static struct platform_driver ftm_rtc_driver = {
.probe = ftm_rtc_probe,
.driver = {
.name = "ftm-alarm",
.of_match_table = ftm_rtc_match,
+ .acpi_match_table = ACPI_PTR(ftm_imx_acpi_ids),
},
};
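
[Editorial note: the fsl-ftm-alarm changes above replace OF-only helpers with firmware-agnostic ones so the same probe path serves both DT and the new ACPI table. A minimal sketch of the substitution (hypothetical probe, rest of the body elided):]

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_probe(struct platform_device *pdev)
{
	bool big_endian;
	int irq;

	/* resolves the interrupt for both OF- and ACPI-enumerated devices */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* firmware-agnostic replacement for of_property_read_bool() */
	big_endian = device_property_read_bool(&pdev->dev, "big-endian");

	return 0;	/* rest of probe elided; big_endian used there */
}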
diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c
index cf2c12107f2b..a5f59e6f862e 100644
--- a/drivers/rtc/rtc-imx-sc.c
+++ b/drivers/rtc/rtc-imx-sc.c
@@ -37,7 +37,7 @@ struct imx_sc_msg_timer_rtc_set_alarm {
u8 hour;
u8 min;
u8 sec;
-} __packed;
+} __packed __aligned(4);
static int imx_sc_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 18023e472cbc..e4c719085c31 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -46,6 +46,7 @@
enum jz4740_rtc_type {
ID_JZ4740,
+ ID_JZ4760,
ID_JZ4780,
};
@@ -106,7 +107,7 @@ static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg,
{
int ret = 0;
- if (rtc->type >= ID_JZ4780)
+ if (rtc->type >= ID_JZ4760)
ret = jz4780_rtc_enable_write(rtc);
if (ret == 0)
ret = jz4740_rtc_wait_write_ready(rtc);
@@ -298,6 +299,7 @@ static void jz4740_rtc_power_off(void)
static const struct of_device_id jz4740_rtc_of_match[] = {
{ .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
+ { .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
{ .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
{},
};
@@ -372,13 +374,14 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (!pm_power_off) {
/* Default: 60ms */
rtc->reset_pin_assert_time = 60;
- of_property_read_u32(np, "reset-pin-assert-time-ms",
+ of_property_read_u32(np,
+ "ingenic,reset-pin-assert-time-ms",
&rtc->reset_pin_assert_time);
/* Default: 100ms */
rtc->min_wakeup_pin_assert_time = 100;
of_property_read_u32(np,
- "min-wakeup-pin-assert-time-ms",
+ "ingenic,min-wakeup-pin-assert-time-ms",
&rtc->min_wakeup_pin_assert_time);
dev_for_power_off = &pdev->dev;
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index e8194f1f01a8..92f19bf997b2 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -160,15 +160,10 @@ static int m48t35_probe(struct platform_device *pdev)
return -ENOMEM;
priv->size = resource_size(res);
- /*
- * kludge: remove the #ifndef after ioc3 resource
- * conflicts are resolved
- */
-#ifndef CONFIG_SGI_IP27
if (!devm_request_mem_region(&pdev->dev, res->start, priv->size,
pdev->name))
return -EBUSY;
-#endif
+
priv->baseaddr = res->start;
priv->reg = devm_ioremap(&pdev->dev, priv->baseaddr, priv->size);
if (!priv->reg)
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 15a9d0278778..3040844129ce 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -111,7 +111,7 @@ static int mpc5121_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
now = in_be32(&regs->actual_time) + in_be32(&regs->target_time);
- rtc_time_to_tm(now, tm);
+ rtc_time64_to_tm(now, tm);
/*
* update second minute hour registers
@@ -126,16 +126,14 @@ static int mpc5121_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
- int ret;
unsigned long now;
/*
* The actual_time register is read only so we write the offset
* between it and linux time to the target_time register.
*/
- ret = rtc_tm_to_time(tm, &now);
- if (ret == 0)
- out_be32(&regs->target_time, now - in_be32(&regs->actual_time));
+ now = rtc_tm_to_time64(tm);
+ out_be32(&regs->target_time, now - in_be32(&regs->actual_time));
/*
* update second minute hour registers
@@ -315,8 +313,8 @@ static int mpc5121_rtc_probe(struct platform_device *op)
if (!rtc)
return -ENOMEM;
- rtc->regs = of_iomap(op->dev.of_node, 0);
- if (!rtc->regs) {
+ rtc->regs = devm_platform_ioremap_resource(op, 0);
+ if (IS_ERR(rtc->regs)) {
dev_err(&op->dev, "%s: couldn't map io space\n", __func__);
return -ENOSYS;
}
@@ -326,8 +324,8 @@ static int mpc5121_rtc_probe(struct platform_device *op)
platform_set_drvdata(op, rtc);
rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1);
- err = request_irq(rtc->irq, mpc5121_rtc_handler, 0,
- "mpc5121-rtc", &op->dev);
+ err = devm_request_irq(&op->dev, rtc->irq, mpc5121_rtc_handler, 0,
+ "mpc5121-rtc", &op->dev);
if (err) {
dev_err(&op->dev, "%s: could not request irq: %i\n",
__func__, rtc->irq);
@@ -335,14 +333,26 @@ static int mpc5121_rtc_probe(struct platform_device *op)
}
rtc->irq_periodic = irq_of_parse_and_map(op->dev.of_node, 0);
- err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd,
- 0, "mpc5121-rtc_upd", &op->dev);
+ err = devm_request_irq(&op->dev, rtc->irq_periodic,
+ mpc5121_rtc_handler_upd, 0, "mpc5121-rtc_upd",
+ &op->dev);
if (err) {
dev_err(&op->dev, "%s: could not request irq: %i\n",
__func__, rtc->irq_periodic);
goto out_dispose2;
}
+ rtc->rtc = devm_rtc_allocate_device(&op->dev);
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ goto out_dispose2;
+ }
+
+ rtc->rtc->ops = &mpc5200_rtc_ops;
+ rtc->rtc->uie_unsupported = 1;
+ rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ rtc->rtc->range_max = 65733206399ULL; /* 4052-12-31 23:59:59 */
+
if (of_device_is_compatible(op->dev.of_node, "fsl,mpc5121-rtc")) {
u32 ka;
ka = in_be32(&rtc->regs->keep_alive);
@@ -351,30 +361,26 @@ static int mpc5121_rtc_probe(struct platform_device *op)
"mpc5121-rtc: Battery or oscillator failure!\n");
out_be32(&rtc->regs->keep_alive, ka);
}
-
- rtc->rtc = devm_rtc_device_register(&op->dev, "mpc5121-rtc",
- &mpc5121_rtc_ops, THIS_MODULE);
- } else {
- rtc->rtc = devm_rtc_device_register(&op->dev, "mpc5200-rtc",
- &mpc5200_rtc_ops, THIS_MODULE);
+ rtc->rtc->ops = &mpc5121_rtc_ops;
+ /*
+ * This is a limitation of the driver that abuses the target
+ * time register, the actual maximum year for the mpc5121 is
+ * also 4052.
+ */
+ rtc->rtc->range_min = 0;
+ rtc->rtc->range_max = U32_MAX;
}
- if (IS_ERR(rtc->rtc)) {
- err = PTR_ERR(rtc->rtc);
- goto out_free_irq;
- }
- rtc->rtc->uie_unsupported = 1;
+ err = rtc_register_device(rtc->rtc);
+ if (err)
+ goto out_dispose2;
return 0;
-out_free_irq:
- free_irq(rtc->irq_periodic, &op->dev);
out_dispose2:
irq_dispose_mapping(rtc->irq_periodic);
- free_irq(rtc->irq, &op->dev);
out_dispose:
irq_dispose_mapping(rtc->irq);
- iounmap(rtc->regs);
return err;
}
@@ -388,9 +394,6 @@ static int mpc5121_rtc_remove(struct platform_device *op)
out_8(&regs->alm_enable, 0);
out_8(&regs->int_enable, in_8(&regs->int_enable) & ~0x1);
- iounmap(rtc->regs);
- free_irq(rtc->irq, &op->dev);
- free_irq(rtc->irq_periodic, &op->dev);
irq_dispose_mapping(rtc->irq);
irq_dispose_mapping(rtc->irq_periodic);
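
[Editorial note: the mpc5121 conversion above moves to device-managed resources, which is why the goto-based unwinding and the iounmap()/free_irq() calls in remove() could be deleted. A minimal sketch of the shape that results, with a hypothetical handler:]

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_handler(int irq, void *data);	/* hypothetical */

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int irq;

	/*
	 * devm_* resources are released automatically on probe failure
	 * and at unbind, so no matching iounmap()/free_irq() is needed.
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, example_handler, 0,
				"example", pdev);
}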
diff --git a/drivers/rtc/rtc-mt2712.c b/drivers/rtc/rtc-mt2712.c
new file mode 100644
index 000000000000..581b8731fb8a
--- /dev/null
+++ b/drivers/rtc/rtc-mt2712.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Ran Bi <ran.bi@mediatek.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define MT2712_BBPU 0x0000
+#define MT2712_BBPU_CLRPKY BIT(4)
+#define MT2712_BBPU_RELOAD BIT(5)
+#define MT2712_BBPU_CBUSY BIT(6)
+#define MT2712_BBPU_KEY (0x43 << 8)
+
+#define MT2712_IRQ_STA 0x0004
+#define MT2712_IRQ_STA_AL BIT(0)
+#define MT2712_IRQ_STA_TC BIT(1)
+
+#define MT2712_IRQ_EN 0x0008
+#define MT2712_IRQ_EN_AL BIT(0)
+#define MT2712_IRQ_EN_TC BIT(1)
+#define MT2712_IRQ_EN_ONESHOT BIT(2)
+
+#define MT2712_CII_EN 0x000c
+
+#define MT2712_AL_MASK 0x0010
+#define MT2712_AL_MASK_DOW BIT(4)
+
+#define MT2712_TC_SEC 0x0014
+#define MT2712_TC_MIN 0x0018
+#define MT2712_TC_HOU 0x001c
+#define MT2712_TC_DOM 0x0020
+#define MT2712_TC_DOW 0x0024
+#define MT2712_TC_MTH 0x0028
+#define MT2712_TC_YEA 0x002c
+
+#define MT2712_AL_SEC 0x0030
+#define MT2712_AL_MIN 0x0034
+#define MT2712_AL_HOU 0x0038
+#define MT2712_AL_DOM 0x003c
+#define MT2712_AL_DOW 0x0040
+#define MT2712_AL_MTH 0x0044
+#define MT2712_AL_YEA 0x0048
+
+#define MT2712_SEC_MASK 0x003f
+#define MT2712_MIN_MASK 0x003f
+#define MT2712_HOU_MASK 0x001f
+#define MT2712_DOM_MASK 0x001f
+#define MT2712_DOW_MASK 0x0007
+#define MT2712_MTH_MASK 0x000f
+#define MT2712_YEA_MASK 0x007f
+
+#define MT2712_POWERKEY1 0x004c
+#define MT2712_POWERKEY2 0x0050
+#define MT2712_POWERKEY1_KEY 0xa357
+#define MT2712_POWERKEY2_KEY 0x67d2
+
+#define MT2712_CON0 0x005c
+#define MT2712_CON1 0x0060
+
+#define MT2712_PROT 0x0070
+#define MT2712_PROT_UNLOCK1 0x9136
+#define MT2712_PROT_UNLOCK2 0x586a
+
+#define MT2712_WRTGR 0x0078
+
+#define MT2712_RTC_TIMESTAMP_END_2127 4985971199LL
+
+struct mt2712_rtc {
+ struct rtc_device *rtc;
+ void __iomem *base;
+ int irq;
+ u8 irq_wake_enabled;
+ u8 powerlost;
+};
+
+static inline u32 mt2712_readl(struct mt2712_rtc *mt2712_rtc, u32 reg)
+{
+ return readl(mt2712_rtc->base + reg);
+}
+
+static inline void mt2712_writel(struct mt2712_rtc *mt2712_rtc,
+ u32 reg, u32 val)
+{
+ writel(val, mt2712_rtc->base + reg);
+}
+
+static void mt2712_rtc_write_trigger(struct mt2712_rtc *mt2712_rtc)
+{
+ unsigned long timeout = jiffies + HZ / 10;
+
+ mt2712_writel(mt2712_rtc, MT2712_WRTGR, 1);
+ while (1) {
+ if (!(mt2712_readl(mt2712_rtc, MT2712_BBPU)
+ & MT2712_BBPU_CBUSY))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&mt2712_rtc->rtc->dev,
+ "%s time out!\n", __func__);
+ break;
+ }
+ cpu_relax();
+ }
+}
+
+static void mt2712_rtc_writeif_unlock(struct mt2712_rtc *mt2712_rtc)
+{
+ mt2712_writel(mt2712_rtc, MT2712_PROT, MT2712_PROT_UNLOCK1);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+ mt2712_writel(mt2712_rtc, MT2712_PROT, MT2712_PROT_UNLOCK2);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+}
+
+static irqreturn_t rtc_irq_handler_thread(int irq, void *data)
+{
+ struct mt2712_rtc *mt2712_rtc = data;
+ u16 irqsta;
+
+ /* Clear interrupt */
+ irqsta = mt2712_readl(mt2712_rtc, MT2712_IRQ_STA);
+ if (irqsta & MT2712_IRQ_STA_AL) {
+ rtc_update_irq(mt2712_rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void __mt2712_rtc_read_time(struct mt2712_rtc *mt2712_rtc,
+ struct rtc_time *tm, int *sec)
+{
+ tm->tm_sec = mt2712_readl(mt2712_rtc, MT2712_TC_SEC)
+ & MT2712_SEC_MASK;
+ tm->tm_min = mt2712_readl(mt2712_rtc, MT2712_TC_MIN)
+ & MT2712_MIN_MASK;
+ tm->tm_hour = mt2712_readl(mt2712_rtc, MT2712_TC_HOU)
+ & MT2712_HOU_MASK;
+ tm->tm_mday = mt2712_readl(mt2712_rtc, MT2712_TC_DOM)
+ & MT2712_DOM_MASK;
+ tm->tm_mon = (mt2712_readl(mt2712_rtc, MT2712_TC_MTH) - 1)
+ & MT2712_MTH_MASK;
+ tm->tm_year = (mt2712_readl(mt2712_rtc, MT2712_TC_YEA) + 100)
+ & MT2712_YEA_MASK;
+
+ *sec = mt2712_readl(mt2712_rtc, MT2712_TC_SEC) & MT2712_SEC_MASK;
+}
+
+static int mt2712_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+ int sec;
+
+ if (mt2712_rtc->powerlost)
+ return -EINVAL;
+
+ do {
+ __mt2712_rtc_read_time(mt2712_rtc, tm, &sec);
+ } while (sec < tm->tm_sec); /* SEC has carried */
+
+ return 0;
+}
+
+static int mt2712_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+
+ mt2712_writel(mt2712_rtc, MT2712_TC_SEC, tm->tm_sec & MT2712_SEC_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_MIN, tm->tm_min & MT2712_MIN_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_HOU, tm->tm_hour & MT2712_HOU_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_DOM, tm->tm_mday & MT2712_DOM_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_MTH,
+ (tm->tm_mon + 1) & MT2712_MTH_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_YEA,
+ (tm->tm_year - 100) & MT2712_YEA_MASK);
+
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ if (mt2712_rtc->powerlost)
+ mt2712_rtc->powerlost = false;
+
+ return 0;
+}
+
+static int mt2712_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alm->time;
+ u16 irqen;
+
+ irqen = mt2712_readl(mt2712_rtc, MT2712_IRQ_EN);
+ alm->enabled = !!(irqen & MT2712_IRQ_EN_AL);
+
+ tm->tm_sec = mt2712_readl(mt2712_rtc, MT2712_AL_SEC) & MT2712_SEC_MASK;
+ tm->tm_min = mt2712_readl(mt2712_rtc, MT2712_AL_MIN) & MT2712_MIN_MASK;
+ tm->tm_hour = mt2712_readl(mt2712_rtc, MT2712_AL_HOU) & MT2712_HOU_MASK;
+ tm->tm_mday = mt2712_readl(mt2712_rtc, MT2712_AL_DOM) & MT2712_DOM_MASK;
+ tm->tm_mon = (mt2712_readl(mt2712_rtc, MT2712_AL_MTH) - 1)
+ & MT2712_MTH_MASK;
+ tm->tm_year = (mt2712_readl(mt2712_rtc, MT2712_AL_YEA) + 100)
+ & MT2712_YEA_MASK;
+
+ return 0;
+}
+
+static int mt2712_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+ u16 irqen;
+
+ irqen = mt2712_readl(mt2712_rtc, MT2712_IRQ_EN);
+ if (enabled)
+ irqen |= MT2712_IRQ_EN_AL;
+ else
+ irqen &= ~MT2712_IRQ_EN_AL;
+ mt2712_writel(mt2712_rtc, MT2712_IRQ_EN, irqen);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ return 0;
+}
+
+static int mt2712_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alm->time;
+
+ dev_dbg(&mt2712_rtc->rtc->dev, "set al time: %ptR, alm en: %d\n",
+ tm, alm->enabled);
+
+ mt2712_writel(mt2712_rtc, MT2712_AL_SEC,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_SEC)
+ & ~(MT2712_SEC_MASK)) | (tm->tm_sec & MT2712_SEC_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_MIN,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_MIN)
+ & ~(MT2712_MIN_MASK)) | (tm->tm_min & MT2712_MIN_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_HOU,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_HOU)
+ & ~(MT2712_HOU_MASK)) | (tm->tm_hour & MT2712_HOU_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_DOM,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_DOM)
+ & ~(MT2712_DOM_MASK)) | (tm->tm_mday & MT2712_DOM_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_MTH,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_MTH)
+ & ~(MT2712_MTH_MASK))
+ | ((tm->tm_mon + 1) & MT2712_MTH_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_YEA,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_YEA)
+ & ~(MT2712_YEA_MASK))
+ | ((tm->tm_year - 100) & MT2712_YEA_MASK));
+
+ /* mask day of week */
+ mt2712_writel(mt2712_rtc, MT2712_AL_MASK, MT2712_AL_MASK_DOW);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ mt2712_rtc_alarm_irq_enable(dev, alm->enabled);
+
+ return 0;
+}
+
+/* Init RTC register */
+static void mt2712_rtc_hw_init(struct mt2712_rtc *mt2712_rtc)
+{
+ u32 p1, p2;
+
+ mt2712_writel(mt2712_rtc, MT2712_BBPU,
+ MT2712_BBPU_KEY | MT2712_BBPU_RELOAD);
+
+ mt2712_writel(mt2712_rtc, MT2712_CII_EN, 0);
+ mt2712_writel(mt2712_rtc, MT2712_AL_MASK, 0);
+	/* necessary before setting MT2712_POWERKEY */
+ mt2712_writel(mt2712_rtc, MT2712_CON0, 0x4848);
+ mt2712_writel(mt2712_rtc, MT2712_CON1, 0x0048);
+
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ p1 = mt2712_readl(mt2712_rtc, MT2712_POWERKEY1);
+ p2 = mt2712_readl(mt2712_rtc, MT2712_POWERKEY2);
+ if (p1 != MT2712_POWERKEY1_KEY || p2 != MT2712_POWERKEY2_KEY) {
+ mt2712_rtc->powerlost = true;
+ dev_dbg(&mt2712_rtc->rtc->dev,
+ "powerkey not set (lost power)\n");
+ } else {
+ mt2712_rtc->powerlost = false;
+ }
+
+	/* RTC needs POWERKEY1/2 to match before it enters normal work mode */
+ mt2712_writel(mt2712_rtc, MT2712_POWERKEY1, MT2712_POWERKEY1_KEY);
+ mt2712_writel(mt2712_rtc, MT2712_POWERKEY2, MT2712_POWERKEY2_KEY);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ mt2712_rtc_writeif_unlock(mt2712_rtc);
+}
+
+static const struct rtc_class_ops mt2712_rtc_ops = {
+ .read_time = mt2712_rtc_read_time,
+ .set_time = mt2712_rtc_set_time,
+ .read_alarm = mt2712_rtc_read_alarm,
+ .set_alarm = mt2712_rtc_set_alarm,
+ .alarm_irq_enable = mt2712_rtc_alarm_irq_enable,
+};
+
+static int mt2712_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct mt2712_rtc *mt2712_rtc;
+ int ret;
+
+ mt2712_rtc = devm_kzalloc(&pdev->dev,
+ sizeof(struct mt2712_rtc), GFP_KERNEL);
+ if (!mt2712_rtc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mt2712_rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mt2712_rtc->base))
+ return PTR_ERR(mt2712_rtc->base);
+
+ /* rtc hw init */
+ mt2712_rtc_hw_init(mt2712_rtc);
+
+ mt2712_rtc->irq = platform_get_irq(pdev, 0);
+ if (mt2712_rtc->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return mt2712_rtc->irq;
+ }
+
+ platform_set_drvdata(pdev, mt2712_rtc);
+
+ mt2712_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(mt2712_rtc->rtc))
+ return PTR_ERR(mt2712_rtc->rtc);
+
+ ret = devm_request_threaded_irq(&pdev->dev, mt2712_rtc->irq, NULL,
+ rtc_irq_handler_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(&mt2712_rtc->rtc->dev),
+ mt2712_rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ mt2712_rtc->irq, ret);
+ return ret;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+
+ mt2712_rtc->rtc->ops = &mt2712_rtc_ops;
+ mt2712_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ mt2712_rtc->rtc->range_max = MT2712_RTC_TIMESTAMP_END_2127;
+
+ ret = rtc_register_device(mt2712_rtc->rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "register rtc device failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mt2712_rtc_suspend(struct device *dev)
+{
+ int wake_status = 0;
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev)) {
+ wake_status = enable_irq_wake(mt2712_rtc->irq);
+ if (!wake_status)
+ mt2712_rtc->irq_wake_enabled = true;
+ }
+
+ return 0;
+}
+
+static int mt2712_rtc_resume(struct device *dev)
+{
+ int wake_status = 0;
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev) && mt2712_rtc->irq_wake_enabled) {
+ wake_status = disable_irq_wake(mt2712_rtc->irq);
+ if (!wake_status)
+ mt2712_rtc->irq_wake_enabled = false;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mt2712_pm_ops, mt2712_rtc_suspend,
+ mt2712_rtc_resume);
+#endif
+
+static const struct of_device_id mt2712_rtc_of_match[] = {
+ { .compatible = "mediatek,mt2712-rtc", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, mt2712_rtc_of_match);
+
+static struct platform_driver mt2712_rtc_driver = {
+ .driver = {
+ .name = "mt2712-rtc",
+ .of_match_table = mt2712_rtc_of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mt2712_pm_ops,
+#endif
+ },
+ .probe = mt2712_rtc_probe,
+};
+
+module_platform_driver(mt2712_rtc_driver);
+
+MODULE_DESCRIPTION("MediaTek MT2712 SoC based RTC Driver");
+MODULE_AUTHOR("Ran Bi <ran.bi@mediatek.com>");
+MODULE_LICENSE("GPL");
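
[Editorial note: one detail of the new driver worth calling out is the torn-read guard in mt2712_rtc_read_time(). The calendar is spread across several registers, so a seconds rollover between two reads can carry into the minute (or any higher) field. A minimal sketch of the technique, with a hypothetical accessor standing in for the MMIO reads:]

#include <linux/rtc.h>

/* hypothetical: fills tm, then samples the seconds register once more */
static void example_read_fields(struct rtc_time *tm, int *sec_again);

static int example_read_time(struct device *dev, struct rtc_time *tm)
{
	int sec_again;

	/*
	 * If the second sample of the seconds register is smaller than
	 * the seconds field assembled first, the counter wrapped midway
	 * through the sequence, so retry the whole read.
	 */
	do {
		example_read_fields(tm, &sec_again);
	} while (sec_again < tm->tm_sec);

	return 0;
}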
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 902d57dcd0d4..a8cfbde048f4 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -307,6 +307,14 @@ static const struct rtc_class_ops mxc_rtc_ops = {
.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
};
+static void mxc_rtc_action(void *p)
+{
+ struct rtc_plat_data *pdata = p;
+
+ clk_disable_unprepare(pdata->clk_ref);
+ clk_disable_unprepare(pdata->clk_ipg);
+}
+
static int mxc_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
@@ -366,14 +374,20 @@ static int mxc_rtc_probe(struct platform_device *pdev)
pdata->clk_ref = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(pdata->clk_ref)) {
+ clk_disable_unprepare(pdata->clk_ipg);
dev_err(&pdev->dev, "unable to get ref clock!\n");
- ret = PTR_ERR(pdata->clk_ref);
- goto exit_put_clk_ipg;
+ return PTR_ERR(pdata->clk_ref);
}
ret = clk_prepare_enable(pdata->clk_ref);
+ if (ret) {
+ clk_disable_unprepare(pdata->clk_ipg);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, mxc_rtc_action, pdata);
if (ret)
- goto exit_put_clk_ipg;
+ return ret;
rate = clk_get_rate(pdata->clk_ref);
@@ -385,16 +399,14 @@ static int mxc_rtc_probe(struct platform_device *pdev)
reg = RTC_INPUT_CLK_38400HZ;
else {
dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate);
- ret = -EINVAL;
- goto exit_put_clk_ref;
+ return -EINVAL;
}
reg |= RTC_ENABLE_BIT;
writew(reg, (pdata->ioaddr + RTC_RTCCTL));
if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
dev_err(&pdev->dev, "hardware module can't be enabled!\n");
- ret = -EIO;
- goto exit_put_clk_ref;
+ return -EIO;
}
platform_set_drvdata(pdev, pdata);
@@ -417,29 +429,10 @@ static int mxc_rtc_probe(struct platform_device *pdev)
}
ret = rtc_register_device(rtc);
- if (ret)
- goto exit_put_clk_ref;
-
- return 0;
-
-exit_put_clk_ref:
- clk_disable_unprepare(pdata->clk_ref);
-exit_put_clk_ipg:
- clk_disable_unprepare(pdata->clk_ipg);
return ret;
}
-static int mxc_rtc_remove(struct platform_device *pdev)
-{
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(pdata->clk_ref);
- clk_disable_unprepare(pdata->clk_ipg);
-
- return 0;
-}
-
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
@@ -447,7 +440,6 @@ static struct platform_driver mxc_rtc_driver = {
},
.id_table = imx_rtc_devtype,
.probe = mxc_rtc_probe,
- .remove = mxc_rtc_remove,
};
module_platform_driver(mxc_rtc_driver)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index d4ed20fb3194..c20fc7937dfa 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -9,7 +9,6 @@
* Copyright (C) 2014 Johan Hovold <johan@kernel.org>
*/
-#include <dt-bindings/gpio/gpio.h>
#include <linux/bcd.h>
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 1db17ba1fc64..7a87f461bec8 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -9,6 +9,7 @@
* Copyright (C) 2019 Micro Crystal AG
* Author: Alexandre Belloni <alexandre.belloni@bootlin.com>
*/
+#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
@@ -44,6 +45,10 @@
#define PCF85063_OFFSET_STEP0 4340
#define PCF85063_OFFSET_STEP1 4069
+#define PCF85063_REG_CLKO_F_MASK 0x07 /* frequency mask */
+#define PCF85063_REG_CLKO_F_32768HZ 0x00
+#define PCF85063_REG_CLKO_F_OFF 0x07
+
#define PCF85063_REG_RAM 0x03
#define PCF85063_REG_SC 0x04 /* datetime */
@@ -61,6 +66,9 @@ struct pcf85063_config {
struct pcf85063 {
struct rtc_device *rtc;
struct regmap *regmap;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
};
static int pcf85063_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -357,6 +365,150 @@ static int pcf85063_load_capacitance(struct pcf85063 *pcf85063,
PCF85063_REG_CTRL1_CAP_SEL, reg);
}
+#ifdef CONFIG_COMMON_CLK
+/*
+ * Handling of the clkout
+ */
+
+#define clkout_hw_to_pcf85063(_hw) container_of(_hw, struct pcf85063, clkout_hw)
+
+static int clkout_rates[] = {
+ 32768,
+ 16384,
+ 8192,
+ 4096,
+ 2048,
+ 1024,
+ 1,
+ 0
+};
+
+static unsigned long pcf85063_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw);
+ unsigned int buf;
+ int ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf);
+
+ if (ret < 0)
+ return 0;
+
+ buf &= PCF85063_REG_CLKO_F_MASK;
+ return clkout_rates[buf];
+}
+
+static long pcf85063_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int pcf85063_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate)
+ return regmap_update_bits(pcf85063->regmap,
+ PCF85063_REG_CTRL2,
+ PCF85063_REG_CLKO_F_MASK, i);
+
+ return -EINVAL;
+}
+
+static int pcf85063_clkout_control(struct clk_hw *hw, bool enable)
+{
+ struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw);
+ unsigned int buf;
+ int ret;
+
+ ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &buf);
+ if (ret < 0)
+ return ret;
+ buf &= PCF85063_REG_CLKO_F_MASK;
+
+ if (enable) {
+ if (buf == PCF85063_REG_CLKO_F_OFF)
+ buf = PCF85063_REG_CLKO_F_32768HZ;
+ else
+ return 0;
+ } else {
+ if (buf != PCF85063_REG_CLKO_F_OFF)
+ buf = PCF85063_REG_CLKO_F_OFF;
+ else
+ return 0;
+ }
+
+ return regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL2,
+ PCF85063_REG_CLKO_F_MASK, buf);
+}
+
+static int pcf85063_clkout_prepare(struct clk_hw *hw)
+{
+ return pcf85063_clkout_control(hw, 1);
+}
+
+static void pcf85063_clkout_unprepare(struct clk_hw *hw)
+{
+ pcf85063_clkout_control(hw, 0);
+}
+
+static int pcf85063_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw);
+ unsigned int buf;
+ int ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf);
+
+ if (ret < 0)
+ return 0;
+
+ return (buf & PCF85063_REG_CLKO_F_MASK) != PCF85063_REG_CLKO_F_OFF;
+}
+
+static const struct clk_ops pcf85063_clkout_ops = {
+ .prepare = pcf85063_clkout_prepare,
+ .unprepare = pcf85063_clkout_unprepare,
+ .is_prepared = pcf85063_clkout_is_prepared,
+ .recalc_rate = pcf85063_clkout_recalc_rate,
+ .round_rate = pcf85063_clkout_round_rate,
+ .set_rate = pcf85063_clkout_set_rate,
+};
+
+static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = "pcf85063-clkout";
+ init.ops = &pcf85063_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ pcf85063->clkout_hw.init = &init;
+
+	/* optional override of the clock name */
+ of_property_read_string(pcf85063->rtc->dev.of_node,
+ "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(pcf85063->rtc->dev.of_node,
+ of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
static const struct pcf85063_config pcf85063a_config = {
.regmap = {
.reg_bits = 8,
@@ -457,6 +609,11 @@ static int pcf85063_probe(struct i2c_client *client)
nvmem_cfg.priv = pcf85063->regmap;
rtc_nvmem_register(pcf85063->rtc, &nvmem_cfg);
+#ifdef CONFIG_COMMON_CLK
+ /* register clk in common clk framework */
+ pcf85063_clkout_register_clk(pcf85063);
+#endif
+
return rtc_register_device(pcf85063->rtc);
}
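
[Editorial note: the pcf85063 clkout above is exposed through the common clock framework by embedding a clk_hw in the driver state, pointing its init data at a clk_ops table, registering with devm_clk_register(), and publishing the clock to DT consumers. A minimal sketch of that registration shape, with hypothetical example_* names and the ops table passed in:]

#include <linux/clk-provider.h>

struct example_chip {
	struct clk_hw clkout_hw;  /* embedded so container_of() recovers the chip */
};

static struct clk *example_register_clkout(struct device *dev,
					   struct example_chip *chip,
					   const struct clk_ops *ops)
{
	struct clk_init_data init = {
		.name = "example-clkout",
		.ops = ops,
		/* no parents: the oscillator is internal to the chip */
	};
	struct clk *clk;

	chip->clkout_hw.init = &init;
	clk = devm_clk_register(dev, &chip->clkout_hw);
	if (!IS_ERR(clk))
		of_clk_add_provider(dev->of_node, of_clk_src_simple_get, clk);

	return clk;
}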
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index d4a5f8afafbc..ebe03eba8f5f 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -36,32 +36,24 @@ static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(rtc->base + RTC_MR), &alrm->time);
+ rtc_time64_to_tm(readl(rtc->base + RTC_MR), &alrm->time);
return 0;
}
static int pl030_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
- /*
- * At the moment, we can only deal with non-wildcarded alarm times.
- */
- ret = rtc_valid_tm(&alrm->time);
- if (ret == 0)
- ret = rtc_tm_to_time(&alrm->time, &time);
- if (ret == 0)
- writel(time, rtc->base + RTC_MR);
- return ret;
+ writel(rtc_tm_to_time64(&alrm->time), rtc->base + RTC_MR);
+
+ return 0;
}
static int pl030_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(rtc->base + RTC_DR), tm);
+ rtc_time64_to_tm(readl(rtc->base + RTC_DR), tm);
return 0;
}
@@ -77,14 +69,10 @@ static int pl030_read_time(struct device *dev, struct rtc_time *tm)
static int pl030_set_time(struct device *dev, struct rtc_time *tm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
- ret = rtc_tm_to_time(tm, &time);
- if (ret == 0)
- writel(time + 1, rtc->base + RTC_LR);
+ writel(rtc_tm_to_time64(tm) + 1, rtc->base + RTC_LR);
- return ret;
+ return 0;
}
static const struct rtc_class_ops pl030_ops = {
@@ -116,6 +104,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
}
rtc->rtc->ops = &pl030_ops;
+ rtc->rtc->range_max = U32_MAX;
rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!rtc->base) {
ret = -ENOMEM;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 180caebbd355..40d7450a1ce4 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -80,6 +80,8 @@ struct pl031_vendor_data {
bool clockwatch;
bool st_weekday;
unsigned long irqflags;
+ time64_t range_min;
+ timeu64_t range_max;
};
struct pl031_local {
@@ -123,11 +125,9 @@ static int pl031_stv2_tm_to_time(struct device *dev,
return -EINVAL;
} else if (wday == -1) {
/* wday is not provided, calculate it here */
- unsigned long time;
struct rtc_time calc_tm;
- rtc_tm_to_time(tm, &time);
- rtc_time_to_tm(time, &calc_tm);
+ rtc_time64_to_tm(rtc_tm_to_time64(tm), &calc_tm);
wday = calc_tm.tm_wday;
}
@@ -210,17 +210,13 @@ static int pl031_stv2_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
unsigned long bcd_year;
int ret;
- /* At the moment, we can only deal with non-wildcarded alarm times. */
- ret = rtc_valid_tm(&alarm->time);
+ ret = pl031_stv2_tm_to_time(dev, &alarm->time,
+ &time, &bcd_year);
if (ret == 0) {
- ret = pl031_stv2_tm_to_time(dev, &alarm->time,
- &time, &bcd_year);
- if (ret == 0) {
- writel(bcd_year, ldata->base + RTC_YMR);
- writel(time, ldata->base + RTC_MR);
+ writel(bcd_year, ldata->base + RTC_YMR);
+ writel(time, ldata->base + RTC_MR);
- pl031_alarm_irq_enable(dev, alarm->enabled);
- }
+ pl031_alarm_irq_enable(dev, alarm->enabled);
}
return ret;
@@ -248,30 +244,25 @@ static int pl031_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(ldata->base + RTC_DR), tm);
+ rtc_time64_to_tm(readl(ldata->base + RTC_DR), tm);
return 0;
}
static int pl031_set_time(struct device *dev, struct rtc_time *tm)
{
- unsigned long time;
struct pl031_local *ldata = dev_get_drvdata(dev);
- int ret;
- ret = rtc_tm_to_time(tm, &time);
+ writel(rtc_tm_to_time64(tm), ldata->base + RTC_LR);
- if (ret == 0)
- writel(time, ldata->base + RTC_LR);
-
- return ret;
+ return 0;
}
static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
+ rtc_time64_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
alarm->pending = readl(ldata->base + RTC_RIS) & RTC_BIT_AI;
alarm->enabled = readl(ldata->base + RTC_IMSC) & RTC_BIT_AI;
@@ -282,20 +273,10 @@ static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
- /* At the moment, we can only deal with non-wildcarded alarm times. */
- ret = rtc_valid_tm(&alarm->time);
- if (ret == 0) {
- ret = rtc_tm_to_time(&alarm->time, &time);
- if (ret == 0) {
- writel(time, ldata->base + RTC_MR);
- pl031_alarm_irq_enable(dev, alarm->enabled);
- }
- }
+ writel(rtc_tm_to_time64(&alarm->time), ldata->base + RTC_MR);
- return ret;
+ return 0;
}
static int pl031_remove(struct amba_device *adev)
@@ -383,6 +364,8 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(ldata->rtc);
ldata->rtc->ops = ops;
+ ldata->rtc->range_min = vendor->range_min;
+ ldata->rtc->range_max = vendor->range_max;
ret = rtc_register_device(ldata->rtc);
if (ret)
@@ -413,6 +396,7 @@ static struct pl031_vendor_data arm_pl031 = {
.set_alarm = pl031_set_alarm,
.alarm_irq_enable = pl031_alarm_irq_enable,
},
+ .range_max = U32_MAX,
};
/* The First ST derivative */
@@ -426,6 +410,7 @@ static struct pl031_vendor_data stv1_pl031 = {
},
.clockwatch = true,
.st_weekday = true,
+ .range_max = U32_MAX,
};
/* And the second ST derivative */
@@ -446,6 +431,8 @@ static struct pl031_vendor_data stv2_pl031 = {
* remove IRQF_COND_SUSPEND
*/
.irqflags = IRQF_SHARED | IRQF_COND_SUSPEND,
+ .range_min = RTC_TIMESTAMP_BEGIN_0000,
+ .range_max = RTC_TIMESTAMP_END_9999,
};
static const struct amba_id pl031_ids[] = {
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 07ea1be3abb9..b45ee2cb2c04 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -84,7 +84,7 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
if (!rtc_dd->allow_set_time)
return -EACCES;
- rtc_tm_to_time(tm, &secs);
+ secs = rtc_tm_to_time64(tm);
dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
@@ -208,7 +208,7 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
secs = value[0] | (value[1] << 8) | (value[2] << 16) |
((unsigned long)value[3] << 24);
- rtc_time_to_tm(secs, tm);
+ rtc_time64_to_tm(secs, tm);
dev_dbg(dev, "secs = %lu, h:m:s == %ptRt, y-m-d = %ptRdr\n", secs, tm, tm);
@@ -224,7 +224,7 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
- rtc_tm_to_time(&alarm->time, &secs);
+ secs = rtc_tm_to_time64(&alarm->time);
for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) {
value[i] = secs & 0xFF;
@@ -280,13 +280,7 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
secs = value[0] | (value[1] << 8) | (value[2] << 16) |
((unsigned long)value[3] << 24);
- rtc_time_to_tm(secs, &alarm->time);
-
- rc = rtc_valid_tm(&alarm->time);
- if (rc < 0) {
- dev_err(dev, "Invalid alarm time read from RTC\n");
- return rc;
- }
+ rtc_time64_to_tm(secs, &alarm->time);
dev_dbg(dev, "Alarm set for - h:m:s=%ptRt, y-m-d=%ptRdr\n",
&alarm->time, &alarm->time);
@@ -301,6 +295,7 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
unsigned int ctrl_reg;
+ u8 value[NUM_8_BIT_RTC_REGS] = {0};
spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
@@ -319,6 +314,16 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
goto rtc_rw_fail;
}
+ /* Clear Alarm register */
+ if (!enable) {
+ rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
+ sizeof(value));
+ if (rc) {
+ dev_err(dev, "Clear RTC ALARM register failed\n");
+ goto rtc_rw_fail;
+ }
+ }
+
rtc_rw_fail:
spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
return rc;
@@ -486,13 +491,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
/* Register the RTC device */
- rtc_dd->rtc = devm_rtc_device_register(&pdev->dev, "pm8xxx_rtc",
- &pm8xxx_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc_dd->rtc)) {
- dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n",
- __func__, PTR_ERR(rtc_dd->rtc));
+ rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc_dd->rtc))
return PTR_ERR(rtc_dd->rtc);
- }
+
+ rtc_dd->rtc->ops = &pm8xxx_rtc_ops;
+ rtc_dd->rtc->range_max = U32_MAX;
/* Request the alarm IRQ */
rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->rtc_alarm_irq,
@@ -504,9 +508,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
return rc;
}
- dev_dbg(&pdev->dev, "Probe success !!\n");
-
- return 0;
+ return rtc_register_device(rtc_dd->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index 89ff713163dd..954b88d2485f 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -85,7 +85,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
/* Time read/write */
static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
- rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
+ rtc_time64_to_tm(readl(RTC_RCNR), rtc_tm);
dev_dbg(dev, "read time %ptRr\n", rtc_tm);
@@ -94,12 +94,9 @@ static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm)
{
- unsigned long rtc_count = 0;
-
dev_dbg(dev, "set time %ptRr\n", tm);
- rtc_tm_to_time(tm, &rtc_count);
- writel(rtc_count, RTC_RCNR);
+ writel(rtc_tm_to_time64(tm), RTC_RCNR);
return 0;
}
@@ -108,7 +105,7 @@ static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *alm_tm = &alrm->time;
- rtc_time_to_tm(readl(RTC_RTAR), alm_tm);
+ rtc_time64_to_tm(readl(RTC_RTAR), alm_tm);
alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE;
@@ -120,12 +117,10 @@ static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *tm = &alrm->time;
- unsigned long rtcalarm_count = 0;
dev_dbg(dev, "set alarm: %d, %ptRr\n", alrm->enabled, tm);
- rtc_tm_to_time(tm, &rtcalarm_count);
- writel(rtcalarm_count, RTC_RTAR);
+ writel(rtc_tm_to_time64(tm), RTC_RTAR);
puv3_rtc_setaie(dev, alrm->enabled);
@@ -234,6 +229,7 @@ static int puv3_rtc_probe(struct platform_device *pdev)
/* register RTC and exit */
rtc->ops = &puv3_rtcops;
+ rtc->range_max = U32_MAX;
ret = rtc_register_device(rtc);
if (ret)
goto err_nortc;
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
new file mode 100644
index 000000000000..24e386ecbc7e
--- /dev/null
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/rtc/rtc-rc5t619.c
+ *
+ * Real time clock driver for RICOH RC5T619 power management chip.
+ *
+ * Copyright (C) 2019 Andreas Kemnade
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mfd/rn5t618.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+
+struct rc5t619_rtc {
+ int irq;
+ struct rtc_device *rtc;
+ struct rn5t618 *rn5t618;
+};
+
+#define CTRL1_ALARM_ENABLED 0x40
+#define CTRL1_24HR 0x20
+#define CTRL1_PERIODIC_MASK 0xf
+
+#define CTRL2_PON 0x10
+#define CTRL2_ALARM_STATUS 0x80
+#define CTRL2_CTFG 0x4
+#define CTRL2_CTC 0x1
+
+#define MONTH_CENTFLAG 0x80
+#define HOUR_PMFLAG 0x20
+#define MDAY_DAL_EXT 0x80
+
+static uint8_t rtc5t619_12hour_bcd2bin(uint8_t hour)
+{
+ if (hour & HOUR_PMFLAG) {
+ hour = bcd2bin(hour & ~HOUR_PMFLAG);
+ return hour == 12 ? 12 : 12 + hour;
+ }
+
+ hour = bcd2bin(hour);
+ return hour == 12 ? 0 : hour;
+}
+
+static uint8_t rtc5t619_12hour_bin2bcd(uint8_t hour)
+{
+ if (!hour)
+ return 0x12;
+
+ if (hour < 12)
+ return bin2bcd(hour);
+
+ if (hour == 12)
+ return 0x12 | HOUR_PMFLAG;
+
+ return bin2bcd(hour - 12) | HOUR_PMFLAG;
+}
+
+static int rc5t619_rtc_periodic_disable(struct device *dev)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ int err;
+
+ /* disable function */
+ err = regmap_update_bits(rtc->rn5t618->regmap,
+ RN5T618_RTC_CTRL1, CTRL1_PERIODIC_MASK, 0);
+ if (err < 0)
+ return err;
+
+ /* clear alarm flag and CTFG */
+ err = regmap_update_bits(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2,
+ CTRL2_ALARM_STATUS | CTRL2_CTFG | CTRL2_CTC,
+ 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/* things to be done once after power on */
+static int rc5t619_rtc_pon_setup(struct device *dev)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ int err;
+ unsigned int reg_data;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &reg_data);
+ if (err < 0)
+ return err;
+
+ /* clear VDET PON */
+ reg_data &= ~(CTRL2_PON | CTRL2_CTC | 0x4a); /* 0101-1011 */
+ reg_data |= 0x20; /* 0010-0000 */
+ err = regmap_write(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, reg_data);
+ if (err < 0)
+ return err;
+
+ /* clearing RTC Adjust register */
+ err = regmap_write(rtc->rn5t618->regmap, RN5T618_RTC_ADJUST, 0);
+ if (err)
+ return err;
+
+ return regmap_update_bits(rtc->rn5t618->regmap,
+ RN5T618_RTC_CTRL1,
+ CTRL1_24HR, CTRL1_24HR);
+}
+
+static int rc5t619_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ u8 buff[7];
+ int err;
+ int cent_flag;
+ unsigned int ctrl1;
+ unsigned int ctrl2;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2);
+ if (err < 0)
+ return err;
+
+ if (ctrl2 & CTRL2_PON)
+ return -EINVAL;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1);
+ if (err < 0)
+ return err;
+
+ err = regmap_bulk_read(rtc->rn5t618->regmap, RN5T618_RTC_SECONDS,
+ buff, sizeof(buff));
+ if (err < 0)
+ return err;
+
+ if (buff[5] & MONTH_CENTFLAG)
+ cent_flag = 1;
+ else
+ cent_flag = 0;
+
+ tm->tm_sec = bcd2bin(buff[0]);
+ tm->tm_min = bcd2bin(buff[1]);
+
+ if (ctrl1 & CTRL1_24HR)
+ tm->tm_hour = bcd2bin(buff[2]);
+ else
+ tm->tm_hour = rtc5t619_12hour_bcd2bin(buff[2]);
+
+ tm->tm_wday = bcd2bin(buff[3]);
+ tm->tm_mday = bcd2bin(buff[4]);
+ tm->tm_mon = bcd2bin(buff[5] & 0x1f) - 1; /* back to system 0-11 */
+ tm->tm_year = bcd2bin(buff[6]) + 100 * cent_flag;
+
+ return 0;
+}
+
+static int rc5t619_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ u8 buff[7];
+ int err;
+ int cent_flag;
+ unsigned int ctrl1;
+ unsigned int ctrl2;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2);
+ if (err < 0)
+ return err;
+
+ if (ctrl2 & CTRL2_PON)
+ rc5t619_rtc_pon_setup(dev);
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1);
+ if (err < 0)
+ return err;
+
+ if (tm->tm_year >= 100)
+ cent_flag = 1;
+ else
+ cent_flag = 0;
+
+ buff[0] = bin2bcd(tm->tm_sec);
+ buff[1] = bin2bcd(tm->tm_min);
+
+ if (ctrl1 & CTRL1_24HR)
+ buff[2] = bin2bcd(tm->tm_hour);
+ else
+ buff[2] = rtc5t619_12hour_bin2bcd(tm->tm_hour);
+
+ buff[3] = bin2bcd(tm->tm_wday);
+ buff[4] = bin2bcd(tm->tm_mday);
+ buff[5] = bin2bcd(tm->tm_mon + 1); /* system set 0-11 */
+ buff[6] = bin2bcd(tm->tm_year - cent_flag * 100);
+
+ if (cent_flag)
+ buff[5] |= MONTH_CENTFLAG;
+
+ err = regmap_bulk_write(rtc->rn5t618->regmap, RN5T618_RTC_SECONDS,
+ buff, sizeof(buff));
+ if (err < 0) {
+ dev_err(dev, "failed to program new time: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* 0-disable, 1-enable */
+static int rc5t619_rtc_alarm_enable(struct device *dev, unsigned int enabled)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+
+ return regmap_update_bits(rtc->rn5t618->regmap,
+ RN5T618_RTC_CTRL1,
+ CTRL1_ALARM_ENABLED,
+ enabled ? CTRL1_ALARM_ENABLED : 0);
+}
+
+static int rc5t619_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ u8 buff[6];
+ unsigned int buff_cent;
+ int err;
+ int cent_flag;
+ unsigned int ctrl1;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1);
+ if (err)
+ return err;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_MONTH, &buff_cent);
+ if (err < 0) {
+ dev_err(dev, "failed to read time: %d\n", err);
+ return err;
+ }
+
+ if (buff_cent & MONTH_CENTFLAG)
+ cent_flag = 1;
+ else
+ cent_flag = 0;
+
+ err = regmap_bulk_read(rtc->rn5t618->regmap, RN5T618_RTC_ALARM_Y_SEC,
+ buff, sizeof(buff));
+ if (err)
+ return err;
+
+ buff[3] = buff[3] & 0x3f;
+
+ alrm->time.tm_sec = bcd2bin(buff[0]);
+ alrm->time.tm_min = bcd2bin(buff[1]);
+
+ if (ctrl1 & CTRL1_24HR)
+ alrm->time.tm_hour = bcd2bin(buff[2]);
+ else
+ alrm->time.tm_hour = rtc5t619_12hour_bcd2bin(buff[2]);
+
+ alrm->time.tm_mday = bcd2bin(buff[3]);
+ alrm->time.tm_mon = bcd2bin(buff[4]) - 1;
+ alrm->time.tm_year = bcd2bin(buff[5]) + 100 * cent_flag;
+ alrm->enabled = !!(ctrl1 & CTRL1_ALARM_ENABLED);
+ dev_dbg(dev, "read alarm: %ptR\n", &alrm->time);
+
+ return 0;
+}
+
+static int rc5t619_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ u8 buff[6];
+ int err;
+ int cent_flag;
+ unsigned int ctrl1;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1);
+ if (err)
+ return err;
+
+ err = rc5t619_rtc_alarm_enable(dev, 0);
+ if (err < 0)
+ return err;
+
+ if (rtc->irq == -1)
+ return -EINVAL;
+
+ if (alrm->enabled == 0)
+ return 0;
+
+ if (alrm->time.tm_year >= 100)
+ cent_flag = 1;
+ else
+ cent_flag = 0;
+
+ alrm->time.tm_mon += 1;
+ buff[0] = bin2bcd(alrm->time.tm_sec);
+ buff[1] = bin2bcd(alrm->time.tm_min);
+
+ if (ctrl1 & CTRL1_24HR)
+ buff[2] = bin2bcd(alrm->time.tm_hour);
+ else
+ buff[2] = rtc5t619_12hour_bin2bcd(alrm->time.tm_hour);
+
+ buff[3] = bin2bcd(alrm->time.tm_mday);
+ buff[4] = bin2bcd(alrm->time.tm_mon);
+ buff[5] = bin2bcd(alrm->time.tm_year - 100 * cent_flag);
+ buff[3] |= MDAY_DAL_EXT;
+
+ err = regmap_bulk_write(rtc->rn5t618->regmap, RN5T618_RTC_ALARM_Y_SEC,
+ buff, sizeof(buff));
+ if (err < 0)
+ return err;
+
+ return rc5t619_rtc_alarm_enable(dev, alrm->enabled);
+}
+
+static const struct rtc_class_ops rc5t619_rtc_ops = {
+ .read_time = rc5t619_rtc_read_time,
+ .set_time = rc5t619_rtc_set_time,
+ .set_alarm = rc5t619_rtc_set_alarm,
+ .read_alarm = rc5t619_rtc_read_alarm,
+ .alarm_irq_enable = rc5t619_rtc_alarm_enable,
+};
+
+static int rc5t619_rtc_alarm_flag_clr(struct device *dev)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+
+	/* clear alarm-D status bits */
+ return regmap_update_bits(rtc->rn5t618->regmap,
+ RN5T618_RTC_CTRL2,
+ CTRL2_ALARM_STATUS | CTRL2_CTC, 0);
+}
+
+static irqreturn_t rc5t619_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+
+ rc5t619_rtc_alarm_flag_clr(dev);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int rc5t619_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
+ struct rc5t619_rtc *rtc;
+ unsigned int ctrl2;
+ int err;
+
+ rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
+	if (!rtc)
+		return -ENOMEM;
+
+ rtc->rn5t618 = rn5t618;
+
+ dev_set_drvdata(dev, rtc);
+ rtc->irq = -1;
+
+ if (rn5t618->irq_data)
+ rtc->irq = regmap_irq_get_virq(rn5t618->irq_data,
+ RN5T618_IRQ_RTC);
+
+ if (rtc->irq < 0)
+ rtc->irq = -1;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2);
+ if (err < 0)
+ return err;
+
+ /* disable rtc periodic function */
+ err = rc5t619_rtc_periodic_disable(&pdev->dev);
+ if (err)
+ return err;
+
+ if (ctrl2 & CTRL2_PON) {
+ err = rc5t619_rtc_alarm_flag_clr(&pdev->dev);
+ if (err)
+ return err;
+ }
+
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+		dev_err(dev, "RTC device allocation failed: %d\n", err);
+ return err;
+ }
+
+ rtc->rtc->ops = &rc5t619_rtc_ops;
+ rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ /* set interrupt and enable it */
+ if (rtc->irq != -1) {
+ err = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ rc5t619_rtc_irq,
+ IRQF_ONESHOT,
+ "rtc-rc5t619",
+ &pdev->dev);
+ if (err < 0) {
+			dev_err(&pdev->dev, "failed to request IRQ %d\n", rtc->irq);
+ rtc->irq = -1;
+
+ err = rc5t619_rtc_alarm_enable(&pdev->dev, 0);
+ if (err)
+ return err;
+
+ } else {
+ /* enable wake */
+ device_init_wakeup(&pdev->dev, 1);
+ enable_irq_wake(rtc->irq);
+ }
+ } else {
+		/* the system does not use the alarm interrupt, so disable it */
+ err = rc5t619_rtc_alarm_enable(&pdev->dev, 0);
+ if (err)
+ return err;
+
+ dev_warn(&pdev->dev, "rc5t619 interrupt is disabled\n");
+ }
+
+ return rtc_register_device(rtc->rtc);
+}
+
+static struct platform_driver rc5t619_rtc_driver = {
+ .driver = {
+ .name = "rc5t619-rtc",
+ },
+ .probe = rc5t619_rtc_probe,
+};
+
+module_platform_driver(rc5t619_rtc_driver);
+MODULE_ALIAS("platform:rc5t619-rtc");
+MODULE_DESCRIPTION("RICOH RC5T619 RTC driver");
+MODULE_LICENSE("GPL");
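The 12-hour helpers above encode hour 0 as BCD 12 with the PM flag clear and
hour 12 as BCD 12 with it set. A standalone round-trip check (userspace
sketch; bcd2bin()/bin2bcd() are reimplemented because linux/bcd.h is
kernel-only, and the helper names here are local to the sketch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HOUR_PMFLAG 0x20

static uint8_t bcd2bin(uint8_t v) { return (v >> 4) * 10 + (v & 0x0f); }
static uint8_t bin2bcd(uint8_t v) { return ((v / 10) << 4) | (v % 10); }

static uint8_t hour12_bcd2bin(uint8_t hour)
{
	if (hour & HOUR_PMFLAG) {
		hour = bcd2bin(hour & ~HOUR_PMFLAG);
		return hour == 12 ? 12 : 12 + hour;	/* 12 PM is noon */
	}
	hour = bcd2bin(hour);
	return hour == 12 ? 0 : hour;			/* 12 AM is midnight */
}

static uint8_t hour12_bin2bcd(uint8_t hour)
{
	if (!hour)
		return 0x12;				/* midnight -> 12 AM */
	if (hour < 12)
		return bin2bcd(hour);
	if (hour == 12)
		return 0x12 | HOUR_PMFLAG;		/* noon -> 12 PM */
	return bin2bcd(hour - 12) | HOUR_PMFLAG;
}

int main(void)
{
	for (uint8_t h = 0; h < 24; h++)
		assert(hour12_bcd2bin(hour12_bin2bcd(h)) == h);
	puts("12-hour BCD round-trip OK for hours 0..23");
	return 0;
}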
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index d37893f6eaee..9ccc97cf5e09 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -111,20 +111,17 @@ static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
- rtc_time_to_tm(readl_relaxed(info->rcnr), tm);
+ rtc_time64_to_tm(readl_relaxed(info->rcnr), tm);
return 0;
}
static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
- ret = rtc_tm_to_time(tm, &time);
- if (ret == 0)
- writel_relaxed(time, info->rcnr);
- return ret;
+ writel_relaxed(rtc_tm_to_time64(tm), info->rcnr);
+
+ return 0;
}
static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -141,24 +138,18 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
spin_lock_irq(&info->lock);
- ret = rtc_tm_to_time(&alrm->time, &time);
- if (ret != 0)
- goto out;
writel_relaxed(readl_relaxed(info->rtsr) &
(RTSR_HZE | RTSR_ALE | RTSR_AL), info->rtsr);
- writel_relaxed(time, info->rtar);
+ writel_relaxed(rtc_tm_to_time64(&alrm->time), info->rtar);
if (alrm->enabled)
writel_relaxed(readl_relaxed(info->rtsr) | RTSR_ALE, info->rtsr);
else
writel_relaxed(readl_relaxed(info->rtsr) & ~RTSR_ALE, info->rtsr);
-out:
spin_unlock_irq(&info->lock);
- return ret;
+ return 0;
}
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -182,7 +173,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
{
- struct rtc_device *rtc;
int ret;
spin_lock_init(&info->lock);
@@ -211,15 +201,15 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
writel_relaxed(0, info->rcnr);
}
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(rtc)) {
+ info->rtc->ops = &sa1100_rtc_ops;
+ info->rtc->max_user_freq = RTC_FREQ;
+ info->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(info->rtc);
+ if (ret) {
clk_disable_unprepare(info->clk);
- return PTR_ERR(rtc);
+ return ret;
}
- info->rtc = rtc;
-
- rtc->max_user_freq = RTC_FREQ;
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_interrupt().
@@ -267,6 +257,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
info->irq_1hz = irq_1hz;
info->irq_alarm = irq_alarm;
+ info->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(info->rtc))
+ return PTR_ERR(info->rtc);
+
ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0,
"rtc 1Hz", &pdev->dev);
if (ret) {
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index feb1f8e52c00..9167b48014a1 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -504,8 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
if (unlikely(!rtc->res))
return -EBUSY;
- rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start,
- rtc->regsize);
+ rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start, rtc->regsize);
if (unlikely(!rtc->regbase))
return -EINVAL;
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index a2c9c55667cd..abf19435dbad 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -90,13 +90,13 @@ static int sirfsoc_rtc_read_alarm(struct device *dev,
*/
/* if alarm is in next overflow cycle */
if (rtc_count > rtc_alarm)
- rtc_time_to_tm((rtcdrv->overflow_rtc + 1)
- << (BITS_PER_LONG - RTC_SHIFT)
- | rtc_alarm >> RTC_SHIFT, &(alrm->time));
+ rtc_time64_to_tm((rtcdrv->overflow_rtc + 1)
+ << (BITS_PER_LONG - RTC_SHIFT)
+ | rtc_alarm >> RTC_SHIFT, &alrm->time);
else
- rtc_time_to_tm(rtcdrv->overflow_rtc
- << (BITS_PER_LONG - RTC_SHIFT)
- | rtc_alarm >> RTC_SHIFT, &(alrm->time));
+ rtc_time64_to_tm(rtcdrv->overflow_rtc
+ << (BITS_PER_LONG - RTC_SHIFT)
+ | rtc_alarm >> RTC_SHIFT, &alrm->time);
if (sirfsoc_rtc_readl(rtcdrv, RTC_STATUS) & SIRFSOC_RTC_AL0E)
alrm->enabled = 1;
@@ -113,7 +113,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
rtcdrv = dev_get_drvdata(dev);
if (alrm->enabled) {
- rtc_tm_to_time(&(alrm->time), &rtc_alarm);
+ rtc_alarm = rtc_tm_to_time64(&alrm->time);
spin_lock_irq(&rtcdrv->lock);
@@ -181,8 +181,8 @@ static int sirfsoc_rtc_read_time(struct device *dev,
cpu_relax();
} while (tmp_rtc != sirfsoc_rtc_readl(rtcdrv, RTC_CN));
- rtc_time_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT) |
- tmp_rtc >> RTC_SHIFT, tm);
+ rtc_time64_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT)
+ | tmp_rtc >> RTC_SHIFT, tm);
return 0;
}
@@ -193,7 +193,7 @@ static int sirfsoc_rtc_set_time(struct device *dev,
struct sirfsoc_rtc_drv *rtcdrv;
rtcdrv = dev_get_drvdata(dev);
- rtc_tm_to_time(tm, &rtc_time);
+ rtc_time = rtc_tm_to_time64(tm);
rtcdrv->overflow_rtc = rtc_time >> (BITS_PER_LONG - RTC_SHIFT);
@@ -341,28 +341,22 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
rtcdrv->overflow_rtc =
sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE);
- rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &sirfsoc_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtcdrv->rtc)) {
- err = PTR_ERR(rtcdrv->rtc);
- dev_err(&pdev->dev, "can't register RTC device\n");
- return err;
- }
+ rtcdrv->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdrv->rtc))
+ return PTR_ERR(rtcdrv->rtc);
+
+ rtcdrv->rtc->ops = &sirfsoc_rtc_ops;
+ rtcdrv->rtc->range_max = (1ULL << 60) - 1;
rtcdrv->irq = platform_get_irq(pdev, 0);
- err = devm_request_irq(
- &pdev->dev,
- rtcdrv->irq,
- sirfsoc_rtc_irq_handler,
- IRQF_SHARED,
- pdev->name,
- rtcdrv);
+ err = devm_request_irq(&pdev->dev, rtcdrv->irq, sirfsoc_rtc_irq_handler,
+ IRQF_SHARED, pdev->name, rtcdrv);
if (err) {
dev_err(&pdev->dev, "Unable to register for the SiRF SOC RTC IRQ\n");
return err;
}
- return 0;
+ return rtc_register_device(rtcdrv->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 757f4daa7181..35ee08aa7584 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/rtc.h>
@@ -264,6 +263,12 @@ static const struct regmap_config snvs_rtc_config = {
.reg_stride = 4,
};
+static void snvs_rtc_action(void *data)
+{
+ if (data)
+ clk_disable_unprepare(data);
+}
+
static int snvs_rtc_probe(struct platform_device *pdev)
{
struct snvs_rtc_data *data;
@@ -314,6 +319,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
}
}
+ ret = devm_add_action_or_reset(&pdev->dev, snvs_rtc_action, data->clk);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, data);
/* Initialize glitch detect */
@@ -326,7 +335,7 @@ static int snvs_rtc_probe(struct platform_device *pdev)
ret = snvs_rtc_enable(data, true);
if (ret) {
dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
- goto error_rtc_device_register;
+ return ret;
}
device_init_wakeup(&pdev->dev, true);
@@ -339,24 +348,13 @@ static int snvs_rtc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request irq %d: %d\n",
data->irq, ret);
- goto error_rtc_device_register;
+ return ret;
}
data->rtc->ops = &snvs_rtc_ops;
data->rtc->range_max = U32_MAX;
- ret = rtc_register_device(data->rtc);
- if (ret) {
- dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
- goto error_rtc_device_register;
- }
-
- return 0;
-error_rtc_device_register:
- if (data->clk)
- clk_disable_unprepare(data->clk);
-
- return ret;
+ return rtc_register_device(data->rtc);
}
static int __maybe_unused snvs_rtc_suspend_noirq(struct device *dev)
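The snvs conversion above leans on devm_add_action_or_reset() running the
action immediately when registration fails and at detach otherwise, so the
later error paths can simply return. A condensed sketch with a hypothetical
clock (my_clk_action/my_probe are illustrative names):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static void my_clk_action(void *data)
{
	clk_disable_unprepare(data);
}

static int my_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(&pdev->dev, my_clk_action, clk);
	if (ret)
		return ret;	/* action already ran; clk is disabled */

	/* errors from here on need no explicit clk cleanup */
	return 0;
}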
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index a7d49329d626..37a26279e107 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -27,7 +27,7 @@ static u32 starfire_get_time(void)
static int starfire_read_time(struct device *dev, struct rtc_time *tm)
{
- rtc_time_to_tm(starfire_get_time(), tm);
+ rtc_time64_to_tm(starfire_get_time(), tm);
return 0;
}
@@ -39,14 +39,16 @@ static int __init starfire_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- rtc = devm_rtc_device_register(&pdev->dev, "starfire",
- &starfire_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->ops = &starfire_rtc_ops;
+ rtc->range_max = U32_MAX;
+
platform_set_drvdata(pdev, rtc);
- return 0;
+ return rtc_register_device(rtc);
}
static struct platform_driver starfire_rtc_driver = {
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 852f5f3b3592..e2b8b150bcb4 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -108,7 +108,6 @@
* driver, even though it is somewhat limited.
*/
#define SUN6I_YEAR_MIN 1970
-#define SUN6I_YEAR_MAX 2033
#define SUN6I_YEAR_OFF (SUN6I_YEAR_MIN - 1900)
/*
@@ -250,19 +249,17 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
writel(reg, rtc->base + SUN6I_LOSC_CTRL);
}
- /* Switch to the external, more precise, oscillator */
- reg |= SUN6I_LOSC_CTRL_EXT_OSC;
- if (rtc->data->has_losc_en)
- reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN;
+ /* Switch to the external, more precise, oscillator, if present */
+ if (of_get_property(node, "clocks", NULL)) {
+ reg |= SUN6I_LOSC_CTRL_EXT_OSC;
+ if (rtc->data->has_losc_en)
+ reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN;
+ }
writel(reg, rtc->base + SUN6I_LOSC_CTRL);
/* Yes, I know, this is ugly. */
sun6i_rtc = rtc;
- /* Deal with old DTs */
- if (!of_get_property(node, "clocks", NULL))
- goto err;
-
/* Only read IOSC name from device tree if it is exported */
if (rtc->data->export_iosc)
of_property_read_string_index(node, "clock-output-names", 2,
@@ -279,11 +276,13 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
}
parents[0] = clk_hw_get_name(rtc->int_osc);
+ /* If there is no external oscillator, this will be NULL and ... */
parents[1] = of_clk_get_parent_name(node, 0);
rtc->hw.init = &init;
init.parent_names = parents;
+ /* ... number of clock parents will be 1. */
init.num_parents = of_clk_get_parent_count(node) + 1;
of_property_read_string_index(node, "clock-output-names", 0,
&init.name);
@@ -499,7 +498,7 @@ static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN);
wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN);
- rtc_time_to_tm(chip->alarm, &wkalrm->time);
+ rtc_time64_to_tm(chip->alarm, &wkalrm->time);
return 0;
}
@@ -520,8 +519,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
return -EINVAL;
}
- rtc_tm_to_time(alrm_tm, &time_set);
- rtc_tm_to_time(&tm_now, &time_now);
+ time_set = rtc_tm_to_time64(alrm_tm);
+ time_now = rtc_tm_to_time64(&tm_now);
if (time_set <= time_now) {
dev_err(dev, "Date to set in the past\n");
return -EINVAL;
@@ -569,14 +568,6 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
u32 date = 0;
u32 time = 0;
- int year;
-
- year = rtc_tm->tm_year + 1900;
- if (year < SUN6I_YEAR_MIN || year > SUN6I_YEAR_MAX) {
- dev_err(dev, "rtc only supports year in range %d - %d\n",
- SUN6I_YEAR_MIN, SUN6I_YEAR_MAX);
- return -EINVAL;
- }
rtc_tm->tm_year -= SUN6I_YEAR_OFF;
rtc_tm->tm_mon += 1;
@@ -585,7 +576,7 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
- if (is_leap_year(year))
+ if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
date |= SUN6I_LEAP_SET_VALUE(1);
time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) |
@@ -726,12 +717,16 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- chip->rtc = devm_rtc_device_register(&pdev->dev, "rtc-sun6i",
- &sun6i_rtc_ops, THIS_MODULE);
- if (IS_ERR(chip->rtc)) {
- dev_err(&pdev->dev, "unable to register device\n");
+ chip->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
- }
+
+ chip->rtc->ops = &sun6i_rtc_ops;
+ chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
+
+ ret = rtc_register_device(chip->rtc);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "RTC enabled\n");
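The new range_max replaces the old SUN6I_YEAR_MAX bounds check; the constant
checks out by hand:

	1970-01-01 to 2034-01-01 spans 64 years, 16 of them leap
	(1972, 1976, ..., 2032), so:

	(64 * 365 + 16) * 86400 - 1 = 23376 * 86400 - 1 = 2019686399

	i.e. 2033-12-31 23:59:59 UTC, matching the comment on range_max.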
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 5786866c09e9..4b1077e2f826 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -38,6 +38,8 @@
#define RTC_CALIB_DEF 0x198233
#define RTC_CALIB_MASK 0x1FFFFF
+#define RTC_ALRM_MASK BIT(1)
+#define RTC_MSEC 1000
struct xlnx_rtc_dev {
struct rtc_device *rtc;
@@ -123,11 +125,28 @@ static int xlnx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int xlnx_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
{
struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned int status;
+ ulong timeout;
+
+ timeout = jiffies + msecs_to_jiffies(RTC_MSEC);
+
+ if (enabled) {
+ while (1) {
+ status = readl(xrtcdev->reg_base + RTC_INT_STS);
+			if (!(status & RTC_ALRM_MASK))
+ break;
+
+ if (time_after_eq(jiffies, timeout)) {
+				dev_err(dev, "timed out clearing alarm status bit\n");
+ return -ETIMEDOUT;
+ }
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS);
+ }
- if (enabled)
writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_EN);
- else
+ } else {
writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS);
+ }
return 0;
}
@@ -183,8 +202,8 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
if (!(status & (RTC_INT_SEC | RTC_INT_ALRM)))
return IRQ_NONE;
- /* Clear RTC_INT_ALRM interrupt only */
- writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS);
+ /* Disable RTC_INT_ALRM interrupt only */
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS);
if (status & RTC_INT_ALRM)
rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_AF);
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index b7ca7d79fb28..950fac0d41ff 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -279,7 +279,7 @@ static bool rtc_does_wakealarm(struct rtc_device *rtc)
static umode_t rtc_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct rtc_device *rtc = to_rtc_device(dev);
umode_t mode = attr->mode;
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index a8682f69effc..376f1efbbb86 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -26,7 +26,6 @@ config DASD
def_tristate y
prompt "Support for DASD devices"
depends on CCW && BLOCK
- select IOSCHED_DEADLINE
help
Enable this option if you want to access DASDs directly utilizing
S/390s channel subsystem commands. This is necessary for running
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index ee73b0607e47..4691a3c35d72 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1987,7 +1987,7 @@ dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
* DASD_3990_ERP_COMPOUND_CONFIG
*
* DESCRIPTION
- * Handles the compound ERP action for configruation
+ * Handles the compound ERP action for configuration
* dependent error.
* Note: duplex handling is not implemented (yet).
*
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 8d4971645cf1..facb588d09e4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -58,7 +58,7 @@ struct dasd_diag_private {
struct dasd_diag_req {
unsigned int block_count;
- struct dasd_diag_bio bio[0];
+ struct dasd_diag_bio bio[];
};
static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
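The bio[0]-to-bio[] changes here and below convert GCC zero-length arrays to
C99 flexible array members; allocation sizing is unchanged and is clearest
with the struct_size() helper. A fragment-level sketch for the struct above
(count is a caller-supplied element count):

#include <linux/overflow.h>
#include <linux/slab.h>

	struct dasd_diag_req *req;

	/* overflow-checked: header plus 'count' trailing bio elements */
	req = kzalloc(struct_size(req, bio, count), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	req->block_count = count;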
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 6943508d0f1d..ca24a78a256e 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -220,7 +220,7 @@ struct LRE_eckd_data {
__u8 imbedded_count;
__u8 extended_operation;
__u16 extended_parameter_length;
- __u8 extended_parameter[0];
+ __u8 extended_parameter[];
} __attribute__ ((packed));
/* Prefix data for format 0x00 and 0x01 */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 63502ca537eb..384edffe5cb4 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -57,11 +57,26 @@ static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
return copy_to_iter(addr, bytes, i);
}
+static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
+ pgoff_t pgoff, size_t nr_pages)
+{
+ long rc;
+ void *kaddr;
+
+ rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+ if (rc < 0)
+ return rc;
+ memset(kaddr, 0, nr_pages << PAGE_SHIFT);
+ dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
+ return 0;
+}
+
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
.dax_supported = generic_fsdax_supported,
.copy_from_iter = dcssblk_dax_copy_from_iter,
.copy_to_iter = dcssblk_dax_copy_to_iter,
+ .zero_page_range = dcssblk_dax_zero_page_range,
};
struct dcssblk_dev_info {
@@ -636,10 +651,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
}
dev_info->gd->major = dcssblk_major;
dev_info->gd->fops = &dcssblk_devops;
- dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
+ dev_info->dcssblk_queue =
+ blk_alloc_queue(dcssblk_make_request, NUMA_NO_NODE);
dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
- blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
@@ -680,8 +695,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
&dcssblk_dax_ops, DAXDEV_F_SYNC);
- if (!dev_info->dax_dev) {
- rc = -ENOMEM;
+ if (IS_ERR(dev_info->dax_dev)) {
+ rc = PTR_ERR(dev_info->dax_dev);
+ dev_info->dax_dev = NULL;
goto put_dev;
}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 3df5d68d09f0..45a04daec89e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -343,14 +343,14 @@ static int __init xpram_setup_blkdev(void)
xpram_disks[i] = alloc_disk(1);
if (!xpram_disks[i])
goto out;
- xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
+ xpram_queues[i] = blk_alloc_queue(xpram_make_request,
+ NUMA_NO_NODE);
if (!xpram_queues[i]) {
put_disk(xpram_disks[i]);
goto out;
}
blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
- blk_queue_make_request(xpram_queues[i], xpram_make_request);
blk_queue_logical_block_size(xpram_queues[i], 4096);
}
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index e7cf0a1d4f71..92757f9bd010 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -398,7 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
}
if (dstat == 0x08)
break;
- /* else, fall through */
+ fallthrough;
case 0x04:
/* Device end interrupt. */
if ((raw = req->info) == NULL)
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 0e70397d6e04..37ee8f698c3b 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -137,7 +137,7 @@ static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
while ((*cmd != '\0') && !iscntrl(*cmd))
++cmd;
ftp->fname = start;
- /* fall through */
+ fallthrough;
default:
*cmd = '\0';
break;
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 3afaa35f7351..08f36e973b43 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -211,7 +211,7 @@ struct string
struct list_head update;
unsigned long size;
unsigned long len;
- char string[0];
+ char string[];
} __attribute__ ((aligned(8)));
static inline struct string *
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 37d42de06079..a864b21af602 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -406,7 +406,7 @@ static void __init add_memory_merged(u16 rn)
if (!size)
goto skip_add;
for (addr = start; addr < start + size; addr += block_size)
- add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
+ add_memory(0, addr, block_size);
skip_add:
first_rn = rn;
num = 1;
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index 995e9196852e..a3e5a5fb0c1e 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -39,7 +39,7 @@ struct err_notify_evbuf {
u8 atype;
u32 fh;
u32 fid;
- u8 data[0];
+ u8 data[];
} __packed;
struct err_notify_sccb {
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 13f97fd73aca..644b61013679 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -214,7 +214,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
break;
case SDIAS_EVSTATE_NO_DATA:
TRACE("no data\n");
- /* fall through */
+ fallthrough;
default:
pr_err("Error from SCLP while copying hsa. Event status = %x\n",
sdias_evbuf.event_status);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 3e0b2f63a9d2..380e6a67719c 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -677,7 +677,7 @@ tape_generic_remove(struct ccw_device *cdev)
switch (device->tape_state) {
case TS_INIT:
tape_state_set(device, TS_NOT_OPER);
- /* fallthrough */
+ fallthrough;
case TS_NOT_OPER:
/*
* Nothing to do.
@@ -950,7 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
break;
if (device->tape_state == TS_UNUSED)
break;
- /* fallthrough */
+ fallthrough;
default:
if (device->tape_state == TS_BLKUSE)
break;
@@ -1118,7 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case -ETIMEDOUT:
DBF_LH(1, "(%08x): Request timed out\n",
device->cdev_id);
- /* fallthrough */
+ fallthrough;
case -EIO:
__tape_end_request(device, request, -EIO);
break;
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 427b2e24a8ce..cb466ed7eb5e 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -105,16 +105,12 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-static struct irqaction airq_interrupt = {
- .name = "AIO",
- .handler = do_airq_interrupt,
-};
-
void __init init_airq_interrupts(void)
{
irq_set_chip_and_handler(THIN_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
- setup_irq(THIN_INTERRUPT, &airq_interrupt);
+ if (request_irq(THIN_INTERRUPT, do_airq_interrupt, 0, "AIO", NULL))
+ panic("Failed to register AIO interrupt\n");
}
static inline unsigned long iv_size(unsigned long bits)
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index b42a93736668..483a9ecfcbb1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -485,79 +485,10 @@ static void ccwgroup_shutdown(struct device *dev)
gdrv->shutdown(gdev);
}
-static int ccwgroup_pm_prepare(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
- /* Fail while device is being set online/offline. */
- if (atomic_read(&gdev->onoff))
- return -EAGAIN;
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return 0;
-
- return gdrv->prepare ? gdrv->prepare(gdev) : 0;
-}
-
-static void ccwgroup_pm_complete(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return;
-
- if (gdrv->complete)
- gdrv->complete(gdev);
-}
-
-static int ccwgroup_pm_freeze(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return 0;
-
- return gdrv->freeze ? gdrv->freeze(gdev) : 0;
-}
-
-static int ccwgroup_pm_thaw(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return 0;
-
- return gdrv->thaw ? gdrv->thaw(gdev) : 0;
-}
-
-static int ccwgroup_pm_restore(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return 0;
-
- return gdrv->restore ? gdrv->restore(gdev) : 0;
-}
-
-static const struct dev_pm_ops ccwgroup_pm_ops = {
- .prepare = ccwgroup_pm_prepare,
- .complete = ccwgroup_pm_complete,
- .freeze = ccwgroup_pm_freeze,
- .thaw = ccwgroup_pm_thaw,
- .restore = ccwgroup_pm_restore,
-};
-
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
- .pm = &ccwgroup_pm_ops,
};
bool dev_is_ccwgroup(struct device *dev)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6392a1b95b02..1ca73c2e5a8f 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -180,11 +180,12 @@ EXPORT_SYMBOL_GPL(chsc_ssqd);
* @scssc: request and response block for SADC
* @summary_indicator_addr: summary indicator address
* @subchannel_indicator_addr: subchannel indicator address
+ * @isc: Interruption Subclass for this subchannel
*
* Returns 0 on success.
*/
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
{
memset(scssc, 0, sizeof(*scssc));
scssc->request.length = 0x0fe0;
@@ -196,7 +197,7 @@ int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
scssc->ks = PAGE_DEFAULT_KEY >> 4;
scssc->kc = PAGE_DEFAULT_KEY >> 4;
- scssc->isc = QDIO_AIRQ_ISC;
+ scssc->isc = isc;
scssc->schid = schid;
/* enable the time delay disablement facility */
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index e57d68e325a3..34de6d77442c 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -163,7 +163,8 @@ void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr,
+ u8 isc);
int chsc_sgib(u32 origin);
int chsc_error_from_response(int response);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 18f5458f90e8..6d716db2a46a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -563,16 +563,12 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-static struct irqaction io_interrupt = {
- .name = "I/O",
- .handler = do_cio_interrupt,
-};
-
void __init init_cio_interrupts(void)
{
irq_set_chip_and_handler(IO_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
- setup_irq(IO_INTERRUPT, &io_interrupt);
+ if (request_irq(IO_INTERRUPT, do_cio_interrupt, 0, "I/O", NULL))
+ panic("Failed to register I/O interrupt\n");
}
#ifdef CONFIG_CCW_CONSOLE
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 0c6245fc7706..b29fe8d50baf 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -849,8 +849,10 @@ static void io_subchannel_register(struct ccw_device *cdev)
* Now we know this subchannel will stay, we can throw
* our delayed uevent.
*/
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
/* make it known to the system */
ret = ccw_device_add(cdev);
if (ret) {
@@ -1058,8 +1060,11 @@ static int io_subchannel_probe(struct subchannel *sch)
* Throw the delayed uevent for the subchannel, register
* the ccw_device and exit.
*/
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ /* should always be the case for the console */
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
cdev = sch_get_cdev(sch);
rc = ccw_device_add(cdev);
if (rc) {
@@ -1262,7 +1267,7 @@ static int recovery_check(struct device *dev, void *data)
sch = to_subchannel(cdev->dev.parent);
if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
break;
- /* fall through */
+ fallthrough;
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
cdev->private->dev_id.ssid,
@@ -2091,7 +2096,7 @@ static void ccw_device_todo(struct work_struct *work)
case CDEV_TODO_UNREG_EVAL:
if (!sch_is_pseudo_sch(sch))
css_schedule_eval(sch->schid);
- /* fall-through */
+ fallthrough;
case CDEV_TODO_UNREG:
if (sch_is_pseudo_sch(sch))
ccw_device_unregister(cdev);
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 835de44dbbcc..77d0ea7b381b 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -13,7 +13,7 @@
struct idset {
int num_ssid;
int num_id;
- unsigned long bitmap[0];
+ unsigned long bitmap[];
};
static inline unsigned long bitmap_size(int num_ssid, int num_id)
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ff74eb5fce50..b8453b594679 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -177,8 +177,8 @@ struct qdio_queue_perf_stat {
unsigned int nr_sbal_total;
};
-enum qdio_queue_irq_states {
- QDIO_QUEUE_IRQS_DISABLED,
+enum qdio_irq_poll_states {
+ QDIO_IRQ_DISABLED,
};
struct qdio_input_q {
@@ -188,10 +188,6 @@ struct qdio_input_q {
int ack_count;
/* last time of noticing incoming data */
u64 timestamp;
- /* upper-layer polling flag */
- unsigned long queue_irq_state;
- /* callback to start upper-layer polling */
- void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};
struct qdio_output_q {
@@ -254,7 +250,6 @@ struct qdio_q {
/* upper-layer program handler */
qdio_handler_t (*handler);
- struct dentry *debugfs_q;
struct qdio_irq *irq_ptr;
struct sl *sl;
/*
@@ -270,7 +265,6 @@ struct qdio_irq {
struct ccw_device *cdev;
struct list_head entry; /* list of thinint devices */
struct dentry *debugfs_dev;
- struct dentry *debugfs_perf;
unsigned long int_parm;
struct subchannel_id schid;
@@ -299,6 +293,9 @@ struct qdio_irq {
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+ void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
+ unsigned long poll_state;
+
debug_info_t *debug_area;
struct mutex setup_mutex;
struct qdio_dev_perf_stat perf_stat;
@@ -377,7 +374,6 @@ int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
-void clear_nonshared_ind(struct qdio_irq *);
int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
@@ -392,12 +388,9 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
struct subchannel_id *schid,
struct qdio_ssqd_desc *data);
-int qdio_setup_irq(struct qdio_initialize *init_data);
-void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
- struct ccw_device *cdev);
+int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
void qdio_release_memory(struct qdio_irq *irq_ptr);
-int qdio_setup_create_sysfs(struct ccw_device *cdev);
-void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
int qdio_enable_async_operation(struct qdio_output_q *q);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 9c0370b27426..286b044fb027 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -58,30 +58,16 @@ static void qdio_clear_dbf_list(void)
mutex_unlock(&qdio_dbf_list_mutex);
}
-int qdio_allocate_dbf(struct qdio_initialize *init_data,
- struct qdio_irq *irq_ptr)
+int qdio_allocate_dbf(struct qdio_irq *irq_ptr)
{
char text[QDIO_DBF_NAME_LEN];
struct qdio_dbf_entry *new_entry;
- DBF_EVENT("qfmt:%1d", init_data->q_format);
- DBF_HEX(init_data->adapter_name, 8);
- DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
- DBF_HEX(&init_data->qib_param_field, sizeof(void *));
- DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
- DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
- DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
- init_data->no_output_qs);
- DBF_HEX(&init_data->input_handler, sizeof(void *));
- DBF_HEX(&init_data->output_handler, sizeof(void *));
- DBF_HEX(&init_data->int_parm, sizeof(long));
- DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
- DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
/* allocate trace view for the interface */
snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
- dev_name(&init_data->cdev->dev));
+ dev_name(&irq_ptr->cdev->dev));
irq_ptr->debug_area = qdio_get_dbf_entry(text);
if (irq_ptr->debug_area)
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
@@ -128,8 +114,8 @@ static int qstat_show(struct seq_file *m, void *v)
q->u.in.ack_start, q->u.in.ack_count);
seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
*(u8 *)q->irq_ptr->dsci,
- test_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state));
+ test_bit(QDIO_IRQ_DISABLED,
+ &q->irq_ptr->poll_state));
}
seq_printf(m, "SBAL states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
@@ -190,6 +176,23 @@ static int qstat_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(qstat);
+static int ssqd_show(struct seq_file *m, void *v)
+{
+ struct ccw_device *cdev = m->private;
+ struct qdio_ssqd_desc ssqd;
+ int rc;
+
+ rc = qdio_get_ssqd_desc(cdev, &ssqd);
+ if (rc)
+ return rc;
+
+ seq_hex_dump(m, "", DUMP_PREFIX_NONE, 16, 4, &ssqd, sizeof(ssqd),
+ false);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(ssqd);
+
static char *qperf_names[] = {
"Assumed adapter interrupts",
"QDIO interrupts",
@@ -284,53 +287,37 @@ static const struct file_operations debugfs_perf_fops = {
.release = single_release,
};
-static void setup_debugfs_entry(struct qdio_q *q)
+static void setup_debugfs_entry(struct dentry *parent, struct qdio_q *q)
{
char name[QDIO_DEBUGFS_NAME_LEN];
snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
q->is_input_q ? "input" : "output",
q->nr);
- q->debugfs_q = debugfs_create_file(name, 0444,
- q->irq_ptr->debugfs_dev, q, &qstat_fops);
- if (IS_ERR(q->debugfs_q))
- q->debugfs_q = NULL;
+ debugfs_create_file(name, 0444, parent, q, &qstat_fops);
}
-void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
- irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+ irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&irq_ptr->cdev->dev),
debugfs_root);
- if (IS_ERR(irq_ptr->debugfs_dev))
- irq_ptr->debugfs_dev = NULL;
-
- irq_ptr->debugfs_perf = debugfs_create_file("statistics",
- S_IFREG | S_IRUGO | S_IWUSR,
- irq_ptr->debugfs_dev, irq_ptr,
- &debugfs_perf_fops);
- if (IS_ERR(irq_ptr->debugfs_perf))
- irq_ptr->debugfs_perf = NULL;
+ debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
+ irq_ptr->debugfs_dev, irq_ptr, &debugfs_perf_fops);
+ debugfs_create_file("ssqd", 0444, irq_ptr->debugfs_dev, irq_ptr->cdev,
+ &ssqd_fops);
for_each_input_queue(irq_ptr, q, i)
- setup_debugfs_entry(q);
+ setup_debugfs_entry(irq_ptr->debugfs_dev, q);
for_each_output_queue(irq_ptr, q, i)
- setup_debugfs_entry(q);
+ setup_debugfs_entry(irq_ptr->debugfs_dev, q);
}
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q;
- int i;
-
- for_each_input_queue(irq_ptr, q, i)
- debugfs_remove(q->debugfs_q);
- for_each_output_queue(irq_ptr, q, i)
- debugfs_remove(q->debugfs_q);
- debugfs_remove(irq_ptr->debugfs_perf);
- debugfs_remove(irq_ptr->debugfs_dev);
+ debugfs_remove_recursive(irq_ptr->debugfs_dev);
}
int __init qdio_debug_init(void)
@@ -352,7 +339,7 @@ int __init qdio_debug_init(void)
void qdio_debug_exit(void)
{
qdio_clear_dbf_list();
- debugfs_remove(debugfs_root);
+ debugfs_remove_recursive(debugfs_root);
debug_unregister(qdio_dbf_setup);
debug_unregister(qdio_dbf_error);
}
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index f85f5fa7cefc..0dfba085f360 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -64,10 +64,8 @@ static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
debug_event(dev->debug_area, level, addr, len);
}
-int qdio_allocate_dbf(struct qdio_initialize *init_data,
- struct qdio_irq *irq_ptr);
-void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
- struct ccw_device *cdev);
+int qdio_allocate_dbf(struct qdio_irq *irq_ptr);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr);
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
int qdio_debug_init(void);
void qdio_debug_exit(void);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3475317c42e5..bcc3ab14e72d 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -950,19 +950,14 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
- for_each_input_queue(irq_ptr, q, i) {
- if (q->u.in.queue_start_poll) {
- /* skip if polling is enabled or already in work */
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state)) {
- QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
- continue;
- }
- q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
- q->irq_ptr->int_parm);
- } else {
+ if (irq_ptr->irq_poll) {
+ if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+ irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+ else
+ QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+ } else {
+ for_each_input_queue(irq_ptr, q, i)
tasklet_schedule(&q->tasklet);
- }
}
if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
@@ -1105,9 +1100,8 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
-static void qdio_shutdown_queues(struct ccw_device *cdev)
+static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
int i;
@@ -1155,7 +1149,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
tiqdio_remove_device(irq_ptr);
- qdio_shutdown_queues(cdev);
+ qdio_shutdown_queues(irq_ptr);
qdio_shutdown_debug_entries(irq_ptr);
/* cleanup subchannel */
@@ -1226,26 +1220,21 @@ EXPORT_SYMBOL_GPL(qdio_free);
/**
* qdio_allocate - allocate qdio queues and associated data
- * @init_data: initialization data
+ * @cdev: associated ccw device
+ * @no_input_qs: allocate this number of Input Queues
+ * @no_output_qs: allocate this number of Output Queues
*/
-int qdio_allocate(struct qdio_initialize *init_data)
+int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
+ unsigned int no_output_qs)
{
struct subchannel_id schid;
struct qdio_irq *irq_ptr;
- ccw_device_get_schid(init_data->cdev, &schid);
+ ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qallocate:%4x", schid.sch_no);
- if ((init_data->no_input_qs && !init_data->input_handler) ||
- (init_data->no_output_qs && !init_data->output_handler))
- return -EINVAL;
-
- if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
- (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
- return -EINVAL;
-
- if ((!init_data->input_sbal_addr_array) ||
- (!init_data->output_sbal_addr_array))
+ if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
+ no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
return -EINVAL;
/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
@@ -1253,10 +1242,14 @@ int qdio_allocate(struct qdio_initialize *init_data)
if (!irq_ptr)
goto out_err;
+ irq_ptr->cdev = cdev;
mutex_init(&irq_ptr->setup_mutex);
- if (qdio_allocate_dbf(init_data, irq_ptr))
+ if (qdio_allocate_dbf(irq_ptr))
goto out_rel;
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
+ no_output_qs);
+
/*
* Allocate a page for the chsc calls in qdio_establish.
* Must be pre-allocated since a zfcp recovery will call
@@ -1272,12 +1265,11 @@ int qdio_allocate(struct qdio_initialize *init_data)
if (!irq_ptr->qdr)
goto out_rel;
- if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
- init_data->no_output_qs))
+ if (qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs))
goto out_rel;
INIT_LIST_HEAD(&irq_ptr->entry);
- init_data->cdev->private->qdio_data = irq_ptr;
+ cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
out_rel:
@@ -1309,26 +1301,54 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
DBF_EVENT("use_cq:%d", use_cq);
}
+static void qdio_trace_init_data(struct qdio_irq *irq,
+ struct qdio_initialize *data)
+{
+ DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
+ DBF_DEV_HEX(irq, data->adapter_name, 8, DBF_ERR);
+ DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
+ DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
+ DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
+ data->no_output_qs);
+ DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
+ DBF_ERR);
+}
+
/**
* qdio_establish - establish queues on a qdio subchannel
+ * @cdev: associated ccw device
* @init_data: initialization data
*/
-int qdio_establish(struct qdio_initialize *init_data)
+int qdio_establish(struct ccw_device *cdev,
+ struct qdio_initialize *init_data)
{
- struct ccw_device *cdev = init_data->cdev;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- struct qdio_irq *irq_ptr;
int rc;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qestablish:%4x", schid.sch_no);
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
+ if ((init_data->no_input_qs && !init_data->input_handler) ||
+ (init_data->no_output_qs && !init_data->output_handler))
+ return -EINVAL;
+
+ if (!init_data->input_sbal_addr_array ||
+ !init_data->output_sbal_addr_array)
+ return -EINVAL;
+
mutex_lock(&irq_ptr->setup_mutex);
- qdio_setup_irq(init_data);
+ qdio_trace_init_data(irq_ptr, init_data);
+ qdio_setup_irq(irq_ptr, init_data);
rc = qdio_establish_thinint(irq_ptr);
if (rc) {
@@ -1374,8 +1394,8 @@ int qdio_establish(struct qdio_initialize *init_data)
qdio_init_buf_states(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_print_subchannel_info(irq_ptr, cdev);
- qdio_setup_debug_entries(irq_ptr, cdev);
+ qdio_print_subchannel_info(irq_ptr);
+ qdio_setup_debug_entries(irq_ptr);
return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
@@ -1386,14 +1406,13 @@ EXPORT_SYMBOL_GPL(qdio_establish);
*/
int qdio_activate(struct ccw_device *cdev)
{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- struct qdio_irq *irq_ptr;
int rc;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qactivate:%4x", schid.sch_no);
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
@@ -1610,24 +1629,24 @@ EXPORT_SYMBOL_GPL(do_QDIO);
/**
* qdio_start_irq - process input buffers
* @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
*
* Return codes
* 0 - success
* 1 - irqs not started since new data is available
*/
-int qdio_start_irq(struct ccw_device *cdev, int nr)
+int qdio_start_irq(struct ccw_device *cdev)
{
struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ unsigned int i;
if (!irq_ptr)
return -ENODEV;
- q = irq_ptr->input_qs[nr];
- clear_nonshared_ind(irq_ptr);
- qdio_stop_polling(q);
- clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+ for_each_input_queue(irq_ptr, q, i)
+ qdio_stop_polling(q);
+
+ clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
/*
* We need to check again to not lose initiative after
@@ -1635,13 +1654,16 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
- if (!qdio_inbound_q_done(q, q->first_to_check))
- goto rescan;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ if (!qdio_inbound_q_done(q, q->first_to_check))
+ goto rescan;
+ }
+
return 0;
rescan:
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state))
+ if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
return 0;
else
return 1;
@@ -1729,23 +1751,19 @@ EXPORT_SYMBOL(qdio_get_next_buffers);
/**
* qdio_stop_irq - disable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
*
* Return codes
* 0 - interrupts were already disabled
* 1 - interrupts successfully disabled
*/
-int qdio_stop_irq(struct ccw_device *cdev, int nr)
+int qdio_stop_irq(struct ccw_device *cdev)
{
- struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- q = irq_ptr->input_qs[nr];
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state))
+ if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
return 0;
else
return 1;
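With the queue number dropped from qdio_start_irq()/qdio_stop_irq(),
interrupt re-arming is now per device rather than per input queue. A
hypothetical upper-layer poll loop under the new interface
(my_process_input_queues() and struct my_device are assumptions, not part
of this series):

/* NAPI-style consumer sketch: qdio_start_irq() returns 0 when
 * interrupts were re-armed and 1 when new data raced in, in which
 * case the driver stays in polling mode. */
static int my_poll(struct my_device *dev, int budget)
{
	int work = my_process_input_queues(dev, budget);

	if (work < budget && qdio_start_irq(dev->cdev))
		work = budget;	/* keep polling */

	return work;
}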
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index e115623b86b2..3083edd61f0c 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -213,8 +213,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
struct qdio_q *q;
- struct qdio_buffer **input_sbal_array = qdio_init->input_sbal_addr_array;
- struct qdio_buffer **output_sbal_array = qdio_init->output_sbal_addr_array;
struct qdio_outbuf_state *output_sbal_state_array =
qdio_init->output_sbal_state_array;
int i;
@@ -224,11 +222,9 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
- q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
- qdio_init->queue_start_poll_array[i] : NULL;
- setup_storage_lists(q, irq_ptr, input_sbal_array, i);
- input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+ setup_storage_lists(q, irq_ptr,
+ qdio_init->input_sbal_addr_array[i], i);
if (is_thinint_irq(irq_ptr)) {
tasklet_init(&q->tasklet, tiqdio_inbound_processing,
@@ -247,8 +243,8 @@ static void setup_queues(struct qdio_irq *irq_ptr,
output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
q->is_input_q = 0;
- setup_storage_lists(q, irq_ptr, output_sbal_array, i);
- output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+ setup_storage_lists(q, irq_ptr,
+ qdio_init->output_sbal_addr_array[i], i);
tasklet_init(&q->tasklet, qdio_outbound_processing,
(unsigned long) q);
@@ -451,10 +447,10 @@ static void setup_qib(struct qdio_irq *irq_ptr,
memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
-int qdio_setup_irq(struct qdio_initialize *init_data)
+int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
{
+ struct ccw_device *cdev = irq_ptr->cdev;
struct ciw *ciw;
- struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -462,8 +458,9 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
- irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
- irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+ irq_ptr->debugfs_dev = NULL;
+ irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
+ irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
/* wipes qib.ac, required by ar7063 */
memset(irq_ptr->qdr, 0, sizeof(struct qdr));
@@ -471,11 +468,17 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
- irq_ptr->cdev = init_data->cdev;
irq_ptr->scan_threshold = init_data->scan_threshold;
- ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
+ ccw_device_get_schid(cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
+ if (init_data->irq_poll) {
+ irq_ptr->irq_poll = init_data->irq_poll;
+ set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
+ } else {
+ irq_ptr->irq_poll = NULL;
+ }
+
setup_qib(irq_ptr, init_data);
qdio_setup_thinint(irq_ptr);
set_impl_params(irq_ptr, init_data->qib_param_field_format,
@@ -489,14 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
/* get qdio commands */
- ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
return -EINVAL;
}
irq_ptr->equeue = *ciw;
- ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
return -EINVAL;
@@ -504,21 +507,20 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->aqueue = *ciw;
/* set new interrupt handler */
- spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
- irq_ptr->orig_handler = init_data->cdev->handler;
- init_data->cdev->handler = qdio_int_handler;
- spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ irq_ptr->orig_handler = cdev->handler;
+ cdev->handler = qdio_int_handler;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
return 0;
}
-void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
- struct ccw_device *cdev)
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
{
char s[80];
snprintf(s, 80, "qdio: %s %s on SC %x using "
"AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
- dev_name(&cdev->dev),
+ dev_name(&irq_ptr->cdev->dev),
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
irq_ptr->schid.sch_no,
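
qdio_setup_irq() now latches an optional device-wide irq_poll callback from
the qdio_initialize block and starts it in the disabled state. A sketch of
how an upper-layer driver might fill those fields; only irq_poll and
int_parm are taken from this hunk, the rest is hypothetical:

#include <asm/qdio.h>
#include <linux/netdevice.h>

struct my_card {
	struct napi_struct napi;
};

/* One invocation per adapter interrupt for the whole device, not one
 * per input queue as with the old queue_start_poll_array. */
static void my_irq_poll(struct ccw_device *cdev, unsigned long data)
{
	struct my_card *card = (struct my_card *)data;

	napi_schedule(&card->napi);	/* defer the real work */
}

static void my_fill_qdio_init(struct qdio_initialize *init,
			      struct my_card *card)
{
	init->irq_poll = my_irq_poll;	/* NULL keeps the tasklet path */
	init->int_parm = (unsigned long)card;
}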
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 7c4e4ec08a12..ae50373617cd 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -82,36 +82,16 @@ void tiqdio_remove_device(struct qdio_irq *irq_ptr)
INIT_LIST_HEAD(&irq_ptr->entry);
}
-static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
-{
- return irq_ptr->nr_input_qs > 1;
-}
-
static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
-static inline int shared_ind(struct qdio_irq *irq_ptr)
-{
- return references_shared_dsci(irq_ptr) ||
- has_multiple_inq_on_dsci(irq_ptr);
-}
-
-void clear_nonshared_ind(struct qdio_irq *irq_ptr)
-{
- if (!is_thinint_irq(irq_ptr))
- return;
- if (shared_ind(irq_ptr))
- return;
- xchg(irq_ptr->dsci, 0);
-}
-
int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return 0;
- if (shared_ind(irq_ptr))
+ if (references_shared_dsci(irq_ptr))
return 0;
if (*irq_ptr->dsci)
return 1;
@@ -131,32 +111,24 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
struct qdio_q *q;
int i;
- if (!references_shared_dsci(irq) &&
- has_multiple_inq_on_dsci(irq))
+ if (!references_shared_dsci(irq))
xchg(irq->dsci, 0);
+ if (irq->irq_poll) {
+ if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
+ irq->irq_poll(irq->cdev, irq->int_parm);
+ else
+ QDIO_PERF_STAT_INC(irq, int_discarded);
+
+ return;
+ }
+
for_each_input_queue(irq, q, i) {
- if (q->u.in.queue_start_poll) {
- /* skip if polling is enabled or already in work */
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state)) {
- QDIO_PERF_STAT_INC(irq, int_discarded);
- continue;
- }
-
- /* avoid dsci clear here, done after processing */
- q->u.in.queue_start_poll(irq->cdev, q->nr,
- irq->int_parm);
- } else {
- if (!shared_ind(irq))
- xchg(irq->dsci, 0);
-
- /*
- * Call inbound processing but not directly
- * since that could starve other thinint queues.
- */
- tasklet_schedule(&q->tasklet);
- }
+ /*
+ * Call inbound processing but not directly
+ * since that could starve other thinint queues.
+ */
+ tasklet_schedule(&q->tasklet);
}
}
@@ -211,7 +183,7 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
}
rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
- subchannel_indicator_addr);
+ subchannel_indicator_addr, tiqdio_airq.isc);
if (rc) {
DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
scssc->response.code);
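
With irq_poll set, tiqdio_call_inq_handlers() above disables further
interrupts itself (via the same test_and_set_bit) before invoking the
callback, so the consumer drains all queues and only then re-enables them
through qdio_start_irq(). Roughly, assuming a hypothetical drain helper
and card structure:

struct my_card {
	struct ccw_device *cdev;
};

/* Hypothetical: process up to "budget" inbound buffers on all queues. */
static int my_drain_all_queues(struct my_card *card, int budget);

static int my_poll(struct my_card *card, int budget)
{
	int done = my_drain_all_queues(card, budget);

	if (done < budget) {
		/* qdio_start_irq() returns 1 if new data arrived while
		 * interrupts were off; stay in polling mode then. */
		if (qdio_start_irq(card->cdev))
			return budget;
	}
	return done;
}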
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index e401a3d0aa57..339a6bc0339b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -167,6 +167,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (ret)
goto out_disable;
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
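
The suppress-then-announce pattern used here is the generic one for buses
that register child devices before they are usable: the parent suppresses
the ADD uevent around device_add(), and the driver emits it once probing
succeeds. Sketched with the core driver-model calls; the surrounding setup
is hypothetical:

#include <linux/device.h>

/* Parent side: register the child with uevents suppressed. */
static int my_register_child(struct device *dev)
{
	dev_set_uevent_suppress(dev, 1);
	return device_add(dev);		/* no KOBJ_ADD fires yet */
}

/* Driver side, after a successful probe (as in the hunk above). */
static void my_announce_child(struct device *dev)
{
	if (dev_get_uevent_suppress(dev)) {
		dev_set_uevent_suppress(dev, 0);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
	}
}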
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5256e3ce84e5..35064443e748 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -18,13 +18,13 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
-#include <linux/suspend.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
@@ -103,16 +103,9 @@ static struct hrtimer ap_poll_timer;
*/
static unsigned long long poll_timeout = 250000;
-/* Suspend flag */
-static int ap_suspend_flag;
/* Maximum domain id */
static int ap_max_domain_id;
-/*
- * Flag to check if domain was set through module parameter domain=. This is
- * important when supsend and resume is done in a z/VM environment where the
- * domain might change.
- */
-static int user_set_domain;
+
static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
@@ -360,7 +353,7 @@ void ap_wait(enum ap_wait wait)
wake_up(&ap_poll_wait);
break;
}
- /* Fall through */
+ fallthrough;
case AP_WAIT_TIMEOUT:
spin_lock_bh(&ap_poll_timer_lock);
if (!hrtimer_is_queued(&ap_poll_timer)) {
@@ -386,8 +379,6 @@ void ap_request_timeout(struct timer_list *t)
{
struct ap_queue *aq = from_timer(aq, t, timeout);
- if (ap_suspend_flag)
- return;
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
spin_unlock_bh(&aq->lock);
@@ -401,8 +392,7 @@ void ap_request_timeout(struct timer_list *t)
*/
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
- if (!ap_suspend_flag)
- tasklet_schedule(&ap_tasklet);
+ tasklet_schedule(&ap_tasklet);
return HRTIMER_NORESTART;
}
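
These hunks replace the /* fall through */ comment convention with the
compiler-checked fallthrough pseudo-keyword. A self-contained illustration;
in the kernel the macro comes from <linux/compiler_attributes.h>, here it
is defined locally for a userspace build:

#include <stdio.h>

#ifndef fallthrough
#define fallthrough __attribute__((__fallthrough__))
#endif

static int classify(int code)
{
	switch (code) {
	case 0:
		printf("reply received\n");
		fallthrough;	/* deliberate: also take the "done" path */
	case 1:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	return classify(0) ? 0 : 1;
}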
@@ -413,8 +403,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
inc_irq_stat(IRQIO_APB);
- if (!ap_suspend_flag)
- tasklet_schedule(&ap_tasklet);
+ tasklet_schedule(&ap_tasklet);
}
/**
@@ -486,7 +475,7 @@ static int ap_poll_thread(void *data)
while (!kthread_should_stop()) {
add_wait_queue(&ap_poll_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- if (ap_suspend_flag || !ap_pending_requests()) {
+ if (!ap_pending_requests()) {
schedule();
try_to_freeze();
}
@@ -587,51 +576,6 @@ static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
-static int ap_dev_suspend(struct device *dev)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
-
- if (ap_dev->drv && ap_dev->drv->suspend)
- ap_dev->drv->suspend(ap_dev);
- return 0;
-}
-
-static int ap_dev_resume(struct device *dev)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
-
- if (ap_dev->drv && ap_dev->drv->resume)
- ap_dev->drv->resume(ap_dev);
- return 0;
-}
-
-static void ap_bus_suspend(void)
-{
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
- ap_suspend_flag = 1;
- /*
- * Disable scanning for devices, thus we do not want to scan
- * for them after removing.
- */
- flush_work(&ap_scan_work);
- tasklet_disable(&ap_tasklet);
-}
-
-static int __ap_card_devices_unregister(struct device *dev, void *dummy)
-{
- if (is_card_dev(dev))
- device_unregister(dev);
- return 0;
-}
-
-static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
-{
- if (is_queue_dev(dev))
- device_unregister(dev);
- return 0;
-}
-
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
if (is_queue_dev(dev) &&
@@ -640,67 +584,10 @@ static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
return 0;
}
-static void ap_bus_resume(void)
-{
- int rc;
-
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
- /* remove all queue devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL,
- __ap_queue_devices_unregister);
- /* remove all card devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL,
- __ap_card_devices_unregister);
-
- /* Reset thin interrupt setting */
- if (ap_interrupts_available() && !ap_using_interrupts()) {
- rc = register_adapter_interrupt(&ap_airq);
- ap_airq_flag = (rc == 0);
- }
- if (!ap_interrupts_available() && ap_using_interrupts()) {
- unregister_adapter_interrupt(&ap_airq);
- ap_airq_flag = 0;
- }
- /* Reset domain */
- if (!user_set_domain)
- ap_domain_index = -1;
- /* Get things going again */
- ap_suspend_flag = 0;
- if (ap_airq_flag)
- xchg(ap_airq.lsi_ptr, 0);
- tasklet_enable(&ap_tasklet);
- queue_work(system_long_wq, &ap_scan_work);
-}
-
-static int ap_power_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- switch (event) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- ap_bus_suspend();
- break;
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- ap_bus_resume();
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-static struct notifier_block ap_power_notifier = {
- .notifier_call = ap_power_event,
-};
-
-static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
-
static struct bus_type ap_bus_type = {
.name = "ap",
.match = &ap_bus_match,
.uevent = &ap_uevent,
- .pm = &ap_bus_pm_ops,
};
static int __ap_revise_reserved(struct device *dev, void *dummy)
@@ -873,8 +760,6 @@ EXPORT_SYMBOL(ap_driver_unregister);
void ap_bus_force_rescan(void)
{
- if (ap_suspend_flag)
- return;
/* processing an asynchronous bus rescan */
del_timer(&ap_config_timer);
queue_work(system_long_wq, &ap_scan_work);
@@ -1021,7 +906,7 @@ EXPORT_SYMBOL(ap_parse_mask_str);
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}
static ssize_t ap_domain_store(struct bus_type *bus,
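
The snprintf() to scnprintf() sweep matters because snprintf() returns the
length the output would have had without truncation, which a sysfs show()
routine must not return; scnprintf() returns the bytes actually stored. The
difference, demonstrated in plain userspace C:

#include <stdio.h>

int main(void)
{
	char buf[8];
	/* snprintf reports the untruncated length: 12 here, although
	 * only 7 characters plus the NUL terminator fit into buf. */
	int n = snprintf(buf, sizeof(buf), "hello world!");

	printf("%d \"%s\"\n", n, buf);	/* prints: 12 "hello w" */
	return 0;
}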
@@ -1047,14 +932,14 @@ static BUS_ATTR_RW(ap_domain);
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->adm[0], ap_configuration->adm[1],
- ap_configuration->adm[2], ap_configuration->adm[3],
- ap_configuration->adm[4], ap_configuration->adm[5],
- ap_configuration->adm[6], ap_configuration->adm[7]);
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->adm[0], ap_configuration->adm[1],
+ ap_configuration->adm[2], ap_configuration->adm[3],
+ ap_configuration->adm[4], ap_configuration->adm[5],
+ ap_configuration->adm[6], ap_configuration->adm[7]);
}
static BUS_ATTR_RO(ap_control_domain_mask);
@@ -1062,14 +947,14 @@ static BUS_ATTR_RO(ap_control_domain_mask);
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->aqm[0], ap_configuration->aqm[1],
- ap_configuration->aqm[2], ap_configuration->aqm[3],
- ap_configuration->aqm[4], ap_configuration->aqm[5],
- ap_configuration->aqm[6], ap_configuration->aqm[7]);
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->aqm[0], ap_configuration->aqm[1],
+ ap_configuration->aqm[2], ap_configuration->aqm[3],
+ ap_configuration->aqm[4], ap_configuration->aqm[5],
+ ap_configuration->aqm[6], ap_configuration->aqm[7]);
}
static BUS_ATTR_RO(ap_usage_domain_mask);
@@ -1077,29 +962,29 @@ static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->apm[0], ap_configuration->apm[1],
- ap_configuration->apm[2], ap_configuration->apm[3],
- ap_configuration->apm[4], ap_configuration->apm[5],
- ap_configuration->apm[6], ap_configuration->apm[7]);
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->apm[0], ap_configuration->apm[1],
+ ap_configuration->apm[2], ap_configuration->apm[3],
+ ap_configuration->apm[4], ap_configuration->apm[5],
+ ap_configuration->apm[6], ap_configuration->apm[7]);
}
static BUS_ATTR_RO(ap_adapter_mask);
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- ap_using_interrupts() ? 1 : 0);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ ap_using_interrupts() ? 1 : 0);
}
static BUS_ATTR_RO(ap_interrupts);
static ssize_t config_time_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}
static ssize_t config_time_store(struct bus_type *bus,
@@ -1118,7 +1003,7 @@ static BUS_ATTR_RW(config_time);
static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}
static ssize_t poll_thread_store(struct bus_type *bus,
@@ -1141,7 +1026,7 @@ static BUS_ATTR_RW(poll_thread);
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
@@ -1176,7 +1061,7 @@ static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
max_domain_id = ap_max_domain_id ? : -1;
else
max_domain_id = 15;
- return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
}
static BUS_ATTR_RO(ap_max_domain_id);
@@ -1187,10 +1072,10 @@ static ssize_t apmask_show(struct bus_type *bus, char *buf)
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- rc = snprintf(buf, PAGE_SIZE,
- "0x%016lx%016lx%016lx%016lx\n",
- ap_perms.apm[0], ap_perms.apm[1],
- ap_perms.apm[2], ap_perms.apm[3]);
+ rc = scnprintf(buf, PAGE_SIZE,
+ "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.apm[0], ap_perms.apm[1],
+ ap_perms.apm[2], ap_perms.apm[3]);
mutex_unlock(&ap_perms_mutex);
return rc;
@@ -1218,10 +1103,10 @@ static ssize_t aqmask_show(struct bus_type *bus, char *buf)
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- rc = snprintf(buf, PAGE_SIZE,
- "0x%016lx%016lx%016lx%016lx\n",
- ap_perms.aqm[0], ap_perms.aqm[1],
- ap_perms.aqm[2], ap_perms.aqm[3]);
+ rc = scnprintf(buf, PAGE_SIZE,
+ "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.aqm[0], ap_perms.aqm[1],
+ ap_perms.aqm[2], ap_perms.aqm[3]);
mutex_unlock(&ap_perms_mutex);
return rc;
@@ -1567,8 +1452,6 @@ static void ap_scan_bus(struct work_struct *unused)
static void ap_config_timeout(struct timer_list *unused)
{
- if (ap_suspend_flag)
- return;
queue_work(system_long_wq, &ap_scan_work);
}
@@ -1641,11 +1524,6 @@ static int __init ap_module_init(void)
ap_domain_index);
ap_domain_index = -1;
}
- /* In resume callback we need to know if the user had set the domain.
- * If so, we can not just reset it.
- */
- if (ap_domain_index >= 0)
- user_set_domain = 1;
if (ap_interrupts_available()) {
rc = register_adapter_interrupt(&ap_airq);
@@ -1688,17 +1566,11 @@ static int __init ap_module_init(void)
goto out_work;
}
- rc = register_pm_notifier(&ap_power_notifier);
- if (rc)
- goto out_pm;
-
queue_work(system_long_wq, &ap_scan_work);
initialised = true;
return 0;
-out_pm:
- ap_poll_thread_stop();
out_work:
hrtimer_cancel(&ap_poll_timer);
root_device_unregister(ap_root_device);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4348fdff1c61..8e8e37b6c0ee 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,7 +91,6 @@ enum ap_state {
AP_STATE_IDLE,
AP_STATE_WORKING,
AP_STATE_QUEUE_FULL,
- AP_STATE_SUSPEND_WAIT,
AP_STATE_REMOVE, /* about to be removed from driver */
AP_STATE_UNBOUND, /* momentary not bound to a driver */
AP_STATE_BORKED, /* broken */
@@ -136,8 +135,6 @@ struct ap_driver {
int (*probe)(struct ap_device *);
void (*remove)(struct ap_device *);
- void (*suspend)(struct ap_device *);
- void (*resume)(struct ap_device *);
};
#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
@@ -259,8 +256,6 @@ void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
-void ap_queue_suspend(struct ap_device *ap_dev);
-void ap_queue_resume(struct ap_device *ap_dev);
void ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index e85bfca1ed16..0a39dfdb6a1d 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -23,7 +23,7 @@ static ssize_t hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
}
static DEVICE_ATTR_RO(hwtype);
@@ -33,7 +33,7 @@ static ssize_t raw_hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
}
static DEVICE_ATTR_RO(raw_hwtype);
@@ -43,7 +43,7 @@ static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
}
static DEVICE_ATTR_RO(depth);
@@ -53,7 +53,7 @@ static ssize_t ap_functions_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+ return scnprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
}
static DEVICE_ATTR_RO(ap_functions);
@@ -69,7 +69,7 @@ static ssize_t request_count_show(struct device *dev,
spin_lock_bh(&ap_list_lock);
req_cnt = atomic64_read(&ac->total_request_count);
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -102,7 +102,7 @@ static ssize_t requestq_count_show(struct device *dev,
for_each_ap_queue(aq, ac)
reqq_cnt += aq->requestq_count;
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
@@ -119,7 +119,7 @@ static ssize_t pendingq_count_show(struct device *dev,
for_each_ap_queue(aq, ac)
penq_cnt += aq->pendingq_count;
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
@@ -127,7 +127,8 @@ static DEVICE_ATTR_RO(pendingq_count);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+ return scnprintf(buf, PAGE_SIZE, "ap:t%02X\n",
+ to_ap_dev(dev)->device_type);
}
static DEVICE_ATTR_RO(modalias);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index a317ab484932..0eaf1d04e8df 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -152,7 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
ap_msg->receive(aq, ap_msg, aq->reply);
break;
}
- /* fall through */
+ fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
@@ -201,31 +201,6 @@ static enum ap_wait ap_sm_read(struct ap_queue *aq)
}
/**
- * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
- * without changing the device state in between. In suspend mode we don't
- * allow sending new requests, therefore just fetch pending replies.
- * @aq: pointer to the AP queue
- *
- * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
- */
-static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
-{
- struct ap_queue_status status;
-
- if (!aq->reply)
- return AP_WAIT_NONE;
- status = ap_sm_recv(aq);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (aq->queue_count > 0)
- return AP_WAIT_AGAIN;
- /* fall through */
- default:
- return AP_WAIT_NONE;
- }
-}
-
-/**
* ap_sm_write(): Send messages from the request queue to an AP queue.
* @aq: pointer to the AP queue
*
@@ -254,7 +229,7 @@ static enum ap_wait ap_sm_write(struct ap_queue *aq)
aq->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
- /* fall through */
+ fallthrough;
case AP_RESPONSE_Q_FULL:
aq->state = AP_STATE_QUEUE_FULL;
return AP_WAIT_INTERRUPT;
@@ -380,7 +355,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
return AP_WAIT_AGAIN;
- /* fallthrough */
+ fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
return AP_WAIT_TIMEOUT;
default:
@@ -417,10 +392,6 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_POLL] = ap_sm_read,
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
- [AP_STATE_SUSPEND_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_suspend_read,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
[AP_STATE_REMOVE] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -450,28 +421,6 @@ enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
}
/*
- * Power management for queue devices
- */
-void ap_queue_suspend(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-
- /* Poll on the device until all requests are finished. */
- spin_lock_bh(&aq->lock);
- aq->state = AP_STATE_SUSPEND_WAIT;
- while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
- ;
- aq->state = AP_STATE_BORKED;
- spin_unlock_bh(&aq->lock);
-}
-EXPORT_SYMBOL(ap_queue_suspend);
-
-void ap_queue_resume(struct ap_device *ap_dev)
-{
-}
-EXPORT_SYMBOL(ap_queue_resume);
-
-/*
* AP queue related attributes.
*/
static ssize_t request_count_show(struct device *dev,
@@ -484,7 +433,7 @@ static ssize_t request_count_show(struct device *dev,
spin_lock_bh(&aq->lock);
req_cnt = aq->total_request_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -511,7 +460,7 @@ static ssize_t requestq_count_show(struct device *dev,
spin_lock_bh(&aq->lock);
reqq_cnt = aq->requestq_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
@@ -525,7 +474,7 @@ static ssize_t pendingq_count_show(struct device *dev,
spin_lock_bh(&aq->lock);
penq_cnt = aq->pendingq_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
@@ -540,14 +489,14 @@ static ssize_t reset_show(struct device *dev,
switch (aq->state) {
case AP_STATE_RESET_START:
case AP_STATE_RESET_WAIT:
- rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
break;
case AP_STATE_WORKING:
case AP_STATE_QUEUE_FULL:
- rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
break;
default:
- rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
}
spin_unlock_bh(&aq->lock);
return rc;
@@ -581,11 +530,11 @@ static ssize_t interrupt_show(struct device *dev,
spin_lock_bh(&aq->lock);
if (aq->state == AP_STATE_SETIRQ_WAIT)
- rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
else if (aq->interrupt == AP_INTR_ENABLED)
- rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
else
- rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
spin_unlock_bh(&aq->lock);
return rc;
}
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 2f33c5fcf676..74e63ec49068 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -80,7 +80,7 @@ struct clearaeskeytoken {
u8 res1[3];
u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
u32 len; /* bytes actually stored in clearkey[] */
- u8 clearkey[0]; /* clear key value */
+ u8 clearkey[]; /* clear key value */
} __packed;
/*
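
u8 clearkey[0] is the old GNU zero-length-array idiom; u8 clearkey[] is the
standard C99 flexible array member, which newer compilers can bounds-check.
Usage is unchanged, as this self-contained example shows:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct key_blob {
	uint32_t len;
	uint8_t data[];		/* flexible array member: adds no size */
};

static struct key_blob *blob_alloc(const void *key, uint32_t len)
{
	/* The trailing array is allocated explicitly, past the struct. */
	struct key_blob *b = malloc(sizeof(*b) + len);

	if (!b)
		return NULL;
	b->len = len;
	memcpy(b->data, key, len);
	return b;
}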
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 5c0f53c6dde7..e0bde8518745 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -90,7 +90,7 @@ static void vfio_ap_wait_for_irqclear(int apqn)
case AP_RESPONSE_RESET_IN_PROGRESS:
if (!status.irq_enabled)
return;
- /* Fall through */
+ fallthrough;
case AP_RESPONSE_BUSY:
msleep(20);
break;
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index d4f35a183c15..c53cab4b0c9e 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -41,7 +41,7 @@ static ssize_t type_show(struct device *dev,
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
}
static DEVICE_ATTR_RO(type);
@@ -52,7 +52,7 @@ static ssize_t online_show(struct device *dev,
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", zc->online);
}
static ssize_t online_store(struct device *dev,
@@ -86,7 +86,7 @@ static ssize_t load_show(struct device *dev,
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
}
static DEVICE_ATTR_RO(load);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 110fe9d0cb91..1b835398feec 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -592,7 +592,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
u8 pad2[1];
u8 vptype;
u8 vp[32]; /* verification pattern */
- } keyblock;
+ } ckb;
} lv3;
} __packed * prepparm;
@@ -650,15 +650,16 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
/* check the returned keyblock */
- if (prepparm->lv3.keyblock.version != 0x01) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x01\n",
- __func__, (int) prepparm->lv3.keyblock.version);
+ if (prepparm->lv3.ckb.version != 0x01 &&
+ prepparm->lv3.ckb.version != 0x02) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int) prepparm->lv3.ckb.version);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
- switch (prepparm->lv3.keyblock.len) {
+ switch (prepparm->lv3.ckb.len) {
case 16+32:
/* AES 128 protected key */
if (protkeytype)
@@ -676,13 +677,13 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
break;
default:
DEBUG_ERR("%s unknown/unsupported keylen %d\n",
- __func__, prepparm->lv3.keyblock.len);
+ __func__, prepparm->lv3.ckb.len);
rc = -EIO;
goto out;
}
- memcpy(protkey, prepparm->lv3.keyblock.key, prepparm->lv3.keyblock.len);
+ memcpy(protkey, prepparm->lv3.ckb.key, prepparm->lv3.ckb.len);
if (protkeylen)
- *protkeylen = prepparm->lv3.keyblock.len;
+ *protkeylen = prepparm->lv3.ckb.len;
out:
free_cprbmem(mem, PARMBSIZE, 0);
@@ -1260,10 +1261,10 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
/* check the returned keyblock */
- if (prepparm->vud.ckb.version != 0x01) {
- DEBUG_ERR(
- "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
- __func__, (int) prepparm->vud.ckb.version);
+ if (prepparm->vud.ckb.version != 0x01 &&
+ prepparm->vud.ckb.version != 0x02) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int) prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
@@ -1568,9 +1569,9 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
return -EINVAL;
/* fetch status of all crypto cards */
- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
@@ -1640,7 +1641,7 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
} else
rc = -ENODEV;
- kfree(device_status);
+ kvfree(device_status);
return rc;
}
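
kmalloc_array() demands physically contiguous memory, which can fail for a
status table sized MAX_ZDEV_ENTRIES_EXT; kvmalloc_array() falls back to
vmalloc() when the page allocator cannot satisfy the request, and kvfree()
releases either kind. The pattern, sketched with a hypothetical caller:

#include <linux/mm.h>

static int my_fetch_status(size_t nr_entries, size_t entry_size)
{
	void *table = kvmalloc_array(nr_entries, entry_size, GFP_KERNEL);

	if (!table)
		return -ENOMEM;
	/* ... populate and scan the table ... */
	kvfree(table);		/* works for kmalloc and vmalloc memory */
	return 0;
}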
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 3a9876d5ab0e..8b7a641671c9 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -90,7 +90,7 @@ struct cipherkeytoken {
u16 kmf1; /* key management field 1 */
u16 kmf2; /* key management field 2 */
u16 kmf3; /* key management field 3 */
- u8 vdata[0]; /* variable part data follows */
+ u8 vdata[]; /* variable part data follows */
} __packed;
/* Some defines for the CCA AES cipherkeytoken kmf1 field */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 7cbb384ec535..b447f3e9e4a2 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -204,8 +204,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
static struct ap_driver zcrypt_cex2a_queue_driver = {
.probe = zcrypt_cex2a_queue_probe,
.remove = zcrypt_cex2a_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
.ids = zcrypt_cex2a_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index c78c0d119806..266440168bb7 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -260,8 +260,6 @@ static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
static struct ap_driver zcrypt_cex2c_queue_driver = {
.probe = zcrypt_cex2c_queue_probe,
.remove = zcrypt_cex2c_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
.ids = zcrypt_cex2c_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 9a9d02e19774..cdaa8348ad04 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -87,7 +87,7 @@ static ssize_t cca_serialnr_show(struct device *dev,
if (ap_domain_index >= 0)
cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
- return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
}
static struct device_attribute dev_attr_cca_serialnr =
@@ -122,22 +122,24 @@ static ssize_t cca_mkvps_show(struct device *dev,
&ci, zq->online);
if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
- n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
- new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_mk_state - '1'], ci.new_mkvp);
else
- n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
- n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n",
- cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
else
- n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
- n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n",
- cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
else
- n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
return n;
}
@@ -170,9 +172,9 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.API_ord_nr > 0)
- return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
}
static struct device_attribute dev_attr_ep11_api_ordinalnr =
@@ -191,11 +193,11 @@ static ssize_t ep11_fw_version_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.FW_version > 0)
- return snprintf(buf, PAGE_SIZE, "%d.%d\n",
- (int)(ci.FW_version >> 8),
- (int)(ci.FW_version & 0xFF));
+ return scnprintf(buf, PAGE_SIZE, "%d.%d\n",
+ (int)(ci.FW_version >> 8),
+ (int)(ci.FW_version & 0xFF));
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
}
static struct device_attribute dev_attr_ep11_fw_version =
@@ -214,9 +216,9 @@ static ssize_t ep11_serialnr_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.serial[0])
- return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+ return scnprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
}
static struct device_attribute dev_attr_ep11_serialnr =
@@ -251,11 +253,11 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
- n += snprintf(buf + n, PAGE_SIZE - n,
- "%s", ep11_op_modes[i].mode_txt);
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
}
}
- n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
return n;
}
@@ -298,28 +300,28 @@ static ssize_t ep11_mkvps_show(struct device *dev,
&di);
if (di.cur_wk_state == '0') {
- n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
- cwk_state[di.cur_wk_state - '0']);
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+ cwk_state[di.cur_wk_state - '0']);
} else if (di.cur_wk_state == '1') {
- n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
- cwk_state[di.cur_wk_state - '0']);
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+ cwk_state[di.cur_wk_state - '0']);
bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
n += 2 * sizeof(di.cur_wkvp);
- n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
} else
- n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
if (di.new_wk_state == '0') {
- n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
- nwk_state[di.new_wk_state - '0']);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+ nwk_state[di.new_wk_state - '0']);
} else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
- n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
- nwk_state[di.new_wk_state - '0']);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+ nwk_state[di.new_wk_state - '0']);
bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
n += 2 * sizeof(di.new_wkvp);
- n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
} else
- n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
return n;
}
@@ -346,11 +348,11 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
- n += snprintf(buf + n, PAGE_SIZE - n,
- "%s", ep11_op_modes[i].mode_txt);
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
}
}
- n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
return n;
}
@@ -654,8 +656,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
static struct ap_driver zcrypt_cex4_queue_driver = {
.probe = zcrypt_cex4_queue_probe,
.remove = zcrypt_cex4_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
.ids = zcrypt_cex4_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 2afe2153b34e..004ce022fc78 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -1217,9 +1217,9 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
struct ep11_card_info eci;
/* fetch status of all crypto cards */
- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
@@ -1227,7 +1227,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
/* allocate 1k space for up to 256 apqns */
_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
if (!_apqns) {
- kfree(device_status);
+ kvfree(device_status);
return -ENOMEM;
}
@@ -1282,7 +1282,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
rc = 0;
}
- kfree(device_status);
+ kvfree(device_status);
return rc;
}
EXPORT_SYMBOL(ep11_findcard2);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index a36251d138fb..fd1cbb2d6b3f 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -590,7 +590,7 @@ struct type86x_reply {
struct CPRBX cprbx;
unsigned char pad[4]; /* 4 byte function code/rules block ? */
unsigned short length;
- char text[0];
+ char text[];
} __packed;
struct type86_ep11_reply {
@@ -801,7 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
- /* fall through - wrong cprb version is an unknown response */
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -834,7 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
}
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(zq, reply, xcRB);
- /* fall through - wrong cprb version is an unknown response */
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
@@ -864,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
return convert_type86_ep11_xcrb(zq, reply, xcRB);
- /* fall through - wrong cprb version is an unknown resp */
+ fallthrough; /* wrong cprb version is an unknown resp */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -894,7 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zq, reply, data);
- /* fall through - wrong cprb version is an unknown response */
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 522c4bc69a08..b7d9fa567880 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -42,7 +42,7 @@ static ssize_t online_show(struct device *dev,
{
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", zq->online);
}
static ssize_t online_store(struct device *dev,
@@ -78,7 +78,7 @@ static ssize_t load_show(struct device *dev,
{
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
}
static DEVICE_ATTR_RO(load);
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index ced896d1534a..3850a0f5f0bc 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -91,6 +91,23 @@ config QETH_L3
To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
+config QETH_OSN
+ def_bool !HAVE_MARCH_Z14_FEATURES
+ prompt "qeth OSN device support"
+ depends on QETH
+ help
+ This enables the qeth driver to support devices in OSN mode.
+ This feature will be removed in 2021.
+ If unsure, choose N.
+
+config QETH_OSX
+ def_bool !HAVE_MARCH_Z15_FEATURES
+ prompt "qeth OSX device support"
+ depends on QETH
+ help
+ This enables the qeth driver to support devices in OSX mode.
+ If unsure, choose N.
+
config CCWGROUP
tristate
default (LCS || CTCM || QETH)
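
def_bool with a prompt makes the default track the machine-generation
option (!HAVE_MARCH_Z14_FEATURES here) while still letting the user
override it. Driver code would then key off the option in the usual way; a
hypothetical use, not taken from this series:

#include <linux/kconfig.h>

static bool my_osn_mode_supported(void)
{
	/* Compile-time constant; dead branches are eliminated. */
	return IS_ENABLED(CONFIG_QETH_OSN);
}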
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 4fc2056bd227..c7fade836d83 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
ISM_NR_DMBS);
- if (!ism->smcd)
+ if (!ism->smcd) {
+ ret = -ENOMEM;
goto err_resource;
+ }
ism->smcd->priv = ism;
ret = ism_dev_init(ism);
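
The ism_probe() hunk fixes a classic probe bug: when smcd_alloc_dev()
failed, ret still held the 0 from the previous step, so the error path
returned success. Each failure point in a goto-unwound probe must set its
own error code first; in miniature, with hypothetical helpers:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	void *priv;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	priv = my_alloc_priv();		/* hypothetical allocation */
	if (!priv) {
		ret = -ENOMEM;		/* without this, ret stays 0 */
		goto err_disable;
	}
	pci_set_drvdata(pdev, priv);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return ret;
}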
@@ -567,31 +569,11 @@ static void ism_remove(struct pci_dev *pdev)
kfree(ism);
}
-static int ism_suspend(struct device *dev)
-{
- struct ism_dev *ism = dev_get_drvdata(dev);
-
- ism_dev_exit(ism);
- return 0;
-}
-
-static int ism_resume(struct device *dev)
-{
- struct ism_dev *ism = dev_get_drvdata(dev);
-
- return ism_dev_init(ism);
-}
-
-static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);
-
static struct pci_driver ism_driver = {
.name = DRV_NAME,
.id_table = ism_device_table,
.probe = ism_probe,
.remove = ism_remove,
- .driver = {
- .pm = &ism_pm_ops,
- },
};
static int __init ism_init(void)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 468cada49e72..e0b26310ecab 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -178,17 +178,16 @@ struct qeth_vnicc_info {
#define QETH_RECLAIM_WORK_TIME HZ
#define QETH_MAX_PORTNO 15
-/*IPv6 address autoconfiguration stuff*/
-#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
-#define UNIQUE_ID_NOT_BY_CARD 0x10000
-
/*****************************************************************************/
/* QDIO queue and buffer handling */
/*****************************************************************************/
-#define QETH_MAX_QUEUES 4
+#define QETH_MAX_OUT_QUEUES 4
#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */
#define QETH_IQD_MCAST_TXQ 0
#define QETH_IQD_MIN_UCAST_TXQ 1
+
+#define QETH_MAX_IN_QUEUES 2
+#define QETH_RX_COPYBREAK (PAGE_SIZE >> 1)
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
@@ -213,15 +212,13 @@ struct qeth_vnicc_info {
#define QETH_PRIO_Q_ING_TOS 2
#define QETH_PRIO_Q_ING_SKB 3
#define QETH_PRIO_Q_ING_VLAN 4
+#define QETH_PRIO_Q_ING_FIXED 5
/* Packing */
#define QETH_LOW_WATERMARK_PACK 2
#define QETH_HIGH_WATERMARK_PACK 5
#define QETH_WATERMARK_PACK_FUZZ 1
-/* large receive scatter gather copy break */
-#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
-
struct qeth_hdr_layer3 {
__u8 id;
__u8 flags;
@@ -407,6 +404,7 @@ struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
atomic_t state;
int next_element_to_fill;
+ unsigned int frames;
unsigned int bytes;
struct sk_buff_head skb_list;
int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
@@ -462,6 +460,8 @@ struct qeth_out_q_stats {
u64 tso_bytes;
u64 packing_mode_switch;
u64 stopped;
+ u64 doorbell;
+ u64 coal_frames;
u64 completion_yield;
u64 completion_timer;
@@ -472,6 +472,8 @@ struct qeth_out_q_stats {
u64 tx_dropped;
};
+#define QETH_TX_MAX_COALESCED_FRAMES 1
+#define QETH_TX_COALESCE_USECS 25
#define QETH_TX_TIMER_USECS 500
struct qeth_qdio_out_q {
@@ -495,9 +497,13 @@ struct qeth_qdio_out_q {
struct napi_struct napi;
struct timer_list timer;
struct qeth_hdr *prev_hdr;
+ unsigned int coalesced_frames;
u8 bulk_start;
u8 bulk_count;
u8 bulk_max;
+
+ unsigned int coalesce_usecs;
+ unsigned int max_coalesced_frames;
};
#define qeth_for_each_output_queue(card, q, i) \
@@ -506,12 +512,10 @@ struct qeth_qdio_out_q {
#define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
-static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue,
+ unsigned long usecs)
{
- if (timer_pending(&queue->timer))
- return;
- mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
- jiffies);
+ timer_reduce(&queue->timer, usecs_to_jiffies(usecs) + jiffies);
}
static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
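
timer_reduce() collapses the old timer_pending() plus mod_timer() sequence
into one call that arms an idle timer or pulls a pending expiry earlier,
never later, which also closes the race between the pending check and the
modification. An equivalent helper, sketched:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Same behaviour as the new qeth_tx_arm_timer(): cap the time until
 * the completion timer fires at "usecs" from now. */
static void my_arm_completion_timer(struct timer_list *t,
				    unsigned long usecs)
{
	timer_reduce(t, jiffies + usecs_to_jiffies(usecs));
}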
@@ -536,7 +540,7 @@ struct qeth_qdio_info {
/* output */
int no_out_queues;
- struct qeth_qdio_out_q *out_qs[QETH_MAX_QUEUES];
+ struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
struct qdio_outbuf_state *out_bufstates;
/* priority queueing */
@@ -673,22 +677,20 @@ struct qeth_card_blkt {
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
-#define QETH_LAYER2_MAC_REGISTERED 0x02
struct qeth_card_info {
unsigned short unit_addr2;
unsigned short cula;
u8 chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
+ u8 dev_addr_is_registered:1;
u8 open_when_online:1;
u8 promisc_mode:1;
u8 use_v1_blkt:1;
u8 is_vm_nic:1;
- int mac_bits;
enum qeth_card_types type;
enum qeth_link_types link_type;
int broadcast_capable;
- int unique_id;
bool layer_enforced;
struct qeth_card_blkt blkt;
__u32 diagass_support;
@@ -709,9 +711,7 @@ struct qeth_card_options {
struct qeth_ipa_caps adp; /* Adapter parameters */
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
struct qeth_vnicc_info vnicc; /* VNICC options */
- int fake_broadcast;
enum qeth_discipline_id layer;
- int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
enum qeth_ipa_isolation_modes prev_isolation;
int sniffer;
@@ -754,7 +754,7 @@ enum qeth_addr_disposition {
struct qeth_rx {
int b_count;
int b_index;
- struct qdio_buffer_element *b_element;
+ u8 buf_element;
int e_offset;
int qdio_err;
};
@@ -770,6 +770,10 @@ struct qeth_switch_info {
__u32 settings;
};
+struct qeth_priv {
+ unsigned int rx_copybreak;
+};
+
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card {
@@ -845,11 +849,6 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
-static inline bool qeth_netdev_is_registered(struct net_device *dev)
-{
- return dev->netdev_ops != NULL;
-}
-
static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
{
if (txq == QETH_IQD_MCAST_TXQ)
@@ -1051,6 +1050,7 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
+int qeth_setup_netdev(struct qeth_card *card);
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
@@ -1058,6 +1058,7 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev);
int qeth_open(struct net_device *dev);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6d3f2f14b414..569966bdc513 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -244,7 +244,7 @@ static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
return NULL;
for (i = 0; i < pages; i++) {
- entry->elements[i] = alloc_page(GFP_KERNEL);
+ entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
if (!entry->elements[i]) {
qeth_free_pool_entry(entry);
@@ -538,23 +538,16 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
for (i = 0;
i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
i++) {
- if (aob->sba[i] && buffer->is_header[i])
- kmem_cache_free(qeth_core_header_cache,
- (void *) aob->sba[i]);
+ void *data = phys_to_virt(aob->sba[i]);
+
+ if (data && buffer->is_header[i])
+ kmem_cache_free(qeth_core_header_cache, data);
}
atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
qdio_release_aob(aob);
}
-static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
-{
- return card->options.cq == QETH_CQ_ENABLED &&
- card->qdio.c_q != NULL &&
- queue != 0 &&
- queue == card->qdio.no_in_queues - 1;
-}
-
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
void *data)
{
@@ -813,7 +806,7 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
/* fall through */
default:
qeth_clear_ipacmd_list(card);
- goto out;
+ goto err_idx;
}
cmd = __ipa_reply(iob);
@@ -866,8 +859,9 @@ out:
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
QETH_SEQ_NO_LENGTH);
- qeth_put_cmd(iob);
__qeth_issue_next_read(card);
+err_idx:
+ qeth_put_cmd(iob);
}
static int qeth_set_thread_start_bit(struct qeth_card *card,
@@ -1161,17 +1155,20 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
QETH_TXQ_STAT_INC(queue, bufs);
QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
+ if (error) {
+ QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
+ } else {
+ QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
+ QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
+ }
+
while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
unsigned int bytes = qdisc_pkt_len(skb);
bool is_tso = skb_is_gso(skb);
unsigned int packets;
packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
- if (error) {
- QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
- } else {
- QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
- QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
+ if (!error) {
if (skb->ip_summed == CHECKSUM_PARTIAL)
QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
if (skb_is_nonlinear(skb))
@@ -1208,6 +1205,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
+ buf->frames = 0;
buf->bytes = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
@@ -1243,9 +1241,12 @@ EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
- unsigned int count = single ? 1 : card->dev->num_tx_queues;
+ unsigned int max = single ? 1 : card->dev->num_tx_queues;
+ unsigned int count;
int rc;
+ count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;
+
rtnl_lock();
rc = netif_set_real_num_tx_queues(card->dev, count);
rtnl_unlock();
@@ -1253,16 +1254,16 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
if (rc)
return rc;
- if (card->qdio.no_out_queues == count)
+ if (card->qdio.no_out_queues == max)
return 0;
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
- if (count == 1)
+ if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
- card->qdio.no_out_queues = count;
+ card->qdio.no_out_queues = max;
return 0;
}
@@ -1314,7 +1315,6 @@ static void qeth_set_initial_options(struct qeth_card *card)
{
card->options.route4.type = NO_ROUTER;
card->options.route6.type = NO_ROUTER;
- card->options.rx_sg_cb = QETH_RX_SG_CB;
card->options.isolation = ISOLATION_MODE_NONE;
card->options.cq = QETH_CQ_DISABLED;
card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
@@ -1682,17 +1682,16 @@ static void qeth_set_blkt_defaults(struct qeth_card *card)
}
}
-static void qeth_init_tokens(struct qeth_card *card)
+static void qeth_idx_init(struct qeth_card *card)
{
+ memset(&card->seqno, 0, sizeof(card->seqno));
+
card->token.issuer_rm_w = 0x00010103UL;
card->token.cm_filter_w = 0x00010108UL;
card->token.cm_connection_w = 0x0001010aUL;
card->token.ulp_filter_w = 0x0001010bUL;
card->token.ulp_connection_w = 0x0001010dUL;
-}
-static void qeth_init_func_level(struct qeth_card *card)
-{
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
@@ -2405,6 +2404,8 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
queue->card = card;
queue->queue_no = i;
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
+ queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
+ queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2628,15 +2629,13 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card)
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
struct qeth_card *card)
{
- struct list_head *plh;
struct qeth_buffer_pool_entry *entry;
int i, free;
if (list_empty(&card->qdio.in_buf_pool.entry_list))
return NULL;
- list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
- entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+ list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
free = 1;
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
if (page_count(entry->elements[i]) > 1) {
@@ -2651,11 +2650,11 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
}
/* no free buffer in pool so take first one and swap pages */
- entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
- struct qeth_buffer_pool_entry, list);
+ entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
+ struct qeth_buffer_pool_entry, list);
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
if (page_count(entry->elements[i]) > 1) {
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = dev_alloc_page();
if (!page)
return NULL;
@@ -2763,6 +2762,7 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
queue->next_buf_to_fill = 0;
queue->do_pack = 0;
queue->prev_hdr = NULL;
+ queue->coalesced_frames = 0;
queue->bulk_start = 0;
queue->bulk_count = 0;
queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
@@ -3353,16 +3353,21 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
for (i = index; i < index + count; ++i) {
unsigned int bidx = QDIO_BUFNR(i);
+ struct sk_buff *skb;
buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
SBAL_EFLAGS_LAST_ENTRY;
+ queue->coalesced_frames += buf->frames;
if (queue->bufstates)
queue->bufstates[bidx].user = buf;
- if (IS_IQD(queue->card))
+ if (IS_IQD(card)) {
+ skb_queue_walk(&buf->skb_list, skb)
+ skb_tx_timestamp(skb);
continue;
+ }
if (!queue->do_pack) {
if ((atomic_read(&queue->used_buffers) >=
@@ -3390,6 +3395,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
+ QETH_TXQ_STAT_INC(queue, doorbell);
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
@@ -3397,8 +3403,18 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
queue->queue_no, index, count);
/* Fake the TX completion interrupt: */
- if (IS_IQD(card))
- napi_schedule(&queue->napi);
+ if (IS_IQD(card)) {
+ unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
+ unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+
+ if (frames && queue->coalesced_frames >= frames) {
+ napi_schedule(&queue->napi);
+ queue->coalesced_frames = 0;
+ QETH_TXQ_STAT_INC(queue, coal_frames);
+ } else if (usecs) {
+ qeth_tx_arm_timer(queue, usecs);
+ }
+ }
if (rc) {
/* ignore temporary SIGA errors without busy condition */
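
With IQD the TX completion interrupt is faked from this flush path, so the block above is the whole coalescing policy: schedule NAPI once enough frames have accumulated, otherwise (re-)arm the timer as a backstop. A minimal userspace model of the per-doorbell decision (the struct is a simplified stand-in for qeth_qdio_out_q, not the driver's type):

    #include <stdbool.h>
    #include <stdio.h>

    struct txq_model {
            unsigned int coalesced_frames;     /* frames since last IRQ */
            unsigned int max_coalesced_frames; /* 0 = no frame trigger */
            unsigned int coalesce_usecs;       /* 0 = no timer backstop */
    };

    /* true: schedule NAPI now; false: caller arms a coalesce_usecs timer. */
    static bool tx_completion_due(struct txq_model *q)
    {
            if (q->max_coalesced_frames &&
                q->coalesced_frames >= q->max_coalesced_frames) {
                    q->coalesced_frames = 0;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            struct txq_model q = { .max_coalesced_frames = 64,
                                   .coalesce_usecs = 25 };

            q.coalesced_frames = 70;
            printf("%d\n", tx_completion_due(&q)); /* 1: poll now */
            q.coalesced_frames = 10;
            printf("%d\n", tx_completion_due(&q)); /* 0: arm 25us timer */
            return 0;
    }
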
@@ -3462,13 +3478,11 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
}
}
-static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
- unsigned long card_ptr)
+static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (card->dev->flags & IFF_UP)
- napi_schedule_irqoff(&card->napi);
+ napi_schedule_irqoff(&card->napi);
}
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
@@ -3502,9 +3516,6 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
int i;
int rc;
- if (!qeth_is_cq(card, queue))
- return;
-
QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
@@ -3550,9 +3561,7 @@ static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
- if (qeth_is_cq(card, queue))
- qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
- else if (qdio_err)
+ if (qdio_err)
qeth_schedule_recovery(card);
}
@@ -3635,6 +3644,8 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
return ~ntohs(veth->h_vlan_TCI) >>
(VLAN_PRIO_SHIFT + 1) & 3;
break;
+ case QETH_PRIO_Q_ING_FIXED:
+ return card->qdio.default_out_queue;
default:
break;
}
@@ -3707,6 +3718,7 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
unsigned int hdr_len, unsigned int proto_len,
unsigned int *elements)
{
+ gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
const unsigned int contiguous = proto_len ? proto_len : 1;
const unsigned int max_elements = queue->max_elements;
unsigned int __elements;
@@ -3762,10 +3774,11 @@ check_layout:
*hdr = skb_push(skb, hdr_len);
return hdr_len;
}
- /* fall back */
+
+ /* Fall back to cache element with known-good alignment: */
if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
return -E2BIG;
- *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
if (!*hdr)
return -ENOMEM;
/* Copy protocol headers behind HW header: */
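
The gfp selection at the top of qeth_add_hw_header() is the common pfmemalloc pattern: an skb that was itself served from the memory reserves (e.g. during swap-over-network) must be allowed to take its HW-header allocation from the reserves as well, or the TX path can stall under memory pressure. A sketch of just the flag logic, with local constants standing in for the kernel's GFP_ATOMIC and __GFP_MEMALLOC values:

    #include <stdbool.h>
    #include <stdio.h>

    #define GFP_ATOMIC_F   0x1u /* local stand-in, not the kernel value */
    #define GFP_MEMALLOC_F 0x2u /* models __GFP_MEMALLOC */

    /* A pfmemalloc skb may dip into the emergency reserves again. */
    static unsigned int tx_header_gfp(bool skb_is_pfmemalloc)
    {
            return GFP_ATOMIC_F | (skb_is_pfmemalloc ? GFP_MEMALLOC_F : 0);
    }

    int main(void)
    {
            printf("0x%x\n", tx_header_gfp(false)); /* 0x1 */
            printf("0x%x\n", tx_header_gfp(true));  /* 0x3 */
            return 0;
    }
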
@@ -3948,6 +3961,7 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
buffer->bytes += bytes;
+ buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
queue->prev_hdr = hdr;
flush = __netdev_tx_sent_queue(txq, bytes,
@@ -4038,6 +4052,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+ buffer->bytes += qdisc_pkt_len(skb);
+ buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
if (queue->do_pack)
QETH_TXQ_STAT_INC(queue, skbs_pack);
@@ -4796,34 +4812,13 @@ out:
return;
}
-static void qeth_qdio_establish_cq(struct qeth_card *card,
- struct qdio_buffer **in_sbal_ptrs,
- void (**queue_start_poll)
- (struct ccw_device *, int,
- unsigned long))
-{
- int i;
-
- if (card->options.cq == QETH_CQ_ENABLED) {
- int offset = QDIO_MAX_BUFFERS_PER_Q *
- (card->qdio.no_in_queues - 1);
-
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
- in_sbal_ptrs[offset + i] =
- card->qdio.c_q->bufs[i].buffer;
-
- queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
- }
-}
-
static int qeth_qdio_establish(struct qeth_card *card)
{
+ struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
+ struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
struct qdio_initialize init_data;
char *qib_param_field;
- struct qdio_buffer **in_sbal_ptrs;
- void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
- struct qdio_buffer **out_sbal_ptrs;
- int i, j, k;
+ unsigned int i;
int rc = 0;
QETH_CARD_TEXT(card, 2, "qdioest");
@@ -4837,44 +4832,14 @@ static int qeth_qdio_establish(struct qeth_card *card)
qeth_create_qib_param_field(card, qib_param_field);
qeth_create_qib_param_field_blkt(card, qib_param_field);
- in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
- sizeof(void *),
- GFP_KERNEL);
- if (!in_sbal_ptrs) {
- rc = -ENOMEM;
- goto out_free_qib_param;
- }
-
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
- in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
-
- queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
- GFP_KERNEL);
- if (!queue_start_poll) {
- rc = -ENOMEM;
- goto out_free_in_sbals;
- }
- for (i = 0; i < card->qdio.no_in_queues; ++i)
- queue_start_poll[i] = qeth_qdio_start_poll;
-
- qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
-
- out_sbal_ptrs =
- kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
- sizeof(void *),
- GFP_KERNEL);
- if (!out_sbal_ptrs) {
- rc = -ENOMEM;
- goto out_free_queue_start_poll;
- }
+ in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
+ if (card->options.cq == QETH_CQ_ENABLED)
+ in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
- for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
- out_sbal_ptrs[k] =
- card->qdio.out_qs[i]->bufs[j]->buffer;
+ for (i = 0; i < card->qdio.no_out_queues; i++)
+ out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
memset(&init_data, 0, sizeof(struct qdio_initialize));
- init_data.cdev = CARD_DDEV(card);
init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
@@ -4883,7 +4848,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = qeth_qdio_input_handler;
init_data.output_handler = qeth_qdio_output_handler;
- init_data.queue_start_poll_array = queue_start_poll;
+ init_data.irq_poll = qeth_qdio_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
@@ -4892,12 +4857,13 @@ static int qeth_qdio_establish(struct qeth_card *card)
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
- rc = qdio_allocate(&init_data);
+ rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
+ init_data.no_output_qs);
if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
goto out;
}
- rc = qdio_establish(&init_data);
+ rc = qdio_establish(CARD_DDEV(card), &init_data);
if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
qdio_free(CARD_DDEV(card));
@@ -4915,12 +4881,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
break;
}
out:
- kfree(out_sbal_ptrs);
-out_free_queue_start_poll:
- kfree(queue_start_poll);
-out_free_in_sbals:
- kfree(in_sbal_ptrs);
-out_free_qib_param:
kfree(qib_param_field);
out_free_nothing:
return rc;
@@ -4952,12 +4912,16 @@ static struct ccw_device_id qeth_ids[] = {
.driver_info = QETH_CARD_TYPE_OSD},
{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
.driver_info = QETH_CARD_TYPE_IQD},
+#ifdef CONFIG_QETH_OSN
{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
.driver_info = QETH_CARD_TYPE_OSN},
+#endif
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
.driver_info = QETH_CARD_TYPE_OSM},
+#ifdef CONFIG_QETH_OSX
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
.driver_info = QETH_CARD_TYPE_OSX},
+#endif
{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
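
Guarding individual table entries with #ifdef means a kernel built without these options never probes OSN/OSX devices at all; together with the IS_OSN()/IS_OSX() fallbacks added in qeth_core_mpc.h further down, the related code paths become constant-false and drop out. A reduced, compilable model of the pattern:

    #include <stdio.h>

    struct dev_id { unsigned int type; const char *name; };

    /* Entries compiled in only when the matching option is set; the
     * empty terminator mirrors the {}, sentinel above.
     */
    static const struct dev_id ids[] = {
            { 0x01, "OSD" },
            { 0x05, "IQD" },
    #ifdef CONFIG_QETH_OSN
            { 0x06, "OSN" },
    #endif
            { 0, NULL },
    };

    int main(void)
    {
            for (const struct dev_id *id = ids; id->name; id++)
                    printf("%#x -> %s\n", id->type, id->name);
            return 0;
    }
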
@@ -5012,9 +4976,9 @@ retriable:
else
goto retry;
}
+
qeth_determine_capabilities(card);
- qeth_init_tokens(card);
- qeth_init_func_level(card);
+ qeth_idx_init(card);
rc = qeth_idx_activate_read_channel(card);
if (rc == -EINTR) {
@@ -5324,13 +5288,13 @@ static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
}
static int qeth_extract_skb(struct qeth_card *card,
- struct qeth_qdio_buffer *qethbuffer,
- struct qdio_buffer_element **__element,
+ struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
int *__offset)
{
- struct qdio_buffer_element *element = *__element;
+ struct qeth_priv *priv = netdev_priv(card->dev);
struct qdio_buffer *buffer = qethbuffer->buffer;
struct napi_struct *napi = &card->napi;
+ struct qdio_buffer_element *element;
unsigned int linear_len = 0;
bool uses_frags = false;
int offset = *__offset;
@@ -5340,6 +5304,8 @@ static int qeth_extract_skb(struct qeth_card *card,
struct sk_buff *skb;
int skb_len = 0;
+ element = &buffer->element[*element_no];
+
next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
@@ -5404,7 +5370,7 @@ next_packet:
}
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
- (skb_len > card->options.rx_sg_cb &&
+ (skb_len > READ_ONCE(priv->rx_copybreak) &&
!atomic_read(&card->force_alloc_skb) &&
!IS_OSN(card));
@@ -5495,22 +5461,20 @@ walk_packet:
if (!skb)
goto next_packet;
- *__element = element;
+ *element_no = element - &buffer->element[0];
*__offset = offset;
qeth_receive_skb(card, skb, hdr, uses_frags);
return 0;
}
-static int qeth_extract_skbs(struct qeth_card *card, int budget,
- struct qeth_qdio_buffer *buf, bool *done)
+static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
+ struct qeth_qdio_buffer *buf, bool *done)
{
- int work_done = 0;
-
- *done = false;
+ unsigned int work_done = 0;
while (budget) {
- if (qeth_extract_skb(card, buf, &card->rx.b_element,
+ if (qeth_extract_skb(card, buf, &card->rx.buf_element,
&card->rx.e_offset)) {
*done = true;
break;
@@ -5523,15 +5487,16 @@ static int qeth_extract_skbs(struct qeth_card *card, int budget,
return work_done;
}
-int qeth_poll(struct napi_struct *napi, int budget)
+static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
- struct qeth_card *card = container_of(napi, struct qeth_card, napi);
- int work_done = 0;
- struct qeth_qdio_buffer *buffer;
- int new_budget = budget;
- bool done;
+ unsigned int work_done = 0;
- while (1) {
+ while (budget > 0) {
+ struct qeth_qdio_buffer *buffer;
+ unsigned int skbs_done = 0;
+ bool done = false;
+
+ /* Fetch completed RX buffers: */
if (!card->rx.b_count) {
card->rx.qdio_err = 0;
card->rx.b_count = qdio_get_next_buffers(
@@ -5541,50 +5506,73 @@ int qeth_poll(struct napi_struct *napi, int budget)
card->rx.b_count = 0;
break;
}
- card->rx.b_element =
- &card->qdio.in_q->bufs[card->rx.b_index]
- .buffer->element[0];
- card->rx.e_offset = 0;
}
- while (card->rx.b_count) {
- buffer = &card->qdio.in_q->bufs[card->rx.b_index];
- if (!(card->rx.qdio_err &&
- qeth_check_qdio_errors(card, buffer->buffer,
- card->rx.qdio_err, "qinerr")))
- work_done += qeth_extract_skbs(card, new_budget,
- buffer, &done);
- else
- done = true;
-
- if (done) {
- QETH_CARD_STAT_INC(card, rx_bufs);
- qeth_put_buffer_pool_entry(card,
- buffer->pool_entry);
- qeth_queue_input_buffer(card, card->rx.b_index);
- card->rx.b_count--;
- if (card->rx.b_count) {
- card->rx.b_index =
- QDIO_BUFNR(card->rx.b_index + 1);
- card->rx.b_element =
- &card->qdio.in_q
- ->bufs[card->rx.b_index]
- .buffer->element[0];
- card->rx.e_offset = 0;
- }
- }
+ /* Process one completed RX buffer: */
+ buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+ if (!(card->rx.qdio_err &&
+ qeth_check_qdio_errors(card, buffer->buffer,
+ card->rx.qdio_err, "qinerr")))
+ skbs_done = qeth_extract_skbs(card, budget, buffer,
+ &done);
+ else
+ done = true;
- if (work_done >= budget)
- goto out;
- else
- new_budget = budget - work_done;
+ work_done += skbs_done;
+ budget -= skbs_done;
+
+ if (done) {
+ QETH_CARD_STAT_INC(card, rx_bufs);
+ qeth_put_buffer_pool_entry(card, buffer->pool_entry);
+ qeth_queue_input_buffer(card, card->rx.b_index);
+ card->rx.b_count--;
+
+ /* Step forward to next buffer: */
+ card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
+ card->rx.buf_element = 0;
+ card->rx.e_offset = 0;
}
}
+ return work_done;
+}
+
+static void qeth_cq_poll(struct qeth_card *card)
+{
+ unsigned int work_done = 0;
+
+ while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
+ unsigned int start, error;
+ int completed;
+
+ completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
+ &error);
+ if (completed <= 0)
+ return;
+
+ qeth_qdio_cq_handler(card, error, 1, start, completed);
+ work_done += completed;
+ }
+}
+
+int qeth_poll(struct napi_struct *napi, int budget)
+{
+ struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+ unsigned int work_done;
+
+ work_done = qeth_rx_poll(card, budget);
+
+ if (card->options.cq == QETH_CQ_ENABLED)
+ qeth_cq_poll(card);
+
+ /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
+ if (budget && work_done >= budget)
+ return work_done;
+
if (napi_complete_done(napi, work_done) &&
- qdio_start_irq(CARD_DDEV(card), 0))
+ qdio_start_irq(CARD_DDEV(card)))
napi_schedule(napi);
-out:
+
return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
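
The rewritten qeth_poll() has the textbook NAPI shape: consume at most budget, and only when work stayed below budget call napi_complete_done() and re-enable the interrupt, rescheduling if completions raced in meanwhile (qdio_start_irq() returning nonzero plays that role above). A compact model of the control flow, with stubs in place of the qdio/NAPI primitives:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for driver/NAPI primitives: */
    static unsigned int rx_poll(int budget) { return budget ? 3 : 0; }
    static bool napi_complete_done(unsigned int done) { (void)done; return true; }
    static bool enable_irq_sees_pending_work(void) { return false; }
    static void reschedule_poll(void) { puts("rescheduled"); }

    static unsigned int poll(int budget)
    {
            unsigned int work_done = rx_poll(budget);

            /* Budget exhausted: stay in polling mode, called again soon. */
            if (budget && (int)work_done >= budget)
                    return work_done;

            /* Re-enable the IRQ; if work slipped in, poll once more. */
            if (napi_complete_done(work_done) &&
                enable_irq_sees_pending_work())
                    reschedule_poll();
            return work_done;
    }

    int main(void)
    {
            printf("work done: %u\n", poll(64));
            return 0;
    }
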
@@ -5658,7 +5646,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
if (completed <= 0) {
/* Ensure we see TX completion for pending work: */
if (napi_complete_done(napi, 0))
- qeth_tx_arm_timer(queue);
+ qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
return 0;
}
@@ -5667,7 +5655,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
unsigned int bidx = QDIO_BUFNR(i);
buffer = queue->bufs[bidx];
- packets += skb_queue_len(&buffer->skb_list);
+ packets += buffer->frames;
bytes += buffer->bytes;
qeth_handle_send_error(card, buffer, error);
@@ -5953,25 +5941,30 @@ static void qeth_clear_dbf_list(void)
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
struct net_device *dev;
+ struct qeth_priv *priv;
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
- dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
- ether_setup, QETH_MAX_QUEUES, 1);
+ dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
+ ether_setup, QETH_MAX_OUT_QUEUES, 1);
break;
case QETH_CARD_TYPE_OSM:
- dev = alloc_etherdev(0);
+ dev = alloc_etherdev(sizeof(*priv));
break;
case QETH_CARD_TYPE_OSN:
- dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
+ dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
+ ether_setup);
break;
default:
- dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
+ dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
}
if (!dev)
return NULL;
+ priv = netdev_priv(dev);
+ priv->rx_copybreak = QETH_RX_COPYBREAK;
+
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
dev->min_mtu = IS_OSN(card) ? 64 : 576;
@@ -5981,22 +5974,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- if (IS_OSN(card)) {
- dev->ethtool_ops = &qeth_osn_ethtool_ops;
- } else {
- dev->ethtool_ops = &qeth_ethtool_ops;
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->hw_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_SG;
- if (IS_IQD(card)) {
- dev->features |= NETIF_F_SG;
- if (netif_set_real_num_tx_queues(dev,
- QETH_IQD_MIN_TXQ)) {
- free_netdev(dev);
- return NULL;
- }
- }
- }
+ dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
+ &qeth_ethtool_ops;
return dev;
}
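
Passing sizeof(*priv) to the allocators reserves a driver-private area directly behind the net_device, which netdev_priv() then hands back; that is how struct qeth_priv (rx_copybreak) gets per-device storage without a second allocation. A userspace model of the trailing-private-area idiom (the alignment padding that the real netdev_priv() applies is glossed over, and the default value is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct net_device_model { char name[16]; /* ... core fields ... */ };
    struct priv_model { unsigned int rx_copybreak; };

    /* One allocation carries the device followed by the private area. */
    static struct net_device_model *alloc_netdev_model(size_t priv_size)
    {
            return calloc(1, sizeof(struct net_device_model) + priv_size);
    }

    /* Mirrors netdev_priv(): private area starts right after the device. */
    static void *netdev_priv_model(struct net_device_model *dev)
    {
            return dev + 1;
    }

    int main(void)
    {
            struct net_device_model *dev =
                    alloc_netdev_model(sizeof(struct priv_model));
            struct priv_model *priv;

            if (!dev)
                    return 1;
            priv = netdev_priv_model(dev);
            priv->rx_copybreak = 768; /* hypothetical default */
            printf("copybreak=%u\n", priv->rx_copybreak);
            free(dev);
            return 0;
    }
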
@@ -6012,6 +5991,28 @@ struct net_device *qeth_clone_netdev(struct net_device *orig)
return clone;
}
+int qeth_setup_netdev(struct qeth_card *card)
+{
+ struct net_device *dev = card->dev;
+ unsigned int num_tx_queues;
+
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+
+ if (IS_IQD(card)) {
+ dev->features |= NETIF_F_SG;
+ num_tx_queues = QETH_IQD_MIN_TXQ;
+ } else if (IS_VM_NIC(card)) {
+ num_tx_queues = 1;
+ } else {
+ num_tx_queues = dev->real_num_tx_queues;
+ }
+
+ return qeth_set_real_num_tx_queues(card, num_tx_queues);
+}
+EXPORT_SYMBOL_GPL(qeth_setup_netdev);
+
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -6051,12 +6052,13 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}
+ qeth_determine_capabilities(card);
+ qeth_set_blkt_defaults(card);
+
card->qdio.no_out_queues = card->dev->num_tx_queues;
rc = qeth_update_from_chp_desc(card);
if (rc)
goto err_chp_desc;
- qeth_determine_capabilities(card);
- qeth_set_blkt_defaults(card);
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
@@ -6241,9 +6243,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *mii_data;
int rc = 0;
- if (!card)
- return -ENODEV;
-
switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
@@ -6623,12 +6622,59 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
+#define TC_IQD_UCAST 0
+static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
+ unsigned int ucast_txqs)
+{
+ unsigned int prio;
+
+ /* IQD requires mcast traffic to be placed on a dedicated queue, and
+ * qeth_iqd_select_queue() deals with this.
+ * For unicast traffic, we defer the queue selection to the stack.
+ * By installing a trivial prio map that spans over only the unicast
+ * queues, we can encourage the stack to spread the ucast traffic evenly
+ * without selecting the mcast queue.
+ */
+
+ /* One traffic class, spanning over all active ucast queues: */
+ netdev_set_num_tc(dev, 1);
+ netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
+ QETH_IQD_MIN_UCAST_TXQ);
+
+ /* Map all priorities to this traffic class: */
+ for (prio = 0; prio <= TC_BITMASK; prio++)
+ netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
+}
+
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
+{
+ struct net_device *dev = card->dev;
+ int rc;
+
+ /* Per netif_setup_tc(), adjust the mapping first: */
+ if (IS_IQD(card))
+ qeth_iqd_set_prio_tc_map(dev, count - 1);
+
+ rc = netif_set_real_num_tx_queues(dev, count);
+
+ if (rc && IS_IQD(card))
+ qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
+
+ return rc;
+}
+
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev)
{
+ u16 txq;
+
if (cast_type != RTN_UNICAST)
return QETH_IQD_MCAST_TXQ;
- return QETH_IQD_MIN_UCAST_TXQ;
+ if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
+ return QETH_IQD_MIN_UCAST_TXQ;
+
+ txq = netdev_pick_tx(dev, skb, sb_dev);
+ return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
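
With several ucast TX queues, IQD keeps the mcast queue out of the stack's hands: non-unicast always lands on QETH_IQD_MCAST_TXQ, and a stack pick that would hit it is bumped to the first ucast queue. A self-contained model, assuming qeth's numbering (mcast on txq 0, ucast from txq 1, and a two-queue minimum as QETH_IQD_MIN_TXQ):

    #include <stdbool.h>
    #include <stdio.h>

    #define MCAST_TXQ     0u
    #define MIN_UCAST_TXQ 1u

    /* hash stands in for netdev_pick_tx()'s flow-hash based choice */
    static unsigned int iqd_select_txq(bool is_unicast, unsigned int hash,
                                       unsigned int real_num_tx_queues)
    {
            unsigned int txq;

            if (!is_unicast)
                    return MCAST_TXQ;
            /* Only one ucast queue configured: nothing to pick. */
            if (real_num_tx_queues == 2)
                    return MIN_UCAST_TXQ;

            txq = hash % real_num_tx_queues;
            return txq == MCAST_TXQ ? MIN_UCAST_TXQ : txq;
    }

    int main(void)
    {
            printf("%u\n", iqd_select_txq(false, 7, 4)); /* 0: mcast queue */
            printf("%u\n", iqd_select_txq(true, 4, 4));  /* 0 bumped to 1 */
            printf("%u\n", iqd_select_txq(true, 3, 4));  /* 3 */
            return 0;
    }
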
@@ -6638,9 +6684,6 @@ int qeth_open(struct net_device *dev)
QETH_CARD_TEXT(card, 4, "qethopen");
- if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
- return -EIO;
-
card->data.state = CH_STATE_UP;
netif_tx_start_all_queues(dev);
@@ -6674,22 +6717,24 @@ int qeth_stop(struct net_device *dev)
unsigned int i;
/* Quiesce the NAPI instances: */
- qeth_for_each_output_queue(card, queue, i) {
+ qeth_for_each_output_queue(card, queue, i)
napi_disable(&queue->napi);
- del_timer_sync(&queue->timer);
- }
/* Stop .ndo_start_xmit, might still access queue->napi. */
netif_tx_disable(dev);
- /* Queues may get re-allocated, so remove the NAPIs here. */
- qeth_for_each_output_queue(card, queue, i)
+ qeth_for_each_output_queue(card, queue, i) {
+ del_timer_sync(&queue->timer);
+ /* Queues may get re-allocated, so remove the NAPIs. */
netif_napi_del(&queue->napi);
+ }
} else {
netif_tx_disable(dev);
}
napi_disable(&card->napi);
+ qdio_stop_irq(CARD_DDEV(card));
+
return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 3865f7258449..d89a04bfd8b0 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -74,8 +74,19 @@ enum qeth_card_types {
#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
+
+#ifdef CONFIG_QETH_OSN
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
+#else
+#define IS_OSN(card) false
+#endif
+
+#ifdef CONFIG_QETH_OSX
#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
+#else
+#define IS_OSX(card) false
+#endif
+
#define IS_VM_NIC(card) ((card)->info.is_vm_nic)
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -93,10 +104,6 @@ enum qeth_link_types {
QETH_LINK_TYPE_LANE = 0x88,
};
-/*
- * Routing stuff
- */
-#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
enum qeth_routing_types {
/* TODO: set to bit flag used in IPA Command */
NO_ROUTER = 0,
@@ -427,7 +434,6 @@ struct qeth_ipacmd_setassparms {
struct qeth_arp_cache_entry arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
- __u8 ip[16];
} data;
} __attribute__ ((packed));
@@ -550,8 +556,9 @@ struct qeth_ipacmd_setadpparms {
/* CREATE_ADDR IPA Command: ***********************************************/
struct qeth_create_destroy_address {
- __u8 unique_id[8];
-} __attribute__ ((packed));
+ u8 mac_addr[ETH_ALEN];
+ u16 uid;
+};
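
The CREATE_ADDR payload keeps its 8-byte wire size while gaining typed fields: the 6-byte MAC, then the 16-bit uid that previously hid at unique_id[6] (see the casts removed from qeth_l3_main.c below). A quick compile-time check of that layout claim, with ETH_ALEN = 6 as in the kernel:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define ETH_ALEN 6

    struct create_destroy_addr_old {
            uint8_t unique_id[8];
    };

    struct create_destroy_addr_new {
            uint8_t  mac_addr[ETH_ALEN];
            uint16_t uid;
    };

    /* Same wire size, and uid sits where unique_id[6..7] used to be: */
    static_assert(sizeof(struct create_destroy_addr_new) ==
                  sizeof(struct create_destroy_addr_old), "size changed");
    static_assert(offsetof(struct create_destroy_addr_new, uid) == 6,
                  "uid moved");

    int main(void) { return 0; }
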
/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 78cae61bc924..d7e429f6631e 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -176,7 +176,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
- if (IS_IQD(card))
+ if (IS_IQD(card) || IS_VM_NIC(card))
return -EOPNOTSUPP;
mutex_lock(&card->conf_mutex);
@@ -211,16 +211,16 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "no_prio_queueing:0")) {
- card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 0;
} else if (sysfs_streq(buf, "no_prio_queueing:1")) {
- card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 1;
} else if (sysfs_streq(buf, "no_prio_queueing:2")) {
- card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 2;
} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
- card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 3;
} else if (sysfs_streq(buf, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index ab59bc975719..ebdc03210608 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("TSO bytes", tso_bytes),
QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
QETH_TXQ_STAT("Queue stopped", stopped),
+ QETH_TXQ_STAT("Doorbell", doorbell),
+ QETH_TXQ_STAT("IRQ for frames", coal_frames),
QETH_TXQ_STAT("Completion yield", completion_yield),
QETH_TXQ_STAT("Completion timer", completion_timer),
};
@@ -108,6 +110,38 @@ static void qeth_get_ethtool_stats(struct net_device *dev,
txq_stats, TXQ_STATS_LEN);
}
+static void __qeth_set_coalesce(struct net_device *dev,
+ struct qeth_qdio_out_q *queue,
+ struct ethtool_coalesce *coal)
+{
+ WRITE_ONCE(queue->coalesce_usecs, coal->tx_coalesce_usecs);
+ WRITE_ONCE(queue->max_coalesced_frames, coal->tx_max_coalesced_frames);
+
+ if (coal->tx_coalesce_usecs &&
+ netif_running(dev) &&
+ !qeth_out_queue_is_empty(queue))
+ qeth_tx_arm_timer(queue, coal->tx_coalesce_usecs);
+}
+
+static int qeth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
+ if (!IS_IQD(card))
+ return -EOPNOTSUPP;
+
+ if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+ return -EINVAL;
+
+ qeth_for_each_output_queue(card, queue, i)
+ __qeth_set_coalesce(dev, queue, coal);
+
+ return 0;
+}
+
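
ethtool updates these knobs while qeth_flush_buffers() reads them locklessly on the TX hot path; the WRITE_ONCE()/READ_ONCE() pairing guarantees tear-free single-copy access. A userspace analogue using C11 relaxed atomics, which give the same per-variable guarantee:

    #include <stdatomic.h>
    #include <stdio.h>

    struct txq_knobs {
            _Atomic unsigned int coalesce_usecs;
            _Atomic unsigned int max_coalesced_frames;
    };

    /* Writer side: the ethtool handler. */
    static void set_coalesce(struct txq_knobs *q, unsigned int usecs,
                             unsigned int frames)
    {
            atomic_store_explicit(&q->coalesce_usecs, usecs,
                                  memory_order_relaxed);
            atomic_store_explicit(&q->max_coalesced_frames, frames,
                                  memory_order_relaxed);
    }

    /* Reader side: the hot TX path, no lock taken. */
    static unsigned int get_frames(struct txq_knobs *q)
    {
            return atomic_load_explicit(&q->max_coalesced_frames,
                                        memory_order_relaxed);
    }

    int main(void)
    {
            struct txq_knobs q;

            set_coalesce(&q, 25, 64);
            printf("frames=%u\n", get_frames(&q));
            return 0;
    }
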
static void qeth_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
@@ -153,7 +187,6 @@ static void qeth_get_drvinfo(struct net_device *dev,
strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
sizeof(info->driver));
- strlcpy(info->version, "1.0", sizeof(info->version));
strlcpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
@@ -175,6 +208,112 @@ static void qeth_get_channels(struct net_device *dev,
channels->combined_count = 0;
}
+static int qeth_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (channels->rx_count == 0 || channels->tx_count == 0)
+ return -EINVAL;
+ if (channels->tx_count > card->qdio.no_out_queues)
+ return -EINVAL;
+
+ if (IS_IQD(card)) {
+ if (channels->tx_count < QETH_IQD_MIN_TXQ)
+ return -EINVAL;
+
+ /* Reject downgrade while running. It could push displaced
+ * ucast flows onto txq0, which is reserved for mcast.
+ */
+ if (netif_running(dev) &&
+ channels->tx_count < dev->real_num_tx_queues)
+ return -EPERM;
+ } else {
+ /* OSA still uses the legacy prio-queue mechanism: */
+ if (!IS_VM_NIC(card))
+ return -EOPNOTSUPP;
+ }
+
+ return qeth_set_real_num_tx_queues(card, channels->tx_count);
+}
+
+static int qeth_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (!IS_IQD(card))
+ return -EOPNOTSUPP;
+
+ return ethtool_op_get_ts_info(dev, info);
+}
+
+static int qeth_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qeth_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ WRITE_ONCE(priv->rx_copybreak, *(u32 *)data);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
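
rx_copybreak replaces the fixed QETH_RX_SG_CB cutoff that qeth_set_initial_options() used to install: frames up to the threshold are copied into a fresh linear skb, larger ones are attached as page fragments (see the use_rx_sg condition in qeth_extract_skb() above). A model of that decision, with the CQ and force-alloc special cases folded into booleans and the OSN exclusion omitted:

    #include <stdbool.h>
    #include <stdio.h>

    /* true : hand the data to the stack as page fragments (no copy)
     * false: memcpy into a small dedicated skb (cheap for tiny frames)
     */
    static bool use_rx_sg(unsigned int skb_len, unsigned int rx_copybreak,
                          bool cq_enabled, bool force_alloc_skb)
    {
            if (cq_enabled)
                    return true;
            return skb_len > rx_copybreak && !force_alloc_skb;
    }

    int main(void)
    {
            printf("%d\n", use_rx_sg(128, 768, false, false));  /* 0: copy */
            printf("%d\n", use_rx_sg(1500, 768, false, false)); /* 1: frags */
            return 0;
    }
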
+static int qeth_get_per_queue_coalesce(struct net_device *dev, u32 __queue,
+ struct ethtool_coalesce *coal)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+
+ if (!IS_IQD(card))
+ return -EOPNOTSUPP;
+
+ if (__queue >= card->qdio.no_out_queues)
+ return -EINVAL;
+
+ queue = card->qdio.out_qs[__queue];
+
+ coal->tx_coalesce_usecs = queue->coalesce_usecs;
+ coal->tx_max_coalesced_frames = queue->max_coalesced_frames;
+ return 0;
+}
+
+static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (!IS_IQD(card))
+ return -EOPNOTSUPP;
+
+ if (queue >= card->qdio.no_out_queues)
+ return -EINVAL;
+
+ if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+ return -EINVAL;
+
+ __qeth_set_coalesce(dev, card->qdio.out_qs[queue], coal);
+ return 0;
+}
+
/* Helper function to fill 'advertising' and 'supported' which are the same. */
/* Autoneg and full-duplex are supported and advertised unconditionally. */
/* Always advertise and support all speeds up to specified, and only one */
@@ -374,13 +513,22 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
}
const struct ethtool_ops qeth_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
.get_link = ethtool_op_get_link,
+ .set_coalesce = qeth_set_coalesce,
.get_ringparam = qeth_get_ringparam,
.get_strings = qeth_get_strings,
.get_ethtool_stats = qeth_get_ethtool_stats,
.get_sset_count = qeth_get_sset_count,
.get_drvinfo = qeth_get_drvinfo,
.get_channels = qeth_get_channels,
+ .set_channels = qeth_set_channels,
+ .get_ts_info = qeth_get_ts_info,
+ .get_tunable = qeth_get_tunable,
+ .set_tunable = qeth_set_tunable,
+ .get_per_queue_coalesce = qeth_get_per_queue_coalesce,
+ .set_per_queue_coalesce = qeth_set_per_queue_coalesce,
.get_link_ksettings = qeth_get_link_ksettings,
};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8fb29371788b..0bd5b09e7a22 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -52,11 +52,11 @@ static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
break;
case IPA_RC_L2_DUP_MAC:
case IPA_RC_L2_DUP_LAYER3_MAC:
- rc = -EEXIST;
+ rc = -EADDRINUSE;
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
- rc = -EPERM;
+ rc = -EADDRNOTAVAIL;
break;
case IPA_RC_L2_MAC_NOT_FOUND:
rc = -ENOENT;
@@ -105,11 +105,11 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
"MAC address %pM successfully registered\n", mac);
} else {
switch (rc) {
- case -EEXIST:
+ case -EADDRINUSE:
dev_warn(&card->gdev->dev,
"MAC address %pM already exists\n", mac);
break;
- case -EPERM:
+ case -EADDRNOTAVAIL:
dev_warn(&card->gdev->dev,
"MAC address %pM is not authorized\n", mac);
break;
@@ -126,7 +126,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Wmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
- if (rc == -EEXIST)
+ if (rc == -EADDRINUSE)
QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
CARD_DEVID(card));
else if (rc)
@@ -291,7 +291,6 @@ static void qeth_l2_stop_card(struct qeth_card *card)
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
card->info.promisc_mode = 0;
}
@@ -337,14 +336,16 @@ static void qeth_l2_register_dev_addr(struct qeth_card *card)
qeth_l2_request_initial_mac(card);
if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ card->info.dev_addr_is_registered = 1;
+ else
+ card->info.dev_addr_is_registered = 0;
}
static int qeth_l2_validate_addr(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ if (card->info.dev_addr_is_registered)
return eth_validate_addr(dev);
QETH_CARD_TEXT(card, 4, "nomacadr");
@@ -370,7 +371,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
/* don't register the same address twice */
if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
- (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ card->info.dev_addr_is_registered)
return 0;
/* add the new address, switch over, drop the old */
@@ -380,9 +381,9 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
ether_addr_copy(old_addr, dev->dev_addr);
ether_addr_copy(dev->dev_addr, addr->sa_data);
- if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ if (card->info.dev_addr_is_registered)
qeth_l2_remove_mac(card, old_addr);
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ card->info.dev_addr_is_registered = 1;
return 0;
}
@@ -499,6 +500,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue)
{
+ gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
addr_t end = (addr_t)(skb->data + sizeof(*hdr));
addr_t start = (addr_t)skb->data;
@@ -511,7 +513,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
if (qeth_get_elements_for_range(start, end) > 1) {
/* Misaligned HW header, move it to its own buffer element. */
- hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
if (!hdr)
return -ENOMEM;
hd_len = sizeof(*hdr);
@@ -570,7 +572,9 @@ static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
return qeth_iqd_select_queue(dev, skb,
qeth_get_ether_cast_type(skb),
sb_dev);
- return qeth_get_priority_queue(card, skb);
+
+ return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
+ qeth_get_priority_queue(card, skb);
}
static const struct device_type qeth_l2_devtype = {
@@ -583,6 +587,9 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
+ if (IS_OSN(card))
+ dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
+
qeth_l2_vnicc_set_defaults(card);
mutex_init(&card->sbp_lock);
@@ -610,7 +617,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
- if (qeth_netdev_is_registered(card->dev))
+ if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
}
@@ -648,7 +655,7 @@ static const struct net_device_ops qeth_osn_netdev_ops = {
.ndo_tx_timeout = qeth_tx_timeout,
};
-static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
+static int qeth_l2_setup_netdev(struct qeth_card *card)
{
int rc;
@@ -658,6 +665,10 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
goto add_napi;
}
+ rc = qeth_setup_netdev(card);
+ if (rc)
+ return rc;
+
card->dev->needed_headroom = sizeof(struct qeth_hdr);
card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -704,13 +715,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
add_napi:
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
- rc = register_netdev(card->dev);
- if (!rc && carrier_ok)
- netif_carrier_on(card->dev);
-
- if (rc)
- card->dev->netdev_ops = NULL;
- return rc;
+ return register_netdev(card->dev);
}
static void qeth_l2_trace_features(struct qeth_card *card)
@@ -783,10 +788,13 @@ static int qeth_l2_set_online(struct qeth_card *card)
qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (!qeth_netdev_is_registered(dev)) {
- rc = qeth_l2_setup_netdev(card, carrier_ok);
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rc = qeth_l2_setup_netdev(card);
if (rc)
goto out_remove;
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
} else {
rtnl_lock();
if (carrier_ok)
@@ -864,6 +872,7 @@ struct qeth_discipline qeth_l2_discipline = {
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
+#ifdef CONFIG_QETH_OSN
static void qeth_osn_assist_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
@@ -940,6 +949,7 @@ void qeth_osn_deregister(struct net_device *dev)
return;
}
EXPORT_SYMBOL(qeth_osn_deregister);
+#endif
/* SETBRIDGEPORT support, async notifications */
@@ -1512,8 +1522,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
struct ccw_device *ddev;
struct subchannel_id schid;
- if (!card)
- return -EINVAL;
if (!card->options.sbp.supported_funcs)
return -EOPNOTSUPP;
ddev = CARD_DDEV(card);
@@ -1568,23 +1576,11 @@ static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
return rc;
}
-/* generic VNICC request call back control */
-struct _qeth_l2_vnicc_request_cbctl {
- struct {
- union{
- u32 *sup_cmds;
- u32 *timeout;
- };
- } result;
-};
-
/* generic VNICC request call back */
static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
- struct _qeth_l2_vnicc_request_cbctl *cbctl =
- (struct _qeth_l2_vnicc_request_cbctl *) reply->param;
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
@@ -1597,9 +1593,9 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
if (sub_cmd == IPA_VNICC_QUERY_CMDS)
- *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds;
+ *(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
- *cbctl->result.timeout = rep->data.getset_timeout.timeout;
+ *(u32 *)reply->param = rep->data.getset_timeout.timeout;
return 0;
}
@@ -1640,7 +1636,6 @@ static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
u32 *sup_cmds)
{
- struct _qeth_l2_vnicc_request_cbctl cbctl;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccqcm");
@@ -1651,10 +1646,7 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
- /* prepare callback control */
- cbctl.result.sup_cmds = sup_cmds;
-
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
}
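
Dropping the _qeth_l2_vnicc_request_cbctl wrapper works because each remaining caller wants exactly one u32 back, so the result pointer itself can travel as reply->param. A reduced model of the callback contract (names and signatures here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* The completion path invokes the callback with the caller-supplied
     * opaque pointer; the callback writes its single u32 result through it.
     */
    typedef int (*reply_cb_t)(void *param, uint32_t hw_result);

    static int vnicc_request_cb(void *param, uint32_t hw_result)
    {
            *(uint32_t *)param = hw_result;
            return 0;
    }

    static int send_cmd(reply_cb_t cb, void *param)
    {
            return cb(param, 0xdeadbeef); /* pretend the HW answered */
    }

    int main(void)
    {
            uint32_t sup_cmds = 0;

            send_cmd(vnicc_request_cb, &sup_cmds);
            printf("sup_cmds=%#x\n", sup_cmds);
            return 0;
    }
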
/* VNICC enable/disable characteristic request */
@@ -1678,7 +1670,6 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
u32 cmd, u32 *timeout)
{
struct qeth_vnicc_getset_timeout *getset_timeout;
- struct _qeth_l2_vnicc_request_cbctl cbctl;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccgst");
@@ -1693,11 +1684,7 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
if (cmd == IPA_VNICC_SET_TIMEOUT)
getset_timeout->timeout = *timeout;
- /* prepare callback control */
- if (cmd == IPA_VNICC_GET_TIMEOUT)
- cbctl.result.timeout = timeout;
-
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}
/* set current VNICC flag state; called from sysfs store function */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 82f800d1d7b3..0742a749d26e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -920,9 +920,11 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
if (cmd->hdr.return_code)
return -EIO;
+ if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr))
+ return -EADDRNOTAVAIL;
ether_addr_copy(card->dev->dev_addr,
- cmd->data.create_destroy_addr.unique_id);
+ cmd->data.create_destroy_addr.mac_addr);
return 0;
}
@@ -930,7 +932,6 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
{
int rc = 0;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "hsrmac");
@@ -938,9 +939,6 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
return -ENOMEM;
- cmd = __ipa_cmd(iob);
- *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
- card->info.unique_id;
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
NULL);
@@ -951,43 +949,36 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ u16 *uid = reply->param;
if (cmd->hdr.return_code == 0) {
- card->info.unique_id = *((__u16 *)
- &cmd->data.create_destroy_addr.unique_id[6]);
+ *uid = cmd->data.create_destroy_addr.uid;
return 0;
}
- card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
- UNIQUE_ID_NOT_BY_CARD;
dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
return -EIO;
}
-static int qeth_l3_get_unique_id(struct qeth_card *card)
+static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid)
{
- int rc = 0;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "guniqeid");
- if (!qeth_is_supported(card, IPA_IPV6)) {
- card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
- UNIQUE_ID_NOT_BY_CARD;
- return 0;
- }
+ if (!qeth_is_supported(card, IPA_IPV6))
+ goto out;
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
- return -ENOMEM;
- cmd = __ipa_cmd(iob);
- *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
- card->info.unique_id;
+ goto out;
- rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
- return rc;
+ __ipa_cmd(iob)->data.create_destroy_addr.uid = uid;
+ qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid);
+
+out:
+ return uid;
}
static int
@@ -1886,7 +1877,8 @@ static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct qeth_card *card = dev->ml_priv;
- return qeth_get_priority_queue(card, skb);
+ return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
+ qeth_get_priority_queue(card, skb);
}
static const struct net_device_ops qeth_l3_netdev_ops = {
@@ -1923,11 +1915,16 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
-static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
+static int qeth_l3_setup_netdev(struct qeth_card *card)
{
+ struct net_device *dev = card->dev;
unsigned int headroom;
int rc;
+ rc = qeth_setup_netdev(card);
+ if (rc)
+ return rc;
+
if (IS_OSD(card) || IS_OSX(card)) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
@@ -1938,9 +1935,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
/* IPv6 address autoconfiguration stuff */
- qeth_l3_get_unique_id(card);
- if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
- card->dev->dev_id = card->info.unique_id & 0xffff;
+ dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id);
if (!IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
@@ -1973,7 +1968,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
- goto out;
+ return rc;
} else
return -ENODEV;
@@ -1988,14 +1983,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
- rc = register_netdev(card->dev);
- if (!rc && carrier_ok)
- netif_carrier_on(card->dev);
-
-out:
- if (rc)
- card->dev->netdev_ops = NULL;
- return rc;
+ return register_netdev(card->dev);
}
static const struct device_type qeth_l3_devtype = {
@@ -2042,7 +2030,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
- if (qeth_netdev_is_registered(card->dev))
+ if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
flush_workqueue(card->cmd_wq);
@@ -2089,10 +2077,13 @@ static int qeth_l3_set_online(struct qeth_card *card)
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_recover_ip(card);
- if (!qeth_netdev_is_registered(dev)) {
- rc = qeth_l3_setup_netdev(card, carrier_ok);
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rc = qeth_l3_setup_netdev(card);
if (rc)
goto out_remove;
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
} else {
rtnl_lock();
if (carrier_ok)
@@ -2204,9 +2195,6 @@ static int qeth_l3_ip_event(struct notifier_block *this,
struct qeth_ipaddr addr;
struct qeth_card *card;
- if (dev_net(dev) != &init_net)
- return NOTIFY_DONE;
-
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index a3d1c3bdfadb..dd0b39082534 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -133,40 +133,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
qeth_l3_dev_route6_store);
-static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
-}
-
-static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
- int i, rc = 0;
-
- mutex_lock(&card->conf_mutex);
- if (card->state != CARD_STATE_DOWN) {
- rc = -EPERM;
- goto out;
- }
-
- i = simple_strtoul(buf, &tmp, 16);
- if ((i == 0) || (i == 1))
- card->options.fake_broadcast = i;
- else
- rc = -EINVAL;
-out:
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
-}
-
-static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
- qeth_l3_dev_fake_broadcast_store);
-
static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -305,7 +271,6 @@ static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
static struct attribute *qeth_l3_device_attrs[] = {
&dev_attr_route4.attr,
&dev_attr_route6.attr,
- &dev_attr_fake_broadcast.attr,
&dev_attr_sniffer.attr,
&dev_attr_hsuid.attr,
NULL,
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 1234294700c4..673e42defb91 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -4,7 +4,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -104,6 +104,48 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
}
/**
+ * zfcp_dbf_hba_fsf_fces - trace event for fsf responses related to
+ * FC Endpoint Security (FCES)
+ * @tag: tag indicating which kind of FC Endpoint Security event has occurred
+ * @req: request for which a response was received
+ * @wwpn: remote port or ZFCP_DBF_INVALID_WWPN
+ * @fc_security_old: old FC Endpoint Security of FCP device or connection
+ * @fc_security_new: new FC Endpoint Security of FCP device or connection
+ */
+void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
+ u32 fc_security_old, u32 fc_security_new)
+{
+ struct zfcp_dbf *dbf = req->adapter->dbf;
+ struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+ struct fsf_qtcb_header *q_head = &req->qtcb->header;
+ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+ static int const level = 3;
+ unsigned long flags;
+
+ if (unlikely(!debug_level_enabled(dbf->hba, level)))
+ return;
+
+ spin_lock_irqsave(&dbf->hba_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_HBA_FCES;
+ rec->fsf_req_id = req->req_id;
+ rec->fsf_req_status = req->status;
+ rec->fsf_cmd = q_head->fsf_command;
+ rec->fsf_seq_no = q_pref->req_seq_no;
+ rec->u.fces.req_issued = req->issued;
+ rec->u.fces.fsf_status = q_head->fsf_status;
+ rec->u.fces.port_handle = q_head->port_handle;
+ rec->u.fces.wwpn = wwpn;
+ rec->u.fces.fc_security_old = fc_security_old;
+ rec->u.fces.fc_security_new = fc_security_new;
+
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
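
zfcp_dbf_hba_fsf_fces() follows the usual s390dbf shape: return early when the trace level is disabled, then fill the shared scratch record under the buffer lock and push it to debug_event(). A stripped-down userspace model of that sequence (mutex and printf stand in for the s390 debug facility; tag and values are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct fces_rec {
            char tag[7];
            unsigned long long wwpn;
            unsigned int old_sec, new_sec;
    };

    static struct fces_rec scratch;          /* shared scratch record */
    static pthread_mutex_t scratch_lock = PTHREAD_MUTEX_INITIALIZER;
    static int trace_level = 3;

    static void debug_event(const struct fces_rec *rec)
    {
            printf("%.7s wwpn=%llx %u->%u\n", rec->tag, rec->wwpn,
                   rec->old_sec, rec->new_sec);
    }

    static void trace_fces(const char *tag, unsigned long long wwpn,
                           unsigned int old_sec, unsigned int new_sec)
    {
            if (trace_level < 3)    /* models debug_level_enabled() */
                    return;

            pthread_mutex_lock(&scratch_lock);
            memset(&scratch, 0, sizeof(scratch));
            memcpy(scratch.tag, tag, sizeof(scratch.tag));
            scratch.wwpn = wwpn;
            scratch.old_sec = old_sec;
            scratch.new_sec = new_sec;
            debug_event(&scratch);
            pthread_mutex_unlock(&scratch_lock);
    }

    int main(void)
    {
            trace_fces("fces001", 0x5005076812345678ULL, 0, 2);
            return 0;
    }
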
+/**
* zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request providing the unsolicited status
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 900c779cc39b..4d1435c573bc 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -3,7 +3,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2017
+ * Copyright IBM Corp. 2008, 2020
*/
#ifndef ZFCP_DBF_H
@@ -16,6 +16,7 @@
#define ZFCP_DBF_TAG_LEN 7
+#define ZFCP_DBF_INVALID_WWPN 0x0000000000000000ull
#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
enum zfcp_dbf_pseudo_erp_act_type {
@@ -158,17 +159,38 @@ struct zfcp_dbf_hba_uss {
} __packed;
/**
+ * struct zfcp_dbf_hba_fces - trace record for FC Endpoint Security
+ * @req_issued: timestamp when request was issued
+ * @fsf_status: fsf status
+ * @port_handle: handle for port
+ * @wwpn: remote FC port WWPN
+ * @fc_security_old: old FC Endpoint Security
+ * @fc_security_new: new FC Endpoint Security
+ */
+struct zfcp_dbf_hba_fces {
+ u64 req_issued;
+ u32 fsf_status;
+ u32 port_handle;
+ u64 wwpn;
+ u32 fc_security_old;
+ u32 fc_security_new;
+} __packed;
+
+/**
* enum zfcp_dbf_hba_id - HBA trace record identifier
* @ZFCP_DBF_HBA_RES: response trace record
* @ZFCP_DBF_HBA_USS: unsolicited status trace record
* @ZFCP_DBF_HBA_BIT: bit error trace record
* @ZFCP_DBF_HBA_BASIC: basic adapter event, only trace tag, no other data
+ * @ZFCP_DBF_HBA_FCES: FC Endpoint Security trace record
*/
enum zfcp_dbf_hba_id {
ZFCP_DBF_HBA_RES = 1,
ZFCP_DBF_HBA_USS = 2,
ZFCP_DBF_HBA_BIT = 3,
ZFCP_DBF_HBA_BASIC = 4,
+ ZFCP_DBF_HBA_FCES = 5,
};
/**
@@ -181,9 +203,10 @@ enum zfcp_dbf_hba_id {
* @fsf_seq_no: fsf sequence number
* @pl_len: length of payload stored as zfcp_dbf_pay
* @u: record type specific data
- * @u.res: data for fsf responses
- * @u.uss: data for unsolicited status buffer
- * @u.be: data for bit error unsolicited status buffer
+ * @u.res: data for fsf responses
+ * @u.uss: data for unsolicited status buffer
+ * @u.be: data for bit error unsolicited status buffer
+ * @u.fces: data for FC Endpoint Security
*/
struct zfcp_dbf_hba {
u8 id;
@@ -197,6 +220,7 @@ struct zfcp_dbf_hba {
struct zfcp_dbf_hba_res res;
struct zfcp_dbf_hba_uss uss;
struct fsf_bit_error_payload be;
+ struct zfcp_dbf_hba_fces fces;
} u;
} __packed;
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 8cc0eefe4ccc..da8a5ceb615c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#ifndef ZFCP_DEF_H
@@ -158,6 +158,8 @@ struct zfcp_adapter {
u32 adapter_features; /* FCP channel features */
u32 connection_features; /* host connection features */
u32 hardware_version; /* of FCP channel */
+ u32 fc_security_algorithms; /* of FCP channel */
+ u32 fc_security_algorithms_old; /* of FCP channel */
u16 timer_ticks; /* time int for a tick */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
struct list_head port_list; /* remote port list */
@@ -218,6 +220,8 @@ struct zfcp_port {
atomic_t erp_counter;
u32 maxframe_size;
u32 supported_classes;
+ u32 connection_info;
+ u32 connection_info_old;
struct work_struct gid_pn_work;
struct work_struct test_link_work;
struct work_struct rport_work;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 93655b85b73f..3d0bc000f500 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -178,12 +178,12 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
- /* fall through */
+ fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
- /* fall through */
+ fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT:
p_status = atomic_read(&port->status);
if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
@@ -196,7 +196,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return need;
if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
- /* fall through */
+ fallthrough;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
a_status = atomic_read(&adapter->status);
if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
@@ -725,7 +725,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
adapter->peer_d_id);
if (IS_ERR(port)) /* error or port already attached */
return;
- _zfcp_erp_port_reopen(port, 0, "ereptp1");
+ zfcp_erp_port_reopen(port, 0, "ereptp1");
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
@@ -1086,7 +1086,7 @@ static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_lun_strategy_close(erp_action);
/* already closed */
- /* fall through */
+ fallthrough;
case ZFCP_ERP_STEP_LUN_CLOSING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
@@ -1415,7 +1415,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_erp_try_rport_unblock(port);
- /* fall through */
+ fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c8556787cfdc..88294ca0e2ea 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -4,7 +4,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#ifndef ZFCP_EXT_H
@@ -44,6 +44,9 @@ extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req,
+ u64 wwpn, u32 fc_security_old,
+ u32 fc_security_new);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
@@ -135,6 +138,13 @@ extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
+enum zfcp_fsf_print_fmt {
+ ZFCP_FSF_PRINT_FMT_LIST,
+ ZFCP_FSF_PRINT_FMT_SINGLEITEM,
+};
+extern ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size,
+ u32 fc_security,
+ enum zfcp_fsf_print_fmt fmt);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index cae9b7ff79b0..111fe3fc32d7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4,7 +4,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -120,6 +120,23 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
+static void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
+{
+ struct Scsi_Host *shost = adapter->scsi_host;
+
+ fc_host_port_id(shost) = 0;
+ fc_host_fabric_name(shost) = 0;
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+ adapter->hydra_version = 0;
+ snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
+ memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
+
+ adapter->peer_wwpn = 0;
+ adapter->peer_wwnn = 0;
+ adapter->peer_d_id = 0;
+}
+
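For orientation: the fc_host_*() setters used above are scsi_transport_fc accessors into shost->shost_data, so this helper resets exactly the attributes the fc_host sysfs class exports. A representative accessor definition (quoted from <scsi/scsi_transport_fc.h> from memory; treat as a sketch):

	#define fc_host_port_id(x) \
		(((struct fc_host_attrs *)(x)->shost_data)->port_id)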
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
struct fsf_link_down_info *link_down)
{
@@ -132,6 +149,8 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
zfcp_scsi_schedule_rports_block(adapter);
+ zfcp_fsf_fc_host_link_down(adapter);
+
if (!link_down)
goto out;
@@ -502,6 +521,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
+ snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
+ "IBM");
fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
@@ -510,9 +531,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);
- if (fc_host_permanent_port_name(shost) == -1)
- fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
-
zfcp_scsi_set_prot(adapter);
/* no error return above here, otherwise must fix call chains */
@@ -525,6 +543,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
adapter->hydra_version = bottom->adapter_type;
+ snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
+ bottom->adapter_type);
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
@@ -532,8 +552,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ fc_host_fabric_name(shost) = 0;
break;
case FSF_TOPO_FABRIC:
+ fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
else
@@ -541,8 +563,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
- /* fall through */
+ fc_host_fabric_name(shost) = 0;
+ fallthrough;
default:
+ fc_host_fabric_name(shost) = 0;
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
@@ -565,6 +589,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
+ snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->lic_version);
adapter->fsf_lic_version = bottom->lic_version;
adapter->adapter_features = bottom->adapter_features;
adapter->connection_features = bottom->connection_features;
@@ -598,13 +624,6 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
zfcp_diag_update_xdata(diag_hdr, bottom, true);
req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
- fc_host_node_name(shost) = 0;
- fc_host_port_name(shost) = 0;
- fc_host_port_id(shost) = 0;
- fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
- fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
- adapter->hydra_version = 0;
-
	/* avoid adapter shutdown so that events such as
	 * LINK UP can still be recognized */
atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -621,6 +640,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
adapter->hardware_version = bottom->hardware_version;
+ snprintf(fc_host_hardware_version(shost),
+ FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->hardware_version);
memcpy(fc_host_serial_number(shost), bottom->serial_number,
min(FC_SERIAL_NUMBER_SIZE, 17));
EBCASC(fc_host_serial_number(shost),
@@ -642,6 +664,99 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
}
}
+/*
+ * Mapping of FC Endpoint Security flag masks to mnemonics
+ *
+ * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
+ * changes.
+ */
+static const struct {
+ u32 mask;
+ char *name;
+} zfcp_fsf_fc_security_mnemonics[] = {
+ { FSF_FC_SECURITY_AUTH, "Authentication" },
+ { FSF_FC_SECURITY_ENC_FCSP2 |
+ FSF_FC_SECURITY_ENC_ERAS, "Encryption" },
+};
+
+/* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1;
+ * the longest name is "Authentication": 14 characters plus '\0' = 15 */
+#define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15
+
+/**
+ * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
+ * mnemonics and place in a buffer
+ * @buf : the buffer to place the translated FC Endpoint Security flag(s)
+ * into
+ * @size : the size of the buffer, including the trailing null space
+ * @fc_security: one or more FC Endpoint Security flags, or zero
+ * @fmt : specifies whether a list or a single item is to be put into the
+ * buffer
+ *
+ * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
+ * If the FC Endpoint Security flags are zero "none" is placed into the buffer.
+ *
+ * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
+ * a comma followed by a space into the buffer. If one or more FC Endpoint
+ * Security flags cannot be translated into a mnemonic, as they are undefined
+ * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
+ * representation is placed into the buffer.
+ *
+ * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
+ * the buffer. If the FC Endpoint Security flag cannot be translated, as it is
+ * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
+ * representation is placed into the buffer. If more than one FC Endpoint
+ * Security flag was specified, their value in hexadecimal representation is
+ * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
+ * can be used to define a buffer that is large enough to hold one mnemonic.
+ *
+ * Return: The number of characters written into buf not including the trailing
+ * '\0'. If size is 0, the function returns 0.
+ */
+ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
+ enum zfcp_fsf_print_fmt fmt)
+{
+ const char *prefix = "";
+ ssize_t len = 0;
+ int i;
+
+ if (fc_security == 0)
+ return scnprintf(buf, size, "none");
+ if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
+ return scnprintf(buf, size, "0x%08x", fc_security);
+
+ for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
+ if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
+ continue;
+
+ len += scnprintf(buf + len, size - len, "%s%s", prefix,
+ zfcp_fsf_fc_security_mnemonics[i].name);
+ prefix = ", ";
+ fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
+ }
+
+ if (fc_security != 0)
+ len += scnprintf(buf + len, size - len, "%s0x%08x",
+ prefix, fc_security);
+
+ return len;
+}
+
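A minimal usage sketch of the new helper (illustrative only: the wrapper function, the 64-byte buffer, and the undefined bit 0x80000000 are assumptions for the example, not part of this patch):

	static void example_scnprint_fc_security(void)
	{
		char buf[64];

		/* defined flags as a list: "Authentication, Encryption" */
		zfcp_fsf_scnprint_fc_security(buf, sizeof(buf),
					      FSF_FC_SECURITY_AUTH |
					      FSF_FC_SECURITY_ENC_FCSP2,
					      ZFCP_FSF_PRINT_FMT_LIST);

		/* an undefined bit is appended in hex:
		 * "Authentication, 0x80000000" */
		zfcp_fsf_scnprint_fc_security(buf, sizeof(buf),
					      FSF_FC_SECURITY_AUTH | 0x80000000,
					      ZFCP_FSF_PRINT_FMT_LIST);

		/* more than one flag in SINGLEITEM mode: "0x00000003" */
		zfcp_fsf_scnprint_fc_security(buf, sizeof(buf),
					      FSF_FC_SECURITY_AUTH |
					      FSF_FC_SECURITY_ENC_FCSP2,
					      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
	}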
+static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
+ struct zfcp_fsf_req *req)
+{
+ if (adapter->fc_security_algorithms ==
+ adapter->fc_security_algorithms_old) {
+ /* no change, no trace */
+ return;
+ }
+
+ zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
+ adapter->fc_security_algorithms_old,
+ adapter->fc_security_algorithms);
+
+ adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
+}
+
static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
@@ -651,10 +766,7 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
- fc_host_permanent_port_name(shost) = bottom->wwpn;
- } else
- fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+ fc_host_permanent_port_name(shost) = bottom->wwpn;
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
fc_host_supported_speeds(shost) =
zfcp_fsf_convert_portspeed(bottom->supported_speed);
@@ -662,6 +774,12 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
FC_FC4_LIST_SIZE);
memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
FC_FC4_LIST_SIZE);
+ if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
+ adapter->fc_security_algorithms =
+ bottom->fc_security_algorithms;
+ else
+ adapter->fc_security_algorithms = 0;
+ zfcp_fsf_dbf_adapter_fc_security(adapter, req);
}
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -688,9 +806,9 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
zfcp_diag_update_xdata(diag_hdr, bottom, true);
req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
- zfcp_fsf_exchange_port_evaluate(req);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+ zfcp_fsf_exchange_port_evaluate(req);
break;
}
}
@@ -914,7 +1032,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
switch (fsq->word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
- /* fall through */
+ fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -1009,7 +1127,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
- /* fall through */
+ fallthrough;
case FSF_GENERIC_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1195,7 +1313,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
break;
case FSF_SBAL_MISMATCH:
/* should never occur, avoided in zfcp_fsf_send_els */
- /* fall through */
+ fallthrough;
default:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -1287,7 +1405,8 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
FSF_FEATURE_UPDATE_ALERT |
- FSF_FEATURE_REQUEST_SFP_DATA;
+ FSF_FEATURE_REQUEST_SFP_DATA |
+ FSF_FEATURE_FC_SECURITY;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
erp_action->fsf_req_id = req->req_id;
@@ -1341,7 +1460,8 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
FSF_FEATURE_UPDATE_ALERT |
- FSF_FEATURE_REQUEST_SFP_DATA;
+ FSF_FEATURE_REQUEST_SFP_DATA |
+ FSF_FEATURE_FC_SECURITY;
if (data)
req->data = data;
@@ -1478,10 +1598,117 @@ out_unlock:
return retval;
}
+static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
+ struct zfcp_fsf_req *req)
+{
+ char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
+ char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
+
+ if (port->connection_info == port->connection_info_old) {
+ /* no change, no log nor trace */
+ return;
+ }
+
+ zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
+ port->connection_info_old,
+ port->connection_info);
+
+ zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
+ port->connection_info_old,
+ ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+ zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
+ port->connection_info,
+ ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+
+ if (strncmp(mnemonic_old, mnemonic_new,
+ ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
+ /* no change in string representation, no log */
+ goto out;
+ }
+
+ if (port->connection_info_old == 0) {
+ /* activation */
+ dev_info(&port->adapter->ccw_device->dev,
+ "FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n",
+ port->wwpn, mnemonic_new);
+ } else if (port->connection_info == 0) {
+ /* deactivation */
+ dev_warn(&port->adapter->ccw_device->dev,
+ "FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n",
+ port->wwpn, mnemonic_old);
+ } else {
+ /* change */
+ dev_warn(&port->adapter->ccw_device->dev,
+ "FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n",
+ port->wwpn, mnemonic_old, mnemonic_new);
+ }
+
+out:
+ port->connection_info_old = port->connection_info;
+}
+
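Note the two-stage gating in zfcp_fsf_log_port_fc_security() above: the dbf trace records every raw change, while the kernel log message is additionally suppressed when the string form is unchanged. For example, a switch between FSF_FC_SECURITY_ENC_FCSP2 and FSF_FC_SECURITY_ENC_ERAS renders as "Encryption" both times per the mnemonics table, so only the trace fires.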
+static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
+ u64 wwpn)
+{
+ switch (fsf_sqw0) {
+
+ /*
+ * Open Port command error codes
+ */
+
+ case FSF_SQ_SECURITY_REQUIRED:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
+ wwpn);
+ break;
+ case FSF_SQ_SECURITY_TIMEOUT:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
+ wwpn);
+ break;
+ case FSF_SQ_SECURITY_KM_UNAVAILABLE:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
+ wwpn);
+ break;
+ case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
+ wwpn);
+ break;
+ case FSF_SQ_SECURITY_AUTH_FAILURE:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
+ wwpn);
+ break;
+
+ /*
+ * Send FCP command error codes
+ */
+
+ case FSF_SQ_SECURITY_ENC_FAILURE:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
+ wwpn);
+ break;
+
+ /*
+ * Unknown error codes
+ */
+
+ default:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
+ fsf_sqw0, wwpn);
+ }
+}
+
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
+ struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
+ struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
struct fc_els_flogi *plogi;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -1491,7 +1718,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
case FSF_PORT_ALREADY_OPEN:
break;
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
- dev_warn(&req->adapter->ccw_device->dev,
+ dev_warn(&adapter->ccw_device->dev,
"Not enough FCP adapter resources to open "
"remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
@@ -1499,11 +1726,17 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
ZFCP_STATUS_COMMON_ERP_FAILED);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
+ case FSF_SECURITY_ERROR:
+ zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
+ header->fsf_status_qual.word[0],
+ port->wwpn);
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
/* no zfcp_fc_test_link() with failed open port */
- /* fall through */
+ fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
case FSF_SQ_NO_RETRY_POSSIBLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1512,6 +1745,11 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
break;
case FSF_GOOD:
port->handle = header->port_handle;
+ if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
+ port->connection_info = bottom->connection_info;
+ else
+ port->connection_info = 0;
+ zfcp_fsf_log_port_fc_security(port, req);
atomic_or(ZFCP_STATUS_COMMON_OPEN |
ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
@@ -1531,10 +1769,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
* another GID_PN straight after a port has been opened.
* Alternately, an ADISC/PDISC ELS should suffice, as well.
*/
- plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
- if (req->qtcb->bottom.support.els1_length >=
- FSF_PLOGI_MIN_LEN)
- zfcp_fc_plogi_evaluate(port, plogi);
+ plogi = (struct fc_els_flogi *) bottom->els;
+ if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
+ zfcp_fc_plogi_evaluate(port, plogi);
break;
case FSF_UNKNOWN_OP_SUBTYPE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1672,14 +1909,14 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
"Opening WKA port 0x%x failed\n", wka_port->d_id);
- /* fall through */
+ fallthrough;
case FSF_ADAPTER_STATUS_AVAILABLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
break;
case FSF_GOOD:
wka_port->handle = header->port_handle;
- /* fall through */
+ fallthrough;
case FSF_PORT_ALREADY_OPEN:
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
@@ -1822,7 +2059,6 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
- /* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -1907,7 +2143,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
- /* fall through */
+ fallthrough;
case FSF_LUN_ALREADY_OPEN:
break;
case FSF_PORT_BOXED:
@@ -1938,7 +2174,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
- /* fall through */
+ fallthrough;
case FSF_INVALID_COMMAND_OPTION:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -1946,7 +2182,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
- /* fall through */
+ fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -2040,7 +2276,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
switch (req->qtcb->header.fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
- /* fall through */
+ fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -2225,6 +2461,13 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
zfcp_fc_test_link(zfcp_sdev->port);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
+ case FSF_SECURITY_ERROR:
+ zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
+ header->fsf_status_qual.word[0],
+ zfcp_sdev->port->wwpn);
+ zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
}
}
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 4bfb79f20588..09d73d0061ef 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -4,7 +4,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#ifndef FSF_H
@@ -78,6 +78,7 @@
#define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081
#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
+#define FSF_SECURITY_ERROR 0x00000090
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
@@ -110,6 +111,14 @@
#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED 0x00004000
#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT 0x00008000
+/* FSF status qualifier, security error */
+#define FSF_SQ_SECURITY_REQUIRED 0x00000001
+#define FSF_SQ_SECURITY_TIMEOUT 0x00000002
+#define FSF_SQ_SECURITY_KM_UNAVAILABLE 0x00000003
+#define FSF_SQ_SECURITY_RKM_UNAVAILABLE 0x00000004
+#define FSF_SQ_SECURITY_AUTH_FAILURE 0x00000005
+#define FSF_SQ_SECURITY_ENC_FAILURE 0x00000010
+
/* payload size in status read buffer */
#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
@@ -165,6 +174,7 @@
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
#define FSF_FEATURE_REQUEST_SFP_DATA 0x00000200
#define FSF_FEATURE_REPORT_SFP_DATA 0x00000800
+#define FSF_FEATURE_FC_SECURITY 0x00001000
#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
@@ -174,6 +184,11 @@
/* option */
#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
+/* FC security algorithms */
+#define FSF_FC_SECURITY_AUTH 0x00000001
+#define FSF_FC_SECURITY_ENC_FCSP2 0x00000002
+#define FSF_FC_SECURITY_ENC_ERAS 0x00000004
+
struct fsf_queue_designator {
u8 cssid;
u8 chpid;
@@ -338,7 +353,8 @@ struct fsf_qtcb_bottom_support {
u8 res3[3];
u8 timeout;
u32 lun_access_info;
- u8 res4[180];
+ u32 connection_info;
+ u8 res4[176];
u32 els1_length;
u32 els2_length;
u32 req_buf_length;
@@ -426,7 +442,8 @@ struct fsf_qtcb_bottom_port {
u16 port_tx_type :4;
};
} sfp_flags;
- u8 res3[240];
+ u32 fc_security_algorithms;
+ u8 res3[236];
} __attribute__ ((packed));
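Both layout changes carve the new u32 out of the adjacent reserved bytes (4 + 176 = 180 in the support bottom, 4 + 236 = 240 in the port bottom), leaving the QTCB sizes unchanged. A hypothetical compile-time check, not part of the patch:

	static_assert(sizeof(u32) + 176 == 180);	/* bottom_support */
	static_assert(sizeof(u32) + 236 == 240);	/* bottom_port */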
union fsf_qtcb_bottom {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index f0d6296e673b..26702b56a7ab 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -277,29 +277,6 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
return 0;
}
-
-static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
- struct zfcp_qdio *qdio)
-{
- memset(id, 0, sizeof(*id));
- id->cdev = qdio->adapter->ccw_device;
- id->q_format = QDIO_ZFCP_QFMT;
- memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
- ASCEBC(id->adapter_name, 8);
- id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
- if (enable_multibuffer)
- id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
- id->no_input_qs = 1;
- id->no_output_qs = 1;
- id->input_handler = zfcp_qdio_int_resp;
- id->output_handler = zfcp_qdio_int_req;
- id->int_parm = (unsigned long) qdio;
- id->input_sbal_addr_array = qdio->res_q;
- id->output_sbal_addr_array = qdio->req_q;
- id->scan_threshold =
- QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
-}
-
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
* @qdio: pointer to struct zfcp_qdio
@@ -308,7 +285,6 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
*/
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
- struct qdio_initialize init_data;
int ret;
ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
@@ -319,10 +295,9 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
if (ret)
goto free_req_q;
- zfcp_qdio_setup_init_data(&init_data, qdio);
init_waitqueue_head(&qdio->req_q_wq);
- ret = qdio_allocate(&init_data);
+ ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
if (ret)
goto free_res_q;
@@ -374,8 +349,10 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
*/
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
+ struct qdio_buffer **input_sbals[1] = {qdio->res_q};
+ struct qdio_buffer **output_sbals[1] = {qdio->req_q};
struct qdio_buffer_element *sbale;
- struct qdio_initialize init_data;
+ struct qdio_initialize init_data = {0};
struct zfcp_adapter *adapter = qdio->adapter;
struct ccw_device *cdev = adapter->ccw_device;
struct qdio_ssqd_desc ssqd;
@@ -387,12 +364,26 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&qdio->adapter->status);
- zfcp_qdio_setup_init_data(&init_data, qdio);
+ init_data.q_format = QDIO_ZFCP_QFMT;
+ memcpy(init_data.adapter_name, dev_name(&cdev->dev), 8);
+ ASCEBC(init_data.adapter_name, 8);
+ init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
+ if (enable_multibuffer)
+ init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
+ init_data.no_input_qs = 1;
+ init_data.no_output_qs = 1;
+ init_data.input_handler = zfcp_qdio_int_resp;
+ init_data.output_handler = zfcp_qdio_int_req;
+ init_data.int_parm = (unsigned long) qdio;
+ init_data.input_sbal_addr_array = input_sbals;
+ init_data.output_sbal_addr_array = output_sbals;
+ init_data.scan_threshold =
+ QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
- if (qdio_establish(&init_data))
+ if (qdio_establish(cdev, &init_data))
goto failed_establish;
- if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
+ if (qdio_get_ssqd_desc(cdev, &ssqd))
goto failed_qdio;
if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
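The zfcp_qdio.c changes track the s390 qdio API split: qdio_allocate() now takes the ccw device and the queue counts directly at allocation time, while the full qdio_initialize descriptor is built only when the queues are established, with the SBAL arrays passed as one pointer per queue. A condensed sketch of the resulting call order (error handling elided; names as in this diff):

	/* zfcp_qdio_allocate(): */
	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);

	/* zfcp_qdio_open(): */
	struct qdio_buffer **input_sbals[1] = { qdio->res_q };
	struct qdio_buffer **output_sbals[1] = { qdio->req_q };
	struct qdio_initialize init_data = { 0 };

	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;
	/* ... handlers, q_format, scan_threshold as above ... */
	ret = qdio_establish(cdev, &init_data);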
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 3910d529c15a..13d873f806e4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -856,6 +856,10 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
+ .show_host_manufacturer = 1,
+ .show_host_model = 1,
+ .show_host_hardware_version = 1,
+ .show_host_firmware_version = 1,
.get_fc_host_stats = zfcp_scsi_get_fc_host_stats,
.reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo,
@@ -871,5 +875,6 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
+ .show_host_fabric_name = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index a711a0d15100..7ec30ded0169 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -4,7 +4,7 @@
*
* sysfs attributes.
*
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -370,6 +370,42 @@ static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
zfcp_sysfs_adapter_diag_max_age_show,
zfcp_sysfs_adapter_diag_max_age_store);
+static ssize_t zfcp_sysfs_adapter_fc_security_show(
+ struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ unsigned int status;
+ int i;
+
+ if (!adapter)
+ return -ENODEV;
+
+ /*
+	 * Adapter status COMMON_OPEN implies that exchange config (xconf)
+	 * and exchange port (xport) data exchange has completed. The
+	 * adapter's FC Endpoint Security capability remains unchanged in
+	 * case of COMMON_ERP_FAILED (e.g. due to local link down).
+ */
+ status = atomic_read(&adapter->status);
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN))
+ i = sprintf(buf, "unknown\n");
+ else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
+ i = sprintf(buf, "unsupported\n");
+ else {
+ i = zfcp_fsf_scnprint_fc_security(
+ buf, PAGE_SIZE - 1, adapter->fc_security_algorithms,
+ ZFCP_FSF_PRINT_FMT_LIST);
+ i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+ }
+
+ zfcp_ccw_adapter_put(adapter);
+ return i;
+}
+static ZFCP_DEV_ATTR(adapter, fc_security, S_IRUGO,
+ zfcp_sysfs_adapter_fc_security_show,
+ NULL);
+
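Per the show routine above, reading the new adapter attribute yields one of three forms (the sysfs path is an assumption based on where zfcp's ccw device attributes appear):

	/* e.g. cat /sys/bus/ccw/drivers/zfcp/0.0.1900/fc_security (path assumed)
	 *   "unknown"     - adapter not open, no xconf/xport data yet
	 *   "unsupported" - FSF_FEATURE_FC_SECURITY not advertised
	 *   "Authentication, Encryption"
	 *                 - otherwise, ZFCP_FSF_PRINT_FMT_LIST output
	 */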
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
@@ -383,6 +419,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
&dev_attr_adapter_diag_max_age.attr,
+ &dev_attr_adapter_fc_security.attr,
NULL
};
@@ -426,6 +463,36 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
+static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+ struct zfcp_adapter *adapter = port->adapter;
+ unsigned int status = atomic_read(&port->status);
+ int i;
+
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+ 0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) ||
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
+ 0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
+ i = sprintf(buf, "unknown\n");
+ else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
+ i = sprintf(buf, "unsupported\n");
+ else {
+ i = zfcp_fsf_scnprint_fc_security(
+ buf, PAGE_SIZE - 1, port->connection_info,
+ ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+ i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+ }
+
+ return i;
+}
+static ZFCP_DEV_ATTR(port, fc_security, S_IRUGO,
+ zfcp_sysfs_port_fc_security_show,
+ NULL);
+
static struct attribute *zfcp_port_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
@@ -433,6 +500,7 @@ static struct attribute *zfcp_port_attrs[] = {
&dev_attr_port_in_recovery.attr,
&dev_attr_port_status.attr,
&dev_attr_port_access_denied.attr,
+ &dev_attr_port_fc_security.attr,
NULL
};
static struct attribute_group zfcp_port_attr_group = {
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 12d66aa61ede..843e830b5f87 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -37,8 +37,6 @@
#define DRIVER_NAME "envctrl"
#define PFX DRIVER_NAME ": "
-#define ENVCTRL_MINOR 162
-
#define PCF8584_ADDRESS 0x55
#define CONTROL_PIN 0x80
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index e85a05aca4d6..4147d22fd448 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -31,8 +31,6 @@ static struct {
unsigned long busy; /* In use? */
} flash;
-#define FLASH_MINOR 152
-
static int
flash_mmap(struct file *file, struct vm_area_struct *vma)
{
@@ -157,7 +155,7 @@ static const struct file_operations flash_fops = {
.release = flash_release,
};
-static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
+static struct miscdevice flash_dev = { SBUS_FLASH_MINOR, "flash", &flash_fops };
static int flash_probe(struct platform_device *op)
{
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 7173a2e4e8cf..37d252f2548d 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -23,8 +23,6 @@
#include <asm/io.h>
#include <asm/pgtable.h>
-#define UCTRL_MINOR 174
-
#define DEBUG 1
#ifdef DEBUG
#define dprintk(x) printk x
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
index e2956741fbd1..5f65cb75f534 100644
--- a/drivers/scsi/.gitignore
+++ b/drivers/scsi/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
53c700_d.h
scsi_devinfo_tbl.c
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 3170b295a5da..b5b3154e2c28 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -36,6 +36,7 @@
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/msdos_partition.h>
#include <scsi/scsicam.h>
#include <asm/dma.h>
@@ -3410,9 +3411,10 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
a partition table entry whose end_head matches one of the
standard BusLogic geometry translations (64/32, 128/32, or 255/63).
*/
- if (*(unsigned short *) (buf + 64) == 0xAA55) {
- struct partition *part1_entry = (struct partition *) buf;
- struct partition *part_entry = part1_entry;
+ if (*(unsigned short *) (buf + 64) == MSDOS_LABEL_MAGIC) {
+ struct msdos_partition *part1_entry =
+ (struct msdos_partition *)buf;
+ struct msdos_partition *part_entry = part1_entry;
int saved_cyl = diskparam->cylinders, part_no;
unsigned char part_end_head = 0, part_end_sector = 0;
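For reference, struct msdos_partition carries the same MBR slot layout as the struct partition it replaces; roughly (per include/linux/msdos_partition.h, reproduced from memory, so treat as a sketch):

	struct msdos_partition {
		u8 boot_ind;		/* 0x80 - active */
		u8 head;		/* starting head */
		u8 sector;		/* starting sector */
		u8 cyl;			/* starting cylinder */
		u8 sys_ind;		/* partition type */
		u8 end_head;		/* end head */
		u8 end_sector;		/* end sector */
		u8 end_cyl;		/* end cylinder */
		__le32 start_sect;	/* starting sector counting from 0 */
		__le32 nr_sects;	/* nr of sectors in partition */
	} __packed;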
@@ -3652,7 +3654,7 @@ static bool __init blogic_parse(char **str, char *keyword)
selected host adapter.
The BusLogic Driver Probing Options are described in
- <file:Documentation/scsi/BusLogic.txt>.
+ <file:Documentation/scsi/BusLogic.rst>.
*/
static int __init blogic_parseopts(char *options)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a7881f8eb05e..2017c43dac1b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -33,7 +33,7 @@ config SCSI
Channel, and FireWire storage.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called scsi_mod.
However, do not compile this as a module if your root file system
@@ -79,7 +79,7 @@ config BLK_DEV_SD
CD-ROMs.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called sd_mod.
Do not compile this driver as a module if your root file system
@@ -94,11 +94,11 @@ config CHR_DEV_ST
If you want to use a SCSI tape drive under Linux, say Y and read the
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>, and
- <file:Documentation/scsi/st.txt> in the kernel source. This is NOT
+ <file:Documentation/scsi/st.rst> in the kernel source. This is NOT
for SCSI CD-ROMs.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>. The module will be called st.
+ <file:Documentation/scsi/scsi.rst>. The module will be called st.
config BLK_DEV_SR
tristate "SCSI CDROM support"
@@ -112,18 +112,9 @@ config BLK_DEV_SR
Make sure to say Y or M to "ISO 9660 CD-ROM file system support".
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called sr_mod.
-config BLK_DEV_SR_VENDOR
- bool "Enable vendor-specific extensions (for SCSI CDROM)"
- depends on BLK_DEV_SR
- help
- This enables the usage of vendor specific SCSI commands. This is
- required to support multisession CDs with old NEC/TOSHIBA cdrom
- drives (and HP Writers). If you have such a drive and get the first
- session only, try saying Y here; everybody else says N.
-
config CHR_DEV_SG
tristate "SCSI generic support"
depends on SCSI
@@ -136,16 +127,16 @@ config CHR_DEV_SG
For scanners, look at SANE (<http://www.sane-project.org/>). For CD
writer software look at Cdrtools
- (<http://cdrecord.berlios.de/private/cdrecord.html>)
+ (<http://cdrtools.sourceforge.net/>)
and for burning a "disk at once": CDRDAO
(<http://cdrdao.sourceforge.net/>). Cdparanoia is a high
quality digital reader of audio CDs (<http://www.xiph.org/paranoia/>).
For other devices, it's possible that you'll have to write the
driver software yourself. Please read the file
- <file:Documentation/scsi/scsi-generic.txt> for more information.
+ <file:Documentation/scsi/scsi-generic.rst> for more information.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>. The module will be called sg.
+ <file:Documentation/scsi/scsi.rst>. The module will be called sg.
If unsure, say N.
@@ -158,12 +149,12 @@ config CHR_DEV_SCH
don't need this for those tiny 6-slot cdrom changers. Media
changers are listed as "Type: Medium Changer" in /proc/scsi/scsi.
If you have such hardware and want to use it with linux, say Y
- here. Check <file:Documentation/scsi/scsi-changer.txt> for details.
+ here. Check <file:Documentation/scsi/scsi-changer.rst> for details.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.rst> and
- <file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
+ <file:Documentation/scsi/scsi.rst>. The module will be called ch.o.
If unsure, say N.
config SCSI_ENCLOSURE
@@ -392,7 +383,7 @@ config SCSI_AHA152X
It is explained in section 3.3 of the SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. You might also want to
- read the file <file:Documentation/scsi/aha152x.txt>.
+ read the file <file:Documentation/scsi/aha152x.rst>.
To compile this driver as a module, choose M here: the
module will be called aha152x.
@@ -430,7 +421,7 @@ config SCSI_AACRAID
help
This driver supports a variety of Dell, HP, Adaptec, IBM and
ICP storage products. For a list of supported products, refer
- to <file:Documentation/scsi/aacraid.txt>.
+ to <file:Documentation/scsi/aacraid.rst>.
To compile this driver as a module, choose M here: the module
will be called aacraid.
@@ -457,7 +448,7 @@ config SCSI_DPT_I2O
help
This driver supports all of Adaptec's I2O based RAID controllers as
well as the DPT SmartRaid V cards. This is an Adaptec maintained
- driver by Deanna Bonds. See <file:Documentation/scsi/dpti.txt>.
+ driver by Deanna Bonds. See <file:Documentation/scsi/dpti.rst>.
To compile this driver as a module, choose M here: the
module will be called dpt_i2o.
@@ -511,8 +502,8 @@ config SCSI_BUSLOGIC
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>, and the files
- <file:Documentation/scsi/BusLogic.txt> and
- <file:Documentation/scsi/FlashPoint.txt> for more information.
+ <file:Documentation/scsi/BusLogic.rst> and
+ <file:Documentation/scsi/FlashPoint.rst> for more information.
Note that support for FlashPoint is only available for 32-bit
x86 configurations.
@@ -613,7 +604,7 @@ config FCOE_FNIC
This is support for the Cisco PCI-Express FCoE HBA.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called fnic.
config SCSI_SNIC
@@ -623,7 +614,7 @@ config SCSI_SNIC
This is support for the Cisco PCI-Express SCSI HBA.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called snic.
config SCSI_SNIC_DEBUG_FS
@@ -813,7 +804,7 @@ config SCSI_PPA
newer drives)", below.
For more information about this driver and how to use it you should
- read the file <file:Documentation/scsi/ppa.txt>. You should also read
+ read the file <file:Documentation/scsi/ppa.rst>. You should also read
the SCSI-HOWTO, which is available from
<http://www.tldp.org/docs.html#howto>. If you use this driver,
you will still be able to use the parallel port for other tasks,
@@ -840,7 +831,7 @@ config SCSI_IMM
here and Y to "IOMEGA Parallel Port (ppa - older drives)", above.
For more information about this driver and how to use it you should
- read the file <file:Documentation/scsi/ppa.txt>. You should also read
+ read the file <file:Documentation/scsi/ppa.rst>. You should also read
the SCSI-HOWTO, which is available from
<http://www.tldp.org/docs.html#howto>. If you use this driver,
you will still be able to use the parallel port for other tasks,
@@ -930,7 +921,7 @@ config SCSI_SYM53C8XX_2
language. It does not support LSI53C10XX Ultra-320 PCI-X SCSI
controllers; you need to use the Fusion MPT driver for that.
- Please read <file:Documentation/scsi/sym53c8xx_2.txt> for more
+ Please read <file:Documentation/scsi/sym53c8xx_2.rst> for more
information.
config SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
@@ -989,6 +980,7 @@ config SCSI_SYM53C8XX_MMIO
config SCSI_IPR
tristate "IBM Power Linux RAID adapter support"
depends on PCI && SCSI && ATA
+ select SATA_HOST
select FW_LOADER
select IRQ_POLL
select SGL_ALLOC
@@ -1126,7 +1118,7 @@ config SCSI_QLOGIC_FAS
SCSI support"), below.
Information about this driver is contained in
- <file:Documentation/scsi/qlogicfas.txt>. You should also read the
+ <file:Documentation/scsi/qlogicfas.rst>. You should also read the
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
@@ -1196,7 +1188,7 @@ config SCSI_DC395x
This driver works, but is still in experimental status. So better
have a bootable disk and a backup in case of emergency.
- Documentation can be found in <file:Documentation/scsi/dc395x.txt>.
+ Documentation can be found in <file:Documentation/scsi/dc395x.rst>.
To compile this driver as a module, choose M here: the
module will be called dc395x.
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 33dbc051bff9..eb72ac8136c3 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -798,6 +798,11 @@ static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
return 0;
}
+static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
+{
+ aac_probe_container_callback1(scsi_cmnd);
+}
+
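The trivial wrapper above replaces a cast that changed the callback's return type behind the compiler's back: aac_probe_container_callback1() returns int, while ->scsi_done expects void (*)(struct scsi_cmnd *). Calling through a mismatched prototype is undefined behavior and fails outright under indirect-call checking such as Control Flow Integrity (the likely motivation; an assumption, the patch does not say).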
int aac_probe_container(struct aac_dev *dev, int cid)
{
struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
@@ -810,7 +815,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
return -ENOMEM;
}
scsicmd->list.next = NULL;
- scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
+ scsicmd->scsi_done = aac_probe_container_scsi_done;
scsicmd->device = scsidev;
scsidev->sdev_state = 0;
@@ -2601,9 +2606,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
static void synchronize_callback(void *context, struct fib *fibptr)
{
struct aac_synchronize_reply *synchronizereply;
- struct scsi_cmnd *cmd;
-
- cmd = context;
+ struct scsi_cmnd *cmd = context;
if (!aac_valid_context(cmd, fibptr))
return;
@@ -2644,77 +2647,8 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
int status;
struct fib *cmd_fibcontext;
struct aac_synchronize *synchronizecmd;
- struct scsi_cmnd *cmd;
struct scsi_device *sdev = scsicmd->device;
- int active = 0;
struct aac_dev *aac;
- u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
- (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
- u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
- unsigned long flags;
-
- /*
- * Wait for all outstanding queued commands to complete to this
- * specific target (block).
- */
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_for_each_entry(cmd, &sdev->cmd_list, list)
- if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
- u64 cmnd_lba;
- u32 cmnd_count;
-
- if (cmd->cmnd[0] == WRITE_6) {
- cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
- (cmd->cmnd[2] << 8) |
- cmd->cmnd[3];
- cmnd_count = cmd->cmnd[4];
- if (cmnd_count == 0)
- cmnd_count = 256;
- } else if (cmd->cmnd[0] == WRITE_16) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
- ((u64)cmd->cmnd[3] << 48) |
- ((u64)cmd->cmnd[4] << 40) |
- ((u64)cmd->cmnd[5] << 32) |
- ((u64)cmd->cmnd[6] << 24) |
- (cmd->cmnd[7] << 16) |
- (cmd->cmnd[8] << 8) |
- cmd->cmnd[9];
- cmnd_count = (cmd->cmnd[10] << 24) |
- (cmd->cmnd[11] << 16) |
- (cmd->cmnd[12] << 8) |
- cmd->cmnd[13];
- } else if (cmd->cmnd[0] == WRITE_12) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
- (cmd->cmnd[3] << 16) |
- (cmd->cmnd[4] << 8) |
- cmd->cmnd[5];
- cmnd_count = (cmd->cmnd[6] << 24) |
- (cmd->cmnd[7] << 16) |
- (cmd->cmnd[8] << 8) |
- cmd->cmnd[9];
- } else if (cmd->cmnd[0] == WRITE_10) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
- (cmd->cmnd[3] << 16) |
- (cmd->cmnd[4] << 8) |
- cmd->cmnd[5];
- cmnd_count = (cmd->cmnd[7] << 8) |
- cmd->cmnd[8];
- } else
- continue;
- if (((cmnd_lba + cmnd_count) < lba) ||
- (count && ((lba + count) < cmnd_lba)))
- continue;
- ++active;
- break;
- }
-
- spin_unlock_irqrestore(&sdev->list_lock, flags);
-
- /*
- * Yield the processor (requeue for later)
- */
- if (active)
- return SCSI_MLQUEUE_DEVICE_BUSY;
aac = (struct aac_dev *)sdev->host->hostdata;
if (aac->in_reset)
@@ -2723,8 +2657,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
/*
* Allocate and initialize a Fib
*/
- if (!(cmd_fibcontext = aac_fib_alloc(aac)))
- return SCSI_MLQUEUE_HOST_BUSY;
+ cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
aac_fib_init(cmd_fibcontext);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index f75878d773cf..355b16f0b145 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -272,36 +272,35 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
+static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data, bool rsvd)
+{
+ int *active = data;
+
+ if (cmd->SCp.phase == AAC_OWNER_FIRMWARE)
+ *active = *active + 1;
+ return true;
+}
static void aac_wait_for_io_completion(struct aac_dev *aac)
{
- unsigned long flagv = 0;
- int i = 0;
+ int i = 0, active;
for (i = 60; i; --i) {
- struct scsi_device *dev;
- struct scsi_cmnd *command;
- int active = 0;
-
- __shost_for_each_device(dev, aac->scsi_host_ptr) {
- spin_lock_irqsave(&dev->list_lock, flagv);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flagv);
- if (active)
- break;
- }
+ active = 0;
+ scsi_host_busy_iter(aac->scsi_host_ptr,
+ wait_for_io_iter, &active);
/*
 * We can exit if all the commands are complete
*/
if (active == 0)
break;
+ dev_info(&aac->pdev->dev,
+ "Wait for %d commands to complete\n", active);
ssleep(1);
}
+ if (active)
+ dev_err(&aac->pdev->dev,
+ "%d outstanding commands during shutdown\n", active);
}
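Both aacraid conversions in this patch follow the same pattern: scsi_host_busy_iter() walks every busy command on the host and calls a caller-supplied predicate, replacing the old __shost_for_each_device()/cmd_list walk and its per-device locking. The callback contract, sketched from the usage in this diff (the counting callback is illustrative):

	/* return true to keep iterating; @reserved marks reserved tags */
	static bool count_busy_iter(struct scsi_cmnd *cmd, void *data,
				    bool reserved)
	{
		int *busy = data;	/* caller-supplied accumulator */

		(*busy)++;
		return true;
	}

	int busy = 0;

	scsi_host_busy_iter(shost, count_busy_iter, &busy);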
/**
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 5a8a999606ea..ddd73f6798af 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -729,7 +729,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
hbacmd->request_id =
cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
- } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
+ } else
return -EINVAL;
@@ -1476,10 +1476,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
int index, quirks;
int retval;
- struct Scsi_Host *host;
- struct scsi_device *dev;
- struct scsi_cmnd *command;
- struct scsi_cmnd *command_list;
+ struct Scsi_Host *host = aac->scsi_host_ptr;
int jafo = 0;
int bled;
u64 dmamask;
@@ -1495,8 +1492,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* - The card is dead, or will be very shortly ;-/ so no new
* commands are completing in the interrupt service.
*/
- host = aac->scsi_host_ptr;
- scsi_block_requests(host);
aac_adapter_disable_int(aac);
if (aac->thread && aac->thread->pid != current->pid) {
spin_unlock_irq(host->host_lock);
@@ -1607,39 +1602,11 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* This is where the assumption that the Adapter is quiesced
* is important.
*/
- command_list = NULL;
- __shost_for_each_device(dev, host) {
- unsigned long flags;
- spin_lock_irqsave(&dev->list_lock, flags);
- list_for_each_entry(command, &dev->cmd_list, list)
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- command->SCp.buffer = (struct scatterlist *)command_list;
- command_list = command;
- }
- spin_unlock_irqrestore(&dev->list_lock, flags);
- }
- while ((command = command_list)) {
- command_list = (struct scsi_cmnd *)command->SCp.buffer;
- command->SCp.buffer = NULL;
- command->result = DID_OK << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_TASK_SET_FULL;
- command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
- command->scsi_done(command);
- }
- /*
- * Any Device that was already marked offline needs to be marked
- * running
- */
- __shost_for_each_device(dev, host) {
- if (!scsi_device_online(dev))
- scsi_device_set_state(dev, SDEV_RUNNING);
- }
- retval = 0;
+ scsi_host_complete_all_commands(host, DID_RESET);
+ retval = 0;
out:
aac->in_reset = 0;
- scsi_unblock_requests(host);
/*
* Issue bus rescan to catch any configuration that might have
@@ -1659,8 +1626,8 @@ out:
int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
unsigned long flagv = 0;
- int retval;
- struct Scsi_Host * host;
+ int retval, unblock_retval;
+ struct Scsi_Host *host = aac->scsi_host_ptr;
int bled;
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1678,8 +1645,7 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* target (block maximum 60 seconds). Although not necessary,
* it does make us a good storage citizen.
*/
- host = aac->scsi_host_ptr;
- scsi_block_requests(host);
+ scsi_host_block(host);
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
@@ -1690,6 +1656,9 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
retval = _aac_reset_adapter(aac, bled, reset_type);
spin_unlock_irqrestore(host->host_lock, flagv);
+ unblock_retval = scsi_host_unblock(host, SDEV_RUNNING);
+ if (!retval)
+ retval = unblock_retval;
if ((forced < 2) && (retval == -ENODEV)) {
/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
struct fib * fibctx = aac_fib_alloc(aac);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index ee6bc2f9b80a..83a60b0a8cd8 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -33,6 +33,7 @@
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/msdos_partition.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -328,9 +329,9 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
buf = scsi_bios_ptable(bdev);
if (!buf)
return 0;
- if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
- struct partition *first = (struct partition * )buf;
- struct partition *entry = first;
+ if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) {
+ struct msdos_partition *first = (struct msdos_partition *)buf;
+ struct msdos_partition *entry = first;
int saved_cylinders = param->cylinders;
int num;
unsigned char end_head, end_sec;
@@ -622,54 +623,56 @@ static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
return aac_do_ioctl(dev, cmd, arg);
}
-static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+struct fib_count_data {
+ int mlcnt;
+ int llcnt;
+ int ehcnt;
+ int fwcnt;
+ int krlcnt;
+};
+
+static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved)
{
+ struct fib_count_data *fib_count = data;
- unsigned long flags;
- struct scsi_device *sdev = NULL;
+ switch (scmnd->SCp.phase) {
+ case AAC_OWNER_FIRMWARE:
+ fib_count->fwcnt++;
+ break;
+ case AAC_OWNER_ERROR_HANDLER:
+ fib_count->ehcnt++;
+ break;
+ case AAC_OWNER_LOWLEVEL:
+ fib_count->llcnt++;
+ break;
+ case AAC_OWNER_MIDLEVEL:
+ fib_count->mlcnt++;
+ break;
+ default:
+ fib_count->krlcnt++;
+ break;
+ }
+ return true;
+}
+
+/* Called during SCSI EH, so we don't need to block requests */
+static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+{
struct Scsi_Host *shost = aac->scsi_host_ptr;
- struct scsi_cmnd *scmnd = NULL;
struct device *ctrl_dev;
+ struct fib_count_data fcnt = { };
- int mlcnt = 0;
- int llcnt = 0;
- int ehcnt = 0;
- int fwcnt = 0;
- int krlcnt = 0;
-
- __shost_for_each_device(sdev, shost) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_for_each_entry(scmnd, &sdev->cmd_list, list) {
- switch (scmnd->SCp.phase) {
- case AAC_OWNER_FIRMWARE:
- fwcnt++;
- break;
- case AAC_OWNER_ERROR_HANDLER:
- ehcnt++;
- break;
- case AAC_OWNER_LOWLEVEL:
- llcnt++;
- break;
- case AAC_OWNER_MIDLEVEL:
- mlcnt++;
- break;
- default:
- krlcnt++;
- break;
- }
- }
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
+ scsi_host_busy_iter(shost, fib_count_iter, &fcnt);
ctrl_dev = &aac->pdev->dev;
- dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
- dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
- dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
- dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
- dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);
+ dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", fcnt.mlcnt);
+ dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", fcnt.llcnt);
+ dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", fcnt.ehcnt);
+ dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fcnt.fwcnt);
+ dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", fcnt.krlcnt);
- return mlcnt + llcnt + ehcnt + fwcnt;
+ return fcnt.mlcnt + fcnt.llcnt + fcnt.ehcnt + fcnt.fwcnt;
}
static int aac_eh_abort(struct scsi_cmnd* cmd)
@@ -731,7 +734,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
(fib_callback) aac_hba_callback,
(void *) cmd);
-
+ if (status != -EINPROGRESS) {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
/* Wait up to 15 secs for completion */
for (count = 0; count < 15; ++count) {
if (cmd->SCp.sent_command) {
@@ -910,11 +917,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
- info->reset_state > 0)
+ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+ !(info->reset_state > 0)))
return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ pr_err("%s: Host device reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
fib = aac_fib_alloc(aac);
@@ -929,7 +936,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
status = aac_hba_send(command, fib,
(fib_callback) aac_tmf_callback,
(void *) info);
-
+ if (status != -EINPROGRESS) {
+ info->reset_state = 0;
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state == 0) {
@@ -968,11 +980,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
- info->reset_state > 0)
+ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+ !(info->reset_state > 0)))
return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ pr_err("%s: Host target reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
fib = aac_fib_alloc(aac);
@@ -989,6 +1001,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
(fib_callback) aac_tmf_callback,
(void *) info);
+ if (status != -EINPROGRESS) {
+ info->reset_state = 0;
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
+
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state <= 0) {
@@ -1041,7 +1060,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
}
}
- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
+ pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
/*
* Check the health of the controller
@@ -1269,20 +1288,21 @@ static ssize_t aac_show_flags(struct device *cdev,
if (nblank(dprintk(x)))
len = snprintf(buf, PAGE_SIZE, "dprintk\n");
#ifdef AAC_DETAILED_STATUS_INFO
- len += snprintf(buf + len, PAGE_SIZE - len,
- "AAC_DETAILED_STATUS_INFO\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "AAC_DETAILED_STATUS_INFO\n");
#endif
if (dev->raw_io_interface && dev->raw_io_64)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "SAI_READ_CAPACITY_16\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SAI_READ_CAPACITY_16\n");
if (dev->jbod)
- len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_JBOD\n");
if (dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "SUPPORTED_POWER_MANAGEMENT\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_POWER_MANAGEMENT\n");
if (dev->msi)
- len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
return len;
}
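The snprintf()-to-scnprintf() switch matters for the buf + len arithmetic: snprintf() returns the length the output would have taken, so after truncation len can exceed PAGE_SIZE and the next append writes past the buffer, whereas scnprintf() returns what was actually stored. A small worked example (buffer size chosen for illustration):

	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "0123456789");	/* n == 10; 7 chars + '\0' stored */
	n = scnprintf(buf, sizeof(buf), "0123456789");	/* n == 7; the stored length */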
@@ -1675,7 +1695,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->irq = pdev->irq;
shost->unique_id = unique_id;
shost->max_cmd_len = 16;
- shost->use_cmd_list = 1;
if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
aac_init_char();
@@ -1894,7 +1913,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- scsi_block_requests(shost);
+ scsi_host_block(shost);
aac_cancel_rescan_worker(aac);
aac_send_shutdown(aac);
@@ -1930,7 +1949,7 @@ static int aac_resume(struct pci_dev *pdev)
* aac_send_shutdown() to block ioctls from upperlayer
*/
aac->adapter_shutdown = 0;
- scsi_unblock_requests(shost);
+ scsi_host_unblock(shost, SDEV_RUNNING);
return 0;
@@ -1945,7 +1964,8 @@ fail_device:
static void aac_shutdown(struct pci_dev *dev)
{
struct Scsi_Host *shost = pci_get_drvdata(dev);
- scsi_block_requests(shost);
+
+ scsi_host_block(shost);
__aac_shutdown((struct aac_dev *)shost->hostdata);
}
@@ -1977,26 +1997,6 @@ static void aac_remove_one(struct pci_dev *pdev)
}
}
-static void aac_flush_ios(struct aac_dev *aac)
-{
- int i;
- struct scsi_cmnd *cmd;
-
- for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
- cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
- if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
- scsi_dma_unmap(cmd);
-
- if (aac->handle_pci_error)
- cmd->result = DID_NO_CONNECT << 16;
- else
- cmd->result = DID_RESET << 16;
-
- cmd->scsi_done(cmd);
- }
- }
-}
-
static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
enum pci_channel_state error)
{
@@ -2011,9 +2011,9 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
case pci_channel_io_frozen:
aac->handle_pci_error = 1;
- scsi_block_requests(aac->scsi_host_ptr);
+ scsi_host_block(shost);
aac_cancel_rescan_worker(aac);
- aac_flush_ios(aac);
+ scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
aac_release_resources(aac);
pci_disable_pcie_error_reporting(pdev);
@@ -2023,7 +2023,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
aac->handle_pci_error = 1;
- aac_flush_ios(aac);
+ scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -2064,7 +2064,6 @@ fail_device:
static void aac_pci_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct scsi_device *sdev = NULL;
struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
if (aac_adapter_ioremap(aac, aac->base_size)) {
@@ -2091,10 +2090,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
aac->adapter_shutdown = 0;
aac->handle_pci_error = 0;
- shost_for_each_device(sdev, shost)
- if (sdev->sdev_state == SDEV_OFFLINE)
- sdev->sdev_state = SDEV_RUNNING;
- scsi_unblock_requests(aac->scsi_host_ptr);
+ scsi_host_unblock(shost, SDEV_RUNNING);
aac_scan_host(aac);
pci_save_state(pdev);
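
Editorial note on the aacraid conversion above: scsi_block_requests() only
set a host flag, while scsi_host_block()/scsi_host_unblock() quiesce and
restore every scsi_device hanging off the host, which is why the open-coded
shost_for_each_device() loop flipping SDEV_OFFLINE back to SDEV_RUNNING could
be dropped. A minimal sketch of the new suspend/resume bracketing, with the
adapter power handling elided (the function names here are hypothetical):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int example_suspend(struct Scsi_Host *shost)
{
        scsi_host_block(shost);         /* quiesce all child devices */
        /* ... shut the adapter down, save PCI state ... */
        return 0;
}

static int example_resume(struct Scsi_Host *shost)
{
        /* ... restore PCI state, reinitialise the adapter ... */
        scsi_host_unblock(shost, SDEV_RUNNING);
        return 0;
}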
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index a242a62caaa1..c2c7850ff7b4 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -316,7 +316,7 @@ typedef struct asc_sg_head {
ushort queue_cnt;
ushort entry_to_copy;
ushort res;
- ASC_SG_LIST sg_list[0];
+ ASC_SG_LIST sg_list[];
} ASC_SG_HEAD;
typedef struct asc_scsi_q {
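
The [0] -> [] change here (and in fnic's vnic_devcmd.h below) moves from the
old GNU zero-length-array extension to a C99 flexible array member; sizeof()
still excludes the trailing array, and the allocation supplies the element
count explicitly. A hedged sketch of the allocation idiom (the types are
invented for illustration):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_sg_head {
        unsigned short queue_cnt;
        unsigned short entry_cnt;
        struct example_sg_ent {
                unsigned int addr;
                unsigned int bytes;
        } sg_list[];                    /* flexible array member */
};

static struct example_sg_head *example_sg_alloc(unsigned short nents)
{
        struct example_sg_head *h;

        /* struct_size() guards the size calculation against overflow */
        h = kzalloc(struct_size(h, sg_list, nents), GFP_KERNEL);
        if (h)
                h->entry_cnt = nents;
        return h;
}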
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index eb466c2e1839..90f97df1c42a 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -220,7 +220,7 @@
*
**************************************************************************
- see Documentation/scsi/aha152x.txt for configuration details
+ see Documentation/scsi/aha152x.rst for configuration details
**************************************************************************/
@@ -1249,7 +1249,7 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
"aha152x: unable to verify geometry for disk with >1GB.\n"
" Using default translation. Please verify yourself.\n"
" Perhaps you need to enable extended translation in the driver.\n"
- " See Documentation/scsi/aha152x.txt for details.\n");
+ " See Documentation/scsi/aha152x.rst for details.\n");
}
} else {
info_array[0] = info[0];
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index da4150c17781..5a227c03895f 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -592,7 +592,6 @@ static int aha1740_probe (struct device *dev)
DMA_BIDIRECTIONAL);
if (!host->ecb_dma_addr) {
printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n");
- scsi_host_put (shpnt);
goto err_host_put;
}
diff --git a/drivers/scsi/aic7xxx/.gitignore b/drivers/scsi/aic7xxx/.gitignore
index b8ee24d5748a..9aa780221718 100644
--- a/drivers/scsi/aic7xxx/.gitignore
+++ b/drivers/scsi/aic7xxx/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
aic79xx_reg.h
aic79xx_reg_print.c
aic79xx_seq.h
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 16743fb9eead..d4c50b8fce29 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -32,7 +32,7 @@ config AIC79XX_CMDS_PER_DEVICE
on some devices. The upper bound is 253. 0 disables tagged queueing.
Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See Documentation/scsi/aic79xx.txt for details.
+ "tag_info" option. See Documentation/scsi/aic79xx.rst for details.
config AIC79XX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 3546b8cc401f..9d027549d698 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -37,7 +37,7 @@ config AIC7XXX_CMDS_PER_DEVICE
on some devices. The upper bound is 253. 0 disables tagged queueing.
Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See Documentation/scsi/aic7xxx.txt for details.
+ "tag_info" option. See Documentation/scsi/aic7xxx.rst for details.
config AIC7XXX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 7e5044bf05c0..a336a458c978 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3107,19 +3107,6 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
printerror = 0;
} else if (ahd_sent_msg(ahd, AHDMSG_1B,
MSG_BUS_DEV_RESET, TRUE)) {
-#ifdef __FreeBSD__
- /*
- * Don't mark the user's request for this BDR
- * as completing with CAM_BDR_SENT. CAM3
- * specifies CAM_REQ_CMP.
- */
- if (scb != NULL
- && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
- && ahd_match_scb(ahd, scb, target, 'A',
- CAM_LUN_WILDCARD, SCB_LIST_NULL,
- ROLE_INITIATOR))
- ahd_set_transaction_status(scb, CAM_REQ_CMP);
-#endif
ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
CAM_BDR_SENT, "Bus Device Reset",
/*verbose_level*/0);
@@ -6067,22 +6054,17 @@ ahd_alloc(void *platform_arg, char *name)
{
struct ahd_softc *ahd;
-#ifndef __FreeBSD__
ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
if (!ahd) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
-#else
- ahd = device_get_softc((device_t)platform_arg);
-#endif
+
memset(ahd, 0, sizeof(*ahd));
ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
if (ahd->seep_config == NULL) {
-#ifndef __FreeBSD__
kfree(ahd);
-#endif
kfree(name);
return (NULL);
}
@@ -6206,9 +6188,7 @@ ahd_free(struct ahd_softc *ahd)
kfree(ahd->seep_config);
if (ahd->saved_stack != NULL)
kfree(ahd->saved_stack);
-#ifndef __FreeBSD__
kfree(ahd);
-#endif
return;
}
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 57992519384e..dc4fe334efd0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -723,24 +723,17 @@ static int
ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
- uint8_t *bh;
int heads;
int sectors;
int cylinders;
- int ret;
int extended;
struct ahd_softc *ahd;
ahd = *((struct ahd_softc **)sdev->host->hostdata);
- bh = scsi_bios_ptable(bdev);
- if (bh) {
- ret = scsi_partsize(bh, capacity,
- &geom[2], &geom[0], &geom[1]);
- kfree(bh);
- if (ret != -1)
- return (ret);
- }
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
+
heads = 64;
sectors = 32;
cylinders = aic_sector_div(capacity, heads, sectors);
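
The scsi_partsize() conversion repeated across aic79xx, aic7xxx and arcmsr
replaces the read-the-partition-table-yourself dance (scsi_bios_ptable()
plus kfree()) with one call that takes the block device and fills
geom[0]/geom[1]/geom[2] (heads/sectors/cylinders) itself, returning true
when a valid partition table supplied the geometry. A sketch of the
resulting ->bios_param() shape, using the same 64/32 default translation
these drivers fall back to (the helper's bool return follows this series;
the rest is illustrative):

#include <linux/blkdev.h>
#include <linux/math64.h>
#include <scsi/scsi_device.h>
#include <scsi/scsicam.h>

static int example_bios_param(struct scsi_device *sdev,
                              struct block_device *bdev,
                              sector_t capacity, int geom[])
{
        if (scsi_partsize(bdev, capacity, geom))
                return 0;       /* geometry taken from the partition table */

        geom[0] = 64;                           /* heads */
        geom[1] = 32;                           /* sectors */
        geom[2] = div_u64(capacity, 64 * 32);   /* cylinders */
        return 0;
}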
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 4190a025381a..84fc499cb1e6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -1834,21 +1834,6 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_1B,
MSG_BUS_DEV_RESET, TRUE)) {
-#ifdef __FreeBSD__
- /*
- * Don't mark the user's request for this BDR
- * as completing with CAM_BDR_SENT. CAM3
- * specifies CAM_REQ_CMP.
- */
- if (scb != NULL
- && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
- && ahc_match_scb(ahc, scb, target, channel,
- CAM_LUN_WILDCARD,
- SCB_LIST_NULL,
- ROLE_INITIATOR)) {
- ahc_set_transaction_status(scb, CAM_REQ_CMP);
- }
-#endif
ahc_compile_devinfo(&devinfo,
initiator_role_id,
target,
@@ -4399,22 +4384,16 @@ ahc_alloc(void *platform_arg, char *name)
struct ahc_softc *ahc;
int i;
-#ifndef __FreeBSD__
ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
if (!ahc) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
-#else
- ahc = device_get_softc((device_t)platform_arg);
-#endif
memset(ahc, 0, sizeof(*ahc));
ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
if (ahc->seep_config == NULL) {
-#ifndef __FreeBSD__
kfree(ahc);
-#endif
kfree(name);
return (NULL);
}
@@ -4540,9 +4519,7 @@ ahc_free(struct ahc_softc *ahc)
kfree(ahc->name);
if (ahc->seep_config != NULL)
kfree(ahc->seep_config);
-#ifndef __FreeBSD__
kfree(ahc);
-#endif
return;
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d5c4a0d23706..2edfa0594f18 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -695,11 +695,9 @@ static int
ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
- uint8_t *bh;
int heads;
int sectors;
int cylinders;
- int ret;
int extended;
struct ahc_softc *ahc;
u_int channel;
@@ -707,14 +705,9 @@ ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
ahc = *((struct ahc_softc **)sdev->host->hostdata);
channel = sdev_channel(sdev);
- bh = scsi_bios_ptable(bdev);
- if (bh) {
- ret = scsi_partsize(bh, capacity,
- &geom[2], &geom[0], &geom[1]);
- kfree(bh);
- if (ret != -1)
- return (ret);
- }
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
+
heads = 64;
sectors = 32;
cylinders = aic_sector_div(capacity, heads, sectors);
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 259d9c20bf25..57be9609d504 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -41,7 +41,7 @@
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
-** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
*******************************************************************************
*/
#include <linux/module.h>
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 40dc8eac0e3a..30914c8f29cc 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -41,7 +41,7 @@
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
-** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
*******************************************************************************
*/
#include <linux/module.h>
@@ -353,16 +353,11 @@ static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
static int arcmsr_bios_param(struct scsi_device *sdev,
struct block_device *bdev, sector_t capacity, int *geom)
{
- int ret, heads, sectors, cylinders, total_capacity;
- unsigned char *buffer;/* return copy of block device's partition table */
+ int heads, sectors, cylinders, total_capacity;
+
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
- buffer = scsi_bios_ptable(bdev);
- if (buffer) {
- ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
- kfree(buffer);
- if (ret != -1)
- return ret;
- }
total_capacity = capacity;
heads = 64;
sectors = 32;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index d4febaadfaa3..a2d69b287c7b 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1178,12 +1178,12 @@ beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num,
- (total_cids - avlbl_cids));
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ (total_cids - avlbl_cids));
} else
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num, 0);
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
}
return len;
@@ -1208,12 +1208,12 @@ beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num,
- BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
else
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num, 0);
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
}
return len;
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 3b84db8d13a9..b6e8ed757252 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -66,7 +66,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.12.10"
+#define BNX2FC_VERSION "2.12.13"
#define PFX "bnx2fc: "
@@ -482,7 +482,10 @@ struct io_bdt {
struct bnx2fc_work {
struct list_head list;
struct bnx2fc_rport *tgt;
+ struct fcoe_task_ctx_entry *task;
+ unsigned char rq_data[BNX2FC_RQ_BUF_SZ];
u16 wqe;
+ u8 num_rq;
};
struct bnx2fc_unsol_els {
struct fc_lport *lport;
@@ -550,7 +553,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
enum fc_rport_event event);
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
@@ -559,7 +562,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
u8 num_rq);
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
@@ -577,7 +580,9 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout);
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task);
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
u32 port_id);
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index b4bfab5edf8f..1cbb431fa682 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -660,7 +660,10 @@ static int bnx2fc_percpu_io_thread(void *arg)
list_for_each_entry_safe(work, tmp, &work_list, list) {
list_del_init(&work->list);
- bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe,
+ work->rq_data,
+ work->num_rq,
+ work->task);
kfree(work);
}
@@ -2655,7 +2658,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu)
/* Free all work in the list */
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
list_del_init(&work->list);
- bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data,
+ work->num_rq, work->task);
kfree(work);
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 6f8335ddb1f2..1f7c58b4c535 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -863,36 +863,22 @@ ret_warn_rqe:
}
}
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task)
{
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
struct fcoe_port *port = tgt->port;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_cmd *io_req;
- int task_idx, index;
+
u16 xid;
u8 cmd_type;
u8 rx_state = 0;
- u8 num_rq;
spin_lock_bh(&tgt->tgt_lock);
- xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
- if (xid >= hba->max_tasks) {
- printk(KERN_ERR PFX "ERROR:xid out of range\n");
- spin_unlock_bh(&tgt->tgt_lock);
- return;
- }
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
- task = &(task_page[index]);
-
- num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
- FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
- FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (io_req == NULL) {
@@ -912,7 +898,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
switch (cmd_type) {
case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
- bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
+ rq_data);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
@@ -929,7 +916,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
case BNX2FC_TASK_MGMT_CMD:
BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
- bnx2fc_process_tm_compl(io_req, task, num_rq);
+ bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
break;
case BNX2FC_ABTS:
@@ -987,7 +974,9 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
}
-static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task)
{
struct bnx2fc_work *work;
work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
@@ -997,29 +986,87 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
INIT_LIST_HEAD(&work->list);
work->tgt = tgt;
work->wqe = wqe;
+ work->num_rq = num_rq;
+ work->task = task;
+ if (rq_data)
+ memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
+
return work;
}
/* Pending work request completion */
-static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
unsigned int cpu = wqe % num_possible_cpus();
struct bnx2fc_percpu_s *fps;
struct bnx2fc_work *work;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ unsigned char *rq_data = NULL;
+ unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
+ int task_idx, index;
+ unsigned char *dummy;
+ u16 xid;
+ u8 num_rq;
+ int i;
+
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= hba->max_tasks) {
+ pr_err(PFX "ERROR:xid out of range\n");
+ return false;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &task_page[index];
+
+ num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+
+ memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
+
+ if (!num_rq)
+ goto num_rq_zero;
+
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ dummy = bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ if (rq_data)
+ memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+
+num_rq_zero:
fps = &per_cpu(bnx2fc_percpu, cpu);
spin_lock_bh(&fps->fp_work_lock);
if (fps->iothread) {
- work = bnx2fc_alloc_work(tgt, wqe);
+ work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
+ num_rq, task);
if (work) {
list_add_tail(&work->list, &fps->work_list);
wake_up_process(fps->iothread);
spin_unlock_bh(&fps->fp_work_lock);
- return;
+ return true;
}
}
spin_unlock_bh(&fps->fp_work_lock);
- bnx2fc_process_cq_compl(tgt, wqe);
+ bnx2fc_process_cq_compl(tgt, wqe,
+ rq_data_buff, num_rq, task);
+
+ return true;
}
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
@@ -1056,8 +1103,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
- bnx2fc_pending_work(tgt, wqe);
- num_free_sqes++;
+ if (bnx2fc_pending_work(tgt, wqe))
+ num_free_sqes++;
}
cqe++;
tgt->cq_cons_idx++;
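
The net effect of this bnx2fc_hwi.c rework: the RQ (receive queue) entries
holding sense data are consumed and copied while still on the completion
path, so the per-CPU worker later operates on a stable snapshot instead of
racing the hardware ring. Boiled down to the copy-into-work step (the sizes
and names here are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_RQ_BUF_SZ 256

struct example_work {
        struct list_head list;
        u16 wqe;
        u8 num_rq;
        unsigned char rq_data[EXAMPLE_RQ_BUF_SZ];
};

static struct example_work *example_alloc_work(u16 wqe, u8 num_rq,
                                               const void *rq_data)
{
        struct example_work *work = kzalloc(sizeof(*work), GFP_ATOMIC);

        if (!work)
                return NULL;

        INIT_LIST_HEAD(&work->list);
        work->wqe = wqe;
        work->num_rq = num_rq;
        if (rq_data)    /* snapshot before the ring entries are returned */
                memcpy(work->rq_data, rq_data, EXAMPLE_RQ_BUF_SZ);

        return work;
}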
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4c8122a82322..2b070f0835df 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -24,7 +24,7 @@ static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
unsigned int timer_msec)
@@ -1518,7 +1518,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
- struct fcoe_task_ctx_entry *task, u8 num_rq)
+ struct fcoe_task_ctx_entry *task, u8 num_rq,
+ unsigned char *rq_data)
{
struct bnx2fc_mp_req *tm_req;
struct fc_frame_header *fc_hdr;
@@ -1557,7 +1558,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
bnx2fc_parse_fcp_rsp(io_req,
(struct fcoe_fcp_rsp_payload *)
- rsp_buf, num_rq);
+ rsp_buf, num_rq, rq_data);
if (io_req->fcp_rsp_code == 0) {
/* TM successful */
if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
@@ -1755,15 +1756,11 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp,
- u8 num_rq)
+ u8 num_rq, unsigned char *rq_data)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- struct bnx2fc_rport *tgt = io_req->tgt;
u8 rsp_flags = fcp_rsp->fcp_flags.flags;
u32 rq_buff_len = 0;
- int i;
- unsigned char *rq_data;
- unsigned char *dummy;
int fcp_sns_len = 0;
int fcp_rsp_len = 0;
@@ -1809,14 +1806,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
}
- rq_data = bnx2fc_get_next_rqe(tgt, 1);
-
- if (num_rq > 1) {
- /* We do not need extra sense data */
- for (i = 1; i < num_rq; i++)
- dummy = bnx2fc_get_next_rqe(tgt, 1);
- }
-
/* fetch fcp_rsp_code */
if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
/* Only for task management function */
@@ -1837,9 +1826,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
if (fcp_sns_len)
memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
- /* return RQ entries */
- for (i = 0; i < num_rq; i++)
- bnx2fc_return_rqe(tgt, 1);
}
}
@@ -1918,7 +1904,7 @@ exit_qcmd:
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq)
+ u8 num_rq, unsigned char *rq_data)
{
struct fcoe_fcp_rsp_payload *fcp_rsp;
struct bnx2fc_rport *tgt = io_req->tgt;
@@ -1931,6 +1917,12 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* we will not receive ABTS response for this IO */
BNX2FC_IO_DBG(io_req, "Timer context finished processing "
"this scsi cmd\n");
+ if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req,
+ "Actual completion after cleanup request cleaning up\n");
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ }
return;
}
@@ -1950,7 +1942,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
&(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
/* parse fcp_rsp and obtain sense data from RQ if available */
- bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
if (!sc_cmd->SCp.ptr) {
printk(KERN_ERR PFX "SCp.ptr is NULL\n");
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index ed5f4a6ae270..cb74ab1ae5a4 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -44,7 +44,6 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER);
-static DEFINE_MUTEX(ch_mutex);
static int init = 1;
module_param(init, int, 0444);
MODULE_PARM_DESC(init, \
@@ -569,6 +568,7 @@ static void ch_destroy(struct kref *ref)
{
scsi_changer *ch = container_of(ref, scsi_changer, ref);
+ ch->device = NULL;
kfree(ch->dt);
kfree(ch);
}
@@ -590,20 +590,22 @@ ch_open(struct inode *inode, struct file *file)
scsi_changer *ch;
int minor = iminor(inode);
- mutex_lock(&ch_mutex);
spin_lock(&ch_index_lock);
ch = idr_find(&ch_index_idr, minor);
- if (NULL == ch || scsi_device_get(ch->device)) {
+ if (ch == NULL || !kref_get_unless_zero(&ch->ref)) {
spin_unlock(&ch_index_lock);
- mutex_unlock(&ch_mutex);
return -ENXIO;
}
- kref_get(&ch->ref);
spin_unlock(&ch_index_lock);
-
+ if (scsi_device_get(ch->device)) {
+ kref_put(&ch->ref, ch_destroy);
+ return -ENXIO;
+ }
+ /* Synchronize with ch_probe() */
+ mutex_lock(&ch->lock);
file->private_data = ch;
- mutex_unlock(&ch_mutex);
+ mutex_unlock(&ch->lock);
return 0;
}
@@ -938,7 +940,16 @@ static int ch_probe(struct device *dev)
ch->minor = ret;
sprintf(ch->name,"ch%d",ch->minor);
+ ret = scsi_device_get(sd);
+ if (ret) {
+ sdev_printk(KERN_WARNING, sd, "ch%d: failed to get device\n",
+ ch->minor);
+ goto remove_idr;
+ }
+ mutex_init(&ch->lock);
+ kref_init(&ch->ref);
+ ch->device = sd;
class_dev = device_create(ch_sysfs_class, dev,
MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
"s%s", ch->name);
@@ -946,24 +957,27 @@ static int ch_probe(struct device *dev)
sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n",
ch->minor);
ret = PTR_ERR(class_dev);
- goto remove_idr;
+ goto put_device;
}
- mutex_init(&ch->lock);
- kref_init(&ch->ref);
- ch->device = sd;
+ mutex_lock(&ch->lock);
ret = ch_readconfig(ch);
- if (ret)
+ if (ret) {
+ mutex_unlock(&ch->lock);
goto destroy_dev;
+ }
if (init)
ch_init_elem(ch);
+ mutex_unlock(&ch->lock);
dev_set_drvdata(dev, ch);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
return 0;
destroy_dev:
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
+put_device:
+ scsi_device_put(sd);
remove_idr:
idr_remove(&ch_index_idr, ch->minor);
free_ch:
@@ -977,9 +991,11 @@ static int ch_remove(struct device *dev)
spin_lock(&ch_index_lock);
idr_remove(&ch_index_idr, ch->minor);
+ dev_set_drvdata(dev, NULL);
spin_unlock(&ch_index_lock);
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
+ scsi_device_put(ch->device);
kref_put(&ch->ref, ch_destroy);
return 0;
}
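
The ch.c rework above replaces a global open mutex with the standard
lookup-and-reference idiom: find the object under the lock protecting the
idr, and take a reference only if the refcount has not already dropped to
zero, so open() can never resurrect an object that ch_remove() is tearing
down. In miniature (the types are illustrative):

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/spinlock.h>

struct example_obj {
        struct kref ref;
};

static DEFINE_SPINLOCK(example_lock);
static DEFINE_IDR(example_idr);

static void example_destroy(struct kref *ref);  /* frees the object */

static struct example_obj *example_get(int minor)
{
        struct example_obj *obj;

        spin_lock(&example_lock);
        obj = idr_find(&example_idr, minor);
        if (obj && !kref_get_unless_zero(&obj->ref))
                obj = NULL;             /* already being torn down */
        spin_unlock(&example_lock);

        return obj;
}

static void example_put(struct example_obj *obj)
{
        kref_put(&obj->ref, example_destroy);
}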
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index d4c2a2e4c5d4..84d73f57292b 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -404,7 +404,7 @@ static const char * const hostbyte_table[]={
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
-"DID_NEXUS_FAILURE" };
+"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" };
static const char * const driverbyte_table[]={
"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 13fbb2eab842..e95f5b3bef4d 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -61,7 +61,6 @@
#include <asm/io.h>
#include <scsi/scsi.h>
-#include <scsi/scsicam.h> /* needed for scsicam_bios_param */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
@@ -1053,38 +1052,6 @@ complete:
static DEF_SCSI_QCMD(dc395x_queue_command)
-/*
- * Return the disk geometry for the given SCSI device.
- */
-static int dc395x_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *info)
-{
-#ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
- int heads, sectors, cylinders;
- struct AdapterCtlBlk *acb;
- int size = capacity;
-
- dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
- acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
- heads = 64;
- sectors = 32;
- cylinders = size / (heads * sectors);
-
- if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
- heads = 255;
- sectors = 63;
- cylinders = size / (heads * sectors);
- }
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
- return 0;
-#else
- return scsicam_bios_param(bdev, capacity, info);
-#endif
-}
-
-
static void dump_register_info(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
@@ -4622,7 +4589,6 @@ static struct scsi_host_template dc395x_driver_template = {
.show_info = dc395x_show_info,
.name = DC395X_BANNER " " DC395X_VERSION,
.queuecommand = dc395x_queue_command,
- .bios_param = dc395x_bios_param,
.slave_alloc = dc395x_slave_alloc,
.slave_destroy = dc395x_slave_destroy,
.can_queue = DC395x_MAX_CAN_QUEUE,
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index 6bc33f4f020d..25e9251f8c78 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -5,7 +5,7 @@
begin : Thu Sep 7 2000
copyright : (C) 2001 by Adaptec
- See Documentation/scsi/dpti.txt for history, notes, license info
+ See Documentation/scsi/dpti.rst for history, notes, license info
and credits
***************************************************************************/
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index abc74fd474dc..02dff3a684e0 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -8,7 +8,7 @@
July 30, 2001 First version being submitted
for inclusion in the kernel. V2.4
- See Documentation/scsi/dpti.txt for history, notes, license info
+ See Documentation/scsi/dpti.rst for history, notes, license info
and credits
***************************************************************************/
@@ -817,7 +817,7 @@ static int adpt_hba_reset(adpt_hba* pHba)
}
pHba->state &= ~DPTI_STATE_RESET;
- adpt_fail_posted_scbs(pHba);
+ scsi_host_complete_all_commands(pHba->host, DID_RESET);
return 0; /* return success */
}
@@ -2173,7 +2173,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
readl(reply + 12) - 1);
if(cmd != NULL){
scsi_dma_unmap(cmd);
- adpt_i2o_to_scsi(reply, cmd);
+ adpt_i2o_scsi_complete(reply, cmd);
}
}
writel(m, pHba->reply_port);
@@ -2335,13 +2335,12 @@ static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
host->unique_id = (u32)sys_tbl_pa + pHba->unit;
host->sg_tablesize = pHba->sg_tablesize;
host->can_queue = pHba->post_fifo_size;
- host->use_cmd_list = 1;
return 0;
}
-static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
+static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
{
adpt_hba* pHba;
u32 hba_status;
@@ -2459,7 +2458,6 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
if(cmd->scsi_done != NULL){
cmd->scsi_done(cmd);
}
- return cmd->result;
}
@@ -2647,23 +2645,6 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
return 0;
}
-static void adpt_fail_posted_scbs(adpt_hba* pHba)
-{
- struct scsi_cmnd* cmd = NULL;
- struct scsi_device* d = NULL;
-
- shost_for_each_device(d, pHba->host) {
- unsigned long flags;
- spin_lock_irqsave(&d->list_lock, flags);
- list_for_each_entry(cmd, &d->cmd_list, list) {
- cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
- cmd->scsi_done(cmd);
- }
- spin_unlock_irqrestore(&d->list_lock, flags);
- }
-}
-
-
/*============================================================================
* Routines from i2o subsystem
*============================================================================
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 42b1e28b5884..8a079e8d7f65 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -5,7 +5,7 @@
begin : Thu Sep 7 2000
copyright : (C) 2001 by Adaptec
- See Documentation/scsi/dpti.txt for history, notes, license info
+ See Documentation/scsi/dpti.rst for history, notes, license info
and credits
***************************************************************************/
@@ -286,7 +286,7 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba);
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
-static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
+static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd);
static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
static s32 adpt_hba_reset(adpt_hba* pHba);
static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
@@ -295,7 +295,6 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
static void adpt_i2o_delete_hba(adpt_hba* pHba);
static void adpt_inquiry(adpt_hba* pHba);
-static void adpt_fail_posted_scbs(adpt_hba* pHba);
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
static int adpt_i2o_online_hba(adpt_hba* pHba);
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index a0d01aea28f7..9d52d83161ed 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -138,7 +138,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
* Dump trace buffer entry to memory file
* and increment read index @rd_idx
*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
@@ -180,7 +180,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
* Dump trace buffer entry to memory file
* and increment read index @rd_idx
*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
@@ -220,12 +220,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
struct timespec64 val1, val2;
ktime_get_real_ts64(&val1);
- len = snprintf(debug->debug_buffer + len, buf_size - len,
+ len = scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Current time : [%lld:%ld]\n"
"Last stats reset time: [%lld:%09ld]\n"
"Last stats read time: [%lld:%ld]\n"
@@ -243,11 +243,11 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
stats->stats_timestamps.last_read_time = val1;
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
"Number of IOs: %lld\nNumber of IO Completions: %lld\n"
"Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
@@ -280,16 +280,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
(u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\nCurrent Max IO time : %lld\n",
(u64)atomic64_read(&stats->io_stats.current_max_io_time));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tAbort Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Aborts: %lld\n"
"Number of Abort Failures: %lld\n"
"Number of Abort Driver Timeouts: %lld\n"
@@ -318,12 +318,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tTerminate Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Terminates: %lld\n"
"Maximum Terminates: %lld\n"
"Number of Terminate Driver Timeouts: %lld\n"
@@ -337,12 +337,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
(u64)atomic64_read(&stats->term_stats.terminate_failures));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tReset Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Device Resets: %lld\n"
"Number of Device Reset Failures: %lld\n"
"Number of Device Reset Aborts: %lld\n"
@@ -368,12 +368,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
&stats->reset_stats.fnic_reset_completions),
(u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tFirmware Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active FW Requests %lld\n"
"Maximum FW Requests: %lld\n"
"Number of FW out of resources: %lld\n"
@@ -383,12 +383,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
(u64)atomic64_read(&stats->fw_stats.io_fw_errs));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tVlan Discovery Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Vlan Discovery Requests Sent %lld\n"
"Vlan Response Received with no FCF VLAN ID: %lld\n"
"No solicitations recvd after vlan set, expiry count: %lld\n"
@@ -398,7 +398,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
(u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
@@ -406,7 +406,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Last ISR time: %llu (%8llu.%09lu)\n"
"Last ACK time: %llu (%8llu.%09lu)\n"
"Max ISR jiffies: %llu\n"
@@ -452,7 +452,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
(u64)atomic64_read(&stats->misc_stats.frame_errors));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Firmware reported port speed: %llu\n",
(u64)atomic64_read(
&stats->misc_stats.current_port_speed));
@@ -742,7 +742,7 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
rd_idx = fc_trace_entries.rd_idx;
wr_idx = fc_trace_entries.wr_idx;
if (rdata_flag == 0) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
"Time Stamp (UTC)\t\t"
"Host No: F Type: len: FCoE_FRAME:\n");
@@ -762,11 +762,11 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
} else {
fc_trace = (char *)tdata;
for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
- len, "%02x", fc_trace[j] & 0xff);
} /* for loop */
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
"\n");
}
@@ -810,7 +810,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len,
fmt,
tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
@@ -823,25 +823,25 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
for (j = 0; j < min_t(u8, tdata->frame_len,
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
if (tdata->frame_type == FNIC_FC_LE) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "%c", fc_trace[j]);
} else {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "%02x", fc_trace[j] & 0xff);
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, " ");
if (j == ethhdr_len ||
j == ethhdr_len + fcoehdr_len ||
j == ethhdr_len + fcoehdr_len + fchdr_len ||
(i > 3 && j%fchdr_len == 0)) {
- len += snprintf(fnic_dbgfs_prt->buffer
+ len += scnprintf(fnic_dbgfs_prt->buffer
+ len, max_size - len,
"\n\t\t\t\t\t\t\t\t");
i++;
}
} /* end of else*/
} /* End of for loop*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "\n");
*orig_len = len;
}
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index c5dde556dc7c..c20d30e36dfc 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -442,7 +442,7 @@ struct vnic_devcmd_notify {
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
- u8 data[0];
+ u8 data[];
};
/*
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 2ab774e62e40..2cc676e3df6a 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -20,7 +20,7 @@
* Added ISAPNP support for DTC436 adapters,
* Thomas Sailer, sailer@ife.ee.ethz.ch
*
- * See Documentation/scsi/g_NCR5380.txt for more info.
+ * See Documentation/scsi/g_NCR5380.rst for more info.
*/
#include <asm/io.h>
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 381d849726ac..c764312f9ba0 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -193,7 +193,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
for (i = 1; i < MAX_RES_ARGS; i++) {
if (reserve_list[i] == 0xff)
break;
- hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
+ hlen += scnprintf(hrec + hlen, 161 - hlen, ",%d", reserve_list[i]);
}
}
seq_printf(m,
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 90a17452a50d..13ed9073fc72 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -6,6 +6,7 @@ config SCSI_HISI_SAS
select SCSI_SAS_LIBSAS
select BLK_DEV_INTEGRITY
depends on ATA
+ select SATA_HOST
help
This driver supports HiSilicon's SAS HBA, including support based
on platform device
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a2debe0c8185..374885aa8d77 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2938,6 +2938,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
{
u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ *
HISI_SAS_IOST_ITCT_CACHE_NUM;
+ struct device *dev = hisi_hba->dev;
u32 *buf = cache;
u32 i, val;
@@ -2950,7 +2951,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
}
if (val != 0xffffffff) {
- pr_err("Issue occur when reading IOST/ITCT cache!\n");
+ dev_err(dev, "Issue occurred in reading IOST/ITCT cache!\n");
return;
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 1d669e47b692..7ec91c3a66ca 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -650,3 +650,68 @@ void scsi_flush_work(struct Scsi_Host *shost)
flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
+
+static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
+{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ int status = *(int *)data;
+
+ scsi_dma_unmap(scmd);
+ scmd->result = status << 16;
+ scmd->scsi_done(scmd);
+ return true;
+}
+
+/**
+ * scsi_host_complete_all_commands - Terminate all running commands
+ * @shost: Scsi Host on which commands should be terminated
+ * @status: Status to be set for the terminated commands
+ *
+ * There is no protection against modification of the number
+ * of outstanding commands. It is the responsibility of the
+ * caller to ensure that concurrent I/O submission and/or
+ * completion is stopped when calling this function.
+ */
+void scsi_host_complete_all_commands(struct Scsi_Host *shost, int status)
+{
+ blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
+ &status);
+}
+EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
+
+struct scsi_host_busy_iter_data {
+ bool (*fn)(struct scsi_cmnd *, void *, bool);
+ void *priv;
+};
+
+static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
+ bool reserved)
+{
+ struct scsi_host_busy_iter_data *iter_data = priv;
+ struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
+
+ return iter_data->fn(sc, iter_data->priv, reserved);
+}
+
+/**
+ * scsi_host_busy_iter - Iterate over all busy commands
+ * @shost: Pointer to Scsi_Host.
+ * @fn: Function to call on each busy command
+ * @priv: Data pointer passed to @fn
+ *
+ * If locking against concurrent command completions is required,
+ * it has to be provided by the caller.
+ **/
+void scsi_host_busy_iter(struct Scsi_Host *shost,
+ bool (*fn)(struct scsi_cmnd *, void *, bool),
+ void *priv)
+{
+ struct scsi_host_busy_iter_data iter_data = {
+ .fn = fn,
+ .priv = priv,
+ };
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn,
+ &iter_data);
+}
+EXPORT_SYMBOL_GPL(scsi_host_busy_iter);
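
Usage sketch for the two helpers introduced above: walk the busy commands on
a host via the blk-mq tag set. The iterator signature matches this hunk; the
counting callback is a hypothetical consumer (dpt_i2o and aacraid elsewhere
in this series use scsi_host_complete_all_commands() to replace their
hand-rolled flush loops):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static bool example_count_busy(struct scsi_cmnd *scmd, void *data,
                               bool reserved)
{
        unsigned int *count = data;

        (*count)++;
        return true;            /* true keeps the iteration going */
}

static unsigned int example_busy_count(struct Scsi_Host *shost)
{
        unsigned int count = 0;

        scsi_host_busy_iter(shost, example_count_busy, &count);
        return count;
}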
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1a4ddfacb458..1e9302e99d05 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -504,6 +504,12 @@ static ssize_t host_store_rescan(struct device *dev,
return count;
}
+static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
+{
+ device->offload_enabled = 0;
+ device->offload_to_be_enabled = 0;
+}
+
static ssize_t host_show_firmware_revision(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1738,8 +1744,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
__func__,
h->scsi_host->host_no, logical_drive->bus,
logical_drive->target, logical_drive->lun);
- logical_drive->offload_enabled = 0;
- logical_drive->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(logical_drive);
logical_drive->queue_depth = 8;
}
}
@@ -2499,8 +2504,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
IOACCEL2_SERV_RESPONSE_FAILURE) {
if (c2->error_data.status ==
IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
- dev->offload_enabled = 0;
- dev->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(dev);
}
if (dev->in_reset) {
@@ -3670,10 +3674,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
this_device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
if (this_device->offload_config) {
- this_device->offload_to_be_enabled =
+ bool offload_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
- if (hpsa_get_raid_map(h, scsi3addr, this_device))
- this_device->offload_to_be_enabled = 0;
+ /*
+ * Check to see if offload can be enabled.
+ */
+ if (offload_enabled) {
+ rc = hpsa_get_raid_map(h, scsi3addr, this_device);
+ if (rc) /* could not load raid_map */
+ goto out;
+ this_device->offload_to_be_enabled = 1;
+ }
}
out:
@@ -3996,8 +4007,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
} else {
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
- this_device->offload_enabled = 0;
- this_device->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(this_device);
this_device->hba_ioaccel_enabled = 0;
this_device->volume_offline = 0;
this_device->queue_depth = h->nr_cmds;
@@ -5230,8 +5240,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
/* Handles load balance across RAID 1 members.
* (2-drive R1 and R10 with even # of drives.)
* Appropriate for SSDs, not optimal for HDDs
+ * Ensure we have the correct raid_map.
*/
- BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
+ if (le16_to_cpu(map->layout_map_count) != 2) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
if (dev->offload_to_mirror)
map_index += le16_to_cpu(map->data_disks_per_row);
dev->offload_to_mirror = !dev->offload_to_mirror;
@@ -5239,8 +5253,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
case HPSA_RAID_ADM:
/* Handles N-way mirrors (R1-ADM)
* and R10 with # of drives divisible by 3.)
+ * Ensure we have the correct raid_map.
*/
- BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
+ if (le16_to_cpu(map->layout_map_count) != 3) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
offload_to_mirror = dev->offload_to_mirror;
raid_map_helper(map, offload_to_mirror,
@@ -5265,7 +5283,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
r5or6_blocks_per_row =
le16_to_cpu(map->strip_size) *
le16_to_cpu(map->data_disks_per_row);
- BUG_ON(r5or6_blocks_per_row == 0);
+ if (r5or6_blocks_per_row == 0) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
stripesize = r5or6_blocks_per_row *
le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
@@ -8285,7 +8306,7 @@ static int detect_controller_lockup(struct ctlr_info *h)
*
* Called from monitor controller worker (hpsa_event_monitor_worker)
*
- * A Volume (or Volumes that comprise an Array set may be undergoing a
+ * A Volume (or Volumes that comprise an Array set) may be undergoing a
* transformation, so we will be turning off ioaccel for all volumes that
* make up the Array.
*/
@@ -8308,6 +8329,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
* Run through current device list used during I/O requests.
*/
for (i = 0; i < h->ndevices; i++) {
+ int offload_to_be_enabled = 0;
+ int offload_config = 0;
+
device = h->dev[i];
if (!device)
@@ -8325,25 +8349,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
continue;
ioaccel_status = buf[IOACCEL_STATUS_BYTE];
- device->offload_config =
+
+ /*
+ * Check if offload is still configured on
+ */
+ offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
- if (device->offload_config)
- device->offload_to_be_enabled =
+ /*
+ * If offload is configured on, check to see if ioaccel
+ * needs to be enabled.
+ */
+ if (offload_config)
+ offload_to_be_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
/*
+ * If ioaccel is to be re-enabled, re-enable later during the
+ * scan operation so the driver can get a fresh raidmap
+ * before turning ioaccel back on.
+ */
+ if (offload_to_be_enabled)
+ continue;
+
+ /*
* Immediately turn off ioaccel for any volume the
* controller tells us to. Some of the reasons could be:
* transformation - change to the LVs of an Array.
* degraded volume - component failure
- *
- * If ioaccel is to be re-enabled, re-enable later during the
- * scan operation so the driver can get a fresh raidmap
- * before turning ioaccel back on.
- *
*/
- if (!device->offload_to_be_enabled)
- device->offload_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(device);
}
kfree(buf);
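
The hpsa hunks above share one defensive pattern: where an inconsistent RAID
map used to trip BUG_ON() and kill the machine, the driver now turns ioaccel
off for the affected device and reports the command ineligible so it is
retried on the ordinary RAID path. Schematically (the names are invented):

#include <linux/types.h>

#define EXAMPLE_INELIGIBLE 1

struct example_dev {
        int offload_enabled;
        int offload_to_be_enabled;
};

static void example_turn_off_ioaccel(struct example_dev *dev)
{
        dev->offload_enabled = 0;
        dev->offload_to_be_enabled = 0;
}

static int example_validate_map(struct example_dev *dev,
                                u16 layout_map_count, u16 expected)
{
        if (layout_map_count != expected) {
                /* was: BUG_ON(layout_map_count != expected) */
                example_turn_off_ioaccel(dev);
                return EXAMPLE_INELIGIBLE;      /* fall back to normal I/O */
        }

        return 0;
}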
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index df897df5cafe..635f6f9cffc4 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -133,6 +133,7 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
+static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static const char *unknown_error = "unknown error";
@@ -413,22 +414,44 @@ static const char *ibmvfc_get_fc_type(u16 status)
* @tgt: ibmvfc target struct
* @action: action to perform
*
+ * Returns:
+ * 0 if action changed / non-zero if not changed
**/
-static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
+static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
enum ibmvfc_target_action action)
{
+ int rc = -EINVAL;
+
switch (tgt->action) {
+ case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
+ action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
+ if (action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
case IBMVFC_TGT_ACTION_DEL_RPORT:
- if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
+ if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
tgt->action = action;
+ rc = 0;
+ }
case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
- if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
+ if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
tgt->add_rport = 0;
tgt->action = action;
+ rc = 0;
break;
}
+
+ return rc;
}
/**
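
ibmvfc_set_tgt_action() now behaves as a guarded state machine: each current
action whitelists its legal successors, and the return value tells the
caller whether the transition was accepted, so job_step is only re-pointed
on success (see ibmvfc_del_tgt() and ibmvfc_init_tgt() below). The skeleton
of that pattern, with hypothetical states:

#include <linux/errno.h>

enum example_state { EX_ACTIVE, EX_LOGOUT, EX_DELETED };

static int example_set_state(enum example_state *cur, enum example_state next)
{
        switch (*cur) {
        case EX_ACTIVE:
                *cur = next;            /* any transition is allowed */
                return 0;
        case EX_LOGOUT:
                if (next == EX_DELETED) {
                        *cur = next;
                        return 0;
                }
                return -EINVAL;
        case EX_DELETED:
        default:
                return -EINVAL;         /* terminal: no way out */
        }
}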
@@ -537,6 +560,19 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_del_tgt - Schedule cleanup and removal of the target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
+{
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
+ tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
+ wake_up(&tgt->vhost->work_wait_q);
+}
+
+/**
* ibmvfc_link_down - Handle a link down event from the adapter
* @vhost: ibmvfc host struct
* @state: ibmvfc host state to enter
@@ -550,7 +586,7 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
ENTER;
scsi_block_requests(vhost->host);
list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
ibmvfc_set_host_state(vhost, state);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
@@ -583,7 +619,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
vhost->async_crq.cur = 0;
list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@@ -1500,7 +1536,7 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
list_for_each_entry(tgt, &vhost->targets, queue) {
if (rport == tgt->rport) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
}
}
@@ -2686,7 +2722,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
tgt->logo_rcvd = 1;
if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
ibmvfc_reinit_host(vhost);
}
}
@@ -3220,8 +3256,8 @@ static void ibmvfc_tasklet(void *data)
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
- tgt->job_step = job_step;
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
+ tgt->job_step = job_step;
wake_up(&tgt->vhost->work_wait_q);
}
@@ -3237,7 +3273,7 @@ static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
wake_up(&tgt->vhost->work_wait_q);
return 0;
} else
@@ -3312,13 +3348,13 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
tgt->add_rport = 1;
} else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
} else if (prli_rsp[index].retry)
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
} else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -3335,7 +3371,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3434,7 +3470,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3515,33 +3551,28 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
break;
}
- if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
- else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
- tgt->scsi_id != tgt->new_scsi_id)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
kref_put(&tgt->kref, ibmvfc_release_tgt);
wake_up(&vhost->work_wait_q);
}
/**
- * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
* @tgt: ibmvfc target struct
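+ * @done: Completion routine to invoke when the MAD response arrives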
*
+ * Returns:
+ * Allocated and initialized ibmvfc_event struct
**/
-static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
+ void (*done) (struct ibmvfc_event *))
{
struct ibmvfc_implicit_logout *mad;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
- if (vhost->discovery_threads >= disc_threads)
- return;
-
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
- vhost->discovery_threads++;
- ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
+ ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
mad = &evt->iu.implicit_logout;
memset(mad, 0, sizeof(*mad));
@@ -3549,6 +3580,25 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
mad->common.length = cpu_to_be16(sizeof(*mad));
mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+ return evt;
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_done);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
@@ -3560,6 +3610,58 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
}
/**
+ * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
+ u32 status = be16_to_cpu(mad->common.status);
+
+ vhost->discovery_threads--;
+ ibmvfc_free_event(evt);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout and delete the specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (!vhost->logged_in) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ return;
+ }
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_and_del_done);
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Implicit Logout\n");
+}
+
+/**
* ibmvfc_adisc_needs_plogi - Does device need PLOGI?
* @mad: ibmvfc passthru mad struct
* @tgt: ibmvfc target struct
@@ -3600,13 +3702,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_FAILED:
default:
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3799,9 +3901,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Query Target succeeded\n");
- tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id);
if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ ibmvfc_del_tgt(tgt);
else
ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
break;
@@ -3815,11 +3916,11 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3896,7 +3997,6 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
memset(tgt, 0, sizeof(*tgt));
tgt->scsi_id = scsi_id;
- tgt->new_scsi_id = scsi_id;
tgt->vhost = vhost;
tgt->need_login = 1;
tgt->cancel_key = vhost->task_set++;
@@ -4189,6 +4289,25 @@ static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_dev_logo_to_do - Is there target logout work to do?
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
+ tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
+ return 1;
+ }
+ return 0;
+}
+
+/**
* __ibmvfc_work_to_do - Is there task level work to do? (no locking)
* @vhost: ibmvfc host struct
*
@@ -4217,11 +4336,20 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
return 0;
return 1;
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ if (vhost->discovery_threads == disc_threads)
+ return 0;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+ return 1;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
+ return 0;
+ return 1;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
- case IBMVFC_HOST_ACTION_TGT_DEL:
- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
@@ -4391,6 +4519,18 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+ tgt->job_step(tgt);
+ break;
+ }
+ }
+
+ if (ibmvfc_dev_logo_to_do(vhost)) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
+ }
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
rport = tgt->rport;
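[Annotation, not part of the patch: the first loop above dispatches at most one LOGOUT_RPORT job step per work-thread pass, and ibmvfc_dev_logo_to_do() then returns the thread early while any logout is still in flight, so the pre-existing DEL_RPORT sweep only runs once every target has completed (or skipped) its implicit logout.]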
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 7da89f4d26b2..907889f1fa9d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -596,6 +596,8 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_NONE = 0,
IBMVFC_TGT_ACTION_INIT,
IBMVFC_TGT_ACTION_INIT_WAIT,
+ IBMVFC_TGT_ACTION_LOGOUT_RPORT,
+ IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT,
IBMVFC_TGT_ACTION_DEL_RPORT,
IBMVFC_TGT_ACTION_DELETED_RPORT,
};
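[Annotation: the delete path through these new states, reconstructed from the transitions ibmvfc_set_tgt_action() accepts above; a reading aid, not text from the source:]

	/*
	 * (most states, default case) -> LOGOUT_RPORT       ibmvfc_del_tgt()
	 * LOGOUT_RPORT                -> LOGOUT_RPORT_WAIT  implicit logout MAD sent
	 * LOGOUT_RPORT_WAIT           -> DEL_RPORT          logout completed or send failed
	 * DEL_RPORT                   -> DELETED_RPORT      work thread removes the rport
	 */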
@@ -604,7 +606,6 @@ struct ibmvfc_target {
struct list_head queue;
struct ibmvfc_host *vhost;
u64 scsi_id;
- u64 new_scsi_id;
struct fc_rport *rport;
int target_id;
enum ibmvfc_target_action action;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7f66a7783209..59f0f1030c54 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2320,16 +2320,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
- unsigned long flags;
srp_remove_host(hostdata->host);
scsi_remove_host(hostdata->host);
purge_requests(hostdata, DID_ERROR);
-
- spin_lock_irqsave(hostdata->host->host_lock, flags);
release_event_pool(&hostdata->pool, hostdata);
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
max_events);
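[Annotation: by this point srp_remove_host()/scsi_remove_host() have run and all requests were purged, so nothing touches the event pool concurrently; the host_lock is dropped here because release_event_pool() ends in dma_free_coherent(), which must not be called with interrupts disabled under a spinlock.]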
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cd8db1349871..d48a8fa997b9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1299,9 +1299,9 @@ static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
char *p = buffer;
*p = '\0';
- p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
+ p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
- p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
+ p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
return buffer;
}
@@ -1322,7 +1322,7 @@ static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
char *p = buffer;
*p = '\0';
- p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+ p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
__ipr_format_res_path(res_path, p, len - (buffer - p));
return buffer;
}
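The snprintf()-to-scnprintf() conversions above are not cosmetic: snprintf() returns the length the output would have had, while scnprintf() returns the bytes actually stored (excluding the terminating NUL), so only the latter is safe for the p += ... pointer-advance idiom. A minimal sketch of the failure mode (illustrative, not driver code):

	static void format_demo(void)
	{
		char buf[8];
		char *p = buf;

		/* Truncated: only 7 chars plus the NUL fit, but snprintf()
		 * returns 10, leaving p past the end of buf; the next
		 * remaining-space computation (buf + sizeof(buf) - p) then
		 * goes negative and wraps to a huge size_t.
		 */
		p += snprintf(p, sizeof(buf), "0123456789");

		/* scnprintf() returns 7 (what was actually written), so p
		 * stays inside buf and chained appends remain bounded.
		 */
		p = buf;
		p += scnprintf(p, sizeof(buf), "0123456789");
	}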
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b97aa9ac2ffe..9a0d3d729320 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -451,12 +451,12 @@ struct ipr_config_table_hdr64 {
struct ipr_config_table {
struct ipr_config_table_hdr hdr;
- struct ipr_config_table_entry dev[0];
+ struct ipr_config_table_entry dev[];
}__attribute__((packed, aligned (4)));
struct ipr_config_table64 {
struct ipr_config_table_hdr64 hdr64;
- struct ipr_config_table_entry64 dev[0];
+ struct ipr_config_table_entry64 dev[];
}__attribute__((packed, aligned (8)));
struct ipr_config_table_entry_wrapper {
@@ -792,7 +792,7 @@ struct ipr_mode_page28 {
struct ipr_mode_page_hdr hdr;
u8 num_entries;
u8 entry_length;
- struct ipr_dev_bus_entry bus[0];
+ struct ipr_dev_bus_entry bus[];
}__attribute__((packed));
struct ipr_mode_page24 {
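With dev[] and bus[] now C99 flexible array members, the conventional way to size the allocation is struct_size() from <linux/overflow.h>, which guards the multiply-and-add against overflow. A hedged sketch (n_entries is an invented name; this is not code from the driver):

	struct ipr_config_table *tbl;

	tbl = kzalloc(struct_size(tbl, dev, n_entries), GFP_KERNEL);
	if (!tbl)
		/* handle allocation failure */;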
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index b48aac8dfcb8..974c3b9116d5 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -621,7 +621,7 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
pci_set_drvdata(pdev, pci_info);
- if (efi_enabled(EFI_RUNTIME_SERVICES))
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
orom = isci_get_efi_var(pdev);
if (!orom)
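[Annotation: efi_enabled(EFI_RUNTIME_SERVICES) only says that runtime services as a whole were mapped; efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE) additionally checks that the specific GetVariable service is advertised as supported, which is the one isci_get_efi_var() actually needs.]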
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
index dc26b4aea99e..15d8f3631ab7 100644
--- a/drivers/scsi/isci/sas.h
+++ b/drivers/scsi/isci/sas.h
@@ -201,7 +201,7 @@ struct smp_req {
u8 func; /* byte 1 */
u8 alloc_resp_len; /* byte 2 */
u8 req_len; /* byte 3 */
- u8 req_data[0];
+ u8 req_data[];
} __packed;
/*
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index da6e97d8dc3b..773c45af9387 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -632,6 +632,8 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err)
fc_rport_enter_ready(rdata);
break;
case RPORT_ST_PRLI:
+ fc_rport_enter_plogi(rdata);
+ break;
case RPORT_ST_ADISC:
fc_rport_enter_logo(rdata);
break;
@@ -1208,9 +1210,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
if (!rjt)
FC_RPORT_DBG(rdata, "PRLI bad response\n");
- else
+ else {
FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
rjt->er_reason, rjt->er_explan);
+ if (rjt->er_reason == ELS_RJT_UNAB &&
+ rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
+ fc_rport_enter_plogi(rdata);
+ goto out;
+ }
+ }
fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
}
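[Annotation: an LS_RJT with reason ELS_RJT_UNAB and explanation ELS_EXPL_PLOGI_REQD indicates the remote port no longer holds a login session, so retrying PRLI cannot succeed; both new paths (the RPORT_ST_PRLI error case and the explicit reject check) therefore restart at PLOGI to re-establish the session first.]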
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 70b99c0e2e67..874dd4beed10 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2771,7 +2771,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
"must be a power of 2.\n", total_cmds);
total_cmds = rounddown_pow_of_two(total_cmds);
if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
- return NULL;
+ goto dec_session_count;
printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
total_cmds);
}
@@ -3153,13 +3153,18 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
switch (flag) {
case STOP_CONN_RECOVER:
+ cls_conn->state = ISCSI_CONN_FAILED;
+ break;
case STOP_CONN_TERM:
- iscsi_start_session_recovery(session, conn, flag);
+ cls_conn->state = ISCSI_CONN_DOWN;
break;
default:
iscsi_conn_printk(KERN_ERR, conn,
"invalid stop flag %d\n", flag);
+ return;
}
+
+ iscsi_start_session_recovery(session, conn, flag);
}
EXPORT_SYMBOL_GPL(iscsi_conn_stop);
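[Annotation: the connection state is now set per flag (ISCSI_CONN_FAILED for recovery, ISCSI_CONN_DOWN for termination) before the shared iscsi_start_session_recovery() call, and an invalid flag returns early so it never reaches the now-unconditional recovery call.]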
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 5c6a5eff2f8e..052ee3a26f6e 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -19,6 +19,7 @@ config SCSI_SAS_ATA
bool "ATA support for libsas (requires libata)"
depends on SCSI_SAS_LIBSAS
depends on ATA = y || ATA = SCSI_SAS_LIBSAS
+ select SATA_HOST
help
Builds in ATA support into libsas. Will necessitate
the loading of libata along with libsas.
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 04d73e2be373..8e2a356911a9 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -207,8 +207,7 @@ typedef struct lpfc_vpd {
} rev;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd3 :19; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 :20; /* Reserved */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
@@ -230,8 +229,7 @@ typedef struct lpfc_vpd {
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 3; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd3 :19; /* Reserved */
+ uint32_t rsvd3 :20; /* Reserved */
#endif
} sli3Feat;
} lpfc_vpd_t;
@@ -262,7 +260,6 @@ struct lpfc_stats {
uint32_t elsRcvPRLI;
uint32_t elsRcvLIRR;
uint32_t elsRcvRLS;
- uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsRcvRRQ;
uint32_t elsRcvRTV;
@@ -481,8 +478,8 @@ struct lpfc_vport {
struct dentry *debug_nodelist;
struct dentry *debug_nvmestat;
struct dentry *debug_scsistat;
- struct dentry *debug_nvmektime;
- struct dentry *debug_cpucheck;
+ struct dentry *debug_ioktime;
+ struct dentry *debug_hdwqstat;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
@@ -749,6 +746,7 @@ struct lpfc_hba {
* capability
*/
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
+#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -887,7 +885,6 @@ struct lpfc_hba {
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
- uint32_t cfg_enable_dss;
uint32_t cfg_fdmi_on;
#define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
@@ -1156,8 +1153,6 @@ struct lpfc_hba {
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
- uint8_t fips_spec_rev;
- uint8_t fips_level;
spinlock_t devicelock; /* lock for luns list */
mempool_t *device_data_mem_pool;
struct list_head luns;
@@ -1175,12 +1170,11 @@ struct lpfc_hba {
uint16_t sfp_warning;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint16_t cpucheck_on;
+ uint16_t hdwqstat_on;
#define LPFC_CHECK_OFF 0
#define LPFC_CHECK_NVME_IO 1
-#define LPFC_CHECK_NVMET_RCV 2
-#define LPFC_CHECK_NVMET_IO 4
-#define LPFC_CHECK_SCSI_IO 8
+#define LPFC_CHECK_NVMET_IO 2
+#define LPFC_CHECK_SCSI_IO 4
uint16_t ktime_on;
uint64_t ktime_data_samples;
uint64_t ktime_status_samples;
@@ -1225,6 +1219,11 @@ struct lpfc_hba {
#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
char os_host_name[MAXHOSTNAMELEN];
+
+ /* SCSI host template information - for physical port */
+ struct scsi_host_template port_template;
+ /* SCSI host template information - for all vports */
+ struct scsi_host_template vport_template;
};
static inline struct Scsi_Host *
@@ -1353,3 +1352,32 @@ lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq,
writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr);
eq->q_mode = delay;
}
+
+
+/*
+ * Macro that declares tables and a routine to perform enum type to
+ * ascii string lookup.
+ *
+ * Defines a <key,value> table for an enum. Uses xxx_INIT defines for
+ * the enum to populate the table. Macro defines a routine (named
+ * by caller) that will search all elements of the table for the key
+ * and return the name string if found or "Unrecognized" if not found.
+ */
+#define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init) \
+static struct { \
+ enum enum_name value; \
+ char *name; \
+} fc_##enum_name##_e2str_names[] = enum_init; \
+static const char *routine(enum enum_name table_key) \
+{ \
+ int i; \
+ char *name = "Unrecognized"; \
+ \
+ for (i = 0; i < ARRAY_SIZE(fc_##enum_name##_e2str_names); i++) {\
+ if (fc_##enum_name##_e2str_names[i].value == table_key) {\
+ name = fc_##enum_name##_e2str_names[i].name; \
+ break; \
+ } \
+ } \
+ return name; \
+}
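A hypothetical use of the macro (every name below is invented for illustration; in the driver the enum and its xxx_INIT initializer come from the FC ELS headers):

	enum demo_topology { DEMO_LOOP, DEMO_FABRIC };

	#define DEMO_TOPOLOGY_INIT {		\
		{ DEMO_LOOP,	"Loop" },	\
		{ DEMO_FABRIC,	"Fabric" },	\
	}

	DECLARE_ENUM2STR_LOOKUP(demo_topology_name, demo_topology,
				DEMO_TOPOLOGY_INIT);

	/* demo_topology_name(DEMO_FABRIC) returns "Fabric"; any value
	 * not in the table returns "Unrecognized".
	 */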
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 46f56f30f77e..1354c141d614 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2231,66 +2231,6 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_fips_level_show - Return the current FIPS level for the HBA
- * @dev: class unused variable.
- * @attr: device attribute, not used.
- * @buf: on return contains the module description text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
-}
-
-/**
- * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
- * @dev: class unused variable.
- * @attr: device attribute, not used.
- * @buf: on return contains the module description text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
-}
-
-/**
- * lpfc_dss_show - Return the current state of dss and the configured state
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the formatted text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_dss_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
- (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
- (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
- "" : "Not ");
-}
-
-/**
* lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -2705,9 +2645,6 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR_RO(lpfc_temp_sensor);
-static DEVICE_ATTR_RO(lpfc_fips_level);
-static DEVICE_ATTR_RO(lpfc_fips_rev);
-static DEVICE_ATTR_RO(lpfc_dss);
static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
@@ -3868,12 +3805,9 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
-# commands per FCP LUN. Value range is [1,512]. Default value is 30.
-# If this parameter value is greater than 1/8th the maximum number of exchanges
-# supported by the HBA port, then the lun queue depth will be reduced to
-# 1/8th the maximum number of exchanges.
+# commands per FCP LUN.
*/
-LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
+LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
"Max number of FCP commands we can queue to a specific LUN");
/*
@@ -4783,7 +4717,7 @@ static DEVICE_ATTR_RW(lpfc_aer_support);
* Description:
* If the @buf contains 1 and the device currently has the AER support
* enabled, then invokes the kernel AER helper routine
- * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
+ * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
* error status register.
*
* Notes:
@@ -4809,7 +4743,7 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (phba->hba_flag & HBA_AER_ENABLED)
- rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+ rc = pci_aer_clear_nonfatal_status(phba->pcidev);
if (rc == 0)
return strlen(buf);
@@ -6254,9 +6188,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_pt,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
- &dev_attr_lpfc_fips_level,
- &dev_attr_lpfc_fips_rev,
- &dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
&dev_attr_protocol,
&dev_attr_lpfc_xlane_supported,
@@ -6292,8 +6223,6 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_static_vport,
- &dev_attr_lpfc_fips_level,
- &dev_attr_lpfc_fips_rev,
NULL,
};
@@ -7402,7 +7331,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
- phba->cfg_enable_dss = 1;
lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 25d3dd39bc05..76dc8d9493d2 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -140,9 +140,10 @@ int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
-int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
+int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -403,9 +404,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_no_hr;
extern struct scsi_host_template lpfc_template_nvme;
-extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
@@ -589,6 +588,7 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
int);
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
+void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd);
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 58b35a1442c1..2aa578d20f8c 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2073,8 +2073,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
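[Annotation covering this and the analogous FDMI hunks below: replacing the hard-coded memset sizes (256, 32, sizeof(struct lpfc_name)) with sizeof(*ae) ties each clear to the real attribute-entry type, and the cast can go because ad->AttrValue already has type struct lpfc_fdmi_attr_entry.]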
@@ -2090,8 +2090,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
/* This string MUST be consistent with other FC platforms
* supported by Broadcom.
@@ -2115,8 +2115,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->SerialNumber,
sizeof(ae->un.AttrString));
@@ -2137,8 +2137,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelName,
sizeof(ae->un.AttrString));
@@ -2158,8 +2158,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelDesc,
sizeof(ae->un.AttrString));
@@ -2181,8 +2181,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t i, j, incr, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
/* Convert JEDEC ID to ascii for hardware version */
incr = vp->rev.biuRev;
@@ -2211,8 +2211,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, lpfc_release_version,
sizeof(ae->un.AttrString));
@@ -2233,8 +2233,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
@@ -2258,8 +2258,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
len = strnlen(ae->un.AttrString,
@@ -2278,8 +2278,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
init_utsname()->sysname,
@@ -2301,7 +2301,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
size = FOURBYTES + sizeof(uint32_t);
@@ -2317,8 +2317,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
len = lpfc_vport_symbolic_node_name(vport,
ae->un.AttrString, 256);
@@ -2336,7 +2336,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Nothing is defined for this currently */
ae->un.AttrInt = cpu_to_be32(0);
@@ -2353,7 +2353,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Each driver instance corresponds to a single port */
ae->un.AttrInt = cpu_to_be32(1);
@@ -2370,8 +2370,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
sizeof(struct lpfc_name));
@@ -2389,8 +2389,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strlcat(ae->un.AttrString, phba->BIOSVersion,
sizeof(ae->un.AttrString));
@@ -2410,7 +2410,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Driver doesn't have access to this information */
ae->un.AttrInt = cpu_to_be32(0);
@@ -2427,8 +2427,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "EMULEX",
sizeof(ae->un.AttrString));
@@ -2450,10 +2450,9 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 32);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
- ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
@@ -2476,7 +2475,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -2530,7 +2529,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
@@ -2600,7 +2599,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
hsp = (struct serv_parm *)&vport->fc_sparam;
ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
@@ -2620,8 +2619,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
"/sys/class/scsi_host/host%d", shost->host_no);
@@ -2641,8 +2640,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
vport->phba->os_host_name);
@@ -2662,8 +2661,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
@@ -2680,8 +2679,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -2698,8 +2697,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
len += (len & 3) ? (4 - (len & 3)) : 4;
@@ -2717,7 +2716,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
else
@@ -2735,7 +2734,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2750,8 +2749,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
sizeof(struct lpfc_name));
@@ -2768,10 +2767,9 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 32);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
- ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
@@ -2792,7 +2790,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Link Up - operational */
ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
size = FOURBYTES + sizeof(uint32_t);
@@ -2808,7 +2806,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
vport->fdmi_num_disc = lpfc_find_map_node(vport);
ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
size = FOURBYTES + sizeof(uint32_t);
@@ -2824,7 +2822,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2839,8 +2837,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "Smart SAN Initiator",
sizeof(ae->un.AttrString));
@@ -2860,8 +2858,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
@@ -2881,8 +2879,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
sizeof(ae->un.AttrString));
@@ -2903,8 +2901,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelName,
sizeof(ae->un.AttrString));
@@ -2923,7 +2921,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* SRIOV (type 3) is not supported */
if (vport->vpi)
@@ -2943,7 +2941,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(0);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2958,7 +2956,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(1);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -3106,7 +3104,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Registered Port List */
/* One entry (port) per adapter */
rh->rpl.EntryCnt = cpu_to_be32(1);
- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName,
+ memcpy(&rh->rpl.pe.PortName,
+ &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
/* point to the HBA attribute block */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 819335b16c2e..8a6e02aa553f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1300,8 +1300,88 @@ buffer_done:
return len;
}
+void
+lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+{
+ uint64_t seg1, seg2, seg3, seg4;
+ uint64_t segsum;
+
+ if (!lpfc_cmd->ts_last_cmd ||
+ !lpfc_cmd->ts_cmd_start ||
+ !lpfc_cmd->ts_cmd_wqput ||
+ !lpfc_cmd->ts_isr_cmpl ||
+ !lpfc_cmd->ts_data_io)
+ return;
+
+ if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start)
+ return;
+ if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd)
+ return;
+ if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start)
+ return;
+ if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput)
+ return;
+ if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl)
+ return;
+ /*
+ * Segment 1 - Time from when the last FCP command cmpl was handed
+ * off to the NVME Layer to the start of the next command.
+ * Segment 2 - Time from when the driver receives an IO cmd start
+ * from the NVME Layer to when the WQ put is done for the IO cmd.
+ * Segment 3 - Time from when the driver WQ put is done for the IO
+ * cmd to the MSI-X ISR for the IO cmpl.
+ * Segment 4 - Time from the MSI-X ISR for the IO cmpl to when the
+ * cmpl is handed off to the NVME Layer.
+ */
+ seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd;
+ if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
+ seg1 = 0;
+
+ /* Calculate times relative to start of IO */
+ seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start);
+ segsum = seg2;
+ seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start;
+ if (segsum > seg3)
+ return;
+ seg3 -= segsum;
+ segsum += seg3;
+
+ seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start;
+ if (segsum > seg4)
+ return;
+ seg4 -= segsum;
+
+ phba->ktime_data_samples++;
+ phba->ktime_seg1_total += seg1;
+ if (seg1 < phba->ktime_seg1_min)
+ phba->ktime_seg1_min = seg1;
+ else if (seg1 > phba->ktime_seg1_max)
+ phba->ktime_seg1_max = seg1;
+ phba->ktime_seg2_total += seg2;
+ if (seg2 < phba->ktime_seg2_min)
+ phba->ktime_seg2_min = seg2;
+ else if (seg2 > phba->ktime_seg2_max)
+ phba->ktime_seg2_max = seg2;
+ phba->ktime_seg3_total += seg3;
+ if (seg3 < phba->ktime_seg3_min)
+ phba->ktime_seg3_min = seg3;
+ else if (seg3 > phba->ktime_seg3_max)
+ phba->ktime_seg3_max = seg3;
+ phba->ktime_seg4_total += seg4;
+ if (seg4 < phba->ktime_seg4_min)
+ phba->ktime_seg4_min = seg4;
+ else if (seg4 > phba->ktime_seg4_max)
+ phba->ktime_seg4_max = seg4;
+
+ lpfc_cmd->ts_last_cmd = 0;
+ lpfc_cmd->ts_cmd_start = 0;
+ lpfc_cmd->ts_cmd_wqput = 0;
+ lpfc_cmd->ts_isr_cmpl = 0;
+ lpfc_cmd->ts_data_io = 0;
+}
+
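[Annotation: the chain of early returns above enforces ts_last_cmd <= ts_cmd_start <= ts_cmd_wqput <= ts_isr_cmpl <= ts_data_io before any accounting, so a sample with missing or out-of-order timestamps is dropped rather than corrupting the min/max/total counters.]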
/**
- * lpfc_debugfs_nvmektime_data - Dump target node list to a buffer
+ * lpfc_debugfs_ioktime_data - Dump I/O ktime statistics to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
@@ -1314,13 +1394,13 @@ buffer_done:
* not exceed @size.
**/
static int
-lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
+lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
int len = 0;
if (phba->nvmet_support == 0) {
- /* NVME Initiator */
+ /* Initiator */
len += scnprintf(buf + len, PAGE_SIZE - len,
"ktime %s: Total Samples: %lld\n",
(phba->ktime_on ? "Enabled" : "Disabled"),
@@ -1330,8 +1410,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "Segment 1: Last NVME Cmd cmpl "
- "done -to- Start of next NVME cnd (in driver)\n");
+ "Segment 1: Last Cmd cmpl "
+ "done -to- Start of next Cmd (in driver)\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
@@ -1341,7 +1421,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
phba->ktime_seg1_max);
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "Segment 2: Driver start of NVME cmd "
+ "Segment 2: Driver start of Cmd "
"-to- Firmware WQ doorbell\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -1364,7 +1444,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 4: MSI-X ISR cmpl -to- "
- "NVME cmpl done\n");
+ "Cmd cmpl done\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
@@ -1603,42 +1683,50 @@ out:
}
/**
- * lpfc_debugfs_cpucheck_data - Dump target node list to a buffer
+ * lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
- * This routine dumps the NVME statistics associated with @vport
+ * This routine dumps the NVME + SCSI statistics associated with @vport
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
-lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
+lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli4_hdw_queue *qp;
- int i, j, max_cnt;
- int len = 0;
+ struct lpfc_hdwq_stat *c_stat;
+ int i, j, len;
uint32_t tot_xmt;
uint32_t tot_rcv;
uint32_t tot_cmpl;
+ char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPUcheck %s ",
- (phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
- "Enabled" : "Disabled"));
- if (phba->nvmet_support) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "%s\n",
- (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
- "Rcv Enabled\n" : "Rcv Disabled\n"));
- } else {
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
- }
- max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
+ scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ",
+ (phba->hdwqstat_on &
+ (LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ?
+ "Enabled" : "Disabled"));
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ",
+ (phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ?
+ "Enabled" : "Disabled"));
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "\n\n");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
@@ -1646,46 +1734,76 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
tot_rcv = 0;
tot_xmt = 0;
tot_cmpl = 0;
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
- tot_xmt += qp->cpucheck_xmt_io[j];
- tot_cmpl += qp->cpucheck_cmpl_io[j];
- if (phba->nvmet_support)
- tot_rcv += qp->cpucheck_rcv_io[j];
- }
- /* Only display Hardware Qs with something */
- if (!tot_xmt && !tot_cmpl && !tot_rcv)
- continue;
+ for_each_present_cpu(j) {
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j);
+
+ /* Only display for this HDWQ */
+ if (i != c_stat->hdwq_no)
+ continue;
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "HDWQ %03d: ", i);
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
/* Only display non-zero counters */
- if (!qp->cpucheck_xmt_io[j] &&
- !qp->cpucheck_cmpl_io[j] &&
- !qp->cpucheck_rcv_io[j])
+ if (!c_stat->xmt_io && !c_stat->cmpl_io &&
+ !c_stat->rcv_io)
continue;
+
+ if (!tot_xmt && !tot_cmpl && !tot_rcv) {
+ /* Print HDWQ string only the first time */
+ scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+ }
+
+ tot_xmt += c_stat->xmt_io;
+ tot_cmpl += c_stat->cmpl_io;
+ if (phba->nvmet_support)
+ tot_rcv += c_stat->rcv_io;
+
+ scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
if (phba->nvmet_support) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPU %03d: %x/%x/%x ", j,
- qp->cpucheck_rcv_io[j],
- qp->cpucheck_xmt_io[j],
- qp->cpucheck_cmpl_io[j]);
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x RCV 0x%x |",
+ c_stat->xmt_io, c_stat->cmpl_io,
+ c_stat->rcv_io);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
} else {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPU %03d: %x/%x ", j,
- qp->cpucheck_xmt_io[j],
- qp->cpucheck_cmpl_io[j]);
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x |",
+ c_stat->xmt_io, c_stat->cmpl_io);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
}
}
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "Total: %x\n", tot_xmt);
- if (len >= max_cnt) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "Truncated ...\n");
- return len;
+
+ /* Check if nothing to display */
+ if (!tot_xmt && !tot_cmpl && !tot_rcv)
+ continue;
+
+ scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: ");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ if (phba->nvmet_support) {
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n",
+ tot_xmt, tot_cmpl, tot_rcv);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+ } else {
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x]\n\n",
+ tot_xmt, tot_cmpl);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
}
}
+
+buffer_done:
+ len = strnlen(buf, size);
return len;
}
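The rewrite above drops the len/max_cnt bookkeeping in favor of an append idiom built on strlcat(), which returns the total length it tried to create; a return value >= the destination size therefore signals truncation. The building block in isolation (illustrative):

	char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};

	scnprintf(tmp, sizeof(tmp), "one formatted line\n");
	if (strlcat(buf, tmp, size) >= size)
		goto buffer_done;	/* buf is full -- stop appending */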
@@ -2689,7 +2807,7 @@ lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf,
}
static int
-lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
+lpfc_debugfs_ioktime_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
struct lpfc_debug *debug;
@@ -2700,14 +2818,14 @@ lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL);
+ debug->buffer = kmalloc(LPFC_IOKTIME_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer,
- LPFC_NVMEKTIME_SIZE);
+ debug->len = lpfc_debugfs_ioktime_data(vport, debug->buffer,
+ LPFC_IOKTIME_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@@ -2718,8 +2836,8 @@ out:
}
static ssize_t
-lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
+lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
@@ -2921,7 +3039,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
}
static int
-lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
+lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
struct lpfc_debug *debug;
@@ -2932,14 +3050,14 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL);
+ debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
- LPFC_CPUCHECK_SIZE);
+ debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer,
+ LPFC_SCSISTAT_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@@ -2950,16 +3068,16 @@ out:
}
static ssize_t
-lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
+lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_hdwq_stat *c_stat;
char mybuf[64];
char *pbuf;
- int i, j;
+ int i;
if (nbytes > 64)
nbytes = 64;
@@ -2972,41 +3090,39 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
else
- phba->cpucheck_on |= (LPFC_CHECK_NVME_IO |
+ phba->hdwqstat_on |= (LPFC_CHECK_NVME_IO |
LPFC_CHECK_SCSI_IO);
return strlen(pbuf);
} else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) {
if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
else
- phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVME_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) {
- phba->cpucheck_on |= LPFC_CHECK_SCSI_IO;
+ if (!phba->nvmet_support)
+ phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO;
return strlen(pbuf);
- } else if ((strncmp(pbuf, "rcv",
- sizeof("rcv") - 1) == 0)) {
- if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV;
- else
- return -EINVAL;
+ } else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) {
+ phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO |
+ LPFC_CHECK_NVMET_IO);
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) {
+ phba->hdwqstat_on &= ~LPFC_CHECK_SCSI_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "off",
sizeof("off") - 1) == 0)) {
- phba->cpucheck_on = LPFC_CHECK_OFF;
+ phba->hdwqstat_on = LPFC_CHECK_OFF;
return strlen(pbuf);
} else if ((strncmp(pbuf, "zero",
sizeof("zero") - 1) == 0)) {
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- qp = &phba->sli4_hba.hdwq[i];
-
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
- qp->cpucheck_rcv_io[j] = 0;
- qp->cpucheck_xmt_io[j] = 0;
- qp->cpucheck_cmpl_io[j] = 0;
- }
+ for_each_present_cpu(i) {
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i);
+ c_stat->xmt_io = 0;
+ c_stat->cmpl_io = 0;
+ c_stat->rcv_io = 0;
}
return strlen(pbuf);
}
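The zeroing loop reflects the wider rework: fixed per-HDWQ arrays indexed by CPU (cpucheck_*_io[LPFC_CHECK_CPU_CNT]) give way to a per-CPU struct lpfc_hdwq_stat reached through per_cpu_ptr(). A sketch of that general pattern; the allocation and hot-path lines are assumptions for illustration, only the reader/reset side appears in this patch:

	struct lpfc_hdwq_stat __percpu *c_stat;
	struct lpfc_hdwq_stat *stat;
	int cpu;

	c_stat = alloc_percpu(struct lpfc_hdwq_stat);	/* setup (assumed) */

	this_cpu_inc(c_stat->xmt_io);		/* hot path (assumed): lock-free */

	for_each_present_cpu(cpu) {		/* reader/reset side, as above */
		stat = per_cpu_ptr(c_stat, cpu);
		stat->xmt_io = 0;
	}

	free_percpu(c_stat);			/* teardown (assumed) */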
@@ -5431,13 +5547,13 @@ static const struct file_operations lpfc_debugfs_op_scsistat = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_nvmektime
-static const struct file_operations lpfc_debugfs_op_nvmektime = {
+#undef lpfc_debugfs_op_ioktime
+static const struct file_operations lpfc_debugfs_op_ioktime = {
.owner = THIS_MODULE,
- .open = lpfc_debugfs_nvmektime_open,
+ .open = lpfc_debugfs_ioktime_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
- .write = lpfc_debugfs_nvmektime_write,
+ .write = lpfc_debugfs_ioktime_write,
.release = lpfc_debugfs_release,
};
@@ -5451,13 +5567,13 @@ static const struct file_operations lpfc_debugfs_op_nvmeio_trc = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_cpucheck
-static const struct file_operations lpfc_debugfs_op_cpucheck = {
+#undef lpfc_debugfs_op_hdwqstat
+static const struct file_operations lpfc_debugfs_op_hdwqstat = {
.owner = THIS_MODULE,
- .open = lpfc_debugfs_cpucheck_open,
+ .open = lpfc_debugfs_hdwqstat_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
- .write = lpfc_debugfs_cpucheck_write,
+ .write = lpfc_debugfs_hdwqstat_write,
.release = lpfc_debugfs_release,
};
@@ -6075,17 +6191,22 @@ nvmeio_off:
goto debug_failed;
}
- snprintf(name, sizeof(name), "nvmektime");
- vport->debug_nvmektime =
+ snprintf(name, sizeof(name), "ioktime");
+ vport->debug_ioktime =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_nvmektime);
+ vport, &lpfc_debugfs_op_ioktime);
+ if (!vport->debug_ioktime) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0815 Cannot create debugfs ioktime\n");
+ goto debug_failed;
+ }
- snprintf(name, sizeof(name), "cpucheck");
- vport->debug_cpucheck =
+ snprintf(name, sizeof(name), "hdwqstat");
+ vport->debug_hdwqstat =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_cpucheck);
+ vport, &lpfc_debugfs_op_hdwqstat);
/*
* The following section is for additional directories/files for the
@@ -6216,11 +6337,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(vport->debug_scsistat); /* scsistat */
vport->debug_scsistat = NULL;
- debugfs_remove(vport->debug_nvmektime); /* nvmektime */
- vport->debug_nvmektime = NULL;
+ debugfs_remove(vport->debug_ioktime); /* ioktime */
+ vport->debug_ioktime = NULL;
- debugfs_remove(vport->debug_cpucheck); /* cpucheck */
- vport->debug_cpucheck = NULL;
+ debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */
+ vport->debug_hdwqstat = NULL;
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 20f2537af511..7ab6d3b08698 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -46,8 +46,7 @@
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
-#define LPFC_NVMEKTIME_SIZE 8192
-#define LPFC_CPUCHECK_SIZE 8192
+#define LPFC_IOKTIME_SIZE 8192
#define LPFC_NVMEIO_TRC_SIZE 8192
/* scsistat output buffer size */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 42a2bf38eaea..80d1e661b0d4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -3008,10 +3008,9 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* This routine is a generic completion callback function for ELS commands.
* Specifically, it is the callback function which does not need to perform
* any command specific operations. It is currently used by the ELS command
- * issuing routines for the ELS State Change Request (SCR),
- * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
- * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
- * certain debug loggings, this callback function simply invokes the
+ * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
+ * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
+ * Other than certain debug logging, this callback function simply invokes the
* lpfc_els_chk_latt() routine to check whether link went down during the
* discovery process.
**/
@@ -3025,14 +3024,117 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ELS cmd cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
+ irsp->ulpIoTag, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout);
+
+ /* Check to see if link went down during discovery */
+ lpfc_els_chk_latt(vport);
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is a generic completion callback function for Discovery ELS cmd.
+ * Currently used by the ELS command issuing routines for the ELS State Change
+ * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
+ * These commands will be retried once only for ELS timeout errors.
+ **/
+static void
+lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+ struct lpfc_els_rdf_rsp *prdf;
+ struct lpfc_dmabuf *pcmd, *prsp;
+ u32 *pdata;
+ u32 cmd;
+
+ irsp = &rspiocb->iocb;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ELS cmd cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->un.elsreq64.remoteID);
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
+ "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
+ "x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ irsp->un.ulpWord[4], irsp->ulpTimeout,
+ cmdiocb->retry);
+
+ pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ if (!pcmd)
+ goto out;
+
+ pdata = (u32 *)pcmd->virt;
+ if (!pdata)
+ goto out;
+ cmd = *pdata;
+
+ /* Only 1 retry for ELS Timeout only */
+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_SEQUENCE_TIMEOUT)) {
+ cmdiocb->retry++;
+ if (cmdiocb->retry <= 1) {
+ switch (cmd) {
+ case ELS_CMD_SCR:
+ lpfc_issue_els_scr(vport, cmdiocb->retry);
+ break;
+ case ELS_CMD_RDF:
+ cmdiocb->context1 = NULL; /* save ndlp refcnt */
+ lpfc_issue_els_rdf(vport, cmdiocb->retry);
+ break;
+ }
+ goto out;
+ }
+ phba->fc_stat.elsRetryExceeded++;
+ }
+ if (irsp->ulpStatus) {
+ /* ELS discovery cmd completes with error */
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "4203 ELS cmd x%x error: x%x x%X\n", cmd,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto out;
+ }
+
+ /* The RDF response doesn't have any impact on the running driver
+ * but the notification descriptors are dumped here for support.
+ */
+ if (cmd == ELS_CMD_RDF) {
+ int i;
+
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
+ if (!prdf)
+ goto out;
+
+ for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
+ i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "4677 Fabric RDF Notification Grant Data: "
+ "0x%08x\n",
+ be32_to_cpu(
+ prdf->reg_d1.desc_tags[i]));
+ }
+
+out:
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
lpfc_els_free_iocb(phba, cmdiocb);
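
The retry policy implemented above is deliberately narrow: one retry, and only when the local reject reason is a sequence timeout. Stripped of the lpfc specifics, the control flow reduces to the sketch below; all demo_* names are hypothetical stand-ins for the lpfc routines and status values.

#include <linux/types.h>

struct demo_cmd { u8 retry; u32 status; };

/* hypothetical helpers standing in for the lpfc routines */
void demo_reissue(struct demo_cmd *cmd);
void demo_log_error(struct demo_cmd *cmd);
void demo_free(struct demo_cmd *cmd);

#define DEMO_STAT_TIMEOUT	0x1	/* stand-in for the lpfc
					 * local-reject/sequence-timeout pair
					 */

static void demo_disc_cmpl(struct demo_cmd *cmd)
{
	bool retried = false;

	/* retry at most once, and only for the timeout status */
	if (cmd->status == DEMO_STAT_TIMEOUT && ++cmd->retry <= 1) {
		demo_reissue(cmd);	/* reissue carries the bumped count */
		retried = true;
	}
	if (cmd->status && !retried)
		demo_log_error(cmd);
	demo_free(cmd);			/* completed iocb is always freed */
}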
@@ -3042,11 +3144,10 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_issue_els_scr - Issue a scr to a node on a vport
* @vport: pointer to a host virtual N_Port data structure.
- * @nportid: N_Port identifier to the remote node.
- * @retry: number of retries to the command IOCB.
+ * @retry: retry counter for the command IOCB.
*
* This routine issues a State Change Request (SCR) to a fabric node
- * on a @vport. The remote node @nportid is passed into the function. It
+ * on a @vport. The remote node is Fabric Controller (0xfffffd). It
* first searches the @vport node list to find the matching ndlp. If no such
* ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
* IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
@@ -3062,7 +3163,7 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* 1 - Failed to issue scr command
**/
int
-lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
@@ -3072,9 +3173,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
- ndlp = lpfc_findnode_did(vport, nportid);
+ ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
if (!ndlp) {
- ndlp = lpfc_nlp_init(vport, nportid);
+ ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
@@ -3109,7 +3210,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitSCR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
@@ -3339,6 +3440,102 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of the node.
*/
+ /* Don't release reference count as RDF is likely outstanding */
+ return 0;
+}
+
+/**
+ * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @retry: retry counter for the command IOCB.
+ *
+ * This routine issues an ELS RDF to the Fabric Controller to register
+ * for diagnostic functions.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RDF ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued rdf command
+ * 1 - Failed to issue rdf command
+ **/
+int
+lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_els_rdf_req *prdf;
+ struct lpfc_nodelist *ndlp;
+ uint16_t cmdsize;
+
+ cmdsize = sizeof(*prdf);
+
+ ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
+ if (!ndlp) {
+ ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
+ if (!ndlp)
+ return -ENODEV;
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return -ENODEV;
+ }
+
+ /* RDF ELS is not required on an NPIV VN_Port. */
+ if (vport->port_type == LPFC_NPIV_PORT) {
+ lpfc_nlp_put(ndlp);
+ return -EACCES;
+ }
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_RDF);
+ if (!elsiocb) {
+ /* This will trigger the release of the node just
+ * allocated
+ */
+ lpfc_nlp_put(ndlp);
+ return -ENOMEM;
+ }
+
+ /* Configure the payload for the supported FPIN events. */
+ prdf = (struct lpfc_els_rdf_req *)
+ (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ memset(prdf, 0, cmdsize);
+ prdf->rdf.fpin_cmd = ELS_RDF;
+ prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
+ sizeof(struct fc_els_rdf));
+ prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
+ prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
+ prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
+ prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue RDF: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6444 Xmit RDF to remote NPORT x%x\n",
+ ndlp->nlp_DID);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the release of
+ * the node.
+ */
+ lpfc_nlp_put(ndlp);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return -EIO;
+ }
+
+ /* An RDF was issued - this put ensures the ndlp is cleaned up
+ * when the RDF completes.
+ */
lpfc_nlp_put(ndlp);
return 0;
}
@@ -7135,108 +7332,12 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/**
- * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
- * @phba: pointer to lpfc hba data structure.
- * @pmb: pointer to the driver internal queue element for mailbox command.
- *
- * This routine is the completion callback function for the MBX_READ_LNK_STAT
- * mailbox command. This callback function is to actually send the Accept
- * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
- * collects the link statistics from the completion of the MBX_READ_LNK_STAT
- * mailbox command, constructs the RPS response with the link statistics
- * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
- * response to the RPS.
- *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
- *
- **/
-static void
-lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
-{
- MAILBOX_t *mb;
- IOCB_t *icmd;
- RPS_RSP *rps_rsp;
- uint8_t *pcmd;
- struct lpfc_iocbq *elsiocb;
- struct lpfc_nodelist *ndlp;
- uint16_t status;
- uint16_t oxid;
- uint16_t rxid;
- uint32_t cmdsize;
-
- mb = &pmb->u.mb;
-
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
- oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
- pmb->ctx_ndlp = NULL;
- pmb->ctx_buf = NULL;
-
- if (mb->mbxStatus) {
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
- }
-
- cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
- mempool_free(pmb, phba->mbox_mem_pool);
- elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
- lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_ACC);
-
- /* Decrement the ndlp reference count from previous mbox command */
- lpfc_nlp_put(ndlp);
-
- if (!elsiocb)
- return;
-
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rxid;
- icmd->unsli3.rcvsli3.ox_id = oxid;
-
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof(uint32_t); /* Skip past command */
- rps_rsp = (RPS_RSP *)pcmd;
-
- if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
- status = 0x10;
- else
- status = 0x8;
- if (phba->pport->fc_flag & FC_FABRIC)
- status |= 0x4;
-
- rps_rsp->rsvd1 = 0;
- rps_rsp->portStatus = cpu_to_be16(status);
- rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
- rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
- rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
- rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
- rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
- rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
- /* Xmit ELS RPS ACC response tag <ulpIoTag> */
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
- "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
- lpfc_els_free_iocb(phba, elsiocb);
- return;
-}
-
-/**
* lpfc_els_rcv_rls - Process an unsolicited rls iocb
* @vport: pointer to a host virtual N_Port data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
*
- * This routine processes Read Port Status (RPL) IOCB received as an
+ * This routine processes Read Link Status (RLS) IOCB received as an
* ELS unsolicited event. It first checks the remote port state. If the
* remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
* state, it invokes the lpfc_els_rsl_reject() routine to send the reject
@@ -7258,7 +7359,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
+ /* reject the unsolicited RLS request and done with it */
goto reject_out;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
@@ -7306,7 +7407,7 @@ reject_out:
* Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
* will be incremented by 1 for holding the ndlp and the reference to ndlp
* will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
+ * callback function to the RTV Accept Response ELS IOCB command.
*
* Return codes
* 0 - Successfully processed rtv iocb (currently always return 0)
@@ -7325,7 +7426,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
+ /* reject the unsolicited RTV request and done with it */
goto reject_out;
cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
@@ -7378,84 +7479,7 @@ reject_out:
return 0;
}
-/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
- * @vport: pointer to a host virtual N_Port data structure.
- * @cmdiocb: pointer to lpfc command iocb data structure.
- * @ndlp: pointer to a node-list data structure.
- *
- * This routine processes Read Port Status (RPS) IOCB received as an
- * ELS unsolicited event. It first checks the remote port state. If the
- * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
- * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
- * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
- * for reading the HBA link statistics. It is for the callback function,
- * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
- * to actually sending out RPS Accept (ACC) response.
- *
- * Return codes
- * 0 - Successfully processed rps iocb (currently always return 0)
- **/
-static int
-lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
- struct lpfc_nodelist *ndlp)
-{
- struct lpfc_hba *phba = vport->phba;
- uint32_t *lp;
- uint8_t flag;
- LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *pcmd;
- RPS *rps;
- struct ls_rjt stat;
-
- if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
- goto reject_out;
-
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
- flag = (be32_to_cpu(*lp++) & 0xf);
- rps = (RPS *) lp;
-
- if ((flag == 0) ||
- ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
- ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
- sizeof(struct lpfc_name)) == 0))) {
-
- printk("Fix me....\n");
- dump_stack();
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
- if (mbox) {
- lpfc_read_lnk_stat(phba, mbox);
- mbox->ctx_buf = (void *)((unsigned long)
- ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
- cmdiocb->iocb.ulpContext)); /* rx_id */
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- mbox->vport = vport;
- mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
- if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
- != MBX_NOT_FINISHED)
- /* Mbox completion will send ELS Response */
- return 0;
- /* Decrement reference count used for the failed mbox
- * command.
- */
- lpfc_nlp_put(ndlp);
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- }
-
-reject_out:
- /* issue rejection response */
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
- stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
- return 0;
-}
-
-/* lpfc_issue_els_rrq - Process an unsolicited rps iocb
+/* lpfc_issue_els_rrq - Process an unsolicited rrq iocb
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
* @did: DID of the target.
@@ -8310,6 +8334,90 @@ lpfc_send_els_event(struct lpfc_vport *vport,
}
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
+ FC_LS_TLV_DTAG_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
+ FC_FPIN_LI_EVT_TYPES_INIT);
+
+/**
+ * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
+ * @vport: Pointer to vport object.
+ * @tlv: Pointer to the Link Integrity Notification Descriptor.
+ *
+ * This function processes a link integrity FPIN event by
+ * logging a message
+ **/
+static void
+lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv)
+{
+ struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
+ const char *li_evt_str;
+ u32 li_evt;
+
+ li_evt = be16_to_cpu(li->event_type);
+ li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "4680 FPIN Link Integrity %s (x%x) "
+ "Detecting PN x%016llx Attached PN x%016llx "
+ "Duration %d mSecs Count %d Port Cnt %d\n",
+ li_evt_str, li_evt,
+ be64_to_cpu(li->detecting_wwpn),
+ be64_to_cpu(li->attached_wwpn),
+ be32_to_cpu(li->event_threshold),
+ be32_to_cpu(li->event_count),
+ be32_to_cpu(li->pname_count));
+}
+
+static void
+lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
+ u32 fpin_length)
+{
+ struct fc_tlv_desc *tlv;
+ const char *dtag_nm;
+ uint32_t desc_cnt = 0, bytes_remain;
+ u32 dtag;
+
+ /* FPINs handled only if we are in the right discovery state */
+ if (vport->port_state < LPFC_DISC_AUTH)
+ return;
+
+ /* make sure there is the full fpin header */
+ if (fpin_length < sizeof(struct fc_els_fpin))
+ return;
+
+ tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
+ bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
+ bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
+
+ /* process each descriptor */
+ while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
+ bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
+
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_INTEGRITY:
+ lpfc_els_rcv_fpin_li(vport, tlv);
+ break;
+ default:
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "4678 skipped FPIN descriptor[%d]: "
+ "tag x%x (%s)\n",
+ desc_cnt, dtag, dtag_nm);
+ break;
+ }
+
+ desc_cnt++;
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ }
+
+ fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length,
+ (char *)fpin);
+}
+
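
lpfc_els_rcv_fpin() above trusts neither the frame length nor the advertised descriptor lengths: it clamps bytes_remain to both and re-checks before consuming each descriptor. The same bounds-checked TLV walk, reduced to a standalone sketch that assumes only the fc_els.h helpers:

#include <uapi/scsi/fc/fc_els.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* sketch: visit each well-formed TLV descriptor in 'len' bytes */
static void demo_walk_tlvs(void *buf, u32 len)
{
	struct fc_tlv_desc *tlv = buf;

	/* stop once the remaining bytes cannot hold a descriptor
	 * header, or the descriptor claims more than is left
	 */
	while (len >= FC_TLV_DESC_HDR_SZ &&
	       len >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
		/* dispatch on be32_to_cpu(tlv->desc_tag) here */
		len -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		tlv = fc_tlv_next_desc(tlv);
	}
}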
/**
* lpfc_els_unsol_buffer - Process an unsolicited event data buffer
* @phba: pointer to lpfc hba data structure.
@@ -8331,7 +8439,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
- uint32_t *payload;
+ uint32_t *payload, payload_len;
uint32_t cmd, did, newnode;
uint8_t rjt_exp, rjt_err = 0, init_link = 0;
IOCB_t *icmd = &elsiocb->iocb;
@@ -8342,6 +8450,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
newnode = 0;
payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
cmd = *payload;
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
lpfc_post_buffer(phba, pring, 1);
@@ -8632,16 +8741,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
- case ELS_CMD_RPS:
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
- "RCV RPS: did:x%x/ste:x%x flg:x%x",
- did, vport->port_state, ndlp->nlp_flag);
-
- phba->fc_stat.elsRcvRPS++;
- lpfc_els_rcv_rps(vport, elsiocb, ndlp);
- if (newnode)
- lpfc_nlp_put(ndlp);
- break;
case ELS_CMD_RPL:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RPL: did:x%x/ste:x%x flg:x%x",
@@ -8697,12 +8796,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_INVALID_OX_RX;
break;
case ELS_CMD_FPIN:
- /*
- * Received FPIN from fabric - pass it to the
- * transport FPIN handler.
- */
- fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len,
- (char *)payload);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FPIN: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
+ payload_len);
+
+ /* There are no replies, so no rjt codes */
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index dcc8999c6a68..789eecbf32eb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1163,13 +1163,16 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Start discovery by sending a FLOGI. port_state is identically
- * LPFC_FLOGI while waiting for FLOGI cmpl
+ * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
+ * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
*/
- if (vport->port_state != LPFC_FLOGI)
- lpfc_initial_flogi(vport);
- else if (vport->fc_flag & FC_PT2PT)
- lpfc_disc_start(vport);
-
+ if (vport->port_state != LPFC_FLOGI) {
+ if (!(phba->hba_flag & HBA_DEFER_FLOGI))
+ lpfc_initial_flogi(vport);
+ } else {
+ if (vport->fc_flag & FC_PT2PT)
+ lpfc_disc_start(vport);
+ }
return;
out:
@@ -3094,6 +3097,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Check if sending the FLOGI is being deferred to after we get
+ * up to date CSPs from MBX_READ_SPARAM.
+ */
+ if (phba->hba_flag & HBA_DEFER_FLOGI) {
+ lpfc_initial_flogi(vport);
+ phba->hba_flag &= ~HBA_DEFER_FLOGI;
+ }
return;
out:
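
The hunk above is one half of a handshake: the READ_SPARAM completion fires the deferred FLOGI once the service parameters are current, while the link-up hunks below set HBA_DEFER_FLOGI (only when BB-credit recovery is enabled and the link is not in loopback) and skip the immediate FLOGI. The shape of the pattern, as a sketch with hypothetical names:

#include <linux/types.h>

#define DEMO_DEFER_FLOGI	0x1

struct demo_hba { u32 flags; };

void demo_initial_flogi(struct demo_hba *hba);	/* hypothetical */

/* link-up: BB-credit recovery needs current service parameters
 * before FLOGI, so only record the intent here
 */
static void demo_link_up(struct demo_hba *hba, bool bbcr, bool loopback)
{
	if (bbcr && !loopback)
		hba->flags |= DEMO_DEFER_FLOGI;
}

/* READ_SPARAM completion: parameters are current, send the FLOGI */
static void demo_read_sparam_cmpl(struct demo_hba *hba)
{
	if (hba->flags & DEMO_DEFER_FLOGI) {
		demo_initial_flogi(hba);
		hba->flags &= ~DEMO_DEFER_FLOGI;
	}
}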
@@ -3224,6 +3235,23 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
lpfc_linkup(phba);
+ sparam_mbox = NULL;
+
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!cfglink_mbox)
+ goto out;
+ vport->port_state = LPFC_LOCAL_CFG_LINK;
+ lpfc_config_link(phba, cfglink_mbox);
+ cfglink_mbox->vport = vport;
+ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ }
+
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!sparam_mbox)
goto out;
@@ -3244,20 +3272,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
goto out;
}
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!cfglink_mbox)
- goto out;
- vport->port_state = LPFC_LOCAL_CFG_LINK;
- lpfc_config_link(phba, cfglink_mbox);
- cfglink_mbox->vport = vport;
- cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
- goto out;
- }
- } else {
+ if (phba->hba_flag & HBA_FCOE_MODE) {
vport->port_state = LPFC_VPORT_UNKNOWN;
/*
* Add the driver's default FCF record at FCF index 0 now. This
@@ -3314,6 +3329,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
/* Reset FCF roundrobin bmask for new discovery */
lpfc_sli4_clear_fcf_rr_bmask(phba);
+ } else {
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+ !(phba->link_flag & LS_LOOPBACK_MODE))
+ phba->hba_flag |= HBA_DEFER_FLOGI;
}
/* Prepare for LINK up registrations */
@@ -4070,7 +4089,9 @@ out:
FC_TYPE_NVME);
/* Issue SCR just before NameServer GID_FT Query */
- lpfc_issue_els_scr(vport, SCR_DID, 0);
+ lpfc_issue_els_scr(vport, 0);
+
+ lpfc_issue_els_rdf(vport, 0);
}
vport->fc_ns_retry = 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 436cdc8c5ef4..c20034b3101c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -22,7 +22,7 @@
#define FDMI_DID 0xfffffaU
#define NameServer_DID 0xfffffcU
-#define SCR_DID 0xfffffdU
+#define Fabric_Cntl_DID 0xfffffdU
#define Fabric_DID 0xfffffeU
#define Bcast_DID 0xffffffU
#define Mask_DID 0xffffffU
@@ -588,6 +588,7 @@ struct fc_vft_header {
#define ELS_CMD_RRQ 0x12000000
#define ELS_CMD_REC 0x13000000
#define ELS_CMD_RDP 0x18000000
+#define ELS_CMD_RDF 0x19000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_NVMEPRLI 0x20140018
#define ELS_CMD_PRLO 0x21100014
@@ -597,7 +598,6 @@ struct fc_vft_header {
#define ELS_CMD_ADISC 0x52000000
#define ELS_CMD_FARP 0x54000000
#define ELS_CMD_FARPR 0x55000000
-#define ELS_CMD_RPS 0x56000000
#define ELS_CMD_RPL 0x57000000
#define ELS_CMD_FAN 0x60000000
#define ELS_CMD_RSCN 0x61040000
@@ -630,6 +630,7 @@ struct fc_vft_header {
#define ELS_CMD_RRQ 0x12
#define ELS_CMD_REC 0x13
#define ELS_CMD_RDP 0x18
+#define ELS_CMD_RDF 0x19
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_NVMEPRLI 0x18001420
#define ELS_CMD_PRLO 0x14001021
@@ -639,7 +640,6 @@ struct fc_vft_header {
#define ELS_CMD_ADISC 0x52
#define ELS_CMD_FARP 0x54
#define ELS_CMD_FARPR 0x55
-#define ELS_CMD_RPS 0x56
#define ELS_CMD_RPL 0x57
#define ELS_CMD_FAN 0x60
#define ELS_CMD_RSCN 0x0461
@@ -919,24 +919,6 @@ typedef struct _RNID { /* Structure is in Big Endian format */
} un;
} __packed RNID;
-typedef struct _RPS { /* Structure is in Big Endian format */
- union {
- uint32_t portNum;
- struct lpfc_name portName;
- } un;
-} RPS;
-
-typedef struct _RPS_RSP { /* Structure is in Big Endian format */
- uint16_t rsvd1;
- uint16_t portStatus;
- uint32_t linkFailureCnt;
- uint32_t lossSyncCnt;
- uint32_t lossSignalCnt;
- uint32_t primSeqErrCnt;
- uint32_t invalidXmitWord;
- uint32_t crcCnt;
-} RPS_RSP;
-
struct RLS { /* Structure is in Big Endian format */
uint32_t rls;
#define rls_rsvd_SHIFT 24
@@ -1340,25 +1322,8 @@ struct fc_rdp_res_frame {
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
-/*
- * Registered Port List Format
- */
-struct lpfc_fdmi_reg_port_list {
- uint32_t EntryCnt;
- uint32_t pe; /* Variable-length array */
-};
-
-
/* Definitions for HBA / Port attribute entries */
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
- /* Structure is in Big Endian format */
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
-};
-
-
/* Attribute Entry */
struct lpfc_fdmi_attr_entry {
union {
@@ -1369,7 +1334,13 @@ struct lpfc_fdmi_attr_entry {
} un;
};
-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
+struct lpfc_fdmi_attr_def { /* Defined in TLV format */
+ /* Structure is in Big Endian format */
+ uint32_t AttrType:16;
+ uint32_t AttrLen:16;
+ /* Marks start of Value (ATTRIBUTE_ENTRY) */
+ struct lpfc_fdmi_attr_entry AttrValue;
+} __packed;
/*
* HBA Attribute Block
@@ -1394,12 +1365,19 @@ struct lpfc_fdmi_hba_ident {
};
/*
+ * Registered Port List Format
+ */
+struct lpfc_fdmi_reg_port_list {
+ uint32_t EntryCnt;
+ struct lpfc_fdmi_port_entry pe;
+} __packed;
+
+/*
* Register HBA(RHBA)
*/
struct lpfc_fdmi_reg_hba {
struct lpfc_fdmi_hba_ident hi;
- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
-/* struct lpfc_fdmi_attr_block ab; */
+ struct lpfc_fdmi_reg_port_list rpl;
};
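
Replacing the open-coded uint32_t placeholders with the real embedded types, and marking the structs __packed, makes sizeof()/offsetof() arithmetic on these FDMI TLVs reflect the wire layout. A hypothetical compile-time check (not part of this patch) that the attribute value really starts right after the two 16-bit header fields:

#include <linux/build_bug.h>
#include <linux/stddef.h>

/* AttrType and AttrLen share one 32-bit word, so the value must
 * begin at byte offset 4 with no inserted padding
 */
static_assert(offsetof(struct lpfc_fdmi_attr_def, AttrValue) == 4);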
/*
@@ -3284,8 +3262,7 @@ typedef struct {
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1 : 19; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd1 : 20; /* Reserved */
uint32_t casabt : 1; /* Configure async abts status notice */
uint32_t rsvd2 : 2; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
@@ -3309,12 +3286,10 @@ typedef struct {
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 2; /* Reserved */
uint32_t casabt : 1; /* Configure async abts status notice */
- uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd1 : 19; /* Reserved */
+ uint32_t rsvd1 : 20; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd3 : 19; /* Reserved */
- uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 : 20; /* Reserved */
uint32_t gasabt : 1; /* Grant async abts status notice */
uint32_t rsvd4 : 2; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
@@ -3338,8 +3313,7 @@ typedef struct {
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t rsvd4 : 2; /* Reserved */
uint32_t gasabt : 1; /* Grant async abts status notice */
- uint32_t gdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd3 : 19; /* Reserved */
+ uint32_t rsvd3 : 20; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
@@ -3361,15 +3335,11 @@ typedef struct {
uint32_t rsvd6; /* Reserved */
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t fips_rev : 3; /* FIPS Spec Revision */
- uint32_t fips_level : 4; /* FIPS Level */
- uint32_t sec_err : 9; /* security crypto error */
+ uint32_t rsvd7 : 16;
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
#else /* __LITTLE_ENDIAN */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
- uint32_t sec_err : 9; /* security crypto error */
- uint32_t fips_level : 4; /* FIPS Level */
- uint32_t fips_rev : 3; /* FIPS Spec Revision */
+ uint32_t rsvd7 : 16;
#endif
} CONFIG_PORT_VAR;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 9a064b96e570..10c5d1c3122e 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -20,6 +20,8 @@
* included with this package. *
*******************************************************************/
+#include <uapi/scsi/fc/fc_els.h>
+
/* Macros to deal with bit fields. Each bit field must have 3 #defines
* associated with it (_SHIFT, _MASK, and _WORD).
* EG. For a bit field that is in the 7th bit of the "field4" field of a
@@ -4795,6 +4797,23 @@ struct send_frame_wqe {
uint32_t fc_hdr_wd5; /* word 15 */
};
+#define ELS_RDF_REG_TAG_CNT 1
+struct lpfc_els_rdf_reg_desc {
+ struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */
+ __be32 desc_tags[ELS_RDF_REG_TAG_CNT];
+ /* tags in reg_desc */
+};
+
+struct lpfc_els_rdf_req {
+ struct fc_els_rdf rdf; /* hdr up to descriptors */
+ struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
+};
+
+struct lpfc_els_rdf_rsp {
+ struct fc_els_rdf_resp rdf_resp; /* hdr up to descriptors */
+ struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
+};
+
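
The request/response structs above fix the RDF payload at one registration descriptor carrying ELS_RDF_REG_TAG_CNT tags. Registering for a second FPIN type would mean growing the tag array and the count together, since the descriptor length is derived from the struct size. A hypothetical sketch; the second tag value is assumed, not taken from this patch:

#include <uapi/scsi/fc/fc_els.h>
#include <linux/kernel.h>

#define DEMO_RDF_REG_TAG_CNT	2
#define DEMO_DTAG_OTHER		0x20002	/* assumed second FPIN tag */

struct demo_rdf_reg_desc {
	struct fc_df_desc_fpin_reg reg_desc;	/* descriptor header */
	__be32 desc_tags[DEMO_RDF_REG_TAG_CNT];	/* tags in reg_desc */
};

static void demo_fill_reg_desc(struct demo_rdf_reg_desc *d)
{
	d->reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
	/* length is still derived from the (now larger) struct size */
	d->reg_desc.desc_len =
		cpu_to_be32(FC_TLV_DESC_LENGTH_FROM_SZ(*d));
	d->reg_desc.count = cpu_to_be32(DEMO_RDF_REG_TAG_CNT);
	d->desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
	d->desc_tags[1] = cpu_to_be32(DEMO_DTAG_OTHER);
}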
union lpfc_wqe {
uint32_t words[16];
struct lpfc_wqe_generic generic;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5a605773dd0a..4104bdcdbb6f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom†refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -512,21 +512,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_sli_read_link_ste(phba);
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
- i = (mb->un.varRdConfig.max_xri + 1);
- if (phba->cfg_hba_queue_depth > i) {
+ if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"3359 HBA queue depth changed from %d to %d\n",
- phba->cfg_hba_queue_depth, i);
- phba->cfg_hba_queue_depth = i;
- }
-
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
- i = (mb->un.varRdConfig.max_xri >> 3);
- if (phba->pport->cfg_lun_queue_depth > i) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "3360 LUN queue depth changed from %d to %d\n",
- phba->pport->cfg_lun_queue_depth, i);
- phba->pport->cfg_lun_queue_depth = i;
+ phba->cfg_hba_queue_depth,
+ mb->un.varRdConfig.max_xri);
+ phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
}
phba->lmt = mb->un.varRdConfig.lmt;
@@ -4240,6 +4231,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
+ struct scsi_host_template *template;
int error = 0;
int i;
uint64_t wwn;
@@ -4268,22 +4260,50 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
}
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- if (dev != &phba->pcidev->dev) {
- shost = scsi_host_alloc(&lpfc_vport_template,
- sizeof(struct lpfc_vport));
+ /* Seed template for SCSI host registration */
+ if (dev == &phba->pcidev->dev) {
+ template = &phba->port_template;
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* Seed physical port template */
+ memcpy(template, &lpfc_template, sizeof(*template));
+
+ if (use_no_reset_hba) {
+ /* template is for a no reset SCSI Host */
+ template->max_sectors = 0xffff;
+ template->eh_host_reset_handler = NULL;
+ }
+
+ /* Template for all vports this physical port creates */
+ memcpy(&phba->vport_template, &lpfc_template,
+ sizeof(*template));
+ phba->vport_template.max_sectors = 0xffff;
+ phba->vport_template.shost_attrs = lpfc_vport_attrs;
+ phba->vport_template.eh_bus_reset_handler = NULL;
+ phba->vport_template.eh_host_reset_handler = NULL;
+ phba->vport_template.vendor_id = 0;
+
+ /* Initialize the host templates with updated value */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ template->sg_tablesize = phba->cfg_scsi_seg_cnt;
+ phba->vport_template.sg_tablesize =
+ phba->cfg_scsi_seg_cnt;
+ } else {
+ template->sg_tablesize = phba->cfg_sg_seg_cnt;
+ phba->vport_template.sg_tablesize =
+ phba->cfg_sg_seg_cnt;
+ }
+
} else {
- if (!use_no_reset_hba)
- shost = scsi_host_alloc(&lpfc_template,
- sizeof(struct lpfc_vport));
- else
- shost = scsi_host_alloc(&lpfc_template_no_hr,
- sizeof(struct lpfc_vport));
+ /* NVMET is for physical port only */
+ memcpy(template, &lpfc_template_nvme,
+ sizeof(*template));
}
- } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- shost = scsi_host_alloc(&lpfc_template_nvme,
- sizeof(struct lpfc_vport));
+ } else {
+ template = &phba->vport_template;
}
+
+ shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
if (!shost)
goto out;
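
The rework above stops patching the global host templates at runtime (the removed lpfc_template/lpfc_vport_template assignments appear later in this diff); each physical port now copies a baseline template into its own phba storage and customizes only that copy. The copy-then-customize pattern in isolation, with hypothetical names:

#include <scsi/scsi_host.h>
#include <linux/module.h>
#include <linux/string.h>

/* hypothetical baseline shared, read-only, by all adapters */
static const struct scsi_host_template demo_base_template = {
	.module		= THIS_MODULE,
	.name		= "demo",
	.sg_tablesize	= 64,
};

static void demo_seed_template(struct scsi_host_template *tmpl,
			       unsigned int sg_cnt, bool no_reset)
{
	/* private copy: safe to patch per adapter */
	memcpy(tmpl, &demo_base_template, sizeof(*tmpl));
	tmpl->sg_tablesize = sg_cnt;
	if (no_reset)
		tmpl->eh_host_reset_handler = NULL;
}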
@@ -4338,6 +4358,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->port_type = LPFC_PHYSICAL_PORT;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9081 CreatePort TMPLATE type %x TBLsize %d "
+ "SEGcnt %d/%d\n",
+ vport->port_type, shost->sg_tablesize,
+ phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
+
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
@@ -6310,11 +6336,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
* used to create the sg_dma_buf_pool must be dynamically calculated.
*/
- /* Initialize the host templates the configured values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
if (phba->sli_rev == LPFC_SLI_REV4)
entry_sz = sizeof(struct sli4_sge);
else
@@ -6355,7 +6376,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+ "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
phba->cfg_total_seg_cnt);
@@ -6825,11 +6846,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
}
- /* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
-
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9087 sg_seg_cnt:%d dmabuf_size:%d "
"total:%d scsi:%d nvme:%d\n",
@@ -6935,6 +6951,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_free_hba_cpu_map;
}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
+ if (!phba->sli4_hba.c_stat) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3332 Failed allocating per cpu hdwq stats\n");
+ rc = -ENOMEM;
+ goto out_free_hba_eq_info;
+ }
+#endif
+
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -6954,6 +6981,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+out_free_hba_eq_info:
+ free_percpu(phba->sli4_hba.eq_info);
+#endif
out_free_hba_cpu_map:
kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
@@ -6992,6 +7023,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
free_percpu(phba->sli4_hba.eq_info);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ free_percpu(phba->sli4_hba.c_stat);
+#endif
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
kfree(phba->sli4_hba.cpu_map);
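
The allocation and teardown above complete the per-CPU counter lifecycle used throughout this patch: alloc_percpu() at setup, this_cpu_inc() on the hot paths (no locks, no shared cache line), per_cpu_ptr() when a reader aggregates, free_percpu() on unset. The whole lifecycle as one hypothetical unit:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_stat { u64 xmt_io; };

static struct demo_stat __percpu *demo_stat;

static int demo_setup(void)
{
	demo_stat = alloc_percpu(struct demo_stat);
	return demo_stat ? 0 : -ENOMEM;
}

static void demo_hot_path(void)
{
	/* preemption-safe increment of this CPU's copy */
	this_cpu_inc(demo_stat->xmt_io);
}

static u64 demo_sum(void)
{
	u64 sum = 0;
	int cpu;

	/* readers walk every CPU's copy and total them */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(demo_stat, cpu)->xmt_io;
	return sum;
}

static void demo_teardown(void)
{
	free_percpu(demo_stat);
}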
@@ -9235,6 +9269,7 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
/* Free the CQ/WQ corresponding to the Hardware Queue */
lpfc_sli4_queue_free(hdwq[idx].io_cq);
lpfc_sli4_queue_free(hdwq[idx].io_wq);
+ hdwq[idx].hba_eq = NULL;
hdwq[idx].io_cq = NULL;
hdwq[idx].io_wq = NULL;
if (phba->cfg_xpsgl && !phba->nvmet_support)
@@ -10831,6 +10866,9 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hdwq_stat *c_stat;
+#endif
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
@@ -11082,10 +11120,17 @@ found_any:
idx = 0;
for_each_possible_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
+ c_stat->hdwq_no = cpup->hdwq;
+#endif
if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
continue;
cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ c_stat->hdwq_no = cpup->hdwq;
+#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3340 Set Affinity: not present "
"CPU %d hdwq %d\n",
@@ -11105,15 +11150,19 @@ found_any:
* @cpu: cpu going offline
* @eqlist:
*/
-static void
+static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
struct list_head *eqlist)
{
const struct cpumask *maskp;
struct lpfc_queue *eq;
- cpumask_t tmp;
+ struct cpumask *tmp;
u16 idx;
+ tmp = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
maskp = pci_irq_get_affinity(phba->pcidev, idx);
if (!maskp)
@@ -11123,7 +11172,7 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
* then we don't need to poll the eq attached
* to it.
*/
- if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
continue;
/* get the cpus that are online and are affini-
* tized to this irq vector. If the count is
@@ -11131,8 +11180,8 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
* down this vector. Since this cpu has not
* gone offline yet, we need >1.
*/
- cpumask_and(&tmp, maskp, cpu_online_mask);
- if (cpumask_weight(&tmp) > 1)
+ cpumask_and(tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(tmp) > 1)
continue;
/* Now that we have an irq to shutdown, get the eq
@@ -11143,6 +11192,8 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
list_add(&eq->_poll_list, eqlist);
}
+ kfree(tmp);
+ return 0;
}
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
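
The conversion above trades an on-stack cpumask_t, which can be large when NR_CPUS is big, for a heap allocation, and propagates the possible allocation failure to the caller. The generic kernel idiom for the same problem is cpumask_var_t, which only touches the heap under CONFIG_CPUMASK_OFFSTACK; a sketch of the same "is this CPU the last online one servicing the vector" test written that way:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_needs_poll(const struct cpumask *irq_mask, unsigned int cpu)
{
	cpumask_var_t tmp;	/* heap-backed only if CPUMASK_OFFSTACK */
	int rc = 0;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	/* 'cpu' must be affinitized to this vector and be the sole
	 * online CPU servicing it
	 */
	cpumask_and(tmp, irq_mask, cpu_online_mask);
	if (cpumask_test_cpu(cpu, tmp) && cpumask_weight(tmp) == 1)
		rc = 1;

	free_cpumask_var(tmp);
	return rc;
}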
@@ -11175,11 +11226,9 @@ static void lpfc_cpuhp_add(struct lpfc_hba *phba)
rcu_read_lock();
- if (!list_empty(&phba->poll_list)) {
- timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
- }
rcu_read_unlock();
@@ -11313,7 +11362,9 @@ static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
lpfc_irq_rebalance(phba, cpu, true);
- lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+ retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+ if (retval)
+ return retval;
/* start polling on these eq's */
list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
@@ -13145,6 +13196,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_sli4_ras_setup(phba);
INIT_LIST_HEAD(&phba->poll_list);
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index d1773c01d2b3..e35b52b66d6c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1299,8 +1299,6 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
- if (phba->cfg_enable_dss)
- mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index f6c8963c915d..a45936e08031 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -382,13 +382,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
ndlp->nrport = NULL;
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- }
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&vport->phba->hbalock);
- /* Remove original register reference. The host transport
- * won't reference this rport/remoteport any further.
- */
- lpfc_nlp_put(ndlp);
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
+ lpfc_nlp_put(ndlp);
+ } else {
+ spin_unlock_irq(&vport->phba->hbalock);
+ }
rport_err:
return;
@@ -897,88 +899,6 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-static void
-lpfc_nvme_ktime(struct lpfc_hba *phba,
- struct lpfc_io_buf *lpfc_ncmd)
-{
- uint64_t seg1, seg2, seg3, seg4;
- uint64_t segsum;
-
- if (!lpfc_ncmd->ts_last_cmd ||
- !lpfc_ncmd->ts_cmd_start ||
- !lpfc_ncmd->ts_cmd_wqput ||
- !lpfc_ncmd->ts_isr_cmpl ||
- !lpfc_ncmd->ts_data_nvme)
- return;
-
- if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
- return;
- if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
- return;
- if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
- return;
- if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
- return;
- if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
- return;
- /*
- * Segment 1 - Time from Last FCP command cmpl is handed
- * off to NVME Layer to start of next command.
- * Segment 2 - Time from Driver receives a IO cmd start
- * from NVME Layer to WQ put is done on IO cmd.
- * Segment 3 - Time from Driver WQ put is done on IO cmd
- * to MSI-X ISR for IO cmpl.
- * Segment 4 - Time from MSI-X ISR for IO cmpl to when
- * cmpl is handled off to the NVME Layer.
- */
- seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
- if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
- seg1 = 0;
-
- /* Calculate times relative to start of IO */
- seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
- segsum = seg2;
- seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
- if (segsum > seg3)
- return;
- seg3 -= segsum;
- segsum += seg3;
-
- seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
- if (segsum > seg4)
- return;
- seg4 -= segsum;
-
- phba->ktime_data_samples++;
- phba->ktime_seg1_total += seg1;
- if (seg1 < phba->ktime_seg1_min)
- phba->ktime_seg1_min = seg1;
- else if (seg1 > phba->ktime_seg1_max)
- phba->ktime_seg1_max = seg1;
- phba->ktime_seg2_total += seg2;
- if (seg2 < phba->ktime_seg2_min)
- phba->ktime_seg2_min = seg2;
- else if (seg2 > phba->ktime_seg2_max)
- phba->ktime_seg2_max = seg2;
- phba->ktime_seg3_total += seg3;
- if (seg3 < phba->ktime_seg3_min)
- phba->ktime_seg3_min = seg3;
- else if (seg3 > phba->ktime_seg3_max)
- phba->ktime_seg3_max = seg3;
- phba->ktime_seg4_total += seg4;
- if (seg4 < phba->ktime_seg4_min)
- phba->ktime_seg4_min = seg4;
- else if (seg4 > phba->ktime_seg4_max)
- phba->ktime_seg4_max = seg4;
-
- lpfc_ncmd->ts_last_cmd = 0;
- lpfc_ncmd->ts_cmd_start = 0;
- lpfc_ncmd->ts_cmd_wqput = 0;
- lpfc_ncmd->ts_isr_cmpl = 0;
- lpfc_ncmd->ts_data_nvme = 0;
-}
-#endif
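
The NVMe-only ktime accounting above goes away in favor of a shared lpfc_io_ktime() called from both the NVMe and SCSI completion paths later in this diff. Its essential job, simplified (the removed 5 ms clamp on segment 1 is omitted) and with hypothetical names, is to validate the five timestamps and split an IO's life into four segments:

#include <linux/types.h>

/* sketch of the four-segment latency split tracked per IO */
struct demo_io_ts {
	u64 ts_last_cmd, ts_cmd_start, ts_cmd_wqput,
	    ts_isr_cmpl, ts_data_io;
};

static bool demo_io_segments(const struct demo_io_ts *t,
			     u64 *seg1, u64 *seg2, u64 *seg3, u64 *seg4)
{
	/* timestamps must be present and monotonically ordered */
	if (!t->ts_last_cmd || t->ts_cmd_start < t->ts_last_cmd ||
	    t->ts_cmd_wqput < t->ts_cmd_start ||
	    t->ts_isr_cmpl < t->ts_cmd_wqput ||
	    t->ts_data_io < t->ts_isr_cmpl)
		return false;

	*seg1 = t->ts_cmd_start - t->ts_last_cmd;  /* idle gap before IO */
	*seg2 = t->ts_cmd_wqput - t->ts_cmd_start; /* driver submit time */
	*seg3 = t->ts_isr_cmpl - t->ts_cmd_wqput;  /* hardware plus ISR  */
	*seg4 = t->ts_data_io - t->ts_isr_cmpl;    /* completion handoff */
	return true;
}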
/**
* lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
@@ -1010,6 +930,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
uint32_t code, status, idx;
uint16_t cid, sqhd, data;
uint32_t *ptr;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ int cpu;
+#endif
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd) {
@@ -1178,23 +1101,19 @@ out_err:
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_ncmd->ts_cmd_start) {
lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
- lpfc_ncmd->ts_data_nvme = ktime_get_ns();
- phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
- lpfc_nvme_ktime(phba, lpfc_ncmd);
+ lpfc_ncmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_ncmd);
}
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
- uint32_t cpu;
- idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- if (lpfc_ncmd->cpu != cpu)
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_NVME_IOERR,
- "6701 CPU Check cmpl: "
- "cpu %d expect %d\n",
- cpu, lpfc_ncmd->cpu);
- phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+ if (lpfc_ncmd->cpu != cpu)
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_IOERR,
+ "6701 CPU Check cmpl: "
+ "cpu %d expect %d\n",
+ cpu, lpfc_ncmd->cpu);
}
#endif
@@ -1743,19 +1662,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
- if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+ if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- lpfc_ncmd->cpu = cpu;
- if (idx != cpu)
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_NVME_IOERR,
- "6702 CPU Check cmd: "
- "cpu %d wq %d\n",
- lpfc_ncmd->cpu,
- lpfc_queue_info->index);
- phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+ lpfc_ncmd->cpu = cpu;
+ if (idx != cpu)
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_IOERR,
+ "6702 CPU Check cmd: "
+ "cpu %d wq %d\n",
+ lpfc_ncmd->cpu,
+ lpfc_queue_info->index);
}
#endif
return 0;
@@ -1985,8 +1902,6 @@ out_unlock:
/* Declare and initialization an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
- .module = THIS_MODULE,
-
/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 9dc9afe1c255..565419bf8d74 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -707,7 +707,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_rcv_ctx *ctxp;
uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint32_t id;
+ int id;
#endif
ctxp = cmdwqe->context2;
@@ -814,16 +814,14 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
rsp->done(rsp);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
id = raw_smp_processor_id();
- if (id < LPFC_CHECK_CPU_CNT) {
- if (ctxp->cpu != id)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6704 CPU Check cmdcmpl: "
- "cpu %d expect %d\n",
- id, ctxp->cpu);
- phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+ if (ctxp->cpu != id)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6704 CPU Check cmdcmpl: "
+ "cpu %d expect %d\n",
+ id, ctxp->cpu);
}
#endif
}
@@ -931,6 +929,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
struct lpfc_sli_ring *pring;
unsigned long iflags;
int rc;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ int id;
+#endif
if (phba->pport->load_flag & FC_UNLOADING) {
rc = -ENODEV;
@@ -954,16 +955,14 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- int id = raw_smp_processor_id();
- if (id < LPFC_CHECK_CPU_CNT) {
- if (rsp->hwqid != id)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6705 CPU Check OP: "
- "cpu %d expect %d\n",
- id, rsp->hwqid);
- phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
- }
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+ id = raw_smp_processor_id();
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+ if (rsp->hwqid != id)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6705 CPU Check OP: "
+ "cpu %d expect %d\n",
+ id, rsp->hwqid);
ctxp->cpu = id; /* Setup cpu for cmpl check */
}
#endif
@@ -2270,15 +2269,13 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
size = nvmebuf->bytes_recv;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
- if (current_cpu < LPFC_CHECK_CPU_CNT) {
- if (idx != current_cpu)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6703 CPU Check rcv: "
- "cpu %d expect %d\n",
- current_cpu, idx);
- phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
- }
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+ this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
+ if (idx != current_cpu)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6703 CPU Check rcv: "
+ "cpu %d expect %d\n",
+ current_cpu, idx);
}
#endif
@@ -2598,7 +2595,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
dma_addr_t physaddr;
- int i, cnt;
+ int i, cnt, nsegs;
int do_pbde;
int xc = 1;
@@ -2629,6 +2626,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
phba->cfg_nvme_seg_cnt);
return NULL;
}
+ nsegs = rsp->sg_cnt;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
nvmewqe = ctxp->wqeq;
@@ -2868,7 +2866,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
wqe->fcp_trsp.rsvd_12_15[0] = 0;
/* Use rspbuf, NOT sg list */
- rsp->sg_cnt = 0;
+ nsegs = 0;
sgl->word2 = 0;
atomic_inc(&tgtp->xmt_fcp_rsp);
break;
@@ -2885,7 +2883,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->context1 = ndlp;
- for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
+ for_each_sg(rsp->sg, sgel, nsegs, i) {
physaddr = sg_dma_address(sgel);
cnt = sg_dma_len(sgel);
sgl->addr_hi = putPaddrHigh(physaddr);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2c7e0b22db2f..ad62fb3f3a54 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -671,8 +671,10 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_cmd->prot_data_type = 0;
#endif
tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
- if (!tmp)
+ if (!tmp) {
+ lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
return NULL;
+ }
lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
@@ -3803,9 +3805,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
int idx;
uint32_t logit = LOG_FCP;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- int cpu;
-#endif
/* Guard against abort handler being called at same time */
spin_lock(&lpfc_cmd->buf_lock);
@@ -3824,11 +3823,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
- cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
- phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
- }
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
shost = cmd->device->host;
@@ -4029,6 +4025,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->pCmd = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->ts_cmd_start) {
+ lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
+ lpfc_cmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_cmd);
+ }
+#endif
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
@@ -4502,7 +4506,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- int cpu;
+ uint64_t start = 0L;
+
+ if (phba->ktime_on)
+ start = ktime_get_ns();
#endif
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
@@ -4624,17 +4631,20 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
- cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- struct lpfc_sli4_hdw_queue *hdwq =
- &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
- hdwq->cpucheck_xmt_io[cpu]++;
- }
- }
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (start) {
+ lpfc_cmd->ts_cmd_start = start;
+ lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
+ lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
+ } else {
+ lpfc_cmd->ts_cmd_start = 0;
+ }
+#endif
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"3376 FCP could not issue IOCB err %x"
@@ -6021,31 +6031,6 @@ struct scsi_host_template lpfc_template_nvme = {
.track_queue_depth = 0,
};
-struct scsi_host_template lpfc_template_no_hr = {
- .module = THIS_MODULE,
- .name = LPFC_DRIVER_NAME,
- .proc_name = LPFC_DRIVER_NAME,
- .info = lpfc_info,
- .queuecommand = lpfc_queuecommand,
- .eh_timed_out = fc_eh_timed_out,
- .eh_abort_handler = lpfc_abort_handler,
- .eh_device_reset_handler = lpfc_device_reset_handler,
- .eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
- .scan_finished = lpfc_scan_finished,
- .this_id = -1,
- .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
- .cmd_per_lun = LPFC_CMD_PER_LUN,
- .shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFFFFFF,
- .vendor_id = LPFC_NL_VENDOR_ID,
- .change_queue_depth = scsi_change_queue_depth,
- .track_queue_depth = 1,
-};
-
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
@@ -6071,26 +6056,3 @@ struct scsi_host_template lpfc_template = {
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
-
-struct scsi_host_template lpfc_vport_template = {
- .module = THIS_MODULE,
- .name = LPFC_DRIVER_NAME,
- .proc_name = LPFC_DRIVER_NAME,
- .info = lpfc_info,
- .queuecommand = lpfc_queuecommand,
- .eh_timed_out = fc_eh_timed_out,
- .eh_abort_handler = lpfc_abort_handler,
- .eh_device_reset_handler = lpfc_device_reset_handler,
- .eh_target_reset_handler = lpfc_target_reset_handler,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
- .scan_finished = lpfc_scan_finished,
- .this_id = -1,
- .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
- .cmd_per_lun = LPFC_CMD_PER_LUN,
- .shost_attrs = lpfc_vport_attrs,
- .max_sectors = 0xFFFF,
- .change_queue_depth = scsi_change_queue_depth,
- .track_queue_depth = 1,
-};
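[Editor's note] The lpfc_scsi.c hunks above bracket each SCSI I/O with ktime_get_ns() samples (ts_cmd_start, ts_cmd_wqput, ts_isr_cmpl, ts_data_io), extending to SCSI the timing previously kept only for NVMe. A hedged sketch of the bracketing idiom, with a hypothetical struct standing in for lpfc_io_buf:

#include <linux/ktime.h>
#include <linux/types.h>

struct my_cmd {			/* hypothetical; mirrors the ts_* fields above */
	u64 ts_cmd_start;
	u64 ts_cmd_wqput;
};

static void bracket_submit(struct my_cmd *cmd, bool timing_on)
{
	u64 start = timing_on ? ktime_get_ns() : 0;

	/* ... build and post the work-queue entry here ... */

	if (start) {
		cmd->ts_cmd_start = start;
		cmd->ts_cmd_wqput = ktime_get_ns();	/* queueing latency = wqput - start */
	} else {
		cmd->ts_cmd_start = 0;	/* tells the completion side to skip the sample */
	}
}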
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 64002b0cb02d..b6fb665e6ec4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -230,25 +230,16 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
* This routine will update the HBA index of a queue to reflect consumption of
* Work Queue Entries by the HBA. When the HBA indicates that it has consumed
* an entry the host calls this function to update the queue's internal
- * pointers. This routine returns the number of entries that were consumed by
- * the HBA.
+ * pointers.
**/
-static uint32_t
+static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
- uint32_t released = 0;
-
/* sanity check on queue memory */
if (unlikely(!q))
- return 0;
+ return;
- if (q->hba_index == index)
- return 0;
- do {
- q->hba_index = ((q->hba_index + 1) % q->entry_count);
- released++;
- } while (q->hba_index != index);
- return released;
+ q->hba_index = index;
}
/**
@@ -2511,6 +2502,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -4044,6 +4037,11 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_IOQ_FLUSH ||
+ !phba->sli4_hba.hdwq) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@@ -5034,23 +5032,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
} else
phba->max_vpi = 0;
- phba->fips_level = 0;
- phba->fips_spec_rev = 0;
- if (pmb->u.mb.un.varCfgPort.gdss) {
- phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
- phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
- phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2850 Security Crypto Active. FIPS x%d "
- "(Spec Rev: x%d)",
- phba->fips_level, phba->fips_spec_rev);
- }
- if (pmb->u.mb.un.varCfgPort.sec_err) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2856 Config Port Security Crypto "
- "Error: x%x ",
- pmb->u.mb.un.varCfgPort.sec_err);
- }
if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
@@ -7371,15 +7352,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
- rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
- if (phba->pport->cfg_lun_queue_depth > rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "3362 LUN queue depth changed from %d to %d\n",
- phba->pport->cfg_lun_queue_depth, rc);
- phba->pport->cfg_lun_queue_depth = rc;
- }
-
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
@@ -9468,6 +9440,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
*pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_RDF ||
*pcmd == ELS_CMD_RSCN_XMT ||
*pcmd == ELS_CMD_FDISC ||
*pcmd == ELS_CMD_LOGO ||
@@ -14450,12 +14423,10 @@ static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
struct lpfc_hba *phba = eq->phba;
- if (list_empty(&phba->poll_list)) {
- timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
- /* kickstart slowpath processing for this eq */
+ /* kickstart slowpath processing if needed */
+ if (list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
- }
list_add_rcu(&eq->_poll_list, &phba->poll_list);
synchronize_rcu();
@@ -17950,6 +17921,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
list_add_tail(&iocbq->list, &first_iocbq->list);
}
}
+ /* Free the sequence's header buffer */
+ if (!first_iocbq)
+ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
+
return first_iocbq;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7bcf922a8be2..93d976ea8c5d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -446,6 +446,6 @@ struct lpfc_io_buf {
uint64_t ts_last_cmd;
uint64_t ts_cmd_wqput;
uint64_t ts_isr_cmpl;
- uint64_t ts_data_nvme;
+ uint64_t ts_data_io;
#endif
};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d963ca871383..8da7429e385a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -697,13 +697,6 @@ struct lpfc_sli4_hdw_queue {
struct lpfc_lock_stat lock_conflict;
#endif
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-#define LPFC_CHECK_CPU_CNT 128
- uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
-#endif
-
/* Per HDWQ pool resources */
struct list_head sgl_list;
struct list_head cmd_rsp_buf_list;
@@ -740,6 +733,15 @@ struct lpfc_sli4_hdw_queue {
#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
#endif
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+struct lpfc_hdwq_stat {
+ u32 hdwq_no;
+ u32 rcv_io;
+ u32 xmt_io;
+ u32 cmpl_io;
+};
+#endif
+
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
* config space registers
@@ -921,6 +923,9 @@ struct lpfc_sli4_hba {
struct cpumask numa_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hdwq_stat __percpu *c_stat;
+#endif
uint32_t conf_trunk;
#define lpfc_conf_trunk_port0_WORD conf_trunk
#define lpfc_conf_trunk_port0_SHIFT 0
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9563c49f36ab..ca40c47cfbe0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.6.0.3"
+#define LPFC_DRIVER_VERSION "12.8.0.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index ff6d4aa92421..f27ffd088c8a 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -2795,11 +2795,9 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
adapter_t *adapter;
- unsigned char *bh;
int heads;
int sectors;
int cylinders;
- int rval;
/* Get pointer to host config structure */
adapter = (adapter_t *)sdev->host->hostdata;
@@ -2826,15 +2824,8 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
geom[2] = cylinders;
}
else {
- bh = scsi_bios_ptable(bdev);
-
- if( bh ) {
- rval = scsi_partsize(bh, capacity,
- &geom[2], &geom[0], &geom[1]);
- kfree(bh);
- if( rval != -1 )
- return rval;
- }
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
dev_info(&adapter->dev->dev,
"invalid partition on this disk on channel %d\n",
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index fd4b5ac6ac5b..babe85d7b537 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2987,9 +2987,10 @@ megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
u32 __iomem *reg = (u32 __iomem *)reg_set;
for (i = 0; i < sz / sizeof(u32); i++) {
- bytes_wrote += snprintf(loc + bytes_wrote, PAGE_SIZE,
- "%08x: %08x\n", (i * 4),
- readl(&reg[i]));
+ bytes_wrote += scnprintf(loc + bytes_wrote,
+ PAGE_SIZE - bytes_wrote,
+ "%08x: %08x\n", (i * 4),
+ readl(&reg[i]));
}
return bytes_wrote;
}
@@ -8224,8 +8225,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
cmd->cmd_status_drv);
- error = -EBUSY;
- goto out;
+ error = -EBUSY;
+ goto out;
}
cmd->sync_cmd = 0;
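[Editor's note] The megasas_dump_sys_regs fix above addresses the classic snprintf-accumulation bug: snprintf returns the length that *would* have been written, so `loc + bytes_wrote` can walk past the buffer, and the size argument was never shrunk either. scnprintf returns the bytes actually stored, which keeps the running offset honest. A minimal sketch:

#include <linux/kernel.h>
#include <linux/mm.h>

static ssize_t dump_regs(char *buf, const u32 *regs, int n)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < n; i++)
		len += scnprintf(buf + len, PAGE_SIZE - len,	/* remaining space, not PAGE_SIZE */
				 "%08x: %08x\n", i * 4, regs[i]);
	return len;
}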
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c597d544eb39..04a40afe60e3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -207,7 +207,7 @@ struct fw_event_work {
u8 ignore;
u16 event;
struct kref refcount;
- char event_data[0] __aligned(4);
+ char event_data[] __aligned(4);
};
static void fw_event_work_free(struct kref *r)
@@ -9908,8 +9908,8 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev))
+ _scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
@@ -9992,8 +9992,8 @@ scsih_shutdown(struct pci_dev *pdev)
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev))
+ _scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 519edc796691..327fdd5ee962 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -394,7 +394,7 @@ struct mvs_info {
dma_addr_t bulk_buffer_dma1;
#define TRASH_BUCKET_SIZE 0x20000
void *dma_pool;
- struct mvs_slot_info slot_info[0];
+ struct mvs_slot_info slot_info[];
};
struct mvs_prv_info{
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index ec8cc2207536..60d5691fc4ab 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -130,7 +130,7 @@ enum {
struct mvumi_hotplug_event {
u16 size;
u8 dummy[2];
- u8 bitmap[0];
+ u8 bitmap[];
};
struct mvumi_driver_event {
@@ -290,7 +290,7 @@ struct mvumi_rsp_frame {
struct mvumi_ob_data {
struct list_head list;
- unsigned char data[0];
+ unsigned char data[];
};
struct version_info {
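[Editor's note] The mpt3sas, mvsas, mvumi, and pmcraid hunks in this series all convert zero-length trailing arrays to C99 flexible array members, letting the compiler and fortify checks see the real object size. When allocating such a structure, struct_size() guards the header-plus-payload arithmetic against overflow. A hedged sketch with a hypothetical struct:

#include <linux/overflow.h>
#include <linux/slab.h>

struct my_event {		/* hypothetical; shaped like mvumi_hotplug_event */
	u16 size;
	u8 dummy[2];
	u8 bitmap[];		/* flexible array member, was bitmap[0] */
};

static struct my_event *my_event_alloc(u16 nbytes)
{
	struct my_event *ev;

	ev = kzalloc(struct_size(ev, bitmap, nbytes), GFP_KERNEL);
	if (ev)
		ev->size = nbytes;
	return ev;
}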
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 11a2cb844ecb..f88adab3f913 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -2203,7 +2203,7 @@ static struct script script0 __initdata = {
** Possible data corruption during Memory Write and Invalidate.
** This work-around resets the addressing logic prior to the
** start of the first MOVE of a DATA IN phase.
- ** (See Documentation/scsi/ncr53c8xx.txt for more information)
+ ** (See Documentation/scsi/ncr53c8xx.rst for more information)
*/
SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
20,
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index dc9b74c9348a..9696b6b5591f 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -36,7 +36,7 @@ config PCMCIA_NINJA_SCSI
help
If you intend to attach this type of PCMCIA SCSI host adapter to
your computer, say Y here and read
- <file:Documentation/scsi/NinjaSCSI.txt>.
+ <file:Documentation/scsi/NinjaSCSI.rst>.
Supported cards:
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7c6be2ec110d..3c9f42779dd0 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -463,7 +463,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
pm8001_ha->nvmd_completion = &completion;
payload.minor_function = 7;
payload.offset = 0;
- payload.length = 4096;
+ payload.rd_length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
if (!payload.func_specific)
return -ENOMEM;
@@ -554,6 +554,49 @@ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
+/**
+ ** non_fatal_log_show - non-fatal error logging
+ ** @cdev: pointer to embedded class device
+ ** @buf: the buffer returned
+ **
+ ** A sysfs 'read-only' shost attribute.
+ **/
+static ssize_t non_fatal_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 count;
+
+ count = pm80xx_get_non_fatal_dump(cdev, attr, buf);
+ return count;
+}
+static DEVICE_ATTR_RO(non_fatal_log);
+
+static ssize_t non_fatal_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%08x",
+ pm8001_ha->non_fatal_count);
+}
+
+static ssize_t non_fatal_count_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int val = 0;
+
+ if (kstrtoint(buf, 16, &val) != 0)
+ return -EINVAL;
+
+ pm8001_ha->non_fatal_count = val;
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(non_fatal_count);
/**
** pm8001_ctl_gsm_log_show - gsm dump collection
@@ -631,7 +674,7 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
pm8001_ha->fw_image->size);
- payload->length = pm8001_ha->fw_image->size;
+ payload->wr_length = pm8001_ha->fw_image->size;
payload->id = 0;
payload->minor_function = 0x1;
pm8001_ha->nvmd_completion = &completion;
@@ -677,7 +720,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
IOCTL_BUF_SIZE);
for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
- payload->length = 1024*16;
+ payload->wr_length = 1024*16;
payload->id = 0;
fwControl =
(struct fw_control_info *)&payload->func_specific;
@@ -829,6 +872,8 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_aap_log,
&dev_attr_iop_log,
&dev_attr_fatal_log,
+ &dev_attr_non_fatal_log,
+ &dev_attr_non_fatal_count,
&dev_attr_gsm_log,
&dev_attr_max_out_io,
&dev_attr_max_devices,
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 48e0624ecc68..1c7f15fd69ce 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -75,7 +75,7 @@ enum port_type {
};
/* driver compile-time configuration */
-#define PM8001_MAX_CCB 512 /* max ccbs supported */
+#define PM8001_MAX_CCB 256 /* max ccbs supported */
#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
#define PM8001_MAX_INB_NUM 1
#define PM8001_MAX_OUTB_NUM 1
@@ -99,7 +99,8 @@ enum port_type {
#define OB (CI + PM8001_MAX_SPCV_INB_NUM)
#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM)
#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)
-#define PM8001_MAX_DMA_SG SG_ALL
+#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528
+#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG
enum memory_region_num {
AAP1 = 0x0, /* application acceleration processor */
IOP, /* IO processor */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 2328ff1349ac..fb9848e1d481 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4793,7 +4793,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
- fw_control_context->len = ioctl_payload->length;
+ fw_control_context->len = ioctl_payload->rd_length;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&nvmd_req, 0, sizeof(nvmd_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4814,7 +4814,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
twi_page_size << 8 | TWI_DEVICE);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4823,7 +4823,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case C_SEEPROM: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4832,7 +4832,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case VPD_FLASH: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4841,7 +4841,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case EXPAN_ROM: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4850,7 +4850,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case IOP_RDUMP: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4890,7 +4890,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
&ioctl_payload->func_specific,
- ioctl_payload->length);
+ ioctl_payload->wr_length);
memset(&nvmd_req, 0, sizeof(nvmd_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc) {
@@ -4909,7 +4909,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
twi_page_size << 8 | TWI_DEVICE);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4918,7 +4918,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case C_SEEPROM:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4927,7 +4927,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
break;
case VPD_FLASH:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4936,7 +4936,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
break;
case EXPAN_ROM:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3c6076e4c6d2..a8f5344fdfda 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -95,7 +95,7 @@ static struct scsi_host_template pm8001_sht = {
.bios_param = sas_bios_param,
.can_queue = 1,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = PM8001_MAX_DMA_SG,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
@@ -251,6 +251,9 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
return ret;
}
+static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
+static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
+
/**
* pm8001_alloc - initiate our hba structure and 6 DMAs area.
* @pm8001_ha:our hba structure.
@@ -483,6 +486,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
pm8001_ha->logging_level = logging_level;
+ pm8001_ha->non_fatal_count = 0;
if (link_rate >= 1 && link_rate <= 15)
pm8001_ha->link_rate = (link_rate << 8);
else {
@@ -635,22 +639,22 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
if (pm8001_ha->chip_id == chip_8001) {
if (deviceid == 0x8081 || deviceid == 0x0042) {
payload.minor_function = 4;
- payload.length = 4096;
+ payload.rd_length = 4096;
} else {
payload.minor_function = 0;
- payload.length = 128;
+ payload.rd_length = 128;
}
} else if ((pm8001_ha->chip_id == chip_8070 ||
pm8001_ha->chip_id == chip_8072) &&
pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) {
payload.minor_function = 4;
- payload.length = 4096;
+ payload.rd_length = 4096;
} else {
payload.minor_function = 1;
- payload.length = 4096;
+ payload.rd_length = 4096;
}
payload.offset = 0;
- payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
+ payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
if (!payload.func_specific) {
PM8001_INIT_DBG(pm8001_ha, pm8001_printk("mem alloc fail\n"));
return;
@@ -720,7 +724,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
/* SAS ADDRESS read from flash / EEPROM */
payload.minor_function = 6;
payload.offset = 0;
- payload.length = 4096;
+ payload.rd_length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
if (!payload.func_specific)
return -ENOMEM;
@@ -893,9 +897,7 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
- u32 i = 0, j = 0;
u32 number_of_intr;
- int flag = 0;
int rc;
/* SPCv controllers supports 64 msi-x */
@@ -903,11 +905,11 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
number_of_intr = 1;
} else {
number_of_intr = PM8001_MAX_MSIX_VEC;
- flag &= ~IRQF_SHARED;
}
rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
number_of_intr, PCI_IRQ_MSIX);
+ number_of_intr = rc;
if (rc < 0)
return rc;
pm8001_ha->number_of_intr = number_of_intr;
@@ -915,8 +917,22 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
"pci_alloc_irq_vectors request ret:%d no of intr %d\n",
rc, pm8001_ha->number_of_intr));
+ return 0;
+}
- for (i = 0; i < number_of_intr; i++) {
+static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 i = 0, j = 0;
+ int flag = 0, rc = 0;
+
+ if (pm8001_ha->chip_id != chip_8001)
+ flag &= ~IRQF_SHARED;
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("pci_enable_msix request number of intr %d\n",
+ pm8001_ha->number_of_intr));
+
+ for (i = 0; i < pm8001_ha->number_of_intr; i++) {
snprintf(pm8001_ha->intr_drvname[i],
sizeof(pm8001_ha->intr_drvname[0]),
"%s-%d", pm8001_ha->name, i);
@@ -941,6 +957,21 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
}
#endif
+static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
+{
+ struct pci_dev *pdev;
+
+ pdev = pm8001_ha->pdev;
+
+#ifdef PM8001_USE_MSIX
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ return pm8001_setup_msix(pm8001_ha);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("MSIX not supported!!!\n"));
+#endif
+ return 0;
+}
+
/**
* pm8001_request_irq - register interrupt
* @chip_info: our ha struct.
@@ -954,7 +985,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
#ifdef PM8001_USE_MSIX
if (pdev->msix_cap && pci_msi_enabled())
- return pm8001_setup_msix(pm8001_ha);
+ return pm8001_request_msix(pm8001_ha);
else {
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("MSIX not supported!!!\n"));
@@ -989,6 +1020,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
struct pm8001_hba_info *pm8001_ha;
struct Scsi_Host *shost = NULL;
const struct pm8001_chip_info *chip;
+ struct sas_ha_struct *sha;
dev_printk(KERN_INFO, &pdev->dev,
"pm80xx: driver version %s\n", DRV_VERSION);
@@ -1017,12 +1049,12 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_regions;
}
chip = &pm8001_chips[ent->driver_data];
- SHOST_TO_SAS_HA(shost) =
- kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
- if (!SHOST_TO_SAS_HA(shost)) {
+ sha = kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
+ if (!sha) {
rc = -ENOMEM;
goto err_out_free_host;
}
+ SHOST_TO_SAS_HA(shost) = sha;
rc = pm8001_prep_sas_ha_init(shost, chip);
if (rc) {
@@ -1036,7 +1068,14 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
rc = -ENOMEM;
goto err_out_free;
}
- list_add_tail(&pm8001_ha->list, &hba_list);
+ /* Setup Interrupt */
+ rc = pm8001_setup_irq(pm8001_ha);
+ if (rc) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "pm8001_setup_irq failed [ret: %d]\n", rc));
+ goto err_out_shost;
+ }
+
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc) {
@@ -1048,6 +1087,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
+ /* Request Interrupt */
rc = pm8001_request_irq(pm8001_ha);
if (rc) {
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
@@ -1070,8 +1110,12 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
- if (rc)
+ if (rc) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "sas_register_ha failed [ret: %d]\n", rc));
goto err_out_shost;
+ }
+ list_add_tail(&pm8001_ha->list, &hba_list);
scsi_scan_host(pm8001_ha->shost);
pm8001_ha->flags = PM8001F_RUN_TIME;
return 0;
@@ -1081,7 +1125,7 @@ err_out_shost:
err_out_ha_free:
pm8001_free(pm8001_ha);
err_out_free:
- kfree(SHOST_TO_SAS_HA(shost));
+ kfree(sha);
err_out_free_host:
scsi_host_put(shost);
err_out_regions:
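[Editor's note] The pm8001_init.c rework above splits vector allocation (pm8001_setup_irq, run before chip_soft_rst) from handler registration (pm8001_request_msix, run after scsi_add_host), so the controller reset no longer races a live interrupt handler. A hedged sketch of the two halves with illustrative names:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int my_setup_msix(struct pci_dev *pdev, unsigned int want)
{
	/* returns the vector count on success or a negative errno */
	return pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
}

static int my_request_irqs(struct pci_dev *pdev, int nvec,
			   irq_handler_t handler, void *data)
{
	int i, rc;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "my-driver", data);
		if (rc)
			return rc;	/* caller unwinds vectors 0..i-1 */
	}
	return 0;
}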
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 93438c8f67da..ae7ba9b3c4bc 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -137,10 +137,11 @@ struct pm8001_ioctl_payload {
u32 signature;
u16 major_function;
u16 minor_function;
- u16 length;
u16 status;
u16 offset;
u16 id;
+ u32 wr_length;
+ u32 rd_length;
u8 *func_specific;
};
@@ -558,6 +559,8 @@ struct pm8001_hba_info {
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
u32 reset_in_progress;
+ u32 non_fatal_count;
+ u32 non_fatal_read_length;
};
struct pm8001_work {
@@ -741,6 +744,8 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct device_attribute *attr, char *buf);
+ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf);
ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index d1d95f1a2c6a..4d205ebaee87 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -393,6 +393,136 @@ moreData:
(char *)buf;
}
+/* pm80xx_get_non_fatal_dump - dump the non-fatal debug data from the DMA
+ * location set up by the firmware.
+ */
+ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ void __iomem *nonfatal_table_address = pm8001_ha->fatal_tbl_addr;
+ u32 accum_len = 0;
+ u32 total_len = 0;
+ u32 reg_val = 0;
+ u32 *temp = NULL;
+ u32 index = 0;
+ u32 output_length;
+ unsigned long start = 0;
+ char *buf_copy = buf;
+
+ temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+ if (++pm8001_ha->non_fatal_count == 1) {
+ if (pm8001_ha->chip_id == chip_8001) {
+ snprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ PAGE_SIZE, "Not supported for SPC controller");
+ return 0;
+ }
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("forensic_info TYPE_NON_FATAL...\n"));
+ /*
+ * Step 1: Write the host buffer parameters in the MPI Fatal and
+ * Non-Fatal Error Dump Capture Table. This is the buffer
+ * where debug data will be DMAed to.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_LO_OFFSET,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_lo);
+
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HI_OFFSET,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_hi);
+
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_LENGTH, SYSFS_OFFSET);
+
+ /* Optionally, set the DUMPCTRL bit to 1 if the host
+ * keeps sending active I/Os while capturing the non-fatal
+ * debug data. Otherwise, leave this bit set to zero.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE, MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+ /*
+ * Step 2: Clear Accumulative Length of Debug Data Transferred
+ * [ACCDDLEN] field in the MPI Fatal and Non-Fatal Error Dump
+ * Capture Table to zero.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN, 0);
+
+ /* initialize the previous accumulated length to 0 */
+ pm8001_ha->forensic_preserved_accumulated_transfer = 0;
+ pm8001_ha->non_fatal_read_length = 0;
+ }
+
+ total_len = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_TOTAL_LEN);
+ /*
+ * Step 3: Clear Fatal/Non-Fatal Debug Data Transfer Status [FDDTSTAT]
+ * field and then request that the SPCv controller transfer the debug
+ * data by setting bit 7 of the Inbound Doorbell Set Register.
+ */
+ pm8001_mw32(nonfatal_table_address, MPI_FATAL_EDUMP_TABLE_STATUS, 0);
+ pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET,
+ SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP);
+
+ /*
+ * Step 4.1: Read back the Inbound Doorbell Set Register (by polling for
+ * 2 seconds) until register bit 7 is cleared.
+ * This step only indicates the request is accepted by the controller.
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+ do {
+ reg_val = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) &
+ SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP;
+ } while ((reg_val != 0) && time_before(jiffies, start));
+
+ /* Step 4.2: To check the completion of the transfer, poll the Fatal/Non
+ * Fatal Debug Data Transfer Status [FDDTSTAT] field for 2 seconds in
+ * the MPI Fatal and Non-Fatal Error Dump Capture Table.
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+ do {
+ reg_val = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS);
+ } while ((!reg_val) && time_before(jiffies, start));
+
+ if ((reg_val == 0x00) ||
+ (reg_val == MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED) ||
+ (reg_val > MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE)) {
+ pm8001_ha->non_fatal_read_length = 0;
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 0xFFFFFFFF);
+ pm8001_ha->non_fatal_count = 0;
+ return (buf_copy - buf);
+ } else if (reg_val ==
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA) {
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 2);
+ } else if ((reg_val == MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) ||
+ (pm8001_ha->non_fatal_read_length >= total_len)) {
+ pm8001_ha->non_fatal_read_length = 0;
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 4);
+ pm8001_ha->non_fatal_count = 0;
+ }
+ accum_len = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+ output_length = accum_len -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+
+ for (index = 0; index < output_length/4; index++)
+ buf_copy += snprintf(buf_copy, PAGE_SIZE,
+ "%08x ", *(temp+index));
+
+ pm8001_ha->non_fatal_read_length += output_length;
+
+ /* store current accumulated length to use in next iteration as
+ * the previous accumulated length
+ */
+ pm8001_ha->forensic_preserved_accumulated_transfer = accum_len;
+ return (buf_copy - buf);
+}
+
/**
* read_main_config_table - read the configure table and save it.
* @pm8001_ha: our hba card information
@@ -1438,11 +1568,18 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (!pm8001_ha->controller_fatal_error) {
/* Check if MPI is in ready state to reset */
if (mpi_uninit_check(pm8001_ha) != 0) {
- regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ u32 r0 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+ u32 r1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ u32 r2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+ u32 r3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "MPI state is not ready scratch1 :0x%x\n",
- regval));
- return -1;
+ "MPI state is not ready scratch: %x:%x:%x:%x\n",
+ r0, r1, r2, r3));
+ /* If the MPI state is not ready but the bootloader state
+ * is OK, try the reset anyway.
+ */
+ if (r1 & SCRATCH_PAD1_BOOTSTATE_MASK)
+ return -1;
}
}
/* checked for reset register normal state; 0x0 */
@@ -3708,28 +3845,32 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
+ u32 tag;
u8 page_code;
+ int rc = 0;
struct set_phy_profile_resp *pPayload =
(struct set_phy_profile_resp *)(piomb + 4);
u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
u32 status = le32_to_cpu(pPayload->status);
+ tag = le32_to_cpu(pPayload->tag);
page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
if (status) {
/* status is FAILED */
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("PhyProfile command failed with status "
"0x%08X \n", status));
- return -1;
+ rc = -1;
} else {
if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Invalid page code 0x%X\n",
page_code));
- return -1;
+ rc = -1;
}
}
- return 0;
+ pm8001_tag_free(pm8001_ha, tag);
+ return rc;
}
/**
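[Editor's note] Steps 4.1 and 4.2 of the non-fatal dump added earlier in this file poll a register for up to two seconds against a jiffies deadline. A hedged sketch of that bounded-poll idiom (this variant sleeps between reads, where the code above spins):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

static int poll_bit_clear(void __iomem *reg, u32 mask, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (!(readl(reg) & mask))
			return 0;		/* bit cleared within the window */
		usleep_range(100, 200);
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}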
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index a4f7eb8f50a3..15c962108075 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -623,7 +623,7 @@ struct pmcraid_aen_msg {
u32 hostno;
u32 length;
u8 reserved[8];
- u8 data[0];
+ u8 data[];
};
/* Controller state event message type */
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 604856e72cfb..5b19f5175c5c 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1577,8 +1577,7 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
- u8 buf[8];
- int i, pos;
+ u64 dsn;
/*
* fdmi_enabled needs to be set for libfc to execute FDMI registration.
@@ -1591,18 +1590,11 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
*/
/* Get the PCI-e Device Serial Number Capability */
- pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- pos += 4;
- for (i = 0; i < 8; i++)
- pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
-
+ dsn = pci_get_dsn(qedf->pdev);
+ if (dsn)
snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number),
- "%02X%02X%02X%02X%02X%02X%02X%02X",
- buf[7], buf[6], buf[5], buf[4],
- buf[3], buf[2], buf[1], buf[0]);
- } else
+ sizeof(fc_host->serial_number), "%016llX", dsn);
+ else
snprintf(fc_host->serial_number,
sizeof(fc_host->serial_number), "Unknown");
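[Editor's note] The qedf hunk replaces a byte-by-byte walk of the Device Serial Number extended capability with pci_get_dsn(), which returns the 64-bit DSN or 0 when the capability is absent. A hedged sketch:

#include <linux/pci.h>

static void fill_serial(struct pci_dev *pdev, char *buf, size_t len)
{
	u64 dsn = pci_get_dsn(pdev);	/* 0 means no DSN capability */

	if (dsn)
		snprintf(buf, len, "%016llX", dsn);
	else
		snprintf(buf, len, "Unknown");
}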
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 9513fd320ffd..9498279ae80d 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -36,6 +36,7 @@ struct qedi_endpoint;
*/
#define QEDI_MODE_NORMAL 0
#define QEDI_MODE_RECOVERY 1
+#define QEDI_MODE_SHUTDOWN 2
#define ISCSI_WQE_SET_PTU_INVALIDATE 1
#define QEDI_MAX_ISCSI_TASK 4096
@@ -278,6 +279,7 @@ struct qedi_ctx {
#define QEDI_IOTHREAD_WAKE 2
#define QEDI_IN_RECOVERY 5
#define QEDI_IN_OFFLINE 6
+#define QEDI_IN_SHUTDOWN 7
u8 mac[ETH_ALEN];
u32 src_ip[4];
@@ -331,6 +333,7 @@ struct qedi_ctx {
u16 ll2_mtu;
struct workqueue_struct *dpc_wq;
+ struct delayed_work recovery_work;
spinlock_t task_idx_lock; /* To protect gbl context */
s32 last_tidx_alloc;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 8ba7c771ce4d..116645c08c71 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -73,5 +73,6 @@ void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
void qedi_clearsq(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn,
struct iscsi_task *task);
+void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess);
#endif
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 8829880a54c3..1f4a5fb00a05 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -392,6 +392,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_ep->conn = qedi_conn;
qedi_conn->ep = qedi_ep;
+ qedi_conn->iscsi_ep = ep;
qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
qedi_conn->fw_cid = qedi_ep->fw_cid;
qedi_conn->cmd_cleanup_req = 0;
@@ -782,6 +783,9 @@ static int qedi_task_xmit(struct iscsi_task *task)
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags))
+ return -ENODEV;
+
cmd->state = 0;
cmd->task = NULL;
cmd->use_slowpath = false;
@@ -1596,6 +1600,20 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep,
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
+void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *session = cls_sess->dd_data;
+ struct iscsi_conn *conn = session->leadconn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+
+ if (iscsi_is_session_online(cls_sess))
+ qedi_ep_disconnect(qedi_conn->iscsi_ep);
+
+ qedi_conn_destroy(qedi_conn->cls_conn);
+
+ qedi_session_destroy(cls_sess);
+}
+
void qedi_process_tcp_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data)
{
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 67c3b7349271..39dc27c85e3c 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -149,6 +149,7 @@ struct qedi_conn {
struct iscsi_cls_conn *cls_conn;
struct qedi_ctx *qedi;
struct qedi_endpoint *ep;
+ struct iscsi_endpoint *iscsi_ep;
struct list_head active_cmd_list;
spinlock_t list_lock; /* internal conn lock */
u32 active_cmd_count;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index acb930b8c6a6..b995b19865ca 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -58,6 +58,7 @@ static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
+static void qedi_recovery_handler(struct work_struct *work);
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@@ -1113,6 +1114,20 @@ exit_get_data:
return;
}
+static void qedi_schedule_recovery_handler(void *dev)
+{
+ struct qedi_ctx *qedi = dev;
+
+ QEDI_ERR(&qedi->dbg_ctx, "Recovery handler scheduled.\n");
+
+ if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags))
+ return;
+
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+ schedule_delayed_work(&qedi->recovery_work, 0);
+}
+
static void qedi_link_update(void *dev, struct qed_link_output *link)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
@@ -1130,6 +1145,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
static struct qed_iscsi_cb_ops qedi_cb_ops = {
{
.link_update = qedi_link_update,
+ .schedule_recovery_handler = qedi_schedule_recovery_handler,
.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
.get_generic_tlv_data = qedi_get_generic_tlv_data,
}
@@ -2328,16 +2344,22 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
int rval;
- if (qedi->tmf_thread) {
- flush_workqueue(qedi->tmf_thread);
- destroy_workqueue(qedi->tmf_thread);
- qedi->tmf_thread = NULL;
- }
+ if (mode == QEDI_MODE_SHUTDOWN)
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_clear_session_ctx);
+
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
+ if (qedi->tmf_thread) {
+ flush_workqueue(qedi->tmf_thread);
+ destroy_workqueue(qedi->tmf_thread);
+ qedi->tmf_thread = NULL;
+ }
- if (qedi->offload_thread) {
- flush_workqueue(qedi->offload_thread);
- destroy_workqueue(qedi->offload_thread);
- qedi->offload_thread = NULL;
+ if (qedi->offload_thread) {
+ flush_workqueue(qedi->offload_thread);
+ destroy_workqueue(qedi->offload_thread);
+ qedi->offload_thread = NULL;
+ }
}
#ifdef CONFIG_DEBUG_FS
@@ -2353,8 +2375,7 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_ops->ll2->stop(qedi->cdev);
}
- if (mode == QEDI_MODE_NORMAL)
- qedi_free_iscsi_pf_param(qedi);
+ qedi_free_iscsi_pf_param(qedi);
rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
if (rval)
@@ -2367,15 +2388,12 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_destroy_fp(qedi);
- if (mode == QEDI_MODE_NORMAL) {
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
qedi_release_cid_que(qedi);
qedi_cm_free_mem(qedi);
qedi_free_uio(qedi->udev);
qedi_free_itt(qedi);
- iscsi_host_remove(qedi->shost);
- iscsi_host_free(qedi->shost);
-
if (qedi->ll2_recv_thread) {
kthread_stop(qedi->ll2_recv_thread);
qedi->ll2_recv_thread = NULL;
@@ -2384,9 +2402,22 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
if (qedi->boot_kset)
iscsi_boot_destroy_kset(qedi->boot_kset);
+
+ iscsi_host_remove(qedi->shost);
+ iscsi_host_free(qedi->shost);
}
}
+static void qedi_shutdown(struct pci_dev *pdev)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ QEDI_ERR(&qedi->dbg_ctx, "%s: Shutdown qedi\n", __func__);
+ if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
+ return;
+ __qedi_remove(pdev, QEDI_MODE_SHUTDOWN);
+}
+
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi;
@@ -2435,14 +2466,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qedi->dev_info.common.num_hwfns,
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
- if (mode != QEDI_MODE_RECOVERY) {
- rc = qedi_set_iscsi_pf_param(qedi);
- if (rc) {
- rc = -ENOMEM;
- QEDI_ERR(&qedi->dbg_ctx,
- "Set iSCSI pf param fail\n");
- goto free_host;
- }
+ rc = qedi_set_iscsi_pf_param(qedi);
+ if (rc) {
+ rc = -ENOMEM;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Set iSCSI pf param fail\n");
+ goto free_host;
}
qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
@@ -2633,6 +2662,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
goto free_cid_que;
}
+ INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler);
+
/* F/w needs 1st task context memory entry for performance */
set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
atomic_set(&qedi->num_offloads, 0);
@@ -2673,6 +2704,32 @@ exit_probe:
return rc;
}
+static void qedi_mark_conn_recovery(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn = session->leadconn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+
+ iscsi_conn_failure(qedi_conn->cls_conn->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+static void qedi_recovery_handler(struct work_struct *work)
+{
+ struct qedi_ctx *qedi =
+ container_of(work, struct qedi_ctx, recovery_work.work);
+
+ iscsi_host_for_each_session(qedi->shost, qedi_mark_conn_recovery);
+
+ /* Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qedi_ops->common->recovery_prolog(qedi->cdev);
+
+ __qedi_remove(qedi->pdev, QEDI_MODE_RECOVERY);
+ __qedi_probe(qedi->pdev, QEDI_MODE_RECOVERY);
+ clear_bit(QEDI_IN_RECOVERY, &qedi->flags);
+}
+
static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
return __qedi_probe(pdev, QEDI_MODE_NORMAL);
@@ -2697,6 +2754,7 @@ static struct pci_driver qedi_pci_driver = {
.id_table = qedi_pci_tbl,
.probe = qedi_probe,
.remove = qedi_remove,
+ .shutdown = qedi_shutdown,
};
static int __init qedi_init(void)
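[Editor's note] The qedi recovery plumbing above follows the usual firmware-recovery shape: the qed core calls schedule_recovery_handler in atomic context, the driver sets a guard bit and queues delayed work, and the work item tears down and re-probes in process context. A hedged sketch of the skeleton with hypothetical names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

struct my_ctx {
	unsigned long flags;
#define MY_IN_RECOVERY 0
	struct delayed_work recovery_work;
};

static void my_recovery_fn(struct work_struct *work)
{
	struct my_ctx *ctx =
		container_of(work, struct my_ctx, recovery_work.work);

	/* remove + re-probe would go here, then drop the guard bit */
	clear_bit(MY_IN_RECOVERY, &ctx->flags);
}

static void my_schedule_recovery(struct my_ctx *ctx)
{
	if (test_and_set_bit(MY_IN_RECOVERY, &ctx->flags))
		return;			/* recovery already in flight */
	schedule_delayed_work(&ctx->recovery_work, 0);
}

INIT_DELAYED_WORK(&ctx->recovery_work, my_recovery_fn) at probe time completes the wiring, mirroring the INIT_DELAYED_WORK call added in __qedi_probe.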
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d7e7043f9eab..2c9e5ac24692 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1324,6 +1324,79 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t led[3] = { 0 };
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ if (ql26xx_led_config(vha, 0, led))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
+ led[0], led[1], led[2]);
+}
+
+static ssize_t
+qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t options = BIT_0;
+ uint16_t led[3] = { 0 };
+ uint16_t word[4];
+ int n;
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
+ if (n == 4) {
+ if (word[0] == 3) {
+ options |= BIT_3|BIT_2|BIT_1;
+ led[0] = word[1];
+ led[1] = word[2];
+ led[2] = word[3];
+ goto write;
+ }
+ return -EINVAL;
+ }
+
+ if (n == 2) {
+ /* check led index */
+ if (word[0] == 0) {
+ options |= BIT_2;
+ led[0] = word[1];
+ goto write;
+ }
+ if (word[0] == 1) {
+ options |= BIT_3;
+ led[1] = word[1];
+ goto write;
+ }
+ if (word[0] == 2) {
+ options |= BIT_1;
+ led[2] = word[1];
+ goto write;
+ }
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+
+write:
+ if (ql26xx_led_config(vha, options, led))
+ return -EFAULT;
+
+ return count;
+}
+
+static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1777,9 +1850,6 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- ql_log(ql_log_info, vha, 0x70d6,
- "port speed:%d\n", ha->link_data_rate);
-
return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
}
@@ -2250,6 +2320,26 @@ qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
}
+static ssize_t
+qla2x00_dport_diagnostics_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+ !IS_QLA28XX(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ if (!*vha->dport_data)
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
+ vha->dport_data[0], vha->dport_data[1],
+ vha->dport_data[2], vha->dport_data[3]);
+}
+static DEVICE_ATTR(dport_diagnostics, 0444,
+ qla2x00_dport_diagnostics_show, NULL);
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -2264,6 +2354,8 @@ static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
qla2x00_beacon_store);
+static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
+ qla2x00_beacon_config_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
@@ -2327,6 +2419,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_zio,
&dev_attr_zio_timer,
&dev_attr_beacon,
+ &dev_attr_beacon_config,
&dev_attr_optrom_bios_version,
&dev_attr_optrom_efi_version,
&dev_attr_optrom_fcode_version,
@@ -2355,6 +2448,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_port_speed,
&dev_attr_port_no,
&dev_attr_fw_attr,
+ &dev_attr_dport_diagnostics,
NULL, /* reserve for qlini_mode */
NULL, /* reserve for ql2xiniexchg */
NULL, /* reserve for ql2xexchoffld */
@@ -2648,22 +2742,28 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (rval != QLA_SUCCESS)
goto done_free;
- p->link_failure_count = stats->link_fail_cnt;
- p->loss_of_sync_count = stats->loss_sync_cnt;
- p->loss_of_signal_count = stats->loss_sig_cnt;
- p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
- p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
- p->invalid_crc_count = stats->inval_crc_cnt;
+ p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
+ p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
+ p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
+ p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
+ p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
+ p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
if (IS_FWI2_CAPABLE(ha)) {
- p->lip_count = stats->lip_cnt;
- p->tx_frames = stats->tx_frames;
- p->rx_frames = stats->rx_frames;
- p->dumped_frames = stats->discarded_frames;
- p->nos_count = stats->nos_rcvd;
+ p->lip_count = le32_to_cpu(stats->lip_cnt);
+ p->tx_frames = le32_to_cpu(stats->tx_frames);
+ p->rx_frames = le32_to_cpu(stats->rx_frames);
+ p->dumped_frames = le32_to_cpu(stats->discarded_frames);
+ p->nos_count = le32_to_cpu(stats->nos_rcvd);
p->error_frames =
- stats->dropped_frames + stats->discarded_frames;
- p->rx_words = vha->qla_stats.input_bytes;
- p->tx_words = vha->qla_stats.output_bytes;
+ le32_to_cpu(stats->dropped_frames) +
+ le32_to_cpu(stats->discarded_frames);
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
+ p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
+ } else {
+ p->rx_words = vha->qla_stats.input_bytes;
+ p->tx_words = vha->qla_stats.output_bytes;
+ }
}
p->fcp_control_requests = vha->qla_stats.control_requests;
p->fcp_input_requests = vha->qla_stats.input_requests;
@@ -2671,7 +2771,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
p->seconds_since_last_reset =
- get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
+ get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
do_div(p->seconds_since_last_reset, HZ);
done_free:
@@ -2928,11 +3028,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
msleep(1000);
- qla_nvme_delete(vha);
qla24xx_disable_vp(vha);
qla2x00_wait_for_sess_deletion(vha);
+ qla_nvme_delete(vha);
vha->flags.delete_progress = 1;
qlt_remove_target(ha, vha);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index d7169e43f5e1..97b51c477972 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -11,6 +11,14 @@
#include <linux/delay.h>
#include <linux/bsg-lib.h>
+static void qla2xxx_free_fcport_work(struct work_struct *work)
+{
+ struct fc_port *fcport = container_of(work, typeof(*fcport),
+ free_work);
+
+ qla2x00_free_fcport(fcport);
+}
+
/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
@@ -53,8 +61,10 @@ void qla2x00_bsg_sp_free(srb_t *sp)
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_FXIOCB_BCMD ||
- sp->type == SRB_ELS_CMD_HST)
- qla2x00_free_fcport(sp->fcport);
+ sp->type == SRB_ELS_CMD_HST) {
+ INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
+ queue_work(ha->wq, &sp->fcport->free_work);
+ }
qla2x00_rel_sp(sp);
}
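The change above defers freeing of the fcport from the srb release path (which may run in atomic context) to the driver's workqueue, where the teardown can sleep. The general pattern, as a minimal self-contained sketch with hypothetical names (the driver itself queues onto ha->wq):

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct demo_port {
		struct work_struct free_work;
		/* ... payload ... */
	};

	static void demo_port_free_fn(struct work_struct *work)
	{
		struct demo_port *p =
		    container_of(work, struct demo_port, free_work);

		kfree(p);		/* process context, may sleep */
	}

	/* Callable from atomic context; the free itself is deferred. */
	static void demo_port_put(struct demo_port *p)
	{
		INIT_WORK(&p->free_work, demo_port_free_fn);
		schedule_work(&p->free_work);
	}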
@@ -718,7 +728,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
uint16_t response[MAILBOX_REGISTER_COUNT];
uint16_t config[4], new_config[4];
uint8_t *fw_sts_ptr;
- uint8_t *req_data = NULL;
+ void *req_data = NULL;
dma_addr_t req_data_dma;
uint32_t req_data_len;
uint8_t *rsp_data = NULL;
@@ -796,10 +806,11 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
bsg_request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
- (ha->current_topology == ISP_CFG_F ||
- (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
- req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
- elreq.options == EXTERNAL_LOOPBACK) {
+ ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
+ get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
+ req_data_len == MAX_ELS_FRAME_PAYLOAD &&
+ elreq.options == EXTERNAL_LOOPBACK))) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
ql_dbg(ql_dbg_user, vha, 0x701e,
"BSG request type: %s.\n", type);
@@ -1506,10 +1517,15 @@ qla2x00_update_optrom(struct bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
- ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+ rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
- bsg_reply->result = DID_OK;
+ if (rval) {
+ bsg_reply->result = -EINVAL;
+ rval = -EINVAL;
+ } else {
+ bsg_reply->result = DID_OK;
+ }
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
@@ -2404,7 +2420,7 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
regions.global_image = active_regions.global;
if (IS_QLA28XX(ha)) {
- qla27xx_get_active_image(vha, &active_regions);
+ qla28xx_get_aux_images(vha, &active_regions);
regions.board_config = active_regions.aux.board_config;
regions.vpd_nvram = active_regions.aux.vpd_nvram;
regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 88a56e8480f7..bf1e98f11990 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -73,6 +73,8 @@
#include "qla_def.h"
#include <linux/delay.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/qla.h>
static uint32_t ql_dbg_offset = 0x800;
@@ -2538,14 +2540,30 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
va_list va;
struct va_format vaf;
- if (!ql_mask_match(level))
- return;
-
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
+ if (!ql_mask_match(level)) {
+ char pbuf[64];
+
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id,
+ vha->host_no);
+ } else {
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, "0000:00:00.0", id);
+ }
+ pbuf[sizeof(pbuf) - 1] = 0;
+ trace_ql_dbg_log(pbuf, &vaf);
+ va_end(va);
+ return;
+ }
+
if (vha != NULL) {
const struct pci_dev *pdev = vha->hw->pdev;
/* <module-name> <pci-name> <msg-id>:<host> Message */
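With this hunk, messages that fail the ql_mask_match() level check are no longer silently dropped: the usual message header is formatted into a small stack buffer and handed to the new ql_dbg_log tracepoint (instantiated in this file via CREATE_TRACE_POINTS and trace/events/qla.h above), so masked-off debug output can still be collected through the kernel's tracing infrastructure without raising the driver's debug mask.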
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ed32e9715794..47c7a56438b5 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -119,7 +119,10 @@ typedef struct {
#define LSD(x) ((uint32_t)((uint64_t)(x)))
#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
-#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
+static inline uint32_t make_handle(uint16_t x, uint16_t y)
+{
+ return ((uint32_t)x << 16) | y;
+}
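Turning the old MAKE_HANDLE() macro into a static inline gives the two arguments real 16-bit types (they are narrowed up front rather than via casts in the expansion), with identical results at every call site. For illustration:

	/* (queue id << 16) | index -- same value the macro produced */
	uint32_t handle = make_handle(0x0001, 0x002a);	/* == 0x0001002a */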
/*
* I/O register
@@ -414,7 +417,7 @@ struct els_logo_payload {
struct els_plogi_payload {
uint8_t opcode;
uint8_t rsvd[3];
- uint8_t data[112];
+ __be32 data[112 / 4];
};
struct ct_arg {
@@ -597,9 +600,6 @@ typedef struct srb {
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
- unsigned int abort:1;
- unsigned int aborted:1;
- unsigned int completed:1;
uint32_t handle;
uint16_t flags;
@@ -1049,6 +1049,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBA_TEMPERATURE_ALERT 0x8070 /* Temperature Alert */
#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */
#define MBA_TRANS_INSERT 0x8130 /* Transceiver Insertion */
+#define MBA_TRANS_REMOVE 0x8131 /* Transceiver Removal */
#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
@@ -1134,6 +1135,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */
#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT 0x34 /* Memory Offload ctrl/Stat*/
#define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */
+#define MBC_SET_GET_FC_LED_CONFIG 0x3b /* Set/Get FC LED config */
#define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */
#define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */
#define MBC_GET_RESOURCE_COUNTS 0x42 /* Get Resource Counts. */
@@ -1260,10 +1262,15 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_ELS_CMD 0x5
#define RNID_TYPE_PORT_LOGIN 0x7
+#define RNID_BUFFER_CREDITS 0x8
#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
+#define ELS_CMD_MAP_SIZE 32
+#define ELS_COMMAND_RDP 0x18
+
/*
* Firmware state codes from get firmware state mailbox command
*/
@@ -1474,47 +1481,44 @@ typedef struct {
#define GLSO_USE_DID BIT_3
struct link_statistics {
- uint32_t link_fail_cnt;
- uint32_t loss_sync_cnt;
- uint32_t loss_sig_cnt;
- uint32_t prim_seq_err_cnt;
- uint32_t inval_xmit_word_cnt;
- uint32_t inval_crc_cnt;
- uint32_t lip_cnt;
- uint32_t link_up_cnt;
- uint32_t link_down_loop_init_tmo;
- uint32_t link_down_los;
- uint32_t link_down_loss_rcv_clk;
+ __le32 link_fail_cnt;
+ __le32 loss_sync_cnt;
+ __le32 loss_sig_cnt;
+ __le32 prim_seq_err_cnt;
+ __le32 inval_xmit_word_cnt;
+ __le32 inval_crc_cnt;
+ __le32 lip_cnt;
+ __le32 link_up_cnt;
+ __le32 link_down_loop_init_tmo;
+ __le32 link_down_los;
+ __le32 link_down_loss_rcv_clk;
uint32_t reserved0[5];
- uint32_t port_cfg_chg;
+ __le32 port_cfg_chg;
uint32_t reserved1[11];
- uint32_t rsp_q_full;
- uint32_t atio_q_full;
- uint32_t drop_ae;
- uint32_t els_proto_err;
- uint32_t reserved2;
- uint32_t tx_frames;
- uint32_t rx_frames;
- uint32_t discarded_frames;
- uint32_t dropped_frames;
+ __le32 rsp_q_full;
+ __le32 atio_q_full;
+ __le32 drop_ae;
+ __le32 els_proto_err;
+ __le32 reserved2;
+ __le32 tx_frames;
+ __le32 rx_frames;
+ __le32 discarded_frames;
+ __le32 dropped_frames;
uint32_t reserved3;
- uint32_t nos_rcvd;
+ __le32 nos_rcvd;
uint32_t reserved4[4];
- uint32_t tx_prjt;
- uint32_t rcv_exfail;
- uint32_t rcv_abts;
- uint32_t seq_frm_miss;
- uint32_t corr_err;
- uint32_t mb_rqst;
- uint32_t nport_full;
- uint32_t eofa;
+ __le32 tx_prjt;
+ __le32 rcv_exfail;
+ __le32 rcv_abts;
+ __le32 seq_frm_miss;
+ __le32 corr_err;
+ __le32 mb_rqst;
+ __le32 nport_full;
+ __le32 eofa;
uint32_t reserved5;
- uint32_t fpm_recv_word_cnt_lo;
- uint32_t fpm_recv_word_cnt_hi;
- uint32_t fpm_disc_word_cnt_lo;
- uint32_t fpm_disc_word_cnt_hi;
- uint32_t fpm_xmit_word_cnt_lo;
- uint32_t fpm_xmit_word_cnt_hi;
+ __le64 fpm_recv_word_cnt;
+ __le64 fpm_disc_word_cnt;
+ __le64 fpm_xmit_word_cnt;
uint32_t reserved6[70];
};
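The lo/hi uint32_t pairs collapse into single __le64 fields, and every counter now carries its wire byte order in the type, so sparse can flag any access that skips the le32_to_cpu()/le64_to_cpu() conversion (as the qla2x00_get_fc_host_stats() hunk above now performs). A minimal sketch of the annotation pattern, using a hypothetical reduced struct:

	#include <linux/types.h>	/* __le32/__le64 */

	struct demo_stats {
		__le32 link_fail_cnt;		/* little-endian on the wire */
		__le64 fpm_recv_word_cnt;	/* was two __le32 halves */
	};

	static u64 demo_rx_words(const struct demo_stats *s)
	{
		/* sparse warns if the __le64 is used without conversion */
		return le64_to_cpu(s->fpm_recv_word_cnt);
	}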
@@ -2624,10 +2628,11 @@ static const char * const port_dstate_str[] = {
#define GFF_ID_RSP_SIZE (16 + 128)
/*
- * HBA attribute types.
+ * FDMI HBA attribute types.
*/
-#define FDMI_HBA_ATTR_COUNT 9
-#define FDMIV2_HBA_ATTR_COUNT 17
+#define FDMI1_HBA_ATTR_COUNT 9
+#define FDMI2_HBA_ATTR_COUNT 17
+
#define FDMI_HBA_NODE_NAME 0x1
#define FDMI_HBA_MANUFACTURER 0x2
#define FDMI_HBA_SERIAL_NUMBER 0x3
@@ -2639,12 +2644,13 @@ static const char * const port_dstate_str[] = {
#define FDMI_HBA_FIRMWARE_VERSION 0x9
#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
+
#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc
-#define FDMI_HBA_VENDOR_ID 0xd
+#define FDMI_HBA_VENDOR_SPECIFIC_INFO 0xd
#define FDMI_HBA_NUM_PORTS 0xe
#define FDMI_HBA_FABRIC_NAME 0xf
#define FDMI_HBA_BOOT_BIOS_NAME 0x10
-#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER 0xe0
+#define FDMI_HBA_VENDOR_IDENTIFIER 0xe0
struct ct_fdmi_hba_attr {
uint16_t type;
@@ -2661,31 +2667,9 @@ struct ct_fdmi_hba_attr {
uint8_t fw_version[32];
uint8_t os_version[128];
uint32_t max_ct_len;
- } a;
-};
-
-struct ct_fdmi_hba_attributes {
- uint32_t count;
- struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
-};
-struct ct_fdmiv2_hba_attr {
- uint16_t type;
- uint16_t len;
- union {
- uint8_t node_name[WWN_SIZE];
- uint8_t manufacturer[64];
- uint8_t serial_num[32];
- uint8_t model[16+1];
- uint8_t model_desc[80];
- uint8_t hw_version[16];
- uint8_t driver_version[32];
- uint8_t orom_version[16];
- uint8_t fw_version[32];
- uint8_t os_version[128];
- uint32_t max_ct_len;
uint8_t sym_name[256];
- uint32_t vendor_id;
+ uint32_t vendor_specific_info;
uint32_t num_ports;
uint8_t fabric_name[WWN_SIZE];
uint8_t bios_name[32];
@@ -2693,22 +2677,30 @@ struct ct_fdmiv2_hba_attr {
} a;
};
-struct ct_fdmiv2_hba_attributes {
+struct ct_fdmi1_hba_attributes {
uint32_t count;
- struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
+ struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT];
+};
+
+struct ct_fdmi2_hba_attributes {
+ uint32_t count;
+ struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT];
};
/*
- * Port attribute types.
+ * FDMI Port attribute types.
*/
-#define FDMI_PORT_ATTR_COUNT 6
-#define FDMIV2_PORT_ATTR_COUNT 16
+#define FDMI1_PORT_ATTR_COUNT 6
+#define FDMI2_PORT_ATTR_COUNT 16
+#define FDMI2_SMARTSAN_PORT_ATTR_COUNT 23
+
#define FDMI_PORT_FC4_TYPES 0x1
#define FDMI_PORT_SUPPORT_SPEED 0x2
#define FDMI_PORT_CURRENT_SPEED 0x3
#define FDMI_PORT_MAX_FRAME_SIZE 0x4
#define FDMI_PORT_OS_DEVICE_NAME 0x5
#define FDMI_PORT_HOST_NAME 0x6
+
#define FDMI_PORT_NODE_NAME 0x7
#define FDMI_PORT_NAME 0x8
#define FDMI_PORT_SYM_NAME 0x9
@@ -2718,7 +2710,15 @@ struct ct_fdmiv2_hba_attributes {
#define FDMI_PORT_FC4_TYPE 0xd
#define FDMI_PORT_STATE 0x101
#define FDMI_PORT_COUNT 0x102
-#define FDMI_PORT_ID 0x103
+#define FDMI_PORT_IDENTIFIER 0x103
+
+#define FDMI_SMARTSAN_SERVICE 0xF100
+#define FDMI_SMARTSAN_GUID 0xF101
+#define FDMI_SMARTSAN_VERSION 0xF102
+#define FDMI_SMARTSAN_PROD_NAME 0xF103
+#define FDMI_SMARTSAN_PORT_INFO 0xF104
+#define FDMI_SMARTSAN_QOS_SUPPORT 0xF105
+#define FDMI_SMARTSAN_SECURITY_SUPPORT 0xF106
#define FDMI_PORT_SPEED_1GB 0x1
#define FDMI_PORT_SPEED_2GB 0x2
@@ -2734,7 +2734,7 @@ struct ct_fdmiv2_hba_attributes {
#define FC_CLASS_3 0x08
#define FC_CLASS_2_3 0x0C
-struct ct_fdmiv2_port_attr {
+struct ct_fdmi_port_attr {
uint16_t type;
uint16_t len;
union {
@@ -2744,6 +2744,7 @@ struct ct_fdmiv2_port_attr {
uint32_t max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[256];
+
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
@@ -2754,35 +2755,38 @@ struct ct_fdmiv2_port_attr {
uint32_t port_state;
uint32_t num_ports;
uint32_t port_id;
+
+ uint8_t smartsan_service[24];
+ uint8_t smartsan_guid[16];
+ uint8_t smartsan_version[24];
+ uint8_t smartsan_prod_name[16];
+ uint32_t smartsan_port_info;
+ uint32_t smartsan_qos_support;
+ uint32_t smartsan_security_support;
} a;
};
-/*
- * Port Attribute Block.
- */
-struct ct_fdmiv2_port_attributes {
+struct ct_fdmi1_port_attributes {
uint32_t count;
- struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
-};
-
-struct ct_fdmi_port_attr {
- uint16_t type;
- uint16_t len;
- union {
- uint8_t fc4_types[32];
- uint32_t sup_speed;
- uint32_t cur_speed;
- uint32_t max_frame_size;
- uint8_t os_dev_name[32];
- uint8_t host_name[256];
- } a;
+ struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT];
};
-struct ct_fdmi_port_attributes {
+struct ct_fdmi2_port_attributes {
uint32_t count;
- struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
+ struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT];
};
+#define FDMI_ATTR_TYPELEN(obj) \
+ (sizeof((obj)->type) + sizeof((obj)->len))
+
+#define FDMI_ATTR_ALIGNMENT(len) \
+ (4 - ((len) & 3))
+
+/* FDMI register call options */
+#define CALLOPT_FDMI1 0
+#define CALLOPT_FDMI2 1
+#define CALLOPT_FDMI2_SMARTSAN 2
+
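Each FDMI attribute is a 4-byte type/length header followed by a payload padded to a 4-byte boundary; since FDMI_ATTR_ALIGNMENT() yields 1 to 4 (4 when the length is already aligned), a string payload always gains at least one pad byte for its NUL terminator. A worked example for an 18-character string:

	alen = 18;				/* scnprintf() result */
	alen += FDMI_ATTR_ALIGNMENT(alen);	/* 4 - (18 & 3) = 2 -> 20 */
	alen += FDMI_ATTR_TYPELEN(eiter);	/* + 2-byte type + 2-byte len */
	eiter->len = cpu_to_be16(alen);		/* 24; the cursor advances 24 */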
/* FDMI definitions. */
#define GRHL_CMD 0x100
#define GHAT_CMD 0x101
@@ -2793,10 +2797,13 @@ struct ct_fdmi_port_attributes {
#define RHBA_RSP_SIZE 16
#define RHAT_CMD 0x201
+
#define RPRT_CMD 0x210
+#define RPRT_RSP_SIZE 24
#define RPA_CMD 0x211
#define RPA_RSP_SIZE 16
+#define SMARTSAN_RPA_RSP_SIZE 24
#define DHBA_CMD 0x300
#define DHBA_REQ_SIZE (16 + 8)
@@ -2879,30 +2886,24 @@ struct ct_sns_req {
uint8_t hba_identifier[8];
uint32_t entry_count;
uint8_t port_name[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi2_hba_attributes attrs;
} rhba;
struct {
uint8_t hba_identifier[8];
- uint32_t entry_count;
- uint8_t port_name[8];
- struct ct_fdmiv2_hba_attributes attrs;
- } rhba2;
-
- struct {
- uint8_t hba_identifier[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi1_hba_attributes attrs;
} rhat;
struct {
uint8_t port_name[8];
- struct ct_fdmi_port_attributes attrs;
+ struct ct_fdmi2_port_attributes attrs;
} rpa;
struct {
+ uint8_t hba_identifier[8];
uint8_t port_name[8];
- struct ct_fdmiv2_port_attributes attrs;
- } rpa2;
+ struct ct_fdmi2_port_attributes attrs;
+ } rprt;
struct {
uint8_t port_name[8];
@@ -3016,7 +3017,7 @@ struct ct_sns_rsp {
struct {
uint32_t entry_count;
uint8_t port_name[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi1_hba_attributes attrs;
} ghat;
struct {
@@ -3250,6 +3251,7 @@ struct isp_operations {
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS 0x04
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
@@ -3562,6 +3564,134 @@ struct qlfc_fw {
uint32_t len;
};
+struct rdp_req_payload {
+ uint32_t els_request;
+ uint32_t desc_list_len;
+
+ /* NPIV descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t reserved;
+ uint8_t nport_id[3];
+ } npiv_desc;
+};
+
+struct rdp_rsp_payload {
+ struct {
+ uint32_t cmd;
+ uint32_t len;
+ } hdr;
+
+ /* LS Request Info descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint32_t req_payload_word_0;
+ } ls_req_info_desc;
+
+ /* LS Request Info descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint32_t req_payload_word_0;
+ } ls_req_info_desc2;
+
+ /* SFP diagnostic param descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint16_t temperature;
+ uint16_t vcc;
+ uint16_t tx_bias;
+ uint16_t tx_power;
+ uint16_t rx_power;
+ uint16_t sfp_flags;
+ } sfp_diag_desc;
+
+ /* Port Speed Descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint16_t speed_capab;
+ uint16_t operating_speed;
+ } port_speed_desc;
+
+ /* Link Error Status Descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint32_t link_fail_cnt;
+ uint32_t loss_sync_cnt;
+ uint32_t loss_sig_cnt;
+ uint32_t prim_seq_err_cnt;
+ uint32_t inval_xmit_word_cnt;
+ uint32_t inval_crc_cnt;
+ uint8_t pn_port_phy_type;
+ uint8_t reserved[3];
+ } ls_err_desc;
+
+ /* Port name description with diag param */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t WWNN[WWN_SIZE];
+ uint8_t WWPN[WWN_SIZE];
+ } port_name_diag_desc;
+
+ /* Port Name desc for Direct attached Fx_Port or Nx_Port */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t WWNN[WWN_SIZE];
+ uint8_t WWPN[WWN_SIZE];
+ } port_name_direct_desc;
+
+ /* Buffer Credit descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint32_t fcport_b2b;
+ uint32_t attached_fcport_b2b;
+ uint32_t fcport_rtt;
+ } buffer_credit_desc;
+
+ /* Optical Element Data Descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint16_t high_alarm;
+ uint16_t low_alarm;
+ uint16_t high_warn;
+ uint16_t low_warn;
+ uint32_t element_flags;
+ } optical_elmt_desc[5];
+
+ /* Optical Product Data Descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t vendor_name[16];
+ uint8_t part_number[16];
+ uint8_t serial_number[16];
+ uint8_t revision[4];
+ uint8_t date[8];
+ } optical_prod_desc;
+};
+
+#define RDP_DESC_LEN(obj) \
+ (sizeof(obj) - sizeof((obj).desc_tag) - sizeof((obj).desc_len))
+
+#define RDP_PORT_SPEED_1GB BIT_15
+#define RDP_PORT_SPEED_2GB BIT_14
+#define RDP_PORT_SPEED_4GB BIT_13
+#define RDP_PORT_SPEED_10GB BIT_12
+#define RDP_PORT_SPEED_8GB BIT_11
+#define RDP_PORT_SPEED_16GB BIT_10
+#define RDP_PORT_SPEED_32GB BIT_9
+#define RDP_PORT_SPEED_64GB BIT_8
+#define RDP_PORT_SPEED_UNKNOWN BIT_0
+
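RDP_DESC_LEN() computes the on-the-wire descriptor length, i.e. the payload size excluding the tag and length words themselves. For example, sfp_diag_desc above is 4 + 4 + six 16-bit fields = 20 bytes, so a responder would fill in (assuming the usual big-endian ELS payload encoding; rsp is a hypothetical struct rdp_rsp_payload pointer):

	rsp->sfp_diag_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp->sfp_diag_desc));	/* 20 - 8 = 12 */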
struct scsi_qlt_host {
void *target_lport_ptr;
struct mutex tgt_mutex;
@@ -3673,8 +3803,8 @@ struct qla_hw_data {
uint32_t fw_started:1;
uint32_t fw_init_done:1;
- uint32_t detected_lr_sfp:1;
- uint32_t using_lr_setting:1;
+ uint32_t lr_detected:1;
+
uint32_t rida_fmt2:1;
uint32_t purge_mbox:1;
uint32_t n2n_bigger:1;
@@ -3683,7 +3813,7 @@ struct qla_hw_data {
} flags;
uint16_t max_exchg;
- uint16_t long_range_distance; /* 32G & above */
+ uint16_t lr_distance; /* 32G & above */
#define LR_DISTANCE_5K 1
#define LR_DISTANCE_10K 0
@@ -3965,6 +4095,8 @@ struct qla_hw_data {
#define SFP_DEV_SIZE 512
#define SFP_BLOCK_SIZE 64
+#define SFP_RTDI_LEN SFP_BLOCK_SIZE
+
void *sfp_data;
dma_addr_t sfp_data_dma;
@@ -4344,6 +4476,15 @@ struct active_regions {
#define QLA_SET_DATA_RATE_NOLR 1
#define QLA_SET_DATA_RATE_LR 2 /* Set speed and initiate LR */
+struct purex_item {
+ struct list_head list;
+ struct scsi_qla_host *vha;
+ void (*process_item)(struct scsi_qla_host *vha, void *pkt);
+ struct {
+ uint8_t iocb[64];
+ } iocb;
+};
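purex_item nodes are queued under purex_list.lock (see the purex_list member and the PROCESS_PUREX_IOCB dpc flag added below) and drained later in process context. A sketch of the drain side, with a hypothetical helper name; splicing the list out keeps the lock hold time constant regardless of backlog:

	static void demo_drain_purex(struct scsi_qla_host *vha)
	{
		struct purex_item *item, *next;
		unsigned long flags;
		LIST_HEAD(todo);

		/* Grab the whole backlog in one step, process it unlocked. */
		spin_lock_irqsave(&vha->purex_list.lock, flags);
		list_splice_init(&vha->purex_list.head, &todo);
		spin_unlock_irqrestore(&vha->purex_list.lock, flags);

		list_for_each_entry_safe(item, next, &todo, list) {
			list_del(&item->list);
			item->process_item(item->vha, &item->iocb);
			kfree(item);
		}
	}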
+
/*
* Qlogic scsi host structure
*/
@@ -4424,6 +4565,8 @@ typedef struct scsi_qla_host {
#define ISP_ABORT_TO_ROM 33
#define VPORT_DELETE 34
+#define PROCESS_PUREX_IOCB 63
+
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
@@ -4461,6 +4604,7 @@ typedef struct scsi_qla_host {
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t fabric_node_name[WWN_SIZE];
+ uint8_t fabric_port_name[WWN_SIZE];
struct nvme_fc_local_port *nvme_local_port;
struct completion nvme_del_done;
@@ -4531,6 +4675,11 @@ typedef struct scsi_qla_host {
uint16_t ql2xexchoffld;
uint16_t ql2xiniexchg;
+ struct purex_list {
+ struct list_head head;
+ spinlock_t lock;
+ } purex_list;
+
struct name_list_extended gnl;
/* Count of active session/fcport */
int fcport_count;
@@ -4540,6 +4689,7 @@ typedef struct scsi_qla_host {
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
+ __le16 dport_data[4];
struct list_head gpnid_list;
struct fab_scan scan;
@@ -4822,11 +4972,14 @@ struct sff_8247_a0 {
u8 resv2[128];
};
-#define AUTO_DETECT_SFP_SUPPORT(_vha)\
- (ql2xautodetectsfp && !_vha->vp_idx && \
- (IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
- IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw) || \
- IS_QLA28XX(_vha->hw)))
+/* BPM -- Buffer Plus Management support. */
+#define IS_BPM_CAPABLE(ha) \
+ (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BPM_RANGE_CAPABLE(ha) \
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BPM_ENABLED(vha) \
+ (ql2xautodetectsfp && !vha->vp_idx && IS_BPM_CAPABLE(vha->hw))
#define FLASH_SEMAPHORE_REGISTER_ADDR 0x00101016
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0a6fb359f4d5..e62b2115235e 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -134,11 +134,11 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
} else {
seq_puts(s, "FW Resource count\n\n");
seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
- seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
- seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
- seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
- seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
- seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+ seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
+ seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
+ seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
+ seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
seq_printf(s, "MAX VP count[%d]\n", mb[11]);
seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
@@ -149,7 +149,6 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
mb[22]);
seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
mb[23]);
-
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d641918cdd46..f9bad5bd7198 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -31,6 +31,9 @@
#define PDO_FORCE_ADISC BIT_1
#define PDO_FORCE_PLOGI BIT_0
+struct buffer_credit_24xx {
+ u32 parameter[28];
+};
#define PORT_DATABASE_24XX_SIZE 64
struct port_database_24xx {
@@ -721,6 +724,48 @@ struct ct_entry_24xx {
};
/*
+ * ISP queue - PUREX IOCB entry structure definition
+ */
+#define PUREX_IOCB_TYPE 0x51 /* PUREX IOCB entry */
+struct purex_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint16_t reserved1;
+ uint8_t vp_idx;
+ uint8_t reserved2;
+
+ uint16_t status_flags;
+ uint16_t nport_handle;
+
+ uint16_t frame_size;
+ uint16_t trunc_frame_size;
+
+ uint32_t rx_xchg_addr;
+
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+
+ uint8_t f_ctl[3];
+ uint8_t type;
+
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint32_t param;
+
+ uint8_t els_frame_payload[20];
+};
+
+/*
* ISP queue - ELS Pass-Through entry structure definition.
*/
#define ELS_IOCB_TYPE 0x53 /* ELS Pass-Through IOCB entry */
@@ -732,9 +777,8 @@ struct els_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t reserved_1;
-
- uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t comp_status; /* response only */
+ uint16_t nport_handle;
uint16_t tx_dsd_count;
@@ -749,7 +793,7 @@ struct els_entry_24xx {
uint8_t opcode;
uint8_t reserved_2;
- uint8_t port_id[3];
+ uint8_t d_id[3];
uint8_t s_id[3];
uint16_t control_flags; /* Control flags. */
@@ -761,13 +805,24 @@ struct els_entry_24xx {
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
- __le32 rx_byte_count;
- __le32 tx_byte_count;
+ union {
+ struct {
+ __le32 rx_byte_count;
+ __le32 tx_byte_count;
- __le64 tx_address __packed; /* Data segment 0 address. */
- __le32 tx_len; /* Data segment 0 length. */
- __le64 rx_address __packed; /* Data segment 1 address. */
- __le32 rx_len; /* Data segment 1 length. */
+ __le64 tx_address __packed; /* DSD 0 address. */
+ __le32 tx_len; /* DSD 0 length. */
+
+ __le64 rx_address __packed; /* DSD 1 address. */
+ __le32 rx_len; /* DSD 1 length. */
+ };
+ struct {
+ uint32_t total_byte_count;
+ uint32_t error_subcode_1;
+ uint32_t error_subcode_2;
+ uint32_t error_subcode_3;
+ };
+ };
};
struct els_sts_entry_24xx {
@@ -793,15 +848,16 @@ struct els_sts_entry_24xx {
uint8_t opcode;
uint8_t reserved_3;
- uint8_t port_id[3];
- uint8_t reserved_4;
-
- uint16_t reserved_5;
+ uint8_t d_id[3];
+ uint8_t s_id[3];
uint16_t control_flags; /* Control flags. */
uint32_t total_byte_count;
uint32_t error_subcode_1;
uint32_t error_subcode_2;
+ uint32_t error_subcode_3;
+
+ uint32_t reserved_4[4];
};
/*
* ISP queue - Mailbox Command entry structure definition.
@@ -942,6 +998,91 @@ struct abort_entry_24xx {
uint8_t reserved_2[12];
};
+#define ABTS_RCV_TYPE 0x54
+#define ABTS_RSP_TYPE 0x55
+struct abts_entry_24xx {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t handle_count;
+ uint8_t entry_status;
+
+ uint32_t handle; /* type 0x55 only */
+
+ uint16_t comp_status; /* type 0x55 only */
+ uint16_t nport_handle; /* type 0x54 only */
+
+ uint16_t control_flags; /* type 0x55 only */
+ uint8_t vp_idx;
+ uint8_t sof_type; /* sof_type is upper nibble */
+
+ uint32_t rx_xch_addr;
+
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+
+ uint8_t f_ctl[3];
+ uint8_t type;
+
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+
+ uint16_t rx_id;
+ uint16_t ox_id;
+
+ uint32_t param;
+
+ union {
+ struct {
+ uint32_t subcode3;
+ uint32_t rsvd;
+ uint32_t subcode1;
+ uint32_t subcode2;
+ } error;
+ struct {
+ uint16_t rsrvd1;
+ uint8_t last_seq_id;
+ uint8_t seq_id_valid;
+ uint16_t aborted_rx_id;
+ uint16_t aborted_ox_id;
+ uint16_t high_seq_cnt;
+ uint16_t low_seq_cnt;
+ } ba_acc;
+ struct {
+ uint8_t vendor_unique;
+ uint8_t explanation;
+ uint8_t reason;
+ } ba_rjt;
+ } payload;
+
+ uint32_t rx_xch_addr_to_abort;
+} __packed;
+
+/* ABTS payload explanation values */
+#define BA_RJT_EXP_NO_ADDITIONAL 0
+#define BA_RJT_EXP_INV_OX_RX_ID 3
+#define BA_RJT_EXP_SEQ_ABORTED 5
+
+/* ABTS payload reason values */
+#define BA_RJT_RSN_INV_CMD_CODE 1
+#define BA_RJT_RSN_LOGICAL_ERROR 3
+#define BA_RJT_RSN_LOGICAL_BUSY 5
+#define BA_RJT_RSN_PROTOCOL_ERROR 7
+#define BA_RJT_RSN_UNABLE_TO_PERFORM 9
+#define BA_RJT_RSN_VENDOR_SPECIFIC 0xff
+
+/* FC_F values */
+#define FC_TYPE_BLD 0x000 /* Basic link data */
+#define FC_F_CTL_RSP_CNTXT 0x800000 /* Responder of exchange */
+#define FC_F_CTL_LAST_SEQ 0x100000 /* Last sequence */
+#define FC_F_CTL_END_SEQ 0x80000 /* End sequence */
+#define FC_F_CTL_SEQ_INIT 0x010000 /* Sequence initiative */
+#define FC_ROUTING_BLD 0x80 /* Basic link data frame */
+#define FC_R_CTL_BLD_BA_ACC 0x04 /* BA_ACC (basic accept) */
+
/*
* ISP I/O Register Set structure definitions.
*/
@@ -1726,9 +1867,8 @@ struct access_chip_rsp_84xx {
/* LR Distance bit positions */
#define LR_DIST_NV_POS 2
+#define LR_DIST_NV_MASK 0xf
#define LR_DIST_FW_POS 12
-#define LR_DIST_FW_SHIFT (LR_DIST_FW_POS - LR_DIST_NV_POS)
-#define LR_DIST_FW_FIELD(x) ((x) << LR_DIST_FW_SHIFT & 0xf000)
/* FAC semaphore defines */
#define FAC_SEMAPHORE_UNLOCK 0
@@ -1883,6 +2023,7 @@ struct nvram_81xx {
* BIT 6-15 = Unused
*/
uint16_t enhanced_features;
+
uint16_t reserved_24[4];
/* Offset 416. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2a64729a2bc5..1b93f5b4d77d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -31,7 +31,7 @@ extern int qla24xx_nvram_config(struct scsi_qla_host *);
extern int qla81xx_nvram_config(struct scsi_qla_host *);
extern void qla2x00_update_fw_options(struct scsi_qla_host *);
extern void qla24xx_update_fw_options(scsi_qla_host_t *);
-extern void qla81xx_update_fw_options(scsi_qla_host_t *);
+
extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
@@ -109,7 +109,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
void *, u8);
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_detect_sfp(scsi_qla_host_t *vha);
+int qla24xx_detect_sfp(scsi_qla_host_t *);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
extern void qla28xx_get_aux_images(struct scsi_qla_host *,
@@ -142,6 +142,8 @@ extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
+extern int ql2xrdpenable;
+extern int ql2xsmartsan;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
@@ -226,6 +228,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
+void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt);
/*
* Global Functions in qla_mid.c source file.
@@ -354,6 +357,9 @@ extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int
+qla24xx_get_port_database(scsi_qla_host_t *, u16, struct port_database_24xx *);
+
+extern int
qla2x00_get_firmware_state(scsi_qla_host_t *, uint16_t *);
extern int
@@ -452,6 +458,13 @@ extern int
qla25xx_set_driver_version(scsi_qla_host_t *, char *);
extern int
+qla25xx_set_els_cmds_supported(scsi_qla_host_t *);
+
+extern int
+qla24xx_get_buffer_credits(scsi_qla_host_t *, struct buffer_credit_24xx *,
+ dma_addr_t);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -552,6 +565,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
extern irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id);
+extern irqreturn_t
+qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
@@ -656,7 +671,7 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
-extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
+extern size_t qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
@@ -844,6 +859,7 @@ extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int);
+extern int ql26xx_led_config(scsi_qla_host_t *, uint16_t, uint16_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct bsg_job *);
@@ -913,6 +929,7 @@ void qlt_remove_target_resources(struct qla_hw_data *);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
+extern void qla24xx_process_purex_list(struct purex_list *);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index aaa4a5bbf2ff..42c3ad27f1cb 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -19,6 +19,8 @@ static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
static int qla_async_rsnn_nn(scsi_qla_host_t *);
+
+
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
* @vha: HA context
@@ -844,19 +846,18 @@ done:
return rval;
}
-void
+size_t
qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
{
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(ha))
- snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
- ha->mr.fw_version, qla2x00_version_str);
- else
- snprintf(snn, size,
- "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
- ha->fw_major_version, ha->fw_minor_version,
- ha->fw_subminor_version, qla2x00_version_str);
+ return scnprintf(snn, size, "%s FW:v%s DVR:v%s",
+ ha->model_number, ha->mr.fw_version, qla2x00_version_str);
+
+ return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s",
+ ha->model_number, ha->fw_major_version, ha->fw_minor_version,
+ ha->fw_subminor_version, qla2x00_version_str);
}
/**
@@ -1501,747 +1502,732 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
return &p->p.req;
}
+static uint
+qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
+{
+ if (IS_CNA_CAPABLE(ha))
+ return FDMI_PORT_SPEED_10GB;
+ if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
+ uint speeds = 0;
+
+ if (ha->max_supported_speed == 2) {
+ if (ha->min_supported_speed <= 6)
+ speeds |= FDMI_PORT_SPEED_64GB;
+ }
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1) {
+ if (ha->min_supported_speed <= 5)
+ speeds |= FDMI_PORT_SPEED_32GB;
+ }
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 4)
+ speeds |= FDMI_PORT_SPEED_16GB;
+ }
+ if (ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 3)
+ speeds |= FDMI_PORT_SPEED_8GB;
+ }
+ if (ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 2)
+ speeds |= FDMI_PORT_SPEED_4GB;
+ }
+ return speeds;
+ }
+ if (IS_QLA2031(ha))
+ return FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB;
+ if (IS_QLA25XX(ha))
+ return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
+ if (IS_QLA24XX_TYPE(ha))
+ return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB;
+ if (IS_QLA23XX(ha))
+ return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
+ return FDMI_PORT_SPEED_1GB;
+}
+static uint
+qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
+{
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ return FDMI_PORT_SPEED_1GB;
+ case PORT_SPEED_2GB:
+ return FDMI_PORT_SPEED_2GB;
+ case PORT_SPEED_4GB:
+ return FDMI_PORT_SPEED_4GB;
+ case PORT_SPEED_8GB:
+ return FDMI_PORT_SPEED_8GB;
+ case PORT_SPEED_10GB:
+ return FDMI_PORT_SPEED_10GB;
+ case PORT_SPEED_16GB:
+ return FDMI_PORT_SPEED_16GB;
+ case PORT_SPEED_32GB:
+ return FDMI_PORT_SPEED_32GB;
+ case PORT_SPEED_64GB:
+ return FDMI_PORT_SPEED_64GB;
+ default:
+ return FDMI_PORT_SPEED_UNKNOWN;
+ }
+}
+
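The 27xx/28xx branch decodes the firmware's supported-speed range; the encoding can be read straight off the comparisons above:

	/* max_supported_speed: 0 -> 16G, 1 -> 32G, 2 -> 64G (fastest rate) */
	/* min_supported_speed: 2 -> 4G, 3 -> 8G, 4 -> 16G, 5 -> 32G,       */
	/*                      6 -> 64G (slowest rate)                      */
	/* e.g. max = 1, min = 3 reports FDMI_PORT_SPEED_32GB|16GB|8GB.      */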
/**
- * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
+ * qla2x00_hba_attributes() perform HBA attributes registration
* @vha: HA context
+ * @entries: number of entries to use
+ * @callopt: Option to issue extended or standard FDMI
+ * command parameter
*
* Returns 0 on success.
*/
-static int
-qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
+static unsigned long
+qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
+ unsigned int callopt)
{
- int rval, alen;
- uint32_t size, sn;
-
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
- struct ct_fdmi_hba_attr *eiter;
struct qla_hw_data *ha = vha->hw;
-
- /* Issue RHBA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
- ct_req->req.rhba.entry_count = cpu_to_be32(1);
- memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
- size = 2 * WWN_SIZE + 4 + 4;
-
- /* Attributes */
- ct_req->req.rhba.attrs.count =
- cpu_to_be32(FDMI_HBA_ATTR_COUNT);
- entries = &ct_req->req;
+ struct init_cb_24xx *icb24 = (void *)ha->init_cb;
+ struct new_utsname *p_sysid = utsname();
+ struct ct_fdmi_hba_attr *eiter;
+ uint16_t alen;
+ unsigned long size = 0;
/* Nodename. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x2025,
- "NodeName = %8phN.\n", eiter->a.node_name);
-
+ memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
+ alen = sizeof(eiter->a.node_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a0,
+ "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Manufacturer. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
- alen = strlen(QLA2XXX_MANUFACTURER);
- snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2026,
- "Manufacturer = %s.\n", eiter->a.manufacturer);
-
+ alen = scnprintf(
+ eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", "QLogic Corporation");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a1,
+ "MANUFACTURER = %s.\n", eiter->a.manufacturer);
/* Serial number. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
- if (IS_FWI2_CAPABLE(ha))
- qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
- sizeof(eiter->a.serial_num));
- else {
- sn = ((ha->serial0 & 0x1f) << 16) |
+ alen = 0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ alen = qla2xxx_get_vpd_field(vha, "SN",
+ eiter->a.serial_num, sizeof(eiter->a.serial_num));
+ }
+ if (!alen) {
+ uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
(ha->serial2 << 8) | ha->serial1;
- snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
- "%c%05d", 'A' + sn / 100000, sn % 100000);
+ alen = scnprintf(
+ eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
}
- alen = strlen(eiter->a.serial_num);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2027,
- "Serial no. = %s.\n", eiter->a.serial_num);
-
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a2,
+ "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
/* Model name. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
- snprintf(eiter->a.model, sizeof(eiter->a.model),
- "%s", ha->model_number);
- alen = strlen(eiter->a.model);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2028,
- "Model Name = %s.\n", eiter->a.model);
-
+ alen = scnprintf(
+ eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a3,
+ "MODEL NAME = %s.\n", eiter->a.model);
/* Model description. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
- snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
- "%s", ha->model_desc);
- alen = strlen(eiter->a.model_desc);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2029,
- "Model Desc = %s.\n", eiter->a.model_desc);
-
+ alen = scnprintf(
+ eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a4,
+ "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
/* Hardware version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
- if (!IS_FWI2_CAPABLE(ha)) {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
+ alen = 0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (!alen) {
+ alen = qla2xxx_get_vpd_field(vha, "MN",
+ eiter->a.hw_version, sizeof(eiter->a.hw_version));
+ }
+ if (!alen) {
+ alen = qla2xxx_get_vpd_field(vha, "EC",
+ eiter->a.hw_version, sizeof(eiter->a.hw_version));
+ }
}
- alen = strlen(eiter->a.hw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202a,
- "Hardware ver = %s.\n", eiter->a.hw_version);
-
+ if (!alen) {
+ alen = scnprintf(
+ eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a5,
+ "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
/* Driver version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
- snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
- "%s", qla2x00_version_str);
- alen = strlen(eiter->a.driver_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202b,
- "Driver ver = %s.\n", eiter->a.driver_version);
-
+ alen = scnprintf(
+ eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a6,
+ "DRIVER VERSION = %s.\n", eiter->a.driver_version);
/* Option ROM version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
- snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
- "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.orom_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha , 0x202c,
- "Optrom vers = %s.\n", eiter->a.orom_version);
+ alen = scnprintf(
+ eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a7,
+ "OPTROM VERSION = %d.%02d.\n",
+ eiter->a.orom_version[1], eiter->a.orom_version[0]);
/* Firmware version */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
sizeof(eiter->a.fw_version));
- alen = strlen(eiter->a.fw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202d,
- "Firmware vers = %s.\n", eiter->a.fw_version);
-
- /* Update MS request size. */
- qla2x00_update_ms_fdmi_iocb(vha, size + 16);
-
- ql_dbg(ql_dbg_disc, vha, 0x202e,
- "RHBA identifier = %8phN size=%d.\n",
- ct_req->req.rhba.hba_identifier, size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
- entries, size);
-
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2030,
- "RHBA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
- ct_rsp->header.explanation_code ==
- CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x2034,
- "HBA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20ad,
- "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
- }
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2035,
- "RHBA exiting normally.\n");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a8,
+ "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
+ if (callopt == CALLOPT_FDMI1)
+ goto done;
+ /* OS Name and Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
+ alen = 0;
+ if (p_sysid) {
+ alen = scnprintf(
+ eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s %s",
+ p_sysid->sysname, p_sysid->release, p_sysid->machine);
}
-
- return rval;
+ if (!alen) {
+ alen = scnprintf(
+ eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s",
+ "Linux", fc_host_system_hostname(vha->host));
+ }
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a9,
+ "OS VERSION = %s.\n", eiter->a.os_version);
+ /* MAX CT Payload Length */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
+ eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
+ icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ alen = sizeof(eiter->a.max_ct_len);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20aa,
+ "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
+ /* Node Symbolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
+ alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
+ sizeof(eiter->a.sym_name));
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ab,
+ "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
+ /* Vendor Specific information */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
+ eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
+ alen = sizeof(eiter->a.vendor_specific_info);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ac,
+ "VENDOR SPECIFIC INFO = 0x%x.\n",
+ be32_to_cpu(eiter->a.vendor_specific_info));
+ /* Num Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
+ eiter->a.num_ports = cpu_to_be32(1);
+ alen = sizeof(eiter->a.num_ports);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ad,
+ "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
+ /* Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name,
+ sizeof(eiter->a.fabric_name));
+ alen = sizeof(eiter->a.fabric_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ae,
+ "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+ /* BIOS Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
+ alen = scnprintf(
+ eiter->a.bios_name, sizeof(eiter->a.bios_name),
+ "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20af,
+ "BIOS NAME = %s\n", eiter->a.bios_name);
+ /* Vendor Identifier */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
+ alen = scnprintf(
+ eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
+ "%s", "QLGC");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20b0,
+ "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
+done:
+ return size;
}
/**
- * qla2x00_fdmi_rpa() - perform RPA registration
+ * qla2x00_port_attributes() - build the FDMI port attribute block
 * @vha: HA context
+ * @entries: buffer in which the attribute entries are built
+ * @callopt: FDMI call option, selecting standard (CALLOPT_FDMI1) or
+ * extended (CALLOPT_FDMI2/CALLOPT_FDMI2_SMARTSAN) attributes
*
- * Returns 0 on success.
+ * Returns the number of bytes built into @entries.
*/
-static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+static unsigned long
+qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
+ unsigned int callopt)
{
- int rval, alen;
- uint32_t size;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
+ struct init_cb_24xx *icb24 = (void *)ha->init_cb;
+ struct new_utsname *p_sysid = utsname();
+ char *hostname = p_sysid ?
+ p_sysid->nodename : fc_host_system_hostname(vha->host);
struct ct_fdmi_port_attr *eiter;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RPA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
- RPA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
- size = WWN_SIZE + 4;
-
- /* Attributes */
- ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
- entries = &ct_req->req;
+ uint16_t alen;
+ unsigned long size = 0;
/* FC4 types. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
- eiter->len = cpu_to_be16(4 + 32);
+ eiter->a.fc4_types[0] = 0x00;
+ eiter->a.fc4_types[1] = 0x00;
eiter->a.fc4_types[2] = 0x01;
- size += 4 + 32;
-
- ql_dbg(ql_dbg_disc, vha, 0x2039,
- "FC4_TYPES=%02x %02x.\n",
- eiter->a.fc4_types[2],
- eiter->a.fc4_types[1]);
-
+ eiter->a.fc4_types[3] = 0x00;
+ alen = sizeof(eiter->a.fc4_types);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c0,
+ "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types);
+ if (vha->flags.nvme_enabled) {
+ eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
+ ql_dbg(ql_dbg_disc, vha, 0x211f,
+ "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+ eiter->a.fc4_types[6]);
+ }
/* Supported speed. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- if (IS_CNA_CAPABLE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_10GB);
- else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_32GB|
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB);
- else if (IS_QLA2031(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB);
- else if (IS_QLA25XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA24XX_TYPE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA23XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_1GB);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203a,
- "Supported_Speed=%x.\n", eiter->a.sup_speed);
-
+ eiter->a.sup_speed = cpu_to_be32(
+ qla25xx_fdmi_port_speed_capability(ha));
+ alen = sizeof(eiter->a.sup_speed);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c1,
+ "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed));
/* Current speed. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- switch (ha->link_data_rate) {
- case PORT_SPEED_1GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_1GB);
- break;
- case PORT_SPEED_2GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_2GB);
- break;
- case PORT_SPEED_4GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_4GB);
- break;
- case PORT_SPEED_8GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_8GB);
- break;
- case PORT_SPEED_10GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_10GB);
- break;
- case PORT_SPEED_16GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_16GB);
- break;
- case PORT_SPEED_32GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_32GB);
- break;
- default:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
- break;
- }
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203b,
- "Current_Speed=%x.\n", eiter->a.cur_speed);
-
+ eiter->a.cur_speed = cpu_to_be32(
+ qla25xx_fdmi_port_speed_currently(ha));
+ alen = sizeof(eiter->a.cur_speed);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c2,
+ "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed));
/* Max frame size. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->len = cpu_to_be16(4 + 4);
- eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size) :
- le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203c,
- "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
-
+ eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
+ icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ alen = sizeof(eiter->a.max_frame_size);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c3,
+ "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size));
/* OS device name. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
- "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
- alen = strlen(eiter->a.os_dev_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x204b,
- "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
-
+ alen = scnprintf(
+ eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c4,
+ "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name);
/* Hostname. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", p_sysid->nodename);
- } else {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", fc_host_system_hostname(vha->host));
- }
- alen = strlen(eiter->a.host_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
-
- /* Update MS request size. */
- qla2x00_update_ms_fdmi_iocb(vha, size + 16);
-
- ql_dbg(ql_dbg_disc, vha, 0x203e,
- "RPA portname %016llx, size = %d.\n",
- wwn_to_u64(ct_req->req.rpa.port_name), size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
- entries, size);
-
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2040,
- "RPA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
- ct_rsp->header.explanation_code ==
- CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20cd,
- "RPA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- }
-
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2041,
- "RPA exiting normally.\n");
- }
-
- return rval;
-}
-
-/**
- * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
- * @vha: HA context
- *
- * Returns 0 on success.
- */
-static int
-qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
-{
- int rval, alen;
- uint32_t size, sn;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
- struct ct_fdmiv2_hba_attr *eiter;
- struct qla_hw_data *ha = vha->hw;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RHBA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
- RHBA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
- ct_req->req.rhba2.entry_count = cpu_to_be32(1);
- memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
- size = 2 * WWN_SIZE + 4 + 4;
+ if (!*hostname || !strncmp(hostname, "(none)", 6))
+ hostname = "Linux-default";
+ alen = scnprintf(
+ eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", hostname);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c5,
+ "HOSTNAME = %s.\n", eiter->a.host_name);
- /* Attributes */
- ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
- entries = &ct_req->req;
+ if (callopt == CALLOPT_FDMI1)
+ goto done;
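+ /* Remaining port attributes are FDMI-2 only. */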
- /* Nodename. */
+ /* Node Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x207d,
- "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+ eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
+ memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
+ alen = sizeof(eiter->a.node_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c6,
+ "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
- /* Manufacturer. */
+ /* Port Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
- snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
- eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
- alen = strlen(eiter->a.manufacturer);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a5,
- "Manufacturer = %s.\n", eiter->a.manufacturer);
+ eiter->type = cpu_to_be16(FDMI_PORT_NAME);
+ memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name));
+ alen = sizeof(eiter->a.port_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c7,
+ "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name));
- /* Serial number. */
+ /* Port Symbolic Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
- if (IS_FWI2_CAPABLE(ha))
- qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
- sizeof(eiter->a.serial_num));
- else {
- sn = ((ha->serial0 & 0x1f) << 16) |
- (ha->serial2 << 8) | ha->serial1;
- snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
- "%c%05d", 'A' + sn / 100000, sn % 100000);
- }
- alen = strlen(eiter->a.serial_num);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a6,
- "Serial no. = %s.\n", eiter->a.serial_num);
+ eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
+ alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
+ sizeof(eiter->a.port_sym_name));
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name);
- /* Model name. */
+ /* Port Type */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
- snprintf(eiter->a.model, sizeof(eiter->a.model),
- "%s", ha->model_number);
- alen = strlen(eiter->a.model);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a7,
- "Model Name = %s.\n", eiter->a.model);
-
- /* Model description. */
+ eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
+ eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
+ alen = sizeof(eiter->a.port_type);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c9,
+ "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type));
+
+ /* Supported Class of Service */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
- snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
- "%s", ha->model_desc);
- alen = strlen(eiter->a.model_desc);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a8,
- "Model Desc = %s.\n", eiter->a.model_desc);
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
+ eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
+ alen = sizeof(eiter->a.port_supported_cos);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ca,
+ "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
- /* Hardware version. */
+ /* Port Fabric Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
- if (!IS_FWI2_CAPABLE(ha)) {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- }
- alen = strlen(eiter->a.hw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a9,
- "Hardware ver = %s.\n", eiter->a.hw_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name,
+ sizeof(eiter->a.fabric_name));
+ alen = sizeof(eiter->a.fabric_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cb,
+ "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
- /* Driver version. */
+ /* FC4_type */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
- snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
- "%s", qla2x00_version_str);
- alen = strlen(eiter->a.driver_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20aa,
- "Driver ver = %s.\n", eiter->a.driver_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
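+ /* FC-4 types bitmap; byte 2 bit 0 marks FCP (FC-4 type 0x08). */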
+ eiter->a.port_fc4_type[0] = 0x00;
+ eiter->a.port_fc4_type[1] = 0x00;
+ eiter->a.port_fc4_type[2] = 0x01;
+ eiter->a.port_fc4_type[3] = 0x00;
+ alen = sizeof(eiter->a.port_fc4_type);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cc,
+ "PORT ACTIVE FC4 TYPE = %016llx.\n",
+ *(uint64_t *)eiter->a.port_fc4_type);
- /* Option ROM version. */
+ /* Port State */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
- snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
- "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.orom_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha , 0x20ab,
- "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
- eiter->a.orom_version[0]);
+ eiter->type = cpu_to_be16(FDMI_PORT_STATE);
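+ /* 2 = online in the HBA-API port-state encoding (1 = unknown). */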
+ eiter->a.port_state = cpu_to_be32(2);
+ alen = sizeof(eiter->a.port_state);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cd,
+ "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state));
- /* Firmware version */
+ /* Number of Ports */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
- ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
- sizeof(eiter->a.fw_version));
- alen = strlen(eiter->a.fw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ac,
- "Firmware vers = %s.\n", eiter->a.fw_version);
-
- /* OS Name and Version */
+ eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
+ eiter->a.num_ports = cpu_to_be32(1);
+ alen = sizeof(eiter->a.num_ports);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ce,
+ "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
+
+ /* Port Identifier */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
- "%s %s %s",
- p_sysid->sysname, p_sysid->release, p_sysid->version);
- } else {
- snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
- "%s %s", "Linux", fc_host_system_hostname(vha->host));
- }
- alen = strlen(eiter->a.os_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ae,
- "OS Name and Version = %s.\n", eiter->a.os_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER);
+ eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
+ alen = sizeof(eiter->a.port_id);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cf,
+ "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id));
+
+ if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
+ goto done;
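+ /* Smart SAN attributes below are sent only with ql2xsmartsan set. */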
- /* MAX CT Payload Length */
+ /* Smart SAN Service Category (Populate Smart SAN Initiator) */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
- eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE);
+ alen = scnprintf(
+ eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service),
+ "%s", "Smart SAN Initiator");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d0,
+ "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service);
+
+ /* Smart SAN GUID (NWWN+PWWN) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID);
+ memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE);
+ memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE);
+ alen = sizeof(eiter->a.smartsan_guid);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d1,
+ "Smart SAN GUID = %016llx-%016llx\n",
+ wwn_to_u64(eiter->a.smartsan_guid),
+ wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE));
+
+ /* Smart SAN Version (populate "Smart SAN Version 2.0") */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION);
+ alen = scnprintf(
+ eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version),
+ "%s", "Smart SAN Version 2.0");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version);
+
+ /* Smart SAN Product Name (Specify Adapter Model No) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME);
+ alen = scnprintf(eiter->a.smartsan_prod_name,
+ sizeof(eiter->a.smartsan_prod_name),
+ "ISP%04x", ha->pdev->device);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name);
+
+ /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO);
+ eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1);
+ alen = sizeof(eiter->a.smartsan_port_info);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d4,
+ "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info);
+
+ /* Smart SAN Security Support */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT);
+ eiter->a.smartsan_security_support = cpu_to_be32(1);
+ alen = sizeof(eiter->a.smartsan_security_support);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "SMARTSAN SECURITY SUPPORT = %d\n",
+ be32_to_cpu(eiter->a.smartsan_security_support));
- ql_dbg(ql_dbg_disc, vha, 0x20af,
- "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
+done:
+ return size;
+}
- /* Node Sybolic Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
- qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
- sizeof(eiter->a.sym_name));
- alen = strlen(eiter->a.sym_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+/**
+ * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
+ * @vha: HA context
+ * @callopt: Option to issue FDMI registration
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long size = 0;
+ unsigned int rval, count;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
- ql_dbg(ql_dbg_disc, vha, 0x20b0,
- "Symbolic Name = %s.\n", eiter->a.sym_name);
+ count = callopt != CALLOPT_FDMI1 ?
+ FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT;
- /* Vendor Id */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
- eiter->a.vendor_id = cpu_to_be32(0x1077);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ size = RHBA_RSP_SIZE;
- ql_dbg(ql_dbg_disc, vha, 0x20b1,
- "Vendor Id = %x.\n", eiter->a.vendor_id);
+ ql_dbg(ql_dbg_disc, vha, 0x20e0,
+ "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
- /* Num Ports */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
- eiter->a.num_ports = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
- ql_dbg(ql_dbg_disc, vha, 0x20b2,
- "Port Num = %x.\n", eiter->a.num_ports);
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size);
+ ct_rsp = &ha->ct_sns->p.rsp;
- /* Fabric Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
- memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
+ /* Prepare FDMI command entries */
+ memcpy(ct_req->req.rhba.hba_identifier, vha->port_name,
+ sizeof(ct_req->req.rhba.hba_identifier));
+ size += sizeof(ct_req->req.rhba.hba_identifier);
- ql_dbg(ql_dbg_disc, vha, 0x20b3,
- "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+ ct_req->req.rhba.entry_count = cpu_to_be32(1);
+ size += sizeof(ct_req->req.rhba.entry_count);
- /* BIOS Version */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
- snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
- "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.bios_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+ memcpy(ct_req->req.rhba.port_name, vha->port_name,
+ sizeof(ct_req->req.rhba.port_name));
+ size += sizeof(ct_req->req.rhba.port_name);
- ql_dbg(ql_dbg_disc, vha, 0x20b4,
- "BIOS Name = %s\n", eiter->a.bios_name);
+ /* Attribute count */
+ ct_req->req.rhba.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rhba.attrs.count);
- /* Vendor Identifier */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
- snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
- "%s", "QLGC");
- alen = strlen(eiter->a.vendor_identifier);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+ /* Attribute block */
+ entries = &ct_req->req.rhba.attrs.entry;
- ql_dbg(ql_dbg_disc, vha, 0x201b,
- "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
+ size += qla2x00_hba_attributes(vha, entries, callopt);
/* Update MS request size. */
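+ /* the extra 16 bytes cover the CT_IU preamble ahead of the payload */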
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- ql_dbg(ql_dbg_disc, vha, 0x20b5,
- "RHBA identifier = %016llx.\n",
- wwn_to_u64(ct_req->req.rhba2.hba_identifier));
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
+ ql_dbg(ql_dbg_disc, vha, 0x20e1,
+ "RHBA %016llx %016llx.\n",
+ wwn_to_u64(ct_req->req.rhba.hba_identifier),
+ wwn_to_u64(ct_req->req.rhba.port_name));
+
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2,
entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x20b7,
- "RHBA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "RHBA iocb failed (%d).\n", rval);
+ return rval;
+ }
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA");
+ if (rval) {
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20b8,
- "HBA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2016,
- "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
+ ql_dbg(ql_dbg_disc, vha, 0x20e4,
+ "RHBA already registered.\n");
+ return QLA_ALREADY_REGISTERED;
}
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20b9,
- "RHBA FDMI V2 exiting normally.\n");
+
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "RHBA failed, CT Reason %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
+ ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n");
return rval;
}
-/**
- * qla2x00_fdmi_dhba() -
- * @vha: HA context
- *
- * Returns 0 on success.
- */
+
static int
qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
{
@@ -2250,22 +2236,17 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
-
/* Issue RPA */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
DHBA_RSP_SIZE);
-
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
-
/* Prepare FDMI command arguments -- portname. */
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
-
ql_dbg(ql_dbg_disc, vha, 0x2036,
"DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
-
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
@@ -2280,337 +2261,178 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_disc, vha, 0x2038,
"DHBA exiting normally.\n");
}
-
return rval;
}
/**
- * qla2x00_fdmiv2_rpa() -
+ * qla2x00_fdmi_rprt() - perform RPRT registration
* @vha: HA context
+ * @callopt: option to issue extended or standard FDMI command
*
* Returns 0 on success.
*/
static int
-qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
+qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt)
{
- int rval, alen;
- uint32_t size;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
struct qla_hw_data *ha = vha->hw;
+ ulong size = 0;
+ uint rval, count;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
void *entries;
- struct ct_fdmiv2_port_attr *eiter;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RPA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
-
+ count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
+ FDMI2_SMARTSAN_PORT_ATTR_COUNT :
+ callopt != CALLOPT_FDMI1 ?
+ FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
+
+ size = RPRT_RSP_SIZE;
+ ql_dbg(ql_dbg_disc, vha, 0x20e8,
+ "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size);
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size);
ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
- size = WWN_SIZE + 4;
-
- /* Attributes */
- ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
- entries = &ct_req->req;
-
- /* FC4 types. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
- eiter->len = cpu_to_be16(4 + 32);
- eiter->a.fc4_types[2] = 0x01;
- size += 4 + 32;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ba,
- "FC4_TYPES=%02x %02x.\n",
- eiter->a.fc4_types[2],
- eiter->a.fc4_types[1]);
-
- if (vha->flags.nvme_enabled) {
- eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
- ql_dbg(ql_dbg_disc, vha, 0x211f,
- "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
- eiter->a.fc4_types[6]);
- }
-
- /* Supported speed. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- if (IS_CNA_CAPABLE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_10GB);
- else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_32GB|
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB);
- else if (IS_QLA2031(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB);
- else if (IS_QLA25XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA24XX_TYPE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA23XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_1GB);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20bb,
- "Supported Port Speed = %x.\n", eiter->a.sup_speed);
-
- /* Current speed. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- switch (ha->link_data_rate) {
- case PORT_SPEED_1GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
- break;
- case PORT_SPEED_2GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
- break;
- case PORT_SPEED_4GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
- break;
- case PORT_SPEED_8GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
- break;
- case PORT_SPEED_10GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
- break;
- case PORT_SPEED_16GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
- break;
- case PORT_SPEED_32GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
- break;
- default:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
- break;
+ /* Prepare FDMI command entries */
+ memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name,
+ sizeof(ct_req->req.rprt.hba_identifier));
+ size += sizeof(ct_req->req.rprt.hba_identifier);
+ memcpy(ct_req->req.rprt.port_name, vha->port_name,
+ sizeof(ct_req->req.rprt.port_name));
+ size += sizeof(ct_req->req.rprt.port_name);
+ /* Attribute count */
+ ct_req->req.rprt.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rprt.attrs.count);
+ /* Attribute block */
+ entries = ct_req->req.rprt.attrs.entry;
+ size += qla2x00_port_attributes(vha, entries, callopt);
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+ ql_dbg(ql_dbg_disc, vha, 0x20e9,
+ "RPRT %016llx %016llx.\n",
+ wwn_to_u64(ct_req->req.rprt.hba_identifier),
+ wwn_to_u64(ct_req->req.rprt.port_name));
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
+ entries, size);
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "RPRT iocb failed (%d).\n", rval);
+ return rval;
}
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x2017,
- "Current_Speed = %x.\n", eiter->a.cur_speed);
-
- /* Max frame size. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->len = cpu_to_be16(4 + 4);
- eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size) :
- le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20bc,
- "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
-
- /* OS device name. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- alen = strlen(QLA2XXX_DRIVER_NAME);
- snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
- "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20be,
- "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
+ if (rval) {
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "RPRT already registered.\n");
+ return QLA_ALREADY_REGISTERED;
+ }
- /* Hostname. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", p_sysid->nodename);
- } else {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", fc_host_system_hostname(vha->host));
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
- alen = strlen(eiter->a.host_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x201a,
- "HostName=%s.\n", eiter->a.host_name);
-
- /* Node Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c0,
- "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
-
- /* Port Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_NAME);
- memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c1,
- "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
-
- /* Port Symbolic Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
- qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
- sizeof(eiter->a.port_sym_name));
- alen = strlen(eiter->a.port_sym_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c2,
- "port symbolic name = %s\n", eiter->a.port_sym_name);
-
- /* Port Type */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
- eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c3,
- "Port Type = %x.\n", eiter->a.port_type);
-
- /* Class of Service */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
- eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c4,
- "Supported COS = %08x\n", eiter->a.port_supported_cos);
+ ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
+ return rval;
+}
- /* Port Fabric Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
- memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
+/**
+ * qla2x00_fdmi_rpa() - perform RPA registration
+ * @vha: HA context
+ * @callopt: Option to issue FDMI registration
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ulong size = 0;
+ uint rval, count;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
- ql_dbg(ql_dbg_disc, vha, 0x20c5,
- "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+ count =
+ callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
+ FDMI2_SMARTSAN_PORT_ATTR_COUNT :
+ callopt != CALLOPT_FDMI1 ?
+ FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
- /* FC4_type */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
- eiter->a.port_fc4_type[0] = 0;
- eiter->a.port_fc4_type[1] = 0;
- eiter->a.port_fc4_type[2] = 1;
- eiter->a.port_fc4_type[3] = 0;
- eiter->len = cpu_to_be16(4 + 32);
- size += 4 + 32;
+ size =
+ callopt != CALLOPT_FDMI1 ?
+ SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;
- ql_dbg(ql_dbg_disc, vha, 0x20c6,
- "Port Active FC4 Type = %02x %02x.\n",
- eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
- if (vha->flags.nvme_enabled) {
- eiter->a.port_fc4_type[4] = 0;
- eiter->a.port_fc4_type[5] = 0;
- eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
- ql_dbg(ql_dbg_disc, vha, 0x2120,
- "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
- eiter->a.port_fc4_type[6]);
- }
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
- /* Port State */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_STATE);
- eiter->a.port_state = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
+ ct_rsp = &ha->ct_sns->p.rsp;
- ql_dbg(ql_dbg_disc, vha, 0x20c7,
- "Port State = %x.\n", eiter->a.port_state);
+ /* Prepare FDMI command entries. */
+ memcpy(ct_req->req.rpa.port_name, vha->port_name,
+ sizeof(ct_req->req.rpa.port_name));
+ size += sizeof(ct_req->req.rpa.port_name);
- /* Number of Ports */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
- eiter->a.num_ports = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c8,
- "Number of ports = %x.\n", eiter->a.num_ports);
+ /* Attribute count */
+ ct_req->req.rpa.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rpa.attrs.count);
- /* Port Id */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_ID);
- eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Attribute block */
+ entries = ct_req->req.rpa.attrs.entry;
- ql_dbg(ql_dbg_disc, vha, 0x201c,
- "Port Id = %x.\n", eiter->a.port_id);
+ size += qla2x00_port_attributes(vha, entries, callopt);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- ql_dbg(ql_dbg_disc, vha, 0x2018,
- "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
+ ql_dbg(ql_dbg_disc, vha, 0x20f1,
+ "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name));
+
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2,
entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x20cb,
- "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f3,
+ "RPA iocb failed (%d).\n", rval);
+ return rval;
+ }
+
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA");
+ if (rval) {
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20ce,
- "RPA FDMI v2 already registered\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2020,
- "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
+ ql_dbg(ql_dbg_disc, vha, 0x20f4,
+ "RPA already registered.\n");
+ return QLA_ALREADY_REGISTERED;
}
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20cc,
- "RPA FDMI V2 exiting normally.\n");
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f5,
+ "RPA failed, CT Reason code: %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
+ ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n");
return rval;
}
@@ -2623,18 +2445,31 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
int
qla2x00_fdmi_register(scsi_qla_host_t *vha)
{
- int rval = QLA_FUNCTION_FAILED;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
IS_QLAFX00(ha))
- return QLA_FUNCTION_FAILED;
+ return rval;
rval = qla2x00_mgmt_svr_login(vha);
if (rval)
return rval;
- rval = qla2x00_fdmiv2_rhba(vha);
+ /* For npiv/vport send rprt only */
+ if (vha->vp_idx) {
+ if (ql2xsmartsan)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN);
+ if (rval || !ql2xsmartsan)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2);
+ if (rval)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1);
+
+ return rval;
+ }
+
+ /* Try fdmi2 first, if fails then try fdmi1 */
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
goto try_fdmi;
@@ -2643,18 +2478,22 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
if (rval)
goto try_fdmi;
- rval = qla2x00_fdmiv2_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
if (rval)
goto try_fdmi;
}
- rval = qla2x00_fdmiv2_rpa(vha);
+
+ if (ql2xsmartsan)
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN);
+ if (rval || !ql2xsmartsan)
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2);
if (rval)
goto try_fdmi;
- goto out;
+ return rval;
try_fdmi:
- rval = qla2x00_fdmi_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
return rval;
@@ -2663,12 +2502,13 @@ try_fdmi:
if (rval)
return rval;
- rval = qla2x00_fdmi_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
if (rval)
return rval;
}
- rval = qla2x00_fdmi_rpa(vha);
-out:
+
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1);
+
return rval;
}
@@ -2893,7 +2733,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Set default FC4 Type as UNKNOWN so the default is to
* Process this port */
- list[i].fc4_type = FC4_TYPE_UNKNOWN;
+ list[i].fc4_type = 0;
/* Do not attempt GFF_ID if we are not FWI_2 capable */
if (!IS_FWI2_CAPABLE(ha))
@@ -3243,7 +3083,7 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
"%s %d %8phC post new sess\n",
__func__, __LINE__, ea->port_name);
qla24xx_post_newsess_work(vha, &ea->id,
- ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
+ ea->port_name, NULL, NULL, 0);
}
}
}
@@ -3647,6 +3487,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->last_rscn_gen = fcport->rscn_gen;
found = true;
/*
* If device was not a fabric device before.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9e6b56527b25..caa6b840e459 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1043,7 +1043,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
__func__, __LINE__, (u8 *)&wwn, id.b24);
wwnn = wwn_to_u64(e->node_name);
qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
- (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+ (u8 *)&wwnn, NULL, 0);
}
}
@@ -2219,10 +2219,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/* Check for secure flash support */
if (IS_QLA28XX(ha)) {
- if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
- ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
+ if (RD_REG_DWORD(&reg->mailbox12) & BIT_0)
ha->flags.secure_adapter = 1;
- }
+ ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
+ (ha->flags.secure_adapter) ? "Yes" : "No");
}
@@ -2270,6 +2270,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x0078,
"Verifying loaded RISC code...\n");
+ /* Smart SAN requires FDMI and RDP to be enabled */
+ if (ql2xsmartsan) {
+ ql2xfdmienable = 1;
+ ql2xrdpenable = 1;
+ }
+
if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
rval = ha->isp_ops->chip_diag(vha);
if (rval)
@@ -3544,53 +3550,75 @@ static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
}
-/*
- * Return Code:
- * QLA_SUCCESS: no action
- * QLA_INTERFACE_ERROR: SFP is not there.
- * QLA_FUNCTION_FAILED: detected New SFP
+/**
+ * qla24xx_detect_sfp() - detect SFP type and long-range capability
+ *
+ * @vha: adapter state pointer.
+ *
+ * @return
+ * 0 -- Configure firmware to use short-range settings -- normal
+ * buffer-to-buffer credits.
+ *
+ * 1 -- Configure firmware to use long-range settings -- extra
+ * buffer-to-buffer credits should be allocated with
+ * ha->lr_distance containing distance settings from NVRAM or SFP
+ * (if supported).
*/
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
- int rc = QLA_SUCCESS;
+ int rc, used_nvram;
struct sff_8247_a0 *a;
struct qla_hw_data *ha = vha->hw;
-
- if (!AUTO_DETECT_SFP_SUPPORT(vha))
+ struct nvram_81xx *nv = ha->nvram;
+#define LR_DISTANCE_UNKNOWN 2
+ static const char * const types[] = { "Short", "Long" };
+ static const char * const lengths[] = { "(10km)", "(5km)", "" };
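+ /* lengths[] is indexed by ha->lr_distance; LR_DISTANCE_UNKNOWN (2) maps to "" */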
+ u8 ll = 0;
+
+ /* Seed with NVRAM settings. */
+ used_nvram = 0;
+ ha->flags.lr_detected = 0;
+ if (IS_BPM_RANGE_CAPABLE(ha) &&
+ (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
+ used_nvram = 1;
+ ha->flags.lr_detected = 1;
+ ha->lr_distance =
+ (nv->enhanced_features >> LR_DIST_NV_POS)
+ & LR_DIST_NV_MASK;
+ }
+
+ if (!IS_BPM_ENABLED(vha))
goto out;
-
+ /* Determine SR/LR capabilities of SFP/Transceiver. */
rc = qla2x00_read_sfp_dev(vha, NULL, 0);
if (rc)
goto out;
+ used_nvram = 0;
a = (struct sff_8247_a0 *)vha->hw->sfp_data;
qla2xxx_print_sfp_info(vha);
- if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
- /* long range */
- ha->flags.detected_lr_sfp = 1;
+ ha->flags.lr_detected = 0;
+ ll = a->fc_ll_cc7;
+ if (ll & FC_LL_VL || ll & FC_LL_L) {
+ /* Long range, track length. */
+ ha->flags.lr_detected = 1;
if (a->length_km > 5 || a->length_100m > 50)
- ha->long_range_distance = LR_DISTANCE_10K;
+ ha->lr_distance = LR_DISTANCE_10K;
else
- ha->long_range_distance = LR_DISTANCE_5K;
-
- if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
- ql_dbg(ql_dbg_async, vha, 0x507b,
- "Detected Long Range SFP.\n");
- } else {
- /* short range */
- ha->flags.detected_lr_sfp = 0;
- if (ha->flags.using_lr_setting)
- ql_dbg(ql_dbg_async, vha, 0x5084,
- "Detected Short Range SFP.\n");
+ ha->lr_distance = LR_DISTANCE_5K;
}
- if (!vha->flags.init_done)
- rc = QLA_SUCCESS;
out:
- return rc;
+ ql_dbg(ql_dbg_async, vha, 0x507b,
+ "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
+ types[ha->flags.lr_detected],
+ ha->flags.lr_detected ? lengths[ha->lr_distance] :
+ lengths[LR_DISTANCE_UNKNOWN],
+ used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
+ return ha->flags.lr_detected;
}
/**
@@ -3608,6 +3636,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags;
uint16_t fw_major_version;
+ int done_once = 0;
if (IS_P3P_TYPE(ha)) {
rval = ha->isp_ops->load_risc(vha, &srisc_address);
@@ -3628,6 +3657,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
qla81xx_mpi_sync(vha);
+execute_fw_with_lr:
/* Load firmware sequences */
rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
@@ -3649,7 +3679,15 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
- qla24xx_detect_sfp(vha);
+ /* Enable BPM support? */
+ if (!done_once++ && qla24xx_detect_sfp(vha)) {
+ ql_dbg(ql_dbg_init, vha, 0x00ca,
+ "Re-starting firmware -- BPM.\n");
+ /* Best-effort - re-init. */
+ ha->isp_ops->reset_chip(vha);
+ ha->isp_ops->chip_diag(vha);
+ goto execute_fw_with_lr;
+ }
if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) &&
@@ -3708,6 +3746,10 @@ enable_82xx_npiv:
"ISP Firmware failed checksum.\n");
goto failed;
}
+
+ /* Enable PUREX PASSTHRU */
+ if (ql2xrdpenable)
+ qla25xx_set_els_cmds_supported(vha);
} else
goto failed;
@@ -3919,6 +3961,13 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] &= ~BIT_8;
}
+ if (ql2xrdpenable)
+ ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
+
+ /* Enable Async 8130/8131 events -- transceiver insertion/removal */
+ if (IS_BPM_RANGE_CAPABLE(ha))
+ ha->fw_options[3] |= BIT_10;
+
ql_dbg(ql_dbg_init, vha, 0x00e8,
"%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
__func__, ha->fw_options[1], ha->fw_options[2],
@@ -5060,7 +5109,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (N2N_TOPO(ha)) {
if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
/* borrowing */
- u32 *bp, i, sz;
+ u32 *bp, sz;
memset(ha->init_cb, 0, ha->init_cb_size);
sz = min_t(int, sizeof(struct els_plogi_payload),
@@ -5068,13 +5117,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
rval = qla24xx_get_port_login_templ(vha,
ha->init_cb_dma, (void *)ha->init_cb, sz);
if (rval == QLA_SUCCESS) {
+ __be32 *q = &ha->plogi_els_payld.data[0];
+
bp = (uint32_t *)ha->init_cb;
- for (i = 0; i < sz/4 ; i++, bp++)
- *bp = cpu_to_be32(*bp);
+ cpu_to_be32_array(q, bp, sz / 4);
- memcpy(&ha->plogi_els_payld.data,
- (void *)ha->init_cb,
- sizeof(ha->plogi_els_payld.data));
+ memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
} else {
ql_dbg(ql_dbg_init, vha, 0x00d1,
"PLOGI ELS param read fail.\n");
@@ -5097,6 +5145,7 @@ skip_login:
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
+ return QLA_FUNCTION_FAILED;
}
found_devs = 0;
@@ -5541,24 +5590,22 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
vha->device_flags |= SWITCH_FOUND;
+ rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
+ if (rval != QLA_SUCCESS)
+ ql_dbg(ql_dbg_disc, vha, 0x20ff,
+ "Failed to get Fabric Port Name\n");
if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
rval = qla2x00_send_change_request(vha, 0x3, 0);
if (rval != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x121,
- "Failed to enable receiving of RSCN requests: 0x%x.\n",
- rval);
+ "Failed to enable receiving of RSCN requests: 0x%x.\n",
+ rval);
}
-
do {
qla2x00_mgmt_svr_login(vha);
- /* FDMI support. */
- if (ql2xfdmienable &&
- test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
- qla2x00_fdmi_register(vha);
-
/* Ensure we are logged into the SNS. */
loop_id = NPH_SNS_LID(ha);
rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
@@ -5570,6 +5617,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
return rval;
}
+
+ /* FDMI support. */
+ if (ql2xfdmienable &&
+ test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
+ qla2x00_fdmi_register(vha);
+
if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
if (qla2x00_rft_id(vha)) {
/* EMPTY */
@@ -5812,7 +5865,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
if (ql2xgffidenable &&
(!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
- new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
+ new_fcport->fc4_type != 0))
continue;
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -6656,7 +6709,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
- ha->flags.fw_started = 0;
+ QLA_FW_STOPPED(ha);
ha->flags.fw_init_done = 0;
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
@@ -8663,61 +8716,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
return status;
}
-void
-qla81xx_update_fw_options(scsi_qla_host_t *vha)
-{
- struct qla_hw_data *ha = vha->hw;
-
- /* Hold status IOCBs until ABTS response received. */
- if (ql2xfwholdabts)
- ha->fw_options[3] |= BIT_12;
-
- /* Set Retry FLOGI in case of P2P connection */
- if (ha->operating_mode == P2P) {
- ha->fw_options[2] |= BIT_3;
- ql_dbg(ql_dbg_disc, vha, 0x2103,
- "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
- __func__, ha->fw_options[2]);
- }
-
- /* Move PUREX, ABTS RX & RIDA to ATIOQ */
- if (ql2xmvasynctoatio) {
- if (qla_tgt_mode_enabled(vha) ||
- qla_dual_mode_enabled(vha))
- ha->fw_options[2] |= BIT_11;
- else
- ha->fw_options[2] &= ~BIT_11;
- }
-
- if (qla_tgt_mode_enabled(vha) ||
- qla_dual_mode_enabled(vha)) {
- /* FW auto send SCSI status during */
- ha->fw_options[1] |= BIT_8;
- ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
-
- /* FW perform Exchange validation */
- ha->fw_options[2] |= BIT_4;
- } else {
- ha->fw_options[1] &= ~BIT_8;
- ha->fw_options[10] &= 0x00ff;
-
- ha->fw_options[2] &= ~BIT_4;
- }
-
- if (ql2xetsenable) {
- /* Enable ETS Burst. */
- memset(ha->fw_options, 0, sizeof(ha->fw_options));
- ha->fw_options[2] |= BIT_9;
- }
-
- ql_dbg(ql_dbg_init, vha, 0x00e9,
- "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
- __func__, ha->fw_options[1], ha->fw_options[2],
- ha->fw_options[3], vha->host->active_mode);
-
- qla2x00_set_fw_options(vha, ha->fw_options);
-}
-
/*
* qla24xx_get_fcp_prio
* Gets the fcp cmd priority value for the logged in port.
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 47bf60a9490a..182bd68c79ac 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -530,7 +530,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
- mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
+ mrk24->handle = make_handle(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16((uint16_t)lun);
@@ -1655,7 +1655,7 @@ qla24xx_start_scsi(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1843,7 +1843,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Fill-in common area */
cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
@@ -1975,7 +1975,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -2178,7 +2178,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Fill-in common area */
cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
@@ -2362,6 +2362,8 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+
if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
} else {
@@ -2489,7 +2491,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->entry_type = TSK_MGMT_IOCB_TYPE;
tsk->entry_count = 1;
- tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
+ tsk->handle = make_handle(req->id, tsk->handle);
tsk->nport_handle = cpu_to_le16(fcport->loop_id);
tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->control_flags = cpu_to_le32(flags);
@@ -2684,9 +2686,9 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_dsd_count = 0;
els_iocb->opcode = elsio->u.els_logo.els_cmd;
- els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- els_iocb->port_id[1] = sp->fcport->d_id.b.area;
- els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->d_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
/* For SID the byte order is different than DID */
els_iocb->s_id[1] = vha->d_id.b.al_pa;
els_iocb->s_id[2] = vha->d_id.b.area;
@@ -2939,7 +2941,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->fcport = fcport;
elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
- init_completion(&elsio->u.els_plogi.comp);
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
@@ -2949,7 +2950,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
ptr = elsio->u.els_plogi.els_plogi_pyld =
- dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
if (!elsio->u.els_plogi.els_plogi_pyld) {
@@ -2958,7 +2959,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
resp_ptr = elsio->u.els_plogi.els_resp_pyld =
- dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
if (!elsio->u.els_plogi.els_resp_pyld) {
@@ -2982,6 +2983,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
sizeof(*elsio->u.els_plogi.els_plogi_pyld));
+ init_completion(&elsio->u.els_plogi.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -3030,9 +3032,9 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
sp->type == SRB_ELS_CMD_RPT ?
bsg_request->rqst_data.r_els.els_code :
bsg_request->rqst_data.h_els.command_code;
- els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- els_iocb->port_id[1] = sp->fcport->d_id.b.area;
- els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->d_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
els_iocb->control_flags = 0;
els_iocb->rx_byte_count =
cpu_to_le32(bsg_job->reply_payload.payload_len);
@@ -3358,7 +3360,7 @@ sufficient_dsds:
}
cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -3429,7 +3431,7 @@ sufficient_dsds:
goto queuing_error;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
@@ -3534,7 +3536,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->handle = cpu_to_le32(make_handle(req->id, sp->handle));
if (sp->fcport) {
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -3542,7 +3544,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
}
abt_iocb->handle_to_abort =
- cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ cpu_to_le32(make_handle(aio->u.abt.req_que_no,
aio->u.abt.cmd_hndl));
abt_iocb->vp_index = vha->vp_idx;
abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
@@ -3905,7 +3907,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
}
cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e40705d38cea..8a78d395bbc8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -31,6 +31,143 @@ const char *const port_state_str[] = {
"ONLINE"
};
+static void qla24xx_purex_iocb(scsi_qla_host_t *vha, void *pkt,
+ void (*process_item)(struct scsi_qla_host *vha, void *pkt))
+{
+ struct purex_list *list = &vha->purex_list;
+ struct purex_item *item;
+ ulong flags;
+
+ /* may be called from interrupt context -- use a non-sleeping allocation */
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item) {
+ ql_log(ql_log_warn, vha, 0x5092,
+ ">> Failed allocate purex list item.\n");
+ return;
+ }
+
+ item->vha = vha;
+ item->process_item = process_item;
+ memcpy(&item->iocb, pkt, sizeof(item->iocb));
+
+ spin_lock_irqsave(&list->lock, flags);
+ list_add_tail(&item->list, &list->head);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
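+ /* the DPC thread picks up the queued PUREX item */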
+}
+
+static void
+qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
+{
+ struct abts_entry_24xx *abts = pkt;
+ struct qla_hw_data *ha = vha->hw;
+ struct els_entry_24xx *rsp_els;
+ struct abts_entry_24xx *abts_rsp;
+ dma_addr_t dma;
+ uint32_t fctl;
+ int rval;
+
+ ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
+
+ ql_log(ql_log_warn, vha, 0x0287,
+ "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
+ abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
+ abts->seq_id, abts->seq_cnt);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
+ "-------- ABTS RCV -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
+ (uint8_t *)abts, sizeof(*abts));
+
+ rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
+ GFP_KERNEL);
+ if (!rsp_els) {
+ ql_log(ql_log_warn, vha, 0x0287,
+ "Failed allocate dma buffer ABTS/ELS RSP.\n");
+ return;
+ }
+
+ /* terminate exchange */
+ rsp_els->entry_type = ELS_IOCB_TYPE;
+ rsp_els->entry_count = 1;
+ rsp_els->nport_handle = ~0;
+ rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
+ rsp_els->control_flags = EPD_RX_XCHG;
+ ql_dbg(ql_dbg_init, vha, 0x0283,
+ "Sending ELS Response to terminate exchange %#x...\n",
+ abts->rx_xch_addr_to_abort);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
+ (uint8_t *)rsp_els, sizeof(*rsp_els));
+ rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0288,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (rsp_els->comp_status) {
+ ql_log(ql_log_warn, vha, 0x0289,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, rsp_els->comp_status,
+ rsp_els->error_subcode_1, rsp_els->error_subcode_2);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x028a,
+ "%s: abort exchange done.\n", __func__);
+ }
+
+ /* send ABTS response */
+ abts_rsp = (void *)rsp_els;
+ memset(abts_rsp, 0, sizeof(*abts_rsp));
+ abts_rsp->entry_type = ABTS_RSP_TYPE;
+ abts_rsp->entry_count = 1;
+ abts_rsp->nport_handle = abts->nport_handle;
+ abts_rsp->vp_idx = abts->vp_idx;
+ abts_rsp->sof_type = abts->sof_type & 0xf0;
+ abts_rsp->rx_xch_addr = abts->rx_xch_addr;
+ abts_rsp->d_id[0] = abts->s_id[0];
+ abts_rsp->d_id[1] = abts->s_id[1];
+ abts_rsp->d_id[2] = abts->s_id[2];
+ abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
+ abts_rsp->s_id[0] = abts->d_id[0];
+ abts_rsp->s_id[1] = abts->d_id[1];
+ abts_rsp->s_id[2] = abts->d_id[2];
+ abts_rsp->cs_ctl = abts->cs_ctl;
+ /*
+  * Build the response F_CTL: flip bit 23 (Exchange Context) of the
+  * received F_CTL, drop the other bits of its high byte, then OR in
+  * LAST_SEQ|END_SEQ|SEQ_INIT; the byte split below masks off any
+  * stray upper bits.
+  */
+ fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
+ FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
+ abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
+ abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
+ abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
+ abts_rsp->type = FC_TYPE_BLD;
+ abts_rsp->rx_id = abts->rx_id;
+ abts_rsp->ox_id = abts->ox_id;
+ abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
+ abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
+ abts_rsp->payload.ba_acc.high_seq_cnt = ~0;
+ abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
+ ql_dbg(ql_dbg_init, vha, 0x028b,
+ "Sending BA ACC response to ABTS %#x...\n",
+ abts->rx_xch_addr_to_abort);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
+ (uint8_t *)abts_rsp, sizeof(*abts_rsp));
+ rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x028c,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (abts_rsp->comp_status) {
+ ql_log(ql_log_warn, vha, 0x028d,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, abts_rsp->comp_status,
+ abts_rsp->payload.error.subcode1,
+ abts_rsp->payload.error.subcode2);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x028e,
+ "%s: done.\n", __func__);
+ }
+
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
+}
+
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
* @irq: interrupt number
@@ -716,12 +853,24 @@ skip_rio:
break;
case MBA_SYSTEM_ERR: /* System Error */
- mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) ?
- RD_REG_WORD(&reg24->mailbox7) : 0;
- ql_log(ql_log_warn, vha, 0x5003,
- "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
- "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
+ mbx = 0;
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ u16 m[4];
+
+ m[0] = RD_REG_WORD(&reg24->mailbox4);
+ m[1] = RD_REG_WORD(&reg24->mailbox5);
+ m[2] = RD_REG_WORD(&reg24->mailbox6);
+ mbx = m[3] = RD_REG_WORD(&reg24->mailbox7);
+
+ ql_log(ql_log_warn, vha, 0x5003,
+ "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
+ mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
+ } else {
+ ql_log(ql_log_warn, vha, 0x5003,
+ "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
+ mb[1], mb[2], mb[3]);
+ }
+
ha->fw_dump_mpi =
(IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
RD_REG_WORD(&reg24->mailbox7) & BIT_8;
@@ -813,13 +962,15 @@ skip_rio:
"LOOP UP detected (%s Gbps).\n",
qla2x00_get_link_speed_str(ha, ha->link_data_rate));
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (mb[2] & BIT_0)
+ ql_log(ql_log_info, vha, 0x11a0,
+ "FEC=enabled (link up).\n");
+ }
+
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
- if (AUTO_DETECT_SFP_SUPPORT(vha)) {
- set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- }
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
@@ -1254,6 +1405,7 @@ global_port_update:
ql_dbg(ql_dbg_async, vha, 0x5052,
"D-Port Diagnostics: %04x %04x %04x %04x\n",
mb[0], mb[1], mb[2], mb[3]);
+ memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
static char *results[] = {
"start", "done(pass)", "done(error)", "undefined" };
@@ -1291,6 +1443,11 @@ global_port_update:
case MBA_TRANS_INSERT:
ql_dbg(ql_dbg_async, vha, 0x5091,
"Transceiver Insertion: %04x\n", mb[1]);
+ set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
+ break;
+
+ case MBA_TRANS_REMOVE:
+ ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
break;
default:
@@ -1754,11 +1911,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
- ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
- "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
- "iop0=%x.\n", type, fcport->port_name, sp->handle,
- fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ ql_dbg(ql_dbg_async, sp->vha, 0x5036,
+ "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
le32_to_cpu(logio->io_parameter[0]));
vha->hw->exch_starvation = 0;
@@ -1837,11 +1992,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
- "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n", type, fcport->port_name,
- sp->handle, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ ql_dbg(ql_dbg_async, sp->vha, 0x5037,
+ "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
le32_to_cpu(logio->io_parameter[1]));
@@ -1910,6 +2063,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct nvmefc_fcp_req *fd;
uint16_t ret = QLA_SUCCESS;
uint16_t comp_status = le16_to_cpu(sts->comp_status);
+ int logit = 0;
iocb = &sp->u.iocb_cmd;
fcport = sp->fcport;
@@ -1920,6 +2074,12 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (unlikely(iocb->u.nvme.aen_op))
atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
+ if (unlikely(comp_status != CS_COMPLETE))
+ logit = 1;
+
+ fd->transferred_length = fd->payload_length -
+ le32_to_cpu(sts->residual_len);
+
/*
* State flags: Bit 6 and 0.
* If 0 is set, we don't care about 6.
@@ -1930,8 +2090,20 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
*/
if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
iocb->u.nvme.rsp_pyld_len = 0;
- } else if ((state_flags & SF_FCP_RSP_DMA)) {
+ } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
+ (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
+ /* Response already DMA'd to fd->rspaddr. */
iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ } else if ((state_flags & SF_FCP_RSP_DMA)) {
+ /*
+ * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
+ * as an error.
+ */
+ iocb->u.nvme.rsp_pyld_len = 0;
+ fd->transferred_length = 0;
+ ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
+ "Unexpected values in NVMe_RSP IU.\n");
+ logit = 1;
} else if (state_flags & SF_NVME_ERSP) {
uint32_t *inbuf, *outbuf;
uint16_t iter;
@@ -1954,16 +2126,28 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iter = iocb->u.nvme.rsp_pyld_len >> 2;
for (; iter; iter--)
*outbuf++ = swab32(*inbuf++);
- } else { /* unhandled case */
- ql_log(ql_log_warn, fcport->vha, 0x503a,
- "NVME-%s error. Unhandled state_flags of %x\n",
- sp->name, state_flags);
}
- fd->transferred_length = fd->payload_length -
- le32_to_cpu(sts->residual_len);
+ if (state_flags & SF_NVME_ERSP) {
+ struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
+ u32 tgt_xfer_len;
- if (unlikely(comp_status != CS_COMPLETE))
+ tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
+ if (fd->transferred_length != tgt_xfer_len) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3079,
+ "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
+ tgt_xfer_len, fd->transferred_length);
+ logit = 1;
+ } else if (comp_status == CS_DATA_UNDERRUN) {
+ /*
+ * Do not log if this is just an underflow and there
+ * is no data loss.
+ */
+ logit = 0;
+ }
+ }
+
+ if (unlikely(logit))
ql_log(ql_log_warn, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
@@ -2516,11 +2700,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
- if (sp->abort)
- sp->aborted = 1;
- else
- sp->completed = 1;
-
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
ql_dbg(ql_dbg_io, vha, 0x3015,
@@ -3083,6 +3262,11 @@ process_err:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
+ if (qla_ini_mode_enabled(vha)) {
+ qla24xx_purex_iocb(vha, pkt,
+ qla24xx_process_abts);
+ break;
+ }
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) {
/* ensure that the ATIO queue is empty */
@@ -3127,6 +3311,19 @@ process_err:
qla_ctrlvp_completed(vha, rsp->req,
(struct vp_ctrl_entry_24xx *)pkt);
break;
+ case PUREX_IOCB_TYPE:
+ {
+ struct purex_entry_24xx *purex = (void *)pkt;
+
+ if (purex->els_frame_payload[3] != ELS_COMMAND_RDP) {
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "Discarding ELS Request opcode %#x...\n",
+ purex->els_frame_payload[3]);
+ break;
+ }
+ qla24xx_purex_iocb(vha, pkt, qla24xx_process_purex_rdp);
+ break;
+ }
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -3442,6 +3639,25 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
struct qla_hw_data *ha;
struct qla_qpair *qpair;
+
+ qpair = dev_id;
+ if (!qpair) {
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = qpair->hw;
+
+ queue_work(ha->wq, &qpair->q_work);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
struct device_reg_24xx __iomem *reg;
unsigned long flags;
@@ -3453,13 +3669,10 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
}
ha = qpair->hw;
- /* Clear the interrupt, if enabled, for this response queue */
- if (unlikely(!ha->flags.disable_msix_handshake)) {
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- }
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
queue_work(ha->wq, &qpair->q_work);
@@ -3478,6 +3691,7 @@ static const struct qla_init_msix_entry msix_entries[] = {
{ "rsp_q", qla24xx_msix_rsp_q },
{ "atio_q", qla83xx_msix_atio_q },
{ "qpair_multiq", qla2xxx_msix_rsp_q },
+ { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
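The PUREX deferral above queues raw IOCBs for the DPC thread to drain (see qla24xx_process_purex_list in the qla_os.c hunks below). The list types are declared in the driver headers, not in this diff; a sketch of their assumed shape:

struct purex_list {
	struct list_head head;
	spinlock_t lock;
};

struct purex_item {
	struct list_head list;
	struct scsi_qla_host *vha;
	void (*process_item)(struct scsi_qla_host *vha, void *pkt);
	uint8_t iocb[64];	/* raw copy of one 64-byte response-ring IOCB */
};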
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 9e09964f5c0e..d6c991bd1bde 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -117,10 +117,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
- if (ha->pdev->error_state > pci_channel_io_frozen) {
+ if (ha->pdev->error_state == pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x1001,
- "error_state is greater than pci_channel_io_frozen, "
- "exiting.\n");
+ "PCI channel failed permanently, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -643,30 +642,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
return rval;
}
-#define EXTENDED_BB_CREDITS BIT_0
#define NVME_ENABLE_FLAG BIT_3
-static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
-{
- uint16_t mb4 = BIT_0;
-
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
- mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
-
- return mb4;
-}
-
-static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
-{
- uint16_t mb4 = BIT_0;
-
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- struct nvram_81xx *nv = ha->nvram;
-
- mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
- }
-
- return mb4;
-}
/*
* qla2x00_execute_fw
@@ -690,10 +666,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
struct qla_hw_data *ha = vha->hw;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ u8 semaphore = 0;
+#define EXE_FW_FORCE_SEMAPHORE BIT_7
+ u8 retry = 3;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
"Entered %s.\n", __func__);
+again:
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
@@ -703,25 +683,13 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[3] = 0;
mcp->mb[4] = 0;
mcp->mb[11] = 0;
- ha->flags.using_lr_setting = 0;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
- IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ql2xautodetectsfp) {
- if (ha->flags.detected_lr_sfp) {
- mcp->mb[4] |=
- qla25xx_set_sfp_lr_dist(ha);
- ha->flags.using_lr_setting = 1;
- }
- } else {
- struct nvram_81xx *nv = ha->nvram;
- /* set LR distance if specified in nvram */
- if (nv->enhanced_features &
- NEF_LR_DIST_ENABLE) {
- mcp->mb[4] |=
- qla25xx_set_nvr_lr_dist(ha);
- ha->flags.using_lr_setting = 1;
- }
- }
+
+ /* Enable BPM? */
+ if (ha->flags.lr_detected) {
+ mcp->mb[4] = BIT_0;
+ if (IS_BPM_RANGE_CAPABLE(ha))
+ mcp->mb[4] |=
+ ha->lr_distance << LR_DIST_FW_POS;
}
if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
@@ -747,6 +715,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
if (ha->flags.exchoffld_enabled)
mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
+ if (semaphore)
+ mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
+
mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
} else {
@@ -763,6 +734,15 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
+ if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
+ mcp->mb[1] == 0x27 && retry) {
+ semaphore = 1;
+ retry--;
+ ql_dbg(ql_dbg_async, vha, 0x1026,
+ "Exe FW: force semaphore.\n");
+ goto again;
+ }
+
ql_dbg(ql_dbg_mbx, vha, 0x1026,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
return rval;
@@ -1137,11 +1117,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
if (IS_QLA28XX(ha)) {
- if (mcp->mb[16] & BIT_10) {
- ql_log(ql_log_info, vha, 0xffff,
- "FW support secure flash updates\n");
+ if (mcp->mb[16] & BIT_10)
ha->flags.secure_fw = 1;
- }
+
+ ql_log(ql_log_info, vha, 0xffff,
+ "Secure Flash Update in FW: %s\n",
+ (ha->flags.secure_fw) ? "Supported" :
+ "Not Supported");
}
}
@@ -1405,17 +1387,20 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ if (!vha->hw->flags.fw_started)
+ return QLA_INVALID_COMMAND;
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
"Entered %s.\n", __func__);
mcp->mb[0] = MBC_IOCB_COMMAND_A64;
mcp->mb[1] = 0;
- mcp->mb[2] = MSW(phys_addr);
- mcp->mb[3] = LSW(phys_addr);
+ mcp->mb[2] = MSW(LSD(phys_addr));
+ mcp->mb[3] = LSW(LSD(phys_addr));
mcp->mb[6] = MSW(MSD(phys_addr));
mcp->mb[7] = LSW(MSD(phys_addr));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_2|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
mcp->tov = tov;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1424,13 +1409,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
} else {
- sts_entry_t *sts_entry = (sts_entry_t *) buffer;
+ sts_entry_t *sts_entry = buffer;
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
- "Done %s.\n", __func__);
+ "Done %s (status=%x).\n", __func__,
+ sts_entry->entry_status);
}
return rval;
@@ -1475,7 +1461,7 @@ qla2x00_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
"Entered %s.\n", __func__);
- if (vha->flags.qpairs_available && sp->qpair)
+ if (sp->qpair)
req = sp->qpair->req;
else
req = vha->req;
@@ -2045,6 +2031,57 @@ gpd_error_out:
return rval;
}
+int
+qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
+ struct port_database_24xx *pdb)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ dma_addr_t pdb_dma;
+ int rval;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
+ "Entered %s.\n", __func__);
+
+ memset(pdb, 0, sizeof(*pdb));
+
+ pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
+ sizeof(*pdb), DMA_FROM_DEVICE);
+ if (!pdb_dma) {
+ ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ mcp->mb[0] = MBC_GET_PORT_DATABASE;
+ mcp->mb[1] = nport_handle;
+ mcp->mb[2] = MSW(LSD(pdb_dma));
+ mcp->mb[3] = LSW(LSD(pdb_dma));
+ mcp->mb[6] = MSW(MSD(pdb_dma));
+ mcp->mb[7] = LSW(MSD(pdb_dma));
+ mcp->mb[9] = 0;
+ mcp->mb[10] = 0;
+ mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->buf_size = sizeof(*pdb);
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = vha->hw->login_timeout * 2;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x111a,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
+ sizeof(*pdb), DMA_FROM_DEVICE);
+
+ return rval;
+}
+
/*
* qla2x00_get_firmware_state
* Get adapter firmware state.
@@ -2384,7 +2421,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
- lg->handle = MAKE_HANDLE(req->id, lg->handle);
+ lg->handle = make_handle(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
if (opt & BIT_0)
@@ -2654,7 +2691,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
- lg->handle = MAKE_HANDLE(req->id, lg->handle);
+ lg->handle = make_handle(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
@@ -3060,18 +3097,19 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter, dwords;
+ uint32_t *iter = (void *)stats;
+ ushort dwords = sizeof(*stats) / sizeof(*iter);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
- mc.mb[2] = MSW(stats_dma);
- mc.mb[3] = LSW(stats_dma);
+ mc.mb[2] = MSW(LSD(stats_dma));
+ mc.mb[3] = LSW(LSD(stats_dma));
mc.mb[6] = MSW(MSD(stats_dma));
mc.mb[7] = LSW(MSD(stats_dma));
- mc.mb[8] = sizeof(struct link_statistics) / 4;
+ mc.mb[8] = dwords;
mc.mb[9] = cpu_to_le16(vha->vp_idx);
mc.mb[10] = cpu_to_le16(options);
@@ -3086,8 +3124,6 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
"Done %s.\n", __func__);
/* Re-endianize - firmware data is le32. */
- dwords = sizeof(struct link_statistics) / 4;
- iter = &stats->link_fail_cnt;
for ( ; dwords--; iter++)
le32_to_cpus(iter);
}
@@ -3117,7 +3153,7 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
- if (vha->flags.qpairs_available && sp->qpair)
+ if (sp->qpair)
req = sp->qpair->req;
else
return QLA_FUNCTION_FAILED;
@@ -3145,9 +3181,9 @@ qla24xx_abort_command(srb_t *sp)
abt->entry_type = ABORT_IOCB_TYPE;
abt->entry_count = 1;
- abt->handle = MAKE_HANDLE(req->id, abt->handle);
+ abt->handle = make_handle(req->id, abt->handle);
abt->nport_handle = cpu_to_le16(fcport->loop_id);
- abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
+ abt->handle_to_abort = make_handle(req->id, handle);
abt->port_id[0] = fcport->d_id.b.al_pa;
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
@@ -3224,7 +3260,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
tsk->p.tsk.entry_count = 1;
- tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
+ tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -3888,11 +3924,29 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_SCAN;
fcport->n2n_flag = 0;
}
+ id.b24 = 0;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(rptid_entry->u.f1.port_name)) {
+ vha->d_id.b24 = 0;
+ vha->d_id.b.al_pa = 1;
+ ha->flags.n2n_bigger = 1;
+
+ id.b.al_pa = 2;
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: assign local id %x remote id %x\n",
+ vha->d_id.b24, id.b24);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote login - Waiting for WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+ ha->flags.n2n_bigger = 0;
+ }
fcport = qla2x00_find_fcport_by_wwpn(vha,
rptid_entry->u.f1.port_name, 1);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
if (fcport) {
fcport->plogi_nack_done_deadline = jiffies + HZ;
fcport->dm_login_expire = jiffies + 2*HZ;
@@ -3903,6 +3957,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vha->flags.nvme_enabled)
fcport->fc4_type |= FS_FC4TYPE_NVME;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(fcport->port_name)) {
+ fcport->d_id = id;
+ }
+
switch (fcport->disc_state) {
case DSC_DELETED:
set_bit(RELOGIN_NEEDED,
@@ -3915,25 +3974,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
break;
}
} else {
- id.b24 = 0;
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(rptid_entry->u.f1.port_name)) {
- vha->d_id.b24 = 0;
- vha->d_id.b.al_pa = 1;
- ha->flags.n2n_bigger = 1;
- ha->flags.n2n_ae = 0;
-
- id.b.al_pa = 2;
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: assign local id %x remote id %x\n",
- vha->d_id.b24, id.b24);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Remote login - Waiting for WWPN %8phC.\n",
- rptid_entry->u.f1.port_name);
- ha->flags.n2n_bigger = 0;
- ha->flags.n2n_ae = 1;
- }
qla24xx_post_newsess_work(vha, &id,
rptid_entry->u.f1.port_name,
rptid_entry->u.f1.node_name,
@@ -4827,6 +4867,101 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
return rval;
}
+int
+qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint8_t *els_cmd_map;
+ dma_addr_t els_cmd_map_dma;
+ uint cmd_opcode = ELS_COMMAND_RDP;
+ uint index = cmd_opcode / 8;
+ uint bit = cmd_opcode % 8;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
+ return QLA_SUCCESS;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
+ "Entered %s.\n", __func__);
+
+ els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
+ &els_cmd_map_dma, GFP_KERNEL);
+ if (!els_cmd_map) {
+ ql_log(ql_log_warn, vha, 0x7101,
+ "Failed to allocate RDP els command param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ els_cmd_map[index] |= 1 << bit;
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
+ mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
+ mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
+ mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
+ mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->buf_size = ELS_CMD_MAP_SIZE;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x118d,
+ "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
+ "Done %s.\n", __func__);
+ }
+
+ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
+ els_cmd_map, els_cmd_map_dma);
+
+ return rval;
+}
+
+int
+qla24xx_get_buffer_credits(scsi_qla_host_t *vha, struct buffer_credit_24xx *bbc,
+ dma_addr_t bbc_dma)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118e,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_BUFFER_CREDITS << 8;
+ mcp->mb[2] = MSW(LSD(bbc_dma));
+ mcp->mb[3] = LSW(LSD(bbc_dma));
+ mcp->mb[6] = MSW(MSD(bbc_dma));
+ mcp->mb[7] = LSW(MSD(bbc_dma));
+ mcp->mb[8] = sizeof(*bbc) / sizeof(*bbc->parameter);
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->buf_size = sizeof(*bbc);
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x118f,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1190,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
@@ -4880,8 +5015,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mcp->mb[0] = MBC_READ_SFP;
mcp->mb[1] = dev;
- mcp->mb[2] = MSW(sfp_dma);
- mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[2] = MSW(LSD(sfp_dma));
+ mcp->mb[3] = LSW(LSD(sfp_dma));
mcp->mb[6] = MSW(MSD(sfp_dma));
mcp->mb[7] = LSW(MSD(sfp_dma));
mcp->mb[8] = len;
@@ -4934,8 +5069,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mcp->mb[0] = MBC_WRITE_SFP;
mcp->mb[1] = dev;
- mcp->mb[2] = MSW(sfp_dma);
- mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[2] = MSW(LSD(sfp_dma));
+ mcp->mb[3] = LSW(LSD(sfp_dma));
mcp->mb[6] = MSW(MSD(sfp_dma));
mcp->mb[7] = LSW(MSD(sfp_dma));
mcp->mb[8] = len;
@@ -5170,10 +5305,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
- IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+ IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->in_mb |= MBX_1;
- if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
@@ -5407,6 +5543,15 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1107,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
+ if (mcp->mb[1] != 0x7)
+ ha->link_data_rate = mcp->mb[1];
+
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (mcp->mb[4] & BIT_0)
+ ql_log(ql_log_info, vha, 0x11a2,
+ "FEC=enabled (data rate).\n");
+ }
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
"Done %s.\n", __func__);
if (mcp->mb[1] != 0x7)
@@ -6688,3 +6833,60 @@ int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
return rval;
}
+
+int
+ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval;
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
+ __func__, options);
+
+ mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
+ mcp->mb[1] = options;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ if (options & BIT_0) {
+ if (options & BIT_1) {
+ mcp->mb[2] = led[2];
+ mcp->out_mb |= MBX_2;
+ }
+ if (options & BIT_2) {
+ mcp->mb[3] = led[0];
+ mcp->out_mb |= MBX_3;
+ }
+ if (options & BIT_3) {
+ mcp->mb[4] = led[1];
+ mcp->out_mb |= MBX_4;
+ }
+ } else {
+ mcp->in_mb |= MBX_4|MBX_3|MBX_2;
+ }
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval) {
+ ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
+ __func__, rval, mcp->mb[0], mcp->mb[1]);
+ return rval;
+ }
+
+ if (options & BIT_0) {
+ ha->beacon_blink_led = 0;
+ ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
+ } else {
+ led[2] = mcp->mb[2];
+ led[0] = mcp->mb[3];
+ led[1] = mcp->mb[4];
+ ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
+ __func__, led[0], led[1], led[2]);
+ }
+
+ return rval;
+}
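Several mailbox call sites above change MSW(addr)/LSW(addr) to MSW(LSD(addr))/LSW(LSD(addr)) so the 64-bit dma_addr_t is explicitly narrowed to its low double-word before the 16-bit word split. Assuming the usual qla_def.h word-extraction helpers:

#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
#define LSD(x)	((uint32_t)((uint64_t)(x)))
#define MSD(x)	((uint32_t)(((uint64_t)(x)) >> 32))

/*
 * Resulting mailbox layout for a 64-bit DMA address:
 *   mb[3] = LSW(LSD(addr))   bits 15..0
 *   mb[2] = MSW(LSD(addr))   bits 31..16
 *   mb[7] = LSW(MSD(addr))   bits 47..32
 *   mb[6] = MSW(MSD(addr))   bits 63..48
 */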
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 8ae639d089d1..d82e92da529a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -361,6 +361,13 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
+ if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
+ if (atomic_read(&vha->loop_state) == LOOP_READY) {
+ qla24xx_process_purex_list(&vha->purex_list);
+ clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
+ }
+ }
+
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, vha, 0x4016,
"FCPort update scheduled.\n");
@@ -509,6 +516,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
vha->dpc_flags = 0L;
+ ha->dpc_active = 0;
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
/*
* To fix the issue of processing a parent's RSCN for the vport before
@@ -886,7 +896,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->rsp_q_out);
ret = qla25xx_request_irq(ha, qpair, qpair->msix,
- QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
+ ha->flags.disable_msix_handshake ?
+ QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
if (ret)
goto que_failed;
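The vector choice above pairs with the new "qpair_multiq_hs" entry added to msix_entries[] in the qla_isr.c hunk: with MSI-X handshake in use, the _hs handler clears the RISC interrupt via HCCR before queueing work. The two constants are assumed to index that table (values illustrative only):

#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q	3	/* "qpair_multiq", no handshake */
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS	4	/* "qpair_multiq_hs", clears HCCR */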
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index cad1fc2a1b28..df99911b8bb9 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -53,10 +53,9 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- if (ha->pdev->error_state > pci_channel_io_frozen) {
+ if (ha->pdev->error_state == pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x115c,
- "error_state is greater than pci_channel_io_frozen, "
- "exiting.\n");
+ "PCI channel failed permanently, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -3136,7 +3135,7 @@ qlafx00_start_scsi(srb_t *sp)
memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
- lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+ lcmd_pkt.handle = make_handle(req->id, sp->handle);
lcmd_pkt.reserved_0 = 0;
lcmd_pkt.port_path_ctrl = 0;
lcmd_pkt.reserved_1 = 0;
@@ -3206,7 +3205,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
- tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ tm_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
@@ -3232,9 +3231,9 @@ qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
abt_iocb.entry_count = 1;
- abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
abt_iocb.abort_handle =
- cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+ cpu_to_le32(make_handle(req->id, fxio->u.abt.cmd_hndl));
abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
abt_iocb.req_que_no = cpu_to_le16(req->id);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index bfcd02fdf2b8..4886d247df6f 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -413,7 +413,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
clr_ptr = (uint32_t *)cmd_pkt + 2;
@@ -610,7 +610,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
- .module = THIS_MODULE,
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7a94e1171c72..1d9a4866f9a7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -113,7 +113,8 @@ module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
- "0 - no FDMI. Default is 1 - perform FDMI.");
+ "0 - no FDMI registrations. "
+ "1 - provide FDMI registrations (default).");
#define MAX_Q_DEPTH 64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
@@ -122,11 +123,7 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
"Default is 64.");
-#if (IS_ENABLED(CONFIG_NVME_FC))
-int ql2xenabledif;
-#else
int ql2xenabledif = 2;
-#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF:\n"
@@ -306,6 +303,22 @@ MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
"0 (Default). Based on check.\n"
"1 Force using internal buffers\n");
+int ql2xsmartsan;
+module_param(ql2xsmartsan, int, 0444);
+module_param_named(smartsan, ql2xsmartsan, int, 0444);
+MODULE_PARM_DESC(ql2xsmartsan,
+ "Send SmartSAN Management Attributes for FDMI Registration."
+ " Default is 0 - No SmartSAN registration,"
+ " 1 - Register SmartSAN Management Attributes.");
+
+int ql2xrdpenable;
+module_param(ql2xrdpenable, int, 0444);
+module_param_named(rdpenable, ql2xrdpenable, int, 0444);
+MODULE_PARM_DESC(ql2xrdpenable,
+ "Enables RDP responses. "
+ "0 - no RDP responses (default). "
+ "1 - provide RDP responses.");
+
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
@@ -583,6 +596,9 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
case 3:
speed_str = "8.0GT/s";
break;
+ case 4:
+ speed_str = "16.0GT/s";
+ break;
default:
speed_str = "<unknown>";
break;
@@ -1253,17 +1269,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return SUCCESS;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- if (sp->completed) {
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return SUCCESS;
- }
-
- if (sp->abort || sp->aborted) {
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return FAILED;
- }
-
- sp->abort = 1;
sp->comp = &comp;
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -1688,6 +1693,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
unsigned long *flags)
__releases(qp->qp_lock_ptr)
@@ -1696,10 +1705,13 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
DECLARE_COMPLETION_ONSTACK(comp);
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
int rval;
bool ret_cmd;
uint32_t ratov_j;
+ lockdep_assert_held(qp->qp_lock_ptr);
+
if (qla2x00_chip_is_down(vha)) {
sp->done(sp, res);
return;
@@ -1715,7 +1727,6 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
sp->comp = &comp;
- sp->abort = 1;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
rval = ha->isp_ops->abort_command(sp);
@@ -1739,13 +1750,17 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- if (ret_cmd && (!sp->completed || !sp->aborted))
+ if (ret_cmd && blk_mq_request_started(cmd->request))
sp->done(sp, res);
} else {
sp->done(sp, res);
}
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
@@ -1792,6 +1807,10 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
@@ -2285,7 +2304,7 @@ static struct isp_operations qla81xx_isp_ops = {
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
- .update_fw_options = qla81xx_update_fw_options,
+ .update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
@@ -2402,7 +2421,7 @@ static struct isp_operations qla83xx_isp_ops = {
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
- .update_fw_options = qla81xx_update_fw_options,
+ .update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
@@ -3439,13 +3458,6 @@ skip_dpc:
if (test_bit(UNLOADING, &base_vha->dpc_flags))
return -ENODEV;
- if (ha->flags.detected_lr_sfp) {
- ql_log(ql_log_info, base_vha, 0xffff,
- "Reset chip to pick up LR SFP setting\n");
- set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
- qla2xxx_wake_dpc(base_vha);
- }
-
return 0;
probe_failed:
@@ -3720,6 +3732,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
+ /*
+ * if UNLOADING flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) {
if (ha->flags.fw_started)
@@ -3738,15 +3757,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_wait_for_sess_deletion(base_vha);
- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
- set_bit(UNLOADING, &base_vha->dpc_flags);
-
qla_nvme_delete(base_vha);
dma_free_coherent(&ha->pdev->dev,
@@ -3806,6 +3816,20 @@ qla2x00_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static inline void
+qla24xx_free_purex_list(struct purex_list *list)
+{
+ struct list_head *item, *next;
+ ulong flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ list_for_each_safe(item, next, &list->head) {
+ list_del(item);
+ kfree(list_entry(item, struct purex_item, list));
+ }
+ spin_unlock_irqrestore(&list->lock, flags);
+}
+
static void
qla2x00_free_device(scsi_qla_host_t *vha)
{
@@ -3838,6 +3862,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
}
+ qla24xx_free_purex_list(&vha->purex_list);
+
qla2x00_mem_free(ha);
qla82xx_md_free(vha);
@@ -3907,19 +3933,6 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
-/*
- * qla2x00_mark_all_devices_lost
- * Updates fcport state when device goes offline.
- *
- * Input:
- * ha = adapter block pointer.
- * fcport = port structure pointer.
- *
- * Return:
- * None.
- *
- * Context:
- */
void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
{
@@ -3931,16 +3944,6 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
qlt_schedule_sess_for_deletion(fcport);
-
- if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
- continue;
-
- /*
- * No point in marking the device as lost, if the device is
- * already DEAD.
- */
- if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
- continue;
}
}
@@ -4811,6 +4814,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->gpnid_list);
INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
+ INIT_LIST_HEAD(&vha->purex_list.head);
+ spin_lock_init(&vha->purex_list.lock);
+
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
@@ -4856,6 +4862,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
struct qla_work_evt *e;
uint8_t bail;
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return NULL;
+
QLA_VHA_MARK_BUSY(vha, bail);
if (bail)
return NULL;
@@ -5168,9 +5177,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->n2n_flag = 1;
}
fcport->fw_login_state = 0;
- /*
- * wait link init done before sending login
- */
+
+ schedule_delayed_work(&vha->scan.scan_work, 5);
} else {
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -5731,6 +5739,583 @@ retry_lock:
return;
}
+static bool
+qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
+ struct purex_entry_24xx *purex)
+{
+ char fwstr[16];
+ u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
+ struct port_database_24xx *pdb;
+
+ /* Domain Controller is always logged-out. */
+ /* if RDP request is not from Domain Controller: */
+ if (sid != 0xfffc01)
+ return false;
+
+ ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
+
+ pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
+ if (!pdb) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Failed allocate pdb\n", __func__);
+ } else if (qla24xx_get_port_database(vha, purex->nport_handle, pdb)) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Failed get pdb sid=%x\n", __func__, sid);
+ } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
+ pdb->current_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Port not logged in sid=%#x\n", __func__, sid);
+ } else {
+ /* RDP request is from logged in port */
+ kfree(pdb);
+ return false;
+ }
+ kfree(pdb);
+
+ vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
+ fwstr[strcspn(fwstr, " ")] = 0;
+ /* if FW version allows RDP response length up to 2048 bytes: */
+ if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
+ return false;
+
+ ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
+
+ /* RDP response length is to be reduced to maximum 256 bytes */
+ return true;
+}
+
+static uint
+qla25xx_rdp_port_speed_capability(struct qla_hw_data *ha)
+{
+ if (IS_CNA_CAPABLE(ha))
+ return RDP_PORT_SPEED_10GB;
+
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ unsigned int speeds = 0;
+
+ if (ha->max_supported_speed == 2) {
+ if (ha->min_supported_speed <= 6)
+ speeds |= RDP_PORT_SPEED_64GB;
+ }
+
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1) {
+ if (ha->min_supported_speed <= 5)
+ speeds |= RDP_PORT_SPEED_32GB;
+ }
+
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 4)
+ speeds |= RDP_PORT_SPEED_16GB;
+ }
+
+ if (ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 3)
+ speeds |= RDP_PORT_SPEED_8GB;
+ }
+
+ if (ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 2)
+ speeds |= RDP_PORT_SPEED_4GB;
+ }
+
+ return speeds;
+ }
+
+ if (IS_QLA2031(ha))
+ return RDP_PORT_SPEED_16GB|RDP_PORT_SPEED_8GB|
+ RDP_PORT_SPEED_4GB;
+
+ if (IS_QLA25XX(ha))
+ return RDP_PORT_SPEED_8GB|RDP_PORT_SPEED_4GB|
+ RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
+
+ if (IS_QLA24XX_TYPE(ha))
+ return RDP_PORT_SPEED_4GB|RDP_PORT_SPEED_2GB|
+ RDP_PORT_SPEED_1GB;
+
+ if (IS_QLA23XX(ha))
+ return RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
+
+ return RDP_PORT_SPEED_1GB;
+}
+
+static uint
+qla25xx_rdp_port_speed_currently(struct qla_hw_data *ha)
+{
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ return RDP_PORT_SPEED_1GB;
+
+ case PORT_SPEED_2GB:
+ return RDP_PORT_SPEED_2GB;
+
+ case PORT_SPEED_4GB:
+ return RDP_PORT_SPEED_4GB;
+
+ case PORT_SPEED_8GB:
+ return RDP_PORT_SPEED_8GB;
+
+ case PORT_SPEED_10GB:
+ return RDP_PORT_SPEED_10GB;
+
+ case PORT_SPEED_16GB:
+ return RDP_PORT_SPEED_16GB;
+
+ case PORT_SPEED_32GB:
+ return RDP_PORT_SPEED_32GB;
+
+ case PORT_SPEED_64GB:
+ return RDP_PORT_SPEED_64GB;
+
+ default:
+ return RDP_PORT_SPEED_UNKNOWN;
+ }
+}
+
+/*
+ * Function Name: qla24xx_process_purex_rdp
+ *
+ * Description:
+ * Prepare an RDP response and send it to the Fabric switch
+ *
+ * PARAMETERS:
+ * vha: SCSI qla host
+ * pkt: RDP request (PUREX IOCB) received by the HBA
+ */
+void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct purex_entry_24xx *purex = pkt;
+ dma_addr_t rsp_els_dma;
+ dma_addr_t rsp_payload_dma;
+ dma_addr_t stat_dma;
+ dma_addr_t bbc_dma;
+ dma_addr_t sfp_dma;
+ struct els_entry_24xx *rsp_els = NULL;
+ struct rdp_rsp_payload *rsp_payload = NULL;
+ struct link_statistics *stat = NULL;
+ struct buffer_credit_24xx *bbc = NULL;
+ uint8_t *sfp = NULL;
+ uint16_t sfp_flags = 0;
+ uint rsp_payload_length = sizeof(*rsp_payload);
+ int rval;
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
+ "%s: Enter\n", __func__);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
+ "-------- ELS REQ -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
+ (void *)purex, sizeof(*purex));
+
+ if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
+ rsp_payload_length =
+ offsetof(typeof(*rsp_payload), optical_elmt_desc);
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "Reducing RSP payload length to %u bytes...\n",
+ rsp_payload_length);
+ }
+
+ rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
+ &rsp_els_dma, GFP_KERNEL);
+ if (!rsp_els) {
+ ql_log(ql_log_warn, vha, 0x0183,
+ "Failed allocate dma buffer ELS RSP.\n");
+ goto dealloc;
+ }
+
+ rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
+ &rsp_payload_dma, GFP_KERNEL);
+ if (!rsp_payload) {
+ ql_log(ql_log_warn, vha, 0x0184,
+ "Failed allocate dma buffer ELS RSP payload.\n");
+ goto dealloc;
+ }
+
+ sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
+ &sfp_dma, GFP_KERNEL);
+
+ stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
+ &stat_dma, GFP_KERNEL);
+
+ bbc = dma_alloc_coherent(&ha->pdev->dev, sizeof(*bbc),
+ &bbc_dma, GFP_KERNEL);
+
+ /* Prepare Response IOCB */
+ rsp_els->entry_type = ELS_IOCB_TYPE;
+ rsp_els->entry_count = 1;
+ rsp_els->sys_define = 0;
+ rsp_els->entry_status = 0;
+ rsp_els->handle = 0;
+ rsp_els->nport_handle = purex->nport_handle;
+ rsp_els->tx_dsd_count = 1;
+ rsp_els->vp_index = purex->vp_idx;
+ rsp_els->sof_type = EST_SOFI3;
+ rsp_els->rx_xchg_address = purex->rx_xchg_addr;
+ rsp_els->rx_dsd_count = 0;
+ rsp_els->opcode = purex->els_frame_payload[0];
+
+ rsp_els->d_id[0] = purex->s_id[0];
+ rsp_els->d_id[1] = purex->s_id[1];
+ rsp_els->d_id[2] = purex->s_id[2];
+
+ rsp_els->control_flags = EPD_ELS_ACC;
+ rsp_els->rx_byte_count = 0;
+ rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
+
+ put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
+ rsp_els->tx_len = rsp_els->tx_byte_count;
+
+ rsp_els->rx_address = 0;
+ rsp_els->rx_len = 0;
+
+ /* Prepare Response Payload */
+ rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
+ rsp_payload->hdr.len = cpu_to_be32(
+ rsp_els->tx_byte_count - sizeof(rsp_payload->hdr));
+
+ /* Link service Request Info Descriptor */
+ rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
+ rsp_payload->ls_req_info_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
+ rsp_payload->ls_req_info_desc.req_payload_word_0 =
+ cpu_to_be32p((uint32_t *)purex->els_frame_payload);
+
+ /* Link service Request Info Descriptor 2 */
+ rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
+ rsp_payload->ls_req_info_desc2.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
+ rsp_payload->ls_req_info_desc2.req_payload_word_0 =
+ cpu_to_be32p((uint32_t *)purex->els_frame_payload);
+
+
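+ /* SFP Diagnostic Parameters Descriptor */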
+ rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
+ rsp_payload->sfp_diag_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
+
+ if (sfp) {
+ /* SFP Flags */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
+ if (!rval) {
+ /* SFP Flags bits 3-0: Port Tx Laser Type */
+ if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
+ sfp_flags |= BIT_0; /* short wave */
+ else if (sfp[0] & BIT_1)
+ sfp_flags |= BIT_1; /* long wave 1310nm */
+ else if (sfp[1] & BIT_4)
+ sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
+ }
+
+ /* SFP Type */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
+ if (!rval) {
+ sfp_flags |= BIT_4; /* optical */
+ if (sfp[0] == 0x3)
+ sfp_flags |= BIT_6; /* sfp+ */
+ }
+
+ rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
+
+ /* SFP Diagnostics */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
+ if (!rval) {
+ uint16_t *trx = (void *)sfp; /* already be16 */
+ rsp_payload->sfp_diag_desc.temperature = trx[0];
+ rsp_payload->sfp_diag_desc.vcc = trx[1];
+ rsp_payload->sfp_diag_desc.tx_bias = trx[2];
+ rsp_payload->sfp_diag_desc.tx_power = trx[3];
+ rsp_payload->sfp_diag_desc.rx_power = trx[4];
+ }
+ }
+
+ /* Port Speed Descriptor */
+ rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
+ rsp_payload->port_speed_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
+ rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
+ qla25xx_rdp_port_speed_capability(ha));
+ rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
+ qla25xx_rdp_port_speed_currently(ha));
+
+ /* Link Error Status Descriptor */
+ rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
+ rsp_payload->ls_err_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
+
+ if (stat) {
+ rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
+ if (!rval) {
+ rsp_payload->ls_err_desc.link_fail_cnt =
+ cpu_to_be32(stat->link_fail_cnt);
+ rsp_payload->ls_err_desc.loss_sync_cnt =
+ cpu_to_be32(stat->loss_sync_cnt);
+ rsp_payload->ls_err_desc.loss_sig_cnt =
+ cpu_to_be32(stat->loss_sig_cnt);
+ rsp_payload->ls_err_desc.prim_seq_err_cnt =
+ cpu_to_be32(stat->prim_seq_err_cnt);
+ rsp_payload->ls_err_desc.inval_xmit_word_cnt =
+ cpu_to_be32(stat->inval_xmit_word_cnt);
+ rsp_payload->ls_err_desc.inval_crc_cnt =
+ cpu_to_be32(stat->inval_crc_cnt);
+ rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
+ }
+ }
+
+ /* Portname Descriptor */
+ rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
+ rsp_payload->port_name_diag_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
+ memcpy(rsp_payload->port_name_diag_desc.WWNN,
+ vha->node_name,
+ sizeof(rsp_payload->port_name_diag_desc.WWNN));
+ memcpy(rsp_payload->port_name_diag_desc.WWPN,
+ vha->port_name,
+ sizeof(rsp_payload->port_name_diag_desc.WWPN));
+
+ /* F-Port Portname Descriptor */
+ rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
+ rsp_payload->port_name_direct_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
+ memcpy(rsp_payload->port_name_direct_desc.WWNN,
+ vha->fabric_node_name,
+ sizeof(rsp_payload->port_name_direct_desc.WWNN));
+ memcpy(rsp_payload->port_name_direct_desc.WWPN,
+ vha->fabric_port_name,
+ sizeof(rsp_payload->port_name_direct_desc.WWPN));
+
+ /* Buffer Credit Descriptor */
+ rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
+ rsp_payload->buffer_credit_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
+ rsp_payload->buffer_credit_desc.fcport_b2b = 0;
+ rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
+ rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
+
+ if (bbc) {
+ memset(bbc, 0, sizeof(*bbc));
+ rval = qla24xx_get_buffer_credits(vha, bbc, bbc_dma);
+ if (!rval) {
+ rsp_payload->buffer_credit_desc.fcport_b2b =
+ cpu_to_be32(LSW(bbc->parameter[0]));
+ }
+ }
+
+ if (rsp_payload_length < sizeof(*rsp_payload))
+ goto send;
+
+ /* Optical Element Descriptor, Temperature */
+ rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[0].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Voltage */
+ rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[1].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Tx Bias Current */
+ rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[2].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Tx Power */
+ rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[3].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Rx Power */
+ rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[4].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+
+ if (sfp) {
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
+ if (!rval) {
+ uint16_t *trx = (void *)sfp; /* already be16 */
+
+ /* Optical Element Descriptor, Temperature */
+ rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
+ rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
+ rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
+ rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
+ rsp_payload->optical_elmt_desc[0].element_flags =
+ cpu_to_be32(1 << 28);
+
+ /* Optical Element Descriptor, Voltage */
+ rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
+ rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
+ rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
+ rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
+ rsp_payload->optical_elmt_desc[1].element_flags =
+ cpu_to_be32(2 << 28);
+
+ /* Optical Element Descriptor, Tx Bias Current */
+ rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
+ rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
+ rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
+ rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
+ rsp_payload->optical_elmt_desc[2].element_flags =
+ cpu_to_be32(3 << 28);
+
+ /* Optical Element Descriptor, Tx Power */
+ rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
+ rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
+ rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
+ rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
+ rsp_payload->optical_elmt_desc[3].element_flags =
+ cpu_to_be32(4 << 28);
+
+ /* Optical Element Descriptor, Rx Power */
+ rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
+ rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
+ rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
+ rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
+ rsp_payload->optical_elmt_desc[4].element_flags =
+ cpu_to_be32(5 << 28);
+ }
+
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
+ if (!rval) {
+ /* Temperature high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[0].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 7 & 1) << 3 |
+ (sfp[0] >> 6 & 1) << 2 |
+ (sfp[4] >> 7 & 1) << 1 |
+ (sfp[4] >> 6 & 1) << 0);
+
+ /* Voltage high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[1].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 5 & 1) << 3 |
+ (sfp[0] >> 4 & 1) << 2 |
+ (sfp[4] >> 5 & 1) << 1 |
+ (sfp[4] >> 4 & 1) << 0);
+
+ /* Tx Bias Current high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[2].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 3 & 1) << 3 |
+ (sfp[0] >> 2 & 1) << 2 |
+ (sfp[4] >> 3 & 1) << 1 |
+ (sfp[4] >> 2 & 1) << 0);
+
+ /* Tx Power high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[3].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 1 & 1) << 3 |
+ (sfp[0] >> 0 & 1) << 2 |
+ (sfp[4] >> 1 & 1) << 1 |
+ (sfp[4] >> 0 & 1) << 0);
+
+ /* Rx Power high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[4].element_flags |=
+ cpu_to_be32(
+ (sfp[1] >> 7 & 1) << 3 |
+ (sfp[1] >> 6 & 1) << 2 |
+ (sfp[5] >> 7 & 1) << 1 |
+ (sfp[5] >> 6 & 1) << 0);
+ }
+ }
+
+ /* Optical Product Data Descriptor */
+ rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
+ rsp_payload->optical_prod_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
+
+ if (sfp) {
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
+ if (!rval) {
+ memcpy(rsp_payload->optical_prod_desc.vendor_name,
+ sfp + 0,
+ sizeof(rsp_payload->optical_prod_desc.vendor_name));
+ memcpy(rsp_payload->optical_prod_desc.part_number,
+ sfp + 20,
+ sizeof(rsp_payload->optical_prod_desc.part_number));
+ memcpy(rsp_payload->optical_prod_desc.revision,
+ sfp + 36,
+ sizeof(rsp_payload->optical_prod_desc.revision));
+ memcpy(rsp_payload->optical_prod_desc.serial_number,
+ sfp + 48,
+ sizeof(rsp_payload->optical_prod_desc.serial_number));
+ }
+
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
+ if (!rval) {
+ memcpy(rsp_payload->optical_prod_desc.date,
+ sfp + 0,
+ sizeof(rsp_payload->optical_prod_desc.date));
+ }
+ }
+
+send:
+ ql_dbg(ql_dbg_init, vha, 0x0183,
+ "Sending ELS Response to RDP Request...\n");
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
+ (void *)rsp_els, sizeof(*rsp_els));
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
+ "-------- ELS RSP PAYLOAD -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
+ (void *)rsp_payload, rsp_payload_length);
+
+ rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0188,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (rsp_els->comp_status) {
+ ql_log(ql_log_warn, vha, 0x0189,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, rsp_els->comp_status,
+ rsp_els->error_subcode_1, rsp_els->error_subcode_2);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
+ }
+
+dealloc:
+ if (bbc)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*bbc),
+ bbc, bbc_dma);
+ if (stat)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
+ stat, stat_dma);
+ if (sfp)
+ dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
+ sfp, sfp_dma);
+ if (rsp_payload)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
+ rsp_payload, rsp_payload_dma);
+ if (rsp_els)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
+ rsp_els, rsp_els_dma);
+}
+
+void qla24xx_process_purex_list(struct purex_list *list)
+{
+ struct list_head head = LIST_HEAD_INIT(head);
+ struct purex_item *item, *next;
+ ulong flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ list_splice_init(&list->head, &head);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ list_for_each_entry_safe(item, next, &head, list) {
+ list_del(&item->list);
+ item->process_item(item->vha, &item->iocb);
+ kfree(item);
+ }
+}
+
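[Editor's note] qla24xx_process_purex_list() drains the list by splicing it onto a local head under the lock and only then walking the items, so the spinlock is held for O(1) time regardless of how deep the backlog is. For context, a hypothetical producer would look like the sketch below; the enqueue helper name is invented, and the driver's actual enqueue path may differ:

/* Hypothetical producer sketch -- queue_purex_item() is an invented name.
 * Items are appended under list->lock; the DPC thread later splices the
 * whole list away in qla24xx_process_purex_list() and frees each item. */
static void queue_purex_item(struct purex_list *list, struct purex_item *item)
{
        ulong flags;

        spin_lock_irqsave(&list->lock, flags);
        list_add_tail(&item->list, &list->head);
        spin_unlock_irqrestore(&list->lock, flags);
}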
void
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
@@ -6044,13 +6629,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
struct pci_dev *pdev = ha->pdev;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
ql_log(ql_log_warn, base_vha, 0x015b,
"Disabling adapter.\n");
@@ -6061,9 +6639,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
return;
}
- qla2x00_wait_for_sess_deletion(base_vha);
+ /*
+ * if UNLOADING flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
- set_bit(UNLOADING, &base_vha->dpc_flags);
+ qla2x00_wait_for_sess_deletion(base_vha);
qla2x00_delete_all_vps(ha, base_vha);
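[Editor's note] Besides the reordering, the hunk above turns a racy test_bit()-then-set_bit() pair into one atomic test_and_set_bit(): if the PCI-error worker and a concurrent unload both reach this point, exactly one of them proceeds with teardown. The idiom in isolation:

/* Sketch of the atomic claim idiom used above: only the caller that
 * flips UNLOADING from 0 to 1 continues; everyone else backs off. */
if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
        return;         /* another path already owns the teardown */

/* sole owner of the teardown sequence from here on */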
@@ -6254,13 +6837,14 @@ qla2x00_do_dpc(void *data)
}
if (test_and_clear_bit(DETECT_SFP_CHANGE,
- &base_vha->dpc_flags) &&
- !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) {
- qla24xx_detect_sfp(base_vha);
-
- if (ha->flags.detected_lr_sfp !=
- ha->flags.using_lr_setting)
- set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ &base_vha->dpc_flags)) {
+ /* Semantic:
+ * - NO-OP -- await next ISP-ABORT. Preferred method;
+ * minimizes the disruption caused by a forced
+ * chip-reset.
+ * - Force -- ISP-ABORT scheduled.
+ */
+ /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
}
if (test_and_clear_bit
@@ -6301,6 +6885,15 @@ qla2x00_do_dpc(void *data)
}
}
+ if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
+ if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
+ qla24xx_process_purex_list
+ (&base_vha->purex_list);
+ clear_bit(PROCESS_PUREX_IOCB,
+ &base_vha->dpc_flags);
+ }
+ }
+
if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
&base_vha->dpc_flags)) {
qla2x00_update_fcports(base_vha);
@@ -6692,7 +7285,8 @@ qla2x00_timer(struct timer_list *t)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+ test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
"fcport_update_needed=%d start_dpc=%d "
@@ -6705,12 +7299,13 @@ qla2x00_timer(struct timer_list *t)
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
- "relogin_needed=%d.\n",
+ "relogin_needed=%d, Process_purex_iocb=%d.\n",
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+ test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 76a38bf86cbc..3da79ee1d88e 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2683,7 +2683,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t sec_mask, rest_addr, fdata;
void *optrom = NULL;
dma_addr_t optrom_dma;
- int rval;
+ int rval, ret;
struct secure_flash_update_block *sfub;
dma_addr_t sfub_dma;
uint32_t offset = faddr << 2;
@@ -2939,11 +2939,12 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
write_protect:
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
"Protect flash...\n");
- rval = qla24xx_protect_flash(vha);
- if (rval) {
+ ret = qla24xx_protect_flash(vha);
+ if (ret) {
qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
ql_log(ql_log_warn, vha, 0x7099,
"Failed protect flash\n");
+ rval = QLA_COMMAND_ERROR;
}
if (reset_to_rom == true) {
@@ -2951,10 +2952,12 @@ write_protect:
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- rval = qla2x00_wait_for_hba_online(vha);
- if (rval != QLA_SUCCESS)
+ ret = qla2x00_wait_for_hba_online(vha);
+ if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xffff,
"Adapter did not come out of reset\n");
+ rval = QLA_COMMAND_ERROR;
+ }
}
done:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 70081b395fb2..622e7337affc 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -27,8 +27,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
-#include <target/target_core_base.h>
-#include <target/target_core_fabric.h>
#include "qla_def.h"
#include "qla_target.h"
@@ -1760,7 +1758,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
}
- resp->handle = MAKE_HANDLE(qpair->req->id, h);
+ resp->handle = make_handle(qpair->req->id, h);
resp->entry_type = ABTS_RESP_24XX;
resp->entry_count = 1;
resp->nport_handle = abts->nport_handle;
@@ -2582,7 +2580,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
} else
qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle = make_handle(qpair->req->id, h);
pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
@@ -3095,7 +3093,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
} else
qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle = make_handle(qpair->req->id, h);
pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
@@ -3816,7 +3814,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
return;
}
cmd->jiffies_at_free = get_jiffies_64();
- target_free_tag(sess->se_sess, &cmd->se_cmd);
+ cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -4150,7 +4148,7 @@ out_term:
qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
- target_free_tag(sess->se_sess, &cmd->se_cmd);
+ cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
ha->tgt.tgt_ops->put_sess(sess);
@@ -4277,24 +4275,18 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct fc_port *sess,
struct atio_from_isp *atio)
{
- struct se_session *se_sess = sess->se_sess;
struct qla_tgt_cmd *cmd;
- int tag, cpu;
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
- if (tag < 0)
+ cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
+ if (!cmd)
return NULL;
- cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
- memset(cmd, 0, sizeof(struct qla_tgt_cmd));
cmd->cmd_type = TYPE_TGT_CMD;
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
qlt_incr_num_pend_cmds(vha);
cmd->vha = vha;
- cmd->se_cmd.map_tag = tag;
- cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
@@ -4747,11 +4739,11 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
qla24xx_post_newsess_work(vha, &port_id,
iocb->u.isp24.port_name,
iocb->u.isp24.u.plogi.node_name,
- pla, FC4_TYPE_UNKNOWN);
+ pla, 0);
else
qla24xx_post_newsess_work(vha, &port_id,
iocb->u.isp24.port_name, NULL,
- pla, FC4_TYPE_UNKNOWN);
+ pla, 0);
goto out;
}
@@ -5352,9 +5344,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
- struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
- int tag, cpu;
unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
@@ -5384,10 +5374,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
if (!sess)
return;
- se_sess = sess->se_sess;
-
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
- if (tag < 0) {
+ cmd = ha->tgt.tgt_ops->get_cmd(sess);
+ if (!cmd) {
ql_dbg(ql_dbg_io, vha, 0x3009,
"qla_target(%d): %s: Allocation of cmd failed\n",
vha->vp_idx, __func__);
@@ -5402,9 +5390,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
return;
}
- cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
- memset(cmd, 0, sizeof(struct qla_tgt_cmd));
-
qlt_incr_num_pend_cmds(vha);
INIT_LIST_HEAD(&cmd->cmd_list);
memcpy(&cmd->atio, atio, sizeof(*atio));
@@ -5414,7 +5399,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
- cmd->se_cmd.map_cpu = cpu;
if (qfull) {
cmd->q_full = 1;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 6539499e9e95..3cf8590feeac 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -671,6 +671,8 @@ struct qla_tgt_func_tmpl {
void (*handle_data)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
uint32_t);
+ struct qla_tgt_cmd *(*get_cmd)(struct fc_port *);
+ void (*rel_cmd)(struct qla_tgt_cmd *);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
void (*free_session)(struct fc_port *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 5b0c057def2b..6aeb1c3fb7a8 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -870,7 +870,7 @@ bailout:
static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
- tmp->capture_timestamp = jiffies;
+ tmp->capture_timestamp = cpu_to_le32(jiffies);
}
static void
@@ -882,9 +882,10 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
"%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
v+0, v+1, v+2, v+3, v+4, v+5) != 6);
- tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
- tmp->driver_info[1] = v[5] << 8 | v[4];
- tmp->driver_info[2] = 0x12345678;
+ tmp->driver_info[0] = cpu_to_le32(
+ v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
+ tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]);
+ tmp->driver_info[2] = __constant_cpu_to_le32(0x12345678);
}
static void
@@ -894,10 +895,10 @@ qla27xx_firmware_info(struct scsi_qla_host *vha,
tmp->firmware_version[0] = vha->hw->fw_major_version;
tmp->firmware_version[1] = vha->hw->fw_minor_version;
tmp->firmware_version[2] = vha->hw->fw_subminor_version;
- tmp->firmware_version[3] =
- vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
- tmp->firmware_version[4] =
- vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
+ tmp->firmware_version[3] = cpu_to_le32(
+ vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
+ tmp->firmware_version[4] = cpu_to_le32(
+ vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]);
}
static void
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index d2a0014e8b21..bba8dc90acfb 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -18,11 +18,11 @@ struct __packed qla27xx_fwdt_template {
__le32 entry_count;
uint32_t template_version;
- uint32_t capture_timestamp;
+ __le32 capture_timestamp;
uint32_t template_checksum;
uint32_t reserved_2;
- uint32_t driver_info[3];
+ __le32 driver_info[3];
uint32_t saved_state[16];
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index bb03c022e023..8ccd9ba1ddef 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.22-k"
+#define QLA2XXX_VERSION "10.01.00.25-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index abe7f79bb789..1f0a185b2a95 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -268,6 +268,29 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
+static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct qla_tgt_cmd *cmd;
+ int tag, cpu;
+
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ return NULL;
+
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+ cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
+
+ return cmd;
+}
+
+static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
+{
+ target_free_tag(cmd->sess->se_sess, &cmd->se_cmd);
+}
+
/*
* Called from qla_target_template->free_cmd(), and will call
* tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
@@ -1549,6 +1572,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr,
+ .get_cmd = tcm_qla2xxx_get_cmd,
+ .rel_cmd = tcm_qla2xxx_rel_cmd,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
.free_session = tcm_qla2xxx_free_session,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 930e4803d888..56c24a73e0c7 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -94,20 +94,6 @@ EXPORT_SYMBOL(scsi_logging_level);
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);
-/**
- * scsi_put_command - Free a scsi command block
- * @cmd: command block to free
- *
- * Returns: Nothing.
- *
- * Notes: The command must not belong to any lists.
- */
-void scsi_put_command(struct scsi_cmnd *cmd)
-{
- scsi_del_cmd_from_list(cmd);
- BUG_ON(delayed_work_pending(&cmd->abort_work));
-}
-
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
@@ -764,10 +750,6 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
-/* This should go away in the future, it doesn't do anything anymore */
-bool scsi_use_blk_mq = true;
-module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
-
static int __init init_scsi(void)
{
int error;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 44cb054d5e66..4c6c448dc2df 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -38,6 +38,7 @@
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
+#include <linux/msdos_partition.h>
#include <net/checksum.h>
@@ -4146,7 +4147,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
static void __init sdebug_build_parts(unsigned char *ramp,
unsigned long store_size)
{
- struct partition *pp;
+ struct msdos_partition *pp;
int starts[SDEBUG_MAX_PARTS + 2];
int sectors_per_part, num_sectors, k;
int heads_by_sects, start_sec, end_sec;
@@ -4171,7 +4172,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
ramp[510] = 0x55; /* magic partition markings */
ramp[511] = 0xAA;
- pp = (struct partition *)(ramp + 0x1be);
+ pp = (struct msdos_partition *)(ramp + 0x1be);
for (k = 0; starts[k + 1]; ++k, ++pp) {
start_sec = starts[k];
end_sec = starts[k + 1] - 1;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ae2fa170f6ad..978be1602f71 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2412,7 +2412,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
wake_up(&shost->host_wait);
scsi_run_host_queues(shost);
- scsi_put_command(scmd);
kfree(rq);
out_put_autopm_host:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 610ee41fa54c..06c260f6cdae 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -562,7 +562,6 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
scsi_mq_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
- scsi_del_cmd_from_list(cmd);
}
/* Returns false when no more bytes to process, true if there are more */
@@ -1098,36 +1097,7 @@ static void scsi_cleanup_rq(struct request *rq)
}
}
-/* Add a command to the list used by the aacraid and dpt_i2o drivers */
-void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
-{
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- unsigned long flags;
-
- if (shost->use_cmd_list) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_add_tail(&cmd->list, &sdev->cmd_list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
-}
-
-/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
-void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
-{
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- unsigned long flags;
-
- if (shost->use_cmd_list) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- BUG_ON(list_empty(&cmd->list));
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
-}
-
-/* Called after a request has been started. */
+/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
void *buf = cmd->sense_buffer;
@@ -1135,7 +1105,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
struct request *rq = blk_mq_rq_from_pdu(cmd);
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
- int retries;
+ int retries, to_clear;
bool in_flight;
if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
@@ -1146,9 +1116,15 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
jiffies_at_alloc = cmd->jiffies_at_alloc;
retries = cmd->retries;
in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /* zero out the cmd, except for the embedded scsi_request */
- memset((char *)cmd + sizeof(cmd->req), 0,
- sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
+ /*
+ * Zero out the cmd, except for the embedded scsi_request. Only clear
+ * the driver-private command data if the LLD does not supply a
+ * function to initialize that data.
+ */
+ to_clear = sizeof(*cmd) - sizeof(cmd->req);
+ if (!dev->host->hostt->init_cmd_priv)
+ to_clear += dev->host->hostt->cmd_size;
+ memset((char *)cmd + sizeof(cmd->req), 0, to_clear);
cmd->device = dev;
cmd->sense_buffer = buf;
@@ -1160,7 +1136,6 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
if (in_flight)
__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- scsi_add_cmd_to_list(cmd);
}
static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
@@ -1240,8 +1215,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
* commands. The device must be brought online
* before trying any recovery commands.
*/
- sdev_printk(KERN_ERR, sdev,
- "rejecting I/O to offline device\n");
+ if (!sdev->offline_already) {
+ sdev->offline_already = true;
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to offline device\n");
+ }
return BLK_STS_IOERR;
case SDEV_DEL:
/*
@@ -1742,6 +1720,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
struct scatterlist *sg;
+ int ret = 0;
if (unchecked_isa_dma)
cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
@@ -1757,14 +1736,24 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
}
- return 0;
+ if (shost->hostt->init_cmd_priv) {
+ ret = shost->hostt->init_cmd_priv(shost, cmd);
+ if (ret < 0)
+ scsi_free_sense_buffer(unchecked_isa_dma,
+ cmd->sense_buffer);
+ }
+
+ return ret;
}
static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
+ struct Scsi_Host *shost = set->driver_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ if (shost->hostt->exit_cmd_priv)
+ shost->hostt->exit_cmd_priv(shost, cmd);
scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
cmd->sense_buffer);
}
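[Editor's note] Together with the scsi_init_command() change above, these optional hooks let a driver set up its per-command private data once at tag-set initialization instead of having the midlayer re-zero cmd_size bytes on every dispatch. A hypothetical LLD might use them like this (driver and field names are invented for illustration):

/* Hypothetical LLD sketch -- mydrv_* names are invented. The private
 * area lives right behind struct scsi_cmnd and is reached with
 * scsi_cmd_priv(); because init_cmd_priv is set, scsi_init_command()
 * no longer clears it on every request. */
struct mydrv_cmd_priv {
        struct completion done;
        void *fw_handle;
};

static int mydrv_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        struct mydrv_cmd_priv *priv = scsi_cmd_priv(cmd);

        init_completion(&priv->done);
        priv->fw_handle = NULL;
        return 0;
}

static struct scsi_host_template mydrv_template = {
        /* ... */
        .cmd_size       = sizeof(struct mydrv_cmd_priv),
        .init_cmd_priv  = mydrv_init_cmd_priv,
};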
@@ -2295,6 +2284,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_RUNNING:
case SDEV_CREATED_BLOCK:
+ case SDEV_QUIESCE:
case SDEV_OFFLINE:
break;
default:
@@ -2340,6 +2330,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
break;
}
+ sdev->offline_already = false;
sdev->sdev_state = state;
return 0;
@@ -2845,6 +2836,36 @@ scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
+int
+scsi_host_block(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ int ret = 0;
+
+ shost_for_each_device(sdev, shost) {
+ ret = scsi_internal_device_block(sdev);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_host_block);
+
+int
+scsi_host_unblock(struct Scsi_Host *shost, int new_state)
+{
+ struct scsi_device *sdev;
+ int ret = 0;
+
+ shost_for_each_device(sdev, shost) {
+ ret = scsi_internal_device_unblock(sdev, new_state);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_host_unblock);
+
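[Editor's note] Both helpers simply walk every scsi_device on the host and stop at the first failure. A hedged usage sketch, quiescing a whole host around a disruptive operation (the operation itself is a placeholder):

/* Usage sketch: block all devices of a host, do something disruptive,
 * then return them to SDEV_RUNNING. do_disruptive_thing() is a placeholder. */
ret = scsi_host_block(shost);
if (ret)
        return ret;

do_disruptive_thing(shost);

ret = scsi_host_unblock(shost, SDEV_RUNNING);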
/**
* scsi_kmap_atomic_sg - find and atomically map an sg-element
* @sgl: scatter-gather list
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 3717eea37ecb..5f0ad8b32e3a 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev,
dev_dbg(dev, "scsi resume: %d\n", err);
if (err == 0) {
+ bool was_runtime_suspended;
+
+ was_runtime_suspended = pm_runtime_suspended(dev);
+
pm_runtime_disable(dev);
err = pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -93,8 +97,10 @@ static int scsi_dev_type_resume(struct device *dev,
*/
if (!err && scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
-
- blk_set_runtime_active(sdev->request_queue);
+ if (was_runtime_suspended)
+ blk_post_runtime_resume(sdev->request_queue, 0);
+ else
+ blk_set_runtime_active(sdev->request_queue);
}
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3bff9f7aa684..22b6585e28b4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -29,7 +29,6 @@ extern int scsi_init_hosts(void);
extern void scsi_exit_hosts(void);
/* scsi.c */
-extern bool scsi_use_blk_mq;
int scsi_init_sense_cache(struct Scsi_Host *shost);
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
@@ -84,8 +83,6 @@ int scsi_eh_get_sense(struct list_head *work_q,
int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
-extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
-extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 058079f915f1..f2437a7570ce 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -236,7 +236,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sdev_state = SDEV_CREATED;
INIT_LIST_HEAD(&sdev->siblings);
INIT_LIST_HEAD(&sdev->same_target_siblings);
- INIT_LIST_HEAD(&sdev->cmd_list);
INIT_LIST_HEAD(&sdev->starved_entry);
INIT_LIST_HEAD(&sdev->event_list);
spin_lock_init(&sdev->list_lock);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 677b5c5403d2..163dbcb741c1 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -856,7 +856,7 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \
struct bin_attribute *bin_attr, \
char *buf, loff_t off, size_t count) \
{ \
- struct device *dev = container_of(kobj, struct device, kobj); \
+ struct device *dev = kobj_to_dev(kobj); \
struct scsi_device *sdev = to_scsi_device(dev); \
struct scsi_vpd *vpd_page; \
int ret = -EINVAL; \
@@ -884,7 +884,7 @@ static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
if (!sdev->inquiry)
@@ -1045,14 +1045,14 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
name = sdev_bflags_name[i];
if (name)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%s", len ? " " : "", name);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%s%s", len ? " " : "", name);
else
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%sINVALID_BIT(%d)", len ? " " : "", i);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%sINVALID_BIT(%d)", len ? " " : "", i);
}
if (len)
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL);
@@ -1181,7 +1181,7 @@ static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
@@ -1207,7 +1207,7 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
struct bin_attribute *attr, int i)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index ac35c301c792..41a950075913 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -18,11 +18,9 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- u32 lba = 0, txlen;
+ u32 lba, txlen;
- lba |= ((cdb[1] & 0x1F) << 16);
- lba |= (cdb[2] << 8);
- lba |= cdb[3];
+ lba = get_unaligned_be24(&cdb[1]) & 0x1fffff;
/*
* From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be read (READ(6)) or written (WRITE(6)).
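[Editor's note] The open-coded shifts are replaced by get_unaligned_be24() plus a mask: READ(6)/WRITE(6) carry a 21-bit LBA in CDB bytes 1-3, with the top three bits of byte 1 reserved, which is exactly what the 0x1fffff mask strips. A standalone sketch of the decode:

#include <stdint.h>
#include <stdio.h>

/* Userspace equivalent of get_unaligned_be24(&cdb[1]) & 0x1fffff:
 * bytes 1-3 hold a big-endian value; only the low 21 bits are the LBA. */
static uint32_t rw6_lba(const uint8_t *cdb)
{
        uint32_t v = (uint32_t)cdb[1] << 16 | (uint32_t)cdb[2] << 8 | cdb[3];

        return v & 0x1fffff;
}

int main(void)
{
        uint8_t cdb[6] = { 0x08, 0xe1, 0x23, 0x45, 0x00, 0x10 };

        printf("lba=%#x\n", rw6_lba(cdb));      /* 0xe12345 & 0x1fffff = 0x12345 */
        return 0;
}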
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index dfc726fa34e3..b2a803c51288 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,9 +86,17 @@ struct iscsi_internal {
struct transport_container session_cont;
};
+/* Worker that performs connection failure handling for unresponsive
+ * connections entirely in kernel space.
+ */
+static void stop_conn_work_fn(struct work_struct *work);
+static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
+
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_eh_timer_workq;
+static struct workqueue_struct *iscsi_destroy_workq;
+
static DEFINE_IDA(iscsi_sess_ida);
/*
* list of registered transports and lock that must
@@ -1609,8 +1617,10 @@ static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
static LIST_HEAD(sesslist);
+static LIST_HEAD(sessdestroylist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
+static LIST_HEAD(connlist_err);
static DEFINE_SPINLOCK(connlock);
static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn)
@@ -2012,7 +2022,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
if (session->target_id == ISCSI_MAX_TARGET) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
- return;
+ goto unbind_session_exit;
}
target_id = session->target_id;
@@ -2024,10 +2034,20 @@ static void __iscsi_unbind_session(struct work_struct *work)
ida_simple_remove(&iscsi_sess_ida, target_id);
scsi_remove_target(&session->dev);
+
+unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}
+static void __iscsi_destroy_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, destroy_work);
+
+ session->transport->destroy_session(session);
+}
+
struct iscsi_cls_session *
iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
int dd_size)
@@ -2050,6 +2070,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
INIT_WORK(&session->block_work, __iscsi_block_session);
INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
INIT_WORK(&session->scan_work, iscsi_scan_session);
+ INIT_WORK(&session->destroy_work, __iscsi_destroy_session);
spin_lock_init(&session->lock);
/* this is released in the dev's release function */
@@ -2254,8 +2275,10 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
mutex_init(&conn->ep_mutex);
INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_list_err);
conn->transport = transport;
conn->cid = cid;
+ conn->state = ISCSI_CONN_DOWN;
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2307,6 +2330,7 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
spin_lock_irqsave(&connlock, flags);
list_del(&conn->conn_list);
+ list_del(&conn->conn_list_err);
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
@@ -2421,6 +2445,51 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+static void stop_conn_work_fn(struct work_struct *work)
+{
+ struct iscsi_cls_conn *conn, *tmp;
+ unsigned long flags;
+ LIST_HEAD(recovery_list);
+
+ spin_lock_irqsave(&connlock, flags);
+ if (list_empty(&connlist_err)) {
+ spin_unlock_irqrestore(&connlock, flags);
+ return;
+ }
+ list_splice_init(&connlist_err, &recovery_list);
+ spin_unlock_irqrestore(&connlock, flags);
+
+ list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
+ uint32_t sid = iscsi_conn_get_sid(conn);
+ struct iscsi_cls_session *session;
+
+ mutex_lock(&rx_queue_mutex);
+
+ session = iscsi_session_lookup(sid);
+ if (session) {
+ if (system_state != SYSTEM_RUNNING) {
+ session->recovery_tmo = 0;
+ conn->transport->stop_conn(conn,
+ STOP_CONN_TERM);
+ } else {
+ conn->transport->stop_conn(conn,
+ STOP_CONN_RECOVER);
+ }
+ }
+
+ list_del_init(&conn->conn_list_err);
+
+ mutex_unlock(&rx_queue_mutex);
+
+ /* We don't want to hold rx_queue_mutex for too long,
+ * for instance if many conns failed at the same time,
+ * since this stalls other iSCSI maintenance operations.
+ * Give other users a chance to proceed.
+ */
+ cond_resched();
+ }
+}
+
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
@@ -2428,6 +2497,12 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
+ unsigned long flags;
+
+ spin_lock_irqsave(&connlock, flags);
+ list_add(&conn->conn_list_err, &connlist_err);
+ spin_unlock_irqrestore(&connlock, flags);
+ queue_work(system_unbound_wq, &stop_conn_work);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2757,11 +2832,19 @@ static int
iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
struct iscsi_cls_conn *conn;
+ unsigned long flags;
conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
if (!conn)
return -EINVAL;
+ spin_lock_irqsave(&connlock, flags);
+ if (!list_empty(&conn->conn_list_err)) {
+ spin_unlock_irqrestore(&connlock, flags);
+ return -EAGAIN;
+ }
+ spin_unlock_irqrestore(&connlock, flags);
+
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
if (transport->destroy_conn)
transport->destroy_conn(conn);
@@ -3563,6 +3646,23 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
else
transport->destroy_session(session);
break;
+ case ISCSI_UEVENT_DESTROY_SESSION_ASYNC:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+ if (!session)
+ err = -EINVAL;
+ else if (iscsi_session_has_conns(ev->u.d_session.sid))
+ err = -EBUSY;
+ else {
+ unsigned long flags;
+
+ /* Prevent this session from being found again */
+ spin_lock_irqsave(&sesslock, flags);
+ list_move(&session->sess_list, &sessdestroylist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ queue_work(iscsi_destroy_workq, &session->destroy_work);
+ }
+ break;
case ISCSI_UEVENT_UNBIND_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
if (session)
@@ -3612,8 +3712,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
case ISCSI_UEVENT_START_CONN:
conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
- if (conn)
+ if (conn) {
ev->r.retcode = transport->start_conn(conn);
+ if (!ev->r.retcode)
+ conn->state = ISCSI_CONN_UP;
+ }
else
err = -EINVAL;
break;
@@ -3810,6 +3913,26 @@ iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
+static const char *const connection_state_names[] = {
+ [ISCSI_CONN_UP] = "up",
+ [ISCSI_CONN_DOWN] = "down",
+ [ISCSI_CONN_FAILED] = "failed"
+};
+
+static ssize_t show_conn_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
+ const char *state = "unknown";
+
+ if (conn->state >= 0 &&
+ conn->state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn->state];
+
+ return sprintf(buf, "%s\n", state);
+}
+static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state,
+ NULL);
#define iscsi_conn_ep_attr_show(param) \
static ssize_t show_conn_ep_param_##param(struct device *dev, \
@@ -3879,6 +4002,7 @@ static struct attribute *iscsi_conn_attrs[] = {
&dev_attr_conn_tcp_xmit_wsf.attr,
&dev_attr_conn_tcp_recv_wsf.attr,
&dev_attr_conn_local_ipaddr.attr,
+ &dev_attr_conn_state.attr,
NULL,
};
@@ -3950,6 +4074,8 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_TCP_RECV_WSF;
else if (attr == &dev_attr_conn_local_ipaddr.attr)
param = ISCSI_PARAM_LOCAL_IPADDR;
+ else if (attr == &dev_attr_conn_state.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid conn attr");
return 0;
@@ -4608,8 +4734,16 @@ static __init int iscsi_transport_init(void)
goto release_nls;
}
+ iscsi_destroy_workq = create_singlethread_workqueue("iscsi_destroy");
+ if (!iscsi_destroy_workq) {
+ err = -ENOMEM;
+ goto destroy_wq;
+ }
+
return 0;
+destroy_wq:
+ destroy_workqueue(iscsi_eh_timer_workq);
release_nls:
netlink_kernel_release(nls);
unregister_flashnode_bus:
@@ -4631,6 +4765,7 @@ unregister_transport_class:
static void __exit iscsi_transport_exit(void)
{
+ destroy_workqueue(iscsi_destroy_workq);
destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index e969138051c7..682cf08ab041 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -17,14 +17,11 @@
#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
+#include <linux/msdos_partition.h>
#include <asm/unaligned.h>
#include <scsi/scsicam.h>
-
-static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds,
- unsigned int *secs);
-
/**
* scsi_bios_ptable - Read PC partition table out of first sector of device.
* @dev: from this device
@@ -35,105 +32,48 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
*/
unsigned char *scsi_bios_ptable(struct block_device *dev)
{
- unsigned char *res = kmalloc(66, GFP_KERNEL);
- if (res) {
- struct block_device *bdev = dev->bd_contains;
- Sector sect;
- void *data = read_dev_sector(bdev, 0, &sect);
- if (data) {
- memcpy(res, data + 0x1be, 66);
- put_dev_sector(sect);
- } else {
- kfree(res);
- res = NULL;
- }
- }
- return res;
-}
-EXPORT_SYMBOL(scsi_bios_ptable);
-
-/**
- * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
- * @bdev: which device
- * @capacity: size of the disk in sectors
- * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
- *
- * Description : determine the BIOS mapping/geometry used for a drive in a
- * SCSI-CAM system, storing the results in ip as required
- * by the HDIO_GETGEO ioctl().
- *
- * Returns : -1 on failure, 0 on success.
- */
-
-int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
-{
- unsigned char *p;
- u64 capacity64 = capacity; /* Suppress gcc warning */
- int ret;
-
- p = scsi_bios_ptable(bdev);
- if (!p)
- return -1;
-
- /* try to infer mapping from partition table */
- ret = scsi_partsize(p, (unsigned long)capacity, (unsigned int *)ip + 2,
- (unsigned int *)ip + 0, (unsigned int *)ip + 1);
- kfree(p);
-
- if (ret == -1 && capacity64 < (1ULL << 32)) {
- /* pick some standard mapping with at most 1024 cylinders,
- and at most 62 sectors per track - this works up to
- 7905 MB */
- ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2,
- (unsigned int *)ip + 0, (unsigned int *)ip + 1);
- }
-
- /* if something went wrong, then apparently we have to return
- a geometry with more than 1024 cylinders */
- if (ret || ip[0] > 255 || ip[1] > 63) {
- if ((capacity >> 11) > 65534) {
- ip[0] = 255;
- ip[1] = 63;
- } else {
- ip[0] = 64;
- ip[1] = 32;
- }
+ struct address_space *mapping = dev->bd_contains->bd_inode->i_mapping;
+ unsigned char *res = NULL;
+ struct page *page;
- if (capacity > 65535*63*255)
- ip[2] = 65535;
- else
- ip[2] = (unsigned long)capacity / (ip[0] * ip[1]);
- }
+ page = read_mapping_page(mapping, 0, NULL);
+ if (IS_ERR(page))
+ return NULL;
- return 0;
+ if (!PageError(page))
+ res = kmemdup(page_address(page) + 0x1be, 66, GFP_KERNEL);
+ put_page(page);
+ return res;
}
-EXPORT_SYMBOL(scsicam_bios_param);
+EXPORT_SYMBOL(scsi_bios_ptable);
/**
* scsi_partsize - Parse cylinders/heads/sectors from PC partition table
- * @buf: partition table, see scsi_bios_ptable()
+ * @bdev: block device to parse
* @capacity: size of the disk in sectors
- * @cyls: put cylinders here
- * @hds: put heads here
- * @secs: put sectors here
+ * @geom: output in form of [hds, cylinders, sectors]
*
* Determine the BIOS mapping/geometry used to create the partition
- * table, storing the results in @cyls, @hds, and @secs
+ * table, storing the results in @geom.
*
- * Returns: -1 on failure, 0 on success.
+ * Returns: %false on failure, %true on success.
*/
-
-int scsi_partsize(unsigned char *buf, unsigned long capacity,
- unsigned int *cyls, unsigned int *hds, unsigned int *secs)
+bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3])
{
- struct partition *p = (struct partition *)buf, *largest = NULL;
- int i, largest_cyl;
int cyl, ext_cyl, end_head, end_cyl, end_sector;
unsigned int logical_end, physical_end, ext_physical_end;
+ struct msdos_partition *p, *largest = NULL;
+ void *buf;
+ int ret = false;
+ buf = scsi_bios_ptable(bdev);
+ if (!buf)
+ return false;
if (*(unsigned short *) (buf + 64) == 0xAA55) {
- for (largest_cyl = -1, i = 0; i < 4; ++i, ++p) {
+ int largest_cyl = -1, i;
+
+ for (i = 0, p = buf; i < 4; i++, p++) {
if (!p->sys_ind)
continue;
#ifdef DEBUG
@@ -153,7 +93,7 @@ int scsi_partsize(unsigned char *buf, unsigned long capacity,
end_sector = largest->end_sector & 0x3f;
if (end_head + 1 == 0 || end_sector == 0)
- return -1;
+ goto out_free_buf;
#ifdef DEBUG
printk("scsicam_bios_param : end at h = %d, c = %d, s = %d\n",
@@ -178,19 +118,24 @@ int scsi_partsize(unsigned char *buf, unsigned long capacity,
,logical_end, physical_end, ext_physical_end, ext_cyl);
#endif
- if ((logical_end == physical_end) ||
- (end_cyl == 1023 && ext_physical_end == logical_end)) {
- *secs = end_sector;
- *hds = end_head + 1;
- *cyls = capacity / ((end_head + 1) * end_sector);
- return 0;
+ if (logical_end == physical_end ||
+ (end_cyl == 1023 && ext_physical_end == logical_end)) {
+ geom[0] = end_head + 1;
+ geom[1] = end_sector;
+ geom[2] = (unsigned long)capacity /
+ ((end_head + 1) * end_sector);
+ ret = true;
+ goto out_free_buf;
}
#ifdef DEBUG
printk("scsicam_bios_param : logical (%u) != physical (%u)\n",
logical_end, physical_end);
#endif
}
- return -1;
+
+out_free_buf:
+ kfree(buf);
+ return ret;
}
EXPORT_SYMBOL(scsi_partsize);
@@ -258,3 +203,56 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
*hds = (unsigned int) heads;
return (rv);
}
+
+/**
+ * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
+ * @bdev: which device
+ * @capacity: size of the disk in sectors
+ * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
+ *
+ * Description : determine the BIOS mapping/geometry used for a drive in a
+ * SCSI-CAM system, storing the results in ip as required
+ * by the HDIO_GETGEO ioctl().
+ *
+ * Returns : -1 on failure, 0 on success.
+ */
+int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
+{
+ u64 capacity64 = capacity; /* Suppress gcc warning */
+ int ret = 0;
+
+ /* try to infer mapping from partition table */
+ if (scsi_partsize(bdev, capacity, ip))
+ return 0;
+
+ if (capacity64 < (1ULL << 32)) {
+ /*
+ * Pick some standard mapping with at most 1024 cylinders, and
+ * at most 62 sectors per track - this works up to 7905 MB.
+ */
+ ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2,
+ (unsigned int *)ip + 0, (unsigned int *)ip + 1);
+ }
+
+ /*
+ * If something went wrong, then apparently we have to return a geometry
+ * with more than 1024 cylinders.
+ */
+ if (ret || ip[0] > 255 || ip[1] > 63) {
+ if ((capacity >> 11) > 65534) {
+ ip[0] = 255;
+ ip[1] = 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+
+ if (capacity > 65535*63*255)
+ ip[2] = 65535;
+ else
+ ip[2] = (unsigned long)capacity / (ip[0] * ip[1]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(scsicam_bios_param);
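[Editor's note] When no usable partition table is found and the disk is too big for setsize(), the fallback above picks a fixed 255-head/63-sector (or 64/32) mapping and derives the cylinder count from it. A worked example for a 100 GiB disk (209715200 512-byte sectors):

#include <stdio.h>

/* Worked example of the large-disk fallback: (capacity >> 11) = 102400
 * exceeds 65534, so the 255-head/63-sector mapping is chosen and the
 * cylinder count is derived from it. */
int main(void)
{
        unsigned long capacity = 209715200UL;   /* 100 GiB in 512-byte sectors */
        int ip[3];

        ip[0] = 255;                            /* heads */
        ip[1] = 63;                             /* sectors per track */
        ip[2] = capacity / (ip[0] * ip[1]);     /* 209715200 / 16065 = 13054 */

        printf("H=%d S=%d C=%d\n", ip[0], ip[1], ip[2]);
        return 0;
}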
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2710a0e5ae6d..a793cb08d025 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3189,7 +3189,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
sdkp->first_scan = 0;
- set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
+ set_capacity_revalidate_and_notify(disk,
+ logical_to_sectors(sdp, sdkp->capacity), false);
sd_config_write_same(sdkp);
kfree(buffer);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4e6af592f018..20472aaaf630 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -685,8 +685,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
- if (copy_from_user(cmnd, buf, cmd_size))
+ if (copy_from_user(cmnd, buf, cmd_size)) {
+ sg_remove_request(sfp, srp);
return -EFAULT;
+ }
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but it is possible that the app intended SG_DXFER_TO_DEV, because there
@@ -793,8 +795,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (hp->dxfer_len >= SZ_256M)
+ if (hp->dxfer_len >= SZ_256M) {
+ sg_remove_request(sfp, srp);
return -EINVAL;
+ }
k = sg_start_req(srp, cmnd);
if (k) {
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index bc6506884e3b..d3311c014863 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -53,4 +53,4 @@ config SCSI_SMARTPQI
Note: the aacraid driver will not manage a smartpqi
controller. You need to enable smartpqi for smartpqi
controllers. For more information, please see
- Documentation/scsi/smartpqi.txt
+ Documentation/scsi/smartpqi.rst
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b7492568e02f..cd157f11eb22 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1614,28 +1614,28 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
if (device->target_lun_valid)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"%d:%d",
device->target,
device->lun);
else
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"-:-");
if (pqi_is_logical_device(device))
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" %08x%08x",
*((u32 *)&device->scsi3addr),
*((u32 *)&device->scsi3addr[4]));
else
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" %016llx", device->sas_address);
- count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
+ count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
" %s %.8s %.16s ",
pqi_device_type(device),
device->vendor,
@@ -1643,19 +1643,19 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
if (pqi_is_logical_device(device)) {
if (device->devtype == TYPE_DISK)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"SSDSmartPathCap%c En%c %-12s",
device->raid_bypass_configured ? '+' : '-',
device->raid_bypass_enabled ? '+' : '-',
pqi_raid_level_to_string(device->raid_level));
} else {
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"AIO%c", device->aio_enabled ? '+' : '-');
if (device->devtype == TYPE_DISK ||
device->devtype == TYPE_ZBC)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" qd=%-6d", device->queue_depth);
}
@@ -6191,14 +6191,14 @@ static ssize_t pqi_lockup_action_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
if (pqi_lockup_actions[i].action == pqi_lockup_action)
- count += snprintf(buffer + count, PAGE_SIZE - count,
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
"[%s] ", pqi_lockup_actions[i].name);
else
- count += snprintf(buffer + count, PAGE_SIZE - count,
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
"%s ", pqi_lockup_actions[i].name);
}
- count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
+ count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
return count;
}
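[Editor's note] The snprintf()-to-scnprintf() conversions in this file all guard the same accumulation pattern: snprintf() returns the length the output would have had, so after truncation "count" can exceed the buffer size, making buffer + count point past the end and SIZE - count underflow on the next call. scnprintf() returns the number of bytes actually written, which keeps the arithmetic safe. A userspace demonstration of the failure mode (only the bogus length is shown; no out-of-bounds write is performed):

#include <stdio.h>

int main(void)
{
        char buf[8];
        int len = 0;

        /* snprintf() truncates but reports the untruncated length */
        len += snprintf(buf + len, sizeof(buf) - len, "%s", "0123456789");

        /* len is now 10 even though buf holds only 7 chars + NUL; a second
         * snprintf(buf + len, sizeof(buf) - len, ...) would write out of
         * bounds with a size argument that underflowed to a huge size_t. */
        printf("len=%d, buffer size=%zu\n", len, sizeof(buf));
        return 0;
}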
diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h
index d81b4f0ceaaa..0e0fa38f8d90 100644
--- a/drivers/scsi/snic/vnic_devcmd.h
+++ b/drivers/scsi/snic/vnic_devcmd.h
@@ -208,7 +208,7 @@ struct vnic_devcmd_notify {
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
- u8 data[0];
+ u8 data[];
};
/*
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e4240e4ae8bb..d2fe3fa470f9 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -79,7 +79,6 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \
CDC_MRW|CDC_MRW_W|CDC_RAM)
-static DEFINE_MUTEX(sr_mutex);
static int sr_probe(struct device *);
static int sr_remove(struct device *);
static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt);
@@ -536,9 +535,9 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
scsi_autopm_get_device(sdev);
check_disk_change(bdev);
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
ret = cdrom_open(&cd->cdi, bdev, mode);
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
scsi_autopm_put_device(sdev);
if (ret)
@@ -551,10 +550,12 @@ out:
static void sr_block_release(struct gendisk *disk, fmode_t mode)
{
struct scsi_cd *cd = scsi_cd(disk);
- mutex_lock(&sr_mutex);
+
+ mutex_lock(&cd->lock);
cdrom_release(&cd->cdi, mode);
+ mutex_unlock(&cd->lock);
+
scsi_cd_put(cd);
- mutex_unlock(&sr_mutex);
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
@@ -565,7 +566,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
void __user *argp = (void __user *)arg;
int ret;
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
(mode & FMODE_NDELAY) != 0);
@@ -595,7 +596,7 @@ put:
scsi_autopm_put_device(sdev);
out:
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
return ret;
}
@@ -608,7 +609,7 @@ static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsign
void __user *argp = compat_ptr(arg);
int ret;
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
(mode & FMODE_NDELAY) != 0);
@@ -638,7 +639,7 @@ put:
scsi_autopm_put_device(sdev);
out:
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
return ret;
}
@@ -745,6 +746,7 @@ static int sr_probe(struct device *dev)
disk = alloc_disk(1);
if (!disk)
goto fail_free;
+ mutex_init(&cd->lock);
spin_lock(&sr_index_lock);
minor = find_first_zero_bit(sr_index_bits, SR_DISKS);
@@ -1055,6 +1057,8 @@ static void sr_kref_release(struct kref *kref)
put_disk(disk);
+ mutex_destroy(&cd->lock);
+
kfree(cd);
}
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index a2bb7b8bace5..339c624e04d8 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -20,6 +20,7 @@
#include <linux/genhd.h>
#include <linux/kref.h>
+#include <linux/mutex.h>
#define MAX_RETRIES 3
#define SR_TIMEOUT (30 * HZ)
@@ -51,6 +52,7 @@ typedef struct scsi_cd {
bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */
struct cdrom_device_info cdi;
+ struct mutex lock;
/* We hold gendisk and scsi_device references on probe and use
* the refs on this kref to decide when to release them */
struct kref kref;
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 17a56c87d383..1f988a1b9166 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -67,9 +67,6 @@
void sr_vendor_init(Scsi_CD *cd)
{
-#ifndef CONFIG_BLK_DEV_SR_VENDOR
- cd->vendor = VENDOR_SCSI3;
-#else
const char *vendor = cd->device->vendor;
const char *model = cd->device->model;
@@ -118,7 +115,6 @@ void sr_vendor_init(Scsi_CD *cd)
CDC_PLAY_AUDIO
);
}
-#endif
}
@@ -132,10 +128,8 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
struct ccs_modesel_head *modesel;
int rc, density = 0;
-#ifdef CONFIG_BLK_DEV_SR_VENDOR
if (cd->vendor == VENDOR_TOSHIBA)
density = (blocklength > 2048) ? 0x81 : 0x83;
-#endif
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
@@ -223,7 +217,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
}
break;
-#ifdef CONFIG_BLK_DEV_SR_VENDOR
case VENDOR_NEC:{
unsigned long min, sec, frame;
cgc.cmd[0] = 0xde;
@@ -316,7 +309,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
sector = buffer[11] + (buffer[10] << 8) +
(buffer[9] << 16) + (buffer[8] << 24);
break;
-#endif /* CONFIG_BLK_DEV_SR_VENDOR */
default:
/* should not happen */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 393f3019ccac..c5f9b348b438 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
- file Documentation/scsi/st.txt for more information.
+ file Documentation/scsi/st.rst for more information.
History:
Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara.
@@ -45,6 +45,7 @@ static const char *verstr = "20160209";
#include <linux/uaccess.h>
#include <asm/dma.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
@@ -2680,8 +2681,7 @@ static void deb_space_print(struct scsi_tape *STp, int direction, char *units, u
if (!debugging)
return;
- sc = cmd[2] & 0x80 ? 0xff000000 : 0;
- sc |= (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
+ sc = sign_extend32(get_unaligned_be24(&cmd[2]), 23);
if (direction)
sc = -sc;
st_printk(ST_DEB_MSG, STp, "Spacing tape %s over %d %s.\n",
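[Editor's note] The replaced shifts built a sign-extended 24-bit count by hand; sign_extend32(get_unaligned_be24(&cmd[2]), 23) does the same in one step: fetch the big-endian 24-bit field, then propagate bit 23 as the sign. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Userspace equivalent of sign_extend32(get_unaligned_be24(p), 23):
 * decode a big-endian 24-bit two's-complement value. */
static int32_t be24_signed(const uint8_t *p)
{
        uint32_t v = (uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 | p[2];

        if (v & 0x800000)               /* bit 23 set: negative */
                v |= 0xff000000u;
        return (int32_t)v;
}

int main(void)
{
        uint8_t cmd[3] = { 0xff, 0xff, 0xfe };

        printf("%d\n", be24_signed(cmd));       /* prints -2 */
        return 0;
}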
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 33287b6bdf0e..d4f10c0d813c 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -236,7 +236,7 @@ struct req_msg {
u8 data_dir;
u8 payload_sz; /* payload size in 4-byte, not used */
u8 cdb[STEX_CDB_LENGTH];
- u32 variable[0];
+ u32 variable[];
};
struct status_msg {
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index d14c2243e02a..e2005aeddc2d 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -46,7 +46,7 @@ config SCSI_UFSHCD
The module will be called ufshcd.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/ufs.txt>.
+ <file:Documentation/scsi/ufs.rst>.
However, do not compile this as a module if your root file system
(the one containing the directory /) is located on a UFS device.
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 56a6a1ed5ec2..da065a259f6e 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -192,7 +192,7 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
* and device TX LCC are disabled once link startup is
* completed.
*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+ ufshcd_disable_host_tx_lcc(hba);
/*
* Disabling Autohibern8 feature in cadence UFS
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 5d6487350a6c..074a6a055a4c 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -235,7 +235,7 @@ static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
/* Unipro PA_Local_TX_LCC_Enable */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
+ ufshcd_disable_host_tx_lcc(hba);
/* close Unipro VS_Mk2ExtnSupport */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 53eae5fe2ade..673c16596fb2 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -66,6 +66,21 @@ static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
}
}
+static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (status == PRE_CHANGE) {
+ if (host->unipro_lpm)
+ hba->hba_enable_delay_us = 0;
+ else
+ hba->hba_enable_delay_us = 600;
+ }
+
+ return 0;
+}
+
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -107,6 +122,7 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
if (on) {
ufs_mtk_ref_clk_notify(on, res);
+ ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
} else {
ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
@@ -132,12 +148,40 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
out:
host->ref_clk_enabled = on;
- if (!on)
+ if (!on) {
+ ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
ufs_mtk_ref_clk_notify(on, res);
+ }
return 0;
}
+static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
+ u16 gating_us, u16 ungating_us)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (hba->dev_info.clk_gating_wait_us) {
+ host->ref_clk_gating_wait_us =
+ hba->dev_info.clk_gating_wait_us;
+ } else {
+ host->ref_clk_gating_wait_us = gating_us;
+ }
+
+ host->ref_clk_ungating_wait_us = ungating_us;
+}
+
+static u32 ufs_mtk_link_get_state(struct ufs_hba *hba)
+{
+ u32 val;
+
+ ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ val = val >> 28;
+
+ return val;
+}
+
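ufs_mtk_link_get_state() above selects debug view 0x20 and reads the link state from bits 31:28 of REG_UFS_PROBE. A userspace decode sketch (the register value is invented; the names mirror the VS_LINK_* enum added to ufs-mediatek.h later in this diff):

#include <stdio.h>
#include <stdint.h>

static const char *vs_link_name[] = {
	"DISABLED", "DOWN", "UP", "HIBERN8", "LOST", "CFG",
};

int main(void)
{
	uint32_t probe = 0x30001234;	/* hypothetical REG_UFS_PROBE value */
	uint32_t state = probe >> 28;	/* bits 31:28 carry the link state */

	if (state < 6)
		printf("link state: VS_LINK_%s\n", vs_link_name[state]);
	return 0;
}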
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
@@ -150,7 +194,7 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- int ret = -EINVAL;
+ int ret = 0;
/*
* In case ufs_mtk_init() is not yet done, simply ignore.
@@ -160,19 +204,24 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
- switch (status) {
- case PRE_CHANGE:
- if (!on) {
+ if (!on && status == PRE_CHANGE) {
+ if (!ufshcd_is_link_active(hba)) {
ufs_mtk_setup_ref_clk(hba, on);
ret = phy_power_off(host->mphy);
+ } else {
+ /*
+			 * Gate the ref-clk if the link is in Hibern8
+			 * state entered via Auto-Hibern8.
+ */
+ if (!ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_auto_hibern8_enabled(hba) &&
+ ufs_mtk_link_get_state(hba) ==
+ VS_LINK_HIBERN8)
+ ufs_mtk_setup_ref_clk(hba, on);
}
- break;
- case POST_CHANGE:
- if (on) {
- ret = phy_power_on(host->mphy);
- ufs_mtk_setup_ref_clk(hba, on);
- }
- break;
+ } else if (on && status == POST_CHANGE) {
+ ret = phy_power_on(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, on);
}
return ret;
@@ -285,11 +334,36 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
return ret;
}
+static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
+{
+ int ret;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ lpm);
+ if (!ret)
+ host->unipro_lpm = lpm;
+
+ return ret;
+}
+
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
int ret;
u32 tmp;
+ ufs_mtk_unipro_set_pm(hba, 0);
+
+ /*
+	 * Set PA_Local_TX_LCC_Enable to 0 before link startup to
+	 * ensure that both host and device TX LCC are disabled once
+	 * link startup completes.
+ */
+ ret = ufshcd_disable_host_tx_lcc(hba);
+ if (ret)
+ return ret;
+
/* disable deep stall */
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
if (ret)
@@ -321,9 +395,6 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
- /* disable device LCC */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
-
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
@@ -390,9 +461,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
if (err)
return err;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 0);
+ err = ufs_mtk_unipro_set_pm(hba, 0);
if (err)
return err;
@@ -413,14 +482,10 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 1);
+ err = ufs_mtk_unipro_set_pm(hba, 1);
if (err) {
/* Resume UniPro state for following error recovery */
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 0);
+ ufs_mtk_unipro_set_pm(hba, 0);
return err;
}
@@ -434,12 +499,20 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
- if (err)
+ if (err) {
+ /*
+			 * Force the link into the off state to trigger
+			 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
+			 * for a complete host reset.
+ */
+ ufshcd_set_link_off(hba);
return -EAGAIN;
- phy_power_off(host->mphy);
- ufs_mtk_setup_ref_clk(hba, false);
+ }
}
+ if (!ufshcd_is_link_active(hba))
+ phy_power_off(host->mphy);
+
return 0;
}
@@ -448,12 +521,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
int err;
- if (ufshcd_is_link_hibern8(hba)) {
- ufs_mtk_setup_ref_clk(hba, true);
+ if (!ufshcd_is_link_active(hba))
phy_power_on(host->mphy);
+
+ if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
- if (err)
+ if (err) {
+ err = ufshcd_link_recovery(hba);
return err;
+ }
}
return 0;
@@ -477,9 +553,24 @@ static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 mid = dev_info->wmanufacturerid;
- if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG)
+ if (mid == UFS_VENDOR_SAMSUNG) {
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
+ }
+
+ /*
+	 * Decide the wait time used before gating and after
+	 * ungating the reference clock, according to each
+	 * vendor's requirements.
+ */
+ if (mid == UFS_VENDOR_SAMSUNG)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
+ else if (mid == UFS_VENDOR_SKHYNIX)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
+ else if (mid == UFS_VENDOR_TOSHIBA)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
return 0;
}
@@ -494,6 +585,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
.init = ufs_mtk_init,
.setup_clocks = ufs_mtk_setup_clocks,
+ .hce_enable_notify = ufs_mtk_hce_enable_notify,
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index fccdd979d6fb..5bbd3e9cbae2 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -54,6 +54,18 @@
#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
/*
+ * Vendor specific link state
+ */
+enum {
+ VS_LINK_DISABLED = 0,
+ VS_LINK_DOWN = 1,
+ VS_LINK_UP = 2,
+ VS_LINK_HIBERN8 = 3,
+ VS_LINK_LOST = 4,
+ VS_LINK_CFG = 5,
+};
+
+/*
* SiP commands
*/
#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
@@ -79,7 +91,10 @@ enum {
struct ufs_mtk_host {
struct ufs_hba *hba;
struct phy *mphy;
+ bool unipro_lpm;
bool ref_clk_enabled;
+ u16 ref_clk_ungating_wait_us;
+ u16 ref_clk_gating_wait_us;
};
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index c69c29a1ceb9..19aa5c44e0da 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -10,6 +10,7 @@
#include <linux/phy/phy.h>
#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>
+#include <linux/devfreq.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
@@ -38,7 +39,6 @@ enum {
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
@@ -554,9 +554,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
* completed.
*/
if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
- 0);
+ err = ufshcd_disable_host_tx_lcc(hba);
break;
case POST_CHANGE:
@@ -674,7 +672,7 @@ static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
}
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
int err = 0;
@@ -705,7 +703,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
vote = ufs_qcom_get_bus_vote(host, mode);
if (vote >= 0)
- err = ufs_qcom_set_bus_vote(host, vote);
+ err = __ufs_qcom_set_bus_vote(host, vote);
else
err = vote;
@@ -716,6 +714,35 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
return err;
}
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int vote, err;
+
+ /*
+	 * If ufs_qcom_init() has not completed yet, simply ignore.
+	 * ufs_qcom_set_bus_vote() will be called again from
+	 * ufs_qcom_init() once initialization is done.
+ */
+ if (!host)
+ return 0;
+
+ if (on) {
+ vote = host->bus_vote.saved_vote;
+ if (vote == host->bus_vote.min_bw_vote)
+ ufs_qcom_update_bus_bw_vote(host);
+ } else {
+ vote = host->bus_vote.min_bw_vote;
+ }
+
+ err = __ufs_qcom_set_bus_vote(host, vote);
+ if (err)
+ dev_err(hba->dev, "%s: set bus vote failed %d\n",
+ __func__, err);
+
+ return err;
+}
+
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -792,7 +819,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
return 0;
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int ufs_qcom_set_bus_vote(struct ufs_hba *host, bool on)
{
return 0;
}
@@ -817,11 +844,27 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
/*
* If we are here to disable this clock it might be immediately
* after entering into hibern8 in which case we need to make
- * sure that device ref_clk is active at least 1us after the
+	 * sure that device ref_clk is active for the required time after
* hibern8 enter.
*/
- if (!enable)
- udelay(1);
+ if (!enable) {
+ unsigned long gating_wait;
+
+ gating_wait = host->hba->dev_info.clk_gating_wait_us;
+ if (!gating_wait) {
+ udelay(1);
+ } else {
+ /*
+ * bRefClkGatingWaitTime defines the minimum
+ * time for which the reference clock is
+			 * required by the device during the transition from
+ * HS-MODE to LS-MODE or HIBERN8 state. Give it
+ * more delay to be on the safe side.
+ */
+ gating_wait += 10;
+ usleep_range(gating_wait, gating_wait + 10);
+ }
+ }
writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
@@ -898,6 +941,20 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
ufs_qcom_dev_ref_clk_ctrl(host, true);
+
+ if (host->hw_ver.major >= 0x4) {
+ if (dev_req_params->gear_tx == UFS_HS_G4) {
+ /* INITIAL ADAPT */
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_INITIAL_ADAPT);
+ } else {
+ /* NO ADAPT */
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_NO_ADAPT);
+ }
+ }
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
@@ -956,6 +1013,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+ if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
+ hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+
return err;
}
@@ -1030,8 +1090,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err;
- int vote = 0;
+ int err = 0;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1041,28 +1100,28 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
- if (on && (status == POST_CHANGE)) {
- /* enable the device ref clock for HS mode*/
- if (ufshcd_is_hs_mode(&hba->pwr_info))
- ufs_qcom_dev_ref_clk_ctrl(host, true);
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
-
- } else if (!on && (status == PRE_CHANGE)) {
- if (!ufs_qcom_is_link_active(hba)) {
- /* disable device ref_clk */
- ufs_qcom_dev_ref_clk_ctrl(host, false);
+ switch (status) {
+ case PRE_CHANGE:
+ if (on) {
+ err = ufs_qcom_set_bus_vote(hba, true);
+ } else {
+ if (!ufs_qcom_is_link_active(hba)) {
+ /* disable device ref_clk */
+ ufs_qcom_dev_ref_clk_ctrl(host, false);
+ }
}
-
- vote = host->bus_vote.min_bw_vote;
+ break;
+ case POST_CHANGE:
+ if (on) {
+ /* enable the device ref clock for HS mode*/
+ if (ufshcd_is_hs_mode(&hba->pwr_info))
+ ufs_qcom_dev_ref_clk_ctrl(host, true);
+ } else {
+ err = ufs_qcom_set_bus_vote(hba, false);
+ }
+ break;
}
- err = ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
-
return err;
}
@@ -1238,6 +1297,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
+ ufs_qcom_set_bus_vote(hba, true);
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
@@ -1630,6 +1690,29 @@ static void ufs_qcom_device_reset(struct ufs_hba *hba)
usleep_range(10, 15);
}
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *p,
+ void *data)
+{
+ static struct devfreq_simple_ondemand_data *d;
+
+ if (!data)
+ return;
+
+ d = (struct devfreq_simple_ondemand_data *)data;
+ p->polling_ms = 60;
+ d->upthreshold = 70;
+ d->downdifferential = 5;
+}
+#else
+static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *p,
+ void *data)
+{
+}
+#endif
+
/**
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1651,6 +1734,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.resume = ufs_qcom_resume,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
.device_reset = ufs_qcom_device_reset,
+ .config_scaling_param = ufs_qcom_config_scaling_param,
};
/**
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index dbdf8b01abed..92a63eebdca9 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -210,8 +210,10 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
if (param_size > 8)
return -EINVAL;
+ pm_runtime_get_sync(hba->dev);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
+ pm_runtime_put_sync(hba->dev);
if (ret)
return -EINVAL;
switch (param_size) {
@@ -558,6 +560,7 @@ static ssize_t _name##_show(struct device *dev, \
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
if (!desc_buf) \
return -ENOMEM; \
+ pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
0, 0, desc_buf, &desc_len); \
@@ -574,6 +577,7 @@ static ssize_t _name##_show(struct device *dev, \
goto out; \
ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \
out: \
+ pm_runtime_put_sync(hba->dev); \
kfree(desc_buf); \
return ret; \
} \
@@ -604,9 +608,13 @@ static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
bool flag; \
+ int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
- if (ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
- QUERY_FLAG_IDN##_uname, &flag)) \
+ pm_runtime_get_sync(hba->dev); \
+ ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
+ QUERY_FLAG_IDN##_uname, &flag); \
+ pm_runtime_put_sync(hba->dev); \
+ if (ret) \
return -EINVAL; \
return sprintf(buf, "%s\n", flag ? "true" : "false"); \
} \
@@ -644,8 +652,12 @@ static ssize_t _name##_show(struct device *dev, \
{ \
struct ufs_hba *hba = dev_get_drvdata(dev); \
u32 value; \
- if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
- QUERY_ATTR_IDN##_uname, 0, 0, &value)) \
+ int ret; \
+ pm_runtime_get_sync(hba->dev); \
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
+ QUERY_ATTR_IDN##_uname, 0, 0, &value); \
+ pm_runtime_put_sync(hba->dev); \
+ if (ret) \
return -EINVAL; \
return sprintf(buf, "0x%08X\n", value); \
} \
@@ -766,9 +778,13 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba = shost_priv(sdev->host);
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
+ int ret;
- if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value))
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
+ pm_runtime_put_sync(hba->dev);
+ if (ret)
return -EINVAL;
return sprintf(buf, "0x%08X\n", value);
}
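Every sysfs read path above now brackets the device query with a runtime-PM reference so the UFS device is resumed while the descriptor, flag, or attribute is read. A minimal sketch of the pattern (query_attr() is a hypothetical stand-in for the ufshcd query helpers):

#include <linux/pm_runtime.h>
#include <linux/types.h>

static int query_attr(struct device *dev, u32 *value);	/* hypothetical */

static int read_attr_resumed(struct device *dev, u32 *value)
{
	int ret;

	pm_runtime_get_sync(dev);	/* resume the device if suspended */
	ret = query_attr(dev, value);
	pm_runtime_put_sync(dev);	/* drop the reference, allow suspend */

	return ret ? -EINVAL : 0;
}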
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index cfe380348bf0..990cb48e2403 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -167,6 +167,7 @@ enum attr_idn {
QUERY_ATTR_IDN_FFU_STATUS = 0x14,
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
+ QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
};
/* Descriptor idn for Query requests */
@@ -534,6 +535,8 @@ struct ufs_dev_info {
u16 wmanufacturerid;
/*UFS device Product Name */
u8 *model;
+ u16 wspecversion;
+ u32 clk_gating_wait_us;
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index d0ab147f98d3..df7a1e6805a3 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -15,6 +15,7 @@
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
#define UFS_VENDOR_SKHYNIX 0x1AD
+#define UFS_VENDOR_WDC 0x145
/**
* ufs_dev_fix - ufs device quirk info
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 3b19de3ae9a3..8f78a8151499 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -44,7 +44,7 @@ static int ufs_intel_disable_lcc(struct ufs_hba *hba)
ufshcd_dme_get(hba, attr, &lcc_enable);
if (lcc_enable)
- ufshcd_dme_set(hba, attr, 0);
+ ufshcd_disable_host_tx_lcc(hba);
return 0;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2d705694636c..698e8d20b4ba 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -42,6 +42,7 @@
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/blk-pm.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -91,6 +92,9 @@
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000
+/* Default value of wait time before gating device ref clock */
+#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -168,19 +172,6 @@ enum {
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
-#define ufshcd_set_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
-#define ufshcd_set_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
-#define ufshcd_set_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
-#define ufshcd_is_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
-#define ufshcd_is_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
-#define ufshcd_is_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
-
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
@@ -532,6 +523,18 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
+{
+ if (!us)
+ return;
+
+ if (us < 10)
+ udelay(us);
+ else
+ usleep_range(us, us + tolerance);
+}
+EXPORT_SYMBOL_GPL(ufshcd_delay_us);
+
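The 10 us cutoff in the new helper follows the kernel's timers-howto guidance: below roughly 10 us the setup cost of a sleeping wait exceeds the delay itself, so the helper busy-waits; longer delays sleep with a tolerance window. Illustrative call shapes (the values echo call sites elsewhere in this series):

ufshcd_delay_us(0, 10);		/* no-op: returns immediately */
ufshcd_delay_us(2, 10);		/* < 10us: busy-waits via udelay(2) */
ufshcd_delay_us(1000, 100);	/* >= 10us: usleep_range(1000, 1100) */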
/*
* ufshcd_wait_for_register - wait for register value to change
* @hba - per-adapter interface
@@ -642,11 +645,7 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
*/
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos),
- REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
/**
@@ -656,10 +655,7 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
*/
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
@@ -859,28 +855,29 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+/**
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
+ * @hba: per adapter instance
+ * @scale_up: If true, set max possible frequency; otherwise set low frequency
+ *
+ * Returns 0 if successful
+ * Returns < 0 on error
+ */
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- ktime_t start = ktime_get();
- bool clk_state_changed = false;
if (list_empty(head))
goto out;
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
- if (ret)
- return ret;
-
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
- clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->max_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
@@ -899,7 +896,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
if (clki->curr_freq == clki->min_freq)
continue;
- clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->min_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
@@ -918,11 +914,37 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
clki->name, clk_get_rate(clki->clk));
}
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns 0 if successful
+ * Returns < 0 on error
+ */
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_set_clk_freq(hba, scale_up);
+ if (ret)
+ goto out;
+
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ if (ret)
+ ufshcd_set_clk_freq(hba, !scale_up);
out:
- if (clk_state_changed)
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -1056,8 +1078,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
/* check if the power mode needs to be changed or not? */
- ret = ufshcd_change_power_mode(hba, &new_pwr_info);
-
+ ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
__func__, ret,
@@ -1110,35 +1131,32 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
- return ret;
+ goto out;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
ret = ufshcd_scale_gear(hba, false);
if (ret)
- goto out;
+ goto out_unprepare;
}
ret = ufshcd_scale_clks(hba, scale_up);
if (ret) {
if (!scale_up)
ufshcd_scale_gear(hba, true);
- goto out;
+ goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
- if (ret) {
+ if (ret)
ufshcd_scale_clks(hba, false);
- goto out;
- }
}
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
-
-out:
+out_unprepare:
ufshcd_clock_scaling_unprepare(hba);
+out:
ufshcd_release(hba);
return ret;
}
@@ -1191,6 +1209,9 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ /* Override with the closest supported frequency */
+ *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
@@ -1205,8 +1226,11 @@ static int ufshcd_devfreq_target(struct device *dev,
goto out;
}
- clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ /* Decide based on the rounded-off frequency and update */
scale_up = (*freq == clki->max_freq) ? true : false;
+ if (!scale_up)
+ *freq = clki->min_freq;
+ /* Update the frequency */
if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0;
@@ -1254,6 +1278,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags;
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -1264,6 +1290,13 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
if (!scaling->window_start_t)
goto start_window;
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ /*
+	 * If the current frequency is 0, the ondemand governor assumes
+	 * that no initial frequency has been set and always requests
+	 * the maximum frequency.
+ */
+ stat->current_frequency = clki->curr_freq;
if (scaling->is_busy_started)
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
@@ -1292,6 +1325,17 @@ static struct devfreq_dev_profile ufs_devfreq_profile = {
.get_dev_status = ufshcd_devfreq_get_dev_status,
};
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
+ .upthreshold = 70,
+ .downdifferential = 5,
+};
+
+static void *gov_data = &ufs_ondemand_data;
+#else
+static void *gov_data; /* NULL */
+#endif
+
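upthreshold and downdifferential feed devfreq's simple_ondemand governor: load above upthreshold requests the maximum frequency, load below (upthreshold - downdifferential) scales down, and the band in between holds the current frequency to damp oscillation. A rough userspace approximation of that decision (the authoritative logic lives in drivers/devfreq/governor_simpleondemand.c; this is a sketch, not a copy):

#include <stdio.h>

static unsigned long pick_freq(unsigned long busy, unsigned long total,
			       unsigned long cur, unsigned long max)
{
	unsigned long load = total ? busy * 100 / total : 0;

	if (load > 70)			/* upthreshold: jump to max */
		return max;
	if (load < 70 - 5)		/* below (up - diff): scale down */
		return cur * load / (70 - 5);
	return cur;			/* hysteresis band: hold */
}

int main(void)
{
	printf("%lu\n", pick_freq(80, 100, 100000, 400000));	/* 400000 */
	printf("%lu\n", pick_freq(30, 100, 400000, 400000));	/* scales down */
	return 0;
}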
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
@@ -1307,10 +1351,12 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
dev_pm_opp_add(hba->dev, clki->min_freq, 0);
dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+ ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
+ gov_data);
devfreq = devfreq_add_device(hba->dev,
&ufs_devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
- NULL);
+ gov_data);
if (IS_ERR(devfreq)) {
ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
@@ -1518,6 +1564,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2093,13 +2144,8 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return sg_segments;
if (sg_segments) {
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16)(sg_segments *
- sizeof(struct ufshcd_sg_entry)));
- else
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)sg_segments);
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
@@ -2363,6 +2409,27 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
+static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
+{
+ struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
+ dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
+ i * sizeof(struct utp_transfer_cmd_desc);
+ u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
+ response_upiu);
+ u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ lrb->utr_descriptor_ptr = utrdlp + i;
+ lrb->utrd_dma_addr = hba->utrdl_dma_addr +
+ i * sizeof(struct utp_transfer_req_desc);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+ lrb->ucd_req_dma_addr = cmd_desc_element_addr;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+}
+
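ufshcd_init_lrb() derives every per-slot pointer from a single DMA base plus offsetof() into the command descriptor, replacing the repeated arithmetic removed from ufshcd_host_memory_configure() below. A toy userspace illustration of the layout math (field names and sizes are placeholders):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct cmd_desc {	/* placeholder layout, not the UFSHCI one */
	uint8_t command_upiu[512];
	uint8_t response_upiu[512];
	uint8_t prd_table[1024];
};

int main(void)
{
	uint64_t base = 0x80000000ULL;	/* hypothetical DMA base address */
	int i = 3;			/* request slot index */
	uint64_t slot = base + (uint64_t)i * sizeof(struct cmd_desc);

	printf("rsp dma  = %#llx\n", (unsigned long long)
	       (slot + offsetof(struct cmd_desc, response_upiu)));
	printf("prdt dma = %#llx\n", (unsigned long long)
	       (slot + offsetof(struct cmd_desc, prd_table)));
	return 0;
}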
/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @host: SCSI host pointer
@@ -2452,7 +2519,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_vops_setup_xfer_req(hba, tag, true);
ufshcd_send_command(hba, tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -2639,7 +2706,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_vops_setup_xfer_req(hba, tag, false);
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -3276,6 +3343,31 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
param_offset, param_read_buf, param_size);
}
+static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
+
+ if (hba->dev_info.wspecversion >= 0x300) {
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
+ &gating_wait);
+ if (err)
+ dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
+ err, gating_wait);
+
+ if (gating_wait == 0) {
+ gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
+ dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
+ gating_wait);
+ }
+
+ hba->dev_info.clk_gating_wait_us = gating_wait;
+ }
+
+ return err;
+}
+
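bRefClkGatingWaitTime is only defined for UFS 3.0 and later devices, hence the wspecversion >= 0x300 gate above; the device descriptor encodes the spec version as two big-endian bytes (0x0300 for UFS 3.0, 0x0310 for 3.1). A small sketch of the version check (descriptor bytes are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t desc[2] = { 0x03, 0x10 };	/* big-endian: UFS 3.1 */
	uint16_t wspecversion = desc[0] << 8 | desc[1];

	if (wspecversion >= 0x300)
		printf("UFS >= 3.0: query bRefClkGatingWaitTime\n");
	else
		printf("pre-3.0 device: attribute not defined\n");
	return 0;
}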
/**
* ufshcd_memory_alloc - allocate memory for host memory space data structures
* @hba: per adapter instance
@@ -3373,7 +3465,6 @@ out:
*/
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
- struct utp_transfer_cmd_desc *cmd_descp;
struct utp_transfer_req_desc *utrdlp;
dma_addr_t cmd_desc_dma_addr;
dma_addr_t cmd_desc_element_addr;
@@ -3383,7 +3474,6 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
int i;
utrdlp = hba->utrdl_base_addr;
- cmd_descp = hba->ucdl_base_addr;
response_offset =
offsetof(struct utp_transfer_cmd_desc, response_upiu);
@@ -3403,36 +3493,13 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
- utrdlp[i].response_upiu_offset =
- cpu_to_le16(response_offset);
- utrdlp[i].prd_table_offset =
- cpu_to_le16(prdt_offset);
- utrdlp[i].response_upiu_length =
- cpu_to_le16(ALIGNED_UPIU_SIZE);
- } else {
- utrdlp[i].response_upiu_offset =
- cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
- cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
- cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
- }
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset >> 2);
+ utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
- hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
- hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
- (i * sizeof(struct utp_transfer_req_desc));
- hba->lrb[i].ucd_req_ptr =
- (struct utp_upiu_req *)(cmd_descp + i);
- hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
- hba->lrb[i].ucd_rsp_ptr =
- (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
- hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
- response_offset;
- hba->lrb[i].ucd_prdt_ptr =
- (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
- hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
- prdt_offset;
+ ufshcd_init_lrb(hba, &hba->lrb[i], i);
}
}
@@ -3460,52 +3527,6 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
"dme-link-startup: error code %d\n", ret);
return ret;
}
-/**
- * ufshcd_dme_reset - UIC command for DME_RESET
- * @hba: per adapter instance
- *
- * DME_RESET command is issued in order to reset UniPro stack.
- * This function now deal with cold reset.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_reset(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_RESET;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
-
- return ret;
-}
-
-/**
- * ufshcd_dme_enable - UIC command for DME_ENABLE
- * @hba: per adapter instance
- *
- * DME_ENABLE command is issued in order to enable UniPro stack.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_enable(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_ENABLE;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
-
- return ret;
-}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -3773,7 +3794,7 @@ out:
return ret;
}
-static int ufshcd_link_recovery(struct ufs_hba *hba)
+int ufshcd_link_recovery(struct ufs_hba *hba)
{
int ret;
unsigned long flags;
@@ -3800,6 +3821,7 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
@@ -4100,8 +4122,6 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba,
memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
ret = ufshcd_change_power_mode(hba, &final_params);
- if (!ret)
- ufshcd_print_pwr_info(hba);
return ret;
}
@@ -4224,7 +4244,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
}
/**
- * ufshcd_hba_execute_hce - initialize the controller
+ * ufshcd_hba_enable - initialize the controller
* @hba: per adapter instance
*
* The controller resets itself and controller firmware initialization
@@ -4233,7 +4253,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
+int ufshcd_hba_enable(struct ufs_hba *hba)
{
int retry;
@@ -4259,10 +4279,10 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
* instruction might be read back.
* This delay can be changed based on the controller.
*/
- usleep_range(1000, 1100);
+ ufshcd_delay_us(hba->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
- retry = 10;
+ retry = 50;
while (ufshcd_is_hba_active(hba)) {
if (retry) {
retry--;
@@ -4271,7 +4291,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
"Controller enable failed\n");
return -EIO;
}
- usleep_range(5000, 5100);
+ usleep_range(1000, 1100);
}
/* enable UIC related interrupts */
@@ -4281,37 +4301,11 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
return 0;
}
-
-int ufshcd_hba_enable(struct ufs_hba *hba)
-{
- int ret;
-
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
- ufshcd_set_link_off(hba);
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
-
- /* enable UIC related interrupts */
- ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
- ret = ufshcd_dme_reset(hba);
- if (!ret) {
- ret = ufshcd_dme_enable(hba);
- if (!ret)
- ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
- if (ret)
- dev_err(hba->dev,
- "Host controller enable failed with non-hce\n");
- }
- } else {
- ret = ufshcd_hba_execute_hce(hba);
- }
-
- return ret;
-}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
- int tx_lanes, i, err = 0;
+ int tx_lanes = 0, i, err = 0;
if (!peer)
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
@@ -4737,8 +4731,15 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* UFS device needs urgent BKOPs.
*/
if (!hba->pm_op_in_progress &&
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
- schedule_work(&hba->eeh_work);
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
+ schedule_work(&hba->eeh_work)) {
+ /*
+ * Prevent suspend once eeh_work is scheduled
+			 * to avoid a deadlock between ufshcd_suspend()
+			 * and the exception event handler.
+ */
+ pm_runtime_get_noresume(hba->dev);
+ }
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -4876,8 +4877,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
* false interrupt if device completes another request after resetting
* aggregation and before reading the DB.
*/
- if (ufshcd_is_intr_aggr_allowed(hba) &&
- !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
+ if (ufshcd_is_intr_aggr_allowed(hba))
ufshcd_reset_intr_aggr(hba);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -5191,7 +5191,14 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
out:
ufshcd_scsi_unblock_requests(hba);
- pm_runtime_put_sync(hba->dev);
+ /*
+	 * pm_runtime_get_noresume() is called while scheduling
+	 * eeh_work to prevent suspend from racing with the exception
+	 * work. Drop the usage counter with pm_runtime_put_noidle()
+	 * so that suspend is allowed once the handler completes.
+ */
+ pm_runtime_put_noidle(hba->dev);
+ pm_runtime_put(hba->dev);
return;
}
@@ -5486,7 +5493,8 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
u32 intr_mask)
{
- if (!ufshcd_is_auto_hibern8_supported(hba))
+ if (!ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_auto_hibern8_enabled(hba))
return false;
if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
@@ -6299,7 +6307,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, true);
+ ufshcd_set_clk_freq(hba, true);
err = ufshcd_hba_enable(hba);
if (err)
@@ -6482,11 +6490,12 @@ out:
return icc_level;
}
-static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
int buff_len = hba->desc_size.pwr_desc;
u8 *desc_buf;
+ u32 icc_level;
desc_buf = kmalloc(buff_len, GFP_KERNEL);
if (!desc_buf)
@@ -6501,25 +6510,32 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
goto out;
}
- hba->init_prefetch_data.icc_level =
- ufshcd_find_max_sup_active_icc_level(hba,
- desc_buf, buff_len);
- dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
- __func__, hba->init_prefetch_data.icc_level);
+ icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
+ buff_len);
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
if (ret)
dev_err(hba->dev,
"%s: Failed configuring bActiveICCLevel = %d ret = %d",
- __func__, hba->init_prefetch_data.icc_level , ret);
+ __func__, icc_level, ret);
out:
kfree(desc_buf);
}
+static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
+{
+ scsi_autopm_get_device(sdev);
+ blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
+ if (sdev->rpm_autosuspend)
+ pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
+ RPM_AUTOSUSPEND_DELAY_MS);
+ scsi_autopm_put_device(sdev);
+}
+
/**
* ufshcd_scsi_add_wlus - Adds required W-LUs
* @hba: per-adapter instance
@@ -6559,6 +6575,7 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
hba->sdev_ufs_device = NULL;
goto out;
}
+ ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
scsi_device_put(hba->sdev_ufs_device);
sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
@@ -6567,14 +6584,17 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
ret = PTR_ERR(sdev_rpmb);
goto remove_sdev_ufs_device;
}
+ ufshcd_blk_pm_runtime_init(sdev_rpmb);
scsi_device_put(sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
- if (IS_ERR(sdev_boot))
+ if (IS_ERR(sdev_boot)) {
dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
- else
+ } else {
+ ufshcd_blk_pm_runtime_init(sdev_boot);
scsi_device_put(sdev_boot);
+ }
goto out;
remove_sdev_ufs_device:
@@ -6614,6 +6634,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+ /* getting Specification Version in big endian format */
+ dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
@@ -6811,14 +6835,14 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
ufshcd_tune_pa_hibern8time(hba);
}
+ ufshcd_vops_apply_dev_quirks(hba);
+
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
-
- ufshcd_vops_apply_dev_quirks(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -6991,6 +7015,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
goto out;
}
+ ufshcd_get_ref_clk_gating_wait(hba);
+
ufs_fixup_device_setup(hba);
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
@@ -7014,8 +7040,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
{
int ret;
- ufshcd_init_icc_levels(hba);
-
/* Add required well known logical units to scsi mid layer */
ret = ufshcd_scsi_add_wlus(hba);
if (ret)
@@ -7111,8 +7135,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
__func__, ret);
goto out;
}
+ ufshcd_print_pwr_info(hba);
}
+ /*
+	 * bActiveICCLevel is volatile for the UFS device (per the latest
+	 * v2.1 spec) and for removable UFS cards as well, so always set
+	 * the parameter. Note: the error handler may issue a device
+	 * reset, which also resets bActiveICCLevel, so it is always safe
+	 * to set it here.
+ */
+ ufshcd_set_active_icc_lvl(hba);
+
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -7241,6 +7274,11 @@ static int ufshcd_config_vreg(struct device *dev,
name = vreg->name;
if (regulator_count_voltages(reg) > 0) {
+ uA_load = on ? vreg->max_uA : 0;
+ ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+ if (ret)
+ goto out;
+
if (vreg->min_uV && vreg->max_uV) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
@@ -7251,11 +7289,6 @@ static int ufshcd_config_vreg(struct device *dev,
goto out;
}
}
-
- uA_load = on ? vreg->max_uA : 0;
- ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
- if (ret)
- goto out;
}
out:
return ret;
@@ -7395,16 +7428,9 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (list_empty(head))
goto out;
- /*
- * vendor specific setup_clocks ops may depend on clocks managed by
- * this standard driver hence call the vendor specific setup_clocks
- * before disabling the clocks managed here.
- */
- if (!on) {
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
- if (ret)
- return ret;
- }
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
@@ -7428,16 +7454,9 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- /*
- * vendor specific setup_clocks ops may depend on clocks managed by
- * this standard driver hence call the vendor specific setup_clocks
- * after enabling the clocks managed here.
- */
- if (on) {
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
- if (ret)
- return ret;
- }
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
out:
if (ret) {
@@ -7931,6 +7950,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto enable_gating;
}
+ flush_work(&hba->eeh_work);
ret = ufshcd_link_state_transition(hba, req_link_state, 1);
if (ret)
goto set_dev_active;
@@ -8402,6 +8422,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
+ hba->hba_enable_delay_us = 1000;
err = ufshcd_hba_init(hba);
if (err)
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 2ae6c7c8528c..6ffc08ad85f6 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -55,6 +55,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
+#include <linux/bitfield.h>
+#include <linux/devfreq.h>
#include "unipro.h"
#include <asm/irq.h>
@@ -128,6 +130,19 @@ enum uic_link_state {
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
/*
* UFS Power management levels.
* Each level is in increasing order of power savings.
@@ -326,6 +341,9 @@ struct ufs_hba_variant_ops {
void (*dbg_register_dump)(struct ufs_hba *hba);
int (*phy_initialization)(struct ufs_hba *);
void (*device_reset)(struct ufs_hba *hba);
+ void (*config_scaling_param)(struct ufs_hba *hba,
+ struct devfreq_dev_profile *profile,
+ void *data);
};
/* clock gating state */
@@ -403,15 +421,6 @@ struct ufs_clk_scaling {
bool is_suspended;
};
-/**
- * struct ufs_init_prefetch - contains data that is pre-fetched once during
- * initialization
- * @icc_level: icc level which was read during initialization
- */
-struct ufs_init_prefetch {
- u32 icc_level;
-};
-
#define UFS_ERR_REG_HIST_LENGTH 8
/**
* struct ufs_err_reg_hist - keeps history of errors
@@ -469,6 +478,85 @@ struct ufs_stats {
struct ufs_err_reg_hist task_abort;
};
+enum ufshcd_quirks {
+ /* Interrupt aggregation support is broken */
+ UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
+
+ /*
+ * delay before each dme command is required as the unipro
+ * layer has shown instabilities
+ */
+ UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
+
+ /*
+ * If UFS host controller is having issue in processing LCC (Line
+ * Control Command) coming from device then enable this quirk.
+ * When this quirk is enabled, host controller driver should disable
+ * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
+ * attribute of device to 0).
+ */
+ UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
+
+ /*
+ * The attribute PA_RXHSUNTERMCAP specifies whether or not the
+ * inbound Link supports unterminated line in HS mode. Setting this
+ * attribute to 1 fixes moving to HS gear.
+ */
+ UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
+
+ /*
+ * This quirk needs to be enabled if the host controller only allows
+ * accessing the peer dme attributes in AUTO mode (FAST AUTO or
+ * SLOW AUTO).
+ */
+ UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
+
+ /*
+ * This quirk needs to be enabled if the host controller doesn't
+ * advertise the correct version in UFS_VER register. If this quirk
+ * is enabled, standard UFS host driver will call the vendor specific
+ * ops (get_ufs_hci_version) to get the correct version.
+ */
+ UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
+};
+
+enum ufshcd_caps {
+ /* Allow dynamic clk gating */
+ UFSHCD_CAP_CLK_GATING = 1 << 0,
+
+	/* Allow hibern8 with clk gating */
+ UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,
+
+ /* Allow dynamic clk scaling */
+ UFSHCD_CAP_CLK_SCALING = 1 << 2,
+
+	/* Allow auto bkops to be enabled during runtime suspend */
+ UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,
+
+ /*
+ * This capability allows host controller driver to use the UFS HCI's
+ * interrupt aggregation capability.
+ * CAUTION: Enabling this might reduce overall UFS throughput.
+ */
+ UFSHCD_CAP_INTR_AGGR = 1 << 4,
+
+ /*
+ * This capability allows the device auto-bkops to be always enabled
+	 * except during suspend (both runtime and system suspend).
+ * Enabling this capability means that device will always be allowed
+ * to do background operation when it's active but it might degrade
+ * the performance of ongoing read/write operations.
+ */
+ UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,
+
+ /*
+ * This capability allows host controller driver to automatically
+ * enable runtime power management by itself instead of waiting
+ * for userspace to control the power management.
+ */
+ UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
+};
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -501,7 +589,6 @@ struct ufs_stats {
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
- * @init_prefetch_data: data pre-fetched during initialization
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
@@ -572,68 +659,6 @@ struct ufs_hba {
bool is_irq_enabled;
enum ufs_ref_clk_freq dev_ref_clk_freq;
- /* Interrupt aggregation support is broken */
- #define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1
-
- /*
- * delay before each dme command is required as the unipro
- * layer has shown instabilities
- */
- #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS 0x2
-
- /*
- * If UFS host controller is having issue in processing LCC (Line
- * Control Command) coming from device then enable this quirk.
- * When this quirk is enabled, host controller driver should disable
- * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
- * attribute of device to 0).
- */
- #define UFSHCD_QUIRK_BROKEN_LCC 0x4
-
- /*
- * The attribute PA_RXHSUNTERMCAP specifies whether or not the
- * inbound Link supports unterminated line in HS mode. Setting this
- * attribute to 1 fixes moving to HS gear.
- */
- #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP 0x8
-
- /*
- * This quirk needs to be enabled if the host contoller only allows
- * accessing the peer dme attributes in AUTO mode (FAST AUTO or
- * SLOW AUTO).
- */
- #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE 0x10
-
- /*
- * This quirk needs to be enabled if the host contoller doesn't
- * advertise the correct version in UFS_VER register. If this quirk
- * is enabled, standard UFS host driver will call the vendor specific
- * ops (get_ufs_hci_version) to get the correct version.
- */
- #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION 0x20
-
- /*
- * This quirk needs to be enabled if the host contoller regards
- * resolution of the values of PRDTO and PRDTL in UTRD as byte.
- */
- #define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80
-
- /*
- * Clear handling for transfer/task request list is just opposite.
- */
- #define UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR 0x100
-
- /*
- * This quirk needs to be enabled if host controller doesn't allow
- * that the interrupt aggregation timer and counter are reset by s/w.
- */
- #define UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR 0x200
-
- /*
- * This quirks needs to be enabled if host controller cannot be
- * enabled via HCE register.
- */
- #define UFSHCI_QUIRK_BROKEN_HCE 0x400
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
@@ -650,8 +675,8 @@ struct ufs_hba {
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
+ u16 hba_enable_delay_us;
bool is_powered;
- struct ufs_init_prefetch init_prefetch_data;
/* Work Queues */
struct work_struct eh_work;
@@ -688,34 +713,6 @@ struct ufs_hba {
struct ufs_clk_gating clk_gating;
/* Control to enable/disable host capabilities */
u32 caps;
- /* Allow dynamic clk gating */
-#define UFSHCD_CAP_CLK_GATING (1 << 0)
- /* Allow hiberb8 with clk gating */
-#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
- /* Allow dynamic clk scaling */
-#define UFSHCD_CAP_CLK_SCALING (1 << 2)
- /* Allow auto bkops to enabled during runtime suspend */
-#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
- /*
- * This capability allows host controller driver to use the UFS HCI's
- * interrupt aggregation capability.
- * CAUTION: Enabling this might reduce overall UFS throughput.
- */
-#define UFSHCD_CAP_INTR_AGGR (1 << 4)
- /*
- * This capability allows the device auto-bkops to be always enabled
- * except during suspend (both runtime and suspend).
- * Enabling this capability means that device will always be allowed
- * to do background operation when it's active but it might degrade
- * the performance of ongoing read/write operations.
- */
-#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
- /*
- * This capability allows host controller driver to automatically
- * enable runtime power management by itself instead of waiting
- * for userspace to control the power management.
- */
-#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -773,6 +770,11 @@ static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
}
+static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
+{
+ return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
+}
+
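ufshcd_is_auto_hibern8_enabled() treats a nonzero idle-timer field in hba->ahit as Auto-Hibern8 being enabled. A userspace stand-in for the FIELD_GET() step (the mask assumes UFSHCI_AHIBERN8_TIMER_MASK covers the low timer bits of the AHIT register; see ufshci.h for the authoritative definition):

#include <stdio.h>
#include <stdint.h>

#define AHIBERN8_TIMER_MASK 0x3ffu	/* assumed GENMASK(9, 0) */

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);	/* shift field to bit 0 */
}

int main(void)
{
	printf("enabled=%d\n", field_get(AHIBERN8_TIMER_MASK, 0x0) != 0);	/* 0 */
	printf("enabled=%d\n", field_get(AHIBERN8_TIMER_MASK, 0x64) != 0);	/* 1 */
	return 0;
}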
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -799,9 +801,11 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep);
@@ -908,6 +912,11 @@ static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
pwr_info->pwr_tx == FASTAUTO_MODE);
}
+static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+}
+
/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -1088,10 +1097,19 @@ static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->device_reset) {
hba->vops->device_reset(hba);
+ ufshcd_set_ufs_dev_active(hba);
ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
}
}
+static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile
+ *profile, void *data)
+{
+ if (hba->vops && hba->vops->config_scaling_param)
+ hba->vops->config_scaling_param(hba, profile, data);
+}
+
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/*
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 3dc4d8b76509..766d551df3fc 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -146,6 +146,12 @@
#define PA_SLEEPNOCONFIGTIME 0x15A2
#define PA_STALLNOCONFIGTIME 0x15A3
#define PA_SAVECONFIGTIME 0x15A4
+#define PA_TXHSADAPTTYPE 0x15D4
+
+/* Adapt type for PA_TXHSADAPTTYPE attribute */
+#define PA_REFRESH_ADAPT 0x00
+#define PA_INITIAL_ADAPT 0x01
+#define PA_NO_ADAPT 0x03
#define PA_TACTIVATE_TIME_UNIT_US 10
#define PA_HIBERN8_TIME_UNIT_US 100
@@ -203,6 +209,7 @@ enum ufs_hs_gear_tag {
UFS_HS_G1, /* HS Gear 1 (default for reset) */
UFS_HS_G2, /* HS Gear 2 */
UFS_HS_G3, /* HS Gear 3 */
+ UFS_HS_G4, /* HS Gear 4 */
};
enum ufs_unipro_ver {
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index bfec84aacd90..0e0910c5b942 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -742,7 +742,6 @@ static struct scsi_host_template virtscsi_host_template = {
.dma_boundary = UINT_MAX,
.map_queues = virtscsi_map_queues,
.track_queue_depth = 1,
- .force_blk_mq = 1,
};
#define virtscsi_config_get(vdev, fld) \
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index bdd82e497d5f..c6727bcbc2e3 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -801,8 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
/* additional setup required for Fastlane */
if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
/* map full address space up to ESP base for DMA */
- zep->board_base = ioremap(board,
- FASTLANE_ESP_ADDR-1);
+ zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1);
if (!zep->board_base) {
pr_err("Cannot allocate board address space\n");
err = -ENOMEM;
@@ -843,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
* dma_registers size if adding any more
*/
esp->dma_regs = ioremap(dmaaddr,
- sizeof(struct fastlane_dma_registers));
+ sizeof(struct fastlane_dma_registers));
} else
/* ZorroII address space remapped nocache by early startup */
esp->dma_regs = ZTWO_VADDR(dmaaddr);
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 1778f8c62861..425ab6f7e375 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -22,5 +22,6 @@ source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
source "drivers/soc/xilinx/Kconfig"
source "drivers/soc/zte/Kconfig"
+source "drivers/soc/kendryte/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 8b49d782a1ab..36452bed86ef 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
-obj-$(CONFIG_ARCH_MXC) += imx/
+obj-y += imx/
obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
@@ -28,3 +28,4 @@ obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
obj-y += xilinx/
obj-$(CONFIG_ARCH_ZX) += zte/
+obj-$(CONFIG_SOC_KENDRYTE) += kendryte/
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index bc2c912949bd..321c5e26a268 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -48,6 +48,19 @@ config MESON_EE_PM_DOMAINS
Say yes to expose Amlogic Meson Everything-Else Power Domains as
Generic Power Domains.
+config MESON_SECURE_PM_DOMAINS
+ bool "Amlogic Meson Secure Power Domains driver"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+ depends on PM && OF
+ depends on HAVE_ARM_SMCCC
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Support for the power controller on Amlogic A1/C1 series.
+ Say yes to expose Amlogic Meson Secure Power Domains as Generic
+ Power Domains.
+
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile
index de79d044b545..7b8c5d323f5c 100644
--- a/drivers/soc/amlogic/Makefile
+++ b/drivers/soc/amlogic/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o
+obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
new file mode 100644
index 000000000000..5fb29a475879
--- /dev/null
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Amlogic, Inc.
+ * Author: Jianxin Pan <jianxin.pan@amlogic.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <dt-bindings/power/meson-a1-power.h>
+#include <linux/arm-smccc.h>
+#include <linux/firmware/meson/meson_sm.h>
+
+#define PWRC_ON 1
+#define PWRC_OFF 0
+
+struct meson_secure_pwrc_domain {
+ struct generic_pm_domain base;
+ unsigned int index;
+ struct meson_secure_pwrc *pwrc;
+};
+
+struct meson_secure_pwrc {
+ struct meson_secure_pwrc_domain *domains;
+ struct genpd_onecell_data xlate;
+ struct meson_sm_firmware *fw;
+};
+
+struct meson_secure_pwrc_domain_desc {
+ unsigned int index;
+ unsigned int flags;
+ char *name;
+ bool (*is_off)(struct meson_secure_pwrc_domain *pwrc_domain);
+};
+
+struct meson_secure_pwrc_domain_data {
+ unsigned int count;
+ struct meson_secure_pwrc_domain_desc *domains;
+};
+
+static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain)
+{
+ int is_off = 1;
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_GET, &is_off,
+ pwrc_domain->index, 0, 0, 0, 0) < 0)
+ pr_err("failed to get power domain status\n");
+
+ return is_off;
+}
+
+static int meson_secure_pwrc_off(struct generic_pm_domain *domain)
+{
+ int ret = 0;
+ struct meson_secure_pwrc_domain *pwrc_domain =
+ container_of(domain, struct meson_secure_pwrc_domain, base);
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
+ pwrc_domain->index, PWRC_OFF, 0, 0, 0) < 0) {
+ pr_err("failed to set power domain off\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int meson_secure_pwrc_on(struct generic_pm_domain *domain)
+{
+ int ret = 0;
+ struct meson_secure_pwrc_domain *pwrc_domain =
+ container_of(domain, struct meson_secure_pwrc_domain, base);
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
+ pwrc_domain->index, PWRC_ON, 0, 0, 0) < 0) {
+ pr_err("failed to set power domain on\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+#define SEC_PD(__name, __flag) \
+[PWRC_##__name##_ID] = \
+{ \
+ .name = #__name, \
+ .index = PWRC_##__name##_ID, \
+ .is_off = pwrc_secure_is_off, \
+ .flags = __flag, \
+}
+
+static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
+ SEC_PD(DSPA, 0),
+ SEC_PD(DSPB, 0),
+ /* UART should keep working in ATF after suspend and before resume */
+ SEC_PD(UART, GENPD_FLAG_ALWAYS_ON),
+ /* DMC is for DDR PHY ana/dig and DMC, and should be always on */
+ SEC_PD(DMC, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(I2C, 0),
+ SEC_PD(PSRAM, 0),
+ SEC_PD(ACODEC, 0),
+ SEC_PD(AUDIO, 0),
+ SEC_PD(OTP, 0),
+ SEC_PD(DMA, 0),
+ SEC_PD(SD_EMMC, 0),
+ SEC_PD(RAMA, 0),
+ /* SRAMB is used as ATF runtime memory, and should be always on */
+ SEC_PD(RAMB, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(IR, 0),
+ SEC_PD(SPICC, 0),
+ SEC_PD(SPIFC, 0),
+ SEC_PD(USB, 0),
+ /* NIC is for the Arm NIC-400 interconnect, and should be always on */
+ SEC_PD(NIC, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(PDMIN, 0),
+ SEC_PD(RSA, 0),
+};
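[Editor's note — not part of the patch] For readability: per the SEC_PD() definition above, the first entry expands to a designated initializer keyed by its dt-bindings ID, e.g.

        [PWRC_DSPA_ID] = {
                .name   = "DSPA",
                .index  = PWRC_DSPA_ID,
                .is_off = pwrc_secure_is_off,
                .flags  = 0,
        },

so every domain lands at its binding index in the table.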
+
+static int meson_secure_pwrc_probe(struct platform_device *pdev)
+{
+ int i;
+ struct device_node *sm_np;
+ struct meson_secure_pwrc *pwrc;
+ const struct meson_secure_pwrc_domain_data *match;
+
+ match = of_device_get_match_data(&pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
+ sm_np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gxbb-sm");
+ if (!sm_np) {
+ dev_err(&pdev->dev, "no secure-monitor node\n");
+ return -ENODEV;
+ }
+
+ pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
+ if (!pwrc)
+ return -ENOMEM;
+
+ pwrc->fw = meson_sm_get(sm_np);
+ of_node_put(sm_np);
+ if (!pwrc->fw)
+ return -EPROBE_DEFER;
+
+ pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count,
+ sizeof(*pwrc->xlate.domains),
+ GFP_KERNEL);
+ if (!pwrc->xlate.domains)
+ return -ENOMEM;
+
+ pwrc->domains = devm_kcalloc(&pdev->dev, match->count,
+ sizeof(*pwrc->domains), GFP_KERNEL);
+ if (!pwrc->domains)
+ return -ENOMEM;
+
+ pwrc->xlate.num_domains = match->count;
+ platform_set_drvdata(pdev, pwrc);
+
+ for (i = 0; i < match->count; ++i) {
+ struct meson_secure_pwrc_domain *dom = &pwrc->domains[i];
+
+ if (!match->domains[i].index)
+ continue;
+
+ dom->pwrc = pwrc;
+ dom->index = match->domains[i].index;
+ dom->base.name = match->domains[i].name;
+ dom->base.flags = match->domains[i].flags;
+ dom->base.power_on = meson_secure_pwrc_on;
+ dom->base.power_off = meson_secure_pwrc_off;
+
+ pm_genpd_init(&dom->base, NULL, match->domains[i].is_off(dom));
+
+ pwrc->xlate.domains[i] = &dom->base;
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
+}
+
+static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = {
+ .domains = a1_pwrc_domains,
+ .count = ARRAY_SIZE(a1_pwrc_domains),
+};
+
+static const struct of_device_id meson_secure_pwrc_match_table[] = {
+ {
+ .compatible = "amlogic,meson-a1-pwrc",
+ .data = &meson_secure_a1_pwrc_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver meson_secure_pwrc_driver = {
+ .probe = meson_secure_pwrc_probe,
+ .driver = {
+ .name = "meson_secure_pwrc",
+ .of_match_table = meson_secure_pwrc_match_table,
+ },
+};
+builtin_platform_driver(meson_secure_pwrc_driver);
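[Editor's note — not part of the patch] The secure-monitor ABI used above: SM_A1_PWRC_GET reports a domain's state through the output argument (non-zero meaning "off"), and SM_A1_PWRC_SET takes the domain index plus PWRC_ON/PWRC_OFF. A minimal sketch mirroring the helpers in this file (error handling elided):

        int state = 1;

        meson_sm_call(pwrc->fw, SM_A1_PWRC_GET, &state, index, 0, 0, 0, 0);
        if (state)
                meson_sm_call(pwrc->fw, SM_A1_PWRC_SET, NULL,
                              index, PWRC_ON, 0, 0, 0);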
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 518a8e081b49..bcdcd3e7d7f1 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#include <linux/types.h>
@@ -433,6 +433,78 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
/**
+ * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
+ * to a frame queue using one fqid.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return the number of frames enqueued (0 if the enqueue ring is full),
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
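[Editor's note — not part of the patch] With the return convention corrected above, callers should treat the result as a count and loop on partial enqueues. A hedged usage sketch (fqid and fds[] are hypothetical; a real caller would bound the retries):

        int sent = 0, ret;

        while (sent < nb) {
                /* NULL lets service_select() pick any DPIO service */
                ret = dpaa2_io_service_enqueue_multiple_fq(NULL, fqid,
                                                           &fds[sent],
                                                           nb - sent);
                if (ret < 0)
                        break;          /* -ENODEV: no dpio service */
                sent += ret;            /* 0 means the ring was full; retry */
        }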
+
+/**
+ * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
+ * to different frame queues using a list of fqids.
+ * @d: the given DPIO service.
+ * @fqid: the given list of frame queue ids.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued, at most 32 (the size of the
+ * descriptor table allocated below)
+ *
+ * Return the number of frames enqueued (0 if the enqueue ring is full),
+ * -ENOMEM on allocation failure, or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
+ u32 *fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc *ed;
+ int i, ret;
+
+ ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ d = service_select(d);
+ if (!d) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < nb; i++) {
+ qbman_eq_desc_clear(&ed[i]);
+ qbman_eq_desc_set_no_orp(&ed[i], 0);
+ qbman_eq_desc_set_fq(&ed[i], fqid[i]);
+ }
+
+ ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
+out:
+ kfree(ed);
+ return ret;
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
+
+/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
* @qdid: the given queuing destination id.
@@ -526,7 +598,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
/**
* dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
- * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
+ * @max_frames: the maximum number of dequeued result for frames, must be <= 32.
* @dev: the device to allow mapping/unmapping the DMAable region.
*
* The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
@@ -541,7 +613,7 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
struct dpaa2_io_store *ret;
size_t size;
- if (!max_frames || (max_frames > 16))
+ if (!max_frames || (max_frames > 32))
return NULL;
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index c66f5b73777c..804b8ba9bf5c 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -1,24 +1,18 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>
#include "qbman-portal.h"
-#define QMAN_REV_4000 0x04000000
-#define QMAN_REV_4100 0x04010000
-#define QMAN_REV_4101 0x04010001
-#define QMAN_REV_5000 0x05000000
-
-#define QMAN_REV_MASK 0xffff0000
-
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)
@@ -28,6 +22,7 @@
/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
@@ -51,6 +46,8 @@
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
@@ -78,6 +75,12 @@
/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN 0xbb
+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
+#define EQ_DESC_SIZE_WITHOUT_FD 29
+#define EQ_DESC_SIZE_FD_START 32
+
enum qbman_sdqcr_dct {
qbman_sdqcr_dct_null = 0,
qbman_sdqcr_dct_prio_ics,
@@ -90,6 +93,82 @@ enum qbman_sdqcr_fc {
qbman_sdqcr_fc_up_to_3 = 1
};
+/* Internal Function declaration */
+static int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Function pointers */
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+ = qbman_swp_enqueue_direct;
+
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_direct;
+
+int
+(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_desc_direct;
+
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
+ = qbman_swp_pull_direct;
+
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+ = qbman_swp_dqrr_next_direct;
+
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+ = qbman_swp_release_direct;
+
/* Portal Access */
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
@@ -146,6 +225,15 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
#define QMAN_RT_MODE 0x00000100
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ else
+ return (2 * ringsize) - (first - last);
+}
+
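[Editor's note — not part of the patch] qm_cyc_diff() measures how many entries the hardware has consumed, with indices running modulo 2 * ringsize (the extra wrap bit comes from pi_ci_mask, built in qbman_swp_init() below). Two worked cases for the 32-entry memory-backed EQCR, where indices wrap at 64:

        qm_cyc_diff(32, 10, 14);        /* first <= last: 14 - 10 = 4        */
        qm_cyc_diff(32, 62, 2);         /* wrapped: (2 * 32) - (62 - 2) = 4  */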
/**
* qbman_swp_init() - Create a functional object representing the given
* QBMan portal descriptor.
@@ -156,11 +244,16 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
*/
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
- struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
u32 reg;
+ u32 mask_size;
+ u32 eqcr_pi;
if (!p)
return NULL;
+
+ spin_lock_init(&p->access_spinlock);
+
p->desc = d;
p->mc.valid_bit = QB_VALID_BIT;
p->sdq = 0;
@@ -186,25 +279,38 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->addr_cena = d->cena_bar;
p->addr_cinh = d->cinh_bar;
- if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
- memset(p->addr_cena, 0, 64 * 1024);
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
- 1, /* Writes Non-cacheable */
- 0, /* EQCR_CI stashing threshold */
- 3, /* RPM: Valid bit mode, RCR in array mode */
- 2, /* DCM: Discrete consumption ack mode */
- 3, /* EPM: Valid bit mode, EQCR in array mode */
- 1, /* mem stashing drop enable == TRUE */
- 1, /* mem stashing priority == TRUE */
- 1, /* mem stashing enable == TRUE */
- 1, /* dequeue stashing priority == TRUE */
- 0, /* dequeue stashing enable == FALSE */
- 0); /* EQCR_CI stashing priority == FALSE */
- if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 0, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 2, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
+ } else {
+ memset(p->addr_cena, 0, 64 * 1024);
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 1, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 0, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
+ }
qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
@@ -225,6 +331,30 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
* applied when dequeues from a specific channel are enabled.
*/
qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+
+ p->eqcr.pi_ring_size = 8;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ p->eqcr.pi_ring_size = 32;
+ qbman_swp_enqueue_ptr =
+ qbman_swp_enqueue_mem_back;
+ qbman_swp_enqueue_multiple_ptr =
+ qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_desc_ptr =
+ qbman_swp_enqueue_multiple_desc_mem_back;
+ qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+ qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+ qbman_swp_release_ptr = qbman_swp_release_mem_back;
+ }
+
+ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+ p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
+ eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+ p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
+ & p->eqcr.pi_ci_mask;
+ p->eqcr.available = p->eqcr.pi_ring_size;
+
return p;
}
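[Editor's note — not part of the patch] The pi_ci_mask loop above sets one bit per halving of pi_ring_size, i.e. log2(ring) + 1 bits, yielding a mask of 2 * pi_ring_size - 1 — the "double-width" index space that qm_cyc_diff() expects:

        /* pi_ring_size = 8:  4 iterations -> mask 0x0f (indices mod 16) */
        /* pi_ring_size = 32: 6 iterations -> mask 0x3f (indices mod 64) */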
@@ -378,6 +508,7 @@ enum qb_enqueue_commands {
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
/**
* qbman_eq_desc_clear() - Clear the contents of a descriptor to
@@ -453,8 +584,9 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
QMAN_RT_MODE);
}
+#define QB_RT_BIT ((u32)0x100)
/**
- * qbman_swp_enqueue() - Issue an enqueue command
+ * qbman_swp_enqueue_direct() - Issue an enqueue command
* @s: the software portal used for enqueue
* @d: the enqueue descriptor
* @fd: the frame descriptor to be enqueued
@@ -464,30 +596,349 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
*
* Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
*/
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
- const struct dpaa2_fd *fd)
+static
+int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
{
- struct qbman_eq_desc *p;
- u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
- if (!EQAR_SUCCESS(eqar))
- return -EBUSY;
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qbman_swp_enqueue_mem_back() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static
+int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
- memcpy(&p->dca, &d->dca, 31);
- memcpy(&p->fd, fd, sizeof(*fd));
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- /* Set the verb byte, have to substitute in the valid-bit */
- dma_wmb();
- p->verb = d->verb | EQAR_VB(eqar);
- } else {
- p->verb = d->verb | EQAR_VB(eqar);
- dma_wmb();
- qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
+/**
+ * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)d;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ spin_lock(&s->access_spinlock);
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ spin_unlock(&s->access_spinlock);
+ return 0;
+ }
}
- return 0;
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cachelines without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (size_t)s->addr_cena;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+ spin_unlock(&s->access_spinlock);
+
+ return num_enqueued;
+}
+
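[Editor's note — not part of the patch] Both multi-enqueue paths share a two-phase publish pattern: write each EQCR entry except its verb word, order the stores with dma_wmb(), and only then write the verb carrying the current valid bit, so QBMan can never consume a half-written entry. Distilled from the code above:

        memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);     /* body    */
        memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
               &fd[i], sizeof(*fd));                            /* frame   */
        dma_wmb();                                              /* order   */
        p[0] = cl[0] | s->eqcr.pi_vb;                           /* publish */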
+/**
+ * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+ unsigned long irq_flags;
+
+ spin_lock(&s->access_spinlock);
+ local_irq_save(irq_flags);
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
+ s->eqcr.ci = *p & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ local_irq_restore(irq_flags);
+ spin_unlock(&s->access_spinlock);
+ return 0;
+ }
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ local_irq_restore(irq_flags);
+ spin_unlock(&s->access_spinlock);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cachelines without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
+ s->eqcr.ci = *p & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+
+ return num_enqueued;
}
/* Static (push) dequeue */
@@ -645,7 +1096,7 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
}
/**
- * qbman_swp_pull() - Issue the pull dequeue command
+ * qbman_swp_pull_direct() - Issue the pull dequeue command
* @s: the software portal object
* @d: the software portal descriptor which has been configured with
* the set of qbman_pull_desc_set_*() calls
@@ -653,7 +1104,8 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
* Return 0 for success, and -EBUSY if the software portal is not ready
* to do pull dequeue.
*/
-int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+static
+int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
struct qbman_pull_desc *p;
@@ -671,18 +1123,48 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
p->dq_src = d->dq_src;
p->rsp_addr = d->rsp_addr;
p->rsp_addr_virt = d->rsp_addr_virt;
+ dma_wmb();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- dma_wmb();
- /* Set the verb byte, have to substitute in the valid-bit */
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
- } else {
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
- dma_wmb();
- qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+ return 0;
+}
+
+/**
+ * qbman_swp_pull_mem_back() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static
+int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
}
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
return 0;
}
@@ -690,14 +1172,14 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
#define QMAN_DQRR_PI_MASK 0xf
/**
- * qbman_swp_dqrr_next() - Get an valid DQRR entry
+ * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
* @s: the software portal object
*
* Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
u32 verb;
u32 response_verb;
@@ -740,10 +1222,99 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
- else
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
+
+/**
+ * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
verb = p->dq.verb;
/*
@@ -872,7 +1443,7 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
/**
- * qbman_swp_release() - Issue a buffer release command
+ * qbman_swp_release_direct() - Issue a buffer release command
* @s: the software portal object
* @d: the release descriptor
* @buffers: a pointer pointing to the buffer address to be released
@@ -880,8 +1451,9 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
*
* Return 0 for success, -EBUSY if the release command ring is not ready.
*/
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
- const u64 *buffers, unsigned int num_buffers)
+int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
{
int i;
struct qbman_release_desc *p;
@@ -895,28 +1467,59 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
return -EBUSY;
/* Start the release command */
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
- else
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
/* Copy the caller's buffer pointers to the command */
for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- /*
- * Set the verb byte, have to substitute in the valid-bit
- * and the number of buffers.
- */
- dma_wmb();
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
- } else {
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
- dma_wmb();
- qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
- RAR_IDX(rar) * 4, QMAN_RT_MODE);
- }
+ /*
+ * Set the verb byte, have to substitute in the valid-bit
+ * and the number of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+ return 0;
+}
+
+/**
+ * qbman_swp_release_mem_back() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer pointing to the buffer address to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
return 0;
}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index f3ec5d2044fb..c7c2225b7d91 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -9,6 +9,13 @@
#include <soc/fsl/dpaa2-fd.h>
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+
+#define QMAN_REV_MASK 0xffff0000
+
struct dpaa2_dq;
struct qbman_swp;
@@ -81,6 +88,10 @@ struct qbman_eq_desc {
u8 wae;
u8 rspid;
__le64 rsp_addr;
+};
+
+struct qbman_eq_desc_with_fd {
+ struct qbman_eq_desc desc;
u8 fd[32];
};
@@ -132,8 +143,48 @@ struct qbman_swp {
u8 dqrr_size;
int reset_bug; /* indicates dqrr reset workaround is needed */
} dqrr;
+
+ struct {
+ u32 pi;
+ u32 pi_vb;
+ u32 pi_ring_size;
+ u32 pi_ci_mask;
+ u32 ci;
+ int available;
+ u32 pend;
+ u32 no_pfdr;
+ } eqcr;
+
+ spinlock_t access_spinlock;
};
+/* Function pointers */
+extern
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+extern
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+extern
+int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+extern
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
+extern
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
+extern
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
@@ -158,9 +209,6 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
enum qbman_pull_type_e dct);
-int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
-
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
@@ -172,15 +220,11 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
u32 qd_bin, u32 qd_prio);
-int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
- const struct dpaa2_fd *fd);
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
- const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
@@ -194,6 +238,61 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static inline int
+qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ return qbman_swp_enqueue_ptr(s, d, fd);
+}
+
+/**
+ * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
+}
+
+/**
* qbman_result_is_DQ() - check if the dequeue result is a dequeue response
* @dq: the dequeue result to be checked
*
@@ -504,4 +603,49 @@ int qbman_bp_query(struct qbman_swp *s, u16 bpid,
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer pointing to the buffer address to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+static inline int qbman_swp_release(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ return qbman_swp_release_ptr(s, d, buffers, num_buffers);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static inline int qbman_swp_pull(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+{
+ return qbman_swp_pull_ptr(s, d);
+}
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ return qbman_swp_dqrr_next_ptr(s);
+}
+
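[Editor's note — not part of the patch] These static-inline wrappers hide the direct vs. memory-backed split behind function pointers that qbman_swp_init() selects once at portal init, instead of testing the QMan revision on every call. Consumers keep the old API unchanged, e.g.:

        const struct dpaa2_dq *dq;

        /* Drain the DQRR ring; works on both portal generations */
        while ((dq = qbman_swp_dqrr_next(swp)))
                qbman_swp_dqrr_consume(swp, dq);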
#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 96c2057d8d8e..447146861c2c 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -423,7 +423,7 @@ static void qe_upload_microcode(const void *base,
qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
- qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
+ qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
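[Editor's note — not part of the patch] The fix relies on qe_iowrite32be() accepting a CPU-endian value and doing the big-endian store itself, so pre-converting the constant double-swapped it on little-endian hosts; the extended_modes change in the next hunk is the inverse case, converting a big-endian struct field into a CPU-endian variable with be64_to_cpu(). In sketch form:

        qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);   /* correct */
        /* qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), ...) would store a
         * byte-swapped value on little-endian CPUs */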
/*
@@ -525,7 +525,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
*/
memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
- qe_firmware_info.extended_modes = firmware->extended_modes;
+ qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
sizeof(firmware->vtraps));
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index a81a1a79f1ca..75075591f630 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -46,7 +46,7 @@ int cpm_muram_init(void)
{
struct device_node *np;
struct resource r;
- u32 zero[OF_MAX_ADDR_CELLS] = {};
+ __be32 zero[OF_MAX_ADDR_CELLS] = {};
resource_size_t max = 0;
int i = 0;
int ret = 0;
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 0dd5bdb04a14..0390af999900 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -44,7 +44,7 @@
struct qe_ic {
/* Control registers offset */
- u32 __iomem *regs;
+ __be32 __iomem *regs;
/* The remapper for this QEIC */
struct irq_domain *irqhost;
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index 90157acc5ba6..d6c93970df4d 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -632,7 +632,7 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
{
int source;
u32 shift;
- struct qe_mux *qe_mux_reg;
+ struct qe_mux __iomem *qe_mux_reg;
qe_mux_reg = &qe_immr->qmx;
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
index 274d34449846..7e11be41ab62 100644
--- a/drivers/soc/fsl/qe/ucc_slow.c
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(ucc_slow_restart_tx);
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
- struct ucc_slow *us_regs;
+ struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
@@ -93,7 +93,7 @@ EXPORT_SYMBOL(ucc_slow_enable);
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
- struct ucc_slow *us_regs;
+ struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
@@ -122,7 +122,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
u32 i;
struct ucc_slow __iomem *us_regs;
u32 gumr;
- struct qe_bd *bd;
+ struct qe_bd __iomem *bd;
u32 id;
u32 command;
int ret = 0;
@@ -168,16 +168,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
return -ENOMEM;
}
- uccs->saved_uccm = 0;
- uccs->p_rx_frame = 0;
us_regs = uccs->us_regs;
- uccs->p_ucce = (u16 *) & (us_regs->ucce);
- uccs->p_uccm = (u16 *) & (us_regs->uccm);
-#ifdef STATISTICS
- uccs->rx_frames = 0;
- uccs->tx_frames = 0;
- uccs->rx_discarded = 0;
-#endif /* STATISTICS */
+ uccs->p_ucce = &us_regs->ucce;
+ uccs->p_uccm = &us_regs->uccm;
/* Get PRAM base */
uccs->us_pram_offset =
@@ -231,24 +224,24 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* clear bd buffer */
qe_iowrite32be(0, &bd->buf);
/* set bd status and length */
- qe_iowrite32be(0, (u32 *)bd);
+ qe_iowrite32be(0, (u32 __iomem *)bd);
bd++;
}
/* for last BD set Wrap bit */
qe_iowrite32be(0, &bd->buf);
- qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
+ qe_iowrite32be(T_W, (u32 __iomem *)bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
- qe_iowrite32be(0, (u32 *)bd);
+ qe_iowrite32be(0, (u32 __iomem *)bd);
/* clear bd buffer */
qe_iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
- qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
+ qe_iowrite32be(R_W, (u32 __iomem *)bd);
qe_iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
@@ -273,8 +266,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
qe_iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
- gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
- us_info->diag | us_info->mode;
+ gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
+ (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
if (us_info->tci)
gumr |= UCC_SLOW_GUMR_L_TCI;
if (us_info->rinv)
@@ -289,8 +282,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* if the data is in cachable memory, the 'global' */
/* in the function code should be set. */
- uccs->us_pram->tbmr = UCC_BMR_BO_BE;
- uccs->us_pram->rbmr = UCC_BMR_BO_BE;
+ qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
+ qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
/* rbase, tbase are offsets from MURAM base */
qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 0281ef9a1800..d515d2cc20ed 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -10,11 +10,21 @@ config IMX_GPCV2_PM_DOMAINS
config IMX_SCU_SOC
bool "i.MX System Controller Unit SoC info support"
- depends on IMX_SCU || COMPILE_TEST
+ depends on IMX_SCU
select SOC_BUS
help
If you say yes here you get support for the NXP i.MX System
Controller Unit SoC info module, it will provide the SoC info
like SoC family, ID and revision etc.
+config SOC_IMX8M
+ bool "i.MX8M SoC family support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
+ select SOC_BUS
+ help
+ If you say yes here you get support for the NXP i.MX8M SoC
+ family. It will provide SoC info such as the SoC family,
+ ID and revision.
+
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index cf9ca42ff739..103e2c93c342 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
-obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
+obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 98b9d9a902ae..90a8b2c0676f 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -87,8 +87,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
{
struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
- int i, ret, sw, sw2iso;
- u32 val;
+ int i, ret;
+ u32 val, req;
if (pd->supply) {
ret = regulator_enable(pd->supply);
@@ -107,17 +107,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
0x1, 0x1);
- /* Read ISO and ISO2SW power up delays */
- regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
- sw = val & 0x3f;
- sw2iso = (val >> 8) & 0x3f;
-
/* Request GPC to power up domain */
- val = BIT(pd->cntr_pdn_bit + 1);
- regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
+ req = BIT(pd->cntr_pdn_bit + 1);
+ regmap_update_bits(pd->regmap, GPC_CNTR, req, req);
- /* Wait ISO + ISO2SW IPG clock cycles */
- udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz));
+ /* Wait for the PGC to handle the request */
+ ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
+ 1, 50);
+ if (ret)
+ pr_err("powerup request on domain %s timed out\n", genpd->name);
+
+ /* Wait for reset to propagate through peripherals */
+ usleep_range(5, 10);
/* Disable reset clocks for all devices in the domain */
for (i = 0; i < pd->num_clks; i++)
@@ -343,6 +344,7 @@ static const struct regmap_config imx_gpc_regmap_config = {
.rd_table = &access_table,
.wr_table = &access_table,
.max_register = 0x2ac,
+ .fast_io = true,
};
static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
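Two related changes ride in the gpc.c hunks above: the fixed udelay() derived from PUPSCR is replaced by polling the GPC_CNTR request bit, and the regmap gains .fast_io = true so regmap guards it with a spinlock instead of a mutex, which suits short MMIO sequences. A condensed sketch of the regmap_read_poll_timeout() idiom; EXAMPLE_STATUS_REG and EXAMPLE_READY_BIT are placeholders:

    /* regmap_read_poll_timeout(map, reg, val, cond, sleep_us, timeout_us)
     * re-reads "reg" into "val" until "cond" is true, sleeping
     * sleep_us between reads; it returns -ETIMEDOUT after timeout_us.
     */
    u32 status;
    int err;

    err = regmap_read_poll_timeout(map, EXAMPLE_STATUS_REG, status,
                                   (status & EXAMPLE_READY_BIT),
                                   1, 50);
    if (err)
        return err;    /* hardware never acknowledged */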
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index b0dffb06c05d..6cf8a7a412bd 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -14,6 +14,7 @@
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
#include <dt-bindings/power/imx7-power.h>
#include <dt-bindings/power/imx8mq-power.h>
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8m.c
index 719e1f189ebf..719e1f189ebf 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8m.c
diff --git a/drivers/soc/kendryte/Kconfig b/drivers/soc/kendryte/Kconfig
new file mode 100644
index 000000000000..49785b1b0217
--- /dev/null
+++ b/drivers/soc/kendryte/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if SOC_KENDRYTE
+
+config K210_SYSCTL
+ bool "Kendryte K210 system controller"
+ default y
+ depends on RISCV
+ help
+	  Enables control of the K210's various clocks and allows
+	  general purpose use of the extra 2MB of SRAM normally
+	  reserved for the AI engine.
+
+endif
diff --git a/drivers/soc/kendryte/Makefile b/drivers/soc/kendryte/Makefile
new file mode 100644
index 000000000000..002d9ce95c0d
--- /dev/null
+++ b/drivers/soc/kendryte/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_K210_SYSCTL) += k210-sysctl.o
diff --git a/drivers/soc/kendryte/k210-sysctl.c b/drivers/soc/kendryte/k210-sysctl.c
new file mode 100644
index 000000000000..4608fbca20e1
--- /dev/null
+++ b/drivers/soc/kendryte/k210-sysctl.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Christoph Hellwig.
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ */
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/bitfield.h>
+#include <asm/soc.h>
+
+#define K210_SYSCTL_CLK0_FREQ 26000000UL
+
+/* Registers base address */
+#define K210_SYSCTL_SYSCTL_BASE_ADDR 0x50440000ULL
+
+/* Registers */
+#define K210_SYSCTL_PLL0 0x08
+#define K210_SYSCTL_PLL1 0x0c
+/* clkr: 4bits, clkf: 6bits, clkod: 4bits, bwadj: 6bits */
+#define PLL_RESET (1 << 20)
+#define PLL_PWR (1 << 21)
+#define PLL_INTFB (1 << 22)
+#define PLL_BYPASS (1 << 23)
+#define PLL_TEST (1 << 24)
+#define PLL_OUT_EN (1 << 25)
+#define PLL_TEST_EN (1 << 26)
+#define K210_SYSCTL_PLL_LOCK 0x18
+#define PLL0_LOCK1 (1 << 0)
+#define PLL0_LOCK2 (1 << 1)
+#define PLL0_SLIP_CLEAR (1 << 2)
+#define PLL0_TEST_CLK_OUT (1 << 3)
+#define PLL1_LOCK1 (1 << 8)
+#define PLL1_LOCK2 (1 << 9)
+#define PLL1_SLIP_CLEAR (1 << 10)
+#define PLL1_TEST_CLK_OUT (1 << 11)
+#define PLL2_LOCK1 (1 << 16)
+#define PLL2_LOCK2 (1 << 17)
+#define PLL2_SLIP_CLEAR (1 << 18)
+#define PLL2_TEST_CLK_OUT (1 << 19)
+#define K210_SYSCTL_CLKSEL0 0x20
+#define CLKSEL_ACLK (1 << 0)
+#define K210_SYSCTL_CLKEN_CENT 0x28
+#define CLKEN_CPU (1 << 0)
+#define CLKEN_SRAM0 (1 << 1)
+#define CLKEN_SRAM1 (1 << 2)
+#define CLKEN_APB0 (1 << 3)
+#define CLKEN_APB1 (1 << 4)
+#define CLKEN_APB2 (1 << 5)
+#define K210_SYSCTL_CLKEN_PERI 0x2c
+#define CLKEN_ROM (1 << 0)
+#define CLKEN_DMA (1 << 1)
+#define CLKEN_AI (1 << 2)
+#define CLKEN_DVP (1 << 3)
+#define CLKEN_FFT (1 << 4)
+#define CLKEN_GPIO (1 << 5)
+#define CLKEN_SPI0 (1 << 6)
+#define CLKEN_SPI1 (1 << 7)
+#define CLKEN_SPI2 (1 << 8)
+#define CLKEN_SPI3 (1 << 9)
+#define CLKEN_I2S0 (1 << 10)
+#define CLKEN_I2S1 (1 << 11)
+#define CLKEN_I2S2 (1 << 12)
+#define CLKEN_I2C0 (1 << 13)
+#define CLKEN_I2C1 (1 << 14)
+#define CLKEN_I2C2 (1 << 15)
+#define CLKEN_UART1 (1 << 16)
+#define CLKEN_UART2 (1 << 17)
+#define CLKEN_UART3 (1 << 18)
+#define CLKEN_AES (1 << 19)
+#define CLKEN_FPIO (1 << 20)
+#define CLKEN_TIMER0 (1 << 21)
+#define CLKEN_TIMER1 (1 << 22)
+#define CLKEN_TIMER2 (1 << 23)
+#define CLKEN_WDT0 (1 << 24)
+#define CLKEN_WDT1 (1 << 25)
+#define CLKEN_SHA (1 << 26)
+#define CLKEN_OTP (1 << 27)
+#define CLKEN_RTC (1 << 29)
+
+struct k210_sysctl {
+ void __iomem *regs;
+ struct clk_hw hw;
+};
+
+static void k210_set_bits(u32 val, void __iomem *reg)
+{
+ writel(readl(reg) | val, reg);
+}
+
+static void k210_clear_bits(u32 val, void __iomem *reg)
+{
+ writel(readl(reg) & ~val, reg);
+}
+
+static void k210_pll1_enable(void __iomem *regs)
+{
+ u32 val;
+
+ val = readl(regs + K210_SYSCTL_PLL1);
+ val &= ~GENMASK(19, 0); /* clkr1 = 0 */
+ val |= FIELD_PREP(GENMASK(9, 4), 0x3B); /* clkf1 = 59 */
+ val |= FIELD_PREP(GENMASK(13, 10), 0x3); /* clkod1 = 3 */
+ val |= FIELD_PREP(GENMASK(19, 14), 0x3B); /* bwadj1 = 59 */
+ writel(val, regs + K210_SYSCTL_PLL1);
+
+ k210_clear_bits(PLL_BYPASS, regs + K210_SYSCTL_PLL1);
+ k210_set_bits(PLL_PWR, regs + K210_SYSCTL_PLL1);
+
+ /*
+ * Reset the pll. The magic NOPs come from the Kendryte reference SDK.
+ */
+ k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+ k210_set_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+ nop();
+ nop();
+ k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+
+ for (;;) {
+ val = readl(regs + K210_SYSCTL_PLL_LOCK);
+ if (val & PLL1_LOCK2)
+ break;
+ writel(val | PLL1_SLIP_CLEAR, regs + K210_SYSCTL_PLL_LOCK);
+ }
+
+ k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL1);
+}
+
+static unsigned long k210_sysctl_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_sysctl *s = container_of(hw, struct k210_sysctl, hw);
+ u32 clksel0, pll0;
+ u64 pll0_freq, clkr0, clkf0, clkod0;
+
+ /*
+ * If the clock selector is not set, use the base frequency.
+ * Otherwise, use PLL0 frequency with a frequency divisor.
+ */
+ clksel0 = readl(s->regs + K210_SYSCTL_CLKSEL0);
+ if (!(clksel0 & CLKSEL_ACLK))
+ return K210_SYSCTL_CLK0_FREQ;
+
+ /*
+ * Get PLL0 frequency:
+ * freq = base frequency * clkf0 / (clkr0 * clkod0)
+ */
+ pll0 = readl(s->regs + K210_SYSCTL_PLL0);
+ clkr0 = 1 + FIELD_GET(GENMASK(3, 0), pll0);
+ clkf0 = 1 + FIELD_GET(GENMASK(9, 4), pll0);
+ clkod0 = 1 + FIELD_GET(GENMASK(13, 10), pll0);
+ pll0_freq = clkf0 * K210_SYSCTL_CLK0_FREQ / (clkr0 * clkod0);
+
+ /* Get the frequency divisor from the clock selector */
+ return pll0_freq / (2ULL << FIELD_GET(0x00000006, clksel0));
+}
+
+static const struct clk_ops k210_sysctl_clk_ops = {
+ .recalc_rate = k210_sysctl_clk_recalc_rate,
+};
+
+static const struct clk_init_data k210_clk_init_data = {
+ .name = "k210-sysctl-pll1",
+ .ops = &k210_sysctl_clk_ops,
+};
+
+static int k210_sysctl_probe(struct platform_device *pdev)
+{
+ struct k210_sysctl *s;
+ int error;
+
+ pr_info("Kendryte K210 SoC sysctl\n");
+
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->regs = devm_ioremap_resource(&pdev->dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(s->regs))
+ return PTR_ERR(s->regs);
+
+ s->hw.init = &k210_clk_init_data;
+ error = devm_clk_hw_register(&pdev->dev, &s->hw);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register clk\n");
+ return error;
+ }
+
+ error = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
+ &s->hw);
+ if (error) {
+ dev_err(&pdev->dev, "adding clk provider failed\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id k210_sysctl_of_match[] = {
+ { .compatible = "kendryte,k210-sysctl", },
+ {}
+};
+
+static struct platform_driver k210_sysctl_driver = {
+ .driver = {
+ .name = "k210-sysctl",
+ .of_match_table = k210_sysctl_of_match,
+ },
+ .probe = k210_sysctl_probe,
+};
+
+static int __init k210_sysctl_init(void)
+{
+ return platform_driver_register(&k210_sysctl_driver);
+}
+core_initcall(k210_sysctl_init);
+
+/*
+ * This needs to be called very early during initialization, given that
+ * PLL1 needs to be enabled to be able to use all SRAM.
+ */
+static void __init k210_soc_early_init(const void *fdt)
+{
+ void __iomem *regs;
+
+ regs = ioremap(K210_SYSCTL_SYSCTL_BASE_ADDR, 0x1000);
+ if (!regs)
+ panic("K210 sysctl ioremap");
+
+ /* Enable PLL1 to make the KPU SRAM useable */
+ k210_pll1_enable(regs);
+
+ k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL0);
+
+ k210_set_bits(CLKEN_CPU | CLKEN_SRAM0 | CLKEN_SRAM1,
+ regs + K210_SYSCTL_CLKEN_CENT);
+ k210_set_bits(CLKEN_ROM | CLKEN_TIMER0 | CLKEN_RTC,
+ regs + K210_SYSCTL_CLKEN_PERI);
+
+ k210_set_bits(CLKSEL_ACLK, regs + K210_SYSCTL_CLKSEL0);
+
+ iounmap(regs);
+}
+SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init);
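A worked instance of the rate math in k210_sysctl_clk_recalc_rate() above, under assumed register contents (not read from hardware): PLL0 fields clkr0=0, clkf0=59, clkod0=3 decode to dividers of 1, 60 and 4, and CLKSEL0 bits [2:1] of 0 select a divide-by-2 output:

    unsigned long base = 26000000UL;    /* K210_SYSCTL_CLK0_FREQ */
    u64 pll0_freq = (59 + 1) * (u64)base / ((0 + 1) * (3 + 1));
                                        /* 60 * 26 MHz / 4 = 390 MHz */
    unsigned long aclk = pll0_freq / (2ULL << 0);    /* 195 MHz */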
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index de20e6cba83b..87ee9f767b7a 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -78,6 +78,7 @@ struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
client->pkt_cnt = 0;
client->client.dev = dev;
client->client.tx_block = false;
+ client->client.knows_txdone = true;
client->chan = mbox_request_channel(&client->client, index);
if (IS_ERR(client->chan)) {
@@ -350,7 +351,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
spin_unlock_irqrestore(&client->lock, flags);
}
- mbox_send_message(client->chan, pkt);
+ err = mbox_send_message(client->chan, pkt);
+ if (err < 0)
+ return err;
/* We can send next packet immediately, so just call txdone. */
mbox_client_txdone(client->chan, 0);
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index c725315cf6a8..5d34e8b9c988 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -111,6 +111,28 @@ enum dew_regs {
PWRAP_RG_SPI_CON13,
PWRAP_SPISLV_KEY,
+ /* MT6359 only regs */
+ PWRAP_DEW_CRC_SWRST,
+ PWRAP_DEW_RG_EN_RECORD,
+ PWRAP_DEW_RECORD_CMD0,
+ PWRAP_DEW_RECORD_CMD1,
+ PWRAP_DEW_RECORD_CMD2,
+ PWRAP_DEW_RECORD_CMD3,
+ PWRAP_DEW_RECORD_CMD4,
+ PWRAP_DEW_RECORD_CMD5,
+ PWRAP_DEW_RECORD_WDATA0,
+ PWRAP_DEW_RECORD_WDATA1,
+ PWRAP_DEW_RECORD_WDATA2,
+ PWRAP_DEW_RECORD_WDATA3,
+ PWRAP_DEW_RECORD_WDATA4,
+ PWRAP_DEW_RECORD_WDATA5,
+ PWRAP_DEW_RG_ADDR_TARGET,
+ PWRAP_DEW_RG_ADDR_MASK,
+ PWRAP_DEW_RG_WDATA_TARGET,
+ PWRAP_DEW_RG_WDATA_MASK,
+ PWRAP_DEW_RG_SPI_RECORD_CLR,
+ PWRAP_DEW_RG_CMD_ALERT_CLR,
+
/* MT6397 only regs */
PWRAP_DEW_EVENT_OUT_EN,
PWRAP_DEW_EVENT_SRC_EN,
@@ -197,6 +219,42 @@ static const u32 mt6358_regs[] = {
[PWRAP_SPISLV_KEY] = 0x044a,
};
+static const u32 mt6359_regs[] = {
+ [PWRAP_DEW_RG_EN_RECORD] = 0x040a,
+ [PWRAP_DEW_DIO_EN] = 0x040c,
+ [PWRAP_DEW_READ_TEST] = 0x040e,
+ [PWRAP_DEW_WRITE_TEST] = 0x0410,
+ [PWRAP_DEW_CRC_SWRST] = 0x0412,
+ [PWRAP_DEW_CRC_EN] = 0x0414,
+ [PWRAP_DEW_CRC_VAL] = 0x0416,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0418,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x041a,
+ [PWRAP_DEW_CIPHER_EN] = 0x041c,
+ [PWRAP_DEW_CIPHER_RDY] = 0x041e,
+ [PWRAP_DEW_CIPHER_MODE] = 0x0420,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x0422,
+ [PWRAP_DEW_RDDMY_NO] = 0x0424,
+ [PWRAP_DEW_RECORD_CMD0] = 0x0428,
+ [PWRAP_DEW_RECORD_CMD1] = 0x042a,
+ [PWRAP_DEW_RECORD_CMD2] = 0x042c,
+ [PWRAP_DEW_RECORD_CMD3] = 0x042e,
+ [PWRAP_DEW_RECORD_CMD4] = 0x0430,
+ [PWRAP_DEW_RECORD_CMD5] = 0x0432,
+ [PWRAP_DEW_RECORD_WDATA0] = 0x0434,
+ [PWRAP_DEW_RECORD_WDATA1] = 0x0436,
+ [PWRAP_DEW_RECORD_WDATA2] = 0x0438,
+ [PWRAP_DEW_RECORD_WDATA3] = 0x043a,
+ [PWRAP_DEW_RECORD_WDATA4] = 0x043c,
+ [PWRAP_DEW_RECORD_WDATA5] = 0x043e,
+ [PWRAP_DEW_RG_ADDR_TARGET] = 0x0440,
+ [PWRAP_DEW_RG_ADDR_MASK] = 0x0442,
+ [PWRAP_DEW_RG_WDATA_TARGET] = 0x0444,
+ [PWRAP_DEW_RG_WDATA_MASK] = 0x0446,
+ [PWRAP_DEW_RG_SPI_RECORD_CLR] = 0x0448,
+ [PWRAP_DEW_RG_CMD_ALERT_CLR] = 0x0448,
+ [PWRAP_SPISLV_KEY] = 0x044a,
+};
+
static const u32 mt6397_regs[] = {
[PWRAP_DEW_BASE] = 0xbc00,
[PWRAP_DEW_EVENT_OUT_EN] = 0xbc00,
@@ -497,6 +555,45 @@ static int mt6765_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x1E0,
};
+static int mt6779_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_RDDMY] = 0x20,
+ [PWRAP_CSHEXT_WRITE] = 0x24,
+ [PWRAP_CSHEXT_READ] = 0x28,
+ [PWRAP_CSLEXT_WRITE] = 0x2C,
+ [PWRAP_CSLEXT_READ] = 0x30,
+ [PWRAP_EXT_CK_WRITE] = 0x34,
+ [PWRAP_STAUPD_CTRL] = 0x3C,
+ [PWRAP_STAUPD_GRPEN] = 0x40,
+ [PWRAP_EINT_STA0_ADR] = 0x44,
+ [PWRAP_HARB_HPRIO] = 0x68,
+ [PWRAP_HIPRIO_ARB_EN] = 0x6C,
+ [PWRAP_MAN_EN] = 0x7C,
+ [PWRAP_MAN_CMD] = 0x80,
+ [PWRAP_WACS0_EN] = 0x8C,
+ [PWRAP_INIT_DONE0] = 0x90,
+ [PWRAP_WACS1_EN] = 0x94,
+ [PWRAP_WACS2_EN] = 0x9C,
+ [PWRAP_INIT_DONE1] = 0x98,
+ [PWRAP_INIT_DONE2] = 0xA0,
+ [PWRAP_INT_EN] = 0xBC,
+ [PWRAP_INT_FLG_RAW] = 0xC0,
+ [PWRAP_INT_FLG] = 0xC4,
+ [PWRAP_INT_CLR] = 0xC8,
+ [PWRAP_INT1_EN] = 0xCC,
+ [PWRAP_INT1_FLG] = 0xD4,
+ [PWRAP_INT1_CLR] = 0xD8,
+ [PWRAP_TIMER_EN] = 0xF0,
+ [PWRAP_WDT_UNIT] = 0xF8,
+ [PWRAP_WDT_SRC_EN] = 0xFC,
+ [PWRAP_WDT_SRC_EN_1] = 0x100,
+ [PWRAP_WACS2_CMD] = 0xC20,
+ [PWRAP_WACS2_RDATA] = 0xC24,
+ [PWRAP_WACS2_VLDCLR] = 0xC28,
+};
+
static int mt6797_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -938,6 +1035,7 @@ enum pmic_type {
PMIC_MT6351,
PMIC_MT6357,
PMIC_MT6358,
+ PMIC_MT6359,
PMIC_MT6380,
PMIC_MT6397,
};
@@ -945,6 +1043,7 @@ enum pmic_type {
enum pwrap_type {
PWRAP_MT2701,
PWRAP_MT6765,
+ PWRAP_MT6779,
PWRAP_MT6797,
PWRAP_MT7622,
PWRAP_MT8135,
@@ -1377,6 +1476,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
break;
case PWRAP_MT2701:
case PWRAP_MT6765:
+ case PWRAP_MT6779:
case PWRAP_MT6797:
case PWRAP_MT8173:
case PWRAP_MT8516:
@@ -1711,6 +1811,15 @@ static const struct pwrap_slv_type pmic_mt6358 = {
.pwrap_write = pwrap_write16,
};
+static const struct pwrap_slv_type pmic_mt6359 = {
+ .dew_regs = mt6359_regs,
+ .type = PMIC_MT6359,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_DUALIO,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
static const struct pwrap_slv_type pmic_mt6380 = {
.dew_regs = NULL,
.type = PMIC_MT6380,
@@ -1744,6 +1853,9 @@ static const struct of_device_id of_slave_match_tbl[] = {
.compatible = "mediatek,mt6358",
.data = &pmic_mt6358,
}, {
+ .compatible = "mediatek,mt6359",
+ .data = &pmic_mt6359,
+ }, {
/* The MT6380 PMIC only implements a regulator, so we bind it
* directly instead of using a MFD.
*/
@@ -1783,6 +1895,19 @@ static const struct pmic_wrapper_type pwrap_mt6765 = {
.init_soc_specific = NULL,
};
+static const struct pmic_wrapper_type pwrap_mt6779 = {
+ .regs = mt6779_regs,
+ .type = PWRAP_MT6779,
+ .arb_en_all = 0xfbb7f,
+ .int_en_all = 0xfffffffe,
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static const struct pmic_wrapper_type pwrap_mt6797 = {
.regs = mt6797_regs,
.type = PWRAP_MT6797,
@@ -1868,6 +1993,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt6765-pwrap",
.data = &pwrap_mt6765,
}, {
+ .compatible = "mediatek,mt6779-pwrap",
+ .data = &pwrap_mt6779,
+ }, {
.compatible = "mediatek,mt6797-pwrap",
.data = &pwrap_mt6797,
}, {
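MT6779 and MT6359 support in the pwrap hunks above is pure data: new register layout tables plus of_device_id entries whose .data points at them. A sketch of the probe-side lookup that would bind them, assuming the usual match-data pattern (example_probe is hypothetical):

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
        const struct pmic_wrapper_type *type;

        /* Returns the .data pointer of the matching of_device_id
         * entry, e.g. &pwrap_mt6779 for "mediatek,mt6779-pwrap".
         */
        type = of_device_get_match_data(&pdev->dev);
        if (!type)
            return -ENODEV;

        /* type->regs now indexes mt6779_regs[] by the PWRAP_* enum */
        return 0;
    }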
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index d0a73e76d563..bf42a17a45de 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -76,6 +76,10 @@ config QCOM_OCMEM
requirements. This is typically used by the GPU, camera/video, and
audio components on some Snapdragon SoCs.
+config QCOM_PDR_HELPERS
+ tristate
+ select QCOM_QMI_HELPERS
+
config QCOM_PM
bool "Qualcomm Power Management"
depends on ARCH_QCOM && !ARM64
@@ -88,7 +92,6 @@ config QCOM_PM
config QCOM_QMI_HELPERS
tristate
- depends on ARCH_QCOM || COMPILE_TEST
depends on NET
config QCOM_RMTFS_MEM
@@ -197,6 +200,8 @@ config QCOM_APR
tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
depends on ARCH_QCOM || COMPILE_TEST
depends on RPMSG
+ depends on NET
+ select QCOM_PDR_HELPERS
help
Enable APR IPC protocol support between
application processor and QDSP6. APR is
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 9fb35c8a495e..5d6b83dc58e8 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
+obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 4fcc32420c47..1f35b097c635 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/soc/qcom/apr.h>
+#include <linux/soc/qcom/pdr.h>
#include <linux/rpmsg.h>
#include <linux/of.h>
@@ -21,6 +22,7 @@ struct apr {
spinlock_t rx_lock;
struct idr svcs_idr;
int dest_domain_id;
+ struct pdr_handle *pdr;
struct workqueue_struct *rxwq;
struct work_struct rx_work;
struct list_head rx_list;
@@ -289,6 +291,9 @@ static int apr_add_device(struct device *dev, struct device_node *np,
id->svc_id + 1, GFP_ATOMIC);
spin_unlock(&apr->svcs_lock);
+ of_property_read_string_index(np, "qcom,protection-domain",
+ 1, &adev->service_path);
+
dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
ret = device_register(&adev->dev);
@@ -300,14 +305,75 @@ static int apr_add_device(struct device *dev, struct device_node *np,
return ret;
}
-static void of_register_apr_devices(struct device *dev)
+static int of_apr_add_pd_lookups(struct device *dev)
+{
+ const char *service_name, *service_path;
+ struct apr *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+ struct pdr_service *pds;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 0, &service_name);
+ if (ret < 0)
+ continue;
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (ret < 0) {
+ dev_err(dev, "pdr service path missing: %d\n", ret);
+ return ret;
+ }
+
+ pds = pdr_add_lookup(apr->pdr, service_name, service_path);
+ if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
+ dev_err(dev, "pdr add lookup failed: %d\n", ret);
+ return PTR_ERR(pds);
+ }
+ }
+
+ return 0;
+}
+
+static void of_register_apr_devices(struct device *dev, const char *svc_path)
{
struct apr *apr = dev_get_drvdata(dev);
struct device_node *node;
+ const char *service_path;
+ int ret;
for_each_child_of_node(dev->of_node, node) {
struct apr_device_id id = { {0} };
+ /*
+ * This function is called with svc_path NULL during
+ * apr_probe(), in which case we register any apr devices
+ * without a qcom,protection-domain specified.
+ *
+	 * Then, as protection domains become available (if
+	 * applicable), this function is called again with svc_path
+	 * naming the service that has just come up. In
+ * this case we register any apr devices with a matching
+ * qcom,protection-domain.
+ */
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (svc_path) {
+ /* skip APR services that are PD independent */
+ if (ret)
+ continue;
+
+ /* skip APR services whose PD paths don't match */
+ if (strcmp(service_path, svc_path))
+ continue;
+ } else {
+ /* skip APR services whose PD lookups are registered */
+ if (ret == 0)
+ continue;
+ }
+
if (of_property_read_u32(node, "reg", &id.svc_id))
continue;
@@ -318,6 +384,34 @@ static void of_register_apr_devices(struct device *dev)
}
}
+static int apr_remove_device(struct device *dev, void *svc_path)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ if (svc_path && adev->service_path) {
+ if (!strcmp(adev->service_path, (char *)svc_path))
+ device_unregister(&adev->dev);
+ } else {
+ device_unregister(&adev->dev);
+ }
+
+ return 0;
+}
+
+static void apr_pd_status(int state, char *svc_path, void *priv)
+{
+ struct apr *apr = (struct apr *)priv;
+
+ switch (state) {
+ case SERVREG_SERVICE_STATE_UP:
+ of_register_apr_devices(apr->dev, svc_path);
+ break;
+ case SERVREG_SERVICE_STATE_DOWN:
+ device_for_each_child(apr->dev, svc_path, apr_remove_device);
+ break;
+ }
+}
+
static int apr_probe(struct rpmsg_device *rpdev)
{
struct device *dev = &rpdev->dev;
@@ -343,28 +437,39 @@ static int apr_probe(struct rpmsg_device *rpdev)
return -ENOMEM;
}
INIT_WORK(&apr->rx_work, apr_rxwq);
+
+ apr->pdr = pdr_handle_alloc(apr_pd_status, apr);
+ if (IS_ERR(apr->pdr)) {
+ dev_err(dev, "Failed to init PDR handle\n");
+ ret = PTR_ERR(apr->pdr);
+ goto destroy_wq;
+ }
+
INIT_LIST_HEAD(&apr->rx_list);
spin_lock_init(&apr->rx_lock);
spin_lock_init(&apr->svcs_lock);
idr_init(&apr->svcs_idr);
- of_register_apr_devices(dev);
-
- return 0;
-}
-static int apr_remove_device(struct device *dev, void *null)
-{
- struct apr_device *adev = to_apr_device(dev);
+ ret = of_apr_add_pd_lookups(dev);
+ if (ret)
+ goto handle_release;
- device_unregister(&adev->dev);
+ of_register_apr_devices(dev, NULL);
return 0;
+
+handle_release:
+ pdr_handle_release(apr->pdr);
+destroy_wq:
+ destroy_workqueue(apr->rxwq);
+ return ret;
}
static void apr_remove(struct rpmsg_device *rpdev)
{
struct apr *apr = dev_get_drvdata(&rpdev->dev);
+ pdr_handle_release(apr->pdr);
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
flush_workqueue(apr->rxwq);
destroy_workqueue(apr->rxwq);
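apr.c above becomes the first consumer of the new PDR helpers added below in pdr_interface.c. A minimal sketch of the client lifecycle; the service name and path strings are illustrative, not fixed by the API:

    static void example_pd_status(int state, char *svc_path, void *priv)
    {
        if (state == SERVREG_SERVICE_STATE_UP)
            pr_info("PD %s came up\n", svc_path);
        else if (state == SERVREG_SERVICE_STATE_DOWN)
            pr_info("PD %s went down\n", svc_path);
    }

    static int example_track_audio_pd(void)
    {
        struct pdr_handle *pdr;
        struct pdr_service *pds;

        pdr = pdr_handle_alloc(example_pd_status, NULL);
        if (IS_ERR(pdr))
            return PTR_ERR(pdr);

        /* Illustrative service name/path */
        pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
        if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
            pdr_handle_release(pdr);
            return PTR_ERR(pds);
        }

        return 0;    /* pdr_handle_release() on teardown */
    }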
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
new file mode 100644
index 000000000000..17ad3b8698e1
--- /dev/null
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "pdr_internal.h"
+
+struct pdr_service {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ char service_path[SERVREG_NAME_LENGTH + 1];
+
+ struct sockaddr_qrtr addr;
+
+ unsigned int instance;
+ unsigned int service;
+ u8 service_data_valid;
+ u32 service_data;
+ int state;
+
+ bool need_notifier_register;
+ bool need_notifier_remove;
+ bool need_locator_lookup;
+ bool service_connected;
+
+ struct list_head node;
+};
+
+struct pdr_handle {
+ struct qmi_handle locator_hdl;
+ struct qmi_handle notifier_hdl;
+
+ struct sockaddr_qrtr locator_addr;
+
+ struct list_head lookups;
+ struct list_head indack_list;
+
+ /* control access to pdr lookup/indack lists */
+ struct mutex list_lock;
+
+ /* serialize pd status invocation */
+ struct mutex status_lock;
+
+ /* control access to the locator state */
+ struct mutex lock;
+
+ bool locator_init_complete;
+
+ struct work_struct locator_work;
+ struct work_struct notifier_work;
+ struct work_struct indack_work;
+
+ struct workqueue_struct *notifier_wq;
+ struct workqueue_struct *indack_wq;
+
+ void (*status)(int state, char *service_path, void *priv);
+ void *priv;
+};
+
+struct pdr_list_node {
+ enum servreg_service_state curr_state;
+ u16 transaction_id;
+ struct pdr_service *pds;
+ struct list_head node;
+};
+
+static int pdr_locator_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+ struct pdr_service *pds;
+
+ /* Create a local client port for QMI communication */
+ pdr->locator_addr.sq_family = AF_QIPCRTR;
+ pdr->locator_addr.sq_node = svc->node;
+ pdr->locator_addr.sq_port = svc->port;
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = true;
+ mutex_unlock(&pdr->lock);
+
+ /* Service pending lookup requests */
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->need_locator_lookup)
+ schedule_work(&pdr->locator_work);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_locator_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = false;
+ mutex_unlock(&pdr->lock);
+
+ pdr->locator_addr.sq_node = 0;
+ pdr->locator_addr.sq_port = 0;
+}
+
+static struct qmi_ops pdr_locator_ops = {
+ .new_server = pdr_locator_new_server,
+ .del_server = pdr_locator_del_server,
+};
+
+static int pdr_register_listener(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ bool enable)
+{
+ struct servreg_register_listener_resp resp;
+ struct servreg_register_listener_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_register_listener_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.enable = enable;
+ strcpy(req.service_path, pds->service_path);
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_REGISTER_LISTENER_REQ,
+ SERVREG_REGISTER_LISTENER_REQ_LEN,
+ servreg_register_listener_req_ei,
+ &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s register listener txn wait failed: %d\n",
+ pds->service_path, ret);
+ return ret;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s register listener failed: 0x%x\n",
+ pds->service_path, resp.resp.error);
+		return -EREMOTEIO;
+ }
+
+ if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX)
+ pr_err("PDR: %s notification state invalid: 0x%x\n",
+ pds->service_path, resp.curr_state);
+
+ pds->state = resp.curr_state;
+
+ return 0;
+}
+
+static void pdr_notifier_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ notifier_work);
+ struct pdr_service *pds;
+ int ret;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service_connected) {
+ if (!pds->need_notifier_register)
+ continue;
+
+ pds->need_notifier_register = false;
+ ret = pdr_register_listener(pdr, pds, true);
+ if (ret < 0)
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ } else {
+ if (!pds->need_notifier_remove)
+ continue;
+
+ pds->need_notifier_remove = false;
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ }
+
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static int pdr_notifier_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = true;
+ pds->need_notifier_register = true;
+ pds->addr.sq_family = AF_QIPCRTR;
+ pds->addr.sq_node = svc->node;
+ pds->addr.sq_port = svc->port;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_notifier_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = false;
+ pds->need_notifier_remove = true;
+ pds->addr.sq_node = 0;
+ pds->addr.sq_port = 0;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static struct qmi_ops pdr_notifier_ops = {
+ .new_server = pdr_notifier_new_server,
+ .del_server = pdr_notifier_del_server,
+};
+
+static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
+ u16 tid)
+{
+ struct servreg_set_ack_resp resp;
+ struct servreg_set_ack_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.transaction_id = tid;
+ strcpy(req.service_path, pds->service_path);
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_SET_ACK_REQ,
+ SERVREG_SET_ACK_REQ_LEN,
+ servreg_set_ack_req_ei,
+ &req);
+
+ /* Skip waiting for response */
+ qmi_txn_cancel(&txn);
+ return ret;
+}
+
+static void pdr_indack_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ indack_work);
+ struct pdr_list_node *ind, *tmp;
+ struct pdr_service *pds;
+
+ list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
+ pds = ind->pds;
+ pdr_send_indack_msg(pdr, pds, ind->transaction_id);
+
+ mutex_lock(&pdr->status_lock);
+ pds->state = ind->curr_state;
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+
+ mutex_lock(&pdr->list_lock);
+ list_del(&ind->node);
+ mutex_unlock(&pdr->list_lock);
+
+ kfree(ind);
+ }
+}
+
+static void pdr_indication_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ const struct servreg_state_updated_ind *ind_msg = data;
+ struct pdr_list_node *ind;
+ struct pdr_service *pds;
+ bool found = false;
+
+ if (!ind_msg || !ind_msg->service_path[0] ||
+ strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (strcmp(pds->service_path, ind_msg->service_path))
+ continue;
+
+ found = true;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!found)
+ return;
+
+ pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
+ ind_msg->service_path, ind_msg->curr_state,
+ ind_msg->transaction_id);
+
+ ind = kzalloc(sizeof(*ind), GFP_KERNEL);
+ if (!ind)
+ return;
+
+ ind->transaction_id = ind_msg->transaction_id;
+ ind->curr_state = ind_msg->curr_state;
+ ind->pds = pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_add_tail(&ind->node, &pdr->indack_list);
+ mutex_unlock(&pdr->list_lock);
+
+ queue_work(pdr->indack_wq, &pdr->indack_work);
+}
+
+static struct qmi_msg_handler qmi_indication_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = SERVREG_STATE_UPDATED_IND_ID,
+ .ei = servreg_state_updated_ind_ei,
+ .decoded_size = sizeof(struct servreg_state_updated_ind),
+ .fn = pdr_indication_cb,
+ },
+ {}
+};
+
+static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
+ struct servreg_get_domain_list_resp *resp,
+ struct pdr_handle *pdr)
+{
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->locator_hdl, &txn,
+ servreg_get_domain_list_resp_ei, resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->locator_hdl,
+ &pdr->locator_addr,
+ &txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ servreg_get_domain_list_req_ei,
+ req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s get domain list txn wait failed: %d\n",
+ req->service_name, ret);
+ return ret;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s get domain list failed: 0x%x\n",
+ req->service_name, resp->resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_get_domain_list_resp *resp;
+ struct servreg_get_domain_list_req req;
+ struct servreg_location_entry *entry;
+ int domains_read = 0;
+ int ret, i;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Prepare req message */
+ strcpy(req.service_name, pds->service_name);
+ req.domain_offset_valid = true;
+ req.domain_offset = 0;
+
+ do {
+ req.domain_offset = domains_read;
+ ret = pdr_get_domain_list(&req, resp, pdr);
+ if (ret < 0)
+ goto out;
+
+ for (i = domains_read; i < resp->domain_list_len; i++) {
+ entry = &resp->domain_list[i];
+
+ if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
+ continue;
+
+ if (!strcmp(entry->name, pds->service_path)) {
+ pds->service_data_valid = entry->service_data_valid;
+ pds->service_data = entry->service_data;
+ pds->instance = entry->instance;
+ goto out;
+ }
+ }
+
+ /* Update ret to indicate that the service is not yet found */
+ ret = -ENXIO;
+
+		/* Clamp domain_list_len to the total_domains the response reports */
+ if (resp->domain_list_len > resp->total_domains)
+ resp->domain_list_len = resp->total_domains;
+
+ domains_read += resp->domain_list_len;
+ } while (domains_read < resp->total_domains);
+out:
+ kfree(resp);
+ return ret;
+}
+
+static void pdr_notify_lookup_failure(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ int err)
+{
+ pr_err("PDR: service lookup for %s failed: %d\n",
+ pds->service_name, err);
+
+ if (err == -ENXIO)
+ return;
+
+ list_del(&pds->node);
+ pds->state = SERVREG_LOCATOR_ERR;
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ kfree(pds);
+}
+
+static void pdr_locator_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ locator_work);
+ struct pdr_service *pds, *tmp;
+ int ret = 0;
+
+ /* Bail out early if the SERVREG LOCATOR QMI service is not up */
+ mutex_lock(&pdr->lock);
+ if (!pdr->locator_init_complete) {
+ mutex_unlock(&pdr->lock);
+ pr_debug("PDR: SERVICE LOCATOR service not available\n");
+ return;
+ }
+ mutex_unlock(&pdr->lock);
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ if (!pds->need_locator_lookup)
+ continue;
+
+ ret = pdr_locate_service(pdr, pds);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1,
+ pds->instance);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ pds->need_locator_lookup = false;
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+/**
+ * pdr_add_lookup() - register a tracking request for a PD
+ * @pdr: PDR client handle
+ * @service_name: service name of the tracking request
+ * @service_path: service path of the tracking request
+ *
+ * Registering a pdr lookup allows for tracking the life cycle of the PD.
+ *
+ * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is
+ * returned if a lookup is already in progress for the given service path.
+ */
+struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
+ const char *service_name,
+ const char *service_path)
+{
+ struct pdr_service *pds, *tmp;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return ERR_PTR(-EINVAL);
+
+ if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH ||
+ !service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
+ return ERR_PTR(-EINVAL);
+
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return ERR_PTR(-ENOMEM);
+
+ pds->service = SERVREG_NOTIFIER_SERVICE;
+ strcpy(pds->service_name, service_name);
+ strcpy(pds->service_path, service_path);
+ pds->need_locator_lookup = true;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (strcmp(tmp->service_path, service_path))
+ continue;
+
+ mutex_unlock(&pdr->list_lock);
+ ret = -EALREADY;
+ goto err;
+ }
+
+ list_add(&pds->node, &pdr->lookups);
+ mutex_unlock(&pdr->list_lock);
+
+ schedule_work(&pdr->locator_work);
+
+ return pds;
+err:
+ kfree(pds);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_add_lookup);
+
+/**
+ * pdr_restart_pd() - restart PD
+ * @pdr: PDR client handle
+ * @pds: PD service handle
+ *
+ * Restarts the PD tracked by the PDR client handle for a given service path.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_restart_pd_resp resp;
+	struct servreg_restart_pd_req req = { 0 };
+ struct sockaddr_qrtr addr;
+ struct pdr_service *tmp;
+ struct qmi_txn txn;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds))
+ return -EINVAL;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (tmp != pds)
+ continue;
+
+ if (!pds->service_connected)
+ break;
+
+ /* Prepare req message */
+ strcpy(req.service_path, pds->service_path);
+ addr = pds->addr;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!req.service_path[0])
+ return -EINVAL;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_restart_pd_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &addr,
+ &txn, SERVREG_RESTART_PD_REQ,
+ SERVREG_RESTART_PD_REQ_MAX_LEN,
+ servreg_restart_pd_req_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s PD restart txn wait failed: %d\n",
+ req.service_path, ret);
+ return ret;
+ }
+
+	/* Check whether the response reports PD restart as disabled */
+ if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
+ resp.resp.error == QMI_ERR_DISABLED_V01) {
+ pr_err("PDR: %s PD restart is disabled: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EOPNOTSUPP;
+ }
+
+	/* Check the response for other error cases */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s request for PD restart failed: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pdr_restart_pd);
+
+/**
+ * pdr_handle_alloc() - initialize the PDR client handle
+ * @status: function to be called on PD state change
+ * @priv: handle for client's use
+ *
+ * Initializes the PDR client handle to allow for tracking/restart of PDs.
+ *
+ * Return: pdr_handle object on success, ERR_PTR on failure.
+ */
+struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
+ char *service_path,
+ void *priv), void *priv)
+{
+ struct pdr_handle *pdr;
+ int ret;
+
+ if (!status)
+ return ERR_PTR(-EINVAL);
+
+ pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
+ if (!pdr)
+ return ERR_PTR(-ENOMEM);
+
+ pdr->status = status;
+ pdr->priv = priv;
+
+ mutex_init(&pdr->status_lock);
+ mutex_init(&pdr->list_lock);
+ mutex_init(&pdr->lock);
+
+ INIT_LIST_HEAD(&pdr->lookups);
+ INIT_LIST_HEAD(&pdr->indack_list);
+
+ INIT_WORK(&pdr->locator_work, pdr_locator_work);
+ INIT_WORK(&pdr->notifier_work, pdr_notifier_work);
+ INIT_WORK(&pdr->indack_work, pdr_indack_work);
+
+ pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
+ if (!pdr->notifier_wq) {
+ ret = -ENOMEM;
+ goto free_pdr_handle;
+ }
+
+ pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
+ if (!pdr->indack_wq) {
+ ret = -ENOMEM;
+ goto destroy_notifier;
+ }
+
+ ret = qmi_handle_init(&pdr->locator_hdl,
+ SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
+ &pdr_locator_ops, NULL);
+ if (ret < 0)
+ goto destroy_indack;
+
+ ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ ret = qmi_handle_init(&pdr->notifier_hdl,
+ SERVREG_STATE_UPDATED_IND_MAX_LEN,
+ &pdr_notifier_ops,
+ qmi_indication_handler);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ return pdr;
+
+release_qmi_handle:
+ qmi_handle_release(&pdr->locator_hdl);
+destroy_indack:
+ destroy_workqueue(pdr->indack_wq);
+destroy_notifier:
+ destroy_workqueue(pdr->notifier_wq);
+free_pdr_handle:
+ kfree(pdr);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_handle_alloc);
+
+/**
+ * pdr_handle_release() - release the PDR client handle
+ * @pdr: PDR client handle
+ *
+ * Cleans up pending tracking requests and releases the underlying qmi handles.
+ */
+void pdr_handle_release(struct pdr_handle *pdr)
+{
+ struct pdr_service *pds, *tmp;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ list_del(&pds->node);
+ kfree(pds);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ cancel_work_sync(&pdr->locator_work);
+ cancel_work_sync(&pdr->notifier_work);
+ cancel_work_sync(&pdr->indack_work);
+
+ destroy_workqueue(pdr->notifier_wq);
+ destroy_workqueue(pdr->indack_wq);
+
+ qmi_handle_release(&pdr->locator_hdl);
+ qmi_handle_release(&pdr->notifier_hdl);
+
+ kfree(pdr);
+}
+EXPORT_SYMBOL(pdr_handle_release);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
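pdr_register_listener(), pdr_send_indack_msg(), pdr_get_domain_list() and pdr_restart_pd() above all follow one QMI transaction idiom, distilled here as a fragment with placeholder names (hdl, addr, MY_MSG_*, my_*_ei, req and resp stand in for the per-message symbols):

    struct qmi_txn txn;
    int ret;

    ret = qmi_txn_init(hdl, &txn, my_resp_ei, &resp);    /* bind resp decoder */
    if (ret < 0)
        return ret;

    ret = qmi_send_request(hdl, &addr, &txn, MY_MSG_ID, MY_MSG_MAX_LEN,
                           my_req_ei, &req);
    if (ret < 0) {
        qmi_txn_cancel(&txn);    /* request never went out: drop the txn */
        return ret;
    }

    ret = qmi_txn_wait(&txn, 5 * HZ);    /* sleep until the reply or timeout */
    if (ret < 0)
        return ret;

    if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
        return -EREMOTEIO;    /* transport OK, service refused */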
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
new file mode 100644
index 000000000000..15b5002e4127
--- /dev/null
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_HELPER_INTERNAL__
+#define __QCOM_PDR_HELPER_INTERNAL__
+
+#include <linux/soc/qcom/pdr.h>
+
+#define SERVREG_LOCATOR_SERVICE 0x40
+#define SERVREG_NOTIFIER_SERVICE 0x42
+
+#define SERVREG_REGISTER_LISTENER_REQ 0x20
+#define SERVREG_GET_DOMAIN_LIST_REQ 0x21
+#define SERVREG_STATE_UPDATED_IND_ID 0x22
+#define SERVREG_SET_ACK_REQ 0x23
+#define SERVREG_RESTART_PD_REQ 0x24
+
+#define SERVREG_DOMAIN_LIST_LENGTH 32
+#define SERVREG_RESTART_PD_REQ_MAX_LEN 67
+#define SERVREG_REGISTER_LISTENER_REQ_LEN 71
+#define SERVREG_SET_ACK_REQ_LEN 72
+#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74
+#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79
+#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389
+
+struct servreg_location_entry {
+ char name[SERVREG_NAME_LENGTH + 1];
+ u8 service_data_valid;
+ u32 service_data;
+ u32 instance;
+};
+
+struct qmi_elem_info servreg_location_entry_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ instance),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_req {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ u8 domain_offset_valid;
+ u32 domain_offset;
+};
+
+struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ service_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_resp {
+ struct qmi_response_type_v01 resp;
+ u8 total_domains_valid;
+ u16 total_domains;
+ u8 db_rev_count_valid;
+ u16 db_rev_count;
+ u8 domain_list_valid;
+ u32 domain_list_len;
+ struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
+};
+
+struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
+ .elem_size = sizeof(struct servreg_location_entry),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list),
+ .ei_array = servreg_location_entry_ei,
+ },
+ {}
+};
+
+struct servreg_register_listener_req {
+ u8 enable;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+struct qmi_elem_info servreg_register_listener_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_register_listener_req,
+ enable),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_register_listener_resp {
+ struct qmi_response_type_v01 resp;
+ u8 curr_state_valid;
+ enum servreg_service_state curr_state;
+};
+
+struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum servreg_service_state),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state),
+ },
+ {}
+};
+
+struct servreg_restart_pd_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_restart_pd_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_restart_pd_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_restart_pd_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct servreg_state_updated_ind {
+ enum servreg_service_state curr_state;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ curr_state),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+struct qmi_elem_info servreg_set_ack_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_set_ack_req,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_req,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+#endif
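Each wire struct in this header is paired with a qmi_elem_info[] table that drives the generic QMI TLV encoder/decoder. An annotated minimal pairing (example_req and its table are hypothetical); note that optional TLVs appear as two entries sharing one tlv_type, a QMI_OPT_FLAG "valid" byte plus the payload, as in servreg_get_domain_list_resp_ei above:

    struct example_req {
        u32 value;
    };

    struct qmi_elem_info example_req_ei[] = {
        {
            .data_type  = QMI_UNSIGNED_4_BYTE,
            .elem_len   = 1,              /* a single element */
            .elem_size  = sizeof(u32),
            .array_type = NO_ARRAY,
            .tlv_type   = 0x01,           /* tag on the wire */
            .offset     = offsetof(struct example_req, value),
        },
        {}    /* sentinel terminates the table */
    };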
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 006ac40c526a..f43a2e07ee83 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -200,7 +200,7 @@ static irqreturn_t qmp_intr(int irq, void *data)
{
struct qmp *qmp = data;
- wake_up_interruptible_all(&qmp->event);
+ wake_up_all(&qmp->event);
return IRQ_HANDLED;
}
@@ -225,6 +225,7 @@ static bool qmp_message_empty(struct qmp *qmp)
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
+ size_t tlen;
int ret;
if (WARN_ON(len + sizeof(u32) > qmp->size))
@@ -239,6 +240,9 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
data, len / sizeof(u32));
writel(len, qmp->msgram + qmp->offset);
+
+	/* Read back len to ensure the write reached message RAM */
+ tlen = readl(qmp->msgram + qmp->offset);
qmp_kick(qmp);
time_left = wait_event_interruptible_timeout(qmp->event,
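The read-back added to qmp_send() above is a posted-write flush: a readl() from the same device region cannot complete until the preceding writel() has reached the message RAM, so the remote sees a consistent length before the doorbell rings. In sketch form, using the names from the hunk:

    writel(len, qmp->msgram + qmp->offset);     /* publish the length */
    tlen = readl(qmp->msgram + qmp->offset);    /* force the posted write out */
    qmp_kick(qmp);                              /* only then ring the doorbell */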
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index a7bbbb67991c..6eec32b97f83 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -110,5 +110,6 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
int rpmh_rsc_invalidate(struct rsc_drv *drv);
void rpmh_tx_done(const struct tcs_request *msg, int r);
+int rpmh_flush(struct rpmh_ctrlr *ctrlr);
#endif /* __RPM_INTERNAL_H__ */
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index e278fc11fe5c..b71822131f59 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -277,7 +277,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
- trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+ trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 035091fd44b8..eb0ded059d2e 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -23,7 +23,7 @@
#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
-#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
+#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
struct rpmh_request name = { \
.msg = { \
.state = s, \
@@ -33,7 +33,7 @@
}, \
.cmd = { { 0 } }, \
.completion = q, \
- .dev = dev, \
+ .dev = device, \
.needs_free = false, \
}
@@ -427,11 +427,10 @@ static int is_req_valid(struct cache_req *req)
req->sleep_val != req->wake_val);
}
-static int send_single(const struct device *dev, enum rpmh_state state,
+static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
u32 addr, u32 data)
{
- DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
- struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
/* Wake sets are always complete and sleep sets are not */
rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
@@ -445,7 +444,7 @@ static int send_single(const struct device *dev, enum rpmh_state state,
/**
* rpmh_flush: Flushes the buffered active and sleep sets to TCS
*
- * @dev: The device making the request
+ * @ctrlr: controller making the request to flush cached data
*
* Return: -EBUSY if the controller is busy, probably waiting on a response
* to a RPMH request sent earlier.
@@ -454,10 +453,9 @@ static int send_single(const struct device *dev, enum rpmh_state state,
* that is powering down the entire system. Since no other RPMH API would be
* executing at this time, it is safe to run lockless.
*/
-int rpmh_flush(const struct device *dev)
+int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
- struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
int ret;
if (!ctrlr->dirty) {
@@ -480,11 +478,12 @@ int rpmh_flush(const struct device *dev)
__func__, p->addr, p->sleep_val, p->wake_val);
continue;
}
- ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+ ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
+ p->sleep_val);
if (ret)
return ret;
- ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
- p->addr, p->wake_val);
+ ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
+ p->wake_val);
if (ret)
return ret;
}
@@ -493,7 +492,6 @@ int rpmh_flush(const struct device *dev)
return 0;
}
-EXPORT_SYMBOL(rpmh_flush);
/**
* rpmh_invalidate: Invalidate all sleep and active sets
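The DEFINE_RPMH_MSG_ONSTACK() parameter rename above is what lets send_single() pass NULL: the preprocessor substitutes every occurrence of a parameter token, including the designator after the dot, so a parameter literally named dev broke the ".dev = dev" initializer for any argument not itself named dev. A hypothetical minimal illustration:

    struct msg_example { const struct device *dev; };

    /* BROKEN(NULL, m) expands to "struct msg_example m = { .NULL = NULL }" */
    #define BROKEN(dev, name)     struct msg_example name = { .dev = dev }

    /* Only the right-hand side substitutes; FIXED(NULL, m) compiles */
    #define FIXED(device, name)   struct msg_example name = { .dev = device }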
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 7864b75ce569..ebb49aee179b 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -277,7 +277,7 @@ static int show_image_##type(struct seq_file *seq, void *p) \
{ \
struct smem_image_version *image_version = seq->private; \
seq_puts(seq, image_version->type); \
- seq_puts(seq, "\n"); \
+ seq_putc(seq, '\n'); \
return 0; \
} \
static int open_image_##type(struct inode *inode, struct file *file) \
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index ba2b8b51d2d9..1982c7fb45fa 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -116,6 +116,7 @@ config ARCH_R8A7779
bool "R-Car H1 (R8A77790)"
select ARCH_RCAR_GEN1
select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select SYSC_R8A7779
@@ -163,6 +164,7 @@ config ARCH_SH73A0
bool "SH-Mobile AG5 (R8A73A00)"
select ARCH_RMOBILE
select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select RENESAS_INTC_IRQPIN
@@ -193,19 +195,19 @@ config ARCH_R8A774C0
This enables support for the Renesas RZ/G2E SoC.
config ARCH_R8A77950
- bool
+ bool "Renesas R-Car H3 ES1.x SoC Platform"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A7795
+ help
+ This enables support for the Renesas R-Car H3 SoC (revision 1.x).
config ARCH_R8A77951
- bool
-
-config ARCH_R8A7795
- bool "Renesas R-Car H3 SoC Platform"
- select ARCH_R8A77950
- select ARCH_R8A77951
+ bool "Renesas R-Car H3 ES2.0+ SoC Platform"
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
- This enables support for the Renesas R-Car H3 SoC.
+ This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
+ later).
config ARCH_R8A77960
bool "Renesas R-Car M3-W SoC Platform"
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 8d074489fba9..0fc3b119930a 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Renesas R-Car System Controller
*
* Copyright (C) 2016 Glider bvba
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 850f5733dc88..35dba8b8814e 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -259,7 +259,7 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7794
{ .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
#endif
-#ifdef CONFIG_ARCH_R8A7795
+#if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951)
{ .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 1699dda6b393..1c533a969f54 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3,7 +3,7 @@
* drivers/soc/tegra/pmc.c
*
* Copyright (c) 2010 Google, Inc
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -13,9 +13,13 @@
#include <linux/arm-smccc.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/clk-conf.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -48,6 +52,7 @@
#include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
+#include <dt-bindings/soc/tegra-pmc.h>
#define PMC_CNTRL 0x0
#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */
@@ -57,12 +62,15 @@
#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
#define PMC_CNTRL_PWRREQ_POLARITY BIT(8)
+#define PMC_CNTRL_BLINK_EN 7
#define PMC_CNTRL_MAIN_RST BIT(4)
#define PMC_WAKE_MASK 0x0c
#define PMC_WAKE_LEVEL 0x10
#define PMC_WAKE_STATUS 0x14
#define PMC_SW_WAKE_STATUS 0x18
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK 20
#define DPD_SAMPLE 0x020
#define DPD_SAMPLE_ENABLE BIT(0)
@@ -75,6 +83,7 @@
#define PWRGATE_STATUS 0x38
+#define PMC_BLINK_TIMER 0x40
#define PMC_IMPL_E_33V_PWR 0x40
#define PMC_PWR_DET 0x48
@@ -100,6 +109,8 @@
#define PMC_WAKE2_STATUS 0x168
#define PMC_SW_WAKE2_STATUS 0x16c
+#define PMC_CLK_OUT_CNTRL 0x1a8
+#define PMC_CLK_OUT_MUX_MASK GENMASK(1, 0)
#define PMC_SENSOR_CTRL 0x1b0
#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
@@ -155,6 +166,71 @@
#define TEGRA_SMC_PMC_READ 0xaa
#define TEGRA_SMC_PMC_WRITE 0xbb
+struct pmc_clk {
+ struct clk_hw hw;
+ unsigned long offs;
+ u32 mux_shift;
+ u32 force_en_shift;
+};
+
+#define to_pmc_clk(_hw) container_of(_hw, struct pmc_clk, hw)
+
+struct pmc_clk_gate {
+ struct clk_hw hw;
+ unsigned long offs;
+ u32 shift;
+};
+
+#define to_pmc_clk_gate(_hw) container_of(_hw, struct pmc_clk_gate, hw)
+
+struct pmc_clk_init_data {
+ char *name;
+ const char *const *parents;
+ int num_parents;
+ int clk_id;
+ u8 mux_shift;
+ u8 force_en_shift;
+};
+
+static const char * const clk_out1_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern1",
+};
+
+static const char * const clk_out2_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern2",
+};
+
+static const char * const clk_out3_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern3",
+};
+
+static const struct pmc_clk_init_data tegra_pmc_clks_data[] = {
+ {
+ .name = "pmc_clk_out_1",
+ .parents = clk_out1_parents,
+ .num_parents = ARRAY_SIZE(clk_out1_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_1,
+ .mux_shift = 6,
+ .force_en_shift = 2,
+ },
+ {
+ .name = "pmc_clk_out_2",
+ .parents = clk_out2_parents,
+ .num_parents = ARRAY_SIZE(clk_out2_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_2,
+ .mux_shift = 14,
+ .force_en_shift = 10,
+ },
+ {
+ .name = "pmc_clk_out_3",
+ .parents = clk_out3_parents,
+ .num_parents = ARRAY_SIZE(clk_out3_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_3,
+ .mux_shift = 22,
+ .force_en_shift = 18,
+ },
+};
+
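
The to_pmc_clk()/to_pmc_clk_gate() helpers follow the standard clk provider idiom: embed struct clk_hw in the provider's own state and recover the wrapper with container_of() inside the clk_ops callbacks. A minimal sketch of the idiom with hypothetical names:

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct my_clk {
	struct clk_hw hw;	/* embedded, never a pointer */
	void __iomem *reg;
};

#define to_my_clk(_hw) container_of(_hw, struct my_clk, hw)

static int my_clk_enable(struct clk_hw *hw)
{
	struct my_clk *clk = to_my_clk(hw);	/* recover the wrapper */

	writel(readl(clk->reg) | BIT(0), clk->reg);

	return 0;
}
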
struct tegra_powergate {
struct generic_pm_domain genpd;
struct tegra_pmc *pmc;
@@ -254,6 +330,10 @@ struct tegra_pmc_soc {
*/
const struct tegra_wake_event *wake_events;
unsigned int num_wake_events;
+
+ const struct pmc_clk_init_data *pmc_clks_data;
+ unsigned int num_pmc_clks;
+ bool has_blink_output;
};
static const char * const tegra186_reset_sources[] = {
@@ -2163,6 +2243,258 @@ static int tegra_pmc_clk_notify_cb(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void pmc_clk_fence_udelay(u32 offset)
+{
+ tegra_pmc_readl(pmc, offset);
+ /* pmc clk propagation delay 2 us */
+ udelay(2);
+}
+
+static u8 pmc_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs) >> clk->mux_shift;
+ val &= PMC_CLK_OUT_MUX_MASK;
+
+ return val;
+}
+
+static int pmc_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs);
+ val &= ~(PMC_CLK_OUT_MUX_MASK << clk->mux_shift);
+ val |= index << clk->mux_shift;
+ tegra_pmc_writel(pmc, val, clk->offs);
+ pmc_clk_fence_udelay(clk->offs);
+
+ return 0;
+}
+
+static int pmc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs) & BIT(clk->force_en_shift);
+
+ return val ? 1 : 0;
+}
+
+static void pmc_clk_set_state(unsigned long offs, u32 shift, int state)
+{
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, offs);
+ val = state ? (val | BIT(shift)) : (val & ~BIT(shift));
+ tegra_pmc_writel(pmc, val, offs);
+ pmc_clk_fence_udelay(offs);
+}
+
+static int pmc_clk_enable(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+
+ pmc_clk_set_state(clk->offs, clk->force_en_shift, 1);
+
+ return 0;
+}
+
+static void pmc_clk_disable(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+
+ pmc_clk_set_state(clk->offs, clk->force_en_shift, 0);
+}
+
+static const struct clk_ops pmc_clk_ops = {
+ .get_parent = pmc_clk_mux_get_parent,
+ .set_parent = pmc_clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+ .is_enabled = pmc_clk_is_enabled,
+ .enable = pmc_clk_enable,
+ .disable = pmc_clk_disable,
+};
+
+static struct clk *
+tegra_pmc_clk_out_register(struct tegra_pmc *pmc,
+ const struct pmc_clk_init_data *data,
+ unsigned long offset)
+{
+ struct clk_init_data init;
+ struct pmc_clk *pmc_clk;
+
+ pmc_clk = devm_kzalloc(pmc->dev, sizeof(*pmc_clk), GFP_KERNEL);
+ if (!pmc_clk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = data->name;
+ init.ops = &pmc_clk_ops;
+ init.parent_names = data->parents;
+ init.num_parents = data->num_parents;
+ init.flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT |
+ CLK_SET_PARENT_GATE;
+
+ pmc_clk->hw.init = &init;
+ pmc_clk->offs = offset;
+ pmc_clk->mux_shift = data->mux_shift;
+ pmc_clk->force_en_shift = data->force_en_shift;
+
+ return clk_register(NULL, &pmc_clk->hw);
+}
+
+static int pmc_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ return tegra_pmc_readl(pmc, gate->offs) & BIT(gate->shift) ? 1 : 0;
+}
+
+static int pmc_clk_gate_enable(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ pmc_clk_set_state(gate->offs, gate->shift, 1);
+
+ return 0;
+}
+
+static void pmc_clk_gate_disable(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ pmc_clk_set_state(gate->offs, gate->shift, 0);
+}
+
+static const struct clk_ops pmc_clk_gate_ops = {
+ .is_enabled = pmc_clk_gate_is_enabled,
+ .enable = pmc_clk_gate_enable,
+ .disable = pmc_clk_gate_disable,
+};
+
+static struct clk *
+tegra_pmc_clk_gate_register(struct tegra_pmc *pmc, const char *name,
+ const char *parent_name, unsigned long offset,
+ u32 shift)
+{
+ struct clk_init_data init;
+ struct pmc_clk_gate *gate;
+
+ gate = devm_kzalloc(pmc->dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pmc_clk_gate_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ gate->hw.init = &init;
+ gate->offs = offset;
+ gate->shift = shift;
+
+ return clk_register(NULL, &gate->hw);
+}
+
+static void tegra_pmc_clock_register(struct tegra_pmc *pmc,
+ struct device_node *np)
+{
+ struct clk *clk;
+ struct clk_onecell_data *clk_data;
+ unsigned int num_clks;
+ int i, err;
+
+ num_clks = pmc->soc->num_pmc_clks;
+ if (pmc->soc->has_blink_output)
+ num_clks += 1;
+
+ if (!num_clks)
+ return;
+
+ clk_data = devm_kmalloc(pmc->dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clks = devm_kcalloc(pmc->dev, TEGRA_PMC_CLK_MAX,
+ sizeof(*clk_data->clks), GFP_KERNEL);
+ if (!clk_data->clks)
+ return;
+
+ clk_data->clk_num = TEGRA_PMC_CLK_MAX;
+
+ for (i = 0; i < TEGRA_PMC_CLK_MAX; i++)
+ clk_data->clks[i] = ERR_PTR(-ENOENT);
+
+ for (i = 0; i < pmc->soc->num_pmc_clks; i++) {
+ const struct pmc_clk_init_data *data;
+
+ data = pmc->soc->pmc_clks_data + i;
+
+ clk = tegra_pmc_clk_out_register(pmc, data, PMC_CLK_OUT_CNTRL);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev, "unable to register clock %s: %d\n",
+ data->name, PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ err = clk_register_clkdev(clk, data->name, NULL);
+ if (err) {
+ dev_warn(pmc->dev,
+ "unable to register %s clock lookup: %d\n",
+ data->name, err);
+ return;
+ }
+
+ clk_data->clks[data->clk_id] = clk;
+ }
+
+ if (pmc->soc->has_blink_output) {
+ tegra_pmc_writel(pmc, 0x0, PMC_BLINK_TIMER);
+ clk = tegra_pmc_clk_gate_register(pmc,
+ "pmc_blink_override",
+ "clk_32k",
+ PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink_override: %d\n",
+ PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ clk = tegra_pmc_clk_gate_register(pmc, "pmc_blink",
+ "pmc_blink_override",
+ PMC_CNTRL,
+ PMC_CNTRL_BLINK_EN);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink: %d\n",
+ PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ err = clk_register_clkdev(clk, "pmc_blink", NULL);
+ if (err) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink lookup: %d\n",
+ err);
+ return;
+ }
+
+ clk_data->clks[TEGRA_PMC_CLK_BLINK] = clk;
+ }
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ if (err)
+ dev_warn(pmc->dev, "failed to add pmc clock provider: %d\n",
+ err);
+}
+
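
With the provider registered, consumers reach these clocks through the generic clk consumer API, either via device tree or the clkdev lookups added above. A hedged consumer-side sketch (the connection id assumes the "pmc_clk_out_1" lookup registered by clk_register_clkdev() above; the device is hypothetical):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int consumer_enable_pmc_clk(struct device *dev)
{
	struct clk *clk;

	/* matches the con_id registered with clk_register_clkdev() */
	clk = devm_clk_get(dev, "pmc_clk_out_1");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}
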
static int tegra_pmc_probe(struct platform_device *pdev)
{
void __iomem *base;
@@ -2281,6 +2613,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->base = base;
mutex_unlock(&pmc->powergates_lock);
+ tegra_pmc_clock_register(pmc, pdev->dev.of_node);
platform_set_drvdata(pdev, pmc);
return 0;
@@ -2422,6 +2755,9 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.num_reset_sources = 0,
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = true,
};
static const char * const tegra30_powergates[] = {
@@ -2469,6 +2805,9 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra114_powergates[] = {
@@ -2520,6 +2859,9 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra124_powergates[] = {
@@ -2569,38 +2911,38 @@ static const u8 tegra124_cpu_powergates[] = {
.name = (_name) \
})
-#define TEGRA124_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
- _pad(TEGRA_IO_PAD_BB, 15, UINT_MAX, "bb"), \
- _pad(TEGRA_IO_PAD_CAM, 36, UINT_MAX, "cam"), \
- _pad(TEGRA_IO_PAD_COMP, 22, UINT_MAX, "comp"), \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csb"), \
- _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "cse"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_HV, 38, UINT_MAX, "hv"), \
- _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_NAND, 13, UINT_MAX, "nand"), \
- _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC1, 33, UINT_MAX, "sdmmc1"), \
- _pad(TEGRA_IO_PAD_SDMMC3, 34, UINT_MAX, "sdmmc3"), \
- _pad(TEGRA_IO_PAD_SDMMC4, 35, UINT_MAX, "sdmmc4"), \
- _pad(TEGRA_IO_PAD_SYS_DDC, 58, UINT_MAX, "sys_ddc"), \
- _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb_bias")
+#define TEGRA124_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_BB, 15, UINT_MAX, "bb"), \
+ _pad(TEGRA_IO_PAD_CAM, 36, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_COMP, 22, UINT_MAX, "comp"), \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csb"), \
+ _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "cse"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_HV, 38, UINT_MAX, "hv"), \
+ _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_NAND, 13, UINT_MAX, "nand"), \
+ _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC1, 33, UINT_MAX, "sdmmc1"), \
+ _pad(TEGRA_IO_PAD_SDMMC3, 34, UINT_MAX, "sdmmc3"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 35, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_SYS_DDC, 58, UINT_MAX, "sys_ddc"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb_bias")
static const struct tegra_io_pad_soc tegra124_io_pads[] = {
TEGRA124_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2631,6 +2973,9 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra210_powergates[] = {
@@ -2667,46 +3012,46 @@ static const u8 tegra210_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
-#define TEGRA210_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_AUDIO, 17, 5, "audio"), \
- _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 18, "audio-hv"), \
- _pad(TEGRA_IO_PAD_CAM, 36, 10, "cam"), \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
- _pad(TEGRA_IO_PAD_CSIC, 42, UINT_MAX, "csic"), \
- _pad(TEGRA_IO_PAD_CSID, 43, UINT_MAX, "csid"), \
- _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "csie"), \
- _pad(TEGRA_IO_PAD_CSIF, 45, UINT_MAX, "csif"), \
- _pad(TEGRA_IO_PAD_DBG, 25, 19, "dbg"), \
- _pad(TEGRA_IO_PAD_DEBUG_NONAO, 26, UINT_MAX, "debug-nonao"), \
- _pad(TEGRA_IO_PAD_DMIC, 50, 20, "dmic"), \
- _pad(TEGRA_IO_PAD_DP, 51, UINT_MAX, "dp"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_EMMC, 35, UINT_MAX, "emmc"), \
- _pad(TEGRA_IO_PAD_EMMC2, 37, UINT_MAX, "emmc2"), \
- _pad(TEGRA_IO_PAD_GPIO, 27, 21, "gpio"), \
- _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, UINT_MAX, 11, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC1, 33, 12, "sdmmc1"), \
- _pad(TEGRA_IO_PAD_SDMMC3, 34, 13, "sdmmc3"), \
- _pad(TEGRA_IO_PAD_SPI, 46, 22, "spi"), \
- _pad(TEGRA_IO_PAD_SPI_HV, 47, 23, "spi-hv"), \
- _pad(TEGRA_IO_PAD_UART, 14, 2, "uart"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB3, 18, UINT_MAX, "usb3"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias")
+#define TEGRA210_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, 5, "audio"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 18, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_CAM, 36, 10, "cam"), \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_CSIC, 42, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 43, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 45, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, 19, "dbg"), \
+ _pad(TEGRA_IO_PAD_DEBUG_NONAO, 26, UINT_MAX, "debug-nonao"), \
+ _pad(TEGRA_IO_PAD_DMIC, 50, 20, "dmic"), \
+ _pad(TEGRA_IO_PAD_DP, 51, UINT_MAX, "dp"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_EMMC, 35, UINT_MAX, "emmc"), \
+ _pad(TEGRA_IO_PAD_EMMC2, 37, UINT_MAX, "emmc2"), \
+ _pad(TEGRA_IO_PAD_GPIO, 27, 21, "gpio"), \
+ _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, UINT_MAX, 11, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC1, 33, 12, "sdmmc1"), \
+ _pad(TEGRA_IO_PAD_SDMMC3, 34, 13, "sdmmc3"), \
+ _pad(TEGRA_IO_PAD_SPI, 46, 22, "spi"), \
+ _pad(TEGRA_IO_PAD_SPI_HV, 47, 23, "spi-hv"), \
+ _pad(TEGRA_IO_PAD_UART, 14, 2, "uart"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB3, 18, UINT_MAX, "usb3"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias")
static const struct tegra_io_pad_soc tegra210_io_pads[] = {
TEGRA210_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2745,48 +3090,51 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_reset_levels = 0,
.num_wake_events = ARRAY_SIZE(tegra210_wake_events),
.wake_events = tegra210_wake_events,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
-#define TEGRA186_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias"), \
- _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
- _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
- _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
- _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC2_HV, 34, 5, "sdmmc2-hv"), \
- _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
- _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
- _pad(TEGRA_IO_PAD_DSIB, 40, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 41, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 42, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
- _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
- _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
- _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
- _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
- _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
- _pad(TEGRA_IO_PAD_DMIC_HV, 52, 2, "dmic-hv"), \
- _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
- _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
- _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
- _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
- _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
- _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
+#define TEGRA186_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC2_HV, 34, 5, "sdmmc2-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_DSIB, 40, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 41, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 42, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
+ _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
+ _pad(TEGRA_IO_PAD_DMIC_HV, 52, 2, "dmic-hv"), \
+ _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
+ _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
+ _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
static const struct tegra_io_pad_soc tegra186_io_pads[] = {
TEGRA186_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2874,56 +3222,69 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra186_wake_events),
.wake_events = tegra186_wake_events,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
};
+#define TEGRA194_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_EQOS, 8, UINT_MAX, "eqos"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_2_BIAS, 9, UINT_MAX, "pex-clk-2-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_2, 10, UINT_MAX, "pex-clk-2"), \
+ _pad(TEGRA_IO_PAD_DAP3, 11, UINT_MAX, "dap3"), \
+ _pad(TEGRA_IO_PAD_DAP5, 12, UINT_MAX, "dap5"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_PWR_CTL, 15, UINT_MAX, "pwr-ctl"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO53, 16, UINT_MAX, "soc-gpio53"), \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_GP_PWM2, 18, UINT_MAX, "gp-pwm2"), \
+ _pad(TEGRA_IO_PAD_GP_PWM3, 19, UINT_MAX, "gp-pwm3"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO12, 20, UINT_MAX, "soc-gpio12"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO13, 21, UINT_MAX, "soc-gpio13"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO10, 22, UINT_MAX, "soc-gpio10"), \
+ _pad(TEGRA_IO_PAD_UART4, 23, UINT_MAX, "uart4"), \
+ _pad(TEGRA_IO_PAD_UART5, 24, UINT_MAX, "uart5"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP3, 26, UINT_MAX, "hdmi-dp3"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP2, 27, UINT_MAX, "hdmi-dp2"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_PEX_CTL2, 33, UINT_MAX, "pex-ctl2"), \
+ _pad(TEGRA_IO_PAD_PEX_L0_RST_N, 34, UINT_MAX, "pex-l0-rst"), \
+ _pad(TEGRA_IO_PAD_PEX_L1_RST_N, 35, UINT_MAX, "pex-l1-rst"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_PEX_L5_RST_N, 37, UINT_MAX, "pex-l5-rst"), \
+ _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
+ _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
+ _pad(TEGRA_IO_PAD_CSIG, 50, UINT_MAX, "csig"), \
+ _pad(TEGRA_IO_PAD_CSIH, 51, UINT_MAX, "csih"), \
+ _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
+ _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
+ _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
+
static const struct tegra_io_pad_soc tegra194_io_pads[] = {
- { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK_BIAS, .dpd = 4, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK3, .dpd = 5, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 7, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_EQOS, .dpd = 8, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2_BIAS, .dpd = 9, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 10, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DAP3, .dpd = 11, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DAP5, .dpd = 12, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PWR_CTL, .dpd = 15, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO53, .dpd = 16, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_GP_PWM2, .dpd = 18, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_GP_PWM3, .dpd = 19, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO12, .dpd = 20, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO13, .dpd = 21, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO10, .dpd = 22, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART4, .dpd = 23, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART5, .dpd = 24, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP3, .dpd = 26, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP2, .dpd = 27, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP0, .dpd = 28, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP1, .dpd = 29, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CTL2, .dpd = 33, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L0_RST_N, .dpd = 34, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L1_RST_N, .dpd = 35, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 36, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L5_RST_N, .dpd = 37, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIC, .dpd = 43, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSID, .dpd = 44, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIE, .dpd = 45, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIF, .dpd = 46, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SPI, .dpd = 47, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UFS, .dpd = 49, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIG, .dpd = 50, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIH, .dpd = 51, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_EDP, .dpd = 53, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC1_HV, .dpd = 55, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC3_HV, .dpd = 56, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CONN, .dpd = 60, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = UINT_MAX },
+ TEGRA194_IO_PAD_TABLE(TEGRA_IO_PAD)
+};
+
+static const struct pinctrl_pin_desc tegra194_pin_descs[] = {
+ TEGRA194_IO_PAD_TABLE(TEGRA_IO_PIN_DESC)
};
static const struct tegra_pmc_regs tegra194_pmc_regs = {
@@ -2976,10 +3337,12 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.has_tsense_reset = false,
.has_gpu_clamps = false,
.needs_mbist_war = false,
- .has_impl_33v_pwr = false,
+ .has_impl_33v_pwr = true,
.maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra194_io_pads),
.io_pads = tegra194_io_pads,
+ .num_pin_descs = ARRAY_SIZE(tegra194_pin_descs),
+ .pin_descs = tegra194_pin_descs,
.regs = &tegra194_pmc_regs,
.init = NULL,
.setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
@@ -2991,6 +3354,9 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra194_wake_events),
.wake_events = tegra194_wake_events,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
};
static const struct of_device_id tegra_pmc_match[] = {
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index ccc6d53fe788..de0123ec8ad6 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -130,6 +130,19 @@ static int am33xx_push_sram_idle(void)
return 0;
}
+static int am33xx_do_sram_idle(u32 wfi_flags)
+{
+ int ret = 0;
+
+ if (!m3_ipc || !pm_ops)
+ return 0;
+
+ if (wfi_flags & WFI_FLAG_WAKE_M3)
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+
+ return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
+}
+
static int __init am43xx_map_gic(void)
{
gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
@@ -260,6 +273,8 @@ static int am33xx_pm_begin(suspend_state_t state)
rtc_only_idle = 0;
}
+ pm_ops->begin_suspend();
+
switch (state) {
case PM_SUSPEND_MEM:
ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
@@ -301,6 +316,8 @@ static void am33xx_pm_end(void)
}
rtc_only_idle = 0;
+
+ pm_ops->finish_suspend();
}
static int am33xx_pm_valid(suspend_state_t state)
@@ -503,7 +520,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
#endif /* CONFIG_SUSPEND */
- ret = pm_ops->init();
+ ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
dev_err(dev, "Unable to call core pm init!\n");
ret = -ENODEV;
@@ -522,6 +539,8 @@ err_free_sram:
static int am33xx_pm_remove(struct platform_device *pdev)
{
+ if (pm_ops->deinit)
+ pm_ops->deinit();
suspend_set_ops(NULL);
wkup_m3_ipc_put(m3_ipc);
am33xx_pm_free_sram();
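
The probe/remove changes above pair pm_ops->init(am33xx_do_sram_idle) with an optional pm_ops->deinit() that is NULL-checked before the call. A small sketch of that optional-callback shape, with hypothetical ops names:

#include <linux/errno.h>
#include <linux/types.h>

/* hypothetical ops mirroring the init/deinit pairing used above */
struct sram_pm_ops {
	int (*init)(int (*idle_fn)(u32 wfi_flags));
	void (*deinit)(void);	/* optional, may be NULL */
};

static int sram_pm_register(struct sram_pm_ops *ops,
			    int (*idle_fn)(u32 wfi_flags))
{
	return ops->init ? ops->init(idle_fn) : -ENODEV;
}

static void sram_pm_unregister(struct sram_pm_ops *ops)
{
	if (ops->deinit)	/* optional callbacks are NULL-checked */
		ops->deinit();
}
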
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 223f1f9d0922..646512d7276f 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -19,7 +19,7 @@ config XILINX_VCU
config ZYNQMP_POWER
bool "Enable Xilinx Zynq MPSoC Power Management driver"
- depends on PM && ARCH_ZYNQMP
+ depends on PM && ZYNQMP_FIRMWARE
default y
select MAILBOX
select ZYNQMP_IPI_MBOX
@@ -35,7 +35,7 @@ config ZYNQMP_POWER
config ZYNQMP_PM_DOMAINS
bool "Enable Zynq MPSoC generic PM domains"
default y
- depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE
+ depends on PM && ZYNQMP_FIRMWARE
select PM_GENERIC_DOMAINS
help
Say yes to enable device power management through PM domains
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 6106577fb3ed..488c3c9e4947 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -2,6 +2,7 @@
// Copyright(c) 2015-17 Intel Corporation.
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
@@ -113,6 +114,8 @@ static int sdw_delete_slave(struct device *dev, void *data)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
struct sdw_bus *bus = slave->bus;
+ pm_runtime_disable(dev);
+
sdw_slave_debugfs_exit(slave);
mutex_lock(&bus->bus_lock);
@@ -317,6 +320,92 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
return 0;
}
+/*
+ * Read/Write IO functions.
+ * no_pm versions can only be called by the bus, e.g. while enumerating or
+ * handling suspend-resume sequences.
+ * All clients need to use the pm versions.
+ */
+
+static int
+sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, slave, addr, count,
+ slave->dev_num, SDW_MSG_FLAG_READ, val);
+ if (ret < 0)
+ return ret;
+
+ return sdw_transfer(slave->bus, &msg);
+}
+
+static int
+sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, slave, addr, count,
+ slave->dev_num, SDW_MSG_FLAG_WRITE, val);
+ if (ret < 0)
+ return ret;
+
+ return sdw_transfer(slave->bus, &msg);
+}
+
+static int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ return sdw_nwrite_no_pm(slave, addr, 1, &value);
+}
+
+static int
+sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
+{
+ struct sdw_msg msg;
+ u8 buf;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
+ SDW_MSG_FLAG_READ, &buf);
+ if (ret)
+ return ret;
+
+ ret = sdw_transfer(bus, &msg);
+ if (ret < 0)
+ return ret;
+ else
+ return buf;
+}
+
+static int
+sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
+ SDW_MSG_FLAG_WRITE, &value);
+ if (ret)
+ return ret;
+
+ return sdw_transfer(bus, &msg);
+}
+
+static int
+sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+{
+ u8 buf;
+ int ret;
+
+ ret = sdw_nread_no_pm(slave, addr, 1, &buf);
+ if (ret < 0)
+ return ret;
+ else
+ return buf;
+}
+
/**
* sdw_nread() - Read "n" contiguous SDW Slave registers
* @slave: SDW Slave
@@ -326,19 +415,17 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
*/
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
- struct sdw_msg msg;
int ret;
- ret = sdw_fill_msg(&msg, slave, addr, count,
- slave->dev_num, SDW_MSG_FLAG_READ, val);
- if (ret < 0)
- return ret;
-
ret = pm_runtime_get_sync(slave->bus->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_noidle(slave->bus->dev);
return ret;
+ }
+
+ ret = sdw_nread_no_pm(slave, addr, count, val);
- ret = sdw_transfer(slave->bus, &msg);
+ pm_runtime_mark_last_busy(slave->bus->dev);
pm_runtime_put(slave->bus->dev);
return ret;
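
The reworked sdw_nread()/sdw_nwrite() use a common runtime-PM idiom: a failed pm_runtime_get_sync() still takes a usage-count reference, so the error path must drop it with pm_runtime_put_noidle(), and -EACCES (runtime PM disabled) is tolerated. A stand-alone sketch of the idiom against a generic device:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int do_io_with_runtime_pm(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		/* a failed get still took a reference; drop it */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... perform the register access here ... */
	ret = 0;

	pm_runtime_mark_last_busy(dev);	/* rearm the autosuspend timer */
	pm_runtime_put(dev);

	return ret;
}
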
@@ -354,19 +441,17 @@ EXPORT_SYMBOL(sdw_nread);
*/
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
- struct sdw_msg msg;
int ret;
- ret = sdw_fill_msg(&msg, slave, addr, count,
- slave->dev_num, SDW_MSG_FLAG_WRITE, val);
- if (ret < 0)
- return ret;
-
ret = pm_runtime_get_sync(slave->bus->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_noidle(slave->bus->dev);
return ret;
+ }
+
+ ret = sdw_nwrite_no_pm(slave, addr, count, val);
- ret = sdw_transfer(slave->bus, &msg);
+ pm_runtime_mark_last_busy(slave->bus->dev);
pm_runtime_put(slave->bus->dev);
return ret;
@@ -486,7 +571,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
dev_num = slave->dev_num;
slave->dev_num = 0;
- ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
+ ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
if (ret < 0) {
dev_err(&slave->dev, "Program device_num %d failed: %d\n",
dev_num, ret);
@@ -504,22 +589,11 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
{
dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
- /*
- * Spec definition
- * Register Bit Contents
- * DevId_0 [7:4] 47:44 sdw_version
- * DevId_0 [3:0] 43:40 unique_id
- * DevId_1 39:32 mfg_id [15:8]
- * DevId_2 31:24 mfg_id [7:0]
- * DevId_3 23:16 part_id [15:8]
- * DevId_4 15:08 part_id [7:0]
- * DevId_5 07:00 class_id
- */
- id->sdw_version = (addr >> 44) & GENMASK(3, 0);
- id->unique_id = (addr >> 40) & GENMASK(3, 0);
- id->mfg_id = (addr >> 24) & GENMASK(15, 0);
- id->part_id = (addr >> 8) & GENMASK(15, 0);
- id->class_id = addr & GENMASK(7, 0);
+ id->sdw_version = SDW_VERSION(addr);
+ id->unique_id = SDW_UNIQUE_ID(addr);
+ id->mfg_id = SDW_MFG_ID(addr);
+ id->part_id = SDW_PART_ID(addr);
+ id->class_id = SDW_CLASS_ID(addr);
dev_dbg(bus->dev,
"SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x\n",
@@ -610,10 +684,320 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
enum sdw_slave_status status)
{
mutex_lock(&slave->bus->bus_lock);
+
+ dev_vdbg(&slave->dev,
+ "%s: changing status slave %d status %d new status %d\n",
+ __func__, slave->dev_num, slave->status, status);
+
+ if (status == SDW_SLAVE_UNATTACHED) {
+ dev_dbg(&slave->dev,
+ "%s: initializing completion for Slave %d\n",
+ __func__, slave->dev_num);
+
+ init_completion(&slave->enumeration_complete);
+ init_completion(&slave->initialization_complete);
+
+ } else if ((status == SDW_SLAVE_ATTACHED) &&
+ (slave->status == SDW_SLAVE_UNATTACHED)) {
+ dev_dbg(&slave->dev,
+ "%s: signaling completion for Slave %d\n",
+ __func__, slave->dev_num);
+
+ complete(&slave->enumeration_complete);
+ }
slave->status = status;
mutex_unlock(&slave->bus->bus_lock);
}
+static enum sdw_clk_stop_mode sdw_get_clk_stop_mode(struct sdw_slave *slave)
+{
+ enum sdw_clk_stop_mode mode;
+
+ /*
+ * Query for clock stop mode if Slave implements
+ * ops->get_clk_stop_mode, else read from property.
+ */
+ if (slave->ops && slave->ops->get_clk_stop_mode) {
+ mode = slave->ops->get_clk_stop_mode(slave);
+ } else {
+ if (slave->prop.clk_stop_mode1)
+ mode = SDW_CLK_STOP_MODE1;
+ else
+ mode = SDW_CLK_STOP_MODE0;
+ }
+
+ return mode;
+}
+
+static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
+ enum sdw_clk_stop_mode mode,
+ enum sdw_clk_stop_type type)
+{
+ int ret;
+
+ if (slave->ops && slave->ops->clk_stop) {
+ ret = slave->ops->clk_stop(slave, mode, type);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "Clk Stop type =%d failed: %d\n", type, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
+ enum sdw_clk_stop_mode mode,
+ bool prepare)
+{
+ bool wake_en;
+ u32 val = 0;
+ int ret;
+
+ wake_en = slave->prop.wake_capable;
+
+ if (prepare) {
+ val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
+
+ if (mode == SDW_CLK_STOP_MODE1)
+ val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
+
+ if (wake_en)
+ val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
+ } else {
+ val = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
+
+ val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
+ }
+
+ ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
+
+ if (ret != 0)
+ dev_err(&slave->dev,
+ "Clock Stop prepare failed for slave: %d", ret);
+
+ return ret;
+}
+
+static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
+{
+ int retry = bus->clk_stop_timeout;
+ int val;
+
+ do {
+ val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT) &
+ SDW_SCP_STAT_CLK_STP_NF;
+ if (!val) {
+ dev_info(bus->dev, "clock stop prep/de-prep done slave:%d",
+ dev_num);
+ return 0;
+ }
+
+ usleep_range(1000, 1500);
+ retry--;
+ } while (retry);
+
+ dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d",
+ dev_num);
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
+ *
+ * @bus: SDW bus instance
+ *
+ * Query Slave for clock stop mode and prepare for that mode.
+ */
+int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
+{
+ enum sdw_clk_stop_mode slave_mode;
+ bool simple_clk_stop = true;
+ struct sdw_slave *slave;
+ bool is_slave = false;
+ int ret = 0;
+
+ /*
+ * In order to save on transition time, prepare
+ * each Slave and then wait for all Slave(s) to be
+ * prepared for clock stop.
+ */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num)
+ continue;
+
+ /* Identify if Slave(s) are available on Bus */
+ is_slave = true;
+
+ if (slave->status != SDW_SLAVE_ATTACHED &&
+ slave->status != SDW_SLAVE_ALERT)
+ continue;
+
+ slave_mode = sdw_get_clk_stop_mode(slave);
+ slave->curr_clk_stop_mode = slave_mode;
+
+ ret = sdw_slave_clk_stop_callback(slave, slave_mode,
+ SDW_CLK_PRE_PREPARE);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "pre-prepare failed:%d", ret);
+ return ret;
+ }
+
+ ret = sdw_slave_clk_stop_prepare(slave,
+ slave_mode, true);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "pre-prepare failed:%d", ret);
+ return ret;
+ }
+
+ if (slave_mode == SDW_CLK_STOP_MODE1)
+ simple_clk_stop = false;
+ }
+
+ if (is_slave && !simple_clk_stop) {
+ ret = sdw_bus_wait_for_clk_prep_deprep(bus,
+ SDW_BROADCAST_DEV_NUM);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Inform slaves that prep is done */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num)
+ continue;
+
+ if (slave->status != SDW_SLAVE_ATTACHED &&
+ slave->status != SDW_SLAVE_ALERT)
+ continue;
+
+ slave_mode = slave->curr_clk_stop_mode;
+
+ if (slave_mode == SDW_CLK_STOP_MODE1) {
+ ret = sdw_slave_clk_stop_callback(slave,
+ slave_mode,
+ SDW_CLK_POST_PREPARE);
+
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "post-prepare failed:%d", ret);
+ }
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
+
+/**
+ * sdw_bus_clk_stop: stop bus clock
+ *
+ * @bus: SDW bus instance
+ *
+ * After preparing the Slaves for clock stop, stop the clock by broadcasting
+ * write to SCP_CTRL register.
+ */
+int sdw_bus_clk_stop(struct sdw_bus *bus)
+{
+ int ret;
+
+ /*
+ * broadcast clock stop now, attached Slaves will ACK this,
+ * unattached will ignore
+ */
+ ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
+ SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
+ if (ret < 0) {
+ if (ret == -ENODATA)
+ dev_dbg(bus->dev,
+ "ClockStopNow Broadcast msg ignored %d", ret);
+ else
+ dev_err(bus->dev,
+ "ClockStopNow Broadcast msg failed %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_bus_clk_stop);
+
+/**
+ * sdw_bus_exit_clk_stop: Exit clock stop mode
+ *
+ * @bus: SDW bus instance
+ *
+ * This de-prepares the Slaves by exiting Clock Stop Mode 0. Slaves exiting
+ * Clock Stop Mode 1 are de-prepared after they enumerate back.
+ */
+int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
+{
+ enum sdw_clk_stop_mode mode;
+ bool simple_clk_stop = true;
+ struct sdw_slave *slave;
+ bool is_slave = false;
+ int ret;
+
+ /*
+ * In order to save on transition time, de-prepare
+ * each Slave and then wait for all Slave(s) to be
+ * de-prepared after clock resume.
+ */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num)
+ continue;
+
+ /* Identify if Slave(s) are available on Bus */
+ is_slave = true;
+
+ if (slave->status != SDW_SLAVE_ATTACHED &&
+ slave->status != SDW_SLAVE_ALERT)
+ continue;
+
+ mode = slave->curr_clk_stop_mode;
+
+ if (mode == SDW_CLK_STOP_MODE1) {
+ simple_clk_stop = false;
+ continue;
+ }
+
+ ret = sdw_slave_clk_stop_callback(slave, mode,
+ SDW_CLK_PRE_DEPREPARE);
+ if (ret < 0)
+ dev_warn(&slave->dev,
+ "clk stop deprep failed:%d", ret);
+
+ ret = sdw_slave_clk_stop_prepare(slave, mode,
+ false);
+
+ if (ret < 0)
+ dev_warn(&slave->dev,
+ "clk stop deprep failed:%d", ret);
+ }
+
+ if (is_slave && !simple_clk_stop)
+ sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
+
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num)
+ continue;
+
+ if (slave->status != SDW_SLAVE_ATTACHED &&
+ slave->status != SDW_SLAVE_ALERT)
+ continue;
+
+ mode = slave->curr_clk_stop_mode;
+ sdw_slave_clk_stop_callback(slave, mode,
+ SDW_CLK_POST_DEPREPARE);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
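
Taken together, the three exported helpers define the bus-level ordering: prepare each Slave, broadcast ClockStopNow, and de-prepare on exit. A hedged sketch of how a master driver might sequence them, assuming the prototypes are exposed via <linux/soundwire/sdw.h>:

#include <linux/soundwire/sdw.h>

static int my_master_suspend(struct sdw_bus *bus)
{
	int ret;

	ret = sdw_bus_prep_clk_stop(bus);	/* per-Slave prepare + wait */
	if (ret < 0)
		return ret;

	return sdw_bus_clk_stop(bus);		/* broadcast ClockStopNow */
}

static int my_master_resume(struct sdw_bus *bus)
{
	/* Mode0 Slaves are de-prepared here; Mode1 Slaves after re-enumeration */
	return sdw_bus_exit_clk_stop(bus);
}
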
+
int sdw_configure_dpn_intr(struct sdw_slave *slave,
int port, bool enable, int mask)
{
@@ -672,13 +1056,10 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
- if (ret < 0) {
+ if (ret < 0)
dev_err(slave->bus->dev,
"SDW_DP0_INTMASK read failed:%d\n", ret);
- return val;
- }
-
- return 0;
+ return ret;
}
static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
@@ -831,12 +1212,19 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
+ ret = pm_runtime_get_sync(&slave->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
+		pm_runtime_put_noidle(&slave->dev);
+ return ret;
+ }
+
/* Read Instat 1, Instat 2 and Instat 3 registers */
ret = sdw_read(slave, SDW_SCP_INT1);
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT1 read failed:%d\n", ret);
- return ret;
+ goto io_err;
}
buf = ret;
@@ -844,7 +1232,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT2/3 read failed:%d\n", ret);
- return ret;
+ goto io_err;
}
do {
@@ -924,7 +1312,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT1 write failed:%d\n", ret);
- return ret;
+ goto io_err;
}
/*
@@ -935,7 +1323,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT1 read failed:%d\n", ret);
- return ret;
+ goto io_err;
}
_buf = ret;
@@ -943,7 +1331,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT2/3 read failed:%d\n", ret);
- return ret;
+ goto io_err;
}
/* Make sure no interrupts are pending */
@@ -964,16 +1352,39 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (count == SDW_READ_INTR_CLEAR_RETRY)
dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read\n");
+io_err:
+ pm_runtime_mark_last_busy(&slave->dev);
+ pm_runtime_put_autosuspend(&slave->dev);
+
return ret;
}
static int sdw_update_slave_status(struct sdw_slave *slave,
enum sdw_slave_status status)
{
- if (slave->ops && slave->ops->update_status)
- return slave->ops->update_status(slave, status);
+ unsigned long time;
- return 0;
+ if (!slave->probed) {
+ /*
+ * the slave status update is typically handled in an
+ * interrupt thread, which can race with the driver
+ * probe, e.g. when a module needs to be loaded.
+ *
+ * make sure the probe is complete before updating
+ * status.
+ */
+ time = wait_for_completion_timeout(&slave->probe_complete,
+ msecs_to_jiffies(DEFAULT_PROBE_TIMEOUT));
+ if (!time) {
+ dev_err(&slave->dev, "Probe not complete, timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (!slave->ops || !slave->ops->update_status)
+ return 0;
+
+ return slave->ops->update_status(slave, status);
}
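
The probe race is closed with a completion: the probe path signals probe_complete and the status-update thread blocks on it with a timeout. A minimal stand-alone sketch of the pattern (structure and names are hypothetical):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct my_dev {
	struct completion probe_complete;
	bool probed;
};

static void my_dev_init(struct my_dev *d)
{
	init_completion(&d->probe_complete);	/* before any waiter can race */
}

static void my_dev_probe_done(struct my_dev *d)
{
	d->probed = true;
	complete(&d->probe_complete);		/* wake the status thread */
}

static int my_dev_wait_probe(struct my_dev *d, unsigned int timeout_ms)
{
	if (d->probed)
		return 0;

	if (!wait_for_completion_timeout(&d->probe_complete,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	return 0;
}
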
/**
@@ -986,6 +1397,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
{
enum sdw_slave_status prev_status;
struct sdw_slave *slave;
+ bool attached_initializing;
int i, ret = 0;
/* first check if any Slaves fell off the bus */
@@ -1031,6 +1443,8 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
if (!slave)
continue;
+ attached_initializing = false;
+
switch (status[i]) {
case SDW_SLAVE_UNATTACHED:
if (slave->status == SDW_SLAVE_UNATTACHED)
@@ -1057,6 +1471,8 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
if (prev_status == SDW_SLAVE_ALERT)
break;
+ attached_initializing = true;
+
ret = sdw_initialize_slave(slave);
if (ret)
dev_err(bus->dev,
@@ -1075,8 +1491,37 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
if (ret)
dev_err(slave->bus->dev,
"Update Slave status failed:%d\n", ret);
+ if (attached_initializing)
+ complete(&slave->initialization_complete);
}
return ret;
}
EXPORT_SYMBOL(sdw_handle_slave_status);
+
+void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
+{
+ struct sdw_slave *slave;
+ int i;
+
+ /* Check all non-zero devices */
+ for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+ mutex_lock(&bus->bus_lock);
+		if (!test_bit(i, bus->assigned)) {
+ mutex_unlock(&bus->bus_lock);
+ continue;
+ }
+ mutex_unlock(&bus->bus_lock);
+
+ slave = sdw_get_slave(bus, i);
+ if (!slave)
+ continue;
+
+ if (slave->status != SDW_SLAVE_UNATTACHED)
+ sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+
+ /* keep track of request, used in pm_runtime resume */
+ slave->unattach_request = request;
+ }
+}
+EXPORT_SYMBOL(sdw_clear_slave_status);
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index cb482da914da..204204a26db8 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -5,6 +5,7 @@
#define __SDW_BUS_H
#define DEFAULT_BANK_SWITCH_TIMEOUT 3000
+#define DEFAULT_PROBE_TIMEOUT 2000
#if IS_ENABLED(CONFIG_ACPI)
int sdw_acpi_find_slaves(struct sdw_bus *bus);
@@ -164,4 +165,12 @@ sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
return sdw_write(slave, addr, tmp);
}
+/*
+ * At the moment we only track Master-initiated hw_reset.
+ * Additional fields can be added as needed
+ */
+#define SDW_UNATTACH_REQUEST_MASTER_RESET BIT(0)
+
+void sdw_clear_slave_status(struct sdw_bus *bus, u32 request);
+
#endif /* __SDW_BUS_H */
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 4a465f55039f..17f096dd6806 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -110,6 +110,11 @@ static int sdw_drv_probe(struct device *dev)
slave->bus->clk_stop_timeout = max_t(u32, slave->bus->clk_stop_timeout,
slave->prop.clk_stop_timeout);
+ slave->probed = true;
+ complete(&slave->probe_complete);
+
+ dev_dbg(dev, "probe complete\n");
+
return 0;
}
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 9bec270d0fa4..ecd357d1c63d 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -183,7 +183,6 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_PDI_CONFIG_PORT GENMASK(4, 0)
/* Driver defaults */
-#define CDNS_DEFAULT_SSP_INTERVAL 0x18
#define CDNS_TX_TIMEOUT 2000
#define CDNS_SCP_RX_FIFOLEVEL 0x2
@@ -211,34 +210,45 @@ static inline void cdns_updatel(struct sdw_cdns *cdns,
cdns_writel(cdns, offset, tmp);
}
-static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
+static int cdns_set_wait(struct sdw_cdns *cdns, int offset, u32 mask, u32 value)
{
int timeout = 10;
u32 reg_read;
- writel(value, cdns->registers + offset);
-
- /* Wait for bit to be self cleared */
+ /* Wait for bit to be set */
do {
reg_read = readl(cdns->registers + offset);
- if ((reg_read & value) == 0)
+ if ((reg_read & mask) == value)
return 0;
timeout--;
- udelay(50);
+ usleep_range(50, 100);
} while (timeout != 0);
- return -EAGAIN;
+ return -ETIMEDOUT;
+}
+
+static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
+{
+ writel(value, cdns->registers + offset);
+
+ /* Wait for bit to be self cleared */
+ return cdns_set_wait(cdns, offset, value, 0);
}
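
cdns_set_wait() is an open-coded poll-with-timeout loop; <linux/iopoll.h> provides readl_poll_timeout() for the same job. A sketch of the equivalent using that helper, with a hypothetical register:

#include <linux/io.h>
#include <linux/iopoll.h>

static int wait_bit_cleared(void __iomem *reg, u32 mask)
{
	u32 val;

	/* poll every ~50 us, give up after 1 ms; returns 0 or -ETIMEDOUT */
	return readl_poll_timeout(reg, val, !(val & mask), 50, 1000);
}
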
/*
* all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL
* need to be confirmed with a write to MCP_CONFIG_UPDATE
*/
-static int cdns_update_config(struct sdw_cdns *cdns)
+static int cdns_config_update(struct sdw_cdns *cdns)
{
int ret;
+ if (sdw_cdns_is_clock_stop(cdns)) {
+ dev_err(cdns->dev, "Cannot program MCP_CONFIG_UPDATE in ClockStopMode\n");
+ return -EINVAL;
+ }
+
ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
CDNS_MCP_CONFIG_UPDATE_BIT);
if (ret < 0)
@@ -832,17 +842,36 @@ int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
CDNS_MCP_CONTROL_HW_RST,
CDNS_MCP_CONTROL_HW_RST);
- /* enable bus operations with clock and data */
- cdns_updatel(cdns, CDNS_MCP_CONFIG,
- CDNS_MCP_CONFIG_OP,
- CDNS_MCP_CONFIG_OP_NORMAL);
-
/* commit changes */
- return cdns_update_config(cdns);
+ cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+
+ /* don't wait here */
+ return 0;
}
EXPORT_SYMBOL(sdw_cdns_exit_reset);
/**
+ * cdns_enable_slave_interrupts() - Enable/disable SDW Slave interrupts
+ * @cdns: Cadence instance
+ * @state: true to enable Slave interrupts, false to disable them
+ */
+static void cdns_enable_slave_interrupts(struct sdw_cdns *cdns, bool state)
+{
+ u32 mask;
+
+ mask = cdns_readl(cdns, CDNS_MCP_INTMASK);
+ if (state)
+ mask |= CDNS_MCP_INT_SLAVE_MASK;
+ else
+ mask &= ~CDNS_MCP_INT_SLAVE_MASK;
+
+ cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
+}
+
+/**
* sdw_cdns_enable_interrupt() - Enable SDW interrupts
* @cdns: Cadence instance
* @state: True if we are trying to enable interrupt.
@@ -1014,26 +1043,13 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
return val;
}
-/**
- * sdw_cdns_init() - Cadence initialization
- * @cdns: Cadence instance
- */
-int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
+static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
{
struct sdw_bus *bus = &cdns->bus;
struct sdw_master_prop *prop = &bus->prop;
u32 val;
+ u32 ssp_interval;
int divider;
- int ret;
-
- if (clock_stop_exit) {
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CLK_STOP_CLR);
- if (ret < 0) {
- dev_err(cdns->dev, "Couldn't exit from clock stop\n");
- return ret;
- }
- }
/* Set clock divider */
divider = (prop->mclk_freq / prop->max_clk_freq) - 1;
@@ -1052,8 +1068,23 @@ int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
cdns_writel(cdns, CDNS_MCP_FRAME_SHAPE_INIT, val);
/* Set SSP interval to default value */
- cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL);
- cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+ ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ;
+ cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval);
+ cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval);
+}
+
+/**
+ * sdw_cdns_init() - Cadence initialization
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_init(struct sdw_cdns *cdns)
+{
+ u32 val;
+
+ cdns_init_clock_ctrl(cdns);
+
+ /* reset msg_count to default value of FIFOLEVEL */
+ cdns->msg_count = cdns_readl(cdns, CDNS_MCP_FIFOLEVEL);
/* flush command FIFOs */
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST,
@@ -1066,25 +1097,31 @@ int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
/* Configure mcp config */
val = cdns_readl(cdns, CDNS_MCP_CONFIG);
- /* Set Max cmd retry to 15 */
- val |= CDNS_MCP_CONFIG_MCMD_RETRY;
+ /* enable bus operations with clock and data */
+ val &= ~CDNS_MCP_CONFIG_OP;
+ val |= CDNS_MCP_CONFIG_OP_NORMAL;
- /* Set frame delay between PREQ and ping frame to 15 frames */
- val |= 0xF << SDW_REG_SHIFT(CDNS_MCP_CONFIG_MPREQ_DELAY);
+ /* Set cmd mode for Tx and Rx cmds */
+ val &= ~CDNS_MCP_CONFIG_CMD;
+
+ /* Disable sniffer mode */
+ val &= ~CDNS_MCP_CONFIG_SNIFFER;
/* Disable auto bus release */
val &= ~CDNS_MCP_CONFIG_BUS_REL;
- /* Disable sniffer mode */
- val &= ~CDNS_MCP_CONFIG_SNIFFER;
+ if (cdns->bus.multi_link)
+ /* Set Multi-master mode to take gsync into account */
+ val |= CDNS_MCP_CONFIG_MMASTER;
- /* Set cmd mode for Tx and Rx cmds */
- val &= ~CDNS_MCP_CONFIG_CMD;
+ /* leave frame delay to hardware default of 0x1F */
+
+ /* leave command retry to hardware default of 0 */
cdns_writel(cdns, CDNS_MCP_CONFIG, val);
- /* commit changes */
- return cdns_update_config(cdns);
+ /* changes will be committed later */
+ return 0;
}
EXPORT_SYMBOL(sdw_cdns_init);
@@ -1218,6 +1255,166 @@ static const struct sdw_master_port_ops cdns_port_ops = {
};
/**
+ * sdw_cdns_is_clock_stop: Check clock status
+ *
+ * @cdns: Cadence instance
+ */
+bool sdw_cdns_is_clock_stop(struct sdw_cdns *cdns)
+{
+ return !!(cdns_readl(cdns, CDNS_MCP_STAT) & CDNS_MCP_STAT_CLK_STOP);
+}
+EXPORT_SYMBOL(sdw_cdns_is_clock_stop);
+
+/**
+ * sdw_cdns_clock_stop: Cadence clock stop configuration routine
+ *
+ * @cdns: Cadence instance
+ * @block_wake: prevent wakes if required by the platform
+ */
+int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
+{
+ bool slave_present = false;
+ struct sdw_slave *slave;
+ int ret;
+
+ /* Check suspend status */
+ if (sdw_cdns_is_clock_stop(cdns)) {
+ dev_dbg(cdns->dev, "Clock is already stopped\n");
+ return 0;
+ }
+
+ /*
+ * Before entering clock stop we mask the Slave
+ * interrupts. This helps avoid having to deal with e.g. a
+ * Slave becoming UNATTACHED while the clock is being stopped
+ */
+ cdns_enable_slave_interrupts(cdns, false);
+
+ /*
+ * For specific platforms, it is required to be able to put
+ * master into a state in which it ignores wake-up trials
+ * in clock stop state
+ */
+ if (block_wake)
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_BLOCK_WAKEUP,
+ CDNS_MCP_CONTROL_BLOCK_WAKEUP);
+
+ list_for_each_entry(slave, &cdns->bus.slaves, node) {
+ if (slave->status == SDW_SLAVE_ATTACHED ||
+ slave->status == SDW_SLAVE_ALERT) {
+ slave_present = true;
+ break;
+ }
+ }
+
+ /*
+ * CMD_ACCEPT should be set when no devices are attached on the
+ * link while entering clock stop mode. If it is not set and a
+ * broadcast write is issued, the ignored command will be
+ * treated as a failure
+ */
+ if (!slave_present)
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CMD_ACCEPT,
+ CDNS_MCP_CONTROL_CMD_ACCEPT);
+ else
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CMD_ACCEPT, 0);
+
+ /* commit changes */
+ ret = cdns_config_update(cdns);
+ if (ret < 0) {
+ dev_err(cdns->dev, "%s: config_update failed\n", __func__);
+ return ret;
+ }
+
+ /* Prepare slaves for clock stop */
+ ret = sdw_bus_prep_clk_stop(&cdns->bus);
+ if (ret < 0) {
+ dev_err(cdns->dev, "prepare clock stop failed %d", ret);
+ return ret;
+ }
+
+ /*
+ * Enter clock stop mode and only report errors if there are
+ * Slave devices present (ALERT or ATTACHED)
+ */
+ ret = sdw_bus_clk_stop(&cdns->bus);
+ if (ret < 0 && slave_present && ret != -ENODATA) {
+ dev_err(cdns->dev, "bus clock stop failed %d", ret);
+ return ret;
+ }
+
+ ret = cdns_set_wait(cdns, CDNS_MCP_STAT,
+ CDNS_MCP_STAT_CLK_STOP,
+ CDNS_MCP_STAT_CLK_STOP);
+ if (ret < 0)
+ dev_err(cdns->dev, "Clock stop failed %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_cdns_clock_stop);
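
The entry sequence above is deliberately ordered: mask Slave interrupts, optionally block wakes, select CMD_ACCEPT based on whether any peripheral is ATTACHED or ALERT, commit the configuration, prepare the Slaves, then stop the clock and poll CDNS_MCP_STAT for the CLK_STOP bit. The final wait relies on a cdns_set_wait() helper defined elsewhere in this file; below is a minimal standalone sketch of such a poll-with-timeout loop, with the register access stubbed and the retry/delay constants assumed:

    /*
     * Minimal sketch of a poll-with-timeout helper in the spirit of
     * cdns_set_wait(); register access is stubbed out, and the retry
     * budget and poll interval are assumptions.
     */
    #include <stdio.h>
    #include <unistd.h>

    static unsigned int fake_reg;   /* stand-in for a memory-mapped register */

    static unsigned int reg_read(void)
    {
        return fake_reg;
    }

    static int poll_reg(unsigned int mask, unsigned int value)
    {
        int retries = 10;           /* assumed retry budget */

        do {
            if ((reg_read() & mask) == value)
                return 0;           /* condition met */
            usleep(1000);           /* assumed poll interval: 1 ms */
        } while (--retries);

        return -1;                  /* timed out */
    }

    int main(void)
    {
        fake_reg = 0x1;     /* pretend CLK_STOP became set immediately */
        printf("poll result: %d\n", poll_reg(0x1, 0x1));
        return 0;
    }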
+
+/**
+ * sdw_cdns_clock_restart: Cadence PM clock restart configuration routine
+ *
+ * @cdns: Cadence instance
+ * @bus_reset: true if the bus context may have been lost in low-power
+ * modes and a Severe Reset and re-enumeration are required after wake.
+ */
+int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset)
+{
+ int ret;
+
+ /* unmask Slave interrupts that were masked when stopping the clock */
+ cdns_enable_slave_interrupts(cdns, true);
+
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CLK_STOP_CLR);
+ if (ret < 0) {
+ dev_err(cdns->dev, "Couldn't exit from clock stop\n");
+ return ret;
+ }
+
+ ret = cdns_set_wait(cdns, CDNS_MCP_STAT, CDNS_MCP_STAT_CLK_STOP, 0);
+ if (ret < 0) {
+ dev_err(cdns->dev, "clock stop exit failed %d\n", ret);
+ return ret;
+ }
+
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_BLOCK_WAKEUP, 0);
+
+ /*
+ * clear CMD_ACCEPT so that the command ignored
+ * will be treated as a failure during a broadcast write
+ */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT, 0);
+
+ if (!bus_reset) {
+
+ /* enable bus operations with clock and data */
+ cdns_updatel(cdns, CDNS_MCP_CONFIG,
+ CDNS_MCP_CONFIG_OP,
+ CDNS_MCP_CONFIG_OP_NORMAL);
+
+ ret = cdns_config_update(cdns);
+ if (ret < 0) {
+ dev_err(cdns->dev, "%s: config_update failed\n", __func__);
+ return ret;
+ }
+
+ ret = sdw_bus_exit_clk_stop(&cdns->bus);
+ if (ret < 0)
+ dev_err(cdns->dev, "bus failed to exit clock stop %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_cdns_clock_restart);
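
Stop and restart are designed to be called as a pair from a caller's power-management path. A hypothetical sketch of that pairing, with the Cadence calls stubbed so it compiles standalone; whether a given platform blocks wakes on suspend, or loses context and needs a bus reset after wake, is an assumption here, not something this patch wires up:

    /*
     * Hypothetical suspend/resume pairing for the two exports above.
     * The stubs stand in for sdw_cdns_clock_stop()/_restart(); the
     * block_wake and bus_reset choices are platform assumptions.
     */
    #include <stdio.h>
    #include <stdbool.h>

    static int clock_stop_stub(bool block_wake)
    {
        printf("clock stop (block_wake=%d)\n", block_wake);
        return 0;
    }

    static int clock_restart_stub(bool bus_reset)
    {
        printf("clock restart (bus_reset=%d)\n", bus_reset);
        return 0;
    }

    static int driver_suspend(void)
    {
        /* stop the clock; allow in-band wakes on this platform */
        return clock_stop_stub(false);
    }

    static int driver_resume(bool context_lost)
    {
        /* a full reset/re-enumeration is only needed if context was lost */
        return clock_restart_stub(context_lost);
    }

    int main(void)
    {
        driver_suspend();
        driver_resume(true);
        return 0;
    }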
+
+/**
* sdw_cdns_probe() - Cadence probe routine
* @cdns: Cadence instance
*/
@@ -1306,6 +1503,7 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
cdns_updatel(cdns, offset, CDNS_PORTCTRL_DIRN, val);
val = pdi->num;
+ val |= CDNS_PDI_CONFIG_SOFT_RESET;
val |= ((1 << ch) - 1) << SDW_REG_SHIFT(CDNS_PDI_CONFIG_CHANNEL);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 001457cbe5ad..b410656f8194 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -5,6 +5,9 @@
#ifndef __SDW_CADENCE_H
#define __SDW_CADENCE_H
+#define SDW_CADENCE_GSYNC_KHZ 4 /* 4 kHz */
+#define SDW_CADENCE_GSYNC_HZ (SDW_CADENCE_GSYNC_KHZ * 1000)
+
/**
* struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
*
@@ -138,30 +141,26 @@ extern struct sdw_master_ops sdw_cdns_master_ops;
irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
-int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit);
+int sdw_cdns_init(struct sdw_cdns *cdns);
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config);
int sdw_cdns_exit_reset(struct sdw_cdns *cdns);
int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state);
+bool sdw_cdns_is_clock_stop(struct sdw_cdns *cdns);
+int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake);
+int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset);
+
#ifdef CONFIG_DEBUG_FS
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
#endif
-int sdw_cdns_get_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- u32 ch, u32 dir);
struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
u32 ch, u32 dir, int dai_id);
void sdw_cdns_config_stream(struct sdw_cdns *cdns,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
-int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai,
- void *stream, int direction);
-int sdw_cdns_pdm_set_stream(struct snd_soc_dai *dai,
- void *stream, int direction);
-
enum sdw_command_response
cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 06ef3a3ac080..3c83e76c6bf9 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -103,7 +103,7 @@ enum intel_pdi_type {
struct sdw_intel {
struct sdw_cdns cdns;
int instance;
- struct sdw_intel_link_res *res;
+ struct sdw_intel_link_res *link_res;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
@@ -193,8 +193,8 @@ static ssize_t intel_sprintf(void __iomem *mem, bool l,
static int intel_reg_show(struct seq_file *s_file, void *data)
{
struct sdw_intel *sdw = s_file->private;
- void __iomem *s = sdw->res->shim;
- void __iomem *a = sdw->res->alh;
+ void __iomem *s = sdw->link_res->shim;
+ void __iomem *a = sdw->link_res->alh;
char *buf;
ssize_t ret;
int i, j;
@@ -289,7 +289,7 @@ static void intel_debugfs_exit(struct sdw_intel *sdw) {}
static int intel_link_power_up(struct sdw_intel *sdw)
{
unsigned int link_id = sdw->instance;
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
int spa_mask, cpa_mask;
int link_control, ret;
@@ -309,7 +309,7 @@ static int intel_link_power_up(struct sdw_intel *sdw)
static int intel_shim_init(struct sdw_intel *sdw)
{
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int sync_reg, ret;
u16 ioctl = 0, act = 0;
@@ -370,7 +370,7 @@ static int intel_shim_init(struct sdw_intel *sdw)
static void intel_pdi_init(struct sdw_intel *sdw,
struct sdw_cdns_stream_config *config)
{
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int pcm_cap, pdm_cap;
@@ -404,7 +404,7 @@ static void intel_pdi_init(struct sdw_intel *sdw,
static int
intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
{
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int count;
@@ -476,7 +476,7 @@ static int intel_pdi_ch_update(struct sdw_intel *sdw)
static void
intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int pdi_conf = 0;
@@ -508,7 +508,7 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
static void
intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
- void __iomem *alh = sdw->res->alh;
+ void __iomem *alh = sdw->link_res->alh;
unsigned int link_id = sdw->instance;
unsigned int conf;
@@ -535,7 +535,7 @@ static int intel_params_stream(struct sdw_intel *sdw,
struct snd_pcm_hw_params *hw_params,
int link_id, int alh_stream_id)
{
- struct sdw_intel_link_res *res = sdw->res;
+ struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_intel_stream_params_data params_data;
params_data.substream = substream;
@@ -550,6 +550,25 @@ static int intel_params_stream(struct sdw_intel *sdw,
return -EIO;
}
+static int intel_free_stream(struct sdw_intel *sdw,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ int link_id)
+{
+ struct sdw_intel_link_res *res = sdw->link_res;
+ struct sdw_intel_stream_free_data free_data;
+
+ free_data.substream = substream;
+ free_data.dai = dai;
+ free_data.link_id = link_id;
+
+ if (res->ops && res->ops->free_stream && res->dev)
+ return res->ops->free_stream(res->dev,
+ &free_data);
+
+ return 0;
+}
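
intel_free_stream() only invokes the callback when the whole path (ops table, callback pointer, device) is populated, and treats a missing callback as success. A standalone sketch of that guarded ops-table pattern, with illustrative names rather than the real Intel structures:

    /*
     * Standalone sketch of the guarded ops-callback pattern used by
     * intel_free_stream(): every pointer on the path is checked before
     * the call, and a missing callback is treated as success.
     */
    #include <stdio.h>

    struct link_ops {
        int (*free_stream)(void *dev, int link_id);
    };

    struct link_res {
        struct link_ops *ops;
        void *dev;
    };

    static int do_free(void *dev, int link_id)
    {
        printf("freeing stream on link %d\n", link_id);
        return 0;
    }

    static int free_stream(struct link_res *res, int link_id)
    {
        if (res->ops && res->ops->free_stream && res->dev)
            return res->ops->free_stream(res->dev, link_id);

        return 0;   /* no callback registered: nothing to do */
    }

    int main(void)
    {
        struct link_ops ops = { .free_stream = do_free };
        struct link_res res = { .ops = &ops, .dev = &ops };

        return free_stream(&res, 0);
    }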
+
/*
* bank switch routines
*/
@@ -558,7 +577,7 @@ static int intel_pre_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
int sync_reg;
/* Write to register only for multi-link */
@@ -577,7 +596,7 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
int sync_reg, ret;
/* Write to register only for multi-link */
@@ -617,6 +636,68 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
* DAI routines
*/
+static int sdw_stream_setup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdw_stream_runtime *sdw_stream = NULL;
+ char *name;
+ int i, ret;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ name = kasprintf(GFP_KERNEL, "%s-Playback", dai->name);
+ else
+ name = kasprintf(GFP_KERNEL, "%s-Capture", dai->name);
+
+ if (!name)
+ return -ENOMEM;
+
+ sdw_stream = sdw_alloc_stream(name);
+ if (!sdw_stream) {
+ dev_err(dai->dev, "alloc stream failed for DAI %s", dai->name);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Set stream pointer on CPU DAI */
+ ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
+ if (ret < 0) {
+ dev_err(dai->dev, "failed to set stream pointer on cpu dai %s",
+ dai->name);
+ goto release_stream;
+ }
+
+ /* Set stream pointer on all CODEC DAIs */
+ for (i = 0; i < rtd->num_codecs; i++) {
+ ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sdw_stream,
+ substream->stream);
+ if (ret < 0) {
+ dev_err(dai->dev, "failed to set stream pointer on codec dai %s",
+ rtd->codec_dais[i]->name);
+ goto release_stream;
+ }
+ }
+
+ return 0;
+
+release_stream:
+ sdw_release_stream(sdw_stream);
+error:
+ kfree(name);
+ return ret;
+}
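
The setup path above uses the usual goto-unwind idiom: each failure label releases exactly what was acquired before it, in reverse order. A standalone sketch of the same shape, with placeholder resources instead of the SoundWire objects:

    /*
     * Standalone sketch of the goto-based unwind in sdw_stream_setup():
     * a late failure frees both resources, an early one frees only the
     * first. The allocations are placeholders, not SoundWire objects.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int setup(const char *base, int fail_late)
    {
        char *name;
        int *stream;
        int ret;

        name = malloc(strlen(base) + 1);
        if (!name)
            return -1;
        strcpy(name, base);

        stream = malloc(sizeof(*stream));
        if (!stream) {
            ret = -1;
            goto error;             /* only the name needs freeing */
        }

        if (fail_late) {
            ret = -1;
            goto release_stream;    /* unwind in reverse order */
        }

        printf("configured %s\n", name);
        free(stream);   /* demo only: nothing holds these afterwards */
        free(name);
        return 0;

    release_stream:
        free(stream);
    error:
        free(name);
        return ret;
    }

    int main(void)
    {
        printf("late failure: %d\n", setup("dai0-Playback", 1));
        printf("success: %d\n", setup("dai0-Playback", 0));
        return 0;
    }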
+
+static int intel_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ /*
+ * TODO: add pm_runtime support here, the startup callback
+ * will make sure the IP is 'active'
+ */
+
+ return sdw_stream_setup(substream, dai);
+}
+
static int intel_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -699,10 +780,63 @@ error:
return ret;
}
+static int intel_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_cdns_dma_data *dma;
+
+ dma = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dma) {
+ dev_err(dai->dev, "failed to get dma data in %s",
+ __func__);
+ return -EIO;
+ }
+
+ return sdw_prepare_stream(dma->stream);
+}
+
+static int intel_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_cdns_dma_data *dma;
+ int ret;
+
+ dma = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dma) {
+ dev_err(dai->dev, "failed to get dma data in %s", __func__);
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ ret = sdw_enable_stream(dma->stream);
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ ret = sdw_disable_stream(dma->stream);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ dev_err(dai->dev,
+ "%s trigger %d failed: %d",
+ __func__, cmd, ret);
+ return ret;
+}
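
The trigger callback folds the ALSA trigger commands into two outcomes: start, resume, and pause-release enable the stream; stop, suspend, and pause-push disable it. A standalone sketch of that mapping, using illustrative constants in place of the real SNDRV_PCM_TRIGGER_* values from <sound/pcm.h>:

    /*
     * Standalone sketch of the trigger-command mapping in intel_trigger().
     * The enum values are illustrative stand-ins for SNDRV_PCM_TRIGGER_*.
     */
    #include <stdio.h>

    enum cmd { TRIG_START, TRIG_RESUME, TRIG_PAUSE_RELEASE,
               TRIG_STOP, TRIG_SUSPEND, TRIG_PAUSE_PUSH };

    static int trigger(enum cmd cmd)
    {
        switch (cmd) {
        case TRIG_START:
        case TRIG_RESUME:
        case TRIG_PAUSE_RELEASE:
            printf("enable stream\n");
            return 0;
        case TRIG_STOP:
        case TRIG_SUSPEND:
        case TRIG_PAUSE_PUSH:
            printf("disable stream\n");
            return 0;
        default:
            return -1;  /* unknown command */
        }
    }

    int main(void)
    {
        trigger(TRIG_START);
        trigger(TRIG_SUSPEND);
        return 0;
    }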
+
static int
intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dma_data *dma;
int ret;
@@ -710,12 +844,29 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
if (!dma)
return -EIO;
+ ret = sdw_deprepare_stream(dma->stream);
+ if (ret) {
+ dev_err(dai->dev, "sdw_deprepare_stream: failed %d", ret);
+ return ret;
+ }
+
ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dai->dev, "remove master from stream %s failed: %d\n",
dma->stream->name, ret);
+ return ret;
+ }
- return ret;
+ ret = intel_free_stream(sdw, substream, dai, sdw->instance);
+ if (ret < 0) {
+ dev_err(dai->dev, "intel_free_stream: failed %d", ret);
+ return ret;
+ }
+
+ kfree(dma->stream->name);
+ sdw_release_stream(dma->stream);
+
+ return 0;
}
static void intel_shutdown(struct snd_pcm_substream *substream,
@@ -744,14 +895,20 @@ static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
}
static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
+ .startup = intel_startup,
.hw_params = intel_hw_params,
+ .prepare = intel_prepare,
+ .trigger = intel_trigger,
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pcm_set_sdw_stream,
};
static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
+ .startup = intel_startup,
.hw_params = intel_hw_params,
+ .prepare = intel_prepare,
+ .trigger = intel_trigger,
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pdm_set_sdw_stream,
@@ -920,7 +1077,7 @@ static int intel_init(struct sdw_intel *sdw)
intel_link_power_up(sdw);
intel_shim_init(sdw);
- return sdw_cdns_init(&sdw->cdns, false);
+ return sdw_cdns_init(&sdw->cdns);
}
/*
@@ -937,9 +1094,9 @@ static int intel_probe(struct platform_device *pdev)
return -ENOMEM;
sdw->instance = pdev->id;
- sdw->res = dev_get_platdata(&pdev->dev);
+ sdw->link_res = dev_get_platdata(&pdev->dev);
sdw->cdns.dev = &pdev->dev;
- sdw->cdns.registers = sdw->res->registers;
+ sdw->cdns.registers = sdw->link_res->registers;
sdw->cdns.instance = sdw->instance;
sdw->cdns.msg_count = 0;
sdw->cdns.bus.dev = &pdev->dev;
@@ -979,11 +1136,12 @@ static int intel_probe(struct platform_device *pdev)
intel_pdi_ch_update(sdw);
/* Acquire IRQ */
- ret = request_threaded_irq(sdw->res->irq, sdw_cdns_irq, sdw_cdns_thread,
+ ret = request_threaded_irq(sdw->link_res->irq,
+ sdw_cdns_irq, sdw_cdns_thread,
IRQF_SHARED, KBUILD_MODNAME, &sdw->cdns);
if (ret < 0) {
dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n",
- sdw->res->irq);
+ sdw->link_res->irq);
goto err_init;
}
@@ -1013,7 +1171,7 @@ static int intel_probe(struct platform_device *pdev)
err_interrupt:
sdw_cdns_enable_interrupt(&sdw->cdns, false);
- free_irq(sdw->res->irq, sdw);
+ free_irq(sdw->link_res->irq, sdw);
err_init:
sdw_delete_bus_master(&sdw->cdns.bus);
return ret;
@@ -1028,7 +1186,7 @@ static int intel_remove(struct platform_device *pdev)
if (!sdw->cdns.bus.prop.hw_disabled) {
intel_debugfs_exit(sdw);
sdw_cdns_enable_interrupt(&sdw->cdns, false);
- free_irq(sdw->res->irq, sdw);
+ free_irq(sdw->link_res->irq, sdw);
snd_soc_unregister_component(sdw->cdns.dev);
}
sdw_delete_bus_master(&sdw->cdns.bus);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 1c6c6a2e0def..d6c9ad231873 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -588,12 +588,20 @@ static int qcom_swrm_set_sdw_stream(struct snd_soc_dai *dai,
return 0;
}
+static void *qcom_swrm_get_sdw_stream(struct snd_soc_dai *dai, int direction)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+
+ return ctrl->sruntime[dai->id];
+}
+
static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sdw_stream_runtime *sruntime;
+ struct snd_soc_dai *codec_dai;
int ret, i;
sruntime = sdw_alloc_stream(dai->name);
@@ -602,12 +610,12 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
ctrl->sruntime[dai->id] = sruntime;
- for (i = 0; i < rtd->num_codecs; i++) {
- ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sruntime,
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime,
substream->stream);
if (ret < 0 && ret != -ENOTSUPP) {
dev_err(dai->dev, "Failed to set sdw stream on %s",
- rtd->codec_dais[i]->name);
+ codec_dai->name);
sdw_release_stream(sruntime);
return ret;
}
@@ -631,6 +639,7 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
.startup = qcom_swrm_startup,
.shutdown = qcom_swrm_shutdown,
.set_sdw_stream = qcom_swrm_set_sdw_stream,
+ .get_sdw_stream = qcom_swrm_get_sdw_stream,
};
static const struct snd_soc_component_driver qcom_swrm_dai_component = {
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 19919975bb6d..aace57fae7f8 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -46,7 +46,11 @@ static int sdw_slave_add(struct sdw_bus *bus,
slave->dev.of_node = of_node_get(to_of_node(fwnode));
slave->bus = bus;
slave->status = SDW_SLAVE_UNATTACHED;
+ init_completion(&slave->enumeration_complete);
+ init_completion(&slave->initialization_complete);
slave->dev_num = 0;
+ init_completion(&slave->probe_complete);
+ slave->probed = false;
mutex_lock(&bus->bus_lock);
list_add_tail(&slave->node, &bus->slaves);
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 178ae92b8cc1..a9a72574b34a 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -167,13 +167,15 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
return ret;
}
- /* Program DPN_BlockCtrl1 register */
- ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1));
- if (ret < 0) {
- dev_err(&s_rt->slave->dev,
- "DPN_BlockCtrl1 register write failed for port %d\n",
- t_params->port_num);
- return ret;
+ if (!dpn_prop->read_only_wordlength) {
+ /* Program DPN_BlockCtrl1 register */
+ ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1));
+ if (ret < 0) {
+ dev_err(&s_rt->slave->dev,
+ "DPN_BlockCtrl1 register write failed for port %d\n",
+ t_params->port_num);
+ return ret;
+ }
}
/* Program DPN_SampleCtrl1 register */
@@ -313,9 +315,9 @@ static int sdw_enable_disable_slave_ports(struct sdw_bus *bus,
* it is safe to reset this register
*/
if (en)
- ret = sdw_update(s_rt->slave, addr, 0xFF, p_rt->ch_mask);
+ ret = sdw_write(s_rt->slave, addr, p_rt->ch_mask);
else
- ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0);
+ ret = sdw_write(s_rt->slave, addr, 0x0);
if (ret < 0)
dev_err(&s_rt->slave->dev,
@@ -464,10 +466,9 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
addr = SDW_DPN_PREPARECTRL(p_rt->num);
if (prep)
- ret = sdw_update(s_rt->slave, addr,
- 0xFF, p_rt->ch_mask);
+ ret = sdw_write(s_rt->slave, addr, p_rt->ch_mask);
else
- ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0);
+ ret = sdw_write(s_rt->slave, addr, 0x0);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
@@ -587,10 +588,11 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
if (slave->ops->bus_config) {
ret = slave->ops->bus_config(slave, &bus->params);
- if (ret < 0)
+ if (ret < 0) {
dev_err(bus->dev, "Notify Slave: %d failed\n",
slave->dev_num);
- return ret;
+ return ret;
+ }
}
}
@@ -602,13 +604,25 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
* and Slave(s)
*
* @bus: SDW bus instance
+ * @prepare: true if sdw_program_params() is called by _prepare.
*/
-static int sdw_program_params(struct sdw_bus *bus)
+static int sdw_program_params(struct sdw_bus *bus, bool prepare)
{
struct sdw_master_runtime *m_rt;
int ret = 0;
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+
+ /*
+ * this loop walks through all master runtimes for a
+ * bus, but ports may only be programmed while a stream
+ * is being explicitly prepared; otherwise, streams that
+ * are still in the CONFIGURED state are skipped.
+ */
+ if (!prepare &&
+ m_rt->stream->state == SDW_STREAM_CONFIGURED)
+ continue;
+
ret = sdw_program_port_params(m_rt);
if (ret < 0) {
dev_err(bus->dev,
@@ -1460,7 +1474,8 @@ static void sdw_release_bus_lock(struct sdw_stream_runtime *stream)
}
}
-static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
+static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
+ bool update_params)
{
struct sdw_master_runtime *m_rt;
struct sdw_bus *bus = NULL;
@@ -1480,6 +1495,9 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
+ if (!update_params)
+ goto program_params;
+
/* Increment cumulative bus bandwidth */
/* TODO: Update this during Device-Device support */
bus->params.bandwidth += m_rt->stream->params.rate *
@@ -1495,8 +1513,9 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
}
}
+program_params:
/* Program params */
- ret = sdw_program_params(bus);
+ ret = sdw_program_params(bus, true);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
goto restore_params;
@@ -1544,7 +1563,8 @@ restore_params:
*/
int sdw_prepare_stream(struct sdw_stream_runtime *stream)
{
- int ret = 0;
+ bool update_params = true;
+ int ret;
if (!stream) {
pr_err("SoundWire: Handle not found for stream\n");
@@ -1553,8 +1573,32 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
- ret = _sdw_prepare_stream(stream);
+ if (stream->state == SDW_STREAM_PREPARED) {
+ ret = 0;
+ goto state_err;
+ }
+
+ if (stream->state != SDW_STREAM_CONFIGURED &&
+ stream->state != SDW_STREAM_DEPREPARED &&
+ stream->state != SDW_STREAM_DISABLED) {
+ pr_err("%s: %s: inconsistent state %d\n",
+ __func__, stream->name, stream->state);
+ ret = -EINVAL;
+ goto state_err;
+ }
+
+ /*
+ * When the stream is DISABLED, sdw_prepare_stream() is being
+ * called as a result of an underflow or a resume operation.
+ * In this case, the bus parameters shall not be recomputed,
+ * but still need to be re-applied
+ */
+ if (stream->state == SDW_STREAM_DISABLED)
+ update_params = false;
+
+ ret = _sdw_prepare_stream(stream, update_params);
+state_err:
sdw_release_bus_lock(stream);
return ret;
}
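
The checks added to sdw_prepare_stream() amount to a small state machine: PREPARED is an idempotent no-op, only CONFIGURED, DEPREPARED, or DISABLED streams may enter prepare, and DISABLED alone skips recomputing bus parameters (they are merely re-applied). A standalone sketch of that decision logic:

    /*
     * Standalone sketch of the prepare-state checks above: returns 0
     * for the idempotent case, -1 for an inconsistent state, and 1
     * when prepare should proceed; *update_params mirrors the flag
     * passed to _sdw_prepare_stream().
     */
    #include <stdio.h>

    enum state { CONFIGURED, PREPARED, ENABLED, DISABLED, DEPREPARED };

    static int can_prepare(enum state s, int *update_params)
    {
        *update_params = 1;

        if (s == PREPARED)
            return 0;   /* already prepared: nothing to do */

        if (s != CONFIGURED && s != DEPREPARED && s != DISABLED)
            return -1;  /* inconsistent state */

        if (s == DISABLED)
            *update_params = 0;     /* re-apply, don't recompute */

        return 1;       /* go ahead and prepare */
    }

    int main(void)
    {
        int update;

        printf("result=%d\n", can_prepare(DISABLED, &update));
        printf("update_params=%d\n", update);
        return 0;
    }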
@@ -1571,7 +1615,7 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
bus = m_rt->bus;
/* Program params */
- ret = sdw_program_params(bus);
+ ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
@@ -1619,8 +1663,17 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
+ if (stream->state != SDW_STREAM_PREPARED &&
+ stream->state != SDW_STREAM_DISABLED) {
+ pr_err("%s: %s: inconsistent state %d\n",
+ __func__, stream->name, stream->state);
+ ret = -EINVAL;
+ goto state_err;
+ }
+
ret = _sdw_enable_stream(stream);
+state_err:
sdw_release_bus_lock(stream);
return ret;
}
@@ -1647,7 +1700,7 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
struct sdw_bus *bus = m_rt->bus;
/* Program params */
- ret = sdw_program_params(bus);
+ ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
@@ -1693,8 +1746,16 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
+ if (stream->state != SDW_STREAM_ENABLED) {
+ pr_err("%s: %s: inconsistent state %d\n",
+ __func__, stream->name, stream->state);
+ ret = -EINVAL;
+ goto state_err;
+ }
+
ret = _sdw_disable_stream(stream);
+state_err:
sdw_release_bus_lock(stream);
return ret;
}
@@ -1721,7 +1782,7 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
m_rt->ch_count * m_rt->stream->params.bps;
/* Program params */
- ret = sdw_program_params(bus);
+ ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
@@ -1749,8 +1810,18 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
}
sdw_acquire_bus_lock(stream);
+
+ if (stream->state != SDW_STREAM_PREPARED &&
+ stream->state != SDW_STREAM_DISABLED) {
+ pr_err("%s: %s: inconsistent state %d\n",
+ __func__, stream->name, stream->state);
+ ret = -EINVAL;
+ goto state_err;
+ }
+
ret = _sdw_deprepare_stream(stream);
+state_err:
sdw_release_bus_lock(stream);
return ret;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index d6ed0c355954..741b9140992a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -62,6 +62,13 @@ config SPI_ALTERA
help
This is the driver for the Altera SPI Controller.
+config SPI_AR934X
+ tristate "Qualcomm Atheros AR934X/QCA95XX SPI controller driver"
+ depends on ATH79 || COMPILE_TEST
+ help
+ This enables support for the SPI controller present on the
+ Qualcomm Atheros AR934X/QCA95XX SoCs.
+
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
depends on ATH79 || COMPILE_TEST
@@ -264,6 +271,13 @@ config SPI_FALCON
has only been tested with m25p80 type chips. The hardware has no
support for other types of SPI peripherals.
+config SPI_FSI
+ tristate "FSI SPI driver"
+ depends on FSI
+ help
+ This enables support for SPI controllers attached to the
+ FSI bus.
+
config SPI_FSL_LPSPI
tristate "Freescale i.MX LPSPI controller"
depends on ARCH_MXC || COMPILE_TEST
@@ -285,7 +299,6 @@ config SPI_HISI_SFC_V3XX
tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
depends on (ARM64 && ACPI) || COMPILE_TEST
depends on HAS_IOMEM
- select CONFIG_MTD_SPI_NOR
help
This enables support for HiSilicon v3xx SPI-NOR flash controller
found in hi16xx chipsets.
@@ -415,6 +428,7 @@ config SPI_FSL_ESPI
config SPI_MESON_SPICC
tristate "Amlogic Meson SPICC controller"
+ depends on COMMON_CLK
depends on ARCH_MESON || COMPILE_TEST
help
This enables master mode support for the SPICC (SPI communication
@@ -443,6 +457,16 @@ config SPI_MT7621
help
This selects a driver for the MediaTek MT7621 SPI Controller.
+config SPI_MTK_NOR
+ tristate "MediaTek SPI NOR controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for the SPI NOR controller found on MediaTek
+ ARM SoCs. This controller is specifically for SPI-NOR flash.
+ It can perform generic SPI transfers of up to 6 bytes via the
+ generic SPI interface, as well as several SPI-NOR specific
+ instructions via the SPI MEM interface.
+
config SPI_NPCM_FIU
tristate "Nuvoton NPCM FLASH Interface Unit"
depends on ARCH_NPCM || COMPILE_TEST
@@ -551,7 +575,7 @@ config SPI_PPC4xx
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
- depends on (ARCH_PXA || ARCH_MMP || PCI || ACPI)
+ depends on ARCH_PXA || ARCH_MMP || PCI || ACPI || COMPILE_TEST
select PXA_SSP if ARCH_PXA || ARCH_MMP
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master
@@ -890,6 +914,17 @@ config SPI_ZYNQMP_GQSPI
# Add new SPI master controllers in alphabetical order above this line
#
+comment "SPI Multiplexer support"
+
+config SPI_MUX
+ tristate "SPI multiplexer support"
+ select MULTIPLEXER
+ help
+ This adds support for SPI multiplexers. Each SPI mux will be
+ accessible as a SPI controller; the devices behind the mux will
+ appear as chip selects on this controller. It is still necessary to
+ select one or more specific mux-controller drivers.
+
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 9b65ec5afc5e..28f601327f8c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -9,11 +9,13 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
# config declarations into driver model code
obj-$(CONFIG_SPI_MASTER) += spi.o
obj-$(CONFIG_SPI_MEM) += spi-mem.o
+obj-$(CONFIG_SPI_MUX) += spi-mux.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
# SPI master controller drivers (bus)
obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
+obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
@@ -40,6 +42,7 @@ spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
obj-$(CONFIG_SPI_EFM32) += spi-efm32.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
+obj-$(CONFIG_SPI_FSI) += spi-fsi.o
obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
@@ -62,6 +65,7 @@ obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
+obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 13def7f78b9e..cb44d1e169aa 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -173,6 +173,81 @@ static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
+#ifdef VERBOSE_DEBUG
+static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
+{
+ switch (offset) {
+ case QSPI_CR:
+ return "CR";
+ case QSPI_MR:
+ return "MR";
+ case QSPI_RD:
+ return "MR";
+ case QSPI_TD:
+ return "TD";
+ case QSPI_SR:
+ return "SR";
+ case QSPI_IER:
+ return "IER";
+ case QSPI_IDR:
+ return "IDR";
+ case QSPI_IMR:
+ return "IMR";
+ case QSPI_SCR:
+ return "SCR";
+ case QSPI_IAR:
+ return "IAR";
+ case QSPI_ICR:
+ return "ICR/WICR";
+ case QSPI_IFR:
+ return "IFR";
+ case QSPI_RICR:
+ return "RICR";
+ case QSPI_SMR:
+ return "SMR";
+ case QSPI_SKR:
+ return "SKR";
+ case QSPI_WPMR:
+ return "WPMR";
+ case QSPI_WPSR:
+ return "WPSR";
+ case QSPI_VERSION:
+ return "VERSION";
+ default:
+ snprintf(tmp, sz, "0x%02x", offset);
+ break;
+ }
+
+ return tmp;
+}
+#endif /* VERBOSE_DEBUG */
+
+static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
+{
+ u32 value = readl_relaxed(aq->regs + offset);
+
+#ifdef VERBOSE_DEBUG
+ char tmp[8];
+
+ dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
+ atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
+#endif /* VERBOSE_DEBUG */
+
+ return value;
+}
+
+static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
+{
+#ifdef VERBOSE_DEBUG
+ char tmp[8];
+
+ dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
+ atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
+#endif /* VERBOSE_DEBUG */
+
+ writel_relaxed(value, aq->regs + offset);
+}
+
static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
const struct atmel_qspi_mode *mode)
{
@@ -293,32 +368,32 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
* Serial Memory Mode (SMM).
*/
if (aq->mr != QSPI_MR_SMM) {
- writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
+ atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
aq->mr = QSPI_MR_SMM;
}
/* Clear pending interrupts */
- (void)readl_relaxed(aq->regs + QSPI_SR);
+ (void)atmel_qspi_read(aq, QSPI_SR);
if (aq->caps->has_ricr) {
if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN)
ifr |= QSPI_IFR_APBTFRTYP_READ;
/* Set QSPI Instruction Frame registers */
- writel_relaxed(iar, aq->regs + QSPI_IAR);
+ atmel_qspi_write(iar, aq, QSPI_IAR);
if (op->data.dir == SPI_MEM_DATA_IN)
- writel_relaxed(icr, aq->regs + QSPI_RICR);
+ atmel_qspi_write(icr, aq, QSPI_RICR);
else
- writel_relaxed(icr, aq->regs + QSPI_WICR);
- writel_relaxed(ifr, aq->regs + QSPI_IFR);
+ atmel_qspi_write(icr, aq, QSPI_WICR);
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
} else {
if (op->data.dir == SPI_MEM_DATA_OUT)
ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
/* Set QSPI Instruction Frame registers */
- writel_relaxed(iar, aq->regs + QSPI_IAR);
- writel_relaxed(icr, aq->regs + QSPI_ICR);
- writel_relaxed(ifr, aq->regs + QSPI_IFR);
+ atmel_qspi_write(iar, aq, QSPI_IAR);
+ atmel_qspi_write(icr, aq, QSPI_ICR);
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
}
return 0;
@@ -345,7 +420,7 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Skip to the final steps if there is no data */
if (op->data.nbytes) {
/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)readl_relaxed(aq->regs + QSPI_IFR);
+ (void)atmel_qspi_read(aq, QSPI_IFR);
/* Send/Receive data */
if (op->data.dir == SPI_MEM_DATA_IN)
@@ -356,22 +431,22 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
op->data.nbytes);
/* Release the chip-select */
- writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR);
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
}
/* Poll INSTRuction End status */
- sr = readl_relaxed(aq->regs + QSPI_SR);
+ sr = atmel_qspi_read(aq, QSPI_SR);
if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
return err;
/* Wait for INSTRuction End interrupt */
reinit_completion(&aq->cmd_completion);
aq->pending = sr & QSPI_SR_CMD_COMPLETED;
- writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER);
+ atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
if (!wait_for_completion_timeout(&aq->cmd_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
- writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR);
+ atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
return err;
}
@@ -410,7 +485,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
scbr--;
aq->scr = QSPI_SCR_SCBR(scbr);
- writel_relaxed(aq->scr, aq->regs + QSPI_SCR);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
return 0;
}
@@ -418,14 +493,14 @@ static int atmel_qspi_setup(struct spi_device *spi)
static void atmel_qspi_init(struct atmel_qspi *aq)
{
/* Reset the QSPI controller */
- writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR);
+ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
+ atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
aq->mr = QSPI_MR_SMM;
/* Enable the QSPI controller */
- writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR);
+ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
@@ -433,8 +508,8 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
struct atmel_qspi *aq = dev_id;
u32 status, mask, pending;
- status = readl_relaxed(aq->regs + QSPI_SR);
- mask = readl_relaxed(aq->regs + QSPI_IMR);
+ status = atmel_qspi_read(aq, QSPI_SR);
+ mask = atmel_qspi_read(aq, QSPI_IMR);
pending = status & mask;
if (!pending)
@@ -569,7 +644,7 @@ static int atmel_qspi_remove(struct platform_device *pdev)
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
spi_unregister_controller(ctrl);
- writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR);
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
return 0;
@@ -596,7 +671,7 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
atmel_qspi_init(aq);
- writel_relaxed(aq->scr, aq->regs + QSPI_SCR);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
return 0;
}
diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c
new file mode 100644
index 000000000000..d08dec09d423
--- /dev/null
+++ b/drivers/spi/spi-ar934x.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// SPI controller driver for Qualcomm Atheros AR934x/QCA95xx SoCs
+//
+// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
+//
+// Based on spi-mt7621.c:
+// Copyright (C) 2011 Sergiy <piratfm@gmail.com>
+// Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+// Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi-ar934x"
+
+#define AR934X_SPI_REG_FS 0x00
+#define AR934X_SPI_ENABLE BIT(0)
+
+#define AR934X_SPI_REG_IOC 0x08
+#define AR934X_SPI_IOC_INITVAL 0x70000
+
+#define AR934X_SPI_REG_CTRL 0x04
+#define AR934X_SPI_CLK_MASK GENMASK(5, 0)
+
+#define AR934X_SPI_DATAOUT 0x10
+
+#define AR934X_SPI_REG_SHIFT_CTRL 0x14
+#define AR934X_SPI_SHIFT_EN BIT(31)
+#define AR934X_SPI_SHIFT_CS(n) BIT(28 + (n))
+#define AR934X_SPI_SHIFT_TERM 26
+#define AR934X_SPI_SHIFT_VAL(cs, term, count) \
+ (AR934X_SPI_SHIFT_EN | AR934X_SPI_SHIFT_CS(cs) | \
+ (term) << AR934X_SPI_SHIFT_TERM | (count))
+
+#define AR934X_SPI_DATAIN 0x18
+
+struct ar934x_spi {
+ struct spi_controller *ctlr;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int clk_freq;
+};
+
+static inline int ar934x_spi_clk_div(struct ar934x_spi *sp, unsigned int freq)
+{
+ int div = DIV_ROUND_UP(sp->clk_freq, freq * 2) - 1;
+
+ if (div < 0)
+ return 0;
+ else if (div > AR934X_SPI_CLK_MASK)
+ return -EINVAL;
+ else
+ return div;
+}
+
+static int ar934x_spi_setup(struct spi_device *spi)
+{
+ struct ar934x_spi *sp = spi_controller_get_devdata(spi->master);
+
+ if ((spi->max_speed_hz == 0) ||
+ (spi->max_speed_hz > (sp->clk_freq / 2))) {
+ spi->max_speed_hz = sp->clk_freq / 2;
+ } else if (spi->max_speed_hz < (sp->clk_freq / 128)) {
+ dev_err(&spi->dev, "spi clock is too low\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ar934x_spi_transfer_one_message(struct spi_controller *master,
+ struct spi_message *m)
+{
+ struct ar934x_spi *sp = spi_controller_get_devdata(master);
+ struct spi_transfer *t = NULL;
+ struct spi_device *spi = m->spi;
+ unsigned long trx_done, trx_cur;
+ int stat = 0;
+ u8 term = 0;
+ int div, i;
+ u32 reg;
+ const u8 *tx_buf;
+ u8 *buf;
+
+ m->actual_length = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->speed_hz)
+ div = ar934x_spi_clk_div(sp, t->speed_hz);
+ else
+ div = ar934x_spi_clk_div(sp, spi->max_speed_hz);
+ if (div < 0) {
+ stat = -EIO;
+ goto msg_done;
+ }
+
+ reg = ioread32(sp->base + AR934X_SPI_REG_CTRL);
+ reg &= ~AR934X_SPI_CLK_MASK;
+ reg |= div;
+ iowrite32(reg, sp->base + AR934X_SPI_REG_CTRL);
+ iowrite32(0, sp->base + AR934X_SPI_DATAOUT);
+
+ for (trx_done = 0; trx_done < t->len; trx_done += 4) {
+ trx_cur = t->len - trx_done;
+ if (trx_cur > 4)
+ trx_cur = 4;
+ else if (list_is_last(&t->transfer_list, &m->transfers))
+ term = 1;
+
+ if (t->tx_buf) {
+ tx_buf = t->tx_buf + trx_done;
+ reg = tx_buf[0];
+ for (i = 1; i < trx_cur; i++)
+ reg = reg << 8 | tx_buf[i];
+ iowrite32(reg, sp->base + AR934X_SPI_DATAOUT);
+ }
+
+ reg = AR934X_SPI_SHIFT_VAL(spi->chip_select, term,
+ trx_cur * 8);
+ iowrite32(reg, sp->base + AR934X_SPI_REG_SHIFT_CTRL);
+ stat = readl_poll_timeout(
+ sp->base + AR934X_SPI_REG_SHIFT_CTRL, reg,
+ !(reg & AR934X_SPI_SHIFT_EN), 0, 5);
+ if (stat < 0)
+ goto msg_done;
+
+ if (t->rx_buf) {
+ reg = ioread32(sp->base + AR934X_SPI_DATAIN);
+ buf = t->rx_buf + trx_done;
+ for (i = 0; i < trx_cur; i++) {
+ buf[trx_cur - i - 1] = reg & 0xff;
+ reg >>= 8;
+ }
+ }
+ }
+ m->actual_length += t->len;
+ }
+
+msg_done:
+ m->status = stat;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
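
The controller shifts at most four bytes per access: TX bytes are packed MSB-first into the DATAOUT word, and the received word is unpacked back into bytes in the same order. A standalone worked example of the two loops used above:

    /*
     * Standalone sketch of the 4-byte packing/unpacking done per shift
     * in ar934x_spi_transfer_one_message(): TX bytes go out MSB-first,
     * and the received word is unpacked in matching order.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint8_t tx[4] = { 0xde, 0xad, 0xbe, 0xef };
        uint8_t rx[4];
        uint32_t reg;
        int i, trx_cur = 4;

        /* pack: first byte ends up in the most significant position */
        reg = tx[0];
        for (i = 1; i < trx_cur; i++)
            reg = reg << 8 | tx[i];
        printf("DATAOUT word: 0x%08x\n", (unsigned int)reg); /* 0xdeadbeef */

        /* unpack: last extracted byte is the first one received */
        for (i = 0; i < trx_cur; i++) {
            rx[trx_cur - i - 1] = reg & 0xff;
            reg >>= 8;
        }
        printf("rx: %02x %02x %02x %02x\n", rx[0], rx[1], rx[2], rx[3]);
        return 0;
    }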
+
+static const struct of_device_id ar934x_spi_match[] = {
+ { .compatible = "qca,ar934x-spi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ar934x_spi_match);
+
+static int ar934x_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct ar934x_spi *sp;
+ void __iomem *base;
+ struct clk *clk;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (!ctlr) {
+ dev_err(&pdev->dev, "failed to allocate spi controller\n");
+ clk_disable_unprepare(clk);
+ return -ENOMEM;
+ }
+
+ /* disable flash mapping and expose spi controller registers */
+ iowrite32(AR934X_SPI_ENABLE, base + AR934X_SPI_REG_FS);
+ /* restore pins to default state: CSn=1 DO=CLK=0 */
+ iowrite32(AR934X_SPI_IOC_INITVAL, base + AR934X_SPI_REG_IOC);
+
+ ctlr->mode_bits = SPI_LSB_FIRST;
+ ctlr->setup = ar934x_spi_setup;
+ ctlr->transfer_one_message = ar934x_spi_transfer_one_message;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->num_chipselect = 3;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+
+ sp = spi_controller_get_devdata(ctlr);
+ sp->base = base;
+ sp->clk = clk;
+ sp->clk_freq = clk_get_rate(clk);
+ sp->ctlr = ctlr;
+
+ return devm_spi_register_controller(&pdev->dev, ctlr);
+}
+
+static int ar934x_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct ar934x_spi *sp;
+
+ ctlr = dev_get_drvdata(&pdev->dev);
+ sp = spi_controller_get_devdata(ctlr);
+
+ clk_disable_unprepare(sp->clk);
+
+ return 0;
+}
+
+static struct platform_driver ar934x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ar934x_spi_match,
+ },
+ .probe = ar934x_spi_probe,
+ .remove = ar934x_spi_remove,
+};
+
+module_platform_driver(ar934x_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Qualcomm Atheros AR934x/QCA95xx");
+MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index 64d4c441b641..ea6e4a7b3feb 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -6,14 +6,13 @@
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/efm32-spi.h>
+#include <linux/of.h>
#define DRIVER_NAME "efm32-spi"
@@ -82,9 +81,6 @@ struct efm32_spi_ddata {
const u8 *tx_buf;
u8 *rx_buf;
unsigned tx_len, rx_len;
-
- /* chip selects */
- unsigned csgpio[];
};
#define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev))
@@ -102,14 +98,6 @@ static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset)
return readl_relaxed(ddata->base + offset);
}
-static void efm32_spi_chipselect(struct spi_device *spi, int is_on)
-{
- struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
- int value = !(spi->mode & SPI_CS_HIGH) == !(is_on == BITBANG_CS_ACTIVE);
-
- gpio_set_value(ddata->csgpio[spi->chip_select], value);
-}
-
static int efm32_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
@@ -320,17 +308,11 @@ static int efm32_spi_probe(struct platform_device *pdev)
int ret;
struct spi_master *master;
struct device_node *np = pdev->dev.of_node;
- int num_cs, i;
if (!np)
return -EINVAL;
- num_cs = of_gpio_named_count(np, "cs-gpios");
- if (num_cs < 0)
- return num_cs;
-
- master = spi_alloc_master(&pdev->dev,
- sizeof(*ddata) + num_cs * sizeof(unsigned));
+ master = spi_alloc_master(&pdev->dev, sizeof(*ddata));
if (!master) {
dev_dbg(&pdev->dev,
"failed to allocate spi master controller\n");
@@ -340,14 +322,13 @@ static int efm32_spi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
- master->num_chipselect = num_cs;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ master->use_gpio_descriptors = true;
ddata = spi_master_get_devdata(master);
ddata->bitbang.master = master;
- ddata->bitbang.chipselect = efm32_spi_chipselect;
ddata->bitbang.setup_transfer = efm32_spi_setup_transfer;
ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
@@ -361,25 +342,6 @@ static int efm32_spi_probe(struct platform_device *pdev)
goto err;
}
- for (i = 0; i < num_cs; ++i) {
- ret = of_get_named_gpio(np, "cs-gpios", i);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get csgpio#%u (%d)\n",
- i, ret);
- goto err;
- }
- ddata->csgpio[i] = ret;
- dev_dbg(&pdev->dev, "csgpio#%u = %u\n", i, ddata->csgpio[i]);
- ret = devm_gpio_request_one(&pdev->dev, ddata->csgpio[i],
- GPIOF_OUT_INIT_LOW, DRIVER_NAME);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to configure csgpio#%u (%d)\n",
- i, ret);
- goto err;
- }
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
new file mode 100644
index 000000000000..37a3e0f8e752
--- /dev/null
+++ b/drivers/spi/spi-fsi.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (C) IBM Corporation 2020
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/fsi.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#define FSI_ENGID_SPI 0x23
+#define FSI_MBOX_ROOT_CTRL_8 0x2860
+
+#define FSI2SPI_DATA0 0x00
+#define FSI2SPI_DATA1 0x04
+#define FSI2SPI_CMD 0x08
+#define FSI2SPI_CMD_WRITE BIT(31)
+#define FSI2SPI_RESET 0x18
+#define FSI2SPI_STATUS 0x1c
+#define FSI2SPI_STATUS_ANY_ERROR BIT(31)
+#define FSI2SPI_IRQ 0x20
+
+#define SPI_FSI_BASE 0x70000
+#define SPI_FSI_INIT_TIMEOUT_MS 1000
+#define SPI_FSI_MAX_TRANSFER_SIZE 2048
+
+#define SPI_FSI_ERROR 0x0
+#define SPI_FSI_COUNTER_CFG 0x1
+#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
+#define SPI_FSI_CFG1 0x2
+#define SPI_FSI_CLOCK_CFG 0x3
+#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
+#define SPI_FSI_CLOCK_CFG_ECC_DISABLE (BIT_ULL(35) | BIT_ULL(33))
+#define SPI_FSI_CLOCK_CFG_RESET1 (BIT_ULL(36) | BIT_ULL(38))
+#define SPI_FSI_CLOCK_CFG_RESET2 (BIT_ULL(37) | BIT_ULL(39))
+#define SPI_FSI_CLOCK_CFG_MODE (BIT_ULL(41) | BIT_ULL(42))
+#define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL GENMASK_ULL(51, 44)
+#define SPI_FSI_CLOCK_CFG_SCK_NO_DEL BIT_ULL(51)
+#define SPI_FSI_CLOCK_CFG_SCK_DIV GENMASK_ULL(63, 52)
+#define SPI_FSI_MMAP 0x4
+#define SPI_FSI_DATA_TX 0x5
+#define SPI_FSI_DATA_RX 0x6
+#define SPI_FSI_SEQUENCE 0x7
+#define SPI_FSI_SEQUENCE_STOP 0x00
+#define SPI_FSI_SEQUENCE_SEL_SLAVE(x) (0x10 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_SHIFT_OUT(x) (0x30 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_SHIFT_IN(x) (0x40 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_COPY_DATA_TX 0xc0
+#define SPI_FSI_SEQUENCE_BRANCH(x) (0xe0 | ((x) & 0xf))
+#define SPI_FSI_STATUS 0x8
+#define SPI_FSI_STATUS_ERROR \
+ (GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12))
+#define SPI_FSI_STATUS_SEQ_STATE GENMASK_ULL(55, 48)
+#define SPI_FSI_STATUS_SEQ_STATE_IDLE BIT_ULL(48)
+#define SPI_FSI_STATUS_TDR_UNDERRUN BIT_ULL(57)
+#define SPI_FSI_STATUS_TDR_OVERRUN BIT_ULL(58)
+#define SPI_FSI_STATUS_TDR_FULL BIT_ULL(59)
+#define SPI_FSI_STATUS_RDR_UNDERRUN BIT_ULL(61)
+#define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
+#define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
+#define SPI_FSI_STATUS_ANY_ERROR \
+ (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \
+ SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
+ SPI_FSI_STATUS_RDR_OVERRUN)
+#define SPI_FSI_PORT_CTRL 0x9
+
+struct fsi_spi {
+ struct device *dev; /* SPI controller device */
+ struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
+ u32 base;
+};
+
+struct fsi_spi_sequence {
+ int bit;
+ u64 data;
+};
+
+static int fsi_spi_check_status(struct fsi_spi *ctx)
+{
+ int rc;
+ u32 sts;
+ __be32 sts_be;
+
+ rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be,
+ sizeof(sts_be));
+ if (rc)
+ return rc;
+
+ sts = be32_to_cpu(sts_be);
+ if (sts & FSI2SPI_STATUS_ANY_ERROR) {
+ dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
+{
+ int rc;
+ __be32 cmd_be;
+ __be32 data_be;
+ u32 cmd = offset + ctx->base;
+
+ *value = 0ULL;
+
+ if (cmd & FSI2SPI_CMD_WRITE)
+ return -EINVAL;
+
+ cmd_be = cpu_to_be32(cmd);
+ rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
+ if (rc)
+ return rc;
+
+ rc = fsi_spi_check_status(ctx);
+ if (rc)
+ return rc;
+
+ rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be,
+ sizeof(data_be));
+ if (rc)
+ return rc;
+
+ *value |= (u64)be32_to_cpu(data_be) << 32;
+
+ rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be,
+ sizeof(data_be));
+ if (rc)
+ return rc;
+
+ *value |= (u64)be32_to_cpu(data_be);
+ dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);
+
+ return 0;
+}
+
+static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
+{
+ int rc;
+ __be32 cmd_be;
+ __be32 data_be;
+ u32 cmd = offset + ctx->base;
+
+ if (cmd & FSI2SPI_CMD_WRITE)
+ return -EINVAL;
+
+ dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);
+
+ data_be = cpu_to_be32(upper_32_bits(value));
+ rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be,
+ sizeof(data_be));
+ if (rc)
+ return rc;
+
+ data_be = cpu_to_be32(lower_32_bits(value));
+ rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be,
+ sizeof(data_be));
+ if (rc)
+ return rc;
+
+ cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
+ rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
+ if (rc)
+ return rc;
+
+ return fsi_spi_check_status(ctx);
+}
+
+static int fsi_spi_data_in(u64 in, u8 *rx, int len)
+{
+ int i;
+ int num_bytes = min(len, 8);
+
+ for (i = 0; i < num_bytes; ++i)
+ rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i)));
+
+ return num_bytes;
+}
+
+static int fsi_spi_data_out(u64 *out, const u8 *tx, int len)
+{
+ int i;
+ int num_bytes = min(len, 8);
+ u8 *out_bytes = (u8 *)out;
+
+ /* Unused bytes of the tx data should be 0. */
+ *out = 0ULL;
+
+ for (i = 0; i < num_bytes; ++i)
+ out_bytes[8 - (i + 1)] = tx[i];
+
+ return num_bytes;
+}
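
fsi_spi_data_out() and fsi_spi_data_in() mirror each other: the first buffer byte occupies the most significant byte of the 64-bit shift word, and short transfers leave the low-order bytes zero. A standalone worked example of the packing; note that the byte-index trick assumes a little-endian host (an assumption of this sketch, matching typical BMC hosts):

    /*
     * Standalone sketch of the byte ordering used by fsi_spi_data_out():
     * on a little-endian host, index 7 of the u64's byte view is its
     * most significant byte, so tx[0] lands at the top of the word.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint8_t tx[3] = { 0xaa, 0xbb, 0xcc };
        uint64_t out = 0;       /* unused bytes must stay 0 */
        uint8_t *out_bytes = (uint8_t *)&out;
        int i, num_bytes = 3;   /* min(len, 8) for a 3-byte transfer */

        /* pack into the high-order bytes, as fsi_spi_data_out() does */
        for (i = 0; i < num_bytes; ++i)
            out_bytes[8 - (i + 1)] = tx[i];

        /* prints 0xaabbcc0000000000 on a little-endian host */
        printf("out word: 0x%016llx\n", (unsigned long long)out);
        return 0;
    }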
+
+static int fsi_spi_reset(struct fsi_spi *ctx)
+{
+ int rc;
+
+ dev_dbg(ctx->dev, "Resetting SPI controller.\n");
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ SPI_FSI_CLOCK_CFG_RESET1);
+ if (rc)
+ return rc;
+
+ return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ SPI_FSI_CLOCK_CFG_RESET2);
+}
+
+static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
+{
+ /*
+ * Add the next byte of instruction to the 8-byte sequence register.
+ * Then decrement the counter so that the next instruction will go in
+ * the right place. Return the index of the slot that was just
+ * filled in the sequence register.
+ */
+ seq->data |= (u64)val << seq->bit;
+ seq->bit -= 8;
+
+ return ((64 - seq->bit) / 8) - 2;
+}
+
+static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
+{
+ seq->bit = 56;
+ seq->data = 0ULL;
+}
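
The sequencer packs one-byte opcodes into a 64-bit register from the most significant byte down; the value returned by add() is the 0-based slot index of the opcode just placed, which the transfer code reuses as a BRANCH target for looping. A standalone worked example of the packing and index math:

    /*
     * Standalone sketch of fsi_spi_sequence_init()/_add(): opcodes fill
     * the 64-bit word from the top down, and add() returns the 0-based
     * slot index of the opcode it just placed. The opcode values below
     * follow the SPI_FSI_SEQUENCE_* encodings above.
     */
    #include <stdio.h>
    #include <stdint.h>

    struct seq {
        int bit;
        uint64_t data;
    };

    static int seq_add(struct seq *s, uint8_t val)
    {
        s->data |= (uint64_t)val << s->bit;
        s->bit -= 8;
        return ((64 - s->bit) / 8) - 2; /* index of the slot just filled */
    }

    int main(void)
    {
        struct seq s = { .bit = 56, .data = 0 };

        printf("slot %d\n", seq_add(&s, 0x11));  /* slot 0: select slave 1 */
        printf("slot %d\n", seq_add(&s, 0x38));  /* slot 1: shift out 8 */
        printf("data 0x%016llx\n", (unsigned long long)s.data);
        return 0;
    }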
+
+static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
+ struct fsi_spi_sequence *seq,
+ struct spi_transfer *transfer)
+{
+ int loops;
+ int idx;
+ int rc;
+ u8 len = min(transfer->len, 8U);
+ u8 rem = transfer->len % len;
+
+ loops = transfer->len / len;
+
+ if (transfer->tx_buf) {
+ idx = fsi_spi_sequence_add(seq,
+ SPI_FSI_SEQUENCE_SHIFT_OUT(len));
+ if (rem)
+ rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
+ } else if (transfer->rx_buf) {
+ idx = fsi_spi_sequence_add(seq,
+ SPI_FSI_SEQUENCE_SHIFT_IN(len));
+ if (rem)
+ rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
+ } else {
+ return -EINVAL;
+ }
+
+ if (loops > 1) {
+ fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
+
+ if (rem)
+ fsi_spi_sequence_add(seq, rem);
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG,
+ SPI_FSI_COUNTER_CFG_LOOPS(loops - 1));
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int fsi_spi_transfer_data(struct fsi_spi *ctx,
+ struct spi_transfer *transfer)
+{
+ int rc = 0;
+ u64 status = 0ULL;
+
+ if (transfer->tx_buf) {
+ int nb;
+ int sent = 0;
+ u64 out = 0ULL;
+ const u8 *tx = transfer->tx_buf;
+
+ while (transfer->len > sent) {
+ nb = fsi_spi_data_out(&out, &tx[sent],
+ (int)transfer->len - sent);
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out);
+ if (rc)
+ return rc;
+
+ do {
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
+ &status);
+ if (rc)
+ return rc;
+
+ if (status & SPI_FSI_STATUS_ANY_ERROR) {
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ return -EREMOTEIO;
+ }
+ } while (status & SPI_FSI_STATUS_TDR_FULL);
+
+ sent += nb;
+ }
+ } else if (transfer->rx_buf) {
+ int recv = 0;
+ u64 in = 0ULL;
+ u8 *rx = transfer->rx_buf;
+
+ while (transfer->len > recv) {
+ do {
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
+ &status);
+ if (rc)
+ return rc;
+
+ if (status & SPI_FSI_STATUS_ANY_ERROR) {
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ return -EREMOTEIO;
+ }
+ } while (!(status & SPI_FSI_STATUS_RDR_FULL));
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
+ if (rc)
+ return rc;
+
+ recv += fsi_spi_data_in(in, &rx[recv],
+ (int)transfer->len - recv);
+ }
+ }
+
+ return 0;
+}
+
+static int fsi_spi_transfer_init(struct fsi_spi *ctx)
+{
+ int rc;
+ bool reset = false;
+ unsigned long end;
+ u64 seq_state;
+ u64 clock_cfg = 0ULL;
+ u64 status = 0ULL;
+ u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
+ SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
+ FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4);
+
+ end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
+ do {
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
+ if (rc)
+ return rc;
+
+ seq_state = status & SPI_FSI_STATUS_SEQ_STATE;
+
+ if (status & (SPI_FSI_STATUS_ANY_ERROR |
+ SPI_FSI_STATUS_TDR_FULL |
+ SPI_FSI_STATUS_RDR_FULL)) {
+ if (reset)
+ return -EIO;
+
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ reset = true;
+ continue;
+ }
+ } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
+ if (rc)
+ return rc;
+
+ if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE |
+ SPI_FSI_CLOCK_CFG_ECC_DISABLE |
+ SPI_FSI_CLOCK_CFG_MODE |
+ SPI_FSI_CLOCK_CFG_SCK_RECV_DEL |
+ SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg)
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ wanted_clock_cfg);
+
+ return rc;
+}
+
+static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *mesg)
+{
+ int rc = 0;
+ u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
+ struct spi_transfer *transfer;
+ struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
+
+ list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
+ struct fsi_spi_sequence seq;
+ struct spi_transfer *next = NULL;
+
+ /* Sequencer must do shift out (tx) first. */
+ if (!transfer->tx_buf ||
+ transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len);
+
+ rc = fsi_spi_transfer_init(ctx);
+ if (rc < 0)
+ goto error;
+
+ fsi_spi_sequence_init(&seq);
+ fsi_spi_sequence_add(&seq, seq_slave);
+
+ rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
+ if (rc)
+ goto error;
+
+ if (!list_is_last(&transfer->transfer_list,
+ &mesg->transfers)) {
+ next = list_next_entry(transfer, transfer_list);
+
+ /* Sequencer can only do shift in (rx) after tx. */
+ if (next->rx_buf) {
+ if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
+ next->len);
+
+ rc = fsi_spi_sequence_transfer(ctx, &seq,
+ next);
+ if (rc)
+ goto error;
+ } else {
+ next = NULL;
+ }
+ }
+
+ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0));
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data);
+ if (rc)
+ goto error;
+
+ rc = fsi_spi_transfer_data(ctx, transfer);
+ if (rc)
+ goto error;
+
+ if (next) {
+ rc = fsi_spi_transfer_data(ctx, next);
+ if (rc)
+ goto error;
+
+ transfer = next;
+ }
+ }
+
+error:
+ mesg->status = rc;
+ spi_finalize_current_message(ctlr);
+
+ return rc;
+}
+
+static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
+{
+ return SPI_FSI_MAX_TRANSFER_SIZE;
+}
+
+static int fsi_spi_probe(struct device *dev)
+{
+ int rc;
+ u32 root_ctrl_8;
+ struct device_node *np;
+ int num_controllers_registered = 0;
+ struct fsi_device *fsi = to_fsi_dev(dev);
+
+ /*
+ * Check the SPI mux before attempting to probe. If the mux isn't set
+ * then the SPI controllers can't access their slave devices.
+ */
+ rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8,
+ sizeof(root_ctrl_8));
+ if (rc)
+ return rc;
+
+ if (!root_ctrl_8) {
+ dev_dbg(dev, "SPI mux not set, aborting probe.\n");
+ return -ENODEV;
+ }
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ u32 base;
+ struct fsi_spi *ctx;
+ struct spi_controller *ctlr;
+
+ if (of_property_read_u32(np, "reg", &base))
+ continue;
+
+ ctlr = spi_alloc_master(dev, sizeof(*ctx));
+ if (!ctlr)
+ break;
+
+ ctlr->dev.of_node = np;
+ ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctlr->max_transfer_size = fsi_spi_max_transfer_size;
+ ctlr->transfer_one_message = fsi_spi_transfer_one_message;
+
+ ctx = spi_controller_get_devdata(ctlr);
+ ctx->dev = &ctlr->dev;
+ ctx->fsi = fsi;
+ ctx->base = base + SPI_FSI_BASE;
+
+ rc = devm_spi_register_controller(dev, ctlr);
+ if (rc)
+ spi_controller_put(ctlr);
+ else
+ num_controllers_registered++;
+ }
+
+ if (!num_controllers_registered)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct fsi_device_id fsi_spi_ids[] = {
+ { FSI_ENGID_SPI, FSI_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(fsi, fsi_spi_ids);
+
+static struct fsi_driver fsi_spi_driver = {
+ .id_table = fsi_spi_ids,
+ .drv = {
+ .name = "spi-fsi",
+ .bus = &fsi_bus_type,
+ .probe = fsi_spi_probe,
+ },
+};
+module_fsi_driver(fsi_spi_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("FSI attached SPI controller");
+MODULE_LICENSE("GPL");
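Editorial sketch (not part of the patch): fsi_spi_transfer_one_message() above
requires every message to start with a tx transfer and supports at most one rx
transfer immediately after it. A conforming client message, using only the
generic SPI API (the opcode and lengths are made-up values):

static int example_fsi_spi_read_id(struct spi_device *spi)
{
	u8 cmd = 0x9f;				/* arbitrary example opcode */
	u8 id[3];
	struct spi_transfer xfers[2] = {
		{ .tx_buf = &cmd, .len = 1 },		/* shift out (tx) first */
		{ .rx_buf = id, .len = sizeof(id) },	/* then shift in (rx) */
	};
	struct spi_message m;

	spi_message_init_with_transfers(&m, xfers, ARRAY_SIZE(xfers));
	return spi_sync(spi, &m);
}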
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 6ec2dcb8c57a..50e41f66a2d7 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -20,16 +20,9 @@
#define DRIVER_NAME "fsl-dspi"
-#ifdef CONFIG_M5441x
-#define DSPI_FIFO_SIZE 16
-#else
-#define DSPI_FIFO_SIZE 4
-#endif
-#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
-
#define SPI_MCR 0x00
#define SPI_MCR_MASTER BIT(31)
-#define SPI_MCR_PCSIS (0x3F << 16)
+#define SPI_MCR_PCSIS(x) ((x) << 16)
#define SPI_MCR_CLR_TXF BIT(11)
#define SPI_MCR_CLR_RXF BIT(10)
#define SPI_MCR_XSPI BIT(3)
@@ -79,6 +72,7 @@
#define SPI_RSER 0x30
#define SPI_RSER_TCFQE BIT(31)
#define SPI_RSER_EOQFE BIT(28)
+#define SPI_RSER_CMDTCFE BIT(23)
#define SPI_PUSHR 0x34
#define SPI_PUSHR_CMD_CONT BIT(15)
@@ -109,57 +103,95 @@
#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
-/* Register offsets for regmap_pushr */
-#define PUSHR_CMD 0x0
-#define PUSHR_TX 0x2
-
#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
struct chip_data {
u32 ctar_val;
- u16 void_write_data;
};
enum dspi_trans_mode {
DSPI_EOQ_MODE = 0,
- DSPI_TCFQ_MODE,
+ DSPI_XSPI_MODE,
DSPI_DMA_MODE,
};
struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
u8 max_clock_factor;
- bool ptp_sts_supported;
- bool xspi_mode;
-};
-
-static const struct fsl_dspi_devtype_data vf610_data = {
- .trans_mode = DSPI_DMA_MODE,
- .max_clock_factor = 2,
-};
-
-static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
- .trans_mode = DSPI_TCFQ_MODE,
- .max_clock_factor = 8,
- .ptp_sts_supported = true,
- .xspi_mode = true,
+ int fifo_size;
};
-static const struct fsl_dspi_devtype_data ls2085a_data = {
- .trans_mode = DSPI_TCFQ_MODE,
- .max_clock_factor = 8,
- .ptp_sts_supported = true,
+enum {
+ LS1021A,
+ LS1012A,
+ LS1028A,
+ LS1043A,
+ LS1046A,
+ LS2080A,
+ LS2085A,
+ LX2160A,
+ MCF5441X,
+ VF610,
};
-static const struct fsl_dspi_devtype_data coldfire_data = {
- .trans_mode = DSPI_EOQ_MODE,
- .max_clock_factor = 8,
+static const struct fsl_dspi_devtype_data devtype_data[] = {
+ [VF610] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 2,
+ .fifo_size = 4,
+ },
+ [LS1021A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS1012A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS1028A] = {
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS1043A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS1046A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS2080A] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS2085A] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LX2160A] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [MCF5441X] = {
+ .trans_mode = DSPI_EOQ_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
};
struct fsl_dspi_dma {
- /* Length of transfer in words of DSPI_FIFO_SIZE */
- u32 curr_xfer_len;
-
u32 *tx_dma_buf;
struct dma_chan *chan_tx;
dma_addr_t tx_dma_phys;
@@ -189,36 +221,99 @@ struct fsl_dspi {
size_t len;
const void *tx;
void *rx;
- void *rx_end;
- u16 void_write_data;
u16 tx_cmd;
- u8 bits_per_word;
- u8 bytes_per_word;
const struct fsl_dspi_devtype_data *devtype_data;
- wait_queue_head_t waitq;
- u32 waitflags;
+ struct completion xfer_done;
struct fsl_dspi_dma *dma;
+
+ int oper_word_size;
+ int oper_bits_per_word;
+
+ int words_in_flight;
+
+ /*
+ * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
+ * individually (in XSPI mode)
+ */
+ int pushr_cmd;
+ int pushr_tx;
+
+ void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
+ void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};
+static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ memcpy(txdata, dspi->tx, dspi->oper_word_size);
+ dspi->tx += dspi->oper_word_size;
+}
+
+static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ memcpy(dspi->rx, &rxdata, dspi->oper_word_size);
+ dspi->rx += dspi->oper_word_size;
+}
+
+static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ *txdata = cpu_to_be32(*(u32 *)dspi->tx);
+ dspi->tx += sizeof(u32);
+}
+
+static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ *(u32 *)dspi->rx = be32_to_cpu(rxdata);
+ dspi->rx += sizeof(u32);
+}
+
+static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ *txdata = cpu_to_be16(*(u16 *)dspi->tx);
+ dspi->tx += sizeof(u16);
+}
+
+static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ *(u16 *)dspi->rx = be16_to_cpu(rxdata);
+ dspi->rx += sizeof(u16);
+}
+
+static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ u16 hi = *(u16 *)dspi->tx;
+ u16 lo = *(u16 *)(dspi->tx + 2);
+
+ *txdata = (u32)hi << 16 | lo;
+ dspi->tx += sizeof(u32);
+}
+
+static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ u16 hi = rxdata & 0xffff;
+ u16 lo = rxdata >> 16;
+
+ *(u16 *)dspi->rx = lo;
+ *(u16 *)(dspi->rx + 2) = hi;
+ dspi->rx += sizeof(u32);
+}
+
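+/*
+ * Editorial worked example (not in the patch), assuming a little-endian
+ * host and MSB-first shifting: for an 8-bits-per-word client transfer
+ * run at oper_bits_per_word == 32, the buffer bytes {0x11, 0x22, 0x33,
+ * 0x44} are read as the u32 0x44332211 and byte-swapped by cpu_to_be32()
+ * to 0x11223344, so the shift register sends 0x11 first and the on-wire
+ * byte order is preserved.
+ */
+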
+/*
+ * Pop one word from the TX buffer for pushing into the
+ * PUSHR register (TX FIFO)
+ */
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
u32 txdata = 0;
- if (dspi->tx) {
- if (dspi->bytes_per_word == 1)
- txdata = *(u8 *)dspi->tx;
- else if (dspi->bytes_per_word == 2)
- txdata = *(u16 *)dspi->tx;
- else /* dspi->bytes_per_word == 4 */
- txdata = *(u32 *)dspi->tx;
- dspi->tx += dspi->bytes_per_word;
- }
- dspi->len -= dspi->bytes_per_word;
+ if (dspi->tx)
+ dspi->host_to_dev(dspi, &txdata);
+ dspi->len -= dspi->oper_word_size;
return txdata;
}
+/* Prepare one TX FIFO entry (txdata plus cmd) */
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
@@ -231,21 +326,12 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
return cmd << 16 | data;
}
+/* Push one word to the RX buffer from the POPR register (RX FIFO) */
static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
if (!dspi->rx)
return;
-
- /* Mask off undefined bits */
- rxdata &= (1 << dspi->bits_per_word) - 1;
-
- if (dspi->bytes_per_word == 1)
- *(u8 *)dspi->rx = rxdata;
- else if (dspi->bytes_per_word == 2)
- *(u16 *)dspi->rx = rxdata;
- else /* dspi->bytes_per_word == 4 */
- *(u32 *)dspi->rx = rxdata;
- dspi->rx += dspi->bytes_per_word;
+ dspi->dev_to_host(dspi, rxdata);
}
static void dspi_tx_dma_callback(void *arg)
@@ -263,7 +349,7 @@ static void dspi_rx_dma_callback(void *arg)
int i;
if (dspi->rx) {
- for (i = 0; i < dma->curr_xfer_len; i++)
+ for (i = 0; i < dspi->words_in_flight; i++)
dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
}
@@ -277,12 +363,12 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
int time_left;
int i;
- for (i = 0; i < dma->curr_xfer_len; i++)
+ for (i = 0; i < dspi->words_in_flight; i++)
dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
dma->tx_dma_phys,
- dma->curr_xfer_len *
+ dspi->words_in_flight *
DMA_SLAVE_BUSWIDTH_4_BYTES,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -300,7 +386,7 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
dma->rx_dma_phys,
- dma->curr_xfer_len *
+ dspi->words_in_flight *
DMA_SLAVE_BUSWIDTH_4_BYTES,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -348,45 +434,42 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
return 0;
}
+static void dspi_setup_accel(struct fsl_dspi *dspi);
+
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
struct spi_message *message = dspi->cur_msg;
struct device *dev = &dspi->pdev->dev;
- struct fsl_dspi_dma *dma = dspi->dma;
- int curr_remaining_bytes;
- int bytes_per_buffer;
int ret = 0;
- curr_remaining_bytes = dspi->len;
- bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
- while (curr_remaining_bytes) {
- /* Check if current transfer fits the DMA buffer */
- dma->curr_xfer_len = curr_remaining_bytes
- / dspi->bytes_per_word;
- if (dma->curr_xfer_len > bytes_per_buffer)
- dma->curr_xfer_len = bytes_per_buffer;
+ /*
+ * dspi->len gets decremented by dspi_pop_tx_pushr in
+ * dspi_next_xfer_dma_submit
+ */
+ while (dspi->len) {
+ /* Figure out operational bits-per-word for this chunk */
+ dspi_setup_accel(dspi);
+
+ dspi->words_in_flight = dspi->len / dspi->oper_word_size;
+ if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
+ dspi->words_in_flight = dspi->devtype_data->fifo_size;
+
+ message->actual_length += dspi->words_in_flight *
+ dspi->oper_word_size;
ret = dspi_next_xfer_dma_submit(dspi);
if (ret) {
dev_err(dev, "DMA transfer failed\n");
- goto exit;
-
- } else {
- const int len =
- dma->curr_xfer_len * dspi->bytes_per_word;
- curr_remaining_bytes -= len;
- message->actual_length += len;
- if (curr_remaining_bytes < 0)
- curr_remaining_bytes = 0;
+ break;
}
}
-exit:
return ret;
}
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
+ int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct device *dev = &dspi->pdev->dev;
struct dma_slave_config cfg;
struct fsl_dspi_dma *dma;
@@ -410,15 +493,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
goto err_tx_channel;
}
- dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
- &dma->tx_dma_phys, GFP_KERNEL);
+ dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
+ dma_bufsize, &dma->tx_dma_phys,
+ GFP_KERNEL);
if (!dma->tx_dma_buf) {
ret = -ENOMEM;
goto err_tx_dma_buf;
}
- dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
- &dma->rx_dma_phys, GFP_KERNEL);
+ dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
+ dma_bufsize, &dma->rx_dma_phys,
+ GFP_KERNEL);
if (!dma->rx_dma_buf) {
ret = -ENOMEM;
goto err_rx_dma_buf;
@@ -454,11 +539,11 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
return 0;
err_slave_config:
- dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
- dma->rx_dma_buf, dma->rx_dma_phys);
+ dma_free_coherent(dma->chan_rx->device->dev,
+ dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
- dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
- dma->tx_dma_buf, dma->tx_dma_phys);
+ dma_free_coherent(dma->chan_tx->device->dev,
+ dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
dma_release_channel(dma->chan_tx);
err_tx_channel:
@@ -472,21 +557,21 @@ err_tx_channel:
static void dspi_release_dma(struct fsl_dspi *dspi)
{
+ int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct fsl_dspi_dma *dma = dspi->dma;
- struct device *dev = &dspi->pdev->dev;
if (!dma)
return;
if (dma->chan_tx) {
- dma_unmap_single(dev, dma->tx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
+ dma_bufsize, DMA_TO_DEVICE);
dma_release_channel(dma->chan_tx);
}
if (dma->chan_rx) {
- dma_unmap_single(dev, dma->rx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+ dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
+ dma_bufsize, DMA_FROM_DEVICE);
dma_release_channel(dma->chan_rx);
}
}
@@ -562,124 +647,220 @@ static void ns_delay_scale(char *psc, char *sc, int delay_ns,
}
}
-static void fifo_write(struct fsl_dspi *dspi)
+static void dspi_pushr_write(struct fsl_dspi *dspi)
{
regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}
-static void cmd_fifo_write(struct fsl_dspi *dspi)
+static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
- u16 cmd = dspi->tx_cmd;
-
- if (dspi->len > 0)
+ /*
+ * The only time when the PCS doesn't need continuation after this word
+ * is when it's last. We need to look ahead, because we actually call
+ * dspi_pop_tx (the function that decrements dspi->len) _after_
+ * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
+ * word is enough. If there's more to transmit than that,
+ * dspi_xspi_write will know to split the FIFO writes in 2, and
+ * generate a new PUSHR command with the final word that will have PCS
+ * deasserted (not continued) here.
+ */
+ if (dspi->len > dspi->oper_word_size)
cmd |= SPI_PUSHR_CMD_CONT;
- regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
+ regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}
-static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
+static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
- regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
+ regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}
-static void dspi_tcfq_write(struct fsl_dspi *dspi)
+static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
- /* Clear transfer count */
- dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
+ int num_bytes = num_words * dspi->oper_word_size;
+ u16 tx_cmd = dspi->tx_cmd;
- if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
- /* Write the CMD FIFO entry first, and then the two
- * corresponding TX FIFO entries.
- */
- u32 data = dspi_pop_tx(dspi);
+ /*
+ * If the PCS needs to de-assert (i.e. we're at the end of the buffer
+ * and cs_change does not want the PCS to stay on), then we need a new
+ * PUSHR command, since this one (for the body of the buffer)
+ * necessarily has the CONT bit set.
+ * So send one word less during this go, to force a split and a command
+ * with a single word next time, when CONT will be unset.
+ */
+ if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
+ tx_cmd |= SPI_PUSHR_CMD_EOQ;
- cmd_fifo_write(dspi);
- tx_fifo_write(dspi, data & 0xFFFF);
- tx_fifo_write(dspi, data >> 16);
- } else {
- /* Write one entry to both TX FIFO and CMD FIFO
- * simultaneously.
- */
- fifo_write(dspi);
- }
-}
+ /* Update CTARE */
+ regmap_write(dspi->regmap, SPI_CTARE(0),
+ SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
+ SPI_CTARE_DTCP(num_words));
-static u32 fifo_read(struct fsl_dspi *dspi)
-{
- u32 rxdata = 0;
+ /*
+ * Write the CMD FIFO entry first, and then the two
+ * corresponding TX FIFO entries (or one...).
+ */
+ dspi_pushr_cmd_write(dspi, tx_cmd);
- regmap_read(dspi->regmap, SPI_POPR, &rxdata);
- return rxdata;
-}
+ /* Fill TX FIFO with as many transfers as possible */
+ while (num_words--) {
+ u32 data = dspi_pop_tx(dspi);
-static void dspi_tcfq_read(struct fsl_dspi *dspi)
-{
- dspi_push_rx(dspi, fifo_read(dspi));
+ dspi_pushr_txdata_write(dspi, data & 0xFFFF);
+ if (dspi->oper_bits_per_word > 16)
+ dspi_pushr_txdata_write(dspi, data >> 16);
+ }
}
-static void dspi_eoq_write(struct fsl_dspi *dspi)
+static void dspi_eoq_fifo_write(struct fsl_dspi *dspi, int num_words)
{
- int fifo_size = DSPI_FIFO_SIZE;
u16 xfer_cmd = dspi->tx_cmd;
/* Fill TX FIFO with as many transfers as possible */
- while (dspi->len && fifo_size--) {
+ while (num_words--) {
dspi->tx_cmd = xfer_cmd;
/* Request EOQF for last transfer in FIFO */
- if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
+ if (num_words == 0)
dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
- /* Clear transfer count for first transfer in FIFO */
- if (fifo_size == (DSPI_FIFO_SIZE - 1))
- dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
/* Write combined TX FIFO and CMD FIFO entry */
- fifo_write(dspi);
+ dspi_pushr_write(dspi);
}
}
-static void dspi_eoq_read(struct fsl_dspi *dspi)
+static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
- int fifo_size = DSPI_FIFO_SIZE;
+ u32 rxdata = 0;
+
+ regmap_read(dspi->regmap, SPI_POPR, &rxdata);
+ return rxdata;
+}
+
+static void dspi_fifo_read(struct fsl_dspi *dspi)
+{
+ int num_fifo_entries = dspi->words_in_flight;
/* Read one FIFO entry and push to rx buffer */
- while ((dspi->rx < dspi->rx_end) && fifo_size--)
- dspi_push_rx(dspi, fifo_read(dspi));
+ while (num_fifo_entries--)
+ dspi_push_rx(dspi, dspi_popr_read(dspi));
}
-static int dspi_rxtx(struct fsl_dspi *dspi)
+static void dspi_setup_accel(struct fsl_dspi *dspi)
{
+ struct spi_transfer *xfer = dspi->cur_transfer;
+ bool odd = !!(dspi->len & 1);
+
+ /* No accel for frames that are not a multiple of 8 bits, at the moment */
+ if (xfer->bits_per_word % 8)
+ goto no_accel;
+
+ if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
+ dspi->oper_bits_per_word = 16;
+ } else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
+ dspi->oper_bits_per_word = 8;
+ } else {
+ /* Start off with maximum supported by hardware */
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
+ dspi->oper_bits_per_word = 32;
+ else
+ dspi->oper_bits_per_word = 16;
+
+ /*
+ * And go down only if the buffer can't be sent with
+ * words this big
+ */
+ do {
+ if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
+ break;
+
+ dspi->oper_bits_per_word /= 2;
+ } while (dspi->oper_bits_per_word > 8);
+ }
+
+ if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
+ dspi->dev_to_host = dspi_8on32_dev_to_host;
+ dspi->host_to_dev = dspi_8on32_host_to_dev;
+ } else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
+ dspi->dev_to_host = dspi_8on16_dev_to_host;
+ dspi->host_to_dev = dspi_8on16_host_to_dev;
+ } else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
+ dspi->dev_to_host = dspi_16on32_dev_to_host;
+ dspi->host_to_dev = dspi_16on32_host_to_dev;
+ } else {
+no_accel:
+ dspi->dev_to_host = dspi_native_dev_to_host;
+ dspi->host_to_dev = dspi_native_host_to_dev;
+ dspi->oper_bits_per_word = xfer->bits_per_word;
+ }
+
+ dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);
+
+ /*
+ * Update CTAR here (code is common for EOQ, XSPI and DMA modes).
+ * We will update CTARE in the portion specific to XSPI, when we
+ * also know the preload value (DTCP).
+ */
+ regmap_write(dspi->regmap, SPI_CTAR(0),
+ dspi->cur_chip->ctar_val |
+ SPI_FRAME_BITS(dspi->oper_bits_per_word));
+}
+
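+/*
+ * Editorial worked example (not in the patch): with XSPI mode and a
+ * 4-entry FIFO, a 10-byte transfer at 8 bits per word is even but
+ * larger than 2 * fifo_size, so the else branch selects
+ * oper_bits_per_word = 32. Eight bytes go out as two 32-bit words and
+ * the 2-byte remainder is sent on the next round as a single 16-bit
+ * word via the !odd branch.
+ */
+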
+static void dspi_fifo_write(struct fsl_dspi *dspi)
+{
+ int num_fifo_entries = dspi->devtype_data->fifo_size;
+ struct spi_transfer *xfer = dspi->cur_transfer;
struct spi_message *msg = dspi->cur_msg;
- enum dspi_trans_mode trans_mode;
- u16 spi_tcnt;
- u32 spi_tcr;
+ int num_words, num_bytes;
- spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
- dspi->progress, !dspi->irq);
+ dspi_setup_accel(dspi);
+
+ /* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
+ if (dspi->oper_word_size == 4)
+ num_fifo_entries /= 2;
- /* Get transfer counter (in number of SPI transfers). It was
- * reset to 0 when transfer(s) were started.
+ /*
+ * Integer division intentionally trims off odd (or non-multiple of 4)
+ * numbers of bytes at the end of the buffer, which will be sent next
+ * time using a smaller oper_word_size.
*/
- regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
- spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
+ num_words = dspi->len / dspi->oper_word_size;
+ if (num_words > num_fifo_entries)
+ num_words = num_fifo_entries;
+
/* Update total number of bytes that were transferred */
- msg->actual_length += spi_tcnt * dspi->bytes_per_word;
- dspi->progress += spi_tcnt;
+ num_bytes = num_words * dspi->oper_word_size;
+ msg->actual_length += num_bytes;
+ dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);
- trans_mode = dspi->devtype_data->trans_mode;
- if (trans_mode == DSPI_EOQ_MODE)
- dspi_eoq_read(dspi);
- else if (trans_mode == DSPI_TCFQ_MODE)
- dspi_tcfq_read(dspi);
+ /*
+ * Update shared variable for use in the next interrupt (both in
+ * dspi_fifo_read and in dspi_fifo_write).
+ */
+ dspi->words_in_flight = num_words;
+
+ spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);
+
+ if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
+ dspi_eoq_fifo_write(dspi, num_words);
+ else
+ dspi_xspi_fifo_write(dspi, num_words);
+ /*
+ * Everything after this point is in a potential race with the next
+ * interrupt, so we must never use dspi->words_in_flight again since it
+ * might already be modified by the next dspi_fifo_write.
+ */
+
+ spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
+ dspi->progress, !dspi->irq);
+}
+
+static int dspi_rxtx(struct fsl_dspi *dspi)
+{
+ dspi_fifo_read(dspi);
if (!dspi->len)
/* Success! */
return 0;
- spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
- dspi->progress, !dspi->irq);
-
- if (trans_mode == DSPI_EOQ_MODE)
- dspi_eoq_write(dspi);
- else if (trans_mode == DSPI_TCFQ_MODE)
- dspi_tcfq_write(dspi);
+ dspi_fifo_write(dspi);
return -EINPROGRESS;
}
@@ -693,7 +874,7 @@ static int dspi_poll(struct fsl_dspi *dspi)
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
- if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF))
+ if (spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF))
break;
} while (--tries);
@@ -711,13 +892,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
- if (!(spi_sr & SPI_SR_EOQF))
+ if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF)))
return IRQ_NONE;
- if (dspi_rxtx(dspi) == 0) {
- dspi->waitflags = 1;
- wake_up_interruptible(&dspi->waitq);
- }
+ if (dspi_rxtx(dspi) == 0)
+ complete(&dspi->xfer_done);
return IRQ_HANDLED;
}
@@ -727,7 +906,6 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
{
struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
- enum dspi_trans_mode trans_mode;
struct spi_transfer *transfer;
int status = 0;
@@ -757,76 +935,38 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
- dspi->void_write_data = dspi->cur_chip->void_write_data;
-
dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
- dspi->rx_end = dspi->rx + transfer->len;
dspi->len = transfer->len;
dspi->progress = 0;
- /* Validated transfer specific frame size (defaults applied) */
- dspi->bits_per_word = transfer->bits_per_word;
- if (transfer->bits_per_word <= 8)
- dspi->bytes_per_word = 1;
- else if (transfer->bits_per_word <= 16)
- dspi->bytes_per_word = 2;
- else
- dspi->bytes_per_word = 4;
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
- regmap_write(dspi->regmap, SPI_CTAR(0),
- dspi->cur_chip->ctar_val |
- SPI_FRAME_BITS(transfer->bits_per_word));
- if (dspi->devtype_data->xspi_mode)
- regmap_write(dspi->regmap, SPI_CTARE(0),
- SPI_FRAME_EBITS(transfer->bits_per_word) |
- SPI_CTARE_DTCP(1));
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
dspi->progress, !dspi->irq);
- trans_mode = dspi->devtype_data->trans_mode;
- switch (trans_mode) {
- case DSPI_EOQ_MODE:
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
- dspi_eoq_write(dspi);
- break;
- case DSPI_TCFQ_MODE:
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
- dspi_tcfq_write(dspi);
- break;
- case DSPI_DMA_MODE:
- regmap_write(dspi->regmap, SPI_RSER,
- SPI_RSER_TFFFE | SPI_RSER_TFFFD |
- SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
status = dspi_dma_xfer(dspi);
- break;
- default:
- dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
- trans_mode);
- status = -EINVAL;
- goto out;
- }
-
- if (!dspi->irq) {
- do {
- status = dspi_poll(dspi);
- } while (status == -EINPROGRESS);
- } else if (trans_mode != DSPI_DMA_MODE) {
- status = wait_event_interruptible(dspi->waitq,
- dspi->waitflags);
- dspi->waitflags = 0;
+ } else {
+ dspi_fifo_write(dspi);
+
+ if (dspi->irq) {
+ wait_for_completion(&dspi->xfer_done);
+ reinit_completion(&dspi->xfer_done);
+ } else {
+ do {
+ status = dspi_poll(dspi);
+ } while (status == -EINPROGRESS);
+ }
}
if (status)
- dev_err(&dspi->pdev->dev,
- "Waiting for transfer to complete failed!\n");
+ break;
spi_transfer_delay_exec(transfer);
}
-out:
message->status = status;
spi_finalize_current_message(ctlr);
@@ -864,8 +1004,6 @@ static int dspi_setup(struct spi_device *spi)
sck_cs_delay = pdata->sck_cs_delay;
}
- chip->void_write_data = 0;
-
clkrate = clk_get_rate(dspi->clk);
hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
@@ -909,9 +1047,34 @@ static void dspi_cleanup(struct spi_device *spi)
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
- { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
- { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
- { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
+ {
+ .compatible = "fsl,vf610-dspi",
+ .data = &devtype_data[VF610],
+ }, {
+ .compatible = "fsl,ls1021a-v1.0-dspi",
+ .data = &devtype_data[LS1021A],
+ }, {
+ .compatible = "fsl,ls1012a-dspi",
+ .data = &devtype_data[LS1012A],
+ }, {
+ .compatible = "fsl,ls1028a-dspi",
+ .data = &devtype_data[LS1028A],
+ }, {
+ .compatible = "fsl,ls1043a-dspi",
+ .data = &devtype_data[LS1043A],
+ }, {
+ .compatible = "fsl,ls1046a-dspi",
+ .data = &devtype_data[LS1046A],
+ }, {
+ .compatible = "fsl,ls2080a-dspi",
+ .data = &devtype_data[LS2080A],
+ }, {
+ .compatible = "fsl,ls2085a-dspi",
+ .data = &devtype_data[LS2085A],
+ }, {
+ .compatible = "fsl,lx2160a-dspi",
+ .data = &devtype_data[LX2160A],
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -997,20 +1160,40 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
},
};
-static void dspi_init(struct fsl_dspi *dspi)
+static int dspi_init(struct fsl_dspi *dspi)
{
- unsigned int mcr = SPI_MCR_PCSIS;
+ unsigned int mcr;
+
+ /* Set idle states for all chip select signals to high */
+ mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->num_chipselect - 1, 0));
- if (dspi->devtype_data->xspi_mode)
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
mcr |= SPI_MCR_XSPI;
if (!spi_controller_is_slave(dspi->ctlr))
mcr |= SPI_MCR_MASTER;
regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
- if (dspi->devtype_data->xspi_mode)
- regmap_write(dspi->regmap, SPI_CTARE(0),
- SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
+
+ switch (dspi->devtype_data->trans_mode) {
+ case DSPI_EOQ_MODE:
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
+ break;
+ case DSPI_XSPI_MODE:
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
+ break;
+ case DSPI_DMA_MODE:
+ regmap_write(dspi->regmap, SPI_RSER,
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ break;
+ default:
+ dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
+ dspi->devtype_data->trans_mode);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int dspi_slave_abort(struct spi_master *master)
@@ -1021,8 +1204,10 @@ static int dspi_slave_abort(struct spi_master *master)
* Terminate all pending DMA transactions for the SPI working
* in SLAVE mode.
*/
- dmaengine_terminate_sync(dspi->dma->chan_rx);
- dmaengine_terminate_sync(dspi->dma->chan_tx);
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ dmaengine_terminate_sync(dspi->dma->chan_rx);
+ dmaengine_terminate_sync(dspi->dma->chan_tx);
+ }
/* Clear the internal DSPI RX and TX FIFO buffers */
regmap_update_bits(dspi->regmap, SPI_MCR,
@@ -1032,16 +1217,33 @@ static int dspi_slave_abort(struct spi_master *master)
return 0;
}
+/*
+ * EOQ mode will inevitably deassert its PCS signal on the last word in a
+ * queue (hardware limitation), so we need to inform the spi_device that
+ * buffers larger than the FIFO size will see the chip select toggle
+ * mid-message, giving it a chance to adapt its message sizes.
+ */
+static size_t dspi_max_message_size(struct spi_device *spi)
+{
+ struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
+
+ if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
+ return dspi->devtype_data->fifo_size;
+
+ return SIZE_MAX;
+}
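+
+/*
+ * Editorial note: on EOQ-only parts such as the MCF5441X this reports a
+ * 16-byte ceiling, so callers that honour spi_max_message_size() can
+ * split their accesses instead of having the PCS toggle mid-message.
+ */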
+
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct regmap_config *regmap_config;
struct fsl_dspi_platform_data *pdata;
struct spi_controller *ctlr;
- int ret, cs_num, bus_num;
+ int ret, cs_num, bus_num = -1;
struct fsl_dspi *dspi;
struct resource *res;
void __iomem *base;
+ bool big_endian;
ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
if (!ctlr)
@@ -1053,6 +1255,7 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->setup = dspi_setup;
ctlr->transfer_one_message = dspi_transfer_one_message;
+ ctlr->max_message_size = dspi_max_message_size;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
@@ -1064,7 +1267,9 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->num_chipselect = pdata->cs_num;
ctlr->bus_num = pdata->bus_num;
- dspi->devtype_data = &coldfire_data;
+ /* Only Coldfire uses platform data */
+ dspi->devtype_data = &devtype_data[MCF5441X];
+ big_endian = true;
} else {
ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
@@ -1074,11 +1279,7 @@ static int dspi_probe(struct platform_device *pdev)
}
ctlr->num_chipselect = cs_num;
- ret = of_property_read_u32(np, "bus-num", &bus_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get bus-num\n");
- goto out_ctlr_put;
- }
+ of_property_read_u32(np, "bus-num", &bus_num);
ctlr->bus_num = bus_num;
if (of_property_read_bool(np, "spi-slave"))
@@ -1090,9 +1291,18 @@ static int dspi_probe(struct platform_device *pdev)
ret = -EFAULT;
goto out_ctlr_put;
}
+
+ big_endian = of_device_is_big_endian(np);
+ }
+ if (big_endian) {
+ dspi->pushr_cmd = 0;
+ dspi->pushr_tx = 2;
+ } else {
+ dspi->pushr_cmd = 2;
+ dspi->pushr_tx = 0;
}
- if (dspi->devtype_data->xspi_mode)
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
else
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
@@ -1104,7 +1314,7 @@ static int dspi_probe(struct platform_device *pdev)
goto out_ctlr_put;
}
- if (dspi->devtype_data->xspi_mode)
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
regmap_config = &dspi_xspi_regmap_config[0];
else
regmap_config = &dspi_regmap_config;
@@ -1116,7 +1326,7 @@ static int dspi_probe(struct platform_device *pdev)
goto out_ctlr_put;
}
- if (dspi->devtype_data->xspi_mode) {
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
dspi->regmap_pushr = devm_regmap_init_mmio(
&pdev->dev, base + SPI_PUSHR,
&dspi_xspi_regmap_config[1]);
@@ -1139,10 +1349,9 @@ static int dspi_probe(struct platform_device *pdev)
if (ret)
goto out_ctlr_put;
- dspi_init(dspi);
-
- if (dspi->devtype_data->trans_mode == DSPI_TCFQ_MODE)
- goto poll_mode;
+ ret = dspi_init(dspi);
+ if (ret)
+ goto out_clk_put;
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
@@ -1159,7 +1368,7 @@ static int dspi_probe(struct platform_device *pdev)
goto out_clk_put;
}
- init_waitqueue_head(&dspi->waitq);
+ init_completion(&dspi->xfer_done);
poll_mode:
@@ -1174,7 +1383,8 @@ poll_mode:
ctlr->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
- ctlr->ptp_sts_supported = dspi->devtype_data->ptp_sts_supported;
+ if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
+ ctlr->ptp_sts_supported = true;
platform_set_drvdata(pdev, ctlr);
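Editorial sketch (not part of the patch): the waitqueue-to-completion
conversion above is the standard kernel idiom, stripped to its essentials here
(function names hypothetical):

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = dev_id;

	/* ... read and ack the hardware status first ... */
	complete(&dspi->xfer_done);
	return IRQ_HANDLED;
}

static void demo_wait_for_xfer(struct fsl_dspi *dspi)
{
	/* sleeps (uninterruptibly) until demo_isr() calls complete() */
	wait_for_completion(&dspi->xfer_done);
	/* re-arm before the next transfer, as dspi_transfer_one_message does */
	reinit_completion(&dspi->xfer_done);
}

Unlike the old waitflags variable, a completion carries its own state, so a
complete() that lands before the waiter sleeps is not lost, and the
interruptible wait (which let a signal truncate a transfer) is gone.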
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index d0b8cc741a24..8b41b70f6f5c 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -86,8 +86,6 @@
#define TCR_RXMSK BIT(19)
#define TCR_TXMSK BIT(18)
-static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128};
-
struct lpspi_config {
u8 bpw;
u8 chip_select;
@@ -125,7 +123,7 @@ struct fsl_lpspi_data {
struct completion dma_rx_completion;
struct completion dma_tx_completion;
- int chipselect[0];
+ int chipselect[];
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
@@ -331,15 +329,14 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
}
for (prescale = 0; prescale < 8; prescale++) {
- scldiv = perclk_rate /
- (clkdivs[prescale] * config.speed_hz) - 2;
+ scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2;
if (scldiv < 256) {
fsl_lpspi->config.prescale = prescale;
break;
}
}
- if (prescale == 8 && scldiv >= 256)
+ if (scldiv >= 256)
return -EINVAL;
writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
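Editorial check of the new bitrate formula above: with perclk_rate = 80 MHz
and config.speed_hz = 1 MHz, prescale = 0 already gives
scldiv = 80000000 / 1000000 / (1 << 0) - 2 = 78 < 256, i.e.
SCK = 80 MHz / 1 / (78 + 2) = 1 MHz exactly, the same result the removed
clkdivs[] lookup produced, since clkdivs[n] was simply 1 << n.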
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index e8a499cd1f13..02e5cba0a5bb 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -484,7 +484,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
}
if (needs_wakeup_wait_mode(q))
- pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&q->pm_qos_req, 0);
return 0;
}
@@ -492,7 +492,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
{
if (needs_wakeup_wait_mode(q))
- pm_qos_remove_request(&q->pm_qos_req);
+ cpu_latency_qos_remove_request(&q->pm_qos_req);
clk_disable_unprepare(q->clk);
clk_disable_unprepare(q->clk_en);
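Editorial note: cpu_latency_qos_add_request() and
cpu_latency_qos_remove_request() are the v5.7 replacements for
PM_QOS_CPU_DMA_LATENCY requests. The pattern in isolation (names
hypothetical):

#include <linux/pm_qos.h>

static struct pm_qos_request demo_pm_qos;

/* keep CPUs out of deep idle states while low wakeup latency is needed */
static void demo_latency_begin(void)
{
	cpu_latency_qos_add_request(&demo_pm_qos, 0);
}

static void demo_latency_end(void)
{
	cpu_latency_qos_remove_request(&demo_pm_qos);
}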
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 6f3d64a1a2b3..c3972424af71 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -6,7 +6,6 @@
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
@@ -536,6 +535,7 @@ static int spi_geni_probe(struct platform_device *pdev)
struct spi_geni_master *mas;
void __iomem *base;
struct clk *clk;
+ struct device *dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -545,28 +545,25 @@ static int spi_geni_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- clk = devm_clk_get(&pdev->dev, "se");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Err getting SE Core clk %ld\n",
- PTR_ERR(clk));
+ clk = devm_clk_get(dev, "se");
+ if (IS_ERR(clk))
return PTR_ERR(clk);
- }
- spi = spi_alloc_master(&pdev->dev, sizeof(*mas));
+ spi = spi_alloc_master(dev, sizeof(*mas));
if (!spi)
return -ENOMEM;
platform_set_drvdata(pdev, spi);
mas = spi_master_get_devdata(spi);
mas->irq = irq;
- mas->dev = &pdev->dev;
- mas->se.dev = &pdev->dev;
- mas->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ mas->dev = dev;
+ mas->se.dev = dev;
+ mas->se.wrapper = dev_get_drvdata(dev->parent);
mas->se.base = base;
mas->se.clk = clk;
spi->bus_num = -1;
- spi->dev.of_node = pdev->dev.of_node;
+ spi->dev.of_node = dev->of_node;
spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
spi->num_chipselect = 4;
@@ -579,14 +576,13 @@ static int spi_geni_probe(struct platform_device *pdev)
init_completion(&mas->xfer_done);
spin_lock_init(&mas->lock);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
ret = spi_geni_init(mas);
if (ret)
goto spi_geni_probe_runtime_disable;
- ret = request_irq(mas->irq, geni_spi_isr,
- IRQF_TRIGGER_HIGH, "spi_geni", spi);
+ ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
if (ret)
goto spi_geni_probe_runtime_disable;
@@ -598,7 +594,7 @@ static int spi_geni_probe(struct platform_device *pdev)
spi_geni_probe_free_irq:
free_irq(mas->irq, spi);
spi_geni_probe_runtime_disable:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
spi_master_put(spi);
return ret;
}
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index 4cf8fc80a7b7..e3b57252d075 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
+#include <linux/dmi.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -17,6 +18,12 @@
#define HISI_SFC_V3XX_VERSION (0x1f8)
#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT (1 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_DUAL_IO (2 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_FULL_DIO (3 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT (5 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_QUAD_IO (6 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_FULL_QIO (7 << 17)
#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
@@ -161,6 +168,43 @@ static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
if (op->addr.nbytes)
config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+ switch (op->data.buswidth) {
+ case 0 ... 1:
+ break;
+ case 2:
+ if (op->addr.buswidth <= 1) {
+ config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT;
+ } else if (op->addr.buswidth == 2) {
+ if (op->cmd.buswidth <= 1) {
+ config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IO;
+ } else if (op->cmd.buswidth == 2) {
+ config |= HISI_SFC_V3XX_CMD_CFG_FULL_DIO;
+ } else {
+ return -EIO;
+ }
+ } else {
+ return -EIO;
+ }
+ break;
+ case 4:
+ if (op->addr.buswidth <= 1) {
+ config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT;
+ } else if (op->addr.buswidth == 4) {
+ if (op->cmd.buswidth <= 1) {
+ config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IO;
+ } else if (op->cmd.buswidth == 4) {
+ config |= HISI_SFC_V3XX_CMD_CFG_FULL_QIO;
+ } else {
+ return -EIO;
+ }
+ } else {
+ return -EIO;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
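+
+ /*
+  * Editorial summary of the switch above, as (cmd, addr, data)
+  * buswidths: (1,1,2) dual output, (1,2,2) dual I/O, (2,2,2) full
+  * DIO; (1,1,4) quad output, (1,4,4) quad I/O, (4,4,4) full QIO.
+  */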
+
if (op->data.dir != SPI_MEM_NO_DATA) {
config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
@@ -207,6 +251,44 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
.exec_op = hisi_sfc_v3xx_exec_op,
};
+static int hisi_sfc_v3xx_buswidth_override_bits;
+
+/*
+ * The ACPI firmware does not currently allow us to set the device buswidth,
+ * so quirk it depending on the board.
+ */
+static int __init hisi_sfc_v3xx_dmi_quirk(const struct dmi_system_id *d)
+{
+ hisi_sfc_v3xx_buswidth_override_bits = SPI_RX_QUAD | SPI_TX_QUAD;
+
+ return 0;
+}
+
+static const struct dmi_system_id hisi_sfc_v3xx_dmi_quirk_table[] = {
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "D06"),
+ },
+ },
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 2280 V2"),
+ },
+ },
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 200 (Model 2280)"),
+ },
+ },
+ {}
+};
+
static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -222,6 +304,8 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->buswidth_override_bits = hisi_sfc_v3xx_buswidth_override_bits;
+
host = spi_controller_get_devdata(ctlr);
host->dev = dev;
@@ -277,7 +361,20 @@ static struct platform_driver hisi_sfc_v3xx_spi_driver = {
.probe = hisi_sfc_v3xx_probe,
};
-module_platform_driver(hisi_sfc_v3xx_spi_driver);
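+/*
+ * Editorial note: module_platform_driver() is open-coded below so that
+ * dmi_check_system() runs once, before the driver registers and any
+ * probe reads hisi_sfc_v3xx_buswidth_override_bits.
+ */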
+static int __init hisi_sfc_v3xx_spi_init(void)
+{
+ dmi_check_system(hisi_sfc_v3xx_dmi_quirk_table);
+
+ return platform_driver_register(&hisi_sfc_v3xx_spi_driver);
+}
+
+static void __exit hisi_sfc_v3xx_spi_exit(void)
+{
+ platform_driver_unregister(&hisi_sfc_v3xx_spi_driver);
+}
+
+module_init(hisi_sfc_v3xx_spi_init);
+module_exit(hisi_sfc_v3xx_spi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index e5a46f0eb93b..adaa0c49f966 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -418,12 +418,13 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
struct spi_controller *ctlr = mem->spi->controller;
size_t len;
- len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
-
if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
return ctlr->mem_ops->adjust_op_size(mem, op);
if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
+ len = sizeof(op->cmd.opcode) + op->addr.nbytes +
+ op->dummy.nbytes;
+
if (len > spi_max_transfer_size(mem->spi))
return -EINVAL;
@@ -487,7 +488,7 @@ static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
* This function is creating a direct mapping descriptor which can then be used
* to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
* If the SPI controller driver does not support direct mapping, this function
- * fallback to an implementation using spi_mem_exec_op(), so that the caller
+ * falls back to an implementation using spi_mem_exec_op(), so that the caller
* doesn't have to bother implementing a fallback on his own.
*
* Return: a valid pointer in case of success, and ERR_PTR() otherwise.
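Editorial note on the spi-mem hunk above: the length is now computed only on
the !exec_op fallback path, the one place it is used; controllers that provide
mem_ops->adjust_op_size() return before it was ever read.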
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 7f5680fe2568..77f7d0e0e46a 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -9,11 +9,13 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
@@ -33,7 +35,6 @@
* to have a CS go down over the full transfer
*/
-#define SPICC_MAX_FREQ 30000000
#define SPICC_MAX_BURST 128
/* Register Map */
@@ -105,7 +106,21 @@
#define SPICC_SWAP_RO BIT(14) /* RX FIFO Data Swap Read-Only */
#define SPICC_SWAP_W1 BIT(15) /* RX FIFO Data Swap Write-Only */
#define SPICC_DLYCTL_RO_MASK GENMASK(20, 15) /* Delay Control Read-Only */
-#define SPICC_DLYCTL_W1_MASK GENMASK(21, 16) /* Delay Control Write-Only */
+#define SPICC_MO_DELAY_MASK GENMASK(17, 16) /* Master Output Delay */
+#define SPICC_MO_NO_DELAY 0
+#define SPICC_MO_DELAY_1_CYCLE 1
+#define SPICC_MO_DELAY_2_CYCLE 2
+#define SPICC_MO_DELAY_3_CYCLE 3
+#define SPICC_MI_DELAY_MASK GENMASK(19, 18) /* Master Input Delay */
+#define SPICC_MI_NO_DELAY 0
+#define SPICC_MI_DELAY_1_CYCLE 1
+#define SPICC_MI_DELAY_2_CYCLE 2
+#define SPICC_MI_DELAY_3_CYCLE 3
+#define SPICC_MI_CAP_DELAY_MASK GENMASK(21, 20) /* Master Capture Delay */
+#define SPICC_CAP_AHEAD_2_CYCLE 0
+#define SPICC_CAP_AHEAD_1_CYCLE 1
+#define SPICC_CAP_NO_DELAY 2
+#define SPICC_CAP_DELAY_1_CYCLE 3
#define SPICC_FIFORST_RO_MASK GENMASK(22, 21) /* FIFO Softreset Read-Only */
#define SPICC_FIFORST_W1_MASK GENMASK(23, 22) /* FIFO Softreset Write-Only */
@@ -113,31 +128,59 @@
#define SPICC_DWADDR 0x24 /* Write Address of DMA */
+#define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */
+#define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0)
+#define SPICC_ENH_DATARATE_MASK GENMASK(23, 16)
+#define SPICC_ENH_DATARATE_EN BIT(24)
+#define SPICC_ENH_MOSI_OEN BIT(25)
+#define SPICC_ENH_CLK_OEN BIT(26)
+#define SPICC_ENH_CS_OEN BIT(27)
+#define SPICC_ENH_CLK_CS_DELAY_EN BIT(28)
+#define SPICC_ENH_MAIN_CLK_AO BIT(29)
+
#define writel_bits_relaxed(mask, val, addr) \
writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
-#define SPICC_BURST_MAX 16
-#define SPICC_FIFO_HALF 10
+struct meson_spicc_data {
+ unsigned int max_speed_hz;
+ unsigned int min_speed_hz;
+ unsigned int fifo_size;
+ bool has_oen;
+ bool has_enhance_clk_div;
+ bool has_pclk;
+};
struct meson_spicc_device {
struct spi_master *master;
struct platform_device *pdev;
void __iomem *base;
struct clk *core;
+ struct clk *pclk;
+ struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
+ const struct meson_spicc_data *data;
u8 *tx_buf;
u8 *rx_buf;
unsigned int bytes_per_word;
unsigned long tx_remain;
- unsigned long txb_remain;
unsigned long rx_remain;
- unsigned long rxb_remain;
unsigned long xfer_remain;
- bool is_burst_end;
- bool is_last_burst;
};
+static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
+{
+ u32 conf;
+
+ if (!spicc->data->has_oen)
+ return;
+
+ conf = readl_relaxed(spicc->base + SPICC_ENH_CTL0) |
+ SPICC_ENH_MOSI_OEN | SPICC_ENH_CLK_OEN | SPICC_ENH_CS_OEN;
+
+ writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0);
+}
+
static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
{
return !!FIELD_GET(SPICC_TF,
@@ -146,7 +189,7 @@ static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc)
{
- return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF_EN,
+ return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF,
readl_relaxed(spicc->base + SPICC_STATREG));
}
@@ -201,34 +244,22 @@ static inline void meson_spicc_tx(struct meson_spicc_device *spicc)
spicc->base + SPICC_TXDATA);
}
-static inline u32 meson_spicc_setup_rx_irq(struct meson_spicc_device *spicc,
- u32 irq_ctrl)
+static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc)
{
- if (spicc->rx_remain > SPICC_FIFO_HALF)
- irq_ctrl |= SPICC_RH_EN;
- else
- irq_ctrl |= SPICC_RR_EN;
-
- return irq_ctrl;
-}
-static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc,
- unsigned int burst_len)
-{
+ unsigned int burst_len = min_t(unsigned int,
+ spicc->xfer_remain /
+ spicc->bytes_per_word,
+ spicc->data->fifo_size);
/* Setup Xfer variables */
spicc->tx_remain = burst_len;
spicc->rx_remain = burst_len;
spicc->xfer_remain -= burst_len * spicc->bytes_per_word;
- spicc->is_burst_end = false;
- if (burst_len < SPICC_BURST_MAX || !spicc->xfer_remain)
- spicc->is_last_burst = true;
- else
- spicc->is_last_burst = false;
/* Setup burst length */
writel_bits_relaxed(SPICC_BURSTLENGTH_MASK,
FIELD_PREP(SPICC_BURSTLENGTH_MASK,
- burst_len),
+ burst_len - 1),
spicc->base + SPICC_CONREG);
/* Fill TX FIFO */
@@ -238,97 +269,71 @@ static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc,
static irqreturn_t meson_spicc_irq(int irq, void *data)
{
struct meson_spicc_device *spicc = (void *) data;
- u32 ctrl = readl_relaxed(spicc->base + SPICC_INTREG);
- u32 stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
- ctrl &= ~(SPICC_RH_EN | SPICC_RR_EN);
+ writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG);
/* Empty RX FIFO */
meson_spicc_rx(spicc);
- /* Enable TC interrupt since we transferred everything */
- if (!spicc->tx_remain && !spicc->rx_remain) {
- spicc->is_burst_end = true;
-
- /* Enable TC interrupt */
- ctrl |= SPICC_TC_EN;
+ if (!spicc->xfer_remain) {
+ /* Disable all IRQs */
+ writel(0, spicc->base + SPICC_INTREG);
- /* Reload IRQ status */
- stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
- }
-
- /* Check transfer complete */
- if ((stat & SPICC_TC) && spicc->is_burst_end) {
- unsigned int burst_len;
-
- /* Clear TC bit */
- writel_relaxed(SPICC_TC, spicc->base + SPICC_STATREG);
-
- /* Disable TC interrupt */
- ctrl &= ~SPICC_TC_EN;
-
- if (spicc->is_last_burst) {
- /* Disable all IRQs */
- writel(0, spicc->base + SPICC_INTREG);
-
- spi_finalize_current_transfer(spicc->master);
-
- return IRQ_HANDLED;
- }
-
- burst_len = min_t(unsigned int,
- spicc->xfer_remain / spicc->bytes_per_word,
- SPICC_BURST_MAX);
+ spi_finalize_current_transfer(spicc->master);
- /* Setup burst */
- meson_spicc_setup_burst(spicc, burst_len);
-
- /* Restart burst */
- writel_bits_relaxed(SPICC_XCH, SPICC_XCH,
- spicc->base + SPICC_CONREG);
+ return IRQ_HANDLED;
}
- /* Setup RX interrupt trigger */
- ctrl = meson_spicc_setup_rx_irq(spicc, ctrl);
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc);
- /* Reconfigure interrupts */
- writel(ctrl, spicc->base + SPICC_INTREG);
+ /* Start burst */
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
return IRQ_HANDLED;
}
-static u32 meson_spicc_setup_speed(struct meson_spicc_device *spicc, u32 conf,
- u32 speed)
+static void meson_spicc_auto_io_delay(struct meson_spicc_device *spicc)
{
- unsigned long parent, value;
- unsigned int i, div;
-
- parent = clk_get_rate(spicc->core);
-
- /* Find closest inferior/equal possible speed */
- for (i = 0 ; i < 7 ; ++i) {
- /* 2^(data_rate+2) */
- value = parent >> (i + 2);
-
- if (value <= speed)
- break;
+ u32 div, hz;
+ u32 mi_delay, cap_delay;
+ u32 conf;
+
+ if (spicc->data->has_enhance_clk_div) {
+ div = FIELD_GET(SPICC_ENH_DATARATE_MASK,
+ readl_relaxed(spicc->base + SPICC_ENH_CTL0));
+ div++;
+ div <<= 1;
+ } else {
+ div = FIELD_GET(SPICC_DATARATE_MASK,
+ readl_relaxed(spicc->base + SPICC_CONREG));
+ div += 2;
+ div = 1 << div;
}
- /* If provided speed it lower than max divider, use max divider */
- if (i > 7) {
- div = 7;
- dev_warn_once(&spicc->pdev->dev, "unable to get close to speed %u\n",
- speed);
- } else
- div = i;
-
- dev_dbg(&spicc->pdev->dev, "parent %lu, speed %u -> %lu (%u)\n",
- parent, speed, value, div);
-
- conf &= ~SPICC_DATARATE_MASK;
- conf |= FIELD_PREP(SPICC_DATARATE_MASK, div);
-
- return conf;
+ mi_delay = SPICC_MI_NO_DELAY;
+ cap_delay = SPICC_CAP_AHEAD_2_CYCLE;
+ hz = clk_get_rate(spicc->clk);
+
+ if (hz >= 100000000)
+ cap_delay = SPICC_CAP_DELAY_1_CYCLE;
+ else if (hz >= 80000000)
+ cap_delay = SPICC_CAP_NO_DELAY;
+ else if (hz >= 40000000)
+ cap_delay = SPICC_CAP_AHEAD_1_CYCLE;
+ else if (div >= 16)
+ mi_delay = SPICC_MI_DELAY_3_CYCLE;
+ else if (div >= 8)
+ mi_delay = SPICC_MI_DELAY_2_CYCLE;
+ else if (div >= 6)
+ mi_delay = SPICC_MI_DELAY_1_CYCLE;
+
+ conf = readl_relaxed(spicc->base + SPICC_TESTREG);
+ conf &= ~(SPICC_MO_DELAY_MASK | SPICC_MI_DELAY_MASK
+ | SPICC_MI_CAP_DELAY_MASK);
+ conf |= FIELD_PREP(SPICC_MI_DELAY_MASK, mi_delay);
+ conf |= FIELD_PREP(SPICC_MI_CAP_DELAY_MASK, cap_delay);
+ writel_relaxed(conf, spicc->base + SPICC_TESTREG);
}
static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
@@ -339,9 +344,6 @@ static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
/* Read original configuration */
conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG);
- /* Select closest divider */
- conf = meson_spicc_setup_speed(spicc, conf, xfer->speed_hz);
-
/* Setup word width */
conf &= ~SPICC_BITLENGTH_MASK;
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK,
@@ -350,6 +352,32 @@ static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
/* Ignore if unchanged */
if (conf != conf_orig)
writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
+ clk_set_rate(spicc->clk, xfer->speed_hz);
+
+ meson_spicc_auto_io_delay(spicc);
+
+ writel_relaxed(0, spicc->base + SPICC_DMAREG);
+}
+
+static void meson_spicc_reset_fifo(struct meson_spicc_device *spicc)
+{
+ u32 data;
+
+ if (spicc->data->has_oen)
+ writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO,
+ SPICC_ENH_MAIN_CLK_AO,
+ spicc->base + SPICC_ENH_CTL0);
+
+ writel_bits_relaxed(SPICC_FIFORST_W1_MASK, SPICC_FIFORST_W1_MASK,
+ spicc->base + SPICC_TESTREG);
+
+ while (meson_spicc_rxready(spicc))
+ data = readl_relaxed(spicc->base + SPICC_RXDATA);
+
+ if (spicc->data->has_oen)
+ writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, 0,
+ spicc->base + SPICC_ENH_CTL0);
}
static int meson_spicc_transfer_one(struct spi_master *master,
@@ -357,8 +385,6 @@ static int meson_spicc_transfer_one(struct spi_master *master,
struct spi_transfer *xfer)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
- unsigned int burst_len;
- u32 irq = 0;
/* Store current transfer */
spicc->xfer = xfer;
@@ -372,22 +398,22 @@ static int meson_spicc_transfer_one(struct spi_master *master,
spicc->bytes_per_word =
DIV_ROUND_UP(spicc->xfer->bits_per_word, 8);
+ if (xfer->len % spicc->bytes_per_word)
+ return -EINVAL;
+
/* Setup transfer parameters */
meson_spicc_setup_xfer(spicc, xfer);
- burst_len = min_t(unsigned int,
- spicc->xfer_remain / spicc->bytes_per_word,
- SPICC_BURST_MAX);
+ meson_spicc_reset_fifo(spicc);
- meson_spicc_setup_burst(spicc, burst_len);
-
- irq = meson_spicc_setup_rx_irq(spicc, irq);
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc);
/* Start burst */
writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
/* Enable interrupts */
- writel_relaxed(irq, spicc->base + SPICC_INTREG);
+ writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
return 1;
}
@@ -444,7 +470,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
/* Setup no wait cycles by default */
writel_relaxed(0, spicc->base + SPICC_PERIODREG);
- writel_bits_relaxed(BIT(24), BIT(24), spicc->base + SPICC_TESTREG);
+ writel_bits_relaxed(SPICC_LBC_W1, 0, spicc->base + SPICC_TESTREG);
return 0;
}
@@ -456,9 +482,6 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
- /* Disable controller */
- writel_bits_relaxed(SPICC_ENABLE, 0, spicc->base + SPICC_CONREG);
-
device_reset_optional(&spicc->pdev->dev);
return 0;
@@ -477,11 +500,167 @@ static void meson_spicc_cleanup(struct spi_device *spi)
spi->controller_state = NULL;
}
+/*
+ * The Clock Mux
+ *            x-----------------x   x------------x    x------\
+ *        |---| pow2 fixed div  |---| pow2 div   |----|      |
+ *        |   x-----------------x   x------------x    |      |
+ *  src ---|                                          | mux  |-- out
+ *        |   x-----------------x   x------------x    |      |
+ *        |---| enh fixed div   |---| enh div    |0---|      |
+ *            x-----------------x   x------------x    x------/
+ *
+ * Clk path for GX series:
+ * src -> pow2 fixed div -> pow2 div -> out
+ *
+ * Clk path for AXG series:
+ * src -> pow2 fixed div -> pow2 div -> mux -> out
+ * src -> enh fixed div -> enh div -> mux -> out
+ *
+ * Clk path for G12A series:
+ * pclk -> pow2 fixed div -> pow2 div -> mux -> out
+ * pclk -> enh fixed div -> enh div -> mux -> out
+ */
+
+static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
+ struct clk_divider *pow2_div, *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
+
+ /* algorithm for pow2 div: rate = freq / 4 / (2 ^ N) */
+
+ pow2_fixed_div = devm_kzalloc(dev, sizeof(*pow2_fixed_div), GFP_KERNEL);
+ if (!pow2_fixed_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#pow2_fixed_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ if (spicc->data->has_pclk)
+ parent_data[0].hw = __clk_get_hw(spicc->pclk);
+ else
+ parent_data[0].hw = __clk_get_hw(spicc->core);
+ init.num_parents = 1;
+
+ pow2_fixed_div->mult = 1,
+ pow2_fixed_div->div = 4,
+ pow2_fixed_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &pow2_fixed_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
+ if (!pow2_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ parent_data[0].hw = &pow2_fixed_div->hw;
+ init.num_parents = 1;
+
+ pow2_div->shift = 16,
+ pow2_div->width = 3,
+ pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
+ pow2_div->reg = spicc->base + SPICC_CONREG;
+ pow2_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &pow2_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ if (!spicc->data->has_enhance_clk_div) {
+ spicc->clk = clk;
+ return 0;
+ }
+
+ /* algorithm for enh div: rate = freq / 2 / (N + 1) */
+
+ enh_fixed_div = devm_kzalloc(dev, sizeof(*enh_fixed_div), GFP_KERNEL);
+ if (!enh_fixed_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#enh_fixed_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ if (spicc->data->has_pclk)
+ parent_data[0].hw = __clk_get_hw(spicc->pclk);
+ else
+ parent_data[0].hw = __clk_get_hw(spicc->core);
+ init.num_parents = 1;
+
+ enh_fixed_div->mult = 1,
+ enh_fixed_div->div = 2,
+ enh_fixed_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &enh_fixed_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ enh_div = devm_kzalloc(dev, sizeof(*enh_div), GFP_KERNEL);
+ if (!enh_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#enh_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ parent_data[0].hw = &enh_fixed_div->hw;
+ init.num_parents = 1;
+
+ enh_div->shift = 16;
+ enh_div->width = 8;
+ enh_div->reg = spicc->base + SPICC_ENH_CTL0;
+ enh_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &enh_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_mux_ops;
+ parent_data[0].hw = &pow2_div->hw;
+ parent_data[1].hw = &enh_div->hw;
+ init.num_parents = 2;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ mux->mask = 0x1;
+ mux->shift = 24;
+ mux->reg = spicc->base + SPICC_ENH_CTL0;
+ mux->hw.init = &init;
+
+ spicc->clk = devm_clk_register(dev, &mux->hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
+
+ return 0;
+}
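A standalone sketch (plain C, not part of the patch) of the divider ranges the topology comment above describes, assuming a hypothetical 500 MHz parent clock; the 3-bit pow2 field and 8-bit enh field match the .width values registered in this function:

#include <stdio.h>

int main(void)
{
	unsigned long parent = 500000000UL;	/* hypothetical parent rate */

	/* pow2 path: rate = parent / 4 / 2^N, N in [0, 7] (3-bit field) */
	printf("pow2: %lu..%lu Hz\n",
	       parent / 4 / (1UL << 7), parent / 4 / (1UL << 0));

	/* enh path: rate = parent / 2 / (N + 1), N in [0, 255] (8-bit field) */
	printf("enh:  %lu..%lu Hz\n",
	       parent / 2 / (255 + 1), parent / 2 / (0 + 1));
	return 0;
}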
+
static int meson_spicc_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct meson_spicc_device *spicc;
- int ret, irq, rate;
+ int ret, irq;
master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
if (!master) {
@@ -491,6 +670,13 @@ static int meson_spicc_probe(struct platform_device *pdev)
spicc = spi_master_get_devdata(master);
spicc->master = master;
+ spicc->data = of_device_get_match_data(&pdev->dev);
+ if (!spicc->data) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ ret = -EINVAL;
+ goto out_master;
+ }
+
spicc->pdev = pdev;
platform_set_drvdata(pdev, spicc);
@@ -501,6 +687,10 @@ static int meson_spicc_probe(struct platform_device *pdev)
goto out_master;
}
+ /* Set master mode and enable controller */
+ writel_relaxed(SPICC_ENABLE | SPICC_MODE_MASTER,
+ spicc->base + SPICC_CONREG);
+
/* Disable all IRQs */
writel_relaxed(0, spicc->base + SPICC_INTREG);
@@ -519,12 +709,26 @@ static int meson_spicc_probe(struct platform_device *pdev)
goto out_master;
}
+ if (spicc->data->has_pclk) {
+ spicc->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(spicc->pclk)) {
+ dev_err(&pdev->dev, "pclk clock request failed\n");
+ ret = PTR_ERR(spicc->pclk);
+ goto out_master;
+ }
+ }
+
ret = clk_prepare_enable(spicc->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
goto out_master;
}
- rate = clk_get_rate(spicc->core);
+
+ ret = clk_prepare_enable(spicc->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "pclk clock enable failed\n");
+ goto out_master;
+ }
device_reset_optional(&pdev->dev);
@@ -536,7 +740,8 @@ static int meson_spicc_probe(struct platform_device *pdev)
SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
- master->min_speed_hz = rate >> 9;
+ master->min_speed_hz = spicc->data->min_speed_hz;
+ master->max_speed_hz = spicc->data->max_speed_hz;
master->setup = meson_spicc_setup;
master->cleanup = meson_spicc_cleanup;
master->prepare_message = meson_spicc_prepare_message;
@@ -544,11 +749,13 @@ static int meson_spicc_probe(struct platform_device *pdev)
master->transfer_one = meson_spicc_transfer_one;
master->use_gpio_descriptors = true;
- /* Setup max rate according to the Meson GX datasheet */
- if ((rate >> 2) > SPICC_MAX_FREQ)
- master->max_speed_hz = SPICC_MAX_FREQ;
- else
- master->max_speed_hz = rate >> 2;
+ meson_spicc_oen_enable(spicc);
+
+ ret = meson_spicc_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_master;
+ }
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
@@ -560,6 +767,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
out_clk:
clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
out_master:
spi_master_put(master);
@@ -575,13 +783,47 @@ static int meson_spicc_remove(struct platform_device *pdev)
writel(0, spicc->base + SPICC_CONREG);
clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
return 0;
}
+static const struct meson_spicc_data meson_spicc_gx_data = {
+ .max_speed_hz = 30000000,
+ .min_speed_hz = 325000,
+ .fifo_size = 16,
+};
+
+static const struct meson_spicc_data meson_spicc_axg_data = {
+ .max_speed_hz = 80000000,
+ .min_speed_hz = 325000,
+ .fifo_size = 16,
+ .has_oen = true,
+ .has_enhance_clk_div = true,
+};
+
+static const struct meson_spicc_data meson_spicc_g12a_data = {
+ .max_speed_hz = 166666666,
+ .min_speed_hz = 50000,
+ .fifo_size = 15,
+ .has_oen = true,
+ .has_enhance_clk_div = true,
+ .has_pclk = true,
+};
+
static const struct of_device_id meson_spicc_of_match[] = {
- { .compatible = "amlogic,meson-gx-spicc", },
- { .compatible = "amlogic,meson-axg-spicc", },
+ {
+ .compatible = "amlogic,meson-gx-spicc",
+ .data = &meson_spicc_gx_data,
+ },
+ {
+ .compatible = "amlogic,meson-axg-spicc",
+ .data = &meson_spicc_axg_data,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-spicc",
+ .data = &meson_spicc_g12a_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
new file mode 100644
index 000000000000..c15a9910549f
--- /dev/null
+++ b/drivers/spi/spi-mtk-nor.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Mediatek SPI NOR controller driver
+//
+// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
+
+#define DRIVER_NAME "mtk-spi-nor"
+
+#define MTK_NOR_REG_CMD 0x00
+#define MTK_NOR_CMD_WRITE BIT(4)
+#define MTK_NOR_CMD_PROGRAM BIT(2)
+#define MTK_NOR_CMD_READ BIT(0)
+#define MTK_NOR_CMD_MASK GENMASK(5, 0)
+
+#define MTK_NOR_REG_PRG_CNT 0x04
+#define MTK_NOR_REG_RDATA 0x0c
+
+#define MTK_NOR_REG_RADR0 0x10
+#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
+#define MTK_NOR_REG_RADR3 0xc8
+
+#define MTK_NOR_REG_WDATA 0x1c
+
+#define MTK_NOR_REG_PRGDATA0 0x20
+#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
+#define MTK_NOR_REG_PRGDATA_MAX 5
+
+#define MTK_NOR_REG_SHIFT0 0x38
+#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
+#define MTK_NOR_REG_SHIFT_MAX 9
+
+#define MTK_NOR_REG_CFG1 0x60
+#define MTK_NOR_FAST_READ BIT(0)
+
+#define MTK_NOR_REG_CFG2 0x64
+#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
+#define MTK_NOR_WR_BUF_EN BIT(0)
+
+#define MTK_NOR_REG_PP_DATA 0x98
+
+#define MTK_NOR_REG_IRQ_STAT 0xa8
+#define MTK_NOR_REG_IRQ_EN 0xac
+#define MTK_NOR_IRQ_DMA BIT(7)
+#define MTK_NOR_IRQ_MASK GENMASK(7, 0)
+
+#define MTK_NOR_REG_CFG3 0xb4
+#define MTK_NOR_DISABLE_WREN BIT(7)
+#define MTK_NOR_DISABLE_SR_POLL BIT(5)
+
+#define MTK_NOR_REG_WP 0xc4
+#define MTK_NOR_ENABLE_SF_CMD 0x30
+
+#define MTK_NOR_REG_BUSCFG 0xcc
+#define MTK_NOR_4B_ADDR BIT(4)
+#define MTK_NOR_QUAD_ADDR BIT(3)
+#define MTK_NOR_QUAD_READ BIT(2)
+#define MTK_NOR_DUAL_ADDR BIT(1)
+#define MTK_NOR_DUAL_READ BIT(0)
+#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)
+
+#define MTK_NOR_REG_DMA_CTL 0x718
+#define MTK_NOR_DMA_START BIT(0)
+
+#define MTK_NOR_REG_DMA_FADR 0x71c
+#define MTK_NOR_REG_DMA_DADR 0x720
+#define MTK_NOR_REG_DMA_END_DADR 0x724
+
+#define MTK_NOR_PRG_MAX_SIZE 6
+// Src/dst addresses for read DMA have to be 16-byte aligned
+#define MTK_NOR_DMA_ALIGN 16
+#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
+// and we allocate a bounce buffer if destination address isn't aligned.
+#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE
+
+// Buffered page program can do one 128-byte transfer
+#define MTK_NOR_PP_SIZE 128
+
+#define CLK_TO_US(sp, clkcnt) ((clkcnt) * 1000000 / (sp)->spi_freq)
+
+struct mtk_nor {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ void __iomem *base;
+ u8 *buffer;
+ struct clk *spi_clk;
+ struct clk *ctlr_clk;
+ unsigned int spi_freq;
+ bool wbuf_en;
+ bool has_irq;
+ struct completion op_done;
+};
+
+static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
+{
+ u32 val = readl(sp->base + reg);
+
+ val &= ~clr;
+ val |= set;
+ writel(val, sp->base + reg);
+}
+
+static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
+{
+ ulong delay = CLK_TO_US(sp, clk);
+ u32 reg;
+ int ret;
+
+ writel(cmd, sp->base + MTK_NOR_REG_CMD);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
+ delay / 3, (delay + 1) * 200);
+ if (ret < 0)
+ dev_err(sp->dev, "command %u timeout.\n", cmd);
+ return ret;
+}
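mtk_nor_cmd_exec() busy-polls at one third of the expected command duration and gives up after roughly 200 times that. A standalone sketch of the CLK_TO_US() arithmetic, assuming a hypothetical 26 MHz SPI clock:

#include <stdio.h>

static unsigned long clk_to_us(unsigned long spi_freq, unsigned long clkcnt)
{
	return clkcnt * 1000000UL / spi_freq;	/* clock cycles -> microseconds */
}

int main(void)
{
	unsigned long delay = clk_to_us(26000000UL, 6 * 8); /* 6-byte command */

	printf("expect %lu us: poll every %lu us, timeout %lu us\n",
	       delay, delay / 3, (delay + 1) * 200);
	return 0;
}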
+
+static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u32 addr = op->addr.val;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
+ addr >>= 8;
+ }
+ if (op->addr.nbytes == 4) {
+ writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
+ } else {
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
+ }
+}
+
+static bool mtk_nor_match_read(const struct spi_mem_op *op)
+{
+ int dummy = 0;
+
+ if (op->dummy.buswidth)
+ dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;
+
+ if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
+ if (op->addr.buswidth == 1)
+ return dummy == 8;
+ else if (op->addr.buswidth == 2)
+ return dummy == 4;
+ else if (op->addr.buswidth == 4)
+ return dummy == 6;
+ } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
+ if (op->cmd.opcode == 0x03)
+ return dummy == 0;
+ else if (op->cmd.opcode == 0x0b)
+ return dummy == 8;
+ }
+ return false;
+}
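The controller only understands a fixed dummy-cycle count per address width, which is what mtk_nor_match_read() encodes. A standalone sketch of the cycle arithmetic, using a hypothetical 1-4-4 read with three dummy bytes on a 4-bit bus:

#include <stdio.h>

/* dummy cycles = dummy bytes * 8 / dummy buswidth */
static int dummy_cycles(int nbytes, int buswidth)
{
	return nbytes * 8 / buswidth;
}

int main(void)
{
	/*
	 * Accepted dual/quad reads: 8 cycles for a 1-bit address, 4 for
	 * 2-bit, 6 for 4-bit; plain reads: 0x03 with 0 dummies, 0x0b with 8.
	 */
	printf("3 dummy bytes at buswidth 4 = %d cycles\n",
	       dummy_cycles(3, 4));	/* 6: matches the 4-bit address case */
	return 0;
}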
+
+static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ size_t len;
+
+ if (!op->data.nbytes)
+ return 0;
+
+ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ if ((op->data.dir == SPI_MEM_DATA_IN) &&
+ mtk_nor_match_read(op)) {
+ if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
+ (op->data.nbytes < MTK_NOR_DMA_ALIGN))
+ op->data.nbytes = 1;
+ else if (!((ulong)(op->data.buf.in) &
+ MTK_NOR_DMA_ALIGN_MASK))
+ op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
+ else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
+ op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
+ return 0;
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes >= MTK_NOR_PP_SIZE)
+ op->data.nbytes = MTK_NOR_PP_SIZE;
+ else
+ op->data.nbytes = 1;
+ return 0;
+ }
+ }
+
+ len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes -
+ op->dummy.nbytes;
+ if (op->data.nbytes > len)
+ op->data.nbytes = len;
+
+ return 0;
+}
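mtk_nor_adjust_op_size() clamps reads three ways: unaligned flash offsets and short reads fall back to single-byte PIO, aligned destination buffers get the length rounded down to a DMA multiple, and unaligned buffers are capped at the bounce-buffer size. A standalone sketch of the round-down case:

#include <stdio.h>

#define DMA_ALIGN	16
#define DMA_ALIGN_MASK	(DMA_ALIGN - 1)

int main(void)
{
	unsigned int nbytes = 1000;

	/* destination buffer 16-byte aligned: trim to a DMA-able length */
	printf("%u -> %u bytes\n", nbytes, nbytes & ~DMA_ALIGN_MASK); /* 992 */
	return 0;
}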
+
+static bool mtk_nor_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ size_t len;
+
+ if (op->cmd.buswidth != 1)
+ return false;
+
+ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op))
+ return true;
+ else if (op->data.dir == SPI_MEM_DATA_OUT)
+ return (op->addr.buswidth == 1) &&
+ (op->dummy.buswidth == 0) &&
+ (op->data.buswidth == 1);
+ }
+ len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+ if ((len > MTK_NOR_PRG_MAX_SIZE) ||
+ ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE)))
+ return false;
+ return true;
+}
+
+static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u32 reg = 0;
+
+ if (op->addr.nbytes == 4)
+ reg |= MTK_NOR_4B_ADDR;
+
+ if (op->data.buswidth == 4) {
+ reg |= MTK_NOR_QUAD_READ;
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
+ if (op->addr.buswidth == 4)
+ reg |= MTK_NOR_QUAD_ADDR;
+ } else if (op->data.buswidth == 2) {
+ reg |= MTK_NOR_DUAL_READ;
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
+ if (op->addr.buswidth == 2)
+ reg |= MTK_NOR_DUAL_ADDR;
+ } else {
+ if (op->cmd.opcode == 0x0b)
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
+ else
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
+ }
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
+}
+
+static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
+ u8 *buffer)
+{
+ int ret = 0;
+ ulong delay;
+ u32 reg;
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(sp->dev, dma_addr)) {
+ dev_err(sp->dev, "failed to map dma buffer.\n");
+ return -EINVAL;
+ }
+
+ writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
+ writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
+ writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);
+
+ if (sp->has_irq) {
+ reinit_completion(&sp->op_done);
+ mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
+ }
+
+ mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);
+
+ delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
+
+ if (sp->has_irq) {
+ if (!wait_for_completion_timeout(&sp->op_done,
+ (delay + 1) * 100))
+ ret = -ETIMEDOUT;
+ } else {
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
+ !(reg & MTK_NOR_DMA_START), delay / 3,
+ (delay + 1) * 100);
+ }
+
+ dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE);
+ if (ret < 0)
+ dev_err(sp->dev, "dma read timeout.\n");
+
+ return ret;
+}
+
+static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from,
+ unsigned int length, u8 *buffer)
+{
+ unsigned int rdlen;
+ int ret;
+
+ if (length & MTK_NOR_DMA_ALIGN_MASK)
+ rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
+ else
+ rdlen = length;
+
+ ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer);
+ if (ret)
+ return ret;
+
+ memcpy(buffer, sp->buffer, length);
+ return 0;
+}
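For the bounce-buffer path the DMA length is instead rounded up, and only the requested bytes are copied back out. A standalone sketch of that round-up:

#include <stdio.h>

#define DMA_ALIGN	16
#define DMA_ALIGN_MASK	(DMA_ALIGN - 1)

int main(void)
{
	unsigned int length = 100;
	unsigned int rdlen = length;

	if (length & DMA_ALIGN_MASK)	/* round up to the next multiple */
		rdlen = (length + DMA_ALIGN) & ~DMA_ALIGN_MASK;

	printf("DMA %u bytes, memcpy %u back\n", rdlen, length); /* 112, 100 */
	return 0;
}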
+
+static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u8 *buf = op->data.buf.in;
+ int ret;
+
+ ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
+ if (!ret)
+ buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
+ return ret;
+}
+
+static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
+{
+ int ret;
+ u32 val;
+
+ if (sp->wbuf_en)
+ return 0;
+
+ val = readl(sp->base + MTK_NOR_REG_CFG2);
+ writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
+ val & MTK_NOR_WR_BUF_EN, 0, 10000);
+ if (!ret)
+ sp->wbuf_en = true;
+ return ret;
+}
+
+static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
+{
+ int ret;
+ u32 val;
+
+ if (!sp->wbuf_en)
+ return 0;
+ val = readl(sp->base + MTK_NOR_REG_CFG2);
+ writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
+ !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
+ if (!ret)
+ sp->wbuf_en = false;
+ return ret;
+}
+
+static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ const u8 *buf = op->data.buf.out;
+ u32 val;
+ int ret, i;
+
+ ret = mtk_nor_write_buffer_enable(sp);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < op->data.nbytes; i += 4) {
+ val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
+ buf[i];
+ writel(val, sp->base + MTK_NOR_REG_PP_DATA);
+ }
+ return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
+ (op->data.nbytes + 5) * BITS_PER_BYTE);
+}
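The page-program buffer is fed through a single 32-bit register, least-significant byte first. A standalone sketch of the word packing used above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t buf[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint32_t val = buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0];

	printf("0x%08x\n", val);	/* 0x44332211: buf[0] goes out first */
	return 0;
}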
+
+static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
+ const struct spi_mem_op *op)
+{
+ const u8 *buf = op->data.buf.out;
+ int ret;
+
+ ret = mtk_nor_write_buffer_disable(sp);
+ if (ret < 0)
+ return ret;
+ writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
+ return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
+}
+
+static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ if ((op->data.nbytes == 0) ||
+ ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
+ return -ENOTSUPP;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ mtk_nor_set_addr(sp, op);
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
+ if (op->data.nbytes == MTK_NOR_PP_SIZE)
+ return mtk_nor_pp_buffered(sp, op);
+ return mtk_nor_pp_unbuffered(sp, op);
+ }
+
+ if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
+ ret = mtk_nor_write_buffer_disable(sp);
+ if (ret < 0)
+ return ret;
+ mtk_nor_setup_bus(sp, op);
+ if (op->data.nbytes == 1) {
+ mtk_nor_set_addr(sp, op);
+ return mtk_nor_read_pio(sp, op);
+ } else if (((ulong)(op->data.buf.in) &
+ MTK_NOR_DMA_ALIGN_MASK)) {
+ return mtk_nor_read_bounce(sp, op->addr.val,
+ op->data.nbytes,
+ op->data.buf.in);
+ } else {
+ return mtk_nor_read_dma(sp, op->addr.val,
+ op->data.nbytes,
+ op->data.buf.in);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static int mtk_nor_setup(struct spi_device *spi)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(spi->master);
+
+ if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
+ dev_err(&spi->dev, "spi clock should be %u Hz.\n",
+ sp->spi_freq);
+ return -EINVAL;
+ }
+ spi->max_speed_hz = sp->spi_freq;
+
+ return 0;
+}
+
+static int mtk_nor_transfer_one_message(struct spi_controller *master,
+ struct spi_message *m)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(master);
+ struct spi_transfer *t = NULL;
+ unsigned long trx_len = 0;
+ int stat = 0;
+ int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
+ void __iomem *reg;
+ const u8 *txbuf;
+ u8 *rxbuf;
+ int i;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ txbuf = t->tx_buf;
+ for (i = 0; i < t->len; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ if (txbuf)
+ writeb(txbuf[i], reg);
+ else
+ writeb(0, reg);
+ }
+ trx_len += t->len;
+ }
+
+ writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+
+ stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
+ trx_len * BITS_PER_BYTE);
+ if (stat < 0)
+ goto msg_done;
+
+ reg_offset = trx_len - 1;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ rxbuf = t->rx_buf;
+ for (i = 0; i < t->len; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
+ if (rxbuf)
+ rxbuf[i] = readb(reg);
+ }
+ }
+
+ m->actual_length = trx_len;
+msg_done:
+ m->status = stat;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static void mtk_nor_disable_clk(struct mtk_nor *sp)
+{
+ clk_disable_unprepare(sp->spi_clk);
+ clk_disable_unprepare(sp->ctlr_clk);
+}
+
+static int mtk_nor_enable_clk(struct mtk_nor *sp)
+{
+ int ret;
+
+ ret = clk_prepare_enable(sp->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sp->ctlr_clk);
+ if (ret) {
+ clk_disable_unprepare(sp->spi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_nor_init(struct mtk_nor *sp)
+{
+ int ret;
+
+ ret = mtk_nor_enable_clk(sp);
+ if (ret)
+ return ret;
+
+ sp->spi_freq = clk_get_rate(sp->spi_clk);
+
+ writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
+ MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
+
+ return ret;
+}
+
+static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
+{
+ struct mtk_nor *sp = data;
+ u32 irq_status, irq_enabled;
+
+ irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
+ irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
+ // write status back to clear interrupt
+ writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ if (irq_status & MTK_NOR_IRQ_DMA) {
+ complete(&sp->op_done);
+ writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static size_t mtk_max_msg_size(struct spi_device *spi)
+{
+ return MTK_NOR_PRG_MAX_SIZE;
+}
+
+static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
+ .adjust_op_size = mtk_nor_adjust_op_size,
+ .supports_op = mtk_nor_supports_op,
+ .exec_op = mtk_nor_exec_op
+};
+
+static const struct of_device_id mtk_nor_match[] = {
+ { .compatible = "mediatek,mt8173-nor" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_nor_match);
+
+static int mtk_nor_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mtk_nor *sp;
+ void __iomem *base;
+ u8 *buffer;
+ struct clk *spi_clk, *ctlr_clk;
+ int ret, irq;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(spi_clk))
+ return PTR_ERR(spi_clk);
+
+ ctlr_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(ctlr_clk))
+ return PTR_ERR(ctlr_clk);
+
+ buffer = devm_kmalloc(&pdev->dev,
+ MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
+ GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
+ buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
+ ~MTK_NOR_DMA_ALIGN_MASK);
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (!ctlr) {
+ dev_err(&pdev->dev, "failed to allocate spi controller\n");
+ return -ENOMEM;
+ }
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->max_message_size = mtk_max_msg_size;
+ ctlr->mem_ops = &mtk_nor_mem_ops;
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->num_chipselect = 1;
+ ctlr->setup = mtk_nor_setup;
+ ctlr->transfer_one_message = mtk_nor_transfer_one_message;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+
+ sp = spi_controller_get_devdata(ctlr);
+ sp->base = base;
+ sp->buffer = buffer;
+ sp->has_irq = false;
+ sp->wbuf_en = false;
+ sp->ctlr = ctlr;
+ sp->dev = &pdev->dev;
+ sp->spi_clk = spi_clk;
+ sp->ctlr_clk = ctlr_clk;
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0) {
+ dev_warn(sp->dev, "IRQ not available.");
+ } else {
+ writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT);
+ writel(0, base + MTK_NOR_REG_IRQ_EN);
+ ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
+ pdev->name, sp);
+ if (ret < 0) {
+ dev_warn(sp->dev, "failed to request IRQ.");
+ } else {
+ init_completion(&sp->op_done);
+ sp->has_irq = true;
+ }
+ }
+
+ ret = mtk_nor_init(sp);
+ if (ret < 0) {
+ spi_controller_put(ctlr);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);
+
+ return devm_spi_register_controller(&pdev->dev, ctlr);
+}
+
+static int mtk_nor_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mtk_nor *sp;
+
+ ctlr = dev_get_drvdata(&pdev->dev);
+ sp = spi_controller_get_devdata(ctlr);
+
+ mtk_nor_disable_clk(sp);
+
+ return 0;
+}
+
+static struct platform_driver mtk_nor_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mtk_nor_match,
+ },
+ .probe = mtk_nor_probe,
+ .remove = mtk_nor_remove,
+};
+
+module_platform_driver(mtk_nor_driver);
+
+MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
+MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
new file mode 100644
index 000000000000..4f94c9127fc1
--- /dev/null
+++ b/drivers/spi/spi-mux.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// General Purpose SPI multiplexer
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#define SPI_MUX_NO_CS ((unsigned int)-1)
+
+/**
+ * DOC: Driver description
+ *
+ * This driver supports a MUX on an SPI bus. This can be useful when you need
+ * more chip selects than the hardware peripherals support, or than are
+ * available in a particular board setup.
+ *
+ * The driver will create an additional SPI controller. Devices added under the
+ * mux will be handled as 'chip selects' on this controller.
+ */
+
+/**
+ * struct spi_mux_priv - the basic spi_mux structure
+ * @spi: pointer to the device struct attached to the parent
+ * spi controller
+ * @current_cs: The current chip select set in the mux
+ * @child_msg_complete: The mux replaces the complete callback in the child's
+ * message to its own callback; this field is used by the
+ * driver to store the child's callback during a transfer
+ * @child_msg_context: Used to store the child's context to the callback
+ * @child_msg_dev: Used to store the spi_device pointer to the child
+ * @mux: mux_control structure used to provide chip selects for
+ * downstream spi devices
+ */
+struct spi_mux_priv {
+ struct spi_device *spi;
+ unsigned int current_cs;
+
+ void (*child_msg_complete)(void *context);
+ void *child_msg_context;
+ struct spi_device *child_msg_dev;
+ struct mux_control *mux;
+};
+
+/* should not get called when the parent controller is doing a transfer */
+static int spi_mux_select(struct spi_device *spi)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
+ int ret;
+
+ if (priv->current_cs == spi->chip_select)
+ return 0;
+
+ dev_dbg(&priv->spi->dev, "setting up the mux for cs %d\n",
+ spi->chip_select);
+
+ /* copy the child device's settings except for the cs */
+ priv->spi->max_speed_hz = spi->max_speed_hz;
+ priv->spi->mode = spi->mode;
+ priv->spi->bits_per_word = spi->bits_per_word;
+
+ ret = mux_control_select(priv->mux, spi->chip_select);
+ if (ret)
+ return ret;
+
+ priv->current_cs = spi->chip_select;
+
+ return 0;
+}
+
+static int spi_mux_setup(struct spi_device *spi)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
+
+ /*
+ * can be called multiple times; no valid setup is done here, since the
+ * settings are applied just before each transfer instead (we can't
+ * predict which child device the next transfer will be for anyway)
+ */
+ return spi_setup(priv->spi);
+}
+
+static void spi_mux_complete_cb(void *context)
+{
+ struct spi_mux_priv *priv = (struct spi_mux_priv *)context;
+ struct spi_controller *ctlr = spi_get_drvdata(priv->spi);
+ struct spi_message *m = ctlr->cur_msg;
+
+ m->complete = priv->child_msg_complete;
+ m->context = priv->child_msg_context;
+ m->spi = priv->child_msg_dev;
+ spi_finalize_current_message(ctlr);
+ mux_control_deselect(priv->mux);
+}
+
+static int spi_mux_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *m)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = m->spi;
+ int ret;
+
+ ret = spi_mux_select(spi);
+ if (ret)
+ return ret;
+
+ /*
+ * Replace the complete callback, context and spi_device with our own
+ * pointers, saving the originals
+ */
+ priv->child_msg_complete = m->complete;
+ priv->child_msg_context = m->context;
+ priv->child_msg_dev = m->spi;
+
+ m->complete = spi_mux_complete_cb;
+ m->context = priv;
+ m->spi = priv->spi;
+
+ /* do the transfer */
+ return spi_async(priv->spi, m);
+}
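The mux steals the message's completion callback so it can restore the child's view of the message and release the mux once the parent controller finishes. A standalone sketch of that save/substitute/restore pattern with plain function pointers (names are illustrative, not the driver's):

#include <stdio.h>

struct msg {
	void (*complete)(void *context);
	void *context;
};

static void (*saved_complete)(void *context);
static void *saved_context;

static void child_complete(void *context)
{
	printf("child sees: %s\n", (const char *)context);
}

static void mux_complete(void *context)
{
	struct msg *m = context;

	m->complete = saved_complete;	/* restore the child's callback */
	m->context = saved_context;
	m->complete(m->context);	/* then deliver the completion */
}

int main(void)
{
	struct msg m = { child_complete, (void *)"done" };

	saved_complete = m.complete;	/* save the originals */
	saved_context = m.context;
	m.complete = mux_complete;	/* substitute our own */
	m.context = &m;

	m.complete(m.context);		/* the async completion fires */
	return 0;
}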
+
+static int spi_mux_probe(struct spi_device *spi)
+{
+ struct spi_controller *ctlr;
+ struct spi_mux_priv *priv;
+ int ret;
+
+ ctlr = spi_alloc_master(&spi->dev, sizeof(*priv));
+ if (!ctlr)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ctlr);
+ priv = spi_controller_get_devdata(ctlr);
+ priv->spi = spi;
+
+ priv->mux = devm_mux_control_get(&spi->dev, NULL);
+ if (IS_ERR(priv->mux)) {
+ ret = PTR_ERR(priv->mux);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&spi->dev, "failed to get control-mux\n");
+ goto err_put_ctlr;
+ }
+
+ priv->current_cs = SPI_MUX_NO_CS;
+
+ /* supported modes are the same as our parent's */
+ ctlr->mode_bits = spi->controller->mode_bits;
+ ctlr->flags = spi->controller->flags;
+ ctlr->transfer_one_message = spi_mux_transfer_one_message;
+ ctlr->setup = spi_mux_setup;
+ ctlr->num_chipselect = mux_control_states(priv->mux);
+ ctlr->bus_num = -1;
+ ctlr->dev.of_node = spi->dev.of_node;
+
+ ret = devm_spi_register_controller(&spi->dev, ctlr);
+ if (ret)
+ goto err_put_ctlr;
+
+ return 0;
+
+err_put_ctlr:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static const struct of_device_id spi_mux_of_match[] = {
+ { .compatible = "spi-mux" },
+ { }
+};
+
+static struct spi_driver spi_mux_driver = {
+ .probe = spi_mux_probe,
+ .driver = {
+ .name = "spi-mux",
+ .of_match_table = spi_mux_of_match,
+ },
+};
+
+module_spi_driver(spi_mux_driver);
+
+MODULE_DESCRIPTION("SPI multiplexer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index dce85ee07cd0..918918a9e049 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -22,7 +22,6 @@
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -32,7 +31,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
-#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 8c5084a3a617..1ccda82da206 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -307,6 +307,7 @@
#define POLL_TOUT 5000
#define NXP_FSPI_MAX_CHIPSELECT 4
+#define NXP_FSPI_MIN_IOMAP SZ_4M
struct nxp_fspi_devtype_data {
unsigned int rxfifo;
@@ -324,11 +325,29 @@ static const struct nxp_fspi_devtype_data lx2160a_data = {
.little_endian = true, /* little-endian */
};
+static const struct nxp_fspi_devtype_data imx8mm_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+static const struct nxp_fspi_devtype_data imx8qxp_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
struct nxp_fspi {
void __iomem *iobase;
void __iomem *ahb_addr;
u32 memmap_phy;
u32 memmap_phy_size;
+ u32 memmap_start;
+ u32 memmap_len;
struct clk *clk, *clk_en;
struct device *dev;
struct completion c;
@@ -641,12 +660,35 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
f->selected = spi->chip_select;
}
-static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
{
+ u32 start = op->addr.val;
u32 len = op->data.nbytes;
+ /* if necessary, ioremap before AHB read */
+ if ((!f->ahb_addr) || start < f->memmap_start ||
+ start + len > f->memmap_start + f->memmap_len) {
+ if (f->ahb_addr)
+ iounmap(f->ahb_addr);
+
+ f->memmap_start = start;
+ f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
+ len : NXP_FSPI_MIN_IOMAP;
+
+ f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
+ f->memmap_len);
+
+ if (!f->ahb_addr) {
+ dev_err(f->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+ }
+
/* Read out the data directly from the AHB buffer. */
- memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len);
+ memcpy_fromio(op->data.buf.in,
+ f->ahb_addr + start - f->memmap_start, len);
+
+ return 0;
}
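The AHB window is remapped lazily and kept at least NXP_FSPI_MIN_IOMAP wide, so small sequential reads reuse one mapping. A standalone sketch of the window test (the real code checks f->ahb_addr rather than a zero length):

#include <stdio.h>

#define MIN_IOMAP	(4UL * 1024 * 1024)	/* SZ_4M */

int main(void)
{
	unsigned long win_start = 0, win_len = 0;	/* nothing mapped yet */
	unsigned long start = 0x100000, len = 4096;	/* requested read */

	if (!win_len || start < win_start ||
	    start + len > win_start + win_len) {
		win_start = start;
		win_len = len > MIN_IOMAP ? len : MIN_IOMAP;
		printf("remap at 0x%lx, %lu bytes\n", win_start, win_len);
	}
	printf("read at window offset 0x%lx\n", start - win_start);
	return 0;
}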
static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
@@ -806,7 +848,7 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
*/
if (op->data.nbytes > (f->devtype_data->rxfifo - 4) &&
op->data.dir == SPI_MEM_DATA_IN) {
- nxp_fspi_read_ahb(f, op);
+ err = nxp_fspi_read_ahb(f, op);
} else {
if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
nxp_fspi_fill_txfifo(f, op);
@@ -871,8 +913,9 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);
/* enable module */
- fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF),
- base + FSPI_MCR0);
+ fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) |
+ FSPI_MCR0_IP_TIMEOUT(0xFF) | (u32) FSPI_MCR0_OCTCOMB_EN,
+ base + FSPI_MCR0);
/*
* Disable same device enable bit and configure all slave devices
@@ -976,9 +1019,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
/* find the resources - controller memory mapped space */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
- f->ahb_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(f->ahb_addr)) {
- ret = PTR_ERR(f->ahb_addr);
+ if (!res) {
+ ret = -ENODEV;
goto err_put_ctrl;
}
@@ -1057,6 +1099,9 @@ static int nxp_fspi_remove(struct platform_device *pdev)
mutex_destroy(&f->lock);
+ if (f->ahb_addr)
+ iounmap(f->ahb_addr);
+
return 0;
}
@@ -1076,6 +1121,8 @@ static int nxp_fspi_resume(struct device *dev)
static const struct of_device_id nxp_fspi_dt_ids[] = {
{ .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
+ { .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, },
+ { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 2e318158fca9..73d2a65d0b6e 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -192,6 +192,11 @@ static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
return drv_data->ssp_type == QUARK_X1000_SSP;
}
+static bool is_mmp2_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == MMP2_SSP;
+}
+
static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
@@ -486,8 +491,8 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
static void pxa2xx_spi_off(struct driver_data *drv_data)
{
- /* On MMP, disabling SSE seems to corrupt the rx fifo */
- if (drv_data->ssp_type == MMP2_SSP)
+ /* On MMP, disabling SSE seems to corrupt the Rx FIFO */
+ if (is_mmp2_ssp(drv_data))
return;
pxa2xx_spi_write(drv_data, SSCR0,
@@ -1093,7 +1098,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
|| (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
!= (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
- if (drv_data->ssp_type != MMP2_SSP)
+ if (!is_mmp2_ssp(drv_data))
pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
@@ -1107,7 +1112,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
}
- if (drv_data->ssp_type == MMP2_SSP) {
+ if (is_mmp2_ssp(drv_data)) {
u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR)
& SSSR_TFL_MASK) >> 8;
@@ -1571,18 +1576,18 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
else if (pcidev_id)
type = (enum pxa_ssp_type)pcidev_id->driver_data;
else
- return NULL;
+ return ERR_PTR(-EINVAL);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ssp = &pdata->ssp;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ssp->mmio_base))
- return NULL;
+ return ERR_CAST(ssp->mmio_base);
ssp->phys_base = res->start;
@@ -1596,11 +1601,11 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
ssp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ssp->clk))
- return NULL;
+ return ERR_CAST(ssp->clk);
ssp->irq = platform_get_irq(pdev, 0);
if (ssp->irq < 0)
- return NULL;
+ return ERR_PTR(ssp->irq);
ssp->type = type;
ssp->dev = &pdev->dev;
@@ -1657,9 +1662,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
platform_info = dev_get_platdata(dev);
if (!platform_info) {
platform_info = pxa2xx_spi_init_pdata(pdev);
- if (!platform_info) {
+ if (IS_ERR(platform_info)) {
dev_err(&pdev->dev, "missing platform data\n");
- return -ENODEV;
+ return PTR_ERR(platform_info);
}
}
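The hunks above stop collapsing every pxa2xx_spi_init_pdata() failure into NULL and a blanket -ENODEV, letting the real errno travel inside the pointer. A standalone sketch of the ERR_PTR convention, with simplified stand-ins for the helpers that live in include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *init_pdata(int ok)
{
	if (!ok)
		return ERR_PTR(-22);	/* -EINVAL: the reason travels along */
	return (void *)"pdata";
}

int main(void)
{
	void *p = init_pdata(0);

	if (IS_ERR(p))
		printf("probe fails with %ld, not a blanket -ENODEV\n",
		       PTR_ERR(p));
	return 0;
}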
@@ -1907,11 +1912,7 @@ out_error_controller_alloc:
static int pxa2xx_spi_remove(struct platform_device *pdev)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
- struct ssp_device *ssp;
-
- if (!drv_data)
- return 0;
- ssp = drv_data->ssp;
+ struct ssp_device *ssp = drv_data->ssp;
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 2cc6d9951b52..70ef63e0b6b8 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -843,14 +843,17 @@ static const struct dev_pm_ops rockchip_spi_pm = {
};
static const struct of_device_id rockchip_spi_dt_match[] = {
- { .compatible = "rockchip,rv1108-spi", },
+ { .compatible = "rockchip,px30-spi", },
{ .compatible = "rockchip,rk3036-spi", },
{ .compatible = "rockchip,rk3066-spi", },
{ .compatible = "rockchip,rk3188-spi", },
{ .compatible = "rockchip,rk3228-spi", },
{ .compatible = "rockchip,rk3288-spi", },
+ { .compatible = "rockchip,rk3308-spi", },
+ { .compatible = "rockchip,rk3328-spi", },
{ .compatible = "rockchip,rk3368-spi", },
{ .compatible = "rockchip,rk3399-spi", },
+ { .compatible = "rockchip,rv1108-spi", },
{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 85575d45901c..06192c9ea813 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -24,6 +24,7 @@
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
+#include <linux/spinlock.h>
#define RSPI_SPCR 0x00 /* Control Register */
#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
@@ -79,8 +80,7 @@
#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
/* SSLP - Slave Select Polarity Register */
-#define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */
-#define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */
+#define SSLP_SSLP(i) BIT(i) /* SSLi Signal Polarity Setting */
/* SPPCR - Pin Control Register */
#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
@@ -181,7 +181,9 @@ struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
struct spi_controller *ctlr;
+ struct platform_device *pdev;
wait_queue_head_t wait;
+ spinlock_t lock; /* Protects RMW-access to RSPI_SSLP */
struct clk *clk;
u16 spcmd;
u8 spsr;
@@ -239,7 +241,7 @@ struct spi_ops {
int (*set_config_register)(struct rspi_data *rspi, int access_size);
int (*transfer_one)(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer);
- u16 mode_bits;
+ u16 extra_mode_bits;
u16 flags;
u16 fifo_size;
u8 num_hw_ss;
@@ -919,6 +921,29 @@ static int qspi_setup_sequencer(struct rspi_data *rspi,
return 0;
}
+static int rspi_setup(struct spi_device *spi)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
+ u8 sslp;
+
+ if (spi->cs_gpiod)
+ return 0;
+
+ pm_runtime_get_sync(&rspi->pdev->dev);
+ spin_lock_irq(&rspi->lock);
+
+ sslp = rspi_read8(rspi, RSPI_SSLP);
+ if (spi->mode & SPI_CS_HIGH)
+ sslp |= SSLP_SSLP(spi->chip_select);
+ else
+ sslp &= ~SSLP_SSLP(spi->chip_select);
+ rspi_write8(rspi, sslp, RSPI_SSLP);
+
+ spin_unlock_irq(&rspi->lock);
+ pm_runtime_put(&rspi->pdev->dev);
+ return 0;
+}
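rspi_setup() only touches the polarity bit of its own chip select, under the new lock. A standalone sketch of that read-modify-write, assuming hypothetical chip select 1 with SPI_CS_HIGH set:

#include <stdio.h>

#define SSLP_SSLP(i)	(1U << (i))	/* SSLi signal polarity bit */

int main(void)
{
	unsigned char sslp = 0x00;	/* as read back from RSPI_SSLP */
	int chip_select = 1, cs_high = 1;

	if (cs_high)
		sslp |= SSLP_SSLP(chip_select);
	else
		sslp &= ~SSLP_SSLP(chip_select);

	printf("SSLP = 0x%02x\n", sslp);	/* 0x02: SSL1 active-high */
	return 0;
}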
+
static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
@@ -933,6 +958,8 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
rspi->spcmd |= SPCMD_CPOL;
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
+ if (spi->mode & SPI_LSB_FIRST)
+ rspi->spcmd |= SPCMD_LSBF;
/* Configure slave signal to assert */
rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
@@ -1122,7 +1149,6 @@ static int rspi_remove(struct platform_device *pdev)
static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one,
- .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
.num_hw_ss = 2,
@@ -1131,7 +1157,6 @@ static const struct spi_ops rspi_ops = {
static const struct spi_ops rspi_rz_ops = {
.set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one,
- .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
.num_hw_ss = 1,
@@ -1140,8 +1165,7 @@ static const struct spi_ops rspi_rz_ops = {
static const struct spi_ops qspi_ops = {
.set_config_register = qspi_set_config_register,
.transfer_one = qspi_transfer_one,
- .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
- SPI_TX_DUAL | SPI_TX_QUAD |
+ .extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
@@ -1249,16 +1273,20 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
+ rspi->pdev = pdev;
pm_runtime_enable(&pdev->dev);
init_waitqueue_head(&rspi->wait);
+ spin_lock_init(&rspi->lock);
ctlr->bus_num = pdev->id;
+ ctlr->setup = rspi_setup;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = ops->transfer_one;
ctlr->prepare_message = rspi_prepare_message;
ctlr->unprepare_message = rspi_unprepare_message;
- ctlr->mode_bits = ops->mode_bits;
+ ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_LOOP | ops->extra_mode_bits;
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->use_gpio_descriptors = true;
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 2d6e37f25e2d..2cb3b611c294 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -227,7 +227,7 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
struct spi_fiq_code {
u32 length;
u32 ack_offset;
- u8 data[0];
+ u8 data[];
};
extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 4ef569b47aa6..d066f5144c3e 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -565,7 +565,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->io_base)) {
ret = PTR_ERR(qspi->io_base);
- goto err;
+ goto err_master_put;
}
qspi->phys_base = res->start;
@@ -574,24 +574,26 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->mm_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->mm_base)) {
ret = PTR_ERR(qspi->mm_base);
- goto err;
+ goto err_master_put;
}
qspi->mm_size = resource_size(res);
if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
ret = -EINVAL;
- goto err;
+ goto err_master_put;
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ ret = irq;
+ goto err_master_put;
+ }
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err;
+ goto err_master_put;
}
init_completion(&qspi->data_completion);
@@ -599,23 +601,27 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qspi->clk)) {
ret = PTR_ERR(qspi->clk);
- goto err;
+ goto err_master_put;
}
qspi->clk_rate = clk_get_rate(qspi->clk);
if (!qspi->clk_rate) {
ret = -EINVAL;
- goto err;
+ goto err_master_put;
}
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- goto err;
+ goto err_master_put;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
- if (!IS_ERR(rstc)) {
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ if (ret == -EPROBE_DEFER)
+ goto err_qspi_release;
+ } else {
reset_control_assert(rstc);
udelay(2);
reset_control_deassert(rstc);
@@ -625,7 +631,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qspi);
ret = stm32_qspi_dma_setup(qspi);
if (ret)
- goto err;
+ goto err_qspi_release;
mutex_init(&qspi->lock);
@@ -641,8 +647,9 @@ static int stm32_qspi_probe(struct platform_device *pdev)
if (!ret)
return 0;
-err:
+err_qspi_release:
stm32_qspi_release(qspi);
+err_master_put:
spi_master_put(qspi->ctrl);
return ret;
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index e041f9c4ec47..44ac6eb3298d 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -175,7 +175,7 @@
#define SPI_DMA_MIN_BYTES 16
/**
- * stm32_spi_reg - stm32 SPI register & bitfield desc
+ * struct stm32_spi_reg - stm32 SPI register & bitfield desc
* @reg: register offset
* @mask: bitfield mask
* @shift: left shift
@@ -187,16 +187,16 @@ struct stm32_spi_reg {
};
/**
- * stm32_spi_regspec - stm32 registers definition, compatible dependent data
- * en: enable register and SPI enable bit
- * dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit
- * dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit
- * cpol: clock polarity register and polarity bit
- * cpha: clock phase register and phase bit
- * lsb_first: LSB transmitted first register and bit
- * br: baud rate register and bitfields
- * rx: SPI RX data register
- * tx: SPI TX data register
+ * struct stm32_spi_regspec - stm32 registers definition, compatible dependent data
+ * @en: enable register and SPI enable bit
+ * @dma_rx_en: SPI DMA RX enable register and SPI DMA RX enable bit
+ * @dma_tx_en: SPI DMA TX enable register and SPI DMA TX enable bit
+ * @cpol: clock polarity register and polarity bit
+ * @cpha: clock phase register and phase bit
+ * @lsb_first: LSB transmitted first register and bit
+ * @br: baud rate register and bitfields
+ * @rx: SPI RX data register
+ * @tx: SPI TX data register
*/
struct stm32_spi_regspec {
const struct stm32_spi_reg en;
@@ -213,7 +213,7 @@ struct stm32_spi_regspec {
struct stm32_spi;
/**
- * stm32_spi_cfg - stm32 compatible configuration data
+ * struct stm32_spi_cfg - stm32 compatible configuration data
* @regs: registers descriptions
* @get_fifo_size: routine to get fifo size
* @get_bpw_mask: routine to get bits per word mask
@@ -223,13 +223,13 @@ struct stm32_spi;
* @set_mode: routine to configure registers to desired mode
* @set_data_idleness: optional routine to configure registers to desired idle
* time between frames (if driver has this functionality)
- * set_number_of_data: optional routine to configure registers to desired
+ * @set_number_of_data: optional routine to configure registers to desired
* number of data (if driver has this functionality)
* @can_dma: routine to determine if the transfer is eligible for DMA use
* @transfer_one_dma_start: routine to start transfer a single spi_transfer
* using DMA
- * @dma_rx cb: routine to call after DMA RX channel operation is complete
- * @dma_tx cb: routine to call after DMA TX channel operation is complete
+ * @dma_rx_cb: routine to call after DMA RX channel operation is complete
+ * @dma_tx_cb: routine to call after DMA TX channel operation is complete
* @transfer_one_irq: routine to configure interrupts for driver
* @irq_handler_event: Interrupt handler for SPI controller events
* @irq_handler_thread: thread of interrupt handler for SPI controller
@@ -587,6 +587,7 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
/**
* stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
* @spi: pointer to the spi controller data structure
+ * @flush: boolean indicating that FIFO should be flushed
*
* Write in rx_buf depends on remaining bytes to avoid to write beyond
* rx_buf end.
@@ -756,6 +757,9 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
/**
* stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
+ * @master: controller master interface
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
*
* If driver has fifo and the current transfer size is greater than fifo size,
* use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes.
@@ -974,6 +978,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
+ * @master: controller master interface
+ * @msg: pointer to spi message
*/
static int stm32_spi_prepare_msg(struct spi_master *master,
struct spi_message *msg)
@@ -1026,6 +1032,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
/**
* stm32f4_spi_dma_tx_cb - dma callback
+ * @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA TX channel.
*/
@@ -1041,6 +1048,7 @@ static void stm32f4_spi_dma_tx_cb(void *data)
/**
* stm32f4_spi_dma_rx_cb - dma callback
+ * @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA RX channel.
*/
@@ -1054,6 +1062,7 @@ static void stm32f4_spi_dma_rx_cb(void *data)
/**
* stm32h7_spi_dma_cb - dma callback
+ * @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete or when an error
* occurs. If the transfer is complete, EOT flag is raised.
@@ -1079,6 +1088,9 @@ static void stm32h7_spi_dma_cb(void *data)
/**
* stm32_spi_dma_config - configure dma slave channel depending on current
* transfer bits_per_word.
+ * @spi: pointer to the spi controller data structure
+ * @dma_conf: pointer to the dma_slave_config structure
+ * @dir: direction of the dma transfer
*/
static void stm32_spi_dma_config(struct stm32_spi *spi,
struct dma_slave_config *dma_conf,
@@ -1126,6 +1138,7 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
/**
* stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
* interrupts
+ * @spi: pointer to the spi controller data structure
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
@@ -1166,6 +1179,7 @@ static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
/**
* stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using
* interrupts
+ * @spi: pointer to the spi controller data structure
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
@@ -1207,6 +1221,7 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
/**
* stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
+ * @spi: pointer to the spi controller data structure
*/
static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
@@ -1227,6 +1242,7 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
/**
* stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
+ * @spi: pointer to the spi controller data structure
*/
static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
@@ -1243,6 +1259,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
/**
* stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
+ * @spi: pointer to the spi controller data structure
+ * @xfer: pointer to the spi_transfer structure
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
@@ -1405,7 +1423,7 @@ static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
/**
* stm32_spi_communication_type - return transfer communication type
* @spi_dev: pointer to the spi device
- * transfer: pointer to spi transfer
+ * @transfer: pointer to spi transfer
*/
static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
struct spi_transfer *transfer)
@@ -1522,7 +1540,7 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
/**
* stm32h7_spi_number_of_data - configure number of data at current transfer
* @spi: pointer to the spi controller data structure
- * @len: transfer length
+ * @nb_words: transfer length (in words)
*/
static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
{
@@ -1546,6 +1564,9 @@ static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
* stm32_spi_transfer_one_setup - common setup to transfer a single
* spi_transfer either using DMA or
* interrupts.
+ * @spi: pointer to the spi controller data structure
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
*/
static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
struct spi_device *spi_dev,
@@ -1625,6 +1646,9 @@ out:
/**
* stm32_spi_transfer_one - transfer a single spi_transfer
+ * @master: controller master interface
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
@@ -1658,6 +1682,8 @@ static int stm32_spi_transfer_one(struct spi_master *master,
/**
* stm32_spi_unprepare_msg - relax the hardware
+ * @master: controller master interface
+ * @msg: pointer to the spi message
*/
static int stm32_spi_unprepare_msg(struct spi_master *master,
struct spi_message *msg)
@@ -1671,6 +1697,7 @@ static int stm32_spi_unprepare_msg(struct spi_master *master,
/**
* stm32f4_spi_config - Configure SPI controller as SPI master
+ * @spi: pointer to the spi controller data structure
*/
static int stm32f4_spi_config(struct stm32_spi *spi)
{
@@ -1701,6 +1728,7 @@ static int stm32f4_spi_config(struct stm32_spi *spi)
/**
* stm32h7_spi_config - Configure SPI controller as SPI master
+ * @spi: pointer to the spi controller data structure
*/
static int stm32h7_spi_config(struct stm32_spi *spi)
{
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 755221bc3745..c92c89467e7e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -510,6 +510,7 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->cs_gpio = -ENOENT;
+ spi->mode = ctlr->buswidth_override_bits;
spin_lock_init(&spi->statistics.lock);
@@ -1514,17 +1515,15 @@ void spi_take_timestamp_pre(struct spi_controller *ctlr,
if (!xfer->ptp_sts)
return;
- if (xfer->timestamped_pre)
+ if (xfer->timestamped)
return;
- if (progress < xfer->ptp_sts_word_pre)
+ if (progress > xfer->ptp_sts_word_pre)
return;
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_pre = progress;
- xfer->timestamped_pre = true;
-
if (irqs_off) {
local_irq_save(ctlr->irq_flags);
preempt_disable();
@@ -1553,7 +1552,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
if (!xfer->ptp_sts)
return;
- if (xfer->timestamped_post)
+ if (xfer->timestamped)
return;
if (progress < xfer->ptp_sts_word_post)
@@ -1569,7 +1568,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_post = progress;
- xfer->timestamped_post = true;
+ xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
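The hunks above merge the pre/post flags into one: the pre snapshot may be retaken while progress is still at or before the requested word, and the post snapshot latches the pair. A standalone sketch of that bracketing, assuming a hypothetical 4-word transfer with the snapshot requested around word 2:

#include <stdio.h>

struct xfer {
	int ptp_sts_word_pre, ptp_sts_word_post;
	int timestamped;
};

static void take_pre(struct xfer *x, int progress)
{
	if (x->timestamped || progress > x->ptp_sts_word_pre)
		return;
	x->ptp_sts_word_pre = progress;	/* record the achieved resolution */
	printf("pre  snapshot at word %d\n", progress);
}

static void take_post(struct xfer *x, int progress)
{
	if (x->timestamped || progress < x->ptp_sts_word_post)
		return;
	x->ptp_sts_word_post = progress;
	x->timestamped = 1;		/* single flag after this change */
	printf("post snapshot at word %d\n", progress);
}

int main(void)
{
	struct xfer x = { .ptp_sts_word_pre = 2, .ptp_sts_word_post = 2 };
	int word;

	for (word = 0; word < 4; word++) {	/* driver reports progress */
		take_pre(&x, word);
		take_post(&x, word);
	}
	return 0;
}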
@@ -1674,12 +1673,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
}
}
- if (unlikely(ctlr->ptp_sts_supported)) {
- list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
- WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
- WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
- }
- }
+ if (unlikely(ctlr->ptp_sts_supported))
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list)
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
spi_unmap_msg(ctlr, mesg);
@@ -1955,13 +1951,8 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi->mode |= SPI_CS_HIGH;
/* Device speed */
- rc = of_property_read_u32(nc, "spi-max-frequency", &value);
- if (rc) {
- dev_err(&ctlr->dev,
- "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
- return rc;
- }
- spi->max_speed_hz = value;
+ if (!of_property_read_u32(nc, "spi-max-frequency", &value))
+ spi->max_speed_hz = value;
return 0;
}
@@ -2181,9 +2172,10 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
return AE_NO_MEMORY;
}
+
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
- spi->mode = lookup.mode;
+ spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
spi->chip_select = lookup.chip_select;
@@ -4034,7 +4026,7 @@ static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
struct device *dev;
dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
- return dev ? to_spi_device(dev) : NULL;
+ return to_spi_device(dev);
}
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2ab6e782f14c..80dd1025b953 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -275,14 +275,14 @@ static int spidev_message(struct spidev_data *spidev,
#ifdef VERBOSE
dev_dbg(&spidev->spi->dev,
" xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
- u_tmp->len,
- u_tmp->rx_buf ? "rx " : "",
- u_tmp->tx_buf ? "tx " : "",
- u_tmp->cs_change ? "cs " : "",
- u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
- u_tmp->delay_usecs,
- u_tmp->word_delay_usecs,
- u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
+ k_tmp->len,
+ k_tmp->rx_buf ? "rx " : "",
+ k_tmp->tx_buf ? "tx " : "",
+ k_tmp->cs_change ? "cs " : "",
+ k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
+ k_tmp->delay.value,
+ k_tmp->word_delay.value,
+ k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
}
@@ -454,10 +454,11 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
spi->max_speed_hz = tmp;
retval = spi_setup(spi);
- if (retval >= 0)
+ if (retval == 0) {
spidev->speed_hz = tmp;
- else
- dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
+ dev_dbg(&spi->dev, "%d Hz (max)\n",
+ spidev->speed_hz);
+ }
spi->max_speed_hz = save;
}
break;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 4f028a80d6c4..52d2e0f33be7 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -26,9 +26,9 @@ static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
int i, pos = 0;
for (i = 0; i < sprom_size_words; i++)
- pos += snprintf(buf + pos, buf_len - pos - 1,
+ pos += scnprintf(buf + pos, buf_len - pos - 1,
"%04X", swab16(sprom[i]) & 0xFFFF);
- pos += snprintf(buf + pos, buf_len - pos - 1, "\n");
+ pos += scnprintf(buf + pos, buf_len - pos - 1, "\n");
return pos + 1;
}
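
The swap to scnprintf() matters when the buffer fills: snprintf() returns the length the string would have had, so accumulating its return value into pos can march past the end of buf on truncation, while scnprintf() returns the number of characters actually stored (excluding the NUL) and keeps pos bounded. A hedged sketch of the accumulation idiom (n_words and words[] are illustrative names):

    #include <linux/kernel.h>

    /* pos can never exceed buf_len: scnprintf() reports what was written. */
    size_t pos = 0;
    int i;

    for (i = 0; i < n_words; i++)
    	pos += scnprintf(buf + pos, buf_len - pos, "%04X", words[i]);
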
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index baccd7c883cc..a9939ff9490e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -42,6 +42,10 @@ source "drivers/staging/rtl8188eu/Kconfig"
source "drivers/staging/rts5208/Kconfig"
+source "drivers/staging/octeon/Kconfig"
+
+source "drivers/staging/octeon-usb/Kconfig"
+
source "drivers/staging/vt6655/Kconfig"
source "drivers/staging/vt6656/Kconfig"
@@ -112,15 +116,8 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
-source "drivers/staging/wusbcore/Kconfig"
-source "drivers/staging/uwb/Kconfig"
-
-source "drivers/staging/exfat/Kconfig"
-
source "drivers/staging/qlge/Kconfig"
-source "drivers/staging/hp/Kconfig"
-
source "drivers/staging/wfx/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index fdd03fd6e704..4d34198151b3 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
+obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
+obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
@@ -46,9 +48,5 @@ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
-obj-$(CONFIG_UWB) += uwb/
-obj-$(CONFIG_USB_WUSB) += wusbcore/
-obj-$(CONFIG_STAGING_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
-obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_WFX) += wfx/
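
Restoring the octeon drivers touches both kbuild files in tandem: the Kconfig must be sourced so the options exist, and the Makefile must descend into the directory when the option is set. A hedged sketch of the pairing for a hypothetical staging driver "foo" (name illustrative):

    # drivers/staging/Kconfig
    source "drivers/staging/foo/Kconfig"

    # drivers/staging/Makefile
    obj-$(CONFIG_FOO) += foo/
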
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 08d1bbbebf2d..e84b4fb493d6 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2725,8 +2725,10 @@ static int comedi_open(struct inode *inode, struct file *file)
}
cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
- if (!cfp)
+ if (!cfp) {
+ comedi_dev_put(dev);
return -ENOMEM;
+ }
cfp->dev = dev;
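
The comedi_open() fix balances the device reference on the allocation failure path: every exit after the get must drop the reference until ownership passes to cfp. A hedged sketch of the pairing (helper names follow the comedi core; surrounding code elided):

    dev = comedi_dev_get_from_minor(minor);   /* takes a reference */
    if (!dev)
    	return -ENODEV;

    cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
    if (!cfp) {
    	comedi_dev_put(dev);              /* must drop it on every early exit */
    	return -ENOMEM;
    }
    cfp->dev = dev;                           /* reference now owned by cfp */
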
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index 83026ba63d1c..78a7c1b3448a 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
int ret;
for (i = 0; i < insn->n; i++) {
+ /* FIXME: lo bit 0 chooses voltage output or current output */
lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
hi = (data[i] & 0xff0) >> 4;
@@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
if (ret)
return ret;
+ outb(hi, dev->iobase + DT2815_DATA);
+
devpriv->ao_readback[chan] = data[i];
}
return i;
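
The dt2815 fix restores the second half of the DAC load: the low byte (data nibble, channel, start bit) goes out first, the driver waits for the board to signal ready, and only then may the high byte be written. A hedged sketch of the sequence; wait_for_dac_ready() is a hypothetical stand-in for the driver's status poll:

    /* wait_for_dac_ready(): hypothetical name for the driver's status wait. */
    lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
    hi = (data[i] & 0xff0) >> 4;

    outb(lo, dev->iobase + DT2815_DATA);    /* low byte starts the cycle */
    ret = wait_for_dac_ready(dev);
    if (ret)
    	return ret;
    outb(hi, dev->iobase + DT2815_DATA);    /* high byte: the write this hunk adds */
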
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index e15e33ed94ae..89dc84d3c803 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -484,14 +484,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
s->async->events |= COMEDI_CB_EOA;
return;
}
-#if 0
- /* clear the dual dma flag, making this the last dma segment */
- /* XXX probably wrong */
- if (!devpriv->ntrig) {
- devpriv->supcsr &= ~DT2821_SUPCSR_DDMA;
- outw(devpriv->supcsr, dev->iobase + DT2821_SUPCSR_REG);
- }
-#endif
+
/* restart the channel */
dt282x_prep_ai_dma(dev, dma->cur_dma, 0);
@@ -534,28 +527,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
s_ao->async->events |= COMEDI_CB_ERROR;
handled = 1;
}
-#if 0
- if (adcsr & DT2821_ADCSR_ADDONE) {
- unsigned short data;
-
- data = inw(dev->iobase + DT2821_ADDAT_REG);
- data &= s->maxdata;
- if (devpriv->ad_2scomp)
- data = comedi_offset_munge(s, data);
- comedi_buf_write_samples(s, &data, 1);
-
- devpriv->nread--;
- if (!devpriv->nread) {
- s->async->events |= COMEDI_CB_EOA;
- } else {
- if (supcsr & DT2821_SUPCSR_SCDN)
- outw(devpriv->supcsr | DT2821_SUPCSR_STRIG,
- dev->iobase + DT2821_SUPCSR_REG);
- }
- handled = 1;
- }
-#endif
comedi_handle_events(dev, s);
if (s_ao)
comedi_handle_events(dev, s_ao);
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index f7c365b70106..011e19161b78 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -439,9 +439,8 @@ static int dt3k_ai_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- arg);
+ err |= comedi_check_trigger_arg_min(
+ &cmd->scan_begin_arg, arg);
}
}
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 4ee9b260eab0..75d5c9c24596 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1035,7 +1035,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
ni_660x_init_tio_chips(dev, board->n_chips);
/* prepare the device for globally-named routes. */
- if (ni_assign_device_routes("ni_660x", board->name,
+ if (ni_assign_device_routes("ni_660x", board->name, NULL,
&devpriv->routing_tables) < 0) {
dev_warn(dev->class_dev, "%s: %s device has no signal routing table.\n",
__func__, board->name);
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index 68ad9676f962..0bca7d752015 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -266,19 +266,9 @@ static int atmio16d_ai_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_FOLLOW) {
/* internal trigger */
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
- } else {
-#if 0
- /* external trigger */
- /* should be level/edge, hi/lo specification here */
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-#endif
}
err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 10000);
-#if 0
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg, SLOWEST_TIMER);
-#endif
-
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index 406952f5521d..ce0f85026277 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -560,14 +560,13 @@ static int labpc_ai_cmdtest(struct comedi_device *dev,
/* make sure scan timing is not too fast */
if (cmd->scan_begin_src == TRIG_TIMER) {
if (cmd->convert_src == TRIG_TIMER) {
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- cmd->convert_arg *
- cmd->chanlist_len);
+ err |= comedi_check_trigger_arg_min(
+ &cmd->scan_begin_arg,
+ cmd->convert_arg * cmd->chanlist_len);
}
- err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
- board->ai_speed *
- cmd->chanlist_len);
+ err |= comedi_check_trigger_arg_min(
+ &cmd->scan_begin_arg,
+ board->ai_speed * cmd->chanlist_len);
}
switch (cmd->stop_src) {
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index f98e3ae27bff..d99f4065b96d 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2199,9 +2199,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
case TRIG_EXT:
ai_trig |= NISTC_AI_TRIG_START1_SEL(
- ni_get_reg_value_roffs(CR_CHAN(cmd->start_arg),
- NI_AI_StartTrigger,
- &devpriv->routing_tables, 1));
+ ni_get_reg_value_roffs(
+ CR_CHAN(cmd->start_arg),
+ NI_AI_StartTrigger,
+ &devpriv->routing_tables, 1));
if (cmd->start_arg & CR_INVERT)
ai_trig |= NISTC_AI_TRIG_START1_POLARITY;
@@ -2311,10 +2312,12 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
(cmd->scan_begin_arg & ~CR_EDGE) !=
(cmd->convert_arg & ~CR_EDGE))
start_stop_select |= NISTC_AI_START_SYNC;
+
start_stop_select |= NISTC_AI_START_SEL(
- ni_get_reg_value_roffs(CR_CHAN(cmd->scan_begin_arg),
- NI_AI_SampleClock,
- &devpriv->routing_tables, 1));
+ ni_get_reg_value_roffs(
+ CR_CHAN(cmd->scan_begin_arg),
+ NI_AI_SampleClock,
+ &devpriv->routing_tables, 1));
ni_stc_writew(dev, start_stop_select, NISTC_AI_START_STOP_REG);
break;
}
@@ -2343,9 +2346,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
case TRIG_EXT:
mode1 |= NISTC_AI_MODE1_CONVERT_SRC(
- ni_get_reg_value_roffs(CR_CHAN(cmd->convert_arg),
- NI_AI_ConvertClock,
- &devpriv->routing_tables, 1));
+ ni_get_reg_value_roffs(
+ CR_CHAN(cmd->convert_arg),
+ NI_AI_ConvertClock,
+ &devpriv->routing_tables, 1));
if ((cmd->convert_arg & CR_INVERT) == 0)
mode1 |= NISTC_AI_MODE1_CONVERT_POLARITY;
ni_stc_writew(dev, mode1, NISTC_AI_MODE1_REG);
@@ -2970,9 +2974,11 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
NISTC_AO_TRIG_START1_SYNC;
} else { /* TRIG_EXT */
trigsel = NISTC_AO_TRIG_START1_SEL(
- ni_get_reg_value_roffs(CR_CHAN(cmd->start_arg),
- NI_AO_StartTrigger,
- &devpriv->routing_tables, 1));
+ ni_get_reg_value_roffs(
+ CR_CHAN(cmd->start_arg),
+ NI_AO_StartTrigger,
+ &devpriv->routing_tables, 1));
+
/* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
if (cmd->start_arg & CR_INVERT)
trigsel |= NISTC_AO_TRIG_START1_POLARITY;
@@ -3079,12 +3085,10 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
* zero out these bit fields to be set below. Does an ao-reset do this
* automatically?
*/
- devpriv->ao_mode1 &= ~(
- NISTC_AO_MODE1_UI_SRC_MASK |
- NISTC_AO_MODE1_UI_SRC_POLARITY |
- NISTC_AO_MODE1_UPDATE_SRC_MASK |
- NISTC_AO_MODE1_UPDATE_SRC_POLARITY
- );
+ devpriv->ao_mode1 &= ~(NISTC_AO_MODE1_UI_SRC_MASK |
+ NISTC_AO_MODE1_UI_SRC_POLARITY |
+ NISTC_AO_MODE1_UPDATE_SRC_MASK |
+ NISTC_AO_MODE1_UPDATE_SRC_POLARITY);
if (cmd->scan_begin_src == TRIG_TIMER) {
unsigned int trigvar;
@@ -3134,9 +3138,10 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
/* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC(
- ni_get_reg_value(CR_CHAN(cmd->scan_begin_arg),
- NI_AO_SampleClock,
- &devpriv->routing_tables));
+ ni_get_reg_value(
+ CR_CHAN(cmd->scan_begin_arg),
+ NI_AO_SampleClock,
+ &devpriv->routing_tables));
if (cmd->scan_begin_arg & CR_INVERT)
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY;
}
@@ -3673,9 +3678,10 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
cdo_mode_bits = NI_M_CDO_MODE_FIFO_MODE |
NI_M_CDO_MODE_HALT_ON_ERROR |
NI_M_CDO_MODE_SAMPLE_SRC(
- ni_get_reg_value(CR_CHAN(cmd->scan_begin_arg),
- NI_DO_SampleClock,
- &devpriv->routing_tables));
+ ni_get_reg_value(
+ CR_CHAN(cmd->scan_begin_arg),
+ NI_DO_SampleClock,
+ &devpriv->routing_tables));
if (cmd->scan_begin_arg & CR_INVERT)
cdo_mode_bits |= NI_M_CDO_MODE_POLARITY;
ni_writel(dev, cdo_mode_bits, NI_M_CDO_MODE_REG);
@@ -5975,6 +5981,7 @@ static int ni_E_init(struct comedi_device *dev,
/* prepare the device for globally-named routes. */
if (ni_assign_device_routes(dev_family, board->name,
+ board->alt_route_name,
&devpriv->routing_tables) < 0) {
dev_warn(dev->class_dev, "%s: %s device has no signal routing table.\n",
__func__, board->name);
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 14b26fffe049..7c82d5f9778f 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -888,6 +888,7 @@ static const struct ni_board_struct ni_boards[] = {
},
[BOARD_PCIE6251] = {
.name = "pcie-6251",
+ .alt_route_name = "pci-6251",
.n_adchan = 16,
.ai_maxdata = 0xffff,
.ai_fifo_depth = 4095,
@@ -976,6 +977,7 @@ static const struct ni_board_struct ni_boards[] = {
},
[BOARD_PCIE6259] = {
.name = "pcie-6259",
+ .alt_route_name = "pci-6259",
.n_adchan = 32,
.ai_maxdata = 0xffff,
.ai_fifo_depth = 4095,
diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c
index 8f398b30f5bf..07cb970340db 100644
--- a/drivers/staging/comedi/drivers/ni_routes.c
+++ b/drivers/staging/comedi/drivers/ni_routes.c
@@ -50,20 +50,13 @@
#define RVi(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)])
/*
- * Find the proper route_values and ni_device_routes tables for this particular
- * device.
- *
- * Return: -ENODATA if either was not found; 0 if both were found.
+ * Find the route values for a device family.
*/
-static int ni_find_device_routes(const char *device_family,
- const char *board_name,
- struct ni_route_tables *tables)
+static const u8 *ni_find_route_values(const char *device_family)
{
- const struct ni_device_routes *dr = NULL;
const u8 *rv = NULL;
int i;
- /* First, find the register_values table for this device family */
for (i = 0; ni_all_route_values[i]; ++i) {
if (memcmp(ni_all_route_values[i]->family, device_family,
strnlen(device_family, 30)) == 0) {
@@ -71,8 +64,18 @@ static int ni_find_device_routes(const char *device_family,
break;
}
}
+ return rv;
+}
+
+/*
+ * Find the valid routes for a board.
+ */
+static const struct ni_device_routes *
+ni_find_valid_routes(const char *board_name)
+{
+ const struct ni_device_routes *dr = NULL;
+ int i;
- /* Second, find the set of routes valid for this device. */
for (i = 0; ni_device_routes_list[i]; ++i) {
if (memcmp(ni_device_routes_list[i]->device, board_name,
strnlen(board_name, 30)) == 0) {
@@ -80,6 +83,31 @@ static int ni_find_device_routes(const char *device_family,
break;
}
}
+ return dr;
+}
+
+/*
+ * Find the proper route_values and ni_device_routes tables for this particular
+ * device. Possibly try an alternate board name if device routes not found
+ * for the actual board name.
+ *
+ * Return: -ENODATA if either was not found; 0 if both were found.
+ */
+static int ni_find_device_routes(const char *device_family,
+ const char *board_name,
+ const char *alt_board_name,
+ struct ni_route_tables *tables)
+{
+ const struct ni_device_routes *dr;
+ const u8 *rv;
+
+ /* First, find the register_values table for this device family */
+ rv = ni_find_route_values(device_family);
+
+ /* Second, find the set of routes valid for this device. */
+ dr = ni_find_valid_routes(board_name);
+ if (!dr && alt_board_name)
+ dr = ni_find_valid_routes(alt_board_name);
tables->route_values = rv;
tables->valid_routes = dr;
@@ -93,15 +121,28 @@ static int ni_find_device_routes(const char *device_family,
/**
* ni_assign_device_routes() - Assign the proper lookup table for NI signal
* routing to the specified NI device.
+ * @device_family: Device family name (determines route values).
+ * @board_name: Board name (determines set of routes).
+ * @alt_board_name: Optional alternate board name to try on failure.
+ * @tables: Pointer to assigned routing information.
+ *
+ * Finds the route values for the device family and the set of valid routes
+ * for the board. If valid routes could not be found for the actual board
+ * name and an alternate board name has been specified, try that one.
+ *
+ * On failure, the assigned routing information may be partially filled
+ * (for example, with the route values but not the set of valid routes).
*
* Return: -ENODATA if assignment was not successful; 0 if successful.
*/
int ni_assign_device_routes(const char *device_family,
const char *board_name,
+ const char *alt_board_name,
struct ni_route_tables *tables)
{
memset(tables, 0, sizeof(struct ni_route_tables));
- return ni_find_device_routes(device_family, board_name, tables);
+ return ni_find_device_routes(device_family, board_name, alt_board_name,
+ tables);
}
EXPORT_SYMBOL_GPL(ni_assign_device_routes);
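
With the extra parameter in place, a caller passes NULL to keep the old single-name lookup, or a fallback name when a board shares routing data with a sibling. A hedged usage sketch (family and board strings illustrative):

    /* NULL alt name == old behaviour; "pci-6251" reuses a sibling's table. */
    if (ni_assign_device_routes("ni_mseries", "pcie-6251", "pci-6251",
    			    &devpriv->routing_tables) < 0)
    	dev_warn(dev->class_dev, "%s: no signal routing table\n", __func__);
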
diff --git a/drivers/staging/comedi/drivers/ni_routes.h b/drivers/staging/comedi/drivers/ni_routes.h
index 3211a16adc6f..b7680fd2afe1 100644
--- a/drivers/staging/comedi/drivers/ni_routes.h
+++ b/drivers/staging/comedi/drivers/ni_routes.h
@@ -76,6 +76,7 @@ struct ni_route_tables {
*/
int ni_assign_device_routes(const char *device_family,
const char *board_name,
+ const char *alt_board_name,
struct ni_route_tables *tables);
/*
diff --git a/drivers/staging/comedi/drivers/ni_routing/tools/.gitignore b/drivers/staging/comedi/drivers/ni_routing/tools/.gitignore
index ef38008280a9..e3ebffcd900e 100644
--- a/drivers/staging/comedi/drivers/ni_routing/tools/.gitignore
+++ b/drivers/staging/comedi/drivers/ni_routing/tools/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
comedi_h.py
*.pyc
ni_values.py
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 35427f8bf8f7..fbc0b753a0f5 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -941,6 +941,7 @@ enum ni_reg_type {
struct ni_board_struct {
const char *name;
+ const char *alt_route_name;
int device_id;
int isapnp_id;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index b264db32a411..f6154addaa95 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -1342,8 +1342,8 @@ static int ni_m_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
static inline unsigned int ni_tio_get_gate_mode(struct ni_gpct *counter)
{
- unsigned int mode = ni_tio_get_soft_copy(
- counter, NITIO_MODE_REG(counter->counter_index));
+ unsigned int mode = ni_tio_get_soft_copy(counter,
+ NITIO_MODE_REG(counter->counter_index));
unsigned int ret = 0;
if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED)
@@ -1358,8 +1358,8 @@ static inline unsigned int ni_tio_get_gate_mode(struct ni_gpct *counter)
static inline unsigned int ni_tio_get_gate2_mode(struct ni_gpct *counter)
{
- unsigned int mode = ni_tio_get_soft_copy(
- counter, NITIO_GATE2_REG(counter->counter_index));
+ unsigned int mode = ni_tio_get_soft_copy(counter,
+ NITIO_GATE2_REG(counter->counter_index));
unsigned int ret = 0;
if (!(mode & GI_GATE2_MODE))
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index bb400e08f0bc..8c04af09be2c 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -815,9 +815,8 @@ static int rtd_ai_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= comedi_check_trigger_arg_min(&cmd->
- scan_begin_arg,
- arg);
+ err |= comedi_check_trigger_arg_min(
+ &cmd->scan_begin_arg, arg);
}
}
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 39049d3c56d7..084a8e7b9fc2 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -1892,8 +1892,7 @@ static int s626_ai_cmdtest(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER) {
arg = cmd->convert_arg * cmd->scan_end_arg;
err |= comedi_check_trigger_arg_min(
- &cmd->scan_begin_arg,
- arg);
+ &cmd->scan_begin_arg, arg);
}
}
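
The same validation recurs across dt3000, labpc, rtd520 and s626: when the scan clock is a timer, one scan period must be long enough to hold every conversion in it. comedi_check_trigger_arg_min(), as used throughout these drivers, clamps the argument up to the minimum and reports -EINVAL when it had to, which the caller accumulates into err. A hedged sketch:

    /* Scan period must cover all conversions in a scan. */
    if (cmd->scan_begin_src == TRIG_TIMER) {
    	arg = cmd->convert_arg * cmd->scan_end_arg;
    	err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
    }
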
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
deleted file mode 100644
index 292a19dfcaf5..000000000000
--- a/drivers/staging/exfat/Kconfig
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config STAGING_EXFAT_FS
- tristate "exFAT fs support"
- depends on BLOCK
- select NLS
- help
- This adds support for the exFAT file system.
-
-config STAGING_EXFAT_DISCARD
- bool "enable discard support"
- depends on STAGING_EXFAT_FS
- default y
-
-config STAGING_EXFAT_DELAYED_SYNC
- bool "enable delayed sync"
- depends on STAGING_EXFAT_FS
- default n
-
-config STAGING_EXFAT_KERNEL_DEBUG
- bool "enable kernel debug features via ioctl"
- depends on STAGING_EXFAT_FS
- default n
-
-config STAGING_EXFAT_DEBUG_MSG
- bool "print debug messages"
- depends on STAGING_EXFAT_FS
- default n
-
-config STAGING_EXFAT_DEFAULT_CODEPAGE
- int "Default codepage for exFAT"
- default 437
- depends on STAGING_EXFAT_FS
- help
- This option should be set to the codepage of your exFAT filesystems.
-
-config STAGING_EXFAT_DEFAULT_IOCHARSET
- string "Default iocharset for exFAT"
- default "utf8"
- depends on STAGING_EXFAT_FS
- help
- Set this to the default input/output character set you'd like exFAT to use.
diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile
deleted file mode 100644
index 057556eeca0c..000000000000
--- a/drivers/staging/exfat/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-obj-$(CONFIG_STAGING_EXFAT_FS) += exfat.o
-
-exfat-y := exfat_core.o \
- exfat_super.o \
- exfat_blkdev.o \
- exfat_cache.o \
- exfat_nls.o \
- exfat_upcase.o
diff --git a/drivers/staging/exfat/TODO b/drivers/staging/exfat/TODO
deleted file mode 100644
index a283ce534cf4..000000000000
--- a/drivers/staging/exfat/TODO
+++ /dev/null
@@ -1,69 +0,0 @@
-A laundry list of things that need looking at, most of which will
-require more work than the average checkpatch cleanup...
-
-Note that some of these entries may not be bugs - they're things
-that need to be looked at, and *possibly* fixed.
-
-Clean up the ffsCamelCase function names.
-
-Fix (thing)->flags to not use magic numbers - multiple offenders
-
-Sort out all the s32/u32/u8 nonsense - most of these should be plain int.
-
-exfat_core.c - ffsReadFile - the goto err_out seems to leak a brelse().
-same for ffsWriteFile.
-
-All the calls to fs_sync() need to be looked at, particularly in the
-context of EXFAT_DELAYED_SYNC. Currently, if that's defined, we only
-flush to disk when sync() gets called. We should be doing at least
-metadata flushes at appropriate times.
-
-ffsTruncateFile - if (old_size <= new_size) {
-That doesn't look right. How did it ever work? Are they relying on lazy
-block allocation when actual writes happen? If nothing else, it never
-does the 'fid->size = new_size' or the inode update....
-
-ffsSetAttr() is just dangling in the breeze, not wired up at all...
-
-Convert global mutexes to a per-superblock mutex.
-
-Right now, we load exactly one UTF-8 table. Check to see
-if that plays nice with different codepage and iocharset values
-for simultaneous mounts of different devices
-
-exfat_rmdir() checks for -EBUSY but ffsRemoveDir() doesn't return it.
-In fact, there's a complete lack of -EBUSY testing anywhere.
-
-There's probably a few missing checks for -EEXIST
-
-check return codes of sync_dirty_buffer()
-
-Why is remove_file doing a num_entries++??
-
-Double check a lot of can't-happen parameter checks (for null pointers for
-things that have only one call site and can't pass a null, etc).
-
-All the DEBUG stuff can probably be tossed, including the ioctl(). Either
-that, or convert to a proper fault-injection system.
-
-exfat_remount does exactly one thing. Fix to actually deal with remount
-options, particularly handling R/O correctly. For that matter, allow
-R/O mounts in the first place.
-
-Figure out why the VFAT code used multi_sector_(read|write) but the
-exfat code doesn't use it. The difference matters on SSDs with wear leveling.
-
-exfat_fat_sync(), exfat_buf_sync(), and sync_alloc_bitmap()
-aren't called anyplace....
-
-Create helper function for exfat_set_entry_time() and exfat_set_entry_type()
-because it's sort of ugly to be calling the same function directly and
-other code calling through the fs_func struct pointers...
-
-clean up the remaining vol_type checks, which are of two types:
-some are ?: operators with magic numbers, and the rest are places
-where we're doing stuff with '.' and '..'.
-
-Patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- Valdis Kletnieks <valdis.kletnieks@vt.edu>
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
deleted file mode 100644
index 4d87360fab35..000000000000
--- a/drivers/staging/exfat/exfat.h
+++ /dev/null
@@ -1,824 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
- */
-
-#ifndef _EXFAT_H
-#define _EXFAT_H
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
- /* For Debugging Purpose */
- /* IOCTL code 'f' used by
- * - file systems typically #0~0x1F
- * - embedded terminal devices #128~
- * - exts for debugging purpose #99
- * numbers 100 and 101 are available now but have possible conflicts
- */
-#define EXFAT_IOC_GET_DEBUGFLAGS _IOR('f', 100, long)
-#define EXFAT_IOC_SET_DEBUGFLAGS _IOW('f', 101, long)
-
-#define EXFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01
-#define EXFAT_DEBUGFLAGS_ERROR_RW 0x02
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-
-#ifdef CONFIG_STAGING_EXFAT_DEBUG_MSG
-#define DEBUG 1
-#else
-#undef DEBUG
-#endif
-
-#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
-
-#define DENTRY_SIZE 32 /* dir entry size */
-#define DENTRY_SIZE_BITS 5
-
-/* PBR entries */
-#define PBR_SIGNATURE 0xAA55
-#define EXT_SIGNATURE 0xAA550000
-#define VOL_LABEL "NO NAME " /* size should be 11 */
-#define OEM_NAME "MSWIN4.1" /* size should be 8 */
-#define STR_FAT12 "FAT12 " /* size should be 8 */
-#define STR_FAT16 "FAT16 " /* size should be 8 */
-#define STR_FAT32 "FAT32 " /* size should be 8 */
-#define STR_EXFAT "EXFAT " /* size should be 8 */
-#define VOL_CLEAN 0x0000
-#define VOL_DIRTY 0x0002
-
-/* max number of clusters */
-#define FAT12_THRESHOLD 4087 /* 2^12 - 1 + 2 (clu 0 & 1) */
-#define FAT16_THRESHOLD 65527 /* 2^16 - 1 + 2 */
-#define FAT32_THRESHOLD 268435457 /* 2^28 - 1 + 2 */
-#define EXFAT_THRESHOLD 268435457 /* 2^28 - 1 + 2 */
-
-/* file types */
-#define TYPE_UNUSED 0x0000
-#define TYPE_DELETED 0x0001
-#define TYPE_INVALID 0x0002
-#define TYPE_CRITICAL_PRI 0x0100
-#define TYPE_BITMAP 0x0101
-#define TYPE_UPCASE 0x0102
-#define TYPE_VOLUME 0x0103
-#define TYPE_DIR 0x0104
-#define TYPE_FILE 0x011F
-#define TYPE_SYMLINK 0x015F
-#define TYPE_CRITICAL_SEC 0x0200
-#define TYPE_STREAM 0x0201
-#define TYPE_EXTEND 0x0202
-#define TYPE_ACL 0x0203
-#define TYPE_BENIGN_PRI 0x0400
-#define TYPE_GUID 0x0401
-#define TYPE_PADDING 0x0402
-#define TYPE_ACLTAB 0x0403
-#define TYPE_BENIGN_SEC 0x0800
-#define TYPE_ALL 0x0FFF
-
-/* time modes */
-#define TM_CREATE 0
-#define TM_MODIFY 1
-#define TM_ACCESS 2
-
-/* checksum types */
-#define CS_DIR_ENTRY 0
-#define CS_PBR_SECTOR 1
-#define CS_DEFAULT 2
-
-#define CLUSTER_16(x) ((u16)(x))
-#define CLUSTER_32(x) ((u32)(x))
-
-#define START_SECTOR(x) \
- ((((sector_t)((x) - 2)) << p_fs->sectors_per_clu_bits) + \
- p_fs->data_start_sector)
-
-#define IS_LAST_SECTOR_IN_CLUSTER(sec) \
- ((((sec) - p_fs->data_start_sector + 1) & \
- ((1 << p_fs->sectors_per_clu_bits) - 1)) == 0)
-
-#define GET_CLUSTER_FROM_SECTOR(sec) \
- ((u32)((((sec) - p_fs->data_start_sector) >> \
- p_fs->sectors_per_clu_bits) + 2))
-
-#define GET16(p_src) \
- (((u16)(p_src)[0]) | (((u16)(p_src)[1]) << 8))
-#define GET32(p_src) \
- (((u32)(p_src)[0]) | (((u32)(p_src)[1]) << 8) | \
- (((u32)(p_src)[2]) << 16) | (((u32)(p_src)[3]) << 24))
-#define GET64(p_src) \
- (((u64)(p_src)[0]) | (((u64)(p_src)[1]) << 8) | \
- (((u64)(p_src)[2]) << 16) | (((u64)(p_src)[3]) << 24) | \
- (((u64)(p_src)[4]) << 32) | (((u64)(p_src)[5]) << 40) | \
- (((u64)(p_src)[6]) << 48) | (((u64)(p_src)[7]) << 56))
-
-#define SET16(p_dst, src) \
- do { \
- (p_dst)[0] = (u8)(src); \
- (p_dst)[1] = (u8)(((u16)(src)) >> 8); \
- } while (0)
-#define SET32(p_dst, src) \
- do { \
- (p_dst)[0] = (u8)(src); \
- (p_dst)[1] = (u8)(((u32)(src)) >> 8); \
- (p_dst)[2] = (u8)(((u32)(src)) >> 16); \
- (p_dst)[3] = (u8)(((u32)(src)) >> 24); \
- } while (0)
-#define SET64(p_dst, src) \
- do { \
- (p_dst)[0] = (u8)(src); \
- (p_dst)[1] = (u8)(((u64)(src)) >> 8); \
- (p_dst)[2] = (u8)(((u64)(src)) >> 16); \
- (p_dst)[3] = (u8)(((u64)(src)) >> 24); \
- (p_dst)[4] = (u8)(((u64)(src)) >> 32); \
- (p_dst)[5] = (u8)(((u64)(src)) >> 40); \
- (p_dst)[6] = (u8)(((u64)(src)) >> 48); \
- (p_dst)[7] = (u8)(((u64)(src)) >> 56); \
- } while (0)
-
-#ifdef __LITTLE_ENDIAN
-#define GET16_A(p_src) (*((u16 *)(p_src)))
-#define GET32_A(p_src) (*((u32 *)(p_src)))
-#define GET64_A(p_src) (*((u64 *)(p_src)))
-#define SET16_A(p_dst, src) (*((u16 *)(p_dst)) = (u16)(src))
-#define SET32_A(p_dst, src) (*((u32 *)(p_dst)) = (u32)(src))
-#define SET64_A(p_dst, src) (*((u64 *)(p_dst)) = (u64)(src))
-#else /* BIG_ENDIAN */
-#define GET16_A(p_src) GET16(p_src)
-#define GET32_A(p_src) GET32(p_src)
-#define GET64_A(p_src) GET64(p_src)
-#define SET16_A(p_dst, src) SET16(p_dst, src)
-#define SET32_A(p_dst, src) SET32(p_dst, src)
-#define SET64_A(p_dst, src) SET64(p_dst, src)
-#endif
-
-/* cache size (in number of sectors) */
-/* (should be an exponential value of 2) */
-#define FAT_CACHE_SIZE 128
-#define FAT_CACHE_HASH_SIZE 64
-#define BUF_CACHE_SIZE 256
-#define BUF_CACHE_HASH_SIZE 64
-
-/* Upcase table macro */
-#define HIGH_INDEX_BIT (8)
-#define HIGH_INDEX_MASK (0xFF00)
-#define LOW_INDEX_BIT (16 - HIGH_INDEX_BIT)
-#define UTBL_ROW_COUNT BIT(LOW_INDEX_BIT)
-#define UTBL_COL_COUNT BIT(HIGH_INDEX_BIT)
-
-static inline u16 get_col_index(u16 i)
-{
- return i >> LOW_INDEX_BIT;
-}
-
-static inline u16 get_row_index(u16 i)
-{
- return i & ~HIGH_INDEX_MASK;
-}
-
-#define EXFAT_SUPER_MAGIC (0x2011BAB0L)
-#define EXFAT_ROOT_INO 1
-
-/* FAT types */
-#define FAT12 0x01 /* FAT12 */
-#define FAT16 0x0E /* Win95 FAT16 (LBA) */
-#define FAT32 0x0C /* Win95 FAT32 (LBA) */
-#define EXFAT 0x07 /* exFAT */
-
-/* file name lengths */
-#define MAX_CHARSET_SIZE 3 /* max size of multi-byte character */
-#define MAX_PATH_DEPTH 15 /* max depth of path name */
-#define MAX_NAME_LENGTH 256 /* max len of filename including NULL */
-#define MAX_PATH_LENGTH 260 /* max len of pathname including NULL */
-#define DOS_NAME_LENGTH 11 /* DOS filename length excluding NULL */
-#define DOS_PATH_LENGTH 80 /* DOS pathname length excluding NULL */
-
-/* file attributes */
-#define ATTR_NORMAL 0x0000
-#define ATTR_READONLY 0x0001
-#define ATTR_HIDDEN 0x0002
-#define ATTR_SYSTEM 0x0004
-#define ATTR_VOLUME 0x0008
-#define ATTR_SUBDIR 0x0010
-#define ATTR_ARCHIVE 0x0020
-#define ATTR_SYMLINK 0x0040
-#define ATTR_EXTEND 0x000F
-#define ATTR_RWMASK 0x007E
-
-/* file creation modes */
-#define FM_REGULAR 0x00
-#define FM_SYMLINK 0x40
-
-#define NUM_UPCASE 2918
-
-#define DOS_CUR_DIR_NAME ". "
-#define DOS_PAR_DIR_NAME ".. "
-
-#ifdef __LITTLE_ENDIAN
-#define UNI_CUR_DIR_NAME ".\0"
-#define UNI_PAR_DIR_NAME ".\0.\0"
-#else
-#define UNI_CUR_DIR_NAME "\0."
-#define UNI_PAR_DIR_NAME "\0.\0."
-#endif
-
-struct date_time_t {
- u16 Year;
- u16 Month;
- u16 Day;
- u16 Hour;
- u16 Minute;
- u16 Second;
- u16 MilliSecond;
-};
-
-struct part_info_t {
- u32 Offset; /* start sector number of the partition */
- u32 Size; /* in sectors */
-};
-
-struct dev_info_t {
- u32 SecSize; /* sector size in bytes */
- u32 DevSize; /* block device size in sectors */
-};
-
-struct vol_info_t {
- u32 FatType;
- u32 ClusterSize;
- u32 NumClusters;
- u32 FreeClusters;
- u32 UsedClusters;
-};
-
-/* directory structure */
-struct chain_t {
- u32 dir;
- s32 size;
- u8 flags;
-};
-
-struct file_id_t {
- struct chain_t dir;
- s32 entry;
- u32 type;
- u32 attr;
- u32 start_clu;
- u64 size;
- u8 flags;
- s64 rwoffset;
- s32 hint_last_off;
- u32 hint_last_clu;
-};
-
-struct dir_entry_t {
- char Name[MAX_NAME_LENGTH * MAX_CHARSET_SIZE];
-
- /* used only for FAT12/16/32, not used for exFAT */
- char ShortName[DOS_NAME_LENGTH + 2];
-
- u32 Attr;
- u64 Size;
- u32 NumSubdirs;
- struct date_time_t CreateTimestamp;
- struct date_time_t ModifyTimestamp;
- struct date_time_t AccessTimestamp;
-};
-
-struct timestamp_t {
- u16 sec; /* 0 ~ 59 */
- u16 min; /* 0 ~ 59 */
- u16 hour; /* 0 ~ 23 */
- u16 day; /* 1 ~ 31 */
- u16 mon; /* 1 ~ 12 */
- u16 year; /* 0 ~ 127 (since 1980) */
-};
-
-/* MS_DOS FAT partition boot record (512 bytes) */
-struct pbr_sector_t {
- u8 jmp_boot[3];
- u8 oem_name[8];
- u8 bpb[109];
- u8 boot_code[390];
- u8 signature[2];
-};
-
-/* MS-DOS FAT12/16 BIOS parameter block (51 bytes) */
-struct bpb16_t {
- u8 sector_size[2];
- u8 sectors_per_clu;
- u8 num_reserved[2];
- u8 num_fats;
- u8 num_root_entries[2];
- u8 num_sectors[2];
- u8 media_type;
- u8 num_fat_sectors[2];
- u8 sectors_in_track[2];
- u8 num_heads[2];
- u8 num_hid_sectors[4];
- u8 num_huge_sectors[4];
-
- u8 phy_drv_no;
- u8 reserved;
- u8 ext_signature;
- u8 vol_serial[4];
- u8 vol_label[11];
- u8 vol_type[8];
-};
-
-/* MS-DOS FAT32 BIOS parameter block (79 bytes) */
-struct bpb32_t {
- u8 sector_size[2];
- u8 sectors_per_clu;
- u8 num_reserved[2];
- u8 num_fats;
- u8 num_root_entries[2];
- u8 num_sectors[2];
- u8 media_type;
- u8 num_fat_sectors[2];
- u8 sectors_in_track[2];
- u8 num_heads[2];
- u8 num_hid_sectors[4];
- u8 num_huge_sectors[4];
- u8 num_fat32_sectors[4];
- u8 ext_flags[2];
- u8 fs_version[2];
- u8 root_cluster[4];
- u8 fsinfo_sector[2];
- u8 backup_sector[2];
- u8 reserved[12];
-
- u8 phy_drv_no;
- u8 ext_reserved;
- u8 ext_signature;
- u8 vol_serial[4];
- u8 vol_label[11];
- u8 vol_type[8];
-};
-
-/* MS-DOS EXFAT BIOS parameter block (109 bytes) */
-struct bpbex_t {
- u8 reserved1[53];
- u8 vol_offset[8];
- u8 vol_length[8];
- u8 fat_offset[4];
- u8 fat_length[4];
- u8 clu_offset[4];
- u8 clu_count[4];
- u8 root_cluster[4];
- u8 vol_serial[4];
- u8 fs_version[2];
- u8 vol_flags[2];
- u8 sector_size_bits;
- u8 sectors_per_clu_bits;
- u8 num_fats;
- u8 phy_drv_no;
- u8 perc_in_use;
- u8 reserved2[7];
-};
-
-/* MS-DOS FAT file system information sector (512 bytes) */
-struct fsi_sector_t {
- u8 signature1[4];
- u8 reserved1[480];
- u8 signature2[4];
- u8 free_cluster[4];
- u8 next_cluster[4];
- u8 reserved2[14];
- u8 signature3[2];
-};
-
-/* MS-DOS FAT directory entry (32 bytes) */
-struct dentry_t {
- u8 dummy[32];
-};
-
-struct dos_dentry_t {
- u8 name[DOS_NAME_LENGTH];
- u8 attr;
- u8 lcase;
- u8 create_time_ms;
- u8 create_time[2];
- u8 create_date[2];
- u8 access_date[2];
- u8 start_clu_hi[2];
- u8 modify_time[2];
- u8 modify_date[2];
- u8 start_clu_lo[2];
- u8 size[4];
-};
-
-/* MS-DOS FAT extended directory entry (32 bytes) */
-struct ext_dentry_t {
- u8 order;
- u8 unicode_0_4[10];
- u8 attr;
- u8 sysid;
- u8 checksum;
- u8 unicode_5_10[12];
- u8 start_clu[2];
- u8 unicode_11_12[4];
-};
-
-/* MS-DOS EXFAT file directory entry (32 bytes) */
-struct file_dentry_t {
- u8 type;
- u8 num_ext;
- u8 checksum[2];
- u8 attr[2];
- u8 reserved1[2];
- u8 create_time[2];
- u8 create_date[2];
- u8 modify_time[2];
- u8 modify_date[2];
- u8 access_time[2];
- u8 access_date[2];
- u8 create_time_ms;
- u8 modify_time_ms;
- u8 access_time_ms;
- u8 reserved2[9];
-};
-
-/* MS-DOS EXFAT stream extension directory entry (32 bytes) */
-struct strm_dentry_t {
- u8 type;
- u8 flags;
- u8 reserved1;
- u8 name_len;
- u8 name_hash[2];
- u8 reserved2[2];
- u8 valid_size[8];
- u8 reserved3[4];
- u8 start_clu[4];
- u8 size[8];
-};
-
-/* MS-DOS EXFAT file name directory entry (32 bytes) */
-struct name_dentry_t {
- u8 type;
- u8 flags;
- u8 unicode_0_14[30];
-};
-
-/* MS-DOS EXFAT allocation bitmap directory entry (32 bytes) */
-struct bmap_dentry_t {
- u8 type;
- u8 flags;
- u8 reserved[18];
- u8 start_clu[4];
- u8 size[8];
-};
-
-/* MS-DOS EXFAT up-case table directory entry (32 bytes) */
-struct case_dentry_t {
- u8 type;
- u8 reserved1[3];
- u8 checksum[4];
- u8 reserved2[12];
- u8 start_clu[4];
- u8 size[8];
-};
-
-/* MS-DOS EXFAT volume label directory entry (32 bytes) */
-struct volm_dentry_t {
- u8 type;
- u8 label_len;
- u8 unicode_0_10[22];
- u8 reserved[8];
-};
-
-/* unused entry hint information */
-struct uentry_t {
- u32 dir;
- s32 entry;
- struct chain_t clu;
-};
-
-/* DOS name structure */
-struct dos_name_t {
- u8 name[DOS_NAME_LENGTH];
- u8 name_case;
-};
-
-/* unicode name structure */
-struct uni_name_t {
- u16 name[MAX_NAME_LENGTH];
- u16 name_hash;
- u8 name_len;
-};
-
-struct buf_cache_t {
- struct buf_cache_t *next;
- struct buf_cache_t *prev;
- struct buf_cache_t *hash_next;
- struct buf_cache_t *hash_prev;
- s32 drv;
- sector_t sec;
- u32 flag;
- struct buffer_head *buf_bh;
-};
-
-struct fs_info_t {
- u32 drv; /* drive ID */
- u32 vol_type; /* volume FAT type */
- u32 vol_id; /* volume serial number */
-
- u64 num_sectors; /* num of sectors in volume */
- u32 num_clusters; /* num of clusters in volume */
- u32 cluster_size; /* cluster size in bytes */
- u32 cluster_size_bits;
- u32 sectors_per_clu; /* cluster size in sectors */
- u32 sectors_per_clu_bits;
-
- u32 PBR_sector; /* PBR sector */
- u32 FAT1_start_sector; /* FAT1 start sector */
- u32 FAT2_start_sector; /* FAT2 start sector */
- u32 root_start_sector; /* root dir start sector */
- u32 data_start_sector; /* data area start sector */
- u32 num_FAT_sectors; /* num of FAT sectors */
-
- u32 root_dir; /* root dir cluster */
- u32 dentries_in_root; /* num of dentries in root dir */
- u32 dentries_per_clu; /* num of dentries per cluster */
-
- u32 vol_flag; /* volume dirty flag */
- struct buffer_head *pbr_bh; /* PBR sector */
-
- u32 map_clu; /* allocation bitmap start cluster */
- u32 map_sectors; /* num of allocation bitmap sectors */
- struct buffer_head **vol_amap; /* allocation bitmap */
-
- u16 **vol_utbl; /* upcase table */
-
- u32 clu_srch_ptr; /* cluster search pointer */
- u32 used_clusters; /* number of used clusters */
- struct uentry_t hint_uentry; /* unused entry hint information */
-
- u32 dev_ejected; /* block device operation error flag */
-
- struct mutex v_mutex;
-
- /* FAT cache */
- struct buf_cache_t FAT_cache_array[FAT_CACHE_SIZE];
- struct buf_cache_t FAT_cache_lru_list;
- struct buf_cache_t FAT_cache_hash_list[FAT_CACHE_HASH_SIZE];
-
- /* buf cache */
- struct buf_cache_t buf_cache_array[BUF_CACHE_SIZE];
- struct buf_cache_t buf_cache_lru_list;
- struct buf_cache_t buf_cache_hash_list[BUF_CACHE_HASH_SIZE];
-};
-
-#define ES_2_ENTRIES 2
-#define ES_3_ENTRIES 3
-#define ES_ALL_ENTRIES 0
-
-struct entry_set_cache_t {
- /* sector number that contains file_entry */
- sector_t sector;
-
- /* byte offset in the sector */
- s32 offset;
-
- /*
- * flag in stream entry.
- * 01 for cluster chain,
-	 * 03 for contiguous clusters.
- */
- s32 alloc_flag;
-
- u32 num_entries;
-
- /* __buf should be the last member */
- void *__buf;
-};
-
-#define EXFAT_ERRORS_CONT 1 /* ignore error and continue */
-#define EXFAT_ERRORS_PANIC 2 /* panic on error */
-#define EXFAT_ERRORS_RO 3 /* remount r/o on error */
-
-/* ioctl command */
-#define EXFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32)
-
-struct exfat_mount_options {
- kuid_t fs_uid;
- kgid_t fs_gid;
- unsigned short fs_fmask;
- unsigned short fs_dmask;
-
- /* permission for setting the [am]time */
- unsigned short allow_utime;
-
- /* codepage for shortname conversions */
- unsigned short codepage;
-
- /* charset for filename input/display */
- char *iocharset;
-
- unsigned char casesensitive;
-
- /* on error: continue, panic, remount-ro */
- unsigned char errors;
-#ifdef CONFIG_STAGING_EXFAT_DISCARD
-	/* flag on if -o discard specified and device supports discard() */
- unsigned char discard;
-#endif /* CONFIG_STAGING_EXFAT_DISCARD */
-};
-
-#define EXFAT_HASH_BITS 8
-#define EXFAT_HASH_SIZE BIT(EXFAT_HASH_BITS)
-
-/*
- * EXFAT file system in-core superblock data
- */
-struct bd_info_t {
- s32 sector_size; /* in bytes */
- s32 sector_size_bits;
- s32 sector_size_mask;
-
- /* total number of sectors in this block device */
- s32 num_sectors;
-
- /* opened or not */
- bool opened;
-};
-
-struct exfat_sb_info {
- struct fs_info_t fs_info;
- struct bd_info_t bd_info;
-
- struct exfat_mount_options options;
-
- int s_dirt;
- struct mutex s_lock;
- struct nls_table *nls_disk; /* Codepage used on disk */
- struct nls_table *nls_io; /* Charset used for input and display */
-
- struct inode *fat_inode;
-
- spinlock_t inode_hash_lock;
- struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
- long debug_flags;
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-};
-
-/*
- * EXFAT file system inode data in memory
- */
-struct exfat_inode_info {
- struct file_id_t fid;
- char *target;
- /* NOTE: mmu_private is 64bits, so must hold ->i_mutex to access */
- loff_t mmu_private; /* physically allocated size */
- loff_t i_pos; /* on-disk position of directory entry or 0 */
- struct hlist_node i_hash_fat; /* hash by i_location */
- struct rw_semaphore truncate_lock;
- struct inode vfs_inode;
- struct rw_semaphore i_alloc_sem; /* protect bmap against truncate */
-};
-
-#define EXFAT_SB(sb) ((struct exfat_sb_info *)((sb)->s_fs_info))
-
-static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
-{
- return container_of(inode, struct exfat_inode_info, vfs_inode);
-}
-
-/* NLS management function */
-u16 nls_upper(struct super_block *sb, u16 a);
-int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b);
-void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
- struct uni_name_t *p_uniname);
-void nls_cstring_to_uniname(struct super_block *sb,
- struct uni_name_t *p_uniname, u8 *p_cstring,
- bool *p_lossy);
-
-/* buffer cache management */
-void exfat_buf_init(struct super_block *sb);
-void exfat_buf_shutdown(struct super_block *sb);
-int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content);
-s32 exfat_fat_write(struct super_block *sb, u32 loc, u32 content);
-u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec);
-void exfat_fat_modify(struct super_block *sb, sector_t sec);
-void exfat_fat_release_all(struct super_block *sb);
-void exfat_fat_sync(struct super_block *sb);
-u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec);
-void exfat_buf_modify(struct super_block *sb, sector_t sec);
-void exfat_buf_lock(struct super_block *sb, sector_t sec);
-void exfat_buf_unlock(struct super_block *sb, sector_t sec);
-void exfat_buf_release(struct super_block *sb, sector_t sec);
-void exfat_buf_release_all(struct super_block *sb);
-void exfat_buf_sync(struct super_block *sb);
-
-/* fs management functions */
-void fs_set_vol_flags(struct super_block *sb, u32 new_flag);
-void fs_error(struct super_block *sb);
-
-/* cluster management functions */
-s32 count_num_clusters(struct super_block *sb, struct chain_t *dir);
-void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len);
-
-/* allocation bitmap management functions */
-s32 load_alloc_bitmap(struct super_block *sb);
-void free_alloc_bitmap(struct super_block *sb);
-void sync_alloc_bitmap(struct super_block *sb);
-
-/* upcase table management functions */
-s32 load_upcase_table(struct super_block *sb);
-void free_upcase_table(struct super_block *sb);
-
-/* dir entry management functions */
-struct timestamp_t *tm_current(struct timestamp_t *tm);
-
-struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, sector_t *sector);
-struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u32 type,
- struct dentry_t **file_ep);
-void release_entry_set(struct entry_set_cache_t *es);
-s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
- u32 type);
-void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
- s32 entry);
-void update_dir_checksum_with_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es);
-bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir);
-
-/* name conversion functions */
-s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 *entries,
- struct dos_name_t *p_dosname);
-u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type);
-
-/* name resolution functions */
-s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
- struct uni_name_t *p_uniname);
-
-/* file operation functions */
-s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
-s32 create_dir(struct inode *inode, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, struct file_id_t *fid);
-s32 create_file(struct inode *inode, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid);
-void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry);
-s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry,
- struct uni_name_t *p_uniname, struct file_id_t *fid);
-s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
- struct chain_t *p_newdir, struct uni_name_t *p_uniname,
- struct file_id_t *fid);
-
-/* sector read/write functions */
-int sector_read(struct super_block *sb, sector_t sec,
- struct buffer_head **bh, bool read);
-int sector_write(struct super_block *sb, sector_t sec,
- struct buffer_head *bh, bool sync);
-int multi_sector_read(struct super_block *sb, sector_t sec,
- struct buffer_head **bh, s32 num_secs, bool read);
-int multi_sector_write(struct super_block *sb, sector_t sec,
- struct buffer_head *bh, s32 num_secs, bool sync);
-
-void exfat_bdev_open(struct super_block *sb);
-void exfat_bdev_close(struct super_block *sb);
-int exfat_bdev_read(struct super_block *sb, sector_t secno,
- struct buffer_head **bh, u32 num_secs, bool read);
-int exfat_bdev_write(struct super_block *sb, sector_t secno,
- struct buffer_head *bh, u32 num_secs, bool sync);
-int exfat_bdev_sync(struct super_block *sb);
-
-/* cluster operation functions */
-s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
-void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
-s32 exfat_count_used_clusters(struct super_block *sb);
-
-/* dir operation functions */
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries);
-void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
-s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry);
-s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
-
-/* dir entry getter/setter */
-u32 exfat_get_entry_type(struct dentry_t *p_entry);
-u32 exfat_get_entry_attr(struct dentry_t *p_entry);
-void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
-u8 exfat_get_entry_flag(struct dentry_t *p_entry);
-void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags);
-u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
-void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
-u64 exfat_get_entry_size(struct dentry_t *p_entry);
-void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
-void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-
-extern const u8 uni_upcase[];
-#endif /* _EXFAT_H */
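
The removed header open-codes little-endian on-disk accessors (GET16/SET32 and the aligned _A variants switched on __LITTLE_ENDIAN). A hedged modern equivalent leans on the kernel's unaligned LE helpers instead of hand-rolled shifts; function names here are illustrative:

    #include <asm/unaligned.h>

    static inline u16 exfat_get16(const u8 *p)      /* replaces GET16/GET16_A */
    {
    	return get_unaligned_le16(p);
    }

    static inline void exfat_set32(u8 *p, u32 v)    /* replaces SET32/SET32_A */
    {
    	put_unaligned_le32(v, p);
    }
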
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
deleted file mode 100644
index 0a3dc3568293..000000000000
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ /dev/null
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include "exfat.h"
-
-void exfat_bdev_open(struct super_block *sb)
-{
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (p_bd->opened)
- return;
-
- p_bd->sector_size = bdev_logical_block_size(sb->s_bdev);
- p_bd->sector_size_bits = ilog2(p_bd->sector_size);
- p_bd->sector_size_mask = p_bd->sector_size - 1;
- p_bd->num_sectors = i_size_read(sb->s_bdev->bd_inode) >>
- p_bd->sector_size_bits;
- p_bd->opened = true;
-}
-
-void exfat_bdev_close(struct super_block *sb)
-{
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- p_bd->opened = false;
-}
-
-int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
- u32 num_secs, bool read)
-{
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- long flags = sbi->debug_flags;
-
- if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return -EIO;
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-
- if (!p_bd->opened)
- return -ENODEV;
-
- if (*bh)
- __brelse(*bh);
-
- if (read)
- *bh = __bread(sb->s_bdev, secno,
- num_secs << p_bd->sector_size_bits);
- else
- *bh = __getblk(sb->s_bdev, secno,
- num_secs << p_bd->sector_size_bits);
-
- if (*bh)
- return 0;
-
- WARN(!p_fs->dev_ejected,
- "[EXFAT] No bh, device seems wrong or to be ejected.\n");
-
- return -EIO;
-}
-
-int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
- u32 num_secs, bool sync)
-{
- s32 count;
- struct buffer_head *bh2;
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- long flags = sbi->debug_flags;
-
- if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return -EIO;
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-
- if (!p_bd->opened)
- return -ENODEV;
-
- if (secno == bh->b_blocknr) {
- lock_buffer(bh);
- set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
- unlock_buffer(bh);
- if (sync && (sync_dirty_buffer(bh) != 0))
- return -EIO;
- } else {
- count = num_secs << p_bd->sector_size_bits;
-
- bh2 = __getblk(sb->s_bdev, secno, count);
- if (!bh2)
- goto no_bh;
-
- lock_buffer(bh2);
- memcpy(bh2->b_data, bh->b_data, count);
- set_buffer_uptodate(bh2);
- mark_buffer_dirty(bh2);
- unlock_buffer(bh2);
- if (sync && (sync_dirty_buffer(bh2) != 0)) {
- __brelse(bh2);
- goto no_bh;
- }
- __brelse(bh2);
- }
-
- return 0;
-
-no_bh:
- WARN(!p_fs->dev_ejected,
- "[EXFAT] No bh, device seems wrong or to be ejected.\n");
-
- return -EIO;
-}
-
-int exfat_bdev_sync(struct super_block *sb)
-{
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- long flags = sbi->debug_flags;
-
- if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return -EIO;
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-
- if (!p_bd->opened)
- return -ENODEV;
-
- return sync_blockdev(sb->s_bdev);
-}
diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c
deleted file mode 100644
index 3fd5604058a9..000000000000
--- a/drivers/staging/exfat/exfat_cache.c
+++ /dev/null
@@ -1,555 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include <linux/mutex.h>
-#include "exfat.h"
-
-#define LOCKBIT 0x01
-#define DIRTYBIT 0x02
-
-/* Local variables */
-static DEFINE_MUTEX(f_mutex);
-static DEFINE_MUTEX(b_mutex);
-
-static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec)
-{
- s32 off;
- struct buf_cache_t *bp, *hp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- off = (sec +
- (sec >> p_fs->sectors_per_clu_bits)) & (FAT_CACHE_HASH_SIZE - 1);
-
- hp = &p_fs->FAT_cache_hash_list[off];
- for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) {
- if ((bp->drv == p_fs->drv) && (bp->sec == sec)) {
- WARN(!bp->buf_bh,
- "[EXFAT] FAT_cache has no bh. It will make system panic.\n");
-
- touch_buffer(bp->buf_bh);
- return bp;
- }
- }
- return NULL;
-}
-
-static void push_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list)
-{
- bp->next = list->next;
- bp->prev = list;
- list->next->prev = bp;
- list->next = bp;
-}
-
-static void push_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list)
-{
- bp->prev = list->prev;
- bp->next = list;
- list->prev->next = bp;
- list->prev = bp;
-}
-
-static void move_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list)
-{
- bp->prev->next = bp->next;
- bp->next->prev = bp->prev;
- push_to_mru(bp, list);
-}
-
-static void move_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list)
-{
- bp->prev->next = bp->next;
- bp->next->prev = bp->prev;
- push_to_lru(bp, list);
-}
-
-static struct buf_cache_t *FAT_cache_get(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- bp = p_fs->FAT_cache_lru_list.prev;
-
- move_to_mru(bp, &p_fs->FAT_cache_lru_list);
- return bp;
-}
-
-static void FAT_cache_insert_hash(struct super_block *sb,
- struct buf_cache_t *bp)
-{
- s32 off;
- struct buf_cache_t *hp;
- struct fs_info_t *p_fs;
-
- p_fs = &(EXFAT_SB(sb)->fs_info);
- off = (bp->sec +
- (bp->sec >> p_fs->sectors_per_clu_bits)) &
- (FAT_CACHE_HASH_SIZE - 1);
-
- hp = &p_fs->FAT_cache_hash_list[off];
- bp->hash_next = hp->hash_next;
- bp->hash_prev = hp;
- hp->hash_next->hash_prev = bp;
- hp->hash_next = bp;
-}
-
-static void FAT_cache_remove_hash(struct buf_cache_t *bp)
-{
- (bp->hash_prev)->hash_next = bp->hash_next;
- (bp->hash_next)->hash_prev = bp->hash_prev;
-}
-
-static void buf_cache_insert_hash(struct super_block *sb,
- struct buf_cache_t *bp)
-{
- s32 off;
- struct buf_cache_t *hp;
- struct fs_info_t *p_fs;
-
- p_fs = &(EXFAT_SB(sb)->fs_info);
- off = (bp->sec +
- (bp->sec >> p_fs->sectors_per_clu_bits)) &
- (BUF_CACHE_HASH_SIZE - 1);
-
- hp = &p_fs->buf_cache_hash_list[off];
- bp->hash_next = hp->hash_next;
- bp->hash_prev = hp;
- hp->hash_next->hash_prev = bp;
- hp->hash_next = bp;
-}
-
-static void buf_cache_remove_hash(struct buf_cache_t *bp)
-{
- (bp->hash_prev)->hash_next = bp->hash_next;
- (bp->hash_next)->hash_prev = bp->hash_prev;
-}
-
-void exfat_buf_init(struct super_block *sb)
-{
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- int i;
-
- /* LRU list */
- p_fs->FAT_cache_lru_list.next = &p_fs->FAT_cache_lru_list;
- p_fs->FAT_cache_lru_list.prev = &p_fs->FAT_cache_lru_list;
-
- for (i = 0; i < FAT_CACHE_SIZE; i++) {
- p_fs->FAT_cache_array[i].drv = -1;
- p_fs->FAT_cache_array[i].sec = ~0;
- p_fs->FAT_cache_array[i].flag = 0;
- p_fs->FAT_cache_array[i].buf_bh = NULL;
- p_fs->FAT_cache_array[i].prev = NULL;
- p_fs->FAT_cache_array[i].next = NULL;
- push_to_mru(&p_fs->FAT_cache_array[i],
- &p_fs->FAT_cache_lru_list);
- }
-
- p_fs->buf_cache_lru_list.next = &p_fs->buf_cache_lru_list;
- p_fs->buf_cache_lru_list.prev = &p_fs->buf_cache_lru_list;
-
- for (i = 0; i < BUF_CACHE_SIZE; i++) {
- p_fs->buf_cache_array[i].drv = -1;
- p_fs->buf_cache_array[i].sec = ~0;
- p_fs->buf_cache_array[i].flag = 0;
- p_fs->buf_cache_array[i].buf_bh = NULL;
- p_fs->buf_cache_array[i].prev = NULL;
- p_fs->buf_cache_array[i].next = NULL;
- push_to_mru(&p_fs->buf_cache_array[i],
- &p_fs->buf_cache_lru_list);
- }
-
- /* HASH list */
- for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) {
- p_fs->FAT_cache_hash_list[i].drv = -1;
- p_fs->FAT_cache_hash_list[i].sec = ~0;
- p_fs->FAT_cache_hash_list[i].hash_next =
- &p_fs->FAT_cache_hash_list[i];
- p_fs->FAT_cache_hash_list[i].hash_prev =
- &p_fs->FAT_cache_hash_list[i];
- }
-
- for (i = 0; i < FAT_CACHE_SIZE; i++)
- FAT_cache_insert_hash(sb, &p_fs->FAT_cache_array[i]);
-
- for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) {
- p_fs->buf_cache_hash_list[i].drv = -1;
- p_fs->buf_cache_hash_list[i].sec = ~0;
- p_fs->buf_cache_hash_list[i].hash_next =
- &p_fs->buf_cache_hash_list[i];
- p_fs->buf_cache_hash_list[i].hash_prev =
- &p_fs->buf_cache_hash_list[i];
- }
-
- for (i = 0; i < BUF_CACHE_SIZE; i++)
- buf_cache_insert_hash(sb, &p_fs->buf_cache_array[i]);
-}
-
-void exfat_buf_shutdown(struct super_block *sb)
-{
-}
-
-static int __exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
-{
- s32 off;
- u32 _content;
- sector_t sec;
- u8 *fat_sector, *fat_entry;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = exfat_fat_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
- _content = GET32_A(fat_entry);
-
- if (_content >= CLUSTER_32(0xFFFFFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
-}
-
-/* in : sb, loc
- * out: content
- * returns 0 on success
- * -1 on error
- */
-int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
-{
- s32 ret;
-
- mutex_lock(&f_mutex);
- ret = __exfat_fat_read(sb, loc, content);
- mutex_unlock(&f_mutex);
-
- return ret;
-}
-
-static s32 __exfat_fat_write(struct super_block *sb, u32 loc, u32 content)
-{
- s32 off;
- sector_t sec;
- u8 *fat_sector, *fat_entry;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = exfat_fat_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- SET32_A(fat_entry, content);
-
- exfat_fat_modify(sb, sec);
- return 0;
-}
-
-int exfat_fat_write(struct super_block *sb, u32 loc, u32 content)
-{
- s32 ret;
-
- mutex_lock(&f_mutex);
- ret = __exfat_fat_write(sb, loc, content);
- mutex_unlock(&f_mutex);
-
- return ret;
-}
-
-u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- bp = FAT_cache_find(sb, sec);
- if (bp) {
- move_to_mru(bp, &p_fs->FAT_cache_lru_list);
- return bp->buf_bh->b_data;
- }
-
- bp = FAT_cache_get(sb, sec);
-
- FAT_cache_remove_hash(bp);
-
- bp->drv = p_fs->drv;
- bp->sec = sec;
- bp->flag = 0;
-
- FAT_cache_insert_hash(sb, bp);
-
- if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) {
- FAT_cache_remove_hash(bp);
- bp->drv = -1;
- bp->sec = ~0;
- bp->flag = 0;
- bp->buf_bh = NULL;
-
- move_to_lru(bp, &p_fs->FAT_cache_lru_list);
- return NULL;
- }
-
- return bp->buf_bh->b_data;
-}
-
-void exfat_fat_modify(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
-
- bp = FAT_cache_find(sb, sec);
- if (bp)
- sector_write(sb, sec, bp->buf_bh, 0);
-}
-
-void exfat_fat_release_all(struct super_block *sb)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- mutex_lock(&f_mutex);
-
- bp = p_fs->FAT_cache_lru_list.next;
- while (bp != &p_fs->FAT_cache_lru_list) {
- if (bp->drv == p_fs->drv) {
- bp->drv = -1;
- bp->sec = ~0;
- bp->flag = 0;
-
- if (bp->buf_bh) {
- __brelse(bp->buf_bh);
- bp->buf_bh = NULL;
- }
- }
- bp = bp->next;
- }
-
- mutex_unlock(&f_mutex);
-}
-
-void exfat_fat_sync(struct super_block *sb)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- mutex_lock(&f_mutex);
-
- bp = p_fs->FAT_cache_lru_list.next;
- while (bp != &p_fs->FAT_cache_lru_list) {
- if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) {
- sync_dirty_buffer(bp->buf_bh);
- bp->flag &= ~(DIRTYBIT);
- }
- bp = bp->next;
- }
-
- mutex_unlock(&f_mutex);
-}
-
-static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec)
-{
- s32 off;
- struct buf_cache_t *bp, *hp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- off = (sec + (sec >> p_fs->sectors_per_clu_bits)) &
- (BUF_CACHE_HASH_SIZE - 1);
-
- hp = &p_fs->buf_cache_hash_list[off];
- for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) {
- if ((bp->drv == p_fs->drv) && (bp->sec == sec)) {
- touch_buffer(bp->buf_bh);
- return bp;
- }
- }
- return NULL;
-}
-
-static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- bp = p_fs->buf_cache_lru_list.prev;
- while (bp->flag & LOCKBIT)
- bp = bp->prev;
-
- move_to_mru(bp, &p_fs->buf_cache_lru_list);
- return bp;
-}
-
-static u8 *__exfat_buf_getblk(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- bp = buf_cache_find(sb, sec);
- if (bp) {
- move_to_mru(bp, &p_fs->buf_cache_lru_list);
- return bp->buf_bh->b_data;
- }
-
- bp = buf_cache_get(sb, sec);
-
- buf_cache_remove_hash(bp);
-
- bp->drv = p_fs->drv;
- bp->sec = sec;
- bp->flag = 0;
-
- buf_cache_insert_hash(sb, bp);
-
- if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) {
- buf_cache_remove_hash(bp);
- bp->drv = -1;
- bp->sec = ~0;
- bp->flag = 0;
- bp->buf_bh = NULL;
-
- move_to_lru(bp, &p_fs->buf_cache_lru_list);
- return NULL;
- }
-
- return bp->buf_bh->b_data;
-}
-
-u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec)
-{
- u8 *buf;
-
- mutex_lock(&b_mutex);
- buf = __exfat_buf_getblk(sb, sec);
- mutex_unlock(&b_mutex);
-
- return buf;
-}
-
-void exfat_buf_modify(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
-
- mutex_lock(&b_mutex);
-
- bp = buf_cache_find(sb, sec);
- if (likely(bp))
- sector_write(sb, sec, bp->buf_bh, 0);
-
- WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
- (unsigned long long)sec);
-
- mutex_unlock(&b_mutex);
-}
-
-void exfat_buf_lock(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
-
- mutex_lock(&b_mutex);
-
- bp = buf_cache_find(sb, sec);
- if (likely(bp))
- bp->flag |= LOCKBIT;
-
- WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
- (unsigned long long)sec);
-
- mutex_unlock(&b_mutex);
-}
-
-void exfat_buf_unlock(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
-
- mutex_lock(&b_mutex);
-
- bp = buf_cache_find(sb, sec);
- if (likely(bp))
- bp->flag &= ~(LOCKBIT);
-
- WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
- (unsigned long long)sec);
-
- mutex_unlock(&b_mutex);
-}
-
-void exfat_buf_release(struct super_block *sb, sector_t sec)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- mutex_lock(&b_mutex);
-
- bp = buf_cache_find(sb, sec);
- if (likely(bp)) {
- bp->drv = -1;
- bp->sec = ~0;
- bp->flag = 0;
-
- if (bp->buf_bh) {
- __brelse(bp->buf_bh);
- bp->buf_bh = NULL;
- }
-
- move_to_lru(bp, &p_fs->buf_cache_lru_list);
- }
-
- mutex_unlock(&b_mutex);
-}
-
-void exfat_buf_release_all(struct super_block *sb)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- mutex_lock(&b_mutex);
-
- bp = p_fs->buf_cache_lru_list.next;
- while (bp != &p_fs->buf_cache_lru_list) {
- if (bp->drv == p_fs->drv) {
- bp->drv = -1;
- bp->sec = ~0;
- bp->flag = 0;
-
- if (bp->buf_bh) {
- __brelse(bp->buf_bh);
- bp->buf_bh = NULL;
- }
- }
- bp = bp->next;
- }
-
- mutex_unlock(&b_mutex);
-}
-
-void exfat_buf_sync(struct super_block *sb)
-{
- struct buf_cache_t *bp;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- mutex_lock(&b_mutex);
-
- bp = p_fs->buf_cache_lru_list.next;
- while (bp != &p_fs->buf_cache_lru_list) {
- if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) {
- sync_dirty_buffer(bp->buf_bh);
- bp->flag &= ~(DIRTYBIT);
- }
- bp = bp->next;
- }
-
- mutex_unlock(&b_mutex);
-}
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
deleted file mode 100644
index 07b460d01334..000000000000
--- a/drivers/staging/exfat/exfat_core.c
+++ /dev/null
@@ -1,2582 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include <linux/mutex.h>
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-#include "exfat.h"
-
-static void __set_sb_dirty(struct super_block *sb)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- sbi->s_dirt = 1;
-}
-
-static u8 name_buf[MAX_PATH_LENGTH * MAX_CHARSET_SIZE];
-
-static u8 free_bit[] = {
- 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */
- 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */
- 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 40 ~ 59 */
- 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 60 ~ 79 */
- 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, /* 80 ~ 99 */
- 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, /* 100 ~ 119 */
- 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 120 ~ 139 */
- 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, /* 140 ~ 159 */
- 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 160 ~ 179 */
- 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, /* 180 ~ 199 */
- 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 200 ~ 219 */
- 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 220 ~ 239 */
- 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /* 240 ~ 254 */
-};
-
-static u8 used_bit[] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, /* 0 ~ 19 */
- 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, /* 20 ~ 39 */
- 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, /* 40 ~ 59 */
- 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, /* 60 ~ 79 */
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, /* 80 ~ 99 */
- 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, /* 100 ~ 119 */
- 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, /* 120 ~ 139 */
- 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, /* 140 ~ 159 */
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, /* 160 ~ 179 */
- 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, /* 180 ~ 199 */
- 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, /* 200 ~ 219 */
- 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, /* 220 ~ 239 */
- 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /* 240 ~ 255 */
-};
-
-#define BITMAP_LOC(v) ((v) >> 3)
-#define BITMAP_SHIFT(v) ((v) & 0x07)
-
-static inline s32 exfat_bitmap_test(u8 *bitmap, int i)
-{
- u8 data;
-
- data = bitmap[BITMAP_LOC(i)];
- if ((data >> BITMAP_SHIFT(i)) & 0x01)
- return 1;
- return 0;
-}
-
-static inline void exfat_bitmap_set(u8 *bitmap, int i)
-{
- bitmap[BITMAP_LOC(i)] |= (0x01 << BITMAP_SHIFT(i));
-}
-
-static inline void exfat_bitmap_clear(u8 *bitmap, int i)
-{
- bitmap[BITMAP_LOC(i)] &= ~(0x01 << BITMAP_SHIFT(i));
-}
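
The free_bit[] and used_bit[] tables further up are byte-wide lookups:
free_bit[k] is the index of the lowest zero bit of k (the caller only
consults it for k < 0xFF), and used_bit[k] is the population count of k.
A plain-C equivalent, included only to document the tables:

	static int lowest_zero_bit(u8 k)	/* == free_bit[k] */
	{
		int i;

		for (i = 0; i < 8; i++)
			if (!(k & (1 << i)))
				return i;
		return -1;			/* k == 0xFF: no free bit */
	}

	static int popcount8(u8 k)		/* == used_bit[k] */
	{
		int n = 0;

		for (; k; k >>= 1)
			n += k & 1;
		return n;
	}

For k == 23 (binary 00010111) this gives lowest_zero_bit == 3 and
popcount8 == 4, matching free_bit[23] and used_bit[23] above.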
-
-/*
- * File System Management Functions
- */
-
-void fs_set_vol_flags(struct super_block *sb, u32 new_flag)
-{
- struct pbr_sector_t *p_pbr;
- struct bpbex_t *p_bpb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (p_fs->vol_flag == new_flag)
- return;
-
- p_fs->vol_flag = new_flag;
-
- if (!p_fs->pbr_bh) {
- if (sector_read(sb, p_fs->PBR_sector,
- &p_fs->pbr_bh, 1) != 0)
- return;
- }
-
- p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
- p_bpb = (struct bpbex_t *)p_pbr->bpb;
- SET16(p_bpb->vol_flags, (u16)new_flag);
-
- /* XXX duyoung
- * what can we do here? (because fs_set_vol_flags() is void)
- */
- if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
- sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
- else
- sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
-}
-
-void fs_error(struct super_block *sb)
-{
- struct exfat_mount_options *opts = &EXFAT_SB(sb)->options;
-
- if (opts->errors == EXFAT_ERRORS_PANIC) {
- panic("[EXFAT] Filesystem panic from previous error\n");
- } else if ((opts->errors == EXFAT_ERRORS_RO) && !sb_rdonly(sb)) {
- sb->s_flags |= SB_RDONLY;
- pr_err("[EXFAT] Filesystem has been set read-only\n");
- }
-}
-
-/*
- * Cluster Management Functions
- */
-
-static s32 clear_cluster(struct super_block *sb, u32 clu)
-{
- sector_t s, n;
- s32 ret = 0;
- struct buffer_head *tmp_bh = NULL;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (clu == CLUSTER_32(0)) { /* FAT16 root_dir */
- s = p_fs->root_start_sector;
- n = p_fs->data_start_sector;
- } else {
- s = START_SECTOR(clu);
- n = s + p_fs->sectors_per_clu;
- }
-
- for (; s < n; s++) {
- ret = sector_read(sb, s, &tmp_bh, 0);
- if (ret != 0)
- return ret;
-
- memset((char *)tmp_bh->b_data, 0x0, p_bd->sector_size);
- ret = sector_write(sb, s, tmp_bh, 0);
- if (ret != 0)
- break;
- }
-
- brelse(tmp_bh);
- return ret;
-}
-
-static s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, b;
- sector_t sector;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- i = clu >> (p_bd->sector_size_bits + 3);
- b = clu & ((p_bd->sector_size << 3) - 1);
-
- sector = START_SECTOR(p_fs->map_clu) + i;
-
- exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b);
-
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-}
-
-static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, b;
- sector_t sector;
-#ifdef CONFIG_STAGING_EXFAT_DISCARD
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_mount_options *opts = &sbi->options;
- int ret;
-#endif /* CONFIG_STAGING_EXFAT_DISCARD */
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- i = clu >> (p_bd->sector_size_bits + 3);
- b = clu & ((p_bd->sector_size << 3) - 1);
-
- sector = START_SECTOR(p_fs->map_clu) + i;
-
- exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
-
-#ifdef CONFIG_STAGING_EXFAT_DISCARD
- if (opts->discard) {
- ret = sb_issue_discard(sb, START_SECTOR(clu),
- (1 << p_fs->sectors_per_clu_bits),
- GFP_NOFS, 0);
- if (ret == -EOPNOTSUPP) {
- pr_warn("discard not supported by device, disabling");
- opts->discard = 0;
- } else {
- return ret;
- }
- }
-#endif /* CONFIG_STAGING_EXFAT_DISCARD */
-
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-}
-
-static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, map_i, map_b;
- u32 clu_base, clu_free;
- u8 k, clu_mask;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- clu_base = (clu & ~(0x7)) + 2;
- clu_mask = (1 << (clu - clu_base + 2)) - 1;
-
- map_i = clu >> (p_bd->sector_size_bits + 3);
- map_b = (clu >> 3) & p_bd->sector_size_mask;
-
- for (i = 2; i < p_fs->num_clusters; i += 8) {
- k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
- if (clu_mask > 0) {
- k |= clu_mask;
- clu_mask = 0;
- }
- if (k < 0xFF) {
- clu_free = clu_base + free_bit[k];
- if (clu_free < p_fs->num_clusters)
- return clu_free;
- }
- clu_base += 8;
-
- if (((++map_b) >= p_bd->sector_size) ||
- (clu_base >= p_fs->num_clusters)) {
- if ((++map_i) >= p_fs->map_sectors) {
- clu_base = 2;
- map_i = 0;
- }
- map_b = 0;
- }
- }
-
- return CLUSTER_32(~0);
-}
-
-s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain)
-{
- s32 num_clusters = 0;
- u32 hint_clu, new_clu, last_clu = CLUSTER_32(~0);
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- hint_clu = p_chain->dir;
- if (hint_clu == CLUSTER_32(~0)) {
- hint_clu = test_alloc_bitmap(sb, p_fs->clu_srch_ptr - 2);
- if (hint_clu == CLUSTER_32(~0))
- return 0;
- } else if (hint_clu >= p_fs->num_clusters) {
- hint_clu = 2;
- p_chain->flags = 0x01;
- }
-
- __set_sb_dirty(sb);
-
- p_chain->dir = CLUSTER_32(~0);
-
- while ((new_clu = test_alloc_bitmap(sb, hint_clu - 2)) != CLUSTER_32(~0)) {
- if (new_clu != hint_clu) {
- if (p_chain->flags == 0x03) {
- exfat_chain_cont_cluster(sb, p_chain->dir,
- num_clusters);
- p_chain->flags = 0x01;
- }
- }
-
- if (set_alloc_bitmap(sb, new_clu - 2) != 0)
- return -EIO;
-
- num_clusters++;
-
- if (p_chain->flags == 0x01) {
- if (exfat_fat_write(sb, new_clu, CLUSTER_32(~0)) < 0)
- return -EIO;
- }
-
- if (p_chain->dir == CLUSTER_32(~0)) {
- p_chain->dir = new_clu;
- } else {
- if (p_chain->flags == 0x01) {
- if (exfat_fat_write(sb, last_clu, new_clu) < 0)
- return -EIO;
- }
- }
- last_clu = new_clu;
-
- if ((--num_alloc) == 0) {
- p_fs->clu_srch_ptr = hint_clu;
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters += num_clusters;
-
- p_chain->size += num_clusters;
- return num_clusters;
- }
-
- hint_clu = new_clu + 1;
- if (hint_clu >= p_fs->num_clusters) {
- hint_clu = 2;
-
- if (p_chain->flags == 0x03) {
- exfat_chain_cont_cluster(sb, p_chain->dir,
- num_clusters);
- p_chain->flags = 0x01;
- }
- }
- }
-
- p_fs->clu_srch_ptr = hint_clu;
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters += num_clusters;
-
- p_chain->size += num_clusters;
- return num_clusters;
-}
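
Two chain layouts are in play in the allocator above (inferred from this
file's usage of the flags): 0x03 marks a contiguous "no FAT chain" run in
which cluster N is implicitly followed by N + 1 and no FAT writes are
needed, while 0x01 is a conventional FAT chain. A minimal sketch of
walking one hop under either flag:

	static int next_cluster(struct super_block *sb, u8 flags,
				u32 clu, u32 *next)
	{
		if (flags == 0x03) {		/* contiguous run */
			*next = clu + 1;
			return 0;
		}
		return exfat_fat_read(sb, clu, next);	/* follow the FAT */
	}

Once a contiguous run can no longer be grown in place, the allocator
calls exfat_chain_cont_cluster() to materialize explicit N -> N + 1 FAT
links and downgrades the chain's flags to 0x01.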
-
-void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse)
-{
- s32 num_clusters = 0;
- u32 clu;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int i;
- sector_t sector;
-
- if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
- return;
-
- if (p_chain->size <= 0) {
- pr_err("[EXFAT] free_cluster : skip free-req clu:%u, because of zero-size truncation\n",
- p_chain->dir);
- return;
- }
-
- __set_sb_dirty(sb);
- clu = p_chain->dir;
-
- if (p_chain->flags == 0x03) {
- do {
- if (do_relse) {
- sector = START_SECTOR(clu);
- for (i = 0; i < p_fs->sectors_per_clu; i++)
- exfat_buf_release(sb, sector + i);
- }
-
- if (clr_alloc_bitmap(sb, clu - 2) != 0)
- break;
- clu++;
-
- num_clusters++;
- } while (num_clusters < p_chain->size);
- } else {
- do {
- if (p_fs->dev_ejected)
- break;
-
- if (do_relse) {
- sector = START_SECTOR(clu);
- for (i = 0; i < p_fs->sectors_per_clu; i++)
- exfat_buf_release(sb, sector + i);
- }
-
- if (clr_alloc_bitmap(sb, clu - 2) != 0)
- break;
-
- if (exfat_fat_read(sb, clu, &clu) == -1)
- break;
- num_clusters++;
- } while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0)));
- }
-
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters -= num_clusters;
-}
-
-static u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
-{
- u32 clu, next;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- clu = p_chain->dir;
-
- if (p_chain->flags == 0x03) {
- clu += p_chain->size - 1;
- } else {
- while ((exfat_fat_read(sb, clu, &next) == 0) &&
- (next != CLUSTER_32(~0))) {
- if (p_fs->dev_ejected)
- break;
- clu = next;
- }
- }
-
- return clu;
-}
-
-s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
-{
- int i, count = 0;
- u32 clu;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
- return 0;
-
- clu = p_chain->dir;
-
- if (p_chain->flags == 0x03) {
- count = p_chain->size;
- } else {
- for (i = 2; i < p_fs->num_clusters; i++) {
- count++;
- if (exfat_fat_read(sb, clu, &clu) != 0)
- return 0;
- if (clu == CLUSTER_32(~0))
- break;
- }
- }
-
- return count;
-}
-
-s32 exfat_count_used_clusters(struct super_block *sb)
-{
- int i, map_i, map_b, count = 0;
- u8 k;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- map_i = 0;
- map_b = 0;
-
- for (i = 2; i < p_fs->num_clusters; i += 8) {
- k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
- count += used_bit[k];
-
- if ((++map_b) >= p_bd->sector_size) {
- map_i++;
- map_b = 0;
- }
- }
-
- return count;
-}
-
-void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len)
-{
- if (len == 0)
- return;
-
- while (len > 1) {
- if (exfat_fat_write(sb, chain, chain + 1) < 0)
- break;
- chain++;
- len--;
- }
- exfat_fat_write(sb, chain, CLUSTER_32(~0));
-}
-
-/*
- * Allocation Bitmap Management Functions
- */
-
-s32 load_alloc_bitmap(struct super_block *sb)
-{
- int i, j, ret;
- u32 map_size;
- u32 type;
- sector_t sector;
- struct chain_t clu;
- struct bmap_dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- clu.dir = p_fs->root_dir;
- clu.flags = 0x01;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < p_fs->dentries_per_clu; i++) {
- ep = (struct bmap_dentry_t *)get_entry_in_dir(sb, &clu,
- i, NULL);
- if (!ep)
- return -ENOENT;
-
- type = exfat_get_entry_type((struct dentry_t *)ep);
-
- if (type == TYPE_UNUSED)
- break;
- if (type != TYPE_BITMAP)
- continue;
-
- if (ep->flags == 0x0) {
- p_fs->map_clu = GET32_A(ep->start_clu);
- map_size = (u32)GET64_A(ep->size);
-
- p_fs->map_sectors = ((map_size - 1) >> p_bd->sector_size_bits) + 1;
-
- p_fs->vol_amap = kmalloc_array(p_fs->map_sectors,
- sizeof(struct buffer_head *),
- GFP_KERNEL);
- if (!p_fs->vol_amap)
- return -ENOMEM;
-
- sector = START_SECTOR(p_fs->map_clu);
-
- for (j = 0; j < p_fs->map_sectors; j++) {
- p_fs->vol_amap[j] = NULL;
- ret = sector_read(sb, sector + j, &p_fs->vol_amap[j], 1);
- if (ret != 0) {
- /* release all buffers and free vol_amap */
- i = 0;
- while (i < j)
- brelse(p_fs->vol_amap[i++]);
-
- kfree(p_fs->vol_amap);
- p_fs->vol_amap = NULL;
- return ret;
- }
- }
-
- p_fs->pbr_bh = NULL;
- return 0;
- }
- }
-
- if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
- return -EIO;
- }
-
- return -EFSCORRUPTED;
-}
-
-void free_alloc_bitmap(struct super_block *sb)
-{
- int i;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- brelse(p_fs->pbr_bh);
-
- for (i = 0; i < p_fs->map_sectors; i++)
- __brelse(p_fs->vol_amap[i]);
-
- kfree(p_fs->vol_amap);
- p_fs->vol_amap = NULL;
-}
-
-void sync_alloc_bitmap(struct super_block *sb)
-{
- int i;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (!p_fs->vol_amap)
- return;
-
- for (i = 0; i < p_fs->map_sectors; i++)
- sync_dirty_buffer(p_fs->vol_amap[i]);
-}
-
-/*
- * Upcase table Management Functions
- */
-static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
- u32 num_sectors, u32 utbl_checksum)
-{
- int i, ret = -EINVAL;
- u32 j;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- struct buffer_head *tmp_bh = NULL;
- sector_t end_sector = num_sectors + sector;
-
- bool skip = false;
- u32 index = 0;
- u16 uni = 0;
- u16 **upcase_table;
-
- u32 checksum = 0;
-
- upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL);
- p_fs->vol_utbl = upcase_table;
- if (!upcase_table)
- return -ENOMEM;
- memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
-
- while (sector < end_sector) {
- ret = sector_read(sb, sector, &tmp_bh, 1);
- if (ret != 0) {
- pr_debug("sector read (0x%llX)fail\n",
- (unsigned long long)sector);
- goto error;
- }
- sector++;
-
- for (i = 0; i < p_bd->sector_size && index <= 0xFFFF; i += 2) {
- uni = GET16(((u8 *)tmp_bh->b_data) + i);
-
- checksum = ((checksum & 1) ? 0x80000000 : 0) +
- (checksum >> 1) + *(((u8 *)tmp_bh->b_data) +
- i);
- checksum = ((checksum & 1) ? 0x80000000 : 0) +
- (checksum >> 1) + *(((u8 *)tmp_bh->b_data) +
- (i + 1));
-
- if (skip) {
- pr_debug("skip from 0x%X ", index);
- index += uni;
- pr_debug("to 0x%X (amount of 0x%X)\n",
- index, uni);
- skip = false;
- } else if (uni == index) {
- index++;
- } else if (uni == 0xFFFF) {
- skip = true;
- } else { /* uni != index, uni != 0xFFFF */
- u16 col_index = get_col_index(index);
-
- if (!upcase_table[col_index]) {
- pr_debug("alloc = 0x%X\n", col_index);
- upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
- sizeof(u16), GFP_KERNEL);
- if (!upcase_table[col_index]) {
- ret = -ENOMEM;
- goto error;
- }
-
- for (j = 0; j < UTBL_ROW_COUNT; j++)
- upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j;
- }
-
- upcase_table[col_index][get_row_index(index)] = uni;
- index++;
- }
- }
- }
- if (index >= 0xFFFF && utbl_checksum == checksum) {
- if (tmp_bh)
- brelse(tmp_bh);
- return 0;
- }
- ret = -EINVAL;
-error:
- if (tmp_bh)
- brelse(tmp_bh);
- free_upcase_table(sb);
- return ret;
-}
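
The loop above decodes a run-length-compressed upcase table; the format,
as inferred from the loop itself, is a stream of u16 values in which a
value equal to the running character index is an identity mapping, 0xFFFF
announces that the next value is the length of an identity run to skip,
and anything else is an explicit upcase mapping for the current index.
One decode step, flattened to a single-level table[] for clarity:

	if (skip) {			/* previous value was 0xFFFF */
		index += uni;		/* skip an identity run */
		skip = false;
	} else if (uni == index) {
		index++;		/* single identity mapping */
	} else if (uni == 0xFFFF) {
		skip = true;		/* next value is a run length */
	} else {
		table[index++] = uni;	/* explicit upcase mapping */
	}

Columns of the two-level in-memory table are allocated lazily, only when
an explicit mapping lands in them; a NULL column remains identity. The
checksum is a 32-bit rotate-right accumulation over every table byte and
must match the value stored in the upcase directory entry.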
-
-static s32 __load_default_upcase_table(struct super_block *sb)
-{
- int i, ret = -EINVAL;
- u32 j;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- bool skip = false;
- u32 index = 0;
- u16 uni = 0;
- u16 **upcase_table;
-
- upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL);
- p_fs->vol_utbl = upcase_table;
- if (!upcase_table)
- return -ENOMEM;
- memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
-
- for (i = 0; index <= 0xFFFF && i < NUM_UPCASE * 2; i += 2) {
- uni = GET16(uni_upcase + i);
- if (skip) {
- pr_debug("skip from 0x%X ", index);
- index += uni;
- pr_debug("to 0x%X (amount of 0x%X)\n", index, uni);
- skip = false;
- } else if (uni == index) {
- index++;
- } else if (uni == 0xFFFF) {
- skip = true;
- } else { /* uni != index, uni != 0xFFFF */
- u16 col_index = get_col_index(index);
-
- if (!upcase_table[col_index]) {
- pr_debug("alloc = 0x%X\n", col_index);
- upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
- sizeof(u16),
- GFP_KERNEL);
- if (!upcase_table[col_index]) {
- ret = -ENOMEM;
- goto error;
- }
-
- for (j = 0; j < UTBL_ROW_COUNT; j++)
- upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j;
- }
-
- upcase_table[col_index][get_row_index(index)] = uni;
- index++;
- }
- }
-
- if (index >= 0xFFFF)
- return 0;
-
-error:
- /* FATAL error: the default upcase table itself has an error */
- free_upcase_table(sb);
- return ret;
-}
-
-s32 load_upcase_table(struct super_block *sb)
-{
- int i;
- u32 tbl_clu, tbl_size;
- sector_t sector;
- u32 type, num_sectors;
- struct chain_t clu;
- struct case_dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- clu.dir = p_fs->root_dir;
- clu.flags = 0x01;
-
- if (p_fs->dev_ejected)
- return -EIO;
-
- while (clu.dir != CLUSTER_32(~0)) {
- for (i = 0; i < p_fs->dentries_per_clu; i++) {
- ep = (struct case_dentry_t *)get_entry_in_dir(sb, &clu,
- i, NULL);
- if (!ep)
- return -ENOENT;
-
- type = exfat_get_entry_type((struct dentry_t *)ep);
-
- if (type == TYPE_UNUSED)
- break;
- if (type != TYPE_UPCASE)
- continue;
-
- tbl_clu = GET32_A(ep->start_clu);
- tbl_size = (u32)GET64_A(ep->size);
-
- sector = START_SECTOR(tbl_clu);
- num_sectors = ((tbl_size - 1) >> p_bd->sector_size_bits) + 1;
- if (__load_upcase_table(sb, sector, num_sectors,
- GET32_A(ep->checksum)) != 0)
- break;
- return 0;
- }
- if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
- return -EIO;
- }
- /* load default upcase table */
- return __load_default_upcase_table(sb);
-}
-
-void free_upcase_table(struct super_block *sb)
-{
- u32 i;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- u16 **upcase_table;
-
- upcase_table = p_fs->vol_utbl;
- for (i = 0; i < UTBL_COL_COUNT; i++)
- kfree(upcase_table[i]);
-
- kfree(p_fs->vol_utbl);
- p_fs->vol_utbl = NULL;
-}
-
-/*
- * Directory Entry Management Functions
- */
-
-u32 exfat_get_entry_type(struct dentry_t *p_entry)
-{
- struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
-
- if (ep->type == 0x0) {
- return TYPE_UNUSED;
- } else if (ep->type < 0x80) {
- return TYPE_DELETED;
- } else if (ep->type == 0x80) {
- return TYPE_INVALID;
- } else if (ep->type < 0xA0) {
- if (ep->type == 0x81) {
- return TYPE_BITMAP;
- } else if (ep->type == 0x82) {
- return TYPE_UPCASE;
- } else if (ep->type == 0x83) {
- return TYPE_VOLUME;
- } else if (ep->type == 0x85) {
- if (GET16_A(ep->attr) & ATTR_SUBDIR)
- return TYPE_DIR;
- else
- return TYPE_FILE;
- }
- return TYPE_CRITICAL_PRI;
- } else if (ep->type < 0xC0) {
- if (ep->type == 0xA0)
- return TYPE_GUID;
- else if (ep->type == 0xA1)
- return TYPE_PADDING;
- else if (ep->type == 0xA2)
- return TYPE_ACLTAB;
- return TYPE_BENIGN_PRI;
- } else if (ep->type < 0xE0) {
- if (ep->type == 0xC0)
- return TYPE_STREAM;
- else if (ep->type == 0xC1)
- return TYPE_EXTEND;
- else if (ep->type == 0xC2)
- return TYPE_ACL;
- return TYPE_CRITICAL_SEC;
- }
-
- return TYPE_BENIGN_SEC;
-}
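
For reference, the type-byte ranges the function above decodes, collapsed
into one table (derived from the function itself):

	0x00			TYPE_UNUSED (end of directory)
	0x01..0x7F		TYPE_DELETED (in-use bit cleared)
	0x80			TYPE_INVALID
	0x81 / 0x82 / 0x83	TYPE_BITMAP / TYPE_UPCASE / TYPE_VOLUME
	0x85			TYPE_DIR or TYPE_FILE (split on ATTR_SUBDIR)
	0x84, 0x86..0x9F	TYPE_CRITICAL_PRI
	0xA0 / 0xA1 / 0xA2	TYPE_GUID / TYPE_PADDING / TYPE_ACLTAB
	0xA3..0xBF		TYPE_BENIGN_PRI
	0xC0 / 0xC1 / 0xC2	TYPE_STREAM / TYPE_EXTEND / TYPE_ACL
	0xC3..0xDF		TYPE_CRITICAL_SEC
	0xE0..0xFF		TYPE_BENIGN_SEC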
-
-static void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
-{
- struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
-
- if (type == TYPE_UNUSED) {
- ep->type = 0x0;
- } else if (type == TYPE_DELETED) {
- ep->type &= ~0x80;
- } else if (type == TYPE_STREAM) {
- ep->type = 0xC0;
- } else if (type == TYPE_EXTEND) {
- ep->type = 0xC1;
- } else if (type == TYPE_BITMAP) {
- ep->type = 0x81;
- } else if (type == TYPE_UPCASE) {
- ep->type = 0x82;
- } else if (type == TYPE_VOLUME) {
- ep->type = 0x83;
- } else if (type == TYPE_DIR) {
- ep->type = 0x85;
- SET16_A(ep->attr, ATTR_SUBDIR);
- } else if (type == TYPE_FILE) {
- ep->type = 0x85;
- SET16_A(ep->attr, ATTR_ARCHIVE);
- } else if (type == TYPE_SYMLINK) {
- ep->type = 0x85;
- SET16_A(ep->attr, ATTR_ARCHIVE | ATTR_SYMLINK);
- }
-}
-
-u32 exfat_get_entry_attr(struct dentry_t *p_entry)
-{
- struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
-
- return (u32)GET16_A(ep->attr);
-}
-
-void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
-{
- struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
-
- SET16_A(ep->attr, (u16)attr);
-}
-
-u8 exfat_get_entry_flag(struct dentry_t *p_entry)
-{
- struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
-
- return ep->flags;
-}
-
-void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
-{
- struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
-
- ep->flags = flags;
-}
-
-u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
-{
- struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
-
- return GET32_A(ep->start_clu);
-}
-
-void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
-{
- struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
-
- SET32_A(ep->start_clu, start_clu);
-}
-
-u64 exfat_get_entry_size(struct dentry_t *p_entry)
-{
- struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
-
- return GET64_A(ep->valid_size);
-}
-
-void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
-{
- struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
-
- SET64_A(ep->valid_size, size);
- SET64_A(ep->size, size);
-}
-
-void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode)
-{
- u16 t = 0x00, d = 0x21;
- struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
-
- switch (mode) {
- case TM_CREATE:
- t = GET16_A(ep->create_time);
- d = GET16_A(ep->create_date);
- break;
- case TM_MODIFY:
- t = GET16_A(ep->modify_time);
- d = GET16_A(ep->modify_date);
- break;
- case TM_ACCESS:
- t = GET16_A(ep->access_time);
- d = GET16_A(ep->access_date);
- break;
- }
-
- tp->sec = (t & 0x001F) << 1;
- tp->min = (t >> 5) & 0x003F;
- tp->hour = (t >> 11);
- tp->day = (d & 0x001F);
- tp->mon = (d >> 5) & 0x000F;
- tp->year = (d >> 9);
-}
-
-void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode)
-{
- u16 t, d;
- struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
-
- t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
- d = (tp->year << 9) | (tp->mon << 5) | tp->day;
-
- switch (mode) {
- case TM_CREATE:
- SET16_A(ep->create_time, t);
- SET16_A(ep->create_date, d);
- break;
- case TM_MODIFY:
- SET16_A(ep->modify_time, t);
- SET16_A(ep->modify_date, d);
- break;
- case TM_ACCESS:
- SET16_A(ep->access_time, t);
- SET16_A(ep->access_date, d);
- break;
- }
-}
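
A worked example of the DOS-style packing above (tp->year's epoch base is
established elsewhere in the driver; 40 is used here purely for
illustration):

	hour 1, min 41, sec 2  ->  t = (1 << 11) | (41 << 5) | (2 >> 1)
	                             = 0x0D21
	year 40, mon 8, day 8  ->  d = (40 << 9) | (8 << 5) | 8
	                             = 0x5108

Seconds are stored in 2-second units, so the low bit of tp->sec is lost
on a set/get round trip.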
-
-static void init_file_entry(struct file_dentry_t *ep, u32 type)
-{
- struct timestamp_t tm, *tp;
-
- exfat_set_entry_type((struct dentry_t *)ep, type);
-
- tp = tm_current(&tm);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
- ep->create_time_ms = 0;
- ep->modify_time_ms = 0;
- ep->access_time_ms = 0;
-}
-
-static void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
-{
- exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
- ep->flags = flags;
- SET32_A(ep->start_clu, start_clu);
- SET64_A(ep->valid_size, size);
- SET64_A(ep->size, size);
-}
-
-static void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
-{
- int i;
-
- exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
- ep->flags = 0x0;
-
- for (i = 0; i < 30; i += 2) {
- SET16_A(ep->unicode_0_14 + i, *uniname);
- if (*uniname == 0x0)
- break;
- uniname++;
- }
-}
-
-static s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, u32 type, u32 start_clu, u64 size)
-{
- sector_t sector;
- u8 flags;
- struct file_dentry_t *file_ep;
- struct strm_dentry_t *strm_ep;
-
- flags = (type == TYPE_FILE) ? 0x01 : 0x03;
-
- /* we cannot use get_entry_set_in_dir here because file ep is not initialized yet */
- file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!file_ep)
- return -ENOENT;
-
- strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
- &sector);
- if (!strm_ep)
- return -ENOENT;
-
- init_file_entry(file_ep, type);
- exfat_buf_modify(sb, sector);
-
- init_strm_entry(strm_ep, flags, start_clu, size);
- exfat_buf_modify(sb, sector);
-
- return 0;
-}
-
-static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname)
-{
- int i;
- sector_t sector;
- u16 *uniname = p_uniname->name;
- struct file_dentry_t *file_ep;
- struct strm_dentry_t *strm_ep;
- struct name_dentry_t *name_ep;
-
- file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!file_ep)
- return -ENOENT;
-
- file_ep->num_ext = (u8)(num_entries - 1);
- exfat_buf_modify(sb, sector);
-
- strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
- &sector);
- if (!strm_ep)
- return -ENOENT;
-
- strm_ep->name_len = p_uniname->name_len;
- SET16_A(strm_ep->name_hash, p_uniname->name_hash);
- exfat_buf_modify(sb, sector);
-
- for (i = 2; i < num_entries; i++) {
- name_ep = (struct name_dentry_t *)get_entry_in_dir(sb, p_dir,
- entry + i,
- &sector);
- if (!name_ep)
- return -ENOENT;
-
- init_name_entry(name_ep, uniname);
- exfat_buf_modify(sb, sector);
- uniname += 15;
- }
-
- update_dir_checksum(sb, p_dir, entry);
-
- return 0;
-}
-
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries)
-{
- int i;
- sector_t sector;
- struct dentry_t *ep;
-
- for (i = order; i < num_entries; i++) {
- ep = get_entry_in_dir(sb, p_dir, entry + i, &sector);
- if (!ep)
- return;
-
- exfat_set_entry_type(ep, TYPE_DELETED);
- exfat_buf_modify(sb, sector);
- }
-}
-
-void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
- s32 entry)
-{
- int i, num_entries;
- sector_t sector;
- u16 chksum;
- struct file_dentry_t *file_ep;
- struct dentry_t *ep;
-
- file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!file_ep)
- return;
-
- exfat_buf_lock(sb, sector);
-
- num_entries = (s32)file_ep->num_ext + 1;
- chksum = calc_checksum_2byte((void *)file_ep, DENTRY_SIZE, 0,
- CS_DIR_ENTRY);
-
- for (i = 1; i < num_entries; i++) {
- ep = get_entry_in_dir(sb, p_dir, entry + i, NULL);
- if (!ep) {
- exfat_buf_unlock(sb, sector);
- return;
- }
-
- chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
- CS_DEFAULT);
- }
-
- SET16_A(file_ep->checksum, chksum);
- exfat_buf_modify(sb, sector);
- exfat_buf_unlock(sb, sector);
-}
-
-static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es,
- sector_t sec, s32 off, u32 count)
-{
- s32 num_entries, buf_off = (off - es->offset);
- u32 remaining_byte_in_sector, copy_entries;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- u32 clu;
- u8 *buf, *esbuf = (u8 *)&es->__buf;
-
- pr_debug("%s entered es %p sec %llu off %d count %d\n",
- __func__, es, (unsigned long long)sec, off, count);
- num_entries = count;
-
- while (num_entries) {
- /* write on a per-sector basis */
- remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
- copy_entries = min_t(s32,
- remaining_byte_in_sector >> DENTRY_SIZE_BITS,
- num_entries);
- buf = exfat_buf_getblk(sb, sec);
- if (!buf)
- goto err_out;
- pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
- pr_debug("copying %d entries from %p to sector %llu\n",
- copy_entries, (esbuf + buf_off),
- (unsigned long long)sec);
- memcpy(buf + off, esbuf + buf_off,
- copy_entries << DENTRY_SIZE_BITS);
- exfat_buf_modify(sb, sec);
- num_entries -= copy_entries;
-
- if (num_entries) {
- /* get next sector */
- if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
- clu = GET_CLUSTER_FROM_SECTOR(sec);
- if (es->alloc_flag == 0x03) {
- clu++;
- } else {
- if (exfat_fat_read(sb, clu, &clu) == -1)
- goto err_out;
- }
- sec = START_SECTOR(clu);
- } else {
- sec++;
- }
- off = 0;
- buf_off += copy_entries << DENTRY_SIZE_BITS;
- }
- }
-
- pr_debug("%s exited successfully\n", __func__);
- return 0;
-err_out:
- pr_debug("%s failed\n", __func__);
- return -EINVAL;
-}
-
-/* write back all entries in entry set */
-static s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
-{
- return __write_partial_entries_in_entry_set(sb, es, es->sector,
- es->offset,
- es->num_entries);
-}
-
-void update_dir_checksum_with_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es)
-{
- struct dentry_t *ep;
- u16 chksum = 0;
- s32 chksum_type = CS_DIR_ENTRY, i;
-
- ep = (struct dentry_t *)&es->__buf;
- for (i = 0; i < es->num_entries; i++) {
- pr_debug("%s ep %p\n", __func__, ep);
- chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
- chksum_type);
- ep++;
- chksum_type = CS_DEFAULT;
- }
-
- ep = (struct dentry_t *)&es->__buf;
- SET16_A(((struct file_dentry_t *)ep)->checksum, chksum);
- write_whole_entry_set(sb, es);
-}
-
-static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
- s32 byte_offset, u32 *clu)
-{
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- s32 clu_offset;
- u32 cur_clu;
-
- clu_offset = byte_offset >> p_fs->cluster_size_bits;
- cur_clu = p_dir->dir;
-
- if (p_dir->flags == 0x03) {
- cur_clu += clu_offset;
- } else {
- while (clu_offset > 0) {
- if (exfat_fat_read(sb, cur_clu, &cur_clu) == -1)
- return -EIO;
- clu_offset--;
- }
- }
-
- if (clu)
- *clu = cur_clu;
- return 0;
-}
-
-static s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- sector_t *sector, s32 *offset)
-{
- s32 off, ret;
- u32 clu = 0;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- off = entry << DENTRY_SIZE_BITS;
-
- if (p_dir->dir == CLUSTER_32(0)) { /* FAT16 root_dir */
- *offset = off & p_bd->sector_size_mask;
- *sector = off >> p_bd->sector_size_bits;
- *sector += p_fs->root_start_sector;
- } else {
- ret = _walk_fat_chain(sb, p_dir, off, &clu);
- if (ret != 0)
- return ret;
-
- /* byte offset in cluster */
- off &= p_fs->cluster_size - 1;
-
- /* byte offset in sector */
- *offset = off & p_bd->sector_size_mask;
-
- /* sector offset in cluster */
- *sector = off >> p_bd->sector_size_bits;
- *sector += START_SECTOR(clu);
- }
- return 0;
-}
-
-struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, sector_t *sector)
-{
- s32 off;
- sector_t sec;
- u8 *buf;
-
- if (find_location(sb, p_dir, entry, &sec, &off) != 0)
- return NULL;
-
- buf = exfat_buf_getblk(sb, sec);
-
- if (!buf)
- return NULL;
-
- if (sector)
- *sector = sec;
- return (struct dentry_t *)(buf + off);
-}
-
-/* returns a set of dentries for a file or dir.
- * Note that this is a copy (dump) of the dentries, so the caller must
- * write the set back (see write_whole_entry_set() below) for any changes
- * made to it to reach the real device.
- * in:
- * sb+p_dir+entry: indicates a file/dir
- * type: specifies how many dentries should be included.
- * out:
- * file_ep: will point to the first dentry (= file dentry) on success
- * return:
- * pointer to the entry set on success,
- * NULL on failure.
- */
-
-#define ES_MODE_STARTED 0
-#define ES_MODE_GET_FILE_ENTRY 1
-#define ES_MODE_GET_STRM_ENTRY 2
-#define ES_MODE_GET_NAME_ENTRY 3
-#define ES_MODE_GET_CRITICAL_SEC_ENTRY 4
-struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u32 type,
- struct dentry_t **file_ep)
-{
- s32 off, ret, byte_offset;
- u32 clu = 0;
- sector_t sec;
- u32 entry_type;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- struct entry_set_cache_t *es = NULL;
- struct dentry_t *ep, *pos;
- u8 *buf;
- u8 num_entries;
- s32 mode = ES_MODE_STARTED;
- size_t bufsize;
-
- pr_debug("%s entered p_dir dir %u flags %x size %d\n",
- __func__, p_dir->dir, p_dir->flags, p_dir->size);
-
- byte_offset = entry << DENTRY_SIZE_BITS;
- ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu);
- if (ret != 0)
- return NULL;
-
- /* byte offset in cluster */
- byte_offset &= p_fs->cluster_size - 1;
-
- /* byte offset in sector */
- off = byte_offset & p_bd->sector_size_mask;
-
- /* sector offset in cluster */
- sec = byte_offset >> p_bd->sector_size_bits;
- sec += START_SECTOR(clu);
-
- buf = exfat_buf_getblk(sb, sec);
- if (!buf)
- goto err_out;
-
- ep = (struct dentry_t *)(buf + off);
- entry_type = exfat_get_entry_type(ep);
-
- if ((entry_type != TYPE_FILE) && (entry_type != TYPE_DIR))
- goto err_out;
-
- if (type == ES_ALL_ENTRIES)
- num_entries = ((struct file_dentry_t *)ep)->num_ext + 1;
- else
- num_entries = type;
-
- bufsize = offsetof(struct entry_set_cache_t, __buf) + (num_entries) *
- sizeof(struct dentry_t);
- pr_debug("%s: trying to kmalloc %zx bytes for %d entries\n", __func__,
- bufsize, num_entries);
- es = kmalloc(bufsize, GFP_KERNEL);
- if (!es)
- goto err_out;
-
- es->num_entries = num_entries;
- es->sector = sec;
- es->offset = off;
- es->alloc_flag = p_dir->flags;
-
- pos = (struct dentry_t *)&es->__buf;
-
- while (num_entries) {
- /*
- * instead of copying the whole sector, we check every entry.
- * this provides a minimum of stability and consistency.
- */
- entry_type = exfat_get_entry_type(ep);
-
- if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED))
- goto err_out;
-
- switch (mode) {
- case ES_MODE_STARTED:
- if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR))
- mode = ES_MODE_GET_FILE_ENTRY;
- else
- goto err_out;
- break;
- case ES_MODE_GET_FILE_ENTRY:
- if (entry_type == TYPE_STREAM)
- mode = ES_MODE_GET_STRM_ENTRY;
- else
- goto err_out;
- break;
- case ES_MODE_GET_STRM_ENTRY:
- if (entry_type == TYPE_EXTEND)
- mode = ES_MODE_GET_NAME_ENTRY;
- else
- goto err_out;
- break;
- case ES_MODE_GET_NAME_ENTRY:
- if (entry_type == TYPE_EXTEND)
- break;
- else if (entry_type == TYPE_STREAM)
- goto err_out;
- else if (entry_type & TYPE_CRITICAL_SEC)
- mode = ES_MODE_GET_CRITICAL_SEC_ENTRY;
- else
- goto err_out;
- break;
- case ES_MODE_GET_CRITICAL_SEC_ENTRY:
- if ((entry_type == TYPE_EXTEND) ||
- (entry_type == TYPE_STREAM))
- goto err_out;
- else if ((entry_type & TYPE_CRITICAL_SEC) !=
- TYPE_CRITICAL_SEC)
- goto err_out;
- break;
- }
-
- memcpy(pos, ep, sizeof(struct dentry_t));
-
- if (--num_entries == 0)
- break;
-
- if (((off + DENTRY_SIZE) & p_bd->sector_size_mask) <
- (off & p_bd->sector_size_mask)) {
- /* get the next sector */
- if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
- if (es->alloc_flag == 0x03) {
- clu++;
- } else {
- if (exfat_fat_read(sb, clu, &clu) == -1)
- goto err_out;
- }
- sec = START_SECTOR(clu);
- } else {
- sec++;
- }
- buf = exfat_buf_getblk(sb, sec);
- if (!buf)
- goto err_out;
- off = 0;
- ep = (struct dentry_t *)(buf);
- } else {
- ep++;
- off += DENTRY_SIZE;
- }
- pos++;
- }
-
- if (file_ep)
- *file_ep = (struct dentry_t *)&es->__buf;
-
- pr_debug("%s exiting es %p sec %llu offset %d flags %d, num_entries %u buf ptr %p\n",
- __func__, es, (unsigned long long)es->sector, es->offset,
- es->alloc_flag, es->num_entries, &es->__buf);
- return es;
-err_out:
- pr_debug("%s exited NULL (es %p)\n", __func__, es);
- kfree(es);
- return NULL;
-}
-
-void release_entry_set(struct entry_set_cache_t *es)
-{
- pr_debug("%s es=%p\n", __func__, es);
- kfree(es);
-}
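
The usual life cycle of an entry set, pieced together from this file's
own callers (a usage sketch, not new API): the set is a private copy, so
edits must be written back before release.

	static void touch_file_entry(struct super_block *sb,
				     struct chain_t *p_dir, s32 entry)
	{
		struct entry_set_cache_t *es;
		struct dentry_t *ep;

		es = get_entry_set_in_dir(sb, p_dir, entry,
					  ES_ALL_ENTRIES, &ep);
		if (!es)
			return;

		/* ... modify the copied dentries through ep ... */

		/* recompute the checksum and flush the copy to disk */
		update_dir_checksum_with_entry_set(sb, es);
		release_entry_set(es);
	}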
-
-/* search for "num_entries" contiguous empty entries */
-static s32 search_deleted_or_unused_entry(struct super_block *sb,
- struct chain_t *p_dir,
- s32 num_entries)
-{
- int i, dentry, num_empty = 0;
- s32 dentries_per_clu;
- u32 type;
- struct chain_t clu;
- struct dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- if (p_fs->hint_uentry.dir == p_dir->dir) {
- if (p_fs->hint_uentry.entry == -1)
- return -1;
-
- clu.dir = p_fs->hint_uentry.clu.dir;
- clu.size = p_fs->hint_uentry.clu.size;
- clu.flags = p_fs->hint_uentry.clu.flags;
-
- dentry = p_fs->hint_uentry.entry;
- } else {
- p_fs->hint_uentry.entry = -1;
-
- clu.dir = p_dir->dir;
- clu.size = p_dir->size;
- clu.flags = p_dir->flags;
-
- dentry = 0;
- }
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- i = dentry % dentries_per_clu;
- else
- i = dentry & (dentries_per_clu - 1);
-
- for (; i < dentries_per_clu; i++, dentry++) {
- ep = get_entry_in_dir(sb, &clu, i, NULL);
- if (!ep)
- return -1;
-
- type = exfat_get_entry_type(ep);
-
- if (type == TYPE_UNUSED) {
- num_empty++;
- if (p_fs->hint_uentry.entry == -1) {
- p_fs->hint_uentry.dir = p_dir->dir;
- p_fs->hint_uentry.entry = dentry;
-
- p_fs->hint_uentry.clu.dir = clu.dir;
- p_fs->hint_uentry.clu.size = clu.size;
- p_fs->hint_uentry.clu.flags = clu.flags;
- }
- } else if (type == TYPE_DELETED) {
- num_empty++;
- } else {
- num_empty = 0;
- }
-
- if (num_empty >= num_entries) {
- p_fs->hint_uentry.dir = CLUSTER_32(~0);
- p_fs->hint_uentry.entry = -1;
-
- if (p_fs->vol_type == EXFAT)
- return dentry - (num_entries - 1);
- else
- return dentry;
- }
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (clu.flags == 0x03) {
- if ((--clu.size) > 0)
- clu.dir++;
- else
- clu.dir = CLUSTER_32(~0);
- } else {
- if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
- return -1;
- }
- }
-
- return -1;
-}
-
-static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
-{
- s32 ret, dentry;
- u32 last_clu;
- sector_t sector;
- u64 size = 0;
- struct chain_t clu;
- struct dentry_t *ep = NULL;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- return search_deleted_or_unused_entry(sb, p_dir, num_entries);
-
- while ((dentry = search_deleted_or_unused_entry(sb, p_dir, num_entries)) < 0) {
- if (p_fs->dev_ejected)
- break;
-
- if (p_dir->dir != p_fs->root_dir)
- size = i_size_read(inode);
-
- last_clu = find_last_cluster(sb, p_dir);
- clu.dir = last_clu + 1;
- clu.size = 0;
- clu.flags = p_dir->flags;
-
- /* (1) allocate a cluster */
- ret = exfat_alloc_cluster(sb, 1, &clu);
- if (ret < 1)
- return -EIO;
-
- if (clear_cluster(sb, clu.dir) != 0)
- return -EIO;
-
- /* (2) append to the FAT chain */
- if (clu.flags != p_dir->flags) {
- exfat_chain_cont_cluster(sb, p_dir->dir, p_dir->size);
- p_dir->flags = 0x01;
- p_fs->hint_uentry.clu.flags = 0x01;
- }
- if (clu.flags == 0x01)
- if (exfat_fat_write(sb, last_clu, clu.dir) < 0)
- return -EIO;
-
- if (p_fs->hint_uentry.entry == -1) {
- p_fs->hint_uentry.dir = p_dir->dir;
- p_fs->hint_uentry.entry = p_dir->size << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS);
-
- p_fs->hint_uentry.clu.dir = clu.dir;
- p_fs->hint_uentry.clu.size = 0;
- p_fs->hint_uentry.clu.flags = clu.flags;
- }
- p_fs->hint_uentry.clu.size++;
- p_dir->size++;
-
- /* (3) update the directory entry */
- if (p_dir->dir != p_fs->root_dir) {
- size += p_fs->cluster_size;
-
- ep = get_entry_in_dir(sb, &fid->dir,
- fid->entry + 1, &sector);
- if (!ep)
- return -ENOENT;
- exfat_set_entry_size(ep, size);
- exfat_set_entry_flag(ep, p_dir->flags);
- exfat_buf_modify(sb, sector);
-
- update_dir_checksum(sb, &fid->dir,
- fid->entry);
- }
-
- i_size_write(inode, i_size_read(inode) + p_fs->cluster_size);
- EXFAT_I(inode)->mmu_private += p_fs->cluster_size;
- EXFAT_I(inode)->fid.size += p_fs->cluster_size;
- EXFAT_I(inode)->fid.flags = p_dir->flags;
- inode->i_blocks += 1 << (p_fs->cluster_size_bits - 9);
- }
-
- return dentry;
-}
-
-static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
- s32 order)
-{
- int i, len = 0;
-
- for (i = 0; i < 30; i += 2) {
- *uniname = GET16_A(ep->unicode_0_14 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- *uniname = 0x0;
- return len;
-}
-
-/* return values of exfat_find_dir_entry()
- * >= 0 : return dir entry position with the name in dir
- * -1 : (root dir, ".") it is the root dir itself
- * -2 : entry with the name does not exist
- */
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type)
-{
- int i = 0, dentry = 0, num_ext_entries = 0, len, step;
- s32 order = 0;
- bool is_feasible_entry = false;
- s32 dentries_per_clu, num_empty = 0;
- u32 entry_type;
- u16 entry_uniname[16], *uniname = NULL, unichar;
- struct chain_t clu;
- struct dentry_t *ep;
- struct file_dentry_t *file_ep;
- struct strm_dentry_t *strm_ep;
- struct name_dentry_t *name_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (p_dir->dir == p_fs->root_dir) {
- if ((!nls_uniname_cmp(sb, p_uniname->name,
- (u16 *)UNI_CUR_DIR_NAME)) ||
- (!nls_uniname_cmp(sb, p_uniname->name,
- (u16 *)UNI_PAR_DIR_NAME)))
- return -1; // special case, root directory itself
- }
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.size = p_dir->size;
- clu.flags = p_dir->flags;
-
- p_fs->hint_uentry.dir = p_dir->dir;
- p_fs->hint_uentry.entry = -1;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- while (i < dentries_per_clu) {
- ep = get_entry_in_dir(sb, &clu, i, NULL);
- if (!ep)
- return -2;
-
- entry_type = exfat_get_entry_type(ep);
- step = 1;
-
- if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
- is_feasible_entry = false;
-
- if (p_fs->hint_uentry.entry == -1) {
- num_empty++;
-
- if (num_empty == 1) {
- p_fs->hint_uentry.clu.dir = clu.dir;
- p_fs->hint_uentry.clu.size = clu.size;
- p_fs->hint_uentry.clu.flags = clu.flags;
- }
- if ((num_empty >= num_entries) || (entry_type == TYPE_UNUSED))
- p_fs->hint_uentry.entry = dentry - (num_empty - 1);
- }
-
- if (entry_type == TYPE_UNUSED)
- return -2;
- } else {
- num_empty = 0;
-
- if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
- file_ep = (struct file_dentry_t *)ep;
- if ((type == TYPE_ALL) || (type == entry_type)) {
- num_ext_entries = file_ep->num_ext;
- is_feasible_entry = true;
- } else {
- is_feasible_entry = false;
- step = file_ep->num_ext + 1;
- }
- } else if (entry_type == TYPE_STREAM) {
- if (is_feasible_entry) {
- strm_ep = (struct strm_dentry_t *)ep;
- if (p_uniname->name_hash == GET16_A(strm_ep->name_hash) &&
- p_uniname->name_len == strm_ep->name_len) {
- order = 1;
- } else {
- is_feasible_entry = false;
- step = num_ext_entries;
- }
- }
- } else if (entry_type == TYPE_EXTEND) {
- if (is_feasible_entry) {
- name_ep = (struct name_dentry_t *)ep;
-
- if ((++order) == 2)
- uniname = p_uniname->name;
- else
- uniname += 15;
-
- len = extract_uni_name_from_name_entry(name_ep,
- entry_uniname, order);
-
- unichar = *(uniname + len);
- *(uniname + len) = 0x0;
-
- if (nls_uniname_cmp(sb, uniname, entry_uniname)) {
- is_feasible_entry = false;
- step = num_ext_entries - order + 1;
- } else if (order == num_ext_entries) {
- p_fs->hint_uentry.dir = CLUSTER_32(~0);
- p_fs->hint_uentry.entry = -1;
- return dentry - (num_ext_entries);
- }
-
- *(uniname + len) = unichar;
- }
- } else {
- is_feasible_entry = false;
- }
- }
-
- i += step;
- dentry += step;
- }
-
- i -= dentries_per_clu;
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (clu.flags == 0x03) {
- if ((--clu.size) > 0)
- clu.dir++;
- else
- clu.dir = CLUSTER_32(~0);
- } else {
- if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
- return -2;
- }
- }
-
- return -2;
-}
-
-s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry)
-{
- int i, count = 0;
- u32 type;
- struct file_dentry_t *file_ep = (struct file_dentry_t *)p_entry;
- struct dentry_t *ext_ep;
-
- for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) {
- ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL);
- if (!ext_ep)
- return -1;
-
- type = exfat_get_entry_type(ext_ep);
- if ((type == TYPE_EXTEND) || (type == TYPE_STREAM))
- count++;
- else
- return count;
- }
-
- return count;
-}
-
-s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
- u32 type)
-{
- int i, count = 0;
- s32 dentries_per_clu;
- u32 entry_type;
- struct chain_t clu;
- struct dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.size = p_dir->size;
- clu.flags = p_dir->flags;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < dentries_per_clu; i++) {
- ep = get_entry_in_dir(sb, &clu, i, NULL);
- if (!ep)
- return -ENOENT;
-
- entry_type = exfat_get_entry_type(ep);
-
- if (entry_type == TYPE_UNUSED)
- return count;
- if (!(type & TYPE_CRITICAL_PRI) &&
- !(type & TYPE_BENIGN_PRI))
- continue;
-
- if ((type == TYPE_ALL) || (type == entry_type))
- count++;
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (clu.flags == 0x03) {
- if ((--clu.size) > 0)
- clu.dir++;
- else
- clu.dir = CLUSTER_32(~0);
- } else {
- if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
- return -EIO;
- }
- }
-
- return count;
-}
-
-bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
-{
- int i, count = 0;
- s32 dentries_per_clu;
- u32 type;
- struct chain_t clu;
- struct dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.size = p_dir->size;
- clu.flags = p_dir->flags;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < dentries_per_clu; i++) {
- ep = get_entry_in_dir(sb, &clu, i, NULL);
- if (!ep)
- break;
-
- type = exfat_get_entry_type(ep);
-
- if (type == TYPE_UNUSED)
- return true;
- if ((type != TYPE_FILE) && (type != TYPE_DIR))
- continue;
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- return false;
-
- if (p_fs->vol_type == EXFAT)
- return false;
- if ((p_dir->dir == p_fs->root_dir) || ((++count) > 2))
- return false;
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (clu.flags == 0x03) {
- if ((--clu.size) > 0)
- clu.dir++;
- else
- clu.dir = CLUSTER_32(~0);
- }
- if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
- break;
- }
-
- return true;
-}
-
-/*
- * Name Conversion Functions
- */
-
-/* input : dir, uni_name
- * output : num_of_entry; dos_name (format: aaaaaa~1.bbb) is left
- * untouched in the exFAT variant of this function
- */
-s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 *entries,
- struct dos_name_t *p_dosname)
-{
- s32 num_entries;
-
- num_entries = exfat_calc_num_entries(p_uniname);
- if (num_entries == 0)
- return -EINVAL;
-
- *entries = num_entries;
-
- return 0;
-}
-
-void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname)
-{
- int i;
- struct dentry_t *ep;
- struct entry_set_cache_t *es;
-
- es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
- if (!es || es->num_entries < 3) {
- if (es)
- release_entry_set(es);
- return;
- }
-
- ep += 2;
-
- /*
- * First entry : file entry
- * Second entry : stream-extension entry
- * Third entry : first file-name entry
- * So the index of the first file-name dentry starts at 2.
- */
- for (i = 2; i < es->num_entries; i++, ep++) {
- if (exfat_get_entry_type(ep) == TYPE_EXTEND)
- extract_uni_name_from_name_entry((struct name_dentry_t *)
- ep, uniname, i);
- else
- goto out;
- uniname += 15;
- }
-
-out:
- release_entry_set(es);
-}
-
-s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
-{
- s32 len;
-
- len = p_uniname->name_len;
- if (len == 0)
- return 0;
-
- /* 1 file entry + 1 stream entry + name entries */
- return (len - 1) / 15 + 3;
-}
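
For example, a 20-character name needs (20 - 1) / 15 + 3 == 4 dentries:
one file entry, one stream entry, and two name entries (each name entry
carries up to 15 UTF-16 code units).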
-
-u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
-{
- int i;
- u8 *c = (u8 *)data;
-
- switch (type) {
- case CS_DIR_ENTRY:
- for (i = 0; i < len; i++, c++) {
- if ((i == 2) || (i == 3))
- continue;
- chksum = (((chksum & 1) << 15) |
- ((chksum & 0xFFFE) >> 1)) + (u16)*c;
- }
- break;
- default:
- for (i = 0; i < len; i++, c++)
- chksum = (((chksum & 1) << 15) |
- ((chksum & 0xFFFE) >> 1)) + (u16)*c;
- }
-
- return chksum;
-}
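
Usage pattern matching update_dir_checksum() above: the first dentry of a
set is hashed with CS_DIR_ENTRY so that bytes 2 and 3 (the stored
checksum field itself) are skipped, and every following dentry is chained
in with CS_DEFAULT:

	chksum = calc_checksum_2byte(file_ep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
	chksum = calc_checksum_2byte(ext_ep, DENTRY_SIZE, chksum, CS_DEFAULT);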
-
-/*
- * Name Resolution Functions
- */
-
-/* return values of resolve_path()
- * > 0 : return the length of the path
- * < 0 : return error
- */
-s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
- struct uni_name_t *p_uniname)
-{
- bool lossy = false;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
-
- if (strscpy(name_buf, path, sizeof(name_buf)) < 0)
- return -EINVAL;
-
- nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy);
- if (lossy)
- return -EINVAL;
-
- fid->size = i_size_read(inode);
-
- p_dir->dir = fid->start_clu;
- p_dir->size = (s32)(fid->size >> p_fs->cluster_size_bits);
- p_dir->flags = fid->flags;
-
- return 0;
-}
-
-s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
-{
- struct bpbex_t *p_bpb = (struct bpbex_t *)p_pbr->bpb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (p_bpb->num_fats == 0)
- return -EFSCORRUPTED;
-
- p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits;
- p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits;
- p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
- p_bd->sector_size_bits;
- p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
-
- p_fs->num_FAT_sectors = GET32(p_bpb->fat_length);
-
- p_fs->FAT1_start_sector = p_fs->PBR_sector + GET32(p_bpb->fat_offset);
- if (p_bpb->num_fats == 1)
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
- else
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
- p_fs->num_FAT_sectors;
-
- p_fs->root_start_sector = p_fs->PBR_sector + GET32(p_bpb->clu_offset);
- p_fs->data_start_sector = p_fs->root_start_sector;
-
- p_fs->num_sectors = GET64(p_bpb->vol_length);
- p_fs->num_clusters = GET32(p_bpb->clu_count) + 2;
-	/* because the cluster index starts at 2 */
-
- p_fs->vol_type = EXFAT;
- p_fs->vol_id = GET32(p_bpb->vol_serial);
-
- p_fs->root_dir = GET32(p_bpb->root_cluster);
- p_fs->dentries_in_root = 0;
- p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
- DENTRY_SIZE_BITS);
-
- p_fs->vol_flag = (u32)GET16(p_bpb->vol_flags);
- p_fs->clu_srch_ptr = 2;
- p_fs->used_clusters = UINT_MAX;
-
- return 0;
-}
-
-s32 create_dir(struct inode *inode, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, struct file_id_t *fid)
-{
- s32 ret, dentry, num_entries;
- u64 size;
- struct chain_t clu;
- struct dos_name_t dos_name;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
- &dos_name);
- if (ret)
- return ret;
-
- /* find_empty_entry must be called before alloc_cluster */
- dentry = find_empty_entry(inode, p_dir, num_entries);
- if (dentry < 0)
- return -ENOSPC;
-
- clu.dir = CLUSTER_32(~0);
- clu.size = 0;
- clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-
- /* (1) allocate a cluster */
- ret = exfat_alloc_cluster(sb, 1, &clu);
- if (ret < 0)
- return ret;
- else if (ret == 0)
- return -ENOSPC;
-
- ret = clear_cluster(sb, clu.dir);
- if (ret != 0)
- return ret;
-
- size = p_fs->cluster_size;
-
- /* (2) update the directory entry */
- /* make sub-dir entry in parent directory */
- ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
- size);
- if (ret != 0)
- return ret;
-
- ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
- &dos_name);
- if (ret != 0)
- return ret;
-
- fid->dir.dir = p_dir->dir;
- fid->dir.size = p_dir->size;
- fid->dir.flags = p_dir->flags;
- fid->entry = dentry;
-
- fid->attr = ATTR_SUBDIR;
- fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
- fid->size = size;
- fid->start_clu = clu.dir;
-
- fid->type = TYPE_DIR;
- fid->rwoffset = 0;
- fid->hint_last_off = -1;
-
- return 0;
-}
-
-s32 create_file(struct inode *inode, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid)
-{
- s32 ret, dentry, num_entries;
- struct dos_name_t dos_name;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
- &dos_name);
- if (ret)
- return ret;
-
- /* find_empty_entry must be called before alloc_cluster() */
- dentry = find_empty_entry(inode, p_dir, num_entries);
- if (dentry < 0)
- return -ENOSPC;
-
- /* (1) update the directory entry */
- /* fill the dos name directory entry information of the created file.
- * the first cluster is not determined yet. (0)
- */
- ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
- CLUSTER_32(0), 0);
- if (ret != 0)
- return ret;
-
- ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
- &dos_name);
- if (ret != 0)
- return ret;
-
- fid->dir.dir = p_dir->dir;
- fid->dir.size = p_dir->size;
- fid->dir.flags = p_dir->flags;
- fid->entry = dentry;
-
- fid->attr = ATTR_ARCHIVE | mode;
- fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
- fid->size = 0;
- fid->start_clu = CLUSTER_32(~0);
-
- fid->type = TYPE_FILE;
- fid->rwoffset = 0;
- fid->hint_last_off = -1;
-
- return 0;
-}
-
-void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
-{
- s32 num_entries;
- sector_t sector;
- struct dentry_t *ep;
- struct super_block *sb = inode->i_sb;
-
- ep = get_entry_in_dir(sb, p_dir, entry, &sector);
- if (!ep)
- return;
-
- exfat_buf_lock(sb, sector);
-
-	/* exfat_buf_lock() must be held before calling count_ext_entries() */
- num_entries = exfat_count_ext_entries(sb, p_dir, entry, ep);
- if (num_entries < 0) {
- exfat_buf_unlock(sb, sector);
- return;
- }
- num_entries++;
-
- exfat_buf_unlock(sb, sector);
-
- /* (1) update the directory entry */
- exfat_delete_dir_entry(sb, p_dir, entry, 0, num_entries);
-}
-
-s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
- struct uni_name_t *p_uniname, struct file_id_t *fid)
-{
- s32 ret, newentry = -1, num_old_entries, num_new_entries;
- sector_t sector_old, sector_new;
- struct dos_name_t dos_name;
- struct dentry_t *epold, *epnew;
- struct super_block *sb = inode->i_sb;
-
- epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
- if (!epold)
- return -ENOENT;
-
- exfat_buf_lock(sb, sector_old);
-
-	/* exfat_buf_lock() must be held before calling count_ext_entries() */
- num_old_entries = exfat_count_ext_entries(sb, p_dir, oldentry,
- epold);
- if (num_old_entries < 0) {
- exfat_buf_unlock(sb, sector_old);
- return -ENOENT;
- }
- num_old_entries++;
-
- ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname,
- &num_new_entries, &dos_name);
- if (ret) {
- exfat_buf_unlock(sb, sector_old);
- return ret;
- }
-
- if (num_old_entries < num_new_entries) {
- newentry = find_empty_entry(inode, p_dir, num_new_entries);
- if (newentry < 0) {
- exfat_buf_unlock(sb, sector_old);
- return -ENOSPC;
- }
-
- epnew = get_entry_in_dir(sb, p_dir, newentry, &sector_new);
- if (!epnew) {
- exfat_buf_unlock(sb, sector_old);
- return -ENOENT;
- }
-
- memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- if (exfat_get_entry_type(epnew) == TYPE_FILE) {
- exfat_set_entry_attr(epnew,
- exfat_get_entry_attr(epnew) |
- ATTR_ARCHIVE);
- fid->attr |= ATTR_ARCHIVE;
- }
- exfat_buf_modify(sb, sector_new);
- exfat_buf_unlock(sb, sector_old);
-
- epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
- &sector_old);
- exfat_buf_lock(sb, sector_old);
- epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
- &sector_new);
-
- if (!epold || !epnew) {
- exfat_buf_unlock(sb, sector_old);
- return -ENOENT;
- }
-
- memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- exfat_buf_modify(sb, sector_new);
- exfat_buf_unlock(sb, sector_old);
-
- ret = exfat_init_ext_entry(sb, p_dir, newentry,
- num_new_entries, p_uniname,
- &dos_name);
- if (ret != 0)
- return ret;
-
- exfat_delete_dir_entry(sb, p_dir, oldentry, 0,
- num_old_entries);
- fid->entry = newentry;
- } else {
- if (exfat_get_entry_type(epold) == TYPE_FILE) {
- exfat_set_entry_attr(epold,
- exfat_get_entry_attr(epold) |
- ATTR_ARCHIVE);
- fid->attr |= ATTR_ARCHIVE;
- }
- exfat_buf_modify(sb, sector_old);
- exfat_buf_unlock(sb, sector_old);
-
- ret = exfat_init_ext_entry(sb, p_dir, oldentry,
- num_new_entries, p_uniname,
- &dos_name);
- if (ret != 0)
- return ret;
-
- exfat_delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
- num_old_entries);
- }
-
- return 0;
-}
-
-s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
- struct chain_t *p_newdir, struct uni_name_t *p_uniname,
- struct file_id_t *fid)
-{
- s32 ret, newentry, num_new_entries, num_old_entries;
- sector_t sector_mov, sector_new;
- struct dos_name_t dos_name;
- struct dentry_t *epmov, *epnew;
- struct super_block *sb = inode->i_sb;
-
- epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
- if (!epmov)
- return -ENOENT;
-
-	/* check if the source and target directories are the same */
- if (exfat_get_entry_type(epmov) == TYPE_DIR &&
- exfat_get_entry_clu0(epmov) == p_newdir->dir)
- return -EINVAL;
-
- exfat_buf_lock(sb, sector_mov);
-
-	/* exfat_buf_lock() must be held before calling count_ext_entries() */
- num_old_entries = exfat_count_ext_entries(sb, p_olddir, oldentry,
- epmov);
- if (num_old_entries < 0) {
- exfat_buf_unlock(sb, sector_mov);
- return -ENOENT;
- }
- num_old_entries++;
-
- ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname,
- &num_new_entries, &dos_name);
- if (ret) {
- exfat_buf_unlock(sb, sector_mov);
- return ret;
- }
-
- newentry = find_empty_entry(inode, p_newdir, num_new_entries);
- if (newentry < 0) {
- exfat_buf_unlock(sb, sector_mov);
- return -ENOSPC;
- }
-
- epnew = get_entry_in_dir(sb, p_newdir, newentry, &sector_new);
- if (!epnew) {
- exfat_buf_unlock(sb, sector_mov);
- return -ENOENT;
- }
-
- memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- if (exfat_get_entry_type(epnew) == TYPE_FILE) {
- exfat_set_entry_attr(epnew, exfat_get_entry_attr(epnew) |
- ATTR_ARCHIVE);
- fid->attr |= ATTR_ARCHIVE;
- }
- exfat_buf_modify(sb, sector_new);
- exfat_buf_unlock(sb, sector_mov);
-
- epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
- &sector_mov);
- exfat_buf_lock(sb, sector_mov);
- epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
- &sector_new);
- if (!epmov || !epnew) {
- exfat_buf_unlock(sb, sector_mov);
- return -ENOENT;
- }
-
- memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- exfat_buf_modify(sb, sector_new);
- exfat_buf_unlock(sb, sector_mov);
-
- ret = exfat_init_ext_entry(sb, p_newdir, newentry, num_new_entries,
- p_uniname, &dos_name);
- if (ret != 0)
- return ret;
-
- exfat_delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
-
- fid->dir.dir = p_newdir->dir;
- fid->dir.size = p_newdir->size;
- fid->dir.flags = p_newdir->flags;
-
- fid->entry = newentry;
-
- return 0;
-}
-
-/*
- * Sector Read/Write Functions
- */
-
-int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
- bool read)
-{
- s32 ret = -EIO;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if ((sec >= (p_fs->PBR_sector + p_fs->num_sectors)) &&
- (p_fs->num_sectors > 0)) {
- pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
- __func__, (unsigned long long)sec);
- fs_error(sb);
- return ret;
- }
-
- if (!p_fs->dev_ejected) {
- ret = exfat_bdev_read(sb, sec, bh, 1, read);
- if (ret != 0)
- p_fs->dev_ejected = 1;
- }
-
- return ret;
-}
-
-int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
- bool sync)
-{
- s32 ret = -EIO;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (sec >= (p_fs->PBR_sector + p_fs->num_sectors) &&
- (p_fs->num_sectors > 0)) {
- pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
- __func__, (unsigned long long)sec);
- fs_error(sb);
- return ret;
- }
-
- if (!bh) {
- pr_err("[EXFAT] %s: bh is NULL!\n", __func__);
- fs_error(sb);
- return ret;
- }
-
- if (!p_fs->dev_ejected) {
- ret = exfat_bdev_write(sb, sec, bh, 1, sync);
- if (ret != 0)
- p_fs->dev_ejected = 1;
- }
-
- return ret;
-}
-
-int multi_sector_read(struct super_block *sb, sector_t sec,
- struct buffer_head **bh, s32 num_secs, bool read)
-{
- s32 ret = -EIO;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors)) &&
- (p_fs->num_sectors > 0)) {
- pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n",
- __func__, (unsigned long long)sec, num_secs);
- fs_error(sb);
- return ret;
- }
-
- if (!p_fs->dev_ejected) {
- ret = exfat_bdev_read(sb, sec, bh, num_secs, read);
- if (ret != 0)
- p_fs->dev_ejected = 1;
- }
-
- return ret;
-}
-
-int multi_sector_write(struct super_block *sb, sector_t sec,
- struct buffer_head *bh, s32 num_secs, bool sync)
-{
- s32 ret = -EIO;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if ((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors) &&
- (p_fs->num_sectors > 0)) {
- pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n",
- __func__, (unsigned long long)sec, num_secs);
- fs_error(sb);
- return ret;
- }
- if (!bh) {
- pr_err("[EXFAT] %s: bh is NULL!\n", __func__);
- fs_error(sb);
- return ret;
- }
-
- if (!p_fs->dev_ejected) {
- ret = exfat_bdev_write(sb, sec, bh, num_secs, sync);
- if (ret != 0)
- p_fs->dev_ejected = 1;
- }
-
- return ret;
-}
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
deleted file mode 100644
index 91e8b0c4dce7..000000000000
--- a/drivers/staging/exfat/exfat_nls.c
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/string.h>
-#include <linux/nls.h>
-#include "exfat.h"
-
-static u16 bad_uni_chars[] = {
- /* " * / : < > ? \ | */
- 0x0022, 0x002A, 0x002F, 0x003A,
- 0x003C, 0x003E, 0x003F, 0x005C, 0x007C,
- 0
-};
-
-static int convert_ch_to_uni(struct nls_table *nls, u16 *uni, u8 *ch,
- bool *lossy)
-{
- int len;
-
- *uni = 0x0;
-
- if (ch[0] < 0x80) {
- *uni = (u16)ch[0];
- return 1;
- }
-
- len = nls->char2uni(ch, NLS_MAX_CHARSET_SIZE, uni);
- if (len < 0) {
- /* conversion failed */
- pr_info("%s: fail to use nls\n", __func__);
- if (lossy)
- *lossy = true;
- *uni = (u16)'_';
- if (!strcmp(nls->charset, "utf8"))
- return 1;
- else
- return 2;
- }
-
- return len;
-}
-
-static int convert_uni_to_ch(struct nls_table *nls, u8 *ch, u16 uni,
- bool *lossy)
-{
- int len;
-
- ch[0] = 0x0;
-
- if (uni < 0x0080) {
- ch[0] = (u8)uni;
- return 1;
- }
-
- len = nls->uni2char(uni, ch, NLS_MAX_CHARSET_SIZE);
- if (len < 0) {
- /* conversion failed */
- pr_info("%s: fail to use nls\n", __func__);
- if (lossy)
- *lossy = true;
- ch[0] = '_';
- return 1;
- }
-
- return len;
-}
-
-u16 nls_upper(struct super_block *sb, u16 a)
-{
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (EXFAT_SB(sb)->options.casesensitive)
- return a;
- if (p_fs->vol_utbl && p_fs->vol_utbl[get_col_index(a)])
- return p_fs->vol_utbl[get_col_index(a)][get_row_index(a)];
- else
- return a;
-}
-
-static u16 *nls_wstrchr(u16 *str, u16 wchar)
-{
- while (*str) {
- if (*(str++) == wchar)
- return str;
- }
-
- return NULL;
-}
-
-int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
-{
- int i;
-
- for (i = 0; i < MAX_NAME_LENGTH; i++, a++, b++) {
- if (nls_upper(sb, *a) != nls_upper(sb, *b))
- return 1;
- if (*a == 0x0)
- return 0;
- }
- return 0;
-}
-
-void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
- struct uni_name_t *p_uniname)
-{
- int i, j, len;
- u8 buf[MAX_CHARSET_SIZE];
- u16 *uniname = p_uniname->name;
- struct nls_table *nls = EXFAT_SB(sb)->nls_io;
-
- if (!nls) {
- len = utf16s_to_utf8s(uniname, MAX_NAME_LENGTH,
- UTF16_HOST_ENDIAN, p_cstring,
- MAX_NAME_LENGTH);
- p_cstring[len] = 0;
- return;
- }
-
- i = 0;
- while (i < (MAX_NAME_LENGTH - 1)) {
- if (*uniname == (u16)'\0')
- break;
-
- len = convert_uni_to_ch(nls, buf, *uniname, NULL);
-
- if (len > 1) {
- for (j = 0; j < len; j++)
- *p_cstring++ = (char)*(buf + j);
- } else { /* len == 1 */
- *p_cstring++ = (char)*buf;
- }
-
- uniname++;
- i++;
- }
-
- *p_cstring = '\0';
-}
-
-void nls_cstring_to_uniname(struct super_block *sb,
- struct uni_name_t *p_uniname, u8 *p_cstring,
- bool *p_lossy)
-{
- int i, j;
- bool lossy = false;
- u8 *end_of_name;
- u8 upname[MAX_NAME_LENGTH * 2];
- u16 *uniname = p_uniname->name;
- struct nls_table *nls = EXFAT_SB(sb)->nls_io;
-
-	/* strip all trailing spaces */
-	end_of_name = p_cstring + strlen(p_cstring);
-	while (end_of_name > p_cstring && *(end_of_name - 1) == ' ')
-		end_of_name--;
-	*end_of_name = '\0';
-
-	if (strcmp(p_cstring, ".") && strcmp(p_cstring, "..")) {
-		/* strip all trailing periods */
-		while (end_of_name > p_cstring && *(end_of_name - 1) == '.')
-			end_of_name--;
-		*end_of_name = '\0';
-	}
-
- if (*p_cstring == '\0')
- lossy = true;
-
- if (!nls) {
- i = utf8s_to_utf16s(p_cstring, MAX_NAME_LENGTH,
- UTF16_HOST_ENDIAN, uniname,
- MAX_NAME_LENGTH);
- for (j = 0; j < i; j++)
- SET16_A(upname + j * 2, nls_upper(sb, uniname[j]));
- uniname[i] = '\0';
- } else {
- i = 0;
- j = 0;
- while (j < (MAX_NAME_LENGTH - 1)) {
- if (*(p_cstring + i) == '\0')
- break;
-
- i += convert_ch_to_uni(nls, uniname,
- (u8 *)(p_cstring + i), &lossy);
-
- if ((*uniname < 0x0020) ||
- nls_wstrchr(bad_uni_chars, *uniname))
- lossy = true;
-
- SET16_A(upname + j * 2, nls_upper(sb, *uniname));
-
- uniname++;
- j++;
- }
-
- if (*(p_cstring + i) != '\0')
- lossy = true;
- *uniname = (u16)'\0';
- }
-
- p_uniname->name_len = j;
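-	/* hash the upcased name; j << 1 because each UTF-16 code unit
-	 * occupies two bytes
-	 */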
- p_uniname->name_hash = calc_checksum_2byte(upname, j << 1, 0,
- CS_DEFAULT);
-
- if (p_lossy)
- *p_lossy = lossy;
-}
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
deleted file mode 100644
index b81d2a87b82e..000000000000
--- a/drivers/staging/exfat/exfat_super.c
+++ /dev/null
@@ -1,3883 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/seq_file.h>
-#include <linux/pagemap.h>
-#include <linux/mpage.h>
-#include <linux/buffer_head.h>
-#include <linux/exportfs.h>
-#include <linux/mount.h>
-#include <linux/vfs.h>
-#include <linux/aio.h>
-#include <linux/iversion.h>
-#include <linux/parser.h>
-#include <linux/uio.h>
-#include <linux/writeback.h>
-#include <linux/log2.h>
-#include <linux/hash.h>
-#include <linux/backing-dev.h>
-#include <linux/sched.h>
-#include <linux/fs_struct.h>
-#include <linux/namei.h>
-#include <linux/random.h>
-#include <linux/string.h>
-#include <linux/nls.h>
-#include <linux/mutex.h>
-#include <linux/swap.h>
-
-#define EXFAT_VERSION "1.3.0"
-
-#include "exfat.h"
-
-static struct kmem_cache *exfat_inode_cachep;
-
-static int exfat_default_codepage = CONFIG_STAGING_EXFAT_DEFAULT_CODEPAGE;
-static char exfat_default_iocharset[] = CONFIG_STAGING_EXFAT_DEFAULT_IOCHARSET;
-
-#define INC_IVERSION(x) (inode_inc_iversion(x))
-#define GET_IVERSION(x) (inode_peek_iversion_raw(x))
-#define SET_IVERSION(x, y) (inode_set_iversion(x, y))
-
-static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos);
-static int exfat_sync_inode(struct inode *inode);
-static struct inode *exfat_build_inode(struct super_block *sb,
- struct file_id_t *fid, loff_t i_pos);
-static int exfat_write_inode(struct inode *inode,
- struct writeback_control *wbc);
-static void exfat_write_super(struct super_block *sb);
-
-#define UNIX_SECS_1980 315532800L
-#define UNIX_SECS_2108 4354819200L
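-
-/*
- * FAT timestamps can only represent 1980-01-01 through 2107-12-31, so
- * the conversions below clamp anything outside that range.
- */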
-
-/* Convert a FAT time/date pair to a UNIX date (seconds since 1970-01-01). */
-static void exfat_time_fat2unix(struct timespec64 *ts, struct date_time_t *tp)
-{
- ts->tv_sec = mktime64(tp->Year + 1980, tp->Month + 1, tp->Day,
- tp->Hour, tp->Minute, tp->Second);
-
- ts->tv_nsec = tp->MilliSecond * NSEC_PER_MSEC;
-}
-
-/* Convert linear UNIX date to a FAT time/date pair. */
-static void exfat_time_unix2fat(struct timespec64 *ts, struct date_time_t *tp)
-{
- time64_t second = ts->tv_sec;
- struct tm tm;
-
- time64_to_tm(second, 0, &tm);
-
- if (second < UNIX_SECS_1980) {
- tp->MilliSecond = 0;
- tp->Second = 0;
- tp->Minute = 0;
- tp->Hour = 0;
- tp->Day = 1;
- tp->Month = 1;
- tp->Year = 0;
- return;
- }
-
- if (second >= UNIX_SECS_2108) {
- tp->MilliSecond = 999;
- tp->Second = 59;
- tp->Minute = 59;
- tp->Hour = 23;
- tp->Day = 31;
- tp->Month = 12;
- tp->Year = 127;
- return;
- }
-
- tp->MilliSecond = ts->tv_nsec / NSEC_PER_MSEC;
- tp->Second = tm.tm_sec;
- tp->Minute = tm.tm_min;
- tp->Hour = tm.tm_hour;
- tp->Day = tm.tm_mday;
- tp->Month = tm.tm_mon + 1;
- tp->Year = tm.tm_year + 1900 - 1980;
-}
-
-struct timestamp_t *tm_current(struct timestamp_t *tp)
-{
- time64_t second = ktime_get_real_seconds();
- struct tm tm;
-
- time64_to_tm(second, 0, &tm);
-
- if (second < UNIX_SECS_1980) {
- tp->sec = 0;
- tp->min = 0;
- tp->hour = 0;
- tp->day = 1;
- tp->mon = 1;
- tp->year = 0;
- return tp;
- }
-
- if (second >= UNIX_SECS_2108) {
- tp->sec = 59;
- tp->min = 59;
- tp->hour = 23;
- tp->day = 31;
- tp->mon = 12;
- tp->year = 127;
- return tp;
- }
-
- tp->sec = tm.tm_sec;
- tp->min = tm.tm_min;
- tp->hour = tm.tm_hour;
- tp->day = tm.tm_mday;
- tp->mon = tm.tm_mon + 1;
- tp->year = tm.tm_year + 1900 - 1980;
-
- return tp;
-}
-
-static void __lock_super(struct super_block *sb)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- mutex_lock(&sbi->s_lock);
-}
-
-static void __unlock_super(struct super_block *sb)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- mutex_unlock(&sbi->s_lock);
-}
-
-static int __is_sb_dirty(struct super_block *sb)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- return sbi->s_dirt;
-}
-
-static void __set_sb_clean(struct super_block *sb)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- sbi->s_dirt = 0;
-}
-
-static int __exfat_revalidate(struct dentry *dentry)
-{
- return 0;
-}
-
-static int exfat_revalidate(struct dentry *dentry, unsigned int flags)
-{
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- if (dentry->d_inode)
- return 1;
- return __exfat_revalidate(dentry);
-}
-
-static int exfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
-{
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- if (dentry->d_inode)
- return 1;
-
- if (!flags)
- return 0;
-
- if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
- return 0;
-
- return __exfat_revalidate(dentry);
-}
-
-static unsigned int __exfat_striptail_len(unsigned int len, const char *name)
-{
- while (len && name[len - 1] == '.')
- len--;
- return len;
-}
-
-static unsigned int exfat_striptail_len(const struct qstr *qstr)
-{
- return __exfat_striptail_len(qstr->len, qstr->name);
-}
-
-static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
-{
- qstr->hash = full_name_hash(dentry, qstr->name,
- exfat_striptail_len(qstr));
- return 0;
-}
-
-static int exfat_d_hashi(const struct dentry *dentry, struct qstr *qstr)
-{
- struct super_block *sb = dentry->d_sb;
- const unsigned char *name;
- unsigned int len;
- unsigned long hash;
-
- name = qstr->name;
- len = exfat_striptail_len(qstr);
-
- hash = init_name_hash(dentry);
- while (len--)
- hash = partial_name_hash(nls_upper(sb, *name++), hash);
- qstr->hash = end_name_hash(hash);
-
- return 0;
-}
-
-static int exfat_cmpi(const struct dentry *dentry, unsigned int len,
- const char *str, const struct qstr *name)
-{
- struct nls_table *t = EXFAT_SB(dentry->d_sb)->nls_io;
- unsigned int alen, blen;
-
- alen = exfat_striptail_len(name);
- blen = __exfat_striptail_len(len, str);
- if (alen == blen) {
- if (!t) {
- if (strncasecmp(name->name, str, alen) == 0)
- return 0;
- } else {
- if (nls_strnicmp(t, name->name, str, alen) == 0)
- return 0;
- }
- }
- return 1;
-}
-
-static int exfat_cmp(const struct dentry *dentry, unsigned int len,
- const char *str, const struct qstr *name)
-{
- unsigned int alen, blen;
-
- alen = exfat_striptail_len(name);
- blen = __exfat_striptail_len(len, str);
- if (alen == blen) {
- if (strncmp(name->name, str, alen) == 0)
- return 0;
- }
- return 1;
-}
-
-static const struct dentry_operations exfat_ci_dentry_ops = {
- .d_revalidate = exfat_revalidate_ci,
- .d_hash = exfat_d_hashi,
- .d_compare = exfat_cmpi,
-};
-
-static const struct dentry_operations exfat_dentry_ops = {
- .d_revalidate = exfat_revalidate,
- .d_hash = exfat_d_hash,
- .d_compare = exfat_cmp,
-};
-
-static DEFINE_MUTEX(z_mutex);
-
-static inline void fs_sync(struct super_block *sb, bool do_sync)
-{
- if (do_sync)
- exfat_bdev_sync(sb);
-}
-
-/*
- * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to
- * save ATTR_RO instead of ->i_mode.
- *
- * If it's a directory and !sbi->options.rodir, ATTR_RO isn't a read-only
- * bit; it's just used as a flag for the application.
- */
-static inline int exfat_mode_can_hold_ro(struct inode *inode)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
-
- if (S_ISDIR(inode->i_mode))
- return 0;
-
- if ((~sbi->options.fs_fmask) & 0222)
- return 1;
- return 0;
-}
-
-/* Convert attribute bits and a mask to the UNIX mode. */
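-/* e.g. base mode 0777 with fs_fmask 022 yields 0755 for a regular file */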
-static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi, u32 attr,
- mode_t mode)
-{
- if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR))
- mode &= ~0222;
-
- if (attr & ATTR_SUBDIR)
- return (mode & ~sbi->options.fs_dmask) | S_IFDIR;
- else if (attr & ATTR_SYMLINK)
- return (mode & ~sbi->options.fs_dmask) | S_IFLNK;
- else
- return (mode & ~sbi->options.fs_fmask) | S_IFREG;
-}
-
-/* Return the FAT attribute byte for this inode */
-static inline u32 exfat_make_attr(struct inode *inode)
-{
- if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & 0222))
- return (EXFAT_I(inode)->fid.attr) | ATTR_READONLY;
- else
- return EXFAT_I(inode)->fid.attr;
-}
-
-static inline void exfat_save_attr(struct inode *inode, u32 attr)
-{
- if (exfat_mode_can_hold_ro(inode))
- EXFAT_I(inode)->fid.attr = attr & ATTR_RWMASK;
- else
- EXFAT_I(inode)->fid.attr = attr & (ATTR_RWMASK | ATTR_READONLY);
-}
-
-static int ffsMountVol(struct super_block *sb)
-{
- int i, ret;
- struct pbr_sector_t *p_pbr;
- struct buffer_head *tmp_bh = NULL;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- pr_info("[EXFAT] trying to mount...\n");
-
- mutex_lock(&z_mutex);
-
- exfat_buf_init(sb);
-
- mutex_init(&p_fs->v_mutex);
- p_fs->dev_ejected = 0;
-
- /* open the block device */
- exfat_bdev_open(sb);
-
- if (p_bd->sector_size < sb->s_blocksize) {
-		pr_info("EXFAT: mount failed - sector size %d less than blocksize %lu\n",
-			p_bd->sector_size, sb->s_blocksize);
- ret = -EINVAL;
- goto out;
- }
- if (p_bd->sector_size > sb->s_blocksize)
- sb_set_blocksize(sb, p_bd->sector_size);
-
- /* read Sector 0 */
- if (sector_read(sb, 0, &tmp_bh, 1) != 0) {
- ret = -EIO;
- goto out;
- }
-
- p_fs->PBR_sector = 0;
-
- p_pbr = (struct pbr_sector_t *)tmp_bh->b_data;
-
- /* check the validity of PBR */
- if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) {
- brelse(tmp_bh);
- exfat_bdev_close(sb);
- ret = -EFSCORRUPTED;
- goto out;
- }
-
- /* fill fs_struct */
- for (i = 0; i < 53; i++)
- if (p_pbr->bpb[i])
- break;
-
- if (i < 53) {
-		/* Not sure how we'd get here, but complain if we do */
- ret = -EINVAL;
- pr_info("EXFAT: Attempted to mount VFAT filesystem\n");
- goto out;
- } else {
- ret = exfat_mount(sb, p_pbr);
- }
-
- brelse(tmp_bh);
-
- if (ret) {
- exfat_bdev_close(sb);
- goto out;
- }
-
- ret = load_alloc_bitmap(sb);
- if (ret) {
- exfat_bdev_close(sb);
- goto out;
- }
- ret = load_upcase_table(sb);
- if (ret) {
- free_alloc_bitmap(sb);
- exfat_bdev_close(sb);
- goto out;
- }
-
- if (p_fs->dev_ejected) {
- free_upcase_table(sb);
- free_alloc_bitmap(sb);
- exfat_bdev_close(sb);
- ret = -EIO;
- goto out;
- }
-
- pr_info("[EXFAT] mounted successfully\n");
-
-out:
- mutex_unlock(&z_mutex);
-
- return ret;
-}
-
-static int ffsUmountVol(struct super_block *sb)
-{
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int err = 0;
-
- pr_info("[EXFAT] trying to unmount...\n");
-
- mutex_lock(&z_mutex);
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-
- free_upcase_table(sb);
- free_alloc_bitmap(sb);
-
- exfat_fat_release_all(sb);
- exfat_buf_release_all(sb);
-
- /* close the block device */
- exfat_bdev_close(sb);
-
- if (p_fs->dev_ejected) {
- pr_info("[EXFAT] unmounted with media errors. Device is already ejected.\n");
- err = -EIO;
- }
-
- exfat_buf_shutdown(sb);
-
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
- mutex_unlock(&z_mutex);
-
- pr_info("[EXFAT] unmounted successfully\n");
-
- return err;
-}
-
-static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
-{
- int err = 0;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- /* check the validity of pointer parameters */
- if (!info)
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- if (p_fs->used_clusters == UINT_MAX)
- p_fs->used_clusters = exfat_count_used_clusters(sb);
-
- info->FatType = p_fs->vol_type;
- info->ClusterSize = p_fs->cluster_size;
- info->NumClusters = p_fs->num_clusters - 2; /* clu 0 & 1 */
- info->UsedClusters = p_fs->used_clusters;
- info->FreeClusters = info->NumClusters - info->UsedClusters;
-
- if (p_fs->dev_ejected)
- err = -EIO;
-
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return err;
-}
-
-static int ffsSyncVol(struct super_block *sb, bool do_sync)
-{
- int err = 0;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- /* synchronize the file system */
- fs_sync(sb, do_sync);
- fs_set_vol_flags(sb, VOL_CLEAN);
-
- if (p_fs->dev_ejected)
- err = -EIO;
-
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return err;
-}
-
-/*----------------------------------------------------------------------*/
-/* File Operation Functions */
-/*----------------------------------------------------------------------*/
-
-static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
-{
- int ret, dentry, num_entries;
- struct chain_t dir;
- struct uni_name_t uni_name;
- struct dos_name_t dos_name;
- struct dentry_t *ep, *ep2;
- struct entry_set_cache_t *es = NULL;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- pr_debug("%s entered\n", __func__);
-
- /* check the validity of pointer parameters */
- if (!fid || !path || (*path == '\0'))
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- /* check the validity of directory name in the given pathname */
- ret = resolve_path(inode, path, &dir, &uni_name);
- if (ret)
- goto out;
-
- ret = get_num_entries_and_dos_name(sb, &dir, &uni_name, &num_entries,
- &dos_name);
- if (ret)
- goto out;
-
- /* search the file name for directories */
- dentry = exfat_find_dir_entry(sb, &dir, &uni_name, num_entries,
- &dos_name, TYPE_ALL);
- if (dentry < -1) {
- ret = -ENOENT;
- goto out;
- }
-
- fid->dir.dir = dir.dir;
- fid->dir.size = dir.size;
- fid->dir.flags = dir.flags;
- fid->entry = dentry;
-
- if (dentry == -1) {
- fid->type = TYPE_DIR;
- fid->rwoffset = 0;
- fid->hint_last_off = -1;
-
- fid->attr = ATTR_SUBDIR;
- fid->flags = 0x01;
- fid->size = 0;
- fid->start_clu = p_fs->root_dir;
- } else {
- es = get_entry_set_in_dir(sb, &dir, dentry,
- ES_2_ENTRIES, &ep);
- if (!es) {
- ret = -ENOENT;
- goto out;
- }
- ep2 = ep + 1;
-
- fid->type = exfat_get_entry_type(ep);
- fid->rwoffset = 0;
- fid->hint_last_off = -1;
- fid->attr = exfat_get_entry_attr(ep);
-
- fid->size = exfat_get_entry_size(ep2);
- if ((fid->type == TYPE_FILE) && (fid->size == 0)) {
- fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
- fid->start_clu = CLUSTER_32(~0);
- } else {
- fid->flags = exfat_get_entry_flag(ep2);
- fid->start_clu = exfat_get_entry_clu0(ep2);
- }
-
- release_entry_set(es);
- }
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
- struct file_id_t *fid)
-{
- struct chain_t dir;
- struct uni_name_t uni_name;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int ret = 0;
-
- /* check the validity of pointer parameters */
- if (!fid || !path || (*path == '\0'))
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- /* check the validity of directory name in the given pathname */
- ret = resolve_path(inode, path, &dir, &uni_name);
- if (ret)
- goto out;
-
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- /* create a new file */
- ret = create_file(inode, &dir, &uni_name, mode, fid);
-
-#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-#endif
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
- u64 count, u64 *rcount)
-{
- s32 offset, sec_offset, clu_offset;
- u32 clu;
- int ret = 0;
- sector_t LogSector;
- u64 oneblkread, read_bytes;
- struct buffer_head *tmp_bh = NULL;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- /* check the validity of the given file id */
- if (!fid)
- return -EINVAL;
-
- /* check the validity of pointer parameters */
- if (!buffer)
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- /* check if the given file ID is opened */
- if (fid->type != TYPE_FILE) {
- ret = -EPERM;
- goto out;
- }
-
- if (fid->rwoffset > fid->size)
- fid->rwoffset = fid->size;
-
- if (count > (fid->size - fid->rwoffset))
- count = fid->size - fid->rwoffset;
-
- if (count == 0) {
- if (rcount)
- *rcount = 0;
- ret = 0;
- goto out;
- }
-
- read_bytes = 0;
-
- while (count > 0) {
- clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
- clu = fid->start_clu;
-
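-		/* flags 0x03 means a contiguous cluster chain, so index it directly */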
- if (fid->flags == 0x03) {
- clu += clu_offset;
- } else {
- /* hint information */
- if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
- (clu_offset >= fid->hint_last_off)) {
- clu_offset -= fid->hint_last_off;
- clu = fid->hint_last_clu;
- }
-
- while (clu_offset > 0) {
- /* clu = exfat_fat_read(sb, clu); */
- if (exfat_fat_read(sb, clu, &clu) == -1) {
- ret = -EIO;
- goto out;
- }
-
- clu_offset--;
- }
- }
-
- /* hint information */
- fid->hint_last_off = (s32)(fid->rwoffset >>
- p_fs->cluster_size_bits);
- fid->hint_last_clu = clu;
-
- /* byte offset in cluster */
- offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1));
-
- /* sector offset in cluster */
- sec_offset = offset >> p_bd->sector_size_bits;
-
- /* byte offset in sector */
- offset &= p_bd->sector_size_mask;
-
- LogSector = START_SECTOR(clu) + sec_offset;
-
- oneblkread = (u64)(p_bd->sector_size - offset);
- if (oneblkread > count)
- oneblkread = count;
-
- if ((offset == 0) && (oneblkread == p_bd->sector_size)) {
-			if (sector_read(sb, LogSector, &tmp_bh, 1) != 0)
-				goto err_out;
- memcpy((char *)buffer + read_bytes,
- (char *)tmp_bh->b_data, (s32)oneblkread);
- } else {
-			if (sector_read(sb, LogSector, &tmp_bh, 1) != 0)
-				goto err_out;
- memcpy((char *)buffer + read_bytes,
- (char *)tmp_bh->b_data + offset,
- (s32)oneblkread);
- }
- count -= oneblkread;
- read_bytes += oneblkread;
- fid->rwoffset += oneblkread;
- }
- brelse(tmp_bh);
-
-/* How did this ever work and not leak a brelse()?? */
-err_out:
- /* set the size of read bytes */
- if (rcount)
- *rcount = read_bytes;
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
- void *buffer, u64 count, u64 *wcount)
-{
- bool modified = false;
- s32 offset, sec_offset, clu_offset;
- s32 num_clusters, num_alloc, num_alloced = (s32)~0;
- int ret = 0;
- u32 clu, last_clu;
- sector_t LogSector;
- u64 oneblkwrite, write_bytes;
- struct chain_t new_clu;
- struct timestamp_t tm;
- struct dentry_t *ep, *ep2;
- struct entry_set_cache_t *es = NULL;
- struct buffer_head *tmp_bh = NULL;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- /* check the validity of the given file id */
- if (!fid)
- return -EINVAL;
-
- /* check the validity of pointer parameters */
- if (!buffer)
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- /* check if the given file ID is opened */
- if (fid->type != TYPE_FILE) {
- ret = -EPERM;
- goto out;
- }
-
- if (fid->rwoffset > fid->size)
- fid->rwoffset = fid->size;
-
- if (count == 0) {
- if (wcount)
- *wcount = 0;
- ret = 0;
- goto out;
- }
-
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- if (fid->size == 0)
- num_clusters = 0;
- else
- num_clusters = (s32)((fid->size - 1) >>
- p_fs->cluster_size_bits) + 1;
-
- write_bytes = 0;
-
- while (count > 0) {
- clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
- clu = fid->start_clu;
- last_clu = fid->start_clu;
-
- if (fid->flags == 0x03) {
- if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
- last_clu += clu_offset - 1;
-
- if (clu_offset == num_clusters)
- clu = CLUSTER_32(~0);
- else
- clu += clu_offset;
- }
- } else {
- /* hint information */
- if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
- (clu_offset >= fid->hint_last_off)) {
- clu_offset -= fid->hint_last_off;
- clu = fid->hint_last_clu;
- }
-
- while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
- last_clu = clu;
- /* clu = exfat_fat_read(sb, clu); */
- if (exfat_fat_read(sb, clu, &clu) == -1) {
- ret = -EIO;
- goto out;
- }
- clu_offset--;
- }
- }
-
- if (clu == CLUSTER_32(~0)) {
- num_alloc = (s32)((count - 1) >>
- p_fs->cluster_size_bits) + 1;
- new_clu.dir = (last_clu == CLUSTER_32(~0)) ?
- CLUSTER_32(~0) : last_clu + 1;
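-			/* start the search just past the last cluster (a locality hint) */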
- new_clu.size = 0;
- new_clu.flags = fid->flags;
-
- /* (1) allocate a chain of clusters */
- num_alloced = exfat_alloc_cluster(sb,
- num_alloc,
- &new_clu);
- if (num_alloced == 0)
- break;
- if (num_alloced < 0) {
- ret = num_alloced;
- goto out;
- }
-
- /* (2) append to the FAT chain */
- if (last_clu == CLUSTER_32(~0)) {
- if (new_clu.flags == 0x01)
- fid->flags = 0x01;
- fid->start_clu = new_clu.dir;
- modified = true;
- } else {
- if (new_clu.flags != fid->flags) {
- exfat_chain_cont_cluster(sb,
- fid->start_clu,
- num_clusters);
- fid->flags = 0x01;
- modified = true;
- }
- if (new_clu.flags == 0x01)
- exfat_fat_write(sb, last_clu, new_clu.dir);
- }
-
- num_clusters += num_alloced;
- clu = new_clu.dir;
- }
-
- /* hint information */
- fid->hint_last_off = (s32)(fid->rwoffset >>
- p_fs->cluster_size_bits);
- fid->hint_last_clu = clu;
-
- /* byte offset in cluster */
- offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1));
-
- /* sector offset in cluster */
- sec_offset = offset >> p_bd->sector_size_bits;
-
- /* byte offset in sector */
- offset &= p_bd->sector_size_mask;
-
- LogSector = START_SECTOR(clu) + sec_offset;
-
- oneblkwrite = (u64)(p_bd->sector_size - offset);
- if (oneblkwrite > count)
- oneblkwrite = count;
-
- if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) {
-			if (sector_read(sb, LogSector, &tmp_bh, 0) != 0)
-				goto err_out;
- memcpy((char *)tmp_bh->b_data,
- (char *)buffer + write_bytes, (s32)oneblkwrite);
-			if (sector_write(sb, LogSector, tmp_bh, 0) != 0) {
- brelse(tmp_bh);
- goto err_out;
- }
- } else {
- if ((offset > 0) ||
- ((fid->rwoffset + oneblkwrite) < fid->size)) {
-				if (sector_read(sb, LogSector, &tmp_bh, 1) != 0)
-					goto err_out;
- } else {
-				if (sector_read(sb, LogSector, &tmp_bh, 0) != 0)
-					goto err_out;
- }
-
- memcpy((char *)tmp_bh->b_data + offset,
- (char *)buffer + write_bytes, (s32)oneblkwrite);
-			if (sector_write(sb, LogSector, tmp_bh, 0) != 0) {
- brelse(tmp_bh);
- goto err_out;
- }
- }
-
- count -= oneblkwrite;
- write_bytes += oneblkwrite;
- fid->rwoffset += oneblkwrite;
-
- fid->attr |= ATTR_ARCHIVE;
-
- if (fid->size < fid->rwoffset) {
- fid->size = fid->rwoffset;
- modified = true;
- }
- }
-
- brelse(tmp_bh);
-
-	/* (3) update the directory entry */
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es)
- goto err_out;
- ep2 = ep + 1;
-
- exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY);
- exfat_set_entry_attr(ep, fid->attr);
-
- if (modified) {
- if (exfat_get_entry_flag(ep2) != fid->flags)
- exfat_set_entry_flag(ep2, fid->flags);
-
- if (exfat_get_entry_size(ep2) != fid->size)
- exfat_set_entry_size(ep2, fid->size);
-
- if (exfat_get_entry_clu0(ep2) != fid->start_clu)
- exfat_set_entry_clu0(ep2, fid->start_clu);
- }
-
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
-
-#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-#endif
-
-err_out:
- /* set the size of written bytes */
- if (wcount)
- *wcount = write_bytes;
-
- if (num_alloced == 0)
- ret = -ENOSPC;
-
- else if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
-{
- s32 num_clusters;
- u32 last_clu = CLUSTER_32(0);
- int ret = 0;
- struct chain_t clu;
- struct timestamp_t tm;
- struct dentry_t *ep, *ep2;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
- struct entry_set_cache_t *es = NULL;
-
- pr_debug("%s entered (inode %p size %llu)\n", __func__, inode,
- new_size);
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- /* check if the given file ID is opened */
- if (fid->type != TYPE_FILE) {
- ret = -EPERM;
- goto out;
- }
-
- if (fid->size != old_size) {
-		pr_err("[EXFAT] truncate: size mismatch (old: %llu, fid: %llu), not skipping.\n",
- old_size, fid->size);
- }
-
- if (old_size <= new_size) {
- ret = 0;
- goto out;
- }
-
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- clu.dir = fid->start_clu;
- clu.size = (s32)((old_size - 1) >> p_fs->cluster_size_bits) + 1;
- clu.flags = fid->flags;
-
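-	/* advance clu past the clusters being kept; last_clu tracks the last one */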
- if (new_size > 0) {
- num_clusters = (s32)((new_size - 1) >>
- p_fs->cluster_size_bits) + 1;
-
- if (clu.flags == 0x03) {
- clu.dir += num_clusters;
- } else {
- while (num_clusters > 0) {
- last_clu = clu.dir;
- if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
- ret = -EIO;
- goto out;
- }
- num_clusters--;
- }
- }
-
- clu.size -= num_clusters;
- }
-
- fid->size = new_size;
- fid->attr |= ATTR_ARCHIVE;
- if (new_size == 0) {
- fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
- fid->start_clu = CLUSTER_32(~0);
- }
-
- /* (1) update the directory entry */
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = -ENOENT;
- goto out;
- }
- ep2 = ep + 1;
-
- exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY);
- exfat_set_entry_attr(ep, fid->attr);
-
- exfat_set_entry_size(ep2, new_size);
- if (new_size == 0) {
- exfat_set_entry_flag(ep2, 0x01);
- exfat_set_entry_clu0(ep2, CLUSTER_32(0));
- }
-
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
-
- /* (2) cut off from the FAT chain */
- if (last_clu != CLUSTER_32(0)) {
- if (fid->flags == 0x01)
- exfat_fat_write(sb, last_clu, CLUSTER_32(~0));
- }
-
- /* (3) free the clusters */
- exfat_free_cluster(sb, &clu, 0);
-
- /* hint information */
- fid->hint_last_off = -1;
- if (fid->rwoffset > fid->size)
- fid->rwoffset = fid->size;
-
-#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-#endif
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- pr_debug("%s exited (%d)\n", __func__, ret);
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
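-/*
- * The fid caches its parent directory's location; refresh the cached
- * values if the parent has been resized or relocated since.
- */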
-static void update_parent_info(struct file_id_t *fid,
- struct inode *parent_inode)
-{
- struct fs_info_t *p_fs = &(EXFAT_SB(parent_inode->i_sb)->fs_info);
- struct file_id_t *parent_fid = &(EXFAT_I(parent_inode)->fid);
-
- if (unlikely((parent_fid->flags != fid->dir.flags) ||
- (parent_fid->size !=
- (fid->dir.size << p_fs->cluster_size_bits)) ||
- (parent_fid->start_clu != fid->dir.dir))) {
- fid->dir.dir = parent_fid->start_clu;
- fid->dir.flags = parent_fid->flags;
- fid->dir.size = ((parent_fid->size + (p_fs->cluster_size - 1))
- >> p_fs->cluster_size_bits);
- }
-}
-
-static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
- struct inode *new_parent_inode, struct dentry *new_dentry)
-{
- s32 ret;
- s32 dentry;
- struct chain_t olddir, newdir;
- struct chain_t *p_dir = NULL;
- struct uni_name_t uni_name;
- struct dentry_t *ep;
- struct super_block *sb = old_parent_inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- u8 *new_path = (u8 *)new_dentry->d_name.name;
- struct inode *new_inode = new_dentry->d_inode;
- int num_entries;
- struct file_id_t *new_fid = NULL;
- s32 new_entry = 0;
-
- /* check the validity of the given file id */
- if (!fid)
- return -EINVAL;
-
- /* check the validity of pointer parameters */
- if (!new_path || (*new_path == '\0'))
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- update_parent_info(fid, old_parent_inode);
-
- olddir.dir = fid->dir.dir;
- olddir.size = fid->dir.size;
- olddir.flags = fid->dir.flags;
-
- dentry = fid->entry;
-
- /* check if the old file is "." or ".." */
- if (p_fs->vol_type != EXFAT) {
- if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) {
- ret = -EPERM;
- goto out2;
- }
- }
-
- ep = get_entry_in_dir(sb, &olddir, dentry, NULL);
- if (!ep) {
- ret = -ENOENT;
- goto out2;
- }
-
- if (exfat_get_entry_attr(ep) & ATTR_READONLY) {
- ret = -EPERM;
- goto out2;
- }
-
- /* check whether new dir is existing directory and empty */
- if (new_inode) {
- u32 entry_type;
-
- ret = -ENOENT;
- new_fid = &EXFAT_I(new_inode)->fid;
-
- update_parent_info(new_fid, new_parent_inode);
-
- p_dir = &new_fid->dir;
- new_entry = new_fid->entry;
- ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
- if (!ep)
- goto out;
-
- entry_type = exfat_get_entry_type(ep);
-
- if (entry_type == TYPE_DIR) {
- struct chain_t new_clu;
-
- new_clu.dir = new_fid->start_clu;
- new_clu.size = (s32)((new_fid->size - 1) >>
- p_fs->cluster_size_bits) + 1;
- new_clu.flags = new_fid->flags;
-
- if (!is_dir_empty(sb, &new_clu)) {
- ret = -EEXIST;
- goto out;
- }
- }
- }
-
- /* check the validity of directory name in the given new pathname */
- ret = resolve_path(new_parent_inode, new_path, &newdir, &uni_name);
- if (ret)
- goto out2;
-
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- if (olddir.dir == newdir.dir)
- ret = exfat_rename_file(new_parent_inode, &olddir, dentry,
- &uni_name, fid);
- else
- ret = move_file(new_parent_inode, &olddir, dentry, &newdir,
- &uni_name, fid);
-
- if ((ret == 0) && new_inode) {
- /* delete entries of new_dir */
- ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
- if (!ep)
- goto out;
-
- num_entries = exfat_count_ext_entries(sb, p_dir,
- new_entry, ep);
- if (num_entries < 0)
- goto out;
- exfat_delete_dir_entry(sb, p_dir, new_entry, 0,
- num_entries + 1);
- }
-out:
-#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-#endif
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-out2:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
-{
- s32 dentry;
- int ret = 0;
- struct chain_t dir, clu_to_free;
- struct dentry_t *ep;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- /* check the validity of the given file id */
- if (!fid)
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- dir.dir = fid->dir.dir;
- dir.size = fid->dir.size;
- dir.flags = fid->dir.flags;
-
- dentry = fid->entry;
-
- ep = get_entry_in_dir(sb, &dir, dentry, NULL);
- if (!ep) {
- ret = -ENOENT;
- goto out;
- }
-
- if (exfat_get_entry_attr(ep) & ATTR_READONLY) {
- ret = -EPERM;
- goto out;
- }
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- /* (1) update the directory entry */
- remove_file(inode, &dir, dentry);
-
- clu_to_free.dir = fid->start_clu;
- clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1;
- clu_to_free.flags = fid->flags;
-
- /* (2) free the clusters */
- exfat_free_cluster(sb, &clu_to_free, 0);
-
- fid->size = 0;
- fid->start_clu = CLUSTER_32(~0);
- fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-
-#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-#endif
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-#if 0
-/* Not currently wired up */
-static int ffsSetAttr(struct inode *inode, u32 attr)
-{
- u32 type;
- int ret = 0;
- sector_t sector = 0;
- struct dentry_t *ep;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
- u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
- struct entry_set_cache_t *es = NULL;
-
- if (fid->attr == attr) {
- if (p_fs->dev_ejected)
- return -EIO;
- return 0;
- }
-
- if (is_dir) {
- if ((fid->dir.dir == p_fs->root_dir) &&
- (fid->entry == -1)) {
- if (p_fs->dev_ejected)
- return -EIO;
- return 0;
- }
- }
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- /* get the directory entry of given file */
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = -ENOENT;
- goto out;
- }
-
- type = exfat_get_entry_type(ep);
-
- if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
- ((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
- if (p_fs->dev_ejected)
- ret = -EIO;
- else
- ret = -EINVAL;
-
- release_entry_set(es);
- goto out;
- }
-
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- /* set the file attribute */
- fid->attr = attr;
- exfat_set_entry_attr(ep, attr);
-
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
-
-#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-#endif
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-#endif
-
-static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
-{
- s32 count;
- int ret = 0;
- struct chain_t dir;
- struct uni_name_t uni_name;
- struct timestamp_t tm;
- struct dentry_t *ep, *ep2;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
- struct entry_set_cache_t *es = NULL;
- u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
-
- pr_debug("%s entered\n", __func__);
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- if (is_dir) {
- if ((fid->dir.dir == p_fs->root_dir) &&
- (fid->entry == -1)) {
- info->Attr = ATTR_SUBDIR;
- memset((char *)&info->CreateTimestamp, 0,
- sizeof(struct date_time_t));
- memset((char *)&info->ModifyTimestamp, 0,
- sizeof(struct date_time_t));
- memset((char *)&info->AccessTimestamp, 0,
- sizeof(struct date_time_t));
- strcpy(info->ShortName, ".");
- strcpy(info->Name, ".");
-
- dir.dir = p_fs->root_dir;
- dir.flags = 0x01;
-
- if (p_fs->root_dir == CLUSTER_32(0)) {
- /* FAT16 root_dir */
- info->Size = p_fs->dentries_in_root <<
- DENTRY_SIZE_BITS;
- } else {
- info->Size = count_num_clusters(sb, &dir) <<
- p_fs->cluster_size_bits;
- }
-
- count = count_dos_name_entries(sb, &dir, TYPE_DIR);
- if (count < 0) {
- ret = count; /* propagate error upward */
- goto out;
- }
- info->NumSubdirs = count;
-
- if (p_fs->dev_ejected)
- ret = -EIO;
- goto out;
- }
- }
-
- /* get the directory entry of given file or directory */
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_2_ENTRIES, &ep);
- if (!es) {
- ret = -ENOENT;
- goto out;
- }
- ep2 = ep + 1;
-
- /* set FILE_INFO structure using the acquired struct dentry_t */
- info->Attr = exfat_get_entry_attr(ep);
-
- exfat_get_entry_time(ep, &tm, TM_CREATE);
- info->CreateTimestamp.Year = tm.year;
- info->CreateTimestamp.Month = tm.mon;
- info->CreateTimestamp.Day = tm.day;
- info->CreateTimestamp.Hour = tm.hour;
- info->CreateTimestamp.Minute = tm.min;
- info->CreateTimestamp.Second = tm.sec;
- info->CreateTimestamp.MilliSecond = 0;
-
- exfat_get_entry_time(ep, &tm, TM_MODIFY);
- info->ModifyTimestamp.Year = tm.year;
- info->ModifyTimestamp.Month = tm.mon;
- info->ModifyTimestamp.Day = tm.day;
- info->ModifyTimestamp.Hour = tm.hour;
- info->ModifyTimestamp.Minute = tm.min;
- info->ModifyTimestamp.Second = tm.sec;
- info->ModifyTimestamp.MilliSecond = 0;
-
- memset((char *)&info->AccessTimestamp, 0, sizeof(struct date_time_t));
-
- *uni_name.name = 0x0;
-	/* XXX this is very bad for exfat because the name is already
-	 * included in es; the API should be revised
-	 */
- exfat_get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
- uni_name.name);
- nls_uniname_to_cstring(sb, info->Name, &uni_name);
-
- info->NumSubdirs = 2;
-
- info->Size = exfat_get_entry_size(ep2);
-
- release_entry_set(es);
-
- if (is_dir) {
- dir.dir = fid->start_clu;
- dir.flags = 0x01;
-
- if (info->Size == 0)
- info->Size = (u64)count_num_clusters(sb, &dir) <<
- p_fs->cluster_size_bits;
-
- count = count_dos_name_entries(sb, &dir, TYPE_DIR);
- if (count < 0) {
- ret = count; /* propagate error upward */
- goto out;
- }
- info->NumSubdirs += count;
- }
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- pr_debug("%s exited successfully\n", __func__);
- return ret;
-}
-
-static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
-{
- int ret = 0;
- struct timestamp_t tm;
- struct dentry_t *ep, *ep2;
- struct entry_set_cache_t *es = NULL;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
- u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
-
-	pr_debug("%s entered (inode %p info %p)\n", __func__, inode, info);
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- if (is_dir) {
- if ((fid->dir.dir == p_fs->root_dir) &&
- (fid->entry == -1)) {
-			if (p_fs->dev_ejected)
-				ret = -EIO;
- goto out;
- }
- }
-
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- /* get the directory entry of given file or directory */
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = -ENOENT;
- goto out;
- }
- ep2 = ep + 1;
-
- exfat_set_entry_attr(ep, info->Attr);
-
- /* set FILE_INFO structure using the acquired struct dentry_t */
- tm.sec = info->CreateTimestamp.Second;
- tm.min = info->CreateTimestamp.Minute;
- tm.hour = info->CreateTimestamp.Hour;
- tm.day = info->CreateTimestamp.Day;
- tm.mon = info->CreateTimestamp.Month;
- tm.year = info->CreateTimestamp.Year;
- exfat_set_entry_time(ep, &tm, TM_CREATE);
-
- tm.sec = info->ModifyTimestamp.Second;
- tm.min = info->ModifyTimestamp.Minute;
- tm.hour = info->ModifyTimestamp.Hour;
- tm.day = info->ModifyTimestamp.Day;
- tm.mon = info->ModifyTimestamp.Month;
- tm.year = info->ModifyTimestamp.Year;
- exfat_set_entry_time(ep, &tm, TM_MODIFY);
-
- exfat_set_entry_size(ep2, info->Size);
-
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- pr_debug("%s exited (%d)\n", __func__, ret);
-
- return ret;
-}
-
-static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
-{
- s32 num_clusters, num_alloced;
- bool modified = false;
- u32 last_clu;
- int ret = 0;
- struct chain_t new_clu;
- struct dentry_t *ep;
- struct entry_set_cache_t *es = NULL;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
-
- /* check the validity of pointer parameters */
- if (!clu)
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- fid->rwoffset = (s64)(clu_offset) << p_fs->cluster_size_bits;
-
- if (EXFAT_I(inode)->mmu_private == 0)
- num_clusters = 0;
- else
- num_clusters = (s32)((EXFAT_I(inode)->mmu_private - 1) >>
- p_fs->cluster_size_bits) + 1;
-
- *clu = last_clu = fid->start_clu;
-
- if (fid->flags == 0x03) {
- if ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
- last_clu += clu_offset - 1;
-
- if (clu_offset == num_clusters)
- *clu = CLUSTER_32(~0);
- else
- *clu += clu_offset;
- }
- } else {
- /* hint information */
- if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
- (clu_offset >= fid->hint_last_off)) {
- clu_offset -= fid->hint_last_off;
- *clu = fid->hint_last_clu;
- }
-
- while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
- last_clu = *clu;
- if (exfat_fat_read(sb, *clu, clu) == -1) {
- ret = -EIO;
- goto out;
- }
- clu_offset--;
- }
- }
-
- if (*clu == CLUSTER_32(~0)) {
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- new_clu.dir = (last_clu == CLUSTER_32(~0)) ? CLUSTER_32(~0) :
- last_clu + 1;
- new_clu.size = 0;
- new_clu.flags = fid->flags;
-
- /* (1) allocate a cluster */
- num_alloced = exfat_alloc_cluster(sb, 1, &new_clu);
- if (num_alloced < 0) {
- ret = -EIO;
- goto out;
- } else if (num_alloced == 0) {
- ret = -ENOSPC;
- goto out;
- }
-
- /* (2) append to the FAT chain */
- if (last_clu == CLUSTER_32(~0)) {
- if (new_clu.flags == 0x01)
- fid->flags = 0x01;
- fid->start_clu = new_clu.dir;
- modified = true;
- } else {
- if (new_clu.flags != fid->flags) {
- exfat_chain_cont_cluster(sb, fid->start_clu,
- num_clusters);
- fid->flags = 0x01;
- modified = true;
- }
- if (new_clu.flags == 0x01)
- exfat_fat_write(sb, last_clu, new_clu.dir);
- }
-
- num_clusters += num_alloced;
- *clu = new_clu.dir;
-
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = -ENOENT;
- goto out;
- }
- /* get stream entry */
- ep++;
-
- /* (3) update directory entry */
- if (modified) {
- if (exfat_get_entry_flag(ep) != fid->flags)
- exfat_set_entry_flag(ep, fid->flags);
-
- if (exfat_get_entry_clu0(ep) != fid->start_clu)
- exfat_set_entry_clu0(ep, fid->start_clu);
- }
-
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
-
- /* add number of new blocks to inode */
- inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9);
- }
-
- /* hint information */
- fid->hint_last_off = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
- fid->hint_last_clu = *clu;
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-/*----------------------------------------------------------------------*/
-/* Directory Operation Functions */
-/*----------------------------------------------------------------------*/
-
-static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
-{
- int ret = 0;
- struct chain_t dir;
- struct uni_name_t uni_name;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- pr_debug("%s entered\n", __func__);
-
- /* check the validity of pointer parameters */
- if (!fid || !path || (*path == '\0'))
- return -EINVAL;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- /* check the validity of directory name in the given old pathname */
- ret = resolve_path(inode, path, &dir, &uni_name);
- if (ret)
- goto out;
-
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- ret = create_dir(inode, &dir, &uni_name, fid);
-
-#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-#endif
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
-{
- int i, dentry, clu_offset;
- int ret = 0;
- s32 dentries_per_clu, dentries_per_clu_bits = 0;
- u32 type;
- sector_t sector;
- struct chain_t dir, clu;
- struct uni_name_t uni_name;
- struct timestamp_t tm;
- struct dentry_t *ep;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
-
- /* check the validity of pointer parameters */
- if (!dir_entry)
- return -EINVAL;
-
- /* check if the given file ID is opened */
- if (fid->type != TYPE_DIR)
- return -ENOTDIR;
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- if (fid->entry == -1) {
- dir.dir = p_fs->root_dir;
- dir.flags = 0x01;
- } else {
- dir.dir = fid->start_clu;
- dir.size = (s32)(fid->size >> p_fs->cluster_size_bits);
- dir.flags = fid->flags;
- }
-
- dentry = (s32)fid->rwoffset;
-
- if (dir.dir == CLUSTER_32(0)) {
- /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
-
- if (dentry == dentries_per_clu) {
- clu.dir = CLUSTER_32(~0);
- } else {
- clu.dir = dir.dir;
- clu.size = dir.size;
- clu.flags = dir.flags;
- }
- } else {
- dentries_per_clu = p_fs->dentries_per_clu;
- dentries_per_clu_bits = ilog2(dentries_per_clu);
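- /* dentries_per_clu is a power of two, so the shift/mask arithmetic below is valid */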
-
- clu_offset = dentry >> dentries_per_clu_bits;
- clu.dir = dir.dir;
- clu.size = dir.size;
- clu.flags = dir.flags;
-
- if (clu.flags == 0x03) {
- clu.dir += clu_offset;
- clu.size -= clu_offset;
- } else {
- /* hint_information */
- if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
- (clu_offset >= fid->hint_last_off)) {
- clu_offset -= fid->hint_last_off;
- clu.dir = fid->hint_last_clu;
- }
-
- while (clu_offset > 0) {
- if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
- ret = -EIO;
- goto out;
- }
- clu_offset--;
- }
- }
- }
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- if (dir.dir == CLUSTER_32(0)) /* FAT16 root_dir */
- i = dentry % dentries_per_clu;
- else
- i = dentry & (dentries_per_clu - 1);
-
- for ( ; i < dentries_per_clu; i++, dentry++) {
- ep = get_entry_in_dir(sb, &clu, i, &sector);
- if (!ep) {
- ret = -ENOENT;
- goto out;
- }
- type = exfat_get_entry_type(ep);
-
- if (type == TYPE_UNUSED)
- break;
-
- if ((type != TYPE_FILE) && (type != TYPE_DIR))
- continue;
-
- exfat_buf_lock(sb, sector);
- dir_entry->Attr = exfat_get_entry_attr(ep);
-
- exfat_get_entry_time(ep, &tm, TM_CREATE);
- dir_entry->CreateTimestamp.Year = tm.year;
- dir_entry->CreateTimestamp.Month = tm.mon;
- dir_entry->CreateTimestamp.Day = tm.day;
- dir_entry->CreateTimestamp.Hour = tm.hour;
- dir_entry->CreateTimestamp.Minute = tm.min;
- dir_entry->CreateTimestamp.Second = tm.sec;
- dir_entry->CreateTimestamp.MilliSecond = 0;
-
- exfat_get_entry_time(ep, &tm, TM_MODIFY);
- dir_entry->ModifyTimestamp.Year = tm.year;
- dir_entry->ModifyTimestamp.Month = tm.mon;
- dir_entry->ModifyTimestamp.Day = tm.day;
- dir_entry->ModifyTimestamp.Hour = tm.hour;
- dir_entry->ModifyTimestamp.Minute = tm.min;
- dir_entry->ModifyTimestamp.Second = tm.sec;
- dir_entry->ModifyTimestamp.MilliSecond = 0;
-
- memset((char *)&dir_entry->AccessTimestamp, 0,
- sizeof(struct date_time_t));
-
- *uni_name.name = 0x0;
- exfat_get_uni_name_from_ext_entry(sb, &dir, dentry,
- uni_name.name);
- nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
- exfat_buf_unlock(sb, sector);
-
- ep = get_entry_in_dir(sb, &clu, i + 1, NULL);
- if (!ep) {
- ret = -ENOENT;
- goto out;
- }
-
- dir_entry->Size = exfat_get_entry_size(ep);
-
- /* hint information */
- if (dir.dir != CLUSTER_32(0)) { /* not the FAT16 root_dir */
- fid->hint_last_off = dentry >>
- dentries_per_clu_bits;
- fid->hint_last_clu = clu.dir;
- }
-
- fid->rwoffset = (s64)(++dentry);
-
- if (p_fs->dev_ejected)
- ret = -EIO;
- goto out;
- }
-
- if (dir.dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (clu.flags == 0x03) {
- if ((--clu.size) > 0)
- clu.dir++;
- else
- clu.dir = CLUSTER_32(~0);
- } else {
- if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
- ret = -EIO;
- goto out;
- }
- }
- }
-
- *dir_entry->Name = '\0';
-
- fid->rwoffset = (s64)(++dentry);
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
-{
- s32 dentry;
- int ret = 0;
- struct chain_t dir, clu_to_free;
- struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- /* check the validity of the given file id */
- if (!fid)
- return -EINVAL;
-
- dir.dir = fid->dir.dir;
- dir.size = fid->dir.size;
- dir.flags = fid->dir.flags;
-
- dentry = fid->entry;
-
- /* check if the file is "." or ".." */
- if (p_fs->vol_type != EXFAT) {
- if ((dir.dir != p_fs->root_dir) && (dentry < 2))
- return -EPERM;
- }
-
- /* acquire the lock for file system critical section */
- mutex_lock(&p_fs->v_mutex);
-
- clu_to_free.dir = fid->start_clu;
- clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1;
- clu_to_free.flags = fid->flags;
-
- if (!is_dir_empty(sb, &clu_to_free)) {
- ret = -ENOTEMPTY;
- goto out;
- }
-
- fs_set_vol_flags(sb, VOL_DIRTY);
-
- /* (1) update the directory entry */
- remove_file(inode, &dir, dentry);
-
- /* (2) free the clusters */
- exfat_free_cluster(sb, &clu_to_free, 1);
-
- fid->size = 0;
- fid->start_clu = CLUSTER_32(~0);
- fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-
-#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
- fs_sync(sb, true);
- fs_set_vol_flags(sb, VOL_CLEAN);
-#endif
-
- if (p_fs->dev_ejected)
- ret = -EIO;
-
-out:
- /* release the lock for file system critical section */
- mutex_unlock(&p_fs->v_mutex);
-
- return ret;
-}
-
-/*======================================================================*/
-/* Directory Entry Operations */
-/*======================================================================*/
-
-static int exfat_readdir(struct file *filp, struct dir_context *ctx)
-{
- struct inode *inode = file_inode(filp);
- struct super_block *sb = inode->i_sb;
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &sbi->fs_info;
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- struct dir_entry_t de;
- unsigned long inum;
- loff_t cpos;
- int err = 0;
-
- __lock_super(sb);
-
- cpos = ctx->pos;
- /* Fake . and .. for the root directory. */
- if ((p_fs->vol_type == EXFAT) || (inode->i_ino == EXFAT_ROOT_INO)) {
- while (cpos < 2) {
- if (inode->i_ino == EXFAT_ROOT_INO)
- inum = EXFAT_ROOT_INO;
- else if (cpos == 0)
- inum = inode->i_ino;
- else /* (cpos == 1) */
- inum = parent_ino(filp->f_path.dentry);
-
- if (!dir_emit_dots(filp, ctx))
- goto out;
- cpos++;
- ctx->pos++;
- }
- if (cpos == 2)
- cpos = 0;
- }
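- /* cpos must sit on a directory-entry boundary */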
- if (cpos & (DENTRY_SIZE - 1)) {
- err = -ENOENT;
- goto out;
- }
-
-get_new:
- EXFAT_I(inode)->fid.size = i_size_read(inode);
- EXFAT_I(inode)->fid.rwoffset = cpos >> DENTRY_SIZE_BITS;
-
- err = ffsReadDir(inode, &de);
- if (err) {
- /* at least we tried to read a sector;
- * move cpos to the next sector boundary (it should be aligned)
- */
- if (err == -EIO) {
- cpos += 1 << p_bd->sector_size_bits;
- cpos &= ~((1 << p_bd->sector_size_bits) - 1);
- }
-
- goto end_of_dir;
- }
-
- cpos = EXFAT_I(inode)->fid.rwoffset << DENTRY_SIZE_BITS;
-
- if (!de.Name[0])
- goto end_of_dir;
-
- if (!memcmp(de.ShortName, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
- inum = inode->i_ino;
- } else if (!memcmp(de.ShortName, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) {
- inum = parent_ino(filp->f_path.dentry);
- } else {
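- /* i_pos: parent directory's start cluster in the high 32 bits, entry index in the low 32 bits */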
- loff_t i_pos = ((loff_t)EXFAT_I(inode)->fid.start_clu << 32) |
- ((EXFAT_I(inode)->fid.rwoffset - 1) & 0xffffffff);
- struct inode *tmp = exfat_iget(sb, i_pos);
-
- if (tmp) {
- inum = tmp->i_ino;
- iput(tmp);
- } else {
- inum = iunique(sb, EXFAT_ROOT_INO);
- }
- }
-
- if (!dir_emit(ctx, de.Name, strlen(de.Name), inum,
- (de.Attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
- goto out;
-
- ctx->pos = cpos;
- goto get_new;
-
-end_of_dir:
- ctx->pos = cpos;
-out:
- __unlock_super(sb);
- return err;
-}
-
-static int exfat_ioctl_volume_id(struct inode *dir)
-{
- struct super_block *sb = dir->i_sb;
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &sbi->fs_info;
-
- return p_fs->vol_id;
-}
-
-static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct inode *inode = filp->f_path.dentry->d_inode;
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
- unsigned int flags;
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-
- switch (cmd) {
- case EXFAT_IOCTL_GET_VOLUME_ID:
- return exfat_ioctl_volume_id(inode);
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
- case EXFAT_IOC_GET_DEBUGFLAGS: {
- struct super_block *sb = inode->i_sb;
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- flags = sbi->debug_flags;
- return put_user(flags, (int __user *)arg);
- }
- case EXFAT_IOC_SET_DEBUGFLAGS: {
- struct super_block *sb = inode->i_sb;
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (get_user(flags, (int __user *)arg))
- return -EFAULT;
-
- __lock_super(sb);
- sbi->debug_flags = flags;
- __unlock_super(sb);
-
- return 0;
- }
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
- default:
- return -ENOTTY; /* Inappropriate ioctl for device */
- }
-}
-
-static const struct file_operations exfat_dir_operations = {
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .iterate = exfat_readdir,
- .unlocked_ioctl = exfat_generic_ioctl,
- .fsync = generic_file_fsync,
-};
-
-static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
-{
- struct super_block *sb = dir->i_sb;
- struct timespec64 curtime;
- struct inode *inode;
- struct file_id_t fid;
- loff_t i_pos;
- int err;
-
- __lock_super(sb);
-
- pr_debug("%s entered\n", __func__);
-
- err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_REGULAR, &fid);
- if (err)
- goto out;
-
- INC_IVERSION(dir);
- curtime = current_time(dir);
- dir->i_ctime = curtime;
- dir->i_mtime = curtime;
- dir->i_atime = curtime;
- if (IS_DIRSYNC(dir))
- (void)exfat_sync_inode(dir);
- else
- mark_inode_dirty(dir);
-
- i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff);
-
- inode = exfat_build_inode(sb, &fid, i_pos);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto out;
- }
- INC_IVERSION(inode);
- curtime = current_time(inode);
- inode->i_mtime = curtime;
- inode->i_atime = curtime;
- inode->i_ctime = curtime;
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
-
- dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
- d_instantiate(dentry, inode);
-
-out:
- __unlock_super(sb);
- pr_debug("%s exited\n", __func__);
- return err;
-}
-
-static int exfat_find(struct inode *dir, struct qstr *qname,
- struct file_id_t *fid)
-{
- int err;
-
- if (qname->len == 0)
- return -ENOENT;
-
- err = ffsLookupFile(dir, (u8 *)qname->name, fid);
- if (err)
- return -ENOENT;
-
- return 0;
-}
-
-static int exfat_d_anon_disconn(struct dentry *dentry)
-{
- return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
-}
-
-static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
-{
- struct super_block *sb = dir->i_sb;
- struct inode *inode;
- struct dentry *alias;
- int err;
- struct file_id_t fid;
- loff_t i_pos;
- u64 ret;
- mode_t i_mode;
-
- __lock_super(sb);
- pr_debug("%s entered\n", __func__);
- err = exfat_find(dir, &dentry->d_name, &fid);
- if (err) {
- if (err == -ENOENT) {
- inode = NULL;
- goto out;
- }
- goto error;
- }
-
- i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff);
- inode = exfat_build_inode(sb, &fid, i_pos);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto error;
- }
-
- i_mode = inode->i_mode;
- if (S_ISLNK(i_mode) && !EXFAT_I(inode)->target) {
- EXFAT_I(inode)->target = kmalloc(i_size_read(inode) + 1,
- GFP_KERNEL);
- if (!EXFAT_I(inode)->target) {
- err = -ENOMEM;
- goto error;
- }
- ffsReadFile(dir, &fid, EXFAT_I(inode)->target,
- i_size_read(inode), &ret);
- *(EXFAT_I(inode)->target + i_size_read(inode)) = '\0';
- }
-
- alias = d_find_alias(inode);
- if (alias && !exfat_d_anon_disconn(alias)) {
- BUG_ON(d_unhashed(alias));
- if (!S_ISDIR(i_mode))
- d_move(alias, dentry);
- iput(inode);
- __unlock_super(sb);
- pr_debug("%s exited 1\n", __func__);
- return alias;
- }
- dput(alias);
-out:
- __unlock_super(sb);
- dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
- dentry = d_splice_alias(inode, dentry);
- if (dentry)
- dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
- pr_debug("%s exited 2\n", __func__);
- return dentry;
-
-error:
- __unlock_super(sb);
- pr_debug("%s exited 3\n", __func__);
- return ERR_PTR(err);
-}
-
-static inline unsigned long exfat_hash(loff_t i_pos)
-{
- return hash_32(i_pos, EXFAT_HASH_BITS);
-}
-
-static void exfat_attach(struct inode *inode, loff_t i_pos)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
- struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
-
- spin_lock(&sbi->inode_hash_lock);
- EXFAT_I(inode)->i_pos = i_pos;
- hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head);
- spin_unlock(&sbi->inode_hash_lock);
-}
-
-static void exfat_detach(struct inode *inode)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
-
- spin_lock(&sbi->inode_hash_lock);
- hlist_del_init(&EXFAT_I(inode)->i_hash_fat);
- EXFAT_I(inode)->i_pos = 0;
- spin_unlock(&sbi->inode_hash_lock);
-}
-
-static int exfat_unlink(struct inode *dir, struct dentry *dentry)
-{
- struct inode *inode = dentry->d_inode;
- struct super_block *sb = dir->i_sb;
- struct timespec64 curtime;
- int err;
-
- __lock_super(sb);
-
- pr_debug("%s entered\n", __func__);
-
- EXFAT_I(inode)->fid.size = i_size_read(inode);
-
- err = ffsRemoveFile(dir, &(EXFAT_I(inode)->fid));
- if (err)
- goto out;
-
- INC_IVERSION(dir);
- curtime = current_time(dir);
- dir->i_mtime = curtime;
- dir->i_atime = curtime;
- if (IS_DIRSYNC(dir))
- (void)exfat_sync_inode(dir);
- else
- mark_inode_dirty(dir);
-
- clear_nlink(inode);
- curtime = current_time(inode);
- inode->i_mtime = curtime;
- inode->i_atime = curtime;
- exfat_detach(inode);
- remove_inode_hash(inode);
-
-out:
- __unlock_super(sb);
- pr_debug("%s exited\n", __func__);
- return err;
-}
-
-static int exfat_symlink(struct inode *dir, struct dentry *dentry,
- const char *target)
-{
- struct super_block *sb = dir->i_sb;
- struct timespec64 curtime;
- struct inode *inode;
- struct file_id_t fid;
- loff_t i_pos;
- int err;
- u64 len = (u64)strlen(target);
- u64 ret;
-
- __lock_super(sb);
-
- pr_debug("%s entered\n", __func__);
-
- err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_SYMLINK, &fid);
- if (err)
- goto out;
-
- err = ffsWriteFile(dir, &fid, (char *)target, len, &ret);
- if (err) {
- ffsRemoveFile(dir, &fid);
- goto out;
- }
-
- INC_IVERSION(dir);
- curtime = current_time(dir);
- dir->i_ctime = curtime;
- dir->i_mtime = curtime;
- dir->i_atime = curtime;
- if (IS_DIRSYNC(dir))
- (void)exfat_sync_inode(dir);
- else
- mark_inode_dirty(dir);
-
- i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff);
-
- inode = exfat_build_inode(sb, &fid, i_pos);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto out;
- }
- INC_IVERSION(inode);
- curtime = current_time(inode);
- inode->i_mtime = curtime;
- inode->i_atime = curtime;
- inode->i_ctime = curtime;
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
-
- EXFAT_I(inode)->target = kmemdup(target, len + 1, GFP_KERNEL);
- if (!EXFAT_I(inode)->target) {
- err = -ENOMEM;
- goto out;
- }
-
- dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
- d_instantiate(dentry, inode);
-
-out:
- __unlock_super(sb);
- pr_debug("%s exited\n", __func__);
- return err;
-}
-
-static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
- struct super_block *sb = dir->i_sb;
- struct timespec64 curtime;
- struct inode *inode;
- struct file_id_t fid;
- loff_t i_pos;
- int err;
-
- __lock_super(sb);
-
- pr_debug("%s entered\n", __func__);
-
- err = ffsCreateDir(dir, (u8 *)dentry->d_name.name, &fid);
- if (err)
- goto out;
-
- INC_IVERSION(dir);
- curtime = current_time(dir);
- dir->i_ctime = curtime;
- dir->i_mtime = curtime;
- dir->i_atime = curtime;
- if (IS_DIRSYNC(dir))
- (void)exfat_sync_inode(dir);
- else
- mark_inode_dirty(dir);
- inc_nlink(dir);
-
- i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff);
-
- inode = exfat_build_inode(sb, &fid, i_pos);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto out;
- }
- INC_IVERSION(inode);
- curtime = current_time(inode);
- inode->i_mtime = curtime;
- inode->i_atime = curtime;
- inode->i_ctime = curtime;
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
-
- dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
- d_instantiate(dentry, inode);
-
-out:
- __unlock_super(sb);
- pr_debug("%s exited\n", __func__);
- return err;
-}
-
-static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
-{
- struct inode *inode = dentry->d_inode;
- struct super_block *sb = dir->i_sb;
- struct timespec64 curtime;
- int err;
-
- __lock_super(sb);
-
- pr_debug("%s entered\n", __func__);
-
- EXFAT_I(inode)->fid.size = i_size_read(inode);
-
- err = ffsRemoveDir(dir, &(EXFAT_I(inode)->fid));
- if (err)
- goto out;
-
- INC_IVERSION(dir);
- curtime = current_time(dir);
- dir->i_mtime = curtime;
- dir->i_atime = curtime;
- if (IS_DIRSYNC(dir))
- (void)exfat_sync_inode(dir);
- else
- mark_inode_dirty(dir);
- drop_nlink(dir);
-
- clear_nlink(inode);
- curtime = current_time(inode);
- inode->i_mtime = curtime;
- inode->i_atime = curtime;
- exfat_detach(inode);
- remove_inode_hash(inode);
-
-out:
- __unlock_super(sb);
- pr_debug("%s exited\n", __func__);
- return err;
-}
-
-static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
-{
- struct inode *old_inode, *new_inode;
- struct super_block *sb = old_dir->i_sb;
- struct timespec64 curtime;
- loff_t i_pos;
- int err;
-
- if (flags)
- return -EINVAL;
-
- __lock_super(sb);
-
- pr_debug("%s entered\n", __func__);
-
- old_inode = old_dentry->d_inode;
- new_inode = new_dentry->d_inode;
-
- EXFAT_I(old_inode)->fid.size = i_size_read(old_inode);
-
- err = ffsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir,
- new_dentry);
- if (err)
- goto out;
-
- INC_IVERSION(new_dir);
- curtime = current_time(new_dir);
- new_dir->i_ctime = curtime;
- new_dir->i_mtime = curtime;
- new_dir->i_atime = curtime;
-
- if (IS_DIRSYNC(new_dir))
- (void)exfat_sync_inode(new_dir);
- else
- mark_inode_dirty(new_dir);
-
- i_pos = ((loff_t)EXFAT_I(old_inode)->fid.dir.dir << 32) |
- (EXFAT_I(old_inode)->fid.entry & 0xffffffff);
-
- exfat_detach(old_inode);
- exfat_attach(old_inode, i_pos);
- if (IS_DIRSYNC(new_dir))
- (void)exfat_sync_inode(old_inode);
- else
- mark_inode_dirty(old_inode);
-
- if ((S_ISDIR(old_inode->i_mode)) && (old_dir != new_dir)) {
- drop_nlink(old_dir);
- if (!new_inode)
- inc_nlink(new_dir);
- }
- INC_IVERSION(old_dir);
- curtime = current_time(old_dir);
- old_dir->i_ctime = curtime;
- old_dir->i_mtime = curtime;
- if (IS_DIRSYNC(old_dir))
- (void)exfat_sync_inode(old_dir);
- else
- mark_inode_dirty(old_dir);
-
- if (new_inode) {
- exfat_detach(new_inode);
- drop_nlink(new_inode);
- if (S_ISDIR(new_inode->i_mode))
- drop_nlink(new_inode);
- new_inode->i_ctime = current_time(new_inode);
- }
-
-out:
- __unlock_super(sb);
- pr_debug("%s exited\n", __func__);
- return err;
-}
-
-static int exfat_cont_expand(struct inode *inode, loff_t size)
-{
- struct address_space *mapping = inode->i_mapping;
- loff_t start = i_size_read(inode), count = size - i_size_read(inode);
- struct timespec64 curtime;
- int err, err2;
-
- err = generic_cont_expand_simple(inode, size);
- if (err != 0)
- return err;
-
- curtime = current_time(inode);
- inode->i_ctime = curtime;
- inode->i_mtime = curtime;
- mark_inode_dirty(inode);
-
- if (IS_SYNC(inode)) {
- err = filemap_fdatawrite_range(mapping, start,
- start + count - 1);
- err2 = sync_mapping_buffers(mapping);
- err = (err) ? (err) : (err2);
- err2 = write_inode_now(inode, 1);
- err = (err) ? (err) : (err2);
- if (!err)
- err = filemap_fdatawait_range(mapping, start,
- start + count - 1);
- }
- return err;
-}
-
-static int exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode)
-{
- mode_t allow_utime = sbi->options.allow_utime;
-
- if (!uid_eq(current_fsuid(), inode->i_uid)) {
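- /* allow_utime holds group/other write bits; shift the group bit into the "other" slot when the caller is in the owning group */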
- if (in_group_p(inode->i_gid))
- allow_utime >>= 3;
- if (allow_utime & MAY_WRITE)
- return 1;
- }
-
- /* use a default check */
- return 0;
-}
-
-static int exfat_sanitize_mode(const struct exfat_sb_info *sbi,
- struct inode *inode, umode_t *mode_ptr)
-{
- mode_t i_mode, mask, perm;
-
- i_mode = inode->i_mode;
-
- if (S_ISREG(i_mode) || S_ISLNK(i_mode))
- mask = sbi->options.fs_fmask;
- else
- mask = sbi->options.fs_dmask;
-
- perm = *mode_ptr & ~(S_IFMT | mask);
-
- /* Of the r and x bits, all (subject to umask) must be present.*/
- if ((perm & 0555) != (i_mode & 0555))
- return -EPERM;
-
- if (exfat_mode_can_hold_ro(inode)) {
- /*
- * Of the w bits, either all (subject to umask) or none must be
- * present.
- */
- if ((perm & 0222) && ((perm & 0222) != (0222 & ~mask)))
- return -EPERM;
- } else {
- /*
- * If exfat_mode_can_hold_ro(inode) is false, can't change w
- * bits.
- */
- if ((perm & 0222) != (0222 & ~mask))
- return -EPERM;
- }
-
- *mode_ptr &= S_IFMT | perm;
-
- return 0;
-}
-
-static void exfat_truncate(struct inode *inode, loff_t old_size)
-{
- struct super_block *sb = inode->i_sb;
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &sbi->fs_info;
- struct timespec64 curtime;
- int err;
-
- __lock_super(sb);
-
- /*
- * This protects against truncating a file bigger than it was then
- * trying to write into the hole.
- */
- if (EXFAT_I(inode)->mmu_private > i_size_read(inode))
- EXFAT_I(inode)->mmu_private = i_size_read(inode);
-
- if (EXFAT_I(inode)->fid.start_clu == 0)
- goto out;
-
- err = ffsTruncateFile(inode, old_size, i_size_read(inode));
- if (err)
- goto out;
-
- curtime = current_time(inode);
- inode->i_ctime = curtime;
- inode->i_mtime = curtime;
- if (IS_DIRSYNC(inode))
- (void)exfat_sync_inode(inode);
- else
- mark_inode_dirty(inode);
-
- inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) &
- ~((loff_t)p_fs->cluster_size - 1)) >> 9;
-out:
- __unlock_super(sb);
-}
-
-static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb);
- struct inode *inode = dentry->d_inode;
- unsigned int ia_valid;
- int error;
- loff_t old_size;
-
- pr_debug("%s entered\n", __func__);
-
- if ((attr->ia_valid & ATTR_SIZE) &&
- attr->ia_size > i_size_read(inode)) {
- error = exfat_cont_expand(inode, attr->ia_size);
- if (error || attr->ia_valid == ATTR_SIZE)
- return error;
- attr->ia_valid &= ~ATTR_SIZE;
- }
-
- ia_valid = attr->ia_valid;
-
- if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) &&
- exfat_allow_set_time(sbi, inode)) {
- attr->ia_valid &= ~(ATTR_MTIME_SET |
- ATTR_ATIME_SET |
- ATTR_TIMES_SET);
- }
-
- error = setattr_prepare(dentry, attr);
- attr->ia_valid = ia_valid;
- if (error)
- return error;
-
- if (((attr->ia_valid & ATTR_UID) &&
- (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) ||
- ((attr->ia_valid & ATTR_GID) &&
- (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) ||
- ((attr->ia_valid & ATTR_MODE) &&
- (attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | 0777)))) {
- return -EPERM;
- }
-
- /*
- * We don't return -EPERM here. Yes, strange, but this matches
- * the historical behavior.
- */
- if (attr->ia_valid & ATTR_MODE) {
- if (exfat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0)
- attr->ia_valid &= ~ATTR_MODE;
- }
-
- EXFAT_I(inode)->fid.size = i_size_read(inode);
-
- if (attr->ia_valid & ATTR_SIZE) {
- old_size = i_size_read(inode);
- down_write(&EXFAT_I(inode)->truncate_lock);
- truncate_setsize(inode, attr->ia_size);
- exfat_truncate(inode, old_size);
- up_write(&EXFAT_I(inode)->truncate_lock);
- }
- setattr_copy(inode, attr);
- mark_inode_dirty(inode);
-
- pr_debug("%s exited\n", __func__);
- return error;
-}
-
-static int exfat_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
-{
- struct inode *inode = path->dentry->d_inode;
-
- pr_debug("%s entered\n", __func__);
-
- generic_fillattr(inode, stat);
- stat->blksize = EXFAT_SB(inode->i_sb)->fs_info.cluster_size;
-
- pr_debug("%s exited\n", __func__);
- return 0;
-}
-
-static const struct inode_operations exfat_dir_inode_operations = {
- .create = exfat_create,
- .lookup = exfat_lookup,
- .unlink = exfat_unlink,
- .symlink = exfat_symlink,
- .mkdir = exfat_mkdir,
- .rmdir = exfat_rmdir,
- .rename = exfat_rename,
- .setattr = exfat_setattr,
- .getattr = exfat_getattr,
-};
-
-/*======================================================================*/
-/* File Operations */
-/*======================================================================*/
-static const char *exfat_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *done)
-{
- struct exfat_inode_info *ei = EXFAT_I(inode);
-
- if (ei->target)
- return (char *)(ei->target);
- return NULL;
-}
-
-static const struct inode_operations exfat_symlink_inode_operations = {
- .get_link = exfat_get_link,
-};
-
-static int exfat_file_release(struct inode *inode, struct file *filp)
-{
- struct super_block *sb = inode->i_sb;
-
- EXFAT_I(inode)->fid.size = i_size_read(inode);
- ffsSyncVol(sb, false);
- return 0;
-}
-
-static const struct file_operations exfat_file_operations = {
- .llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
- .release = exfat_file_release,
- .unlocked_ioctl = exfat_generic_ioctl,
- .fsync = generic_file_fsync,
- .splice_read = generic_file_splice_read,
-};
-
-static const struct inode_operations exfat_file_inode_operations = {
- .setattr = exfat_setattr,
- .getattr = exfat_getattr,
-};
-
-/*======================================================================*/
-/* Address Space Operations */
-/*======================================================================*/
-
-static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
- unsigned long *mapped_blocks, int *create)
-{
- struct super_block *sb = inode->i_sb;
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &sbi->fs_info;
- const unsigned long blocksize = sb->s_blocksize;
- const unsigned char blocksize_bits = sb->s_blocksize_bits;
- sector_t last_block;
- int err, clu_offset, sec_offset;
- unsigned int cluster;
-
- *phys = 0;
- *mapped_blocks = 0;
-
- last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
- if (sector >= last_block) {
- if (*create == 0)
- return 0;
- } else {
- *create = 0;
- }
-
- /* cluster offset */
- clu_offset = sector >> p_fs->sectors_per_clu_bits;
-
- /* sector offset in cluster */
- sec_offset = sector & (p_fs->sectors_per_clu - 1);
-
- EXFAT_I(inode)->fid.size = i_size_read(inode);
-
- err = ffsMapCluster(inode, clu_offset, &cluster);
-
- if (!err && (cluster != CLUSTER_32(~0))) {
- *phys = START_SECTOR(cluster) + sec_offset;
- *mapped_blocks = p_fs->sectors_per_clu - sec_offset;
- }
-
- return 0;
-}
-
-static int exfat_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- struct super_block *sb = inode->i_sb;
- unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
- int err;
- unsigned long mapped_blocks;
- sector_t phys;
-
- __lock_super(sb);
-
- err = exfat_bmap(inode, iblock, &phys, &mapped_blocks, &create);
- if (err) {
- __unlock_super(sb);
- return err;
- }
-
- if (phys) {
- max_blocks = min(mapped_blocks, max_blocks);
- if (create) {
- EXFAT_I(inode)->mmu_private += max_blocks <<
- sb->s_blocksize_bits;
- set_buffer_new(bh_result);
- }
- map_bh(bh_result, sb, phys);
- }
-
- bh_result->b_size = max_blocks << sb->s_blocksize_bits;
- __unlock_super(sb);
-
- return 0;
-}
-
-static int exfat_readpage(struct file *file, struct page *page)
-{
- return mpage_readpage(page, exfat_get_block);
-}
-
-static int exfat_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned int nr_pages)
-{
- return mpage_readpages(mapping, pages, nr_pages, exfat_get_block);
-}
-
-static int exfat_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, exfat_get_block, wbc);
-}
-
-static int exfat_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- return mpage_writepages(mapping, wbc, exfat_get_block);
-}
-
-static void exfat_write_failed(struct address_space *mapping, loff_t to)
-{
- struct inode *inode = mapping->host;
-
- if (to > i_size_read(inode)) {
- truncate_pagecache(inode, i_size_read(inode));
- EXFAT_I(inode)->fid.size = i_size_read(inode);
- exfat_truncate(inode, i_size_read(inode));
- }
-}
-
-static int exfat_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int flags,
- struct page **pagep, void **fsdata)
-{
- int ret;
-
- *pagep = NULL;
- ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- exfat_get_block,
- &EXFAT_I(mapping->host)->mmu_private);
-
- if (ret < 0)
- exfat_write_failed(mapping, pos + len);
- return ret;
-}
-
-static int exfat_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct page *pagep, void *fsdata)
-{
- struct inode *inode = mapping->host;
- struct file_id_t *fid = &(EXFAT_I(inode)->fid);
- struct timespec64 curtime;
- int err;
-
- err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
-
- if (err < len)
- exfat_write_failed(mapping, pos + len);
-
- if (err >= 0 && !(fid->attr & ATTR_ARCHIVE)) {
- curtime = current_time(inode);
- inode->i_mtime = curtime;
- inode->i_ctime = curtime;
- fid->attr |= ATTR_ARCHIVE;
- mark_inode_dirty(inode);
- }
- return err;
-}
-
-static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct inode *inode = iocb->ki_filp->f_mapping->host;
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- ssize_t ret;
- int rw;
-
- rw = iov_iter_rw(iter);
-
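- /* a direct write past the allocated area would need new blocks; return 0 to fall back to buffered I/O */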
- if (rw == WRITE) {
- if (EXFAT_I(inode)->mmu_private < iov_iter_count(iter))
- return 0;
- }
- ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
-
- if ((ret < 0) && (rw & WRITE))
- exfat_write_failed(mapping, iov_iter_count(iter));
- return ret;
-}
-
-static sector_t _exfat_bmap(struct address_space *mapping, sector_t block)
-{
- sector_t blocknr;
-
- /* exfat_get_block() assumes the requested blocknr isn't truncated. */
- down_read(&EXFAT_I(mapping->host)->truncate_lock);
- blocknr = generic_block_bmap(mapping, block, exfat_get_block);
- up_read(&EXFAT_I(mapping->host)->truncate_lock);
-
- return blocknr;
-}
-
-static const struct address_space_operations exfat_aops = {
- .readpage = exfat_readpage,
- .readpages = exfat_readpages,
- .writepage = exfat_writepage,
- .writepages = exfat_writepages,
- .write_begin = exfat_write_begin,
- .write_end = exfat_write_end,
- .direct_IO = exfat_direct_IO,
- .bmap = _exfat_bmap
-};
-
-/*======================================================================*/
-/* Super Operations */
-/*======================================================================*/
-
-static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_inode_info *info;
- struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
- struct inode *inode = NULL;
-
- spin_lock(&sbi->inode_hash_lock);
- hlist_for_each_entry(info, head, i_hash_fat) {
- BUG_ON(info->vfs_inode.i_sb != sb);
-
- if (i_pos != info->i_pos)
- continue;
- inode = igrab(&info->vfs_inode);
- if (inode)
- break;
- }
- spin_unlock(&sbi->inode_hash_lock);
- return inode;
-}
-
-/* doesn't deal with root inode */
-static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
- struct fs_info_t *p_fs = &sbi->fs_info;
- struct dir_entry_t info;
-
- memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(struct file_id_t));
-
- ffsReadStat(inode, &info);
-
- EXFAT_I(inode)->i_pos = 0;
- EXFAT_I(inode)->target = NULL;
- inode->i_uid = sbi->options.fs_uid;
- inode->i_gid = sbi->options.fs_gid;
- INC_IVERSION(inode);
- inode->i_generation = prandom_u32();
-
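- /* low bit of i_generation: 0 for directories, 1 for files; kept stable for NFS file handles */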
- if (info.Attr & ATTR_SUBDIR) { /* directory */
- inode->i_generation &= ~1;
- inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
- inode->i_op = &exfat_dir_inode_operations;
- inode->i_fop = &exfat_dir_operations;
-
- i_size_write(inode, info.Size);
- EXFAT_I(inode)->mmu_private = i_size_read(inode);
- set_nlink(inode, info.NumSubdirs);
- } else if (info.Attr & ATTR_SYMLINK) { /* symbolic link */
- inode->i_generation |= 1;
- inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
- inode->i_op = &exfat_symlink_inode_operations;
-
- i_size_write(inode, info.Size);
- EXFAT_I(inode)->mmu_private = i_size_read(inode);
- } else { /* regular file */
- inode->i_generation |= 1;
- inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
- inode->i_op = &exfat_file_inode_operations;
- inode->i_fop = &exfat_file_operations;
- inode->i_mapping->a_ops = &exfat_aops;
- inode->i_mapping->nrpages = 0;
-
- i_size_write(inode, info.Size);
- EXFAT_I(inode)->mmu_private = i_size_read(inode);
- }
- exfat_save_attr(inode, info.Attr);
-
- inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1))
- & ~((loff_t)p_fs->cluster_size - 1)) >> 9;
-
- exfat_time_fat2unix(&inode->i_mtime, &info.ModifyTimestamp);
- exfat_time_fat2unix(&inode->i_ctime, &info.CreateTimestamp);
- exfat_time_fat2unix(&inode->i_atime, &info.AccessTimestamp);
-
- return 0;
-}
-
-static struct inode *exfat_build_inode(struct super_block *sb,
- struct file_id_t *fid, loff_t i_pos)
-{
- struct inode *inode;
- int err;
-
- inode = exfat_iget(sb, i_pos);
- if (inode)
- goto out;
- inode = new_inode(sb);
- if (!inode) {
- inode = ERR_PTR(-ENOMEM);
- goto out;
- }
- inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
- SET_IVERSION(inode, 1);
- err = exfat_fill_inode(inode, fid);
- if (err) {
- iput(inode);
- inode = ERR_PTR(err);
- goto out;
- }
- exfat_attach(inode, i_pos);
- insert_inode_hash(inode);
-out:
- return inode;
-}
-
-static int exfat_sync_inode(struct inode *inode)
-{
- return exfat_write_inode(inode, NULL);
-}
-
-static struct inode *exfat_alloc_inode(struct super_block *sb)
-{
- struct exfat_inode_info *ei;
-
- ei = kmem_cache_alloc(exfat_inode_cachep, GFP_NOFS);
- if (!ei)
- return NULL;
-
- init_rwsem(&ei->truncate_lock);
-
- return &ei->vfs_inode;
-}
-
-static void exfat_destroy_inode(struct inode *inode)
-{
- kfree(EXFAT_I(inode)->target);
- EXFAT_I(inode)->target = NULL;
-
- kmem_cache_free(exfat_inode_cachep, EXFAT_I(inode));
-}
-
-static int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
- struct dir_entry_t info;
-
- if (inode->i_ino == EXFAT_ROOT_INO)
- return 0;
-
- info.Attr = exfat_make_attr(inode);
- info.Size = i_size_read(inode);
-
- exfat_time_unix2fat(&inode->i_mtime, &info.ModifyTimestamp);
- exfat_time_unix2fat(&inode->i_ctime, &info.CreateTimestamp);
- exfat_time_unix2fat(&inode->i_atime, &info.AccessTimestamp);
-
- ffsWriteStat(inode, &info);
-
- return 0;
-}
-
-static void exfat_evict_inode(struct inode *inode)
-{
- truncate_inode_pages(&inode->i_data, 0);
-
- if (!inode->i_nlink)
- i_size_write(inode, 0);
- invalidate_inode_buffers(inode);
- clear_inode(inode);
- exfat_detach(inode);
-
- remove_inode_hash(inode);
-}
-
-static void exfat_free_super(struct exfat_sb_info *sbi)
-{
- if (sbi->nls_disk)
- unload_nls(sbi->nls_disk);
- if (sbi->nls_io)
- unload_nls(sbi->nls_io);
- if (sbi->options.iocharset != exfat_default_iocharset)
- kfree(sbi->options.iocharset);
- /* mutex_init() is done in exfat_fill_super() */
- mutex_destroy(&sbi->s_lock);
- kvfree(sbi);
-}
-
-static void exfat_put_super(struct super_block *sb)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
-
- if (__is_sb_dirty(sb))
- exfat_write_super(sb);
-
- ffsUmountVol(sb);
-
- sb->s_fs_info = NULL;
- exfat_free_super(sbi);
-}
-
-static void exfat_write_super(struct super_block *sb)
-{
- __lock_super(sb);
-
- __set_sb_clean(sb);
-
- if (!sb_rdonly(sb))
- ffsSyncVol(sb, true);
-
- __unlock_super(sb);
-}
-
-static int exfat_sync_fs(struct super_block *sb, int wait)
-{
- int err = 0;
-
- if (__is_sb_dirty(sb)) {
- __lock_super(sb);
- __set_sb_clean(sb);
- err = ffsSyncVol(sb, true);
- __unlock_super(sb);
- }
-
- return err;
-}
-
-static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
- struct super_block *sb = dentry->d_sb;
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct vol_info_t info;
-
- if (p_fs->used_clusters == UINT_MAX) {
- if (ffsGetVolInfo(sb, &info) == -EIO)
- return -EIO;
- } else {
- info.FatType = p_fs->vol_type;
- info.ClusterSize = p_fs->cluster_size;
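- /* the first two FAT entries are reserved, so exclude them from the count */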
- info.NumClusters = p_fs->num_clusters - 2;
- info.UsedClusters = p_fs->used_clusters;
- info.FreeClusters = info.NumClusters - info.UsedClusters;
-
- if (p_fs->dev_ejected)
- pr_info("[EXFAT] statfs on device that is ejected\n");
- }
-
- buf->f_type = sb->s_magic;
- buf->f_bsize = info.ClusterSize;
- buf->f_blocks = info.NumClusters;
- buf->f_bfree = info.FreeClusters;
- buf->f_bavail = info.FreeClusters;
- buf->f_fsid.val[0] = (u32)id;
- buf->f_fsid.val[1] = (u32)(id >> 32);
- buf->f_namelen = 260;
-
- return 0;
-}
-
-static int exfat_remount(struct super_block *sb, int *flags, char *data)
-{
- *flags |= SB_NODIRATIME;
- return 0;
-}
-
-static int exfat_show_options(struct seq_file *m, struct dentry *root)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(root->d_sb);
- struct exfat_mount_options *opts = &sbi->options;
-
- if (__kuid_val(opts->fs_uid))
- seq_printf(m, ",uid=%u", __kuid_val(opts->fs_uid));
- if (__kgid_val(opts->fs_gid))
- seq_printf(m, ",gid=%u", __kgid_val(opts->fs_gid));
- seq_printf(m, ",fmask=%04o", opts->fs_fmask);
- seq_printf(m, ",dmask=%04o", opts->fs_dmask);
- if (opts->allow_utime)
- seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
- if (sbi->nls_disk)
- seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
- if (sbi->nls_io)
- seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
- seq_printf(m, ",namecase=%u", opts->casesensitive);
- if (opts->errors == EXFAT_ERRORS_CONT)
- seq_puts(m, ",errors=continue");
- else if (opts->errors == EXFAT_ERRORS_PANIC)
- seq_puts(m, ",errors=panic");
- else
- seq_puts(m, ",errors=remount-ro");
-#ifdef CONFIG_STAGING_EXFAT_DISCARD
- if (opts->discard)
- seq_puts(m, ",discard");
-#endif
- return 0;
-}
-
-static const struct super_operations exfat_sops = {
- .alloc_inode = exfat_alloc_inode,
- .destroy_inode = exfat_destroy_inode,
- .write_inode = exfat_write_inode,
- .evict_inode = exfat_evict_inode,
- .put_super = exfat_put_super,
- .sync_fs = exfat_sync_fs,
- .statfs = exfat_statfs,
- .remount_fs = exfat_remount,
- .show_options = exfat_show_options,
-};
-
-/*======================================================================*/
-/* Export Operations */
-/*======================================================================*/
-
-static struct inode *exfat_nfs_get_inode(struct super_block *sb, u64 ino,
- u32 generation)
-{
- struct inode *inode = NULL;
-
- if (ino < EXFAT_ROOT_INO)
- return inode;
- inode = ilookup(sb, ino);
-
- if (inode && generation && (inode->i_generation != generation)) {
- iput(inode);
- inode = NULL;
- }
-
- return inode;
-}
-
-static struct dentry *exfat_fh_to_dentry(struct super_block *sb,
- struct fid *fid, int fh_len,
- int fh_type)
-{
- return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
- exfat_nfs_get_inode);
-}
-
-static struct dentry *exfat_fh_to_parent(struct super_block *sb,
- struct fid *fid, int fh_len,
- int fh_type)
-{
- return generic_fh_to_parent(sb, fid, fh_len, fh_type,
- exfat_nfs_get_inode);
-}
-
-static const struct export_operations exfat_export_ops = {
- .fh_to_dentry = exfat_fh_to_dentry,
- .fh_to_parent = exfat_fh_to_parent,
-};
-
-/*======================================================================*/
-/* Super Block Read Operations */
-/*======================================================================*/
-
-enum {
- Opt_uid,
- Opt_gid,
- Opt_umask,
- Opt_dmask,
- Opt_fmask,
- Opt_allow_utime,
- Opt_codepage,
- Opt_charset,
- Opt_namecase,
- Opt_debug,
- Opt_err_cont,
- Opt_err_panic,
- Opt_err_ro,
- Opt_utf8_hack,
- Opt_err,
-#ifdef CONFIG_STAGING_EXFAT_DISCARD
- Opt_discard,
-#endif /* CONFIG_STAGING_EXFAT_DISCARD */
-};
-
-static const match_table_t exfat_tokens = {
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_umask, "umask=%o"},
- {Opt_dmask, "dmask=%o"},
- {Opt_fmask, "fmask=%o"},
- {Opt_allow_utime, "allow_utime=%o"},
- {Opt_codepage, "codepage=%u"},
- {Opt_charset, "iocharset=%s"},
- {Opt_namecase, "namecase=%u"},
- {Opt_debug, "debug"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_utf8_hack, "utf8"},
-#ifdef CONFIG_STAGING_EXFAT_DISCARD
- {Opt_discard, "discard"},
-#endif /* CONFIG_STAGING_EXFAT_DISCARD */
- {Opt_err, NULL}
-};
-
-static int parse_options(char *options, int silent, int *debug,
- struct exfat_mount_options *opts)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- char *iocharset;
-
- opts->fs_uid = current_uid();
- opts->fs_gid = current_gid();
- opts->fs_fmask = current->fs->umask;
- opts->fs_dmask = current->fs->umask;
- opts->allow_utime = U16_MAX;
- opts->codepage = exfat_default_codepage;
- opts->iocharset = exfat_default_iocharset;
- opts->casesensitive = 0;
- opts->errors = EXFAT_ERRORS_RO;
-#ifdef CONFIG_STAGING_EXFAT_DISCARD
- opts->discard = 0;
-#endif
- *debug = 0;
-
- if (!options)
- goto out;
-
- while ((p = strsep(&options, ","))) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, exfat_tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return 0;
- opts->fs_uid = KUIDT_INIT(option);
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return 0;
- opts->fs_gid = KGIDT_INIT(option);
- break;
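- /* "umask=" sets both the file and directory masks */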
- case Opt_umask:
- case Opt_dmask:
- case Opt_fmask:
- if (match_octal(&args[0], &option))
- return 0;
- if (token != Opt_dmask)
- opts->fs_fmask = option;
- if (token != Opt_fmask)
- opts->fs_dmask = option;
- break;
- case Opt_allow_utime:
- if (match_octal(&args[0], &option))
- return 0;
- opts->allow_utime = option & 0022;
- break;
- case Opt_codepage:
- if (match_int(&args[0], &option))
- return 0;
- opts->codepage = option;
- break;
- case Opt_charset:
- if (opts->iocharset != exfat_default_iocharset)
- kfree(opts->iocharset);
- iocharset = match_strdup(&args[0]);
- if (!iocharset)
- return -ENOMEM;
- opts->iocharset = iocharset;
- break;
- case Opt_namecase:
- if (match_int(&args[0], &option))
- return 0;
- opts->casesensitive = option;
- break;
- case Opt_err_cont:
- opts->errors = EXFAT_ERRORS_CONT;
- break;
- case Opt_err_panic:
- opts->errors = EXFAT_ERRORS_PANIC;
- break;
- case Opt_err_ro:
- opts->errors = EXFAT_ERRORS_RO;
- break;
- case Opt_debug:
- *debug = 1;
- break;
-#ifdef CONFIG_STAGING_EXFAT_DISCARD
- case Opt_discard:
- opts->discard = 1;
- break;
-#endif /* CONFIG_STAGING_EXFAT_DISCARD */
- case Opt_utf8_hack:
- break;
- default:
- if (!silent)
- pr_err("[EXFAT] Unrecognized mount option %s or missing value\n",
- p);
- return -EINVAL;
- }
- }
-
-out:
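- /* default: permit utimes() wherever dmask would grant write permission */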
- if (opts->allow_utime == U16_MAX)
- opts->allow_utime = ~opts->fs_dmask & 0022;
-
- return 0;
-}
-
-static void exfat_hash_init(struct super_block *sb)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- int i;
-
- spin_lock_init(&sbi->inode_hash_lock);
- for (i = 0; i < EXFAT_HASH_SIZE; i++)
- INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
-}
-
-static int exfat_read_root(struct inode *inode)
-{
- struct super_block *sb = inode->i_sb;
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &sbi->fs_info;
- struct timespec64 curtime;
- struct dir_entry_t info;
-
- EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir;
- EXFAT_I(inode)->fid.dir.flags = 0x01;
- EXFAT_I(inode)->fid.entry = -1;
- EXFAT_I(inode)->fid.start_clu = p_fs->root_dir;
- EXFAT_I(inode)->fid.flags = 0x01;
- EXFAT_I(inode)->fid.type = TYPE_DIR;
- EXFAT_I(inode)->fid.rwoffset = 0;
- EXFAT_I(inode)->fid.hint_last_off = -1;
-
- EXFAT_I(inode)->target = NULL;
-
- ffsReadStat(inode, &info);
-
- inode->i_uid = sbi->options.fs_uid;
- inode->i_gid = sbi->options.fs_gid;
- INC_IVERSION(inode);
- inode->i_generation = 0;
- inode->i_mode = exfat_make_mode(sbi, ATTR_SUBDIR, 0777);
- inode->i_op = &exfat_dir_inode_operations;
- inode->i_fop = &exfat_dir_operations;
-
- i_size_write(inode, info.Size);
- inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1))
- & ~((loff_t)p_fs->cluster_size - 1)) >> 9;
- EXFAT_I(inode)->i_pos = ((loff_t)p_fs->root_dir << 32) | 0xffffffff;
- EXFAT_I(inode)->mmu_private = i_size_read(inode);
-
- exfat_save_attr(inode, ATTR_SUBDIR);
- curtime = current_time(inode);
- inode->i_mtime = curtime;
- inode->i_atime = curtime;
- inode->i_ctime = curtime;
- set_nlink(inode, info.NumSubdirs + 2);
-
- return 0;
-}
-
-static void setup_dops(struct super_block *sb)
-{
- if (EXFAT_SB(sb)->options.casesensitive == 0)
- sb->s_d_op = &exfat_ci_dentry_ops;
- else
- sb->s_d_op = &exfat_dentry_ops;
-}
-
-static int exfat_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct inode *root_inode = NULL;
- struct exfat_sb_info *sbi;
- int debug, ret;
- long error;
-
- /*
- * GFP_KERNEL is ok here, because while we do hold the
- * superblock lock, memory pressure can't call back into
- * the filesystem, since we're only just about to mount
- * it and have no inodes etc active!
- */
- sbi = kvzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
- mutex_init(&sbi->s_lock);
- sb->s_fs_info = sbi;
- sb->s_flags |= SB_NODIRATIME;
- sb->s_magic = EXFAT_SUPER_MAGIC;
- sb->s_op = &exfat_sops;
- sb->s_export_op = &exfat_export_ops;
-
- error = parse_options(data, silent, &debug, &sbi->options);
- if (error)
- goto out_fail;
-
- setup_dops(sb);
-
- error = -EIO;
- sb_min_blocksize(sb, 512);
- sb->s_maxbytes = 0x7fffffffffffffffLL; /* maximum file size */
-
- ret = ffsMountVol(sb);
- if (ret) {
- if (!silent)
- pr_err("[EXFAT] ffsMountVol failed\n");
-
- goto out_fail;
- }
-
- /* set up enough so that it can read an inode */
- exfat_hash_init(sb);
-
- /*
- * The low byte of the FAT's first entry must match the
- * media field. But in the real world, too many devices
- * write the wrong value, so that validity check was removed.
- *
- * if (FAT_FIRST_ENT(sb, media) != first)
- */
-
- sbi->nls_io = load_nls(sbi->options.iocharset);
-
- error = -ENOMEM;
- root_inode = new_inode(sb);
- if (!root_inode)
- goto out_fail2;
- root_inode->i_ino = EXFAT_ROOT_INO;
- SET_IVERSION(root_inode, 1);
-
- error = exfat_read_root(root_inode);
- if (error < 0)
- goto out_fail2;
- error = -ENOMEM;
- exfat_attach(root_inode, EXFAT_I(root_inode)->i_pos);
- insert_inode_hash(root_inode);
- sb->s_root = d_make_root(root_inode);
- if (!sb->s_root) {
- /* d_make_root() already dropped the inode on failure */
- root_inode = NULL;
- pr_err("[EXFAT] Getting the root inode failed\n");
- goto out_fail2;
- }
-
- return 0;
-
-out_fail2:
- ffsUmountVol(sb);
-out_fail:
- if (root_inode)
- iput(root_inode);
- sb->s_fs_info = NULL;
- exfat_free_super(sbi);
- return error;
-}
-
-static struct dentry *exfat_fs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, exfat_fill_super);
-}
-
-static void init_once(void *foo)
-{
- struct exfat_inode_info *ei = (struct exfat_inode_info *)foo;
-
- INIT_HLIST_NODE(&ei->i_hash_fat);
- inode_init_once(&ei->vfs_inode);
-}
-
-static int __init exfat_init_inodecache(void)
-{
- exfat_inode_cachep = kmem_cache_create("exfat_inode_cache",
- sizeof(struct exfat_inode_info),
- 0,
- (SLAB_RECLAIM_ACCOUNT |
- SLAB_MEM_SPREAD),
- init_once);
- if (!exfat_inode_cachep)
- return -ENOMEM;
- return 0;
-}
-
-static void __exit exfat_destroy_inodecache(void)
-{
- /*
- * Make sure all delayed rcu free inodes are flushed before we
- * destroy cache.
- */
- rcu_barrier();
- kmem_cache_destroy(exfat_inode_cachep);
-}
-
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
-static void exfat_debug_kill_sb(struct super_block *sb)
-{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct block_device *bdev = sb->s_bdev;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- long flags;
-
- if (sbi) {
- flags = sbi->debug_flags;
-
- if (flags & EXFAT_DEBUGFLAGS_INVALID_UMOUNT) {
- /*
- * invalidate_bdev drops all device cache include
- * dirty. We use this to simulate device removal.
- */
- mutex_lock(&p_fs->v_mutex);
- exfat_fat_release_all(sb);
- exfat_buf_release_all(sb);
- mutex_unlock(&p_fs->v_mutex);
-
- invalidate_bdev(bdev);
- }
- }
-
- kill_block_super(sb);
-}
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-
-static struct file_system_type exfat_fs_type = {
- .owner = THIS_MODULE,
- .name = "exfat",
- .mount = exfat_fs_mount,
-#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
- .kill_sb = exfat_debug_kill_sb,
-#else
- .kill_sb = kill_block_super,
-#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
- .fs_flags = FS_REQUIRES_DEV,
-};
-
-static int __init init_exfat(void)
-{
- int err;
-
- BUILD_BUG_ON(sizeof(struct dentry_t) != DENTRY_SIZE);
- BUILD_BUG_ON(sizeof(struct dos_dentry_t) != DENTRY_SIZE);
- BUILD_BUG_ON(sizeof(struct ext_dentry_t) != DENTRY_SIZE);
- BUILD_BUG_ON(sizeof(struct file_dentry_t) != DENTRY_SIZE);
- BUILD_BUG_ON(sizeof(struct strm_dentry_t) != DENTRY_SIZE);
- BUILD_BUG_ON(sizeof(struct name_dentry_t) != DENTRY_SIZE);
- BUILD_BUG_ON(sizeof(struct bmap_dentry_t) != DENTRY_SIZE);
- BUILD_BUG_ON(sizeof(struct case_dentry_t) != DENTRY_SIZE);
- BUILD_BUG_ON(sizeof(struct volm_dentry_t) != DENTRY_SIZE);
-
- pr_info("exFAT: Version %s\n", EXFAT_VERSION);
-
- err = exfat_init_inodecache();
- if (err)
- return err;
-
- err = register_filesystem(&exfat_fs_type);
- if (err) {
- kmem_cache_destroy(exfat_inode_cachep);
- return err;
- }
-
- return 0;
-}
-
-static void __exit exit_exfat(void)
-{
- unregister_filesystem(&exfat_fs_type);
- exfat_destroy_inodecache();
-}
-
-module_init(init_exfat);
-module_exit(exit_exfat);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("exFAT Filesystem Driver");
-MODULE_ALIAS_FS("exfat");
diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c
deleted file mode 100644
index b91a1faa0e50..000000000000
--- a/drivers/staging/exfat/exfat_upcase.c
+++ /dev/null
@@ -1,740 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/types.h>
-#include "exfat.h"
-
-const u8 uni_upcase[NUM_UPCASE << 1] = {
- 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00,
- 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00,
- 0x08, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x0B, 0x00,
- 0x0C, 0x00, 0x0D, 0x00, 0x0E, 0x00, 0x0F, 0x00,
- 0x10, 0x00, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00,
- 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00,
- 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1B, 0x00,
- 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x00,
- 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00,
- 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27, 0x00,
- 0x28, 0x00, 0x29, 0x00, 0x2A, 0x00, 0x2B, 0x00,
- 0x2C, 0x00, 0x2D, 0x00, 0x2E, 0x00, 0x2F, 0x00,
- 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33, 0x00,
- 0x34, 0x00, 0x35, 0x00, 0x36, 0x00, 0x37, 0x00,
- 0x38, 0x00, 0x39, 0x00, 0x3A, 0x00, 0x3B, 0x00,
- 0x3C, 0x00, 0x3D, 0x00, 0x3E, 0x00, 0x3F, 0x00,
- 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00,
- 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00,
- 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00,
- 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00,
- 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00,
- 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00,
- 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x5B, 0x00,
- 0x5C, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x5F, 0x00,
- 0x60, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00,
- 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00,
- 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00,
- 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00,
- 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00,
- 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00,
- 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x7B, 0x00,
- 0x7C, 0x00, 0x7D, 0x00, 0x7E, 0x00, 0x7F, 0x00,
- 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00,
- 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00,
- 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00,
- 0x8C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00,
- 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00,
- 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00,
- 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00,
- 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00,
- 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00,
- 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, 0x00,
- 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, 0x00,
- 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, 0x00,
- 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00,
- 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, 0x00,
- 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, 0x00,
- 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, 0x00,
- 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00,
- 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
- 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00,
- 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
- 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00,
- 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, 0x00,
- 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00,
- 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0xDF, 0x00,
- 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00,
- 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
- 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00,
- 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
- 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00,
- 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xF7, 0x00,
- 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00,
- 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0x78, 0x01,
- 0x00, 0x01, 0x00, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x04, 0x01, 0x04, 0x01, 0x06, 0x01, 0x06, 0x01,
- 0x08, 0x01, 0x08, 0x01, 0x0A, 0x01, 0x0A, 0x01,
- 0x0C, 0x01, 0x0C, 0x01, 0x0E, 0x01, 0x0E, 0x01,
- 0x10, 0x01, 0x10, 0x01, 0x12, 0x01, 0x12, 0x01,
- 0x14, 0x01, 0x14, 0x01, 0x16, 0x01, 0x16, 0x01,
- 0x18, 0x01, 0x18, 0x01, 0x1A, 0x01, 0x1A, 0x01,
- 0x1C, 0x01, 0x1C, 0x01, 0x1E, 0x01, 0x1E, 0x01,
- 0x20, 0x01, 0x20, 0x01, 0x22, 0x01, 0x22, 0x01,
- 0x24, 0x01, 0x24, 0x01, 0x26, 0x01, 0x26, 0x01,
- 0x28, 0x01, 0x28, 0x01, 0x2A, 0x01, 0x2A, 0x01,
- 0x2C, 0x01, 0x2C, 0x01, 0x2E, 0x01, 0x2E, 0x01,
- 0x30, 0x01, 0x31, 0x01, 0x32, 0x01, 0x32, 0x01,
- 0x34, 0x01, 0x34, 0x01, 0x36, 0x01, 0x36, 0x01,
- 0x38, 0x01, 0x39, 0x01, 0x39, 0x01, 0x3B, 0x01,
- 0x3B, 0x01, 0x3D, 0x01, 0x3D, 0x01, 0x3F, 0x01,
- 0x3F, 0x01, 0x41, 0x01, 0x41, 0x01, 0x43, 0x01,
- 0x43, 0x01, 0x45, 0x01, 0x45, 0x01, 0x47, 0x01,
- 0x47, 0x01, 0x49, 0x01, 0x4A, 0x01, 0x4A, 0x01,
- 0x4C, 0x01, 0x4C, 0x01, 0x4E, 0x01, 0x4E, 0x01,
- 0x50, 0x01, 0x50, 0x01, 0x52, 0x01, 0x52, 0x01,
- 0x54, 0x01, 0x54, 0x01, 0x56, 0x01, 0x56, 0x01,
- 0x58, 0x01, 0x58, 0x01, 0x5A, 0x01, 0x5A, 0x01,
- 0x5C, 0x01, 0x5C, 0x01, 0x5E, 0x01, 0x5E, 0x01,
- 0x60, 0x01, 0x60, 0x01, 0x62, 0x01, 0x62, 0x01,
- 0x64, 0x01, 0x64, 0x01, 0x66, 0x01, 0x66, 0x01,
- 0x68, 0x01, 0x68, 0x01, 0x6A, 0x01, 0x6A, 0x01,
- 0x6C, 0x01, 0x6C, 0x01, 0x6E, 0x01, 0x6E, 0x01,
- 0x70, 0x01, 0x70, 0x01, 0x72, 0x01, 0x72, 0x01,
- 0x74, 0x01, 0x74, 0x01, 0x76, 0x01, 0x76, 0x01,
- 0x78, 0x01, 0x79, 0x01, 0x79, 0x01, 0x7B, 0x01,
- 0x7B, 0x01, 0x7D, 0x01, 0x7D, 0x01, 0x7F, 0x01,
- 0x43, 0x02, 0x81, 0x01, 0x82, 0x01, 0x82, 0x01,
- 0x84, 0x01, 0x84, 0x01, 0x86, 0x01, 0x87, 0x01,
- 0x87, 0x01, 0x89, 0x01, 0x8A, 0x01, 0x8B, 0x01,
- 0x8B, 0x01, 0x8D, 0x01, 0x8E, 0x01, 0x8F, 0x01,
- 0x90, 0x01, 0x91, 0x01, 0x91, 0x01, 0x93, 0x01,
- 0x94, 0x01, 0xF6, 0x01, 0x96, 0x01, 0x97, 0x01,
- 0x98, 0x01, 0x98, 0x01, 0x3D, 0x02, 0x9B, 0x01,
- 0x9C, 0x01, 0x9D, 0x01, 0x20, 0x02, 0x9F, 0x01,
- 0xA0, 0x01, 0xA0, 0x01, 0xA2, 0x01, 0xA2, 0x01,
- 0xA4, 0x01, 0xA4, 0x01, 0xA6, 0x01, 0xA7, 0x01,
- 0xA7, 0x01, 0xA9, 0x01, 0xAA, 0x01, 0xAB, 0x01,
- 0xAC, 0x01, 0xAC, 0x01, 0xAE, 0x01, 0xAF, 0x01,
- 0xAF, 0x01, 0xB1, 0x01, 0xB2, 0x01, 0xB3, 0x01,
- 0xB3, 0x01, 0xB5, 0x01, 0xB5, 0x01, 0xB7, 0x01,
- 0xB8, 0x01, 0xB8, 0x01, 0xBA, 0x01, 0xBB, 0x01,
- 0xBC, 0x01, 0xBC, 0x01, 0xBE, 0x01, 0xF7, 0x01,
- 0xC0, 0x01, 0xC1, 0x01, 0xC2, 0x01, 0xC3, 0x01,
- 0xC4, 0x01, 0xC5, 0x01, 0xC4, 0x01, 0xC7, 0x01,
- 0xC8, 0x01, 0xC7, 0x01, 0xCA, 0x01, 0xCB, 0x01,
- 0xCA, 0x01, 0xCD, 0x01, 0xCD, 0x01, 0xCF, 0x01,
- 0xCF, 0x01, 0xD1, 0x01, 0xD1, 0x01, 0xD3, 0x01,
- 0xD3, 0x01, 0xD5, 0x01, 0xD5, 0x01, 0xD7, 0x01,
- 0xD7, 0x01, 0xD9, 0x01, 0xD9, 0x01, 0xDB, 0x01,
- 0xDB, 0x01, 0x8E, 0x01, 0xDE, 0x01, 0xDE, 0x01,
- 0xE0, 0x01, 0xE0, 0x01, 0xE2, 0x01, 0xE2, 0x01,
- 0xE4, 0x01, 0xE4, 0x01, 0xE6, 0x01, 0xE6, 0x01,
- 0xE8, 0x01, 0xE8, 0x01, 0xEA, 0x01, 0xEA, 0x01,
- 0xEC, 0x01, 0xEC, 0x01, 0xEE, 0x01, 0xEE, 0x01,
- 0xF0, 0x01, 0xF1, 0x01, 0xF2, 0x01, 0xF1, 0x01,
- 0xF4, 0x01, 0xF4, 0x01, 0xF6, 0x01, 0xF7, 0x01,
- 0xF8, 0x01, 0xF8, 0x01, 0xFA, 0x01, 0xFA, 0x01,
- 0xFC, 0x01, 0xFC, 0x01, 0xFE, 0x01, 0xFE, 0x01,
- 0x00, 0x02, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x04, 0x02, 0x04, 0x02, 0x06, 0x02, 0x06, 0x02,
- 0x08, 0x02, 0x08, 0x02, 0x0A, 0x02, 0x0A, 0x02,
- 0x0C, 0x02, 0x0C, 0x02, 0x0E, 0x02, 0x0E, 0x02,
- 0x10, 0x02, 0x10, 0x02, 0x12, 0x02, 0x12, 0x02,
- 0x14, 0x02, 0x14, 0x02, 0x16, 0x02, 0x16, 0x02,
- 0x18, 0x02, 0x18, 0x02, 0x1A, 0x02, 0x1A, 0x02,
- 0x1C, 0x02, 0x1C, 0x02, 0x1E, 0x02, 0x1E, 0x02,
- 0x20, 0x02, 0x21, 0x02, 0x22, 0x02, 0x22, 0x02,
- 0x24, 0x02, 0x24, 0x02, 0x26, 0x02, 0x26, 0x02,
- 0x28, 0x02, 0x28, 0x02, 0x2A, 0x02, 0x2A, 0x02,
- 0x2C, 0x02, 0x2C, 0x02, 0x2E, 0x02, 0x2E, 0x02,
- 0x30, 0x02, 0x30, 0x02, 0x32, 0x02, 0x32, 0x02,
- 0x34, 0x02, 0x35, 0x02, 0x36, 0x02, 0x37, 0x02,
- 0x38, 0x02, 0x39, 0x02, 0x65, 0x2C, 0x3B, 0x02,
- 0x3B, 0x02, 0x3D, 0x02, 0x66, 0x2C, 0x3F, 0x02,
- 0x40, 0x02, 0x41, 0x02, 0x41, 0x02, 0x43, 0x02,
- 0x44, 0x02, 0x45, 0x02, 0x46, 0x02, 0x46, 0x02,
- 0x48, 0x02, 0x48, 0x02, 0x4A, 0x02, 0x4A, 0x02,
- 0x4C, 0x02, 0x4C, 0x02, 0x4E, 0x02, 0x4E, 0x02,
- 0x50, 0x02, 0x51, 0x02, 0x52, 0x02, 0x81, 0x01,
- 0x86, 0x01, 0x55, 0x02, 0x89, 0x01, 0x8A, 0x01,
- 0x58, 0x02, 0x8F, 0x01, 0x5A, 0x02, 0x90, 0x01,
- 0x5C, 0x02, 0x5D, 0x02, 0x5E, 0x02, 0x5F, 0x02,
- 0x93, 0x01, 0x61, 0x02, 0x62, 0x02, 0x94, 0x01,
- 0x64, 0x02, 0x65, 0x02, 0x66, 0x02, 0x67, 0x02,
- 0x97, 0x01, 0x96, 0x01, 0x6A, 0x02, 0x62, 0x2C,
- 0x6C, 0x02, 0x6D, 0x02, 0x6E, 0x02, 0x9C, 0x01,
- 0x70, 0x02, 0x71, 0x02, 0x9D, 0x01, 0x73, 0x02,
- 0x74, 0x02, 0x9F, 0x01, 0x76, 0x02, 0x77, 0x02,
- 0x78, 0x02, 0x79, 0x02, 0x7A, 0x02, 0x7B, 0x02,
- 0x7C, 0x02, 0x64, 0x2C, 0x7E, 0x02, 0x7F, 0x02,
- 0xA6, 0x01, 0x81, 0x02, 0x82, 0x02, 0xA9, 0x01,
- 0x84, 0x02, 0x85, 0x02, 0x86, 0x02, 0x87, 0x02,
- 0xAE, 0x01, 0x44, 0x02, 0xB1, 0x01, 0xB2, 0x01,
- 0x45, 0x02, 0x8D, 0x02, 0x8E, 0x02, 0x8F, 0x02,
- 0x90, 0x02, 0x91, 0x02, 0xB7, 0x01, 0x93, 0x02,
- 0x94, 0x02, 0x95, 0x02, 0x96, 0x02, 0x97, 0x02,
- 0x98, 0x02, 0x99, 0x02, 0x9A, 0x02, 0x9B, 0x02,
- 0x9C, 0x02, 0x9D, 0x02, 0x9E, 0x02, 0x9F, 0x02,
- 0xA0, 0x02, 0xA1, 0x02, 0xA2, 0x02, 0xA3, 0x02,
- 0xA4, 0x02, 0xA5, 0x02, 0xA6, 0x02, 0xA7, 0x02,
- 0xA8, 0x02, 0xA9, 0x02, 0xAA, 0x02, 0xAB, 0x02,
- 0xAC, 0x02, 0xAD, 0x02, 0xAE, 0x02, 0xAF, 0x02,
- 0xB0, 0x02, 0xB1, 0x02, 0xB2, 0x02, 0xB3, 0x02,
- 0xB4, 0x02, 0xB5, 0x02, 0xB6, 0x02, 0xB7, 0x02,
- 0xB8, 0x02, 0xB9, 0x02, 0xBA, 0x02, 0xBB, 0x02,
- 0xBC, 0x02, 0xBD, 0x02, 0xBE, 0x02, 0xBF, 0x02,
- 0xC0, 0x02, 0xC1, 0x02, 0xC2, 0x02, 0xC3, 0x02,
- 0xC4, 0x02, 0xC5, 0x02, 0xC6, 0x02, 0xC7, 0x02,
- 0xC8, 0x02, 0xC9, 0x02, 0xCA, 0x02, 0xCB, 0x02,
- 0xCC, 0x02, 0xCD, 0x02, 0xCE, 0x02, 0xCF, 0x02,
- 0xD0, 0x02, 0xD1, 0x02, 0xD2, 0x02, 0xD3, 0x02,
- 0xD4, 0x02, 0xD5, 0x02, 0xD6, 0x02, 0xD7, 0x02,
- 0xD8, 0x02, 0xD9, 0x02, 0xDA, 0x02, 0xDB, 0x02,
- 0xDC, 0x02, 0xDD, 0x02, 0xDE, 0x02, 0xDF, 0x02,
- 0xE0, 0x02, 0xE1, 0x02, 0xE2, 0x02, 0xE3, 0x02,
- 0xE4, 0x02, 0xE5, 0x02, 0xE6, 0x02, 0xE7, 0x02,
- 0xE8, 0x02, 0xE9, 0x02, 0xEA, 0x02, 0xEB, 0x02,
- 0xEC, 0x02, 0xED, 0x02, 0xEE, 0x02, 0xEF, 0x02,
- 0xF0, 0x02, 0xF1, 0x02, 0xF2, 0x02, 0xF3, 0x02,
- 0xF4, 0x02, 0xF5, 0x02, 0xF6, 0x02, 0xF7, 0x02,
- 0xF8, 0x02, 0xF9, 0x02, 0xFA, 0x02, 0xFB, 0x02,
- 0xFC, 0x02, 0xFD, 0x02, 0xFE, 0x02, 0xFF, 0x02,
- 0x00, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x03,
- 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x07, 0x03,
- 0x08, 0x03, 0x09, 0x03, 0x0A, 0x03, 0x0B, 0x03,
- 0x0C, 0x03, 0x0D, 0x03, 0x0E, 0x03, 0x0F, 0x03,
- 0x10, 0x03, 0x11, 0x03, 0x12, 0x03, 0x13, 0x03,
- 0x14, 0x03, 0x15, 0x03, 0x16, 0x03, 0x17, 0x03,
- 0x18, 0x03, 0x19, 0x03, 0x1A, 0x03, 0x1B, 0x03,
- 0x1C, 0x03, 0x1D, 0x03, 0x1E, 0x03, 0x1F, 0x03,
- 0x20, 0x03, 0x21, 0x03, 0x22, 0x03, 0x23, 0x03,
- 0x24, 0x03, 0x25, 0x03, 0x26, 0x03, 0x27, 0x03,
- 0x28, 0x03, 0x29, 0x03, 0x2A, 0x03, 0x2B, 0x03,
- 0x2C, 0x03, 0x2D, 0x03, 0x2E, 0x03, 0x2F, 0x03,
- 0x30, 0x03, 0x31, 0x03, 0x32, 0x03, 0x33, 0x03,
- 0x34, 0x03, 0x35, 0x03, 0x36, 0x03, 0x37, 0x03,
- 0x38, 0x03, 0x39, 0x03, 0x3A, 0x03, 0x3B, 0x03,
- 0x3C, 0x03, 0x3D, 0x03, 0x3E, 0x03, 0x3F, 0x03,
- 0x40, 0x03, 0x41, 0x03, 0x42, 0x03, 0x43, 0x03,
- 0x44, 0x03, 0x45, 0x03, 0x46, 0x03, 0x47, 0x03,
- 0x48, 0x03, 0x49, 0x03, 0x4A, 0x03, 0x4B, 0x03,
- 0x4C, 0x03, 0x4D, 0x03, 0x4E, 0x03, 0x4F, 0x03,
- 0x50, 0x03, 0x51, 0x03, 0x52, 0x03, 0x53, 0x03,
- 0x54, 0x03, 0x55, 0x03, 0x56, 0x03, 0x57, 0x03,
- 0x58, 0x03, 0x59, 0x03, 0x5A, 0x03, 0x5B, 0x03,
- 0x5C, 0x03, 0x5D, 0x03, 0x5E, 0x03, 0x5F, 0x03,
- 0x60, 0x03, 0x61, 0x03, 0x62, 0x03, 0x63, 0x03,
- 0x64, 0x03, 0x65, 0x03, 0x66, 0x03, 0x67, 0x03,
- 0x68, 0x03, 0x69, 0x03, 0x6A, 0x03, 0x6B, 0x03,
- 0x6C, 0x03, 0x6D, 0x03, 0x6E, 0x03, 0x6F, 0x03,
- 0x70, 0x03, 0x71, 0x03, 0x72, 0x03, 0x73, 0x03,
- 0x74, 0x03, 0x75, 0x03, 0x76, 0x03, 0x77, 0x03,
- 0x78, 0x03, 0x79, 0x03, 0x7A, 0x03, 0xFD, 0x03,
- 0xFE, 0x03, 0xFF, 0x03, 0x7E, 0x03, 0x7F, 0x03,
- 0x80, 0x03, 0x81, 0x03, 0x82, 0x03, 0x83, 0x03,
- 0x84, 0x03, 0x85, 0x03, 0x86, 0x03, 0x87, 0x03,
- 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, 0x8B, 0x03,
- 0x8C, 0x03, 0x8D, 0x03, 0x8E, 0x03, 0x8F, 0x03,
- 0x90, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03,
- 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03,
- 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03,
- 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03,
- 0xA0, 0x03, 0xA1, 0x03, 0xA2, 0x03, 0xA3, 0x03,
- 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03,
- 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03,
- 0x86, 0x03, 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03,
- 0xB0, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03,
- 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03,
- 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03,
- 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03,
- 0xA0, 0x03, 0xA1, 0x03, 0xA3, 0x03, 0xA3, 0x03,
- 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03,
- 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03,
- 0x8C, 0x03, 0x8E, 0x03, 0x8F, 0x03, 0xCF, 0x03,
- 0xD0, 0x03, 0xD1, 0x03, 0xD2, 0x03, 0xD3, 0x03,
- 0xD4, 0x03, 0xD5, 0x03, 0xD6, 0x03, 0xD7, 0x03,
- 0xD8, 0x03, 0xD8, 0x03, 0xDA, 0x03, 0xDA, 0x03,
- 0xDC, 0x03, 0xDC, 0x03, 0xDE, 0x03, 0xDE, 0x03,
- 0xE0, 0x03, 0xE0, 0x03, 0xE2, 0x03, 0xE2, 0x03,
- 0xE4, 0x03, 0xE4, 0x03, 0xE6, 0x03, 0xE6, 0x03,
- 0xE8, 0x03, 0xE8, 0x03, 0xEA, 0x03, 0xEA, 0x03,
- 0xEC, 0x03, 0xEC, 0x03, 0xEE, 0x03, 0xEE, 0x03,
- 0xF0, 0x03, 0xF1, 0x03, 0xF9, 0x03, 0xF3, 0x03,
- 0xF4, 0x03, 0xF5, 0x03, 0xF6, 0x03, 0xF7, 0x03,
- 0xF7, 0x03, 0xF9, 0x03, 0xFA, 0x03, 0xFA, 0x03,
- 0xFC, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03,
- 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04,
- 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04,
- 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04,
- 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04,
- 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04,
- 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04,
- 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04,
- 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04,
- 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04,
- 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04,
- 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04,
- 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04,
- 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04,
- 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04,
- 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04,
- 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04,
- 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04,
- 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04,
- 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04,
- 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04,
- 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04,
- 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04,
- 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04,
- 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04,
- 0x60, 0x04, 0x60, 0x04, 0x62, 0x04, 0x62, 0x04,
- 0x64, 0x04, 0x64, 0x04, 0x66, 0x04, 0x66, 0x04,
- 0x68, 0x04, 0x68, 0x04, 0x6A, 0x04, 0x6A, 0x04,
- 0x6C, 0x04, 0x6C, 0x04, 0x6E, 0x04, 0x6E, 0x04,
- 0x70, 0x04, 0x70, 0x04, 0x72, 0x04, 0x72, 0x04,
- 0x74, 0x04, 0x74, 0x04, 0x76, 0x04, 0x76, 0x04,
- 0x78, 0x04, 0x78, 0x04, 0x7A, 0x04, 0x7A, 0x04,
- 0x7C, 0x04, 0x7C, 0x04, 0x7E, 0x04, 0x7E, 0x04,
- 0x80, 0x04, 0x80, 0x04, 0x82, 0x04, 0x83, 0x04,
- 0x84, 0x04, 0x85, 0x04, 0x86, 0x04, 0x87, 0x04,
- 0x88, 0x04, 0x89, 0x04, 0x8A, 0x04, 0x8A, 0x04,
- 0x8C, 0x04, 0x8C, 0x04, 0x8E, 0x04, 0x8E, 0x04,
- 0x90, 0x04, 0x90, 0x04, 0x92, 0x04, 0x92, 0x04,
- 0x94, 0x04, 0x94, 0x04, 0x96, 0x04, 0x96, 0x04,
- 0x98, 0x04, 0x98, 0x04, 0x9A, 0x04, 0x9A, 0x04,
- 0x9C, 0x04, 0x9C, 0x04, 0x9E, 0x04, 0x9E, 0x04,
- 0xA0, 0x04, 0xA0, 0x04, 0xA2, 0x04, 0xA2, 0x04,
- 0xA4, 0x04, 0xA4, 0x04, 0xA6, 0x04, 0xA6, 0x04,
- 0xA8, 0x04, 0xA8, 0x04, 0xAA, 0x04, 0xAA, 0x04,
- 0xAC, 0x04, 0xAC, 0x04, 0xAE, 0x04, 0xAE, 0x04,
- 0xB0, 0x04, 0xB0, 0x04, 0xB2, 0x04, 0xB2, 0x04,
- 0xB4, 0x04, 0xB4, 0x04, 0xB6, 0x04, 0xB6, 0x04,
- 0xB8, 0x04, 0xB8, 0x04, 0xBA, 0x04, 0xBA, 0x04,
- 0xBC, 0x04, 0xBC, 0x04, 0xBE, 0x04, 0xBE, 0x04,
- 0xC0, 0x04, 0xC1, 0x04, 0xC1, 0x04, 0xC3, 0x04,
- 0xC3, 0x04, 0xC5, 0x04, 0xC5, 0x04, 0xC7, 0x04,
- 0xC7, 0x04, 0xC9, 0x04, 0xC9, 0x04, 0xCB, 0x04,
- 0xCB, 0x04, 0xCD, 0x04, 0xCD, 0x04, 0xC0, 0x04,
- 0xD0, 0x04, 0xD0, 0x04, 0xD2, 0x04, 0xD2, 0x04,
- 0xD4, 0x04, 0xD4, 0x04, 0xD6, 0x04, 0xD6, 0x04,
- 0xD8, 0x04, 0xD8, 0x04, 0xDA, 0x04, 0xDA, 0x04,
- 0xDC, 0x04, 0xDC, 0x04, 0xDE, 0x04, 0xDE, 0x04,
- 0xE0, 0x04, 0xE0, 0x04, 0xE2, 0x04, 0xE2, 0x04,
- 0xE4, 0x04, 0xE4, 0x04, 0xE6, 0x04, 0xE6, 0x04,
- 0xE8, 0x04, 0xE8, 0x04, 0xEA, 0x04, 0xEA, 0x04,
- 0xEC, 0x04, 0xEC, 0x04, 0xEE, 0x04, 0xEE, 0x04,
- 0xF0, 0x04, 0xF0, 0x04, 0xF2, 0x04, 0xF2, 0x04,
- 0xF4, 0x04, 0xF4, 0x04, 0xF6, 0x04, 0xF6, 0x04,
- 0xF8, 0x04, 0xF8, 0x04, 0xFA, 0x04, 0xFA, 0x04,
- 0xFC, 0x04, 0xFC, 0x04, 0xFE, 0x04, 0xFE, 0x04,
- 0x00, 0x05, 0x00, 0x05, 0x02, 0x05, 0x02, 0x05,
- 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x05,
- 0x08, 0x05, 0x08, 0x05, 0x0A, 0x05, 0x0A, 0x05,
- 0x0C, 0x05, 0x0C, 0x05, 0x0E, 0x05, 0x0E, 0x05,
- 0x10, 0x05, 0x10, 0x05, 0x12, 0x05, 0x12, 0x05,
- 0x14, 0x05, 0x15, 0x05, 0x16, 0x05, 0x17, 0x05,
- 0x18, 0x05, 0x19, 0x05, 0x1A, 0x05, 0x1B, 0x05,
- 0x1C, 0x05, 0x1D, 0x05, 0x1E, 0x05, 0x1F, 0x05,
- 0x20, 0x05, 0x21, 0x05, 0x22, 0x05, 0x23, 0x05,
- 0x24, 0x05, 0x25, 0x05, 0x26, 0x05, 0x27, 0x05,
- 0x28, 0x05, 0x29, 0x05, 0x2A, 0x05, 0x2B, 0x05,
- 0x2C, 0x05, 0x2D, 0x05, 0x2E, 0x05, 0x2F, 0x05,
- 0x30, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05,
- 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05,
- 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05,
- 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05,
- 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05,
- 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05,
- 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05,
- 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05,
- 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05,
- 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0x57, 0x05,
- 0x58, 0x05, 0x59, 0x05, 0x5A, 0x05, 0x5B, 0x05,
- 0x5C, 0x05, 0x5D, 0x05, 0x5E, 0x05, 0x5F, 0x05,
- 0x60, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05,
- 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05,
- 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05,
- 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05,
- 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05,
- 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05,
- 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05,
- 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05,
- 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05,
- 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0xFF, 0xFF,
- 0xF6, 0x17, 0x63, 0x2C, 0x7E, 0x1D, 0x7F, 0x1D,
- 0x80, 0x1D, 0x81, 0x1D, 0x82, 0x1D, 0x83, 0x1D,
- 0x84, 0x1D, 0x85, 0x1D, 0x86, 0x1D, 0x87, 0x1D,
- 0x88, 0x1D, 0x89, 0x1D, 0x8A, 0x1D, 0x8B, 0x1D,
- 0x8C, 0x1D, 0x8D, 0x1D, 0x8E, 0x1D, 0x8F, 0x1D,
- 0x90, 0x1D, 0x91, 0x1D, 0x92, 0x1D, 0x93, 0x1D,
- 0x94, 0x1D, 0x95, 0x1D, 0x96, 0x1D, 0x97, 0x1D,
- 0x98, 0x1D, 0x99, 0x1D, 0x9A, 0x1D, 0x9B, 0x1D,
- 0x9C, 0x1D, 0x9D, 0x1D, 0x9E, 0x1D, 0x9F, 0x1D,
- 0xA0, 0x1D, 0xA1, 0x1D, 0xA2, 0x1D, 0xA3, 0x1D,
- 0xA4, 0x1D, 0xA5, 0x1D, 0xA6, 0x1D, 0xA7, 0x1D,
- 0xA8, 0x1D, 0xA9, 0x1D, 0xAA, 0x1D, 0xAB, 0x1D,
- 0xAC, 0x1D, 0xAD, 0x1D, 0xAE, 0x1D, 0xAF, 0x1D,
- 0xB0, 0x1D, 0xB1, 0x1D, 0xB2, 0x1D, 0xB3, 0x1D,
- 0xB4, 0x1D, 0xB5, 0x1D, 0xB6, 0x1D, 0xB7, 0x1D,
- 0xB8, 0x1D, 0xB9, 0x1D, 0xBA, 0x1D, 0xBB, 0x1D,
- 0xBC, 0x1D, 0xBD, 0x1D, 0xBE, 0x1D, 0xBF, 0x1D,
- 0xC0, 0x1D, 0xC1, 0x1D, 0xC2, 0x1D, 0xC3, 0x1D,
- 0xC4, 0x1D, 0xC5, 0x1D, 0xC6, 0x1D, 0xC7, 0x1D,
- 0xC8, 0x1D, 0xC9, 0x1D, 0xCA, 0x1D, 0xCB, 0x1D,
- 0xCC, 0x1D, 0xCD, 0x1D, 0xCE, 0x1D, 0xCF, 0x1D,
- 0xD0, 0x1D, 0xD1, 0x1D, 0xD2, 0x1D, 0xD3, 0x1D,
- 0xD4, 0x1D, 0xD5, 0x1D, 0xD6, 0x1D, 0xD7, 0x1D,
- 0xD8, 0x1D, 0xD9, 0x1D, 0xDA, 0x1D, 0xDB, 0x1D,
- 0xDC, 0x1D, 0xDD, 0x1D, 0xDE, 0x1D, 0xDF, 0x1D,
- 0xE0, 0x1D, 0xE1, 0x1D, 0xE2, 0x1D, 0xE3, 0x1D,
- 0xE4, 0x1D, 0xE5, 0x1D, 0xE6, 0x1D, 0xE7, 0x1D,
- 0xE8, 0x1D, 0xE9, 0x1D, 0xEA, 0x1D, 0xEB, 0x1D,
- 0xEC, 0x1D, 0xED, 0x1D, 0xEE, 0x1D, 0xEF, 0x1D,
- 0xF0, 0x1D, 0xF1, 0x1D, 0xF2, 0x1D, 0xF3, 0x1D,
- 0xF4, 0x1D, 0xF5, 0x1D, 0xF6, 0x1D, 0xF7, 0x1D,
- 0xF8, 0x1D, 0xF9, 0x1D, 0xFA, 0x1D, 0xFB, 0x1D,
- 0xFC, 0x1D, 0xFD, 0x1D, 0xFE, 0x1D, 0xFF, 0x1D,
- 0x00, 0x1E, 0x00, 0x1E, 0x02, 0x1E, 0x02, 0x1E,
- 0x04, 0x1E, 0x04, 0x1E, 0x06, 0x1E, 0x06, 0x1E,
- 0x08, 0x1E, 0x08, 0x1E, 0x0A, 0x1E, 0x0A, 0x1E,
- 0x0C, 0x1E, 0x0C, 0x1E, 0x0E, 0x1E, 0x0E, 0x1E,
- 0x10, 0x1E, 0x10, 0x1E, 0x12, 0x1E, 0x12, 0x1E,
- 0x14, 0x1E, 0x14, 0x1E, 0x16, 0x1E, 0x16, 0x1E,
- 0x18, 0x1E, 0x18, 0x1E, 0x1A, 0x1E, 0x1A, 0x1E,
- 0x1C, 0x1E, 0x1C, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
- 0x20, 0x1E, 0x20, 0x1E, 0x22, 0x1E, 0x22, 0x1E,
- 0x24, 0x1E, 0x24, 0x1E, 0x26, 0x1E, 0x26, 0x1E,
- 0x28, 0x1E, 0x28, 0x1E, 0x2A, 0x1E, 0x2A, 0x1E,
- 0x2C, 0x1E, 0x2C, 0x1E, 0x2E, 0x1E, 0x2E, 0x1E,
- 0x30, 0x1E, 0x30, 0x1E, 0x32, 0x1E, 0x32, 0x1E,
- 0x34, 0x1E, 0x34, 0x1E, 0x36, 0x1E, 0x36, 0x1E,
- 0x38, 0x1E, 0x38, 0x1E, 0x3A, 0x1E, 0x3A, 0x1E,
- 0x3C, 0x1E, 0x3C, 0x1E, 0x3E, 0x1E, 0x3E, 0x1E,
- 0x40, 0x1E, 0x40, 0x1E, 0x42, 0x1E, 0x42, 0x1E,
- 0x44, 0x1E, 0x44, 0x1E, 0x46, 0x1E, 0x46, 0x1E,
- 0x48, 0x1E, 0x48, 0x1E, 0x4A, 0x1E, 0x4A, 0x1E,
- 0x4C, 0x1E, 0x4C, 0x1E, 0x4E, 0x1E, 0x4E, 0x1E,
- 0x50, 0x1E, 0x50, 0x1E, 0x52, 0x1E, 0x52, 0x1E,
- 0x54, 0x1E, 0x54, 0x1E, 0x56, 0x1E, 0x56, 0x1E,
- 0x58, 0x1E, 0x58, 0x1E, 0x5A, 0x1E, 0x5A, 0x1E,
- 0x5C, 0x1E, 0x5C, 0x1E, 0x5E, 0x1E, 0x5E, 0x1E,
- 0x60, 0x1E, 0x60, 0x1E, 0x62, 0x1E, 0x62, 0x1E,
- 0x64, 0x1E, 0x64, 0x1E, 0x66, 0x1E, 0x66, 0x1E,
- 0x68, 0x1E, 0x68, 0x1E, 0x6A, 0x1E, 0x6A, 0x1E,
- 0x6C, 0x1E, 0x6C, 0x1E, 0x6E, 0x1E, 0x6E, 0x1E,
- 0x70, 0x1E, 0x70, 0x1E, 0x72, 0x1E, 0x72, 0x1E,
- 0x74, 0x1E, 0x74, 0x1E, 0x76, 0x1E, 0x76, 0x1E,
- 0x78, 0x1E, 0x78, 0x1E, 0x7A, 0x1E, 0x7A, 0x1E,
- 0x7C, 0x1E, 0x7C, 0x1E, 0x7E, 0x1E, 0x7E, 0x1E,
- 0x80, 0x1E, 0x80, 0x1E, 0x82, 0x1E, 0x82, 0x1E,
- 0x84, 0x1E, 0x84, 0x1E, 0x86, 0x1E, 0x86, 0x1E,
- 0x88, 0x1E, 0x88, 0x1E, 0x8A, 0x1E, 0x8A, 0x1E,
- 0x8C, 0x1E, 0x8C, 0x1E, 0x8E, 0x1E, 0x8E, 0x1E,
- 0x90, 0x1E, 0x90, 0x1E, 0x92, 0x1E, 0x92, 0x1E,
- 0x94, 0x1E, 0x94, 0x1E, 0x96, 0x1E, 0x97, 0x1E,
- 0x98, 0x1E, 0x99, 0x1E, 0x9A, 0x1E, 0x9B, 0x1E,
- 0x9C, 0x1E, 0x9D, 0x1E, 0x9E, 0x1E, 0x9F, 0x1E,
- 0xA0, 0x1E, 0xA0, 0x1E, 0xA2, 0x1E, 0xA2, 0x1E,
- 0xA4, 0x1E, 0xA4, 0x1E, 0xA6, 0x1E, 0xA6, 0x1E,
- 0xA8, 0x1E, 0xA8, 0x1E, 0xAA, 0x1E, 0xAA, 0x1E,
- 0xAC, 0x1E, 0xAC, 0x1E, 0xAE, 0x1E, 0xAE, 0x1E,
- 0xB0, 0x1E, 0xB0, 0x1E, 0xB2, 0x1E, 0xB2, 0x1E,
- 0xB4, 0x1E, 0xB4, 0x1E, 0xB6, 0x1E, 0xB6, 0x1E,
- 0xB8, 0x1E, 0xB8, 0x1E, 0xBA, 0x1E, 0xBA, 0x1E,
- 0xBC, 0x1E, 0xBC, 0x1E, 0xBE, 0x1E, 0xBE, 0x1E,
- 0xC0, 0x1E, 0xC0, 0x1E, 0xC2, 0x1E, 0xC2, 0x1E,
- 0xC4, 0x1E, 0xC4, 0x1E, 0xC6, 0x1E, 0xC6, 0x1E,
- 0xC8, 0x1E, 0xC8, 0x1E, 0xCA, 0x1E, 0xCA, 0x1E,
- 0xCC, 0x1E, 0xCC, 0x1E, 0xCE, 0x1E, 0xCE, 0x1E,
- 0xD0, 0x1E, 0xD0, 0x1E, 0xD2, 0x1E, 0xD2, 0x1E,
- 0xD4, 0x1E, 0xD4, 0x1E, 0xD6, 0x1E, 0xD6, 0x1E,
- 0xD8, 0x1E, 0xD8, 0x1E, 0xDA, 0x1E, 0xDA, 0x1E,
- 0xDC, 0x1E, 0xDC, 0x1E, 0xDE, 0x1E, 0xDE, 0x1E,
- 0xE0, 0x1E, 0xE0, 0x1E, 0xE2, 0x1E, 0xE2, 0x1E,
- 0xE4, 0x1E, 0xE4, 0x1E, 0xE6, 0x1E, 0xE6, 0x1E,
- 0xE8, 0x1E, 0xE8, 0x1E, 0xEA, 0x1E, 0xEA, 0x1E,
- 0xEC, 0x1E, 0xEC, 0x1E, 0xEE, 0x1E, 0xEE, 0x1E,
- 0xF0, 0x1E, 0xF0, 0x1E, 0xF2, 0x1E, 0xF2, 0x1E,
- 0xF4, 0x1E, 0xF4, 0x1E, 0xF6, 0x1E, 0xF6, 0x1E,
- 0xF8, 0x1E, 0xF8, 0x1E, 0xFA, 0x1E, 0xFB, 0x1E,
- 0xFC, 0x1E, 0xFD, 0x1E, 0xFE, 0x1E, 0xFF, 0x1E,
- 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
- 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F,
- 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
- 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F,
- 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
- 0x1C, 0x1F, 0x1D, 0x1F, 0x16, 0x1F, 0x17, 0x1F,
- 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
- 0x1C, 0x1F, 0x1D, 0x1F, 0x1E, 0x1F, 0x1F, 0x1F,
- 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
- 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F,
- 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
- 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F,
- 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
- 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F,
- 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
- 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F,
- 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
- 0x4C, 0x1F, 0x4D, 0x1F, 0x46, 0x1F, 0x47, 0x1F,
- 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
- 0x4C, 0x1F, 0x4D, 0x1F, 0x4E, 0x1F, 0x4F, 0x1F,
- 0x50, 0x1F, 0x59, 0x1F, 0x52, 0x1F, 0x5B, 0x1F,
- 0x54, 0x1F, 0x5D, 0x1F, 0x56, 0x1F, 0x5F, 0x1F,
- 0x58, 0x1F, 0x59, 0x1F, 0x5A, 0x1F, 0x5B, 0x1F,
- 0x5C, 0x1F, 0x5D, 0x1F, 0x5E, 0x1F, 0x5F, 0x1F,
- 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
- 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F,
- 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
- 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F,
- 0xBA, 0x1F, 0xBB, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F,
- 0xCA, 0x1F, 0xCB, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
- 0xF8, 0x1F, 0xF9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
- 0xFA, 0x1F, 0xFB, 0x1F, 0x7E, 0x1F, 0x7F, 0x1F,
- 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
- 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F,
- 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
- 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F,
- 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
- 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F,
- 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
- 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F,
- 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
- 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F,
- 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
- 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F,
- 0xB8, 0x1F, 0xB9, 0x1F, 0xB2, 0x1F, 0xBC, 0x1F,
- 0xB4, 0x1F, 0xB5, 0x1F, 0xB6, 0x1F, 0xB7, 0x1F,
- 0xB8, 0x1F, 0xB9, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F,
- 0xBC, 0x1F, 0xBD, 0x1F, 0xBE, 0x1F, 0xBF, 0x1F,
- 0xC0, 0x1F, 0xC1, 0x1F, 0xC2, 0x1F, 0xC3, 0x1F,
- 0xC4, 0x1F, 0xC5, 0x1F, 0xC6, 0x1F, 0xC7, 0x1F,
- 0xC8, 0x1F, 0xC9, 0x1F, 0xCA, 0x1F, 0xCB, 0x1F,
- 0xC3, 0x1F, 0xCD, 0x1F, 0xCE, 0x1F, 0xCF, 0x1F,
- 0xD8, 0x1F, 0xD9, 0x1F, 0xD2, 0x1F, 0xD3, 0x1F,
- 0xD4, 0x1F, 0xD5, 0x1F, 0xD6, 0x1F, 0xD7, 0x1F,
- 0xD8, 0x1F, 0xD9, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
- 0xDC, 0x1F, 0xDD, 0x1F, 0xDE, 0x1F, 0xDF, 0x1F,
- 0xE8, 0x1F, 0xE9, 0x1F, 0xE2, 0x1F, 0xE3, 0x1F,
- 0xE4, 0x1F, 0xEC, 0x1F, 0xE6, 0x1F, 0xE7, 0x1F,
- 0xE8, 0x1F, 0xE9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
- 0xEC, 0x1F, 0xED, 0x1F, 0xEE, 0x1F, 0xEF, 0x1F,
- 0xF0, 0x1F, 0xF1, 0x1F, 0xF2, 0x1F, 0xF3, 0x1F,
- 0xF4, 0x1F, 0xF5, 0x1F, 0xF6, 0x1F, 0xF7, 0x1F,
- 0xF8, 0x1F, 0xF9, 0x1F, 0xFA, 0x1F, 0xFB, 0x1F,
- 0xF3, 0x1F, 0xFD, 0x1F, 0xFE, 0x1F, 0xFF, 0x1F,
- 0x00, 0x20, 0x01, 0x20, 0x02, 0x20, 0x03, 0x20,
- 0x04, 0x20, 0x05, 0x20, 0x06, 0x20, 0x07, 0x20,
- 0x08, 0x20, 0x09, 0x20, 0x0A, 0x20, 0x0B, 0x20,
- 0x0C, 0x20, 0x0D, 0x20, 0x0E, 0x20, 0x0F, 0x20,
- 0x10, 0x20, 0x11, 0x20, 0x12, 0x20, 0x13, 0x20,
- 0x14, 0x20, 0x15, 0x20, 0x16, 0x20, 0x17, 0x20,
- 0x18, 0x20, 0x19, 0x20, 0x1A, 0x20, 0x1B, 0x20,
- 0x1C, 0x20, 0x1D, 0x20, 0x1E, 0x20, 0x1F, 0x20,
- 0x20, 0x20, 0x21, 0x20, 0x22, 0x20, 0x23, 0x20,
- 0x24, 0x20, 0x25, 0x20, 0x26, 0x20, 0x27, 0x20,
- 0x28, 0x20, 0x29, 0x20, 0x2A, 0x20, 0x2B, 0x20,
- 0x2C, 0x20, 0x2D, 0x20, 0x2E, 0x20, 0x2F, 0x20,
- 0x30, 0x20, 0x31, 0x20, 0x32, 0x20, 0x33, 0x20,
- 0x34, 0x20, 0x35, 0x20, 0x36, 0x20, 0x37, 0x20,
- 0x38, 0x20, 0x39, 0x20, 0x3A, 0x20, 0x3B, 0x20,
- 0x3C, 0x20, 0x3D, 0x20, 0x3E, 0x20, 0x3F, 0x20,
- 0x40, 0x20, 0x41, 0x20, 0x42, 0x20, 0x43, 0x20,
- 0x44, 0x20, 0x45, 0x20, 0x46, 0x20, 0x47, 0x20,
- 0x48, 0x20, 0x49, 0x20, 0x4A, 0x20, 0x4B, 0x20,
- 0x4C, 0x20, 0x4D, 0x20, 0x4E, 0x20, 0x4F, 0x20,
- 0x50, 0x20, 0x51, 0x20, 0x52, 0x20, 0x53, 0x20,
- 0x54, 0x20, 0x55, 0x20, 0x56, 0x20, 0x57, 0x20,
- 0x58, 0x20, 0x59, 0x20, 0x5A, 0x20, 0x5B, 0x20,
- 0x5C, 0x20, 0x5D, 0x20, 0x5E, 0x20, 0x5F, 0x20,
- 0x60, 0x20, 0x61, 0x20, 0x62, 0x20, 0x63, 0x20,
- 0x64, 0x20, 0x65, 0x20, 0x66, 0x20, 0x67, 0x20,
- 0x68, 0x20, 0x69, 0x20, 0x6A, 0x20, 0x6B, 0x20,
- 0x6C, 0x20, 0x6D, 0x20, 0x6E, 0x20, 0x6F, 0x20,
- 0x70, 0x20, 0x71, 0x20, 0x72, 0x20, 0x73, 0x20,
- 0x74, 0x20, 0x75, 0x20, 0x76, 0x20, 0x77, 0x20,
- 0x78, 0x20, 0x79, 0x20, 0x7A, 0x20, 0x7B, 0x20,
- 0x7C, 0x20, 0x7D, 0x20, 0x7E, 0x20, 0x7F, 0x20,
- 0x80, 0x20, 0x81, 0x20, 0x82, 0x20, 0x83, 0x20,
- 0x84, 0x20, 0x85, 0x20, 0x86, 0x20, 0x87, 0x20,
- 0x88, 0x20, 0x89, 0x20, 0x8A, 0x20, 0x8B, 0x20,
- 0x8C, 0x20, 0x8D, 0x20, 0x8E, 0x20, 0x8F, 0x20,
- 0x90, 0x20, 0x91, 0x20, 0x92, 0x20, 0x93, 0x20,
- 0x94, 0x20, 0x95, 0x20, 0x96, 0x20, 0x97, 0x20,
- 0x98, 0x20, 0x99, 0x20, 0x9A, 0x20, 0x9B, 0x20,
- 0x9C, 0x20, 0x9D, 0x20, 0x9E, 0x20, 0x9F, 0x20,
- 0xA0, 0x20, 0xA1, 0x20, 0xA2, 0x20, 0xA3, 0x20,
- 0xA4, 0x20, 0xA5, 0x20, 0xA6, 0x20, 0xA7, 0x20,
- 0xA8, 0x20, 0xA9, 0x20, 0xAA, 0x20, 0xAB, 0x20,
- 0xAC, 0x20, 0xAD, 0x20, 0xAE, 0x20, 0xAF, 0x20,
- 0xB0, 0x20, 0xB1, 0x20, 0xB2, 0x20, 0xB3, 0x20,
- 0xB4, 0x20, 0xB5, 0x20, 0xB6, 0x20, 0xB7, 0x20,
- 0xB8, 0x20, 0xB9, 0x20, 0xBA, 0x20, 0xBB, 0x20,
- 0xBC, 0x20, 0xBD, 0x20, 0xBE, 0x20, 0xBF, 0x20,
- 0xC0, 0x20, 0xC1, 0x20, 0xC2, 0x20, 0xC3, 0x20,
- 0xC4, 0x20, 0xC5, 0x20, 0xC6, 0x20, 0xC7, 0x20,
- 0xC8, 0x20, 0xC9, 0x20, 0xCA, 0x20, 0xCB, 0x20,
- 0xCC, 0x20, 0xCD, 0x20, 0xCE, 0x20, 0xCF, 0x20,
- 0xD0, 0x20, 0xD1, 0x20, 0xD2, 0x20, 0xD3, 0x20,
- 0xD4, 0x20, 0xD5, 0x20, 0xD6, 0x20, 0xD7, 0x20,
- 0xD8, 0x20, 0xD9, 0x20, 0xDA, 0x20, 0xDB, 0x20,
- 0xDC, 0x20, 0xDD, 0x20, 0xDE, 0x20, 0xDF, 0x20,
- 0xE0, 0x20, 0xE1, 0x20, 0xE2, 0x20, 0xE3, 0x20,
- 0xE4, 0x20, 0xE5, 0x20, 0xE6, 0x20, 0xE7, 0x20,
- 0xE8, 0x20, 0xE9, 0x20, 0xEA, 0x20, 0xEB, 0x20,
- 0xEC, 0x20, 0xED, 0x20, 0xEE, 0x20, 0xEF, 0x20,
- 0xF0, 0x20, 0xF1, 0x20, 0xF2, 0x20, 0xF3, 0x20,
- 0xF4, 0x20, 0xF5, 0x20, 0xF6, 0x20, 0xF7, 0x20,
- 0xF8, 0x20, 0xF9, 0x20, 0xFA, 0x20, 0xFB, 0x20,
- 0xFC, 0x20, 0xFD, 0x20, 0xFE, 0x20, 0xFF, 0x20,
- 0x00, 0x21, 0x01, 0x21, 0x02, 0x21, 0x03, 0x21,
- 0x04, 0x21, 0x05, 0x21, 0x06, 0x21, 0x07, 0x21,
- 0x08, 0x21, 0x09, 0x21, 0x0A, 0x21, 0x0B, 0x21,
- 0x0C, 0x21, 0x0D, 0x21, 0x0E, 0x21, 0x0F, 0x21,
- 0x10, 0x21, 0x11, 0x21, 0x12, 0x21, 0x13, 0x21,
- 0x14, 0x21, 0x15, 0x21, 0x16, 0x21, 0x17, 0x21,
- 0x18, 0x21, 0x19, 0x21, 0x1A, 0x21, 0x1B, 0x21,
- 0x1C, 0x21, 0x1D, 0x21, 0x1E, 0x21, 0x1F, 0x21,
- 0x20, 0x21, 0x21, 0x21, 0x22, 0x21, 0x23, 0x21,
- 0x24, 0x21, 0x25, 0x21, 0x26, 0x21, 0x27, 0x21,
- 0x28, 0x21, 0x29, 0x21, 0x2A, 0x21, 0x2B, 0x21,
- 0x2C, 0x21, 0x2D, 0x21, 0x2E, 0x21, 0x2F, 0x21,
- 0x30, 0x21, 0x31, 0x21, 0x32, 0x21, 0x33, 0x21,
- 0x34, 0x21, 0x35, 0x21, 0x36, 0x21, 0x37, 0x21,
- 0x38, 0x21, 0x39, 0x21, 0x3A, 0x21, 0x3B, 0x21,
- 0x3C, 0x21, 0x3D, 0x21, 0x3E, 0x21, 0x3F, 0x21,
- 0x40, 0x21, 0x41, 0x21, 0x42, 0x21, 0x43, 0x21,
- 0x44, 0x21, 0x45, 0x21, 0x46, 0x21, 0x47, 0x21,
- 0x48, 0x21, 0x49, 0x21, 0x4A, 0x21, 0x4B, 0x21,
- 0x4C, 0x21, 0x4D, 0x21, 0x32, 0x21, 0x4F, 0x21,
- 0x50, 0x21, 0x51, 0x21, 0x52, 0x21, 0x53, 0x21,
- 0x54, 0x21, 0x55, 0x21, 0x56, 0x21, 0x57, 0x21,
- 0x58, 0x21, 0x59, 0x21, 0x5A, 0x21, 0x5B, 0x21,
- 0x5C, 0x21, 0x5D, 0x21, 0x5E, 0x21, 0x5F, 0x21,
- 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
- 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21,
- 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
- 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21,
- 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
- 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21,
- 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
- 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21,
- 0x80, 0x21, 0x81, 0x21, 0x82, 0x21, 0x83, 0x21,
- 0x83, 0x21, 0xFF, 0xFF, 0x4B, 0x03, 0xB6, 0x24,
- 0xB7, 0x24, 0xB8, 0x24, 0xB9, 0x24, 0xBA, 0x24,
- 0xBB, 0x24, 0xBC, 0x24, 0xBD, 0x24, 0xBE, 0x24,
- 0xBF, 0x24, 0xC0, 0x24, 0xC1, 0x24, 0xC2, 0x24,
- 0xC3, 0x24, 0xC4, 0x24, 0xC5, 0x24, 0xC6, 0x24,
- 0xC7, 0x24, 0xC8, 0x24, 0xC9, 0x24, 0xCA, 0x24,
- 0xCB, 0x24, 0xCC, 0x24, 0xCD, 0x24, 0xCE, 0x24,
- 0xCF, 0x24, 0xFF, 0xFF, 0x46, 0x07, 0x00, 0x2C,
- 0x01, 0x2C, 0x02, 0x2C, 0x03, 0x2C, 0x04, 0x2C,
- 0x05, 0x2C, 0x06, 0x2C, 0x07, 0x2C, 0x08, 0x2C,
- 0x09, 0x2C, 0x0A, 0x2C, 0x0B, 0x2C, 0x0C, 0x2C,
- 0x0D, 0x2C, 0x0E, 0x2C, 0x0F, 0x2C, 0x10, 0x2C,
- 0x11, 0x2C, 0x12, 0x2C, 0x13, 0x2C, 0x14, 0x2C,
- 0x15, 0x2C, 0x16, 0x2C, 0x17, 0x2C, 0x18, 0x2C,
- 0x19, 0x2C, 0x1A, 0x2C, 0x1B, 0x2C, 0x1C, 0x2C,
- 0x1D, 0x2C, 0x1E, 0x2C, 0x1F, 0x2C, 0x20, 0x2C,
- 0x21, 0x2C, 0x22, 0x2C, 0x23, 0x2C, 0x24, 0x2C,
- 0x25, 0x2C, 0x26, 0x2C, 0x27, 0x2C, 0x28, 0x2C,
- 0x29, 0x2C, 0x2A, 0x2C, 0x2B, 0x2C, 0x2C, 0x2C,
- 0x2D, 0x2C, 0x2E, 0x2C, 0x5F, 0x2C, 0x60, 0x2C,
- 0x60, 0x2C, 0x62, 0x2C, 0x63, 0x2C, 0x64, 0x2C,
- 0x65, 0x2C, 0x66, 0x2C, 0x67, 0x2C, 0x67, 0x2C,
- 0x69, 0x2C, 0x69, 0x2C, 0x6B, 0x2C, 0x6B, 0x2C,
- 0x6D, 0x2C, 0x6E, 0x2C, 0x6F, 0x2C, 0x70, 0x2C,
- 0x71, 0x2C, 0x72, 0x2C, 0x73, 0x2C, 0x74, 0x2C,
- 0x75, 0x2C, 0x75, 0x2C, 0x77, 0x2C, 0x78, 0x2C,
- 0x79, 0x2C, 0x7A, 0x2C, 0x7B, 0x2C, 0x7C, 0x2C,
- 0x7D, 0x2C, 0x7E, 0x2C, 0x7F, 0x2C, 0x80, 0x2C,
- 0x80, 0x2C, 0x82, 0x2C, 0x82, 0x2C, 0x84, 0x2C,
- 0x84, 0x2C, 0x86, 0x2C, 0x86, 0x2C, 0x88, 0x2C,
- 0x88, 0x2C, 0x8A, 0x2C, 0x8A, 0x2C, 0x8C, 0x2C,
- 0x8C, 0x2C, 0x8E, 0x2C, 0x8E, 0x2C, 0x90, 0x2C,
- 0x90, 0x2C, 0x92, 0x2C, 0x92, 0x2C, 0x94, 0x2C,
- 0x94, 0x2C, 0x96, 0x2C, 0x96, 0x2C, 0x98, 0x2C,
- 0x98, 0x2C, 0x9A, 0x2C, 0x9A, 0x2C, 0x9C, 0x2C,
- 0x9C, 0x2C, 0x9E, 0x2C, 0x9E, 0x2C, 0xA0, 0x2C,
- 0xA0, 0x2C, 0xA2, 0x2C, 0xA2, 0x2C, 0xA4, 0x2C,
- 0xA4, 0x2C, 0xA6, 0x2C, 0xA6, 0x2C, 0xA8, 0x2C,
- 0xA8, 0x2C, 0xAA, 0x2C, 0xAA, 0x2C, 0xAC, 0x2C,
- 0xAC, 0x2C, 0xAE, 0x2C, 0xAE, 0x2C, 0xB0, 0x2C,
- 0xB0, 0x2C, 0xB2, 0x2C, 0xB2, 0x2C, 0xB4, 0x2C,
- 0xB4, 0x2C, 0xB6, 0x2C, 0xB6, 0x2C, 0xB8, 0x2C,
- 0xB8, 0x2C, 0xBA, 0x2C, 0xBA, 0x2C, 0xBC, 0x2C,
- 0xBC, 0x2C, 0xBE, 0x2C, 0xBE, 0x2C, 0xC0, 0x2C,
- 0xC0, 0x2C, 0xC2, 0x2C, 0xC2, 0x2C, 0xC4, 0x2C,
- 0xC4, 0x2C, 0xC6, 0x2C, 0xC6, 0x2C, 0xC8, 0x2C,
- 0xC8, 0x2C, 0xCA, 0x2C, 0xCA, 0x2C, 0xCC, 0x2C,
- 0xCC, 0x2C, 0xCE, 0x2C, 0xCE, 0x2C, 0xD0, 0x2C,
- 0xD0, 0x2C, 0xD2, 0x2C, 0xD2, 0x2C, 0xD4, 0x2C,
- 0xD4, 0x2C, 0xD6, 0x2C, 0xD6, 0x2C, 0xD8, 0x2C,
- 0xD8, 0x2C, 0xDA, 0x2C, 0xDA, 0x2C, 0xDC, 0x2C,
- 0xDC, 0x2C, 0xDE, 0x2C, 0xDE, 0x2C, 0xE0, 0x2C,
- 0xE0, 0x2C, 0xE2, 0x2C, 0xE2, 0x2C, 0xE4, 0x2C,
- 0xE5, 0x2C, 0xE6, 0x2C, 0xE7, 0x2C, 0xE8, 0x2C,
- 0xE9, 0x2C, 0xEA, 0x2C, 0xEB, 0x2C, 0xEC, 0x2C,
- 0xED, 0x2C, 0xEE, 0x2C, 0xEF, 0x2C, 0xF0, 0x2C,
- 0xF1, 0x2C, 0xF2, 0x2C, 0xF3, 0x2C, 0xF4, 0x2C,
- 0xF5, 0x2C, 0xF6, 0x2C, 0xF7, 0x2C, 0xF8, 0x2C,
- 0xF9, 0x2C, 0xFA, 0x2C, 0xFB, 0x2C, 0xFC, 0x2C,
- 0xFD, 0x2C, 0xFE, 0x2C, 0xFF, 0x2C, 0xA0, 0x10,
- 0xA1, 0x10, 0xA2, 0x10, 0xA3, 0x10, 0xA4, 0x10,
- 0xA5, 0x10, 0xA6, 0x10, 0xA7, 0x10, 0xA8, 0x10,
- 0xA9, 0x10, 0xAA, 0x10, 0xAB, 0x10, 0xAC, 0x10,
- 0xAD, 0x10, 0xAE, 0x10, 0xAF, 0x10, 0xB0, 0x10,
- 0xB1, 0x10, 0xB2, 0x10, 0xB3, 0x10, 0xB4, 0x10,
- 0xB5, 0x10, 0xB6, 0x10, 0xB7, 0x10, 0xB8, 0x10,
- 0xB9, 0x10, 0xBA, 0x10, 0xBB, 0x10, 0xBC, 0x10,
- 0xBD, 0x10, 0xBE, 0x10, 0xBF, 0x10, 0xC0, 0x10,
- 0xC1, 0x10, 0xC2, 0x10, 0xC3, 0x10, 0xC4, 0x10,
- 0xC5, 0x10, 0xFF, 0xFF, 0x1B, 0xD2, 0x21, 0xFF,
- 0x22, 0xFF, 0x23, 0xFF, 0x24, 0xFF, 0x25, 0xFF,
- 0x26, 0xFF, 0x27, 0xFF, 0x28, 0xFF, 0x29, 0xFF,
- 0x2A, 0xFF, 0x2B, 0xFF, 0x2C, 0xFF, 0x2D, 0xFF,
- 0x2E, 0xFF, 0x2F, 0xFF, 0x30, 0xFF, 0x31, 0xFF,
- 0x32, 0xFF, 0x33, 0xFF, 0x34, 0xFF, 0x35, 0xFF,
- 0x36, 0xFF, 0x37, 0xFF, 0x38, 0xFF, 0x39, 0xFF,
- 0x3A, 0xFF, 0x5B, 0xFF, 0x5C, 0xFF, 0x5D, 0xFF,
- 0x5E, 0xFF, 0x5F, 0xFF, 0x60, 0xFF, 0x61, 0xFF,
- 0x62, 0xFF, 0x63, 0xFF, 0x64, 0xFF, 0x65, 0xFF,
- 0x66, 0xFF, 0x67, 0xFF, 0x68, 0xFF, 0x69, 0xFF,
- 0x6A, 0xFF, 0x6B, 0xFF, 0x6C, 0xFF, 0x6D, 0xFF,
- 0x6E, 0xFF, 0x6F, 0xFF, 0x70, 0xFF, 0x71, 0xFF,
- 0x72, 0xFF, 0x73, 0xFF, 0x74, 0xFF, 0x75, 0xFF,
- 0x76, 0xFF, 0x77, 0xFF, 0x78, 0xFF, 0x79, 0xFF,
- 0x7A, 0xFF, 0x7B, 0xFF, 0x7C, 0xFF, 0x7D, 0xFF,
- 0x7E, 0xFF, 0x7F, 0xFF, 0x80, 0xFF, 0x81, 0xFF,
- 0x82, 0xFF, 0x83, 0xFF, 0x84, 0xFF, 0x85, 0xFF,
- 0x86, 0xFF, 0x87, 0xFF, 0x88, 0xFF, 0x89, 0xFF,
- 0x8A, 0xFF, 0x8B, 0xFF, 0x8C, 0xFF, 0x8D, 0xFF,
- 0x8E, 0xFF, 0x8F, 0xFF, 0x90, 0xFF, 0x91, 0xFF,
- 0x92, 0xFF, 0x93, 0xFF, 0x94, 0xFF, 0x95, 0xFF,
- 0x96, 0xFF, 0x97, 0xFF, 0x98, 0xFF, 0x99, 0xFF,
- 0x9A, 0xFF, 0x9B, 0xFF, 0x9C, 0xFF, 0x9D, 0xFF,
- 0x9E, 0xFF, 0x9F, 0xFF, 0xA0, 0xFF, 0xA1, 0xFF,
- 0xA2, 0xFF, 0xA3, 0xFF, 0xA4, 0xFF, 0xA5, 0xFF,
- 0xA6, 0xFF, 0xA7, 0xFF, 0xA8, 0xFF, 0xA9, 0xFF,
- 0xAA, 0xFF, 0xAB, 0xFF, 0xAC, 0xFF, 0xAD, 0xFF,
- 0xAE, 0xFF, 0xAF, 0xFF, 0xB0, 0xFF, 0xB1, 0xFF,
- 0xB2, 0xFF, 0xB3, 0xFF, 0xB4, 0xFF, 0xB5, 0xFF,
- 0xB6, 0xFF, 0xB7, 0xFF, 0xB8, 0xFF, 0xB9, 0xFF,
- 0xBA, 0xFF, 0xBB, 0xFF, 0xBC, 0xFF, 0xBD, 0xFF,
- 0xBE, 0xFF, 0xBF, 0xFF, 0xC0, 0xFF, 0xC1, 0xFF,
- 0xC2, 0xFF, 0xC3, 0xFF, 0xC4, 0xFF, 0xC5, 0xFF,
- 0xC6, 0xFF, 0xC7, 0xFF, 0xC8, 0xFF, 0xC9, 0xFF,
- 0xCA, 0xFF, 0xCB, 0xFF, 0xCC, 0xFF, 0xCD, 0xFF,
- 0xCE, 0xFF, 0xCF, 0xFF, 0xD0, 0xFF, 0xD1, 0xFF,
- 0xD2, 0xFF, 0xD3, 0xFF, 0xD4, 0xFF, 0xD5, 0xFF,
- 0xD6, 0xFF, 0xD7, 0xFF, 0xD8, 0xFF, 0xD9, 0xFF,
- 0xDA, 0xFF, 0xDB, 0xFF, 0xDC, 0xFF, 0xDD, 0xFF,
- 0xDE, 0xFF, 0xDF, 0xFF, 0xE0, 0xFF, 0xE1, 0xFF,
- 0xE2, 0xFF, 0xE3, 0xFF, 0xE4, 0xFF, 0xE5, 0xFF,
- 0xE6, 0xFF, 0xE7, 0xFF, 0xE8, 0xFF, 0xE9, 0xFF,
- 0xEA, 0xFF, 0xEB, 0xFF, 0xEC, 0xFF, 0xED, 0xFF,
- 0xEE, 0xFF, 0xEF, 0xFF, 0xF0, 0xFF, 0xF1, 0xFF,
- 0xF2, 0xFF, 0xF3, 0xFF, 0xF4, 0xFF, 0xF5, 0xFF,
- 0xF6, 0xFF, 0xF7, 0xFF, 0xF8, 0xFF, 0xF9, 0xFF,
- 0xFA, 0xFF, 0xFB, 0xFF, 0xFC, 0xFF, 0xFD, 0xFF,
- 0xFE, 0xFF, 0xFF, 0xFF
-};
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index d3e098b41b1a..4f362dad4436 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1186,7 +1186,9 @@ static struct fbtft_platform_data *fbtft_properties_read(struct device *dev)
if (device_property_present(dev, "led-gpios"))
pdata->display.backlight = 1;
if (device_property_present(dev, "init"))
- pdata->display.fbtftops.init_display = fbtft_init_display_from_property;
+ pdata->display.fbtftops.init_display =
+ fbtft_init_display_from_property;
+
pdata->display.fbtftops.request_gpios = fbtft_request_gpios;
return pdata;
diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c
index 2a5c630dab87..26e52cc2de64 100644
--- a/drivers/staging/fbtft/fbtft-sysfs.c
+++ b/drivers/staging/fbtft/fbtft-sysfs.c
@@ -25,6 +25,7 @@ int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves,
unsigned long val = 0;
int ret = 0;
int curve_counter, value_counter;
+ int _count;
fbtft_par_dbg(DEBUG_SYSFS, par, "%s() str=\n", __func__);
@@ -68,7 +69,10 @@ int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves,
ret = get_next_ulong(&curve_p, &val, " ", 16);
if (ret)
goto out;
- curves[curve_counter * par->gamma.num_values + value_counter] = val;
+
+ _count = curve_counter * par->gamma.num_values +
+ value_counter;
+ curves[_count] = val;
value_counter++;
}
if (value_counter != par->gamma.num_values) {
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 5f782da51959..76f8c090a837 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -348,9 +348,17 @@ module_exit(fbtft_driver_module_exit);
/* shorthand debug levels */
#define DEBUG_LEVEL_1 DEBUG_REQUEST_GPIOS
-#define DEBUG_LEVEL_2 (DEBUG_LEVEL_1 | DEBUG_DRIVER_INIT_FUNCTIONS | DEBUG_TIME_FIRST_UPDATE)
-#define DEBUG_LEVEL_3 (DEBUG_LEVEL_2 | DEBUG_RESET | DEBUG_INIT_DISPLAY | DEBUG_BLANK | DEBUG_REQUEST_GPIOS | DEBUG_FREE_GPIOS | DEBUG_VERIFY_GPIOS | DEBUG_BACKLIGHT | DEBUG_SYSFS)
-#define DEBUG_LEVEL_4 (DEBUG_LEVEL_2 | DEBUG_FB_READ | DEBUG_FB_WRITE | DEBUG_FB_FILLRECT | DEBUG_FB_COPYAREA | DEBUG_FB_IMAGEBLIT | DEBUG_FB_BLANK)
+#define DEBUG_LEVEL_2 (DEBUG_LEVEL_1 | DEBUG_DRIVER_INIT_FUNCTIONS \
+ | DEBUG_TIME_FIRST_UPDATE)
+#define DEBUG_LEVEL_3 (DEBUG_LEVEL_2 | DEBUG_RESET | DEBUG_INIT_DISPLAY \
+ | DEBUG_BLANK | DEBUG_REQUEST_GPIOS \
+ | DEBUG_FREE_GPIOS \
+ | DEBUG_VERIFY_GPIOS \
+ | DEBUG_BACKLIGHT | DEBUG_SYSFS)
+#define DEBUG_LEVEL_4 (DEBUG_LEVEL_2 | DEBUG_FB_READ | DEBUG_FB_WRITE \
+ | DEBUG_FB_FILLRECT \
+ | DEBUG_FB_COPYAREA \
+ | DEBUG_FB_IMAGEBLIT | DEBUG_FB_BLANK)
#define DEBUG_LEVEL_5 (DEBUG_LEVEL_3 | DEBUG_UPDATE_DISPLAY)
#define DEBUG_LEVEL_6 (DEBUG_LEVEL_4 | DEBUG_LEVEL_5)
#define DEBUG_LEVEL_7 0xFFFFFFFF
@@ -398,8 +406,8 @@ do { \
#define fbtft_par_dbg(level, par, format, arg...) \
do { \
- if (unlikely(par->debug & level)) \
- dev_info(par->info->device, format, ##arg); \
+ if (unlikely((par)->debug & (level))) \
+ dev_info((par)->info->device, format, ##arg); \
} while (0)
#define fbtft_par_dbg_hex(level, par, dev, type, buf, num, format, arg...) \
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 39c0fe347188..676d1ad1b50d 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -1492,7 +1492,8 @@ static void ethsw_unregister_notifier(struct device *dev)
err = unregister_switchdev_blocking_notifier(nb);
if (err)
dev_err(dev,
- "Failed to unregister switchdev blocking notifier (%d)\n", err);
+ "Failed to unregister switchdev blocking notifier (%d)\n",
+ err);
err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err)
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index 46199c8ca441..f12f81c8dd2f 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -570,13 +570,6 @@ static const struct pci_device_id apex_pci_ids[] = {
{ PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 }
};
-static void apex_pci_fixup_class(struct pci_dev *pdev)
-{
- pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
-}
-DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID,
- PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
-
static int apex_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index be6b50f454b4..67325fbaf760 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -689,11 +689,10 @@ static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
/* Make sure that no wrong flags are set. */
requested_permissions =
- (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
+ (vma->vm_flags & VM_ACCESS_FLAGS);
if (requested_permissions & ~(bar_permissions)) {
dev_dbg(gasket_dev->dev,
- "Attempting to map a region with requested permissions "
- "0x%x, but region has permissions 0x%x.\n",
+ "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n",
requested_permissions, bar_permissions);
return false;
}
@@ -926,6 +925,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
gasket_get_bar_index(gasket_dev,
(vma->vm_pgoff << PAGE_SHIFT) +
driver_desc->legacy_mmap_address_offset);
+
+ if (bar_index < 0)
+ return DO_MAP_REGION_INVALID;
+
phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
while (mapped_bytes < map_length) {
/*
@@ -1180,8 +1183,7 @@ static int gasket_open(struct inode *inode, struct file *filp)
inode->i_size = 0;
dev_dbg(gasket_dev->dev,
- "Attempting to open with tgid %u (%s) (f_mode: 0%03o, "
- "fmode_write: %d is_root: %u)\n",
+ "Attempting to open with tgid %u (%s) (f_mode: 0%03o, fmode_write: %d is_root: %u)\n",
current->tgid, task_name, filp->f_mode,
(filp->f_mode & FMODE_WRITE), is_root);
@@ -1258,8 +1260,7 @@ static int gasket_release(struct inode *inode, struct file *file)
mutex_lock(&gasket_dev->mutex);
dev_dbg(gasket_dev->dev,
- "Releasing device node. Call origin: tgid %u (%s) "
- "(f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
+ "Releasing device node. Call origin: tgid %u (%s) (f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
current->tgid, task_name, file->f_mode,
(file->f_mode & FMODE_WRITE), is_root);
dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c
index a2d67c28f530..5f0e089573a2 100644
--- a/drivers/staging/gasket/gasket_sysfs.c
+++ b/drivers/staging/gasket/gasket_sysfs.c
@@ -228,8 +228,7 @@ int gasket_sysfs_create_entries(struct device *device,
}
mutex_lock(&mapping->mutex);
- for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER);
- i++) {
+ for (i = 0; attrs[i].attr.attr.name != NULL; i++) {
if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) {
dev_err(device,
"Maximum number of sysfs nodes reached for device\n");
diff --git a/drivers/staging/gasket/gasket_sysfs.h b/drivers/staging/gasket/gasket_sysfs.h
index 1d0eed66a7f4..ab5aa351d555 100644
--- a/drivers/staging/gasket/gasket_sysfs.h
+++ b/drivers/staging/gasket/gasket_sysfs.h
@@ -30,10 +30,6 @@
*/
#define GASKET_SYSFS_MAX_NODES 196
-/* End markers for sysfs struct arrays. */
-#define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END
-#define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN)
-
/*
* Terminator struct for a gasket_sysfs_attr array. Must be at the end of
* all gasket_sysfs_attribute arrays.
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index db11498f6fc7..354727f0a1fc 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -513,7 +513,7 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
length = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
hci->len) + HCI_HEADER_SIZE;
- return netlink_send(lte_event.sock, idx, 0, buf, length);
+ return netlink_send(lte_event.sock, idx, 0, buf, length, dev);
}
static void gdm_lte_event_rcv(struct net_device *dev, u16 type,
diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h
index 51c22e3d8aeb..87b8d921fdc8 100644
--- a/drivers/staging/gdm724x/gdm_mux.h
+++ b/drivers/staging/gdm724x/gdm_mux.h
@@ -29,7 +29,7 @@ struct mux_pkt_header {
__le32 seq_num;
__le32 payload_size;
__le16 packet_type;
- unsigned char data[0];
+ unsigned char data[];
};
struct mux_tx {
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
index 6dea3694afdd..faecdfbc664f 100644
--- a/drivers/staging/gdm724x/hci_packet.h
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -28,7 +28,7 @@
struct hci_packet {
__dev16 cmd_evt;
__dev16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct tlv {
@@ -51,7 +51,7 @@ struct sdu {
__dev32 dft_eps_ID;
__dev32 bearer_ID;
__dev32 nic_type;
- u8 data[0];
+ u8 data[];
} __packed;
struct multi_sdu {
@@ -59,7 +59,7 @@ struct multi_sdu {
__dev16 len;
__dev16 num_packet;
__dev16 reserved;
- u8 data[0];
+ u8 data[];
} __packed;
struct hci_pdn_table_ind {
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index 92440c3f055b..7902e52a699b 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -89,7 +89,8 @@ struct sock *netlink_init(int unit,
return sock;
}
-int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
+int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len,
+ struct net_device *dev)
{
static u32 seq;
struct sk_buff *skb = NULL;
@@ -118,8 +119,8 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
return len;
if (ret != -ESRCH)
- pr_err("nl broadcast g=%d, t=%d, l=%d, r=%d\n",
- group, type, len, ret);
+ netdev_err(dev, "nl broadcast g=%d, t=%d, l=%d, r=%d\n",
+ group, type, len, ret);
else if (netlink_has_listeners(sock, group + 1))
return -EAGAIN;
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
index c9e1d3b2d54f..d42eea9bea3e 100644
--- a/drivers/staging/gdm724x/netlink_k.h
+++ b/drivers/staging/gdm724x/netlink_k.h
@@ -10,6 +10,7 @@
struct sock *netlink_init(int unit,
void (*cb)(struct net_device *dev,
u16 type, void *msg, int len));
-int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
+int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len,
+ struct net_device *dev);
#endif /* _NETLINK_K_H_ */
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
index 3f1f4dd2c61a..efec0f815efd 100644
--- a/drivers/staging/greybus/audio_apbridgea.h
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -65,7 +65,7 @@
struct audio_apbridgea_hdr {
__u8 type;
__le16 i2s_port;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct audio_apbridgea_set_config_request {
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 1ff34abd5692..36d99f9e419e 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -364,8 +364,7 @@ static int gb_gpio_request_handler(struct gb_operation *op)
struct gb_message *request;
struct gb_gpio_irq_event_request *event;
u8 type = op->type;
- int irq;
- struct irq_desc *desc;
+ int irq, ret;
if (type != GB_GPIO_TYPE_IRQ_EVENT) {
dev_err(dev, "unsupported unsolicited request: %u\n", type);
@@ -391,17 +390,15 @@ static int gb_gpio_request_handler(struct gb_operation *op)
dev_err(dev, "failed to find IRQ\n");
return -EINVAL;
}
- desc = irq_to_desc(irq);
- if (!desc) {
- dev_err(dev, "failed to look up irq\n");
- return -EINVAL;
- }
local_irq_disable();
- generic_handle_irq_desc(desc);
+ ret = generic_handle_irq(irq);
local_irq_enable();
- return 0;
+ if (ret)
+ dev_err(dev, "failed to invoke irq handler\n");
+
+ return ret;
}
static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
index ab06fc3b9e7e..de2f6516da09 100644
--- a/drivers/staging/greybus/i2c.c
+++ b/drivers/staging/greybus/i2c.c
@@ -215,20 +215,6 @@ static int gb_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
return gb_i2c_transfer_operation(gb_i2c_dev, msgs, msg_count);
}
-#if 0
-/* Later */
-static int gb_i2c_smbus_xfer(struct i2c_adapter *adap,
- u16 addr, unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data *data)
-{
- struct gb_i2c_device *gb_i2c_dev;
-
- gb_i2c_dev = i2c_get_adapdata(adap);
-
- return 0;
-}
-#endif
-
static u32 gb_i2c_functionality(struct i2c_adapter *adap)
{
struct gb_i2c_device *gb_i2c_dev = i2c_get_adapdata(adap);
@@ -238,7 +224,6 @@ static u32 gb_i2c_functionality(struct i2c_adapter *adap)
static const struct i2c_algorithm gb_i2c_algorithm = {
.master_xfer = gb_i2c_master_xfer,
- /* .smbus_xfer = gb_i2c_smbus_xfer, */
.functionality = gb_i2c_functionality,
};
@@ -281,7 +266,6 @@ static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
adapter->owner = THIS_MODULE;
adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adapter->algo = &gb_i2c_algorithm;
- /* adapter->algo_data = what? */
adapter->dev.parent = &gbphy_dev->dev;
snprintf(adapter->name, sizeof(adapter->name), "Greybus i2c adapter");
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
index 64a17dfe3b6e..2a375f407d38 100644
--- a/drivers/staging/greybus/raw.c
+++ b/drivers/staging/greybus/raw.c
@@ -29,7 +29,7 @@ struct gb_raw {
struct raw_data {
struct list_head entry;
u32 len;
- u8 data[0];
+ u8 data[];
};
static struct class *raw_class;
diff --git a/drivers/staging/greybus/tools/.gitignore b/drivers/staging/greybus/tools/.gitignore
index 023654c83068..1fd364aba774 100644
--- a/drivers/staging/greybus/tools/.gitignore
+++ b/drivers/staging/greybus/tools/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
loopback_test
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index 69c6dce9be31..867bf289df2e 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -802,8 +802,9 @@ static void prepare_devices(struct loopback_test *t)
write_sysfs_val(t->devices[i].sysfs_entry,
"outstanding_operations_max",
t->async_outstanding_operations);
- } else
+ } else {
write_sysfs_val(t->devices[i].sysfs_entry, "async", 0);
+ }
}
}
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 55c51143bb09..4ffb334cd5cd 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -537,9 +537,9 @@ static void gb_tty_set_termios(struct tty_struct *tty,
}
if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
- newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN;
+ newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
else
- newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN;
+ newline.flow_control = 0;
if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
diff --git a/drivers/staging/hp/Kconfig b/drivers/staging/hp/Kconfig
deleted file mode 100644
index f20ab21a6b2a..000000000000
--- a/drivers/staging/hp/Kconfig
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# HP network device configuration
-#
-
-config NET_VENDOR_HP
- bool "HP devices"
- default y
- depends on ETHERNET
- depends on ISA || EISA || PCI
- ---help---
- If you have a network (Ethernet) card belonging to this class, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about HP cards. If you say Y, you will be asked for
- your specific card in the following questions.
-
-if NET_VENDOR_HP
-
-config HP100
- tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
- depends on (ISA || EISA || PCI)
- ---help---
- If you have a network (Ethernet) card of this type, say Y here.
-
- To compile this driver as a module, choose M here. The module
- will be called hp100.
-
-endif # NET_VENDOR_HP
diff --git a/drivers/staging/hp/Makefile b/drivers/staging/hp/Makefile
deleted file mode 100644
index 5ed723bb11e2..000000000000
--- a/drivers/staging/hp/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the HP network device drivers.
-#
-
-obj-$(CONFIG_HP100) += hp100.o
diff --git a/drivers/staging/hp/hp100.c b/drivers/staging/hp/hp100.c
deleted file mode 100644
index e2f0b58e5dfd..000000000000
--- a/drivers/staging/hp/hp100.c
+++ /dev/null
@@ -1,3034 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-** hp100.c
-** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters
-**
-** $Id: hp100.c,v 1.58 2001/09/24 18:03:01 perex Exp perex $
-**
-** Based on the HP100 driver written by Jaroslav Kysela <perex@jcu.cz>
-** Extended for new busmaster capable chipsets by
-** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>
-**
-** Maintained by: Jaroslav Kysela <perex@perex.cz>
-**
-** This driver has only been tested with
-** -- HP J2585B 10/100 Mbit/s PCI Busmaster
-** -- HP J2585A 10/100 Mbit/s PCI
-** -- HP J2970A 10 Mbit/s PCI Combo 10base-T/BNC
-** -- HP J2973A 10 Mbit/s PCI 10base-T
-** -- HP J2573 10/100 ISA
-** -- Compex ReadyLink ENET100-VG4 10/100 Mbit/s PCI / EISA
-** -- Compex FreedomLine 100/VG 10/100 Mbit/s ISA / EISA / PCI
-**
-** but it should also work with the other CASCADE based adapters.
-**
-** TODO:
-** - J2573 seems to hang sometimes when in shared memory mode.
-** - Mode for Priority TX
-** - Check PCI registers, performance might be improved?
-** - To reduce interrupt load in busmaster, one could switch off
-** the interrupts that are used to refill the queues whenever the
-** queues are filled up to more than a certain threshold.
-** - some updates for EISA version of card
-**
-**
-**
-** 1.57c -> 1.58
-** - used indent to change coding-style
-** - added KTI DP-200 EISA ID
-** - ioremap is also used for low (<1MB) memory (multi-architecture support)
-**
-** 1.57b -> 1.57c - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-** - release resources on failure in init_module
-**
-** 1.57 -> 1.57b - Jean II
-** - fix spinlocks, SMP is now working !
-**
-** 1.56 -> 1.57
-** - updates for new PCI interface for 2.1 kernels
-**
-** 1.55 -> 1.56
-** - removed printk in misc. interrupt and update statistics to allow
-** monitoring of card status
-** - timing changes in xmit routines, relogin to 100VG hub added when
-** driver does reset
-** - included fix for Compex FreedomLine PCI adapter
-**
-** 1.54 -> 1.55
-** - fixed bad initialization in init_module
-** - added Compex FreedomLine adapter
-** - some fixes in card initialization
-**
-** 1.53 -> 1.54
-** - added hardware multicast filter support (doesn't work)
-** - little changes in hp100_sense_lan routine
-** - added support for Coax and AUI (J2970)
-** - fix for multiple cards and hp100_mode parameter (insmod)
-** - fix for shared IRQ
-**
-** 1.52 -> 1.53
-** - fixed bug in multicast support
-**
-*/
-
-#define HP100_DEFAULT_PRIORITY_TX 0
-
-#undef HP100_DEBUG
-#undef HP100_DEBUG_B /* Trace */
-#undef HP100_DEBUG_BM /* Debug busmaster code (PDL stuff) */
-
-#undef HP100_DEBUG_TRAINING /* Debug login-to-hub procedure */
-#undef HP100_DEBUG_TX
-#undef HP100_DEBUG_IRQ
-#undef HP100_DEBUG_RX
-
-#undef HP100_MULTICAST_FILTER /* Need to be debugged... */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/eisa.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-
-#include "hp100.h"
-
-/*
- * defines
- */
-
-#define HP100_BUS_ISA 0
-#define HP100_BUS_EISA 1
-#define HP100_BUS_PCI 2
-
-#define HP100_REGION_SIZE 0x20 /* for ioports */
-#define HP100_SIG_LEN 8 /* same as EISA_SIG_LEN */
-
-#define HP100_MAX_PACKET_SIZE (1536+4)
-#define HP100_MIN_PACKET_SIZE 60
-
-#ifndef HP100_DEFAULT_RX_RATIO
-/* default - 75% onboard memory on the card are used for RX packets */
-#define HP100_DEFAULT_RX_RATIO 75
-#endif
-
-#ifndef HP100_DEFAULT_PRIORITY_TX
-/* default - don't enable transmit outgoing packets as priority */
-#define HP100_DEFAULT_PRIORITY_TX 0
-#endif
-
-/*
- * structures
- */
-
-struct hp100_private {
- spinlock_t lock;
- char id[HP100_SIG_LEN];
- u_short chip;
- u_short soft_model;
- u_int memory_size;
- u_int virt_memory_size;
- u_short rx_ratio; /* 1 - 99 */
- u_short priority_tx; /* != 0 - priority tx */
- u_short mode; /* PIO, Shared Mem or Busmaster */
- u_char bus;
- struct pci_dev *pci_dev;
- short mem_mapped; /* memory mapped access */
- void __iomem *mem_ptr_virt; /* virtual memory mapped area, maybe NULL */
- unsigned long mem_ptr_phys; /* physical memory mapped area */
- short lan_type; /* 10Mb/s, 100Mb/s or -1 (error) */
- int hub_status; /* was login to hub successful? */
- u_char mac1_mode;
- u_char mac2_mode;
- u_char hash_bytes[8];
-
- /* Rings for busmaster mode: */
- hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */
- hp100_ring_t *rxrtail; /* Tail (newest) index into rxring */
- hp100_ring_t *txrhead; /* Head (oldest) index into txring */
- hp100_ring_t *txrtail; /* Tail (newest) index into txring */
-
- hp100_ring_t rxring[MAX_RX_PDL];
- hp100_ring_t txring[MAX_TX_PDL];
-
- u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */
- u_long whatever_offset; /* Offset to bus/phys/dma address */
- int rxrcommit; /* # Rx PDLs committed to adapter */
- int txrcommit; /* # Tx PDLs committed to adapter */
-};
-
-/*
- * variables
- */
-#ifdef CONFIG_ISA
-static const char *hp100_isa_tbl[] = {
- "HWPF150", /* HP J2573 rev A */
- "HWP1950", /* HP J2573 */
-};
-#endif
-
-static const struct eisa_device_id hp100_eisa_tbl[] = {
- { "HWPF180" }, /* HP J2577 rev A */
- { "HWP1920" }, /* HP 27248B */
- { "HWP1940" }, /* HP J2577 */
- { "HWP1990" }, /* HP J2577 */
- { "CPX0301" }, /* ReadyLink ENET100-VG4 */
- { "CPX0401" }, /* FreedomLine 100/VG */
- { "" } /* Mandatory final entry ! */
-};
-MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
-
-static const struct pci_device_id hp100_pci_tbl[] = {
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2973A, PCI_ANY_ID, PCI_ANY_ID,},
- {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4, PCI_ANY_ID, PCI_ANY_ID,},
- {PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG, PCI_ANY_ID, PCI_ANY_ID,},
-/* {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_DP200, PCI_ANY_ID, PCI_ANY_ID }, */
- {} /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
-
-static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
-static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
-static int hp100_mode = 1;
-
-module_param(hp100_rx_ratio, int, 0);
-module_param(hp100_priority_tx, int, 0);
-module_param(hp100_mode, int, 0);
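-
-/* Illustrative usage (not in the original source): these parameters can be
- * set at module load time, e.g.
- *   insmod hp100 hp100_mode=2 hp100_rx_ratio=60 hp100_priority_tx=1
- * which would request shared memory mode with 60% of card memory for rx. */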
-
-/*
- * prototypes
- */
-
-static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
- struct pci_dev *pci_dev);
-
-
-static int hp100_open(struct net_device *dev);
-static int hp100_close(struct net_device *dev);
-static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
- struct net_device *dev);
-static void hp100_rx(struct net_device *dev);
-static struct net_device_stats *hp100_get_stats(struct net_device *dev);
-static void hp100_misc_interrupt(struct net_device *dev);
-static void hp100_update_stats(struct net_device *dev);
-static void hp100_clear_stats(struct hp100_private *lp, int ioaddr);
-static void hp100_set_multicast_list(struct net_device *dev);
-static irqreturn_t hp100_interrupt(int irq, void *dev_id);
-static void hp100_start_interface(struct net_device *dev);
-static void hp100_stop_interface(struct net_device *dev);
-static void hp100_load_eeprom(struct net_device *dev, u_short ioaddr);
-static int hp100_sense_lan(struct net_device *dev);
-static int hp100_login_to_vg_hub(struct net_device *dev,
- u_short force_relogin);
-static int hp100_down_vg_link(struct net_device *dev);
-static void hp100_cascade_reset(struct net_device *dev, u_short enable);
-static void hp100_BM_shutdown(struct net_device *dev);
-static void hp100_mmuinit(struct net_device *dev);
-static void hp100_init_pdls(struct net_device *dev);
-static int hp100_init_rxpdl(struct net_device *dev,
- register hp100_ring_t * ringptr,
- register u32 * pdlptr);
-static int hp100_init_txpdl(struct net_device *dev,
- register hp100_ring_t * ringptr,
- register u32 * pdlptr);
-static void hp100_rxfill(struct net_device *dev);
-static void hp100_hwinit(struct net_device *dev);
-static void hp100_clean_txring(struct net_device *dev);
-#ifdef HP100_DEBUG
-static void hp100_RegisterDump(struct net_device *dev);
-#endif
-
-/* Conversion to new PCI API :
- * Convert an address in a kernel buffer to a bus/phys/dma address.
- * This works *only* for memory fragments that are part of lp->page_vaddr_algn,
- * because it was properly DMA allocated via pci_alloc_consistent(),
- * so we just need to "retrieve" the original mapping to bus/phys/dma
- * address - Jean II */
-static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr)
-{
- struct hp100_private *lp = netdev_priv(dev);
- return ((u_long) ptr) + lp->whatever_offset;
-}
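-
-/* For illustration (derived from hp100_probe1 below): whatever_offset is
- * computed as page_baddr - page_vaddr_algn, so for any pointer p inside that
- * DMA page, p + whatever_offset yields the bus/phys/dma address. */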
-
-static inline u_int pdl_map_data(struct hp100_private *lp, void *data)
-{
- return pci_map_single(lp->pci_dev, data,
- MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
-}
-
-/* TODO: This function should not really be needed in a good design... */
-static void wait(void)
-{
- mdelay(1);
-}
-
-/*
- * probe functions
- * These functions should - if possible - avoid doing write operations
- * since this could cause problems when the card is not installed.
- */
-
-/*
- * Read board id and convert to string.
- * Effectively same code as decode_eisa_sig
- */
-static const char *hp100_read_id(int ioaddr)
-{
- int i;
- static char str[HP100_SIG_LEN];
- unsigned char sig[4], sum;
- unsigned short rev;
-
- hp100_page(ID_MAC_ADDR);
- sum = 0;
- for (i = 0; i < 4; i++) {
- sig[i] = hp100_inb(BOARD_ID + i);
- sum += sig[i];
- }
-
- sum += hp100_inb(BOARD_ID + i);
- if (sum != 0xff)
- return NULL; /* bad checksum */
-
- str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
- str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
- str[2] = (sig[1] & 0x1f) + ('A' - 1);
- rev = (sig[2] << 8) | sig[3];
- sprintf(str + 3, "%04X", rev);
-
- return str;
-}
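-
-/* Worked example (illustrative): sig[0] = 0x22 and sig[1] = 0xf0 decode as
- * (0x22 >> 2) & 0x1f = 8 -> 'H', ((0x22 & 3) << 3) | (0xf0 >> 5) = 23 -> 'W',
- * 0xf0 & 0x1f = 16 -> 'P', i.e. the "HWP" prefix of the HP board ids in the
- * tables above. */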
-
-#ifdef CONFIG_ISA
-static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
-{
- const char *sig;
- int i;
-
- if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
- goto err;
-
- if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) {
- release_region(ioaddr, HP100_REGION_SIZE);
- goto err;
- }
-
- sig = hp100_read_id(ioaddr);
- release_region(ioaddr, HP100_REGION_SIZE);
-
- if (sig == NULL)
- goto err;
-
- i = match_string(hp100_isa_tbl, ARRAY_SIZE(hp100_isa_tbl), sig);
- if (i < 0)
- goto err;
-
- return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
- err:
- return -ENODEV;
-
-}
-/*
- * Probe for ISA board.
- * EISA and PCI are handled by device infrastructure.
- */
-
-static int __init hp100_isa_probe(struct net_device *dev, int addr)
-{
- int err = -ENODEV;
-
- /* Probe for a specific ISA address */
- if (addr > 0xff && addr < 0x400)
- err = hp100_isa_probe1(dev, addr);
-
- else if (addr != 0)
- err = -ENXIO;
-
- else {
- /* Probe all ISA possible port regions */
- for (addr = 0x100; addr < 0x400; addr += 0x20) {
- err = hp100_isa_probe1(dev, addr);
- if (!err)
- break;
- }
- }
- return err;
-}
-#endif /* CONFIG_ISA */
-
-#if !defined(MODULE) && defined(CONFIG_ISA)
-struct net_device * __init hp100_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4200, TRACE);
- printk("hp100: %s: probe\n", dev->name);
-#endif
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- err = hp100_isa_probe(dev, dev->base_addr);
- if (err)
- goto out;
-
- return dev;
- out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif /* !MODULE && CONFIG_ISA */
-
-static const struct net_device_ops hp100_bm_netdev_ops = {
- .ndo_open = hp100_open,
- .ndo_stop = hp100_close,
- .ndo_start_xmit = hp100_start_xmit_bm,
- .ndo_get_stats = hp100_get_stats,
- .ndo_set_rx_mode = hp100_set_multicast_list,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static const struct net_device_ops hp100_netdev_ops = {
- .ndo_open = hp100_open,
- .ndo_stop = hp100_close,
- .ndo_start_xmit = hp100_start_xmit,
- .ndo_get_stats = hp100_get_stats,
- .ndo_set_rx_mode = hp100_set_multicast_list,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
- struct pci_dev *pci_dev)
-{
- int i;
- int err = -ENODEV;
- const char *eid;
- u_int chip;
- u_char uc;
- u_int memory_size = 0, virt_memory_size = 0;
- u_short local_mode, lsw;
- short mem_mapped;
- unsigned long mem_ptr_phys;
- void __iomem *mem_ptr_virt;
- struct hp100_private *lp;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4201, TRACE);
- printk("hp100: %s: probe1\n", dev->name);
-#endif
-
- /* reserve the i/o port region used for programmed i/o */
- if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
- goto out1;
-
- if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE)
- goto out2;
-
- chip = hp100_inw(PAGING) & HP100_CHIPID_MASK;
-#ifdef HP100_DEBUG
- if (chip == HP100_CHIPID_SHASTA)
- printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name);
- else if (chip == HP100_CHIPID_RAINIER)
- printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name);
- else if (chip == HP100_CHIPID_LASSEN)
- printk("hp100: %s: Lassen Chip detected.\n", dev->name);
- else
- printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n", dev->name, chip);
-#endif
-
- dev->base_addr = ioaddr;
-
- eid = hp100_read_id(ioaddr);
- if (eid == NULL) { /* bad checksum? */
- printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n",
- __func__, ioaddr);
- goto out2;
- }
-
- hp100_page(ID_MAC_ADDR);
- for (i = uc = 0; i < 7; i++)
- uc += hp100_inb(LAN_ADDR + i);
- if (uc != 0xff) {
- printk(KERN_WARNING
- "%s: bad lan address checksum at port 0x%x)\n",
- __func__, ioaddr);
- err = -EIO;
- goto out2;
- }
-
- /* Make sure that all registers are correctly updated... */
-
- hp100_load_eeprom(dev, ioaddr);
- wait();
-
- /*
- * Determine driver operation mode
- *
- * Use the variable "hp100_mode" upon insmod or as kernel parameter to
- * force driver modes:
- * hp100_mode=1 -> default, use busmaster mode if configured.
- * hp100_mode=2 -> enable shared memory mode
- * hp100_mode=3 -> force use of i/o mapped mode.
- * hp100_mode=4 -> same as 1, but re-set the enable bit on the card.
- */
-
- /*
- * LSW values:
- * 0x2278 -> J2585B, PnP shared memory mode
- * 0x2270 -> J2585B, shared memory mode, 0xdc000
- * 0xa23c -> J2585B, I/O mapped mode
- * 0x2240 -> EISA COMPEX, BusMaster (Shasta Chip)
- * 0x2220 -> EISA HP, I/O (Shasta Chip)
- * 0x2260 -> EISA HP, BusMaster (Shasta Chip)
- */
-
-#if 0
- local_mode = 0x2270;
- hp100_outw(0xfefe, OPTION_LSW);
- hp100_outw(local_mode | HP100_SET_LB | HP100_SET_HB, OPTION_LSW);
-#endif
-
- /* the hp100_mode value may be used in the future by other cards */
- local_mode = hp100_mode;
- if (local_mode < 1 || local_mode > 4)
- local_mode = 1; /* default */
-#ifdef HP100_DEBUG
- printk("hp100: %s: original LSW = 0x%x\n", dev->name,
- hp100_inw(OPTION_LSW));
-#endif
-
- if (local_mode == 3) {
- hp100_outw(HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
- hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
- hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
- printk("hp100: IO mapped mode forced.\n");
- } else if (local_mode == 2) {
- hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
- hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
- hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
- printk("hp100: Shared memory mode requested.\n");
- } else if (local_mode == 4) {
- if (chip == HP100_CHIPID_LASSEN) {
- hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_SET_HB, OPTION_LSW);
- hp100_outw(HP100_IO_EN | HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
- printk("hp100: Busmaster mode requested.\n");
- }
- local_mode = 1;
- }
-
- if (local_mode == 1) { /* default behaviour */
- lsw = hp100_inw(OPTION_LSW);
-
- if ((lsw & HP100_IO_EN) && (~lsw & HP100_MEM_EN) &&
- (~lsw & (HP100_BM_WRITE | HP100_BM_READ))) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: IO_EN bit is set on card.\n", dev->name);
-#endif
- local_mode = 3;
- } else if (chip == HP100_CHIPID_LASSEN &&
- (lsw & (HP100_BM_WRITE | HP100_BM_READ)) == (HP100_BM_WRITE | HP100_BM_READ)) {
- /* Conversion to new PCI API :
- * I don't have the doc, but I assume that the card
- * can map the full 32bit address space.
- * Also, we can have EISA Busmaster cards (not tested),
- * so beware !!! - Jean II */
- if((bus == HP100_BUS_PCI) &&
- (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)))) {
- /* Gracefully fallback to shared memory */
- goto busmasterfail;
- }
- printk("hp100: Busmaster mode enabled.\n");
- hp100_outw(HP100_MEM_EN | HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
- } else {
- busmasterfail:
-#ifdef HP100_DEBUG
- printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name);
- printk("hp100: %s: Trying shared memory mode.\n", dev->name);
-#endif
- /* In this case, try shared memory mode */
- local_mode = 2;
- hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
- /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */
- }
- }
-#ifdef HP100_DEBUG
- printk("hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW));
-#endif
-
- /* Check for shared memory on the card and remap it if necessary */
- hp100_page(HW_MAP);
- mem_mapped = ((hp100_inw(OPTION_LSW) & (HP100_MEM_EN)) != 0);
- mem_ptr_phys = 0UL;
- mem_ptr_virt = NULL;
- memory_size = (8192 << ((hp100_inb(SRAM) >> 5) & 0x07));
- virt_memory_size = 0;
-
- /* For memory mapped or busmaster mode, we want the memory address */
- if (mem_mapped || (local_mode == 1)) {
- mem_ptr_phys = (hp100_inw(MEM_MAP_LSW) | (hp100_inw(MEM_MAP_MSW) << 16));
- mem_ptr_phys &= ~0x1fff; /* 8k alignment */
-
- if (bus == HP100_BUS_ISA && (mem_ptr_phys & ~0xfffff) != 0) {
- printk("hp100: Can only use programmed i/o mode.\n");
- mem_ptr_phys = 0;
- mem_mapped = 0;
- local_mode = 3; /* Use programmed i/o */
- }
-
- /* We do not need access to shared memory in busmaster mode */
- /* However in slave mode we need to remap high (>1GB) card memory */
- if (local_mode != 1) { /* = not busmaster */
- /* We try with smaller memory sizes, if ioremap fails */
- for (virt_memory_size = memory_size; virt_memory_size > 16383; virt_memory_size >>= 1) {
- if ((mem_ptr_virt = ioremap((u_long) mem_ptr_phys, virt_memory_size)) == NULL) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, mem_ptr_phys);
-#endif
- } else {
-#ifdef HP100_DEBUG
- printk("hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to %p.\n", dev->name, virt_memory_size, mem_ptr_phys, mem_ptr_virt);
-#endif
- break;
- }
- }
-
- if (mem_ptr_virt == NULL) { /* all ioremap tries failed */
- printk("hp100: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n");
- local_mode = 3;
- virt_memory_size = 0;
- }
- }
- }
-
- if (local_mode == 3) { /* io mapped forced */
- mem_mapped = 0;
- mem_ptr_phys = 0;
- mem_ptr_virt = NULL;
- printk("hp100: Using (slow) programmed i/o mode.\n");
- }
-
- /* Initialise the "private" data structure for this card. */
- lp = netdev_priv(dev);
-
- spin_lock_init(&lp->lock);
- strlcpy(lp->id, eid, HP100_SIG_LEN);
- lp->chip = chip;
- lp->mode = local_mode;
- lp->bus = bus;
- lp->pci_dev = pci_dev;
- lp->priority_tx = hp100_priority_tx;
- lp->rx_ratio = hp100_rx_ratio;
- lp->mem_ptr_phys = mem_ptr_phys;
- lp->mem_ptr_virt = mem_ptr_virt;
- hp100_page(ID_MAC_ADDR);
- lp->soft_model = hp100_inb(SOFT_MODEL);
- lp->mac1_mode = HP100_MAC1MODE3;
- lp->mac2_mode = HP100_MAC2MODE3;
- memset(&lp->hash_bytes, 0x00, 8);
-
- dev->base_addr = ioaddr;
-
- lp->memory_size = memory_size;
- lp->virt_memory_size = virt_memory_size;
- lp->rx_ratio = hp100_rx_ratio; /* can be conf'd with insmod */
-
- if (lp->mode == 1) /* busmaster */
- dev->netdev_ops = &hp100_bm_netdev_ops;
- else
- dev->netdev_ops = &hp100_netdev_ops;
-
- /* Ask the card for which IRQ line it is configured */
- if (bus == HP100_BUS_PCI) {
- dev->irq = pci_dev->irq;
- } else {
- hp100_page(HW_MAP);
- dev->irq = hp100_inb(IRQ_CHANNEL) & HP100_IRQMASK;
- if (dev->irq == 2)
- dev->irq = 9;
- }
-
- if (lp->mode == 1) /* busmaster */
- dev->dma = 4;
-
- /* Ask the card for its MAC address and store it for later use. */
- hp100_page(ID_MAC_ADDR);
- for (i = uc = 0; i < 6; i++)
- dev->dev_addr[i] = hp100_inb(LAN_ADDR + i);
-
- /* Reset statistics (counters) */
- hp100_clear_stats(lp, ioaddr);
-
- /* If busmaster mode is wanted, a dma-capable memory area is needed for
- * the rx and tx PDLs.
- * PCI cards can access the whole PC memory. Therefore GFP_DMA is not
- * needed for the allocation of the memory area.
- */
-
- /* TODO: We do not need this with old cards, where PDLs are stored
- * in the card's shared memory area. But currently, busmaster has been
- * implemented/tested only with the lassen chip anyway... */
- if (lp->mode == 1) { /* busmaster */
- dma_addr_t page_baddr;
- /* Get physically continuous memory for TX & RX PDLs */
- /* Conversion to new PCI API :
- * Pages are always aligned and zeroed, no need to do it ourselves.
- * Doc says should be OK for EISA bus as well - Jean II */
- lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr);
- if (!lp->page_vaddr_algn) {
- err = -ENOMEM;
- goto out_mem_ptr;
- }
- lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn);
-
-#ifdef HP100_DEBUG_BM
- printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n", dev->name, (u_int) lp->page_vaddr_algn, (u_int) lp->page_vaddr_algn + MAX_RINGSIZE);
-#endif
- lp->rxrcommit = lp->txrcommit = 0;
- lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
- lp->txrhead = lp->txrtail = &(lp->txring[0]);
- }
-
- /* Initialise the card. */
- /* (I'm not really sure if it's a good idea to do this during probing, but
- * this way it is ensured that the lan connection type can be sensed
- * correctly)
- */
- hp100_hwinit(dev);
-
- /* Try to find out which kind of LAN the card is connected to. */
- lp->lan_type = hp100_sense_lan(dev);
-
- /* Print out a message about what we think we have probed. */
- printk("hp100: at 0x%x, IRQ %d, ", ioaddr, dev->irq);
- switch (bus) {
- case HP100_BUS_EISA:
- printk("EISA");
- break;
- case HP100_BUS_PCI:
- printk("PCI");
- break;
- default:
- printk("ISA");
- break;
- }
- printk(" bus, %dk SRAM (rx/tx %d%%).\n", lp->memory_size >> 10, lp->rx_ratio);
-
- if (lp->mode == 2) { /* memory mapped */
- printk("hp100: Memory area at 0x%lx-0x%lx", mem_ptr_phys,
- (mem_ptr_phys + (mem_ptr_phys > 0x100000 ? (u_long) lp->memory_size : 16 * 1024)) - 1);
- if (mem_ptr_virt)
- printk(" (virtual base %p)", mem_ptr_virt);
- printk(".\n");
-
- /* Set for info when doing ifconfig */
- dev->mem_start = mem_ptr_phys;
- dev->mem_end = mem_ptr_phys + lp->memory_size;
- }
-
- printk("hp100: ");
- if (lp->lan_type != HP100_LAN_ERR)
- printk("Adapter is attached to ");
- switch (lp->lan_type) {
- case HP100_LAN_100:
- printk("100Mb/s Voice Grade AnyLAN network.\n");
- break;
- case HP100_LAN_10:
- printk("10Mb/s network (10baseT).\n");
- break;
- case HP100_LAN_COAX:
- printk("10Mb/s network (coax).\n");
- break;
- default:
- printk("Warning! Link down.\n");
- }
-
- err = register_netdev(dev);
- if (err)
- goto out3;
-
- return 0;
-out3:
- if (local_mode == 1)
- pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f,
- lp->page_vaddr_algn,
- virt_to_whatever(dev, lp->page_vaddr_algn));
-out_mem_ptr:
- if (mem_ptr_virt)
- iounmap(mem_ptr_virt);
-out2:
- release_region(ioaddr, HP100_REGION_SIZE);
-out1:
- return err;
-}
-
-/* This procedure puts the card into a stable init state */
-static void hp100_hwinit(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4202, TRACE);
- printk("hp100: %s: hwinit\n", dev->name);
-#endif
-
- /* Initialise the card. -------------------------------------------- */
-
- /* Clear all pending Ints and disable Ints */
- hp100_page(PERFORMANCE);
- hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
- hp100_outw(0xffff, IRQ_STATUS); /* clear all pending ints */
-
- hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
- hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
-
- if (lp->mode == 1) {
- hp100_BM_shutdown(dev); /* disables BM, puts cascade in reset */
- wait();
- } else {
- hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
- hp100_cascade_reset(dev, 1);
- hp100_page(MAC_CTRL);
- hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
- }
-
- /* Initiate EEPROM reload */
- hp100_load_eeprom(dev, 0);
-
- wait();
-
- /* Go into reset again. */
- hp100_cascade_reset(dev, 1);
-
- /* Set Option Registers to a safe state */
- hp100_outw(HP100_DEBUG_EN |
- HP100_RX_HDR |
- HP100_EE_EN |
- HP100_BM_WRITE |
- HP100_BM_READ | HP100_RESET_HB |
- HP100_FAKE_INT |
- HP100_INT_EN |
- HP100_MEM_EN |
- HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
-
- hp100_outw(HP100_TRI_INT |
- HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
-
- hp100_outb(HP100_PRIORITY_TX |
- HP100_ADV_NXT_PKT |
- HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
-
- /* TODO: Configure MMU for Ram Test. */
- /* TODO: Ram Test. */
-
- /* Re-check if adapter is still at same i/o location */
- /* (If the base i/o in eeprom has been changed but the */
- /* registers had not been changed, a reload of the eeprom */
- /* would move the adapter to the address stored in eeprom */
-
- /* TODO: Code to implement. */
-
- /* Up to here this was code from the HWdiscover procedure. */
- /* Next comes code from the mmuinit procedure of the SCO BM driver, which
- * is called from HWconfigure in the SCO driver. */
-
- /* Initialise the MMU, switch on Busmaster Mode if configured, and
- * initialise the multicast filter...
- */
- hp100_mmuinit(dev);
-
- /* We don't turn the interrupts on here - this is done by start_interface. */
- wait(); /* TODO: Do we really need this? */
-
- /* Enable Hardware (e.g. unreset) */
- hp100_cascade_reset(dev, 0);
-
- /* ------- initialisation complete ----------- */
-
- /* Finally try to log in the Hub if there may be a VG connection. */
- if ((lp->lan_type == HP100_LAN_100) || (lp->lan_type == HP100_LAN_ERR))
- hp100_login_to_vg_hub(dev, 0); /* relogin */
-
-}
-
-
-/*
- * mmuinit - Reinitialise Cascade MMU and MAC settings.
- * Note: Must already be in reset and leaves card in reset.
- */
-static void hp100_mmuinit(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
- int i;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4203, TRACE);
- printk("hp100: %s: mmuinit\n", dev->name);
-#endif
-
-#ifdef HP100_DEBUG
- if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
- printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n", dev->name);
- return;
- }
-#endif
-
- /* Make sure IRQs are masked off and ack'ed. */
- hp100_page(PERFORMANCE);
- hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
- hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
-
- /*
- * Enable Hardware
- * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En
- * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable
- * - Clear Priority, Advance Pkt and Xmit Cmd
- */
-
- hp100_outw(HP100_DEBUG_EN |
- HP100_RX_HDR |
- HP100_EE_EN | HP100_RESET_HB |
- HP100_IO_EN |
- HP100_FAKE_INT |
- HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
-
- hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
-
- if (lp->mode == 1) { /* busmaster */
- hp100_outw(HP100_BM_WRITE |
- HP100_BM_READ |
- HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
- } else if (lp->mode == 2) { /* memory mapped */
- hp100_outw(HP100_BM_WRITE |
- HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
- hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
- hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
- hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
- } else if (lp->mode == 3) { /* i/o mapped mode */
- hp100_outw(HP100_MMAP_DIS | HP100_SET_HB |
- HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
- }
-
- hp100_page(HW_MAP);
- hp100_outb(0, EARLYRXCFG);
- hp100_outw(0, EARLYTXCFG);
-
- /*
- * Enable Bus Master mode
- */
- if (lp->mode == 1) { /* busmaster */
- /* Experimental: Set some PCI configuration bits */
- hp100_page(HW_MAP);
- hp100_andb(~HP100_PDL_USE3, MODECTRL1); /* BM engine read maximum */
- hp100_andb(~HP100_TX_DUALQ, MODECTRL1); /* No Queue for Priority TX */
-
- /* PCI Bus failures should result in a Misc. Interrupt */
- hp100_orb(HP100_EN_BUS_FAIL, MODECTRL2);
-
- hp100_outw(HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW);
- hp100_page(HW_MAP);
- /* Use Burst Mode and switch on PAGE_CK */
- hp100_orb(HP100_BM_BURST_RD | HP100_BM_BURST_WR, BM);
- if ((lp->chip == HP100_CHIPID_RAINIER) || (lp->chip == HP100_CHIPID_SHASTA))
- hp100_orb(HP100_BM_PAGE_CK, BM);
- hp100_orb(HP100_BM_MASTER, BM);
- } else { /* not busmaster */
-
- hp100_page(HW_MAP);
- hp100_andb(~HP100_BM_MASTER, BM);
- }
-
- /*
- * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs
- */
- hp100_page(MMU_CFG);
- if (lp->mode == 1) { /* only needed for Busmaster */
- int xmit_stop, recv_stop;
-
- if ((lp->chip == HP100_CHIPID_RAINIER) ||
- (lp->chip == HP100_CHIPID_SHASTA)) {
- int pdl_stop;
-
- /*
- * Each pdl is 508 bytes long: 63 fragments * 8 bytes for address
- * and length, plus 4 bytes for the header. We will leave
- * MAX_RX_PDL * 508 bytes (rounded up to the next higher 1k
- * boundary) for the rx-pdl's.
- * Note: For non-etr chips the transmit stop register must be
- * programmed on a 1k boundary, i.e. bits 9:0 must be zero.
- */
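- /* Worked example (illustrative, assuming a 32K card and
- * MAX_RX_PDL = 30): pdl_stop = 0x8000 and 30 * 508 + 16 = 15256,
- * so xmit_stop = (0x8000 - 15256) & ~0x03ff = 0x4400; with
- * rx_ratio = 75, recv_stop = (0x4400 * 75 / 100) & ~0x03ff = 0x3000. */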
- pdl_stop = lp->memory_size;
- xmit_stop = (pdl_stop - 508 * (MAX_RX_PDL) - 16) & ~(0x03ff);
- recv_stop = (xmit_stop * (lp->rx_ratio) / 100) & ~(0x03ff);
- hp100_outw((pdl_stop >> 4) - 1, PDL_MEM_STOP);
-#ifdef HP100_DEBUG_BM
- printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop);
-#endif
- } else {
- /* ETR chip (Lassen) in busmaster mode */
- xmit_stop = (lp->memory_size) - 1;
- recv_stop = ((lp->memory_size * lp->rx_ratio) / 100) & ~(0x03ff);
- }
-
- hp100_outw(xmit_stop >> 4, TX_MEM_STOP);
- hp100_outw(recv_stop >> 4, RX_MEM_STOP);
-#ifdef HP100_DEBUG_BM
- printk("hp100: %s: TX_STOP = 0x%x\n", dev->name, xmit_stop >> 4);
- printk("hp100: %s: RX_STOP = 0x%x\n", dev->name, recv_stop >> 4);
-#endif
- } else {
- /* Slave modes (memory mapped and programmed io) */
- hp100_outw((((lp->memory_size * lp->rx_ratio) / 100) >> 4), RX_MEM_STOP);
- hp100_outw(((lp->memory_size - 1) >> 4), TX_MEM_STOP);
-#ifdef HP100_DEBUG
- printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(TX_MEM_STOP));
- printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(RX_MEM_STOP));
-#endif
- }
-
- /* Write MAC address into page 1 */
- hp100_page(MAC_ADDRESS);
- for (i = 0; i < 6; i++)
- hp100_outb(dev->dev_addr[i], MAC_ADDR + i);
-
- /* Zero the multicast hash registers */
- for (i = 0; i < 8; i++)
- hp100_outb(0x0, HASH_BYTE0 + i);
-
- /* Set up MAC defaults */
- hp100_page(MAC_CTRL);
-
- /* Go to LAN Page and zero all filter bits */
- /* Zero accept error, accept multicast, accept broadcast and accept */
- /* all directed packet bits */
- hp100_andb(~(HP100_RX_EN |
- HP100_TX_EN |
- HP100_ACC_ERRORED |
- HP100_ACC_MC |
- HP100_ACC_BC | HP100_ACC_PHY), MAC_CFG_1);
-
- hp100_outb(0x00, MAC_CFG_2);
-
- /* Zero the frame format bit. This works around a training bug in the */
- /* new hubs. */
- hp100_outb(0x00, VG_LAN_CFG_2); /* (use 802.3) */
-
- if (lp->priority_tx)
- hp100_outb(HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW);
- else
- hp100_outb(HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW);
-
- hp100_outb(HP100_ADV_NXT_PKT |
- HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
-
- /* If busmaster, initialize the PDLs */
- if (lp->mode == 1)
- hp100_init_pdls(dev);
-
- /* Go to performance page and initialize isr and imr registers */
- hp100_page(PERFORMANCE);
- hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
- hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
-}
-
-/*
- * open/close functions
- */
-
-static int hp100_open(struct net_device *dev)
-{
- struct hp100_private *lp = netdev_priv(dev);
-#ifdef HP100_DEBUG_B
- int ioaddr = dev->base_addr;
-#endif
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4204, TRACE);
- printk("hp100: %s: open\n", dev->name);
-#endif
-
- /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
- if (request_irq(dev->irq, hp100_interrupt,
- lp->bus == HP100_BUS_PCI || lp->bus ==
- HP100_BUS_EISA ? IRQF_SHARED : 0,
- dev->name, dev)) {
- printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
- return -EAGAIN;
- }
-
- netif_trans_update(dev); /* prevent tx timeout */
- netif_start_queue(dev);
-
- lp->lan_type = hp100_sense_lan(dev);
- lp->mac1_mode = HP100_MAC1MODE3;
- lp->mac2_mode = HP100_MAC2MODE3;
- memset(&lp->hash_bytes, 0x00, 8);
-
- hp100_stop_interface(dev);
-
- hp100_hwinit(dev);
-
- hp100_start_interface(dev); /* sets mac modes, enables interrupts */
-
- return 0;
-}
-
-/* The close function is called when the interface is to be brought down */
-static int hp100_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4205, TRACE);
- printk("hp100: %s: close\n", dev->name);
-#endif
-
- hp100_page(PERFORMANCE);
- hp100_outw(0xfefe, IRQ_MASK); /* mask off all IRQs */
-
- hp100_stop_interface(dev);
-
- if (lp->lan_type == HP100_LAN_100)
- lp->hub_status = hp100_login_to_vg_hub(dev, 0);
-
- netif_stop_queue(dev);
-
- free_irq(dev->irq, dev);
-
-#ifdef HP100_DEBUG
- printk("hp100: %s: close LSW = 0x%x\n", dev->name,
- hp100_inw(OPTION_LSW));
-#endif
-
- return 0;
-}
-
-
-/*
- * Configure the PDL Rx rings and LAN
- */
-static void hp100_init_pdls(struct net_device *dev)
-{
- struct hp100_private *lp = netdev_priv(dev);
- hp100_ring_t *ringptr;
- u_int *pageptr; /* Warning : increment by 4 - Jean II */
- int i;
-
-#ifdef HP100_DEBUG_B
- int ioaddr = dev->base_addr;
-#endif
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4206, TRACE);
- printk("hp100: %s: init pdls\n", dev->name);
-#endif
-
- if (!lp->page_vaddr_algn)
- printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n", dev->name);
- else {
- /* pageptr shall point into the DMA accessible memory region */
- /* we use this pointer to track the upper limit of allocated */
- /* memory in the allocated page. */
- /* note: align the pointers to the pci cache line size */
- memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE); /* Zero Rx/Tx ring page */
- pageptr = lp->page_vaddr_algn;
-
- lp->rxrcommit = 0;
- ringptr = lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
-
- /* Initialise Rx Ring */
- for (i = MAX_RX_PDL - 1; i >= 0; i--) {
- lp->rxring[i].next = ringptr;
- ringptr = &(lp->rxring[i]);
- pageptr += hp100_init_rxpdl(dev, ringptr, pageptr);
- }
-
- /* Initialise Tx Ring */
- lp->txrcommit = 0;
- ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]);
- for (i = MAX_TX_PDL - 1; i >= 0; i--) {
- lp->txring[i].next = ringptr;
- ringptr = &(lp->txring[i]);
- pageptr += hp100_init_txpdl(dev, ringptr, pageptr);
- }
- }
-}
-
-
-/* These functions "format" the entries in the pdl structure */
-/* They return how much memory the fragments need. */
-static int hp100_init_rxpdl(struct net_device *dev,
- register hp100_ring_t * ringptr,
- register u32 * pdlptr)
-{
- /* pdlptr is starting address for this pdl */
-
- if (0 != (((unsigned long) pdlptr) & 0xf))
- printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%lx.\n",
- dev->name, (unsigned long) pdlptr);
-
- ringptr->pdl = pdlptr + 1;
- ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
- ringptr->skb = NULL;
-
- /*
- * Write address and length of first PDL Fragment (which is used for
- * storing the RX-Header
- * We use the 4 bytes _before_ the PDH in the pdl memory area to
- * store this information. (PDH is at offset 0x04)
- */
- /* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */
-
- *(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr); /* Address Frag 1 */
- *(pdlptr + 3) = 4; /* Length Frag 1 */
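-
- /* Resulting word layout (derived from this code and hp100_build_rx_pdl):
- * pdlptr[0]: 4 byte receive header, DMA'd here via fragment 1
- * pdlptr[1]: PDH (= ringptr->pdl[0], fragment count / packet status)
- * pdlptr[2]: fragment 1 address -> &pdlptr[0]
- * pdlptr[3]: fragment 1 length -> 4
- * pdlptr[4]: fragment 2 address -> skb->data (set in hp100_build_rx_pdl)
- * pdlptr[5]: fragment 2 length -> MAX_ETHER_SIZE */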
-
- return roundup(MAX_RX_FRAG * 2 + 2, 4);
-}
-
-
-static int hp100_init_txpdl(struct net_device *dev,
- register hp100_ring_t * ringptr,
- register u32 * pdlptr)
-{
- if (0 != (((unsigned long) pdlptr) & 0xf))
- printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%lx.\n", dev->name, (unsigned long) pdlptr);
-
- ringptr->pdl = pdlptr; /* +1; */
- ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
- ringptr->skb = NULL;
-
- return roundup(MAX_TX_FRAG * 2 + 2, 4);
-}
-
-/*
- * hp100_build_rx_pdl allocates an sk_buff of maximum size plus two bytes
- * for possible odd word alignment (rounded up to the next dword) and sets
- * the PDL address for fragment #2
- * Returns: 0 if unable to allocate skb_buff
- * 1 if successful
- */
-static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
- struct net_device *dev)
-{
-#ifdef HP100_DEBUG_B
- int ioaddr = dev->base_addr;
-#endif
-#ifdef HP100_DEBUG_BM
- u_int *p;
-#endif
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4207, TRACE);
- printk("hp100: %s: build rx pdl\n", dev->name);
-#endif
-
- /* Allocate skb buffer of maximum size */
- /* Note: This depends on the alloc_skb functions allocating more
- * space than requested, i.e. aligning to 16 bytes */
-
- ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4));
-
- if (NULL != ringptr->skb) {
- /*
- * Reserve 2 bytes at the head of the buffer to land the IP header
- * on a long word boundary (According to the Network Driver section
- * in the Linux KHG, this should help to increase performance.)
- */
- skb_reserve(ringptr->skb, 2);
-
- ringptr->skb->data = skb_put(ringptr->skb, MAX_ETHER_SIZE);
-
- /* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
- /* Note: 1st Fragment is used for the 4 byte packet status
- * (receive header). Its PDL entries are set up by init_rxpdl. So
- * here we only have to set up the PDL fragment entries for the data
- * part. Those 4 bytes will be stored in the DMA memory region
- * directly before the PDL.
- */
-#ifdef HP100_DEBUG_BM
- printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n",
- dev->name, (u_int) ringptr->pdl,
- roundup(MAX_ETHER_SIZE + 2, 4),
- (unsigned int) ringptr->skb->data);
-#endif
-
- /* Conversion to new PCI API : map skbuf data to PCI bus.
- * Doc says it's OK for EISA as well - Jean II */
- ringptr->pdl[0] = 0x00020000; /* Write PDH */
- ringptr->pdl[3] = pdl_map_data(netdev_priv(dev),
- ringptr->skb->data);
- ringptr->pdl[4] = MAX_ETHER_SIZE; /* Length of Data */
-
-#ifdef HP100_DEBUG_BM
- for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
- printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
-#endif
- return 1;
- }
- /* else: */
- /* alloc_skb failed (no memory) -> still can receive the header
- * fragment into PDL memory. make PDL safe by clearing msgptr and
- * making the PDL only 1 fragment (i.e. the 4 byte packet status)
- */
-#ifdef HP100_DEBUG_BM
- printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n", dev->name, (u_int) ringptr->pdl);
-#endif
-
- ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */
-
- return 0;
-}
-
-/*
- * hp100_rxfill - attempt to fill the Rx Ring with empty skb's
- *
- * Makes assumption that skb's are always contiguous memory areas and
- * therefore PDLs contain only 2 physical fragments.
- * - While the number of Rx PDLs with buffers is less than maximum
- * a. Get a maximum packet size skb
- * b. Put the physical address of the buffer into the PDL.
- * c. Output physical address of PDL to adapter.
- */
-static void hp100_rxfill(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- struct hp100_private *lp = netdev_priv(dev);
- hp100_ring_t *ringptr;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4208, TRACE);
- printk("hp100: %s: rxfill\n", dev->name);
-#endif
-
- hp100_page(PERFORMANCE);
-
- while (lp->rxrcommit < MAX_RX_PDL) {
- /*
- ** Attempt to get a buffer and build a Rx PDL.
- */
- ringptr = lp->rxrtail;
- if (0 == hp100_build_rx_pdl(ringptr, dev)) {
- return; /* None available, return */
- }
-
- /* Hand this PDL over to the card */
- /* Note: This needs performance page selected! */
-#ifdef HP100_DEBUG_BM
- printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n",
- dev->name, lp->rxrcommit, (u_int) ringptr->pdl,
- (u_int) ringptr->pdl_paddr, (u_int) ringptr->pdl[3]);
-#endif
-
- hp100_outl((u32) ringptr->pdl_paddr, RX_PDA);
-
- lp->rxrcommit += 1;
- lp->rxrtail = ringptr->next;
- }
-}
-
-/*
- * BM_shutdown - shutdown bus mastering and leave chip in reset state
- */
-
-static void hp100_BM_shutdown(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
- unsigned long time;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4209, TRACE);
- printk("hp100: %s: bm shutdown\n", dev->name);
-#endif
-
- hp100_page(PERFORMANCE);
- hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
- hp100_outw(0xffff, IRQ_STATUS); /* Ack all ints */
-
- /* Ensure Interrupts are off */
- hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
-
- /* Disable all MAC activity */
- hp100_page(MAC_CTRL);
- hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */
-
- /* If cascade MMU is not already in reset */
- if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
- /* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so
- * MMU pointers will not be reset out from underneath us.
- */
- hp100_page(MAC_CTRL);
- for (time = 0; time < 5000; time++) {
- if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE))
- break;
- }
-
- /* Shutdown algorithm depends on the generation of Cascade */
- if (lp->chip == HP100_CHIPID_LASSEN) { /* ETR shutdown/reset */
- /* Disable Busmaster mode and wait for bit to go to zero. */
- hp100_page(HW_MAP);
- hp100_andb(~HP100_BM_MASTER, BM);
- /* 100 ms timeout */
- for (time = 0; time < 32000; time++) {
- if (0 == (hp100_inb(BM) & HP100_BM_MASTER))
- break;
- }
- } else { /* Shasta or Rainier Shutdown/Reset */
- /* To ensure all bus master inloading activity has ceased,
- * wait for no Rx PDAs or no Rx packets on card.
- */
- hp100_page(PERFORMANCE);
- /* 100 ms timeout */
- for (time = 0; time < 10000; time++) {
- /* RX_PDL: PDLs not executed. */
- /* RX_PKT_CNT: RX'd packets on card. */
- if ((hp100_inb(RX_PDL) == 0) && (hp100_inb(RX_PKT_CNT) == 0))
- break;
- }
-
- if (time >= 10000)
- printk("hp100: %s: BM shutdown error.\n", dev->name);
-
- /* To ensure all bus master outloading activity has ceased,
- * wait until the Tx PDA count goes to zero or no more Tx space
- * available in the Tx region of the card.
- */
- /* 100 ms timeout */
- for (time = 0; time < 10000; time++) {
- if ((0 == hp100_inb(TX_PKT_CNT)) &&
- (0 != (hp100_inb(TX_MEM_FREE) & HP100_AUTO_COMPARE)))
- break;
- }
-
- /* Disable Busmaster mode */
- hp100_page(HW_MAP);
- hp100_andb(~HP100_BM_MASTER, BM);
- } /* end of shutdown procedure for non-etr parts */
-
- hp100_cascade_reset(dev, 1);
- }
- hp100_page(PERFORMANCE);
- /* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */
- /* Busmaster mode should be shut down now. */
-}
-
-static int hp100_check_lan(struct net_device *dev)
-{
- struct hp100_private *lp = netdev_priv(dev);
-
- if (lp->lan_type < 0) { /* no LAN type detected yet? */
- hp100_stop_interface(dev);
- if ((lp->lan_type = hp100_sense_lan(dev)) < 0) {
- printk("hp100: %s: no connection found - check wire\n", dev->name);
- hp100_start_interface(dev); /* 10Mb/s RX packets may still be handled */
- return -EIO;
- }
- if (lp->lan_type == HP100_LAN_100)
- lp->hub_status = hp100_login_to_vg_hub(dev, 0); /* relogin */
- hp100_start_interface(dev);
- }
- return 0;
-}
-
-/*
- * transmit functions
- */
-
-/* tx function for busmaster mode */
-static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
- struct net_device *dev)
-{
- unsigned long flags;
- int i, ok_flag;
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
- hp100_ring_t *ringptr;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4210, TRACE);
- printk("hp100: %s: start_xmit_bm\n", dev->name);
-#endif
- if (skb->len <= 0)
- goto drop;
-
- if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
-
- /* Get Tx ring tail pointer */
- if (lp->txrtail->next == lp->txrhead) {
- /* No memory. */
-#ifdef HP100_DEBUG
- printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
-#endif
- /* not waited long enough since last tx? */
- if (time_before(jiffies, dev_trans_start(dev) + HZ))
- goto drop;
-
- if (hp100_check_lan(dev))
- goto drop;
-
- if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
- /* we have a 100Mb/s adapter but it isn't connected to hub */
- printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
- hp100_stop_interface(dev);
- lp->hub_status = hp100_login_to_vg_hub(dev, 0);
- hp100_start_interface(dev);
- } else {
- spin_lock_irqsave(&lp->lock, flags);
- hp100_ints_off(); /* Useful ? Jean II */
- i = hp100_sense_lan(dev);
- hp100_ints_on();
- spin_unlock_irqrestore(&lp->lock, flags);
- if (i == HP100_LAN_ERR)
- printk("hp100: %s: link down detected\n", dev->name);
- else if (lp->lan_type != i) { /* cable change! */
- /* it's very hard - all network settings must be changed!!! */
- printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
- lp->lan_type = i;
- hp100_stop_interface(dev);
- if (lp->lan_type == HP100_LAN_100)
- lp->hub_status = hp100_login_to_vg_hub(dev, 0);
- hp100_start_interface(dev);
- } else {
- printk("hp100: %s: interface reset\n", dev->name);
- hp100_stop_interface(dev);
- if (lp->lan_type == HP100_LAN_100)
- lp->hub_status = hp100_login_to_vg_hub(dev, 0);
- hp100_start_interface(dev);
- }
- }
-
- goto drop;
- }
-
- /*
- * we have to turn int's off before modifying this, otherwise
- * a tx_pdl_cleanup could occur at the same time
- */
- spin_lock_irqsave(&lp->lock, flags);
- ringptr = lp->txrtail;
- lp->txrtail = ringptr->next;
-
- /* Check whether the packet meets the minimum packet size */
- ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
- i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
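- /* Worked example: a 34 byte skb gives ok_flag = 0 and i = 60
- * (HP100_MIN_PACKET_SIZE), so the PDH below carries the padded length. */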
-
- ringptr->skb = skb;
- ringptr->pdl[0] = ((1 << 16) | i); /* PDH: 1 Fragment & length */
- if (lp->chip == HP100_CHIPID_SHASTA) {
- /* TODO:Could someone who has the EISA card please check if this works? */
- ringptr->pdl[2] = i;
- } else { /* Lassen */
- /* In the PDL, don't use the padded size but the real packet size: */
- ringptr->pdl[2] = skb->len; /* 1st Frag: Length of frag */
- }
- /* Conversion to new PCI API : map skbuf data to PCI bus.
- * Doc says it's OK for EISA as well - Jean II */
- ringptr->pdl[1] = ((u32) pci_map_single(lp->pci_dev, skb->data, ringptr->pdl[2], PCI_DMA_TODEVICE)); /* 1st Frag: Adr. of data */
-
- /* Hand this PDL to the card. */
- hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */
-
- lp->txrcommit++;
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return NETDEV_TX_OK;
-
-drop:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-
-/* clean_txring checks if packets have been sent by the card by reading
- * the TX_PDL register from the performance page and comparing it to the
- * number of committed packets. It then frees the skb's of the packets that
- * obviously have been sent to the network.
- *
- * Needs the PERFORMANCE page selected.
- */
-static void hp100_clean_txring(struct net_device *dev)
-{
- struct hp100_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int donecount;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4211, TRACE);
- printk("hp100: %s: clean txring\n", dev->name);
-#endif
-
- /* How many PDLs have been transmitted? */
- donecount = (lp->txrcommit) - hp100_inb(TX_PDL);
-
-#ifdef HP100_DEBUG
- if (donecount > MAX_TX_PDL)
- printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n", dev->name);
-#endif
-
- for (; 0 != donecount; donecount--) {
-#ifdef HP100_DEBUG_BM
- printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n",
- dev->name, (u_int) lp->txrhead->skb->data,
- lp->txrcommit, hp100_inb(TX_PDL), donecount);
-#endif
- /* Conversion to new PCI API : NOP */
- pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
- dev_consume_skb_any(lp->txrhead->skb);
- lp->txrhead->skb = NULL;
- lp->txrhead = lp->txrhead->next;
- lp->txrcommit--;
- }
-}
-
-/* tx function for slave modes */
-static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- unsigned long flags;
- int i, ok_flag;
- int ioaddr = dev->base_addr;
- u_short val;
- struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4212, TRACE);
- printk("hp100: %s: start_xmit\n", dev->name);
-#endif
- if (skb->len <= 0)
- goto drop;
-
- if (hp100_check_lan(dev))
- goto drop;
-
- /* If there is not enough free memory on the card... */
- i = hp100_inl(TX_MEM_FREE) & 0x7fffffff;
- if (!(((i / 2) - 539) > (skb->len + 16) && (hp100_inb(TX_PKT_CNT) < 255))) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
-#endif
- /* not waited long enough since last failed tx try? */
- if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: trans_start timing problem\n",
- dev->name);
-#endif
- goto drop;
- }
- if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
- /* we have a 100Mb/s adapter but it isn't connected to hub */
- printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
- hp100_stop_interface(dev);
- lp->hub_status = hp100_login_to_vg_hub(dev, 0);
- hp100_start_interface(dev);
- } else {
- spin_lock_irqsave(&lp->lock, flags);
- hp100_ints_off(); /* Useful ? Jean II */
- i = hp100_sense_lan(dev);
- hp100_ints_on();
- spin_unlock_irqrestore(&lp->lock, flags);
- if (i == HP100_LAN_ERR)
- printk("hp100: %s: link down detected\n", dev->name);
- else if (lp->lan_type != i) { /* cable change! */
- /* it's very hard - all network settings must be changed!!! */
- printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
- lp->lan_type = i;
- hp100_stop_interface(dev);
- if (lp->lan_type == HP100_LAN_100)
- lp->hub_status = hp100_login_to_vg_hub(dev, 0);
- hp100_start_interface(dev);
- } else {
- printk("hp100: %s: interface reset\n", dev->name);
- hp100_stop_interface(dev);
- if (lp->lan_type == HP100_LAN_100)
- lp->hub_status = hp100_login_to_vg_hub(dev, 0);
- hp100_start_interface(dev);
- mdelay(1);
- }
- }
- goto drop;
- }
-
- for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) {
-#ifdef HP100_DEBUG_TX
- printk("hp100: %s: start_xmit: busy\n", dev->name);
-#endif
- }
-
- spin_lock_irqsave(&lp->lock, flags);
- hp100_ints_off();
- val = hp100_inw(IRQ_STATUS);
- /* Ack/clear the TX_COMPLETE interrupt - this interrupt is set
- * when the current packet being transmitted on the wire is completed. */
- hp100_outw(HP100_TX_COMPLETE, IRQ_STATUS);
-#ifdef HP100_DEBUG_TX
- printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n",
- dev->name, val, hp100_inw(IRQ_MASK), (int) skb->len);
-#endif
-
- ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
- i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
-
- hp100_outw(i, DATA32); /* tell card the total packet length */
- hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */
-
- if (lp->mode == 2) { /* memory mapped */
- /* Note: The J2585B needs alignment to 32bits here! */
- memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3);
- if (!ok_flag)
- memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len);
- } else { /* programmed i/o */
- outsl(ioaddr + HP100_REG_DATA32, skb->data,
- (skb->len + 3) >> 2);
- if (!ok_flag)
- for (i = (skb->len + 3) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4)
- hp100_outl(0, DATA32);
- }
-
- hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW); /* send packet */
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- hp100_ints_on();
- spin_unlock_irqrestore(&lp->lock, flags);
-
- dev_consume_skb_any(skb);
-
-#ifdef HP100_DEBUG_TX
- printk("hp100: %s: start_xmit: end\n", dev->name);
-#endif
-
- return NETDEV_TX_OK;
-
-drop:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-
-}
-
-
-/*
- * Receive Function (Non-Busmaster mode)
- * Called when a "Receive Packet" interrupt occurs, i.e. the receive
- * packet counter is non-zero.
- * For non-busmaster, this function does the whole work of transferring
- * the packet to the host memory and then up to higher layers via skb
- * and netif_rx.
- */
-
-static void hp100_rx(struct net_device *dev)
-{
- int packets, pkt_len;
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
- u_int header;
- struct sk_buff *skb;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4213, TRACE);
- printk("hp100: %s: rx\n", dev->name);
-#endif
-
- /* First get indication of received lan packet */
- /* RX_PKT_CNT indicates the number of packets which have been fully */
- /* received onto the card but have not been fully transferred off the card */
- packets = hp100_inb(RX_PKT_CNT);
-#ifdef HP100_DEBUG_RX
- if (packets > 1)
- printk("hp100: %s: rx: waiting packets = %d\n", dev->name, packets);
-#endif
-
- while (packets-- > 0) {
- /* If ADV_NXT_PKT is still set, we have to wait until the card has */
- /* really advanced to the next packet. */
- for (pkt_len = 0; pkt_len < 6000 && (hp100_inb(OPTION_MSW) & HP100_ADV_NXT_PKT); pkt_len++) {
-#ifdef HP100_DEBUG_RX
- printk ("hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets);
-#endif
- }
-
- /* First we get the header, which contains information about the */
- /* actual length of the received packet. */
- if (lp->mode == 2) { /* memory mapped mode */
- header = readl(lp->mem_ptr_virt);
- } else /* programmed i/o */
- header = hp100_inl(DATA32);
-
- pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3;
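-
- /* e.g. a full 1514 byte frame yields pkt_len = (1514 + 3) & ~3 = 1516,
- * i.e. rounded up to a whole number of 32 bit words. */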
-
-#ifdef HP100_DEBUG_RX
- printk("hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n",
- dev->name, header & HP100_PKT_LEN_MASK,
- (header >> 16) & 0xfff8, (header >> 16) & 7);
-#endif
-
- /* Now we allocate the skb and transfer the data into it. */
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) { /* Not enough memory -> drop packet */
-#ifdef HP100_DEBUG
- printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
- dev->name, pkt_len);
-#endif
- dev->stats.rx_dropped++;
- } else { /* skb successfully allocated */
-
- u_char *ptr;
-
- skb_reserve(skb,2);
-
- /* ptr to start of the sk_buff data area */
- skb_put(skb, pkt_len);
- ptr = skb->data;
-
- /* Now transfer the data from the card into that area */
- if (lp->mode == 2)
- memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len);
- else /* io mapped */
- insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2);
-
- skb->protocol = eth_type_trans(skb, dev);
-
-#ifdef HP100_DEBUG_RX
- printk("hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- dev->name, ptr[0], ptr[1], ptr[2], ptr[3],
- ptr[4], ptr[5], ptr[6], ptr[7], ptr[8],
- ptr[9], ptr[10], ptr[11]);
-#endif
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-
- /* Indicate the card that we have got the packet */
- hp100_outb(HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW);
-
- switch (header & 0x00070000) {
- case (HP100_MULTI_ADDR_HASH << 16):
- case (HP100_MULTI_ADDR_NO_HASH << 16):
- dev->stats.multicast++;
- break;
- }
- } /* end of while(there are packets) loop */
-#ifdef HP100_DEBUG_RX
- printk("hp100_rx: %s: end\n", dev->name);
-#endif
-}
-
-/*
- * Receive Function for Busmaster Mode
- */
-static void hp100_rx_bm(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
- hp100_ring_t *ptr;
- u_int header;
- int pkt_len;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4214, TRACE);
- printk("hp100: %s: rx_bm\n", dev->name);
-#endif
-
-#ifdef HP100_DEBUG
- if (0 == lp->rxrcommit) {
- printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name);
- return;
- } else
- /* RX_PKT_CNT states how many PDLs are currently formatted and available to
- * the cards BM engine */
- if ((hp100_inw(RX_PKT_CNT) & 0x00ff) >= lp->rxrcommit) {
- printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n",
- dev->name, hp100_inw(RX_PKT_CNT) & 0x00ff,
- lp->rxrcommit);
- return;
- }
-#endif
-
- while ((lp->rxrcommit > hp100_inb(RX_PDL))) {
- /*
- * The packet was received into the pdl pointed to by lp->rxrhead
- * (the oldest pdl in the ring).
- */
-
- /* First we get the header, which contains information about the */
- /* actual length of the received packet. */
-
- ptr = lp->rxrhead;
-
- header = *(ptr->pdl - 1);
- pkt_len = (header & HP100_PKT_LEN_MASK);
-
- /* Conversion to new PCI API : NOP */
- pci_unmap_single(lp->pci_dev, (dma_addr_t) ptr->pdl[3], MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
-
-#ifdef HP100_DEBUG_BM
- printk("hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n",
- dev->name, (u_int) (ptr->pdl - 1), (u_int) header,
- pkt_len, (header >> 16) & 0xfff8, (header >> 16) & 7);
- printk("hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n",
- dev->name, hp100_inb(RX_PDL), hp100_inb(TX_PDL),
- hp100_inb(RX_PKT_CNT), (u_int) * (ptr->pdl),
- (u_int) * (ptr->pdl + 3), (u_int) * (ptr->pdl + 4));
-#endif
-
- if ((pkt_len >= MIN_ETHER_SIZE) &&
- (pkt_len <= MAX_ETHER_SIZE)) {
- if (ptr->skb == NULL) {
- printk("hp100: %s: rx_bm: skb null\n", dev->name);
- /* can happen if we only allocated room for the pdh due to memory shortage. */
- dev->stats.rx_dropped++;
- } else {
- skb_trim(ptr->skb, pkt_len); /* Shorten it */
- ptr->skb->protocol =
- eth_type_trans(ptr->skb, dev);
-
- netif_rx(ptr->skb); /* Up and away... */
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-
- switch (header & 0x00070000) {
- case (HP100_MULTI_ADDR_HASH << 16):
- case (HP100_MULTI_ADDR_NO_HASH << 16):
- dev->stats.multicast++;
- break;
- }
- } else {
-#ifdef HP100_DEBUG
- printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n", dev->name, pkt_len);
-#endif
- if (ptr->skb != NULL)
- dev_kfree_skb_any(ptr->skb);
- dev->stats.rx_errors++;
- }
-
- lp->rxrhead = lp->rxrhead->next;
-
- /* Allocate a new rx PDL (so lp->rxrcommit stays the same) */
- if (0 == hp100_build_rx_pdl(lp->rxrtail, dev)) {
- /* No space for skb, header can still be received. */
-#ifdef HP100_DEBUG
- printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name);
-#endif
- return;
- } else { /* successfully allocated new PDL - put it in ringlist at tail. */
- hp100_outl((u32) lp->rxrtail->pdl_paddr, RX_PDA);
- lp->rxrtail = lp->rxrtail->next;
- }
-
- }
-}
-
-/*
- * statistics
- */
-static struct net_device_stats *hp100_get_stats(struct net_device *dev)
-{
- unsigned long flags;
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4215, TRACE);
-#endif
-
- spin_lock_irqsave(&lp->lock, flags);
- hp100_ints_off(); /* Useful ? Jean II */
- hp100_update_stats(dev);
- hp100_ints_on();
- spin_unlock_irqrestore(&lp->lock, flags);
- return &(dev->stats);
-}
-
-static void hp100_update_stats(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- u_short val;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4216, TRACE);
- printk("hp100: %s: update-stats\n", dev->name);
-#endif
-
- /* Note: Statistics counters clear when read. */
- hp100_page(MAC_CTRL);
- val = hp100_inw(DROPPED) & 0x0fff;
- dev->stats.rx_errors += val;
- dev->stats.rx_over_errors += val;
- val = hp100_inb(CRC);
- dev->stats.rx_errors += val;
- dev->stats.rx_crc_errors += val;
- val = hp100_inb(ABORT);
- dev->stats.tx_errors += val;
- dev->stats.tx_aborted_errors += val;
- hp100_page(PERFORMANCE);
-}
-
-static void hp100_misc_interrupt(struct net_device *dev)
-{
-#ifdef HP100_DEBUG_B
- int ioaddr = dev->base_addr;
-
- hp100_outw(0x4216, TRACE);
- printk("hp100: %s: misc_interrupt\n", dev->name);
-#endif
-
- /* Account a miscellaneous interrupt as both an rx and a tx error. */
- dev->stats.rx_errors++;
- dev->stats.tx_errors++;
-}
-
-static void hp100_clear_stats(struct hp100_private *lp, int ioaddr)
-{
- unsigned long flags;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4217, TRACE);
- printk("hp100: %s: clear_stats\n", dev->name);
-#endif
-
- spin_lock_irqsave(&lp->lock, flags);
- hp100_page(MAC_CTRL); /* get all statistics bytes */
- hp100_inw(DROPPED);
- hp100_inb(CRC);
- hp100_inb(ABORT);
- hp100_page(PERFORMANCE);
- spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-
-/*
- * multicast setup
- */
-
-/*
- * Set or clear the multicast filter for this adapter.
- */
-
-static void hp100_set_multicast_list(struct net_device *dev)
-{
- unsigned long flags;
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4218, TRACE);
- printk("hp100: %s: set_mc_list\n", dev->name);
-#endif
-
- spin_lock_irqsave(&lp->lock, flags);
- hp100_ints_off();
- hp100_page(MAC_CTRL);
- hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */
-
- if (dev->flags & IFF_PROMISC) {
- lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
- lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
- memset(&lp->hash_bytes, 0xff, 8);
- } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
- lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
- lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
-#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
- if (dev->flags & IFF_ALLMULTI) {
- /* set hash filter to receive all multicast packets */
- memset(&lp->hash_bytes, 0xff, 8);
- } else {
- int i, idx;
- u_char *addrs;
- struct netdev_hw_addr *ha;
-
- memset(&lp->hash_bytes, 0x00, 8);
-#ifdef HP100_DEBUG
- printk("hp100: %s: computing hash filter - mc_count = %i\n",
- dev->name, netdev_mc_count(dev));
-#endif
- netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-#ifdef HP100_DEBUG
- printk("hp100: %s: multicast = %pM, ",
- dev->name, addrs);
-#endif
- for (i = idx = 0; i < 6; i++) {
- idx ^= *addrs++ & 0x3f;
-#ifdef HP100_DEBUG
- printk(":%02x:", idx);
-#endif
- }
-#ifdef HP100_DEBUG
- printk("idx = %i\n", idx);
-#endif
- lp->hash_bytes[idx >> 3] |= (1 << (idx & 7));
- }
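- /* Worked example: for 01:00:5e:00:00:01 the XOR of the six bytes
- * (each masked with 0x3f) is 0x1e, so bit 6 of lp->hash_bytes[3]
- * gets set (0x1e >> 3 = 3, 0x1e & 7 = 6). */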
- }
-#else
- memset(&lp->hash_bytes, 0xff, 8);
-#endif
- } else {
- lp->mac2_mode = HP100_MAC2MODE3; /* normal mode = get packets for me */
- lp->mac1_mode = HP100_MAC1MODE3; /* and broadcasts */
- memset(&lp->hash_bytes, 0x00, 8);
- }
-
- if (((hp100_inb(MAC_CFG_1) & 0x0f) != lp->mac1_mode) ||
- (hp100_inb(MAC_CFG_2) != lp->mac2_mode)) {
- int i;
-
- hp100_outb(lp->mac2_mode, MAC_CFG_2);
- hp100_andb(HP100_MAC1MODEMASK, MAC_CFG_1); /* clear mac1 mode bits */
- hp100_orb(lp->mac1_mode, MAC_CFG_1); /* and set the new mode */
-
- hp100_page(MAC_ADDRESS);
- for (i = 0; i < 8; i++)
- hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
-#ifdef HP100_DEBUG
- printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- dev->name, lp->mac1_mode, lp->mac2_mode,
- lp->hash_bytes[0], lp->hash_bytes[1],
- lp->hash_bytes[2], lp->hash_bytes[3],
- lp->hash_bytes[4], lp->hash_bytes[5],
- lp->hash_bytes[6], lp->hash_bytes[7]);
-#endif
-
- if (lp->lan_type == HP100_LAN_100) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
-#endif
- lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */
- }
- } else {
- int i;
- u_char old_hash_bytes[8];
-
- hp100_page(MAC_ADDRESS);
- for (i = 0; i < 8; i++)
- old_hash_bytes[i] = hp100_inb(HASH_BYTE0 + i);
- if (memcmp(old_hash_bytes, &lp->hash_bytes, 8)) {
- for (i = 0; i < 8; i++)
- hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
-#ifdef HP100_DEBUG
- printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- dev->name, lp->hash_bytes[0],
- lp->hash_bytes[1], lp->hash_bytes[2],
- lp->hash_bytes[3], lp->hash_bytes[4],
- lp->hash_bytes[5], lp->hash_bytes[6],
- lp->hash_bytes[7]);
-#endif
-
- if (lp->lan_type == HP100_LAN_100) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
-#endif
- lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */
- }
- }
- }
-
- hp100_page(MAC_CTRL);
- hp100_orb(HP100_RX_EN | HP100_RX_IDLE | /* enable rx */
- HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1); /* enable tx */
-
- hp100_page(PERFORMANCE);
- hp100_ints_on();
- spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-/*
- * hardware interrupt handling
- */
-
-static irqreturn_t hp100_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct hp100_private *lp = netdev_priv(dev);
-
- int ioaddr;
- u_int val;
-
- if (dev == NULL)
- return IRQ_NONE;
- ioaddr = dev->base_addr;
-
- spin_lock(&lp->lock);
-
- hp100_ints_off();
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4219, TRACE);
-#endif
-
- /* hp100_page( PERFORMANCE ); */
- val = hp100_inw(IRQ_STATUS);
-#ifdef HP100_DEBUG_IRQ
- printk("hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x TXPKTCNT=0x%.2x TXPDL=0x%.2x\n",
- dev->name, lp->mode, (u_int) val, hp100_inb(RX_PKT_CNT),
- hp100_inb(RX_PDL), hp100_inb(TX_PKT_CNT), hp100_inb(TX_PDL));
-#endif
-
- if (val == 0) { /* might be a shared interrupt */
- spin_unlock(&lp->lock);
- hp100_ints_on();
- return IRQ_NONE;
- }
- /* We're only interested in those interrupts we really enabled. */
- /* val &= hp100_inw( IRQ_MASK ); */
-
- /*
- * RX_PDL_FILL_COMPL is set whenever an RX_PDL has been executed. An RX_PDL
- * is considered executed whenever the RX_PDL data structure is no longer
- * needed.
- */
- if (val & HP100_RX_PDL_FILL_COMPL) {
- if (lp->mode == 1)
- hp100_rx_bm(dev);
- else {
- printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name);
- }
- }
-
- /*
- * The RX_PACKET interrupt is set when the receive packet counter is
- * non-zero. We use this interrupt for receiving in slave mode. In
- * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill
- * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then
- * we somehow have missed a rx_pdl_fill_compl interrupt.
- */
-
-	if (val & HP100_RX_PACKET) {	/* Receive Packet Counter is non-zero */
- if (lp->mode != 1) /* non busmaster */
- hp100_rx(dev);
- else if (!(val & HP100_RX_PDL_FILL_COMPL)) {
- /* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt? */
- hp100_rx_bm(dev);
- }
- }
-
- /*
- * Ack. that we have noticed the interrupt and thereby allow next one.
- * Note that this is now done after the slave rx function, since first
- * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt
- * on the J2573.
- */
- hp100_outw(val, IRQ_STATUS);
-
- /*
- * RX_ERROR is set when a packet is dropped due to no memory resources on
- * the card or when a RCV_ERR occurs.
- * TX_ERROR is set when a TX_ABORT condition occurs in the MAC; it exists
- * only in the 802.3 MAC and happens when 16 collisions occur during a TX.
- */
- if (val & (HP100_TX_ERROR | HP100_RX_ERROR)) {
-#ifdef HP100_DEBUG_IRQ
- printk("hp100: %s: TX/RX Error IRQ\n", dev->name);
-#endif
- hp100_update_stats(dev);
- if (lp->mode == 1) {
- hp100_rxfill(dev);
- hp100_clean_txring(dev);
- }
- }
-
- /*
- * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero.
- */
- if ((lp->mode == 1) && (val & (HP100_RX_PDA_ZERO)))
- hp100_rxfill(dev);
-
- /*
- * HP100_TX_COMPLETE interrupt occurs when packet transmitted on wire
- * is completed
- */
- if ((lp->mode == 1) && (val & (HP100_TX_COMPLETE)))
- hp100_clean_txring(dev);
-
- /*
- * MISC_ERROR is set when either the LAN link goes down or a detected
- * bus error occurs.
- */
- if (val & HP100_MISC_ERROR) { /* New for J2585B */
-#ifdef HP100_DEBUG_IRQ
- printk
- ("hp100: %s: Misc. Error Interrupt - Check cabling.\n",
- dev->name);
-#endif
- if (lp->mode == 1) {
- hp100_clean_txring(dev);
- hp100_rxfill(dev);
- }
- hp100_misc_interrupt(dev);
- }
-
- spin_unlock(&lp->lock);
- hp100_ints_on();
- return IRQ_HANDLED;
-}
-
-/*
- * some misc functions
- */
-
-static void hp100_start_interface(struct net_device *dev)
-{
- unsigned long flags;
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4220, TRACE);
- printk("hp100: %s: hp100_start_interface\n", dev->name);
-#endif
-
- spin_lock_irqsave(&lp->lock, flags);
-
- /* Ensure the adapter does not want to request an interrupt when */
- /* enabling the IRQ line to be active on the bus (i.e. not tri-stated) */
- hp100_page(PERFORMANCE);
- hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
- hp100_outw(0xffff, IRQ_STATUS); /* ack all IRQs */
- hp100_outw(HP100_FAKE_INT | HP100_INT_EN | HP100_RESET_LB,
- OPTION_LSW);
- /* Un Tri-state int. TODO: Check if shared interrupts can be realised? */
- hp100_outw(HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW);
-
- if (lp->mode == 1) {
- /* Make sure BM bit is set... */
- hp100_page(HW_MAP);
- hp100_orb(HP100_BM_MASTER, BM);
- hp100_rxfill(dev);
- } else if (lp->mode == 2) {
- /* Enable memory mapping. Note: Don't do this when busmaster. */
- hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
- }
-
- hp100_page(PERFORMANCE);
- hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
- hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
-
- /* enable a few interrupts: */
- if (lp->mode == 1) { /* busmaster mode */
- hp100_outw(HP100_RX_PDL_FILL_COMPL |
- HP100_RX_PDA_ZERO | HP100_RX_ERROR |
- /* HP100_RX_PACKET | */
- /* HP100_RX_EARLY_INT | */ HP100_SET_HB |
- /* HP100_TX_PDA_ZERO | */
- HP100_TX_COMPLETE |
- /* HP100_MISC_ERROR | */
- HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
- } else {
- hp100_outw(HP100_RX_PACKET |
- HP100_RX_ERROR | HP100_SET_HB |
- HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
- }
-
-	/* Note: unlock before hp100_set_multicast_list(), because it takes the
-	 * spinlock itself... Jean II */
- spin_unlock_irqrestore(&lp->lock, flags);
-
- /* Enable MAC Tx and RX, set MAC modes, ... */
- hp100_set_multicast_list(dev);
-}
-
-static void hp100_stop_interface(struct net_device *dev)
-{
- struct hp100_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- u_int val;
-
-#ifdef HP100_DEBUG_B
- printk("hp100: %s: hp100_stop_interface\n", dev->name);
- hp100_outw(0x4221, TRACE);
-#endif
-
- if (lp->mode == 1)
- hp100_BM_shutdown(dev);
- else {
- /* Note: MMAP_DIS will be reenabled by start_interface */
- hp100_outw(HP100_INT_EN | HP100_RESET_LB |
- HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB,
- OPTION_LSW);
- val = hp100_inw(OPTION_LSW);
-
- hp100_page(MAC_CTRL);
- hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
-
- if (!(val & HP100_HW_RST))
- return; /* If reset, imm. return ... */
- /* ... else: busy wait until idle */
- for (val = 0; val < 6000; val++)
- if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) {
- hp100_page(PERFORMANCE);
- return;
- }
- printk("hp100: %s: hp100_stop_interface - timeout\n", dev->name);
- hp100_page(PERFORMANCE);
- }
-}
-
-static void hp100_load_eeprom(struct net_device *dev, u_short probe_ioaddr)
-{
- int i;
- int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4222, TRACE);
-#endif
-
- hp100_page(EEPROM_CTRL);
- hp100_andw(~HP100_EEPROM_LOAD, EEPROM_CTRL);
- hp100_orw(HP100_EEPROM_LOAD, EEPROM_CTRL);
- for (i = 0; i < 10000; i++)
- if (!(hp100_inb(OPTION_MSW) & HP100_EE_LOAD))
- return;
- printk("hp100: %s: hp100_load_eeprom - timeout\n", dev->name);
-}
-
-/* Sense connection status.
- * return values: LAN_10 - Connected to 10Mbit/s network
- * LAN_100 - Connected to 100Mbit/s network
- * LAN_ERR - not connected or 100Mbit/s Hub down
- */
-static int hp100_sense_lan(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- u_short val_VG, val_10;
- struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4223, TRACE);
-#endif
-
- hp100_page(MAC_CTRL);
- val_10 = hp100_inb(10_LAN_CFG_1);
- val_VG = hp100_inb(VG_LAN_CFG_1);
- hp100_page(PERFORMANCE);
-#ifdef HP100_DEBUG
- printk("hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n",
- dev->name, val_VG, val_10);
-#endif
-
- if (val_10 & HP100_LINK_BEAT_ST) /* 10Mb connection is active */
- return HP100_LAN_10;
-
- if (val_10 & HP100_AUI_ST) { /* have we BNC or AUI onboard? */
- /*
-		 * This can be overridden by the DOS utility, so if this has no effect,
-		 * perhaps you need to download that utility from HP and set the card
- * back to "auto detect".
- */
- val_10 |= HP100_AUI_SEL | HP100_LOW_TH;
- hp100_page(MAC_CTRL);
- hp100_outb(val_10, 10_LAN_CFG_1);
- hp100_page(PERFORMANCE);
- return HP100_LAN_COAX;
- }
-
- /* Those cards don't have a 100 Mbit connector */
- if ( !strcmp(lp->id, "HWP1920") ||
- (lp->pci_dev &&
-	      lp->pci_dev->vendor == PCI_VENDOR_ID_HP &&
- (lp->pci_dev->device == PCI_DEVICE_ID_HP_J2970A ||
- lp->pci_dev->device == PCI_DEVICE_ID_HP_J2973A)))
- return HP100_LAN_ERR;
-
-	if (val_VG & HP100_LINK_CABLE_ST)	/* Can hear the hub's tones. */
- return HP100_LAN_100;
- return HP100_LAN_ERR;
-}
-
-static int hp100_down_vg_link(struct net_device *dev)
-{
- struct hp100_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long time;
- long savelan, newlan;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4224, TRACE);
- printk("hp100: %s: down_vg_link\n", dev->name);
-#endif
-
- hp100_page(MAC_CTRL);
- time = jiffies + (HZ / 4);
- do {
- if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
- break;
- if (!in_interrupt())
- schedule_timeout_interruptible(1);
- } while (time_after(time, jiffies));
-
- if (time_after_eq(jiffies, time)) /* no signal->no logout */
- return 0;
-
- /* Drop the VG Link by clearing the link up cmd and load addr. */
-
- hp100_andb(~(HP100_LOAD_ADDR | HP100_LINK_CMD), VG_LAN_CFG_1);
- hp100_orb(HP100_VG_SEL, VG_LAN_CFG_1);
-
- /* Conditionally stall for >250ms on Link-Up Status (to go down) */
- time = jiffies + (HZ / 2);
- do {
- if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
- break;
- if (!in_interrupt())
- schedule_timeout_interruptible(1);
- } while (time_after(time, jiffies));
-
-#ifdef HP100_DEBUG
- if (time_after_eq(jiffies, time))
- printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name);
-#endif
-
- /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */
- /* logout under traffic (even though all the status bits are cleared), */
- /* do this workaround to get the Rev 1 MAC in its idle state */
- if (lp->chip == HP100_CHIPID_LASSEN) {
- /* Reset VG MAC to insure it leaves the logoff state even if */
- /* the Hub is still emitting tones */
- hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1);
- udelay(1500); /* wait for >1ms */
- hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); /* Release Reset */
- udelay(1500);
- }
-
- /* New: For lassen, switch to 10 Mbps mac briefly to clear training ACK */
-	/* to get the VG mac to full reset. This is not required with later chips */
-	/* Note: It will take between 1 and 2 seconds for the VG mac to be */
- /* selected again! This will be left to the connect hub function to */
- /* perform if desired. */
- if (lp->chip == HP100_CHIPID_LASSEN) {
- /* Have to write to 10 and 100VG control registers simultaneously */
- savelan = newlan = hp100_inl(10_LAN_CFG_1); /* read 10+100 LAN_CFG regs */
- newlan &= ~(HP100_VG_SEL << 16);
- newlan |= (HP100_DOT3_MAC) << 8;
- hp100_andb(~HP100_AUTO_MODE, MAC_CFG_3); /* Autosel off */
- hp100_outl(newlan, 10_LAN_CFG_1);
-
- /* Conditionally stall for 5sec on VG selected. */
- time = jiffies + (HZ * 5);
- do {
- if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST))
- break;
- if (!in_interrupt())
- schedule_timeout_interruptible(1);
- } while (time_after(time, jiffies));
-
- hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */
- hp100_outl(savelan, 10_LAN_CFG_1);
- }
-
- time = jiffies + (3 * HZ); /* Timeout 3s */
- do {
- if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0)
- break;
- if (!in_interrupt())
- schedule_timeout_interruptible(1);
- } while (time_after(time, jiffies));
-
- if (time_before_eq(time, jiffies)) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: down_vg_link: timeout\n", dev->name);
-#endif
- return -EIO;
- }
-
- time = jiffies + (2 * HZ); /* This seems to take a while.... */
- do {
- if (!in_interrupt())
- schedule_timeout_interruptible(1);
- } while (time_after(time, jiffies));
-
- return 0;
-}
-
-static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
-{
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
- u_short val = 0;
- unsigned long time;
- int startst;
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4225, TRACE);
- printk("hp100: %s: login_to_vg_hub\n", dev->name);
-#endif
-
- /* Initiate a login sequence iff VG MAC is enabled and either Load Address
- * bit is zero or the force relogin flag is set (e.g. due to MAC address or
- * promiscuous mode change)
- */
- hp100_page(MAC_CTRL);
- startst = hp100_inb(VG_LAN_CFG_1);
- if ((force_relogin == 1) || (hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) {
-#ifdef HP100_DEBUG_TRAINING
- printk("hp100: %s: Start training\n", dev->name);
-#endif
-
- /* Ensure VG Reset bit is 1 (i.e., do not reset) */
- hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1);
-
- /* If Lassen AND auto-select-mode AND VG tones were sensed on */
- /* entry then temporarily put them into force 100Mbit mode */
- if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST))
- hp100_andb(~HP100_DOT3_MAC, 10_LAN_CFG_2);
-
- /* Drop the VG link by zeroing Link Up Command and Load Address */
- hp100_andb(~(HP100_LINK_CMD /* |HP100_LOAD_ADDR */ ), VG_LAN_CFG_1);
-
-#ifdef HP100_DEBUG_TRAINING
- printk("hp100: %s: Bring down the link\n", dev->name);
-#endif
-
- /* Wait for link to drop */
- time = jiffies + (HZ / 10);
- do {
- if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
- break;
- if (!in_interrupt())
- schedule_timeout_interruptible(1);
- } while (time_after(time, jiffies));
-
- /* Start an addressed training and optionally request promiscuous port */
- if ((dev->flags) & IFF_PROMISC) {
- hp100_orb(HP100_PROM_MODE, VG_LAN_CFG_2);
- if (lp->chip == HP100_CHIPID_LASSEN)
- hp100_orw(HP100_MACRQ_PROMSC, TRAIN_REQUEST);
- } else {
- hp100_andb(~HP100_PROM_MODE, VG_LAN_CFG_2);
- /* For ETR parts we need to reset the prom. bit in the training
-			 * register, otherwise promiscuous mode won't be disabled.
- */
- if (lp->chip == HP100_CHIPID_LASSEN) {
- hp100_andw(~HP100_MACRQ_PROMSC, TRAIN_REQUEST);
- }
- }
-
- /* With ETR parts, frame format request bits can be set. */
- if (lp->chip == HP100_CHIPID_LASSEN)
- hp100_orb(HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST);
-
- hp100_orb(HP100_LINK_CMD | HP100_LOAD_ADDR | HP100_VG_RESET, VG_LAN_CFG_1);
-
- /* Note: Next wait could be omitted for Hood and earlier chips under */
- /* certain circumstances */
- /* TODO: check if hood/earlier and skip wait. */
-
- /* Wait for either short timeout for VG tones or long for login */
-		/* Wait for the card hardware to signal link cable status ok... */
- hp100_page(MAC_CTRL);
- time = jiffies + (1 * HZ); /* 1 sec timeout for cable st */
- do {
- if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
- break;
- if (!in_interrupt())
- schedule_timeout_interruptible(1);
- } while (time_before(jiffies, time));
-
- if (time_after_eq(jiffies, time)) {
-#ifdef HP100_DEBUG_TRAINING
- printk("hp100: %s: Link cable status not ok? Training aborted.\n", dev->name);
-#endif
- } else {
-#ifdef HP100_DEBUG_TRAINING
- printk
- ("hp100: %s: HUB tones detected. Trying to train.\n",
- dev->name);
-#endif
-
- time = jiffies + (2 * HZ); /* again a timeout */
- do {
- val = hp100_inb(VG_LAN_CFG_1);
- if ((val & (HP100_LINK_UP_ST))) {
-#ifdef HP100_DEBUG_TRAINING
- printk("hp100: %s: Passed training.\n", dev->name);
-#endif
- break;
- }
- if (!in_interrupt())
- schedule_timeout_interruptible(1);
- } while (time_after(time, jiffies));
- }
-
- /* If LINK_UP_ST is set, then we are logged into the hub. */
- if (time_before_eq(jiffies, time) && (val & HP100_LINK_UP_ST)) {
-#ifdef HP100_DEBUG_TRAINING
- printk("hp100: %s: Successfully logged into the HUB.\n", dev->name);
- if (lp->chip == HP100_CHIPID_LASSEN) {
- val = hp100_inw(TRAIN_ALLOW);
- printk("hp100: %s: Card supports 100VG MAC Version \"%s\" ",
- dev->name, (hp100_inw(TRAIN_REQUEST) & HP100_CARD_MACVER) ? "802.12" : "Pre");
- printk("Driver will use MAC Version \"%s\"\n", (val & HP100_HUB_MACVER) ? "802.12" : "Pre");
- printk("hp100: %s: Frame format is %s.\n", dev->name, (val & HP100_MALLOW_FRAMEFMT) ? "802.5" : "802.3");
- }
-#endif
- } else {
- /* If LINK_UP_ST is not set, login was not successful */
- printk("hp100: %s: Problem logging into the HUB.\n", dev->name);
- if (lp->chip == HP100_CHIPID_LASSEN) {
- /* Check allowed Register to find out why there is a problem. */
- val = hp100_inw(TRAIN_ALLOW); /* won't work on non-ETR card */
-#ifdef HP100_DEBUG_TRAINING
- printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val);
-#endif
- if (val & HP100_MALLOW_ACCDENIED)
- printk("hp100: %s: HUB access denied.\n", dev->name);
- if (val & HP100_MALLOW_CONFIGURE)
- printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name);
- if (val & HP100_MALLOW_DUPADDR)
- printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name);
- }
- }
-
- /* If we have put the chip into forced 100 Mbit mode earlier, go back */
- /* to auto-select mode */
-
- if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) {
- hp100_page(MAC_CTRL);
- hp100_orb(HP100_DOT3_MAC, 10_LAN_CFG_2);
- }
-
- val = hp100_inb(VG_LAN_CFG_1);
-
- /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */
- hp100_page(PERFORMANCE);
- hp100_outw(HP100_MISC_ERROR, IRQ_STATUS);
-
- if (val & HP100_LINK_UP_ST)
- return 0; /* login was ok */
- else {
- printk("hp100: %s: Training failed.\n", dev->name);
- hp100_down_vg_link(dev);
- return -EIO;
- }
- }
-	/* no forced relogin & link already up -> no training. */
- return -EIO;
-}
-
-static void hp100_cascade_reset(struct net_device *dev, u_short enable)
-{
- int ioaddr = dev->base_addr;
- struct hp100_private *lp = netdev_priv(dev);
-
-#ifdef HP100_DEBUG_B
- hp100_outw(0x4226, TRACE);
- printk("hp100: %s: cascade_reset\n", dev->name);
-#endif
-
- if (enable) {
- hp100_outw(HP100_HW_RST | HP100_RESET_LB, OPTION_LSW);
- if (lp->chip == HP100_CHIPID_LASSEN) {
- /* Lassen requires a PCI transmit fifo reset */
- hp100_page(HW_MAP);
- hp100_andb(~HP100_PCI_RESET, PCICTRL2);
- hp100_orb(HP100_PCI_RESET, PCICTRL2);
- /* Wait for min. 300 ns */
- /* we can't use jiffies here, because it may be */
- /* that we have disabled the timer... */
- udelay(400);
- hp100_andb(~HP100_PCI_RESET, PCICTRL2);
- hp100_page(PERFORMANCE);
- }
- } else { /* bring out of reset */
- hp100_outw(HP100_HW_RST | HP100_SET_LB, OPTION_LSW);
- udelay(400);
- hp100_page(PERFORMANCE);
- }
-}
-
-#ifdef HP100_DEBUG
-void hp100_RegisterDump(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int Page;
- int Register;
-
- /* Dump common registers */
- printk("hp100: %s: Cascade Register Dump\n", dev->name);
- printk("hardware id #1: 0x%.2x\n", hp100_inb(HW_ID));
- printk("hardware id #2/paging: 0x%.2x\n", hp100_inb(PAGING));
- printk("option #1: 0x%.4x\n", hp100_inw(OPTION_LSW));
- printk("option #2: 0x%.4x\n", hp100_inw(OPTION_MSW));
-
- /* Dump paged registers */
- for (Page = 0; Page < 8; Page++) {
- /* Dump registers */
- printk("page: 0x%.2x\n", Page);
- outw(Page, ioaddr + 0x02);
- for (Register = 0x8; Register < 0x22; Register += 2) {
- /* Display Register contents except data port */
- if (((Register != 0x10) && (Register != 0x12)) || (Page > 0)) {
- printk("0x%.2x = 0x%.4x\n", Register, inw(ioaddr + Register));
- }
- }
- }
- hp100_page(PERFORMANCE);
-}
-#endif
-
-
-static void cleanup_dev(struct net_device *d)
-{
- struct hp100_private *p = netdev_priv(d);
-
- unregister_netdev(d);
- release_region(d->base_addr, HP100_REGION_SIZE);
-
- if (p->mode == 1) /* busmaster */
- pci_free_consistent(p->pci_dev, MAX_RINGSIZE + 0x0f,
- p->page_vaddr_algn,
- virt_to_whatever(d, p->page_vaddr_algn));
- if (p->mem_ptr_virt)
- iounmap(p->mem_ptr_virt);
-
- free_netdev(d);
-}
-
-static int hp100_eisa_probe(struct device *gendev)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
- struct eisa_device *edev = to_eisa_device(gendev);
- int err;
-
- if (!dev)
- return -ENOMEM;
-
- SET_NETDEV_DEV(dev, &edev->dev);
-
- err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL);
- if (err)
- goto out1;
-
-#ifdef HP100_DEBUG
- printk("hp100: %s: EISA adapter found at 0x%x\n", dev->name,
- dev->base_addr);
-#endif
- dev_set_drvdata(gendev, dev);
- return 0;
- out1:
- free_netdev(dev);
- return err;
-}
-
-static int hp100_eisa_remove(struct device *gendev)
-{
- struct net_device *dev = dev_get_drvdata(gendev);
- cleanup_dev(dev);
- return 0;
-}
-
-static struct eisa_driver hp100_eisa_driver = {
- .id_table = hp100_eisa_tbl,
- .driver = {
- .name = "hp100",
- .probe = hp100_eisa_probe,
- .remove = hp100_eisa_remove,
- }
-};
-
-static int hp100_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct net_device *dev;
- int ioaddr;
- u_short pci_command;
- int err;
-
- if (pci_enable_device(pdev))
- return -ENODEV;
-
- dev = alloc_etherdev(sizeof(struct hp100_private));
- if (!dev) {
- err = -ENOMEM;
- goto out0;
- }
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
- if (!(pci_command & PCI_COMMAND_IO)) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name);
-#endif
- pci_command |= PCI_COMMAND_IO;
- pci_write_config_word(pdev, PCI_COMMAND, pci_command);
- }
-
- if (!(pci_command & PCI_COMMAND_MASTER)) {
-#ifdef HP100_DEBUG
- printk("hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name);
-#endif
- pci_command |= PCI_COMMAND_MASTER;
- pci_write_config_word(pdev, PCI_COMMAND, pci_command);
- }
-
- ioaddr = pci_resource_start(pdev, 0);
- err = hp100_probe1(dev, ioaddr, HP100_BUS_PCI, pdev);
- if (err)
- goto out1;
-
-#ifdef HP100_DEBUG
- printk("hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr);
-#endif
- pci_set_drvdata(pdev, dev);
- return 0;
- out1:
- free_netdev(dev);
- out0:
- pci_disable_device(pdev);
- return err;
-}
-
-static void hp100_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- cleanup_dev(dev);
- pci_disable_device(pdev);
-}
-
-
-static struct pci_driver hp100_pci_driver = {
- .name = "hp100",
- .id_table = hp100_pci_tbl,
- .probe = hp100_pci_probe,
- .remove = hp100_pci_remove,
-};
-
-/*
- * module section
- */
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, "
- "Siegfried \"Frieder\" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>");
-MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters");
-
-/*
- * Note: to register three eisa or pci devices, use:
- * option hp100 hp100_port=0,0,0
- * to register one card at io 0x280 as eth239, use:
- * option hp100 hp100_port=0x280
- */
-#if defined(MODULE) && defined(CONFIG_ISA)
-#define HP100_DEVICES 5
-/* Parameters set by insmod */
-static int hp100_port[HP100_DEVICES] = { 0, [1 ... (HP100_DEVICES-1)] = -1 };
-module_param_hw_array(hp100_port, int, ioport, NULL, 0);
-
-/* List of devices */
-static struct net_device *hp100_devlist[HP100_DEVICES];
-
-static int __init hp100_isa_init(void)
-{
- struct net_device *dev;
- int i, err, cards = 0;
-
- /* Don't autoprobe ISA bus */
- if (hp100_port[0] == 0)
- return -ENODEV;
-
- /* Loop on all possible base addresses */
- for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) {
- dev = alloc_etherdev(sizeof(struct hp100_private));
- if (!dev) {
- while (cards > 0)
- cleanup_dev(hp100_devlist[--cards]);
-
- return -ENOMEM;
- }
-
- err = hp100_isa_probe(dev, hp100_port[i]);
- if (!err)
- hp100_devlist[cards++] = dev;
- else
- free_netdev(dev);
- }
-
- return cards > 0 ? 0 : -ENODEV;
-}
-
-static void hp100_isa_cleanup(void)
-{
- int i;
-
- for (i = 0; i < HP100_DEVICES; i++) {
- struct net_device *dev = hp100_devlist[i];
- if (dev)
- cleanup_dev(dev);
- }
-}
-#else
-#define hp100_isa_init() (0)
-#define hp100_isa_cleanup() do { } while(0)
-#endif
-
-static int __init hp100_module_init(void)
-{
- int err;
-
- err = hp100_isa_init();
- if (err && err != -ENODEV)
- goto out;
- err = eisa_driver_register(&hp100_eisa_driver);
- if (err && err != -ENODEV)
- goto out2;
- err = pci_register_driver(&hp100_pci_driver);
- if (err && err != -ENODEV)
- goto out3;
- out:
- return err;
- out3:
- eisa_driver_unregister (&hp100_eisa_driver);
- out2:
- hp100_isa_cleanup();
- goto out;
-}
-
-
-static void __exit hp100_module_exit(void)
-{
- hp100_isa_cleanup();
- eisa_driver_unregister (&hp100_eisa_driver);
- pci_unregister_driver (&hp100_pci_driver);
-}
-
-module_init(hp100_module_init)
-module_exit(hp100_module_exit)
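
The multicast path removed above (hp100_set_multicast_list) folds each station
address into a 6-bit index by XOR-ing the low six bits of all six address
bytes, then sets one bit in the 64-bit hash table. A standalone sketch of that
hash, for illustration only (plain userspace C, not driver code):

    #include <stdio.h>

    /* 6-bit XOR hash as computed by the removed hp100_set_multicast_list() */
    static unsigned int hp100_mc_hash(const unsigned char *addr)
    {
            unsigned int i, idx = 0;

            for (i = 0; i < 6; i++)
                    idx ^= addr[i] & 0x3f;  /* fold in each byte's low 6 bits */
            return idx;                     /* table byte idx>>3, bit idx&7 */
    }

    int main(void)
    {
            unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            unsigned char hash[8] = { 0 };
            unsigned int idx = hp100_mc_hash(mc);

            /* for 01:00:5e:00:00:01: idx = 0x1e, so hash[3] |= 0x40 */
            hash[idx >> 3] |= 1 << (idx & 7);
            printf("idx=0x%02x -> hash[%u] |= 0x%02x\n",
                   idx, idx >> 3, 1u << (idx & 7));
            return 0;
    }
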
diff --git a/drivers/staging/hp/hp100.h b/drivers/staging/hp/hp100.h
deleted file mode 100644
index 7239b94c9de5..000000000000
--- a/drivers/staging/hp/hp100.h
+++ /dev/null
@@ -1,611 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
- *
- * $Id: hp100.h,v 1.51 1997/04/08 14:26:42 floeff Exp floeff $
- *
- * Authors: Jaroslav Kysela, <perex@pf.jcu.cz>
- * Siegfried Loeffler <floeff@tunix.mathematik.uni-stuttgart.de>
- *
- * This driver is based on the 'hpfepkt' crynwr packet driver.
- */
-
-/****************************************************************************
- * Hardware Constants
- ****************************************************************************/
-
-/*
- * Page Identifiers
- * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02)
- */
-
-#define HP100_PAGE_PERFORMANCE 0x0 /* Page 0 */
-#define HP100_PAGE_MAC_ADDRESS 0x1 /* Page 1 */
-#define HP100_PAGE_HW_MAP 0x2 /* Page 2 */
-#define HP100_PAGE_EEPROM_CTRL 0x3 /* Page 3 */
-#define HP100_PAGE_MAC_CTRL 0x4 /* Page 4 */
-#define HP100_PAGE_MMU_CFG 0x5 /* Page 5 */
-#define HP100_PAGE_ID_MAC_ADDR 0x6 /* Page 6 */
-#define HP100_PAGE_MMU_POINTER 0x7 /* Page 7 */
-
-
-/* Registers that are present on all pages */
-
-#define HP100_REG_HW_ID 0x00 /* R: (16) Unique card ID */
-#define HP100_REG_TRACE 0x00 /* W: (16) Used for debug output */
-#define HP100_REG_PAGING 0x02 /* R: (16),15:4 Card ID */
- /* W: (16),3:0 Switch pages */
-#define HP100_REG_OPTION_LSW 0x04 /* RW: (16) Select card functions */
-#define HP100_REG_OPTION_MSW 0x06 /* RW: (16) Select card functions */
-
-/* Page 0 - Performance */
-
-#define HP100_REG_IRQ_STATUS 0x08 /* RW: (16) Which ints are pending */
-#define HP100_REG_IRQ_MASK 0x0a /* RW: (16) Select ints to allow */
-#define HP100_REG_FRAGMENT_LEN 0x0c /* W: (16)12:0 Current fragment len */
-/* Note: For 32 bit systems, fragment len and offset registers are available */
-/* at offset 0x28 and 0x2c, where they can be written as 32bit values. */
-#define HP100_REG_OFFSET 0x0e /* RW: (16)12:0 Offset to start read */
-#define HP100_REG_DATA32 0x10 /* RW: (32) I/O mode data port */
-#define HP100_REG_DATA16 0x12 /* RW: WORDs must be read from here */
-#define HP100_REG_TX_MEM_FREE 0x14 /* RD: (32) Amount of free Tx mem */
-#define HP100_REG_TX_PDA_L 0x14 /* W: (32) BM: Ptr to PDL, Low Pri */
-#define HP100_REG_TX_PDA_H 0x1c /* W: (32) BM: Ptr to PDL, High Pri */
-#define HP100_REG_RX_PKT_CNT 0x18 /* RD: (8) Rx count of pkts on card */
-#define HP100_REG_TX_PKT_CNT 0x19 /* RD: (8) Tx count of pkts on card */
-#define HP100_REG_RX_PDL 0x1a /* R: (8) BM: # rx pdl not executed */
-#define HP100_REG_TX_PDL 0x1b /* R: (8) BM: # tx pdl not executed */
-#define HP100_REG_RX_PDA 0x18 /* W: (32) BM: Up to 31 addresses */
- /* which point to a PDL */
-#define HP100_REG_SL_EARLY 0x1c /* (32) Enhanced Slave Early Rx */
-#define HP100_REG_STAT_DROPPED 0x20 /* R (12) Dropped Packet Counter */
-#define HP100_REG_STAT_ERRORED 0x22 /* R (8) Errored Packet Counter */
-#define HP100_REG_STAT_ABORT 0x23 /* R (8) Abort Counter/OW Coll. Flag */
-#define HP100_REG_RX_RING 0x24 /* W (32) Slave: RX Ring Pointers */
-#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */
-#define HP100_REG_32_OFFSET 0x2c /* W (16) Slave: Offset Register */
-
-/* Page 1 - MAC Address/Hash Table */
-
-#define HP100_REG_MAC_ADDR 0x08 /* RW: (8) Cards MAC address */
-#define HP100_REG_HASH_BYTE0 0x10 /* RW: (8) Cards multicast filter */
-
-/* Page 2 - Hardware Mapping */
-
-#define HP100_REG_MEM_MAP_LSW 0x08 /* RW: (16) LSW of cards mem addr */
-#define HP100_REG_MEM_MAP_MSW 0x0a /* RW: (16) MSW of cards mem addr */
-#define HP100_REG_IO_MAP 0x0c /* RW: (8) Cards I/O address */
-#define HP100_REG_IRQ_CHANNEL 0x0d /* RW: (8) IRQ and edge/level int */
-#define HP100_REG_SRAM 0x0e /* RW: (8) How much RAM on card */
-#define HP100_REG_BM 0x0f /* RW: (8) Controls BM functions */
-
-/* New on Page 2 for ETR chips: */
-#define HP100_REG_MODECTRL1 0x10 /* RW: (8) Mode Control 1 */
-#define HP100_REG_MODECTRL2 0x11 /* RW: (8) Mode Control 2 */
-#define HP100_REG_PCICTRL1 0x12 /* RW: (8) PCI Cfg 1 */
-#define HP100_REG_PCICTRL2 0x13 /* RW: (8) PCI Cfg 2 */
-#define HP100_REG_PCIBUSMLAT 0x15 /* RW: (8) PCI Bus Master Latency */
-#define HP100_REG_EARLYTXCFG 0x16 /* RW: (16) Early TX Cfg/Cntrl Reg */
-#define HP100_REG_EARLYRXCFG 0x18 /* RW: (8) Early RX Cfg/Cntrl Reg */
-#define HP100_REG_ISAPNPCFG1 0x1a /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */
-#define HP100_REG_ISAPNPCFG2 0x1b /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */
-
-/* Page 3 - EEPROM/Boot ROM */
-
-#define HP100_REG_EEPROM_CTRL 0x08 /* RW: (16) Used to load EEPROM */
-#define HP100_REG_BOOTROM_CTRL 0x0a
-
-/* Page 4 - LAN Configuration (MAC_CTRL) */
-
-#define HP100_REG_10_LAN_CFG_1 0x08 /* RW: (8) Set 10M XCVR functions */
-#define HP100_REG_10_LAN_CFG_2 0x09 /* RW: (8) 10M XCVR functions */
-#define HP100_REG_VG_LAN_CFG_1 0x0a /* RW: (8) Set 100M XCVR functions */
-#define HP100_REG_VG_LAN_CFG_2 0x0b /* RW: (8) 100M LAN Training cfgregs */
-#define HP100_REG_MAC_CFG_1 0x0c /* RW: (8) Types of pkts to accept */
-#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */
-#define HP100_REG_MAC_CFG_3 0x0e /* RW: (8) Misc MAC functions */
-#define HP100_REG_MAC_CFG_4 0x0f /* R: (8) Misc MAC states */
-#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts can't fit in mem */
-#define HP100_REG_CRC 0x12 /* R: (8) Pkts with CRC */
-#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */
-#define HP100_REG_TRAIN_REQUEST 0x14 /* RW: (16) Endnode MAC register. */
-#define HP100_REG_TRAIN_ALLOW 0x16 /* R: (16) Hub allowed register */
-
-/* Page 5 - MMU */
-
-#define HP100_REG_RX_MEM_STOP 0x0c /* RW: (16) End of Rx ring addr */
-#define HP100_REG_TX_MEM_STOP 0x0e /* RW: (16) End of Tx ring addr */
-#define HP100_REG_PDL_MEM_STOP 0x10 /* Not used by 802.12 devices */
-#define HP100_REG_ECB_MEM_STOP 0x14 /* I've no idea what this is */
-
-/* Page 6 - Card ID/Physical LAN Address */
-
-#define HP100_REG_BOARD_ID 0x08 /* R: (8) EISA/ISA card ID */
-#define HP100_REG_BOARD_IO_CHCK 0x0c /* R: (8) Added to ID to get FFh */
-#define HP100_REG_SOFT_MODEL 0x0d /* R: (8) Config program defined */
-#define HP100_REG_LAN_ADDR 0x10 /* R: (8) MAC addr of card */
-#define HP100_REG_LAN_ADDR_CHCK 0x16 /* R: (8) Added to addr to get FFh */
-
-/* Page 7 - MMU Current Pointers */
-
-#define HP100_REG_PTR_RXSTART 0x08 /* R: (16) Current begin of Rx ring */
-#define HP100_REG_PTR_RXEND 0x0a /* R: (16) Current end of Rx ring */
-#define HP100_REG_PTR_TXSTART 0x0c /* R: (16) Current begin of Tx ring */
-#define HP100_REG_PTR_TXEND 0x0e /* R: (16) Current end of Rx ring */
-#define HP100_REG_PTR_RPDLSTART 0x10
-#define HP100_REG_PTR_RPDLEND 0x12
-#define HP100_REG_PTR_RINGPTRS 0x14
-#define HP100_REG_PTR_MEMDEBUG 0x1a
-/* ------------------------------------------------------------------------ */
-
-
-/*
- * Hardware ID Register I (Always available, HW_ID, Offset 0x00)
- */
-#define HP100_HW_ID_CASCADE 0x4850 /* Identifies Cascade Chip */
-
-/*
- * Hardware ID Register 2 & Paging Register
- * (Always available, PAGING, Offset 0x02)
- * Bits 15:4 are for the Chip ID
- */
-#define HP100_CHIPID_MASK 0xFFF0
-#define HP100_CHIPID_SHASTA 0x5350 /* Not 802.12 compliant */
- /* EISA BM/SL, MCA16/32 SL, ISA SL */
-#define HP100_CHIPID_RAINIER 0x5360 /* Not 802.12 compliant EISA BM, */
- /* PCI SL, MCA16/32 SL, ISA SL */
-#define HP100_CHIPID_LASSEN 0x5370 /* 802.12 compliant PCI BM, PCI SL */
- /* LRF supported */
-
-/*
- * Option Registers I and II
- * (Always available, OPTION_LSW, Offset 0x04-0x05)
- */
-#define HP100_DEBUG_EN 0x8000 /* 0:Dis., 1:Enable Debug Dump Ptr. */
-#define HP100_RX_HDR 0x4000 /* 0:Dis., 1:Enable putting pkt into */
- /* system mem. before Rx interrupt */
-#define HP100_MMAP_DIS 0x2000 /* 0:Enable, 1:Disable mem.mapping. */
- /* MMAP_DIS must be 0 and MEM_EN */
- /* must be 1 for memory-mapped */
- /* mode to be enabled */
-#define HP100_EE_EN 0x1000 /* 0:Disable,1:Enable EEPROM writing */
-#define HP100_BM_WRITE 0x0800 /* 0:Slave, 1:Bus Master for Tx data */
-#define HP100_BM_READ 0x0400 /* 0:Slave, 1:Bus Master for Rx data */
-#define HP100_TRI_INT 0x0200 /* 0:Don't, 1:Do tri-state the int */
-#define HP100_MEM_EN 0x0040 /* Config program set this to */
- /* 0:Disable, 1:Enable mem map. */
- /* See MMAP_DIS. */
-#define HP100_IO_EN 0x0020 /* 1:Enable I/O transfers */
-#define HP100_BOOT_EN 0x0010 /* 1:Enable boot ROM access */
-#define HP100_FAKE_INT 0x0008 /* 1:int */
-#define HP100_INT_EN 0x0004 /* 1:Enable ints from card */
-#define HP100_HW_RST 0x0002 /* 0:Reset, 1:Out of reset */
- /* NIC reset on 0 to 1 transition */
-
-/*
- * Option Register III
- * (Always available, OPTION_MSW, Offset 0x06)
- */
-#define HP100_PRIORITY_TX 0x0080 /* 1:Do all Tx pkts as priority */
-#define HP100_EE_LOAD 0x0040 /* 1:EEPROM loading, 0 when done */
-#define HP100_ADV_NXT_PKT 0x0004 /* 1:Advance to next pkt in Rx queue */
- /* h/w will set to 0 when done */
-#define HP100_TX_CMD 0x0002 /* 1:Tell h/w download done, h/w */
- /* will set to 0 when done */
-
-/*
- * Interrupt Status Registers I and II
- * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09)
- * Note: With old chips, these Registers will clear when 1 is written to them
- * with new chips this depends on setting of CLR_ISMODE
- */
-#define HP100_RX_EARLY_INT 0x2000
-#define HP100_RX_PDA_ZERO 0x1000
-#define HP100_RX_PDL_FILL_COMPL 0x0800
-#define HP100_RX_PACKET 0x0400 /* 0:No, 1:Yes pkt has been Rx */
-#define HP100_RX_ERROR 0x0200 /* 0:No, 1:Yes Rx pkt had error */
-#define HP100_TX_PDA_ZERO 0x0020 /* 1 when PDA count goes to zero */
-#define HP100_TX_SPACE_AVAIL 0x0010 /* 0:<8192, 1:>=8192 Tx free bytes */
-#define HP100_TX_COMPLETE 0x0008 /* 0:No, 1:Yes a Tx has completed */
-#define HP100_MISC_ERROR 0x0004 /* 0:No, 1:Lan Link down or bus error */
-#define HP100_TX_ERROR 0x0002 /* 0:No, 1:Yes Tx pkt had error */
-
-/*
- * Xmit Memory Free Count
- * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit)
- */
-#define HP100_AUTO_COMPARE 0x80000000 /* Tx Space avail & pkts<255 */
-#define HP100_FREE_SPACE 0x7fffffe0 /* Tx free memory */
-
-/*
- * IRQ Channel
- * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d)
- */
-#define HP100_ZERO_WAIT_EN 0x80 /* 0:No, 1:Yes asserts NOWS signal */
-#define HP100_IRQ_SCRAMBLE 0x40
-#define HP100_BOND_HP 0x20
-#define HP100_LEVEL_IRQ 0x10 /* 0:Edge, 1:Level type interrupts. */
- /* (Only valid on EISA cards) */
-#define HP100_IRQMASK 0x0F /* Isolate the IRQ bits */
-
-/*
- * SRAM Parameters
- * (Page HW_MAP, SRAM, Offset 0x0e)
- */
-#define HP100_RAM_SIZE_MASK 0xe0 /* AND to get SRAM size index */
-#define HP100_RAM_SIZE_SHIFT 0x05 /* Shift count(put index in lwr bits) */
-
-/*
- * Bus Master Register
- * (Page HW_MAP, BM, Offset 0x0f)
- */
-#define HP100_BM_BURST_RD 0x01 /* EISA only: 1=Use burst trans. fm system */
- /* memory to chip (tx) */
-#define HP100_BM_BURST_WR 0x02 /* EISA only: 1=Use burst trans. fm system */
- /* memory to chip (rx) */
-#define HP100_BM_MASTER 0x04 /* 0:Slave, 1:BM mode */
-#define HP100_BM_PAGE_CK 0x08 /* This bit should be set whenever in */
- /* an EISA system */
-#define HP100_BM_PCI_8CLK 0x40 /* ... cycles 8 clocks apart */
-
-
-/*
- * Mode Control Register I
- * (Page HW_MAP, MODECTRL1, Offset0x10)
- */
-#define HP100_TX_DUALQ 0x10
- /* If set and BM -> dual tx pda queues */
-#define HP100_ISR_CLRMODE 0x02 /* If set ISR will clear all pending */
- /* interrupts on read (etr only?) */
-#define HP100_EE_NOLOAD 0x04 /* Status whether res will be loaded */
- /* from the eeprom */
-#define HP100_TX_CNT_FLG 0x08 /* Controls Early TX Reg Cnt Field */
-#define HP100_PDL_USE3 0x10 /* If set BM engine will read only */
- /* first three data elements of a PDL */
- /* on the first access. */
-#define HP100_BUSTYPE_MASK 0xe0 /* Three bit bus type info */
-
-/*
- * Mode Control Register II
- * (Page HW_MAP, MODECTRL2, Offset0x11)
- */
-#define HP100_EE_MASK 0x0f /* Tell EEPROM circuit not to load */
- /* certain resources */
-#define HP100_DIS_CANCEL 0x20 /* For tx dualq mode operation */
-#define HP100_EN_PDL_WB 0x40 /* 1: Status of PDL completion may be */
- /* written back to system mem */
-#define HP100_EN_BUS_FAIL 0x80 /* Enables bus-fail portion of misc */
- /* interrupt */
-
-/*
- * PCI Configuration and Control Register I
- * (Page HW_MAP, PCICTRL1, Offset 0x12)
- */
-#define HP100_LO_MEM 0x01 /* 1: Mapped Mem requested below 1MB */
-#define HP100_NO_MEM 0x02 /* 1: Disables Req for sysmem to PCI */
- /* bios */
-#define HP100_USE_ISA 0x04 /* 1: isa type decodes will occur */
- /* simultaneously with PCI decodes */
-#define HP100_IRQ_HI_MASK 0xf0 /* pgmed by pci bios */
-#define HP100_PCI_IRQ_HI_MASK 0x78 /* Isolate 4 bits for PCI IRQ */
-
-/*
- * PCI Configuration and Control Register II
- * (Page HW_MAP, PCICTRL2, Offset 0x13)
- */
-#define HP100_RD_LINE_PDL 0x01 /* 1: PCI command Memory Read Line en */
-#define HP100_RD_TX_DATA_MASK 0x06 /* choose PCI memread cmds for TX */
-#define HP100_MWI 0x08 /* 1: en. PCI memory write invalidate */
-#define HP100_ARB_MODE 0x10 /* Select PCI arbitor type */
-#define HP100_STOP_EN 0x20 /* Enables PCI state machine to issue */
- /* pci stop if cascade not ready */
-#define HP100_IGNORE_PAR 0x40 /* 1: PCI state machine ignores parity */
-#define HP100_PCI_RESET 0x80 /* 0->1: Reset PCI block */
-
-/*
- * Early TX Configuration and Control Register
- * (Page HW_MAP, EARLYTXCFG, Offset 0x16)
- */
-#define HP100_EN_EARLY_TX 0x8000 /* 1=Enable Early TX */
-#define HP100_EN_ADAPTIVE 0x4000 /* 1=Enable adaptive mode */
-#define HP100_EN_TX_UR_IRQ 0x2000 /* reserved, must be 0 */
-#define HP100_EN_LOW_TX 0x1000 /* reserved, must be 0 */
-#define HP100_ET_CNT_MASK 0x0fff /* bits 11..0: ET counters */
-
-/*
- * Early RX Configuration and Control Register
- * (Page HW_MAP, EARLYRXCFG, Offset 0x18)
- */
-#define HP100_EN_EARLY_RX 0x80 /* 1=Enable Early RX */
-#define HP100_EN_LOW_RX 0x40 /* reserved, must be 0 */
-#define HP100_RX_TRIP_MASK 0x1f /* bits 4..0: threshold at which the
- * early rx circuit will start the
- * dma of received packet into system
- * memory for BM */
-
-/*
- * Serial Devices Control Register
- * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08)
- */
-#define HP100_EEPROM_LOAD 0x0001 /* 0->1 loads EEPROM into registers. */
- /* When it goes back to 0, load is */
- /* complete. This should take ~600us. */
-
-/*
- * 10MB LAN Control and Configuration Register I
- * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08)
- */
-#define HP100_MAC10_SEL 0xc0 /* Get bits to indicate MAC */
-#define HP100_AUI_SEL 0x20 /* Status of AUI selection */
-#define HP100_LOW_TH 0x10 /* 0:No, 1:Yes allow better cabling */
-#define HP100_LINK_BEAT_DIS 0x08 /* 0:Enable, 1:Disable link beat */
-#define HP100_LINK_BEAT_ST 0x04 /* 0:No, 1:Yes link beat being Rx */
-#define HP100_R_ROL_ST 0x02 /* 0:No, 1:Yes Rx twisted pair has */
- /* been reversed */
-#define HP100_AUI_ST 0x01 /* 0:No, 1:Yes use AUI on TP card */
-
-/*
- * 10 MB LAN Control and Configuration Register II
- * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09)
- */
-#define HP100_SQU_ST 0x01 /* 0:No, 1:Yes collision signal sent */
- /* after Tx.Only used for AUI. */
-#define HP100_FULLDUP 0x02 /* 1: LXT901 XCVR fullduplx enabled */
-#define HP100_DOT3_MAC 0x04 /* 1: DOT 3 Mac sel. unless Autosel */
-
-/*
- * MAC Selection, use with MAC10_SEL bits
- */
-#define HP100_AUTO_SEL_10 0x0 /* Auto select */
-#define HP100_XCVR_LXT901_10 0x1 /* LXT901 10BaseT transceiver */
-#define HP100_XCVR_7213 0x2 /* 7213 transceiver */
-#define HP100_XCVR_82503 0x3 /* 82503 transceiver */
-
-/*
- * 100MB LAN Training Register
- * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12)
- */
-#define HP100_FRAME_FORMAT 0x08 /* 0:802.3, 1:802.5 frames */
-#define HP100_BRIDGE 0x04 /* 0:No, 1:Yes tell hub i am a bridge */
-#define HP100_PROM_MODE 0x02 /* 0:No, 1:Yes tell hub card is */
- /* promiscuous */
-#define HP100_REPEATER 0x01 /* 0:No, 1:Yes tell hub MAC wants to */
- /* be a cascaded repeater */
-
-/*
- * 100MB LAN Control and Configuration Register
- * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a)
- */
-#define HP100_VG_SEL 0x80 /* 0:No, 1:Yes use 100 Mbit MAC */
-#define HP100_LINK_UP_ST 0x40 /* 0:No, 1:Yes endnode logged in */
-#define HP100_LINK_CABLE_ST 0x20 /* 0:No, 1:Yes cable can hear tones */
- /* from hub */
-#define HP100_LOAD_ADDR 0x10 /* 0->1 card addr will be sent */
- /* 100ms later the link status */
- /* bits are valid */
-#define HP100_LINK_CMD 0x08 /* 0->1 link will attempt to log in. */
- /* 100ms later the link status */
- /* bits are valid */
-#define HP100_TRN_DONE 0x04 /* NEW ETR-Chips only: Will be reset */
- /* after LinkUp Cmd is given and set */
- /* when training has completed. */
-#define HP100_LINK_GOOD_ST 0x02 /* 0:No, 1:Yes cable passed training */
-#define HP100_VG_RESET 0x01 /* 0:Yes, 1:No reset the 100VG MAC */
-
-
-/*
- * MAC Configuration Register I
- * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c)
- */
-#define HP100_RX_IDLE 0x80 /* 0:Yes, 1:No currently receiving pkts */
-#define HP100_TX_IDLE 0x40 /* 0:Yes, 1:No currently Txing pkts */
-#define HP100_RX_EN 0x20 /* 1: allow receiving of pkts */
-#define HP100_TX_EN 0x10 /* 1: allow transmitting of pkts */
-#define HP100_ACC_ERRORED 0x08 /* 0:No, 1:Yes allow Rx of errored pkts */
-#define HP100_ACC_MC 0x04 /* 0:No, 1:Yes allow Rx of multicast pkts */
-#define HP100_ACC_BC 0x02 /* 0:No, 1:Yes allow Rx of broadcast pkts */
-#define HP100_ACC_PHY 0x01 /* 0:No, 1:Yes allow Rx of ALL phys. pkts */
-#define HP100_MAC1MODEMASK 0xf0 /* Hide ACC bits */
-#define HP100_MAC1MODE1 0x00 /* Receive nothing, must also disable RX */
-#define HP100_MAC1MODE2 0x00
-#define HP100_MAC1MODE3 HP100_MAC1MODE2 | HP100_ACC_BC
-#define HP100_MAC1MODE4 HP100_MAC1MODE3 | HP100_ACC_MC
-#define HP100_MAC1MODE5 HP100_MAC1MODE4 /* set mc hash to all ones also */
-#define HP100_MAC1MODE6 HP100_MAC1MODE5 | HP100_ACC_PHY /* Promiscuous */
-/* Note MODE6 will receive all GOOD packets on the LAN. This really needs
- a mode 7 defined to be LAN Analyzer mode, which will receive errored and
- runt packets, and keep the CRC bytes. */
-#define HP100_MAC1MODE7 HP100_MAC1MODE6 | HP100_ACC_ERRORED
-
-/*
- * MAC Configuration Register II
- * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d)
- */
-#define HP100_TR_MODE 0x80 /* 0:No, 1:Yes support Token Ring formats */
-#define HP100_TX_SAME 0x40 /* 0:No, 1:Yes Tx same packet continuous */
-#define HP100_LBK_XCVR 0x20 /* 0:No, 1:Yes loopback through MAC & */
- /* transceiver */
-#define HP100_LBK_MAC 0x10 /* 0:No, 1:Yes loopback through MAC */
-#define HP100_CRC_I 0x08 /* 0:No, 1:Yes inhibit CRC on Tx packets */
-#define HP100_ACCNA 0x04 /* 1: For 802.5: Accept only token ring
-					 * group addr that matches NA mask */
-#define HP100_KEEP_CRC 0x02 /* 0:No, 1:Yes keep CRC on Rx packets. */
- /* The length will reflect this. */
-#define HP100_ACCFA 0x01 /* 1: For 802.5: Accept only functional
- * addrs that match FA mask (page1) */
-#define HP100_MAC2MODEMASK 0x02
-#define HP100_MAC2MODE1 0x00
-#define HP100_MAC2MODE2 0x00
-#define HP100_MAC2MODE3 0x00
-#define HP100_MAC2MODE4 0x00
-#define HP100_MAC2MODE5 0x00
-#define HP100_MAC2MODE6 0x00
-#define HP100_MAC2MODE7 HP100_KEEP_CRC
-
-/*
- * MAC Configuration Register III
- * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e)
- */
-#define HP100_PACKET_PACE 0x03 /* Packet Pacing:
- * 00: No packet pacing
- * 01: 8 to 16 uS delay
- * 10: 16 to 32 uS delay
- * 11: 32 to 64 uS delay
- */
-#define HP100_LRF_EN 0x04 /* 1: External LAN Rcv Filter and
- * TCP/IP Checksumming enabled. */
-#define HP100_AUTO_MODE 0x10 /* 1: AutoSelect between 10/100 */
-
-/*
- * MAC Configuration Register IV
- * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f)
- */
-#define HP100_MAC_SEL_ST 0x01 /* (R): Status of external VGSEL
- * Signal, 1=100VG, 0=10Mbit sel. */
-#define HP100_LINK_FAIL_ST 0x02 /* (R): Status of Link Fail portion
- * of the Misc. Interrupt */
-
-/*
- * 100 MB LAN Training Request/Allowed Registers
- * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only)
- */
-#define HP100_MACRQ_REPEATER 0x0001 /* 1: MAC tells HUB it wants to be
- * a cascaded repeater
- * 0: ... wants to be a DTE */
-#define HP100_MACRQ_PROMSC 0x0006 /* 2 bits: Promiscuous mode
- * 00: Rcv only unicast packets
- * specifically addr to this
- * endnode
- * 10: Rcv all pckts fwded by
- * the local repeater */
-#define HP100_MACRQ_FRAMEFMT_EITHER 0x0018 /* 11: either format allowed */
-#define HP100_MACRQ_FRAMEFMT_802_3 0x0000 /* 00: 802.3 is requested */
-#define HP100_MACRQ_FRAMEFMT_802_5 0x0010 /* 10: 802.5 format is requested */
-#define HP100_CARD_MACVER 0xe000 /* R: 3 bit Cards 100VG MAC version */
-#define HP100_MALLOW_REPEATER 0x0001 /* If reset, requested access as an
- * end node is allowed */
-#define HP100_MALLOW_PROMSC 0x0004 /* 2 bits: Promiscuous mode
- * 00: Rcv only unicast packets
- * specifically addr to this
- * endnode
- * 10: Rcv all pckts fwded by
- * the local repeater */
-#define HP100_MALLOW_FRAMEFMT 0x00e0 /* 2 bits: Frame Format
- * 00: 802.3 format will be used
- * 10: 802.5 format will be used */
-#define HP100_MALLOW_ACCDENIED 0x0400 /* N bit */
-#define HP100_MALLOW_CONFIGURE 0x0f00 /* C bit */
-#define HP100_MALLOW_DUPADDR 0x1000 /* D bit */
-#define HP100_HUB_MACVER 0xe000 /* R: 3 bit 802.12 MAC/RMAC training */
- /* protocol of repeater */
-
-/* ****************************************************************************** */
-
-/*
- * Set/Reset bits
- */
-#define HP100_SET_HB 0x0100 /* 0:Set fields to 0 whose mask is 1 */
-#define HP100_SET_LB 0x0001 /* HB sets upper byte, LB sets lower byte */
-#define HP100_RESET_HB 0x0000 /* For readability when resetting bits */
-#define HP100_RESET_LB 0x0000 /* For readability when resetting bits */
-
-/*
- * Misc. Constants
- */
-#define HP100_LAN_100 100 /* lan_type value for VG */
-#define HP100_LAN_10 10 /* lan_type value for 10BaseT */
-#define HP100_LAN_COAX 9 /* lan_type value for Coax */
-#define HP100_LAN_ERR (-1) /* lan_type value for link down */
-
-/*
- * Bus Master Data Structures ----------------------------------------------
- */
-
-#define MAX_RX_PDL 30 /* Card limit = 31 */
-#define MAX_RX_FRAG 2 /* Don't need more... */
-#define MAX_TX_PDL 29
-#define MAX_TX_FRAG 2 /* Limit = 31 */
-
-/* Define total PDL area size in bytes (should be 4096) */
-/* This is the size of kernel (dma) memory that will be allocated. */
-#define MAX_RINGSIZE (((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16)
-
-/* Ethernet Packet Sizes */
-#define MIN_ETHER_SIZE 60
-#define MAX_ETHER_SIZE 1514 /* Needed for preallocation of */
- /* skb buffer when busmastering */
-
-/* Tx or Rx Ring Entry */
-typedef struct hp100_ring {
- u_int *pdl; /* Address of PDLs PDH, dword before
- * this address is used for rx hdr */
- u_int pdl_paddr; /* Physical address of PDL */
- struct sk_buff *skb;
- struct hp100_ring *next;
-} hp100_ring_t;
-
-
-
-/* Mask for Header Descriptor */
-#define HP100_PKT_LEN_MASK 0x1FFF /* AND with RxLength to get length */
-
-
-/* Receive Packet Status. Note, the error bits are only valid if ACC_ERRORED
- bit in the MAC Configuration Register 1 is set. */
-#define HP100_RX_PRI 0x8000 /* 0:No, 1:Yes packet is priority */
-#define HP100_SDF_ERR 0x4000 /* 0:No, 1:Yes start of frame error */
-#define HP100_SKEW_ERR 0x2000 /* 0:No, 1:Yes skew out of range */
-#define HP100_BAD_SYMBOL_ERR 0x1000 /* 0:No, 1:Yes invalid symbol received */
-#define HP100_RCV_IPM_ERR 0x0800 /* 0:No, 1:Yes pkt had an invalid packet */
- /* marker */
-#define HP100_SYMBOL_BAL_ERR 0x0400 /* 0:No, 1:Yes symbol balance error */
-#define HP100_VG_ALN_ERR 0x0200 /* 0:No, 1:Yes non-octet received */
-#define HP100_TRUNC_ERR 0x0100 /* 0:No, 1:Yes the packet was truncated */
-#define HP100_RUNT_ERR 0x0040 /* 0:No, 1:Yes pkt length < Min Pkt */
- /* Length Reg. */
-#define HP100_ALN_ERR 0x0010 /* 0:No, 1:Yes align error. */
-#define HP100_CRC_ERR 0x0008 /* 0:No, 1:Yes CRC occurred. */
-
-/* The last three bits indicate the type of destination address */
-
-#define HP100_MULTI_ADDR_HASH 0x0006 /* 110: Addr multicast, matched hash */
-#define HP100_BROADCAST_ADDR 0x0003 /* x11: Addr broadcast */
-#define HP100_MULTI_ADDR_NO_HASH 0x0002 /* 010: Addr multicast, didn't match hash */
-#define HP100_PHYS_ADDR_MATCH 0x0001 /* x01: Addr was physical and mine */
-#define HP100_PHYS_ADDR_NO_MATCH 0x0000 /* x00: Addr was physical but not mine */
-
-/*
- * macros
- */
-
-#define hp100_inb( reg ) \
- inb( ioaddr + HP100_REG_##reg )
-#define hp100_inw( reg ) \
- inw( ioaddr + HP100_REG_##reg )
-#define hp100_inl( reg ) \
- inl( ioaddr + HP100_REG_##reg )
-#define hp100_outb( data, reg ) \
- outb( data, ioaddr + HP100_REG_##reg )
-#define hp100_outw( data, reg ) \
- outw( data, ioaddr + HP100_REG_##reg )
-#define hp100_outl( data, reg ) \
- outl( data, ioaddr + HP100_REG_##reg )
-#define hp100_orb( data, reg ) \
- outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
-#define hp100_orw( data, reg ) \
- outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
-#define hp100_andb( data, reg ) \
- outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
-#define hp100_andw( data, reg ) \
- outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
-
-#define hp100_page( page ) \
- outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
-#define hp100_ints_off() \
- outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
-#define hp100_ints_on() \
- outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
-#define hp100_mem_map_enable() \
- outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW )
-#define hp100_mem_map_disable() \
- outw( HP100_MMAP_DIS | HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW )
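
The accessor macros above are the removed driver's entire register-access
convention: every caller must have a local ioaddr in scope, and paged
registers are reached by writing a page index to the paging register first.
A minimal sketch of the resulting idiom, mirroring what hp100_update_stats()
does (assumes these macros and a probed net_device; locking omitted, the
driver holds lp->lock around such sequences):

    /* read the CRC error counter (page 4), then restore page 0 */
    static u8 hp100_read_crc_errors(struct net_device *dev)
    {
            int ioaddr = dev->base_addr;    /* the hp100_* macros expect this name */
            u8 crc_errs;

            hp100_page(MAC_CTRL);           /* select page 4, LAN configuration */
            crc_errs = hp100_inb(CRC);      /* counter clears on read */
            hp100_page(PERFORMANCE);        /* leave the card on page 0 */
            return crc_errs;
    }
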
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192 b/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192
deleted file mode 100644
index 1c35c507cc05..000000000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192
+++ /dev/null
@@ -1,20 +0,0 @@
-What: /sys/.../iio:deviceX/ac_excitation_en
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- This attribute, if available, is used to enable the AC
-	excitation mode found on some converters. In AC excitation mode,
- the polarity of the excitation voltage is reversed on
- alternate cycles, to eliminate DC errors.
-
-What: /sys/.../iio:deviceX/bridge_switch_en
-KernelVersion: 3.1.0
-Contact: linux-iio@vger.kernel.org
-Description:
- This attribute, if available, is used to close or open the
- bridge power down switch found on some converters.
- In bridge applications, such as strain gauges and load cells,
- the bridge itself consumes the majority of the current in the
- system. To minimize the current consumption of the system,
-	the bridge can be disconnected (when it is not being used)
-	using the bridge_switch_en attribute.
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 1b8ebf2c1b69..4d469016a13a 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -1,10 +1,4 @@
-2018-04-15
-
-All affected drivers:
-Convert all uses of the old GPIO API from <linux/gpio.h> to the
-GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
-lines from device tree, ACPI or board files, board files should
-use <linux/gpio/machine.h>.
+2020-02-25
ADI Drivers:
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index 39dfe3f7f254..fef52d9b5346 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -250,6 +250,7 @@ static const struct adis_data adis16203_data = {
.diag_stat_reg = ADIS16203_DIAG_STAT,
.self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
+ .self_test_reg = ADIS16203_MSC_CTRL,
.self_test_no_autoclear = true,
.timeouts = &adis16203_timeouts,
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 39eb8364aa95..8bd35c6c56a1 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -373,6 +373,7 @@ static const struct adis_data adis16240_data = {
.diag_stat_reg = ADIS16240_DIAG_STAT,
.self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
+ .self_test_reg = ADIS16240_MSC_CTRL,
.self_test_no_autoclear = true,
.timeouts = &adis16240_timeouts,
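
Both adis hunks above wire up the same new field: as the changes suggest, the
shared adis library now takes the register holding the self-test trigger bit
explicitly through self_test_reg instead of assuming MSC_CTRL. A hedged sketch
of what a driver's adis_data then carries (field set trimmed; the FOO_*
constants are placeholders):

    static const struct adis_data foo_adis_data = {
            .msc_ctrl_reg   = FOO_MSC_CTRL,
            .diag_stat_reg  = FOO_DIAG_STAT,
            .self_test_reg  = FOO_MSC_CTRL, /* some parts use GLOB_CMD instead */
            .self_test_mask = FOO_SELF_TEST_EN,
            .self_test_no_autoclear = true,
    };
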
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 31cd9a12f40f..b25f41053fac 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -15,18 +15,6 @@ config AD7816
To compile this driver as a module, choose M here: the
module will be called ad7816.
-config AD7192
- tristate "Analog Devices AD7190 AD7192 AD7193 AD7195 ADC driver"
- depends on SPI
- select AD_SIGMA_DELTA
- help
- Say yes here to build support for Analog Devices AD7190,
- AD7192, AD7193 or AD7195 SPI analog to digital converters (ADC).
- If unsure, say N (but it's safe to say "Y").
-
- To compile this driver as a module, choose M here: the
- module will be called ad7192.
-
config AD7280
tristate "Analog Devices AD7280A Lithium Ion Battery Monitoring System"
depends on SPI
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 4b76769b32bc..6436a62b6278 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -4,5 +4,4 @@
#
obj-$(CONFIG_AD7816) += ad7816.o
-obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7280) += ad7280a.o
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 19a5f244dcae..bef6bd1295ea 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -824,6 +824,10 @@ out:
return IRQ_HANDLED;
}
+/* Note: No need to fix checkpatch warning that reads:
+ * CHECK: spaces preferred around that '-' (ctx:VxV)
+ * The function argument is stringified and doesn't need a fix
+ */
static IIO_DEVICE_ATTR_NAMED(in_thresh_low_value,
in_voltage-voltage_thresh_low_value,
0644,
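
The comment added in this hunk is worth unpacking: IIO_DEVICE_ATTR_NAMED
stringifies its second argument into the sysfs attribute name, so "fixing" the
spacing around '-' would silently rename the file in sysfs. A tiny
out-of-kernel illustration of the preprocessor behaviour it relies on:

    #include <stdio.h>

    #define NAME(x) #x      /* same stringification the IIO macro uses */

    int main(void)
    {
            /* prints: in_voltage-voltage_thresh_low_value */
            puts(NAME(in_voltage-voltage_thresh_low_value));
            /* NAME(in_voltage - voltage_thresh_low_value) would give
             * "in_voltage - voltage_thresh_low_value", a different name */
            return 0;
    }
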
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 4b25a3a314ed..ed404355ea4c 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -130,17 +130,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
static int ad2s1210_config_read(struct ad2s1210_state *st,
unsigned char address)
{
- struct spi_transfer xfer = {
- .len = 2,
- .rx_buf = st->rx,
- .tx_buf = st->tx,
+ struct spi_transfer xfers[] = {
+ {
+ .len = 1,
+ .rx_buf = &st->rx[0],
+ .tx_buf = &st->tx[0],
+ .cs_change = 1,
+ }, {
+ .len = 1,
+ .rx_buf = &st->rx[1],
+ .tx_buf = &st->tx[1],
+ },
};
int ret = 0;
ad2s1210_set_mode(MOD_CONFIG, st);
st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
st->tx[1] = AD2S1210_REG_FAULT;
- ret = spi_sync_transfer(st->sdev, &xfer, 1);
+ ret = spi_sync_transfer(st->sdev, xfers, 2);
if (ret < 0)
return ret;
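
The split into two one-byte transfers above relies on cs_change to pulse chip select between the address write and the data read; a single two-byte transfer never deasserts CS in between, which this resolver appears to require before it returns register contents. A minimal sketch of the pattern (kernel context; the device pointer and register layout are illustrative, not taken from the driver):

#include <linux/spi/spi.h>

/*
 * Sketch: read one register with a chip-select pulse between the
 * address byte and the data byte. cs_change = 1 deasserts CS after
 * the first transfer, which is the whole point of the split.
 */
static int example_reg_read(struct spi_device *spi, u8 addr, u8 *val)
{
	u8 tx[2] = { addr, 0 };
	u8 rx[2];
	struct spi_transfer xfers[] = {
		{
			.len = 1,
			.tx_buf = &tx[0],
			.rx_buf = &rx[0],
			.cs_change = 1,
		}, {
			.len = 1,
			.tx_buf = &tx[1],
			.rx_buf = &rx[1],
		},
	};
	int ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));

	if (ret < 0)
		return ret;

	*val = rx[1];
	return 0;
}
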
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
index 93cf28febdf6..358d7b2f4ad1 100644
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -110,10 +110,10 @@ static ssize_t cpld_reconfigure(struct device *dev,
const char *buf, size_t count)
{
struct kp2000_device *pcard = dev_get_drvdata(dev);
- long wr_val;
+ unsigned long wr_val;
int rv;
- rv = kstrtol(buf, 0, &wr_val);
+ rv = kstrtoul(buf, 0, &wr_val);
if (rv < 0)
return rv;
if (wr_val > 7)
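
Switching from kstrtol() to kstrtoul() here is not cosmetic: with a signed parse, a negative input survives the upper-bound check above. A small userspace sketch of the failure mode (plain C stand-in, not the kernel helpers):

#include <stdio.h>

/*
 * With a signed long, "-1" parses successfully and the bound check
 * (wr_val > 7) lets it through, so a bogus value would reach the
 * CPLD. An unsigned parse (kstrtoul) rejects the minus sign instead.
 */
int main(void)
{
	long wr_val = -1;	/* what kstrtol would have produced */

	if (wr_val > 7)
		printf("rejected\n");
	else
		printf("accepted %ld\n", wr_val);	/* this branch runs */

	return 0;
}
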
@@ -298,7 +298,6 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
{
int err = 0;
struct kp2000_device *pcard;
- int rv;
unsigned long reg_bar_phys_addr;
unsigned long reg_bar_phys_len;
unsigned long dma_bar_phys_addr;
@@ -445,11 +444,11 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
if (err < 0)
goto err_release_dma;
- rv = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED,
- pcard->name, pcard);
- if (rv) {
+ err = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED,
+ pcard->name, pcard);
+ if (err) {
dev_err(&pcard->pdev->dev,
- "%s: failed to request_irq: %d\n", __func__, rv);
+ "%s: failed to request_irq: %d\n", __func__, err);
goto err_disable_msi;
}
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
index 1c360daa703d..44017d523da5 100644
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ b/drivers/staging/kpc2000/kpc2000_spi.c
@@ -386,8 +386,8 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
}
}
- if (transfer->delay_usecs)
- udelay(transfer->delay_usecs);
+ if (transfer->delay.value)
+ ndelay(spi_delay_to_ns(&transfer->delay, transfer));
}
/* de-assert chip select to end the sequence */
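
The delay_usecs field was replaced kernel-wide by struct spi_delay, which pairs a value with a unit and is converted with spi_delay_to_ns() before the busy-wait. A rough userspace re-implementation of the conversion, for illustration only (unit names mirror the shape of linux/spi/spi.h; the clock-cycle unit is left out because it depends on the transfer's bus speed):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative re-implementation of the value/unit pair carried by
 * struct spi_delay; not the kernel's spi_delay_to_ns().
 */
enum delay_unit { UNIT_USECS, UNIT_NSECS };

struct delay {
	uint16_t value;
	enum delay_unit unit;
};

static int32_t delay_to_ns(const struct delay *d)
{
	switch (d->unit) {
	case UNIT_USECS:
		return (int32_t)d->value * 1000;
	case UNIT_NSECS:
		return d->value;
	}
	return -1;
}

int main(void)
{
	struct delay d = { .value = 10, .unit = UNIT_USECS };

	printf("%d ns\n", delay_to_ns(&d));	/* prints 10000 ns */
	return 0;
}
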
diff --git a/drivers/staging/kpc2000/kpc_dma/dma.c b/drivers/staging/kpc2000/kpc_dma/dma.c
index 51a4dd534a0d..452a3f7c835d 100644
--- a/drivers/staging/kpc2000/kpc_dma/dma.c
+++ b/drivers/staging/kpc2000/kpc_dma/dma.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -97,11 +97,10 @@ int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
if (WARN(!(caps & ENG_CAP_PRESENT), "%s() called for DMA Engine at %p which isn't present in hardware!\n", __func__, eng))
return -ENXIO;
- if (caps & ENG_CAP_DIRECTION) {
+ if (caps & ENG_CAP_DIRECTION)
eng->dir = DMA_FROM_DEVICE;
- } else {
+ else
eng->dir = DMA_TO_DEVICE;
- }
eng->desc_pool_cnt = desc_cnt;
eng->desc_pool = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev, sizeof(struct kpc_dma_descriptor), DMA_DESC_ALIGNMENT, 4096);
@@ -236,7 +235,7 @@ int count_descriptors_available(struct kpc_dma_device *eng)
struct kpc_dma_descriptor *cur = eng->desc_next;
while (cur != eng->desc_completed) {
- BUG_ON(cur == NULL);
+ BUG_ON(!cur);
count++;
cur = cur->Next;
}
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index 40525540dde6..7caabdd77bbf 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -18,8 +18,8 @@
static inline
unsigned int count_pages(unsigned long iov_base, size_t iov_len)
{
- unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT;
- unsigned long last = ((iov_base+iov_len-1) & PAGE_MASK) >> PAGE_SHIFT;
+ unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT;
+ unsigned long last = ((iov_base + iov_len - 1) & PAGE_MASK) >> PAGE_SHIFT;
return last - first + 1;
}
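
A worked example of the page arithmetic above, assuming 4 KiB pages; this is a self-contained userspace copy of count_pages(), not driver code:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/*
 * Same math as count_pages() above: index of the first and last page
 * touched by [iov_base, iov_base + iov_len), inclusive difference.
 */
static unsigned int count_pages(unsigned long iov_base, unsigned long iov_len)
{
	unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT;
	unsigned long last = ((iov_base + iov_len - 1) & PAGE_MASK) >> PAGE_SHIFT;

	return last - first + 1;
}

int main(void)
{
	/* 32 bytes starting 16 bytes before a page boundary span 2 pages */
	printf("%u\n", count_pages(0x1ff0, 32));	/* prints 2 */
	/* a full page starting on a boundary spans exactly 1 page */
	printf("%u\n", count_pages(0x2000, PAGE_SIZE));	/* prints 1 */
	return 0;
}
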
@@ -66,7 +66,8 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
acd->page_count = count_pages(iov_base, iov_len);
// Allocate an array of page pointers
- acd->user_pages = kzalloc(sizeof(struct page *) * acd->page_count, GFP_KERNEL);
+ acd->user_pages = kcalloc(acd->page_count, sizeof(struct page *),
+ GFP_KERNEL);
if (!acd->user_pages) {
dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the page pointers\n");
rv = -ENOMEM;
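
The kcalloc() conversion above is also a hardening fix: an open-coded n * size multiplication can wrap, yielding an undersized allocation that later writes overrun, while kcalloc() fails on overflow. A userspace sketch of the wrap it guards against:

#include <stdio.h>
#include <stdint.h>

/*
 * With a hostile element count, n * sizeof(elem) overflows size_t
 * and silently yields a tiny size; kcalloc() returns NULL instead.
 */
int main(void)
{
	size_t n = (SIZE_MAX / 8) + 2;	/* hostile element count */
	size_t bytes = n * 8;		/* wraps to 8 bytes */

	printf("requested %zu elements -> %zu bytes\n", n, bytes);
	return 0;
}
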
@@ -83,7 +84,7 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
}
// Allocate and setup the sg_table (scatterlist entries)
- rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE-1), iov_len, GFP_KERNEL);
+ rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE - 1), iov_len, GFP_KERNEL);
if (rv) {
dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%ld)\n", rv);
goto err_alloc_sg_table;
@@ -124,19 +125,19 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
pcnt = count_parts_for_sge(sg);
for (p = 0 ; p < pcnt ; p++) {
// Fill out the descriptor
- BUG_ON(desc == NULL);
+ BUG_ON(!desc);
clear_desc(desc);
- if (p != pcnt-1) {
+ if (p != pcnt - 1)
desc->DescByteCount = 0x80000;
- } else {
+ else
desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000);
- }
+
desc->DescBufferByteCount = desc->DescByteCount;
desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR;
if (i == 0 && p == 0)
desc->DescControlFlags |= DMA_DESC_CTL_SOP;
- if (i == acd->mapped_entry_count-1 && p == pcnt-1)
+ if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE;
desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF);
@@ -148,13 +149,13 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000UL) >> 32;
user_ctl = acd->priv->user_ctl;
- if (i == acd->mapped_entry_count-1 && p == pcnt-1) {
+ if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
user_ctl = acd->priv->user_ctl_last;
- }
+
desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFFUL) >> 0;
desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000UL) >> 32;
- if (i == acd->mapped_entry_count-1 && p == pcnt-1)
+ if (i == acd->mapped_entry_count - 1 && p == pcnt - 1)
desc->acd = acd;
dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n", desc, desc->acd);
@@ -188,9 +189,9 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
sg_free_table(&acd->sgt);
err_dma_map_sg:
err_alloc_sg_table:
- for (i = 0 ; i < acd->page_count ; i++) {
+ for (i = 0 ; i < acd->page_count ; i++)
put_page(acd->user_pages[i]);
- }
+
err_get_user_pages:
kfree(acd->user_pages);
err_alloc_userpages:
@@ -203,23 +204,21 @@ void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags)
{
unsigned int i;
- BUG_ON(acd == NULL);
- BUG_ON(acd->user_pages == NULL);
- BUG_ON(acd->sgt.sgl == NULL);
- BUG_ON(acd->ldev == NULL);
- BUG_ON(acd->ldev->pldev == NULL);
+ BUG_ON(!acd);
+ BUG_ON(!acd->user_pages);
+ BUG_ON(!acd->sgt.sgl);
+ BUG_ON(!acd->ldev);
+ BUG_ON(!acd->ldev->pldev);
for (i = 0 ; i < acd->page_count ; i++) {
- if (!PageReserved(acd->user_pages[i])) {
+ if (!PageReserved(acd->user_pages[i]))
set_page_dirty(acd->user_pages[i]);
- }
}
dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir);
- for (i = 0 ; i < acd->page_count ; i++) {
+ for (i = 0 ; i < acd->page_count ; i++)
put_page(acd->user_pages[i]);
- }
sg_free_table(&acd->sgt);
@@ -253,7 +252,7 @@ int kpc_dma_open(struct inode *inode, struct file *filp)
return -EBUSY; /* already open */
}
- priv = kzalloc(sizeof(struct dev_private_data), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
index ec79a8500caf..c3b30551e0ca 100644
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
@@ -1,8 +1,8 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -26,9 +26,8 @@ struct kpc_dma_device *kpc_dma_lookup_device(int minor)
mutex_lock(&kpc_dma_mtx);
list_for_each_entry(c, &kpc_dma_list, list) {
- if (c->pldev->id == minor) {
+ if (c->pldev->id == minor)
goto out;
- }
}
c = NULL; // not-found case
out:
@@ -98,7 +97,7 @@ int kpc_dma_probe(struct platform_device *pldev)
int rv = 0;
dev_t dev;
- struct kpc_dma_device *ldev = kzalloc(sizeof(struct kpc_dma_device), GFP_KERNEL);
+ struct kpc_dma_device *ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
if (!ldev) {
dev_err(&pldev->dev, "%s: unable to kzalloc space for kpc_dma_device\n", __func__);
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
index 4c8cc866b826..8b9c978257b9 100644
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
@@ -198,14 +198,14 @@ u32 GetEngineCompletePtr(struct kpc_dma_device *eng)
static inline
void lock_engine(struct kpc_dma_device *eng)
{
- BUG_ON(eng == NULL);
+ BUG_ON(!eng);
mutex_lock(&eng->sem);
}
static inline
void unlock_engine(struct kpc_dma_device *eng)
{
- BUG_ON(eng == NULL);
+ BUG_ON(!eng);
mutex_unlock(&eng->sem);
}
diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO
index 87a6dac4890d..ab6f39175d99 100644
--- a/drivers/staging/ks7010/TODO
+++ b/drivers/staging/ks7010/TODO
@@ -30,5 +30,4 @@ Now the TODOs:
Please send any patches to:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Wolfram Sang <wsa@the-dreams.de>
Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 4b379542ecd5..6b2660c94f4e 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -446,7 +446,8 @@ static void ks_wlan_hw_rx(struct ks_wlan_private *priv, size_t size)
DUMP_PREFIX_OFFSET,
rx_buffer->data, 32);
#endif
- ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, REG_STATUS_IDLE);
+ ret = ks7010_sdio_writeb(priv, READ_STATUS_REG,
+ REG_STATUS_IDLE);
if (ret)
netdev_err(priv->net_dev, "write READ_STATUS_REG\n");
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
index ca7dc8f5166c..39138191a556 100644
--- a/drivers/staging/ks7010/ks_hostif.h
+++ b/drivers/staging/ks7010/ks_hostif.h
@@ -70,7 +70,7 @@ struct hostif_data_request {
#define TYPE_DATA 0x0000
#define TYPE_AUTH 0x0001
__le16 reserved;
- u8 data[0];
+ u8 data[];
} __packed;
#define TYPE_PMK1 0x0001
@@ -194,7 +194,7 @@ enum mib_data_type {
struct hostif_mib_value {
__le16 size;
__le16 type;
- u8 body[0];
+ u8 body[];
} __packed;
struct hostif_mib_get_confirm_t {
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index c394abffea86..e59a846bc909 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -42,4 +42,8 @@ source "drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig"
source "drivers/staging/media/rkisp1/Kconfig"
+if MEDIA_ANALOG_TV_SUPPORT
+source "drivers/staging/media/usbvision/Kconfig"
+endif
+
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index ea9fce8014bb..23c682461b62 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0/
obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rkisp1/
+obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
diff --git a/drivers/staging/media/allegro-dvt/Makefile b/drivers/staging/media/allegro-dvt/Makefile
index 80817160815c..8e306dcdc55c 100644
--- a/drivers/staging/media/allegro-dvt/Makefile
+++ b/drivers/staging/media/allegro-dvt/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-allegro-objs := allegro-core.o nal-h264.o
+allegro-objs := allegro-core.o nal-h264.o allegro-mail.o
obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro.o
diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
index 3be41698df4c..70f133a842dd 100644
--- a/drivers/staging/media/allegro-dvt/allegro-core.c
+++ b/drivers/staging/media/allegro-dvt/allegro-core.c
@@ -5,7 +5,9 @@
* Allegro DVT video encoder driver
*/
+#include <linux/bits.h>
#include <linux/firmware.h>
+#include <linux/gcd.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -26,6 +28,7 @@
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-v4l2.h>
+#include "allegro-mail.h"
#include "nal-h264.h"
/*
@@ -40,6 +43,8 @@
#define ALLEGRO_HEIGHT_DEFAULT 1080
#define ALLEGRO_HEIGHT_MAX 2160
+#define ALLEGRO_FRAMERATE_DEFAULT ((struct v4l2_fract) { 30, 1 })
+
#define ALLEGRO_GOP_SIZE_DEFAULT 25
#define ALLEGRO_GOP_SIZE_MAX 1000
@@ -176,6 +181,7 @@ struct allegro_channel {
unsigned int width;
unsigned int height;
unsigned int stride;
+ struct v4l2_fract framerate;
enum v4l2_colorspace colorspace;
enum v4l2_ycbcr_encoding ycbcr_enc;
@@ -192,7 +198,7 @@ struct allegro_channel {
unsigned int sizeimage_encoded;
unsigned int csequence;
- enum v4l2_mpeg_video_bitrate_mode bitrate_mode;
+ bool frame_rc_enable;
unsigned int bitrate;
unsigned int bitrate_peak;
unsigned int cpb_size;
@@ -200,9 +206,17 @@ struct allegro_channel {
struct v4l2_ctrl *mpeg_video_h264_profile;
struct v4l2_ctrl *mpeg_video_h264_level;
- struct v4l2_ctrl *mpeg_video_bitrate_mode;
- struct v4l2_ctrl *mpeg_video_bitrate;
- struct v4l2_ctrl *mpeg_video_bitrate_peak;
+ struct v4l2_ctrl *mpeg_video_h264_i_frame_qp;
+ struct v4l2_ctrl *mpeg_video_h264_max_qp;
+ struct v4l2_ctrl *mpeg_video_h264_min_qp;
+ struct v4l2_ctrl *mpeg_video_h264_p_frame_qp;
+ struct v4l2_ctrl *mpeg_video_h264_b_frame_qp;
+ struct v4l2_ctrl *mpeg_video_frame_rc_enable;
+ struct { /* video bitrate mode control cluster */
+ struct v4l2_ctrl *mpeg_video_bitrate_mode;
+ struct v4l2_ctrl *mpeg_video_bitrate;
+ struct v4l2_ctrl *mpeg_video_bitrate_peak;
+ };
struct v4l2_ctrl *mpeg_video_cpb_size;
struct v4l2_ctrl *mpeg_video_gop_size;
@@ -215,6 +229,11 @@ struct allegro_channel {
struct list_head buffers_reference;
struct list_head buffers_intermediate;
+ struct list_head source_shadow_list;
+ struct list_head stream_shadow_list;
+ /* protect shadow lists of buffers passed to firmware */
+ struct mutex shadow_list_lock;
+
struct list_head list;
struct completion completion;
@@ -236,6 +255,14 @@ allegro_get_state(struct allegro_channel *channel)
return channel->state;
}
+struct allegro_m2m_buffer {
+ struct v4l2_m2m_buffer buf;
+ struct list_head head;
+};
+
+#define to_allegro_m2m_buffer(__buf) \
+ container_of(__buf, struct allegro_m2m_buffer, buf)
+
struct fw_info {
unsigned int id;
unsigned int id_codec;
@@ -258,276 +285,33 @@ static const struct fw_info supported_firmware[] = {
},
};
-enum mcu_msg_type {
- MCU_MSG_TYPE_INIT = 0x0000,
- MCU_MSG_TYPE_CREATE_CHANNEL = 0x0005,
- MCU_MSG_TYPE_DESTROY_CHANNEL = 0x0006,
- MCU_MSG_TYPE_ENCODE_FRAME = 0x0007,
- MCU_MSG_TYPE_PUT_STREAM_BUFFER = 0x0012,
- MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE = 0x000e,
- MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE = 0x000f,
-};
+static inline u32 to_mcu_addr(struct allegro_dev *dev, dma_addr_t phys)
+{
+ if (upper_32_bits(phys) || (lower_32_bits(phys) & MCU_CACHE_OFFSET))
+ v4l2_warn(&dev->v4l2_dev,
+ "address %pad is outside mcu window\n", &phys);
+
+ return lower_32_bits(phys) | MCU_CACHE_OFFSET;
+}
-static const char *msg_type_name(enum mcu_msg_type type)
+static inline u32 to_mcu_size(struct allegro_dev *dev, size_t size)
{
- static char buf[9];
+ return lower_32_bits(size);
+}
- switch (type) {
- case MCU_MSG_TYPE_INIT:
- return "INIT";
- case MCU_MSG_TYPE_CREATE_CHANNEL:
- return "CREATE_CHANNEL";
- case MCU_MSG_TYPE_DESTROY_CHANNEL:
- return "DESTROY_CHANNEL";
- case MCU_MSG_TYPE_ENCODE_FRAME:
- return "ENCODE_FRAME";
- case MCU_MSG_TYPE_PUT_STREAM_BUFFER:
- return "PUT_STREAM_BUFFER";
- case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
- return "PUSH_BUFFER_INTERMEDIATE";
- case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
- return "PUSH_BUFFER_REFERENCE";
- default:
- snprintf(buf, sizeof(buf), "(0x%04x)", type);
- return buf;
- }
-}
-
-struct mcu_msg_header {
- u16 length; /* length of the body in bytes */
- u16 type;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_init_request {
- struct mcu_msg_header header;
- u32 reserved0; /* maybe a unused channel id */
- u32 suballoc_dma;
- u32 suballoc_size;
- s32 l2_cache[3];
-} __attribute__ ((__packed__));
-
-struct mcu_msg_init_response {
- struct mcu_msg_header header;
- u32 reserved0;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_create_channel {
- struct mcu_msg_header header;
- u32 user_id;
- u16 width;
- u16 height;
- u32 format;
- u32 colorspace;
- u32 src_mode;
- u8 profile;
- u16 constraint_set_flags;
- s8 codec;
- u16 level;
- u16 tier;
- u32 sps_param;
- u32 pps_param;
-
- u32 enc_option;
-#define AL_OPT_WPP BIT(0)
-#define AL_OPT_TILE BIT(1)
-#define AL_OPT_LF BIT(2)
-#define AL_OPT_LF_X_SLICE BIT(3)
-#define AL_OPT_LF_X_TILE BIT(4)
-#define AL_OPT_SCL_LST BIT(5)
-#define AL_OPT_CONST_INTRA_PRED BIT(6)
-#define AL_OPT_QP_TAB_RELATIVE BIT(7)
-#define AL_OPT_FIX_PREDICTOR BIT(8)
-#define AL_OPT_CUSTOM_LDA BIT(9)
-#define AL_OPT_ENABLE_AUTO_QP BIT(10)
-#define AL_OPT_ADAPT_AUTO_QP BIT(11)
-#define AL_OPT_TRANSFO_SKIP BIT(13)
-#define AL_OPT_FORCE_REC BIT(15)
-#define AL_OPT_FORCE_MV_OUT BIT(16)
-#define AL_OPT_FORCE_MV_CLIP BIT(17)
-#define AL_OPT_LOWLAT_SYNC BIT(18)
-#define AL_OPT_LOWLAT_INT BIT(19)
-#define AL_OPT_RDO_COST_MODE BIT(20)
-
- s8 beta_offset;
- s8 tc_offset;
- u16 reserved10;
- u32 unknown11;
- u32 unknown12;
- u16 num_slices;
- u16 prefetch_auto;
- u32 prefetch_mem_offset;
- u32 prefetch_mem_size;
- u16 clip_hrz_range;
- u16 clip_vrt_range;
- u16 me_range[4];
- u8 max_cu_size;
- u8 min_cu_size;
- u8 max_tu_size;
- u8 min_tu_size;
- u8 max_transfo_depth_inter;
- u8 max_transfo_depth_intra;
- u16 reserved20;
- u32 entropy_mode;
- u32 wp_mode;
-
- /* rate control param */
- u32 rate_control_mode;
- u32 initial_rem_delay;
- u32 cpb_size;
- u16 framerate;
- u16 clk_ratio;
- u32 target_bitrate;
- u32 max_bitrate;
- u16 initial_qp;
- u16 min_qp;
- u16 max_qp;
- s16 ip_delta;
- s16 pb_delta;
- u16 golden_ref;
- u16 golden_delta;
- u16 golden_ref_frequency;
- u32 rate_control_option;
-
- /* gop param */
- u32 gop_ctrl_mode;
- u32 freq_ird;
- u32 freq_lt;
- u32 gdr_mode;
- u32 gop_length;
- u32 unknown39;
-
- u32 subframe_latency;
- u32 lda_control_mode;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_create_channel_response {
- struct mcu_msg_header header;
- u32 channel_id;
- u32 user_id;
- u32 options;
- u32 num_core;
- u32 pps_param;
- u32 int_buffers_count;
- u32 int_buffers_size;
- u32 rec_buffers_count;
- u32 rec_buffers_size;
- u32 reserved;
- u32 error_code;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_destroy_channel {
- struct mcu_msg_header header;
- u32 channel_id;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_destroy_channel_response {
- struct mcu_msg_header header;
- u32 channel_id;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_push_buffers_internal_buffer {
- u32 dma_addr;
- u32 mcu_addr;
- u32 size;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_push_buffers_internal {
- struct mcu_msg_header header;
- u32 channel_id;
- struct mcu_msg_push_buffers_internal_buffer buffer[0];
-} __attribute__ ((__packed__));
-
-struct mcu_msg_put_stream_buffer {
- struct mcu_msg_header header;
- u32 channel_id;
- u32 dma_addr;
- u32 mcu_addr;
- u32 size;
- u32 offset;
- u64 stream_id;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_encode_frame {
- struct mcu_msg_header header;
- u32 channel_id;
- u32 reserved;
-
- u32 encoding_options;
-#define AL_OPT_USE_QP_TABLE BIT(0)
-#define AL_OPT_FORCE_LOAD BIT(1)
-#define AL_OPT_USE_L2 BIT(2)
-#define AL_OPT_DISABLE_INTRA BIT(3)
-#define AL_OPT_DEPENDENT_SLICES BIT(4)
-
- s16 pps_qp;
- u16 padding;
- u64 user_param;
- u64 src_handle;
+static inline u32 to_codec_addr(struct allegro_dev *dev, dma_addr_t phys)
+{
+ if (upper_32_bits(phys))
+ v4l2_warn(&dev->v4l2_dev,
+ "address %pad cannot be used by codec\n", &phys);
- u32 request_options;
-#define AL_OPT_SCENE_CHANGE BIT(0)
-#define AL_OPT_RESTART_GOP BIT(1)
-#define AL_OPT_USE_LONG_TERM BIT(2)
-#define AL_OPT_UPDATE_PARAMS BIT(3)
-
- /* u32 scene_change_delay (optional) */
- /* rate control param (optional) */
- /* gop param (optional) */
- u32 src_y;
- u32 src_uv;
- u32 stride;
- u32 ep2;
- u64 ep2_v;
-} __attribute__ ((__packed__));
-
-struct mcu_msg_encode_frame_response {
- struct mcu_msg_header header;
- u32 channel_id;
- u64 stream_id; /* see mcu_msg_put_stream_buffer */
- u64 user_param; /* see mcu_msg_encode_frame */
- u64 src_handle; /* see mcu_msg_encode_frame */
- u16 skip;
- u16 is_ref;
- u32 initial_removal_delay;
- u32 dpb_output_delay;
- u32 size;
- u32 frame_tag_size;
- s32 stuffing;
- s32 filler;
- u16 num_column;
- u16 num_row;
- u16 qp;
- u8 num_ref_idx_l0;
- u8 num_ref_idx_l1;
- u32 partition_table_offset;
- s32 partition_table_size;
- u32 sum_complex;
- s32 tile_width[4];
- s32 tile_height[22];
- u32 error_code;
-
- u32 slice_type;
-#define AL_ENC_SLICE_TYPE_B 0
-#define AL_ENC_SLICE_TYPE_P 1
-#define AL_ENC_SLICE_TYPE_I 2
-
- u32 pic_struct;
- u8 is_idr;
- u8 is_first_slice;
- u8 is_last_slice;
- u8 reserved;
- u16 pps_qp;
- u16 reserved1;
- u32 reserved2;
-} __attribute__ ((__packed__));
-
-union mcu_msg_response {
- struct mcu_msg_header header;
- struct mcu_msg_init_response init;
- struct mcu_msg_create_channel_response create_channel;
- struct mcu_msg_destroy_channel_response destroy_channel;
- struct mcu_msg_encode_frame_response encode_frame;
-};
+ return lower_32_bits(phys);
+}
+
+static inline u64 ptr_to_u64(const void *ptr)
+{
+ return (uintptr_t)ptr;
+}
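
Both address helpers warn when a dma_addr_t does not fit the 32-bit window the MCU and codec can see, then return the truncated value. A standalone sketch of the check (the MCU_CACHE_OFFSET value here is a placeholder, not the driver's constant):

#include <stdint.h>
#include <stdio.h>

#define MCU_CACHE_OFFSET 0x80000000u	/* placeholder value */

/*
 * Same shape as to_mcu_addr(): warn if the address cannot be
 * represented in the 32-bit MCU window, then return the aliased
 * lower half.
 */
static uint32_t example_to_mcu_addr(uint64_t phys)
{
	if ((phys >> 32) || ((uint32_t)phys & MCU_CACHE_OFFSET))
		fprintf(stderr, "address 0x%llx is outside mcu window\n",
			(unsigned long long)phys);
	return (uint32_t)phys | MCU_CACHE_OFFSET;
}

int main(void)
{
	printf("0x%x\n", example_to_mcu_addr(0x00001000));	/* 0x80001000 */
	example_to_mcu_addr(0x100000000ull);			/* warns */
	return 0;
}
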
/* Helper functions for channel and user operations */
@@ -572,6 +356,56 @@ static inline bool channel_exists(struct allegro_channel *channel)
return channel->mcu_channel_id != -1;
}
+#define AL_ERROR 0x80
+#define AL_ERR_INIT_FAILED 0x81
+#define AL_ERR_NO_FRAME_DECODED 0x82
+#define AL_ERR_RESOLUTION_CHANGE 0x85
+#define AL_ERR_NO_MEMORY 0x87
+#define AL_ERR_STREAM_OVERFLOW 0x88
+#define AL_ERR_TOO_MANY_SLICES 0x89
+#define AL_ERR_BUF_NOT_READY 0x8c
+#define AL_ERR_NO_CHANNEL_AVAILABLE 0x8d
+#define AL_ERR_RESOURCE_UNAVAILABLE 0x8e
+#define AL_ERR_NOT_ENOUGH_CORES 0x8f
+#define AL_ERR_REQUEST_MALFORMED 0x90
+#define AL_ERR_CMD_NOT_ALLOWED 0x91
+#define AL_ERR_INVALID_CMD_VALUE 0x92
+
+static inline const char *allegro_err_to_string(unsigned int err)
+{
+ switch (err) {
+ case AL_ERR_INIT_FAILED:
+ return "initialization failed";
+ case AL_ERR_NO_FRAME_DECODED:
+ return "no frame decoded";
+ case AL_ERR_RESOLUTION_CHANGE:
+ return "resolution change";
+ case AL_ERR_NO_MEMORY:
+ return "out of memory";
+ case AL_ERR_STREAM_OVERFLOW:
+ return "stream buffer overflow";
+ case AL_ERR_TOO_MANY_SLICES:
+ return "too many slices";
+ case AL_ERR_BUF_NOT_READY:
+ return "buffer not ready";
+ case AL_ERR_NO_CHANNEL_AVAILABLE:
+ return "no channel available";
+ case AL_ERR_RESOURCE_UNAVAILABLE:
+ return "resource unavailable";
+ case AL_ERR_NOT_ENOUGH_CORES:
+ return "not enough cores";
+ case AL_ERR_REQUEST_MALFORMED:
+ return "request malformed";
+ case AL_ERR_CMD_NOT_ALLOWED:
+ return "command not allowed";
+ case AL_ERR_INVALID_CMD_VALUE:
+ return "invalid command value";
+ case AL_ERROR:
+ default:
+ return "unknown error";
+ }
+}
+
static unsigned int estimate_stream_size(unsigned int width,
unsigned int height)
{
@@ -781,7 +615,7 @@ static int allegro_mbox_write(struct allegro_dev *dev,
if (size > mbox->size) {
v4l2_err(&dev->v4l2_dev,
- "message (%zu bytes) to large for mailbox (%zu bytes)\n",
+ "message (%zu bytes) too large for mailbox (%zu bytes)\n",
size, mbox->size);
return -EINVAL;
}
@@ -894,8 +728,8 @@ static void allegro_mcu_send_init(struct allegro_dev *dev,
msg.header.type = MCU_MSG_TYPE_INIT;
msg.header.length = sizeof(msg) - sizeof(msg.header);
- msg.suballoc_dma = lower_32_bits(suballoc_dma) | MCU_CACHE_OFFSET;
- msg.suballoc_size = suballoc_size;
+ msg.suballoc_dma = to_mcu_addr(dev, suballoc_dma);
+ msg.suballoc_size = to_mcu_size(dev, suballoc_size);
/* disable L2 cache */
msg.l2_cache[0] = -1;
@@ -1001,6 +835,103 @@ v4l2_bitrate_mode_to_mcu_mode(enum v4l2_mpeg_video_bitrate_mode mode)
}
}
+static u32 v4l2_cpb_size_to_mcu(unsigned int cpb_size, unsigned int bitrate)
+{
+ unsigned int cpb_size_kbit;
+ unsigned int bitrate_kbps;
+
+ /*
+ * The mcu expects the CPB size in units of a 90 kHz clock, but the
+ * channel follows the V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE and stores
+ * the CPB size in kilobytes.
+ */
+ cpb_size_kbit = cpb_size * BITS_PER_BYTE;
+ bitrate_kbps = bitrate / 1000;
+
+ return (cpb_size_kbit * 90000) / bitrate_kbps;
+}
+
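The conversion can be sanity-checked with concrete numbers: a 3000 kB CPB at 8 Mbit/s holds three seconds of stream, i.e. 270000 ticks of the 90 kHz clock. A self-contained copy of the arithmetic:

#include <stdio.h>

#define BITS_PER_BYTE 8

/*
 * Mirror of v4l2_cpb_size_to_mcu() above, for checking the units:
 * kilobytes -> kilobits, bps -> kbps, then seconds * 90000 ticks.
 */
static unsigned int cpb_size_to_mcu(unsigned int cpb_size_kb,
				    unsigned int bitrate_bps)
{
	unsigned int cpb_size_kbit = cpb_size_kb * BITS_PER_BYTE;
	unsigned int bitrate_kbps = bitrate_bps / 1000;

	return (cpb_size_kbit * 90000) / bitrate_kbps;
}

int main(void)
{
	/* 3000 kB at 8 Mbit/s -> 3 s -> 270000 ticks */
	printf("%u\n", cpb_size_to_mcu(3000, 8000000));
	return 0;
}
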
+static s16 get_qp_delta(int minuend, int subtrahend)
+{
+ if (minuend == subtrahend)
+ return -1;
+ else
+ return minuend - subtrahend;
+}
+
+static int fill_create_channel_param(struct allegro_channel *channel,
+ struct create_channel_param *param)
+{
+ int i_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_i_frame_qp);
+ int p_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_p_frame_qp);
+ int b_frame_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_b_frame_qp);
+ int bitrate_mode = v4l2_ctrl_g_ctrl(channel->mpeg_video_bitrate_mode);
+
+ param->width = channel->width;
+ param->height = channel->height;
+ param->format = v4l2_pixelformat_to_mcu_format(channel->pixelformat);
+ param->colorspace =
+ v4l2_colorspace_to_mcu_colorspace(channel->colorspace);
+ param->src_mode = 0x0;
+ param->profile = v4l2_profile_to_mcu_profile(channel->profile);
+ param->constraint_set_flags = BIT(1);
+ param->codec = v4l2_pixelformat_to_mcu_codec(channel->codec);
+ param->level = v4l2_level_to_mcu_level(channel->level);
+ param->tier = 0;
+ param->sps_param = BIT(20) | 0x4a;
+ param->pps_param = BIT(2);
+ param->enc_option = AL_OPT_RDO_COST_MODE | AL_OPT_LF_X_TILE |
+ AL_OPT_LF_X_SLICE | AL_OPT_LF;
+ param->beta_offset = -1;
+ param->tc_offset = -1;
+ param->num_slices = 1;
+ param->me_range[0] = 8;
+ param->me_range[1] = 8;
+ param->me_range[2] = 16;
+ param->me_range[3] = 16;
+ param->max_cu_size = ilog2(SIZE_MACROBLOCK);
+ param->min_cu_size = ilog2(8);
+ param->max_tu_size = 2;
+ param->min_tu_size = 2;
+ param->max_transfo_depth_intra = 1;
+ param->max_transfo_depth_inter = 1;
+
+ param->prefetch_auto = 0;
+ param->prefetch_mem_offset = 0;
+ param->prefetch_mem_size = 0;
+
+ param->rate_control_mode = channel->frame_rc_enable ?
+ v4l2_bitrate_mode_to_mcu_mode(bitrate_mode) : 0;
+
+ param->cpb_size = v4l2_cpb_size_to_mcu(channel->cpb_size,
+ channel->bitrate_peak);
+ /* Shall be ]0;cpb_size in 90 kHz units]. Use maximum value. */
+ param->initial_rem_delay = param->cpb_size;
+ param->framerate = DIV_ROUND_UP(channel->framerate.numerator,
+ channel->framerate.denominator);
+ param->clk_ratio = channel->framerate.denominator == 1001 ? 1001 : 1000;
+ param->target_bitrate = channel->bitrate;
+ param->max_bitrate = channel->bitrate_peak;
+ param->initial_qp = i_frame_qp;
+ param->min_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_min_qp);
+ param->max_qp = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_max_qp);
+ param->ip_delta = get_qp_delta(i_frame_qp, p_frame_qp);
+ param->pb_delta = get_qp_delta(p_frame_qp, b_frame_qp);
+ param->golden_ref = 0;
+ param->golden_delta = 2;
+ param->golden_ref_frequency = 10;
+ param->rate_control_option = 0x00000000;
+
+ param->gop_ctrl_mode = 0x00000000;
+ param->freq_idr = channel->gop_size;
+ param->freq_lt = 0;
+ param->gdr_mode = 0x00000000;
+ param->gop_length = channel->gop_size;
+ param->subframe_latency = 0x00000000;
+
+ return 0;
+}
+
static int allegro_mcu_send_create_channel(struct allegro_dev *dev,
struct allegro_channel *channel)
{
@@ -1012,63 +943,8 @@ static int allegro_mcu_send_create_channel(struct allegro_dev *dev,
msg.header.length = sizeof(msg) - sizeof(msg.header);
msg.user_id = channel->user_id;
- msg.width = channel->width;
- msg.height = channel->height;
- msg.format = v4l2_pixelformat_to_mcu_format(channel->pixelformat);
- msg.colorspace = v4l2_colorspace_to_mcu_colorspace(channel->colorspace);
- msg.src_mode = 0x0;
- msg.profile = v4l2_profile_to_mcu_profile(channel->profile);
- msg.constraint_set_flags = BIT(1);
- msg.codec = v4l2_pixelformat_to_mcu_codec(channel->codec);
- msg.level = v4l2_level_to_mcu_level(channel->level);
- msg.tier = 0;
- msg.sps_param = BIT(20) | 0x4a;
- msg.pps_param = BIT(2);
- msg.enc_option = AL_OPT_RDO_COST_MODE | AL_OPT_LF_X_TILE |
- AL_OPT_LF_X_SLICE | AL_OPT_LF;
- msg.beta_offset = -1;
- msg.tc_offset = -1;
- msg.num_slices = 1;
- msg.me_range[0] = 8;
- msg.me_range[1] = 8;
- msg.me_range[2] = 16;
- msg.me_range[3] = 16;
- msg.max_cu_size = ilog2(SIZE_MACROBLOCK);
- msg.min_cu_size = ilog2(8);
- msg.max_tu_size = 2;
- msg.min_tu_size = 2;
- msg.max_transfo_depth_intra = 1;
- msg.max_transfo_depth_inter = 1;
-
- msg.rate_control_mode =
- v4l2_bitrate_mode_to_mcu_mode(channel->bitrate_mode);
- /* Shall be ]0;cpb_size in 90 kHz units]. Use maximum value. */
- msg.initial_rem_delay =
- ((channel->cpb_size * 1000) / channel->bitrate_peak) * 90000;
- /* Encoder expects cpb_size in units of a 90 kHz clock. */
- msg.cpb_size =
- ((channel->cpb_size * 1000) / channel->bitrate_peak) * 90000;
- msg.framerate = 25;
- msg.clk_ratio = 1000;
- msg.target_bitrate = channel->bitrate;
- msg.max_bitrate = channel->bitrate_peak;
- msg.initial_qp = 25;
- msg.min_qp = 10;
- msg.max_qp = 51;
- msg.ip_delta = -1;
- msg.pb_delta = -1;
- msg.golden_ref = 0;
- msg.golden_delta = 2;
- msg.golden_ref_frequency = 10;
- msg.rate_control_option = 0x00000000;
-
- msg.gop_ctrl_mode = 0x00000000;
- msg.freq_ird = 0x7fffffff;
- msg.freq_lt = 0;
- msg.gdr_mode = 0x00000000;
- msg.gop_length = channel->gop_size;
- msg.subframe_latency = 0x00000000;
- msg.lda_control_mode = 0x700d0000;
+
+ fill_create_channel_param(channel, &msg.param);
allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
allegro_mcu_interrupt(dev);
@@ -1097,7 +973,8 @@ static int allegro_mcu_send_destroy_channel(struct allegro_dev *dev,
static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev,
struct allegro_channel *channel,
dma_addr_t paddr,
- unsigned long size)
+ unsigned long size,
+ u64 stream_id)
{
struct mcu_msg_put_stream_buffer msg;
@@ -1107,11 +984,12 @@ static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev,
msg.header.length = sizeof(msg) - sizeof(msg.header);
msg.channel_id = channel->mcu_channel_id;
- msg.dma_addr = paddr;
- msg.mcu_addr = paddr | MCU_CACHE_OFFSET;
+ msg.dma_addr = to_codec_addr(dev, paddr);
+ msg.mcu_addr = to_mcu_addr(dev, paddr);
msg.size = size;
msg.offset = ENCODER_STREAM_OFFSET;
- msg.stream_id = 0; /* copied to mcu_msg_encode_frame_response */
+ /* copied to mcu_msg_encode_frame_response */
+ msg.stream_id = stream_id;
allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
allegro_mcu_interrupt(dev);
@@ -1121,7 +999,8 @@ static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev,
static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
struct allegro_channel *channel,
- dma_addr_t src_y, dma_addr_t src_uv)
+ dma_addr_t src_y, dma_addr_t src_uv,
+ u64 src_handle)
{
struct mcu_msg_encode_frame msg;
@@ -1134,12 +1013,13 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
msg.encoding_options = AL_OPT_FORCE_LOAD;
msg.pps_qp = 26; /* qp are relative to 26 */
msg.user_param = 0; /* copied to mcu_msg_encode_frame_response */
- msg.src_handle = 0; /* copied to mcu_msg_encode_frame_response */
- msg.src_y = src_y;
- msg.src_uv = src_uv;
+ /* src_handle is copied to mcu_msg_encode_frame_response */
+ msg.src_handle = src_handle;
+ msg.src_y = to_codec_addr(dev, src_y);
+ msg.src_uv = to_codec_addr(dev, src_uv);
msg.stride = channel->stride;
msg.ep2 = 0x0;
- msg.ep2_v = msg.ep2 | MCU_CACHE_OFFSET;
+ msg.ep2_v = to_mcu_addr(dev, msg.ep2);
allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
allegro_mcu_interrupt(dev);
@@ -1198,10 +1078,9 @@ static int allegro_mcu_push_buffer_internal(struct allegro_channel *channel,
buffer = msg->buffer;
list_for_each_entry(al_buffer, list, head) {
- buffer->dma_addr = lower_32_bits(al_buffer->paddr);
- buffer->mcu_addr =
- lower_32_bits(al_buffer->paddr) | MCU_CACHE_OFFSET;
- buffer->size = al_buffer->size;
+ buffer->dma_addr = to_codec_addr(dev, al_buffer->paddr);
+ buffer->mcu_addr = to_mcu_addr(dev, al_buffer->paddr);
+ buffer->size = to_mcu_size(dev, al_buffer->size);
buffer++;
}
@@ -1360,9 +1239,11 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
sps->vui.chroma_loc_info_present_flag = 1;
sps->vui.chroma_sample_loc_type_top_field = 0;
sps->vui.chroma_sample_loc_type_bottom_field = 0;
+
sps->vui.timing_info_present_flag = 1;
- sps->vui.num_units_in_tick = 1;
- sps->vui.time_scale = 50;
+ sps->vui.num_units_in_tick = channel->framerate.denominator;
+ sps->vui.time_scale = 2 * channel->framerate.numerator;
+
sps->vui.fixed_frame_rate_flag = 1;
sps->vui.nal_hrd_parameters_present_flag = 0;
sps->vui.vcl_hrd_parameters_present_flag = 1;
@@ -1375,7 +1256,8 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */
sps->vui.vcl_hrd_parameters.cpb_size_value_minus1[0] =
(channel->cpb_size * 1000) / (1 << (4 + sps->vui.vcl_hrd_parameters.cpb_size_scale)) - 1;
- sps->vui.vcl_hrd_parameters.cbr_flag[0] = 1;
+ sps->vui.vcl_hrd_parameters.cbr_flag[0] =
+ !v4l2_ctrl_g_ctrl(channel->mpeg_video_frame_rc_enable);
sps->vui.vcl_hrd_parameters.initial_cpb_removal_delay_length_minus1 = 31;
sps->vui.vcl_hrd_parameters.cpb_removal_delay_length_minus1 = 31;
sps->vui.vcl_hrd_parameters.dpb_output_delay_length_minus1 = 31;
@@ -1438,8 +1320,11 @@ static bool allegro_channel_is_at_eos(struct allegro_channel *channel)
break;
case ALLEGRO_STATE_DRAIN:
case ALLEGRO_STATE_WAIT_FOR_BUFFER:
- if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) == 0)
+ mutex_lock(&channel->shadow_list_lock);
+ if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) == 0 &&
+ list_empty(&channel->source_shadow_list))
is_at_eos = true;
+ mutex_unlock(&channel->shadow_list_lock);
break;
default:
break;
@@ -1466,6 +1351,41 @@ static void allegro_channel_buf_done(struct allegro_channel *channel,
v4l2_m2m_buf_done(buf, state);
}
+static u64 allegro_put_buffer(struct allegro_channel *channel,
+ struct list_head *list,
+ struct vb2_v4l2_buffer *buffer)
+{
+ struct v4l2_m2m_buffer *b = container_of(buffer,
+ struct v4l2_m2m_buffer, vb);
+ struct allegro_m2m_buffer *shadow = to_allegro_m2m_buffer(b);
+
+ mutex_lock(&channel->shadow_list_lock);
+ list_add_tail(&shadow->head, list);
+ mutex_unlock(&channel->shadow_list_lock);
+
+ return ptr_to_u64(buffer);
+}
+
+static struct vb2_v4l2_buffer *
+allegro_get_buffer(struct allegro_channel *channel,
+ struct list_head *list, u64 handle)
+{
+ struct allegro_m2m_buffer *shadow, *tmp;
+ struct vb2_v4l2_buffer *buffer = NULL;
+
+ mutex_lock(&channel->shadow_list_lock);
+ list_for_each_entry_safe(shadow, tmp, list, head) {
+ if (handle == ptr_to_u64(&shadow->buf.vb)) {
+ buffer = &shadow->buf.vb;
+ list_del_init(&shadow->head);
+ break;
+ }
+ }
+ mutex_unlock(&channel->shadow_list_lock);
+
+ return buffer;
+}
+
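These two helpers implement the shadow-list pattern used throughout the rest of this patch: a buffer handed to the firmware is parked on a list and identified by an opaque u64 handle (its pointer value), and only handles that were actually handed out are completed on the way back. A userspace simulation of the round trip (names invented for illustration):

#include <stdint.h>
#include <stdio.h>

/*
 * put_buffer() parks a buffer and returns its address as an opaque
 * handle; get_buffer() only returns buffers it actually handed out,
 * so a corrupt handle from the "firmware" is caught, not dereferenced.
 */
struct buf { struct buf *next; int id; };

static struct buf *shadow;

static uint64_t put_buffer(struct buf *b)
{
	b->next = shadow;
	shadow = b;
	return (uintptr_t)b;
}

static struct buf *get_buffer(uint64_t handle)
{
	struct buf **p;

	for (p = &shadow; *p; p = &(*p)->next) {
		if ((uintptr_t)*p == handle) {
			struct buf *b = *p;

			*p = b->next;
			return b;
		}
	}
	return NULL;	/* unknown handle: warn, don't crash */
}

int main(void)
{
	struct buf b = { .id = 42 };
	uint64_t h = put_buffer(&b);

	printf("valid: %p\n", (void *)get_buffer(h));		/* &b */
	printf("bogus: %p\n", (void *)get_buffer(h + 1));	/* (nil) */
	return 0;
}
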
static void allegro_channel_finish_frame(struct allegro_channel *channel,
struct mcu_msg_encode_frame_response *msg)
{
@@ -1481,15 +1401,31 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
ssize_t len;
ssize_t free;
- src_buf = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx);
+ src_buf = allegro_get_buffer(channel, &channel->source_shadow_list,
+ msg->src_handle);
+ if (!src_buf)
+ v4l2_warn(&dev->v4l2_dev,
+ "channel %d: invalid source buffer\n",
+ channel->mcu_channel_id);
+
+ dst_buf = allegro_get_buffer(channel, &channel->stream_shadow_list,
+ msg->stream_id);
+ if (!dst_buf)
+ v4l2_warn(&dev->v4l2_dev,
+ "channel %d: invalid stream buffer\n",
+ channel->mcu_channel_id);
+
+ if (!src_buf || !dst_buf)
+ goto err;
- dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx);
dst_buf->sequence = channel->csequence++;
- if (msg->error_code) {
+ if (msg->error_code & AL_ERROR) {
v4l2_err(&dev->v4l2_dev,
- "channel %d: error while encoding frame: %x\n",
- channel->mcu_channel_id, msg->error_code);
+ "channel %d: failed to encode frame: %s (%x)\n",
+ channel->mcu_channel_id,
+ allegro_err_to_string(msg->error_code),
+ msg->error_code);
goto err;
}
@@ -1562,17 +1498,22 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
channel->mcu_channel_id, len);
}
- len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free);
- if (len < 0) {
- v4l2_err(&dev->v4l2_dev,
- "failed to write %zd filler data\n", free);
- goto err;
+ if (msg->slice_type != AL_ENC_SLICE_TYPE_I && !msg->is_idr) {
+ dst_buf->vb2_buf.planes[0].data_offset = free;
+ free = 0;
+ } else {
+ len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free);
+ if (len < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to write %zd filler data\n", free);
+ goto err;
+ }
+ curr += len;
+ free -= len;
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "channel %d: wrote %zd bytes filler nal unit\n",
+ channel->mcu_channel_id, len);
}
- curr += len;
- free -= len;
- v4l2_dbg(2, debug, &dev->v4l2_dev,
- "channel %d: wrote %zd bytes filler nal unit\n",
- channel->mcu_channel_id, len);
if (free != 0) {
v4l2_err(&dev->v4l2_dev,
@@ -1590,20 +1531,20 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
v4l2_dbg(1, debug, &dev->v4l2_dev,
- "channel %d: encoded frame #%03d (%s%s, %d bytes)\n",
+ "channel %d: encoded frame #%03d (%s%s, QP %d, %d bytes)\n",
channel->mcu_channel_id,
dst_buf->sequence,
msg->is_idr ? "IDR, " : "",
msg->slice_type == AL_ENC_SLICE_TYPE_I ? "I slice" :
msg->slice_type == AL_ENC_SLICE_TYPE_P ? "P slice" : "unknown",
- partition->size);
+ msg->qp, partition->size);
err:
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
-
- allegro_channel_buf_done(channel, dst_buf, state);
+ if (src_buf)
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- v4l2_m2m_job_finish(dev->m2m_dev, channel->fh.m2m_ctx);
+ if (dst_buf)
+ allegro_channel_buf_done(channel, dst_buf, state);
}
static int allegro_handle_init(struct allegro_dev *dev,
@@ -1621,6 +1562,12 @@ allegro_handle_create_channel(struct allegro_dev *dev,
struct allegro_channel *channel;
int err = 0;
+ if (msg->header.length != sizeof(*msg) - sizeof(msg->header))
+ v4l2_warn(&dev->v4l2_dev,
+ "received message has %d bytes, but expected %zu\n",
+ msg->header.length,
+ sizeof(*msg) - sizeof(msg->header));
+
channel = allegro_find_channel_by_user_id(dev, msg->user_id);
if (IS_ERR(channel)) {
v4l2_warn(&dev->v4l2_dev,
@@ -1632,8 +1579,10 @@ allegro_handle_create_channel(struct allegro_dev *dev,
if (msg->error_code) {
v4l2_err(&dev->v4l2_dev,
- "user %d: mcu failed to create channel: error %x\n",
- channel->user_id, msg->error_code);
+ "user %d: mcu failed to create channel: %s (%x)\n",
+ channel->user_id,
+ allegro_err_to_string(msg->error_code),
+ msg->error_code);
err = -EIO;
goto out;
}
@@ -1712,6 +1661,12 @@ allegro_handle_encode_frame(struct allegro_dev *dev,
{
struct allegro_channel *channel;
+ if (msg->header.length != sizeof(*msg) - sizeof(msg->header))
+ v4l2_warn(&dev->v4l2_dev,
+ "received message has %d bytes, but expected %zu\n",
+ msg->header.length,
+ sizeof(*msg) - sizeof(msg->header));
+
channel = allegro_find_channel_by_channel_id(dev, msg->channel_id);
if (IS_ERR(channel)) {
v4l2_err(&dev->v4l2_dev,
@@ -1920,6 +1875,14 @@ static int allegro_mcu_reset(struct allegro_dev *dev)
{
int err;
+ /*
+ * Ensure that the AL5_MCU_WAKEUP bit is set to 0; otherwise the mcu
+ * does not go to sleep after the reset.
+ */
+ err = regmap_write(dev->regmap, AL5_MCU_WAKEUP, 0);
+ if (err)
+ return err;
+
err = regmap_write(dev->regmap,
AL5_MCU_RESET_MODE, AL5_MCU_RESET_MODE_SLEEP);
if (err < 0)
@@ -1955,6 +1918,12 @@ static void allegro_destroy_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_h264_profile, false);
v4l2_ctrl_grab(channel->mpeg_video_h264_level, false);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_i_frame_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_max_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_min_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_p_frame_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_b_frame_qp, false);
+ v4l2_ctrl_grab(channel->mpeg_video_frame_rc_enable, false);
v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, false);
v4l2_ctrl_grab(channel->mpeg_video_bitrate, false);
v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, false);
@@ -2000,7 +1969,9 @@ static int allegro_create_channel(struct allegro_channel *channel)
v4l2_dbg(1, debug, &dev->v4l2_dev,
"user %d: creating channel (%4.4s, %dx%d@%d)\n",
channel->user_id,
- (char *)&channel->codec, channel->width, channel->height, 25);
+ (char *)&channel->codec, channel->width, channel->height,
+ DIV_ROUND_UP(channel->framerate.numerator,
+ channel->framerate.denominator));
min_level = select_minimum_h264_level(channel->width, channel->height);
if (channel->level < min_level) {
@@ -2014,6 +1985,12 @@ static int allegro_create_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_h264_profile, true);
v4l2_ctrl_grab(channel->mpeg_video_h264_level, true);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_i_frame_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_max_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_min_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_p_frame_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_b_frame_qp, true);
+ v4l2_ctrl_grab(channel->mpeg_video_frame_rc_enable, true);
v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, true);
v4l2_ctrl_grab(channel->mpeg_video_bitrate, true);
v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, true);
@@ -2046,6 +2023,7 @@ static void allegro_set_default_params(struct allegro_channel *channel)
channel->width = ALLEGRO_WIDTH_DEFAULT;
channel->height = ALLEGRO_HEIGHT_DEFAULT;
channel->stride = round_up(channel->width, 32);
+ channel->framerate = ALLEGRO_FRAMERATE_DEFAULT;
channel->colorspace = V4L2_COLORSPACE_REC709;
channel->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
@@ -2062,7 +2040,6 @@ static void allegro_set_default_params(struct allegro_channel *channel)
channel->sizeimage_encoded =
estimate_stream_size(channel->width, channel->height);
- channel->bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
channel->bitrate = maximum_bitrate(channel->level);
channel->bitrate_peak = maximum_bitrate(channel->level);
channel->cpb_size = maximum_cpb_size(channel->level);
@@ -2163,16 +2140,33 @@ static void allegro_stop_streaming(struct vb2_queue *q)
struct allegro_channel *channel = vb2_get_drv_priv(q);
struct allegro_dev *dev = channel->dev;
struct vb2_v4l2_buffer *buffer;
+ struct allegro_m2m_buffer *shadow, *tmp;
v4l2_dbg(2, debug, &dev->v4l2_dev,
"%s: stop streaming\n",
V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ mutex_lock(&channel->shadow_list_lock);
+ list_for_each_entry_safe(shadow, tmp,
+ &channel->source_shadow_list, head) {
+ list_del(&shadow->head);
+ v4l2_m2m_buf_done(&shadow->buf.vb, VB2_BUF_STATE_ERROR);
+ }
+ mutex_unlock(&channel->shadow_list_lock);
+
allegro_set_state(channel, ALLEGRO_STATE_STOPPED);
while ((buffer = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx)))
v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ mutex_lock(&channel->shadow_list_lock);
+ list_for_each_entry_safe(shadow, tmp,
+ &channel->stream_shadow_list, head) {
+ list_del(&shadow->head);
+ v4l2_m2m_buf_done(&shadow->buf.vb, VB2_BUF_STATE_ERROR);
+ }
+ mutex_unlock(&channel->shadow_list_lock);
+
allegro_destroy_channel(channel);
while ((buffer = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx)))
v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
@@ -2203,7 +2197,7 @@ static int allegro_queue_init(void *priv,
src_vq->drv_priv = channel;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->ops = &allegro_queue_ops;
- src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->buf_struct_size = sizeof(struct allegro_m2m_buffer);
src_vq->lock = &channel->dev->lock;
err = vb2_queue_init(src_vq);
if (err)
@@ -2216,7 +2210,7 @@ static int allegro_queue_init(void *priv,
dst_vq->drv_priv = channel;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->ops = &allegro_queue_ops;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->buf_struct_size = sizeof(struct allegro_m2m_buffer);
dst_vq->lock = &channel->dev->lock;
err = vb2_queue_init(dst_vq);
if (err)
@@ -2225,6 +2219,52 @@ static int allegro_queue_init(void *priv,
return 0;
}
+static int allegro_clamp_qp(struct allegro_channel *channel,
+ struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ctrl *next_ctrl;
+
+ if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP)
+ next_ctrl = channel->mpeg_video_h264_p_frame_qp;
+ else if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP)
+ next_ctrl = channel->mpeg_video_h264_b_frame_qp;
+ else
+ return 0;
+
+ /* Modify range automatically updates the value */
+ __v4l2_ctrl_modify_range(next_ctrl, ctrl->val, 51, 1, ctrl->val);
+
+ return allegro_clamp_qp(channel, next_ctrl);
+}
+
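allegro_clamp_qp() keeps the QP controls ordered I <= P <= B by raising the lower bound of the next control in the chain; because modifying a v4l2 control's range also re-clamps its current value, a change ripples down the chain. A plain-C simulation of that cascade (the v4l2 machinery is replaced by a simple clamp):

#include <stdio.h>

/*
 * Stand-in for a v4l2 control: modifying the range re-clamps the
 * value, which is what lets allegro_clamp_qp() cascade I -> P -> B.
 */
struct ctrl { int min, max, val; };

static void modify_range(struct ctrl *c, int min, int max)
{
	c->min = min;
	c->max = max;
	if (c->val < min)
		c->val = min;
	if (c->val > max)
		c->val = max;
}

int main(void)
{
	struct ctrl i_qp = { 0, 51, 35 };
	struct ctrl p_qp = { 0, 51, 30 };
	struct ctrl b_qp = { 0, 51, 30 };

	/* setting I-frame QP to 35 forces P >= 35, which forces B >= 35 */
	modify_range(&p_qp, i_qp.val, 51);
	modify_range(&b_qp, p_qp.val, 51);
	printf("I=%d P=%d B=%d\n", i_qp.val, p_qp.val, b_qp.val);
	return 0;
}
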
+static int allegro_clamp_bitrate(struct allegro_channel *channel,
+ struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ctrl *ctrl_bitrate = channel->mpeg_video_bitrate;
+ struct v4l2_ctrl *ctrl_bitrate_peak = channel->mpeg_video_bitrate_peak;
+
+ if (ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
+ ctrl_bitrate_peak->val < ctrl_bitrate->val)
+ ctrl_bitrate_peak->val = ctrl_bitrate->val;
+
+ return 0;
+}
+
+static int allegro_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct allegro_channel *channel = container_of(ctrl->handler,
+ struct allegro_channel,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ allegro_clamp_bitrate(channel, ctrl);
+ break;
+ }
+
+ return 0;
+}
+
static int allegro_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct allegro_channel *channel = container_of(ctrl->handler,
@@ -2239,14 +2279,14 @@ static int allegro_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
channel->level = ctrl->val;
break;
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- channel->bitrate_mode = ctrl->val;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ channel->frame_rc_enable = ctrl->val;
break;
- case V4L2_CID_MPEG_VIDEO_BITRATE:
- channel->bitrate = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
- channel->bitrate_peak = ctrl->val;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ channel->bitrate = channel->mpeg_video_bitrate->val;
+ channel->bitrate_peak = channel->mpeg_video_bitrate_peak->val;
+ v4l2_ctrl_activate(channel->mpeg_video_bitrate_peak,
+ ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
break;
case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
channel->cpb_size = ctrl->val;
@@ -2254,12 +2294,18 @@ static int allegro_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
channel->gop_size = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ allegro_clamp_qp(channel, ctrl);
+ break;
}
return 0;
}
static const struct v4l2_ctrl_ops allegro_ctrl_ops = {
+ .try_ctrl = allegro_try_ctrl,
.s_ctrl = allegro_s_ctrl,
};
@@ -2270,16 +2316,18 @@ static int allegro_open(struct file *file)
struct allegro_channel *channel = NULL;
struct v4l2_ctrl_handler *handler;
u64 mask;
+ int ret;
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return -ENOMEM;
v4l2_fh_init(&channel->fh, vdev);
- file->private_data = &channel->fh;
- v4l2_fh_add(&channel->fh);
init_completion(&channel->completion);
+ INIT_LIST_HEAD(&channel->source_shadow_list);
+ INIT_LIST_HEAD(&channel->stream_shadow_list);
+ mutex_init(&channel->shadow_list_lock);
channel->dev = dev;
@@ -2298,11 +2346,42 @@ static int allegro_open(struct file *file)
V4L2_CID_MPEG_VIDEO_H264_LEVEL,
V4L2_MPEG_VIDEO_H264_LEVEL_5_1, mask,
V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
+ channel->mpeg_video_h264_i_frame_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+ 0, 51, 1, 30);
+ channel->mpeg_video_h264_max_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ 0, 51, 1, 51);
+ channel->mpeg_video_h264_min_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ 0, 51, 1, 0);
+ channel->mpeg_video_h264_p_frame_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
+ 0, 51, 1, 30);
+ channel->mpeg_video_h264_b_frame_qp =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
+ 0, 51, 1, 30);
+ channel->mpeg_video_frame_rc_enable =
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+ false, 0x1,
+ true, false);
channel->mpeg_video_bitrate_mode = v4l2_ctrl_new_std_menu(handler,
&allegro_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
- channel->bitrate_mode);
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
channel->mpeg_video_bitrate = v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE,
@@ -2324,12 +2403,19 @@ static int allegro_open(struct file *file)
0, ALLEGRO_GOP_SIZE_MAX,
1, channel->gop_size);
v4l2_ctrl_new_std(handler,
- &allegro_ctrl_ops,
- V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
- 1, 32,
- 1, 1);
+ &allegro_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+ 1, 32,
+ 1, 1);
+ if (handler->error != 0) {
+ ret = handler->error;
+ goto error;
+ }
+
channel->fh.ctrl_handler = handler;
+ v4l2_ctrl_cluster(3, &channel->mpeg_video_bitrate_mode);
+
channel->mcu_channel_id = -1;
channel->user_id = -1;
@@ -2341,7 +2427,20 @@ static int allegro_open(struct file *file)
channel->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, channel,
allegro_queue_init);
+ if (IS_ERR(channel->fh.m2m_ctx)) {
+ ret = PTR_ERR(channel->fh.m2m_ctx);
+ goto error;
+ }
+
+ file->private_data = &channel->fh;
+ v4l2_fh_add(&channel->fh);
+
return 0;
+
+error:
+ v4l2_ctrl_handler_free(handler);
+ kfree(channel);
+ return ret;
}
static int allegro_release(struct file *file)
@@ -2636,6 +2735,46 @@ static int allegro_ioctl_streamon(struct file *file, void *priv,
return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
+static int allegro_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+ struct v4l2_fract *timeperframe;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ timeperframe = &a->parm.output.timeperframe;
+ timeperframe->numerator = channel->framerate.denominator;
+ timeperframe->denominator = channel->framerate.numerator;
+
+ return 0;
+}
+
+static int allegro_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+ struct v4l2_fract *timeperframe;
+ int div;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ timeperframe = &a->parm.output.timeperframe;
+
+ if (timeperframe->numerator == 0 || timeperframe->denominator == 0)
+ return allegro_g_parm(file, fh, a);
+
+ div = gcd(timeperframe->denominator, timeperframe->numerator);
+ channel->framerate.numerator = timeperframe->denominator / div;
+ channel->framerate.denominator = timeperframe->numerator / div;
+
+ return 0;
+}
+
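V4L2 streamparm carries time-per-frame (seconds per frame) while the channel stores a frame rate, so s_parm reduces the fraction with gcd() and inverts it; a zero numerator or denominator falls back to g_parm. A worked check of the reduction:

#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* NTSC-style request: 1001/30000 s per frame */
	unsigned int tpf_num = 1001, tpf_den = 30000;
	unsigned int div = gcd(tpf_den, tpf_num);

	/* inverted and reduced: framerate 30000/1001 (~29.97 fps) */
	printf("framerate = %u/%u\n", tpf_den / div, tpf_num / div);
	return 0;
}
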
static int allegro_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
@@ -2674,6 +2813,9 @@ static const struct v4l2_ioctl_ops allegro_ioctl_ops = {
.vidioc_encoder_cmd = allegro_encoder_cmd,
.vidioc_enum_framesizes = allegro_enum_framesizes,
+ .vidioc_g_parm = allegro_g_parm,
+ .vidioc_s_parm = allegro_s_parm,
+
.vidioc_subscribe_event = allegro_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -2701,7 +2843,7 @@ static int allegro_register_device(struct allegro_dev *dev)
video_dev->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
video_set_drvdata(video_dev, dev);
- return video_register_device(video_dev, VFL_TYPE_GRABBER, 0);
+ return video_register_device(video_dev, VFL_TYPE_VIDEO, 0);
}
static void allegro_device_run(void *priv)
@@ -2714,18 +2856,26 @@ static void allegro_device_run(void *priv)
dma_addr_t src_uv;
dma_addr_t dst_addr;
unsigned long dst_size;
+ u64 src_handle;
+ u64 dst_handle;
- dst_buf = v4l2_m2m_next_dst_buf(channel->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx);
dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
dst_size = vb2_plane_size(&dst_buf->vb2_buf, 0);
- allegro_mcu_send_put_stream_buffer(dev, channel, dst_addr, dst_size);
+ dst_handle = allegro_put_buffer(channel, &channel->stream_shadow_list,
+ dst_buf);
+ allegro_mcu_send_put_stream_buffer(dev, channel, dst_addr, dst_size,
+ dst_handle);
- src_buf = v4l2_m2m_next_src_buf(channel->fh.m2m_ctx);
+ src_buf = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx);
src_buf->sequence = channel->osequence++;
-
src_y = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
src_uv = src_y + (channel->stride * channel->height);
- allegro_mcu_send_encode_frame(dev, channel, src_y, src_uv);
+ src_handle = allegro_put_buffer(channel, &channel->source_shadow_list,
+ src_buf);
+ allegro_mcu_send_encode_frame(dev, channel, src_y, src_uv, src_handle);
+
+ v4l2_m2m_job_finish(dev->m2m_dev, channel->fh.m2m_ctx);
}
static const struct v4l2_m2m_ops allegro_m2m_ops = {
@@ -2933,8 +3083,8 @@ static int allegro_probe(struct platform_device *pdev)
return -EINVAL;
}
sram_regs = devm_ioremap(&pdev->dev,
- sram_res->start,
- resource_size(sram_res));
+ sram_res->start,
+ resource_size(sram_res));
if (IS_ERR(sram_regs)) {
dev_err(&pdev->dev, "failed to map sram\n");
return PTR_ERR(sram_regs);
diff --git a/drivers/staging/media/allegro-dvt/allegro-mail.c b/drivers/staging/media/allegro-dvt/allegro-mail.c
new file mode 100644
index 000000000000..df0d8d26a6fb
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/allegro-mail.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Helper functions for handling messages that are sent via mailbox to the
+ * Allegro VCU firmware.
+ */
+
+#include <linux/export.h>
+
+#include "allegro-mail.h"
+
+const char *msg_type_name(enum mcu_msg_type type)
+{
+ static char buf[9];
+
+ switch (type) {
+ case MCU_MSG_TYPE_INIT:
+ return "INIT";
+ case MCU_MSG_TYPE_CREATE_CHANNEL:
+ return "CREATE_CHANNEL";
+ case MCU_MSG_TYPE_DESTROY_CHANNEL:
+ return "DESTROY_CHANNEL";
+ case MCU_MSG_TYPE_ENCODE_FRAME:
+ return "ENCODE_FRAME";
+ case MCU_MSG_TYPE_PUT_STREAM_BUFFER:
+ return "PUT_STREAM_BUFFER";
+ case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
+ return "PUSH_BUFFER_INTERMEDIATE";
+ case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
+ return "PUSH_BUFFER_REFERENCE";
+ default:
+ snprintf(buf, sizeof(buf), "(0x%04x)", type);
+ return buf;
+ }
+}
+EXPORT_SYMBOL(msg_type_name);
diff --git a/drivers/staging/media/allegro-dvt/allegro-mail.h b/drivers/staging/media/allegro-dvt/allegro-mail.h
new file mode 100644
index 000000000000..17db665f8e1e
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/allegro-mail.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Allegro VCU firmware mailbox mail definitions
+ */
+
+#ifndef ALLEGRO_MAIL_H
+#define ALLEGRO_MAIL_H
+
+#include <linux/kernel.h>
+
+enum mcu_msg_type {
+ MCU_MSG_TYPE_INIT = 0x0000,
+ MCU_MSG_TYPE_CREATE_CHANNEL = 0x0005,
+ MCU_MSG_TYPE_DESTROY_CHANNEL = 0x0006,
+ MCU_MSG_TYPE_ENCODE_FRAME = 0x0007,
+ MCU_MSG_TYPE_PUT_STREAM_BUFFER = 0x0012,
+ MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE = 0x000e,
+ MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE = 0x000f,
+};
+
+const char *msg_type_name(enum mcu_msg_type type);
+
+struct mcu_msg_header {
+ u16 length; /* length of the body in bytes */
+ u16 type;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_init_request {
+ struct mcu_msg_header header;
+ u32 reserved0; /* maybe an unused channel id */
+ u32 suballoc_dma;
+ u32 suballoc_size;
+ s32 l2_cache[3];
+} __attribute__ ((__packed__));
+
+struct mcu_msg_init_response {
+ struct mcu_msg_header header;
+ u32 reserved0;
+} __attribute__ ((__packed__));
+
+struct create_channel_param {
+ u16 width;
+ u16 height;
+ u32 format;
+ u32 colorspace;
+ u32 src_mode;
+ u8 profile;
+ u16 constraint_set_flags;
+ s8 codec;
+ u16 level;
+ u16 tier;
+ u32 sps_param;
+ u32 pps_param;
+
+ u32 enc_option;
+#define AL_OPT_WPP BIT(0)
+#define AL_OPT_TILE BIT(1)
+#define AL_OPT_LF BIT(2)
+#define AL_OPT_LF_X_SLICE BIT(3)
+#define AL_OPT_LF_X_TILE BIT(4)
+#define AL_OPT_SCL_LST BIT(5)
+#define AL_OPT_CONST_INTRA_PRED BIT(6)
+#define AL_OPT_QP_TAB_RELATIVE BIT(7)
+#define AL_OPT_FIX_PREDICTOR BIT(8)
+#define AL_OPT_CUSTOM_LDA BIT(9)
+#define AL_OPT_ENABLE_AUTO_QP BIT(10)
+#define AL_OPT_ADAPT_AUTO_QP BIT(11)
+#define AL_OPT_TRANSFO_SKIP BIT(13)
+#define AL_OPT_FORCE_REC BIT(15)
+#define AL_OPT_FORCE_MV_OUT BIT(16)
+#define AL_OPT_FORCE_MV_CLIP BIT(17)
+#define AL_OPT_LOWLAT_SYNC BIT(18)
+#define AL_OPT_LOWLAT_INT BIT(19)
+#define AL_OPT_RDO_COST_MODE BIT(20)
+
+ s8 beta_offset;
+ s8 tc_offset;
+ u16 reserved10;
+ u32 unknown11;
+ u32 unknown12;
+ u16 num_slices;
+ u16 prefetch_auto;
+ u32 prefetch_mem_offset;
+ u32 prefetch_mem_size;
+ u16 clip_hrz_range;
+ u16 clip_vrt_range;
+ u16 me_range[4];
+ u8 max_cu_size;
+ u8 min_cu_size;
+ u8 max_tu_size;
+ u8 min_tu_size;
+ u8 max_transfo_depth_inter;
+ u8 max_transfo_depth_intra;
+ u16 reserved20;
+ u32 entropy_mode;
+ u32 wp_mode;
+
+ /* rate control param */
+ u32 rate_control_mode;
+ u32 initial_rem_delay;
+ u32 cpb_size;
+ u16 framerate;
+ u16 clk_ratio;
+ u32 target_bitrate;
+ u32 max_bitrate;
+ u16 initial_qp;
+ u16 min_qp;
+ u16 max_qp;
+ s16 ip_delta;
+ s16 pb_delta;
+ u16 golden_ref;
+ u16 golden_delta;
+ u16 golden_ref_frequency;
+ u32 rate_control_option;
+
+ /* gop param */
+ u32 gop_ctrl_mode;
+ u32 freq_idr;
+ u32 freq_lt;
+ u32 gdr_mode;
+ u16 gop_length;
+ u8 num_b;
+ u8 freq_golden_ref;
+
+ u32 subframe_latency;
+ u32 lda_control_mode;
+ u32 unknown41;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_create_channel {
+ struct mcu_msg_header header;
+ u32 user_id;
+ struct create_channel_param param;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_create_channel_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 user_id;
+ u32 options;
+ u32 num_core;
+ u32 pps_param;
+ u32 int_buffers_count;
+ u32 int_buffers_size;
+ u32 rec_buffers_count;
+ u32 rec_buffers_size;
+ u32 reserved;
+ u32 error_code;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_destroy_channel {
+ struct mcu_msg_header header;
+ u32 channel_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_destroy_channel_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_push_buffers_internal_buffer {
+ u32 dma_addr;
+ u32 mcu_addr;
+ u32 size;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_push_buffers_internal {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ struct mcu_msg_push_buffers_internal_buffer buffer[];
+} __attribute__ ((__packed__));
+
+struct mcu_msg_put_stream_buffer {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 dma_addr;
+ u32 mcu_addr;
+ u32 size;
+ u32 offset;
+ u64 stream_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_encode_frame {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 reserved;
+
+ u32 encoding_options;
+#define AL_OPT_USE_QP_TABLE BIT(0)
+#define AL_OPT_FORCE_LOAD BIT(1)
+#define AL_OPT_USE_L2 BIT(2)
+#define AL_OPT_DISABLE_INTRA BIT(3)
+#define AL_OPT_DEPENDENT_SLICES BIT(4)
+
+ s16 pps_qp;
+ u16 padding;
+ u64 user_param;
+ u64 src_handle;
+
+ u32 request_options;
+#define AL_OPT_SCENE_CHANGE BIT(0)
+#define AL_OPT_RESTART_GOP BIT(1)
+#define AL_OPT_USE_LONG_TERM BIT(2)
+#define AL_OPT_UPDATE_PARAMS BIT(3)
+
+ /* u32 scene_change_delay (optional) */
+ /* rate control param (optional) */
+ /* gop param (optional) */
+ u32 src_y;
+ u32 src_uv;
+ u32 stride;
+ u32 ep2;
+ u64 ep2_v;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_encode_frame_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u64 stream_id; /* see mcu_msg_put_stream_buffer */
+ u64 user_param; /* see mcu_msg_encode_frame */
+ u64 src_handle; /* see mcu_msg_encode_frame */
+ u16 skip;
+ u16 is_ref;
+ u32 initial_removal_delay;
+ u32 dpb_output_delay;
+ u32 size;
+ u32 frame_tag_size;
+ s32 stuffing;
+ s32 filler;
+ u16 num_column;
+ u16 num_row;
+ u16 qp;
+ u8 num_ref_idx_l0;
+ u8 num_ref_idx_l1;
+ u32 partition_table_offset;
+ s32 partition_table_size;
+ u32 sum_complex;
+ s32 tile_width[4];
+ s32 tile_height[22];
+ u32 error_code;
+
+ u32 slice_type;
+#define AL_ENC_SLICE_TYPE_B 0
+#define AL_ENC_SLICE_TYPE_P 1
+#define AL_ENC_SLICE_TYPE_I 2
+
+ u32 pic_struct;
+ u8 is_idr;
+ u8 is_first_slice;
+ u8 is_last_slice;
+ u8 reserved;
+ u16 pps_qp;
+ u16 reserved1;
+ u32 reserved2;
+} __attribute__ ((__packed__));
+
+union mcu_msg_response {
+ struct mcu_msg_header header;
+ struct mcu_msg_init_response init;
+ struct mcu_msg_create_channel_response create_channel;
+ struct mcu_msg_destroy_channel_response destroy_channel;
+ struct mcu_msg_encode_frame_response encode_frame;
+};
+
+#endif
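Because every message struct above is packed, its layout can be pinned down at compile time. A hypothetical sanity check (not part of the patch), assuming only the field sizes shown above:

#include <assert.h>
#include <stddef.h>

/* Hypothetical compile-time checks against the definitions above. */
static_assert(sizeof(struct mcu_msg_header) == 4,
	      "header is two u16 fields");
static_assert(offsetof(struct mcu_msg_put_stream_buffer, stream_id) == 24,
	      "five u32 fields follow the 4-byte header");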
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig
index de77fe6554e7..99aed9a5b0b9 100644
--- a/drivers/staging/media/hantro/Kconfig
+++ b/drivers/staging/media/hantro/Kconfig
@@ -1,19 +1,27 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_HANTRO
tristate "Hantro VPU driver"
- depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on ARCH_MXC || ARCH_ROCKCHIP || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
depends on MEDIA_CONTROLLER_REQUEST_API
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
help
- Support for the Hantro IP based Video Processing Unit present on
- Rockchip SoC, which accelerates video and image encoding and
- decoding.
+ Support for the Hantro IP based Video Processing Units present on
+ Rockchip and NXP i.MX8M SoCs, which accelerate video and image
+ encoding and decoding.
To compile this driver as a module, choose M here: the module
will be called hantro-vpu.
+config VIDEO_HANTRO_IMX8M
+ bool "Hantro VPU i.MX8M support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_MXC || COMPILE_TEST
+ default y
+ help
+ Enable support for i.MX8M SoCs.
+
config VIDEO_HANTRO_ROCKCHIP
bool "Hantro VPU Rockchip support"
depends on VIDEO_HANTRO
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
index 496b30c3c396..68c29a9c4946 100644
--- a/drivers/staging/media/hantro/Makefile
+++ b/drivers/staging/media/hantro/Makefile
@@ -16,6 +16,9 @@ hantro-vpu-y += \
hantro_mpeg2.o \
hantro_vp8.o
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_IMX8M) += \
+ imx8m_vpu_hw.o
+
hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
rk3288_vpu_hw.o \
rk3399_vpu_hw.o
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index b0faa43b3f79..327ddef45345 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -423,7 +423,7 @@ hantro_get_dst_buf(struct hantro_ctx *ctx)
static inline bool
hantro_needs_postproc(struct hantro_ctx *ctx, const struct hantro_fmt *fmt)
{
- return fmt->fourcc != V4L2_PIX_FMT_NV12;
+ return !hantro_is_encoder_ctx(ctx) && fmt->fourcc != V4L2_PIX_FMT_NV12;
}
static inline dma_addr_t
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index c98835326135..ace13973e2d0 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -362,6 +362,16 @@ static const struct hantro_ctrl controls[] = {
.max = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
},
}, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .menu_skip_mask =
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ }
+ }, {
},
};
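With the profile control registered as above, userspace can discover the supported range through the standard V4L2 control ioctl. A hypothetical sketch, with device open and error handling elided:

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Hypothetical query of the control added above; fd is an open video node. */
static void query_h264_profile(int fd)
{
	struct v4l2_queryctrl qc = { .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE };

	if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
		printf("profile: min %d, max %d, default %d\n",
		       qc.minimum, qc.maximum, qc.default_value);
}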
@@ -489,6 +499,9 @@ static const struct of_device_id of_hantro_match[] = {
{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
#endif
+#ifdef CONFIG_VIDEO_HANTRO_IMX8M
+ { .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
+#endif
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);
@@ -664,7 +677,7 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
video_set_drvdata(vfd, vpu);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
return ret;
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index 0d8afc3e5d71..b22418436823 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -67,12 +67,23 @@ hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu,
unsigned char *chroma_qtable)
{
u32 reg, i;
+ __be32 *luma_qtable_p;
+ __be32 *chroma_qtable_p;
+ luma_qtable_p = (__be32 *)luma_qtable;
+ chroma_qtable_p = (__be32 *)chroma_qtable;
+
+ /*
+ * Quantization table registers must be written in contiguous blocks.
+ * DO NOT collapse the below two "for" loops into one.
+ */
for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
- reg = get_unaligned_be32(&luma_qtable[i]);
+ reg = get_unaligned_be32(&luma_qtable_p[i]);
vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i));
+ }
- reg = get_unaligned_be32(&chroma_qtable[i]);
+ for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&chroma_qtable_p[i]);
vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i));
}
}
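The __be32 casts above exist because the hardware consumes the 64-byte table as 16 big-endian 32-bit words. A standalone sketch of that packing (not part of the patch), using the first four bytes of the default luma table:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* First four bytes of the default luma table: 0x10 0x0b 0x0a 0x10 */
	const unsigned char tab[4] = { 0x10, 0x0b, 0x0a, 0x10 };
	uint32_t reg = (uint32_t)tab[0] << 24 | tab[1] << 16 |
		       tab[2] << 8 | tab[3];

	printf("0x%08x\n", reg);	/* prints 0x100b0a10 */
	return 0;
}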
@@ -103,8 +114,8 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
hantro_h1_set_src_img_ctrl(vpu, ctx);
hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
hantro_h1_jpeg_enc_set_qtable(vpu,
- hantro_jpeg_get_qtable(&jpeg_ctx, 0),
- hantro_jpeg_get_qtable(&jpeg_ctx, 1));
+ hantro_jpeg_get_qtable(0),
+ hantro_jpeg_get_qtable(1));
reg = H1_REG_AXI_CTRL_OUTPUT_SWAP16
| H1_REG_AXI_CTRL_INPUT_SWAP16
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 2398d4c1f207..7dfc9bad7297 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -151,6 +151,7 @@ enum hantro_enc_fmt {
extern const struct hantro_variant rk3399_vpu_variant;
extern const struct hantro_variant rk3328_vpu_variant;
extern const struct hantro_variant rk3288_vpu_variant;
+extern const struct hantro_variant imx8mq_vpu_variant;
extern const struct hantro_postproc_regs hantro_g1_postproc_regs;
diff --git a/drivers/staging/media/hantro/hantro_jpeg.c b/drivers/staging/media/hantro/hantro_jpeg.c
index 125eb41f2ede..36c140fc6a36 100644
--- a/drivers/staging/media/hantro/hantro_jpeg.c
+++ b/drivers/staging/media/hantro/hantro_jpeg.c
@@ -23,19 +23,21 @@
#define HUFF_CHROMA_AC_OFF 409
/* Default tables from JPEG ITU-T.81
- * (ISO/IEC 10918-1) Annex K.3, I
+ * (ISO/IEC 10918-1) Annex K, tables K.1 and K.2
*/
static const unsigned char luma_q_table[] = {
- 0x10, 0x0b, 0x0a, 0x10, 0x7c, 0x8c, 0x97, 0xa1,
- 0x0c, 0x0c, 0x0e, 0x13, 0x7e, 0x9e, 0xa0, 0x9b,
- 0x0e, 0x0d, 0x10, 0x18, 0x8c, 0x9d, 0xa9, 0x9c,
- 0x0e, 0x11, 0x16, 0x1d, 0x97, 0xbb, 0xb4, 0xa2,
- 0x12, 0x16, 0x25, 0x38, 0xa8, 0x6d, 0x67, 0xb1,
- 0x18, 0x23, 0x37, 0x40, 0xb5, 0x68, 0x71, 0xc0,
+ 0x10, 0x0b, 0x0a, 0x10, 0x18, 0x28, 0x33, 0x3d,
+ 0x0c, 0x0c, 0x0e, 0x13, 0x1a, 0x3a, 0x3c, 0x37,
+ 0x0e, 0x0d, 0x10, 0x18, 0x28, 0x39, 0x45, 0x38,
+ 0x0e, 0x11, 0x16, 0x1d, 0x33, 0x57, 0x50, 0x3e,
+ 0x12, 0x16, 0x25, 0x38, 0x44, 0x6d, 0x67, 0x4d,
+ 0x18, 0x23, 0x37, 0x40, 0x51, 0x68, 0x71, 0x5c,
0x31, 0x40, 0x4e, 0x57, 0x67, 0x79, 0x78, 0x65,
- 0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0xc7,
+ 0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0x63
};
+static unsigned char luma_q_table_reordered[ARRAY_SIZE(luma_q_table)];
+
static const unsigned char chroma_q_table[] = {
0x11, 0x12, 0x18, 0x2f, 0x63, 0x63, 0x63, 0x63,
0x12, 0x15, 0x1a, 0x42, 0x63, 0x63, 0x63, 0x63,
@@ -47,6 +49,30 @@ static const unsigned char chroma_q_table[] = {
0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
};
+static unsigned char chroma_q_table_reordered[ARRAY_SIZE(chroma_q_table)];
+
+static const unsigned char zigzag[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+static const u32 hw_reorder[64] = {
+ 0, 8, 16, 24, 1, 9, 17, 25,
+ 32, 40, 48, 56, 33, 41, 49, 57,
+ 2, 10, 18, 26, 3, 11, 19, 27,
+ 34, 42, 50, 58, 35, 43, 51, 59,
+ 4, 12, 20, 28, 5, 13, 21, 29,
+ 36, 44, 52, 60, 37, 45, 53, 61,
+ 6, 14, 22, 30, 7, 15, 23, 31,
+ 38, 46, 54, 62, 39, 47, 55, 63
+};
+
/* Huffman tables are shared with CODA */
static const unsigned char luma_dc_table[] = {
0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
@@ -225,20 +251,29 @@ static const unsigned char hantro_jpeg_header[JPEG_HEADER_SIZE] = {
0x11, 0x03, 0x11, 0x00, 0x3f, 0x00,
};
+static unsigned char jpeg_scale_qp(const unsigned char qp, int scale)
+{
+ unsigned int temp;
+
+ temp = DIV_ROUND_CLOSEST((unsigned int)qp * scale, 100);
+ if (temp <= 0)
+ temp = 1;
+ if (temp > 255)
+ temp = 255;
+
+ return (unsigned char)temp;
+}
+
static void
-jpeg_scale_quant_table(unsigned char *q_tab,
+jpeg_scale_quant_table(unsigned char *file_q_tab,
+ unsigned char *reordered_q_tab,
const unsigned char *tab, int scale)
{
- unsigned int temp;
int i;
for (i = 0; i < 64; i++) {
- temp = DIV_ROUND_CLOSEST((unsigned int)tab[i] * scale, 100);
- if (temp <= 0)
- temp = 1;
- if (temp > 255)
- temp = 255;
- q_tab[i] = (unsigned char)temp;
+ file_q_tab[i] = jpeg_scale_qp(tab[zigzag[i]], scale);
+ reordered_q_tab[i] = jpeg_scale_qp(tab[hw_reorder[i]], scale);
}
}
@@ -256,17 +291,18 @@ static void jpeg_set_quality(unsigned char *buffer, int quality)
scale = 200 - 2 * quality;
jpeg_scale_quant_table(buffer + LUMA_QUANT_OFF,
+ luma_q_table_reordered,
luma_q_table, scale);
jpeg_scale_quant_table(buffer + CHROMA_QUANT_OFF,
+ chroma_q_table_reordered,
chroma_q_table, scale);
}
-unsigned char *
-hantro_jpeg_get_qtable(struct hantro_jpeg_ctx *ctx, int index)
+unsigned char *hantro_jpeg_get_qtable(int index)
{
if (index == 0)
- return ctx->buffer + LUMA_QUANT_OFF;
- return ctx->buffer + CHROMA_QUANT_OFF;
+ return luma_q_table_reordered;
+ return chroma_q_table_reordered;
}
void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
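A standalone sketch of the scaling arithmetic above (not part of the patch), assuming quality >= 50 so that the scale = 200 - 2 * quality branch shown in jpeg_set_quality() applies:

#include <stdio.h>

/* Mirrors jpeg_scale_qp(): scale the coefficient, clamp to [1, 255]. */
static unsigned char scale_qp(unsigned char qp, int scale)
{
	unsigned int temp = ((unsigned int)qp * scale + 50) / 100;

	if (temp < 1)
		temp = 1;
	if (temp > 255)
		temp = 255;
	return (unsigned char)temp;
}

int main(void)
{
	/* quality 80 -> scale 40; luma coefficient 16 becomes 6 */
	printf("%u\n", scale_qp(0x10, 200 - 2 * 80));
	return 0;
}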
diff --git a/drivers/staging/media/hantro/hantro_jpeg.h b/drivers/staging/media/hantro/hantro_jpeg.h
index 9e8397c71388..9474a00277f8 100644
--- a/drivers/staging/media/hantro/hantro_jpeg.h
+++ b/drivers/staging/media/hantro/hantro_jpeg.h
@@ -9,5 +9,5 @@ struct hantro_jpeg_ctx {
unsigned char *buffer;
};
-unsigned char *hantro_jpeg_get_qtable(struct hantro_jpeg_ctx *ctx, int index);
+unsigned char *hantro_jpeg_get_qtable(int index);
void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
index 28a85d301d7f..44062ffceaea 100644
--- a/drivers/staging/media/hantro/hantro_postproc.c
+++ b/drivers/staging/media/hantro/hantro_postproc.c
@@ -14,16 +14,16 @@
#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
{ \
- hantro_reg_write((vpu), \
- &((vpu)->variant->postproc_regs->reg_name), \
- (val)); \
+ hantro_reg_write(vpu, \
+ &(vpu)->variant->postproc_regs->reg_name, \
+ val); \
}
#define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \
{ \
- hantro_reg_write_s((vpu), \
- &((vpu)->variant->postproc_regs->reg_name), \
- (val)); \
+ hantro_reg_write_s(vpu, \
+ &(vpu)->variant->postproc_regs->reg_name, \
+ val); \
}
#define VPU_PP_IN_YUYV 0x0
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 0198bcda26b7..f4ae2cee0f18 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -295,7 +295,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
* +---------------------------+
*/
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE &&
- !hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ !hantro_needs_postproc(ctx, fmt))
pix_mp->plane_fmt[0].sizeimage +=
64 * MB_WIDTH(pix_mp->width) *
MB_WIDTH(pix_mp->height) + 32;
diff --git a/drivers/staging/media/hantro/imx8m_vpu_hw.c b/drivers/staging/media/hantro/imx8m_vpu_hw.c
new file mode 100644
index 000000000000..cb2420c5526e
--- /dev/null
+++ b/drivers/staging/media/hantro/imx8m_vpu_hw.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_g1_regs.h"
+
+#define CTRL_SOFT_RESET 0x00
+#define RESET_G1 BIT(1)
+#define RESET_G2 BIT(0)
+
+#define CTRL_CLOCK_ENABLE 0x04
+#define CLOCK_G1 BIT(1)
+#define CLOCK_G2 BIT(0)
+
+#define CTRL_G1_DEC_FUSE 0x08
+#define CTRL_G1_PP_FUSE 0x0c
+#define CTRL_G2_DEC_FUSE 0x10
+
+static void imx8m_soft_reset(struct hantro_dev *vpu, u32 reset_bits)
+{
+ u32 val;
+
+ /* Assert */
+ val = readl(vpu->ctrl_base + CTRL_SOFT_RESET);
+ val &= ~reset_bits;
+ writel(val, vpu->ctrl_base + CTRL_SOFT_RESET);
+
+ udelay(2);
+
+ /* Release */
+ val = readl(vpu->ctrl_base + CTRL_SOFT_RESET);
+ val |= reset_bits;
+ writel(val, vpu->ctrl_base + CTRL_SOFT_RESET);
+}
+
+static void imx8m_clk_enable(struct hantro_dev *vpu, u32 clock_bits)
+{
+ u32 val;
+
+ val = readl(vpu->ctrl_base + CTRL_CLOCK_ENABLE);
+ val |= clock_bits;
+ writel(val, vpu->ctrl_base + CTRL_CLOCK_ENABLE);
+}
+
+static int imx8mq_runtime_resume(struct hantro_dev *vpu)
+{
+ int ret;
+
+ ret = clk_bulk_prepare_enable(vpu->variant->num_clocks, vpu->clocks);
+ if (ret) {
+ dev_err(vpu->dev, "Failed to enable clocks\n");
+ return ret;
+ }
+
+ imx8m_soft_reset(vpu, RESET_G1 | RESET_G2);
+ imx8m_clk_enable(vpu, CLOCK_G1 | CLOCK_G2);
+
+ /* Set values of the fuse registers */
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G1_DEC_FUSE);
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G1_PP_FUSE);
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G2_DEC_FUSE);
+
+ clk_bulk_disable_unprepare(vpu->variant->num_clocks, vpu->clocks);
+
+ return 0;
+}
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt imx8m_vpu_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+};
+
+static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = 16,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = 16,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G1_REG_INTERRUPT);
+ state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+
+ hantro_irq_done(vpu, 0, state);
+
+ return IRQ_HANDLED;
+}
+
+static int imx8mq_vpu_hw_init(struct hantro_dev *vpu)
+{
+ vpu->dec_base = vpu->reg_bases[0];
+ vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1];
+
+ return 0;
+}
+
+static void imx8m_vpu_g1_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ imx8m_soft_reset(vpu, RESET_G1);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops imx8mq_vpu_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+/*
+ * VPU variants.
+ */
+
+static const struct hantro_irq imx8mq_irqs[] = {
+ { "g1", imx8m_vpu_g1_irq },
+ { "g2", NULL /* TODO: imx8m_vpu_g2_irq */ },
+};
+
+static const char * const imx8mq_clk_names[] = { "g1", "g2", "bus" };
+static const char * const imx8mq_reg_names[] = { "g1", "g2", "ctrl" };
+
+const struct hantro_variant imx8mq_vpu_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .postproc_fmts = imx8m_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
+ .postproc_regs = &hantro_g1_postproc_regs,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_codec_ops,
+ .init = imx8mq_vpu_hw_init,
+ .runtime_resume = imx8mq_runtime_resume,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_clk_names),
+ .reg_names = imx8mq_reg_names,
+ .num_regs = ARRAY_SIZE(imx8mq_reg_names)
+};
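A standalone sketch (not part of the patch) of the clear-then-set sequence in imx8m_soft_reset() above: the reset bits are active low, so clearing a bit asserts the reset and setting it again releases it.

#include <stdio.h>
#include <stdint.h>

#define RESET_G1 (1u << 1)
#define RESET_G2 (1u << 0)

int main(void)
{
	uint32_t val = RESET_G1 | RESET_G2;	/* both cores released */

	val &= ~RESET_G1;	/* assert G1 reset */
	printf("0x%x\n", val);	/* prints 0x1 */
	val |= RESET_G1;	/* release G1 reset */
	printf("0x%x\n", val);	/* prints 0x3 */
	return 0;
}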
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 4c2d43fb6fd1..3498e6124acd 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -18,9 +18,8 @@
*
* Quantization luma table values are written to registers
* VEPU_swreg_0-VEPU_swreg_15, and chroma table values to
- * VEPU_swreg_16-VEPU_swreg_31.
- *
- * JPEG zigzag order is expected on the quantization tables.
+ * VEPU_swreg_16-VEPU_swreg_31. A special order is needed, neither
+ * zigzag nor linear.
*/
#include <asm/unaligned.h>
@@ -98,12 +97,23 @@ rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu,
unsigned char *chroma_qtable)
{
u32 reg, i;
+ __be32 *luma_qtable_p;
+ __be32 *chroma_qtable_p;
+
+ luma_qtable_p = (__be32 *)luma_qtable;
+ chroma_qtable_p = (__be32 *)chroma_qtable;
+ /*
+ * Quantization table registers must be written in contiguous blocks.
+ * DO NOT collapse the below two "for" loops into one.
+ */
for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
- reg = get_unaligned_be32(&luma_qtable[i]);
+ reg = get_unaligned_be32(&luma_qtable_p[i]);
vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
+ }
- reg = get_unaligned_be32(&chroma_qtable[i]);
+ for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&chroma_qtable_p[i]);
vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
}
}
@@ -134,8 +144,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
rk3399_vpu_set_src_img_ctrl(vpu, ctx);
rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
rk3399_vpu_jpeg_enc_set_qtable(vpu,
- hantro_jpeg_get_qtable(&jpeg_ctx, 0),
- hantro_jpeg_get_qtable(&jpeg_ctx, 1));
+ hantro_jpeg_get_qtable(0),
+ hantro_jpeg_get_qtable(1));
reg = VEPU_REG_OUTPUT_SWAP32
| VEPU_REG_OUTPUT_SWAP16
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 7712e7be8625..d37b776ff86d 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -643,7 +643,7 @@ static int capture_open(struct file *file)
if (ret)
v4l2_err(priv->src_sd, "v4l2_fh_open failed\n");
- ret = v4l2_pipeline_pm_use(&vfd->entity, 1);
+ ret = v4l2_pipeline_pm_get(&vfd->entity);
if (ret)
v4l2_fh_release(file);
@@ -664,7 +664,7 @@ static int capture_release(struct file *file)
vq->owner = NULL;
}
- v4l2_pipeline_pm_use(&vfd->entity, 0);
+ v4l2_pipeline_pm_put(&vfd->entity);
v4l2_fh_release(file);
mutex_unlock(&priv->mutex);
@@ -742,7 +742,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
vfd->v4l2_dev = v4l2_dev;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(sd, "Failed to register video device\n");
return ret;
@@ -778,7 +778,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
/* setup default format */
fmt_src.pad = priv->src_sd_pad;
fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
if (ret) {
v4l2_err(sd, "failed to get src_sd format\n");
goto unreg;
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index 2b635ebf62d6..2cc77f6e84b6 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -849,7 +849,7 @@ int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev)
vfd->v4l2_dev = &priv->md->v4l2_dev;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(vfd->v4l2_dev, "Failed to register video device\n");
return ret;
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index b60ed4f22f6d..e76a6a85baa3 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -457,7 +457,8 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
case V4L2_PIX_FMT_SGBRG16:
case V4L2_PIX_FMT_SGRBG16:
case V4L2_PIX_FMT_SRGGB16:
- case V4L2_PIX_FMT_Y16:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
burst_size = 8;
passthrough_bits = 16;
break;
@@ -1459,6 +1460,8 @@ static void csi_try_fmt(struct csi_priv *priv,
/* propagate colorimetry from sink */
sdformat->format.colorspace = infmt->colorspace;
sdformat->format.xfer_func = infmt->xfer_func;
+ sdformat->format.quantization = infmt->quantization;
+ sdformat->format.ycbcr_enc = infmt->ycbcr_enc;
break;
case CSI_SINK_PAD:
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 0788a1874557..fae981698c49 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -161,17 +161,24 @@ static const struct imx_media_pixfmt rgb_formats[] = {
.bayer = true,
}, {
.fourcc = V4L2_PIX_FMT_GREY,
- .codes = {MEDIA_BUS_FMT_Y8_1X8},
- .cs = IPUV3_COLORSPACE_RGB,
- .bpp = 8,
- .bayer = true,
- }, {
- .fourcc = V4L2_PIX_FMT_Y16,
.codes = {
+ MEDIA_BUS_FMT_Y8_1X8,
MEDIA_BUS_FMT_Y10_1X10,
MEDIA_BUS_FMT_Y12_1X12,
},
.cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .codes = {MEDIA_BUS_FMT_Y10_1X10},
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .codes = {MEDIA_BUS_FMT_Y12_1X12},
+ .cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
.bayer = true,
},
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index cd3dd6e33ef0..8ab823042c09 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -592,22 +592,19 @@ static int csi2_probe(struct platform_device *pdev)
csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(csi2->pllref_clk)) {
v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
- ret = PTR_ERR(csi2->pllref_clk);
- return ret;
+ return PTR_ERR(csi2->pllref_clk);
}
csi2->dphy_clk = devm_clk_get(&pdev->dev, "dphy");
if (IS_ERR(csi2->dphy_clk)) {
v4l2_err(&csi2->sd, "failed to get dphy clock\n");
- ret = PTR_ERR(csi2->dphy_clk);
- return ret;
+ return PTR_ERR(csi2->dphy_clk);
}
csi2->pix_clk = devm_clk_get(&pdev->dev, "pix");
if (IS_ERR(csi2->pix_clk)) {
v4l2_err(&csi2->sd, "failed to get pixel clock\n");
- ret = PTR_ERR(csi2->pix_clk);
- return ret;
+ return PTR_ERR(csi2->pix_clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index db30e2c70f2f..acbdffb77668 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -292,7 +292,7 @@ static void imx7_csi_hw_disable(struct imx7_csi *csi)
static void imx7_csi_dma_reflash(struct imx7_csi *csi)
{
- u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr3;
cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
cr3 |= BIT_DMA_REFLASH_RFF;
@@ -804,6 +804,22 @@ static int imx7_csi_configure(struct imx7_csi *csi)
case V4L2_PIX_FMT_YUYV:
cr18 |= BIT_MIPI_DATA_FORMAT_YUV422_8B;
break;
+ case V4L2_PIX_FMT_GREY:
+ if (in_code == MEDIA_BUS_FMT_Y8_1X8)
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW8;
+ else if (in_code == MEDIA_BUS_FMT_Y10_1X10)
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW10;
+ else
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW12;
+ break;
+ case V4L2_PIX_FMT_Y10:
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW10;
+ cr1 |= BIT_PIXEL_BIT;
+ break;
+ case V4L2_PIX_FMT_Y12:
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW12;
+ cr1 |= BIT_PIXEL_BIT;
+ break;
case V4L2_PIX_FMT_SBGGR8:
cr18 |= BIT_MIPI_DATA_FORMAT_RAW8;
break;
@@ -1009,10 +1025,13 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
sdformat->format.width = in_fmt->width;
sdformat->format.height = in_fmt->height;
sdformat->format.code = in_fmt->code;
+ sdformat->format.field = in_fmt->field;
*cc = in_cc;
sdformat->format.colorspace = in_fmt->colorspace;
sdformat->format.xfer_func = in_fmt->xfer_func;
+ sdformat->format.quantization = in_fmt->quantization;
+ sdformat->format.ycbcr_enc = in_fmt->ycbcr_enc;
break;
case IMX7_CSI_PAD_SINK:
*cc = imx_media_find_mbus_format(sdformat->format.code,
@@ -1023,6 +1042,9 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
false);
sdformat->format.code = (*cc)->codes[0];
}
+
+ if (sdformat->format.field != V4L2_FIELD_INTERLACED)
+ sdformat->format.field = V4L2_FIELD_NONE;
break;
default:
return -EINVAL;
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 383abecb3bec..fbc1a924652a 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -280,6 +280,18 @@ static const struct csis_pix_format mipi_csis_formats[] = {
.code = MEDIA_BUS_FMT_YUYV8_2X8,
.fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
.data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .data_alignment = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_alignment = 16,
}
};
@@ -301,6 +313,7 @@ static int mipi_csis_dump_regs(struct csi_state *state)
{ 0x20, "DPHYSTS" },
{ 0x10, "INTMSK" },
{ 0x40, "CONFIG_CH0" },
+ { 0x44, "RESOL_CH0" },
{ 0xC0, "DBG_CONFIG" },
{ 0x38, "DPHYSLAVE_L" },
{ 0x3C, "DPHYSLAVE_H" },
@@ -404,7 +417,7 @@ static void mipi_csis_set_hsync_settle(struct csi_state *state, int hs_settle)
{
u32 val = mipi_csis_read(state, MIPI_CSIS_DPHYCTRL);
- val = ((val & ~MIPI_CSIS_DPHYCTRL_HSS_MASK) | (hs_settle << 24));
+ val = (val & ~MIPI_CSIS_DPHYCTRL_HSS_MASK) | (hs_settle << 24);
mipi_csis_write(state, MIPI_CSIS_DPHYCTRL, val);
}
@@ -417,6 +430,7 @@ static void mipi_csis_set_params(struct csi_state *state)
val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
val &= ~MIPI_CSIS_CMN_CTRL_LANE_NR_MASK;
val |= (lanes - 1) << MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET;
+ val |= MIPI_CSIS_CMN_CTRL_INTER_MODE;
mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val);
__mipi_csis_set_format(state);
@@ -577,7 +591,7 @@ static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable)
state->flags |= ST_STREAMING;
} else {
v4l2_subdev_call(state->src_sd, video, s_stream, 0);
- ret = v4l2_subdev_call(state->src_sd, core, s_power, 1);
+ ret = v4l2_subdev_call(state->src_sd, core, s_power, 0);
mipi_csis_stop_stream(state);
state->flags &= ~ST_STREAMING;
if (state->debug)
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index dc356d6f03c4..52063b651810 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -13,8 +13,6 @@ staging directory.
- Elaborate the functionality of different selection rectangles in driver
documentation. This may require driver changes as well.
-- More detailed documentation on calculating BDS, GCD etc. sizes needed.
-
- Document different operation modes, and which buffer queues are relevant
in each mode. To process an image, which queues require a buffer and in
which ones is it optional?
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index f36de501edc6..4f04fe838b0c 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -210,12 +210,12 @@ static int imgu_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp)
/* Initialize the IPU3 CSS hardware and associated h/w blocks */
-int imgu_css_set_powerup(struct device *dev, void __iomem *base)
+int imgu_css_set_powerup(struct device *dev, void __iomem *base,
+ unsigned int freq)
{
- static const unsigned int freq = 450;
u32 pm_ctrl, state, val;
- dev_dbg(dev, "%s\n", __func__);
+ dev_dbg(dev, "%s with freq %u\n", __func__, freq);
/* Clear the CSS busy signal */
readl(base + IMGU_REG_GP_BUSY);
writel(0, base + IMGU_REG_GP_BUSY);
diff --git a/drivers/staging/media/ipu3/ipu3-css.h b/drivers/staging/media/ipu3/ipu3-css.h
index 6b8bab27ab1f..6108a068b228 100644
--- a/drivers/staging/media/ipu3/ipu3-css.h
+++ b/drivers/staging/media/ipu3/ipu3-css.h
@@ -187,7 +187,8 @@ bool imgu_css_is_streaming(struct imgu_css *css);
bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe);
/******************* css hw *******************/
-int imgu_css_set_powerup(struct device *dev, void __iomem *base);
+int imgu_css_set_powerup(struct device *dev, void __iomem *base,
+ unsigned int freq);
void imgu_css_set_powerdown(struct device *dev, void __iomem *base);
int imgu_css_irq_ack(struct imgu_css *css);
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c
index 3d969b0522ab..5f3ff964f3e7 100644
--- a/drivers/staging/media/ipu3/ipu3-mmu.c
+++ b/drivers/staging/media/ipu3/ipu3-mmu.c
@@ -130,7 +130,7 @@ static u32 *imgu_mmu_alloc_page_table(u32 pteval)
for (pte = 0; pte < IPU3_PT_PTES; pte++)
pt[pte] = pteval;
- set_memory_uc((unsigned long int)pt, IPU3_PT_ORDER);
+ set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);
return pt;
}
@@ -141,7 +141,7 @@ static u32 *imgu_mmu_alloc_page_table(u32 pteval)
*/
static void imgu_mmu_free_page_table(u32 *pt)
{
- set_memory_wb((unsigned long int)pt, IPU3_PT_ORDER);
+ set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
free_page((unsigned long)pt);
}
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 569e27b824c8..09c8ede1457c 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -1245,7 +1245,7 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
vdev->queue = &node->vbq;
vdev->vfl_dir = node->output ? VFL_DIR_TX : VFL_DIR_RX;
video_set_drvdata(vdev, imgu);
- r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (r) {
dev_err(dev, "failed to register video device (%d)", r);
media_entity_cleanup(&vdev->entity);
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
index 06a61f31ca50..4d53aad31483 100644
--- a/drivers/staging/media/ipu3/ipu3.c
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -345,8 +345,20 @@ failed:
static int imgu_powerup(struct imgu_device *imgu)
{
int r;
+ unsigned int pipe;
+ unsigned int freq = 200;
+ struct v4l2_mbus_framefmt *fmt;
+
+ /* if the input is larger than 2048*1152, ask imgu to work at the high freq */
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
+ dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
+ pipe, fmt->width, fmt->height);
+ if ((fmt->width * fmt->height) >= (2048 * 1152))
+ freq = 450;
+ }
- r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base);
+ r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
if (r)
return r;
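A standalone sketch (not part of the patch) of the pixel-count threshold used in imgu_powerup() above; the 4032x3024 input size is hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int width = 4032, height = 3024;	/* hypothetical */
	unsigned int freq = 200;

	if (width * height >= 2048 * 1152)
		freq = 450;
	printf("freq = %u\n", freq);	/* prints freq = 450 */
	return 0;
}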
@@ -666,7 +678,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev,
atomic_set(&imgu->qbuf_barrier, 0);
init_waitqueue_head(&imgu->buf_drain_wq);
- r = imgu_css_set_powerup(&pci_dev->dev, imgu->base);
+ r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
if (r) {
dev_err(&pci_dev->dev,
"failed to power up CSS (%d)\n", r);
diff --git a/drivers/staging/media/meson/vdec/Makefile b/drivers/staging/media/meson/vdec/Makefile
index 6bea129084b7..6e726af84ac9 100644
--- a/drivers/staging/media/meson/vdec/Makefile
+++ b/drivers/staging/media/meson/vdec/Makefile
@@ -2,7 +2,7 @@
# Makefile for Amlogic meson video decoder driver
meson-vdec-objs = esparser.o vdec.o vdec_helpers.o vdec_platform.o
-meson-vdec-objs += vdec_1.o
-meson-vdec-objs += codec_mpeg12.o
+meson-vdec-objs += vdec_1.o vdec_hevc.o
+meson-vdec-objs += codec_mpeg12.o codec_h264.o codec_hevc_common.o codec_vp9.o
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson-vdec.o
diff --git a/drivers/staging/media/meson/vdec/codec_h264.c b/drivers/staging/media/meson/vdec/codec_h264.c
new file mode 100644
index 000000000000..c61128fc4bb9
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_h264.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vdec_helpers.h"
+#include "dos_regs.h"
+#include "codec_h264.h"
+
+#define SIZE_EXT_FW (20 * SZ_1K)
+#define SIZE_WORKSPACE 0x1ee000
+#define SIZE_SEI (8 * SZ_1K)
+
+/*
+ * Offset added by the firmware, which must be subtracted
+ * from the workspace physical address
+ */
+#define WORKSPACE_BUF_OFFSET 0x1000000
+
+/* ISR status */
+#define CMD_MASK GENMASK(7, 0)
+#define CMD_SRC_CHANGE 1
+#define CMD_FRAMES_READY 2
+#define CMD_FATAL_ERROR 6
+#define CMD_BAD_WIDTH 7
+#define CMD_BAD_HEIGHT 8
+
+#define SEI_DATA_READY BIT(15)
+
+/* Picture type */
+#define PIC_TOP_BOT 5
+#define PIC_BOT_TOP 6
+
+/* Size of Motion Vector per macroblock */
+#define MB_MV_SIZE 96
+
+/* Frame status data */
+#define PIC_STRUCT_BIT 5
+#define PIC_STRUCT_MASK GENMASK(2, 0)
+#define BUF_IDX_MASK GENMASK(4, 0)
+#define ERROR_FLAG BIT(9)
+#define OFFSET_BIT 16
+#define OFFSET_MASK GENMASK(15, 0)
+
+/* Bitstream parsed data */
+#define MB_TOTAL_BIT 8
+#define MB_TOTAL_MASK GENMASK(15, 0)
+#define MB_WIDTH_MASK GENMASK(7, 0)
+#define MAX_REF_BIT 24
+#define MAX_REF_MASK GENMASK(6, 0)
+#define AR_IDC_BIT 16
+#define AR_IDC_MASK GENMASK(7, 0)
+#define AR_PRESENT_FLAG BIT(0)
+#define AR_EXTEND 0xff
+
+/*
+ * Buffer to send to the ESPARSER to signal End Of Stream for H.264.
+ * This is a 16x16 encoded picture that will trigger drain firmware-side.
+ * There is no known alternative.
+ */
+static const u8 eos_sequence[SZ_4K] = {
+ 0x00, 0x00, 0x00, 0x01, 0x06, 0x05, 0xff, 0xe4, 0xdc, 0x45, 0xe9, 0xbd,
+ 0xe6, 0xd9, 0x48, 0xb7, 0x96, 0x2c, 0xd8, 0x20, 0xd9, 0x23, 0xee, 0xef,
+ 0x78, 0x32, 0x36, 0x34, 0x20, 0x2d, 0x20, 0x63, 0x6f, 0x72, 0x65, 0x20,
+ 0x36, 0x37, 0x20, 0x72, 0x31, 0x31, 0x33, 0x30, 0x20, 0x38, 0x34, 0x37,
+ 0x35, 0x39, 0x37, 0x37, 0x20, 0x2d, 0x20, 0x48, 0x2e, 0x32, 0x36, 0x34,
+ 0x2f, 0x4d, 0x50, 0x45, 0x47, 0x2d, 0x34, 0x20, 0x41, 0x56, 0x43, 0x20,
+ 0x63, 0x6f, 0x64, 0x65, 0x63, 0x20, 0x2d, 0x20, 0x43, 0x6f, 0x70, 0x79,
+ 0x6c, 0x65, 0x66, 0x74, 0x20, 0x32, 0x30, 0x30, 0x33, 0x2d, 0x32, 0x30,
+ 0x30, 0x39, 0x20, 0x2d, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f,
+ 0x77, 0x77, 0x77, 0x2e, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x78, 0x32, 0x36, 0x34, 0x2e, 0x68, 0x74,
+ 0x6d, 0x6c, 0x20, 0x2d, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x3a, 0x20, 0x63, 0x61, 0x62, 0x61, 0x63, 0x3d, 0x31, 0x20, 0x72, 0x65,
+ 0x66, 0x3d, 0x31, 0x20, 0x64, 0x65, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x3d,
+ 0x31, 0x3a, 0x30, 0x3a, 0x30, 0x20, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x73,
+ 0x65, 0x3d, 0x30, 0x78, 0x31, 0x3a, 0x30, 0x78, 0x31, 0x31, 0x31, 0x20,
+ 0x6d, 0x65, 0x3d, 0x68, 0x65, 0x78, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x65,
+ 0x3d, 0x36, 0x20, 0x70, 0x73, 0x79, 0x5f, 0x72, 0x64, 0x3d, 0x31, 0x2e,
+ 0x30, 0x3a, 0x30, 0x2e, 0x30, 0x20, 0x6d, 0x69, 0x78, 0x65, 0x64, 0x5f,
+ 0x72, 0x65, 0x66, 0x3d, 0x30, 0x20, 0x6d, 0x65, 0x5f, 0x72, 0x61, 0x6e,
+ 0x67, 0x65, 0x3d, 0x31, 0x36, 0x20, 0x63, 0x68, 0x72, 0x6f, 0x6d, 0x61,
+ 0x5f, 0x6d, 0x65, 0x3d, 0x31, 0x20, 0x74, 0x72, 0x65, 0x6c, 0x6c, 0x69,
+ 0x73, 0x3d, 0x30, 0x20, 0x38, 0x78, 0x38, 0x64, 0x63, 0x74, 0x3d, 0x30,
+ 0x20, 0x63, 0x71, 0x6d, 0x3d, 0x30, 0x20, 0x64, 0x65, 0x61, 0x64, 0x7a,
+ 0x6f, 0x6e, 0x65, 0x3d, 0x32, 0x31, 0x2c, 0x31, 0x31, 0x20, 0x63, 0x68,
+ 0x72, 0x6f, 0x6d, 0x61, 0x5f, 0x71, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73,
+ 0x65, 0x74, 0x3d, 0x2d, 0x32, 0x20, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64,
+ 0x73, 0x3d, 0x31, 0x20, 0x6e, 0x72, 0x3d, 0x30, 0x20, 0x64, 0x65, 0x63,
+ 0x69, 0x6d, 0x61, 0x74, 0x65, 0x3d, 0x31, 0x20, 0x6d, 0x62, 0x61, 0x66,
+ 0x66, 0x3d, 0x30, 0x20, 0x62, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x3d,
+ 0x30, 0x20, 0x6b, 0x65, 0x79, 0x69, 0x6e, 0x74, 0x3d, 0x32, 0x35, 0x30,
+ 0x20, 0x6b, 0x65, 0x79, 0x69, 0x6e, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x3d,
+ 0x32, 0x35, 0x20, 0x73, 0x63, 0x65, 0x6e, 0x65, 0x63, 0x75, 0x74, 0x3d,
+ 0x34, 0x30, 0x20, 0x72, 0x63, 0x3d, 0x61, 0x62, 0x72, 0x20, 0x62, 0x69,
+ 0x74, 0x72, 0x61, 0x74, 0x65, 0x3d, 0x31, 0x30, 0x20, 0x72, 0x61, 0x74,
+ 0x65, 0x74, 0x6f, 0x6c, 0x3d, 0x31, 0x2e, 0x30, 0x20, 0x71, 0x63, 0x6f,
+ 0x6d, 0x70, 0x3d, 0x30, 0x2e, 0x36, 0x30, 0x20, 0x71, 0x70, 0x6d, 0x69,
+ 0x6e, 0x3d, 0x31, 0x30, 0x20, 0x71, 0x70, 0x6d, 0x61, 0x78, 0x3d, 0x35,
+ 0x31, 0x20, 0x71, 0x70, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x34, 0x20, 0x69,
+ 0x70, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x3d, 0x31, 0x2e, 0x34, 0x30,
+ 0x20, 0x61, 0x71, 0x3d, 0x31, 0x3a, 0x31, 0x2e, 0x30, 0x30, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x01, 0x67, 0x4d, 0x40, 0x0a, 0x9a, 0x74, 0xf4, 0x20,
+ 0x00, 0x00, 0x03, 0x00, 0x20, 0x00, 0x00, 0x06, 0x51, 0xe2, 0x44, 0xd4,
+ 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x32, 0xc8, 0x00, 0x00, 0x00, 0x01,
+ 0x65, 0x88, 0x80, 0x20, 0x00, 0x08, 0x7f, 0xea, 0x6a, 0xe2, 0x99, 0xb6,
+ 0x57, 0xae, 0x49, 0x30, 0xf5, 0xfe, 0x5e, 0x46, 0x0b, 0x72, 0x44, 0xc4,
+ 0xe1, 0xfc, 0x62, 0xda, 0xf1, 0xfb, 0xa2, 0xdb, 0xd6, 0xbe, 0x5c, 0xd7,
+ 0x24, 0xa3, 0xf5, 0xb9, 0x2f, 0x57, 0x16, 0x49, 0x75, 0x47, 0x77, 0x09,
+ 0x5c, 0xa1, 0xb4, 0xc3, 0x4f, 0x60, 0x2b, 0xb0, 0x0c, 0xc8, 0xd6, 0x66,
+ 0xba, 0x9b, 0x82, 0x29, 0x33, 0x92, 0x26, 0x99, 0x31, 0x1c, 0x7f, 0x9b,
+ 0x00, 0x00, 0x01, 0x0ff,
+};
+
+static const u8 *codec_h264_eos_sequence(u32 *len)
+{
+ *len = ARRAY_SIZE(eos_sequence);
+ return eos_sequence;
+}
+
+struct codec_h264 {
+ /* H.264 decoder requires an extended firmware */
+ void *ext_fw_vaddr;
+ dma_addr_t ext_fw_paddr;
+
+ /* Buffer for the H.264 Workspace */
+ void *workspace_vaddr;
+ dma_addr_t workspace_paddr;
+
+ /* Buffer for the H.264 references MV */
+ void *ref_vaddr;
+ dma_addr_t ref_paddr;
+ u32 ref_size;
+
+ /* Buffer for parsed SEI data */
+ void *sei_vaddr;
+ dma_addr_t sei_paddr;
+
+ u32 mb_width;
+ u32 mb_height;
+ u32 max_refs;
+};
+
+static int codec_h264_can_recycle(struct amvdec_core *core)
+{
+ return !amvdec_read_dos(core, AV_SCRATCH_7) ||
+ !amvdec_read_dos(core, AV_SCRATCH_8);
+}
+
+static void codec_h264_recycle(struct amvdec_core *core, u32 buf_idx)
+{
+ /*
+ * Tell the firmware it can recycle this buffer.
+ * AV_SCRATCH_8 serves the same purpose.
+ */
+ if (!amvdec_read_dos(core, AV_SCRATCH_7))
+ amvdec_write_dos(core, AV_SCRATCH_7, buf_idx + 1);
+ else
+ amvdec_write_dos(core, AV_SCRATCH_8, buf_idx + 1);
+}
+
+static int codec_h264_start(struct amvdec_session *sess)
+{
+ u32 workspace_offset;
+ struct amvdec_core *core = sess->core;
+ struct codec_h264 *h264 = sess->priv;
+
+ /* Allocate some memory for the H.264 decoder's state */
+ h264->workspace_vaddr =
+ dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
+ &h264->workspace_paddr, GFP_KERNEL);
+ if (!h264->workspace_vaddr)
+ return -ENOMEM;
+
+ /* Allocate some memory for the H.264 SEI dump */
+ h264->sei_vaddr = dma_alloc_coherent(core->dev, SIZE_SEI,
+ &h264->sei_paddr, GFP_KERNEL);
+ if (!h264->sei_vaddr)
+ return -ENOMEM;
+
+ amvdec_write_dos_bits(core, POWER_CTL_VLD, BIT(9) | BIT(6));
+
+ workspace_offset = h264->workspace_paddr - WORKSPACE_BUF_OFFSET;
+ amvdec_write_dos(core, AV_SCRATCH_1, workspace_offset);
+ amvdec_write_dos(core, AV_SCRATCH_G, h264->ext_fw_paddr);
+ amvdec_write_dos(core, AV_SCRATCH_I, h264->sei_paddr -
+ workspace_offset);
+
+ /* Enable "error correction" */
+ amvdec_write_dos(core, AV_SCRATCH_F,
+ (amvdec_read_dos(core, AV_SCRATCH_F) & 0xffffffc3) |
+ BIT(4) | BIT(7));
+
+ amvdec_write_dos(core, MDEC_PIC_DC_THRESH, 0x404038aa);
+
+ return 0;
+}
+
+static int codec_h264_stop(struct amvdec_session *sess)
+{
+ struct codec_h264 *h264 = sess->priv;
+ struct amvdec_core *core = sess->core;
+
+ if (h264->ext_fw_vaddr)
+ dma_free_coherent(core->dev, SIZE_EXT_FW,
+ h264->ext_fw_vaddr, h264->ext_fw_paddr);
+
+ if (h264->workspace_vaddr)
+ dma_free_coherent(core->dev, SIZE_WORKSPACE,
+ h264->workspace_vaddr, h264->workspace_paddr);
+
+ if (h264->ref_vaddr)
+ dma_free_coherent(core->dev, h264->ref_size,
+ h264->ref_vaddr, h264->ref_paddr);
+
+ if (h264->sei_vaddr)
+ dma_free_coherent(core->dev, SIZE_SEI,
+ h264->sei_vaddr, h264->sei_paddr);
+
+ return 0;
+}
+
+static int codec_h264_load_extended_firmware(struct amvdec_session *sess,
+ const u8 *data, u32 len)
+{
+ struct codec_h264 *h264;
+ struct amvdec_core *core = sess->core;
+
+ if (len < SIZE_EXT_FW)
+ return -EINVAL;
+
+ h264 = kzalloc(sizeof(*h264), GFP_KERNEL);
+ if (!h264)
+ return -ENOMEM;
+
+ h264->ext_fw_vaddr = dma_alloc_coherent(core->dev, SIZE_EXT_FW,
+ &h264->ext_fw_paddr,
+ GFP_KERNEL);
+ if (!h264->ext_fw_vaddr) {
+ kfree(h264);
+ return -ENOMEM;
+ }
+
+ memcpy(h264->ext_fw_vaddr, data, SIZE_EXT_FW);
+ sess->priv = h264;
+
+ return 0;
+}
+
+static const struct v4l2_fract par_table[] = {
+ { 1, 1 }, { 1, 1 }, { 12, 11 }, { 10, 11 },
+ { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 },
+ { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 },
+ { 64, 33 }, { 160, 99 }, { 4, 3 }, { 3, 2 },
+ { 2, 1 }
+};
+
+static void codec_h264_set_par(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 seq_info = amvdec_read_dos(core, AV_SCRATCH_2);
+ u32 ar_idc = (seq_info >> AR_IDC_BIT) & AR_IDC_MASK;
+
+ if (!(seq_info & AR_PRESENT_FLAG))
+ return;
+
+ if (ar_idc == AR_EXTEND) {
+ u32 ar_info = amvdec_read_dos(core, AV_SCRATCH_3);
+
+ sess->pixelaspect.numerator = ar_info & 0xffff;
+ sess->pixelaspect.denominator = (ar_info >> 16) & 0xffff;
+ return;
+ }
+
+ if (ar_idc >= ARRAY_SIZE(par_table))
+ return;
+
+ sess->pixelaspect = par_table[ar_idc];
+}
+
+static void codec_h264_resume(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_h264 *h264 = sess->priv;
+ u32 mb_width, mb_height, mb_total;
+
+ amvdec_set_canvases(sess,
+ (u32[]){ ANC0_CANVAS_ADDR, 0 },
+ (u32[]){ 24, 0 });
+
+ dev_dbg(core->dev, "max_refs = %u; actual_dpb_size = %u\n",
+ h264->max_refs, sess->num_dst_bufs);
+
+ /* Align to a multiple of 4 macroblocks */
+ mb_width = ALIGN(h264->mb_width, 4);
+ mb_height = ALIGN(h264->mb_height, 4);
+ mb_total = mb_width * mb_height;
+
+ h264->ref_size = mb_total * MB_MV_SIZE * h264->max_refs;
+ h264->ref_vaddr = dma_alloc_coherent(core->dev, h264->ref_size,
+ &h264->ref_paddr, GFP_KERNEL);
+ if (!h264->ref_vaddr) {
+ amvdec_abort(sess);
+ return;
+ }
+
+ /* Address to store the references' MVs */
+ amvdec_write_dos(core, AV_SCRATCH_1, h264->ref_paddr);
+ /* End of ref MV */
+ amvdec_write_dos(core, AV_SCRATCH_4, h264->ref_paddr + h264->ref_size);
+
+ amvdec_write_dos(core, AV_SCRATCH_0, (h264->max_refs << 24) |
+ (sess->num_dst_bufs << 16) |
+ ((h264->max_refs - 1) << 8));
+}
+
+/*
+ * Configure the H.264 decoder when the parser detected a parameter set change
+ */
+static void codec_h264_src_change(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_h264 *h264 = sess->priv;
+ u32 parsed_info, mb_total;
+ u32 crop_infor, crop_bottom, crop_right;
+ u32 frame_width, frame_height;
+
+ sess->keyframe_found = 1;
+
+ parsed_info = amvdec_read_dos(core, AV_SCRATCH_1);
+
+ /* Total number of 16x16 macroblocks */
+ mb_total = (parsed_info >> MB_TOTAL_BIT) & MB_TOTAL_MASK;
+ /* Number of macroblocks per line */
+ h264->mb_width = parsed_info & MB_WIDTH_MASK;
+ /* Number of macroblock lines */
+ h264->mb_height = mb_total / h264->mb_width;
+
+ h264->max_refs = ((parsed_info >> MAX_REF_BIT) & MAX_REF_MASK) + 1;
+
+ crop_infor = amvdec_read_dos(core, AV_SCRATCH_6);
+ crop_bottom = (crop_infor & 0xff);
+ crop_right = (crop_infor >> 16) & 0xff;
+
+ frame_width = h264->mb_width * 16 - crop_right;
+ frame_height = h264->mb_height * 16 - crop_bottom;
+
+ dev_dbg(core->dev, "frame: %ux%u; crop: %u %u\n",
+ frame_width, frame_height, crop_right, crop_bottom);
+
+ codec_h264_set_par(sess);
+ amvdec_src_change(sess, frame_width, frame_height, h264->max_refs + 5);
+}
+
+/*
+ * The bitstream offset is split in half across two different registers.
+ * Fetch its MSB half here; its location depends on the frame number.
+ */
+static u32 get_offset_msb(struct amvdec_core *core, int frame_num)
+{
+ int take_msb = frame_num % 2;
+ int reg_offset = (frame_num / 2) * 4;
+ u32 offset_msb = amvdec_read_dos(core, AV_SCRATCH_A + reg_offset);
+
+ if (take_msb)
+ return offset_msb & 0xffff0000;
+
+ return (offset_msb & 0x0000ffff) << 16;
+}
+
+static void codec_h264_frames_ready(struct amvdec_session *sess, u32 status)
+{
+ struct amvdec_core *core = sess->core;
+ int error_count;
+ int num_frames;
+ int i;
+
+ error_count = amvdec_read_dos(core, AV_SCRATCH_D);
+ num_frames = (status >> 8) & 0xff;
+ if (error_count) {
+ dev_warn(core->dev,
+ "decoder error(s) happened, count %d\n", error_count);
+ amvdec_write_dos(core, AV_SCRATCH_D, 0);
+ }
+
+ for (i = 0; i < num_frames; i++) {
+ u32 frame_status = amvdec_read_dos(core, AV_SCRATCH_1 + i * 4);
+ u32 buffer_index = frame_status & BUF_IDX_MASK;
+ u32 pic_struct = (frame_status >> PIC_STRUCT_BIT) &
+ PIC_STRUCT_MASK;
+ u32 offset = (frame_status >> OFFSET_BIT) & OFFSET_MASK;
+ u32 field = V4L2_FIELD_NONE;
+
+ /*
+ * A buffer decode error means it was decoded,
+ * but part of the picture will have artifacts.
+ * A typical reason is a temporarily corrupted bitstream.
+ */
+ if (frame_status & ERROR_FLAG)
+ dev_dbg(core->dev, "Buffer %d decode error\n",
+ buffer_index);
+
+ if (pic_struct == PIC_TOP_BOT)
+ field = V4L2_FIELD_INTERLACED_TB;
+ else if (pic_struct == PIC_BOT_TOP)
+ field = V4L2_FIELD_INTERLACED_BT;
+
+ offset |= get_offset_msb(core, i);
+ amvdec_dst_buf_done_idx(sess, buffer_index, offset, field);
+ }
+}
+
+static irqreturn_t codec_h264_threaded_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 status;
+ u32 size;
+ u8 cmd;
+
+ status = amvdec_read_dos(core, AV_SCRATCH_0);
+ cmd = status & CMD_MASK;
+
+ switch (cmd) {
+ case CMD_SRC_CHANGE:
+ codec_h264_src_change(sess);
+ break;
+ case CMD_FRAMES_READY:
+ codec_h264_frames_ready(sess, status);
+ break;
+ case CMD_FATAL_ERROR:
+ dev_err(core->dev, "H.264 decoder fatal error\n");
+ goto abort;
+ case CMD_BAD_WIDTH:
+ size = (amvdec_read_dos(core, AV_SCRATCH_1) + 1) * 16;
+ dev_err(core->dev, "Unsupported video width: %u\n", size);
+ goto abort;
+ case CMD_BAD_HEIGHT:
+ size = (amvdec_read_dos(core, AV_SCRATCH_1) + 1) * 16;
+ dev_err(core->dev, "Unsupported video height: %u\n", size);
+ goto abort;
+ case 0: /* Unused but not worth printing for */
+ case 9:
+ break;
+ default:
+ dev_info(core->dev, "Unexpected H264 ISR: %08X\n", cmd);
+ break;
+ }
+
+ if (cmd && cmd != CMD_SRC_CHANGE)
+ amvdec_write_dos(core, AV_SCRATCH_0, 0);
+
+ /* Decoder has some SEI data for us; ignore it */
+ if (amvdec_read_dos(core, AV_SCRATCH_J) & SEI_DATA_READY)
+ amvdec_write_dos(core, AV_SCRATCH_J, 0);
+
+ return IRQ_HANDLED;
+abort:
+ amvdec_abort(sess);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t codec_h264_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
+
+ return IRQ_WAKE_THREAD;
+}
+
+struct amvdec_codec_ops codec_h264_ops = {
+ .start = codec_h264_start,
+ .stop = codec_h264_stop,
+ .load_extended_firmware = codec_h264_load_extended_firmware,
+ .isr = codec_h264_isr,
+ .threaded_isr = codec_h264_threaded_isr,
+ .can_recycle = codec_h264_can_recycle,
+ .recycle = codec_h264_recycle,
+ .eos_sequence = codec_h264_eos_sequence,
+ .resume = codec_h264_resume,
+};
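A standalone sketch (not part of the patch) of the MSB packing handled by get_offset_msb() above: frames 2n and 2n+1 share the register at AV_SCRATCH_A + n * 4, with the even frame's 16 MSBs in the low half and the odd frame's in the high half.

#include <stdio.h>
#include <stdint.h>

static uint32_t offset_msb(uint32_t reg, int frame_num)
{
	if (frame_num % 2)			/* odd frame: high half */
		return reg & 0xffff0000;
	return (reg & 0x0000ffff) << 16;	/* even frame: low half */
}

int main(void)
{
	uint32_t reg = 0xbeef1234;	/* hypothetical register value */

	printf("%08x %08x\n", offset_msb(reg, 0), offset_msb(reg, 1));
	/* prints 12340000 beef0000 */
	return 0;
}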
diff --git a/drivers/staging/media/meson/vdec/codec_h264.h b/drivers/staging/media/meson/vdec/codec_h264.h
new file mode 100644
index 000000000000..7cb4fb86ff36
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_h264.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_CODEC_H264_H_
+#define __MESON_VDEC_CODEC_H264_H_
+
+#include "vdec.h"
+
+extern struct amvdec_codec_ops codec_h264_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/codec_hevc_common.c b/drivers/staging/media/meson/vdec/codec_hevc_common.c
new file mode 100644
index 000000000000..0315cc0911cd
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_hevc_common.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "codec_hevc_common.h"
+#include "vdec_helpers.h"
+#include "hevc_regs.h"
+
+#define MMU_COMPRESS_HEADER_SIZE 0x48000
+#define MMU_MAP_SIZE 0x4800
+
+const u16 vdec_hevc_parser_cmd[] = {
+ 0x0401, 0x8401, 0x0800, 0x0402,
+ 0x9002, 0x1423, 0x8CC3, 0x1423,
+ 0x8804, 0x9825, 0x0800, 0x04FE,
+ 0x8406, 0x8411, 0x1800, 0x8408,
+ 0x8409, 0x8C2A, 0x9C2B, 0x1C00,
+ 0x840F, 0x8407, 0x8000, 0x8408,
+ 0x2000, 0xA800, 0x8410, 0x04DE,
+ 0x840C, 0x840D, 0xAC00, 0xA000,
+ 0x08C0, 0x08E0, 0xA40E, 0xFC00,
+ 0x7C00
+};
+
+/* Configure decode head read mode */
+void codec_hevc_setup_decode_head(struct amvdec_session *sess, int is_10bit)
+{
+ struct amvdec_core *core = sess->core;
+ u32 body_size = amvdec_am21c_body_size(sess->width, sess->height);
+ u32 head_size = amvdec_am21c_head_size(sess->width, sess->height);
+
+ if (!codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) {
+ /* Enable 2-plane reference read mode */
+ amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(31));
+ return;
+ }
+
+ if (codec_hevc_use_mmu(core->platform->revision,
+ sess->pixfmt_cap, is_10bit))
+ amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(4));
+ else
+ amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, 0);
+
+ if (core->platform->revision < VDEC_REVISION_SM1)
+ amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL2, body_size / 32);
+ amvdec_write_dos(core, HEVC_CM_BODY_LENGTH, body_size);
+ amvdec_write_dos(core, HEVC_CM_HEADER_OFFSET, body_size);
+ amvdec_write_dos(core, HEVC_CM_HEADER_LENGTH, head_size);
+}
+EXPORT_SYMBOL_GPL(codec_hevc_setup_decode_head);
+
+static void codec_hevc_setup_buffers_gxbb(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ int is_10bit)
+{
+ struct amvdec_core *core = sess->core;
+ struct v4l2_m2m_buffer *buf;
+ u32 buf_num = v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
+ dma_addr_t buf_y_paddr = 0;
+ dma_addr_t buf_uv_paddr = 0;
+ u32 idx = 0;
+ u32 val;
+ int i;
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ struct vb2_buffer *vb = &buf->vb.vb2_buf;
+
+ idx = vb->index;
+
+ if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit))
+ buf_y_paddr = comm->fbc_buffer_paddr[idx];
+ else
+ buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) {
+ val = buf_y_paddr | (idx << 8) | 1;
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
+ val);
+ } else {
+ buf_uv_paddr = vb2_dma_contig_plane_dma_addr(vb, 1);
+ val = buf_y_paddr | ((idx * 2) << 8) | 1;
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
+ val);
+ val = buf_uv_paddr | ((idx * 2 + 1) << 8) | 1;
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
+ val);
+ }
+ }
+
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit))
+ val = buf_y_paddr | (idx << 8) | 1;
+ else
+ val = buf_y_paddr | ((idx * 2) << 8) | 1;
+
+ /* Fill the remaining unused slots with the last buffer's Y addr */
+ for (i = buf_num; i < MAX_REF_PIC_NUM; ++i)
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR, val);
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
+ for (i = 0; i < 32; ++i)
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
+}
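+
+/*
+ * On GXBB, each ANC2AXI table entry written above has the form
+ *   paddr | (canvas_index << 8) | 1
+ * with one entry per plane: a single entry for an FBC buffer, or a
+ * Y/UV pair for NV12M. This presumably relies on the buffer addresses
+ * being aligned enough to keep the low bits free for the index and
+ * the valid bit.
+ */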
+
+static void codec_hevc_setup_buffers_gxl(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ int is_10bit)
+{
+ struct amvdec_core *core = sess->core;
+ struct v4l2_m2m_buffer *buf;
+ u32 revision = core->platform->revision;
+ u32 pixfmt_cap = sess->pixfmt_cap;
+ int i;
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR,
+ BIT(2) | BIT(1));
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ struct vb2_buffer *vb = &buf->vb.vb2_buf;
+ dma_addr_t buf_y_paddr = 0;
+ dma_addr_t buf_uv_paddr = 0;
+ u32 idx = vb->index;
+
+ if (codec_hevc_use_mmu(revision, pixfmt_cap, is_10bit))
+ buf_y_paddr = comm->mmu_header_paddr[idx];
+ else if (codec_hevc_use_downsample(pixfmt_cap, is_10bit))
+ buf_y_paddr = comm->fbc_buffer_paddr[idx];
+ else
+ buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_DATA,
+ buf_y_paddr >> 5);
+
+ if (!codec_hevc_use_fbc(pixfmt_cap, is_10bit)) {
+ buf_uv_paddr = vb2_dma_contig_plane_dma_addr(vb, 1);
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_DATA,
+ buf_uv_paddr >> 5);
+ }
+ }
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
+ for (i = 0; i < 32; ++i)
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
+}
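+
+/*
+ * GXL and later use a different scheme: buffer addresses are written
+ * 32-byte aligned (shifted right by 5) through the TBL_DATA register,
+ * with the table index apparently auto-incrementing after the
+ * CONF_ADDR setup write.
+ */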
+
+void codec_hevc_free_fbc_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm)
+{
+ struct device *dev = sess->core->dev;
+ u32 am21_size = amvdec_am21c_size(sess->width, sess->height);
+ int i;
+
+ for (i = 0; i < MAX_REF_PIC_NUM; ++i) {
+ if (comm->fbc_buffer_vaddr[i]) {
+ dma_free_coherent(dev, am21_size,
+ comm->fbc_buffer_vaddr[i],
+ comm->fbc_buffer_paddr[i]);
+ comm->fbc_buffer_vaddr[i] = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(codec_hevc_free_fbc_buffers);
+
+static int codec_hevc_alloc_fbc_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm)
+{
+ struct device *dev = sess->core->dev;
+ struct v4l2_m2m_buffer *buf;
+ u32 am21_size = amvdec_am21c_size(sess->width, sess->height);
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ u32 idx = buf->vb.vb2_buf.index;
+ dma_addr_t paddr;
+ void *vaddr = dma_alloc_coherent(dev, am21_size, &paddr,
+ GFP_KERNEL);
+ if (!vaddr) {
+ codec_hevc_free_fbc_buffers(sess, comm);
+ return -ENOMEM;
+ }
+
+ comm->fbc_buffer_vaddr[idx] = vaddr;
+ comm->fbc_buffer_paddr[idx] = paddr;
+ }
+
+ return 0;
+}
+
+void codec_hevc_free_mmu_headers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm)
+{
+ struct device *dev = sess->core->dev;
+ int i;
+
+ for (i = 0; i < MAX_REF_PIC_NUM; ++i) {
+ if (comm->mmu_header_vaddr[i]) {
+ dma_free_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
+ comm->mmu_header_vaddr[i],
+ comm->mmu_header_paddr[i]);
+ comm->mmu_header_vaddr[i] = NULL;
+ }
+ }
+
+ if (comm->mmu_map_vaddr) {
+ dma_free_coherent(dev, MMU_MAP_SIZE,
+ comm->mmu_map_vaddr,
+ comm->mmu_map_paddr);
+ comm->mmu_map_vaddr = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(codec_hevc_free_mmu_headers);
+
+static int codec_hevc_alloc_mmu_headers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm)
+{
+ struct device *dev = sess->core->dev;
+ struct v4l2_m2m_buffer *buf;
+
+ comm->mmu_map_vaddr = dma_alloc_coherent(dev, MMU_MAP_SIZE,
+ &comm->mmu_map_paddr,
+ GFP_KERNEL);
+ if (!comm->mmu_map_vaddr)
+ return -ENOMEM;
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ u32 idx = buf->vb.vb2_buf.index;
+ dma_addr_t paddr;
+ void *vaddr = dma_alloc_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
+ &paddr, GFP_KERNEL);
+ if (!vaddr) {
+ codec_hevc_free_mmu_headers(sess, comm);
+ return -ENOMEM;
+ }
+
+ comm->mmu_header_vaddr[idx] = vaddr;
+ comm->mmu_header_paddr[idx] = paddr;
+ }
+
+ return 0;
+}
+
+int codec_hevc_setup_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ int is_10bit)
+{
+ struct amvdec_core *core = sess->core;
+ int ret;
+
+ if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit)) {
+ ret = codec_hevc_alloc_fbc_buffers(sess, comm);
+ if (ret)
+ return ret;
+ }
+
+ if (codec_hevc_use_mmu(core->platform->revision,
+ sess->pixfmt_cap, is_10bit)) {
+ ret = codec_hevc_alloc_mmu_headers(sess, comm);
+ if (ret) {
+ codec_hevc_free_fbc_buffers(sess, comm);
+ return ret;
+ }
+ }
+
+ if (core->platform->revision == VDEC_REVISION_GXBB)
+ codec_hevc_setup_buffers_gxbb(sess, comm, is_10bit);
+ else
+ codec_hevc_setup_buffers_gxl(sess, comm, is_10bit);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(codec_hevc_setup_buffers);
+
+void codec_hevc_fill_mmu_map(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ struct vb2_buffer *vb)
+{
+ u32 size = amvdec_am21c_size(sess->width, sess->height);
+ u32 nb_pages = size / PAGE_SIZE;
+ u32 *mmu_map = comm->mmu_map_vaddr;
+ u32 first_page;
+ u32 i;
+
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
+ first_page = comm->fbc_buffer_paddr[vb->index] >> PAGE_SHIFT;
+ else
+ first_page = vb2_dma_contig_plane_dma_addr(vb, 0) >> PAGE_SHIFT;
+
+ for (i = 0; i < nb_pages; ++i)
+ mmu_map[i] = first_page + i;
+}
+EXPORT_SYMBOL_GPL(codec_hevc_fill_mmu_map);
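+
+/*
+ * The MMU map filled above is a flat page table: entry i holds the
+ * page frame number of page i of the destination buffer. Writing
+ * first_page + i is valid only because the buffers are physically
+ * contiguous (dma_alloc_coherent / videobuf2-dma-contig).
+ */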
diff --git a/drivers/staging/media/meson/vdec/codec_hevc_common.h b/drivers/staging/media/meson/vdec/codec_hevc_common.h
new file mode 100644
index 000000000000..88e4379ba1ee
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_hevc_common.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_HEVC_COMMON_H_
+#define __MESON_VDEC_HEVC_COMMON_H_
+
+#include "vdec.h"
+
+#define PARSER_CMD_SKIP_CFG_0 0x0000090b
+#define PARSER_CMD_SKIP_CFG_1 0x1b14140f
+#define PARSER_CMD_SKIP_CFG_2 0x001b1910
+
+#define VDEC_HEVC_PARSER_CMD_LEN 37
+extern const u16 vdec_hevc_parser_cmd[VDEC_HEVC_PARSER_CMD_LEN];
+
+#define MAX_REF_PIC_NUM 24
+
+struct codec_hevc_common {
+ void *fbc_buffer_vaddr[MAX_REF_PIC_NUM];
+ dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM];
+
+ void *mmu_header_vaddr[MAX_REF_PIC_NUM];
+ dma_addr_t mmu_header_paddr[MAX_REF_PIC_NUM];
+
+ void *mmu_map_vaddr;
+ dma_addr_t mmu_map_paddr;
+};
+
+/* Returns 1 if we must use framebuffer compression */
+static inline int codec_hevc_use_fbc(u32 pixfmt, int is_10bit)
+{
+ /* TOFIX: also handle Amlogic compressed buffers for 8-bit */
+ return is_10bit;
+}
+
+/* Returns 1 if we are decoding 10-bit but outputting 8-bit NV12 */
+static inline int codec_hevc_use_downsample(u32 pixfmt, int is_10bit)
+{
+ return is_10bit;
+}
+
+/* Returns 1 if we are decoding using the IOMMU */
+static inline int codec_hevc_use_mmu(u32 revision, u32 pixfmt, int is_10bit)
+{
+ return revision >= VDEC_REVISION_G12A &&
+ codec_hevc_use_fbc(pixfmt, is_10bit);
+}
+
+/* Configure decode head read mode */
+void codec_hevc_setup_decode_head(struct amvdec_session *sess, int is_10bit);
+
+void codec_hevc_free_fbc_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm);
+
+void codec_hevc_free_mmu_headers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm);
+
+int codec_hevc_setup_buffers(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ int is_10bit);
+
+void codec_hevc_fill_mmu_map(struct amvdec_session *sess,
+ struct codec_hevc_common *comm,
+ struct vb2_buffer *vb);
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/codec_vp9.c b/drivers/staging/media/meson/vdec/codec_vp9.c
new file mode 100644
index 000000000000..60e4fc0052b3
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_vp9.c
@@ -0,0 +1,2141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Maxime Jourdan <mjourdan@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "dos_regs.h"
+#include "hevc_regs.h"
+#include "codec_vp9.h"
+#include "vdec_helpers.h"
+#include "codec_hevc_common.h"
+
+/* HEVC reg mapping */
+#define VP9_DEC_STATUS_REG HEVC_ASSIST_SCRATCH_0
+ #define VP9_10B_DECODE_SLICE 5
+ #define VP9_HEAD_PARSER_DONE 0xf0
+#define VP9_RPM_BUFFER HEVC_ASSIST_SCRATCH_1
+#define VP9_SHORT_TERM_RPS HEVC_ASSIST_SCRATCH_2
+#define VP9_ADAPT_PROB_REG HEVC_ASSIST_SCRATCH_3
+#define VP9_MMU_MAP_BUFFER HEVC_ASSIST_SCRATCH_4
+#define VP9_PPS_BUFFER HEVC_ASSIST_SCRATCH_5
+#define VP9_SAO_UP HEVC_ASSIST_SCRATCH_6
+#define VP9_STREAM_SWAP_BUFFER HEVC_ASSIST_SCRATCH_7
+#define VP9_STREAM_SWAP_BUFFER2 HEVC_ASSIST_SCRATCH_8
+#define VP9_PROB_SWAP_BUFFER HEVC_ASSIST_SCRATCH_9
+#define VP9_COUNT_SWAP_BUFFER HEVC_ASSIST_SCRATCH_A
+#define VP9_SEG_MAP_BUFFER HEVC_ASSIST_SCRATCH_B
+#define VP9_SCALELUT HEVC_ASSIST_SCRATCH_D
+#define VP9_WAIT_FLAG HEVC_ASSIST_SCRATCH_E
+#define LMEM_DUMP_ADR HEVC_ASSIST_SCRATCH_F
+#define NAL_SEARCH_CTL HEVC_ASSIST_SCRATCH_I
+#define VP9_DECODE_MODE HEVC_ASSIST_SCRATCH_J
+ #define DECODE_MODE_SINGLE 0
+#define DECODE_STOP_POS HEVC_ASSIST_SCRATCH_K
+#define HEVC_DECODE_COUNT HEVC_ASSIST_SCRATCH_M
+#define HEVC_DECODE_SIZE HEVC_ASSIST_SCRATCH_N
+
+/* VP9 Constants */
+#define LCU_SIZE 64
+#define MAX_REF_PIC_NUM 24
+#define REFS_PER_FRAME 3
+#define REF_FRAMES 8
+#define MV_MEM_UNIT 0x240
+#define ADAPT_PROB_SIZE 0xf80
+
+enum FRAME_TYPE {
+ KEY_FRAME = 0,
+ INTER_FRAME = 1,
+ FRAME_TYPES,
+};
+
+/* VP9 Workspace layout */
+#define MPRED_MV_BUF_SIZE 0x120000
+
+#define IPP_SIZE 0x4000
+#define SAO_ABV_SIZE 0x30000
+#define SAO_VB_SIZE 0x30000
+#define SH_TM_RPS_SIZE 0x800
+#define VPS_SIZE 0x800
+#define SPS_SIZE 0x800
+#define PPS_SIZE 0x2000
+#define SAO_UP_SIZE 0x2800
+#define SWAP_BUF_SIZE 0x800
+#define SWAP_BUF2_SIZE 0x800
+#define SCALELUT_SIZE 0x8000
+#define DBLK_PARA_SIZE 0x80000
+#define DBLK_DATA_SIZE 0x80000
+#define SEG_MAP_SIZE 0xd800
+#define PROB_SIZE 0x5000
+#define COUNT_SIZE 0x3000
+#define MMU_VBH_SIZE 0x5000
+#define MPRED_ABV_SIZE 0x10000
+#define MPRED_MV_SIZE (MPRED_MV_BUF_SIZE * MAX_REF_PIC_NUM)
+#define RPM_BUF_SIZE 0x100
+#define LMEM_SIZE 0x800
+
+#define IPP_OFFSET 0x00
+#define SAO_ABV_OFFSET (IPP_OFFSET + IPP_SIZE)
+#define SAO_VB_OFFSET (SAO_ABV_OFFSET + SAO_ABV_SIZE)
+#define SH_TM_RPS_OFFSET (SAO_VB_OFFSET + SAO_VB_SIZE)
+#define VPS_OFFSET (SH_TM_RPS_OFFSET + SH_TM_RPS_SIZE)
+#define SPS_OFFSET (VPS_OFFSET + VPS_SIZE)
+#define PPS_OFFSET (SPS_OFFSET + SPS_SIZE)
+#define SAO_UP_OFFSET (PPS_OFFSET + PPS_SIZE)
+#define SWAP_BUF_OFFSET (SAO_UP_OFFSET + SAO_UP_SIZE)
+#define SWAP_BUF2_OFFSET (SWAP_BUF_OFFSET + SWAP_BUF_SIZE)
+#define SCALELUT_OFFSET (SWAP_BUF2_OFFSET + SWAP_BUF2_SIZE)
+#define DBLK_PARA_OFFSET (SCALELUT_OFFSET + SCALELUT_SIZE)
+#define DBLK_DATA_OFFSET (DBLK_PARA_OFFSET + DBLK_PARA_SIZE)
+#define SEG_MAP_OFFSET (DBLK_DATA_OFFSET + DBLK_DATA_SIZE)
+#define PROB_OFFSET (SEG_MAP_OFFSET + SEG_MAP_SIZE)
+#define COUNT_OFFSET (PROB_OFFSET + PROB_SIZE)
+#define MMU_VBH_OFFSET (COUNT_OFFSET + COUNT_SIZE)
+#define MPRED_ABV_OFFSET (MMU_VBH_OFFSET + MMU_VBH_SIZE)
+#define MPRED_MV_OFFSET (MPRED_ABV_OFFSET + MPRED_ABV_SIZE)
+#define RPM_OFFSET (MPRED_MV_OFFSET + MPRED_MV_SIZE)
+#define LMEM_OFFSET (RPM_OFFSET + RPM_BUF_SIZE)
+
+#define SIZE_WORKSPACE ALIGN(LMEM_OFFSET + LMEM_SIZE, 64 * SZ_1K)
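+
+/*
+ * All of the buffers above live in a single physically contiguous
+ * workspace allocation: the *_OFFSET chain packs them back-to-back
+ * and SIZE_WORKSPACE rounds the total up to a 64 KiB multiple.
+ */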
+
+#define NONE -1
+#define INTRA_FRAME 0
+#define LAST_FRAME 1
+#define GOLDEN_FRAME 2
+#define ALTREF_FRAME 3
+#define MAX_REF_FRAMES 4
+
+/*
+ * Defines, declarations and sub-functions for the VP9 de-block loop
+ * filter Thr/Lvl table update:
+ * - struct segmentation is used for the loop filter only (unrelated
+ *   fields have been removed)
+ * - vp9_loop_filter_init() and vp9_loop_filter_frame_init() will
+ *   be instantiated in C_Entry
+ * - vp9_loop_filter_init() runs once before decoding starts
+ * - vp9_loop_filter_frame_init() runs before every frame decode starts
+ * - the video format is set to VP9 in vp9_loop_filter_init()
+ */
+#define MAX_LOOP_FILTER 63
+#define MAX_REF_LF_DELTAS 4
+#define MAX_MODE_LF_DELTAS 2
+#define SEGMENT_DELTADATA 0
+#define SEGMENT_ABSDATA 1
+#define MAX_SEGMENTS 8
+
+/* VP9 PROB processing defines */
+#define VP9_PARTITION_START 0
+#define VP9_PARTITION_SIZE_STEP (3 * 4)
+#define VP9_PARTITION_ONE_SIZE (4 * VP9_PARTITION_SIZE_STEP)
+#define VP9_PARTITION_KEY_START 0
+#define VP9_PARTITION_P_START VP9_PARTITION_ONE_SIZE
+#define VP9_PARTITION_SIZE (2 * VP9_PARTITION_ONE_SIZE)
+#define VP9_SKIP_START (VP9_PARTITION_START + VP9_PARTITION_SIZE)
+#define VP9_SKIP_SIZE 4 /* only use 3 */
+#define VP9_TX_MODE_START (VP9_SKIP_START + VP9_SKIP_SIZE)
+#define VP9_TX_MODE_8_0_OFFSET 0
+#define VP9_TX_MODE_8_1_OFFSET 1
+#define VP9_TX_MODE_16_0_OFFSET 2
+#define VP9_TX_MODE_16_1_OFFSET 4
+#define VP9_TX_MODE_32_0_OFFSET 6
+#define VP9_TX_MODE_32_1_OFFSET 9
+#define VP9_TX_MODE_SIZE 12
+#define VP9_COEF_START (VP9_TX_MODE_START + VP9_TX_MODE_SIZE)
+#define VP9_COEF_BAND_0_OFFSET 0
+#define VP9_COEF_BAND_1_OFFSET (VP9_COEF_BAND_0_OFFSET + 3 * 3 + 1)
+#define VP9_COEF_BAND_2_OFFSET (VP9_COEF_BAND_1_OFFSET + 6 * 3)
+#define VP9_COEF_BAND_3_OFFSET (VP9_COEF_BAND_2_OFFSET + 6 * 3)
+#define VP9_COEF_BAND_4_OFFSET (VP9_COEF_BAND_3_OFFSET + 6 * 3)
+#define VP9_COEF_BAND_5_OFFSET (VP9_COEF_BAND_4_OFFSET + 6 * 3)
+#define VP9_COEF_SIZE_ONE_SET 100 /* ((3 + 5 * 6) * 3 + 1 padding) */
+#define VP9_COEF_4X4_START (VP9_COEF_START + 0 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_8X8_START (VP9_COEF_START + 4 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_16X16_START (VP9_COEF_START + 8 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_32X32_START (VP9_COEF_START + 12 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_SIZE_PLANE (2 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_COEF_SIZE (4 * 2 * 2 * VP9_COEF_SIZE_ONE_SET)
+#define VP9_INTER_MODE_START (VP9_COEF_START + VP9_COEF_SIZE)
+#define VP9_INTER_MODE_SIZE 24 /* only use 21 (# * 7) */
+#define VP9_INTERP_START (VP9_INTER_MODE_START + VP9_INTER_MODE_SIZE)
+#define VP9_INTERP_SIZE 8
+#define VP9_INTRA_INTER_START (VP9_INTERP_START + VP9_INTERP_SIZE)
+#define VP9_INTRA_INTER_SIZE 4
+#define VP9_INTERP_INTRA_INTER_START VP9_INTERP_START
+#define VP9_INTERP_INTRA_INTER_SIZE (VP9_INTERP_SIZE + VP9_INTRA_INTER_SIZE)
+#define VP9_COMP_INTER_START \
+ (VP9_INTERP_INTRA_INTER_START + VP9_INTERP_INTRA_INTER_SIZE)
+#define VP9_COMP_INTER_SIZE 5
+#define VP9_COMP_REF_START (VP9_COMP_INTER_START + VP9_COMP_INTER_SIZE)
+#define VP9_COMP_REF_SIZE 5
+#define VP9_SINGLE_REF_START (VP9_COMP_REF_START + VP9_COMP_REF_SIZE)
+#define VP9_SINGLE_REF_SIZE 10
+#define VP9_REF_MODE_START VP9_COMP_INTER_START
+#define VP9_REF_MODE_SIZE \
+ (VP9_COMP_INTER_SIZE + VP9_COMP_REF_SIZE + VP9_SINGLE_REF_SIZE)
+#define VP9_IF_Y_MODE_START (VP9_REF_MODE_START + VP9_REF_MODE_SIZE)
+#define VP9_IF_Y_MODE_SIZE 36
+#define VP9_IF_UV_MODE_START (VP9_IF_Y_MODE_START + VP9_IF_Y_MODE_SIZE)
+#define VP9_IF_UV_MODE_SIZE 92 /* only use 90 */
+#define VP9_MV_JOINTS_START (VP9_IF_UV_MODE_START + VP9_IF_UV_MODE_SIZE)
+#define VP9_MV_JOINTS_SIZE 3
+#define VP9_MV_SIGN_0_START (VP9_MV_JOINTS_START + VP9_MV_JOINTS_SIZE)
+#define VP9_MV_SIGN_0_SIZE 1
+#define VP9_MV_CLASSES_0_START (VP9_MV_SIGN_0_START + VP9_MV_SIGN_0_SIZE)
+#define VP9_MV_CLASSES_0_SIZE 10
+#define VP9_MV_CLASS0_0_START \
+ (VP9_MV_CLASSES_0_START + VP9_MV_CLASSES_0_SIZE)
+#define VP9_MV_CLASS0_0_SIZE 1
+#define VP9_MV_BITS_0_START (VP9_MV_CLASS0_0_START + VP9_MV_CLASS0_0_SIZE)
+#define VP9_MV_BITS_0_SIZE 10
+#define VP9_MV_SIGN_1_START (VP9_MV_BITS_0_START + VP9_MV_BITS_0_SIZE)
+#define VP9_MV_SIGN_1_SIZE 1
+#define VP9_MV_CLASSES_1_START \
+ (VP9_MV_SIGN_1_START + VP9_MV_SIGN_1_SIZE)
+#define VP9_MV_CLASSES_1_SIZE 10
+#define VP9_MV_CLASS0_1_START \
+ (VP9_MV_CLASSES_1_START + VP9_MV_CLASSES_1_SIZE)
+#define VP9_MV_CLASS0_1_SIZE 1
+#define VP9_MV_BITS_1_START \
+ (VP9_MV_CLASS0_1_START + VP9_MV_CLASS0_1_SIZE)
+#define VP9_MV_BITS_1_SIZE 10
+#define VP9_MV_CLASS0_FP_0_START \
+ (VP9_MV_BITS_1_START + VP9_MV_BITS_1_SIZE)
+#define VP9_MV_CLASS0_FP_0_SIZE 9
+#define VP9_MV_CLASS0_FP_1_START \
+ (VP9_MV_CLASS0_FP_0_START + VP9_MV_CLASS0_FP_0_SIZE)
+#define VP9_MV_CLASS0_FP_1_SIZE 9
+#define VP9_MV_CLASS0_HP_0_START \
+ (VP9_MV_CLASS0_FP_1_START + VP9_MV_CLASS0_FP_1_SIZE)
+#define VP9_MV_CLASS0_HP_0_SIZE 2
+#define VP9_MV_CLASS0_HP_1_START \
+ (VP9_MV_CLASS0_HP_0_START + VP9_MV_CLASS0_HP_0_SIZE)
+#define VP9_MV_CLASS0_HP_1_SIZE 2
+#define VP9_MV_START VP9_MV_JOINTS_START
+#define VP9_MV_SIZE 72 /* only use 69 */
+
+#define VP9_TOTAL_SIZE (VP9_MV_START + VP9_MV_SIZE)
+
+/* VP9 COUNT mem processing defines */
+#define VP9_COEF_COUNT_START 0
+#define VP9_COEF_COUNT_BAND_0_OFFSET 0
+#define VP9_COEF_COUNT_BAND_1_OFFSET \
+ (VP9_COEF_COUNT_BAND_0_OFFSET + 3 * 5)
+#define VP9_COEF_COUNT_BAND_2_OFFSET \
+ (VP9_COEF_COUNT_BAND_1_OFFSET + 6 * 5)
+#define VP9_COEF_COUNT_BAND_3_OFFSET \
+ (VP9_COEF_COUNT_BAND_2_OFFSET + 6 * 5)
+#define VP9_COEF_COUNT_BAND_4_OFFSET \
+ (VP9_COEF_COUNT_BAND_3_OFFSET + 6 * 5)
+#define VP9_COEF_COUNT_BAND_5_OFFSET \
+ (VP9_COEF_COUNT_BAND_4_OFFSET + 6 * 5)
+#define VP9_COEF_COUNT_SIZE_ONE_SET 165 /* (3 + 5 * 6) * 5 */
+#define VP9_COEF_COUNT_4X4_START \
+ (VP9_COEF_COUNT_START + 0 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_8X8_START \
+ (VP9_COEF_COUNT_START + 4 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_16X16_START \
+ (VP9_COEF_COUNT_START + 8 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_32X32_START \
+ (VP9_COEF_COUNT_START + 12 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_SIZE_PLANE (2 * VP9_COEF_COUNT_SIZE_ONE_SET)
+#define VP9_COEF_COUNT_SIZE (4 * 2 * 2 * VP9_COEF_COUNT_SIZE_ONE_SET)
+
+#define VP9_INTRA_INTER_COUNT_START \
+ (VP9_COEF_COUNT_START + VP9_COEF_COUNT_SIZE)
+#define VP9_INTRA_INTER_COUNT_SIZE (4 * 2)
+#define VP9_COMP_INTER_COUNT_START \
+ (VP9_INTRA_INTER_COUNT_START + VP9_INTRA_INTER_COUNT_SIZE)
+#define VP9_COMP_INTER_COUNT_SIZE (5 * 2)
+#define VP9_COMP_REF_COUNT_START \
+ (VP9_COMP_INTER_COUNT_START + VP9_COMP_INTER_COUNT_SIZE)
+#define VP9_COMP_REF_COUNT_SIZE (5 * 2)
+#define VP9_SINGLE_REF_COUNT_START \
+ (VP9_COMP_REF_COUNT_START + VP9_COMP_REF_COUNT_SIZE)
+#define VP9_SINGLE_REF_COUNT_SIZE (10 * 2)
+#define VP9_TX_MODE_COUNT_START \
+ (VP9_SINGLE_REF_COUNT_START + VP9_SINGLE_REF_COUNT_SIZE)
+#define VP9_TX_MODE_COUNT_SIZE (12 * 2)
+#define VP9_SKIP_COUNT_START \
+ (VP9_TX_MODE_COUNT_START + VP9_TX_MODE_COUNT_SIZE)
+#define VP9_SKIP_COUNT_SIZE (3 * 2)
+#define VP9_MV_SIGN_0_COUNT_START \
+ (VP9_SKIP_COUNT_START + VP9_SKIP_COUNT_SIZE)
+#define VP9_MV_SIGN_0_COUNT_SIZE (1 * 2)
+#define VP9_MV_SIGN_1_COUNT_START \
+ (VP9_MV_SIGN_0_COUNT_START + VP9_MV_SIGN_0_COUNT_SIZE)
+#define VP9_MV_SIGN_1_COUNT_SIZE (1 * 2)
+#define VP9_MV_BITS_0_COUNT_START \
+ (VP9_MV_SIGN_1_COUNT_START + VP9_MV_SIGN_1_COUNT_SIZE)
+#define VP9_MV_BITS_0_COUNT_SIZE (10 * 2)
+#define VP9_MV_BITS_1_COUNT_START \
+ (VP9_MV_BITS_0_COUNT_START + VP9_MV_BITS_0_COUNT_SIZE)
+#define VP9_MV_BITS_1_COUNT_SIZE (10 * 2)
+#define VP9_MV_CLASS0_HP_0_COUNT_START \
+ (VP9_MV_BITS_1_COUNT_START + VP9_MV_BITS_1_COUNT_SIZE)
+#define VP9_MV_CLASS0_HP_0_COUNT_SIZE (2 * 2)
+#define VP9_MV_CLASS0_HP_1_COUNT_START \
+ (VP9_MV_CLASS0_HP_0_COUNT_START + VP9_MV_CLASS0_HP_0_COUNT_SIZE)
+#define VP9_MV_CLASS0_HP_1_COUNT_SIZE (2 * 2)
+
+/* Start merge_tree */
+#define VP9_INTER_MODE_COUNT_START \
+ (VP9_MV_CLASS0_HP_1_COUNT_START + VP9_MV_CLASS0_HP_1_COUNT_SIZE)
+#define VP9_INTER_MODE_COUNT_SIZE (7 * 4)
+#define VP9_IF_Y_MODE_COUNT_START \
+ (VP9_INTER_MODE_COUNT_START + VP9_INTER_MODE_COUNT_SIZE)
+#define VP9_IF_Y_MODE_COUNT_SIZE (10 * 4)
+#define VP9_IF_UV_MODE_COUNT_START \
+ (VP9_IF_Y_MODE_COUNT_START + VP9_IF_Y_MODE_COUNT_SIZE)
+#define VP9_IF_UV_MODE_COUNT_SIZE (10 * 10)
+#define VP9_PARTITION_P_COUNT_START \
+ (VP9_IF_UV_MODE_COUNT_START + VP9_IF_UV_MODE_COUNT_SIZE)
+#define VP9_PARTITION_P_COUNT_SIZE (4 * 4 * 4)
+#define VP9_INTERP_COUNT_START \
+ (VP9_PARTITION_P_COUNT_START + VP9_PARTITION_P_COUNT_SIZE)
+#define VP9_INTERP_COUNT_SIZE (4 * 3)
+#define VP9_MV_JOINTS_COUNT_START \
+ (VP9_INTERP_COUNT_START + VP9_INTERP_COUNT_SIZE)
+#define VP9_MV_JOINTS_COUNT_SIZE (1 * 4)
+#define VP9_MV_CLASSES_0_COUNT_START \
+ (VP9_MV_JOINTS_COUNT_START + VP9_MV_JOINTS_COUNT_SIZE)
+#define VP9_MV_CLASSES_0_COUNT_SIZE (1 * 11)
+#define VP9_MV_CLASS0_0_COUNT_START \
+ (VP9_MV_CLASSES_0_COUNT_START + VP9_MV_CLASSES_0_COUNT_SIZE)
+#define VP9_MV_CLASS0_0_COUNT_SIZE (1 * 2)
+#define VP9_MV_CLASSES_1_COUNT_START \
+ (VP9_MV_CLASS0_0_COUNT_START + VP9_MV_CLASS0_0_COUNT_SIZE)
+#define VP9_MV_CLASSES_1_COUNT_SIZE (1 * 11)
+#define VP9_MV_CLASS0_1_COUNT_START \
+ (VP9_MV_CLASSES_1_COUNT_START + VP9_MV_CLASSES_1_COUNT_SIZE)
+#define VP9_MV_CLASS0_1_COUNT_SIZE (1 * 2)
+#define VP9_MV_CLASS0_FP_0_COUNT_START \
+ (VP9_MV_CLASS0_1_COUNT_START + VP9_MV_CLASS0_1_COUNT_SIZE)
+#define VP9_MV_CLASS0_FP_0_COUNT_SIZE (3 * 4)
+#define VP9_MV_CLASS0_FP_1_COUNT_START \
+ (VP9_MV_CLASS0_FP_0_COUNT_START + VP9_MV_CLASS0_FP_0_COUNT_SIZE)
+#define VP9_MV_CLASS0_FP_1_COUNT_SIZE (3 * 4)
+
+#define DC_PRED 0 /* Average of above and left pixels */
+#define V_PRED 1 /* Vertical */
+#define H_PRED 2 /* Horizontal */
+#define D45_PRED 3 /* Directional 45 deg = round(arctan(1/1) * 180/pi) */
+#define D135_PRED 4 /* Directional 135 deg = 180 - 45 */
+#define D117_PRED 5 /* Directional 117 deg = 180 - 63 */
+#define D153_PRED 6 /* Directional 153 deg = 180 - 27 */
+#define D207_PRED 7 /* Directional 207 deg = 180 + 27 */
+#define D63_PRED 8 /* Directional 63 deg = round(arctan(2/1) * 180/pi) */
+#define TM_PRED 9 /* True-motion */
+
+/* Use a static inline rather than a macro to avoid side effects from "num" being evaluated twice */
+static inline int round_power_of_two(int value, int num)
+{
+ return (value + (1 << (num - 1))) >> num;
+}
+
+#define MODE_MV_COUNT_SAT 20
+static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = {
+ 0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64,
+ 70, 76, 83, 89, 96, 102, 108, 115, 121, 128
+};
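+
+/*
+ * Backward-adaptation weights: with n observed symbols, the updated
+ * probability is
+ *   new = (pre * (256 - factor) + observed * factor + 128) >> 8
+ * where factor = count_to_update_factor[min(n, MODE_MV_COUNT_SAT)],
+ * so 20 or more symbols give factor 128, i.e. an even blend of the
+ * previous and the freshly measured probability.
+ */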
+
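+/*
+ * View of the RPM dump produced by the firmware: 'l' gives raw access
+ * to the 16-bit words when copying them out of the workspace (see
+ * codec_vp9_fetch_rpm()), while 'p' reinterprets the same words as
+ * the parsed fields of the VP9 uncompressed frame header.
+ */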
+union rpm_param {
+ struct {
+ u16 data[RPM_BUF_SIZE];
+ } l;
+ struct {
+ u16 profile;
+ u16 show_existing_frame;
+ u16 frame_to_show_idx;
+ u16 frame_type; /* 1 bit */
+ u16 show_frame; /* 1 bit */
+ u16 error_resilient_mode; /* 1 bit */
+ u16 intra_only; /* 1 bit */
+ u16 display_size_present; /* 1 bit */
+ u16 reset_frame_context;
+ u16 refresh_frame_flags;
+ u16 width;
+ u16 height;
+ u16 display_width;
+ u16 display_height;
+ u16 ref_info;
+ u16 same_frame_size;
+ u16 mode_ref_delta_enabled;
+ u16 ref_deltas[4];
+ u16 mode_deltas[2];
+ u16 filter_level;
+ u16 sharpness_level;
+ u16 bit_depth;
+ u16 seg_quant_info[8];
+ u16 seg_enabled;
+ u16 seg_abs_delta;
+ /* bit 15: feature enabled; bit 8: sign; bits [5:0]: data */
+ u16 seg_lf_info[8];
+ } p;
+};
+
+enum SEG_LVL_FEATURES {
+ SEG_LVL_ALT_Q = 0, /* Use alternate Quantizer */
+ SEG_LVL_ALT_LF = 1, /* Use alternate loop filter value */
+ SEG_LVL_REF_FRAME = 2, /* Optional Segment reference frame */
+ SEG_LVL_SKIP = 3, /* Optional Segment (0,0) + skip mode */
+ SEG_LVL_MAX = 4 /* Number of features supported */
+};
+
+struct segmentation {
+ u8 enabled;
+ u8 update_map;
+ u8 update_data;
+ u8 abs_delta;
+ u8 temporal_update;
+ s16 feature_data[MAX_SEGMENTS][SEG_LVL_MAX];
+ unsigned int feature_mask[MAX_SEGMENTS];
+};
+
+struct loop_filter_thresh {
+ u8 mblim;
+ u8 lim;
+ u8 hev_thr;
+};
+
+struct loop_filter_info_n {
+ struct loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1];
+ u8 lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS];
+};
+
+struct loopfilter {
+ int filter_level;
+
+ int sharpness_level;
+ int last_sharpness_level;
+
+ u8 mode_ref_delta_enabled;
+ u8 mode_ref_delta_update;
+
+ /* 0 = Intra, Last, GF, ARF */
+ signed char ref_deltas[MAX_REF_LF_DELTAS];
+ signed char last_ref_deltas[MAX_REF_LF_DELTAS];
+
+ /* 0 = ZERO_MV, MV */
+ signed char mode_deltas[MAX_MODE_LF_DELTAS];
+ signed char last_mode_deltas[MAX_MODE_LF_DELTAS];
+};
+
+struct vp9_frame {
+ struct list_head list;
+ struct vb2_v4l2_buffer *vbuf;
+ int index;
+ int intra_only;
+ int show;
+ int type;
+ int done;
+ unsigned int width;
+ unsigned int height;
+};
+
+struct codec_vp9 {
+ /* VP9 context lock */
+ struct mutex lock;
+
+ /* Common part with the HEVC decoder */
+ struct codec_hevc_common common;
+
+ /* Buffer for the VP9 Workspace */
+ void *workspace_vaddr;
+ dma_addr_t workspace_paddr;
+
+ /* Contains information parsed from the bitstream */
+ union rpm_param rpm_param;
+
+ /* Whether we detected the bitstream as 10-bit */
+ int is_10bit;
+
+ /* Coded resolution reported by the hardware */
+ u32 width, height;
+
+ /* All ref frames used by the HW at a given time */
+ struct list_head ref_frames_list;
+ u32 frames_num;
+
+ /*
+ * In case of downsampling (decoding with FBC but outputting in
+ * NV12M), we need to allocate additional buffers for FBC.
+ */
+ void *fbc_buffer_vaddr[MAX_REF_PIC_NUM];
+ dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM];
+
+ int ref_frame_map[REF_FRAMES];
+ int next_ref_frame_map[REF_FRAMES];
+ struct vp9_frame *frame_refs[REFS_PER_FRAME];
+
+ u32 lcu_total;
+
+ /* loop filter */
+ int default_filt_lvl;
+ struct loop_filter_info_n lfi;
+ struct loopfilter lf;
+ struct segmentation seg_4lf;
+
+ struct vp9_frame *cur_frame;
+ struct vp9_frame *prev_frame;
+};
+
+static int div_r32(s64 m, int n)
+{
+ s64 qu = div_s64(m, n);
+
+ return (int)qu;
+}
+
+static int clip_prob(int p)
+{
+ return clamp_val(p, 1, 255);
+}
+
+static int segfeature_active(struct segmentation *seg, int segment_id,
+ enum SEG_LVL_FEATURES feature_id)
+{
+ return seg->enabled &&
+ (seg->feature_mask[segment_id] & (1 << feature_id));
+}
+
+static int get_segdata(struct segmentation *seg, int segment_id,
+ enum SEG_LVL_FEATURES feature_id)
+{
+ return seg->feature_data[segment_id][feature_id];
+}
+
+static void vp9_update_sharpness(struct loop_filter_info_n *lfi,
+ int sharpness_lvl)
+{
+ int lvl;
+
+ /* For each possible value of the loop filter, fill out the limits */
+ for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) {
+ /* Set the loop filter parameters that control sharpness */
+ int block_inside_limit = lvl >> ((sharpness_lvl > 0) +
+ (sharpness_lvl > 4));
+
+ if (sharpness_lvl > 0) {
+ if (block_inside_limit > (9 - sharpness_lvl))
+ block_inside_limit = (9 - sharpness_lvl);
+ }
+
+ if (block_inside_limit < 1)
+ block_inside_limit = 1;
+
+ lfi->lfthr[lvl].lim = (u8)block_inside_limit;
+ lfi->lfthr[lvl].mblim = (u8)(2 * (lvl + 2) +
+ block_inside_limit);
+ }
+}
+
+/* Called once before decoding starts */
+static void
+vp9_loop_filter_init(struct amvdec_core *core, struct codec_vp9 *vp9)
+{
+ struct loop_filter_info_n *lfi = &vp9->lfi;
+ struct loopfilter *lf = &vp9->lf;
+ struct segmentation *seg_4lf = &vp9->seg_4lf;
+ int i;
+
+ memset(lfi, 0, sizeof(struct loop_filter_info_n));
+ memset(lf, 0, sizeof(struct loopfilter));
+ memset(seg_4lf, 0, sizeof(struct segmentation));
+ lf->sharpness_level = 0;
+ vp9_update_sharpness(lfi, lf->sharpness_level);
+ lf->last_sharpness_level = lf->sharpness_level;
+
+ for (i = 0; i < 32; i++) {
+ unsigned int thr;
+
+ thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) |
+ (lfi->lfthr[i * 2 + 1].mblim & 0xff);
+ thr = (thr << 16) | ((lfi->lfthr[i * 2].lim & 0x3f) << 8) |
+ (lfi->lfthr[i * 2].mblim & 0xff);
+
+ amvdec_write_dos(core, HEVC_DBLK_CFG9, thr);
+ }
+
+ if (core->platform->revision >= VDEC_REVISION_SM1)
+ amvdec_write_dos(core, HEVC_DBLK_CFGB,
+ (0x3 << 14) | /* dw fifo thres r and b */
+ (0x3 << 12) | /* dw fifo thres r or b */
+ (0x3 << 10) | /* dw fifo thres not r/b */
+ BIT(0)); /* VP9 video format */
+ else if (core->platform->revision >= VDEC_REVISION_G12A)
+ /* VP9 video format */
+ amvdec_write_dos(core, HEVC_DBLK_CFGB, (0x54 << 8) | BIT(0));
+ else
+ amvdec_write_dos(core, HEVC_DBLK_CFGB, 0x40400001);
+}
+
+static void
+vp9_loop_filter_frame_init(struct amvdec_core *core, struct segmentation *seg,
+ struct loop_filter_info_n *lfi,
+ struct loopfilter *lf, int default_filt_lvl)
+{
+ int i;
+ int seg_id;
+
+ /*
+ * 'scale' is the multiplier applied to lf_deltas:
+ * - 1 when filter_lvl is between 0 and 31
+ * - 2 when filter_lvl is between 32 and 63
+ */
+ const int scale = 1 << (default_filt_lvl >> 5);
+
+ /* update limits if sharpness has changed */
+ if (lf->last_sharpness_level != lf->sharpness_level) {
+ vp9_update_sharpness(lfi, lf->sharpness_level);
+ lf->last_sharpness_level = lf->sharpness_level;
+
+ /* Write to register */
+ for (i = 0; i < 32; i++) {
+ unsigned int thr;
+
+ thr = ((lfi->lfthr[i * 2 + 1].lim & 0x3f) << 8) |
+ (lfi->lfthr[i * 2 + 1].mblim & 0xff);
+ thr = (thr << 16) |
+ ((lfi->lfthr[i * 2].lim & 0x3f) << 8) |
+ (lfi->lfthr[i * 2].mblim & 0xff);
+
+ amvdec_write_dos(core, HEVC_DBLK_CFG9, thr);
+ }
+ }
+
+ for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {
+ int lvl_seg = default_filt_lvl;
+
+ if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
+ const int data = get_segdata(seg, seg_id,
+ SEG_LVL_ALT_LF);
+ lvl_seg = clamp_t(int,
+ seg->abs_delta == SEGMENT_ABSDATA ?
+ data : default_filt_lvl + data,
+ 0, MAX_LOOP_FILTER);
+ }
+
+ if (!lf->mode_ref_delta_enabled) {
+ /*
+ * We could get rid of this if we assume that deltas
+ * are set to zero when not in use; however, the
+ * encoder always uses deltas.
+ */
+ memset(lfi->lvl[seg_id], lvl_seg,
+ sizeof(lfi->lvl[seg_id]));
+ } else {
+ int ref, mode;
+ const int intra_lvl =
+ lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
+ lfi->lvl[seg_id][INTRA_FRAME][0] =
+ clamp_val(intra_lvl, 0, MAX_LOOP_FILTER);
+
+ for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) {
+ for (mode = 0; mode < MAX_MODE_LF_DELTAS;
+ ++mode) {
+ const int inter_lvl =
+ lvl_seg +
+ lf->ref_deltas[ref] * scale +
+ lf->mode_deltas[mode] * scale;
+ lfi->lvl[seg_id][ref][mode] =
+ clamp_val(inter_lvl, 0,
+ MAX_LOOP_FILTER);
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ unsigned int level;
+
+ level = ((lfi->lvl[i >> 1][3][i & 1] & 0x3f) << 24) |
+ ((lfi->lvl[i >> 1][2][i & 1] & 0x3f) << 16) |
+ ((lfi->lvl[i >> 1][1][i & 1] & 0x3f) << 8) |
+ (lfi->lvl[i >> 1][0][i & 1] & 0x3f);
+ if (!default_filt_lvl)
+ level = 0;
+
+ amvdec_write_dos(core, HEVC_DBLK_CFGA, level);
+ }
+}
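+
+/*
+ * The final loop above packs four 6-bit filter levels (one per
+ * reference frame type) into each HEVC_DBLK_CFGA write; the 16 writes
+ * cover the 8 segments times 2 modes.
+ */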
+
+static void codec_vp9_flush_output(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ struct vp9_frame *tmp, *n;
+
+ mutex_lock(&vp9->lock);
+ list_for_each_entry_safe(tmp, n, &vp9->ref_frames_list, list) {
+ if (!tmp->done) {
+ if (tmp->show)
+ amvdec_dst_buf_done(sess, tmp->vbuf,
+ V4L2_FIELD_NONE);
+ else
+ v4l2_m2m_buf_queue(sess->m2m_ctx, tmp->vbuf);
+
+ vp9->frames_num--;
+ }
+
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ mutex_unlock(&vp9->lock);
+}
+
+static u32 codec_vp9_num_pending_bufs(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+
+ if (!vp9)
+ return 0;
+
+ return vp9->frames_num;
+}
+
+static int codec_vp9_alloc_workspace(struct amvdec_core *core,
+ struct codec_vp9 *vp9)
+{
+ /* Allocate some memory for the VP9 decoder's state */
+ vp9->workspace_vaddr = dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
+ &vp9->workspace_paddr,
+ GFP_KERNEL);
+ if (!vp9->workspace_vaddr) {
+ dev_err(core->dev, "Failed to allocate VP9 Workspace\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void codec_vp9_setup_workspace(struct amvdec_session *sess,
+ struct codec_vp9 *vp9)
+{
+ struct amvdec_core *core = sess->core;
+ u32 revision = core->platform->revision;
+ dma_addr_t wkaddr = vp9->workspace_paddr;
+
+ amvdec_write_dos(core, HEVCD_IPP_LINEBUFF_BASE, wkaddr + IPP_OFFSET);
+ amvdec_write_dos(core, VP9_RPM_BUFFER, wkaddr + RPM_OFFSET);
+ amvdec_write_dos(core, VP9_SHORT_TERM_RPS, wkaddr + SH_TM_RPS_OFFSET);
+ amvdec_write_dos(core, VP9_PPS_BUFFER, wkaddr + PPS_OFFSET);
+ amvdec_write_dos(core, VP9_SAO_UP, wkaddr + SAO_UP_OFFSET);
+
+ amvdec_write_dos(core, VP9_STREAM_SWAP_BUFFER,
+ wkaddr + SWAP_BUF_OFFSET);
+ amvdec_write_dos(core, VP9_STREAM_SWAP_BUFFER2,
+ wkaddr + SWAP_BUF2_OFFSET);
+ amvdec_write_dos(core, VP9_SCALELUT, wkaddr + SCALELUT_OFFSET);
+
+ if (core->platform->revision >= VDEC_REVISION_G12A)
+ amvdec_write_dos(core, HEVC_DBLK_CFGE,
+ wkaddr + DBLK_PARA_OFFSET);
+
+ amvdec_write_dos(core, HEVC_DBLK_CFG4, wkaddr + DBLK_PARA_OFFSET);
+ amvdec_write_dos(core, HEVC_DBLK_CFG5, wkaddr + DBLK_DATA_OFFSET);
+ amvdec_write_dos(core, VP9_SEG_MAP_BUFFER, wkaddr + SEG_MAP_OFFSET);
+ amvdec_write_dos(core, VP9_PROB_SWAP_BUFFER, wkaddr + PROB_OFFSET);
+ amvdec_write_dos(core, VP9_COUNT_SWAP_BUFFER, wkaddr + COUNT_OFFSET);
+ amvdec_write_dos(core, LMEM_DUMP_ADR, wkaddr + LMEM_OFFSET);
+
+ if (codec_hevc_use_mmu(revision, sess->pixfmt_cap, vp9->is_10bit)) {
+ amvdec_write_dos(core, HEVC_SAO_MMU_VH0_ADDR,
+ wkaddr + MMU_VBH_OFFSET);
+ amvdec_write_dos(core, HEVC_SAO_MMU_VH1_ADDR,
+ wkaddr + MMU_VBH_OFFSET + (MMU_VBH_SIZE / 2));
+
+ if (revision >= VDEC_REVISION_G12A)
+ amvdec_write_dos(core, HEVC_ASSIST_MMU_MAP_ADDR,
+ vp9->common.mmu_map_paddr);
+ else
+ amvdec_write_dos(core, VP9_MMU_MAP_BUFFER,
+ vp9->common.mmu_map_paddr);
+ }
+}
+
+static int codec_vp9_start(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9;
+ u32 val;
+ int i;
+ int ret;
+
+ vp9 = kzalloc(sizeof(*vp9), GFP_KERNEL);
+ if (!vp9)
+ return -ENOMEM;
+
+ ret = codec_vp9_alloc_workspace(core, vp9);
+ if (ret)
+ goto free_vp9;
+
+ codec_vp9_setup_workspace(sess, vp9);
+ amvdec_write_dos_bits(core, HEVC_STREAM_CONTROL, BIT(0));
+ /* stream_fifo_hole */
+ if (core->platform->revision >= VDEC_REVISION_G12A)
+ amvdec_write_dos_bits(core, HEVC_STREAM_FIFO_CTL, BIT(29));
+
+ val = amvdec_read_dos(core, HEVC_PARSER_INT_CONTROL) & 0x7fffffff;
+ val |= (3 << 29) | BIT(24) | BIT(22) | BIT(7) | BIT(4) | BIT(0);
+ amvdec_write_dos(core, HEVC_PARSER_INT_CONTROL, val);
+ amvdec_write_dos_bits(core, HEVC_SHIFT_STATUS, BIT(0));
+ amvdec_write_dos(core, HEVC_SHIFT_CONTROL, BIT(10) | BIT(9) |
+ (3 << 6) | BIT(5) | BIT(2) | BIT(1) | BIT(0));
+ amvdec_write_dos(core, HEVC_CABAC_CONTROL, BIT(0));
+ amvdec_write_dos(core, HEVC_PARSER_CORE_CONTROL, BIT(0));
+ amvdec_write_dos(core, HEVC_SHIFT_STARTCODE, 0x00000001);
+
+ amvdec_write_dos(core, VP9_DEC_STATUS_REG, 0);
+
+ amvdec_write_dos(core, HEVC_PARSER_CMD_WRITE, BIT(16));
+ for (i = 0; i < ARRAY_SIZE(vdec_hevc_parser_cmd); ++i)
+ amvdec_write_dos(core, HEVC_PARSER_CMD_WRITE,
+ vdec_hevc_parser_cmd[i]);
+
+ amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_0, PARSER_CMD_SKIP_CFG_0);
+ amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_1, PARSER_CMD_SKIP_CFG_1);
+ amvdec_write_dos(core, HEVC_PARSER_CMD_SKIP_2, PARSER_CMD_SKIP_CFG_2);
+ amvdec_write_dos(core, HEVC_PARSER_IF_CONTROL,
+ BIT(5) | BIT(2) | BIT(0));
+
+ amvdec_write_dos(core, HEVCD_IPP_TOP_CNTL, BIT(0));
+ amvdec_write_dos(core, HEVCD_IPP_TOP_CNTL, BIT(1));
+
+ amvdec_write_dos(core, VP9_WAIT_FLAG, 1);
+
+ /* clear mailbox interrupt */
+ amvdec_write_dos(core, HEVC_ASSIST_MBOX1_CLR_REG, 1);
+ /* enable mailbox interrupt */
+ amvdec_write_dos(core, HEVC_ASSIST_MBOX1_MASK, 1);
+ /* disable PSCALE for hardware sharing */
+ amvdec_write_dos(core, HEVC_PSCALE_CTRL, 0);
+ /* Let the uCode do all the parsing */
+ amvdec_write_dos(core, NAL_SEARCH_CTL, 0x8);
+
+ amvdec_write_dos(core, DECODE_STOP_POS, 0);
+ amvdec_write_dos(core, VP9_DECODE_MODE, DECODE_MODE_SINGLE);
+
+ pr_debug("decode_count: %u; decode_size: %u\n",
+ amvdec_read_dos(core, HEVC_DECODE_COUNT),
+ amvdec_read_dos(core, HEVC_DECODE_SIZE));
+
+ vp9_loop_filter_init(core, vp9);
+
+ INIT_LIST_HEAD(&vp9->ref_frames_list);
+ mutex_init(&vp9->lock);
+ memset(&vp9->ref_frame_map, -1, sizeof(vp9->ref_frame_map));
+ memset(&vp9->next_ref_frame_map, -1, sizeof(vp9->next_ref_frame_map));
+ for (i = 0; i < REFS_PER_FRAME; ++i)
+ vp9->frame_refs[i] = NULL;
+ sess->priv = vp9;
+
+ return 0;
+
+free_vp9:
+ kfree(vp9);
+ return ret;
+}
+
+static int codec_vp9_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+
+ mutex_lock(&vp9->lock);
+ if (vp9->workspace_vaddr)
+ dma_free_coherent(core->dev, SIZE_WORKSPACE,
+ vp9->workspace_vaddr,
+ vp9->workspace_paddr);
+
+ codec_hevc_free_fbc_buffers(sess, &vp9->common);
+ mutex_unlock(&vp9->lock);
+
+ return 0;
+}
+
+static void codec_vp9_set_sao(struct amvdec_session *sess,
+ struct vb2_buffer *vb)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+
+ dma_addr_t buf_y_paddr;
+ dma_addr_t buf_u_v_paddr;
+ u32 val;
+
+ if (codec_hevc_use_downsample(sess->pixfmt_cap, vp9->is_10bit))
+ buf_y_paddr = vp9->common.fbc_buffer_paddr[vb->index];
+ else
+ buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) {
+ val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0200;
+ amvdec_write_dos(core, HEVC_SAO_CTRL5, val);
+ amvdec_write_dos(core, HEVC_CM_BODY_START_ADDR, buf_y_paddr);
+ }
+
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M) {
+ buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf_u_v_paddr = vb2_dma_contig_plane_dma_addr(vb, 1);
+ amvdec_write_dos(core, HEVC_SAO_Y_START_ADDR, buf_y_paddr);
+ amvdec_write_dos(core, HEVC_SAO_C_START_ADDR, buf_u_v_paddr);
+ amvdec_write_dos(core, HEVC_SAO_Y_WPTR, buf_y_paddr);
+ amvdec_write_dos(core, HEVC_SAO_C_WPTR, buf_u_v_paddr);
+ }
+
+ if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap,
+ vp9->is_10bit)) {
+ amvdec_write_dos(core, HEVC_CM_HEADER_START_ADDR,
+ vp9->common.mmu_header_paddr[vb->index]);
+ /* use HEVC_CM_HEADER_START_ADDR */
+ amvdec_write_dos_bits(core, HEVC_SAO_CTRL5, BIT(10));
+ }
+
+ amvdec_write_dos(core, HEVC_SAO_Y_LENGTH,
+ amvdec_get_output_size(sess));
+ amvdec_write_dos(core, HEVC_SAO_C_LENGTH,
+ (amvdec_get_output_size(sess) / 2));
+
+ if (core->platform->revision >= VDEC_REVISION_G12A) {
+ amvdec_clear_dos_bits(core, HEVC_DBLK_CFGB,
+ BIT(4) | BIT(5) | BIT(8) | BIT(9));
+ /* enable first, compressed write */
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit))
+ amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(8));
+
+ /* enable second, uncompressed write */
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
+ amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(9));
+
+ /* dblk pipeline mode=1 for performance */
+ if (sess->width >= 1280)
+ amvdec_write_dos_bits(core, HEVC_DBLK_CFGB, BIT(4));
+
+ pr_debug("HEVC_DBLK_CFGB: %08X\n",
+ amvdec_read_dos(core, HEVC_DBLK_CFGB));
+ }
+
+ val = amvdec_read_dos(core, HEVC_SAO_CTRL1) & ~0x3ff0;
+ val |= 0xff0; /* Set endianness for 2-byte swaps (NV12) */
+ if (core->platform->revision < VDEC_REVISION_G12A) {
+ val &= ~0x3;
+ if (!codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit))
+ val |= BIT(0); /* disable cm compression */
+ /* TOFIX: Handle Amlogic Framebuffer compression */
+ }
+
+ amvdec_write_dos(core, HEVC_SAO_CTRL1, val);
+ pr_debug("HEVC_SAO_CTRL1: %08X\n", val);
+
+ /* no downscale for NV12 */
+ val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0000;
+ amvdec_write_dos(core, HEVC_SAO_CTRL5, val);
+
+ val = amvdec_read_dos(core, HEVCD_IPP_AXIIF_CONFIG) & ~0x30;
+ val |= 0xf;
+ val &= ~BIT(12); /* NV12 */
+ amvdec_write_dos(core, HEVCD_IPP_AXIIF_CONFIG, val);
+}
+
+static dma_addr_t codec_vp9_get_frame_mv_paddr(struct codec_vp9 *vp9,
+ struct vp9_frame *frame)
+{
+ return vp9->workspace_paddr + MPRED_MV_OFFSET +
+ (frame->index * MPRED_MV_BUF_SIZE);
+}
+
+static void codec_vp9_set_mpred_mv(struct amvdec_core *core,
+ struct codec_vp9 *vp9)
+{
+ int mpred_mv_rd_end_addr;
+ int use_prev_frame_mvs = vp9->prev_frame->width ==
+ vp9->cur_frame->width &&
+ vp9->prev_frame->height ==
+ vp9->cur_frame->height &&
+ !vp9->prev_frame->intra_only &&
+ vp9->prev_frame->show &&
+ vp9->prev_frame->type != KEY_FRAME;
+
+ amvdec_write_dos(core, HEVC_MPRED_CTRL3, 0x24122412);
+ amvdec_write_dos(core, HEVC_MPRED_ABV_START_ADDR,
+ vp9->workspace_paddr + MPRED_ABV_OFFSET);
+
+ amvdec_clear_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
+ if (use_prev_frame_mvs)
+ amvdec_write_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
+
+ amvdec_write_dos(core, HEVC_MPRED_MV_WR_START_ADDR,
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->cur_frame));
+ amvdec_write_dos(core, HEVC_MPRED_MV_WPTR,
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->cur_frame));
+
+ amvdec_write_dos(core, HEVC_MPRED_MV_RD_START_ADDR,
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame));
+ amvdec_write_dos(core, HEVC_MPRED_MV_RPTR,
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame));
+
+ mpred_mv_rd_end_addr =
+ codec_vp9_get_frame_mv_paddr(vp9, vp9->prev_frame) +
+ (vp9->lcu_total * MV_MEM_UNIT);
+ amvdec_write_dos(core, HEVC_MPRED_MV_RD_END_ADDR, mpred_mv_rd_end_addr);
+}
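+
+/*
+ * Motion-vector prediction from the previous frame (BIT(6) of
+ * HEVC_MPRED_CTRL4) is only enabled under what looks like the spec's
+ * use_prev_frame_mvs rule: same dimensions, previous frame shown,
+ * not intra-only and not a keyframe.
+ */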
+
+static void codec_vp9_update_next_ref(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ u32 buf_idx = vp9->cur_frame->index;
+ int ref_index = 0;
+ int refresh_frame_flags;
+ int mask;
+
+ refresh_frame_flags = vp9->cur_frame->type == KEY_FRAME ?
+ 0xff : param->p.refresh_frame_flags;
+
+ for (mask = refresh_frame_flags; mask; mask >>= 1) {
+ pr_debug("mask=%08X; ref_index=%d\n", mask, ref_index);
+ if (mask & 1)
+ vp9->next_ref_frame_map[ref_index] = buf_idx;
+ else
+ vp9->next_ref_frame_map[ref_index] =
+ vp9->ref_frame_map[ref_index];
+
+ ++ref_index;
+ }
+
+ for (; ref_index < REF_FRAMES; ++ref_index)
+ vp9->next_ref_frame_map[ref_index] =
+ vp9->ref_frame_map[ref_index];
+}
+
+static void codec_vp9_save_refs(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int i;
+
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ const int ref = (param->p.ref_info >>
+ (((REFS_PER_FRAME - i - 1) * 4) + 1)) & 0x7;
+
+ if (vp9->ref_frame_map[ref] < 0)
+ continue;
+
+ pr_warn("%s: FIXME, would need to save ref %d\n",
+ __func__, vp9->ref_frame_map[ref]);
+ }
+}
+
+static void codec_vp9_update_ref(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int ref_index = 0;
+ int mask;
+ int refresh_frame_flags;
+
+ if (!vp9->cur_frame)
+ return;
+
+ refresh_frame_flags = vp9->cur_frame->type == KEY_FRAME ?
+ 0xff : param->p.refresh_frame_flags;
+
+ for (mask = refresh_frame_flags; mask; mask >>= 1) {
+ vp9->ref_frame_map[ref_index] =
+ vp9->next_ref_frame_map[ref_index];
+ ++ref_index;
+ }
+
+ if (param->p.show_existing_frame)
+ return;
+
+ for (; ref_index < REF_FRAMES; ++ref_index)
+ vp9->ref_frame_map[ref_index] =
+ vp9->next_ref_frame_map[ref_index];
+}
+
+static struct vp9_frame *codec_vp9_get_frame_by_idx(struct codec_vp9 *vp9,
+ int idx)
+{
+ struct vp9_frame *frame;
+
+ list_for_each_entry(frame, &vp9->ref_frames_list, list) {
+ if (frame->index == idx)
+ return frame;
+ }
+
+ return NULL;
+}
+
+static void codec_vp9_sync_ref(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int i;
+
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ const int ref = (param->p.ref_info >>
+ (((REFS_PER_FRAME - i - 1) * 4) + 1)) & 0x7;
+ const int idx = vp9->ref_frame_map[ref];
+
+ vp9->frame_refs[i] = codec_vp9_get_frame_by_idx(vp9, idx);
+ if (!vp9->frame_refs[i])
+ pr_warn("%s: couldn't find VP9 ref %d\n", __func__,
+ idx);
+ }
+}
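+
+/*
+ * ref_info appears to pack one 4-bit field per active reference,
+ * most significant first, with bits [3:1] of each field selecting
+ * the ref_frame_map slot, as decoded above and in
+ * codec_vp9_save_refs().
+ */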
+
+static void codec_vp9_set_refs(struct amvdec_session *sess,
+ struct codec_vp9 *vp9)
+{
+ struct amvdec_core *core = sess->core;
+ int i;
+
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ struct vp9_frame *frame = vp9->frame_refs[i];
+ int id_y;
+ int id_u_v;
+
+ if (!frame)
+ continue;
+
+ if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) {
+ id_y = frame->index;
+ id_u_v = id_y;
+ } else {
+ id_y = frame->index * 2;
+ id_u_v = id_y + 1;
+ }
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_DATA_ADDR,
+ (id_u_v << 16) | (id_u_v << 8) | id_y);
+ }
+}
+
+static void codec_vp9_set_mc(struct amvdec_session *sess,
+ struct codec_vp9 *vp9)
+{
+ struct amvdec_core *core = sess->core;
+ u32 scale = 0;
+ u32 sz;
+ int i;
+
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, 1);
+ codec_vp9_set_refs(sess, vp9);
+ amvdec_write_dos(core, HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
+ (16 << 8) | 1);
+ codec_vp9_set_refs(sess, vp9);
+
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_TBL_ACCCONFIG, BIT(2));
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ if (!vp9->frame_refs[i])
+ continue;
+
+ if (vp9->frame_refs[i]->width != vp9->width ||
+ vp9->frame_refs[i]->height != vp9->height)
+ scale = 1;
+
+ sz = amvdec_am21c_body_size(vp9->frame_refs[i]->width,
+ vp9->frame_refs[i]->height);
+
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
+ vp9->frame_refs[i]->width);
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
+ vp9->frame_refs[i]->height);
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
+ (vp9->frame_refs[i]->width << 14) /
+ vp9->width);
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
+ (vp9->frame_refs[i]->height << 14) /
+ vp9->height);
+ amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA, sz >> 5);
+ }
+
+ amvdec_write_dos(core, VP9D_MPP_REF_SCALE_ENBL, scale);
+}
+
+static struct vp9_frame *codec_vp9_get_new_frame(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ union rpm_param *param = &vp9->rpm_param;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vp9_frame *new_frame;
+
+ new_frame = kzalloc(sizeof(*new_frame), GFP_KERNEL);
+ if (!new_frame)
+ return NULL;
+
+ vbuf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx);
+ if (!vbuf) {
+ dev_err(sess->core->dev, "No dst buffer available\n");
+ kfree(new_frame);
+ return NULL;
+ }
+
+ while (codec_vp9_get_frame_by_idx(vp9, vbuf->vb2_buf.index)) {
+ struct vb2_v4l2_buffer *old_vbuf = vbuf;
+
+ vbuf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx);
+ v4l2_m2m_buf_queue(sess->m2m_ctx, old_vbuf);
+ if (!vbuf) {
+ dev_err(sess->core->dev, "No dst buffer available\n");
+ kfree(new_frame);
+ return NULL;
+ }
+ }
+
+ new_frame->vbuf = vbuf;
+ new_frame->index = vbuf->vb2_buf.index;
+ new_frame->intra_only = param->p.intra_only;
+ new_frame->show = param->p.show_frame;
+ new_frame->type = param->p.frame_type;
+ new_frame->width = vp9->width;
+ new_frame->height = vp9->height;
+ list_add_tail(&new_frame->list, &vp9->ref_frames_list);
+ vp9->frames_num++;
+
+ return new_frame;
+}
+
+static void codec_vp9_show_existing_frame(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+
+ if (!param->p.show_existing_frame)
+ return;
+
+ pr_debug("showing frame %u\n", param->p.frame_to_show_idx);
+}
+
+static void codec_vp9_rm_noshow_frame(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ struct vp9_frame *tmp;
+
+ list_for_each_entry(tmp, &vp9->ref_frames_list, list) {
+ if (tmp->show)
+ continue;
+
+ pr_debug("rm noshow: %u\n", tmp->index);
+ v4l2_m2m_buf_queue(sess->m2m_ctx, tmp->vbuf);
+ list_del(&tmp->list);
+ kfree(tmp);
+ vp9->frames_num--;
+ return;
+ }
+}
+
+static void codec_vp9_process_frame(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+ union rpm_param *param = &vp9->rpm_param;
+ int intra_only;
+
+ if (!param->p.show_frame)
+ codec_vp9_rm_noshow_frame(sess);
+
+ vp9->cur_frame = codec_vp9_get_new_frame(sess);
+ if (!vp9->cur_frame)
+ return;
+
+ pr_debug("frame %d: type: %08X; show_exist: %u; show: %u, intra_only: %u\n",
+ vp9->cur_frame->index,
+ param->p.frame_type, param->p.show_existing_frame,
+ param->p.show_frame, param->p.intra_only);
+
+ if (param->p.frame_type != KEY_FRAME)
+ codec_vp9_sync_ref(vp9);
+ codec_vp9_update_next_ref(vp9);
+ codec_vp9_show_existing_frame(vp9);
+
+ if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap,
+ vp9->is_10bit))
+ codec_hevc_fill_mmu_map(sess, &vp9->common,
+ &vp9->cur_frame->vbuf->vb2_buf);
+
+ intra_only = param->p.show_frame ? 0 : param->p.intra_only;
+
+ /* Set up motion prediction for inter frames; clear mpred for key/intra-only frames */
+ if (param->p.frame_type != KEY_FRAME && !intra_only) {
+ codec_vp9_set_mc(sess, vp9);
+ codec_vp9_set_mpred_mv(core, vp9);
+ } else {
+ amvdec_clear_dos_bits(core, HEVC_MPRED_CTRL4, BIT(6));
+ }
+
+ amvdec_write_dos(core, HEVC_PARSER_PICTURE_SIZE,
+ (vp9->height << 16) | vp9->width);
+ codec_vp9_set_sao(sess, &vp9->cur_frame->vbuf->vb2_buf);
+
+ vp9_loop_filter_frame_init(core, &vp9->seg_4lf,
+ &vp9->lfi, &vp9->lf,
+ vp9->default_filt_lvl);
+
+ /* ask uCode to start decoding */
+ amvdec_write_dos(core, VP9_DEC_STATUS_REG, VP9_10B_DECODE_SLICE);
+}
+
+static void codec_vp9_process_lf(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int i;
+
+ vp9->lf.mode_ref_delta_enabled = param->p.mode_ref_delta_enabled;
+ vp9->lf.sharpness_level = param->p.sharpness_level;
+ vp9->default_filt_lvl = param->p.filter_level;
+ vp9->seg_4lf.enabled = param->p.seg_enabled;
+ vp9->seg_4lf.abs_delta = param->p.seg_abs_delta;
+
+ for (i = 0; i < 4; i++)
+ vp9->lf.ref_deltas[i] = param->p.ref_deltas[i];
+
+ for (i = 0; i < 2; i++)
+ vp9->lf.mode_deltas[i] = param->p.mode_deltas[i];
+
+ for (i = 0; i < MAX_SEGMENTS; i++)
+ vp9->seg_4lf.feature_mask[i] =
+ (param->p.seg_lf_info[i] & 0x8000) ?
+ (1 << SEG_LVL_ALT_LF) : 0;
+
+ for (i = 0; i < MAX_SEGMENTS; i++)
+ vp9->seg_4lf.feature_data[i][SEG_LVL_ALT_LF] =
+ (param->p.seg_lf_info[i] & 0x100) ?
+ -(param->p.seg_lf_info[i] & 0x3f)
+ : (param->p.seg_lf_info[i] & 0x3f);
+}
+
+static void codec_vp9_resume(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+
+ mutex_lock(&vp9->lock);
+ if (codec_hevc_setup_buffers(sess, &vp9->common, vp9->is_10bit)) {
+ mutex_unlock(&vp9->lock);
+ amvdec_abort(sess);
+ return;
+ }
+
+ codec_vp9_setup_workspace(sess, vp9);
+ codec_hevc_setup_decode_head(sess, vp9->is_10bit);
+ codec_vp9_process_lf(vp9);
+ codec_vp9_process_frame(sess);
+
+ mutex_unlock(&vp9->lock);
+}
+
+/*
+ * The RPM section within the workspace contains a lot of
+ * information regarding the parsed bitstream
+ */
+static void codec_vp9_fetch_rpm(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ u16 *rpm_vaddr = vp9->workspace_vaddr + RPM_OFFSET;
+ int i, j;
+
+ for (i = 0; i < RPM_BUF_SIZE; i += 4)
+ for (j = 0; j < 4; j++)
+ vp9->rpm_param.l.data[i + j] = rpm_vaddr[i + 3 - j];
+}
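+
+/*
+ * The byte-lane swap above (i + 3 - j) suggests the firmware dumps
+ * the RPM area in 64-bit units with the four 16-bit lanes reversed;
+ * copying it this way restores the natural ordering expected by
+ * union rpm_param.
+ */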
+
+static int codec_vp9_process_rpm(struct codec_vp9 *vp9)
+{
+ union rpm_param *param = &vp9->rpm_param;
+ int src_changed = 0;
+ int is_10bit = 0;
+ int pic_width_64 = ALIGN(param->p.width, 64);
+ int pic_height_32 = ALIGN(param->p.height, 32);
+ int pic_width_lcu = (pic_width_64 % LCU_SIZE) ?
+ pic_width_64 / LCU_SIZE + 1
+ : pic_width_64 / LCU_SIZE;
+ int pic_height_lcu = (pic_height_32 % LCU_SIZE) ?
+ pic_height_32 / LCU_SIZE + 1
+ : pic_height_32 / LCU_SIZE;
+ vp9->lcu_total = pic_width_lcu * pic_height_lcu;
+
+ if (param->p.bit_depth == 10)
+ is_10bit = 1;
+
+ if (vp9->width != param->p.width || vp9->height != param->p.height ||
+ vp9->is_10bit != is_10bit)
+ src_changed = 1;
+
+ vp9->width = param->p.width;
+ vp9->height = param->p.height;
+ vp9->is_10bit = is_10bit;
+
+ pr_debug("width: %u; height: %u; is_10bit: %d; src_changed: %d\n",
+ vp9->width, vp9->height, is_10bit, src_changed);
+
+ return src_changed;
+}
+
+static bool codec_vp9_is_ref(struct codec_vp9 *vp9, struct vp9_frame *frame)
+{
+ int i;
+
+ for (i = 0; i < REF_FRAMES; ++i)
+ if (vp9->ref_frame_map[i] == frame->index)
+ return true;
+
+ return false;
+}
+
+static void codec_vp9_show_frame(struct amvdec_session *sess)
+{
+ struct codec_vp9 *vp9 = sess->priv;
+ struct vp9_frame *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &vp9->ref_frames_list, list) {
+ if (!tmp->show || tmp == vp9->cur_frame)
+ continue;
+
+ if (!tmp->done) {
+ pr_debug("Doning %u\n", tmp->index);
+ amvdec_dst_buf_done(sess, tmp->vbuf, V4L2_FIELD_NONE);
+ tmp->done = 1;
+ vp9->frames_num--;
+ }
+
+ if (codec_vp9_is_ref(vp9, tmp) || tmp == vp9->prev_frame)
+ continue;
+
+ pr_debug("deleting %d\n", tmp->index);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void vp9_tree_merge_probs(unsigned int *prev_prob,
+ unsigned int *cur_prob,
+ int coef_node_start, int tree_left,
+ int tree_right,
+ int tree_i, int node)
+{
+ int prob_32, prob_res, prob_shift;
+ int pre_prob, new_prob;
+ int den, m_count, get_prob, factor;
+
+ prob_32 = prev_prob[coef_node_start / 4 * 2];
+ prob_res = coef_node_start & 3;
+ prob_shift = prob_res * 8;
+ pre_prob = (prob_32 >> prob_shift) & 0xff;
+
+ den = tree_left + tree_right;
+
+ if (den == 0) {
+ new_prob = pre_prob;
+ } else {
+ m_count = den < MODE_MV_COUNT_SAT ? den : MODE_MV_COUNT_SAT;
+ get_prob =
+ clip_prob(div_r32(((int64_t)tree_left * 256 +
+ (den >> 1)),
+ den));
+
+ /* weighted_prob */
+ factor = count_to_update_factor[m_count];
+ new_prob = round_power_of_two(pre_prob * (256 - factor) +
+ get_prob * factor, 8);
+ }
+
+ cur_prob[coef_node_start / 4 * 2] =
+ (cur_prob[coef_node_start / 4 * 2] & (~(0xff << prob_shift))) |
+ (new_prob << prob_shift);
+}
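+
+/*
+ * vp9_tree_merge_probs() implements the standard VP9
+ * backward-adaptation step: the observed branch probability
+ * clip(256 * left / (left + right)) is blended with the previous
+ * probability using count_to_update_factor[], operating on the
+ * in-memory copies of the probability tables.
+ */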
+
+static void adapt_coef_probs_cxt(unsigned int *prev_prob,
+ unsigned int *cur_prob,
+ unsigned int *count,
+ int update_factor,
+ int cxt_num,
+ int coef_cxt_start,
+ int coef_count_cxt_start)
+{
+ int prob_32, prob_res, prob_shift;
+ int pre_prob, new_prob;
+ int num, den, m_count, get_prob, factor;
+ int node, coef_node_start;
+ int count_sat = 24;
+ int cxt;
+
+ for (cxt = 0; cxt < cxt_num; cxt++) {
+ const int n0 = count[coef_count_cxt_start];
+ const int n1 = count[coef_count_cxt_start + 1];
+ const int n2 = count[coef_count_cxt_start + 2];
+ const int neob = count[coef_count_cxt_start + 3];
+ const int nneob = count[coef_count_cxt_start + 4];
+ const unsigned int branch_ct[3][2] = {
+ { neob, nneob },
+ { n0, n1 + n2 },
+ { n1, n2 }
+ };
+
+ coef_node_start = coef_cxt_start;
+ for (node = 0; node < 3; node++) {
+ prob_32 = prev_prob[coef_node_start / 4 * 2];
+ prob_res = coef_node_start & 3;
+ prob_shift = prob_res * 8;
+ pre_prob = (prob_32 >> prob_shift) & 0xff;
+
+ /* get binary prob */
+ num = branch_ct[node][0];
+ den = branch_ct[node][0] + branch_ct[node][1];
+ m_count = den < count_sat ? den : count_sat;
+
+ get_prob = (den == 0) ?
+ 128u :
+ clip_prob(div_r32(((int64_t)num * 256 +
+ (den >> 1)), den));
+
+ factor = update_factor * m_count / count_sat;
+ new_prob =
+ round_power_of_two(pre_prob * (256 - factor) +
+ get_prob * factor, 8);
+
+ cur_prob[coef_node_start / 4 * 2] =
+ (cur_prob[coef_node_start / 4 * 2] &
+ (~(0xff << prob_shift))) |
+ (new_prob << prob_shift);
+
+ coef_node_start += 1;
+ }
+
+ coef_cxt_start = coef_cxt_start + 3;
+ coef_count_cxt_start = coef_count_cxt_start + 5;
+ }
+}
+
+static void adapt_coef_probs(int prev_kf, int cur_kf, int pre_fc,
+ unsigned int *prev_prob, unsigned int *cur_prob,
+ unsigned int *count)
+{
+ int tx_size, coef_tx_size_start, coef_count_tx_size_start;
+ int plane, coef_plane_start, coef_count_plane_start;
+ int type, coef_type_start, coef_count_type_start;
+ int band, coef_band_start, coef_count_band_start;
+ int cxt_num;
+ int coef_cxt_start, coef_count_cxt_start;
+ int node, coef_node_start, coef_count_node_start;
+
+ int tree_i, tree_left, tree_right;
+ int mvd_i;
+
+ int update_factor = cur_kf ? 112 : (prev_kf ? 128 : 112);
+
+ int prob_32;
+ int prob_res;
+ int prob_shift;
+ int pre_prob;
+
+ int den;
+ int get_prob;
+ int m_count;
+ int factor;
+
+ int new_prob;
+
+ for (tx_size = 0; tx_size < 4; tx_size++) {
+ coef_tx_size_start = VP9_COEF_START +
+ tx_size * 4 * VP9_COEF_SIZE_ONE_SET;
+ coef_count_tx_size_start = VP9_COEF_COUNT_START +
+ tx_size * 4 * VP9_COEF_COUNT_SIZE_ONE_SET;
+ coef_plane_start = coef_tx_size_start;
+ coef_count_plane_start = coef_count_tx_size_start;
+
+ for (plane = 0; plane < 2; plane++) {
+ coef_type_start = coef_plane_start;
+ coef_count_type_start = coef_count_plane_start;
+
+ for (type = 0; type < 2; type++) {
+ coef_band_start = coef_type_start;
+ coef_count_band_start = coef_count_type_start;
+
+ for (band = 0; band < 6; band++) {
+ if (band == 0)
+ cxt_num = 3;
+ else
+ cxt_num = 6;
+ coef_cxt_start = coef_band_start;
+ coef_count_cxt_start =
+ coef_count_band_start;
+
+ adapt_coef_probs_cxt(prev_prob,
+ cur_prob,
+ count,
+ update_factor,
+ cxt_num,
+ coef_cxt_start,
+ coef_count_cxt_start);
+
+ if (band == 0) {
+ coef_band_start += 10;
+ coef_count_band_start += 15;
+ } else {
+ coef_band_start += 18;
+ coef_count_band_start += 30;
+ }
+ }
+ coef_type_start += VP9_COEF_SIZE_ONE_SET;
+ coef_count_type_start +=
+ VP9_COEF_COUNT_SIZE_ONE_SET;
+ }
+
+ coef_plane_start += 2 * VP9_COEF_SIZE_ONE_SET;
+ coef_count_plane_start +=
+ 2 * VP9_COEF_COUNT_SIZE_ONE_SET;
+ }
+ }
+
+ if (cur_kf == 0) {
+ /* mode_mv_merge_probs - merge_intra_inter_prob */
+ for (coef_count_node_start = VP9_INTRA_INTER_COUNT_START;
+ coef_count_node_start < (VP9_MV_CLASS0_HP_1_COUNT_START +
+ VP9_MV_CLASS0_HP_1_COUNT_SIZE);
+ coef_count_node_start += 2) {
+ if (coef_count_node_start ==
+ VP9_INTRA_INTER_COUNT_START)
+ coef_node_start = VP9_INTRA_INTER_START;
+ else if (coef_count_node_start ==
+ VP9_COMP_INTER_COUNT_START)
+ coef_node_start = VP9_COMP_INTER_START;
+ else if (coef_count_node_start ==
+ VP9_TX_MODE_COUNT_START)
+ coef_node_start = VP9_TX_MODE_START;
+ else if (coef_count_node_start ==
+ VP9_SKIP_COUNT_START)
+ coef_node_start = VP9_SKIP_START;
+ else if (coef_count_node_start ==
+ VP9_MV_SIGN_0_COUNT_START)
+ coef_node_start = VP9_MV_SIGN_0_START;
+ else if (coef_count_node_start ==
+ VP9_MV_SIGN_1_COUNT_START)
+ coef_node_start = VP9_MV_SIGN_1_START;
+ else if (coef_count_node_start ==
+ VP9_MV_BITS_0_COUNT_START)
+ coef_node_start = VP9_MV_BITS_0_START;
+ else if (coef_count_node_start ==
+ VP9_MV_BITS_1_COUNT_START)
+ coef_node_start = VP9_MV_BITS_1_START;
+ else if (coef_count_node_start ==
+ VP9_MV_CLASS0_HP_0_COUNT_START)
+ coef_node_start = VP9_MV_CLASS0_HP_0_START;
+
+ den = count[coef_count_node_start] +
+ count[coef_count_node_start + 1];
+
+ prob_32 = prev_prob[coef_node_start / 4 * 2];
+ prob_res = coef_node_start & 3;
+ prob_shift = prob_res * 8;
+ pre_prob = (prob_32 >> prob_shift) & 0xff;
+
+ if (den == 0) {
+ new_prob = pre_prob;
+ } else {
+ m_count = den < MODE_MV_COUNT_SAT ?
+ den : MODE_MV_COUNT_SAT;
+ get_prob =
+ clip_prob(div_r32(((int64_t)
+ count[coef_count_node_start] * 256 +
+ (den >> 1)),
+ den));
+
+ /* weighted prob */
+ factor = count_to_update_factor[m_count];
+ new_prob =
+ round_power_of_two(pre_prob *
+ (256 - factor) +
+ get_prob * factor,
+ 8);
+ }
+
+ cur_prob[coef_node_start / 4 * 2] =
+ (cur_prob[coef_node_start / 4 * 2] &
+ (~(0xff << prob_shift))) |
+ (new_prob << prob_shift);
+
+ coef_node_start = coef_node_start + 1;
+ }
+
+ coef_node_start = VP9_INTER_MODE_START;
+ coef_count_node_start = VP9_INTER_MODE_COUNT_START;
+ for (tree_i = 0 ; tree_i < 7 ; tree_i++) {
+ for (node = 0 ; node < 3 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 2:
+ tree_left = count[start + 1];
+ tree_right = count[start + 3];
+ break;
+ case 1:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 3];
+ break;
+ default:
+ tree_left = count[start + 2];
+ tree_right = count[start + 0] +
+ count[start + 1] +
+ count[start + 3];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+
+ coef_count_node_start = coef_count_node_start + 4;
+ }
+
+ coef_node_start = VP9_IF_Y_MODE_START;
+ coef_count_node_start = VP9_IF_Y_MODE_COUNT_START;
+ for (tree_i = 0 ; tree_i < 14 ; tree_i++) {
+ for (node = 0 ; node < 9 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 8:
+ tree_left =
+ count[start + D153_PRED];
+ tree_right =
+ count[start + D207_PRED];
+ break;
+ case 7:
+ tree_left =
+ count[start + D63_PRED];
+ tree_right =
+ count[start + D207_PRED] +
+ count[start + D153_PRED];
+ break;
+ case 6:
+ tree_left =
+ count[start + D45_PRED];
+ tree_right =
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ case 5:
+ tree_left =
+ count[start + D135_PRED];
+ tree_right =
+ count[start + D117_PRED];
+ break;
+ case 4:
+ tree_left =
+ count[start + H_PRED];
+ tree_right =
+ count[start + D117_PRED] +
+ count[start + D135_PRED];
+ break;
+ case 3:
+ tree_left =
+ count[start + H_PRED] +
+ count[start + D117_PRED] +
+ count[start + D135_PRED];
+ tree_right =
+ count[start + D45_PRED] +
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ case 2:
+ tree_left =
+ count[start + V_PRED];
+ tree_right =
+ count[start + H_PRED] +
+ count[start + D117_PRED] +
+ count[start + D135_PRED] +
+ count[start + D45_PRED] +
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ case 1:
+ tree_left =
+ count[start + TM_PRED];
+ tree_right =
+ count[start + V_PRED] +
+ count[start + H_PRED] +
+ count[start + D117_PRED] +
+ count[start + D135_PRED] +
+ count[start + D45_PRED] +
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ default:
+ tree_left =
+ count[start + DC_PRED];
+ tree_right =
+ count[start + TM_PRED] +
+ count[start + V_PRED] +
+ count[start + H_PRED] +
+ count[start + D117_PRED] +
+ count[start + D135_PRED] +
+ count[start + D45_PRED] +
+ count[start + D207_PRED] +
+ count[start + D153_PRED] +
+ count[start + D63_PRED];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+ coef_count_node_start = coef_count_node_start + 10;
+ }
+
+ coef_node_start = VP9_PARTITION_P_START;
+ coef_count_node_start = VP9_PARTITION_P_COUNT_START;
+ for (tree_i = 0 ; tree_i < 16 ; tree_i++) {
+ for (node = 0 ; node < 3 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 2:
+ tree_left = count[start + 2];
+ tree_right = count[start + 3];
+ break;
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2] +
+ count[start + 3];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2] +
+ count[start + 3];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+
+ coef_count_node_start = coef_count_node_start + 4;
+ }
+
+ coef_node_start = VP9_INTERP_START;
+ coef_count_node_start = VP9_INTERP_COUNT_START;
+ for (tree_i = 0 ; tree_i < 4 ; tree_i++) {
+ for (node = 0 ; node < 2 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+ coef_count_node_start = coef_count_node_start + 3;
+ }
+
+ coef_node_start = VP9_MV_JOINTS_START;
+ coef_count_node_start = VP9_MV_JOINTS_COUNT_START;
+ for (tree_i = 0 ; tree_i < 1 ; tree_i++) {
+ for (node = 0 ; node < 3 ; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 2:
+ tree_left = count[start + 2];
+ tree_right = count[start + 3];
+ break;
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2] +
+ count[start + 3];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2] +
+ count[start + 3];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+ coef_count_node_start = coef_count_node_start + 4;
+ }
+
+ for (mvd_i = 0 ; mvd_i < 2 ; mvd_i++) {
+ coef_node_start = mvd_i ? VP9_MV_CLASSES_1_START :
+ VP9_MV_CLASSES_0_START;
+ coef_count_node_start = mvd_i ?
+ VP9_MV_CLASSES_1_COUNT_START :
+ VP9_MV_CLASSES_0_COUNT_START;
+ tree_i = 0;
+ for (node = 0; node < 10; node++) {
+ unsigned int start = coef_count_node_start;
+
+ switch (node) {
+ case 9:
+ tree_left = count[start + 9];
+ tree_right = count[start + 10];
+ break;
+ case 8:
+ tree_left = count[start + 7];
+ tree_right = count[start + 8];
+ break;
+ case 7:
+ tree_left = count[start + 7] +
+ count[start + 8];
+ tree_right = count[start + 9] +
+ count[start + 10];
+ break;
+ case 6:
+ tree_left = count[start + 6];
+ tree_right = count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ case 5:
+ tree_left = count[start + 4];
+ tree_right = count[start + 5];
+ break;
+ case 4:
+ tree_left = count[start + 4] +
+ count[start + 5];
+ tree_right = count[start + 6] +
+ count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ case 3:
+ tree_left = count[start + 2];
+ tree_right = count[start + 3];
+ break;
+ case 2:
+ tree_left = count[start + 2] +
+ count[start + 3];
+ tree_right = count[start + 4] +
+ count[start + 5] +
+ count[start + 6] +
+ count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2] +
+ count[start + 3] +
+ count[start + 4] +
+ count[start + 5] +
+ count[start + 6] +
+ count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2] +
+ count[start + 3] +
+ count[start + 4] +
+ count[start + 5] +
+ count[start + 6] +
+ count[start + 7] +
+ count[start + 8] +
+ count[start + 9] +
+ count[start + 10];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+
+ coef_node_start = mvd_i ? VP9_MV_CLASS0_1_START :
+ VP9_MV_CLASS0_0_START;
+ coef_count_node_start = mvd_i ?
+ VP9_MV_CLASS0_1_COUNT_START :
+ VP9_MV_CLASS0_0_COUNT_START;
+ tree_i = 0;
+ node = 0;
+ tree_left = count[coef_count_node_start + 0];
+ tree_right = count[coef_count_node_start + 1];
+
+ vp9_tree_merge_probs(prev_prob, cur_prob,
+ coef_node_start,
+ tree_left, tree_right,
+ tree_i, node);
+ coef_node_start = mvd_i ? VP9_MV_CLASS0_FP_1_START :
+ VP9_MV_CLASS0_FP_0_START;
+ coef_count_node_start = mvd_i ?
+ VP9_MV_CLASS0_FP_1_COUNT_START :
+ VP9_MV_CLASS0_FP_0_COUNT_START;
+
+ for (tree_i = 0; tree_i < 3; tree_i++) {
+ for (node = 0; node < 3; node++) {
+ unsigned int start =
+ coef_count_node_start;
+ switch (node) {
+ case 2:
+ tree_left = count[start + 2];
+ tree_right = count[start + 3];
+ break;
+ case 1:
+ tree_left = count[start + 1];
+ tree_right = count[start + 2] +
+ count[start + 3];
+ break;
+ default:
+ tree_left = count[start + 0];
+ tree_right = count[start + 1] +
+ count[start + 2] +
+ count[start + 3];
+ break;
+ }
+
+ vp9_tree_merge_probs(prev_prob,
+ cur_prob,
+ coef_node_start,
+ tree_left,
+ tree_right,
+ tree_i, node);
+
+ coef_node_start = coef_node_start + 1;
+ }
+ coef_count_node_start =
+ coef_count_node_start + 4;
+ }
+ }
+ }
+}
+
+static irqreturn_t codec_vp9_threaded_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_vp9 *vp9 = sess->priv;
+ u32 dec_status = amvdec_read_dos(core, VP9_DEC_STATUS_REG);
+ u32 prob_status = amvdec_read_dos(core, VP9_ADAPT_PROB_REG);
+ int i;
+
+ if (!vp9)
+ return IRQ_HANDLED;
+
+ mutex_lock(&vp9->lock);
+ if (dec_status != VP9_HEAD_PARSER_DONE) {
+ dev_err(core->dev_dec, "Unrecognized dec_status: %08X\n",
+ dec_status);
+ amvdec_abort(sess);
+ goto unlock;
+ }
+
+ pr_debug("ISR: %08X;%08X\n", dec_status, prob_status);
+ sess->keyframe_found = 1;
+
+ if ((prob_status & 0xff) == 0xfd && vp9->cur_frame) {
+ /* VP9_REQ_ADAPT_PROB */
+ u8 *prev_prob_b = ((u8 *)vp9->workspace_vaddr +
+ PROB_OFFSET) +
+ ((prob_status >> 8) * 0x1000);
+ u8 *cur_prob_b = ((u8 *)vp9->workspace_vaddr +
+ PROB_OFFSET) + 0x4000;
+ u8 *count_b = (u8 *)vp9->workspace_vaddr +
+ COUNT_OFFSET;
+ int last_frame_type = vp9->prev_frame ?
+ vp9->prev_frame->type :
+ KEY_FRAME;
+
+ adapt_coef_probs(last_frame_type == KEY_FRAME,
+ vp9->cur_frame->type == KEY_FRAME ? 1 : 0,
+ prob_status >> 8,
+ (unsigned int *)prev_prob_b,
+ (unsigned int *)cur_prob_b,
+ (unsigned int *)count_b);
+
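+ /* Commit the adapted probabilities and clear the adaptation request */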
+ memcpy(prev_prob_b, cur_prob_b, ADAPT_PROB_SIZE);
+ amvdec_write_dos(core, VP9_ADAPT_PROB_REG, 0);
+ }
+
+ /* Invalidate first 3 refs */
+ for (i = 0; i < REFS_PER_FRAME ; ++i)
+ vp9->frame_refs[i] = NULL;
+
+ vp9->prev_frame = vp9->cur_frame;
+ codec_vp9_update_ref(vp9);
+
+ codec_vp9_fetch_rpm(sess);
+ if (codec_vp9_process_rpm(vp9)) {
+ amvdec_src_change(sess, vp9->width, vp9->height, 16);
+
+ /* No frame is actually processed */
+ vp9->cur_frame = NULL;
+
+ /* Show the remaining frame */
+ codec_vp9_show_frame(sess);
+
+ /* FIXME: Save refs for resized frame */
+ if (vp9->frames_num)
+ codec_vp9_save_refs(vp9);
+
+ goto unlock;
+ }
+
+ codec_vp9_process_lf(vp9);
+ codec_vp9_process_frame(sess);
+ codec_vp9_show_frame(sess);
+
+unlock:
+ mutex_unlock(&vp9->lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t codec_vp9_isr(struct amvdec_session *sess)
+{
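+ /* All handling is deferred to the threaded ISR */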
+ return IRQ_WAKE_THREAD;
+}
+
+struct amvdec_codec_ops codec_vp9_ops = {
+ .start = codec_vp9_start,
+ .stop = codec_vp9_stop,
+ .isr = codec_vp9_isr,
+ .threaded_isr = codec_vp9_threaded_isr,
+ .num_pending_bufs = codec_vp9_num_pending_bufs,
+ .drain = codec_vp9_flush_output,
+ .resume = codec_vp9_resume,
+};
diff --git a/drivers/staging/media/meson/vdec/codec_vp9.h b/drivers/staging/media/meson/vdec/codec_vp9.h
new file mode 100644
index 000000000000..62db65a2b939
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_vp9.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr>
+ */
+
+#ifndef __MESON_VDEC_CODEC_VP9_H_
+#define __MESON_VDEC_CODEC_VP9_H_
+
+#include "vdec.h"
+
+extern struct amvdec_codec_ops codec_vp9_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
index 95102a4bdc62..db7022707ff8 100644
--- a/drivers/staging/media/meson/vdec/esparser.c
+++ b/drivers/staging/media/meson/vdec/esparser.c
@@ -52,6 +52,7 @@
#define PARSER_VIDEO_HOLE 0x90
#define SEARCH_PATTERN_LEN 512
+#define VP9_HEADER_SIZE 16
static DECLARE_WAIT_QUEUE_HEAD(wq);
static int search_done;
@@ -74,27 +75,140 @@ static irqreturn_t esparser_isr(int irq, void *dev)
return IRQ_HANDLED;
}
+/*
+ * Each VP9 frame must be prefixed with a 16-byte
+ * Amlogic custom header
+ */
+static int vp9_update_header(struct amvdec_core *core, struct vb2_buffer *buf)
+{
+ u8 *dp;
+ u8 marker;
+ int dsize;
+ int num_frames, cur_frame;
+ int cur_mag, mag, mag_ptr;
+ int frame_size[8], tot_frame_size[8];
+ int total_datasize = 0;
+ int new_frame_size;
+ unsigned char *old_header = NULL;
+
+ dp = (uint8_t *)vb2_plane_vaddr(buf, 0);
+ dsize = vb2_get_plane_payload(buf, 0);
+
+ if (dsize == vb2_plane_size(buf, 0)) {
+ dev_warn(core->dev, "%s: unable to update header\n", __func__);
+ return 0;
+ }
+
+ marker = dp[dsize - 1];
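+ /*
+ * A trailing marker byte 110mmfff denotes a VP9 superframe index:
+ * fff + 1 is the frame count, mm + 1 the size field width in bytes.
+ */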
+ if ((marker & 0xe0) == 0xc0) {
+ num_frames = (marker & 0x7) + 1;
+ mag = ((marker >> 3) & 0x3) + 1;
+ mag_ptr = dsize - mag * num_frames - 2;
+ if (dp[mag_ptr] != marker)
+ return 0;
+
+ mag_ptr++;
+ for (cur_frame = 0; cur_frame < num_frames; cur_frame++) {
+ frame_size[cur_frame] = 0;
+ for (cur_mag = 0; cur_mag < mag; cur_mag++) {
+ frame_size[cur_frame] |=
+ (dp[mag_ptr] << (cur_mag * 8));
+ mag_ptr++;
+ }
+ if (cur_frame == 0)
+ tot_frame_size[cur_frame] =
+ frame_size[cur_frame];
+ else
+ tot_frame_size[cur_frame] =
+ tot_frame_size[cur_frame - 1] +
+ frame_size[cur_frame];
+ total_datasize += frame_size[cur_frame];
+ }
+ } else {
+ num_frames = 1;
+ frame_size[0] = dsize;
+ tot_frame_size[0] = dsize;
+ total_datasize = dsize;
+ }
+
+ new_frame_size = total_datasize + num_frames * VP9_HEADER_SIZE;
+
+ if (new_frame_size >= vb2_plane_size(buf, 0)) {
+ dev_warn(core->dev, "%s: unable to update header\n", __func__);
+ return 0;
+ }
+
+ for (cur_frame = num_frames - 1; cur_frame >= 0; cur_frame--) {
+ int framesize = frame_size[cur_frame];
+ int framesize_header = framesize + 4;
+ int oldframeoff = tot_frame_size[cur_frame] - framesize;
+ int outheaderoff = oldframeoff + cur_frame * VP9_HEADER_SIZE;
+ u8 *fdata = dp + outheaderoff;
+ u8 *old_framedata = dp + oldframeoff;
+
+ memmove(fdata + VP9_HEADER_SIZE, old_framedata, framesize);
+
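+ /*
+ * 16-byte header: frame size + 4 as a big-endian u32, its one's
+ * complement, then the constant bytes 00 00 00 01 'A' 'M' 'L' 'V'.
+ */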
+ fdata[0] = (framesize_header >> 24) & 0xff;
+ fdata[1] = (framesize_header >> 16) & 0xff;
+ fdata[2] = (framesize_header >> 8) & 0xff;
+ fdata[3] = (framesize_header >> 0) & 0xff;
+ fdata[4] = ((framesize_header >> 24) & 0xff) ^ 0xff;
+ fdata[5] = ((framesize_header >> 16) & 0xff) ^ 0xff;
+ fdata[6] = ((framesize_header >> 8) & 0xff) ^ 0xff;
+ fdata[7] = ((framesize_header >> 0) & 0xff) ^ 0xff;
+ fdata[8] = 0;
+ fdata[9] = 0;
+ fdata[10] = 0;
+ fdata[11] = 1;
+ fdata[12] = 'A';
+ fdata[13] = 'M';
+ fdata[14] = 'L';
+ fdata[15] = 'V';
+
+ if (!old_header) {
+ /* first frame, nothing to check yet */
+ } else if (old_header > fdata + 16 + framesize) {
+ dev_dbg(core->dev, "%s: data has gaps, setting to 0\n",
+ __func__);
+ memset(fdata + 16 + framesize, 0,
+ old_header - (fdata + 16 + framesize));
+ } else if (old_header < fdata + 16 + framesize) {
+ dev_err(core->dev, "%s: data overwritten\n", __func__);
+ }
+ old_header = fdata;
+ }
+
+ return new_frame_size;
+}
+
/* Pad the packet to at least 4 KiB, otherwise the VDEC unit won't trigger
* ISRs.
* Also append a start code 000001ff at the end to trigger
* the ESPARSER interrupt.
*/
-static u32 esparser_pad_start_code(struct vb2_buffer *vb)
+static u32 esparser_pad_start_code(struct amvdec_core *core,
+ struct vb2_buffer *vb,
+ u32 payload_size)
{
- u32 payload_size = vb2_get_plane_payload(vb, 0);
u32 pad_size = 0;
- u8 *vaddr = vb2_plane_vaddr(vb, 0) + payload_size;
+ u8 *vaddr = vb2_plane_vaddr(vb, 0);
if (payload_size < ESPARSER_MIN_PACKET_SIZE) {
pad_size = ESPARSER_MIN_PACKET_SIZE - payload_size;
- memset(vaddr, 0, pad_size);
+ memset(vaddr + payload_size, 0, pad_size);
+ }
+
+ if ((payload_size + pad_size + SEARCH_PATTERN_LEN) >
+ vb2_plane_size(vb, 0)) {
+ dev_warn(core->dev, "%s: unable to pad start code\n", __func__);
+ return pad_size;
}
- memset(vaddr + pad_size, 0, SEARCH_PATTERN_LEN);
- vaddr[pad_size] = 0x00;
- vaddr[pad_size + 1] = 0x00;
- vaddr[pad_size + 2] = 0x01;
- vaddr[pad_size + 3] = 0xff;
+ memset(vaddr + payload_size + pad_size, 0, SEARCH_PATTERN_LEN);
+ vaddr[payload_size + pad_size] = 0x00;
+ vaddr[payload_size + pad_size + 1] = 0x00;
+ vaddr[payload_size + pad_size + 2] = 0x01;
+ vaddr[payload_size + pad_size + 3] = 0xff;
return pad_size;
}
@@ -181,30 +295,60 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf)
struct vb2_buffer *vb = &vbuf->vb2_buf;
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
- u32 num_dst_bufs = 0;
u32 payload_size = vb2_get_plane_payload(vb, 0);
dma_addr_t phy = vb2_dma_contig_plane_dma_addr(vb, 0);
+ u32 num_dst_bufs = 0;
u32 offset;
u32 pad_size;
- if (codec_ops->num_pending_bufs)
- num_dst_bufs = codec_ops->num_pending_bufs(sess);
-
- num_dst_bufs += v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
-
- if (esparser_vififo_get_free_space(sess) < payload_size ||
- atomic_read(&sess->esparser_queued_bufs) >= num_dst_bufs)
+ /*
+ * VP9 can hold the maximum number of reference frames at once, so
+ * keep 3 CAPTURE buffers in reserve to prevent a shortage on the
+ * decoder side.
+ * A future enhancement could be to notify the decoding modules of
+ * newly queued capture buffers, so that they can pause when no
+ * capture buffer is available and resume on that notification.
+ */
+ if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) {
+ if (codec_ops->num_pending_bufs)
+ num_dst_bufs = codec_ops->num_pending_bufs(sess);
+
+ num_dst_bufs += v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
+ if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9)
+ num_dst_bufs -= 3;
+
+ if (esparser_vififo_get_free_space(sess) < payload_size ||
+ atomic_read(&sess->esparser_queued_bufs) >= num_dst_bufs)
+ return -EAGAIN;
+ } else if (esparser_vififo_get_free_space(sess) < payload_size) {
return -EAGAIN;
+ }
v4l2_m2m_src_buf_remove_by_buf(sess->m2m_ctx, vbuf);
offset = esparser_get_offset(sess);
- amvdec_add_ts_reorder(sess, vb->timestamp, offset);
- dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X\n",
- vb->timestamp, payload_size, offset);
+ amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
+ dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n",
+ vb->timestamp, payload_size, offset, vbuf->flags);
+
+ vbuf->flags = 0;
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = sess->sequence_out++;
+
+ if (sess->fmt_out->pixfmt == V4L2_PIX_FMT_VP9) {
+ payload_size = vp9_update_header(core, vb);
- pad_size = esparser_pad_start_code(vb);
+ /* Drop the buffer if the headers could not be added */
+ if (payload_size == 0) {
+ amvdec_remove_ts(sess, vb->timestamp);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+
+ return 0;
+ }
+ }
+
+ pad_size = esparser_pad_start_code(core, vb, payload_size);
ret = esparser_write_data(core, phy, payload_size + pad_size);
if (ret <= 0) {
@@ -216,19 +360,7 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf)
return 0;
}
- /* We need to wait until we parse the first keyframe.
- * All buffers prior to the first keyframe must be dropped.
- */
- if (!sess->keyframe_found)
- usleep_range(1000, 2000);
-
- if (sess->keyframe_found)
- atomic_inc(&sess->esparser_queued_bufs);
- else
- amvdec_remove_ts(sess, vb->timestamp);
-
- vbuf->flags = 0;
- vbuf->field = V4L2_FIELD_NONE;
+ atomic_inc(&sess->esparser_queued_bufs);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
return 0;
diff --git a/drivers/staging/media/meson/vdec/hevc_regs.h b/drivers/staging/media/meson/vdec/hevc_regs.h
new file mode 100644
index 000000000000..0392f41a1eed
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/hevc_regs.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __MESON_VDEC_HEVC_REGS_H_
+#define __MESON_VDEC_HEVC_REGS_H_
+
+#define HEVC_ASSIST_MMU_MAP_ADDR 0xc024
+
+#define HEVC_ASSIST_MBOX1_CLR_REG 0xc1d4
+#define HEVC_ASSIST_MBOX1_MASK 0xc1d8
+
+#define HEVC_ASSIST_SCRATCH_0 0xc300
+#define HEVC_ASSIST_SCRATCH_1 0xc304
+#define HEVC_ASSIST_SCRATCH_2 0xc308
+#define HEVC_ASSIST_SCRATCH_3 0xc30c
+#define HEVC_ASSIST_SCRATCH_4 0xc310
+#define HEVC_ASSIST_SCRATCH_5 0xc314
+#define HEVC_ASSIST_SCRATCH_6 0xc318
+#define HEVC_ASSIST_SCRATCH_7 0xc31c
+#define HEVC_ASSIST_SCRATCH_8 0xc320
+#define HEVC_ASSIST_SCRATCH_9 0xc324
+#define HEVC_ASSIST_SCRATCH_A 0xc328
+#define HEVC_ASSIST_SCRATCH_B 0xc32c
+#define HEVC_ASSIST_SCRATCH_C 0xc330
+#define HEVC_ASSIST_SCRATCH_D 0xc334
+#define HEVC_ASSIST_SCRATCH_E 0xc338
+#define HEVC_ASSIST_SCRATCH_F 0xc33c
+#define HEVC_ASSIST_SCRATCH_G 0xc340
+#define HEVC_ASSIST_SCRATCH_H 0xc344
+#define HEVC_ASSIST_SCRATCH_I 0xc348
+#define HEVC_ASSIST_SCRATCH_J 0xc34c
+#define HEVC_ASSIST_SCRATCH_K 0xc350
+#define HEVC_ASSIST_SCRATCH_L 0xc354
+#define HEVC_ASSIST_SCRATCH_M 0xc358
+#define HEVC_ASSIST_SCRATCH_N 0xc35c
+
+#define HEVC_PARSER_VERSION 0xc400
+#define HEVC_STREAM_CONTROL 0xc404
+#define HEVC_STREAM_START_ADDR 0xc408
+#define HEVC_STREAM_END_ADDR 0xc40c
+#define HEVC_STREAM_WR_PTR 0xc410
+#define HEVC_STREAM_RD_PTR 0xc414
+#define HEVC_STREAM_LEVEL 0xc418
+#define HEVC_STREAM_FIFO_CTL 0xc41c
+#define HEVC_SHIFT_CONTROL 0xc420
+#define HEVC_SHIFT_STARTCODE 0xc424
+#define HEVC_SHIFT_EMULATECODE 0xc428
+#define HEVC_SHIFT_STATUS 0xc42c
+#define HEVC_SHIFTED_DATA 0xc430
+#define HEVC_SHIFT_BYTE_COUNT 0xc434
+#define HEVC_SHIFT_COMMAND 0xc438
+#define HEVC_ELEMENT_RESULT 0xc43c
+#define HEVC_CABAC_CONTROL 0xc440
+#define HEVC_PARSER_SLICE_INFO 0xc444
+#define HEVC_PARSER_CMD_WRITE 0xc448
+#define HEVC_PARSER_CORE_CONTROL 0xc44c
+#define HEVC_PARSER_CMD_FETCH 0xc450
+#define HEVC_PARSER_CMD_STATUS 0xc454
+#define HEVC_PARSER_LCU_INFO 0xc458
+#define HEVC_PARSER_HEADER_INFO 0xc45c
+#define HEVC_PARSER_INT_CONTROL 0xc480
+#define HEVC_PARSER_INT_STATUS 0xc484
+#define HEVC_PARSER_IF_CONTROL 0xc488
+#define HEVC_PARSER_PICTURE_SIZE 0xc48c
+#define HEVC_PARSER_LCU_START 0xc490
+#define HEVC_PARSER_HEADER_INFO2 0xc494
+#define HEVC_PARSER_QUANT_READ 0xc498
+#define HEVC_PARSER_RESERVED_27 0xc49c
+#define HEVC_PARSER_CMD_SKIP_0 0xc4a0
+#define HEVC_PARSER_CMD_SKIP_1 0xc4a4
+#define HEVC_PARSER_CMD_SKIP_2 0xc4a8
+#define HEVC_SAO_IF_STATUS 0xc4c0
+#define HEVC_SAO_IF_DATA_Y 0xc4c4
+#define HEVC_SAO_IF_DATA_U 0xc4c8
+#define HEVC_SAO_IF_DATA_V 0xc4cc
+#define HEVC_STREAM_SWAP_ADDR 0xc4d0
+#define HEVC_STREAM_SWAP_CTRL 0xc4d4
+#define HEVC_IQIT_IF_WAIT_CNT 0xc4d8
+#define HEVC_MPRED_IF_WAIT_CNT 0xc4dc
+#define HEVC_SAO_IF_WAIT_CNT 0xc4e0
+
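+ /*
+ * Blend the previous probability with the observed one, weighted
+ * by the number of samples seen (saturated at count_sat).
+ */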
+#define HEVC_MPRED_VERSION 0xc800
+#define HEVC_MPRED_CTRL0 0xc804
+ #define MPRED_CTRL0_NEW_PIC BIT(2)
+ #define MPRED_CTRL0_NEW_TILE BIT(3)
+ #define MPRED_CTRL0_NEW_SLI_SEG BIT(4)
+ #define MPRED_CTRL0_TMVP BIT(5)
+ #define MPRED_CTRL0_LDC BIT(6)
+ #define MPRED_CTRL0_COL_FROM_L0 BIT(7)
+ #define MPRED_CTRL0_ABOVE_EN BIT(9)
+ #define MPRED_CTRL0_MV_WR_EN BIT(10)
+ #define MPRED_CTRL0_MV_RD_EN BIT(11)
+ #define MPRED_CTRL0_BUF_LINEAR BIT(13)
+#define HEVC_MPRED_CTRL1 0xc808
+#define HEVC_MPRED_INT_EN 0xc80c
+#define HEVC_MPRED_INT_STATUS 0xc810
+#define HEVC_MPRED_PIC_SIZE 0xc814
+#define HEVC_MPRED_PIC_SIZE_LCU 0xc818
+#define HEVC_MPRED_TILE_START 0xc81c
+#define HEVC_MPRED_TILE_SIZE_LCU 0xc820
+#define HEVC_MPRED_REF_NUM 0xc824
+#define HEVC_MPRED_REF_EN_L0 0xc830
+#define HEVC_MPRED_REF_EN_L1 0xc834
+#define HEVC_MPRED_COLREF_EN_L0 0xc838
+#define HEVC_MPRED_COLREF_EN_L1 0xc83c
+#define HEVC_MPRED_AXI_WCTRL 0xc840
+#define HEVC_MPRED_AXI_RCTRL 0xc844
+#define HEVC_MPRED_ABV_START_ADDR 0xc848
+#define HEVC_MPRED_MV_WR_START_ADDR 0xc84c
+#define HEVC_MPRED_MV_RD_START_ADDR 0xc850
+#define HEVC_MPRED_MV_WPTR 0xc854
+#define HEVC_MPRED_MV_RPTR 0xc858
+#define HEVC_MPRED_MV_WR_ROW_JUMP 0xc85c
+#define HEVC_MPRED_MV_RD_ROW_JUMP 0xc860
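+ /* Adapt faster (factor 128) on the first inter frame after a keyframe */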
+#define HEVC_MPRED_CURR_LCU 0xc864
+#define HEVC_MPRED_ABV_WPTR 0xc868
+#define HEVC_MPRED_ABV_RPTR 0xc86c
+#define HEVC_MPRED_CTRL2 0xc870
+#define HEVC_MPRED_CTRL3 0xc874
+#define HEVC_MPRED_L0_REF00_POC 0xc880
+#define HEVC_MPRED_L1_REF00_POC 0xc8c0
+
+#define HEVC_MPRED_CTRL4 0xc930
+
+#define HEVC_MPRED_CUR_POC 0xc980
+#define HEVC_MPRED_COL_POC 0xc984
+#define HEVC_MPRED_MV_RD_END_ADDR 0xc988
+
+#define HEVC_MSP 0xcc00
+#define HEVC_MPSR 0xcc04
+#define HEVC_MCPU_INTR_MSK 0xcc10
+#define HEVC_MCPU_INTR_REQ 0xcc14
+#define HEVC_CPSR 0xcc84
+
+#define HEVC_IMEM_DMA_CTRL 0xcd00
+#define HEVC_IMEM_DMA_ADR 0xcd04
+#define HEVC_IMEM_DMA_COUNT 0xcd08
+
+#define HEVCD_IPP_TOP_CNTL 0xd000
+#define HEVCD_IPP_LINEBUFF_BASE 0xd024
+#define HEVCD_IPP_AXIIF_CONFIG 0xd02c
+
+#define VP9D_MPP_REF_SCALE_ENBL 0xd104
+#define VP9D_MPP_REFINFO_TBL_ACCCONFIG 0xd108
+#define VP9D_MPP_REFINFO_DATA 0xd10c
+
+#define HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR 0xd180
+#define HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR 0xd184
+#define HEVCD_MPP_ANC2AXI_TBL_DATA 0xd190
+
+#define HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR 0xd300
+#define HEVCD_MPP_ANC_CANVAS_DATA_ADDR 0xd304
+#define HEVCD_MPP_DECOMP_CTL1 0xd308
+#define HEVCD_MPP_DECOMP_CTL2 0xd30c
+#define HEVCD_MCRCC_CTL1 0xd3c0
+#define HEVCD_MCRCC_CTL2 0xd3c4
+#define HEVCD_MCRCC_CTL3 0xd3c8
+
+#define HEVC_DBLK_CFG0 0xd400
+#define HEVC_DBLK_CFG1 0xd404
+#define HEVC_DBLK_CFG2 0xd408
+#define HEVC_DBLK_CFG3 0xd40c
+#define HEVC_DBLK_CFG4 0xd410
+#define HEVC_DBLK_CFG5 0xd414
+#define HEVC_DBLK_CFG6 0xd418
+#define HEVC_DBLK_CFG7 0xd41c
+#define HEVC_DBLK_CFG8 0xd420
+#define HEVC_DBLK_CFG9 0xd424
+#define HEVC_DBLK_CFGA 0xd428
+#define HEVC_DBLK_STS0 0xd42c
+#define HEVC_DBLK_CFGB 0xd42c
+#define HEVC_DBLK_STS1 0xd430
+#define HEVC_DBLK_CFGE 0xd438
+
+#define HEVC_SAO_VERSION 0xd800
+#define HEVC_SAO_CTRL0 0xd804
+#define HEVC_SAO_CTRL1 0xd808
+#define HEVC_SAO_PIC_SIZE 0xd814
+#define HEVC_SAO_PIC_SIZE_LCU 0xd818
+#define HEVC_SAO_TILE_START 0xd81c
+#define HEVC_SAO_TILE_SIZE_LCU 0xd820
+#define HEVC_SAO_Y_START_ADDR 0xd82c
+#define HEVC_SAO_Y_LENGTH 0xd830
+#define HEVC_SAO_C_START_ADDR 0xd834
+#define HEVC_SAO_C_LENGTH 0xd838
+#define HEVC_SAO_Y_WPTR 0xd83c
+#define HEVC_SAO_C_WPTR 0xd840
+#define HEVC_SAO_ABV_START_ADDR 0xd844
+#define HEVC_SAO_VB_WR_START_ADDR 0xd848
+#define HEVC_SAO_VB_RD_START_ADDR 0xd84c
+#define HEVC_SAO_ABV_WPTR 0xd850
+#define HEVC_SAO_ABV_RPTR 0xd854
+#define HEVC_SAO_VB_WPTR 0xd858
+#define HEVC_SAO_VB_RPTR 0xd85c
+#define HEVC_SAO_CTRL2 0xd880
+#define HEVC_SAO_CTRL3 0xd884
+#define HEVC_SAO_CTRL4 0xd888
+#define HEVC_SAO_CTRL5 0xd88c
+#define HEVC_SAO_CTRL6 0xd890
+#define HEVC_SAO_CTRL7 0xd894
+#define HEVC_CM_BODY_START_ADDR 0xd898
+#define HEVC_CM_BODY_LENGTH 0xd89c
+#define HEVC_CM_HEADER_START_ADDR 0xd8a0
+#define HEVC_CM_HEADER_LENGTH 0xd8a4
+#define HEVC_CM_HEADER_OFFSET 0xd8ac
+#define HEVC_SAO_MMU_VH0_ADDR 0xd8e8
+#define HEVC_SAO_MMU_VH1_ADDR 0xd8ec
+
+#define HEVC_IQIT_CLK_RST_CTRL 0xdc00
+#define HEVC_IQIT_SCALELUT_WR_ADDR 0xdc08
+#define HEVC_IQIT_SCALELUT_RD_ADDR 0xdc0c
+#define HEVC_IQIT_SCALELUT_DATA 0xdc10
+
+#define HEVC_PSCALE_CTRL 0xe444
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 5c5dabed2f09..3040136ceb77 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -168,7 +168,10 @@ static void process_num_buffers(struct vb2_queue *q,
{
const struct amvdec_format *fmt_out = sess->fmt_out;
unsigned int buffers_total = q->num_buffers + *num_buffers;
+ u32 min_buf_capture = v4l2_ctrl_g_ctrl(sess->ctrl_min_buf_capture);
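+ /* Honour the minimum CAPTURE buffer count required by the codec */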
+ if (q->num_buffers + *num_buffers < min_buf_capture)
+ *num_buffers = min_buf_capture - q->num_buffers;
if (is_reqbufs && buffers_total < fmt_out->min_buffers)
*num_buffers = fmt_out->min_buffers - q->num_buffers;
if (buffers_total > fmt_out->max_buffers)
@@ -193,7 +196,8 @@ static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
if (*num_planes) {
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (*num_planes != 1 || sizes[0] < output_size)
+ if (*num_planes != 1 ||
+ sizes[0] < sess->src_buffer_size)
return -EINVAL;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
@@ -224,7 +228,7 @@ static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- sizes[0] = amvdec_get_output_size(sess);
+ sizes[0] = sess->src_buffer_size;
*num_planes = 1;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
@@ -250,6 +254,7 @@ static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
return -EINVAL;
}
+ sess->changed_format = 1;
return 0;
}
@@ -261,10 +266,11 @@ static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
- if (!sess->streamon_out || !sess->streamon_cap)
+ if (!sess->streamon_out)
return;
- if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ if (sess->streamon_cap &&
+ vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
vdec_codec_needs_recycle(sess))
vdec_queue_recycle(sess, vb);
@@ -289,16 +295,22 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
else
sess->streamon_cap = 1;
- if (!sess->streamon_out || !sess->streamon_cap)
+ if (!sess->streamon_out)
return 0;
if (sess->status == STATUS_NEEDS_RESUME &&
- q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ sess->changed_format) {
codec_ops->resume(sess);
sess->status = STATUS_RUNNING;
return 0;
}
+ if (sess->status == STATUS_RUNNING ||
+ sess->status == STATUS_NEEDS_RESUME ||
+ sess->status == STATUS_INIT)
+ return 0;
+
sess->vififo_size = SIZE_VIFIFO;
sess->vififo_vaddr =
dma_alloc_coherent(sess->core->dev, sess->vififo_size,
@@ -323,13 +335,14 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
goto vififo_free;
sess->sequence_cap = 0;
+ sess->sequence_out = 0;
if (vdec_codec_needs_recycle(sess))
sess->recycle_thread = kthread_run(vdec_recycle_thread, sess,
"vdec_recycle");
- sess->status = STATUS_RUNNING;
+ sess->status = STATUS_INIT;
core->cur_sess = sess;
-
+ schedule_work(&sess->esparser_queue_work);
return 0;
vififo_free:
@@ -382,10 +395,12 @@ static void vdec_reset_bufs_recycle(struct amvdec_session *sess)
static void vdec_stop_streaming(struct vb2_queue *q)
{
struct amvdec_session *sess = vb2_get_drv_priv(q);
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
struct amvdec_core *core = sess->core;
struct vb2_v4l2_buffer *buf;
if (sess->status == STATUS_RUNNING ||
+ sess->status == STATUS_INIT ||
(sess->status == STATUS_NEEDS_RESUME &&
(!sess->streamon_out || !sess->streamon_cap))) {
if (vdec_codec_needs_recycle(sess))
@@ -409,6 +424,10 @@ static void vdec_stop_streaming(struct vb2_queue *q)
sess->streamon_out = 0;
} else {
+ /* Drain remaining refs if the session was still running */
+ if (sess->status >= STATUS_RUNNING && codec_ops->drain)
+ codec_ops->drain(sess);
+
while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
@@ -476,20 +495,34 @@ vdec_try_fmt_common(struct amvdec_session *sess, u32 size,
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct amvdec_format *fmts = sess->core->platform->formats;
- const struct amvdec_format *fmt_out;
+ const struct amvdec_format *fmt_out = NULL;
+ u32 output_size = 0;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
fmt_out = find_format(fmts, size, pixmp->pixelformat);
if (!fmt_out) {
pixmp->pixelformat = V4L2_PIX_FMT_MPEG2;
fmt_out = find_format(fmts, size, pixmp->pixelformat);
}
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ fmt_out = sess->fmt_out;
+ break;
+ default:
+ return NULL;
+ }
+
+ pixmp->width = clamp(pixmp->width, (u32)256, fmt_out->max_width);
+ pixmp->height = clamp(pixmp->height, (u32)144, fmt_out->max_height);
+ output_size = get_output_size(pixmp->width, pixmp->height);
- pfmt[0].sizeimage =
- get_output_size(pixmp->width, pixmp->height);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (!pfmt[0].sizeimage)
+ pfmt[0].sizeimage = sess->src_buffer_size;
pfmt[0].bytesperline = 0;
pixmp->num_planes = 1;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -499,35 +532,25 @@ vdec_try_fmt_common(struct amvdec_session *sess, u32 size,
memset(pfmt[1].reserved, 0, sizeof(pfmt[1].reserved));
if (pixmp->pixelformat == V4L2_PIX_FMT_NV12M) {
- pfmt[0].sizeimage =
- get_output_size(pixmp->width, pixmp->height);
- pfmt[0].bytesperline = ALIGN(pixmp->width, 64);
+ pfmt[0].sizeimage = output_size;
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 32);
- pfmt[1].sizeimage =
- get_output_size(pixmp->width, pixmp->height) / 2;
- pfmt[1].bytesperline = ALIGN(pixmp->width, 64);
+ pfmt[1].sizeimage = output_size / 2;
+ pfmt[1].bytesperline = ALIGN(pixmp->width, 32);
pixmp->num_planes = 2;
} else if (pixmp->pixelformat == V4L2_PIX_FMT_YUV420M) {
- pfmt[0].sizeimage =
- get_output_size(pixmp->width, pixmp->height);
- pfmt[0].bytesperline = ALIGN(pixmp->width, 64);
+ pfmt[0].sizeimage = output_size;
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 32);
- pfmt[1].sizeimage =
- get_output_size(pixmp->width, pixmp->height) / 4;
- pfmt[1].bytesperline = ALIGN(pixmp->width, 64) / 2;
+ pfmt[1].sizeimage = output_size / 4;
+ pfmt[1].bytesperline = ALIGN(pixmp->width, 32) / 2;
- pfmt[2].sizeimage =
- get_output_size(pixmp->width, pixmp->height) / 4;
- pfmt[2].bytesperline = ALIGN(pixmp->width, 64) / 2;
+ pfmt[2].sizeimage = output_size / 2;
+ pfmt[2].bytesperline = ALIGN(pixmp->width, 32) / 2;
pixmp->num_planes = 3;
}
- } else {
- return NULL;
}
- pixmp->width = clamp(pixmp->width, (u32)256, fmt_out->max_width);
- pixmp->height = clamp(pixmp->height, (u32)144, fmt_out->max_height);
-
if (pixmp->field == V4L2_FIELD_ANY)
pixmp->field = V4L2_FIELD_NONE;
@@ -586,6 +609,8 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
orig_pixmp = *pixmp;
fmt_out = vdec_try_fmt_common(sess, num_formats, f);
+ if (!fmt_out)
+ return -EINVAL;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pixfmt_out = pixmp->pixelformat;
@@ -610,6 +635,7 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
sess->ycbcr_enc = pixmp->ycbcr_enc;
sess->quantization = pixmp->quantization;
sess->xfer_func = pixmp->xfer_func;
+ sess->src_buffer_size = pixmp->plane_fmt[0].sizeimage;
}
memset(&format, 0, sizeof(format));
@@ -701,29 +727,31 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
if (!(sess->streamon_out & sess->streamon_cap))
return 0;
- /* Currently not handled since we do not support dynamic resolution
- * for MPEG2. We consider both queues streaming to mean that the
- * decoding session is started
- */
- if (cmd->cmd == V4L2_DEC_CMD_START)
+ if (cmd->cmd == V4L2_DEC_CMD_START) {
+ v4l2_m2m_clear_state(sess->m2m_ctx);
+ sess->should_stop = 0;
return 0;
+ }
/* Should not happen */
if (cmd->cmd != V4L2_DEC_CMD_STOP)
return -EINVAL;
dev_dbg(dev, "Received V4L2_DEC_CMD_STOP\n");
+
sess->should_stop = 1;
- vdec_wait_inactive(sess);
+ v4l2_m2m_mark_stopped(sess->m2m_ctx);
if (codec_ops->drain) {
+ vdec_wait_inactive(sess);
codec_ops->drain(sess);
} else if (codec_ops->eos_sequence) {
u32 len;
const u8 *data = codec_ops->eos_sequence(&len);
esparser_queue_eos(sess->core, data, len);
+ vdec_wait_inactive(sess);
}
return ret;
@@ -883,6 +911,7 @@ static int vdec_open(struct file *file)
sess->height = 720;
sess->pixelaspect.numerator = 1;
sess->pixelaspect.denominator = 1;
+ sess->src_buffer_size = SZ_1M;
INIT_LIST_HEAD(&sess->timestamps);
INIT_LIST_HEAD(&sess->bufs_recycle);
@@ -1076,7 +1105,7 @@ static int vdec_probe(struct platform_device *pdev)
video_set_drvdata(vdev, core);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(dev, "Failed registering video device\n");
goto err_vdev_release;
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
index 0faa1ec4858e..f95445ac0658 100644
--- a/drivers/staging/media/meson/vdec/vdec.h
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -29,13 +29,19 @@ struct amvdec_buffer {
* struct amvdec_timestamp - stores a src timestamp along with a VIFIFO offset
*
* @list: used to make lists out of this struct
- * @ts: timestamp
+ * @tc: timecode from the v4l2 buffer
+ * @ts: timestamp from the VB2 buffer
* @offset: offset in the VIFIFO where the associated packet was written
+ * @flags: flags from the v4l2 buffer
+ * @used_count: times this timestamp was checked for a match with a dst buffer
*/
struct amvdec_timestamp {
struct list_head list;
+ struct v4l2_timecode tc;
u64 ts;
u32 offset;
+ u32 flags;
+ u32 used_count;
};
struct amvdec_session;
@@ -165,6 +171,7 @@ struct amvdec_format {
enum amvdec_status {
STATUS_STOPPED,
+ STATUS_INIT,
STATUS_RUNNING,
STATUS_NEEDS_RESUME,
};
@@ -180,6 +187,7 @@ enum amvdec_status {
* @ctrl_min_buf_capture: V4L2 control V4L2_CID_MIN_BUFFERS_FOR_CAPTURE
* @fmt_out: vdec pixel format for the OUTPUT queue
* @pixfmt_cap: V4L2 pixel format for the CAPTURE queue
+ * @src_buffer_size: size in bytes of the OUTPUT buffers' only plane
* @width: current picture width
* @height: current picture height
* @colorspace: current colorspace
@@ -221,6 +229,7 @@ struct amvdec_session {
const struct amvdec_format *fmt_out;
u32 pixfmt_cap;
+ u32 src_buffer_size;
u32 width;
u32 height;
@@ -235,10 +244,11 @@ struct amvdec_session {
struct work_struct esparser_queue_work;
unsigned int streamon_cap, streamon_out;
- unsigned int sequence_cap;
+ unsigned int sequence_cap, sequence_out;
unsigned int should_stop;
unsigned int keyframe_found;
unsigned int num_dst_bufs;
+ unsigned int changed_format;
u8 canvas_alloc[MAX_CANVAS];
u32 canvas_num;
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
index f16948bdbf2f..7f07a9175815 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.c
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -50,6 +50,33 @@ void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val)
}
EXPORT_SYMBOL_GPL(amvdec_write_parser);
+/* 4 KiB per 64x32 block */
+u32 amvdec_am21c_body_size(u32 width, u32 height)
+{
+ u32 width_64 = ALIGN(width, 64) / 64;
+ u32 height_32 = ALIGN(height, 32) / 32;
+
+ return SZ_4K * width_64 * height_32;
+}
+EXPORT_SYMBOL_GPL(amvdec_am21c_body_size);
+
+/* 32 bytes per 128x64 block */
+u32 amvdec_am21c_head_size(u32 width, u32 height)
+{
+ u32 width_128 = ALIGN(width, 128) / 128;
+ u32 height_64 = ALIGN(height, 64) / 64;
+
+ return 32 * width_128 * height_64;
+}
+EXPORT_SYMBOL_GPL(amvdec_am21c_head_size);
+
+u32 amvdec_am21c_size(u32 width, u32 height)
+{
+ return ALIGN(amvdec_am21c_body_size(width, height) +
+ amvdec_am21c_head_size(width, height), SZ_64K);
+}
+EXPORT_SYMBOL_GPL(amvdec_am21c_size);
+
static int canvas_alloc(struct amvdec_session *sess, u8 *canvas_id)
{
int ret;
@@ -154,8 +181,8 @@ int amvdec_set_canvases(struct amvdec_session *sess,
{
struct v4l2_m2m_buffer *buf;
u32 pixfmt = sess->pixfmt_cap;
- u32 width = ALIGN(sess->width, 64);
- u32 height = ALIGN(sess->height, 64);
+ u32 width = ALIGN(sess->width, 32);
+ u32 height = ALIGN(sess->height, 32);
u32 reg_cur = reg_base[0];
u32 reg_num_cur = 0;
u32 reg_base_cur = 0;
@@ -200,33 +227,23 @@ int amvdec_set_canvases(struct amvdec_session *sess,
}
EXPORT_SYMBOL_GPL(amvdec_set_canvases);
-void amvdec_add_ts_reorder(struct amvdec_session *sess, u64 ts, u32 offset)
+void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
+ struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
{
- struct amvdec_timestamp *new_ts, *tmp;
+ struct amvdec_timestamp *new_ts;
unsigned long flags;
- new_ts = kmalloc(sizeof(*new_ts), GFP_KERNEL);
+ new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL);
new_ts->ts = ts;
+ new_ts->tc = tc;
new_ts->offset = offset;
+ new_ts->flags = vbuf_flags;
spin_lock_irqsave(&sess->ts_spinlock, flags);
-
- if (list_empty(&sess->timestamps))
- goto add_tail;
-
- list_for_each_entry(tmp, &sess->timestamps, list) {
- if (ts <= tmp->ts) {
- list_add_tail(&new_ts->list, &tmp->list);
- goto unlock;
- }
- }
-
-add_tail:
list_add_tail(&new_ts->list, &sess->timestamps);
-unlock:
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
}
-EXPORT_SYMBOL_GPL(amvdec_add_ts_reorder);
+EXPORT_SYMBOL_GPL(amvdec_add_ts);
void amvdec_remove_ts(struct amvdec_session *sess, u64 ts)
{
@@ -251,8 +268,8 @@ EXPORT_SYMBOL_GPL(amvdec_remove_ts);
static void dst_buf_done(struct amvdec_session *sess,
struct vb2_v4l2_buffer *vbuf,
- u32 field,
- u64 timestamp)
+ u32 field, u64 timestamp,
+ struct v4l2_timecode timecode, u32 flags)
{
struct device *dev = sess->core->dev_dec;
u32 output_size = amvdec_get_output_size(sess);
@@ -271,19 +288,27 @@ static void dst_buf_done(struct amvdec_session *sess,
vbuf->vb2_buf.timestamp = timestamp;
vbuf->sequence = sess->sequence_cap++;
+ vbuf->flags = flags;
+ vbuf->timecode = timecode;
if (sess->should_stop &&
- atomic_read(&sess->esparser_queued_bufs) <= 2) {
+ atomic_read(&sess->esparser_queued_bufs) <= 1) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
- dev_dbg(dev, "Signaling EOS\n");
+ dev_dbg(dev, "Signaling EOS, sequence_cap = %u\n",
+ sess->sequence_cap - 1);
v4l2_event_queue_fh(&sess->fh, &ev);
vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ } else if (sess->status == STATUS_NEEDS_RESUME) {
+ /* Mark LAST for drained show frames during a source change */
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ sess->sequence_cap = 0;
} else if (sess->should_stop)
dev_dbg(dev, "should_stop, %u bufs remain\n",
atomic_read(&sess->esparser_queued_bufs));
- dev_dbg(dev, "Buffer %u done\n", vbuf->vb2_buf.index);
+ dev_dbg(dev, "Buffer %u done, ts = %llu, flags = %08X\n",
+ vbuf->vb2_buf.index, timestamp, flags);
vbuf->field = field;
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
@@ -297,7 +322,9 @@ void amvdec_dst_buf_done(struct amvdec_session *sess,
struct device *dev = sess->core->dev_dec;
struct amvdec_timestamp *tmp;
struct list_head *timestamps = &sess->timestamps;
+ struct v4l2_timecode timecode;
u64 timestamp;
+ u32 vbuf_flags;
unsigned long flags;
spin_lock_irqsave(&sess->ts_spinlock, flags);
@@ -312,11 +339,13 @@ void amvdec_dst_buf_done(struct amvdec_session *sess,
tmp = list_first_entry(timestamps, struct amvdec_timestamp, list);
timestamp = tmp->ts;
+ timecode = tmp->tc;
+ vbuf_flags = tmp->flags;
list_del(&tmp->list);
kfree(tmp);
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
- dst_buf_done(sess, vbuf, field, timestamp);
+ dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags);
atomic_dec(&sess->esparser_queued_bufs);
}
EXPORT_SYMBOL_GPL(amvdec_dst_buf_done);
@@ -328,48 +357,43 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
struct device *dev = sess->core->dev_dec;
struct amvdec_timestamp *match = NULL;
struct amvdec_timestamp *tmp, *n;
+ struct v4l2_timecode timecode = { 0 };
u64 timestamp = 0;
+ u32 vbuf_flags = 0;
unsigned long flags;
spin_lock_irqsave(&sess->ts_spinlock, flags);
/* Look for our vififo offset to get the corresponding timestamp. */
list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
- s64 delta = (s64)offset - tmp->offset;
-
- /* Offsets reported by codecs usually differ slightly,
- * so we need some wiggle room.
- * 4KiB being the minimum packet size, there is no risk here.
- */
- if (delta > (-1 * (s32)SZ_4K) && delta < SZ_4K) {
- match = tmp;
+ if (tmp->offset > offset) {
+ /*
+ * Delete any record that remained unused for 32 match
+ * checks
+ */
+ if (tmp->used_count++ >= 32) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
break;
}
- if (!allow_drop)
- continue;
-
- /* Delete any timestamp entry that appears before our target
- * (not all src packets/timestamps lead to a frame)
- */
- if (delta > 0 || delta < -1 * (s32)sess->vififo_size) {
- atomic_dec(&sess->esparser_queued_bufs);
- list_del(&tmp->list);
- kfree(tmp);
- }
+ match = tmp;
}
if (!match) {
- dev_dbg(dev, "Buffer %u done but can't match offset (%08X)\n",
+ dev_err(dev, "Buffer %u done but can't match offset (%08X)\n",
vbuf->vb2_buf.index, offset);
} else {
timestamp = match->ts;
+ timecode = match->tc;
+ vbuf_flags = match->flags;
list_del(&match->list);
kfree(match);
}
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
- dst_buf_done(sess, vbuf, field, timestamp);
+ dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags);
if (match)
atomic_dec(&sess->esparser_queued_bufs);
}
@@ -420,16 +444,19 @@ void amvdec_src_change(struct amvdec_session *sess, u32 width,
v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, dpb_size);
- /* Check if the capture queue is already configured well for our
+ /*
+ * Check if the capture queue is already configured well for our
* usecase. If so, keep decoding with it and do not send the event
*/
- if (sess->width == width &&
+ if (sess->streamon_cap &&
+ sess->width == width &&
sess->height == height &&
dpb_size <= sess->num_dst_bufs) {
sess->fmt_out->codec_ops->resume(sess);
return;
}
+ sess->changed_format = 0;
sess->width = width;
sess->height = height;
sess->status = STATUS_NEEDS_RESUME;
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
index a455a9ee1cc2..cfaed52ab526 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.h
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -27,6 +27,10 @@ void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val);
u32 amvdec_read_parser(struct amvdec_core *core, u32 reg);
void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val);
+u32 amvdec_am21c_body_size(u32 width, u32 height);
+u32 amvdec_am21c_head_size(u32 width, u32 height);
+u32 amvdec_am21c_size(u32 width, u32 height);
+
/**
* amvdec_dst_buf_done_idx() - Signal that a buffer is done decoding
*
@@ -44,13 +48,15 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
u32 offset, u32 field, bool allow_drop);
/**
- * amvdec_add_ts_reorder() - Add a timestamp to the list in chronological order
+ * amvdec_add_ts() - Add a timestamp to the list
*
* @sess: current session
* @ts: timestamp to add
* @offset: offset in the VIFIFO where the associated packet was written
+ * @tc: timecode to add
+ * @flags: the vb2_v4l2_buffer flags
*/
-void amvdec_add_ts_reorder(struct amvdec_session *sess, u64 ts, u32 offset);
+void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
+ struct v4l2_timecode tc, u32 offset, u32 flags);
void amvdec_remove_ts(struct amvdec_session *sess, u64 ts);
/**
diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c
new file mode 100644
index 000000000000..9530e580e57a
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_hevc.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr>
+ *
+ * VDEC_HEVC is a video decoding block that allows decoding of
+ * HEVC and VP9
+ */
+
+#include <linux/firmware.h>
+#include <linux/clk.h>
+
+#include "vdec_1.h"
+#include "vdec_helpers.h"
+#include "vdec_hevc.h"
+#include "hevc_regs.h"
+#include "dos_regs.h"
+
+/* AO Registers */
+#define AO_RTI_GEN_PWR_SLEEP0 0xe8
+#define AO_RTI_GEN_PWR_ISO0 0xec
+ #define GEN_PWR_VDEC_HEVC (BIT(7) | BIT(6))
+ #define GEN_PWR_VDEC_HEVC_SM1 (BIT(2))
+
+#define MC_SIZE (4096 * 4)
+
+static int vdec_hevc_load_firmware(struct amvdec_session *sess,
+ const char *fwname)
+{
+ struct amvdec_core *core = sess->core;
+ struct device *dev = core->dev_dec;
+ const struct firmware *fw;
+ void *mc_addr;
+ dma_addr_t mc_addr_map;
+ int ret;
+ u32 i = 100;
+
+ ret = request_firmware(&fw, fwname, dev);
+ if (ret < 0) {
+ dev_err(dev, "Unable to request firmware %s\n", fwname);
+ return ret;
+ }
+
+ if (fw->size < MC_SIZE) {
+ dev_err(dev, "Firmware size %zu is too small. Expected %u.\n",
+ fw->size, MC_SIZE);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
+ mc_addr = dma_alloc_coherent(core->dev, MC_SIZE, &mc_addr_map,
+ GFP_KERNEL);
+ if (!mc_addr) {
+ ret = -ENOMEM;
+ goto release_firmware;
+ }
+
+ memcpy(mc_addr, fw->data, MC_SIZE);
+
+ amvdec_write_dos(core, HEVC_MPSR, 0);
+ amvdec_write_dos(core, HEVC_CPSR, 0);
+
+ amvdec_write_dos(core, HEVC_IMEM_DMA_ADR, mc_addr_map);
+ amvdec_write_dos(core, HEVC_IMEM_DMA_COUNT, MC_SIZE / 4);
+ amvdec_write_dos(core, HEVC_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
+
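+ /* Poll until the IMEM DMA busy bit (bit 15) clears */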
+ while (i && (readl(core->dos_base + HEVC_IMEM_DMA_CTRL) & 0x8000))
+ i--;
+
+ if (i == 0) {
+ dev_err(dev, "Firmware load fail (DMA hang?)\n");
+ ret = -ENODEV;
+ }
+
+ dma_free_coherent(core->dev, MC_SIZE, mc_addr, mc_addr_map);
+release_firmware:
+ release_firmware(fw);
+ return ret;
+}
+
+static void vdec_hevc_stbuf_init(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ amvdec_write_dos(core, HEVC_STREAM_CONTROL,
+ amvdec_read_dos(core, HEVC_STREAM_CONTROL) & ~1);
+ amvdec_write_dos(core, HEVC_STREAM_START_ADDR, sess->vififo_paddr);
+ amvdec_write_dos(core, HEVC_STREAM_END_ADDR,
+ sess->vififo_paddr + sess->vififo_size);
+ amvdec_write_dos(core, HEVC_STREAM_RD_PTR, sess->vififo_paddr);
+ amvdec_write_dos(core, HEVC_STREAM_WR_PTR, sess->vififo_paddr);
+}
+
+/* VDEC_HEVC specific ESPARSER configuration */
+static void vdec_hevc_conf_esparser(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ /* set vififo_vbuf_rp_sel=>vdec_hevc */
+ amvdec_write_dos(core, DOS_GEN_CTRL0, 3 << 1);
+ amvdec_write_dos(core, HEVC_STREAM_CONTROL,
+ amvdec_read_dos(core, HEVC_STREAM_CONTROL) | BIT(3));
+ amvdec_write_dos(core, HEVC_STREAM_CONTROL,
+ amvdec_read_dos(core, HEVC_STREAM_CONTROL) | 1);
+ amvdec_write_dos(core, HEVC_STREAM_FIFO_CTL,
+ amvdec_read_dos(core, HEVC_STREAM_FIFO_CTL) | BIT(29));
+}
+
+static u32 vdec_hevc_vififo_level(struct amvdec_session *sess)
+{
+ return readl_relaxed(sess->core->dos_base + HEVC_STREAM_LEVEL);
+}
+
+static int vdec_hevc_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ /* Disable interrupt */
+ amvdec_write_dos(core, HEVC_ASSIST_MBOX1_MASK, 0);
+ /* Disable firmware processor */
+ amvdec_write_dos(core, HEVC_MPSR, 0);
+
+ if (sess->priv)
+ codec_ops->stop(sess);
+
+ /* Enable VDEC_HEVC Isolation */
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ GEN_PWR_VDEC_HEVC_SM1,
+ GEN_PWR_VDEC_HEVC_SM1);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ 0xc00, 0xc00);
+
+ /* VDEC_HEVC Memories */
+ amvdec_write_dos(core, DOS_MEM_PD_HEVC, 0xffffffffUL);
+
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_HEVC_SM1,
+ GEN_PWR_VDEC_HEVC_SM1);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_HEVC, GEN_PWR_VDEC_HEVC);
+
+ clk_disable_unprepare(core->vdec_hevc_clk);
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1)
+ clk_disable_unprepare(core->vdec_hevcf_clk);
+
+ return 0;
+}
+
+static int vdec_hevc_start(struct amvdec_session *sess)
+{
+ int ret;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1) {
+ clk_set_rate(core->vdec_hevcf_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_hevcf_clk);
+ if (ret)
+ return ret;
+ }
+
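+ /* Nominal VDEC_HEVC clock rate: 666 MHz */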
+ clk_set_rate(core->vdec_hevc_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_hevc_clk);
+ if (ret)
+ return ret;
+
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_HEVC_SM1, 0);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_HEVC, 0);
+ usleep_range(10, 20);
+
+ /* Reset VDEC_HEVC */
+ amvdec_write_dos(core, DOS_SW_RESET3, 0xffffffff);
+ amvdec_write_dos(core, DOS_SW_RESET3, 0x00000000);
+
+ amvdec_write_dos(core, DOS_GCLK_EN3, 0xffffffff);
+
+ /* VDEC_HEVC Memories */
+ amvdec_write_dos(core, DOS_MEM_PD_HEVC, 0x00000000);
+
+ /* Remove VDEC_HEVC Isolation */
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ GEN_PWR_VDEC_HEVC_SM1, 0);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ 0xc00, 0);
+
+ amvdec_write_dos(core, DOS_SW_RESET3, 0xffffffff);
+ amvdec_write_dos(core, DOS_SW_RESET3, 0x00000000);
+
+ vdec_hevc_stbuf_init(sess);
+
+ ret = vdec_hevc_load_firmware(sess, sess->fmt_out->firmware_path);
+ if (ret)
+ goto stop;
+
+ ret = codec_ops->start(sess);
+ if (ret)
+ goto stop;
+
+ amvdec_write_dos(core, DOS_SW_RESET3, BIT(12) | BIT(11));
+ amvdec_write_dos(core, DOS_SW_RESET3, 0);
+ amvdec_read_dos(core, DOS_SW_RESET3);
+
+ amvdec_write_dos(core, HEVC_MPSR, 1);
+ /* Let the firmware settle */
+ usleep_range(10, 20);
+
+ return 0;
+
+stop:
+ vdec_hevc_stop(sess);
+ return ret;
+}
+
+struct amvdec_ops vdec_hevc_ops = {
+ .start = vdec_hevc_start,
+ .stop = vdec_hevc_stop,
+ .conf_esparser = vdec_hevc_conf_esparser,
+ .vififo_level = vdec_hevc_vififo_level,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.h b/drivers/staging/media/meson/vdec/vdec_hevc.h
new file mode 100644
index 000000000000..cd576a73a966
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_hevc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Maxime Jourdan <maxi.jourdan@wanadoo.fr>
+ */
+
+#ifndef __MESON_VDEC_VDEC_HEVC_H_
+#define __MESON_VDEC_VDEC_HEVC_H_
+
+#include "vdec.h"
+
+extern struct amvdec_ops vdec_hevc_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
index ea39f8209ec7..eabbebab2da2 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.c
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -8,10 +8,25 @@
#include "vdec.h"
#include "vdec_1.h"
+#include "vdec_hevc.h"
#include "codec_mpeg12.h"
+#include "codec_h264.h"
+#include "codec_vp9.h"
static const struct amvdec_format vdec_formats_gxbb[] = {
{
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxbb_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
@@ -21,6 +36,7 @@ static const struct amvdec_format vdec_formats_gxbb[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
@@ -31,11 +47,36 @@ static const struct amvdec_format vdec_formats_gxbb[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
static const struct amvdec_format vdec_formats_gxl[] = {
{
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .min_buffers = 16,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_hevc_ops,
+ .codec_ops = &codec_vp9_ops,
+ .firmware_path = "meson/vdec/gxl_vp9.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxl_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
@@ -45,6 +86,7 @@ static const struct amvdec_format vdec_formats_gxl[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
@@ -55,11 +97,24 @@ static const struct amvdec_format vdec_formats_gxl[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
static const struct amvdec_format vdec_formats_gxm[] = {
{
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxm_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
@@ -69,6 +124,7 @@ static const struct amvdec_format vdec_formats_gxm[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
@@ -79,11 +135,36 @@ static const struct amvdec_format vdec_formats_gxm[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
static const struct amvdec_format vdec_formats_g12a[] = {
{
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .min_buffers = 16,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_hevc_ops,
+ .codec_ops = &codec_vp9_ops,
+ .firmware_path = "meson/vdec/g12a_vp9.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/g12a_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
@@ -93,6 +174,7 @@ static const struct amvdec_format vdec_formats_g12a[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
@@ -103,11 +185,36 @@ static const struct amvdec_format vdec_formats_g12a[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
static const struct amvdec_format vdec_formats_sm1[] = {
{
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .min_buffers = 16,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_hevc_ops,
+ .codec_ops = &codec_vp9_ops,
+ .firmware_path = "meson/vdec/sm1_vp9_mmu.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/g12a_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
.pixfmt = V4L2_PIX_FMT_MPEG1,
.min_buffers = 8,
.max_buffers = 8,
@@ -117,6 +224,7 @@ static const struct amvdec_format vdec_formats_sm1[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.min_buffers = 8,
@@ -127,6 +235,7 @@ static const struct amvdec_format vdec_formats_sm1[] = {
.codec_ops = &codec_mpeg12_ops,
.firmware_path = "meson/vdec/gxl_mpeg12.bin",
.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
},
};
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 673aa3a5f2bd..66975a37dc85 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -1111,7 +1111,7 @@ static int iss_video_open(struct file *file)
goto done;
}
- ret = v4l2_pipeline_pm_use(&video->video.entity, 1);
+ ret = v4l2_pipeline_pm_get(&video->video.entity);
if (ret < 0) {
omap4iss_put(video->iss);
goto done;
@@ -1160,7 +1160,7 @@ static int iss_video_release(struct file *file)
/* Disable streaming and free the buffers queue resources. */
iss_video_streamoff(file, vfh, video->type);
- v4l2_pipeline_pm_use(&video->video.entity, 0);
+ v4l2_pipeline_pm_put(&video->video.entity);
/* Release the videobuf2 queue */
vb2_queue_release(&handle->queue);
@@ -1242,7 +1242,7 @@ int omap4iss_video_init(struct iss_video *video, const char *name)
video->video.fops = &iss_video_fops;
snprintf(video->video.name, sizeof(video->video.name),
"OMAP4 ISS %s %s", name, direction);
- video->video.vfl_type = VFL_TYPE_GRABBER;
+ video->video.vfl_type = VFL_TYPE_VIDEO;
video->video.release = video_device_release_empty;
video->video.ioctl_ops = &iss_video_ioctl_ops;
video->pipe.stream_state = ISS_PIPELINE_STREAM_STOPPED;
@@ -1270,7 +1270,7 @@ int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev)
video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT;
video->video.device_caps |= V4L2_CAP_STREAMING;
- ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
if (ret < 0)
dev_err(video->iss->dev,
"could not register video device (%d)\n", ret);
diff --git a/drivers/staging/media/rkisp1/TODO b/drivers/staging/media/rkisp1/TODO
index 03cd9a4e70f7..0aa9877dd64a 100644
--- a/drivers/staging/media/rkisp1/TODO
+++ b/drivers/staging/media/rkisp1/TODO
@@ -1,4 +1,3 @@
-* Fix serialization on subdev ops.
* Don't use v4l2_async_notifier_parse_fwnode_endpoints_by_port().
e.g. isp_parse_of_endpoints in drivers/media/platform/omap3isp/isp.c
cio2_parse_firmware in drivers/media/pci/intel/ipu3/ipu3-cio2.c.
diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c
index 524e0dd38c1b..24fe6a7888aa 100644
--- a/drivers/staging/media/rkisp1/rkisp1-capture.c
+++ b/drivers/staging/media/rkisp1/rkisp1-capture.c
@@ -937,10 +937,7 @@ static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue)
rkisp1_return_all_buffers(cap, VB2_BUF_STATE_ERROR);
- ret = v4l2_pipeline_pm_use(&node->vdev.entity, 0);
- if (ret)
- dev_err(rkisp1->dev, "pipeline close failed error:%d\n", ret);
-
+ v4l2_pipeline_pm_put(&node->vdev.entity);
ret = pm_runtime_put(rkisp1->dev);
if (ret)
dev_err(rkisp1->dev, "power down failed error:%d\n", ret);
@@ -999,7 +996,7 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
dev_err(cap->rkisp1->dev, "power up failed %d\n", ret);
goto err_destroy_dummy;
}
- ret = v4l2_pipeline_pm_use(entity, 1);
+ ret = v4l2_pipeline_pm_get(entity);
if (ret) {
dev_err(cap->rkisp1->dev, "open cif pipeline failed %d\n", ret);
goto err_pipe_pm_put;
@@ -1025,7 +1022,7 @@ err_pipe_disable:
rkisp1_pipeline_sink_walk(entity, NULL, rkisp1_pipeline_disable_cb);
err_stop_stream:
rkisp1_stream_stop(cap);
- v4l2_pipeline_pm_use(entity, 0);
+ v4l2_pipeline_pm_put(entity);
err_pipe_pm_put:
pm_runtime_put(cap->rkisp1->dev);
err_destroy_dummy:
@@ -1344,7 +1341,7 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap)
q = &node->buf_queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
q->drv_priv = cap;
q->ops = &rkisp1_vb2_ops;
q->mem_ops = &vb2_dma_contig_memops;
@@ -1362,7 +1359,7 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap)
vdev->queue = q;
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(cap->rkisp1->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h
index 369a401b098a..b291cc60de8e 100644
--- a/drivers/staging/media/rkisp1/rkisp1-common.h
+++ b/drivers/staging/media/rkisp1/rkisp1-common.h
@@ -96,6 +96,7 @@ struct rkisp1_sensor_async {
* @sink_crop: crop for sink pad
* @src_fmt: output format
* @src_crop: output size
+ * @ops_lock: ops serialization
*
* @is_dphy_errctrl_disabled : if dphy errctrl is disabled (avoid endless interrupt)
* @frame_sequence: used to synchronize frame_id between video devices.
@@ -107,6 +108,7 @@ struct rkisp1_isp {
struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
const struct rkisp1_isp_mbus_info *sink_fmt;
const struct rkisp1_isp_mbus_info *src_fmt;
+ struct mutex ops_lock;
bool is_dphy_errctrl_disabled;
atomic_t frame_sequence;
};
@@ -224,6 +226,7 @@ struct rkisp1_resizer {
struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
const struct rkisp1_rsz_config *config;
enum rkisp1_fmt_pix_type fmt_type;
+ struct mutex ops_lock;
};
struct rkisp1_debug {
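
The @ops_lock fields documented above serialize the v4l2_subdev pad operations of the ISP and resizer entities, closing the "Fix serialization on subdev ops" item removed from the TODO earlier in this series. A hedged sketch of the intended pattern, mirroring the rkisp1_isp_get_fmt() hunk further down (each mutex is initialized in the corresponding register function):

static int example_get_fmt(struct v4l2_subdev *sd,
                           struct v4l2_subdev_pad_config *cfg,
                           struct v4l2_subdev_format *fmt)
{
        struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);

        /* shared pad formats/crops are only touched under ops_lock */
        mutex_lock(&isp->ops_lock);
        fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which);
        mutex_unlock(&isp->ops_lock);
        return 0;
}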
diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c
index 558126e66465..b1b3c058e957 100644
--- a/drivers/staging/media/rkisp1/rkisp1-dev.c
+++ b/drivers/staging/media/rkisp1/rkisp1-dev.c
@@ -128,7 +128,7 @@ static int rkisp1_create_links(struct rkisp1_device *rkisp1)
ret = media_entity_get_fwnode_pad(&sd->entity, sd->fwnode,
MEDIA_PAD_FL_SOURCE);
- if (ret) {
+ if (ret < 0) {
dev_err(sd->dev, "failed to find src pad for %s\n",
sd->name);
return ret;
@@ -145,14 +145,15 @@ static int rkisp1_create_links(struct rkisp1_device *rkisp1)
flags = 0;
}
- flags = MEDIA_LNK_FL_ENABLED;
+ flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
/* create ISP->RSZ->CAP links */
for (i = 0; i < 2; i++) {
source = &rkisp1->isp.sd.entity;
sink = &rkisp1->resizer_devs[i].sd.entity;
ret = media_create_pad_link(source, RKISP1_ISP_PAD_SOURCE_VIDEO,
- sink, RKISP1_RSZ_PAD_SINK, flags);
+ sink, RKISP1_RSZ_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
@@ -219,19 +220,17 @@ static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier)
container_of(notifier, struct rkisp1_device, notifier);
int ret;
- mutex_lock(&rkisp1->media_dev.graph_mutex);
ret = rkisp1_create_links(rkisp1);
if (ret)
- goto unlock;
+ return ret;
+
ret = v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev);
if (ret)
- goto unlock;
+ return ret;
dev_dbg(rkisp1->dev, "Async subdev notifier completed\n");
-unlock:
- mutex_unlock(&rkisp1->media_dev.graph_mutex);
- return ret;
+ return 0;
}
static int rkisp1_fwnode_parse(struct device *dev,
@@ -502,8 +501,7 @@ static int rkisp1_probe(struct platform_device *pdev)
strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
sizeof(rkisp1->media_dev.model));
rkisp1->media_dev.dev = &pdev->dev;
- strscpy(rkisp1->media_dev.bus_info,
- "platform: " RKISP1_DRIVER_NAME,
+ strscpy(rkisp1->media_dev.bus_info, RKISP1_BUS_INFO,
sizeof(rkisp1->media_dev.bus_info));
media_device_init(&rkisp1->media_dev);
diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c
index 328c7ea60971..fa53f05e37d8 100644
--- a/drivers/staging/media/rkisp1/rkisp1-isp.c
+++ b/drivers/staging/media/rkisp1/rkisp1-isp.c
@@ -28,9 +28,9 @@
#define RKISP1_DIR_SINK_SRC (RKISP1_DIR_SINK | RKISP1_DIR_SRC)
/*
- * NOTE: MIPI controller and input MUX are also configured in this file,
- * because ISP Subdev is not only describe ISP submodule(input size,format,
- * output size, format), but also a virtual route device.
+ * NOTE: MIPI controller and input MUX are also configured in this file.
+ * This is because ISP Subdev describes not only ISP submodule (input size,
+ * format, output size, format), but also a virtual route device.
*/
/*
@@ -504,7 +504,7 @@ static int rkisp1_config_cif(struct rkisp1_device *rkisp1)
return 0;
}
-static int rkisp1_isp_stop(struct rkisp1_device *rkisp1)
+static void rkisp1_isp_stop(struct rkisp1_device *rkisp1)
{
u32 val;
@@ -540,8 +540,6 @@ static int rkisp1_isp_stop(struct rkisp1_device *rkisp1)
RKISP1_CIF_IRCL_MIPI_SW_RST | RKISP1_CIF_IRCL_ISP_SW_RST,
RKISP1_CIF_IRCL);
rkisp1_write(rkisp1, 0x0, RKISP1_CIF_IRCL);
-
- return 0;
}
static void rkisp1_config_clk(struct rkisp1_device *rkisp1)
@@ -555,7 +553,7 @@ static void rkisp1_config_clk(struct rkisp1_device *rkisp1)
rkisp1_write(rkisp1, val, RKISP1_CIF_ICCL);
}
-static int rkisp1_isp_start(struct rkisp1_device *rkisp1)
+static void rkisp1_isp_start(struct rkisp1_device *rkisp1)
{
struct rkisp1_sensor_async *sensor = rkisp1->active_sensor;
u32 val;
@@ -580,8 +578,6 @@ static int rkisp1_isp_start(struct rkisp1_device *rkisp1)
* the MIPI interface and before starting the sensor output.
*/
usleep_range(1000, 1200);
-
- return 0;
}
/* ----------------------------------------------------------------------------
@@ -683,7 +679,7 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
src_fmt->code = format->code;
mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
- if (!mbus_info) {
+ if (!mbus_info || !(mbus_info->direction & RKISP1_DIR_SRC)) {
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
}
@@ -767,7 +763,7 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
which);
sink_fmt->code = format->code;
mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
- if (!mbus_info) {
+ if (!mbus_info || !(mbus_info->direction & RKISP1_DIR_SINK)) {
sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
}
@@ -795,7 +791,9 @@ static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+ mutex_lock(&isp->ops_lock);
fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which);
+ mutex_unlock(&isp->ops_lock);
return 0;
}
@@ -805,6 +803,7 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+ mutex_lock(&isp->ops_lock);
if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
rkisp1_isp_set_sink_fmt(isp, cfg, &fmt->format, fmt->which);
else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
@@ -813,6 +812,7 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad,
fmt->which);
+ mutex_unlock(&isp->ops_lock);
return 0;
}
@@ -821,11 +821,13 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+ int ret = 0;
if (sel->pad != RKISP1_ISP_PAD_SOURCE_VIDEO &&
sel->pad != RKISP1_ISP_PAD_SINK_VIDEO)
return -EINVAL;
+ mutex_lock(&isp->ops_lock);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
@@ -848,10 +850,10 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
sel->which);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
-
- return 0;
+ mutex_unlock(&isp->ops_lock);
+ return ret;
}
static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
@@ -861,21 +863,23 @@ static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
struct rkisp1_device *rkisp1 =
container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev);
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+ int ret = 0;
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
dev_dbg(rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
-
+ mutex_lock(&isp->ops_lock);
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
rkisp1_isp_set_sink_crop(isp, cfg, &sel->r, sel->which);
else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
rkisp1_isp_set_src_crop(isp, cfg, &sel->r, sel->which);
else
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+ mutex_unlock(&isp->ops_lock);
+ return ret;
}
static int rkisp1_subdev_link_validate(struct media_link *link)
@@ -936,13 +940,12 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
{
struct rkisp1_device *rkisp1 =
container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev);
+ struct rkisp1_isp *isp = &rkisp1->isp;
struct v4l2_subdev *sensor_sd;
int ret = 0;
if (!enable) {
- ret = rkisp1_isp_stop(rkisp1);
- if (ret)
- return ret;
+ rkisp1_isp_stop(rkisp1);
rkisp1_mipi_csi2_stop(rkisp1->active_sensor);
return 0;
}
@@ -953,22 +956,23 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
rkisp1->active_sensor = container_of(sensor_sd->asd,
struct rkisp1_sensor_async, asd);
+ if (rkisp1->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY)
+ return -EINVAL;
+
atomic_set(&rkisp1->isp.frame_sequence, -1);
+ mutex_lock(&isp->ops_lock);
ret = rkisp1_config_cif(rkisp1);
if (ret)
- return ret;
-
- if (rkisp1->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY)
- return -EINVAL;
+ goto mutex_unlock;
ret = rkisp1_mipi_csi2_start(&rkisp1->isp, rkisp1->active_sensor);
if (ret)
- return ret;
+ goto mutex_unlock;
- ret = rkisp1_isp_start(rkisp1);
- if (ret)
- rkisp1_mipi_csi2_stop(rkisp1->active_sensor);
+ rkisp1_isp_start(rkisp1);
+mutex_unlock:
+ mutex_unlock(&isp->ops_lock);
return ret;
}
@@ -1028,6 +1032,7 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1,
isp->sink_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SINK_PAD_FMT);
isp->src_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SRC_PAD_FMT);
+ mutex_init(&isp->ops_lock);
ret = media_entity_pads_init(&sd->entity, RKISP1_ISP_PAD_MAX, pads);
if (ret)
return ret;
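
Note the ordering in the s_stream rework above: the mbus.type sanity check moves ahead of mutex_lock(), so its early -EINVAL return needs no unlock, while every failure after the lock funnels through the single mutex_unlock label. A compact sketch of that shape (do_configure() and do_start_csi2() are hypothetical stand-ins for the locked configuration steps):

static int example_s_stream_on(struct rkisp1_isp *isp)
{
        int ret;

        /* checks that need no cleanup happen before taking the lock */
        mutex_lock(&isp->ops_lock);
        ret = do_configure(isp);                /* hypothetical helper */
        if (ret)
                goto unlock;                    /* one unlock path for all errors */
        ret = do_start_csi2(isp);               /* hypothetical helper */
unlock:
        mutex_unlock(&isp->ops_lock);
        return ret;
}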
diff --git a/drivers/staging/media/rkisp1/rkisp1-params.c b/drivers/staging/media/rkisp1/rkisp1-params.c
index 781f0ca85af1..44d542caf32b 100644
--- a/drivers/staging/media/rkisp1/rkisp1-params.c
+++ b/drivers/staging/media/rkisp1/rkisp1-params.c
@@ -1605,7 +1605,7 @@ int rkisp1_params_register(struct rkisp1_params *params,
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
goto err_release_queue;
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(&vdev->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c
index 8cdc29c1a178..87799fbf0363 100644
--- a/drivers/staging/media/rkisp1/rkisp1-resizer.c
+++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c
@@ -503,6 +503,8 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
sink_crop->top = 0;
sink_crop->width = sink_fmt->width;
sink_crop->height = sink_fmt->height;
+
+ *r = *sink_crop;
return;
}
@@ -537,15 +539,6 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
rsz->fmt_type = mbus_info->fmt_type;
- if (rsz->id == RKISP1_MAINPATH &&
- mbus_info->fmt_type == RKISP1_FMT_BAYER) {
- sink_crop->left = 0;
- sink_crop->top = 0;
- sink_crop->width = sink_fmt->width;
- sink_crop->height = sink_fmt->height;
- return;
- }
-
 /* Propagate to source pad */
src_fmt->code = sink_fmt->code;
@@ -569,7 +562,9 @@ static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd,
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
+ mutex_lock(&rsz->ops_lock);
fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, cfg, fmt->pad, fmt->which);
+ mutex_unlock(&rsz->ops_lock);
return 0;
}
@@ -580,11 +575,13 @@ static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
+ mutex_lock(&rsz->ops_lock);
if (fmt->pad == RKISP1_RSZ_PAD_SINK)
rkisp1_rsz_set_sink_fmt(rsz, cfg, &fmt->format, fmt->which);
else
rkisp1_rsz_set_src_fmt(rsz, cfg, &fmt->format, fmt->which);
+ mutex_unlock(&rsz->ops_lock);
return 0;
}
@@ -595,10 +592,12 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
struct v4l2_mbus_framefmt *mf_sink;
+ int ret = 0;
if (sel->pad == RKISP1_RSZ_PAD_SRC)
return -EINVAL;
+ mutex_lock(&rsz->ops_lock);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
mf_sink = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK,
@@ -613,10 +612,11 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
sel->which);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ mutex_unlock(&rsz->ops_lock);
+ return ret;
}
static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
@@ -632,7 +632,9 @@ static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
dev_dbg(sd->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+ mutex_lock(&rsz->ops_lock);
rkisp1_rsz_set_sink_crop(rsz, cfg, &sel->r, sel->which);
+ mutex_unlock(&rsz->ops_lock);
return 0;
}
@@ -672,9 +674,11 @@ static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
if (other->is_streaming)
when = RKISP1_SHADOW_REGS_ASYNC;
+ mutex_lock(&rsz->ops_lock);
rkisp1_rsz_config(rsz, when);
rkisp1_dcrop_config(rsz);
+ mutex_unlock(&rsz->ops_lock);
return 0;
}
@@ -720,6 +724,7 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
rsz->fmt_type = RKISP1_DEF_FMT_TYPE;
+ mutex_init(&rsz->ops_lock);
ret = media_entity_pads_init(&sd->entity, 2, pads);
if (ret)
return ret;
diff --git a/drivers/staging/media/rkisp1/rkisp1-stats.c b/drivers/staging/media/rkisp1/rkisp1-stats.c
index d98ea15837de..6dfcbdc3deb8 100644
--- a/drivers/staging/media/rkisp1/rkisp1-stats.c
+++ b/drivers/staging/media/rkisp1/rkisp1-stats.c
@@ -70,8 +70,7 @@ static int rkisp1_stats_querycap(struct file *file,
strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
strscpy(cap->card, vdev->name, sizeof(cap->card));
- strscpy(cap->bus_info, "platform: " RKISP1_DRIVER_NAME,
- sizeof(cap->bus_info));
+ strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
return 0;
}
@@ -487,7 +486,7 @@ int rkisp1_stats_register(struct rkisp1_stats *stats,
if (ret)
goto err_release_queue;
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(&vdev->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
diff --git a/drivers/staging/media/soc_camera/soc_camera.c b/drivers/staging/media/soc_camera/soc_camera.c
index 7b9448e3c9ba..39f513f69b89 100644
--- a/drivers/staging/media/soc_camera/soc_camera.c
+++ b/drivers/staging/media/soc_camera/soc_camera.c
@@ -2068,7 +2068,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
v4l2_disable_ioctl(icd->vdev, VIDIOC_S_STD);
v4l2_disable_ioctl(icd->vdev, VIDIOC_ENUMSTD);
}
- ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(icd->vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_err(icd->pdev, "video_register_device failed: %d\n", ret);
return ret;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index c6ddd46eff82..05a85517ff60 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -414,7 +414,7 @@ static int cedrus_probe(struct platform_device *pdev)
dev->mdev.ops = &cedrus_m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
goto err_m2m;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index bfb4a4820a67..54ee2aa423e2 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -610,8 +610,12 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
goto err_mv_col_buf;
}
+ /*
+ * NOTE: Multiplying by two deviates from CedarX logic, but for some
+ * unknown reason it is needed for H264 4K decoding on H6.
+ */
ctx->codec.h264.intra_pred_buf_size =
- ALIGN(ctx->src_fmt.width, 64) * 5;
+ ALIGN(ctx->src_fmt.width, 64) * 5 * 2;
ctx->codec.h264.intra_pred_buf =
dma_alloc_coherent(dev->dev,
ctx->codec.h264.intra_pred_buf_size,
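
For scale, with ALIGN(x, a) rounding x up to a multiple of a: a 3840-pixel-wide 4K source gives ALIGN(3840, 64) = 3840, so the intra-prediction buffer grows from 3840 * 5 = 19200 bytes to 3840 * 5 * 2 = 38400 bytes, and a 4096-wide source gives 4096 * 10 = 40960 bytes. The doubling is cheap in absolute terms, which makes it a pragmatic workaround for the otherwise unexplained H6 requirement.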
diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c
index e18fd48981da..d3e63512a765 100644
--- a/drivers/staging/media/tegra-vde/vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -949,7 +949,6 @@ static int tegra_vde_runtime_resume(struct device *dev)
static int tegra_vde_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *regs;
struct tegra_vde *vde;
int irq, err;
@@ -959,75 +958,39 @@ static int tegra_vde_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vde);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sxe");
- if (!regs)
- return -ENODEV;
-
- vde->sxe = devm_ioremap_resource(dev, regs);
+ vde->sxe = devm_platform_ioremap_resource_byname(pdev, "sxe");
if (IS_ERR(vde->sxe))
return PTR_ERR(vde->sxe);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bsev");
- if (!regs)
- return -ENODEV;
-
- vde->bsev = devm_ioremap_resource(dev, regs);
+ vde->bsev = devm_platform_ioremap_resource_byname(pdev, "bsev");
if (IS_ERR(vde->bsev))
return PTR_ERR(vde->bsev);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbe");
- if (!regs)
- return -ENODEV;
-
- vde->mbe = devm_ioremap_resource(dev, regs);
+ vde->mbe = devm_platform_ioremap_resource_byname(pdev, "mbe");
if (IS_ERR(vde->mbe))
return PTR_ERR(vde->mbe);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe");
- if (!regs)
- return -ENODEV;
-
- vde->ppe = devm_ioremap_resource(dev, regs);
+ vde->ppe = devm_platform_ioremap_resource_byname(pdev, "ppe");
if (IS_ERR(vde->ppe))
return PTR_ERR(vde->ppe);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mce");
- if (!regs)
- return -ENODEV;
-
- vde->mce = devm_ioremap_resource(dev, regs);
+ vde->mce = devm_platform_ioremap_resource_byname(pdev, "mce");
if (IS_ERR(vde->mce))
return PTR_ERR(vde->mce);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tfe");
- if (!regs)
- return -ENODEV;
-
- vde->tfe = devm_ioremap_resource(dev, regs);
+ vde->tfe = devm_platform_ioremap_resource_byname(pdev, "tfe");
if (IS_ERR(vde->tfe))
return PTR_ERR(vde->tfe);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppb");
- if (!regs)
- return -ENODEV;
-
- vde->ppb = devm_ioremap_resource(dev, regs);
+ vde->ppb = devm_platform_ioremap_resource_byname(pdev, "ppb");
if (IS_ERR(vde->ppb))
return PTR_ERR(vde->ppb);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vdma");
- if (!regs)
- return -ENODEV;
-
- vde->vdma = devm_ioremap_resource(dev, regs);
+ vde->vdma = devm_platform_ioremap_resource_byname(pdev, "vdma");
if (IS_ERR(vde->vdma))
return PTR_ERR(vde->vdma);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "frameid");
- if (!regs)
- return -ENODEV;
-
- vde->frameid = devm_ioremap_resource(dev, regs);
+ vde->frameid = devm_platform_ioremap_resource_byname(pdev, "frameid");
if (IS_ERR(vde->frameid))
return PTR_ERR(vde->frameid);
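
All nine register blocks above collapse a platform_get_resource_byname()/devm_ioremap_resource() pair into one devm_platform_ioremap_resource_byname() call. A sketch of the equivalence; devm_ioremap_resource() converts a NULL resource into ERR_PTR(-EINVAL), which is why the separate -ENODEV check could be dropped without losing error reporting:

/* roughly what the one-line helper expands to */
static void __iomem *example_ioremap_byname(struct platform_device *pdev,
                                            const char *name)
{
        struct resource *res;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        /* a NULL res becomes ERR_PTR(-EINVAL) inside the call below */
        return devm_ioremap_resource(&pdev->dev, res);
}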
diff --git a/drivers/media/usb/usbvision/Kconfig b/drivers/staging/media/usbvision/Kconfig
index e1039fdfb0ea..c6e1afb5ac48 100644
--- a/drivers/media/usb/usbvision/Kconfig
+++ b/drivers/staging/media/usbvision/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_USBVISION
- tristate "USB video devices based on Nogatech NT1003/1004/1005"
- depends on I2C && VIDEO_V4L2
+ tristate "USB video devices based on Nogatech NT1003/1004/1005 (Deprecated)"
+ depends on MEDIA_USB_SUPPORT && I2C && VIDEO_V4L2
select VIDEO_TUNER
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
help
@@ -9,5 +9,10 @@ config VIDEO_USBVISION
NT1003/1004/1005 USB Bridges. This driver enables using those
devices.
+ This driver is deprecated and scheduled for removal by the
+ end of 2020. See the TODO file in drivers/staging/media/usbvision
+ for a list of actions that have to be done in order to prevent
+ removal of this driver.
+
To compile this driver as a module, choose M here: the
module will be called usbvision.
diff --git a/drivers/media/usb/usbvision/Makefile b/drivers/staging/media/usbvision/Makefile
index 4d8541b9d4f8..4d8541b9d4f8 100644
--- a/drivers/media/usb/usbvision/Makefile
+++ b/drivers/staging/media/usbvision/Makefile
diff --git a/drivers/staging/media/usbvision/TODO b/drivers/staging/media/usbvision/TODO
new file mode 100644
index 000000000000..e9fb4d125581
--- /dev/null
+++ b/drivers/staging/media/usbvision/TODO
@@ -0,0 +1,11 @@
+The driver is deprecated and scheduled for removal by the end
+of 2020.
+
+In order to prevent removal, the following actions would have to
+be taken:
+
+- clean up the code
+- convert to the vb2 framework
+- fix the disconnect and free-on-last-user handling (i.e., add
+ a release callback for struct v4l2_device and rework the code
+ to use that correctly).
diff --git a/drivers/media/usb/usbvision/usbvision-cards.c b/drivers/staging/media/usbvision/usbvision-cards.c
index 5e0cbbfe7c86..5e0cbbfe7c86 100644
--- a/drivers/media/usb/usbvision/usbvision-cards.c
+++ b/drivers/staging/media/usbvision/usbvision-cards.c
diff --git a/drivers/media/usb/usbvision/usbvision-cards.h b/drivers/staging/media/usbvision/usbvision-cards.h
index 07ec83512743..07ec83512743 100644
--- a/drivers/media/usb/usbvision/usbvision-cards.h
+++ b/drivers/staging/media/usbvision/usbvision-cards.h
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/staging/media/usbvision/usbvision-core.c
index f05a5c84dc18..f05a5c84dc18 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/staging/media/usbvision/usbvision-core.c
diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/staging/media/usbvision/usbvision-i2c.c
index 6e4df3335b1b..6e4df3335b1b 100644
--- a/drivers/media/usb/usbvision/usbvision-i2c.c
+++ b/drivers/staging/media/usbvision/usbvision-i2c.c
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/staging/media/usbvision/usbvision-video.c
index 5ca2c2f35fe2..3ea25fdcf767 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/staging/media/usbvision/usbvision-video.c
@@ -1271,7 +1271,7 @@ static int usbvision_register_video(struct usb_usbvision *usbvision)
if (usbvision->have_tuner)
usbvision->vdev.device_caps |= V4L2_CAP_TUNER;
- if (video_register_device(&usbvision->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
+ if (video_register_device(&usbvision->vdev, VFL_TYPE_VIDEO, video_nr) < 0)
goto err_exit;
printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
usbvision->nr, video_device_node_name(&usbvision->vdev));
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/staging/media/usbvision/usbvision.h
index 11539578e8d2..11539578e8d2 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/staging/media/usbvision/usbvision.h
diff --git a/drivers/staging/most/Documentation/ABI/configfs-most.txt b/drivers/staging/most/Documentation/ABI/configfs-most.txt
deleted file mode 100644
index 2bf811449b0b..000000000000
--- a/drivers/staging/most/Documentation/ABI/configfs-most.txt
+++ /dev/null
@@ -1,204 +0,0 @@
-What: /sys/kernel/config/most_<component>
-Date: March 8, 2019
-KernelVersion: 5.2
-Description: Interface is used to configure and connect device channels
- to component drivers.
-
- Attributes are visible only when configfs is mounted. To mount
- configfs in /sys/kernel/config directory use:
- # mount -t configfs none /sys/kernel/config/
-
-
-What: /sys/kernel/config/most_cdev/<link>
-Date: March 8, 2019
-KernelVersion: 5.2
-Description:
- The attributes:
-
- buffer_size configure the buffer size for this channel
-
- subbuffer_size configure the sub-buffer size for this channel
- (needed for synchronous and isochrnous data)
-
-
- num_buffers configure number of buffers used for this
- channel
-
- datatype configure type of data that will travel over
- this channel
-
- direction configure whether this link will be an input
- or output
-
- dbr_size configure DBR data buffer size (this is used
- for MediaLB communication only)
-
- packets_per_xact
- configure the number of packets that will be
- collected from the network before being
- transmitted via USB (this is used for USB
- communication only)
-
- device name of the device the link is to be attached to
-
- channel name of the channel the link is to be attached to
-
- comp_params pass parameters needed by some components
-
- create_link write '1' to this attribute to trigger the
- creation of the link. In case of speculative
- configuration, the creation is post-poned until
- a physical device is being attached to the bus.
-
- destroy_link write '1' to this attribute to destroy an
- active link
-
-What: /sys/kernel/config/most_video/<link>
-Date: March 8, 2019
-KernelVersion: 5.2
-Description:
- The attributes:
-
- buffer_size configure the buffer size for this channel
-
- subbuffer_size configure the sub-buffer size for this channel
- (needed for synchronous and isochrnous data)
-
-
- num_buffers configure number of buffers used for this
- channel
-
- datatype configure type of data that will travel over
- this channel
-
- direction configure whether this link will be an input
- or output
-
- dbr_size configure DBR data buffer size (this is used
- for MediaLB communication only)
-
- packets_per_xact
- configure the number of packets that will be
- collected from the network before being
- transmitted via USB (this is used for USB
- communication only)
-
- device name of the device the link is to be attached to
-
- channel name of the channel the link is to be attached to
-
- comp_params pass parameters needed by some components
-
- create_link write '1' to this attribute to trigger the
- creation of the link. In case of speculative
- configuration, the creation is post-poned until
- a physical device is being attached to the bus.
-
- destroy_link write '1' to this attribute to destroy an
- active link
-
-What: /sys/kernel/config/most_net/<link>
-Date: March 8, 2019
-KernelVersion: 5.2
-Description:
- The attributes:
-
- buffer_size configure the buffer size for this channel
-
- subbuffer_size configure the sub-buffer size for this channel
- (needed for synchronous and isochrnous data)
-
-
- num_buffers configure number of buffers used for this
- channel
-
- datatype configure type of data that will travel over
- this channel
-
- direction configure whether this link will be an input
- or output
-
- dbr_size configure DBR data buffer size (this is used
- for MediaLB communication only)
-
- packets_per_xact
- configure the number of packets that will be
- collected from the network before being
- transmitted via USB (this is used for USB
- communication only)
-
- device name of the device the link is to be attached to
-
- channel name of the channel the link is to be attached to
-
- comp_params pass parameters needed by some components
-
- create_link write '1' to this attribute to trigger the
- creation of the link. In case of speculative
- configuration, the creation is post-poned until
- a physical device is being attached to the bus.
-
- destroy_link write '1' to this attribute to destroy an
- active link
-
-What: /sys/kernel/config/most_sound/<card>
-Date: March 8, 2019
-KernelVersion: 5.2
-Description:
- The attributes:
-
- create_card write '1' to this attribute to trigger the
- registration of the sound card with the ALSA
- subsystem.
-
-What: /sys/kernel/config/most_sound/<card>/<link>
-Date: March 8, 2019
-KernelVersion: 5.2
-Description:
- The attributes:
-
- buffer_size configure the buffer size for this channel
-
- subbuffer_size configure the sub-buffer size for this channel
- (needed for synchronous and isochrnous data)
-
-
- num_buffers configure number of buffers used for this
- channel
-
- datatype configure type of data that will travel over
- this channel
-
- direction configure whether this link will be an input
- or output
-
- dbr_size configure DBR data buffer size (this is used
- for MediaLB communication only)
-
- packets_per_xact
- configure the number of packets that will be
- collected from the network before being
- transmitted via USB (this is used for USB
- communication only)
-
- device name of the device the link is to be attached to
-
- channel name of the channel the link is to be attached to
-
- comp_params pass parameters needed by some components
-
- create_link write '1' to this attribute to trigger the
- creation of the link. In case of speculative
- configuration, the creation is post-poned until
- a physical device is being attached to the bus.
-
- destroy_link write '1' to this attribute to destroy an
- active link
-
-What: /sys/kernel/config/rdma_cm/<hca>/ports/<port-num>/default_roce_tos
-Date: March 8, 2019
-KernelVersion: 5.2
-Description: RDMA-CM QPs from HCA <hca> at port <port-num>
- will be created with this TOS as default.
- This can be overridden by using the rdma_set_option API.
- The possible RoCE TOS values are 0-255.
diff --git a/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt b/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt
deleted file mode 100644
index d8fa841e3742..000000000000
--- a/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt
+++ /dev/null
@@ -1,313 +0,0 @@
-What: /sys/bus/most/devices/.../description
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Provides information about the interface type and the physical
- location of the device. Hardware attached via USB, for instance,
- might return <usb_device 1-1.1:1.0>
-Users:
-
-What: /sys/bus/most/devices/.../interface
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the type of peripheral interface the device uses.
-Users:
-
-What: /sys/bus/most/devices/.../dci
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- If the network interface controller is attached via USB, a dci
- directory is created that allows applications to read and
- write the controller's DCI registers.
-Users:
-
-What: /sys/bus/most/devices/.../dci/arb_address
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to set an arbitrary DCI register address an
- application wants to read from or write to.
-Users:
-
-What: /sys/bus/most/devices/.../dci/arb_value
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to read and write the DCI register whose address
- is stored in arb_address.
-Users:
-
-What: /sys/bus/most/devices/.../dci/mep_eui48_hi
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to check and configure the MAC address.
-Users:
-
-What: /sys/bus/most/devices/.../dci/mep_eui48_lo
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to check and configure the MAC address.
-Users:
-
-What: /sys/bus/most/devices/.../dci/mep_eui48_mi
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to check and configure the MAC address.
-Users:
-
-What: /sys/bus/most/devices/.../dci/mep_filter
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to check and configure the MEP filter address.
-Users:
-
-What: /sys/bus/most/devices/.../dci/mep_hash0
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to check and configure the MEP hash table.
-Users:
-
-What: /sys/bus/most/devices/.../dci/mep_hash1
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to check and configure the MEP hash table.
-Users:
-
-What: /sys/bus/most/devices/.../dci/mep_hash2
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to check and configure the MEP hash table.
-Users:
-
-What: /sys/bus/most/devices/.../dci/mep_hash3
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to check and configure the MEP hash table.
-Users:
-
-What: /sys/bus/most/devices/.../dci/ni_state
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the current network interface state.
-Users:
-
-What: /sys/bus/most/devices/.../dci/node_address
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the current node address.
-Users:
-
-What: /sys/bus/most/devices/.../dci/node_position
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the current node position.
-Users:
-
-What: /sys/bus/most/devices/.../dci/packet_bandwidth
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the configured packet bandwidth.
-Users:
-
-What: /sys/bus/most/devices/.../dci/sync_ep
-Date: June 2016
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Triggers the controller's synchronization process for a certain
- endpoint.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- For every channel of the device a directory is created, whose
- name is dictated by the HDM. This enables an application to
- collect information about the channel's capabilities and
- configure it.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/available_datatypes
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the data types the current channel can transport.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/available_directions
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the directions the current channel is capable of.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/number_of_packet_buffers
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the number of packet buffers the current channel can
- handle.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/number_of_stream_buffers
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the number of streaming buffers the current channel can
- handle.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/size_of_packet_buffer
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the size of a packet buffer the current channel can
- handle.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/size_of_stream_buffer
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates the size of a streaming buffer the current channel can
- handle.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/set_number_of_buffers
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is to configure the number of buffers of the current channel.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/set_buffer_size
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is to configure the size of a buffer of the current channel.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/set_direction
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is to configure the direction of the current channel.
- The following strings will be accepted:
- 'dir_tx',
- 'dir_rx'
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/set_datatype
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is to configure the data type of the current channel.
- The following strings will be accepted:
- 'control',
- 'async',
- 'sync',
- 'isoc_avp'
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/set_subbuffer_size
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is to configure the subbuffer size of the current channel.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/set_packets_per_xact
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is to configure the number of packets per transaction of
- the current channel. This is only needed network interface
- controller is attached via USB.
-Users:
-
-What: /sys/bus/most/devices/.../<channel>/channel_starving
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- Indicates whether current channel ran out of buffers.
-Users:
-
-What: /sys/bus/most/drivers/mostcore/add_link
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to link a channel to a component of the
- mostcore. A link created by writing to this file is
- referred to as pipe.
-Users:
-
-What: /sys/bus/most/drivers/mostcore/remove_link
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to unlink a channel from a component.
-Users:
-
-What: /sys/bus/most/drivers/mostcore/components
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to retrieve a list of registered components.
-Users:
-
-What: /sys/bus/most/drivers/mostcore/links
-Date: March 2017
-KernelVersion: 4.15
-Contact: Christian Gromm <christian.gromm@microchip.com>
-Description:
- This is used to retrieve a list of established links.
-Users:
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index 6262eb25c80b..c5a99f750abe 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-menuconfig MOST
+menuconfig MOST_COMPONENTS
tristate "MOST support"
- depends on HAS_DMA && CONFIGFS_FS
+ depends on HAS_DMA && CONFIGFS_FS && MOST
default n
help
Say Y here if you want to enable MOST support.
@@ -16,7 +16,7 @@ menuconfig MOST
-if MOST
+if MOST_COMPONENTS
source "drivers/staging/most/cdev/Kconfig"
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index 20a99ecb37c4..a803a98654a8 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MOST) += most_core.o
-most_core-y := core.o
-most_core-y += configfs.o
obj-$(CONFIG_MOST_CDEV) += cdev/
obj-$(CONFIG_MOST_NET) += net/
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index 71943d17f825..cc1e3dea196d 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -16,8 +16,7 @@
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
-
-#include "../most.h"
+#include <linux/most.h>
#define CHRDEV_REGION_SIZE 50
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 16593281fcda..8e0f27e61652 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -20,8 +20,7 @@
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/kthread.h>
-
-#include "../most.h"
+#include <linux/most.h>
#include "hal.h"
#include "errors.h"
#include "sysfs.h"
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 2980f7065846..893a8babdb2f 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -13,8 +13,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/err.h>
-
-#include "../most.h"
+#include <linux/most.h>
enum { CH_RX, CH_TX, NUM_CHANNELS };
diff --git a/drivers/staging/most/most.h b/drivers/staging/most/most.h
deleted file mode 100644
index 232e01b7f5d2..000000000000
--- a/drivers/staging/most/most.h
+++ /dev/null
@@ -1,337 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * most.h - API for component and adapter drivers
- *
- * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
- */
-
-#ifndef __MOST_CORE_H__
-#define __MOST_CORE_H__
-
-#include <linux/types.h>
-#include <linux/device.h>
-
-struct module;
-struct interface_private;
-
-/**
- * Interface type
- */
-enum most_interface_type {
- ITYPE_LOOPBACK = 1,
- ITYPE_I2C,
- ITYPE_I2S,
- ITYPE_TSI,
- ITYPE_HBI,
- ITYPE_MEDIALB_DIM,
- ITYPE_MEDIALB_DIM2,
- ITYPE_USB,
- ITYPE_PCIE
-};
-
-/**
- * Channel direction.
- */
-enum most_channel_direction {
- MOST_CH_RX = 1 << 0,
- MOST_CH_TX = 1 << 1,
-};
-
-/**
- * Channel data type.
- */
-enum most_channel_data_type {
- MOST_CH_CONTROL = 1 << 0,
- MOST_CH_ASYNC = 1 << 1,
- MOST_CH_ISOC = 1 << 2,
- MOST_CH_SYNC = 1 << 5,
-};
-
-enum most_status_flags {
- /* MBO was processed successfully (data was send or received )*/
- MBO_SUCCESS = 0,
- /* The MBO contains wrong or missing information. */
- MBO_E_INVAL,
- /* MBO was completed as HDM Channel will be closed */
- MBO_E_CLOSE,
-};
-
-/**
- * struct most_channel_capability - Channel capability
- * @direction: Supported channel directions.
- * The value is bitwise OR-combination of the values from the
- * enumeration most_channel_direction. Zero is allowed value and means
- * "channel may not be used".
- * @data_type: Supported channel data types.
- * The value is bitwise OR-combination of the values from the
- * enumeration most_channel_data_type. Zero is allowed value and means
- * "channel may not be used".
- * @num_buffers_packet: Maximum number of buffers supported by this channel
- * for packet data types (Async,Control,QoS)
- * @buffer_size_packet: Maximum buffer size supported by this channel
- * for packet data types (Async,Control,QoS)
- * @num_buffers_streaming: Maximum number of buffers supported by this channel
- * for streaming data types (Sync,AV Packetized)
- * @buffer_size_streaming: Maximum buffer size supported by this channel
- * for streaming data types (Sync,AV Packetized)
- * @name_suffix: Optional suffix providean by an HDM that is attached to the
- * regular channel name.
- *
- * Describes the capabilities of a MOST channel like supported Data Types
- * and directions. This information is provided by an HDM for the MostCore.
- *
- * The Core creates read only sysfs attribute files in
- * /sys/devices/most/mdev#/<channel>/ with the
- * following attributes:
- * -available_directions
- * -available_datatypes
- * -number_of_packet_buffers
- * -number_of_stream_buffers
- * -size_of_packet_buffer
- * -size_of_stream_buffer
- * where content of each file is a string with all supported properties of this
- * very channel attribute.
- */
-struct most_channel_capability {
- u16 direction;
- u16 data_type;
- u16 num_buffers_packet;
- u16 buffer_size_packet;
- u16 num_buffers_streaming;
- u16 buffer_size_streaming;
- const char *name_suffix;
-};
-
-/**
- * struct most_channel_config - stores channel configuration
- * @direction: direction of the channel
- * @data_type: data type travelling over this channel
- * @num_buffers: number of buffers
- * @buffer_size: size of a buffer for AIM.
- * Buffer size may be cutted down by HDM in a configure callback
- * to match to a given interface and channel type.
- * @extra_len: additional buffer space for internal HDM purposes like padding.
- * May be set by HDM in a configure callback if needed.
- * @subbuffer_size: size of a subbuffer
- * @packets_per_xact: number of MOST frames that are packet inside one USB
- * packet. This is USB specific
- *
- * Describes the configuration for a MOST channel. This information is
- * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a
- * parameter of the "configure" function call.
- */
-struct most_channel_config {
- enum most_channel_direction direction;
- enum most_channel_data_type data_type;
- u16 num_buffers;
- u16 buffer_size;
- u16 extra_len;
- u16 subbuffer_size;
- u16 packets_per_xact;
- u16 dbr_size;
-};
-
-/*
- * struct mbo - MOST Buffer Object.
- * @context: context for core completion handler
- * @priv: private data for HDM
- *
- * public: documented fields that are used for the communications
- * between MostCore and HDMs
- *
- * @list: list head for use by the mbo's current owner
- * @ifp: (in) associated interface instance
- * @num_buffers_ptr: amount of pool buffers
- * @hdm_channel_id: (in) HDM channel instance
- * @virt_address: (in) kernel virtual address of the buffer
- * @bus_address: (in) bus address of the buffer
- * @buffer_length: (in) buffer payload length
- * @processed_length: (out) processed length
- * @status: (out) transfer status
- * @complete: (in) completion routine
- *
- * The core allocates and initializes the MBO.
- *
- * The HDM receives MBO for transfer from the core with the call to enqueue().
- * The HDM copies the data to- or from the buffer depending on configured
- * channel direction, set "processed_length" and "status" and completes
- * the transfer procedure by calling the completion routine.
- *
- * Finally, the MBO is being deallocated or recycled for further
- * transfers of the same or a different HDM.
- *
- * Directions of usage:
- * The core driver should never access any MBO fields (even if marked
- * as "public") while the MBO is owned by an HDM. The ownership starts with
- * the call of enqueue() and ends with the call of its complete() routine.
- *
- * II.
- * Every HDM attached to the core driver _must_ ensure that it returns any MBO
- * it owns (due to a previous call to enqueue() by the core driver) before it
- * de-registers an interface or gets unloaded from the kernel. If this direction
- * is violated memory leaks will occur, since the core driver does _not_ track
- * MBOs it is currently not in control of.
- *
- */
-struct mbo {
- void *context;
- void *priv;
- struct list_head list;
- struct most_interface *ifp;
- int *num_buffers_ptr;
- u16 hdm_channel_id;
- void *virt_address;
- dma_addr_t bus_address;
- u16 buffer_length;
- u16 processed_length;
- enum most_status_flags status;
- void (*complete)(struct mbo *mbo);
-};
-
-/**
- * struct most_interface - interface instance description
- *
- * Describes an interface of a MOST device the core driver is bound to.
- * This structure is allocated and initialized in the HDM. MostCore may not
- * modify this structure.
- *
- * @dev: the actual device
- * @mod: module
- * @interface: interface type. \sa most_interface_type.
- * @description: PRELIMINARY.
- * Unique description of the device instance from the point of view of the
- * interface in free text form (ASCII).
- * It may be a hexadecimal presentation of the memory address for the MediaLB
- * IP, or the USB device ID with USB properties for a USB interface, etc.
- * @num_channels: number of channels and size of the channel_vector.
- * @channel_vector: properties of the channels.
- * The array index represents the channel ID used by the driver.
- * @dma_alloc: optional callback to allocate a DMA-capable buffer for an MBO.
- * @dma_free: optional callback to free a buffer obtained from @dma_alloc.
- * @configure: callback to change the data type for a channel of the
- * interface instance. May be zero if the instance of the interface is not
- * configurable. The parameter channel_config describes the direction and
- * data type for the channel, as configured by the higher level.
- * @enqueue: delivers an MBO to the HDM for processing.
- * After the HDM completes the Rx or Tx operation, the processed MBO shall
- * be returned to the MostCore using the completion routine.
- * MBOs may still be delivered from the MostCore after the channel has been
- * poisoned, because the application may re-open the channel.
- * In this case the HDM shall hold the MBOs and service the channel as usual.
- * The HDM must be able to hold at least one MBO for each channel.
- * The callback returns a negative value on error, otherwise 0.
- * @poison_channel: informs the HDM about closing the channel. The HDM shall
- * cancel all transfers and synchronously or asynchronously return all
- * MBOs enqueued for this channel using the completion routine.
- * The callback returns a negative value on error, otherwise 0.
- * @request_netinfo: triggers retrieval of network info from the HDM by
- * means of "Message exchange over MDP/MEP".
- * Calling request_netinfo with the parameter on_netinfo set to NULL
- * invalidates the previously passed function pointer.
- * @priv: private field used by mostcore to store context information.
- */
-struct most_interface {
- struct device *dev;
- struct device *driver_dev;
- struct module *mod;
- enum most_interface_type interface;
- const char *description;
- unsigned int num_channels;
- struct most_channel_capability *channel_vector;
- void *(*dma_alloc)(struct mbo *mbo, u32 size);
- void (*dma_free)(struct mbo *mbo, u32 size);
- int (*configure)(struct most_interface *iface, int channel_idx,
- struct most_channel_config *channel_config);
- int (*enqueue)(struct most_interface *iface, int channel_idx,
- struct mbo *mbo);
- int (*poison_channel)(struct most_interface *iface, int channel_idx);
- void (*request_netinfo)(struct most_interface *iface, int channel_idx,
- void (*on_netinfo)(struct most_interface *iface,
- unsigned char link_stat,
- unsigned char *mac_addr));
- void *priv;
- struct interface_private *p;
-};
-
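A minimal sketch of an HDM filling in and registering this structure; the my_* callbacks, the capability array, the description string, and the ITYPE_USB value are illustrative assumptions, and error handling is omitted.

static struct most_channel_capability my_caps[2];

static struct most_interface my_iface = {
	.interface	= ITYPE_USB,		/* assumed interface type */
	.description	= "usb_device.1-1:1.0",	/* free text, hypothetical */
	.num_channels	= ARRAY_SIZE(my_caps),
	.channel_vector	= my_caps,
	.configure	= my_configure,		/* placeholder HDM callbacks */
	.enqueue	= my_enqueue,
	.poison_channel	= my_poison_channel,
};

static int my_hdm_register(void)
{
	return most_register_interface(&my_iface);
}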
-/**
- * struct most_component - identifies a loadable component for the mostcore
- * @list: list_head
- * @name: component name
- * @probe_channel: function for core to notify driver about channel connection
- * @disconnect_channel: callback function to disconnect a certain channel
- * @rx_completion: completion handler for received packets
- * @tx_completion: completion handler for transmitted packets
- */
-struct most_component {
- struct list_head list;
- const char *name;
- struct module *mod;
- int (*probe_channel)(struct most_interface *iface, int channel_idx,
- struct most_channel_config *cfg, char *name,
- char *param);
- int (*disconnect_channel)(struct most_interface *iface,
- int channel_idx);
- int (*rx_completion)(struct mbo *mbo);
- int (*tx_completion)(struct most_interface *iface, int channel_idx);
- int (*cfg_complete)(void);
-};
-
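And the component side, as a hedged sketch: a component fills in its callbacks and registers with the core via most_register_component() (declared below); the my_* handlers are placeholders.

static struct most_component my_comp = {
	.name			= "my_comp",
	.probe_channel		= my_probe_channel,
	.disconnect_channel	= my_disconnect_channel,
	.rx_completion		= my_rx_completion,
	.tx_completion		= my_tx_completion,
};

static int __init my_comp_init(void)
{
	return most_register_component(&my_comp);
}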
-/**
- * most_register_interface - Registers an instance of the interface.
- * @iface: Pointer to the interface instance description.
- *
- * Returns 0 on success or a negative error code otherwise.
- *
- * Note: the HDM has to ensure that any reference held on the kobj is
- * released before deregistering the interface.
- */
-int most_register_interface(struct most_interface *iface);
-
-/**
- * most_deregister_interface - Deregisters an instance of the interface.
- * @iface: Pointer to the interface instance description.
- */
-void most_deregister_interface(struct most_interface *iface);
-void most_submit_mbo(struct mbo *mbo);
-
-/**
- * most_stop_enqueue - prevents core from enqueueing MBOs
- * @iface: pointer to interface
- * @channel_idx: channel index
- */
-void most_stop_enqueue(struct most_interface *iface, int channel_idx);
-
-/**
- * most_resume_enqueue - allow core to enqueue MBOs again
- * @iface: pointer to interface
- * @channel_idx: channel index
- *
- * This clears the enqueue halt flag and enqueues all MBOs currently
- * in the wait FIFO.
- */
-void most_resume_enqueue(struct most_interface *iface, int channel_idx);
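A usage sketch for the pair above, assuming a hypothetical HDM-side reprogramming helper: submissions are halted, the channel is touched, and resume flushes the wait FIFO.

static void my_channel_reconfigure(struct most_interface *iface, int ch_idx)
{
	most_stop_enqueue(iface, ch_idx);	/* core stops handing out MBOs */
	my_reprogram_channel_hw(iface, ch_idx);	/* hypothetical helper */
	most_resume_enqueue(iface, ch_idx);	/* wait FIFO flushed here */
}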
-int most_register_component(struct most_component *comp);
-int most_deregister_component(struct most_component *comp);
-struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
- struct most_component *comp);
-void most_put_mbo(struct mbo *mbo);
-int channel_has_mbo(struct most_interface *iface, int channel_idx,
- struct most_component *comp);
-int most_start_channel(struct most_interface *iface, int channel_idx,
- struct most_component *comp);
-int most_stop_channel(struct most_interface *iface, int channel_idx,
- struct most_component *comp);
-int __init configfs_init(void);
-int most_register_configfs_subsys(struct most_component *comp);
-void most_deregister_configfs_subsys(struct most_component *comp);
-int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
- char *comp_param);
-int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
-int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val);
-int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val);
-int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val);
-int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val);
-int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf);
-int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf);
-int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val);
-int most_cfg_complete(char *comp_name);
-void most_interface_register_notify(const char *mdev_name);
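For orientation, a hedged example of driving the link helper above from kernel code; every string below (device, channel, component, link and parameter names) is made up for illustration and not fixed by the API.

static int my_setup_sound_link(void)
{
	return most_add_link("mdev0", "ep8f", "sound", "my_link", "2x16");
}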
-#endif /* MOST_CORE_H_ */
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index 5547e36e09de..830f089f1a88 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -15,8 +15,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/kobject.h>
-
-#include "../most.h"
+#include <linux/most.h>
#define MEP_HDR_LEN 8
#define MDP_HDR_LEN 16
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 44cf2334834f..1527f410af2b 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -17,8 +17,7 @@
#include <sound/pcm_params.h>
#include <linux/sched.h>
#include <linux/kthread.h>
-
-#include "../most.h"
+#include <linux/most.h>
#define DRIVER_NAME "sound"
#define STRING_SIZE 80
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
index 0bda88c4bc89..e8c5a8c98375 100644
--- a/drivers/staging/most/usb/usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -23,8 +23,7 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
-
-#include "../most.h"
+#include <linux/most.h>
#define USB_MTU 512
#define NO_ISOCHRONOUS_URB 0
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index d32ae49d617b..829df899b993 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -20,8 +20,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
-
-#include "../most.h"
+#include <linux/most.h>
#define V4L2_CMP_MAX_INPUT 1
@@ -74,7 +73,7 @@ static int comp_vdev_open(struct file *filp)
struct comp_fh *fh;
switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
break;
default:
return -EINVAL;
@@ -424,7 +423,7 @@ static int comp_register_videodev(struct most_video_dev *mdev)
/* Register the v4l2 device */
video_set_drvdata(mdev->vdev, mdev);
- ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(mdev->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n",
ret);
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index 20b898954416..14592ed9ce98 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -220,8 +220,7 @@ static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
- dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, "
- "intr_stat %08x, intr_mask %08x\n",
+ dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index 1fb560ff059c..a7c0d3115d72 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -114,6 +114,10 @@
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pcie_pins>;
+
+ reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>,
+ <&gpio 8 GPIO_ACTIVE_LOW>,
+ <&gpio 7 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index d89d68ffa7bc..9e5cf68731bb 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -286,7 +286,7 @@
pcie_pins: pcie0 {
pcie0 {
groups = "pcie";
- function = "pcie rst";
+ function = "gpio";
};
};
@@ -512,7 +512,6 @@
#address-cells = <3>;
#size-cells = <2>;
- perst-gpio = <&gpio 19 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_pins>;
@@ -532,12 +531,14 @@
status = "disabled";
- resets = <&rstctrl 23 &rstctrl 24 &rstctrl 25 &rstctrl 26>;
- reset-names = "pcie", "pcie0", "pcie1", "pcie2";
+ resets = <&rstctrl 24 &rstctrl 25 &rstctrl 26>;
+ reset-names = "pcie0", "pcie1", "pcie2";
clocks = <&clkctrl 24 &clkctrl 25 &clkctrl 26>;
clock-names = "pcie0", "pcie1", "pcie2";
- phys = <&pcie0_phy 0>, <&pcie0_phy 1>, <&pcie1_phy 0>;
- phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
+ phys = <&pcie0_phy 1>, <&pcie2_phy 0>;
+ phy-names = "pcie-phy0", "pcie-phy2";
+
+ reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>;
pcie@0,0 {
reg = <0x0000 0 0 0 0>;
@@ -570,7 +571,7 @@
#phy-cells = <1>;
};
- pcie1_phy: pcie-phy@1e14a000 {
+ pcie2_phy: pcie-phy@1e14a000 {
compatible = "mediatek,mt7621-pci-phy";
reg = <0x1e14a000 0x0700>;
#phy-cells = <1>;
diff --git a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
index d2a07f145143..57743fd22be4 100644
--- a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
+++ b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
@@ -75,34 +75,27 @@
#define RG_PE1_FRC_MSTCKDIV BIT(5)
-#define MAX_PHYS 2
+#define XTAL_MODE_SEL_SHIFT 6
+#define XTAL_MODE_SEL_MASK 0x7
-/**
- * struct mt7621_pci_phy_instance - Mt7621 Pcie PHY device
- * @phy: pointer to the kernel PHY device
- * @port_base: base register
- * @index: internal ID to identify the Mt7621 PCIe PHY
- */
-struct mt7621_pci_phy_instance {
- struct phy *phy;
- void __iomem *port_base;
- u32 index;
-};
+#define MAX_PHYS 2
/**
* struct mt7621_pci_phy - Mt7621 Pcie PHY core
* @dev: pointer to device
* @regmap: kernel regmap pointer
- * @phys: pointer to Mt7621 PHY device
- * @nphys: number of PHY devices for this core
+ * @phy: pointer to the kernel PHY device
+ * @port_base: base register
+ * @has_dual_port: if the phy has dual ports.
* @bypass_pipe_rst: mark if 'mt7621_bypass_pipe_rst'
* needs to be executed. Depends on chip revision.
*/
struct mt7621_pci_phy {
struct device *dev;
struct regmap *regmap;
- struct mt7621_pci_phy_instance **phys;
- int nphys;
+ struct phy *phy;
+ void __iomem *port_base;
+ bool has_dual_port;
bool bypass_pipe_rst;
};
@@ -120,162 +113,152 @@ static inline void phy_write(struct mt7621_pci_phy *phy, u32 val, u32 reg)
regmap_write(phy->regmap, reg, val);
}
-static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy,
- struct mt7621_pci_phy_instance *instance)
+static inline void mt7621_phy_rmw(struct mt7621_pci_phy *phy,
+ u32 reg, u32 clr, u32 set)
+{
+ u32 val = phy_read(phy, reg);
+
+ val &= ~clr;
+ val |= set;
+ phy_write(phy, val, reg);
+}
+
+static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy)
{
- u32 offset = (instance->index != 1) ?
- RG_PE1_PIPE_REG : RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH;
- u32 reg;
-
- reg = phy_read(phy, offset);
- reg &= ~(RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC);
- reg |= (RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC);
- phy_write(phy, reg, offset);
+ mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_RST);
+ mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_CMD_FRC);
+
+ if (phy->has_dual_port) {
+ mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH,
+ 0, RG_PE1_PIPE_RST);
+ mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH,
+ 0, RG_PE1_PIPE_CMD_FRC);
+ }
}
-static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy,
- struct mt7621_pci_phy_instance *instance)
+static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
{
struct device *dev = phy->dev;
- u32 reg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0);
- u32 offset;
- u32 val;
+ u32 xtal_mode;
+
+ xtal_mode = (rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0)
+ >> XTAL_MODE_SEL_SHIFT) & XTAL_MODE_SEL_MASK;
- reg = (reg >> 6) & 0x7;
/* Set PCIe Port PHY to disable SSC */
/* Debug Xtal Type */
- val = phy_read(phy, RG_PE1_FRC_H_XTAL_REG);
- val &= ~(RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE);
- val |= RG_PE1_FRC_H_XTAL_TYPE;
- val |= RG_PE1_H_XTAL_TYPE_VAL(0x00);
- phy_write(phy, val, RG_PE1_FRC_H_XTAL_REG);
+ mt7621_phy_rmw(phy, RG_PE1_FRC_H_XTAL_REG,
+ RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE,
+ RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE_VAL(0x00));
/* disable port */
- offset = (instance->index != 1) ?
- RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
- val = phy_read(phy, offset);
- val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
- val |= RG_PE1_FRC_PHY_EN;
- phy_write(phy, val, offset);
-
- /* Set Pre-divider ratio (for host mode) */
- val = phy_read(phy, RG_PE1_H_PLL_REG);
- val &= ~(RG_PE1_H_PLL_PREDIV);
-
- if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */
- val |= RG_PE1_H_PLL_PREDIV_VAL(0x01);
- phy_write(phy, val, RG_PE1_H_PLL_REG);
+ mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG,
+ RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
+
+ if (phy->has_dual_port) {
+ mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH,
+ RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
+ }
+
+ if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */
+ /* Set Pre-divider ratio (for host mode) */
+ mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
+ RG_PE1_H_PLL_PREDIV,
+ RG_PE1_H_PLL_PREDIV_VAL(0x01));
dev_info(dev, "Xtal is 40MHz\n");
- } else { /* 25MHz | 20MHz Xtal */
- val |= RG_PE1_H_PLL_PREDIV_VAL(0x00);
- phy_write(phy, val, RG_PE1_H_PLL_REG);
- if (reg >= 6) {
- dev_info(dev, "Xtal is 25MHz\n");
-
- /* Select feedback clock */
- val = phy_read(phy, RG_PE1_H_PLL_FBKSEL_REG);
- val &= ~(RG_PE1_H_PLL_FBKSEL);
- val |= RG_PE1_H_PLL_FBKSEL_VAL(0x01);
- phy_write(phy, val, RG_PE1_H_PLL_FBKSEL_REG);
-
- /* DDS NCPO PCW (for host mode) */
- val = phy_read(phy, RG_PE1_H_LCDDS_SSC_PRD_REG);
- val &= ~(RG_PE1_H_LCDDS_SSC_PRD);
- val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000);
- phy_write(phy, val, RG_PE1_H_LCDDS_SSC_PRD_REG);
-
- /* DDS SSC dither period control */
- val = phy_read(phy, RG_PE1_H_LCDDS_SSC_PRD_REG);
- val &= ~(RG_PE1_H_LCDDS_SSC_PRD);
- val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d);
- phy_write(phy, val, RG_PE1_H_LCDDS_SSC_PRD_REG);
-
- /* DDS SSC dither amplitude control */
- val = phy_read(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG);
- val &= ~(RG_PE1_H_LCDDS_SSC_DELTA |
- RG_PE1_H_LCDDS_SSC_DELTA1);
- val |= RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a);
- val |= RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a);
- phy_write(phy, val, RG_PE1_H_LCDDS_SSC_DELTA_REG);
- } else {
- dev_info(dev, "Xtal is 20MHz\n");
- }
+ } else if (xtal_mode >= 6) { /* 25MHz Xtal */
+ mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
+ RG_PE1_H_PLL_PREDIV,
+ RG_PE1_H_PLL_PREDIV_VAL(0x00));
+ /* Select feedback clock */
+ mt7621_phy_rmw(phy, RG_PE1_H_PLL_FBKSEL_REG,
+ RG_PE1_H_PLL_FBKSEL,
+ RG_PE1_H_PLL_FBKSEL_VAL(0x01));
+ /* DDS NCPO PCW (for host mode) */
+ mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG,
+ RG_PE1_H_LCDDS_SSC_PRD,
+ RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000));
+ /* DDS SSC dither period control */
+ mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG,
+ RG_PE1_H_LCDDS_SSC_PRD,
+ RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d));
+ /* DDS SSC dither amplitude control */
+ mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG,
+ RG_PE1_H_LCDDS_SSC_DELTA |
+ RG_PE1_H_LCDDS_SSC_DELTA1,
+ RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a) |
+ RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a));
+ dev_info(dev, "Xtal is 25MHz\n");
+ } else { /* 20MHz Xtal */
+ mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
+ RG_PE1_H_PLL_PREDIV,
+ RG_PE1_H_PLL_PREDIV_VAL(0x00));
+
+ dev_info(dev, "Xtal is 20MHz\n");
}
/* DDS clock inversion */
- val = phy_read(phy, RG_PE1_LCDDS_CLK_PH_INV_REG);
- val &= ~(RG_PE1_LCDDS_CLK_PH_INV);
- val |= RG_PE1_LCDDS_CLK_PH_INV;
- phy_write(phy, val, RG_PE1_LCDDS_CLK_PH_INV_REG);
+ mt7621_phy_rmw(phy, RG_PE1_LCDDS_CLK_PH_INV_REG,
+ RG_PE1_LCDDS_CLK_PH_INV, RG_PE1_LCDDS_CLK_PH_INV);
/* Set PLL bits */
- val = phy_read(phy, RG_PE1_H_PLL_REG);
- val &= ~(RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR |
- RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN);
- val |= RG_PE1_H_PLL_BC_VAL(0x02);
- val |= RG_PE1_H_PLL_BP_VAL(0x06);
- val |= RG_PE1_H_PLL_IR_VAL(0x02);
- val |= RG_PE1_H_PLL_IC_VAL(0x01);
- val |= RG_PE1_PLL_DIVEN_VAL(0x02);
- phy_write(phy, val, RG_PE1_H_PLL_REG);
-
- val = phy_read(phy, RG_PE1_H_PLL_BR_REG);
- val &= ~(RG_PE1_H_PLL_BR);
- val |= RG_PE1_H_PLL_BR_VAL(0x00);
- phy_write(phy, val, RG_PE1_H_PLL_BR_REG);
-
- if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */
+ mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
+ RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR |
+ RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN,
+ RG_PE1_H_PLL_BC_VAL(0x02) | RG_PE1_H_PLL_BP_VAL(0x06) |
+ RG_PE1_H_PLL_IR_VAL(0x02) | RG_PE1_H_PLL_IC_VAL(0x01) |
+ RG_PE1_PLL_DIVEN_VAL(0x02));
+
+ mt7621_phy_rmw(phy, RG_PE1_H_PLL_BR_REG,
+ RG_PE1_H_PLL_BR, RG_PE1_H_PLL_BR_VAL(0x00));
+
+ if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */
/* set force mode enable of da_pe1_mstckdiv */
- val = phy_read(phy, RG_PE1_MSTCKDIV_REG);
- val &= ~(RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV);
- val |= (RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV);
- phy_write(phy, val, RG_PE1_MSTCKDIV_REG);
+ mt7621_phy_rmw(phy, RG_PE1_MSTCKDIV_REG,
+ RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV,
+ RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV);
}
}
static int mt7621_pci_phy_init(struct phy *phy)
{
- struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
- struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent);
+ struct mt7621_pci_phy *mphy = phy_get_drvdata(phy);
if (mphy->bypass_pipe_rst)
- mt7621_bypass_pipe_rst(mphy, instance);
+ mt7621_bypass_pipe_rst(mphy);
- mt7621_set_phy_for_ssc(mphy, instance);
+ mt7621_set_phy_for_ssc(mphy);
return 0;
}
static int mt7621_pci_phy_power_on(struct phy *phy)
{
- struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
- struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent);
- u32 offset = (instance->index != 1) ?
- RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
- u32 val;
+ struct mt7621_pci_phy *mphy = phy_get_drvdata(phy);
/* Enable PHY and disable force mode */
- val = phy_read(mphy, offset);
- val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
- val |= (RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
- phy_write(mphy, val, offset);
+ mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG,
+ RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN);
+
+ if (mphy->has_dual_port) {
+ mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH,
+ RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN);
+ }
return 0;
}
static int mt7621_pci_phy_power_off(struct phy *phy)
{
- struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
- struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent);
- u32 offset = (instance->index != 1) ?
- RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
- u32 val;
+ struct mt7621_pci_phy *mphy = phy_get_drvdata(phy);
/* Disable PHY */
- val = phy_read(mphy, offset);
- val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
- val |= RG_PE1_FRC_PHY_EN;
- phy_write(mphy, val, offset);
+ mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG,
+ RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
+
+ if (mphy->has_dual_port) {
+ mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH,
+ RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
+ }
return 0;
}
@@ -298,13 +281,15 @@ static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
{
struct mt7621_pci_phy *mt7621_phy = dev_get_drvdata(dev);
- if (args->args_count == 0)
- return mt7621_phy->phys[0]->phy;
-
if (WARN_ON(args->args[0] >= MAX_PHYS))
return ERR_PTR(-ENODEV);
- return mt7621_phy->phys[args->args[0]]->phy;
+ mt7621_phy->has_dual_port = args->args[0];
+
+ dev_info(dev, "PHY for 0x%08x (dual port = %d)\n",
+ (unsigned int)mt7621_phy->port_base, mt7621_phy->has_dual_port);
+
+ return mt7621_phy->phy;
}
static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
@@ -325,19 +310,11 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
struct phy_provider *provider;
struct mt7621_pci_phy *phy;
struct resource *res;
- int port;
- void __iomem *port_base;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- phy->nphys = MAX_PHYS;
- phy->phys = devm_kcalloc(dev, phy->nphys,
- sizeof(*phy->phys), GFP_KERNEL);
- if (!phy->phys)
- return -ENOMEM;
-
attr = soc_device_match(mt7621_pci_quirks_match);
if (attr)
phy->bypass_pipe_rst = true;
@@ -351,39 +328,25 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
return -ENXIO;
}
- port_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(port_base)) {
+ phy->port_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->port_base)) {
dev_err(dev, "failed to remap phy regs\n");
- return PTR_ERR(port_base);
+ return PTR_ERR(phy->port_base);
}
- phy->regmap = devm_regmap_init_mmio(phy->dev, port_base,
+ phy->regmap = devm_regmap_init_mmio(phy->dev, phy->port_base,
&mt7621_pci_phy_regmap_config);
if (IS_ERR(phy->regmap))
return PTR_ERR(phy->regmap);
- for (port = 0; port < MAX_PHYS; port++) {
- struct mt7621_pci_phy_instance *instance;
- struct phy *pphy;
-
- instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
- if (!instance)
- return -ENOMEM;
-
- phy->phys[port] = instance;
-
- pphy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
- if (IS_ERR(phy)) {
- dev_err(dev, "failed to create phy\n");
- return PTR_ERR(phy);
- }
-
- instance->port_base = port_base;
- instance->phy = pphy;
- instance->index = port;
- phy_set_drvdata(pphy, instance);
+ phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create phy\n");
+ return PTR_ERR(phy);
}
+ phy_set_drvdata(phy->phy, phy);
+
provider = devm_of_phy_provider_register(dev, mt7621_pcie_phy_of_xlate);
return PTR_ERR_OR_ZERO(provider);
@@ -403,12 +366,7 @@ static struct platform_driver mt7621_pci_phy_driver = {
},
};
-static int __init mt7621_pci_phy_drv_init(void)
-{
- return platform_driver_register(&mt7621_pci_phy_driver);
-}
-
-module_init(mt7621_pci_phy_drv_init);
+builtin_platform_driver(mt7621_pci_phy_driver);
MODULE_AUTHOR("Sergio Paracuellos <sergio.paracuellos@gmail.com>");
MODULE_DESCRIPTION("MediaTek MT7621 PCIe PHY driver");
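The dominant change in this file is replacing open-coded read/clear/set/write sequences with mt7621_phy_rmw(). Its pure form, shown here as a standalone sketch (the same pattern reappears later in this series as pcie_rmw()):

/* classic read-modify-write: clear the @clr bits, then set the @set bits */
static inline u32 rmw32(u32 old, u32 clr, u32 set)
{
	return (old & ~clr) | set;
}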
diff --git a/drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt b/drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt
index 604ec813bd45..327a68267309 100644
--- a/drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt
+++ b/drivers/staging/mt7621-pci/mediatek,mt7621-pci.txt
@@ -6,7 +6,6 @@ Required properties:
- reg: Base addresses and lengths of the PCIe subsys and root ports.
- bus-range: Range of bus numbers associated with this controller.
- #address-cells: Address representation for root ports (must be 3)
-- perst-gpio: PCIe reset signal line.
- pinctrl-names : The pin control state names.
- pinctrl-0: The "default" pinctrl state.
- #size-cells: Size representation for root ports (must be 2)
@@ -24,6 +23,7 @@ Required properties:
See ../clocks/clock-bindings.txt for details.
- clock-names: Must be "pcie0", "pcie1", "pcieN"... based on the number of
root ports.
+- reset-gpios: GPIO specs for the reset pins.
In addition, the device tree node must have sub-nodes describing each PCIe port
interface, having the following mandatory properties:
@@ -49,7 +49,6 @@ Example for MT7621:
#address-cells = <3>;
#size-cells = <2>;
- perst-gpio = <&gpio 19 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_pins>;
@@ -74,6 +73,10 @@ Example for MT7621:
clocks = <&clkctrl 24 &clkctrl 25 &clkctrl 26>;
clock-names = "pcie0", "pcie1", "pcie2";
+ reset-gpios = <&gpio 19 GPIO_ACTIVE_LOW>,
+ <&gpio 8 GPIO_ACTIVE_LOW>,
+ <&gpio 7 GPIO_ACTIVE_LOW>;
+
pcie@0,0 {
reg = <0x0000 0 0 0 0>;
#address-cells = <3>;
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 3633c924848e..f58e3a51fc71 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -45,8 +45,6 @@
/* rt_sysc_membase relative registers */
#define RALINK_CLKCFG1 0x30
-#define RALINK_PCIE_CLK_GEN 0x7c
-#define RALINK_PCIE_CLK_GEN1 0x80
/* Host-PCI bridge registers */
#define RALINK_PCI_PCICFG_ADDR 0x0000
@@ -57,13 +55,13 @@
#define RALINK_PCI_IOBASE 0x002C
/* PCICFG virtual bridges */
-#define MT7621_BR0_MASK GENMASK(19, 16)
-#define MT7621_BR1_MASK GENMASK(23, 20)
-#define MT7621_BR2_MASK GENMASK(27, 24)
-#define MT7621_BR_ALL_MASK GENMASK(27, 16)
-#define MT7621_BR0_SHIFT 16
-#define MT7621_BR1_SHIFT 20
-#define MT7621_BR2_SHIFT 24
+#define PCIE_P2P_MAX 3
+#define PCIE_P2P_BR_DEVNUM_SHIFT(p) (16 + (p) * 4)
+#define PCIE_P2P_BR_DEVNUM0_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(0)
+#define PCIE_P2P_BR_DEVNUM1_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(1)
+#define PCIE_P2P_BR_DEVNUM2_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(2)
+#define PCIE_P2P_BR_DEVNUM_MASK 0xf
+#define PCIE_P2P_BR_DEVNUM_MASK_FULL (0xfff << PCIE_P2P_BR_DEVNUM0_SHIFT)
/* PCIe RC control registers */
#define MT7621_PCIE_OFFSET 0x2000
@@ -85,14 +83,10 @@
#define PCIE_PORT_CLK_EN(x) BIT(24 + (x))
#define PCIE_PORT_LINKUP BIT(0)
-#define PCIE_CLK_GEN_EN BIT(31)
-#define PCIE_CLK_GEN_DIS 0
-#define PCIE_CLK_GEN1_DIS GENMASK(30, 24)
-#define PCIE_CLK_GEN1_EN (BIT(27) | BIT(25))
#define MEMORY_BASE 0x0
#define PERST_MODE_MASK GENMASK(11, 10)
#define PERST_MODE_GPIO BIT(10)
-#define PERST_DELAY_US 1000
+#define PERST_DELAY_MS 100
/**
* struct mt7621_pcie_port - PCIe port information
@@ -101,6 +95,7 @@
* @pcie: pointer to PCIe host info
* @phy: pointer to PHY control block
* @pcie_rst: pointer to port reset control
+ * @gpio_rst: GPIO reset line for the port
* @slot: port slot
* @enabled: indicates if port is enabled
*/
@@ -110,6 +105,7 @@ struct mt7621_pcie_port {
struct mt7621_pcie *pcie;
struct phy *phy;
struct reset_control *pcie_rst;
+ struct gpio_desc *gpio_rst;
u32 slot;
bool enabled;
};
@@ -122,9 +118,8 @@ struct mt7621_pcie_port {
* @busn: bus range
* @offset: IO / Memory offset
* @dev: Pointer to PCIe device
+ * @io_map_base: virtual memory base address for io
* @ports: pointer to PCIe port information
- * @perst: gpio reset
- * @rst: pointer to pcie reset
* @resets_inverted: depends on chip revision
* reset lines are inverted.
*/
@@ -138,9 +133,8 @@ struct mt7621_pcie {
resource_size_t mem;
resource_size_t io;
} offset;
+ unsigned long io_map_base;
struct list_head ports;
- struct gpio_desc *perst;
- struct reset_control *rst;
bool resets_inverted;
};
@@ -154,6 +148,15 @@ static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
writel(val, pcie->base + reg);
}
+static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set)
+{
+ u32 val = readl(pcie->base + reg);
+
+ val &= ~clr;
+ val |= set;
+ writel(val, pcie->base + reg);
+}
+
static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
{
return readl(port->base + reg);
@@ -207,16 +210,16 @@ static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
}
-static inline void mt7621_perst_gpio_pcie_assert(struct mt7621_pcie *pcie)
+static inline void mt7621_rst_gpio_pcie_assert(struct mt7621_pcie_port *port)
{
- gpiod_set_value(pcie->perst, 0);
- mdelay(PERST_DELAY_US);
+ if (port->gpio_rst)
+ gpiod_set_value(port->gpio_rst, 1);
}
-static inline void mt7621_perst_gpio_pcie_deassert(struct mt7621_pcie *pcie)
+static inline void mt7621_rst_gpio_pcie_deassert(struct mt7621_pcie_port *port)
{
- gpiod_set_value(pcie->perst, 1);
- mdelay(PERST_DELAY_US);
+ if (port->gpio_rst)
+ gpiod_set_value(port->gpio_rst, 0);
}
static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port)
@@ -224,6 +227,11 @@ static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port)
return (pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) != 0;
}
+static inline void mt7621_pcie_port_clk_enable(struct mt7621_pcie_port *port)
+{
+ rt_sysc_m32(0, PCIE_PORT_CLK_EN(port->slot), RALINK_CLKCFG1);
+}
+
static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port)
{
rt_sysc_m32(PCIE_PORT_CLK_EN(port->slot), 0, RALINK_CLKCFG1);
@@ -249,13 +257,6 @@ static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
reset_control_assert(port->pcie_rst);
}
-static void mt7621_reset_port(struct mt7621_pcie_port *port)
-{
- mt7621_control_assert(port);
- msleep(100);
- mt7621_control_deassert(port);
-}
-
static void setup_cm_memory_region(struct mt7621_pcie *pcie)
{
struct resource *mem_resource = &pcie->mem;
@@ -292,22 +293,21 @@ static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie)
}
for_each_of_pci_range(&parser, &range) {
- struct resource *res = NULL;
-
switch (range.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
- ioremap(range.cpu_addr, range.size);
- res = &pcie->io;
+ pcie->io_map_base =
+ (unsigned long)ioremap(range.cpu_addr,
+ range.size);
+ of_pci_range_to_resource(&range, node, &pcie->io);
+ pcie->io.start = range.cpu_addr;
+ pcie->io.end = range.cpu_addr + range.size - 1;
pcie->offset.io = 0x00000000UL;
break;
case IORESOURCE_MEM:
- res = &pcie->mem;
+ of_pci_range_to_resource(&range, node, &pcie->mem);
pcie->offset.mem = 0x00000000UL;
break;
}
-
- if (res)
- of_pci_range_to_resource(&range, node, res);
}
err = of_pci_parse_bus_range(node, &pcie->busn);
@@ -319,6 +319,8 @@ static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie)
pcie->busn.flags = IORESOURCE_BUS;
}
+ set_io_port_base(pcie->io_map_base);
+
return 0;
}
@@ -356,9 +358,16 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
snprintf(name, sizeof(name), "pcie-phy%d", slot);
port->phy = devm_phy_get(dev, name);
- if (IS_ERR(port->phy))
+ if (IS_ERR(port->phy) && slot != 1)
return PTR_ERR(port->phy);
+ port->gpio_rst = devm_gpiod_get_index_optional(dev, "reset", slot,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(port->gpio_rst)) {
+ dev_err(dev, "Failed to get GPIO for PCIe%d\n", slot);
+ return PTR_ERR(port->gpio_rst);
+ }
+
port->slot = slot;
port->pcie = pcie;
@@ -375,12 +384,6 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
struct resource regs;
int err;
- pcie->perst = devm_gpiod_get(dev, "perst", GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->perst)) {
- dev_err(dev, "failed to get gpio perst\n");
- return PTR_ERR(pcie->perst);
- }
-
err = of_address_to_resource(node, 0, &regs);
if (err) {
dev_err(dev, "missing \"reg\" property\n");
@@ -391,12 +394,6 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- pcie->rst = devm_reset_control_get_exclusive(dev, "pcie");
- if (PTR_ERR(pcie->rst) == -EPROBE_DEFER) {
- dev_err(dev, "failed to get pcie reset control\n");
- return PTR_ERR(pcie->rst);
- }
-
for_each_available_child_of_node(node, child) {
int slot;
@@ -426,12 +423,6 @@ static int mt7621_pcie_init_port(struct mt7621_pcie_port *port)
u32 slot = port->slot;
int err;
- /*
- * Any MT7621 Ralink pcie controller that doesn't have 0x0101 at
- * the end of the chip_id has inverted PCI resets.
- */
- mt7621_reset_port(port);
-
err = phy_init(port->phy);
if (err) {
dev_err(dev, "failed to initialize port%d phy\n", slot);
@@ -450,34 +441,66 @@ static int mt7621_pcie_init_port(struct mt7621_pcie_port *port)
return 0;
}
+static void mt7621_pcie_reset_assert(struct mt7621_pcie *pcie)
+{
+ struct mt7621_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ /* PCIe RC reset assert */
+ mt7621_control_assert(port);
+
+ /* PCIe EP reset assert */
+ mt7621_rst_gpio_pcie_assert(port);
+ }
+
+ mdelay(PERST_DELAY_MS);
+}
+
+static void mt7621_pcie_reset_rc_deassert(struct mt7621_pcie *pcie)
+{
+ struct mt7621_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ mt7621_control_deassert(port);
+}
+
+static void mt7621_pcie_reset_ep_deassert(struct mt7621_pcie *pcie)
+{
+ struct mt7621_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ mt7621_rst_gpio_pcie_deassert(port);
+
+ mdelay(PERST_DELAY_MS);
+}
+
static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
struct mt7621_pcie_port *port, *tmp;
- u32 val = 0;
int err;
rt_sysc_m32(PERST_MODE_MASK, PERST_MODE_GPIO, MT7621_GPIO_MODE);
- mt7621_perst_gpio_pcie_assert(pcie);
+ mt7621_pcie_reset_assert(pcie);
+ mt7621_pcie_reset_rc_deassert(pcie);
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
u32 slot = port->slot;
+ if (slot == 1) {
+ port->enabled = true;
+ continue;
+ }
+
err = mt7621_pcie_init_port(port);
if (err) {
dev_err(dev, "Initiating port %d failed\n", slot);
list_del(&port->list);
- } else {
- val = read_config(pcie, slot, PCIE_FTS_NUM);
- dev_info(dev, "Port %d N_FTS = %x\n", slot,
- (unsigned int)val);
}
}
- reset_control_assert(pcie->rst);
-
- mt7621_perst_gpio_pcie_deassert(pcie);
+ mt7621_pcie_reset_ep_deassert(pcie);
list_for_each_entry(port, &pcie->ports, list) {
u32 slot = port->slot;
@@ -485,19 +508,13 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
if (!mt7621_pcie_port_is_linkup(port)) {
dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
slot);
- phy_power_off(port->phy);
+ if (slot != 1)
+ phy_power_off(port->phy);
mt7621_control_assert(port);
mt7621_pcie_port_clk_disable(port);
port->enabled = false;
}
}
-
- rt_sysc_m32(0x30, 2 << 4, SYSC_REG_SYSTEM_CONFIG1);
- rt_sysc_m32(PCIE_CLK_GEN_EN, PCIE_CLK_GEN_DIS, RALINK_PCIE_CLK_GEN);
- rt_sysc_m32(PCIE_CLK_GEN1_DIS, PCIE_CLK_GEN1_EN, RALINK_PCIE_CLK_GEN1);
- rt_sysc_m32(PCIE_CLK_GEN_DIS, PCIE_CLK_GEN_EN, RALINK_PCIE_CLK_GEN);
- msleep(50);
- reset_control_deassert(pcie->rst);
}
static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port)
@@ -531,10 +548,15 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
u32 slot;
u32 val;
+ /* Setup MEMWIN and IOWIN */
+ pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE);
+ pcie_write(pcie, pcie->io.start, RALINK_PCI_IOBASE);
+
list_for_each_entry(port, &pcie->ports, list) {
if (port->enabled) {
+ mt7621_pcie_port_clk_enable(port);
mt7621_pcie_enable_port(port);
- dev_info(dev, "PCIE%d enabled\n", num_slots_enabled);
+ dev_info(dev, "PCIE%d enabled\n", port->slot);
num_slots_enabled++;
}
}
@@ -554,7 +576,9 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
{
u32 pcie_link_status = 0;
- u32 val = 0;
+ u32 n;
+ int i;
+ u32 p2p_br_devnum[PCIE_P2P_MAX];
struct mt7621_pcie_port *port;
list_for_each_entry(port, &pcie->ports, list) {
@@ -567,50 +591,20 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
if (pcie_link_status == 0)
return -1;
- /*
- * pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
- * 3'b000 x x x
- * 3'b001 x x 0
- * 3'b010 x 0 x
- * 3'b011 x 1 0
- * 3'b100 0 x x
- * 3'b101 1 x 0
- * 3'b110 1 0 x
- * 3'b111 2 1 0
- */
- switch (pcie_link_status) {
- case 2:
- val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
- val &= ~(MT7621_BR0_MASK | MT7621_BR1_MASK);
- val |= 0x1 << MT7621_BR0_SHIFT;
- val |= 0x0 << MT7621_BR1_SHIFT;
- pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
- break;
- case 4:
- val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
- val &= ~MT7621_BR_ALL_MASK;
- val |= 0x1 << MT7621_BR0_SHIFT;
- val |= 0x2 << MT7621_BR1_SHIFT;
- val |= 0x0 << MT7621_BR2_SHIFT;
- pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
- break;
- case 5:
- val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
- val &= ~MT7621_BR_ALL_MASK;
- val |= 0x0 << MT7621_BR0_SHIFT;
- val |= 0x2 << MT7621_BR1_SHIFT;
- val |= 0x1 << MT7621_BR2_SHIFT;
- pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
- break;
- case 6:
- val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
- val &= ~MT7621_BR_ALL_MASK;
- val |= 0x2 << MT7621_BR0_SHIFT;
- val |= 0x0 << MT7621_BR1_SHIFT;
- val |= 0x1 << MT7621_BR2_SHIFT;
- pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
- break;
- }
+ n = 0;
+ for (i = 0; i < PCIE_P2P_MAX; i++)
+ if (pcie_link_status & BIT(i))
+ p2p_br_devnum[i] = n++;
+
+ for (i = 0; i < PCIE_P2P_MAX; i++)
+ if ((pcie_link_status & BIT(i)) == 0)
+ p2p_br_devnum[i] = n++;
+
+ pcie_rmw(pcie, RALINK_PCI_PCICFG_ADDR,
+ PCIE_P2P_BR_DEVNUM_MASK_FULL,
+ (p2p_br_devnum[0] << PCIE_P2P_BR_DEVNUM0_SHIFT) |
+ (p2p_br_devnum[1] << PCIE_P2P_BR_DEVNUM1_SHIFT) |
+ (p2p_br_devnum[2] << PCIE_P2P_BR_DEVNUM2_SHIFT));
return 0;
}
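The two loops above replace the hand-coded switch: ports whose link is up get bridge device numbers first, in port order, and link-down ports take the leftover numbers. A standalone sketch of the assignment (assign_p2p_devnum() is a hypothetical name):

static void assign_p2p_devnum(u32 link_status, u32 devnum[PCIE_P2P_MAX])
{
	u32 n = 0;
	int i;

	for (i = 0; i < PCIE_P2P_MAX; i++)	/* linked ports first */
		if (link_status & BIT(i))
			devnum[i] = n++;
	for (i = 0; i < PCIE_P2P_MAX; i++)	/* then the rest */
		if (!(link_status & BIT(i)))
			devnum[i] = n++;
	/* e.g. link_status == 0b101 yields devnum = { 0, 2, 1 } */
}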
@@ -678,11 +672,15 @@ static int mt7621_pci_probe(struct platform_device *pdev)
return err;
}
+ err = mt7621_pci_parse_request_of_pci_ranges(pcie);
+ if (err) {
+ dev_err(dev, "Error requesting pci resources from ranges");
+ return err;
+ }
+
/* set resources limits */
- iomem_resource.start = 0;
- iomem_resource.end = ~0UL; /* no limit */
- ioport_resource.start = 0;
- ioport_resource.end = ~0UL; /* no limit */
+ ioport_resource.start = pcie->io.start;
+ ioport_resource.end = pcie->io.end;
mt7621_pcie_init_ports(pcie);
@@ -694,12 +692,6 @@ static int mt7621_pci_probe(struct platform_device *pdev)
mt7621_pcie_enable_ports(pcie);
- err = mt7621_pci_parse_request_of_pci_ranges(pcie);
- if (err) {
- dev_err(dev, "Error requesting pci resources from ranges");
- return err;
- }
-
setup_cm_memory_region(pcie);
err = mt7621_pcie_request_resources(pcie, &res);
@@ -731,9 +723,4 @@ static struct platform_driver mt7621_pci_driver = {
},
};
-static int __init mt7621_pci_init(void)
-{
- return platform_driver_register(&mt7621_pci_driver);
-}
-
-module_init(mt7621_pci_init);
+builtin_platform_driver(mt7621_pci_driver);
diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h
index f152d84099a2..c8d4c13424c6 100644
--- a/drivers/staging/netlogic/platform_net.h
+++ b/drivers/staging/netlogic/platform_net.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
- *
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*/
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
index 518ea809b8fa..8365b744f9b3 100644
--- a/drivers/staging/netlogic/xlr_net.h
+++ b/drivers/staging/netlogic/xlr_net.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
- *
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*/
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
new file mode 100644
index 000000000000..6a5d842ee0f2
--- /dev/null
+++ b/drivers/staging/octeon-usb/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+config OCTEON_USB
+ tristate "Cavium Networks Octeon USB support"
+ depends on CAVIUM_OCTEON_SOC && USB
+ help
+ This driver supports the USB host controller on some Cavium
+ Networks products in the Octeon family.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon-hcd.
+
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
new file mode 100644
index 000000000000..9873a0130ad5
--- /dev/null
+++ b/drivers/staging/octeon-usb/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-${CONFIG_OCTEON_USB} := octeon-hcd.o
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
new file mode 100644
index 000000000000..2b29acca5caa
--- /dev/null
+++ b/drivers/staging/octeon-usb/TODO
@@ -0,0 +1,8 @@
+This driver is functional and has been tested on EdgeRouter Lite,
+D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage.
+
+TODO:
+ - kernel coding style
+ - checkpatch warnings
+
+Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
new file mode 100644
index 000000000000..61471a19d4e6
--- /dev/null
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -0,0 +1,3737 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Cavium Networks
+ *
+ * Some parts of the code were originally released under BSD license:
+ *
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
+ * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ */
+
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/usb/hcd.h>
+#include <linux/prefetch.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "octeon-hcd.h"
+
+/**
+ * enum cvmx_usb_speed - the possible USB device speeds
+ *
+ * @CVMX_USB_SPEED_HIGH: Device is operating at 480Mbps
+ * @CVMX_USB_SPEED_FULL: Device is operating at 12Mbps
+ * @CVMX_USB_SPEED_LOW: Device is operating at 1.5Mbps
+ */
+enum cvmx_usb_speed {
+ CVMX_USB_SPEED_HIGH = 0,
+ CVMX_USB_SPEED_FULL = 1,
+ CVMX_USB_SPEED_LOW = 2,
+};
+
+/**
+ * enum cvmx_usb_transfer - the possible USB transfer types
+ *
+ * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
+ * transfers
+ * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
+ * priority periodic transfers
+ * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
+ * transfers
+ * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
+ * periodic transfers
+ */
+enum cvmx_usb_transfer {
+ CVMX_USB_TRANSFER_CONTROL = 0,
+ CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
+ CVMX_USB_TRANSFER_BULK = 2,
+ CVMX_USB_TRANSFER_INTERRUPT = 3,
+};
+
+/**
+ * enum cvmx_usb_direction - the transfer directions
+ *
+ * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
+ * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
+ */
+enum cvmx_usb_direction {
+ CVMX_USB_DIRECTION_OUT,
+ CVMX_USB_DIRECTION_IN,
+};
+
+/**
+ * enum cvmx_usb_status - possible callback function status codes
+ *
+ * @CVMX_USB_STATUS_OK: The transaction / operation finished without
+ * any errors
+ * @CVMX_USB_STATUS_SHORT: FIXME: This is currently not implemented
+ * @CVMX_USB_STATUS_CANCEL: The transaction was canceled while in flight
+ * by a user call to cvmx_usb_cancel
+ * @CVMX_USB_STATUS_ERROR: The transaction aborted with an unexpected
+ * error status
+ * @CVMX_USB_STATUS_STALL: The transaction received a USB STALL response
+ * from the device
+ * @CVMX_USB_STATUS_XACTERR: The transaction failed with an error from the
+ * device even after a number of retries
+ * @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle
+ * error even after a number of retries
+ * @CVMX_USB_STATUS_BABBLEERR: The transaction failed with a babble error
+ * @CVMX_USB_STATUS_FRAMEERR: The transaction failed with a frame error
+ * even after a number of retries
+ */
+enum cvmx_usb_status {
+ CVMX_USB_STATUS_OK,
+ CVMX_USB_STATUS_SHORT,
+ CVMX_USB_STATUS_CANCEL,
+ CVMX_USB_STATUS_ERROR,
+ CVMX_USB_STATUS_STALL,
+ CVMX_USB_STATUS_XACTERR,
+ CVMX_USB_STATUS_DATATGLERR,
+ CVMX_USB_STATUS_BABBLEERR,
+ CVMX_USB_STATUS_FRAMEERR,
+};
+
+/**
+ * struct cvmx_usb_port_status - the USB port status information
+ *
+ * @port_enabled: 1 = USB port is enabled, 0 = disabled
+ * @port_over_current: 1 = Over current detected, 0 = Over current not
+ * detected. Octeon doesn't support over current detection.
+ * @port_powered: 1 = Port power is being supplied to the device, 0 =
+ * power is off. Octeon doesn't support turning port power
+ * off.
+ * @port_speed: Current port speed.
+ * @connected: 1 = A device is connected to the port, 0 = No device is
+ * connected.
+ * @connect_change: 1 = Device connected state changed since the last set
+ * status call.
+ */
+struct cvmx_usb_port_status {
+ u32 reserved : 25;
+ u32 port_enabled : 1;
+ u32 port_over_current : 1;
+ u32 port_powered : 1;
+ enum cvmx_usb_speed port_speed : 2;
+ u32 connected : 1;
+ u32 connect_change : 1;
+};
+
+/**
+ * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
+ *
+ * @offset: This is the offset in bytes into the main buffer where this data
+ * is stored.
+ * @length: This is the length in bytes of the data.
+ * @status: This is the status of this individual packet transfer.
+ */
+struct cvmx_usb_iso_packet {
+ int offset;
+ int length;
+ enum cvmx_usb_status status;
+};
+
+/**
+ * enum cvmx_usb_initialize_flags - flags used by the initialization function
+ *
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
+ * as clock source at USB_XO and
+ * USB_XI.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
+ * board clock source at USB_XO.
+ * USB_XI should be tied to GND.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
+ * crystal
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and use polled IO
+ * for data transfers
+ */
+enum cvmx_usb_initialize_flags {
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
+ /* Bits 3-4 used to encode the clock frequency */
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
+};
+
+/**
+ * enum cvmx_usb_pipe_flags - internal flags for a pipe.
+ *
+ * @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
+ * actively using hardware.
+ * @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed
+ * pipe is in the ping state.
+ */
+enum cvmx_usb_pipe_flags {
+ CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
+ CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
+};
+
+/* Maximum number of times to retry failed transactions */
+#define MAX_RETRIES 3
+
+/* Maximum number of hardware channels supported by the USB block */
+#define MAX_CHANNELS 8
+
+/*
+ * The low level hardware can transfer a maximum of this number of bytes in each
+ * transfer. The field is 19 bits wide
+ */
+#define MAX_TRANSFER_BYTES ((1 << 19) - 1)
+
+/*
+ * The low level hardware can transfer a maximum of this number of packets in
+ * each transfer. The field is 10 bits wide
+ */
+#define MAX_TRANSFER_PACKETS ((1 << 10) - 1)
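Taken together, the two limits above bound what a single hardware transfer can carry; a hedged helper showing the combined clamp (the name and the use of min() from linux/minmax.h are editorial):

/* hypothetical helper: largest request one hardware transfer can carry */
static inline int max_hw_transfer(int bytes, u16 max_packet)
{
	int pkt_limit = MAX_TRANSFER_PACKETS * max_packet;

	return min(bytes, min((int)MAX_TRANSFER_BYTES, pkt_limit));
}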
+
+/**
+ * enum cvmx_usb_stage - stages of a logical transaction
+ *
+ * Logical transactions may take numerous low level
+ * transactions, especially when splits are concerned. This
+ * enum represents all of the possible stages a transaction can
+ * be in. Note that split completes are always odd. This is so
+ * the NAK handler can back up to the previous low level
+ * transaction with a simple clearing of bit 0.
+ */
+enum cvmx_usb_stage {
+ CVMX_USB_STAGE_NON_CONTROL,
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_SETUP,
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_DATA,
+ CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_STATUS,
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
+};
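The bit-0 trick from the comment, as a one-line hedged helper: every *_SPLIT_COMPLETE stage sits one above its base stage, so clearing bit 0 backs up.

/* hypothetical helper, not part of the driver */
static inline enum cvmx_usb_stage cvmx_usb_stage_base(enum cvmx_usb_stage s)
{
	return s & ~1;	/* e.g. CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE -> SETUP */
}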
+
+/**
+ * struct cvmx_usb_transaction - describes each pending USB transaction
+ * regardless of type. These are linked together
+ * to form a list of pending requests for a pipe.
+ *
+ * @node: List node for transactions in the pipe.
+ * @type: Type of transaction, duplicated from the pipe.
+ * @buffer: User's physical buffer address to read/write.
+ * @buffer_length: Size of the user's buffer in bytes.
+ * @control_header: For control transactions, physical address of the 8
+ * byte standard header.
+ * @iso_start_frame: For ISO transactions, the starting frame number.
+ * @iso_number_packets: For ISO transactions, the number of packets in the
+ * request.
+ * @iso_packets: For ISO transactions, the sub packets in the request.
+ * @xfersize: Hardware transfer size programmed for this transaction.
+ * @pktcnt: Hardware packet count programmed for this transaction.
+ * @retries: Number of times this transaction has been retried.
+ * @actual_bytes: Actual bytes transferred for this transaction.
+ * @stage: For control transactions, the current stage.
+ * @urb: URB.
+ */
+struct cvmx_usb_transaction {
+ struct list_head node;
+ enum cvmx_usb_transfer type;
+ u64 buffer;
+ int buffer_length;
+ u64 control_header;
+ int iso_start_frame;
+ int iso_number_packets;
+ struct cvmx_usb_iso_packet *iso_packets;
+ int xfersize;
+ int pktcnt;
+ int retries;
+ int actual_bytes;
+ enum cvmx_usb_stage stage;
+ struct urb *urb;
+};
+
+/**
+ * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
+ * and some USB device. It contains a list of pending
+ * requests to the device.
+ *
+ * @node: List node for pipe list
+ * @transactions: List of pending transactions
+ * @interval: For periodic pipes, the interval between packets in
+ * frames
+ * @next_tx_frame: The next frame this pipe is allowed to transmit on
+ * @flags: State flags for this pipe
+ * @device_speed: Speed of device connected to this pipe
+ * @transfer_type: Type of transaction supported by this pipe
+ * @transfer_dir: IN or OUT. Ignored for Control
+ * @multi_count: Max packet in a row for the device
+ * @max_packet: The device's maximum packet size in bytes
+ * @device_addr: USB device address at other end of pipe
+ * @endpoint_num: USB endpoint number at other end of pipe
+ * @hub_device_addr: Hub address this device is connected to
+ * @hub_port: Hub port this device is connected to
+ * @pid_toggle: This toggles between 0/1 on every packet send to track
+ * the data pid needed
+ * @channel: Hardware DMA channel for this pipe
+ * @split_sc_frame: The low order bits of the frame number the split
+ * complete should be sent on
+ */
+struct cvmx_usb_pipe {
+ struct list_head node;
+ struct list_head transactions;
+ u64 interval;
+ u64 next_tx_frame;
+ enum cvmx_usb_pipe_flags flags;
+ enum cvmx_usb_speed device_speed;
+ enum cvmx_usb_transfer transfer_type;
+ enum cvmx_usb_direction transfer_dir;
+ int multi_count;
+ u16 max_packet;
+ u8 device_addr;
+ u8 endpoint_num;
+ u8 hub_device_addr;
+ u8 hub_port;
+ u8 pid_toggle;
+ u8 channel;
+ s8 split_sc_frame;
+};
+
+struct cvmx_usb_tx_fifo {
+ struct {
+ int channel;
+ int size;
+ u64 address;
+ } entry[MAX_CHANNELS + 1];
+ int head;
+ int tail;
+};
+
+/**
+ * struct octeon_hcd - the state of the USB block
+ *
+ * @lock: Serialization lock.
+ * @init_flags: Flags passed to initialize.
+ * @index: Which USB block this is for.
+ * @idle_hardware_channels: Bit set for every idle hardware channel.
+ * @usbcx_hprt: Stored port status so we don't need to read a CSR to
+ * determine splits.
+ * @pipe_for_channel: Map channels to pipes.
+ * @indent: Used by debug output to indent functions.
+ * @port_status: Last port status used for change notification.
+ * @idle_pipes: List of open pipes that have no transactions.
+ * @active_pipes: Active pipes indexed by transfer type.
+ * @frame_number: Increments every SOF interrupt for time keeping.
+ * @active_split: Points to the current active split, or NULL.
+ * @periodic: State of the periodic TX FIFO.
+ * @nonperiodic: State of the non-periodic TX FIFO.
+ */
+struct octeon_hcd {
+ spinlock_t lock; /* serialization lock */
+ int init_flags;
+ int index;
+ int idle_hardware_channels;
+ union cvmx_usbcx_hprt usbcx_hprt;
+ struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
+ int indent;
+ struct cvmx_usb_port_status port_status;
+ struct list_head idle_pipes;
+ struct list_head active_pipes[4];
+ u64 frame_number;
+ struct cvmx_usb_transaction *active_split;
+ struct cvmx_usb_tx_fifo periodic;
+ struct cvmx_usb_tx_fifo nonperiodic;
+};
+
+/*
+ * This macro logically sets a single field in a CSR. It does the sequence
+ * read, modify, and write
+ */
+#define USB_SET_FIELD32(address, _union, field, value) \
+ do { \
+ union _union c; \
+ \
+ c.u32 = cvmx_usb_read_csr32(usb, address); \
+ c.s.field = value; \
+ cvmx_usb_write_csr32(usb, address, c.u32); \
+ } while (0)
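A usage example for the macro above, mirroring the GRXFSIZ programming at the end of this file; rx_fifo_words is an assumed local variable, while the register and field names come from the cvmx_usbcx_* unions.

	/* set only the rxfdep field, leaving the rest of GRXFSIZ intact */
	USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz,
			rxfdep, rx_fifo_words);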
+
+/* Returns the IO address used to push/pop data to/from the FIFOs */
+#define USB_FIFO_ADDRESS(channel, usb_index) \
+ (CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000)
+
+/**
+ * struct octeon_temp_buffer - a bounce buffer for USB transfers
+ * @orig_buffer: the original buffer passed by the USB stack
+ * @data: the newly allocated temporary buffer (excluding meta-data)
+ *
+ * Both the DMA engine and FIFO mode will always transfer full 32-bit words.
+ * If the buffer length is not a multiple of 32 bits, we need to allocate
+ * a temporary one, and this struct represents it.
+ */
+struct octeon_temp_buffer {
+ void *orig_buffer;
+ u8 data[];
+};
+
+static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
+{
+ return container_of((void *)p, struct usb_hcd, hcd_priv);
+}
+
+/**
+ * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
+ * (if needed)
+ * @urb: URB.
+ * @mem_flags: Memory allocation flags.
+ *
+ * This function allocates a temporary bounce buffer whenever it's needed
+ * due to HW limitations.
+ */
+static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ struct octeon_temp_buffer *temp;
+
+ if (urb->num_sgs || urb->sg ||
+ (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) ||
+ !(urb->transfer_buffer_length % sizeof(u32)))
+ return 0;
+
+ temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) +
+ sizeof(*temp), mem_flags);
+ if (!temp)
+ return -ENOMEM;
+
+ temp->orig_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
+
+/**
+ * octeon_free_temp_buffer - free a temporary buffer used by USB transfers.
+ * @urb: URB.
+ *
+ * Frees a buffer allocated by octeon_alloc_temp_buffer().
+ */
+static void octeon_free_temp_buffer(struct urb *urb)
+{
+ struct octeon_temp_buffer *temp;
+ size_t length;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer,
+ data);
+ if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+ memcpy(temp->orig_buffer, urb->transfer_buffer, length);
+ }
+ urb->transfer_buffer = temp->orig_buffer;
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+ kfree(temp);
+}
+
+/**
+ * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma().
+ * @hcd: USB HCD structure.
+ * @urb: URB.
+ * @mem_flags: Memory allocation flags.
+ */
+static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = octeon_alloc_temp_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ octeon_free_temp_buffer(urb);
+
+ return ret;
+}
+
+/**
+ * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma()
+ * @hcd: USB HCD structure.
+ * @urb: URB.
+ */
+static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ octeon_free_temp_buffer(urb);
+}
+
+/**
+ * Read a USB 32bit CSR. It performs the necessary address swizzle
+ * for 32bit CSRs.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to read
+ *
+ * Returns: Result of the read
+ */
+static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address)
+{
+ return cvmx_read64_uint32(address ^ 4);
+}
+
+/**
+ * Write a USB 32bit CSR. It performs the necessary address
+ * swizzle for 32bit CSRs. The write is followed by a read of an
+ * unrelated CSR so the posted write is known to have completed.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to write
+ * @value: Value to write
+ */
+static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb,
+ u64 address, u32 value)
+{
+ cvmx_write64_uint32(address ^ 4, value);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+}
+
+/**
+ * Return non-zero if this pipe connects to a non-high-speed
+ * device through a high-speed hub.
+ *
+ * @usb: USB block this access is for
+ * @pipe: Pipe to check
+ *
+ * Returns: Non-zero if we need to do split transactions
+ */
+static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe)
+{
+ return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
+ usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
+}
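+
+/*
+ * For example, a full-speed device behind a high-speed hub needs split
+ * transactions, while the same device plugged directly into the root
+ * port does not, because the port then also runs at full speed.
+ */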
+
+/**
+ * Trivial utility function to return the correct PID for a pipe
+ *
+ * @pipe: pipe to check
+ *
+ * Returns: PID for pipe
+ */
+static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
+{
+ if (pipe->pid_toggle)
+ return 2; /* Data1 */
+ return 0; /* Data0 */
+}
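+
+/*
+ * These values follow the hardware PID encoding used by HCTSIZ[PID]:
+ * 0 = DATA0, 1 = DATA2, 2 = DATA1 and 3 = MDATA, or SETUP on control
+ * endpoints, which is why the setup stages below program pid = 3.
+ */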
+
+/* Loop reading the register until txfflsh or rxfflsh becomes zero. */
+static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type)
+{
+ int result;
+ u64 address = CVMX_USBCX_GRSTCTL(usb->index);
+	u64 done = cvmx_get_cycle() + 100 *
+		   octeon_get_clock_rate() / 1000000;
+ union cvmx_usbcx_grstctl c;
+
+ while (1) {
+ c.u32 = cvmx_usb_read_csr32(usb, address);
+ if (fflsh_type == 0 && c.s.txfflsh == 0) {
+ result = 0;
+ break;
+ } else if (fflsh_type == 1 && c.s.rxfflsh == 0) {
+ result = 0;
+ break;
+ } else if (cvmx_get_cycle() > done) {
+ result = -1;
+ break;
+ }
+
+ __delay(100);
+ }
+ return result;
+}
+
+static void cvmx_fifo_setup(struct octeon_hcd *usb)
+{
+ union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
+ union cvmx_usbcx_gnptxfsiz npsiz;
+ union cvmx_usbcx_hptxfsiz psiz;
+
+ usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GHWCFG3(usb->index));
+
+ /*
+ * Program the USBC_GRXFSIZ register to select the size of the receive
+ * FIFO (25%).
+ */
+ USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz,
+ rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
+
+ /*
+ * Program the USBC_GNPTXFSIZ register to select the size and the start
+ * address of the non-periodic transmit FIFO for nonperiodic
+ * transactions (50%).
+ */
+ npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
+ npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
+ npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32);
+
+ /*
+ * Program the USBC_HPTXFSIZ register to select the size and start
+ * address of the periodic transmit FIFO for periodic transactions
+ * (25%).
+ */
+ psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
+ psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
+ psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32);
+
+ /* Flush all FIFOs */
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ cvmx_usbcx_grstctl, txfnum, 0x10);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ cvmx_usbcx_grstctl, txfflsh, 1);
+ cvmx_wait_tx_rx(usb, 0);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ cvmx_usbcx_grstctl, rxfflsh, 1);
+ cvmx_wait_tx_rx(usb, 1);
+}
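+
+/*
+ * As a worked example, if GHWCFG3 reported a 4096-word data FIFO the
+ * code above would program 1024 words of RX FIFO, a 2048-word
+ * non-periodic TX FIFO starting at word 1024, and a 1024-word periodic
+ * TX FIFO starting at word 3072.
+ */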
+
+/**
+ * Shut down a USB port after a call to cvmx_usb_initialize().
+ * The port should be disabled with all pipes closed when this
+ * function is called.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_shutdown(struct octeon_hcd *usb)
+{
+ union cvmx_usbnx_clk_ctl usbn_clk_ctl;
+
+ /* Make sure all pipes are closed */
+ if (!list_empty(&usb->idle_pipes) ||
+ !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) ||
+ !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) ||
+ !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) ||
+ !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK]))
+ return -EBUSY;
+
+ /* Disable the clocks and put them in power on reset */
+ usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.enable = 1;
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hclk_rst = 1;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hrst = 0;
+ cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ return 0;
+}
+
+/**
+ * Initialize a USB port for use. This must be called before any
+ * other access to the Octeon USB port is made. The port starts
+ * off in the disabled state.
+ *
+ * @dev: Pointer to struct device for logging purposes.
+ * @usb: Pointer to struct octeon_hcd.
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_initialize(struct device *dev,
+ struct octeon_hcd *usb)
+{
+ int channel;
+ int divisor;
+ int retries = 0;
+ union cvmx_usbcx_hcfg usbcx_hcfg;
+ union cvmx_usbnx_clk_ctl usbn_clk_ctl;
+ union cvmx_usbcx_gintsts usbc_gintsts;
+ union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
+ union cvmx_usbcx_gintmsk usbcx_gintmsk;
+ union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
+ union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
+
+retry:
+ /*
+ * Power On Reset and PHY Initialization
+ *
+ * 1. Wait for DCOK to assert (nothing to do)
+ *
+ * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
+ * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
+ */
+ usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hrst = 0;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hclk_rst = 0;
+ usbn_clk_ctl.s.enable = 0;
+ /*
+ * 2b. Select the USB reference clock/crystal parameters by writing
+ * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
+ */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
+ /*
+ * The USB port uses 12/24/48MHz 2.5V board clock
+ * source at USB_XO. USB_XI should be tied to GND.
+ * Most Octeon evaluation boards require this setting
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN50XX))
+ /* From CN56XX,CN50XX,CN31XX,CN30XX manuals */
+ usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */
+ else
+ /* From CN52XX manual */
+ usbn_clk_ctl.s.p_rtype = 1;
+
+ switch (usb->init_flags &
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
+ usbn_clk_ctl.s.p_c_sel = 0;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
+ usbn_clk_ctl.s.p_c_sel = 1;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
+ usbn_clk_ctl.s.p_c_sel = 2;
+ break;
+ }
+ } else {
+ /*
+ * The USB port uses a 12MHz crystal as clock source
+ * at USB_XO and USB_XI
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */
+ else
+ /* From CN56XX,CN52XX,CN50XX manuals. */
+ usbn_clk_ctl.s.p_rtype = 0;
+
+ usbn_clk_ctl.s.p_c_sel = 0;
+ }
+ /*
+ * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
+ * setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
+	 *    such that USB is as close as possible to 125 MHz
+ */
+ divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000);
+ /* Lower than 4 doesn't seem to work properly */
+ if (divisor < 4)
+ divisor = 4;
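+	/*
+	 * For example, an 800 MHz core clock gives
+	 * DIV_ROUND_UP(800000000, 125000000) = 7, i.e. an HCLK of
+	 * roughly 114 MHz.
+	 */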
+ usbn_clk_ctl.s.divide = divisor;
+ usbn_clk_ctl.s.divide2 = 0;
+ cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+
+ /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
+ usbn_clk_ctl.s.hclk_rst = 1;
+ cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
+ __delay(64);
+ /*
+ * 3. Program the power-on reset field in the USBN clock-control
+ * register:
+ * USBN_CLK_CTL[POR] = 0
+ */
+ usbn_clk_ctl.s.por = 0;
+ cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /* 4. Wait 1 ms for PHY clock to start */
+ mdelay(1);
+ /*
+ * 5. Program the Reset input from automatic test equipment field in the
+ * USBP control and status register:
+ * USBN_USBP_CTL_STATUS[ATE_RESET] = 1
+ */
+ usbn_usbp_ctl_status.u64 =
+ cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
+ usbn_usbp_ctl_status.s.ate_reset = 1;
+ cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 6. Wait 10 cycles */
+ __delay(10);
+ /*
+	 * 7. Clear the ATE_RESET field in the USBP control and status register:
+ * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
+ */
+ usbn_usbp_ctl_status.s.ate_reset = 0;
+ cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /*
+ * 8. Program the PHY reset field in the USBN clock-control register:
+ * USBN_CLK_CTL[PRST] = 1
+ */
+ usbn_clk_ctl.s.prst = 1;
+ cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /*
+ * 9. Program the USBP control and status register to select host or
+ * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
+ * device
+ */
+ usbn_usbp_ctl_status.s.hst_mode = 0;
+ cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 10. Wait 1 us */
+ udelay(1);
+ /*
+ * 11. Program the hreset_n field in the USBN clock-control register:
+ * USBN_CLK_CTL[HRST] = 1
+ */
+ usbn_clk_ctl.s.hrst = 1;
+ cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ /* 12. Proceed to USB core initialization */
+ usbn_clk_ctl.s.enable = 1;
+ cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
+ udelay(1);
+
+ /*
+ * USB Core Initialization
+ *
+ * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
+ * determine USB core configuration parameters.
+ *
+ * Nothing needed
+ *
+ * 2. Program the following fields in the global AHB configuration
+ * register (USBC_GAHBCFG)
+ * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
+ * Burst length, USBC_GAHBCFG[HBSTLEN] = 0
+ * Nonperiodic TxFIFO empty level (slave mode only),
+ * USBC_GAHBCFG[NPTXFEMPLVL]
+ * Periodic TxFIFO empty level (slave mode only),
+ * USBC_GAHBCFG[PTXFEMPLVL]
+ * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
+ */
+ usbcx_gahbcfg.u32 = 0;
+ usbcx_gahbcfg.s.dmaen = !(usb->init_flags &
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
+ usbcx_gahbcfg.s.hbstlen = 0;
+ usbcx_gahbcfg.s.nptxfemplvl = 1;
+ usbcx_gahbcfg.s.ptxfemplvl = 1;
+ usbcx_gahbcfg.s.glblintrmsk = 1;
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
+ usbcx_gahbcfg.u32);
+
+ /*
+ * 3. Program the following fields in USBC_GUSBCFG register.
+ * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
+ * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
+ * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
+ * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
+ */
+ usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GUSBCFG(usb->index));
+ usbcx_gusbcfg.s.toutcal = 0;
+ usbcx_gusbcfg.s.ddrsel = 0;
+ usbcx_gusbcfg.s.usbtrdtim = 0x5;
+ usbcx_gusbcfg.s.phylpwrclksel = 0;
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
+ usbcx_gusbcfg.u32);
+
+ /*
+ * 4. The software must unmask the following bits in the USBC_GINTMSK
+ * register.
+ * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
+ * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
+ */
+ usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GINTMSK(usb->index));
+ usbcx_gintmsk.s.otgintmsk = 1;
+ usbcx_gintmsk.s.modemismsk = 1;
+ usbcx_gintmsk.s.hchintmsk = 1;
+ usbcx_gintmsk.s.sofmsk = 0;
+ /* We need RX FIFO interrupts if we don't have DMA */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ usbcx_gintmsk.s.rxflvlmsk = 1;
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
+ usbcx_gintmsk.u32);
+
+ /*
+ * Disable all channel interrupts. We'll enable them per channel later.
+ */
+ for (channel = 0; channel < 8; channel++)
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel, usb->index),
+ 0);
+
+ /*
+ * Host Port Initialization
+ *
+ * 1. Program the host-port interrupt-mask field to unmask,
+ * USBC_GINTMSK[PRTINT] = 1
+ */
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ cvmx_usbcx_gintmsk, prtintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ cvmx_usbcx_gintmsk, disconnintmsk, 1);
+
+ /*
+ * 2. Program the USBC_HCFG register to select full-speed host
+ * or high-speed host.
+ */
+ usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
+ usbcx_hcfg.s.fslssupp = 0;
+ usbcx_hcfg.s.fslspclksel = 0;
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
+
+ cvmx_fifo_setup(usb);
+
+ /*
+ * If the controller is getting port events right after the reset, it
+	 * means the initialization failed. Try resetting the controller again
+	 * in that case. This has been seen to happen after a cold boot on the
+	 * DSR-1000N.
+ */
+ usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GINTSTS(usb->index));
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
+ usbc_gintsts.u32);
+ dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32);
+ if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint)
+ return 0;
+ if (retries++ >= 5)
+ return -EAGAIN;
+ dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n",
+ (int)usbc_gintsts.u32);
+ msleep(50);
+ cvmx_usb_shutdown(usb);
+ msleep(50);
+ goto retry;
+}
+
+/**
+ * Reset a USB port. After this call succeeds, the USB port is
+ * online and servicing requests.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ */
+static void cvmx_usb_reset_port(struct octeon_hcd *usb)
+{
+ usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
+
+ /* Program the port reset bit to start the reset process */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
+ prtrst, 1);
+
+ /*
+ * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
+ * process to complete.
+ */
+ mdelay(50);
+
+ /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
+ prtrst, 0);
+
+ /*
+ * Read the port speed field to get the enumerated speed,
+ * USBC_HPRT[PRTSPD].
+ */
+ usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
+}
+
+/**
+ * Disable a USB port. After this call the USB port will not
+ * generate data transfers and will not generate events.
+ * Transactions in progress will fail and call their
+ * associated callbacks.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_disable(struct octeon_hcd *usb)
+{
+	/*
+	 * Disable the port. USBC_HPRT[PRTENA] is write-1-to-clear,
+	 * so writing a 1 here disables the port rather than enabling it.
+	 */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
+ prtena, 1);
+ return 0;
+}
+
+/**
+ * Get the current state of the USB port. Use this call to
+ * determine if the usb port has anything connected, is enabled,
+ * or has some sort of error condition. The return value of this
+ * call has "changed" bits to signal whether the value of some fields
+ * has changed between calls.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: Port status information
+ */
+static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb)
+{
+ union cvmx_usbcx_hprt usbc_hprt;
+ struct cvmx_usb_port_status result;
+
+ memset(&result, 0, sizeof(result));
+
+ usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ result.port_enabled = usbc_hprt.s.prtena;
+ result.port_over_current = usbc_hprt.s.prtovrcurract;
+ result.port_powered = usbc_hprt.s.prtpwr;
+ result.port_speed = usbc_hprt.s.prtspd;
+ result.connected = usbc_hprt.s.prtconnsts;
+ result.connect_change =
+ result.connected != usb->port_status.connected;
+
+ return result;
+}
+
+/**
+ * Open a virtual pipe between the host and a USB device. A pipe
+ * must be opened before data can be transferred between a device
+ * and Octeon.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @device_addr:
+ * USB device address to open the pipe to
+ * (0-127).
+ * @endpoint_num:
+ * USB endpoint number to open the pipe to
+ * (0-15).
+ * @device_speed:
+ * The speed of the device the pipe is going
+ * to. This must match the device's speed,
+ * which may be different than the port speed.
+ * @max_packet: The maximum packet length the device can
+ * transmit/receive (low speed=0-8, full
+ * speed=0-1023, high speed=0-1024). This value
+ * comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <10:0>.
+ * @transfer_type:
+ * The type of transfer this pipe is for.
+ * @transfer_dir:
+ * The direction the pipe is in. This is not
+ * used for control pipes.
+ * @interval: For ISOCHRONOUS and INTERRUPT transfers,
+ *                  this is how often the transfer is scheduled.
+ *                  All other transfers should specify
+ * zero. The units are in frames (8000/sec at
+ * high speed, 1000/sec for full speed).
+ * @multi_count:
+ * For high speed devices, this is the maximum
+ *                  allowed number of packets per microframe.
+ *                  Specify zero for non-high-speed devices. This
+ * value comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <12:11>.
+ * @hub_device_addr:
+ * Hub device address this device is connected
+ * to. Devices connected directly to Octeon
+ * use zero. This is only used when the device
+ * is full/low speed behind a high speed hub.
+ *                  The address must be that of the high-speed hub,
+ *                  not any full-speed hub after it.
+ * @hub_port: Which port on the hub the device is
+ *                  connected to. Use zero for devices connected
+ * directly to Octeon. Like hub_device_addr,
+ * this is only used for full/low speed
+ * devices behind a high speed hub.
+ *
+ * Returns: A pipe on success, NULL on error.
+ */
+static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb,
+ int device_addr,
+ int endpoint_num,
+ enum cvmx_usb_speed
+ device_speed,
+ int max_packet,
+ enum cvmx_usb_transfer
+ transfer_type,
+ enum cvmx_usb_direction
+ transfer_dir,
+ int interval, int multi_count,
+ int hub_device_addr,
+ int hub_port)
+{
+ struct cvmx_usb_pipe *pipe;
+
+ pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC);
+ if (!pipe)
+ return NULL;
+ if ((device_speed == CVMX_USB_SPEED_HIGH) &&
+ (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (transfer_type == CVMX_USB_TRANSFER_BULK))
+ pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
+ pipe->device_addr = device_addr;
+ pipe->endpoint_num = endpoint_num;
+ pipe->device_speed = device_speed;
+ pipe->max_packet = max_packet;
+ pipe->transfer_type = transfer_type;
+ pipe->transfer_dir = transfer_dir;
+ INIT_LIST_HEAD(&pipe->transactions);
+
+ /*
+ * All pipes use interval to rate limit NAK processing. Force an
+ * interval if one wasn't supplied
+ */
+ if (!interval)
+ interval = 1;
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ pipe->interval = interval * 8;
+		/* Force start splits to be scheduled on uFrame 0 */
+ pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) +
+ pipe->interval;
+ } else {
+ pipe->interval = interval;
+ pipe->next_tx_frame = usb->frame_number + pipe->interval;
+ }
+ pipe->multi_count = multi_count;
+ pipe->hub_device_addr = hub_device_addr;
+ pipe->hub_port = hub_port;
+ pipe->pid_toggle = 0;
+ pipe->split_sc_frame = -1;
+ list_add_tail(&pipe->node, &usb->idle_pipes);
+
+ /*
+ * We don't need to tell the hardware about this pipe yet since
+ * it doesn't have any submitted requests
+ */
+
+ return pipe;
+}
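+
+/*
+ * For instance, an interrupt pipe with interval = 4 behind a high-speed
+ * hub ends up with pipe->interval = 32: the frame counter advances once
+ * per 125 us uFrame, while the full/low speed device is polled in 1 ms
+ * frames. Its first start split is also aligned to uFrame 0 by rounding
+ * frame_number up to the next multiple of 8.
+ */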
+
+/**
+ * Poll the RX FIFOs and remove data as needed. This function is only used
+ * in non-DMA mode. It is very important that this function be called quickly
+ * enough to prevent FIFO overflow.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ */
+static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb)
+{
+ union cvmx_usbcx_grxstsph rx_status;
+ int channel;
+ int bytes;
+ u64 address;
+ u32 *ptr;
+
+ rx_status.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GRXSTSPH(usb->index));
+ /* Only read data if IN data is there */
+ if (rx_status.s.pktsts != 2)
+ return;
+ /* Check if no data is available */
+ if (!rx_status.s.bcnt)
+ return;
+
+ channel = rx_status.s.chnum;
+ bytes = rx_status.s.bcnt;
+
+ /* Get where the DMA engine would have written this data */
+ address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) +
+ channel * 8);
+
+ ptr = cvmx_phys_to_ptr(address);
+ cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8,
+ address + bytes);
+
+ /* Loop writing the FIFO data for this packet into memory */
+ while (bytes > 0) {
+ *ptr++ = cvmx_usb_read_csr32(usb,
+ USB_FIFO_ADDRESS(channel, usb->index));
+ bytes -= 4;
+ }
+ CVMX_SYNCW;
+}
+
+/**
+ * Fill the TX hardware fifo with data out of the software
+ * fifos
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @fifo: Software fifo to use
+ * @available: Amount of space in the hardware fifo
+ *
+ * Returns: Non-zero if the hardware fifo was too small and needs
+ * to be serviced again.
+ */
+static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb,
+ struct cvmx_usb_tx_fifo *fifo, int available)
+{
+ /*
+	 * We're done when either there isn't any more space or the software
+	 * FIFO is empty
+ */
+ while (available && (fifo->head != fifo->tail)) {
+ int i = fifo->tail;
+ const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
+ u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
+ usb->index) ^ 4;
+ int words = available;
+
+ /* Limit the amount of data to what the SW fifo has */
+ if (fifo->entry[i].size <= available) {
+ words = fifo->entry[i].size;
+ fifo->tail++;
+ if (fifo->tail > MAX_CHANNELS)
+ fifo->tail = 0;
+ }
+
+ /* Update the next locations and counts */
+ available -= words;
+ fifo->entry[i].address += words * 4;
+ fifo->entry[i].size -= words;
+
+ /*
+		 * Write the HW fifo data. The read after every three writes
+		 * works around an erratum on CN3XXX chips
+ */
+ while (words > 3) {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_read64_uint64(
+ CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ words -= 3;
+ }
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words) {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words)
+ cvmx_write64_uint32(csr_address, *ptr++);
+ }
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ }
+ return fifo->head != fifo->tail;
+}
+
+/**
+ * Check the hardware FIFOs and fill them as needed
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ */
+static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb)
+{
+ if (usb->periodic.head != usb->periodic.tail) {
+ union cvmx_usbcx_hptxsts tx_status;
+
+ tx_status.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPTXSTS(usb->index));
+ if (cvmx_usb_fill_tx_hw(usb, &usb->periodic,
+ tx_status.s.ptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ cvmx_usbcx_gintmsk, ptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ cvmx_usbcx_gintmsk, ptxfempmsk, 0);
+ }
+
+ if (usb->nonperiodic.head != usb->nonperiodic.tail) {
+ union cvmx_usbcx_gnptxsts tx_status;
+
+ tx_status.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GNPTXSTS(usb->index));
+ if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic,
+ tx_status.s.nptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ cvmx_usbcx_gintmsk, nptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ cvmx_usbcx_gintmsk, nptxfempmsk, 0);
+ }
+}
+
+/**
+ * Fill the TX FIFO with an outgoing packet
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @channel: Channel number to get packet from
+ */
+static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel)
+{
+ union cvmx_usbcx_hccharx hcchar;
+ union cvmx_usbcx_hcspltx usbc_hcsplt;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+ struct cvmx_usb_tx_fifo *fifo;
+
+ /* We only need to fill data on outbound channels */
+ hcchar.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
+ if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
+ return;
+
+	/* OUT splits only carry data on the start split, not the complete */
+ usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCSPLTX(channel, usb->index));
+ if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
+ return;
+
+ /*
+ * Find out how many bytes we need to fill and convert it into 32bit
+ * words.
+ */
+ usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
+ if (!usbc_hctsiz.s.xfersize)
+ return;
+
+ if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
+ (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
+ fifo = &usb->periodic;
+ else
+ fifo = &usb->nonperiodic;
+
+ fifo->entry[fifo->head].channel = channel;
+ fifo->entry[fifo->head].address =
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
+ channel * 8);
+ fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2;
+ fifo->head++;
+ if (fifo->head > MAX_CHANNELS)
+ fifo->head = 0;
+
+ cvmx_usb_poll_tx_fifo(usb);
+}
+
+/**
+ * Perform channel specific setup for Control transactions. All
+ * the generic stuff will already have been done in cvmx_usb_start_channel().
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @channel: Channel to setup
+ * @pipe: Pipe for control transaction
+ */
+static void cvmx_usb_start_channel_control(struct octeon_hcd *usb,
+ int channel,
+ struct cvmx_usb_pipe *pipe)
+{
+ struct usb_hcd *hcd = octeon_to_hcd(usb);
+ struct device *dev = hcd->self.controller;
+ struct cvmx_usb_transaction *transaction =
+ list_first_entry(&pipe->transactions, typeof(*transaction),
+ node);
+ struct usb_ctrlrequest *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ int bytes_to_transfer = transaction->buffer_length -
+ transaction->actual_bytes;
+ int packets_to_transfer;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+
+ usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ dev_err(dev, "%s: ERROR - Non control stage\n", __func__);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = sizeof(*header);
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx, epdir,
+ CVMX_USB_DIRECTION_OUT);
+ /*
+		 * The setup stage sends the control header instead of the
+		 * buffer data; the buffer data will be used in the next stage
+ */
+ cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
+ channel * 8,
+ transaction->control_header);
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = 0;
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx, epdir,
+ CVMX_USB_DIRECTION_OUT);
+
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
+ cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_DATA:
+ usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (header->bRequestType & USB_DIR_IN)
+ bytes_to_transfer = 0;
+ else if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+ }
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx, epdir,
+ ((header->bRequestType & USB_DIR_IN) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
+ if (!(header->bRequestType & USB_DIR_IN))
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx, epdir,
+ ((header->bRequestType & USB_DIR_IN) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
+ cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx, epdir,
+ ((header->bRequestType & USB_DIR_IN) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx, epdir,
+ ((header->bRequestType & USB_DIR_IN) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
+ cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ }
+
+ /*
+ * Make sure the transfer never exceeds the byte limit of the hardware.
+ * Further bytes will be sent as continued transactions
+ */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
+		/* Round MAX_TRANSFER_BYTES down to a multiple of the packet size */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /*
+ * Calculate the number of packets to transfer. If the length is zero
+ * we still need to transfer one packet
+ */
+ packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
+ pipe->max_packet);
+ if (packets_to_transfer == 0) {
+ packets_to_transfer = 1;
+ } else if ((packets_to_transfer > 1) &&
+ (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ /*
+ * Limit to one packet when not using DMA. Channels must be
+ * restarted between every packet for IN transactions, so there
+ * is no reason to do multiple packets in a row
+ */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+ /*
+		 * Limit the number of packets and data transferred to what the
+ * hardware can handle
+ */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
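+	/*
+	 * As a concrete example, a 1000-byte control IN with a 64-byte
+	 * max packet becomes xfersize = 1000 and
+	 * pktcnt = DIV_ROUND_UP(1000, 64) = 16, while in non-DMA mode
+	 * the same transfer is clamped to one 64-byte packet per channel
+	 * restart.
+	 */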
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
+ usbc_hctsiz.u32);
+}
+
+/**
+ * Start a channel to perform the pipe's head transaction
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @channel: Channel to setup
+ * @pipe: Pipe to start
+ */
+static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
+ struct cvmx_usb_pipe *pipe)
+{
+ struct cvmx_usb_transaction *transaction =
+ list_first_entry(&pipe->transactions, typeof(*transaction),
+ node);
+
+ /* Make sure all writes to the DMA region get flushed */
+ CVMX_SYNCW;
+
+ /* Attach the channel to the pipe */
+ usb->pipe_for_channel[channel] = pipe;
+ pipe->channel = channel;
+ pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /* Mark this channel as in use */
+ usb->idle_hardware_channels &= ~(1 << channel);
+
+ /* Enable the channel interrupt bits */
+ {
+ union cvmx_usbcx_hcintx usbc_hcint;
+ union cvmx_usbcx_hcintmskx usbc_hcintmsk;
+ union cvmx_usbcx_haintmsk usbc_haintmsk;
+
+ /* Clear all channel status bits */
+ usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index));
+
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index),
+ usbc_hcint.u32);
+
+ usbc_hcintmsk.u32 = 0;
+ usbc_hcintmsk.s.chhltdmsk = 1;
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ /*
+ * Channels need these extra interrupts when we aren't
+ * in DMA mode.
+ */
+ usbc_hcintmsk.s.datatglerrmsk = 1;
+ usbc_hcintmsk.s.frmovrunmsk = 1;
+ usbc_hcintmsk.s.bblerrmsk = 1;
+ usbc_hcintmsk.s.xacterrmsk = 1;
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * Splits don't generate xfercompl, so we need
+ * ACK and NYET.
+ */
+ usbc_hcintmsk.s.nyetmsk = 1;
+ usbc_hcintmsk.s.ackmsk = 1;
+ }
+ usbc_hcintmsk.s.nakmsk = 1;
+ usbc_hcintmsk.s.stallmsk = 1;
+ usbc_hcintmsk.s.xfercomplmsk = 1;
+ }
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel, usb->index),
+ usbc_hcintmsk.u32);
+
+ /* Enable the channel interrupt to propagate */
+ usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HAINTMSK(usb->index));
+ usbc_haintmsk.s.haintmsk |= 1 << channel;
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index),
+ usbc_haintmsk.u32);
+ }
+
+ /* Setup the location the DMA engine uses. */
+ {
+ u64 reg;
+ u64 dma_address = transaction->buffer +
+ transaction->actual_bytes;
+
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ dma_address = transaction->buffer +
+ transaction->iso_packets[0].offset +
+ transaction->actual_bytes;
+
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)
+ reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index);
+ else
+ reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index);
+ cvmx_write64_uint64(reg + channel * 8, dma_address);
+ }
+
+ /* Setup both the size of the transfer and the SPLIT characteristics */
+ {
+ union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
+ int packets_to_transfer;
+ int bytes_to_transfer = transaction->buffer_length -
+ transaction->actual_bytes;
+
+ /*
+ * ISOCHRONOUS transactions store each individual transfer size
+ * in the packet structure, not the global buffer_length
+ */
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ bytes_to_transfer =
+ transaction->iso_packets[0].length -
+ transaction->actual_bytes;
+
+ /*
+ * We need to do split transactions when we are talking to non
+ * high speed devices that are behind a high speed hub
+ */
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * On the start split phase (stage is even) record the
+ * frame number we will need to send the split complete.
+			 * We only store the frame number modulo 128 (its low
+			 * seven bits) since the complete split can only be a
+			 * frame or two ahead
+ */
+ if ((transaction->stage & 1) == 0) {
+ if (transaction->type == CVMX_USB_TRANSFER_BULK)
+ pipe->split_sc_frame =
+ (usb->frame_number + 1) & 0x7f;
+ else
+ pipe->split_sc_frame =
+ (usb->frame_number + 2) & 0x7f;
+ } else {
+ pipe->split_sc_frame = -1;
+ }
+
+ usbc_hcsplt.s.spltena = 1;
+ usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
+ usbc_hcsplt.s.prtaddr = pipe->hub_port;
+ usbc_hcsplt.s.compsplt = (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
+
+ /*
+ * SPLIT transactions can only ever transmit one data
+ * packet so limit the transfer size to the max packet
+ * size
+ */
+ if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+
+ /*
+ * ISOCHRONOUS OUT splits are unique in that they limit
+			 * data transfers to 188-byte chunks representing the
+			 * begin/middle/end of the data, or the entire payload
+ */
+ if (!usbc_hcsplt.s.compsplt &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (pipe->transfer_type ==
+ CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ /*
+ * Clear the split complete frame number as
+ * there isn't going to be a split complete
+ */
+ pipe->split_sc_frame = -1;
+ /*
+ * See if we've started this transfer and sent
+ * data
+ */
+ if (transaction->actual_bytes == 0) {
+ /*
+ * Nothing sent yet, this is either a
+ * begin or the entire payload
+ */
+ if (bytes_to_transfer <= 188)
+ /* Entire payload in one go */
+ usbc_hcsplt.s.xactpos = 3;
+ else
+ /* First part of payload */
+ usbc_hcsplt.s.xactpos = 2;
+ } else {
+ /*
+ * Continuing the previous data, we must
+ * either be in the middle or at the end
+ */
+ if (bytes_to_transfer <= 188)
+ /* End of payload */
+ usbc_hcsplt.s.xactpos = 1;
+ else
+ /* Middle of payload */
+ usbc_hcsplt.s.xactpos = 0;
+ }
+ /*
+ * Again, the transfer size is limited to 188
+ * bytes
+ */
+ if (bytes_to_transfer > 188)
+ bytes_to_transfer = 188;
+ }
+ }
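+
+		/*
+		 * For example, a 500-byte ISOCHRONOUS OUT payload (with a
+		 * large enough max packet) goes out as a 188-byte begin
+		 * (xactpos = 2), a 188-byte middle (xactpos = 0) and a
+		 * 124-byte end (xactpos = 1) on successive start splits.
+		 */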
+
+ /*
+ * Make sure the transfer never exceeds the byte limit of the
+ * hardware. Further bytes will be sent as continued
+ * transactions
+ */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
+ /*
+			 * Round MAX_TRANSFER_BYTES down to a multiple of
+			 * the packet size
+ */
+ bytes_to_transfer = MAX_TRANSFER_BYTES /
+ pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /*
+ * Calculate the number of packets to transfer. If the length is
+ * zero we still need to transfer one packet
+ */
+ packets_to_transfer =
+ DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
+ if (packets_to_transfer == 0) {
+ packets_to_transfer = 1;
+ } else if ((packets_to_transfer > 1) &&
+ (usb->init_flags &
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ /*
+ * Limit to one packet when not using DMA. Channels must
+ * be restarted between every packet for IN
+ * transactions, so there is no reason to do multiple
+ * packets in a row
+ */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer *
+ pipe->max_packet;
+ } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+ /*
+			 * Limit the number of packets and data transferred to
+ * what the hardware can handle
+ */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer *
+ pipe->max_packet;
+ }
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ /* Update the DATA0/DATA1 toggle */
+ usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
+ /*
+ * High speed pipes may need a hardware ping before they start
+ */
+ if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING)
+ usbc_hctsiz.s.dopng = 1;
+
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCSPLTX(channel, usb->index),
+ usbc_hcsplt.u32);
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index),
+ usbc_hctsiz.u32);
+ }
+
+ /* Setup the Host Channel Characteristics Register */
+ {
+ union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
+
+ /*
+ * Set the startframe odd/even properly. This is only used for
+		 * periodic transfers
+ */
+ usbc_hcchar.s.oddfrm = usb->frame_number & 1;
+
+ /*
+ * Set the number of back to back packets allowed by this
+		 * endpoint. Split transactions interpret "ec" as the number of
+		 * immediate retries on failure. These retries happen too
+		 * quickly, so we disable them entirely for splits
+ */
+ if (cvmx_usb_pipe_needs_split(usb, pipe))
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count < 1)
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count > 3)
+ usbc_hcchar.s.ec = 3;
+ else
+ usbc_hcchar.s.ec = pipe->multi_count;
+
+ /* Set the rest of the endpoint specific settings */
+ usbc_hcchar.s.devaddr = pipe->device_addr;
+ usbc_hcchar.s.eptype = transaction->type;
+ usbc_hcchar.s.lspddev =
+ (pipe->device_speed == CVMX_USB_SPEED_LOW);
+ usbc_hcchar.s.epdir = pipe->transfer_dir;
+ usbc_hcchar.s.epnum = pipe->endpoint_num;
+ usbc_hcchar.s.mps = pipe->max_packet;
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index),
+ usbc_hcchar.u32);
+ }
+
+ /* Do transaction type specific fixups as needed */
+ switch (transaction->type) {
+ case CVMX_USB_TRANSFER_CONTROL:
+ cvmx_usb_start_channel_control(usb, channel, pipe);
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISO transactions require different PIDs depending on
+ * direction and how many packets are needed
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ if (pipe->multi_count < 2) /* Need DATA0 */
+ USB_SET_FIELD32(
+ CVMX_USBCX_HCTSIZX(channel,
+ usb->index),
+ cvmx_usbcx_hctsizx, pid, 0);
+ else /* Need MDATA */
+ USB_SET_FIELD32(
+ CVMX_USBCX_HCTSIZX(channel,
+ usb->index),
+ cvmx_usbcx_hctsizx, pid, 3);
+ }
+ }
+ break;
+ }
+ {
+ union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 =
+ cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel,
+ usb->index))
+ };
+ transaction->xfersize = usbc_hctsiz.s.xfersize;
+ transaction->pktcnt = usbc_hctsiz.s.pktcnt;
+ }
+ /* Remember when we start a split transaction */
+ if (cvmx_usb_pipe_needs_split(usb, pipe))
+ usb->active_split = transaction;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ cvmx_usbcx_hccharx, chena, 1);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ cvmx_usb_fill_tx_fifo(usb, channel);
+}
+
+/**
+ * Find a pipe that is ready to be scheduled to hardware.
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @xfer_type: Transfer type
+ *
+ * Returns: Pipe or NULL if none are ready
+ */
+static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb,
+ enum cvmx_usb_transfer xfer_type)
+{
+ struct list_head *list = usb->active_pipes + xfer_type;
+ u64 current_frame = usb->frame_number;
+ struct cvmx_usb_pipe *pipe;
+
+ list_for_each_entry(pipe, list, node) {
+ struct cvmx_usb_transaction *t =
+ list_first_entry(&pipe->transactions, typeof(*t),
+ node);
+ if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
+ (pipe->next_tx_frame <= current_frame) &&
+ ((pipe->split_sc_frame == -1) ||
+ ((((int)current_frame - pipe->split_sc_frame) & 0x7f) <
+ 0x40)) &&
+ (!usb->active_split || (usb->active_split == t))) {
+ prefetch(t);
+ return pipe;
+ }
+ }
+ return NULL;
+}
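+
+/*
+ * The split_sc_frame test above is a modulo-128 window check: the
+ * complete split may only be scheduled within 64 frames of the recorded
+ * start split, with frame-counter wrap-around handled by masking the
+ * difference with 0x7f.
+ */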
+
+static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb,
+ int is_sof)
+{
+ struct cvmx_usb_pipe *pipe;
+
+ /* Find a pipe needing service. */
+ if (is_sof) {
+ /*
+ * Only process periodic pipes on SOF interrupts. This way we
+		 * are sure that the periodic data is sent at the beginning of
+ * the frame.
+ */
+ pipe = cvmx_usb_find_ready_pipe(usb,
+ CVMX_USB_TRANSFER_ISOCHRONOUS);
+ if (pipe)
+ return pipe;
+ pipe = cvmx_usb_find_ready_pipe(usb,
+ CVMX_USB_TRANSFER_INTERRUPT);
+ if (pipe)
+ return pipe;
+ }
+ pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL);
+ if (pipe)
+ return pipe;
+ return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK);
+}
+
+/**
+ * Called whenever a pipe might need to be scheduled to the
+ * hardware.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @is_sof: True if this schedule was called on a SOF interrupt.
+ */
+static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof)
+{
+ int channel;
+ struct cvmx_usb_pipe *pipe;
+ int need_sof;
+ enum cvmx_usb_transfer ttype;
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ /*
+ * Without DMA we need to be careful to not schedule something
+ * at the end of a frame and cause an overrun.
+ */
+ union cvmx_usbcx_hfnum hfnum = {
+ .u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HFNUM(usb->index))
+ };
+
+ union cvmx_usbcx_hfir hfir = {
+ .u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HFIR(usb->index))
+ };
+
+ if (hfnum.s.frrem < hfir.s.frint / 4)
+ goto done;
+ }
+
+ while (usb->idle_hardware_channels) {
+ /* Find an idle channel */
+ channel = __fls(usb->idle_hardware_channels);
+ if (unlikely(channel > 7))
+ break;
+
+ pipe = cvmx_usb_next_pipe(usb, is_sof);
+ if (!pipe)
+ break;
+
+ cvmx_usb_start_channel(usb, channel, pipe);
+ }
+
+done:
+ /*
+ * Only enable SOF interrupts when we have transactions pending in the
+ * future that might need to be scheduled
+ */
+ need_sof = 0;
+ for (ttype = CVMX_USB_TRANSFER_CONTROL;
+ ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
+ list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
+ if (pipe->next_tx_frame > usb->frame_number) {
+ need_sof = 1;
+ break;
+ }
+ }
+ }
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ cvmx_usbcx_gintmsk, sofmsk, need_sof);
+}
+
+static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb,
+ enum cvmx_usb_status status,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction
+ *transaction,
+ int bytes_transferred,
+ struct urb *urb)
+{
+ struct usb_hcd *hcd = octeon_to_hcd(usb);
+ struct device *dev = hcd->self.controller;
+
+ if (likely(status == CVMX_USB_STATUS_OK))
+ urb->actual_length = bytes_transferred;
+ else
+ urb->actual_length = 0;
+
+ urb->hcpriv = NULL;
+
+	/*
+	 * For isochronous transactions we need to update the URB packet
+	 * status list from data in our private copy.
+	 */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+ /*
+ * The pointer to the private list is stored in the setup_packet
+ * field.
+ */
+ struct cvmx_usb_iso_packet *iso_packet =
+ (struct cvmx_usb_iso_packet *)urb->setup_packet;
+ /* Recalculate the transfer size by adding up each packet */
+ urb->actual_length = 0;
+ for (i = 0; i < urb->number_of_packets; i++) {
+ if (iso_packet[i].status == CVMX_USB_STATUS_OK) {
+ urb->iso_frame_desc[i].status = 0;
+ urb->iso_frame_desc[i].actual_length =
+ iso_packet[i].length;
+ urb->actual_length +=
+ urb->iso_frame_desc[i].actual_length;
+ } else {
+ dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
+ i, urb->number_of_packets,
+ iso_packet[i].status, pipe,
+ transaction, iso_packet[i].length);
+ urb->iso_frame_desc[i].status = -EREMOTEIO;
+ }
+ }
+ /* Free the private list now that we don't need it anymore */
+ kfree(iso_packet);
+ urb->setup_packet = NULL;
+ }
+
+ switch (status) {
+ case CVMX_USB_STATUS_OK:
+ urb->status = 0;
+ break;
+ case CVMX_USB_STATUS_CANCEL:
+ if (urb->status == 0)
+ urb->status = -ENOENT;
+ break;
+ case CVMX_USB_STATUS_STALL:
+ dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
+ pipe, transaction, bytes_transferred);
+ urb->status = -EPIPE;
+ break;
+ case CVMX_USB_STATUS_BABBLEERR:
+ dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
+ pipe, transaction, bytes_transferred);
+ urb->status = -EPIPE;
+ break;
+ case CVMX_USB_STATUS_SHORT:
+ dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
+ pipe, transaction, bytes_transferred);
+ urb->status = -EREMOTEIO;
+ break;
+ case CVMX_USB_STATUS_ERROR:
+ case CVMX_USB_STATUS_XACTERR:
+ case CVMX_USB_STATUS_DATATGLERR:
+ case CVMX_USB_STATUS_FRAMEERR:
+ dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
+ status, pipe, transaction, bytes_transferred);
+ urb->status = -EPROTO;
+ break;
+ }
+ usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb);
+ spin_unlock(&usb->lock);
+ usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status);
+ spin_lock(&usb->lock);
+}
+
+/**
+ * Signal the completion of a transaction and free it. The
+ * transaction will be removed from the pipe transaction list.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe the transaction is on
+ * @transaction:
+ * Transaction that completed
+ * @complete_code:
+ * Completion code
+ */
+static void cvmx_usb_complete(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_status complete_code)
+{
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+
+ /*
+ * Isochronous transactions need extra processing as they might not be
+ * done after a single data transfer
+ */
+ if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ /* Update the number of bytes transferred in this ISO packet */
+ transaction->iso_packets[0].length = transaction->actual_bytes;
+ transaction->iso_packets[0].status = complete_code;
+
+ /*
+ * If there are more ISOs pending and we succeeded, schedule the
+ * next one
+ */
+ if ((transaction->iso_number_packets > 1) &&
+ (complete_code == CVMX_USB_STATUS_OK)) {
+ /* No bytes transferred for this packet as of yet */
+ transaction->actual_bytes = 0;
+ /* One less ISO waiting to transfer */
+ transaction->iso_number_packets--;
+ /* Increment to the next location in our packet array */
+ transaction->iso_packets++;
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ return;
+ }
+ }
+
+ /* Remove the transaction from the pipe list */
+ list_del(&transaction->node);
+ if (list_empty(&pipe->transactions))
+ list_move_tail(&pipe->node, &usb->idle_pipes);
+ octeon_usb_urb_complete_callback(usb, complete_code, pipe,
+ transaction,
+ transaction->actual_bytes,
+ transaction->urb);
+ kfree(transaction);
+}
+
+/**
+ * Submit a USB transaction to a pipe. Called for all types
+ * of transactions.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Which pipe to submit to.
+ * @type: Transaction type
+ * @buffer: User buffer for the transaction
+ * @buffer_length:
+ * User buffer's length in bytes
+ * @control_header:
+ * For control transactions, the 8 byte standard header
+ * @iso_start_frame:
+ * For ISO transactions, the start frame
+ * @iso_number_packets:
+ *                 For ISO, the number of packets in the transaction.
+ * @iso_packets:
+ * A description of each ISO packet
+ * @urb: URB for the callback
+ *
+ * Returns: Transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
+ struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ enum cvmx_usb_transfer type,
+ u64 buffer,
+ int buffer_length,
+ u64 control_header,
+ int iso_start_frame,
+ int iso_number_packets,
+ struct cvmx_usb_iso_packet *iso_packets,
+ struct urb *urb)
+{
+ struct cvmx_usb_transaction *transaction;
+
+ if (unlikely(pipe->transfer_type != type))
+ return NULL;
+
+ transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC);
+ if (unlikely(!transaction))
+ return NULL;
+
+ transaction->type = type;
+ transaction->buffer = buffer;
+ transaction->buffer_length = buffer_length;
+ transaction->control_header = control_header;
+ /* FIXME: This is not used, implement it. */
+ transaction->iso_start_frame = iso_start_frame;
+ transaction->iso_number_packets = iso_number_packets;
+ transaction->iso_packets = iso_packets;
+ transaction->urb = urb;
+ if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_SETUP;
+ else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+
+ if (!list_empty(&pipe->transactions)) {
+ list_add_tail(&transaction->node, &pipe->transactions);
+ } else {
+ list_add_tail(&transaction->node, &pipe->transactions);
+ list_move_tail(&pipe->node,
+ &usb->active_pipes[pipe->transfer_type]);
+
+ /*
+		 * We may need to schedule the pipe now that its first
+		 * transaction has been queued.
+ */
+ cvmx_usb_schedule(usb, 0);
+ }
+
+ return transaction;
+}
+
+/**
+ * Call to submit a USB Bulk transfer to a pipe.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Handle to the pipe for the transfer.
+ * @urb: URB.
+ *
+ * Returns: A submitted transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
+ struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
+{
+ return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ urb);
+}
+
+/**
+ * Call to submit a USB Interrupt transfer to a pipe.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Handle to the pipe for the transfer.
+ * @urb: URB returned when the callback is called.
+ *
+ * Returns: A submitted transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
+ struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
+{
+ return cvmx_usb_submit_transaction(usb, pipe,
+ CVMX_USB_TRANSFER_INTERRUPT,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ urb);
+}
+
+/**
+ * Call to submit a USB Control transfer to a pipe.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Handle to the pipe for the transfer.
+ * @urb: URB.
+ *
+ * Returns: A submitted transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_control(
+ struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
+{
+ int buffer_length = urb->transfer_buffer_length;
+ u64 control_header = urb->setup_dma;
+ struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header);
+
+ if ((header->bRequestType & USB_DIR_IN) == 0)
+ buffer_length = le16_to_cpu(header->wLength);
+
+ return cvmx_usb_submit_transaction(usb, pipe,
+ CVMX_USB_TRANSFER_CONTROL,
+ urb->transfer_dma, buffer_length,
+ control_header,
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ urb);
+}
+
+/**
+ * Call to submit a USB Isochronous transfer to a pipe.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Handle to the pipe for the transfer.
+ * @urb: URB returned when the callback is called.
+ *
+ * Returns: A submitted transaction or NULL on failure.
+ */
+static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
+ struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
+{
+ struct cvmx_usb_iso_packet *packets;
+
+ packets = (struct cvmx_usb_iso_packet *)urb->setup_packet;
+ return cvmx_usb_submit_transaction(usb, pipe,
+ CVMX_USB_TRANSFER_ISOCHRONOUS,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ 0, /* control_header */
+ urb->start_frame,
+ urb->number_of_packets,
+ packets, urb);
+}
+
+/**
+ * Cancel one outstanding request in a pipe. Canceling a request
+ * can fail if the transaction has already completed before cancel
+ * is called. Even after a successful cancel call, it may take
+ * a frame or two for the cvmx_usb_poll() function to call the
+ * associated callback.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to cancel requests in.
+ * @transaction: Transaction to cancel, returned by the submit function.
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_cancel(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction)
+{
+ /*
+	 * If the transaction is the head of the queue and already scheduled,
+	 * we need to treat it specially
+ */
+ if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
+ transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
+ union cvmx_usbcx_hccharx usbc_hcchar;
+
+ usb->pipe_for_channel[pipe->channel] = NULL;
+ pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ CVMX_SYNCW;
+
+ usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
+ /*
+ * If the channel isn't enabled then the transaction already
+ * completed.
+ */
+ if (usbc_hcchar.s.chena) {
+ usbc_hcchar.s.chdis = 1;
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(pipe->channel,
+ usb->index),
+ usbc_hcchar.u32);
+ }
+ }
+ cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL);
+ return 0;
+}
+
+/**
+ * Cancel all outstanding requests in a pipe. Logically all this
+ * does is call cvmx_usb_cancel() in a loop.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to cancel requests in.
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_cancel_all(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe)
+{
+ struct cvmx_usb_transaction *transaction, *next;
+
+ /* Simply loop through and attempt to cancel each transaction */
+ list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
+ int result = cvmx_usb_cancel(usb, pipe, transaction);
+
+ if (unlikely(result != 0))
+ return result;
+ }
+ return 0;
+}
+
+/**
+ * Close a pipe created with cvmx_usb_open_pipe().
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe: Pipe to close.
+ *
+ * Returns: 0 or a negative error code. -EBUSY is returned if the pipe has
+ * outstanding transfers.
+ */
+static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe)
+{
+ /* Fail if the pipe has pending transactions */
+ if (!list_empty(&pipe->transactions))
+ return -EBUSY;
+
+ list_del(&pipe->node);
+ kfree(pipe);
+
+ return 0;
+}
+
+/**
+ * Get the current USB protocol level frame number. The frame
+ * number is always in the range of 0-0x7ff.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: USB frame number
+ */
+static int cvmx_usb_get_frame_number(struct octeon_hcd *usb)
+{
+ union cvmx_usbcx_hfnum usbc_hfnum;
+
+ usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+
+ return usbc_hfnum.s.frnum;
+}
+
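+/*
+ * Advance the control transfer state machine after an ACK. Non-split
+ * progression: SETUP -> DATA (only when wLength is non-zero) ->
+ * STATUS -> complete; split transactions insert a *_SPLIT_COMPLETE
+ * stage after each phase before advancing.
+ */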
+static void cvmx_usb_transfer_control(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ union cvmx_usbcx_hccharx usbc_hcchar,
+ int buffer_space_left,
+ int bytes_in_last_packet)
+{
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ /* This should be impossible */
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_ERROR);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ pipe->pid_toggle = 1;
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->stage =
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+ } else {
+ struct usb_ctrlrequest *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->wLength)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ {
+ struct usb_ctrlrequest *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->wLength)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA:
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+ /*
+			 * For setup OUT data that is split, the
+			 * hardware doesn't appear to count the
+			 * transferred data, so we manually update
+			 * the byte count here.
+ */
+ if (!usbc_hcchar.s.epdir) {
+ if (buffer_space_left < pipe->max_packet)
+ transaction->actual_bytes +=
+ buffer_space_left;
+ else
+ transaction->actual_bytes +=
+ pipe->max_packet;
+ }
+ } else if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ } else {
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ }
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ if (cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+ else
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+ break;
+ }
+}
+
+static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ union cvmx_usbcx_hcintx usbc_hcint,
+ int buffer_space_left,
+ int bytes_in_last_packet)
+{
+ /*
+ * The only time a bulk transfer isn't complete when it finishes with
+ * an ACK is during a split transaction. For splits we need to continue
+ * the transfer if more data is needed.
+ */
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ else if (buffer_space_left &&
+ (bytes_in_last_packet == pipe->max_packet))
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ else
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ } else {
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (usbc_hcint.s.nak))
+ pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
+ if (!buffer_space_left ||
+ (bytes_in_last_packet < pipe->max_packet))
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ }
+}
+
+static void cvmx_usb_transfer_intr(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ int buffer_space_left,
+ int bytes_in_last_packet)
+{
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) {
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ } else if (buffer_space_left &&
+ (bytes_in_last_packet == pipe->max_packet)) {
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ } else {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ }
+ } else if (!buffer_space_left ||
+ (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+ }
+}
+
+static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ int buffer_space_left,
+ int bytes_in_last_packet,
+ int bytes_this_transfer)
+{
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISOCHRONOUS OUT splits don't require a complete split stage.
+ * Instead they use a sequence of begin OUT splits to transfer
+ * the data 188 bytes at a time. Once the transfer is complete,
+ * the pipe sleeps until the next schedule interval.
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ /*
+ * If no space left or this wasn't a max size packet
+ * then this transfer is complete. Otherwise start it
+ * again to send the next 188 bytes
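+			 * (188 bytes is the largest payload a single
+			 * start-split can carry in one microframe)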
+ */
+ if (!buffer_space_left || (bytes_this_transfer < 188)) {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ }
+ return;
+ }
+ if (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
+ /*
+ * We are in the incoming data phase. Keep getting data
+ * until we run out of space or get a small packet
+ */
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ }
+ } else {
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ }
+ } else {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+ }
+}
+
+/**
+ * Poll a channel for status
+ *
+ * @usb: USB device
+ * @channel: Channel to poll
+ *
+ * Returns: Zero on success
+ */
+static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel)
+{
+ struct usb_hcd *hcd = octeon_to_hcd(usb);
+ struct device *dev = hcd->self.controller;
+ union cvmx_usbcx_hcintx usbc_hcint;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+ union cvmx_usbcx_hccharx usbc_hcchar;
+ struct cvmx_usb_pipe *pipe;
+ struct cvmx_usb_transaction *transaction;
+ int bytes_this_transfer;
+ int bytes_in_last_packet;
+ int packets_processed;
+ int buffer_space_left;
+
+ /* Read the interrupt status bits for the channel */
+ usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index));
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
+
+ if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
+ /*
+ * There seems to be a bug in CN31XX which can cause
+ * interrupt IN transfers to get stuck until we do a
+ * write of HCCHARX without changing things
+ */
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel,
+ usb->index),
+ usbc_hcchar.u32);
+ return 0;
+ }
+
+ /*
+		 * In non-DMA mode the channels don't halt themselves. We need
+ * to manually disable channels that are left running
+ */
+ if (!usbc_hcint.s.chhltd) {
+ if (usbc_hcchar.s.chena) {
+ union cvmx_usbcx_hcintmskx hcintmsk;
+ /* Disable all interrupts except CHHLTD */
+ hcintmsk.u32 = 0;
+ hcintmsk.s.chhltdmsk = 1;
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel, usb->index),
+ hcintmsk.u32);
+ usbc_hcchar.s.chdis = 1;
+ cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index),
+ usbc_hcchar.u32);
+ return 0;
+ } else if (usbc_hcint.s.xfercompl) {
+ /*
+ * Successful IN/OUT with transfer complete.
+ * Channel halt isn't needed.
+ */
+ } else {
+ dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
+ usb->index, channel);
+ return 0;
+ }
+ }
+ } else {
+ /*
+		 * There are no interrupts that we need to process when the
+		 * channel is still running.
+ */
+ if (!usbc_hcint.s.chhltd)
+ return 0;
+ }
+
+ /* Disable the channel interrupts now that it is done */
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ usb->idle_hardware_channels |= (1 << channel);
+
+ /* Make sure this channel is tied to a valid pipe */
+ pipe = usb->pipe_for_channel[channel];
+ prefetch(pipe);
+ if (!pipe)
+ return 0;
+ transaction = list_first_entry(&pipe->transactions,
+ typeof(*transaction),
+ node);
+ prefetch(transaction);
+
+ /*
+ * Disconnect this pipe from the HW channel. Later the schedule
+	 * function will figure out which pipe runs next.
+ */
+ usb->pipe_for_channel[channel] = NULL;
+ pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /*
+ * Read the channel config info so we can figure out how much data
+	 * was transferred.
+ */
+ usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ /*
+	 * The number of bytes successfully transferred depends on the
+	 * transfer direction.
+ */
+ packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
+ if (usbc_hcchar.s.epdir) {
+ /*
+ * IN transactions are easy. For every byte received the
+ * hardware decrements xfersize. All we need to do is subtract
+ * the current value of xfersize from its starting value and we
+ * know how many bytes were written to the buffer
+ */
+ bytes_this_transfer = transaction->xfersize -
+ usbc_hctsiz.s.xfersize;
+ } else {
+ /*
+		 * OUT transactions don't decrement xfersize. Instead pktcnt is
+ * decremented on every successful packet send. The hardware
+ * does this when it receives an ACK, or NYET. If it doesn't
+ * receive one of these responses pktcnt doesn't change
+ */
+ bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
+ /*
+ * The last packet may not be a full transfer if we didn't have
+ * enough data
+ */
+ if (bytes_this_transfer > transaction->xfersize)
+ bytes_this_transfer = transaction->xfersize;
+ }
+ /* Figure out how many bytes were in the last packet of the transfer */
+ if (packets_processed)
+ bytes_in_last_packet = bytes_this_transfer -
+ (packets_processed - 1) * usbc_hcchar.s.mps;
+ else
+ bytes_in_last_packet = bytes_this_transfer;
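+	/*
+	 * Worked example with hypothetical numbers: if mps = 64,
+	 * packets_processed = 3 and bytes_this_transfer = 150, the last
+	 * packet carried 150 - 2 * 64 = 22 bytes, i.e. a short packet
+	 * that terminates the transfer.
+	 */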
+
+ /*
+ * As a special case, setup transactions output the setup header, not
+ * the user's data. For this reason we don't count setup data as bytes
+ * transferred
+ */
+ if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
+ (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
+ bytes_this_transfer = 0;
+
+ /*
+ * Add the bytes transferred to the running total. It is important that
+ * bytes_this_transfer doesn't count any data that needs to be
+ * retransmitted
+ */
+ transaction->actual_bytes += bytes_this_transfer;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ buffer_space_left = transaction->iso_packets[0].length -
+ transaction->actual_bytes;
+ else
+ buffer_space_left = transaction->buffer_length -
+ transaction->actual_bytes;
+
+	/*
+	 * Remember the PID toggle state for the next transaction; the
+	 * hardware has already advanced it.
+	 */
+ pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
+
+ /*
+ * For high speed bulk out, assume the next transaction will need to do
+ * a ping before proceeding. If this isn't true the ACK processing below
+ * will clear this flag
+ */
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
+ pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ if (WARN_ON_ONCE(bytes_this_transfer < 0)) {
+ /*
+ * In some rare cases the DMA engine seems to get stuck and
+		 * keeps subtracting the same byte count over and over again.
+		 * In such cases we just need to fail every transaction.
+ */
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_ERROR);
+ return 0;
+ }
+
+ if (usbc_hcint.s.stall) {
+ /*
+ * STALL as a response means this transaction cannot be
+ * completed because the device can't process transactions. Tell
+		 * the user. Any data that was transferred will be counted in
+		 * the actual bytes transferred
+ */
+ pipe->pid_toggle = 0;
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_STALL);
+ } else if (usbc_hcint.s.xacterr) {
+ /*
+ * XactErr as a response means the device signaled
+ * something wrong with the transfer. For example, PID
+ * toggle errors cause these.
+ */
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_XACTERR);
+ } else if (usbc_hcint.s.bblerr) {
+ /* Babble Error (BblErr) */
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_BABBLEERR);
+ } else if (usbc_hcint.s.datatglerr) {
+ /* Data toggle error */
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_DATATGLERR);
+ } else if (usbc_hcint.s.nyet) {
+ /*
+ * NYET as a response is only allowed in three cases: as a
+ * response to a ping, as a response to a split transaction, and
+ * as a response to a bulk out. The ping case is handled by
+ * hardware, so we only have splits and bulk out
+ */
+ if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->retries = 0;
+ /*
+ * If there is more data to go then we need to try
+ * again. Otherwise this transaction is complete
+ */
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet))
+ cvmx_usb_complete(usb, pipe,
+ transaction,
+ CVMX_USB_STATUS_OK);
+ } else {
+ /*
+ * Split transactions retry the split complete 4 times
+ * then rewind to the start split and do the entire
+			 * transaction again
+ */
+ transaction->retries++;
+ if ((transaction->retries & 0x3) == 0) {
+ /*
+ * Rewind to the beginning of the transaction by
+				 * ANDing off the split complete bit
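+				 * (this branch runs on every 4th NYET;
+				 * the intervening NYETs simply re-issue
+				 * the complete split)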
+ */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ }
+ }
+ } else if (usbc_hcint.s.ack) {
+ transaction->retries = 0;
+ /*
+ * The ACK bit can only be checked after the other error bits.
+ * This is because a multi packet transfer may succeed in a
+ * number of packets and then get a different response on the
+ * last packet. In this case both ACK and the last response bit
+ * will be set. If none of the other response bits is set, then
+ * the last packet must have been an ACK
+ *
+ * Since we got an ACK, we know we don't need to do a ping on
+ * this pipe
+ */
+ pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ switch (transaction->type) {
+ case CVMX_USB_TRANSFER_CONTROL:
+ cvmx_usb_transfer_control(usb, pipe, transaction,
+ usbc_hcchar,
+ buffer_space_left,
+ bytes_in_last_packet);
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ cvmx_usb_transfer_bulk(usb, pipe, transaction,
+ usbc_hcint, buffer_space_left,
+ bytes_in_last_packet);
+ break;
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ cvmx_usb_transfer_intr(usb, pipe, transaction,
+ buffer_space_left,
+ bytes_in_last_packet);
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ cvmx_usb_transfer_isoc(usb, pipe, transaction,
+ buffer_space_left,
+ bytes_in_last_packet,
+ bytes_this_transfer);
+ break;
+ }
+ } else if (usbc_hcint.s.nak) {
+ /*
+ * If this was a split then clear our split in progress marker.
+ */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /*
+ * NAK as a response means the device couldn't accept the
+ * transaction, but it should be retried in the future. Rewind
+		 * to the beginning of the transaction by ANDing off the split
+ * complete bit. Retry in the next interval
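+		 * Worked example: with interval = 8, if next_tx_frame
+		 * lands at 100 but frame_number is already 105, the code
+		 * below reschedules to 105 + 8 - ((105 - 100) % 8) = 108,
+		 * the next frame that keeps the original phase.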
+ */
+ transaction->retries = 0;
+ transaction->stage &= ~1;
+ pipe->next_tx_frame += pipe->interval;
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number +
+ pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) %
+ pipe->interval;
+ } else {
+ struct cvmx_usb_port_status port;
+
+ port = cvmx_usb_get_status(usb);
+ if (port.port_enabled) {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ } else {
+ /*
+ * We get channel halted interrupts with no result bits
+			 * set when the cable is unplugged
+ */
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_ERROR);
+ }
+ }
+ return 0;
+}
+
+static void octeon_usb_port_callback(struct octeon_hcd *usb)
+{
+ spin_unlock(&usb->lock);
+ usb_hcd_poll_rh_status(octeon_to_hcd(usb));
+ spin_lock(&usb->lock);
+}
+
+/**
+ * Poll the USB block for status and call all needed callback
+ * handlers. This function is meant to be called in the interrupt
+ * handler for the USB controller. It can also be called
+ * periodically in a loop for non-interrupt based operation.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+static int cvmx_usb_poll(struct octeon_hcd *usb)
+{
+ union cvmx_usbcx_hfnum usbc_hfnum;
+ union cvmx_usbcx_gintsts usbc_gintsts;
+
+ prefetch_range(usb, sizeof(*usb));
+
+ /* Update the frame counter */
+ usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
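+	/*
+	 * Example with hypothetical values: if the stored low 14 bits
+	 * were 0x3ffe and FRNUM now reads 0x0001, the counter wrapped,
+	 * so 0x4000 is added before the new low bits are spliced in,
+	 * keeping the 64-bit count monotonic at 0x4001.
+	 */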
+ if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum)
+ usb->frame_number += 0x4000;
+ usb->frame_number &= ~0x3fffull;
+ usb->frame_number |= usbc_hfnum.s.frnum;
+
+ /* Read the pending interrupts */
+ usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GINTSTS(usb->index));
+
+ /* Clear the interrupts now that we know about them */
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
+ usbc_gintsts.u32);
+
+ if (usbc_gintsts.s.rxflvl) {
+ /*
+ * RxFIFO Non-Empty (RxFLvl)
+ * Indicates that there is at least one packet pending to be
+ * read from the RxFIFO.
+ *
+ * In DMA mode this is handled by hardware
+ */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ cvmx_usb_poll_rx_fifo(usb);
+ }
+ if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
+ /* Fill the Tx FIFOs when not in DMA mode */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ cvmx_usb_poll_tx_fifo(usb);
+ }
+ if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
+ union cvmx_usbcx_hprt usbc_hprt;
+ /*
+ * Disconnect Detected Interrupt (DisconnInt)
+ * Asserted when a device disconnect is detected.
+ *
+ * Host Port Interrupt (PrtInt)
+ * The core sets this bit to indicate a change in port status of
+ * one of the O2P USB core ports in Host mode. The application
+ * must read the Host Port Control and Status (HPRT) register to
+ * determine the exact event that caused this interrupt. The
+ * application must clear the appropriate status bit in the Host
+ * Port Control and Status register to clear this bit.
+ *
+ * Call the user's port callback
+ */
+ octeon_usb_port_callback(usb);
+ /* Clear the port change bits */
+ usbc_hprt.u32 =
+ cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.s.prtena = 0;
+ cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
+ usbc_hprt.u32);
+ }
+ if (usbc_gintsts.s.hchint) {
+ /*
+ * Host Channels Interrupt (HChInt)
+ * The core sets this bit to indicate that an interrupt is
+ * pending on one of the channels of the core (in Host mode).
+ * The application must read the Host All Channels Interrupt
+ * (HAINT) register to determine the exact number of the channel
+ * on which the interrupt occurred, and then read the
+ * corresponding Host Channel-n Interrupt (HCINTn) register to
+ * determine the exact cause of the interrupt. The application
+ * must clear the appropriate status bit in the HCINTn register
+ * to clear this bit.
+ */
+ union cvmx_usbcx_haint usbc_haint;
+
+ usbc_haint.u32 = cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HAINT(usb->index));
+ while (usbc_haint.u32) {
+ int channel;
+
+ channel = __fls(usbc_haint.u32);
+ cvmx_usb_poll_channel(usb, channel);
+ usbc_haint.u32 ^= 1 << channel;
+ }
+ }
+
+ cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
+
+ return 0;
+}
+
+/* convert between an HCD pointer and the corresponding struct octeon_hcd */
+static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
+{
+ return (struct octeon_hcd *)(hcd->hcd_priv);
+}
+
+static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
+{
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb->lock, flags);
+ cvmx_usb_poll(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int octeon_usb_start(struct usb_hcd *hcd)
+{
+ hcd->state = HC_STATE_RUNNING;
+ return 0;
+}
+
+static void octeon_usb_stop(struct usb_hcd *hcd)
+{
+ hcd->state = HC_STATE_HALT;
+}
+
+static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
+{
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
+
+ return cvmx_usb_get_frame_number(usb);
+}
+
+static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
+ struct device *dev = hcd->self.controller;
+ struct cvmx_usb_transaction *transaction = NULL;
+ struct cvmx_usb_pipe *pipe;
+ unsigned long flags;
+ struct cvmx_usb_iso_packet *iso_packet;
+ struct usb_host_endpoint *ep = urb->ep;
+ int rc;
+
+ urb->status = 0;
+ spin_lock_irqsave(&usb->lock, flags);
+
+ rc = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (rc) {
+ spin_unlock_irqrestore(&usb->lock, flags);
+ return rc;
+ }
+
+ if (!ep->hcpriv) {
+ enum cvmx_usb_transfer transfer_type;
+ enum cvmx_usb_speed speed;
+ int split_device = 0;
+ int split_port = 0;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_ISOCHRONOUS:
+ transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS;
+ break;
+ case PIPE_INTERRUPT:
+ transfer_type = CVMX_USB_TRANSFER_INTERRUPT;
+ break;
+ case PIPE_CONTROL:
+ transfer_type = CVMX_USB_TRANSFER_CONTROL;
+ break;
+ default:
+ transfer_type = CVMX_USB_TRANSFER_BULK;
+ break;
+ }
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ speed = CVMX_USB_SPEED_LOW;
+ break;
+ case USB_SPEED_FULL:
+ speed = CVMX_USB_SPEED_FULL;
+ break;
+ default:
+ speed = CVMX_USB_SPEED_HIGH;
+ break;
+ }
+ /*
+ * For slow devices on high speed ports we need to find the hub
+ * that does the speed translation so we know where to send the
+ * split transactions.
+ */
+ if (speed != CVMX_USB_SPEED_HIGH) {
+ /*
+			 * Start at this device and work our way up the USB
+ * tree.
+ */
+ struct usb_device *dev = urb->dev;
+
+ while (dev->parent) {
+ /*
+				 * If our parent is high speed then it will
+ * receive the splits.
+ */
+ if (dev->parent->speed == USB_SPEED_HIGH) {
+ split_device = dev->parent->devnum;
+ split_port = dev->portnum;
+ break;
+ }
+ /*
+ * Move up the tree one level. If we make it all
+ * the way up the tree, then the port must not
+ * be in high speed mode and we don't need a
+ * split.
+ */
+ dev = dev->parent;
+ }
+ }
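+		/*
+		 * wMaxPacketSize encodes the maximum packet size in
+		 * bits 10:0 and, for high-speed periodic endpoints, the
+		 * number of additional transactions per microframe in
+		 * bits 12:11; both fields are decoded below.
+		 */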
+ pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe), speed,
+ le16_to_cpu(ep->desc.wMaxPacketSize)
+ & 0x7ff,
+ transfer_type,
+ usb_pipein(urb->pipe) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT,
+ urb->interval,
+ (le16_to_cpu(ep->desc.wMaxPacketSize)
+ >> 11) & 0x3,
+ split_device, split_port);
+ if (!pipe) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ dev_dbg(dev, "Failed to create pipe\n");
+ return -ENOMEM;
+ }
+ ep->hcpriv = pipe;
+ } else {
+ pipe = ep->hcpriv;
+ }
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_ISOCHRONOUS:
+ dev_dbg(dev, "Submit isochronous to %d.%d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
+ /*
+ * Allocate a structure to use for our private list of
+ * isochronous packets.
+ */
+ iso_packet = kmalloc_array(urb->number_of_packets,
+ sizeof(struct cvmx_usb_iso_packet),
+ GFP_ATOMIC);
+ if (iso_packet) {
+ int i;
+ /* Fill the list with the data from the URB */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ iso_packet[i].offset =
+ urb->iso_frame_desc[i].offset;
+ iso_packet[i].length =
+ urb->iso_frame_desc[i].length;
+ iso_packet[i].status = CVMX_USB_STATUS_ERROR;
+ }
+ /*
+ * Store a pointer to the list in the URB setup_packet
+ * field. We know this currently isn't being used and
+ * this saves us a bunch of logic.
+ */
+ urb->setup_packet = (char *)iso_packet;
+ transaction = cvmx_usb_submit_isochronous(usb,
+ pipe, urb);
+ /*
+ * If submit failed we need to free our private packet
+ * list.
+ */
+ if (!transaction) {
+ urb->setup_packet = NULL;
+ kfree(iso_packet);
+ }
+ }
+ break;
+ case PIPE_INTERRUPT:
+ dev_dbg(dev, "Submit interrupt to %d.%d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
+ transaction = cvmx_usb_submit_interrupt(usb, pipe, urb);
+ break;
+ case PIPE_CONTROL:
+ dev_dbg(dev, "Submit control to %d.%d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
+ transaction = cvmx_usb_submit_control(usb, pipe, urb);
+ break;
+ case PIPE_BULK:
+ dev_dbg(dev, "Submit bulk to %d.%d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
+ transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
+ break;
+ }
+ if (!transaction) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ dev_dbg(dev, "Failed to submit\n");
+ return -ENOMEM;
+ }
+ urb->hcpriv = transaction;
+ spin_unlock_irqrestore(&usb->lock, flags);
+ return 0;
+}
+
+static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
+ struct urb *urb,
+ int status)
+{
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
+ unsigned long flags;
+ int rc;
+
+ if (!urb->dev)
+ return -EINVAL;
+
+ spin_lock_irqsave(&usb->lock, flags);
+
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto out;
+
+ urb->status = status;
+ cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv);
+
+out:
+ spin_unlock_irqrestore(&usb->lock, flags);
+
+ return rc;
+}
+
+static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct device *dev = hcd->self.controller;
+
+ if (ep->hcpriv) {
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
+ struct cvmx_usb_pipe *pipe = ep->hcpriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb->lock, flags);
+ cvmx_usb_cancel_all(usb, pipe);
+ if (cvmx_usb_close_pipe(usb, pipe))
+ dev_dbg(dev, "Closing pipe %p failed\n", pipe);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ ep->hcpriv = NULL;
+ }
+}
+
+static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
+ struct cvmx_usb_port_status port_status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb->lock, flags);
+ port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
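+	/*
+	 * In the hub status change bitmap, bit 0 refers to the hub
+	 * itself and bit n to port n; hence the shift to bit 1 for this
+	 * single-port root hub.
+	 */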
+ buf[0] = port_status.connect_change << 1;
+
+ return buf[0] != 0;
+}
+
+static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
+ struct device *dev = hcd->self.controller;
+ struct cvmx_usb_port_status usb_port_status;
+ int port_status;
+ struct usb_hub_descriptor *desc;
+ unsigned long flags;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ dev_dbg(dev, "ClearHubFeature\n");
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* Nothing required here */
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case ClearPortFeature:
+ dev_dbg(dev, "ClearPortFeature\n");
+ if (wIndex != 1) {
+ dev_dbg(dev, " INVALID\n");
+ return -EINVAL;
+ }
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ dev_dbg(dev, " ENABLE\n");
+ spin_lock_irqsave(&usb->lock, flags);
+ cvmx_usb_disable(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(dev, " SUSPEND\n");
+ /* Not supported on Octeon */
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dev, " POWER\n");
+ /* Not supported on Octeon */
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ dev_dbg(dev, " INDICATOR\n");
+			/* Port indicator not supported */
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ dev_dbg(dev, " C_CONNECTION\n");
+			/* Clears the driver's internal connect status change flag */
+ spin_lock_irqsave(&usb->lock, flags);
+ usb->port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ dev_dbg(dev, " C_RESET\n");
+ /*
+ * Clears the driver's internal Port Reset Change flag.
+ */
+ spin_lock_irqsave(&usb->lock, flags);
+ usb->port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ dev_dbg(dev, " C_ENABLE\n");
+ /*
+ * Clears the driver's internal Port Enable/Disable
+ * Change flag.
+ */
+ spin_lock_irqsave(&usb->lock, flags);
+ usb->port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ dev_dbg(dev, " C_SUSPEND\n");
+ /*
+ * Clears the driver's internal Port Suspend Change
+ * flag, which is set when resume signaling on the host
+ * port is complete.
+ */
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(dev, " C_OVER_CURRENT\n");
+			/* Clears the driver's internal over-current change flag */
+ spin_lock_irqsave(&usb->lock, flags);
+ usb->port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ break;
+ default:
+ dev_dbg(dev, " UNKNOWN\n");
+ return -EINVAL;
+ }
+ break;
+ case GetHubDescriptor:
+ dev_dbg(dev, "GetHubDescriptor\n");
+ desc = (struct usb_hub_descriptor *)buf;
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = cpu_to_le16(0x08);
+ desc->bPwrOn2PwrGood = 1;
+ desc->bHubContrCurrent = 0;
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
+ break;
+ case GetHubStatus:
+ dev_dbg(dev, "GetHubStatus\n");
+ *(__le32 *)buf = 0;
+ break;
+ case GetPortStatus:
+ dev_dbg(dev, "GetPortStatus\n");
+ if (wIndex != 1) {
+ dev_dbg(dev, " INVALID\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&usb->lock, flags);
+ usb_port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ port_status = 0;
+
+ if (usb_port_status.connect_change) {
+ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+ dev_dbg(dev, " C_CONNECTION\n");
+ }
+
+ if (usb_port_status.port_enabled) {
+ port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+ dev_dbg(dev, " C_ENABLE\n");
+ }
+
+ if (usb_port_status.connected) {
+ port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+ dev_dbg(dev, " CONNECTION\n");
+ }
+
+ if (usb_port_status.port_enabled) {
+ port_status |= (1 << USB_PORT_FEAT_ENABLE);
+ dev_dbg(dev, " ENABLE\n");
+ }
+
+ if (usb_port_status.port_over_current) {
+ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+ dev_dbg(dev, " OVER_CURRENT\n");
+ }
+
+ if (usb_port_status.port_powered) {
+ port_status |= (1 << USB_PORT_FEAT_POWER);
+ dev_dbg(dev, " POWER\n");
+ }
+
+ if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) {
+ port_status |= USB_PORT_STAT_HIGH_SPEED;
+ dev_dbg(dev, " HIGHSPEED\n");
+ } else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) {
+ port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+ dev_dbg(dev, " LOWSPEED\n");
+ }
+
+ *((__le32 *)buf) = cpu_to_le32(port_status);
+ break;
+ case SetHubFeature:
+ dev_dbg(dev, "SetHubFeature\n");
+ /* No HUB features supported */
+ break;
+ case SetPortFeature:
+ dev_dbg(dev, "SetPortFeature\n");
+ if (wIndex != 1) {
+ dev_dbg(dev, " INVALID\n");
+ return -EINVAL;
+ }
+
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(dev, " SUSPEND\n");
+ return -EINVAL;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dev, " POWER\n");
+ /*
+ * Program the port power bit to drive VBUS on the USB.
+ */
+ spin_lock_irqsave(&usb->lock, flags);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
+ cvmx_usbcx_hprt, prtpwr, 1);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ return 0;
+ case USB_PORT_FEAT_RESET:
+ dev_dbg(dev, " RESET\n");
+ spin_lock_irqsave(&usb->lock, flags);
+ cvmx_usb_reset_port(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ return 0;
+ case USB_PORT_FEAT_INDICATOR:
+ dev_dbg(dev, " INDICATOR\n");
+ /* Not supported */
+ break;
+ default:
+ dev_dbg(dev, " UNKNOWN\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_dbg(dev, "Unknown root hub request\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct hc_driver octeon_hc_driver = {
+ .description = "Octeon USB",
+ .product_desc = "Octeon Host Controller",
+ .hcd_priv_size = sizeof(struct octeon_hcd),
+ .irq = octeon_usb_irq,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
+ .start = octeon_usb_start,
+ .stop = octeon_usb_stop,
+ .urb_enqueue = octeon_usb_urb_enqueue,
+ .urb_dequeue = octeon_usb_urb_dequeue,
+ .endpoint_disable = octeon_usb_endpoint_disable,
+ .get_frame_number = octeon_usb_get_frame_number,
+ .hub_status_data = octeon_usb_hub_status_data,
+ .hub_control = octeon_usb_hub_control,
+ .map_urb_for_dma = octeon_map_urb_for_dma,
+ .unmap_urb_for_dma = octeon_unmap_urb_for_dma,
+};
+
+static int octeon_usb_probe(struct platform_device *pdev)
+{
+ int status;
+ int initialize_flags;
+ int usb_num;
+ struct resource *res_mem;
+ struct device_node *usbn_node;
+ int irq = platform_get_irq(pdev, 0);
+ struct device *dev = &pdev->dev;
+ struct octeon_hcd *usb;
+ struct usb_hcd *hcd;
+ u32 clock_rate = 48000000;
+ bool is_crystal_clock = false;
+ const char *clock_type;
+ int i;
+
+ if (!dev->of_node) {
+ dev_err(dev, "Error: empty of_node\n");
+ return -ENXIO;
+ }
+ usbn_node = dev->of_node->parent;
+
+ i = of_property_read_u32(usbn_node,
+ "clock-frequency", &clock_rate);
+ if (i)
+ i = of_property_read_u32(usbn_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No USBN \"clock-frequency\"\n");
+ return -ENXIO;
+ }
+ switch (clock_rate) {
+ case 12000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case 24000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case 48000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n",
+ clock_rate);
+ return -ENXIO;
+ }
+
+ i = of_property_read_string(usbn_node,
+ "cavium,refclk-type", &clock_type);
+ if (i)
+ i = of_property_read_string(usbn_node,
+ "refclk-type", &clock_type);
+
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+
+ if (is_crystal_clock)
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+ else
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem) {
+ dev_err(dev, "found no memory resource\n");
+ return -ENXIO;
+ }
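+	/*
+	 * Bit 44 of the register block's physical address distinguishes
+	 * the two USB blocks; the CVMX_USBCX* register macros apply the
+	 * same bit as the block-ID offset.
+	 */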
+ usb_num = (res_mem->start >> 44) & 1;
+
+ if (irq < 0) {
+ /* Defective device tree, but we know how to fix it. */
+ irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
+
+ irq = irq_create_mapping(NULL, hwirq);
+ }
+
+ /*
+	 * Set the DMA mask to 64 bits so we get buffers already translated for
+ * DMA.
+ */
+ i = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (i)
+ return i;
+
+ /*
+ * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+ * IOB priority registers. Under heavy network load USB
+ * hardware can be starved by the IOB causing a crash. Give
+ * it a priority boost if it has been waiting more than 400
+ * cycles to avoid this situation.
+ *
+ * Testing indicates that a cnt_val of 8192 is not sufficient,
+ * but no failures are seen with 4096. We choose a value of
+ * 400 to give a safety factor of 10.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+ pri_cnt.u64 = 0;
+ pri_cnt.s.cnt_enb = 1;
+ pri_cnt.s.cnt_val = 400;
+ cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+ }
+
+ hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
+ if (!hcd) {
+ dev_dbg(dev, "Failed to allocate memory for HCD\n");
+ return -1;
+ }
+ hcd->uses_new_polling = 1;
+ usb = (struct octeon_hcd *)hcd->hcd_priv;
+
+ spin_lock_init(&usb->lock);
+
+ usb->init_flags = initialize_flags;
+
+ /* Initialize the USB state structure */
+ usb->index = usb_num;
+ INIT_LIST_HEAD(&usb->idle_pipes);
+ for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
+ INIT_LIST_HEAD(&usb->active_pipes[i]);
+
+ /* Due to an errata, CN31XX doesn't support DMA */
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
+ usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
+ /* Only use one channel with non DMA */
+ usb->idle_hardware_channels = 0x1;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
+ /* CN5XXX have an errata with channel 3 */
+ usb->idle_hardware_channels = 0xf7;
+ } else {
+ usb->idle_hardware_channels = 0xff;
+ }
+
+ status = cvmx_usb_initialize(dev, usb);
+ if (status) {
+ dev_dbg(dev, "USB initialization failed with %d\n", status);
+ usb_put_hcd(hcd);
+ return -1;
+ }
+
+ status = usb_add_hcd(hcd, irq, 0);
+ if (status) {
+ dev_dbg(dev, "USB add HCD failed with %d\n", status);
+ usb_put_hcd(hcd);
+ return -1;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+
+ return 0;
+}
+
+static int octeon_usb_remove(struct platform_device *pdev)
+{
+ int status;
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
+ unsigned long flags;
+
+ usb_remove_hcd(hcd);
+ spin_lock_irqsave(&usb->lock, flags);
+ status = cvmx_usb_shutdown(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
+ if (status)
+ dev_dbg(dev, "USB shutdown failed with %d\n", status);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static const struct of_device_id octeon_usb_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-usbc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_usb_match);
+
+static struct platform_driver octeon_usb_driver = {
+ .driver = {
+ .name = "octeon-hcd",
+ .of_match_table = octeon_usb_match,
+ },
+ .probe = octeon_usb_probe,
+ .remove = octeon_usb_remove,
+};
+
+static int __init octeon_usb_driver_init(void)
+{
+ if (usb_disabled())
+ return 0;
+
+ return platform_driver_register(&octeon_usb_driver);
+}
+module_init(octeon_usb_driver_init);
+
+static void __exit octeon_usb_driver_exit(void)
+{
+ if (usb_disabled())
+ return;
+
+ platform_driver_unregister(&octeon_usb_driver);
+}
+module_exit(octeon_usb_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
new file mode 100644
index 000000000000..9ed619c93a4e
--- /dev/null
+++ b/drivers/staging/octeon-usb/octeon-hcd.h
@@ -0,0 +1,1847 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Octeon HCD hardware register definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Some parts of the code were originally released under BSD license:
+ *
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
+ * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ */
+
+#ifndef __OCTEON_HCD_H__
+#define __OCTEON_HCD_H__
+
+#include <asm/bitfield.h>
+
+#define CVMX_USBCXBASE 0x00016F0010000000ull
+#define CVMX_USBCXREG1(reg, bid) \
+ (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
+ ((bid) & 1) * 0x100000000000ull)
+#define CVMX_USBCXREG2(reg, bid, off) \
+ (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
+ (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32)
+
+#define CVMX_USBCX_GAHBCFG(bid) CVMX_USBCXREG1(0x008, bid)
+#define CVMX_USBCX_GHWCFG3(bid) CVMX_USBCXREG1(0x04c, bid)
+#define CVMX_USBCX_GINTMSK(bid) CVMX_USBCXREG1(0x018, bid)
+#define CVMX_USBCX_GINTSTS(bid) CVMX_USBCXREG1(0x014, bid)
+#define CVMX_USBCX_GNPTXFSIZ(bid) CVMX_USBCXREG1(0x028, bid)
+#define CVMX_USBCX_GNPTXSTS(bid) CVMX_USBCXREG1(0x02c, bid)
+#define CVMX_USBCX_GOTGCTL(bid) CVMX_USBCXREG1(0x000, bid)
+#define CVMX_USBCX_GRSTCTL(bid) CVMX_USBCXREG1(0x010, bid)
+#define CVMX_USBCX_GRXFSIZ(bid) CVMX_USBCXREG1(0x024, bid)
+#define CVMX_USBCX_GRXSTSPH(bid) CVMX_USBCXREG1(0x020, bid)
+#define CVMX_USBCX_GUSBCFG(bid) CVMX_USBCXREG1(0x00c, bid)
+#define CVMX_USBCX_HAINT(bid) CVMX_USBCXREG1(0x414, bid)
+#define CVMX_USBCX_HAINTMSK(bid) CVMX_USBCXREG1(0x418, bid)
+#define CVMX_USBCX_HCCHARX(off, bid) CVMX_USBCXREG2(0x500, bid, off)
+#define CVMX_USBCX_HCFG(bid) CVMX_USBCXREG1(0x400, bid)
+#define CVMX_USBCX_HCINTMSKX(off, bid) CVMX_USBCXREG2(0x50c, bid, off)
+#define CVMX_USBCX_HCINTX(off, bid) CVMX_USBCXREG2(0x508, bid, off)
+#define CVMX_USBCX_HCSPLTX(off, bid) CVMX_USBCXREG2(0x504, bid, off)
+#define CVMX_USBCX_HCTSIZX(off, bid) CVMX_USBCXREG2(0x510, bid, off)
+#define CVMX_USBCX_HFIR(bid) CVMX_USBCXREG1(0x404, bid)
+#define CVMX_USBCX_HFNUM(bid) CVMX_USBCXREG1(0x408, bid)
+#define CVMX_USBCX_HPRT(bid) CVMX_USBCXREG1(0x440, bid)
+#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid)
+#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid)
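+
+/*
+ * Example: CVMX_USBCX_HCCHARX(3, 0) expands to
+ * CVMX_ADD_IO_SEG(CVMX_USBCXBASE | 0x500) + (3 & 7) * 32, so the
+ * per-channel registers are spaced 32 bytes apart and block 1 sits
+ * 0x100000000000 (bit 44) above block 0.
+ */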
+
+#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
+#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
+
+#define CVMX_USBNXREG1(reg, bid) \
+ (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
+#define CVMX_USBNXREG2(reg, bid) \
+ (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
+
+#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid)
+#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid)
+#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid)
+#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid)
+
+/**
+ * cvmx_usbc#_gahbcfg
+ *
+ * Core AHB Configuration Register (GAHBCFG)
+ *
+ * This register can be used to configure the core after power-on or a change in
+ * mode of operation. This register mainly contains AHB system-related
+ * configuration parameters. The AHB is the processor interface to the O2P USB
+ * core. In general, software need not know about this interface except to
+ * program the values as specified.
+ *
+ * The application must program this register as part of the O2P USB core
+ * initialization. Do not change this register after the initial programming.
+ */
+union cvmx_usbcx_gahbcfg {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gahbcfg_s
+ * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
+ * Software should set this bit to 0x1.
+ * Indicates when the Periodic TxFIFO Empty Interrupt bit in the
+ * Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This
+ * bit is used only in Slave mode.
+ * * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic
+ * TxFIFO is half empty
+ * * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic
+ * TxFIFO is completely empty
+ * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl)
+ * Software should set this bit to 0x1.
+ * Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in
+ * the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered.
+ * This bit is used only in Slave mode.
+ * * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non-
+ * Periodic TxFIFO is half empty
+ * * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non-
+ * Periodic TxFIFO is completely empty
+ * @dmaen: DMA Enable (DMAEn)
+ * * 1'b0: Core operates in Slave mode
+ * * 1'b1: Core operates in a DMA mode
+ * @hbstlen: Burst Length/Type (HBstLen)
+	 * This field has no effect and should be left as 0x0.
+ * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk)
+ * Software should set this field to 0x1.
+ * The application uses this bit to mask or unmask the interrupt
+ * line assertion to itself. Irrespective of this bit's setting,
+ * the interrupt status registers are updated by the core.
+ * * 1'b0: Mask the interrupt assertion to the application.
+ * * 1'b1: Unmask the interrupt assertion to the application.
+ */
+ struct cvmx_usbcx_gahbcfg_s {
+ __BITFIELD_FIELD(u32 reserved_9_31 : 23,
+ __BITFIELD_FIELD(u32 ptxfemplvl : 1,
+ __BITFIELD_FIELD(u32 nptxfemplvl : 1,
+ __BITFIELD_FIELD(u32 reserved_6_6 : 1,
+ __BITFIELD_FIELD(u32 dmaen : 1,
+ __BITFIELD_FIELD(u32 hbstlen : 4,
+ __BITFIELD_FIELD(u32 glblintrmsk : 1,
+ ;)))))))
+ } s;
+};
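+
+/*
+ * Bring-up sketch (hypothetical values, assuming the driver's usual
+ * CSR write helper): DMA mode with the interrupt line unmasked could
+ * be programmed as:
+ *
+ *	union cvmx_usbcx_gahbcfg gahbcfg;
+ *
+ *	gahbcfg.u32 = 0;
+ *	gahbcfg.s.dmaen = 1;        (core operates in DMA mode)
+ *	gahbcfg.s.glblintrmsk = 1;  (unmask the interrupt line)
+ *	cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
+ *			     gahbcfg.u32);
+ */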
+
+/**
+ * cvmx_usbc#_ghwcfg3
+ *
+ * User HW Config3 Register (GHWCFG3)
+ *
+ * This register contains the configuration options of the O2P USB core.
+ */
+union cvmx_usbcx_ghwcfg3 {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_ghwcfg3_s
+ * @dfifodepth: DFIFO Depth (DfifoDepth)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 32
+ * * Maximum value is 32768
+ * @ahbphysync: AHB and PHY Synchronous (AhbPhySync)
+ * Indicates whether AHB and PHY clocks are synchronous to
+ * each other.
+ * * 1'b0: No
+ * * 1'b1: Yes
+ * This bit is tied to 1.
+ * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType)
+ * * 1'b0: Asynchronous reset is used in the core
+ * * 1'b1: Synchronous reset is used in the core
+ * @optfeature: Optional Features Removed (OptFeature)
+ * Indicates whether the User ID register, GPIO interface ports,
+ * and SOF toggle and counter ports were removed for gate count
+ * optimization.
+ * @vendor_control_interface_support: Vendor Control Interface Support
+ * * 1'b0: Vendor Control Interface is not available on the core.
+ * * 1'b1: Vendor Control Interface is available.
+ * @i2c_selection: I2C Selection
+ * * 1'b0: I2C Interface is not available on the core.
+ * * 1'b1: I2C Interface is available on the core.
+ * @otgen: OTG Function Enabled (OtgEn)
+ * The application uses this bit to indicate the O2P USB core's
+ * OTG capabilities.
+ * * 1'b0: Not OTG capable
+ * * 1'b1: OTG Capable
+ * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth)
+ * * 3'b000: 4 bits
+ * * 3'b001: 5 bits
+ * * 3'b010: 6 bits
+ * * 3'b011: 7 bits
+ * * 3'b100: 8 bits
+ * * 3'b101: 9 bits
+ * * 3'b110: 10 bits
+ * * Others: Reserved
+ * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth)
+ * * 4'b0000: 11 bits
+ * * 4'b0001: 12 bits
+ * - ...
+ * * 4'b1000: 19 bits
+ * * Others: Reserved
+ */
+ struct cvmx_usbcx_ghwcfg3_s {
+ __BITFIELD_FIELD(u32 dfifodepth : 16,
+ __BITFIELD_FIELD(u32 reserved_13_15 : 3,
+ __BITFIELD_FIELD(u32 ahbphysync : 1,
+ __BITFIELD_FIELD(u32 rsttype : 1,
+ __BITFIELD_FIELD(u32 optfeature : 1,
+ __BITFIELD_FIELD(u32 vendor_control_interface_support : 1,
+ __BITFIELD_FIELD(u32 i2c_selection : 1,
+ __BITFIELD_FIELD(u32 otgen : 1,
+ __BITFIELD_FIELD(u32 pktsizewidth : 3,
+ __BITFIELD_FIELD(u32 xfersizewidth : 4,
+ ;))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_gintmsk
+ *
+ * Core Interrupt Mask Register (GINTMSK)
+ *
+ * This register works with the Core Interrupt register to interrupt the
+ * application. When an interrupt bit is masked, the interrupt associated with
+ * that bit will not be generated. However, the Core Interrupt (GINTSTS)
+ * register bit corresponding to that interrupt will still be set.
+ * Mask interrupt: 1'b0, Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_gintmsk {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gintmsk_s
+ * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask
+ * (WkUpIntMsk)
+ * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask
+ * (SessReqIntMsk)
+ * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk)
+ * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk)
+ * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk)
+ * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk)
+ * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk)
+ * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk)
+ * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk)
+ * Incomplete Isochronous OUT Transfer Mask
+ * (incompISOOUTMsk)
+ * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask
+ * (incompISOINMsk)
+ * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk)
+ * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk)
+ * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk)
+ * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk)
+ * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask
+ * (ISOOutDropMsk)
+ * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk)
+ * @usbrstmsk: USB Reset Mask (USBRstMsk)
+ * @usbsuspmsk: USB Suspend Mask (USBSuspMsk)
+ * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk)
+ * @i2cint: I2C Interrupt Mask (I2CINT)
+ * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk)
+ * I2C Carkit Interrupt Mask (I2CCKINTMsk)
+ * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk)
+ * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask
+ * (GINNakEffMsk)
+ * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk)
+ * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk)
+ * @sofmsk: Start of (micro)Frame Mask (SofMsk)
+ * @otgintmsk: OTG Interrupt Mask (OTGIntMsk)
+ * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk)
+ */
+ struct cvmx_usbcx_gintmsk_s {
+ __BITFIELD_FIELD(u32 wkupintmsk : 1,
+ __BITFIELD_FIELD(u32 sessreqintmsk : 1,
+ __BITFIELD_FIELD(u32 disconnintmsk : 1,
+ __BITFIELD_FIELD(u32 conidstschngmsk : 1,
+ __BITFIELD_FIELD(u32 reserved_27_27 : 1,
+ __BITFIELD_FIELD(u32 ptxfempmsk : 1,
+ __BITFIELD_FIELD(u32 hchintmsk : 1,
+ __BITFIELD_FIELD(u32 prtintmsk : 1,
+ __BITFIELD_FIELD(u32 reserved_23_23 : 1,
+ __BITFIELD_FIELD(u32 fetsuspmsk : 1,
+ __BITFIELD_FIELD(u32 incomplpmsk : 1,
+ __BITFIELD_FIELD(u32 incompisoinmsk : 1,
+ __BITFIELD_FIELD(u32 oepintmsk : 1,
+ __BITFIELD_FIELD(u32 inepintmsk : 1,
+ __BITFIELD_FIELD(u32 epmismsk : 1,
+ __BITFIELD_FIELD(u32 reserved_16_16 : 1,
+ __BITFIELD_FIELD(u32 eopfmsk : 1,
+ __BITFIELD_FIELD(u32 isooutdropmsk : 1,
+ __BITFIELD_FIELD(u32 enumdonemsk : 1,
+ __BITFIELD_FIELD(u32 usbrstmsk : 1,
+ __BITFIELD_FIELD(u32 usbsuspmsk : 1,
+ __BITFIELD_FIELD(u32 erlysuspmsk : 1,
+ __BITFIELD_FIELD(u32 i2cint : 1,
+ __BITFIELD_FIELD(u32 ulpickintmsk : 1,
+ __BITFIELD_FIELD(u32 goutnakeffmsk : 1,
+ __BITFIELD_FIELD(u32 ginnakeffmsk : 1,
+ __BITFIELD_FIELD(u32 nptxfempmsk : 1,
+ __BITFIELD_FIELD(u32 rxflvlmsk : 1,
+ __BITFIELD_FIELD(u32 sofmsk : 1,
+ __BITFIELD_FIELD(u32 otgintmsk : 1,
+ __BITFIELD_FIELD(u32 modemismsk : 1,
+ __BITFIELD_FIELD(u32 reserved_0_0 : 1,
+ ;))))))))))))))))))))))))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_gintsts
+ *
+ * Core Interrupt Register (GINTSTS)
+ *
+ * This register interrupts the application for system-level events in the
+ * current mode of operation (Device mode or Host mode). It is shown in
+ * Interrupt. Some of the bits in this register are valid only in Host mode,
+ * while others are valid in Device mode only. This register also indicates the
+ * current mode of operation. In order to clear the interrupt status bits of
+ * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status
+ * interrupts are read only; once software reads from or writes to the FIFO
+ * while servicing these interrupts, FIFO interrupt conditions are cleared
+ * automatically.
+ */
+union cvmx_usbcx_gintsts {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gintsts_s
+ * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt)
+ * In Device mode, this interrupt is asserted when a resume is
+ * detected on the USB. In Host mode, this interrupt is asserted
+ * when a remote wakeup is detected on the USB.
+ * For more information on how to use this interrupt, see "Partial
+ * Power-Down and Clock Gating Programming Model" on
+ * page 353.
+ * @sessreqint: Session Request/New Session Detected Interrupt
+ * (SessReqInt)
+ * In Host mode, this interrupt is asserted when a session request
+ * is detected from the device. In Device mode, this interrupt is
+ * asserted when the utmiotg_bvalid signal goes high.
+ * For more information on how to use this interrupt, see "Partial
+ * Power-Down and Clock Gating Programming Model" on
+ * page 353.
+ * @disconnint: Disconnect Detected Interrupt (DisconnInt)
+ * Asserted when a device disconnect is detected.
+ * @conidstschng: Connector ID Status Change (ConIDStsChng)
+ * The core sets this bit when there is a change in connector ID
+ * status.
+ * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp)
+ * Asserted when the Periodic Transmit FIFO is either half or
+ * completely empty and there is space for at least one entry to be
+ * written in the Periodic Request Queue. The half or completely
+ * empty status is determined by the Periodic TxFIFO Empty Level
+ * bit in the Core AHB Configuration register
+ * (GAHBCFG.PTxFEmpLvl).
+ * @hchint: Host Channels Interrupt (HChInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the channels of the core (in Host mode). The
+ * application must read the Host All Channels Interrupt (HAINT)
+ * register to determine the exact number of the channel on which
+ * the interrupt occurred, and then read the corresponding Host
+ * Channel-n Interrupt (HCINTn) register to determine the exact
+ * cause of the interrupt. The application must clear the
+ * appropriate status bit in the HCINTn register to clear this bit.
+ * @prtint: Host Port Interrupt (PrtInt)
+ * The core sets this bit to indicate a change in port status of
+ * one of the O2P USB core ports in Host mode. The application must
+ * read the Host Port Control and Status (HPRT) register to
+ * determine the exact event that caused this interrupt. The
+ * application must clear the appropriate status bit in the Host
+ * Port Control and Status register to clear this bit.
+ * @fetsusp: Data Fetch Suspended (FetSusp)
+ * This interrupt is valid only in DMA mode. This interrupt
+ * indicates that the core has stopped fetching data for IN
+ * endpoints due to the unavailability of TxFIFO space or Request
+ * Queue space. This interrupt is used by the application for an
+ * endpoint mismatch algorithm.
+ * @incomplp: Incomplete Periodic Transfer (incomplP)
+ * In Host mode, the core sets this interrupt bit when there are
+ * incomplete periodic transactions still pending which are
+ * scheduled for the current microframe.
+ * Incomplete Isochronous OUT Transfer (incompISOOUT)
+	 * In Device mode, the core sets this interrupt to indicate that
+ * there is at least one isochronous OUT endpoint on which the
+ * transfer is not completed in the current microframe. This
+ * interrupt is asserted along with the End of Periodic Frame
+ * Interrupt (EOPF) bit in this register.
+ * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN)
+ * The core sets this interrupt to indicate that there is at least
+ * one isochronous IN endpoint on which the transfer is not
+ * completed in the current microframe. This interrupt is asserted
+ * along with the End of Periodic Frame Interrupt (EOPF) bit in
+ * this register.
+ * @oepint: OUT Endpoints Interrupt (OEPInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the OUT endpoints of the core (in Device mode). The
+ * application must read the Device All Endpoints Interrupt
+ * (DAINT) register to determine the exact number of the OUT
+ * endpoint on which the interrupt occurred, and then read the
+ * corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
+ * register to determine the exact cause of the interrupt. The
+ * application must clear the appropriate status bit in the
+ * corresponding DOEPINTn register to clear this bit.
+ * @iepint: IN Endpoints Interrupt (IEPInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the IN endpoints of the core (in Device mode). The
+ * application must read the Device All Endpoints Interrupt
+ * (DAINT) register to determine the exact number of the IN
+ * endpoint on which the interrupt occurred, and then read the
+ * corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
+ * register to determine the exact cause of the interrupt. The
+ * application must clear the appropriate status bit in the
+ * corresponding DIEPINTn register to clear this bit.
+ * @epmis: Endpoint Mismatch Interrupt (EPMis)
+ * Indicates that an IN token has been received for a non-periodic
+ * endpoint, but the data for another endpoint is present in the
+ * top of the Non-Periodic Transmit FIFO and the IN endpoint
+ * mismatch count programmed by the application has expired.
+ * @eopf: End of Periodic Frame Interrupt (EOPF)
+ * Indicates that the period specified in the Periodic Frame
+ * Interval field of the Device Configuration register
+ * (DCFG.PerFrInt) has been reached in the current microframe.
+ * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
+ * The core sets this bit when it fails to write an isochronous OUT
+ * packet into the RxFIFO because the RxFIFO doesn't have
+ * enough space to accommodate a maximum packet size packet
+ * for the isochronous OUT endpoint.
+ * @enumdone: Enumeration Done (EnumDone)
+ * The core sets this bit to indicate that speed enumeration is
+ * complete. The application must read the Device Status (DSTS)
+ * register to obtain the enumerated speed.
+ * @usbrst: USB Reset (USBRst)
+ * The core sets this bit to indicate that a reset is detected on
+ * the USB.
+ * @usbsusp: USB Suspend (USBSusp)
+ * The core sets this bit to indicate that a suspend was detected
+ * on the USB. The core enters the Suspended state when there
+ * is no activity on the phy_line_state_i signal for an extended
+ * period of time.
+ * @erlysusp: Early Suspend (ErlySusp)
+ * The core sets this bit to indicate that an Idle state has been
+ * detected on the USB for 3 ms.
+ * @i2cint: I2C Interrupt (I2CINT)
+ * This bit is always 0x0.
+ * @ulpickint: ULPI Carkit Interrupt (ULPICKINT)
+ * This bit is always 0x0.
+ * @goutnakeff: Global OUT NAK Effective (GOUTNakEff)
+ * Indicates that the Set Global OUT NAK bit in the Device Control
+ * register (DCTL.SGOUTNak), set by the application, has taken
+ * effect in the core. This bit can be cleared by writing the Clear
+ * Global OUT NAK bit in the Device Control register
+ * (DCTL.CGOUTNak).
+ * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff)
+ * Indicates that the Set Global Non-Periodic IN NAK bit in the
+ * Device Control register (DCTL.SGNPInNak), set by the
+ * application, has taken effect in the core. That is, the core has
+ * sampled the Global IN NAK bit set by the application. This bit
+ * can be cleared by clearing the Clear Global Non-Periodic IN
+ * NAK bit in the Device Control register (DCTL.CGNPInNak).
+ * This interrupt does not necessarily mean that a NAK handshake
+ * is sent out on the USB. The STALL bit takes precedence over
+ * the NAK bit.
+ * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp)
+ * This interrupt is asserted when the Non-Periodic TxFIFO is
+ * either half or completely empty, and there is space for at least
+ * one entry to be written to the Non-Periodic Transmit Request
+ * Queue. The half or completely empty status is determined by
+ * the Non-Periodic TxFIFO Empty Level bit in the Core AHB
+ * Configuration register (GAHBCFG.NPTxFEmpLvl).
+ * @rxflvl: RxFIFO Non-Empty (RxFLvl)
+ * Indicates that there is at least one packet pending to be read
+ * from the RxFIFO.
+ * @sof: Start of (micro)Frame (Sof)
+ * In Host mode, the core sets this bit to indicate that an SOF
+ * (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
+ * USB. The application must write a 1 to this bit to clear the
+ * interrupt.
+ * In Device mode, the core sets this bit to indicate that an
+ * SOF token has been received on the USB. The application can read
+ * the Device Status register to get the current (micro)frame
+ * number. This interrupt is seen only when the core is operating
+ * at either HS or FS.
+ * @otgint: OTG Interrupt (OTGInt)
+ * The core sets this bit to indicate an OTG protocol event. The
+ * application must read the OTG Interrupt Status (GOTGINT)
+ * register to determine the exact event that caused this
+ * interrupt. The application must clear the appropriate status bit
+ * in the GOTGINT register to clear this bit.
+ * @modemis: Mode Mismatch Interrupt (ModeMis)
+ * The core sets this bit when the application is trying to access:
+ * * A Host mode register, when the core is operating in Device
+ * mode
+ * * A Device mode register, when the core is operating in Host
+ * mode
+ * The register access is completed on the AHB with an OKAY
+ * response, but is ignored by the core internally and doesn't
+ * affect the operation of the core.
+ * @curmod: Current Mode of Operation (CurMod)
+ * Indicates the current mode of operation.
+ * * 1'b0: Device mode
+ * * 1'b1: Host mode
+ */
+ struct cvmx_usbcx_gintsts_s {
+ __BITFIELD_FIELD(u32 wkupint : 1,
+ __BITFIELD_FIELD(u32 sessreqint : 1,
+ __BITFIELD_FIELD(u32 disconnint : 1,
+ __BITFIELD_FIELD(u32 conidstschng : 1,
+ __BITFIELD_FIELD(u32 reserved_27_27 : 1,
+ __BITFIELD_FIELD(u32 ptxfemp : 1,
+ __BITFIELD_FIELD(u32 hchint : 1,
+ __BITFIELD_FIELD(u32 prtint : 1,
+ __BITFIELD_FIELD(u32 reserved_23_23 : 1,
+ __BITFIELD_FIELD(u32 fetsusp : 1,
+ __BITFIELD_FIELD(u32 incomplp : 1,
+ __BITFIELD_FIELD(u32 incompisoin : 1,
+ __BITFIELD_FIELD(u32 oepint : 1,
+ __BITFIELD_FIELD(u32 iepint : 1,
+ __BITFIELD_FIELD(u32 epmis : 1,
+ __BITFIELD_FIELD(u32 reserved_16_16 : 1,
+ __BITFIELD_FIELD(u32 eopf : 1,
+ __BITFIELD_FIELD(u32 isooutdrop : 1,
+ __BITFIELD_FIELD(u32 enumdone : 1,
+ __BITFIELD_FIELD(u32 usbrst : 1,
+ __BITFIELD_FIELD(u32 usbsusp : 1,
+ __BITFIELD_FIELD(u32 erlysusp : 1,
+ __BITFIELD_FIELD(u32 i2cint : 1,
+ __BITFIELD_FIELD(u32 ulpickint : 1,
+ __BITFIELD_FIELD(u32 goutnakeff : 1,
+ __BITFIELD_FIELD(u32 ginnakeff : 1,
+ __BITFIELD_FIELD(u32 nptxfemp : 1,
+ __BITFIELD_FIELD(u32 rxflvl : 1,
+ __BITFIELD_FIELD(u32 sof : 1,
+ __BITFIELD_FIELD(u32 otgint : 1,
+ __BITFIELD_FIELD(u32 modemis : 1,
+ __BITFIELD_FIELD(u32 curmod : 1,
+ ;))))))))))))))))))))))))))))))))
+ } s;
+};
+
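+/*
+ * Illustrative sketch (not part of the hardware definition): dispatching a
+ * host-channel interrupt as described for HChInt above. usbc_read32() and
+ * handle_channel_irq() are hypothetical helpers, and the CVMX_USBCX_*
+ * address macros are assumed to be defined elsewhere in this header.
+ *
+ *	union cvmx_usbcx_gintsts gintsts;
+ *	union cvmx_usbcx_haint haint;
+ *
+ *	gintsts.u32 = usbc_read32(CVMX_USBCX_GINTSTS(0));
+ *	if (gintsts.s.hchint) {
+ *		// HChInt: HAINT tells us which channel(s) are pending.
+ *		haint.u32 = usbc_read32(CVMX_USBCX_HAINT(0));
+ *		while (haint.s.haint) {
+ *			int ch = __ffs(haint.s.haint);
+ *
+ *			handle_channel_irq(ch);
+ *			haint.s.haint &= ~(1 << ch);
+ *		}
+ *	}
+ */
+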
+/**
+ * cvmx_usbc#_gnptxfsiz
+ *
+ * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ)
+ *
+ * The application can program the RAM size and the memory start address for the
+ * Non-Periodic TxFIFO.
+ */
+union cvmx_usbcx_gnptxfsiz {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gnptxfsiz_s
+ * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep)
+ * This value is in terms of 32-bit words.
+ * Minimum value is 16
+ * Maximum value is 32768
+ * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr)
+ * This field contains the memory start address for Non-Periodic
+ * Transmit FIFO RAM.
+ */
+ struct cvmx_usbcx_gnptxfsiz_s {
+ __BITFIELD_FIELD(u32 nptxfdep : 16,
+ __BITFIELD_FIELD(u32 nptxfstaddr : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_gnptxsts
+ *
+ * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS)
+ *
+ * This read-only register contains the free space information for the
+ * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue.
+ */
+union cvmx_usbcx_gnptxsts {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gnptxsts_s
+ * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
+ * Entry in the Non-Periodic Tx Request Queue that is currently
+ * being processed by the MAC.
+ * * Bits [30:27]: Channel/endpoint number
+ * * Bits [26:25]:
+ * - 2'b00: IN/OUT token
+ * - 2'b01: Zero-length transmit packet (device IN/host OUT)
+ * - 2'b10: PING/CSPLIT token
+ * - 2'b11: Channel halt command
+ * * Bit [24]: Terminate (last entry for selected channel/endpoint)
+ * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available
+ * (NPTxQSpcAvail)
+ * Indicates the amount of free space available in the Non-
+ * Periodic Transmit Request Queue. This queue holds both IN
+ * and OUT requests in Host mode. Device mode has only IN
+ * requests.
+ * * 8'h0: Non-Periodic Transmit Request Queue is full
+ * * 8'h1: 1 location available
+ * * 8'h2: 2 locations available
+ * * n: n locations available (0..8)
+ * * Others: Reserved
+ * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail)
+ * Indicates the amount of free space available in the Non-
+ * Periodic TxFIFO.
+ * Values are in terms of 32-bit words.
+ * * 16'h0: Non-Periodic TxFIFO is full
+ * * 16'h1: 1 word available
+ * * 16'h2: 2 words available
+ * * 16'hn: n words available (where 0..32768)
+ * * 16'h8000: 32768 words available
+ * * Others: Reserved
+ */
+ struct cvmx_usbcx_gnptxsts_s {
+ __BITFIELD_FIELD(u32 reserved_31_31 : 1,
+ __BITFIELD_FIELD(u32 nptxqtop : 7,
+ __BITFIELD_FIELD(u32 nptxqspcavail : 8,
+ __BITFIELD_FIELD(u32 nptxfspcavail : 16,
+ ;))))
+ } s;
+};
+
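+/*
+ * Illustrative sketch: gating a non-periodic request on the free-space
+ * fields above. usbc_read32() and queue_nonperiodic_request() are
+ * hypothetical helpers; len is the packet length in bytes.
+ *
+ *	union cvmx_usbcx_gnptxsts sts;
+ *
+ *	sts.u32 = usbc_read32(CVMX_USBCX_GNPTXSTS(0));
+ *	// Need one request-queue slot plus enough 32-bit words of FIFO.
+ *	if (sts.s.nptxqspcavail > 0 &&
+ *	    sts.s.nptxfspcavail >= DIV_ROUND_UP(len, 4))
+ *		queue_nonperiodic_request();
+ */
+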
+/**
+ * cvmx_usbc#_grstctl
+ *
+ * Core Reset Register (GRSTCTL)
+ *
+ * The application uses this register to reset various hardware features inside
+ * the core.
+ */
+union cvmx_usbcx_grstctl {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_grstctl_s
+ * @ahbidle: AHB Master Idle (AHBIdle)
+ * Indicates that the AHB Master State Machine is in the IDLE
+ * condition.
+ * @dmareq: DMA Request Signal (DMAReq)
+ * Indicates that the DMA request is in progress. Used for debug.
+ * @txfnum: TxFIFO Number (TxFNum)
+ * This is the FIFO number that must be flushed using the TxFIFO
+ * Flush bit. This field must not be changed until the core clears
+ * the TxFIFO Flush bit.
+ * * 5'h0: Non-Periodic TxFIFO flush
+ * * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic
+ * TxFIFO flush in Host mode
+ * * 5'h2: Periodic TxFIFO 2 flush in Device mode
+ * - ...
+ * * 5'hF: Periodic TxFIFO 15 flush in Device mode
+ * * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the
+ * core
+ * @txfflsh: TxFIFO Flush (TxFFlsh)
+ * This bit selectively flushes a single or all transmit FIFOs, but
+ * cannot do so if the core is in the midst of a transaction.
+ * The application must only write this bit after checking that the
+ * core is neither writing to the TxFIFO nor reading from the
+ * TxFIFO.
+ * The application must wait until the core clears this bit before
+ * performing any operations. This bit takes 8 clocks (of phy_clk
+ * or hclk, whichever is slower) to clear.
+ * @rxfflsh: RxFIFO Flush (RxFFlsh)
+ * The application can flush the entire RxFIFO using this bit, but
+ * must first ensure that the core is not in the middle of a
+ * transaction.
+ * The application must only write to this bit after checking that
+ * the core is neither reading from the RxFIFO nor writing to the
+ * RxFIFO.
+ * The application must wait until the bit is cleared before
+ * performing any other operations. This bit will take 8 clocks
+ * (slowest of PHY or AHB clock) to clear.
+ * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh)
+ * The application writes this bit to flush the IN Token Sequence
+ * Learning Queue.
+ * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst)
+ * The application writes this bit to reset the (micro)frame number
+ * counter inside the core. When the (micro)frame counter is reset,
+ * the subsequent SOF sent out by the core will have a
+ * (micro)frame number of 0.
+ * @hsftrst: HClk Soft Reset (HSftRst)
+ * The application uses this bit to flush the control logic in the
+ * AHB Clock domain. Only AHB Clock Domain pipelines are reset.
+ * * FIFOs are not flushed with this bit.
+ * * All state machines in the AHB clock domain are reset to the
+ * Idle state after terminating the transactions on the AHB,
+ * following the protocol.
+ * * CSR control bits used by the AHB clock domain state
+ * machines are cleared.
+ * * To clear this interrupt, status mask bits that control the
+ * interrupt status and are generated by the AHB clock domain
+ * state machine are cleared.
+ * * Because interrupt status bits are not cleared, the application
+ * can get the status of any core events that occurred after it set
+ * this bit.
+ * This is a self-clearing bit that the core clears after all
+ * necessary logic is reset in the core. This may take several
+ * clocks, depending on the core's current state.
+ * @csftrst: Core Soft Reset (CSftRst)
+ * Resets the hclk and phy_clock domains as follows:
+ * * Clears the interrupts and all the CSR registers except the
+ * following register bits:
+ * - PCGCCTL.RstPdwnModule
+ * - PCGCCTL.GateHclk
+ * - PCGCCTL.PwrClmp
+ * - PCGCCTL.StopPclk
+ * - GUSBCFG.PhyLPwrClkSel
+ * - GUSBCFG.DDRSel
+ * - GUSBCFG.PHYSel
+ * - GUSBCFG.FSIntf
+ * - GUSBCFG.ULPI_UTMI_Sel
+ * - GUSBCFG.PHYIf
+ * - HCFG.FSLSPclkSel
+ * - DCFG.DevSpd
+ * * All module state machines (except the AHB Slave Unit) are
+ * reset to the IDLE state, and all the transmit FIFOs and the
+ * receive FIFO are flushed.
+ * * Any transactions on the AHB Master are terminated as soon
+ * as possible, after gracefully completing the last data phase of
+ * an AHB transfer. Any transactions on the USB are terminated
+ * immediately.
+ * The application can write to this bit any time it wants to reset
+ * the core. This is a self-clearing bit and the core clears this
+ * bit after all the necessary logic is reset in the core, which
+ * may take several clocks, depending on the current state of the
+ * core. Once this bit is cleared, software should wait at least 3
+ * PHY clocks before doing any access to the PHY domain
+ * (synchronization delay). Software should also check that
+ * bit 31 of this register is 1 (AHB Master is IDLE) before
+ * starting any operation.
+ * Typically software reset is used during software development
+ * and also when you dynamically change the PHY selection bits
+ * in the USB configuration registers listed above. When you
+ * change the PHY, the corresponding clock for the PHY is
+ * selected and used in the PHY domain. Once a new clock is
+ * selected, the PHY domain has to be reset for proper operation.
+ */
+ struct cvmx_usbcx_grstctl_s {
+ __BITFIELD_FIELD(u32 ahbidle : 1,
+ __BITFIELD_FIELD(u32 dmareq : 1,
+ __BITFIELD_FIELD(u32 reserved_11_29 : 19,
+ __BITFIELD_FIELD(u32 txfnum : 5,
+ __BITFIELD_FIELD(u32 txfflsh : 1,
+ __BITFIELD_FIELD(u32 rxfflsh : 1,
+ __BITFIELD_FIELD(u32 intknqflsh : 1,
+ __BITFIELD_FIELD(u32 frmcntrrst : 1,
+ __BITFIELD_FIELD(u32 hsftrst : 1,
+ __BITFIELD_FIELD(u32 csftrst : 1,
+ ;))))))))))
+ } s;
+};
+
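+/*
+ * Illustrative sketch of the CSftRst sequence described above: set the bit,
+ * wait for the core to clear it, then wait for AHBIdle before touching the
+ * core again. usbc_read32()/usbc_write32() are hypothetical accessors.
+ *
+ *	union cvmx_usbcx_grstctl grstctl;
+ *
+ *	grstctl.u32 = 0;
+ *	grstctl.s.csftrst = 1;
+ *	usbc_write32(CVMX_USBCX_GRSTCTL(0), grstctl.u32);
+ *	do {
+ *		grstctl.u32 = usbc_read32(CVMX_USBCX_GRSTCTL(0));
+ *	} while (grstctl.s.csftrst);		// self-clearing
+ *	do {
+ *		grstctl.u32 = usbc_read32(CVMX_USBCX_GRSTCTL(0));
+ *	} while (!grstctl.s.ahbidle);		// AHB master must be IDLE
+ *	udelay(1);	// covers the >= 3 PHY clock synchronization delay
+ */
+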
+/**
+ * cvmx_usbc#_grxfsiz
+ *
+ * Receive FIFO Size Register (GRXFSIZ)
+ *
+ * The application can program the RAM size that must be allocated to the
+ * RxFIFO.
+ */
+union cvmx_usbcx_grxfsiz {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_grxfsiz_s
+ * @rxfdep: RxFIFO Depth (RxFDep)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 16
+ * * Maximum value is 32768
+ */
+ struct cvmx_usbcx_grxfsiz_s {
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 rxfdep : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_grxstsph
+ *
+ * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
+ *
+ * A read of the Receive Status Read and Pop register returns the
+ * top data entry of the RxFIFO and additionally pops that entry.
+ * This description is only valid when the core is in Host mode. For
+ * Device mode use USBC_GRXSTSPD instead.
+ * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the
+ * same offset in the O2P USB core. The offset difference shown in this
+ * document is for software clarity and is actually ignored by the
+ * hardware.
+ */
+union cvmx_usbcx_grxstsph {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_grxstsph_s
+ * @pktsts: Packet Status (PktSts)
+ * Indicates the status of the received packet
+ * * 4'b0010: IN data packet received
+ * * 4'b0011: IN transfer completed (triggers an interrupt)
+ * * 4'b0101: Data toggle error (triggers an interrupt)
+ * * 4'b0111: Channel halted (triggers an interrupt)
+ * * Others: Reserved
+ * @dpid: Data PID (DPID)
+ * * 2'b00: DATA0
+ * * 2'b10: DATA1
+ * * 2'b01: DATA2
+ * * 2'b11: MDATA
+ * @bcnt: Byte Count (BCnt)
+ * Indicates the byte count of the received IN data packet
+ * @chnum: Channel Number (ChNum)
+ * Indicates the channel number to which the current received
+ * packet belongs.
+ */
+ struct cvmx_usbcx_grxstsph_s {
+ __BITFIELD_FIELD(u32 reserved_21_31 : 11,
+ __BITFIELD_FIELD(u32 pktsts : 4,
+ __BITFIELD_FIELD(u32 dpid : 2,
+ __BITFIELD_FIELD(u32 bcnt : 11,
+ __BITFIELD_FIELD(u32 chnum : 4,
+ ;)))))
+ } s;
+};
+
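+/*
+ * Illustrative sketch: popping one RxFIFO status entry in Host mode and
+ * decoding it with the PktSts values above. usbc_read32(), read_rx_data()
+ * and complete_transfer() are hypothetical helpers.
+ *
+ *	union cvmx_usbcx_grxstsph rx;
+ *
+ *	rx.u32 = usbc_read32(CVMX_USBCX_GRXSTSPH(0));	// pops the entry
+ *	switch (rx.s.pktsts) {
+ *	case 0x2:	// IN data packet received
+ *		read_rx_data(rx.s.chnum, rx.s.bcnt);
+ *		break;
+ *	case 0x3:	// IN transfer completed
+ *		complete_transfer(rx.s.chnum);
+ *		break;
+ *	}
+ */
+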
+/**
+ * cvmx_usbc#_gusbcfg
+ *
+ * Core USB Configuration Register (GUSBCFG)
+ *
+ * This register can be used to configure the core after power-on or
+ * after a change to Host mode or Device mode. It contains USB and
+ * USB-PHY related
+ * configuration parameters. The application must program this register before
+ * starting any transactions on either the AHB or the USB. Do not make changes
+ * to this register after the initial programming.
+ */
+union cvmx_usbcx_gusbcfg {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_gusbcfg_s
+ * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
+ * This bit is always 0x0.
+ * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel)
+ * Software should set this bit to 0x0.
+ * Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
+ * FS and LS modes, the PHY can usually operate on a 48-MHz
+ * clock to save power.
+ * * 1'b0: 480-MHz Internal PLL clock
+ * * 1'b1: 48-MHz External Clock
+ * In 480-MHz mode, the UTMI interface operates at either 60 or
+ * 30 MHz, depending upon whether 8- or 16-bit data width is
+ * selected. In 48-MHz mode, the UTMI interface operates at 48
+ * MHz in FS mode and at either 48 or 6 MHz in LS mode
+ * (depending on the PHY vendor).
+ * This bit drives the utmi_fsls_low_power core output signal, and
+ * is valid only for UTMI+ PHYs.
+ * @usbtrdtim: USB Turnaround Time (USBTrdTim)
+ * Sets the turnaround time in PHY clocks.
+ * Specifies the response time for a MAC request to the Packet
+ * FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
+ * This must be programmed to 0x5.
+ * @hnpcap: HNP-Capable (HNPCap)
+ * This bit is always 0x0.
+ * @srpcap: SRP-Capable (SRPCap)
+ * This bit is always 0x0.
+ * @ddrsel: ULPI DDR Select (DDRSel)
+ * Software should set this bit to 0x0.
+ * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial
+ * Software should set this bit to 0x0.
+ * @fsintf: Full-Speed Serial Interface Select (FSIntf)
+ * Software should set this bit to 0x0.
+ * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel)
+ * This bit is always 0x0.
+ * @phyif: PHY Interface (PHYIf)
+ * This bit is always 0x1.
+ * @toutcal: HS/FS Timeout Calibration (TOutCal)
+ * The number of PHY clocks that the application programs in this
+ * field is added to the high-speed/full-speed interpacket timeout
+ * duration in the core to account for any additional delays
+ * introduced by the PHY. This may be required, since the delay
+ * introduced by the PHY in generating the linestate condition may
+ * vary from one PHY to another.
+ * The USB standard timeout value for high-speed operation is
+ * 736 to 816 (inclusive) bit times. The USB standard timeout
+ * value for full-speed operation is 16 to 18 (inclusive) bit
+ * times. The application must program this field based on the
+ * speed of enumeration. The number of bit times added per PHY
+ * clock are:
+ * High-speed operation:
+ * * One 30-MHz PHY clock = 16 bit times
+ * * One 60-MHz PHY clock = 8 bit times
+ * Full-speed operation:
+ * * One 30-MHz PHY clock = 0.4 bit times
+ * * One 60-MHz PHY clock = 0.2 bit times
+ * * One 48-MHz PHY clock = 0.25 bit times
+ */
+ struct cvmx_usbcx_gusbcfg_s {
+ __BITFIELD_FIELD(u32 reserved_17_31 : 15,
+ __BITFIELD_FIELD(u32 otgi2csel : 1,
+ __BITFIELD_FIELD(u32 phylpwrclksel : 1,
+ __BITFIELD_FIELD(u32 reserved_14_14 : 1,
+ __BITFIELD_FIELD(u32 usbtrdtim : 4,
+ __BITFIELD_FIELD(u32 hnpcap : 1,
+ __BITFIELD_FIELD(u32 srpcap : 1,
+ __BITFIELD_FIELD(u32 ddrsel : 1,
+ __BITFIELD_FIELD(u32 physel : 1,
+ __BITFIELD_FIELD(u32 fsintf : 1,
+ __BITFIELD_FIELD(u32 ulpi_utmi_sel : 1,
+ __BITFIELD_FIELD(u32 phyif : 1,
+ __BITFIELD_FIELD(u32 toutcal : 3,
+ ;)))))))))))))
+ } s;
+};
+
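+/*
+ * Illustrative sketch: one-time GUSBCFG programming using the fixed values
+ * called out above (USBTrdTim = 0x5, PhyLPwrClkSel = 0).
+ * usbc_read32()/usbc_write32() are hypothetical accessors.
+ *
+ *	union cvmx_usbcx_gusbcfg usbcfg;
+ *
+ *	usbcfg.u32 = usbc_read32(CVMX_USBCX_GUSBCFG(0));
+ *	usbcfg.s.usbtrdtim = 0x5;	// required value, see above
+ *	usbcfg.s.phylpwrclksel = 0;	// 480-MHz internal PLL clock
+ *	usbc_write32(CVMX_USBCX_GUSBCFG(0), usbcfg.u32);
+ */
+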
+/**
+ * cvmx_usbc#_haint
+ *
+ * Host All Channels Interrupt Register (HAINT)
+ *
+ * When a significant event occurs on a channel, the Host All Channels Interrupt
+ * register interrupts the application using the Host Channels Interrupt bit of
+ * the Core Interrupt register (GINTSTS.HChInt).
+ * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in
+ * this register are set and cleared when the application sets and clears bits
+ * in the corresponding Host Channel-n Interrupt register.
+ */
+union cvmx_usbcx_haint {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_haint_s
+ * @haint: Channel Interrupts (HAINT)
+ * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
+ */
+ struct cvmx_usbcx_haint_s {
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 haint : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_haintmsk
+ *
+ * Host All Channels Interrupt Mask Register (HAINTMSK)
+ *
+ * The Host All Channel Interrupt Mask register works with the Host All Channel
+ * Interrupt register to interrupt the application when an event occurs on a
+ * channel. There is one interrupt mask bit per channel, up to a maximum of 16
+ * bits.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_haintmsk {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_haintmsk_s
+ * @haintmsk: Channel Interrupt Mask (HAINTMsk)
+ * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15
+ */
+ struct cvmx_usbcx_haintmsk_s {
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 haintmsk : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcchar#
+ *
+ * Host Channel-n Characteristics Register (HCCHAR)
+ *
+ */
+union cvmx_usbcx_hccharx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hccharx_s
+ * @chena: Channel Enable (ChEna)
+ * This field is set by the application and cleared by the OTG
+ * host.
+ * * 1'b0: Channel disabled
+ * * 1'b1: Channel enabled
+ * @chdis: Channel Disable (ChDis)
+ * The application sets this bit to stop transmitting/receiving
+ * data on a channel, even before the transfer for that channel is
+ * complete. The application must wait for the Channel Disabled
+ * interrupt before treating the channel as disabled.
+ * @oddfrm: Odd Frame (OddFrm)
+ * This field is set (reset) by the application to indicate that
+ * the OTG host must perform a transfer in an odd (micro)frame.
+ * This field is applicable for only periodic (isochronous and
+ * interrupt) transactions.
+ * * 1'b0: Even (micro)frame
+ * * 1'b1: Odd (micro)frame
+ * @devaddr: Device Address (DevAddr)
+ * This field selects the specific device serving as the data
+ * source or sink.
+ * @ec: Multi Count (MC) / Error Count (EC)
+ * When the Split Enable bit of the Host Channel-n Split Control
+ * register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates
+ * to the host the number of transactions that should be executed
+ * per microframe for this endpoint.
+ * * 2'b00: Reserved. This field yields undefined results.
+ * * 2'b01: 1 transaction
+ * * 2'b10: 2 transactions to be issued for this endpoint per
+ * microframe
+ * * 2'b11: 3 transactions to be issued for this endpoint per
+ * microframe
+ * When HCSPLTn.SpltEna is set (1'b1), this field indicates the
+ * number of immediate retries to be performed for periodic split
+ * transactions on transaction errors. This field must be set to at
+ * least 2'b01.
+ * @eptype: Endpoint Type (EPType)
+ * Indicates the transfer type selected.
+ * * 2'b00: Control
+ * * 2'b01: Isochronous
+ * * 2'b10: Bulk
+ * * 2'b11: Interrupt
+ * @lspddev: Low-Speed Device (LSpdDev)
+ * This field is set by the application to indicate that this
+ * channel is communicating to a low-speed device.
+ * @epdir: Endpoint Direction (EPDir)
+ * Indicates whether the transaction is IN or OUT.
+ * * 1'b0: OUT
+ * * 1'b1: IN
+ * @epnum: Endpoint Number (EPNum)
+ * Indicates the endpoint number on the device serving as the
+ * data source or sink.
+ * @mps: Maximum Packet Size (MPS)
+ * Indicates the maximum packet size of the associated endpoint.
+ */
+ struct cvmx_usbcx_hccharx_s {
+ __BITFIELD_FIELD(u32 chena : 1,
+ __BITFIELD_FIELD(u32 chdis : 1,
+ __BITFIELD_FIELD(u32 oddfrm : 1,
+ __BITFIELD_FIELD(u32 devaddr : 7,
+ __BITFIELD_FIELD(u32 ec : 2,
+ __BITFIELD_FIELD(u32 eptype : 2,
+ __BITFIELD_FIELD(u32 lspddev : 1,
+ __BITFIELD_FIELD(u32 reserved_16_16 : 1,
+ __BITFIELD_FIELD(u32 epdir : 1,
+ __BITFIELD_FIELD(u32 epnum : 4,
+ __BITFIELD_FIELD(u32 mps : 11,
+ ;)))))))))))
+ } s;
+};
+
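+/*
+ * Illustrative sketch: programming a channel for a 512-byte bulk IN
+ * endpoint and enabling it. ch and dev_addr are hypothetical values; the
+ * field encodings follow the descriptions above.
+ *
+ *	union cvmx_usbcx_hccharx hcchar;
+ *
+ *	hcchar.u32 = 0;
+ *	hcchar.s.mps = 512;	// endpoint's maximum packet size
+ *	hcchar.s.epnum = 1;
+ *	hcchar.s.epdir = 1;	// 1'b1: IN
+ *	hcchar.s.eptype = 2;	// 2'b10: bulk
+ *	hcchar.s.devaddr = dev_addr;
+ *	hcchar.s.ec = 1;	// one transaction per (micro)frame
+ *	hcchar.s.chena = 1;
+ *	usbc_write32(CVMX_USBCX_HCCHARX(ch, 0), hcchar.u32);
+ */
+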
+/**
+ * cvmx_usbc#_hcfg
+ *
+ * Host Configuration Register (HCFG)
+ *
+ * This register configures the core after power-on. Do not make changes to this
+ * register after initializing the host.
+ */
+union cvmx_usbcx_hcfg {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hcfg_s
+ * @fslssupp: FS- and LS-Only Support (FSLSSupp)
+ * The application uses this bit to control the core's enumeration
+ * speed. Using this bit, the application can make the core
+ * enumerate as a FS host, even if the connected device supports
+ * HS traffic. Do not make changes to this field after initial
+ * programming.
+ * * 1'b0: HS/FS/LS, based on the maximum speed supported by
+ * the connected device
+ * * 1'b1: FS/LS-only, even if the connected device can support HS
+ * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel)
+ * When the core is in FS Host mode
+ * * 2'b00: PHY clock is running at 30/60 MHz
+ * * 2'b01: PHY clock is running at 48 MHz
+ * * Others: Reserved
+ * When the core is in LS Host mode
+ * * 2'b00: PHY clock is running at 30/60 MHz. When the
+ * UTMI+/ULPI PHY Low Power mode is not selected, use
+ * 30/60 MHz.
+ * * 2'b01: PHY clock is running at 48 MHz. When the UTMI+
+ * PHY Low Power mode is selected, use 48 MHz if the PHY
+ * supplies a 48 MHz clock during LS mode.
+ * * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode,
+ * use 6 MHz when the UTMI+ PHY Low Power mode is
+ * selected and the PHY supplies a 6 MHz clock during LS
+ * mode. If you select a 6 MHz clock during LS mode, you must
+ * do a soft reset.
+ * * 2'b11: Reserved
+ */
+ struct cvmx_usbcx_hcfg_s {
+ __BITFIELD_FIELD(u32 reserved_3_31 : 29,
+ __BITFIELD_FIELD(u32 fslssupp : 1,
+ __BITFIELD_FIELD(u32 fslspclksel : 2,
+ ;)))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcint#
+ *
+ * Host Channel-n Interrupt Register (HCINT)
+ *
+ * This register indicates the status of a channel with respect to USB- and
+ * AHB-related events. The application must read this register when the Host
+ * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is
+ * set. Before the application can read this register, it must first read
+ * the Host All Channels Interrupt (HAINT) register to get the exact channel
+ * number for the Host Channel-n Interrupt register. The application must clear
+ * the appropriate bit in this register to clear the corresponding bits in the
+ * HAINT and GINTSTS registers.
+ */
+union cvmx_usbcx_hcintx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hcintx_s
+ * @datatglerr: Data Toggle Error (DataTglErr)
+ * @frmovrun: Frame Overrun (FrmOvrun)
+ * @bblerr: Babble Error (BblErr)
+ * @xacterr: Transaction Error (XactErr)
+ * @nyet: NYET Response Received Interrupt (NYET)
+ * @ack: ACK Response Received Interrupt (ACK)
+ * @nak: NAK Response Received Interrupt (NAK)
+ * @stall: STALL Response Received Interrupt (STALL)
+ * @ahberr: This bit is always 0x0.
+ * @chhltd: Channel Halted (ChHltd)
+ * Indicates the transfer completed abnormally either because of
+ * any USB transaction error or in response to a disable request by
+ * the application.
+ * @xfercompl: Transfer Completed (XferCompl)
+ * Transfer completed normally without any errors.
+ */
+ struct cvmx_usbcx_hcintx_s {
+ __BITFIELD_FIELD(u32 reserved_11_31 : 21,
+ __BITFIELD_FIELD(u32 datatglerr : 1,
+ __BITFIELD_FIELD(u32 frmovrun : 1,
+ __BITFIELD_FIELD(u32 bblerr : 1,
+ __BITFIELD_FIELD(u32 xacterr : 1,
+ __BITFIELD_FIELD(u32 nyet : 1,
+ __BITFIELD_FIELD(u32 ack : 1,
+ __BITFIELD_FIELD(u32 nak : 1,
+ __BITFIELD_FIELD(u32 stall : 1,
+ __BITFIELD_FIELD(u32 ahberr : 1,
+ __BITFIELD_FIELD(u32 chhltd : 1,
+ __BITFIELD_FIELD(u32 xfercompl : 1,
+ ;))))))))))))
+ } s;
+};
+
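+/*
+ * Illustrative sketch: HCINT bits are cleared by writing 1, so writing back
+ * the value just read acknowledges exactly the events that were observed
+ * (which in turn clears the HAINT/GINTSTS summary bits, as described
+ * above). usbc_read32()/usbc_write32() are hypothetical accessors.
+ *
+ *	union cvmx_usbcx_hcintx hcint;
+ *
+ *	hcint.u32 = usbc_read32(CVMX_USBCX_HCINTX(ch, 0));
+ *	usbc_write32(CVMX_USBCX_HCINTX(ch, 0), hcint.u32);
+ */
+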
+/**
+ * cvmx_usbc#_hcintmsk#
+ *
+ * Host Channel-n Interrupt Mask Register (HCINTMSKn)
+ *
+ * This register reflects the mask for each channel status described in the
+ * previous section.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_hcintmskx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hcintmskx_s
+ * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
+ * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk)
+ * @bblerrmsk: Babble Error Mask (BblErrMsk)
+ * @xacterrmsk: Transaction Error Mask (XactErrMsk)
+ * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk)
+ * @ackmsk: ACK Response Received Interrupt Mask (AckMsk)
+ * @nakmsk: NAK Response Received Interrupt Mask (NakMsk)
+ * @stallmsk: STALL Response Received Interrupt Mask (StallMsk)
+ * @ahberrmsk: AHB Error Mask (AHBErrMsk)
+ * @chhltdmsk: Channel Halted Mask (ChHltdMsk)
+ * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
+ */
+ struct cvmx_usbcx_hcintmskx_s {
+ __BITFIELD_FIELD(u32 reserved_11_31 : 21,
+ __BITFIELD_FIELD(u32 datatglerrmsk : 1,
+ __BITFIELD_FIELD(u32 frmovrunmsk : 1,
+ __BITFIELD_FIELD(u32 bblerrmsk : 1,
+ __BITFIELD_FIELD(u32 xacterrmsk : 1,
+ __BITFIELD_FIELD(u32 nyetmsk : 1,
+ __BITFIELD_FIELD(u32 ackmsk : 1,
+ __BITFIELD_FIELD(u32 nakmsk : 1,
+ __BITFIELD_FIELD(u32 stallmsk : 1,
+ __BITFIELD_FIELD(u32 ahberrmsk : 1,
+ __BITFIELD_FIELD(u32 chhltdmsk : 1,
+ __BITFIELD_FIELD(u32 xfercomplmsk : 1,
+ ;))))))))))))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcsplt#
+ *
+ * Host Channel-n Split Control Register (HCSPLT)
+ *
+ */
+union cvmx_usbcx_hcspltx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hcspltx_s
+ * @spltena: Split Enable (SpltEna)
+ * The application sets this field to indicate that this channel is
+ * enabled to perform split transactions.
+ * @compsplt: Do Complete Split (CompSplt)
+ * The application sets this field to request the OTG host to
+ * perform a complete split transaction.
+ * @xactpos: Transaction Position (XactPos)
+ * This field is used to determine whether to send all, first,
+ * middle, or last payloads with each OUT transaction.
+ * * 2'b11: All. This is the entire data payload of this
+ * transaction (which is less than or equal to 188 bytes).
+ * * 2'b10: Begin. This is the first data payload of this
+ * transaction (which is larger than 188 bytes).
+ * * 2'b00: Mid. This is the middle payload of this transaction
+ * (which is larger than 188 bytes).
+ * * 2'b01: End. This is the last payload of this transaction
+ * (which is larger than 188 bytes).
+ * @hubaddr: Hub Address (HubAddr)
+ * This field holds the device address of the transaction
+ * translator's hub.
+ * @prtaddr: Port Address (PrtAddr)
+ * This field is the port number of the recipient transaction
+ * translator.
+ */
+ struct cvmx_usbcx_hcspltx_s {
+ __BITFIELD_FIELD(u32 spltena : 1,
+ __BITFIELD_FIELD(u32 reserved_17_30 : 14,
+ __BITFIELD_FIELD(u32 compsplt : 1,
+ __BITFIELD_FIELD(u32 xactpos : 2,
+ __BITFIELD_FIELD(u32 hubaddr : 7,
+ __BITFIELD_FIELD(u32 prtaddr : 7,
+ ;))))))
+ } s;
+};
+
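+/*
+ * Illustrative sketch: enabling split transactions for a full- or low-speed
+ * device behind a high-speed hub. hub_addr and hub_port are hypothetical
+ * values taken from the hub topology.
+ *
+ *	union cvmx_usbcx_hcspltx hcsplt;
+ *
+ *	hcsplt.u32 = 0;
+ *	hcsplt.s.spltena = 1;
+ *	hcsplt.s.hubaddr = hub_addr;	// transaction translator's hub
+ *	hcsplt.s.prtaddr = hub_port;	// transaction translator's port
+ *	hcsplt.s.xactpos = 3;		// 2'b11 "All": payload <= 188 bytes
+ *	usbc_write32(CVMX_USBCX_HCSPLTX(ch, 0), hcsplt.u32);
+ */
+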
+/**
+ * cvmx_usbc#_hctsiz#
+ *
+ * Host Channel-n Transfer Size Register (HCTSIZ)
+ *
+ */
+union cvmx_usbcx_hctsizx {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hctsizx_s
+ * @dopng: Do Ping (DoPng)
+ * Setting this field to 1 directs the host to perform the PING protocol.
+ * @pid: PID (Pid)
+ * The application programs this field with the type of PID to use
+ * for the initial transaction. The host will maintain this field
+ * for the rest of the transfer.
+ * * 2'b00: DATA0
+ * * 2'b01: DATA2
+ * * 2'b10: DATA1
+ * * 2'b11: MDATA (non-control)/SETUP (control)
+ * @pktcnt: Packet Count (PktCnt)
+ * This field is programmed by the application with the expected
+ * number of packets to be transmitted (OUT) or received (IN).
+ * The host decrements this count on every successful
+ * transmission or reception of an OUT/IN packet. Once this count
+ * reaches zero, the application is interrupted to indicate normal
+ * completion.
+ * @xfersize: Transfer Size (XferSize)
+ * For an OUT, this field is the number of data bytes the host will
+ * send during the transfer.
+ * For an IN, this field is the buffer size that the application
+ * has reserved for the transfer. The application is expected to
+ * program this field as an integer multiple of the maximum packet
+ * size for IN transactions (periodic and non-periodic).
+ */
+ struct cvmx_usbcx_hctsizx_s {
+ __BITFIELD_FIELD(u32 dopng : 1,
+ __BITFIELD_FIELD(u32 pid : 2,
+ __BITFIELD_FIELD(u32 pktcnt : 10,
+ __BITFIELD_FIELD(u32 xfersize : 19,
+ ;))))
+ } s;
+};
+
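+/*
+ * Illustrative sketch: sizing a transfer per the descriptions above. For IN
+ * endpoints, len is assumed to already be a multiple of the maximum packet
+ * size mps.
+ *
+ *	union cvmx_usbcx_hctsizx hctsiz;
+ *
+ *	hctsiz.u32 = 0;
+ *	hctsiz.s.xfersize = len;
+ *	hctsiz.s.pktcnt = max_t(u32, 1, DIV_ROUND_UP(len, mps));
+ *	hctsiz.s.pid = 0;	// 2'b00: DATA0 for the first transaction
+ *	usbc_write32(CVMX_USBCX_HCTSIZX(ch, 0), hctsiz.u32);
+ */
+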
+/**
+ * cvmx_usbc#_hfir
+ *
+ * Host Frame Interval Register (HFIR)
+ *
+ * This register stores the frame interval information for the current speed to
+ * which the O2P USB core has enumerated.
+ */
+union cvmx_usbcx_hfir {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hfir_s
+ * @frint: Frame Interval (FrInt)
+ * The value that the application programs to this field specifies
+ * the interval between two consecutive SOFs (FS) or micro-
+ * SOFs (HS) or Keep-Alive tokens (LS). This field contains the
+ * number of PHY clocks that constitute the required frame
+ * interval. The default value set in this field is for FS operation
+ * when the PHY clock frequency is 60 MHz. The application can
+ * write a value to this register only after the Port Enable bit of
+ * the Host Port Control and Status register (HPRT.PrtEnaPort)
+ * has been set. If no value is programmed, the core calculates
+ * the value based on the PHY clock specified in the FS/LS PHY
+ * Clock Select field of the Host Configuration register
+ * (HCFG.FSLSPclkSel). Do not change the value of this field
+ * after the initial configuration.
+ * * 125 us (PHY clock frequency for HS)
+ * * 1 ms (PHY clock frequency for FS/LS)
+ */
+ struct cvmx_usbcx_hfir_s {
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 frint : 16,
+ ;))
+ } s;
+};
+
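+/*
+ * FrInt is one (micro)frame interval expressed in PHY clocks, so a
+ * full-speed link (1 ms frames) on a 48-MHz PHY clock needs 48000, and a
+ * high-speed link (125 us microframes) on a 60-MHz PHY clock needs 7500.
+ * An illustrative sketch, with usbc_read32()/usbc_write32() as
+ * hypothetical accessors:
+ *
+ *	union cvmx_usbcx_hfir hfir;
+ *
+ *	hfir.u32 = usbc_read32(CVMX_USBCX_HFIR(0));
+ *	hfir.s.frint = 48000;	// FS @ 48 MHz: one 1-ms frame
+ *	usbc_write32(CVMX_USBCX_HFIR(0), hfir.u32);
+ */
+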
+/**
+ * cvmx_usbc#_hfnum
+ *
+ * Host Frame Number/Frame Time Remaining Register (HFNUM)
+ *
+ * This register indicates the current frame number.
+ * It also indicates the time remaining (in terms of the number of PHY clocks)
+ * in the current (micro)frame.
+ */
+union cvmx_usbcx_hfnum {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hfnum_s
+ * @frrem: Frame Time Remaining (FrRem)
+ * Indicates the amount of time remaining in the current
+ * microframe (HS) or frame (FS/LS), in terms of PHY clocks.
+ * This field decrements on each PHY clock. When it reaches
+ * zero, this field is reloaded with the value in the Frame
+ * Interval register and a new SOF is transmitted on the USB.
+ * @frnum: Frame Number (FrNum)
+ * This field increments when a new SOF is transmitted on the
+ * USB, and is reset to 0 when it reaches 16'h3FFF.
+ */
+ struct cvmx_usbcx_hfnum_s {
+ __BITFIELD_FIELD(u32 frrem : 16,
+ __BITFIELD_FIELD(u32 frnum : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hprt
+ *
+ * Host Port Control and Status Register (HPRT)
+ *
+ * This register is available in both Host and Device modes.
+ * Currently, the OTG Host supports only one port.
+ * A single register holds USB port-related information such as USB reset,
+ * enable, suspend, resume, connect status, and test mode for each port. The
+ * R_SS_WC bits in this register can trigger an interrupt to the application
+ * through the Host Port Interrupt bit of the Core Interrupt register
+ * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this
+ * register and clear the bit that caused the interrupt. For the R_SS_WC bits,
+ * the application must write a 1 to the bit to clear the interrupt.
+ */
+union cvmx_usbcx_hprt {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hprt_s
+ * @prtspd: Port Speed (PrtSpd)
+ * Indicates the speed of the device attached to this port.
+ * * 2'b00: High speed
+ * * 2'b01: Full speed
+ * * 2'b10: Low speed
+ * * 2'b11: Reserved
+ * @prttstctl: Port Test Control (PrtTstCtl)
+ * The application writes a nonzero value to this field to put
+ * the port into a Test mode, and the corresponding pattern is
+ * signaled on the port.
+ * * 4'b0000: Test mode disabled
+ * * 4'b0001: Test_J mode
+ * * 4'b0010: Test_K mode
+ * * 4'b0011: Test_SE0_NAK mode
+ * * 4'b0100: Test_Packet mode
+ * * 4'b0101: Test_Force_Enable
+ * * Others: Reserved
+ * PrtSpd must be zero (i.e. the interface must be in high-speed
+ * mode) to use the PrtTstCtl test modes.
+ * @prtpwr: Port Power (PrtPwr)
+ * The application uses this field to control power to this port,
+ * and the core clears this bit on an overcurrent condition.
+ * * 1'b0: Power off
+ * * 1'b1: Power on
+ * @prtlnsts: Port Line Status (PrtLnSts)
+ * Indicates the current logic level of the USB data lines
+ * * Bit [10]: Logic level of D-
+ * * Bit [11]: Logic level of D+
+ * @prtrst: Port Reset (PrtRst)
+ * When the application sets this bit, a reset sequence is
+ * started on this port. The application must time the reset
+ * period and clear this bit after the reset sequence is
+ * complete.
+ * * 1'b0: Port not in reset
+ * * 1'b1: Port in reset
+ * The application must leave this bit set for at least a
+ * minimum duration mentioned below to start a reset on the
+ * port. The application can leave it set for another 10 ms in
+ * addition to the required minimum duration, before clearing
+ * the bit, even though there is no maximum limit set by the
+ * USB standard.
+ * * High speed: 50 ms
+ * * Full speed/Low speed: 10 ms
+ * @prtsusp: Port Suspend (PrtSusp)
+ * The application sets this bit to put this port in Suspend
+ * mode. The core only stops sending SOFs when this is set.
+ * To stop the PHY clock, the application must set the Port
+ * Clock Stop bit, which will assert the suspend input pin of
+ * the PHY.
+ * The read value of this bit reflects the current suspend
+ * status of the port. This bit is cleared by the core after a
+ * remote wakeup signal is detected or the application sets
+ * the Port Reset bit or Port Resume bit in this register or the
+ * Resume/Remote Wakeup Detected Interrupt bit or
+ * Disconnect Detected Interrupt bit in the Core Interrupt
+ * register (GINTSTS.WkUpInt or GINTSTS.DisconnInt,
+ * respectively).
+ * * 1'b0: Port not in Suspend mode
+ * * 1'b1: Port in Suspend mode
+ * @prtres: Port Resume (PrtRes)
+ * The application sets this bit to drive resume signaling on
+ * the port. The core continues to drive the resume signal
+ * until the application clears this bit.
+ * If the core detects a USB remote wakeup sequence, as
+ * indicated by the Port Resume/Remote Wakeup Detected
+ * Interrupt bit of the Core Interrupt register
+ * (GINTSTS.WkUpInt), the core starts driving resume
+ * signaling without application intervention and clears this bit
+ * when it detects a disconnect condition. The read value of
+ * this bit indicates whether the core is currently driving
+ * resume signaling.
+ * * 1'b0: No resume driven
+ * * 1'b1: Resume driven
+ * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng)
+ * The core sets this bit when the status of the Port
+ * Overcurrent Active bit (bit 4) in this register changes.
+ * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct)
+ * Indicates the overcurrent condition of the port.
+ * * 1'b0: No overcurrent condition
+ * * 1'b1: Overcurrent condition
+ * @prtenchng: Port Enable/Disable Change (PrtEnChng)
+ * The core sets this bit when the status of the Port Enable bit
+ * [2] of this register changes.
+ * @prtena: Port Enable (PrtEna)
+ * A port is enabled only by the core after a reset sequence,
+ * and is disabled by an overcurrent condition, a disconnect
+ * condition, or by the application clearing this bit. The
+ * application cannot set this bit by a register write. It can only
+ * clear it to disable the port. This bit does not trigger any
+ * interrupt to the application.
+ * * 1'b0: Port disabled
+ * * 1'b1: Port enabled
+ * @prtconndet: Port Connect Detected (PrtConnDet)
+ * The core sets this bit when a device connection is detected
+ * to trigger an interrupt to the application using the Host Port
+ * Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt).
+ * The application must write a 1 to this bit to clear the
+ * interrupt.
+ * @prtconnsts: Port Connect Status (PrtConnSts)
+ * * 0: No device is attached to the port.
+ * * 1: A device is attached to the port.
+ */
+ struct cvmx_usbcx_hprt_s {
+ __BITFIELD_FIELD(u32 reserved_19_31 : 13,
+ __BITFIELD_FIELD(u32 prtspd : 2,
+ __BITFIELD_FIELD(u32 prttstctl : 4,
+ __BITFIELD_FIELD(u32 prtpwr : 1,
+ __BITFIELD_FIELD(u32 prtlnsts : 2,
+ __BITFIELD_FIELD(u32 reserved_9_9 : 1,
+ __BITFIELD_FIELD(u32 prtrst : 1,
+ __BITFIELD_FIELD(u32 prtsusp : 1,
+ __BITFIELD_FIELD(u32 prtres : 1,
+ __BITFIELD_FIELD(u32 prtovrcurrchng : 1,
+ __BITFIELD_FIELD(u32 prtovrcurract : 1,
+ __BITFIELD_FIELD(u32 prtenchng : 1,
+ __BITFIELD_FIELD(u32 prtena : 1,
+ __BITFIELD_FIELD(u32 prtconndet : 1,
+ __BITFIELD_FIELD(u32 prtconnsts : 1,
+ ;)))))))))))))))
+ } s;
+};
+
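+/*
+ * Illustrative sketch: driving a port reset. Because HPRT mixes control
+ * bits with write-1-to-clear status bits, the W1C bits are zeroed in the
+ * snapshot so the read-modify-write does not accidentally acknowledge a
+ * pending change or disable the port. Timing follows the durations above;
+ * the accessors are hypothetical.
+ *
+ *	union cvmx_usbcx_hprt hprt;
+ *
+ *	hprt.u32 = usbc_read32(CVMX_USBCX_HPRT(0));
+ *	hprt.s.prtena = 0;		// W1C
+ *	hprt.s.prtconndet = 0;		// W1C
+ *	hprt.s.prtenchng = 0;		// W1C
+ *	hprt.s.prtovrcurrchng = 0;	// W1C
+ *	hprt.s.prtrst = 1;
+ *	usbc_write32(CVMX_USBCX_HPRT(0), hprt.u32);
+ *	msleep(50);			// >= 50 ms for a high-speed port
+ *	hprt.s.prtrst = 0;
+ *	usbc_write32(CVMX_USBCX_HPRT(0), hprt.u32);
+ */
+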
+/**
+ * cvmx_usbc#_hptxfsiz
+ *
+ * Host Periodic Transmit FIFO Size Register (HPTXFSIZ)
+ *
+ * This register holds the size and the memory start address of the Periodic
+ * TxFIFO.
+ */
+union cvmx_usbcx_hptxfsiz {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hptxfsiz_s
+ * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 16
+ * * Maximum value is 32768
+ * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
+ */
+ struct cvmx_usbcx_hptxfsiz_s {
+ __BITFIELD_FIELD(u32 ptxfsize : 16,
+ __BITFIELD_FIELD(u32 ptxfstaddr : 16,
+ ;))
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hptxsts
+ *
+ * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS)
+ *
+ * This read-only register contains the free space information for the Periodic
+ * TxFIFO and the Periodic Transmit Request Queue.
+ */
+union cvmx_usbcx_hptxsts {
+ u32 u32;
+ /**
+ * struct cvmx_usbcx_hptxsts_s
+ * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
+ * This indicates the entry in the Periodic Tx Request Queue that
+ * is currently being processed by the MAC.
+ * This register is used for debugging.
+ * * Bit [31]: Odd/Even (micro)frame
+ * - 1'b0: send in even (micro)frame
+ * - 1'b1: send in odd (micro)frame
+ * * Bits [30:27]: Channel/endpoint number
+ * * Bits [26:25]: Type
+ * - 2'b00: IN/OUT
+ * - 2'b01: Zero-length packet
+ * - 2'b10: CSPLIT
+ * - 2'b11: Disable channel command
+ * * Bit [24]: Terminate (last entry for the selected
+ * channel/endpoint)
+ * @ptxqspcavail: Periodic Transmit Request Queue Space Available
+ * (PTxQSpcAvail)
+ * Indicates the number of free locations available to be written
+ * in the Periodic Transmit Request Queue. This queue holds both
+ * IN and OUT requests.
+ * * 8'h0: Periodic Transmit Request Queue is full
+ * * 8'h1: 1 location available
+ * * 8'h2: 2 locations available
+ * * n: n locations available (0..8)
+ * * Others: Reserved
+ * @ptxfspcavail: Periodic Transmit Data FIFO Space Available
+ * (PTxFSpcAvail)
+ * Indicates the number of free locations available to be written
+ * to in the Periodic TxFIFO.
+ * Values are in terms of 32-bit words
+ * * 16'h0: Periodic TxFIFO is full
+ * * 16'h1: 1 word available
+ * * 16'h2: 2 words available
+ * * 16'hn: n words available (where 0..32768)
+ * * 16'h8000: 32768 words available
+ * * Others: Reserved
+ */
+ struct cvmx_usbcx_hptxsts_s {
+ __BITFIELD_FIELD(u32 ptxqtop : 8,
+ __BITFIELD_FIELD(u32 ptxqspcavail : 8,
+ __BITFIELD_FIELD(u32 ptxfspcavail : 16,
+ ;)))
+ } s;
+};
+
+/**
+ * cvmx_usbn#_clk_ctl
+ *
+ * USBN_CLK_CTL = USBN's Clock Control
+ *
+ * This register is used to control the frequency of the hclk and the
+ * hreset and phy_rst signals.
+ */
+union cvmx_usbnx_clk_ctl {
+ u64 u64;
+ /**
+ * struct cvmx_usbnx_clk_ctl_s
+ * @divide2: The 'hclk' used by the USB subsystem is derived
+ * from the eclk.
+ * Also see the field DIVIDE. DIVIDE2<1> must currently
+ * be zero because it is not implemented, so the maximum
+ * ratio of eclk/hclk is currently 16.
+ * The actual divide number for hclk is:
+ * (DIVIDE2 + 1) * (DIVIDE + 1)
+ * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
+ * generate the hclk in the USB Subsystem is held
+ * in reset. This bit must be set to '0' before
+ * changing the value of DIVIDE in this register.
+ * The reset to the HCLK_DIVIDER is also asserted
+ * when core reset is asserted.
+ * @p_x_on: Force USB-PHY on during suspend.
+ * '1' USB-PHY XO block is powered-down during
+ * suspend.
+ * '0' USB-PHY XO block is powered-up during
+ * suspend.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_rtype: PHY reference clock type
+ * On CN50XX/CN52XX/CN56XX the values are:
+ * '0' The USB-PHY uses a 12MHz crystal as a clock source
+ * at the USB_XO and USB_XI pins.
+ * '1' Reserved.
+ * '2' The USB_PHY uses a 12/24/48 MHz 2.5V board clock at the
+ * USB_XO pin. USB_XI should be tied to ground in this
+ * case.
+ * '3' Reserved.
+ * On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are:
+ * '0' Reserved.
+ * '1' Reserved.
+ * '2' The PHY PLL uses the XO block output as a reference.
+ * The XO block uses an external clock supplied on the
+ * XO pin. USB_XI should be tied to ground for this
+ * usage.
+ * '3' The XO block uses the clock from a crystal.
+ * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ * remain powered in Suspend Mode.
+ * '1' The USB-PHY XO Bias, Bandgap and PLL are
+ * powered down in suspend mode.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_c_sel: Phy clock speed select.
+ * Selects the reference clock / crystal frequency.
+ * '11': Reserved
+ * '10': 48 MHz (reserved when a crystal is used)
+ * '01': 24 MHz (reserved when a crystal is used)
+ * '00': 12 MHz
+ * The value of this field must be set while POR is
+ * active.
+ * NOTE: if a crystal is used as a reference clock,
+ * this field must be set to 12 MHz.
+ * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
+ * @sd_mode: Scaledown mode for the USBC. Controls timing events
+ * in the USBC; for normal operation this must be '0'.
+ * @s_bist: Starts BIST on the hclk memories during the '0'
+ * to '1' transition.
+ * @por: Power On Reset for the PHY.
+ * Resets all the PHY's registers and state machines.
+ * @enable: When '1' allows the generation of the hclk. When
+ * '0' the hclk will not be generated. See the DIVIDE
+ * field of this register.
+ * @prst: When this field is '0' the reset associated with
+ * the phy_clk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until 6 clocks (hclk or phy_clk, whichever is
+ * slower) have passed. Under normal
+ * operation once this bit is set to '1' it should not
+ * be set to '0'.
+ * @hrst: When this field is '0' the reset associated with
+ * the hclk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until 12ms after phy_clk is stable. Under normal
+ * operation, once this bit is set to '1' it should
+ * not be set to '0'.
+ * @divide: The frequency of 'hclk' used by the USB subsystem
+ * is the eclk frequency divided by the value of
+ * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
+ * DIVIDE2 of this register.
+ * The hclk frequency should be less than 125 MHz.
+ * After writing a value to this field the SW should
+ * read the field for the value written.
+ * The ENABLE field of this register should not be set
+ * until AFTER this field is set and then read.
+ */
+ struct cvmx_usbnx_clk_ctl_s {
+ __BITFIELD_FIELD(u64 reserved_20_63 : 44,
+ __BITFIELD_FIELD(u64 divide2 : 2,
+ __BITFIELD_FIELD(u64 hclk_rst : 1,
+ __BITFIELD_FIELD(u64 p_x_on : 1,
+ __BITFIELD_FIELD(u64 p_rtype : 2,
+ __BITFIELD_FIELD(u64 p_com_on : 1,
+ __BITFIELD_FIELD(u64 p_c_sel : 2,
+ __BITFIELD_FIELD(u64 cdiv_byp : 1,
+ __BITFIELD_FIELD(u64 sd_mode : 2,
+ __BITFIELD_FIELD(u64 s_bist : 1,
+ __BITFIELD_FIELD(u64 por : 1,
+ __BITFIELD_FIELD(u64 enable : 1,
+ __BITFIELD_FIELD(u64 prst : 1,
+ __BITFIELD_FIELD(u64 hrst : 1,
+ __BITFIELD_FIELD(u64 divide : 3,
+ ;)))))))))))))))
+ } s;
+};
+
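+/*
+ * Illustrative sketch of the ordering the DIVIDE/ENABLE descriptions above
+ * require: hold the divider in reset while changing DIVIDE, read the field
+ * back, and only then enable hclk. usbn_read64()/usbn_write64() are
+ * hypothetical 64-bit accessors and divisor is a hypothetical value that
+ * keeps hclk below 125 MHz.
+ *
+ *	union cvmx_usbnx_clk_ctl clk;
+ *
+ *	clk.u64 = usbn_read64(CVMX_USBNX_CLK_CTL(0));
+ *	clk.s.hclk_rst = 0;	// hold HCLK divider in reset
+ *	usbn_write64(CVMX_USBNX_CLK_CTL(0), clk.u64);
+ *	clk.s.divide = divisor;
+ *	clk.s.divide2 = 0;
+ *	usbn_write64(CVMX_USBNX_CLK_CTL(0), clk.u64);
+ *	clk.u64 = usbn_read64(CVMX_USBNX_CLK_CTL(0));	// read back DIVIDE
+ *	clk.s.hclk_rst = 1;	// release the divider
+ *	clk.s.enable = 1;	// only after DIVIDE is set and read back
+ *	usbn_write64(CVMX_USBNX_CLK_CTL(0), clk.u64);
+ */
+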
+/**
+ * cvmx_usbn#_usbp_ctl_status
+ *
+ * USBN_USBP_CTL_STATUS = USBP Control And Status Register
+ *
+ * Contains general control and status information for the USBN block.
+ */
+union cvmx_usbnx_usbp_ctl_status {
+ u64 u64;
+ /**
+ * struct cvmx_usbnx_usbp_ctl_status_s
+ * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
+ * @txvreftune: HS DC Voltage Level Adjustment
+ * @txfslstune: FS/LS Source Impedance Adjustment
+ * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
+ * @sqrxtune: Squelch Threshold Adjustment
+ * @compdistune: Disconnect Threshold Adjustment
+ * @otgtune: VBUS Valid Threshold Adjustment
+ * @otgdisable: OTG Block Disable
+ * @portreset: Per_Port Reset
+ * @drvvbus: Drive VBUS
+ * @lsbist: Low-Speed BIST Enable.
+ * @fsbist: Full-Speed BIST Enable.
+ * @hsbist: High-Speed BIST Enable.
+ * @bist_done: PHY BIST Done.
+ * Asserted at the end of the PHY BIST sequence.
+ * @bist_err: PHY BIST Error.
+ * Indicates an internal error was detected during
+ * the BIST sequence.
+ * @tdata_out: PHY Test Data Out.
+ * Presents either internally generated signals or
+ * test register contents, based upon the value of
+ * test_data_out_sel.
+ * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
+ * Normally should be set to zero.
+ * When customers have no intent to use USB PHY
+ * interface, they should:
+ * - still provide 3.3V to USB_VDD33, and
+ * - tie USB_REXT to 3.3V supply, and
+ * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
+ * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
+ * @dma_bmode: When set to 1 the L2C DMA address will be updated
+ * with byte-counts between packets. When set to 0
+ * the L2C DMA address is incremented to the next
+ * 4-byte aligned address after adding byte-count.
+ * @usbc_end: Bigendian input to the USB Core. This should be
+ * set to '0' for operation.
+ * @usbp_bist: PHY BIST. This is cleared to '0' to run BIST on the USBP.
+ * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
+ * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D+ line. '1' pull-down resistance is connected
+ * to D+. '0' pull-down resistance is not connected
+ * to D+. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D- line. '1' pull-down resistance is connected
+ * to D-. '0' pull-down resistance is not connected
+ * to D-. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @hst_mode: When '0' the USB is acting as HOST, when '1'
+ * USB is acting as device. This field needs to be
+ * set while the USB is in reset.
+ * @tuning: Transmitter Tuning for High-Speed Operation.
+ * Tunes the current supply and rise/fall output
+ * times for high-speed operation.
+ * [20:19] == 11: Current supply increased
+ * approximately 9%
+ * [20:19] == 10: Current supply increased
+ * approximately 4.5%
+ * [20:19] == 01: Design default.
+ * [20:19] == 00: Current supply decreased
+ * approximately 4.5%
+ * [22:21] == 11: Rise and fall times are increased.
+ * [22:21] == 10: Design default.
+ * [22:21] == 01: Rise and fall times are decreased.
+ * [22:21] == 00: Rise and fall times are decreased
+ * further as compared to the 01 setting.
+ * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
+ * Enables or disables bit stuffing on data[15:8]
+ * when bit-stuffing is enabled.
+ * @tx_bs_en: Transmit Bit Stuffing on [7:0].
+ * Enables or disables bit stuffing on data[7:0]
+ * when bit-stuffing is enabled.
+ * @loop_enb: PHY Loopback Test Enable.
+ * '1': During data transmission the receive is
+ * enabled.
+ * '0': During data transmission the receive is
+ * disabled.
+ * Must be '0' for normal operation.
+ * @vtest_enb: Analog Test Pin Enable.
+ * '1' The PHY's analog_test pin is enabled for the
+ * input and output of applicable analog test signals.
+ * '0' The analog_test pin is disabled.
+ * @bist_enb: Built-In Self Test Enable.
+ * Used to activate BIST in the PHY.
+ * @tdata_sel: Test Data Out Select.
+ * '1' test_data_out[3:0] (PHY) register contents
+ * are output. '0' internally generated signals are
+ * output.
+ * @taddr_in: Mode Address for Test Interface.
+ * Specifies the register address for writing to or
+ * reading from the PHY test interface register.
+ * @tdata_in: Internal Testing Register Input Data and Select
+ * This is a test bus. Data is present on [3:0],
+ * and its corresponding select (enable) is present
+ * on bits [7:4].
+ * @ate_reset: Reset input from automatic test equipment.
+ * This is a test signal. When the USB Core is
+ * powered up (not in Suspend Mode), an automatic
+ * tester can use this to disable phy_clock and
+ * free_clk, then re-enable them with an aligned
+ * phase.
+ * '1': The phy_clk and free_clk outputs are
+ * disabled. "0": The phy_clock and free_clk outputs
+ * are available within a specific period after the
+ * de-assertion.
+ */
+ struct cvmx_usbnx_usbp_ctl_status_s {
+ __BITFIELD_FIELD(u64 txrisetune : 1,
+ __BITFIELD_FIELD(u64 txvreftune : 4,
+ __BITFIELD_FIELD(u64 txfslstune : 4,
+ __BITFIELD_FIELD(u64 txhsxvtune : 2,
+ __BITFIELD_FIELD(u64 sqrxtune : 3,
+ __BITFIELD_FIELD(u64 compdistune : 3,
+ __BITFIELD_FIELD(u64 otgtune : 3,
+ __BITFIELD_FIELD(u64 otgdisable : 1,
+ __BITFIELD_FIELD(u64 portreset : 1,
+ __BITFIELD_FIELD(u64 drvvbus : 1,
+ __BITFIELD_FIELD(u64 lsbist : 1,
+ __BITFIELD_FIELD(u64 fsbist : 1,
+ __BITFIELD_FIELD(u64 hsbist : 1,
+ __BITFIELD_FIELD(u64 bist_done : 1,
+ __BITFIELD_FIELD(u64 bist_err : 1,
+ __BITFIELD_FIELD(u64 tdata_out : 4,
+ __BITFIELD_FIELD(u64 siddq : 1,
+ __BITFIELD_FIELD(u64 txpreemphasistune : 1,
+ __BITFIELD_FIELD(u64 dma_bmode : 1,
+ __BITFIELD_FIELD(u64 usbc_end : 1,
+ __BITFIELD_FIELD(u64 usbp_bist : 1,
+ __BITFIELD_FIELD(u64 tclk : 1,
+ __BITFIELD_FIELD(u64 dp_pulld : 1,
+ __BITFIELD_FIELD(u64 dm_pulld : 1,
+ __BITFIELD_FIELD(u64 hst_mode : 1,
+ __BITFIELD_FIELD(u64 tuning : 4,
+ __BITFIELD_FIELD(u64 tx_bs_enh : 1,
+ __BITFIELD_FIELD(u64 tx_bs_en : 1,
+ __BITFIELD_FIELD(u64 loop_enb : 1,
+ __BITFIELD_FIELD(u64 vtest_enb : 1,
+ __BITFIELD_FIELD(u64 bist_enb : 1,
+ __BITFIELD_FIELD(u64 tdata_sel : 1,
+ __BITFIELD_FIELD(u64 taddr_in : 4,
+ __BITFIELD_FIELD(u64 tdata_in : 8,
+ __BITFIELD_FIELD(u64 ate_reset : 1,
+ ;)))))))))))))))))))))))))))))))))))
+ } s;
+};
+
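+/*
+ * Illustrative sketch: parking an unused PHY as the SIDDQ description above
+ * prescribes (the board must still supply USB_VDD33 and tie USB_REXT to the
+ * 3.3V rail). usbn_read64()/usbn_write64() are hypothetical accessors.
+ *
+ *	union cvmx_usbnx_usbp_ctl_status ctl;
+ *
+ *	ctl.u64 = usbn_read64(CVMX_USBNX_USBP_CTL_STATUS(0));
+ *	ctl.s.siddq = 1;
+ *	usbn_write64(CVMX_USBNX_USBP_CTL_STATUS(0), ctl.u64);
+ */
+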
+#endif /* __OCTEON_HCD_H__ */
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
new file mode 100644
index 000000000000..5319909eb2f6
--- /dev/null
+++ b/drivers/staging/octeon/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config OCTEON_ETHERNET
+ tristate "Cavium Networks Octeon Ethernet support"
+ depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
+ depends on NETDEVICES
+ select PHYLIB
+ select MDIO_OCTEON
+ help
+	  This driver supports the built-in Ethernet ports on Cavium
+	  Networks' products in the Octeon family, including the CN3XXX
+	  and CN5XXX processors.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon-ethernet.
+
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
new file mode 100644
index 000000000000..3887cf5f1e84
--- /dev/null
+++ b/drivers/staging/octeon/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2005-2009 Cavium Networks
+#
+
+#
+# Makefile for Cavium OCTEON on-board ethernet driver
+#
+
+obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
+
+octeon-ethernet-y := ethernet.o
+octeon-ethernet-y += ethernet-mdio.o
+octeon-ethernet-y += ethernet-mem.o
+octeon-ethernet-y += ethernet-rgmii.o
+octeon-ethernet-y += ethernet-rx.o
+octeon-ethernet-y += ethernet-sgmii.o
+octeon-ethernet-y += ethernet-spi.o
+octeon-ethernet-y += ethernet-tx.o
diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO
new file mode 100644
index 000000000000..67a0a1f6b922
--- /dev/null
+++ b/drivers/staging/octeon/TODO
@@ -0,0 +1,9 @@
+This driver is functional and supports Ethernet on OCTEON+/OCTEON2/OCTEON3
+chips at least up to CN7030.
+
+TODO:
+ - general code review and clean up
+ - make driver self-contained instead of being split between staging and
+ arch/mips/cavium-octeon.
+
+Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
new file mode 100644
index 000000000000..ef9e767b0e2e
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+/*
+ * A few defines are used to control the operation of this driver:
+ * USE_ASYNC_IOBDMA
+ * Use asynchronous IO access to hardware. This uses Octeon's asynchronous
+ * IOBDMAs to issue IO accesses without stalling. Set this to zero
+ * to disable it. Note that IOBDMAs require CVMSEG.
+ * REUSE_SKBUFFS_WITHOUT_FREE
+ * Allows the TX path to free an skbuff into the FPA hardware pool. This
+ * can significantly improve performance for forwarding and bridging, but
+ * may be somewhat dangerous. Checks are made, but if any buffer is reused
+ * without the proper Linux cleanup, the networking stack may have very
+ * bizarre bugs.
+ */
+#ifndef __ETHERNET_DEFINES_H__
+#define __ETHERNET_DEFINES_H__
+
+#ifdef CONFIG_NETFILTER
+#define REUSE_SKBUFFS_WITHOUT_FREE 0
+#else
+#define REUSE_SKBUFFS_WITHOUT_FREE 1
+#endif
+
+#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
+
+/* Maximum number of SKBs that may be queued for TX before the queue is stopped. */
+#define MAX_OUT_QUEUE_DEPTH 1000
+
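+/*
+ * Two 32-bit Fetch-And-Add Unit (FAU) counters, allocated downward from
+ * the top of the FAU register space: the number of packets awaiting TX
+ * cleanup and the number of packet buffers owed back to the FPA pool.
+ */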
+#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(u32))
+#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(u32))
+
+#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS + 1)
+
+#endif /* __ETHERNET_DEFINES_H__ */
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
new file mode 100644
index 000000000000..c798672d61b2
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/ratelimit.h>
+#include <linux/of_mdio.h>
+#include <generated/utsrelease.h>
+#include <net/dst.h>
+
+#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
+#include "ethernet-mdio.h"
+#include "ethernet-util.h"
+
+static void cvm_oct_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, UTS_RELEASE, sizeof(info->version));
+ strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
+}
+
+static int cvm_oct_nway_reset(struct net_device *dev)
+{
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (dev->phydev)
+ return phy_start_aneg(dev->phydev);
+
+ return -EINVAL;
+}
+
+const struct ethtool_ops cvm_oct_ethtool_ops = {
+ .get_drvinfo = cvm_oct_get_drvinfo,
+ .nway_reset = cvm_oct_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+/**
+ * cvm_oct_ioctl - IOCTL support for PHY control
+ * @dev: Device to change
+ * @rq: the request
+ * @cmd: the command
+ *
+ * Returns Zero on success
+ */
+int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!dev->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
+}
+
+void cvm_oct_note_carrier(struct octeon_ethernet *priv,
+ union cvmx_helper_link_info li)
+{
+ if (li.s.link_up) {
+ pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
+ netdev_name(priv->netdev), li.s.speed,
+ (li.s.full_duplex) ? "Full" : "Half",
+ priv->port, priv->queue);
+ } else {
+ pr_notice_ratelimited("%s: Link down\n",
+ netdev_name(priv->netdev));
+ }
+}
+
+void cvm_oct_adjust_link(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_helper_link_info link_info;
+
+ link_info.u64 = 0;
+ link_info.s.link_up = dev->phydev->link ? 1 : 0;
+ link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0;
+ link_info.s.speed = dev->phydev->speed;
+ priv->link_info = link_info.u64;
+
+ /*
+ * The polling task needs to know about link status changes.
+ */
+ if (priv->poll)
+ priv->poll(dev);
+
+ if (priv->last_link != dev->phydev->link) {
+ priv->last_link = dev->phydev->link;
+ cvmx_helper_link_set(priv->port, link_info);
+ cvm_oct_note_carrier(priv, link_info);
+ }
+}
+
+int cvm_oct_common_stop(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ union cvmx_helper_link_info link_info;
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ int index = INDEX(priv->port);
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+ priv->poll = NULL;
+
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
+
+ if (priv->last_link) {
+ link_info.u64 = 0;
+ priv->last_link = 0;
+
+ cvmx_helper_link_set(priv->port, link_info);
+ cvm_oct_note_carrier(priv, link_info);
+ }
+ return 0;
+}
+
+/**
+ * cvm_oct_phy_setup_device - setup the PHY
+ *
+ * @dev: Device to setup
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvm_oct_phy_setup_device(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ struct device_node *phy_node;
+ struct phy_device *phydev = NULL;
+
+ if (!priv->of_node)
+ goto no_phy;
+
+ phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
+ int rc;
+
+ rc = of_phy_register_fixed_link(priv->of_node);
+ if (rc)
+ return rc;
+
+ phy_node = of_node_get(priv->of_node);
+ }
+ if (!phy_node)
+ goto no_phy;
+
+ phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
+ priv->phy_mode);
+ of_node_put(phy_node);
+
+ if (!phydev)
+ return -ENODEV;
+
+ priv->last_link = 0;
+ phy_start(phydev);
+
+ return 0;
+no_phy:
+ /* If there is no phy, assume a direct MAC connection and that
+ * the link is up.
+ */
+ netif_carrier_on(dev);
+ return 0;
+}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
new file mode 100644
index 000000000000..e3771d48c49b
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/ethtool.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+#ifdef CONFIG_XFRM
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#endif /* CONFIG_XFRM */
+
+extern const struct ethtool_ops cvm_oct_ethtool_ops;
+
+void octeon_mdiobus_force_mod_depencency(void);
+
+int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
new file mode 100644
index 000000000000..532594957ebc
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2010 Cavium Networks
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+
+#include "octeon-ethernet.h"
+#include "ethernet-mem.h"
+#include "ethernet-defines.h"
+
+/**
+ * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
+ * @pool: Pool to allocate an skbuff for
+ * @size: Size of the buffer needed for the pool
+ * @elements: Number of buffers to allocate
+ *
+ * Returns the actual number of buffers allocated.
+ */
+static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
+{
+ int freed = elements;
+
+ while (freed) {
+ struct sk_buff *skb = dev_alloc_skb(size + 256);
+
+ if (unlikely(!skb))
+ break;
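+ /*
+ * Align skb->data to the 128-byte boundary the FPA
+ * requires and stash the skb pointer in the word just
+ * below the buffer so it can be recovered later.
+ */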
+ skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
+ *(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
+ cvmx_fpa_free(skb->data, pool, size / 128);
+ freed--;
+ }
+ return elements - freed;
+}
+
+/**
+ * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
+ * @pool: Pool to free the skbuffs from
+ * @size: Size of each buffer in the pool
+ * @elements: Number of buffers expected in the pool
+ */
+static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
+{
+ char *memory;
+
+ do {
+ memory = cvmx_fpa_alloc(pool);
+ if (memory) {
+ struct sk_buff *skb =
+ *(struct sk_buff **)(memory - sizeof(void *));
+ elements--;
+ dev_kfree_skb(skb);
+ }
+ } while (memory);
+
+ if (elements < 0)
+ pr_warn("Freeing of pool %u had too many skbuffs (%d)\n",
+ pool, elements);
+ else if (elements > 0)
+ pr_warn("Freeing of pool %u is missing %d skbuffs\n",
+ pool, elements);
+}
+
+/**
+ * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
+ * @pool: Pool to populate
+ * @size: Size of each buffer in the pool
+ * @elements: Number of buffers to allocate
+ *
+ * Returns the actual number of buffers allocated.
+ */
+static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
+{
+ char *memory;
+ char *fpa;
+ int freed = elements;
+
+ while (freed) {
+ /*
+ * FPA memory must be 128 byte aligned. Since we are
+ * aligning we need to save the original pointer so we
+ * can feed it to kfree when the memory is returned to
+ * the kernel.
+ *
+ * We allocate an extra 256 bytes to allow for
+ * alignment and space for the original pointer saved
+ * just before the block.
+ */
+ memory = kmalloc(size + 256, GFP_ATOMIC);
+ if (unlikely(!memory)) {
+ pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
+ elements * size, pool);
+ break;
+ }
+ fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
+ *((char **)fpa - 1) = memory;
+ cvmx_fpa_free(fpa, pool, 0);
+ freed--;
+ }
+ return elements - freed;
+}
+
+/**
+ * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
+ * @pool: FPA pool to free
+ * @size: Size of each buffer in the pool
+ * @elements: Number of buffers that should be in the pool
+ */
+static void cvm_oct_free_hw_memory(int pool, int size, int elements)
+{
+ char *memory;
+ char *fpa;
+
+ do {
+ fpa = cvmx_fpa_alloc(pool);
+ if (fpa) {
+ elements--;
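+ /*
+ * Convert the FPA address back to a kernel
+ * virtual address, then recover the original
+ * kmalloc pointer stored just below the
+ * aligned block.
+ */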
+ fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
+ memory = *((char **)fpa - 1);
+ kfree(memory);
+ }
+ } while (fpa);
+
+ if (elements < 0)
+ pr_warn("Freeing of pool %u had too many buffers (%d)\n",
+ pool, elements);
+ else if (elements > 0)
+ pr_warn("Warning: Freeing of pool %u is missing %d buffers\n",
+ pool, elements);
+}
+
+int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
+{
+ int freed;
+
+ if (pool == CVMX_FPA_PACKET_POOL)
+ freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
+ else
+ freed = cvm_oct_fill_hw_memory(pool, size, elements);
+ return freed;
+}
+
+void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
+{
+ if (pool == CVMX_FPA_PACKET_POOL)
+ cvm_oct_free_hw_skbuff(pool, size, elements);
+ else
+ cvm_oct_free_hw_memory(pool, size, elements);
+}
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h
new file mode 100644
index 000000000000..692dcdb7154d
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mem.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
+void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
new file mode 100644
index 000000000000..0c4fac31540a
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/phy.h>
+#include <linux/ratelimit.h>
+#include <net/dst.h>
+
+#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
+#include "ethernet-util.h"
+#include "ethernet-mdio.h"
+
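+/* Protects GMX/IPD registers that are shared between ports. */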
+static DEFINE_SPINLOCK(global_register_lock);
+
+static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
+{
+ union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
+ union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
+ union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ /* Set preamble checking. */
+ gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index,
+ interface));
+ gmxx_rxx_frm_ctl.s.pre_chk = enable;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
+ gmxx_rxx_frm_ctl.u64);
+
+ /* Set FCS stripping. */
+ ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+ if (enable)
+ ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
+ else
+ ipd_sub_port_fcs.s.port_bit &=
+ 0xffffffffull ^ (1ull << priv->port);
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+
+ /* Clear any error bits. */
+ gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index,
+ interface));
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
+ gmxx_rxx_int_reg.u64);
+}
+
+static void cvm_oct_check_preamble_errors(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_helper_link_info link_info;
+ unsigned long flags;
+
+ link_info.u64 = priv->link_info;
+
+ /*
+ * Take the global register lock since we are going to
+ * touch registers that affect more than one port.
+ */
+ spin_lock_irqsave(&global_register_lock, flags);
+
+ if (link_info.s.speed == 10 && priv->last_speed == 10) {
+ /*
+ * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are
+ * getting preamble errors.
+ */
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+ union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
+
+ gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface));
+ if (gmxx_rxx_int_reg.s.pcterr) {
+ /*
+ * We are getting preamble errors at 10Mbps. Most
+ * likely the PHY is giving us packets with misaligned
+ * preambles. In order to get these packets we need to
+ * disable preamble checking and do it in software.
+ */
+ cvm_oct_set_hw_preamble(priv, false);
+ printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
+ dev->name);
+ }
+ } else {
+ /*
+ * Since the 10Mbps preamble workaround is allowed we need to
+ * enable preamble checking, FCS stripping, and clear error
+ * bits on every speed change. If errors occur during 10Mbps
+ * operation the above code will change this stuff
+ */
+ if (priv->last_speed != link_info.s.speed)
+ cvm_oct_set_hw_preamble(priv, true);
+ priv->last_speed = link_info.s.speed;
+ }
+ spin_unlock_irqrestore(&global_register_lock, flags);
+}
+
+static void cvm_oct_rgmii_poll(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_helper_link_info link_info;
+ bool status_change;
+
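+ /*
+ * If the link state changed but the hardware rejects the new
+ * state, keep reporting the previous state.
+ */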
+ link_info = cvmx_helper_link_get(priv->port);
+ if (priv->link_info != link_info.u64 &&
+ cvmx_helper_link_set(priv->port, link_info))
+ link_info.u64 = priv->link_info;
+ status_change = priv->link_info != link_info.u64;
+ priv->link_info = link_info.u64;
+
+ cvm_oct_check_preamble_errors(dev);
+
+ if (likely(!status_change))
+ return;
+
+ /* Tell core. */
+ if (link_info.s.link_up) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else if (netif_carrier_ok(dev)) {
+ netif_carrier_off(dev);
+ }
+ cvm_oct_note_carrier(priv, link_info);
+}
+
+int cvm_oct_rgmii_open(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int ret;
+
+ ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
+ if (ret)
+ return ret;
+
+ if (dev->phydev) {
+ /*
+ * In phydev mode, we still need periodic polling for the
+ * preamble error checking, and we also need to call this
+ * function on every link state change.
+ *
+ * Only true RGMII ports need to be polled. In GMII mode, port
+ * 0 is really an RGMII port.
+ */
+ if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII &&
+ priv->port == 0) ||
+ (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
+ priv->poll = cvm_oct_check_preamble_errors;
+ cvm_oct_check_preamble_errors(dev);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
new file mode 100644
index 000000000000..2c16230f993c
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2010 Cavium Networks
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+#include <linux/cpumask.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/prefetch.h>
+#include <linux/ratelimit.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <net/dst.h>
+#ifdef CONFIG_XFRM
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#endif /* CONFIG_XFRM */
+
+#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
+#include "ethernet-mem.h"
+#include "ethernet-rx.h"
+#include "ethernet-util.h"
+
+static atomic_t oct_rx_ready = ATOMIC_INIT(0);
+
+static struct oct_rx_group {
+ int irq;
+ int group;
+ struct napi_struct napi;
+} oct_rx_group[16];
+
+/**
+ * cvm_oct_do_interrupt - interrupt handler.
+ * @irq: Interrupt number.
+ * @napi_id: Cookie to identify the NAPI instance.
+ *
+ * The interrupt occurs whenever the POW has packets in our group.
+ */
+static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
+{
+ /* Disable the IRQ and start napi_poll. */
+ disable_irq_nosync(irq);
+ napi_schedule(napi_id);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * cvm_oct_check_rcv_error - process receive errors
+ * @work: Work queue entry pointing to the packet.
+ *
+ * Returns non-zero if the packet was dropped, zero otherwise.
+ */
+static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
+{
+ int port;
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ port = work->word0.pip.cn68xx.pknd;
+ else
+ port = work->word1.cn38xx.ipprt;
+
+ if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
+ /*
+ * Ignore length errors on min size packets. Some
+ * equipment incorrectly pads packets to 64+4FCS
+ * instead of 60+4FCS. Note these packets still get
+ * counted as frame errors.
+ */
+ } else if (work->word2.snoip.err_code == 5 ||
+ work->word2.snoip.err_code == 7) {
+ /*
+ * We received a packet with either an alignment error
+ * or a FCS error. This may be signalling that we are
+ * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
+ * off. If this is the case we need to parse the
+ * packet to determine if we can remove a non spec
+ * preamble and generate a correct packet.
+ */
+ int interface = cvmx_helper_get_interface_num(port);
+ int index = cvmx_helper_get_interface_index_num(port);
+ union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
+
+ gmxx_rxx_frm_ctl.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
+ if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
+ u8 *ptr =
+ cvmx_phys_to_ptr(work->packet_ptr.s.addr);
+ int i = 0;
+
+ while (i < work->word1.len - 1) {
+ if (*ptr != 0x55)
+ break;
+ ptr++;
+ i++;
+ }
+
+ if (*ptr == 0xd5) {
+ /* Port received 0xd5 preamble */
+ work->packet_ptr.s.addr += i + 1;
+ work->word1.len -= i + 5;
+ } else if ((*ptr & 0xf) == 0xd) {
+ /* Port received 0xd preamble */
+ work->packet_ptr.s.addr += i;
+ work->word1.len -= i + 4;
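+ /*
+ * The data is offset by half a byte;
+ * shift every byte up by one nibble to
+ * realign the packet.
+ */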
+ for (i = 0; i < work->word1.len; i++) {
+ *ptr =
+ ((*ptr & 0xf0) >> 4) |
+ ((*(ptr + 1) & 0xf) << 4);
+ ptr++;
+ }
+ } else {
+ printk_ratelimited("Port %d unknown preamble, packet dropped\n",
+ port);
+ cvm_oct_free_work(work);
+ return 1;
+ }
+ }
+ } else {
+ printk_ratelimited("Port %d receive error code %d, packet dropped\n",
+ port, work->word2.snoip.err_code);
+ cvm_oct_free_work(work);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
+{
+ int segments = work->word2.s.bufs;
+ union cvmx_buf_ptr segment_ptr = work->packet_ptr;
+ int len = work->word1.len;
+ int segment_size;
+
+ while (segments--) {
+ union cvmx_buf_ptr next_ptr;
+
+ next_ptr = *(union cvmx_buf_ptr *)
+ cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
+
+ /*
+ * Octeon Errata PKI-100: The segment size is wrong.
+ *
+ * Until it is fixed, calculate the segment size based on
+ * the packet pool buffer size.
+ * When it is fixed, the following line should be replaced
+ * with this one:
+ * int segment_size = segment_ptr.s.size;
+ */
+ segment_size =
+ CVMX_FPA_PACKET_POOL_SIZE -
+ (segment_ptr.s.addr -
+ (((segment_ptr.s.addr >> 7) -
+ segment_ptr.s.back) << 7));
+
+ /* Don't copy more than what is left in the packet */
+ if (segment_size > len)
+ segment_size = len;
+
+ /* Copy the data into the packet */
+ skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
+ segment_size);
+ len -= segment_size;
+ segment_ptr = next_ptr;
+ }
+}
+
+static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
+{
+ const int coreid = cvmx_get_core_num();
+ u64 old_group_mask;
+ u64 old_scratch;
+ int rx_count = 0;
+ int did_work_request = 0;
+ int packet_not_copied;
+
+ /* Prefetch cvm_oct_device since we know we need it soon */
+ prefetch(cvm_oct_device);
+
+ if (USE_ASYNC_IOBDMA) {
+ /* Save scratch in case userspace is using it */
+ CVMX_SYNCIOBDMA;
+ old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ }
+
+ /* Only allow work for our group (and preserve priorities) */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
+ cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
+ BIT(rx_group->group));
+ cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+ } else {
+ old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
+ (old_group_mask & ~0xFFFFull) |
+ BIT(rx_group->group));
+ }
+
+ if (USE_ASYNC_IOBDMA) {
+ cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+ did_work_request = 1;
+ }
+
+ while (rx_count < budget) {
+ struct sk_buff *skb = NULL;
+ struct sk_buff **pskb = NULL;
+ int skb_in_hw;
+ struct cvmx_wqe *work;
+ int port;
+
+ if (USE_ASYNC_IOBDMA && did_work_request)
+ work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
+ else
+ work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
+
+ prefetch(work);
+ did_work_request = 0;
+ if (!work) {
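+ /*
+ * No work pending: acknowledge and re-enable
+ * the work-queue interrupt for this group
+ * before leaving the poll loop.
+ */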
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
+ BIT(rx_group->group));
+ cvmx_write_csr(CVMX_SSO_WQ_INT,
+ BIT(rx_group->group));
+ } else {
+ union cvmx_pow_wq_int wq_int;
+
+ wq_int.u64 = 0;
+ wq_int.s.iq_dis = BIT(rx_group->group);
+ wq_int.s.wq_int = BIT(rx_group->group);
+ cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+ }
+ break;
+ }
+ pskb = (struct sk_buff **)
+ (cvm_oct_get_buffer_ptr(work->packet_ptr) -
+ sizeof(void *));
+ prefetch(pskb);
+
+ if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
+ cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
+ CVMX_POW_NO_WAIT);
+ did_work_request = 1;
+ }
+ rx_count++;
+
+ skb_in_hw = work->word2.s.bufs == 1;
+ if (likely(skb_in_hw)) {
+ skb = *pskb;
+ prefetch(&skb->head);
+ prefetch(&skb->len);
+ }
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ port = work->word0.pip.cn68xx.pknd;
+ else
+ port = work->word1.cn38xx.ipprt;
+
+ prefetch(cvm_oct_device[port]);
+
+ /* Immediately throw away all packets with receive errors */
+ if (unlikely(work->word2.snoip.rcv_error)) {
+ if (cvm_oct_check_rcv_error(work))
+ continue;
+ }
+
+ /*
+ * We can only use the zero copy path if skbuffs are
+ * in the FPA pool and the packet fits in a single
+ * buffer.
+ */
+ if (likely(skb_in_hw)) {
+ skb->data = skb->head + work->packet_ptr.s.addr -
+ cvmx_ptr_to_phys(skb->head);
+ prefetch(skb->data);
+ skb->len = work->word1.len;
+ skb_set_tail_pointer(skb, skb->len);
+ packet_not_copied = 1;
+ } else {
+ /*
+ * We have to copy the packet. First allocate
+ * an skbuff for it.
+ */
+ skb = dev_alloc_skb(work->word1.len);
+ if (!skb) {
+ cvm_oct_free_work(work);
+ continue;
+ }
+
+ /*
+ * Check if we've received a packet that was
+ * entirely stored in the work entry.
+ */
+ if (unlikely(work->word2.s.bufs == 0)) {
+ u8 *ptr = work->packet_data;
+
+ if (likely(!work->word2.s.not_IP)) {
+ /*
+ * The beginning of the packet
+ * moves for IP packets.
+ */
+ if (work->word2.s.is_v6)
+ ptr += 2;
+ else
+ ptr += 6;
+ }
+ skb_put_data(skb, ptr, work->word1.len);
+ /* No packet buffers to free */
+ } else {
+ copy_segments_to_skb(work, skb);
+ }
+ packet_not_copied = 0;
+ }
+ if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
+ cvm_oct_device[port])) {
+ struct net_device *dev = cvm_oct_device[port];
+
+ /*
+ * Only accept packets for devices that are
+ * currently up.
+ */
+ if (likely(dev->flags & IFF_UP)) {
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->dev = dev;
+
+ if (unlikely(work->word2.s.not_IP ||
+ work->word2.s.IP_exc ||
+ work->word2.s.L4_error ||
+ !work->word2.s.tcp_or_udp))
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Increment RX stats for virtual ports */
+ if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ }
+ netif_receive_skb(skb);
+ } else {
+ /*
+ * Drop any packet received for a device that
+ * isn't up.
+ */
+ dev->stats.rx_dropped++;
+ dev_kfree_skb_irq(skb);
+ }
+ } else {
+ /*
+ * Drop any packet received for a device that
+ * doesn't exist.
+ */
+ printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
+ port);
+ dev_kfree_skb_irq(skb);
+ }
+ /*
+ * Check to see if the skbuff and work share the same
+ * packet buffer.
+ */
+ if (likely(packet_not_copied)) {
+ /*
+ * This buffer needs to be replaced, increment
+ * the number of buffers we need to free by
+ * one.
+ */
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ 1);
+
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
+ } else {
+ cvm_oct_free_work(work);
+ }
+ }
+ /* Restore the original POW group mask */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
+ cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+ } else {
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+ }
+
+ if (USE_ASYNC_IOBDMA) {
+ /* Restore the scratch area */
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+ }
+ cvm_oct_rx_refill_pool(0);
+
+ return rx_count;
+}
+
+/**
+ * cvm_oct_napi_poll - the NAPI poll function.
+ * @napi: The NAPI instance.
+ * @budget: Maximum number of packets to receive.
+ *
+ * Returns the number of packets processed.
+ */
+static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
+ napi);
+ int rx_count;
+
+ rx_count = cvm_oct_poll(rx_group, budget);
+
+ if (rx_count < budget) {
+ /* No more work */
+ napi_complete_done(napi, rx_count);
+ enable_irq(rx_group->irq);
+ }
+ return rx_count;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * cvm_oct_poll_controller - poll all receive groups for packets
+ * @dev: Device to poll. Unused.
+ */
+void cvm_oct_poll_controller(struct net_device *dev)
+{
+ int i;
+
+ if (!atomic_read(&oct_rx_ready))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
+
+ cvm_oct_poll(&oct_rx_group[i], 16);
+ }
+}
+#endif
+
+void cvm_oct_rx_initialize(void)
+{
+ int i;
+ struct net_device *dev_for_napi = NULL;
+
+ for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
+ if (cvm_oct_device[i]) {
+ dev_for_napi = cvm_oct_device[i];
+ break;
+ }
+ }
+
+ if (!dev_for_napi)
+ panic("No net_devices were allocated.");
+
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ int ret;
+
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
+
+ netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
+ cvm_oct_napi_poll, rx_napi_weight);
+ napi_enable(&oct_rx_group[i].napi);
+
+ oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
+ oct_rx_group[i].group = i;
+
+ /* Register an IRQ handler to receive POW interrupts */
+ ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
+ "Ethernet", &oct_rx_group[i].napi);
+ if (ret)
+ panic("Could not acquire Ethernet IRQ %d\n",
+ oct_rx_group[i].irq);
+
+ disable_irq_nosync(oct_rx_group[i].irq);
+
+ /* Enable POW interrupt when our port has at least one packet */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ union cvmx_sso_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
+
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+ } else {
+ union cvmx_pow_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
+
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+ }
+
+ /* Schedule NAPI now. This will indirectly enable the
+ * interrupt.
+ */
+ napi_schedule(&oct_rx_group[i].napi);
+ }
+ atomic_inc(&oct_rx_ready);
+}
+
+void cvm_oct_rx_shutdown(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
+
+ /* Disable POW interrupt */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
+ else
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
+
+ /* Free the interrupt handler */
+ free_irq(oct_rx_group[i].irq, cvm_oct_device);
+
+ netif_napi_del(&oct_rx_group[i].napi);
+ }
+}
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
new file mode 100644
index 000000000000..ff6482fa20d6
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+void cvm_oct_poll_controller(struct net_device *dev);
+void cvm_oct_rx_initialize(void);
+void cvm_oct_rx_shutdown(void);
+
+static inline void cvm_oct_rx_refill_pool(int fill_threshold)
+{
+ int number_to_free;
+ int num_freed;
+ /* Refill the packet buffer pool */
+ number_to_free =
+ cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+
+ if (number_to_free > fill_threshold) {
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ -number_to_free);
+ num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
+ CVMX_FPA_PACKET_POOL_SIZE,
+ number_to_free);
+ if (num_freed != number_to_free) {
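+ /* Put back credit for buffers we failed to allocate. */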
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ number_to_free - num_freed);
+ }
+ }
+}
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
new file mode 100644
index 000000000000..d7fbd9159302
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+#include <linux/phy.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/ratelimit.h>
+#include <net/dst.h>
+
+#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
+#include "ethernet-util.h"
+#include "ethernet-mdio.h"
+
+int cvm_oct_sgmii_open(struct net_device *dev)
+{
+ return cvm_oct_common_open(dev, cvm_oct_link_poll);
+}
+
+int cvm_oct_sgmii_init(struct net_device *dev)
+{
+ cvm_oct_common_init(dev);
+
+ /* FIXME: Need autoneg logic */
+ return 0;
+}
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
new file mode 100644
index 000000000000..c582403e6a1f
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <net/dst.h>
+
+#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
+#include "ethernet-util.h"
+
+static int number_spi_ports;
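+/*
+ * Set by the interrupt handler when an interface needs retraining;
+ * cleared by cvm_oct_spi_poll() once the retrain succeeds.
+ */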
+static int need_retrain[2] = { 0, 0 };
+
+static void cvm_oct_spxx_int_pr(union cvmx_spxx_int_reg spx_int_reg, int index)
+{
+ if (spx_int_reg.s.spf)
+ pr_err("SPI%d: SRX Spi4 interface down\n", index);
+ if (spx_int_reg.s.calerr)
+ pr_err("SPI%d: SRX Spi4 Calendar table parity error\n", index);
+ if (spx_int_reg.s.syncerr)
+ pr_err("SPI%d: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n",
+ index);
+ if (spx_int_reg.s.diperr)
+ pr_err("SPI%d: SRX Spi4 DIP4 error\n", index);
+ if (spx_int_reg.s.tpaovr)
+ pr_err("SPI%d: SRX Selected port has hit TPA overflow\n",
+ index);
+ if (spx_int_reg.s.rsverr)
+ pr_err("SPI%d: SRX Spi4 reserved control word detected\n",
+ index);
+ if (spx_int_reg.s.drwnng)
+ pr_err("SPI%d: SRX Spi4 receive FIFO drowning/overflow\n",
+ index);
+ if (spx_int_reg.s.clserr)
+ pr_err("SPI%d: SRX Spi4 packet closed on non-16B alignment without EOP\n",
+ index);
+ if (spx_int_reg.s.spiovr)
+ pr_err("SPI%d: SRX Spi4 async FIFO overflow\n", index);
+ if (spx_int_reg.s.abnorm)
+ pr_err("SPI%d: SRX Abnormal packet termination (ERR bit)\n",
+ index);
+ if (spx_int_reg.s.prtnxa)
+ pr_err("SPI%d: SRX Port out of range\n", index);
+}
+
+static void cvm_oct_stxx_int_pr(union cvmx_stxx_int_reg stx_int_reg, int index)
+{
+ if (stx_int_reg.s.syncerr)
+ pr_err("SPI%d: STX Interface encountered a fatal error\n",
+ index);
+ if (stx_int_reg.s.frmerr)
+ pr_err("SPI%d: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n",
+ index);
+ if (stx_int_reg.s.unxfrm)
+ pr_err("SPI%d: STX Unexpected framing sequence\n", index);
+ if (stx_int_reg.s.nosync)
+ pr_err("SPI%d: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n",
+ index);
+ if (stx_int_reg.s.diperr)
+ pr_err("SPI%d: STX DIP2 error on the Spi4 Status channel\n",
+ index);
+ if (stx_int_reg.s.datovr)
+ pr_err("SPI%d: STX Spi4 FIFO overflow error\n", index);
+ if (stx_int_reg.s.ovrbst)
+ pr_err("SPI%d: STX Transmit packet burst too big\n", index);
+ if (stx_int_reg.s.calpar1)
+ pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
+ index, 1);
+ if (stx_int_reg.s.calpar0)
+ pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
+ index, 0);
+}
+
+static irqreturn_t cvm_oct_spi_spx_int(int index)
+{
+ union cvmx_spxx_int_reg spx_int_reg;
+ union cvmx_stxx_int_reg stx_int_reg;
+
+ spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(index));
+ cvmx_write_csr(CVMX_SPXX_INT_REG(index), spx_int_reg.u64);
+ if (!need_retrain[index]) {
+ spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(index));
+ cvm_oct_spxx_int_pr(spx_int_reg, index);
+ }
+
+ stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(index));
+ cvmx_write_csr(CVMX_STXX_INT_REG(index), stx_int_reg.u64);
+ if (!need_retrain[index]) {
+ stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(index));
+ cvm_oct_stxx_int_pr(stx_int_reg, index);
+ }
+
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(index), 0);
+ cvmx_write_csr(CVMX_STXX_INT_MSK(index), 0);
+ need_retrain[index] = 1;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
+{
+ irqreturn_t return_status = IRQ_NONE;
+ union cvmx_npi_rsl_int_blocks rsl_int_blocks;
+
+ /* Check and see if this interrupt was caused by the GMX block */
+ rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
+ if (rsl_int_blocks.s.spx1) /* 19 - SPX1_INT_REG & STX1_INT_REG */
+ return_status = cvm_oct_spi_spx_int(1);
+
+ if (rsl_int_blocks.s.spx0) /* 18 - SPX0_INT_REG & STX0_INT_REG */
+ return_status = cvm_oct_spi_spx_int(0);
+
+ return return_status;
+}
+
+static void cvm_oct_spi_enable_error_reporting(int interface)
+{
+ union cvmx_spxx_int_msk spxx_int_msk;
+ union cvmx_stxx_int_msk stxx_int_msk;
+
+ spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
+ spxx_int_msk.s.calerr = 1;
+ spxx_int_msk.s.syncerr = 1;
+ spxx_int_msk.s.diperr = 1;
+ spxx_int_msk.s.tpaovr = 1;
+ spxx_int_msk.s.rsverr = 1;
+ spxx_int_msk.s.drwnng = 1;
+ spxx_int_msk.s.clserr = 1;
+ spxx_int_msk.s.spiovr = 1;
+ spxx_int_msk.s.abnorm = 1;
+ spxx_int_msk.s.prtnxa = 1;
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
+
+ stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
+ stxx_int_msk.s.frmerr = 1;
+ stxx_int_msk.s.unxfrm = 1;
+ stxx_int_msk.s.nosync = 1;
+ stxx_int_msk.s.diperr = 1;
+ stxx_int_msk.s.datovr = 1;
+ stxx_int_msk.s.ovrbst = 1;
+ stxx_int_msk.s.calpar1 = 1;
+ stxx_int_msk.s.calpar0 = 1;
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
+}
+
+static void cvm_oct_spi_poll(struct net_device *dev)
+{
+ static int spi4000_port;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface;
+
+ for (interface = 0; interface < 2; interface++) {
+ if ((priv->port == interface * 16) && need_retrain[interface]) {
+ if (cvmx_spi_restart_interface
+ (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
+ need_retrain[interface] = 0;
+ cvm_oct_spi_enable_error_reporting(interface);
+ }
+ }
+
+ /*
+ * The SPI4000 TWSI interface is very slow. In order
+ * not to bring the system to a crawl, we only poll a
+ * single port every second. This means negotiation
+ * speed changes take up to 10 seconds, but at least
+ * we don't waste absurd amounts of time waiting for
+ * TWSI.
+ */
+ if (priv->port == spi4000_port) {
+ /*
+ * This function does nothing if it is called on an
+ * interface without a SPI4000.
+ */
+ cvmx_spi4000_check_speed(interface, priv->port);
+ /*
+ * Normal ordering increments. By decrementing
+ * we only match once per iteration.
+ */
+ spi4000_port--;
+ if (spi4000_port < 0)
+ spi4000_port = 10;
+ }
+ }
+}
+
+int cvm_oct_spi_init(struct net_device *dev)
+{
+ int r;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (number_spi_ports == 0) {
+ r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
+ IRQF_SHARED, "SPI", &number_spi_ports);
+ if (r)
+ return r;
+ }
+ number_spi_ports++;
+
+ if ((priv->port == 0) || (priv->port == 16)) {
+ cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
+ priv->poll = cvm_oct_spi_poll;
+ }
+ cvm_oct_common_init(dev);
+ return 0;
+}
+
+void cvm_oct_spi_uninit(struct net_device *dev)
+{
+ int interface;
+
+ cvm_oct_common_uninit(dev);
+ number_spi_ports--;
+ if (number_spi_ports == 0) {
+ for (interface = 0; interface < 2; interface++) {
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
+ }
+ free_irq(OCTEON_IRQ_RML, &number_spi_ports);
+ }
+}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
new file mode 100644
index 000000000000..ab7dd8216006
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -0,0 +1,717 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2010 Cavium Networks
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/ratelimit.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <net/dst.h>
+#ifdef CONFIG_XFRM
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#endif /* CONFIG_XFRM */
+
+#include <linux/atomic.h>
+#include <net/sch_generic.h>
+
+#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
+#include "ethernet-tx.h"
+#include "ethernet-util.h"
+
+#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
+
+/*
+ * You can define GET_SKBUFF_QOS() to override how the skbuff output
+ * function determines which output queue is used. The default
+ * implementation always uses the base queue for the port. If, for
+ * example, you wanted to use the skb->priority field, define
+ * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
+ */
+#ifndef GET_SKBUFF_QOS
+#define GET_SKBUFF_QOS(skb) 0
+#endif
+
+static void cvm_oct_tx_do_cleanup(unsigned long arg);
+static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
+
+/* Maximum number of SKBs to try to free per xmit packet. */
+#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
+
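+/*
+ * Callers speculatively add MAX_SKB_TO_FREE to the per-queue FAU counter
+ * and pass in the value it returned; this helper gives back the unused
+ * part of that credit and clamps the number of SKBs to free to at most
+ * MAX_SKB_TO_FREE.
+ */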
+static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
+{
+ int undo;
+
+ undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
+ MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(fau, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
+ -skb_to_free;
+ return skb_to_free;
+}
+
+static void cvm_oct_kick_tx_poll_watchdog(void)
+{
+ union cvmx_ciu_timx ciu_timx;
+
+ ciu_timx.u64 = 0;
+ ciu_timx.s.one_shot = 1;
+ ciu_timx.s.len = cvm_oct_tx_poll_interval;
+ cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
+}
+
+static void cvm_oct_free_tx_skbs(struct net_device *dev)
+{
+ int skb_to_free;
+ int qos, queues_per_port;
+ int total_freed = 0;
+ int total_remaining = 0;
+ unsigned long flags;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ queues_per_port = cvmx_pko_get_num_queues(priv->port);
+ /* Drain any pending packets in the free list */
+ for (qos = 0; qos < queues_per_port; qos++) {
+ if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
+ continue;
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau + qos * 4);
+ total_freed += skb_to_free;
+ if (skb_to_free > 0) {
+ struct sk_buff *to_free_list = NULL;
+
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ while (skb_to_free > 0) {
+ struct sk_buff *t;
+
+ t = __skb_dequeue(&priv->tx_free_list[qos]);
+ t->next = to_free_list;
+ to_free_list = t;
+ skb_to_free--;
+ }
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+ flags);
+ /* Do the actual freeing outside of the lock. */
+ while (to_free_list) {
+ struct sk_buff *t = to_free_list;
+
+ to_free_list = to_free_list->next;
+ dev_kfree_skb_any(t);
+ }
+ }
+ total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
+ }
+ if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ if (total_remaining)
+ cvm_oct_kick_tx_poll_watchdog();
+}
+
+/**
+ * cvm_oct_xmit - transmit a packet
+ * @skb: Packet to send
+ * @dev: Device info structure
+ *
+ * Returns NETDEV_TX_OK always.
+ */
+int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ union cvmx_pko_command_word0 pko_command;
+ union cvmx_buf_ptr hw_buffer;
+ u64 old_scratch;
+ u64 old_scratch2;
+ int qos;
+ int i;
+ enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ struct sk_buff *to_free_list;
+ int skb_to_free;
+ int buffers_to_free;
+ u32 total_to_clean;
+ unsigned long flags;
+#if REUSE_SKBUFFS_WITHOUT_FREE
+ unsigned char *fpa_head;
+#endif
+
+ /*
+ * Prefetch the private data structure. It is larger than the
+ * one cache line.
+ */
+ prefetch(priv);
+
+ /*
+ * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
+ * completely remove "qos" in the event neither interface
+ * supports multiple queues per port.
+ */
+ if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
+ (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
+ qos = GET_SKBUFF_QOS(skb);
+ if (qos <= 0)
+ qos = 0;
+ else if (qos >= cvmx_pko_get_num_queues(priv->port))
+ qos = 0;
+ } else {
+ qos = 0;
+ }
+
+ if (USE_ASYNC_IOBDMA) {
+ /* Save scratch in case userspace is using it */
+ CVMX_SYNCIOBDMA;
+ old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
+
+ /*
+ * Fetch and increment the number of packets to be
+ * freed.
+ */
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
+ FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ 0);
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+ priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
+ }
+
+ /*
+ * We have space for 6 segment pointers, If there will be more
+ * than that, we must linearize.
+ */
+ if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
+ if (unlikely(__skb_linearize(skb))) {
+ queue_type = QUEUE_DROP;
+ if (USE_ASYNC_IOBDMA) {
+ /*
+ * Get the number of skbuffs in use
+ * by the hardware
+ */
+ CVMX_SYNCIOBDMA;
+ skb_to_free =
+ cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ } else {
+ /*
+ * Get the number of skbuffs in use
+ * by the hardware
+ */
+ skb_to_free =
+ cvmx_fau_fetch_and_add32(priv->fau +
+ qos * 4,
+ MAX_SKB_TO_FREE);
+ }
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau +
+ qos * 4);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ goto skip_xmit;
+ }
+ }
+
+ /*
+ * The CN3XXX series of parts has an errata (GMX-401) which
+ * causes the GMX block to hang if a collision occurs towards
+ * the end of a <68 byte packet. As a workaround for this, we
+ * pad packets to be 68 bytes whenever we are in half duplex
+ * mode. We don't handle the case of having a small packet but
+ * no room to add the padding. The kernel should always give
+ * us at least a cache line
+ */
+ if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ if (interface < 2) {
+ /* We only need to pad packet in half duplex mode */
+ gmx_prt_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ if (gmx_prt_cfg.s.duplex == 0) {
+ int add_bytes = 64 - skb->len;
+
+ if ((skb_tail_pointer(skb) + add_bytes) <=
+ skb_end_pointer(skb))
+ __skb_put_zero(skb, add_bytes);
+ }
+ }
+ }
+
+ /* Build the PKO command */
+ pko_command.u64 = 0;
+#ifdef __LITTLE_ENDIAN
+ pko_command.s.le = 1;
+#endif
+ pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
+ pko_command.s.segs = 1;
+ pko_command.s.total_bytes = skb->len;
+ pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
+ pko_command.s.subone0 = 1;
+
+ pko_command.s.dontfree = 1;
+
+ /* Build the PKO buffer pointer */
+ hw_buffer.u64 = 0;
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
+ hw_buffer.s.pool = 0;
+ hw_buffer.s.size = skb->len;
+ } else {
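+ /*
+ * Scattered packet: build a gather list of fragment
+ * pointers in the skb control block and hand the PKO a
+ * pointer to that list.
+ */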
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
+ hw_buffer.s.pool = 0;
+ hw_buffer.s.size = skb_headlen(skb);
+ CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *fs = skb_shinfo(skb)->frags + i;
+
+ hw_buffer.s.addr =
+ XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
+ hw_buffer.s.size = skb_frag_size(fs);
+ CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
+ }
+ hw_buffer.s.addr =
+ XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
+ hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
+ pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
+ pko_command.s.gather = 1;
+ goto dont_put_skbuff_in_hw;
+ }
+
+ /*
+ * See if we can put this skb in the FPA pool. Any strange
+ * behavior from the Linux networking stack will most likely
+ * be caused by a bug in the following code. If some field is
+ * in use by the network stack and gets carried over when a
+ * buffer is reused, bad things may happen. If in doubt and
+ * you dont need the absolute best performance, disable the
+ * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
+ * shown a 25% increase in performance under some loads.
+ */
+#if REUSE_SKBUFFS_WITHOUT_FREE
+ fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
+ if (unlikely(skb->data < fpa_head)) {
+ /* TX buffer beginning can't meet FPA alignment constraints */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely
+ ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
+ /* TX buffer isn't large enough for the FPA */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb_shared(skb))) {
+ /* TX buffer sharing data with someone else */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb_cloned(skb))) {
+ /* TX buffer has been cloned */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb_header_cloned(skb))) {
+ /* TX buffer header has been cloned */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb->destructor)) {
+ /* TX buffer has a destructor */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb_shinfo(skb)->nr_frags)) {
+ /* TX buffer has fragments */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely
+ (skb->truesize !=
+ sizeof(*skb) + skb_end_offset(skb))) {
+ /* TX buffer truesize has been changed */
+ goto dont_put_skbuff_in_hw;
+ }
+
+ /*
+ * We can use this buffer in the FPA. We don't need the FAU
+ * update anymore
+ */
+ pko_command.s.dontfree = 0;
+
+ hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
+ ((unsigned long)fpa_head >> 7);
+
+ *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
+
+ /*
+ * The skbuff will be reused without ever being freed. We must
+ * cleanup a bunch of core things.
+ */
+ dst_release(skb_dst(skb));
+ skb_dst_set(skb, NULL);
+ skb_ext_reset(skb);
+ nf_reset_ct(skb);
+ skb_reset_redirect(skb);
+
+#ifdef CONFIG_NET_SCHED
+ skb->tc_index = 0;
+#endif /* CONFIG_NET_SCHED */
+#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
+
+dont_put_skbuff_in_hw:
+
+ /* Check if we can use the hardware checksumming */
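+ /*
+ * The offload engine only handles unfragmented IPv4 packets
+ * without IP options that carry TCP or UDP.
+ */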
+ if ((skb->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->version == 4) &&
+ (ip_hdr(skb)->ihl == 5) &&
+ ((ip_hdr(skb)->frag_off == 0) ||
+ (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
+ ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+ (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
+ /* Use hardware checksum calc */
+ pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
+ }
+
+ if (USE_ASYNC_IOBDMA) {
+ /* Get the number of skbuffs in use by the hardware */
+ CVMX_SYNCIOBDMA;
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
+ } else {
+ /* Get the number of skbuffs in use by the hardware */
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
+ buffers_to_free =
+ cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+ }
+
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau + qos * 4);
+
+ /*
+ * If we're sending faster than the receive can free them then
+ * don't do the HW free.
+ */
+ if ((buffers_to_free < -100) && !pko_command.s.dontfree)
+ pko_command.s.dontfree = 1;
+
+ if (pko_command.s.dontfree) {
+ queue_type = QUEUE_CORE;
+ pko_command.s.reg0 = priv->fau + qos * 4;
+ } else {
+ queue_type = QUEUE_HW;
+ }
+ if (USE_ASYNC_IOBDMA)
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+ FAU_TOTAL_TX_TO_CLEAN, 1);
+
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+
+ /* Drop this packet if we have too many already queued to the HW */
+ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
+ MAX_OUT_QUEUE_DEPTH)) {
+ if (dev->tx_queue_len != 0) {
+ /* Drop the lock when notifying the core. */
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+ flags);
+ netif_stop_queue(dev);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock,
+ flags);
+ } else {
+ /* Not using normal queueing, so drop the packet. */
+ queue_type = QUEUE_DROP;
+ goto skip_xmit;
+ }
+ }
+
+ cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
+ CVMX_PKO_LOCK_NONE);
+
+ /* Send the packet to the output queue */
+ if (unlikely(cvmx_pko_send_packet_finish(priv->port,
+ priv->queue + qos,
+ pko_command, hw_buffer,
+ CVMX_PKO_LOCK_NONE))) {
+ printk_ratelimited("%s: Failed to send the packet\n",
+ dev->name);
+ queue_type = QUEUE_DROP;
+ }
+skip_xmit:
+ to_free_list = NULL;
+
+ switch (queue_type) {
+ case QUEUE_DROP:
+ skb->next = to_free_list;
+ to_free_list = skb;
+ dev->stats.tx_dropped++;
+ break;
+ case QUEUE_HW:
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
+ break;
+ case QUEUE_CORE:
+ __skb_queue_tail(&priv->tx_free_list[qos], skb);
+ break;
+ default:
+ BUG();
+ }
+
+ while (skb_to_free > 0) {
+ struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+
+ t->next = to_free_list;
+ to_free_list = t;
+ skb_to_free--;
+ }
+
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+
+ /* Do the actual freeing outside of the lock. */
+ while (to_free_list) {
+ struct sk_buff *t = to_free_list;
+
+ to_free_list = to_free_list->next;
+ dev_kfree_skb_any(t);
+ }
+
+ if (USE_ASYNC_IOBDMA) {
+ CVMX_SYNCIOBDMA;
+ total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ /* Restore the scratch area */
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
+ } else {
+ total_to_clean =
+ cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
+ }
+
+ if ((total_to_clean & 0x3ff) == 0) {
+ /*
+ * Schedule the cleanup tasklet every 1024 packets for
+ * the pathological case of high traffic on one port
+ * delaying clean up of packets on a different port
+ * that is blocked waiting for the cleanup.
+ */
+ tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
+ }
+
+ cvm_oct_kick_tx_poll_watchdog();
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * cvm_oct_xmit_pow - transmit a packet to the POW
+ * @skb: Packet to send
+ * @dev: Device info structure
+ *
+ * Returns zero always.
+ */
+int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ void *packet_buffer;
+ void *copy_location;
+
+ /* Get a work queue entry */
+ struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+
+ if (unlikely(!work)) {
+ printk_ratelimited("%s: Failed to allocate a work queue entry\n",
+ dev->name);
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ /* Get a packet buffer */
+ packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
+ if (unlikely(!packet_buffer)) {
+ printk_ratelimited("%s: Failed to allocate a packet buffer\n",
+ dev->name);
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ /*
+ * Calculate where we need to copy the data to. We need to
+ * leave 8 bytes for a next pointer (unused). We also need to
+ * include any configure skip. Then we need to align the IP
+ * packet src and dest into the same 64bit word. The below
+ * calculation may add a little extra, but that doesn't
+ * hurt.
+ */
+ copy_location = packet_buffer + sizeof(u64);
+ copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
+
+ /*
+ * We have to copy the packet since whoever processes this
+ * packet will free it to a hardware pool. We can't use the
+ * trick of counting outstanding packets like in
+ * cvm_oct_xmit.
+ */
+ memcpy(copy_location, skb->data, skb->len);
+
+ /*
+ * Fill in some of the work queue fields. We may need to add
+ * more if the software at the other end needs them.
+ */
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+ work->word0.pip.cn38xx.hw_chksum = skb->csum;
+ work->word1.len = skb->len;
+ cvmx_wqe_set_port(work, priv->port);
+ cvmx_wqe_set_qos(work, priv->port & 0x7);
+ cvmx_wqe_set_grp(work, pow_send_group);
+ work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ work->word1.tag = pow_send_group; /* FIXME */
+ /* Default to zero; redundant zero assignments below are commented out. */
+ work->word2.u64 = 0;
+ work->word2.s.bufs = 1;
+ work->packet_ptr.u64 = 0;
+ work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
+ work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
+ work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
+ work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ work->word2.s.ip_offset = 14;
+#if 0
+ work->word2.s.vlan_valid = 0; /* FIXME */
+ work->word2.s.vlan_cfi = 0; /* FIXME */
+ work->word2.s.vlan_id = 0; /* FIXME */
+ work->word2.s.dec_ipcomp = 0; /* FIXME */
+#endif
+ work->word2.s.tcp_or_udp =
+ (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+ (ip_hdr(skb)->protocol == IPPROTO_UDP);
+#if 0
+ /* FIXME */
+ work->word2.s.dec_ipsec = 0;
+ /* We only support IPv4 right now */
+ work->word2.s.is_v6 = 0;
+ /* Hardware would set to zero */
+ work->word2.s.software = 0;
+ /* No error, packet is internal */
+ work->word2.s.L4_error = 0;
+#endif
+ work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
+ (ip_hdr(skb)->frag_off ==
+ cpu_to_be16(1 << 14)));
+#if 0
+ /* Assume Linux is sending a good packet */
+ work->word2.s.IP_exc = 0;
+#endif
+ work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
+ work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
+#if 0
+ /* This is an IP packet */
+ work->word2.s.not_IP = 0;
+ /* No error, packet is internal */
+ work->word2.s.rcv_error = 0;
+ /* No error, packet is internal */
+ work->word2.s.err_code = 0;
+#endif
+
+ /*
+ * When copying the data, include 4 bytes of the
+ * ethernet header to align the same way hardware
+ * does.
+ */
+ memcpy(work->packet_data, skb->data + 10,
+ sizeof(work->packet_data));
+ } else {
+#if 0
+ work->word2.snoip.vlan_valid = 0; /* FIXME */
+ work->word2.snoip.vlan_cfi = 0; /* FIXME */
+ work->word2.snoip.vlan_id = 0; /* FIXME */
+ work->word2.snoip.software = 0; /* Hardware would set to zero */
+#endif
+ work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
+ work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
+ work->word2.snoip.is_bcast =
+ (skb->pkt_type == PACKET_BROADCAST);
+ work->word2.snoip.is_mcast =
+ (skb->pkt_type == PACKET_MULTICAST);
+ work->word2.snoip.not_IP = 1; /* IP was done up above */
+#if 0
+ /* No error, packet is internal */
+ work->word2.snoip.rcv_error = 0;
+ /* No error, packet is internal */
+ work->word2.snoip.err_code = 0;
+#endif
+ memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
+ }
+
+ /* Submit the packet to the POW */
+ cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
+ cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev_consume_skb_any(skb);
+ return 0;
+}
+
+/**
+ * cvm_oct_tx_shutdown_dev - free all skbs currently queued for TX
+ * @dev: Device being shut down
+ *
+ */
+void cvm_oct_tx_shutdown_dev(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ unsigned long flags;
+ int qos;
+
+ for (qos = 0; qos < 16; qos++) {
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ while (skb_queue_len(&priv->tx_free_list[qos]))
+ dev_kfree_skb_any(__skb_dequeue(&priv->tx_free_list[qos]));
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ }
+}
+
+static void cvm_oct_tx_do_cleanup(unsigned long arg)
+{
+ int port;
+
+ for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
+ if (cvm_oct_device[port]) {
+ struct net_device *dev = cvm_oct_device[port];
+
+ cvm_oct_free_tx_skbs(dev);
+ }
+ }
+}
+
+static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
+{
+ /* Disable the interrupt. */
+ cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
+ /* Do the work in the tasklet. */
+ tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
+ return IRQ_HANDLED;
+}
+
+void cvm_oct_tx_initialize(void)
+{
+ int i;
+
+ /* Disable the interrupt. */
+ cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
+ /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
+ i = request_irq(OCTEON_IRQ_TIMER1,
+ cvm_oct_tx_cleanup_watchdog, 0,
+ "Ethernet", cvm_oct_device);
+
+ if (i)
+ panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
+}
+
+void cvm_oct_tx_shutdown(void)
+{
+ /* Free the interrupt handler */
+ free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
+}
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
new file mode 100644
index 000000000000..78936e9b33b0
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
+int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
+int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
+ int do_free, int qos);
+void cvm_oct_tx_initialize(void);
+void cvm_oct_tx_shutdown(void);
+void cvm_oct_tx_shutdown_dev(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
new file mode 100644
index 000000000000..2af83a12ca78
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+/**
+ * cvm_oct_get_buffer_ptr - convert packet data address to pointer
+ * @packet_ptr: Packet data hardware address
+ *
+ * Returns Packet buffer pointer
+ */
+static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
+{
+ return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back)
+ << 7);
+}
+
+/**
+ * INTERFACE - convert IPD port to logical interface
+ * @ipd_port: Port to check
+ *
+ * Returns Logical interface
+ */
+static inline int INTERFACE(int ipd_port)
+{
+ int interface;
+
+ if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
+ return 10;
+ interface = cvmx_helper_get_interface_num(ipd_port);
+ if (interface >= 0)
+ return interface;
+ panic("Illegal ipd_port %d passed to %s\n", ipd_port, __func__);
+}
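+/*
+ * Returning 10 for CVMX_PIP_NUM_INPUT_PORTS is presumably a
+ * pseudo-interface number for the pow0 device, which is assigned
+ * port CVMX_PIP_NUM_INPUT_PORTS in cvm_oct_probe() and has no
+ * physical interface.
+ */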
+
+/**
+ * INDEX - convert IPD/PKO port number to the port's interface index
+ * @ipd_port: Port to check
+ *
+ * Returns Index into interface port list
+ */
+static inline int INDEX(int ipd_port)
+{
+ return cvmx_helper_get_interface_index_num(ipd_port);
+}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
new file mode 100644
index 000000000000..f42c3816ce49
--- /dev/null
+++ b/drivers/staging/octeon/ethernet.c
@@ -0,0 +1,992 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/of_net.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+
+#include <net/dst.h>
+
+#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
+#include "ethernet-mem.h"
+#include "ethernet-rx.h"
+#include "ethernet-tx.h"
+#include "ethernet-mdio.h"
+#include "ethernet-util.h"
+
+#define OCTEON_MAX_MTU 65392
+
+static int num_packet_buffers = 1024;
+module_param(num_packet_buffers, int, 0444);
+MODULE_PARM_DESC(num_packet_buffers, "\n"
+ "\tNumber of packet buffers to allocate and store in the\n"
+ "\tFPA. By default, 1024 packet buffers are used.\n");
+
+static int pow_receive_group = 15;
+module_param(pow_receive_group, int, 0444);
+MODULE_PARM_DESC(pow_receive_group, "\n"
+ "\tPOW group to receive packets from. All ethernet hardware\n"
+ "\twill be configured to send incoming packets to this POW\n"
+ "\tgroup. Also any other software can submit packets to this\n"
+ "\tgroup for the kernel to process.");
+
+static int receive_group_order;
+module_param(receive_group_order, int, 0444);
+MODULE_PARM_DESC(receive_group_order, "\n"
+ "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
+ "\twill be configured to send incoming packets to multiple POW\n"
+ "\tgroups. pow_receive_group parameter is ignored when multiple\n"
+ "\tgroups are taken into use and groups are allocated starting\n"
+ "\tfrom 0. By default, a single group is used.\n");
+
+int pow_send_group = -1;
+module_param(pow_send_group, int, 0644);
+MODULE_PARM_DESC(pow_send_group, "\n"
+ "\tPOW group to send packets to other software on. This\n"
+ "\tcontrols the creation of the virtual device pow0.\n"
+ "\talways_use_pow also depends on this value.");
+
+int always_use_pow;
+module_param(always_use_pow, int, 0444);
+MODULE_PARM_DESC(always_use_pow, "\n"
+ "\tWhen set, always send to the pow group. This will cause\n"
+ "\tpackets sent to real ethernet devices to be sent to the\n"
+ "\tPOW group instead of the hardware. Unless some other\n"
+ "\tapplication changes the config, packets will still be\n"
+ "\treceived from the low level hardware. Use this option\n"
+ "\tto allow a CVMX app to intercept all packets from the\n"
+ "\tlinux kernel. You must specify pow_send_group along with\n"
+ "\tthis option.");
+
+char pow_send_list[128] = "";
+module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
+MODULE_PARM_DESC(pow_send_list, "\n"
+ "\tComma separated list of ethernet devices that should use the\n"
+ "\tPOW for transmit instead of the actual ethernet hardware. This\n"
+ "\tis a per port version of always_use_pow. always_use_pow takes\n"
+ "\tprecedence over this list. For example, setting this to\n"
+ "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
+ "\tusing the pow_send_group.");
+
+int rx_napi_weight = 32;
+module_param(rx_napi_weight, int, 0444);
+MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
+
+/* Mask indicating which receive groups are in use. */
+int pow_receive_groups;
+
+/*
+ * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
+ *
+ * Set to one right before cvm_oct_poll_queue is destroyed.
+ */
+atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
+
+/*
+ * Array of every ethernet device owned by this driver indexed by
+ * the ipd input port number.
+ */
+struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
+
+u64 cvm_oct_tx_poll_interval;
+
+static void cvm_oct_rx_refill_worker(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
+
+static void cvm_oct_rx_refill_worker(struct work_struct *work)
+{
+ /*
+ * FPA 0 may have been drained, try to refill it if we need
+ * more than num_packet_buffers / 2, otherwise normal receive
+ * processing will refill it. If it were drained, no packets
+ * could be received so cvm_oct_napi_poll would never be
+ * invoked to do the refill.
+ */
+ cvm_oct_rx_refill_pool(num_packet_buffers / 2);
+
+ if (!atomic_read(&cvm_oct_poll_queue_stopping))
+ schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
+}
+
+static void cvm_oct_periodic_worker(struct work_struct *work)
+{
+ struct octeon_ethernet *priv = container_of(work,
+ struct octeon_ethernet,
+ port_periodic_work.work);
+
+ if (priv->poll)
+ priv->poll(cvm_oct_device[priv->port]);
+
+ cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
+ (cvm_oct_device[priv->port]);
+
+ if (!atomic_read(&cvm_oct_poll_queue_stopping))
+ schedule_delayed_work(&priv->port_periodic_work, HZ);
+}
+
+static void cvm_oct_configure_common_hw(void)
+{
+ /* Setup the FPA */
+ cvmx_fpa_enable();
+ cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
+ num_packet_buffers);
+ cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
+ num_packet_buffers);
+ if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
+ cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
+
+#ifdef __LITTLE_ENDIAN
+ {
+ union cvmx_ipd_ctl_status ipd_ctl_status;
+
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_status.s.pkt_lend = 1;
+ ipd_ctl_status.s.wqe_lend = 1;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
+ }
+#endif
+
+ cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
+}
+
+/**
+ * cvm_oct_free_work - Free a work queue entry
+ *
+ * @work_queue_entry: Work queue entry to free
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvm_oct_free_work(void *work_queue_entry)
+{
+ struct cvmx_wqe *work = work_queue_entry;
+
+ int segments = work->word2.s.bufs;
+ union cvmx_buf_ptr segment_ptr = work->packet_ptr;
+
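+ /*
+ * The 8 bytes just before each segment hold a cvmx_buf_ptr to
+ * the next segment (the "next pointer" reserved by the TX path),
+ * and a set "i" bit on a segment means it must not be freed.
+ */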
+ while (segments--) {
+ union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
+ cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
+ if (unlikely(!segment_ptr.s.i))
+ cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
+ segment_ptr.s.pool,
+ CVMX_FPA_PACKET_POOL_SIZE / 128);
+ segment_ptr = next_ptr;
+ }
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL(cvm_oct_free_work);
+
+/**
+ * cvm_oct_common_get_stats - get the low level ethernet statistics
+ * @dev: Device to get the statistics from
+ *
+ * Returns Pointer to the statistics
+ */
+static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
+{
+ cvmx_pip_port_status_t rx_status;
+ cvmx_pko_port_status_t tx_status;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
+ if (octeon_is_simulation()) {
+ /* The simulator doesn't support statistics */
+ memset(&rx_status, 0, sizeof(rx_status));
+ memset(&tx_status, 0, sizeof(tx_status));
+ } else {
+ cvmx_pip_get_port_status(priv->port, 1, &rx_status);
+ cvmx_pko_get_port_status(priv->port, 1, &tx_status);
+ }
+
+ dev->stats.rx_packets += rx_status.inb_packets;
+ dev->stats.tx_packets += tx_status.packets;
+ dev->stats.rx_bytes += rx_status.inb_octets;
+ dev->stats.tx_bytes += tx_status.octets;
+ dev->stats.multicast += rx_status.multicast_packets;
+ dev->stats.rx_crc_errors += rx_status.inb_errors;
+ dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
+ dev->stats.rx_dropped += rx_status.dropped_packets;
+ }
+
+ return &dev->stats;
+}
+
+/**
+ * cvm_oct_common_change_mtu - change the link MTU
+ * @dev: Device to change
+ * @new_mtu: The new MTU
+ *
+ * Returns Zero on success
+ */
+static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ int vlan_bytes = VLAN_HLEN;
+#else
+ int vlan_bytes = 0;
+#endif
+ int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
+
+ dev->mtu = new_mtu;
+
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ int index = INDEX(priv->port);
+ /* Add ethernet header and FCS, and VLAN if configured. */
+ int max_packet = new_mtu + mtu_overhead;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Signal errors on packets larger than the MTU */
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
+ max_packet);
+ } else {
+ /*
+ * Set the hardware to truncate packets larger
+ * than the MTU and smaller than 64 bytes.
+ */
+ union cvmx_pip_frm_len_chkx frm_len_chk;
+
+ frm_len_chk.u64 = 0;
+ frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
+ frm_len_chk.s.maxlen = max_packet;
+ cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
+ frm_len_chk.u64);
+ }
+ /*
+ * Set the hardware to truncate packets larger than
+ * the MTU. The jabber register must be set to a
+ * multiple of 8 bytes, so round up.
+ */
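+ /* e.g. max_packet = 1518: (1518 + 7) & ~7 = 1520 */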
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
+ (max_packet + 7) & ~7u);
+ }
+ return 0;
+}
+
+/**
+ * cvm_oct_common_set_multicast_list - set the multicast list
+ * @dev: Device to work on
+ */
+static void cvm_oct_common_set_multicast_list(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ union cvmx_gmxx_rxx_adr_ctl control;
+ int index = INDEX(priv->port);
+
+ control.u64 = 0;
+ control.s.bcst = 1; /* Allow broadcast MAC addresses */
+
+ if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
+ (dev->flags & IFF_PROMISC))
+ /* Force accept multicast packets */
+ control.s.mcst = 2;
+ else
+ /* Force reject multicast packets */
+ control.s.mcst = 1;
+
+ if (dev->flags & IFF_PROMISC)
+ /*
+ * Reject matches if promisc. Since CAM is
+ * shut off, should accept everything.
+ */
+ control.s.cam_mode = 0;
+ else
+ /* Filter packets based on the CAM */
+ control.s.cam_mode = 1;
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
+ control.u64);
+ if (dev->flags & IFF_PROMISC)
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
+ else
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 1);
+
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+}
+
+static int cvm_oct_set_mac_filter(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ int interface = INTERFACE(priv->port);
+
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ int i;
+ u8 *ptr = dev->dev_addr;
+ u64 mac = 0;
+ int index = INDEX(priv->port);
+
+ for (i = 0; i < 6; i++)
+ mac = (mac << 8) | (u64)ptr[i];
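+ /*
+ * e.g. dev_addr 00:11:22:33:44:55 assembles to
+ * 0x001122334455 for the SMAC/CAM registers below.
+ */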
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
+ ptr[0]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
+ ptr[1]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
+ ptr[2]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
+ ptr[3]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
+ ptr[4]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
+ ptr[5]);
+ cvm_oct_common_set_multicast_list(dev);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+ return 0;
+}
+
+/**
+ * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
+ * @dev: The device in question.
+ * @addr: Socket address.
+ *
+ * Returns Zero on success
+ */
+static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+{
+ int r = eth_mac_addr(dev, addr);
+
+ if (r)
+ return r;
+ return cvm_oct_set_mac_filter(dev);
+}
+
+/**
+ * cvm_oct_common_init - per network device initialization
+ * @dev: Device to initialize
+ *
+ * Returns Zero on success
+ */
+int cvm_oct_common_init(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ const u8 *mac = NULL;
+
+ if (priv->of_node)
+ mac = of_get_mac_address(priv->of_node);
+
+ if (!IS_ERR_OR_NULL(mac))
+ ether_addr_copy(dev->dev_addr, mac);
+ else
+ eth_hw_addr_random(dev);
+
+ /*
+ * Force the interface to use the POW send if always_use_pow
+ * was specified or it is in the pow send list.
+ */
+ if ((pow_send_group != -1) &&
+ (always_use_pow || strstr(pow_send_list, dev->name)))
+ priv->queue = -1;
+
+ if (priv->queue != -1)
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+
+ /* We do our own locking, Linux doesn't need to */
+ dev->features |= NETIF_F_LLTX;
+ dev->ethtool_ops = &cvm_oct_ethtool_ops;
+
+ cvm_oct_set_mac_filter(dev);
+ dev_set_mtu(dev, dev->mtu);
+
+ /*
+ * Zero out stats for port so we won't mistakenly show
+ * counters from the bootloader.
+ */
+ memset(dev->netdev_ops->ndo_get_stats(dev), 0,
+ sizeof(struct net_device_stats));
+
+ if (dev->netdev_ops->ndo_stop)
+ dev->netdev_ops->ndo_stop(dev);
+
+ return 0;
+}
+
+void cvm_oct_common_uninit(struct net_device *dev)
+{
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
+}
+
+int cvm_oct_common_open(struct net_device *dev,
+ void (*link_poll)(struct net_device *))
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+ union cvmx_helper_link_info link_info;
+ int rv;
+
+ rv = cvm_oct_phy_setup_device(dev);
+ if (rv)
+ return rv;
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 1;
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ gmx_cfg.s.pknd = priv->port;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+ if (octeon_is_simulation())
+ return 0;
+
+ if (dev->phydev) {
+ int r = phy_read_status(dev->phydev);
+
+ if (r == 0 && dev->phydev->link == 0)
+ netif_carrier_off(dev);
+ cvm_oct_adjust_link(dev);
+ } else {
+ link_info = cvmx_helper_link_get(priv->port);
+ if (!link_info.s.link_up)
+ netif_carrier_off(dev);
+ priv->poll = link_poll;
+ link_poll(dev);
+ }
+
+ return 0;
+}
+
+void cvm_oct_link_poll(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_helper_link_info link_info;
+
+ link_info = cvmx_helper_link_get(priv->port);
+ if (link_info.u64 == priv->link_info)
+ return;
+
+ if (cvmx_helper_link_set(priv->port, link_info))
+ link_info.u64 = priv->link_info;
+ else
+ priv->link_info = link_info.u64;
+
+ if (link_info.s.link_up) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else if (netif_carrier_ok(dev)) {
+ netif_carrier_off(dev);
+ }
+ cvm_oct_note_carrier(priv, link_info);
+}
+
+static int cvm_oct_xaui_open(struct net_device *dev)
+{
+ return cvm_oct_common_open(dev, cvm_oct_link_poll);
+}
+
+static const struct net_device_ops cvm_oct_npi_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_uninit = cvm_oct_common_uninit,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_uninit = cvm_oct_common_uninit,
+ .ndo_open = cvm_oct_xaui_open,
+ .ndo_stop = cvm_oct_common_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
+ .ndo_init = cvm_oct_sgmii_init,
+ .ndo_uninit = cvm_oct_common_uninit,
+ .ndo_open = cvm_oct_sgmii_open,
+ .ndo_stop = cvm_oct_common_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+static const struct net_device_ops cvm_oct_spi_netdev_ops = {
+ .ndo_init = cvm_oct_spi_init,
+ .ndo_uninit = cvm_oct_spi_uninit,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_uninit = cvm_oct_common_uninit,
+ .ndo_open = cvm_oct_rgmii_open,
+ .ndo_stop = cvm_oct_common_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+static const struct net_device_ops cvm_oct_pow_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_start_xmit = cvm_oct_xmit_pow,
+ .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+static struct device_node *cvm_oct_of_get_child(const struct device_node *parent,
+ int reg_val)
+{
+ struct device_node *node = NULL;
+ int size;
+ const __be32 *addr;
+
+ for (;;) {
+ node = of_get_next_child(parent, node);
+ if (!node)
+ break;
+ addr = of_get_property(node, "reg", &size);
+ if (addr && (be32_to_cpu(*addr) == reg_val))
+ break;
+ }
+ return node;
+}
+
+static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
+ int interface, int port)
+{
+ struct device_node *ni, *np;
+
+ ni = cvm_oct_of_get_child(pip, interface);
+ if (!ni)
+ return NULL;
+
+ np = cvm_oct_of_get_child(ni, port);
+ of_node_put(ni);
+
+ return np;
+}
+
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+ int port)
+{
+ struct device_node *np = priv->of_node;
+ u32 delay_value;
+ bool rx_delay;
+ bool tx_delay;
+
+ /* By default, both the RX and TX delays are enabled in
+ * __cvmx_helper_rgmii_enable().
+ */
+ rx_delay = true;
+ tx_delay = true;
+
+ if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
+ rx_delay = delay_value > 0;
+ }
+ if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+ tx_delay = delay_value > 0;
+ }
+
+ if (!rx_delay && !tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+ else if (!rx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+ else if (!tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+ else
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
+}
+
+static int cvm_oct_probe(struct platform_device *pdev)
+{
+ int num_interfaces;
+ int interface;
+ int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
+ int qos;
+ struct device_node *pip;
+ int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ mtu_overhead += VLAN_HLEN;
+#endif
+
+ octeon_mdiobus_force_mod_depencency();
+
+ pip = pdev->dev.of_node;
+ if (!pip) {
+ pr_err("Error: No 'pip' in /aliases\n");
+ return -EINVAL;
+ }
+
+ cvm_oct_configure_common_hw();
+
+ cvmx_helper_initialize_packet_io_global();
+
+ if (receive_group_order) {
+ if (receive_group_order > 4)
+ receive_group_order = 4;
+ pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
+ } else {
+ pow_receive_groups = BIT(pow_receive_group);
+ }
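+ /*
+ * e.g. receive_group_order = 2 gives pow_receive_groups =
+ * (1 << (1 << 2)) - 1 = 0xf, i.e. groups 0-3; with order 0 only
+ * the single pow_receive_group bit is set.
+ */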
+
+ /* Change the input group for all ports before input is enabled */
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+
+ for (port = cvmx_helper_get_ipd_port(interface, 0);
+ port < cvmx_helper_get_ipd_port(interface, num_ports);
+ port++) {
+ union cvmx_pip_prt_tagx pip_prt_tagx;
+
+ pip_prt_tagx.u64 =
+ cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
+
+ if (receive_group_order) {
+ int tag_mask;
+
+ /* We support only 16 groups at the moment, so
+ * always disable the two additional "hidden"
+ * tag_mask bits on CN68XX.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ pip_prt_tagx.u64 |= 0x3ull << 44;
+
+ tag_mask = ~((1 << receive_group_order) - 1);
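+ /*
+ * grptagmask excludes the high tag bits, so with
+ * grptagbase = 0 the low receive_group_order bits
+ * of the flow tag appear to select the group,
+ * e.g. order 2 spreads flows over groups 0-3.
+ */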
+ pip_prt_tagx.s.grptagbase = 0;
+ pip_prt_tagx.s.grptagmask = tag_mask;
+ pip_prt_tagx.s.grptag = 1;
+ pip_prt_tagx.s.tag_mode = 0;
+ pip_prt_tagx.s.inc_prt_flag = 1;
+ pip_prt_tagx.s.ip6_dprt_flag = 1;
+ pip_prt_tagx.s.ip4_dprt_flag = 1;
+ pip_prt_tagx.s.ip6_sprt_flag = 1;
+ pip_prt_tagx.s.ip4_sprt_flag = 1;
+ pip_prt_tagx.s.ip6_dst_flag = 1;
+ pip_prt_tagx.s.ip4_dst_flag = 1;
+ pip_prt_tagx.s.ip6_src_flag = 1;
+ pip_prt_tagx.s.ip4_src_flag = 1;
+ pip_prt_tagx.s.grp = 0;
+ } else {
+ pip_prt_tagx.s.grptag = 0;
+ pip_prt_tagx.s.grp = pow_receive_group;
+ }
+
+ cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
+ pip_prt_tagx.u64);
+ }
+ }
+
+ cvmx_helper_ipd_and_packet_input_enable();
+
+ memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
+
+ /*
+ * Initialize the FAU used for counting packet buffers that
+ * need to be freed.
+ */
+ cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+
+ /* Initialize the FAU used for counting tx SKBs that need to be freed */
+ cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
+
+ if (pow_send_group != -1) {
+ struct net_device *dev;
+
+ dev = alloc_etherdev(sizeof(struct octeon_ethernet));
+ if (dev) {
+ /* Initialize the device private structure. */
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->netdev_ops = &cvm_oct_pow_netdev_ops;
+ priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ priv->port = CVMX_PIP_NUM_INPUT_PORTS;
+ priv->queue = -1;
+ strscpy(dev->name, "pow%d", sizeof(dev->name));
+ for (qos = 0; qos < 16; qos++)
+ skb_queue_head_init(&priv->tx_free_list[qos]);
+ dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
+ dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
+
+ if (register_netdev(dev) < 0) {
+ pr_err("Failed to register ethernet device for POW\n");
+ free_netdev(dev);
+ } else {
+ cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
+ pr_info("%s: POW send group %d, receive group %d\n",
+ dev->name, pow_send_group,
+ pow_receive_group);
+ }
+ } else {
+ pr_err("Failed to allocate ethernet device for POW\n");
+ }
+ }
+
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ cvmx_helper_interface_mode_t imode =
+ cvmx_helper_interface_get_mode(interface);
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+ int port_index;
+
+ for (port_index = 0,
+ port = cvmx_helper_get_ipd_port(interface, 0);
+ port < cvmx_helper_get_ipd_port(interface, num_ports);
+ port_index++, port++) {
+ struct octeon_ethernet *priv;
+ struct net_device *dev =
+ alloc_etherdev(sizeof(struct octeon_ethernet));
+ if (!dev) {
+ pr_err("Failed to allocate ethernet device for port %d\n",
+ port);
+ continue;
+ }
+
+ /* Initialize the device private structure. */
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ priv = netdev_priv(dev);
+ priv->netdev = dev;
+ priv->of_node = cvm_oct_node_for_port(pip, interface,
+ port_index);
+
+ INIT_DELAYED_WORK(&priv->port_periodic_work,
+ cvm_oct_periodic_worker);
+ priv->imode = imode;
+ priv->port = port;
+ priv->queue = cvmx_pko_get_base_queue(priv->port);
+ priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+ priv->phy_mode = PHY_INTERFACE_MODE_NA;
+ for (qos = 0; qos < 16; qos++)
+ skb_queue_head_init(&priv->tx_free_list[qos]);
+ for (qos = 0; qos < cvmx_pko_get_num_queues(port);
+ qos++)
+ cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
+ dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
+ dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
+
+ switch (priv->imode) {
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ dev->netdev_ops = &cvm_oct_npi_netdev_ops;
+ strscpy(dev->name, "npi%d", sizeof(dev->name));
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
+ strscpy(dev->name, "xaui%d", sizeof(dev->name));
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ dev->netdev_ops = &cvm_oct_npi_netdev_ops;
+ strscpy(dev->name, "loop%d", sizeof(dev->name));
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
+ dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ dev->netdev_ops = &cvm_oct_spi_netdev_ops;
+ strscpy(dev->name, "spi%d", sizeof(dev->name));
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_GMII;
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
+ cvm_set_rgmii_delay(priv, interface,
+ port_index);
+ break;
+ }
+
+ if (!dev->netdev_ops) {
+ free_netdev(dev);
+ } else if (register_netdev(dev) < 0) {
+ pr_err("Failed to register ethernet device for interface %d, port %d\n",
+ interface, priv->port);
+ free_netdev(dev);
+ } else {
+ cvm_oct_device[priv->port] = dev;
+ fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(u32);
+ schedule_delayed_work(&priv->port_periodic_work,
+ HZ);
+ }
+ }
+ }
+
+ cvm_oct_tx_initialize();
+ cvm_oct_rx_initialize();
+
+ /*
+ * 150 us: about 10 1500-byte packets at 1 Gbps.
+ */
+ cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
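+ /*
+ * octeon_get_clock_rate() / 1000000 is core cycles per
+ * microsecond; e.g. an (assumed) 1 GHz clock makes the
+ * interval 150 * 1000 = 150000 cycles.
+ */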
+
+ schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
+
+ return 0;
+}
+
+static int cvm_oct_remove(struct platform_device *pdev)
+{
+ int port;
+
+ cvmx_ipd_disable();
+
+ atomic_inc_return(&cvm_oct_poll_queue_stopping);
+ cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
+
+ cvm_oct_rx_shutdown();
+ cvm_oct_tx_shutdown();
+
+ cvmx_pko_disable();
+
+ /* Free the ethernet devices */
+ for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
+ if (cvm_oct_device[port]) {
+ struct net_device *dev = cvm_oct_device[port];
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&priv->port_periodic_work);
+
+ cvm_oct_tx_shutdown_dev(dev);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ cvm_oct_device[port] = NULL;
+ }
+ }
+
+ cvmx_pko_shutdown();
+
+ cvmx_ipd_free_ptr();
+
+ /* Free the HW pools */
+ cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
+ num_packet_buffers);
+ cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
+ num_packet_buffers);
+ if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
+ cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+ return 0;
+}
+
+static const struct of_device_id cvm_oct_match[] = {
+ {
+ .compatible = "cavium,octeon-3860-pip",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cvm_oct_match);
+
+static struct platform_driver cvm_oct_driver = {
+ .probe = cvm_oct_probe,
+ .remove = cvm_oct_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = cvm_oct_match,
+ },
+};
+
+module_platform_driver(cvm_oct_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
+MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
new file mode 100644
index 000000000000..a6140705706f
--- /dev/null
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is based on code from OCTEON SDK by Cavium Networks.
+ *
+ * Copyright (c) 2003-2010 Cavium Networks
+ */
+
+/*
+ * External interface for the Cavium Octeon ethernet driver.
+ */
+#ifndef OCTEON_ETHERNET_H
+#define OCTEON_ETHERNET_H
+
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-pow.h>
+#include <asm/octeon/cvmx-scratch.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+#include <asm/octeon/cvmx-wqe.h>
+
+#else
+
+#include "octeon-stubs.h"
+
+#endif
+
+/**
+ * This is the definition of the Ethernet driver's private
+ * driver state stored in netdev_priv(dev).
+ */
+struct octeon_ethernet {
+ /* PKO hardware output port */
+ int port;
+ /* PKO hardware queue for the port */
+ int queue;
+ /* Hardware fetch and add to count outstanding tx buffers */
+ int fau;
+ /* My netdev. */
+ struct net_device *netdev;
+ /*
+ * Type of port. This is one of the enums in
+ * cvmx_helper_interface_mode_t
+ */
+ int imode;
+ /* PHY mode */
+ phy_interface_t phy_mode;
+ /* List of outstanding tx buffers per queue */
+ struct sk_buff_head tx_free_list[16];
+ unsigned int last_speed;
+ unsigned int last_link;
+ /* Last negotiated link state */
+ u64 link_info;
+ /* Called periodically to check link status */
+ void (*poll)(struct net_device *dev);
+ struct delayed_work port_periodic_work;
+ struct device_node *of_node;
+};
+
+int cvm_oct_free_work(void *work_queue_entry);
+
+int cvm_oct_rgmii_open(struct net_device *dev);
+
+int cvm_oct_sgmii_init(struct net_device *dev);
+int cvm_oct_sgmii_open(struct net_device *dev);
+
+int cvm_oct_spi_init(struct net_device *dev);
+void cvm_oct_spi_uninit(struct net_device *dev);
+
+int cvm_oct_common_init(struct net_device *dev);
+void cvm_oct_common_uninit(struct net_device *dev);
+void cvm_oct_adjust_link(struct net_device *dev);
+int cvm_oct_common_stop(struct net_device *dev);
+int cvm_oct_common_open(struct net_device *dev,
+ void (*link_poll)(struct net_device *));
+void cvm_oct_note_carrier(struct octeon_ethernet *priv,
+ union cvmx_helper_link_info li);
+void cvm_oct_link_poll(struct net_device *dev);
+
+extern int always_use_pow;
+extern int pow_send_group;
+extern int pow_receive_groups;
+extern char pow_send_list[];
+extern struct net_device *cvm_oct_device[];
+extern atomic_t cvm_oct_poll_queue_stopping;
+extern u64 cvm_oct_tx_poll_interval;
+
+extern int rx_napi_weight;
+
+#endif
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
new file mode 100644
index 000000000000..d06743504f2b
--- /dev/null
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -0,0 +1,1434 @@
+#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
+
+#ifndef XKPHYS_TO_PHYS
+# define XKPHYS_TO_PHYS(p) (p)
+#endif
+
+#define OCTEON_IRQ_WORKQ0 0
+#define OCTEON_IRQ_RML 0
+#define OCTEON_IRQ_TIMER1 0
+#define OCTEON_IS_MODEL(x) 0
+#define octeon_has_feature(x) 0
+#define octeon_get_clock_rate() 0
+
+#define CVMX_SYNCIOBDMA do { } while (0)
+
+#define CVMX_HELPER_INPUT_TAG_TYPE 0
+#define CVMX_HELPER_FIRST_MBUFF_SKIP 7
+#define CVMX_FAU_REG_END (2048)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 16
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE 16
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE 16
+#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CTL(a, b) ((a) + (b))
+#define CVMX_GMXX_PRTX_CFG(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_FRM_MAX(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_JABBER(a, b) ((a) + (b))
+#define CVMX_IPD_CTL_STATUS 0
+#define CVMX_PIP_FRM_LEN_CHKX(a) (a)
+#define CVMX_PIP_NUM_INPUT_PORTS 1
+#define CVMX_SCR_SCRATCH 0
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 2
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 2
+#define CVMX_IPD_SUB_PORT_FCS 0
+#define CVMX_SSO_WQ_IQ_DIS 0
+#define CVMX_SSO_WQ_INT 0
+#define CVMX_POW_WQ_INT 0
+#define CVMX_SSO_WQ_INT_PC 0
+#define CVMX_NPI_RSL_INT_BLOCKS 0
+#define CVMX_POW_WQ_INT_PC 0
+
+union cvmx_pip_wqe_word2 {
+ uint64_t u64;
+ struct {
+ uint64_t bufs:8;
+ uint64_t ip_offset:8;
+ uint64_t vlan_valid:1;
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_cfi:1;
+ uint64_t vlan_id:12;
+ uint64_t pr:4;
+ uint64_t unassigned2:8;
+ uint64_t dec_ipcomp:1;
+ uint64_t tcp_or_udp:1;
+ uint64_t dec_ipsec:1;
+ uint64_t is_v6:1;
+ uint64_t software:1;
+ uint64_t L4_error:1;
+ uint64_t is_frag:1;
+ uint64_t IP_exc:1;
+ uint64_t is_bcast:1;
+ uint64_t is_mcast:1;
+ uint64_t not_IP:1;
+ uint64_t rcv_error:1;
+ uint64_t err_code:8;
+ } s;
+ struct {
+ uint64_t bufs:8;
+ uint64_t ip_offset:8;
+ uint64_t vlan_valid:1;
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_cfi:1;
+ uint64_t vlan_id:12;
+ uint64_t port:12;
+ uint64_t dec_ipcomp:1;
+ uint64_t tcp_or_udp:1;
+ uint64_t dec_ipsec:1;
+ uint64_t is_v6:1;
+ uint64_t software:1;
+ uint64_t L4_error:1;
+ uint64_t is_frag:1;
+ uint64_t IP_exc:1;
+ uint64_t is_bcast:1;
+ uint64_t is_mcast:1;
+ uint64_t not_IP:1;
+ uint64_t rcv_error:1;
+ uint64_t err_code:8;
+ } s_cn68xx;
+
+ struct {
+ uint64_t unused1:16;
+ uint64_t vlan:16;
+ uint64_t unused2:32;
+ } svlan;
+ struct {
+ uint64_t bufs:8;
+ uint64_t unused:8;
+ uint64_t vlan_valid:1;
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_cfi:1;
+ uint64_t vlan_id:12;
+ uint64_t pr:4;
+ uint64_t unassigned2:12;
+ uint64_t software:1;
+ uint64_t unassigned3:1;
+ uint64_t is_rarp:1;
+ uint64_t is_arp:1;
+ uint64_t is_bcast:1;
+ uint64_t is_mcast:1;
+ uint64_t not_IP:1;
+ uint64_t rcv_error:1;
+ uint64_t err_code:8;
+ } snoip;
+
+};
+
+union cvmx_pip_wqe_word0 {
+ struct {
+ uint64_t next_ptr:40;
+ uint8_t unused;
+ __wsum hw_chksum;
+ } cn38xx;
+ struct {
+ uint64_t pknd:6; /* 0..5 */
+ uint64_t unused2:2; /* 6..7 */
+ uint64_t bpid:6; /* 8..13 */
+ uint64_t unused1:18; /* 14..31 */
+ uint64_t l2ptr:8; /* 32..39 */
+ uint64_t l3ptr:8; /* 40..47 */
+ uint64_t unused0:8; /* 48..55 */
+ uint64_t l4ptr:8; /* 56..63 */
+ } cn68xx;
+};
+
+union cvmx_wqe_word0 {
+ uint64_t u64;
+ union cvmx_pip_wqe_word0 pip;
+};
+
+union cvmx_wqe_word1 {
+ uint64_t u64;
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t varies:14;
+ uint64_t len:16;
+ };
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:3;
+ uint64_t grp:6;
+ uint64_t zero_1:1;
+ uint64_t qos:3;
+ uint64_t zero_0:1;
+ uint64_t len:16;
+ } cn68xx;
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:1;
+ uint64_t grp:4;
+ uint64_t qos:3;
+ uint64_t ipprt:6;
+ uint64_t len:16;
+ } cn38xx;
+};
+
+union cvmx_buf_ptr {
+ void *ptr;
+ uint64_t u64;
+ struct {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t addr:40;
+ } s;
+};
+
+struct cvmx_wqe {
+ union cvmx_wqe_word0 word0;
+ union cvmx_wqe_word1 word1;
+ union cvmx_pip_wqe_word2 word2;
+ union cvmx_buf_ptr packet_ptr;
+ uint8_t packet_data[96];
+};
+
+union cvmx_helper_link_info {
+ uint64_t u64;
+ struct {
+ uint64_t reserved_20_63:44;
+ uint64_t link_up:1; /**< Is the physical link up? */
+ uint64_t full_duplex:1; /**< 1 if the link is full duplex */
+ uint64_t speed:18; /**< Speed of the link in Mbps */
+ } s;
+};
+
+enum cvmx_fau_reg_32 {
+ CVMX_FAU_REG_32_START = 0,
+};
+
+enum cvmx_fau_op_size {
+ CVMX_FAU_OP_SIZE_8 = 0,
+ CVMX_FAU_OP_SIZE_16 = 1,
+ CVMX_FAU_OP_SIZE_32 = 2,
+ CVMX_FAU_OP_SIZE_64 = 3
+};
+
+typedef enum {
+ CVMX_SPI_MODE_UNKNOWN = 0,
+ CVMX_SPI_MODE_TX_HALFPLEX = 1,
+ CVMX_SPI_MODE_RX_HALFPLEX = 2,
+ CVMX_SPI_MODE_DUPLEX = 3
+} cvmx_spi_mode_t;
+
+typedef enum {
+ CVMX_HELPER_INTERFACE_MODE_DISABLED,
+ CVMX_HELPER_INTERFACE_MODE_RGMII,
+ CVMX_HELPER_INTERFACE_MODE_GMII,
+ CVMX_HELPER_INTERFACE_MODE_SPI,
+ CVMX_HELPER_INTERFACE_MODE_PCIE,
+ CVMX_HELPER_INTERFACE_MODE_XAUI,
+ CVMX_HELPER_INTERFACE_MODE_SGMII,
+ CVMX_HELPER_INTERFACE_MODE_PICMG,
+ CVMX_HELPER_INTERFACE_MODE_NPI,
+ CVMX_HELPER_INTERFACE_MODE_LOOP,
+} cvmx_helper_interface_mode_t;
+
+typedef enum {
+ CVMX_POW_WAIT = 1,
+ CVMX_POW_NO_WAIT = 0,
+} cvmx_pow_wait_t;
+
+typedef enum {
+ CVMX_PKO_LOCK_NONE = 0,
+ CVMX_PKO_LOCK_ATOMIC_TAG = 1,
+ CVMX_PKO_LOCK_CMD_QUEUE = 2,
+} cvmx_pko_lock_t;
+
+typedef enum {
+ CVMX_PKO_SUCCESS,
+ CVMX_PKO_INVALID_PORT,
+ CVMX_PKO_INVALID_QUEUE,
+ CVMX_PKO_INVALID_PRIORITY,
+ CVMX_PKO_NO_MEMORY,
+ CVMX_PKO_PORT_ALREADY_SETUP,
+ CVMX_PKO_CMD_QUEUE_INIT_ERROR
+} cvmx_pko_status_t;
+
+enum cvmx_pow_tag_type {
+ CVMX_POW_TAG_TYPE_ORDERED = 0L,
+ CVMX_POW_TAG_TYPE_ATOMIC = 1L,
+ CVMX_POW_TAG_TYPE_NULL = 2L,
+ CVMX_POW_TAG_TYPE_NULL_NULL = 3L
+};
+
+union cvmx_ipd_ctl_status {
+ uint64_t u64;
+ struct cvmx_ipd_ctl_status_s {
+ uint64_t reserved_18_63:46;
+ uint64_t use_sop:1;
+ uint64_t rst_done:1;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } s;
+ struct cvmx_ipd_ctl_status_cn30xx {
+ uint64_t reserved_10_63:54;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn30xx;
+ struct cvmx_ipd_ctl_status_cn38xxp2 {
+ uint64_t reserved_9_63:55;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn38xxp2;
+ struct cvmx_ipd_ctl_status_cn50xx {
+ uint64_t reserved_15_63:49;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn50xx;
+ struct cvmx_ipd_ctl_status_cn58xx {
+ uint64_t reserved_12_63:52;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn58xx;
+ struct cvmx_ipd_ctl_status_cn63xxp1 {
+ uint64_t reserved_16_63:48;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn63xxp1;
+};
+
+union cvmx_ipd_sub_port_fcs {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_fcs_s {
+ uint64_t port_bit:32;
+ uint64_t reserved_32_35:4;
+ uint64_t port_bit2:4;
+ uint64_t reserved_40_63:24;
+ } s;
+ struct cvmx_ipd_sub_port_fcs_cn30xx {
+ uint64_t port_bit:3;
+ uint64_t reserved_3_63:61;
+ } cn30xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx {
+ uint64_t port_bit:32;
+ uint64_t reserved_32_63:32;
+ } cn38xx;
+};
+
+union cvmx_ipd_sub_port_qos_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_qos_cnt_s {
+ uint64_t cnt:32;
+ uint64_t port_qos:9;
+ uint64_t reserved_41_63:23;
+ } s;
+};
+
+typedef struct {
+ uint32_t dropped_octets;
+ uint32_t dropped_packets;
+ uint32_t pci_raw_packets;
+ uint32_t octets;
+ uint32_t packets;
+ uint32_t multicast_packets;
+ uint32_t broadcast_packets;
+ uint32_t len_64_packets;
+ uint32_t len_65_127_packets;
+ uint32_t len_128_255_packets;
+ uint32_t len_256_511_packets;
+ uint32_t len_512_1023_packets;
+ uint32_t len_1024_1518_packets;
+ uint32_t len_1519_max_packets;
+ uint32_t fcs_align_err_packets;
+ uint32_t runt_packets;
+ uint32_t runt_crc_packets;
+ uint32_t oversize_packets;
+ uint32_t oversize_crc_packets;
+ uint32_t inb_packets;
+ uint64_t inb_octets;
+ uint16_t inb_errors;
+} cvmx_pip_port_status_t;
+
+typedef struct {
+ uint32_t packets;
+ uint64_t octets;
+ uint64_t doorbell;
+} cvmx_pko_port_status_t;
+
+union cvmx_pip_frm_len_chkx {
+ uint64_t u64;
+ struct cvmx_pip_frm_len_chkx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t maxlen:16;
+ uint64_t minlen:16;
+ } s;
+};
+
+union cvmx_gmxx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_ctl_s {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+ } s;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t reserved_9_63:55;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t reserved_8_63:56;
+ } cn31xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_63:53;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t reserved_10_63:54;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_63:53;
+ } cn58xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+ } cn61xx;
+};
+
+union cvmx_gmxx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_reg_s {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } s;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t reserved_19_63:45;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_reg_cn50xx {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t reserved_27_63:37;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn58xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } cn61xx;
+};
+
+union cvmx_gmxx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cfg_s {
+ uint64_t reserved_22_63:42;
+ uint64_t pknd:6;
+ uint64_t reserved_14_15:2;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } s;
+ struct cvmx_gmxx_prtx_cfg_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn30xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx {
+ uint64_t reserved_14_63:50;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn52xx;
+};
+
+union cvmx_gmxx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_ctl_s {
+ uint64_t reserved_4_63:60;
+ uint64_t cam_mode:1;
+ uint64_t mcst:2;
+ uint64_t bcst:1;
+ } s;
+};
+
+union cvmx_pip_prt_tagx {
+ uint64_t u64;
+ struct cvmx_pip_prt_tagx_s {
+ uint64_t reserved_54_63:10;
+ uint64_t portadd_en:1;
+ uint64_t inc_hwchk:1;
+ uint64_t reserved_50_51:2;
+ uint64_t grptagbase_msb:2;
+ uint64_t reserved_46_47:2;
+ uint64_t grptagmask_msb:2;
+ uint64_t reserved_42_43:2;
+ uint64_t grp_msb:2;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } s;
+ struct cvmx_pip_prt_tagx_cn30xx {
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t reserved_30_30:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } cn30xx;
+ struct cvmx_pip_prt_tagx_cn50xx {
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } cn50xx;
+};
+
+union cvmx_spxx_int_reg {
+ uint64_t u64;
+ struct cvmx_spxx_int_reg_s {
+ uint64_t reserved_32_63:32;
+ uint64_t spf:1;
+ uint64_t reserved_12_30:19;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+};
+
+union cvmx_spxx_int_msk {
+ uint64_t u64;
+ struct cvmx_spxx_int_msk_s {
+ uint64_t reserved_12_63:52;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+};
+
+union cvmx_pow_wq_int {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_s {
+ uint64_t wq_int:16;
+ uint64_t iq_dis:16;
+ uint64_t reserved_32_63:32;
+ } s;
+};
+
+union cvmx_sso_wq_int_thrx {
+ uint64_t u64;
+ struct {
+ uint64_t iq_thr:12;
+ uint64_t reserved_12_13:2;
+ uint64_t ds_thr:12;
+ uint64_t reserved_26_27:2;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_33_63:31;
+ } s;
+};
+
+union cvmx_stxx_int_reg {
+ uint64_t u64;
+ struct cvmx_stxx_int_reg_s {
+ uint64_t reserved_9_63:55;
+ uint64_t syncerr:1;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+};
+
+union cvmx_stxx_int_msk {
+ uint64_t u64;
+ struct cvmx_stxx_int_msk_s {
+ uint64_t reserved_8_63:56;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+};
+
+union cvmx_pow_wq_int_pc {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_pc_s {
+ uint64_t reserved_0_7:8;
+ uint64_t pc_thr:20;
+ uint64_t reserved_28_31:4;
+ uint64_t pc:28;
+ uint64_t reserved_60_63:4;
+ } s;
+};
+
+union cvmx_pow_wq_int_thrx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_thrx_s {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_23_23:1;
+ uint64_t ds_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t iq_thr:11;
+ } s;
+ struct cvmx_pow_wq_int_thrx_cn30xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_18_23:6;
+ uint64_t ds_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t iq_thr:6;
+ } cn30xx;
+ struct cvmx_pow_wq_int_thrx_cn31xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_20_23:4;
+ uint64_t ds_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t iq_thr:8;
+ } cn31xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ds_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t iq_thr:9;
+ } cn52xx;
+ struct cvmx_pow_wq_int_thrx_cn63xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_22_23:2;
+ uint64_t ds_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t iq_thr:10;
+ } cn63xx;
+};
+
+union cvmx_npi_rsl_int_blocks {
+ uint64_t u64;
+ struct cvmx_npi_rsl_int_blocks_s {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t reserved_28_29:2;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t reserved_13_14:2;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } s;
+ struct cvmx_npi_rsl_int_blocks_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t rint_29:1;
+ uint64_t rint_28:1;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t rint_14:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn30xx;
+ struct cvmx_npi_rsl_int_blocks_cn38xx {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t rint_29:1;
+ uint64_t rint_28:1;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t rint_14:1;
+ uint64_t rint_13:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn38xx;
+ struct cvmx_npi_rsl_int_blocks_cn50xx {
+ uint64_t reserved_31_63:33;
+ uint64_t iob:1;
+ uint64_t lmc1:1;
+ uint64_t agl:1;
+ uint64_t reserved_24_27:4;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t reserved_21_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t reserved_15_15:1;
+ uint64_t rad:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t reserved_8_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn50xx;
+};
+
+union cvmx_pko_command_word0 {
+ uint64_t u64;
+ struct {
+ uint64_t total_bytes:16;
+ uint64_t segs:6;
+ uint64_t dontfree:1;
+ uint64_t ignore_i:1;
+ uint64_t ipoffp1:7;
+ uint64_t gather:1;
+ uint64_t rsp:1;
+ uint64_t wqp:1;
+ uint64_t n2:1;
+ uint64_t le:1;
+ uint64_t reg0:11;
+ uint64_t subone0:1;
+ uint64_t reg1:11;
+ uint64_t subone1:1;
+ uint64_t size0:2;
+ uint64_t size1:2;
+ } s;
+};
+
+union cvmx_ciu_timx {
+ uint64_t u64;
+ struct cvmx_ciu_timx_s {
+ uint64_t reserved_37_63:27;
+ uint64_t one_shot:1;
+ uint64_t len:36;
+ } s;
+};
+
+union cvmx_gmxx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_rx_inbnd_s {
+ uint64_t status:1;
+ uint64_t speed:2;
+ uint64_t duplex:1;
+ uint64_t reserved_4_63:60;
+ } s;
+};
+
+static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
+{
+ return value;
+}
+
+static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
+{ }
+
+static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
+{ }
+
+static inline uint64_t cvmx_scratch_read64(uint64_t address)
+{
+ return 0;
+}
+
+static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
+{ }
+
+static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
+{
+ return 0;
+}
+
+static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
+{
+ return (void *)(uintptr_t)(physical_address);
+}
+
+static inline uint64_t cvmx_ptr_to_phys(void *ptr)
+{
+ return (unsigned long)ptr;
+}
+
+static inline int cvmx_helper_get_interface_num(int ipd_port)
+{
+ return ipd_port;
+}
+
+static inline int cvmx_helper_get_interface_index_num(int ipd_port)
+{
+ return ipd_port;
+}
+
+static inline void cvmx_fpa_enable(void)
+{ }
+
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
+{
+ return 0;
+}
+
+static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
+{ }
+
+static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
+{
+ return 0;
+}
+
+static inline void *cvmx_fpa_alloc(uint64_t pool)
+{
+ return NULL;
+}
+
+static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
+ uint64_t num_cache_lines)
+{ }
+
+static inline int octeon_is_simulation(void)
+{
+ return 1;
+}
+
+static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pip_port_status_t *status)
+{ }
+
+static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pko_port_status_t *status)
+{ }
+
+static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
+ interface)
+{
+ return 0;
+}
+
+static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
+{
+ union cvmx_helper_link_info ret = { .u64 = 0 };
+
+ return ret;
+}
+
+static inline int cvmx_helper_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_initialize_packet_io_global(void)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_get_number_of_interfaces(void)
+{
+ return 2;
+}
+
+static inline int cvmx_helper_ports_on_interface(int interface)
+{
+ return 1;
+}
+
+static inline int cvmx_helper_get_ipd_port(int interface, int port)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_ipd_and_packet_input_enable(void)
+{
+ return 0;
+}
+
+static inline void cvmx_ipd_disable(void)
+{ }
+
+static inline void cvmx_ipd_free_ptr(void)
+{ }
+
+static inline void cvmx_pko_disable(void)
+{ }
+
+static inline void cvmx_pko_shutdown(void)
+{ }
+
+static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
+{
+ return port;
+}
+
+static inline int cvmx_pko_get_base_queue(int port)
+{
+ return port;
+}
+
+static inline int cvmx_pko_get_num_queues(int port)
+{
+ return port;
+}
+
+static inline unsigned int cvmx_get_core_num(void)
+{
+ return 0;
+}
+
+static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
+ cvmx_pow_wait_t wait)
+{ }
+
+static inline void cvmx_pow_work_request_async(int scr_addr,
+ cvmx_pow_wait_t wait)
+{ }
+
+static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
+{
+ struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;
+
+ return wqe;
+}
+
+static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+{
+ return (void *)(unsigned long)wait;
+}
+
+static inline int cvmx_spi_restart_interface(int interface,
+ cvmx_spi_mode_t mode, int timeout)
+{
+ return 0;
+}
+
+static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
+ enum cvmx_fau_reg_32 reg,
+ int32_t value)
+{ }
+
+static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
+ int interface,
+ int port)
+{
+ union cvmx_gmxx_rxx_rx_inbnd r;
+
+ r.u64 = 0;
+ return r;
+}
+
+static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
+ cvmx_pko_lock_t use_locking)
+{ }
+
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
+ uint64_t queue, union cvmx_pko_command_word0 pko_command,
+ union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
+{
+ return 0;
+}
+
+static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
+{ }
+
+static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
+{ }
+
+static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
+{
+ return 0;
+}
+
+static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
+{ }
+
+static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t qos, uint64_t grp)
+{ }
+
+#define CVMX_ASXX_RX_CLK_SETX(a, b) ((a) + (b))
+#define CVMX_ASXX_TX_CLK_SETX(a, b) ((a) + (b))
+#define CVMX_CIU_TIMX(a) (a)
+#define CVMX_GMXX_RXX_ADR_CAM0(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM1(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM2(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM3(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM4(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_ADR_CAM5(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_FRM_CTL(a, b) ((a) + (b))
+#define CVMX_GMXX_RXX_INT_REG(a, b) ((a) + (b))
+#define CVMX_GMXX_SMACX(a, b) ((a) + (b))
+#define CVMX_PIP_PRT_TAGX(a) (a)
+#define CVMX_POW_PP_GRP_MSKX(a) (a)
+#define CVMX_POW_WQ_INT_THRX(a) (a)
+#define CVMX_SPXX_INT_MSK(a) (a)
+#define CVMX_SPXX_INT_REG(a) (a)
+#define CVMX_SSO_PPX_GRP_MSK(a) (a)
+#define CVMX_SSO_WQ_INT_THRX(a) (a)
+#define CVMX_STXX_INT_MSK(a) (a)
+#define CVMX_STXX_INT_REG(a) (a)
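
The unions above all follow one pattern: u64 aliases the raw 64-bit CSR value, the s struct names every bitfield in the superset layout, and the per-chip variants (cn30xx, cn50xx, cn52xx, ...) capture models whose field widths differ. The inline stubs and address macros reduce every CSR access to a no-op so the octeon ethernet driver can be compile-tested on non-OCTEON targets. A minimal sketch of the intended read-modify-write idiom, using a hypothetical helper name:

/* Hypothetical sketch: arm CIU timer 0 as a one-shot via the union and
 * stub definitions above. Against the stubs this is a no-op; on real
 * hardware the same code performs a read-modify-write of the CSR.
 */
static inline void example_arm_timer0(uint64_t ticks)
{
	union cvmx_ciu_timx tim;

	tim.u64 = cvmx_read_csr(CVMX_CIU_TIMX(0));	/* stub returns 0 */
	tim.s.one_shot = 1;				/* fire once, then stop */
	tim.s.len = ticks & 0xfffffffffull;		/* 36-bit length field */
	cvmx_write_csr(CVMX_CIU_TIMX(0), tim.u64);	/* stub discards */
}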
diff --git a/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts
index 61fad96818c2..096137fcd5cc 100644
--- a/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts
+++ b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts
@@ -3,51 +3,46 @@
/plugin/;
/ {
- compatible = "bcm,bcm2835", "bcm,bcm2708", "bcm,bcm2709";
+ compatible = "brcm,bcm2835", "brcm,bcm2708", "brcm,bcm2709";
+};
- fragment@0 {
- target = <&spi0>;
- __overlay__ {
- status = "okay";
+&spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
- spidev@0{
- status = "disabled";
- };
+ spidev@0 {
+ reg = <0>;
+ status = "disabled";
+ };
- spidev@1{
- status = "disabled";
- };
- };
+ spidev@1 {
+ reg = <1>;
+ status = "disabled";
};
+};
- fragment@1 {
- target = <&gpio>;
- __overlay__ {
- pi433_pins: pi433_pins {
- brcm,pins = <7 25 24>;
- brcm,function = <0 0 0>; // in in in
- };
- };
+&gpio {
+ pi433_pins: pi433_pins {
+ brcm,pins = <7 25 24>;
+ brcm,function = <0 0 0>; // in in in
};
+};
- fragment@2 {
- target = <&spi0>;
- __overlay__ {
- #address-cells = <1>;
- #size-cells = <0>;
- status = "okay";
-
- pi433: pi433@0 {
- compatible = "Smarthome-Wolf,pi433";
- reg = <0>;
- spi-max-frequency = <10000000>;
- status = "okay";
-
- pinctrl-0 = <&pi433_pins>;
- DIO0-gpio = <&gpio 24 0>;
- DIO1-gpio = <&gpio 25 0>;
- DIO2-gpio = <&gpio 7 0>;
- };
- };
+&spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ pi433: pi433@0 {
+ compatible = "Smarthome-Wolf,pi433";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ status = "okay";
+
+ pinctrl-0 = <&pi433_pins>;
+ DIO0-gpio = <&gpio 24 0>;
+ DIO1-gpio = <&gpio 25 0>;
+ DIO2-gpio = <&gpio 7 0>;
};
};
diff --git a/drivers/staging/pi433/pi433_if.h b/drivers/staging/pi433/pi433_if.h
index 9feb95c431cb..16c5b7fba249 100644
--- a/drivers/staging/pi433/pi433_if.h
+++ b/drivers/staging/pi433/pi433_if.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* include/linux/TODO
*
* userspace interface for pi433 radio module
diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h
index d43a8d87d5d3..b648ba5fff89 100644
--- a/drivers/staging/pi433/rf69.h
+++ b/drivers/staging/pi433/rf69.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* hardware abstraction/register access for HopeRf rf69 radio module
*
* Copyright (C) 2016 Wolf-Entwicklungen
diff --git a/drivers/staging/pi433/rf69_enum.h b/drivers/staging/pi433/rf69_enum.h
index 3ee1952245c2..fbf56fcf5fe8 100644
--- a/drivers/staging/pi433/rf69_enum.h
+++ b/drivers/staging/pi433/rf69_enum.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* enumerations for HopeRf rf69 radio module
*
* Copyright (C) 2016 Wolf-Entwicklungen
diff --git a/drivers/staging/pi433/rf69_registers.h b/drivers/staging/pi433/rf69_registers.h
index be5497cdace0..a170c66c3d5b 100644
--- a/drivers/staging/pi433/rf69_registers.h
+++ b/drivers/staging/pi433/rf69_registers.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* register description for HopeRf rf69 radio module
*
* Copyright (C) 2016 Wolf-Entwicklungen
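
All four pi433 headers get the same fix: the kernel's license-rules documentation expects the SPDX tag in a .h file to be a self-contained /* ... */ comment on the first line (only .c files use the // form), so the tag is closed on line one instead of sharing the descriptive comment block. The resulting shape, sketched for a hypothetical header:

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * example.h - illustrative header, not a file from this series
 */
#ifndef EXAMPLE_H
#define EXAMPLE_H

/* declarations would go here */

#endif /* EXAMPLE_H */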
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 4bc5d5fce9bf..fc8c5ca8935d 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -16,8 +16,8 @@
/*
* General definitions...
*/
-#define DRV_NAME "qlge"
-#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
+#define DRV_NAME "qlge"
+#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
#define DRV_VERSION "1.00.00.35"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -59,7 +59,7 @@
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
-#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
+#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT / 2)
#define UDELAY_COUNT 3
#define UDELAY_DELAY 100
@@ -119,7 +119,6 @@ enum {
* Processor Address Register (PROC_ADDR) bit definitions.
*/
enum {
-
/* Misc. stuff */
MAILBOX_COUNT = 16,
MAILBOX_TIMEOUT = 5,
@@ -1077,11 +1076,11 @@ struct tx_buf_desc {
* IOCB Definitions...
*/
-#define OPCODE_OB_MAC_IOCB 0x01
+#define OPCODE_OB_MAC_IOCB 0x01
#define OPCODE_OB_MAC_TSO_IOCB 0x02
-#define OPCODE_IB_MAC_IOCB 0x20
-#define OPCODE_IB_MPI_IOCB 0x21
-#define OPCODE_IB_AE_IOCB 0x3f
+#define OPCODE_IB_MAC_IOCB 0x20
+#define OPCODE_IB_MPI_IOCB 0x21
+#define OPCODE_IB_AE_IOCB 0x3f
struct ob_mac_iocb_req {
u8 opcode;
@@ -1173,15 +1172,15 @@ struct ib_mac_iocb_rsp {
u8 flags1;
#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
-#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
+#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
-#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
-#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
+#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
+#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
u8 flags2;
#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
@@ -1198,16 +1197,16 @@ struct ib_mac_iocb_rsp {
#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
u8 flags3;
#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
-#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
-#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
-#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
-#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
-#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
-#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
-#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
-#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
-#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
-#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
+#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
+#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
+#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
+#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
+#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
+#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
+#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
__le32 data_len; /* */
__le64 data_addr; /* */
__le32 rss; /* */
@@ -1233,17 +1232,17 @@ struct ib_ae_iocb_rsp {
#define IB_AE_IOCB_RSP_OI 0x01
#define IB_AE_IOCB_RSP_I 0x02
u8 event;
-#define LINK_UP_EVENT 0x00
-#define LINK_DOWN_EVENT 0x01
-#define CAM_LOOKUP_ERR_EVENT 0x06
-#define SOFT_ECC_ERROR_EVENT 0x07
-#define MGMT_ERR_EVENT 0x08
-#define TEN_GIG_MAC_EVENT 0x09
-#define GPI0_H2L_EVENT 0x10
-#define GPI0_L2H_EVENT 0x20
-#define GPI1_H2L_EVENT 0x11
-#define GPI1_L2H_EVENT 0x21
-#define PCI_ERR_ANON_BUF_RD 0x40
+#define LINK_UP_EVENT 0x00
+#define LINK_DOWN_EVENT 0x01
+#define CAM_LOOKUP_ERR_EVENT 0x06
+#define SOFT_ECC_ERROR_EVENT 0x07
+#define MGMT_ERR_EVENT 0x08
+#define TEN_GIG_MAC_EVENT 0x09
+#define GPI0_H2L_EVENT 0x10
+#define GPI0_L2H_EVENT 0x20
+#define GPI1_H2L_EVENT 0x11
+#define GPI1_L2H_EVENT 0x21
+#define PCI_ERR_ANON_BUF_RD 0x40
u8 q_id;
__le32 reserved[15];
} __packed;
@@ -1367,7 +1366,7 @@ struct tx_ring_desc {
struct tx_ring_desc *next;
};
-#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
+#define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % (qdev->tx_ring_count))
struct tx_ring {
/*
@@ -1762,7 +1761,6 @@ struct ql_nic_misc {
};
struct ql_reg_dump {
-
/* segment 0 */
struct mpi_coredump_global_header mpi_global_header;
@@ -1792,7 +1790,7 @@ struct ql_reg_dump {
/* segment 34 */
struct mpi_coredump_segment_header ets_seg_hdr;
- u32 ets[8+2];
+ u32 ets[8 + 2];
};
struct ql_mpi_coredump {
@@ -2059,7 +2057,6 @@ enum {
};
struct nic_operations {
-
int (*get_flash) (struct ql_adapter *);
int (*port_initialize) (struct ql_adapter *);
};
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 8cf39615c520..1795533cbd3a 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -29,15 +29,13 @@ static int ql_write_other_func_reg(struct ql_adapter *qdev,
u32 reg, u32 reg_val)
{
u32 register_to_read;
- int status = 0;
register_to_read = MPI_NIC_REG_BLOCK
| MPI_NIC_READ
| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
| reg;
- status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
- return status;
+ return ql_write_mpi_reg(qdev, register_to_read, reg_val);
}
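
This is the first of several hunks across the qlge files that drop a status local kept only to carry the final call's return value; returning the call directly says the same thing in one line. A self-contained sketch of the shape (hypothetical stand-in functions):

#include <linux/types.h>

static int example_write_reg(u32 reg, u32 val)
{
	return 0;	/* stand-in for ql_write_mpi_reg() */
}

static int example_write_other_func(u32 reg, u32 val)
{
	/* was: int status = 0; ... status = example_write_reg(reg, val);
	 *      return status;
	 */
	return example_write_reg(reg, val);
}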
static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
@@ -499,6 +497,7 @@ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
u32 offset, u32 count)
{
int i, status = 0;
+
for (i = 0; i < count; i++, buf++) {
status = ql_read_mpi_reg(qdev, offset + i, buf);
if (status)
@@ -552,7 +551,6 @@ static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
PRB_MX_ADDR_VALID_FC_MOD, buf);
return 0;
-
}
/* Read out the routing index registers */
@@ -610,7 +608,6 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
switch (type) {
-
case 0: /* CAM */
initial_val |= MAC_ADDR_ADR;
max_index = MAC_ADDR_MAX_CAM_ENTRIES;
@@ -1204,7 +1201,6 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
err:
ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
return status;
-
}
static void ql_get_core_dump(struct ql_adapter *qdev)
@@ -1324,27 +1320,10 @@ void ql_mpi_core_to_log(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_core_to_log.work);
- u32 *tmp, count;
- int i;
- count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
- tmp = (u32 *)qdev->mpi_coredump;
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "Core is dumping to log file!\n");
-
- for (i = 0; i < count; i += 8) {
- pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
- "%.08x %.08x %.08x\n", i,
- tmp[i + 0],
- tmp[i + 1],
- tmp[i + 2],
- tmp[i + 3],
- tmp[i + 4],
- tmp[i + 5],
- tmp[i + 6],
- tmp[i + 7]);
- msleep(5);
- }
+ print_hex_dump(KERN_DEBUG, "Core is dumping to log file!\n",
+ DUMP_PREFIX_OFFSET, 32, 4, qdev->mpi_coredump,
+ sizeof(*qdev->mpi_coredump), false);
}
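
The removed loop hand-formatted eight 32-bit words per pr_err() line, sleeping between rows; print_hex_dump() from <linux/printk.h> emits the same kind of offset-prefixed hex listing in one call. A minimal sketch against a hypothetical buffer:

#include <linux/printk.h>
#include <linux/types.h>

static u32 example_coredump[64];	/* hypothetical stand-in buffer */

static void example_dump(void)
{
	/* args: log level, per-line prefix, prefix style, bytes per row
	 * (16 or 32), group size in bytes, buffer, length, and whether
	 * to append an ASCII column.
	 */
	print_hex_dump(KERN_DEBUG, "coredump: ", DUMP_PREFIX_OFFSET,
		       32, 4, example_coredump, sizeof(example_coredump),
		       false);
}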
#ifdef QL_REG_DUMP
@@ -1352,6 +1331,7 @@ static void ql_dump_intr_states(struct ql_adapter *qdev)
{
int i;
u32 value;
+
for (i = 0; i < qdev->intr_count; i++) {
ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
value = ql_read32(qdev, INTR_EN);
@@ -1437,6 +1417,7 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
{
int i;
u32 value;
+
i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (i)
return;
@@ -1525,7 +1506,7 @@ void ql_dump_regs(struct ql_adapter *qdev)
#ifdef QL_STAT_DUMP
#define DUMP_STAT(qdev, stat) \
- pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
+ pr_err("%s = %ld\n", #stat, (unsigned long)(qdev)->nic_stats.stat)
void ql_dump_stat(struct ql_adapter *qdev)
{
@@ -1578,15 +1559,16 @@ void ql_dump_stat(struct ql_adapter *qdev)
#ifdef QL_DEV_DUMP
#define DUMP_QDEV_FIELD(qdev, type, field) \
- pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
+ pr_err("qdev->%-24s = " type "\n", #field, (qdev)->field)
#define DUMP_QDEV_DMA_FIELD(qdev, field) \
pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
pr_err("%s[%d].%s = " type "\n", \
- #array, index, #field, qdev->array[index].field);
+ #array, index, #field, (qdev)->array[index].field);
void ql_dump_qdev(struct ql_adapter *qdev)
{
int i;
+
DUMP_QDEV_FIELD(qdev, "%lx", flags);
DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
DUMP_QDEV_FIELD(qdev, "%p", pdev);
@@ -1640,9 +1622,9 @@ void ql_dump_wqicb(struct wqicb *wqicb)
le16_to_cpu(wqicb->cq_id_rss));
pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
pr_err("wqicb->wq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(wqicb->addr));
+ (unsigned long long)le64_to_cpu(wqicb->addr));
pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
+ (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr));
}
void ql_dump_tx_ring(struct tx_ring *tx_ring)
@@ -1653,7 +1635,7 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
tx_ring->wq_id);
pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
pr_err("tx_ring->base_dma = 0x%llx\n",
- (unsigned long long) tx_ring->wq_base_dma);
+ (unsigned long long)tx_ring->wq_base_dma);
pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
tx_ring->cnsmr_idx_sh_reg,
tx_ring->cnsmr_idx_sh_reg
@@ -1672,6 +1654,7 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
void ql_dump_ricb(struct ricb *ricb)
{
int i;
+
pr_err("===================== Dumping ricb ===============\n");
pr_err("Dumping ricb stuff...\n");
@@ -1706,21 +1689,21 @@ void ql_dump_cqicb(struct cqicb *cqicb)
pr_err("cqicb->flags = %x\n", cqicb->flags);
pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
pr_err("cqicb->addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->addr));
+ (unsigned long long)le64_to_cpu(cqicb->addr));
pr_err("cqicb->prod_idx_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
+ (unsigned long long)le64_to_cpu(cqicb->prod_idx_addr));
pr_err("cqicb->pkt_delay = 0x%.04x\n",
le16_to_cpu(cqicb->pkt_delay));
pr_err("cqicb->irq_delay = 0x%.04x\n",
le16_to_cpu(cqicb->irq_delay));
pr_err("cqicb->lbq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
+ (unsigned long long)le64_to_cpu(cqicb->lbq_addr));
pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
le16_to_cpu(cqicb->lbq_buf_size));
pr_err("cqicb->lbq_len = 0x%.04x\n",
le16_to_cpu(cqicb->lbq_len));
pr_err("cqicb->sbq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
+ (unsigned long long)le64_to_cpu(cqicb->sbq_addr));
pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
le16_to_cpu(cqicb->sbq_buf_size));
pr_err("cqicb->sbq_len = 0x%.04x\n",
@@ -1748,7 +1731,7 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
pr_err("rx_ring->cq_base_dma = %llx\n",
- (unsigned long long) rx_ring->cq_base_dma);
+ (unsigned long long)rx_ring->cq_base_dma);
pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
@@ -1756,7 +1739,7 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
rx_ring->prod_idx_sh_reg
? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
- (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
+ (unsigned long long)rx_ring->prod_idx_sh_reg_dma);
pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
rx_ring->cnsmr_idx_db_reg);
pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
@@ -1855,7 +1838,6 @@ void ql_dump_tx_desc(struct tx_buf_desc *tbd)
pr_err("tbd->flags = %s %s\n",
tbd->len & TX_DESC_C ? "C" : ".",
tbd->len & TX_DESC_E ? "E" : ".");
-
}
void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
@@ -1980,7 +1962,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
pr_err("data_len = %d\n",
le32_to_cpu(ib_mac_rsp->data_len));
pr_err("data_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
+ (unsigned long long)le64_to_cpu(ib_mac_rsp->data_addr));
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
pr_err("rss = %x\n",
le32_to_cpu(ib_mac_rsp->rss));
@@ -1997,7 +1979,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
pr_err("hdr length = %d\n",
le32_to_cpu(ib_mac_rsp->hdr_len));
pr_err("hdr addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
+ (unsigned long long)le64_to_cpu(ib_mac_rsp->hdr_addr));
}
}
#endif
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index 790997aff995..949abd53a7a9 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -259,8 +259,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
"Error reading status register 0x%.04x.\n",
i);
goto end;
- } else
+ } else {
*iter = data;
+ }
iter++;
}
@@ -273,8 +274,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
"Error reading status register 0x%.04x.\n",
i);
goto end;
- } else
+ } else {
*iter = data;
+ }
iter++;
}
@@ -290,8 +292,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
"Error reading status register 0x%.04x.\n",
i);
goto end;
- } else
+ } else {
*iter = data;
+ }
iter++;
}
@@ -304,8 +307,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
"Error reading status register 0x%.04x.\n",
i);
goto end;
- } else
+ } else {
*iter = data;
+ }
iter++;
}
@@ -316,8 +320,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n", i);
goto end;
- } else
+ } else {
*iter = data;
+ }
end:
ql_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
@@ -488,8 +493,9 @@ static int ql_start_loopback(struct ql_adapter *qdev)
if (netif_carrier_ok(qdev->ndev)) {
set_bit(QL_LB_LINK_UP, &qdev->flags);
netif_carrier_off(qdev->ndev);
- } else
+ } else {
clear_bit(QL_LB_LINK_UP, &qdev->flags);
+ }
qdev->link_config |= CFG_LOOPBACK_PCS;
return ql_mb_set_port_cfg(qdev);
}
@@ -686,7 +692,6 @@ static int ql_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ql_adapter *qdev = netdev_priv(netdev);
- int status = 0;
if ((pause->rx_pause) && (pause->tx_pause))
qdev->link_config |= CFG_PAUSE_STD;
@@ -695,8 +700,7 @@ static int ql_set_pauseparam(struct net_device *netdev,
else
return -EINVAL;
- status = ql_mb_set_port_cfg(qdev);
- return status;
+ return ql_mb_set_port_cfg(qdev);
}
static u32 ql_get_msglevel(struct net_device *ndev)
@@ -714,6 +718,8 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
}
const struct ethtool_ops qlge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = ql_get_drvinfo,
.get_wol = ql_get_wol,
.set_wol = ql_set_wol,
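
Besides the brace fixes above, this file gains .supported_coalesce_params: since the ethtool core began validating coalescing requests centrally, a driver declares which struct ethtool_coalesce fields it honors and the core rejects requests touching any other field before the driver callback runs. A minimal sketch (hypothetical ops table, callbacks elided):

#include <linux/ethtool.h>

static const struct ethtool_ops example_ethtool_ops = {
	/* only the usecs and max-frames coalescing knobs are serviced */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	/* .get_coalesce / .set_coalesce callbacks would be wired here */
};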
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index ef8037d0b52e..c92820f07968 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -52,16 +52,12 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
- NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
-/* NETIF_MSG_TIMER | */
- NETIF_MSG_IFDOWN |
- NETIF_MSG_IFUP |
- NETIF_MSG_RX_ERR |
- NETIF_MSG_TX_ERR |
-/* NETIF_MSG_TX_QUEUED | */
-/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
-/* NETIF_MSG_PKTDATA | */
- NETIF_MSG_HW | NETIF_MSG_WOL | 0;
+ NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
+ NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP |
+ NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR |
+ NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = -1; /* defaults above */
module_param(debug, int, 0664);
@@ -143,6 +139,7 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
unsigned int wait_count = 30;
+
do {
if (!ql_sem_trylock(qdev, sem_mask))
return 0;
@@ -1210,6 +1207,7 @@ static void ql_unmap_send(struct ql_adapter *qdev,
struct tx_ring_desc *tx_ring_desc, int mapped)
{
int i;
+
for (i = 0; i < mapped; i++) {
if (i == 0 || (i == 7 && mapped > 7)) {
/*
@@ -1290,6 +1288,7 @@ static int ql_map_send(struct ql_adapter *qdev,
*/
for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
+
tbd++;
if (frag_idx == 6 && frag_cnt > 7) {
/* Let's tack on an sglist.
@@ -1649,6 +1648,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
+
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1818,6 +1818,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* eventually be in trouble.
*/
int size, i = 0;
+
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
@@ -1936,6 +1937,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
+
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2391,6 +2393,7 @@ static void qlge_restore_vlan(struct ql_adapter *qdev)
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
struct rx_ring *rx_ring = dev_id;
+
napi_schedule(&rx_ring->napi);
return IRQ_HANDLED;
}
@@ -2497,6 +2500,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
if (likely(l3_proto == htons(ETH_P_IP))) {
struct iphdr *iph = ip_hdr(skb);
+
iph->check = 0;
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
@@ -2521,6 +2525,7 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
int len;
struct iphdr *iph = ip_hdr(skb);
__sum16 *check;
+
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
mac_iocb_ptr->net_trans_offset =
@@ -3896,14 +3901,11 @@ static void ql_release_adapter_resources(struct ql_adapter *qdev)
static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
- int status = 0;
-
if (ql_alloc_mem_resources(qdev)) {
netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
return -ENOMEM;
}
- status = ql_request_irq(qdev);
- return status;
+ return ql_request_irq(qdev);
}
static int qlge_close(struct net_device *ndev)
@@ -4265,6 +4267,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+
ql_queue_asic_error(qdev);
}
@@ -4273,6 +4276,7 @@ static void ql_asic_reset_work(struct work_struct *work)
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, asic_reset_work.work);
int status;
+
rtnl_lock();
status = ql_adapter_down(qdev);
if (status)
@@ -4344,6 +4348,7 @@ static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
static int ql_get_board_info(struct ql_adapter *qdev)
{
int status;
+
qdev->func =
(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
if (qdev->func > 3)
@@ -4652,6 +4657,7 @@ static void qlge_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
+
del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
unregister_netdev(ndev);
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index bb03b2fa7233..60c08d9cc034 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -90,9 +90,7 @@ exit:
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
- int status;
- status = ql_write_mpi_reg(qdev, 0x00001010, 1);
- return status;
+ return ql_write_mpi_reg(qdev, 0x00001010, 1);
}
 /* Determine if we are in charge of the firmware. If
@@ -237,6 +235,7 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
{
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
+
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
@@ -255,6 +254,7 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int status;
+
mbcp->out_count = 2;
status = ql_get_mb_sts(qdev, mbcp);
@@ -353,6 +353,7 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
else {
int i;
+
netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
for (i = 0; i < mbcp->out_count; i++)
netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
@@ -912,6 +913,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
int status = -ETIMEDOUT;
long wait_time = 1 * HZ;
struct mbox_params *mbcp = &qdev->idc_mbc;
+
do {
/* Wait here for the command to complete
* via the IDC process.
@@ -1096,6 +1098,7 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev)
static int ql_set_port_cfg(struct ql_adapter *qdev)
{
int status;
+
status = ql_mb_set_port_cfg(qdev);
if (status)
return status;
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 815dfee11968..f69e9453ad45 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -199,7 +199,7 @@ _next:
rtw_free_cmd_obj(pcmd);
} else {
/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
- pcmd_callback(pcmd->padapter, pcmd);/* need conider that free cmd_obj in rtw_cmd_callback */
+ pcmd_callback(pcmd->padapter, pcmd); /* need to consider freeing cmd_obj in rtw_cmd_callback */
}
} else {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("%s: cmdcode = 0x%x callback not defined!\n", __func__, pcmd->cmdcode));
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 6c2fe1a112ac..d0e41f2ef1ce 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -15,7 +15,7 @@ int proc_get_drv_version(char *page, char **start,
{
int len = 0;
- len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION);
+ len += scnprintf(page + len, count - len, "%s\n", DRIVERVERSION);
*eof = 1;
return len;
@@ -86,16 +86,16 @@ int proc_get_read_reg(char *page, char **start,
switch (proc_get_read_len) {
case 1:
- len += snprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(padapter, proc_get_read_addr));
+ len += scnprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(padapter, proc_get_read_addr));
break;
case 2:
- len += snprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16(padapter, proc_get_read_addr));
+ len += scnprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16(padapter, proc_get_read_addr));
break;
case 4:
- len += snprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32(padapter, proc_get_read_addr));
+ len += scnprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32(padapter, proc_get_read_addr));
break;
default:
- len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len);
+ len += scnprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len);
break;
}
@@ -138,7 +138,7 @@ int proc_get_adapter_state(char *page, char **start,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
int len = 0;
- len += snprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n",
+ len += scnprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n",
padapter->bSurpriseRemoved, padapter->bDriverStopped);
*eof = 1;
@@ -170,11 +170,11 @@ int proc_get_best_channel(char *page, char **start,
}
/* debug */
- len += snprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n",
+ len += scnprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n",
pmlmeext->channel_set[i].ChannelNum, pmlmeext->channel_set[i].rx_count);
}
- len += snprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G);
+ len += scnprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G);
*eof = 1;
return len;
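
The snprintf()-to-scnprintf() swap matters for exactly this accumulation pattern: snprintf() returns the length the output would have had, so once a write is truncated len can exceed count and the next call receives a bogus (negative, then size_t-wrapped) size, whereas scnprintf() returns the bytes actually stored. Sketch of the safe form (hypothetical filler):

#include <linux/kernel.h>

static int example_fill_page(char *page, int count)
{
	int len = 0;

	/* each call advances len only by what was really written, so
	 * page + len never walks past the end of the buffer
	 */
	len += scnprintf(page + len, count - len, "%s\n", "line one");
	len += scnprintf(page + len, count - len, "%s\n", "line two");
	return len;
}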
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 29f615443e8f..e186982d5908 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -236,14 +236,10 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->ssid.ssid_length, pdev_network->ssid.ssid, &sz);
/* supported rates */
- if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) {
- if (pdev_network->Configuration.DSConfig > 14)
- wireless_mode = WIRELESS_11A_5N;
- else
- wireless_mode = WIRELESS_11BG_24N;
- } else {
+ if (pregistrypriv->wireless_mode == WIRELESS_11ABGN)
+ wireless_mode = WIRELESS_11BG_24N;
+ else
wireless_mode = pregistrypriv->wireless_mode;
- }
rtw_set_supported_rate(pdev_network->SupportedRates, wireless_mode);
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index e764436e120f..9de2d421f6b1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -149,7 +149,7 @@ static void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
lifetime = 1;
if (!isfreeall) {
- delta_time = (curr_time - pnetwork->last_scanned)/HZ;
+ delta_time = (curr_time - pnetwork->last_scanned) / HZ;
if (delta_time < lifetime)/* unit:sec */
return;
}
@@ -249,8 +249,8 @@ void rtw_generate_random_ibss(u8 *pibss)
pibss[1] = 0x11;
pibss[2] = 0x87;
pibss[3] = (u8)(curtime & 0xff);/* p[0]; */
- pibss[4] = (u8)((curtime>>8) & 0xff);/* p[1]; */
- pibss[5] = (u8)((curtime>>16) & 0xff);/* p[2]; */
+ pibss[4] = (u8)((curtime >> 8) & 0xff);/* p[1]; */
+ pibss[5] = (u8)((curtime >> 16) & 0xff);/* p[2]; */
}
u8 *rtw_get_capability_from_ie(u8 *ie)
@@ -357,9 +357,9 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
rssi_final = rssi_ori;
} else {
if (sq_smp != 101) { /* from the right channel */
- ss_final = ((u32)(src->PhyInfo.SignalStrength)+(u32)(dst->PhyInfo.SignalStrength)*4)/5;
- sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5;
- rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ ss_final = ((u32)(src->PhyInfo.SignalStrength) + (u32)(dst->PhyInfo.SignalStrength) * 4) / 5;
+ sq_final = ((u32)(src->PhyInfo.SignalQuality) + (u32)(dst->PhyInfo.SignalQuality) * 4) / 5;
+ rssi_final = (src->Rssi + dst->Rssi * 4) / 5;
} else {
/* bss info not receiving from the right channel, use the original RX signal infos */
ss_final = dst->PhyInfo.SignalStrength;
@@ -510,7 +510,7 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
privacy = pnetwork->network.Privacy;
if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- if (rtw_get_wps_ie(pnetwork->network.ies+_FIXED_IE_LENGTH_, pnetwork->network.ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen))
+ if (rtw_get_wps_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, pnetwork->network.ie_length - _FIXED_IE_LENGTH_, NULL, &wps_ielen))
return true;
else
return false;
@@ -924,8 +924,8 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
/* update fw_state will clr _FW_UNDER_LINKING here indirectly */
switch (pnetwork->network.InfrastructureMode) {
case Ndis802_11Infrastructure:
- if (pmlmepriv->fw_state&WIFI_UNDER_WPS)
- pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS;
+ if (pmlmepriv->fw_state & WIFI_UNDER_WPS)
+ pmlmepriv->fw_state = WIFI_STATION_STATE | WIFI_UNDER_WPS;
else
pmlmepriv->fw_state = WIFI_STATION_STATE;
break;
@@ -1097,14 +1097,14 @@ static u8 search_max_mac_id(struct adapter *padapter)
#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
for (aid = pstapriv->max_num_sta; aid > 0; aid--) {
- if (pstapriv->sta_aid[aid-1])
+ if (pstapriv->sta_aid[aid - 1])
break;
}
mac_id = aid + 1;
} else
#endif
{/* adhoc id = 31~2 */
- for (mac_id = NUM_STA-1; mac_id >= IBSS_START_MAC_ID; mac_id--) {
+ for (mac_id = NUM_STA - 1; mac_id >= IBSS_START_MAC_ID; mac_id--) {
if (pmlmeinfo->FW_sta_info[mac_id].status == 1)
break;
}
@@ -1123,7 +1123,7 @@ void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta)
macid = search_max_mac_id(adapter);
rtw_hal_set_hwreg(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid);
- media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE:1 connect */
+ media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE:1 connect */
rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
}
@@ -1213,7 +1213,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
if (mac_id >= 0) {
u16 media_status;
- media_status = (mac_id<<8)|0; /* MACID|OPMODE:0 means disconnect */
+ media_status = (mac_id << 8) | 0; /* MACID|OPMODE:0 means disconnect */
 /* for STA, AP, ADHOC mode, report disconnect status to FW */
rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
}
@@ -1640,7 +1640,7 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
for (i = 12; i < in_len; i += (in_ie[i + 1] + 2) /* to the next IE element */) {
ielength = initial_out_len;
- if (in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50 && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02 && i+5 < in_len) {
+ if (i + 5 < in_len && in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && in_ie[i + 5] == 0x02) {
/* WMM element ID and OUI */
/* Append WMM IE to the last index of out_ie */
@@ -1734,13 +1734,13 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
authmode = _WPA2_IE_ID_;
if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len);
+ memcpy(out_ie + ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len);
ielength += psecuritypriv->wps_ie_len;
} else if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) {
/* copy RSN or SSN */
- memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1]+2);
- ielength += psecuritypriv->supplicant_ie[1]+2;
+ memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1] + 2);
+ ielength += psecuritypriv->supplicant_ie[1] + 2;
rtw_report_sec_ie(adapter, authmode, psecuritypriv->supplicant_ie);
}
@@ -1865,7 +1865,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
phtpriv->ht_option = false;
- p = rtw_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12);
+ p = rtw_get_ie(in_ie + 12, _HT_CAPABILITY_IE_, &ielen, in_len - 12);
if (p && ielen > 0) {
struct ieee80211_ht_cap ht_cap;
@@ -1904,16 +1904,16 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
else
ht_cap.ampdu_params_info |= IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00;
- rtw_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
+ rtw_set_ie(out_ie + out_len, _HT_CAPABILITY_IE_,
sizeof(struct ieee80211_ht_cap),
(unsigned char *)&ht_cap, pout_len);
phtpriv->ht_option = true;
- p = rtw_get_ie(in_ie+12, _HT_ADD_INFO_IE_, &ielen, in_len-12);
+ p = rtw_get_ie(in_ie + 12, _HT_ADD_INFO_IE_, &ielen, in_len - 12);
if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
out_len = *pout_len;
- rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2, pout_len);
+ rtw_set_ie(out_ie + out_len, _HT_ADD_INFO_IE_, ielen, p + 2, pout_len);
}
}
return phtpriv->ht_option;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 36841d20c3c1..04897cd48370 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1151,7 +1151,7 @@ static void issue_assocreq(struct adapter *padapter)
if (!padapter->registrypriv.wifi_spec) {
/* Commented by Kurt 20110629 */
/* In some older APs, WPS handshake */
- /* would be fail if we append vender extensions informations to AP */
+ /* would fail if we append vendor extension information to the AP */
if (!memcmp(pIE->data, WPS_OUI, 4))
pIE->Length = 14;
}
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
index 95f1b1431373..ebe19e076ff2 100644
--- a/drivers/staging/rtl8188eu/hal/hal_com.c
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -18,26 +18,26 @@ void dump_chip_info(struct HAL_VERSION chip_vers)
uint cnt = 0;
char buf[128];
- cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8188E_");
- cnt += sprintf((buf+cnt), "%s_", chip_vers.ChipType == NORMAL_CHIP ?
+ cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_");
+ cnt += sprintf((buf + cnt), "%s_", chip_vers.ChipType == NORMAL_CHIP ?
"Normal_Chip" : "Test_Chip");
- cnt += sprintf((buf+cnt), "%s_", chip_vers.VendorType == CHIP_VENDOR_TSMC ?
+ cnt += sprintf((buf + cnt), "%s_", chip_vers.VendorType == CHIP_VENDOR_TSMC ?
"TSMC" : "UMC");
if (chip_vers.CUTVersion == A_CUT_VERSION)
- cnt += sprintf((buf+cnt), "A_CUT_");
+ cnt += sprintf((buf + cnt), "A_CUT_");
else if (chip_vers.CUTVersion == B_CUT_VERSION)
- cnt += sprintf((buf+cnt), "B_CUT_");
+ cnt += sprintf((buf + cnt), "B_CUT_");
else if (chip_vers.CUTVersion == C_CUT_VERSION)
- cnt += sprintf((buf+cnt), "C_CUT_");
+ cnt += sprintf((buf + cnt), "C_CUT_");
else if (chip_vers.CUTVersion == D_CUT_VERSION)
- cnt += sprintf((buf+cnt), "D_CUT_");
+ cnt += sprintf((buf + cnt), "D_CUT_");
else if (chip_vers.CUTVersion == E_CUT_VERSION)
- cnt += sprintf((buf+cnt), "E_CUT_");
+ cnt += sprintf((buf + cnt), "E_CUT_");
else
- cnt += sprintf((buf+cnt), "UNKNOWN_CUT(%d)_",
+ cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_",
chip_vers.CUTVersion);
- cnt += sprintf((buf+cnt), "1T1R_");
- cnt += sprintf((buf+cnt), "RomVer(0)\n");
+ cnt += sprintf((buf + cnt), "1T1R_");
+ cnt += sprintf((buf + cnt), "RomVer(0)\n");
pr_info("%s", buf);
}
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 7489491f5aaa..698377ea60ee 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -342,7 +342,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
u8 CurrentIGI = pDM_DigTable->CurIGValue;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG()==>\n"));
- if ((!(pDM_Odm->SupportAbility&ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility&ODM_BB_FA_CNT))) {
+ if ((!(pDM_Odm->SupportAbility & ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))) {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
("odm_DIG() Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n"));
return;
@@ -419,7 +419,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
}
if (pDM_DigTable->LargeFAHit >= 3) {
- if ((pDM_DigTable->ForbiddenIGI+1) > pDM_DigTable->rx_gain_range_max)
+ if ((pDM_DigTable->ForbiddenIGI + 1) > pDM_DigTable->rx_gain_range_max)
pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
else
pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
@@ -432,7 +432,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
pDM_DigTable->Recover_cnt--;
} else {
if (pDM_DigTable->LargeFAHit < 3) {
- if ((pDM_DigTable->ForbiddenIGI-1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
+ if ((pDM_DigTable->ForbiddenIGI - 1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: At Lower Bound\n"));
@@ -518,24 +518,24 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
phy_set_bb_reg(adapter, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */
ret_value = phy_query_bb_reg(adapter, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
- FalseAlmCnt->Cnt_SB_Search_fail = (ret_value & 0xffff0000)>>16;
+ FalseAlmCnt->Cnt_Fast_Fsync = (ret_value & 0xffff);
+ FalseAlmCnt->Cnt_SB_Search_fail = (ret_value & 0xffff0000) >> 16;
ret_value = phy_query_bb_reg(adapter, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
- FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
- FalseAlmCnt->Cnt_Parity_Fail = (ret_value & 0xffff0000)>>16;
+ FalseAlmCnt->Cnt_OFDM_CCA = (ret_value & 0xffff);
+ FalseAlmCnt->Cnt_Parity_Fail = (ret_value & 0xffff0000) >> 16;
ret_value = phy_query_bb_reg(adapter, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
- FalseAlmCnt->Cnt_Crc8_fail = (ret_value & 0xffff0000)>>16;
+ FalseAlmCnt->Cnt_Rate_Illegal = (ret_value & 0xffff);
+ FalseAlmCnt->Cnt_Crc8_fail = (ret_value & 0xffff0000) >> 16;
ret_value = phy_query_bb_reg(adapter, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Mcs_fail = (ret_value & 0xffff);
FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
ret_value = phy_query_bb_reg(adapter, ODM_REG_SC_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_BW_LSC = (ret_value&0xffff);
- FalseAlmCnt->Cnt_BW_USC = (ret_value & 0xffff0000)>>16;
+ FalseAlmCnt->Cnt_BW_LSC = (ret_value & 0xffff);
+ FalseAlmCnt->Cnt_BW_USC = (ret_value & 0xffff0000) >> 16;
/* hold cck counter */
phy_set_bb_reg(adapter, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
@@ -544,10 +544,10 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
ret_value = phy_query_bb_reg(adapter, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
FalseAlmCnt->Cnt_Cck_fail = ret_value;
ret_value = phy_query_bb_reg(adapter, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
- FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff)<<8;
+ FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff) << 8;
ret_value = phy_query_bb_reg(adapter, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
+ FalseAlmCnt->Cnt_CCK_CCA = ((ret_value & 0xFF) << 8) | ((ret_value & 0xFF00) >> 8);
FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
FalseAlmCnt->Cnt_SB_Search_fail +
@@ -583,14 +583,14 @@ void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
u8 CurCCK_CCAThres;
struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
- if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD|ODM_BB_FA_CNT)))
+ if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD | ODM_BB_FA_CNT)))
return;
if (pDM_Odm->ExtLNA)
return;
if (pDM_Odm->bLinked) {
if (pDM_Odm->RSSI_Min > 25) {
CurCCK_CCAThres = 0xcd;
- } else if ((pDM_Odm->RSSI_Min <= 25) && (pDM_Odm->RSSI_Min > 10)) {
+ } else if (pDM_Odm->RSSI_Min > 10) {
CurCCK_CCAThres = 0x83;
} else {
if (FalseAlmCnt->Cnt_Cck_fail > 1000)
@@ -630,10 +630,10 @@ void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
Rssi_Low_bound = 45;
}
if (pDM_PSTable->initialize == 0) {
- pDM_PSTable->Reg874 = (phy_query_bb_reg(adapter, 0x874, bMaskDWord)&0x1CC000)>>14;
- pDM_PSTable->RegC70 = (phy_query_bb_reg(adapter, 0xc70, bMaskDWord) & BIT(3))>>3;
- pDM_PSTable->Reg85C = (phy_query_bb_reg(adapter, 0x85c, bMaskDWord)&0xFF000000)>>24;
- pDM_PSTable->RegA74 = (phy_query_bb_reg(adapter, 0xa74, bMaskDWord)&0xF000)>>12;
+ pDM_PSTable->Reg874 = (phy_query_bb_reg(adapter, 0x874, bMaskDWord) & 0x1CC000) >> 14;
+ pDM_PSTable->RegC70 = (phy_query_bb_reg(adapter, 0xc70, bMaskDWord) & BIT(3)) >> 3;
+ pDM_PSTable->Reg85C = (phy_query_bb_reg(adapter, 0x85c, bMaskDWord) & 0xFF000000) >> 24;
+ pDM_PSTable->RegA74 = (phy_query_bb_reg(adapter, 0xa74, bMaskDWord) & 0xF000) >> 12;
pDM_PSTable->initialize = 1;
}
@@ -718,13 +718,13 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u
else
rate_bitmap = 0x0000000f;
break;
- case (ODM_WM_A|ODM_WM_G):
+ case (ODM_WM_A | ODM_WM_G):
if (rssi_level == DM_RATR_STA_HIGH)
rate_bitmap = 0x00000f00;
else
rate_bitmap = 0x00000ff0;
break;
- case (ODM_WM_B|ODM_WM_G):
+ case (ODM_WM_B | ODM_WM_G):
if (rssi_level == DM_RATR_STA_HIGH)
rate_bitmap = 0x00000f00;
else if (rssi_level == DM_RATR_STA_MIDDLE)
@@ -732,8 +732,8 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u
else
rate_bitmap = 0x00000ff5;
break;
- case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
- case (ODM_WM_A|ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
+ case (ODM_WM_B | ODM_WM_G | ODM_WM_N24G):
+ case (ODM_WM_A | ODM_WM_B | ODM_WM_G | ODM_WM_N24G):
if (rssi_level == DM_RATR_STA_HIGH) {
rate_bitmap = 0x000f0000;
} else if (rssi_level == DM_RATR_STA_MIDDLE) {
@@ -911,7 +911,7 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
if (psta->rssi_stat.UndecoratedSmoothedPWDB > tmpEntryMaxPWDB)
tmpEntryMaxPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
if (psta->rssi_stat.UndecoratedSmoothedPWDB != (-1))
- PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB<<16));
+ PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB << 16));
}
}
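
The else-if change in odm_CCKPacketDetectionThresh() above is behavior-preserving: the first branch already handles RSSI_Min > 25, so the second test only needs RSSI_Min > 10. A minimal standalone sketch of the equivalent cascade (the values in the final low-RSSI branch are assumptions for illustration, since that hunk is truncated here):

#include <stdint.h>

static uint8_t cck_cca_thresh(int rssi_min, uint32_t cck_fail)
{
	if (rssi_min > 25)
		return 0xcd;
	if (rssi_min > 10)	/* the dropped "<= 25" test is implied by the branch above */
		return 0x83;
	return cck_fail > 1000 ? 0x83 : 0x40;	/* assumed fallback values */
}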
diff --git a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
index d5a9ac51e907..a6f2731b076d 100644
--- a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
@@ -103,33 +103,33 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
switch (LNA_idx) {
case 7:
if (VGA_idx <= 27)
- rx_pwr_all = -100 + 2 * (27-VGA_idx); /* VGA_idx = 27~2 */
+ rx_pwr_all = -100 + 2 * (27 - VGA_idx); /* VGA_idx = 27~2 */
else
rx_pwr_all = -100;
break;
case 6:
- rx_pwr_all = -48 + 2 * (2-VGA_idx); /* VGA_idx = 2~0 */
+ rx_pwr_all = -48 + 2 * (2 - VGA_idx); /* VGA_idx = 2~0 */
break;
case 5:
- rx_pwr_all = -42 + 2 * (7-VGA_idx); /* VGA_idx = 7~5 */
+ rx_pwr_all = -42 + 2 * (7 - VGA_idx); /* VGA_idx = 7~5 */
break;
case 4:
- rx_pwr_all = -36 + 2 * (7-VGA_idx); /* VGA_idx = 7~4 */
+ rx_pwr_all = -36 + 2 * (7 - VGA_idx); /* VGA_idx = 7~4 */
break;
case 3:
- rx_pwr_all = -24 + 2 * (7-VGA_idx); /* VGA_idx = 7~0 */
+ rx_pwr_all = -24 + 2 * (7 - VGA_idx); /* VGA_idx = 7~0 */
break;
case 2:
if (cck_highpwr)
- rx_pwr_all = -12 + 2 * (5-VGA_idx); /* VGA_idx = 5~0 */
+ rx_pwr_all = -12 + 2 * (5 - VGA_idx); /* VGA_idx = 5~0 */
else
- rx_pwr_all = -6 + 2 * (5-VGA_idx);
+ rx_pwr_all = -6 + 2 * (5 - VGA_idx);
break;
case 1:
- rx_pwr_all = 8-2 * VGA_idx;
+ rx_pwr_all = 8 - 2 * VGA_idx;
break;
case 0:
- rx_pwr_all = 14-2 * VGA_idx;
+ rx_pwr_all = 14 - 2 * VGA_idx;
break;
default:
break;
@@ -138,7 +138,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
PWDB_ALL = odm_query_rxpwrpercentage(rx_pwr_all);
if (!cck_highpwr) {
if (PWDB_ALL >= 80)
- PWDB_ALL = ((PWDB_ALL-80)<<1) + ((PWDB_ALL-80)>>1) + 80;
+ PWDB_ALL = ((PWDB_ALL - 80) << 1) + ((PWDB_ALL - 80) >> 1) + 80;
else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
PWDB_ALL += 3;
if (PWDB_ALL > 100)
@@ -162,7 +162,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
else if (SQ_rpt < 20)
SQ = 100;
else
- SQ = ((64-SQ_rpt) * 100) / 44;
+ SQ = ((64 - SQ_rpt) * 100) / 44;
}
pPhyInfo->SignalQuality = SQ;
pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = SQ;
@@ -200,8 +200,8 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->RxMIMOSignalStrength[i] = (u8)RSSI;
/* Get Rx snr value in DB */
- pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
- dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
+ pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2);
+ dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2);
}
/* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
@@ -280,8 +280,8 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
if (pPktinfo->bPacketToSelf) {
- antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
- (pDM_FatTable->antsel_rx_keep_1<<1) |
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
+ (pDM_FatTable->antsel_rx_keep_1 << 1) |
pDM_FatTable->antsel_rx_keep_0;
pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
@@ -289,8 +289,8 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
}
} else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
- antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
- (pDM_FatTable->antsel_rx_keep_1<<1) | pDM_FatTable->antsel_rx_keep_0;
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
+ (pDM_FatTable->antsel_rx_keep_1 << 1) | pDM_FatTable->antsel_rx_keep_0;
rtl88eu_dm_ant_sel_statistics(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
}
}
@@ -328,17 +328,17 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
} else {
if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedOFDM) {
UndecoratedSmoothedOFDM =
- (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor-1)) +
+ (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor - 1)) +
(RSSI_Ave)) / (Rx_Smooth_Factor);
UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM + 1;
} else {
UndecoratedSmoothedOFDM =
- (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor-1)) +
+ (((UndecoratedSmoothedOFDM) * (Rx_Smooth_Factor - 1)) +
(RSSI_Ave)) / (Rx_Smooth_Factor);
}
}
- pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap<<1) | BIT(0);
+ pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap << 1) | BIT(0);
} else {
RSSI_Ave = pPhyInfo->RxPWDBAll;
@@ -349,16 +349,16 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
} else {
if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedCCK) {
UndecoratedSmoothedCCK =
- ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor-1)) +
+ ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor - 1)) +
pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor;
UndecoratedSmoothedCCK = UndecoratedSmoothedCCK + 1;
} else {
UndecoratedSmoothedCCK =
- ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor-1)) +
+ ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor - 1)) +
pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor;
}
}
- pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap<<1;
+ pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap << 1;
}
/* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */
if (pEntry->rssi_stat.ValidBit >= 64)
@@ -367,16 +367,16 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
pEntry->rssi_stat.ValidBit++;
for (i = 0; i < pEntry->rssi_stat.ValidBit; i++)
- OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap>>i) & BIT(0);
+ OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap >> i) & BIT(0);
if (pEntry->rssi_stat.ValidBit == 64) {
Weighting = min_t(u32, OFDM_pkt << 4, 64);
- UndecoratedSmoothedPWDB = (Weighting * UndecoratedSmoothedOFDM + (64-Weighting) * UndecoratedSmoothedCCK)>>6;
+ UndecoratedSmoothedPWDB = (Weighting * UndecoratedSmoothedOFDM + (64 - Weighting) * UndecoratedSmoothedCCK) >> 6;
} else {
if (pEntry->rssi_stat.ValidBit != 0)
UndecoratedSmoothedPWDB = (OFDM_pkt * UndecoratedSmoothedOFDM +
- (pEntry->rssi_stat.ValidBit-OFDM_pkt) *
- UndecoratedSmoothedCCK)/pEntry->rssi_stat.ValidBit;
+ (pEntry->rssi_stat.ValidBit - OFDM_pkt) *
+ UndecoratedSmoothedCCK) / pEntry->rssi_stat.ValidBit;
else
UndecoratedSmoothedPWDB = 0;
}
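
The UndecoratedSmoothed* updates above are an integer exponential moving average, new = (old * (N - 1) + sample) / N, nudged up by one when the incoming sample is higher so a rising signal is tracked faster. A minimal sketch, assuming the driver's usual smoothing factor of 32:

#include <stdint.h>

#define RX_SMOOTH_FACTOR 32	/* assumption for illustration */

static int32_t smooth_rssi(int32_t old, int32_t sample)
{
	int32_t next = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR;

	if (sample > old)
		next += 1;	/* round up when the signal rises */
	return next;
}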
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index afaf9e55195a..b9025815b682 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -69,10 +69,10 @@ static u32 rf_serial_read(struct adapter *adapt,
bMaskDWord);
tmplong2 = (tmplong2 & (~bLSSIReadAddress)) |
- (offset<<23) | bLSSIReadEdge;
+ (offset << 23) | bLSSIReadEdge;
phy_set_bb_reg(adapt, rFPGA0_XA_HSSIParameter2, bMaskDWord,
- tmplong&(~bLSSIReadEdge));
+ tmplong & (~bLSSIReadEdge));
udelay(10);
phy_set_bb_reg(adapt, phyreg->rfHSSIPara2, bMaskDWord, tmplong2);
@@ -102,7 +102,7 @@ static void rf_serial_write(struct adapter *adapt,
struct bb_reg_def *phyreg = &adapt->HalData->PHYRegDef[rfpath];
offset &= 0xff;
- data_and_addr = ((offset<<20) | (data&0x000fffff)) & 0x0fffffff;
+ data_and_addr = ((offset << 20) | (data & 0x000fffff)) & 0x0fffffff;
phy_set_bb_reg(adapt, phyreg->rf3wireOffset, bMaskDWord, data_and_addr);
}
@@ -143,20 +143,20 @@ static void get_tx_power_index(struct adapter *adapt, u8 channel, u8 *cck_pwr,
for (TxCount = 0; TxCount < path_nums; TxCount++) {
if (TxCount == RF_PATH_A) {
cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index];
- ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
+ ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index] +
hal_data->OFDM_24G_Diff[TxCount][RF_PATH_A];
- bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
+ bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index] +
hal_data->BW20_24G_Diff[TxCount][RF_PATH_A];
bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index];
} else if (TxCount == RF_PATH_B) {
cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index];
- ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_A][index]+
+ ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index] +
+ hal_data->BW20_24G_Diff[RF_PATH_A][index] +
hal_data->BW20_24G_Diff[TxCount][index];
- bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[TxCount][RF_PATH_A]+
+ bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index] +
+ hal_data->BW20_24G_Diff[TxCount][RF_PATH_A] +
hal_data->BW20_24G_Diff[TxCount][index];
bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index];
}
@@ -205,7 +205,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
/* Set MAC register */
reg_bw_opmode = usb_read8(adapt, REG_BWOPMODE);
- reg_prsr_rsc = usb_read8(adapt, REG_RRSR+2);
+ reg_prsr_rsc = usb_read8(adapt, REG_RRSR + 2);
switch (hal_data->CurrentChannelBW) {
case HT_CHANNEL_WIDTH_20:
@@ -215,9 +215,9 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
case HT_CHANNEL_WIDTH_40:
reg_bw_opmode &= ~BW_OPMODE_20MHZ;
usb_write8(adapt, REG_BWOPMODE, reg_bw_opmode);
- reg_prsr_rsc = (reg_prsr_rsc&0x90) |
- (hal_data->nCur40MhzPrimeSC<<5);
- usb_write8(adapt, REG_RRSR+2, reg_prsr_rsc);
+ reg_prsr_rsc = (reg_prsr_rsc & 0x90) |
+ (hal_data->nCur40MhzPrimeSC << 5);
+ usb_write8(adapt, REG_RRSR + 2, reg_prsr_rsc);
break;
default:
break;
@@ -236,7 +236,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
* These settings are required only for 40MHz
*/
phy_set_bb_reg(adapt, rCCK0_System, bCCKSideBand,
- (hal_data->nCur40MhzPrimeSC>>1));
+ (hal_data->nCur40MhzPrimeSC >> 1));
phy_set_bb_reg(adapt, rOFDM1_LSTF, 0xC00,
hal_data->nCur40MhzPrimeSC);
phy_set_bb_reg(adapt, 0x818, (BIT(26) | BIT(27)),
@@ -337,8 +337,8 @@ void rtl88eu_dm_txpower_track_adjust(struct odm_dm_struct *dm_odm, u8 type,
if (pwr_value >= ODM_TXPWRTRACK_MAX_IDX_88E && *direction == 1)
pwr_value = ODM_TXPWRTRACK_MAX_IDX_88E;
- *out_write_val = pwr_value | (pwr_value<<8) | (pwr_value<<16) |
- (pwr_value<<24);
+ *out_write_val = pwr_value | (pwr_value << 8) | (pwr_value << 16) |
+ (pwr_value << 24);
}
static void dm_txpwr_track_setpwr(struct odm_dm_struct *dm_odm)
@@ -389,9 +389,9 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
if (thermal_val) {
/* Query OFDM path A default setting */
- ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord) & bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d == (OFDMSwingTable[i]&bMaskOFDM_D)) {
+ if (ele_d == (OFDMSwingTable[i] & bMaskOFDM_D)) {
ofdm_index_old[0] = (u8)i;
dm_odm->BbSwingIdxOfdmBase = (u8)i;
break;
@@ -472,18 +472,18 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
}
}
if (offset >= index_mapping_NUM_88E)
- offset = index_mapping_NUM_88E-1;
+ offset = index_mapping_NUM_88E - 1;
/* Updating ofdm_index values with new OFDM / CCK offset */
ofdm_index[0] = dm_odm->RFCalibrateInfo.OFDM_index[0] + ofdm_index_mapping[j][offset];
- if (ofdm_index[0] > OFDM_TABLE_SIZE_92D-1)
- ofdm_index[0] = OFDM_TABLE_SIZE_92D-1;
+ if (ofdm_index[0] > OFDM_TABLE_SIZE_92D - 1)
+ ofdm_index[0] = OFDM_TABLE_SIZE_92D - 1;
else if (ofdm_index[0] < ofdm_min_index)
ofdm_index[0] = ofdm_min_index;
cck_index = dm_odm->RFCalibrateInfo.CCK_index + ofdm_index_mapping[j][offset];
- if (cck_index > CCK_TABLE_SIZE-1)
- cck_index = CCK_TABLE_SIZE-1;
+ if (cck_index > CCK_TABLE_SIZE - 1)
+ cck_index = CCK_TABLE_SIZE - 1;
else if (cck_index < 0)
cck_index = 0;
@@ -548,8 +548,8 @@ static u8 phy_path_a_iqk(struct adapter *adapt, bool config_pathb)
reg_e9c = phy_query_bb_reg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
if (!(reg_eac & BIT(28)) &&
- (((reg_e94 & 0x03FF0000)>>16) != 0x142) &&
- (((reg_e9c & 0x03FF0000)>>16) != 0x42))
+ (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
result |= 0x01;
return result;
}
@@ -600,13 +600,13 @@ static u8 phy_path_a_rx_iqk(struct adapter *adapt, bool configPathB)
reg_e9c = phy_query_bb_reg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
if (!(reg_eac & BIT(28)) &&
- (((reg_e94 & 0x03FF0000)>>16) != 0x142) &&
- (((reg_e9c & 0x03FF0000)>>16) != 0x42))
+ (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
result |= 0x01;
else /* if Tx not OK, ignore Rx */
return result;
- u4tmp = 0x80007C00 | (reg_e94&0x3FF0000) | ((reg_e9c&0x3FF0000) >> 16);
+ u4tmp = 0x80007C00 | (reg_e94 & 0x3FF0000) | ((reg_e9c & 0x3FF0000) >> 16);
phy_set_bb_reg(adapt, rTx_IQK, bMaskDWord, u4tmp);
/* 1 RX IQK */
@@ -648,8 +648,8 @@ static u8 phy_path_a_rx_iqk(struct adapter *adapt, bool configPathB)
phy_set_rf_reg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
if (!(reg_eac & BIT(27)) && /* if Tx is OK, check whether Rx is OK */
- (((reg_ea4 & 0x03FF0000)>>16) != 0x132) &&
- (((reg_eac & 0x03FF0000)>>16) != 0x36))
+ (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_eac & 0x03FF0000) >> 16) != 0x36))
result |= 0x02;
else
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -677,15 +677,15 @@ static u8 phy_path_b_iqk(struct adapter *adapt)
regecc = phy_query_bb_reg(adapt, rRx_Power_After_IQK_B_2, bMaskDWord);
if (!(regeac & BIT(31)) &&
- (((regeb4 & 0x03FF0000)>>16) != 0x142) &&
- (((regebc & 0x03FF0000)>>16) != 0x42))
+ (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((regebc & 0x03FF0000) >> 16) != 0x42))
result |= 0x01;
else
return result;
if (!(regeac & BIT(30)) &&
- (((regec4 & 0x03FF0000)>>16) != 0x132) &&
- (((regecc & 0x03FF0000)>>16) != 0x36))
+ (((regec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((regecc & 0x03FF0000) >> 16) != 0x36))
result |= 0x02;
else
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION,
@@ -711,7 +711,7 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8],
tx0_a = (x * oldval_0) >> 8;
phy_set_bb_reg(adapt, rOFDM0_XATxIQImbalance, 0x3FF, tx0_a);
phy_set_bb_reg(adapt, rOFDM0_ECCAThreshold, BIT(31),
- ((x * oldval_0>>7) & 0x1));
+ ((x * oldval_0 >> 7) & 0x1));
y = result[final_candidate][1];
if ((y & 0x00000200) != 0)
@@ -719,11 +719,11 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8],
tx0_c = (y * oldval_0) >> 8;
phy_set_bb_reg(adapt, rOFDM0_XCTxAFE, 0xF0000000,
- ((tx0_c&0x3C0)>>6));
+ ((tx0_c & 0x3C0) >> 6));
phy_set_bb_reg(adapt, rOFDM0_XATxIQImbalance, 0x003F0000,
- (tx0_c&0x3F));
+ (tx0_c & 0x3F));
phy_set_bb_reg(adapt, rOFDM0_ECCAThreshold, BIT(29),
- ((y * oldval_0>>7) & 0x1));
+ ((y * oldval_0 >> 7) & 0x1));
if (txonly)
return;
@@ -757,7 +757,7 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8],
phy_set_bb_reg(adapt, rOFDM0_XBTxIQImbalance, 0x3FF, tx1_a);
phy_set_bb_reg(adapt, rOFDM0_ECCAThreshold, BIT(27),
- ((x * oldval_1>>7) & 0x1));
+ ((x * oldval_1 >> 7) & 0x1));
y = result[final_candidate][5];
if ((y & 0x00000200) != 0)
@@ -766,11 +766,11 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8],
tx1_c = (y * oldval_1) >> 8;
phy_set_bb_reg(adapt, rOFDM0_XDTxAFE, 0xF0000000,
- ((tx1_c&0x3C0)>>6));
+ ((tx1_c & 0x3C0) >> 6));
phy_set_bb_reg(adapt, rOFDM0_XBTxIQImbalance, 0x003F0000,
- (tx1_c&0x3F));
+ (tx1_c & 0x3F));
phy_set_bb_reg(adapt, rOFDM0_ECCAThreshold, BIT(25),
- ((y * oldval_1>>7) & 0x1));
+ ((y * oldval_1 >> 7) & 0x1));
if (txonly)
return;
@@ -851,9 +851,9 @@ static void mac_setting_calibration(struct adapter *adapt, u32 *mac_reg, u32 *ba
usb_write8(adapt, mac_reg[i], 0x3F);
for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
- usb_write8(adapt, mac_reg[i], (u8)(backup[i]&(~BIT(3))));
+ usb_write8(adapt, mac_reg[i], (u8)(backup[i] & (~BIT(3))));
- usb_write8(adapt, mac_reg[i], (u8)(backup[i]&(~BIT(5))));
+ usb_write8(adapt, mac_reg[i], (u8)(backup[i] & (~BIT(5))));
}
static void path_a_standby(struct adapter *adapt)
@@ -902,22 +902,22 @@ static bool simularity_compare(struct adapter *adapt, s32 resulta[][8],
if (diff > MAX_TOLERANCE) {
if ((i == 2 || i == 6) && !sim_bitmap) {
- if (resulta[c1][i] + resulta[c1][i+1] == 0)
- final_candidate[(i/4)] = c2;
- else if (resulta[c2][i] + resulta[c2][i+1] == 0)
- final_candidate[(i/4)] = c1;
+ if (resulta[c1][i] + resulta[c1][i + 1] == 0)
+ final_candidate[(i / 4)] = c2;
+ else if (resulta[c2][i] + resulta[c2][i + 1] == 0)
+ final_candidate[(i / 4)] = c1;
else
- sim_bitmap = sim_bitmap | (1<<i);
+ sim_bitmap = sim_bitmap | (1 << i);
} else {
- sim_bitmap = sim_bitmap | (1<<i);
+ sim_bitmap = sim_bitmap | (1 << i);
}
}
}
if (sim_bitmap == 0) {
- for (i = 0; i < (bound/4); i++) {
+ for (i = 0; i < (bound / 4); i++) {
if (final_candidate[i] != 0xFF) {
- for (j = i*4; j < (i+1)*4-2; j++)
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
resulta[3][j] = resulta[final_candidate[i]][j];
result = false;
}
@@ -1038,9 +1038,9 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
path_a_ok = phy_path_a_iqk(adapt, is2t);
if (path_a_ok == 0x01) {
result[t][0] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_A,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
result[t][1] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_A,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
break;
}
}
@@ -1049,9 +1049,9 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
path_a_ok = phy_path_a_rx_iqk(adapt, is2t);
if (path_a_ok == 0x03) {
result[t][2] = (phy_query_bb_reg(adapt, rRx_Power_Before_IQK_A_2,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
result[t][3] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_A_2,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
break;
}
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -1073,19 +1073,19 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
path_b_ok = phy_path_b_iqk(adapt);
if (path_b_ok == 0x03) {
result[t][4] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_B,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
result[t][5] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_B,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
result[t][6] = (phy_query_bb_reg(adapt, rRx_Power_Before_IQK_B_2,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
result[t][7] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_B_2,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
break;
} else if (i == (retry_count - 1) && path_b_ok == 0x01) { /* Tx IQK OK */
result[t][4] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_B,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
result[t][5] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_B,
- bMaskDWord)&0x3FF0000)>>16;
+ bMaskDWord) & 0x3FF0000) >> 16;
}
}
@@ -1138,12 +1138,12 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t)
/* Check continuous TX and Packet TX */
tmpreg = usb_read8(adapt, 0xd03);
- if ((tmpreg&0x70) != 0)
- usb_write8(adapt, 0xd03, tmpreg&0x8F);
+ if ((tmpreg & 0x70) != 0)
+ usb_write8(adapt, 0xd03, tmpreg & 0x8F);
else
usb_write8(adapt, REG_TXPAUSE, 0xFF);
- if ((tmpreg&0x70) != 0) {
+ if ((tmpreg & 0x70) != 0) {
/* 1. Read original RF mode */
/* Path-A */
rf_a_mode = rtw_hal_read_rfreg(adapt, RF_PATH_A, RF_AC,
@@ -1157,12 +1157,12 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t)
/* 2. Set RF mode = standby mode */
/* Path-A */
phy_set_rf_reg(adapt, RF_PATH_A, RF_AC, bMask12Bits,
- (rf_a_mode&0x8FFFF)|0x10000);
+ (rf_a_mode & 0x8FFFF) | 0x10000);
/* Path-B */
if (is2t)
phy_set_rf_reg(adapt, RF_PATH_B, RF_AC, bMask12Bits,
- (rf_b_mode&0x8FFFF)|0x10000);
+ (rf_b_mode & 0x8FFFF) | 0x10000);
}
/* 3. Read RF reg18 */
@@ -1170,12 +1170,12 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t)
/* 4. Set LC calibration begin bit15 */
phy_set_rf_reg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits,
- lc_cal|0x08000);
+ lc_cal | 0x08000);
msleep(100);
/* Restore original situation */
- if ((tmpreg&0x70) != 0) {
+ if ((tmpreg & 0x70) != 0) {
/* Deal with continuous TX case */
/* Path-A */
usb_write8(adapt, 0xd03, tmpreg);
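
The IQK pass/fail checks in phy.c above all extract the same 10-bit result field at bits [25:16] of a baseband register before comparing it against the known-bad codes. An illustrative helper, not present in the driver, makes the pattern explicit:

#include <stdint.h>

static uint32_t iqk_result_field(uint32_t reg)
{
	return (reg & 0x03FF0000) >> 16;
}

/* e.g. path A TX IQK passes when BIT(28) of reg_eac is clear and the
 * extracted field is neither 0x142 (reg_e94) nor 0x42 (reg_e9c). */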
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index 249cbc375074..77edd7ad19a1 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -85,7 +85,7 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
else
- udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd)*1000);
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd) * 1000);
break;
case PWR_CMD_END:
/* When this command is parsed, end the process */
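
For reference, the delay dispatch this hunk reformats maps onto the two kernel busy-wait primitives; udelay(n * 1000) is equivalent to mdelay(n). A sketch with assumed helper and flag names:

#include <linux/types.h>
#include <linux/delay.h>

/* pwrseq_delay() and the unit flag are illustrative names only */
static void pwrseq_delay(bool in_us, unsigned int amount)
{
	if (in_us)
		udelay(amount);		/* microseconds */
	else
		mdelay(amount);		/* milliseconds, i.e. udelay(amount * 1000) */
}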
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 6fe4daea8fd5..00a9f692bb06 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -49,9 +49,9 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
tx_agc[RF_PATH_B] = 0x3f3f3f3f;
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
tx_agc[idx1] = powerlevel[idx1] |
- (powerlevel[idx1]<<8) |
- (powerlevel[idx1]<<16) |
- (powerlevel[idx1]<<24);
+ (powerlevel[idx1] << 8) |
+ (powerlevel[idx1] << 16) |
+ (powerlevel[idx1] << 24);
}
} else {
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
@@ -63,17 +63,17 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
} else {
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
tx_agc[idx1] = powerlevel[idx1] |
- (powerlevel[idx1]<<8) |
- (powerlevel[idx1]<<16) |
- (powerlevel[idx1]<<24);
+ (powerlevel[idx1] << 8) |
+ (powerlevel[idx1] << 16) |
+ (powerlevel[idx1] << 24);
}
if (hal_data->EEPROMRegulatory == 0) {
tmpval = hal_data->MCSTxPowerLevelOriginalOffset[0][6] +
- (hal_data->MCSTxPowerLevelOriginalOffset[0][7]<<8);
+ (hal_data->MCSTxPowerLevelOriginalOffset[0][7] << 8);
tx_agc[RF_PATH_A] += tmpval;
tmpval = hal_data->MCSTxPowerLevelOriginalOffset[0][14] +
- (hal_data->MCSTxPowerLevelOriginalOffset[0][15]<<24);
+ (hal_data->MCSTxPowerLevelOriginalOffset[0][15] << 24);
tx_agc[RF_PATH_B] += tmpval;
}
}
@@ -100,15 +100,15 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
}
/* rf-A cck tx power */
- tmpval = tx_agc[RF_PATH_A]&0xff;
+ tmpval = tx_agc[RF_PATH_A] & 0xff;
phy_set_bb_reg(adapt, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
- tmpval = tx_agc[RF_PATH_A]>>8;
+ tmpval = tx_agc[RF_PATH_A] >> 8;
phy_set_bb_reg(adapt, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
/* rf-B cck tx power */
- tmpval = tx_agc[RF_PATH_B]>>24;
+ tmpval = tx_agc[RF_PATH_B] >> 24;
phy_set_bb_reg(adapt, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
- tmpval = tx_agc[RF_PATH_B]&0x00ffffff;
+ tmpval = tx_agc[RF_PATH_B] & 0x00ffffff;
phy_set_bb_reg(adapt, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
}
@@ -124,9 +124,9 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
for (i = 0; i < 2; i++) {
powerbase0 = pwr_level_ofdm[i];
- powerbase0 = (powerbase0<<24) | (powerbase0<<16) |
- (powerbase0<<8) | powerbase0;
- *(ofdmbase+i) = powerbase0;
+ powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+ (powerbase0 << 8) | powerbase0;
+ *(ofdmbase + i) = powerbase0;
}
/* Check HT20 to HT40 diff */
if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
@@ -134,8 +134,8 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
else
powerlevel[0] = pwr_level_bw40[0];
powerbase1 = powerlevel[0];
- powerbase1 = (powerbase1<<24) | (powerbase1<<16) |
- (powerbase1<<8) | powerbase1;
+ powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
+ (powerbase1 << 8) | powerbase1;
*mcs_base = powerbase1;
}
static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
@@ -157,7 +157,7 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
switch (regulatory) {
case 0:
chnlGroup = 0;
- write_val = hal_data->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] +
+ write_val = hal_data->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)] +
((index < 2) ? powerbase0[rf] : powerbase1[rf]);
break;
case 1: /* Realtek regulatory */
@@ -167,7 +167,7 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
if (hal_data->pwrGroupCnt >= hal_data->PGMaxGroup)
Hal_GetChnlGroup88E(channel, &chnlGroup);
- write_val = hal_data->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] +
+ write_val = hal_data->MCSTxPowerLevelOriginalOffset[chnlGroup][index + (rf ? 8 : 0)] +
((index < 2) ? powerbase0[rf] : powerbase1[rf]);
break;
case 2: /* Better regulatory */
@@ -179,14 +179,14 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
chnlGroup = 0;
if (index < 2)
- pwr_diff = hal_data->TxPwrLegacyHtDiff[rf][channel-1];
+ pwr_diff = hal_data->TxPwrLegacyHtDiff[rf][channel - 1];
else if (hal_data->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- pwr_diff = hal_data->TxPwrHt20Diff[rf][channel-1];
+ pwr_diff = hal_data->TxPwrHt20Diff[rf][channel - 1];
if (hal_data->CurrentChannelBW == HT_CHANNEL_WIDTH_40)
- customer_pwr_limit = hal_data->PwrGroupHT40[rf][channel-1];
+ customer_pwr_limit = hal_data->PwrGroupHT40[rf][channel - 1];
else
- customer_pwr_limit = hal_data->PwrGroupHT20[rf][channel-1];
+ customer_pwr_limit = hal_data->PwrGroupHT20[rf][channel - 1];
if (pwr_diff >= customer_pwr_limit)
pwr_diff = 0;
@@ -200,9 +200,9 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
if (pwr_diff_limit[i] > pwr_diff)
pwr_diff_limit[i] = pwr_diff;
}
- customer_limit = (pwr_diff_limit[3]<<24) |
- (pwr_diff_limit[2]<<16) |
- (pwr_diff_limit[1]<<8) |
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) |
(pwr_diff_limit[0]);
write_val = customer_limit + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
break;
@@ -221,7 +221,7 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2)
write_val = 0x00000000;
- *(out_val+rf) = write_val;
+ *(out_val + rf) = write_val;
}
}
@@ -240,12 +240,12 @@ static void write_ofdm_pwr_reg(struct adapter *adapt, u8 index, u32 *pvalue)
for (rf = 0; rf < 2; rf++) {
write_val = pvalue[rf];
for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8)((write_val & (0x7f<<(i*8)))>>(i*8));
+ pwr_val[i] = (u8)((write_val & (0x7f << (i * 8))) >> (i * 8));
if (pwr_val[i] > RF6052_MAX_TX_PWR)
pwr_val[i] = RF6052_MAX_TX_PWR;
}
- write_val = (pwr_val[3]<<24) | (pwr_val[2]<<16) |
- (pwr_val[1]<<8) | pwr_val[0];
+ write_val = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
if (rf == 0)
regoffset = regoffset_a[index];
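
The tx_agc loops above replicate one 8-bit power level into all four byte lanes of a 32-bit AGC word. A minimal standalone sketch of that packing:

#include <stdint.h>

static uint32_t replicate_byte(uint8_t level)
{
	uint32_t v = level;

	/* equivalently: level * 0x01010101u */
	return v | (v << 8) | (v << 16) | (v << 24);
}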
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 47b1bf5a6143..0b20e62f9a68 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -15,7 +15,7 @@ static bool check_condition(struct adapter *adapt, const u32 condition)
u32 _board = odm->BoardType;
u32 _platform = odm->SupportPlatform;
u32 _interface = odm->SupportInterface;
- u32 cond = condition;
+ u32 cond;
if (condition == 0xCDCDCDCD)
return true;
@@ -143,7 +143,7 @@ static u32 Array_RadioA_1T_8188E[] = {
#define READ_NEXT_PAIR(v1, v2, i) \
do { \
i += 2; v1 = array[i]; \
- v2 = array[i+1]; \
+ v2 = array[i + 1]; \
} while (0)
#define RFREG_OFFSET_MASK 0xfffff
@@ -190,7 +190,7 @@ static bool rtl88e_phy_config_rf_with_headerfile(struct adapter *adapt)
for (i = 0; i < array_len; i += 2) {
u32 v1 = array[i];
- u32 v2 = array[i+1];
+ u32 v2 = array[i + 1];
if (v1 < 0xCDCDCDCD) {
rtl8188e_config_rf_reg(adapt, v1, v2);
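
The RF config tables above are flat arrays of (address, value) pairs walked two entries at a time, which is what the READ_NEXT_PAIR macro encodes. An illustrative standalone walk; the array layout and names are assumptions for the sketch:

#include <stdint.h>
#include <stddef.h>

static void write_pairs(const uint32_t *array, size_t len,
			void (*write_reg)(uint32_t addr, uint32_t val))
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		write_reg(array[i], array[i + 1]);	/* v1 = address, v2 = data */
}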
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 7646167a0b36..371e746915dd 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -113,24 +113,24 @@ void rtw_hal_add_ra_tid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_le
struct odm_dm_struct *odmpriv = &pAdapter->HalData->odmpriv;
u8 macid, init_rate, raid, shortGIrate = false;
- macid = arg&0x1f;
+ macid = arg & 0x1f;
- raid = (bitmap>>28) & 0x0f;
+ raid = (bitmap >> 28) & 0x0f;
bitmap &= 0x0fffffff;
if (rssi_level != DM_RATR_STA_INIT)
bitmap = ODM_Get_Rate_Bitmap(odmpriv, macid, bitmap, rssi_level);
- bitmap |= ((raid<<28)&0xf0000000);
+ bitmap |= ((raid << 28) & 0xf0000000);
- init_rate = get_highest_rate_idx(bitmap&0x0fffffff)&0x3f;
+ init_rate = get_highest_rate_idx(bitmap & 0x0fffffff) & 0x3f;
shortGIrate = (arg & BIT(5)) ? true : false;
if (shortGIrate)
init_rate |= BIT(6);
- raid = (bitmap>>28) & 0x0f;
+ raid = (bitmap >> 28) & 0x0f;
bitmap &= 0x0fffffff;
@@ -172,7 +172,7 @@ void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
break;
}
- H2CSetPwrMode.SmartPS_RLBM = (((pwrpriv->smart_ps<<4)&0xf0) | (RLBM & 0x0f));
+ H2CSetPwrMode.SmartPS_RLBM = (((pwrpriv->smart_ps << 4) & 0xf0) | (RLBM & 0x0f));
H2CSetPwrMode.AwakeInterval = 1;
@@ -239,9 +239,9 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
pframe += 2;
pktlen += 2;
- if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
pktlen += cur_network->ie_length - sizeof(struct ndis_802_11_fixed_ie);
- memcpy(pframe, cur_network->ies+sizeof(struct ndis_802_11_fixed_ie), pktlen);
+ memcpy(pframe, cur_network->ies + sizeof(struct ndis_802_11_fixed_ie), pktlen);
goto _ConstructBeacon;
}
@@ -258,7 +258,7 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
/* DS parameter set */
pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pktlen);
- if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
u32 ATIMWindow;
/* IBSS Parameter Set... */
ATIMWindow = 0;
@@ -473,7 +473,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
/* 3 (2) ps-poll *1 page */
RsvdPageLoc.LocPsPoll = PageNum;
ConstructPSPoll(adapt, &ReservedPagePacket[BufIndex], &PSPollLength);
- rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], PSPollLength, true, false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], PSPollLength, true, false);
PageNeed = (u8)PageNum_128(TxDescLen + PSPollLength);
PageNum += PageNeed;
@@ -483,7 +483,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
/* 3 (3) null data * 1 page */
RsvdPageLoc.LocNullData = PageNum;
ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex], &NullDataLength, pnetwork->MacAddress, false, 0, 0, false);
- rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], NullDataLength, false, false);
PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
PageNum += PageNeed;
@@ -493,7 +493,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
/* 3 (4) probe response * 1page */
RsvdPageLoc.LocProbeRsp = PageNum;
ConstructProbeRsp(adapt, &ReservedPagePacket[BufIndex], &ProbeRspLength, pnetwork->MacAddress, false);
- rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], ProbeRspLength, false, false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], ProbeRspLength, false, false);
PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength);
PageNum += PageNeed;
@@ -504,7 +504,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
RsvdPageLoc.LocQosNull = PageNum;
ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex],
&QosNullLength, pnetwork->MacAddress, true, 0, 0, false);
- rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], QosNullLength, false, false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex - TxDescLen], QosNullLength, false, false);
PageNeed = (u8)PageNum_128(TxDescLen + QosNullLength);
PageNum += PageNeed;
@@ -546,17 +546,17 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
if (mstatus == 1) {
/* We should set AID, correct TSF, and HW seq enable before setting JoinBssReport to Fw in 88/92C. */

/* Suggested by filen. Added by tynli. */
- usb_write16(adapt, REG_BCN_PSR_RPT, (0xC000|pmlmeinfo->aid));
+ usb_write16(adapt, REG_BCN_PSR_RPT, (0xC000 | pmlmeinfo->aid));
/* Do not set TSF again here or vWiFi beacon DMA INT will not work. */
/* Set REG_CR bit 8. DMA beacon by SW. */
haldata->RegCR_1 |= BIT(0);
- usb_write8(adapt, REG_CR+1, haldata->RegCR_1);
+ usb_write8(adapt, REG_CR + 1, haldata->RegCR_1);
/* Disable Hw protection for a time which is reserved for Hw sending beacon. */
/* Fix reserved page packet download failures caused by access collisions with the protection time. */
/* 2010.05.11. Added by tynli. */
- usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL)&(~BIT(3)));
+ usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) & (~BIT(3)));
usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) | BIT(4));
if (haldata->RegFwHwTxQCtrl & BIT(6)) {
@@ -565,7 +565,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
}
/* Set FWHW_TXQ_CTRL 0x422[6]=0 to tell Hw the packet is not a real beacon frame. */
- usb_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl&(~BIT(6))));
+ usb_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl & (~BIT(6))));
haldata->RegFwHwTxQCtrl &= (~BIT(6));
/* Clear beacon valid check bit. */
@@ -582,7 +582,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
/* check rsvd page download OK. */
rtw_hal_get_hwreg(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
poll++;
- } while (!bcn_valid && (poll%10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
+ } while (!bcn_valid && (poll % 10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
} while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
if (adapt->bSurpriseRemoved || adapt->bDriverStopped)
@@ -600,7 +600,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
/* Enable Bcn */
usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) | BIT(3));
- usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL)&(~BIT(4)));
+ usb_write8(adapt, REG_BCN_CTRL, usb_read8(adapt, REG_BCN_CTRL) & (~BIT(4)));
/* To make sure that if there exists an adapter which would like to send a beacon. */
/* If it exists, the original value of 0x422[6] will be 1, we should check this to */
@@ -608,7 +608,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
/* the beacon cannot be sent by HW. */
/* 2010.06.23. Added by tynli. */
if (bSendBeacon) {
- usb_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl | BIT(6)));
+ usb_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl | BIT(6)));
haldata->RegFwHwTxQCtrl |= BIT(6);
}
@@ -621,6 +621,6 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
/* Do not enable HW DMA BCN or it will cause a PCIe interface hang by timing issue. 2011.11.24. by tynli. */
/* Clear CR[8] or beacon packet will not be send to TxBuf anymore. */
haldata->RegCR_1 &= (~BIT(0));
- usb_write8(adapt, REG_CR+1, haldata->RegCR_1);
+ usb_write8(adapt, REG_CR + 1, haldata->RegCR_1);
}
}
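
SmartPS_RLBM above packs two 4-bit fields into one H2C command byte, smart_ps in the high nibble and RLBM in the low nibble. A minimal sketch of the packing:

#include <stdint.h>

static uint8_t pack_smartps_rlbm(uint8_t smart_ps, uint8_t rlbm)
{
	return ((smart_ps << 4) & 0xf0) | (rlbm & 0x0f);
}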
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 57ae0e83dd3e..740004d71a15 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -22,7 +22,7 @@ void iol_mode_enable(struct adapter *padapter, u8 enable)
if (enable) {
/* Enable initial offload */
reg_0xf0 = usb_read8(padapter, REG_SYS_CFG);
- usb_write8(padapter, REG_SYS_CFG, reg_0xf0|SW_OFFLOAD_EN);
+ usb_write8(padapter, REG_SYS_CFG, reg_0xf0 | SW_OFFLOAD_EN);
if (!padapter->bFWReady) {
DBG_88E("bFWReady == false call reset 8051...\n");
@@ -42,9 +42,9 @@ s32 iol_execute(struct adapter *padapter, u8 control)
u8 reg_0x88 = 0;
unsigned long start = 0;
- control = control&0x0f;
+ control = control & 0x0f;
reg_0x88 = usb_read8(padapter, REG_HMEBOX_E0);
- usb_write8(padapter, REG_HMEBOX_E0, reg_0x88|control);
+ usb_write8(padapter, REG_HMEBOX_E0, reg_0x88 | control);
start = jiffies;
while ((reg_0x88 = usb_read8(padapter, REG_HMEBOX_E0)) & control &&
@@ -54,7 +54,7 @@ s32 iol_execute(struct adapter *padapter, u8 control)
reg_0x88 = usb_read8(padapter, REG_HMEBOX_E0);
status = (reg_0x88 & control) ? _FAIL : _SUCCESS;
- if (reg_0x88 & control<<4)
+ if (reg_0x88 & control << 4)
status = _FAIL;
return status;
}
@@ -64,7 +64,7 @@ static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
s32 rst = _SUCCESS;
iol_mode_enable(padapter, 1);
- usb_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy);
+ usb_write8(padapter, REG_TDECTRL + 1, txpktbuf_bndy);
rst = iol_execute(padapter, CMD_INIT_LLT);
iol_mode_enable(padapter, 0);
return rst;
@@ -92,9 +92,9 @@ void _8051Reset88E(struct adapter *padapter)
{
u8 u1bTmp;
- u1bTmp = usb_read8(padapter, REG_SYS_FUNC_EN+1);
- usb_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp&(~BIT(2)));
- usb_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp|(BIT(2)));
+ u1bTmp = usb_read8(padapter, REG_SYS_FUNC_EN + 1);
+ usb_write8(padapter, REG_SYS_FUNC_EN + 1, u1bTmp & (~BIT(2)));
+ usb_write8(padapter, REG_SYS_FUNC_EN + 1, u1bTmp | (BIT(2)));
DBG_88E("=====> _8051Reset88E(): 8051 reset success .\n");
}
@@ -122,7 +122,7 @@ void rtw_hal_read_chip_version(struct adapter *padapter)
value32 = usb_read32(padapter, REG_SYS_CFG);
ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
- ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK)>>CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
+ ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
dump_chip_info(ChipVersion);
@@ -163,10 +163,10 @@ void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
{
if (enable) {
DBG_88E("Enable notch filter\n");
- usb_write8(adapter, rOFDM0_RxDSP+1, usb_read8(adapter, rOFDM0_RxDSP+1) | BIT(1));
+ usb_write8(adapter, rOFDM0_RxDSP + 1, usb_read8(adapter, rOFDM0_RxDSP + 1) | BIT(1));
} else {
DBG_88E("Disable notch filter\n");
- usb_write8(adapter, rOFDM0_RxDSP+1, usb_read8(adapter, rOFDM0_RxDSP+1) & ~BIT(1));
+ usb_write8(adapter, rOFDM0_RxDSP + 1, usb_read8(adapter, rOFDM0_RxDSP + 1) & ~BIT(1));
}
}
@@ -308,7 +308,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G,
if (pwrInfo24G->IndexCCK_Base[rfPath][group] == 0xFF)
pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
}
- for (group = 0; group < MAX_CHNL_GROUP_24G-1; group++) {
+ for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) {
pwrInfo24G->IndexBW40_Base[rfPath][group] = PROMContent[eeAddr++];
if (pwrInfo24G->IndexBW40_Base[rfPath][group] == 0xFF)
pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
@@ -319,7 +319,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G,
if (PROMContent[eeAddr] == 0xFF) {
pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_HT20_DIFF;
} else {
- pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4;
if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
}
@@ -327,7 +327,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G,
if (PROMContent[eeAddr] == 0xFF) {
pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_OFDM_DIFF;
} else {
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f);
if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
}
@@ -337,7 +337,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G,
if (PROMContent[eeAddr] == 0xFF) {
pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
} else {
- pwrInfo24G->BW40_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4;
if (pwrInfo24G->BW40_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
pwrInfo24G->BW40_Diff[rfPath][TxCount] |= 0xF0;
}
@@ -345,7 +345,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G,
if (PROMContent[eeAddr] == 0xFF) {
pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
} else {
- pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f);
if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
}
@@ -354,7 +354,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G,
if (PROMContent[eeAddr] == 0xFF) {
pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
} else {
- pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0xf0) >> 4;
if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
}
@@ -362,7 +362,7 @@ static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G,
if (PROMContent[eeAddr] == 0xFF) {
pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
} else {
- pwrInfo24G->CCK_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = (PROMContent[eeAddr] & 0x0f);
if (pwrInfo24G->CCK_Diff[rfPath][TxCount] & BIT(3)) /* 4bit sign number to 8 bit sign number */
pwrInfo24G->CCK_Diff[rfPath][TxCount] |= 0xF0;
}
@@ -450,9 +450,9 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
/* 2010/10/19 MH Add Regulator recognize for CU. */
if (!AutoLoadFail) {
- pHalData->EEPROMRegulatory = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x7); /* bit0~2 */
+ pHalData->EEPROMRegulatory = (PROMContent[EEPROM_RF_BOARD_OPTION_88E] & 0x7); /* bit0~2 */
if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
- pHalData->EEPROMRegulatory = (EEPROM_DEFAULT_BOARD_OPTION&0x7); /* bit0~2 */
+ pHalData->EEPROMRegulatory = (EEPROM_DEFAULT_BOARD_OPTION & 0x7); /* bit0~2 */
} else {
pHalData->EEPROMRegulatory = 0;
}
@@ -532,9 +532,9 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
if (!AutoLoadFail) {
/* Antenna Diversity setting. */
if (registry_par->antdiv_cfg == 2) { /* 2:By EFUSE */
- pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x18)>>3;
+ pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E] & 0x18) >> 3;
if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
- pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION&0x18)>>3;
+ pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION & 0x18) >> 3;
} else {
pHalData->AntDivCfg = registry_par->antdiv_cfg; /* 0:OFF , 1:ON, 2:By EFUSE */
}
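
The recurring "4bit sign number to 8 bit sign number" step above sign-extends a 4-bit two's-complement EEPROM field: if bit 3 is set, OR in 0xF0 so the value stays negative in 8-bit arithmetic. A standalone sketch:

#include <stdint.h>

static int8_t sign_extend_4to8(uint8_t nibble)
{
	uint8_t v = nibble & 0x0f;

	if (v & 0x08)
		v |= 0xf0;
	return (int8_t)v;	/* e.g. 0x0F -> -1, 0x07 -> 7 */
}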
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 0a900827c4fc..7d0135fde795 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -182,7 +182,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe,
rtl8188e_process_phy_info(padapter, precvframe);
}
} else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
- if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
if (psta)
precvframe->psta = psta;
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 2808f2b119bf..7d315bd438d4 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -58,12 +58,12 @@ void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u
/* offset 0 */
ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */
- ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */
+ ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00ff0000); /* 32 bytes for TX Desc */
- ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /* Buffer size + command header */
+ ptxdesc->txdw0 |= cpu_to_le32(BufferLen & 0x0000ffff); /* Buffer size + command header */
/* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /* Fixed queue of Mgnt queue */
+ ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT << QSEL_SHT) & 0x00001f00); /* Fixed queue of Mgnt queue */
/* Set NAVUSEHDR to prevent the Ps-poll AID field from being changed to an error value by Hw. */
if (ispspoll) {
@@ -91,16 +91,16 @@ static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxd
/* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
case _WEP40_:
case _WEP104_:
- ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000);
ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
break;
case _TKIP_:
case _TKIP_WTMIC_:
- ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000);
ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
break;
case _AES_:
- ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw1 |= cpu_to_le32((0x03 << SEC_TYPE_SHT) & 0x00c00000);
ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
break;
case _NO_PRIVACY_:
@@ -127,7 +127,7 @@ static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
*pdw |= cpu_to_le32(HW_RTS_EN);
/* Set RTS BW */
if (pattrib->ht_en) {
- *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;
+ *pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;
if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
*pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
@@ -144,7 +144,7 @@ static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
if (pattrib->ht_en) {
- *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
+ *pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
*pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
@@ -171,7 +171,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
if (adapt->registrypriv.mp_mode == 0) {
if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
- ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
+ ptxdesc = (struct tx_desc *)(pmem + PACKET_OFFSET_SZ);
pull = 1;
}
}
@@ -263,11 +263,11 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/* DATA_SHORT */
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
}
- } else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
+ } else if ((pxmitframe->frame_tag & 0x0f) == MGNT_FRAMETAG) {
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);
- qsel = (uint)(pattrib->qsel&0x0000001f);
+ qsel = (uint)(pattrib->qsel & 0x0000001f);
ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);
@@ -278,7 +278,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw2 |= cpu_to_le32(BIT(19));
/* offset 12 */
- ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);
/* offset 20 */
ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
@@ -288,7 +288,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
- } else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
+ } else if ((pxmitframe->frame_tag & 0x0f) == TXAGG_FRAMETAG) {
DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
} else {
DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);
@@ -301,7 +301,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
/* offset 8 */
/* offset 12 */
- ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0fff0000);
/* offset 20 */
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
@@ -466,7 +466,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
/* 3 2. aggregate same priority and same DA(AP or STA) frames */
pfirstframe = pxmitframe;
- len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
+ len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
pbuf_tail = len;
pbuf = round_up(pbuf_tail, 8);
@@ -517,7 +517,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
pxmitframe->agg_num = 0; /* not first frame of aggregation */
pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */
- len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);
+ len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
pxmitframe->agg_num = 1;
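
The txdw writes above all follow one pattern: shift the value into position, mask it to the field width, convert to little endian, then OR it into the descriptor word. An illustrative helper, not present in the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

static void set_desc_field(__le32 *dw, u32 val, unsigned int shift, u32 mask)
{
	*dw |= cpu_to_le32((val << shift) & mask);
}

/* usage mirroring the patch:
 * set_desc_field(&ptxdesc->txdw3, pattrib->seqnum, SEQ_SHT, 0x0fff0000);
 */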
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index ba7e15fbde72..b9f11ef327e7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -112,7 +112,7 @@ struct pkt_attrib {
u32 last_txcmdsz;
u8 nr_frags;
u8 encrypt; /* when 0 indicate no encrypt. when non-zero,
- * indicate the encrypt algorith
+ * indicate the encrypt algorithm
*/
u8 iv_len;
u8 icv_len;
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index ba53959e1303..9a89791720e0 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -193,12 +193,12 @@ static char *translate_scan(struct adapter *padapter,
/*Add basic and extended rates */
max_rate = 0;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
while (pnetwork->network.SupportedRates[i] != 0) {
rate = pnetwork->network.SupportedRates[i]&0x7F;
if (rate > max_rate)
max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
i++;
}
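
The snprintf to scnprintf conversions above fix a pointer-walk hazard: snprintf() returns the length that would have been written had the buffer been large enough, so on truncation "p += snprintf(...)" can step p past the end of the buffer and make the next length argument wrap. scnprintf() returns the bytes actually written, keeping p in bounds. A minimal sketch of the fixed pattern:

#include <linux/kernel.h>	/* scnprintf() */

static size_t rates_line(char *buf, size_t len)
{
	char *p = buf;

	p += scnprintf(p, len - (p - buf), " Rates (Mb/s): ");
	p += scnprintf(p, len - (p - buf), "%d%s ", 54, "");	/* sample rate */
	return p - buf;		/* always stays below len */
}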
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 69d4b1d66b6f..4d6d0347ab8e 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -26,18 +26,17 @@ void _rtw_init_queue(struct __queue *pqueue)
struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv)
{
- struct net_device *pnetdev;
+ struct net_device *netdev;
struct rtw_netdev_priv_indicator *pnpi;
- pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
- if (!pnetdev)
- goto RETURN;
+ netdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
+ if (!netdev)
+ return NULL;
- pnpi = netdev_priv(pnetdev);
+ pnpi = netdev_priv(netdev);
pnpi->priv = old_priv;
-RETURN:
- return pnetdev;
+ return netdev;
}
void rtw_free_netdev(struct net_device *netdev)
@@ -45,18 +44,15 @@ void rtw_free_netdev(struct net_device *netdev)
struct rtw_netdev_priv_indicator *pnpi;
if (!netdev)
- goto RETURN;
+ return;
pnpi = netdev_priv(netdev);
if (!pnpi->priv)
- goto RETURN;
+ return;
vfree(pnpi->priv);
free_netdev(netdev);
-
-RETURN:
- return;
}
void rtw_buf_free(u8 **buf, u32 *buf_len)
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 845c8817281c..f7f09c0d273f 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -32,6 +32,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
/****** 8188EUS ********/
{USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
{USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x0B05, 0x18F0)}, /* ASUS USB-N10 Nano B1 */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 980b850d729a..ddcd7885d190 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -304,7 +304,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
u16 i, usValue, IC_Version;
u16 EEPROMId;
- RT_TRACE(COMP_INIT, "====> _rtl92e_read_eeprom_info\n");
+ RT_TRACE(COMP_INIT, "====> %s\n", __func__);
EEPROMId = rtl92e_eeprom_read(dev, 0);
if (EEPROMId != RTL8190_EEPROM_ID) {
@@ -1354,8 +1354,8 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate)
default:
RT_TRACE(COMP_RECV,
- "_rtl92e_rate_hw_to_mgn(): Non supportedRate [%x], bIsHT = %d!!!\n",
- rate, bIsHT);
+ "%s: Non supportedRate [%x], bIsHT = %d!!!\n",
+ __func__, rate, bIsHT);
break;
}
@@ -1415,8 +1415,8 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate)
default:
RT_TRACE(COMP_RECV,
- "_rtl92e_rate_hw_to_mgn(): Non supported Rate [%x], bIsHT = %d!!!\n",
- rate, bIsHT);
+ "%s: Non supported Rate [%x], bIsHT = %d!!!\n",
+ __func__, rate, bIsHT);
break;
}
}
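
The RT_TRACE conversions above, continuing in r8192E_phy.c below, replace hard-coded function names in trace strings with __func__, which the compiler keeps in sync if the function is ever renamed. Minimal standalone sketch:

#include <stdio.h>

static void demo(void)
{
	printf("====> %s\n", __func__);	/* prints "====> demo" */
}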
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 7d78f16efc1d..411138102948 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -1124,14 +1124,14 @@ static void _rtl92e_cck_tx_power_track_bw_switch_thermal(struct net_device *dev)
priv->Record_CCK_20Mindex = 6;
priv->CCK_index = priv->Record_CCK_20Mindex;
RT_TRACE(COMP_POWER_TRACKING,
- "20MHz, _rtl92e_cck_tx_power_track_bw_switch_thermal(),CCK_index = %d\n",
+ "20MHz, %s,CCK_index = %d\n", __func__,
priv->CCK_index);
break;
case HT_CHANNEL_WIDTH_20_40:
priv->CCK_index = priv->Record_CCK_40Mindex;
RT_TRACE(COMP_POWER_TRACKING,
- "40MHz, _rtl92e_cck_tx_power_track_bw_switch_thermal(), CCK_index = %d\n",
+ "40MHz, %s, CCK_index = %d\n", __func__,
priv->CCK_index);
break;
}
@@ -1155,7 +1155,7 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
u8 regBwOpMode;
RT_TRACE(COMP_SWBW,
- "==>_rtl92e_set_bw_mode_work_item() Switch to %s bandwidth\n",
+ "==>%s Switch to %s bandwidth\n", __func__,
priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ?
"20MHz" : "40MHz");
@@ -1416,15 +1416,14 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
if (priv->SetRFPowerStateInProgress)
return false;
- RT_TRACE(COMP_PS, "===========> _rtl92e_set_rf_power_state()!\n");
+ RT_TRACE(COMP_PS, "===========> %s!\n", __func__);
priv->SetRFPowerStateInProgress = true;
switch (priv->rf_chip) {
case RF_8256:
switch (eRFPowerState) {
case eRfOn:
- RT_TRACE(COMP_PS,
- "_rtl92e_set_rf_power_state() eRfOn!\n");
+ RT_TRACE(COMP_PS, "%s eRfOn!\n", __func__);
if ((priv->rtllib->eRFPowerState == eRfOff) &&
RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
@@ -1490,10 +1489,8 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(COMP_POWER,
- "\n\n\n TimeOut!! _rtl92e_set_rf_power_state(): eRfOff: %d times TcbBusyQueue[%d] != 0 !!!\n",
- MAX_DOZE_WAITING_TIMES_9x,
- QueueID);
+ RT_TRACE(COMP_POWER, "\n\n\n TimeOut!! %s: eRfOff: %d times TcbBusyQueue[%d] != 0 !!!\n",
+ __func__, MAX_DOZE_WAITING_TIMES_9x, QueueID);
break;
}
}
@@ -1501,8 +1498,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
break;
case eRfOff:
- RT_TRACE(COMP_PS,
- "_rtl92e_set_rf_power_state() eRfOff/Sleep !\n");
+ RT_TRACE(COMP_PS, "%s eRfOff/Sleep !\n", __func__);
for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) {
ring = &priv->tx_ring[QueueID];
@@ -1567,9 +1563,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
}
priv->SetRFPowerStateInProgress = false;
- RT_TRACE(COMP_PS,
- "<=========== _rtl92e_set_rf_power_state() bResult = %d!\n",
- bResult);
+ RT_TRACE(COMP_PS, "<=========== %s bResult = %d!\n", __func__, bResult);
return bResult;
}
@@ -1581,21 +1575,17 @@ bool rtl92e_set_rf_power_state(struct net_device *dev,
bool bResult = false;
RT_TRACE(COMP_PS,
- "---------> rtl92e_set_rf_power_state(): eRFPowerState(%d)\n",
- eRFPowerState);
+ "---------> %s: eRFPowerState(%d)\n", __func__, eRFPowerState);
if (eRFPowerState == priv->rtllib->eRFPowerState &&
priv->bHwRfOffAction == 0) {
- RT_TRACE(COMP_PS,
- "<--------- rtl92e_set_rf_power_state(): discard the request for eRFPowerState(%d) is the same.\n",
- eRFPowerState);
+ RT_TRACE(COMP_PS, "<--------- %s: discard the request for eRFPowerState(%d) is the same.\n",
+ __func__, eRFPowerState);
return bResult;
}
bResult = _rtl92e_set_rf_power_state(dev, eRFPowerState);
- RT_TRACE(COMP_PS,
- "<--------- rtl92e_set_rf_power_state(): bResult(%d)\n",
- bResult);
+ RT_TRACE(COMP_PS, "<--------- %s: bResult(%d)\n", __func__, bResult);
return bResult;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index 627ea1029509..c8506517cc8d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -108,8 +108,8 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
}
RT_TRACE(COMP_SEC,
- "====>to rtl92e_set_key(), dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n",
- dev, EntryNo, KeyIndex, KeyType, MacAddr);
+ "====>to %s, dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n",
+ __func__, dev, EntryNo, KeyIndex, KeyType, MacAddr);
if (DefaultKey)
usConfig |= BIT15 | (KeyType<<2);
@@ -163,7 +163,7 @@ void rtl92e_cam_restore(struct net_device *dev)
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
- RT_TRACE(COMP_SEC, "rtl92e_cam_restore:\n");
+ RT_TRACE(COMP_SEC, "%s:\n", __func__);
if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 11183b9f757a..d3664e508cbe 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -145,21 +145,21 @@ bool rtl92e_set_rf_state(struct net_device *dev,
unsigned long flag;
RT_TRACE((COMP_PS | COMP_RF),
- "===>rtl92e_set_rf_state(): StateToSet(%d)\n", StateToSet);
+ "===>%s: StateToSet(%d)\n", __func__, StateToSet);
while (true) {
spin_lock_irqsave(&priv->rf_ps_lock, flag);
if (priv->RFChangeInProgress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
RT_TRACE((COMP_PS | COMP_RF),
- "rtl92e_set_rf_state(): RF Change in progress! Wait to set..StateToSet(%d).\n",
- StateToSet);
+ "%s: RF Change in progress! Wait to set..StateToSet(%d).\n",
+ __func__, StateToSet);
while (priv->RFChangeInProgress) {
RFWaitCounter++;
RT_TRACE((COMP_PS | COMP_RF),
- "rtl92e_set_rf_state(): Wait 1 ms (%d times)...\n",
- RFWaitCounter);
+ "%s: Wait 1 ms (%d times)...\n",
+ __func__, RFWaitCounter);
mdelay(1);
if (RFWaitCounter > 100) {
@@ -195,8 +195,8 @@ bool rtl92e_set_rf_state(struct net_device *dev,
bConnectBySSID = true;
} else {
RT_TRACE((COMP_PS | COMP_RF),
- "rtl92e_set_rf_state - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n",
- priv->rtllib->RfOffReason, ChangeSource);
+ "%s - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n",
+ __func__, priv->rtllib->RfOffReason, ChangeSource);
}
break;
@@ -232,8 +232,8 @@ bool rtl92e_set_rf_state(struct net_device *dev,
if (bActionAllowed) {
RT_TRACE((COMP_PS | COMP_RF),
- "rtl92e_set_rf_state(): Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n",
- StateToSet, priv->rtllib->RfOffReason);
+ "%s: Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n",
+ __func__, StateToSet, priv->rtllib->RfOffReason);
PHY_SetRFPowerState(dev, StateToSet);
if (StateToSet == eRfOn) {
@@ -245,15 +245,15 @@ bool rtl92e_set_rf_state(struct net_device *dev,
}
} else {
RT_TRACE((COMP_PS | COMP_RF),
- "rtl92e_set_rf_state(): Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n",
- StateToSet, ChangeSource, priv->rtllib->RfOffReason);
+ "%s: Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n",
+ __func__, StateToSet, ChangeSource, priv->rtllib->RfOffReason);
}
spin_lock_irqsave(&priv->rf_ps_lock, flag);
priv->RFChangeInProgress = false;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
- RT_TRACE((COMP_PS | COMP_RF), "<===rtl92e_set_rf_state()\n");
+ RT_TRACE((COMP_PS | COMP_RF), "<===%s\n", __func__);
return bActionAllowed;
}
@@ -732,7 +732,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
(&priv->rtllib->PowerSaveControl);
- bool init_status = true;
+ bool init_status;
priv->bDriverIsGoingToUnload = false;
priv->bdisable_nic = false;
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 816d31c1d5c7..2d5e4a0330c6 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -119,8 +119,8 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
}
#ifdef VERBOSE_DEBUG
- print_hex_dump_bytes("rtllib_ADDBA(): ", DUMP_PREFIX_NONE, skb->data,
- skb->len);
+ print_hex_dump_bytes("%s: ", DUMP_PREFIX_NONE, skb->data,
+ __func__, skb->len);
#endif
return skb;
}
@@ -170,8 +170,8 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
tag += 2;
#ifdef VERBOSE_DEBUG
- print_hex_dump_bytes("rtllib_DELBA(): ", DUMP_PREFIX_NONE, skb->data,
- skb->len);
+ print_hex_dump_bytes("%s: ", DUMP_PREFIX_NONE, skb->data,
+ __func__, skb->len);
#endif
return skb;
}
@@ -235,7 +235,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
}
#ifdef VERBOSE_DEBUG
- print_hex_dump_bytes("rtllib_rx_ADDBAReq(): ", DUMP_PREFIX_NONE,
+ print_hex_dump_bytes("%s: ", DUMP_PREFIX_NONE, __func__,
skb->data, skb->len);
#endif
@@ -433,8 +433,8 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
}
#ifdef VERBOSE_DEBUG
- print_hex_dump_bytes("rtllib_rx_DELBA(): ", DUMP_PREFIX_NONE, skb->data,
- skb->len);
+ print_hex_dump_bytes("%s: ", DUMP_PREFIX_NONE, skb->data,
+ __func__, skb->len);
#endif
delba = (struct rtllib_hdr_3addr *)skb->data;
dst = (u8 *)(&delba->addr2[0]);
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index f02263af9624..d83d72594312 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -545,7 +545,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
#ifdef VERBOSE_DEBUG
- print_hex_dump_bytes("HTOnAssocRsp(): ", DUMP_PREFIX_NONE,
+ print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE,
pPeerHTCap, sizeof(struct ht_capab_ele));
#endif
HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 672bf0987943..47b2669a3a8e 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -440,7 +440,7 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
{
struct ts_common_info *pTS, *pTmpTS;
- netdev_info(ieee->dev, "===========>RemovePeerTS, %pM\n", Addr);
+ netdev_info(ieee->dev, "===========>%s, %pM\n", __func__, Addr);
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 328f410daa03..b84f00b8d18b 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -728,14 +728,14 @@ struct rtllib_pspoll_hdr {
struct rtllib_hdr {
__le16 frame_ctl;
__le16 duration_id;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtllib_hdr_1addr {
__le16 frame_ctl;
__le16 duration_id;
u8 addr1[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtllib_hdr_2addr {
@@ -743,7 +743,7 @@ struct rtllib_hdr_2addr {
__le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtllib_hdr_3addr {
@@ -753,7 +753,7 @@ struct rtllib_hdr_3addr {
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtllib_hdr_4addr {
@@ -764,7 +764,7 @@ struct rtllib_hdr_4addr {
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
u8 addr4[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtllib_hdr_3addrqos {
@@ -775,7 +775,7 @@ struct rtllib_hdr_3addrqos {
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
__le16 qos_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtllib_hdr_4addrqos {
@@ -787,13 +787,13 @@ struct rtllib_hdr_4addrqos {
__le16 seq_ctl;
u8 addr4[ETH_ALEN];
__le16 qos_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtllib_info_element {
u8 id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct rtllib_authentication {
@@ -802,7 +802,7 @@ struct rtllib_authentication {
__le16 transaction;
__le16 status;
/*challenge*/
- struct rtllib_info_element info_element[0];
+ struct rtllib_info_element info_element[];
} __packed;
struct rtllib_disauth {
@@ -818,7 +818,7 @@ struct rtllib_disassoc {
struct rtllib_probe_request {
struct rtllib_hdr_3addr header;
/* SSID, supported rates */
- struct rtllib_info_element info_element[0];
+ struct rtllib_info_element info_element[];
} __packed;
struct rtllib_probe_response {
@@ -829,7 +829,7 @@ struct rtllib_probe_response {
/* SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN
*/
- struct rtllib_info_element info_element[0];
+ struct rtllib_info_element info_element[];
} __packed;
/* Alias beacon for probe_response */
@@ -840,7 +840,7 @@ struct rtllib_assoc_request_frame {
__le16 capability;
__le16 listen_interval;
/* SSID, supported rates, RSN */
- struct rtllib_info_element info_element[0];
+ struct rtllib_info_element info_element[];
} __packed;
struct rtllib_assoc_response_frame {
@@ -848,7 +848,7 @@ struct rtllib_assoc_response_frame {
__le16 capability;
__le16 status;
__le16 aid;
- struct rtllib_info_element info_element[0]; /* supported rates */
+ struct rtllib_info_element info_element[]; /* supported rates */
} __packed;
struct rtllib_txb {
@@ -859,7 +859,7 @@ struct rtllib_txb {
u16 reserved;
__le16 frag_size;
__le16 payload_size;
- struct sk_buff *fragments[0];
+ struct sk_buff *fragments[];
};
#define MAX_SUBFRAME_COUNT 64
@@ -1792,7 +1792,7 @@ struct rtllib_device {
/* This must be the last item so that it points to the data
* allocated beyond this structure by alloc_rtllib
*/
- u8 priv[0];
+ u8 priv[];
};
#define IEEE_A (1<<0)
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 0bae0a0a4cbe..d31b5e1c8df4 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -2092,7 +2092,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
MAX_RATES_LENGTH);
for (i = 0; i < network->rates_len; i++) {
network->rates[i] = info_element->data[i];
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates[i]);
if (rtllib_is_ofdm_rate
@@ -2120,7 +2120,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
MAX_RATES_EX_LENGTH);
for (i = 0; i < network->rates_ex_len; i++) {
network->rates_ex[i] = info_element->data[i];
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates_ex[i]);
if (rtllib_is_ofdm_rate
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 8cddb2e12dc4..79d7ad7c0a4a 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -241,7 +241,7 @@ static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
return 0;
#ifdef VERBOSE_DEBUG
- print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE, skb->data,
+ print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE, skb->data,
skb->len);
#endif
ip = ip_hdr(skb);
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index beb40967936a..7e7df50164fb 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -114,7 +114,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
/* Add basic and extended rates */
max_rate = 0;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
for (i = 0, j = 0; i < network->rates_len;) {
if (j < network->rates_ex_len &&
((network->rates_ex[j] & 0x7F) <
@@ -124,12 +124,12 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
rate = network->rates[i++] & 0x7F;
if (rate > max_rate)
max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
}
for (; j < network->rates_ex_len; j++) {
rate = network->rates_ex[j] & 0x7F;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
if (rate > max_rate)
max_rate = rate;
@@ -226,7 +226,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
*/
iwe.cmd = IWEVCUSTOM;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %lums ago",
(jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 9576b647f6b1..39f4ddd86796 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -886,14 +886,14 @@ enum ieee80211_mfie {
struct rtl_80211_hdr {
__le16 frame_ctl;
__le16 duration_id;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtl_80211_hdr_1addr {
__le16 frame_ctl;
__le16 duration_id;
u8 addr1[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtl_80211_hdr_2addr {
@@ -901,7 +901,7 @@ struct rtl_80211_hdr_2addr {
__le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtl_80211_hdr_3addr {
@@ -911,7 +911,7 @@ struct rtl_80211_hdr_3addr {
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtl_80211_hdr_4addr {
@@ -922,7 +922,7 @@ struct rtl_80211_hdr_4addr {
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
u8 addr4[ETH_ALEN];
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtl_80211_hdr_3addrqos {
@@ -951,7 +951,7 @@ struct rtl_80211_hdr_4addrqos {
struct ieee80211_info_element {
u8 id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct ieee80211_authentication {
@@ -960,7 +960,7 @@ struct ieee80211_authentication {
__le16 transaction;
__le16 status;
/*challenge*/
- struct ieee80211_info_element info_element[0];
+ struct ieee80211_info_element info_element[];
} __packed;
struct ieee80211_disassoc {
@@ -971,7 +971,7 @@ struct ieee80211_disassoc {
struct ieee80211_probe_request {
struct rtl_80211_hdr_3addr header;
/* SSID, supported rates */
- struct ieee80211_info_element info_element[0];
+ struct ieee80211_info_element info_element[];
} __packed;
struct ieee80211_probe_response {
@@ -982,7 +982,7 @@ struct ieee80211_probe_response {
/* SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN
*/
- struct ieee80211_info_element info_element[0];
+ struct ieee80211_info_element info_element[];
} __packed;
/* Alias beacon for probe_response */
@@ -993,7 +993,7 @@ struct ieee80211_assoc_request_frame {
__le16 capability;
__le16 listen_interval;
/* SSID, supported rates, RSN */
- struct ieee80211_info_element info_element[0];
+ struct ieee80211_info_element info_element[];
} __packed;
struct ieee80211_reassoc_request_frame {
@@ -1002,7 +1002,7 @@ struct ieee80211_reassoc_request_frame {
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
/* SSID, supported rates, RSN */
- struct ieee80211_info_element info_element[0];
+ struct ieee80211_info_element info_element[];
} __packed;
struct ieee80211_assoc_response_frame {
@@ -1010,7 +1010,7 @@ struct ieee80211_assoc_response_frame {
__le16 capability;
__le16 status;
__le16 aid;
- struct ieee80211_info_element info_element[0]; /* supported rates */
+ struct ieee80211_info_element info_element[]; /* supported rates */
} __packed;
struct ieee80211_txb {
@@ -1021,7 +1021,7 @@ struct ieee80211_txb {
u16 reserved;
__le16 frag_size;
__le16 payload_size;
- struct sk_buff *fragments[0];
+ struct sk_buff *fragments[];
};
#define MAX_TX_AGG_COUNT 16
@@ -2007,7 +2007,7 @@ struct ieee80211_device {
/* This must be the last item so that it points to the data
* allocated beyond this structure by alloc_ieee80211
*/
- u8 priv[0];
+ u8 priv[];
};
#define IEEE_A (1<<0)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 6f4710171151..ffe624ed0c0c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -388,20 +388,20 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
keyidx = pos[3];
if (!(keyidx & BIT(5))) {
if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: received packet without ExtIV"
+ netdev_dbg(skb->dev, "TKIP: received packet without ExtIV"
" flag from %pM\n", hdr->addr2);
}
return -2;
}
keyidx >>= 6;
if (tkey->key_idx != keyidx) {
- printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
+ netdev_dbg(skb->dev, "TKIP: RX tkey->key_idx=%d frame "
"keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
return -6;
}
if (!tkey->key_set) {
if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: received packet from %pM"
+ netdev_dbg(skb->dev, "TKIP: received packet from %pM"
" with keyid=%d that does not have a configured"
" key\n", hdr->addr2, keyidx);
}
@@ -417,7 +417,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
+ netdev_dbg(skb->dev, "TKIP: replay detected: STA=%pM"
" previous TSC %08x%04x received TSC "
"%08x%04x\n", hdr->addr2,
tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
@@ -445,7 +445,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
skcipher_request_zero(req);
if (err) {
if (net_ratelimit()) {
- printk(KERN_DEBUG ": TKIP: failed to decrypt "
+ netdev_dbg(skb->dev, "TKIP: failed to decrypt "
"received packet from %pM\n",
hdr->addr2);
}
@@ -468,7 +468,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
tkey->rx_phase1_done = 0;
}
if (net_ratelimit()) {
- printk(KERN_DEBUG "TKIP: ICV error detected: STA="
+ netdev_dbg(skb->dev, "TKIP: ICV error detected: STA="
"%pM\n", hdr->addr2);
}
tkey->dot11RSNAStatsTKIPICVErrors++;
@@ -559,7 +559,7 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
- printk(KERN_DEBUG "Invalid packet for Michael MIC add "
+ netdev_dbg(skb->dev, "Invalid packet for Michael MIC add "
"(tailroom=%d hdr_len=%d skb->len=%d)\n",
skb_tailroom(skb), hdr_len, skb->len);
return -1;
@@ -628,10 +628,9 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
struct rtl_80211_hdr_4addr *hdr;
hdr = (struct rtl_80211_hdr_4addr *)skb->data;
- printk(KERN_DEBUG "%s: Michael MIC verification failed for "
+ netdev_dbg(skb->dev, "Michael MIC verification failed for "
"MSDU from %pM keyidx=%d\n",
- skb->dev ? skb->dev->name : "N/A", hdr->addr2,
- keyidx);
+ hdr->addr2, keyidx);
if (skb->dev)
ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
tkey->dot11RSNAStatsTKIPLocalMICFailures++;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index aa9dab8a4ae8..a5a1b14f5a40 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -68,8 +68,7 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
sizeof(struct ieee80211_network),
GFP_KERNEL);
if (!ieee->networks) {
- printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
- ieee->dev->name);
+ netdev_warn(ieee->dev, "Out of memory allocating beacons\n");
return -ENOMEM;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 90692db81b71..d8eb907ff301 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -131,7 +131,7 @@ static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p)
*tag++ = 0x00;
*tag_p = tag;
- printk(KERN_ALERT "This is enable turbo mode IE process\n");
+ netdev_alert(ieee->dev, "This is the enable turbo mode IE process\n");
}
#endif
@@ -1270,14 +1270,15 @@ static void ieee80211_associate_step2(struct ieee80211_device *ieee)
static void ieee80211_associate_complete_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
- printk(KERN_INFO "Associated successfully\n");
+
+ netdev_info(ieee->dev, "Associated successfully\n");
if (ieee80211_is_54g(&ieee->current_network) &&
(ieee->modulation & IEEE80211_OFDM_MODULATION)) {
ieee->rate = 108;
- printk(KERN_INFO"Using G rates:%d\n", ieee->rate);
+ netdev_info(ieee->dev, "Using G rates:%d\n", ieee->rate);
} else {
ieee->rate = 22;
- printk(KERN_INFO"Using B rates:%d\n", ieee->rate);
+ netdev_info(ieee->dev, "Using B rates:%d\n", ieee->rate);
}
if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
printk("Successfully associated, ht enabled\n");
@@ -1391,12 +1392,13 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE);
ieee->current_network.ssid_len = tmp_ssid_len;
- printk(KERN_INFO"Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n",
- ieee->current_network.ssid,
- ieee->current_network.channel,
- ieee->current_network.qos_data.supported,
- ieee->pHTInfo->bEnableHT,
- ieee->current_network.bssht.bdSupportHT);
+ netdev_info(ieee->dev,
+ "Linking with %s,channel:%d, qos:%d, myHT:%d, networkHT:%d\n",
+ ieee->current_network.ssid,
+ ieee->current_network.channel,
+ ieee->current_network.qos_data.supported,
+ ieee->pHTInfo->bEnableHT,
+ ieee->current_network.bssht.bdSupportHT);
//ieee->pHTInfo->IOTAction = 0;
HTResetIOTSetting(ieee->pHTInfo);
@@ -1421,11 +1423,13 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
(ieee->modulation & IEEE80211_OFDM_MODULATION)) {
ieee->rate = 108;
ieee->SetWirelessMode(ieee->dev, IEEE_G);
- printk(KERN_INFO"Using G rates\n");
+ netdev_info(ieee->dev, "Using G rates\n");
} else {
ieee->rate = 22;
ieee->SetWirelessMode(ieee->dev, IEEE_B);
- printk(KERN_INFO"Using B rates\n");
+ netdev_info(ieee->dev, "Using B rates\n");
}
memset(ieee->dot11HTOperationalRateSet, 0, 16);
//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
@@ -1622,7 +1626,7 @@ ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
if (assoc_rq_parse(skb, dest) != -1)
ieee80211_resp_to_assoc_rq(ieee, dest);
- printk(KERN_INFO"New client associated: %pM\n", dest);
+ netdev_info(ieee->dev, "New client associated: %pM\n", dest);
//FIXME
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index b1baaa18b129..f434a26cdb2f 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -459,8 +459,8 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
else
ieee->raw_tx = 0;
- printk(KERN_INFO"raw TX is %s\n",
- ieee->raw_tx ? "enabled" : "disabled");
+ netdev_info(ieee->dev, "raw TX is %s\n",
+ ieee->raw_tx ? "enabled" : "disabled");
if (ieee->iw_mode == IW_MODE_MONITOR) {
if (prev == 0 && ieee->raw_tx) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index f0b6b8372f91..0ee054d82832 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -180,9 +180,8 @@ int ieee80211_encrypt_fragment(
struct rtl_80211_hdr_3addrqos *header;
header = (struct rtl_80211_hdr_3addrqos *)frag->data;
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
- "TX packet to %pM\n",
- ieee->dev->name, header->addr1);
+ netdev_dbg(ieee->dev, "TKIP countermeasures: dropped "
+ "TX packet to %pM\n", header->addr1);
}
return -1;
}
@@ -204,8 +203,8 @@ int ieee80211_encrypt_fragment(
atomic_dec(&crypt->refcnt);
if (res < 0) {
- printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
- ieee->dev->name, frag->len);
+ netdev_info(ieee->dev, "Encryption failed: len=%d.\n",
+ frag->len);
ieee->ieee_stats.tx_discards++;
return -1;
}
@@ -557,16 +556,15 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
- printk(KERN_WARNING "%s: No xmit handler.\n",
- ieee->dev->name);
+ netdev_warn(ieee->dev, "No xmit handler.\n");
goto success;
}
if (likely(ieee->raw_tx == 0)) {
if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
- printk(KERN_WARNING "%s: skb too small (%d).\n",
- ieee->dev->name, skb->len);
+ netdev_warn(ieee->dev, "skb too small (%d).\n",
+ skb->len);
goto success;
}
@@ -685,8 +683,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
*/
txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
if (unlikely(!txb)) {
- printk(KERN_WARNING "%s: Could not allocate TXB\n",
- ieee->dev->name);
+ netdev_warn(ieee->dev, "Could not allocate TXB\n");
goto failed;
}
txb->encrypted = encrypt;
@@ -779,15 +776,14 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
}
} else {
if (unlikely(skb->len < sizeof(struct rtl_80211_hdr_3addr))) {
- printk(KERN_WARNING "%s: skb too small (%d).\n",
- ieee->dev->name, skb->len);
+ netdev_warn(ieee->dev, "skb too small (%d).\n",
+ skb->len);
goto success;
}
txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
if (!txb) {
- printk(KERN_WARNING "%s: Could not allocate TXB\n",
- ieee->dev->name);
+ netdev_warn(ieee->dev, "Could not allocate TXB\n");
goto failed;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 33c596f9ec96..22373c0afebc 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -356,9 +356,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
kfree(new_crypt);
new_crypt = NULL;
- printk(KERN_WARNING "%s: could not initialize WEP: "
- "load module ieee80211_crypt_wep\n",
- dev->name);
+ netdev_warn(dev, "could not initialize WEP: "
+ "load module ieee80211_crypt_wep\n");
return -EOPNOTSUPP;
}
*crypt = new_crypt;
@@ -436,7 +435,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
- printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
+ netdev_dbg(ieee->dev, "reset_port failed\n");
return -EINVAL;
}
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 5cee1031a27c..3aabb401b15a 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -380,7 +380,7 @@ bool GetTs(
}
IEEE80211_DEBUG(IEEE80211_DL_TS, "to init current TS, UP:%d, Dir:%d, addr:%pM\n", UP, Dir, Addr);
- // Prepare TS Info releated field
+ // Prepare TS Info related field
pTSInfo->uc_traffic_type = 0; // Traffic type: WMM is reserved in this field
pTSInfo->uc_tsid = UP; // TSID
pTSInfo->uc_direction = Dir; // Direction: if there is DirectLink, this need additional consideration.
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 89dd1fb0b38d..fcfb9024a83f 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -613,13 +613,13 @@ static void rtl8192_proc_init_one(struct net_device *dev)
if (!dir)
return;
- proc_create_single("stats-rx", S_IFREG | S_IRUGO, dir,
+ proc_create_single("stats-rx", S_IFREG | 0444, dir,
proc_get_stats_rx);
- proc_create_single("stats-tx", S_IFREG | S_IRUGO, dir,
+ proc_create_single("stats-tx", S_IFREG | 0444, dir,
proc_get_stats_tx);
- proc_create_single("stats-ap", S_IFREG | S_IRUGO, dir,
+ proc_create_single("stats-ap", S_IFREG | 0444, dir,
proc_get_stats_ap);
- proc_create_single("registers", S_IFREG | S_IRUGO, dir,
+ proc_create_single("registers", S_IFREG | 0444, dir,
proc_get_registers);
}
@@ -4877,50 +4877,50 @@ void EnableHWSecurityConfig8192(struct net_device *dev)
write_nic_byte(dev, SECR, SECR_value);
}
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
- u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
+void setKey(struct net_device *dev, u8 entryno, u8 keyindex, u16 keytype,
+ u8 *macaddr, u8 defaultkey, u32 *keycontent)
{
- u32 TargetCommand = 0;
- u32 TargetContent = 0;
- u16 usConfig = 0;
+ u32 target_command = 0;
+ u32 target_content = 0;
+ u16 us_config = 0;
u8 i;
- if (EntryNo >= TOTAL_CAM_ENTRY)
- RT_TRACE(COMP_ERR, "cam entry exceeds in setKey()\n");
+ if (entryno >= TOTAL_CAM_ENTRY)
+ RT_TRACE(COMP_ERR, "cam entry exceeds in %s\n", __func__);
RT_TRACE(COMP_SEC,
- "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n",
- dev, EntryNo, KeyIndex, KeyType, MacAddr);
+ "====>to %s, dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n",
+ __func__, dev, entryno, keyindex, keytype, macaddr);
- if (DefaultKey)
- usConfig |= BIT(15) | (KeyType << 2);
+ if (defaultkey)
+ us_config |= BIT(15) | (keytype << 2);
else
- usConfig |= BIT(15) | (KeyType << 2) | KeyIndex;
+ us_config |= BIT(15) | (keytype << 2) | keyindex;
for (i = 0; i < CAM_CONTENT_COUNT; i++) {
- TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
- TargetCommand |= BIT(31) | BIT(16);
+ target_command = i + CAM_CONTENT_COUNT * entryno;
+ target_command |= BIT(31) | BIT(16);
if (i == 0) { /* MAC|Config */
- TargetContent = (u32)(*(MacAddr + 0)) << 16 |
- (u32)(*(MacAddr + 1)) << 24 |
- (u32)usConfig;
+ target_content = (u32)(*(macaddr + 0)) << 16 |
+ (u32)(*(macaddr + 1)) << 24 |
+ (u32)us_config;
- write_nic_dword(dev, WCAMI, TargetContent);
- write_nic_dword(dev, RWCAM, TargetCommand);
+ write_nic_dword(dev, WCAMI, target_content);
+ write_nic_dword(dev, RWCAM, target_command);
} else if (i == 1) { /* MAC */
- TargetContent = (u32)(*(MacAddr + 2)) |
- (u32)(*(MacAddr + 3)) << 8 |
- (u32)(*(MacAddr + 4)) << 16 |
- (u32)(*(MacAddr + 5)) << 24;
- write_nic_dword(dev, WCAMI, TargetContent);
- write_nic_dword(dev, RWCAM, TargetCommand);
+ target_content = (u32)(*(macaddr + 2)) |
+ (u32)(*(macaddr + 3)) << 8 |
+ (u32)(*(macaddr + 4)) << 16 |
+ (u32)(*(macaddr + 5)) << 24;
+ write_nic_dword(dev, WCAMI, target_content);
+ write_nic_dword(dev, RWCAM, target_command);
} else {
/* Key Material */
- if (KeyContent) {
+ if (keycontent) {
write_nic_dword(dev, WCAMI,
- *(KeyContent + i - 2));
- write_nic_dword(dev, RWCAM, TargetCommand);
+ *(keycontent + i - 2));
+ write_nic_dword(dev, RWCAM, target_command);
}
}
}
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 0118edb0b9ab..100532598781 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -588,7 +588,7 @@ static int r8192_wx_set_enc(struct net_device *dev,
hwkey); /* KeyContent */
} else {
- printk("wrong type in WEP, not WEP40 and WEP104\n");
+ netdev_warn(dev, "wrong type in WEP, neither WEP40 nor WEP104\n");
}
}
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 555e52522be6..37b99cf4b35f 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1531,7 +1531,7 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand,
- priv->nCur40MhzPrimeSC>>1);
+ priv->nCur40MhzPrimeSC >> 1);
rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00,
priv->nCur40MhzPrimeSC);
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index b6dcda77db1f..c62747c90968 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -6,13 +6,16 @@ config R8712U
select WEXT_PRIV
select FW_LOADER
help
- This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
+ This option adds support for the Realtek RTL8712 USB
+ device such as the D-Link DWA-130.
+
If built as a module, it will be called r8712u.
config R8712_TX_AGGR
bool "Realtek RTL8712U Transmit Aggregation code"
depends on R8712U && BROKEN
help
- This option provides transmit aggregation for the Realtek RTL8712 USB device.
+ This option provides transmit aggregation for the Realtek
+ RTL8712 USB device.
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index 8098f6905554..dabaa8fd34fb 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -535,7 +535,7 @@ struct ieee80211_info_element_hdr {
struct ieee80211_info_element {
u8 id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
/*
@@ -597,7 +597,7 @@ struct ieee80211_txb {
u16 reserved;
u16 frag_size;
u16 payload_size;
- struct sk_buff *fragments[0];
+ struct sk_buff *fragments[];
};
/* SWEEP TABLE ENTRIES NUMBER*/
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 98d7fbfce1a5..254182a6ce8e 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -478,7 +478,7 @@ struct drvint_cmd_parm {
unsigned char *pbuf;
};
-/*------------------- Below are used for RF/BB tunning ---------------------*/
+/*------------------- Below are used for RF/BB tuning ---------------------*/
struct setantenna_parm {
u8 tx_antset;
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 1a39a96b726f..24020257bc58 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -44,9 +44,9 @@ static int init_mp_priv(struct mp_priv *pmp_priv)
pmp_priv->pallocated_mp_xmitframe_buf = kmalloc(NR_MP_XMITFRAME *
sizeof(struct mp_xmit_frame) + 4,
GFP_ATOMIC);
- if (!pmp_priv->pallocated_mp_xmitframe_buf) {
+ if (!pmp_priv->pallocated_mp_xmitframe_buf)
return -ENOMEM;
- }
+
pmp_priv->pmp_xmtframe_buf = pmp_priv->pallocated_mp_xmitframe_buf +
4 -
((addr_t)(pmp_priv->pallocated_mp_xmitframe_buf) & 3);
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 64e2ae436625..59fa6664d868 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -48,7 +48,7 @@ struct eeprom_rw_param {
struct EFUSE_ACCESS_STRUCT {
u16 start_addr;
u16 cnts;
- u8 data[0];
+ u8 data[];
};
struct burst_rw_reg {
@@ -324,7 +324,7 @@ struct mp_ioctl_handler {
struct mp_ioctl_param {
unsigned int subcode;
unsigned int len;
- unsigned char data[0];
+ unsigned char data[];
};
#define GEN_MP_IOCTL_SUBCODE(code) _MP_IOCTL_ ## code ## _CMD_
diff --git a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
index d479f739ff08..ca5072e11e22 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
@@ -30,7 +30,7 @@
/*--------------------------Define Parameters-------------------------------*/
/*============================================================
- * 8192S Regsiter offset definition
+ * 8192S Register offset definition
*============================================================
*
*
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index 0146a774e19d..e93f356ed2b0 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -53,7 +53,7 @@ struct rx_pkt_attrib {
u8 privacy; /* in frame_ctrl field */
u8 bdecrypted;
int hdrlen; /* the WLAN Header Len */
- int encrypt; /* 0 no encrypt. != 0 encrypt algorith */
+ int encrypt; /* 0 no encrypt. != 0 encrypt algorithm */
int iv_len;
int icv_len;
int priority;
@@ -105,7 +105,7 @@ struct recv_priv {
u8 *precv_buf; /* 4 alignment */
struct __queue free_recv_buf_queue;
u32 free_recv_buf_queue_cnt;
- /* For the phy informatiom */
+ /* For the phy information */
s8 rssi;
u8 signal;
u8 noise;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 7117d16a30f9..a76e81330756 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -2417,7 +2417,7 @@ void stop_ap_mode(struct adapter *padapter)
paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
plist = get_next(plist);
- if (paclnode->valid == true) {
+ if (paclnode->valid) {
paclnode->valid = false;
list_del_init(&paclnode->list);
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 13a9b54b4561..efb5135ad743 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -194,14 +194,16 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3);
- pcmdpriv->cmd_issued_cnt = pcmdpriv->cmd_done_cnt = pcmdpriv->rsp_cnt = 0;
+ pcmdpriv->cmd_issued_cnt = 0;
+ pcmdpriv->cmd_done_cnt = 0;
+ pcmdpriv->rsp_cnt = 0;
mutex_init(&pcmdpriv->sctx_mutex);
exit:
return res;
}
-static void c2h_wk_callback(_workitem * work);
+static void c2h_wk_callback(_workitem *work);
int rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
@@ -360,11 +362,7 @@ exit:
struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
{
- struct cmd_obj *cmd_obj;
-
- cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
-
- return cmd_obj;
+ return _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
}
void rtw_free_cmd_obj(struct cmd_obj *pcmd)
@@ -520,7 +518,7 @@ post_process:
rtw_free_cmd_obj(pcmd);
} else {
/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
- pcmd_callback(pcmd->padapter, pcmd);/* need conider that free cmd_obj in rtw_cmd_callback */
+ pcmd_callback(pcmd->padapter, pcmd);/* need to consider freeing cmd_obj in rtw_cmd_callback */
}
} else {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("%s: cmdcode = 0x%x callback not defined!\n", __func__, pcmd->cmdcode));
@@ -989,7 +987,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
}
- /* jeff: set this becasue at least sw key is ready */
+ /* jeff: set this because at least sw key is ready */
padapter->securitypriv.busetkipkey = true;
if (enqueue) {
@@ -2138,7 +2136,8 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *
goto exit;
}
- psta->aid = psta->mac_id = passocsta_rsp->cam_id;
+ psta->aid = passocsta_rsp->cam_id;
+ psta->mac_id = passocsta_rsp->cam_id;
spin_lock_bh(&pmlmepriv->lock);
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index 3b8848182221..8794638468e6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -43,9 +43,8 @@ Efuse_Read1ByteFromFakeContent(
u16 Offset,
u8 *Value)
{
- if (Offset >= EFUSE_MAX_HW_SIZE) {
+ if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
- }
/* DbgPrint("Read fake content, offset = %d\n", Offset); */
if (fakeEfuseBank == 0)
*Value = fakeEfuseContent[Offset];
@@ -65,14 +64,12 @@ Efuse_Write1ByteToFakeContent(
u16 Offset,
u8 Value)
{
- if (Offset >= EFUSE_MAX_HW_SIZE) {
+ if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
- }
if (fakeEfuseBank == 0)
fakeEfuseContent[Offset] = Value;
- else {
+ else
fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value;
- }
return true;
}
@@ -244,10 +241,8 @@ u16 Address)
while (!(Bytetemp & 0x80)) {
Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
k++;
- if (k == 1000) {
- k = 0;
+ if (k == 1000)
break;
- }
}
return rtw_read8(Adapter, EFUSE_CTRL);
} else
@@ -271,8 +266,7 @@ bool bPseudoTest)
/* DBG_871X("===> EFUSE_OneByteRead() start, 0x34 = 0x%X\n", rtw_read32(padapter, EFUSE_TEST)); */
if (bPseudoTest) {
- bResult = Efuse_Read1ByteFromFakeContent(padapter, addr, data);
- return bResult;
+ return Efuse_Read1ByteFromFakeContent(padapter, addr, data);
}
/* <20130121, Kordan> For SMIC EFUSE specificatoin. */
@@ -324,8 +318,7 @@ bool bPseudoTest)
/* DBG_871X("===> EFUSE_OneByteWrite() start, 0x34 = 0x%X\n", rtw_read32(padapter, EFUSE_TEST)); */
if (bPseudoTest) {
- bResult = Efuse_Write1ByteToFakeContent(padapter, addr, data);
- return bResult;
+ return Efuse_Write1ByteToFakeContent(padapter, addr, data);
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 6018d877a8a6..ca98274ae390 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -217,7 +217,7 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
* rtw_ies_remove_ie - Find matching IEs and remove
* @ies: Address of IEs to search
* @ies_len: Pointer of length of ies, will update to new length
- * @offset: The offset to start scarch
+ * @offset: The offset to start search
* @eid: Element ID to match
* @oui: OUI to match
* @oui_len: OUI length
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index 57168578663a..c3f63f903547 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -37,7 +37,6 @@ jackson@realtek.com.tw
u8 _rtw_read8(struct adapter *adapter, u32 addr)
{
- u8 r_val;
/* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
@@ -45,8 +44,7 @@ u8 _rtw_read8(struct adapter *adapter, u32 addr)
_read8 = pintfhdl->io_ops._read8;
- r_val = _read8(pintfhdl, addr);
- return r_val;
+ return _read8(pintfhdl, addr);
}
u16 _rtw_read16(struct adapter *adapter, u32 addr)
@@ -142,13 +140,10 @@ u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
- u32 ret;
_write_port = pintfhdl->io_ops._write_port;
- ret = _write_port(pintfhdl, addr, cnt, pmem);
-
- return ret;
+ return _write_port(pintfhdl, addr, cnt, pmem);
}
int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapter *padapter, struct _io_ops *pops))
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index eb08569db5ea..8b5f6a66bfb8 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -439,7 +439,7 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
- rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have chked whether issue dis-assoc_cmd or not */
+ rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have checked whether to issue dis-assoc_cmd or not */
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 71fcb466019a..d7a58af76ea0 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -2772,16 +2772,7 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
/* maybe needs check if ap supports rx ampdu. */
if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1) {
- if (pregistrypriv->wifi_spec == 1) {
- /* remove this part because testbed AP should disable RX AMPDU */
- /* phtpriv->ampdu_enable = false; */
- phtpriv->ampdu_enable = true;
- } else {
- phtpriv->ampdu_enable = true;
- }
- } else if (pregistrypriv->ampdu_enable == 2) {
- /* remove this part because testbed AP should disable RX AMPDU */
- /* phtpriv->ampdu_enable = true; */
+ phtpriv->ampdu_enable = true;
}
/* check Max Rx A-MPDU Size */
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index c642825ca8ef..8f9da1d49343 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -219,6 +219,7 @@ static RT_CHANNEL_PLAN_MAP RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02};
int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch)
{
int i;
+
for (i = 0; ch_set[i].ChannelNum != 0; i++) {
if (ch == ch_set[i].ChannelNum)
break;
@@ -2184,6 +2185,7 @@ unsigned int OnAction_sa_query(struct adapter *padapter, union recv_frame *precv
}
if (0) {
int pp;
+
printk("pattrib->pktlen = %d =>", pattrib->pkt_len);
for (pp = 0; pp < pattrib->pkt_len; pp++)
printk(" %02x ", pframe[pp]);
@@ -2486,6 +2488,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
/* DBG_871X("ie len =%d\n", cur_network->IELength); */
{
int len_diff;
+
memcpy(pframe, cur_network->IEs, cur_network->IELength);
len_diff = update_hidden_ssid(
pframe+_BEACON_IE_OFFSET_
@@ -2500,6 +2503,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
u8 *wps_ie;
uint wps_ielen;
u8 sr = 0;
+
wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0) {
@@ -2700,6 +2704,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
if (ssid_ie && cur_network->Ssid.SsidLength) {
uint remainder_ielen;
u8 *remainder_ie;
+
remainder_ie = ssid_ie+2;
remainder_ielen = (pframe-remainder_ie);
@@ -4280,6 +4285,7 @@ void site_survey(struct adapter *padapter)
{
struct rtw_ieee80211_channel *ch;
+
if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) {
ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
survey_channel = ch->hw_value;
@@ -4323,6 +4329,7 @@ void site_survey(struct adapter *padapter)
if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
{
int i;
+
for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
/* IOT issue, When wifi_spec is not set, send one probe req without WPS IE. */
@@ -4351,6 +4358,7 @@ void site_survey(struct adapter *padapter)
#if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
{
struct noise_info info;
+
info.bPauseDIG = false;
info.IGIValue = 0;
info.max_time = channel_scan_time_ms/2;/* ms */
@@ -4518,6 +4526,7 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
p = rtw_get_ie(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset);
if (p) {
struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
+
bssid->Configuration.DSConfig = HT_info->primary_channel;
} else { /* use current channel */
bssid->Configuration.DSConfig = rtw_get_oper_ch(padapter);
@@ -4551,6 +4560,7 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
if (p && len > 0) {
struct HT_caps_element *pHT_caps;
+
pHT_caps = (struct HT_caps_element *)(p + 2);
if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & BIT(14))
@@ -4584,6 +4594,7 @@ void start_create_ibss(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+
pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
@@ -5388,6 +5399,7 @@ static void rtw_mlmeext_disconnect(struct adapter *padapter)
/* wakeup macid after disconnect. */
{
struct sta_info *psta;
+
psta = rtw_get_stainfo(&padapter->stapriv, get_my_bssid(pnetwork));
if (psta)
rtw_hal_macid_wakeup(padapter, psta->mac_id);
@@ -5425,6 +5437,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
struct sta_priv *pstapriv = &padapter->stapriv;
u8 join_type;
struct sta_info *psta;
+
if (join_res < 0) {
join_type = 1;
rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
@@ -6047,6 +6060,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
if (pmlmeinfo->state == WIFI_FW_AP_STATE) {
struct wlan_bssid_ex *network = &padapter->mlmepriv.cur_network.network;
+
start_bss_network(padapter, (u8 *)network);
return H2C_SUCCESS;
}
@@ -6810,6 +6824,7 @@ u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf)
if ((padapter->rtw_wdev != NULL) && (padapter->rtw_wdev->wiphy)) {
struct regulatory_request request;
+
request.initiator = NL80211_REGDOM_SET_BY_DRIVER;
rtw_reg_notifier(padapter->rtw_wdev->wiphy, &request);
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index 30137f0bd984..6ac9184d59a6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -1193,7 +1193,7 @@ inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
/*
* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
* @adapter: pointer to struct adapter structure
-* @ips_deffer_ms: the ms wiil prevent from falling into IPS after wakeup
+* @ips_deffer_ms: the time in ms to prevent falling into IPS after wakeup
* Return _SUCCESS or _FAIL
*/
@@ -1390,10 +1390,5 @@ void rtw_ps_deny_cancel(struct adapter *padapter, enum PS_DENY_REASON reason)
*/
u32 rtw_ps_deny_get(struct adapter *padapter)
{
- u32 deny;
-
-
- deny = adapter_to_pwrctl(padapter)->ps_deny;
-
- return deny;
+ return adapter_to_pwrctl(padapter)->ps_deny;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 7fa8c84cf5f4..5245098b9ecf 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -1179,7 +1179,7 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_
/* DBG_871X("after handling ps-poll, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
/* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, true);
}
@@ -1205,7 +1205,7 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
/* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, true);
}
@@ -1953,7 +1953,7 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
for (i = 0; i < nr_subframes; i++) {
sub_pkt = subframes[i];
- /* Indicat the packets to upper layer */
+ /* Indicate the packets to upper layer */
if (sub_pkt) {
rtw_os_recv_indicate_pkt(padapter, sub_pkt, &prframe->u.hdr.attrib);
}
@@ -2606,14 +2606,14 @@ static void rtw_signal_stat_timer_hdl(struct timer_list *t)
if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */
avg_signal_strength = recvpriv->signal_strength_data.avg_val;
num_signal_strength = recvpriv->signal_strength_data.total_num;
- /* after avg_vals are accquired, we can re-stat the signal values */
+ /* after avg_vals are acquired, we can re-stat the signal values */
recvpriv->signal_strength_data.update_req = 1;
}
if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */
avg_signal_qual = recvpriv->signal_qual_data.avg_val;
num_signal_qual = recvpriv->signal_qual_data.total_num;
- /* after avg_vals are accquired, we can re-stat the signal values */
+ /* after avg_vals are acquired, we can re-stat the signal values */
recvpriv->signal_qual_data.update_req = 1;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 9c4607114cea..5ebf691bd743 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -434,7 +434,6 @@ void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_cod
rtw_secmicappend(&micdata, &header[16], 6);
else
rtw_secmicappend(&micdata, &header[10], 6);
-
}
rtw_secmicappend(&micdata, &priority[0], 4);
@@ -723,7 +722,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
TKIP_SW_ENC_CNT_INC(psecuritypriv, pattrib->ra);
}
-
}
return res;
}
@@ -829,11 +827,9 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo == NULL!!!\n", __func__));
res = _FAIL;
}
-
}
exit:
return res;
-
}
@@ -1219,7 +1215,6 @@ static void construct_mic_header2(
if (!qc_exists && a4_exists) {
for (i = 0; i < 6; i++)
mic_header2[8+i] = mpdu[24+i]; /* A4 */
-
}
if (qc_exists && !a4_exists) {
@@ -1234,7 +1229,6 @@ static void construct_mic_header2(
mic_header2[14] = mpdu[30] & 0x0f;
mic_header2[15] = mpdu[31] & 0x00;
}
-
}
/************************************************/
@@ -1413,7 +1407,6 @@ static sint aes_cipher(u8 *key, uint hdrlen,
}
bitwise_xor(aes_out, padded_buffer, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
-
}
for (j = 0 ; j < 8; j++)
@@ -1719,7 +1712,6 @@ static sint aes_decipher(u8 *key, uint hdrlen,
}
bitwise_xor(aes_out, padded_buffer, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
-
}
for (j = 0; j < 8; j++)
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 9590e6f351c1..110338dbe372 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -326,20 +326,20 @@ inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch)
dvobj->on_oper_ch_time = jiffies;
#ifdef DBG_CH_SWITCH
- cnt += snprintf(msg+cnt, len-cnt, "switch to ch %3u", ch);
+ cnt += scnprintf(msg+cnt, len-cnt, "switch to ch %3u", ch);
for (i = 0; i < dvobj->iface_nums; i++) {
struct adapter *iface = dvobj->padapters[i];
- cnt += snprintf(msg+cnt, len-cnt, " ["ADPT_FMT":", ADPT_ARG(iface));
+ cnt += scnprintf(msg+cnt, len-cnt, " ["ADPT_FMT":", ADPT_ARG(iface));
if (iface->mlmeextpriv.cur_channel == ch)
- cnt += snprintf(msg+cnt, len-cnt, "C");
+ cnt += scnprintf(msg+cnt, len-cnt, "C");
else
- cnt += snprintf(msg+cnt, len-cnt, "_");
+ cnt += scnprintf(msg+cnt, len-cnt, "_");
if (iface->wdinfo.listen_channel == ch && !rtw_p2p_chk_state(&iface->wdinfo, P2P_STATE_NONE))
- cnt += snprintf(msg+cnt, len-cnt, "L");
+ cnt += scnprintf(msg+cnt, len-cnt, "L");
else
- cnt += snprintf(msg+cnt, len-cnt, "_");
- cnt += snprintf(msg+cnt, len-cnt, "]");
+ cnt += scnprintf(msg+cnt, len-cnt, "_");
+ cnt += scnprintf(msg+cnt, len-cnt, "]");
}
DBG_871X(FUNC_ADPT_FMT" %s\n", FUNC_ADPT_ARG(adapter), msg);
@@ -1503,7 +1503,7 @@ void update_beacon_info(struct adapter *padapter, u8 *pframe, uint pkt_len, stru
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- /* to update WMM paramter set while receiving beacon */
+ /* to update WMM parameter set while receiving beacon */
if (!memcmp(pIE->data, WMM_PARA_OUI, 6) && pIE->Length == WLAN_WMM_LEN) /* WMM */
if (WMM_param_handler(padapter, pIE))
report_wmm_edca_update(padapter);
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index fdb585ff5925..571353404a95 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -1180,7 +1180,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
mpdu_len -= pattrib->icv_len;
if (bmcst) {
- /* don't do fragment to broadcat/multicast packets */
+ /* don't do fragment to broadcast/multicast packets */
mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
} else {
mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
@@ -2303,7 +2303,7 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
pstapriv->tim_bitmap |= BIT(psta->aid);
if (update_tim)
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, true);
}
diff --git a/drivers/staging/rtl8723bs/hal/Hal8723BReg.h b/drivers/staging/rtl8723bs/hal/Hal8723BReg.h
index ce02457922b7..b9aca99478db 100644
--- a/drivers/staging/rtl8723bs/hal/Hal8723BReg.h
+++ b/drivers/staging/rtl8723bs/hal/Hal8723BReg.h
@@ -415,13 +415,13 @@
#define IMR_BCNDMAINT3_8723B BIT23 /* Beacon DMA Interrupt 3 */
#define IMR_BCNDMAINT2_8723B BIT22 /* Beacon DMA Interrupt 2 */
#define IMR_BCNDMAINT1_8723B BIT21 /* Beacon DMA Interrupt 1 */
-#define IMR_BCNDOK7_8723B BIT20 /* Beacon Queue DMA OK Interrup 7 */
-#define IMR_BCNDOK6_8723B BIT19 /* Beacon Queue DMA OK Interrup 6 */
-#define IMR_BCNDOK5_8723B BIT18 /* Beacon Queue DMA OK Interrup 5 */
-#define IMR_BCNDOK4_8723B BIT17 /* Beacon Queue DMA OK Interrup 4 */
-#define IMR_BCNDOK3_8723B BIT16 /* Beacon Queue DMA OK Interrup 3 */
-#define IMR_BCNDOK2_8723B BIT15 /* Beacon Queue DMA OK Interrup 2 */
-#define IMR_BCNDOK1_8723B BIT14 /* Beacon Queue DMA OK Interrup 1 */
+#define IMR_BCNDOK7_8723B BIT20 /* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6_8723B BIT19 /* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5_8723B BIT18 /* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4_8723B BIT17 /* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3_8723B BIT16 /* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2_8723B BIT15 /* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1_8723B BIT14 /* Beacon Queue DMA OK Interrupt 1 */
#define IMR_ATIMEND_E_8723B BIT13 /* ATIM Window End Extension for Win7 */
#define IMR_TXERR_8723B BIT11 /* Tx Error Flag Interrupt Status, write 1 clear. */
#define IMR_RXERR_8723B BIT10 /* Rx Error Flag INT Status, Write 1 clear */
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
index dd349c506da8..c60e8c58d9cc 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
@@ -1807,7 +1807,7 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
result = 0;
WaitCount = 0;
} else {
- /* accquire the BT TRx retry count from BT_Info byte2 */
+ /* acquire the BT TRx retry count from BT_Info byte2 */
retryCount = pCoexSta->btRetryCnt;
btInfoExt = pCoexSta->btInfoExt;
/* BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount)); */
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
index 02da0a883594..2779dba92bab 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
@@ -1610,8 +1610,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(13);
else if (maxInterval == 2)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(14);
- else if (maxInterval == 3)
- HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(15);
else
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(15);
} else {
@@ -1619,8 +1617,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(9);
else if (maxInterval == 2)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(10);
- else if (maxInterval == 3)
- HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(11);
else
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(11);
}
@@ -1630,8 +1626,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(5);
else if (maxInterval == 2)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(6);
- else if (maxInterval == 3)
- HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(7);
else
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(7);
} else {
@@ -1639,8 +1633,6 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(1);
else if (maxInterval == 2)
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(2);
- else if (maxInterval == 3)
- HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(3);
else
HAL_BTC8723B2ANT_DMA_DURATION_ADJUST(3);
}
@@ -1654,7 +1646,7 @@ static void halbtc8723b2ant_TdmaDurationAdjust(
result = 0;
WaitCount = 0;
} else {
- /* accquire the BT TRx retry count from BT_Info byte2 */
+ /* acquire the BT TRx retry count from BT_Info byte2 */
retryCount = pCoexSta->btRetryCnt;
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
BTC_PRINT(
diff --git a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
index 7150d54d49ab..c758d143c57f 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
+++ b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
@@ -44,14 +44,14 @@
#define BTC_ANT_WIFI_AT_CPL_MAIN 0
#define BTC_ANT_WIFI_AT_CPL_AUX 1
-typedef enum _BTC_POWERSAVE_TYPE{
+typedef enum _BTC_POWERSAVE_TYPE {
BTC_PS_WIFI_NATIVE = 0, /* wifi original power save behavior */
BTC_PS_LPS_ON = 1,
BTC_PS_LPS_OFF = 2,
BTC_PS_MAX
} BTC_POWERSAVE_TYPE, *PBTC_POWERSAVE_TYPE;
-typedef enum _BTC_BT_REG_TYPE{
+typedef enum _BTC_BT_REG_TYPE {
BTC_BT_REG_RF = 0,
BTC_BT_REG_MODEM = 1,
BTC_BT_REG_BLUEWIZE = 2,
@@ -60,7 +60,7 @@ typedef enum _BTC_BT_REG_TYPE{
BTC_BT_REG_MAX
} BTC_BT_REG_TYPE, *PBTC_BT_REG_TYPE;
-typedef enum _BTC_CHIP_INTERFACE{
+typedef enum _BTC_CHIP_INTERFACE {
BTC_INTF_UNKNOWN = 0,
BTC_INTF_PCI = 1,
BTC_INTF_USB = 2,
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.c b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
index 357802db9aed..7b435840746d 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
@@ -92,7 +92,7 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
u8 *deltaSwingTableIdx_TUP_B;
u8 *deltaSwingTableIdx_TDOWN_B;
- /* 4 2. Initilization (7 steps in total) */
+ /* 4 2. Initialization (7 steps in total) */
ConfigureTxpowerTrack(pDM_Odm, &c);
@@ -213,7 +213,7 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
/* 3 7. If necessary, move the index of swing table to adjust Tx power. */
if (delta > 0 && pDM_Odm->RFCalibrateInfo.TxPowerTrackControl) {
- /* delta" here is used to record the absolute value of differrence. */
+ /* delta" here is used to record the absolute value of difference. */
delta =
ThermalValue > pHalData->EEPROMThermalMeter ?
(ThermalValue - pHalData->EEPROMThermalMeter) :
diff --git a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
index c24156873fed..3b34a516075f 100644
--- a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
@@ -118,7 +118,7 @@ u8 HalPwrSeqCmdParsing(
&GET_PWR_CFG_MASK(PwrCfgCmd)
);
- /* Write the value back to sytem register */
+ /* Write the value back to system register */
rtw_write8(padapter, offset, value);
}
break;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 109bd85b0cd8..02676da5c166 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -961,10 +961,7 @@ exit:
u8 rtw_get_mgntframe_raid(struct adapter *adapter, unsigned char network_type)
{
-
- u8 raid;
- raid = (network_type & WIRELESS_11B) ? RATEID_IDX_B : RATEID_IDX_G;
- return raid;
+ return (network_type & WIRELESS_11B) ? RATEID_IDX_B : RATEID_IDX_G;
}
void rtw_hal_update_sta_rate_mask(struct adapter *padapter, struct sta_info *psta)
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index eb7de3617d83..767e2a784f78 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -1498,9 +1498,7 @@ s8 PHY_GetTxPowerByRate(
return value;
}
- value = pHalData->TxPwrByRateOffset[Band][RFPath][TxNum][rateIndex];
-
- return value;
+ return pHalData->TxPwrByRateOffset[Band][RFPath][TxNum][rateIndex];
}
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index 7d8f21f32fb9..23df729acb7b 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -365,7 +365,7 @@ void rtw_hal_dm_watchdog_in_lps(struct adapter *padapter)
{
if (adapter_to_pwrctl(padapter)->bFwCurrentInPSMode == true) {
if (padapter->HalFunc.hal_dm_watchdog_in_lps)
- padapter->HalFunc.hal_dm_watchdog_in_lps(padapter); /* this fuction caller is in interrupt context */
+ padapter->HalFunc.hal_dm_watchdog_in_lps(padapter); /* this function caller is in interrupt context */
}
}
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index fba3b9e1491b..b77d1fe33a28 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -849,7 +849,7 @@ typedef struct _ODM_PATH_DIVERSITY_ {
u32 PathB_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
} PATHDIV_T, *pPATHDIV_T;
-typedef enum _BASEBAND_CONFIG_PHY_REG_PG_VALUE_TYPE{
+typedef enum _BASEBAND_CONFIG_PHY_REG_PG_VALUE_TYPE {
PHY_REG_PG_RELATIVE_VALUE = 0,
PHY_REG_PG_EXACT_VALUE = 1
} PHY_REG_PG_TYPE;
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
index 95edd148ac24..3ea1972545e5 100644
--- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
@@ -40,16 +40,11 @@ static void odm_SetCrystalCap(void *pDM_VOID, u8 CrystalCap)
static u8 odm_GetDefaultCrytaltalCap(void *pDM_VOID)
{
PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
- u8 CrystalCap = 0x20;
struct adapter *Adapter = pDM_Odm->Adapter;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- CrystalCap = pHalData->CrystalCap;
-
- CrystalCap = CrystalCap & 0x3f;
-
- return CrystalCap;
+ return pHalData->CrystalCap & 0x3f;
}
static void odm_SetATCStatus(void *pDM_VOID, bool ATCStatus)
@@ -303,7 +298,7 @@ void ODM_CfoTracking(void *pDM_VOID)
void ODM_ParsingCFO(void *pDM_VOID, void *pPktinfo_VOID, s8 *pcfotail)
{
PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
- struct odm_packet_info *pPktinfo = (struct odm_packet_info *)pPktinfo_VOID;
+ struct odm_packet_info *pPktinfo = pPktinfo_VOID;
PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack;
u8 i;
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
index 71919a3d81ab..9c190b1024d8 100644
--- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
@@ -103,10 +103,10 @@ static void odm_RxPhyStatus92CSeries_Parsing(
pDM_Odm->PhyDbgInfo.NumQryPhyStatusCCK++;
/* */
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
/* */
- cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a ;
+ cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
/* 2011.11.28 LukeLee: 88E use different LNA & VGA gain table */
/* The RSSI formula should be modified according to the gain table */
@@ -178,7 +178,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(
/* */
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
/* */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1)&0x7f)-110;
diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.h b/drivers/staging/rtl8723bs/hal/odm_debug.h
index 3e58cb806c8c..a7381173d1a3 100644
--- a/drivers/staging/rtl8723bs/hal/odm_debug.h
+++ b/drivers/staging/rtl8723bs/hal/odm_debug.h
@@ -13,7 +13,7 @@
/* Define the debug levels */
/* */
/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
-/* So that, they can help SW engineer to develope or trace states changed */
+/* So that, they can help SW engineer to develop or trace states changed */
/* and also help HW enginner to trace every operation to and from HW, */
/* e.g IO, Tx, Rx. */
/* */
@@ -34,7 +34,7 @@
#define ODM_DBG_SERIOUS 2
/* */
-/* Abnormal, rare, or unexpeted cases. */
+/* Abnormal, rare, or unexpected cases. */
/* For example, */
/* IRP/Packet/OID canceled, */
/* device suprisely unremoved and so on. */
diff --git a/drivers/staging/rtl8723bs/hal/odm_types.h b/drivers/staging/rtl8723bs/hal/odm_types.h
index 28f42c9c3dd7..c79fc1813c3f 100644
--- a/drivers/staging/rtl8723bs/hal/odm_types.h
+++ b/drivers/staging/rtl8723bs/hal/odm_types.h
@@ -28,7 +28,7 @@ typedef enum _HAL_STATUS {
/* */
-/* Declare for ODM spin lock defintion temporarily fro compile pass. */
+/* Declare for ODM spin lock definition temporarily for compile pass. */
/* */
typedef enum _RT_SPINLOCK_TYPE {
RT_TX_SPINLOCK = 1,
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index 71b5a50b6ef6..fd2b74003faf 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -146,7 +146,7 @@ static void ConstructBeacon(struct adapter *padapter, u8 *pframe, u32 *pLength)
SetFrameSubType(pframe, WIFI_BEACON);
pframe += sizeof(struct ieee80211_hdr_3addr);
- pktlen = sizeof (struct ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
/* timestamp will be inserted by hardware */
pframe += 8;
@@ -823,8 +823,11 @@ static void ConstructProbeRsp(struct adapter *padapter, u8 *pframe, u32 *pLength
}
#endif /* CONFIG_AP_WOWLAN */
-/* To check if reserved page content is destroyed by beacon beacuse beacon is too large. */
-/* 2010.06.23. Added by tynli. */
+/*
+ * To check if reserved page content is destroyed by beacon because beacon
+ * is too large.
+ */
+/* 2010.06.23. Added by tynli. */
void CheckFwRsvdPageContent(struct adapter *Adapter)
{
}
@@ -1409,16 +1412,20 @@ void rtl8723b_set_ap_wowlan_cmd(struct adapter *padapter, u8 enable)
}
#endif /* CONFIG_AP_WOWLAN */
-/* */
-/* Description: Fill the reserved packets that FW will use to RSVD page. */
-/* Now we just send 4 types packet to rsvd page. */
-/* (1)Beacon, (2)Ps-poll, (3)Null data, (4)ProbeRsp. */
-/* Input: */
-/* bDLFinished - false: At the first time we will send all the packets as a large packet to Hw, */
-/* so we need to set the packet length to total lengh. */
-/* true: At the second time, we should send the first packet (default:beacon) */
-/* to Hw again and set the lengh in descriptor to the real beacon lengh. */
-/* 2009.10.15 by tynli. */
+/*
+ * Description: Fill the reserved packets that FW will use to RSVD page.
+ * Now we just send 4 types of packets to the rsvd page.
+ * (1)Beacon, (2)Ps-poll, (3)Null data, (4)ProbeRsp.
+ *
+ * Input:
+ *
+ * bDLFinished - false: At the first time we will send all the packets as
+ * a large packet to Hw, so we need to set the packet length to total length.
+ *
+ * true: At the second time, we should send the first packet (default:beacon)
+ * to Hw again and set the length in descriptor to the real beacon length.
+ */
+/* 2009.10.15 by tynli. */
static void rtl8723b_set_FwRsvdPagePkt(
struct adapter *padapter, bool bDLFinished
)
@@ -1599,7 +1606,7 @@ static void rtl8723b_set_FwRsvdPagePkt(
#ifdef CONFIG_GTK_OL
BufIndex += (CurtPktPageNum*PageSize);
- /* if the ap staion info. exists, get the kek, kck from staion info. */
+ /* if the ap station info. exists, get the kek, kck from station info. */
psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
if (!psta) {
memset(kek, 0, RTW_KEK_LEN);
@@ -1652,7 +1659,7 @@ static void rtl8723b_set_FwRsvdPagePkt(
TotalPacketLen = BufIndex-TxDescLen + 256; /* extension memory for FW */
#else
- TotalPacketLen = BufIndex-TxDescLen + sizeof (union pn48); /* IV len */
+ TotalPacketLen = BufIndex - TxDescLen + sizeof(union pn48); /* IV len */
#endif /* CONFIG_GTK_OL */
} else
#endif /* CONFIG_WOWLAN */
@@ -1791,18 +1798,19 @@ error:
}
#ifdef CONFIG_AP_WOWLAN
-/* */
-/* Description: Fill the reserved packets that FW will use to RSVD page. */
-/* Now we just send 2 types packet to rsvd page. (1)Beacon, (2)ProbeRsp. */
-/* */
-/* Input: bDLFinished */
-/* */
-/* false: At the first time we will send all the packets as a large packet to Hw, */
-/* so we need to set the packet length to total lengh. */
-/* */
-/* true: At the second time, we should send the first packet (default:beacon) */
-/* to Hw again and set the lengh in descriptor to the real beacon lengh. */
-/* 2009.10.15 by tynli. */
+/*
+ * Description: Fill the reserved packets that FW will use to RSVD page.
+ * Now we just send 2 types of packets to the rsvd page. (1)Beacon, (2)ProbeRsp.
+ *
+ * Input: bDLFinished
+ *
+ * false: At the first time we will send all the packets as a large packet to
+ * Hw, so we need to set the packet length to total length.
+ *
+ * true: At the second time, we should send the first packet (default:beacon)
+ * to Hw again and set the length in descriptor to the real beacon length.
+ */
+/* 2009.10.15 by tynli. */
static void rtl8723b_set_AP_FwRsvdPagePkt(
struct adapter *padapter, bool bDLFinished
)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 1e8b61443408..c3051ebaeb78 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -192,7 +192,7 @@ static inline union recv_frame *try_alloc_recvframe(struct recv_priv *precvpriv,
rtw_enqueue_recvbuf_to_head(precvbuf,
&precvpriv->recv_buf_pending_queue);
- /* The case of can't allocte recvframe should be temporary, */
+ /* The case where we can't allocate a recvframe should be temporary, */
/* schedule again and hope recvframe is available next time. */
tasklet_schedule(&precvpriv->recv_tasklet);
}
@@ -480,10 +480,8 @@ initbuferror:
precvpriv->precv_buf = NULL;
}
- if (precvpriv->pallocated_recv_buf) {
- kfree(precvpriv->pallocated_recv_buf);
- precvpriv->pallocated_recv_buf = NULL;
- }
+ kfree(precvpriv->pallocated_recv_buf);
+ precvpriv->pallocated_recv_buf = NULL;
exit:
return res;
@@ -518,8 +516,6 @@ void rtl8723bs_free_recv_priv(struct adapter *padapter)
precvpriv->precv_buf = NULL;
}
- if (precvpriv->pallocated_recv_buf) {
- kfree(precvpriv->pallocated_recv_buf);
- precvpriv->pallocated_recv_buf = NULL;
- }
+ kfree(precvpriv->pallocated_recv_buf);
+ precvpriv->pallocated_recv_buf = NULL;
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index b6d56cfb0a19..44799c4a9f35 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -282,7 +282,7 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
/* check xmit_buf size enough or not */
txlen = txdesc_size + rtw_wlan_pkt_size(pxmitframe);
- if( !pxmitbuf ||
+ if (!pxmitbuf ||
((_RND(pxmitbuf->len, 8) + txlen) > max_xmit_len) ||
(k >= (rtw_hal_sdio_max_txoqt_free_space(padapter) - 1))
) {
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index e813382e78a6..7853af53051d 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -235,7 +235,7 @@ static void _InitQueueReservedPage(struct adapter *padapter)
if (pHalData->OutEpQueueSel & TX_SELE_LQ)
numLQ = bWiFiConfig ? WMM_NORMAL_PAGE_NUM_LPQ_8723B : NORMAL_PAGE_NUM_LPQ_8723B;
- /* NOTE: This step shall be proceed before writting REG_RQPN. */
+ /* NOTE: This step shall be done before writing REG_RQPN. */
if (pHalData->OutEpQueueSel & TX_SELE_NQ)
numNQ = bWiFiConfig ? WMM_NORMAL_PAGE_NUM_NPQ_8723B : NORMAL_PAGE_NUM_NPQ_8723B;
@@ -551,18 +551,8 @@ static void HalRxAggr8723BSdio(struct adapter *padapter)
pregistrypriv = &padapter->registrypriv;
- if (pregistrypriv->wifi_spec) {
- /* 2010.04.27 hpfan */
- /* Adjust RxAggrTimeout to close to zero disable RxAggr, suggested by designer */
- /* Timeout value is calculated by 34 / (2^n) */
- valueDMATimeout = 0x06;
- valueDMAPageCount = 0x06;
- } else {
- /* 20130530, Isaac@SD1 suggest 3 kinds of parameter */
- /* TX/RX Balance */
- valueDMATimeout = 0x06;
- valueDMAPageCount = 0x06;
- }
+ valueDMATimeout = 0x06;
+ valueDMAPageCount = 0x06;
rtw_write8(padapter, REG_RXDMA_AGG_PG_TH + 1, valueDMATimeout);
rtw_write8(padapter, REG_RXDMA_AGG_PG_TH, valueDMAPageCount);
diff --git a/drivers/staging/rtl8723bs/include/HalVerDef.h b/drivers/staging/rtl8723bs/include/HalVerDef.h
index 160f34efbfd5..c548fb126683 100644
--- a/drivers/staging/rtl8723bs/include/HalVerDef.h
+++ b/drivers/staging/rtl8723bs/include/HalVerDef.h
@@ -20,7 +20,7 @@ typedef enum tag_HAL_IC_Type_Definition
CHIP_8821 = 7,
CHIP_8723B = 8,
CHIP_8192E = 9,
-}HAL_IC_TYPE_E;
+} HAL_IC_TYPE_E;
/* HAL_CHIP_TYPE_E */
typedef enum tag_HAL_CHIP_Type_Definition
@@ -28,7 +28,7 @@ typedef enum tag_HAL_CHIP_Type_Definition
TEST_CHIP = 0,
NORMAL_CHIP = 1,
FPGA = 2,
-}HAL_CHIP_TYPE_E;
+} HAL_CHIP_TYPE_E;
/* HAL_CUT_VERSION_E */
typedef enum tag_HAL_Cut_Version_Definition
@@ -44,7 +44,7 @@ typedef enum tag_HAL_Cut_Version_Definition
I_CUT_VERSION = 8,
J_CUT_VERSION = 9,
K_CUT_VERSION = 10,
-}HAL_CUT_VERSION_E;
+} HAL_CUT_VERSION_E;
/* HAL_Manufacturer */
typedef enum tag_HAL_Manufacturer_Version_Definition
@@ -52,7 +52,7 @@ typedef enum tag_HAL_Manufacturer_Version_Definition
CHIP_VENDOR_TSMC = 0,
CHIP_VENDOR_UMC = 1,
CHIP_VENDOR_SMIC = 2,
-}HAL_VENDOR_E;
+} HAL_VENDOR_E;
typedef enum tag_HAL_RF_Type_Definition
{
@@ -64,7 +64,7 @@ typedef enum tag_HAL_RF_Type_Definition
RF_TYPE_3T3R = 5,
RF_TYPE_3T4R = 6,
RF_TYPE_4T4R = 7,
-}HAL_RF_TYPE_E;
+} HAL_RF_TYPE_E;
typedef struct tag_HAL_VERSION
{
@@ -74,14 +74,14 @@ typedef struct tag_HAL_VERSION
HAL_VENDOR_E VendorType;
HAL_RF_TYPE_E RFType;
u8 ROMVer;
-}HAL_VERSION,*PHAL_VERSION;
+} HAL_VERSION, *PHAL_VERSION;
/* VERSION_8192C VersionID; */
/* HAL_VERSION VersionID; */
/* Get element */
-#define GET_CVID_IC_TYPE(version) ((HAL_IC_TYPE_E)((version).ICType) )
-#define GET_CVID_CHIP_TYPE(version) ((HAL_CHIP_TYPE_E)((version).ChipType) )
+#define GET_CVID_IC_TYPE(version) ((HAL_IC_TYPE_E)((version).ICType))
+#define GET_CVID_CHIP_TYPE(version) ((HAL_CHIP_TYPE_E)((version).ChipType))
#define GET_CVID_RF_TYPE(version) ((HAL_RF_TYPE_E)((version).RFType))
#define GET_CVID_MANUFACTUER(version) ((HAL_VENDOR_E)((version).VendorType))
#define GET_CVID_CUT_VERSION(version) ((HAL_CUT_VERSION_E)((version).CUTVersion))
@@ -93,8 +93,8 @@ typedef struct tag_HAL_VERSION
/* HAL_VERSION VersionID */
/* HAL_CHIP_TYPE_E */
-#define IS_TEST_CHIP(version) ((GET_CVID_CHIP_TYPE(version) ==TEST_CHIP)? true: false)
-#define IS_NORMAL_CHIP(version) ((GET_CVID_CHIP_TYPE(version) ==NORMAL_CHIP)? true: false)
+#define IS_TEST_CHIP(version) ((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false)
+#define IS_NORMAL_CHIP(version) ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false)
/* HAL_CUT_VERSION_E */
#define IS_A_CUT(version) ((GET_CVID_CUT_VERSION(version) == A_CUT_VERSION) ? true : false)
@@ -107,13 +107,13 @@ typedef struct tag_HAL_VERSION
#define IS_K_CUT(version) ((GET_CVID_CUT_VERSION(version) == K_CUT_VERSION) ? true : false)
/* HAL_VENDOR_E */
-#define IS_CHIP_VENDOR_TSMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC)? true: false)
-#define IS_CHIP_VENDOR_UMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC)? true: false)
-#define IS_CHIP_VENDOR_SMIC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_SMIC)? true: false)
+#define IS_CHIP_VENDOR_TSMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false)
+#define IS_CHIP_VENDOR_UMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false)
+#define IS_CHIP_VENDOR_SMIC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_SMIC) ? true : false)
/* HAL_RF_TYPE_E */
-#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R)? true : false)
-#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)? true : false)
-#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)? true : false)
+#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R) ? true : false)
+#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false)
+#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false)
#endif
diff --git a/drivers/staging/rtl8723bs/include/cmd_osdep.h b/drivers/staging/rtl8723bs/include/cmd_osdep.h
index d3af9f44ad59..5506f513dc01 100644
--- a/drivers/staging/rtl8723bs/include/cmd_osdep.h
+++ b/drivers/staging/rtl8723bs/include/cmd_osdep.h
@@ -10,8 +10,8 @@
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
int rtw_init_evt_priv(struct evt_priv *pevtpriv);
-extern void _rtw_free_evt_priv (struct evt_priv *pevtpriv);
-extern void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv);
+extern void _rtw_free_evt_priv(struct evt_priv *pevtpriv);
+extern void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv);
int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj);
extern struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue);
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 6ec9087f2eb1..dba75216cbfe 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -77,7 +77,7 @@ enum _NIC_VERSION {
#define SPEC_DEV_ID_RF_CONFIG_2T2R BIT(4)
#define SPEC_DEV_ID_ASSIGN_IFNAME BIT(5)
-struct specific_device_id{
+struct specific_device_id {
u32 flags;
@@ -151,8 +151,8 @@ struct registry_priv
u8 lowrate_two_xmit;
- u8 rf_config ;
- u8 low_power ;
+ u8 rf_config;
+ u8 low_power;
u8 wifi_spec;/* !turbo_mode */
@@ -498,11 +498,11 @@ enum ADAPTER_TYPE {
MAX_ADAPTER = 0xFF,
};
-typedef enum _DRIVER_STATE{
+typedef enum _DRIVER_STATE {
DRIVER_NORMAL = 0,
DRIVER_DISAPPEAR = 1,
DRIVER_REPLACE_DONGLE = 2,
-}DRIVER_STATE;
+} DRIVER_STATE;
struct adapter {
int DriverState;/* for disable driver using module, use dongle to replace module. */
diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h
index f5c3ce5da70c..a46626d0509a 100644
--- a/drivers/staging/rtl8723bs/include/hal_com.h
+++ b/drivers/staging/rtl8723bs/include/hal_com.h
@@ -111,54 +111,54 @@
#define DESC_RATEVHTSS4MCS9 0x53
#define HDATA_RATE(rate)\
-(rate ==DESC_RATE1M)?"CCK_1M":\
-(rate ==DESC_RATE2M)?"CCK_2M":\
-(rate ==DESC_RATE5_5M)?"CCK5_5M":\
-(rate ==DESC_RATE11M)?"CCK_11M":\
-(rate ==DESC_RATE6M)?"OFDM_6M":\
-(rate ==DESC_RATE9M)?"OFDM_9M":\
-(rate ==DESC_RATE12M)?"OFDM_12M":\
-(rate ==DESC_RATE18M)?"OFDM_18M":\
-(rate ==DESC_RATE24M)?"OFDM_24M":\
-(rate ==DESC_RATE36M)?"OFDM_36M":\
-(rate ==DESC_RATE48M)?"OFDM_48M":\
-(rate ==DESC_RATE54M)?"OFDM_54M":\
-(rate ==DESC_RATEMCS0)?"MCS0":\
-(rate ==DESC_RATEMCS1)?"MCS1":\
-(rate ==DESC_RATEMCS2)?"MCS2":\
-(rate ==DESC_RATEMCS3)?"MCS3":\
-(rate ==DESC_RATEMCS4)?"MCS4":\
-(rate ==DESC_RATEMCS5)?"MCS5":\
-(rate ==DESC_RATEMCS6)?"MCS6":\
-(rate ==DESC_RATEMCS7)?"MCS7":\
-(rate ==DESC_RATEMCS8)?"MCS8":\
-(rate ==DESC_RATEMCS9)?"MCS9":\
-(rate ==DESC_RATEMCS10)?"MCS10":\
-(rate ==DESC_RATEMCS11)?"MCS11":\
-(rate ==DESC_RATEMCS12)?"MCS12":\
-(rate ==DESC_RATEMCS13)?"MCS13":\
-(rate ==DESC_RATEMCS14)?"MCS14":\
-(rate ==DESC_RATEMCS15)?"MCS15":\
-(rate ==DESC_RATEVHTSS1MCS0)?"VHTSS1MCS0":\
-(rate ==DESC_RATEVHTSS1MCS1)?"VHTSS1MCS1":\
-(rate ==DESC_RATEVHTSS1MCS2)?"VHTSS1MCS2":\
-(rate ==DESC_RATEVHTSS1MCS3)?"VHTSS1MCS3":\
-(rate ==DESC_RATEVHTSS1MCS4)?"VHTSS1MCS4":\
-(rate ==DESC_RATEVHTSS1MCS5)?"VHTSS1MCS5":\
-(rate ==DESC_RATEVHTSS1MCS6)?"VHTSS1MCS6":\
-(rate ==DESC_RATEVHTSS1MCS7)?"VHTSS1MCS7":\
-(rate ==DESC_RATEVHTSS1MCS8)?"VHTSS1MCS8":\
-(rate ==DESC_RATEVHTSS1MCS9)?"VHTSS1MCS9":\
-(rate ==DESC_RATEVHTSS2MCS0)?"VHTSS2MCS0":\
-(rate ==DESC_RATEVHTSS2MCS1)?"VHTSS2MCS1":\
-(rate ==DESC_RATEVHTSS2MCS2)?"VHTSS2MCS2":\
-(rate ==DESC_RATEVHTSS2MCS3)?"VHTSS2MCS3":\
-(rate ==DESC_RATEVHTSS2MCS4)?"VHTSS2MCS4":\
-(rate ==DESC_RATEVHTSS2MCS5)?"VHTSS2MCS5":\
-(rate ==DESC_RATEVHTSS2MCS6)?"VHTSS2MCS6":\
-(rate ==DESC_RATEVHTSS2MCS7)?"VHTSS2MCS7":\
-(rate ==DESC_RATEVHTSS2MCS8)?"VHTSS2MCS8":\
-(rate ==DESC_RATEVHTSS2MCS9)?"VHTSS2MCS9":"UNKNOW"
+(rate == DESC_RATE1M) ? "CCK_1M" : \
+(rate == DESC_RATE2M) ? "CCK_2M" : \
+(rate == DESC_RATE5_5M) ? "CCK5_5M" : \
+(rate == DESC_RATE11M) ? "CCK_11M" : \
+(rate == DESC_RATE6M) ? "OFDM_6M" : \
+(rate == DESC_RATE9M) ? "OFDM_9M" : \
+(rate == DESC_RATE12M) ? "OFDM_12M" : \
+(rate == DESC_RATE18M) ? "OFDM_18M" : \
+(rate == DESC_RATE24M) ? "OFDM_24M" : \
+(rate == DESC_RATE36M) ? "OFDM_36M" : \
+(rate == DESC_RATE48M) ? "OFDM_48M" : \
+(rate == DESC_RATE54M) ? "OFDM_54M" : \
+(rate == DESC_RATEMCS0) ? "MCS0" : \
+(rate == DESC_RATEMCS1) ? "MCS1" : \
+(rate == DESC_RATEMCS2) ? "MCS2" : \
+(rate == DESC_RATEMCS3) ? "MCS3" : \
+(rate == DESC_RATEMCS4) ? "MCS4" : \
+(rate == DESC_RATEMCS5) ? "MCS5" : \
+(rate == DESC_RATEMCS6) ? "MCS6" : \
+(rate == DESC_RATEMCS7) ? "MCS7" : \
+(rate == DESC_RATEMCS8) ? "MCS8" : \
+(rate == DESC_RATEMCS9) ? "MCS9" : \
+(rate == DESC_RATEMCS10) ? "MCS10" : \
+(rate == DESC_RATEMCS11) ? "MCS11" : \
+(rate == DESC_RATEMCS12) ? "MCS12" : \
+(rate == DESC_RATEMCS13) ? "MCS13" : \
+(rate == DESC_RATEMCS14) ? "MCS14" : \
+(rate == DESC_RATEMCS15) ? "MCS15" : \
+(rate == DESC_RATEVHTSS1MCS0) ? "VHTSS1MCS0" : \
+(rate == DESC_RATEVHTSS1MCS1) ? "VHTSS1MCS1" : \
+(rate == DESC_RATEVHTSS1MCS2) ? "VHTSS1MCS2" : \
+(rate == DESC_RATEVHTSS1MCS3) ? "VHTSS1MCS3" : \
+(rate == DESC_RATEVHTSS1MCS4) ? "VHTSS1MCS4" : \
+(rate == DESC_RATEVHTSS1MCS5) ? "VHTSS1MCS5" : \
+(rate == DESC_RATEVHTSS1MCS6) ? "VHTSS1MCS6" : \
+(rate == DESC_RATEVHTSS1MCS7) ? "VHTSS1MCS7" : \
+(rate == DESC_RATEVHTSS1MCS8) ? "VHTSS1MCS8" : \
+(rate == DESC_RATEVHTSS1MCS9) ? "VHTSS1MCS9" : \
+(rate == DESC_RATEVHTSS2MCS0) ? "VHTSS2MCS0" : \
+(rate == DESC_RATEVHTSS2MCS1) ? "VHTSS2MCS1" : \
+(rate == DESC_RATEVHTSS2MCS2) ? "VHTSS2MCS2" : \
+(rate == DESC_RATEVHTSS2MCS3) ? "VHTSS2MCS3" : \
+(rate == DESC_RATEVHTSS2MCS4) ? "VHTSS2MCS4" : \
+(rate == DESC_RATEVHTSS2MCS5) ? "VHTSS2MCS5" : \
+(rate == DESC_RATEVHTSS2MCS6) ? "VHTSS2MCS6" : \
+(rate == DESC_RATEVHTSS2MCS7) ? "VHTSS2MCS7" : \
+(rate == DESC_RATEVHTSS2MCS8) ? "VHTSS2MCS8" : \
+(rate == DESC_RATEVHTSS2MCS9) ? "VHTSS2MCS9" : "UNKNOW"
enum{
@@ -235,7 +235,7 @@ s32 c2h_evt_read_88xx(struct adapter *adapter, u8 *buf);
u8 rtw_get_mgntframe_raid(struct adapter *adapter, unsigned char network_type);
void rtw_hal_update_sta_rate_mask(struct adapter *padapter, struct sta_info *psta);
-void hw_var_port_switch (struct adapter *adapter);
+void hw_var_port_switch(struct adapter *adapter);
void SetHwReg(struct adapter *padapter, u8 variable, u8 *val);
void GetHwReg(struct adapter *padapter, u8 variable, u8 *val);
diff --git a/drivers/staging/rtl8723bs/include/hal_com_h2c.h b/drivers/staging/rtl8723bs/include/hal_com_h2c.h
index 7dbae5e2050e..b951bc288b89 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_h2c.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_h2c.h
@@ -11,7 +11,7 @@
/* H2C CMD DEFINITION ------------------------------------------------ */
/* */
/* 88e, 8723b, 8812, 8821, 92e use the same FW code base */
-enum h2c_cmd{
+enum h2c_cmd {
/* Common Class: 000 */
H2C_RSVD_PAGE = 0x00,
H2C_MEDIA_STATUS_RPT = 0x01,
@@ -96,9 +96,9 @@ enum h2c_cmd{
#define H2C_PROBERSP_RSVDPAGE_LEN 5
#ifdef CONFIG_WOWLAN
-#define eqMacAddr(a, b) (((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0)
-#define cpMacAddr(des, src) ((des)[0]=(src)[0], (des)[1]=(src)[1], (des)[2]=(src)[2], (des)[3]=(src)[3], (des)[4]=(src)[4], (des)[5]=(src)[5])
-#define cpIpAddr(des, src) ((des)[0]=(src)[0], (des)[1]=(src)[1], (des)[2]=(src)[2], (des)[3]=(src)[3])
+#define eqMacAddr(a, b) (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0)
+#define cpMacAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3], (des)[4] = (src)[4], (des)[5] = (src)[5])
+#define cpIpAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3])
/* */
/* ARP packet */
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
index e9a3006a3e20..9fff4aa36546 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -177,7 +177,7 @@ u8 Channel,
bool *bIn24G
);
-s8 phy_get_tx_pwr_lmt (struct adapter *adapter, u32 RegPwrTblSel,
+s8 phy_get_tx_pwr_lmt(struct adapter *adapter, u32 RegPwrTblSel,
enum BAND_TYPE Band, enum CHANNEL_WIDTH Bandwidth,
u8 RfPath,
u8 DataRate,
diff --git a/drivers/staging/rtl8723bs/include/hal_com_reg.h b/drivers/staging/rtl8723bs/include/hal_com_reg.h
index 31a187b35810..37fa59a352d6 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_reg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_reg.h
@@ -707,13 +707,13 @@ Default: 00b.
/* ALL CCK Rate */
-#define RATE_ALL_CCK RATR_1M|RATR_2M|RATR_55M|RATR_11M
-#define RATE_ALL_OFDM_AG RATR_6M|RATR_9M|RATR_12M|RATR_18M|RATR_24M|\
- RATR_36M|RATR_48M|RATR_54M
-#define RATE_ALL_OFDM_1SS RATR_MCS0|RATR_MCS1|RATR_MCS2|RATR_MCS3 |\
- RATR_MCS4|RATR_MCS5|RATR_MCS6 |RATR_MCS7
-#define RATE_ALL_OFDM_2SS RATR_MCS8|RATR_MCS9 |RATR_MCS10|RATR_MCS11|\
- RATR_MCS12|RATR_MCS13|RATR_MCS14|RATR_MCS15
+#define RATE_ALL_CCK RATR_1M | RATR_2M | RATR_55M | RATR_11M
+#define RATE_ALL_OFDM_AG RATR_6M | RATR_9M | RATR_12M | RATR_18M | RATR_24M |\
+ RATR_36M | RATR_48M | RATR_54M
+#define RATE_ALL_OFDM_1SS RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | RATR_MCS3 |\
+ RATR_MCS4 | RATR_MCS5 | RATR_MCS6 | RATR_MCS7
+#define RATE_ALL_OFDM_2SS RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | RATR_MCS11 |\
+ RATR_MCS12 | RATR_MCS13 | RATR_MCS14 | RATR_MCS15
#define RATE_BITMAP_ALL 0xFFFFF
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 24926ebaf950..1de5acaef8ff 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -296,7 +296,7 @@ enum wowlan_subcode {
WOWLAN_AP_DISABLE = 13
};
-struct wowlan_ioctl_param{
+struct wowlan_ioctl_param {
unsigned int subcode;
unsigned int subcode_value;
unsigned int wakeup_reason;
diff --git a/drivers/staging/rtl8723bs/include/hal_phy.h b/drivers/staging/rtl8723bs/include/hal_phy.h
index c6b9bf139ef6..ed0caa0574e3 100644
--- a/drivers/staging/rtl8723bs/include/hal_phy.h
+++ b/drivers/staging/rtl8723bs/include/hal_phy.h
@@ -25,7 +25,7 @@
#define RF6052_MAX_REG_92C 0x7F
#define RF6052_MAX_REG \
- (RF6052_MAX_REG_88E > RF6052_MAX_REG_92C) ? RF6052_MAX_REG_88E: RF6052_MAX_REG_92C
+ (RF6052_MAX_REG_88E > RF6052_MAX_REG_92C) ? RF6052_MAX_REG_88E : RF6052_MAX_REG_92C
#define GET_RF6052_REAL_MAX_REG(_Adapter) RF6052_MAX_REG_92C
diff --git a/drivers/staging/rtl8723bs/include/hal_phy_cfg.h b/drivers/staging/rtl8723bs/include/hal_phy_cfg.h
index b40868b2e76f..419ddb0733aa 100644
--- a/drivers/staging/rtl8723bs/include/hal_phy_cfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_phy_cfg.h
@@ -58,9 +58,9 @@ u32 Data
);
/* MAC/BB/RF HAL config */
-int PHY_BBConfig8723B(struct adapter *Adapter );
+int PHY_BBConfig8723B(struct adapter *Adapter);
-int PHY_RFConfig8723B(struct adapter *Adapter );
+int PHY_RFConfig8723B(struct adapter *Adapter);
s32 PHY_MACConfig8723B(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
index 130a94879805..28aca047dce6 100644
--- a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
+++ b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
@@ -46,9 +46,9 @@
{0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \
{0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, 0}, /*0x00[5] = 1b'0 release analog Ips to digital , 1:isolation*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3|BIT2), 0},/* disable SW LPS 0x04[10]= 0 and WLSUS_EN 0x04[11]= 0*/ \
- {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0 , BIT0},/* Disable USB suspend */ \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* Disable USB suspend */ \
{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
- {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0 , 0},/* Enable USB suspend */ \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/* Enable USB suspend */ \
{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset 0x04[16]= 1*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]= 0*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3), 0},/* disable WL suspend*/ \
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index 2110552b8e59..7243e656d589 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -98,8 +98,8 @@ enum {
#define WPA_SELECTOR_LEN 4
-extern u8 RTW_WPA_OUI_TYPE[] ;
-extern u16 RTW_WPA_VERSION ;
+extern u8 RTW_WPA_OUI_TYPE[];
+extern u16 RTW_WPA_VERSION;
extern u8 WPA_AUTH_KEY_MGMT_NONE[];
extern u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[];
extern u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
@@ -139,7 +139,7 @@ typedef enum _RATEID_IDX_ {
RATEID_IDX_VHT_1SS = 10,
} RATEID_IDX, *PRATEID_IDX;
-typedef enum _RATR_TABLE_MODE{
+typedef enum _RATR_TABLE_MODE {
RATR_INX_WIRELESS_NGB = 0, /* BGN 40 Mhz 2SS 1SS */
RATR_INX_WIRELESS_NG = 1, /* GN or N */
RATR_INX_WIRELESS_NB = 2, /* BGN 20 Mhz 2SS 1SS or BN */
@@ -149,7 +149,7 @@ typedef enum _RATR_TABLE_MODE{
RATR_INX_WIRELESS_B = 6,
RATR_INX_WIRELESS_MC = 7,
RATR_INX_WIRELESS_AC_N = 8,
-}RATR_TABLE_MODE, *PRATR_TABLE_MODE;
+} RATR_TABLE_MODE, *PRATR_TABLE_MODE;
enum NETWORK_TYPE
@@ -248,7 +248,7 @@ struct ieee_param_ex {
u8 data[0];
};
-struct sta_data{
+struct sta_data {
u16 aid;
u16 capability;
int flags;
@@ -439,7 +439,7 @@ struct ieee80211_snap_hdr {
#define IEEE80211_OFDM_SHIFT_MASK_A 4
-enum MGN_RATE{
+enum MGN_RATE {
MGN_1M = 0x02,
MGN_2M = 0x04,
MGN_5_5M = 0x0B,
@@ -906,7 +906,7 @@ enum rtw_ieee80211_spectrum_mgmt_actioncode {
RTW_WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
};
-enum _PUBLIC_ACTION{
+enum _PUBLIC_ACTION {
ACT_PUBLIC_BSSCOEXIST = 0, /* 20/40 BSS Coexistence */
ACT_PUBLIC_DSE_ENABLE = 1,
ACT_PUBLIC_DSE_DEENABLE = 2,
@@ -953,7 +953,7 @@ enum rtw_ieee80211_back_parties {
};
/* VHT features action code */
-enum rtw_ieee80211_vht_actioncode{
+enum rtw_ieee80211_vht_actioncode {
RTW_WLAN_ACTION_VHT_COMPRESSED_BEAMFORMING = 0,
RTW_WLAN_ACTION_VHT_GROUPID_MANAGEMENT = 1,
RTW_WLAN_ACTION_VHT_OPMODE_NOTIFICATION = 2,
@@ -1128,7 +1128,7 @@ u8 *rtw_get_ie(u8*pbuf, sint index, sint *len, sint limit);
u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen);
int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len);
-void rtw_set_supported_rate(u8 *SupportedRates, uint mode) ;
+void rtw_set_supported_rate(u8 *SupportedRates, uint mode);
unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit);
unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit);
@@ -1142,8 +1142,8 @@ void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie
u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen);
u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
-u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id , u8 *buf_attr, u32 *len_attr);
-u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id , u8 *buf_content, uint *len_content);
+u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr);
+u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content);
/**
* for_each_ie - iterate over continuous IEs
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index fa16139fcce6..c59c1384944b 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -50,7 +50,7 @@ void rtw_reset_drv_sw(struct adapter *padapter);
void rtw_dev_unload(struct adapter *padapter);
u32 rtw_start_drv_threads(struct adapter *padapter);
-void rtw_stop_drv_threads (struct adapter *padapter);
+void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index a40cf7b60a69..5f681899bbec 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -80,7 +80,7 @@ enum mstat_f {
#define mstat_tf_idx(flags) ((flags)&0xff)
#define mstat_ff_idx(flags) (((flags)&0xff00) >> 8)
-typedef enum mstat_status{
+typedef enum mstat_status {
MSTAT_ALLOC_SUCCESS = 0,
MSTAT_ALLOC_FAIL,
MSTAT_FREE
@@ -117,7 +117,7 @@ static inline void thread_enter(char *name)
static inline void flush_signals_thread(void)
{
- if (signal_pending (current))
+ if (signal_pending(current))
{
flush_signals(current);
}
@@ -134,14 +134,14 @@ static inline int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *par
}
#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
-#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
+#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0 : 1)) << 2)
static inline u32 _RND4(u32 sz)
{
u32 val;
- val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2;
+ val = ((sz >> 2) + ((sz & 3) ? 1 : 0)) << 2;
return val;
@@ -152,7 +152,7 @@ static inline u32 _RND8(u32 sz)
u32 val;
- val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3;
+ val = ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
return val;
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index a2d9de866c4b..1710fa3eeb71 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -80,7 +80,7 @@ static inline struct list_head *get_list_head(struct __queue *queue)
static inline void _set_timer(_timer *ptimer, u32 delay_time)
{
- mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
+ mod_timer(ptimer, (jiffies + (delay_time * HZ / 1000)));
}
static inline void _cancel_timer(_timer *ptimer, u8 *bcancelled)
diff --git a/drivers/staging/rtl8723bs/include/recv_osdep.h b/drivers/staging/rtl8723bs/include/recv_osdep.h
index 1056f615d0f9..e85aafc93f6d 100644
--- a/drivers/staging/rtl8723bs/include/recv_osdep.h
+++ b/drivers/staging/rtl8723bs/include/recv_osdep.h
@@ -9,7 +9,7 @@
extern sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
-extern void _rtw_free_recv_priv (struct recv_priv *precvpriv);
+extern void _rtw_free_recv_priv(struct recv_priv *precvpriv);
extern s32 rtw_recv_entry(union recv_frame *precv_frame);
@@ -19,7 +19,7 @@ extern void rtw_recv_returnpacket(_nic_hdl cnxt, _pkt *preturnedpkt);
extern void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
-void rtw_free_recv_priv (struct recv_priv *precvpriv);
+void rtw_free_recv_priv(struct recv_priv *precvpriv);
void rtw_os_recv_resource_alloc(struct adapter *padapter, union recv_frame *precvframe);
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h b/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h
index ecfd2d383ac2..3bfb0e9be582 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h
@@ -11,7 +11,7 @@
/* H2C CMD DEFINITION ------------------------------------------------ */
/* */
-enum h2c_cmd_8723B{
+enum h2c_cmd_8723B {
/* Common Class: 000 */
H2C_8723B_RSVD_PAGE = 0x00,
H2C_8723B_MEDIA_STATUS_RPT = 0x01,
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
index 987b9f12a4f4..d712c6d36a08 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
@@ -8,7 +8,7 @@
#define __RTL8723B_RF_H__
-int PHY_RF6052_Config8723B(struct adapter *Adapter );
+int PHY_RF6052_Config8723B(struct adapter *Adapter);
void
PHY_RF6052SetBandwidth8723B(struct adapter *Adapter,
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
index 23a6069c3e5d..320ca65e5faa 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
@@ -176,7 +176,7 @@ typedef struct txdesc_8723b
u32 txbf_path:1;
u32 seq:12;
u32 final_data_rate:8;
-}TXDESC_8723B, *PTXDESC_8723B;
+} TXDESC_8723B, *PTXDESC_8723B;
#ifndef __INC_HAL8723BDESC_H
#define __INC_HAL8723BDESC_H
diff --git a/drivers/staging/rtl8723bs/include/rtw_byteorder.h b/drivers/staging/rtl8723bs/include/rtw_byteorder.h
index f76fbfbed4c7..c3a65f971793 100644
--- a/drivers/staging/rtl8723bs/include/rtw_byteorder.h
+++ b/drivers/staging/rtl8723bs/include/rtw_byteorder.h
@@ -7,7 +7,7 @@
#ifndef _RTL871X_BYTEORDER_H_
#define _RTL871X_BYTEORDER_H_
-#if defined (__LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN)
#include <linux/byteorder/little_endian.h>
#else
# include <linux/byteorder/big_endian.h>
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
index b83824ca2e31..3e025a652e38 100644
--- a/drivers/staging/rtl8723bs/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -75,7 +75,7 @@ do {\
INIT_LIST_HEAD(&pcmd->list);\
pcmd->cmdcode = code;\
pcmd->parmbuf = (u8 *)(pparm);\
- pcmd->cmdsz = sizeof (*pparm);\
+ pcmd->cmdsz = sizeof(*pparm);\
pcmd->rsp = NULL;\
pcmd->rspsz = 0;\
} while (0)
@@ -129,9 +129,9 @@ extern void rtw_free_cmd_obj(struct cmd_obj *pcmd);
void rtw_stop_cmd_thread(struct adapter *adapter);
int rtw_cmd_thread(void *context);
-extern void rtw_free_cmd_priv (struct cmd_priv *pcmdpriv);
+extern void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv);
-extern void rtw_free_evt_priv (struct evt_priv *pevtpriv);
+extern void rtw_free_evt_priv(struct evt_priv *pevtpriv);
extern void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
enum rtw_drvextra_cmd_id
@@ -163,10 +163,10 @@ enum LPS_CTRL_TYPE
{
LPS_CTRL_SCAN = 0,
LPS_CTRL_JOINBSS = 1,
- LPS_CTRL_CONNECT =2,
- LPS_CTRL_DISCONNECT =3,
- LPS_CTRL_SPECIAL_PACKET =4,
- LPS_CTRL_LEAVE =5,
+ LPS_CTRL_CONNECT = 2,
+ LPS_CTRL_DISCONNECT = 3,
+ LPS_CTRL_SPECIAL_PACKET = 4,
+ LPS_CTRL_LEAVE = 5,
LPS_CTRL_TRAFFIC_BUSY = 6,
};
@@ -692,40 +692,40 @@ struct getratable_rsp {
/* to get TX, RX retry count */
-struct gettxretrycnt_parm{
+struct gettxretrycnt_parm {
unsigned int rsvd;
};
-struct gettxretrycnt_rsp{
+struct gettxretrycnt_rsp {
unsigned long tx_retrycnt;
};
-struct getrxretrycnt_parm{
+struct getrxretrycnt_parm {
unsigned int rsvd;
};
-struct getrxretrycnt_rsp{
+struct getrxretrycnt_rsp {
unsigned long rx_retrycnt;
};
/* to get BCNOK, BCNERR count */
-struct getbcnokcnt_parm{
+struct getbcnokcnt_parm {
unsigned int rsvd;
};
-struct getbcnokcnt_rsp{
+struct getbcnokcnt_rsp {
unsigned long bcnokcnt;
};
-struct getbcnerrcnt_parm{
+struct getbcnerrcnt_parm {
unsigned int rsvd;
};
-struct getbcnerrcnt_rsp{
+struct getbcnerrcnt_rsp {
unsigned long bcnerrcnt;
};
/* to get current TX power level */
-struct getcurtxpwrlevel_parm{
+struct getcurtxpwrlevel_parm {
unsigned int rsvd;
};
-struct getcurtxpwrlevel_rsp{
+struct getcurtxpwrlevel_rsp {
unsigned short tx_power;
};
@@ -883,56 +883,56 @@ struct _cmd_callback {
enum rtw_h2c_cmd
{
- GEN_CMD_CODE(_Read_MACREG) , /*0*/
- GEN_CMD_CODE(_Write_MACREG) ,
- GEN_CMD_CODE(_Read_BBREG) ,
- GEN_CMD_CODE(_Write_BBREG) ,
- GEN_CMD_CODE(_Read_RFREG) ,
- GEN_CMD_CODE(_Write_RFREG) , /*5*/
- GEN_CMD_CODE(_Read_EEPROM) ,
- GEN_CMD_CODE(_Write_EEPROM) ,
- GEN_CMD_CODE(_Read_EFUSE) ,
- GEN_CMD_CODE(_Write_EFUSE) ,
-
- GEN_CMD_CODE(_Read_CAM) , /*10*/
- GEN_CMD_CODE(_Write_CAM) ,
+ GEN_CMD_CODE(_Read_MACREG), /*0*/
+ GEN_CMD_CODE(_Write_MACREG),
+ GEN_CMD_CODE(_Read_BBREG),
+ GEN_CMD_CODE(_Write_BBREG),
+ GEN_CMD_CODE(_Read_RFREG),
+ GEN_CMD_CODE(_Write_RFREG), /*5*/
+ GEN_CMD_CODE(_Read_EEPROM),
+ GEN_CMD_CODE(_Write_EEPROM),
+ GEN_CMD_CODE(_Read_EFUSE),
+ GEN_CMD_CODE(_Write_EFUSE),
+
+ GEN_CMD_CODE(_Read_CAM), /*10*/
+ GEN_CMD_CODE(_Write_CAM),
GEN_CMD_CODE(_setBCNITV),
GEN_CMD_CODE(_setMBIDCFG),
GEN_CMD_CODE(_JoinBss), /*14*/
- GEN_CMD_CODE(_DisConnect) , /*15*/
- GEN_CMD_CODE(_CreateBss) ,
- GEN_CMD_CODE(_SetOpMode) ,
+ GEN_CMD_CODE(_DisConnect), /*15*/
+ GEN_CMD_CODE(_CreateBss),
+ GEN_CMD_CODE(_SetOpMode),
GEN_CMD_CODE(_SiteSurvey), /*18*/
- GEN_CMD_CODE(_SetAuth) ,
-
- GEN_CMD_CODE(_SetKey) , /*20*/
- GEN_CMD_CODE(_SetStaKey) ,
- GEN_CMD_CODE(_SetAssocSta) ,
- GEN_CMD_CODE(_DelAssocSta) ,
- GEN_CMD_CODE(_SetStaPwrState) ,
- GEN_CMD_CODE(_SetBasicRate) , /*25*/
- GEN_CMD_CODE(_GetBasicRate) ,
- GEN_CMD_CODE(_SetDataRate) ,
- GEN_CMD_CODE(_GetDataRate) ,
- GEN_CMD_CODE(_SetPhyInfo) ,
-
- GEN_CMD_CODE(_GetPhyInfo) , /*30*/
- GEN_CMD_CODE(_SetPhy) ,
- GEN_CMD_CODE(_GetPhy) ,
- GEN_CMD_CODE(_readRssi) ,
- GEN_CMD_CODE(_readGain) ,
- GEN_CMD_CODE(_SetAtim) , /*35*/
- GEN_CMD_CODE(_SetPwrMode) ,
+ GEN_CMD_CODE(_SetAuth),
+
+ GEN_CMD_CODE(_SetKey), /*20*/
+ GEN_CMD_CODE(_SetStaKey),
+ GEN_CMD_CODE(_SetAssocSta),
+ GEN_CMD_CODE(_DelAssocSta),
+ GEN_CMD_CODE(_SetStaPwrState),
+ GEN_CMD_CODE(_SetBasicRate), /*25*/
+ GEN_CMD_CODE(_GetBasicRate),
+ GEN_CMD_CODE(_SetDataRate),
+ GEN_CMD_CODE(_GetDataRate),
+ GEN_CMD_CODE(_SetPhyInfo),
+
+ GEN_CMD_CODE(_GetPhyInfo), /*30*/
+ GEN_CMD_CODE(_SetPhy),
+ GEN_CMD_CODE(_GetPhy),
+ GEN_CMD_CODE(_readRssi),
+ GEN_CMD_CODE(_readGain),
+ GEN_CMD_CODE(_SetAtim), /*35*/
+ GEN_CMD_CODE(_SetPwrMode),
GEN_CMD_CODE(_JoinbssRpt),
- GEN_CMD_CODE(_SetRaTable) ,
- GEN_CMD_CODE(_GetRaTable) ,
+ GEN_CMD_CODE(_SetRaTable),
+ GEN_CMD_CODE(_GetRaTable),
GEN_CMD_CODE(_GetCCXReport), /*40*/
GEN_CMD_CODE(_GetDTMReport),
GEN_CMD_CODE(_GetTXRateStatistics),
GEN_CMD_CODE(_SetUsbSuspend),
GEN_CMD_CODE(_SetH2cLbk),
- GEN_CMD_CODE(_AddBAReq) , /*45*/
+ GEN_CMD_CODE(_AddBAReq), /*45*/
GEN_CMD_CODE(_SetChannel), /*46*/
GEN_CMD_CODE(_SetTxPower),
GEN_CMD_CODE(_SwitchAntenna),
diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h
index 22fc5d730d7b..c90adfb87261 100644
--- a/drivers/staging/rtl8723bs/include/rtw_debug.h
+++ b/drivers/staging/rtl8723bs/include/rtw_debug.h
@@ -131,13 +131,13 @@
#define _MODULE_DEFINE_ _module_efuse_
#endif
-#define RT_TRACE(_Comp, _Level, Fmt) do{}while (0)
-#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) do{}while (0)
+#define RT_TRACE(_Comp, _Level, Fmt) do {} while (0)
+#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) do {} while (0)
#define DBG_871X(x, ...) do {} while (0)
#define MSG_8192C(x, ...) do {} while (0)
-#define DBG_8192C(x,...) do {} while (0)
-#define DBG_871X_LEVEL(x,...) do {} while (0)
+#define DBG_8192C(x, ...) do {} while (0)
+#define DBG_871X_LEVEL(x, ...) do {} while (0)
#undef _dbgdump
@@ -161,8 +161,8 @@
_dbgdump(DRIVER_PREFIX"ERROR " fmt, ##arg);\
else \
_dbgdump(DRIVER_PREFIX fmt, ##arg);\
- }\
- }while (0)
+ } \
+ } while (0)
/* without driver-defined prefix */
#undef _DBG_871X_LEVEL
@@ -173,8 +173,8 @@
_dbgdump("ERROR " fmt, ##arg);\
else \
_dbgdump(fmt, ##arg);\
- }\
- }while (0)
+ } \
+ } while (0)
#define RTW_DBGDUMP NULL /* 'stream' for _dbgdump */
@@ -203,17 +203,17 @@
#undef DBG_871X
#define DBG_871X(...) do {\
_dbgdump(DRIVER_PREFIX __VA_ARGS__);\
- }while (0)
+ } while (0)
#undef MSG_8192C
#define MSG_8192C(...) do {\
_dbgdump(DRIVER_PREFIX __VA_ARGS__);\
- }while (0)
+ } while (0)
#undef DBG_8192C
#define DBG_8192C(...) do {\
_dbgdump(DRIVER_PREFIX __VA_ARGS__);\
- }while (0)
+ } while (0)
#endif /* defined(_dbgdump) */
#endif /* DEBUG */
@@ -227,8 +227,8 @@
if ((_Comp & GlobalDebugComponents) && (_Level <= GlobalDebugLevel)) {\
_dbgdump("%s [0x%08x,%d]", DRIVER_PREFIX, (unsigned int)_Comp, _Level);\
_dbgdump Fmt;\
- }\
- }while (0)
+ } \
+ } while (0)
#endif /* defined(_dbgdump) && defined(_MODULE_DEFINE_) */
@@ -242,7 +242,7 @@
u8 *ptr = (u8 *)_HexData; \
_dbgdump("%s", DRIVER_PREFIX); \
_dbgdump(_TitleString); \
- for (__i = 0; __i<(int)_HexDataLen; __i++) \
+ for (__i = 0; __i < (int)_HexDataLen; __i++) \
{ \
_dbgdump("%02X%s", ptr[__i], (((__i + 1) % 4) == 0)?" ":" "); \
if (((__i + 1) % 16) == 0) _dbgdump("\n"); \
diff --git a/drivers/staging/rtl8723bs/include/rtw_eeprom.h b/drivers/staging/rtl8723bs/include/rtw_eeprom.h
index 1fcd79fa1c21..704c6461333a 100644
--- a/drivers/staging/rtl8723bs/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8723bs/include/rtw_eeprom.h
@@ -91,7 +91,7 @@ typedef enum _RT_CUSTOMER_ID
RT_CID_819x_ALPHA_Dlink = 44,/* add by ylb 20121012 for customer led for alpha */
RT_CID_WNC_NEC = 45,/* add by page for NEC */
RT_CID_DNI_BUFFALO = 46,/* add by page for NEC */
-}RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
+} RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
struct eeprom_priv
{
diff --git a/drivers/staging/rtl8723bs/include/rtw_efuse.h b/drivers/staging/rtl8723bs/include/rtw_efuse.h
index 9d9a5a3e336d..4abcbbc8f513 100644
--- a/drivers/staging/rtl8723bs/include/rtw_efuse.h
+++ b/drivers/staging/rtl8723bs/include/rtw_efuse.h
@@ -57,15 +57,15 @@ enum _EFUSE_DEF_TYPE {
#define EFUSE_MAX_WORD_UNIT 4
/*------------------------------Define structure----------------------------*/
-typedef struct PG_PKT_STRUCT_A{
+typedef struct PG_PKT_STRUCT_A {
u8 offset;
u8 word_en;
u8 data[8];
u8 word_cnts;
-}PGPKT_STRUCT,*PPGPKT_STRUCT;
+} PGPKT_STRUCT, *PPGPKT_STRUCT;
/*------------------------------Define structure----------------------------*/
-typedef struct _EFUSE_HAL{
+typedef struct _EFUSE_HAL {
u8 fakeEfuseBank;
u32 fakeEfuseUsedBytes;
u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE];
@@ -82,7 +82,7 @@ typedef struct _EFUSE_HAL{
u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
-}EFUSE_HAL, *PEFUSE_HAL;
+} EFUSE_HAL, *PEFUSE_HAL;
/*------------------------Export global variable----------------------------*/
diff --git a/drivers/staging/rtl8723bs/include/rtw_event.h b/drivers/staging/rtl8723bs/include/rtw_event.h
index ee80aa21eb10..aeaabab780e5 100644
--- a/drivers/staging/rtl8723bs/include/rtw_event.h
+++ b/drivers/staging/rtl8723bs/include/rtw_event.h
@@ -82,7 +82,7 @@ struct fwevent {
#define C2HEVENT_SZ 32
-struct event_node{
+struct event_node {
unsigned char *node;
unsigned char evt_code;
unsigned short evt_sz;
diff --git a/drivers/staging/rtl8723bs/include/rtw_ht.h b/drivers/staging/rtl8723bs/include/rtw_ht.h
index 952c804dd0fd..4c224c128327 100644
--- a/drivers/staging/rtl8723bs/include/rtw_ht.h
+++ b/drivers/staging/rtl8723bs/include/rtw_ht.h
@@ -38,7 +38,7 @@ struct ht_priv
};
-typedef enum AGGRE_SIZE{
+typedef enum AGGRE_SIZE {
HT_AGG_SIZE_8K = 0,
HT_AGG_SIZE_16K = 1,
HT_AGG_SIZE_32K = 2,
@@ -47,9 +47,9 @@ typedef enum AGGRE_SIZE{
VHT_AGG_SIZE_256K = 5,
VHT_AGG_SIZE_512K = 6,
VHT_AGG_SIZE_1024K = 7,
-}AGGRE_SIZE_E, *PAGGRE_SIZE_E;
+} AGGRE_SIZE_E, *PAGGRE_SIZE_E;
-typedef enum _RT_HT_INF0_CAP{
+typedef enum _RT_HT_INF0_CAP {
RT_HT_CAP_USE_TURBO_AGGR = 0x01,
RT_HT_CAP_USE_LONG_PREAMBLE = 0x02,
RT_HT_CAP_USE_AMPDU = 0x04,
@@ -58,13 +58,13 @@ typedef enum _RT_HT_INF0_CAP{
RT_HT_CAP_USE_92SE = 0x20,
RT_HT_CAP_USE_88C_92C = 0x40,
RT_HT_CAP_USE_AP_CLIENT_MODE = 0x80, /* AP team request to reserve this bit, by Emily */
-}RT_HT_INF0_CAPBILITY, *PRT_HT_INF0_CAPBILITY;
+} RT_HT_INF0_CAPBILITY, *PRT_HT_INF0_CAPBILITY;
-typedef enum _RT_HT_INF1_CAP{
+typedef enum _RT_HT_INF1_CAP {
RT_HT_CAP_USE_VIDEO_CLIENT = 0x01,
RT_HT_CAP_USE_JAGUAR_BCUT = 0x02,
RT_HT_CAP_USE_JAGUAR_CCUT = 0x04,
-}RT_HT_INF1_CAPBILITY, *PRT_HT_INF1_CAPBILITY;
+} RT_HT_INF1_CAPBILITY, *PRT_HT_INF1_CAPBILITY;
#define LDPC_HT_ENABLE_RX BIT0
#define LDPC_HT_ENABLE_TX BIT1
diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h
index 99d104b3647a..2581b5165d1b 100644
--- a/drivers/staging/rtl8723bs/include/rtw_io.h
+++ b/drivers/staging/rtl8723bs/include/rtw_io.h
@@ -168,7 +168,7 @@ struct reg_protocol_rd {
u32 Byte2Access : 1;
u32 Byte1Access : 1;
- u32 BurstMode :1 ;
+ u32 BurstMode :1;
u32 FixOrContinuous : 1;
u32 Reserved4 : 16;
@@ -224,7 +224,7 @@ struct reg_protocol_wt {
u32 Byte2Access : 1;
u32 Byte1Access : 1;
- u32 BurstMode :1 ;
+ u32 BurstMode :1;
u32 FixOrContinuous : 1;
u32 Reserved4 : 16;
@@ -259,7 +259,7 @@ struct io_queue {
struct intf_hdl intf;
};
-struct io_priv{
+struct io_priv {
struct adapter *padapter;
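Editor's note: the BurstMode changes above are whitespace-only (":1 ;" becomes ":1;"). For readers unfamiliar with the construct, these register-descriptor structs pack single-bit flags using bit-field widths; a minimal standalone sketch (field names invented):

#include <stdio.h>

/* Each field gets the number of bits after the colon; the compiler
 * packs adjacent fields into a shared storage unit. */
struct reg_flags {
	unsigned int burst_mode  : 1;
	unsigned int fix_or_cont : 1;
	unsigned int reserved    : 30;
};

int main(void)
{
	struct reg_flags f = { .burst_mode = 1 };

	printf("burst =%u, size =%zu bytes\n", f.burst_mode, sizeof(f));
	return 0;
}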
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index 362737b83c3a..14e4bce28856 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -85,7 +85,7 @@ typedef enum _RT_SCAN_TYPE {
SCAN_PASSIVE,
SCAN_ACTIVE,
SCAN_MIX,
-}RT_SCAN_TYPE, *PRT_SCAN_TYPE;
+} RT_SCAN_TYPE, *PRT_SCAN_TYPE;
enum _BAND {
GHZ24_50 = 0,
@@ -135,7 +135,7 @@ struct sitesurvey_ctrl {
_timer sitesurvey_ctrl_timer;
};
-typedef struct _RT_LINK_DETECT_T{
+typedef struct _RT_LINK_DETECT_T {
u32 NumTxOkInPeriod;
u32 NumRxOkInPeriod;
u32 NumRxUnicastOkInPeriod;
@@ -148,62 +148,62 @@ typedef struct _RT_LINK_DETECT_T{
/* u8 TrafficBusyState; */
u8 TrafficTransitionCount;
u32 LowPowerTransitionCount;
-}RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
+} RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
struct profile_info {
u8 ssidlen;
- u8 ssid[ WLAN_SSID_MAXLEN ];
- u8 peermac[ ETH_ALEN ];
+ u8 ssid[WLAN_SSID_MAXLEN];
+ u8 peermac[ETH_ALEN];
};
-struct tx_invite_req_info{
+struct tx_invite_req_info {
u8 token;
u8 benable;
- u8 go_ssid[ WLAN_SSID_MAXLEN ];
+ u8 go_ssid[WLAN_SSID_MAXLEN];
u8 ssidlen;
- u8 go_bssid[ ETH_ALEN ];
- u8 peer_macaddr[ ETH_ALEN ];
+ u8 go_bssid[ETH_ALEN];
+ u8 peer_macaddr[ETH_ALEN];
u8 operating_ch; /* This information will be set by using the p2p_set op_ch =x */
u8 peer_ch; /* The listen channel for peer P2P device */
};
-struct tx_invite_resp_info{
+struct tx_invite_resp_info {
u8 token; /* Used to record the dialog token of p2p invitation request frame. */
};
-struct tx_provdisc_req_info{
+struct tx_provdisc_req_info {
u16 wps_config_method_request; /* Used when sending the provisioning request frame */
u16 peer_channel_num[2]; /* The channel number which the receiver stands. */
struct ndis_802_11_ssid ssid;
- u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */
- u8 peerIFAddr[ ETH_ALEN ]; /* Peer interface address */
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+ u8 peerIFAddr[ETH_ALEN]; /* Peer interface address */
u8 benable; /* This provision discovery request frame is trigger to send or not */
};
-struct rx_provdisc_req_info{ /* When peer device issue prov_disc_req first, we should store the following informations */
- u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */
+struct rx_provdisc_req_info { /* When peer device issue prov_disc_req first, we should store the following informations */
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
u8 strconfig_method_desc_of_prov_disc_req[4]; /* description for the config method located in the provisioning discovery request frame. */
/* The UI must know this information to know which config method the remote p2p device is requiring. */
};
-struct tx_nego_req_info{
+struct tx_nego_req_info {
u16 peer_channel_num[2]; /* The channel number which the receiver stands. */
- u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
u8 benable; /* This negoitation request frame is trigger to send or not */
};
-struct group_id_info{
- u8 go_device_addr[ ETH_ALEN ]; /* The GO's device address of this P2P group */
- u8 ssid[ WLAN_SSID_MAXLEN ]; /* The SSID of this P2P group */
+struct group_id_info {
+ u8 go_device_addr[ETH_ALEN]; /* The GO's device address of this P2P group */
+ u8 ssid[WLAN_SSID_MAXLEN]; /* The SSID of this P2P group */
};
-struct scan_limit_info{
+struct scan_limit_info {
u8 scan_op_ch_only; /* When this flag is set, the driver should just scan the operation channel */
u8 operation_ch[2]; /* Store the operation channel of invitation request frame */
};
-struct cfg80211_wifidirect_info{
+struct cfg80211_wifidirect_info {
_timer remain_on_ch_timer;
u8 restore_channel;
struct ieee80211_channel remain_on_ch_channel;
@@ -213,7 +213,7 @@ struct cfg80211_wifidirect_info{
unsigned long last_ro_ch_time; /* this will be updated at the beginning and end of ro_ch */
};
-struct wifidirect_info{
+struct wifidirect_info {
struct adapter * padapter;
_timer find_phase_timer;
_timer restore_p2p_state_timer;
@@ -225,7 +225,7 @@ struct wifidirect_info{
struct tx_provdisc_req_info tx_prov_disc_info;
struct rx_provdisc_req_info rx_prov_disc_info;
struct tx_invite_req_info invitereq_info;
- struct profile_info profileinfo[ P2P_MAX_PERSISTENT_GROUP_NUM ]; /* Store the profile information of persistent group */
+ struct profile_info profileinfo[P2P_MAX_PERSISTENT_GROUP_NUM]; /* Store the profile information of persistent group */
struct tx_invite_resp_info inviteresp_info;
struct tx_nego_req_info nego_req_info;
struct group_id_info groupid_info; /* Store the group id information when doing the group negotiation handshake. */
@@ -243,17 +243,17 @@ struct wifidirect_info{
u8 support_rate[8];
u8 p2p_wildcard_ssid[P2P_WILDCARD_SSID_LEN];
u8 intent; /* should only include the intent value. */
- u8 p2p_peer_interface_addr[ ETH_ALEN ];
- u8 p2p_peer_device_addr[ ETH_ALEN ];
+ u8 p2p_peer_interface_addr[ETH_ALEN];
+ u8 p2p_peer_device_addr[ETH_ALEN];
u8 peer_intent; /* Included the intent value and tie breaker value. */
- u8 device_name[ WPS_MAX_DEVICE_NAME_LEN ]; /* Device name for displaying on searching device screen */
+ u8 device_name[WPS_MAX_DEVICE_NAME_LEN]; /* Device name for displaying on searching device screen */
u8 device_name_len;
u8 profileindex; /* Used to point to the index of profileinfo array */
u8 peer_operating_ch;
u8 find_phase_state_exchange_cnt;
u16 device_password_id_for_nego; /* The device password ID for group negotation */
u8 negotiation_dialog_token;
- u8 nego_ssid[ WLAN_SSID_MAXLEN ]; /* SSID information for group negotitation */
+ u8 nego_ssid[WLAN_SSID_MAXLEN]; /* SSID information for group negotitation */
u8 nego_ssidlen;
u8 p2p_group_ssid[WLAN_SSID_MAXLEN];
u8 p2p_group_ssid_len;
@@ -287,13 +287,13 @@ struct wifidirect_info{
u8 driver_interface; /* Indicate DRIVER_WEXT or DRIVER_CFG80211 */
};
-struct tdls_ss_record{ /* signal strength record */
+struct tdls_ss_record { /* signal strength record */
u8 macaddr[ETH_ALEN];
u8 rx_pwd_ba11;
u8 is_tdls_sta; /* true: direct link sta, false: else */
};
-struct tdls_info{
+struct tdls_info {
u8 ap_prohibited;
u8 link_established;
u8 sta_cnt;
@@ -489,7 +489,7 @@ int event_thread(void *context);
extern void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
extern int rtw_init_mlme_priv(struct adapter *adapter);/* (struct mlme_priv *pmlmepriv); */
-extern void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv);
+extern void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
extern sint rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
@@ -526,7 +526,7 @@ static inline void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
{
pmlmepriv->fw_state |= state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY ==state) {
+ if (_FW_UNDER_SURVEY == state) {
pmlmepriv->bScanInProcess = true;
}
}
@@ -535,7 +535,7 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
{
pmlmepriv->fw_state &= ~state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY ==state) {
+ if (_FW_UNDER_SURVEY == state) {
pmlmepriv->bScanInProcess = false;
}
}
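Editor's note: the two hunks above only add spaces around "==", but the helpers themselves follow a common pattern: OR a flag into a state word on set, AND with the complement on clear, and mirror one specific flag into a cached boolean. A condensed sketch under assumed names and flag values:

#include <stdbool.h>
#include <stdio.h>

#define FW_UNDER_SURVEY 0x0001	/* illustrative flag value */

struct mlme_state {
	unsigned int fw_state;
	bool scan_in_process;
};

static void set_fwstate(struct mlme_state *m, unsigned int state)
{
	m->fw_state |= state;
	if (state == FW_UNDER_SURVEY)	/* mirror the scan flag */
		m->scan_in_process = true;
}

static void clr_fwstate(struct mlme_state *m, unsigned int state)
{
	m->fw_state &= ~state;
	if (state == FW_UNDER_SURVEY)
		m->scan_in_process = false;
}

int main(void)
{
	struct mlme_state m = { 0 };

	set_fwstate(&m, FW_UNDER_SURVEY);
	printf("scanning =%d\n", m.scan_in_process);
	clr_fwstate(&m, FW_UNDER_SURVEY);
	printf("scanning =%d\n", m.scan_in_process);
	return 0;
}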
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 73e8ec09b6e1..6c1ed6211c7e 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -182,7 +182,7 @@ typedef enum _RT_CHANNEL_DOMAIN
/* Add new channel plan above this line =============== */
RT_CHANNEL_DOMAIN_MAX,
RT_CHANNEL_DOMAIN_REALTEK_DEFINE = 0x7F,
-}RT_CHANNEL_DOMAIN, *PRT_CHANNEL_DOMAIN;
+} RT_CHANNEL_DOMAIN, *PRT_CHANNEL_DOMAIN;
typedef enum _RT_CHANNEL_DOMAIN_2G
{
@@ -195,7 +195,7 @@ typedef enum _RT_CHANNEL_DOMAIN_2G
RT_CHANNEL_DOMAIN_2G_NULL = 0x06,
/* Add new channel plan above this line =============== */
RT_CHANNEL_DOMAIN_2G_MAX,
-}RT_CHANNEL_DOMAIN_2G, *PRT_CHANNEL_DOMAIN_2G;
+} RT_CHANNEL_DOMAIN_2G, *PRT_CHANNEL_DOMAIN_2G;
typedef enum _RT_CHANNEL_DOMAIN_5G
{
@@ -237,33 +237,33 @@ typedef enum _RT_CHANNEL_DOMAIN_5G
RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS = 0x21,
RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS = 0x22,
RT_CHANNEL_DOMAIN_5G_MAX,
-}RT_CHANNEL_DOMAIN_5G, *PRT_CHANNEL_DOMAIN_5G;
+} RT_CHANNEL_DOMAIN_5G, *PRT_CHANNEL_DOMAIN_5G;
-#define rtw_is_channel_plan_valid(chplan) (chplan<RT_CHANNEL_DOMAIN_MAX || chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
+#define rtw_is_channel_plan_valid(chplan) (chplan < RT_CHANNEL_DOMAIN_MAX || chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
typedef struct _RT_CHANNEL_PLAN
{
unsigned char Channel[MAX_CHANNEL_NUM];
unsigned char Len;
-}RT_CHANNEL_PLAN, *PRT_CHANNEL_PLAN;
+} RT_CHANNEL_PLAN, *PRT_CHANNEL_PLAN;
typedef struct _RT_CHANNEL_PLAN_2G
{
unsigned char Channel[MAX_CHANNEL_NUM_2G];
unsigned char Len;
-}RT_CHANNEL_PLAN_2G, *PRT_CHANNEL_PLAN_2G;
+} RT_CHANNEL_PLAN_2G, *PRT_CHANNEL_PLAN_2G;
typedef struct _RT_CHANNEL_PLAN_5G
{
unsigned char Channel[MAX_CHANNEL_NUM_5G];
unsigned char Len;
-}RT_CHANNEL_PLAN_5G, *PRT_CHANNEL_PLAN_5G;
+} RT_CHANNEL_PLAN_5G, *PRT_CHANNEL_PLAN_5G;
typedef struct _RT_CHANNEL_PLAN_MAP
{
unsigned char Index2G;
unsigned char Index5G;
-}RT_CHANNEL_PLAN_MAP, *PRT_CHANNEL_PLAN_MAP;
+} RT_CHANNEL_PLAN_MAP, *PRT_CHANNEL_PLAN_MAP;
enum Associated_AP
{
@@ -299,7 +299,7 @@ typedef enum _HT_IOT_PEER
HT_IOT_PEER_REALTEK_JAGUAR_BCUTAP = 16,
HT_IOT_PEER_REALTEK_JAGUAR_CCUTAP = 17,
HT_IOT_PEER_MAX = 18
-}HT_IOT_PEER_E, *PHTIOT_PEER_E;
+} HT_IOT_PEER_E, *PHTIOT_PEER_E;
enum SCAN_STATE
@@ -353,7 +353,7 @@ struct ss_res
#define WIFI_FW_ASSOC_STATE 0x00002000
#define WIFI_FW_ASSOC_SUCCESS 0x00004000
-#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE | WIFI_FW_AUTH_SUCCESS |WIFI_FW_ASSOC_STATE)
+#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE | WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)
struct FW_Sta_Info
{
@@ -434,7 +434,7 @@ typedef struct _RT_CHANNEL_INFO
{
u8 ChannelNum; /* The channel number. */
RT_SCAN_TYPE ScanType; /* Scan type such as passive or active scan. */
-}RT_CHANNEL_INFO, *PRT_CHANNEL_INFO;
+} RT_CHANNEL_INFO, *PRT_CHANNEL_INFO;
int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch);
bool rtw_mlme_band_check(struct adapter *adapter, const u32 ch);
@@ -537,7 +537,7 @@ struct mlme_ext_priv
void init_mlme_default_rate_set(struct adapter *padapter);
void init_mlme_ext_priv(struct adapter *padapter);
int init_hw_mlme_ext(struct adapter *padapter);
-void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
+void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext);
extern void init_mlme_ext_timer(struct adapter *padapter);
extern void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta);
extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
@@ -571,7 +571,7 @@ void SelectChannel(struct adapter *padapter, unsigned char channel);
unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval);
-void read_cam(struct adapter *padapter , u8 entry, u8 *get_key);
+void read_cam(struct adapter *padapter, u8 entry, u8 *get_key);
/* modify HW only */
void _write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
@@ -708,8 +708,8 @@ void linked_status_chk(struct adapter *padapter);
void _linked_info_dump(struct adapter *padapter);
-void survey_timer_hdl (struct timer_list *t);
-void link_timer_hdl (struct timer_list *t);
+void survey_timer_hdl(struct timer_list *t);
+void link_timer_hdl(struct timer_list *t);
void addba_timer_hdl(struct timer_list *t);
void sa_query_timer_hdl(struct timer_list *t);
/* void reauth_timer_hdl(struct adapter *padapter); */
@@ -802,8 +802,8 @@ struct C2HEvent_Header
unsigned int rsvd;
};
-void rtw_dummy_event_callback(struct adapter *adapter , u8 *pbuf);
-void rtw_fwdbg_event_callback(struct adapter *adapter , u8 *pbuf);
+void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf);
enum rtw_c2h_event
{
@@ -818,10 +818,10 @@ enum rtw_c2h_event
GEN_EVT_CODE(_Survey), /*8*/
GEN_EVT_CODE(_SurveyDone), /*9*/
- GEN_EVT_CODE(_JoinBss) , /*10*/
+ GEN_EVT_CODE(_JoinBss), /*10*/
GEN_EVT_CODE(_AddSTA),
GEN_EVT_CODE(_DelSTA),
- GEN_EVT_CODE(_AtimDone) ,
+ GEN_EVT_CODE(_AtimDone),
GEN_EVT_CODE(_TX_Report),
GEN_EVT_CODE(_CCX_Report), /*15*/
GEN_EVT_CODE(_DTM_Report),
@@ -851,7 +851,7 @@ static struct fwevent wlanevents[] =
{0, NULL},
{0, NULL},
{0, &rtw_survey_event_callback}, /*8*/
- {sizeof (struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/
+ {sizeof(struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/
{0, &rtw_joinbss_event_callback}, /*10*/
{sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
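Editor's note: wlanevents[] above is a dispatch table indexed by C2H event code, pairing the expected payload size with a handler, with NULL meaning "no handler". A minimal standalone version of the pattern (event codes and handlers invented for illustration):

#include <stdio.h>
#include <stddef.h>

struct fwevent {
	unsigned int parmsize;
	void (*event_callback)(void *buf);
};

static void survey_done(void *buf) { (void)buf; printf("survey done\n"); }
static void join_bss(void *buf)    { (void)buf; printf("joined bss\n"); }

/* Index is the event code; a NULL callback means the code is unhandled. */
static const struct fwevent wlanevents[] = {
	[0] = { 0, NULL },
	[1] = { 0, &survey_done },
	[2] = { 0, &join_bss },
};

static void dispatch(unsigned int code, void *buf)
{
	if (code < sizeof(wlanevents) / sizeof(wlanevents[0]) &&
	    wlanevents[code].event_callback)
		wlanevents[code].event_callback(buf);
}

int main(void)
{
	dispatch(1, NULL);
	dispatch(2, NULL);
	return 0;
}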
diff --git a/drivers/staging/rtl8723bs/include/rtw_mp.h b/drivers/staging/rtl8723bs/include/rtw_mp.h
index bb3970d58573..e5a801b40582 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mp.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mp.h
@@ -154,7 +154,7 @@ typedef struct _MPT_CONTEXT
u32 mptOutLen;
u8 mptOutBuf[100];
-}MPT_CONTEXT, *PMPT_CONTEXT;
+} MPT_CONTEXT, *PMPT_CONTEXT;
/* endif */
/* E-Fuse */
@@ -276,7 +276,7 @@ typedef struct _IOCMD_STRUCT_ {
u8 cmdclass;
u16 value;
u8 index;
-}IOCMD_STRUCT;
+} IOCMD_STRUCT;
struct rf_reg_param {
u32 path;
@@ -315,7 +315,7 @@ extern u8 mpdatarate[NumRates];
/* MP set force data rate base on the definition. */
enum MPT_RATE_INDEX {
/* CCK rate. */
- MPT_RATE_1M = 0 , /* 0 */
+ MPT_RATE_1M = 0, /* 0 */
MPT_RATE_2M,
MPT_RATE_55M,
MPT_RATE_11M, /* 3 */
@@ -473,9 +473,9 @@ void Hal_SetBandwidth(struct adapter *padapter);
void Hal_SetTxPower(struct adapter *padapter);
void Hal_SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart);
-void Hal_SetSingleToneTx (struct adapter *padapter , u8 bStart);
-void Hal_SetSingleCarrierTx (struct adapter *padapter, u8 bStart);
-void Hal_SetContinuousTx (struct adapter *padapter, u8 bStart);
+void Hal_SetSingleToneTx(struct adapter *padapter, u8 bStart);
+void Hal_SetSingleCarrierTx(struct adapter *padapter, u8 bStart);
+void Hal_SetContinuousTx(struct adapter *padapter, u8 bStart);
void Hal_SetBandwidth(struct adapter *padapter);
void Hal_SetDataRate(struct adapter *padapter);
@@ -494,8 +494,8 @@ void Hal_TriggerRFThermalMeter(struct adapter *padapter);
u8 Hal_ReadRFThermalMeter(struct adapter *padapter);
void Hal_SetCCKContinuousTx(struct adapter *padapter, u8 bStart);
void Hal_SetOFDMContinuousTx(struct adapter *padapter, u8 bStart);
-void Hal_ProSetCrystalCap (struct adapter *padapter , u32 CrystalCapVal);
-void MP_PHY_SetRFPathSwitch(struct adapter *padapter , bool bMain);
+void Hal_ProSetCrystalCap(struct adapter *padapter, u32 CrystalCapVal);
+void MP_PHY_SetRFPathSwitch(struct adapter *padapter, bool bMain);
u32 mpt_ProQueryCalTxPower(struct adapter *padapter, u8 RfPath);
void MPT_PwrCtlDM(struct adapter *padapter, u32 bstart);
u8 MptToMgntRate(u32 MptRateIdx);
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 012d8f54814f..98c3e92245b7 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -187,7 +187,7 @@ struct rx_pkt_attrib {
/* These definition is used for Rx packet reordering. */
-#define SN_LESS(a, b) (((a-b)&0x800)!= 0)
+#define SN_LESS(a, b) (((a - b) & 0x800) != 0)
#define SN_EQUAL(a, b) (a == b)
/* define REORDER_WIN_SIZE 128 */
/* define REORDER_ENTRY_NUM 128 */
@@ -369,12 +369,12 @@ struct recv_frame_hdr
};
-union recv_frame{
+union recv_frame {
union{
struct list_head list;
struct recv_frame_hdr hdr;
uint mem[RECVFRAME_HDR_ALIGN>>2];
- }u;
+ } u;
/* uint mem[MAX_RXSZ>>2]; */
@@ -388,8 +388,8 @@ enum RX_PACKET_TYPE {
C2H_PACKET
};
-extern union recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */
-extern union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */
+extern union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */
+extern union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */
extern int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue);
#define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
@@ -401,7 +401,7 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue);
sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue);
-struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue);
+struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue);
void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
@@ -444,7 +444,7 @@ static inline u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
return NULL;
}
- precvframe->u.hdr.len -=sz;
+ precvframe->u.hdr.len -= sz;
return precvframe->u.hdr.rx_data;
@@ -471,7 +471,7 @@ static inline u8 *recvframe_put(union recv_frame *precvframe, sint sz)
return NULL;
}
- precvframe->u.hdr.len +=sz;
+ precvframe->u.hdr.len += sz;
return precvframe->u.hdr.rx_tail;
@@ -497,7 +497,7 @@ static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
return NULL;
}
- precvframe->u.hdr.len -=sz;
+ precvframe->u.hdr.len -= sz;
return precvframe->u.hdr.rx_tail;
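Editor's note: the SN_LESS change earlier in this file is spacing only, but the macro is worth unpacking. 802.11 sequence numbers are 12 bits, and (a - b) & 0x800 tests the sign bit of the 12-bit difference, so "a precedes b" stays correct when the counter wraps at 4096. A small demonstration (arguments parenthesized here for macro hygiene):

#include <stdio.h>

/* 12-bit serial-number comparison: true when a precedes b, even
 * across the wrap from 4095 back to 0. */
#define SN_LESS(a, b)	((((a) - (b)) & 0x800) != 0)

int main(void)
{
	printf("%d\n", SN_LESS(10, 20));	/* 1: 10 is older */
	printf("%d\n", SN_LESS(20, 10));	/* 0 */
	printf("%d\n", SN_LESS(4090, 3));	/* 1: 4090 is older across the wrap */
	return 0;
}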
diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
index bea184452edd..aa60b6f867dd 100644
--- a/drivers/staging/rtl8723bs/include/rtw_security.h
+++ b/drivers/staging/rtl8723bs/include/rtw_security.h
@@ -208,7 +208,7 @@ struct sha256_state {
};
#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst)\
-do{\
+do {\
switch (psecuritypriv->dot11AuthAlgrthm)\
{\
case dot11AuthAlgrthm_Open:\
@@ -220,18 +220,18 @@ do{\
if (bmcst)\
encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
else\
- encry_algo =(u8) psta->dot118021XPrivacy;\
+ encry_algo = (u8)psta->dot118021XPrivacy;\
break;\
case dot11AuthAlgrthm_WAPI:\
encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
break;\
- }\
-}while (0)
+ } \
+} while (0)
#define _AES_IV_LEN_ 8
#define SET_ICE_IV_LEN(iv_len, icv_len, encrypt)\
-do{\
+do {\
switch (encrypt)\
{\
case _WEP40_:\
@@ -255,19 +255,19 @@ do{\
iv_len = 0;\
icv_len = 0;\
break;\
- }\
-}while (0)
+ } \
+} while (0)
#define GET_TKIP_PN(iv, dot11txpn)\
-do{\
- dot11txpn._byte_.TSC0 =iv[2];\
- dot11txpn._byte_.TSC1 =iv[0];\
- dot11txpn._byte_.TSC2 =iv[4];\
- dot11txpn._byte_.TSC3 =iv[5];\
- dot11txpn._byte_.TSC4 =iv[6];\
- dot11txpn._byte_.TSC5 =iv[7];\
-}while (0)
+do {\
+ dot11txpn._byte_.TSC0 = iv[2];\
+ dot11txpn._byte_.TSC1 = iv[0];\
+ dot11txpn._byte_.TSC2 = iv[4];\
+ dot11txpn._byte_.TSC3 = iv[5];\
+ dot11txpn._byte_.TSC4 = iv[6];\
+ dot11txpn._byte_.TSC5 = iv[7];\
+} while (0)
#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
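Editor's note: ROL32 just above rotates a 32-bit value left by n bits, masking the wrapped-around high bits back into the low positions. A standalone check (assumes 0 < n < 32, as the macro itself requires):

#include <stdio.h>
#include <stdint.h>

#define ROL32(A, n) (((A) << (n)) | (((A) >> (32 - (n))) & ((1UL << (n)) - 1)))

int main(void)
{
	uint32_t v = 0x80000001u;

	/* The top bit wraps around to the low end: 0x80000001 -> 0x00000003. */
	printf("0x%08x\n", (uint32_t)ROL32(v, 1));
	return 0;
}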
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index ea1396005a13..cd2be0056aa1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -40,17 +40,17 @@
#define HW_QUEUE_ENTRY 8
#define WEP_IV(pattrib_iv, dot11txpn, keyidx)\
-do{\
+do {\
pattrib_iv[0] = dot11txpn._byte_.TSC0;\
pattrib_iv[1] = dot11txpn._byte_.TSC1;\
pattrib_iv[2] = dot11txpn._byte_.TSC2;\
pattrib_iv[3] = ((keyidx & 0x3)<<6);\
- dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0: (dot11txpn.val+1);\
-}while (0)
+ dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : (dot11txpn.val + 1);\
+} while (0)
#define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\
-do{\
+do {\
pattrib_iv[0] = dot11txpn._byte_.TSC1;\
pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\
pattrib_iv[2] = dot11txpn._byte_.TSC0;\
@@ -59,11 +59,11 @@ do{\
pattrib_iv[5] = dot11txpn._byte_.TSC3;\
pattrib_iv[6] = dot11txpn._byte_.TSC4;\
pattrib_iv[7] = dot11txpn._byte_.TSC5;\
- dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0: (dot11txpn.val+1);\
-}while (0)
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val + 1);\
+} while (0)
#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
-do{\
+do {\
pattrib_iv[0] = dot11txpn._byte_.TSC0;\
pattrib_iv[1] = dot11txpn._byte_.TSC1;\
pattrib_iv[2] = 0;\
@@ -72,8 +72,8 @@ do{\
pattrib_iv[5] = dot11txpn._byte_.TSC3;\
pattrib_iv[6] = dot11txpn._byte_.TSC4;\
pattrib_iv[7] = dot11txpn._byte_.TSC5;\
- dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0: (dot11txpn.val+1);\
-}while (0)
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val + 1);\
+} while (0)
#define HWXMIT_ENTRY 4
@@ -217,7 +217,7 @@ enum {
XMITBUF_CMD = 2,
};
-struct submit_ctx{
+struct submit_ctx {
unsigned long submit_time; /* */
u32 timeout_ms; /* <0: not synchronous, 0: wait forever, >0: up to ms waiting */
int status; /* status for operation */
@@ -274,7 +274,7 @@ struct xmit_buf
u8 pg_num;
u8 agg_num;
-#if defined(DBG_XMIT_BUF)|| defined(DBG_XMIT_BUF_EXT)
+#if defined(DBG_XMIT_BUF) || defined(DBG_XMIT_BUF_EXT)
u8 no;
#endif
@@ -350,7 +350,7 @@ struct hw_txqueue {
sint ac_tag;
};
-struct agg_pkt_info{
+struct agg_pkt_info {
u16 offset;
u16 pkt_len;
};
@@ -484,7 +484,7 @@ void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry);
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
-void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
+void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
s32 rtw_alloc_hwxmits(struct adapter *padapter);
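Editor's note: the WEP_IV/TKIP_IV/AES_IV macros above share one moving part: after serializing the transmit sequence counter into the IV bytes, they advance the counter and wrap it explicitly at its maximum (24-bit for WEP, 48-bit for TKIP/AES) instead of overflowing silently. A reduced sketch of just that wrap step:

#include <stdio.h>
#include <stdint.h>

/* Advance a packet number that wraps at a given maximum
 * (0xffffff for WEP, 0xffffffffffff for TKIP/AES). */
static uint64_t pn_advance(uint64_t pn, uint64_t max)
{
	return pn == max ? 0 : pn + 1;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)pn_advance(5, 0xffffff));        /* 6 */
	printf("%llu\n", (unsigned long long)pn_advance(0xffffff, 0xffffff)); /* 0 */
	return 0;
}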
diff --git a/drivers/staging/rtl8723bs/include/sta_info.h b/drivers/staging/rtl8723bs/include/sta_info.h
index 3acce5630f8e..c9aa3b5097a7 100644
--- a/drivers/staging/rtl8723bs/include/sta_info.h
+++ b/drivers/staging/rtl8723bs/include/sta_info.h
@@ -31,13 +31,13 @@ struct wlan_acl_pool {
struct __queue acl_node_q;
};
-typedef struct _RSSI_STA{
+typedef struct _RSSI_STA {
s32 UndecoratedSmoothedPWDB;
s32 UndecoratedSmoothedCCK;
s32 UndecoratedSmoothedOFDM;
u64 PacketMap;
u8 ValidBit;
-}RSSI_STA, *PRSSI_STA;
+} RSSI_STA, *PRSSI_STA;
struct stainfo_stats {
@@ -304,7 +304,7 @@ struct sta_info {
#define STA_RX_PKTS_DIFF_ARG(sta) \
sta->sta_stats.rx_mgnt_pkts - sta->sta_stats.last_rx_mgnt_pkts \
, sta->sta_stats.rx_ctrl_pkts - sta->sta_stats.last_rx_ctrl_pkts \
- , sta->sta_stats.rx_data_pkts -sta->sta_stats.last_rx_data_pkts
+ , sta->sta_stats.rx_data_pkts - sta->sta_stats.last_rx_data_pkts
#define STA_PKTS_FMT "(m:%llu, c:%llu, d:%llu)"
@@ -374,7 +374,7 @@ int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta);
struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset);
extern struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr);
-extern u32 rtw_free_stainfo(struct adapter *padapter , struct sta_info *psta);
+extern u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta);
extern void rtw_free_all_stainfo(struct adapter *padapter);
extern struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr);
extern u32 rtw_init_bcmc_stainfo(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index 2faf83704ff0..88a6e982ce01 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -707,7 +707,7 @@ struct HT_caps_element
unsigned char ASEL_caps;
} HT_cap_element;
unsigned char HT_cap[26];
- }u;
+ } u;
} __attribute__ ((packed));
struct HT_info_element
@@ -1102,7 +1102,7 @@ enum P2P_PROTO_WK_ID
P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
- P2P_AP_P2P_CH_SWITCH_PROCESS_WK =5,
+ P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
P2P_RO_CH_WK = 6,
};
@@ -1126,8 +1126,8 @@ enum P2P_PROTO_WK_ID
#define WFD_DEVINFO_PC_TDLS 0x0080
#define WFD_DEVINFO_HDCP_SUPPORT 0x0100
-#define IP_MCAST_MAC(mac) ((mac[0]== 0x01) && (mac[1]== 0x00) && (mac[2]== 0x5e))
-#define ICMPV6_MCAST_MAC(mac) ((mac[0]== 0x33) && (mac[1]== 0x33) && (mac[2]!= 0xff))
+#define IP_MCAST_MAC(mac) ((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
+#define ICMPV6_MCAST_MAC(mac) ((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] != 0xff))
/* Regulatroy Domain */
struct regd_pair_mapping {
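Editor's note: IP_MCAST_MAC and ICMPV6_MCAST_MAC above classify a destination MAC by its prefix: 01:00:5e for IPv4 multicast and 33:33 for IPv6 multicast. A quick standalone check of the same tests (example addresses chosen for illustration):

#include <stdio.h>

#define IP_MCAST_MAC(mac)     ((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
#define ICMPV6_MCAST_MAC(mac) ((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] != 0xff))

int main(void)
{
	unsigned char v4grp[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	unsigned char v6grp[6] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };

	printf("v4 mcast =%d, v6 mcast =%d\n",
	       IP_MCAST_MAC(v4grp), ICMPV6_MCAST_MAC(v6grp));	/* 1, 1 */
	return 0;
}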
diff --git a/drivers/staging/rtl8723bs/include/xmit_osdep.h b/drivers/staging/rtl8723bs/include/xmit_osdep.h
index a61412b54ec0..e9ff274f7474 100644
--- a/drivers/staging/rtl8723bs/include/xmit_osdep.h
+++ b/drivers/staging/rtl8723bs/include/xmit_osdep.h
@@ -35,8 +35,8 @@ void rtw_os_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitb
extern uint rtw_remainder_len(struct pkt_file *pfile);
extern void _rtw_open_pktfile(_pkt *pkt, struct pkt_file *pfile);
-extern uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen);
-extern sint rtw_endofpktfile (struct pkt_file *pfile);
+extern uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
+extern sint rtw_endofpktfile(struct pkt_file *pfile);
extern void rtw_os_pkt_complete(struct adapter *padapter, _pkt *pkt);
extern void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 322cabb97b99..1ba85a43f05a 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -254,7 +254,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
/* DBG_8192C("%s\n", __func__); */
- bssinf_len = pnetwork->network.IELength+sizeof (struct ieee80211_hdr_3addr);
+ bssinf_len = pnetwork->network.IELength + sizeof(struct ieee80211_hdr_3addr);
if (bssinf_len > MAX_BSSINFO_LEN) {
DBG_871X("%s IE Length too long > %d byte\n", __func__, MAX_BSSINFO_LEN);
goto exit;
@@ -263,7 +263,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
{
u16 wapi_len = 0;
- if (rtw_get_wapi_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &wapi_len)>0)
+ if (rtw_get_wapi_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &wapi_len) > 0)
{
if (wapi_len > 0)
{
@@ -286,7 +286,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
wpsie = rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wpsielen);
- if (wpsie && wpsielen>0)
+ if (wpsie && wpsielen > 0)
psr = rtw_get_wps_attr_content(wpsie, wpsielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
@@ -353,7 +353,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
pbuf += sizeof(struct ieee80211_hdr_3addr);
- len = sizeof (struct ieee80211_hdr_3addr);
+ len = sizeof(struct ieee80211_hdr_3addr);
memcpy(pbuf, pnetwork->network.IEs, pnetwork->network.IELength);
len += pnetwork->network.IELength;
@@ -402,7 +402,7 @@ int rtw_cfg80211_check_bss(struct adapter *padapter)
cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
- return (bss!= NULL);
+ return (bss != NULL);
}
void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter)
@@ -424,7 +424,7 @@ void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter)
struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network);
struct wlan_network *scanned = pmlmepriv->cur_network_scanned;
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ==true)
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)
{
memcpy(&cur_network->network, pnetwork, sizeof(struct wlan_bssid_ex));
@@ -579,7 +579,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
struct sta_info *psta = NULL, *pbcmc_sta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv* psecuritypriv =&(padapter->securitypriv);
+ struct security_priv* psecuritypriv = &(padapter->securitypriv);
struct sta_priv *pstapriv = &padapter->stapriv;
DBG_8192C("%s\n", __func__);
@@ -633,7 +633,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
DBG_8192C("r871x_set_encryption, wep_key_idx =%d, len =%d\n", wep_key_idx, wep_key_len);
- if ((wep_key_idx >= WEP_KEYS) || (wep_key_len<= 0))
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0))
{
ret = -EINVAL;
goto exit;
@@ -673,7 +673,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
}
- if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) /* group key */
+ if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) /* group key */
{
if (param->u.crypt.set_tx == 0) /* group key */
{
@@ -681,7 +681,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
DBG_8192C("%s, set group_key, WEP\n", __func__);
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -696,7 +696,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
@@ -712,7 +712,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
}
else
{
@@ -729,7 +729,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
- pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
if (pbcmc_sta)
{
pbcmc_sta->ieee8021x_blocked = false;
@@ -748,7 +748,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
if (param->u.crypt.set_tx == 1) /* pairwise key */
{
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
if (strcmp(param->u.crypt.alg, "WEP") == 0)
{
@@ -799,7 +799,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
if (strcmp(param->u.crypt.alg, "WEP") == 0)
{
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -811,7 +811,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
@@ -825,7 +825,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
}
else
{
@@ -840,7 +840,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
- pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
if (pbcmc_sta)
{
pbcmc_sta->ieee8021x_blocked = false;
@@ -940,7 +940,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) /* 802_1x */
{
- struct sta_info * psta,*pbcmc_sta;
+ struct sta_info * psta, *pbcmc_sta;
struct sta_priv * pstapriv = &padapter->stapriv;
/* DBG_8192C("%s, : dot11AuthAlgrthm == dot11AuthAlgrthm_8021X\n", __func__); */
@@ -959,7 +959,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
psta->ieee8021x_blocked = false;
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)||
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
(padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
{
psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
@@ -970,7 +970,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
DBG_8192C("%s, : param->u.crypt.set_tx == 1\n", __func__);
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
if (strcmp(param->u.crypt.alg, "TKIP") == 0)/* set mic key */
{
@@ -978,7 +978,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
- padapter->securitypriv.busetkipkey =false;
+ padapter->securitypriv.busetkipkey = false;
/* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
}
@@ -991,21 +991,21 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
{
if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0)
{
- memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
- memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[16]), 8);
- memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[24]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
padapter->securitypriv.binstallGrpkey = true;
/* DEBUG_ERR((" param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
DBG_871X(" ~~~~set sta key:groupkey\n");
padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
- rtw_set_key(padapter,&padapter->securitypriv, param->u.crypt.idx, 1, true);
+ rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true);
}
else if (strcmp(param->u.crypt.alg, "BIP") == 0)
{
/* DBG_871X("BIP key_len =%d , index =%d @@@@@@@@@@@@@@@@@@\n", param->u.crypt.key_len, param->u.crypt.idx); */
/* save the IGTK key, length 16 bytes */
- memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/*DBG_871X("IGTK key below:\n");
for (no = 0;no<16;no++)
printk(" %02x ", padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey[no]);
@@ -1017,7 +1017,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
}
}
- pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
if (pbcmc_sta == NULL)
{
/* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */
@@ -1028,7 +1028,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
if (strcmp(param->u.crypt.alg, "none") != 0)
pbcmc_sta->ieee8021x_blocked = false;
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)||
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
(padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
{
pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
@@ -1270,8 +1270,8 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
/* for Ad-Hoc/AP mode */
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)
- ||check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)
- ||check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)
+ || check_fwstate(pmlmepriv, WIFI_AP_STATE))
&& check_fwstate(pmlmepriv, _FW_LINKED)
)
{
@@ -1346,7 +1346,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
rtw_wdev->iftype = type;
- if (rtw_set_802_11_infrastructure_mode(padapter, networkType) ==false)
+ if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false)
{
rtw_wdev->iftype = old_type;
ret = -EPERM;
@@ -1464,7 +1464,7 @@ static int rtw_cfg80211_set_probe_req_wpsp2pie(struct adapter *padapter, char *b
DBG_8192C("%s, ielen =%d\n", __func__, len);
#endif
- if (len>0)
+ if (len > 0)
{
if ((wps_ie = rtw_get_wps_ie(buf, len, NULL, &wps_ielen)))
{
@@ -1503,8 +1503,8 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
int ret = 0;
struct ndis_802_11_ssid *ssid = NULL;
struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
- u8 survey_times =3;
- u8 survey_times_for_one_ch =6;
+ u8 survey_times = 3;
+ u8 survey_times_for_one_ch = 6;
struct cfg80211_ssid *ssids = request->ssids;
int j = 0;
bool need_indicate_scan_done = false;
@@ -1556,7 +1556,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
goto check_need_indicate_scan_done;
}
- if (request->ie && request->ie_len>0)
+ if (request->ie && request->ie_len > 0)
{
rtw_cfg80211_set_probe_req_wpsp2pie(padapter, (u8 *)request->ie, request->ie_len);
}
@@ -1610,7 +1610,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
/* parsing channels, n_channels */
memset(ch, 0, sizeof(struct rtw_ieee80211_channel)*RTW_CHANNEL_SCAN_AMOUNT);
- for (i = 0;i<request->n_channels && i<RTW_CHANNEL_SCAN_AMOUNT;i++) {
+ for (i = 0; i < request->n_channels && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
#ifdef DEBUG_CFG80211
DBG_871X(FUNC_ADPT_FMT CHAN_FMT"\n", FUNC_ADPT_ARG(padapter), CHAN_ARG(request->channels[i]));
#endif
@@ -1620,12 +1620,12 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
spin_lock_bh(&pmlmepriv->lock);
if (request->n_channels == 1) {
- for (i = 1;i<survey_times_for_one_ch;i++)
+ for (i = 1; i < survey_times_for_one_ch; i++)
memcpy(&ch[i], &ch[0], sizeof(struct rtw_ieee80211_channel));
_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times_for_one_ch);
} else if (request->n_channels <= 4) {
- for (j =request->n_channels-1;j>= 0;j--)
- for (i = 0;i<survey_times;i++)
+ for (j = request->n_channels - 1; j >= 0; j--)
+ for (i = 0; i < survey_times; i++)
{
memcpy(&ch[j*survey_times+i], &ch[j], sizeof(struct rtw_ieee80211_channel));
}
@@ -1699,7 +1699,7 @@ static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- if (psecuritypriv->ndisauthtype>Ndis802_11AuthModeWPA)
+ if (psecuritypriv->ndisauthtype > Ndis802_11AuthModeWPA)
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
break;
@@ -1767,7 +1767,7 @@ static int rtw_cfg80211_set_cipher(struct security_priv *psecuritypriv, u32 ciph
psecuritypriv->ndisencryptstatus = ndisencryptstatus;
/* if (psecuritypriv->dot11PrivacyAlgrthm >= _AES_) */
- /* psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPA2PSK; */
+ /* psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPA2PSK; */
}
return 0;
@@ -1799,7 +1799,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
int wpa_ielen = 0;
int wpa2_ielen = 0;
u8 *pwpa, *pwpa2;
- u8 null_addr[]= {0, 0, 0, 0, 0, 0};
+ u8 null_addr[] = {0, 0, 0, 0, 0, 0};
if (pie == NULL || !ielen) {
/* Treat this as normal case, but need to clear WIFI_UNDER_WPS */
@@ -1818,13 +1818,13 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
goto exit;
}
- memcpy(buf, pie , ielen);
+ memcpy(buf, pie, ielen);
/* dump */
{
int i;
DBG_8192C("set wpa_ie(length:%zu):\n", ielen);
- for (i = 0;i<ielen;i =i+8)
+ for (i = 0; i < ielen; i = i + 8)
DBG_8192C("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
}
@@ -1835,12 +1835,12 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
}
pwpa = rtw_get_wpa_ie(buf, &wpa_ielen, ielen);
- if (pwpa && wpa_ielen>0)
+ if (pwpa && wpa_ielen > 0)
{
if (rtw_parse_wpa_ie(pwpa, wpa_ielen+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS)
{
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPAPSK;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
memcpy(padapter->securitypriv.supplicant_ie, &pwpa[0], wpa_ielen+2);
DBG_8192C("got wpa_ie, wpa_ielen:%u\n", wpa_ielen);
@@ -1848,12 +1848,12 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
}
pwpa2 = rtw_get_wpa2_ie(buf, &wpa2_ielen, ielen);
- if (pwpa2 && wpa2_ielen>0)
+ if (pwpa2 && wpa2_ielen > 0)
{
if (rtw_parse_wpa2_ie(pwpa2, wpa2_ielen+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS)
{
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPA2PSK;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
memcpy(padapter->securitypriv.supplicant_ie, &pwpa2[0], wpa2_ielen+2);
DBG_8192C("got wpa2_ie, wpa2_ielen:%u\n", wpa2_ielen);
@@ -1873,7 +1873,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
{
case WPA_CIPHER_NONE:
padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
break;
case WPA_CIPHER_WEP40:
padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
@@ -1897,7 +1897,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
{
case WPA_CIPHER_NONE:
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
break;
case WPA_CIPHER_WEP40:
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
@@ -1924,7 +1924,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
wps_ie = rtw_get_wps_ie(buf, ielen, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0) {
DBG_8192C("got wps_ie, wps_ielen:%u\n", wps_ielen);
- padapter->securitypriv.wps_ie_len = wps_ielen<MAX_WPS_IE_LEN?wps_ielen:MAX_WPS_IE_LEN;
+ padapter->securitypriv.wps_ie_len = wps_ielen < MAX_WPS_IE_LEN ? wps_ielen : MAX_WPS_IE_LEN;
memcpy(padapter->securitypriv.wps_ie, wps_ie, padapter->securitypriv.wps_ie_len);
set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
} else {
@@ -2027,7 +2027,7 @@ static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
rtw_wdev->iftype = NL80211_IFTYPE_STATION;
- if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) ==false)
+ if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) == false)
{
rtw_wdev->iftype = old_type;
ret = -EPERM;
@@ -2185,7 +2185,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
{
- ret = -EOPNOTSUPP ;
+ ret = -EOPNOTSUPP;
}
kfree(pwep);
@@ -2301,7 +2301,7 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
u8 index, blInserted = false;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
- u8 strZeroMacAddress[ ETH_ALEN ] = { 0x00 };
+ u8 strZeroMacAddress[ETH_ALEN] = { 0x00 };
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
@@ -2313,7 +2313,7 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
blInserted = false;
/* overwrite PMKID */
- for (index = 0 ; index<NUM_PMKID_CACHE; index++)
+ for (index = 0 ; index < NUM_PMKID_CACHE; index++)
{
if (!memcmp(psecuritypriv->PMKIDList[index].Bssid, (u8 *)pmksa->bssid, ETH_ALEN))
{ /* BSSID is matched, the same AP => rewrite with new PMKID. */
@@ -2337,7 +2337,7 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, (u8 *)pmksa->pmkid, WLAN_PMKID_LEN);
psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true;
- psecuritypriv->PMKIDIndex++ ;
+ psecuritypriv->PMKIDIndex++;
if (psecuritypriv->PMKIDIndex == 16)
{
psecuritypriv->PMKIDIndex = 0;
@@ -2357,7 +2357,7 @@ static int cfg80211_rtw_del_pmksa(struct wiphy *wiphy,
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
- for (index = 0 ; index<NUM_PMKID_CACHE; index++)
+ for (index = 0 ; index < NUM_PMKID_CACHE; index++)
{
if (!memcmp(psecuritypriv->PMKIDList[index].Bssid, (u8 *)pmksa->bssid, ETH_ALEN))
{ /* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
@@ -2387,7 +2387,7 @@ static int cfg80211_rtw_flush_pmksa(struct wiphy *wiphy,
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
- memset(&psecuritypriv->PMKIDList[ 0 ], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
psecuritypriv->PMKIDIndex = 0;
return 0;
@@ -2575,7 +2575,7 @@ static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
.ndo_start_xmit = rtw_cfg80211_monitor_if_xmit_entry,
};
-static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, struct net_device **ndev)
+static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, struct net_device **ndev)
{
int ret = 0;
struct net_device* mon_ndev = NULL;
@@ -2734,7 +2734,7 @@ static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_l
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
- if (head_len<24)
+ if (head_len < 24)
return -EINVAL;
pbuf = rtw_zmalloc(head_len+tail_len);
@@ -3276,7 +3276,7 @@ static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap, enum
ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
}
- else if ((rf_type == RF_1T2R) || (rf_type ==RF_2T2R))
+ else if ((rf_type == RF_1T2R) || (rf_type == RF_2T2R))
{
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0xFF;
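Editor's note: several hunks in this file reflow the recurring clamp "(param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len)". The spacing is now fine; in kernel code the same intent is usually expressed with min_t(). A sketch of the equivalent, with a plain-C stand-in for min_t so it compiles outside the kernel:

#include <stdio.h>
#include <string.h>

/* Plain-C stand-in for the kernel's min_t(u32, a, b). */
static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned char skey[16];
	unsigned char key[32] = { 0 };
	unsigned int key_len = 24;

	/* Copy at most sizeof(skey) bytes: exactly what the ternary clamps to. */
	memcpy(skey, key, min_u32(key_len, sizeof(skey)));
	printf("copied %u bytes\n", min_u32(key_len, sizeof(skey)));
	return 0;
}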
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 9b9038e7deb1..5059b874080e 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -59,7 +59,7 @@ void rtw_indicate_wx_assoc_event(struct adapter *padapter)
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ==true)
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)
memcpy(wrqu.ap_addr.sa_data, pnetwork->MacAddress, ETH_ALEN);
else
memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
@@ -86,11 +86,11 @@ static char *translate_scan(struct adapter *padapter,
u32 ht_ielen = 0;
char *custom = NULL;
char *p;
- u16 max_rate = 0, rate, ht_cap =false, vht_cap = false;
+ u16 max_rate = 0, rate, ht_cap = false, vht_cap = false;
u32 i = 0;
u8 bw_40MHz = 0, short_GI = 0;
u16 mcs_rate = 0, vht_data_rate = 0;
- u8 ie_offset = (pnetwork->network.Reserved[0] == 2? 0:12);
+ u8 ie_offset = (pnetwork->network.Reserved[0] == 2 ? 0 : 12);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
u8 ss, sq;
@@ -113,11 +113,11 @@ static char *translate_scan(struct adapter *padapter,
} else {
p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12);
}
- if (p && ht_ielen>0) {
+ if (p && ht_ielen > 0) {
struct rtw_ieee80211_ht_cap *pht_capie;
ht_cap = true;
pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
- memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2);
+ memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
bw_40MHz = (le16_to_cpu(pht_capie->cap_info) & IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
short_GI = (le16_to_cpu(pht_capie->cap_info) & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
}
@@ -163,7 +163,7 @@ static char *translate_scan(struct adapter *padapter,
cap = le16_to_cpu(le_tmp);
}
- if (cap & (WLAN_CAPABILITY_IBSS |WLAN_CAPABILITY_BSS)) {
+ if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) {
if (cap & WLAN_CAPABILITY_BSS)
iwe.u.mode = IW_MODE_MASTER;
else
@@ -172,7 +172,7 @@ static char *translate_scan(struct adapter *padapter,
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
}
- if (pnetwork->network.Configuration.DSConfig<1 /*|| pnetwork->network.Configuration.DSConfig>14*/)
+ if (pnetwork->network.Configuration.DSConfig < 1 /*|| pnetwork->network.Configuration.DSConfig > 14*/)
pnetwork->network.Configuration.DSConfig = 1;
/* Add frequency/channel */
@@ -197,12 +197,12 @@ static char *translate_scan(struct adapter *padapter,
if (!custom)
return start;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
- while (pnetwork->network.SupportedRates[i]!= 0) {
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ while (pnetwork->network.SupportedRates[i] != 0) {
rate = pnetwork->network.SupportedRates[i]&0x7F;
if (rate > max_rate)
max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
i++;
}
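Editor's note: the snprintf-to-scnprintf change above is the one behavioral fix in this block. snprintf returns the length the output would have had, so "p += snprintf(...)" can walk p past the end of the buffer once output is truncated; the kernel's scnprintf returns the number of bytes actually written (excluding the NUL). A user-space sketch of the failure mode (scnprintf is a kernel API, so the wrapper below is an illustrative stand-in and assumes size > 0):

#include <stdio.h>
#include <stdarg.h>
#include <stddef.h>

/* Stand-in for the kernel's scnprintf(): clamp the return value to
 * what actually landed in the buffer. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);

	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char buf[8];
	char *p = buf;

	/* snprintf would return 12 here and push p past buf[7];
	 * the clamped variant returns 7, keeping p inside buf. */
	p += my_scnprintf(buf, sizeof(buf), "Rates: %d", 54321);
	printf("advanced by %td\n", p - buf);
	return 0;
}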
@@ -239,7 +239,7 @@ static char *translate_scan(struct adapter *padapter,
if (!buf)
return start;
if (wpa_len > 0) {
- p =buf;
+ p = buf;
p += sprintf(p, "wpa_ie =");
for (i = 0; i < wpa_len; i++)
p += sprintf(p, "%02x", wpa_ie[i]);
@@ -258,7 +258,7 @@ static char *translate_scan(struct adapter *padapter,
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
memset(&iwe, 0, sizeof(iwe));
- iwe.cmd =IWEVGENIE;
+ iwe.cmd = IWEVGENIE;
iwe.u.data.length = wpa_len;
start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie);
}
@@ -274,7 +274,7 @@ static char *translate_scan(struct adapter *padapter,
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
memset(&iwe, 0, sizeof(iwe));
- iwe.cmd =IWEVGENIE;
+ iwe.cmd = IWEVGENIE;
iwe.u.data.length = rsn_len;
start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
}
@@ -298,13 +298,13 @@ static char *translate_scan(struct adapter *padapter,
}
while (cnt < total_ielen) {
- if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen>2)) {
+ if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen > 2)) {
wpsie_ptr = &ie_ptr[cnt];
- iwe.cmd =IWEVGENIE;
+ iwe.cmd = IWEVGENIE;
iwe.u.data.length = (u16)wps_ielen;
start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr);
}
- cnt+=ie_ptr[cnt+1]+2; /* goto next */
+ cnt += ie_ptr[cnt + 1] + 2; /* goto next */
}
}
@@ -352,8 +352,8 @@ static char *translate_scan(struct adapter *padapter,
#if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
{
s16 tmp_noise = 0;
- rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&(pnetwork->network.Configuration.DSConfig), &(tmp_noise));
- iwe.u.qual.noise = tmp_noise ;
+ rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &(pnetwork->network.Configuration.DSConfig), &(tmp_noise));
+ iwe.u.qual.noise = tmp_noise;
}
#else
iwe.u.qual.noise = 0; /* noise level */
@@ -500,7 +500,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
DBG_871X("wep, set_tx = 1\n");
if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
- ret = -EOPNOTSUPP ;
+ ret = -EOPNOTSUPP;
} else {
DBG_871X("wep, set_tx = 0\n");
@@ -508,12 +508,12 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
/* psecuritypriv->dot11PrivacyKeyIndex =keyid", but can rtw_set_key to fw/cam */
if (wep_key_idx >= WEP_KEYS) {
- ret = -EOPNOTSUPP ;
+ ret = -EOPNOTSUPP;
goto exit;
}
memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
- psecuritypriv->dot11DefKeylen[wep_key_idx]=pwep->KeyLength;
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0, true);
}
@@ -533,20 +533,20 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (strcmp(param->u.crypt.alg, "none") != 0)
psta->ieee8021x_blocked = false;
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)||
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
(padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
if (param->u.crypt.set_tx == 1) { /* pairwise key */
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
/* DEBUG_ERR(("\nset key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
- padapter->securitypriv.busetkipkey =false;
+ padapter->securitypriv.busetkipkey = false;
/* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
}
@@ -556,11 +556,11 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
rtw_setstakey_cmd(padapter, psta, true, true);
} else { /* group key */
if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0) {
- memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* only TKIP group key need to install this */
if (param->u.crypt.key_len > 16) {
- memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[16]), 8);
- memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[24]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
}
padapter->securitypriv.binstallGrpkey = true;
/* DEBUG_ERR((" param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
@@ -568,11 +568,11 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
- rtw_set_key(padapter,&padapter->securitypriv, param->u.crypt.idx, 1, true);
+ rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true);
} else if (strcmp(param->u.crypt.alg, "BIP") == 0) {
/* printk("BIP key_len =%d , index =%d @@@@@@@@@@@@@@@@@@\n", param->u.crypt.key_len, param->u.crypt.idx); */
/* save the IGTK key, length 16 bytes */
- memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/*printk("IGTK key below:\n");
for (no = 0;no<16;no++)
printk(" %02x ", padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey[no]);
@@ -584,7 +584,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
}
}
- pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
if (pbcmc_sta == NULL) {
/* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */
} else {
@@ -592,7 +592,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (strcmp(param->u.crypt.alg, "none") != 0)
pbcmc_sta->ieee8021x_blocked = false;
- if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)||
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
(padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
@@ -613,7 +613,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
u8 *buf = NULL;
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
- u8 null_addr[]= {0, 0, 0, 0, 0, 0};
+ u8 null_addr[] = {0, 0, 0, 0, 0, 0};
if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL)) {
_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
@@ -630,13 +630,13 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
goto exit;
}
- memcpy(buf, pie , ielen);
+ memcpy(buf, pie, ielen);
/* dump */
{
int i;
DBG_871X("\n wpa_ie(length:%d):\n", ielen);
- for (i = 0;i<ielen;i =i+8)
+ for (i = 0; i < ielen; i = i + 8)
DBG_871X("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
}
@@ -648,13 +648,13 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
if (rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPAPSK;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
}
if (rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPA2PSK;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
}
@@ -666,7 +666,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
switch (group_cipher) {
case WPA_CIPHER_NONE:
padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
break;
case WPA_CIPHER_WEP40:
padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
@@ -689,7 +689,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
switch (pairwise_cipher) {
case WPA_CIPHER_NONE:
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
break;
case WPA_CIPHER_WEP40:
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
@@ -712,7 +712,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
{/* set wps_ie */
u16 cnt = 0;
- u8 eid, wps_oui[4]={0x0, 0x50, 0xf2, 0x04};
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
while (cnt < ielen) {
eid = buf[cnt];
@@ -762,7 +762,7 @@ static int rtw_wx_get_name(struct net_device *dev,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
u32 ht_ielen = 0;
char *p;
- u8 ht_cap =false, vht_cap =false;
+ u8 ht_cap = false, vht_cap = false;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
NDIS_802_11_RATES_EX* prates = NULL;
@@ -772,7 +772,7 @@ static int rtw_wx_get_name(struct net_device *dev,
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
/* parsing HT_CAP_IE */
p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
- if (p && ht_ielen>0)
+ if (p && ht_ielen > 0)
ht_cap = true;
prates = &pcur_bss->SupportedRates;
@@ -846,7 +846,7 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType ;
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType;
int ret = 0;
if (_FAIL == rtw_pwr_wakeup(padapter)) {
@@ -878,7 +878,7 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
DBG_871X("set_mode = IW_MODE_INFRA\n");
break;
- default :
+ default:
ret = -EINVAL;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("\n Mode: %s is not supported \n", iw_operation_mode[wrqu->mode]));
goto exit;
@@ -895,7 +895,7 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
}
*/
- if (rtw_set_802_11_infrastructure_mode(padapter, networkType) ==false) {
+ if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false) {
ret = -EPERM;
goto exit;
@@ -939,8 +939,8 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
int intReturn = false;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct iw_pmksa* pPMK = (struct iw_pmksa *)extra;
- u8 strZeroMacAddress[ ETH_ALEN ] = { 0x00 };
- u8 strIssueBssid[ ETH_ALEN ] = { 0x00 };
+ u8 strZeroMacAddress[ETH_ALEN] = { 0x00 };
+ u8 strIssueBssid[ETH_ALEN] = { 0x00 };
/*
There are the BSSID information in the bssid.sa_data array.
@@ -960,13 +960,13 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
blInserted = false;
/* overwrite PMKID */
- for (j = 0 ; j<NUM_PMKID_CACHE; j++) {
+ for (j = 0; j < NUM_PMKID_CACHE; j++) {
if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => rewrite with new PMKID. */
DBG_871X("[rtw_wx_set_pmkid] BSSID exists in the PMKList.\n");
memcpy(psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN);
- psecuritypriv->PMKIDList[ j ].bUsed = true;
+ psecuritypriv->PMKIDList[j].bUsed = true;
psecuritypriv->PMKIDIndex = j+1;
blInserted = true;
break;
@@ -981,25 +981,25 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
- psecuritypriv->PMKIDList[ psecuritypriv->PMKIDIndex ].bUsed = true;
- psecuritypriv->PMKIDIndex++ ;
+ psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true;
+ psecuritypriv->PMKIDIndex++;
if (psecuritypriv->PMKIDIndex == 16)
psecuritypriv->PMKIDIndex = 0;
}
} else if (pPMK->cmd == IW_PMKSA_REMOVE) {
DBG_871X("[rtw_wx_set_pmkid] IW_PMKSA_REMOVE!\n");
intReturn = true;
- for (j = 0 ; j<NUM_PMKID_CACHE; j++) {
+ for (j = 0; j < NUM_PMKID_CACHE; j++) {
if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
eth_zero_addr(psecuritypriv->PMKIDList[j].Bssid);
- psecuritypriv->PMKIDList[ j ].bUsed = false;
+ psecuritypriv->PMKIDList[j].bUsed = false;
break;
}
}
} else if (pPMK->cmd == IW_PMKSA_FLUSH) {
DBG_871X("[rtw_wx_set_pmkid] IW_PMKSA_FLUSH!\n");
- memset(&psecuritypriv->PMKIDList[ 0 ], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
psecuritypriv->PMKIDIndex = 0;
intReturn = true;
}
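
[Note: rtw_wx_set_pmkid() implements a small fixed-size PMKID cache keyed by BSSID: overwrite on a BSSID match, otherwise append at a rotating index, clear one entry on IW_PMKSA_REMOVE, and memset the whole table on IW_PMKSA_FLUSH. Reduced to its essentials as a sketch with hypothetical standalone types (the driver compares BSSIDs unconditionally; checking the used flag first is a minor tightening):

#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/types.h>

#define NUM_PMKID_CACHE 16

struct pmkid_entry {
	u8 bssid[ETH_ALEN];
	u8 pmkid[16];
	bool used;
};

static struct pmkid_entry cache[NUM_PMKID_CACHE];
static unsigned int cache_idx;

/* Overwrite the entry for a known BSSID, else take the next slot;
 * the write index wraps at NUM_PMKID_CACHE as in the hunk above. */
static void pmkid_add(const u8 *bssid, const u8 *pmkid)
{
	unsigned int j;

	for (j = 0; j < NUM_PMKID_CACHE; j++) {
		if (cache[j].used &&
		    ether_addr_equal(cache[j].bssid, bssid)) {
			memcpy(cache[j].pmkid, pmkid, 16);
			return;
		}
	}
	memcpy(cache[cache_idx].bssid, bssid, ETH_ALEN);
	memcpy(cache[cache_idx].pmkid, pmkid, 16);
	cache[cache_idx].used = true;
	cache_idx = (cache_idx + 1) % NUM_PMKID_CACHE;
}
]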
@@ -1093,7 +1093,7 @@ static int rtw_wx_get_range(struct net_device *dev,
/* Commented by Albert 2009/10/13 */
/* The following code will proivde the security capability to network manager. */
/* If the driver doesn't provide this capability to network manager, */
-/* the WPA/WPA2 routers can't be choosen in the network manager. */
+/* the WPA/WPA2 routers can't be chosen in the network manager. */
/*
#define IW_SCAN_CAPA_NONE 0x00
@@ -1106,11 +1106,11 @@ static int rtw_wx_get_range(struct net_device *dev,
#define IW_SCAN_CAPA_TIME 0x40
*/
- range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2|
- IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP;
+ range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+ IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
- range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |IW_SCAN_CAPA_BSSID|
- IW_SCAN_CAPA_CHANNEL|IW_SCAN_CAPA_MODE|IW_SCAN_CAPA_RATE;
+ range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE | IW_SCAN_CAPA_BSSID |
+ IW_SCAN_CAPA_CHANNEL | IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE;
return 0;
}
@@ -1287,7 +1287,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
goto exit;
}
- if (!padapter->hw_init_completed ) {
+ if (!padapter->hw_init_completed) {
ret = -1;
goto exit;
}
@@ -1330,7 +1330,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
} else if (wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE
&& !memcmp(extra, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)) {
- int len = wrqu->data.length -WEXT_CSCAN_HEADER_SIZE;
+ int len = wrqu->data.length - WEXT_CSCAN_HEADER_SIZE;
char *pos = extra+WEXT_CSCAN_HEADER_SIZE;
char section;
char sec_len;
@@ -1339,7 +1339,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
/* DBG_871X("%s COMBO_SCAN header is recognized\n", __func__); */
while (len >= 1) {
- section = *(pos++); len-= 1;
+ section = *(pos++); len -= 1;
switch (section) {
case WEXT_CSCAN_SSID_SECTION:
@@ -1349,9 +1349,9 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
- sec_len = *(pos++); len-= 1;
+ sec_len = *(pos++); len -= 1;
- if (sec_len>0 && sec_len<=len) {
+ if (sec_len > 0 && sec_len <= len) {
ssid[ssid_index].SsidLength = sec_len;
memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
/* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */
@@ -1359,29 +1359,29 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
ssid_index++;
}
- pos+=sec_len; len-=sec_len;
+ pos += sec_len; len -= sec_len;
break;
case WEXT_CSCAN_CHANNEL_SECTION:
/* DBG_871X("WEXT_CSCAN_CHANNEL_SECTION\n"); */
- pos+= 1; len-= 1;
+ pos += 1; len -= 1;
break;
case WEXT_CSCAN_ACTV_DWELL_SECTION:
/* DBG_871X("WEXT_CSCAN_ACTV_DWELL_SECTION\n"); */
- pos+=2; len-=2;
+ pos += 2; len -= 2;
break;
case WEXT_CSCAN_PASV_DWELL_SECTION:
/* DBG_871X("WEXT_CSCAN_PASV_DWELL_SECTION\n"); */
- pos+=2; len-=2;
+ pos += 2; len -= 2;
break;
case WEXT_CSCAN_HOME_DWELL_SECTION:
/* DBG_871X("WEXT_CSCAN_HOME_DWELL_SECTION\n"); */
- pos+=2; len-=2;
+ pos += 2; len -= 2;
break;
case WEXT_CSCAN_TYPE_SECTION:
/* DBG_871X("WEXT_CSCAN_TYPE_SECTION\n"); */
- pos+= 1; len-= 1;
+ pos += 1; len -= 1;
break;
default:
/* DBG_871X("Unknown CSCAN section %c\n", section); */
@@ -1391,7 +1391,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
}
- /* jeff: it has still some scan paramater to parse, we only do this now... */
+ /* jeff: it has still some scan parameter to parse, we only do this now... */
_status = rtw_set_802_11_bssid_list_scan(padapter, ssid, RTW_SSID_SCAN_AMOUNT);
} else {
@@ -1463,7 +1463,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
&& rtw_mlme_band_check(padapter, pnetwork->network.Configuration.DSConfig) == true
&& true == rtw_validate_ssid(&(pnetwork->network.Ssid))) {
- ev =translate_scan(padapter, a, pnetwork, ev, stop);
+ ev = translate_scan(padapter, a, pnetwork, ev, stop);
}
plist = get_next(plist);
@@ -1481,7 +1481,7 @@ exit:
DBG_871X("DBG_IOCTL %s:%d return %d\n", __func__, __LINE__, ret);
#endif
- return ret ;
+ return ret;
}
@@ -1570,7 +1570,7 @@ static int rtw_wx_set_essid(struct net_device *dev,
pnetwork->network.Ssid.Ssid));
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength)) &&
- (pnetwork->network.Ssid.SsidLength ==ndis_ssid.SsidLength)) {
+ (pnetwork->network.Ssid.SsidLength == ndis_ssid.SsidLength)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
("rtw_wx_set_essid: find match, set infra mode\n"));
@@ -1651,7 +1651,7 @@ static int rtw_wx_set_rate(struct net_device *dev,
u32 target_rate = wrqu->bitrate.value;
u32 fixed = wrqu->bitrate.fixed;
u32 ratevalue = 0;
- u8 mpdatarate[NumRates]={11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
+ u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n"));
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
@@ -1706,8 +1706,8 @@ static int rtw_wx_set_rate(struct net_device *dev,
set_rate:
- for (i = 0; i<NumRates; i++) {
- if (ratevalue ==mpdatarate[i]) {
+ for (i = 0; i < NumRates; i++) {
+ if (ratevalue == mpdatarate[i]) {
datarates[i] = mpdatarate[i];
if (fixed == 0)
break;
@@ -1855,7 +1855,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
authmode = Ndis802_11AuthModeOpen;
- padapter->securitypriv.ndisauthtype =authmode;
+ padapter->securitypriv.ndisauthtype = authmode;
goto exit;
}
@@ -1881,7 +1881,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
authmode = Ndis802_11AuthModeOpen;
- padapter->securitypriv.ndisauthtype =authmode;
+ padapter->securitypriv.ndisauthtype = authmode;
} else if (erq->flags & IW_ENCODE_RESTRICTED) {
DBG_871X("rtw_wx_set_enc():IW_ENCODE_RESTRICTED\n");
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
@@ -1891,7 +1891,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
authmode = Ndis802_11AuthModeShared;
- padapter->securitypriv.ndisauthtype =authmode;
+ padapter->securitypriv.ndisauthtype = authmode;
} else {
DBG_871X("rtw_wx_set_enc():erq->flags = 0x%x\n", erq->flags);
@@ -1900,7 +1900,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
authmode = Ndis802_11AuthModeOpen;
- padapter->securitypriv.ndisauthtype =authmode;
+ padapter->securitypriv.ndisauthtype = authmode;
}
wep.KeyIndex = key;
@@ -1909,7 +1909,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
} else {
- wep.KeyLength = 0 ;
+ wep.KeyLength = 0;
if (keyindex_provided == 1) { /* set key_id only, no given KeyMaterial(erq->length == 0). */
padapter->securitypriv.dot11PrivacyKeyIndex = key;
@@ -2093,7 +2093,7 @@ static int rtw_wx_set_auth(struct net_device *dev,
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
- padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
}
break;
@@ -2181,7 +2181,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
param->u.crypt.set_tx = 0;
}
- param->u.crypt.idx = (pencoding->flags&0x00FF) -1 ;
+ param->u.crypt.idx = (pencoding->flags & 0x00FF) - 1;
if (pext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
memcpy(param->u.crypt.seq, pext->rx_seq, 8);
@@ -2459,7 +2459,7 @@ static int rtw_get_ap_info(struct net_device *dev,
/* pdata->length = 0;? */
pdata->flags = 0;
- if (pdata->length>=32) {
+ if (pdata->length >= 32) {
if (copy_from_user(data, pdata->pointer, 32)) {
ret = -EINVAL;
goto exit;
@@ -2492,13 +2492,13 @@ static int rtw_get_ap_info(struct net_device *dev,
DBG_871X("BSSID:" MAC_FMT "\n", MAC_ARG(bssid));
pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
- if (pbuf && (wpa_ielen>0)) {
+ if (pbuf && (wpa_ielen > 0)) {
pdata->flags = 1;
break;
}
pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
- if (pbuf && (wpa_ielen>0)) {
+ if (pbuf && (wpa_ielen > 0)) {
pdata->flags = 2;
break;
}
@@ -2510,7 +2510,7 @@ static int rtw_get_ap_info(struct net_device *dev,
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
- if (pdata->length>=34) {
+ if (pdata->length >= 34) {
if (copy_to_user((u8 __force __user *)pdata->pointer+32, (u8 *)&pdata->flags, 1)) {
ret = -EINVAL;
goto exit;
@@ -2541,7 +2541,7 @@ static int rtw_set_pid(struct net_device *dev,
selector = *pdata;
if (selector < 3 && selector >= 0) {
padapter->pid[selector] = *(pdata+1);
- DBG_871X("%s set pid[%d]=%d\n", __func__, selector , padapter->pid[selector]);
+ DBG_871X("%s set pid[%d]=%d\n", __func__, selector, padapter->pid[selector]);
}
else
DBG_871X("%s selector %d error\n", __func__, selector);
@@ -2563,7 +2563,7 @@ static int rtw_wps_start(struct net_device *dev,
u32 u32wps_start = 0;
unsigned int uintRet = 0;
- if ((true == padapter->bDriverStopped) ||(true ==padapter->bSurpriseRemoved) || (NULL == pdata)) {
+ if ((true == padapter->bDriverStopped) || (true == padapter->bSurpriseRemoved) || (NULL == pdata)) {
ret = -EINVAL;
goto exit;
}
@@ -2733,8 +2733,8 @@ static int rtw_dbg_port(struct net_device *dev,
break;
case 0x01: /* dbg mode */
padapter->recvpriv.is_signal_dbg = 1;
- extra_arg = extra_arg>100?100:extra_arg;
- padapter->recvpriv.signal_strength_dbg =extra_arg;
+ extra_arg = extra_arg > 100 ? 100 : extra_arg;
+ padapter->recvpriv.signal_strength_dbg = extra_arg;
break;
}
break;
@@ -2806,7 +2806,7 @@ static int rtw_dbg_port(struct net_device *dev,
DBG_871X("ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
DBG_871X("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
- for (i = 0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
if (preorder_ctrl->enable)
DBG_871X("tid =%d, indicate_seq =%d\n", i, preorder_ctrl->indicate_seq);
@@ -2845,7 +2845,7 @@ static int rtw_dbg_port(struct net_device *dev,
spin_lock_bh(&pstapriv->sta_hash_lock);
- for (i = 0; i< NUM_STA; i++) {
+ for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
plist = get_next(phead);
@@ -2872,7 +2872,7 @@ static int rtw_dbg_port(struct net_device *dev,
- for (j = 0;j<16;j++) {
+ for (j = 0; j < 16; j++) {
preorder_ctrl = &psta->recvreorder_ctrl[j];
if (preorder_ctrl->enable)
DBG_871X("tid =%d, indicate_seq =%d\n", j, preorder_ctrl->indicate_seq);
@@ -2900,7 +2900,7 @@ static int rtw_dbg_port(struct net_device *dev,
DBG_871X("enable driver ctrl vcs = %d\n", extra_arg);
padapter->driver_vcs_en = 1;
- if (extra_arg>2)
+ if (extra_arg > 2)
padapter->driver_vcs_type = 1;
else
padapter->driver_vcs_type = extra_arg;
@@ -3048,7 +3048,7 @@ static int rtw_dbg_port(struct net_device *dev,
if (max_rx_rate < 0xc) { /* max_rx_rate < MSC0 -> B or G -> disable HT */
pregistrypriv->ht_enable = 0;
- for (i = 0; i<NumRates; i++) {
+ for (i = 0; i < NumRates; i++) {
if (pmlmeext->datarate[i] > max_rx_rate)
pmlmeext->datarate[i] = 0xff;
}
@@ -3057,7 +3057,7 @@ static int rtw_dbg_port(struct net_device *dev,
else if (max_rx_rate < 0x1c) { /* mcs0~mcs15 */
u32 mcs_bitmap = 0x0;
- for (i = 0; i<((max_rx_rate+1)-0xc); i++)
+ for (i = 0; i < ((max_rx_rate + 1) - 0xc); i++)
mcs_bitmap |= BIT(i);
set_mcs_rate_by_mask(pmlmeext->default_supported_mcs_set, mcs_bitmap);
@@ -3129,9 +3129,9 @@ static int rtw_dbg_port(struct net_device *dev,
*/
int value;
- DBG_871X("Set GPIO Direction! arg = %d , extra_arg =%d\n", arg , extra_arg);
+ DBG_871X("Set GPIO Direction! arg = %d , extra_arg =%d\n", arg, extra_arg);
value = rtw_config_gpio(dev, arg, extra_arg);
- DBG_871X("Set GPIO Direction %s\n", (value ==-1)?"Fail!!!":"Success");
+ DBG_871X("Set GPIO Direction %s\n", (value == -1) ? "Fail!!!" : "Success");
break;
}
case 0x27: /* Set GPIO output direction value */
@@ -3142,15 +3142,15 @@ static int rtw_dbg_port(struct net_device *dev,
*/
int value;
- DBG_871X("Set GPIO Value! arg = %d , extra_arg =%d\n", arg , extra_arg);
+ DBG_871X("Set GPIO Value! arg = %d , extra_arg =%d\n", arg, extra_arg);
value = rtw_set_gpio_output_value(dev, arg, extra_arg);
- DBG_871X("Set GPIO Value %s\n", (value ==-1)?"Fail!!!":"Success");
+ DBG_871X("Set GPIO Value %s\n", (value == -1) ? "Fail!!!" : "Success");
break;
}
#endif
case 0xaa:
{
- if ((extra_arg & 0x7F)> 0x3F) extra_arg = 0xFF;
+ if ((extra_arg & 0x7F) > 0x3F) extra_arg = 0xFF;
DBG_871X("chang data rate to :0x%02x\n", extra_arg);
padapter->fix_rate = extra_arg;
}
@@ -3161,7 +3161,7 @@ static int rtw_dbg_port(struct net_device *dev,
mac_reg_dump(RTW_DBGDUMP, padapter);
else if (extra_arg == 1)
bb_reg_dump(RTW_DBGDUMP, padapter);
- else if (extra_arg ==2)
+ else if (extra_arg == 2)
rf_reg_dump(RTW_DBGDUMP, padapter);
}
break;
@@ -3170,8 +3170,8 @@ static int rtw_dbg_port(struct net_device *dev,
{
u32 odm_flag;
- if (0xf ==extra_arg) {
- rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC,&odm_flag);
+ if (0xf == extra_arg) {
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
DBG_871X(" === DMFlag(0x%08x) ===\n", odm_flag);
DBG_871X("extra_arg = 0 - disable all dynamic func\n");
DBG_871X("extra_arg = 1 - disable DIG- BIT(0)\n");
@@ -3187,7 +3187,7 @@ static int rtw_dbg_port(struct net_device *dev,
extra_arg = 3 - turn on all dynamic func
*/
rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &(extra_arg));
- rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC,&odm_flag);
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
DBG_871X(" === DMFlag(0x%08x) ===\n", odm_flag);
}
}
@@ -3257,7 +3257,7 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
/* ret = ieee80211_wpa_enable(ieee, value); */
switch ((value)&0xff) {
- case 1 : /* WPA */
+ case 1: /* WPA */
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; /* WPA_PSK */
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
break;
@@ -3428,7 +3428,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
struct sta_info *psta = NULL, *pbcmc_sta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv* psecuritypriv =&(padapter->securitypriv);
+ struct security_priv* psecuritypriv = &(padapter->securitypriv);
struct sta_priv *pstapriv = &padapter->stapriv;
DBG_871X("%s\n", __func__);
@@ -3481,7 +3481,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
DBG_871X("r871x_set_encryption, wep_key_idx =%d, len =%d\n", wep_key_idx, wep_key_len);
- if ((wep_key_idx >= WEP_KEYS) || (wep_key_len<= 0)) {
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
ret = -EINVAL;
goto exit;
}
@@ -3523,7 +3523,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
- psecuritypriv->dot11DefKeylen[wep_key_idx]=pwep->KeyLength;
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
rtw_ap_set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx, 1);
} else {
@@ -3549,7 +3549,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
DBG_871X("%s, set group_key, WEP\n", __func__);
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -3560,7 +3560,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
@@ -3575,7 +3575,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
} else {
DBG_871X("%s, set group_key, none\n", __func__);
@@ -3590,7 +3590,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
- pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
if (pbcmc_sta) {
pbcmc_sta->ieee8021x_blocked = false;
pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
@@ -3604,7 +3604,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
if (param->u.crypt.set_tx == 1) {
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
DBG_871X("%s, set pairwise key, WEP\n", __func__);
@@ -3641,7 +3641,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else { /* group key??? */
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -3649,7 +3649,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
@@ -3661,7 +3661,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
} else {
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
}
@@ -3674,7 +3674,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
- pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
if (pbcmc_sta) {
pbcmc_sta->ieee8021x_blocked = false;
pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
@@ -3706,7 +3706,7 @@ static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int
memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2);
- if ((pstapriv->max_num_sta>NUM_STA) || (pstapriv->max_num_sta<= 0))
+ if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0))
pstapriv->max_num_sta = NUM_STA;
@@ -3831,12 +3831,12 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
if (psta) {
- u8 updated =false;
+ u8 updated = false;
/* DBG_871X("free psta =%p, aid =%d\n", psta, psta->aid); */
spin_lock_bh(&pstapriv->asoc_list_lock);
- if (list_empty(&psta->asoc_list) ==false) {
+ if (list_empty(&psta->asoc_list) == false) {
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
@@ -3895,12 +3895,12 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
ht_20mhz_set : BIT(5)
*/
- psta_data->sta_set =((psta->nonerp_set) |
- (psta->no_short_slot_time_set <<1) |
- (psta->no_short_preamble_set <<2) |
- (psta->no_ht_gf_set <<3) |
- (psta->no_ht_set <<4) |
- (psta->ht_20mhz_set <<5));
+ psta_data->sta_set = ((psta->nonerp_set) |
+ (psta->no_short_slot_time_set << 1) |
+ (psta->no_short_preamble_set << 2) |
+ (psta->no_ht_gf_set << 3) |
+ (psta->no_ht_set << 4) |
+ (psta->ht_20mhz_set << 5));
psta_data->tx_supp_rates_len = psta->bssratelen;
memcpy(psta_data->tx_supp_rates, psta->bssrateset, psta->bssratelen);
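
[Note: the reflowed expression above packs six single-bit station capabilities into one word. Spelled out as a helper (kernel types assumed, helper name hypothetical), the bit layout is:

/* bit0 nonerp, bit1 no_short_slot_time, bit2 no_short_preamble,
 * bit3 no_ht_gf, bit4 no_ht, bit5 ht_20mhz */
static inline u32 pack_sta_flags(bool nonerp, bool no_sst, bool no_sp,
				 bool no_ht_gf, bool no_ht, bool ht20)
{
	return nonerp | (no_sst << 1) | (no_sp << 2) |
	       (no_ht_gf << 3) | (no_ht << 4) | (ht20 << 5);
}
]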
@@ -3969,7 +3969,7 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len)
{
int ret = 0;
- unsigned char wps_oui[4]={0x0, 0x50, 0xf2, 0x04};
+ unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -3986,7 +3986,7 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
kfree(pmlmepriv->wps_beacon_ie);
pmlmepriv->wps_beacon_ie = NULL;
- if (ie_len>0) {
+ if (ie_len > 0) {
pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
pmlmepriv->wps_beacon_ie_len = ie_len;
if (pmlmepriv->wps_beacon_ie == NULL) {
@@ -4024,7 +4024,7 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
kfree(pmlmepriv->wps_probe_resp_ie);
pmlmepriv->wps_probe_resp_ie = NULL;
- if (ie_len>0) {
+ if (ie_len > 0) {
pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
pmlmepriv->wps_probe_resp_ie_len = ie_len;
if (pmlmepriv->wps_probe_resp_ie == NULL) {
@@ -4057,7 +4057,7 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
kfree(pmlmepriv->wps_assoc_resp_ie);
pmlmepriv->wps_assoc_resp_ie = NULL;
- if (ie_len>0) {
+ if (ie_len > 0) {
pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
pmlmepriv->wps_assoc_resp_ie_len = ie_len;
if (pmlmepriv->wps_assoc_resp_ie == NULL) {
@@ -4357,11 +4357,11 @@ static int rtw_wx_set_priv(struct net_device *dev,
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
u8 *probereq_wpsie = ext;
int probereq_wpsie_len = len;
- u8 wps_oui[4]={0x0, 0x50, 0xf2, 0x04};
+ u8 wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
if ((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) &&
(!memcmp(&probereq_wpsie[2], wps_oui, 4))) {
- cp_sz = probereq_wpsie_len>MAX_WPS_IE_LEN ? MAX_WPS_IE_LEN:probereq_wpsie_len;
+ cp_sz = probereq_wpsie_len > MAX_WPS_IE_LEN ? MAX_WPS_IE_LEN : probereq_wpsie_len;
if (pmlmepriv->wps_probe_req_ie) {
pmlmepriv->wps_probe_req_ie_len = 0;
@@ -4498,7 +4498,7 @@ static int rtw_test(
ret = rtw_hal_fill_h2c_cmd(padapter, param[0], count-1, &param[1]);
pos = sprintf(extra, "H2C ID = 0x%02x content =", param[0]);
- for (i = 1; i<count; i++)
+ for (i = 1; i < count; i++)
pos += sprintf(extra+pos, "%02x,", param[i]);
extra[pos] = 0;
pos--;
@@ -4636,14 +4636,14 @@ static const struct iw_priv_args rtw_private_args[] = {
},
{
SIOCIWFIRSTPRIV + 0x11,
- IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK , "p2p_get"
+ IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "p2p_get"
},
{
SIOCIWFIRSTPRIV + 0x12, 0, 0, "NULL"
},
{
SIOCIWFIRSTPRIV + 0x13,
- IW_PRIV_TYPE_CHAR | 64, IW_PRIV_TYPE_CHAR | 64 , "p2p_get2"
+ IW_PRIV_TYPE_CHAR | 64, IW_PRIV_TYPE_CHAR | 64, "p2p_get2"
},
{
SIOCIWFIRSTPRIV + 0x14,
@@ -4651,14 +4651,14 @@ static const struct iw_priv_args rtw_private_args[] = {
},
{
SIOCIWFIRSTPRIV + 0x15,
- IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | 1024 , "tdls_get"
+ IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | 1024, "tdls_get"
},
{
SIOCIWFIRSTPRIV + 0x16,
IW_PRIV_TYPE_CHAR | 64, 0, "pm_set"
},
- {SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ , 0 , "rereg_nd_name"},
+ {SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ, 0, "rereg_nd_name"},
{SIOCIWFIRSTPRIV + 0x1A, IW_PRIV_TYPE_CHAR | 1024, 0, "efuse_set"},
{SIOCIWFIRSTPRIV + 0x1B, IW_PRIV_TYPE_CHAR | 128, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"},
{
@@ -4667,10 +4667,10 @@ static const struct iw_priv_args rtw_private_args[] = {
},
#ifdef CONFIG_WOWLAN
- { MP_WOW_ENABLE , IW_PRIV_TYPE_CHAR | 1024, 0, "wow_mode" }, /* set */
+ { MP_WOW_ENABLE, IW_PRIV_TYPE_CHAR | 1024, 0, "wow_mode" }, /* set */
#endif
#ifdef CONFIG_AP_WOWLAN
- { MP_AP_WOW_ENABLE , IW_PRIV_TYPE_CHAR | 1024, 0, "ap_wow_mode" }, /* set */
+ { MP_AP_WOW_ENABLE, IW_PRIV_TYPE_CHAR | 1024, 0, "ap_wow_mode" }, /* set */
#endif
};
@@ -4721,7 +4721,7 @@ static iw_handler rtw_private_handler[] = {
static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_statistics *piwstats =&padapter->iwstats;
+ struct iw_statistics *piwstats = &padapter->iwstats;
int tmp_level = 0;
int tmp_qual = 0;
int tmp_noise = 0;
@@ -4760,10 +4760,10 @@ static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
rtw_ps_deny(padapter, PS_DENY_IOCTL);
LeaveAllPowerSaveModeDirect(padapter);
- rtw_hal_set_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&info, false);
+ rtw_hal_set_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &info, false);
/* ODM_InbandNoise_Monitor(podmpriv, true, 0x20, 100); */
rtw_ps_deny_cancel(padapter, PS_DENY_IOCTL);
- rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&(info.chan), &(padapter->recvpriv.noise));
+ rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &(info.chan), &(padapter->recvpriv.noise));
DBG_871X("chan:%d, noise_level:%d\n", info.chan, padapter->recvpriv.noise);
}
#endif
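
[Note on a pattern recurring throughout ioctl_linux.c above: many hunks reformat the open-coded clamp (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len) around memcpy() into the key slots. The patch only fixes spacing; a plausible follow-up cleanup, not part of this series, would use the kernel's min_t() macro (available via linux/kernel.h):

	/* copy at most 16 bytes of key material into the slot */
	memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key,
	       min_t(u32, param->u.crypt.key_len, 16));
]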
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index 52a5b3156b28..f121dbfaa029 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -64,7 +64,7 @@ void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
indicate_wx_scan_complete_event(padapter);
}
-static RT_PMKID_LIST backupPMKIDList[ NUM_PMKID_CACHE ];
+static RT_PMKID_LIST backupPMKIDList[NUM_PMKID_CACHE];
void rtw_reset_securitypriv(struct adapter *adapter)
{
u8 backupPMKIDIndex = 0;
@@ -83,7 +83,7 @@ void rtw_reset_securitypriv(struct adapter *adapter)
/* Backup the btkip_countermeasure information. */
/* When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */
- memcpy(&backupPMKIDList[ 0 ], &adapter->securitypriv.PMKIDList[ 0 ], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ memcpy(&backupPMKIDList[0], &adapter->securitypriv.PMKIDList[0], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
backupPMKIDIndex = adapter->securitypriv.PMKIDIndex;
backupTKIPCountermeasure = adapter->securitypriv.btkip_countermeasure;
backupTKIPcountermeasure_time = adapter->securitypriv.btkip_countermeasure_time;
@@ -95,7 +95,7 @@ void rtw_reset_securitypriv(struct adapter *adapter)
/* Added by Albert 2009/02/18 */
/* Restore the PMK information to securitypriv structure for the following connection. */
- memcpy(&adapter->securitypriv.PMKIDList[ 0 ], &backupPMKIDList[ 0 ], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ memcpy(&adapter->securitypriv.PMKIDList[0], &backupPMKIDList[0], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
adapter->securitypriv.PMKIDIndex = backupPMKIDIndex;
adapter->securitypriv.btkip_countermeasure = backupTKIPCountermeasure;
adapter->securitypriv.btkip_countermeasure_time = backupTKIPcountermeasure_time;
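
[Note: rtw_reset_securitypriv() preserves the PMKID cache and the TKIP-countermeasure state across a wholesale reset of the security structure, then restores them so the next association can reuse cached PMKs. The skeleton of that save/wipe/restore pattern, simplified, with field names as in the driver:

static void reset_keeping_pmkids(struct security_priv *sec)
{
	static RT_PMKID_LIST backup[NUM_PMKID_CACHE];
	u8 backup_idx = sec->PMKIDIndex;

	memcpy(backup, sec->PMKIDList, sizeof(backup));
	memset(sec, 0, sizeof(*sec));	/* the reset */
	memcpy(sec->PMKIDList, backup, sizeof(backup));
	sec->PMKIDIndex = backup_idx;
}
]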
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 47e984d5b7cb..d29f59bbb613 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -202,8 +202,8 @@ module_param(rtw_tx_pwr_by_rate, int, 0644);
MODULE_PARM_DESC(rtw_tx_pwr_by_rate, "0:Disable, 1:Enable, 2: Depend on efuse");
int _netdev_open(struct net_device *pnetdev);
-int netdev_open (struct net_device *pnetdev);
-static int netdev_close (struct net_device *pnetdev);
+int netdev_open(struct net_device *pnetdev);
+static int netdev_close(struct net_device *pnetdev);
static void loadparam(struct adapter *padapter, _nic_hdl pnetdev)
{
@@ -221,7 +221,7 @@ static void loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->channel = (u8)rtw_channel;
registry_par->wireless_mode = (u8)rtw_wireless_mode;
- registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense ;
+ registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
registry_par->vcs_type = (u8)rtw_vcs_type;
registry_par->rts_thresh = (u16)rtw_rts_thresh;
registry_par->frag_thresh = (u16)rtw_frag_thresh;
@@ -554,7 +554,7 @@ u32 rtw_start_drv_threads(struct adapter *padapter)
return _status;
}
-void rtw_stop_drv_threads (struct adapter *padapter)
+void rtw_stop_drv_threads(struct adapter *padapter)
{
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n"));
@@ -1154,7 +1154,7 @@ void rtw_dev_unload(struct adapter *padapter)
DBG_871X("stop cmdthd timeout\n");
break;
} else {
- cnt ++;
+ cnt++;
DBG_871X("cmdthd is running(%d)\n", cnt);
msleep(10);
}
@@ -1274,7 +1274,7 @@ void rtw_suspend_wow(struct adapter *padapter)
padapter->intf_stop(padapter);
}
- /* 2.1 clean interupt */
+ /* 2.1 clean interrupt */
if (padapter->HalFunc.clear_interrupt)
padapter->HalFunc.clear_interrupt(padapter);
@@ -1348,7 +1348,7 @@ void rtw_suspend_ap_wow(struct adapter *padapter)
/* 2. disable interrupt */
rtw_hal_disable_interrupt(padapter); /* It need wait for leaving 32K. */
- /* 2.1 clean interupt */
+ /* 2.1 clean interrupt */
if (padapter->HalFunc.clear_interrupt)
padapter->HalFunc.clear_interrupt(padapter);
@@ -1799,7 +1799,7 @@ int rtw_resume_common(struct adapter *padapter)
pwrpriv->pno_in_resume = false;
#endif
}
- DBG_871X_LEVEL(_drv_always_, "%s:%d in %d ms\n", __func__ , ret,
+ DBG_871X_LEVEL(_drv_always_, "%s:%d in %d ms\n", __func__, ret,
jiffies_to_msecs(jiffies - start_time));
return ret;
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index f5614e56371e..4238209ec175 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -289,7 +289,7 @@ void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
}
/**
- * rtw_cbuf_alloc - allocte a rtw_cbuf with given size and do initialization
+ * rtw_cbuf_alloc - allocate a rtw_cbuf with given size and do initialization
* @size: size of pointer
*
* Returns: pointer of srtuct rtw_cbuf, NULL for allocation failure
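
[Note: the comment being fixed here is kernel-doc, so spelling matters to scripts/kernel-doc as well as to readers. For reference, the fully cleaned-up header would read as below; the context line still carries a second typo, "srtuct" for "struct", untouched by this patch:

/**
 * rtw_cbuf_alloc - allocate a rtw_cbuf with given size and do initialization
 * @size: size of pointer
 *
 * Returns: pointer of struct rtw_cbuf, NULL for allocation failure
 */
]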
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index 643caccc3069..60c35d92ba29 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -35,7 +35,8 @@ void rtw_os_recv_resource_free(struct recv_priv *precvpriv)
for (i = 0; i < NR_RECVFRAME; i++) {
if (precvframe->u.hdr.pkt) {
- dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */
+ /* free skb by driver */
+ dev_kfree_skb_any(precvframe->u.hdr.pkt);
precvframe->u.hdr.pkt = NULL;
}
precvframe++;
@@ -80,7 +81,10 @@ _pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8
((!memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
!memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
+ /*
+ * remove RFC1042 or Bridge-Tunnel encapsulation and replace
+ * EtherType
+ */
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
@@ -101,7 +105,7 @@ void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt
struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
int ret;
- /* Indicat the packets to upper layer */
+ /* Indicate the packets to upper layer */
if (pkt) {
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
_pkt *pskb2 = NULL;
@@ -109,11 +113,7 @@ void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt
struct sta_priv *pstapriv = &padapter->stapriv;
int bmcast = IS_MCAST(pattrib->dst);
- /* DBG_871X("bmcast =%d\n", bmcast); */
-
if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)) {
- /* DBG_871X("not ap psta =%p, addr =%pM\n", psta, pattrib->dst); */
-
if (bmcast) {
psta = rtw_get_bcmc_stainfo(padapter);
pskb2 = rtw_skb_clone(pkt);
@@ -123,9 +123,6 @@ void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt
if (psta) {
struct net_device *pnetdev = (struct net_device*)padapter->pnetdev;
-
- /* DBG_871X("directly forwarding to the rtw_xmit_entry\n"); */
-
/* skb->ip_summed = CHECKSUM_NONE; */
pkt->dev = pnetdev;
skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt));
@@ -197,7 +194,7 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
key_type |= NL80211_KEYTYPE_PAIRWISE;
}
- cfg80211_michael_mic_failure(padapter->pnetdev, (u8 *)&pmlmepriv->assoc_bssid[ 0 ], key_type, -1,
+ cfg80211_michael_mic_failure(padapter->pnetdev, (u8 *)&pmlmepriv->assoc_bssid[0], key_type, -1,
NULL, GFP_ATOMIC);
memset(&ev, 0x00, sizeof(ev));
@@ -208,7 +205,7 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
}
ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[ 0 ], ETH_ALEN);
+ memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
memset(&wrqu, 0x00, sizeof(wrqu));
wrqu.data.length = sizeof(ev);
@@ -223,7 +220,7 @@ static void rtw_os_ksocket_send(struct adapter *padapter, union recv_frame *prec
DBG_871X("eth rx: got eth_type = 0x%x\n", pattrib->eth_type);
- if (psta && psta->isrc && psta->pid>0) {
+ if (psta && psta->isrc && psta->pid > 0) {
u16 rx_pid;
rx_pid = *(u16*)(skb->data+ETH_HLEN);
@@ -234,14 +231,10 @@ static void rtw_os_ksocket_send(struct adapter *padapter, union recv_frame *prec
if (rx_pid == psta->pid) {
int i;
u16 len = *(u16*)(skb->data+ETH_HLEN+2);
- /* u16 ctrl_type = *(u16*)(skb->data+ETH_HLEN+4); */
-
- /* DBG_871X("eth, RC: len = 0x%x, ctrl_type = 0x%x\n", len, ctrl_type); */
DBG_871X("eth, RC: len = 0x%x\n", len);
- for (i = 0;i<len;i++)
+ for (i = 0; i < len; i++)
DBG_871X("0x%x\n", *(skb->data+ETH_HLEN+4+i));
- /* DBG_871X("0x%x\n", *(skb->data+ETH_HLEN+6+i)); */
DBG_871X("eth, RC-end\n");
}
@@ -291,7 +284,8 @@ int rtw_recv_indicatepkt(struct adapter *padapter, union recv_frame *precv_frame
rtw_os_recv_indicate_pkt(padapter, skb, pattrib);
- precv_frame->u.hdr.pkt = NULL; /* pointers to NULL before rtw_free_recvframe() */
+ /* pointers to NULL before rtw_free_recvframe() */
+ precv_frame->u.hdr.pkt = NULL;
rtw_free_recvframe(precv_frame, pfree_recv_queue);
@@ -301,11 +295,11 @@ int rtw_recv_indicatepkt(struct adapter *padapter, union recv_frame *precv_frame
_recv_indicatepkt_drop:
- /* enqueue back to free_recv_queue */
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
+ /* enqueue back to free_recv_queue */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
- DBG_COUNTER(padapter->rx_logs.os_indicate_err);
- return _FAIL;
+ DBG_COUNTER(padapter->rx_logs.os_indicate_err);
+ return _FAIL;
}
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
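
[Note: the comment reflowed in the first hunk of this file covers standard 802.11-to-802.3 conversion: an MSDU whose payload starts with the RFC 1042 SNAP header is decapsulated, except for AARP and IPX, which by convention use the bridge-tunnel OUI so they survive translation untouched. A stand-alone sketch of the decision; the constants are the well-known EtherType values from linux/if_ether.h:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static const uint8_t rfc1042_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

/* Decide whether to strip the SNAP header and rebuild an Ethernet
 * header, mirroring the condition in rtw_os_alloc_msdu_pkt(). */
static bool snap_decap_ok(const uint8_t *payload, uint16_t eth_type)
{
	if (!memcmp(payload, rfc1042_hdr, 6))
		return eth_type != 0x80F3 /* ETH_P_AARP */ &&
		       eth_type != 0x8137 /* ETH_P_IPX */;
	return !memcmp(payload, bridge_tunnel_hdr, 6);
}
]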
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 859f4a0afb95..b093b5629171 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -108,7 +108,7 @@ static void sdio_free_irq(struct dvobj_priv *dvobj)
err = sdio_release_irq(func);
if (err) {
dvobj->drv_dbg.dbg_sdio_free_irq_error_cnt++;
- DBG_871X_LEVEL(_drv_err_,"%s: sdio_release_irq FAIL(%d)!\n", __func__, err);
+ DBG_871X_LEVEL(_drv_err_, "%s: sdio_release_irq FAIL(%d)!\n", __func__, err);
} else
dvobj->drv_dbg.dbg_sdio_free_irq_cnt++;
sdio_release_host(func);
@@ -324,7 +324,7 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
padapter->dvobj = dvobj;
dvobj->if1 = padapter;
- padapter->bDriverStopped =true;
+ padapter->bDriverStopped = true;
dvobj->padapters = padapter;
padapter->iface_id = 0;
@@ -430,7 +430,7 @@ static void rtw_sdio_if1_deinit(struct adapter *if1)
rtw_cancel_all_timer(if1);
#ifdef CONFIG_WOWLAN
- adapter_to_pwrctl(if1)->wowlan_mode =false;
+ adapter_to_pwrctl(if1)->wowlan_mode = false;
DBG_871X_LEVEL(_drv_always_, "%s wowlan_mode:%d\n", __func__, adapter_to_pwrctl(if1)->wowlan_mode);
#endif /* CONFIG_WOWLAN */
@@ -500,7 +500,7 @@ free_dvobj:
if (status != _SUCCESS)
sdio_dvobj_deinit(func);
exit:
- return status == _SUCCESS?0:-ENODEV;
+ return status == _SUCCESS ? 0 : -ENODEV;
}
static void rtw_dev_remove(struct sdio_func *func)
@@ -548,7 +548,7 @@ extern int pm_netdev_close(struct net_device *pnetdev, u8 bnormal);
static int rtw_sdio_suspend(struct device *dev)
{
- struct sdio_func *func =dev_to_sdio_func(dev);
+ struct sdio_func *func = dev_to_sdio_func(dev);
struct dvobj_priv *psdpriv = sdio_get_drvdata(func);
struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(psdpriv);
struct adapter *padapter = psdpriv->if1;
@@ -585,7 +585,7 @@ static int rtw_resume_process(struct adapter *padapter)
static int rtw_sdio_resume(struct device *dev)
{
- struct sdio_func *func =dev_to_sdio_func(dev);
+ struct sdio_func *func = dev_to_sdio_func(dev);
struct dvobj_priv *psdpriv = sdio_get_drvdata(func);
struct adapter *padapter = psdpriv->if1;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
index 4e81bc13e57d..fec8a8caaa46 100644
--- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -15,16 +15,16 @@ uint rtw_remainder_len(struct pkt_file *pfile)
return (pfile->buf_len - ((SIZE_PTR)(pfile->cur_addr) - (SIZE_PTR)(pfile->buf_start)));
}
-void _rtw_open_pktfile (_pkt *pktptr, struct pkt_file *pfile)
+void _rtw_open_pktfile(_pkt *pktptr, struct pkt_file *pfile)
{
pfile->pkt = pktptr;
pfile->cur_addr = pfile->buf_start = pktptr->data;
pfile->pkt_len = pfile->buf_len = pktptr->len;
- pfile->cur_buffer = pfile->buf_start ;
+ pfile->cur_buffer = pfile->buf_start;
}
-uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
+uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
{
uint len = 0;
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index 17c4131f5f62..c6f9375468eb 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -940,7 +940,8 @@ static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
if (maybe_support_aspm)
chip->aspm_l0s_l1_en = 0x03;
- dev_dbg(rtsx_dev(chip), "aspm_level[0] = 0x%02x, aspm_level[1] = 0x%02x\n",
+ dev_dbg(rtsx_dev(chip),
+ "aspm_level[0] = 0x%02x, aspm_level[1] = 0x%02x\n",
chip->aspm_level[0], chip->aspm_level[1]);
if (chip->aspm_l0s_l1_en) {
diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile
index 1cf3849cef23..b89aa4c12e9d 100644
--- a/drivers/staging/sm750fb/Makefile
+++ b/drivers/staging/sm750fb/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FB_SM750) += sm750fb.o
-sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o
-sm750fb-objs += ddk750_display.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o
+sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o \
+ ddk750_chip.o ddk750_power.o ddk750_mode.o \
+ ddk750_display.o ddk750_swi2c.o ddk750_sii164.o \
+ ddk750_dvi.o ddk750_hwi2c.o
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index d920256328c3..d30571663585 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -1,16 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
-#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
+#include <linux/miscdevice.h> /* for misc_register, and MISC_DYNAMIC_MINOR */
#include <linux/types.h>
#include <linux/uaccess.h>
#include "speakup.h"
#include "spk_priv.h"
-#ifndef SYNTH_MINOR
-#define SYNTH_MINOR 25
-#endif
-
static int misc_registered;
static int dev_opened;
@@ -67,7 +63,7 @@ static const struct file_operations synth_fops = {
};
static struct miscdevice synth_device = {
- .minor = SYNTH_MINOR,
+ .minor = MISC_DYNAMIC_MINOR,
.name = "synth",
.fops = &synth_fops,
};
@@ -81,7 +77,7 @@ void speakup_register_devsynth(void)
pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
} else {
pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
- MISC_MAJOR, SYNTH_MINOR);
+ MISC_MAJOR, synth_device.minor);
misc_registered = 1;
}
}
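
[Note: this diff and the speakup_soft.c one below move from hard-coded minors (SYNTH_MINOR 25, SOFTSYNTH_MINOR 26/27) to MISC_DYNAMIC_MINOR, letting misc_register() pick a free minor at run time and reporting device.minor only after registration succeeds. The minimal shape of a dynamic-minor misc device, as a sketch:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* kernel picks a free minor */
	.name  = "demo",		/* creates /dev/demo */
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	int ret = misc_register(&demo_dev);

	if (ret)
		return ret;
	/* demo_dev.minor now holds the assigned minor */
	pr_info("demo: MAJOR %d MINOR %d\n", MISC_MAJOR, demo_dev.minor);
	return 0;
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
]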
diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c
index 5f1bda37f86d..822ceac83068 100644
--- a/drivers/staging/speakup/keyhelp.c
+++ b/drivers/staging/speakup/keyhelp.c
@@ -49,7 +49,7 @@ static int cur_item, nstates;
static void build_key_data(void)
{
u_char *kp, counters[MAXFUNCS], ch, ch1;
- u_short *p_key = key_data, key;
+ u_short *p_key, key;
int i, offset = 1;
nstates = (int)(state_tbl[-1]);
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 81ecfd1a200d..02471d932d71 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2117,7 +2117,8 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
spk_keydown = 0;
goto out;
}
- value = spk_lastkey = pad_chars[value];
+ value = pad_chars[value];
+ spk_lastkey = value;
spk_keydown++;
spk_parked &= 0xfe;
goto no_map;
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 9d85a3a1af4c..f591ec095582 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -10,7 +10,7 @@
*/
#include <linux/unistd.h>
-#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
+#include <linux/miscdevice.h> /* for misc_register, and MISC_DYNAMIC_MINOR */
#include <linux/poll.h> /* for poll_wait() */
/* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
@@ -20,8 +20,6 @@
#include "speakup.h"
#define DRV_VERSION "2.6"
-#define SOFTSYNTH_MINOR 26 /* might as well give it one more than /dev/synth */
-#define SOFTSYNTHU_MINOR 27 /* might as well give it one more than /dev/synth */
#define PROCSPEECH 0x0d
#define CLEAR_SYNTH 0x18
@@ -375,7 +373,7 @@ static int softsynth_probe(struct spk_synth *synth)
if (misc_registered != 0)
return 0;
memset(&synth_device, 0, sizeof(synth_device));
- synth_device.minor = SOFTSYNTH_MINOR;
+ synth_device.minor = MISC_DYNAMIC_MINOR;
synth_device.name = "softsynth";
synth_device.fops = &softsynth_fops;
if (misc_register(&synth_device)) {
@@ -384,17 +382,19 @@ static int softsynth_probe(struct spk_synth *synth)
}
memset(&synthu_device, 0, sizeof(synthu_device));
- synthu_device.minor = SOFTSYNTHU_MINOR;
+ synthu_device.minor = MISC_DYNAMIC_MINOR;
synthu_device.name = "softsynthu";
synthu_device.fops = &softsynthu_fops;
if (misc_register(&synthu_device)) {
- pr_warn("Couldn't initialize miscdevice /dev/softsynth.\n");
+ pr_warn("Couldn't initialize miscdevice /dev/softsynthu.\n");
return -ENODEV;
}
misc_registered = 1;
- pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR 26)\n");
- pr_info("initialized device: /dev/softsynthu, node (MAJOR 10, MINOR 27)\n");
+ pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR %d)\n",
+ synth_device.minor);
+ pr_info("initialized device: /dev/softsynthu, node (MAJOR 10, MINOR %d)\n",
+ synthu_device.minor);
return 0;
}
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index ac6a74883af4..c75b40838794 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -11,13 +11,11 @@
#ifndef _SPEAKUP_PRIVATE_H
#define _SPEAKUP_PRIVATE_H
+#include <linux/printk.h>
+
#include "spk_types.h"
#include "spk_priv_keyinfo.h"
-#ifndef pr_warn
-#define pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
-#endif
-
#define V_LAST_VAR { MAXVARS }
#define SPACE 0x20
#define SYNTH_CHECK 20030716 /* today's date ought to do for check value */
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index 5a9eff08cb96..9b95f77f9265 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -51,7 +51,7 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
return -EOPNOTSUPP;
speakup_tty = tty;
- ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL);
+ ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
if (!ldisc_data)
return -ENOMEM;
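
[Note: kmalloc(sizeof(*ldisc_data), GFP_KERNEL) is the checkpatch-preferred spelling: the allocation size is derived from the pointer being assigned, so it cannot go stale if the struct is renamed or the pointer retyped. In general, with a hypothetical struct foo:

	struct foo *p;

	/* size tracks the type of 'p' automatically */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
]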
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index a2fc72c29894..fc6a9416829c 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -189,7 +189,7 @@ struct spk_synth {
void (*flush)(struct spk_synth *synth);
int (*is_alive)(struct spk_synth *synth);
int (*synth_adjust)(struct st_var_header *var);
- void (*read_buff_add)(u_char);
+ void (*read_buff_add)(u_char c);
unsigned char (*get_index)(struct spk_synth *synth);
struct synth_indexing indexing;
int alive;
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
index f8a4144b239c..cf29f884cbe0 100644
--- a/drivers/staging/unisys/Documentation/overview.txt
+++ b/drivers/staging/unisys/Documentation/overview.txt
@@ -15,12 +15,12 @@ normally be unsharable, specifically:
* visorinput - keyboard and mouse
These drivers conform to the standard Linux bus/device model described
-within Documentation/driver-api/driver-model/, and utilize a driver named visorbus to
-present the virtual busses involved. Drivers in the 'visor*' driver set are
-commonly referred to as "guest drivers" or "client drivers". All drivers
-except visorbus expose a device of a specific usable class to the Linux guest
-environment (e.g., block, network, or input), and are collectively referred
-to as "function drivers".
+within Documentation/driver-api/driver-model/, and utilize a driver named
+visorbus to present the virtual busses involved. Drivers in the 'visor*'
+driver set are commonly referred to as "guest drivers" or "client drivers".
+All drivers except visorbus expose a device of a specific usable class to the
+Linux guest environment (e.g., block, network, or input), and are collectively
+referred to as "function drivers".
The back-end for each device is owned and managed by a small,
single-purpose service partition in the s-Par firmware, which communicates
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 9693fb559052..6d202cba8575 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -111,7 +111,7 @@ struct visorinput_devdata {
/* size of following array */
unsigned int keycode_table_bytes;
/* for keyboard devices: visorkbd_keycode[] + visorkbd_ext_keycode[] */
- unsigned char keycode_table[0];
+ unsigned char keycode_table[];
};
static const guid_t visor_keyboard_channel_guid = VISOR_KEYBOARD_CHANNEL_GUID;
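
Replacing keycode_table[0] with keycode_table[] swaps the old GNU zero-length-array idiom for a C99 flexible array member, which bounds checkers and fortified string helpers can reason about. A hedged sketch of how such a structure is typically allocated (names are illustrative; struct_size() comes from <linux/overflow.h>):

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_devdata {
	unsigned int keycode_table_bytes;	/* size of following array */
	unsigned char keycode_table[];		/* flexible array member */
};

static struct demo_devdata *demo_alloc(unsigned int nbytes)
{
	struct demo_devdata *d;

	/* struct_size() computes header + trailing array with overflow checks. */
	d = kzalloc(struct_size(d, keycode_table, nbytes), GFP_KERNEL);
	if (!d)
		return NULL;
	d->keycode_table_bytes = nbytes;
	return d;
}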
diff --git a/drivers/staging/uwb/Kconfig b/drivers/staging/uwb/Kconfig
deleted file mode 100644
index 259e053e1e09..000000000000
--- a/drivers/staging/uwb/Kconfig
+++ /dev/null
@@ -1,72 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# UWB device configuration
-#
-
-menuconfig UWB
- tristate "Ultra Wideband devices"
- default n
- select GENERIC_NET_UTILS
- help
- UWB is a high-bandwidth, low-power, point-to-point radio
- technology using a wide spectrum (3.1-10.6GHz). It is
- optimized for in-room use (480Mbps at 2 meters, 110Mbps at
- 10m). It serves as the transport layer for other protocols,
- such as Wireless USB (WUSB).
-
- The topology is peer to peer; however, higher level
- protocols (such as WUSB) might impose a master/slave
- relationship.
-
- Say Y here if your computer has USB or PCI based UWB radio
- controllers. You will need to enable the radio controllers
- below. It is ok to select all of them; no harm done.
-
- For more help check the UWB and WUSB related files in
- <file:Documentation/usb/>.
-
- To compile the UWB stack as a module, choose M here.
-
-if UWB
-
-config UWB_HWA
- tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)"
- depends on USB
- help
- This driver enables the radio controller for HWA USB
- devices. HWA stands for Host Wire Adapter, and it is a UWB
- Radio Controller connected to your system via USB. Most of
- them come with a Wireless USB host controller also.
-
- To compile this driver select Y (built in) or M (module). It
- is safe to select any even if you do not have the hardware.
-
-config UWB_WHCI
- tristate "UWB Radio Control driver for WHCI-compliant cards"
- depends on PCI
- help
- This driver enables the radio controller for WHCI cards.
-
- WHCI is a specification developed by Intel
- (http://www.intel.com/technology/comms/wusb/whci.htm) much
- in the spirit of USB's EHCI, but for UWB and Wireless USB
- radio/host controllers connected via memory mapping (eg:
- PCI). Most of these cards come also with a Wireless USB host
- controller.
-
- To compile this driver select Y (built in) or M (module). It
- is safe to select any even if you do not have the hardware.
-
-config UWB_I1480U
- tristate "Support for Intel Wireless UWB Link 1480 HWA"
- depends on UWB_HWA
- select FW_LOADER
- help
- This driver enables support for the i1480 when connected via
- USB. It consists of a firmware uploader that will enable it
- to behave as an HWA device.
-
- To compile this driver select Y (built in) or M (module). It
- is safe to select any even if you do not have the hardware.
-
-endif # UWB
diff --git a/drivers/staging/uwb/Makefile b/drivers/staging/uwb/Makefile
deleted file mode 100644
index 32f4de7afbd6..000000000000
--- a/drivers/staging/uwb/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_UWB) += uwb.o
-obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o
-obj-$(CONFIG_UWB_HWA) += hwa-rc.o
-obj-$(CONFIG_UWB_I1480U) += i1480/
-
-uwb-objs := \
- address.o \
- allocator.o \
- beacon.o \
- driver.o \
- drp.o \
- drp-avail.o \
- drp-ie.o \
- est.o \
- ie.o \
- ie-rcv.o \
- lc-dev.o \
- lc-rc.o \
- neh.o \
- pal.o \
- radio.o \
- reset.o \
- rsv.o \
- scan.o \
- uwb-debug.o \
- uwbd.o
-
-umc-objs := \
- umc-bus.o \
- umc-dev.o \
- umc-drv.o
diff --git a/drivers/staging/uwb/TODO b/drivers/staging/uwb/TODO
deleted file mode 100644
index abae57000534..000000000000
--- a/drivers/staging/uwb/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-TODO: Remove in late 2019 unless there are users
-
-There don't seem to be any real wireless USB devices anywhere in the wild
-anymore. It turned out to be a failed technology :(
-
-This will be removed from the tree if no one objects.
-
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/uwb/address.c b/drivers/staging/uwb/address.c
deleted file mode 100644
index 857d5cd56a95..000000000000
--- a/drivers/staging/uwb/address.c
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Address management
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- */
-
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/random.h>
-#include <linux/etherdevice.h>
-
-#include "uwb-internal.h"
-
-
-/** Device Address Management command */
-struct uwb_rc_cmd_dev_addr_mgmt {
- struct uwb_rccb rccb;
- u8 bmOperationType;
- u8 baAddr[6];
-} __attribute__((packed));
-
-
-/**
- * Low level command for setting/getting UWB radio's addresses
- *
- * @hwarc: HWA Radio Control interface instance
- * @bmOperationType:
- * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2])
- * @baAddr: address buffer--assumed to have enough data to hold
- * the address type requested.
- * @reply: Pointer to reply buffer (can be stack allocated)
- * @returns: 0 if ok, < 0 errno code on error.
- *
- * @cmd has to be allocated because USB cannot grok stack or vmalloc
- * buffers, depending on your host architecture.
- */
-static
-int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc,
- u8 bmOperationType, const u8 *baAddr,
- struct uwb_rc_evt_dev_addr_mgmt *reply)
-{
- int result;
- struct uwb_rc_cmd_dev_addr_mgmt *cmd;
-
- result = -ENOMEM;
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- goto error_kzalloc;
- cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
- cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT);
- cmd->bmOperationType = bmOperationType;
- if (baAddr) {
- size_t size = 0;
- switch (bmOperationType >> 1) {
- case 0: size = 2; break;
- case 1: size = 6; break;
- default: BUG();
- }
- memcpy(cmd->baAddr, baAddr, size);
- }
- reply->rceb.bEventType = UWB_RC_CET_GENERAL;
- reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT;
- result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT",
- &cmd->rccb, sizeof(*cmd),
- &reply->rceb, sizeof(*reply));
- if (result < 0)
- goto error_cmd;
- if (result < sizeof(*reply)) {
- dev_err(&rc->uwb_dev.dev,
- "DEV-ADDR-MGMT: not enough data replied: "
- "%d vs %zu bytes needed\n", result, sizeof(*reply));
- result = -ENOMSG;
- } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(&rc->uwb_dev.dev,
- "DEV-ADDR-MGMT: command execution failed: %s (%d)\n",
- uwb_rc_strerror(reply->bResultCode),
- reply->bResultCode);
- result = -EIO;
- } else
- result = 0;
-error_cmd:
- kfree(cmd);
-error_kzalloc:
- return result;
-}
-
-
-/**
- * Set the UWB RC MAC or device address.
- *
- * @rc: UWB Radio Controller
- * @_addr: Pointer to address to write [assumed to be either a
- * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
- * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC).
- * @returns: 0 if ok, < 0 errno code on error.
- *
- * Some anal retentivity here: even if both 'struct
- * uwb_{dev,mac}_addr' have the actual byte array at the same offset
- * and I could just pass _addr to hwarc_cmd_dev_addr_mgmt(), I prefer
- * to use some syntactic sugar in case someday we decide to change the
- * format of the structs. The compiler will optimize it out anyway.
- */
-static int uwb_rc_addr_set(struct uwb_rc *rc,
- const void *_addr, enum uwb_addr_type type)
-{
- int result;
- u8 bmOperationType = 0x1; /* Set address */
- const struct uwb_dev_addr *dev_addr = _addr;
- const struct uwb_mac_addr *mac_addr = _addr;
- struct uwb_rc_evt_dev_addr_mgmt reply;
- const u8 *baAddr;
-
- result = -EINVAL;
- switch (type) {
- case UWB_ADDR_DEV:
- baAddr = dev_addr->data;
- break;
- case UWB_ADDR_MAC:
- baAddr = mac_addr->data;
- bmOperationType |= 0x2;
- break;
- default:
- return result;
- }
- return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply);
-}
-
-
-/**
- * Get the UWB radio's MAC or device address.
- *
- * @rc: UWB Radio Controller
- * @_addr: Where to write the address data [assumed to be either a
- * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
- * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC).
- * @returns: 0 if ok (and *_addr set), < 0 errno code on error.
- *
- * See comment in uwb_rc_addr_set() about anal retentivity in the
- * type handling of the address variables.
- */
-static int uwb_rc_addr_get(struct uwb_rc *rc,
- void *_addr, enum uwb_addr_type type)
-{
- int result;
- u8 bmOperationType = 0x0; /* Get address */
- struct uwb_rc_evt_dev_addr_mgmt evt;
- struct uwb_dev_addr *dev_addr = _addr;
- struct uwb_mac_addr *mac_addr = _addr;
- u8 *baAddr;
-
- result = -EINVAL;
- switch (type) {
- case UWB_ADDR_DEV:
- baAddr = dev_addr->data;
- break;
- case UWB_ADDR_MAC:
- bmOperationType |= 0x2;
- baAddr = mac_addr->data;
- break;
- default:
- return result;
- }
- result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt);
- if (result == 0)
- switch (type) {
- case UWB_ADDR_DEV:
- memcpy(&dev_addr->data, evt.baAddr,
- sizeof(dev_addr->data));
- break;
- case UWB_ADDR_MAC:
- memcpy(&mac_addr->data, evt.baAddr,
- sizeof(mac_addr->data));
- break;
- default: /* shut gcc up */
- BUG();
- }
- return result;
-}
-
-
-/** Get @rc's MAC address to @addr */
-int uwb_rc_mac_addr_get(struct uwb_rc *rc,
- struct uwb_mac_addr *addr) {
- return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC);
-}
-EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get);
-
-
-/** Get @rc's device address to @addr */
-int uwb_rc_dev_addr_get(struct uwb_rc *rc,
- struct uwb_dev_addr *addr) {
- return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV);
-}
-EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get);
-
-
-/** Set @rc's address to @addr */
-int uwb_rc_mac_addr_set(struct uwb_rc *rc,
- const struct uwb_mac_addr *addr)
-{
- int result = -EINVAL;
- mutex_lock(&rc->uwb_dev.mutex);
- result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC);
- mutex_unlock(&rc->uwb_dev.mutex);
- return result;
-}
-
-
-/** Set @rc's address to @addr */
-int uwb_rc_dev_addr_set(struct uwb_rc *rc,
- const struct uwb_dev_addr *addr)
-{
- int result = -EINVAL;
- mutex_lock(&rc->uwb_dev.mutex);
- result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV);
- rc->uwb_dev.dev_addr = *addr;
- mutex_unlock(&rc->uwb_dev.mutex);
- return result;
-}
-
-/* Returns !0 if given address is already assigned to device. */
-int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_mac_addr *addr = _addr;
-
- if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr))
- return !0;
- return 0;
-}
-
-/* Returns !0 if given address is already assigned to device. */
-int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_dev_addr *addr = _addr;
- if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr))
- return !0;
- return 0;
-}
-
-/**
- * uwb_dev_addr_assign - assign a generated DevAddr to a radio controller
- * @rc: the (local) radio controller device requiring a new DevAddr
- *
- * A new DevAddr is required when:
- * - first setting up a radio controller
- * - if the hardware reports a DevAddr conflict
- *
- * The DevAddr is randomly generated in the generated DevAddr range
- * [0x100, 0xfeff]. The number of devices in a beacon group is limited
- * by mMaxBPLength (96) so this address space will never be exhausted.
- *
- * [ECMA-368] 17.1.1, 17.16.
- */
-int uwb_rc_dev_addr_assign(struct uwb_rc *rc)
-{
- struct uwb_dev_addr new_addr;
-
- do {
- get_random_bytes(new_addr.data, sizeof(new_addr.data));
- } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff
- || __uwb_dev_addr_assigned(rc, &new_addr));
-
- return uwb_rc_dev_addr_set(rc, &new_addr);
-}
-
-/**
- * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event
- * @evt: the DEV_ADDR_CONFLICT notification from the radio controller
- *
- * A new (non-conflicting) DevAddr is assigned to the radio controller.
- *
- * [ECMA-368] 17.1.1.1.
- */
-int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt)
-{
- struct uwb_rc *rc = evt->rc;
-
- return uwb_rc_dev_addr_assign(rc);
-}
-
-/*
- * Print the 48-bit EUI MAC address of the radio controller when
- * reading /sys/class/uwb_rc/XX/mac_address
- */
-static ssize_t uwb_rc_mac_addr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_rc *rc = uwb_dev->rc;
- struct uwb_mac_addr addr;
- ssize_t result;
-
- mutex_lock(&rc->uwb_dev.mutex);
- result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC);
- mutex_unlock(&rc->uwb_dev.mutex);
- if (result >= 0) {
- result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr);
- buf[result++] = '\n';
- }
- return result;
-}
-
-/*
- * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address
- * and if correct, set it.
- */
-static ssize_t uwb_rc_mac_addr_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_rc *rc = uwb_dev->rc;
- struct uwb_mac_addr addr;
- ssize_t result;
-
- if (!mac_pton(buf, addr.data))
- return -EINVAL;
- if (is_multicast_ether_addr(addr.data)) {
- dev_err(&rc->uwb_dev.dev, "refusing to set multicast "
- "MAC address %s\n", buf);
- return -EINVAL;
- }
- result = uwb_rc_mac_addr_set(rc, &addr);
- if (result == 0)
- rc->uwb_dev.mac_addr = addr;
-
- return result < 0 ? result : size;
-}
-DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store);
-
-/** Print @addr to @buf, @return bytes written */
-size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
- int type)
-{
- size_t result;
- if (type)
- result = scnprintf(buf, buf_size, "%pM", addr);
- else
- result = scnprintf(buf, buf_size, "%02x:%02x",
- addr[1], addr[0]);
- return result;
-}
-EXPORT_SYMBOL_GPL(__uwb_addr_print);
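
uwb_rc_dev_addr_assign() above keeps drawing random bytes until the first octet avoids the reserved 0x00/0xff patterns and no neighbour already uses the address. A self-contained userspace sketch of that retry loop, with in_use() standing in for __uwb_dev_addr_assigned():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Stand-in for __uwb_dev_addr_assigned(): pretend nothing is taken yet. */
static int in_use(const uint8_t addr[2]) { (void)addr; return 0; }

int main(void)
{
	uint8_t addr[2];

	srand((unsigned)time(NULL));
	do {
		addr[0] = (uint8_t)rand();
		addr[1] = (uint8_t)rand();
	} while (addr[0] == 0x00 || addr[0] == 0xff || in_use(addr));

	printf("generated DevAddr: %02x:%02x\n", addr[1], addr[0]);
	return 0;
}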
diff --git a/drivers/staging/uwb/allocator.c b/drivers/staging/uwb/allocator.c
deleted file mode 100644
index 1f429fba20b7..000000000000
--- a/drivers/staging/uwb/allocator.c
+++ /dev/null
@@ -1,374 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UWB reservation management.
- *
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "uwb.h"
-
-#include "uwb-internal.h"
-
-static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai)
-{
- int col, mas, safe_mas, unsafe_mas;
- unsigned char *bm = ai->bm;
- struct uwb_rsv_col_info *ci = ai->ci;
- unsigned char c;
-
- for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) {
-
- safe_mas = ci->csi.safe_mas_per_col;
- unsafe_mas = ci->csi.unsafe_mas_per_col;
-
- for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++ ) {
- if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) {
-
- if (safe_mas > 0) {
- safe_mas--;
- c = UWB_RSV_MAS_SAFE;
- } else if (unsafe_mas > 0) {
- unsafe_mas--;
- c = UWB_RSV_MAS_UNSAFE;
- } else {
- break;
- }
- bm[col * UWB_MAS_PER_ZONE + mas] = c;
- }
- }
- }
-}
-
-static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai)
-{
- int mas, col, rows;
- unsigned char *bm = ai->bm;
- struct uwb_rsv_row_info *ri = &ai->ri;
- unsigned char c;
-
- rows = 1;
- c = UWB_RSV_MAS_SAFE;
- for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) {
- if (ri->avail[mas] == 1) {
-
- if (rows > ri->used_rows) {
- break;
- } else if (rows > 7) {
- c = UWB_RSV_MAS_UNSAFE;
- }
-
- for (col = 0; col < UWB_NUM_ZONES; col++) {
- if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) {
- bm[col * UWB_NUM_ZONES + mas] = c;
- if(c == UWB_RSV_MAS_SAFE)
- ai->safe_allocated_mases++;
- else
- ai->unsafe_allocated_mases++;
- }
- }
- rows++;
- }
- }
- ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
-}
-
-/*
- * Find the best column set for a given availability, interval, num safe mas and
- * num unsafe mas.
- *
- * The different sets are tried in order as shown below, depending on the interval.
- *
- * interval = 16
- * deep = 0
- * set 1 -> { 8 }
- * deep = 1
- * set 1 -> { 4 }
- * set 2 -> { 12 }
- * deep = 2
- * set 1 -> { 2 }
- * set 2 -> { 6 }
- * set 3 -> { 10 }
- * set 4 -> { 14 }
- * deep = 3
- * set 1 -> { 1 }
- * set 2 -> { 3 }
- * set 3 -> { 5 }
- * set 4 -> { 7 }
- * set 5 -> { 9 }
- * set 6 -> { 11 }
- * set 7 -> { 13 }
- * set 8 -> { 15 }
- *
- * interval = 8
- * deep = 0
- * set 1 -> { 4 12 }
- * deep = 1
- * set 1 -> { 2 10 }
- * set 2 -> { 6 14 }
- * deep = 2
- * set 1 -> { 1 9 }
- * set 2 -> { 3 11 }
- * set 3 -> { 5 13 }
- * set 4 -> { 7 15 }
- *
- * interval = 4
- * deep = 0
- * set 1 -> { 2 6 10 14 }
- * deep = 1
- * set 1 -> { 1 5 9 13 }
- * set 2 -> { 3 7 11 15 }
- *
- * interval = 2
- * deep = 0
- * set 1 -> { 1 3 5 7 9 11 13 15 }
- */
-static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval,
- int num_safe_mas, int num_unsafe_mas)
-{
- struct uwb_rsv_col_info *ci = ai->ci;
- struct uwb_rsv_col_set_info *csi = &ci->csi;
- struct uwb_rsv_col_set_info tmp_csi;
- int deep, set, col, start_col_deep, col_start_set;
- int start_col, max_mas_in_set, lowest_max_mas_in_deep;
- int n_mas;
- int found = UWB_RSV_ALLOC_NOT_FOUND;
-
- tmp_csi.start_col = 0;
- start_col_deep = interval;
- n_mas = num_unsafe_mas + num_safe_mas;
-
- for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
- start_col_deep /= 2;
- col_start_set = 0;
- lowest_max_mas_in_deep = UWB_MAS_PER_ZONE;
-
- for (set = 1; set <= (1 << deep); set++) {
- max_mas_in_set = 0;
- start_col = start_col_deep + col_start_set;
- for (col = start_col; col < UWB_NUM_ZONES; col += interval) {
-
- if (ci[col].max_avail_safe >= num_safe_mas &&
- ci[col].max_avail_unsafe >= n_mas) {
- if (ci[col].highest_mas[n_mas] > max_mas_in_set)
- max_mas_in_set = ci[col].highest_mas[n_mas];
- } else {
- max_mas_in_set = 0;
- break;
- }
- }
- if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) {
- lowest_max_mas_in_deep = max_mas_in_set;
-
- tmp_csi.start_col = start_col;
- }
- col_start_set += (interval >> deep);
- }
-
- if (lowest_max_mas_in_deep < 8) {
- csi->start_col = tmp_csi.start_col;
- found = UWB_RSV_ALLOC_FOUND;
- break;
- } else if ((lowest_max_mas_in_deep > 8) &&
- (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) &&
- (found == UWB_RSV_ALLOC_NOT_FOUND)) {
- csi->start_col = tmp_csi.start_col;
- found = UWB_RSV_ALLOC_FOUND;
- }
- }
-
- if (found == UWB_RSV_ALLOC_FOUND) {
- csi->interval = interval;
- csi->safe_mas_per_col = num_safe_mas;
- csi->unsafe_mas_per_col = num_unsafe_mas;
-
- ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas;
- ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas;
- ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
- ai->interval = interval;
- }
- return found;
-}
-
-static void get_row_descriptors(struct uwb_rsv_alloc_info *ai)
-{
- unsigned char *bm = ai->bm;
- struct uwb_rsv_row_info *ri = &ai->ri;
- int col, mas;
-
- ri->free_rows = 16;
- for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) {
- ri->avail[mas] = 1;
- for (col = 1; col < UWB_NUM_ZONES; col++) {
- if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) {
- ri->free_rows--;
- ri->avail[mas]=0;
- break;
- }
- }
- }
-}
-
-static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci)
-{
- int mas;
- int block_count = 0, start_block = 0;
- int previous_avail = 0;
- int available = 0;
- int safe_mas_in_row[UWB_MAS_PER_ZONE] = {
- 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
- };
-
- rci->max_avail_safe = 0;
-
- for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) {
- if (!bm[column * UWB_NUM_ZONES + mas]) {
- available++;
- rci->max_avail_unsafe = available;
-
- rci->highest_mas[available] = mas;
-
- if (previous_avail) {
- block_count++;
- if ((block_count > safe_mas_in_row[start_block]) &&
- (!rci->max_avail_safe))
- rci->max_avail_safe = available - 1;
- } else {
- previous_avail = 1;
- start_block = mas;
- block_count = 1;
- }
- } else {
- previous_avail = 0;
- }
- }
- if (!rci->max_avail_safe)
- rci->max_avail_safe = rci->max_avail_unsafe;
-}
-
-static void get_column_descriptors(struct uwb_rsv_alloc_info *ai)
-{
- unsigned char *bm = ai->bm;
- struct uwb_rsv_col_info *ci = ai->ci;
- int col;
-
- for (col = 1; col < UWB_NUM_ZONES; col++) {
- uwb_rsv_fill_column_info(bm, col, &ci[col]);
- }
-}
-
-static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai)
-{
- int n_rows;
- int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW;
- int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW;
- if (ai->min_mas % UWB_USABLE_MAS_PER_ROW)
- min_rows++;
- for (n_rows = max_rows; n_rows >= min_rows; n_rows--) {
- if (n_rows <= ai->ri.free_rows) {
- ai->ri.used_rows = n_rows;
- ai->interval = 1; /* row reservation */
- uwb_rsv_fill_row_alloc(ai);
- return UWB_RSV_ALLOC_FOUND;
- }
- }
- return UWB_RSV_ALLOC_NOT_FOUND;
-}
-
-static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval)
-{
- int n_safe, n_unsafe, n_mas;
- int n_column = UWB_NUM_ZONES / interval;
- int max_per_zone = ai->max_mas / n_column;
- int min_per_zone = ai->min_mas / n_column;
-
- if (ai->min_mas % n_column)
- min_per_zone++;
-
- if (min_per_zone > UWB_MAS_PER_ZONE) {
- return UWB_RSV_ALLOC_NOT_FOUND;
- }
-
- if (max_per_zone > UWB_MAS_PER_ZONE) {
- max_per_zone = UWB_MAS_PER_ZONE;
- }
-
- for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) {
- if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND)
- continue;
- for (n_safe = n_mas; n_safe >= 0; n_safe--) {
- n_unsafe = n_mas - n_safe;
- if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) {
- uwb_rsv_fill_column_alloc(ai);
- return UWB_RSV_ALLOC_FOUND;
- }
- }
- }
- return UWB_RSV_ALLOC_NOT_FOUND;
-}
-
-int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available,
- struct uwb_mas_bm *result)
-{
- struct uwb_rsv_alloc_info *ai;
- int interval;
- int bit_index;
-
- ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
- if (!ai)
- return UWB_RSV_ALLOC_NOT_FOUND;
- ai->min_mas = rsv->min_mas;
- ai->max_mas = rsv->max_mas;
- ai->max_interval = rsv->max_interval;
-
-
- /* fill the not available vector from the available bm */
- for_each_clear_bit(bit_index, available->bm, UWB_NUM_MAS)
- ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL;
-
- if (ai->max_interval == 1) {
- get_row_descriptors(ai);
- if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
- goto alloc_found;
- else
- goto alloc_not_found;
- }
-
- get_column_descriptors(ai);
-
- for (interval = 16; interval >= 2; interval>>=1) {
- if (interval > ai->max_interval)
- continue;
- if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND)
- goto alloc_found;
- }
-
- /* try row reservation if no column is found */
- get_row_descriptors(ai);
- if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
- goto alloc_found;
- else
- goto alloc_not_found;
-
- alloc_found:
- bitmap_zero(result->bm, UWB_NUM_MAS);
- bitmap_zero(result->unsafe_bm, UWB_NUM_MAS);
- /* fill the safe and unsafe bitmaps */
- for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) {
- if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE)
- set_bit(bit_index, result->bm);
- else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE)
- set_bit(bit_index, result->unsafe_bm);
- }
- bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS);
-
- result->safe = ai->safe_allocated_mases;
- result->unsafe = ai->unsafe_allocated_mases;
-
- kfree(ai);
- return UWB_RSV_ALLOC_FOUND;
-
- alloc_not_found:
- kfree(ai);
- return UWB_RSV_ALLOC_NOT_FOUND;
-}
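
uwb_rsv_find_best_allocation() tries column layouts at the coarsest interval first, halving 16 -> 8 -> 4 -> 2 while respecting max_interval, and falls back to a row allocation only when no column set fits. The search order in isolation (the two predicates stand in for the real column/row searches):

#include <stdio.h>

static int try_columns(int interval) { return interval == 4; } /* stand-in */
static int try_rows(void)            { return 1; }             /* stand-in */

static const char *find_best(int max_interval)
{
	int interval;

	for (interval = 16; interval >= 2; interval >>= 1) {
		if (interval > max_interval)
			continue;
		if (try_columns(interval))
			return "column allocation";
	}
	return try_rows() ? "row allocation" : "not found";
}

int main(void)
{
	printf("result: %s\n", find_best(16));	/* -> column allocation */
	return 0;
}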
diff --git a/drivers/staging/uwb/beacon.c b/drivers/staging/uwb/beacon.c
deleted file mode 100644
index c483c19c5ef8..000000000000
--- a/drivers/staging/uwb/beacon.c
+++ /dev/null
@@ -1,595 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Beacon management
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/kdev_t.h>
-#include <linux/slab.h>
-
-#include "uwb-internal.h"
-
-/* Start Beaconing command structure */
-struct uwb_rc_cmd_start_beacon {
- struct uwb_rccb rccb;
- __le16 wBPSTOffset;
- u8 bChannelNumber;
-} __attribute__((packed));
-
-
-static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel)
-{
- int result;
- struct uwb_rc_cmd_start_beacon *cmd;
- struct uwb_rc_evt_confirm reply;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
- cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
- cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON);
- cmd->wBPSTOffset = cpu_to_le16(bpst_offset);
- cmd->bChannelNumber = channel;
- reply.rceb.bEventType = UWB_RC_CET_GENERAL;
- reply.rceb.wEvent = UWB_RC_CMD_START_BEACON;
- result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd),
- &reply.rceb, sizeof(reply));
- if (result < 0)
- goto error_cmd;
- if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(&rc->uwb_dev.dev,
- "START-BEACON: command execution failed: %s (%d)\n",
- uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
- result = -EIO;
- }
-error_cmd:
- kfree(cmd);
- return result;
-}
-
-static int uwb_rc_stop_beacon(struct uwb_rc *rc)
-{
- int result;
- struct uwb_rccb *cmd;
- struct uwb_rc_evt_confirm reply;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
- cmd->bCommandType = UWB_RC_CET_GENERAL;
- cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON);
- reply.rceb.bEventType = UWB_RC_CET_GENERAL;
- reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON;
- result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd),
- &reply.rceb, sizeof(reply));
- if (result < 0)
- goto error_cmd;
- if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(&rc->uwb_dev.dev,
- "STOP-BEACON: command execution failed: %s (%d)\n",
- uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
- result = -EIO;
- }
-error_cmd:
- kfree(cmd);
- return result;
-}
-
-/*
- * Start/stop beacons
- *
- * @rc: UWB Radio Controller to operate on
- * @channel: UWB channel on which to beacon (WUSB[table
- * 5-12]). If -1, stop beaconing.
- * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
- *
- * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
- * of a SET IE command after the device sent the first beacon that includes
- * the IEs specified in the SET IE command. So, after we start beaconing we
- * check if there is anything in the IE cache and call the SET IE command
- * if needed.
- */
-int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
-{
- int result;
- struct device *dev = &rc->uwb_dev.dev;
-
- dev_dbg(dev, "%s: channel = %d\n", __func__, channel);
- if (channel < 0)
- channel = -1;
- if (channel == -1)
- result = uwb_rc_stop_beacon(rc);
- else {
- /* channel >= 0...dah */
- result = uwb_rc_start_beacon(rc, bpst_offset, channel);
- if (result < 0) {
- dev_err(dev, "Cannot start beaconing: %d\n", result);
- return result;
- }
- if (le16_to_cpu(rc->ies->wIELength) > 0) {
- result = uwb_rc_set_ie(rc, rc->ies);
- if (result < 0) {
- dev_err(dev, "Cannot set new IE on device: "
- "%d\n", result);
- result = uwb_rc_stop_beacon(rc);
- channel = -1;
- bpst_offset = 0;
- }
- }
- }
-
- if (result >= 0)
- rc->beaconing = channel;
- return result;
-}
-
-/*
- * Beacon cache
- *
- * The purpose of this is to speed up the lookup of beacon information
- * when a new beacon arrives. The UWB Daemon also uses it to keep
- * tabs on which devices are within radio range and which are not. When a
- * device's beacon stays present for more than a certain amount of
- * time, it is considered a new, usable device. When a beacon ceases
- * to be received for a certain amount of time, it is considered that
- * the device is gone.
- *
- * FIXME: use an allocator for the entries
- * FIXME: use something faster for search than a list
- */
-
-void uwb_bce_kfree(struct kref *_bce)
-{
- struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt);
-
- kfree(bce->be);
- kfree(bce);
-}
-
-
-/* Find a beacon by dev addr in the cache */
-static
-struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc,
- const struct uwb_dev_addr *dev_addr)
-{
- struct uwb_beca_e *bce, *next;
- list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
- if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr)))
- goto out;
- }
- bce = NULL;
-out:
- return bce;
-}
-
-/* Find a beacon by dev addr in the cache */
-static
-struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc,
- const struct uwb_mac_addr *mac_addr)
-{
- struct uwb_beca_e *bce, *next;
- list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
- if (!memcmp(bce->mac_addr, mac_addr->data,
- sizeof(struct uwb_mac_addr)))
- goto out;
- }
- bce = NULL;
-out:
- return bce;
-}
-
-/**
- * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr
- * @rc: the radio controller that saw the device
- * @devaddr: DevAddr of the UWB device to find
- *
- * There may be more than one matching device (in the case of a
- * DevAddr conflict), but only the first one is returned.
- */
-struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
- const struct uwb_dev_addr *devaddr)
-{
- struct uwb_dev *found = NULL;
- struct uwb_beca_e *bce;
-
- mutex_lock(&rc->uwb_beca.mutex);
- bce = __uwb_beca_find_bydev(rc, devaddr);
- if (bce)
- found = uwb_dev_try_get(rc, bce->uwb_dev);
- mutex_unlock(&rc->uwb_beca.mutex);
-
- return found;
-}
-
-/**
- * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
- * @rc: the radio controller that saw the device
- * @macaddr: EUI-48 of the UWB device to find
- */
-struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
- const struct uwb_mac_addr *macaddr)
-{
- struct uwb_dev *found = NULL;
- struct uwb_beca_e *bce;
-
- mutex_lock(&rc->uwb_beca.mutex);
- bce = __uwb_beca_find_bymac(rc, macaddr);
- if (bce)
- found = uwb_dev_try_get(rc, bce->uwb_dev);
- mutex_unlock(&rc->uwb_beca.mutex);
-
- return found;
-}
-
-/* Initialize a beacon cache entry */
-static void uwb_beca_e_init(struct uwb_beca_e *bce)
-{
- mutex_init(&bce->mutex);
- kref_init(&bce->refcnt);
- stats_init(&bce->lqe_stats);
- stats_init(&bce->rssi_stats);
-}
-
-/*
- * Add a beacon to the cache
- *
- * @be: Beacon event information
- * @bf: Beacon frame (part of b, really)
- * @ts_jiffies: Timestamp (in jiffies) when the beacon was received
- */
-static
-struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc,
- struct uwb_rc_evt_beacon *be,
- struct uwb_beacon_frame *bf,
- unsigned long ts_jiffies)
-{
- struct uwb_beca_e *bce;
-
- bce = kzalloc(sizeof(*bce), GFP_KERNEL);
- if (bce == NULL)
- return NULL;
- uwb_beca_e_init(bce);
- bce->ts_jiffies = ts_jiffies;
- bce->uwb_dev = NULL;
- list_add(&bce->node, &rc->uwb_beca.list);
- return bce;
-}
-
-/*
- * Wipe out beacon entries that became stale
- *
- * Remove associated devices too.
- */
-void uwb_beca_purge(struct uwb_rc *rc)
-{
- struct uwb_beca_e *bce, *next;
- unsigned long expires;
-
- mutex_lock(&rc->uwb_beca.mutex);
- list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
- expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
- if (time_after(jiffies, expires)) {
- uwbd_dev_offair(bce);
- }
- }
- mutex_unlock(&rc->uwb_beca.mutex);
-}
-
-/* Clean up the whole beacon cache. Called on shutdown */
-void uwb_beca_release(struct uwb_rc *rc)
-{
- struct uwb_beca_e *bce, *next;
-
- mutex_lock(&rc->uwb_beca.mutex);
- list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
- list_del(&bce->node);
- uwb_bce_put(bce);
- }
- mutex_unlock(&rc->uwb_beca.mutex);
-}
-
-static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
- struct uwb_beacon_frame *bf)
-{
- char macbuf[UWB_ADDR_STRSIZE];
- char devbuf[UWB_ADDR_STRSIZE];
- char dstbuf[UWB_ADDR_STRSIZE];
-
- uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
- uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
- uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
- dev_info(&rc->uwb_dev.dev,
- "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
- devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
- bf->Beacon_Slot_Number, macbuf);
-}
-
-/*
- * @bce: beacon cache entry, referenced
- */
-ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
- char *buf, size_t size)
-{
- ssize_t result = 0;
- struct uwb_rc_evt_beacon *be;
- struct uwb_beacon_frame *bf;
- int ies_len;
- struct uwb_ie_hdr *ies;
-
- mutex_lock(&bce->mutex);
-
- be = bce->be;
- if (be) {
- bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
- ies_len = be->wBeaconInfoLength - sizeof(struct uwb_beacon_frame);
- ies = (struct uwb_ie_hdr *)bf->IEData;
-
- result = uwb_ie_dump_hex(ies, ies_len, buf, size);
- }
-
- mutex_unlock(&bce->mutex);
-
- return result;
-}
-
-/*
- * Verify that the beacon event, frame and IEs are ok
- */
-static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt,
- struct uwb_rc_evt_beacon *be)
-{
- int result = -EINVAL;
- struct uwb_beacon_frame *bf;
- struct device *dev = &rc->uwb_dev.dev;
-
- /* Is there enough data to decode a beacon frame? */
- if (evt->notif.size < sizeof(*be) + sizeof(*bf)) {
- dev_err(dev, "BEACON event: Not enough data to decode "
- "(%zu vs %zu bytes needed)\n", evt->notif.size,
- sizeof(*be) + sizeof(*bf));
- goto error;
- }
- /* FIXME: make sure beacon frame IEs are fine and that the whole thing
- * is consistent */
- result = 0;
-error:
- return result;
-}
-
-/*
- * Handle UWB_RC_EVT_BEACON events
- *
- * We check the beacon cache to see how the received beacon fares. If
- * it is already there we refresh the timestamp. If not we create a new
- * entry.
- *
- * According to the WHCI and WUSB specs, only one beacon frame is
- * allowed per notification block, so we don't bother scanning
- * for more.
- */
-int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
-{
- int result = -EINVAL;
- struct uwb_rc *rc;
- struct uwb_rc_evt_beacon *be;
- struct uwb_beacon_frame *bf;
- struct uwb_beca_e *bce;
-
- rc = evt->rc;
- be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
- result = uwb_verify_beacon(rc, evt, be);
- if (result < 0)
- return result;
-
- /* FIXME: handle alien beacons. */
- if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
- be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
- return -ENOSYS;
- }
-
- bf = (struct uwb_beacon_frame *) be->BeaconInfo;
-
- /*
- * Drop beacons from devices with a NULL EUI-48 -- they cannot
- * be uniquely identified.
- *
- * It's expected that these will all be WUSB devices and they
- * have a WUSB specific connection method so ignoring them
- * here shouldn't be a problem.
- */
- if (uwb_mac_addr_bcast(&bf->Device_Identifier))
- return 0;
-
- mutex_lock(&rc->uwb_beca.mutex);
- bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier);
- if (bce == NULL) {
- /* Not in there, a new device is pinging */
- uwb_beacon_print(evt->rc, be, bf);
- bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies);
- if (bce == NULL) {
- mutex_unlock(&rc->uwb_beca.mutex);
- return -ENOMEM;
- }
- }
- mutex_unlock(&rc->uwb_beca.mutex);
-
- mutex_lock(&bce->mutex);
- /* purge old beacon data */
- kfree(bce->be);
-
- /* Update commonly used fields */
- bce->ts_jiffies = evt->ts_jiffies;
- bce->be = be;
- bce->dev_addr = bf->hdr.SrcAddr;
- bce->mac_addr = &bf->Device_Identifier;
- be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
- be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
- stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
- stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);
-
- /*
- * This might be a beacon from a new device.
- */
- if (bce->uwb_dev == NULL)
- uwbd_dev_onair(evt->rc, bce);
-
- mutex_unlock(&bce->mutex);
-
- return 1; /* we keep the event data */
-}
-
-/*
- * Handle UWB_RC_EVT_BEACON_SIZE events
- *
- * XXXXX
- */
-int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt)
-{
- int result = -EINVAL;
- struct device *dev = &evt->rc->uwb_dev.dev;
- struct uwb_rc_evt_beacon_size *bs;
-
- /* Is there enough data to decode the event? */
- if (evt->notif.size < sizeof(*bs)) {
- dev_err(dev, "BEACON SIZE notification: Not enough data to "
- "decode (%zu vs %zu bytes needed)\n",
- evt->notif.size, sizeof(*bs));
- goto error;
- }
- bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb);
- if (0)
- dev_info(dev, "Beacon size changed to %u bytes "
- "(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize));
- else {
- /* temporary hack until we do something with this message... */
- static unsigned count;
- if (++count % 1000 == 0)
- dev_info(dev, "Beacon size changed %u times "
- "(FIXME: action?)\n", count);
- }
- result = 0;
-error:
- return result;
-}
-
-/**
- * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event
- * @evt: the BP_SLOT_CHANGE notification from the radio controller
- *
- * If the event indicates that no beacon period slots were available
- * then radio controller has transitioned to a non-beaconing state.
- * Otherwise, simply save the current beacon slot.
- */
-int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
-{
- struct uwb_rc *rc = evt->rc;
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_rc_evt_bp_slot_change *bpsc;
-
- if (evt->notif.size < sizeof(*bpsc)) {
- dev_err(dev, "BP SLOT CHANGE event: Not enough data\n");
- return -EINVAL;
- }
- bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb);
-
- if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
- dev_err(dev, "stopped beaconing: No free slots in BP\n");
- mutex_lock(&rc->uwb_dev.mutex);
- rc->beaconing = -1;
- mutex_unlock(&rc->uwb_dev.mutex);
- } else
- rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc);
-
- return 0;
-}
-
-/**
- * Handle UWB_RC_EVT_BPOIE_CHANGE events
- *
- * XXXXX
- */
-struct uwb_ie_bpo {
- struct uwb_ie_hdr hdr;
- u8 bp_length;
- u8 data[];
-} __attribute__((packed));
-
-int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
-{
- int result = -EINVAL;
- struct device *dev = &evt->rc->uwb_dev.dev;
- struct uwb_rc_evt_bpoie_change *bpoiec;
- struct uwb_ie_bpo *bpoie;
- static unsigned count; /* FIXME: this is a temp hack */
- size_t iesize;
-
- /* Is there enough data to decode it? */
- if (evt->notif.size < sizeof(*bpoiec)) {
- dev_err(dev, "BPOIEC notification: Not enough data to "
- "decode (%zu vs %zu bytes needed)\n",
- evt->notif.size, sizeof(*bpoiec));
- goto error;
- }
- bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb);
- iesize = le16_to_cpu(bpoiec->wBPOIELength);
- if (iesize < sizeof(*bpoie)) {
- dev_err(dev, "BPOIEC notification: Not enough IE data to "
- "decode (%zu vs %zu bytes needed)\n",
- iesize, sizeof(*bpoie));
- goto error;
- }
- if (++count % 1000 == 0) /* Lame placeholder */
- dev_info(dev, "BPOIE: %u changes received\n", count);
- /*
- * FIXME: At this point we should go over all the IEs in the
- * bpoiec->BPOIE array and act on each.
- */
- result = 0;
-error:
- return result;
-}
-
-/*
- * Print beaconing state.
- */
-static ssize_t uwb_rc_beacon_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_rc *rc = uwb_dev->rc;
- ssize_t result;
-
- mutex_lock(&rc->uwb_dev.mutex);
- result = sprintf(buf, "%d\n", rc->beaconing);
- mutex_unlock(&rc->uwb_dev.mutex);
- return result;
-}
-
-/*
- * Start beaconing on the specified channel, or stop beaconing.
- */
-static ssize_t uwb_rc_beacon_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_rc *rc = uwb_dev->rc;
- int channel;
- ssize_t result = -EINVAL;
-
- result = sscanf(buf, "%d", &channel);
- if (result >= 1)
- result = uwb_radio_force_channel(rc, channel);
-
- return result < 0 ? result : size;
-}
-DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store);
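
uwb_beca_purge() ages cache entries by adding the timeout to the stored timestamp and asking whether the current time is past it; in the kernel, time_after() makes that comparison safe across jiffies wraparound. A small userspace sketch of the same expiry test, with monotonic milliseconds standing in for jiffies:

#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	long long ts_seen = now_ms();	/* when the beacon was last received */
	long long timeout_ms = 500;	/* mirrors beacon_timeout_ms */

	if (now_ms() > ts_seen + timeout_ms)
		printf("entry stale: take the device off the air\n");
	else
		printf("entry fresh: keep the device\n");
	return 0;
}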
diff --git a/drivers/staging/uwb/driver.c b/drivers/staging/uwb/driver.c
deleted file mode 100644
index 5755c2e49ffc..000000000000
--- a/drivers/staging/uwb/driver.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Driver initialization, etc
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- *
- * Life cycle: FIXME: explain
- *
- * UWB radio controller:
- *
- * 1. alloc a uwb_rc, zero it
- * 2. call uwb_rc_init() on it to set it up + ops (won't do any
- * kind of allocation)
- * 3. register (now it is owned by the UWB stack--deregister before
- * freeing/destroying).
- * 4. It lives on its own now (UWB stack handles)--when it
- * disconnects, call unregister()
- * 5. free it.
- *
- * Make sure you have a reference to the uwb_rc before calling
- * any of the UWB API functions.
- *
- * TODO:
- *
- * 1. Locking and life cycle management is crappy still. All entry
- * points to the UWB HCD API assume you have a reference on the
- * uwb_rc structure and that it won't go away. They mutex lock it
- * before doing anything.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/kdev_t.h>
-#include <linux/random.h>
-
-#include "uwb-internal.h"
-
-
-/* UWB stack attributes (or 'global' constants) */
-
-
-/**
- * If a beacon disappears for longer than this, then we consider the
- * device that was represented by that beacon to be gone.
- *
- * ECMA-368[17.2.3, last para] establishes that a device must not
- * consider a device to be its neighbour if it doesn't receive a beacon
- * for more than mMaxLostBeacons. mMaxLostBeacons is defined in
- * ECMA-368[17.16] as 3; because we can get only one beacon per
- * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time
- * for jitter and stuff and make it 500 ms.
- */
-unsigned long beacon_timeout_ms = 500;
-
-static
-ssize_t beacon_timeout_ms_show(struct class *class,
- struct class_attribute *attr,
- char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms);
-}
-
-static
-ssize_t beacon_timeout_ms_store(struct class *class,
- struct class_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long bt;
- ssize_t result;
- result = sscanf(buf, "%lu", &bt);
- if (result != 1)
- return -EINVAL;
- beacon_timeout_ms = bt;
- return size;
-}
-static CLASS_ATTR_RW(beacon_timeout_ms);
-
-static struct attribute *uwb_class_attrs[] = {
- &class_attr_beacon_timeout_ms.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(uwb_class);
-
-/** Device model classes */
-struct class uwb_rc_class = {
- .name = "uwb_rc",
- .class_groups = uwb_class_groups,
-};
-
-
-static int __init uwb_subsys_init(void)
-{
- int result = 0;
-
- result = uwb_est_create();
- if (result < 0) {
- printk(KERN_ERR "uwb: Can't initialize EST subsystem\n");
- goto error_est_init;
- }
-
- result = class_register(&uwb_rc_class);
- if (result < 0)
- goto error_uwb_rc_class_register;
-
- /* Register the UWB bus */
- result = bus_register(&uwb_bus_type);
- if (result) {
- pr_err("%s - registering bus driver failed\n", __func__);
- goto exit_bus;
- }
-
- uwb_dbg_init();
- return 0;
-
-exit_bus:
- class_unregister(&uwb_rc_class);
-error_uwb_rc_class_register:
- uwb_est_destroy();
-error_est_init:
- return result;
-}
-module_init(uwb_subsys_init);
-
-static void __exit uwb_subsys_exit(void)
-{
- uwb_dbg_exit();
- bus_unregister(&uwb_bus_type);
- class_unregister(&uwb_rc_class);
- uwb_est_destroy();
- return;
-}
-module_exit(uwb_subsys_exit);
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("Ultra Wide Band core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/uwb/drp-avail.c b/drivers/staging/uwb/drp-avail.c
deleted file mode 100644
index 02392ab82a7d..000000000000
--- a/drivers/staging/uwb/drp-avail.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * DRP availability management
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Reinette Chatre <reinette.chatre@intel.com>
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- *
- * Manage DRP Availability (the MAS available for DRP
- * reservations). Thus:
- *
- * - Handle DRP Availability Change notifications
- *
- * - Allow the reservation manager to indicate MAS reserved/released
- * by local (owned by/targeted at the radio controller)
- * reservations.
- *
- * - Based on the two sources above, generate a DRP Availability IE to
- * be included in the beacon.
- *
- * See also the documentation for struct uwb_drp_avail.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/bitmap.h>
-#include "uwb-internal.h"
-
-/**
- * uwb_drp_avail_init - initialize an RC's MAS availability
- *
- * All MAS are available initially. The RC will inform us which
- * slots are used for the BP (it may change in size).
- */
-void uwb_drp_avail_init(struct uwb_rc *rc)
-{
- bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
- bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
- bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
-}
-
-/*
- * Determine MAS available for new local reservations.
- *
- * avail = global & local & pending
- */
-void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
-{
- bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
- bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
-}
-
-/**
- * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
- * @rc: the radio controller
- * @mas: the MAS to reserve
- *
- * Returns 0 on success, or -EBUSY if the MAS requested aren't available.
- */
-int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
-{
- struct uwb_mas_bm avail;
-
- uwb_drp_available(rc, &avail);
- if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
- return -EBUSY;
-
- bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
- return 0;
-}
-
-/**
- * uwb_drp_avail_reserve - reserve MAS for an established reservation
- * @rc: the radio controller
- * @mas: the MAS to reserve
- */
-void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
-{
- bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
- bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
- rc->drp_avail.ie_valid = false;
-}
-
-/**
- * uwb_drp_avail_release - release MAS from a pending or established reservation
- * @rc: the radio controller
- * @mas: the MAS to release
- */
-void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
-{
- bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
- bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
- rc->drp_avail.ie_valid = false;
- uwb_rsv_handle_drp_avail_change(rc);
-}
-
-/**
- * uwb_drp_avail_ie_update - update the DRP Availability IE
- * @rc: the radio controller
- *
- * avail = global & local
- */
-void uwb_drp_avail_ie_update(struct uwb_rc *rc)
-{
- struct uwb_mas_bm avail;
-
- bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
-
- rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
- rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;
- uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
- rc->drp_avail.ie_valid = true;
-}
-
-/**
- * Create an unsigned long from a buffer containing a byte stream.
- *
- * @array: pointer to buffer
- * @itr: index of buffer from where we start
- * @len: the buffer's remaining size may not be an exact multiple of
- * sizeof(unsigned long); @len is the length of the buffer that needs
- * to be converted. This will be sizeof(unsigned long) or smaller
- * (BUG if not). If it is smaller, then we will pad the remaining
- * space of the result with zeroes.
- */
-static
-unsigned long get_val(u8 *array, size_t itr, size_t len)
-{
- unsigned long val = 0;
- size_t top = itr + len;
-
- BUG_ON(len > sizeof(val));
-
- while (itr < top) {
- val <<= 8;
- val |= array[top - 1];
- top--;
- }
- val <<= 8 * (sizeof(val) - len); /* padding */
- return val;
-}
-
-/**
- * Initialize bitmap from data buffer.
- *
- * The bitmap to be converted could come from a IE, for example a
- * DRP Availability IE.
- * From ECMA-368 1.0 [16.8.7]: "
- * octets: 1 1 N * (0 to 32)
- * Element ID Length (=N) DRP Availability Bitmap
- *
- * The DRP Availability Bitmap field is up to 256 bits long, one
- * bit for each MAS in the superframe, where the least-significant
- * bit of the field corresponds to the first MAS in the superframe
- * and successive bits correspond to successive MASs."
- *
- * The DRP Availability bitmap is in octets from 0 to 32, so octet
- * 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32
- * octets, the bits in octets not included at the end of the bitmap are
- * treated as zero. In this case (when the bitmap is smaller than 32
- * octets) the MAS represented range from MAS 1 to MAS (size of bitmap)
- * with the last octet still containing bits for MAS 1-8, etc.
- *
- * For example:
- * F00F0102 03040506 0708090A 0B0C0D0E 0F010203
- * ^^^^
- * ||||
- * ||||
- * |||\LSB of byte is MAS 9
- * ||\MSB of byte is MAS 16
- * |\LSB of first byte is MAS 1
- * \ MSB of byte is MAS 8
- *
- * An example of this encoding can be found in ECMA-368 Annex-D [Table D.11]
- *
- * The resulting bitmap will have the following mapping:
- * bit position 0 == MAS 1
- * bit position 1 == MAS 2
- * ...
- * bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS
- *
- * @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP)
- * @buffer: pointer to buffer containing bitmap data in big endian
- * format (MSB first)
- * @buffer_size:number of bytes with which bitmap should be initialized
- */
-static
-void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
- size_t buffer_size)
-{
- u8 *buffer = _buffer;
- size_t itr, len;
- unsigned long val;
-
- itr = 0;
- while (itr < buffer_size) {
- len = buffer_size - itr >= sizeof(val) ?
- sizeof(val) : buffer_size - itr;
- val = get_val(buffer, itr, len);
- bmp_itr[itr / sizeof(val)] = val;
- itr += sizeof(val);
- }
-}
-
-
-/**
- * Extract DRP Availability bitmap from the notification.
- *
- * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes.
- * We convert that to our internal representation.
- */
-static
-int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp)
-{
- struct device *dev = &evt->rc->uwb_dev.dev;
- struct uwb_rc_evt_drp_avail *drp_evt;
- int result = -EINVAL;
-
- /* Is there enough data to decode the event? */
- if (evt->notif.size < sizeof(*drp_evt)) {
- dev_err(dev, "DRP Availability Change: Not enough "
- "data to decode event [%zu bytes, %zu "
- "needed]\n", evt->notif.size, sizeof(*drp_evt));
- goto error;
- }
- drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb);
- buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8);
- result = 0;
-error:
- return result;
-}
-
-
-/**
- * Process an incoming DRP Availability notification.
- *
- * @evt: Event information (packs the actual event data, which
- * radio controller it came to, etc).
- *
- * @returns: 0 on success (so uwbd() frees the event buffer), < 0
- * on error.
- *
- * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
- * the MAS slot is available, bits set to ZERO indicate that the slot
- * is busy.
- *
- * So we clear available slots, we set used slots :)
- *
- * The notification only marks non-availability based on the BP and
- * received DRP IEs that are not for this radio controller. A copy of
- * this bitmap is needed to generate the real availability (which
- * includes local and pending reservations).
- *
- * The DRP Availability IE that this radio controller emits will need
- * to be updated.
- */
-int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
-{
- int result;
- struct uwb_rc *rc = evt->rc;
- DECLARE_BITMAP(bmp, UWB_NUM_MAS);
-
- result = uwbd_evt_get_drp_avail(evt, bmp);
- if (result < 0)
- return result;
-
- mutex_lock(&rc->rsvs_mutex);
- bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
- rc->drp_avail.ie_valid = false;
- uwb_rsv_handle_drp_avail_change(rc);
- mutex_unlock(&rc->rsvs_mutex);
-
- uwb_rsv_sched_update(rc);
-
- return 0;
-}
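
buffer_to_bmp() above walks the IE payload in sizeof(unsigned long) chunks and get_val() assembles each word least-significant byte first, so bit 0 of the result maps to MAS 1 as ECMA-368 requires. A self-contained sketch of that reassembly (sample bytes are illustrative):

#include <stdio.h>

/* Assemble up to sizeof(unsigned long) bytes, LSB first, into one word. */
static unsigned long get_word(const unsigned char *buf, size_t len)
{
	unsigned long val = 0;
	size_t i;

	for (i = 0; i < len; i++)
		val |= (unsigned long)buf[i] << (8 * i);
	return val;
}

int main(void)
{
	unsigned char stream[] = { 0x0f, 0x00, 0xf0 };	/* MAS 1-4 and 21-24 */
	unsigned long word = get_word(stream, sizeof(stream));

	printf("bitmap word 0 = 0x%lx\n", word);	/* -> 0xf0000f */
	return 0;
}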
diff --git a/drivers/staging/uwb/drp-ie.c b/drivers/staging/uwb/drp-ie.c
deleted file mode 100644
index b2a862cf76de..000000000000
--- a/drivers/staging/uwb/drp-ie.c
+++ /dev/null
@@ -1,305 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UWB DRP IE management.
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-
-#include "uwb.h"
-#include "uwb-internal.h"
-
-
-/*
- * Return the reason code for a reservation's DRP IE.
- */
-static int uwb_rsv_reason_code(struct uwb_rsv *rsv)
-{
- static const int reason_codes[] = {
- [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED,
- [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED,
- [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED,
- [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT,
- [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING,
- [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED,
- [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
- [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING,
- [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED,
- };
-
- return reason_codes[rsv->state];
-}
-
-/*
- * Return the reason code for a reservation's companion DRP IE.
- */
-static int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
-{
- static const int companion_reason_codes[] = {
- [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
- [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
- [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING,
- [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED,
- };
-
- return companion_reason_codes[rsv->state];
-}
-
-/*
- * Return the status bit for a reservation's DRP IE.
- */
-int uwb_rsv_status(struct uwb_rsv *rsv)
-{
- static const int statuses[] = {
- [UWB_RSV_STATE_O_INITIATED] = 0,
- [UWB_RSV_STATE_O_PENDING] = 0,
- [UWB_RSV_STATE_O_MODIFIED] = 1,
- [UWB_RSV_STATE_O_ESTABLISHED] = 1,
- [UWB_RSV_STATE_O_TO_BE_MOVED] = 0,
- [UWB_RSV_STATE_O_MOVE_COMBINING] = 1,
- [UWB_RSV_STATE_O_MOVE_REDUCING] = 1,
- [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1,
- [UWB_RSV_STATE_T_ACCEPTED] = 1,
- [UWB_RSV_STATE_T_CONFLICT] = 0,
- [UWB_RSV_STATE_T_PENDING] = 0,
- [UWB_RSV_STATE_T_DENIED] = 0,
- [UWB_RSV_STATE_T_RESIZED] = 1,
- [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
- [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1,
- [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1,
- [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1,
-
- };
-
- return statuses[rsv->state];
-}
-
-/*
- * Return the status bit for a reservation's companion DRP IE.
- */
-int uwb_rsv_companion_status(struct uwb_rsv *rsv)
-{
- static const int companion_statuses[] = {
- [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0,
- [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
- [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0,
- [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0,
- [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0,
- };
-
- return companion_statuses[rsv->state];
-}
-
-/*
- * Allocate a DRP IE.
- *
- * To save having to free/allocate a DRP IE when its MAS changes,
- * enough memory is allocated for the maximum number of DRP
- * allocation fields. This gives an overhead per reservation of up to
- * (UWB_NUM_ZONES - 1) * 4 = 60 octets.
- */
-static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
-{
- struct uwb_ie_drp *drp_ie;
-
- drp_ie = kzalloc(struct_size(drp_ie, allocs, UWB_NUM_ZONES),
- GFP_KERNEL);
- if (drp_ie)
- drp_ie->hdr.element_id = UWB_IE_DRP;
- return drp_ie;
-}
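
The 60-octet figure in the comment above is simply (UWB_NUM_ZONES - 1) * sizeof(struct uwb_drp_alloc): at least one 4-octet allocation field is always needed, and room for all UWB_NUM_ZONES of them is preallocated. A standalone user-space check of that arithmetic (not driver code; the two-__le16 field layout is assumed from the cpu_to_le16() conversions used below):

#include <stdio.h>
#include <stdint.h>

#define UWB_NUM_ZONES 16

struct uwb_drp_alloc { uint16_t zone_bm, mas_bm; };	/* 4 octets on the wire */

int main(void)
{
	/* worst case: one field in use, UWB_NUM_ZONES - 1 fields idle */
	printf("max slack: %zu octets\n",
	       (UWB_NUM_ZONES - 1) * sizeof(struct uwb_drp_alloc));	/* 60 */
	return 0;
}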
-
-
-/*
- * Fill a DRP IE's allocation fields from a MAS bitmap.
- */
-static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
- struct uwb_mas_bm *mas)
-{
- int z, i, num_fields = 0, next = 0;
- struct uwb_drp_alloc *zones;
- __le16 current_bmp;
- DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
- DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);
-
- zones = drp_ie->allocs;
-
- bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);
-
- /* Determine unique MAS bitmaps in zones from bitmap. */
- for (z = 0; z < UWB_NUM_ZONES; z++) {
- bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
- if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
- bool found = false;
- current_bmp = (__le16) *tmp_mas_bm;
- for (i = 0; i < next; i++) {
- if (current_bmp == zones[i].mas_bm) {
- zones[i].zone_bm |= 1 << z;
- found = true;
- break;
- }
- }
- if (!found) {
- num_fields++;
- zones[next].zone_bm = 1 << z;
- zones[next].mas_bm = current_bmp;
- next++;
- }
- }
- bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
- }
-
- /* Store in format ready for transmission (le16). */
- for (i = 0; i < num_fields; i++) {
- drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
- drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
- }
-
- drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
- + num_fields * sizeof(struct uwb_drp_alloc);
-}
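
The compression loop above is easier to follow outside the kernel. Below is a minimal user-space model of the same idea (hypothetical names; host byte order instead of le16; the ECMA-368 geometry of 16 zones of 16 MAS each is assumed): zones whose 16-bit MAS patterns are identical get merged into a single allocation field.

#include <stdio.h>
#include <stdint.h>

#define NUM_ZONES 16

struct drp_alloc { uint16_t zone_bm, mas_bm; };

/* mas[z] holds the 16-bit MAS pattern of zone z. */
static int drp_compress(const uint16_t mas[NUM_ZONES],
			struct drp_alloc out[NUM_ZONES])
{
	int z, i, nfields = 0;

	for (z = 0; z < NUM_ZONES; z++) {
		if (!mas[z])
			continue;		/* empty zone: no field needed */
		for (i = 0; i < nfields; i++)
			if (out[i].mas_bm == mas[z])
				break;		/* pattern already emitted */
		if (i == nfields) {		/* first zone with this pattern */
			out[i].zone_bm = 0;
			out[i].mas_bm = mas[z];
			nfields++;
		}
		out[i].zone_bm |= 1u << z;
	}
	return nfields;
}

int main(void)
{
	uint16_t mas[NUM_ZONES] = { [2] = 0x00ff, [5] = 0x00ff, [9] = 0x0f00 };
	struct drp_alloc out[NUM_ZONES];
	int i, n = drp_compress(mas, out);

	for (i = 0; i < n; i++)	/* two fields: zones {2,5} share 0x00ff, zone {9} has 0x0f00 */
		printf("zone_bm=%04x mas_bm=%04x\n", out[i].zone_bm, out[i].mas_bm);
	return 0;
}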
-
-/**
- * uwb_drp_ie_update - update a reservation's DRP IE
- * @rsv: the reservation
- */
-int uwb_drp_ie_update(struct uwb_rsv *rsv)
-{
- struct uwb_ie_drp *drp_ie;
- struct uwb_rsv_move *mv;
- int unsafe;
-
- if (rsv->state == UWB_RSV_STATE_NONE) {
- kfree(rsv->drp_ie);
- rsv->drp_ie = NULL;
- return 0;
- }
-
- unsafe = rsv->mas.unsafe ? 1 : 0;
-
- if (rsv->drp_ie == NULL) {
- rsv->drp_ie = uwb_drp_ie_alloc();
- if (rsv->drp_ie == NULL)
- return -ENOMEM;
- }
- drp_ie = rsv->drp_ie;
-
- uwb_ie_drp_set_unsafe(drp_ie, unsafe);
- uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker);
- uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
- uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
- uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
- uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
- uwb_ie_drp_set_type(drp_ie, rsv->type);
-
- if (uwb_rsv_is_owner(rsv)) {
- switch (rsv->target.type) {
- case UWB_RSV_TARGET_DEV:
- drp_ie->dev_addr = rsv->target.dev->dev_addr;
- break;
- case UWB_RSV_TARGET_DEVADDR:
- drp_ie->dev_addr = rsv->target.devaddr;
- break;
- }
- } else
- drp_ie->dev_addr = rsv->owner->dev_addr;
-
- uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
-
- if (uwb_rsv_has_two_drp_ies(rsv)) {
- mv = &rsv->mv;
- if (mv->companion_drp_ie == NULL) {
- mv->companion_drp_ie = uwb_drp_ie_alloc();
- if (mv->companion_drp_ie == NULL)
- return -ENOMEM;
- }
- drp_ie = mv->companion_drp_ie;
-
- /* keep all the same configuration of the main drp_ie */
- memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));
-
-
- /* FIXME: handle properly the unsafe bit */
- uwb_ie_drp_set_unsafe(drp_ie, 1);
- uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
- uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));
-
- uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
- }
-
- rsv->ie_valid = true;
- return 0;
-}
-
-/*
- * Set MAS bits from given MAS bitmap in a single zone of large bitmap.
- *
- * We are given a zone id and the MAS bitmap of bits that need to be set in
- * this zone. Note that this zone may already have bits set and this only
- * adds settings - we cannot simply assign the MAS bitmap contents to the
- * zone contents. We iterate over the bits (MAS) in the zone and set the
- * bits that are set in the given MAS bitmap.
- */
-static
-void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
-{
- int mas;
- u16 mas_mask;
-
- for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
- mas_mask = 1 << mas;
- if (mas_bm & mas_mask)
- set_bit(zone * UWB_NUM_ZONES + mas, bm->bm);
- }
-}
-
-/**
- * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap
- * @mas: MAS bitmap that will be populated to correspond to the
- * allocation fields in the DRP IE
- * @drp_ie: the DRP IE that contains the allocation fields.
- *
- * The input format is an array of MAS allocation fields (16 bit Zone
- * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
- * 16.8.6. The output is a full 256 bit MAS bitmap.
- *
- * We go over all the allocation fields, for each allocation field we
- * know which zones are impacted. We iterate over all the zones
- * impacted and call a function that will set the correct MAS bits in
- * each zone.
- */
-void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
-{
- int numallocs = (drp_ie->hdr.length - 4) / 4;
- const struct uwb_drp_alloc *alloc;
- int cnt;
- u16 zone_bm, mas_bm;
- u8 zone;
- u16 zone_mask;
-
- bitmap_zero(bm->bm, UWB_NUM_MAS);
-
- for (cnt = 0; cnt < numallocs; cnt++) {
- alloc = &drp_ie->allocs[cnt];
- zone_bm = le16_to_cpu(alloc->zone_bm);
- mas_bm = le16_to_cpu(alloc->mas_bm);
- for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
- zone_mask = 1 << zone;
- if (zone_bm & zone_mask)
- uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
- }
- }
-}
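
Decoding runs the same walk in reverse: for every allocation field, each set bit in zone_bm ORs mas_bm into the corresponding 16-MAS slice of the full bitmap. ORing rather than assigning matters because a zone may appear in more than one field. A sketch of just that step, reusing the types from the encoder sketch above:

/* Expand allocation fields back into per-zone MAS patterns. */
static void drp_expand(const struct drp_alloc *fields, int nfields,
		       uint16_t mas[NUM_ZONES])
{
	int i, z;

	for (z = 0; z < NUM_ZONES; z++)
		mas[z] = 0;
	for (i = 0; i < nfields; i++)
		for (z = 0; z < NUM_ZONES; z++)
			if (fields[i].zone_bm & (1u << z))
				mas[z] |= fields[i].mas_bm;	/* OR, not assign */
}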
-
diff --git a/drivers/staging/uwb/drp.c b/drivers/staging/uwb/drp.c
deleted file mode 100644
index 869987bede7b..000000000000
--- a/drivers/staging/uwb/drp.c
+++ /dev/null
@@ -1,842 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Dynamic Reservation Protocol handling
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include "uwb-internal.h"
-
-
-/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
-enum uwb_drp_conflict_action {
- /* Reservation is maintained, no action needed */
- UWB_DRP_CONFLICT_MANTAIN = 0,
-
- /* the device shall not transmit frames in conflicting MASs in
- * the following superframe. If the device is the reservation
- * target, it shall also set the Reason Code in its DRP IE to
- * Conflict in its beacon in the following superframe.
- */
- UWB_DRP_CONFLICT_ACT1,
-
- /* the device shall not set the Reservation Status bit to ONE
- * and shall not transmit frames in conflicting MASs. If the
- * device is the reservation target, it shall also set the
- * Reason Code in its DRP IE to Conflict.
- */
- UWB_DRP_CONFLICT_ACT2,
-
- /* the device shall not transmit frames in conflicting MASs in
- * the following superframe. It shall remove the conflicting
- * MASs from the reservation or set the Reservation Status to
- * ZERO in its beacon in the following superframe. If the
- * device is the reservation target, it shall also set the
- * Reason Code in its DRP IE to Conflict.
- */
- UWB_DRP_CONFLICT_ACT3,
-};
-
-
-static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
- struct uwb_rceb *reply, ssize_t reply_size)
-{
- struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
- unsigned long flags;
-
- if (r != NULL) {
- if (r->bResultCode != UWB_RC_RES_SUCCESS)
- dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
- uwb_rc_strerror(r->bResultCode), r->bResultCode);
- } else
- dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
-
- spin_lock_irqsave(&rc->rsvs_lock, flags);
- if (rc->set_drp_ie_pending > 1) {
- rc->set_drp_ie_pending = 0;
- uwb_rsv_queue_update(rc);
- } else {
- rc->set_drp_ie_pending = 0;
- }
- spin_unlock_irqrestore(&rc->rsvs_lock, flags);
-}
-
-/**
- * Construct and send the SET DRP IE
- *
- * @rc: UWB Host controller
- * @returns: >= 0 number of bytes still available in the beacon
- * < 0 errno code on error.
- *
- * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
- * device to include in its beacon at the same time. We thus have to
- * traverse all reservations and include the DRP IEs of all PENDING
- * and NEGOTIATED reservations in a SET DRP command for transmission.
- *
- * A DRP Availability IE is placed first in the command payload.
- *
- * rc->rsvs_mutex is held
- *
- * FIXME We currently ignore the returned value indicating the remaining space
- * in beacon. This could be used to deny reservation requests earlier if
- * determined that they would cause the beacon space to be exceeded.
- */
-int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
-{
- int result;
- struct uwb_rc_cmd_set_drp_ie *cmd;
- struct uwb_rsv *rsv;
- struct uwb_rsv_move *mv;
- int num_bytes = 0;
- u8 *IEDataptr;
-
- result = -ENOMEM;
- /* First traverse all reservations to determine memory needed. */
- list_for_each_entry(rsv, &rc->reservations, rc_node) {
- if (rsv->drp_ie != NULL) {
- num_bytes += rsv->drp_ie->hdr.length + 2;
- if (uwb_rsv_has_two_drp_ies(rsv) &&
- (rsv->mv.companion_drp_ie != NULL)) {
- mv = &rsv->mv;
- num_bytes +=
- mv->companion_drp_ie->hdr.length + 2;
- }
- }
- }
- num_bytes += sizeof(rc->drp_avail.ie);
- cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
- if (cmd == NULL)
- goto error;
- cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
- cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
- cmd->wIELength = num_bytes;
- IEDataptr = (u8 *)&cmd->IEData[0];
-
-	/* FIXME: DRP avail IE is not always needed */
- /* put DRP avail IE first */
- memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
- IEDataptr += sizeof(struct uwb_ie_drp_avail);
-
- /* Next traverse all reservations to place IEs in allocated memory. */
- list_for_each_entry(rsv, &rc->reservations, rc_node) {
- if (rsv->drp_ie != NULL) {
- memcpy(IEDataptr, rsv->drp_ie,
- rsv->drp_ie->hdr.length + 2);
- IEDataptr += rsv->drp_ie->hdr.length + 2;
-
- if (uwb_rsv_has_two_drp_ies(rsv) &&
- (rsv->mv.companion_drp_ie != NULL)) {
- mv = &rsv->mv;
- memcpy(IEDataptr, mv->companion_drp_ie,
- mv->companion_drp_ie->hdr.length + 2);
- IEDataptr +=
- mv->companion_drp_ie->hdr.length + 2;
- }
- }
- }
-
- result = uwb_rc_cmd_async(rc, "SET-DRP-IE",
- &cmd->rccb, sizeof(*cmd) + num_bytes,
- UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
- uwb_rc_set_drp_cmd_done, NULL);
-
- rc->set_drp_ie_pending = 1;
-
- kfree(cmd);
-error:
- return result;
-}
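
uwb_rc_send_all_drp_ie() is an instance of the classic two-pass buffer build: one walk over the reservations to size the command, a second walk to copy the IEs in, each occupying hdr.length + 2 octets. A stripped-down user-space model of the pattern (hypothetical ie type with a 2-byte id/length header):

#include <stdlib.h>
#include <string.h>

struct ie { unsigned char id, len; unsigned char data[]; };

/* Concatenate n IEs into one freshly allocated buffer. */
static void *pack_ies(struct ie *const ies[], int n, size_t *out_len)
{
	unsigned char *buf, *p;
	size_t total = 0;
	int i;

	for (i = 0; i < n; i++)			/* pass 1: size it */
		total += ies[i]->len + 2;
	buf = malloc(total);
	if (!buf)
		return NULL;
	for (p = buf, i = 0; i < n; i++) {	/* pass 2: fill it */
		memcpy(p, ies[i], ies[i]->len + 2);
		p += ies[i]->len + 2;
	}
	*out_len = total;
	return buf;
}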
-
-/*
- * Evaluate the action to perform using conflict resolution rules
- *
- * Return a uwb_drp_conflict_action.
- */
-static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
- struct uwb_rsv *rsv, int our_status)
-{
- int our_tie_breaker = rsv->tiebreaker;
- int our_type = rsv->type;
- int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;
-
- int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
- int ext_status = uwb_ie_drp_status(ext_drp_ie);
- int ext_type = uwb_ie_drp_type(ext_drp_ie);
-
-
- /* [ECMA-368 2nd Edition] 17.4.6 */
- if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
- return UWB_DRP_CONFLICT_MANTAIN;
- }
-
- /* [ECMA-368 2nd Edition] 17.4.6-1 */
- if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
- return UWB_DRP_CONFLICT_MANTAIN;
- }
-
- /* [ECMA-368 2nd Edition] 17.4.6-2 */
- if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
- /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
- return UWB_DRP_CONFLICT_ACT1;
- }
-
- /* [ECMA-368 2nd Edition] 17.4.6-3 */
- if (our_status == 0 && ext_status == 1) {
- return UWB_DRP_CONFLICT_ACT2;
- }
-
- /* [ECMA-368 2nd Edition] 17.4.6-4 */
- if (our_status == 1 && ext_status == 0) {
- return UWB_DRP_CONFLICT_MANTAIN;
- }
-
- /* [ECMA-368 2nd Edition] 17.4.6-5a */
- if (our_tie_breaker == ext_tie_breaker &&
- our_beacon_slot < ext_beacon_slot) {
- return UWB_DRP_CONFLICT_MANTAIN;
- }
-
- /* [ECMA-368 2nd Edition] 17.4.6-5b */
- if (our_tie_breaker != ext_tie_breaker &&
- our_beacon_slot > ext_beacon_slot) {
- return UWB_DRP_CONFLICT_MANTAIN;
- }
-
- if (our_status == 0) {
- if (our_tie_breaker == ext_tie_breaker) {
- /* [ECMA-368 2nd Edition] 17.4.6-6a */
- if (our_beacon_slot > ext_beacon_slot) {
- return UWB_DRP_CONFLICT_ACT2;
- }
- } else {
- /* [ECMA-368 2nd Edition] 17.4.6-6b */
- if (our_beacon_slot < ext_beacon_slot) {
- return UWB_DRP_CONFLICT_ACT2;
- }
- }
- } else {
- if (our_tie_breaker == ext_tie_breaker) {
- /* [ECMA-368 2nd Edition] 17.4.6-7a */
- if (our_beacon_slot > ext_beacon_slot) {
- return UWB_DRP_CONFLICT_ACT3;
- }
- } else {
- /* [ECMA-368 2nd Edition] 17.4.6-7b */
- if (our_beacon_slot < ext_beacon_slot) {
- return UWB_DRP_CONFLICT_ACT3;
- }
- }
- }
- return UWB_DRP_CONFLICT_MANTAIN;
-}
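
Rules 5a/5b above boil down to one comparison once the symmetry is spotted: with equal tie-breaker bits the lower beacon slot keeps its reservation, with different bits the higher slot does. A compact restatement (hypothetical helper, not part of the driver):

/* Nonzero if the local side survives ECMA-368 17.4.6 rules 5a/5b. */
static int tiebreak_wins(int our_tb, int ext_tb, int our_slot, int ext_slot)
{
	if (our_tb == ext_tb)
		return our_slot < ext_slot;	/* 5a: lower slot wins */
	return our_slot > ext_slot;		/* 5b: higher slot wins */
}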
-
-static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
- int ext_beacon_slot,
- struct uwb_rsv *rsv,
- struct uwb_mas_bm *conflicting_mas)
-{
- struct uwb_rc *rc = rsv->rc;
- struct uwb_rsv_move *mv = &rsv->mv;
- struct uwb_drp_backoff_win *bow = &rc->bow;
- int action;
-
- action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));
-
- if (uwb_rsv_is_owner(rsv)) {
- switch(action) {
- case UWB_DRP_CONFLICT_ACT2:
- /* try move */
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
- if (bow->can_reserve_extra_mases == false)
- uwb_rsv_backoff_win_increment(rc);
-
- break;
- case UWB_DRP_CONFLICT_ACT3:
- uwb_rsv_backoff_win_increment(rc);
- /* drop some mases with reason modified */
- /* put in the companion the mases to be dropped */
- bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
- default:
- break;
- }
- } else {
- switch(action) {
- case UWB_DRP_CONFLICT_ACT2:
- case UWB_DRP_CONFLICT_ACT3:
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
- default:
- break;
- }
-
- }
-
-}
-
-static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
- struct uwb_rsv *rsv, bool companion_only,
- struct uwb_mas_bm *conflicting_mas)
-{
- struct uwb_rc *rc = rsv->rc;
- struct uwb_drp_backoff_win *bow = &rc->bow;
- struct uwb_rsv_move *mv = &rsv->mv;
- int action;
-
- if (companion_only) {
- /* status of companion is 0 at this point */
- action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
- if (uwb_rsv_is_owner(rsv)) {
- switch(action) {
- case UWB_DRP_CONFLICT_ACT2:
- case UWB_DRP_CONFLICT_ACT3:
- uwb_rsv_set_state(rsv,
- UWB_RSV_STATE_O_ESTABLISHED);
- rsv->needs_release_companion_mas = false;
- if (bow->can_reserve_extra_mases == false)
- uwb_rsv_backoff_win_increment(rc);
- uwb_drp_avail_release(rsv->rc,
- &rsv->mv.companion_mas);
- }
- } else { /* rsv is target */
- switch(action) {
- case UWB_DRP_CONFLICT_ACT2:
- case UWB_DRP_CONFLICT_ACT3:
- uwb_rsv_set_state(rsv,
- UWB_RSV_STATE_T_EXPANDING_CONFLICT);
- /* send_drp_avail_ie = true; */
- }
- }
- } else { /* also base part of the reservation is conflicting */
- if (uwb_rsv_is_owner(rsv)) {
- uwb_rsv_backoff_win_increment(rc);
- /* remove companion part */
- uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
-
- /* drop some mases with reason modified */
-
- /* put in the companion the mases to be dropped */
- bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
- conflicting_mas->bm, UWB_NUM_MAS);
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
- } else { /* it is a target rsv */
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
- /* send_drp_avail_ie = true; */
- }
- }
-}
-
-static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
- struct uwb_rc_evt_drp *drp_evt,
- struct uwb_ie_drp *drp_ie,
- struct uwb_mas_bm *conflicting_mas)
-{
- struct uwb_rsv_move *mv;
-
- /* check if the conflicting reservation has two drp_ies */
- if (uwb_rsv_has_two_drp_ies(rsv)) {
- mv = &rsv->mv;
- if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
- UWB_NUM_MAS)) {
- handle_conflict_expanding(drp_ie,
- drp_evt->beacon_slot_number,
- rsv, false, conflicting_mas);
- } else {
- if (bitmap_intersects(mv->companion_mas.bm,
- conflicting_mas->bm, UWB_NUM_MAS)) {
- handle_conflict_expanding(
- drp_ie, drp_evt->beacon_slot_number,
- rsv, true, conflicting_mas);
- }
- }
- } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
- UWB_NUM_MAS)) {
- handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number,
- rsv, conflicting_mas);
- }
-}
-
-static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
- struct uwb_rc_evt_drp *drp_evt,
- struct uwb_ie_drp *drp_ie,
- struct uwb_mas_bm *conflicting_mas)
-{
- struct uwb_rsv *rsv;
-
- list_for_each_entry(rsv, &rc->reservations, rc_node) {
- uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie,
- conflicting_mas);
- }
-}
-
-static void uwb_drp_process_target_accepted(struct uwb_rc *rc,
- struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt,
- struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas)
-{
- struct uwb_rsv_move *mv = &rsv->mv;
- int status;
-
- status = uwb_ie_drp_status(drp_ie);
-
- if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
- return;
- }
-
- if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
- /* drp_ie is companion */
- if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
- /* stroke companion */
- uwb_rsv_set_state(rsv,
- UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
- }
- } else {
- if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
- if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) {
- /* FIXME: there is a conflict, find
- * the conflicting reservations and
- * take a sensible action. Consider
- * that in drp_ie there is the
- * "neighbour" */
- uwb_drp_handle_all_conflict_rsv(rc, drp_evt,
- drp_ie, mas);
- } else {
- /* accept the extra reservation */
- bitmap_copy(mv->companion_mas.bm, mas->bm,
- UWB_NUM_MAS);
- uwb_rsv_set_state(rsv,
- UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
- }
- } else {
- if (status) {
- uwb_rsv_set_state(rsv,
- UWB_RSV_STATE_T_ACCEPTED);
- }
- }
-
- }
-}
-
-/*
- * Based on the DRP IE, transition a target reservation to a new
- * state.
- */
-static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
- struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
-{
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_rsv_move *mv = &rsv->mv;
- int status;
- enum uwb_drp_reason reason_code;
- struct uwb_mas_bm mas;
-
- status = uwb_ie_drp_status(drp_ie);
- reason_code = uwb_ie_drp_reason_code(drp_ie);
- uwb_drp_ie_to_bm(&mas, drp_ie);
-
- switch (reason_code) {
- case UWB_DRP_REASON_ACCEPTED:
- uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas);
- break;
-
- case UWB_DRP_REASON_MODIFIED:
- /* check to see if we have already modified the reservation */
- if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
- break;
- }
-
- /* find if the owner wants to expand or reduce */
- if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
- /* owner is reducing */
- bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm,
- UWB_NUM_MAS);
- uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
- }
-
- bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
- break;
- default:
- dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
- reason_code, status);
- }
-}
-
-static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv,
- struct uwb_mas_bm *mas)
-{
- struct uwb_rsv_move *mv = &rsv->mv;
-
- switch (rsv->state) {
- case UWB_RSV_STATE_O_PENDING:
- case UWB_RSV_STATE_O_INITIATED:
- case UWB_RSV_STATE_O_ESTABLISHED:
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
- break;
- case UWB_RSV_STATE_O_MODIFIED:
- if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
- else
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
- break;
-
-	case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
- if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
- else
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
- break;
- case UWB_RSV_STATE_O_MOVE_EXPANDING:
- if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
- /* Companion reservation accepted */
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
- } else {
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
- }
- break;
- case UWB_RSV_STATE_O_MOVE_COMBINING:
- if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
- else
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
- break;
- default:
- break;
- }
-}
-/*
- * Based on the DRP IE, transition an owner reservation to a new
- * state.
- */
-static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
- struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
- struct uwb_rc_evt_drp *drp_evt)
-{
- struct device *dev = &rc->uwb_dev.dev;
- int status;
- enum uwb_drp_reason reason_code;
- struct uwb_mas_bm mas;
-
- status = uwb_ie_drp_status(drp_ie);
- reason_code = uwb_ie_drp_reason_code(drp_ie);
- uwb_drp_ie_to_bm(&mas, drp_ie);
-
- if (status) {
- switch (reason_code) {
- case UWB_DRP_REASON_ACCEPTED:
- uwb_drp_process_owner_accepted(rsv, &mas);
- break;
- default:
- dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
- reason_code, status);
- }
- } else {
- switch (reason_code) {
- case UWB_DRP_REASON_PENDING:
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
- break;
- case UWB_DRP_REASON_DENIED:
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
- break;
- case UWB_DRP_REASON_CONFLICT:
- /* resolve the conflict */
- bitmap_complement(mas.bm, src->last_availability_bm,
- UWB_NUM_MAS);
- uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
- break;
- default:
- dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
- reason_code, status);
- }
- }
-}
-
-static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
-{
- unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
- mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
-}
-
-static void uwb_cnflt_update_work(struct work_struct *work)
-{
- struct uwb_cnflt_alien *cnflt = container_of(work,
- struct uwb_cnflt_alien,
- cnflt_update_work);
- struct uwb_cnflt_alien *c;
- struct uwb_rc *rc = cnflt->rc;
-
- unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
-
- mutex_lock(&rc->rsvs_mutex);
-
- list_del(&cnflt->rc_node);
-
- /* update rc global conflicting alien bitmap */
- bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
-
- list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
- bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
- c->mas.bm, UWB_NUM_MAS);
- }
-
- queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
- usecs_to_jiffies(delay_us));
-
- kfree(cnflt);
- mutex_unlock(&rc->rsvs_mutex);
-}
-
-static void uwb_cnflt_timer(struct timer_list *t)
-{
- struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
-
- queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
-}
-
-/*
- * We have received a DRP IE of type Alien BP and we need to make
- * sure we do not transmit in conflicting MASs.
- */
-static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
-{
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_mas_bm mas;
- struct uwb_cnflt_alien *cnflt;
- unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
-
- uwb_drp_ie_to_bm(&mas, drp_ie);
-
- list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
- if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
- /* Existing alien BP reservation conflicting
- * bitmap, just reset the timer */
- uwb_cnflt_alien_stroke_timer(cnflt);
- return;
- }
- }
-
- /* New alien BP reservation conflicting bitmap */
-
- /* alloc and initialize new uwb_cnflt_alien */
- cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
- if (!cnflt) {
- dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
- return;
- }
-
- INIT_LIST_HEAD(&cnflt->rc_node);
- timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);
-
- cnflt->rc = rc;
- INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
-
- bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);
-
- list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);
-
- /* update rc global conflicting alien bitmap */
- bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);
-
- queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));
-
- /* start the timer */
- uwb_cnflt_alien_stroke_timer(cnflt);
-}
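
The pattern here is a keep-alive ("stroke") cache: if an identical conflicting bitmap is already being tracked, only its expiry timer is pushed forward; otherwise a new tracking entry is allocated and armed. A reduced user-space model with the timer collapsed to a deadline field (hypothetical names throughout):

#include <stdint.h>
#include <string.h>

#define MAX_TRACKED 8
#define BM_BYTES    32		/* 256 MAS / 8 */

struct tracked { int in_use; uint8_t bm[BM_BYTES]; uint64_t deadline; };

/* Refresh the entry matching bm, or claim a free slot for it. */
static void stroke_or_add(struct tracked t[MAX_TRACKED], const uint8_t *bm,
			  uint64_t now, uint64_t timeout)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_TRACKED; i++) {
		if (!t[i].in_use) {
			if (free_slot < 0)
				free_slot = i;
			continue;
		}
		if (!memcmp(t[i].bm, bm, BM_BYTES)) {
			t[i].deadline = now + timeout;	/* stroke: refresh only */
			return;
		}
	}
	if (free_slot >= 0) {				/* new conflict bitmap */
		t[free_slot].in_use = 1;
		memcpy(t[free_slot].bm, bm, BM_BYTES);
		t[free_slot].deadline = now + timeout;
	}
}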
-
-static void uwb_drp_process_not_involved(struct uwb_rc *rc,
- struct uwb_rc_evt_drp *drp_evt,
- struct uwb_ie_drp *drp_ie)
-{
- struct uwb_mas_bm mas;
-
- uwb_drp_ie_to_bm(&mas, drp_ie);
- uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
-}
-
-static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
- struct uwb_rc_evt_drp *drp_evt,
- struct uwb_ie_drp *drp_ie)
-{
- struct uwb_rsv *rsv;
-
- rsv = uwb_rsv_find(rc, src, drp_ie);
- if (!rsv) {
- /*
- * No reservation? It's either for a recently
-		 * terminated reservation, or the DRP IE couldn't be
- * processed (e.g., an invalid IE or out of memory).
- */
- return;
- }
-
- /*
- * Do nothing with DRP IEs for reservations that have been
- * terminated.
- */
- if (rsv->state == UWB_RSV_STATE_NONE) {
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
- return;
- }
-
- if (uwb_ie_drp_owner(drp_ie))
- uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
- else
- uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
-
-}
-
-
-static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
-{
- return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
-}
-
-/*
- * Process a received DRP IE.
- */
-static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
- struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
-{
- if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
- uwb_drp_handle_alien_drp(rc, drp_ie);
- else if (uwb_drp_involves_us(rc, drp_ie))
- uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
- else
- uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
-}
-
-/*
- * Process a received DRP Availability IE
- */
-static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
- struct uwb_ie_drp_avail *drp_availability_ie)
-{
- bitmap_copy(src->last_availability_bm,
- drp_availability_ie->bmp, UWB_NUM_MAS);
-}
-
-/*
- * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
- * from a device.
- */
-static
-void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
- size_t ielen, struct uwb_dev *src_dev)
-{
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_ie_hdr *ie_hdr;
- void *ptr;
-
- ptr = drp_evt->ie_data;
- for (;;) {
- ie_hdr = uwb_ie_next(&ptr, &ielen);
- if (!ie_hdr)
- break;
-
- switch (ie_hdr->element_id) {
- case UWB_IE_DRP_AVAILABILITY:
- uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
- break;
- case UWB_IE_DRP:
- uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
- break;
- default:
- dev_warn(dev, "unexpected IE in DRP notification\n");
- break;
- }
- }
-
- if (ielen > 0)
- dev_warn(dev, "%d octets remaining in DRP notification\n",
- (int)ielen);
-}
-
-/**
- * uwbd_evt_handle_rc_drp - handle a DRP_IE event
- * @evt: the DRP_IE event from the radio controller
- *
- * This processes DRP notifications from the radio controller, either
- * initiating a new reservation or transitioning an existing
- * reservation into a different state.
- *
- * DRP notifications can occur for three different reasons:
- *
- * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
- * the target or source have been received.
- *
- * These DRP IEs could be new or for an existing reservation.
- *
- *   If the DRP IE for an existing reservation ceases to be
- * received for at least mMaxLostBeacons, the reservation should be
- * considered to be terminated. Note that the TERMINATE reason (see
- * below) may not always be signalled (e.g., the remote device has
- * two or more reservations established with the RC).
- *
- * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
- * group conflict with the RC's reservations.
- *
- * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
- * from a device (i.e., it's terminated all reservations).
- *
- * Only the software state of the reservations is changed; the setting
- * of the radio controller's DRP IEs is done after all the events in
- * an event buffer are processed. This saves waiting multiple times
- * for the SET_DRP_IE command to complete.
- */
-int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
-{
- struct device *dev = &evt->rc->uwb_dev.dev;
- struct uwb_rc *rc = evt->rc;
- struct uwb_rc_evt_drp *drp_evt;
- size_t ielength, bytes_left;
- struct uwb_dev_addr src_addr;
- struct uwb_dev *src_dev;
-
- /* Is there enough data to decode the event (and any IEs in
- its payload)? */
- if (evt->notif.size < sizeof(*drp_evt)) {
- dev_err(dev, "DRP event: Not enough data to decode event "
- "[%zu bytes left, %zu needed]\n",
- evt->notif.size, sizeof(*drp_evt));
- return 0;
- }
- bytes_left = evt->notif.size - sizeof(*drp_evt);
- drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
- ielength = le16_to_cpu(drp_evt->ie_length);
- if (bytes_left != ielength) {
-		dev_err(dev, "DRP event: Not enough data in payload [%zu "
- "bytes left, %zu declared in the event]\n",
- bytes_left, ielength);
- return 0;
- }
-
- memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
- src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
- if (!src_dev) {
- /*
- * A DRP notification from an unrecognized device.
- *
- * This is probably from a WUSB device that doesn't
- * have an EUI-48 and therefore doesn't show up in the
- * UWB device database. It's safe to simply ignore
- * these.
- */
- return 0;
- }
-
- mutex_lock(&rc->rsvs_mutex);
-
-	/* We do not distinguish based on the reason */
- uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
-
- mutex_unlock(&rc->rsvs_mutex);
-
- uwb_dev_put(src_dev);
- return 0;
-}
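
The two checks at the top of this handler are the canonical way to validate a variable-length event: first make sure the fixed header fits in the buffer, then make sure the length the header declares matches exactly what is left. In outline (user space, a stand-in header type, host-endian where the driver uses le16_to_cpu()):

#include <stddef.h>
#include <stdint.h>

struct evt_hdr { uint8_t fixed[12]; uint16_t ie_length; };	/* stand-in */

/* 0 if buf holds one well-formed event, -1 otherwise. */
static int evt_check(const struct evt_hdr *evt, size_t size)
{
	if (size < sizeof(*evt))
		return -1;			/* header truncated */
	if (size - sizeof(*evt) != evt->ie_length)
		return -1;			/* declared length disagrees */
	return 0;
}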
diff --git a/drivers/staging/uwb/est.c b/drivers/staging/uwb/est.c
deleted file mode 100644
index d4141ffdd775..000000000000
--- a/drivers/staging/uwb/est.c
+++ /dev/null
@@ -1,450 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band Radio Control
- * Event Size Tables management
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- *
- * Infrastructure, code and data tables for guessing the size of
- * events received on the notification endpoints of UWB radio
- * controllers.
- *
- * You define a table of events and for each, its size and how to get
- * the extra size.
- *
- * ENTRY POINTS:
- *
- * uwb_est_{init/destroy}(): To initialize/release the EST subsystem.
- *
- * uwb_est_[u]register(): To un/register event size tables
- * uwb_est_grow()
- *
- * uwb_est_find_size(): Get the size of an event
- * uwb_est_get_size()
- */
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "uwb-internal.h"
-
-struct uwb_est {
- u16 type_event_high;
- u16 vendor, product;
- u8 entries;
- const struct uwb_est_entry *entry;
-};
-
-static struct uwb_est *uwb_est;
-static u8 uwb_est_size;
-static u8 uwb_est_used;
-static DEFINE_RWLOCK(uwb_est_lock);
-
-/**
- * WUSB Standard Event Size Table, HWA-RC interface
- *
- * Sizes for events and notifications type 0 (general), high nibble 0.
- */
-static
-struct uwb_est_entry uwb_est_00_00xx[] = {
- [UWB_RC_EVT_IE_RCV] = {
- .size = sizeof(struct uwb_rc_evt_ie_rcv),
- .offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
- },
- [UWB_RC_EVT_BEACON] = {
- .size = sizeof(struct uwb_rc_evt_beacon),
- .offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
- },
- [UWB_RC_EVT_BEACON_SIZE] = {
- .size = sizeof(struct uwb_rc_evt_beacon_size),
- },
- [UWB_RC_EVT_BPOIE_CHANGE] = {
- .size = sizeof(struct uwb_rc_evt_bpoie_change),
- .offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
- wBPOIELength),
- },
- [UWB_RC_EVT_BP_SLOT_CHANGE] = {
- .size = sizeof(struct uwb_rc_evt_bp_slot_change),
- },
- [UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
- .size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
- .offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
- },
- [UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
- .size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
- },
- [UWB_RC_EVT_DRP_AVAIL] = {
- .size = sizeof(struct uwb_rc_evt_drp_avail)
- },
- [UWB_RC_EVT_DRP] = {
- .size = sizeof(struct uwb_rc_evt_drp),
- .offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
- },
- [UWB_RC_EVT_BP_SWITCH_STATUS] = {
- .size = sizeof(struct uwb_rc_evt_bp_switch_status),
- },
- [UWB_RC_EVT_CMD_FRAME_RCV] = {
- .size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
- .offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
- },
- [UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
- .size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
- .offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
- },
- [UWB_RC_CMD_CHANNEL_CHANGE] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_DEV_ADDR_MGMT] = {
- .size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) },
- [UWB_RC_CMD_GET_IE] = {
- .size = sizeof(struct uwb_rc_evt_get_ie),
- .offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
- },
- [UWB_RC_CMD_RESET] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_SCAN] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_SET_BEACON_FILTER] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_SET_DRP_IE] = {
- .size = sizeof(struct uwb_rc_evt_set_drp_ie),
- },
- [UWB_RC_CMD_SET_IE] = {
- .size = sizeof(struct uwb_rc_evt_set_ie),
- },
- [UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_SET_TX_POWER] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_SLEEP] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_START_BEACON] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_STOP_BEACON] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_BP_MERGE] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_SEND_COMMAND_FRAME] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
- [UWB_RC_CMD_SET_ASIE_NOTIF] = {
- .size = sizeof(struct uwb_rc_evt_confirm),
- },
-};
-
-static
-struct uwb_est_entry uwb_est_01_00xx[] = {
- [UWB_RC_DAA_ENERGY_DETECTED] = {
- .size = sizeof(struct uwb_rc_evt_daa_energy_detected),
- },
- [UWB_RC_SET_DAA_ENERGY_MASK] = {
- .size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
- },
- [UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
- .size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
- },
-};
-
-/**
- * Initialize the EST subsystem
- *
- * Register the standard tables also.
- *
- * FIXME: tag init
- */
-int uwb_est_create(void)
-{
- int result;
-
- uwb_est_size = 2;
- uwb_est_used = 0;
- uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL);
- if (uwb_est == NULL)
- return -ENOMEM;
-
- result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
- uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
- if (result < 0)
- goto out;
- result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
- uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
-out:
- return result;
-}
-
-
-/** Clean it up */
-void uwb_est_destroy(void)
-{
- kfree(uwb_est);
- uwb_est = NULL;
- uwb_est_size = uwb_est_used = 0;
-}
-
-
-/**
- * Double the capacity of the EST table
- *
- * @returns 0 if ok, < 0 errno on error.
- */
-static
-int uwb_est_grow(void)
-{
- size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
- void *new = kmalloc_array(2, actual_size, GFP_ATOMIC);
- if (new == NULL)
- return -ENOMEM;
- memcpy(new, uwb_est, actual_size);
- memset(new + actual_size, 0, actual_size);
- kfree(uwb_est);
- uwb_est = new;
- uwb_est_size *= 2;
- return 0;
-}
-
-
-/**
- * Register an event size table
- *
- * Makes room for it if the table is full, and then inserts it in the
- * right position (entries are sorted by type, event_high, vendor and
- * then product).
- *
- * @vendor: vendor code for matching against the device (0x0000 and
- * 0xffff mean any); use 0x0000 to force all to match without
- *            checking possible vendor specific ones, 0xffff to match
- * after checking vendor specific ones.
- *
- * @product: product code from that vendor; same matching rules, use
- * 0x0000 for not allowing vendor specific matches, 0xffff
- * for allowing.
- *
- * This arrangement just makes the tables sort differently. Because the
- * table is sorted by growing type-event_high-vendor-product, a zero
- * vendor will match before a 0x456a vendor, which in turn will match
- * before a 0xffff vendor.
- *
- * @returns 0 if ok, < 0 errno on error (-ENOMEM if the table cannot grow).
- */
-/* FIXME: add bus type to vendor/product code */
-int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
- const struct uwb_est_entry *entry, size_t entries)
-{
- unsigned long flags;
- unsigned itr;
- int result = 0;
-
- write_lock_irqsave(&uwb_est_lock, flags);
- if (uwb_est_used == uwb_est_size) {
- result = uwb_est_grow();
- if (result < 0)
- goto out;
- }
- /* Find the right spot to insert it in */
- for (itr = 0; itr < uwb_est_used; itr++)
- if (uwb_est[itr].type_event_high < type
- && uwb_est[itr].vendor < vendor
- && uwb_est[itr].product < product)
- break;
-
- /* Shift others to make room for the new one? */
- if (itr < uwb_est_used)
-		memmove(&uwb_est[itr+1], &uwb_est[itr],
-			(uwb_est_used - itr) * sizeof(uwb_est[0]));
- uwb_est[itr].type_event_high = type << 8 | event_high;
- uwb_est[itr].vendor = vendor;
- uwb_est[itr].product = product;
- uwb_est[itr].entry = entry;
- uwb_est[itr].entries = entries;
- uwb_est_used++;
-out:
- write_unlock_irqrestore(&uwb_est_lock, flags);
- return result;
-}
-EXPORT_SYMBOL_GPL(uwb_est_register);
-
-
-/**
- * Unregister an event size table
- *
- * This just removes the specified entry and moves the ones after it
- * to fill in the gap. This is needed to keep the list sorted; no
- * reallocation is done to reduce the size of the table.
- *
- * We unregister by all the data we used to register instead of by
- * pointer to the @entry array because we might have used the same
- * table for a bunch of IDs (for example).
- *
- * @returns 0 if ok, < 0 errno on error (-ENOENT if not found).
- */
-int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
- const struct uwb_est_entry *entry, size_t entries)
-{
- unsigned long flags;
- unsigned itr;
- struct uwb_est est_cmp = {
- .type_event_high = type << 8 | event_high,
- .vendor = vendor,
- .product = product,
- .entry = entry,
- .entries = entries
- };
- write_lock_irqsave(&uwb_est_lock, flags);
- for (itr = 0; itr < uwb_est_used; itr++)
- if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
- goto found;
- write_unlock_irqrestore(&uwb_est_lock, flags);
- return -ENOENT;
-
-found:
- if (itr < uwb_est_used - 1) /* Not last one? move ones above */
-		memmove(&uwb_est[itr], &uwb_est[itr+1],
-			(uwb_est_used - itr - 1) * sizeof(uwb_est[0]));
- uwb_est_used--;
- write_unlock_irqrestore(&uwb_est_lock, flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(uwb_est_unregister);
-
-
-/**
- * Get the size of an event from a table
- *
- * @rceb: pointer to the buffer with the event
- * @rceb_size: size of the area pointed to by @rceb in bytes.
- * @returns: > 0 Size of the event
- * -ENOSPC An area big enough was not provided to look
- * ahead into the event's guts and guess the size.
- *           -ENOENT      Unknown event code (wEvent).
- *
- * This will look at the received RCEB and guess what is the total
- * size. For variable sized events, it will look further ahead into
- * their length field to see how much data should be read.
- *
- * Note this size is *not* final--the neh (Notification/Event Handle)
- * might specify an extra size to add.
- */
-static
-ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
- u8 event_low, const struct uwb_rceb *rceb,
- size_t rceb_size)
-{
- unsigned offset;
- ssize_t size;
- struct device *dev = &uwb_rc->uwb_dev.dev;
- const struct uwb_est_entry *entry;
-
- size = -ENOENT;
- if (event_low >= est->entries) { /* in range? */
- dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
- est, est->type_event_high, est->vendor, est->product,
- est->entries, event_low);
- goto out;
- }
- size = -ENOENT;
- entry = &est->entry[event_low];
- if (entry->size == 0 && entry->offset == 0) { /* unknown? */
- dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
- est, est->type_event_high, est->vendor, est->product,
- est->entries, event_low);
- goto out;
- }
- offset = entry->offset; /* extra fries with that? */
- if (offset == 0)
- size = entry->size;
- else {
-		/* Oops, got an extra size field at 'offset'--read it */
- const void *ptr = rceb;
- size_t type_size = 0;
- offset--;
- size = -ENOSPC; /* enough data for more? */
- switch (entry->type) {
- case UWB_EST_16: type_size = sizeof(__le16); break;
- case UWB_EST_8: type_size = sizeof(u8); break;
- default: BUG();
- }
- if (offset + type_size > rceb_size) {
- dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
- "not enough data to read extra size\n",
- est, est->type_event_high, est->vendor,
- est->product, est->entries);
- goto out;
- }
- size = entry->size;
- ptr += offset;
- switch (entry->type) {
- case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break;
- case UWB_EST_8: size += *(u8 *)ptr; break;
- default: BUG();
- }
- }
-out:
- return size;
-}
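
The .offset values in the tables earlier are stored off by one on purpose: zero means "fixed-size event", anything else is 1 + the byte offset of the extra-length field, which the offset-- above undoes. A user-space model of the lookup (hypothetical entry type; host-endian 16-bit length where the driver handles both le16 and u8 variants):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct est_entry { size_t size, offset; };	/* offset = 1 + real offset, 0 = fixed */

/* Total event size, or negative on unknown event / short buffer. */
static long est_size(const struct est_entry *e, const uint8_t *buf, size_t avail)
{
	uint16_t extra;
	size_t off;

	if (!e->size && !e->offset)
		return -1;			/* unknown event code */
	if (!e->offset)
		return (long)e->size;		/* fixed-size event */
	off = e->offset - 1;			/* undo the off-by-one encoding */
	if (off + sizeof(extra) > avail)
		return -2;			/* length field not readable yet */
	memcpy(&extra, buf + off, sizeof(extra));
	return (long)(e->size + extra);
}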
-
-
-/**
- * Guesses the size of a WA event
- *
- * @rceb: pointer to the buffer with the event
- * @rceb_size: size of the area pointed to by @rceb in bytes.
- * @returns: > 0 Size of the event
- * -ENOSPC An area big enough was not provided to look
- * ahead into the event's guts and guess the size.
- *           -ENOENT      Unknown event code (wEvent).
- *
- * This will look at the received RCEB and guess what is the total
- * size by checking all the tables registered with
- * uwb_est_register(). For variable sized events, it will look further
- * ahead into their length field to see how much data should be read.
- *
- * Note this size is *not* final--the neh (Notification/Event Handle)
- * might specify an extra size to add or replace.
- */
-ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
- size_t rceb_size)
-{
- /* FIXME: add vendor/product data */
- ssize_t size;
- struct device *dev = &rc->uwb_dev.dev;
- unsigned long flags;
- unsigned itr;
- u16 type_event_high, event;
-
- read_lock_irqsave(&uwb_est_lock, flags);
- size = -ENOSPC;
- if (rceb_size < sizeof(*rceb))
- goto out;
- event = le16_to_cpu(rceb->wEvent);
- type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
- for (itr = 0; itr < uwb_est_used; itr++) {
- if (uwb_est[itr].type_event_high != type_event_high)
- continue;
- size = uwb_est_get_size(rc, &uwb_est[itr],
- event & 0x00ff, rceb, rceb_size);
- /* try more tables that might handle the same type */
- if (size != -ENOENT)
- goto out;
- }
- dev_dbg(dev,
- "event 0x%02x/%04x/%02x: no handlers available; RCEB %4ph\n",
- (unsigned) rceb->bEventType,
- (unsigned) le16_to_cpu(rceb->wEvent),
- (unsigned) rceb->bEventContext,
- rceb);
- size = -ENOENT;
-out:
- read_unlock_irqrestore(&uwb_est_lock, flags);
- return size;
-}
-EXPORT_SYMBOL_GPL(uwb_est_find_size);
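
The lookup key used above packs the event family into 16 bits: the high byte is bEventType and the low byte is the high byte of wEvent; the low byte of wEvent then indexes into whichever table matched. Making the split explicit (hypothetical helpers):

#include <stdint.h>

static uint16_t est_key(uint8_t event_type, uint16_t wevent)
{
	return (uint16_t)(event_type << 8 | wevent >> 8);	/* selects a table */
}

static uint8_t est_index(uint16_t wevent)
{
	return wevent & 0x00ff;					/* entry in that table */
}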
diff --git a/drivers/staging/uwb/hwa-rc.c b/drivers/staging/uwb/hwa-rc.c
deleted file mode 100644
index b6effad749d7..000000000000
--- a/drivers/staging/uwb/hwa-rc.c
+++ /dev/null
@@ -1,929 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6])
- * Radio Control command/event transport
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Initialize the Radio Control interface Driver.
- *
- * For each device probed, creates an 'struct hwarc' which contains
- * just the representation of the UWB Radio Controller, and the logic
- * for reading notifications and passing them to the UWB Core.
- *
- * So we initialize all of those, register the UWB Radio Controller
- * and setup the notification/event handle to pipe the notifications
- * to the UWB management Daemon.
- *
- * Command and event filtering.
- *
- * This is the driver for the Radio Control Interface described in WUSB
- * 1.0. The core UWB module assumes that all drivers are compliant to the
- * WHCI 0.95 specification. We thus create a filter that parses all
- * incoming messages from the (WUSB 1.0) device and manipulate them to
- * conform to the WHCI 0.95 specification. Similarly, outgoing messages
- * are parsed and manipulated to conform to the WUSB 1.0 compliant messages
- * that the device expects. Only a few messages are affected:
- * Affected events:
- * UWB_RC_EVT_BEACON
- * UWB_RC_EVT_BP_SLOT_CHANGE
- * UWB_RC_EVT_DRP_AVAIL
- * UWB_RC_EVT_DRP
- * Affected commands:
- * UWB_RC_CMD_SCAN
- * UWB_RC_CMD_SET_DRP_IE
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-#include "../wusbcore/include/wusb.h"
-#include "../wusbcore/include/wusb-wa.h"
-#include "uwb.h"
-
-#include "uwb-internal.h"
-
-/* The device uses commands and events from the WHCI specification, although
- * reporting itself as WUSB compliant. */
-#define WUSB_QUIRK_WHCI_CMD_EVT 0x01
-
-/**
- * Descriptor for an instance of the UWB Radio Control Driver that
- * attaches to the RCI interface of the Host Wired Adapter.
- *
- * Unless there is a lock specific to the 'data members', all access
- * is protected by uwb_rc->mutex.
- *
- * The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to
- * @rd_buffer. Note there is no locking because it is perfectly (heh!)
- * serialized--probe() submits an URB, callback is called, processes
- * the data (synchronously), submits another URB, and so on. There is
- * no concurrent access to the buffer.
- */
-struct hwarc {
- struct usb_device *usb_dev;
- struct usb_interface *usb_iface;
- struct uwb_rc *uwb_rc; /* UWB host controller */
- struct urb *neep_urb; /* Notification endpoint handling */
- struct edc neep_edc;
- void *rd_buffer; /* NEEP read buffer */
-};
-
-
-/* Beacon received notification (WUSB 1.0 [8.6.3.2]) */
-struct uwb_rc_evt_beacon_WUSB_0100 {
- struct uwb_rceb rceb;
- u8 bChannelNumber;
- __le16 wBPSTOffset;
- u8 bLQI;
- u8 bRSSI;
- __le16 wBeaconInfoLength;
- u8 BeaconInfo[];
-} __attribute__((packed));
-
-/**
- * Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95
- *
- * @header: the incoming event
- * @buf_size: size of buffer containing incoming event
- * @new_size: size of event after filtering completed
- *
- * The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at
- * the time we receive the beacon from WUSB so we just set it to
- * UWB_RC_BEACON_TYPE_NEIGHBOR as a default.
- * The solution below allocates memory upon receipt of every beacon from a
- * WUSB device. This will degrade performance. What is the right way to
- * do this?
- */
-static
-int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc,
- struct uwb_rceb **header,
- const size_t buf_size,
- size_t *new_size)
-{
- struct uwb_rc_evt_beacon_WUSB_0100 *be;
- struct uwb_rc_evt_beacon *newbe;
- size_t bytes_left, ielength;
- struct device *dev = &rc->uwb_dev.dev;
-
- be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb);
- bytes_left = buf_size;
- if (bytes_left < sizeof(*be)) {
- dev_err(dev, "Beacon Received Notification: Not enough data "
- "to decode for filtering (%zu vs %zu bytes needed)\n",
- bytes_left, sizeof(*be));
- return -EINVAL;
- }
- bytes_left -= sizeof(*be);
- ielength = le16_to_cpu(be->wBeaconInfoLength);
- if (bytes_left < ielength) {
- dev_err(dev, "Beacon Received Notification: Not enough data "
- "to decode IEs (%zu vs %zu bytes needed)\n",
- bytes_left, ielength);
- return -EINVAL;
- }
- newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC);
- if (newbe == NULL)
- return -ENOMEM;
- newbe->rceb = be->rceb;
- newbe->bChannelNumber = be->bChannelNumber;
- newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR;
- newbe->wBPSTOffset = be->wBPSTOffset;
- newbe->bLQI = be->bLQI;
- newbe->bRSSI = be->bRSSI;
- newbe->wBeaconInfoLength = be->wBeaconInfoLength;
- memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength);
- *header = &newbe->rceb;
- *new_size = sizeof(*newbe) + ielength;
- return 1; /* calling function will free memory */
-}
-
-
-/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]) */
-struct uwb_rc_evt_drp_avail_WUSB_0100 {
- struct uwb_rceb rceb;
- __le16 wIELength;
- u8 IEData[];
-} __attribute__((packed));
-
-/**
- * Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95
- *
- * @header: the incoming event
- * @buf_size: size of buffer containing incoming event
- * @new_size: size of event after filtering completed
- */
-static
-int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc,
- struct uwb_rceb **header,
- const size_t buf_size,
- size_t *new_size)
-{
- struct uwb_rc_evt_drp_avail_WUSB_0100 *da;
- struct uwb_rc_evt_drp_avail *newda;
- struct uwb_ie_hdr *ie_hdr;
- size_t bytes_left, ielength;
- struct device *dev = &rc->uwb_dev.dev;
-
-
- da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb);
- bytes_left = buf_size;
- if (bytes_left < sizeof(*da)) {
- dev_err(dev, "Not enough data to decode DRP Avail "
- "Notification for filtering. Expected %zu, "
- "received %zu.\n", (size_t)sizeof(*da), bytes_left);
- return -EINVAL;
- }
- bytes_left -= sizeof(*da);
- ielength = le16_to_cpu(da->wIELength);
- if (bytes_left < ielength) {
- dev_err(dev, "DRP Avail Notification filter: IE length "
- "[%zu bytes] does not match actual length "
- "[%zu bytes].\n", ielength, bytes_left);
- return -EINVAL;
- }
- if (ielength < sizeof(*ie_hdr)) {
- dev_err(dev, "DRP Avail Notification filter: Not enough "
- "data to decode IE [%zu bytes, %zu needed]\n",
- ielength, sizeof(*ie_hdr));
- return -EINVAL;
- }
- ie_hdr = (void *) da->IEData;
- if (ie_hdr->length > 32) {
- dev_err(dev, "DRP Availability Change event has unexpected "
- "length for filtering. Expected < 32 bytes, "
- "got %zu bytes.\n", (size_t)ie_hdr->length);
- return -EINVAL;
- }
- newda = kzalloc(sizeof(*newda), GFP_ATOMIC);
- if (newda == NULL)
- return -ENOMEM;
- newda->rceb = da->rceb;
- memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length);
- *header = &newda->rceb;
- *new_size = sizeof(*newda);
- return 1; /* calling function will free memory */
-}
-
-
-/* DRP notification (WUSB 1.0 [8.6.3.9]) */
-struct uwb_rc_evt_drp_WUSB_0100 {
- struct uwb_rceb rceb;
- struct uwb_dev_addr wSrcAddr;
- u8 bExplicit;
- __le16 wIELength;
- u8 IEData[];
-} __attribute__((packed));
-
-/**
- * Filter WUSB 1.0 DRP Notification to be WHCI 0.95
- *
- * @header: the incoming event
- * @buf_size: size of buffer containing incoming event
- * @new_size: size of event after filtering completed
- *
- * It is hard to manage DRP reservations without having a Reason code.
- * Unfortunately there is none in the WUSB spec. We just set the default to
- * DRP IE RECEIVED.
- * We do not currently use the bBeaconSlotNumber value, so we set this to
- * zero for now.
- */
-static
-int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc,
- struct uwb_rceb **header,
- const size_t buf_size,
- size_t *new_size)
-{
- struct uwb_rc_evt_drp_WUSB_0100 *drpev;
- struct uwb_rc_evt_drp *newdrpev;
- size_t bytes_left, ielength;
- struct device *dev = &rc->uwb_dev.dev;
-
- drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb);
- bytes_left = buf_size;
- if (bytes_left < sizeof(*drpev)) {
- dev_err(dev, "Not enough data to decode DRP Notification "
- "for filtering. Expected %zu, received %zu.\n",
- (size_t)sizeof(*drpev), bytes_left);
- return -EINVAL;
- }
- ielength = le16_to_cpu(drpev->wIELength);
- bytes_left -= sizeof(*drpev);
- if (bytes_left < ielength) {
- dev_err(dev, "DRP Notification filter: header length [%zu "
- "bytes] does not match actual length [%zu "
- "bytes].\n", ielength, bytes_left);
- return -EINVAL;
- }
- newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC);
- if (newdrpev == NULL)
- return -ENOMEM;
- newdrpev->rceb = drpev->rceb;
- newdrpev->src_addr = drpev->wSrcAddr;
- newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD;
- newdrpev->beacon_slot_number = 0;
- newdrpev->ie_length = drpev->wIELength;
- memcpy(newdrpev->ie_data, drpev->IEData, ielength);
- *header = &newdrpev->rceb;
- *new_size = sizeof(*newdrpev) + ielength;
- return 1; /* calling function will free memory */
-}
-
-
-/* Scan Command (WUSB 1.0 [8.6.2.5]) */
-struct uwb_rc_cmd_scan_WUSB_0100 {
- struct uwb_rccb rccb;
- u8 bChannelNumber;
- u8 bScanState;
-} __attribute__((packed));
-
-/**
- * Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command
- *
- * @header: command sent to device (compliant to WHCI 0.95)
- * @size: size of command sent to device
- *
- * We only reduce the size by two bytes because the WUSB 1.0 scan command
- * does not have the last field (wStarttime). Also, make sure we don't send
- * the device an unexpected scan type.
- */
-static
-int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc,
- struct uwb_rccb **header,
- size_t *size)
-{
- struct uwb_rc_cmd_scan *sc;
-
- sc = container_of(*header, struct uwb_rc_cmd_scan, rccb);
-
- if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME)
- sc->bScanState = UWB_SCAN_ONLY;
- /* Don't send the last two bytes. */
- *size -= 2;
- return 0;
-}
-
-
-/* SET DRP IE command (WUSB 1.0 [8.6.2.7]) */
-struct uwb_rc_cmd_set_drp_ie_WUSB_0100 {
- struct uwb_rccb rccb;
- u8 bExplicit;
- __le16 wIELength;
- struct uwb_ie_drp IEData[];
-} __attribute__((packed));
-
-/**
- * Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command
- *
- * @header: command sent to device (compliant to WHCI 0.95)
- * @size: size of command sent to device
- *
- * WUSB has an extra bExplicit field - we assume always explicit
- * negotiation so this field is set. The command expected by the device is
- * thus larger than the one prepared by the driver so we need to
- * reallocate memory to accommodate this.
- * We trust the driver to send us the correct data so no checking is done
- * on incoming data - even though it is variable length.
- */
-static
-int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc,
- struct uwb_rccb **header,
- size_t *size)
-{
- struct uwb_rc_cmd_set_drp_ie *orgcmd;
- struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd;
- size_t ielength;
-
- orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb);
- ielength = le16_to_cpu(orgcmd->wIELength);
- cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
- cmd->rccb = orgcmd->rccb;
- cmd->bExplicit = 0;
- cmd->wIELength = orgcmd->wIELength;
- memcpy(cmd->IEData, orgcmd->IEData, ielength);
- *header = &cmd->rccb;
- *size = sizeof(*cmd) + ielength;
- return 1; /* calling function will free memory */
-}
-
-
-/**
- * Filter data from WHCI driver to WUSB device
- *
- * @header: WHCI 0.95 compliant command from driver
- * @size: length of command
- *
- * The routine managing commands to the device (uwb_rc_cmd()) will call the
- * filtering function pointer (if it exists) before it passes any data to
- * the device. At this time the command has been formatted according to
- * WHCI 0.95 and is ready to be sent to the device.
- *
- * The filter function will be provided with the current command and its
- * length. The function will manipulate the command if necessary and
- * potentially reallocate memory for a command that needed more memory that
- * the given command. If new memory was created the function will return 1
- * to indicate to the calling function that the memory need to be freed
- * when not needed any more. The size will contain the new length of the
- * command.
- * If memory has not been allocated we rely on the original mechanisms to
- * free the memory of the command - even when we reduce the value of size.
- */
-static
-int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header,
- size_t *size)
-{
- int result;
- struct uwb_rccb *rccb = *header;
- int cmd = le16_to_cpu(rccb->wCommand);
- switch (cmd) {
- case UWB_RC_CMD_SCAN:
- result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size);
- break;
- case UWB_RC_CMD_SET_DRP_IE:
- result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size);
- break;
- default:
- result = -ENOANO;
- break;
- }
- return result;
-}
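
The return convention spelled out above (0: command filtered in place; 1: the filter allocated a replacement buffer that the caller must free; negative: error) is the easiest part of this interface to get wrong. Below is a minimal userspace sketch of a dispatcher honoring that contract; all names in it (fake_rccb, grow_filter, send_cmd) are invented for illustration and are not part of the driver.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct uwb_rccb; purely illustrative. */
struct fake_rccb { unsigned char bytes[4]; };

/*
 * Hypothetical filter: replaces the command with a larger buffer and
 * returns 1 so the caller knows it now owns the replacement (the same
 * convention hwarc_filter_cmd_set_drp_ie_WUSB_0100() uses above).
 */
static int grow_filter(struct fake_rccb **header, size_t *size)
{
	unsigned char *bigger = calloc(1, *size + 1);

	if (bigger == NULL)
		return -ENOMEM;
	memcpy(bigger, *header, *size);
	*header = (struct fake_rccb *)bigger;
	*size += 1;
	return 1;	/* tell the caller to free the new buffer */
}

/* Dispatcher honoring the 0/1/negative filter contract. */
static int send_cmd(struct fake_rccb *cmd, size_t size,
		    int (*filter)(struct fake_rccb **, size_t *))
{
	struct fake_rccb *header = cmd;
	int needs_free = 0;

	if (filter) {
		int rc = filter(&header, &size);

		if (rc < 0)
			return rc;
		needs_free = (rc == 1);
	}
	printf("sending %zu bytes\n", size);	/* stand-in for the I/O */
	if (needs_free)
		free(header);
	return 0;
}

int main(void)
{
	struct fake_rccb cmd = { { 1, 2, 3, 4 } };

	return send_cmd(&cmd, sizeof(cmd), grow_filter);
}
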
-
-
-/**
- * Filter data from WHCI driver to WUSB device
- *
- * @header: WHCI 0.95 compliant command from driver
- * @size: length of command
- *
- * Filter commands based on which protocol the device supports. The WUSB
- * errata should be the same as WHCI 0.95 so we do not filter that here -
- * only WUSB 1.0.
- */
-static
-int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header,
- size_t *size)
-{
- int result = -ENOANO;
- if (rc->version == 0x0100)
- result = hwarc_filter_cmd_WUSB_0100(rc, header, size);
- return result;
-}
-
-
-/**
- * Compute an event's size as its fixed core size plus the variable
- * length stored at a given offset
- *
- * @rceb: event for which we compute the size; it contains a variable
- * length field.
- * @core_size: size of the "non variable" part of the event
- * @offset: place in event where the length of the variable part is stored
- * @buf_size: total length of buffer in which event arrived - we need to make
- * sure the offset we read is still inside the buffer
- */
-static
-ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
- size_t core_size, size_t offset,
- const size_t buf_size)
-{
- ssize_t size = -ENOSPC;
- const void *ptr = rceb;
- size_t type_size = sizeof(__le16);
- struct device *dev = &rc->uwb_dev.dev;
-
- if (offset + type_size >= buf_size) {
- dev_err(dev, "Not enough data to read extra size of event "
- "0x%02x/%04x/%02x, only got %zu bytes.\n",
- rceb->bEventType, le16_to_cpu(rceb->wEvent),
- rceb->bEventContext, buf_size);
- goto out;
- }
- ptr += offset;
- size = core_size + le16_to_cpu(*(__le16 *)ptr);
-out:
- return size;
-}
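
The helper above generalizes to any event whose layout is a fixed core followed by a 16-bit length field at a known offset. A standalone sketch with an invented event layout (fake_evt is hypothetical; the length decode assumes a little-endian host, where the driver would use le16_to_cpu()):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/*
 * Hypothetical variable-length event: four fixed bytes, then 'len'
 * bytes of payload (len stands in for fields like wBeaconInfoLength).
 */
struct fake_evt {
	uint8_t  type, ctx;
	uint16_t len;		/* little-endian on the wire */
};

static ssize_t event_size(const void *buf, size_t core_size,
			  size_t offset, size_t buf_size)
{
	uint16_t le_len;

	/* Same idea as hwarc_get_event_size(): the length field
	 * itself must lie inside the received buffer. */
	if (offset + sizeof(le_len) > buf_size)
		return -1;
	memcpy(&le_len, (const uint8_t *)buf + offset, sizeof(le_len));
	return core_size + le_len;	/* le16_to_cpu() on a BE host */
}

int main(void)
{
	uint8_t buf[16] = { 0xfd, 0x01, 0x05, 0x00 };	/* len = 5 */

	printf("event size: %zd\n",
	       event_size(buf, sizeof(struct fake_evt),
			  offsetof(struct fake_evt, len), sizeof(buf)));
	return 0;
}
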
-
-
-/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]) */
-struct uwb_rc_evt_bp_slot_change_WUSB_0100 {
- struct uwb_rceb rceb;
- u8 bSlotNumber;
-} __attribute__((packed));
-
-
-/**
- * Filter data from WUSB device to WHCI driver
- *
- * @header: incoming event
- * @buf_size: size of buffer in which event arrived
- * @_real_size: actual size of the event in the buffer
- * @_new_size: size of the event after filtering
- *
- * We don't know how the buffer is constructed - there may be more than one
- * event in it so buffer length does not determine event length. We first
- * determine the expected size of the incoming event. This value is passed
- * back only if the actual filtering succeeded (so we know the computed
- * expected size is correct). This value will be zero if
- * the event did not need any filtering.
- *
- * WHCI interprets the BP Slot Change event's data differently than
- * WUSB. The event sizes are exactly the same. The data field
- * indicates the new beacon slot in which an RC is transmitting its
- * beacon. The maximum value of this is 96 (wMacBPLength ECMA-368
- * 17.16 (Table 117)). We thus know that the WUSB value will not set
- * the bit bNoSlot, so we don't really do anything (placeholder).
- */
-static
-int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header,
- const size_t buf_size, size_t *_real_size,
- size_t *_new_size)
-{
- int result = -ENOANO;
- struct uwb_rceb *rceb = *header;
- int event = le16_to_cpu(rceb->wEvent);
- ssize_t event_size;
- size_t core_size, offset;
-
- if (rceb->bEventType != UWB_RC_CET_GENERAL)
- goto out;
- switch (event) {
- case UWB_RC_EVT_BEACON:
- core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100);
- offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100,
- wBeaconInfoLength);
- event_size = hwarc_get_event_size(rc, rceb, core_size,
- offset, buf_size);
- if (event_size < 0)
- goto out;
- *_real_size = event_size;
- result = hwarc_filter_evt_beacon_WUSB_0100(rc, header,
- buf_size, _new_size);
- break;
- case UWB_RC_EVT_BP_SLOT_CHANGE:
- *_new_size = *_real_size =
- sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100);
- result = 0;
- break;
-
- case UWB_RC_EVT_DRP_AVAIL:
- core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100);
- offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100,
- wIELength);
- event_size = hwarc_get_event_size(rc, rceb, core_size,
- offset, buf_size);
- if (event_size < 0)
- goto out;
- *_real_size = event_size;
- result = hwarc_filter_evt_drp_avail_WUSB_0100(
- rc, header, buf_size, _new_size);
- break;
-
- case UWB_RC_EVT_DRP:
- core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100);
- offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength);
- event_size = hwarc_get_event_size(rc, rceb, core_size,
- offset, buf_size);
- if (event_size < 0)
- goto out;
- *_real_size = event_size;
- result = hwarc_filter_evt_drp_WUSB_0100(rc, header,
- buf_size, _new_size);
- break;
-
- default:
- break;
- }
-out:
- return result;
-}
-
-/**
- * Filter data from WUSB device to WHCI driver
- *
- * @header: incoming event
- * @buf_size: size of buffer in which event arrived
- * @_real_size: actual size of the event in the buffer
- * @_new_size: size of the event after filtering
- *
- * Filter events based on which protocol the device supports. The WUSB
- * errata should be the same as WHCI 0.95 so we do not filter that here -
- * only WUSB 1.0.
- *
- * If we don't handle it, we return -ENOANO (why the weird error code?
- * well, so if I get it, I can pinpoint the place in the code that raised
- * it...after all, not too many places use the higher error codes).
- */
-static
-int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header,
- const size_t buf_size, size_t *_real_size,
- size_t *_new_size)
-{
- int result = -ENOANO;
- if (rc->version == 0x0100)
- result = hwarc_filter_event_WUSB_0100(
- rc, header, buf_size, _real_size, _new_size);
- return result;
-}
-
-
-/**
- * Execute a UWB RC command on the HWA
- *
- * @uwb_rc: Instance of a Radio Controller that is a HWA
- * @cmd: Buffer containing the RCCB and payload to execute
- * @cmd_size: Size of the command buffer.
- *
- * NOTE: rc's mutex has to be locked
- */
-static
-int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size)
-{
- struct hwarc *hwarc = uwb_rc->priv;
- return usb_control_msg(
- hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0),
- WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- (void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */);
-}
-
-static
-int hwarc_reset(struct uwb_rc *uwb_rc)
-{
- struct hwarc *hwarc = uwb_rc->priv;
- int result;
-
- /* device lock must be held when calling usb_reset_device. */
- result = usb_lock_device_for_reset(hwarc->usb_dev, NULL);
- if (result >= 0) {
- result = usb_reset_device(hwarc->usb_dev);
- usb_unlock_device(hwarc->usb_dev);
- }
-
- return result;
-}
-
-/**
- * Callback for the notification and event endpoint
- *
- * Checks that everything is fine and then passes the read data to
- * the notification/event handling mechanism (neh).
- */
-static
-void hwarc_neep_cb(struct urb *urb)
-{
- struct hwarc *hwarc = urb->context;
- struct usb_interface *usb_iface = hwarc->usb_iface;
- struct device *dev = &usb_iface->dev;
- int result;
-
- switch (result = urb->status) {
- case 0:
- uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer,
- urb->actual_length);
- break;
- case -ECONNRESET: /* Not an error, but a controlled situation; */
- case -ENOENT: /* (we killed the URB)...so, no broadcast */
- goto out;
- case -ESHUTDOWN: /* going away! */
- goto out;
- default: /* On general errors, retry unless it gets ugly */
- if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS,
- EDC_ERROR_TIMEFRAME))
- goto error_exceeded;
- dev_err(dev, "NEEP: URB error %d\n", urb->status);
- }
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result < 0 && result != -ENODEV && result != -EPERM) {
- /* ignoring unrecoverable errors */
-		dev_err(dev, "NEEP: Can't resubmit URB (%d), resetting device\n",
- result);
- goto error;
- }
-out:
- return;
-
-error_exceeded:
- dev_err(dev, "NEEP: URB max acceptable errors "
- "exceeded, resetting device\n");
-error:
- uwb_rc_neh_error(hwarc->uwb_rc, result);
- uwb_rc_reset_all(hwarc->uwb_rc);
- return;
-}
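
The edc_inc() call above implements an error-density policy: transient URB errors are retried unless too many land inside one time window, at which point the device is reset. The real helpers live in the UWB headers; the toy below only illustrates the idea, with invented names and a coarse one-second clock:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Toy error-density counter: restart the count when the window has
 * expired, trip when max_errors is exceeded inside one window. */
struct toy_edc {
	unsigned int count;
	time_t window_start;
};

static bool toy_edc_inc(struct toy_edc *edc, unsigned int max_errors,
			unsigned int window_secs)
{
	time_t now = time(NULL);

	if (now - edc->window_start > (time_t)window_secs) {
		edc->window_start = now;
		edc->count = 0;
	}
	return ++edc->count > max_errors;	/* true: give up, reset */
}

int main(void)
{
	struct toy_edc edc = { 0, time(NULL) };
	int i;

	for (i = 0; i < 5; i++)
		printf("error %d -> %s\n", i,
		       toy_edc_inc(&edc, 3, 60) ? "reset" : "retry");
	return 0;
}
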
-
-static void hwarc_init(struct hwarc *hwarc)
-{
- edc_init(&hwarc->neep_edc);
-}
-
-/**
- * Initialize the notification/event endpoint stuff
- *
- * Note this is effectively a parallel thread; it knows that
- * hwarc->uwb_rc always exists because the existence of a 'hwarc'
- * means that there is a reference on the hwarc->uwb_rc (see
- * _probe()), and thus _neep_cb() can execute safely.
- */
-static int hwarc_neep_init(struct uwb_rc *rc)
-{
- struct hwarc *hwarc = rc->priv;
- struct usb_interface *iface = hwarc->usb_iface;
- struct usb_device *usb_dev = interface_to_usbdev(iface);
- struct device *dev = &iface->dev;
- int result;
- struct usb_endpoint_descriptor *epd;
-
- epd = &iface->cur_altsetting->endpoint[0].desc;
- hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL);
- if (hwarc->rd_buffer == NULL) {
- dev_err(dev, "Unable to allocate notification's read buffer\n");
- goto error_rd_buffer;
- }
- hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (hwarc->neep_urb == NULL)
- goto error_urb_alloc;
- usb_fill_int_urb(hwarc->neep_urb, usb_dev,
- usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
- hwarc->rd_buffer, PAGE_SIZE,
- hwarc_neep_cb, hwarc, epd->bInterval);
- result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC);
- if (result < 0) {
- dev_err(dev, "Cannot submit notification URB: %d\n", result);
- goto error_neep_submit;
- }
- return 0;
-
-error_neep_submit:
- usb_free_urb(hwarc->neep_urb);
- hwarc->neep_urb = NULL;
-error_urb_alloc:
- free_page((unsigned long)hwarc->rd_buffer);
- hwarc->rd_buffer = NULL;
-error_rd_buffer:
- return -ENOMEM;
-}
-
-
-/** Clean up all the notification endpoint resources */
-static void hwarc_neep_release(struct uwb_rc *rc)
-{
- struct hwarc *hwarc = rc->priv;
-
- usb_kill_urb(hwarc->neep_urb);
- usb_free_urb(hwarc->neep_urb);
- hwarc->neep_urb = NULL;
-
- free_page((unsigned long)hwarc->rd_buffer);
- hwarc->rd_buffer = NULL;
-}
-
-/**
- * Get the version from class-specific descriptor
- *
- * NOTE: this descriptor comes bundled in the big configuration
- * descriptor that includes the interface and endpoint descriptors, so
- * we just look for it in the cached copy kept by the USB stack.
- *
- * NOTE2: We convert LE fields to CPU order.
- */
-static int hwarc_get_version(struct uwb_rc *rc)
-{
- int result;
-
- struct hwarc *hwarc = rc->priv;
- struct uwb_rc_control_intf_class_desc *descr;
- struct device *dev = &rc->uwb_dev.dev;
- struct usb_device *usb_dev = hwarc->usb_dev;
- char *itr;
- struct usb_descriptor_header *hdr;
- size_t itr_size, actconfig_idx;
- u16 version;
-
-	/* pointer subtraction already yields the config index */
-	actconfig_idx = usb_dev->actconfig - usb_dev->config;
- itr = usb_dev->rawdescriptors[actconfig_idx];
- itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
- while (itr_size >= sizeof(*hdr)) {
- hdr = (struct usb_descriptor_header *) itr;
- dev_dbg(dev, "Extra device descriptor: "
- "type %02x/%u bytes @ %zu (%zu left)\n",
- hdr->bDescriptorType, hdr->bLength,
- (itr - usb_dev->rawdescriptors[actconfig_idx]),
- itr_size);
- if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL)
- goto found;
- itr += hdr->bLength;
- itr_size -= hdr->bLength;
- }
- dev_err(dev, "cannot find Radio Control Interface Class descriptor\n");
- return -ENODEV;
-
-found:
- result = -EINVAL;
- if (hdr->bLength > itr_size) { /* is it available? */
- dev_err(dev, "incomplete Radio Control Interface Class "
- "descriptor (%zu bytes left, %u needed)\n",
- itr_size, hdr->bLength);
- goto error;
- }
- if (hdr->bLength < sizeof(*descr)) {
- dev_err(dev, "short Radio Control Interface Class "
- "descriptor\n");
- goto error;
- }
- descr = (struct uwb_rc_control_intf_class_desc *) hdr;
- /* Make LE fields CPU order */
- version = __le16_to_cpu(descr->bcdRCIVersion);
- if (version != 0x0100) {
-		dev_err(dev, "Device reports protocol version 0x%04x. We "
-			"do not support that.\n", version);
- result = -EINVAL;
- goto error;
- }
- rc->version = version;
-	dev_dbg(dev, "Device supports WUSB protocol version 0x%04x\n", rc->version);
- result = 0;
-error:
- return result;
-}
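
The descriptor walk above works because every USB descriptor starts with its own bLength and bDescriptorType, so a chain can be scanned without knowing each descriptor's layout. A standalone sketch over a synthetic blob; the 0xda type byte is a placeholder rather than the real USB_DT_CS_RADIO_CONTROL value, and unlike the loop above the sketch also rejects a zero bLength so a malformed chain cannot spin forever:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Generic USB descriptor header: length first, then the type byte. */
struct desc_hdr {
	uint8_t bLength;
	uint8_t bDescriptorType;
};

static const struct desc_hdr *find_desc(const uint8_t *buf, size_t len,
					uint8_t wanted_type)
{
	while (len >= sizeof(struct desc_hdr)) {
		const struct desc_hdr *hdr = (const void *)buf;

		if (hdr->bLength < sizeof(*hdr) || hdr->bLength > len)
			return NULL;	/* malformed chain, stop */
		if (hdr->bDescriptorType == wanted_type)
			return hdr;
		buf += hdr->bLength;
		len -= hdr->bLength;
	}
	return NULL;
}

int main(void)
{
	/* Two 4-byte descriptors of type 0x02, then a 6-byte 0xda. */
	static const uint8_t blob[] = {
		4, 0x02, 0, 0,
		4, 0x02, 0, 0,
		6, 0xda, 0x00, 0x01, 0, 0,
	};

	printf("found: %s\n",
	       find_desc(blob, sizeof(blob), 0xda) ? "yes" : "no");
	return 0;
}
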
-
-/*
- * By creating a 'uwb_rc', we have a reference on it -- that reference
- * is the one we drop when we disconnect.
- *
- * No need to switch altsettings; according to WUSB1.0[8.6.1.1], there
- * is only one altsetting allowed.
- */
-static int hwarc_probe(struct usb_interface *iface,
- const struct usb_device_id *id)
-{
- int result;
- struct uwb_rc *uwb_rc;
- struct hwarc *hwarc;
- struct device *dev = &iface->dev;
-
- if (iface->cur_altsetting->desc.bNumEndpoints < 1)
- return -ENODEV;
- if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
- return -ENODEV;
-
- result = -ENOMEM;
- uwb_rc = uwb_rc_alloc();
- if (uwb_rc == NULL) {
- dev_err(dev, "unable to allocate RC instance\n");
- goto error_rc_alloc;
- }
- hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL);
- if (hwarc == NULL) {
- dev_err(dev, "unable to allocate HWA RC instance\n");
- goto error_alloc;
- }
- hwarc_init(hwarc);
- hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface));
- hwarc->usb_iface = usb_get_intf(iface);
- hwarc->uwb_rc = uwb_rc;
-
- uwb_rc->owner = THIS_MODULE;
- uwb_rc->start = hwarc_neep_init;
- uwb_rc->stop = hwarc_neep_release;
- uwb_rc->cmd = hwarc_cmd;
- uwb_rc->reset = hwarc_reset;
- if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) {
- uwb_rc->filter_cmd = NULL;
- uwb_rc->filter_event = NULL;
- } else {
- uwb_rc->filter_cmd = hwarc_filter_cmd;
- uwb_rc->filter_event = hwarc_filter_event;
- }
-
- result = uwb_rc_add(uwb_rc, dev, hwarc);
- if (result < 0)
- goto error_rc_add;
- result = hwarc_get_version(uwb_rc);
- if (result < 0) {
-		dev_err(dev, "cannot retrieve version of RC\n");
- goto error_get_version;
- }
- usb_set_intfdata(iface, hwarc);
- return 0;
-
-error_get_version:
- uwb_rc_rm(uwb_rc);
-error_rc_add:
- usb_put_intf(iface);
- usb_put_dev(hwarc->usb_dev);
- kfree(hwarc);
-error_alloc:
- uwb_rc_put(uwb_rc);
-error_rc_alloc:
- return result;
-}
-
-static void hwarc_disconnect(struct usb_interface *iface)
-{
- struct hwarc *hwarc = usb_get_intfdata(iface);
- struct uwb_rc *uwb_rc = hwarc->uwb_rc;
-
- usb_set_intfdata(hwarc->usb_iface, NULL);
- uwb_rc_rm(uwb_rc);
- usb_put_intf(hwarc->usb_iface);
- usb_put_dev(hwarc->usb_dev);
- kfree(hwarc);
- uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */
-}
-
-static int hwarc_pre_reset(struct usb_interface *iface)
-{
- struct hwarc *hwarc = usb_get_intfdata(iface);
- struct uwb_rc *uwb_rc = hwarc->uwb_rc;
-
- uwb_rc_pre_reset(uwb_rc);
- return 0;
-}
-
-static int hwarc_post_reset(struct usb_interface *iface)
-{
- struct hwarc *hwarc = usb_get_intfdata(iface);
- struct uwb_rc *uwb_rc = hwarc->uwb_rc;
-
- return uwb_rc_post_reset(uwb_rc);
-}
-
-/** USB device IDs that we handle */
-static const struct usb_device_id hwarc_id_table[] = {
- /* D-Link DUB-1210 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02),
- .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
- /* Intel i1480 (using firmware 1.3PA2-20070828) */
- { USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02),
- .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
- /* Alereon 5310 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x01, 0x02),
- .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
- /* Alereon 5611 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x01, 0x02),
- .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
- /* Generic match for the Radio Control interface */
- { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), },
- { },
-};
-MODULE_DEVICE_TABLE(usb, hwarc_id_table);
-
-static struct usb_driver hwarc_driver = {
- .name = "hwa-rc",
- .id_table = hwarc_id_table,
- .probe = hwarc_probe,
- .disconnect = hwarc_disconnect,
- .pre_reset = hwarc_pre_reset,
- .post_reset = hwarc_post_reset,
-};
-
-module_usb_driver(hwarc_driver);
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/uwb/i1480/Makefile b/drivers/staging/uwb/i1480/Makefile
deleted file mode 100644
index d26fb9b845ae..000000000000
--- a/drivers/staging/uwb/i1480/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o
diff --git a/drivers/staging/uwb/i1480/dfu/Makefile b/drivers/staging/uwb/i1480/dfu/Makefile
deleted file mode 100644
index 4739fdac5922..000000000000
--- a/drivers/staging/uwb/i1480/dfu/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o
-
-i1480-dfu-usb-objs := \
- dfu.o \
- mac.o \
- phy.o \
- usb.o
-
-
diff --git a/drivers/staging/uwb/i1480/dfu/dfu.c b/drivers/staging/uwb/i1480/dfu/dfu.c
deleted file mode 100644
index 9d51ce8faad1..000000000000
--- a/drivers/staging/uwb/i1480/dfu/dfu.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Wireless UWB Link 1480
- * Main driver
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Common code for firmware upload used by the USB and PCI version;
- * i1480_fw_upload() takes a device descriptor and uses the function
- * pointers it provides to upload firmware and prepare the PHY.
- *
- * It also provides common functions used by the rest of the code.
- */
-#include "i1480-dfu.h"
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/random.h>
-#include <linux/export.h>
-#include "../../uwb.h"
-
-/*
- * i1480_rceb_check - Check RCEB for expected field values
- * @i1480: pointer to device for which RCEB is being checked
- * @rceb: RCEB being checked
- * @cmd: which command the RCEB is related to
- * @context: expected context
- * @expected_type: expected event type
- * @expected_event: expected event
- *
- * If @cmd is NULL, do not print error messages, but still return an error
- * code.
- *
- * Return 0 if @rceb matches the expected values, -EINVAL otherwise.
- */
-int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb,
- const char *cmd, u8 context, u8 expected_type,
- unsigned expected_event)
-{
- int result = 0;
- struct device *dev = i1480->dev;
- if (rceb->bEventContext != context) {
- if (cmd)
- dev_err(dev, "%s: unexpected context id 0x%02x "
- "(expected 0x%02x)\n", cmd,
- rceb->bEventContext, context);
- result = -EINVAL;
- }
- if (rceb->bEventType != expected_type) {
- if (cmd)
- dev_err(dev, "%s: unexpected event type 0x%02x "
- "(expected 0x%02x)\n", cmd,
- rceb->bEventType, expected_type);
- result = -EINVAL;
- }
- if (le16_to_cpu(rceb->wEvent) != expected_event) {
- if (cmd)
- dev_err(dev, "%s: unexpected event 0x%04x "
- "(expected 0x%04x)\n", cmd,
- le16_to_cpu(rceb->wEvent), expected_event);
- result = -EINVAL;
- }
- return result;
-}
-EXPORT_SYMBOL_GPL(i1480_rceb_check);
-
-
-/*
- * Execute a Radio Control Command
- *
- * Command data has to be in i1480->cmd_buf.
- *
- * @returns size of the reply data filled in i1480->evt_buf or < 0 errno
- * code on error.
- */
-ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size,
- size_t reply_size)
-{
- ssize_t result;
- struct uwb_rceb *reply = i1480->evt_buf;
- struct uwb_rccb *cmd = i1480->cmd_buf;
- u16 expected_event = reply->wEvent;
- u8 expected_type = reply->bEventType;
- u8 context;
-
- init_completion(&i1480->evt_complete);
- i1480->evt_result = -EINPROGRESS;
- do {
- get_random_bytes(&context, 1);
- } while (context == 0x00 || context == 0xff);
- cmd->bCommandContext = context;
- result = i1480->cmd(i1480, cmd_name, cmd_size);
- if (result < 0)
- goto error;
-	/* wait for the callback to report an event was received */
- result = wait_for_completion_interruptible_timeout(
- &i1480->evt_complete, HZ);
- if (result == 0) {
- result = -ETIMEDOUT;
- goto error;
- }
- if (result < 0)
- goto error;
- result = i1480->evt_result;
- if (result < 0) {
- dev_err(i1480->dev, "%s: command reply reception failed: %zd\n",
- cmd_name, result);
- goto error;
- }
- /*
- * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a
- * spurious notification after firmware is downloaded. So check whether
-	 * the received RCEB is such a notification before assuming that the
- * command has failed.
- */
- if (i1480_rceb_check(i1480, i1480->evt_buf, NULL,
- 0, 0xfd, 0x0022) == 0) {
- /* Now wait for the actual RCEB for this command. */
- result = i1480->wait_init_done(i1480);
- if (result < 0)
- goto error;
- result = i1480->evt_result;
- }
- if (result != reply_size) {
-		dev_err(i1480->dev, "%s returned only %zd bytes, %zu expected\n",
- cmd_name, result, reply_size);
- result = -EINVAL;
- goto error;
- }
- /* Verify we got the right event in response */
- result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context,
- expected_type, expected_event);
-error:
- return result;
-}
-EXPORT_SYMBOL_GPL(i1480_cmd);
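
The random context byte drawn in the loop above is what lets a reply event be matched back to its command; 0x00 and 0xff are skipped as reserved values. The same rejection sampling in isolation (rand() stands in for get_random_bytes() purely for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Pick a context byte in 0x01..0xfe, as i1480_cmd() does. */
static uint8_t pick_context(void)
{
	uint8_t ctx;

	do {
		ctx = (uint8_t)rand();
	} while (ctx == 0x00 || ctx == 0xff);
	return ctx;
}

int main(void)
{
	srand((unsigned int)time(NULL));
	printf("context: 0x%02x\n", pick_context());
	return 0;
}
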
-
-
-static
-int i1480_print_state(struct i1480 *i1480)
-{
- int result;
- u32 *buf = (u32 *) i1480->cmd_buf;
-
- result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf));
- if (result < 0) {
- dev_err(i1480->dev, "cannot read U & L states: %d\n", result);
- goto error;
- }
- dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]);
-error:
- return result;
-}
-
-
-/*
- * PCI probe, firmware uploader
- *
- * _mac_fw_upload() will call rc_setup(), which needs an rc_release().
- */
-int i1480_fw_upload(struct i1480 *i1480)
-{
- int result;
-
- result = i1480_pre_fw_upload(i1480); /* PHY pre fw */
- if (result < 0 && result != -ENOENT) {
- i1480_print_state(i1480);
- goto error;
- }
- result = i1480_mac_fw_upload(i1480); /* MAC fw */
- if (result < 0) {
- if (result == -ENOENT)
- dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n",
- i1480->mac_fw_name);
- else
- i1480_print_state(i1480);
- goto error;
- }
- result = i1480_phy_fw_upload(i1480); /* PHY fw */
- if (result < 0 && result != -ENOENT) {
- i1480_print_state(i1480);
- goto error_rc_release;
- }
- /*
- * FIXME: find some reliable way to check whether firmware is running
- * properly. Maybe use some standard request that has no side effects?
- */
- dev_info(i1480->dev, "firmware uploaded successfully\n");
-error_rc_release:
- if (i1480->rc_release)
- i1480->rc_release(i1480);
- result = 0;
-error:
- return result;
-}
-EXPORT_SYMBOL_GPL(i1480_fw_upload);
diff --git a/drivers/staging/uwb/i1480/dfu/i1480-dfu.h b/drivers/staging/uwb/i1480/dfu/i1480-dfu.h
deleted file mode 100644
index b21d058ecc23..000000000000
--- a/drivers/staging/uwb/i1480/dfu/i1480-dfu.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * i1480 Device Firmware Upload
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This driver is the firmware uploader for the Intel Wireless UWB
- * Link 1480 device (both in the USB and PCI incarnations).
- *
- * The process is quite simple: we stop the device, write the firmware
- * to its memory and then restart it. Wait for the device to let us
- * know it is done booting firmware. Ready.
- *
- * We might have to upload before or after a phy firmware (which might
- * be done in two methods, using a normal firmware image or through
- * the MPI port).
- *
- * Because USB and PCI use common methods, we just make ops out of the
- * common operations (read, write, wait_init_done and cmd) and
- * implement them in usb.c and pci.c.
- *
- * The flow is (some parts omitted):
- *
- * i1480_{usb,pci}_probe() On enumerate/discovery
- * i1480_fw_upload()
- * i1480_pre_fw_upload()
- * __mac_fw_upload()
- * fw_hdrs_load()
- * mac_fw_hdrs_push()
- * i1480->write() [i1480_{usb,pci}_write()]
- * i1480_fw_cmp()
- * i1480->read() [i1480_{usb,pci}_read()]
- * i1480_mac_fw_upload()
- * __mac_fw_upload()
- *       i1480->rc_setup()
- * i1480->wait_init_done()
- * i1480_cmd_reset()
- * i1480->cmd() [i1480_{usb,pci}_cmd()]
- * ...
- * i1480_phy_fw_upload()
- * request_firmware()
- * i1480_mpi_write()
- * i1480->cmd() [i1480_{usb,pci}_cmd()]
- *
- * Once the probe function enumerates the device and uploads the
- * firmware, we just exit with -ENODEV, as we don't really want to
- * attach to the device.
- */
-#ifndef __i1480_DFU_H__
-#define __i1480_DFU_H__
-
-#include <linux/types.h>
-#include <linux/completion.h>
-#include "../../include/spec.h"
-
-#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018))
-
-#if i1480_FW > 0x00000302
-#define i1480_RCEB_EXTENDED
-#endif
-
-struct uwb_rccb;
-struct uwb_rceb;
-
-/*
- * Common firmware upload handlers
- *
- * Normally you embed this struct in another one specific to your hw.
- *
- * @write Write to device's memory from buffer.
- * @read Read from device's memory to i1480->evt_buf.
- * @setup Setup device after basic firmware is uploaded
- * @wait_init_done
- * Wait for the device to send a notification saying init
- * is done.
- * @cmd FOP for issuing the command to the hardware. The
- * command data is contained in i1480->cmd_buf and the size
- * is supplied as an argument. The command replied is put
- * in i1480->evt_buf and the size in i1480->evt_result (or if
- * an error, a < 0 errno code).
- *
- * @cmd_buf Memory buffer used to send commands to the device.
- * Allocated by the upper layers i1480_fw_upload().
- * Size has to be @buf_size.
- * @evt_buf Memory buffer used to place the async notifications
- * received by the hw. Allocated by the upper layers
- * i1480_fw_upload().
- * Size has to be @buf_size.
- * @cmd_complete
- *           Low level driver uses this to notify code waiting for
- * an event that the event has arrived and data is in
- * i1480->evt_buf (and size/result in i1480->evt_result).
- * @hw_rev
- * Use this value to activate dfu code to support new revisions
- * of hardware. i1480_init() sets this to a default value.
- * It should be updated by the USB and PCI code.
- */
-struct i1480 {
- struct device *dev;
-
- int (*write)(struct i1480 *, u32 addr, const void *, size_t);
- int (*read)(struct i1480 *, u32 addr, size_t);
- int (*rc_setup)(struct i1480 *);
- void (*rc_release)(struct i1480 *);
- int (*wait_init_done)(struct i1480 *);
- int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size);
- const char *pre_fw_name;
- const char *mac_fw_name;
- const char *mac_fw_name_deprecate; /* FIXME: Will go away */
- const char *phy_fw_name;
- u8 hw_rev;
-
- size_t buf_size; /* size of both evt_buf and cmd_buf */
- void *evt_buf, *cmd_buf;
- ssize_t evt_result;
- struct completion evt_complete;
-};
-
-static inline
-void i1480_init(struct i1480 *i1480)
-{
- i1480->hw_rev = 1;
- init_completion(&i1480->evt_complete);
-}
-
-extern int i1480_fw_upload(struct i1480 *);
-extern int i1480_pre_fw_upload(struct i1480 *);
-extern int i1480_mac_fw_upload(struct i1480 *);
-extern int i1480_phy_fw_upload(struct i1480 *);
-extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t);
-extern int i1480_rceb_check(const struct i1480 *,
- const struct uwb_rceb *, const char *, u8,
- u8, unsigned);
-
-enum {
- /* Vendor specific command type */
- i1480_CET_VS1 = 0xfd,
- /* i1480 commands */
- i1480_CMD_SET_IP_MAS = 0x000e,
- i1480_CMD_GET_MAC_PHY_INFO = 0x0003,
- i1480_CMD_MPI_WRITE = 0x000f,
- i1480_CMD_MPI_READ = 0x0010,
- /* i1480 events */
-#if i1480_FW > 0x00000302
- i1480_EVT_CONFIRM = 0x0002,
- i1480_EVT_RM_INIT_DONE = 0x0101,
- i1480_EVT_DEV_ADD = 0x0103,
- i1480_EVT_DEV_RM = 0x0104,
- i1480_EVT_DEV_ID_CHANGE = 0x0105,
- i1480_EVT_GET_MAC_PHY_INFO = i1480_CMD_GET_MAC_PHY_INFO,
-#else
- i1480_EVT_CONFIRM = 0x0002,
- i1480_EVT_RM_INIT_DONE = 0x0101,
- i1480_EVT_DEV_ADD = 0x0103,
- i1480_EVT_DEV_RM = 0x0104,
- i1480_EVT_DEV_ID_CHANGE = 0x0105,
- i1480_EVT_GET_MAC_PHY_INFO = i1480_EVT_CONFIRM,
-#endif
-};
-
-
-struct i1480_evt_confirm {
- struct uwb_rceb rceb;
-#ifdef i1480_RCEB_EXTENDED
- __le16 wParamLength;
-#endif
- u8 bResultCode;
-} __attribute__((packed));
-
-
-struct i1480_rceb {
- struct uwb_rceb rceb;
-#ifdef i1480_RCEB_EXTENDED
- __le16 wParamLength;
-#endif
-} __attribute__((packed));
-
-
-/**
- * Get MAC & PHY Information confirm event structure
- *
- * Confirm event returned by the command.
- */
-struct i1480_evt_confirm_GMPI {
-#if i1480_FW > 0x00000302
- struct uwb_rceb rceb;
- __le16 wParamLength;
- __le16 status;
- u8 mac_addr[6]; /* EUI-64 bit IEEE address [still 8 bytes?] */
- u8 dev_addr[2];
- __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */
- u8 hw_rev;
- u8 phy_vendor;
- u8 phy_rev; /* major v = >> 8; minor = v & 0xff */
- __le16 mac_caps;
- u8 phy_caps[3];
- u8 key_stores;
- __le16 mcast_addr_stores;
- u8 sec_mode_supported;
-#else
- struct uwb_rceb rceb;
- u8 status;
- u8 mac_addr[8]; /* EUI-64 bit IEEE address [still 8 bytes?] */
- u8 dev_addr[2];
- __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */
- __le16 phy_fw_rev; /* major v = >> 8; minor = v & 0xff */
- __le16 mac_caps;
- u8 phy_caps;
- u8 key_stores;
- __le16 mcast_addr_stores;
- u8 sec_mode_supported;
-#endif
-} __attribute__((packed));
-
-
-struct i1480_cmd_mpi_write {
- struct uwb_rccb rccb;
- __le16 size;
- u8 data[];
-};
-
-
-struct i1480_cmd_mpi_read {
- struct uwb_rccb rccb;
- __le16 size;
- struct {
- u8 page, offset;
- } __attribute__((packed)) data[];
-} __attribute__((packed));
-
-
-struct i1480_evt_mpi_read {
- struct uwb_rceb rceb;
-#ifdef i1480_RCEB_EXTENDED
- __le16 wParamLength;
-#endif
- u8 bResultCode;
- __le16 size;
- struct {
- u8 page, offset, value;
- } __attribute__((packed)) data[];
-} __attribute__((packed));
-
-
-#endif /* #ifndef __i1480_DFU_H__ */
diff --git a/drivers/staging/uwb/i1480/dfu/mac.c b/drivers/staging/uwb/i1480/dfu/mac.c
deleted file mode 100644
index 6e4d6c9cecf5..000000000000
--- a/drivers/staging/uwb/i1480/dfu/mac.c
+++ /dev/null
@@ -1,496 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Wireless UWB Link 1480
- * MAC Firmware upload implementation
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Implementation of the code for parsing the firmware file (extract
- * the headers and binary code chunks) in the fw_*() functions. The
- * code to upload pre and mac firmwares is the same, so it uses a
- * common entry point in __mac_fw_upload(), which uses the i1480
- * function pointers to push the firmware to the device.
- */
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include "../../uwb.h"
-#include "i1480-dfu.h"
-
-/*
- * Descriptor for a continuous segment of MAC fw data
- */
-struct fw_hdr {
- unsigned long address;
- size_t length;
- const u32 *bin;
- struct fw_hdr *next;
-};
-
-
-/* Free a chain of firmware headers */
-static
-void fw_hdrs_free(struct fw_hdr *hdr)
-{
- struct fw_hdr *next;
-
- while (hdr) {
- next = hdr->next;
- kfree(hdr);
- hdr = next;
- }
-}
-
-
-/* Fill a firmware header descriptor from a memory buffer */
-static
-int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt,
- const char *_data, const u32 *data_itr, const u32 *data_top)
-{
- size_t hdr_offset = (const char *) data_itr - _data;
- size_t remaining_size = (void *) data_top - (void *) data_itr;
- if (data_itr + 2 > data_top) {
- dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at "
- "offset %zu, limit %zu\n",
- hdr_cnt, hdr_offset,
- (const char *) data_itr + 2 - _data,
- (const char *) data_top - _data);
- return -EINVAL;
- }
- hdr->next = NULL;
- hdr->address = le32_to_cpu(*data_itr++);
- hdr->length = le32_to_cpu(*data_itr++);
- hdr->bin = data_itr;
- if (hdr->length > remaining_size) {
- dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; "
- "chunk too long (%zu bytes), only %zu left\n",
- hdr_cnt, hdr_offset, hdr->length, remaining_size);
- return -EINVAL;
- }
- return 0;
-}
-
-
-/**
- * Get a buffer where the firmware is supposed to be and create a
- * chain of headers linking them together.
- *
- * @phdr: where to place the pointer to the first header (headers link
- * to the next via the @hdr->next ptr); need to free the whole
- * chain when done.
- *
- * @_data: Pointer to the data buffer.
- *
- * @_data_size: Size of the data buffer (bytes); data size has to be a
- * multiple of 4. Function will fail if not.
- *
- * Goes over the whole binary blob; reads the first chunk and creates
- * a fw hdr from it (which points to where the data is in @_data and
- * the length of the chunk); then goes on to the next chunk until
- * done. Each header is linked to the next.
- */
-static
-int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr,
- const char *_data, size_t data_size)
-{
- int result;
- unsigned hdr_cnt = 0;
- u32 *data = (u32 *) _data, *data_itr, *data_top;
- struct fw_hdr *hdr, **prev_hdr = phdr;
-
- result = -EINVAL;
- /* Check size is ok and pointer is aligned */
- if (data_size % sizeof(u32) != 0)
- goto error;
- if ((unsigned long) _data % sizeof(u16) != 0)
- goto error;
- *phdr = NULL;
- data_itr = data;
- data_top = (u32 *) (_data + data_size);
- while (data_itr < data_top) {
- result = -ENOMEM;
- hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
- if (hdr == NULL) {
- dev_err(i1480->dev, "Cannot allocate fw header "
- "for chunk #%u\n", hdr_cnt);
- goto error_alloc;
- }
- result = fw_hdr_load(i1480, hdr, hdr_cnt,
- _data, data_itr, data_top);
- if (result < 0)
- goto error_load;
- data_itr += 2 + hdr->length;
- *prev_hdr = hdr;
- prev_hdr = &hdr->next;
- hdr_cnt++;
-	}
- *prev_hdr = NULL;
- return 0;
-
-error_load:
- kfree(hdr);
-error_alloc:
- fw_hdrs_free(*phdr);
-error:
- return result;
-}
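
The firmware image parsed above is a flat sequence of little-endian u32 records: an address word, a word count, then that many data words, repeated to the end of the blob. A standalone parser sketch for that layout; the sample blob is invented, and a little-endian host is assumed where the driver would use le32_to_cpu():

#include <stdint.h>
#include <stdio.h>

/* Walk a blob laid out as fw_hdr_load() expects:
 * u32 address, u32 word count, 'count' u32 data words, repeated. */
static int parse_chunks(const uint32_t *itr, const uint32_t *top)
{
	unsigned int n = 0;

	while (itr < top) {
		uint32_t addr, len;

		if (itr + 2 > top)
			return -1;	/* truncated header */
		addr = itr[0];		/* le32_to_cpu() on a BE host */
		len  = itr[1];
		itr += 2;
		if ((uint32_t)(top - itr) < len)
			return -1;	/* chunk runs past the blob */
		printf("chunk #%u: %u words @ 0x%08x\n",
		       n++, (unsigned int)len, (unsigned int)addr);
		itr += len;
	}
	return 0;
}

int main(void)
{
	/* Two chunks: 2 words @ 0x8000, then 1 word @ 0x9000. */
	static const uint32_t blob[] = {
		0x8000, 2, 0x11111111, 0x22222222,
		0x9000, 1, 0x33333333,
	};

	return parse_chunks(blob, blob + sizeof(blob) / sizeof(blob[0]));
}
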
-
-
-/**
- * Compares a chunk of fw with one in the device's memory
- *
- * @i1480: Device instance
- * @hdr: Pointer to the firmware chunk
- * @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset
- * where the difference was found (plus one).
- *
- * Kind of dirty and simplistic, but does the trick in both the PCI
- * and USB version. We do a quick[er] memcmp(), and if it fails, we do
- * a byte-by-byte comparison to find the offset.
- */
-static
-ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr)
-{
- ssize_t result = 0;
- u32 src_itr = 0, cnt;
- size_t size = hdr->length*sizeof(hdr->bin[0]);
- size_t chunk_size;
- u8 *bin = (u8 *) hdr->bin;
-
- while (size > 0) {
- chunk_size = size < i1480->buf_size ? size : i1480->buf_size;
- result = i1480->read(i1480, hdr->address + src_itr, chunk_size);
- if (result < 0) {
- dev_err(i1480->dev, "error reading for verification: "
- "%zd\n", result);
- goto error;
- }
- if (memcmp(i1480->cmd_buf, bin + src_itr, result)) {
- u8 *buf = i1480->cmd_buf;
- for (cnt = 0; cnt < result; cnt++)
- if (bin[src_itr + cnt] != buf[cnt]) {
- dev_err(i1480->dev, "byte failed at "
- "src_itr %u cnt %u [0x%02x "
- "vs 0x%02x]\n", src_itr, cnt,
- bin[src_itr + cnt], buf[cnt]);
- result = src_itr + cnt + 1;
- goto cmp_failed;
- }
- }
- src_itr += result;
- size -= result;
- }
- result = 0;
-error:
-cmp_failed:
- return result;
-}
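
The verification idiom above, a cheap memcmp() first and a byte-by-byte scan only on mismatch, is worth isolating. A sketch with invented names; it returns the plain offset rather than the offset-plus-one trick used above:

#include <stdio.h>
#include <string.h>

/* Return -1 if the buffers are equal, else the offset of the first
 * differing byte (fast memcmp() path, slow scan only on mismatch). */
static long first_diff(const unsigned char *a, const unsigned char *b,
		       size_t len)
{
	size_t i;

	if (memcmp(a, b, len) == 0)
		return -1;
	for (i = 0; i < len; i++)
		if (a[i] != b[i])
			return (long)i;
	return -1;	/* unreachable when memcmp() disagreed */
}

int main(void)
{
	unsigned char x[] = "firmware", y[] = "firmwarf";

	printf("first difference at offset %ld\n",
	       first_diff(x, y, sizeof(x)));
	return 0;
}
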
-
-
-/**
- * Writes firmware headers to the device.
- *
- * @i1480: Device instance
- * @hdr: Chain of processed firmware headers to push
- * @returns: 0 if ok, < 0 errno on error.
- */
-static
-int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr,
- const char *fw_name, const char *fw_tag)
-{
- struct device *dev = i1480->dev;
- ssize_t result = 0;
- struct fw_hdr *hdr_itr;
- int verif_retry_count;
-
- /* Now, header by header, push them to the hw */
- for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) {
- verif_retry_count = 0;
-retry:
- dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n",
- hdr_itr->length * sizeof(hdr_itr->bin[0]),
- hdr_itr->address);
- result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin,
- hdr_itr->length*sizeof(hdr_itr->bin[0]));
- if (result < 0) {
- dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):"
- " %zd\n", fw_tag, fw_name,
- hdr_itr->length * sizeof(hdr_itr->bin[0]),
- hdr_itr->address, result);
- break;
- }
- result = i1480_fw_cmp(i1480, hdr_itr);
- if (result < 0) {
- dev_err(dev, "%s fw '%s': verification read "
- "failed (%zuB @ 0x%lx): %zd\n",
- fw_tag, fw_name,
- hdr_itr->length * sizeof(hdr_itr->bin[0]),
- hdr_itr->address, result);
- break;
- }
- if (result > 0) { /* Offset where it failed + 1 */
- result--;
- dev_err(dev, "%s fw '%s': WARNING: verification "
- "failed at 0x%lx: retrying\n",
- fw_tag, fw_name, hdr_itr->address + result);
- if (++verif_retry_count < 3)
- goto retry; /* write this block again! */
- dev_err(dev, "%s fw '%s': verification failed at 0x%lx: "
- "tried %d times\n", fw_tag, fw_name,
- hdr_itr->address + result, verif_retry_count);
- result = -EINVAL;
- break;
- }
- }
- return result;
-}
-
-
-/** Puts the device in firmware upload mode.*/
-static
-int mac_fw_upload_enable(struct i1480 *i1480)
-{
- int result;
- u32 reg = 0x800000c0;
- u32 *buffer = (u32 *)i1480->cmd_buf;
-
- if (i1480->hw_rev > 1)
- reg = 0x8000d0d4;
- result = i1480->read(i1480, reg, sizeof(u32));
- if (result < 0)
- goto error_cmd;
- *buffer &= ~i1480_FW_UPLOAD_MODE_MASK;
- result = i1480->write(i1480, reg, buffer, sizeof(u32));
- if (result < 0)
- goto error_cmd;
- return 0;
-error_cmd:
- dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result);
- return result;
-}
-
-
-/** Gets the device out of firmware upload mode. */
-static
-int mac_fw_upload_disable(struct i1480 *i1480)
-{
- int result;
- u32 reg = 0x800000c0;
- u32 *buffer = (u32 *)i1480->cmd_buf;
-
- if (i1480->hw_rev > 1)
- reg = 0x8000d0d4;
- result = i1480->read(i1480, reg, sizeof(u32));
- if (result < 0)
- goto error_cmd;
- *buffer |= i1480_FW_UPLOAD_MODE_MASK;
- result = i1480->write(i1480, reg, buffer, sizeof(u32));
- if (result < 0)
- goto error_cmd;
- return 0;
-error_cmd:
- dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result);
- return result;
-}
-
-
-
-/**
- * Generic function for uploading a MAC firmware.
- *
- * @i1480: Device instance
- * @fw_name: Name of firmware file to upload.
- * @fw_tag: Name of the firmware type (for messages)
- * [eg: MAC, PRE]
- * @returns: 0 if ok, < 0 errno on error.
- */
-static
-int __mac_fw_upload(struct i1480 *i1480, const char *fw_name,
- const char *fw_tag)
-{
- int result;
- const struct firmware *fw;
- struct fw_hdr *fw_hdrs;
-
- result = request_firmware(&fw, fw_name, i1480->dev);
- if (result < 0) /* Up to caller to complain on -ENOENT */
- goto out;
- result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size);
- if (result < 0) {
- dev_err(i1480->dev, "%s fw '%s': failed to parse firmware "
- "file: %d\n", fw_tag, fw_name, result);
- goto out_release;
- }
- result = mac_fw_upload_enable(i1480);
- if (result < 0)
- goto out_hdrs_release;
- result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag);
- mac_fw_upload_disable(i1480);
-out_hdrs_release:
- if (result >= 0)
- dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name);
- else
- dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), "
- "power cycle device\n", fw_tag, fw_name, result);
- fw_hdrs_free(fw_hdrs);
-out_release:
- release_firmware(fw);
-out:
- return result;
-}
-
-
-/**
- * Upload a pre-PHY firmware
- *
- */
-int i1480_pre_fw_upload(struct i1480 *i1480)
-{
- int result;
- result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE");
- if (result == 0)
- msleep(400);
- return result;
-}
-
-
-/**
- * Reset the MAC and PHY
- *
- * @i1480: Device's instance
- * @returns: 0 if ok, < 0 errno code on error
- *
- * We put the command on kmalloc'ed memory as some arches cannot do
- * USB from the stack. The reply event is copied from a staging buffer,
- * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details.
- *
- * We issue the reset to make sure the UWB controller reinits the PHY;
- * this way we can know whether the PHY init went OK.
- */
-static
-int i1480_cmd_reset(struct i1480 *i1480)
-{
- int result;
- struct uwb_rccb *cmd = (void *) i1480->cmd_buf;
- struct i1480_evt_reset {
- struct uwb_rceb rceb;
- u8 bResultCode;
- } __attribute__((packed)) *reply = (void *) i1480->evt_buf;
-
- result = -ENOMEM;
- cmd->bCommandType = UWB_RC_CET_GENERAL;
- cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
- reply->rceb.bEventType = UWB_RC_CET_GENERAL;
- reply->rceb.wEvent = UWB_RC_CMD_RESET;
- result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply));
- if (result < 0)
- goto out;
- if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(i1480->dev, "RESET: command execution failed: %u\n",
- reply->bResultCode);
- result = -EIO;
- }
-out:
- return result;
-
-}
-
-
-/* Wait for the MAC FW to start running */
-static
-int i1480_fw_is_running_q(struct i1480 *i1480)
-{
- int cnt = 0;
- int result;
- u32 *val = (u32 *) i1480->cmd_buf;
-
- for (cnt = 0; cnt < 10; cnt++) {
- msleep(100);
- result = i1480->read(i1480, 0x80080000, 4);
- if (result < 0) {
-			dev_err(i1480->dev, "Can't read 0x80080000: %d\n", result);
- goto out;
- }
- if (*val == 0x55555555UL) /* fw running? cool */
- goto out;
- }
- dev_err(i1480->dev, "Timed out waiting for fw to start\n");
- result = -ETIMEDOUT;
-out:
- return result;
-
-}
-
-
-/**
- * Upload MAC firmware, wait for it to start
- *
- * @i1480: Device instance
- * @fw_name: Name of the file that contains the firmware
- *
- * This has to be called after the pre fw has been uploaded (if
- * there is any).
- */
-int i1480_mac_fw_upload(struct i1480 *i1480)
-{
- int result = 0, deprecated_name = 0;
- struct i1480_rceb *rcebe = (void *) i1480->evt_buf;
-
- result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC");
- if (result == -ENOENT) {
- result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate,
- "MAC");
- deprecated_name = 1;
- }
- if (result < 0)
- return result;
- if (deprecated_name == 1)
- dev_warn(i1480->dev,
- "WARNING: firmware file name %s is deprecated, "
- "please rename to %s\n",
- i1480->mac_fw_name_deprecate, i1480->mac_fw_name);
- result = i1480_fw_is_running_q(i1480);
- if (result < 0)
- goto error_fw_not_running;
- result = i1480->rc_setup ? i1480->rc_setup(i1480) : 0;
- if (result < 0) {
- dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n",
- result);
- goto error_setup;
- }
- result = i1480->wait_init_done(i1480); /* wait init'on */
- if (result < 0) {
- dev_err(i1480->dev, "MAC fw '%s': Initialization timed out "
- "(%d)\n", i1480->mac_fw_name, result);
- goto error_init_timeout;
- }
- /* verify we got the right initialization done event */
- if (i1480->evt_result != sizeof(*rcebe)) {
- dev_err(i1480->dev, "MAC fw '%s': initialization event returns "
-			"wrong size (%zd bytes vs %zu needed)\n",
-			i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe));
-		result = -EIO;
-		goto error_size;
- }
- result = -EIO;
- if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1,
- i1480_EVT_RM_INIT_DONE) < 0) {
- dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x "
- "received; expected 0x%02x/%04x/00\n",
- rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent),
- rcebe->rceb.bEventContext, i1480_CET_VS1,
- i1480_EVT_RM_INIT_DONE);
- goto error_init_timeout;
- }
- result = i1480_cmd_reset(i1480);
- if (result < 0)
- dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n",
- i1480->mac_fw_name, result);
-error_fw_not_running:
-error_init_timeout:
-error_size:
-error_setup:
- return result;
-}
diff --git a/drivers/staging/uwb/i1480/dfu/phy.c b/drivers/staging/uwb/i1480/dfu/phy.c
deleted file mode 100644
index 13512c7dda0b..000000000000
--- a/drivers/staging/uwb/i1480/dfu/phy.c
+++ /dev/null
@@ -1,190 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Wireless UWB Link 1480
- * PHY parameters upload
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Code for uploading the PHY parameters to the PHY through the UWB
- * Radio Control interface.
- *
- * We just send the data through the MPI interface using HWA-like
- * commands and then reset the PHY to make sure it is ok.
- */
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/firmware.h>
-#include "../../../wusbcore/include/wusb.h"
-#include "i1480-dfu.h"
-
-
-/**
- * Write a value array to an address of the MPI interface
- *
- * @i1480: Device descriptor
- * @data: Data array to write
- * @size: Size of the data array
- * @returns: 0 if ok, < 0 errno code on error.
- *
- * The data array is organized into pairs:
- *
- * ADDRESS VALUE
- *
- * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has
- * to be a multiple of three.
- */
-static
-int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size)
-{
- int result;
- struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf;
- struct i1480_evt_confirm *reply = i1480->evt_buf;
-
- BUG_ON(size > 480);
- result = -ENOMEM;
- cmd->rccb.bCommandType = i1480_CET_VS1;
- cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE);
- cmd->size = cpu_to_le16(size);
- memcpy(cmd->data, data, size);
- reply->rceb.bEventType = i1480_CET_VS1;
- reply->rceb.wEvent = i1480_CMD_MPI_WRITE;
- result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply));
- if (result < 0)
- goto out;
- if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n",
- reply->bResultCode);
- result = -EIO;
- }
-out:
- return result;
-}
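
The MPI write payload described above is a packed stream of 3-byte records, a big-endian 16-bit register address followed by one value byte, which is why the total size must be a multiple of three. A builder sketch for such a stream; mpi_put and the sample addresses are invented for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Append one (address, value) record: 16-bit address big-endian
 * (MSB first), then the 8-bit value, 3 bytes per record. */
static size_t mpi_put(uint8_t *buf, size_t off, uint16_t addr, uint8_t val)
{
	buf[off++] = (uint8_t)(addr >> 8);
	buf[off++] = (uint8_t)(addr & 0xff);
	buf[off++] = val;
	return off;
}

int main(void)
{
	uint8_t stream[480];	/* same 480-byte cap as i1480_mpi_write() */
	size_t len = 0, i;

	len = mpi_put(stream, len, 0x0006, 0x00);
	len = mpi_put(stream, len, 0x0123, 0xab);
	printf("payload (%zu bytes, a multiple of 3):", len);
	for (i = 0; i < len; i++)
		printf(" %02x", (unsigned int)stream[i]);
	printf("\n");
	return 0;
}
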
-
-
-/**
- * Read a value array from an address of the MPI interface
- *
- * @i1480: Device descriptor
- * @data: where to place the read array
- * @srcaddr: Where to read from
- * @size: Size of the data read array
- * @returns: 0 if ok, < 0 errno code on error.
- *
- * The command data array is organized into pairs ADDR0 ADDR1..., and
- * the returned data in ADDR0 VALUE0 ADDR1 VALUE1...
- *
- * We generate the command array to be a sequential read and then
- * rearrange the result.
- *
- * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply.
- *
- * As the reply has to fit in 512 bytes (i1480->evt_buf), the max amount
- * of values we can read is (512 - sizeof(*reply)) / 3
- */
-static
-int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size)
-{
- int result;
- struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf;
- struct i1480_evt_mpi_read *reply = i1480->evt_buf;
- unsigned cnt;
-
- memset(i1480->cmd_buf, 0x69, 512);
- memset(i1480->evt_buf, 0x69, 512);
-
- BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3);
- result = -ENOMEM;
- cmd->rccb.bCommandType = i1480_CET_VS1;
- cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ);
- cmd->size = cpu_to_le16(3*size);
- for (cnt = 0; cnt < size; cnt++) {
- cmd->data[cnt].page = (srcaddr + cnt) >> 8;
- cmd->data[cnt].offset = (srcaddr + cnt) & 0xff;
- }
- reply->rceb.bEventType = i1480_CET_VS1;
- reply->rceb.wEvent = i1480_CMD_MPI_READ;
- result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size,
- sizeof(*reply) + 3*size);
- if (result < 0)
- goto out;
- if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n",
- reply->bResultCode);
- result = -EIO;
- goto out;
- }
- for (cnt = 0; cnt < size; cnt++) {
- if (reply->data[cnt].page != (srcaddr + cnt) >> 8)
- dev_err(i1480->dev, "MPI-READ: page inconsistency at "
- "index %u: expected 0x%02x, got 0x%02x\n", cnt,
- (srcaddr + cnt) >> 8, reply->data[cnt].page);
- if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff))
- dev_err(i1480->dev, "MPI-READ: offset inconsistency at "
- "index %u: expected 0x%02x, got 0x%02x\n", cnt,
- (srcaddr + cnt) & 0x00ff,
- reply->data[cnt].offset);
- data[cnt] = reply->data[cnt].value;
- }
- result = 0;
-out:
- return result;
-}
-
-
-/**
- * Upload a PHY firmware, wait for it to start
- *
- * @i1480: Device instance
- * @fw_name: Name of the file that contains the firmware
- *
- * We assume the MAC fw is up and running. This means we can use the
- * MPI interface to write the PHY firmware. Once done, we issue an
- * MBOA Reset, which will force the MAC to reset and reinitialize the
- * PHY. If that works, we are ready to go.
- *
- * Max packet size for the MPI write is 512, so the max buffer is 480
- * (which gives us 160 3-byte triads of MSB, LSB and VAL for the data).
- */
-int i1480_phy_fw_upload(struct i1480 *i1480)
-{
- int result;
- const struct firmware *fw;
- const char *data_itr, *data_top;
- const size_t MAX_BLK_SIZE = 480; /* 160 triads */
- size_t data_size;
- u8 phy_stat;
-
- result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev);
- if (result < 0)
- goto out;
- /* Loop writing data in chunks as big as possible until done. */
- for (data_itr = fw->data, data_top = data_itr + fw->size;
- data_itr < data_top; data_itr += MAX_BLK_SIZE) {
- data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr));
- result = i1480_mpi_write(i1480, data_itr, data_size);
- if (result < 0)
- goto error_mpi_write;
- }
- /* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */
- result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1);
- if (result < 0) {
- dev_err(i1480->dev, "PHY: can't get status: %d\n", result);
- goto error_mpi_status;
- }
- if (phy_stat != 0) {
- result = -ENODEV;
- dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat);
- goto error_phy_status;
- }
- dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name);
-error_phy_status:
-error_mpi_status:
-error_mpi_write:
- release_firmware(fw);
- if (result < 0)
- dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), "
- "power cycle device\n", i1480->phy_fw_name, result);
-out:
- return result;
-}
diff --git a/drivers/staging/uwb/i1480/dfu/usb.c b/drivers/staging/uwb/i1480/dfu/usb.c
deleted file mode 100644
index d41086bdd783..000000000000
--- a/drivers/staging/uwb/i1480/dfu/usb.c
+++ /dev/null
@@ -1,448 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Wireless UWB Link 1480
- * USB SKU firmware upload implementation
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This driver will prepare the i1480 device to behave as a real
- * Wireless USB HWA adaptor by uploading the firmware.
- *
- * When the device is connected or driver is loaded, i1480_usb_probe()
- * is called--this will allocate and initialize the device structure,
- * fill in the pointers to the common functions (read, write,
- * wait_init_done and cmd for HWA command execution) and once that is
- * done, call the common firmware uploading routine. Then clean up and
- * return -ENODEV, as we don't attach to the device.
- *
- * The rest are the basic ops we implement that the fw upload code
- * uses to do its job. All the ops in the common code are i1480->NAME,
- * the functions are i1480_usb_NAME().
- */
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include "../../uwb.h"
-#include "../../../wusbcore/include/wusb.h"
-#include "../../../wusbcore/include/wusb-wa.h"
-#include "i1480-dfu.h"
-
-struct i1480_usb {
- struct i1480 i1480;
- struct usb_device *usb_dev;
- struct usb_interface *usb_iface;
- struct urb *neep_urb; /* URB for reading from EP1 */
-};
-
-
-static
-void i1480_usb_init(struct i1480_usb *i1480_usb)
-{
- i1480_init(&i1480_usb->i1480);
-}
-
-
-static
-int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface)
-{
- struct usb_device *usb_dev = interface_to_usbdev(iface);
- int result = -ENOMEM;
-
- i1480_usb->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
- i1480_usb->usb_iface = usb_get_intf(iface);
- usb_set_intfdata(iface, i1480_usb); /* Bind the driver to iface0 */
- i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (i1480_usb->neep_urb == NULL)
- goto error;
- return 0;
-
-error:
- usb_set_intfdata(iface, NULL);
- usb_put_intf(iface);
- usb_put_dev(usb_dev);
- return result;
-}
-
-
-static
-void i1480_usb_destroy(struct i1480_usb *i1480_usb)
-{
- usb_kill_urb(i1480_usb->neep_urb);
- usb_free_urb(i1480_usb->neep_urb);
- usb_set_intfdata(i1480_usb->usb_iface, NULL);
- usb_put_intf(i1480_usb->usb_iface);
- usb_put_dev(i1480_usb->usb_dev);
-}
-
-
-/**
- * Write a buffer to a memory address in the i1480 device
- *
- * @i1480: i1480 instance
- * @memory_address:
- * Address where to write the data buffer to.
- * @buffer: Buffer to the data
- * @size: Size of the buffer [has to be < 512].
- * @returns: 0 if ok, < 0 errno code on error.
- *
- * Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
- * so we copy them to the local i1480 buffer before proceeding. In any
- * case, we have a max size we can send.
- */
-static
-int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
- const void *buffer, size_t size)
-{
- int result = 0;
- struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
- size_t buffer_size, itr = 0;
-
- BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
- while (size > 0) {
- buffer_size = size < i1480->buf_size ? size : i1480->buf_size;
- memcpy(i1480->cmd_buf, buffer + itr, buffer_size);
- result = usb_control_msg(
- i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
- 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- memory_address, (memory_address >> 16),
- i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */);
- if (result < 0)
- break;
- itr += result;
- memory_address += result;
- size -= result;
- }
- return result;
-}
-
-
-/**
- * Read a block [max size 512] of the device's memory to @i1480's buffer.
- *
- * @i1480: i1480 instance
- * @memory_address:
- * Address where to read from.
- * @size: Size to read. Smaller than or equal to 512.
- * @returns: >= 0 number of bytes written if ok, < 0 errno code on error.
- *
- * NOTE: if the memory address or block is incorrect, you might get a
- * stall or a different memory read. Caller has to verify the
- * memory address and size passed back in the @neh structure.
- */
-static
-int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
-{
- ssize_t result = 0, bytes = 0;
- size_t itr, read_size = i1480->buf_size;
- struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
-
- BUG_ON(size > i1480->buf_size);
- BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
- BUG_ON(read_size > 512);
-
- if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */
- read_size = 4;
-
- for (itr = 0; itr < size; itr += read_size) {
- size_t itr_addr = addr + itr;
- size_t itr_size = min(read_size, size - itr);
- result = usb_control_msg(
- i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
- 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- itr_addr, (itr_addr >> 16),
- i1480->cmd_buf + itr, itr_size,
- 100 /* FIXME: arbitrary */);
- if (result < 0) {
- dev_err(i1480->dev, "%s: USB read error: %zd\n",
- __func__, result);
- goto out;
- }
-		if (result != itr_size) {
-			dev_err(i1480->dev,
-				"%s: partial read got only %zd bytes vs %zu expected\n",
-				__func__, result, itr_size);
-			result = -EIO;
-			goto out;
- }
- bytes += result;
- }
- result = bytes;
-out:
- return result;
-}
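
The quirk handling above drops the read size to 4 bytes when the start address falls in the 0x8000d200-0x8000d3ff window. The sketch below evaluates the size per chunk, a slight generalization of the code above, which picks read_size once up front; everything else is invented for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Chunk-size selection as in i1480_usb_read(): normal reads use the
 * full command buffer, but the quirky window only tolerates 4-byte
 * accesses.
 */
static size_t chunk_size(uint32_t addr, size_t bufsz)
{
	if (addr >= 0x8000d200 && addr < 0x8000d400)
		return 4;
	return bufsz;
}

int main(void)
{
	uint32_t addr = 0x8000d200;
	size_t total = 12, done = 0;

	while (done < total) {
		size_t n = chunk_size(addr + (uint32_t)done, 512);

		if (n > total - done)
			n = total - done;
		printf("read %zu bytes @ 0x%08x\n", n,
		       (unsigned int)(addr + done));
		done += n;
	}
	return 0;
}
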
-
-
-/**
- * Callback for reads on the notification/event endpoint
- *
- * Just enables the completion read handler.
- */
-static
-void i1480_usb_neep_cb(struct urb *urb)
-{
- struct i1480 *i1480 = urb->context;
- struct device *dev = i1480->dev;
-
- switch (urb->status) {
- case 0:
- break;
- case -ECONNRESET: /* Not an error, but a controlled situation; */
- case -ENOENT: /* (we killed the URB)...so, no broadcast */
- dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status);
- break;
- case -ESHUTDOWN: /* going away! */
- dev_dbg(dev, "NEEP: down %d\n", urb->status);
- break;
- default:
- dev_err(dev, "NEEP: unknown status %d\n", urb->status);
- break;
- }
- i1480->evt_result = urb->actual_length;
- complete(&i1480->evt_complete);
- return;
-}
-
-
-/**
- * Wait for the MAC FW to initialize
- *
- * MAC FW sends a 0xfd/0101/00 notification to EP1 when done
- * initializing. Get that notification into i1480->evt_buf; upper layer
- * will verify it.
- *
- * Set i1480->evt_result with the result of getting the event or its
- * size (if successful).
- *
- * Delivers the data directly to i1480->evt_buf
- */
-static
-int i1480_usb_wait_init_done(struct i1480 *i1480)
-{
- int result;
- struct device *dev = i1480->dev;
- struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
- struct usb_endpoint_descriptor *epd;
-
- init_completion(&i1480->evt_complete);
- i1480->evt_result = -EINPROGRESS;
- epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
- usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
- usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
- i1480->evt_buf, i1480->buf_size,
- i1480_usb_neep_cb, i1480, epd->bInterval);
- result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "init done: cannot submit NEEP read: %d\n",
- result);
- goto error_submit;
- }
- /* Wait for the USB callback to get the data */
- result = wait_for_completion_interruptible_timeout(
- &i1480->evt_complete, HZ);
- if (result <= 0) {
- result = result == 0 ? -ETIMEDOUT : result;
- goto error_wait;
- }
- usb_kill_urb(i1480_usb->neep_urb);
- return 0;
-
-error_wait:
- usb_kill_urb(i1480_usb->neep_urb);
-error_submit:
- i1480->evt_result = result;
- return result;
-}
-
-
-/**
- * Generic function for issuing commands to the i1480
- *
- * @i1480: i1480 instance; the command is taken from i1480->cmd_buf
- *	and the reply event is delivered to i1480->evt_buf.
- * @cmd_name: Name of the command (for error messages)
- * @cmd_size: Size of the command in i1480->cmd_buf
- * @returns: >= 0 if ok, < 0 errno code on error.
- *
- * Arms a read on the notification/event endpoint and then issues the
- * command to the device on EP0; the NEEP callback delivers the reply
- * event into i1480->evt_buf.
- */
-static
-int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
-{
- int result;
- struct device *dev = i1480->dev;
- struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
- struct usb_endpoint_descriptor *epd;
- struct uwb_rccb *cmd = i1480->cmd_buf;
- u8 iface_no;
-
- /* Post a read on the notification & event endpoint */
- iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber;
- epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
- usb_fill_int_urb(
- i1480_usb->neep_urb, i1480_usb->usb_dev,
- usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
- i1480->evt_buf, i1480->buf_size,
- i1480_usb_neep_cb, i1480, epd->bInterval);
- result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "%s: cannot submit NEEP read: %d\n",
- cmd_name, result);
- goto error_submit_ep1;
- }
- /* Now post the command on EP0 */
- result = usb_control_msg(
- i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
- WA_EXEC_RC_CMD,
- USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
- 0, iface_no,
- cmd, cmd_size,
- 100 /* FIXME: this is totally arbitrary */);
- if (result < 0) {
- dev_err(dev, "%s: control request failed: %d\n",
- cmd_name, result);
- goto error_submit_ep0;
- }
- return result;
-
-error_submit_ep0:
- usb_kill_urb(i1480_usb->neep_urb);
-error_submit_ep1:
- return result;
-}
-
-
-/*
- * Probe a i1480 device for uploading firmware.
- *
- * We attach only to interface #0, which is the radio control interface.
- */
-static
-int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(iface);
- struct i1480_usb *i1480_usb;
- struct i1480 *i1480;
- struct device *dev = &iface->dev;
- int result;
-
- result = -ENODEV;
- if (iface->cur_altsetting->desc.bInterfaceNumber != 0) {
- dev_dbg(dev, "not attaching to iface %d\n",
- iface->cur_altsetting->desc.bInterfaceNumber);
- goto error;
- }
- if (iface->num_altsetting > 1 &&
- le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
- /* Need altsetting #1 [HW QUIRK] or EP1 won't work */
- result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
- if (result < 0)
- dev_warn(dev,
- "can't set altsetting 1 on iface 0: %d\n",
- result);
- }
-
- if (iface->cur_altsetting->desc.bNumEndpoints < 1)
- return -ENODEV;
-
- result = -ENOMEM;
- i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
- if (i1480_usb == NULL) {
- dev_err(dev, "Unable to allocate instance\n");
- goto error;
- }
- i1480_usb_init(i1480_usb);
-
- i1480 = &i1480_usb->i1480;
- i1480->buf_size = 512;
- i1480->cmd_buf = kmalloc_array(2, i1480->buf_size, GFP_KERNEL);
- if (i1480->cmd_buf == NULL) {
- dev_err(dev, "Cannot allocate transfer buffers\n");
- result = -ENOMEM;
- goto error_buf_alloc;
- }
- i1480->evt_buf = i1480->cmd_buf + i1480->buf_size;
-
- result = i1480_usb_create(i1480_usb, iface);
- if (result < 0) {
- dev_err(dev, "Cannot create instance: %d\n", result);
- goto error_create;
- }
-
- /* setup the fops and upload the firmware */
- i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
- i1480->mac_fw_name = "i1480-usb-0.0.bin";
- i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
- i1480->phy_fw_name = "i1480-phy-0.0.bin";
- i1480->dev = &iface->dev;
- i1480->write = i1480_usb_write;
- i1480->read = i1480_usb_read;
- i1480->rc_setup = NULL;
- i1480->wait_init_done = i1480_usb_wait_init_done;
- i1480->cmd = i1480_usb_cmd;
-
- result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */
- if (result >= 0) {
- usb_reset_device(i1480_usb->usb_dev);
- result = -ENODEV; /* we don't want to bind to the iface */
- }
- i1480_usb_destroy(i1480_usb);
-error_create:
- kfree(i1480->cmd_buf);
-error_buf_alloc:
- kfree(i1480_usb);
-error:
- return result;
-}
-
-MODULE_FIRMWARE("i1480-pre-phy-0.0.bin");
-MODULE_FIRMWARE("i1480-usb-0.0.bin");
-MODULE_FIRMWARE("i1480-phy-0.0.bin");
-
-#define i1480_USB_DEV(v, p) \
-{ \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE \
- | USB_DEVICE_ID_MATCH_DEV_INFO \
- | USB_DEVICE_ID_MATCH_INT_INFO, \
- .idVendor = (v), \
- .idProduct = (p), \
- .bDeviceClass = 0xff, \
- .bDeviceSubClass = 0xff, \
- .bDeviceProtocol = 0xff, \
- .bInterfaceClass = 0xff, \
- .bInterfaceSubClass = 0xff, \
- .bInterfaceProtocol = 0xff, \
-}
-
-
-/** USB device ID's that we handle */
-static const struct usb_device_id i1480_usb_id_table[] = {
- i1480_USB_DEV(0x8086, 0xdf3b),
- i1480_USB_DEV(0x15a9, 0x0005),
- i1480_USB_DEV(0x07d1, 0x3802),
- i1480_USB_DEV(0x050d, 0x305a),
- i1480_USB_DEV(0x3495, 0x3007),
- {},
-};
-MODULE_DEVICE_TABLE(usb, i1480_usb_id_table);
-
-
-static struct usb_driver i1480_dfu_driver = {
- .name = "i1480-dfu-usb",
- .id_table = i1480_usb_id_table,
- .probe = i1480_usb_probe,
- .disconnect = NULL,
-};
-
-module_usb_driver(i1480_dfu_driver);
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/uwb/i1480/i1480-est.c b/drivers/staging/uwb/i1480/i1480-est.c
deleted file mode 100644
index 106e0a44b138..000000000000
--- a/drivers/staging/uwb/i1480/i1480-est.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Wireless UWB Link 1480
- * Event Size tables for Wired Adaptors
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include "../uwb.h"
-#include "dfu/i1480-dfu.h"
-
-
-/** Event size table for wEvents 0x00XX */
-static struct uwb_est_entry i1480_est_fd00[] = {
- /* Anybody expecting this response has to use
- * neh->extra_size to specify the real size that will
- * come back. */
- [i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) },
- [i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) },
-#ifdef i1480_RCEB_EXTENDED
- [0x09] = {
- .size = sizeof(struct i1480_rceb),
- .offset = 1 + offsetof(struct i1480_rceb, wParamLength),
- },
-#endif
-};
-
-/** Event size table for wEvents 0x01XX */
-static struct uwb_est_entry i1480_est_fd01[] = {
- [0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) },
- [0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 },
- [0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 },
- [0xff & i1480_EVT_DEV_ID_CHANGE] = {
- .size = sizeof(struct i1480_rceb) + 2 },
-};
-
-static int __init i1480_est_init(void)
-{
- int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
- i1480_est_fd00,
- ARRAY_SIZE(i1480_est_fd00));
- if (result < 0) {
- printk(KERN_ERR "Can't register EST table fd00: %d\n", result);
- return result;
- }
- result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
- i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
- if (result < 0) {
- printk(KERN_ERR "Can't register EST table fd01: %d\n", result);
- return result;
- }
- return 0;
-}
-module_init(i1480_est_init);
-
-static void __exit i1480_est_exit(void)
-{
- uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
- i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00));
- uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
- i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
-}
-module_exit(i1480_est_exit);
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables");
-MODULE_LICENSE("GPL");
-
-/**
- * USB device ID's that we handle
- *
- * [so we are loaded when this kind of device is connected]
- */
-static struct usb_device_id __used i1480_est_id_table[] = {
- { USB_DEVICE(0x8086, 0xdf3b), },
- { USB_DEVICE(0x8086, 0x0c3b), },
- { },
-};
-MODULE_DEVICE_TABLE(usb, i1480_est_id_table);
diff --git a/drivers/staging/uwb/ie-rcv.c b/drivers/staging/uwb/ie-rcv.c
deleted file mode 100644
index 51a4706e0dd3..000000000000
--- a/drivers/staging/uwb/ie-rcv.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * IE Received notification handling.
- *
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/bitmap.h>
-#include "uwb-internal.h"
-
-/*
- * Process an incoming IE Received notification.
- */
-int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt)
-{
- int result = -EINVAL;
- struct device *dev = &evt->rc->uwb_dev.dev;
- struct uwb_rc_evt_ie_rcv *iercv;
-
- /* Is there enough data to decode it? */
- if (evt->notif.size < sizeof(*iercv)) {
- dev_err(dev, "IE Received notification: Not enough data to "
- "decode (%zu vs %zu bytes needed)\n",
- evt->notif.size, sizeof(*iercv));
- goto error;
- }
- iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb);
-
- dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]);
-
- if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) {
- dev_warn(dev, "unhandled Relinquish Request IE\n");
- }
-
- return 0;
-error:
- return result;
-}
diff --git a/drivers/staging/uwb/ie.c b/drivers/staging/uwb/ie.c
deleted file mode 100644
index b11678dd0380..000000000000
--- a/drivers/staging/uwb/ie.c
+++ /dev/null
@@ -1,366 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Information Element Handling
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * Reinette Chatre <reinette.chatre@intel.com>
- *
- * FIXME: docs
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-#include "uwb-internal.h"
-
-/**
- * uwb_ie_next - get the next IE in a buffer
- * @ptr: start of the buffer containing the IE data
- * @len: length of the buffer
- *
- * Both @ptr and @len are updated so subsequent calls to uwb_ie_next()
- * will get the next IE.
- *
- * NULL is returned (and @ptr and @len will not be updated) if there
- * are no more IEs in the buffer or the buffer is too short.
- */
-struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len)
-{
- struct uwb_ie_hdr *hdr;
- size_t ie_len;
-
- if (*len < sizeof(struct uwb_ie_hdr))
- return NULL;
-
- hdr = *ptr;
- ie_len = sizeof(struct uwb_ie_hdr) + hdr->length;
-
- if (*len < ie_len)
- return NULL;
-
- *ptr += ie_len;
- *len -= ie_len;
-
- return hdr;
-}
-EXPORT_SYMBOL_GPL(uwb_ie_next);
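
For reference, a minimal sketch of how a caller might walk an IE buffer with uwb_ie_next(); the helper name is hypothetical and only the iteration pattern comes from the API above:

/* Illustrative only: count the complete IEs in a raw IE buffer. */
static unsigned int example_count_ies(void *buf, size_t len)
{
	unsigned int n = 0;

	/* uwb_ie_next() advances buf/len past each complete IE and
	 * returns NULL once the remaining data is too short. */
	while (uwb_ie_next(&buf, &len) != NULL)
		n++;
	return n;
}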
-
-/**
- * uwb_ie_dump_hex - print IEs to a character buffer
- * @ies: the IEs to print.
- * @len: length of all the IEs.
- * @buf: the destination buffer.
- * @size: size of @buf.
- *
- * Returns the number of characters written.
- */
-int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len,
- char *buf, size_t size)
-{
- void *ptr;
- const struct uwb_ie_hdr *ie;
- int r = 0;
- u8 *d;
-
- ptr = (void *)ies;
- for (;;) {
- ie = uwb_ie_next(&ptr, &len);
- if (!ie)
- break;
-
- r += scnprintf(buf + r, size - r, "%02x %02x",
- (unsigned)ie->element_id,
- (unsigned)ie->length);
- d = (uint8_t *)ie + sizeof(struct uwb_ie_hdr);
- while (d != ptr && r < size)
- r += scnprintf(buf + r, size - r, " %02x", (unsigned)*d++);
- if (r < size)
- buf[r++] = '\n';
-	}
-
- return r;
-}
-
-/**
- * Get the IEs that a radio controller is sending in its beacon
- *
- * @uwb_rc: UWB Radio Controller
- * @pget_ie: where to return the GET-IE event holding the current IEs
- * @returns: Size read from the system
- *
- * We don't need to lock the uwb_rc's mutex because we don't modify
- * anything. The reply event is allocated by the command call; the
- * caller owns it and is responsible for freeing it.
- */
-static
-ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie)
-{
- ssize_t result;
- struct device *dev = &uwb_rc->uwb_dev.dev;
- struct uwb_rccb *cmd = NULL;
- struct uwb_rceb *reply = NULL;
- struct uwb_rc_evt_get_ie *get_ie;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->bCommandType = UWB_RC_CET_GENERAL;
- cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE);
- result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd),
- UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE,
- &reply);
- kfree(cmd);
- if (result < 0)
- return result;
-
- get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb);
- if (result < sizeof(*get_ie)) {
-		dev_err(dev, "not enough data returned for decoding GET IE "
-			"(%zd bytes received vs %zu needed)\n",
-			result, sizeof(*get_ie));
- return -EINVAL;
- } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) {
-		dev_err(dev, "not enough data returned for decoding GET IE "
-			"payload (%zd bytes received vs %zu needed)\n", result,
-			sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength));
- return -EINVAL;
- }
-
- *pget_ie = get_ie;
- return result;
-}
-
-
-/**
- * Replace all IEs currently being transmitted by a device
- *
- * @rc: the radio controller
- * @cmd: pointer to the SET-IE command with the IEs to set
- */
-int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd)
-{
- int result;
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_rc_evt_set_ie reply;
-
- reply.rceb.bEventType = UWB_RC_CET_GENERAL;
- reply.rceb.wEvent = UWB_RC_CMD_SET_IE;
- result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb,
- sizeof(*cmd) + le16_to_cpu(cmd->wIELength),
- &reply.rceb, sizeof(reply));
- if (result < 0)
- goto error_cmd;
- else if (result != sizeof(reply)) {
- dev_err(dev, "SET-IE: not enough data to decode reply "
- "(%d bytes received vs %zu needed)\n",
- result, sizeof(reply));
- result = -EIO;
- } else if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(dev, "SET-IE: command execution failed: %s (%d)\n",
- uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
- result = -EIO;
- } else
- result = 0;
-error_cmd:
- return result;
-}
-
-/* Initialize the IE management subsystem */
-void uwb_rc_ie_init(struct uwb_rc *uwb_rc)
-{
- mutex_init(&uwb_rc->ies_mutex);
-}
-
-
-/**
- * uwb_rc_ie_setup - setup a radio controller's IE manager
- * @uwb_rc: the radio controller.
- *
- * The current set of IEs are obtained from the hardware with a GET-IE
- * command (since the radio controller is not yet beaconing this will
- * be just the hardware's MAC and PHY Capability IEs).
- *
- * Returns 0 on success; -ve on an error.
- */
-int uwb_rc_ie_setup(struct uwb_rc *uwb_rc)
-{
- struct uwb_rc_evt_get_ie *ie_info = NULL;
- int capacity;
-
- capacity = uwb_rc_get_ie(uwb_rc, &ie_info);
- if (capacity < 0)
- return capacity;
-
- mutex_lock(&uwb_rc->ies_mutex);
-
- uwb_rc->ies = (struct uwb_rc_cmd_set_ie *)ie_info;
- uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL;
- uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE);
- uwb_rc->ies_capacity = capacity;
-
- mutex_unlock(&uwb_rc->ies_mutex);
-
- return 0;
-}
-
-
-/* Cleanup the whole IE management subsystem */
-void uwb_rc_ie_release(struct uwb_rc *uwb_rc)
-{
- kfree(uwb_rc->ies);
- uwb_rc->ies = NULL;
- uwb_rc->ies_capacity = 0;
-}
-
-
-static int uwb_rc_ie_add_one(struct uwb_rc *rc, const struct uwb_ie_hdr *new_ie)
-{
- struct uwb_rc_cmd_set_ie *new_ies;
- void *ptr, *prev_ie;
- struct uwb_ie_hdr *ie;
- size_t length, new_ie_len, new_capacity, size, prev_size;
-
- length = le16_to_cpu(rc->ies->wIELength);
- new_ie_len = sizeof(struct uwb_ie_hdr) + new_ie->length;
- new_capacity = sizeof(struct uwb_rc_cmd_set_ie) + length + new_ie_len;
-
- if (new_capacity > rc->ies_capacity) {
- new_ies = krealloc(rc->ies, new_capacity, GFP_KERNEL);
- if (!new_ies)
- return -ENOMEM;
- rc->ies = new_ies;
- }
-
- ptr = rc->ies->IEData;
- size = length;
- for (;;) {
- prev_ie = ptr;
- prev_size = size;
- ie = uwb_ie_next(&ptr, &size);
- if (!ie || ie->element_id > new_ie->element_id)
- break;
- }
-
- memmove(prev_ie + new_ie_len, prev_ie, prev_size);
- memcpy(prev_ie, new_ie, new_ie_len);
- rc->ies->wIELength = cpu_to_le16(length + new_ie_len);
-
- return 0;
-}
-
-/**
- * uwb_rc_ie_add - add new IEs to the radio controller's beacon
- * @uwb_rc: the radio controller.
- * @ies: the buffer containing the new IE or IEs to be added to
- * the device's beacon.
- * @size: length of all the IEs.
- *
- * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
- * after the device sent the first beacon that includes the IEs specified
- * in the SET IE command. We thus cannot send this command if the device is
- * not beaconing. Instead, a SET IE command will be sent later right after
- * we start beaconing.
- *
- * Setting an IE on the device will overwrite all current IEs in device. So
- * we take the current IEs being transmitted by the device, insert the
- * new one, and call SET IE with all the IEs needed.
- *
- * Returns 0 on success; or -ENOMEM.
- */
-int uwb_rc_ie_add(struct uwb_rc *uwb_rc,
- const struct uwb_ie_hdr *ies, size_t size)
-{
- int result = 0;
- void *ptr;
- const struct uwb_ie_hdr *ie;
-
- mutex_lock(&uwb_rc->ies_mutex);
-
- ptr = (void *)ies;
- for (;;) {
- ie = uwb_ie_next(&ptr, &size);
- if (!ie)
- break;
-
- result = uwb_rc_ie_add_one(uwb_rc, ie);
- if (result < 0)
- break;
- }
- if (result >= 0) {
- if (size == 0) {
- if (uwb_rc->beaconing != -1)
- result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
- } else
- result = -EINVAL;
- }
-
- mutex_unlock(&uwb_rc->ies_mutex);
-
- return result;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_ie_add);
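
As a usage sketch (the payload bytes and the source of the struct uwb_rc pointer are made up for illustration), adding a small Application Specific IE could look like this; see spec.h for UWB_APP_SPEC_IE:

/* Illustrative only: add a 4-byte Application Specific IE. */
static int example_add_app_ie(struct uwb_rc *rc)
{
	struct {
		struct uwb_ie_hdr hdr;
		u8 data[4];
	} __attribute__((packed)) ie = {
		.hdr = { .element_id = UWB_APP_SPEC_IE, .length = 4 },
		.data = { 0x01, 0x02, 0x03, 0x04 },
	};

	return uwb_rc_ie_add(rc, &ie.hdr, sizeof(ie));
}

If the radio controller is already beaconing this ends in an immediate SET-IE; otherwise the IE sits in the cache until beaconing starts, as described in the comment above.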
-
-
-/*
- * Remove an IE from internal cache
- *
- * We are dealing with our internal IE cache so no need to verify that the
- * IEs are valid (it has been done already).
- *
- * Should be called with ies_mutex held
- *
- * We do not break out once an IE is found in the cache. It is currently
- * possible to have more than one IE with the same ID included in the
- * beacon. We don't reallocate, we just mark the size smaller.
- */
-static
-void uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove)
-{
- struct uwb_ie_hdr *ie;
- size_t len = le16_to_cpu(uwb_rc->ies->wIELength);
- void *ptr;
- size_t size;
-
- ptr = uwb_rc->ies->IEData;
- size = len;
- for (;;) {
- ie = uwb_ie_next(&ptr, &size);
- if (!ie)
- break;
- if (ie->element_id == to_remove) {
- len -= sizeof(struct uwb_ie_hdr) + ie->length;
- memmove(ie, ptr, size);
- ptr = ie;
- }
- }
- uwb_rc->ies->wIELength = cpu_to_le16(len);
-}
-
-
-/**
- * uwb_rc_ie_rm - remove an IE from the radio controller's beacon
- * @uwb_rc: the radio controller.
- * @element_id: the element ID of the IE to remove.
- *
- * Only IEs previously added with uwb_rc_ie_add() may be removed.
- *
- * Returns 0 on success; or -ve if the SET-IE command to the radio
- * controller failed.
- */
-int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id)
-{
- int result = 0;
-
- mutex_lock(&uwb_rc->ies_mutex);
-
- uwb_rc_ie_cache_rm(uwb_rc, element_id);
-
- if (uwb_rc->beaconing != -1)
- result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
-
- mutex_unlock(&uwb_rc->ies_mutex);
-
- return result;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
diff --git a/drivers/staging/uwb/include/debug-cmd.h b/drivers/staging/uwb/include/debug-cmd.h
deleted file mode 100644
index f97db6c3bcc0..000000000000
--- a/drivers/staging/uwb/include/debug-cmd.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Ultra Wide Band
- * Debug interface commands
- *
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#ifndef __LINUX__UWB__DEBUG_CMD_H__
-#define __LINUX__UWB__DEBUG_CMD_H__
-
-#include <linux/types.h>
-
-/*
- * Debug interface commands
- *
- * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
- *
- * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
- */
-
-enum uwb_dbg_cmd_type {
- UWB_DBG_CMD_RSV_ESTABLISH = 1,
- UWB_DBG_CMD_RSV_TERMINATE = 2,
- UWB_DBG_CMD_IE_ADD = 3,
- UWB_DBG_CMD_IE_RM = 4,
- UWB_DBG_CMD_RADIO_START = 5,
- UWB_DBG_CMD_RADIO_STOP = 6,
-};
-
-struct uwb_dbg_cmd_rsv_establish {
- __u8 target[6];
- __u8 type;
- __u16 max_mas;
- __u16 min_mas;
- __u8 max_interval;
-};
-
-struct uwb_dbg_cmd_rsv_terminate {
- int index;
-};
-
-struct uwb_dbg_cmd_ie {
- __u8 data[128];
- int len;
-};
-
-struct uwb_dbg_cmd {
- __u32 type;
- union {
- struct uwb_dbg_cmd_rsv_establish rsv_establish;
- struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
- struct uwb_dbg_cmd_ie ie_add;
- struct uwb_dbg_cmd_ie ie_rm;
- };
-};
-
-#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */
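
A userspace sketch of driving this interface. Only the struct layout comes from this header; the debugfs file path is an assumption (the UWB debug module exposed a per-radio 'command' file), as are the reservation parameters:

/* Hypothetical userspace caller of the UWB debug command interface. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int example_rsv_establish(const char *cmd_file,
				 const unsigned char target[6])
{
	struct uwb_dbg_cmd cmd;
	int fd, ret = -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = UWB_DBG_CMD_RSV_ESTABLISH;
	memcpy(cmd.rsv_establish.target, target, 6);
	cmd.rsv_establish.type = 1;	/* assumed: hard DRP reservation */
	cmd.rsv_establish.max_mas = 256;
	cmd.rsv_establish.min_mas = 16;
	cmd.rsv_establish.max_interval = 4;

	fd = open(cmd_file, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, &cmd, sizeof(cmd)) == (ssize_t)sizeof(cmd))
		ret = 0;
	close(fd);
	return ret;
}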
diff --git a/drivers/staging/uwb/include/spec.h b/drivers/staging/uwb/include/spec.h
deleted file mode 100644
index 5f75caf7b4ba..000000000000
--- a/drivers/staging/uwb/include/spec.h
+++ /dev/null
@@ -1,767 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Ultra Wide Band
- * UWB Standard definitions
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * All these definitions are based on the ECMA-368 standard.
- *
- * Note all definitions are Little Endian in the wire, and we will
- * convert them to host order before operating on the bitfields (that
- * yes, we use extensively).
- */
-
-#ifndef __LINUX__UWB_SPEC_H__
-#define __LINUX__UWB_SPEC_H__
-
-#include <linux/types.h>
-#include <linux/bitmap.h>
-#include <linux/if_ether.h>
-
-#define i1480_FW 0x00000303
-/* #define i1480_FW 0x00000302 */
-
-/**
- * Number of Medium Access Slots in a superframe.
- *
- * UWB divides time in SuperFrames, each one divided in 256 pieces, or
- * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
- * basic bandwidth allocation unit in UWB.
- */
-enum { UWB_NUM_MAS = 256 };
-
-/**
- * Number of Zones in superframe.
- *
- * UWB divides the superframe into zones with numbering starting from BPST.
- * See MBOA MAC[16.8.6]
- */
-enum { UWB_NUM_ZONES = 16 };
-
-/*
- * Number of MAS in a zone.
- */
-#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
-
-/*
- * Number of MAS required before a row can be considered available.
- */
-#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1)
-
-/*
- * Number of streams per DRP reservation between a pair of devices.
- *
- * [ECMA-368] section 16.8.6.
- */
-enum { UWB_NUM_STREAMS = 8 };
-
-/*
- * mMasLength
- *
- * The length of a MAS in microseconds.
- *
- * [ECMA-368] section 17.16.
- */
-enum { UWB_MAS_LENGTH_US = 256 };
-
-/*
- * mBeaconSlotLength
- *
- * The length of the beacon slot in microseconds.
- *
- * [ECMA-368] section 17.16
- */
-enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
-
-/*
- * mMaxLostBeacons
- *
- * The number of beacons missing in consecutive superframes before a
- * device can be considered as unreachable.
- *
- * [ECMA-368] section 17.16
- */
-enum { UWB_MAX_LOST_BEACONS = 3 };
-
-/*
- * mDRPBackOffWinMin
- *
- * The minimum number of superframes to wait before trying to reserve
- * extra MAS.
- *
- * [ECMA-368] section 17.16
- */
-enum { UWB_DRP_BACKOFF_WIN_MIN = 2 };
-
-/*
- * mDRPBackOffWinMax
- *
- * The maximum number of superframes to wait before trying to reserve
- * extra MAS.
- *
- * [ECMA-368] section 17.16
- */
-enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
-
-/*
- * Length of a superframe in microseconds.
- */
-#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
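
Putting the constants together: 256 MAS of 256 µs each give a 65,536 µs (~65.5 ms) superframe, and 256 MAS / 16 zones = 16 MAS per zone. A compile-time sanity check of the derived values, assuming <linux/build_bug.h>, might look like:

/* Illustrative compile-time checks of the derived constants above. */
static inline void uwb_spec_sanity_checks(void)
{
	BUILD_BUG_ON(UWB_MAS_PER_ZONE != 16);
	BUILD_BUG_ON(UWB_SUPERFRAME_LENGTH_US != 65536);
}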
-
-/**
- * UWB MAC address
- *
- * It is *imperative* that this struct is exactly 6 packed bytes (as
- * it is also used to define headers sent down and up the wire/radio).
- */
-struct uwb_mac_addr {
- u8 data[ETH_ALEN];
-} __attribute__((packed));
-
-
-/**
- * UWB device address
- *
- * It is *imperative* that this struct is exactly 6 packed bytes (as
- * it is also used to define headers sent down and up the wire/radio).
- */
-struct uwb_dev_addr {
- u8 data[2];
-} __attribute__((packed));
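
Since both address types go on the wire, the "exactly N packed bytes" requirement above can be pinned down at compile time; a minimal sketch, again assuming BUILD_BUG_ON() is available:

/* Illustrative: enforce the wire sizes the comments above demand. */
static inline void uwb_addr_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct uwb_mac_addr) != ETH_ALEN);	/* 6 bytes */
	BUILD_BUG_ON(sizeof(struct uwb_dev_addr) != 2);
}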
-
-
-/**
- * Types of UWB addresses
- *
- * Order matters (by size).
- */
-enum uwb_addr_type {
- UWB_ADDR_DEV = 0,
- UWB_ADDR_MAC = 1,
-};
-
-
-/** Size of a char buffer for printing a MAC/device address */
-enum { UWB_ADDR_STRSIZE = 32 };
-
-
-/** UWB WiMedia protocol IDs. */
-enum uwb_prid {
- UWB_PRID_WLP_RESERVED = 0x0000,
- UWB_PRID_WLP = 0x0001,
- UWB_PRID_WUSB_BOT = 0x0010,
- UWB_PRID_WUSB = 0x0010,
- UWB_PRID_WUSB_TOP = 0x001F,
-};
-
-
-/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */
-enum uwb_phy_rate {
- UWB_PHY_RATE_53 = 0,
- UWB_PHY_RATE_80,
- UWB_PHY_RATE_106,
- UWB_PHY_RATE_160,
- UWB_PHY_RATE_200,
- UWB_PHY_RATE_320,
- UWB_PHY_RATE_400,
- UWB_PHY_RATE_480,
- UWB_PHY_RATE_INVALID
-};
-
-
-/**
- * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78])
- */
-enum uwb_scan_type {
- UWB_SCAN_ONLY = 0,
- UWB_SCAN_OUTSIDE_BP,
- UWB_SCAN_WHILE_INACTIVE,
- UWB_SCAN_DISABLED,
- UWB_SCAN_ONLY_STARTTIME,
- UWB_SCAN_TOP
-};
-
-
-/** ACK Policy types (MBOA MAC[7.2.1.3]) */
-enum uwb_ack_pol {
- UWB_ACK_NO = 0,
- UWB_ACK_INM = 1,
- UWB_ACK_B = 2,
- UWB_ACK_B_REQ = 3,
-};
-
-
-/** DRP reservation types ([ECMA-368] table 106) */
-enum uwb_drp_type {
- UWB_DRP_TYPE_ALIEN_BP = 0,
- UWB_DRP_TYPE_HARD,
- UWB_DRP_TYPE_SOFT,
- UWB_DRP_TYPE_PRIVATE,
- UWB_DRP_TYPE_PCA,
-};
-
-
-/** DRP Reason Codes ([ECMA-368] table 107) */
-enum uwb_drp_reason {
- UWB_DRP_REASON_ACCEPTED = 0,
- UWB_DRP_REASON_CONFLICT,
- UWB_DRP_REASON_PENDING,
- UWB_DRP_REASON_DENIED,
- UWB_DRP_REASON_MODIFIED,
-};
-
-/** Relinquish Request Reason Codes ([ECMA-368] table 113) */
-enum uwb_relinquish_req_reason {
- UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0,
- UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION,
-};
-
-/**
- * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9])
- */
-enum uwb_drp_notif_reason {
- UWB_DRP_NOTIF_DRP_IE_RCVD = 0,
- UWB_DRP_NOTIF_CONFLICT,
- UWB_DRP_NOTIF_TERMINATE,
-};
-
-
-/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */
-struct uwb_drp_alloc {
- __le16 zone_bm;
- __le16 mas_bm;
-} __attribute__((packed));
-
-
-/** General MAC Header format (ECMA-368[16.2]) */
-struct uwb_mac_frame_hdr {
- __le16 Frame_Control;
- struct uwb_dev_addr DestAddr;
- struct uwb_dev_addr SrcAddr;
- __le16 Sequence_Control;
- __le16 Access_Information;
-} __attribute__((packed));
-
-
-/**
- * uwb_beacon_frame - a beacon frame including MAC headers
- *
- * [ECMA] section 16.3.
- */
-struct uwb_beacon_frame {
- struct uwb_mac_frame_hdr hdr;
- struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */
- u8 Beacon_Slot_Number;
- u8 Device_Control;
- u8 IEData[];
-} __attribute__((packed));
-
-
-/** Information Element codes (MBOA MAC[T54]) */
-enum uwb_ie {
- UWB_PCA_AVAILABILITY = 2,
- UWB_IE_DRP_AVAILABILITY = 8,
- UWB_IE_DRP = 9,
- UWB_BP_SWITCH_IE = 11,
- UWB_MAC_CAPABILITIES_IE = 12,
- UWB_PHY_CAPABILITIES_IE = 13,
- UWB_APP_SPEC_PROBE_IE = 15,
- UWB_IDENTIFICATION_IE = 19,
- UWB_MASTER_KEY_ID_IE = 20,
- UWB_RELINQUISH_REQUEST_IE = 21,
- UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */
- UWB_APP_SPEC_IE = 255,
-};
-
-
-/**
- * Header common to all Information Elements (IEs)
- */
-struct uwb_ie_hdr {
- u8 element_id; /* enum uwb_ie */
- u8 length;
-} __attribute__((packed));
-
-
-/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */
-struct uwb_ie_drp {
- struct uwb_ie_hdr hdr;
- __le16 drp_control;
- struct uwb_dev_addr dev_addr;
- struct uwb_drp_alloc allocs[];
-} __attribute__((packed));
-
-static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie)
-{
- return (le16_to_cpu(ie->drp_control) >> 0) & 0x7;
-}
-
-static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie)
-{
- return (le16_to_cpu(ie->drp_control) >> 3) & 0x7;
-}
-
-static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie)
-{
- return (le16_to_cpu(ie->drp_control) >> 6) & 0x7;
-}
-
-static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie)
-{
- return (le16_to_cpu(ie->drp_control) >> 9) & 0x1;
-}
-
-static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie)
-{
- return (le16_to_cpu(ie->drp_control) >> 10) & 0x1;
-}
-
-static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie)
-{
- return (le16_to_cpu(ie->drp_control) >> 11) & 0x1;
-}
-
-static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie)
-{
- return (le16_to_cpu(ie->drp_control) >> 12) & 0x1;
-}
-
-static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type)
-{
- u16 drp_control = le16_to_cpu(ie->drp_control);
- drp_control = (drp_control & ~(0x7 << 0)) | (type << 0);
- ie->drp_control = cpu_to_le16(drp_control);
-}
-
-static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index)
-{
- u16 drp_control = le16_to_cpu(ie->drp_control);
- drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3);
- ie->drp_control = cpu_to_le16(drp_control);
-}
-
-static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie,
- enum uwb_drp_reason reason_code)
-{
- u16 drp_control = le16_to_cpu(ie->drp_control);
-	drp_control = (drp_control & ~(0x7 << 6)) | (reason_code << 6);
- ie->drp_control = cpu_to_le16(drp_control);
-}
-
-static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status)
-{
- u16 drp_control = le16_to_cpu(ie->drp_control);
- drp_control = (drp_control & ~(0x1 << 9)) | (status << 9);
- ie->drp_control = cpu_to_le16(drp_control);
-}
-
-static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner)
-{
- u16 drp_control = le16_to_cpu(ie->drp_control);
- drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10);
- ie->drp_control = cpu_to_le16(drp_control);
-}
-
-static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker)
-{
- u16 drp_control = le16_to_cpu(ie->drp_control);
- drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11);
- ie->drp_control = cpu_to_le16(drp_control);
-}
-
-static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe)
-{
- u16 drp_control = le16_to_cpu(ie->drp_control);
- drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12);
- ie->drp_control = cpu_to_le16(drp_control);
-}
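
The setters above all do a read-modify-write on the little-endian drp_control word; a round-trip sketch with made-up field values:

/* Illustrative: build the control word of a DRP IE from scratch. */
static inline void example_fill_drp_control(struct uwb_ie_drp *ie)
{
	ie->drp_control = cpu_to_le16(0);	/* start from a clean word */
	uwb_ie_drp_set_type(ie, UWB_DRP_TYPE_HARD);
	uwb_ie_drp_set_stream_index(ie, 1);
	uwb_ie_drp_set_owner(ie, 1);		/* we own the reservation */
	uwb_ie_drp_set_status(ie, 0);		/* not yet established */
	/* Now uwb_ie_drp_type(ie) == UWB_DRP_TYPE_HARD, and so on. */
}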
-
-/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */
-struct uwb_ie_drp_avail {
- struct uwb_ie_hdr hdr;
- DECLARE_BITMAP(bmp, UWB_NUM_MAS);
-} __attribute__((packed));
-
-/* Relinquish Request IE ([ECMA-368] section 16.8.19). */
-struct uwb_relinquish_request_ie {
- struct uwb_ie_hdr hdr;
- __le16 relinquish_req_control;
- struct uwb_dev_addr dev_addr;
- struct uwb_drp_alloc allocs[];
-} __attribute__((packed));
-
-static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie)
-{
- return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf;
-}
-
-static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie,
- int reason_code)
-{
- u16 ctrl = le16_to_cpu(ie->relinquish_req_control);
- ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0);
- ie->relinquish_req_control = cpu_to_le16(ctrl);
-}
-
-/**
- * The Vendor ID is set to an OUI that indicates the vendor of the device.
- * ECMA-368 [16.8.10]
- */
-struct uwb_vendor_id {
- u8 data[3];
-} __attribute__((packed));
-
-/**
- * The device type ID
- * FIXME: clarify what this means
- * ECMA-368 [16.8.10]
- */
-struct uwb_device_type_id {
- u8 data[3];
-} __attribute__((packed));
-
-
-/**
- * UWB device information types
- * ECMA-368 [16.8.10]
- */
-enum uwb_dev_info_type {
- UWB_DEV_INFO_VENDOR_ID = 0,
- UWB_DEV_INFO_VENDOR_TYPE,
- UWB_DEV_INFO_NAME,
-};
-
-/**
- * UWB device information found in Identification IE
- * ECMA-368 [16.8.10]
- */
-struct uwb_dev_info {
- u8 type; /* enum uwb_dev_info_type */
- u8 length;
- u8 data[];
-} __attribute__((packed));
-
-/**
- * UWB Identification IE
- * ECMA-368 [16.8.10]
- */
-struct uwb_identification_ie {
- struct uwb_ie_hdr hdr;
- struct uwb_dev_info info[];
-} __attribute__((packed));
-
-/*
- * UWB Radio Controller
- *
- * These definitions are common to the Radio Control layers as
- * exported by the WUSB1.0 HWA and WHCI interfaces.
- */
-
-/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */
-struct uwb_rccb {
- u8 bCommandType; /* enum hwa_cet */
- __le16 wCommand; /* Command code */
- u8 bCommandContext; /* Context ID */
-} __attribute__((packed));
-
-
-/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */
-struct uwb_rceb {
- u8 bEventType; /* enum hwa_cet */
- __le16 wEvent; /* Event code */
- u8 bEventContext; /* Context ID */
-} __attribute__((packed));
-
-
-enum {
- UWB_RC_CET_GENERAL = 0, /* General Command/Event type */
- UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */
-};
-
-/* Commands to the radio controller */
-enum uwb_rc_cmd {
- UWB_RC_CMD_CHANNEL_CHANGE = 16,
- UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */
- UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */
- UWB_RC_CMD_RESET = 19,
- UWB_RC_CMD_SCAN = 20, /* Scan management */
- UWB_RC_CMD_SET_BEACON_FILTER = 21,
- UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */
- UWB_RC_CMD_SET_IE = 23, /* Information Element management */
- UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24,
- UWB_RC_CMD_SET_TX_POWER = 25,
- UWB_RC_CMD_SLEEP = 26,
- UWB_RC_CMD_START_BEACON = 27,
- UWB_RC_CMD_STOP_BEACON = 28,
- UWB_RC_CMD_BP_MERGE = 29,
- UWB_RC_CMD_SEND_COMMAND_FRAME = 30,
- UWB_RC_CMD_SET_ASIE_NOTIF = 31,
-};
-
-/* Notifications from the radio controller */
-enum uwb_rc_evt {
- UWB_RC_EVT_IE_RCV = 0,
- UWB_RC_EVT_BEACON = 1,
- UWB_RC_EVT_BEACON_SIZE = 2,
- UWB_RC_EVT_BPOIE_CHANGE = 3,
- UWB_RC_EVT_BP_SLOT_CHANGE = 4,
- UWB_RC_EVT_BP_SWITCH_IE_RCV = 5,
- UWB_RC_EVT_DEV_ADDR_CONFLICT = 6,
- UWB_RC_EVT_DRP_AVAIL = 7,
- UWB_RC_EVT_DRP = 8,
- UWB_RC_EVT_BP_SWITCH_STATUS = 9,
- UWB_RC_EVT_CMD_FRAME_RCV = 10,
- UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11,
- /* Events (command responses) use the same code as the command */
- UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535,
-};
-
-enum uwb_rc_extended_type_1_cmd {
- UWB_RC_SET_DAA_ENERGY_MASK = 32,
- UWB_RC_SET_NOTIFICATION_FILTER_EX = 33,
-};
-
-enum uwb_rc_extended_type_1_evt {
- UWB_RC_DAA_ENERGY_DETECTED = 0,
-};
-
-/* Radio Control Result Code. [WHCI] table 3-3. */
-enum {
- UWB_RC_RES_SUCCESS = 0,
- UWB_RC_RES_FAIL,
- UWB_RC_RES_FAIL_HARDWARE,
- UWB_RC_RES_FAIL_NO_SLOTS,
- UWB_RC_RES_FAIL_BEACON_TOO_LARGE,
- UWB_RC_RES_FAIL_INVALID_PARAMETER,
- UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL,
- UWB_RC_RES_FAIL_INVALID_IE_DATA,
- UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED,
- UWB_RC_RES_FAIL_CANCELLED,
- UWB_RC_RES_FAIL_INVALID_STATE,
- UWB_RC_RES_FAIL_INVALID_SIZE,
- UWB_RC_RES_FAIL_ACK_NOT_RECEIVED,
- UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF,
- UWB_RC_RES_FAIL_TIME_OUT = 255,
-};
-
-/* Confirm event. [WHCI] section 3.1.3.1 etc. */
-struct uwb_rc_evt_confirm {
- struct uwb_rceb rceb;
- u8 bResultCode;
-} __attribute__((packed));
-
-/* Device Address Management event. [WHCI] section 3.1.3.2. */
-struct uwb_rc_evt_dev_addr_mgmt {
- struct uwb_rceb rceb;
- u8 baAddr[ETH_ALEN];
- u8 bResultCode;
-} __attribute__((packed));
-
-
-/* Get IE Event. [WHCI] section 3.1.3.3. */
-struct uwb_rc_evt_get_ie {
- struct uwb_rceb rceb;
- __le16 wIELength;
- u8 IEData[];
-} __attribute__((packed));
-
-/* Set DRP IE Event. [WHCI] section 3.1.3.7. */
-struct uwb_rc_evt_set_drp_ie {
- struct uwb_rceb rceb;
- __le16 wRemainingSpace;
- u8 bResultCode;
-} __attribute__((packed));
-
-/* Set IE Event. [WHCI] section 3.1.3.8. */
-struct uwb_rc_evt_set_ie {
- struct uwb_rceb rceb;
- __le16 RemainingSpace;
- u8 bResultCode;
-} __attribute__((packed));
-
-/* Scan command. [WHCI] 3.1.3.5. */
-struct uwb_rc_cmd_scan {
- struct uwb_rccb rccb;
- u8 bChannelNumber;
- u8 bScanState;
- __le16 wStartTime;
-} __attribute__((packed));
-
-/* Set DRP IE command. [WHCI] section 3.1.3.7. */
-struct uwb_rc_cmd_set_drp_ie {
- struct uwb_rccb rccb;
- __le16 wIELength;
- struct uwb_ie_drp IEData[];
-} __attribute__((packed));
-
-/* Set IE command. [WHCI] section 3.1.3.8. */
-struct uwb_rc_cmd_set_ie {
- struct uwb_rccb rccb;
- __le16 wIELength;
- u8 IEData[];
-} __attribute__((packed));
-
-/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */
-struct uwb_rc_evt_set_daa_energy_mask {
- struct uwb_rceb rceb;
- __le16 wLength;
- u8 result;
-} __attribute__((packed));
-
-/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */
-struct uwb_rc_evt_set_notification_filter_ex {
- struct uwb_rceb rceb;
- __le16 wLength;
- u8 result;
-} __attribute__((packed));
-
-/* IE Received notification. [WHCI] section 3.1.4.1. */
-struct uwb_rc_evt_ie_rcv {
- struct uwb_rceb rceb;
- struct uwb_dev_addr SrcAddr;
- __le16 wIELength;
- u8 IEData[];
-} __attribute__((packed));
-
-/* Type of the received beacon. [WHCI] section 3.1.4.2. */
-enum uwb_rc_beacon_type {
- UWB_RC_BEACON_TYPE_SCAN = 0,
- UWB_RC_BEACON_TYPE_NEIGHBOR,
- UWB_RC_BEACON_TYPE_OL_ALIEN,
- UWB_RC_BEACON_TYPE_NOL_ALIEN,
-};
-
-/* Beacon received notification. [WHCI] 3.1.4.2. */
-struct uwb_rc_evt_beacon {
- struct uwb_rceb rceb;
- u8 bChannelNumber;
- u8 bBeaconType;
- __le16 wBPSTOffset;
- u8 bLQI;
- u8 bRSSI;
- __le16 wBeaconInfoLength;
- u8 BeaconInfo[];
-} __attribute__((packed));
-
-
-/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */
-struct uwb_rc_evt_beacon_size {
- struct uwb_rceb rceb;
- __le16 wNewBeaconSize;
-} __attribute__((packed));
-
-
-/* BPOIE Change notification. [WHCI] section 3.1.4.4. */
-struct uwb_rc_evt_bpoie_change {
- struct uwb_rceb rceb;
- __le16 wBPOIELength;
- u8 BPOIE[];
-} __attribute__((packed));
-
-
-/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */
-struct uwb_rc_evt_bp_slot_change {
- struct uwb_rceb rceb;
- u8 slot_info;
-} __attribute__((packed));
-
-static inline int uwb_rc_evt_bp_slot_change_slot_num(
- const struct uwb_rc_evt_bp_slot_change *evt)
-{
- return evt->slot_info & 0x7f;
-}
-
-static inline int uwb_rc_evt_bp_slot_change_no_slot(
- const struct uwb_rc_evt_bp_slot_change *evt)
-{
- return (evt->slot_info & 0x80) >> 7;
-}
-
-/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */
-struct uwb_rc_evt_bp_switch_ie_rcv {
- struct uwb_rceb rceb;
- struct uwb_dev_addr wSrcAddr;
- __le16 wIELength;
- u8 IEData[];
-} __attribute__((packed));
-
-/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */
-struct uwb_rc_evt_dev_addr_conflict {
- struct uwb_rceb rceb;
-} __attribute__((packed));
-
-/* DRP notification. [WHCI] section 3.1.4.9. */
-struct uwb_rc_evt_drp {
- struct uwb_rceb rceb;
- struct uwb_dev_addr src_addr;
- u8 reason;
- u8 beacon_slot_number;
- __le16 ie_length;
- u8 ie_data[];
-} __attribute__((packed));
-
-static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt)
-{
- return evt->reason & 0x0f;
-}
-
-
-/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */
-struct uwb_rc_evt_drp_avail {
- struct uwb_rceb rceb;
- DECLARE_BITMAP(bmp, UWB_NUM_MAS);
-} __attribute__((packed));
-
-/* BP switch status notification. [WHCI] section 3.1.4.10. */
-struct uwb_rc_evt_bp_switch_status {
- struct uwb_rceb rceb;
- u8 status;
- u8 slot_offset;
- __le16 bpst_offset;
- u8 move_countdown;
-} __attribute__((packed));
-
-/* Command Frame Received notification. [WHCI] section 3.1.4.11. */
-struct uwb_rc_evt_cmd_frame_rcv {
- struct uwb_rceb rceb;
- __le16 receive_time;
- struct uwb_dev_addr wSrcAddr;
- struct uwb_dev_addr wDstAddr;
- __le16 control;
- __le16 reserved;
- __le16 dataLength;
- u8 data[];
-} __attribute__((packed));
-
-/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */
-struct uwb_rc_evt_channel_change_ie_rcv {
- struct uwb_rceb rceb;
- struct uwb_dev_addr wSrcAddr;
- __le16 wIELength;
- u8 IEData[];
-} __attribute__((packed));
-
-/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
-struct uwb_rc_evt_daa_energy_detected {
- struct uwb_rceb rceb;
- __le16 wLength;
- u8 bandID;
- u8 reserved;
- u8 toneBmp[16];
-} __attribute__((packed));
-
-
-/**
- * Radio Control Interface Class Descriptor
- *
- * WUSB 1.0 [8.6.1.2]
- */
-struct uwb_rc_control_intf_class_desc {
- u8 bLength;
- u8 bDescriptorType;
- __le16 bcdRCIVersion;
-} __attribute__((packed));
-
-#endif /* #ifndef __LINUX__UWB_SPEC_H__ */
diff --git a/drivers/staging/uwb/include/umc.h b/drivers/staging/uwb/include/umc.h
deleted file mode 100644
index ddbceb39ad15..000000000000
--- a/drivers/staging/uwb/include/umc.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UWB Multi-interface Controller support.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * UMC (UWB Multi-interface Controller) capabilities (e.g., radio
- * controller, host controller) are presented as devices on the "umc"
- * bus.
- *
- * The radio controller is not strictly a UMC capability but it's
- * useful to present it as such.
- *
- * References:
- *
- * [WHCI] Wireless Host Controller Interface Specification for
- * Certified Wireless Universal Serial Bus, revision 0.95.
- *
- * How this works is kind of convoluted but simple. The whci.ko driver
- * loads when WHCI devices are detected. These WHCI devices expose
- * many devices in the same PCI function (they couldn't have reused
- * functions, no), so for each PCI function that exposes these many
- * devices, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
- * with umc_device_create() and adds it to the bus with
- * umc_device_register().
- *
- * umc_device_register() calls device_register() which will push the
- * bus management code to load your UMC driver's something_probe()
- * that you have registered for that capability code.
- *
- * Now when the WHCI device is removed, whci_remove() will go over
- * each umc_dev assigned to each of the PCI function's capabilities
- * and through whci_del_cap() call umc_device_unregister() on each
- * created umc_dev. Of course, if you are bound to the device, your
- * driver's something_remove() will be called.
- */
-
-#ifndef _LINUX_UWB_UMC_H_
-#define _LINUX_UWB_UMC_H_
-
-#include <linux/device.h>
-#include <linux/pci.h>
-
-/*
- * UMC capability IDs.
- *
- * 0x00 is reserved so use it for the radio controller device.
- *
- * [WHCI] table 2-8
- */
-#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */
-#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
-
-/**
- * struct umc_dev - UMC capability device
- *
- * @version: version of the specification this capability conforms to.
- * @cap_id: capability ID.
- * @bar: PCI Bar (64 bit) where the resource lies
- * @resource: register space resource.
- * @irq: interrupt line.
- */
-struct umc_dev {
- u16 version;
- u8 cap_id;
- u8 bar;
- struct resource resource;
- unsigned irq;
- struct device dev;
-};
-
-#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
-
-/**
- * struct umc_driver - UMC capability driver
- * @cap_id: supported capability ID.
- * @match: driver specific capability matching function.
- * @match_data: driver specific data for match() (e.g., a
- * table of pci_device_id's if umc_match_pci_id() is used).
- */
-struct umc_driver {
- char *name;
- u8 cap_id;
- int (*match)(struct umc_driver *, struct umc_dev *);
- const void *match_data;
-
- int (*probe)(struct umc_dev *);
- void (*remove)(struct umc_dev *);
- int (*pre_reset)(struct umc_dev *);
- int (*post_reset)(struct umc_dev *);
-
- struct device_driver driver;
-};
-
-#define to_umc_driver(d) container_of(d, struct umc_driver, driver)
-
-extern struct bus_type umc_bus_type;
-
-struct umc_dev *umc_device_create(struct device *parent, int n);
-int __must_check umc_device_register(struct umc_dev *umc);
-void umc_device_unregister(struct umc_dev *umc);
-
-int __must_check __umc_driver_register(struct umc_driver *umc_drv,
- struct module *mod,
- const char *mod_name);
-
-/**
- * umc_driver_register - register a UMC capability driver.
- * @umc_drv: pointer to the driver.
- */
-#define umc_driver_register(umc_drv) \
- __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME)
-
-void umc_driver_unregister(struct umc_driver *umc_drv);
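
A minimal sketch of a UMC capability driver built on this API; every name below is hypothetical:

/* Hypothetical driver binding to the WHCI radio controller capability. */
static int example_umc_probe(struct umc_dev *umc)
{
	void __iomem *base;

	base = ioremap(umc->resource.start, resource_size(&umc->resource));
	if (!base)
		return -ENOMEM;
	umc_set_drvdata(umc, base);	/* helper defined later in this header */
	return 0;
}

static void example_umc_remove(struct umc_dev *umc)
{
	iounmap(umc_get_drvdata(umc));
}

static struct umc_driver example_umc_driver = {
	.name	= "example-whci-rc",
	.cap_id	= UMC_CAP_ID_WHCI_RC,
	.probe	= example_umc_probe,
	.remove	= example_umc_remove,
};

/* Module init/exit would then call:
 *	umc_driver_register(&example_umc_driver);
 *	umc_driver_unregister(&example_umc_driver);
 */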
-
-/*
- * Utility function you can use to match (umc_driver->match) against a
- * null-terminated array of 'struct pci_device_id' in
- * umc_driver->match_data.
- */
-int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc);
-
-/**
- * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none
- * @umc_dev: UMC device whose parent PCI device we are looking for
- *
- * DIRTY!!! DON'T RELY ON THIS
- *
- * FIXME: This is as dirty as it gets, but we need some way to check
- * the correct type of umc_dev->parent (so that for example, we can
- * cast to pci_dev). Casting to pci_dev is necessary because at some
- * point we need to request resources from the device. Mapping is
- * easily over come (ioremap and stuff are bus agnostic), but hooking
- * up to some error handlers (such as pci error handlers) might need
- * this.
- *
- * THIS might (probably will) be removed in the future, so don't count
- * on it.
- */
-static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev)
-{
- struct pci_dev *pci_dev = NULL;
- if (dev_is_pci(umc_dev->dev.parent))
- pci_dev = to_pci_dev(umc_dev->dev.parent);
- return pci_dev;
-}
-
-/**
- * umc_dev_get() - reference a UMC device.
- * @umc_dev: Pointer to UMC device.
- *
- * NOTE: we are assuming in this whole scheme that the parent device
- * is referenced at _probe() time and unreferenced at _remove()
- * time by the parent's subsystem.
- */
-static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev)
-{
- get_device(&umc_dev->dev);
- return umc_dev;
-}
-
-/**
- * umc_dev_put() - unreference a UMC device.
- * @umc_dev: Pointer to UMC device.
- */
-static inline void umc_dev_put(struct umc_dev *umc_dev)
-{
- put_device(&umc_dev->dev);
-}
-
-/**
- * umc_set_drvdata - set UMC device's driver data.
- * @umc_dev: Pointer to UMC device.
- * @data: Data to set.
- */
-static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data)
-{
- dev_set_drvdata(&umc_dev->dev, data);
-}
-
-/**
- * umc_get_drvdata - recover UMC device's driver data.
- * @umc_dev: Pointer to UMC device.
- */
-static inline void *umc_get_drvdata(struct umc_dev *umc_dev)
-{
- return dev_get_drvdata(&umc_dev->dev);
-}
-
-int umc_controller_reset(struct umc_dev *umc);
-
-#endif /* #ifndef _LINUX_UWB_UMC_H_ */
diff --git a/drivers/staging/uwb/include/whci.h b/drivers/staging/uwb/include/whci.h
deleted file mode 100644
index 1a5c2cc2b008..000000000000
--- a/drivers/staging/uwb/include/whci.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * References:
- * [WHCI] Wireless Host Controller Interface Specification for
- * Certified Wireless Universal Serial Bus, revision 0.95.
- */
-#ifndef _LINUX_UWB_WHCI_H_
-#define _LINUX_UWB_WHCI_H_
-
-#include <linux/pci.h>
-
-/*
- * UWB interface capability registers (offsets from UWBBASE)
- *
- * [WHCI] section 2.2
- */
-#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */
-# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull)
-#define UWBCAPDATA(n) (8*(n))
-# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull)
-# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull)
-# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull)
-# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32))
-# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull)
-
-/* Size of the WHCI capability data (including the RC capability) for
- a device with n capabilities. */
-#define UWBCAPDATA_SIZE(n) (8 + 8*(n))
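
Decoding one capability slot with the macros above; le_readq() is defined further down in this header and the function itself is illustrative only:

/* Illustrative: pull apart capability descriptor n. */
static void example_decode_cap(struct device *dev,
			       void __iomem *uwb_base, int n)
{
	u64 c = le_readq(uwb_base + UWBCAPDATA(n));

	dev_info(dev, "cap %d: id %llu ver %#llx bar %llu offset %#llx size %llu bytes\n",
		 n, UWBCAPDATA_TO_CAP_ID(c), UWBCAPDATA_TO_VERSION(c),
		 UWBCAPDATA_TO_BAR(c), UWBCAPDATA_TO_OFFSET(c),
		 UWBCAPDATA_TO_SIZE(c));
}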
-
-
-/*
- * URC registers (offsets from URCBASE)
- *
- * [WHCI] section 2.3
- */
-#define URCCMD 0x00
-# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */
-# define URCCMD_RS (1 << 30) /* Run/Stop */
-# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */
-# define URCCMD_ACTIVE (1 << 15) /* Command is active */
-# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */
-# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */
-#define URCSTS 0x04
-# define URCSTS_EPS (1 << 17) /* Event Processing Status */
-# define URCSTS_HALTED (1 << 16) /* RC halted */
-# define URCSTS_HSE (1 << 10) /* Host System Error...fried */
-# define URCSTS_ER (1 << 9) /* Event Ready */
-# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */
-# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */
-# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */
-#define URCINTR 0x08
-# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */
-#define URCCMDADDR 0x10
-#define URCEVTADDR 0x18
-# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */
-
-
-/** Write 32 bit @value to little endian register at @addr */
-static inline
-void le_writel(u32 value, void __iomem *addr)
-{
- iowrite32(value, addr);
-}
-
-
-/** Read from 32 bit little endian register at @addr */
-static inline
-u32 le_readl(void __iomem *addr)
-{
- return ioread32(addr);
-}
-
-
-/** Write 64 bit @value to little endian register at @addr */
-static inline
-void le_writeq(u64 value, void __iomem *addr)
-{
- iowrite32(value, addr);
- iowrite32(value >> 32, addr + 4);
-}
-
-
-/** Read from 64 bit little endian register at @addr */
-static inline
-u64 le_readq(void __iomem *addr)
-{
- u64 value;
- value = ioread32(addr);
- value |= (u64)ioread32(addr + 4) << 32;
- return value;
-}
-
-extern int whci_wait_for(struct device *dev, u32 __iomem *reg,
- u32 mask, u32 result,
- unsigned long max_ms, const char *tag);
-
-#endif /* #ifndef _LINUX_UWB_WHCI_H_ */
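
A sketch of how the URC registers combine with whci_wait_for(); the 100 ms timeout is an arbitrary illustrative value:

/* Illustrative: hard-reset the radio controller and wait for the
 * RESET bit to self-clear. */
static int example_urc_reset(struct device *dev, void __iomem *urc_base)
{
	le_writel(URCCMD_RESET, urc_base + URCCMD);
	return whci_wait_for(dev, urc_base + URCCMD, URCCMD_RESET, 0,
			     100, "URC reset");
}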
diff --git a/drivers/staging/uwb/lc-dev.c b/drivers/staging/uwb/lc-dev.c
deleted file mode 100644
index 3e5c07fd6b10..000000000000
--- a/drivers/staging/uwb/lc-dev.c
+++ /dev/null
@@ -1,457 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Life cycle of devices
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/export.h>
-#include <linux/err.h>
-#include <linux/kdev_t.h>
-#include <linux/random.h>
-#include <linux/stat.h>
-#include "uwb-internal.h"
-
-/* We initialize addresses to 0xff (invalid, as it is bcast) */
-static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr)
-{
- memset(&addr->data, 0xff, sizeof(addr->data));
-}
-
-static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr)
-{
- memset(&addr->data, 0xff, sizeof(addr->data));
-}
-
-/*
- * Add callback @new to be called when an event occurs in @rc.
- */
-int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new)
-{
- if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
- return -ERESTARTSYS;
- list_add(&new->list_node, &rc->notifs_chain.list);
- mutex_unlock(&rc->notifs_chain.mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(uwb_notifs_register);
-
-/*
- * Remove event handler (callback)
- */
-int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry)
-{
- if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
- return -ERESTARTSYS;
- list_del(&entry->list_node);
- mutex_unlock(&rc->notifs_chain.mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(uwb_notifs_deregister);
-
-/*
- * Notify all event handlers of a given event on @rc
- *
- * We are called with a valid reference to the device, or NULL if the
- * event is not for a particular event (e.g., a BG join event).
- */
-void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event)
-{
- struct uwb_notifs_handler *handler;
- if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
- return;
- if (!list_empty(&rc->notifs_chain.list)) {
- list_for_each_entry(handler, &rc->notifs_chain.list, list_node) {
- handler->cb(handler->data, uwb_dev, event);
- }
- }
- mutex_unlock(&rc->notifs_chain.mutex);
-}
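
A sketch of a client of this notification chain; the handler and the assumption that UWB_NOTIF_ONAIR/UWB_NOTIF_OFFAIR are the events of interest are illustrative:

/* Illustrative: log devices coming on and off the air. */
static void example_notif_cb(void *data, struct uwb_dev *uwb_dev,
			     enum uwb_notifs event)
{
	if (!uwb_dev)
		return;		/* event not tied to a particular device */
	if (event == UWB_NOTIF_ONAIR)
		dev_info(&uwb_dev->dev, "on the air\n");
	else if (event == UWB_NOTIF_OFFAIR)
		dev_info(&uwb_dev->dev, "off the air\n");
}

static struct uwb_notifs_handler example_notifs = {
	.cb = example_notif_cb,
};

/* Register with uwb_notifs_register(rc, &example_notifs) and undo it
 * later with uwb_notifs_deregister(rc, &example_notifs). */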
-
-/*
- * Release the backing device of a uwb_dev that has been dynamically allocated.
- */
-static void uwb_dev_sys_release(struct device *dev)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
-
- uwb_bce_put(uwb_dev->bce);
- memset(uwb_dev, 0x69, sizeof(*uwb_dev));
- kfree(uwb_dev);
-}
-
-/*
- * Initialize a UWB device instance
- *
- * Alloc, zero and call this function.
- */
-void uwb_dev_init(struct uwb_dev *uwb_dev)
-{
- mutex_init(&uwb_dev->mutex);
- device_initialize(&uwb_dev->dev);
- uwb_dev->dev.release = uwb_dev_sys_release;
- uwb_dev_addr_init(&uwb_dev->dev_addr);
- uwb_mac_addr_init(&uwb_dev->mac_addr);
- bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS);
-}
-
-static ssize_t uwb_dev_EUI_48_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- char addr[UWB_ADDR_STRSIZE];
-
- uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr);
- return sprintf(buf, "%s\n", addr);
-}
-static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL);
-
-static ssize_t uwb_dev_DevAddr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- char addr[UWB_ADDR_STRSIZE];
-
- uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr);
- return sprintf(buf, "%s\n", addr);
-}
-static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL);
-
-/*
- * Show the BPST of this device.
- *
- * Calculated from the receive time of the device's beacon and its
- * slot number.
- */
-static ssize_t uwb_dev_BPST_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_beca_e *bce;
- struct uwb_beacon_frame *bf;
- u16 bpst;
-
- bce = uwb_dev->bce;
- mutex_lock(&bce->mutex);
- bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
- bpst = bce->be->wBPSTOffset
- - (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US);
- mutex_unlock(&bce->mutex);
-
- return sprintf(buf, "%d\n", bpst);
-}
-static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL);
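
Worked example of the computation above: with UWB_BEACON_SLOT_LENGTH_US = 85 from spec.h, a device in beacon slot 3 whose cache entry reports a wBPSTOffset of 300 µs yields a BPST of 300 - 3 * 85 = 45 µs.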
-
-/*
- * Show the IEs a device is beaconing
- *
- * We need to access the beacon cache, so we just lock it really
- * quick, print the IEs and unlock.
- *
- * We have a reference on the cache entry, so that should be
- * quite safe.
- */
-static ssize_t uwb_dev_IEs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
-
- return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE);
-}
-static DEVICE_ATTR(IEs, S_IRUGO, uwb_dev_IEs_show, NULL);
-
-static ssize_t uwb_dev_LQE_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_beca_e *bce = uwb_dev->bce;
-	ssize_t result;
-
- mutex_lock(&bce->mutex);
- result = stats_show(&uwb_dev->bce->lqe_stats, buf);
- mutex_unlock(&bce->mutex);
- return result;
-}
-
-static ssize_t uwb_dev_LQE_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_beca_e *bce = uwb_dev->bce;
- ssize_t result;
-
- mutex_lock(&bce->mutex);
- result = stats_store(&uwb_dev->bce->lqe_stats, buf, size);
- mutex_unlock(&bce->mutex);
- return result;
-}
-static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store);
-
-static ssize_t uwb_dev_RSSI_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_beca_e *bce = uwb_dev->bce;
-	ssize_t result;
-
- mutex_lock(&bce->mutex);
- result = stats_show(&uwb_dev->bce->rssi_stats, buf);
- mutex_unlock(&bce->mutex);
- return result;
-}
-
-static ssize_t uwb_dev_RSSI_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_beca_e *bce = uwb_dev->bce;
- ssize_t result;
-
- mutex_lock(&bce->mutex);
- result = stats_store(&uwb_dev->bce->rssi_stats, buf, size);
- mutex_unlock(&bce->mutex);
- return result;
-}
-static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store);
-
-
-static struct attribute *uwb_dev_attrs[] = {
- &dev_attr_EUI_48.attr,
- &dev_attr_DevAddr.attr,
- &dev_attr_BPST.attr,
- &dev_attr_IEs.attr,
- &dev_attr_LQE.attr,
- &dev_attr_RSSI.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(uwb_dev);
-
-/* UWB bus type. */
-struct bus_type uwb_bus_type = {
- .name = "uwb",
- .dev_groups = uwb_dev_groups,
-};
-
-/**
- * Device SYSFS registration
- */
-static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
-{
- struct device *dev;
-
- dev = &uwb_dev->dev;
- dev->parent = parent_dev;
- dev_set_drvdata(dev, uwb_dev);
-
- return device_add(dev);
-}
-
-
-static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev)
-{
- dev_set_drvdata(&uwb_dev->dev, NULL);
- device_del(&uwb_dev->dev);
-}
-
-
-/**
- * Register and initialize a new UWB device
- *
- * Did you call uwb_dev_init() on it?
- *
- * @parent_rc: the parent radio controller that has the link to the
- * device. When registering the UWB device that is a UWB
- * Radio Controller, we point back to it.
- *
- * If registering the device that is part of a radio, the caller has
- * set rc->uwb_dev.dev. Otherwise it is to be left NULL--a new one
- * will be allocated.
- */
-int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
- struct uwb_rc *parent_rc)
-{
- int result;
- struct device *dev;
-
- BUG_ON(uwb_dev == NULL);
- BUG_ON(parent_dev == NULL);
- BUG_ON(parent_rc == NULL);
-
- mutex_lock(&uwb_dev->mutex);
- dev = &uwb_dev->dev;
- uwb_dev->rc = parent_rc;
- result = __uwb_dev_sys_add(uwb_dev, parent_dev);
- if (result < 0)
- printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n",
- dev_name(dev), result);
- mutex_unlock(&uwb_dev->mutex);
- return result;
-}
-
-
-void uwb_dev_rm(struct uwb_dev *uwb_dev)
-{
- mutex_lock(&uwb_dev->mutex);
- __uwb_dev_sys_rm(uwb_dev);
- mutex_unlock(&uwb_dev->mutex);
-}
-
-
-static
-int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev)
-{
- struct uwb_dev *target_uwb_dev = __target_uwb_dev;
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- if (uwb_dev == target_uwb_dev) {
- uwb_dev_get(uwb_dev);
- return 1;
- } else
- return 0;
-}
-
-
-/**
- * Given a UWB device descriptor, validate and refcount it
- *
- * @returns NULL if the device does not exist or is quiescing; the ptr to
- * it otherwise.
- */
-struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
-{
- if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev))
- return uwb_dev;
- else
- return NULL;
-}
-EXPORT_SYMBOL_GPL(uwb_dev_try_get);
-
-
-/**
- * Remove a device from the system [workhorse for other functions]
- */
-int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc)
-{
- struct device *dev = &uwb_dev->dev;
- char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
-
- uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr);
- uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr);
- dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n",
- macbuf, devbuf,
- uwb_dev->dev.bus->name,
- rc ? dev_name(&(rc->uwb_dev.dev)) : "");
- uwb_dev_rm(uwb_dev);
- list_del(&uwb_dev->bce->node);
- uwb_bce_put(uwb_dev->bce);
- uwb_dev_put(uwb_dev); /* for the creation in _onair() */
-
- return 0;
-}
-
-
-/**
- * A device went off the air, clean up after it!
- *
- * This is called by the UWB Daemon (through the beacon purge function
- * uwb_bcn_cache_purge) when it is detected that a device has been in
- * radio silence for a while.
- *
- * If this device is actually a local radio controller we don't need
- * to go through the offair process, as it is not registered as that.
- *
- * NOTE: uwb_bcn_cache.mutex is held!
- */
-void uwbd_dev_offair(struct uwb_beca_e *bce)
-{
- struct uwb_dev *uwb_dev;
-
- uwb_dev = bce->uwb_dev;
- if (uwb_dev) {
- uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR);
- __uwb_dev_offair(uwb_dev, uwb_dev->rc);
- }
-}
-
-
-/**
- * A device went on the air, start it up!
- *
- * This is called by the UWB Daemon when it is detected that a device
- * has popped up in the radio range of the radio controller.
- *
- * It will just create the device, register the beacon and such,
- * and voila, done.
- *
- * NOTE: uwb_beca.mutex is held, bce->mutex is held
- */
-void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
-{
- int result;
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_dev *uwb_dev;
- char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
-
- uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr);
- uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr);
- uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL);
- if (uwb_dev == NULL) {
- dev_err(dev, "new device %s: Cannot allocate memory\n",
- macbuf);
- return;
- }
- uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */
- uwb_dev->dev.bus = &uwb_bus_type;
- uwb_dev->mac_addr = *bce->mac_addr;
- uwb_dev->dev_addr = bce->dev_addr;
- dev_set_name(&uwb_dev->dev, "%s", macbuf);
-
- /* plug the beacon cache */
- bce->uwb_dev = uwb_dev;
- uwb_dev->bce = bce;
- uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
-
- result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
- if (result < 0) {
- dev_err(dev, "new device %s: cannot instantiate device\n",
- macbuf);
- goto error_dev_add;
- }
-
- dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
- macbuf, devbuf, uwb_dev->dev.bus->name,
- dev_name(&(rc->uwb_dev.dev)));
- uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR);
- return;
-
-error_dev_add:
- bce->uwb_dev = NULL;
- uwb_bce_put(bce);
- kfree(uwb_dev);
- return;
-}
-
-/**
- * Iterate over the list of UWB devices, calling a @function on each
- *
- * See docs for device_for_each_child()....
- *
- * @rc: radio controller for the devices.
- * @function: function to call.
- * @priv: data to pass to @function.
- * @returns: 0 if no invocation of function() returned a value
- *           different from zero; otherwise, that value.
- */
-int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv)
-{
- return device_for_each_child(&rc->uwb_dev.dev, priv, function);
-}
-EXPORT_SYMBOL_GPL(uwb_dev_for_each);
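As a usage illustration of the iterator above, a caller could count the
devices seen by a radio controller like this (a minimal sketch;
count_uwb_dev() and the calling context are hypothetical, not part of the
deleted file):

static int count_uwb_dev(struct device *dev, void *priv)
{
	int *count = priv;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

	/* in a caller holding a valid struct uwb_rc *rc: */
	int count = 0;

	uwb_dev_for_each(rc, count_uwb_dev, &count);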
diff --git a/drivers/staging/uwb/lc-rc.c b/drivers/staging/uwb/lc-rc.c
deleted file mode 100644
index ee31b221cdc2..000000000000
--- a/drivers/staging/uwb/lc-rc.c
+++ /dev/null
@@ -1,569 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Life cycle of radio controllers
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- *
- * A UWB radio controller is also a UWB device, so it embeds one...
- *
- * List of RCs comes from the 'struct class uwb_rc_class'.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/random.h>
-#include <linux/kdev_t.h>
-#include <linux/etherdevice.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "uwb-internal.h"
-
-static int uwb_rc_index_match(struct device *dev, const void *data)
-{
- const int *index = data;
- struct uwb_rc *rc = dev_get_drvdata(dev);
-
- if (rc->index == *index)
- return 1;
- return 0;
-}
-
-static struct uwb_rc *uwb_rc_find_by_index(int index)
-{
- struct device *dev;
- struct uwb_rc *rc = NULL;
-
- dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
- if (dev) {
- rc = dev_get_drvdata(dev);
- put_device(dev);
- }
-
- return rc;
-}
-
-static int uwb_rc_new_index(void)
-{
- int index = 0;
-
- for (;;) {
- if (!uwb_rc_find_by_index(index))
- return index;
- if (++index < 0)
- index = 0;
- }
-}
-
-/**
- * Release the backing device of a uwb_rc that has been dynamically allocated.
- */
-static void uwb_rc_sys_release(struct device *dev)
-{
- struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
- struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev);
-
- uwb_rc_ie_release(rc);
- kfree(rc);
-}
-
-
-void uwb_rc_init(struct uwb_rc *rc)
-{
- struct uwb_dev *uwb_dev = &rc->uwb_dev;
-
- uwb_dev_init(uwb_dev);
- rc->uwb_dev.dev.class = &uwb_rc_class;
- rc->uwb_dev.dev.release = uwb_rc_sys_release;
- uwb_rc_neh_create(rc);
- rc->beaconing = -1;
- rc->scan_type = UWB_SCAN_DISABLED;
- INIT_LIST_HEAD(&rc->notifs_chain.list);
- mutex_init(&rc->notifs_chain.mutex);
- INIT_LIST_HEAD(&rc->uwb_beca.list);
- mutex_init(&rc->uwb_beca.mutex);
- uwb_drp_avail_init(rc);
- uwb_rc_ie_init(rc);
- uwb_rsv_init(rc);
- uwb_rc_pal_init(rc);
-}
-EXPORT_SYMBOL_GPL(uwb_rc_init);
-
-
-struct uwb_rc *uwb_rc_alloc(void)
-{
- struct uwb_rc *rc;
- rc = kzalloc(sizeof(*rc), GFP_KERNEL);
- if (rc == NULL)
- return NULL;
- uwb_rc_init(rc);
- return rc;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_alloc);
-
-/*
- * Show the ASIE that is broadcast in the UWB beacon by this uwb_rc device.
- */
-static ssize_t ASIE_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_rc *rc = uwb_dev->rc;
- struct uwb_ie_hdr *ie;
- void *ptr;
- size_t len;
- int result = 0;
-
- /* init empty buffer. */
- result = scnprintf(buf, PAGE_SIZE, "\n");
- mutex_lock(&rc->ies_mutex);
- /* walk IEData looking for an ASIE. */
- ptr = rc->ies->IEData;
- len = le16_to_cpu(rc->ies->wIELength);
- for (;;) {
- ie = uwb_ie_next(&ptr, &len);
- if (!ie)
- break;
- if (ie->element_id == UWB_APP_SPEC_IE) {
- result = uwb_ie_dump_hex(ie,
- ie->length + sizeof(struct uwb_ie_hdr),
- buf, PAGE_SIZE);
- break;
- }
- }
- mutex_unlock(&rc->ies_mutex);
-
- return result;
-}
-
-/*
- * Update the ASIE that is broadcast in the UWB beacon by this uwb_rc device.
- */
-static ssize_t ASIE_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_rc *rc = uwb_dev->rc;
- u8 ie_buf[255];
- int result, ie_len = 0;
- const char *cur_ptr = buf;
- struct uwb_ie_hdr *ie;
-
- /* empty string means clear the ASIE. */
- if (strlen(buf) <= 1) {
- uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);
- return size;
- }
-
- /* if non-empty string, convert string of hex chars to binary. */
- while (ie_len < sizeof(ie_buf)) {
- int char_count;
-
- if (sscanf(cur_ptr, " %02hhX %n",
- &(ie_buf[ie_len]), &char_count) > 0) {
- ++ie_len;
- /* skip chars read from cur_ptr. */
- cur_ptr += char_count;
- } else {
- break;
- }
- }
-
- /* validate IE length and type. */
- if (ie_len < sizeof(struct uwb_ie_hdr)) {
- dev_err(dev, "%s: Invalid ASIE size %d.\n", __func__, ie_len);
- return -EINVAL;
- }
-
- ie = (struct uwb_ie_hdr *)ie_buf;
- if (ie->element_id != UWB_APP_SPEC_IE) {
- dev_err(dev, "%s: Invalid IE element type size = 0x%02X.\n",
- __func__, ie->element_id);
- return -EINVAL;
- }
-
- /* bounds check length field from user. */
- if (ie->length > (ie_len - sizeof(struct uwb_ie_hdr)))
- ie->length = ie_len - sizeof(struct uwb_ie_hdr);
-
- /*
- * Valid ASIE received. Remove current ASIE then add the new one using
- * uwb_rc_ie_add.
- */
- uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);
-
- result = uwb_rc_ie_add(rc, ie, ie->length + sizeof(struct uwb_ie_hdr));
-
- return result >= 0 ? size : result;
-}
-static DEVICE_ATTR_RW(ASIE);
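For illustration, clearing the ASIE from user space takes a write of at
most one character, per the strlen(buf) <= 1 check in ASIE_store() above.
A hypothetical user-space sketch (the sysfs path is inferred from the
uwb_rc class and the "uwb%d" device naming, so treat it as an assumption):

#include <stdio.h>

int main(void)
{
	/* assumed path: /sys/class/<class>/<device>/ASIE */
	FILE *f = fopen("/sys/class/uwb_rc/uwb0/ASIE", "w");

	if (!f)
		return 1;
	fputc('\n', f);		/* strlen("\n") == 1, so the ASIE is removed */
	fclose(f);
	return 0;
}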
-
-static struct attribute *rc_attrs[] = {
- &dev_attr_mac_address.attr,
- &dev_attr_scan.attr,
- &dev_attr_beacon.attr,
- &dev_attr_ASIE.attr,
- NULL,
-};
-
-static const struct attribute_group rc_attr_group = {
- .attrs = rc_attrs,
-};
-
-/*
- * Registration of sysfs specific stuff
- */
-static int uwb_rc_sys_add(struct uwb_rc *rc)
-{
- return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
-}
-
-
-static void __uwb_rc_sys_rm(struct uwb_rc *rc)
-{
- sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
-}
-
-/**
- * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
- * @rc: the radio controller.
- *
- * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
- * then a random locally administered EUI-48 is generated and set on
- * the device. The probability of address collisions is sufficiently
- * unlikely (1/2^40 = 9.1e-13) that they're not checked for.
- */
-static
-int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
-{
- int result;
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_dev *uwb_dev = &rc->uwb_dev;
- char devname[UWB_ADDR_STRSIZE];
- struct uwb_mac_addr addr;
-
- result = uwb_rc_mac_addr_get(rc, &addr);
- if (result < 0) {
- dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
- return result;
- }
-
- if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
- addr.data[0] = 0x02; /* locally administered and unicast */
- get_random_bytes(&addr.data[1], sizeof(addr.data)-1);
-
- result = uwb_rc_mac_addr_set(rc, &addr);
- if (result < 0) {
- uwb_mac_addr_print(devname, sizeof(devname), &addr);
- dev_err(dev, "cannot set EUI-48 address %s: %d\n",
- devname, result);
- return result;
- }
- }
- uwb_dev->mac_addr = addr;
- return 0;
-}
-
-
-
-static int uwb_rc_setup(struct uwb_rc *rc)
-{
- int result;
- struct device *dev = &rc->uwb_dev.dev;
-
- result = uwb_radio_setup(rc);
- if (result < 0) {
- dev_err(dev, "cannot setup UWB radio: %d\n", result);
- goto error;
- }
- result = uwb_rc_mac_addr_setup(rc);
- if (result < 0) {
- dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
- goto error;
- }
- result = uwb_rc_dev_addr_assign(rc);
- if (result < 0) {
- dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
- goto error;
- }
- result = uwb_rc_ie_setup(rc);
- if (result < 0) {
- dev_err(dev, "cannot setup IE subsystem: %d\n", result);
- goto error_ie_setup;
- }
- result = uwb_rsv_setup(rc);
- if (result < 0) {
- dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
- goto error_rsv_setup;
- }
- uwb_dbg_add_rc(rc);
- return 0;
-
-error_rsv_setup:
- uwb_rc_ie_release(rc);
-error_ie_setup:
-error:
- return result;
-}
-
-
-/**
- * Register a new UWB radio controller
- *
- * Did you call uwb_rc_init() on your rc?
- *
- * We assume that this is being called with a > 0 refcount on
- * it [through ops->{get|put}_device()]. We'll take our own, though.
- *
- * @parent_dev is our real device, the one that provides the actual UWB device
- */
-int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
-{
- int result;
- struct device *dev;
- char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
-
- rc->index = uwb_rc_new_index();
-
- dev = &rc->uwb_dev.dev;
- dev_set_name(dev, "uwb%d", rc->index);
-
- rc->priv = priv;
-
- init_waitqueue_head(&rc->uwbd.wq);
- INIT_LIST_HEAD(&rc->uwbd.event_list);
- spin_lock_init(&rc->uwbd.event_list_lock);
-
- uwbd_start(rc);
-
- result = rc->start(rc);
- if (result < 0)
- goto error_rc_start;
-
- result = uwb_rc_setup(rc);
- if (result < 0) {
- dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
- goto error_rc_setup;
- }
-
- result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
- if (result < 0 && result != -EADDRNOTAVAIL)
- goto error_dev_add;
-
- result = uwb_rc_sys_add(rc);
- if (result < 0) {
- dev_err(parent_dev, "cannot register UWB radio controller "
- "dev attributes: %d\n", result);
- goto error_sys_add;
- }
-
- uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
- uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
- dev_info(dev,
- "new uwb radio controller (mac %s dev %s) on %s %s\n",
- macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
- rc->ready = 1;
- return 0;
-
-error_sys_add:
- uwb_dev_rm(&rc->uwb_dev);
-error_dev_add:
-error_rc_setup:
- rc->stop(rc);
-error_rc_start:
- uwbd_stop(rc);
- return result;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_add);
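The expected driver-side registration flow, per uwb_rc_alloc() and
uwb_rc_add() above, would look roughly like the sketch below. The
example_* ops are placeholders and their signatures are guessed from the
call sites in this file (rc->start, rc->cmd and rc->reset are used as
int-returning; rc->stop's return value is never used), so treat them as
assumptions:

static int example_start(struct uwb_rc *rc)
{
	return 0;			/* bring the hardware up */
}

static void example_stop(struct uwb_rc *rc)
{
	/* quiesce the hardware */
}

static int example_cmd(struct uwb_rc *rc, struct uwb_rccb *cmd, size_t size)
{
	return 0;			/* hand the command to the hardware */
}

static int example_reset(struct uwb_rc *rc)
{
	return 0;			/* hardware-specific reset */
}

static int example_probe(struct device *parent, void *priv)
{
	struct uwb_rc *rc;
	int ret;

	rc = uwb_rc_alloc();		/* kzalloc + uwb_rc_init() */
	if (rc == NULL)
		return -ENOMEM;

	rc->start = example_start;
	rc->stop = example_stop;
	rc->cmd = example_cmd;
	rc->reset = example_reset;

	ret = uwb_rc_add(rc, parent, priv);
	if (ret < 0)
		uwb_rc_put(rc);	/* assumed to drop the initial reference */
	return ret;
}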
-
-
-static int uwb_dev_offair_helper(struct device *dev, void *priv)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
-
- return __uwb_dev_offair(uwb_dev, uwb_dev->rc);
-}
-
-/*
- * Remove a Radio Controller; stop beaconing/scanning, disconnect all children
- */
-void uwb_rc_rm(struct uwb_rc *rc)
-{
- rc->ready = 0;
-
- uwb_dbg_del_rc(rc);
- uwb_rsv_remove_all(rc);
- uwb_radio_shutdown(rc);
-
- rc->stop(rc);
-
- uwbd_stop(rc);
- uwb_rc_neh_destroy(rc);
-
- uwb_dev_lock(&rc->uwb_dev);
- rc->priv = NULL;
- rc->cmd = NULL;
- uwb_dev_unlock(&rc->uwb_dev);
- mutex_lock(&rc->uwb_beca.mutex);
- uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
- __uwb_rc_sys_rm(rc);
- mutex_unlock(&rc->uwb_beca.mutex);
- uwb_rsv_cleanup(rc);
- uwb_beca_release(rc);
- uwb_dev_rm(&rc->uwb_dev);
-}
-EXPORT_SYMBOL_GPL(uwb_rc_rm);
-
-static int find_rc_try_get(struct device *dev, const void *data)
-{
- const struct uwb_rc *target_rc = data;
- struct uwb_rc *rc = dev_get_drvdata(dev);
-
- if (rc == NULL) {
- WARN_ON(1);
- return 0;
- }
- if (rc == target_rc) {
- if (rc->ready == 0)
- return 0;
- else
- return 1;
- }
- return 0;
-}
-
-/**
- * Given a radio controller descriptor, validate and refcount it
- *
- * @returns NULL if the rc does not exist or is quiescing; the ptr to
- * it otherwise.
- */
-struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
-{
- struct device *dev;
- struct uwb_rc *rc = NULL;
-
- dev = class_find_device(&uwb_rc_class, NULL, target_rc,
- find_rc_try_get);
- if (dev) {
- rc = dev_get_drvdata(dev);
- __uwb_rc_get(rc);
- put_device(dev);
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
-
-/*
- * RC get for external refcount acquirers...
- *
- * Increments the refcount of the device and its backend modules
- */
-static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
-{
- if (rc->ready == 0)
- return NULL;
- uwb_dev_get(&rc->uwb_dev);
- return rc;
-}
-
-static int find_rc_grandpa(struct device *dev, const void *data)
-{
- const struct device *grandpa_dev = data;
- struct uwb_rc *rc = dev_get_drvdata(dev);
-
- if (rc->uwb_dev.dev.parent->parent == grandpa_dev) {
- rc = uwb_rc_get(rc);
- return 1;
- }
- return 0;
-}
-
-/**
- * Locate and refcount a radio controller given a common grand-parent
- *
- * @grandpa_dev: Pointer to the 'grandparent' device structure.
- * @returns NULL If the rc does not exist or is quiescing; the ptr to
- * it otherwise, properly referenced.
- *
- * The Radio Control interface (or the UWB Radio Controller) is always
- * an interface of a device. The parent is the interface, the
- * grandparent is the device that encapsulates the interface.
- *
- * There is no need to lock here as the "grandpa" would be
- * refcounted by the target, and to remove the references, the
- * uwb_rc_class->sem would have to be taken--we hold it, ergo we
- * should be safe.
- */
-struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
-{
- struct device *dev;
- struct uwb_rc *rc = NULL;
-
- dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
- find_rc_grandpa);
- if (dev) {
- rc = dev_get_drvdata(dev);
- put_device(dev);
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
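Because find_rc_grandpa() takes a reference on a match, a hypothetical
caller pairs the lookup with uwb_rc_put() (defined below); grandpa_dev
stands for whatever device encapsulates the interface:

	struct uwb_rc *rc;

	rc = uwb_rc_get_by_grandpa(grandpa_dev);
	if (rc != NULL) {
		/* ... use rc ... */
		uwb_rc_put(rc);
	}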
-
-/**
- * Find a radio controller by device address
- *
- * @returns the pointer to the radio controller, properly referenced
- */
-static int find_rc_dev(struct device *dev, const void *data)
-{
- const struct uwb_dev_addr *addr = data;
- struct uwb_rc *rc = dev_get_drvdata(dev);
-
- if (rc == NULL) {
- WARN_ON(1);
- return 0;
- }
- if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) {
- rc = uwb_rc_get(rc);
- return 1;
- }
- return 0;
-}
-
-struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
-{
- struct device *dev;
- struct uwb_rc *rc = NULL;
-
- dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
- if (dev) {
- rc = dev_get_drvdata(dev);
- put_device(dev);
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev);
-
-/**
- * Drop a reference on a radio controller
- *
- * This is the version that should be done by entities external to the
- * UWB Radio Control stack (ie: clients of the API).
- */
-void uwb_rc_put(struct uwb_rc *rc)
-{
- __uwb_rc_put(rc);
-}
-EXPORT_SYMBOL_GPL(uwb_rc_put);
diff --git a/drivers/staging/uwb/neh.c b/drivers/staging/uwb/neh.c
deleted file mode 100644
index 1695584be412..000000000000
--- a/drivers/staging/uwb/neh.c
+++ /dev/null
@@ -1,606 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * WUSB Wire Adapter: Radio Control Interface (WUSB[8])
- * Notification and Event Handling
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI
- * card delivers a stream of notifications and events to the
- * notification end event endpoint or area. This code takes care of
- * getting a buffer with that data, breaking it up in separate
- * notifications and events and then deliver those.
- *
- * Events are answers to commands and they carry a context ID that
- * associates them with the command. Notifications are just that:
- * notifications; they come out of the blue and have a context ID of
- * zero. Think of the context ID as a kind of handle. The
- * uwb_rc_neh_* code deals with managing context IDs.
- *
- * This is why you require a handle to operate on a UWB host. When you
- * open a handle, a context ID is assigned to you.
- *
- * So, the way it is done is (sketched in code right after this comment):
- *
- * 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id)
- * 2. Issue command [rc->cmd(rc, ...)]
- * 3. Arm the timeout timer [uwb_rc_neh_arm()]
- * 4. Release the reference to the neh [uwb_rc_neh_put()]
- * 5. Wait for the callback
- * 6. Command result (RCEB) is passed to the callback
- *
- * If (2) fails, you should remove the handle [uwb_rc_neh_rm()]
- * instead of arming the timer.
- *
- * Handles are for use in *serialized*, single-threaded code.
- *
- * When the notification/event comes, the IRQ handler/endpoint
- * callback passes the data read to uwb_rc_neh_grok() which will break
- * it up in a discrete series of events, look up who is listening for
- * them and execute the pertinent callbacks.
- *
- * If the reader detects an error while reading the data stream, call
- * uwb_rc_neh_error().
- *
- * CONSTRAINTS/ASSUMPTIONS:
- *
- * - Most notifications/events are small (less than 0.5k), copying
- * around is ok.
- *
- * - Notifications/events are ALWAYS smaller than PAGE_SIZE
- *
- * - Notifications/events always come in a single piece (ie: a buffer
- * will always contain entire notifications/events).
- *
- * - we cannot know in advance how long each event is (because they
- * lack a length field in their header--smart move by the standards
- * body, btw). So we need a facility to get the event size given the
- * header. This is what the EST code does (notif/Event Size
- * Tables), check est.c--as well, you can associate the size with
- * the handle [w/ neh->extra_size()].
- *
- * - Most notifications/events are fixed size; only a few are variable
- * size (NEST takes care of that).
- *
- * - Listeners of events expect them, so they usually provide a
- * buffer, as they know the size. Listeners to notifications don't,
- * so we allocate their buffers dynamically.
- */
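A minimal sketch of steps 1-4 above, assuming a referenced rc, a
kmalloc'ed struct uwb_rccb *cmd of cmd_size bytes and a cb()/arg pair; it
mirrors what uwb_rc_cmd_async() in reset.c does rather than introducing
new API:

	struct uwb_rc_neh *neh;
	int result;

	neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event,
			     cb, arg);		/* 1: assigns a ctx id */
	if (IS_ERR(neh))
		return PTR_ERR(neh);

	result = rc->cmd(rc, cmd, cmd_size);	/* 2: issue the command */
	if (result < 0)
		uwb_rc_neh_rm(rc, neh);		/* (2) failed: remove handle */
	else
		uwb_rc_neh_arm(rc, neh);	/* 3: arm the timeout timer */
	uwb_rc_neh_put(neh);			/* 4: drop our reference */
	/* 5/6: cb() runs with the result RCEB, or -ETIMEDOUT on timeout */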
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/export.h>
-
-#include "uwb-internal.h"
-
-/*
- * UWB Radio Controller Notification/Event Handle
- *
- * Represents an entity waiting for an event coming from the UWB Radio
- * Controller with a given context id (context) and type (evt_type and
- * evt). On reception of the notification/event, the callback (cb) is
- * called with the event.
- *
- * If the timer expires before the event is received, the callback is
- * called with -ETIMEDOUT as the event size.
- */
-struct uwb_rc_neh {
- struct kref kref;
-
- struct uwb_rc *rc;
- u8 evt_type;
- __le16 evt;
- u8 context;
- u8 completed;
- uwb_rc_cmd_cb_f cb;
- void *arg;
-
- struct timer_list timer;
- struct list_head list_node;
-};
-
-static void uwb_rc_neh_timer(struct timer_list *t);
-
-static void uwb_rc_neh_release(struct kref *kref)
-{
- struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);
-
- kfree(neh);
-}
-
-static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
-{
- kref_get(&neh->kref);
-}
-
-/**
- * uwb_rc_neh_put - release reference to a neh
- * @neh: the neh
- */
-void uwb_rc_neh_put(struct uwb_rc_neh *neh)
-{
- kref_put(&neh->kref, uwb_rc_neh_release);
-}
-
-
-/**
- * Assigns @neh a context id from @rc's pool
- *
- * @rc: UWB Radio Controller descriptor; @rc->neh_lock taken
- * @neh: Notification/Event Handle
- * @returns 0 if context id was assigned ok; < 0 errno on error (if
- * all the context IDs are taken).
- *
- * (assumes @rc->neh_lock is held).
- *
- * NOTE: WUSB spec reserves context ids 0x00 for notifications and
- * 0xff is invalid, so they must not be used. Initialization
- * fills up those two in the bitmap so they are not allocated.
- *
- * We spread the allocation around to reduce the possibility of two
- * consecutive opened @neh's getting the same context ID assigned (to
- * avoid surprises with late events that timed out a long time ago). So
- * first we search from where @rc->ctx_roll is, if not found, we
- * search from zero.
- */
-static
-int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
-{
- int result;
- result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
- rc->ctx_roll++);
- if (result < UWB_RC_CTX_MAX)
- goto found;
- result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
- if (result < UWB_RC_CTX_MAX)
- goto found;
- return -ENFILE;
-found:
- set_bit(result, rc->ctx_bm);
- neh->context = result;
- return 0;
-}
-
-
-/** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */
-static
-void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
-{
- struct device *dev = &rc->uwb_dev.dev;
- if (neh->context == 0)
- return;
- if (test_bit(neh->context, rc->ctx_bm) == 0) {
- dev_err(dev, "context %u not set in bitmap\n",
- neh->context);
- WARN_ON(1);
- }
- clear_bit(neh->context, rc->ctx_bm);
- neh->context = 0;
-}
-
-/**
- * uwb_rc_neh_add - add a neh for a radio controller command
- * @rc: the radio controller
- * @cmd: the radio controller command
- * @expected_type: the type of the expected response event
- * @expected_event: the expected event ID
- * @cb: callback for when the event is received
- * @arg: argument for the callback
- *
- * Creates a neh and adds it to the list of those waiting for an
- * event. A context ID will be assigned to the command.
- */
-struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
- u8 expected_type, u16 expected_event,
- uwb_rc_cmd_cb_f cb, void *arg)
-{
- int result;
- unsigned long flags;
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_rc_neh *neh;
-
- neh = kzalloc(sizeof(*neh), GFP_KERNEL);
- if (neh == NULL) {
- result = -ENOMEM;
- goto error_kzalloc;
- }
-
- kref_init(&neh->kref);
- INIT_LIST_HEAD(&neh->list_node);
- timer_setup(&neh->timer, uwb_rc_neh_timer, 0);
-
- neh->rc = rc;
- neh->evt_type = expected_type;
- neh->evt = cpu_to_le16(expected_event);
- neh->cb = cb;
- neh->arg = arg;
-
- spin_lock_irqsave(&rc->neh_lock, flags);
- result = __uwb_rc_ctx_get(rc, neh);
- if (result >= 0) {
- cmd->bCommandContext = neh->context;
- list_add_tail(&neh->list_node, &rc->neh_list);
- uwb_rc_neh_get(neh);
- }
- spin_unlock_irqrestore(&rc->neh_lock, flags);
- if (result < 0)
- goto error_ctx_get;
-
- return neh;
-
-error_ctx_get:
- kfree(neh);
-error_kzalloc:
- dev_err(dev, "cannot open handle to radio controller: %d\n", result);
- return ERR_PTR(result);
-}
-
-static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
-{
- __uwb_rc_ctx_put(rc, neh);
- list_del(&neh->list_node);
-}
-
-/**
- * uwb_rc_neh_rm - remove a neh.
- * @rc: the radio controller
- * @neh: the neh to remove
- *
- * Remove an active neh immediately instead of waiting for the event
- * (or a time out).
- */
-void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rc->neh_lock, flags);
- __uwb_rc_neh_rm(rc, neh);
- spin_unlock_irqrestore(&rc->neh_lock, flags);
-
- del_timer_sync(&neh->timer);
- uwb_rc_neh_put(neh);
-}
-
-/**
- * uwb_rc_neh_arm - arm an event handler timeout timer
- *
- * @rc: UWB Radio Controller
- * @neh: Notification/event handler for @rc
- *
- * The timer is only armed if the neh is active.
- */
-void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rc->neh_lock, flags);
- if (neh->context)
- mod_timer(&neh->timer,
- jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
- spin_unlock_irqrestore(&rc->neh_lock, flags);
-}
-
-static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
-{
- (*neh->cb)(neh->rc, neh->arg, rceb, size);
- uwb_rc_neh_put(neh);
-}
-
-static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
-{
- return neh->evt_type == rceb->bEventType
- && neh->evt == rceb->wEvent
- && neh->context == rceb->bEventContext;
-}
-
-/**
- * Find the handle waiting for an RC Radio Control Event
- *
- * @rc: UWB Radio Controller
- * @rceb: Pointer to the RCEB buffer
- *
- * Searches @rc's neh list for a handle matching the event's type,
- * event code and context ID. If one is found, it is removed from the
- * list and returned (still holding its reference); otherwise NULL is
- * returned.
- */
-static
-struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
- const struct uwb_rceb *rceb)
-{
- struct uwb_rc_neh *neh = NULL, *h;
- unsigned long flags;
-
- spin_lock_irqsave(&rc->neh_lock, flags);
-
- list_for_each_entry(h, &rc->neh_list, list_node) {
- if (uwb_rc_neh_match(h, rceb)) {
- neh = h;
- break;
- }
- }
-
- if (neh)
- __uwb_rc_neh_rm(rc, neh);
-
- spin_unlock_irqrestore(&rc->neh_lock, flags);
-
- return neh;
-}
-
-
-/*
- * Process notifications coming from the radio control interface
- *
- * @rc: UWB Radio Control Interface descriptor
- * @rceb: Pointer to the event data; the buffer is handed over to the
- *        queued event (see uwb_evt->notif.rceb below).
- * @size: Size of the event data in bytes.
- *
- * This function is called by the event/notif handling subsystem when
- * notifications arrive (hwarc_probe() arms a notification/event handle
- * that calls back this function for every received notification; this
- * function then will rearm itself).
- *
- * Notification data buffers are dynamically allocated by the NEH
- * handling code in neh.c [uwb_rc_neh_grok_event()]. What is actually
- * allocated is space to contain the notification data.
- *
- * Buffers are prefixed with a Radio Control Event Block (RCEB) as
- * defined by the WUSB Wired-Adapter Radio Control interface. We
- * just use it for the notification code.
- *
- * On each case statement we just transcode the endianness of the
- * different fields. We declare a pointer to an RCI definition of an
- * event, and then to a UWB definition of the same event (which are
- * the same, remember); even if we use different pointers, they refer
- * to the same data.
- */
-static
-void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
-{
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_event *uwb_evt;
-
- if (size == -ESHUTDOWN)
- return;
- if (size < 0) {
- dev_err(dev, "ignoring event with error code %zu\n",
- size);
- return;
- }
-
- uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
- if (unlikely(uwb_evt == NULL)) {
- dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
- rceb->bEventType, le16_to_cpu(rceb->wEvent),
- rceb->bEventContext);
- return;
- }
- uwb_evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
- uwb_evt->ts_jiffies = jiffies;
- uwb_evt->type = UWB_EVT_TYPE_NOTIF;
- uwb_evt->notif.size = size;
- uwb_evt->notif.rceb = rceb;
-
- uwbd_event_queue(uwb_evt);
-}
-
-static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
-{
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_rc_neh *neh;
- struct uwb_rceb *notif;
- unsigned long flags;
-
- if (rceb->bEventContext == 0) {
- notif = kmalloc(size, GFP_ATOMIC);
- if (notif) {
- memcpy(notif, rceb, size);
- uwb_rc_notif(rc, notif, size);
- } else
- dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
- rceb->bEventType, le16_to_cpu(rceb->wEvent),
- rceb->bEventContext, size);
- } else {
- neh = uwb_rc_neh_lookup(rc, rceb);
- if (neh) {
- spin_lock_irqsave(&rc->neh_lock, flags);
- /* to guard against a timeout */
- neh->completed = 1;
- del_timer(&neh->timer);
- spin_unlock_irqrestore(&rc->neh_lock, flags);
- uwb_rc_neh_cb(neh, rceb, size);
- } else
- dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
- rceb->bEventType, le16_to_cpu(rceb->wEvent),
- rceb->bEventContext, size);
- }
-}
-
-/**
- * Given a buffer with one or more UWB RC events/notifications, break
- * them up and dispatch them.
- *
- * @rc: UWB Radio Controller
- * @buf: Buffer with the stream of notifications/events
- * @buf_size: Amount of data in the buffer
- *
- * Note each notification/event starts always with a 'struct
- * uwb_rceb', so the minimum size is 4 bytes.
- *
- * The device may pass us events formatted differently than expected.
- * These are first filtered, potentially creating a new event in a new
- * memory location. If a new event is created by the filter it is also
- * freed here.
- *
- * For each notif/event, tries to guess the size looking at the EST
- * tables, then looks for a neh that is waiting for that event and if
- * found, copies the payload to the neh's buffer and calls it back. If
- * not, the data is ignored.
- *
- * Note that if we can't find a size description in the EST tables, we
- * still might find a size in the 'neh' handle in uwb_rc_neh_lookup().
- *
- * Assumptions:
- *
- * @rc->neh_lock is NOT taken
- *
- * We keep track of various sizes here:
- * size: contains the size of the buffer that is processed for the
- * incoming event. this buffer may contain events that are not
- * formatted as WHCI.
- * real_size: the actual space taken by this event in the buffer.
- * We need to keep track of the real size of an event to be able to
- * advance the buffer correctly.
- * event_size: the size of the event as expected by the core layer
- * [OR] the size of the event after filtering. if the filtering
- * created a new event in a new memory location then this is
- * effectively the size of a new event buffer
- */
-void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
-{
- struct device *dev = &rc->uwb_dev.dev;
- void *itr;
- struct uwb_rceb *rceb;
- size_t size, real_size, event_size;
- int needtofree;
-
- itr = buf;
- size = buf_size;
- while (size > 0) {
- if (size < sizeof(*rceb)) {
- dev_err(dev, "not enough data in event buffer to "
- "process incoming events (%zu left, minimum is "
- "%zu)\n", size, sizeof(*rceb));
- break;
- }
-
- rceb = itr;
- if (rc->filter_event) {
- needtofree = rc->filter_event(rc, &rceb, size,
- &real_size, &event_size);
- if (needtofree < 0 && needtofree != -ENOANO) {
- dev_err(dev, "BUG: Unable to filter event "
- "(0x%02x/%04x/%02x) from "
- "device. \n", rceb->bEventType,
- le16_to_cpu(rceb->wEvent),
- rceb->bEventContext);
- break;
- }
- } else
- needtofree = -ENOANO;
- /* do real processing if there was no filtering or the
- * filtering didn't act */
- if (needtofree == -ENOANO) {
- ssize_t ret = uwb_est_find_size(rc, rceb, size);
- if (ret < 0)
- break;
- if (ret > size) {
- dev_err(dev, "BUG: hw sent incomplete event "
- "0x%02x/%04x/%02x (%zd bytes), only got "
- "%zu bytes. We don't handle that.\n",
- rceb->bEventType, le16_to_cpu(rceb->wEvent),
- rceb->bEventContext, ret, size);
- break;
- }
- real_size = event_size = ret;
- }
- uwb_rc_neh_grok_event(rc, rceb, event_size);
-
- if (needtofree == 1)
- kfree(rceb);
-
- itr += real_size;
- size -= real_size;
- }
-}
-EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
-
-
-/**
- * The entity that reads from the device notification/event channel has
- * detected an error.
- *
- * @rc: UWB Radio Controller
- * @error: Errno error code
- *
- */
-void uwb_rc_neh_error(struct uwb_rc *rc, int error)
-{
- struct uwb_rc_neh *neh;
- unsigned long flags;
-
- for (;;) {
- spin_lock_irqsave(&rc->neh_lock, flags);
- if (list_empty(&rc->neh_list)) {
- spin_unlock_irqrestore(&rc->neh_lock, flags);
- break;
- }
- neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
- __uwb_rc_neh_rm(rc, neh);
- spin_unlock_irqrestore(&rc->neh_lock, flags);
-
- del_timer_sync(&neh->timer);
- uwb_rc_neh_cb(neh, NULL, error);
- }
-}
-EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
-
-
-static void uwb_rc_neh_timer(struct timer_list *t)
-{
- struct uwb_rc_neh *neh = from_timer(neh, t, timer);
- struct uwb_rc *rc = neh->rc;
- unsigned long flags;
-
- spin_lock_irqsave(&rc->neh_lock, flags);
- if (neh->completed) {
- spin_unlock_irqrestore(&rc->neh_lock, flags);
- return;
- }
- if (neh->context)
- __uwb_rc_neh_rm(rc, neh);
- else
- neh = NULL;
- spin_unlock_irqrestore(&rc->neh_lock, flags);
-
- if (neh)
- uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
-}
-
-/** Initializes the @rc's neh subsystem */
-void uwb_rc_neh_create(struct uwb_rc *rc)
-{
- spin_lock_init(&rc->neh_lock);
- INIT_LIST_HEAD(&rc->neh_list);
- set_bit(0, rc->ctx_bm); /* 0 is reserved (see [WUSB] table 8-65) */
- set_bit(0xff, rc->ctx_bm); /* and 0xff is invalid */
- rc->ctx_roll = 1;
-}
-
-
-/** Releases the @rc's neh subsystem */
-void uwb_rc_neh_destroy(struct uwb_rc *rc)
-{
- unsigned long flags;
- struct uwb_rc_neh *neh;
-
- for (;;) {
- spin_lock_irqsave(&rc->neh_lock, flags);
- if (list_empty(&rc->neh_list)) {
- spin_unlock_irqrestore(&rc->neh_lock, flags);
- break;
- }
- neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node);
- __uwb_rc_neh_rm(rc, neh);
- spin_unlock_irqrestore(&rc->neh_lock, flags);
-
- del_timer_sync(&neh->timer);
- uwb_rc_neh_put(neh);
- }
-}
diff --git a/drivers/staging/uwb/pal.c b/drivers/staging/uwb/pal.c
deleted file mode 100644
index a541e646a603..000000000000
--- a/drivers/staging/uwb/pal.c
+++ /dev/null
@@ -1,128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UWB PAL support.
- *
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/debugfs.h>
-#include <linux/export.h>
-
-#include "uwb.h"
-#include "uwb-internal.h"
-
-/**
- * uwb_pal_init - initialize a UWB PAL
- * @pal: the PAL to initialize
- */
-void uwb_pal_init(struct uwb_pal *pal)
-{
- INIT_LIST_HEAD(&pal->node);
-}
-EXPORT_SYMBOL_GPL(uwb_pal_init);
-
-/**
- * uwb_pal_register - register a UWB PAL
- * @pal: the PAL
- *
- * The PAL must be initialized with uwb_pal_init().
- */
-int uwb_pal_register(struct uwb_pal *pal)
-{
- struct uwb_rc *rc = pal->rc;
- int ret;
-
- if (pal->device) {
- /* create a link to the uwb_rc in the PAL device's directory. */
- ret = sysfs_create_link(&pal->device->kobj,
- &rc->uwb_dev.dev.kobj, "uwb_rc");
- if (ret < 0)
- return ret;
- /* create a link to the PAL in the UWB device's directory. */
- ret = sysfs_create_link(&rc->uwb_dev.dev.kobj,
- &pal->device->kobj, pal->name);
- if (ret < 0) {
- sysfs_remove_link(&pal->device->kobj, "uwb_rc");
- return ret;
- }
- }
-
- pal->debugfs_dir = uwb_dbg_create_pal_dir(pal);
-
- mutex_lock(&rc->uwb_dev.mutex);
- list_add(&pal->node, &rc->pals);
- mutex_unlock(&rc->uwb_dev.mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(uwb_pal_register);
-
-static int find_rc(struct device *dev, const void *data)
-{
- const struct uwb_rc *target_rc = data;
- struct uwb_rc *rc = dev_get_drvdata(dev);
-
- if (rc == NULL) {
- WARN_ON(1);
- return 0;
- }
- if (rc == target_rc) {
- if (rc->ready == 0)
- return 0;
- else
- return 1;
- }
- return 0;
-}
-
-/**
- * Given a radio controller descriptor, see if it is registered.
- *
- * @returns false if the rc does not exist or is quiescing; true otherwise.
- */
-static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
-{
- struct device *dev;
-
- dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
-
- put_device(dev);
-
- return (dev != NULL);
-}
-
-/**
- * uwb_pal_unregister - unregister a UWB PAL
- * @pal: the PAL
- */
-void uwb_pal_unregister(struct uwb_pal *pal)
-{
- struct uwb_rc *rc = pal->rc;
-
- uwb_radio_stop(pal);
-
- mutex_lock(&rc->uwb_dev.mutex);
- list_del(&pal->node);
- mutex_unlock(&rc->uwb_dev.mutex);
-
- debugfs_remove(pal->debugfs_dir);
-
- if (pal->device) {
- /* remove link to the PAL in the UWB device's directory. */
- if (uwb_rc_class_device_exists(rc))
- sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
-
- /* remove link to uwb_rc in the PAL device's directory. */
- sysfs_remove_link(&pal->device->kobj, "uwb_rc");
- }
-}
-EXPORT_SYMBOL_GPL(uwb_pal_unregister);
-
-/**
- * uwb_rc_pal_init - initialize the PAL related parts of a radio controller
- * @rc: the radio controller
- */
-void uwb_rc_pal_init(struct uwb_rc *rc)
-{
- INIT_LIST_HEAD(&rc->pals);
-}
diff --git a/drivers/staging/uwb/radio.c b/drivers/staging/uwb/radio.c
deleted file mode 100644
index 6afb75ce1b5f..000000000000
--- a/drivers/staging/uwb/radio.c
+++ /dev/null
@@ -1,196 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UWB radio (channel) management.
- *
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/export.h>
-
-#include "uwb.h"
-#include "uwb-internal.h"
-
-
-static int uwb_radio_select_channel(struct uwb_rc *rc)
-{
- /*
- * Default to channel 9 (BG1, TFC1) unless the user has
- * selected a specific channel or there are no active PALs.
- */
- if (rc->active_pals == 0)
- return -1;
- if (rc->beaconing_forced)
- return rc->beaconing_forced;
- return 9;
-}
-
-
-/*
- * Notify all active PALs that the channel has changed.
- */
-static void uwb_radio_channel_changed(struct uwb_rc *rc, int channel)
-{
- struct uwb_pal *pal;
-
- list_for_each_entry(pal, &rc->pals, node) {
- if (pal->channel && channel != pal->channel) {
- pal->channel = channel;
- if (pal->channel_changed)
- pal->channel_changed(pal, pal->channel);
- }
- }
-}
-
-/*
- * Change to a new channel and notify any active PALs of the new
- * channel.
- *
- * When stopping the radio, PALs need to be notified first so they can
- * terminate any active reservations.
- */
-static int uwb_radio_change_channel(struct uwb_rc *rc, int channel)
-{
- int ret = 0;
- struct device *dev = &rc->uwb_dev.dev;
-
- dev_dbg(dev, "%s: channel = %d, rc->beaconing = %d\n", __func__,
- channel, rc->beaconing);
-
- if (channel == -1)
- uwb_radio_channel_changed(rc, channel);
-
- if (channel != rc->beaconing) {
- if (rc->beaconing != -1 && channel != -1) {
- /*
- * FIXME: should signal the channel change
- * with a Channel Change IE.
- */
- ret = uwb_radio_change_channel(rc, -1);
- if (ret < 0)
- return ret;
- }
- ret = uwb_rc_beacon(rc, channel, 0);
- }
-
- if (channel != -1)
- uwb_radio_channel_changed(rc, rc->beaconing);
-
- return ret;
-}
-
-/**
- * uwb_radio_start - request that the radio be started
- * @pal: the PAL making the request.
- *
- * If the radio is not already active, a suitable channel is selected
- * and beacons are started.
- */
-int uwb_radio_start(struct uwb_pal *pal)
-{
- struct uwb_rc *rc = pal->rc;
- int ret = 0;
-
- mutex_lock(&rc->uwb_dev.mutex);
-
- if (!pal->channel) {
- pal->channel = -1;
- rc->active_pals++;
- ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc));
- }
-
- mutex_unlock(&rc->uwb_dev.mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(uwb_radio_start);
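As an illustration of the PAL side of this API, a hypothetical PAL could
bring the radio up as below; the example_* names are placeholders and the
pal->name assignment assumes ->name is a plain string pointer (see the
sysfs_create_link() usage in pal.c above):

static void example_channel_changed(struct uwb_pal *pal, int channel)
{
	/* channel == -1 tells the PAL the radio has stopped */
}

static int example_pal_start(struct uwb_rc *rc, struct uwb_pal *pal)
{
	int ret;

	uwb_pal_init(pal);
	pal->rc = rc;
	pal->name = "example";
	pal->channel_changed = example_channel_changed;

	ret = uwb_pal_register(pal);
	if (ret < 0)
		return ret;
	return uwb_radio_start(pal);	/* selects a channel, starts beaconing */
}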
-
-/**
- * uwb_radio_stop - request that the radio be stopped.
- * @pal: the PAL making the request.
- *
- * Stops the radio if no other PAL is making use of it.
- */
-void uwb_radio_stop(struct uwb_pal *pal)
-{
- struct uwb_rc *rc = pal->rc;
-
- mutex_lock(&rc->uwb_dev.mutex);
-
- if (pal->channel) {
- rc->active_pals--;
- uwb_radio_change_channel(rc, uwb_radio_select_channel(rc));
- pal->channel = 0;
- }
-
- mutex_unlock(&rc->uwb_dev.mutex);
-}
-EXPORT_SYMBOL_GPL(uwb_radio_stop);
-
-/*
- * uwb_radio_force_channel - force a specific channel to be used
- * @rc: the radio controller.
- * @channel: the channel to use; -1 to force the radio to stop; 0 to
- * use the default channel selection algorithm.
- */
-int uwb_radio_force_channel(struct uwb_rc *rc, int channel)
-{
- int ret = 0;
-
- mutex_lock(&rc->uwb_dev.mutex);
-
- rc->beaconing_forced = channel;
- ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc));
-
- mutex_unlock(&rc->uwb_dev.mutex);
- return ret;
-}
-
-/*
- * uwb_radio_setup - setup the radio manager
- * @rc: the radio controller.
- *
- * The radio controller is reset to ensure it's in a known state
- * before it's used.
- */
-int uwb_radio_setup(struct uwb_rc *rc)
-{
- return uwb_rc_reset(rc);
-}
-
-/*
- * uwb_radio_reset_state - reset any radio manager state
- * @rc: the radio controller.
- *
- * All internal radio manager state is reset to values corresponding
- * to a reset radio controller.
- */
-void uwb_radio_reset_state(struct uwb_rc *rc)
-{
- struct uwb_pal *pal;
-
- mutex_lock(&rc->uwb_dev.mutex);
-
- list_for_each_entry(pal, &rc->pals, node) {
- if (pal->channel) {
- pal->channel = -1;
- if (pal->channel_changed)
- pal->channel_changed(pal, -1);
- }
- }
-
- rc->beaconing = -1;
- rc->scanning = -1;
-
- mutex_unlock(&rc->uwb_dev.mutex);
-}
-
-/*
- * uwb_radio_shutdown - shutdown the radio manager
- * @rc: the radio controller.
- *
- * The radio controller is reset.
- */
-void uwb_radio_shutdown(struct uwb_rc *rc)
-{
- uwb_radio_reset_state(rc);
- uwb_rc_reset(rc);
-}
diff --git a/drivers/staging/uwb/reset.c b/drivers/staging/uwb/reset.c
deleted file mode 100644
index 8fc7c14d903e..000000000000
--- a/drivers/staging/uwb/reset.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * UWB basic command support and radio reset
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME:
- *
- * - docs
- *
- * - Now we are serializing (using the uwb_dev->mutex) the command
- * execution; it should be parallelized as much as possible some
- * day.
- */
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-
-#include "uwb-internal.h"
-
-/**
- * Command result codes (WUSB1.0[T8-69])
- */
-static
-const char *__strerror[] = {
- "success",
- "failure",
- "hardware failure",
- "no more slots",
- "beacon is too large",
- "invalid parameter",
- "unsupported power level",
- "time out (wa) or invalid ie data (whci)",
- "beacon size exceeded",
- "cancelled",
- "invalid state",
- "invalid size",
- "ack not received",
- "no more asie notification",
-};
-
-
-/** Return a string matching the given error code */
-const char *uwb_rc_strerror(unsigned code)
-{
- if (code == 255)
- return "time out";
- if (code >= ARRAY_SIZE(__strerror))
- return "unknown error";
- return __strerror[code];
-}
-
-int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
- struct uwb_rccb *cmd, size_t cmd_size,
- u8 expected_type, u16 expected_event,
- uwb_rc_cmd_cb_f cb, void *arg)
-{
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_rc_neh *neh;
- int needtofree = 0;
- int result;
-
- uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */
- if (rc->priv == NULL) {
- uwb_dev_unlock(&rc->uwb_dev);
- return -ESHUTDOWN;
- }
-
- if (rc->filter_cmd) {
- needtofree = rc->filter_cmd(rc, &cmd, &cmd_size);
- if (needtofree < 0 && needtofree != -ENOANO) {
- dev_err(dev, "%s: filter error: %d\n",
- cmd_name, needtofree);
- uwb_dev_unlock(&rc->uwb_dev);
- return needtofree;
- }
- }
-
- neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg);
- if (IS_ERR(neh)) {
- result = PTR_ERR(neh);
- uwb_dev_unlock(&rc->uwb_dev);
- goto out;
- }
-
- result = rc->cmd(rc, cmd, cmd_size);
- uwb_dev_unlock(&rc->uwb_dev);
- if (result < 0)
- uwb_rc_neh_rm(rc, neh);
- else
- uwb_rc_neh_arm(rc, neh);
- uwb_rc_neh_put(neh);
-out:
- if (needtofree == 1)
- kfree(cmd);
- return result < 0 ? result : 0;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_cmd_async);
-
-struct uwb_rc_cmd_done_params {
- struct completion completion;
- struct uwb_rceb *reply;
- ssize_t reply_size;
-};
-
-static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg,
- struct uwb_rceb *reply, ssize_t reply_size)
-{
- struct uwb_rc_cmd_done_params *p = arg;
-
- if (reply_size > 0) {
- if (p->reply)
- reply_size = min(p->reply_size, reply_size);
- else
- p->reply = kmalloc(reply_size, GFP_ATOMIC);
-
- if (p->reply)
- memcpy(p->reply, reply, reply_size);
- else
- reply_size = -ENOMEM;
- }
- p->reply_size = reply_size;
- complete(&p->completion);
-}
-
-
-/**
- * Generic function for issuing commands to the Radio Control Interface
- *
- * @rc: UWB Radio Control descriptor
- * @cmd_name: Name of the command being issued (for error messages)
- * @cmd: Pointer to rccb structure containing the command;
- * normally you embed this structure as the first member of
- * the full command structure.
- * @cmd_size: Size of the whole command buffer pointed to by @cmd.
- * @reply: Pointer to where to store the reply
- * @reply_size: @reply's size
- * @expected_type: Expected type in the return event
- * @expected_event: Expected event code in the return event
- * @preply: Where to store a pointer to the received event data.
- * Once done with the data, free it with kfree().
- *
- * This function is generic; it works for commands that return a fixed
- * and known size or for commands that return a variable amount of data.
- *
- * If a buffer is provided, it is used, although the reply may be
- * truncated to the size of the buffer. If the buffer is NULL, then
- * one will be allocated in *preply with the whole contents of the
- * reply.
- *
- * @rc needs to be referenced
- */
-static
-ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
- struct uwb_rccb *cmd, size_t cmd_size,
- struct uwb_rceb *reply, size_t reply_size,
- u8 expected_type, u16 expected_event,
- struct uwb_rceb **preply)
-{
- ssize_t result = 0;
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_rc_cmd_done_params params;
-
- init_completion(&params.completion);
- params.reply = reply;
- params.reply_size = reply_size;
-
- result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size,
- expected_type, expected_event,
- uwb_rc_cmd_done, &params);
- if (result)
- return result;
-
- wait_for_completion(&params.completion);
-
- if (preply)
- *preply = params.reply;
-
- if (params.reply_size < 0)
- dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x "
- "reception failed: %d\n", cmd_name,
- expected_type, expected_event, cmd->bCommandContext,
- (int)params.reply_size);
- return params.reply_size;
-}
-
-
-/**
- * Generic function for issuing commands to the Radio Control Interface
- *
- * @rc: UWB Radio Control descriptor
- * @cmd_name: Name of the command being issued (for error messages)
- * @cmd: Pointer to rccb structure containing the command;
- * normally you embed this structure as the first member of
- * the full command structure.
- * @cmd_size: Size of the whole command buffer pointed to by @cmd.
- * @reply: Pointer to the beginning of the confirmation event
- * buffer. Normally bigger than a 'struct hwarc_rceb'.
- * You need to fill out reply->bEventType and reply->wEvent (in
- * cpu order) as the function will use them to verify the
- * confirmation event.
- * @reply_size: Size of the reply buffer
- *
- * The function checks that the length returned in the reply is at
- * least as big as @reply_size; if not, it will be deemed an error and
- * -EIO returned.
- *
- * @rc needs to be referenced
- */
-ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
- struct uwb_rccb *cmd, size_t cmd_size,
- struct uwb_rceb *reply, size_t reply_size)
-{
- struct device *dev = &rc->uwb_dev.dev;
- ssize_t result;
-
- result = __uwb_rc_cmd(rc, cmd_name,
- cmd, cmd_size, reply, reply_size,
- reply->bEventType, reply->wEvent, NULL);
-
- if (result > 0 && result < reply_size) {
- dev_err(dev, "%s: not enough data returned for decoding reply "
- "(%zu bytes received vs at least %zu needed)\n",
- cmd_name, result, reply_size);
- result = -EIO;
- }
- return result;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_cmd);
-
-
-/**
- * Generic function for issuing commands to the Radio Control
- * Interface that return an unknown amount of data
- *
- * @rc: UWB Radio Control descriptor
- * @cmd_name: Name of the command being issued (for error messages)
- * @cmd: Pointer to rccb structure containing the command;
- * normally you embed this structure as the first member of
- * the full command structure.
- * @cmd_size: Size of the whole command buffer pointed to by @cmd.
- * @expected_type: Expected type in the return event
- * @expected_event: Expected event code in the return event
- * @preply: Where to store a pointer to the received event data.
- * Once done with the data, free it with kfree().
- *
- * The function checks that the length returned in the reply is at
- * least as big as a 'struct uwb_rceb'; if not, it will be deemed an
- * error and -EIO returned.
- *
- * @rc needs to be referenced
- */
-ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
- struct uwb_rccb *cmd, size_t cmd_size,
- u8 expected_type, u16 expected_event,
- struct uwb_rceb **preply)
-{
- return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0,
- expected_type, expected_event, preply);
-}
-EXPORT_SYMBOL_GPL(uwb_rc_vcmd);
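A hypothetical caller of the variable-size path lets the core allocate
the reply and frees it afterwards, as the @preply documentation above
says (the event type/code values are illustrative placeholders):

	struct uwb_rceb *reply = NULL;
	ssize_t size;

	size = uwb_rc_vcmd(rc, "EXAMPLE", cmd, cmd_size,
			   UWB_RC_CET_GENERAL, UWB_RC_CMD_RESET, &reply);
	if (size > 0) {
		/* decode 'size' bytes starting at 'reply' */
		kfree(reply);
	}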
-
-
-/**
- * Reset a UWB Host Controller (and all radio settings)
- *
- * @rc: Host Controller descriptor
- * @returns: 0 if ok, < 0 errno code on error
- *
- * We put the command on kmalloc'ed memory as some arches cannot do
- * USB from the stack. The reply event is copied from a staging buffer,
- * so it can be on the stack. See WUSB1.0[8.6.2.4] for more details.
- */
-int uwb_rc_reset(struct uwb_rc *rc)
-{
- int result = -ENOMEM;
- struct uwb_rc_evt_confirm reply;
- struct uwb_rccb *cmd;
- size_t cmd_size = sizeof(*cmd);
-
- mutex_lock(&rc->uwb_dev.mutex);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- goto error_kzalloc;
- cmd->bCommandType = UWB_RC_CET_GENERAL;
- cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
- reply.rceb.bEventType = UWB_RC_CET_GENERAL;
- reply.rceb.wEvent = UWB_RC_CMD_RESET;
- result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size,
- &reply.rceb, sizeof(reply));
- if (result < 0)
- goto error_cmd;
- if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(&rc->uwb_dev.dev,
- "RESET: command execution failed: %s (%d)\n",
- uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
- result = -EIO;
- }
-error_cmd:
- kfree(cmd);
-error_kzalloc:
- mutex_unlock(&rc->uwb_dev.mutex);
- return result;
-}
-
-int uwbd_msg_handle_reset(struct uwb_event *evt)
-{
- struct uwb_rc *rc = evt->rc;
- int ret;
-
- dev_info(&rc->uwb_dev.dev, "resetting radio controller\n");
- ret = rc->reset(rc);
- if (ret < 0) {
- dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret);
- goto error;
- }
- return 0;
-error:
- /*
- * Nothing can be done except try the reset again. Wait a bit
- * to avoid reset loops during probe() or remove().
- */
- msleep(1000);
- uwb_rc_reset_all(rc);
- return ret;
-}
-
-/**
- * uwb_rc_reset_all - request a reset of the radio controller and PALs
- * @rc: the radio controller of the hardware device to be reset.
- *
- * The full hardware reset of the radio controller and all the PALs
- * will be scheduled.
- */
-void uwb_rc_reset_all(struct uwb_rc *rc)
-{
- struct uwb_event *evt;
-
- evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC);
- if (unlikely(evt == NULL))
- return;
-
- evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
- evt->ts_jiffies = jiffies;
- evt->type = UWB_EVT_TYPE_MSG;
- evt->message = UWB_EVT_MSG_RESET;
-
- uwbd_event_queue(evt);
-}
-EXPORT_SYMBOL_GPL(uwb_rc_reset_all);
-
-void uwb_rc_pre_reset(struct uwb_rc *rc)
-{
- rc->stop(rc);
- uwbd_flush(rc);
-
- uwb_radio_reset_state(rc);
- uwb_rsv_remove_all(rc);
-}
-EXPORT_SYMBOL_GPL(uwb_rc_pre_reset);
-
-int uwb_rc_post_reset(struct uwb_rc *rc)
-{
- int ret;
-
- ret = rc->start(rc);
- if (ret)
- goto out;
- ret = uwb_rc_mac_addr_set(rc, &rc->uwb_dev.mac_addr);
- if (ret)
- goto out;
- ret = uwb_rc_dev_addr_set(rc, &rc->uwb_dev.dev_addr);
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(uwb_rc_post_reset);
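One plausible shape for a transport's reset path is to bracket the
hardware-specific reset with the two helpers above (a sketch under that
assumption; example_hw_reset() is a placeholder):

static int example_hw_reset(struct uwb_rc *rc)
{
	return 0;	/* placeholder for the transport's hardware reset */
}

static int example_reset(struct uwb_rc *rc)
{
	int ret;

	uwb_rc_pre_reset(rc);			/* stop, flush, reset state */
	ret = example_hw_reset(rc);
	if (ret == 0)
		ret = uwb_rc_post_reset(rc);	/* restart, restore addresses */
	return ret;
}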
diff --git a/drivers/staging/uwb/rsv.c b/drivers/staging/uwb/rsv.c
deleted file mode 100644
index d593a41c3d8d..000000000000
--- a/drivers/staging/uwb/rsv.c
+++ /dev/null
@@ -1,1000 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UWB reservation management.
- *
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/export.h>
-
-#include "uwb.h"
-#include "uwb-internal.h"
-
-static void uwb_rsv_timer(struct timer_list *t);
-
-static const char *rsv_states[] = {
- [UWB_RSV_STATE_NONE] = "none ",
- [UWB_RSV_STATE_O_INITIATED] = "o initiated ",
- [UWB_RSV_STATE_O_PENDING] = "o pending ",
- [UWB_RSV_STATE_O_MODIFIED] = "o modified ",
- [UWB_RSV_STATE_O_ESTABLISHED] = "o established ",
- [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ",
- [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding",
- [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining",
- [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ",
- [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ",
- [UWB_RSV_STATE_T_CONFLICT] = "t conflict ",
- [UWB_RSV_STATE_T_PENDING] = "t pending ",
- [UWB_RSV_STATE_T_DENIED] = "t denied ",
- [UWB_RSV_STATE_T_RESIZED] = "t resized ",
- [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
- [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
- [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend",
- [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ",
-};
-
-static const char *rsv_types[] = {
- [UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
- [UWB_DRP_TYPE_HARD] = "hard",
- [UWB_DRP_TYPE_SOFT] = "soft",
- [UWB_DRP_TYPE_PRIVATE] = "private",
- [UWB_DRP_TYPE_PCA] = "pca",
-};
-
-bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
-{
- static const bool has_two_drp_ies[] = {
- [UWB_RSV_STATE_O_INITIATED] = false,
- [UWB_RSV_STATE_O_PENDING] = false,
- [UWB_RSV_STATE_O_MODIFIED] = false,
- [UWB_RSV_STATE_O_ESTABLISHED] = false,
- [UWB_RSV_STATE_O_TO_BE_MOVED] = false,
- [UWB_RSV_STATE_O_MOVE_COMBINING] = false,
- [UWB_RSV_STATE_O_MOVE_REDUCING] = false,
- [UWB_RSV_STATE_O_MOVE_EXPANDING] = true,
- [UWB_RSV_STATE_T_ACCEPTED] = false,
- [UWB_RSV_STATE_T_CONFLICT] = false,
- [UWB_RSV_STATE_T_PENDING] = false,
- [UWB_RSV_STATE_T_DENIED] = false,
- [UWB_RSV_STATE_T_RESIZED] = false,
- [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true,
- [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true,
- [UWB_RSV_STATE_T_EXPANDING_PENDING] = true,
- [UWB_RSV_STATE_T_EXPANDING_DENIED] = true,
- };
-
- return has_two_drp_ies[rsv->state];
-}
-
-/**
- * uwb_rsv_state_str - return a string for a reservation state
- * @state: the reservation state.
- */
-const char *uwb_rsv_state_str(enum uwb_rsv_state state)
-{
- if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
- return "unknown";
- return rsv_states[state];
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_state_str);
-
-/**
- * uwb_rsv_type_str - return a string for a reservation type
- * @type: the reservation type
- */
-const char *uwb_rsv_type_str(enum uwb_drp_type type)
-{
- if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
- return "invalid";
- return rsv_types[type];
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
-
-void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
-{
- struct device *dev = &rsv->rc->uwb_dev.dev;
- struct uwb_dev_addr devaddr;
- char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
-
- uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
- if (rsv->target.type == UWB_RSV_TARGET_DEV)
- devaddr = rsv->target.dev->dev_addr;
- else
- devaddr = rsv->target.devaddr;
- uwb_dev_addr_print(target, sizeof(target), &devaddr);
-
- dev_dbg(dev, "rsv %s %s -> %s: %s\n",
- text, owner, target, uwb_rsv_state_str(rsv->state));
-}
-
-static void uwb_rsv_release(struct kref *kref)
-{
- struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref);
-
- kfree(rsv);
-}
-
-void uwb_rsv_get(struct uwb_rsv *rsv)
-{
- kref_get(&rsv->kref);
-}
-
-void uwb_rsv_put(struct uwb_rsv *rsv)
-{
- kref_put(&rsv->kref, uwb_rsv_release);
-}
-
-/*
- * Get a free stream index for a reservation.
- *
- * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
- * the stream is allocated from a pool of per-RC stream indexes,
- * otherwise a unique stream index for the target is selected.
- */
-static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
-{
- struct uwb_rc *rc = rsv->rc;
- struct device *dev = &rc->uwb_dev.dev;
- unsigned long *streams_bm;
- int stream;
-
- switch (rsv->target.type) {
- case UWB_RSV_TARGET_DEV:
- streams_bm = rsv->target.dev->streams;
- break;
- case UWB_RSV_TARGET_DEVADDR:
- streams_bm = rc->uwb_dev.streams;
- break;
- default:
- return -EINVAL;
- }
-
- stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
- if (stream >= UWB_NUM_STREAMS) {
- dev_err(dev, "%s: no available stream found\n", __func__);
- return -EBUSY;
- }
-
- rsv->stream = stream;
- set_bit(stream, streams_bm);
-
- dev_dbg(dev, "get stream %d\n", rsv->stream);
-
- return 0;
-}
-
-static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
-{
- struct uwb_rc *rc = rsv->rc;
- struct device *dev = &rc->uwb_dev.dev;
- unsigned long *streams_bm;
-
- switch (rsv->target.type) {
- case UWB_RSV_TARGET_DEV:
- streams_bm = rsv->target.dev->streams;
- break;
- case UWB_RSV_TARGET_DEVADDR:
- streams_bm = rc->uwb_dev.streams;
- break;
- default:
- return;
- }
-
- clear_bit(rsv->stream, streams_bm);
-
- dev_dbg(dev, "put stream %d\n", rsv->stream);
-}
-
-void uwb_rsv_backoff_win_timer(struct timer_list *t)
-{
- struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
- struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
- struct device *dev = &rc->uwb_dev.dev;
-
- bow->can_reserve_extra_mases = true;
- if (bow->total_expired <= 4) {
- bow->total_expired++;
- } else {
-		/* after 4 backoff windows have expired we can exit
-		 * the backoff procedure */
- bow->total_expired = 0;
- bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
- }
- dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n", bow->total_expired, bow->n);
-
- /* try to relocate all the "to be moved" relocations */
- uwb_rsv_handle_drp_avail_change(rc);
-}
-
-void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
-{
- struct uwb_drp_backoff_win *bow = &rc->bow;
- struct device *dev = &rc->uwb_dev.dev;
- unsigned timeout_us;
-
- dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);
-
- bow->can_reserve_extra_mases = false;
-
-	if ((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
- return;
-
- bow->window <<= 1;
- bow->n = prandom_u32() & (bow->window - 1);
- dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n);
-
- /* reset the timer associated variables */
- timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
- bow->total_expired = 0;
- mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
-}
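-
-/*
- * Illustrative sketch, not part of the original file: the increment
- * above is truncated binary exponential backoff.  Assuming the window
- * is always a power of two, the doubled window, random slot and
- * resulting timeout reduce to:
- */
-#if 0
-static unsigned example_backoff_timeout_us(struct uwb_drp_backoff_win *bow,
-					   u32 rand)
-{
-	bow->window <<= 1;			/* double the window */
-	bow->n = rand & (bow->window - 1);	/* slot in [0, window) */
-	return bow->n * UWB_SUPERFRAME_LENGTH_US;
-}
-#endif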
-
-static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
-{
- int sframes = UWB_MAX_LOST_BEACONS;
-
- /*
- * Multicast reservations can become established within 1
-	 * superframe and should not be terminated if no response is
- * received.
- */
- if (rsv->state == UWB_RSV_STATE_NONE) {
- sframes = 0;
- } else if (rsv->is_multicast) {
- if (rsv->state == UWB_RSV_STATE_O_INITIATED
- || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
- || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
- || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
- sframes = 1;
- if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
- sframes = 0;
-
- }
-
- if (sframes > 0) {
- /*
- * Add an additional 2 superframes to account for the
- * time to send the SET DRP IE command.
- */
- unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
- mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
- } else
- del_timer(&rsv->timer);
-}
-
-/*
- * Update a reservation's state, and schedule an update of the
- * transmitted DRP IEs.
- */
-static void uwb_rsv_state_update(struct uwb_rsv *rsv,
- enum uwb_rsv_state new_state)
-{
- rsv->state = new_state;
- rsv->ie_valid = false;
-
- uwb_rsv_dump("SU", rsv);
-
- uwb_rsv_stroke_timer(rsv);
- uwb_rsv_sched_update(rsv->rc);
-}
-
-static void uwb_rsv_callback(struct uwb_rsv *rsv)
-{
- if (rsv->callback)
- rsv->callback(rsv);
-}
-
-void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
-{
- struct uwb_rsv_move *mv = &rsv->mv;
-
- if (rsv->state == new_state) {
- switch (rsv->state) {
- case UWB_RSV_STATE_O_ESTABLISHED:
- case UWB_RSV_STATE_O_MOVE_EXPANDING:
- case UWB_RSV_STATE_O_MOVE_COMBINING:
- case UWB_RSV_STATE_O_MOVE_REDUCING:
- case UWB_RSV_STATE_T_ACCEPTED:
- case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
- case UWB_RSV_STATE_T_RESIZED:
- case UWB_RSV_STATE_NONE:
- uwb_rsv_stroke_timer(rsv);
- break;
- default:
- /* Expecting a state transition so leave timer
- as-is. */
- break;
- }
- return;
- }
-
- uwb_rsv_dump("SC", rsv);
-
- switch (new_state) {
- case UWB_RSV_STATE_NONE:
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
- uwb_rsv_remove(rsv);
- uwb_rsv_callback(rsv);
- break;
- case UWB_RSV_STATE_O_INITIATED:
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
- break;
- case UWB_RSV_STATE_O_PENDING:
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
- break;
- case UWB_RSV_STATE_O_MODIFIED:
-		/* the companion bitmap holds the MASes to drop */
- bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
- break;
- case UWB_RSV_STATE_O_ESTABLISHED:
- if (rsv->state == UWB_RSV_STATE_O_MODIFIED
- || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
- uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
- rsv->needs_release_companion_mas = false;
- }
- uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
- uwb_rsv_callback(rsv);
- break;
- case UWB_RSV_STATE_O_MOVE_EXPANDING:
- rsv->needs_release_companion_mas = true;
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
- break;
- case UWB_RSV_STATE_O_MOVE_COMBINING:
- rsv->needs_release_companion_mas = false;
- uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
- bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
- rsv->mas.safe += mv->companion_mas.safe;
- rsv->mas.unsafe += mv->companion_mas.unsafe;
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
- break;
- case UWB_RSV_STATE_O_MOVE_REDUCING:
- bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
- rsv->needs_release_companion_mas = true;
- rsv->mas.safe = mv->final_mas.safe;
- rsv->mas.unsafe = mv->final_mas.unsafe;
- bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
- bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS);
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
- break;
- case UWB_RSV_STATE_T_ACCEPTED:
- case UWB_RSV_STATE_T_RESIZED:
- rsv->needs_release_companion_mas = false;
- uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
- uwb_rsv_callback(rsv);
- break;
- case UWB_RSV_STATE_T_DENIED:
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
- break;
- case UWB_RSV_STATE_T_CONFLICT:
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
- break;
- case UWB_RSV_STATE_T_PENDING:
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
- break;
- case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
- rsv->needs_release_companion_mas = true;
- uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
- uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
- break;
- default:
- dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
- uwb_rsv_state_str(new_state), new_state);
- }
-}
-
-static void uwb_rsv_handle_timeout_work(struct work_struct *work)
-{
- struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
- handle_timeout_work);
- struct uwb_rc *rc = rsv->rc;
-
- mutex_lock(&rc->rsvs_mutex);
-
- uwb_rsv_dump("TO", rsv);
-
- switch (rsv->state) {
- case UWB_RSV_STATE_O_INITIATED:
- if (rsv->is_multicast) {
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
- goto unlock;
- }
- break;
- case UWB_RSV_STATE_O_MOVE_EXPANDING:
- if (rsv->is_multicast) {
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
- goto unlock;
- }
- break;
- case UWB_RSV_STATE_O_MOVE_COMBINING:
- if (rsv->is_multicast) {
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
- goto unlock;
- }
- break;
- case UWB_RSV_STATE_O_MOVE_REDUCING:
- if (rsv->is_multicast) {
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
- goto unlock;
- }
- break;
- case UWB_RSV_STATE_O_ESTABLISHED:
- if (rsv->is_multicast)
- goto unlock;
- break;
- case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
- /*
-		 * The timeout could be for the main or the companion
-		 * DRP; assume it's for the companion and drop that
-		 * first.  A further timeout is required to drop the
-		 * main.
- */
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
- uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
- goto unlock;
- case UWB_RSV_STATE_NONE:
- goto unlock;
- default:
- break;
- }
-
- uwb_rsv_remove(rsv);
-
-unlock:
- mutex_unlock(&rc->rsvs_mutex);
-}
-
-static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
-{
- struct uwb_rsv *rsv;
-
- rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
- if (!rsv)
- return NULL;
-
- INIT_LIST_HEAD(&rsv->rc_node);
- INIT_LIST_HEAD(&rsv->pal_node);
- kref_init(&rsv->kref);
- timer_setup(&rsv->timer, uwb_rsv_timer, 0);
-
- rsv->rc = rc;
- INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
-
- return rsv;
-}
-
-/**
- * uwb_rsv_create - allocate and initialize a UWB reservation structure
- * @rc: the radio controller
- * @cb: callback to use when the reservation completes or terminates
- * @pal_priv: data private to the PAL to be passed in the callback
- *
- * The callback is called when the state of the reservation changes from:
- *
- * - pending to accepted
- * - pending to denied
- * - accepted to terminated
- * - pending to terminated
- */
-struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
-{
- struct uwb_rsv *rsv;
-
- rsv = uwb_rsv_alloc(rc);
- if (!rsv)
- return NULL;
-
- rsv->callback = cb;
- rsv->pal_priv = pal_priv;
-
- return rsv;
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_create);
-
-void uwb_rsv_remove(struct uwb_rsv *rsv)
-{
- uwb_rsv_dump("RM", rsv);
-
- if (rsv->state != UWB_RSV_STATE_NONE)
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
-
- if (rsv->needs_release_companion_mas)
- uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
- uwb_drp_avail_release(rsv->rc, &rsv->mas);
-
- if (uwb_rsv_is_owner(rsv))
- uwb_rsv_put_stream(rsv);
-
- uwb_dev_put(rsv->owner);
- if (rsv->target.type == UWB_RSV_TARGET_DEV)
- uwb_dev_put(rsv->target.dev);
-
- list_del_init(&rsv->rc_node);
- uwb_rsv_put(rsv);
-}
-
-/**
- * uwb_rsv_destroy - free a UWB reservation structure
- * @rsv: the reservation to free
- *
- * The reservation must already be terminated.
- */
-void uwb_rsv_destroy(struct uwb_rsv *rsv)
-{
- uwb_rsv_put(rsv);
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
-
-/**
- * uwb_rsv_establish - start a reservation establishment
- * @rsv: the reservation
- *
- * The PAL should fill in @rsv's owner, target, type, max_mas,
- * min_mas, max_interval and is_multicast fields. If the target is a
- * uwb_dev it must be referenced.
- *
- * The reservation's callback will be called when the reservation is
- * accepted, denied or times out.
- */
-int uwb_rsv_establish(struct uwb_rsv *rsv)
-{
- struct uwb_rc *rc = rsv->rc;
- struct uwb_mas_bm available;
- struct device *dev = &rc->uwb_dev.dev;
- int ret;
-
- mutex_lock(&rc->rsvs_mutex);
- ret = uwb_rsv_get_stream(rsv);
- if (ret) {
- dev_err(dev, "%s: uwb_rsv_get_stream failed: %d\n",
- __func__, ret);
- goto out;
- }
-
- rsv->tiebreaker = prandom_u32() & 1;
- /* get available mas bitmap */
- uwb_drp_available(rc, &available);
-
- ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
- if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
- ret = -EBUSY;
- uwb_rsv_put_stream(rsv);
- dev_err(dev, "%s: uwb_rsv_find_best_allocation failed: %d\n",
- __func__, ret);
- goto out;
- }
-
- ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
- if (ret != 0) {
- uwb_rsv_put_stream(rsv);
- dev_err(dev, "%s: uwb_drp_avail_reserve_pending failed: %d\n",
- __func__, ret);
- goto out;
- }
-
- uwb_rsv_get(rsv);
- list_add_tail(&rsv->rc_node, &rc->reservations);
- rsv->owner = &rc->uwb_dev;
- uwb_dev_get(rsv->owner);
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
-out:
- mutex_unlock(&rc->rsvs_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_establish);
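-
-/*
- * Illustrative sketch, not part of the original file: how a PAL might
- * create and establish a reservation.  The helper name, callback and
- * parameter values are assumptions for the example; the required
- * fields match the uwb_rsv_establish() kernel-doc above.
- */
-#if 0
-static int example_pal_reserve(struct uwb_rc *rc, struct uwb_dev *peer,
-			       uwb_rsv_cb_f cb, void *pal_priv)
-{
-	struct uwb_rsv *rsv;
-	int ret;
-
-	rsv = uwb_rsv_create(rc, cb, pal_priv);
-	if (!rsv)
-		return -ENOMEM;
-
-	rsv->target.type  = UWB_RSV_TARGET_DEV;
-	rsv->target.dev   = peer;	/* must be referenced by the PAL */
-	rsv->type         = UWB_DRP_TYPE_HARD;
-	rsv->max_mas      = 256;
-	rsv->min_mas      = 16;
-	rsv->max_interval = 2;
-	rsv->is_multicast = false;
-
-	ret = uwb_rsv_establish(rsv);
-	if (ret)
-		uwb_rsv_destroy(rsv);
-	return ret;
-}
-#endif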
-
-/**
- * uwb_rsv_modify - modify an already established reservation
- * @rsv: the reservation to modify
- * @max_mas: new maximum MAS to reserve
- * @min_mas: new minimum MAS to reserve
- * @max_interval: new max_interval to use
- *
- * FIXME: implement this once there are PALs that use it.
- */
-int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval)
-{
- return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_modify);
-
-/*
- * Move an already established reservation (rc->rsvs_mutex must be
- * held when this function is called).
- */
-int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
-{
- struct uwb_rc *rc = rsv->rc;
- struct uwb_drp_backoff_win *bow = &rc->bow;
- struct device *dev = &rc->uwb_dev.dev;
- struct uwb_rsv_move *mv;
- int ret = 0;
-
- if (!bow->can_reserve_extra_mases)
- return -EBUSY;
-
- mv = &rsv->mv;
-
- if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) {
-
- if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) {
- /* We want to move the reservation */
- bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS);
- uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
- }
- } else {
- dev_dbg(dev, "new allocation not found\n");
- }
-
- return ret;
-}
-
-/* Try to move every reservation in state O_ESTABLISHED or
- * O_TO_BE_MOVED, giving the MAS allocator an availability that is the
- * real one plus the MASes already allocated to the reservation. */
-void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
-{
- struct uwb_drp_backoff_win *bow = &rc->bow;
- struct uwb_rsv *rsv;
- struct uwb_mas_bm mas;
-
- if (!bow->can_reserve_extra_mases)
- return;
-
- list_for_each_entry(rsv, &rc->reservations, rc_node) {
- if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
- rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
- uwb_drp_available(rc, &mas);
- bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
- uwb_rsv_try_move(rsv, &mas);
- }
- }
-
-}
-
-/**
- * uwb_rsv_terminate - terminate an established reservation
- * @rsv: the reservation to terminate
- *
- * A reservation is terminated by removing the DRP IE from the beacon,
- * the other end will consider the reservation to be terminated when
- * it does not see the DRP IE for at least mMaxLostBeacons.
- *
- * If applicable, the reference to the target uwb_dev will be released.
- */
-void uwb_rsv_terminate(struct uwb_rsv *rsv)
-{
- struct uwb_rc *rc = rsv->rc;
-
- mutex_lock(&rc->rsvs_mutex);
-
- if (rsv->state != UWB_RSV_STATE_NONE)
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
-
- mutex_unlock(&rc->rsvs_mutex);
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
-
-/**
- * uwb_rsv_accept - accept a new reservation from a peer
- * @rsv: the reservation
- * @cb: call back for reservation changes
- * @pal_priv: data to be passed in the above call back
- *
- * Reservation requests from peers are denied unless a PAL accepts
- * them by calling this function.
- *
- * The PAL must call uwb_rsv_destroy() for all accepted reservations
- * before calling uwb_pal_unregister().
- */
-void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
-{
- uwb_rsv_get(rsv);
-
- rsv->callback = cb;
- rsv->pal_priv = pal_priv;
- rsv->state = UWB_RSV_STATE_T_ACCEPTED;
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_accept);
-
-/*
- * Is a received DRP IE for this reservation?
- */
-static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
- struct uwb_ie_drp *drp_ie)
-{
- struct uwb_dev_addr *rsv_src;
- int stream;
-
- stream = uwb_ie_drp_stream_index(drp_ie);
-
- if (rsv->stream != stream)
- return false;
-
- switch (rsv->target.type) {
-	case UWB_RSV_TARGET_DEVADDR:
-		return true;	/* stream index already matched above */
- case UWB_RSV_TARGET_DEV:
- if (uwb_ie_drp_owner(drp_ie))
- rsv_src = &rsv->owner->dev_addr;
- else
- rsv_src = &rsv->target.dev->dev_addr;
- return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
- }
- return false;
-}
-
-static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
- struct uwb_dev *src,
- struct uwb_ie_drp *drp_ie)
-{
- struct uwb_rsv *rsv;
- struct uwb_pal *pal;
- enum uwb_rsv_state state;
-
- rsv = uwb_rsv_alloc(rc);
- if (!rsv)
- return NULL;
-
- rsv->rc = rc;
- rsv->owner = src;
- uwb_dev_get(rsv->owner);
- rsv->target.type = UWB_RSV_TARGET_DEV;
- rsv->target.dev = &rc->uwb_dev;
- uwb_dev_get(&rc->uwb_dev);
- rsv->type = uwb_ie_drp_type(drp_ie);
- rsv->stream = uwb_ie_drp_stream_index(drp_ie);
- uwb_drp_ie_to_bm(&rsv->mas, drp_ie);
-
- /*
- * See if any PALs are interested in this reservation. If not,
- * deny the request.
- */
- rsv->state = UWB_RSV_STATE_T_DENIED;
- mutex_lock(&rc->uwb_dev.mutex);
- list_for_each_entry(pal, &rc->pals, node) {
- if (pal->new_rsv)
- pal->new_rsv(pal, rsv);
- if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
- break;
- }
- mutex_unlock(&rc->uwb_dev.mutex);
-
- list_add_tail(&rsv->rc_node, &rc->reservations);
- state = rsv->state;
- rsv->state = UWB_RSV_STATE_NONE;
-
- if (state == UWB_RSV_STATE_T_ACCEPTED
- && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
- /* FIXME: do something sensible here */
- } else {
- uwb_rsv_set_state(rsv, state);
- }
-
- return rsv;
-}
-
-/**
- * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation
- * @rsv: the reservation.
- * @mas: returns the available MAS.
- *
- * The usable MAS of a reservation may be less than the negotiated MAS
- * if alien BPs are present.
- */
-void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
-{
- bitmap_zero(mas->bm, UWB_NUM_MAS);
- bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
-}
-EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);
-
-/**
- * uwb_rsv_find - find a reservation for a received DRP IE.
- * @rc: the radio controller
- * @src: source of the DRP IE
- * @drp_ie: the DRP IE
- *
- * If the reservation cannot be found and the DRP IE is from a peer
- * attempting to establish a new reservation, create a new reservation
- * and add it to the list.
- */
-struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
- struct uwb_ie_drp *drp_ie)
-{
- struct uwb_rsv *rsv;
-
- list_for_each_entry(rsv, &rc->reservations, rc_node) {
- if (uwb_rsv_match(rsv, src, drp_ie))
- return rsv;
- }
-
- if (uwb_ie_drp_owner(drp_ie))
- return uwb_rsv_new_target(rc, src, drp_ie);
-
- return NULL;
-}
-
-/*
- * Go through all the reservations and check for timeouts and (if
- * necessary) update their DRP IEs.
- *
- * FIXME: look at building the SET_DRP_IE command here rather than
- * having to rescan the list in uwb_rc_send_all_drp_ie().
- */
-static bool uwb_rsv_update_all(struct uwb_rc *rc)
-{
- struct uwb_rsv *rsv, *t;
- bool ie_updated = false;
-
- list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
- if (!rsv->ie_valid) {
- uwb_drp_ie_update(rsv);
- ie_updated = true;
- }
- }
-
- return ie_updated;
-}
-
-void uwb_rsv_queue_update(struct uwb_rc *rc)
-{
- unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
-
- queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us));
-}
-
-/**
- * uwb_rsv_sched_update - schedule an update of the DRP IEs
- * @rc: the radio controller.
- *
- * To improve performance and ensure correctness with [ECMA-368] the
- * number of SET-DRP-IE commands that are sent is limited.
- *
- * DRP IE updates come from two sources: DRP events from the hardware
- * which all occur at the beginning of the superframe ('synchronous'
- * events) and reservation establishment/termination requests from
- * PALs or timers ('asynchronous' events).
- *
- * A delayed work ensures that all the synchronous events result in
- * one SET-DRP-IE command.
- *
- * Additional logic (the set_drp_ie_pending and rsv_updated_postponed
- * flags) will prevent an asynchronous event starting a SET-DRP-IE
- * command if one is currently awaiting a response.
- *
- * FIXME: this does leave a window where an asynchronous event can
- * delay the SET-DRP-IE for a synchronous event by one superframe.
- */
-void uwb_rsv_sched_update(struct uwb_rc *rc)
-{
- spin_lock_irq(&rc->rsvs_lock);
- if (!delayed_work_pending(&rc->rsv_update_work)) {
- if (rc->set_drp_ie_pending > 0) {
- rc->set_drp_ie_pending++;
- goto unlock;
- }
- uwb_rsv_queue_update(rc);
- }
-unlock:
- spin_unlock_irq(&rc->rsvs_lock);
-}
-
-/*
- * Update DRP IEs and, if necessary, the DRP Availability IE and send
- * the updated IEs to the radio controller.
- */
-static void uwb_rsv_update_work(struct work_struct *work)
-{
- struct uwb_rc *rc = container_of(work, struct uwb_rc,
- rsv_update_work.work);
- bool ie_updated;
-
- mutex_lock(&rc->rsvs_mutex);
-
- ie_updated = uwb_rsv_update_all(rc);
-
- if (!rc->drp_avail.ie_valid) {
- uwb_drp_avail_ie_update(rc);
- ie_updated = true;
- }
-
- if (ie_updated && (rc->set_drp_ie_pending == 0))
- uwb_rc_send_all_drp_ie(rc);
-
- mutex_unlock(&rc->rsvs_mutex);
-}
-
-static void uwb_rsv_alien_bp_work(struct work_struct *work)
-{
- struct uwb_rc *rc = container_of(work, struct uwb_rc,
- rsv_alien_bp_work.work);
- struct uwb_rsv *rsv;
-
- mutex_lock(&rc->rsvs_mutex);
-
- list_for_each_entry(rsv, &rc->reservations, rc_node) {
- if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
- uwb_rsv_callback(rsv);
- }
- }
-
- mutex_unlock(&rc->rsvs_mutex);
-}
-
-static void uwb_rsv_timer(struct timer_list *t)
-{
- struct uwb_rsv *rsv = from_timer(rsv, t, timer);
-
- queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
-}
-
-/**
- * uwb_rsv_remove_all - remove all reservations
- * @rc: the radio controller
- *
- * A DRP IE update is not done.
- */
-void uwb_rsv_remove_all(struct uwb_rc *rc)
-{
- struct uwb_rsv *rsv, *t;
-
- mutex_lock(&rc->rsvs_mutex);
- list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
- if (rsv->state != UWB_RSV_STATE_NONE)
- uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
- del_timer_sync(&rsv->timer);
- }
- /* Cancel any postponed update. */
- rc->set_drp_ie_pending = 0;
- mutex_unlock(&rc->rsvs_mutex);
-
- cancel_delayed_work_sync(&rc->rsv_update_work);
- flush_workqueue(rc->rsv_workq);
-
- mutex_lock(&rc->rsvs_mutex);
- list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
- uwb_rsv_remove(rsv);
- }
- mutex_unlock(&rc->rsvs_mutex);
-}
-
-void uwb_rsv_init(struct uwb_rc *rc)
-{
- INIT_LIST_HEAD(&rc->reservations);
- INIT_LIST_HEAD(&rc->cnflt_alien_list);
- mutex_init(&rc->rsvs_mutex);
- spin_lock_init(&rc->rsvs_lock);
- INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
- INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work);
- rc->bow.can_reserve_extra_mases = true;
- rc->bow.total_expired = 0;
- rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
- timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0);
-
- bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
-}
-
-int uwb_rsv_setup(struct uwb_rc *rc)
-{
- char name[16];
-
- snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
- rc->rsv_workq = create_singlethread_workqueue(name);
- if (rc->rsv_workq == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-void uwb_rsv_cleanup(struct uwb_rc *rc)
-{
- uwb_rsv_remove_all(rc);
- destroy_workqueue(rc->rsv_workq);
-}
diff --git a/drivers/staging/uwb/scan.c b/drivers/staging/uwb/scan.c
deleted file mode 100644
index ffc3f452302d..000000000000
--- a/drivers/staging/uwb/scan.c
+++ /dev/null
@@ -1,120 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Scanning management
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal
- * with each other. Currently seems that START_BEACON while
- *        with each other. It currently seems that START_BEACON while
- * state here. Clarification request sent by email on
- * 10/05/2005.
- * 10/28/2005 No clear answer heard--maybe we'll hack the API
- * so that when we start beaconing, if the HC is
- * scanning in a mode not compatible with beaconing
- * we just fail.
- */
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include "uwb-internal.h"
-
-
-/**
- * Start/stop scanning in a radio controller
- *
- * @rc: UWB Radio Controller
- * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12]
- * @type: Type of scanning to do.
- * @bpst_offset: value at which to start scanning (if type ==
- * UWB_SCAN_ONLY_STARTTIME)
- * @returns: 0 if ok, < 0 errno code on error
- *
- * We put the command in kmalloc'ed memory as some arches cannot do
- * USB from the stack. The reply event is copied from a staging
- * buffer, so it can be on the stack. See WUSB1.0[8.6.2.4] for more
- * details.
- */
-int uwb_rc_scan(struct uwb_rc *rc,
- unsigned channel, enum uwb_scan_type type,
- unsigned bpst_offset)
-{
- int result;
- struct uwb_rc_cmd_scan *cmd;
- struct uwb_rc_evt_confirm reply;
-
- result = -ENOMEM;
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- goto error_kzalloc;
- mutex_lock(&rc->uwb_dev.mutex);
- cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
- cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN);
- cmd->bChannelNumber = channel;
- cmd->bScanState = type;
- cmd->wStartTime = cpu_to_le16(bpst_offset);
- reply.rceb.bEventType = UWB_RC_CET_GENERAL;
- reply.rceb.wEvent = UWB_RC_CMD_SCAN;
- result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd),
- &reply.rceb, sizeof(reply));
- if (result < 0)
- goto error_cmd;
- if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
- dev_err(&rc->uwb_dev.dev,
- "SCAN: command execution failed: %s (%d)\n",
- uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
- result = -EIO;
- goto error_cmd;
- }
- rc->scanning = channel;
- rc->scan_type = type;
-error_cmd:
- mutex_unlock(&rc->uwb_dev.mutex);
- kfree(cmd);
-error_kzalloc:
- return result;
-}
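-
-/*
- * Illustrative sketch, not part of the original file: a typical caller
- * starting a plain scan.  The helper name and usage are assumptions
- * for the example.
- */
-#if 0
-static int example_start_scan(struct uwb_rc *rc, unsigned channel)
-{
-	/* bpst_offset is only used with UWB_SCAN_ONLY_STARTTIME */
-	return uwb_rc_scan(rc, channel, UWB_SCAN_ONLY, 0);
-}
-#endif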
-
-/*
- * Print scanning state
- */
-static ssize_t uwb_rc_scan_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_rc *rc = uwb_dev->rc;
- ssize_t result;
-
- mutex_lock(&rc->uwb_dev.mutex);
- result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type);
- mutex_unlock(&rc->uwb_dev.mutex);
- return result;
-}
-
-/*
- * Parse "<channel> <type> [<bpst_offset>]" from a sysfs write and
- * start the requested scan.
- */
-static ssize_t uwb_rc_scan_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct uwb_dev *uwb_dev = to_uwb_dev(dev);
- struct uwb_rc *rc = uwb_dev->rc;
- unsigned channel;
- unsigned type;
- unsigned bpst_offset = 0;
- ssize_t result = -EINVAL;
-
-	if (sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset) >= 2 &&
-	    type < UWB_SCAN_TOP)
-		result = uwb_rc_scan(rc, channel, type, bpst_offset);
-
- return result < 0 ? result : size;
-}
-
-/** Radio Control sysfs interface (declaration) */
-DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store);
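-
-/*
- * Illustrative note, not part of the original file: from userspace the
- * attribute takes "<channel> <type> [<bpst_offset>]".  The sysfs path
- * and values below are assumptions for the example:
- *
- *	echo "9 0 0" > /sys/class/uwb_rc/uwb0/scan
- */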
diff --git a/drivers/staging/uwb/umc-bus.c b/drivers/staging/uwb/umc-bus.c
deleted file mode 100644
index 8b931f66a720..000000000000
--- a/drivers/staging/uwb/umc-bus.c
+++ /dev/null
@@ -1,211 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Bus for UWB Multi-interface Controller capabilities.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/workqueue.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include "include/umc.h"
-
-static int umc_bus_pre_reset_helper(struct device *dev, void *data)
-{
- int ret = 0;
-
- if (dev->driver) {
- struct umc_dev *umc = to_umc_dev(dev);
- struct umc_driver *umc_drv = to_umc_driver(dev->driver);
-
- if (umc_drv->pre_reset)
- ret = umc_drv->pre_reset(umc);
- else
- device_release_driver(dev);
- }
- return ret;
-}
-
-static int umc_bus_post_reset_helper(struct device *dev, void *data)
-{
- int ret = 0;
-
- if (dev->driver) {
- struct umc_dev *umc = to_umc_dev(dev);
- struct umc_driver *umc_drv = to_umc_driver(dev->driver);
-
- if (umc_drv->post_reset)
- ret = umc_drv->post_reset(umc);
- } else
- ret = device_attach(dev);
-
- return ret;
-}
-
-/**
- * umc_controller_reset - reset the whole UMC controller
- * @umc: the UMC device for the radio controller.
- *
- * Drivers for all capabilities of the controller will have their
- * pre_reset methods called or be unbound from their device. Then all
- * post_reset methods will be called or the drivers will be rebound.
- *
- * Radio controllers must provide pre_reset and post_reset methods and
- * reset the hardware in their start method.
- *
- * If this is called while a probe() or remove() is in progress it
- * will return -EAGAIN and not perform the reset.
- */
-int umc_controller_reset(struct umc_dev *umc)
-{
- struct device *parent = umc->dev.parent;
- int ret = 0;
-
- if (!device_trylock(parent))
- return -EAGAIN;
- ret = device_for_each_child(parent, parent, umc_bus_pre_reset_helper);
- if (ret >= 0)
- ret = device_for_each_child(parent, parent, umc_bus_post_reset_helper);
- device_unlock(parent);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(umc_controller_reset);
-
-/**
- * umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device.
- * @umc_drv: umc driver with match_data pointing to a zero-terminated
- * table of pci_device_id's.
- * @umc: umc device whose parent is to be matched.
- */
-int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc)
-{
- const struct pci_device_id *id_table = umc_drv->match_data;
- struct pci_dev *pci;
-
- if (!dev_is_pci(umc->dev.parent))
- return 0;
-
- pci = to_pci_dev(umc->dev.parent);
- return pci_match_id(id_table, pci) != NULL;
-}
-EXPORT_SYMBOL_GPL(umc_match_pci_id);
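-
-/*
- * Illustrative sketch, not part of the original file: a UMC driver
- * restricting itself to specific PCI parents sets match_data to a
- * zero-terminated pci_device_id table and match to umc_match_pci_id.
- * The driver name, capability ID and PCI IDs are assumptions for the
- * example.
- */
-#if 0
-static const struct pci_device_id example_pci_ids[] = {
-	{ PCI_DEVICE(0x1234, 0x5678) },
-	{ /* terminator */ },
-};
-
-static struct umc_driver example_umc_driver = {
-	.name       = "example-umc",
-	.cap_id     = 0x01,
-	.match      = umc_match_pci_id,
-	.match_data = example_pci_ids,
-	/* .probe, .remove, .pre_reset and .post_reset go here */
-};
-#endif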
-
-static int umc_bus_rescan_helper(struct device *dev, void *data)
-{
- int ret = 0;
-
- if (!dev->driver)
- ret = device_attach(dev);
-
- return ret;
-}
-
-static void umc_bus_rescan(struct device *parent)
-{
- int err;
-
- /*
- * We can't use bus_rescan_devices() here as it deadlocks when
- * it tries to retake the dev->parent semaphore.
- */
- err = device_for_each_child(parent, NULL, umc_bus_rescan_helper);
- if (err < 0)
- printk(KERN_WARNING "%s: rescan of bus failed: %d\n",
- KBUILD_MODNAME, err);
-}
-
-static int umc_bus_match(struct device *dev, struct device_driver *drv)
-{
- struct umc_dev *umc = to_umc_dev(dev);
- struct umc_driver *umc_driver = to_umc_driver(drv);
-
- if (umc->cap_id == umc_driver->cap_id) {
- if (umc_driver->match)
- return umc_driver->match(umc_driver, umc);
- else
- return 1;
- }
- return 0;
-}
-
-static int umc_device_probe(struct device *dev)
-{
- struct umc_dev *umc;
- struct umc_driver *umc_driver;
- int err;
-
- umc_driver = to_umc_driver(dev->driver);
- umc = to_umc_dev(dev);
-
- get_device(dev);
- err = umc_driver->probe(umc);
- if (err)
- put_device(dev);
- else
- umc_bus_rescan(dev->parent);
-
- return err;
-}
-
-static int umc_device_remove(struct device *dev)
-{
- struct umc_dev *umc;
- struct umc_driver *umc_driver;
-
- umc_driver = to_umc_driver(dev->driver);
- umc = to_umc_dev(dev);
-
- umc_driver->remove(umc);
- put_device(dev);
- return 0;
-}
-
-static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct umc_dev *umc = to_umc_dev(dev);
-
- return sprintf(buf, "0x%02x\n", umc->cap_id);
-}
-static DEVICE_ATTR_RO(capability_id);
-
-static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct umc_dev *umc = to_umc_dev(dev);
-
- return sprintf(buf, "0x%04x\n", umc->version);
-}
-static DEVICE_ATTR_RO(version);
-
-static struct attribute *umc_dev_attrs[] = {
- &dev_attr_capability_id.attr,
- &dev_attr_version.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(umc_dev);
-
-struct bus_type umc_bus_type = {
- .name = "umc",
- .match = umc_bus_match,
- .probe = umc_device_probe,
- .remove = umc_device_remove,
- .dev_groups = umc_dev_groups,
-};
-EXPORT_SYMBOL_GPL(umc_bus_type);
-
-static int __init umc_bus_init(void)
-{
- return bus_register(&umc_bus_type);
-}
-module_init(umc_bus_init);
-
-static void __exit umc_bus_exit(void)
-{
- bus_unregister(&umc_bus_type);
-}
-module_exit(umc_bus_exit);
-
-MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus");
-MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/uwb/umc-dev.c b/drivers/staging/uwb/umc-dev.c
deleted file mode 100644
index 0c71caae00be..000000000000
--- a/drivers/staging/uwb/umc-dev.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UWB Multi-interface Controller device management.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include "include/umc.h"
-
-static void umc_device_release(struct device *dev)
-{
- struct umc_dev *umc = to_umc_dev(dev);
-
- kfree(umc);
-}
-
-/**
- * umc_device_create - allocate a child UMC device
- * @parent: parent of the new UMC device.
- * @n: index of the new device.
- *
- * The new UMC device will have the bus ID of its parent with '-n'
- * appended.
- */
-struct umc_dev *umc_device_create(struct device *parent, int n)
-{
- struct umc_dev *umc;
-
- umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL);
- if (umc) {
- dev_set_name(&umc->dev, "%s-%d", dev_name(parent), n);
- umc->dev.parent = parent;
- umc->dev.bus = &umc_bus_type;
- umc->dev.release = umc_device_release;
-
- umc->dev.dma_mask = parent->dma_mask;
- }
- return umc;
-}
-EXPORT_SYMBOL_GPL(umc_device_create);
-
-/**
- * umc_device_register - register a UMC device
- * @umc: pointer to the UMC device
- *
- * The memory resource for the UMC device is acquired and the device
- * registered with the system.
- */
-int umc_device_register(struct umc_dev *umc)
-{
- int err;
-
- err = request_resource(umc->resource.parent, &umc->resource);
- if (err < 0) {
- dev_err(&umc->dev, "can't allocate resource range %pR: %d\n",
- &umc->resource, err);
- goto error_request_resource;
- }
-
- err = device_register(&umc->dev);
- if (err < 0)
- goto error_device_register;
- return 0;
-
-error_device_register:
- put_device(&umc->dev);
- release_resource(&umc->resource);
-error_request_resource:
- return err;
-}
-EXPORT_SYMBOL_GPL(umc_device_register);
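-
-/*
- * Illustrative sketch, not part of the original file: how a parent
- * driver might create and register a child UMC device.  The helper
- * name and register-window size are assumptions for the example.
- */
-#if 0
-static struct umc_dev *example_add_capability(struct device *parent, int n,
-					      struct resource *parent_res)
-{
-	struct umc_dev *umc;
-
-	umc = umc_device_create(parent, n);
-	if (!umc)
-		return NULL;
-
-	/* describe the capability's register window before registering */
-	umc->resource.parent = parent_res;
-	umc->resource.start  = parent_res->start;
-	umc->resource.end    = parent_res->start + 0xff;
-	umc->resource.flags  = IORESOURCE_MEM;
-
-	if (umc_device_register(umc) < 0) {
-		/* error unwinding elided; see the function above */
-		return NULL;
-	}
-	return umc;
-}
-#endif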
-
-/**
- * umc_device_unregister - unregister a UMC device
- * @umc: pointer to the UMC device
- *
- * First we unregister the device, make sure the driver can do its
- * resource release thing and then we try to release any left-over
- * resources. We take a ref to the device, to make sure it doesn't
- * disappear under our feet.
- */
-void umc_device_unregister(struct umc_dev *umc)
-{
- struct device *dev;
- if (!umc)
- return;
- dev = get_device(&umc->dev);
- device_unregister(&umc->dev);
- release_resource(&umc->resource);
- put_device(dev);
-}
-EXPORT_SYMBOL_GPL(umc_device_unregister);
diff --git a/drivers/staging/uwb/umc-drv.c b/drivers/staging/uwb/umc-drv.c
deleted file mode 100644
index ed3bd220e8c2..000000000000
--- a/drivers/staging/uwb/umc-drv.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UWB Multi-interface Controller driver management.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include "include/umc.h"
-
-int __umc_driver_register(struct umc_driver *umc_drv, struct module *module,
- const char *mod_name)
-{
- umc_drv->driver.name = umc_drv->name;
- umc_drv->driver.owner = module;
- umc_drv->driver.mod_name = mod_name;
- umc_drv->driver.bus = &umc_bus_type;
-
- return driver_register(&umc_drv->driver);
-}
-EXPORT_SYMBOL_GPL(__umc_driver_register);
-
-/**
- * umc_driver_unregister - unregister a UMC capability driver.
- * @umc_drv: pointer to the driver.
- */
-void umc_driver_unregister(struct umc_driver *umc_drv)
-{
- driver_unregister(&umc_drv->driver);
-}
-EXPORT_SYMBOL_GPL(umc_driver_unregister);
diff --git a/drivers/staging/uwb/uwb-debug.c b/drivers/staging/uwb/uwb-debug.c
deleted file mode 100644
index dd14df219ef8..000000000000
--- a/drivers/staging/uwb/uwb-debug.c
+++ /dev/null
@@ -1,354 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Debug support
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- *
- * FIXME: doc
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/notifier.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
-
-#include "include/debug-cmd.h"
-#include "uwb-internal.h"
-
-/*
- * Debug interface
- *
- * Per radio controller debugfs files (in uwb/uwbN/):
- *
- * command: Flexible command interface (see <linux/uwb/debug-cmd.h>).
- *
- * reservations: information on reservations.
- *
- * accept: Set to true (Y or 1) to accept reservation requests from
- * peers.
- *
- * drp_avail: DRP availability information.
- */
-
-struct uwb_dbg {
- struct uwb_pal pal;
-
- bool accept;
- struct list_head rsvs;
-
- struct dentry *root_d;
- struct dentry *command_f;
- struct dentry *reservations_f;
- struct dentry *accept_f;
- struct dentry *drp_avail_f;
- spinlock_t list_lock;
-};
-
-static struct dentry *root_dir;
-
-static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv)
-{
- struct uwb_dbg *dbg = rsv->pal_priv;
-
- uwb_rsv_dump("debug", rsv);
-
- if (rsv->state == UWB_RSV_STATE_NONE) {
- spin_lock(&dbg->list_lock);
- list_del(&rsv->pal_node);
- spin_unlock(&dbg->list_lock);
- uwb_rsv_destroy(rsv);
- }
-}
-
-static int cmd_rsv_establish(struct uwb_rc *rc,
- struct uwb_dbg_cmd_rsv_establish *cmd)
-{
- struct uwb_mac_addr macaddr;
- struct uwb_rsv *rsv;
- struct uwb_dev *target;
- int ret;
-
- memcpy(&macaddr, cmd->target, sizeof(macaddr));
- target = uwb_dev_get_by_macaddr(rc, &macaddr);
- if (target == NULL)
- return -ENODEV;
-
- rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, rc->dbg);
- if (rsv == NULL) {
- uwb_dev_put(target);
- return -ENOMEM;
- }
-
- rsv->target.type = UWB_RSV_TARGET_DEV;
- rsv->target.dev = target;
- rsv->type = cmd->type;
- rsv->max_mas = cmd->max_mas;
- rsv->min_mas = cmd->min_mas;
- rsv->max_interval = cmd->max_interval;
-
- ret = uwb_rsv_establish(rsv);
- if (ret)
- uwb_rsv_destroy(rsv);
- else {
- spin_lock(&(rc->dbg)->list_lock);
- list_add_tail(&rsv->pal_node, &rc->dbg->rsvs);
- spin_unlock(&(rc->dbg)->list_lock);
- }
- return ret;
-}
-
-static int cmd_rsv_terminate(struct uwb_rc *rc,
- struct uwb_dbg_cmd_rsv_terminate *cmd)
-{
- struct uwb_rsv *rsv, *found = NULL;
- int i = 0;
-
- spin_lock(&(rc->dbg)->list_lock);
-
- list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) {
- if (i == cmd->index) {
- found = rsv;
- uwb_rsv_get(found);
- break;
- }
- i++;
- }
-
- spin_unlock(&(rc->dbg)->list_lock);
-
- if (!found)
- return -EINVAL;
-
- uwb_rsv_terminate(found);
- uwb_rsv_put(found);
-
- return 0;
-}
-
-static int cmd_ie_add(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_add)
-{
- return uwb_rc_ie_add(rc,
- (const struct uwb_ie_hdr *) ie_to_add->data,
- ie_to_add->len);
-}
-
-static int cmd_ie_rm(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_rm)
-{
- return uwb_rc_ie_rm(rc, ie_to_rm->data[0]);
-}
-
-static ssize_t command_write(struct file *file, const char __user *buf,
- size_t len, loff_t *off)
-{
- struct uwb_rc *rc = file->private_data;
- struct uwb_dbg_cmd cmd;
- int ret = 0;
-
- if (len != sizeof(struct uwb_dbg_cmd))
- return -EINVAL;
-
- if (copy_from_user(&cmd, buf, len) != 0)
- return -EFAULT;
-
- switch (cmd.type) {
- case UWB_DBG_CMD_RSV_ESTABLISH:
- ret = cmd_rsv_establish(rc, &cmd.rsv_establish);
- break;
- case UWB_DBG_CMD_RSV_TERMINATE:
- ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate);
- break;
- case UWB_DBG_CMD_IE_ADD:
- ret = cmd_ie_add(rc, &cmd.ie_add);
- break;
- case UWB_DBG_CMD_IE_RM:
- ret = cmd_ie_rm(rc, &cmd.ie_rm);
- break;
- case UWB_DBG_CMD_RADIO_START:
- ret = uwb_radio_start(&rc->dbg->pal);
- break;
- case UWB_DBG_CMD_RADIO_STOP:
- uwb_radio_stop(&rc->dbg->pal);
- break;
- default:
- return -EINVAL;
- }
-
- return ret < 0 ? ret : len;
-}
-
-static const struct file_operations command_fops = {
- .open = simple_open,
- .write = command_write,
- .read = NULL,
- .llseek = no_llseek,
- .owner = THIS_MODULE,
-};
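-
-/*
- * Illustrative sketch, not part of the original file: a userspace
- * helper driving the "command" debugfs file, compiled against
- * include/debug-cmd.h.  The debugfs path (and the uwb0 name) is an
- * assumption for the example; <fcntl.h> and <unistd.h> are needed.
- */
-#if 0
-static int example_radio_start(void)
-{
-	struct uwb_dbg_cmd cmd = { .type = UWB_DBG_CMD_RADIO_START };
-	int fd = open("/sys/kernel/debug/uwb/uwb0/command", O_WRONLY);
-
-	if (fd < 0)
-		return -1;
-	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
-		close(fd);
-		return -1;
-	}
-	return close(fd);
-}
-#endif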
-
-static int reservations_show(struct seq_file *s, void *p)
-{
- struct uwb_rc *rc = s->private;
- struct uwb_rsv *rsv;
-
- mutex_lock(&rc->rsvs_mutex);
-
- list_for_each_entry(rsv, &rc->reservations, rc_node) {
- struct uwb_dev_addr devaddr;
- char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
- bool is_owner;
-
- uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
- if (rsv->target.type == UWB_RSV_TARGET_DEV) {
- devaddr = rsv->target.dev->dev_addr;
- is_owner = &rc->uwb_dev == rsv->owner;
- } else {
- devaddr = rsv->target.devaddr;
- is_owner = true;
- }
- uwb_dev_addr_print(target, sizeof(target), &devaddr);
-
- seq_printf(s, "%c %s -> %s: %s\n",
- is_owner ? 'O' : 'T',
- owner, target, uwb_rsv_state_str(rsv->state));
- seq_printf(s, " stream: %d type: %s\n",
- rsv->stream, uwb_rsv_type_str(rsv->type));
- seq_printf(s, " %*pb\n", UWB_NUM_MAS, rsv->mas.bm);
- }
-
- mutex_unlock(&rc->rsvs_mutex);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(reservations);
-
-static int drp_avail_show(struct seq_file *s, void *p)
-{
- struct uwb_rc *rc = s->private;
-
- seq_printf(s, "global: %*pb\n", UWB_NUM_MAS, rc->drp_avail.global);
- seq_printf(s, "local: %*pb\n", UWB_NUM_MAS, rc->drp_avail.local);
- seq_printf(s, "pending: %*pb\n", UWB_NUM_MAS, rc->drp_avail.pending);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(drp_avail);
-
-static void uwb_dbg_channel_changed(struct uwb_pal *pal, int channel)
-{
- struct device *dev = &pal->rc->uwb_dev.dev;
-
- if (channel > 0)
- dev_info(dev, "debug: channel %d started\n", channel);
- else
- dev_info(dev, "debug: channel stopped\n");
-}
-
-static void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
-{
- struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal);
-
- if (dbg->accept) {
- spin_lock(&dbg->list_lock);
- list_add_tail(&rsv->pal_node, &dbg->rsvs);
- spin_unlock(&dbg->list_lock);
- uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg);
- }
-}
-
-/**
- * uwb_dbg_add_rc - add a debug interface for a radio controller
- * @rc: the radio controller
- */
-void uwb_dbg_add_rc(struct uwb_rc *rc)
-{
- rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL);
- if (rc->dbg == NULL)
- return;
-
- INIT_LIST_HEAD(&rc->dbg->rsvs);
- spin_lock_init(&(rc->dbg)->list_lock);
-
- uwb_pal_init(&rc->dbg->pal);
- rc->dbg->pal.rc = rc;
- rc->dbg->pal.channel_changed = uwb_dbg_channel_changed;
- rc->dbg->pal.new_rsv = uwb_dbg_new_rsv;
- uwb_pal_register(&rc->dbg->pal);
-
- if (root_dir) {
- rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev),
- root_dir);
- rc->dbg->command_f = debugfs_create_file("command", 0200,
- rc->dbg->root_d, rc,
- &command_fops);
- rc->dbg->reservations_f = debugfs_create_file("reservations", 0444,
- rc->dbg->root_d, rc,
- &reservations_fops);
- rc->dbg->accept_f = debugfs_create_bool("accept", 0644,
- rc->dbg->root_d,
- &rc->dbg->accept);
- rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444,
- rc->dbg->root_d, rc,
- &drp_avail_fops);
- }
-}
-
-/**
- * uwb_dbg_del_rc - remove a radio controller's debug interface
- * @rc: the radio controller
- */
-void uwb_dbg_del_rc(struct uwb_rc *rc)
-{
- struct uwb_rsv *rsv, *t;
-
- if (rc->dbg == NULL)
- return;
-
- list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) {
- uwb_rsv_terminate(rsv);
- }
-
- uwb_pal_unregister(&rc->dbg->pal);
-
- if (root_dir) {
- debugfs_remove(rc->dbg->drp_avail_f);
- debugfs_remove(rc->dbg->accept_f);
- debugfs_remove(rc->dbg->reservations_f);
- debugfs_remove(rc->dbg->command_f);
- debugfs_remove(rc->dbg->root_d);
- }
-}
-
-/**
- * uwb_dbg_init - initialize the debug interface sub-module
- */
-void uwb_dbg_init(void)
-{
- root_dir = debugfs_create_dir("uwb", NULL);
-}
-
-/**
- * uwb_dbg_exit - clean-up the debug interface sub-module
- */
-void uwb_dbg_exit(void)
-{
- debugfs_remove(root_dir);
-}
-
-/**
- * uwb_dbg_create_pal_dir - create a debugfs directory for a PAL
- * @pal: The PAL.
- */
-struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal)
-{
- struct uwb_rc *rc = pal->rc;
-
- if (root_dir && rc->dbg && rc->dbg->root_d && pal->name)
- return debugfs_create_dir(pal->name, rc->dbg->root_d);
- return NULL;
-}
diff --git a/drivers/staging/uwb/uwb-internal.h b/drivers/staging/uwb/uwb-internal.h
deleted file mode 100644
index 4c2fdac7f610..000000000000
--- a/drivers/staging/uwb/uwb-internal.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Ultra Wide Band
- * UWB internal API
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This contains most of the internal API for UWB: stuff used across
- * the stack that, of course, is of no interest to the rest of the
- * kernel.
- *
- * Some parts might end up going public (like uwb_rc_*())...
- */
-
-#ifndef __UWB_INTERNAL_H__
-#define __UWB_INTERNAL_H__
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include "uwb.h"
-
-struct uwb_beca_e;
-
-/* General device API */
-extern void uwb_dev_init(struct uwb_dev *uwb_dev);
-extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *);
-extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
- struct uwb_rc *parent_rc);
-extern void uwb_dev_rm(struct uwb_dev *uwb_dev);
-extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *);
-extern void uwbd_dev_offair(struct uwb_beca_e *);
-void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event);
-
-/* General UWB Radio Controller Internal API */
-extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *);
-static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc)
-{
- uwb_dev_get(&rc->uwb_dev);
- return rc;
-}
-
-static inline void __uwb_rc_put(struct uwb_rc *rc)
-{
- if (rc)
- uwb_dev_put(&rc->uwb_dev);
-}
-
-extern int uwb_rc_reset(struct uwb_rc *rc);
-extern int uwb_rc_beacon(struct uwb_rc *rc,
- int channel, unsigned bpst_offset);
-extern int uwb_rc_scan(struct uwb_rc *rc,
- unsigned channel, enum uwb_scan_type type,
- unsigned bpst_offset);
-extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc);
-
-void uwb_rc_ie_init(struct uwb_rc *);
-int uwb_rc_ie_setup(struct uwb_rc *);
-void uwb_rc_ie_release(struct uwb_rc *);
-int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len,
- char *buf, size_t size);
-int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *);
-
-
-extern const char *uwb_rc_strerror(unsigned code);
-
-/*
- * Time to wait for a response to an RC command.
- *
- * Some commands can take a long time to response. e.g., START_BEACON
- * may scan for several superframes before joining an existing beacon
- * group and this can take around 600 ms.
- */
-#define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */
-
-/*
- * Notification/Event Handlers
- */
-
-struct uwb_rc_neh;
-
-extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
- struct uwb_rccb *cmd, size_t cmd_size,
- u8 expected_type, u16 expected_event,
- uwb_rc_cmd_cb_f cb, void *arg);
-
-
-void uwb_rc_neh_create(struct uwb_rc *rc);
-void uwb_rc_neh_destroy(struct uwb_rc *rc);
-
-struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
- u8 expected_type, u16 expected_event,
- uwb_rc_cmd_cb_f cb, void *arg);
-void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
-void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
-void uwb_rc_neh_put(struct uwb_rc_neh *neh);
-
-/* Event size tables */
-extern int uwb_est_create(void);
-extern void uwb_est_destroy(void);
-
-/*
- * UWB conflicting alien reservations
- */
-struct uwb_cnflt_alien {
- struct uwb_rc *rc;
- struct list_head rc_node;
- struct uwb_mas_bm mas;
- struct timer_list timer;
- struct work_struct cnflt_update_work;
-};
-
-enum uwb_uwb_rsv_alloc_result {
- UWB_RSV_ALLOC_FOUND = 0,
- UWB_RSV_ALLOC_NOT_FOUND,
-};
-
-enum uwb_rsv_mas_status {
- UWB_RSV_MAS_NOT_AVAIL = 1,
- UWB_RSV_MAS_SAFE,
- UWB_RSV_MAS_UNSAFE,
-};
-
-struct uwb_rsv_col_set_info {
- unsigned char start_col;
- unsigned char interval;
- unsigned char safe_mas_per_col;
- unsigned char unsafe_mas_per_col;
-};
-
-struct uwb_rsv_col_info {
- unsigned char max_avail_safe;
- unsigned char max_avail_unsafe;
- unsigned char highest_mas[UWB_MAS_PER_ZONE];
- struct uwb_rsv_col_set_info csi;
-};
-
-struct uwb_rsv_row_info {
- unsigned char avail[UWB_MAS_PER_ZONE];
- unsigned char free_rows;
- unsigned char used_rows;
-};
-
-/*
- * UWB find allocation
- */
-struct uwb_rsv_alloc_info {
- unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES];
- struct uwb_rsv_col_info ci[UWB_NUM_ZONES];
- struct uwb_rsv_row_info ri;
- struct uwb_mas_bm *not_available;
- struct uwb_mas_bm *result;
- int min_mas;
- int max_mas;
- int max_interval;
- int total_allocated_mases;
- int safe_allocated_mases;
- int unsafe_allocated_mases;
- int interval;
-};
-
-int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv,
- struct uwb_mas_bm *available,
- struct uwb_mas_bm *result);
-void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc);
-/*
- * UWB Events & management daemon
- */
-
-/**
- * enum uwb_event_type - types of UWB management daemon events
- *
- * The UWB management daemon (uwbd) can receive two types of events:
- * UWB_EVT_TYPE_NOTIF - notification from the radio controller.
- * UWB_EVT_TYPE_MSG - a simple message.
- */
-enum uwb_event_type {
- UWB_EVT_TYPE_NOTIF,
- UWB_EVT_TYPE_MSG,
-};
-
-/**
- * struct uwb_event_notif - an event for a radio controller notification
- * @size: Size of the buffer (i.e., guaranteed to contain at least
- * a full 'struct uwb_rceb')
- * @rceb: Pointer to a kmalloced() event payload
- */
-struct uwb_event_notif {
- size_t size;
- struct uwb_rceb *rceb;
-};
-
-/**
- * enum uwb_event_message - an event for a message for asynchronous processing
- *
- * UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware.
- */
-enum uwb_event_message {
- UWB_EVT_MSG_RESET,
-};
-
-/**
- * struct uwb_event - an event queued for processing by uwbd
- * @rc: Radio controller that emitted the event (referenced)
- * @ts_jiffies: Timestamp, when was it received
- * @type: This event's type.
- */
-struct uwb_event {
- struct list_head list_node;
- struct uwb_rc *rc;
- unsigned long ts_jiffies;
- enum uwb_event_type type;
- union {
- struct uwb_event_notif notif;
- enum uwb_event_message message;
- };
-};
-
-extern void uwbd_start(struct uwb_rc *rc);
-extern void uwbd_stop(struct uwb_rc *rc);
-extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
-extern void uwbd_event_queue(struct uwb_event *);
-void uwbd_flush(struct uwb_rc *rc);
-
-/* UWB event handlers */
-extern int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *);
-extern int uwbd_evt_handle_rc_beacon(struct uwb_event *);
-extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *);
-extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *);
-extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *);
-extern int uwbd_evt_handle_rc_drp(struct uwb_event *);
-extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *);
-
-int uwbd_msg_handle_reset(struct uwb_event *evt);
-
-
-/*
- * Address management
- */
-int uwb_rc_dev_addr_assign(struct uwb_rc *rc);
-int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt);
-
-/*
- * UWB Beacon Cache
- *
- * Each beacon we receive is kept in a cache; when we receive a
- * beacon consistently, it means there is a new device that we have
- * to add to the system.
- */
-
-extern unsigned long beacon_timeout_ms;
-
-/**
- * struct uwb_beca_e - a beacon cache entry
- *
- * @jiffies_refresh: last time a beacon was received that refreshed
- * this cache entry.
- * @uwb_dev: device connected to this beacon. This pointer is not
- *     safe; you need to get it with uwb_dev_try_get()
- *
- * @hits: how many times we have seen this beacon since we last
- *     cleared it
- */
-struct uwb_beca_e {
- struct mutex mutex;
- struct kref refcnt;
- struct list_head node;
- struct uwb_mac_addr *mac_addr;
- struct uwb_dev_addr dev_addr;
- u8 hits;
- unsigned long ts_jiffies;
- struct uwb_dev *uwb_dev;
- struct uwb_rc_evt_beacon *be;
- struct stats lqe_stats, rssi_stats; /* radio statistics */
-};
-struct uwb_beacon_frame;
-extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *,
- char *, size_t);
-
-extern void uwb_bce_kfree(struct kref *_bce);
-static inline void uwb_bce_get(struct uwb_beca_e *bce)
-{
- kref_get(&bce->refcnt);
-}
-static inline void uwb_bce_put(struct uwb_beca_e *bce)
-{
- kref_put(&bce->refcnt, uwb_bce_kfree);
-}
-extern void uwb_beca_purge(struct uwb_rc *rc);
-extern void uwb_beca_release(struct uwb_rc *rc);
-
-struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
- const struct uwb_dev_addr *devaddr);
-struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
- const struct uwb_mac_addr *macaddr);
-
-int uwb_radio_setup(struct uwb_rc *rc);
-void uwb_radio_reset_state(struct uwb_rc *rc);
-void uwb_radio_shutdown(struct uwb_rc *rc);
-int uwb_radio_force_channel(struct uwb_rc *rc, int channel);
-
-/* -- UWB Sysfs representation */
-extern struct class uwb_rc_class;
-extern struct bus_type uwb_bus_type;
-extern struct device_attribute dev_attr_mac_address;
-extern struct device_attribute dev_attr_beacon;
-extern struct device_attribute dev_attr_scan;
-
-/* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */
-void uwb_rsv_init(struct uwb_rc *rc);
-int uwb_rsv_setup(struct uwb_rc *rc);
-void uwb_rsv_cleanup(struct uwb_rc *rc);
-void uwb_rsv_remove_all(struct uwb_rc *rc);
-void uwb_rsv_get(struct uwb_rsv *rsv);
-void uwb_rsv_put(struct uwb_rsv *rsv);
-bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
-void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
-int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
-void uwb_rsv_backoff_win_timer(struct timer_list *t);
-void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
-int uwb_rsv_status(struct uwb_rsv *rsv);
-int uwb_rsv_companion_status(struct uwb_rsv *rsv);
-
-void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state);
-void uwb_rsv_remove(struct uwb_rsv *rsv);
-struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
- struct uwb_ie_drp *drp_ie);
-void uwb_rsv_sched_update(struct uwb_rc *rc);
-void uwb_rsv_queue_update(struct uwb_rc *rc);
-
-int uwb_drp_ie_update(struct uwb_rsv *rsv);
-void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie);
-
-void uwb_drp_avail_init(struct uwb_rc *rc);
-void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail);
-int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas);
-void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas);
-void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);
-void uwb_drp_avail_ie_update(struct uwb_rc *rc);
-
-/* -- PAL support */
-void uwb_rc_pal_init(struct uwb_rc *rc);
-
-/* -- Misc */
-
-extern ssize_t uwb_mac_frame_hdr_print(char *, size_t,
- const struct uwb_mac_frame_hdr *);
-
-/* -- Debug interface */
-void uwb_dbg_init(void);
-void uwb_dbg_exit(void);
-void uwb_dbg_add_rc(struct uwb_rc *rc);
-void uwb_dbg_del_rc(struct uwb_rc *rc);
-struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal);
-
-static inline void uwb_dev_lock(struct uwb_dev *uwb_dev)
-{
- device_lock(&uwb_dev->dev);
-}
-
-static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev)
-{
- device_unlock(&uwb_dev->dev);
-}
-
-#endif /* #ifndef __UWB_INTERNAL_H__ */
diff --git a/drivers/staging/uwb/uwb.h b/drivers/staging/uwb/uwb.h
deleted file mode 100644
index 6a59706ba3a0..000000000000
--- a/drivers/staging/uwb/uwb.h
+++ /dev/null
@@ -1,817 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Ultra Wide Band
- * UWB API
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: doc: overview of the API, different parts and pointers
- */
-
-#ifndef __LINUX__UWB_H__
-#define __LINUX__UWB_H__
-
-#include <linux/limits.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <asm/page.h>
-#include "include/spec.h"
-
-struct uwb_dev;
-struct uwb_beca_e;
-struct uwb_rc;
-struct uwb_rsv;
-struct uwb_dbg;
-
-/**
- * struct uwb_dev - a UWB Device
- * @rc: UWB Radio Controller that discovered the device (kind of its
- * parent).
- * @bce: a beacon cache entry for this device; or NULL if the device
- * is a local radio controller.
- * @mac_addr: the EUI-48 address of this device.
- * @dev_addr: the current DevAddr used by this device.
- * @beacon_slot: the slot number the beacon is using.
- * @streams: bitmap of streams allocated to reservations targeted at
- * this device. For an RC, this is the streams allocated for
- * reservations targeted at DevAddrs.
- *
- * A UWB device may either be a neighbor or part of a local radio
- * controller.
- */
-struct uwb_dev {
- struct mutex mutex;
- struct list_head list_node;
- struct device dev;
- struct uwb_rc *rc; /* radio controller */
- struct uwb_beca_e *bce; /* Beacon Cache Entry */
-
- struct uwb_mac_addr mac_addr;
- struct uwb_dev_addr dev_addr;
- int beacon_slot;
- DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
- DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS);
-};
-#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
-
-/**
- * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
- *
- * RC[CE]Bs have a 'context ID' field that matches the command with
- * the event received to confirm it.
- *
- * Maximum number of context IDs
- */
-enum { UWB_RC_CTX_MAX = 256 };
-
-
-/** Notification chain head for UWB generated events to listeners */
-struct uwb_notifs_chain {
- struct list_head list;
- struct mutex mutex;
-};
-
-/* Beacon cache list */
-struct uwb_beca {
- struct list_head list;
- size_t entries;
- struct mutex mutex;
-};
-
-/* Event handling thread. */
-struct uwbd {
- int pid;
- struct task_struct *task;
- wait_queue_head_t wq;
- struct list_head event_list;
- spinlock_t event_list_lock;
-};
-
-/**
- * struct uwb_mas_bm - a bitmap of all MAS in a superframe
- * @bm: a bitmap of length #UWB_NUM_MAS
- */
-struct uwb_mas_bm {
- DECLARE_BITMAP(bm, UWB_NUM_MAS);
- DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS);
- int safe;
- int unsafe;
-};
-
-/**
- * uwb_rsv_state - UWB Reservation state.
- *
- * NONE - reservation is not active (no DRP IE being transmitted).
- *
- * Owner reservation states:
- *
- * INITIATED - owner has sent an initial DRP request.
- * PENDING - target responded with pending Reason Code.
- * MODIFIED - reservation manager is modifying an established
- * reservation with a different MAS allocation.
- * ESTABLISHED - the reservation has been successfully negotiated.
- *
- * Target reservation states:
- *
- * DENIED - request is denied.
- * ACCEPTED - request is accepted.
- * PENDING - PAL has yet to make a decision on whether to accept or
- * deny.
- *
- * FIXME: further target states TBD.
- */
-enum uwb_rsv_state {
- UWB_RSV_STATE_NONE = 0,
- UWB_RSV_STATE_O_INITIATED,
- UWB_RSV_STATE_O_PENDING,
- UWB_RSV_STATE_O_MODIFIED,
- UWB_RSV_STATE_O_ESTABLISHED,
- UWB_RSV_STATE_O_TO_BE_MOVED,
- UWB_RSV_STATE_O_MOVE_EXPANDING,
- UWB_RSV_STATE_O_MOVE_COMBINING,
- UWB_RSV_STATE_O_MOVE_REDUCING,
- UWB_RSV_STATE_T_ACCEPTED,
- UWB_RSV_STATE_T_DENIED,
- UWB_RSV_STATE_T_CONFLICT,
- UWB_RSV_STATE_T_PENDING,
- UWB_RSV_STATE_T_EXPANDING_ACCEPTED,
- UWB_RSV_STATE_T_EXPANDING_CONFLICT,
- UWB_RSV_STATE_T_EXPANDING_PENDING,
- UWB_RSV_STATE_T_EXPANDING_DENIED,
- UWB_RSV_STATE_T_RESIZED,
-
- UWB_RSV_STATE_LAST,
-};
-
-enum uwb_rsv_target_type {
- UWB_RSV_TARGET_DEV,
- UWB_RSV_TARGET_DEVADDR,
-};
-
-/**
- * struct uwb_rsv_target - the target of a reservation.
- *
- * Reservations are unicast and targeted at a single device
- * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a
- * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
- */
-struct uwb_rsv_target {
- enum uwb_rsv_target_type type;
- union {
- struct uwb_dev *dev;
- struct uwb_dev_addr devaddr;
- };
-};
-
-struct uwb_rsv_move {
- struct uwb_mas_bm final_mas;
- struct uwb_ie_drp *companion_drp_ie;
- struct uwb_mas_bm companion_mas;
-};
-
-/*
- * Number of streams reserved for reservations targeted at DevAddrs.
- */
-#define UWB_NUM_GLOBAL_STREAMS 1
-
-typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
-
-/**
- * struct uwb_rsv - a DRP reservation
- *
- * Data structure management:
- *
- * @rc: the radio controller this reservation is for
- * (as target or owner)
- * @rc_node: a list node for the RC
- * @pal_node: a list node for the PAL
- *
- * Owner and target parameters:
- *
- * @owner: the UWB device owning this reservation
- * @target: the target UWB device
- * @type: reservation type
- *
- * Owner parameters:
- *
- * @max_mas: maximum number of MAS
- * @min_mas: minimum number of MAS
- * @sparsity: owner selected sparsity
- * @is_multicast: true iff multicast
- *
- * @callback: callback function when the reservation completes
- * @pal_priv: private data for the PAL making the reservation
- *
- * Reservation status:
- *
- * @status: negotiation status
- * @stream: stream index allocated for this reservation
- * @tiebreaker: conflict tiebreaker for this reservation
- * @mas: reserved MAS
- * @drp_ie: the DRP IE
- * @ie_valid: true iff the DRP IE matches the reservation parameters
- *
- * DRP reservations are uniquely identified by the owner, target and
- * stream index. However, when using a DevAddr as a target (e.g., for
- * a WUSB cluster reservation) the responses may be received from
- * devices with different DevAddrs. In this case, reservations are
- * uniquely identified by just the stream index. A number of stream
- * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
- */
-struct uwb_rsv {
- struct uwb_rc *rc;
- struct list_head rc_node;
- struct list_head pal_node;
- struct kref kref;
-
- struct uwb_dev *owner;
- struct uwb_rsv_target target;
- enum uwb_drp_type type;
- int max_mas;
- int min_mas;
- int max_interval;
- bool is_multicast;
-
- uwb_rsv_cb_f callback;
- void *pal_priv;
-
- enum uwb_rsv_state state;
- bool needs_release_companion_mas;
- u8 stream;
- u8 tiebreaker;
- struct uwb_mas_bm mas;
- struct uwb_ie_drp *drp_ie;
- struct uwb_rsv_move mv;
- bool ie_valid;
- struct timer_list timer;
- struct work_struct handle_timeout_work;
-};
-
-static const
-struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
-
-static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
-{
- bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
-}
-
-/**
- * struct uwb_drp_avail - a radio controller's view of MAS usage
- * @global: MAS unused by neighbors (excluding reservations targeted
- * or owned by the local radio controller) or the beacon period
- * @local: MAS unused by local established reservations
- * @pending: MAS unused by local pending reservations
- * @ie: DRP Availability IE to be included in the beacon
- * @ie_valid: true iff @ie is valid and does not need to be regenerated from
- * @global and @local
- *
- * Each radio controller maintains a view of MAS usage or
- * availability. MAS available for a new reservation are determined
- * from the intersection of @global, @local, and @pending.
- *
- * The radio controller must transmit a DRP Availability IE that's the
- * intersection of @global and @local.
- *
- * A set bit indicates the MAS is unused and available.
- *
- * rc->rsvs_mutex should be held before accessing this data structure.
- *
- * [ECMA-368] section 17.4.3.
- */
-struct uwb_drp_avail {
- DECLARE_BITMAP(global, UWB_NUM_MAS);
- DECLARE_BITMAP(local, UWB_NUM_MAS);
- DECLARE_BITMAP(pending, UWB_NUM_MAS);
- struct uwb_ie_drp_avail ie;
- bool ie_valid;
-};
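/*
 * Editorial sketch -- uwb_drp_avail_sketch() is hypothetical: per the
 * comment above, the MAS available for a new reservation are the
 * intersection of @global, @local and @pending. The caller would hold
 * rc->rsvs_mutex.
 */
static inline void uwb_drp_avail_sketch(const struct uwb_drp_avail *avail,
					struct uwb_mas_bm *result)
{
	bitmap_and(result->bm, avail->global, avail->local, UWB_NUM_MAS);
	bitmap_and(result->bm, result->bm, avail->pending, UWB_NUM_MAS);
}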
-
-struct uwb_drp_backoff_win {
- u8 window;
- u8 n;
- int total_expired;
- struct timer_list timer;
- bool can_reserve_extra_mases;
-};
-
-const char *uwb_rsv_state_str(enum uwb_rsv_state state);
-const char *uwb_rsv_type_str(enum uwb_drp_type type);
-
-struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
- void *pal_priv);
-void uwb_rsv_destroy(struct uwb_rsv *rsv);
-
-int uwb_rsv_establish(struct uwb_rsv *rsv);
-int uwb_rsv_modify(struct uwb_rsv *rsv,
- int max_mas, int min_mas, int sparsity);
-void uwb_rsv_terminate(struct uwb_rsv *rsv);
-
-void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
-
-void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas);
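/*
 * Editorial usage sketch of the reservation API above (my_rsv_cb and
 * my_pal are hypothetical):
 *
 *	static void my_rsv_cb(struct uwb_rsv *rsv)
 *	{
 *		// inspect rsv->state: UWB_RSV_STATE_O_ESTABLISHED on success
 *	}
 *
 *	rsv = uwb_rsv_create(rc, my_rsv_cb, my_pal);
 *	rsv->target.type = UWB_RSV_TARGET_DEV;	// plus owner/type/MAS limits
 *	uwb_rsv_establish(rsv);			// result arrives via my_rsv_cb()
 *	...
 *	uwb_rsv_terminate(rsv);
 *	uwb_rsv_destroy(rsv);
 */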
-
-/**
- * Radio Control Interface instance
- *
- * Life cycle rules: those of the UWB Device.
- *
- * @index: an index number for this radio controller, as used in the
- * device name.
- * @version: version of protocol supported by this device
- * @priv: Backend implementation; rw with uwb_dev.dev.sem taken.
- * @cmd: Backend implementation to execute commands; rw and call
- * only with uwb_dev.dev.sem taken.
- * @reset: Hardware reset of radio controller and any PAL controllers.
- * @filter: Backend implementation to manipulate data to and from device
- *          to be compliant with the specification assumed by the driver (WHCI
- * 0.95).
- *
- * uwb_dev.dev.mutex is used to execute commands and update
- * the corresponding structures; can't use a spinlock
- * because rc->cmd() can sleep.
- * @ies: This is a dynamically allocated array caching the
- * IEs (settable by the host) that the beacon of this
- * radio controller is currently sending.
- *
- * In reality, we store here the full command we set to
- * the radio controller (which is basically a command
- * prefix followed by all the IEs the beacon currently
- * contains). This way we don't have to realloc and
- * memcpy when setting it.
- *
- * We set this up in uwb_rc_ie_setup(), where we alloc
- * this struct, call get_ie() [so we know which IEs are
- * currently being sent, if any].
- *
- * @ies_capacity: Amount of space (in bytes) allocated in @ies. The
- * amount used is given by sizeof(*ies) plus ies->wIELength
- * (which is a little endian quantity all the time).
- * @ies_mutex: protect the IE cache
- * @dbg: information for the debug interface
- */
-struct uwb_rc {
- struct uwb_dev uwb_dev;
- int index;
- u16 version;
-
- struct module *owner;
- void *priv;
- int (*start)(struct uwb_rc *rc);
- void (*stop)(struct uwb_rc *rc);
- int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
- int (*reset)(struct uwb_rc *rc);
- int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
- int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
- size_t *, size_t *);
-
- spinlock_t neh_lock; /* protects neh_* and ctx_* */
- struct list_head neh_list; /* Open NE handles */
- unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
- u8 ctx_roll;
-
- int beaconing; /* Beaconing state [channel number] */
- int beaconing_forced;
- int scanning;
- enum uwb_scan_type scan_type:3;
- unsigned ready:1;
- struct uwb_notifs_chain notifs_chain;
- struct uwb_beca uwb_beca;
-
- struct uwbd uwbd;
-
- struct uwb_drp_backoff_win bow;
- struct uwb_drp_avail drp_avail;
- struct list_head reservations;
- struct list_head cnflt_alien_list;
- struct uwb_mas_bm cnflt_alien_bitmap;
- struct mutex rsvs_mutex;
- spinlock_t rsvs_lock;
- struct workqueue_struct *rsv_workq;
-
- struct delayed_work rsv_update_work;
- struct delayed_work rsv_alien_bp_work;
- int set_drp_ie_pending;
- struct mutex ies_mutex;
- struct uwb_rc_cmd_set_ie *ies;
- size_t ies_capacity;
-
- struct list_head pals;
- int active_pals;
-
- struct uwb_dbg *dbg;
-};
-
-
-/**
- * struct uwb_pal - a UWB PAL
- * @name: descriptive name for this PAL (wusbhc, wlp, etc.).
- * @device: a device for the PAL. Used to link the PAL and the radio
- * controller in sysfs.
- * @rc: the radio controller the PAL uses.
- * @channel_changed: called when the channel used by the radio changes.
- * A channel of -1 means the channel has been stopped.
- * @new_rsv: called when a peer requests a reservation (may be NULL if
- * the PAL cannot accept reservation requests).
- * @channel: channel being used by the PAL; 0 if the PAL isn't using
- * the radio; -1 if the PAL wishes to use the radio but
- * cannot.
- * @debugfs_dir: a debugfs directory which the PAL can use for its own
- * debugfs files.
- *
- * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
- * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
- *
- * The PALs using a radio controller must register themselves to
- * permit the UWB stack to coordinate usage of the radio between the
- * various PALs or to allow PALs to respond to certain requests from
- * peers.
- *
- * A struct uwb_pal should be embedded in a containing structure
- * belonging to the PAL and initialized with uwb_pal_init(). Fields
- * should be set appropriately by the PAL before registering the PAL
- * with uwb_pal_register().
- */
-struct uwb_pal {
- struct list_head node;
- const char *name;
- struct device *device;
- struct uwb_rc *rc;
-
- void (*channel_changed)(struct uwb_pal *pal, int channel);
- void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv);
-
- int channel;
- struct dentry *debugfs_dir;
-};
-
-void uwb_pal_init(struct uwb_pal *pal);
-int uwb_pal_register(struct uwb_pal *pal);
-void uwb_pal_unregister(struct uwb_pal *pal);
-
-int uwb_radio_start(struct uwb_pal *pal);
-void uwb_radio_stop(struct uwb_pal *pal);
-
-/*
- * General public API
- *
- * This API can be used by UWB device drivers or by those implementing
- * UWB Radio Controllers
- */
-struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
- const struct uwb_dev_addr *devaddr);
-struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
-static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
-{
- get_device(&uwb_dev->dev);
-}
-static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
-{
- put_device(&uwb_dev->dev);
-}
-struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);
-
-/**
- * Callback function for 'uwb_{dev,rc}_foreach()'.
- *
- * @dev: Linux device instance
- * 'uwb_dev = container_of(dev, struct uwb_dev, dev)'
- * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
- *
- * @returns: 0 to continue the iterations, any other value to stop
- * iterating and return the value to the caller of
- * _foreach().
- */
-typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
-int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
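/*
 * Editorial sketch -- example_count_dev() is hypothetical: counting
 * the devices seen by a radio controller with the iterator above.
 */
static int example_count_dev(struct device *dev, void *priv)
{
	int *count = priv;

	(*count)++;
	return 0;	/* 0: keep iterating */
}
/* Usage: int n = 0; uwb_dev_for_each(rc, example_count_dev, &n); */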
-
-struct uwb_rc *uwb_rc_alloc(void);
-struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
-struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
-void uwb_rc_put(struct uwb_rc *rc);
-
-typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
- struct uwb_rceb *reply, ssize_t reply_size);
-
-int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
- struct uwb_rccb *cmd, size_t cmd_size,
- u8 expected_type, u16 expected_event,
- uwb_rc_cmd_cb_f cb, void *arg);
-ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
- struct uwb_rccb *cmd, size_t cmd_size,
- struct uwb_rceb *reply, size_t reply_size);
-ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
- struct uwb_rccb *cmd, size_t cmd_size,
- u8 expected_type, u16 expected_event,
- struct uwb_rceb **preply);
-
-size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);
-
-int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
-int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
-int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
-int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
-int __uwb_mac_addr_assigned_check(struct device *, void *);
-int __uwb_dev_addr_assigned_check(struct device *, void *);
-
-/* Print in @buf a pretty repr of @addr */
-static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
- const struct uwb_dev_addr *addr)
-{
- return __uwb_addr_print(buf, buf_size, addr->data, 0);
-}
-
-/* Print in @buf a pretty repr of @addr */
-static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
- const struct uwb_mac_addr *addr)
-{
- return __uwb_addr_print(buf, buf_size, addr->data, 1);
-}
-
-/* @returns 0 if device addresses @addr2 and @addr1 are equal */
-static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
- const struct uwb_dev_addr *addr2)
-{
- return memcmp(addr1, addr2, sizeof(*addr1));
-}
-
-/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
-static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
- const struct uwb_mac_addr *addr2)
-{
- return memcmp(addr1, addr2, sizeof(*addr1));
-}
-
-/* @returns !0 if a MAC @addr is a broadcast address */
-static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
-{
- struct uwb_mac_addr bcast = {
- .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
- };
- return !uwb_mac_addr_cmp(addr, &bcast);
-}
-
-/* @returns !0 if a MAC @addr is all zeroes */
-static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
-{
- struct uwb_mac_addr unset = {
- .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
- };
- return !uwb_mac_addr_cmp(addr, &unset);
-}
-
-/* @returns !0 if the address is in use. */
-static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
- struct uwb_dev_addr *addr)
-{
- return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
-}
-
-/*
- * UWB Radio Controller API
- *
- * This API is used (in addition to the general API) to implement UWB
- * Radio Controllers.
- */
-void uwb_rc_init(struct uwb_rc *);
-int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
-void uwb_rc_rm(struct uwb_rc *);
-void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
-void uwb_rc_neh_error(struct uwb_rc *, int);
-void uwb_rc_reset_all(struct uwb_rc *rc);
-void uwb_rc_pre_reset(struct uwb_rc *rc);
-int uwb_rc_post_reset(struct uwb_rc *rc);
-
-/**
- * uwb_rsv_is_owner - is the owner of this reservation the RC?
- * @rsv: the reservation
- */
-static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
-{
- return rsv->owner == &rsv->rc->uwb_dev;
-}
-
-/**
- * enum uwb_notifs - UWB events that can be passed to any listeners
- * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group.
- * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group.
- *
- * Higher layers can register callback functions with the radio
- * controller using uwb_notifs_register(). The radio controller
- * maintains a list of all registered handlers and will notify all
- * nodes when an event occurs.
- */
-enum uwb_notifs {
- UWB_NOTIF_ONAIR,
- UWB_NOTIF_OFFAIR,
-};
-
-/* Callback function registered with UWB */
-struct uwb_notifs_handler {
- struct list_head list_node;
- void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
- void *data;
-};
-
-int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
-int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
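/*
 * Editorial sketch -- example_notif_cb() and the handler instance are
 * hypothetical: listening for neighbors joining or leaving the beacon
 * group via the notification chain above.
 */
static void example_notif_cb(void *data, struct uwb_dev *uwb_dev,
			     enum uwb_notifs event)
{
	if (event == UWB_NOTIF_ONAIR)
		dev_info(&uwb_dev->dev, "joined the beacon group\n");
}
/*
 * static struct uwb_notifs_handler nh = { .cb = example_notif_cb };
 * uwb_notifs_register(rc, &nh);	... uwb_notifs_deregister(rc, &nh);
 */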
-
-
-/**
- * UWB radio controller Event Size Entry (for creating entry tables)
- *
- * WUSB and WHCI define events and notifications, and they might have
- * fixed or variable size.
- *
- * Each event/notification has a size which is not necessarily known
- * in advance based on the event code. As well, vendor specific
- * events/notifications will have a size impossible to determine
- * unless we know about the device's specific details.
- *
- * It was not very smart of the spec writers to overlook that it is
- * impossible for a generic driver to skip over vendor specific
- * events/notifications if there are no LENGTH fields in the HEADER of
- * each message...and the transaction size cannot be counted on, as the
- * spec does not forbid packing more than one event in a single
- * transaction.
- *
- * Thus, we guess sizes with tables (or for events, when you know the
- * size ahead of time you can use uwb_rc_neh_extra_size*()). We
- * register tables with the known events and their sizes, and then we
- * traverse those tables. For those with variable length, we provide a
- * way to lookup the size inside the event/notification's
- * payload. This allows device-specific event size tables to be
- * registered.
- *
- * @size: Size of the payload
- *
- * @offset: if != 0, at offset @offset-1 starts a field with a length
- * that has to be added to @size. The format of the field is
- * given by @type.
- *
- * @type: Type and length of the offset field. Most common is LE 16
- * bits (that's why that is zero); others are there mostly to
- * cover for bugs and weirdos.
- */
-struct uwb_est_entry {
- size_t size;
- unsigned offset;
- enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
-};
-
-int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
- const struct uwb_est_entry *, size_t entries);
-int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
- const struct uwb_est_entry *, size_t entries);
-ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
- size_t len);
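/*
 * Editorial sketch -- all IDs and sizes here are hypothetical: a size
 * table for a variable-length event whose 16-bit LE length field
 * starts at payload offset 4 (hence .offset = 5, one-based as
 * described above).
 */
static const struct uwb_est_entry example_est[] = {
	[0x00] = { .size = sizeof(struct uwb_rceb) + 2,
		   .offset = 5, .type = UWB_EST_16 },
};
/* uwb_est_register(type, code_high, vendor, product,
 *		    example_est, ARRAY_SIZE(example_est)); */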
-
-/* -- Misc */
-
-enum {
- EDC_MAX_ERRORS = 10,
- EDC_ERROR_TIMEFRAME = HZ,
-};
-
-/* error density counter */
-struct edc {
- unsigned long timestart;
- u16 errorcount;
-};
-
-static inline
-void edc_init(struct edc *edc)
-{
- edc->timestart = jiffies;
-}
-
-/* Called when an error occurred.
- * This is way to determine if the number of acceptable errors per time
- * period has been exceeded. It is not accurate as there are cases in which
- * this scheme will not work, for example if there are periodic occurrences
- * of errors that straddle updates to the start time. This scheme is
- * sufficient for our usage.
- *
- * @returns 1 if maximum acceptable errors per timeframe has been exceeded.
- */
-static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
-{
- unsigned long now;
-
- now = jiffies;
- if (now - err_hist->timestart > timeframe) {
- err_hist->errorcount = 1;
- err_hist->timestart = now;
- } else if (++err_hist->errorcount > max_err) {
- err_hist->errorcount = 0;
- err_hist->timestart = now;
- return 1;
- }
- return 0;
-}
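/*
 * Editorial usage sketch -- example_handle_hw_error() is
 * hypothetical: a driver bails out once the error density exceeds
 * EDC_MAX_ERRORS within EDC_ERROR_TIMEFRAME.
 */
static inline int example_handle_hw_error(struct edc *err_hist)
{
	if (edc_inc(err_hist, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
		return -EIO;	/* too many errors; caller resets the hw */
	return 0;		/* within budget, keep going */
}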
-
-
-/* Information Element handling */
-
-struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
-int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size);
-int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id);
-
-/*
- * Transmission statistics
- *
- * UWB uses LQI and RSSI (one byte values) for reporting radio signal
- * strength and line quality indication. We do quick and dirty
- * averages of those. They are signed values, btw.
- *
- * For 8 bit quantities, we keep the min, the max, an accumulator
- * (@sigma) and a # of samples. When @samples reaches 256, we compute
- * the average (@sigma / @samples), place it in @sigma and reset
- * @samples to 1 (so the average is used as the first sample).
- *
- * Now, statistically speaking, probably I am kicking the kidneys of
- * some books I have in my shelves collecting dust, but I just want to
- * get an approx, not the Nobel.
- *
- * LOCKING: there is no locking per se, but we try to keep a lockless
- * scheme. Only _add_sample() modifies the values--as long as you
- * have other locking on top that makes sure that no two calls of
- * _add_sample() happen at the same time, then we are fine. Now, for
- * resetting the values we just set @samples to 0 and that makes the
- * next _add_sample() to start with defaults. Reading the values in
- * _show() currently can race, so you need to make sure the calls are
- * under the same lock that protects calls to _add_sample(). FIXME:
- * currently unlocked (It is not ultraprecise but does the trick. Bite
- * me).
- */
-struct stats {
- s8 min, max;
- s16 sigma;
- atomic_t samples;
-};
-
-static inline
-void stats_init(struct stats *stats)
-{
- atomic_set(&stats->samples, 0);
- wmb();
-}
-
-static inline
-void stats_add_sample(struct stats *stats, s8 sample)
-{
- s8 min, max;
- s16 sigma;
- unsigned samples = atomic_read(&stats->samples);
- if (samples == 0) { /* it was zero before, so we initialize */
- min = 127;
- max = -128;
- sigma = 0;
- } else {
- min = stats->min;
- max = stats->max;
- sigma = stats->sigma;
- }
-
- if (sample < min) /* compute new values */
- min = sample;
- else if (sample > max)
- max = sample;
- sigma += sample;
-
- stats->min = min; /* commit */
- stats->max = max;
- stats->sigma = sigma;
- if (atomic_add_return(1, &stats->samples) > 255) {
- /* wrapped around! reset */
- stats->sigma = sigma / 256;
- atomic_set(&stats->samples, 1);
- }
-}
-
-static inline ssize_t stats_show(struct stats *stats, char *buf)
-{
- int min, max, avg;
- int samples = atomic_read(&stats->samples);
- if (samples == 0)
- min = max = avg = 0;
- else {
- min = stats->min;
- max = stats->max;
- avg = stats->sigma / samples;
- }
- return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
-}
-
-static inline ssize_t stats_store(struct stats *stats, const char *buf,
- size_t size)
-{
- stats_init(stats);
- return size;
-}
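/*
 * Editorial sketch -- the attribute is hypothetical and assumes @dev
 * embeds a struct uwb_dev with a valid beacon cache entry: exposing
 * the RSSI statistics above through sysfs.
 */
static ssize_t example_rssi_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);

	return stats_show(&uwb_dev->bce->rssi_stats, buf);
}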
-
-#endif /* #ifndef __LINUX__UWB_H__ */
diff --git a/drivers/staging/uwb/uwbd.c b/drivers/staging/uwb/uwbd.c
deleted file mode 100644
index dc5c743d4f5f..000000000000
--- a/drivers/staging/uwb/uwbd.c
+++ /dev/null
@@ -1,356 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Ultra Wide Band
- * Neighborhood Management Daemon
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This daemon takes care of maintaining information that describes the
- * UWB neighborhood that the radios in this machine can see. It also
- * keeps tabs on which devices are visible, makes sure each HC sits
- * on a different channel to avoid interference, etc.
- *
- * Different drivers (radio controller, device, any API in general)
- * communicate with this daemon through an event queue. Daemon wakes
- * up, takes a list of events and handles them one by one; handling
- * function is extracted from a table based on the event's type and
- * subtype. Events are freed only if the handling function says so.
- *
- * . Lock protecting the event list has to be a spinlock and locked
- *   with IRQSAVE because it might be called from an interrupt
- *   context (i.e., when events arrive and the notification drops
- *   down from the ISR).
- *
- * . UWB radio controller drivers queue events to the daemon using
- * uwbd_event_queue(). They just get the event, chew it to make it
- * look like UWBD likes it and pass it in a buffer allocated with
- * uwb_event_alloc().
- *
- * EVENTS
- *
- * Events have a type, a subtype, a length, some other stuff and the
- * data blob, which depends on the event. The header is 'struct
- * uwb_event'; for payloads, see 'struct uwbd_evt_*'.
- *
- * EVENT HANDLER TABLES
- *
- * To find a handling function for an event, the type is used to index
- * a subtype-table in the type-table. The subtype-table is indexed
- * with the subtype to get the function that handles the event. Start
- * with the main type-table 'uwbd_urc_evt_type_handlers'.
- *
- * DEVICES
- *
- * Devices are created when a bunch of beacons have been received and
- * it is established that the device has stable radio presence. CREATED
- * only, not configured. Devices are ONLY configured when an
- * Application-Specific IE Probe is received, in which the device
- * declares which Protocol ID it groks. Then the device is CONFIGURED
- * (and the driver->probe() stuff of the device model is invoked).
- *
- * Devices are considered disconnected when a certain number of
- * beacons are not received in an amount of time.
- *
- * Handler functions are normally named uwbd_evt_handle_*().
- */
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/freezer.h>
-
-#include "uwb-internal.h"
-
-/*
- * UWBD Event handler function signature
- *
- * Return !0 if the event need not be freed (i.e., the handler
- * took care of it). 0 means the daemon code will free the
- * event.
- *
- * @evt->rc is already referenced and guaranteed to exist. See
- * uwb_evt_handle().
- */
-typedef int (*uwbd_evt_handler_f)(struct uwb_event *);
-
-/**
- * Properties of a UWBD event
- *
- * @handler: the function that will handle this event
- * @name: text name of event
- */
-struct uwbd_event {
- uwbd_evt_handler_f handler;
- const char *name;
-};
-
-/* Table of handlers for and properties of the UWBD Radio Control Events */
-static struct uwbd_event uwbd_urc_events[] = {
- [UWB_RC_EVT_IE_RCV] = {
- .handler = uwbd_evt_handle_rc_ie_rcv,
- .name = "IE_RECEIVED"
- },
- [UWB_RC_EVT_BEACON] = {
- .handler = uwbd_evt_handle_rc_beacon,
- .name = "BEACON_RECEIVED"
- },
- [UWB_RC_EVT_BEACON_SIZE] = {
- .handler = uwbd_evt_handle_rc_beacon_size,
- .name = "BEACON_SIZE_CHANGE"
- },
- [UWB_RC_EVT_BPOIE_CHANGE] = {
- .handler = uwbd_evt_handle_rc_bpoie_change,
- .name = "BPOIE_CHANGE"
- },
- [UWB_RC_EVT_BP_SLOT_CHANGE] = {
- .handler = uwbd_evt_handle_rc_bp_slot_change,
- .name = "BP_SLOT_CHANGE"
- },
- [UWB_RC_EVT_DRP_AVAIL] = {
- .handler = uwbd_evt_handle_rc_drp_avail,
- .name = "DRP_AVAILABILITY_CHANGE"
- },
- [UWB_RC_EVT_DRP] = {
- .handler = uwbd_evt_handle_rc_drp,
- .name = "DRP"
- },
- [UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
- .handler = uwbd_evt_handle_rc_dev_addr_conflict,
- .name = "DEV_ADDR_CONFLICT",
- },
-};
-
-
-
-struct uwbd_evt_type_handler {
- const char *name;
- struct uwbd_event *uwbd_events;
- size_t size;
-};
-
-/* Table of handlers for each UWBD Event type. */
-static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = {
- [UWB_RC_CET_GENERAL] = {
- .name = "URC",
- .uwbd_events = uwbd_urc_events,
- .size = ARRAY_SIZE(uwbd_urc_events),
- },
-};
-
-static const struct uwbd_event uwbd_message_handlers[] = {
- [UWB_EVT_MSG_RESET] = {
- .handler = uwbd_msg_handle_reset,
- .name = "reset",
- },
-};
-
-/*
- * Handle an URC event passed to the UWB Daemon
- *
- * @evt: the event to handle
- * @returns: 0 if the event can be kfreed, !0 on the contrary
- * (somebody else took ownership) [coincidentally, returning
- * a <0 errno code will free it :)].
- *
- * Looks up the two indirection tables (one for the type, one for the
- * subtype) to decide which function handles it and then calls the
- * handler.
- *
- * The event structure passed to the event handler has the radio
- * controller in @evt->rc referenced. The reference will be dropped
- * once the handler returns, so if it needs it for longer (async),
- * it'll need to take another one.
- */
-static
-int uwbd_event_handle_urc(struct uwb_event *evt)
-{
- int result = -EINVAL;
- struct uwbd_evt_type_handler *type_table;
- uwbd_evt_handler_f handler;
- u8 type, context;
- u16 event;
-
- type = evt->notif.rceb->bEventType;
- event = le16_to_cpu(evt->notif.rceb->wEvent);
- context = evt->notif.rceb->bEventContext;
-
- if (type >= ARRAY_SIZE(uwbd_urc_evt_type_handlers))
- goto out;
- type_table = &uwbd_urc_evt_type_handlers[type];
- if (type_table->uwbd_events == NULL)
- goto out;
- if (event >= type_table->size)
- goto out;
- handler = type_table->uwbd_events[event].handler;
- if (handler == NULL)
- goto out;
-
- result = (*handler)(evt);
-out:
- if (result < 0)
- dev_err(&evt->rc->uwb_dev.dev,
- "UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n",
- type, event, context, result);
- return result;
-}
-
-static void uwbd_event_handle_message(struct uwb_event *evt)
-{
- struct uwb_rc *rc;
- int result;
-
- rc = evt->rc;
-
- if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) {
- dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message);
- return;
- }
-
- result = uwbd_message_handlers[evt->message].handler(evt);
- if (result < 0)
- dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n",
- uwbd_message_handlers[evt->message].name, result);
-}
-
-static void uwbd_event_handle(struct uwb_event *evt)
-{
- struct uwb_rc *rc;
- int should_keep;
-
- rc = evt->rc;
-
- if (rc->ready) {
- switch (evt->type) {
- case UWB_EVT_TYPE_NOTIF:
- should_keep = uwbd_event_handle_urc(evt);
- if (should_keep <= 0)
- kfree(evt->notif.rceb);
- break;
- case UWB_EVT_TYPE_MSG:
- uwbd_event_handle_message(evt);
- break;
- default:
- dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type);
- break;
- }
- }
-
- __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */
-}
-
-/**
- * UWB Daemon
- *
- * Listens to all UWB notifications and takes care to track the state
- * of the UWB neighbourhood for the kernel. When we do a run, we
- * spinlock, move the list to a private copy and release the
- * lock. Hold it as little as possible. Not a conflict: it is
- * guaranteed we own the events in the private list.
- *
- * FIXME: should change so we don't have a 1HZ timer all the time, but
- * only if there are devices.
- */
-static int uwbd(void *param)
-{
- struct uwb_rc *rc = param;
- unsigned long flags;
- struct uwb_event *evt;
- int should_stop = 0;
-
- while (1) {
- wait_event_interruptible_timeout(
- rc->uwbd.wq,
- !list_empty(&rc->uwbd.event_list)
- || (should_stop = kthread_should_stop()),
- HZ);
- if (should_stop)
- break;
-
- spin_lock_irqsave(&rc->uwbd.event_list_lock, flags);
- if (!list_empty(&rc->uwbd.event_list)) {
- evt = list_first_entry(&rc->uwbd.event_list, struct uwb_event, list_node);
- list_del(&evt->list_node);
- } else
- evt = NULL;
- spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags);
-
- if (evt) {
- uwbd_event_handle(evt);
- kfree(evt);
- }
-
- uwb_beca_purge(rc); /* Purge devices that left */
- }
- return 0;
-}
-
-
-/** Start the UWB daemon */
-void uwbd_start(struct uwb_rc *rc)
-{
- struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
- if (IS_ERR(task)) {
- rc->uwbd.task = NULL;
- printk(KERN_ERR "UWB: Cannot start management daemon; "
- "UWB won't work\n");
- } else {
- rc->uwbd.task = task;
- rc->uwbd.pid = rc->uwbd.task->pid;
- }
-}
-
-/* Stop the UWB daemon and free any unprocessed events */
-void uwbd_stop(struct uwb_rc *rc)
-{
- if (rc->uwbd.task)
- kthread_stop(rc->uwbd.task);
- uwbd_flush(rc);
-}
-
-/*
- * Queue an event for the management daemon
- *
- * When some lower layer receives an event, it uses this function to
- * push it forward to the UWB daemon.
- *
- * Once you pass the event, you don't own it any more, but the daemon
- * does. It will kfree() it when done, so make sure you
- * uwb_event_alloc()ed it or bad things will happen.
- *
- * If the daemon is not running, we just free the event.
- */
-void uwbd_event_queue(struct uwb_event *evt)
-{
- struct uwb_rc *rc = evt->rc;
- unsigned long flags;
-
- spin_lock_irqsave(&rc->uwbd.event_list_lock, flags);
- if (rc->uwbd.pid != 0) {
- list_add(&evt->list_node, &rc->uwbd.event_list);
- wake_up_all(&rc->uwbd.wq);
- } else {
- __uwb_rc_put(evt->rc);
- if (evt->type == UWB_EVT_TYPE_NOTIF)
- kfree(evt->notif.rceb);
- kfree(evt);
- }
- spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags);
- return;
-}
-
-void uwbd_flush(struct uwb_rc *rc)
-{
- struct uwb_event *evt, *nxt;
-
- spin_lock_irq(&rc->uwbd.event_list_lock);
- list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) {
- if (evt->rc == rc) {
- __uwb_rc_put(rc);
- list_del(&evt->list_node);
- if (evt->type == UWB_EVT_TYPE_NOTIF)
- kfree(evt->notif.rceb);
- kfree(evt);
- }
- }
- spin_unlock_irq(&rc->uwbd.event_list_lock);
-}
diff --git a/drivers/staging/uwb/whc-rc.c b/drivers/staging/uwb/whc-rc.c
deleted file mode 100644
index a5ab255d7d36..000000000000
--- a/drivers/staging/uwb/whc-rc.c
+++ /dev/null
@@ -1,467 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3])
- * Radio Control command/event transport to the UWB stack
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Initialize and hook up the Radio Control interface.
- *
- * For each device probed, creates an 'struct whcrc' which contains
- * just the representation of the UWB Radio Controller, and the logic
- * for reading notifications and passing them to the UWB Core.
- *
- * So we initialize all of those, register the UWB Radio Controller
- * and set up the notification/event handle to pipe the notifications
- * to the UWB management Daemon.
- *
- * Once uwb_rc_add() is called, the UWB stack takes control, resets
- * the radio and readies the device to take commands from the UWB
- * API/user-space.
- *
- * Note this driver is just a transport driver; the commands are
- * formed at the UWB stack and given to this driver, which will deliver
- * them to the hw and transfer the replies/notifications back to the
- * UWB stack through the UWB daemon (UWBD).
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include "uwb.h"
-#include "include/whci.h"
-#include "include/umc.h"
-
-#include "uwb-internal.h"
-
-/**
- * Descriptor for an instance of the UWB Radio Control Driver that
- * attaches to the URC interface of the WHCI PCI card.
- *
- * Unless there is a lock specific to the 'data members', all access
- * is protected by uwb_rc->mutex.
- */
-struct whcrc {
- struct umc_dev *umc_dev;
- struct uwb_rc *uwb_rc; /* UWB host controller */
-
- unsigned long area;
- void __iomem *rc_base;
- size_t rc_len;
- spinlock_t irq_lock;
-
- void *evt_buf, *cmd_buf;
- dma_addr_t evt_dma_buf, cmd_dma_buf;
- wait_queue_head_t cmd_wq;
- struct work_struct event_work;
-};
-
-/**
- * Execute an UWB RC command on WHCI/RC
- *
- * @rc: Instance of a Radio Controller that is a whcrc
- * @cmd: Buffer containing the RCCB and payload to execute
- * @cmd_size: Size of the command buffer.
- *
- * We copy the command into whcrc->cmd_buf (as it is nicely aligned
- * and physically contiguous) and then press the right keys in
- * the controller's URCCMD register to get it to read it. We might
- * have to wait for the cmd_sem to be open to us.
- *
- * NOTE: rc's mutex has to be locked
- */
-static int whcrc_cmd(struct uwb_rc *uwb_rc,
- const struct uwb_rccb *cmd, size_t cmd_size)
-{
- int result = 0;
- struct whcrc *whcrc = uwb_rc->priv;
- struct device *dev = &whcrc->umc_dev->dev;
- u32 urccmd;
-
- if (cmd_size >= 4096)
- return -EINVAL;
-
- /*
- * If the URC is halted, then the hardware has reset itself.
- * Attempt to recover by restarting the device and then return
- * an error as it's likely that the current command isn't
- * valid for a newly started RC.
- */
- if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
- dev_err(dev, "requesting reset of halted radio controller\n");
- uwb_rc_reset_all(uwb_rc);
- return -EIO;
- }
-
- result = wait_event_timeout(whcrc->cmd_wq,
- !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
- if (result == 0) {
- dev_err(dev, "device is not ready to execute commands\n");
- return -ETIMEDOUT;
- }
-
- memmove(whcrc->cmd_buf, cmd, cmd_size);
- le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);
-
- spin_lock(&whcrc->irq_lock);
- urccmd = le_readl(whcrc->rc_base + URCCMD);
- urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
- le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
- whcrc->rc_base + URCCMD);
- spin_unlock(&whcrc->irq_lock);
-
- return 0;
-}
-
-static int whcrc_reset(struct uwb_rc *rc)
-{
- struct whcrc *whcrc = rc->priv;
-
- return umc_controller_reset(whcrc->umc_dev);
-}
-
-/**
- * Reset event reception mechanism and tell hw we are ready to get more
- *
- * We have read all the events in the event buffer, so we are ready to
- * reset it to the beginning.
- *
- * This is only called during initialization or after an event buffer
- * has been retired. This means we can be sure that event processing
- * is disabled and it's safe to update the URCEVTADDR register.
- *
- * There's no need to wait for the event processing to start as the
- * URC will not clear URCCMD_ACTIVE until (internal) event buffer
- * space is available.
- */
-static
-void whcrc_enable_events(struct whcrc *whcrc)
-{
- u32 urccmd;
-
- le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);
-
- spin_lock(&whcrc->irq_lock);
- urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
- le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
- spin_unlock(&whcrc->irq_lock);
-}
-
-static void whcrc_event_work(struct work_struct *work)
-{
- struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
- size_t size;
- u64 urcevtaddr;
-
- urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
- size = urcevtaddr & URCEVTADDR_OFFSET_MASK;
-
- uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
- whcrc_enable_events(whcrc);
-}
-
-/**
- * Catch interrupts?
- *
- * We ack immediately (and expect the hw to do the right thing and
- * raise another IRQ if things have changed :)
- */
-static
-irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
-{
- struct whcrc *whcrc = _whcrc;
- struct device *dev = &whcrc->umc_dev->dev;
- u32 urcsts;
-
- urcsts = le_readl(whcrc->rc_base + URCSTS);
- if (!(urcsts & URCSTS_INT_MASK))
- return IRQ_NONE;
- le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);
-
- if (urcsts & URCSTS_HSE) {
- dev_err(dev, "host system error -- hardware halted\n");
- /* FIXME: do something sensible here */
- goto out;
- }
- if (urcsts & URCSTS_ER)
- schedule_work(&whcrc->event_work);
- if (urcsts & URCSTS_RCI)
- wake_up_all(&whcrc->cmd_wq);
-out:
- return IRQ_HANDLED;
-}
-
-
-/**
- * Initialize a UMC RC interface: map regions, get (shared) IRQ
- */
-static
-int whcrc_setup_rc_umc(struct whcrc *whcrc)
-{
- int result = 0;
- struct device *dev = &whcrc->umc_dev->dev;
- struct umc_dev *umc_dev = whcrc->umc_dev;
-
- whcrc->area = umc_dev->resource.start;
- whcrc->rc_len = resource_size(&umc_dev->resource);
- result = -EBUSY;
- if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) {
- dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
- whcrc->rc_len, whcrc->area, result);
- goto error_request_region;
- }
-
- whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len);
- if (whcrc->rc_base == NULL) {
- dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
- whcrc->rc_len, whcrc->area, result);
- goto error_ioremap;
- }
-
- result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
- KBUILD_MODNAME, whcrc);
- if (result < 0) {
- dev_err(dev, "can't allocate IRQ %d: %d\n",
- umc_dev->irq, result);
- goto error_request_irq;
- }
-
- result = -ENOMEM;
- whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
- &whcrc->cmd_dma_buf, GFP_KERNEL);
- if (whcrc->cmd_buf == NULL) {
- dev_err(dev, "Can't allocate cmd transfer buffer\n");
- goto error_cmd_buffer;
- }
-
- whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
- &whcrc->evt_dma_buf, GFP_KERNEL);
- if (whcrc->evt_buf == NULL) {
- dev_err(dev, "Can't allocate evt transfer buffer\n");
- goto error_evt_buffer;
- }
- return 0;
-
-error_evt_buffer:
- dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
- whcrc->cmd_dma_buf);
-error_cmd_buffer:
- free_irq(umc_dev->irq, whcrc);
-error_request_irq:
- iounmap(whcrc->rc_base);
-error_ioremap:
- release_mem_region(whcrc->area, whcrc->rc_len);
-error_request_region:
- return result;
-}
-
-
-/**
- * Release RC's UMC resources
- */
-static
-void whcrc_release_rc_umc(struct whcrc *whcrc)
-{
- struct umc_dev *umc_dev = whcrc->umc_dev;
-
- dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
- whcrc->evt_dma_buf);
- dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
- whcrc->cmd_dma_buf);
- free_irq(umc_dev->irq, whcrc);
- iounmap(whcrc->rc_base);
- release_mem_region(whcrc->area, whcrc->rc_len);
-}
-
-
-/**
- * whcrc_start_rc - start a WHCI radio controller
- * @whcrc: the radio controller to start
- *
- * Reset the UMC device, start the radio controller, enable events and
- * finally enable interrupts.
- */
-static int whcrc_start_rc(struct uwb_rc *rc)
-{
- struct whcrc *whcrc = rc->priv;
- struct device *dev = &whcrc->umc_dev->dev;
-
- /* Reset the thing */
- le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
- if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
- 5000, "hardware reset") < 0)
- return -EBUSY;
-
- /* Set the event buffer, start the controller (enable IRQs later) */
- le_writel(0, whcrc->rc_base + URCINTR);
- le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
- if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
- 5000, "radio controller start") < 0)
- return -ETIMEDOUT;
- whcrc_enable_events(whcrc);
- le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
- return 0;
-}
-
-
-/**
- * whcrc_stop_rc - stop a WHCI radio controller
- * @whcrc: the radio controller to stop
- *
- * Disable interrupts and cancel any pending event processing work
- * before clearing the Run/Stop bit.
- */
-static
-void whcrc_stop_rc(struct uwb_rc *rc)
-{
- struct whcrc *whcrc = rc->priv;
- struct umc_dev *umc_dev = whcrc->umc_dev;
-
- le_writel(0, whcrc->rc_base + URCINTR);
- cancel_work_sync(&whcrc->event_work);
-
- le_writel(0, whcrc->rc_base + URCCMD);
- whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
- URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop");
-}
-
-static void whcrc_init(struct whcrc *whcrc)
-{
- spin_lock_init(&whcrc->irq_lock);
- init_waitqueue_head(&whcrc->cmd_wq);
- INIT_WORK(&whcrc->event_work, whcrc_event_work);
-}
-
-/**
- * Initialize the radio controller.
- *
- * NOTE: we set up whcrc->uwb_rc before calling uwb_rc_add(); in the
- * IRQ handler we use that to determine if the hw is ready to
- * handle events. Looks like a race condition, but it really is
- * not.
- */
-static
-int whcrc_probe(struct umc_dev *umc_dev)
-{
- int result;
- struct uwb_rc *uwb_rc;
- struct whcrc *whcrc;
- struct device *dev = &umc_dev->dev;
-
- result = -ENOMEM;
- uwb_rc = uwb_rc_alloc();
- if (uwb_rc == NULL) {
- dev_err(dev, "unable to allocate RC instance\n");
- goto error_rc_alloc;
- }
- whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
- if (whcrc == NULL) {
- dev_err(dev, "unable to allocate WHC-RC instance\n");
- goto error_alloc;
- }
- whcrc_init(whcrc);
- whcrc->umc_dev = umc_dev;
-
- result = whcrc_setup_rc_umc(whcrc);
- if (result < 0) {
- dev_err(dev, "Can't setup RC UMC interface: %d\n", result);
- goto error_setup_rc_umc;
- }
- whcrc->uwb_rc = uwb_rc;
-
- uwb_rc->owner = THIS_MODULE;
- uwb_rc->cmd = whcrc_cmd;
- uwb_rc->reset = whcrc_reset;
- uwb_rc->start = whcrc_start_rc;
- uwb_rc->stop = whcrc_stop_rc;
-
- result = uwb_rc_add(uwb_rc, dev, whcrc);
- if (result < 0)
- goto error_rc_add;
- umc_set_drvdata(umc_dev, whcrc);
- return 0;
-
-error_rc_add:
- whcrc_release_rc_umc(whcrc);
-error_setup_rc_umc:
- kfree(whcrc);
-error_alloc:
- uwb_rc_put(uwb_rc);
-error_rc_alloc:
- return result;
-}
-
-/**
- * Clean up the radio control resources
- *
- * When we up the command semaphore, anybody blocked trying to
- * execute a command is granted entry; they will then see that the
- * host is quiescing and up it again (so it chains to the next waiter).
- * This should not happen in any case, as we can only remove when
- * there are no handles open...
- */
-static void whcrc_remove(struct umc_dev *umc_dev)
-{
- struct whcrc *whcrc = umc_get_drvdata(umc_dev);
- struct uwb_rc *uwb_rc = whcrc->uwb_rc;
-
- umc_set_drvdata(umc_dev, NULL);
- uwb_rc_rm(uwb_rc);
- whcrc_release_rc_umc(whcrc);
- kfree(whcrc);
- uwb_rc_put(uwb_rc);
-}
-
-static int whcrc_pre_reset(struct umc_dev *umc)
-{
- struct whcrc *whcrc = umc_get_drvdata(umc);
- struct uwb_rc *uwb_rc = whcrc->uwb_rc;
-
- uwb_rc_pre_reset(uwb_rc);
- return 0;
-}
-
-static int whcrc_post_reset(struct umc_dev *umc)
-{
- struct whcrc *whcrc = umc_get_drvdata(umc);
- struct uwb_rc *uwb_rc = whcrc->uwb_rc;
-
- return uwb_rc_post_reset(uwb_rc);
-}
-
-/* PCI device ID's that we handle [so it gets loaded] */
-static struct pci_device_id __used whcrc_id_table[] = {
- { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
- { /* empty last entry */ }
-};
-MODULE_DEVICE_TABLE(pci, whcrc_id_table);
-
-static struct umc_driver whcrc_driver = {
- .name = "whc-rc",
- .cap_id = UMC_CAP_ID_WHCI_RC,
- .probe = whcrc_probe,
- .remove = whcrc_remove,
- .pre_reset = whcrc_pre_reset,
- .post_reset = whcrc_post_reset,
-};
-
-static int __init whcrc_driver_init(void)
-{
- return umc_driver_register(&whcrc_driver);
-}
-module_init(whcrc_driver_init);
-
-static void __exit whcrc_driver_exit(void)
-{
- umc_driver_unregister(&whcrc_driver);
-}
-module_exit(whcrc_driver_exit);
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/uwb/whci.c b/drivers/staging/uwb/whci.c
deleted file mode 100644
index a8832f64d708..000000000000
--- a/drivers/staging/uwb/whci.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * WHCI UWB Multi-interface Controller enumerator.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include "include/whci.h"
-#include "include/umc.h"
-
-struct whci_card {
- struct pci_dev *pci;
- void __iomem *uwbbase;
- u8 n_caps;
- struct umc_dev *devs[0];
-};
-
-
-/* Fix faulty HW :( */
-static
-u64 whci_capdata_quirks(struct whci_card *card, u64 capdata)
-{
- u64 capdata_orig = capdata;
- struct pci_dev *pci_dev = card->pci;
- if (pci_dev->vendor == PCI_VENDOR_ID_INTEL
- && (pci_dev->device == 0x0c3b || pci_dev->device == 0004)
- && pci_dev->class == 0x0d1010) {
- switch (UWBCAPDATA_TO_CAP_ID(capdata)) {
- /* WLP capability has 0x100 bytes of aperture */
- case 0x80:
- capdata |= 0x40 << 8; break;
- /* WUSB capability has 0x80 bytes of aperture
- * and ID is 1 */
- case 0x02:
- capdata &= ~0xffff;
- capdata |= 0x2001;
- break;
- }
- }
- if (capdata_orig != capdata)
- dev_warn(&pci_dev->dev,
- "PCI v%04x d%04x c%06x#%02x: "
- "corrected capdata from %016Lx to %016Lx\n",
- pci_dev->vendor, pci_dev->device, pci_dev->class,
- (unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
- (unsigned long long)capdata_orig,
- (unsigned long long)capdata);
- return capdata;
-}
-
-
-/**
- * whci_wait_for - wait for a WHCI register to be set
- *
- * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'.
- */
-int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result,
- unsigned long max_ms, const char *tag)
-{
- unsigned t = 0;
- u32 val;
- for (;;) {
- val = le_readl(reg);
- if ((val & mask) == result)
- break;
- if (t >= max_ms) {
- dev_err(dev, "%s timed out\n", tag);
- return -ETIMEDOUT;
- }
- msleep(10);
- t += 10;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(whci_wait_for);
-
-
-/*
- * NOTE: the capinfo and capdata registers are slightly different
- *       (size and cap-id fields), so for cap #0 we need to fill
- *       them in ourselves. Size comes from the size of the register
- *       block (statically calculated); cap_id comes from nowhere, so
- *       we use zero, which is reserved for the radio controller,
- *       because none was defined at the spec level.
- */
-static int whci_add_cap(struct whci_card *card, int n)
-{
- struct umc_dev *umc;
- u64 capdata;
- int bar, err;
-
- umc = umc_device_create(&card->pci->dev, n);
- if (umc == NULL)
- return -ENOMEM;
-
- capdata = le_readq(card->uwbbase + UWBCAPDATA(n));
-
- bar = UWBCAPDATA_TO_BAR(capdata) << 1;
-
- capdata = whci_capdata_quirks(card, capdata);
-	/* Capability 0 is the radio controller. Its size is 32
- * bytes (WHCI0.95[2.3, T2-9]). */
- umc->version = UWBCAPDATA_TO_VERSION(capdata);
- umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata);
- umc->bar = bar;
- umc->resource.start = pci_resource_start(card->pci, bar)
- + UWBCAPDATA_TO_OFFSET(capdata);
- umc->resource.end = umc->resource.start
- + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1;
- umc->resource.name = dev_name(&umc->dev);
- umc->resource.flags = card->pci->resource[bar].flags;
- umc->resource.parent = &card->pci->resource[bar];
- umc->irq = card->pci->irq;
-
- err = umc_device_register(umc);
- if (err < 0)
- goto error;
- card->devs[n] = umc;
- return 0;
-
-error:
- kfree(umc);
- return err;
-}
-
-static void whci_del_cap(struct whci_card *card, int n)
-{
- struct umc_dev *umc = card->devs[n];
-
- umc_device_unregister(umc);
-}
-
-static int whci_n_caps(struct pci_dev *pci)
-{
- void __iomem *uwbbase;
- u64 capinfo;
-
- uwbbase = pci_iomap(pci, 0, 8);
- if (!uwbbase)
- return -ENOMEM;
- capinfo = le_readq(uwbbase + UWBCAPINFO);
- pci_iounmap(pci, uwbbase);
-
- return UWBCAPINFO_TO_N_CAPS(capinfo);
-}
-
-static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id)
-{
- struct whci_card *card;
- int err, n_caps, n;
-
- err = pci_enable_device(pci);
- if (err < 0)
- goto error;
- pci_enable_msi(pci);
- pci_set_master(pci);
- err = -ENXIO;
- if (!pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
- pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
- else if (!pci_set_dma_mask(pci, DMA_BIT_MASK(32)))
- pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
- else
- goto error_dma;
-
- err = n_caps = whci_n_caps(pci);
- if (n_caps < 0)
- goto error_ncaps;
-
- err = -ENOMEM;
- card = kzalloc(sizeof(struct whci_card)
- + sizeof(struct umc_dev *) * (n_caps + 1),
- GFP_KERNEL);
- if (card == NULL)
- goto error_kzalloc;
- card->pci = pci;
- card->n_caps = n_caps;
-
- err = -EBUSY;
- if (!request_mem_region(pci_resource_start(pci, 0),
- UWBCAPDATA_SIZE(card->n_caps),
- "whci (capability data)"))
- goto error_request_memregion;
- err = -ENOMEM;
- card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps));
- if (!card->uwbbase)
- goto error_iomap;
-
- /* Add each capability. */
- for (n = 0; n <= card->n_caps; n++) {
- err = whci_add_cap(card, n);
- if (err < 0 && n == 0) {
- dev_err(&pci->dev, "cannot bind UWB radio controller:"
- " %d\n", err);
- goto error_bind;
- }
- if (err < 0)
- dev_warn(&pci->dev, "warning: cannot bind capability "
- "#%u: %d\n", n, err);
- }
- pci_set_drvdata(pci, card);
- return 0;
-
-error_bind:
- pci_iounmap(pci, card->uwbbase);
-error_iomap:
- release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
-error_request_memregion:
- kfree(card);
-error_kzalloc:
-error_ncaps:
-error_dma:
- pci_disable_msi(pci);
- pci_disable_device(pci);
-error:
- return err;
-}
-
-static void whci_remove(struct pci_dev *pci)
-{
- struct whci_card *card = pci_get_drvdata(pci);
- int n;
-
- pci_set_drvdata(pci, NULL);
- /* Unregister each capability in reverse (so the master device
- * is unregistered last). */
- for (n = card->n_caps; n >= 0 ; n--)
- whci_del_cap(card, n);
- pci_iounmap(pci, card->uwbbase);
- release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
- kfree(card);
- pci_disable_msi(pci);
- pci_disable_device(pci);
-}
-
-static struct pci_device_id whci_id_table[] = {
- { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
- { 0 },
-};
-MODULE_DEVICE_TABLE(pci, whci_id_table);
-
-
-static struct pci_driver whci_driver = {
- .name = "whci",
- .id_table = whci_id_table,
- .probe = whci_probe,
- .remove = whci_remove,
-};
-
-module_pci_driver(whci_driver);
-MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator");
-MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
-MODULE_LICENSE("GPL");
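The removed whci_probe() above uses the old pci_set_dma_mask()/pci_set_consistent_dma_mask() pair, which has since been superseded by the generic DMA API. A minimal sketch of the equivalent 64-bit-with-32-bit-fallback logic using dma_set_mask_and_coherent(); the function name is illustrative, not from this tree:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Sketch: prefer a 64-bit DMA mask, fall back to 32-bit. */
	static int example_setup_dma(struct pci_dev *pci)
	{
		int err;

		/* Sets both the streaming and coherent masks in one call. */
		err = dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(64));
		if (err)
			err = dma_set_mask_and_coherent(&pci->dev,
							DMA_BIT_MASK(32));
		return err;	/* 0 on success, negative errno otherwise */
	}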
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 1ef31a984741..597acef35d0b 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1776,7 +1776,7 @@ static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
video_set_drvdata(vfd, dev);
ret = video_register_device(vfd,
- VFL_TYPE_GRABBER,
+ VFL_TYPE_VIDEO,
video_nr[dev->camera_num]);
if (ret < 0)
return ret;
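VFL_TYPE_GRABBER was renamed to VFL_TYPE_VIDEO in the V4L2 core, so drivers now register their /dev/videoN nodes with the new constant. A hedged sketch of the registration call; the wrapper name is a placeholder, and -1 asks the core to pick the node number:

	#include <media/v4l2-dev.h>

	/* Sketch: register a video capture node under the new type name. */
	static int example_register(struct video_device *vfd)
	{
		return video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	}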
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index 89786c264867..5137fcf203d6 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -85,7 +85,6 @@ struct bm2835_mmal_v4l2_ctrl {
const s64 *imenu; /* integer menu array */
u32 mmal_id; /* mmal parameter id */
bm2835_mmal_v4l2_ctrl_cb *setter;
- bool ignore_errors;
};
struct v4l2_to_mmal_effects_setting {
@@ -912,8 +911,6 @@ static int bm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret)
pr_warn("ctrl id:%d/MMAL param %08X- returned ret %d\n",
ctrl->id, mmal_ctrl->mmal_id, ret);
- if (mmal_ctrl->ignore_errors)
- ret = 0;
return ret;
}
@@ -923,239 +920,340 @@ static const struct v4l2_ctrl_ops bm2835_mmal_ctrl_ops = {
static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
{
- V4L2_CID_SATURATION, MMAL_CONTROL_TYPE_STD,
- -100, 100, 0, 1, NULL,
- MMAL_PARAMETER_SATURATION,
- ctrl_set_rational,
- false
+ .id = V4L2_CID_SATURATION,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = -100,
+ .max = 100,
+ .def = 0,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_SATURATION,
+ .setter = ctrl_set_rational,
},
{
- V4L2_CID_SHARPNESS, MMAL_CONTROL_TYPE_STD,
- -100, 100, 0, 1, NULL,
- MMAL_PARAMETER_SHARPNESS,
- ctrl_set_rational,
- false
+ .id = V4L2_CID_SHARPNESS,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = -100,
+ .max = 100,
+ .def = 0,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_SHARPNESS,
+ .setter = ctrl_set_rational,
},
{
- V4L2_CID_CONTRAST, MMAL_CONTROL_TYPE_STD,
- -100, 100, 0, 1, NULL,
- MMAL_PARAMETER_CONTRAST,
- ctrl_set_rational,
- false
+ .id = V4L2_CID_CONTRAST,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = -100,
+ .max = 100,
+ .def = 0,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_CONTRAST,
+ .setter = ctrl_set_rational,
},
{
- V4L2_CID_BRIGHTNESS, MMAL_CONTROL_TYPE_STD,
- 0, 100, 50, 1, NULL,
- MMAL_PARAMETER_BRIGHTNESS,
- ctrl_set_rational,
- false
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 100,
+ .def = 50,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_BRIGHTNESS,
+ .setter = ctrl_set_rational,
},
{
- V4L2_CID_ISO_SENSITIVITY, MMAL_CONTROL_TYPE_INT_MENU,
- 0, ARRAY_SIZE(iso_qmenu) - 1, 0, 1, iso_qmenu,
- MMAL_PARAMETER_ISO,
- ctrl_set_iso,
- false
+ .id = V4L2_CID_ISO_SENSITIVITY,
+ .type = MMAL_CONTROL_TYPE_INT_MENU,
+ .min = 0,
+ .max = ARRAY_SIZE(iso_qmenu) - 1,
+ .def = 0,
+ .step = 1,
+ .imenu = iso_qmenu,
+ .mmal_id = MMAL_PARAMETER_ISO,
+ .setter = ctrl_set_iso,
},
{
- V4L2_CID_ISO_SENSITIVITY_AUTO, MMAL_CONTROL_TYPE_STD_MENU,
- 0, V4L2_ISO_SENSITIVITY_AUTO, V4L2_ISO_SENSITIVITY_AUTO, 1,
- NULL, MMAL_PARAMETER_ISO,
- ctrl_set_iso,
- false
+ .id = V4L2_CID_ISO_SENSITIVITY_AUTO,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = 0,
+ .max = V4L2_ISO_SENSITIVITY_AUTO,
+ .def = V4L2_ISO_SENSITIVITY_AUTO,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_ISO,
+ .setter = ctrl_set_iso,
},
{
- V4L2_CID_IMAGE_STABILIZATION, MMAL_CONTROL_TYPE_STD,
- 0, 1, 0, 1, NULL,
- MMAL_PARAMETER_VIDEO_STABILISATION,
- ctrl_set_value,
- false
+ .id = V4L2_CID_IMAGE_STABILIZATION,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_VIDEO_STABILISATION,
+ .setter = ctrl_set_value,
},
{
- V4L2_CID_EXPOSURE_AUTO, MMAL_CONTROL_TYPE_STD_MENU,
- ~0x03, V4L2_EXPOSURE_APERTURE_PRIORITY, V4L2_EXPOSURE_AUTO, 0,
- NULL, MMAL_PARAMETER_EXPOSURE_MODE,
- ctrl_set_exposure,
- false
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = ~0x03,
+ .max = V4L2_EXPOSURE_APERTURE_PRIORITY,
+ .def = V4L2_EXPOSURE_AUTO,
+ .step = 0,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_EXPOSURE_MODE,
+ .setter = ctrl_set_exposure,
},
{
- V4L2_CID_EXPOSURE_ABSOLUTE, MMAL_CONTROL_TYPE_STD,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = MMAL_CONTROL_TYPE_STD,
/* Units of 100usecs */
- 1, 1 * 1000 * 10, 100 * 10, 1, NULL,
- MMAL_PARAMETER_SHUTTER_SPEED,
- ctrl_set_exposure,
- false
+ .min = 1,
+ .max = 1 * 1000 * 10,
+ .def = 100 * 10,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_SHUTTER_SPEED,
+ .setter = ctrl_set_exposure,
},
{
- V4L2_CID_AUTO_EXPOSURE_BIAS, MMAL_CONTROL_TYPE_INT_MENU,
- 0, ARRAY_SIZE(ev_bias_qmenu) - 1,
- (ARRAY_SIZE(ev_bias_qmenu) + 1) / 2 - 1, 0, ev_bias_qmenu,
- MMAL_PARAMETER_EXPOSURE_COMP,
- ctrl_set_value_ev,
- false
+ .id = V4L2_CID_AUTO_EXPOSURE_BIAS,
+ .type = MMAL_CONTROL_TYPE_INT_MENU,
+ .min = 0,
+ .max = ARRAY_SIZE(ev_bias_qmenu) - 1,
+ .def = (ARRAY_SIZE(ev_bias_qmenu) + 1) / 2 - 1,
+ .step = 0,
+ .imenu = ev_bias_qmenu,
+ .mmal_id = MMAL_PARAMETER_EXPOSURE_COMP,
+ .setter = ctrl_set_value_ev,
},
{
- V4L2_CID_EXPOSURE_AUTO_PRIORITY, MMAL_CONTROL_TYPE_STD,
- 0, 1,
- 0, 1, NULL,
- 0, /* Dummy MMAL ID as it gets mapped into FPS range*/
- ctrl_set_exposure,
- false
+ .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .step = 1,
+ .imenu = NULL,
+ /* Dummy MMAL ID as it gets mapped into FPS range */
+ .mmal_id = 0,
+ .setter = ctrl_set_exposure,
},
{
- V4L2_CID_EXPOSURE_METERING,
- MMAL_CONTROL_TYPE_STD_MENU,
- ~0x7, V4L2_EXPOSURE_METERING_SPOT,
- V4L2_EXPOSURE_METERING_AVERAGE, 0, NULL,
- MMAL_PARAMETER_EXP_METERING_MODE,
- ctrl_set_metering_mode,
- false
+ .id = V4L2_CID_EXPOSURE_METERING,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = ~0x7,
+ .max = V4L2_EXPOSURE_METERING_SPOT,
+ .def = V4L2_EXPOSURE_METERING_AVERAGE,
+ .step = 0,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_EXP_METERING_MODE,
+ .setter = ctrl_set_metering_mode,
},
{
- V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
- MMAL_CONTROL_TYPE_STD_MENU,
- ~0x3ff, V4L2_WHITE_BALANCE_SHADE, V4L2_WHITE_BALANCE_AUTO, 0,
- NULL,
- MMAL_PARAMETER_AWB_MODE,
- ctrl_set_awb_mode,
- false
+ .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = ~0x3ff,
+ .max = V4L2_WHITE_BALANCE_SHADE,
+ .def = V4L2_WHITE_BALANCE_AUTO,
+ .step = 0,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_AWB_MODE,
+ .setter = ctrl_set_awb_mode,
},
{
- V4L2_CID_RED_BALANCE, MMAL_CONTROL_TYPE_STD,
- 1, 7999, 1000, 1, NULL,
- MMAL_PARAMETER_CUSTOM_AWB_GAINS,
- ctrl_set_awb_gains,
- false
+ .id = V4L2_CID_RED_BALANCE,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 1,
+ .max = 7999,
+ .def = 1000,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_CUSTOM_AWB_GAINS,
+ .setter = ctrl_set_awb_gains,
},
{
- V4L2_CID_BLUE_BALANCE, MMAL_CONTROL_TYPE_STD,
- 1, 7999, 1000, 1, NULL,
- MMAL_PARAMETER_CUSTOM_AWB_GAINS,
- ctrl_set_awb_gains,
- false
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 1,
+ .max = 7999,
+ .def = 1000,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_CUSTOM_AWB_GAINS,
+ .setter = ctrl_set_awb_gains,
},
{
- V4L2_CID_COLORFX, MMAL_CONTROL_TYPE_STD_MENU,
- 0, V4L2_COLORFX_SET_CBCR, V4L2_COLORFX_NONE, 0, NULL,
- MMAL_PARAMETER_IMAGE_EFFECT,
- ctrl_set_image_effect,
- false
+ .id = V4L2_CID_COLORFX,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = 0,
+ .max = V4L2_COLORFX_SET_CBCR,
+ .def = V4L2_COLORFX_NONE,
+ .step = 0,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_IMAGE_EFFECT,
+ .setter = ctrl_set_image_effect,
},
{
- V4L2_CID_COLORFX_CBCR, MMAL_CONTROL_TYPE_STD,
- 0, 0xffff, 0x8080, 1, NULL,
- MMAL_PARAMETER_COLOUR_EFFECT,
- ctrl_set_colfx,
- false
+ .id = V4L2_CID_COLORFX_CBCR,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 0xffff,
+ .def = 0x8080,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_COLOUR_EFFECT,
+ .setter = ctrl_set_colfx,
},
{
- V4L2_CID_ROTATE, MMAL_CONTROL_TYPE_STD,
- 0, 360, 0, 90, NULL,
- MMAL_PARAMETER_ROTATION,
- ctrl_set_rotate,
- false
+ .id = V4L2_CID_ROTATE,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 360,
+ .def = 0,
+ .step = 90,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_ROTATION,
+ .setter = ctrl_set_rotate,
},
{
- V4L2_CID_HFLIP, MMAL_CONTROL_TYPE_STD,
- 0, 1, 0, 1, NULL,
- MMAL_PARAMETER_MIRROR,
- ctrl_set_flip,
- false
+ .id = V4L2_CID_HFLIP,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_MIRROR,
+ .setter = ctrl_set_flip,
},
{
- V4L2_CID_VFLIP, MMAL_CONTROL_TYPE_STD,
- 0, 1, 0, 1, NULL,
- MMAL_PARAMETER_MIRROR,
- ctrl_set_flip,
- false
+ .id = V4L2_CID_VFLIP,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_MIRROR,
+ .setter = ctrl_set_flip,
},
{
- V4L2_CID_MPEG_VIDEO_BITRATE_MODE, MMAL_CONTROL_TYPE_STD_MENU,
- 0, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
- 0, 0, NULL,
- MMAL_PARAMETER_RATECONTROL,
- ctrl_set_bitrate_mode,
- false
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = 0,
+ .max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ .def = 0,
+ .step = 0,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_RATECONTROL,
+ .setter = ctrl_set_bitrate_mode,
},
{
- V4L2_CID_MPEG_VIDEO_BITRATE, MMAL_CONTROL_TYPE_STD,
- 25 * 1000, 25 * 1000 * 1000, 10 * 1000 * 1000, 25 * 1000, NULL,
- MMAL_PARAMETER_VIDEO_BIT_RATE,
- ctrl_set_bitrate,
- false
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 25 * 1000,
+ .max = 25 * 1000 * 1000,
+ .def = 10 * 1000 * 1000,
+ .step = 25 * 1000,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_VIDEO_BIT_RATE,
+ .setter = ctrl_set_bitrate,
},
{
- V4L2_CID_JPEG_COMPRESSION_QUALITY, MMAL_CONTROL_TYPE_STD,
- 1, 100,
- 30, 1, NULL,
- MMAL_PARAMETER_JPEG_Q_FACTOR,
- ctrl_set_image_encode_output,
- false
+ .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 1,
+ .max = 100,
+ .def = 30,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_JPEG_Q_FACTOR,
+ .setter = ctrl_set_image_encode_output,
},
{
- V4L2_CID_POWER_LINE_FREQUENCY, MMAL_CONTROL_TYPE_STD_MENU,
- 0, V4L2_CID_POWER_LINE_FREQUENCY_AUTO,
- 1, 1, NULL,
- MMAL_PARAMETER_FLICKER_AVOID,
- ctrl_set_flicker_avoidance,
- false
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = 0,
+ .max = V4L2_CID_POWER_LINE_FREQUENCY_AUTO,
+ .def = 1,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_FLICKER_AVOID,
+ .setter = ctrl_set_flicker_avoidance,
},
{
- V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER, MMAL_CONTROL_TYPE_STD,
- 0, 1,
- 0, 1, NULL,
- MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER,
- ctrl_set_video_encode_param_output,
- false
+ .id = V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER,
+ .setter = ctrl_set_video_encode_param_output,
},
{
- V4L2_CID_MPEG_VIDEO_H264_PROFILE,
- MMAL_CONTROL_TYPE_STD_MENU,
- ~(BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
- BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
- BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
- BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
- V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
- V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, 1, NULL,
- MMAL_PARAMETER_PROFILE,
- ctrl_set_video_encode_profile_level,
- false
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = ~(BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_PROFILE,
+ .setter = ctrl_set_video_encode_profile_level,
},
{
- V4L2_CID_MPEG_VIDEO_H264_LEVEL, MMAL_CONTROL_TYPE_STD_MENU,
- ~(BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
- BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
- V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
- V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 1, NULL,
- MMAL_PARAMETER_PROFILE,
- ctrl_set_video_encode_profile_level,
- false
+ .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ .min = ~(BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
+ .def = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_PROFILE,
+ .setter = ctrl_set_video_encode_profile_level,
},
{
- V4L2_CID_SCENE_MODE, MMAL_CONTROL_TYPE_STD_MENU,
- -1, /* Min (mask) is computed at runtime */
- V4L2_SCENE_MODE_TEXT,
- V4L2_SCENE_MODE_NONE, 1, NULL,
- MMAL_PARAMETER_PROFILE,
- ctrl_set_scene_mode,
- false
+ .id = V4L2_CID_SCENE_MODE,
+ .type = MMAL_CONTROL_TYPE_STD_MENU,
+ /* mask is computed at runtime */
+ .min = -1,
+ .max = V4L2_SCENE_MODE_TEXT,
+ .def = V4L2_SCENE_MODE_NONE,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_PROFILE,
+ .setter = ctrl_set_scene_mode,
},
{
- V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, MMAL_CONTROL_TYPE_STD,
- 0, 0x7FFFFFFF, 60, 1, NULL,
- MMAL_PARAMETER_INTRAPERIOD,
- ctrl_set_video_encode_param_output,
- false
+ .id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ .type = MMAL_CONTROL_TYPE_STD,
+ .min = 0,
+ .max = 0x7FFFFFFF,
+ .def = 60,
+ .step = 1,
+ .imenu = NULL,
+ .mmal_id = MMAL_PARAMETER_INTRAPERIOD,
+ .setter = ctrl_set_video_encode_param_output,
},
};
@@ -1168,7 +1266,7 @@ int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev)
if ((dev->ctrls[c]) && (v4l2_ctrls[c].setter)) {
ret = v4l2_ctrls[c].setter(dev, dev->ctrls[c],
&v4l2_ctrls[c]);
- if (!v4l2_ctrls[c].ignore_errors && ret) {
+ if (ret) {
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Failed when setting default values for ctrl %d\n",
c);
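The control table above moves from positional to C99 designated initializers, which is what made dropping the ignore_errors field a mechanical change: entries no longer depend on member order, and any omitted member is zero-initialized. A generic sketch of the pattern; the struct is illustrative:

	struct example_ctrl {
		unsigned int id;
		int min, max, def, step;
	};

	/* Positional form: every field must appear, in declaration order. */
	static const struct example_ctrl positional = { 1, -100, 100, 0, 1 };

	/* Designated form: self-documenting and order-independent; any
	 * member left out is implicitly zero-initialized.
	 */
	static const struct example_ctrl designated = {
		.id	= 1,
		.min	= -100,
		.max	= 100,
		.def	= 0,
		.step	= 1,
	};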
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
index 141af16ce031..7fc04e38936d 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -33,10 +33,14 @@ enum vchi_crc_control {
enum vchi_callback_reason {
VCHI_CALLBACK_REASON_MIN,
- //This indicates that there is data available
- //handle is the msg id that was transmitted with the data
- // When a message is received and there was no FULL message available previously, send callback
- // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
+ /*
+ * This indicates that there is data available; handle is the msg id
+ * that was transmitted with the data.
+ * When a message is received and there was no FULL message available
+ * previously, send the callback.
+ * Tasks get kicked by the callback, reset their event and try to read
+ * from the fifo until it fails
+ */
VCHI_CALLBACK_MSG_AVAILABLE,
VCHI_CALLBACK_MSG_SENT,
VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
@@ -51,9 +55,11 @@ enum vchi_callback_reason {
VCHI_CALLBACK_SERVICE_CLOSED,
- // this side has sent XOFF to peer due to lack of data consumption by service
- // (suggests the service may need to take some recovery action if it has
- // been deliberately holding off consuming data)
+ /*
+ * This side has sent XOFF to the peer due to lack of data consumption
+ * by the service (suggests the service may need to take some recovery
+ * action if it has been deliberately holding off consuming data)
+ */
VCHI_CALLBACK_SENT_XOFF,
VCHI_CALLBACK_SENT_XON,
@@ -112,12 +118,16 @@ struct vchi_msg_vector {
int32_t vec_len;
};
-// Iterator structure for reading ahead through received message queue. Allocated by client,
-// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
-// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
-// will not proceed to messages received since. Behaviour is undefined if an iterator
-// is used again after messages for that service are removed/dequeued by any
-// means other than vchi_msg_iter_... calls on the iterator itself.
+/*
+ * Iterator structure for reading ahead through received message queue.
+ * Allocated by client, initialised by vchi_msg_look_ahead. Fields are for
+ * internal VCHI use only.
+ * Iterates over messages in queue at the instant of the call to
+ * vchi_msg_lookahead - will not proceed to messages received since.
+ * Behaviour is undefined if an iterator is used again after messages for that
+ * service are removed/dequeued by any means other than vchi_msg_iter_...
+ * calls on the iterator itself.
+ */
struct vchi_msg_iter {
struct opaque_vchi_service_t *service;
void *last;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index ca30bfd52919..c18c6ca0b6c0 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -257,49 +257,6 @@ int vchiq_dump_platform_state(void *dump_context)
return vchiq_dump(dump_context, buf, len + 1);
}
-enum vchiq_status
-vchiq_platform_suspend(struct vchiq_state *state)
-{
- return VCHIQ_ERROR;
-}
-
-enum vchiq_status
-vchiq_platform_resume(struct vchiq_state *state)
-{
- return VCHIQ_SUCCESS;
-}
-
-void
-vchiq_platform_paused(struct vchiq_state *state)
-{
-}
-
-void
-vchiq_platform_resumed(struct vchiq_state *state)
-{
-}
-
-int
-vchiq_platform_videocore_wanted(struct vchiq_state *state)
-{
- return 1; // autosuspend not supported - videocore always wanted
-}
-
-int
-vchiq_platform_use_suspend_timer(void)
-{
- return 0;
-}
-void
-vchiq_dump_platform_use_state(struct vchiq_state *state)
-{
- vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
-}
-void
-vchiq_platform_handle_timeout(struct vchiq_state *state)
-{
- (void)state;
-}
/*
* Local functions
*/
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 4458c1e60fa3..a1ea9777a444 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
+#include <linux/rcupdate.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#include "vchiq_core.h"
@@ -48,39 +49,6 @@
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
-#define SUSPEND_TIMER_TIMEOUT_MS 100
-#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
-
-#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
-static const char *const suspend_state_names[] = {
- "VC_SUSPEND_FORCE_CANCELED",
- "VC_SUSPEND_REJECTED",
- "VC_SUSPEND_FAILED",
- "VC_SUSPEND_IDLE",
- "VC_SUSPEND_REQUESTED",
- "VC_SUSPEND_IN_PROGRESS",
- "VC_SUSPEND_SUSPENDED"
-};
-#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
-static const char *const resume_state_names[] = {
- "VC_RESUME_FAILED",
- "VC_RESUME_IDLE",
- "VC_RESUME_REQUESTED",
- "VC_RESUME_IN_PROGRESS",
- "VC_RESUME_RESUMED"
-};
-/* The number of times we allow force suspend to timeout before actually
-** _forcing_ suspend. This is to cater for SW which fails to release vchiq
-** correctly - we don't want to prevent ARM suspend indefinitely in this case.
-*/
-#define FORCE_SUSPEND_FAIL_MAX 8
-
-/* The time in ms allowed for videocore to go idle when force suspend has been
- * requested */
-#define FORCE_SUSPEND_TIMEOUT_MS 200
-
-static void suspend_timer_callback(struct timer_list *t);
-
struct user_service {
struct vchiq_service *service;
void *userdata;
@@ -2129,10 +2097,12 @@ int vchiq_dump_platform_instances(void *dump_context)
/* There is no list of instances, so instead scan all services,
marking those that have been dumped. */
+ rcu_read_lock();
for (i = 0; i < state->unused_service; i++) {
- struct vchiq_service *service = state->services[i];
+ struct vchiq_service *service;
struct vchiq_instance *instance;
+ service = rcu_dereference(state->services[i]);
if (!service || service->base.callback != service_callback)
continue;
@@ -2140,18 +2110,26 @@ int vchiq_dump_platform_instances(void *dump_context)
if (instance)
instance->mark = 0;
}
+ rcu_read_unlock();
for (i = 0; i < state->unused_service; i++) {
- struct vchiq_service *service = state->services[i];
+ struct vchiq_service *service;
struct vchiq_instance *instance;
int err;
- if (!service || service->base.callback != service_callback)
+ rcu_read_lock();
+ service = rcu_dereference(state->services[i]);
+ if (!service || service->base.callback != service_callback) {
+ rcu_read_unlock();
continue;
+ }
instance = service->instance;
- if (!instance || instance->mark)
+ if (!instance || instance->mark) {
+ rcu_read_unlock();
continue;
+ }
+ rcu_read_unlock();
len = snprintf(buf, sizeof(buf),
"Instance %pK: pid %d,%s completions %d/%d",
@@ -2161,7 +2139,6 @@ int vchiq_dump_platform_instances(void *dump_context)
instance->completion_insert -
instance->completion_remove,
MAX_COMPLETIONS);
-
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
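The instance scan above replaces plain loads of state->services[i] with rcu_dereference() under rcu_read_lock(), and drops the lock before anything that may sleep (vchiq_dump() here). A minimal sketch of that read-side pattern, with illustrative names:

	#include <linux/rcupdate.h>

	struct item { int mark; };

	/* Sketch: scan an RCU-protected pointer array without sleeping
	 * inside the read-side critical section.
	 */
	static void example_scan(struct item __rcu **slots, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			struct item *it;

			rcu_read_lock();
			it = rcu_dereference(slots[i]);
			if (!it) {
				rcu_read_unlock();
				continue;
			}
			it->mark = 0;	/* cheap, non-sleeping work only */
			rcu_read_unlock();
			/* anything that may sleep goes here, after unlock */
		}
	}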
@@ -2184,17 +2161,17 @@ int vchiq_dump_platform_service_state(void *dump_context,
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf), " instance %pK", service->instance);
+ len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);
if ((service->base.callback == service_callback) &&
user_service->is_vchi) {
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
", %d/%d messages",
user_service->msg_insert - user_service->msg_remove,
MSG_QUEUE_SIZE);
if (user_service->dequeue_pending)
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
" (dequeue pending)");
}
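The switch from snprintf() to scnprintf() above matters whenever return values are chained as offsets: snprintf() returns the length the output would have had, which can exceed the buffer and push buf + len past the end, whereas scnprintf() returns the number of characters actually written. A small sketch of the safe append idiom:

	#include <linux/kernel.h>	/* scnprintf() */

	/* Sketch: safely append several fields into one fixed buffer. */
	static void example_format(char *buf, size_t size, int a, int b)
	{
		int len;

		len = scnprintf(buf, size, "a=%d", a);
		/* len is what was actually written, so this offset stays
		 * in bounds even if the first write was truncated.
		 */
		len += scnprintf(buf + len, size - len, " b=%d", b);
	}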
@@ -2258,27 +2235,6 @@ vchiq_fops = {
* Autosuspend related functionality
*/
-int
-vchiq_videocore_wanted(struct vchiq_state *state)
-{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
-
- if (!arm_state)
- /* autosuspend not supported - always return wanted */
- return 1;
- else if (arm_state->blocked_count)
- return 1;
- else if (!arm_state->videocore_use_count)
- /* usage count zero - check for override unless we're forcing */
- if (arm_state->resume_blocked)
- return 0;
- else
- return vchiq_platform_videocore_wanted(state);
- else
- /* non-zero usage count - videocore still required */
- return 1;
-}
-
static enum vchiq_status
vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
struct vchiq_header *header,
@@ -2382,317 +2338,13 @@ vchiq_arm_init_state(struct vchiq_state *state,
atomic_set(&arm_state->ka_use_ack_count, 0);
atomic_set(&arm_state->ka_release_count, 0);
- init_completion(&arm_state->vc_suspend_complete);
-
- init_completion(&arm_state->vc_resume_complete);
- /* Initialise to 'done' state. We only want to block on resume
- * completion while videocore is suspended. */
- set_resume_state(arm_state, VC_RESUME_RESUMED);
-
- init_completion(&arm_state->resume_blocker);
- /* Initialise to 'done' state. We only want to block on this
- * completion while resume is blocked */
- complete_all(&arm_state->resume_blocker);
-
- init_completion(&arm_state->blocked_blocker);
- /* Initialise to 'done' state. We only want to block on this
- * completion while things are waiting on the resume blocker */
- complete_all(&arm_state->blocked_blocker);
-
- arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
- arm_state->suspend_timer_running = 0;
arm_state->state = state;
- timer_setup(&arm_state->suspend_timer, suspend_timer_callback,
- 0);
-
arm_state->first_connect = 0;
}
return VCHIQ_SUCCESS;
}
-/*
-** Functions to modify the state variables;
-** set_suspend_state
-** set_resume_state
-**
-** There are more state variables than we might like, so ensure they remain in
-** step. Suspend and resume state are maintained separately, since most of
-** these state machines can operate independently. However, there are a few
-** states where state transitions in one state machine cause a reset to the
-** other state machine. In addition, there are some completion events which
-** need to occur on state machine reset and end-state(s), so these are also
-** dealt with in these functions.
-**
-** In all states we set the state variable according to the input, but in some
-** cases we perform additional steps outlined below;
-**
-** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
-** The suspend completion is completed after any suspend
-** attempt. When we reset the state machine we also reset
-** the completion. This reset occurs when videocore is
-** resumed, and also if we initiate suspend after a suspend
-** failure.
-**
-** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
-** suspend - ie from this point on we must try to suspend
-** before resuming can occur. We therefore also reset the
-** resume state machine to VC_RESUME_IDLE in this state.
-**
-** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
-** complete_all on the suspend completion to notify
-** anything waiting for suspend to happen.
-**
-** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
-** initiate resume, so no need to alter resume state.
-** We call complete_all on the suspend completion to notify
-** of suspend rejection.
-**
-** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
-** suspend completion and reset the resume state machine.
-**
-** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
-** resume completion is in it's 'done' state whenever
-** videcore is running. Therefore, the VC_RESUME_IDLE
-** state implies that videocore is suspended.
-** Hence, any thread which needs to wait until videocore is
-** running can wait on this completion - it will only block
-** if videocore is suspended.
-**
-** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
-** Call complete_all on the resume completion to unblock
-** any threads waiting for resume. Also reset the suspend
-** state machine to it's idle state.
-**
-** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
-*/
-
-void
-set_suspend_state(struct vchiq_arm_state *arm_state,
- enum vc_suspend_status new_state)
-{
- /* set the state in all cases */
- arm_state->vc_suspend_state = new_state;
-
- /* state specific additional actions */
- switch (new_state) {
- case VC_SUSPEND_FORCE_CANCELED:
- complete_all(&arm_state->vc_suspend_complete);
- break;
- case VC_SUSPEND_REJECTED:
- complete_all(&arm_state->vc_suspend_complete);
- break;
- case VC_SUSPEND_FAILED:
- complete_all(&arm_state->vc_suspend_complete);
- arm_state->vc_resume_state = VC_RESUME_RESUMED;
- complete_all(&arm_state->vc_resume_complete);
- break;
- case VC_SUSPEND_IDLE:
- reinit_completion(&arm_state->vc_suspend_complete);
- break;
- case VC_SUSPEND_REQUESTED:
- break;
- case VC_SUSPEND_IN_PROGRESS:
- set_resume_state(arm_state, VC_RESUME_IDLE);
- break;
- case VC_SUSPEND_SUSPENDED:
- complete_all(&arm_state->vc_suspend_complete);
- break;
- default:
- BUG();
- break;
- }
-}
-
-void
-set_resume_state(struct vchiq_arm_state *arm_state,
- enum vc_resume_status new_state)
-{
- /* set the state in all cases */
- arm_state->vc_resume_state = new_state;
-
- /* state specific additional actions */
- switch (new_state) {
- case VC_RESUME_FAILED:
- break;
- case VC_RESUME_IDLE:
- reinit_completion(&arm_state->vc_resume_complete);
- break;
- case VC_RESUME_REQUESTED:
- break;
- case VC_RESUME_IN_PROGRESS:
- break;
- case VC_RESUME_RESUMED:
- complete_all(&arm_state->vc_resume_complete);
- set_suspend_state(arm_state, VC_SUSPEND_IDLE);
- break;
- default:
- BUG();
- break;
- }
-}
-
-/* should be called with the write lock held */
-inline void
-start_suspend_timer(struct vchiq_arm_state *arm_state)
-{
- del_timer(&arm_state->suspend_timer);
- arm_state->suspend_timer.expires = jiffies +
- msecs_to_jiffies(arm_state->suspend_timer_timeout);
- add_timer(&arm_state->suspend_timer);
- arm_state->suspend_timer_running = 1;
-}
-
-/* should be called with the write lock held */
-static inline void
-stop_suspend_timer(struct vchiq_arm_state *arm_state)
-{
- if (arm_state->suspend_timer_running) {
- del_timer(&arm_state->suspend_timer);
- arm_state->suspend_timer_running = 0;
- }
-}
-
-static inline int
-need_resume(struct vchiq_state *state)
-{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
-
- return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
- (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
- vchiq_videocore_wanted(state);
-}
-
-static inline void
-unblock_resume(struct vchiq_arm_state *arm_state)
-{
- complete_all(&arm_state->resume_blocker);
- arm_state->resume_blocked = 0;
-}
-
-/* Initiate suspend via slot handler. Should be called with the write lock
- * held */
-enum vchiq_status
-vchiq_arm_vcsuspend(struct vchiq_state *state)
-{
- enum vchiq_status status = VCHIQ_ERROR;
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
-
- if (!arm_state)
- goto out;
-
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
- status = VCHIQ_SUCCESS;
-
- switch (arm_state->vc_suspend_state) {
- case VC_SUSPEND_REQUESTED:
- vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
- "requested", __func__);
- break;
- case VC_SUSPEND_IN_PROGRESS:
- vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
- "progress", __func__);
- break;
-
- default:
- /* We don't expect to be in other states, so log but continue
- * anyway */
- vchiq_log_error(vchiq_susp_log_level,
- "%s unexpected suspend state %s", __func__,
- suspend_state_names[arm_state->vc_suspend_state +
- VC_SUSPEND_NUM_OFFSET]);
- /* fall through */
- case VC_SUSPEND_REJECTED:
- case VC_SUSPEND_FAILED:
- /* Ensure any idle state actions have been run */
- set_suspend_state(arm_state, VC_SUSPEND_IDLE);
- /* fall through */
- case VC_SUSPEND_IDLE:
- vchiq_log_info(vchiq_susp_log_level,
- "%s: suspending", __func__);
- set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
- /* kick the slot handler thread to initiate suspend */
- request_poll(state, NULL, 0);
- break;
- }
-
-out:
- vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
- return status;
-}
-
-void
-vchiq_platform_check_suspend(struct vchiq_state *state)
-{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- int susp = 0;
-
- if (!arm_state)
- goto out;
-
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
-
- write_lock_bh(&arm_state->susp_res_lock);
- if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
- arm_state->vc_resume_state == VC_RESUME_RESUMED) {
- set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
- susp = 1;
- }
- write_unlock_bh(&arm_state->susp_res_lock);
-
- if (susp)
- vchiq_platform_suspend(state);
-
-out:
- vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
- return;
-}
-
-void
-vchiq_check_suspend(struct vchiq_state *state)
-{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
-
- if (!arm_state)
- goto out;
-
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
-
- write_lock_bh(&arm_state->susp_res_lock);
- if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
- arm_state->first_connect &&
- !vchiq_videocore_wanted(state)) {
- vchiq_arm_vcsuspend(state);
- }
- write_unlock_bh(&arm_state->susp_res_lock);
-
-out:
- vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
-}
-
-/* This function should be called with the write lock held */
-int
-vchiq_check_resume(struct vchiq_state *state)
-{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- int resume = 0;
-
- if (!arm_state)
- goto out;
-
- vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
-
- if (need_resume(state)) {
- set_resume_state(arm_state, VC_RESUME_REQUESTED);
- request_poll(state, NULL, 0);
- resume = 1;
- }
-
-out:
- vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
- return resume;
-}
-
enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
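The deleted suspend/resume machinery leaned on a recurring completion idiom: initialise a completion and immediately complete_all() it, so waiters block only while the tracked condition actually holds, with reinit_completion() re-arming the gate. A hedged sketch of that idiom in isolation, with placeholder names:

	#include <linux/completion.h>

	static struct completion example_ready;

	static void example_init(void)
	{
		init_completion(&example_ready);
		/* Start in the 'done' state: wait_for_completion()
		 * returns immediately until someone re-arms the gate.
		 */
		complete_all(&example_ready);
	}

	static void example_block_new_waiters(void)
	{
		reinit_completion(&example_ready);	/* waiters now block */
	}

	static void example_release_waiters(void)
	{
		complete_all(&example_ready);		/* wake every waiter */
	}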
@@ -2724,88 +2376,15 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
}
write_lock_bh(&arm_state->susp_res_lock);
- while (arm_state->resume_blocked) {
- /* If we call 'use' while force suspend is waiting for suspend,
- * then we're about to block the thread which the force is
- * waiting to complete, so we're bound to just time out. In this
- * case, set the suspend state such that the wait will be
- * canceled, so we can complete as quickly as possible. */
- if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
- VC_SUSPEND_IDLE) {
- set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
- break;
- }
- /* If suspend is already in progress then we need to block */
- if (!try_wait_for_completion(&arm_state->resume_blocker)) {
- /* Indicate that there are threads waiting on the resume
- * blocker. These need to be allowed to complete before
- * a _second_ call to force suspend can complete,
- * otherwise low priority threads might never actually
- * continue */
- arm_state->blocked_count++;
- write_unlock_bh(&arm_state->susp_res_lock);
- vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
- "blocked - waiting...", __func__, entity);
- if (wait_for_completion_killable(
- &arm_state->resume_blocker)) {
- vchiq_log_error(vchiq_susp_log_level, "%s %s "
- "wait for resume blocker interrupted",
- __func__, entity);
- ret = VCHIQ_ERROR;
- write_lock_bh(&arm_state->susp_res_lock);
- arm_state->blocked_count--;
- write_unlock_bh(&arm_state->susp_res_lock);
- goto out;
- }
- vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
- "unblocked", __func__, entity);
- write_lock_bh(&arm_state->susp_res_lock);
- if (--arm_state->blocked_count == 0)
- complete_all(&arm_state->blocked_blocker);
- }
- }
-
- stop_suspend_timer(arm_state);
-
local_uc = ++arm_state->videocore_use_count;
local_entity_uc = ++(*entity_uc);
- /* If there's a pending request which hasn't yet been serviced then
- * just clear it. If we're past VC_SUSPEND_REQUESTED state then
- * vc_resume_complete will block until we either resume or fail to
- * suspend */
- if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
- set_suspend_state(arm_state, VC_SUSPEND_IDLE);
-
- if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
- set_resume_state(arm_state, VC_RESUME_REQUESTED);
- vchiq_log_info(vchiq_susp_log_level,
- "%s %s count %d, state count %d",
- __func__, entity, local_entity_uc, local_uc);
- request_poll(state, NULL, 0);
- } else
- vchiq_log_trace(vchiq_susp_log_level,
- "%s %s count %d, state count %d",
- __func__, entity, *entity_uc, local_uc);
+ vchiq_log_trace(vchiq_susp_log_level,
+ "%s %s count %d, state count %d",
+ __func__, entity, *entity_uc, local_uc);
write_unlock_bh(&arm_state->susp_res_lock);
- /* Completion is in a done state when we're not suspended, so this won't
- * block for the non-suspended case. */
- if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
- vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
- __func__, entity);
- if (wait_for_completion_killable(
- &arm_state->vc_resume_complete)) {
- vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
- "resume interrupted", __func__, entity);
- ret = VCHIQ_ERROR;
- goto out;
- }
- vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
- entity);
- }
-
if (ret == VCHIQ_SUCCESS) {
enum vchiq_status status = VCHIQ_SUCCESS;
long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
@@ -2860,24 +2439,10 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
--arm_state->videocore_use_count;
--(*entity_uc);
- if (!vchiq_videocore_wanted(state)) {
- if (vchiq_platform_use_suspend_timer() &&
- !arm_state->resume_blocked) {
- /* Only use the timer if we're not trying to force
- * suspend (=> resume_blocked) */
- start_suspend_timer(arm_state);
- } else {
- vchiq_log_info(vchiq_susp_log_level,
- "%s %s count %d, state count %d - suspending",
- __func__, entity, *entity_uc,
- arm_state->videocore_use_count);
- vchiq_arm_vcsuspend(state);
- }
- } else
- vchiq_log_trace(vchiq_susp_log_level,
- "%s %s count %d, state count %d",
- __func__, entity, *entity_uc,
- arm_state->videocore_use_count);
+ vchiq_log_trace(vchiq_susp_log_level,
+ "%s %s count %d, state count %d",
+ __func__, entity, *entity_uc,
+ arm_state->videocore_use_count);
unlock:
write_unlock_bh(&arm_state->susp_res_lock);
@@ -2932,11 +2497,11 @@ vchiq_instance_get_use_count(struct vchiq_instance *instance)
int use_count = 0, i;
i = 0;
- while ((service = next_service_by_instance(instance->state,
- instance, &i))) {
+ rcu_read_lock();
+ while ((service = __next_service_by_instance(instance->state,
+ instance, &i)))
use_count += service->service_use_count;
- unlock_service(service);
- }
+ rcu_read_unlock();
return use_count;
}
@@ -2959,25 +2524,14 @@ vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
int i;
i = 0;
- while ((service = next_service_by_instance(instance->state,
- instance, &i))) {
+ rcu_read_lock();
+ while ((service = __next_service_by_instance(instance->state,
+ instance, &i)))
service->trace = trace;
- unlock_service(service);
- }
+ rcu_read_unlock();
instance->trace = (trace != 0);
}
-static void suspend_timer_callback(struct timer_list *t)
-{
- struct vchiq_arm_state *arm_state =
- from_timer(arm_state, t, suspend_timer);
- struct vchiq_state *state = arm_state->state;
-
- vchiq_log_info(vchiq_susp_log_level,
- "%s - suspend timer expired - check suspend", __func__);
- vchiq_check_suspend(state);
-}
-
enum vchiq_status
vchiq_use_service(unsigned int handle)
{
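The suspend timer removed above followed the standard timer_list pattern: timer_setup() binds a callback, and the callback recovers its containing object with from_timer(). A minimal sketch with illustrative names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct example_state {
		struct timer_list timer;
		int fired;
	};

	static void example_timer_cb(struct timer_list *t)
	{
		/* Map the timer_list back to its enclosing structure. */
		struct example_state *st = from_timer(st, t, timer);

		st->fired = 1;
	}

	static void example_arm(struct example_state *st, unsigned int ms)
	{
		timer_setup(&st->timer, example_timer_cb, 0);
		mod_timer(&st->timer, jiffies + msecs_to_jiffies(ms));
	}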
@@ -3022,8 +2576,6 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
int only_nonzero = 0;
static const char *nz = "<-- preventing suspend";
- enum vc_suspend_status vc_suspend_state;
- enum vc_resume_status vc_resume_state;
int peer_count;
int vc_use_count;
int active_services;
@@ -3037,16 +2589,16 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
return;
read_lock_bh(&arm_state->susp_res_lock);
- vc_suspend_state = arm_state->vc_suspend_state;
- vc_resume_state = arm_state->vc_resume_state;
peer_count = arm_state->peer_use_count;
vc_use_count = arm_state->videocore_use_count;
active_services = state->unused_service;
if (active_services > MAX_SERVICES)
only_nonzero = 1;
+ rcu_read_lock();
for (i = 0; i < active_services; i++) {
- struct vchiq_service *service_ptr = state->services[i];
+ struct vchiq_service *service_ptr =
+ rcu_dereference(state->services[i]);
if (!service_ptr)
continue;
@@ -3064,16 +2616,10 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
if (found >= MAX_SERVICES)
break;
}
+ rcu_read_unlock();
read_unlock_bh(&arm_state->susp_res_lock);
- vchiq_log_warning(vchiq_susp_log_level,
- "-- Videcore suspend state: %s --",
- suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
- vchiq_log_warning(vchiq_susp_log_level,
- "-- Videcore resume state: %s --",
- resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
-
if (only_nonzero)
vchiq_log_warning(vchiq_susp_log_level, "Too many active "
"services (%d). Only dumping up to first %d services "
@@ -3093,8 +2639,6 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
"--- Overall vchiq instance use count %d", vc_use_count);
kfree(service_data);
-
- vchiq_dump_platform_use_state(state);
}
enum vchiq_status
@@ -3118,24 +2662,16 @@ vchiq_check_service(struct vchiq_service *service)
if (ret == VCHIQ_ERROR) {
vchiq_log_error(vchiq_susp_log_level,
"%s ERROR - %c%c%c%c:%d service count %d, "
- "state count %d, videocore suspend state %s", __func__,
+ "state count %d", __func__,
VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
service->client_id, service->service_use_count,
- arm_state->videocore_use_count,
- suspend_state_names[arm_state->vc_suspend_state +
- VC_SUSPEND_NUM_OFFSET]);
+ arm_state->videocore_use_count);
vchiq_dump_service_use_state(service->state);
}
out:
return ret;
}
-/* stub functions */
-void vchiq_on_remote_use_active(struct vchiq_state *state)
-{
- (void)state;
-}
-
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index 19d2a2eefb6a..0784c5002417 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -14,27 +14,8 @@
#include "vchiq_core.h"
#include "vchiq_debugfs.h"
-enum vc_suspend_status {
- VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
- VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
- VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
- VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
- VC_SUSPEND_REQUESTED, /* User has requested suspend */
- VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
- VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
-};
-
-enum vc_resume_status {
- VC_RESUME_FAILED = -1, /* Videocore resume failed */
- VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
- VC_RESUME_REQUESTED, /* User has requested resume */
- VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
- VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
-};
-
enum USE_TYPE_E {
USE_TYPE_SERVICE,
- USE_TYPE_SERVICE_NO_RESUME,
USE_TYPE_VCHIQ
};
@@ -46,19 +27,9 @@ struct vchiq_arm_state {
atomic_t ka_use_ack_count;
atomic_t ka_release_count;
- struct completion vc_suspend_complete;
- struct completion vc_resume_complete;
-
rwlock_t susp_res_lock;
- enum vc_suspend_status vc_suspend_state;
- enum vc_resume_status vc_resume_state;
-
- unsigned int wake_address;
struct vchiq_state *state;
- struct timer_list suspend_timer;
- int suspend_timer_timeout;
- int suspend_timer_running;
/* Global use count for videocore.
** This is equal to the sum of the use counts for all services. When
@@ -72,27 +43,11 @@ struct vchiq_arm_state {
*/
int peer_use_count;
- /* Flag to indicate whether resume is blocked. This happens when the
- ** ARM is suspending
- */
- struct completion resume_blocker;
- int resume_blocked;
- struct completion blocked_blocker;
- int blocked_count;
-
- int autosuspend_override;
-
/* Flag to indicate that the first vchiq connect has made it through.
** This means that both sides should be fully ready, and we should
** be able to suspend after this point.
*/
int first_connect;
-
- unsigned long long suspend_start_time;
- unsigned long long sleep_start_time;
- unsigned long long resume_start_time;
- unsigned long long last_wake_time;
-
};
struct vchiq_drvdata {
@@ -110,18 +65,9 @@ extern struct vchiq_state *
vchiq_get_state(void);
extern enum vchiq_status
-vchiq_arm_vcsuspend(struct vchiq_state *state);
-
-extern enum vchiq_status
-vchiq_arm_vcresume(struct vchiq_state *state);
-
-extern enum vchiq_status
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state);
-extern int
-vchiq_check_resume(struct vchiq_state *state);
-
extern void
vchiq_check_suspend(struct vchiq_state *state);
enum vchiq_status
@@ -133,15 +79,6 @@ vchiq_release_service(unsigned int handle);
extern enum vchiq_status
vchiq_check_service(struct vchiq_service *service);
-extern enum vchiq_status
-vchiq_platform_suspend(struct vchiq_state *state);
-
-extern int
-vchiq_platform_videocore_wanted(struct vchiq_state *state);
-
-extern int
-vchiq_platform_use_suspend_timer(void);
-
extern void
vchiq_dump_platform_use_state(struct vchiq_state *state);
@@ -151,8 +88,6 @@ vchiq_dump_service_use_state(struct vchiq_state *state);
extern struct vchiq_arm_state*
vchiq_platform_get_arm_state(struct vchiq_state *state);
-extern int
-vchiq_videocore_wanted(struct vchiq_state *state);
extern enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
@@ -176,15 +111,4 @@ vchiq_instance_get_trace(struct vchiq_instance *instance);
extern void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace);
-extern void
-set_suspend_state(struct vchiq_arm_state *arm_state,
- enum vc_suspend_status new_state);
-
-extern void
-set_resume_state(struct vchiq_arm_state *arm_state,
- enum vc_resume_status new_state);
-
-extern void
-start_suspend_timer(struct vchiq_arm_state *arm_state);
-
#endif /* VCHIQ_ARM_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 76351078affb..edcd97373809 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+
#include "vchiq_core.h"
#define VCHIQ_SLOT_HANDLER_STACK 8192
@@ -54,7 +57,6 @@ int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
-static DEFINE_SPINLOCK(service_spinlock);
DEFINE_SPINLOCK(bulk_waiter_spinlock);
static DEFINE_SPINLOCK(quota_spinlock);
@@ -136,44 +138,41 @@ find_service_by_handle(unsigned int handle)
{
struct vchiq_service *service;
- spin_lock(&service_spinlock);
+ rcu_read_lock();
service = handle_to_service(handle);
- if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
- (service->handle == handle)) {
- WARN_ON(service->ref_count == 0);
- service->ref_count++;
- } else
- service = NULL;
- spin_unlock(&service_spinlock);
-
- if (!service)
- vchiq_log_info(vchiq_core_log_level,
- "Invalid service handle 0x%x", handle);
-
- return service;
+ if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
+ service->handle == handle &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ rcu_read_unlock();
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid service handle 0x%x", handle);
+ return NULL;
}
struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport)
{
- struct vchiq_service *service = NULL;
if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
- spin_lock(&service_spinlock);
- service = state->services[localport];
- if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
- WARN_ON(service->ref_count == 0);
- service->ref_count++;
- } else
- service = NULL;
- spin_unlock(&service_spinlock);
- }
-
- if (!service)
- vchiq_log_info(vchiq_core_log_level,
- "Invalid port %d", localport);
+ struct vchiq_service *service;
- return service;
+ rcu_read_lock();
+ service = rcu_dereference(state->services[localport]);
+ if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ rcu_read_unlock();
+ }
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid port %d", localport);
+ return NULL;
}
struct vchiq_service *
@@ -182,22 +181,20 @@ find_service_for_instance(struct vchiq_instance *instance,
{
struct vchiq_service *service;
- spin_lock(&service_spinlock);
+ rcu_read_lock();
service = handle_to_service(handle);
- if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
- (service->handle == handle) &&
- (service->instance == instance)) {
- WARN_ON(service->ref_count == 0);
- service->ref_count++;
- } else
- service = NULL;
- spin_unlock(&service_spinlock);
-
- if (!service)
- vchiq_log_info(vchiq_core_log_level,
- "Invalid service handle 0x%x", handle);
-
- return service;
+ if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
+ service->handle == handle &&
+ service->instance == instance &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ rcu_read_unlock();
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid service handle 0x%x", handle);
+ return NULL;
}
struct vchiq_service *
@@ -206,121 +203,125 @@ find_closed_service_for_instance(struct vchiq_instance *instance,
{
struct vchiq_service *service;
- spin_lock(&service_spinlock);
+ rcu_read_lock();
service = handle_to_service(handle);
if (service &&
- ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
- (service->srvstate == VCHIQ_SRVSTATE_CLOSED)) &&
- (service->handle == handle) &&
- (service->instance == instance)) {
- WARN_ON(service->ref_count == 0);
- service->ref_count++;
- } else
- service = NULL;
- spin_unlock(&service_spinlock);
-
- if (!service)
- vchiq_log_info(vchiq_core_log_level,
- "Invalid service handle 0x%x", handle);
-
+ (service->srvstate == VCHIQ_SRVSTATE_FREE ||
+ service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
+ service->handle == handle &&
+ service->instance == instance &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
+ return service;
+ }
+ rcu_read_unlock();
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid service handle 0x%x", handle);
return service;
}
struct vchiq_service *
-next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance,
- int *pidx)
+__next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx)
{
struct vchiq_service *service = NULL;
int idx = *pidx;
- spin_lock(&service_spinlock);
while (idx < state->unused_service) {
- struct vchiq_service *srv = state->services[idx++];
+ struct vchiq_service *srv;
- if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
- (srv->instance == instance)) {
+ srv = rcu_dereference(state->services[idx++]);
+ if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
+ srv->instance == instance) {
service = srv;
- WARN_ON(service->ref_count == 0);
- service->ref_count++;
break;
}
}
- spin_unlock(&service_spinlock);
*pidx = idx;
+ return service;
+}
+
+struct vchiq_service *
+next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx)
+{
+ struct vchiq_service *service;
+ rcu_read_lock();
+ while (1) {
+ service = __next_service_by_instance(state, instance, pidx);
+ if (!service)
+ break;
+ if (kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ break;
+ }
+ }
+ rcu_read_unlock();
return service;
}
void
lock_service(struct vchiq_service *service)
{
- spin_lock(&service_spinlock);
- WARN_ON(!service);
- if (service) {
- WARN_ON(service->ref_count == 0);
- service->ref_count++;
+ if (!service) {
+ WARN(1, "%s: service is NULL\n", __func__);
+ return;
}
- spin_unlock(&service_spinlock);
+ kref_get(&service->ref_count);
+}
+
+static void service_release(struct kref *kref)
+{
+ struct vchiq_service *service =
+ container_of(kref, struct vchiq_service, ref_count);
+ struct vchiq_state *state = service->state;
+
+ WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
+ rcu_assign_pointer(state->services[service->localport], NULL);
+ if (service->userdata_term)
+ service->userdata_term(service->base.userdata);
+ kfree_rcu(service, rcu);
}
void
unlock_service(struct vchiq_service *service)
{
- spin_lock(&service_spinlock);
if (!service) {
WARN(1, "%s: service is NULL\n", __func__);
- goto unlock;
- }
- if (!service->ref_count) {
- WARN(1, "%s: ref_count is zero\n", __func__);
- goto unlock;
- }
- service->ref_count--;
- if (!service->ref_count) {
- struct vchiq_state *state = service->state;
-
- WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
- state->services[service->localport] = NULL;
- } else {
- service = NULL;
+ return;
}
-unlock:
- spin_unlock(&service_spinlock);
-
- if (service && service->userdata_term)
- service->userdata_term(service->base.userdata);
-
- kfree(service);
+ kref_put(&service->ref_count, service_release);
}
int
vchiq_get_client_id(unsigned int handle)
{
- struct vchiq_service *service = find_service_by_handle(handle);
+ struct vchiq_service *service;
int id;
+ rcu_read_lock();
+ service = handle_to_service(handle);
id = service ? service->client_id : 0;
- if (service)
- unlock_service(service);
-
+ rcu_read_unlock();
return id;
}
void *
vchiq_get_service_userdata(unsigned int handle)
{
- struct vchiq_service *service = handle_to_service(handle);
-
- return service ? service->base.userdata : NULL;
-}
-
-int
-vchiq_get_service_fourcc(unsigned int handle)
-{
- struct vchiq_service *service = handle_to_service(handle);
+ void *userdata;
+ struct vchiq_service *service;
- return service ? service->base.fourcc : 0;
+ rcu_read_lock();
+ service = handle_to_service(handle);
+ userdata = service ? service->base.userdata : NULL;
+ rcu_read_unlock();
+ return userdata;
}
static void
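service_release() above combines two pieces: kref_put() invokes the release callback exactly once, when the count drops to zero, and kfree_rcu() defers the actual free until all in-flight RCU readers are done. A condensed sketch of that shape; the names are illustrative:

	#include <linux/kref.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct example_obj {
		struct kref ref;
		struct rcu_head rcu;
	};

	static void example_release(struct kref *kref)
	{
		struct example_obj *obj =
			container_of(kref, struct example_obj, ref);

		/* Readers may still hold the pointer; free only after a
		 * grace period has elapsed.
		 */
		kfree_rcu(obj, rcu);
	}

	static void example_put(struct example_obj *obj)
	{
		kref_put(&obj->ref, example_release);
	}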
@@ -468,19 +469,23 @@ get_listening_service(struct vchiq_state *state, int fourcc)
WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
+ rcu_read_lock();
for (i = 0; i < state->unused_service; i++) {
- struct vchiq_service *service = state->services[i];
+ struct vchiq_service *service;
+ service = rcu_dereference(state->services[i]);
if (service &&
- (service->public_fourcc == fourcc) &&
- ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
- ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
- (service->remoteport == VCHIQ_PORT_FREE)))) {
- lock_service(service);
+ service->public_fourcc == fourcc &&
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
+ service->remoteport == VCHIQ_PORT_FREE)) &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
return service;
}
}
-
+ rcu_read_unlock();
return NULL;
}
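get_listening_service() above is the canonical RCU lookup-and-pin pattern: locate the object under rcu_read_lock(), let kref_get_unless_zero() refuse objects already on their way to being freed, and use rcu_pointer_handoff() to document that the reference, not RCU, now keeps the pointer alive. A stripped-down sketch with placeholder names:

	#include <linux/kref.h>
	#include <linux/rcupdate.h>

	struct example_obj {
		struct kref ref;
	};

	static struct example_obj *
	example_lookup(struct example_obj __rcu **slot)
	{
		struct example_obj *obj;

		rcu_read_lock();
		obj = rcu_dereference(*slot);
		/* Take a reference only if the object is not already dying. */
		if (obj && kref_get_unless_zero(&obj->ref)) {
			/* The kref now keeps obj alive past the read section. */
			obj = rcu_pointer_handoff(obj);
			rcu_read_unlock();
			return obj;
		}
		rcu_read_unlock();
		return NULL;
	}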
@@ -490,15 +495,20 @@ get_connected_service(struct vchiq_state *state, unsigned int port)
{
int i;
+ rcu_read_lock();
for (i = 0; i < state->unused_service; i++) {
- struct vchiq_service *service = state->services[i];
-
- if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
- && (service->remoteport == port)) {
- lock_service(service);
+ struct vchiq_service *service =
+ rcu_dereference(state->services[i]);
+
+ if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
+ service->remoteport == port &&
+ kref_get_unless_zero(&service->ref_count)) {
+ service = rcu_pointer_handoff(service);
+ rcu_read_unlock();
return service;
}
}
+ rcu_read_unlock();
return NULL;
}
@@ -1798,7 +1808,6 @@ parse_rx_slots(struct vchiq_state *state)
}
/* At this point slot_mutex is held */
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
- vchiq_platform_paused(state);
break;
case VCHIQ_MSG_RESUME:
vchiq_log_trace(vchiq_core_log_level,
@@ -1807,7 +1816,6 @@ parse_rx_slots(struct vchiq_state *state)
/* Release the slot mutex */
mutex_unlock(&state->slot_mutex);
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
- vchiq_platform_resumed(state);
break;
case VCHIQ_MSG_REMOTE_USE:
@@ -1817,7 +1825,6 @@ parse_rx_slots(struct vchiq_state *state)
vchiq_on_remote_release(state);
break;
case VCHIQ_MSG_REMOTE_USE_ACTIVE:
- vchiq_on_remote_use_active(state);
break;
default:
@@ -1869,9 +1876,6 @@ slot_handler_func(void *v)
DEBUG_TRACE(SLOT_HANDLER_LINE);
if (state->poll_needed) {
- /* Check if we need to suspend - may change our
- * conn_state */
- vchiq_platform_check_suspend(state);
state->poll_needed = 0;
@@ -1897,10 +1901,6 @@ slot_handler_func(void *v)
}
break;
- case VCHIQ_CONNSTATE_PAUSED:
- vchiq_platform_resume(state);
- break;
-
case VCHIQ_CONNSTATE_RESUMING:
if (queue_message(state, NULL,
VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
@@ -1908,7 +1908,6 @@ slot_handler_func(void *v)
!= VCHIQ_RETRY) {
vchiq_set_conn_state(state,
VCHIQ_CONNSTATE_CONNECTED);
- vchiq_platform_resumed(state);
} else {
/* This should really be impossible,
** since the PAUSE should have flushed
@@ -1918,11 +1917,6 @@ slot_handler_func(void *v)
"message");
}
break;
-
- case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
- case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
- vchiq_platform_handle_timeout(state);
- break;
default:
break;
}
@@ -2284,7 +2278,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
vchiq_userdata_term userdata_term)
{
struct vchiq_service *service;
- struct vchiq_service **pservice = NULL;
+ struct vchiq_service __rcu **pservice = NULL;
struct vchiq_service_quota *service_quota;
int i;
@@ -2296,7 +2290,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
service->base.callback = params->callback;
service->base.userdata = params->userdata;
service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
- service->ref_count = 1;
+ kref_init(&service->ref_count);
service->srvstate = VCHIQ_SRVSTATE_FREE;
service->userdata_term = userdata_term;
service->localport = VCHIQ_PORT_FREE;
@@ -2322,7 +2316,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
mutex_init(&service->bulk_mutex);
memset(&service->stats, 0, sizeof(service->stats));
- /* Although it is perfectly possible to use service_spinlock
+ /* Although it is perfectly possible to use a spinlock
** to protect the creation of services, it is overkill as it
** disables interrupts while the array is searched.
** The only danger is of another thread trying to create a
@@ -2340,17 +2334,17 @@ vchiq_add_service_internal(struct vchiq_state *state,
if (srvstate == VCHIQ_SRVSTATE_OPENING) {
for (i = 0; i < state->unused_service; i++) {
- struct vchiq_service *srv = state->services[i];
-
- if (!srv) {
+ if (!rcu_access_pointer(state->services[i])) {
pservice = &state->services[i];
break;
}
}
} else {
+ rcu_read_lock();
for (i = (state->unused_service - 1); i >= 0; i--) {
- struct vchiq_service *srv = state->services[i];
+ struct vchiq_service *srv;
+ srv = rcu_dereference(state->services[i]);
if (!srv)
pservice = &state->services[i];
else if ((srv->public_fourcc == params->fourcc)
@@ -2363,6 +2357,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
break;
}
}
+ rcu_read_unlock();
}
if (pservice) {
@@ -2374,7 +2369,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
(state->id * VCHIQ_MAX_SERVICES) |
service->localport;
handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
- *pservice = service;
+ rcu_assign_pointer(*pservice, service);
if (pservice == &state->services[state->unused_service])
state->unused_service++;
}
@@ -2437,13 +2432,13 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
status = VCHIQ_RETRY;
vchiq_release_service_internal(service);
} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
- (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
+ (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
vchiq_log_error(vchiq_core_log_level,
- "%d: osi - srvstate = %s (ref %d)",
- service->state->id,
- srvstate_names[service->srvstate],
- service->ref_count);
+ "%d: osi - srvstate = %s (ref %u)",
+ service->state->id,
+ srvstate_names[service->srvstate],
+ kref_read(&service->ref_count));
status = VCHIQ_ERROR;
VCHIQ_SERVICE_STATS_INC(service, error_count);
vchiq_release_service_internal(service);
@@ -3449,10 +3444,13 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
char buf[80];
int len;
int err;
+ unsigned int ref_count;
+ /* Don't include the lock just taken */
+ ref_count = kref_read(&service->ref_count) - 1;
len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
- service->localport, srvstate_names[service->srvstate],
- service->ref_count - 1); /*Don't include the lock just taken*/
+ service->localport, srvstate_names[service->srvstate],
+ ref_count);
if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
char remoteport[30];
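
Note: the vchiq_core.c hunks above replace the driver-private service_spinlock and hand-rolled ref_count with RCU-protected lookup plus a kref. A minimal, self-contained sketch of that read-side pattern follows; the names (struct obj, obj_table, obj_get, obj_put) are illustrative stand-ins, not vchiq code, and the kfree_rcu() release path is an assumption based on the rcu_head added to struct vchiq_service.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;
	struct rcu_head rcu;
};

static struct obj __rcu *obj_table[16];

/* The last kref_put() lands here; readers may still be in flight, so defer. */
static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);

	kfree_rcu(o, rcu);
}

/* Look up a slot and take a reference, unless the final put already ran. */
static struct obj *obj_get(int idx)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(obj_table[idx]);
	if (o && !kref_get_unless_zero(&o->ref))
		o = NULL;	/* racing with teardown; caller must fail/retry */
	if (o)
		o = rcu_pointer_handoff(o);	/* we now hold our own ref */
	rcu_read_unlock();
	return o;
}

static void obj_put(struct obj *o)
{
	kref_put(&o->ref, obj_release);
}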
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index c31f953a9986..cedd8e721aae 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -7,6 +7,8 @@
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/kthread.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
#include <linux/wait.h>
#include "vchiq_cfg.h"
@@ -251,7 +253,8 @@ struct vchiq_slot_info {
struct vchiq_service {
struct vchiq_service_base base;
unsigned int handle;
- unsigned int ref_count;
+ struct kref ref_count;
+ struct rcu_head rcu;
int srvstate;
vchiq_userdata_term userdata_term;
unsigned int localport;
@@ -464,7 +467,7 @@ struct vchiq_state {
int error_count;
} stats;
- struct vchiq_service *services[VCHIQ_MAX_SERVICES];
+ struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES];
struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];
@@ -545,12 +548,13 @@ request_poll(struct vchiq_state *state, struct vchiq_service *service,
static inline struct vchiq_service *
handle_to_service(unsigned int handle)
{
+ int idx = handle & (VCHIQ_MAX_SERVICES - 1);
struct vchiq_state *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
(VCHIQ_MAX_STATES - 1)];
+
if (!state)
return NULL;
-
- return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
+ return rcu_dereference(state->services[idx]);
}
extern struct vchiq_service *
@@ -568,7 +572,13 @@ find_closed_service_for_instance(struct vchiq_instance *instance,
unsigned int handle);
extern struct vchiq_service *
-next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance,
+__next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx);
+
+extern struct vchiq_service *
+next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
int *pidx);
extern void
@@ -590,18 +600,6 @@ vchiq_complete_bulk(struct vchiq_bulk *bulk);
extern void
remote_event_signal(struct remote_event *event);
-void
-vchiq_platform_check_suspend(struct vchiq_state *state);
-
-extern void
-vchiq_platform_paused(struct vchiq_state *state);
-
-extern enum vchiq_status
-vchiq_platform_resume(struct vchiq_state *state);
-
-extern void
-vchiq_platform_resumed(struct vchiq_state *state);
-
extern int
vchiq_dump(void *dump_context, const char *str, int len);
@@ -648,9 +646,6 @@ vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate newstate);
extern void
-vchiq_platform_handle_timeout(struct vchiq_state *state);
-
-extern void
vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
extern void
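
Note: handle_to_service() packs a state index and a local port into one integer handle; the sequence bits that vchiq_add_service_internal() adds via handle_seq occupy higher bits and are masked back off on lookup. A small userspace illustration of the arithmetic, using stand-in values for the two constants (the real ones live in vchiq_cfg.h):

#include <assert.h>
#include <stdio.h>

/* Stand-in values only; see vchiq_cfg.h for the real configuration. */
#define VCHIQ_MAX_STATES	2
#define VCHIQ_MAX_SERVICES	4096

int main(void)
{
	/* Built as in vchiq_add_service_internal(): seq | state id | port. */
	unsigned int handle = (5u * VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES) |
			      (1u * VCHIQ_MAX_SERVICES) | 37u;
	unsigned int state = (handle / VCHIQ_MAX_SERVICES) &
			     (VCHIQ_MAX_STATES - 1);
	unsigned int port = handle & (VCHIQ_MAX_SERVICES - 1);

	assert(state == 1 && port == 37);
	printf("state %u, port %u\n", state, port);
	return 0;
}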
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 07c6a3db5ab6..39b77ea19210 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -13,7 +13,6 @@
#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
(((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
-#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
enum vchiq_reason {
VCHIQ_SERVICE_OPENED, /* service, -, - */
@@ -128,7 +127,6 @@ extern enum vchiq_status vchiq_bulk_receive_handle(unsigned int service,
enum vchiq_bulk_mode mode);
extern int vchiq_get_client_id(unsigned int service);
extern void *vchiq_get_service_userdata(unsigned int service);
-extern int vchiq_get_service_fourcc(unsigned int service);
extern void vchiq_get_config(struct vchiq_config *config);
extern enum vchiq_status vchiq_set_service_option(unsigned int service,
enum vchiq_service_option option, int value);
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 25867cb9d13d..337266add6b2 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -63,6 +63,6 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type);
bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
u64 qwBSSTimestamp);
bool CARDbSetBeaconPeriod(struct vnt_private *priv,
- unsigned short wBeaconInterval);
+ unsigned short wBeaconInterval);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index f69fc687d4c3..5c86cc60eb5c 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -133,7 +133,8 @@ static int device_init_td1_ring(struct vnt_private *priv);
static int device_rx_srv(struct vnt_private *priv, unsigned int idx);
static int device_tx_srv(struct vnt_private *priv, unsigned int idx);
static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *);
-static void device_free_rx_buf(struct vnt_private *priv, struct vnt_rx_desc *rd);
+static void device_free_rx_buf(struct vnt_private *priv,
+ struct vnt_rx_desc *rd);
static void device_init_registers(struct vnt_private *priv);
static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
static void device_free_td0_ring(struct vnt_private *priv);
@@ -442,7 +443,10 @@ static bool device_init_rings(struct vnt_private *priv)
/*allocate all RD/TD rings a single pool*/
vir_pool = dma_alloc_coherent(&priv->pcid->dev,
- priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
+ priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
+ priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
&priv->pool_dma, GFP_ATOMIC);
if (!vir_pool) {
dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index bfd598a93b04..6b0407694e54 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -58,14 +58,11 @@ void PSvEnablePowerSaving(struct vnt_private *priv,
if (priv->op_mode != NL80211_IFTYPE_ADHOC) {
/* set AID */
VNSvOutPortW(priv->PortOffset + MAC_REG_AIDATIM, wAID);
- } else {
- /* set ATIM Window */
-#if 0 /* TODO atim window */
- MACvWriteATIMW(priv->PortOffset, pMgmt->wCurrATIMWindow);
-#endif
}
+
/* Set AutoSleep */
MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+
/* Set HWUTSF */
MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
@@ -101,10 +98,13 @@ void PSvDisablePowerSaving(struct vnt_private *priv)
{
/* disable power saving hw function */
MACbPSWakeup(priv);
+
/* clear AutoSleep */
MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+
/* clear HWUTSF */
MACvRegBitsOff(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+
/* set always listen beacon */
MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
diff --git a/drivers/staging/vt6656/Makefile b/drivers/staging/vt6656/Makefile
index b64c0d87f612..375f54e9f58b 100644
--- a/drivers/staging/vt6656/Makefile
+++ b/drivers/staging/vt6656/Makefile
@@ -9,13 +9,11 @@ vt6656_stage-y += main_usb.o \
baseband.o \
wcmd.o\
rxtx.o \
- dpc.o \
power.o \
key.o \
rf.o \
usbpipe.o \
channel.o \
- firmware.o \
- int.o
+ firmware.o
obj-$(CONFIG_VT6656) += vt6656_stage.o
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index f18e059ce66b..a19a563d8bcc 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -22,6 +22,8 @@
*
*/
+#include <linux/bits.h>
+#include <linux/kernel.h>
#include "mac.h"
#include "baseband.h"
#include "rf.h"
@@ -132,7 +134,6 @@ unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
{
unsigned int frame_time;
unsigned int preamble;
- unsigned int tmp;
unsigned int rate = 0;
if (tx_rate > RATE_54M)
@@ -146,20 +147,11 @@ unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
else
preamble = 192;
- frame_time = (frame_length * 80) / rate;
- tmp = (frame_time * rate) / 80;
-
- if (frame_length != tmp)
- frame_time++;
-
+ frame_time = DIV_ROUND_UP(frame_length * 80, rate);
return preamble + frame_time;
}
- frame_time = (frame_length * 8 + 22) / rate;
- tmp = ((frame_time * rate) - 22) / 8;
-
- if (frame_length != tmp)
- frame_time++;
+ frame_time = DIV_ROUND_UP(frame_length * 8 + 22, rate);
frame_time = frame_time * 4;
if (pkt_type != PK_TYPE_11A)
@@ -213,11 +205,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
break;
case RATE_5M:
- count = (bit_count * 10) / 55;
- tmp = (count * 55) / 10;
-
- if (tmp != bit_count)
- count++;
+ count = DIV_ROUND_UP(bit_count * 10, 55);
if (preamble_type == 1)
phy->signal = 0x0a;
@@ -367,9 +355,6 @@ int vnt_vt3184_init(struct vnt_private *priv)
int ret = 0;
u16 length;
u8 *addr;
- u8 *agc;
- u16 length_agc;
- u8 array[256];
u8 data;
ret = vnt_control_in(priv, MESSAGE_TYPE_READ, 0, MESSAGE_REQUEST_EEPROM,
@@ -386,8 +371,6 @@ int vnt_vt3184_init(struct vnt_private *priv)
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
- agc = vnt_vt3184_agc;
- length_agc = sizeof(vnt_vt3184_agc);
priv->bb_vga[0] = 0x1C;
priv->bb_vga[1] = 0x10;
@@ -398,8 +381,6 @@ int vnt_vt3184_init(struct vnt_private *priv)
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
- agc = vnt_vt3184_agc;
- length_agc = sizeof(vnt_vt3184_agc);
addr[0xd7] = 0x06;
@@ -413,8 +394,6 @@ int vnt_vt3184_init(struct vnt_private *priv)
priv->bb_rx_conf = vnt_vt3184_vt3226d0[10];
length = sizeof(vnt_vt3184_vt3226d0);
addr = vnt_vt3184_vt3226d0;
- agc = vnt_vt3184_agc;
- length_agc = sizeof(vnt_vt3184_agc);
priv->bb_vga[0] = 0x20;
priv->bb_vga[1] = 0x10;
@@ -430,8 +409,6 @@ int vnt_vt3184_init(struct vnt_private *priv)
priv->bb_rx_conf = vnt_vt3184_vt3226d0[10];
length = sizeof(vnt_vt3184_vt3226d0);
addr = vnt_vt3184_vt3226d0;
- agc = vnt_vt3184_agc;
- length_agc = sizeof(vnt_vt3184_agc);
priv->bb_vga[0] = 0x20;
priv->bb_vga[1] = 0x10;
@@ -447,17 +424,14 @@ int vnt_vt3184_init(struct vnt_private *priv)
goto end;
}
- memcpy(array, addr, length);
-
ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
- MESSAGE_REQUEST_BBREG, length, array);
+ MESSAGE_REQUEST_BBREG, length, addr);
if (ret)
goto end;
- memcpy(array, agc, length_agc);
-
ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBAGC, length_agc, array);
+ MESSAGE_REQUEST_BBAGC,
+ sizeof(vnt_vt3184_agc), vnt_vt3184_agc);
if (ret)
goto end;
@@ -468,7 +442,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
if (ret)
goto end;
- ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
+ ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, BIT(0));
if (ret)
goto end;
} else if (priv->rf_type == RF_VT3226D0) {
@@ -477,7 +451,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
if (ret)
goto end;
- ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
+ ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, BIT(0));
if (ret)
goto end;
}
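
Note: the vnt_get_frame_time() and vnt_get_phy_field() hunks above replace an open-coded multiply-back-and-compare round-up with DIV_ROUND_UP(). For the positive operand ranges used here the two forms agree; a quick userspace check (illustrative only, with DIV_ROUND_UP spelled out as in include/linux/kernel.h):

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int frame_length, rate;

	for (frame_length = 1; frame_length < 3000; frame_length++) {
		for (rate = 1; rate <= 108; rate++) {
			/* Old open-coded form from vnt_get_frame_time(). */
			unsigned int t = (frame_length * 80) / rate;

			if ((t * rate) / 80 != frame_length)
				t++;
			assert(t == DIV_ROUND_UP(frame_length * 80, rate));
		}
	}
	return 0;
}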
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 7958fc165462..dc3ab10eb630 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -26,6 +26,7 @@
*
*/
+#include <linux/bits.h>
#include "device.h"
#include "card.h"
#include "baseband.h"
@@ -63,7 +64,8 @@ void vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
vnt_mac_reg_bits_on(priv, MAC_REG_MACCR, MACCR_CLRNAV);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
- vnt_mac_reg_bits_off(priv, MAC_REG_CHANNEL, 0xb0);
+ vnt_mac_reg_bits_off(priv, MAC_REG_CHANNEL,
+ (BIT(7) | BIT(5) | BIT(4)));
vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNEL,
connection_channel, 0, 0, NULL);
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index 3a83a9ea9a2a..703597a911f4 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -18,6 +18,7 @@
#ifndef __DESC_H__
#define __DESC_H__
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -36,32 +37,32 @@
/*
* bits in the RSR register
*/
-#define RSR_ADDRBROAD 0x80
-#define RSR_ADDRMULTI 0x40
+#define RSR_ADDRBROAD BIT(7)
+#define RSR_ADDRMULTI BIT(6)
#define RSR_ADDRUNI 0x00
-#define RSR_IVLDTYP 0x20 /* invalid packet type */
-#define RSR_IVLDLEN 0x10 /* invalid len (> 2312 byte) */
-#define RSR_BSSIDOK 0x08
-#define RSR_CRCOK 0x04
-#define RSR_BCNSSIDOK 0x02
-#define RSR_ADDROK 0x01
+#define RSR_IVLDTYP BIT(5) /* invalid packet type */
+#define RSR_IVLDLEN BIT(4) /* invalid len (> 2312 byte) */
+#define RSR_BSSIDOK BIT(3)
+#define RSR_CRCOK BIT(2)
+#define RSR_BCNSSIDOK BIT(1)
+#define RSR_ADDROK BIT(0)
/*
* bits in the new RSR register
*/
-#define NEWRSR_DECRYPTOK 0x10
-#define NEWRSR_CFPIND 0x08
-#define NEWRSR_HWUTSF 0x04
-#define NEWRSR_BCNHITAID 0x02
-#define NEWRSR_BCNHITAID0 0x01
+#define NEWRSR_DECRYPTOK BIT(4)
+#define NEWRSR_CFPIND BIT(3)
+#define NEWRSR_HWUTSF BIT(2)
+#define NEWRSR_BCNHITAID BIT(1)
+#define NEWRSR_BCNHITAID0 BIT(0)
/*
* bits in the TSR register
*/
-#define TSR_RETRYTMO 0x08
-#define TSR_TMO 0x04
-#define TSR_ACKDATA 0x02
-#define TSR_VALID 0x01
+#define TSR_RETRYTMO BIT(3)
+#define TSR_TMO BIT(2)
+#define TSR_ACKDATA BIT(1)
+#define TSR_VALID BIT(0)
#define FIFOCTL_AUTO_FB_1 0x1000
#define FIFOCTL_AUTO_FB_0 0x0800
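
Note: these RSR/NEWRSR/TSR bit definitions move from magic hex constants to the BIT() macro pulled in via <linux/bits.h>; BIT(n) is (1UL << (n)), so every value is unchanged. A trivial check, with BIT() spelled out:

#include <assert.h>

#define BIT(n) (1UL << (n))	/* simplified form of the <linux/bits.h> macro */

int main(void)
{
	assert(BIT(7) == 0x80);				/* RSR_ADDRBROAD */
	assert(BIT(4) == 0x10);				/* RSR_IVLDLEN */
	assert((BIT(7) | BIT(5) | BIT(4)) == 0xb0);	/* vnt_set_channel() mask */
	return 0;
}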
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index fe6c11266123..e6ee9411f080 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -16,6 +16,7 @@
#ifndef __DEVICE_H__
#define __DEVICE_H__
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -129,12 +130,12 @@
#define EEP_OFS_OFDMA_PWR_TBL 0x50
/* Bits in EEP_OFS_ANTENNA */
-#define EEP_ANTENNA_MAIN 0x1
-#define EEP_ANTENNA_AUX 0x2
-#define EEP_ANTINV 0x4
+#define EEP_ANTENNA_MAIN BIT(0)
+#define EEP_ANTENNA_AUX BIT(1)
+#define EEP_ANTINV BIT(2)
/* Bits in EEP_OFS_RADIOCTL */
-#define EEP_RADIOCTL_ENABLE 0x80
+#define EEP_RADIOCTL_ENABLE BIT(7)
/* control commands */
#define MESSAGE_TYPE_READ 0x1
@@ -227,7 +228,6 @@ struct vnt_rcb {
void *priv;
struct urb *urb;
struct sk_buff *skb;
- int in_use;
};
/* used to track bulk out irps */
@@ -244,7 +244,6 @@ struct vnt_usb_send_context {
u8 pkt_no;
u8 pkt_type;
u8 need_ack;
- u8 fb_option;
bool in_use;
unsigned char data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
};
@@ -254,16 +253,6 @@ struct vnt_usb_send_context {
*/
struct vnt_interrupt_buffer {
u8 *data_buf;
- bool in_use;
-};
-
-/*++ NDIS related */
-
-enum {
- STATUS_SUCCESS = 0,
- STATUS_FAILURE,
- STATUS_RESOURCES,
- STATUS_PENDING,
};
/* flags for options */
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
deleted file mode 100644
index a0b60e7d1086..000000000000
--- a/drivers/staging/vt6656/dpc.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * File: dpc.c
- *
- * Purpose: handle dpc rx functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 20, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "dpc.h"
-#include "device.h"
-#include "mac.h"
-#include "baseband.h"
-#include "rf.h"
-
-int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
- unsigned long bytes_received)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_supported_band *sband;
- struct sk_buff *skb;
- struct ieee80211_rx_status *rx_status;
- struct vnt_rx_header *head;
- struct vnt_rx_tail *tail;
- u32 frame_size;
- int ii;
- u16 rx_bitrate, pay_load_with_padding;
- u8 rate_idx = 0;
- long rx_dbm;
-
- skb = ptr_rcb->skb;
- rx_status = IEEE80211_SKB_RXCB(skb);
-
- /* [31:16]RcvByteCount ( not include 4-byte Status ) */
- head = (struct vnt_rx_header *)skb->data;
- frame_size = head->wbk_status >> 16;
- frame_size += 4;
-
- if (bytes_received != frame_size) {
- dev_dbg(&priv->usb->dev, "------- WRONG Length 1\n");
- return false;
- }
-
- if ((bytes_received > 2372) || (bytes_received <= 40)) {
- /* Frame Size error drop this packet.*/
- dev_dbg(&priv->usb->dev, "------ WRONG Length 2\n");
- return false;
- }
-
- /* real Frame Size = USBframe_size -4WbkStatus - 4RxStatus */
- /* -8TSF - 4RSR - 4SQ3 - ?Padding */
-
- /* if SQ3 the range is 24~27, if no SQ3 the range is 20~23 */
-
- /*Fix hardware bug => PLCP_Length error */
- if (((bytes_received - head->pay_load_len) > 27) ||
- ((bytes_received - head->pay_load_len) < 24) ||
- (bytes_received < head->pay_load_len)) {
- dev_dbg(&priv->usb->dev, "Wrong PLCP Length %x\n",
- head->pay_load_len);
- return false;
- }
-
- sband = hw->wiphy->bands[hw->conf.chandef.chan->band];
- rx_bitrate = head->rx_rate * 5; /* rx_rate * 5 */
-
- for (ii = 0; ii < sband->n_bitrates; ii++) {
- if (sband->bitrates[ii].bitrate == rx_bitrate) {
- rate_idx = ii;
- break;
- }
- }
-
- if (ii == sband->n_bitrates) {
- dev_dbg(&priv->usb->dev, "Wrong Rx Bit Rate %d\n", rx_bitrate);
- return false;
- }
-
- pay_load_with_padding = ((head->pay_load_len / 4) +
- ((head->pay_load_len % 4) ? 1 : 0)) * 4;
-
- tail = (struct vnt_rx_tail *)(skb->data +
- sizeof(*head) + pay_load_with_padding);
- priv->tsf_time = le64_to_cpu(tail->tsf_time);
-
- if (tail->rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
- return false;
-
- vnt_rf_rssi_to_dbm(priv, tail->rssi, &rx_dbm);
-
- priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
- priv->current_rssi = priv->bb_pre_ed_rssi;
-
- skb_pull(skb, sizeof(*head));
- skb_trim(skb, head->pay_load_len);
-
- rx_status->mactime = priv->tsf_time;
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->signal = rx_dbm;
- rx_status->flag = 0;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
-
- if (!(tail->rsr & RSR_CRCOK))
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
- rx_status->rate_idx = rate_idx;
-
- if (tail->new_rsr & NEWRSR_DECRYPTOK)
- rx_status->flag |= RX_FLAG_DECRYPTED;
-
- ieee80211_rx_irqsafe(priv->hw, skb);
-
- return true;
-}
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
deleted file mode 100644
index e080add823cb..000000000000
--- a/drivers/staging/vt6656/dpc.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * File: dpc.h
- *
- * Purpose:
- *
- * Author: Jerry Chen
- *
- * Date: Jun. 27, 2002
- *
- */
-
-#ifndef __DPC_H__
-#define __DPC_H__
-
-#include "device.h"
-
-int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
- unsigned long bytes_received);
-
-#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
deleted file mode 100644
index af215860be4c..000000000000
--- a/drivers/staging/vt6656/int.c
+++ /dev/null
@@ -1,164 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * File: int.c
- *
- * Purpose: Handle USB interrupt endpoint
- *
- * Author: Jerry Chen
- *
- * Date: Apr. 2, 2004
- *
- * Functions:
- *
- * Revision History:
- * 04-02-2004 Jerry Chen: Initial release
- *
- */
-
-#include "int.h"
-#include "mac.h"
-#include "power.h"
-#include "usbpipe.h"
-
-static const u8 fallback_rate0[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
- {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
- {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
-};
-
-static const u8 fallback_rate1[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
-};
-
-int vnt_int_start_interrupt(struct vnt_private *priv)
-{
- int ret = 0;
- unsigned long flags;
-
- dev_dbg(&priv->usb->dev, "---->Interrupt Polling Thread\n");
-
- spin_lock_irqsave(&priv->lock, flags);
-
- ret = vnt_start_interrupt_urb(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return ret;
-}
-
-static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
-{
- struct vnt_usb_send_context *context;
- struct ieee80211_tx_info *info;
- struct ieee80211_rate *rate;
- u8 tx_retry = (tsr & 0xf0) >> 4;
- s8 idx;
-
- if (pkt_no >= priv->num_tx_context)
- return -EINVAL;
-
- context = priv->tx_context[pkt_no];
-
- if (!context->skb)
- return -EINVAL;
-
- info = IEEE80211_SKB_CB(context->skb);
- idx = info->control.rates[0].idx;
-
- if (context->fb_option && !(tsr & (TSR_TMO | TSR_RETRYTMO))) {
- u8 tx_rate;
- u8 retry = tx_retry;
-
- rate = ieee80211_get_tx_rate(priv->hw, info);
- tx_rate = rate->hw_value - RATE_18M;
-
- if (retry > 4)
- retry = 4;
-
- if (context->fb_option == AUTO_FB_0)
- tx_rate = fallback_rate0[tx_rate][retry];
- else if (context->fb_option == AUTO_FB_1)
- tx_rate = fallback_rate1[tx_rate][retry];
-
- if (info->band == NL80211_BAND_5GHZ)
- idx = tx_rate - RATE_6M;
- else
- idx = tx_rate;
- }
-
- ieee80211_tx_info_clear_status(info);
-
- info->status.rates[0].count = tx_retry;
-
- if (!(tsr & TSR_TMO)) {
- info->status.rates[0].idx = idx;
-
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_ACK;
- }
-
- ieee80211_tx_status_irqsafe(priv->hw, context->skb);
-
- context->in_use = false;
-
- return 0;
-}
-
-void vnt_int_process_data(struct vnt_private *priv)
-{
- struct vnt_interrupt_data *int_data;
- struct ieee80211_low_level_stats *low_stats = &priv->low_stats;
-
- dev_dbg(&priv->usb->dev, "---->s_nsInterruptProcessData\n");
-
- int_data = (struct vnt_interrupt_data *)priv->int_buf.data_buf;
-
- if (int_data->tsr0 & TSR_VALID)
- vnt_int_report_rate(priv, int_data->pkt0, int_data->tsr0);
-
- if (int_data->tsr1 & TSR_VALID)
- vnt_int_report_rate(priv, int_data->pkt1, int_data->tsr1);
-
- if (int_data->tsr2 & TSR_VALID)
- vnt_int_report_rate(priv, int_data->pkt2, int_data->tsr2);
-
- if (int_data->tsr3 & TSR_VALID)
- vnt_int_report_rate(priv, int_data->pkt3, int_data->tsr3);
-
- if (int_data->isr0 != 0) {
- if (int_data->isr0 & ISR_BNTX &&
- priv->op_mode == NL80211_IFTYPE_AP)
- vnt_schedule_command(priv, WLAN_CMD_BECON_SEND);
-
- if (int_data->isr0 & ISR_TBTT &&
- priv->hw->conf.flags & IEEE80211_CONF_PS) {
- if (!priv->wake_up_count)
- priv->wake_up_count =
- priv->hw->conf.listen_interval;
-
- --priv->wake_up_count;
-
- /* Turn on wake up to listen next beacon */
- if (priv->wake_up_count == 1)
- vnt_schedule_command(priv,
- WLAN_CMD_TBTT_WAKEUP);
- }
- priv->current_tsf = le64_to_cpu(int_data->tsf);
-
- low_stats->dot11RTSSuccessCount += int_data->rts_success;
- low_stats->dot11RTSFailureCount += int_data->rts_fail;
- low_stats->dot11ACKFailureCount += int_data->ack_fail;
- low_stats->dot11FCSErrorCount += int_data->fcs_err;
- }
-
- priv->int_buf.in_use = false;
-}
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
deleted file mode 100644
index 8a6d60569ceb..000000000000
--- a/drivers/staging/vt6656/int.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * File: int.h
- *
- * Purpose:
- *
- * Author: Jerry Chen
- *
- * Date: Apr. 2, 2004
- *
- */
-
-#ifndef __INT_H__
-#define __INT_H__
-
-#include "device.h"
-
-struct vnt_interrupt_data {
- u8 tsr0;
- u8 pkt0;
- u16 time0;
- u8 tsr1;
- u8 pkt1;
- u16 time1;
- u8 tsr2;
- u8 pkt2;
- u16 time2;
- u8 tsr3;
- u8 pkt3;
- u16 time3;
- __le64 tsf;
- u8 isr0;
- u8 isr1;
- u8 rts_success;
- u8 rts_fail;
- u8 ack_fail;
- u8 fcs_err;
- u8 sw[2];
-} __packed;
-
-int vnt_int_start_interrupt(struct vnt_private *priv);
-void vnt_int_process_data(struct vnt_private *priv);
-
-#endif /* __INT_H__ */
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index dcd933a6b66e..ac3b188984d0 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -83,9 +83,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
case VNT_KEY_PAIRWISE:
key_mode |= mode;
key_inx = 4;
- /* Don't save entry for pairwise key for station mode */
- if (priv->op_mode == NL80211_IFTYPE_STATION)
- clear_bit(entry, &priv->key_entry_inuse);
break;
default:
return -EINVAL;
@@ -109,7 +106,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
{
- struct ieee80211_bss_conf *conf = &vif->bss_conf;
struct vnt_private *priv = hw->priv;
u8 *mac_addr = NULL;
u8 key_dec_mode = 0;
@@ -144,23 +140,22 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
break;
case WLAN_CIPHER_SUITE_CCMP:
if (priv->local_id <= MAC_REVISION_A1)
- return -EINVAL;
+ return -EOPNOTSUPP;
key_dec_mode = KEY_CTL_CCMP;
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
key_dec_mode, true);
- } else {
- vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
+ else
+ vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS,
key_dec_mode, true);
- vnt_set_keymode(hw, (u8 *)conf->bssid, key,
- VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
- }
-
return 0;
}
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 0a42308b81e9..c532b27de37f 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -20,6 +20,7 @@
#ifndef __MAC_H__
#define __MAC_H__
+#include <linux/bits.h>
#include "device.h"
#define REV_ID_VT3253_A0 0x00
@@ -142,109 +143,109 @@
#define MAC_REG_RSPINF_A_72 0xfc
/* Bits in the I2MCFG EEPROM register */
-#define I2MCFG_BOUNDCTL 0x80
-#define I2MCFG_WAITCTL 0x20
-#define I2MCFG_SCLOECTL 0x10
-#define I2MCFG_WBUSYCTL 0x08
-#define I2MCFG_NORETRY 0x04
-#define I2MCFG_I2MLDSEQ 0x02
-#define I2MCFG_I2CMFAST 0x01
+#define I2MCFG_BOUNDCTL BIT(7)
+#define I2MCFG_WAITCTL BIT(5)
+#define I2MCFG_SCLOECTL BIT(4)
+#define I2MCFG_WBUSYCTL BIT(3)
+#define I2MCFG_NORETRY BIT(2)
+#define I2MCFG_I2MLDSEQ BIT(1)
+#define I2MCFG_I2CMFAST BIT(0)
/* Bits in the I2MCSR EEPROM register */
-#define I2MCSR_EEMW 0x80
-#define I2MCSR_EEMR 0x40
-#define I2MCSR_AUTOLD 0x08
-#define I2MCSR_NACK 0x02
-#define I2MCSR_DONE 0x01
+#define I2MCSR_EEMW BIT(7)
+#define I2MCSR_EEMR BIT(6)
+#define I2MCSR_AUTOLD BIT(3)
+#define I2MCSR_NACK BIT(1)
+#define I2MCSR_DONE BIT(0)
/* Bits in the TMCTL register */
-#define TMCTL_TSUSP 0x04
-#define TMCTL_TMD 0x02
-#define TMCTL_TE 0x01
+#define TMCTL_TSUSP BIT(2)
+#define TMCTL_TMD BIT(1)
+#define TMCTL_TE BIT(0)
/* Bits in the TFTCTL register */
-#define TFTCTL_HWUTSF 0x80
-#define TFTCTL_TBTTSYNC 0x40
-#define TFTCTL_HWUTSFEN 0x20
-#define TFTCTL_TSFCNTRRD 0x10
-#define TFTCTL_TBTTSYNCEN 0x08
-#define TFTCTL_TSFSYNCEN 0x04
-#define TFTCTL_TSFCNTRST 0x02
-#define TFTCTL_TSFCNTREN 0x01
+#define TFTCTL_HWUTSF BIT(7)
+#define TFTCTL_TBTTSYNC BIT(6)
+#define TFTCTL_HWUTSFEN BIT(5)
+#define TFTCTL_TSFCNTRRD BIT(4)
+#define TFTCTL_TBTTSYNCEN BIT(3)
+#define TFTCTL_TSFSYNCEN BIT(2)
+#define TFTCTL_TSFCNTRST BIT(1)
+#define TFTCTL_TSFCNTREN BIT(0)
/* Bits in the EnhanceCFG_0 register */
#define EnCFG_BBType_a 0x00
-#define EnCFG_BBType_b 0x01
-#define EnCFG_BBType_g 0x02
-#define EnCFG_BBType_MASK 0x03
-#define EnCFG_ProtectMd 0x20
+#define EnCFG_BBType_b BIT(0)
+#define EnCFG_BBType_g BIT(1)
+#define EnCFG_BBType_MASK (BIT(0) | BIT(1))
+#define EnCFG_ProtectMd BIT(5)
/* Bits in the EnhanceCFG_1 register */
-#define EnCFG_BcnSusInd 0x01
-#define EnCFG_BcnSusClr 0x02
+#define EnCFG_BcnSusInd BIT(0)
+#define EnCFG_BcnSusClr BIT(1)
/* Bits in the EnhanceCFG_2 register */
-#define EnCFG_NXTBTTCFPSTR 0x01
-#define EnCFG_BarkerPream 0x02
-#define EnCFG_PktBurstMode 0x04
+#define EnCFG_NXTBTTCFPSTR BIT(0)
+#define EnCFG_BarkerPream BIT(1)
+#define EnCFG_PktBurstMode BIT(2)
/* Bits in the CFG register */
-#define CFG_TKIPOPT 0x80
-#define CFG_RXDMAOPT 0x40
-#define CFG_TMOT_SW 0x20
-#define CFG_TMOT_HWLONG 0x10
+#define CFG_TKIPOPT BIT(7)
+#define CFG_RXDMAOPT BIT(6)
+#define CFG_TMOT_SW BIT(5)
+#define CFG_TMOT_HWLONG BIT(4)
#define CFG_TMOT_HW 0x00
-#define CFG_CFPENDOPT 0x08
-#define CFG_BCNSUSEN 0x04
-#define CFG_NOTXTIMEOUT 0x02
-#define CFG_NOBUFOPT 0x01
+#define CFG_CFPENDOPT BIT(3)
+#define CFG_BCNSUSEN BIT(2)
+#define CFG_NOTXTIMEOUT BIT(1)
+#define CFG_NOBUFOPT BIT(0)
/* Bits in the TEST register */
-#define TEST_LBEXT 0x80
-#define TEST_LBINT 0x40
+#define TEST_LBEXT BIT(7)
+#define TEST_LBINT BIT(6)
#define TEST_LBNONE 0x00
-#define TEST_SOFTINT 0x20
-#define TEST_CONTTX 0x10
-#define TEST_TXPE 0x08
-#define TEST_NAVDIS 0x04
-#define TEST_NOCTS 0x02
-#define TEST_NOACK 0x01
+#define TEST_SOFTINT BIT(5)
+#define TEST_CONTTX BIT(4)
+#define TEST_TXPE BIT(3)
+#define TEST_NAVDIS BIT(2)
+#define TEST_NOCTS BIT(1)
+#define TEST_NOACK BIT(0)
/* Bits in the HOSTCR register */
-#define HOSTCR_TXONST 0x80
-#define HOSTCR_RXONST 0x40
-#define HOSTCR_ADHOC 0x20
-#define HOSTCR_AP 0x10
-#define HOSTCR_TXON 0x08
-#define HOSTCR_RXON 0x04
-#define HOSTCR_MACEN 0x02
-#define HOSTCR_SOFTRST 0x01
+#define HOSTCR_TXONST BIT(7)
+#define HOSTCR_RXONST BIT(6)
+#define HOSTCR_ADHOC BIT(5)
+#define HOSTCR_AP BIT(4)
+#define HOSTCR_TXON BIT(3)
+#define HOSTCR_RXON BIT(2)
+#define HOSTCR_MACEN BIT(1)
+#define HOSTCR_SOFTRST BIT(0)
/* Bits in the MACCR register */
-#define MACCR_SYNCFLUSHOK 0x04
-#define MACCR_SYNCFLUSH 0x02
-#define MACCR_CLRNAV 0x01
+#define MACCR_SYNCFLUSHOK BIT(2)
+#define MACCR_SYNCFLUSH BIT(1)
+#define MACCR_CLRNAV BIT(0)
/* Bits in the RCR register */
-#define RCR_SSID 0x80
-#define RCR_RXALLTYPE 0x40
-#define RCR_UNICAST 0x20
-#define RCR_BROADCAST 0x10
-#define RCR_MULTICAST 0x08
-#define RCR_WPAERR 0x04
-#define RCR_ERRCRC 0x02
-#define RCR_BSSID 0x01
+#define RCR_SSID BIT(7)
+#define RCR_RXALLTYPE BIT(6)
+#define RCR_UNICAST BIT(5)
+#define RCR_BROADCAST BIT(4)
+#define RCR_MULTICAST BIT(3)
+#define RCR_WPAERR BIT(2)
+#define RCR_ERRCRC BIT(1)
+#define RCR_BSSID BIT(0)
/* Bits in the TCR register */
-#define TCR_SYNCDCFOPT 0x02
-#define TCR_AUTOBCNTX 0x01
+#define TCR_SYNCDCFOPT BIT(1)
+#define TCR_AUTOBCNTX BIT(0)
/* ISR1 */
-#define ISR_GPIO3 0x40
-#define ISR_RXNOBUF 0x08
-#define ISR_MIBNEARFULL 0x04
-#define ISR_SOFTINT 0x02
-#define ISR_FETALERR 0x01
+#define ISR_GPIO3 BIT(6)
+#define ISR_RXNOBUF BIT(3)
+#define ISR_MIBNEARFULL BIT(2)
+#define ISR_SOFTINT BIT(1)
+#define ISR_FETALERR BIT(0)
#define LEDSTS_STS 0x06
#define LEDSTS_TMLEN 0x78
@@ -254,85 +255,85 @@
#define LEDSTS_INTER 0x06
/* ISR0 */
-#define ISR_WATCHDOG 0x80
-#define ISR_SOFTTIMER 0x40
-#define ISR_GPIO0 0x20
-#define ISR_TBTT 0x10
-#define ISR_RXDMA0 0x08
-#define ISR_BNTX 0x04
-#define ISR_ACTX 0x01
+#define ISR_WATCHDOG BIT(7)
+#define ISR_SOFTTIMER BIT(6)
+#define ISR_GPIO0 BIT(5)
+#define ISR_TBTT BIT(4)
+#define ISR_RXDMA0 BIT(3)
+#define ISR_BNTX BIT(2)
+#define ISR_ACTX BIT(0)
/* Bits in the PSCFG register */
-#define PSCFG_PHILIPMD 0x40
-#define PSCFG_WAKECALEN 0x20
-#define PSCFG_WAKETMREN 0x10
-#define PSCFG_BBPSPROG 0x08
-#define PSCFG_WAKESYN 0x04
-#define PSCFG_SLEEPSYN 0x02
-#define PSCFG_AUTOSLEEP 0x01
+#define PSCFG_PHILIPMD BIT(6)
+#define PSCFG_WAKECALEN BIT(5)
+#define PSCFG_WAKETMREN BIT(4)
+#define PSCFG_BBPSPROG BIT(3)
+#define PSCFG_WAKESYN BIT(2)
+#define PSCFG_SLEEPSYN BIT(1)
+#define PSCFG_AUTOSLEEP BIT(0)
/* Bits in the PSCTL register */
-#define PSCTL_WAKEDONE 0x20
-#define PSCTL_PS 0x10
-#define PSCTL_GO2DOZE 0x08
-#define PSCTL_LNBCN 0x04
-#define PSCTL_ALBCN 0x02
-#define PSCTL_PSEN 0x01
+#define PSCTL_WAKEDONE BIT(5)
+#define PSCTL_PS BIT(4)
+#define PSCTL_GO2DOZE BIT(3)
+#define PSCTL_LNBCN BIT(2)
+#define PSCTL_ALBCN BIT(1)
+#define PSCTL_PSEN BIT(0)
/* Bits in the PSPWSIG register */
-#define PSSIG_WPE3 0x80
-#define PSSIG_WPE2 0x40
-#define PSSIG_WPE1 0x20
-#define PSSIG_WRADIOPE 0x10
-#define PSSIG_SPE3 0x08
-#define PSSIG_SPE2 0x04
-#define PSSIG_SPE1 0x02
-#define PSSIG_SRADIOPE 0x01
+#define PSSIG_WPE3 BIT(7)
+#define PSSIG_WPE2 BIT(6)
+#define PSSIG_WPE1 BIT(5)
+#define PSSIG_WRADIOPE BIT(4)
+#define PSSIG_SPE3 BIT(3)
+#define PSSIG_SPE2 BIT(2)
+#define PSSIG_SPE1 BIT(1)
+#define PSSIG_SRADIOPE BIT(0)
/* Bits in the BBREGCTL register */
-#define BBREGCTL_DONE 0x04
-#define BBREGCTL_REGR 0x02
-#define BBREGCTL_REGW 0x01
+#define BBREGCTL_DONE BIT(2)
+#define BBREGCTL_REGR BIT(1)
+#define BBREGCTL_REGW BIT(0)
/* Bits in the IFREGCTL register */
-#define IFREGCTL_DONE 0x04
-#define IFREGCTL_IFRF 0x02
-#define IFREGCTL_REGW 0x01
+#define IFREGCTL_DONE BIT(2)
+#define IFREGCTL_IFRF BIT(1)
+#define IFREGCTL_REGW BIT(0)
/* Bits in the SOFTPWRCTL register */
-#define SOFTPWRCTL_RFLEOPT 0x08
-#define SOFTPWRCTL_TXPEINV 0x02
-#define SOFTPWRCTL_SWPECTI 0x01
-#define SOFTPWRCTL_SWPAPE 0x20
-#define SOFTPWRCTL_SWCALEN 0x10
-#define SOFTPWRCTL_SWRADIO_PE 0x08
-#define SOFTPWRCTL_SWPE2 0x04
-#define SOFTPWRCTL_SWPE1 0x02
-#define SOFTPWRCTL_SWPE3 0x01
+#define SOFTPWRCTL_RFLEOPT BIT(3)
+#define SOFTPWRCTL_TXPEINV BIT(1)
+#define SOFTPWRCTL_SWPECTI BIT(0)
+#define SOFTPWRCTL_SWPAPE BIT(5)
+#define SOFTPWRCTL_SWCALEN BIT(4)
+#define SOFTPWRCTL_SWRADIO_PE BIT(3)
+#define SOFTPWRCTL_SWPE2 BIT(2)
+#define SOFTPWRCTL_SWPE1 BIT(1)
+#define SOFTPWRCTL_SWPE3 BIT(0)
/* Bits in the GPIOCTL1 register */
-#define GPIO3_MD 0x20
-#define GPIO3_DATA 0x40
-#define GPIO3_INTMD 0x80
+#define GPIO3_MD BIT(5)
+#define GPIO3_DATA BIT(6)
+#define GPIO3_INTMD BIT(7)
/* Bits in the MISCFFCTL register */
-#define MISCFFCTL_WRITE 0x0001
+#define MISCFFCTL_WRITE BIT(0)
/* Loopback mode */
-#define MAC_LB_EXT 0x02
-#define MAC_LB_INTERNAL 0x01
+#define MAC_LB_EXT BIT(1)
+#define MAC_LB_INTERNAL BIT(0)
#define MAC_LB_NONE 0x00
/* Ethernet address filter type */
#define PKT_TYPE_NONE 0x00 /* turn off receiver */
-#define PKT_TYPE_ALL_MULTICAST 0x80
-#define PKT_TYPE_PROMISCUOUS 0x40
-#define PKT_TYPE_DIRECTED 0x20 /* obselete */
-#define PKT_TYPE_BROADCAST 0x10
-#define PKT_TYPE_MULTICAST 0x08
-#define PKT_TYPE_ERROR_WPA 0x04
-#define PKT_TYPE_ERROR_CRC 0x02
-#define PKT_TYPE_BSSID 0x01
+#define PKT_TYPE_ALL_MULTICAST BIT(7)
+#define PKT_TYPE_PROMISCUOUS BIT(6)
+#define PKT_TYPE_DIRECTED BIT(5) /* obsolete */
+#define PKT_TYPE_BROADCAST BIT(4)
+#define PKT_TYPE_MULTICAST BIT(3)
+#define PKT_TYPE_ERROR_WPA BIT(2)
+#define PKT_TYPE_ERROR_CRC BIT(1)
+#define PKT_TYPE_BSSID BIT(0)
#define Default_BI 0x200
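
Note: multi-bit fields such as EnCFG_BBType_MASK are written here as BIT(0) | BIT(1); for contiguous ranges <linux/bits.h> also offers GENMASK(), which this patch does not use but which would be equivalent, e.g.:

#include <assert.h>

/* Simplified forms of the <linux/bits.h> helpers. */
#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((1UL << ((h) - (l) + 1)) - 1) << (l))

int main(void)
{
	assert((BIT(0) | BIT(1)) == GENMASK(1, 0));	/* EnCFG_BBType_MASK */
	return 0;
}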
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 5e48b3ddb94c..5f78cad3b647 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -21,8 +21,10 @@
*/
#undef __NO_VERSION__
+#include <linux/bits.h>
#include <linux/etherdevice.h>
#include <linux/file.h>
+#include <linux/kernel.h>
#include "device.h"
#include "card.h"
#include "baseband.h"
@@ -30,12 +32,10 @@
#include "power.h"
#include "wcmd.h"
#include "rxtx.h"
-#include "dpc.h"
#include "rf.h"
#include "firmware.h"
#include "usbpipe.h"
#include "channel.h"
-#include "int.h"
/*
* define module options
@@ -99,7 +99,6 @@ static void vnt_set_options(struct vnt_private *priv)
priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->bb_type = BBP_TYPE_DEF;
priv->packet_type = priv->bb_type;
- priv->auto_fb_ctrl = AUTO_FB_0;
priv->preamble_type = 0;
priv->exist_sw_net_addr = false;
}
@@ -109,7 +108,7 @@ static void vnt_set_options(struct vnt_private *priv)
*/
static int vnt_init_registers(struct vnt_private *priv)
{
- int ret = 0;
+ int ret;
struct vnt_cmd_card_init *init_cmd = &priv->init_command;
struct vnt_rsp_card_init *init_rsp = &priv->init_response;
u8 antenna;
@@ -145,7 +144,7 @@ static int vnt_init_registers(struct vnt_private *priv)
init_cmd->init_class = DEVICE_INIT_COLD;
init_cmd->exist_sw_net_addr = priv->exist_sw_net_addr;
- for (ii = 0; ii < 6; ii++)
+ for (ii = 0; ii < ARRAY_SIZE(init_cmd->sw_net_addr); ii++)
init_cmd->sw_net_addr[ii] = priv->current_net_addr[ii];
init_cmd->short_retry_limit = priv->short_retry_limit;
init_cmd->long_retry_limit = priv->long_retry_limit;
@@ -184,7 +183,7 @@ static int vnt_init_registers(struct vnt_private *priv)
priv->cck_pwr = priv->eeprom[EEP_OFS_PWR_CCK];
priv->ofdm_pwr_g = priv->eeprom[EEP_OFS_PWR_OFDMG];
/* load power table */
- for (ii = 0; ii < 14; ii++) {
+ for (ii = 0; ii < ARRAY_SIZE(priv->cck_pwr_tbl); ii++) {
priv->cck_pwr_tbl[ii] =
priv->eeprom[ii + EEP_OFS_CCK_PWR_TBL];
if (priv->cck_pwr_tbl[ii] == 0)
@@ -200,7 +199,7 @@ static int vnt_init_registers(struct vnt_private *priv)
* original zonetype is USA, but custom zonetype is Europe,
* then need to recover 12, 13, 14 channels with 11 channel
*/
- for (ii = 11; ii < 14; ii++) {
+ for (ii = 11; ii < ARRAY_SIZE(priv->cck_pwr_tbl); ii++) {
priv->cck_pwr_tbl[ii] = priv->cck_pwr_tbl[10];
priv->ofdm_pwr_tbl[ii] = priv->ofdm_pwr_tbl[10];
}
@@ -261,9 +260,6 @@ static int vnt_init_registers(struct vnt_private *priv)
if (ret)
goto end;
- /* get Auto Fall Back type */
- priv->auto_fb_ctrl = AUTO_FB_0;
-
/* default Auto Mode */
priv->bb_type = BB_TYPE_11G;
@@ -370,7 +366,7 @@ static int vnt_init_registers(struct vnt_private *priv)
if (ret)
goto end;
- ret = vnt_mac_reg_bits_on(priv, MAC_REG_GPIOCTL0, 0x01);
+ ret = vnt_mac_reg_bits_on(priv, MAC_REG_GPIOCTL0, BIT(0));
if (ret)
goto end;
@@ -435,7 +431,7 @@ static void vnt_free_int_bufs(struct vnt_private *priv)
static int vnt_alloc_bufs(struct vnt_private *priv)
{
- int ret = 0;
+ int ret;
struct vnt_usb_send_context *tx_context;
struct vnt_rcb *rcb;
int ii;
@@ -484,9 +480,6 @@ static int vnt_alloc_bufs(struct vnt_private *priv)
ret = -ENOMEM;
goto free_rx_tx;
}
-
- rcb->in_use = false;
-
/* submit rx urb */
ret = vnt_submit_rx_urb(priv, rcb);
if (ret)
@@ -528,7 +521,7 @@ static void vnt_tx_80211(struct ieee80211_hw *hw,
static int vnt_start(struct ieee80211_hw *hw)
{
- int ret = 0;
+ int ret;
struct vnt_private *priv = hw->priv;
priv->rx_buf_sz = MAX_TOTAL_SIZE_WITH_ALL_HEADERS;
@@ -553,7 +546,7 @@ static int vnt_start(struct ieee80211_hw *hw)
priv->int_interval = 1; /* bInterval is set to 1 */
- ret = vnt_int_start_interrupt(priv);
+ ret = vnt_start_interrupt_urb(priv);
if (ret)
goto free_all;
@@ -632,8 +625,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
priv->op_mode = vif->type;
- vnt_set_bss_mode(priv);
-
/* LED blink on TX */
vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER);
@@ -720,7 +711,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->basic_rates = conf->basic_rates;
vnt_update_top_rates(priv);
- vnt_set_bss_mode(priv);
dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates);
}
@@ -749,11 +739,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->short_slot_time = false;
vnt_set_short_slot_time(priv);
- vnt_update_ifs(priv);
vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
vnt_update_pre_ed_threshold(priv, false);
}
+ if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE |
+ BSS_CHANGED_ERP_SLOT))
+ vnt_set_bss_mode(priv);
+
if (changed & BSS_CHANGED_TXPOWER)
vnt_rf_setpower(priv, priv->current_rate,
conf->chandef.chan->hw_value);
@@ -777,12 +770,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL,
TFTCTL_TSFCNTREN);
- vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
- conf->sync_tsf, priv->current_tsf);
-
vnt_mac_set_beacon_interval(priv, conf->beacon_int);
vnt_reset_next_tbtt(priv, conf->beacon_int);
+
+ vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
+ conf->sync_tsf, priv->current_tsf);
+
+ vnt_update_next_tbtt(priv,
+ conf->sync_tsf, conf->beacon_int);
} else {
vnt_clear_current_tsf(priv);
@@ -798,12 +794,11 @@ static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
struct vnt_private *priv = hw->priv;
struct netdev_hw_addr *ha;
u64 mc_filter = 0;
- u32 bit_nr = 0;
+ u32 bit_nr;
netdev_hw_addr_list_for_each(ha, mc_list) {
bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
-
- mc_filter |= 1ULL << (bit_nr & 0x3f);
+ mc_filter |= BIT_ULL(bit_nr);
}
priv->mc_list_count = mc_list->count;
@@ -817,15 +812,11 @@ static void vnt_configure(struct ieee80211_hw *hw,
{
struct vnt_private *priv = hw->priv;
u8 rx_mode = 0;
- int rc;
*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
- rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
- MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
-
- if (!rc)
- rx_mode = RCR_MULTICAST | RCR_BROADCAST;
+ vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
+ MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
@@ -862,12 +853,14 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- if (vnt_set_keys(hw, sta, vif, key))
- return -EOPNOTSUPP;
- break;
+ return vnt_set_keys(hw, sta, vif, key);
case DISABLE_KEY:
- if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
+ if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) {
clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
+
+ vnt_mac_disable_keyentry(priv, key->hw_key_idx);
+ }
+
default:
break;
}
@@ -973,7 +966,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
struct vnt_private *priv;
struct ieee80211_hw *hw;
struct wiphy *wiphy;
- int rc = 0;
+ int rc;
udev = usb_get_dev(interface_to_usbdev(intf));
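
Note: in vnt_prepare_multicast() the hand-rolled 1ULL shift becomes BIT_ULL(); since ether_crc() returns 32 bits, crc >> 26 is already confined to 0..63 and the old & 0x3f mask was redundant. An illustrative userspace check:

#include <assert.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))	/* simplified <linux/bits.h> form */

int main(void)
{
	uint32_t samples[] = { 0x0u, 0x04000000u, 0xdeadbeefu, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint32_t bit_nr = samples[i] >> 26;	/* ether_crc() stand-in */

		assert(bit_nr < 64);
		/* old: 1ULL << (bit_nr & 0x3f)  ==  new: BIT_ULL(bit_nr) */
		assert(BIT_ULL(bit_nr) == 1ULL << (bit_nr & 0x3f));
	}
	return 0;
}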
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 29caba728906..9439d190f431 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -13,7 +13,6 @@
*
* Functions:
* vnt_generate_tx_parameter - Generate tx dma required parameter.
- * vnt_get_duration_le - get tx data required duration
* vnt_get_rtscts_duration_le- get rtx/cts required duration
* vnt_get_rtscts_rsvtime_le- get rts/cts reserved time
* vnt_get_rsvtime- get frame reserved time
@@ -39,30 +38,12 @@ static const u16 vnt_time_stampoff[2][MAX_RATE] = {
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23},
};
-static const u16 vnt_fb_opt0[2][5] = {
- {RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, /* fallback_rate0 */
- {RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, /* fallback_rate1 */
-};
-
-static const u16 vnt_fb_opt1[2][5] = {
- {RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, /* fallback_rate0 */
- {RATE_6M, RATE_6M, RATE_12M, RATE_12M, RATE_18M}, /* fallback_rate1 */
-};
-
#define RTSDUR_BB 0
#define RTSDUR_BA 1
#define RTSDUR_AA 2
#define CTSDUR_BA 3
-#define RTSDUR_BA_F0 4
-#define RTSDUR_AA_F0 5
-#define RTSDUR_BA_F1 6
-#define RTSDUR_AA_F1 7
-#define CTSDUR_BA_F0 8
-#define CTSDUR_BA_F1 9
#define DATADUR_B 10
#define DATADUR_A 11
-#define DATADUR_A_F0 12
-#define DATADUR_A_F1 13
static struct vnt_usb_send_context
*vnt_get_free_context(struct vnt_private *priv)
@@ -184,27 +165,6 @@ static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv, u8 rsv_type,
return cpu_to_le16((u16)rrv_time);
}
-static __le16 vnt_get_duration_le(struct vnt_private *priv, u8 pkt_type,
- int need_ack)
-{
- u32 ack_time = 0;
-
- if (need_ack) {
- if (pkt_type == PK_TYPE_11B)
- ack_time = vnt_get_frame_time(priv->preamble_type,
- pkt_type, 14,
- priv->top_cck_basic_rate);
- else
- ack_time = vnt_get_frame_time(priv->preamble_type,
- pkt_type, 14,
- priv->top_ofdm_basic_rate);
-
- return cpu_to_le16((u16)(priv->sifs + ack_time));
- }
-
- return 0;
-}
-
static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
u8 dur_type, u8 pkt_type, u16 rate)
{
@@ -216,8 +176,6 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
switch (dur_type) {
case RTSDUR_BB:
case RTSDUR_BA:
- case RTSDUR_BA_F0:
- case RTSDUR_BA_F1:
cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
14, priv->top_cck_basic_rate);
dur_time = cts_time + 2 * priv->sifs +
@@ -226,8 +184,6 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
break;
case RTSDUR_AA:
- case RTSDUR_AA_F0:
- case RTSDUR_AA_F1:
cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
14, priv->top_ofdm_basic_rate);
dur_time = cts_time + 2 * priv->sifs +
@@ -236,8 +192,6 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
break;
case CTSDUR_BA:
- case CTSDUR_BA_F0:
- case CTSDUR_BA_F1:
dur_time = priv->sifs + vnt_get_rsvtime(priv,
pkt_type, frame_length, rate, need_ack);
break;
@@ -270,7 +224,6 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
(struct ieee80211_hdr *)tx_context->skb->data;
u32 frame_len = tx_context->frame_len;
u16 rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
/* Get SignalField,ServiceField,Length */
vnt_get_phy_field(priv, frame_len, rate, tx_context->pkt_type, &buf->a);
@@ -278,16 +231,8 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
- if (ieee80211_is_nullfunc(hdr->frame_control)) {
- buf->duration_a = hdr->duration_id;
- buf->duration_b = hdr->duration_id;
- } else {
- buf->duration_a = vnt_get_duration_le(priv,
- tx_context->pkt_type, need_ack);
- buf->duration_b = vnt_get_duration_le(priv,
- PK_TYPE_11B, need_ack);
- }
-
+ buf->duration_a = hdr->duration_id;
+ buf->duration_b = hdr->duration_id;
buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
buf->time_stamp_off_b = vnt_time_stamp_off(priv,
priv->top_cck_basic_rate);
@@ -297,63 +242,6 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
return le16_to_cpu(buf->duration_a);
}
-static u16 vnt_rxtx_datahead_g_fb(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_datahead_g_fb *buf)
-{
- struct vnt_private *priv = tx_context->priv;
- u32 frame_len = tx_context->frame_len;
- u16 rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
-
- /* Get SignalField,ServiceField,Length */
- vnt_get_phy_field(priv, frame_len, rate, tx_context->pkt_type, &buf->a);
-
- vnt_get_phy_field(priv, frame_len, priv->top_cck_basic_rate,
- PK_TYPE_11B, &buf->b);
-
- /* Get Duration and TimeStamp */
- buf->duration_a = vnt_get_duration_le(priv, tx_context->pkt_type,
- need_ack);
- buf->duration_b = vnt_get_duration_le(priv, PK_TYPE_11B, need_ack);
-
- buf->duration_a_f0 = vnt_get_duration_le(priv, tx_context->pkt_type,
- need_ack);
- buf->duration_a_f1 = vnt_get_duration_le(priv, tx_context->pkt_type,
- need_ack);
-
- buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
- buf->time_stamp_off_b = vnt_time_stamp_off(priv,
- priv->top_cck_basic_rate);
-
- tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
-
- return le16_to_cpu(buf->duration_a);
-}
-
-static u16 vnt_rxtx_datahead_a_fb(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_datahead_a_fb *buf)
-{
- struct vnt_private *priv = tx_context->priv;
- u16 rate = tx_context->tx_rate;
- u8 pkt_type = tx_context->pkt_type;
- u8 need_ack = tx_context->need_ack;
- u32 frame_len = tx_context->frame_len;
-
- /* Get SignalField,ServiceField,Length */
- vnt_get_phy_field(priv, frame_len, rate, pkt_type, &buf->a);
- /* Get Duration and TimeStampOff */
- buf->duration = vnt_get_duration_le(priv, pkt_type, need_ack);
-
- buf->duration_f0 = vnt_get_duration_le(priv, pkt_type, need_ack);
- buf->duration_f1 = vnt_get_duration_le(priv, pkt_type, need_ack);
-
- buf->time_stamp_off = vnt_time_stamp_off(priv, rate);
-
- tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
-
- return le16_to_cpu(buf->duration);
-}
-
static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
struct vnt_tx_datahead_ab *buf)
{
@@ -362,20 +250,13 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
(struct ieee80211_hdr *)tx_context->skb->data;
u32 frame_len = tx_context->frame_len;
u16 rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
/* Get SignalField,ServiceField,Length */
vnt_get_phy_field(priv, frame_len, rate,
tx_context->pkt_type, &buf->ab);
/* Get Duration and TimeStampOff */
- if (ieee80211_is_nullfunc(hdr->frame_control)) {
- buf->duration = hdr->duration_id;
- } else {
- buf->duration = vnt_get_duration_le(priv, tx_context->pkt_type,
- need_ack);
- }
-
+ buf->duration = hdr->duration_id;
buf->time_stamp_off = vnt_time_stamp_off(priv, rate);
tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
@@ -426,50 +307,6 @@ static u16 vnt_rxtx_rts_g_head(struct vnt_usb_send_context *tx_context,
return vnt_rxtx_datahead_g(tx_context, &buf->data_head);
}
-static u16 vnt_rxtx_rts_g_fb_head(struct vnt_usb_send_context *tx_context,
- struct vnt_rts_g_fb *buf)
-{
- struct vnt_private *priv = tx_context->priv;
- u16 current_rate = tx_context->tx_rate;
- u16 rts_frame_len = 20;
-
- vnt_get_phy_field(priv, rts_frame_len, priv->top_cck_basic_rate,
- PK_TYPE_11B, &buf->b);
- vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
- tx_context->pkt_type, &buf->a);
-
- buf->duration_bb = vnt_get_rtscts_duration_le(tx_context, RTSDUR_BB,
- PK_TYPE_11B,
- priv->top_cck_basic_rate);
- buf->duration_aa = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA,
- tx_context->pkt_type,
- current_rate);
- buf->duration_ba = vnt_get_rtscts_duration_le(tx_context, RTSDUR_BA,
- tx_context->pkt_type,
- current_rate);
-
- buf->rts_duration_ba_f0 =
- vnt_get_rtscts_duration_le(tx_context, RTSDUR_BA_F0,
- tx_context->pkt_type,
- priv->tx_rate_fb0);
- buf->rts_duration_aa_f0 =
- vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA_F0,
- tx_context->pkt_type,
- priv->tx_rate_fb0);
- buf->rts_duration_ba_f1 =
- vnt_get_rtscts_duration_le(tx_context, RTSDUR_BA_F1,
- tx_context->pkt_type,
- priv->tx_rate_fb1);
- buf->rts_duration_aa_f1 =
- vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA_F1,
- tx_context->pkt_type,
- priv->tx_rate_fb1);
-
- vnt_fill_ieee80211_rts(tx_context, &buf->data, buf->duration_aa);
-
- return vnt_rxtx_datahead_g_fb(tx_context, &buf->data_head);
-}
-
static u16 vnt_rxtx_rts_ab_head(struct vnt_usb_send_context *tx_context,
struct vnt_rts_ab *buf)
{
@@ -489,71 +326,6 @@ static u16 vnt_rxtx_rts_ab_head(struct vnt_usb_send_context *tx_context,
return vnt_rxtx_datahead_ab(tx_context, &buf->data_head);
}
-static u16 vnt_rxtx_rts_a_fb_head(struct vnt_usb_send_context *tx_context,
- struct vnt_rts_a_fb *buf)
-{
- struct vnt_private *priv = tx_context->priv;
- u16 current_rate = tx_context->tx_rate;
- u16 rts_frame_len = 20;
-
- vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
- tx_context->pkt_type, &buf->a);
-
- buf->duration = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA,
- tx_context->pkt_type,
- current_rate);
-
- buf->rts_duration_f0 =
- vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA_F0,
- tx_context->pkt_type,
- priv->tx_rate_fb0);
-
- buf->rts_duration_f1 =
- vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA_F1,
- tx_context->pkt_type,
- priv->tx_rate_fb1);
-
- vnt_fill_ieee80211_rts(tx_context, &buf->data, buf->duration);
-
- return vnt_rxtx_datahead_a_fb(tx_context, &buf->data_head);
-}
-
-static u16 vnt_fill_cts_fb_head(struct vnt_usb_send_context *tx_context,
- union vnt_tx_data_head *head)
-{
- struct vnt_private *priv = tx_context->priv;
- struct vnt_cts_fb *buf = &head->cts_g_fb;
- u32 cts_frame_len = 14;
- u16 current_rate = tx_context->tx_rate;
-
- /* Get SignalField,ServiceField,Length */
- vnt_get_phy_field(priv, cts_frame_len, priv->top_cck_basic_rate,
- PK_TYPE_11B, &buf->b);
-
- buf->duration_ba =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA,
- tx_context->pkt_type,
- current_rate);
- /* Get CTSDuration_ba_f0 */
- buf->cts_duration_ba_f0 =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA_F0,
- tx_context->pkt_type,
- priv->tx_rate_fb0);
- /* Get CTSDuration_ba_f1 */
- buf->cts_duration_ba_f1 =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA_F1,
- tx_context->pkt_type,
- priv->tx_rate_fb1);
- /* Get CTS Frame body */
- buf->data.duration = buf->duration_ba;
- buf->data.frame_control =
- cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
-
- ether_addr_copy(buf->data.ra, priv->current_net_addr);
-
- return vnt_rxtx_datahead_g_fb(tx_context, &buf->data_head);
-}
-
static u16 vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
union vnt_tx_data_head *head)
{
@@ -606,9 +378,6 @@ static u16 vnt_rxtx_rts(struct vnt_usb_send_context *tx_context,
if (need_mic)
head = &tx_head->tx_rts.tx.mic.head;
- if (tx_context->fb_option)
- return vnt_rxtx_rts_g_fb_head(tx_context, &head->rts_g_fb);
-
return vnt_rxtx_rts_g_head(tx_context, &head->rts_g);
}
@@ -633,10 +402,6 @@ static u16 vnt_rxtx_cts(struct vnt_usb_send_context *tx_context,
if (need_mic)
head = &tx_head->tx_cts.tx.mic.head;
- /* Fill CTS */
- if (tx_context->fb_option)
- return vnt_fill_cts_fb_head(tx_context, head);
-
return vnt_fill_cts_head(tx_context, head);
}
@@ -664,18 +429,9 @@ static u16 vnt_rxtx_ab(struct vnt_usb_send_context *tx_context,
buf->rts_rrv_time = vnt_get_rtscts_rsvtime_le(priv, 2,
tx_context->pkt_type, frame_len, current_rate);
- if (tx_context->fb_option &&
- tx_context->pkt_type == PK_TYPE_11A)
- return vnt_rxtx_rts_a_fb_head(tx_context,
- &head->rts_a_fb);
-
return vnt_rxtx_rts_ab_head(tx_context, &head->rts_ab);
}
- if (tx_context->pkt_type == PK_TYPE_11A)
- return vnt_rxtx_datahead_a_fb(tx_context,
- &head->data_head_a_fb);
-
return vnt_rxtx_datahead_ab(tx_context, &head->data_head_ab);
}
@@ -792,7 +548,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
struct vnt_usb_send_context *tx_context;
unsigned long flags;
u16 tx_bytes, tx_header_size, tx_body_size, current_rate, duration_id;
- u8 pkt_type, fb_option = AUTO_FB_NONE;
+ u8 pkt_type;
bool need_rts = false;
bool need_mic = false;
@@ -912,33 +668,6 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
tx_buffer_head->current_rate = cpu_to_le16(current_rate);
- /* legacy rates TODO use ieee80211_tx_rate */
- if (current_rate >= RATE_18M && ieee80211_is_data(hdr->frame_control)) {
- if (priv->auto_fb_ctrl == AUTO_FB_0) {
- tx_buffer_head->fifo_ctl |=
- cpu_to_le16(FIFOCTL_AUTO_FB_0);
-
- priv->tx_rate_fb0 =
- vnt_fb_opt0[FB_RATE0][current_rate - RATE_18M];
- priv->tx_rate_fb1 =
- vnt_fb_opt0[FB_RATE1][current_rate - RATE_18M];
-
- fb_option = AUTO_FB_0;
- } else if (priv->auto_fb_ctrl == AUTO_FB_1) {
- tx_buffer_head->fifo_ctl |=
- cpu_to_le16(FIFOCTL_AUTO_FB_1);
-
- priv->tx_rate_fb0 =
- vnt_fb_opt1[FB_RATE0][current_rate - RATE_18M];
- priv->tx_rate_fb1 =
- vnt_fb_opt1[FB_RATE1][current_rate - RATE_18M];
-
- fb_option = AUTO_FB_1;
- }
- }
-
- tx_context->fb_option = fb_option;
-
duration_id = vnt_generate_tx_parameter(tx_context, tx_buffer, &mic_hdr,
need_mic, need_rts);
@@ -954,8 +683,6 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
memcpy(tx_context->hdr, skb->data, tx_body_size);
- hdr->duration_id = cpu_to_le16(duration_id);
-
if (info->control.hw_key) {
tx_key = info->control.hw_key;
if (tx_key->keylen > 0)
@@ -977,7 +704,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
- if (vnt_tx_context(priv, tx_context) != STATUS_PENDING) {
+ if (vnt_tx_context(priv, tx_context)) {
spin_unlock_irqrestore(&priv->lock, flags);
return -EIO;
}
@@ -1021,9 +748,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
vnt_get_phy_field(priv, frame_size, current_rate,
PK_TYPE_11A, &short_head->ab);
- /* Get Duration and TimeStampOff */
- short_head->duration = vnt_get_duration_le(priv,
- PK_TYPE_11A, false);
+ /* Get TimeStampOff */
short_head->time_stamp_off =
vnt_time_stamp_off(priv, current_rate);
} else {
@@ -1034,9 +759,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
vnt_get_phy_field(priv, frame_size, current_rate,
PK_TYPE_11B, &short_head->ab);
- /* Get Duration and TimeStampOff */
- short_head->duration = vnt_get_duration_le(priv,
- PK_TYPE_11B, false);
+ /* Get TimeStampOff */
short_head->time_stamp_off =
vnt_time_stamp_off(priv, current_rate);
}
@@ -1045,6 +768,9 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
mgmt_hdr = &beacon_buffer->mgmt_hdr;
memcpy(mgmt_hdr, skb->data, skb->len);
+ /* Get Duration */
+ short_head->duration = mgmt_hdr->duration;
+
/* time stamp always 0 */
mgmt_hdr->u.beacon.timestamp = 0;
@@ -1071,7 +797,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
- if (vnt_tx_context(priv, context) != STATUS_PENDING)
+ if (vnt_tx_context(priv, context))
ieee80211_free_txskb(priv->hw, context->skb);
spin_unlock_irqrestore(&priv->lock, flags);
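Note on the return-convention change in this file: vnt_tx_context() now follows the usual kernel convention of returning 0 on success and a negative errno on failure, so callers test the result directly instead of comparing against the driver-private STATUS_PENDING value. A minimal sketch of the new calling pattern (illustrative only, not the exact driver code; example_submit is a hypothetical name):

/* Sketch: callers treat any non-zero return as failure. */
static int example_submit(struct vnt_private *priv,
			  struct vnt_usb_send_context *ctx)
{
	int ret;

	ret = vnt_tx_context(priv, ctx);	/* 0 or -errno (e.g. -ENODEV) */
	if (ret)
		return ret;			/* propagate the errno as-is */

	return 0;
}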
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index d528607e02bf..0e6226af7d41 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -73,18 +73,6 @@ struct vnt_tx_datahead_g {
struct ieee80211_hdr hdr;
} __packed;
-struct vnt_tx_datahead_g_fb {
- struct vnt_phy_field b;
- struct vnt_phy_field a;
- __le16 duration_b;
- __le16 duration_a;
- __le16 duration_a_f0;
- __le16 duration_a_f1;
- __le16 time_stamp_off_b;
- __le16 time_stamp_off_a;
- struct ieee80211_hdr hdr;
-} __packed;
-
struct vnt_tx_datahead_ab {
struct vnt_phy_field ab;
__le16 duration;
@@ -92,15 +80,6 @@ struct vnt_tx_datahead_ab {
struct ieee80211_hdr hdr;
} __packed;
-struct vnt_tx_datahead_a_fb {
- struct vnt_phy_field a;
- __le16 duration;
- __le16 time_stamp_off;
- __le16 duration_f0;
- __le16 duration_f1;
- struct ieee80211_hdr hdr;
-} __packed;
-
/* RTS buffer header */
struct vnt_rts_g {
struct vnt_phy_field b;
@@ -113,21 +92,6 @@ struct vnt_rts_g {
struct vnt_tx_datahead_g data_head;
} __packed;
-struct vnt_rts_g_fb {
- struct vnt_phy_field b;
- struct vnt_phy_field a;
- __le16 duration_ba;
- __le16 duration_aa;
- __le16 duration_bb;
- u16 wReserved;
- __le16 rts_duration_ba_f0;
- __le16 rts_duration_aa_f0;
- __le16 rts_duration_ba_f1;
- __le16 rts_duration_aa_f1;
- struct ieee80211_rts data;
- struct vnt_tx_datahead_g_fb data_head;
-} __packed;
-
struct vnt_rts_ab {
struct vnt_phy_field ab;
__le16 duration;
@@ -136,16 +100,6 @@ struct vnt_rts_ab {
struct vnt_tx_datahead_ab data_head;
} __packed;
-struct vnt_rts_a_fb {
- struct vnt_phy_field a;
- __le16 duration;
- u16 wReserved;
- __le16 rts_duration_f0;
- __le16 rts_duration_f1;
- struct ieee80211_rts data;
- struct vnt_tx_datahead_a_fb data_head;
-} __packed;
-
/* CTS buffer header */
struct vnt_cts {
struct vnt_phy_field b;
@@ -156,29 +110,14 @@ struct vnt_cts {
struct vnt_tx_datahead_g data_head;
} __packed;
-struct vnt_cts_fb {
- struct vnt_phy_field b;
- __le16 duration_ba;
- u16 wReserved;
- __le16 cts_duration_ba_f0;
- __le16 cts_duration_ba_f1;
- struct ieee80211_cts data;
- u16 reserved2;
- struct vnt_tx_datahead_g_fb data_head;
-} __packed;
-
union vnt_tx_data_head {
/* rts g */
struct vnt_rts_g rts_g;
- struct vnt_rts_g_fb rts_g_fb;
/* rts a/b */
struct vnt_rts_ab rts_ab;
- struct vnt_rts_a_fb rts_a_fb;
/* cts g */
struct vnt_cts cts_g;
- struct vnt_cts_fb cts_g_fb;
/* no rts/cts */
- struct vnt_tx_datahead_a_fb data_head_a_fb;
struct vnt_tx_datahead_ab data_head_ab;
};
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 7bfccc48a366..91b62c3dff7b 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -24,12 +24,12 @@
*
*/
-#include "int.h"
#include "rxtx.h"
-#include "dpc.h"
#include "desc.h"
#include "device.h"
#include "usbpipe.h"
+#include "mac.h"
+#include "rf.h"
#define USB_CTL_WAIT 500 /* ms */
@@ -139,6 +139,91 @@ int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data)
reg_off, reg, sizeof(u8), data);
}
+static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
+{
+ struct vnt_usb_send_context *context;
+ struct ieee80211_tx_info *info;
+ u8 tx_retry = (tsr & 0xf0) >> 4;
+ s8 idx;
+
+ if (pkt_no >= priv->num_tx_context)
+ return -EINVAL;
+
+ context = priv->tx_context[pkt_no];
+
+ if (!context->skb)
+ return -EINVAL;
+
+ info = IEEE80211_SKB_CB(context->skb);
+ idx = info->control.rates[0].idx;
+
+ ieee80211_tx_info_clear_status(info);
+
+ info->status.rates[0].count = tx_retry;
+
+ if (!(tsr & TSR_TMO)) {
+ info->status.rates[0].idx = idx;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ ieee80211_tx_status_irqsafe(priv->hw, context->skb);
+
+ context->in_use = false;
+
+ return 0;
+}
+
+static void vnt_int_process_data(struct vnt_private *priv)
+{
+ struct vnt_interrupt_data *int_data;
+ struct ieee80211_low_level_stats *low_stats = &priv->low_stats;
+
+ dev_dbg(&priv->usb->dev, "---->s_nsInterruptProcessData\n");
+
+ int_data = (struct vnt_interrupt_data *)priv->int_buf.data_buf;
+
+ if (int_data->tsr0 & TSR_VALID)
+ vnt_int_report_rate(priv, int_data->pkt0, int_data->tsr0);
+
+ if (int_data->tsr1 & TSR_VALID)
+ vnt_int_report_rate(priv, int_data->pkt1, int_data->tsr1);
+
+ if (int_data->tsr2 & TSR_VALID)
+ vnt_int_report_rate(priv, int_data->pkt2, int_data->tsr2);
+
+ if (int_data->tsr3 & TSR_VALID)
+ vnt_int_report_rate(priv, int_data->pkt3, int_data->tsr3);
+
+ if (int_data->isr0 != 0) {
+ if (int_data->isr0 & ISR_BNTX &&
+ priv->op_mode == NL80211_IFTYPE_AP)
+ vnt_schedule_command(priv, WLAN_CMD_BECON_SEND);
+
+ if (int_data->isr0 & ISR_TBTT &&
+ priv->hw->conf.flags & IEEE80211_CONF_PS) {
+ if (!priv->wake_up_count)
+ priv->wake_up_count =
+ priv->hw->conf.listen_interval;
+
+ if (priv->wake_up_count)
+ --priv->wake_up_count;
+
+ /* Turn on wakeup to listen for the next beacon */

+ if (priv->wake_up_count == 1)
+ vnt_schedule_command(priv,
+ WLAN_CMD_TBTT_WAKEUP);
+ }
+ priv->current_tsf = le64_to_cpu(int_data->tsf);
+
+ low_stats->dot11RTSSuccessCount += int_data->rts_success;
+ low_stats->dot11RTSFailureCount += int_data->rts_fail;
+ low_stats->dot11ACKFailureCount += int_data->ack_fail;
+ low_stats->dot11FCSErrorCount += int_data->fcs_err;
+ }
+}
+
static void vnt_start_interrupt_urb_complete(struct urb *urb)
{
struct vnt_private *priv = urb->context;
@@ -151,37 +236,26 @@ static void vnt_start_interrupt_urb_complete(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
- priv->int_buf.in_use = false;
return;
default:
break;
}
- if (status) {
- priv->int_buf.in_use = false;
-
+ if (status)
dev_dbg(&priv->usb->dev, "%s status = %d\n", __func__, status);
- } else {
+ else
vnt_int_process_data(priv);
- }
status = usb_submit_urb(priv->interrupt_urb, GFP_ATOMIC);
if (status)
dev_dbg(&priv->usb->dev, "Submit int URB failed %d\n", status);
- else
- priv->int_buf.in_use = true;
}
int vnt_start_interrupt_urb(struct vnt_private *priv)
{
int ret = 0;
- if (priv->int_buf.in_use) {
- ret = -EBUSY;
- goto err;
- }
-
- priv->int_buf.in_use = true;
+ dev_dbg(&priv->usb->dev, "---->Interrupt Polling Thread\n");
usb_fill_int_urb(priv->interrupt_urb,
priv->usb,
@@ -193,17 +267,110 @@ int vnt_start_interrupt_urb(struct vnt_private *priv)
priv->int_interval);
ret = usb_submit_urb(priv->interrupt_urb, GFP_ATOMIC);
- if (ret) {
+ if (ret)
dev_dbg(&priv->usb->dev, "Submit int URB failed %d\n", ret);
- goto err_submit;
+
+ return ret;
+}
+
+static int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
+ unsigned long bytes_received)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_supported_band *sband;
+ struct sk_buff *skb;
+ struct ieee80211_rx_status *rx_status;
+ struct vnt_rx_header *head;
+ struct vnt_rx_tail *tail;
+ u32 frame_size;
+ int ii;
+ u16 rx_bitrate, pay_load_with_padding;
+ u8 rate_idx = 0;
+ long rx_dbm;
+
+ skb = ptr_rcb->skb;
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /* [31:16] RcvByteCount (does not include the 4-byte status) */
+ head = (struct vnt_rx_header *)skb->data;
+ frame_size = head->wbk_status >> 16;
+ frame_size += 4;
+
+ if (bytes_received != frame_size) {
+ dev_dbg(&priv->usb->dev, "------- WRONG Length 1\n");
+ return false;
}
- return 0;
+ if ((bytes_received > 2372) || (bytes_received <= 40)) {
+ /* Frame size error, drop this packet. */
+ dev_dbg(&priv->usb->dev, "------ WRONG Length 2\n");
+ return false;
+ }
-err_submit:
- priv->int_buf.in_use = false;
-err:
- return ret;
+ /* real frame size = USB frame size - 4 (WbkStatus) - 4 (RxStatus) */
+ /* - 8 (TSF) - 4 (RSR) - 4 (SQ3) - padding */
+
+ /* with SQ3 the difference is 24~27, without SQ3 it is 20~23 */
+
+ /* Work around hardware bug => PLCP_Length error */
+ if (((bytes_received - head->pay_load_len) > 27) ||
+ ((bytes_received - head->pay_load_len) < 24) ||
+ (bytes_received < head->pay_load_len)) {
+ dev_dbg(&priv->usb->dev, "Wrong PLCP Length %x\n",
+ head->pay_load_len);
+ return false;
+ }
+
+ sband = hw->wiphy->bands[hw->conf.chandef.chan->band];
+ rx_bitrate = head->rx_rate * 5; /* convert to mac80211 units of 100 kbps */
+
+ for (ii = 0; ii < sband->n_bitrates; ii++) {
+ if (sband->bitrates[ii].bitrate == rx_bitrate) {
+ rate_idx = ii;
+ break;
+ }
+ }
+
+ if (ii == sband->n_bitrates) {
+ dev_dbg(&priv->usb->dev, "Wrong Rx Bit Rate %d\n", rx_bitrate);
+ return false;
+ }
+
+ pay_load_with_padding = ((head->pay_load_len / 4) +
+ ((head->pay_load_len % 4) ? 1 : 0)) * 4;
+
+ tail = (struct vnt_rx_tail *)(skb->data +
+ sizeof(*head) + pay_load_with_padding);
+ priv->tsf_time = le64_to_cpu(tail->tsf_time);
+
+ if (tail->rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
+ return false;
+
+ vnt_rf_rssi_to_dbm(priv, tail->rssi, &rx_dbm);
+
+ priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
+ priv->current_rssi = priv->bb_pre_ed_rssi;
+
+ skb_pull(skb, sizeof(*head));
+ skb_trim(skb, head->pay_load_len);
+
+ rx_status->mactime = priv->tsf_time;
+ rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->signal = rx_dbm;
+ rx_status->flag = 0;
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+
+ if (!(tail->rsr & RSR_CRCOK))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ rx_status->rate_idx = rate_idx;
+
+ if (tail->new_rsr & NEWRSR_DECRYPTOK)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+
+ ieee80211_rx_irqsafe(priv->hw, skb);
+
+ return true;
}
static void vnt_submit_rx_urb_complete(struct urb *urb)
@@ -227,10 +394,8 @@ static void vnt_submit_rx_urb_complete(struct urb *urb)
if (urb->actual_length) {
if (vnt_rx_data(priv, rcb, urb->actual_length)) {
rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
- if (!rcb->skb) {
- rcb->in_use = false;
+ if (!rcb->skb)
return;
- }
} else {
skb_push(rcb->skb, skb_headroom(rcb->skb));
skb_trim(rcb->skb, 0);
@@ -240,11 +405,8 @@ static void vnt_submit_rx_urb_complete(struct urb *urb)
skb_tailroom(rcb->skb));
}
- if (usb_submit_urb(urb, GFP_ATOMIC)) {
+ if (usb_submit_urb(urb, GFP_ATOMIC))
dev_dbg(&priv->usb->dev, "Failed to re submit rx skb\n");
-
- rcb->in_use = false;
- }
}
int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb)
@@ -267,13 +429,8 @@ int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb)
rcb);
ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret) {
+ if (ret)
dev_dbg(&priv->usb->dev, "Submit Rx URB failed %d\n", ret);
- goto end;
- }
-
- rcb->in_use = true;
-
end:
return ret;
}
@@ -317,7 +474,7 @@ int vnt_tx_context(struct vnt_private *priv,
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) {
context->in_use = false;
- return STATUS_RESOURCES;
+ return -ENODEV;
}
usb_fill_bulk_urb(urb,
@@ -333,8 +490,7 @@ int vnt_tx_context(struct vnt_private *priv,
dev_dbg(&priv->usb->dev, "Submit Tx URB failed %d\n", status);
context->in_use = false;
- return STATUS_FAILURE;
}
- return STATUS_PENDING;
+ return status;
}
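The new vnt_int_report_rate() above extracts two pieces of information from each TSR byte: the retry count in the high nibble and the timeout status in TSR_TMO. A hedged sketch of the nibble decode, assuming the same bit layout (TSR_RETRY is a hypothetical name; the driver open-codes the mask):

#include <linux/bitfield.h>
#include <linux/types.h>

#define TSR_RETRY	GENMASK(7, 4)	/* assumed mask for the retry count */

static inline u8 example_tsr_retry(u8 tsr)
{
	return FIELD_GET(TSR_RETRY, tsr);	/* same as (tsr & 0xf0) >> 4 */
}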
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index 4e3341bc3221..35697b58d748 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -18,6 +18,29 @@
#include "device.h"
+struct vnt_interrupt_data {
+ u8 tsr0;
+ u8 pkt0;
+ u16 time0;
+ u8 tsr1;
+ u8 pkt1;
+ u16 time1;
+ u8 tsr2;
+ u8 pkt2;
+ u16 time2;
+ u8 tsr3;
+ u8 pkt3;
+ u16 time3;
+ __le64 tsf;
+ u8 isr0;
+ u8 isr1;
+ u8 rts_success;
+ u8 rts_fail;
+ u8 ack_fail;
+ u8 fcs_err;
+ u8 sw[2];
+} __packed;
+
#define VNT_REG_BLOCK_SIZE 64
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
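struct vnt_interrupt_data above mirrors the fixed layout the device writes into int_buf, which is why it is __packed. Summing the fields gives 4 * (1 + 1 + 2) + 8 + 6 + 2 = 32 bytes; a build-time check along these lines (an illustrative sketch, not present in the driver, assuming the device really reports a 32-byte block) would catch accidental padding:

#include <linux/build_bug.h>

/* Sketch: the packed layout above should be exactly 32 bytes. */
static inline void example_check_int_data_layout(void)
{
	BUILD_BUG_ON(sizeof(struct vnt_interrupt_data) != 32);
}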
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
index 081d58abd5ac..17db67559f5e 100644
--- a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
+++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
@@ -6,7 +6,7 @@ SPI
You have to declare the WFxxx chip in your device tree.
Required properties:
- - compatible: Should be "silabs,wfx-spi"
+ - compatible: Should be "silabs,wf200"
- reg: Chip select address of device
- spi-max-frequency: Maximum SPI clocking speed of device in Hz
- interrupts-extended: Should contain interrupt line (interrupt-parent +
@@ -15,6 +15,7 @@ Required properties:
Optional properties:
- reset-gpios: phandle of gpio that will be used to reset chip during probe.
Without this property, you may encounter issues with warm boot.
+ (Legacy: when compatible == "silabs,wfx-spi", the gpio is inverted.)
Please consult Documentation/devicetree/bindings/spi/spi-bus.txt for optional
SPI connection related properties,
@@ -23,12 +24,12 @@ Example:
&spi1 {
wfx {
- compatible = "silabs,wfx-spi";
+ compatible = "silabs,wf200";
pinctrl-names = "default";
pinctrl-0 = <&wfx_irq &wfx_gpios>;
interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&gpio 13 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
reg = <0>;
spi-max-frequency = <42000000>;
};
@@ -44,7 +45,7 @@ case. Thus declaring WFxxx chip in device tree is strongly recommended (and may
become mandatory in the future).
Required properties:
- - compatible: Should be "silabs,wfx-sdio"
+ - compatible: Should be "silabs,wf200"
- reg: Should be 1
In addition, it is recommended to declare a mmc-pwrseq on SDIO host above WFx.
@@ -70,7 +71,7 @@ Example:
#size = <0>;
mmc@1 {
- compatible = "silabs,wfx-sdio";
+ compatible = "silabs,wf200";
reg = <1>;
pinctrl-names = "default";
pinctrl-0 = <&wfx_wakeup>;
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index 983c41d1fe7c..9fcab00a3733 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -20,13 +20,13 @@ static void device_wakeup(struct wfx_dev *wdev)
{
if (!wdev->pdata.gpio_wakeup)
return;
- if (gpiod_get_value(wdev->pdata.gpio_wakeup))
+ if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup))
return;
- gpiod_set_value(wdev->pdata.gpio_wakeup, 1);
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
if (wfx_api_older_than(wdev, 1, 4)) {
if (!completion_done(&wdev->hif.ctrl_ready))
- udelay(2000);
+ usleep_range(2000, 2500);
} else {
// completion.h does not provide any function to wait
// completion without consume it (a kind of
@@ -45,7 +45,7 @@ static void device_release(struct wfx_dev *wdev)
if (!wdev->pdata.gpio_wakeup)
return;
- gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
}
static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
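Two related fixes in the hunk above: the wakeup GPIO may sit behind a slow bus (for instance an I2C expander), so the _cansleep accessors are required, and the udelay() busy-wait is replaced by usleep_range(), the preferred primitive for millisecond-scale delays in non-atomic context. A short sketch of the combined pattern (example_wakeup is a hypothetical name):

#include <linux/gpio/consumer.h>
#include <linux/delay.h>

/* Sketch: wake the chip from process context; the gpio may sleep. */
static void example_wakeup(struct gpio_desc *gpio_wakeup)
{
	if (gpiod_get_value_cansleep(gpio_wakeup))
		return;				/* already awake */

	gpiod_set_value_cansleep(gpio_wakeup, 1);
	usleep_range(2000, 2500);		/* sleep, don't spin for 2 ms */
}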
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
index f8901164c206..dedc3ff58d3e 100644
--- a/drivers/staging/wfx/bus_sdio.c
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -200,25 +200,23 @@ static int wfx_sdio_probe(struct sdio_func *func,
if (ret)
goto err0;
- ret = wfx_sdio_irq_subscribe(bus);
- if (ret)
- goto err1;
-
bus->core = wfx_init_common(&func->dev, &wfx_sdio_pdata,
&wfx_sdio_hwbus_ops, bus);
if (!bus->core) {
ret = -EIO;
- goto err2;
+ goto err1;
}
+ ret = wfx_sdio_irq_subscribe(bus);
+ if (ret)
+ goto err1;
+
ret = wfx_probe(bus->core);
if (ret)
- goto err3;
+ goto err2;
return 0;
-err3:
- wfx_free_common(bus->core);
err2:
wfx_sdio_irq_unsubscribe(bus);
err1:
@@ -234,7 +232,6 @@ static void wfx_sdio_remove(struct sdio_func *func)
struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
wfx_release(bus->core);
- wfx_free_common(bus->core);
wfx_sdio_irq_unsubscribe(bus);
sdio_claim_host(func);
sdio_disable_func(func);
@@ -254,6 +251,7 @@ MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
#ifdef CONFIG_OF
static const struct of_device_id wfx_sdio_of_match[] = {
{ .compatible = "silabs,wfx-sdio" },
+ { .compatible = "silabs,wf200" },
{ },
};
MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index 40bc33035de2..61e99b09decb 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -27,6 +27,8 @@ MODULE_PARM_DESC(gpio_reset, "gpio number for reset. -1 for none.");
#define SET_WRITE 0x7FFF /* usage: and operation */
#define SET_READ 0x8000 /* usage: or operation */
+#define WFX_RESET_INVERTED 1
+
static const struct wfx_platform_data wfx_spi_pdata = {
.file_fw = "wfm_wf200",
.file_pds = "wf200.pds",
@@ -154,6 +156,11 @@ static void wfx_spi_request_rx(struct work_struct *work)
wfx_bh_request_rx(bus->core);
}
+static void wfx_flush_irq_work(void *w)
+{
+ flush_work(w);
+}
+
static size_t wfx_spi_align_size(void *priv, size_t size)
{
// Most SPI controllers avoid DMA if the buffer size is not 32-bit aligned
@@ -201,28 +208,31 @@ static int wfx_spi_probe(struct spi_device *func)
if (!bus->gpio_reset) {
dev_warn(&func->dev, "try to load firmware anyway\n");
} else {
- gpiod_set_value(bus->gpio_reset, 0);
- udelay(100);
- gpiod_set_value(bus->gpio_reset, 1);
- udelay(2000);
+ if (spi_get_device_id(func)->driver_data & WFX_RESET_INVERTED)
+ gpiod_toggle_active_low(bus->gpio_reset);
+ gpiod_set_value_cansleep(bus->gpio_reset, 1);
+ usleep_range(100, 150);
+ gpiod_set_value_cansleep(bus->gpio_reset, 0);
+ usleep_range(2000, 2500);
}
- ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler,
- IRQF_TRIGGER_RISING, "wfx", bus);
- if (ret)
- return ret;
-
INIT_WORK(&bus->request_rx, wfx_spi_request_rx);
bus->core = wfx_init_common(&func->dev, &wfx_spi_pdata,
&wfx_spi_hwbus_ops, bus);
if (!bus->core)
return -EIO;
- ret = wfx_probe(bus->core);
+ ret = devm_add_action_or_reset(&func->dev, wfx_flush_irq_work,
+ &bus->request_rx);
if (ret)
- wfx_free_common(bus->core);
+ return ret;
- return ret;
+ ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler,
+ IRQF_TRIGGER_RISING, "wfx", bus);
+ if (ret)
+ return ret;
+
+ return wfx_probe(bus->core);
}
static int wfx_spi_remove(struct spi_device *func)
@@ -230,11 +240,6 @@ static int wfx_spi_remove(struct spi_device *func)
struct wfx_spi_priv *bus = spi_get_drvdata(func);
wfx_release(bus->core);
- wfx_free_common(bus->core);
- // A few IRQ will be sent during device release. Hopefully, no IRQ
- // should happen after wdev/wvif are released.
- devm_free_irq(&func->dev, func->irq, bus);
- flush_work(&bus->request_rx);
return 0;
}
@@ -244,14 +249,16 @@ static int wfx_spi_remove(struct spi_device *func)
* stripped.
*/
static const struct spi_device_id wfx_spi_id[] = {
- { "wfx-spi", 0 },
+ { "wfx-spi", WFX_RESET_INVERTED },
+ { "wf200", 0 },
{ },
};
MODULE_DEVICE_TABLE(spi, wfx_spi_id);
#ifdef CONFIG_OF
static const struct of_device_id wfx_spi_of_match[] = {
- { .compatible = "silabs,wfx-spi" },
+ { .compatible = "silabs,wfx-spi", .data = (void *)WFX_RESET_INVERTED },
+ { .compatible = "silabs,wf200" },
{ },
};
MODULE_DEVICE_TABLE(of, wfx_spi_of_match);
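The legacy "silabs,wfx-spi" binding described the reset line with inverted polarity, so the driver now keys that quirk off the match data: WFX_RESET_INVERTED is carried in .data / .driver_data and, when set, the descriptor's active-low flag is toggled before the line is driven. A sketch of the lookup, using the names from the diff (the wrapper function itself is hypothetical):

#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

/* Sketch: apply the legacy-polarity quirk taken from the SPI id table. */
static void example_fixup_reset(struct spi_device *func,
				struct gpio_desc *gpio_reset)
{
	if (spi_get_device_id(func)->driver_data & WFX_RESET_INVERTED)
		gpiod_toggle_active_low(gpio_reset);
}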
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index 5d198457c6ce..c5b83fedeb55 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -17,7 +17,7 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev,
const struct hif_ind_rx *arg,
struct sk_buff *skb)
{
- struct ieee80211_hdr *frame = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
size_t hdrlen = ieee80211_hdrlen(frame->frame_control);
size_t iv_len, icv_len;
@@ -62,7 +62,6 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev,
memmove(skb->data + iv_len, skb->data, hdrlen);
skb_pull(skb, iv_len);
return 0;
-
}
void wfx_rx_cb(struct wfx_vif *wvif,
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index 20f4740734f2..42183c70d4df 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -227,7 +227,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif)
memzcmp(policies[i].rates, sizeof(policies[i].rates)))
break;
if (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES) {
- policies[i].uploaded = 1;
+ policies[i].uploaded = true;
memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
spin_unlock_bh(&wvif->tx_policy_cache.lock);
hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
@@ -300,11 +300,11 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
}
static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
- struct ieee80211_sta *sta,
- struct ieee80211_hdr *hdr)
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr)
{
struct wfx_sta_priv *sta_priv =
- sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL;
+ sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL;
const u8 *da = ieee80211_get_DA(hdr);
if (sta_priv && sta_priv->link_id)
@@ -368,7 +368,7 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
}
static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
- struct ieee80211_tx_info *tx_info)
+ struct ieee80211_tx_info *tx_info)
{
bool tx_policy_renew = false;
u8 rate_id;
@@ -430,7 +430,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int queue_id = tx_info->hw_queue;
- size_t offset = (size_t) skb->data & 3;
+ size_t offset = (size_t)skb->data & 3;
int wmsg_len = sizeof(struct hif_msg) +
sizeof(struct hif_req_tx) + offset;
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index 04b2147101b6..c545dd75449b 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -61,7 +61,7 @@ static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
static inline struct hif_req_tx *wfx_skb_txreq(struct sk_buff *skb)
{
struct hif_msg *hif = (struct hif_msg *)skb->data;
- struct hif_req_tx *req = (struct hif_req_tx *) hif->body;
+ struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
return req;
}
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index 5554d6eddbf3..071b71e2a107 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -95,10 +95,6 @@ struct hif_req_reset {
struct hif_reset_flags reset_flags;
} __packed;
-struct hif_cnf_reset {
- u32 status;
-} __packed;
-
struct hif_req_read_mib {
u16 mib_id;
u16 reserved;
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
index 47e04c59ed93..d3a141d95a0e 100644
--- a/drivers/staging/wfx/hwio.c
+++ b/drivers/staging/wfx/hwio.c
@@ -142,7 +142,7 @@ static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf,
goto err;
if (!(cfg & prefetch))
break;
- udelay(200);
+ usleep_range(200, 250);
}
if (i == 20) {
ret = -ETIMEDOUT;
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 84adad64fc30..3c4c240229ad 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -262,6 +262,16 @@ static int wfx_send_pdata_pds(struct wfx_dev *wdev)
return ret;
}
+static void wfx_free_common(void *data)
+{
+ struct wfx_dev *wdev = data;
+
+ mutex_destroy(&wdev->rx_stats_lock);
+ mutex_destroy(&wdev->conf_mutex);
+ wfx_tx_queues_deinit(wdev);
+ ieee80211_free_hw(wdev->hw);
+}
+
struct wfx_dev *wfx_init_common(struct device *dev,
const struct wfx_platform_data *pdata,
const struct hwbus_ops *hwbus_ops,
@@ -332,15 +342,10 @@ struct wfx_dev *wfx_init_common(struct device *dev,
wfx_init_hif_cmd(&wdev->hif_cmd);
wfx_tx_queues_init(wdev);
- return wdev;
-}
+ if (devm_add_action_or_reset(dev, wfx_free_common, wdev))
+ return NULL;
-void wfx_free_common(struct wfx_dev *wdev)
-{
- mutex_destroy(&wdev->rx_stats_lock);
- mutex_destroy(&wdev->conf_mutex);
- wfx_tx_queues_deinit(wdev);
- ieee80211_free_hw(wdev->hw);
+ return wdev;
}
int wfx_probe(struct wfx_dev *wdev)
@@ -420,7 +425,7 @@ int wfx_probe(struct wfx_dev *wdev)
"enable 'quiescent' power mode with gpio %d and PDS file %s\n",
desc_to_gpio(wdev->pdata.gpio_wakeup),
wdev->pdata.file_pds);
- gpiod_set_value(wdev->pdata.gpio_wakeup, 1);
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
control_reg_write(wdev, 0);
hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT);
} else {
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
index 875f8c227803..9c9410072def 100644
--- a/drivers/staging/wfx/main.h
+++ b/drivers/staging/wfx/main.h
@@ -34,7 +34,6 @@ struct wfx_dev *wfx_init_common(struct device *dev,
const struct wfx_platform_data *pdata,
const struct hwbus_ops *hwbus_ops,
void *hwbus_priv);
-void wfx_free_common(struct wfx_dev *wdev);
int wfx_probe(struct wfx_dev *wdev);
void wfx_release(struct wfx_dev *wdev);
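wfx_free_common() is no longer exported because wfx_init_common() registers it with devm_add_action_or_reset(): the teardown then runs automatically in reverse probe order, and immediately if registration itself fails, which is what lets the bus drivers drop their explicit wfx_free_common() calls on the error paths. The generic shape of the pattern, as a sketch with hypothetical names:

#include <linux/device.h>

static void example_cleanup(void *data)
{
	/* undo whatever example_init() set up */
}

static int example_init(struct device *dev)
{
	/* ... allocate and initialize state ... */

	/* On failure this calls example_cleanup() before returning. */
	return devm_add_action_or_reset(dev, example_cleanup, NULL);
}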
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 0bcc61feee1d..39d9127ce4b9 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -130,12 +130,12 @@ static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
spin_lock_bh(&queue->queue.lock);
while ((item = __skb_dequeue(&queue->queue)) != NULL)
skb_queue_head(gc_list, item);
- spin_lock_bh(&stats->pending.lock);
+ spin_lock_nested(&stats->pending.lock, 1);
for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
stats->link_map_cache[i] -= queue->link_map_cache[i];
queue->link_map_cache[i] = 0;
}
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock(&stats->pending.lock);
spin_unlock_bh(&queue->queue.lock);
}
@@ -207,9 +207,9 @@ void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
++queue->link_map_cache[tx_priv->link_id];
- spin_lock_bh(&stats->pending.lock);
+ spin_lock_nested(&stats->pending.lock, 1);
++stats->link_map_cache[tx_priv->link_id];
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock(&stats->pending.lock);
spin_unlock_bh(&queue->queue.lock);
}
@@ -237,11 +237,11 @@ static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
__skb_unlink(skb, &queue->queue);
--queue->link_map_cache[tx_priv->link_id];
- spin_lock_bh(&stats->pending.lock);
+ spin_lock_nested(&stats->pending.lock, 1);
__skb_queue_tail(&stats->pending, skb);
if (!--stats->link_map_cache[tx_priv->link_id])
wakeup_stats = true;
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock(&stats->pending.lock);
}
spin_unlock_bh(&queue->queue.lock);
if (wakeup_stats)
@@ -259,10 +259,10 @@ int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
spin_lock_bh(&queue->queue.lock);
++queue->link_map_cache[tx_priv->link_id];
- spin_lock_bh(&stats->pending.lock);
+ spin_lock_nested(&stats->pending.lock, 1);
++stats->link_map_cache[tx_priv->link_id];
__skb_unlink(skb, &stats->pending);
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock(&stats->pending.lock);
__skb_queue_tail(&queue->queue, skb);
spin_unlock_bh(&queue->queue.lock);
return 0;
@@ -481,7 +481,6 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
struct wfx_queue *vif_queue = NULL;
u32 tx_allowed_mask = 0;
u32 vif_tx_allowed_mask = 0;
- const struct wfx_tx_priv *tx_priv = NULL;
struct wfx_vif *wvif;
int not_found;
int burst;
@@ -541,8 +540,7 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
if (!skb)
continue;
- tx_priv = wfx_skb_tx_priv(skb);
- hif = (struct hif_msg *) skb->data;
+ hif = (struct hif_msg *)skb->data;
wvif = wdev_to_wvif(wdev, hif->interface);
WARN_ON(!wvif);
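About the spin_lock_nested() changes in queue.c above: the pending-queue lock is always taken while a per-queue lock is already held, and both are sk_buff_head locks of the same lock class, so a plain nested spin_lock_bh() would trip lockdep's recursive-locking check. Passing subclass 1 tells lockdep the nesting is intentional, and the plain (non-_bh) form is enough because the outer _bh lock already disabled bottom halves. A sketch of the idiom (function name hypothetical):

#include <linux/spinlock.h>
#include <linux/skbuff.h>

/* Sketch: nested acquisition of two same-class skb queue locks. */
static void example_move_accounting(struct sk_buff_head *outer,
				    struct sk_buff_head *inner)
{
	spin_lock_bh(&outer->lock);		/* disables BH */
	spin_lock_nested(&inner->lock, 1);	/* subclass 1 for lockdep */

	/* ... update counters protected by both locks ... */

	spin_unlock(&inner->lock);
	spin_unlock_bh(&outer->lock);
}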
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index 6e1e50048651..9aa14331affd 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -57,8 +57,10 @@ static int send_scan_req(struct wfx_vif *wvif,
wvif->scan_abort = false;
reinit_completion(&wvif->scan_complete);
timeout = hif_scan(wvif, req, start_idx, i - start_idx);
- if (timeout < 0)
+ if (timeout < 0) {
+ wfx_tx_unlock(wvif->wdev);
return timeout;
+ }
ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index af4f4bbd0572..9d430346a58b 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -293,7 +293,6 @@ int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
int old_uapsd = wvif->uapsd_mask;
- int ret = 0;
WARN_ON(queue >= hw->queues);
@@ -307,7 +306,7 @@ int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wfx_update_pm(wvif);
}
mutex_unlock(&wdev->conf_mutex);
- return ret;
+ return 0;
}
int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
@@ -909,7 +908,7 @@ static void wfx_update_tim_work(struct work_struct *work)
int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
struct wfx_dev *wdev = hw->priv;
- struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *)&sta->drv_priv;
struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
schedule_work(&wvif->update_tim_work);
diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig
index 59e58550d139..80c92e8bf8a5 100644
--- a/drivers/staging/wilc1000/Kconfig
+++ b/drivers/staging/wilc1000/Kconfig
@@ -2,6 +2,10 @@
config WILC1000
tristate
help
+ Add support for the Atmel WILC1000 802.11 b/g/n SoC.
+ This provides Wi-Fi over an SDIO or SPI interface, and
+ is usually found in IoT devices.
+
This module only supports IEEE 802.11n WiFi.
config WILC1000_SDIO
@@ -22,6 +26,7 @@ config WILC1000_SPI
tristate "Atmel WILC1000 SPI (WiFi only)"
depends on CFG80211 && INET && SPI
select WILC1000
+ select CRC7
help
This module adds support for the SPI interface of adapters using
WILC1000 chipset. The Atmel WILC1000 has a Serial Peripheral
diff --git a/drivers/staging/wilc1000/cfg80211.c b/drivers/staging/wilc1000/cfg80211.c
index 4863e516ff13..4bdcbc5fd2fd 100644
--- a/drivers/staging/wilc1000/cfg80211.c
+++ b/drivers/staging/wilc1000/cfg80211.c
@@ -6,29 +6,17 @@
#include "cfg80211.h"
-#define FRAME_TYPE_ID 0
-#define ACTION_CAT_ID 24
-#define ACTION_SUBTYPE_ID 25
-#define P2P_PUB_ACTION_SUBTYPE 30
-
-#define ACTION_FRAME 0xd0
-#define GO_INTENT_ATTR_ID 0x04
-#define CHANLIST_ATTR_ID 0x0b
-#define OPERCHAN_ATTR_ID 0x11
-#define PUB_ACTION_ATTR_ID 0x04
-#define P2PELEM_ATTR_ID 0xdd
-
#define GO_NEG_REQ 0x00
#define GO_NEG_RSP 0x01
#define GO_NEG_CONF 0x02
#define P2P_INV_REQ 0x03
#define P2P_INV_RSP 0x04
-#define PUBLIC_ACT_VENDORSPEC 0x09
-#define GAS_INITIAL_REQ 0x0a
-#define GAS_INITIAL_RSP 0x0b
#define WILC_INVALID_CHANNEL 0
+/* Operation at 2.4 GHz with channels 1-13 */
+#define WILC_WLAN_OPERATING_CLASS_2_4GHZ 0x51
+
static const struct ieee80211_txrx_stypes
wilc_wfi_cfg80211_mgmt_types[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
@@ -67,8 +55,50 @@ struct wilc_p2p_mgmt_data {
u8 *buff;
};
-static const u8 p2p_oui[] = {0x50, 0x6f, 0x9A, 0x09};
-static const u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
+struct wilc_p2p_pub_act_frame {
+ u8 category;
+ u8 action;
+ u8 oui[3];
+ u8 oui_type;
+ u8 oui_subtype;
+ u8 dialog_token;
+ u8 elem[];
+} __packed;
+
+struct wilc_vendor_specific_ie {
+ u8 tag_number;
+ u8 tag_len;
+ u8 oui[3];
+ u8 oui_type;
+ u8 attr[];
+} __packed;
+
+struct wilc_attr_entry {
+ u8 attr_type;
+ __le16 attr_len;
+ u8 val[];
+} __packed;
+
+struct wilc_attr_oper_ch {
+ u8 attr_type;
+ __le16 attr_len;
+ u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
+ u8 op_class;
+ u8 op_channel;
+} __packed;
+
+struct wilc_attr_ch_list {
+ u8 attr_type;
+ __le16 attr_len;
+ u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
+ u8 elem[];
+} __packed;
+
+struct wilc_ch_list_elem {
+ u8 op_class;
+ u8 no_of_channels;
+ u8 ch_list[];
+} __packed;
static void cfg_scan_result(enum scan_event scan_event,
struct wilc_rcvd_net_info *info, void *user_void)
@@ -172,9 +202,6 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
} else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
u16 reason = 0;
- priv->p2p.local_random = 0x01;
- priv->p2p.recv_random = 0x00;
- priv->p2p.is_wilc_ie = false;
eth_zero_addr(priv->associated_bss);
wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE);
@@ -446,9 +473,6 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev,
wilc->sta_ch = WILC_INVALID_CHANNEL;
wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE);
- priv->p2p.local_random = 0x01;
- priv->p2p.recv_random = 0x00;
- priv->p2p.is_wilc_ie = false;
priv->hif_drv->p2p_timeout = 0;
ret = wilc_disconnect(vif);
@@ -864,7 +888,6 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
u32 i;
- int ret = 0;
struct wilc_vif *vif = netdev_priv(netdev);
struct wilc_priv *priv = &vif->priv;
@@ -877,21 +900,20 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
}
}
- if (i < priv->pmkid_list.numpmkid && priv->pmkid_list.numpmkid > 0) {
- for (; i < (priv->pmkid_list.numpmkid - 1); i++) {
- memcpy(priv->pmkid_list.pmkidlist[i].bssid,
- priv->pmkid_list.pmkidlist[i + 1].bssid,
- ETH_ALEN);
- memcpy(priv->pmkid_list.pmkidlist[i].pmkid,
- priv->pmkid_list.pmkidlist[i + 1].pmkid,
- WLAN_PMKID_LEN);
- }
- priv->pmkid_list.numpmkid--;
- } else {
- ret = -EINVAL;
+ if (i == priv->pmkid_list.numpmkid)
+ return -EINVAL;
+
+ for (; i < (priv->pmkid_list.numpmkid - 1); i++) {
+ memcpy(priv->pmkid_list.pmkidlist[i].bssid,
+ priv->pmkid_list.pmkidlist[i + 1].bssid,
+ ETH_ALEN);
+ memcpy(priv->pmkid_list.pmkidlist[i].pmkid,
+ priv->pmkid_list.pmkidlist[i + 1].pmkid,
+ WLAN_PMKID_LEN);
}
+ priv->pmkid_list.numpmkid--;
- return ret;
+ return 0;
}
static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
@@ -903,113 +925,50 @@ static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
return 0;
}
-static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u8 ch_list_attr_idx,
- u8 op_ch_attr_idx, u8 sta_ch)
-{
- int i = 0;
- int j = 0;
-
- if (ch_list_attr_idx) {
- u8 limit = ch_list_attr_idx + 3 + buf[ch_list_attr_idx + 1];
-
- for (i = ch_list_attr_idx + 3; i < limit; i++) {
- if (buf[i] == 0x51) {
- for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
- buf[j] = sta_ch;
- break;
- }
- }
- }
-
- if (op_ch_attr_idx) {
- buf[op_ch_attr_idx + 6] = 0x51;
- buf[op_ch_attr_idx + 7] = sta_ch;
- }
-}
-
-static void wilc_wfi_cfg_parse_rx_action(u8 *buf, u32 len, u8 sta_ch)
+static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch)
{
+ struct wilc_attr_entry *e;
+ struct wilc_attr_ch_list *ch_list;
+ struct wilc_attr_oper_ch *op_ch;
u32 index = 0;
- u8 op_channel_attr_index = 0;
- u8 channel_list_attr_index = 0;
-
- while (index < len) {
- if (buf[index] == GO_INTENT_ATTR_ID)
- buf[index + 3] = (buf[index + 3] & 0x01) | (0x00 << 1);
-
- if (buf[index] == CHANLIST_ATTR_ID)
- channel_list_attr_index = index;
- else if (buf[index] == OPERCHAN_ATTR_ID)
- op_channel_attr_index = index;
- index += buf[index + 1] + 3;
- }
- if (sta_ch != WILC_INVALID_CHANNEL)
- wilc_wfi_cfg_parse_ch_attr(buf, channel_list_attr_index,
- op_channel_attr_index, sta_ch);
-}
+ u8 ch_list_idx = 0;
+ u8 op_ch_idx = 0;
-static void wilc_wfi_cfg_parse_tx_action(u8 *buf, u32 len, bool oper_ch,
- u8 iftype, u8 sta_ch)
-{
- u32 index = 0;
- u8 op_channel_attr_index = 0;
- u8 channel_list_attr_index = 0;
-
- while (index < len) {
- if (buf[index] == GO_INTENT_ATTR_ID) {
- buf[index + 3] = (buf[index + 3] & 0x01) | (0x0f << 1);
+ if (sta_ch == WILC_INVALID_CHANNEL)
+ return;
+ while (index + sizeof(*e) <= len) {
+ e = (struct wilc_attr_entry *)&buf[index];
+ if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST)
+ ch_list_idx = index;
+ else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL)
+ op_ch_idx = index;
+ if (ch_list_idx && op_ch_idx)
break;
- }
-
- if (buf[index] == CHANLIST_ATTR_ID)
- channel_list_attr_index = index;
- else if (buf[index] == OPERCHAN_ATTR_ID)
- op_channel_attr_index = index;
- index += buf[index + 1] + 3;
+ index += le16_to_cpu(e->attr_len) + sizeof(*e);
}
- if (sta_ch != WILC_INVALID_CHANNEL && oper_ch)
- wilc_wfi_cfg_parse_ch_attr(buf, channel_list_attr_index,
- op_channel_attr_index, sta_ch);
-}
-static void wilc_wfi_cfg_parse_rx_vendor_spec(struct wilc_priv *priv, u8 *buff,
- u32 size)
-{
- int i;
- u8 subtype;
- struct wilc_vif *vif = netdev_priv(priv->dev);
-
- subtype = buff[P2P_PUB_ACTION_SUBTYPE];
- if ((subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) &&
- !priv->p2p.is_wilc_ie) {
- for (i = P2P_PUB_ACTION_SUBTYPE; i < size; i++) {
- if (!memcmp(p2p_vendor_spec, &buff[i], 6)) {
- priv->p2p.recv_random = buff[i + 6];
- priv->p2p.is_wilc_ie = true;
+ if (ch_list_idx) {
+ u16 attr_size;
+ struct wilc_ch_list_elem *e;
+ int i;
+
+ ch_list = (struct wilc_attr_ch_list *)&buf[ch_list_idx];
+ attr_size = le16_to_cpu(ch_list->attr_len);
+ for (i = 0; i < attr_size;) {
+ e = (struct wilc_ch_list_elem *)(ch_list->elem + i);
+ if (e->op_class == WILC_WLAN_OPERATING_CLASS_2_4GHZ) {
+ memset(e->ch_list, sta_ch, e->no_of_channels);
break;
}
+ i += e->no_of_channels;
}
}
- if (priv->p2p.local_random <= priv->p2p.recv_random) {
- netdev_dbg(vif->ndev,
- "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n",
- priv->p2p.local_random, priv->p2p.recv_random);
- return;
- }
-
- if (subtype == GO_NEG_REQ || subtype == GO_NEG_RSP ||
- subtype == P2P_INV_REQ || subtype == P2P_INV_RSP) {
- for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < size; i++) {
- if (buff[i] == P2PELEM_ATTR_ID &&
- !(memcmp(p2p_oui, &buff[i + 2], 4))) {
- wilc_wfi_cfg_parse_rx_action(&buff[i + 6],
- size - (i + 6),
- vif->wilc->sta_ch);
- break;
- }
- }
+ if (op_ch_idx) {
+ op_ch = (struct wilc_attr_oper_ch *)&buf[op_ch_idx];
+ op_ch->op_class = WILC_WLAN_OPERATING_CLASS_2_4GHZ;
+ op_ch->op_channel = sta_ch;
}
}
@@ -1018,17 +977,22 @@ void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size)
struct wilc *wl = vif->wilc;
struct wilc_priv *priv = &vif->priv;
struct host_if_drv *wfi_drv = priv->hif_drv;
+ struct ieee80211_mgmt *mgmt;
+ struct wilc_vendor_specific_ie *p;
+ struct wilc_p2p_pub_act_frame *d;
+ int ie_offset = offsetof(struct ieee80211_mgmt, u) + sizeof(*d);
+ const u8 *vendor_ie;
u32 header, pkt_offset;
s32 freq;
- __le16 fc;
header = get_unaligned_le32(buff - HOST_HDR_OFFSET);
- pkt_offset = GET_PKT_OFFSET(header);
+ pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
bool ack = false;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)buff;
- if (buff[FRAME_TYPE_ID] == IEEE80211_STYPE_PROBE_RESP ||
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
pkt_offset & IS_MGMT_STATUS_SUCCES)
ack = true;
@@ -1039,44 +1003,33 @@ void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size)
freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ);
- fc = ((struct ieee80211_hdr *)buff)->frame_control;
- if (!ieee80211_is_action(fc)) {
- cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0);
- return;
- }
+ mgmt = (struct ieee80211_mgmt *)buff;
+ if (!ieee80211_is_action(mgmt->frame_control))
+ goto out_rx_mgmt;
if (priv->cfg_scanning &&
time_after_eq(jiffies, (unsigned long)wfi_drv->p2p_timeout)) {
netdev_dbg(vif->ndev, "Receiving action wrong ch\n");
return;
}
- if (buff[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
- u8 subtype = buff[P2P_PUB_ACTION_SUBTYPE];
- switch (buff[ACTION_SUBTYPE_ID]) {
- case GAS_INITIAL_REQ:
- case GAS_INITIAL_RSP:
- break;
+ if (!ieee80211_is_public_action((struct ieee80211_hdr *)buff, size))
+ goto out_rx_mgmt;
- case PUBLIC_ACT_VENDORSPEC:
- if (!memcmp(p2p_oui, &buff[ACTION_SUBTYPE_ID + 1], 4))
- wilc_wfi_cfg_parse_rx_vendor_spec(priv, buff,
- size);
+ d = (struct wilc_p2p_pub_act_frame *)(&mgmt->u.action);
+ if (d->oui_subtype != GO_NEG_REQ && d->oui_subtype != GO_NEG_RSP &&
+ d->oui_subtype != P2P_INV_REQ && d->oui_subtype != P2P_INV_RSP)
+ goto out_rx_mgmt;
- if ((subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) &&
- priv->p2p.is_wilc_ie)
- size -= 7;
+ vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ buff + ie_offset, size - ie_offset);
+ if (!vendor_ie)
+ goto out_rx_mgmt;
- break;
-
- default:
- netdev_dbg(vif->ndev,
- "%s: Not handled action frame type:%x\n",
- __func__, buff[ACTION_SUBTYPE_ID]);
- break;
- }
- }
+ p = (struct wilc_vendor_specific_ie *)vendor_ie;
+ wilc_wfi_cfg_parse_ch_attr(p->attr, p->tag_len - 4, vif->wilc->sta_ch);
+out_rx_mgmt:
cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0);
}
@@ -1156,57 +1109,6 @@ static int cancel_remain_on_channel(struct wiphy *wiphy,
return wilc_listen_state_expired(vif, cookie);
}
-static void wilc_wfi_cfg_tx_vendor_spec(struct wilc_priv *priv,
- struct wilc_p2p_mgmt_data *mgmt_tx,
- struct cfg80211_mgmt_tx_params *params,
- u8 iftype, u32 buf_len)
-{
- const u8 *buf = params->buf;
- size_t len = params->len;
- u32 i;
- u8 subtype = buf[P2P_PUB_ACTION_SUBTYPE];
- struct wilc_vif *vif = netdev_priv(priv->dev);
-
- if (subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) {
- if (priv->p2p.local_random == 1 &&
- priv->p2p.recv_random < priv->p2p.local_random) {
- get_random_bytes(&priv->p2p.local_random, 1);
- priv->p2p.local_random++;
- }
- }
-
- if (priv->p2p.local_random <= priv->p2p.recv_random ||
- !(subtype == GO_NEG_REQ || subtype == GO_NEG_RSP ||
- subtype == P2P_INV_REQ || subtype == P2P_INV_RSP))
- return;
-
- for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
- if (buf[i] == P2PELEM_ATTR_ID &&
- !memcmp(p2p_oui, &buf[i + 2], 4)) {
- bool oper_ch = false;
- u8 *tx_buff = &mgmt_tx->buff[i + 6];
-
- if (subtype == P2P_INV_REQ || subtype == P2P_INV_RSP)
- oper_ch = true;
-
- wilc_wfi_cfg_parse_tx_action(tx_buff, len - (i + 6),
- oper_ch, iftype,
- vif->wilc->sta_ch);
-
- break;
- }
- }
-
- if (subtype != P2P_INV_REQ && subtype != P2P_INV_RSP) {
- int vendor_spec_len = sizeof(p2p_vendor_spec);
-
- memcpy(&mgmt_tx->buff[len], p2p_vendor_spec,
- vendor_spec_len);
- mgmt_tx->buff[len + vendor_spec_len] = priv->p2p.local_random;
- mgmt_tx->size = buf_len;
- }
-}
-
static int mgmt_tx(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
@@ -1221,8 +1123,10 @@ static int mgmt_tx(struct wiphy *wiphy,
struct wilc_vif *vif = netdev_priv(wdev->netdev);
struct wilc_priv *priv = &vif->priv;
struct host_if_drv *wfi_drv = priv->hif_drv;
- u32 buf_len = len + sizeof(p2p_vendor_spec) +
- sizeof(priv->p2p.local_random);
+ struct wilc_vendor_specific_ie *p;
+ struct wilc_p2p_pub_act_frame *d;
+ int ie_offset = offsetof(struct ieee80211_mgmt, u) + sizeof(*d);
+ const u8 *vendor_ie;
int ret = 0;
*cookie = prandom_u32();
@@ -1238,14 +1142,13 @@ static int mgmt_tx(struct wiphy *wiphy,
goto out;
}
- mgmt_tx->buff = kmalloc(buf_len, GFP_KERNEL);
+ mgmt_tx->buff = kmemdup(buf, len, GFP_KERNEL);
if (!mgmt_tx->buff) {
ret = -ENOMEM;
kfree(mgmt_tx);
goto out;
}
- memcpy(mgmt_tx->buff, buf, len);
mgmt_tx->size = len;
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
@@ -1254,39 +1157,29 @@ static int mgmt_tx(struct wiphy *wiphy,
goto out_txq_add_pkt;
}
- if (!ieee80211_is_action(mgmt->frame_control))
- goto out_txq_add_pkt;
+ if (!ieee80211_is_public_action((struct ieee80211_hdr *)buf, len))
+ goto out_set_timeout;
- if (buf[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
- if (buf[ACTION_SUBTYPE_ID] != PUBLIC_ACT_VENDORSPEC ||
- buf[P2P_PUB_ACTION_SUBTYPE] != GO_NEG_CONF) {
- wilc_set_mac_chnl_num(vif, chan->hw_value);
- vif->wilc->op_ch = chan->hw_value;
- }
- switch (buf[ACTION_SUBTYPE_ID]) {
- case GAS_INITIAL_REQ:
- case GAS_INITIAL_RSP:
- break;
+ d = (struct wilc_p2p_pub_act_frame *)(&mgmt->u.action);
+ if (d->oui_type != WLAN_OUI_TYPE_WFA_P2P ||
+ d->oui_subtype != GO_NEG_CONF) {
+ wilc_set_mac_chnl_num(vif, chan->hw_value);
+ vif->wilc->op_ch = chan->hw_value;
+ }
- case PUBLIC_ACT_VENDORSPEC:
- if (!memcmp(p2p_oui, &buf[ACTION_SUBTYPE_ID + 1], 4))
- wilc_wfi_cfg_tx_vendor_spec(priv, mgmt_tx,
- params, vif->iftype,
- buf_len);
- else
- netdev_dbg(vif->ndev,
- "Not a P2P public action frame\n");
+ if (d->oui_subtype != P2P_INV_REQ && d->oui_subtype != P2P_INV_RSP)
+ goto out_set_timeout;
- break;
+ vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt_tx->buff + ie_offset,
+ len - ie_offset);
+ if (!vendor_ie)
+ goto out_set_timeout;
- default:
- netdev_dbg(vif->ndev,
- "%s: Not handled action frame type:%x\n",
- __func__, buf[ACTION_SUBTYPE_ID]);
- break;
- }
- }
+ p = (struct wilc_vendor_specific_ie *)vendor_ie;
+ wilc_wfi_cfg_parse_ch_attr(p->attr, p->tag_len - 4, vif->wilc->sta_ch);
+out_set_timeout:
wfi_drv->p2p_timeout = (jiffies + msecs_to_jiffies(wait));
out_txq_add_pkt:
@@ -1400,10 +1293,6 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
struct wilc_vif *vif = netdev_priv(dev);
struct wilc_priv *priv = &vif->priv;
- priv->p2p.local_random = 0x01;
- priv->p2p.recv_random = 0x00;
- priv->p2p.is_wilc_ie = false;
-
switch (type) {
case NL80211_IFTYPE_STATION:
vif->connecting = false;
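The rewritten cfg80211.c parser above replaces hard-coded byte offsets with packed TLV views: each P2P attribute is a one-byte type, a little-endian two-byte length, and a payload, so the walk advances by attr_len plus the header size. A hedged sketch of the same walk over a raw buffer, reusing struct wilc_attr_entry from the diff (the finder function itself is hypothetical, kernel context assumed):

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: find one attribute in a P2P attribute blob (type/len/value). */
static const struct wilc_attr_entry *example_find_attr(const u8 *buf,
						       u32 len, u8 type)
{
	u32 index = 0;

	while (index + sizeof(struct wilc_attr_entry) <= len) {
		const struct wilc_attr_entry *e =
			(const struct wilc_attr_entry *)&buf[index];

		if (e->attr_type == type)
			return e;
		index += le16_to_cpu(e->attr_len) + sizeof(*e);
	}
	return NULL;
}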
diff --git a/drivers/staging/wilc1000/hif.c b/drivers/staging/wilc1000/hif.c
index 658790bd465b..6c7de2f8d3f2 100644
--- a/drivers/staging/wilc1000/hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -801,7 +801,7 @@ static void wilc_hif_pack_sta_param(u8 *cur_byte, const u8 *mac,
if (params->ht_capa) {
*cur_byte++ = true;
- memcpy(cur_byte, &params->ht_capa,
+ memcpy(cur_byte, params->ht_capa,
sizeof(struct ieee80211_ht_cap));
} else {
*cur_byte++ = false;
@@ -861,9 +861,8 @@ static int wilc_handle_roc_expired(struct wilc_vif *vif, u64 cookie)
struct wid wid;
int result;
struct host_if_drv *hif_drv = vif->hif_drv;
- struct wilc_priv *priv = wdev_priv(vif->ndev->ieee80211_ptr);
- if (priv->p2p_listen_state) {
+ if (vif->priv.p2p_listen_state) {
remain_on_chan_flag = false;
wid.id = WID_REMAIN_ON_CHAN;
wid.type = WID_STR;
diff --git a/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt b/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt
deleted file mode 100644
index da5235950a70..000000000000
--- a/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-* Microchip WILC wireless SDIO device
-
-The wilc1000 chips can be connected via SDIO. The node is used to specifiy
-child node to the SDIO controller that connects the device to the system.
-
-Required properties:
-- compatible : Should be "microchip,wilc1000-spi"
-- irq-gpios : Connect to a host IRQ
-- reg : Slot ID used in the controller
-
-Optional:
-- bus-width : Number of data lines wired up the slot. Default 1 bit.
-- rtc_clk : Clock connected on the rtc clock line. Must be assigned
- a frequency with assigned-clocks property, and must be
- connected to a clock provider.
-
-Examples:
-mmc1: mmc@fc000000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3>;
- non-removable;
- vmmc-supply = <&vcc_mmc1_reg>;
- vqmmc-supply = <&vcc_3v3_reg>;
- status = "okay";
-
- wilc_sdio@0 {
- compatible = "microchip,wilc1000-sdio";
- irq-gpios = <&pioC 27 0>;
- clocks = <&pck1>;
- clock-names = "rtc_clk";
- assigned-clocks = <&pck1>;
- assigned-clock-rates = <32768>;
- status = "okay";
- reg = <0>;
- bus-width = <4>;
- }
- };
-}
diff --git a/drivers/staging/wilc1000/microchip,wilc1000,spi.txt b/drivers/staging/wilc1000/microchip,wilc1000,spi.txt
deleted file mode 100644
index 34236932dbb6..000000000000
--- a/drivers/staging/wilc1000/microchip,wilc1000,spi.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-* Microchip WILC wireless SPI device
-
-The wilc1000 chips can be connected via SPI. This document describes
-the binding for the SPI connected module.
-
-Required properties:
-- compatible : Should be "microchip,wilc1000-spi"
-- spi-max-frequency : Maximum SPI clocking speed of device in Hz
-- reg : Chip select address of device
-- irq-gpios : Connect to a host IRQ
-
-Optional:
-- rtc_clk : Clock connected on the rtc clock line. Must be assigned
- a frequency with assigned-clocks property, and must be
- connected to a clock provider.
-
-Examples:
-
-spi1: spi@fc018000 {
- cs-gpios = <&pioB 21 0>;
- status = "okay";
-
- wilc_spi@0 {
- compatible = "microchip,wilc1000-spi";
- spi-max-frequency = <48000000>;
- reg = <0>;
- irq-gpios = <&pioC 27 0>;
- clocks = <&pck1>;
- clock-names = "rtc_clk";
- assigned-clocks = <&pck1>;
- assigned-clock-rates = <32768>;
- status = "okay";
- };
-};
diff --git a/drivers/staging/wilc1000/microchip,wilc1000.yaml b/drivers/staging/wilc1000/microchip,wilc1000.yaml
new file mode 100644
index 000000000000..2c320eb2a8c4
--- /dev/null
+++ b/drivers/staging/wilc1000/microchip,wilc1000.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/wireless/microchip,wilc1000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip WILC wireless devicetree bindings
+
+maintainers:
+ - Adham Abozaeid <adham.abozaeid@microchip.com>
+ - Ajay Singh <ajay.kathat@microchip.com>
+
+description:
+ The wilc1000 chips can be connected via SPI or SDIO. This document
+ describes the binding to connect wilc devices.
+
+properties:
+ compatible:
+ const: microchip,wilc1000
+
+ spi-max-frequency: true
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description: phandle to the clock connected on rtc clock line.
+ maxItems: 1
+
+ clock-names:
+ const: rtc
+
+required:
+ - compatible
+ - interrupts
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ wifi@0 {
+ compatible = "microchip,wilc1000";
+ spi-max-frequency = <48000000>;
+ reg = <0>;
+ interrupt-parent = <&pioC>;
+ interrupts = <27 0>;
+ clocks = <&pck1>;
+ clock-names = "rtc";
+ };
+ };
+
+ - |
+ mmc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3>;
+ non-removable;
+ vmmc-supply = <&vcc_mmc1_reg>;
+ vqmmc-supply = <&vcc_3v3_reg>;
+ bus-width = <4>;
+ wifi@0 {
+ compatible = "microchip,wilc1000";
+ reg = <0>;
+ interrupt-parent = <&pioC>;
+ interrupts = <27 0>;
+ clocks = <&pck1>;
+ clock-names = "rtc";
+ };
+ };
diff --git a/drivers/staging/wilc1000/mon.c b/drivers/staging/wilc1000/mon.c
index 48ac33f06f63..60331417bd98 100644
--- a/drivers/staging/wilc1000/mon.c
+++ b/drivers/staging/wilc1000/mon.c
@@ -40,7 +40,7 @@ void wilc_wfi_monitor_rx(struct net_device *mon_dev, u8 *buff, u32 size)
* The packet offset field contain info about what type of management
* the frame we are dealing with and ack status
*/
- pkt_offset = GET_PKT_OFFSET(header);
+ pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
/* hostapd callback mgmt frame */
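Here and in cfg80211.c, the open-coded GET_PKT_OFFSET() macro (removed from netdev.h further down; it was ((a) >> 22) & 0x1ff) gives way to FIELD_GET() over a named mask. Assuming WILC_PKT_HDR_OFFSET_FIELD is defined as GENMASK(30, 22), the two are equivalent, and the mask now documents the bit positions. A sketch (the EXAMPLE_ macro name marks the assumed definition):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_PKT_HDR_OFFSET_FIELD	GENMASK(30, 22)	/* assumed definition */

static inline u32 example_pkt_offset(u32 header)
{
	/* identical to the old ((header) >> 22) & 0x1ff */
	return FIELD_GET(EXAMPLE_PKT_HDR_OFFSET_FIELD, header);
}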
diff --git a/drivers/staging/wilc1000/netdev.c b/drivers/staging/wilc1000/netdev.c
index fce5bf2d82fa..f94a17babd12 100644
--- a/drivers/staging/wilc1000/netdev.c
+++ b/drivers/staging/wilc1000/netdev.c
@@ -46,29 +46,21 @@ static irqreturn_t isr_bh_routine(int irq, void *userdata)
static int init_irq(struct net_device *dev)
{
- int ret = 0;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wl = vif->wilc;
-
- ret = gpiod_direction_input(wl->gpio_irq);
- if (ret) {
- netdev_err(dev, "could not obtain gpio for WILC_INTR\n");
- return ret;
- }
-
- wl->dev_irq_num = gpiod_to_irq(wl->gpio_irq);
+ int ret;
ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine,
isr_bh_routine,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"WILC_IRQ", dev);
- if (ret < 0)
- netdev_err(dev, "Failed to request IRQ\n");
- else
- netdev_dbg(dev, "IRQ request succeeded IRQ-NUM= %d\n",
- wl->dev_irq_num);
+ if (ret) {
+ netdev_err(dev, "Failed to request IRQ [%d]\n", ret);
+ return ret;
+ }
+ netdev_dbg(dev, "IRQ request succeeded IRQ-NUM= %d\n", wl->dev_irq_num);
- return ret;
+ return 0;
}
static void deinit_irq(struct net_device *dev)
@@ -501,7 +493,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
if (ret)
goto fail_wilc_wlan;
- if (wl->gpio_irq && init_irq(dev)) {
+ if (wl->dev_irq_num && init_irq(dev)) {
ret = -EIO;
goto fail_threads;
}
@@ -577,7 +569,6 @@ static int wilc_mac_open(struct net_device *ndev)
{
struct wilc_vif *vif = netdev_priv(ndev);
struct wilc *wl = vif->wilc;
- struct wilc_priv *priv = wdev_priv(vif->ndev->ieee80211_ptr);
unsigned char mac_add[ETH_ALEN] = {0};
int ret = 0;
@@ -621,7 +612,6 @@ static int wilc_mac_open(struct net_device *ndev)
vif->frame_reg[1].reg);
netif_wake_queue(ndev);
wl->open_ifcs++;
- priv->p2p.local_random = 0x01;
vif->mac_opened = 1;
return 0;
}
@@ -804,8 +794,10 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
u16 type = le16_to_cpup((__le16 *)buff);
if (vif->priv.p2p_listen_state &&
- ((type == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
- (type == vif->frame_reg[1].type && vif->frame_reg[1].reg)))
+ ((type == vif->frame_reg[0].type &&
+ vif->frame_reg[0].reg) ||
+ (type == vif->frame_reg[1].type &&
+ vif->frame_reg[1].reg)))
wilc_wfi_p2p_rx(vif, buff, size);
if (vif->monitor_flag)
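
The init_irq() rework above drops the manual gpiod handling and requests a threaded interrupt on wl->dev_irq_num directly, leaving GPIO-to-IRQ translation to the platform code. For reference, a minimal sketch of that split-handler pattern; kernel context, with placeholder handler bodies rather than the wilc1000 ones.

#include <linux/interrupt.h>

static irqreturn_t demo_hard_handler(int irq, void *dev_id)
{
        /* hard-IRQ context: no sleeping, just hand off to the thread */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_handler(int irq, void *dev_id)
{
        /* process context: sleeping SPI/SDIO transfers are allowed here */
        return IRQ_HANDLED;
}

static int demo_request(int irq, void *priv)
{
        return request_threaded_irq(irq, demo_hard_handler,
                                    demo_thread_handler,
                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                    "WILC_IRQ", priv);
}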
diff --git a/drivers/staging/wilc1000/netdev.h b/drivers/staging/wilc1000/netdev.h
index d5f7a6037fbc..61cbec674a62 100644
--- a/drivers/staging/wilc1000/netdev.h
+++ b/drivers/staging/wilc1000/netdev.h
@@ -29,8 +29,6 @@
#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
#define DEFAULT_LINK_SPEED 72
-#define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
-
struct wilc_wfi_stats {
unsigned long rx_packets;
unsigned long tx_packets;
@@ -66,12 +64,6 @@ struct wilc_wfi_p2p_listen_params {
u64 listen_cookie;
};
-struct wilc_p2p_var {
- u8 local_random;
- u8 recv_random;
- bool is_wilc_ie;
-};
-
static const u32 wilc_cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -155,7 +147,6 @@ struct wilc_priv {
struct mutex scan_req_lock;
bool p2p_listen_state;
int scanned_cnt;
- struct wilc_p2p_var p2p;
u64 inc_roc_cookie;
};
@@ -218,7 +209,6 @@ struct wilc {
const struct wilc_hif_func *hif_func;
int io_type;
s8 mac_status;
- struct gpio_desc *gpio_irq;
struct clk *rtc_clk;
bool initialized;
int dev_irq_num;
diff --git a/drivers/staging/wilc1000/sdio.c b/drivers/staging/wilc1000/sdio.c
index ca99335687c4..36eb589263bf 100644
--- a/drivers/staging/wilc1000/sdio.c
+++ b/drivers/staging/wilc1000/sdio.c
@@ -7,6 +7,8 @@
#include <linux/clk.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/sdio.h>
+#include <linux/of_irq.h>
#include "netdev.h"
#include "cfg80211.h"
@@ -26,9 +28,6 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
struct wilc_sdio {
bool irq_gpio;
u32 block_size;
- int nint;
-/* Max num interrupts allowed in registers 0xf7, 0xf8 */
-#define MAX_NUN_INT_THRPT_ENH2 (5)
int has_thrpt_enh3;
};
@@ -124,35 +123,34 @@ static int wilc_sdio_probe(struct sdio_func *func,
{
struct wilc *wilc;
int ret;
- struct gpio_desc *gpio = NULL;
struct wilc_sdio *sdio_priv;
sdio_priv = kzalloc(sizeof(*sdio_priv), GFP_KERNEL);
if (!sdio_priv)
return -ENOMEM;
- if (IS_ENABLED(CONFIG_WILC1000_HW_OOB_INTR)) {
- gpio = gpiod_get(&func->dev, "irq", GPIOD_IN);
- if (IS_ERR(gpio)) {
- /* get the GPIO descriptor from hardcode GPIO number */
- gpio = gpio_to_desc(GPIO_NUM);
- if (!gpio)
- dev_err(&func->dev, "failed to get irq gpio\n");
- }
- }
-
ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
&wilc_hif_sdio);
if (ret) {
kfree(sdio_priv);
return ret;
}
+
+ if (IS_ENABLED(CONFIG_WILC1000_HW_OOB_INTR)) {
+ struct device_node *np = func->card->dev.of_node;
+ int irq_num = of_irq_get(np, 0);
+
+ if (irq_num > 0) {
+ wilc->dev_irq_num = irq_num;
+ sdio_priv->irq_gpio = true;
+ }
+ }
+
sdio_set_drvdata(func, wilc);
wilc->bus_data = sdio_priv;
wilc->dev = &func->dev;
- wilc->gpio_irq = gpio;
- wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc_clk");
+ wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc");
if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
else if (!IS_ERR(wilc->rtc_clk))
@@ -166,10 +164,6 @@ static void wilc_sdio_remove(struct sdio_func *func)
{
struct wilc *wilc = sdio_get_drvdata(func);
- /* free the GPIO in module remove */
- if (wilc->gpio_irq)
- gpiod_put(wilc->gpio_irq);
-
if (!IS_ERR(wilc->rtc_clk))
clk_disable_unprepare(wilc->rtc_clk);
@@ -185,8 +179,8 @@ static int wilc_sdio_reset(struct wilc *wilc)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 0;
- cmd.address = 0x6;
- cmd.data = 0x8;
+ cmd.address = SDIO_CCCR_ABORT;
+ cmd.data = WILC_SDIO_CCCR_ABORT_RESET;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, reset cmd ...\n");
@@ -268,34 +262,38 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 0;
- cmd.address = 0x10c;
+ cmd.address = WILC_SDIO_FBR_CSA_REG;
cmd.data = (u8)adr;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
- dev_err(&func->dev, "Failed cmd52, set 0x10c data...\n");
+ dev_err(&func->dev, "Failed cmd52, set %04x data...\n",
+ cmd.address);
return ret;
}
- cmd.address = 0x10d;
+ cmd.address = WILC_SDIO_FBR_CSA_REG + 1;
cmd.data = (u8)(adr >> 8);
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
- dev_err(&func->dev, "Failed cmd52, set 0x10d data...\n");
+ dev_err(&func->dev, "Failed cmd52, set %04x data...\n",
+ cmd.address);
return ret;
}
- cmd.address = 0x10e;
+ cmd.address = WILC_SDIO_FBR_CSA_REG + 2;
cmd.data = (u8)(adr >> 16);
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
- dev_err(&func->dev, "Failed cmd52, set 0x10e data...\n");
+ dev_err(&func->dev, "Failed cmd52, set %04x data...\n",
+ cmd.address);
return ret;
}
return 0;
}
-static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
+static int wilc_sdio_set_block_size(struct wilc *wilc, u8 func_num,
+ u32 block_size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct sdio_cmd52 cmd;
@@ -304,52 +302,21 @@ static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 0;
- cmd.address = 0x10;
+ cmd.address = SDIO_FBR_BASE(func_num) + SDIO_CCCR_BLKSIZE;
cmd.data = (u8)block_size;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
- dev_err(&func->dev, "Failed cmd52, set 0x10 data...\n");
+ dev_err(&func->dev, "Failed cmd52, set %04x data...\n",
+ cmd.address);
return ret;
}
- cmd.address = 0x11;
- cmd.data = (u8)(block_size >> 8);
- ret = wilc_sdio_cmd52(wilc, &cmd);
- if (ret) {
- dev_err(&func->dev, "Failed cmd52, set 0x11 data...\n");
- return ret;
- }
-
- return 0;
-}
-
-/********************************************
- *
- * Function 1
- *
- ********************************************/
-
-static int wilc_sdio_set_func1_block_size(struct wilc *wilc, u32 block_size)
-{
- struct sdio_func *func = dev_to_sdio_func(wilc->dev);
- struct sdio_cmd52 cmd;
- int ret;
-
- cmd.read_write = 1;
- cmd.function = 0;
- cmd.raw = 0;
- cmd.address = 0x110;
- cmd.data = (u8)block_size;
- ret = wilc_sdio_cmd52(wilc, &cmd);
- if (ret) {
- dev_err(&func->dev, "Failed cmd52, set 0x110 data...\n");
- return ret;
- }
- cmd.address = 0x111;
+ cmd.address = SDIO_FBR_BASE(func_num) + SDIO_CCCR_BLKSIZE + 1;
cmd.data = (u8)(block_size >> 8);
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
- dev_err(&func->dev, "Failed cmd52, set 0x111 data...\n");
+ dev_err(&func->dev, "Failed cmd52, set %04x data...\n",
+ cmd.address);
return ret;
}
@@ -369,7 +336,7 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cpu_to_le32s(&data);
- if (addr >= 0xf0 && addr <= 0xff) {
+ if (addr >= 0xf0 && addr <= 0xff) { /* only vendor specific registers */
struct sdio_cmd52 cmd;
cmd.read_write = 1;
@@ -393,7 +360,7 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.read_write = 1;
cmd.function = 0;
- cmd.address = 0x10f;
+ cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
cmd.count = 4;
@@ -419,34 +386,19 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.read_write = 1;
if (addr > 0) {
/**
- * has to be word aligned...
- **/
- if (size & 0x3) {
- size += 4;
- size &= ~0x3;
- }
-
- /**
* func 0 access
**/
cmd.function = 0;
- cmd.address = 0x10f;
+ cmd.address = WILC_SDIO_FBR_DATA_REG;
} else {
/**
- * has to be word aligned...
- **/
- if (size & 0x3) {
- size += 4;
- size &= ~0x3;
- }
-
- /**
* func 1 access
**/
cmd.function = 1;
- cmd.address = 0;
+ cmd.address = WILC_SDIO_F1_DATA_REG;
}
+ size = ALIGN(size, 4);
nblk = size / block_size;
nleft = size % block_size;
@@ -502,7 +454,7 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
struct wilc_sdio *sdio_priv = wilc->bus_data;
int ret;
- if (addr >= 0xf0 && addr <= 0xff) {
+ if (addr >= 0xf0 && addr <= 0xff) { /* only vendor specific registers */
struct sdio_cmd52 cmd;
cmd.read_write = 0;
@@ -525,7 +477,7 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
cmd.read_write = 0;
cmd.function = 0;
- cmd.address = 0x10f;
+ cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
cmd.count = 4;
@@ -555,34 +507,19 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.read_write = 0;
if (addr > 0) {
/**
- * has to be word aligned...
- **/
- if (size & 0x3) {
- size += 4;
- size &= ~0x3;
- }
-
- /**
* func 0 access
**/
cmd.function = 0;
- cmd.address = 0x10f;
+ cmd.address = WILC_SDIO_FBR_DATA_REG;
} else {
/**
- * has to be word aligned...
- **/
- if (size & 0x3) {
- size += 4;
- size &= ~0x3;
- }
-
- /**
* func 1 access
**/
cmd.function = 1;
- cmd.address = 0;
+ cmd.address = WILC_SDIO_F1_DATA_REG;
}
+ size = ALIGN(size, 4);
nblk = size / block_size;
nleft = size % block_size;
@@ -651,17 +588,14 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
int loop, ret;
u32 chipid;
- if (!resume)
- sdio_priv->irq_gpio = wilc->dev_irq_num;
-
/**
* function 0 csa enable
**/
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 1;
- cmd.address = 0x100;
- cmd.data = 0x80;
+ cmd.address = SDIO_FBR_BASE(func->num);
+ cmd.data = SDIO_FBR_ENABLE_CSA;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, enable csa...\n");
@@ -671,7 +605,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
/**
* function 0 block size
**/
- ret = wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
+ ret = wilc_sdio_set_block_size(wilc, 0, WILC_SDIO_BLOCK_SIZE);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, set func 0 block size...\n");
return ret;
@@ -684,8 +618,8 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 1;
- cmd.address = 0x2;
- cmd.data = 0x2;
+ cmd.address = SDIO_CCCR_IOEx;
+ cmd.data = WILC_SDIO_CCCR_IO_EN_FUNC1;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
@@ -699,7 +633,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
cmd.read_write = 0;
cmd.function = 0;
cmd.raw = 0;
- cmd.address = 0x3;
+ cmd.address = SDIO_CCCR_IORx;
loop = 3;
do {
cmd.data = 0;
@@ -709,7 +643,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
"Fail cmd 52, get IOR register...\n");
return ret;
}
- if (cmd.data == 0x2)
+ if (cmd.data == WILC_SDIO_CCCR_IO_EN_FUNC1)
break;
} while (loop--);
@@ -721,7 +655,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
/**
* func 1 is ready, set func 1 block size
**/
- ret = wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
+ ret = wilc_sdio_set_block_size(wilc, 1, WILC_SDIO_BLOCK_SIZE);
if (ret) {
dev_err(&func->dev, "Fail set func 1 block size...\n");
return ret;
@@ -733,8 +667,8 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 1;
- cmd.address = 0x4;
- cmd.data = 0x3;
+ cmd.address = SDIO_CCCR_IENx;
+ cmd.data = WILC_SDIO_CCCR_IEN_MASTER | WILC_SDIO_CCCR_IEN_FUNC1;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, set IEN register...\n");
@@ -745,13 +679,16 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
* make sure can read back chip id correctly
**/
if (!resume) {
- ret = wilc_sdio_read_reg(wilc, 0x1000, &chipid);
+ int rev;
+
+ ret = wilc_sdio_read_reg(wilc, WILC_CHIPID, &chipid);
if (ret) {
dev_err(&func->dev, "Fail cmd read chip id...\n");
return ret;
}
dev_err(&func->dev, "chipid (%08x)\n", chipid);
- if ((chipid & 0xfff) > 0x2a0)
+ rev = FIELD_GET(WILC_CHIP_REV_FIELD, chipid);
+ if (rev > FIELD_GET(WILC_CHIP_REV_FIELD, WILC_1000_BASE_ID_2A))
sdio_priv->has_thrpt_enh3 = 1;
else
sdio_priv->has_thrpt_enh3 = 0;
@@ -773,12 +710,12 @@ static int wilc_sdio_read_size(struct wilc *wilc, u32 *size)
cmd.read_write = 0;
cmd.function = 0;
cmd.raw = 0;
- cmd.address = 0xf2;
+ cmd.address = WILC_SDIO_INTERRUPT_DATA_SZ_REG;
cmd.data = 0;
wilc_sdio_cmd52(wilc, &cmd);
tmp = cmd.data;
- cmd.address = 0xf3;
+ cmd.address = WILC_SDIO_INTERRUPT_DATA_SZ_REG + 1;
cmd.data = 0;
wilc_sdio_cmd52(wilc, &cmd);
tmp |= (cmd.data << 8);
@@ -792,6 +729,7 @@ static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
u32 tmp;
+ u8 irq_flags;
struct sdio_cmd52 cmd;
wilc_sdio_read_size(wilc, &tmp);
@@ -800,46 +738,22 @@ static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
* Read IRQ flags
**/
if (!sdio_priv->irq_gpio) {
- int i;
-
- cmd.read_write = 0;
cmd.function = 1;
- cmd.address = 0x04;
- cmd.data = 0;
- wilc_sdio_cmd52(wilc, &cmd);
-
- if (cmd.data & BIT(0))
- tmp |= INT_0;
- if (cmd.data & BIT(2))
- tmp |= INT_1;
- if (cmd.data & BIT(3))
- tmp |= INT_2;
- if (cmd.data & BIT(4))
- tmp |= INT_3;
- if (cmd.data & BIT(5))
- tmp |= INT_4;
- if (cmd.data & BIT(6))
- tmp |= INT_5;
- for (i = sdio_priv->nint; i < MAX_NUM_INT; i++) {
- if ((tmp >> (IRG_FLAGS_OFFSET + i)) & 0x1) {
- dev_err(&func->dev,
- "Unexpected interrupt (1) : tmp=%x, data=%x\n",
- tmp, cmd.data);
- break;
- }
- }
+ cmd.address = WILC_SDIO_EXT_IRQ_FLAG_REG;
} else {
- u32 irq_flags;
-
- cmd.read_write = 0;
cmd.function = 0;
- cmd.raw = 0;
- cmd.address = 0xf7;
- cmd.data = 0;
- wilc_sdio_cmd52(wilc, &cmd);
- irq_flags = cmd.data & 0x1f;
- tmp |= ((irq_flags >> 0) << IRG_FLAGS_OFFSET);
+ cmd.address = WILC_SDIO_IRQ_FLAG_REG;
}
+ cmd.raw = 0;
+ cmd.read_write = 0;
+ cmd.data = 0;
+ wilc_sdio_cmd52(wilc, &cmd);
+ irq_flags = cmd.data;
+ tmp |= FIELD_PREP(IRG_FLAGS_MASK, cmd.data);
+
+ if (FIELD_GET(UNHANDLED_IRQ_MASK, irq_flags))
+ dev_err(&func->dev, "Unexpected interrupt (1) int=%lx\n",
+ FIELD_GET(UNHANDLED_IRQ_MASK, irq_flags));
*int_status = tmp;
@@ -854,16 +768,11 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
int vmm_ctl;
if (sdio_priv->has_thrpt_enh3) {
- u32 reg;
+ u32 reg = 0;
- if (sdio_priv->irq_gpio) {
- u32 flags;
+ if (sdio_priv->irq_gpio)
+ reg = val & (BIT(MAX_NUM_INT) - 1);
- flags = val & (BIT(MAX_NUN_INT_THRPT_ENH2) - 1);
- reg = flags;
- } else {
- reg = 0;
- }
/* select VMM table 0 */
if (val & SEL_VMM_TBL0)
reg |= BIT(5);
@@ -879,14 +788,14 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 0;
- cmd.address = 0xf8;
+ cmd.address = WILC_SDIO_IRQ_CLEAR_FLAG_REG;
cmd.data = reg;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
- "Failed cmd52, set 0xf8 data (%d) ...\n",
- __LINE__);
+ "Failed cmd52, set (%02x) data (%d) ...\n",
+ cmd.address, __LINE__);
return ret;
}
}
@@ -899,38 +808,36 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
* Must clear each interrupt individually.
*/
u32 flags;
+ int i;
flags = val & (BIT(MAX_NUM_INT) - 1);
- if (flags) {
- int i;
-
- for (i = 0; i < sdio_priv->nint; i++) {
- if (flags & 1) {
- struct sdio_cmd52 cmd;
-
- cmd.read_write = 1;
- cmd.function = 0;
- cmd.raw = 0;
- cmd.address = 0xf8;
- cmd.data = BIT(i);
-
- ret = wilc_sdio_cmd52(wilc, &cmd);
- if (ret) {
- dev_err(&func->dev,
- "Failed cmd52, set 0xf8 data (%d) ...\n",
- __LINE__);
- return ret;
- }
+ for (i = 0; i < NUM_INT_EXT && flags; i++) {
+ if (flags & BIT(i)) {
+ struct sdio_cmd52 cmd;
+
+ cmd.read_write = 1;
+ cmd.function = 0;
+ cmd.raw = 0;
+ cmd.address = WILC_SDIO_IRQ_CLEAR_FLAG_REG;
+ cmd.data = BIT(i);
+
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev,
+ "Failed cmd52, set (%02x) data (%d) ...\n",
+ cmd.address, __LINE__);
+ return ret;
}
- flags >>= 1;
+ flags &= ~BIT(i);
}
+ }
- for (i = sdio_priv->nint; i < MAX_NUM_INT; i++) {
- if (flags & 1)
- dev_err(&func->dev,
- "Unexpected interrupt cleared %d...\n",
- i);
- flags >>= 1;
+ for (i = NUM_INT_EXT; i < MAX_NUM_INT && flags; i++) {
+ if (flags & BIT(i)) {
+ dev_err(&func->dev,
+ "Unexpected interrupt cleared %d...\n",
+ i);
+ flags &= ~BIT(i);
}
}
}
@@ -952,13 +859,13 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 0;
- cmd.address = 0xf6;
+ cmd.address = WILC_SDIO_VMM_TBL_CTRL_REG;
cmd.data = vmm_ctl;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
- "Failed cmd52, set 0xf6 data (%d) ...\n",
- __LINE__);
+ "Failed cmd52, set (%02x) data (%d) ...\n",
+ cmd.address, __LINE__);
return ret;
}
}
@@ -975,13 +882,6 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
return -EINVAL;
}
- if (nint > MAX_NUN_INT_THRPT_ENH2) {
- dev_err(&func->dev,
- "Cannot support more than 5 interrupts when has_thrpt_enh2=1.\n");
- return -EINVAL;
- }
-
- sdio_priv->nint = nint;
/**
* Disable power sequencer
@@ -1097,7 +997,7 @@ static int wilc_sdio_resume(struct device *dev)
}
static const struct of_device_id wilc_of_match[] = {
- { .compatible = "microchip,wilc1000-sdio", },
+ { .compatible = "microchip,wilc1000", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, wilc_of_match);
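
The cmd52 hunks above replace magic CCCR/FBR addresses with symbolic ones, and the unified wilc_sdio_set_block_size() computes the block-size register address from the function number instead of keeping two hard-coded helpers. A standalone sanity check of that address arithmetic; the two macro values are restated from <linux/mmc/sdio.h> and should be treated as assumptions here.

#include <assert.h>

#define SDIO_FBR_BASE(f)   ((f) * 0x100)
#define SDIO_CCCR_BLKSIZE  0x10

int main(void)
{
        /* old wilc_sdio_set_func0_block_size() wrote 0x10/0x11 */
        assert(SDIO_FBR_BASE(0) + SDIO_CCCR_BLKSIZE == 0x10);
        /* old wilc_sdio_set_func1_block_size() wrote 0x110/0x111 */
        assert(SDIO_FBR_BASE(1) + SDIO_CCCR_BLKSIZE == 0x110);
        return 0;
}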
diff --git a/drivers/staging/wilc1000/spi.c b/drivers/staging/wilc1000/spi.c
index 3ffc7b4fddf6..3f19e3f38a39 100644
--- a/drivers/staging/wilc1000/spi.c
+++ b/drivers/staging/wilc1000/spi.c
@@ -6,72 +6,19 @@
#include <linux/clk.h>
#include <linux/spi/spi.h>
+#include <linux/crc7.h>
#include "netdev.h"
#include "cfg80211.h"
struct wilc_spi {
int crc_off;
- int nint;
};
static const struct wilc_hif_func wilc_hif_spi;
/********************************************
*
- * Crc7
- *
- ********************************************/
-
-static const u8 crc7_syndrome_table[256] = {
- 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
- 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
- 0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26,
- 0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e,
- 0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d,
- 0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45,
- 0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14,
- 0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c,
- 0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b,
- 0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13,
- 0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42,
- 0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a,
- 0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69,
- 0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21,
- 0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70,
- 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38,
- 0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e,
- 0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36,
- 0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67,
- 0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f,
- 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
- 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04,
- 0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55,
- 0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d,
- 0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a,
- 0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52,
- 0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03,
- 0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b,
- 0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28,
- 0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60,
- 0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31,
- 0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79
-};
-
-static u8 crc7_byte(u8 crc, u8 data)
-{
- return crc7_syndrome_table[(crc << 1) ^ data];
-}
-
-static u8 crc7(u8 crc, const u8 *buffer, u32 len)
-{
- while (len--)
- crc = crc7_byte(crc, *buffer++);
- return crc;
-}
-
-/********************************************
- *
* Spi protocol Function
*
********************************************/
@@ -97,25 +44,62 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
#define USE_SPI_DMA 0
+#define WILC_SPI_COMMAND_STAT_SUCCESS 0
+#define WILC_GET_RESP_HDR_START(h) (((h) >> 4) & 0xf)
+
+struct wilc_spi_cmd {
+ u8 cmd_type;
+ union {
+ struct {
+ u8 addr[3];
+ u8 crc[];
+ } __packed simple_cmd;
+ struct {
+ u8 addr[3];
+ u8 size[2];
+ u8 crc[];
+ } __packed dma_cmd;
+ struct {
+ u8 addr[3];
+ u8 size[3];
+ u8 crc[];
+ } __packed dma_cmd_ext;
+ struct {
+ u8 addr[2];
+ __be32 data;
+ u8 crc[];
+ } __packed internal_w_cmd;
+ struct {
+ u8 addr[3];
+ __be32 data;
+ u8 crc[];
+ } __packed w_cmd;
+ } u;
+} __packed;
+
+struct wilc_spi_read_rsp_data {
+ u8 rsp_cmd_type;
+ u8 status;
+ u8 resp_header;
+ u8 resp_data[4];
+ u8 crc[];
+} __packed;
+
+struct wilc_spi_rsp_data {
+ u8 rsp_cmd_type;
+ u8 status;
+} __packed;
+
static int wilc_bus_probe(struct spi_device *spi)
{
int ret;
struct wilc *wilc;
- struct gpio_desc *gpio;
struct wilc_spi *spi_priv;
spi_priv = kzalloc(sizeof(*spi_priv), GFP_KERNEL);
if (!spi_priv)
return -ENOMEM;
- gpio = gpiod_get(&spi->dev, "irq", GPIOD_IN);
- if (IS_ERR(gpio)) {
- /* get the GPIO descriptor from hardcode GPIO number */
- gpio = gpio_to_desc(GPIO_NUM);
- if (!gpio)
- dev_err(&spi->dev, "failed to get the irq gpio\n");
- }
-
ret = wilc_cfg80211_init(&wilc, &spi->dev, WILC_HIF_SPI, &wilc_hif_spi);
if (ret) {
kfree(spi_priv);
@@ -125,7 +109,7 @@ static int wilc_bus_probe(struct spi_device *spi)
spi_set_drvdata(spi, wilc);
wilc->dev = &spi->dev;
wilc->bus_data = spi_priv;
- wilc->gpio_irq = gpio;
+ wilc->dev_irq_num = spi->irq;
wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
@@ -140,10 +124,6 @@ static int wilc_bus_remove(struct spi_device *spi)
{
struct wilc *wilc = spi_get_drvdata(spi);
- /* free the GPIO in module remove */
- if (wilc->gpio_irq)
- gpiod_put(wilc->gpio_irq);
-
if (!IS_ERR(wilc->rtc_clk))
clk_disable_unprepare(wilc->rtc_clk);
@@ -152,7 +132,7 @@ static int wilc_bus_remove(struct spi_device *spi)
}
static const struct of_device_id wilc_of_match[] = {
- { .compatible = "microchip,wilc1000-spi", },
+ { .compatible = "microchip,wilc1000", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, wilc_of_match);
@@ -178,7 +158,10 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
struct spi_transfer tr = {
.tx_buf = b,
.len = len,
- .delay_usecs = 0,
+ .delay = {
+ .value = 0,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
};
char *r_buffer = kzalloc(len, GFP_KERNEL);
@@ -219,7 +202,10 @@ static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen)
struct spi_transfer tr = {
.rx_buf = rb,
.len = rlen,
- .delay_usecs = 0,
+ .delay = {
+ .value = 0,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
};
char *t_buffer = kzalloc(rlen, GFP_KERNEL);
@@ -261,7 +247,10 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
.tx_buf = wb,
.len = rlen,
.bits_per_word = 8,
- .delay_usecs = 0,
+ .delay = {
+ .value = 0,
+ .unit = SPI_DELAY_UNIT_USECS
+ },
};
@@ -284,335 +273,6 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
return ret;
}
-static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
- u8 clockless)
-{
- struct spi_device *spi = to_spi_device(wilc->dev);
- struct wilc_spi *spi_priv = wilc->bus_data;
- u8 wb[32], rb[32];
- u8 wix, rix;
- u32 len2;
- u8 rsp;
- int len = 0;
- int result = 0;
- int retry;
- u8 crc[2];
-
- wb[0] = cmd;
- switch (cmd) {
- case CMD_SINGLE_READ: /* single word (4 bytes) read */
- wb[1] = (u8)(adr >> 16);
- wb[2] = (u8)(adr >> 8);
- wb[3] = (u8)adr;
- len = 5;
- break;
-
- case CMD_INTERNAL_READ: /* internal register read */
- wb[1] = (u8)(adr >> 8);
- if (clockless == 1)
- wb[1] |= BIT(7);
- wb[2] = (u8)adr;
- wb[3] = 0x00;
- len = 5;
- break;
-
- case CMD_TERMINATE:
- wb[1] = 0x00;
- wb[2] = 0x00;
- wb[3] = 0x00;
- len = 5;
- break;
-
- case CMD_REPEAT:
- wb[1] = 0x00;
- wb[2] = 0x00;
- wb[3] = 0x00;
- len = 5;
- break;
-
- case CMD_RESET:
- wb[1] = 0xff;
- wb[2] = 0xff;
- wb[3] = 0xff;
- len = 5;
- break;
-
- case CMD_DMA_WRITE: /* dma write */
- case CMD_DMA_READ: /* dma read */
- wb[1] = (u8)(adr >> 16);
- wb[2] = (u8)(adr >> 8);
- wb[3] = (u8)adr;
- wb[4] = (u8)(sz >> 8);
- wb[5] = (u8)(sz);
- len = 7;
- break;
-
- case CMD_DMA_EXT_WRITE: /* dma extended write */
- case CMD_DMA_EXT_READ: /* dma extended read */
- wb[1] = (u8)(adr >> 16);
- wb[2] = (u8)(adr >> 8);
- wb[3] = (u8)adr;
- wb[4] = (u8)(sz >> 16);
- wb[5] = (u8)(sz >> 8);
- wb[6] = (u8)(sz);
- len = 8;
- break;
-
- case CMD_INTERNAL_WRITE: /* internal register write */
- wb[1] = (u8)(adr >> 8);
- if (clockless == 1)
- wb[1] |= BIT(7);
- wb[2] = (u8)(adr);
- wb[3] = b[3];
- wb[4] = b[2];
- wb[5] = b[1];
- wb[6] = b[0];
- len = 8;
- break;
-
- case CMD_SINGLE_WRITE: /* single word write */
- wb[1] = (u8)(adr >> 16);
- wb[2] = (u8)(adr >> 8);
- wb[3] = (u8)(adr);
- wb[4] = b[3];
- wb[5] = b[2];
- wb[6] = b[1];
- wb[7] = b[0];
- len = 9;
- break;
-
- default:
- result = -EINVAL;
- break;
- }
-
- if (result)
- return result;
-
- if (!spi_priv->crc_off)
- wb[len - 1] = (crc7(0x7f, (const u8 *)&wb[0], len - 1)) << 1;
- else
- len -= 1;
-
-#define NUM_SKIP_BYTES (1)
-#define NUM_RSP_BYTES (2)
-#define NUM_DATA_HDR_BYTES (1)
-#define NUM_DATA_BYTES (4)
-#define NUM_CRC_BYTES (2)
-#define NUM_DUMMY_BYTES (3)
- if (cmd == CMD_RESET ||
- cmd == CMD_TERMINATE ||
- cmd == CMD_REPEAT) {
- len2 = len + (NUM_SKIP_BYTES + NUM_RSP_BYTES + NUM_DUMMY_BYTES);
- } else if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ) {
- int tmp = NUM_RSP_BYTES + NUM_DATA_HDR_BYTES + NUM_DATA_BYTES
- + NUM_DUMMY_BYTES;
- if (!spi_priv->crc_off)
- len2 = len + tmp + NUM_CRC_BYTES;
- else
- len2 = len + tmp;
- } else {
- len2 = len + (NUM_RSP_BYTES + NUM_DUMMY_BYTES);
- }
-#undef NUM_DUMMY_BYTES
-
- if (len2 > ARRAY_SIZE(wb)) {
- dev_err(&spi->dev, "spi buffer size too small (%d) (%zu)\n",
- len2, ARRAY_SIZE(wb));
- return -EINVAL;
- }
- /* zero spi write buffers. */
- for (wix = len; wix < len2; wix++)
- wb[wix] = 0;
- rix = len;
-
- if (wilc_spi_tx_rx(wilc, wb, rb, len2)) {
- dev_err(&spi->dev, "Failed cmd write, bus error...\n");
- return -EINVAL;
- }
-
- /*
- * Command/Control response
- */
- if (cmd == CMD_RESET || cmd == CMD_TERMINATE || cmd == CMD_REPEAT)
- rix++; /* skip 1 byte */
-
- rsp = rb[rix++];
-
- if (rsp != cmd) {
- dev_err(&spi->dev,
- "Failed cmd response, cmd (%02x), resp (%02x)\n",
- cmd, rsp);
- return -EINVAL;
- }
-
- /*
- * State response
- */
- rsp = rb[rix++];
- if (rsp != 0x00) {
- dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
- rsp);
- return -EINVAL;
- }
-
- if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ ||
- cmd == CMD_DMA_READ || cmd == CMD_DMA_EXT_READ) {
- /*
- * Data Respnose header
- */
- retry = 100;
- do {
- /*
- * ensure there is room in buffer later
- * to read data and crc
- */
- if (rix < len2) {
- rsp = rb[rix++];
- } else {
- retry = 0;
- break;
- }
- if (((rsp >> 4) & 0xf) == 0xf)
- break;
- } while (retry--);
-
- if (retry <= 0) {
- dev_err(&spi->dev,
- "Error, data read response (%02x)\n", rsp);
- return -EAGAIN;
- }
- }
-
- if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ) {
- /*
- * Read bytes
- */
- if ((rix + 3) < len2) {
- b[0] = rb[rix++];
- b[1] = rb[rix++];
- b[2] = rb[rix++];
- b[3] = rb[rix++];
- } else {
- dev_err(&spi->dev,
- "buffer overrun when reading data.\n");
- return -EINVAL;
- }
-
- if (!spi_priv->crc_off) {
- /*
- * Read Crc
- */
- if ((rix + 1) < len2) {
- crc[0] = rb[rix++];
- crc[1] = rb[rix++];
- } else {
- dev_err(&spi->dev,
- "buffer overrun when reading crc.\n");
- return -EINVAL;
- }
- }
- } else if ((cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) {
- int ix;
-
- /* some data may be read in response to dummy bytes. */
- for (ix = 0; (rix < len2) && (ix < sz); )
- b[ix++] = rb[rix++];
-
- sz -= ix;
-
- if (sz > 0) {
- int nbytes;
-
- if (sz <= (DATA_PKT_SZ - ix))
- nbytes = sz;
- else
- nbytes = DATA_PKT_SZ - ix;
-
- /*
- * Read bytes
- */
- if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
- dev_err(&spi->dev,
- "Failed block read, bus err\n");
- return -EINVAL;
- }
-
- /*
- * Read Crc
- */
- if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
- dev_err(&spi->dev,
- "Failed block crc read, bus err\n");
- return -EINVAL;
- }
-
- ix += nbytes;
- sz -= nbytes;
- }
-
- /*
- * if any data in left unread,
- * then read the rest using normal DMA code.
- */
- while (sz > 0) {
- int nbytes;
-
- if (sz <= DATA_PKT_SZ)
- nbytes = sz;
- else
- nbytes = DATA_PKT_SZ;
-
- /*
- * read data response only on the next DMA cycles not
- * the first DMA since data response header is already
- * handled above for the first DMA.
- */
- /*
- * Data Respnose header
- */
- retry = 10;
- do {
- if (wilc_spi_rx(wilc, &rsp, 1)) {
- dev_err(&spi->dev,
- "Failed resp read, bus err\n");
- result = -EINVAL;
- break;
- }
- if (((rsp >> 4) & 0xf) == 0xf)
- break;
- } while (retry--);
-
- if (result)
- break;
-
- /*
- * Read bytes
- */
- if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
- dev_err(&spi->dev,
- "Failed block read, bus err\n");
- result = -EINVAL;
- break;
- }
-
- /*
- * Read Crc
- */
- if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
- dev_err(&spi->dev,
- "Failed block crc read, bus err\n");
- result = -EINVAL;
- break;
- }
-
- ix += nbytes;
- sz -= nbytes;
- }
- }
- return result;
-}
-
static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
{
struct spi_device *spi = to_spi_device(wilc->dev);
@@ -686,19 +346,333 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
* Spi Internal Read/Write Function
*
********************************************/
+static u8 wilc_get_crc7(u8 *buffer, u32 len)
+{
+ return crc7_be(0xfe, buffer, len);
+}
+
+static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
+ u8 clockless)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u8 wb[32], rb[32];
+ int cmd_len, resp_len;
+ u8 crc[2];
+ struct wilc_spi_cmd *c;
+ struct wilc_spi_read_rsp_data *r;
+
+ memset(wb, 0x0, sizeof(wb));
+ memset(rb, 0x0, sizeof(rb));
+ c = (struct wilc_spi_cmd *)wb;
+ c->cmd_type = cmd;
+ if (cmd == CMD_SINGLE_READ) {
+ c->u.simple_cmd.addr[0] = adr >> 16;
+ c->u.simple_cmd.addr[1] = adr >> 8;
+ c->u.simple_cmd.addr[2] = adr;
+ } else if (cmd == CMD_INTERNAL_READ) {
+ c->u.simple_cmd.addr[0] = adr >> 8;
+ if (clockless == 1)
+ c->u.simple_cmd.addr[0] |= BIT(7);
+ c->u.simple_cmd.addr[1] = adr;
+ c->u.simple_cmd.addr[2] = 0x0;
+ } else {
+ dev_err(&spi->dev, "cmd [%x] not supported\n", cmd);
+ return -EINVAL;
+ }
+
+ cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc);
+ resp_len = sizeof(*r);
+ if (!spi_priv->crc_off) {
+ c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
+ cmd_len += 1;
+ resp_len += 2;
+ }
+
+ if (cmd_len + resp_len > ARRAY_SIZE(wb)) {
+ dev_err(&spi->dev,
+ "spi buffer size too small (%d) (%d) (%zu)\n",
+ cmd_len, resp_len, ARRAY_SIZE(wb));
+ return -EINVAL;
+ }
+
+ if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
+ dev_err(&spi->dev, "Failed cmd write, bus error...\n");
+ return -EINVAL;
+ }
+
+ r = (struct wilc_spi_read_rsp_data *)&rb[cmd_len];
+ if (r->rsp_cmd_type != cmd) {
+ dev_err(&spi->dev,
+ "Failed cmd response, cmd (%02x), resp (%02x)\n",
+ cmd, r->rsp_cmd_type);
+ return -EINVAL;
+ }
+
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
+ r->status);
+ return -EINVAL;
+ }
+
+ if (WILC_GET_RESP_HDR_START(r->resp_header) != 0xf) {
+ dev_err(&spi->dev, "Error, data read response (%02x)\n",
+ r->resp_header);
+ return -EINVAL;
+ }
+
+ if (b)
+ memcpy(b, r->resp_data, 4);
+
+ if (!spi_priv->crc_off)
+ memcpy(crc, r->crc, 2);
+
+ return 0;
+}
+
+static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data,
+ u8 clockless)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u8 wb[32], rb[32];
+ int cmd_len, resp_len;
+ struct wilc_spi_cmd *c;
+ struct wilc_spi_rsp_data *r;
+
+ memset(wb, 0x0, sizeof(wb));
+ memset(rb, 0x0, sizeof(rb));
+ c = (struct wilc_spi_cmd *)wb;
+ c->cmd_type = cmd;
+ if (cmd == CMD_INTERNAL_WRITE) {
+ c->u.internal_w_cmd.addr[0] = adr >> 8;
+ if (clockless == 1)
+ c->u.internal_w_cmd.addr[0] |= BIT(7);
+
+ c->u.internal_w_cmd.addr[1] = adr;
+ c->u.internal_w_cmd.data = cpu_to_be32(data);
+ cmd_len = offsetof(struct wilc_spi_cmd, u.internal_w_cmd.crc);
+ if (!spi_priv->crc_off)
+ c->u.internal_w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
+ } else if (cmd == CMD_SINGLE_WRITE) {
+ c->u.w_cmd.addr[0] = adr >> 16;
+ c->u.w_cmd.addr[1] = adr >> 8;
+ c->u.w_cmd.addr[2] = adr;
+ c->u.w_cmd.data = cpu_to_be32(data);
+ cmd_len = offsetof(struct wilc_spi_cmd, u.w_cmd.crc);
+ if (!spi_priv->crc_off)
+ c->u.w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
+ } else {
+ dev_err(&spi->dev, "write cmd [%x] not supported\n", cmd);
+ return -EINVAL;
+ }
+
+ if (!spi_priv->crc_off)
+ cmd_len += 1;
+
+ resp_len = sizeof(*r);
+
+ if (cmd_len + resp_len > ARRAY_SIZE(wb)) {
+ dev_err(&spi->dev,
+ "spi buffer size too small (%d) (%d) (%zu)\n",
+ cmd_len, resp_len, ARRAY_SIZE(wb));
+ return -EINVAL;
+ }
+
+ if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
+ dev_err(&spi->dev, "Failed cmd write, bus error...\n");
+ return -EINVAL;
+ }
+
+ r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
+ if (r->rsp_cmd_type != cmd) {
+ dev_err(&spi->dev,
+ "Failed cmd response, cmd (%02x), resp (%02x)\n",
+ cmd, r->rsp_cmd_type);
+ return -EINVAL;
+ }
+
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
+ r->status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u8 wb[32], rb[32];
+ int cmd_len, resp_len;
+ int retry, ix = 0;
+ u8 crc[2];
+ struct wilc_spi_cmd *c;
+ struct wilc_spi_rsp_data *r;
+
+ memset(wb, 0x0, sizeof(wb));
+ memset(rb, 0x0, sizeof(rb));
+ c = (struct wilc_spi_cmd *)wb;
+ c->cmd_type = cmd;
+ if (cmd == CMD_DMA_WRITE || cmd == CMD_DMA_READ) {
+ c->u.dma_cmd.addr[0] = adr >> 16;
+ c->u.dma_cmd.addr[1] = adr >> 8;
+ c->u.dma_cmd.addr[2] = adr;
+ c->u.dma_cmd.size[0] = sz >> 8;
+ c->u.dma_cmd.size[1] = sz;
+ cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd.crc);
+ if (!spi_priv->crc_off)
+ c->u.dma_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
+ } else if (cmd == CMD_DMA_EXT_WRITE || cmd == CMD_DMA_EXT_READ) {
+ c->u.dma_cmd_ext.addr[0] = adr >> 16;
+ c->u.dma_cmd_ext.addr[1] = adr >> 8;
+ c->u.dma_cmd_ext.addr[2] = adr;
+ c->u.dma_cmd_ext.size[0] = sz >> 16;
+ c->u.dma_cmd_ext.size[1] = sz >> 8;
+ c->u.dma_cmd_ext.size[2] = sz;
+ cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd_ext.crc);
+ if (!spi_priv->crc_off)
+ c->u.dma_cmd_ext.crc[0] = wilc_get_crc7(wb, cmd_len);
+ } else {
+ dev_err(&spi->dev, "dma read write cmd [%x] not supported\n",
+ cmd);
+ return -EINVAL;
+ }
+ if (!spi_priv->crc_off)
+ cmd_len += 1;
+
+ resp_len = sizeof(*r);
+
+ if (cmd_len + resp_len > ARRAY_SIZE(wb)) {
+ dev_err(&spi->dev, "spi buffer size too small (%d)(%d) (%zu)\n",
+ cmd_len, resp_len, ARRAY_SIZE(wb));
+ return -EINVAL;
+ }
+
+ if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
+ dev_err(&spi->dev, "Failed cmd write, bus error...\n");
+ return -EINVAL;
+ }
+
+ r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
+ if (r->rsp_cmd_type != cmd) {
+ dev_err(&spi->dev,
+ "Failed cmd response, cmd (%02x), resp (%02x)\n",
+ cmd, r->rsp_cmd_type);
+ return -EINVAL;
+ }
+
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
+ r->status);
+ return -EINVAL;
+ }
+
+ if (cmd == CMD_DMA_WRITE || cmd == CMD_DMA_EXT_WRITE)
+ return 0;
+
+ while (sz > 0) {
+ int nbytes;
+ u8 rsp;
+
+ if (sz <= DATA_PKT_SZ)
+ nbytes = sz;
+ else
+ nbytes = DATA_PKT_SZ;
+
+ /*
+ * Data Response header
+ */
+ retry = 100;
+ do {
+ if (wilc_spi_rx(wilc, &rsp, 1)) {
+ dev_err(&spi->dev,
+ "Failed resp read, bus err\n");
+ return -EINVAL;
+ }
+ if (WILC_GET_RESP_HDR_START(rsp) == 0xf)
+ break;
+ } while (retry--);
+
+ /*
+ * Read bytes
+ */
+ if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
+ dev_err(&spi->dev,
+ "Failed block read, bus err\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Read Crc
+ */
+ if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
+ dev_err(&spi->dev,
+ "Failed block crc read, bus err\n");
+ return -EINVAL;
+ }
+
+ ix += nbytes;
+ sz -= nbytes;
+ }
+ return 0;
+}
+
+static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ int result;
+ u8 cmd = CMD_SINGLE_READ;
+ u8 clockless = 0;
+
+ if (addr < WILC_SPI_CLOCKLESS_ADDR_LIMIT) {
+ /* Clockless register */
+ cmd = CMD_INTERNAL_READ;
+ clockless = 1;
+ }
+
+ result = wilc_spi_single_read(wilc, cmd, addr, data, clockless);
+ if (result) {
+ dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr);
+ return result;
+ }
+
+ le32_to_cpus(data);
+
+ return 0;
+}
+
+static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ int result;
+
+ if (size <= 4)
+ return -EINVAL;
+
+ result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_READ, addr, buf, size);
+ if (result) {
+ dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr);
+ return result;
+ }
+
+ return 0;
+}
static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat)
{
struct spi_device *spi = to_spi_device(wilc->dev);
int result;
- cpu_to_le32s(&dat);
- result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4,
- 0);
- if (result)
+ result = wilc_spi_write_cmd(wilc, CMD_INTERNAL_WRITE, adr, dat, 0);
+ if (result) {
dev_err(&spi->dev, "Failed internal write cmd...\n");
+ return result;
+ }
- return result;
+ return 0;
}
static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
@@ -706,8 +680,7 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
struct spi_device *spi = to_spi_device(wilc->dev);
int result;
- result = spi_cmd_complete(wilc, CMD_INTERNAL_READ, adr, (u8 *)data, 4,
- 0);
+ result = wilc_spi_single_read(wilc, CMD_INTERNAL_READ, adr, data, 0);
if (result) {
dev_err(&spi->dev, "Failed internal read cmd...\n");
return result;
@@ -715,7 +688,7 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
le32_to_cpus(data);
- return result;
+ return 0;
}
/********************************************
@@ -731,18 +704,19 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
u8 cmd = CMD_SINGLE_WRITE;
u8 clockless = 0;
- cpu_to_le32s(&data);
- if (addr < 0x30) {
+ if (addr < WILC_SPI_CLOCKLESS_ADDR_LIMIT) {
/* Clockless register */
cmd = CMD_INTERNAL_WRITE;
clockless = 1;
}
- result = spi_cmd_complete(wilc, cmd, addr, (u8 *)&data, 4, clockless);
- if (result)
+ result = wilc_spi_write_cmd(wilc, cmd, addr, data, clockless);
+ if (result) {
dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr);
+ return result;
+ }
- return result;
+ return 0;
}
static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
@@ -756,7 +730,7 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
if (size <= 4)
return -EINVAL;
- result = spi_cmd_complete(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size, 0);
+ result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size);
if (result) {
dev_err(&spi->dev,
"Failed cmd, write block (%08x)...\n", addr);
@@ -767,51 +741,14 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
* Data
*/
result = spi_data_write(wilc, buf, size);
- if (result)
- dev_err(&spi->dev, "Failed block data write...\n");
-
- return result;
-}
-
-static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
-{
- struct spi_device *spi = to_spi_device(wilc->dev);
- int result;
- u8 cmd = CMD_SINGLE_READ;
- u8 clockless = 0;
-
- if (addr < 0x30) {
- /* Clockless register */
- cmd = CMD_INTERNAL_READ;
- clockless = 1;
- }
-
- result = spi_cmd_complete(wilc, cmd, addr, (u8 *)data, 4, clockless);
if (result) {
- dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr);
+ dev_err(&spi->dev, "Failed block data write...\n");
return result;
}
- le32_to_cpus(data);
-
return 0;
}
-static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
-{
- struct spi_device *spi = to_spi_device(wilc->dev);
- int result;
-
- if (size <= 4)
- return -EINVAL;
-
- result = spi_cmd_complete(wilc, CMD_DMA_EXT_READ, addr, buf, size, 0);
- if (result)
- dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr);
-
- return result;
-}
-
/********************************************
*
* Bus interfaces
@@ -836,7 +773,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
int ret;
if (isinit) {
- ret = wilc_spi_read_reg(wilc, 0x1000, &chipid);
+ ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
if (ret)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
@@ -888,7 +825,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
/*
* make sure can read back chip id correctly
*/
- ret = wilc_spi_read_reg(wilc, 0x1000, &chipid);
+ ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
if (ret) {
dev_err(&spi->dev, "Fail cmd read chip id...\n");
return ret;
@@ -903,26 +840,28 @@ static int wilc_spi_read_size(struct wilc *wilc, u32 *size)
{
int ret;
- ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, size);
- *size = *size & IRQ_DMA_WD_CNT_MASK;
+ ret = spi_internal_read(wilc,
+ WILC_SPI_INT_STATUS - WILC_SPI_REG_BASE, size);
+ *size = FIELD_GET(IRQ_DMA_WD_CNT_MASK, *size);
return ret;
}
static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
{
- return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, int_status);
+ return spi_internal_read(wilc, WILC_SPI_INT_STATUS - WILC_SPI_REG_BASE,
+ int_status);
}
static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val)
{
- return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE, val);
+ return spi_internal_write(wilc, WILC_SPI_INT_CLEAR - WILC_SPI_REG_BASE,
+ val);
}
static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
{
struct spi_device *spi = to_spi_device(wilc->dev);
- struct wilc_spi *spi_priv = wilc->bus_data;
u32 reg;
int ret, i;
@@ -931,8 +870,6 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
return -EINVAL;
}
- spi_priv->nint = nint;
-
/*
* interrupt pin mux select
*/
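
Why wilc_get_crc7() can seed crc7_be() with 0xfe and drop the final "<< 1" the removed local helper needed: crc7_be() keeps the 7-bit CRC pre-shifted into bits 7:1, so the seed and polynomial are the old values shifted left once (0x7f becomes 0xfe, 0x09 becomes 0x12). A standalone sketch with bitwise reimplementations of both conventions follows; the assumption is that these match lib/crc7.c's table-driven versions and the table the patch deletes.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* old convention: CRC held in bits 6:0, polynomial x^7 + x^3 + 1 (0x09) */
static uint8_t crc7_lo(uint8_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                uint8_t b = *buf++;

                for (int i = 7; i >= 0; i--) {
                        uint8_t bit = ((crc >> 6) ^ (b >> i)) & 1;

                        crc = ((crc << 1) & 0x7f) ^ (bit ? 0x09 : 0);
                }
        }
        return crc;
}

/* crc7_be()-style convention: CRC held in bits 7:1, polynomial 0x12 */
static uint8_t crc7_be_demo(uint8_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= *buf++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 0x80) ? (crc << 1) ^ 0x12 : crc << 1;
        }
        return crc;
}

int main(void)
{
        uint8_t cmd[4] = { 0xc4, 0x00, 0x01, 0x22 };    /* arbitrary bytes */

        assert(crc7_be_demo(0xfe, cmd, 4) ==
               (uint8_t)(crc7_lo(0x7f, cmd, 4) << 1));
        return 0;
}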
diff --git a/drivers/staging/wilc1000/wlan.c b/drivers/staging/wilc1000/wlan.c
index 601e4d1345d2..6a82fb2f283e 100644
--- a/drivers/staging/wilc1000/wlan.c
+++ b/drivers/staging/wilc1000/wlan.c
@@ -11,7 +11,7 @@
static inline bool is_wilc1000(u32 id)
{
- return (id & 0xfffff000) == 0x100000;
+ return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
}
static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
@@ -393,62 +393,67 @@ void chip_allow_sleep(struct wilc *wilc)
{
u32 reg = 0;
- wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
+ wilc->hif_func->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg);
- wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
- wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
+ wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
+ reg & ~WILC_SDIO_WAKEUP_BIT);
+ wilc->hif_func->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG, 0);
}
EXPORT_SYMBOL_GPL(chip_allow_sleep);
void chip_wakeup(struct wilc *wilc)
{
u32 reg, clk_status_reg;
+ const struct wilc_hif_func *h = wilc->hif_func;
- if ((wilc->io_type & 0x1) == WILC_HIF_SPI) {
+ if (wilc->io_type == WILC_HIF_SPI) {
do {
- wilc->hif_func->hif_read_reg(wilc, 1, &reg);
- wilc->hif_func->hif_write_reg(wilc, 1, reg | BIT(1));
- wilc->hif_func->hif_write_reg(wilc, 1, reg & ~BIT(1));
+ h->hif_read_reg(wilc, WILC_SPI_WAKEUP_REG, &reg);
+ h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG,
+ reg | WILC_SPI_WAKEUP_BIT);
+ h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG,
+ reg & ~WILC_SPI_WAKEUP_BIT);
do {
usleep_range(2000, 2500);
wilc_get_chipid(wilc, true);
} while (wilc_get_chipid(wilc, true) == 0);
} while (wilc_get_chipid(wilc, true) == 0);
- } else if ((wilc->io_type & 0x1) == WILC_HIF_SDIO) {
- wilc->hif_func->hif_write_reg(wilc, 0xfa, 1);
+ } else if (wilc->io_type == WILC_HIF_SDIO) {
+ h->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG,
+ WILC_SDIO_HOST_TO_FW_BIT);
usleep_range(200, 400);
- wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
+ h->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg);
do {
- wilc->hif_func->hif_write_reg(wilc, 0xf0,
- reg | BIT(0));
- wilc->hif_func->hif_read_reg(wilc, 0xf1,
- &clk_status_reg);
+ h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
+ reg | WILC_SDIO_WAKEUP_BIT);
+ h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG,
+ &clk_status_reg);
- while ((clk_status_reg & 0x1) == 0) {
+ while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) {
usleep_range(2000, 2500);
- wilc->hif_func->hif_read_reg(wilc, 0xf1,
- &clk_status_reg);
+ h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG,
+ &clk_status_reg);
}
- if ((clk_status_reg & 0x1) == 0) {
- wilc->hif_func->hif_write_reg(wilc, 0xf0,
- reg & (~BIT(0)));
+ if (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) {
+ h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
+ reg & ~WILC_SDIO_WAKEUP_BIT);
}
- } while ((clk_status_reg & 0x1) == 0);
+ } while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT));
}
if (wilc->chip_ps_state == WILC_CHIP_SLEEPING_MANUAL) {
- if (wilc_get_chipid(wilc, false) < 0x1002b0) {
+ if (wilc_get_chipid(wilc, false) < WILC_1000_BASE_ID_2B) {
u32 val32;
- wilc->hif_func->hif_read_reg(wilc, 0x1e1c, &val32);
+ h->hif_read_reg(wilc, WILC_REG_4_TO_1_RX, &val32);
val32 |= BIT(6);
- wilc->hif_func->hif_write_reg(wilc, 0x1e1c, val32);
+ h->hif_write_reg(wilc, WILC_REG_4_TO_1_RX, val32);
- wilc->hif_func->hif_read_reg(wilc, 0x1e9c, &val32);
+ h->hif_read_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, &val32);
val32 |= BIT(6);
- wilc->hif_func->hif_write_reg(wilc, 0x1e9c, val32);
+ h->hif_write_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, val32);
}
}
wilc->chip_ps_state = WILC_CHIP_WAKEDUP;
@@ -458,7 +463,7 @@ EXPORT_SYMBOL_GPL(chip_wakeup);
void host_wakeup_notify(struct wilc *wilc)
{
acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
- wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
+ wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_2, 1);
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
}
EXPORT_SYMBOL_GPL(host_wakeup_notify);
@@ -466,7 +471,7 @@ EXPORT_SYMBOL_GPL(host_wakeup_notify);
void host_sleep_notify(struct wilc *wilc)
{
acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
- wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
+ wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_1, 1);
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
}
EXPORT_SYMBOL_GPL(host_sleep_notify);
@@ -508,9 +513,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
vmm_sz = HOST_HDR_OFFSET;
vmm_sz += tqe->buffer_size;
-
- if (vmm_sz & 0x3)
- vmm_sz = (vmm_sz + 4) & ~0x3;
+ vmm_sz = ALIGN(vmm_sz, 4);
if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
break;
@@ -568,11 +571,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
ret = func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, &reg);
if (ret)
break;
- if ((reg >> 2) & 0x1) {
- entries = ((reg >> 3) & 0x3f);
+ if (FIELD_GET(WILC_VMM_ENTRY_AVAILABLE, reg)) {
+ entries = FIELD_GET(WILC_VMM_ENTRY_COUNT, reg);
break;
}
- release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
} while (--timeout);
if (timeout <= 0) {
ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0);
@@ -610,6 +612,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
do {
u32 header, buffer_offset;
char *bssid;
+ u8 mgmt_ptk = 0;
tqe = wilc_wlan_txq_remove_from_head(dev);
if (!tqe)
@@ -620,15 +623,16 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
break;
le32_to_cpus(&vmm_table[i]);
- vmm_sz = (vmm_table[i] & 0x3ff);
+ vmm_sz = FIELD_GET(WILC_VMM_BUFFER_SIZE, vmm_table[i]);
vmm_sz *= 4;
- header = (tqe->type << 31) |
- (tqe->buffer_size << 15) |
- vmm_sz;
+
if (tqe->type == WILC_MGMT_PKT)
- header |= BIT(30);
- else
- header &= ~BIT(30);
+ mgmt_ptk = 1;
+
+ header = (FIELD_PREP(WILC_VMM_HDR_TYPE, tqe->type) |
+ FIELD_PREP(WILC_VMM_HDR_MGMT_FIELD, mgmt_ptk) |
+ FIELD_PREP(WILC_VMM_HDR_PKT_SIZE, tqe->buffer_size) |
+ FIELD_PREP(WILC_VMM_HDR_BUFF_SIZE, vmm_sz));
cpu_to_le32s(&header);
memcpy(&txb[offset], &header, 4);
@@ -686,10 +690,10 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
buff_ptr = buffer + offset;
header = get_unaligned_le32(buff_ptr);
- is_cfg_packet = (header >> 31) & 0x1;
- pkt_offset = (header >> 22) & 0x1ff;
- tp_len = (header >> 11) & 0x7ff;
- pkt_len = header & 0x7ff;
+ is_cfg_packet = FIELD_GET(WILC_PKT_HDR_CONFIG_FIELD, header);
+ pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header);
+ tp_len = FIELD_GET(WILC_PKT_HDR_TOTAL_LEN_FIELD, header);
+ pkt_len = FIELD_GET(WILC_PKT_HDR_LEN_FIELD, header);
if (pkt_len == 0 || tp_len == 0)
break;
@@ -699,10 +703,8 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
wilc_wfi_mgmt_rx(wilc, buff_ptr, pkt_len);
} else {
if (!is_cfg_packet) {
- if (pkt_len > 0) {
- wilc_frmw_to_host(wilc, buff_ptr,
- pkt_len, pkt_offset);
- }
+ wilc_frmw_to_host(wilc, buff_ptr, pkt_len,
+ pkt_offset);
} else {
struct wilc_cfg_rsp rsp;
@@ -758,11 +760,11 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
int ret = 0;
struct rxq_entry_t *rqe;
- size = (int_status & 0x7fff) << 2;
+ size = FIELD_GET(WILC_INTERRUPT_DATA_SIZE, int_status) << 2;
while (!size && retries < 10) {
wilc->hif_func->hif_read_size(wilc, &size);
- size = (size & 0x7fff) << 2;
+ size = FIELD_GET(WILC_INTERRUPT_DATA_SIZE, size) << 2;
retries++;
}
@@ -887,7 +889,7 @@ int wilc_wlan_start(struct wilc *wilc)
wilc->hif_func->hif_sync_ext(wilc, NUM_INT_EXT);
- ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid);
+ ret = wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &chipid);
if (ret) {
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
return ret;
@@ -1128,18 +1130,24 @@ static int init_chip(struct net_device *dev)
chipid = wilc_get_chipid(wilc, true);
if ((chipid & 0xfff) != 0xa0) {
- ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
+ ret = wilc->hif_func->hif_read_reg(wilc,
+ WILC_CORTUS_RESET_MUX_SEL,
+ &reg);
if (ret) {
netdev_err(dev, "fail read reg 0x1118\n");
goto release;
}
reg |= BIT(0);
- ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
+ ret = wilc->hif_func->hif_write_reg(wilc,
+ WILC_CORTUS_RESET_MUX_SEL,
+ reg);
if (ret) {
netdev_err(dev, "fail write reg 0x1118\n");
goto release;
}
- ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
+ ret = wilc->hif_func->hif_write_reg(wilc,
+ WILC_CORTUS_BOOT_REGISTER,
+ WILC_CORTUS_BOOT_FROM_IRAM);
if (ret) {
netdev_err(dev, "fail write reg 0xc0000\n");
goto release;
@@ -1159,20 +1167,21 @@ u32 wilc_get_chipid(struct wilc *wilc, bool update)
u32 rfrevid = 0;
if (chipid == 0 || update) {
- wilc->hif_func->hif_read_reg(wilc, 0x1000, &tempchipid);
- wilc->hif_func->hif_read_reg(wilc, 0x13f4, &rfrevid);
+ wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &tempchipid);
+ wilc->hif_func->hif_read_reg(wilc, WILC_RF_REVISION_ID,
+ &rfrevid);
if (!is_wilc1000(tempchipid)) {
chipid = 0;
return chipid;
}
- if (tempchipid == 0x1002a0) {
+ if (tempchipid == WILC_1000_BASE_ID_2A) { /* 0x1002A0 */
if (rfrevid != 0x1)
- tempchipid = 0x1002a1;
- } else if (tempchipid == 0x1002b0) {
+ tempchipid = WILC_1000_BASE_ID_2A_REV1;
+ } else if (tempchipid == WILC_1000_BASE_ID_2B) { /* 0x1002B0 */
if (rfrevid == 0x4)
- tempchipid = 0x1002b1;
+ tempchipid = WILC_1000_BASE_ID_2B_REV1;
else if (rfrevid != 0x3)
- tempchipid = 0x1002b2;
+ tempchipid = WILC_1000_BASE_ID_2B_REV2;
}
chipid = tempchipid;
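
The tx-header hunk in wilc_wlan_handle_txq() above swaps open-coded shifts for FIELD_PREP(). A standalone check that the two constructions agree for in-range values (FIELD_PREP() additionally masks out-of-range values, which the old shifts did not); the GENMASK()/FIELD_PREP() stand-ins are minimal reimplementations, an assumption rather than the kernel headers.

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define WILC_VMM_HDR_TYPE        (1u << 31)
#define WILC_VMM_HDR_MGMT_FIELD  (1u << 30)
#define WILC_VMM_HDR_PKT_SIZE    GENMASK(29, 15)
#define WILC_VMM_HDR_BUFF_SIZE   GENMASK(14, 0)

int main(void)
{
        uint32_t type = 1, mgmt = 1, buffer_size = 0x123, vmm_sz = 0x456;

        uint32_t old_hdr = (type << 31) | (buffer_size << 15) | vmm_sz;

        old_hdr |= 1u << 30;    /* old code: header |= BIT(30) for mgmt */

        uint32_t new_hdr = FIELD_PREP(WILC_VMM_HDR_TYPE, type) |
                           FIELD_PREP(WILC_VMM_HDR_MGMT_FIELD, mgmt) |
                           FIELD_PREP(WILC_VMM_HDR_PKT_SIZE, buffer_size) |
                           FIELD_PREP(WILC_VMM_HDR_BUFF_SIZE, vmm_sz);

        assert(old_hdr == new_hdr);
        return 0;
}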
diff --git a/drivers/staging/wilc1000/wlan.h b/drivers/staging/wilc1000/wlan.h
index 8c4634262adb..7689569cd82f 100644
--- a/drivers/staging/wilc1000/wlan.h
+++ b/drivers/staging/wilc1000/wlan.h
@@ -8,6 +8,7 @@
#define WILC_WLAN_H
#include <linux/types.h>
+#include <linux/bitfield.h>
/********************************************
*
@@ -65,6 +66,8 @@
#define WILC_INTR_CLEAR (WILC_INTR_REG_BASE + 0x30)
#define WILC_INTR_STATUS (WILC_INTR_REG_BASE + 0x40)
+#define WILC_RF_REVISION_ID 0x13f4
+
#define WILC_VMM_TBL_SIZE 64
#define WILC_VMM_TX_TBL_BASE 0x150400
#define WILC_VMM_RX_TBL_BASE 0x150500
@@ -88,10 +91,53 @@
#define WILC_SPI_TX_MODE (WILC_SPI_REG_BASE + 0x20)
#define WILC_SPI_PROTOCOL_CONFIG (WILC_SPI_REG_BASE + 0x24)
#define WILC_SPI_INTR_CTL (WILC_SPI_REG_BASE + 0x2c)
+#define WILC_SPI_INT_STATUS (WILC_SPI_REG_BASE + 0x40)
+#define WILC_SPI_INT_CLEAR (WILC_SPI_REG_BASE + 0x44)
+
+#define WILC_SPI_WAKEUP_REG 0x1
+#define WILC_SPI_WAKEUP_BIT BIT(1)
#define WILC_SPI_PROTOCOL_OFFSET (WILC_SPI_PROTOCOL_CONFIG - \
WILC_SPI_REG_BASE)
+#define WILC_SPI_CLOCKLESS_ADDR_LIMIT 0x30
+
+/* Functions IO enables bits */
+#define WILC_SDIO_CCCR_IO_EN_FUNC1 BIT(1)
+
+/* Function/Interrupt enables bits */
+#define WILC_SDIO_CCCR_IEN_MASTER BIT(0)
+#define WILC_SDIO_CCCR_IEN_FUNC1 BIT(1)
+
+/* Abort CCCR register bits */
+#define WILC_SDIO_CCCR_ABORT_RESET BIT(3)
+
+/* Vendor specific CCCR registers */
+#define WILC_SDIO_WAKEUP_REG 0xf0
+#define WILC_SDIO_WAKEUP_BIT BIT(0)
+
+#define WILC_SDIO_CLK_STATUS_REG 0xf1
+#define WILC_SDIO_CLK_STATUS_BIT BIT(0)
+
+#define WILC_SDIO_INTERRUPT_DATA_SZ_REG 0xf2 /* Read size (2 bytes) */
+
+#define WILC_SDIO_VMM_TBL_CTRL_REG 0xf6
+#define WILC_SDIO_IRQ_FLAG_REG 0xf7
+#define WILC_SDIO_IRQ_CLEAR_FLAG_REG 0xf8
+
+#define WILC_SDIO_HOST_TO_FW_REG 0xfa
+#define WILC_SDIO_HOST_TO_FW_BIT BIT(0)
+
+#define WILC_SDIO_FW_TO_HOST_REG 0xfc
+#define WILC_SDIO_FW_TO_HOST_BIT BIT(0)
+
+/* Function 1 specific FBR register */
+#define WILC_SDIO_FBR_CSA_REG 0x10C /* CSA pointer (3 bytes) */
+#define WILC_SDIO_FBR_DATA_REG 0x10F
+
+#define WILC_SDIO_F1_DATA_REG 0x0
+#define WILC_SDIO_EXT_IRQ_FLAG_REG 0x4
+
#define WILC_AHB_DATA_MEM_BASE 0x30000
#define WILC_AHB_SHARE_MEM_BASE 0xd0000
@@ -112,6 +158,32 @@
#define WILC_HAVE_DISABLE_WILC_UART BIT(7)
#define WILC_HAVE_USE_IRQ_AS_HOST_WAKE BIT(8)
+#define WILC_CORTUS_INTERRUPT_BASE 0x10A8
+#define WILC_CORTUS_INTERRUPT_1 (WILC_CORTUS_INTERRUPT_BASE + 0x4)
+#define WILC_CORTUS_INTERRUPT_2 (WILC_CORTUS_INTERRUPT_BASE + 0x8)
+
+/* tx control register 1 to 4 for RX */
+#define WILC_REG_4_TO_1_RX 0x1e1c
+
+/* tx control register 1 to 4 for TX Bank_0 */
+#define WILC_REG_4_TO_1_TX_BANK0 0x1e9c
+
+#define WILC_CORTUS_RESET_MUX_SEL 0x1118
+#define WILC_CORTUS_BOOT_REGISTER 0xc0000
+
+#define WILC_CORTUS_BOOT_FROM_IRAM 0x71
+
+#define WILC_1000_BASE_ID 0x100000
+
+#define WILC_1000_BASE_ID_2A 0x1002A0
+#define WILC_1000_BASE_ID_2A_REV1 (WILC_1000_BASE_ID_2A + 1)
+
+#define WILC_1000_BASE_ID_2B 0x1002B0
+#define WILC_1000_BASE_ID_2B_REV1 (WILC_1000_BASE_ID_2B + 1)
+#define WILC_1000_BASE_ID_2B_REV2 (WILC_1000_BASE_ID_2B + 2)
+
+#define WILC_CHIP_REV_FIELD GENMASK(11, 0)
+
/********************************************
*
* Wlan Defines
@@ -134,7 +206,23 @@
#define WILC_TX_BUFF_SIZE (64 * 1024)
#define MODALIAS "WILC_SPI"
-#define GPIO_NUM 0x44
+
+#define WILC_PKT_HDR_CONFIG_FIELD BIT(31)
+#define WILC_PKT_HDR_OFFSET_FIELD GENMASK(30, 22)
+#define WILC_PKT_HDR_TOTAL_LEN_FIELD GENMASK(21, 11)
+#define WILC_PKT_HDR_LEN_FIELD GENMASK(10, 0)
+
+#define WILC_INTERRUPT_DATA_SIZE GENMASK(14, 0)
+
+#define WILC_VMM_BUFFER_SIZE GENMASK(9, 0)
+
+#define WILC_VMM_HDR_TYPE BIT(31)
+#define WILC_VMM_HDR_MGMT_FIELD BIT(30)
+#define WILC_VMM_HDR_PKT_SIZE GENMASK(29, 15)
+#define WILC_VMM_HDR_BUFF_SIZE GENMASK(14, 0)
+
+#define WILC_VMM_ENTRY_COUNT GENMASK(8, 3)
+#define WILC_VMM_ENTRY_AVAILABLE BIT(2)
/*******************************************/
/* E0 and later Interrupt flags. */
/*******************************************/
@@ -150,14 +238,16 @@
/* 21: INT5 flag */
/*******************************************/
#define IRG_FLAGS_OFFSET 16
-#define IRQ_DMA_WD_CNT_MASK ((1ul << IRG_FLAGS_OFFSET) - 1)
+#define IRQ_DMA_WD_CNT_MASK GENMASK(IRG_FLAGS_OFFSET - 1, 0)
#define INT_0 BIT(IRG_FLAGS_OFFSET)
#define INT_1 BIT(IRG_FLAGS_OFFSET + 1)
#define INT_2 BIT(IRG_FLAGS_OFFSET + 2)
#define INT_3 BIT(IRG_FLAGS_OFFSET + 3)
#define INT_4 BIT(IRG_FLAGS_OFFSET + 4)
#define INT_5 BIT(IRG_FLAGS_OFFSET + 5)
-#define MAX_NUM_INT 6
+#define MAX_NUM_INT 5
+#define IRG_FLAGS_MASK GENMASK(IRG_FLAGS_OFFSET + MAX_NUM_INT, \
+ IRG_FLAGS_OFFSET)
/*******************************************/
/* E0 and later Interrupt flags. */
@@ -185,6 +275,7 @@
#define DATA_INT_EXT INT_0
#define ALL_INT_EXT DATA_INT_EXT
#define NUM_INT_EXT 1
+#define UNHANDLED_IRQ_MASK GENMASK(MAX_NUM_INT - 1, NUM_INT_EXT)
#define DATA_INT_CLR CLR_INT0
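
A quick standalone check of the reworked mask definitions above: IRG_FLAGS_MASK spans bits 21:16, i.e. INT_0 through INT_5 (note the GENMASK() upper bound is inclusive, so the mask covers six bits even though MAX_NUM_INT is now 5), and the GENMASK() form of IRQ_DMA_WD_CNT_MASK matches the old ((1ul << 16) - 1). The GENMASK()/BIT() stand-ins are assumed equivalent to <linux/bits.h>.

#include <assert.h>

#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
#define BIT(n)            (1u << (n))

#define IRG_FLAGS_OFFSET  16
#define MAX_NUM_INT       5
#define IRG_FLAGS_MASK    GENMASK(IRG_FLAGS_OFFSET + MAX_NUM_INT, \
                                  IRG_FLAGS_OFFSET)

int main(void)
{
        unsigned int all_int = BIT(16) | BIT(17) | BIT(18) |
                               BIT(19) | BIT(20) | BIT(21);

        assert(IRG_FLAGS_MASK == all_int);
        /* matches the replaced IRQ_DMA_WD_CNT_MASK definition */
        assert(GENMASK(IRG_FLAGS_OFFSET - 1, 0) == 0xffff);
        return 0;
}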
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index bdd7f414fdbb..88e894dd3568 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -355,7 +355,7 @@
/* Commonly used basic types */
struct hfa384x_bytestr {
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct hfa384x_bytestr32 {
@@ -421,7 +421,7 @@ struct hfa384x_authenticate_station_data {
/*-- Configuration Record: WPAData (data portion only) --*/
struct hfa384x_wpa_data {
__le16 datalen;
- u8 data[0]; /* max 80 */
+ u8 data[]; /* max 80 */
} __packed;
/*--------------------------------------------------------------------
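
The data[0] to data[] conversions above move these structs to C99 flexible array members, which compilers and fortify checks can actually reason about; the allocation idiom is unchanged in spirit. A hedged kernel-context sketch with a made-up struct name, using struct_size() from <linux/overflow.h>:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_bytestr {
        __le16 len;
        u8 data[];              /* was: u8 data[0] */
} __packed;

static struct demo_bytestr *demo_alloc(size_t n)
{
        struct demo_bytestr *p;

        /* struct_size() guards the header + n * sizeof(u8) arithmetic */
        p = kmalloc(struct_size(p, data, n), GFP_KERNEL);
        if (p)
                p->len = cpu_to_le16(n);
        return p;
}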
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index b71756ab0394..fa1bf8b069fd 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3254,13 +3254,16 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
struct p80211_rxmeta *rxmeta;
u16 data_len;
u16 fc;
+ u16 status;
/* Byte order convert once up front. */
le16_to_cpus(&usbin->rxfrm.desc.status);
le32_to_cpus(&usbin->rxfrm.desc.time);
/* Now handle frame based on port# */
- switch (HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status)) {
+ status = HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status);
+
+ switch (status) {
case 0:
fc = le16_to_cpu(usbin->rxfrm.desc.frame_control);
@@ -3317,8 +3320,9 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
break;
default:
- netdev_warn(hw->wlandev->netdev, "Received frame on unsupported port=%d\n",
- HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status));
+ netdev_warn(hw->wlandev->netdev,
+ "Received frame on unsupported port=%d\n",
+ status);
break;
}
}
@@ -3372,6 +3376,8 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) {
pr_debug("overlen frm: len=%zd\n",
skblen - sizeof(struct p80211_caphdr));
+
+ return;
}
skb = dev_alloc_skb(skblen);
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index ac254542fde6..3dcdd022da61 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -204,7 +204,7 @@ struct p80211pstr {
struct p80211pstrd {
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
/* Maximum pascal string */
@@ -249,7 +249,7 @@ struct p80211itemd {
u32 did;
u16 status;
u16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/* message data item for int, BOUNDEDINT, ENUMINT */
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 352556f6870a..4689b2170e4f 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -180,6 +180,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
cancel_work_sync(&hw->link_bh);
cancel_work_sync(&hw->commsqual_bh);
+ cancel_work_sync(&hw->usb_work);
/* Now we complete any outstanding commands
* and tell everyone who is waiting for their
diff --git a/drivers/staging/wusbcore/Documentation/wusb-cbaf b/drivers/staging/wusbcore/Documentation/wusb-cbaf
deleted file mode 100644
index 8b3d43efce90..000000000000
--- a/drivers/staging/wusbcore/Documentation/wusb-cbaf
+++ /dev/null
@@ -1,130 +0,0 @@
-#! /bin/bash
-#
-
-set -e
-
-progname=$(basename $0)
-function help
-{
- cat <<EOF
-Usage: $progname COMMAND DEVICEs [ARGS]
-
-Command for manipulating the pairing/authentication credentials of a
-Wireless USB device that supports wired-mode Cable-Based-Association.
-
-Works in conjunction with the wusb-cba.ko driver from http://linuxuwb.org.
-
-
-DEVICE
-
- sysfs path to the device to authenticate; for example, both of
- these refer to the same device:
-
- /sys/devices/pci0000:00/0000:00:1d.7/usb1/1-4/1-4.4/1-4.4:1.1
- /sys/bus/usb/drivers/wusb-cbaf/1-4.4:1.1
-
-COMMAND/ARGS are
-
- start
-
- Start a WUSB host controller (by setting up a CHID)
-
- set-chid DEVICE HOST-CHID HOST-BANDGROUP HOST-NAME
-
- Sets host information in the device; after this you can call
- get-cdid to see how this device reports itself to us.
-
- get-cdid DEVICE
-
- Get the device ID associated to the HOST-CHID we sent with
- 'set-chid'. We might not know about it.
-
- set-cc DEVICE
-
- If we allow the device to connect, set a random new CDID and CK
- (connection key). The device saves them for the next time it wants
- to connect wirelessly. We save them for that next time also so we
- can authenticate the device (when we see the CDID it uses to
- identify itself) and use the CK to talk to it over an encrypted
- channel.
-
-CHID is always 16 hex bytes in 'XX YY ZZ...' form
-BANDGROUP is almost always 0001
-
-Examples:
-
- You can default most arguments to '' to get a sane value:
-
- $ $progname set-chid '' '' '' "My host name"
-
- A full sequence:
-
- $ $progname set-chid '' '' '' "My host name"
- $ $progname get-cdid ''
- $ $progname set-cc ''
-
-EOF
-}
-
-
-# Defaults
-# FIXME: CHID should come from a database :), band group from the host
-host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff"
-host_band_group="0001"
-host_name=$(hostname)
-
-devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)"
-hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)"
-
-result=0
-case $1 in
- start)
- for dev in ${2:-$hdevs}
- do
- echo $host_CHID > $dev/wusb_chid
- echo I: started host $(basename $dev) >&2
- done
- ;;
- stop)
- for dev in ${2:-$hdevs}
- do
- echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid
- echo I: stopped host $(basename $dev) >&2
- done
- ;;
- set-chid)
- shift
- for dev in ${2:-$devs}; do
- echo "${4:-$host_name}" > $dev/wusb_host_name
- echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups
- echo ${2:-$host_CHID} > $dev/wusb_chid
- done
- ;;
- get-cdid)
- for dev in ${2:-$devs}
- do
- cat $dev/wusb_cdid
- done
- ;;
- set-cc)
- for dev in ${2:-$devs}; do
- shift
- CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
- CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
- echo "$CDID" > $dev/wusb_cdid
- echo "$CK" > $dev/wusb_ck
-
- echo I: CC set >&2
- echo "CHID: $(cat $dev/wusb_chid)"
- echo "CDID:$CDID"
- echo "CK: $CK"
- done
- ;;
- help|h|--help|-h)
- help
- ;;
- *)
- echo "E: Unknown usage" 1>&2
- help 1>&2
- result=1
-esac
-exit $result
diff --git a/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst b/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst
deleted file mode 100644
index dc5e21609bb5..000000000000
--- a/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst
+++ /dev/null
@@ -1,457 +0,0 @@
-================================
-Linux UWB + Wireless USB + WiNET
-================================
-
- Copyright (C) 2005-2006 Intel Corporation
-
- Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License version
- 2 as published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
-
-Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for
-updated content.
-
- * Design-overview.txt-1.8
-
-This code implements a Ultra Wide Band stack for Linux, as well as
-drivers for the USB based UWB radio controllers defined in the
-Wireless USB 1.0 specification (including Wireless USB host controller
-and an Intel WiNET controller).
-
-.. Contents
- 1. Introduction
- 1. HWA: Host Wire adapters, your Wireless USB dongle
-
- 2. DWA: Device Wired Adaptor, a Wireless USB hub for wired
- devices
- 3. WHCI: Wireless Host Controller Interface, the PCI WUSB host
- adapter
- 2. The UWB stack
- 1. Devices and hosts: the basic structure
-
- 2. Host Controller life cycle
-
- 3. On the air: beacons and enumerating the radio neighborhood
-
- 4. Device lists
- 5. Bandwidth allocation
-
- 3. Wireless USB Host Controller drivers
-
- 4. Glossary
-
-
-Introduction
-============
-
-UWB is a wide-band communication protocol that also serves as the
-low-level protocol for others (much like TCP sits on IP). Currently
-these others are Wireless USB and TCP/IP, but it seems Bluetooth and
-FireWire/1394 are coming along.
-
-UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of
-~-41dB (or 0.074 uW/MHz--geography-specific data is still being
-negotiated w/ regulators, so watch for changes). That band is divided
-into a bunch of ~1.5 GHz wide channels (or band groups) composed of
-three subbands/subchannels (528 MHz each). Each channel is independent
-of the others, so you could consider them different "busses". Initially
-this driver treats them all as a single one.
-
-Radio time is divided into 65536 us long /superframes/, each one divided
-into 256 /MASs/ (Media Allocation Slots) of 256 us each, which are the
-basic time/media allocation units for transferring data. At the
-beginning of each superframe there is a Beacon Period (BP), where every
-device transmits its beacon on a single MAS. The length of the BP
-depends on how many devices are present and the length of their beacons.
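A hedged sketch of that timing arithmetic, with hypothetical helper names: a microsecond timestamp maps onto the superframe/MAS grid by plain division.

    #include <stdint.h>

    #define UWB_MAS_US              256u
    #define UWB_MAS_PER_SUPERFRAME  256u
    #define UWB_SUPERFRAME_US       (UWB_MAS_US * UWB_MAS_PER_SUPERFRAME)

    /* Hypothetical helper: locate a microsecond timestamp within the
     * superframe/MAS grid described above. */
    static void uwb_locate_us(uint64_t t_us, uint64_t *superframe,
                              uint32_t *mas)
    {
            *superframe = t_us / UWB_SUPERFRAME_US;
            *mas = (uint32_t)((t_us % UWB_SUPERFRAME_US) / UWB_MAS_US);
    }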
-
-Devices have a MAC address (fixed, 48 bit) and a device address
-(changeable, 16 bit) and send periodic beacons to advertise themselves
-and pass info on what they are and do. They advertise their capabilities
-and a bunch of other stuff.
-
-The different logical parts of this driver are:
-
- *
-
- *UWB*: the Ultra-Wide-Band stack -- manages the radio and
- associated spectrum to allow for devices sharing it. It allows
- control of bandwidth assignment, beaconing, scanning, etc.
-
- *
-
- *WUSB*: the layer that sits on top of UWB to provide Wireless USB.
- The Wireless USB spec defines means to control a UWB radio and to
- do the actual WUSB.
-
-
-HWA: Host Wire adapters, your Wireless USB dongle
--------------------------------------------------
-
-WUSB also defines a device called a Host Wire Adaptor (HWA), which in
-mere terms is a USB dongle that enables your PC to have UWB and Wireless
-USB. The Wireless USB Host Controller in a HWA looks to the host like a
-[Wireless] USB controller connected via USB (!)
-
-The HWA itself is broken into two or three main interfaces:
-
- *
-
- *RC*: Radio control -- this implements an interface to the
- Ultra-Wide-Band radio controller. The driver for this implements a
- USB-based UWB Radio Controller to the UWB stack.
-
- *
-
- *HC*: the wireless USB host controller. It looks like a USB host
- whose root port is the radio and the WUSB devices connect to it.
- To the system it looks like a separate USB host. The driver (will)
- implement a USB host controller (similar to UHCI, OHCI or EHCI)
- for which the root hub is the radio...To reiterate: it is a USB
- controller that is connected via USB instead of PCI.
-
- *
-
- *WINET*: some HW provides a WiNET interface (IP over UWB). This
- package provides a driver for it (it looks like a network
- interface, winetX). The driver detects when there is a link up for
- its type and kicks into gear.
-
-
-DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
----------------------------------------------------------------
-
-These are the complement to HWAs. They are a USB host for connecting
-wired devices, but one that is connected to your PC via Wireless USB.
-To the system it looks like yet another USB host. To the untrained
-eye, it looks like a hub that connects upstream wirelessly.
-
-We still offer no support for this; however, it should share a lot of
-code with the HWA-RC driver; there is a bunch of factorization work that
-has been done to support that in upcoming releases.
-
-
-WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
--------------------------------------------------------------------
-
-This is your usual PCI device that implements WHCI. Similar in concept
-to EHCI, it allows your wireless USB devices (including DWAs) to connect
-to your host via a PCI interface. As in the case of the HWA, it has a
-Radio Control interface and the WUSB Host Controller interface per se.
-
-There is still no driver support for this, but there will be in
-upcoming releases.
-
-
-The UWB stack
-=============
-
-The main mission of the UWB stack is to keep a tally of which devices
-are in radio proximity to allow drivers to connect to them. As well, it
-provides an API for controlling the local radio controllers (RCs from
-now on), such as to start/stop beaconing, scan, allocate bandwidth, etc.
-
-
-Devices and hosts: the basic structure
---------------------------------------
-
-The main building block here is the UWB device (struct uwb_dev). For
-each device that pops up in radio presence (ie: the UWB host receives a
-beacon from it) you get a struct uwb_dev that will show up in
-/sys/bus/uwb/devices.
-
-For each RC that is detected, a new struct uwb_rc and struct uwb_dev are
-created. An entry is also created in /sys/class/uwb_rc for each RC.
-
-Each RC driver is implemented by a separate driver that plugs into the
-interface that the UWB stack provides through a struct uwb_rc_ops. The
-spec creators have been nice enough to make the message format the same
-for HWA and WHCI RCs, so the driver is really a very thin transport that
-moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/]
-and sends the replies and notifications back to the API
-[/uwb_rc_neh_grok()/]. Notifications are handed to the UWB daemon, which
-is chartered, among other things, to keep tabs on how the UWB radio
-neighborhood looks, creating and destroying devices as they show up or
-disappear.
-
-Command execution is very simple: a command block is sent and an event
-block or reply is expected back. For sending/receiving command/events, a
-handle called /neh/ (Notification/Event Handle) is opened with
-/uwb_rc_neh_open()/.
-
-The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for
-the USB connected HWA. Eventually, drivers/whci-rc.c will do the same
-for the PCI connected WHCI controller.
-
-
-Host Controller life cycle
---------------------------
-
-So let's say we connect a dongle to the system: it is detected and
-firmware uploaded if needed [for Intel's i1480
-/drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated.
-Now we have a real HWA device connected and
-/drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up, sets up the
-Wire-Adaptor environment and then sucks it into the UWB stack's vision
-of the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/].
-
- *
-
- [*] The stack should put a new RC to scan for devices
- [/uwb_rc_scan()/] so it finds what's available around and tries to
- connect to them, but this is policy stuff and should be driven
- from user space. As of now, the operator is expected to do it
- manually; see the release notes for documentation on the procedure.
-
-When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/
-takes care of tearing everything down safely (or not...).
-
-
-On the air: beacons and enumerating the radio neighborhood
-----------------------------------------------------------
-
-So assuming we have devices and we have agreed for a channel to connect
-on (let's say 9), we put the new RC to beacon:
-
- *
-
- $ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon
-
-Now it is visible. If there were other devices in the same radio channel
-and beacon group (that's what the zero is for), the dongle's radio
-control interface will send beacon notifications on its
-notification/event endpoint (NEEP). The beacon notifications are part of
-the event stream that is funneled into the API with
-/drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to the UWBD, the UWB
-daemon through a notification list.
-
-UWBD wakes up and scans the event list; finds a beacon and adds it to
-the BEACON CACHE (/uwb_beca/). If it receives a number of beacons from
-the same device, it considers it to be 'onair' and creates a new device
-[/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons
-are received for some time, the device is considered gone and wiped out
-[uwbd periodically calls /uwb/beacon.c:uwb_beca_purge()/, which purges
-the beacon cache of dead devices].
-
-
-Device lists
-------------
-
-All UWB devices are kept in the list of the struct bus_type uwb_bus_type.
-
-
-Bandwidth allocation
---------------------
-
-The UWB stack maintains a local copy of DRP availability through
-processing of incoming *DRP Availability Change* notifications. This
-local copy is currently used to present the current bandwidth
-availability to the user through the sysfs file
-/sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth
-availability information will be used by the bandwidth reservation
-routines.
-
-The bandwidth reservation routines are in progress and are thus not
-present in the current release. When completed they will enable a user
-to initiate DRP reservation requests through interaction with sysfs. DRP
-reservation requests from remote UWB devices will also be handled. The
-bandwidth management done by the UWB stack will include callbacks to the
-higher layers that will enable them to use the reservations upon
-completion. [Note: The bandwidth reservation work is in progress and
-subject to change.]
-
-
-Wireless USB Host Controller drivers
-====================================
-
-*WARNING* This section needs a lot of work!
-
-As explained above, there are three different types of HCs in the WUSB
-world: HWA-HC, DWA-HC and WHCI-HC.
-
-HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB
-connected controllers), and their transfer management system is almost
-identical. So is their notification delivery system.
-
-HWA-HC and WHCI-HC share that they are both WUSB host controllers, so
-they have to deal with the WUSB device life cycle and the maintenance
-of the wireless root hub.
-
-HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has
-three endpoints (Notifications, Data Transfer In and Data Transfer
-Out--known as NEP, DTI and DTO in the code).
-
-We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster
-ID and tell the HC to use all that. Then we start it. This means the HC
-starts sending MMCs.
-
- *
-
- The MMCs are blocks of data defined somewhere in the WUSB1.0 spec
- that define a stream in the UWB channel time allocated for sending
- WUSB IEs (host to device commands/notifications) and Device
- Notifications (device initiated to host). Each host defines a
- unique Wireless USB cluster through MMCs. Devices can connect to a
- single cluster at a time. The IEs are Information Elements, and
- among them are the bandwidth allocations that tell each device
- when it can transmit or receive.
-
-Now it all depends on external stimuli.
-
-New device connection
----------------------
-
-A new device pops up and scans the radio looking for MMCs that reveal
-the existence of Wireless USB channels. Once one (or more) are found, it
-selects which one to connect to and sends a /DN_Connect/ (device
-notification connect) during the DNTS (Device Notification Time
-Slot--announced in the MMCs).
-
-HC picks the /DN_Connect/ out (the nep module sends it to notif.c for
-delivery into /devconnect/). This starts the authentication process for
-the device. First we allocate a /fake port/ and assign an
-unauthenticated address (128 to 255--what we really do is
-0x80 | fake_port_idx). We fiddle with the fake port status and /hub_wq/
-sees a new connection, so it moves on to enable the fake port with a reset.
-
-So now we are in the reset path -- we know we have a not-yet-enumerated
-device with an unauthorized address; we ask user space to authenticate
-(FIXME: not yet done, similar to bluetooth pairing), then we do the key
-exchange (FIXME: not yet done) and issue a /set address 0/ to bring the
-device to the default state. Device is authenticated.
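A hedged sketch of the addressing rule just described; the helper name is hypothetical:

    #include <linux/types.h>

    /* Hypothetical: unauthenticated devices get addresses in 128..255,
     * derived from the fake port index as described above. */
    static inline u8 wusb_unauth_addr(u8 fake_port_idx)
    {
            return 0x80 | fake_port_idx;
    }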
-
-From here, the USB stack takes control through the usb_hcd ops. hub_wq
-has seen the port status changes, as we have been toggling them. It will
-start enumerating and doing transfers through usb_hcd->urb_enqueue() to
-read descriptors and move our data.
-
-Device life cycle and keep alives
----------------------------------
-
-Every time there is a successful transfer to/from a device, we update a
-per-device activity timestamp. Every now and then we check, and if the
-activity timestamp gets old, we ping the device by sending it a
-Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this
-arrives to us as a notification through
-devconnect.c:wusb_handle_dn_alive()). If a device times out, we
-disconnect it from the system (cleaning up internal information and
-toggling the bits in the fake hub port, which kicks hub_wq into removing
-the rest of the stuff).
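A minimal hedged sketch of that staleness test; the names are hypothetical, and the real walk lives in __wusb_check_devs():

    #include <linux/jiffies.h>

    /* Hypothetical: decide whether a device's activity timestamp is old
     * enough to warrant a Keep Alive IE. */
    static bool wusb_dev_needs_keepalive(unsigned long last_activity,
                                         unsigned long timeout)
    {
            return time_after(jiffies, last_activity + timeout);
    }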
-
-This is done through devconnect:__wusb_check_devs(), which will scan the
-device list looking for those that need refreshing.
-
-If the device wants to disconnect, it will either die (ugly) or send a
-/DN_Disconnect/ that will prompt a disconnection from the system.
-
-Sending and receiving data
---------------------------
-
-Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is
-/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and
-DWAs.
-
-Each HC has a number of rpipes and buffers that can be assigned to them;
-when doing a data transfer (xfer), first the rpipe has to be aimed and
-prepared (buffers assigned), then we can start queueing requests for
-data in or out.
-
-Data buffers have to be segmented out before sending--so we first send a
-header (segment request) and then, if there is any data, a data buffer
-immediately after to the DTI interface (yep, even the request). If our
-buffer is bigger than the max segment size, then we just do multiple
-requests.
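That "multiple requests" split is plain ceiling division; a hypothetical sketch:

    #include <linux/kernel.h>

    /* Hypothetical: number of segments needed to move len bytes when a
     * segment carries at most max_seg bytes. */
    static unsigned int wusb_xfer_nsegs(size_t len, size_t max_seg)
    {
            return DIV_ROUND_UP(len, max_seg);
    }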
-
-[This sucks, because doing USB scatter-gather in Linux is resource
-intensive, if any...not that the current approach is not. It just has to
-be cleaned up a lot :)].
-
-If reading, we don't send data buffers, just the segment headers saying
-we want to read segments.
-
-When the xfer is executed, we receive a notification that says data is
-ready in the DTI endpoint (handled through
-xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a
-descriptor that gives us the status of the transfer, its identification
-(given when we issued it) and the segment number. If it was a data read,
-we issue another URB to read into the destination buffer the chunk of
-data coming out of the remote endpoint. Done, wait for the next guy. The
-callbacks for the URBs issued from here are the ones that will declare
-the xfer complete at some point and call its callback.
-
-Seems simple, but the implementation is not trivial.
-
- *
-
- *WARNING* Old!!
-
-The main xfer descriptor, wa_xfer (equivalent to a URB), contains an
-array of segments, tallies on segments and buffers, and callback
-information. Buried in there are a lot of URBs for executing the
-segment and buffer transfers.
-
-For OUT xfers, there is an array of segments, one URB for each, and
-another array of buffer URBs. When submitting, we submit URBs for
-segment request 1, buffer 1, segment 2, buffer 2...etc. Then we wait on
-the DTI for xfer result data; when all the segments are complete, we
-call the callback to finalize the transfer.
-
-For IN xfers, we only issue URBs for the segments we want to read and
-then wait for the xfer result data.
-
-URB mapping into xfers
-^^^^^^^^^^^^^^^^^^^^^^
-
-This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an
-rpipe to the endpoint where we have to transmit, create a transfer
-context (wa_xfer) and submit it. When the xfer is done, our callback is
-called and we assign the status bits and release the xfer resources.
-
-In dequeue() we are basically cancelling/aborting the transfer. We issue
-an xfer abort request to the HC, cancel all the URBs we had submitted
-but not yet completed, and when all that is done, the xfer callback will
-be called--this will call the URB callback.
-
-
-Glossary
-========
-
-*DWA* -- Device Wire Adapter
-
-USB host, wired for downstream devices, upstream connects wirelessly
-with Wireless USB.
-
-*EVENT* -- Response to a command on the NEEP
-
-*HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB
-
-*NEH* -- Notification/Event Handle
-
-Handle/file descriptor for receiving notifications or events. The WA
-code requires you to get one of these to listen for notifications or
-events on the NEEP.
-
-*NEEP* -- Notification/Event EndPoint
-
-Stuff related to the management of the first endpoint of a HWA USB
-dongle that is used to deliver a stream of events and notifications to
-the host.
-
-*NOTIFICATION* -- Message coming in the NEEP as response to something.
-
-*RC* -- Radio Control
-
-Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by
-InakyPerezGonzalez)
diff --git a/drivers/staging/wusbcore/Kconfig b/drivers/staging/wusbcore/Kconfig
deleted file mode 100644
index a559d023b508..000000000000
--- a/drivers/staging/wusbcore/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Wireless USB Core configuration
-#
-config USB_WUSB
- tristate "Enable Wireless USB extensions"
- depends on UWB && USB
- select CRYPTO
- select CRYPTO_AES
- select CRYPTO_CCM
- help
- Enable the host-side support for Wireless USB.
-
- To compile this support select Y (built in). It is safe to
- select even if you don't have the hardware.
-
-config USB_WUSB_CBAF
- tristate "Support WUSB Cable Based Association (CBA)"
- depends on USB
- help
- Some WUSB devices support Cable Based Association. It's used to
- enable the secure communication between the host and the
- device.
-
- Enable this option if your WUSB device must be connected
- via wired USB before establishing a wireless link.
-
- It is safe to select even if you don't have compatible
- hardware.
-
-config USB_WUSB_CBAF_DEBUG
- bool "Enable CBA debug messages"
- depends on USB_WUSB_CBAF
- help
- Say Y here if you want the CBA to produce a bunch of debug messages
- to the system log. Select this if you are having a problem with
- CBA support and want to see more of what is going on.
-
-source "drivers/staging/wusbcore/host/Kconfig"
diff --git a/drivers/staging/wusbcore/Makefile b/drivers/staging/wusbcore/Makefile
deleted file mode 100644
index b47b874268ac..000000000000
--- a/drivers/staging/wusbcore/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-$(CONFIG_USB_WUSB_CBAF_DEBUG) := -DDEBUG
-
-obj-$(CONFIG_USB_WUSB) += wusbcore.o
-obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o
-obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o
-
-
-wusbcore-y := \
- crypto.o \
- devconnect.o \
- dev-sysfs.o \
- mmc.o \
- pal.o \
- rh.o \
- reservation.o \
- security.o \
- wusbhc.o
-
-wusb-cbaf-y := cbaf.o
-
-wusb-wa-y := \
- wa-hc.o \
- wa-nep.o \
- wa-rpipe.o \
- wa-xfer.o
-
-obj-y += host/
diff --git a/drivers/staging/wusbcore/TODO b/drivers/staging/wusbcore/TODO
deleted file mode 100644
index abae57000534..000000000000
--- a/drivers/staging/wusbcore/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-TODO: Remove in late 2019 unless there are users
-
-There seems to not be any real wireless USB devices anywhere in the wild
-anymore. It turned out to be a failed technology :(
-
-This will be removed from the tree if no one objects.
-
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/wusbcore/cbaf.c b/drivers/staging/wusbcore/cbaf.c
deleted file mode 100644
index 57062eaf7558..000000000000
--- a/drivers/staging/wusbcore/cbaf.c
+++ /dev/null
@@ -1,645 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB - Cable Based Association
- *
- *
- * Copyright (C) 2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- *
- * WUSB devices have to be paired (associated in WUSB lingo) so
- * that they can connect to the system.
- *
- * One way of pairing is using CBA--Cable Based Association. The first
- * time you plug in the device with a cable, association is done between
- * host and device; on subsequent times, you can connect wirelessly
- * without having to associate again. That's the idea.
- *
- * This driver does nothing Earth shattering. It just provides an
- * interface to chat with the wire-connected device so we can get a
- * CDID (device ID) that might have been previously associated to a
- * CHID (host ID) and to set up a new <CHID,CDID,CK> triplet
- * (connection context), with the CK being the secret, or connection
- * key. This is the pairing data.
- *
- * When a device with the CBA capability connects, the probe routine
- * just creates a bunch of sysfs files that a user space enumeration
- * manager uses to allow it to connect wirelessly to the system or not.
- *
- * The process goes like this:
- *
- * 1. Device plugs, cbaf is loaded, notifications happen.
- *
- * 2. The connection manager (CM) sees a device with CBAF capability
- * (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
- *
- * 3. The CM writes the host name, supported band groups, and the CHID
- * (host ID) into the wusb_host_name, wusb_host_band_groups and
- * wusb_chid files. These get sent to the device and the CDID (if
- * any) for this host is requested.
- *
- * 4. The CM can verify that the device's supported band groups
- * (wusb_device_band_groups) are compatible with the host.
- *
- * 5. The CM reads the wusb_cdid file.
- *
- * 6. The CM looks up its database
- *
- * 6.1 If it has a matching CHID,CDID entry, the device has been
- * authorized before (paired) and nothing further needs to be
- * done.
- *
- * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in
- * its database), the device is assumed to be not known. The CM
- * may associate the host with the device by writing a randomly
- * generated CDID to wusb_cdid and then a random CK to wusb_ck
- * (this uploads the new CC to the device).
- *
- * The CM may choose to prompt the user before associating with a new
- * device.
- *
- * 7. Device is unplugged.
- *
- * When the device tries to connect wirelessly, it will present its
- * CDID to the WUSB host controller. The CM will query the
- * database. If the CHID/CDID pair is found, it will (with a 4-way
- * handshake) challenge the device to demonstrate it has the CK secret
- * key (from our database) without actually exchanging it. Once
- * satisfied, crypto keys are derived from the CK, the device is
- * connected and all communication is encrypted.
- *
- * References:
- * [WUSB-AM] Association Models Supplement to the Certified Wireless
- * Universal Serial Bus Specification, version 1.0.
- */
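A hedged user-space sketch of step 3 above -- the CM writing host identity into the sysfs files; the directory path is illustrative and error handling is trimmed:

    #include <stdio.h>

    /* Illustrative connection-manager snippet: write host name, band
     * groups and CHID into the device's sysfs files (step 3 above). */
    static int cm_send_host_info(const char *dir, const char *name,
                                 const char *bands, const char *chid)
    {
            const char *file[] = { "wusb_host_name",
                                   "wusb_host_band_groups", "wusb_chid" };
            const char *val[] = { name, bands, chid };
            char path[256];
            int i;

            for (i = 0; i < 3; i++) {
                    FILE *f;

                    snprintf(path, sizeof(path), "%s/%s", dir, file[i]);
                    f = fopen(path, "w");
                    if (!f)
                            return -1;
                    fprintf(f, "%s\n", val[i]);
                    fclose(f);
            }
            return 0;
    }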
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/usb.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include "../uwb/uwb.h"
-#include "include/wusb.h"
-#include "include/association.h"
-
-#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */
-
-/* An instance of a Cable-Based-Association-Framework device */
-struct cbaf {
- struct usb_device *usb_dev;
- struct usb_interface *usb_iface;
- void *buffer;
- size_t buffer_size;
-
- struct wusb_ckhdid chid;
- char host_name[CBA_NAME_LEN];
- u16 host_band_groups;
-
- struct wusb_ckhdid cdid;
- char device_name[CBA_NAME_LEN];
- u16 device_band_groups;
-
- struct wusb_ckhdid ck;
-};
-
-/*
- * Verify that a CBAF USB-interface has what we need
- *
- * According to [WUSB-AM], CBA devices should provide at least two
- * interfaces:
- * - RETRIEVE_HOST_INFO
- * - ASSOCIATE
- *
- * If the device doesn't provide these interfaces, we do not know how
- * to deal with it.
- */
-static int cbaf_check(struct cbaf *cbaf)
-{
- int result;
- struct device *dev = &cbaf->usb_iface->dev;
- struct wusb_cbaf_assoc_info *assoc_info;
- struct wusb_cbaf_assoc_request *assoc_request;
- size_t assoc_size;
- void *itr, *top;
- int ar_rhi = 0, ar_assoc = 0;
-
- result = usb_control_msg(
- cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
- CBAF_REQ_GET_ASSOCIATION_INFORMATION,
- USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- cbaf->buffer, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "Cannot get available association types: %d\n",
- result);
- return result;
- }
-
- assoc_info = cbaf->buffer;
- if (result < sizeof(*assoc_info)) {
- dev_err(dev, "Not enough data to decode association info "
- "header (%zu vs %zu bytes required)\n",
- (size_t)result, sizeof(*assoc_info));
- return result;
- }
-
- assoc_size = le16_to_cpu(assoc_info->Length);
- if (result < assoc_size) {
- dev_err(dev, "Not enough data to decode association info "
- "(%zu vs %zu bytes required)\n",
- (size_t)result, assoc_size);
- return result;
- }
- /*
- * From now on, we just verify, but won't error out unless we
- * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE}
- * types.
- */
- itr = cbaf->buffer + sizeof(*assoc_info);
- top = cbaf->buffer + assoc_size;
- dev_dbg(dev, "Found %u association requests (%zu bytes)\n",
- assoc_info->NumAssociationRequests, assoc_size);
-
- while (itr < top) {
- u16 ar_type, ar_subtype;
- u32 ar_size;
- const char *ar_name;
-
- assoc_request = itr;
-
- if (top - itr < sizeof(*assoc_request)) {
- dev_err(dev, "Not enough data to decode association "
- "request (%zu vs %zu bytes needed)\n",
- top - itr, sizeof(*assoc_request));
- break;
- }
-
- ar_type = le16_to_cpu(assoc_request->AssociationTypeId);
- ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId);
- ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize);
- ar_name = "unknown";
-
- switch (ar_type) {
- case AR_TYPE_WUSB:
- /* Verify we have what is mandated by [WUSB-AM]. */
- switch (ar_subtype) {
- case AR_TYPE_WUSB_RETRIEVE_HOST_INFO:
- ar_name = "RETRIEVE_HOST_INFO";
- ar_rhi = 1;
- break;
- case AR_TYPE_WUSB_ASSOCIATE:
- /* send assoc data */
- ar_name = "ASSOCIATE";
- ar_assoc = 1;
- break;
- }
- break;
- }
-
- dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
- "(%zu bytes): %s\n",
- assoc_request->AssociationDataIndex, ar_type,
- ar_subtype, (size_t)ar_size, ar_name);
-
- itr += sizeof(*assoc_request);
- }
-
- if (!ar_rhi) {
- dev_err(dev, "Missing RETRIEVE_HOST_INFO association "
- "request\n");
- return -EINVAL;
- }
- if (!ar_assoc) {
- dev_err(dev, "Missing ASSOCIATE association request\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
- .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
- .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
- .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
- .CHID_hdr = WUSB_AR_CHID,
- .LangID_hdr = WUSB_AR_LangID,
- .HostFriendlyName_hdr = WUSB_AR_HostFriendlyName,
-};
-
-/* Send WUSB host information (CHID and name) to a CBAF device */
-static int cbaf_send_host_info(struct cbaf *cbaf)
-{
- struct wusb_cbaf_host_info *hi;
- size_t name_len;
- size_t hi_size;
-
- hi = cbaf->buffer;
- memset(hi, 0, sizeof(*hi));
- *hi = cbaf_host_info_defaults;
- hi->CHID = cbaf->chid;
- hi->LangID = 0; /* FIXME: I guess... */
- strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN);
- name_len = strlen(cbaf->host_name);
- hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
- hi_size = sizeof(*hi) + name_len;
-
- return usb_control_msg(cbaf->usb_dev,
- usb_sndctrlpipe(cbaf->usb_dev, 0),
- CBAF_REQ_SET_ASSOCIATION_RESPONSE,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0x0101,
- cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- hi, hi_size, USB_CTRL_SET_TIMEOUT);
-}
-
-/*
- * Get device's information (CDID) associated to CHID
- *
- * The device will return its information (CDID, name, bandgroups)
- * associated to the CHID we have set before, or 0 CDID and default
- * name and bandgroup if no CHID set or unknown.
- */
-static int cbaf_cdid_get(struct cbaf *cbaf)
-{
- int result;
- struct device *dev = &cbaf->usb_iface->dev;
- struct wusb_cbaf_device_info *di;
- size_t needed;
-
- di = cbaf->buffer;
- result = usb_control_msg(
- cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
- CBAF_REQ_GET_ASSOCIATION_REQUEST,
- USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- di, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "Cannot request device information: %d\n",
- result);
- return result;
- }
-
- needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length);
- if (result < needed) {
- dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
- "%zu bytes needed)\n", (size_t)result, needed);
- return -ENOENT;
- }
-
- strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
- cbaf->cdid = di->CDID;
- cbaf->device_band_groups = le16_to_cpu(di->BandGroups);
-
- return 0;
-}
-
-static ssize_t cbaf_wusb_chid_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- return sprintf(buf, "%16ph\n", cbaf->chid.data);
-}
-
-static ssize_t cbaf_wusb_chid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- ssize_t result;
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- result = sscanf(buf,
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx",
- &cbaf->chid.data[0] , &cbaf->chid.data[1],
- &cbaf->chid.data[2] , &cbaf->chid.data[3],
- &cbaf->chid.data[4] , &cbaf->chid.data[5],
- &cbaf->chid.data[6] , &cbaf->chid.data[7],
- &cbaf->chid.data[8] , &cbaf->chid.data[9],
- &cbaf->chid.data[10], &cbaf->chid.data[11],
- &cbaf->chid.data[12], &cbaf->chid.data[13],
- &cbaf->chid.data[14], &cbaf->chid.data[15]);
-
- if (result != 16)
- return -EINVAL;
-
- result = cbaf_send_host_info(cbaf);
- if (result < 0)
- return result;
- result = cbaf_cdid_get(cbaf);
- if (result < 0)
- return result;
- return size;
-}
-static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
-
-static ssize_t cbaf_wusb_host_name_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name);
-}
-
-static ssize_t cbaf_wusb_host_name_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- ssize_t result;
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- result = sscanf(buf, "%63s", cbaf->host_name);
- if (result != 1)
- return -EINVAL;
-
- return size;
-}
-static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show,
- cbaf_wusb_host_name_store);
-
-static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups);
-}
-
-static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- ssize_t result;
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
- u16 band_groups = 0;
-
- result = sscanf(buf, "%04hx", &band_groups);
- if (result != 1)
- return -EINVAL;
-
- cbaf->host_band_groups = band_groups;
-
- return size;
-}
-
-static DEVICE_ATTR(wusb_host_band_groups, 0600,
- cbaf_wusb_host_band_groups_show,
- cbaf_wusb_host_band_groups_store);
-
-static const struct wusb_cbaf_device_info cbaf_device_info_defaults = {
- .Length_hdr = WUSB_AR_Length,
- .CDID_hdr = WUSB_AR_CDID,
- .BandGroups_hdr = WUSB_AR_BandGroups,
- .LangID_hdr = WUSB_AR_LangID,
- .DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName,
-};
-
-static ssize_t cbaf_wusb_cdid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- return sprintf(buf, "%16ph\n", cbaf->cdid.data);
-}
-
-static ssize_t cbaf_wusb_cdid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- ssize_t result;
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
- struct wusb_ckhdid cdid;
-
- result = sscanf(buf,
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx",
- &cdid.data[0] , &cdid.data[1],
- &cdid.data[2] , &cdid.data[3],
- &cdid.data[4] , &cdid.data[5],
- &cdid.data[6] , &cdid.data[7],
- &cdid.data[8] , &cdid.data[9],
- &cdid.data[10], &cdid.data[11],
- &cdid.data[12], &cdid.data[13],
- &cdid.data[14], &cdid.data[15]);
- if (result != 16)
- return -EINVAL;
-
- cbaf->cdid = cdid;
-
- return size;
-}
-static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store);
-
-static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups);
-}
-
-static DEVICE_ATTR(wusb_device_band_groups, 0600,
- cbaf_wusb_device_band_groups_show,
- NULL);
-
-static ssize_t cbaf_wusb_device_name_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name);
-}
-static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
-
-static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
- .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
- .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
- .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
- .Length_hdr = WUSB_AR_Length,
- .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
- .ConnectionContext_hdr = WUSB_AR_ConnectionContext,
- .BandGroups_hdr = WUSB_AR_BandGroups,
-};
-
-static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = {
- .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
- .Length_hdr = WUSB_AR_Length,
- .AssociationStatus_hdr = WUSB_AR_AssociationStatus,
-};
-
-/*
- * Send a new CC to the device.
- */
-static int cbaf_cc_upload(struct cbaf *cbaf)
-{
- int result;
- struct device *dev = &cbaf->usb_iface->dev;
- struct wusb_cbaf_cc_data *ccd;
-
- ccd = cbaf->buffer;
- *ccd = cbaf_cc_data_defaults;
- ccd->CHID = cbaf->chid;
- ccd->CDID = cbaf->cdid;
- ccd->CK = cbaf->ck;
- ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups);
-
- dev_dbg(dev, "Trying to upload CC:\n");
- dev_dbg(dev, " CHID %16ph\n", ccd->CHID.data);
- dev_dbg(dev, " CDID %16ph\n", ccd->CDID.data);
- dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups);
-
- result = usb_control_msg(
- cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
- CBAF_REQ_SET_ASSOCIATION_RESPONSE,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- ccd, sizeof(*ccd), USB_CTRL_SET_TIMEOUT);
-
- return result;
-}
-
-static ssize_t cbaf_wusb_ck_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- ssize_t result;
- struct usb_interface *iface = to_usb_interface(dev);
- struct cbaf *cbaf = usb_get_intfdata(iface);
-
- result = sscanf(buf,
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx",
- &cbaf->ck.data[0] , &cbaf->ck.data[1],
- &cbaf->ck.data[2] , &cbaf->ck.data[3],
- &cbaf->ck.data[4] , &cbaf->ck.data[5],
- &cbaf->ck.data[6] , &cbaf->ck.data[7],
- &cbaf->ck.data[8] , &cbaf->ck.data[9],
- &cbaf->ck.data[10], &cbaf->ck.data[11],
- &cbaf->ck.data[12], &cbaf->ck.data[13],
- &cbaf->ck.data[14], &cbaf->ck.data[15]);
- if (result != 16)
- return -EINVAL;
-
- result = cbaf_cc_upload(cbaf);
- if (result < 0)
- return result;
-
- return size;
-}
-static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store);
-
-static struct attribute *cbaf_dev_attrs[] = {
- &dev_attr_wusb_host_name.attr,
- &dev_attr_wusb_host_band_groups.attr,
- &dev_attr_wusb_chid.attr,
- &dev_attr_wusb_cdid.attr,
- &dev_attr_wusb_device_name.attr,
- &dev_attr_wusb_device_band_groups.attr,
- &dev_attr_wusb_ck.attr,
- NULL,
-};
-
-static const struct attribute_group cbaf_dev_attr_group = {
- .name = NULL, /* we want them in the same directory */
- .attrs = cbaf_dev_attrs,
-};
-
-static int cbaf_probe(struct usb_interface *iface,
- const struct usb_device_id *id)
-{
- struct cbaf *cbaf;
- struct device *dev = &iface->dev;
- int result = -ENOMEM;
-
- cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL);
- if (cbaf == NULL)
- goto error_kzalloc;
- cbaf->buffer = kmalloc(512, GFP_KERNEL);
- if (cbaf->buffer == NULL)
- goto error_kmalloc_buffer;
-
- cbaf->buffer_size = 512;
- cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface));
- cbaf->usb_iface = usb_get_intf(iface);
- result = cbaf_check(cbaf);
- if (result < 0) {
- dev_err(dev, "This device is not WUSB-CBAF compliant and is not supported yet.\n");
- goto error_check;
- }
-
- result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group);
- if (result < 0) {
- dev_err(dev, "Can't register sysfs attr group: %d\n", result);
- goto error_create_group;
- }
- usb_set_intfdata(iface, cbaf);
- return 0;
-
-error_create_group:
-error_check:
- usb_put_intf(iface);
- usb_put_dev(cbaf->usb_dev);
- kfree(cbaf->buffer);
-error_kmalloc_buffer:
- kfree(cbaf);
-error_kzalloc:
- return result;
-}
-
-static void cbaf_disconnect(struct usb_interface *iface)
-{
- struct cbaf *cbaf = usb_get_intfdata(iface);
- struct device *dev = &iface->dev;
- sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
- usb_set_intfdata(iface, NULL);
- usb_put_intf(iface);
- usb_put_dev(cbaf->usb_dev);
- kfree(cbaf->buffer);
- /* paranoia: clean up crypto keys */
- kzfree(cbaf);
-}
-
-static const struct usb_device_id cbaf_id_table[] = {
- { USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
- { },
-};
-MODULE_DEVICE_TABLE(usb, cbaf_id_table);
-
-static struct usb_driver cbaf_driver = {
- .name = "wusb-cbaf",
- .id_table = cbaf_id_table,
- .probe = cbaf_probe,
- .disconnect = cbaf_disconnect,
-};
-
-module_usb_driver(cbaf_driver);
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("Wireless USB Cable Based Association");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/crypto.c b/drivers/staging/wusbcore/crypto.c
deleted file mode 100644
index d7d55ed19a98..000000000000
--- a/drivers/staging/wusbcore/crypto.c
+++ /dev/null
@@ -1,441 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Ultra Wide Band
- * AES-128 CCM Encryption
- *
- * Copyright (C) 2007 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * We don't do any encryption here; we use the Linux Kernel's AES-128
- * crypto modules to construct keys and payload blocks in a way
- * defined by WUSB1.0[6]. Check the errata, as typos are patched
- * there.
- *
- * Thanks a zillion to John Keys for his help and clarifications over
- * the designed-by-a-committee text.
- *
- * So the idea is that there is this basic Pseudo-Random-Function
- * defined in WUSB1.0[6.5] which is the core of everything. It works
- * by tweaking some blocks, AES crypting them and then xoring
- * something else with them (this seems to be called CBC(AES) -- can
- * you tell I know jack about crypto?). So we just funnel it into the
- * Linux Crypto API.
- *
- * We leave a crypto test module so we can verify that vectors match,
- * every now and then.
- *
- * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I
- * am learning a lot...
- *
- * Conveniently, some data structures that need to be
- * funneled through AES are...16 bytes in size!
- */
-
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <crypto/hash.h>
-#include <crypto/skcipher.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/scatterlist.h>
-#include "../uwb/uwb.h"
-#include "include/wusb.h"
-
-static int debug_crypto_verify;
-
-module_param(debug_crypto_verify, int, 0);
-MODULE_PARM_DESC(debug_crypto_verify, "verify the key generation algorithms");
-
-static void wusb_key_dump(const void *buf, size_t len)
-{
- print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_OFFSET, 16, 1,
- buf, len, 0);
-}
-
-/*
- * Block of data, as understood by AES-CCM
- *
- * The code assumes this structure is nothing but a 16 byte array
- * (packed in a struct to avoid the common mess-ups I usually make with
- * arrays, and to enforce type checking).
- */
-struct aes_ccm_block {
- u8 data[16];
-} __attribute__((packed));
-
-/*
- * Counter-mode Blocks (WUSB1.0[6.4])
- *
- * According to CCM (or so it seems), for the purpose of calculating
- * the MIC, the message is broken in N counter-mode blocks, B0, B1,
- * ... BN.
- *
- * B0 contains flags, the CCM nonce and l(m).
- *
- * B1 contains l(a), the MAC header, the encryption offset and padding.
- *
- * If EO is nonzero, additional blocks are built from payload bytes
- * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
- * padding is not xmitted.
- */
-
-/* WUSB1.0[T6.4] */
-struct aes_ccm_b0 {
- u8 flags; /* 0x59, per CCM spec */
- struct aes_ccm_nonce ccm_nonce;
- __be16 lm;
-} __attribute__((packed));
-
-/* WUSB1.0[T6.5] */
-struct aes_ccm_b1 {
- __be16 la;
- u8 mac_header[10];
- __le16 eo;
- u8 security_reserved; /* This is always zero */
- u8 padding; /* 0 */
-} __attribute__((packed));
-
-/*
- * Encryption Blocks (WUSB1.0[6.4.4])
- *
- * CCM uses Ax blocks to generate a keystream with which the MIC and
- * the message's payload are encoded. A0 always encrypts/decrypts the
- * MIC. Ax (x>0) are used for the successive payload blocks.
- *
- * The x is the counter, and is increased for each block.
- */
-struct aes_ccm_a {
- u8 flags; /* 0x01, per CCM spec */
- struct aes_ccm_nonce ccm_nonce;
- __be16 counter; /* Value of x */
-} __attribute__((packed));
-
-/* Scratch space for MAC calculations. */
-struct wusb_mac_scratch {
- struct aes_ccm_b0 b0;
- struct aes_ccm_b1 b1;
- struct aes_ccm_a ax;
-};
-
-/*
- * CC-MAC function WUSB1.0[6.5]
- *
- * Take a data string and produce the encrypted CBC Counter-mode MIC
- *
- * Note the names for most function arguments are made to (more or
- * less) match those used in the pseudo-function definition given in
- * WUSB1.0[6.5].
- *
- * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
- *
- * @tfm_aes: AES cipher handle (initialized)
- *
- * @mic: buffer for placing the computed MIC (Message Integrity
- * Code). This is exactly 8 bytes, and we expect the buffer to
- * be at least eight bytes in length.
- *
- * @key: 128 bit symmetric key
- *
- * @n: CCM nonce
- *
- * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
- * we use exactly 14 bytes).
- *
- * @b: data stream to be processed
- *
- * @blen: size of b...
- *
- * Still not very clear how this is done, but looks like this: we
- * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
- * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
- * take the payload and divide it in blocks (16 bytes), xor them with
- * the previous crypto result (16 bytes) and crypt it, repeat the next
- * block with the output of the previous one, rinse wash. So we use
- * the CBC-MAC(AES) shash, that does precisely that. The IV (Initial
- * Vector) is 16 bytes and is set to zero, so
- *
- * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
- * using the 14 bytes of @a to fill up
- * b1.{mac_header,e0,security_reserved,padding}.
- *
- * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
- * l(m) is orthogonal, they bear no relationship, so it is not
- * in conflict with the parameter's relation that
- * WUSB1.0[6.4.2]) defines.
- *
- * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
- * first errata released on 2005/07.
- *
- * NOTE: we need to clean IV to zero at each invocation to make sure
- * we start with a fresh empty Initial Vector, so that the CBC
- * works ok.
- *
- * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
- * what sg[4] is for. Maybe there is a smarter way to do this.
- */
-static int wusb_ccm_mac(struct crypto_shash *tfm_cbcmac,
- struct wusb_mac_scratch *scratch,
- void *mic,
- const struct aes_ccm_nonce *n,
- const struct aes_ccm_label *a, const void *b,
- size_t blen)
-{
- SHASH_DESC_ON_STACK(desc, tfm_cbcmac);
- u8 iv[AES_BLOCK_SIZE];
-
- /*
- * These checks should be compile time optimized out
- * ensure @a fills b1's mac_header and following fields
- */
- BUILD_BUG_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la));
- BUILD_BUG_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block));
- BUILD_BUG_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block));
- BUILD_BUG_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block));
-
- /* Setup B0 */
- scratch->b0.flags = 0x59; /* Format B0 */
- scratch->b0.ccm_nonce = *n;
- scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
-
- /* Setup B1
- *
- * The WUSB spec is anything but clear! WUSB1.0[6.5]
- * says that to initialize B1 from A with 'l(a) = blen +
- * 14'--after clarification, it means to use A's contents
- * for MAC Header, EO, sec reserved and padding.
- */
- scratch->b1.la = cpu_to_be16(blen + 14);
- memcpy(&scratch->b1.mac_header, a, sizeof(*a));
-
- desc->tfm = tfm_cbcmac;
- crypto_shash_init(desc);
- crypto_shash_update(desc, (u8 *)&scratch->b0, sizeof(scratch->b0) +
- sizeof(scratch->b1));
- crypto_shash_finup(desc, b, blen, iv);
-
- /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
- * The procedure is to AES crypt the A0 block and XOR the MIC
- * Tag against it; we only do the first 8 bytes and place it
- * directly in the destination buffer.
- */
- scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */
- scratch->ax.ccm_nonce = *n;
- scratch->ax.counter = 0;
-
- /* reuse the CBC-MAC transform to perform the single block encryption */
- crypto_shash_digest(desc, (u8 *)&scratch->ax, sizeof(scratch->ax),
- (u8 *)&scratch->ax);
-
- crypto_xor_cpy(mic, (u8 *)&scratch->ax, iv, 8);
-
- return 8;
-}
-
-/*
- * WUSB Pseudo Random Function (WUSB1.0[6.5])
- *
- * @b: buffer to the source data; cannot be a global or const local
- * (will confuse the scatterlists)
- */
-ssize_t wusb_prf(void *out, size_t out_size,
- const u8 key[16], const struct aes_ccm_nonce *_n,
- const struct aes_ccm_label *a,
- const void *b, size_t blen, size_t len)
-{
- ssize_t result, bytes = 0, bitr;
- struct aes_ccm_nonce n = *_n;
- struct crypto_shash *tfm_cbcmac;
- struct wusb_mac_scratch scratch;
- u64 sfn = 0;
- __le64 sfn_le;
-
- tfm_cbcmac = crypto_alloc_shash("cbcmac(aes)", 0, 0);
- if (IS_ERR(tfm_cbcmac)) {
- result = PTR_ERR(tfm_cbcmac);
- printk(KERN_ERR "E: can't load CBCMAC-AES: %d\n", (int)result);
- goto error_alloc_cbcmac;
- }
-
- result = crypto_shash_setkey(tfm_cbcmac, key, AES_BLOCK_SIZE);
- if (result < 0) {
- printk(KERN_ERR "E: can't set CBCMAC-AES key: %d\n", (int)result);
- goto error_setkey_cbcmac;
- }
-
- for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
- sfn_le = cpu_to_le64(sfn++);
- memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
- result = wusb_ccm_mac(tfm_cbcmac, &scratch, out + bytes,
- &n, a, b, blen);
- if (result < 0)
- goto error_ccm_mac;
- bytes += result;
- }
- result = bytes;
-
-error_ccm_mac:
-error_setkey_cbcmac:
- crypto_free_shash(tfm_cbcmac);
-error_alloc_cbcmac:
- return result;
-}
-
-/* WUSB1.0[A.2] test vectors */
-static const u8 stv_hsmic_key[16] = {
- 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
- 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
-};
-
-static const struct aes_ccm_nonce stv_hsmic_n = {
- .sfn = { 0 },
- .tkid = { 0x76, 0x98, 0x01, },
- .dest_addr = { .data = { 0xbe, 0x00 } },
- .src_addr = { .data = { 0x76, 0x98 } },
-};
-
-/*
- * Out-of-band MIC Generation verification code
- *
- */
-static int wusb_oob_mic_verify(void)
-{
- int result;
- u8 mic[8];
- /* WUSB1.0[A.2] test vectors */
- static const struct usb_handshake stv_hsmic_hs = {
- .bMessageNumber = 2,
- .bStatus = 00,
- .tTKID = { 0x76, 0x98, 0x01 },
- .bReserved = 00,
- .CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
- 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
- 0x3c, 0x3d, 0x3e, 0x3f },
- .nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
- 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
- 0x2c, 0x2d, 0x2e, 0x2f },
- .MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
- 0x14, 0x7b },
- };
- size_t hs_size;
-
- result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs);
- if (result < 0)
- printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result);
- else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) {
- printk(KERN_ERR "E: OOB MIC test: "
- "mismatch between MIC result and WUSB1.0[A2]\n");
- hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
- printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
- wusb_key_dump(&stv_hsmic_hs, hs_size);
- printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
- sizeof(stv_hsmic_n));
- wusb_key_dump(&stv_hsmic_n, sizeof(stv_hsmic_n));
- printk(KERN_ERR "E: MIC out:\n");
- wusb_key_dump(mic, sizeof(mic));
- printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
- wusb_key_dump(stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
- result = -EINVAL;
- } else
- result = 0;
- return result;
-}
-
-/*
- * Test vectors for Key derivation
- *
- * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1]
- * (errata corrected in 2005/07).
- */
-static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = {
- 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
- 0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f
-};
-
-static const struct aes_ccm_nonce stv_keydvt_n_a1 = {
- .sfn = { 0 },
- .tkid = { 0x76, 0x98, 0x01, },
- .dest_addr = { .data = { 0xbe, 0x00 } },
- .src_addr = { .data = { 0x76, 0x98 } },
-};
-
-static const struct wusb_keydvt_out stv_keydvt_out_a1 = {
- .kck = {
- 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
- 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
- },
- .ptk = {
- 0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06,
- 0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d
- }
-};
-
-/*
- * Perform a test to make sure we match the vectors defined in
- * WUSB1.0[A.1] (Errata 2006/12)
- */
-static int wusb_key_derive_verify(void)
-{
- int result = 0;
- struct wusb_keydvt_out keydvt_out;
- /* These come from WUSB1.0[A.1] + 2006/12 errata */
- static const struct wusb_keydvt_in stv_keydvt_in_a1 = {
- .hnonce = {
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
- },
- .dnonce = {
- 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
- 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f
- }
- };
-
- result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1,
- &stv_keydvt_in_a1);
-	if (result < 0) {
-		printk(KERN_ERR "E: WUSB key derivation test: "
-		       "derivation failed: %d\n", result);
-		return result;
-	}
-	if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) {
- printk(KERN_ERR "E: WUSB key derivation test: "
- "mismatch between key derivation result "
-		       "and WUSB1.0[A.1] Errata 2006/12\n");
- printk(KERN_ERR "E: keydvt in: key\n");
- wusb_key_dump(stv_key_a1, sizeof(stv_key_a1));
- printk(KERN_ERR "E: keydvt in: nonce\n");
- wusb_key_dump(&stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
- printk(KERN_ERR "E: keydvt in: hnonce & dnonce\n");
- wusb_key_dump(&stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
- printk(KERN_ERR "E: keydvt out: KCK\n");
- wusb_key_dump(&keydvt_out.kck, sizeof(keydvt_out.kck));
- printk(KERN_ERR "E: keydvt out: PTK\n");
- wusb_key_dump(&keydvt_out.ptk, sizeof(keydvt_out.ptk));
- result = -EINVAL;
- } else
- result = 0;
- return result;
-}
-
-/*
- * Initialize crypto system
- *
- * FIXME: we do nothing now, other than verifying. Later on we'll
- * cache the encryption stuff, so that's why we have a separate init.
- */
-int wusb_crypto_init(void)
-{
- int result;
-
- if (debug_crypto_verify) {
- result = wusb_key_derive_verify();
- if (result < 0)
- return result;
- return wusb_oob_mic_verify();
- }
- return 0;
-}
-
-void wusb_crypto_exit(void)
-{
- /* FIXME: free cached crypto transforms */
-}
diff --git a/drivers/staging/wusbcore/dev-sysfs.c b/drivers/staging/wusbcore/dev-sysfs.c
deleted file mode 100644
index 67b0a4c412b2..000000000000
--- a/drivers/staging/wusbcore/dev-sysfs.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * WUSB devices
- * sysfs bindings
- *
- * Copyright (C) 2007 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Get them out of the way...
- */
-
-#include <linux/jiffies.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include "wusbhc.h"
-
-static ssize_t wusb_disconnect_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct usb_device *usb_dev;
- struct wusbhc *wusbhc;
- unsigned command;
- u8 port_idx;
-
- if (sscanf(buf, "%u", &command) != 1)
- return -EINVAL;
- if (command == 0)
- return size;
- usb_dev = to_usb_device(dev);
- wusbhc = wusbhc_get_by_usb_dev(usb_dev);
- if (wusbhc == NULL)
- return -ENODEV;
-
- mutex_lock(&wusbhc->mutex);
- port_idx = wusb_port_no_to_idx(usb_dev->portnum);
- __wusbhc_dev_disable(wusbhc, port_idx);
- mutex_unlock(&wusbhc->mutex);
- wusbhc_put(wusbhc);
- return size;
-}
-static DEVICE_ATTR_WO(wusb_disconnect);
-
-static ssize_t wusb_cdid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- ssize_t result;
- struct wusb_dev *wusb_dev;
-
- wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev));
- if (wusb_dev == NULL)
- return -ENODEV;
- result = sprintf(buf, "%16ph\n", wusb_dev->cdid.data);
- wusb_dev_put(wusb_dev);
- return result;
-}
-static DEVICE_ATTR_RO(wusb_cdid);
-
-static ssize_t wusb_ck_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int result;
- struct usb_device *usb_dev;
- struct wusbhc *wusbhc;
- struct wusb_ckhdid ck;
-
- result = sscanf(buf,
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx\n",
- &ck.data[0] , &ck.data[1],
- &ck.data[2] , &ck.data[3],
- &ck.data[4] , &ck.data[5],
- &ck.data[6] , &ck.data[7],
- &ck.data[8] , &ck.data[9],
- &ck.data[10], &ck.data[11],
- &ck.data[12], &ck.data[13],
- &ck.data[14], &ck.data[15]);
- if (result != 16)
- return -EINVAL;
-
- usb_dev = to_usb_device(dev);
- wusbhc = wusbhc_get_by_usb_dev(usb_dev);
- if (wusbhc == NULL)
- return -ENODEV;
- result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck);
- memzero_explicit(&ck, sizeof(ck));
- wusbhc_put(wusbhc);
- return result < 0 ? result : size;
-}
-static DEVICE_ATTR_WO(wusb_ck);
-
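-/*
- * Illustrative usage (editor's sketch; the sysfs path is hypothetical):
- * the connection key is written as 16 space-separated hex octets,
- * matching the sscanf() format above -- anything else is rejected
- * with -EINVAL:
- *
- *   echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" \
- *        > /sys/bus/usb/devices/DEV/wusb_ck
- */
-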
-static struct attribute *wusb_dev_attrs[] = {
- &dev_attr_wusb_disconnect.attr,
- &dev_attr_wusb_cdid.attr,
- &dev_attr_wusb_ck.attr,
- NULL,
-};
-
-static const struct attribute_group wusb_dev_attr_group = {
- .name = NULL, /* we want them in the same directory */
- .attrs = wusb_dev_attrs,
-};
-
-int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev,
- struct wusb_dev *wusb_dev)
-{
- int result = sysfs_create_group(&usb_dev->dev.kobj,
- &wusb_dev_attr_group);
- struct device *dev = &usb_dev->dev;
- if (result < 0)
- dev_err(dev, "Cannot register WUSB-dev attributes: %d\n",
- result);
- return result;
-}
-
-void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev)
-{
- struct usb_device *usb_dev = wusb_dev->usb_dev;
- if (usb_dev)
- sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group);
-}
diff --git a/drivers/staging/wusbcore/devconnect.c b/drivers/staging/wusbcore/devconnect.c
deleted file mode 100644
index 1170f8baf608..000000000000
--- a/drivers/staging/wusbcore/devconnect.c
+++ /dev/null
@@ -1,1085 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
- * Device Connect handling
- *
- * Copyright (C) 2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- * FIXME: this file needs to be broken up, it's grown too big
- *
- *
- * WUSB1.0[7.1, 7.5.1]
- *
- * WUSB device connection is kind of messy. Some background:
- *
- * When a device wants to connect it scans the UWB radio channels
- * looking for a WUSB Channel; a WUSB channel is defined by MMCs
- * (Micro-scheduled Management Commands) [see
- * Design-overview for more on this].
- *
- * So, device scans the radio, finds MMCs and thus a host and checks
- * when the next DNTS is. It sends a Device Notification Connect
- * (DN_Connect); the host picks it up (through nep.c and notif.c, ends
- * up in wusbhc_devconnect_ack(), which creates a wusb_dev structure in
- * wusbhc->port[port_number].wusb_dev), assigns an unauth address
- * to the device (this means from 0x80 to 0xfe) and sends, in the MMC
- * a Connect Ack Information Element (ConnAck IE).
- *
- * So now the device has a WUSB address. From now on, we use
- * that to talk to it in the RPipes.
- *
- * ASSUMPTIONS:
- *
- * - We use as the device address the port number where it is
- * connected (port 0 doesn't exist). For unauth, it is 128 + that.
- *
- * ROADMAP:
- *
- * This file contains the logic for doing that--entry points:
- *
- * wusbhc_devconnect_ack()  Ack a device until _acked() called.
- * Called by notif.c:wusb_handle_dn_connect()
- * when a DN_Connect is received.
- *
- * wusbhc_devconnect_acked() Ack done, release resources.
- *
- * wusbhc_handle_dn_alive() Called by notif.c:wusb_handle_dn()
- * for processing a DN_Alive pong from a device.
- *
- * wusbhc_handle_dn_disconnect() Called by notif.c:wusb_handle_dn() to
- * process a disconnect request from a
- * device.
- *
- * __wusbhc_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when
- * disabling a port.
- *
- * wusbhc_devconnect_create() Called when creating the host by
- * lc.c:wusbhc_create().
- *
- * wusbhc_devconnect_destroy() Cleanup called when removing the host. Called
- * by lc.c:wusbhc_destroy().
- *
- * Each Wireless USB host maintains a list of DN_Connect requests
- * (actually we maintain a list of pending Connect Acks, the
- * wusbhc->ca_list).
- *
- * LIFE CYCLE OF port->wusb_dev
- *
- * Before the @wusbhc structure put()s the reference it owns for
- * port->wusb_dev [and clean the wusb_dev pointer], it needs to
- * lock @wusbhc->mutex.
- */
-
-#include <linux/jiffies.h>
-#include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/export.h>
-#include "wusbhc.h"
-
-static void wusbhc_devconnect_acked_work(struct work_struct *work);
-
-static void wusb_dev_free(struct wusb_dev *wusb_dev)
-{
- kfree(wusb_dev);
-}
-
-static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
-{
- struct wusb_dev *wusb_dev;
-
- wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
- if (wusb_dev == NULL)
- goto err;
-
- wusb_dev->wusbhc = wusbhc;
-
- INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
-
- return wusb_dev;
-err:
- wusb_dev_free(wusb_dev);
- return NULL;
-}
-
-
-/*
- * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE
- * properly so that it can be added to the MMC.
- *
- * We just get the @wusbhc->cack_list and fill out the first four ones or
- * less (per spec WUSB1.0[7.5, before T7-38]). If the ConnectAck WUSB
- * IE is not allocated, we alloc it.
- *
- * @wusbhc->mutex must be taken
- */
-static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc)
-{
- unsigned cnt;
- struct wusb_dev *dev_itr;
- struct wuie_connect_ack *cack_ie;
-
- cack_ie = &wusbhc->cack_ie;
- cnt = 0;
- list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) {
- cack_ie->blk[cnt].CDID = dev_itr->cdid;
- cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr;
- if (++cnt >= WUIE_ELT_MAX)
- break;
- }
- cack_ie->hdr.bLength = sizeof(cack_ie->hdr)
- + cnt * sizeof(cack_ie->blk[0]);
-}
-
-/*
- * Register a new device that wants to connect
- *
- * A new device wants to connect, so we add it to the Connect-Ack
- * list. We give it an address in the unauthorized range (bit 8 set);
- * user space will have to drive authorization further on.
- *
- * @dev_addr: address to use for the device (which is also the port
- * number).
- *
- * @wusbhc->mutex must be taken
- */
-static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc,
- struct wusb_dn_connect *dnc,
- const char *pr_cdid, u8 port_idx)
-{
- struct device *dev = wusbhc->dev;
- struct wusb_dev *wusb_dev;
- int new_connection = wusb_dn_connect_new_connection(dnc);
- u8 dev_addr;
- int result;
-
- /* Is it registered already? */
- list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node)
- if (!memcmp(&wusb_dev->cdid, &dnc->CDID,
- sizeof(wusb_dev->cdid)))
- return wusb_dev;
- /* We don't have it, create an entry, register it */
- wusb_dev = wusb_dev_alloc(wusbhc);
- if (wusb_dev == NULL)
- return NULL;
- wusb_dev_init(wusb_dev);
- wusb_dev->cdid = dnc->CDID;
- wusb_dev->port_idx = port_idx;
-
- /*
- * Devices are always available within the cluster reservation
- * and since the hardware will take the intersection of the
- * per-device availability and the cluster reservation, the
- * per-device availability can simply be set to always
- * available.
- */
- bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS);
-
- /* FIXME: handle reconnects instead of assuming connects are
- always new. */
- if (1 && new_connection == 0)
- new_connection = 1;
- if (new_connection) {
- dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;
-
- dev_info(dev, "Connecting new WUSB device to address %u, "
- "port %u\n", dev_addr, port_idx);
-
- result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr);
- if (result < 0)
- return NULL;
- }
- wusb_dev->entry_ts = jiffies;
- list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list);
- wusbhc->cack_count++;
- wusbhc_fill_cack_ie(wusbhc);
-
- return wusb_dev;
-}
-
-/*
- * Remove a Connect-Ack context entry from the HCs view
- *
- * @wusbhc->mutex must be taken
- */
-static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
-{
- list_del_init(&wusb_dev->cack_node);
- wusbhc->cack_count--;
- wusbhc_fill_cack_ie(wusbhc);
-}
-
-/*
- * @wusbhc->mutex must be taken */
-static
-void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
-{
- wusbhc_cack_rm(wusbhc, wusb_dev);
- if (wusbhc->cack_count)
- wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
- else
- wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr);
-}
-
-static void wusbhc_devconnect_acked_work(struct work_struct *work)
-{
- struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev,
- devconnect_acked_work);
- struct wusbhc *wusbhc = wusb_dev->wusbhc;
-
- mutex_lock(&wusbhc->mutex);
- wusbhc_devconnect_acked(wusbhc, wusb_dev);
- mutex_unlock(&wusbhc->mutex);
-
- wusb_dev_put(wusb_dev);
-}
-
-/*
- * Ack a device for connection
- *
- * FIXME: docs
- *
- * @pr_cdid: Printable CDID, in hex. Use @dnc->CDID for the real deal.
- *
- * So we get the connect ack IE (may have been allocated already),
- * find an empty connect block, an empty virtual port, create an
- * address with it (see below), make it an unauth addr [bit 7 set] and
- * set the MMC.
- *
- * Addresses: because WUSB hosts have no downstream hubs, we can do a
- * 1:1 mapping between 'port number' and device
- * address. This simplifies many things, as during this
- * initial connect phase the USB stack has no knowledge of
- * the device and hasn't assigned an address yet--we know
- * USB's choose_address() will use the same heuristics we
- * use here, so we can assume which address will be assigned.
- *
- * USB stack always assigns address 1 to the root hub, so
- * to the port number we add 2 (thus virtual port #0 is
- * addr #2).
- *
- * @wusbhc shall be referenced
- */
-static
-void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
- const char *pr_cdid)
-{
- int result;
- struct device *dev = wusbhc->dev;
- struct wusb_dev *wusb_dev;
- struct wusb_port *port;
- unsigned idx;
-
- mutex_lock(&wusbhc->mutex);
-
- /* Check we are not handling it already */
- for (idx = 0; idx < wusbhc->ports_max; idx++) {
- port = wusb_port_by_idx(wusbhc, idx);
- if (port->wusb_dev
- && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0)
- goto error_unlock;
- }
- /* Look up those fake ports we have for a free one */
- for (idx = 0; idx < wusbhc->ports_max; idx++) {
- port = wusb_port_by_idx(wusbhc, idx);
- if ((port->status & USB_PORT_STAT_POWER)
- && !(port->status & USB_PORT_STAT_CONNECTION))
- break;
- }
- if (idx >= wusbhc->ports_max) {
- dev_err(dev, "Host controller can't connect more devices "
- "(%u already connected); device %s rejected\n",
- wusbhc->ports_max, pr_cdid);
- /* NOTE: we could send a WUIE_Disconnect here, but we haven't
-	 * even acked, so the device will eventually time out the
- * connection, right? */
- goto error_unlock;
- }
-
- /* Make sure we are using no crypto on that "virtual port" */
- wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
-
- /* Grab a filled in Connect-Ack context, fill out the
- * Connect-Ack Wireless USB IE, set the MMC */
- wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx);
- if (wusb_dev == NULL)
- goto error_unlock;
- result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
- if (result < 0)
- goto error_unlock;
- /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do
-	 * three for good measure */
- msleep(3);
- port->wusb_dev = wusb_dev;
- port->status |= USB_PORT_STAT_CONNECTION;
- port->change |= USB_PORT_STAT_C_CONNECTION;
- /* Now the port status changed to connected; hub_wq will
- * pick the change up and try to reset the port to bring it to
- * the enabled state--so this process returns up to the stack
- * and it calls back into wusbhc_rh_port_reset().
- */
-error_unlock:
- mutex_unlock(&wusbhc->mutex);
- return;
-
-}
-
-/*
- * Disconnect a Wireless USB device from its fake port
- *
- * Marks the port as disconnected so that hub_wq can pick up the change
- * and drops our knowledge about the device.
- *
- * Assumes there is a device connected
- *
- * @port_index: zero based port number
- *
- * NOTE: @wusbhc->mutex is locked
- *
- * WARNING: From here it is not very safe to access anything hanging off
- * wusb_dev
- */
-static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
- struct wusb_port *port)
-{
- struct wusb_dev *wusb_dev = port->wusb_dev;
-
- port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE
- | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET
- | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
- port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE;
- if (wusb_dev) {
- dev_dbg(wusbhc->dev, "disconnecting device from port %d\n", wusb_dev->port_idx);
- if (!list_empty(&wusb_dev->cack_node))
- list_del_init(&wusb_dev->cack_node);
- /* For the one in cack_add() */
- wusb_dev_put(wusb_dev);
- }
- port->wusb_dev = NULL;
-
- /* After a device disconnects, change the GTK (see [WUSB]
- * section 6.2.11.2). */
- if (wusbhc->active)
- wusbhc_gtk_rekey(wusbhc);
-
- /* The Wireless USB part has forgotten about the device already; now
- * hub_wq's timer will pick up the disconnection and remove the USB
- * device from the system
- */
-}
-
-/*
- * Refresh the list of keep alives to emit in the MMC
- *
- * We only publish the first four devices that have a coming timeout
- * condition. Then when we are done processing those, we go for the
- * next ones. We ignore the ones that have timed out already (they'll
- * be purged).
- *
- * This might let the first devices in the port array starve the last
- * ones of keep-alives...FIXME: come up with a better algorithm?
- *
- * Note we can't do much about MMC's ops errors; we hope next refresh
- * will kind of handle it.
- *
- * NOTE: @wusbhc->mutex is locked
- */
-static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
-{
- struct device *dev = wusbhc->dev;
- unsigned cnt;
- struct wusb_dev *wusb_dev;
- struct wusb_port *wusb_port;
- struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie;
- unsigned keep_alives, old_keep_alives;
-
- old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
- keep_alives = 0;
- for (cnt = 0;
- keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max;
- cnt++) {
- unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
-
- wusb_port = wusb_port_by_idx(wusbhc, cnt);
- wusb_dev = wusb_port->wusb_dev;
-
- if (wusb_dev == NULL)
- continue;
- if (wusb_dev->usb_dev == NULL)
- continue;
-
- if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
- dev_err(dev, "KEEPALIVE: device %u timed out\n",
- wusb_dev->addr);
- __wusbhc_dev_disconnect(wusbhc, wusb_port);
- } else if (time_after(jiffies, wusb_dev->entry_ts + tt/3)) {
- /* Approaching timeout cut off, need to refresh */
- ie->bDeviceAddress[keep_alives++] = wusb_dev->addr;
- }
- }
- if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */
- ie->bDeviceAddress[keep_alives++] = 0x7f;
- ie->hdr.bLength = sizeof(ie->hdr) +
- keep_alives*sizeof(ie->bDeviceAddress[0]);
- if (keep_alives > 0)
- wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr);
- else if (old_keep_alives != 0)
- wusbhc_mmcie_rm(wusbhc, &ie->hdr);
-}
-
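-/*
- * Worked example (editor's note, assuming a trust timeout of 4000 ms):
- * a device idle longer than 4000/3 ms (~1.3 s) gets its address put in
- * the keep-alive IE; one idle past the full 4000 ms is disconnected.
- * The work item below re-arms itself every trust_timeout/2 = 2000 ms,
- * so an expiring device is normally pinged at least once before it is
- * dropped.
- */
-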
-/*
- * Do a run through all devices checking for timeouts
- */
-static void wusbhc_keep_alive_run(struct work_struct *ws)
-{
- struct delayed_work *dw = to_delayed_work(ws);
- struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer);
-
- mutex_lock(&wusbhc->mutex);
- __wusbhc_keep_alive(wusbhc);
- mutex_unlock(&wusbhc->mutex);
-
- queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
- msecs_to_jiffies(wusbhc->trust_timeout / 2));
-}
-
-/*
- * Find the wusb_dev from its device address.
- *
- * The device can be found directly from the address (see
- * wusbhc_cack_add() for where the device address is set to port_idx
- * +2), except when the address is zero.
- */
-static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
-{
- int p;
-
- if (addr == 0xff) /* unconnected */
- return NULL;
-
- if (addr > 0) {
- int port = (addr & ~0x80) - 2;
- if (port < 0 || port >= wusbhc->ports_max)
- return NULL;
- return wusb_port_by_idx(wusbhc, port)->wusb_dev;
- }
-
- /* Look for the device with address 0. */
- for (p = 0; p < wusbhc->ports_max; p++) {
- struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev;
- if (wusb_dev && wusb_dev->addr == addr)
- return wusb_dev;
- }
- return NULL;
-}
-
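-/*
- * Editor's sketch (hypothetical helpers, not part of the driver):
- * the address scheme used by wusbhc_cack_add() above and inverted in
- * wusbhc_find_dev_by_addr(), written out explicitly.
- */
-static inline u8 example_unauth_addr(u8 port_idx)
-{
-	return (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;	/* sets the 0x80 bit */
-}
-
-static inline int example_port_idx_from_addr(u8 addr)
-{
-	return (addr & ~0x80) - 2;	/* negative: no such port */
-}
-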
-/*
- * Handle a DN_Alive notification (WUSB1.0[7.6.1])
- *
- * This just updates the device activity timestamp and then refreshes
- * the keep alive IE.
- *
- * @wusbhc shall be referenced and unlocked
- */
-static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr)
-{
- struct wusb_dev *wusb_dev;
-
- mutex_lock(&wusbhc->mutex);
- wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
- if (wusb_dev == NULL) {
- dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n",
- srcaddr);
- } else {
- wusb_dev->entry_ts = jiffies;
- __wusbhc_keep_alive(wusbhc);
- }
- mutex_unlock(&wusbhc->mutex);
-}
-
-/*
- * Handle a DN_Connect notification (WUSB1.0[7.6.1])
- *
- * @wusbhc
- * @pkt_hdr
- * @size: Size of the buffer where the notification resides; if the
- * notification data suggests there should be more data than
- * available, an error will be signaled and the whole buffer
- * consumed.
- *
- * @wusbhc->mutex shall be held
- */
-static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
- struct wusb_dn_hdr *dn_hdr,
- size_t size)
-{
- struct device *dev = wusbhc->dev;
- struct wusb_dn_connect *dnc;
- char pr_cdid[WUSB_CKHDID_STRSIZE];
- static const char *beacon_behaviour[] = {
- "reserved",
- "self-beacon",
- "directed-beacon",
- "no-beacon"
- };
-
- if (size < sizeof(*dnc)) {
- dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n",
- size, sizeof(*dnc));
- return;
- }
-
- dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
- sprintf(pr_cdid, "%16ph", dnc->CDID.data);
- dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n",
- pr_cdid,
- wusb_dn_connect_prev_dev_addr(dnc),
- beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)],
- wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect");
- /* ACK the connect */
- wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid);
-}
-
-/*
- * Handle a DN_Disconnect notification (WUSB1.0[7.6.1])
- *
- * Device is going down -- do the disconnect.
- *
- * @wusbhc shall be referenced and unlocked
- */
-static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr)
-{
- struct device *dev = wusbhc->dev;
- struct wusb_dev *wusb_dev;
-
- mutex_lock(&wusbhc->mutex);
- wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
- if (wusb_dev == NULL) {
- dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n",
- srcaddr);
- } else {
- dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n",
- wusb_dev->addr);
- __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc,
- wusb_dev->port_idx));
- }
- mutex_unlock(&wusbhc->mutex);
-}
-
-/*
- * Handle a Device Notification coming from a host
- *
- * The Device Notification comes from a host (HWA, DWA or WHCI)
- * wrapped in a set of headers. Somebody else has peeled off those
- * headers for us and we just get one Device Notification.
- *
- * Invalid DNs (e.g., too short) are discarded.
- *
- * @wusbhc shall be referenced
- *
- * FIXMES:
- * - implement priorities as in WUSB1.0[Table 7-55]?
- */
-void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
- struct wusb_dn_hdr *dn_hdr, size_t size)
-{
- struct device *dev = wusbhc->dev;
-
- if (size < sizeof(struct wusb_dn_hdr)) {
- dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
- (int)size, (int)sizeof(struct wusb_dn_hdr));
- return;
- }
- switch (dn_hdr->bType) {
- case WUSB_DN_CONNECT:
- wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
- break;
- case WUSB_DN_ALIVE:
- wusbhc_handle_dn_alive(wusbhc, srcaddr);
- break;
- case WUSB_DN_DISCONNECT:
- wusbhc_handle_dn_disconnect(wusbhc, srcaddr);
- break;
- case WUSB_DN_MASAVAILCHANGED:
- case WUSB_DN_RWAKE:
- case WUSB_DN_SLEEP:
- /* FIXME: handle these DNs. */
- break;
- case WUSB_DN_EPRDY:
- /* The hardware handles these. */
- break;
- default:
- dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
- dn_hdr->bType, (int)size, srcaddr);
- }
-}
-EXPORT_SYMBOL_GPL(wusbhc_handle_dn);
-
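-/*
- * Editor's note: the dispatch above keys off the first byte of the
- * notification; the wusb_dn_hdr layout (from <linux/usb/wusb.h>) is
- * essentially:
- *
- *   struct wusb_dn_hdr {
- *           u8 bType;
- *           u8 notifdata[];
- *   } __packed;
- */
-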
-/*
- * Disconnect a WUSB device from the cluster
- *
- * @wusbhc
- * @port Fake port where the device is (wusbhc index, not USB port number).
- *
- * In Wireless USB, a disconnect is basically telling the device it is
- * being disconnected and forgetting about it.
- *
- * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100
- * ms and then keep going.
- *
- * We don't do much in case of error; we always pretend we disabled
- * the port and disconnected the device. If physically the request
- * didn't get there (many things can fail in the way there), the stack
- * will reject the device's communication attempts.
- *
- * @wusbhc should be refcounted and locked
- */
-void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx)
-{
- int result;
- struct device *dev = wusbhc->dev;
- struct wusb_dev *wusb_dev;
- struct wuie_disconnect *ie;
-
- wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
- if (wusb_dev == NULL) {
- /* reset no device? ignore */
- dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
- port_idx);
- return;
- }
- __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
-
- ie = kzalloc(sizeof(*ie), GFP_KERNEL);
- if (ie == NULL)
- return;
- ie->hdr.bLength = sizeof(*ie);
- ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
- ie->bDeviceAddress = wusb_dev->addr;
- result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
- if (result < 0)
- dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
- else {
- /* At least 6 MMCs, assuming at least 1 MMC per zone. */
- msleep(7*4);
- wusbhc_mmcie_rm(wusbhc, &ie->hdr);
- }
- kfree(ie);
-}
-
-/*
- * Walk over the BOS descriptor, verify and grok it
- *
- * @usb_dev: referenced
- * @wusb_dev: referenced and unlocked
- *
- * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a
- * "flexible" way to wrap all kinds of descriptors inside an standard
- * descriptor (wonder why they didn't use normal descriptors,
- * btw). Not like they lack code.
- *
- * At the end we go to look for the WUSB Device Capabilities
- * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor
- * that is part of the BOS descriptor set. That tells us what the
- * device supports (dual role, beacon type, UWB PHY rates).
- */
-static int wusb_dev_bos_grok(struct usb_device *usb_dev,
- struct wusb_dev *wusb_dev,
- struct usb_bos_descriptor *bos, size_t desc_size)
-{
- ssize_t result;
- struct device *dev = &usb_dev->dev;
- void *itr, *top;
-
- /* Walk over BOS capabilities, verify them */
- itr = (void *)bos + sizeof(*bos);
- top = itr + desc_size - sizeof(*bos);
- while (itr < top) {
- struct usb_dev_cap_header *cap_hdr = itr;
- size_t cap_size;
- u8 cap_type;
- if (top - itr < sizeof(*cap_hdr)) {
- dev_err(dev, "Device BUG? premature end of BOS header "
- "data [offset 0x%02x]: only %zu bytes left\n",
- (int)(itr - (void *)bos), top - itr);
- result = -ENOSPC;
- goto error_bad_cap;
- }
- cap_size = cap_hdr->bLength;
- cap_type = cap_hdr->bDevCapabilityType;
- if (cap_size == 0)
- break;
- if (cap_size > top - itr) {
- dev_err(dev, "Device BUG? premature end of BOS data "
- "[offset 0x%02x cap %02x %zu bytes]: "
- "only %zu bytes left\n",
- (int)(itr - (void *)bos),
- cap_type, cap_size, top - itr);
- result = -EBADF;
- goto error_bad_cap;
- }
- switch (cap_type) {
- case USB_CAP_TYPE_WIRELESS_USB:
- if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
- dev_err(dev, "Device BUG? WUSB Capability "
- "descriptor is %zu bytes vs %zu "
- "needed\n", cap_size,
- sizeof(*wusb_dev->wusb_cap_descr));
- else
- wusb_dev->wusb_cap_descr = itr;
- break;
- default:
- dev_err(dev, "BUG? Unknown BOS capability 0x%02x "
- "(%zu bytes) at offset 0x%02x\n", cap_type,
- cap_size, (int)(itr - (void *)bos));
- }
- itr += cap_size;
- }
- result = 0;
-error_bad_cap:
- return result;
-}
-
-/*
- * Add information from the BOS descriptors to the device
- *
- * @usb_dev: referenced
- * @wusb_dev: referenced and unlocked
- *
- * So what we do is alloc space for the BOS descriptor (32 bytes to
- * start with); read the first four bytes, which include the wTotalLength
- * field (WUSB1.0[T7-26]), and if the whole thing fits in those 32
- * bytes, read it all. If not, we realloc to that size and re-read.
- *
- * Then we call the groking function, that will fill up
- * wusb_dev->wusb_cap_descr, which is what we'll need later on.
- */
-static int wusb_dev_bos_add(struct usb_device *usb_dev,
- struct wusb_dev *wusb_dev)
-{
- ssize_t result;
- struct device *dev = &usb_dev->dev;
- struct usb_bos_descriptor *bos;
- size_t alloc_size = 32, desc_size = 4;
-
- bos = kmalloc(alloc_size, GFP_KERNEL);
- if (bos == NULL)
- return -ENOMEM;
- result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
- if (result < 4) {
- dev_err(dev, "Can't get BOS descriptor or too short: %zd\n",
- result);
- goto error_get_descriptor;
- }
- desc_size = le16_to_cpu(bos->wTotalLength);
- if (desc_size >= alloc_size) {
- kfree(bos);
- alloc_size = desc_size;
- bos = kmalloc(alloc_size, GFP_KERNEL);
- if (bos == NULL)
- return -ENOMEM;
- }
- result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
- if (result < 0 || result != desc_size) {
- dev_err(dev, "Can't get BOS descriptor or too short (need "
- "%zu bytes): %zd\n", desc_size, result);
- goto error_get_descriptor;
- }
- if (result < sizeof(*bos)
- || le16_to_cpu(bos->wTotalLength) != desc_size) {
- dev_err(dev, "Can't get BOS descriptor or too short (need "
- "%zu bytes): %zd\n", desc_size, result);
- goto error_get_descriptor;
- }
-
- result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
- if (result < 0)
- goto error_bad_bos;
- wusb_dev->bos = bos;
- return 0;
-
-error_bad_bos:
-error_get_descriptor:
- kfree(bos);
- wusb_dev->wusb_cap_descr = NULL;
- return result;
-}
-
-static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev)
-{
- kfree(wusb_dev->bos);
- wusb_dev->wusb_cap_descr = NULL;
-}
-
-/*
- * USB stack's device addition Notifier Callback
- *
- * Called from drivers/usb/core/hub.c when a new device is added; we
- * use this hook to perform certain WUSB specific setup work on the
- * new device. As well, it is the first time we can connect the
- * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a
- * reference that we'll drop.
- *
- * First we need to determine if the device is a WUSB device (else we
- * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS)
- * [FIXME: maybe we'd need something more definitive]. If so, we track
- * its usb_bus and from there, the WUSB HC.
- *
- * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we
- * get the wusbhc for the device.
- *
- * We have a reference on @usb_dev (as we are called at the end of its
- * enumeration).
- *
- * NOTE: @usb_dev locked
- */
-static void wusb_dev_add_ncb(struct usb_device *usb_dev)
-{
- int result = 0;
- struct wusb_dev *wusb_dev;
- struct wusbhc *wusbhc;
- struct device *dev = &usb_dev->dev;
- u8 port_idx;
-
- if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
-		return;		/* skip non-WUSB devices and WUSB root hubs */
-
- usb_set_device_state(usb_dev, USB_STATE_UNAUTHENTICATED);
-
- wusbhc = wusbhc_get_by_usb_dev(usb_dev);
- if (wusbhc == NULL)
- goto error_nodev;
- mutex_lock(&wusbhc->mutex);
- wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
- port_idx = wusb_port_no_to_idx(usb_dev->portnum);
- mutex_unlock(&wusbhc->mutex);
- if (wusb_dev == NULL)
- goto error_nodev;
- wusb_dev->usb_dev = usb_get_dev(usb_dev);
- usb_dev->wusb_dev = wusb_dev_get(wusb_dev);
- result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev);
- if (result < 0) {
- dev_err(dev, "Cannot enable security: %d\n", result);
- goto error_sec_add;
- }
-	/* Now query the device for its BOS and attach it to wusb_dev */
- result = wusb_dev_bos_add(usb_dev, wusb_dev);
- if (result < 0) {
- dev_err(dev, "Cannot get BOS descriptors: %d\n", result);
- goto error_bos_add;
- }
- result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev);
- if (result < 0)
- goto error_add_sysfs;
-out:
- wusb_dev_put(wusb_dev);
- wusbhc_put(wusbhc);
-error_nodev:
- return;
-
-error_add_sysfs:
- wusb_dev_bos_rm(wusb_dev);
-error_bos_add:
- wusb_dev_sec_rm(wusb_dev);
-error_sec_add:
- mutex_lock(&wusbhc->mutex);
- __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
- mutex_unlock(&wusbhc->mutex);
- goto out;
-}
-
-/*
- * Undo all the steps done at connection by the notifier callback
- *
- * NOTE: @usb_dev locked
- */
-static void wusb_dev_rm_ncb(struct usb_device *usb_dev)
-{
- struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
-
- if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
-		return;		/* skip non-WUSB devices and WUSB root hubs */
-
- wusb_dev_sysfs_rm(wusb_dev);
- wusb_dev_bos_rm(wusb_dev);
- wusb_dev_sec_rm(wusb_dev);
- wusb_dev->usb_dev = NULL;
- usb_dev->wusb_dev = NULL;
- wusb_dev_put(wusb_dev);
- usb_put_dev(usb_dev);
-}
-
-/*
- * Handle notifications from the USB stack (notifier call back)
- *
- * This is called when the USB stack does a
- * usb_{bus,device}_{add,remove}() so we can do WUSB specific
- * handling. It is called, for the case of
- * USB_DEVICE_{ADD,REMOVE}, with the usb_dev locked.
- */
-int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
- void *priv)
-{
- int result = NOTIFY_OK;
-
- switch (val) {
- case USB_DEVICE_ADD:
- wusb_dev_add_ncb(priv);
- break;
- case USB_DEVICE_REMOVE:
- wusb_dev_rm_ncb(priv);
- break;
- case USB_BUS_ADD:
- /* ignore (for now) */
- case USB_BUS_REMOVE:
- break;
- default:
- WARN_ON(1);
- result = NOTIFY_BAD;
- }
- return result;
-}
-
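-/*
- * Editor's sketch (hypothetical, not part of this file): a notifier
- * like the one above is wired into the USB core with
- * usb_register_notify()/usb_unregister_notify(), roughly:
- *
- *   static struct notifier_block wusb_usb_notifier = {
- *           .notifier_call = wusb_usb_ncb,
- *   };
- *
- *   usb_register_notify(&wusb_usb_notifier);     // on module load
- *   usb_unregister_notify(&wusb_usb_notifier);   // on module unload
- */
-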
-/*
- * Return a referenced wusb_dev given a @wusbhc and @usb_dev
- */
-struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc,
- struct usb_device *usb_dev)
-{
- struct wusb_dev *wusb_dev;
- u8 port_idx;
-
- port_idx = wusb_port_no_to_idx(usb_dev->portnum);
- BUG_ON(port_idx > wusbhc->ports_max);
- wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
-	if (wusb_dev != NULL)	/* otherwise the device is gone */
- wusb_dev_get(wusb_dev);
- return wusb_dev;
-}
-EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev);
-
-void wusb_dev_destroy(struct kref *_wusb_dev)
-{
- struct wusb_dev *wusb_dev = container_of(_wusb_dev, struct wusb_dev, refcnt);
-
- list_del_init(&wusb_dev->cack_node);
- wusb_dev_free(wusb_dev);
-}
-EXPORT_SYMBOL_GPL(wusb_dev_destroy);
-
-/*
- * Create all the device connect handling infrastructure
- *
- * This is basically the device info array, Connect Acknowledgement
- * (cack) lists, keep-alive timers (and delayed work thread).
- */
-int wusbhc_devconnect_create(struct wusbhc *wusbhc)
-{
- wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
- wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr);
- INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run);
-
- wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK;
- wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr);
- INIT_LIST_HEAD(&wusbhc->cack_list);
-
- return 0;
-}
-
-/*
- * Release all resources taken by the devconnect stuff
- */
-void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
-{
- /* no op */
-}
-
-/*
- * wusbhc_devconnect_start - start accepting device connections
- * @wusbhc: the WUSB HC
- *
- * Sets the Host Info IE to accept all new connections.
- *
- * FIXME: This also enables the keep alives but this is not necessary
- * until there are connected and authenticated devices.
- */
-int wusbhc_devconnect_start(struct wusbhc *wusbhc)
-{
- struct device *dev = wusbhc->dev;
- struct wuie_host_info *hi;
- int result;
-
- hi = kzalloc(sizeof(*hi), GFP_KERNEL);
- if (hi == NULL)
- return -ENOMEM;
-
- hi->hdr.bLength = sizeof(*hi);
- hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
- hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL);
- hi->CHID = wusbhc->chid;
- result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr);
- if (result < 0) {
- dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
- goto error_mmcie_set;
- }
- wusbhc->wuie_host_info = hi;
-
- queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
- msecs_to_jiffies(wusbhc->trust_timeout / 2));
-
- return 0;
-
-error_mmcie_set:
- kfree(hi);
- return result;
-}
-
-/*
- * wusbhc_devconnect_stop - stop managing connected devices
- * @wusbhc: the WUSB HC
- *
- * Disconnects any devices still connected, stops the keep alives and
- * removes the Host Info IE.
- */
-void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
-{
- int i;
-
- mutex_lock(&wusbhc->mutex);
- for (i = 0; i < wusbhc->ports_max; i++) {
- if (wusbhc->port[i].wusb_dev)
- __wusbhc_dev_disconnect(wusbhc, &wusbhc->port[i]);
- }
- mutex_unlock(&wusbhc->mutex);
-
- cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
- wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
- kfree(wusbhc->wuie_host_info);
- wusbhc->wuie_host_info = NULL;
-}
-
-/*
- * wusb_set_dev_addr - set the WUSB device address used by the host
- * @wusbhc: the WUSB HC the device is connect to
- * @wusb_dev: the WUSB device
- * @addr: new device address
- */
-int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr)
-{
- int result;
-
- wusb_dev->addr = addr;
- result = wusbhc->dev_info_set(wusbhc, wusb_dev);
- if (result < 0)
- dev_err(wusbhc->dev, "device %d: failed to set device "
- "address\n", wusb_dev->port_idx);
- else
- dev_info(wusbhc->dev, "device %d: %s addr %u\n",
- wusb_dev->port_idx,
- (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth",
- wusb_dev->addr);
-
- return result;
-}
diff --git a/drivers/staging/wusbcore/host/Kconfig b/drivers/staging/wusbcore/host/Kconfig
deleted file mode 100644
index 9a73f9360a08..000000000000
--- a/drivers/staging/wusbcore/host/Kconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config USB_WHCI_HCD
- tristate "Wireless USB Host Controller Interface (WHCI) driver"
- depends on USB_PCI && USB && UWB
- select USB_WUSB
- select UWB_WHCI
- help
- A driver for PCI-based Wireless USB Host Controllers that are
- compliant with the WHCI specification.
-
-	  To compile this driver as a module, choose M here: the module
- will be called "whci-hcd".
-
-config USB_HWA_HCD
- tristate "Host Wire Adapter (HWA) driver"
- depends on USB && UWB
- select USB_WUSB
- select UWB_HWA
- help
- This driver enables you to connect Wireless USB devices to
-	  your system using a Host Wire Adapter USB dongle. This is a
-	  UWB Radio Controller and WUSB Host Controller connected to
- your machine via USB (specified in WUSB1.0).
-
-	  To compile this driver as a module, choose M here: the module
- will be called "hwa-hc".
-
diff --git a/drivers/staging/wusbcore/host/Makefile b/drivers/staging/wusbcore/host/Makefile
deleted file mode 100644
index d65ee8a73e21..000000000000
--- a/drivers/staging/wusbcore/host/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_USB_WHCI_HCD) += whci/
-obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
diff --git a/drivers/staging/wusbcore/host/hwa-hc.c b/drivers/staging/wusbcore/host/hwa-hc.c
deleted file mode 100644
index 8d959e91fe27..000000000000
--- a/drivers/staging/wusbcore/host/hwa-hc.c
+++ /dev/null
@@ -1,875 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Host Wire Adapter:
- * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * The HWA driver is a simple layer that forwards requests to the WAHC
- * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
- * Controller) layers.
- *
- * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB
- * Host Controller that is connected to your system via USB (a USB
- * dongle that implements a USB host...). There is also a Device Wire
- * Adapter, DWA (a Wireless USB hub), that uses the same mechanism for
- * transferring data (it is, after all, a USB host connected via
- * Wireless USB), so we have a common layer called Wire Adapter Host
- * Controller that does all the hard work. The WUSBHC (Wireless USB
- * Host Controller) is the part common to WUSB Host Controllers, the
- * HWA and the PCI-based one, that is implemented following the WHCI
- * spec. All these layers are implemented in ../wusbcore.
- *
- * The main functions are hwahc_op_urb_{en,de}queue(), which pass the
- * job of converting a URB over to the Wire Adapter layer.
- *
- * Entry points:
- *
- * hwahc_driver_*() Driver initialization, registration and
- * teardown.
- *
- * hwahc_probe() New device came up, create an instance for
- * it [from device enumeration].
- *
- * hwahc_disconnect() Remove device instance [from device
- * enumeration].
- *
- * [__]hwahc_op_*() Host-Wire-Adaptor specific functions for
- * starting/stopping/etc (some might be made also
- * DWA).
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/workqueue.h>
-#include <linux/wait.h>
-#include <linux/completion.h>
-#include "../wa-hc.h"
-#include "../wusbhc.h"
-
-struct hwahc {
- struct wusbhc wusbhc; /* has to be 1st */
- struct wahc wa;
-};
-
-/*
- * FIXME should be wusbhc
- *
- * NOTE: we need to cache the Cluster ID because later...there is no
- * way to get it :)
- */
-static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
-{
- int result;
- struct wusbhc *wusbhc = &hwahc->wusbhc;
- struct wahc *wa = &hwahc->wa;
- struct device *dev = &wa->usb_iface->dev;
-
- result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- WUSB_REQ_SET_CLUSTER_ID,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- cluster_id,
- wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (result < 0)
- dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
- cluster_id, result);
- else
- wusbhc->cluster_id = cluster_id;
- dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id);
- return result;
-}
-
-static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
-{
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
-
- return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- WUSB_REQ_SET_NUM_DNTS,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- interval << 8 | slots,
- wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
-}
-
-/*
- * Reset a WUSB host controller and wait for it to complete doing it.
- *
- * @usb_hcd: Pointer to WUSB Host Controller instance.
- *
- */
-static int hwahc_op_reset(struct usb_hcd *usb_hcd)
-{
- int result;
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct device *dev = &hwahc->wa.usb_iface->dev;
-
- mutex_lock(&wusbhc->mutex);
- wa_nep_disarm(&hwahc->wa);
- result = __wa_set_feature(&hwahc->wa, WA_RESET);
- if (result < 0) {
- dev_err(dev, "error commanding HC to reset: %d\n", result);
- goto error_unlock;
- }
- result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
- if (result < 0) {
- dev_err(dev, "error waiting for HC to reset: %d\n", result);
- goto error_unlock;
- }
-error_unlock:
- mutex_unlock(&wusbhc->mutex);
- return result;
-}
-
-/*
- * FIXME: break this function up
- */
-static int hwahc_op_start(struct usb_hcd *usb_hcd)
-{
- u8 addr;
- int result;
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
- result = -ENOSPC;
- mutex_lock(&wusbhc->mutex);
- addr = wusb_cluster_id_get();
- if (addr == 0)
- goto error_cluster_id_get;
- result = __hwahc_set_cluster_id(hwahc, addr);
- if (result < 0)
- goto error_set_cluster_id;
-
- usb_hcd->uses_new_polling = 1;
- set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
- usb_hcd->state = HC_STATE_RUNNING;
-
- /*
- * prevent USB core from suspending the root hub since
- * bus_suspend and bus_resume are not yet supported.
- */
- pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev);
-
- result = 0;
-out:
- mutex_unlock(&wusbhc->mutex);
- return result;
-
-error_set_cluster_id:
- wusb_cluster_id_put(addr);
-error_cluster_id_get:
- goto out;
-
-}
-
-/*
- * No need to abort pipes, as when this is called, all the children
- * have been disconnected and that has done it [through
- * usb_disable_interface() -> usb_disable_endpoint() ->
- * hwahc_op_ep_disable() -> rpipe_ep_disable()].
- */
-static void hwahc_op_stop(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-
- mutex_lock(&wusbhc->mutex);
- wusb_cluster_id_put(wusbhc->cluster_id);
- mutex_unlock(&wusbhc->mutex);
-}
-
-static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
-
- /*
- * We cannot query the HWA for the WUSB time since that requires sending
- * a synchronous URB and this function can be called in_interrupt.
- * Instead, query the USB frame number for our parent and use that.
- */
- return usb_get_current_frame_number(wa->usb_dev);
-}
-
-static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
- gfp_t gfp)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
- return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
-}
-
-static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
- int status)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
- return wa_urb_dequeue(&hwahc->wa, urb, status);
-}
-
-/*
- * Release resources allocated for an endpoint
- *
- * If there is an associated rpipe to this endpoint, go ahead and put it.
- */
-static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd,
- struct usb_host_endpoint *ep)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
- rpipe_ep_disable(&hwahc->wa, ep);
-}
-
-static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
-{
- int result;
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct device *dev = &hwahc->wa.usb_iface->dev;
-
- result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
- if (result < 0) {
- dev_err(dev, "error commanding HC to start: %d\n", result);
- goto error_stop;
- }
- result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
- if (result < 0) {
- dev_err(dev, "error waiting for HC to start: %d\n", result);
- goto error_stop;
- }
- result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "cannot listen to notifications: %d\n", result);
- goto error_stop;
- }
- /*
- * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set,
- * disable transfer notifications.
- */
- if (hwahc->wa.quirks &
- WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) {
- struct usb_host_interface *cur_altsetting =
- hwahc->wa.usb_iface->cur_altsetting;
-
- result = usb_control_msg(hwahc->wa.usb_dev,
- usb_sndctrlpipe(hwahc->wa.usb_dev, 0),
- WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- WA_REQ_ALEREON_FEATURE_SET,
- cur_altsetting->desc.bInterfaceNumber,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- /*
- * If we successfully sent the control message, start DTI here
- * because no transfer notifications will be received which is
- * where DTI is normally started.
- */
- if (result == 0)
- result = wa_dti_start(&hwahc->wa);
- else
- result = 0; /* OK. Continue normally. */
-
- if (result < 0) {
- dev_err(dev, "cannot start DTI: %d\n", result);
- goto error_dti_start;
- }
- }
-
- return result;
-
-error_dti_start:
- wa_nep_disarm(&hwahc->wa);
-error_stop:
- __wa_clear_feature(&hwahc->wa, WA_ENABLE);
- return result;
-}
-
-static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
-{
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
- u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
- int ret;
-
- ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- WUSB_REQ_CHAN_STOP,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- delay * 1000,
- iface_no,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (ret == 0)
- msleep(delay);
-
- wa_nep_disarm(&hwahc->wa);
- __wa_stop(&hwahc->wa);
-}
-
-/*
- * Set the UWB MAS allocation for the WUSB cluster
- *
- * @stream_index: stream to use (-1 for cancelling the allocation)
- * @mas: mas bitmap to use
- */
-static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
- const struct uwb_mas_bm *mas)
-{
- int result;
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
- struct device *dev = &wa->usb_iface->dev;
- u8 mas_le[UWB_NUM_MAS/8];
-
- /* Set the stream index */
- result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- WUSB_REQ_SET_STREAM_IDX,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- stream_index,
- wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
- goto out;
- }
- uwb_mas_bm_copy_le(mas_le, mas);
- /* Set the MAS allocation */
- result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- WUSB_REQ_SET_WUSB_MAS,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- mas_le, 32, USB_CTRL_SET_TIMEOUT);
- if (result < 0)
- dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
-out:
- return result;
-}
-
-/*
- * Add an IE to the host's MMC
- *
- * @interval: See WUSB1.0[8.5.3.1]
- * @repeat_cnt: See WUSB1.0[8.5.3.1]
- * @handle: See WUSB1.0[8.5.3.1]
- * @wuie: Pointer to the header of the WUSB IE data to add.
- * MUST BE allocated in a kmalloc buffer (no stack or
- * vmalloc).
- *
- * NOTE: the format of the WUSB IEs for MMCs are different to the
- * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length +
- * Id in WUSB IEs). Standards...you gotta love 'em.
- */
-static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
- u8 repeat_cnt, u8 handle,
- struct wuie_hdr *wuie)
-{
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
- u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
-
- return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- WUSB_REQ_ADD_MMC_IE,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- interval << 8 | repeat_cnt,
- handle << 8 | iface_no,
- wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT);
-}
-
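-/*
- * Editor's sketch contrasting the two header layouts mentioned above
- * (the MBOA MAC field names are illustrative only):
- *
- *   WUSB IE header:     u8 bLength; u8 bIEIdentifier;  (Length, then Id)
- *   MBOA MAC IE header: u8 element_id; u8 length;      (Id, then Length)
- */
-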
-/*
- * Remove an IE from the host's MMC
- *
- * @handle: See WUSB1.0[8.5.3.1]
- */
-static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
-{
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
- u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
- return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- WUSB_REQ_REMOVE_MMC_IE,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, handle << 8 | iface_no,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
-}
-
-/*
- * Update device information for a given fake port
- *
- * @port_idx: Fake port to which device is connected (wusbhc index, not
- * USB port number).
- */
-static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
- struct wusb_dev *wusb_dev)
-{
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
- u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
- struct hwa_dev_info *dev_info;
- int ret;
-
- /* fill out the Device Info buffer and send it */
- dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL);
- if (!dev_info)
- return -ENOMEM;
- uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability,
- &wusb_dev->availability);
- dev_info->bDeviceAddress = wusb_dev->addr;
-
- /*
- * If the descriptors haven't been read yet, use a default PHY
- * rate of 53.3 Mbit/s only. The correct value will be used
-	 * when this is called again as part of the
- * authentication process (which occurs after the descriptors
- * have been read).
- */
- if (wusb_dev->wusb_cap_descr)
- dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates;
- else
- dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53);
-
- ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- WUSB_REQ_SET_DEV_INFO,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, wusb_dev->port_idx << 8 | iface_no,
- dev_info, sizeof(struct hwa_dev_info),
- USB_CTRL_SET_TIMEOUT);
- kfree(dev_info);
- return ret;
-}
-
-/*
- * Set host's idea of which encryption (and key) method to use when
- * talking to a device on a given port.
- *
- * If key is NULL, it means disable encryption for that "virtual port"
- * (used when we disconnect).
- */
-static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
- const void *key, size_t key_size,
- u8 key_idx)
-{
- int result = -ENOMEM;
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
- u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
- struct usb_key_descriptor *keyd;
- size_t keyd_len;
-
- keyd_len = sizeof(*keyd) + key_size;
- keyd = kzalloc(keyd_len, GFP_KERNEL);
- if (keyd == NULL)
- return -ENOMEM;
-
- keyd->bLength = keyd_len;
- keyd->bDescriptorType = USB_DT_KEY;
- keyd->tTKID[0] = (tkid >> 0) & 0xff;
- keyd->tTKID[1] = (tkid >> 8) & 0xff;
- keyd->tTKID[2] = (tkid >> 16) & 0xff;
- memcpy(keyd->bKeyData, key, key_size);
-
- result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- USB_REQ_SET_DESCRIPTOR,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- USB_DT_KEY << 8 | key_idx,
- port_idx << 8 | iface_no,
- keyd, keyd_len, USB_CTRL_SET_TIMEOUT);
-
- kzfree(keyd); /* clear keys etc. */
- return result;
-}
-
-/*
- * Set host's idea of which encryption (and key) method to use when
- * talking to a device on a given port.
- *
- * If key is NULL, it means disable encryption for that "virtual port"
- * (used when we disconnect).
- */
-static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
- const void *key, size_t key_size)
-{
- int result = -ENOMEM;
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
- u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
- u8 encryption_value;
-
- /* Tell the host which key to use to talk to the device */
- if (key) {
- u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
- WUSB_KEY_INDEX_ORIGINATOR_HOST);
-
- result = __hwahc_dev_set_key(wusbhc, port_idx, tkid,
- key, key_size, key_idx);
- if (result < 0)
- goto error_set_key;
- encryption_value = wusbhc->ccm1_etd->bEncryptionValue;
- } else {
- /* FIXME: this should come from wusbhc->etd[UNSECURE].value */
- encryption_value = 0;
- }
-
- /* Set the encryption type for communicating with the device */
- result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- USB_REQ_SET_ENCRYPTION,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- encryption_value, port_idx << 8 | iface_no,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (result < 0)
- dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
- "port index %u to %s (value %d): %d\n", port_idx,
- wusb_et_name(wusbhc->ccm1_etd->bEncryptionType),
- wusbhc->ccm1_etd->bEncryptionValue, result);
-error_set_key:
- return result;
-}
-
-/*
- * Set host's GTK key
- */
-static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid,
- const void *key, size_t key_size)
-{
- u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
- WUSB_KEY_INDEX_ORIGINATOR_HOST);
-
- return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx);
-}
-
-/*
- * Get the Wire Adapter class-specific descriptor
- *
- * NOTE: this descriptor comes with the big bundled configuration
- * descriptor that includes the interfaces' and endpoints', so
- * we just look for it in the cached copy kept by the USB stack.
- *
- * NOTE2: We convert LE fields to CPU order.
- */
-static int wa_fill_descr(struct wahc *wa)
-{
- int result;
- struct device *dev = &wa->usb_iface->dev;
- char *itr;
- struct usb_device *usb_dev = wa->usb_dev;
- struct usb_descriptor_header *hdr;
- struct usb_wa_descriptor *wa_descr;
- size_t itr_size, actconfig_idx;
-
- actconfig_idx = (usb_dev->actconfig - usb_dev->config) /
- sizeof(usb_dev->config[0]);
- itr = usb_dev->rawdescriptors[actconfig_idx];
- itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
- while (itr_size >= sizeof(*hdr)) {
- hdr = (struct usb_descriptor_header *) itr;
- dev_dbg(dev, "Extra device descriptor: "
- "type %02x/%u bytes @ %zu (%zu left)\n",
- hdr->bDescriptorType, hdr->bLength,
- (itr - usb_dev->rawdescriptors[actconfig_idx]),
- itr_size);
- if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
- goto found;
- itr += hdr->bLength;
- itr_size -= hdr->bLength;
- }
- dev_err(dev, "cannot find Wire Adapter Class descriptor\n");
- return -ENODEV;
-
-found:
- result = -EINVAL;
- if (hdr->bLength > itr_size) { /* is it available? */
- dev_err(dev, "incomplete Wire Adapter Class descriptor "
- "(%zu bytes left, %u needed)\n",
- itr_size, hdr->bLength);
- goto error;
- }
- if (hdr->bLength < sizeof(*wa->wa_descr)) {
- dev_err(dev, "short Wire Adapter Class descriptor\n");
- goto error;
- }
- wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
- if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
- dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
- (le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8,
- le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
- result = 0;
-error:
- return result;
-}
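wa_fill_descr() walks the raw configuration bundle one descriptor header at a time, advancing by bLength until it finds the wanted bDescriptorType. The same walk in standalone form (the buffer contents and the 0xDC type value are invented; unlike the driver loop, this sketch also rejects bLength == 0 so a malformed bundle cannot loop forever):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct desc_header {
        uint8_t bLength;
        uint8_t bDescriptorType;
    };

    /* Find the first descriptor of the given type in a raw bundle. */
    static const struct desc_header *find_desc(const uint8_t *buf,
                                               size_t len, uint8_t type)
    {
        while (len >= sizeof(struct desc_header)) {
            const struct desc_header *hdr = (const void *)buf;

            if (hdr->bLength < sizeof(*hdr) || hdr->bLength > len)
                return NULL; /* malformed bundle */
            if (hdr->bDescriptorType == type)
                return hdr;
            buf += hdr->bLength;
            len -= hdr->bLength;
        }
        return NULL;
    }

    int main(void)
    {
        /* a 9-byte config descriptor (type 0x02), then a fake 0xDC one */
        const uint8_t bundle[] = {
            9, 0x02, 0, 0, 0, 0, 0, 0, 0,
            4, 0xDC, 0x34, 0x12,
        };
        const struct desc_header *hdr =
            find_desc(bundle, sizeof(bundle), 0xDC);

        printf("found: %s\n", hdr ? "yes" : "no");
        return 0;
    }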
-
-static const struct hc_driver hwahc_hc_driver = {
- .description = "hwa-hcd",
- .product_desc = "Wireless USB HWA host controller",
- .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
- .irq = NULL, /* FIXME */
- .flags = HCD_USB25,
- .reset = hwahc_op_reset,
- .start = hwahc_op_start,
- .stop = hwahc_op_stop,
- .get_frame_number = hwahc_op_get_frame_number,
- .urb_enqueue = hwahc_op_urb_enqueue,
- .urb_dequeue = hwahc_op_urb_dequeue,
- .endpoint_disable = hwahc_op_endpoint_disable,
-
- .hub_status_data = wusbhc_rh_status_data,
- .hub_control = wusbhc_rh_control,
- .start_port_reset = wusbhc_rh_start_port_reset,
-};
-
-static int hwahc_security_create(struct hwahc *hwahc)
-{
- int result;
- struct wusbhc *wusbhc = &hwahc->wusbhc;
- struct usb_device *usb_dev = hwahc->wa.usb_dev;
- struct device *dev = &usb_dev->dev;
- struct usb_security_descriptor *secd;
- struct usb_encryption_descriptor *etd;
- void *itr, *top;
- size_t itr_size, needed, bytes;
- u8 index;
- char buf[64];
-
- /* Find the host's security descriptors in the config descr bundle */
- index = (usb_dev->actconfig - usb_dev->config) /
- sizeof(usb_dev->config[0]);
- itr = usb_dev->rawdescriptors[index];
- itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
- top = itr + itr_size;
- result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
- le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
- USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
- if (result == -1) {
- dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
- return 0;
- }
- needed = sizeof(*secd);
- if (top - (void *)secd < needed) {
- dev_err(dev, "BUG? Not enough data to process security "
- "descriptor header (%zu bytes left vs %zu needed)\n",
- top - (void *) secd, needed);
- return 0;
- }
- needed = le16_to_cpu(secd->wTotalLength);
- if (top - (void *)secd < needed) {
- dev_err(dev, "BUG? Not enough data to process security "
- "descriptors (%zu bytes left vs %zu needed)\n",
- top - (void *) secd, needed);
- return 0;
- }
- /* Walk over the sec descriptors and store CCM1's on wusbhc */
- itr = (void *) secd + sizeof(*secd);
- top = (void *) secd + le16_to_cpu(secd->wTotalLength);
- index = 0;
- bytes = 0;
- while (itr < top) {
- etd = itr;
- if (top - itr < sizeof(*etd)) {
- dev_err(dev, "BUG: bad host security descriptor; "
- "not enough data (%zu vs %zu left)\n",
- top - itr, sizeof(*etd));
- break;
- }
- if (etd->bLength < sizeof(*etd)) {
- dev_err(dev, "BUG: bad host encryption descriptor; "
- "descriptor is too short "
- "(%zu vs %zu needed)\n",
- (size_t)etd->bLength, sizeof(*etd));
- break;
- }
- itr += etd->bLength;
- bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
- "%s (0x%02x) ",
- wusb_et_name(etd->bEncryptionType),
- etd->bEncryptionValue);
- wusbhc->ccm1_etd = etd;
- }
- dev_info(dev, "supported encryption types: %s\n", buf);
- if (wusbhc->ccm1_etd == NULL) {
- dev_err(dev, "E: host doesn't support CCM-1 crypto\n");
- return 0;
- }
- /* Pretty print what we support */
- return 0;
-}
-
-static void hwahc_security_release(struct hwahc *hwahc)
-{
- /* nothing to do here so far... */
-}
-
-static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface,
- kernel_ulong_t quirks)
-{
- int result;
- struct device *dev = &iface->dev;
- struct wusbhc *wusbhc = &hwahc->wusbhc;
- struct wahc *wa = &hwahc->wa;
- struct usb_device *usb_dev = interface_to_usbdev(iface);
-
- wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
- wa->usb_iface = usb_get_intf(iface);
- wusbhc->dev = dev;
- /* defer getting the uwb_rc handle until it is needed since it
- * may not have been registered by the hwa_rc driver yet. */
- wusbhc->uwb_rc = NULL;
- result = wa_fill_descr(wa); /* Get the device descriptor */
- if (result < 0)
- goto error_fill_descriptor;
- if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) {
- dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB "
- "adapter (%u ports)\n", wa->wa_descr->bNumPorts);
- wusbhc->ports_max = USB_MAXCHILDREN;
- } else {
- wusbhc->ports_max = wa->wa_descr->bNumPorts;
- }
- wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs;
- wusbhc->start = __hwahc_op_wusbhc_start;
- wusbhc->stop = __hwahc_op_wusbhc_stop;
- wusbhc->mmcie_add = __hwahc_op_mmcie_add;
- wusbhc->mmcie_rm = __hwahc_op_mmcie_rm;
- wusbhc->dev_info_set = __hwahc_op_dev_info_set;
- wusbhc->bwa_set = __hwahc_op_bwa_set;
- wusbhc->set_num_dnts = __hwahc_op_set_num_dnts;
- wusbhc->set_ptk = __hwahc_op_set_ptk;
- wusbhc->set_gtk = __hwahc_op_set_gtk;
- result = hwahc_security_create(hwahc);
- if (result < 0) {
- dev_err(dev, "Can't initialize security: %d\n", result);
- goto error_security_create;
- }
- wa->wusb = wusbhc; /* FIXME: ugly, need to fix */
- result = wusbhc_create(&hwahc->wusbhc);
- if (result < 0) {
- dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
- goto error_wusbhc_create;
- }
- result = wa_create(&hwahc->wa, iface, quirks);
- if (result < 0)
- goto error_wa_create;
- return 0;
-
-error_wa_create:
- wusbhc_destroy(&hwahc->wusbhc);
-error_wusbhc_create:
- /* wa_fill_descr() allocates no resources */
-error_security_create:
-error_fill_descriptor:
- usb_put_intf(iface);
- usb_put_dev(usb_dev);
- return result;
-}
-
-static void hwahc_destroy(struct hwahc *hwahc)
-{
- struct wusbhc *wusbhc = &hwahc->wusbhc;
-
- mutex_lock(&wusbhc->mutex);
- __wa_destroy(&hwahc->wa);
- wusbhc_destroy(&hwahc->wusbhc);
- hwahc_security_release(hwahc);
- hwahc->wusbhc.dev = NULL;
- uwb_rc_put(wusbhc->uwb_rc);
- usb_put_intf(hwahc->wa.usb_iface);
- usb_put_dev(hwahc->wa.usb_dev);
- mutex_unlock(&wusbhc->mutex);
-}
-
-static void hwahc_init(struct hwahc *hwahc)
-{
- wa_init(&hwahc->wa);
-}
-
-static int hwahc_probe(struct usb_interface *usb_iface,
- const struct usb_device_id *id)
-{
- int result;
- struct usb_hcd *usb_hcd;
- struct wusbhc *wusbhc;
- struct hwahc *hwahc;
- struct device *dev = &usb_iface->dev;
-
- result = -ENOMEM;
- usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
- if (usb_hcd == NULL) {
- dev_err(dev, "unable to allocate instance\n");
- goto error_alloc;
- }
- usb_hcd->wireless = 1;
- usb_hcd->self.sg_tablesize = ~0;
- wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- hwahc_init(hwahc);
- result = hwahc_create(hwahc, usb_iface, id->driver_info);
- if (result < 0) {
- dev_err(dev, "Cannot initialize internals: %d\n", result);
- goto error_hwahc_create;
- }
- result = usb_add_hcd(usb_hcd, 0, 0);
- if (result < 0) {
- dev_err(dev, "Cannot add HCD: %d\n", result);
- goto error_add_hcd;
- }
- device_wakeup_enable(usb_hcd->self.controller);
- result = wusbhc_b_create(&hwahc->wusbhc);
- if (result < 0) {
- dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
- goto error_wusbhc_b_create;
- }
- return 0;
-
-error_wusbhc_b_create:
- usb_remove_hcd(usb_hcd);
-error_add_hcd:
- hwahc_destroy(hwahc);
-error_hwahc_create:
- usb_put_hcd(usb_hcd);
-error_alloc:
- return result;
-}
-
-static void hwahc_disconnect(struct usb_interface *usb_iface)
-{
- struct usb_hcd *usb_hcd;
- struct wusbhc *wusbhc;
- struct hwahc *hwahc;
-
- usb_hcd = usb_get_intfdata(usb_iface);
- wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
- wusbhc_b_destroy(&hwahc->wusbhc);
- usb_remove_hcd(usb_hcd);
- hwahc_destroy(hwahc);
- usb_put_hcd(usb_hcd);
-}
-
-static const struct usb_device_id hwahc_id_table[] = {
- /* Alereon 5310 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
- .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
- WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
- /* Alereon 5611 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
- .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
- WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
- /* FIXME: use class labels for this */
- { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
- {},
-};
-MODULE_DEVICE_TABLE(usb, hwahc_id_table);
-
-static struct usb_driver hwahc_driver = {
- .name = "hwa-hc",
- .probe = hwahc_probe,
- .disconnect = hwahc_disconnect,
- .id_table = hwahc_id_table,
-};
-
-module_usb_driver(hwahc_driver);
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/host/whci/Makefile b/drivers/staging/wusbcore/host/whci/Makefile
deleted file mode 100644
index 859d20079df6..000000000000
--- a/drivers/staging/wusbcore/host/whci/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
-
-whci-hcd-y := \
- asl.o \
- debug.o \
- hcd.o \
- hw.o \
- init.o \
- int.o \
- pzl.o \
- qset.o \
- wusb.o
diff --git a/drivers/staging/wusbcore/host/whci/asl.c b/drivers/staging/wusbcore/host/whci/asl.c
deleted file mode 100644
index a2b9a50cfb80..000000000000
--- a/drivers/staging/wusbcore/host/whci/asl.c
+++ /dev/null
@@ -1,376 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) asynchronous schedule management.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/dma-mapping.h>
-#include <linux/usb.h>
-
-#include "../../../uwb/include/umc.h"
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
- struct whc_qset **next, struct whc_qset **prev)
-{
- struct list_head *n, *p;
-
- BUG_ON(list_empty(&whc->async_list));
-
- n = qset->list_node.next;
- if (n == &whc->async_list)
- n = n->next;
- p = qset->list_node.prev;
- if (p == &whc->async_list)
- p = p->prev;
-
- *next = container_of(n, struct whc_qset, list_node);
- *prev = container_of(p, struct whc_qset, list_node);
-
-}
-
-static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
-{
- list_move(&qset->list_node, &whc->async_list);
- qset->in_sw_list = true;
-}
-
-static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
-{
- struct whc_qset *next, *prev;
-
- qset_clear(whc, qset);
-
- /* Link into ASL. */
- qset_get_next_prev(whc, qset, &next, &prev);
- whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
- whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
- qset->in_hw_list = true;
-}
-
-static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
-{
- struct whc_qset *prev, *next;
-
- qset_get_next_prev(whc, qset, &next, &prev);
-
- list_move(&qset->list_node, &whc->async_removed_list);
- qset->in_sw_list = false;
-
- /*
- * No more qsets in the ASL? The caller must stop the ASL as
- * it's no longer valid.
- */
- if (list_empty(&whc->async_list))
- return;
-
- /* Remove from ASL. */
- whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
- qset->in_hw_list = false;
-}
-
-/**
- * process_qset - process any recently inactivated or halted qTDs in a
- * qset.
- *
- * After inactive qTDs are removed, new qTDs can be added if the
- * urb queue still contains URBs.
- *
- * Returns the schedule updates required (e.g., WHC_UPDATE_REMOVED if
- * a qset was removed); scan_async_work() translates these into the
- * corresponding WUSBCMD doorbell bits (such as WUSBCMD_ASYNC_QSET_RM).
- */
-static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
-{
- enum whc_update update = 0;
- uint32_t status = 0;
-
- while (qset->ntds) {
- struct whc_qtd *td;
-
- td = &qset->qtd[qset->td_start];
- status = le32_to_cpu(td->status);
-
- /*
- * Nothing to do with a still active qTD.
- */
- if (status & QTD_STS_ACTIVE)
- break;
-
- if (status & QTD_STS_HALTED) {
- /* Ug, an error. */
- process_halted_qtd(whc, qset, td);
- /* A halted qTD always triggers an update
- because the qset was either removed or
- reactivated. */
- update |= WHC_UPDATE_UPDATED;
- goto done;
- }
-
- /* Mmm, a completed qTD. */
- process_inactive_qtd(whc, qset, td);
- }
-
- if (!qset->remove)
- update |= qset_add_qtds(whc, qset);
-
-done:
- /*
- * Remove this qset from the ASL if requested, but only if it has
- * no qTDs.
- */
- if (qset->remove && qset->ntds == 0) {
- asl_qset_remove(whc, qset);
- update |= WHC_UPDATE_REMOVED;
- }
- return update;
-}
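process_qset() consumes qTDs in order from td_start, stopping at the first one the hardware still owns (QTD_STS_ACTIVE), with td_start wrapping around the fixed-size ring. That ring discipline in standalone form (status values and the ring size macro are invented for the sketch):

    #include <stdio.h>

    #define TD_MAX     8            /* stand-in for WHCI_QSET_TD_MAX */
    #define STS_ACTIVE 0x80000000u  /* stand-in for QTD_STS_ACTIVE */

    struct ring {
        unsigned status[TD_MAX];
        int td_start, ntds;
    };

    /* Retire completed TDs from the head of the ring; stop at the
     * first still-active one, as process_qset() does. */
    static void retire(struct ring *r)
    {
        while (r->ntds) {
            unsigned s = r->status[r->td_start];

            if (s & STS_ACTIVE)
                break;
            printf("retired TD %d (status %#x)\n", r->td_start, s);
            r->td_start = (r->td_start + 1) % TD_MAX;
            r->ntds--;
        }
    }

    int main(void)
    {
        struct ring r = {
            .status = { 0x100, 0x200, STS_ACTIVE | 0x300 },
            .td_start = 0, .ntds = 3,
        };

        retire(&r);
        printf("head now %d, %d TDs left\n", r.td_start, r.ntds);
        return 0;
    }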
-
-void asl_start(struct whc *whc)
-{
- struct whc_qset *qset;
-
- qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
-
- le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
-
- whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
- whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
- WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
- 1000, "start ASL");
-}
-
-void asl_stop(struct whc *whc)
-{
- whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
- whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
- WUSBSTS_ASYNC_SCHED, 0,
- 1000, "stop ASL");
-}
-
-/**
- * asl_update - request an ASL update and wait for the hardware to be synced
- * @whc: the WHCI HC
- * @wusbcmd: WUSBCMD value to start the update.
- *
- * If the WUSB HC is inactive (i.e., the ASL is stopped) then the
- * update must be skipped as the hardware may not respond to update
- * requests.
- */
-void asl_update(struct whc *whc, uint32_t wusbcmd)
-{
- struct wusbhc *wusbhc = &whc->wusbhc;
- long t;
-
- mutex_lock(&wusbhc->mutex);
- if (wusbhc->active) {
- whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
- t = wait_event_timeout(
- whc->async_list_wq,
- (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
- msecs_to_jiffies(1000));
- if (t == 0)
- whc_hw_error(whc, "ASL update timeout");
- }
- mutex_unlock(&wusbhc->mutex);
-}
-
-/**
- * scan_async_work - scan the ASL for qsets to process.
- *
- * Process each qset in the ASL in turn and then signal the WHC that
- * the ASL has been updated.
- *
- * Then start, stop or update the asynchronous schedule as required.
- */
-void scan_async_work(struct work_struct *work)
-{
- struct whc *whc = container_of(work, struct whc, async_work);
- struct whc_qset *qset, *t;
- enum whc_update update = 0;
-
- spin_lock_irq(&whc->lock);
-
- /*
- * Traverse the software list backwards so new qsets can be
- * safely inserted into the ASL without making it non-circular.
- */
- list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
- if (!qset->in_hw_list) {
- asl_qset_insert(whc, qset);
- update |= WHC_UPDATE_ADDED;
- }
-
- update |= process_qset(whc, qset);
- }
-
- spin_unlock_irq(&whc->lock);
-
- if (update) {
- uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
- if (update & WHC_UPDATE_REMOVED)
- wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
- asl_update(whc, wusbcmd);
- }
-
- /*
- * Now that the ASL is updated, complete the removal of any
- * removed qsets.
- *
- * If the qset was to be reset, do so and reinsert it into the
- * ASL if it has pending transfers.
- */
- spin_lock_irq(&whc->lock);
-
- list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
- qset_remove_complete(whc, qset);
- if (qset->reset) {
- qset_reset(whc, qset);
- if (!list_empty(&qset->stds)) {
- asl_qset_insert_begin(whc, qset);
- queue_work(whc->workqueue, &whc->async_work);
- }
- }
- }
-
- spin_unlock_irq(&whc->lock);
-}
-
-/**
- * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
- * @whc: the WHCI host controller
- * @urb: the URB to enqueue
- * @mem_flags: flags for any memory allocations
- *
- * The qset for the endpoint is obtained and the urb queued on to it.
- *
- * Work is scheduled to update the hardware's view of the ASL.
- */
-int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
-{
- struct whc_qset *qset;
- int err;
- unsigned long flags;
-
- spin_lock_irqsave(&whc->lock, flags);
-
- err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
- if (err < 0) {
- spin_unlock_irqrestore(&whc->lock, flags);
- return err;
- }
-
- qset = get_qset(whc, urb, GFP_ATOMIC);
- if (qset == NULL)
- err = -ENOMEM;
- else
- err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
- if (!err) {
- if (!qset->in_sw_list && !qset->remove)
- asl_qset_insert_begin(whc, qset);
- } else
- usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
-
- spin_unlock_irqrestore(&whc->lock, flags);
-
- if (!err)
- queue_work(whc->workqueue, &whc->async_work);
-
- return err;
-}
-
-/**
- * asl_urb_dequeue - remove an URB (qset) from the async list.
- * @whc: the WHCI host controller
- * @urb: the URB to dequeue
- * @status: the current status of the URB
- *
- * URBs that do not yet have qTDs can simply be removed from the software
- * queue, otherwise the qset must be removed from the ASL so the qTDs
- * can be removed.
- */
-int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
-{
- struct whc_urb *wurb = urb->hcpriv;
- struct whc_qset *qset = wurb->qset;
- struct whc_std *std, *t;
- bool has_qtd = false;
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&whc->lock, flags);
-
- ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
- if (ret < 0)
- goto out;
-
- list_for_each_entry_safe(std, t, &qset->stds, list_node) {
- if (std->urb == urb) {
- if (std->qtd)
- has_qtd = true;
- qset_free_std(whc, std);
- } else
- std->qtd = NULL; /* so this std is re-added when the qset is */
- }
-
- if (has_qtd) {
- asl_qset_remove(whc, qset);
- wurb->status = status;
- wurb->is_async = true;
- queue_work(whc->workqueue, &wurb->dequeue_work);
- } else
- qset_remove_urb(whc, qset, urb, status);
-out:
- spin_unlock_irqrestore(&whc->lock, flags);
-
- return ret;
-}
-
-/**
- * asl_qset_delete - delete a qset from the ASL
- */
-void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
-{
- qset->remove = 1;
- queue_work(whc->workqueue, &whc->async_work);
- qset_delete(whc, qset);
-}
-
-/**
- * asl_init - initialize the asynchronous schedule list
- *
- * A dummy qset with no qTDs is added to the ASL to simplify removing
- * qsets (no need to stop the ASL when the last qset is removed).
- */
-int asl_init(struct whc *whc)
-{
- struct whc_qset *qset;
-
- qset = qset_alloc(whc, GFP_KERNEL);
- if (qset == NULL)
- return -ENOMEM;
-
- asl_qset_insert_begin(whc, qset);
- asl_qset_insert(whc, qset);
-
- return 0;
-}
-
-/**
- * asl_clean_up - free ASL resources
- *
- * The ASL is stopped and empty except for the dummy qset.
- */
-void asl_clean_up(struct whc *whc)
-{
- struct whc_qset *qset;
-
- if (!list_empty(&whc->async_list)) {
- qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
- list_del(&qset->list_node);
- qset_free(whc, qset);
- }
-}
diff --git a/drivers/staging/wusbcore/host/whci/debug.c b/drivers/staging/wusbcore/host/whci/debug.c
deleted file mode 100644
index 443da6719147..000000000000
--- a/drivers/staging/wusbcore/host/whci/debug.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) debug.
- *
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/export.h>
-
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-struct whc_dbg {
- struct dentry *di_f;
- struct dentry *asl_f;
- struct dentry *pzl_f;
-};
-
-static void qset_print(struct seq_file *s, struct whc_qset *qset)
-{
- static const char *qh_type[] = {
- "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
- struct whc_std *std;
- struct urb *urb = NULL;
- int i;
-
- seq_printf(s, "qset %08x", (u32)qset->qset_dma);
- if (&qset->list_node == qset->whc->async_list.prev) {
- seq_printf(s, " (dummy)\n");
- } else {
- seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
- qset->qh.info1 & 0x0f,
- (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
- qh_type[(qset->qh.info1 >> 5) & 0x7],
- (qset->qh.info1 >> 16) & 0xffff);
- }
- seq_printf(s, " -> %08x\n", (u32)qset->qh.link);
- seq_printf(s, " info: %08x %08x %08x\n",
- qset->qh.info1, qset->qh.info2, qset->qh.info3);
- seq_printf(s, " sts: %04x errs: %d curwin: %08x\n",
- qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
- seq_printf(s, " TD: sts: %08x opts: %08x\n",
- qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
-
- for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
- seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
- i == qset->td_start ? 'S' : ' ',
- i == qset->td_end ? 'E' : ' ',
- i, qset->qtd[i].status, qset->qtd[i].options,
- (u32)qset->qtd[i].page_list_ptr);
- }
- seq_printf(s, " ntds: %d\n", qset->ntds);
- list_for_each_entry(std, &qset->stds, list_node) {
- if (urb != std->urb) {
- urb = std->urb;
- seq_printf(s, " urb %p transferred: %d bytes\n", urb,
- urb->actual_length);
- }
- if (std->qtd)
- seq_printf(s, " sTD[%td]: %zu bytes @ %08x\n",
- std->qtd - &qset->qtd[0],
- std->len, std->num_pointers ?
- (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
- else
- seq_printf(s, " sTD[-]: %zd bytes @ %08x\n",
- std->len, std->num_pointers ?
- (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
- }
-}
-
-static int di_show(struct seq_file *s, void *p)
-{
- struct whc *whc = s->private;
- int d;
-
- for (d = 0; d < whc->n_devices; d++) {
- struct di_buf_entry *di = &whc->di_buf[d];
-
- seq_printf(s, "DI[%d]\n", d);
- seq_printf(s, " availability: %*pb\n",
- UWB_NUM_MAS, (unsigned long *)di->availability_info);
- seq_printf(s, " %c%c key idx: %d dev addr: %d\n",
- (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
- (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
- (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
- (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
- }
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(di);
-
-static int asl_show(struct seq_file *s, void *p)
-{
- struct whc *whc = s->private;
- struct whc_qset *qset;
-
- list_for_each_entry(qset, &whc->async_list, list_node) {
- qset_print(s, qset);
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(asl);
-
-static int pzl_show(struct seq_file *s, void *p)
-{
- struct whc *whc = s->private;
- struct whc_qset *qset;
- int period;
-
- for (period = 0; period < 5; period++) {
- seq_printf(s, "Period %d\n", period);
- list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
- qset_print(s, qset);
- }
- }
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(pzl);
-
-void whc_dbg_init(struct whc *whc)
-{
- if (whc->wusbhc.pal.debugfs_dir == NULL)
- return;
-
- whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL);
- if (whc->dbg == NULL)
- return;
-
- whc->dbg->di_f = debugfs_create_file("di", 0444,
- whc->wusbhc.pal.debugfs_dir, whc,
- &di_fops);
- whc->dbg->asl_f = debugfs_create_file("asl", 0444,
- whc->wusbhc.pal.debugfs_dir, whc,
- &asl_fops);
- whc->dbg->pzl_f = debugfs_create_file("pzl", 0444,
- whc->wusbhc.pal.debugfs_dir, whc,
- &pzl_fops);
-}
-
-void whc_dbg_clean_up(struct whc *whc)
-{
- if (whc->dbg) {
- debugfs_remove(whc->dbg->pzl_f);
- debugfs_remove(whc->dbg->asl_f);
- debugfs_remove(whc->dbg->di_f);
- kfree(whc->dbg);
- }
-}
diff --git a/drivers/staging/wusbcore/host/whci/hcd.c b/drivers/staging/wusbcore/host/whci/hcd.c
deleted file mode 100644
index bee1ff2d35be..000000000000
--- a/drivers/staging/wusbcore/host/whci/hcd.c
+++ /dev/null
@@ -1,356 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) driver.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include "../../../uwb/include/umc.h"
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-/*
- * One-time initialization.
- *
- * Nothing to do here.
- */
-static int whc_reset(struct usb_hcd *usb_hcd)
-{
- return 0;
-}
-
-/*
- * Start the wireless host controller.
- *
- * Start device notification.
- *
- * Put hc into run state, set DNTS parameters.
- */
-static int whc_start(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct whc *whc = wusbhc_to_whc(wusbhc);
- u8 bcid;
- int ret;
-
- mutex_lock(&wusbhc->mutex);
-
- le_writel(WUSBINTR_GEN_CMD_DONE
- | WUSBINTR_HOST_ERR
- | WUSBINTR_ASYNC_SCHED_SYNCED
- | WUSBINTR_DNTS_INT
- | WUSBINTR_ERR_INT
- | WUSBINTR_INT,
- whc->base + WUSBINTR);
-
- /* set cluster ID */
- bcid = wusb_cluster_id_get();
- ret = whc_set_cluster_id(whc, bcid);
- if (ret < 0)
- goto out;
- wusbhc->cluster_id = bcid;
-
- /* start HC */
- whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
-
- usb_hcd->uses_new_polling = 1;
- set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
- usb_hcd->state = HC_STATE_RUNNING;
-
-out:
- mutex_unlock(&wusbhc->mutex);
- return ret;
-}
-
-
-/*
- * Stop the wireless host controller.
- *
- * Stop device notification.
- *
- * Wait for pending transfer to stop? Put hc into stop state?
- */
-static void whc_stop(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct whc *whc = wusbhc_to_whc(wusbhc);
-
- mutex_lock(&wusbhc->mutex);
-
- /* stop HC */
- le_writel(0, whc->base + WUSBINTR);
- whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
- whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
- WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
- 100, "HC to halt");
-
- wusb_cluster_id_put(wusbhc->cluster_id);
-
- mutex_unlock(&wusbhc->mutex);
-}
-
-static int whc_get_frame_number(struct usb_hcd *usb_hcd)
-{
- /* Frame numbers are not applicable to WUSB. */
- return -ENOSYS;
-}
-
-
-/*
- * Queue an URB to the ASL or PZL
- */
-static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
- gfp_t mem_flags)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct whc *whc = wusbhc_to_whc(wusbhc);
- int ret;
-
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_INTERRUPT:
- ret = pzl_urb_enqueue(whc, urb, mem_flags);
- break;
- case PIPE_ISOCHRONOUS:
- dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
- ret = -ENOTSUPP;
- break;
- case PIPE_CONTROL:
- case PIPE_BULK:
- default:
- ret = asl_urb_enqueue(whc, urb, mem_flags);
- break;
- }
-
- return ret;
-}
-
-/*
- * Remove a queued URB from the ASL or PZL.
- */
-static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct whc *whc = wusbhc_to_whc(wusbhc);
- int ret;
-
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_INTERRUPT:
- ret = pzl_urb_dequeue(whc, urb, status);
- break;
- case PIPE_ISOCHRONOUS:
- ret = -ENOTSUPP;
- break;
- case PIPE_CONTROL:
- case PIPE_BULK:
- default:
- ret = asl_urb_dequeue(whc, urb, status);
- break;
- }
-
- return ret;
-}
-
-/*
- * Wait for all URBs to the endpoint to be completed, then delete the
- * qset.
- */
-static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
- struct usb_host_endpoint *ep)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct whc *whc = wusbhc_to_whc(wusbhc);
- struct whc_qset *qset;
-
- qset = ep->hcpriv;
- if (qset) {
- ep->hcpriv = NULL;
- if (usb_endpoint_xfer_bulk(&ep->desc)
- || usb_endpoint_xfer_control(&ep->desc))
- asl_qset_delete(whc, qset);
- else
- pzl_qset_delete(whc, qset);
- }
-}
-
-static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
- struct usb_host_endpoint *ep)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct whc *whc = wusbhc_to_whc(wusbhc);
- struct whc_qset *qset;
- unsigned long flags;
-
- spin_lock_irqsave(&whc->lock, flags);
-
- qset = ep->hcpriv;
- if (qset) {
- qset->remove = 1;
- qset->reset = 1;
-
- if (usb_endpoint_xfer_bulk(&ep->desc)
- || usb_endpoint_xfer_control(&ep->desc))
- queue_work(whc->workqueue, &whc->async_work);
- else
- queue_work(whc->workqueue, &whc->periodic_work);
- }
-
- spin_unlock_irqrestore(&whc->lock, flags);
-}
-
-
-static const struct hc_driver whc_hc_driver = {
- .description = "whci-hcd",
- .product_desc = "Wireless host controller",
- .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
- .irq = whc_int_handler,
- .flags = HCD_USB2,
-
- .reset = whc_reset,
- .start = whc_start,
- .stop = whc_stop,
- .get_frame_number = whc_get_frame_number,
- .urb_enqueue = whc_urb_enqueue,
- .urb_dequeue = whc_urb_dequeue,
- .endpoint_disable = whc_endpoint_disable,
- .endpoint_reset = whc_endpoint_reset,
-
- .hub_status_data = wusbhc_rh_status_data,
- .hub_control = wusbhc_rh_control,
- .start_port_reset = wusbhc_rh_start_port_reset,
-};
-
-static int whc_probe(struct umc_dev *umc)
-{
- int ret;
- struct usb_hcd *usb_hcd;
- struct wusbhc *wusbhc;
- struct whc *whc;
- struct device *dev = &umc->dev;
-
- usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
- if (usb_hcd == NULL) {
- dev_err(dev, "unable to create hcd\n");
- return -ENOMEM;
- }
-
- usb_hcd->wireless = 1;
- usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
-
- wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- whc = wusbhc_to_whc(wusbhc);
- whc->umc = umc;
-
- ret = whc_init(whc);
- if (ret)
- goto error_whc_init;
-
- wusbhc->dev = dev;
- wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
- if (!wusbhc->uwb_rc) {
- ret = -ENODEV;
- dev_err(dev, "cannot get radio controller\n");
- goto error_uwb_rc;
- }
-
- if (whc->n_devices > USB_MAXCHILDREN) {
- dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
- whc->n_devices);
- wusbhc->ports_max = USB_MAXCHILDREN;
- } else
- wusbhc->ports_max = whc->n_devices;
- wusbhc->mmcies_max = whc->n_mmc_ies;
- wusbhc->start = whc_wusbhc_start;
- wusbhc->stop = whc_wusbhc_stop;
- wusbhc->mmcie_add = whc_mmcie_add;
- wusbhc->mmcie_rm = whc_mmcie_rm;
- wusbhc->dev_info_set = whc_dev_info_set;
- wusbhc->bwa_set = whc_bwa_set;
- wusbhc->set_num_dnts = whc_set_num_dnts;
- wusbhc->set_ptk = whc_set_ptk;
- wusbhc->set_gtk = whc_set_gtk;
-
- ret = wusbhc_create(wusbhc);
- if (ret)
- goto error_wusbhc_create;
-
- ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
- if (ret) {
- dev_err(dev, "cannot add HCD: %d\n", ret);
- goto error_usb_add_hcd;
- }
- device_wakeup_enable(usb_hcd->self.controller);
-
- ret = wusbhc_b_create(wusbhc);
- if (ret) {
- dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
- goto error_wusbhc_b_create;
- }
-
- whc_dbg_init(whc);
-
- return 0;
-
-error_wusbhc_b_create:
- usb_remove_hcd(usb_hcd);
-error_usb_add_hcd:
- wusbhc_destroy(wusbhc);
-error_wusbhc_create:
- uwb_rc_put(wusbhc->uwb_rc);
-error_uwb_rc:
- whc_clean_up(whc);
-error_whc_init:
- usb_put_hcd(usb_hcd);
- return ret;
-}
-
-
-static void whc_remove(struct umc_dev *umc)
-{
- struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct whc *whc = wusbhc_to_whc(wusbhc);
-
- if (usb_hcd) {
- whc_dbg_clean_up(whc);
- wusbhc_b_destroy(wusbhc);
- usb_remove_hcd(usb_hcd);
- wusbhc_destroy(wusbhc);
- uwb_rc_put(wusbhc->uwb_rc);
- whc_clean_up(whc);
- usb_put_hcd(usb_hcd);
- }
-}
-
-static struct umc_driver whci_hc_driver = {
- .name = "whci-hcd",
- .cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
- .probe = whc_probe,
- .remove = whc_remove,
-};
-
-static int __init whci_hc_driver_init(void)
-{
- return umc_driver_register(&whci_hc_driver);
-}
-module_init(whci_hc_driver_init);
-
-static void __exit whci_hc_driver_exit(void)
-{
- umc_driver_unregister(&whci_hc_driver);
-}
-module_exit(whci_hc_driver_exit);
-
-/* PCI device IDs that we handle (so the module gets loaded) */
-static struct pci_device_id __used whci_hcd_id_table[] = {
- { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
- { /* empty last entry */ }
-};
-MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
-
-MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
-MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/host/whci/hw.c b/drivers/staging/wusbcore/host/whci/hw.c
deleted file mode 100644
index e4e8914abf42..000000000000
--- a/drivers/staging/wusbcore/host/whci/hw.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) hardware access helpers.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-
-#include "../../../uwb/include/umc.h"
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
-{
- unsigned long flags;
- u32 cmd;
-
- spin_lock_irqsave(&whc->lock, flags);
-
- cmd = le_readl(whc->base + WUSBCMD);
- cmd = (cmd & ~mask) | val;
- le_writel(cmd, whc->base + WUSBCMD);
-
- spin_unlock_irqrestore(&whc->lock, flags);
-}
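whc_write_wusbcmd() is a masked read-modify-write under the spinlock: the bits selected by mask are replaced with val and every other bit is preserved (callers are trusted to pass a val that lies within mask). The arithmetic on a plain variable, with an invented bit layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Same update as whc_write_wusbcmd(), minus the locking and MMIO;
     * assumes val has no bits set outside mask, as the driver does. */
    static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t val)
    {
        return (reg & ~mask) | val;
    }

    int main(void)
    {
        uint32_t cmd = 0x00f0;

        /* e.g. set then clear a hypothetical RUN bit (bit 0) */
        cmd = rmw(cmd, 0x1, 0x1);
        printf("run set:     %#06x\n", cmd);
        cmd = rmw(cmd, 0x1, 0x0);
        printf("run cleared: %#06x\n", cmd);
        return 0;
    }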
-
-/**
- * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
- * @whc: the WHCI HC
- * @cmd: command to start.
- * @params: parameters for the command (the WUSBGENCMDPARAMS register value).
- * @addr: pointer to any data for the command (may be NULL).
- * @len: length of the data (if any).
- */
-int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
-{
- unsigned long flags;
- dma_addr_t dma_addr;
- int t;
- int ret = 0;
-
- mutex_lock(&whc->mutex);
-
- /* Wait for previous command to complete. */
- t = wait_event_timeout(whc->cmd_wq,
- (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
- WHC_GENCMD_TIMEOUT_MS);
- if (t == 0) {
- dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
- le_readl(whc->base + WUSBGENCMDSTS),
- le_readl(whc->base + WUSBGENCMDPARAMS));
- ret = -ETIMEDOUT;
- goto out;
- }
-
- if (addr) {
- memcpy(whc->gen_cmd_buf, addr, len);
- dma_addr = whc->gen_cmd_buf_dma;
- } else
- dma_addr = 0;
-
- /* Poke registers to start cmd. */
- spin_lock_irqsave(&whc->lock, flags);
-
- le_writel(params, whc->base + WUSBGENCMDPARAMS);
- le_writeq(dma_addr, whc->base + WUSBGENADDR);
-
- le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
- whc->base + WUSBGENCMDSTS);
-
- spin_unlock_irqrestore(&whc->lock, flags);
-out:
- mutex_unlock(&whc->mutex);
-
- return ret;
-}
-
-/**
- * whc_hw_error - recover from a hardware error
- * @whc: the WHCI HC that broke.
- * @reason: a description of the failure.
- *
- * Recover from broken hardware with a full reset.
- */
-void whc_hw_error(struct whc *whc, const char *reason)
-{
- struct wusbhc *wusbhc = &whc->wusbhc;
-
- dev_err(&whc->umc->dev, "hardware error: %s\n", reason);
- wusbhc_reset_all(wusbhc);
-}
diff --git a/drivers/staging/wusbcore/host/whci/init.c b/drivers/staging/wusbcore/host/whci/init.c
deleted file mode 100644
index 55fd458a8f30..000000000000
--- a/drivers/staging/wusbcore/host/whci/init.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) initialization.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/dma-mapping.h>
-
-#include "../../../uwb/include/umc.h"
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-/*
- * Reset the host controller.
- */
-static void whc_hw_reset(struct whc *whc)
-{
- le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
- whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
- 100, "reset");
-}
-
-static void whc_hw_init_di_buf(struct whc *whc)
-{
- int d;
-
- /* Disable all entries in the Device Information buffer. */
- for (d = 0; d < whc->n_devices; d++)
- whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
-
- le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
-}
-
-static void whc_hw_init_dn_buf(struct whc *whc)
-{
- /* Clear the Device Notification buffer to ensure the V (valid)
- * bits are clear. */
- memset(whc->dn_buf, 0, 4096);
-
- le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
-}
-
-int whc_init(struct whc *whc)
-{
- u32 whcsparams;
- int ret, i;
- resource_size_t start, len;
-
- spin_lock_init(&whc->lock);
- mutex_init(&whc->mutex);
- init_waitqueue_head(&whc->cmd_wq);
- init_waitqueue_head(&whc->async_list_wq);
- init_waitqueue_head(&whc->periodic_list_wq);
- whc->workqueue = alloc_ordered_workqueue(dev_name(&whc->umc->dev), 0);
- if (whc->workqueue == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- INIT_WORK(&whc->dn_work, whc_dn_work);
-
- INIT_WORK(&whc->async_work, scan_async_work);
- INIT_LIST_HEAD(&whc->async_list);
- INIT_LIST_HEAD(&whc->async_removed_list);
-
- INIT_WORK(&whc->periodic_work, scan_periodic_work);
- for (i = 0; i < 5; i++)
- INIT_LIST_HEAD(&whc->periodic_list[i]);
- INIT_LIST_HEAD(&whc->periodic_removed_list);
-
- /* Map HC registers. */
- start = whc->umc->resource.start;
- len = whc->umc->resource.end - start + 1;
- if (!request_mem_region(start, len, "whci-hc")) {
- dev_err(&whc->umc->dev, "can't request HC region\n");
- ret = -EBUSY;
- goto error;
- }
- whc->base_phys = start;
- whc->base = ioremap(start, len);
- if (!whc->base) {
- dev_err(&whc->umc->dev, "ioremap\n");
- ret = -ENOMEM;
- goto error;
- }
-
- whc_hw_reset(whc);
-
- /* Read maximum number of devices, keys and MMC IEs. */
- whcsparams = le_readl(whc->base + WHCSPARAMS);
- whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
- whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
- whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);
-
- dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
- whc->n_devices, whc->n_keys, whc->n_mmc_ies);
-
- whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
- sizeof(struct whc_qset), 64, 0);
- if (whc->qset_pool == NULL) {
- ret = -ENOMEM;
- goto error;
- }
-
- ret = asl_init(whc);
- if (ret < 0)
- goto error;
- ret = pzl_init(whc);
- if (ret < 0)
- goto error;
-
- /* Allocate and initialize a buffer for generic commands, the
- Device Information buffer, and the Device Notification
- buffer. */
-
- whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
- &whc->gen_cmd_buf_dma, GFP_KERNEL);
- if (whc->gen_cmd_buf == NULL) {
- ret = -ENOMEM;
- goto error;
- }
-
- whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
- sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
- &whc->dn_buf_dma, GFP_KERNEL);
- if (!whc->dn_buf) {
- ret = -ENOMEM;
- goto error;
- }
- whc_hw_init_dn_buf(whc);
-
- whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
- sizeof(struct di_buf_entry) * whc->n_devices,
- &whc->di_buf_dma, GFP_KERNEL);
- if (!whc->di_buf) {
- ret = -ENOMEM;
- goto error;
- }
- whc_hw_init_di_buf(whc);
-
- return 0;
-
-error:
- whc_clean_up(whc);
- return ret;
-}
-
-void whc_clean_up(struct whc *whc)
-{
- resource_size_t len;
-
- if (whc->di_buf)
- dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
- whc->di_buf, whc->di_buf_dma);
- if (whc->dn_buf)
- dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
- whc->dn_buf, whc->dn_buf_dma);
- if (whc->gen_cmd_buf)
- dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
- whc->gen_cmd_buf, whc->gen_cmd_buf_dma);
-
- pzl_clean_up(whc);
- asl_clean_up(whc);
-
- dma_pool_destroy(whc->qset_pool);
-
- len = resource_size(&whc->umc->resource);
- if (whc->base)
- iounmap(whc->base);
- if (whc->base_phys)
- release_mem_region(whc->base_phys, len);
-
- if (whc->workqueue)
- destroy_workqueue(whc->workqueue);
-}
diff --git a/drivers/staging/wusbcore/host/whci/int.c b/drivers/staging/wusbcore/host/whci/int.c
deleted file mode 100644
index bdbe35e9366f..000000000000
--- a/drivers/staging/wusbcore/host/whci/int.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) interrupt handling.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-
-#include "../../../uwb/include/umc.h"
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-static void transfer_done(struct whc *whc)
-{
- queue_work(whc->workqueue, &whc->async_work);
- queue_work(whc->workqueue, &whc->periodic_work);
-}
-
-irqreturn_t whc_int_handler(struct usb_hcd *hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
- struct whc *whc = wusbhc_to_whc(wusbhc);
- u32 sts;
-
- sts = le_readl(whc->base + WUSBSTS);
- if (!(sts & WUSBSTS_INT_MASK))
- return IRQ_NONE;
- le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
-
- if (sts & WUSBSTS_GEN_CMD_DONE)
- wake_up(&whc->cmd_wq);
-
- if (sts & WUSBSTS_HOST_ERR)
- dev_err(&whc->umc->dev, "FIXME: host system error\n");
-
- if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
- wake_up(&whc->async_list_wq);
-
- if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
- wake_up(&whc->periodic_list_wq);
-
- if (sts & WUSBSTS_DNTS_INT)
- queue_work(whc->workqueue, &whc->dn_work);
-
- /*
- * A transfer completed (see [WHCI] section 4.7.1.2 for when
- * this occurs).
- */
- if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
- transfer_done(whc);
-
- return IRQ_HANDLED;
-}
-
-static int process_dn_buf(struct whc *whc)
-{
- struct wusbhc *wusbhc = &whc->wusbhc;
- struct dn_buf_entry *dn;
- int processed = 0;
-
- for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
- if (dn->status & WHC_DN_STATUS_VALID) {
- wusbhc_handle_dn(wusbhc, dn->src_addr,
- (struct wusb_dn_hdr *)dn->dn_data,
- dn->msg_size);
- dn->status &= ~WHC_DN_STATUS_VALID;
- processed++;
- }
- }
- return processed;
-}
-
-void whc_dn_work(struct work_struct *work)
-{
- struct whc *whc = container_of(work, struct whc, dn_work);
- int processed;
-
- do {
- processed = process_dn_buf(whc);
- } while (processed);
-}
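process_dn_buf() and whc_dn_work() together implement a simple consume-and-clear protocol: scan every slot, handle the ones whose valid bit is set, clear the bit, and rescan until a pass finds nothing new (covering notifications that arrive mid-scan). A standalone sketch of that loop (the flag value, entry layout, and payload are invented):

    #include <stdio.h>

    #define N_ENTRIES 4
    #define F_VALID   0x01

    struct entry { unsigned status; int payload; };

    /* One pass: consume every valid entry and clear its V bit. */
    static int process_buf(struct entry *buf)
    {
        int processed = 0;

        for (int i = 0; i < N_ENTRIES; i++) {
            if (buf[i].status & F_VALID) {
                printf("consumed payload %d\n", buf[i].payload);
                buf[i].status &= ~F_VALID;
                processed++;
            }
        }
        return processed;
    }

    int main(void)
    {
        struct entry buf[N_ENTRIES] = {
            { F_VALID, 10 }, { 0, 0 }, { F_VALID, 30 }, { 0, 0 },
        };

        /* Rescan until a pass finds nothing, as whc_dn_work() does. */
        while (process_buf(buf))
            ;
        return 0;
    }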
diff --git a/drivers/staging/wusbcore/host/whci/pzl.c b/drivers/staging/wusbcore/host/whci/pzl.c
deleted file mode 100644
index 6dfc075f5798..000000000000
--- a/drivers/staging/wusbcore/host/whci/pzl.c
+++ /dev/null
@@ -1,404 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) periodic schedule management.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/dma-mapping.h>
-#include <linux/usb.h>
-
-#include "../../../uwb/include/umc.h"
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
-{
- switch (period) {
- case 0:
- whc_qset_set_link_ptr(&whc->pz_list[0], addr);
- whc_qset_set_link_ptr(&whc->pz_list[2], addr);
- whc_qset_set_link_ptr(&whc->pz_list[4], addr);
- whc_qset_set_link_ptr(&whc->pz_list[6], addr);
- whc_qset_set_link_ptr(&whc->pz_list[8], addr);
- whc_qset_set_link_ptr(&whc->pz_list[10], addr);
- whc_qset_set_link_ptr(&whc->pz_list[12], addr);
- whc_qset_set_link_ptr(&whc->pz_list[14], addr);
- break;
- case 1:
- whc_qset_set_link_ptr(&whc->pz_list[1], addr);
- whc_qset_set_link_ptr(&whc->pz_list[5], addr);
- whc_qset_set_link_ptr(&whc->pz_list[9], addr);
- whc_qset_set_link_ptr(&whc->pz_list[13], addr);
- break;
- case 2:
- whc_qset_set_link_ptr(&whc->pz_list[3], addr);
- whc_qset_set_link_ptr(&whc->pz_list[11], addr);
- break;
- case 3:
- whc_qset_set_link_ptr(&whc->pz_list[7], addr);
- break;
- case 4:
- whc_qset_set_link_ptr(&whc->pz_list[15], addr);
- break;
- }
-}
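The switch above encodes a binary-tree layout for the 16-slot periodic zone list: slot i belongs to period p exactly when i ends in p one-bits (slots 0,2,4,…,14 for period 0; 1,5,9,13 for period 1; 3,11 for period 2; 7 for period 3; 15 for period 4). A standalone sketch that derives the same slot sets arithmetically (illustrative only; the driver simply hard-codes the table):

    #include <stdio.h>

    /* Period of slot i = number of trailing 1 bits in i. */
    static int slot_period(unsigned i)
    {
        int p = 0;

        while (i & 1) {
            p++;
            i >>= 1;
        }
        return p;
    }

    int main(void)
    {
        for (int period = 0; period < 5; period++) {
            printf("period %d:", period);
            for (unsigned i = 0; i < 16; i++)
                if (slot_period(i) == period)
                    printf(" %u", i);
            printf("\n");
        }
        return 0;
    }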
-
-/*
- * Return the 'period' to use for this qset. The endpoint's minimum
- * interval is used so that, whatever URBs are submitted, the device
- * is polled often enough.
- */
-static int qset_get_period(struct whc *whc, struct whc_qset *qset)
-{
- uint8_t bInterval = qset->ep->desc.bInterval;
-
- if (bInterval < 6)
- bInterval = 6;
- if (bInterval > 10)
- bInterval = 10;
- return bInterval - 6;
-}
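The clamp maps any endpoint bInterval into the five periods the PZL supports: values 6..10 become periods 0..4, smaller values are promoted to period 0 (so the device is polled at least as often as requested), and larger ones are capped at period 4. A quick standalone table:

    #include <stdio.h>

    /* Same mapping as qset_get_period(), without the qset plumbing. */
    static int pzl_period(unsigned bInterval)
    {
        if (bInterval < 6)
            bInterval = 6;
        if (bInterval > 10)
            bInterval = 10;
        return bInterval - 6;
    }

    int main(void)
    {
        for (unsigned b = 1; b <= 12; b++)
            printf("bInterval %2u -> period %d\n", b, pzl_period(b));
        return 0;
    }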
-
-static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
-{
- int period;
-
- period = qset_get_period(whc, qset);
-
- qset_clear(whc, qset);
- list_move(&qset->list_node, &whc->periodic_list[period]);
- qset->in_sw_list = true;
-}
-
-static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
-{
- list_move(&qset->list_node, &whc->periodic_removed_list);
- qset->in_hw_list = false;
- qset->in_sw_list = false;
-}
-
-/**
- * pzl_process_qset - process any recently inactivated or halted qTDs
- * in a qset.
- *
- * After inactive qTDs are removed, new qTDs can be added if the
- * urb queue still contains URBs.
- *
- * Returns the schedule updates required.
- */
-static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
-{
- enum whc_update update = 0;
- uint32_t status = 0;
-
- while (qset->ntds) {
- struct whc_qtd *td;
-
- td = &qset->qtd[qset->td_start];
- status = le32_to_cpu(td->status);
-
- /*
- * Nothing to do with a still active qTD.
- */
- if (status & QTD_STS_ACTIVE)
- break;
-
- if (status & QTD_STS_HALTED) {
- /* Ug, an error. */
- process_halted_qtd(whc, qset, td);
- /* A halted qTD always triggers an update
- because the qset was either removed or
- reactivated. */
- update |= WHC_UPDATE_UPDATED;
- goto done;
- }
-
- /* Mmm, a completed qTD. */
- process_inactive_qtd(whc, qset, td);
- }
-
- if (!qset->remove)
- update |= qset_add_qtds(whc, qset);
-
-done:
- /*
- * If removal was requested and there are no qTDs left in this
- * qset, remove it from the PZL.
- */
- if (qset->remove && qset->ntds == 0) {
- pzl_qset_remove(whc, qset);
- update |= WHC_UPDATE_REMOVED;
- }
-
- return update;
-}
-
-/**
- * pzl_start - start the periodic schedule
- * @whc: the WHCI host controller
- *
- * The PZL must be valid (e.g., all entries in the list should have
- * the T bit set).
- */
-void pzl_start(struct whc *whc)
-{
- le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
-
- whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
- whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
- WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
- 1000, "start PZL");
-}
-
-/**
- * pzl_stop - stop the periodic schedule
- * @whc: the WHCI host controller
- */
-void pzl_stop(struct whc *whc)
-{
- whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
- whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
- WUSBSTS_PERIODIC_SCHED, 0,
- 1000, "stop PZL");
-}
-
-/**
- * pzl_update - request a PZL update and wait for the hardware to be synced
- * @whc: the WHCI HC
- * @wusbcmd: WUSBCMD value to start the update.
- *
- * If the WUSB HC is inactive (i.e., the PZL is stopped) then the
- * update must be skipped as the hardware may not respond to update
- * requests.
- */
-void pzl_update(struct whc *whc, uint32_t wusbcmd)
-{
- struct wusbhc *wusbhc = &whc->wusbhc;
- long t;
-
- mutex_lock(&wusbhc->mutex);
- if (wusbhc->active) {
- whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
- t = wait_event_timeout(
- whc->periodic_list_wq,
- (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0,
- msecs_to_jiffies(1000));
- if (t == 0)
- whc_hw_error(whc, "PZL update timeout");
- }
- mutex_unlock(&wusbhc->mutex);
-}
-
-static void update_pzl_hw_view(struct whc *whc)
-{
- struct whc_qset *qset, *t;
- int period;
- u64 tmp_qh = 0;
-
- for (period = 0; period < 5; period++) {
- list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
- whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
- tmp_qh = qset->qset_dma;
- qset->in_hw_list = true;
- }
- update_pzl_pointers(whc, period, tmp_qh);
- }
-}
-
-/**
- * scan_periodic_work - scan the PZL for qsets to process.
- *
- * Process each qset in the PZL in turn and then signal the WHC that
- * the PZL has been updated.
- *
- * Then start, stop or update the periodic schedule as required.
- */
-void scan_periodic_work(struct work_struct *work)
-{
- struct whc *whc = container_of(work, struct whc, periodic_work);
- struct whc_qset *qset, *t;
- enum whc_update update = 0;
- int period;
-
- spin_lock_irq(&whc->lock);
-
- for (period = 4; period >= 0; period--) {
- list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
- if (!qset->in_hw_list)
- update |= WHC_UPDATE_ADDED;
- update |= pzl_process_qset(whc, qset);
- }
- }
-
- if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
- update_pzl_hw_view(whc);
-
- spin_unlock_irq(&whc->lock);
-
- if (update) {
- uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
- if (update & WHC_UPDATE_REMOVED)
- wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
- pzl_update(whc, wusbcmd);
- }
-
- /*
- * Now that the PZL is updated, complete the removal of any
- * removed qsets.
- *
- * If the qset was to be reset, do so and reinsert it into the
- * PZL if it has pending transfers.
- */
- spin_lock_irq(&whc->lock);
-
- list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
- qset_remove_complete(whc, qset);
- if (qset->reset) {
- qset_reset(whc, qset);
- if (!list_empty(&qset->stds)) {
- qset_insert_in_sw_list(whc, qset);
- queue_work(whc->workqueue, &whc->periodic_work);
- }
- }
- }
-
- spin_unlock_irq(&whc->lock);
-}
-
-/**
- * pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
- * @whc: the WHCI host controller
- * @urb: the URB to enqueue
- * @mem_flags: flags for any memory allocations
- *
- * The qset for the endpoint is obtained and the urb queued on to it.
- *
- * Work is scheduled to update the hardware's view of the PZL.
- */
-int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
-{
- struct whc_qset *qset;
- int err;
- unsigned long flags;
-
- spin_lock_irqsave(&whc->lock, flags);
-
- err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
- if (err < 0) {
- spin_unlock_irqrestore(&whc->lock, flags);
- return err;
- }
-
- qset = get_qset(whc, urb, GFP_ATOMIC);
- if (qset == NULL)
- err = -ENOMEM;
- else
- err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
- if (!err) {
- if (!qset->in_sw_list && !qset->remove)
- qset_insert_in_sw_list(whc, qset);
- } else
- usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
-
- spin_unlock_irqrestore(&whc->lock, flags);
-
- if (!err)
- queue_work(whc->workqueue, &whc->periodic_work);
-
- return err;
-}
-
-/**
- * pzl_urb_dequeue - remove an URB (qset) from the periodic list
- * @whc: the WHCI host controller
- * @urb: the URB to dequeue
- * @status: the current status of the URB
- *
- * URBs that do not yet have qTDs can simply be removed from the
- * software queue; otherwise the qset must be removed from the PZL so
- * the qTDs can be safely removed.
- */
-int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
-{
- struct whc_urb *wurb = urb->hcpriv;
- struct whc_qset *qset = wurb->qset;
- struct whc_std *std, *t;
- bool has_qtd = false;
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&whc->lock, flags);
-
- ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
- if (ret < 0)
- goto out;
-
- list_for_each_entry_safe(std, t, &qset->stds, list_node) {
- if (std->urb == urb) {
- if (std->qtd)
- has_qtd = true;
- qset_free_std(whc, std);
- } else
- std->qtd = NULL; /* so this std is re-added when the qset is */
- }
-
- if (has_qtd) {
- pzl_qset_remove(whc, qset);
- update_pzl_hw_view(whc);
- wurb->status = status;
- wurb->is_async = false;
- queue_work(whc->workqueue, &wurb->dequeue_work);
- } else
- qset_remove_urb(whc, qset, urb, status);
-out:
- spin_unlock_irqrestore(&whc->lock, flags);
-
- return ret;
-}
-
-/**
- * pzl_qset_delete - delete a qset from the PZL
- */
-void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
-{
- qset->remove = 1;
- queue_work(whc->workqueue, &whc->periodic_work);
- qset_delete(whc, qset);
-}
-
-/**
- * pzl_init - initialize the periodic zone list
- * @whc: the WHCI host controller
- */
-int pzl_init(struct whc *whc)
-{
- int i;
-
- whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
- &whc->pz_list_dma, GFP_KERNEL);
- if (whc->pz_list == NULL)
- return -ENOMEM;
-
- /* Set T bit on all elements in PZL. */
- for (i = 0; i < 16; i++)
- whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
-
- le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
-
- return 0;
-}
-
-/**
- * pzl_clean_up - free PZL resources
- * @whc: the WHCI host controller
- *
- * The PZL is stopped and empty.
- */
-void pzl_clean_up(struct whc *whc)
-{
- if (whc->pz_list)
- dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
- whc->pz_list_dma);
-}
diff --git a/drivers/staging/wusbcore/host/whci/qset.c b/drivers/staging/wusbcore/host/whci/qset.c
deleted file mode 100644
index 66459b77dc77..000000000000
--- a/drivers/staging/wusbcore/host/whci/qset.c
+++ /dev/null
@@ -1,831 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) qset management.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-
-#include "../../../uwb/include/umc.h"
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
-{
- struct whc_qset *qset;
- dma_addr_t dma;
-
- qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
- if (qset == NULL)
- return NULL;
-
- qset->qset_dma = dma;
- qset->whc = whc;
-
- INIT_LIST_HEAD(&qset->list_node);
- INIT_LIST_HEAD(&qset->stds);
-
- return qset;
-}
-
-/**
- * qset_fill_qh - fill the static endpoint state in a qset's QHead
- * @qset: the qset whose QH needs initializing with static endpoint
- * state
- * @urb: an urb for a transfer to this endpoint
- */
-static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
-{
- struct usb_device *usb_dev = urb->dev;
- struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
- struct usb_wireless_ep_comp_descriptor *epcd;
- bool is_out;
- uint8_t phy_rate;
-
- is_out = usb_pipeout(urb->pipe);
-
- qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
-
- epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
- if (epcd) {
- qset->max_seq = epcd->bMaxSequence;
- qset->max_burst = epcd->bMaxBurst;
- } else {
- qset->max_seq = 2;
- qset->max_burst = 1;
- }
-
- /*
- * Initial PHY rate is 53.3 Mbit/s for control endpoints or
- * the maximum supported by the device for other endpoints
- * (unless limited by the user).
- */
- if (usb_pipecontrol(urb->pipe))
- phy_rate = UWB_PHY_RATE_53;
- else {
- uint16_t phy_rates;
-
- phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
- phy_rate = fls(phy_rates) - 1;
- if (phy_rate > whc->wusbhc.phy_rate)
- phy_rate = whc->wusbhc.phy_rate;
- }
-
- qset->qh.info1 = cpu_to_le32(
- QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
- | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
- | usb_pipe_to_qh_type(urb->pipe)
- | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
- | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
- );
- qset->qh.info2 = cpu_to_le32(
- QH_INFO2_BURST(qset->max_burst)
- | QH_INFO2_DBP(0)
- | QH_INFO2_MAX_COUNT(3)
- | QH_INFO2_MAX_RETRY(3)
- | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
- );
- /* FIXME: where can we obtain these Tx parameters from? Why
- * doesn't the chip know what Tx power to use? It knows the Rx
- * strength and can presumably guess the Tx power required
- * from that? */
- qset->qh.info3 = cpu_to_le32(
- QH_INFO3_TX_RATE(phy_rate)
- | QH_INFO3_TX_PWR(0) /* 0 == max power */
- );
-
- qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
-}
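For non-control endpoints the initial PHY rate above is the index of the highest bit set in the device's wPHYRates bitmap, capped at the host's configured maximum. A standalone sketch of that selection (fls() is kernel-only, so an equivalent is open-coded here; the bitmap and cap values are invented):

    #include <stdio.h>

    /* Highest set bit position, 1-based; 0 if no bits set (like fls()). */
    static int fls_u32(unsigned v)
    {
        int n = 0;

        while (v) {
            n++;
            v >>= 1;
        }
        return n;
    }

    static int pick_phy_rate(unsigned wPHYRates, int host_max)
    {
        int rate = fls_u32(wPHYRates) - 1; /* highest supported rate */

        return rate > host_max ? host_max : rate;
    }

    int main(void)
    {
        /* Device advertises rate indices 0-4; host caps at index 3. */
        printf("rate index %d\n", pick_phy_rate(0x1f, 3));
        return 0;
    }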
-
-/**
- * qset_clear - clear fields in a qset so it may be reinserted into a
- * schedule.
- *
- * The sequence number and current window are not cleared (see
- * qset_reset()).
- */
-void qset_clear(struct whc *whc, struct whc_qset *qset)
-{
- qset->td_start = qset->td_end = qset->ntds = 0;
-
- qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
- qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
- qset->qh.err_count = 0;
- qset->qh.scratch[0] = 0;
- qset->qh.scratch[1] = 0;
- qset->qh.scratch[2] = 0;
-
- memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
-
- init_completion(&qset->remove_complete);
-}
-
-/**
- * qset_reset - reset endpoint state in a qset.
- *
- * Clears the sequence number and current window. This qset must not
- * be in the ASL or PZL.
- */
-void qset_reset(struct whc *whc, struct whc_qset *qset)
-{
- qset->reset = 0;
-
- qset->qh.status &= ~QH_STATUS_SEQ_MASK;
- qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
-}
-
-/**
- * get_qset - get the qset for an async endpoint
- *
- * A new qset is created if one does not already exist.
- */
-struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
- gfp_t mem_flags)
-{
- struct whc_qset *qset;
-
- qset = urb->ep->hcpriv;
- if (qset == NULL) {
- qset = qset_alloc(whc, mem_flags);
- if (qset == NULL)
- return NULL;
-
- qset->ep = urb->ep;
- urb->ep->hcpriv = qset;
- qset_fill_qh(whc, qset, urb);
- }
- return qset;
-}
-
-void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
-{
- qset->remove = 0;
- list_del_init(&qset->list_node);
- complete(&qset->remove_complete);
-}
-
-/**
- * qset_add_qtds - add qTDs for an URB to a qset
- *
- * Returns true if the list (ASL/PZL) must be updated because (for a
- * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
- */
-enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
-{
- struct whc_std *std;
- enum whc_update update = 0;
-
- list_for_each_entry(std, &qset->stds, list_node) {
- struct whc_qtd *qtd;
- uint32_t status;
-
- if (qset->ntds >= WHCI_QSET_TD_MAX
- || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
- break;
-
- if (std->qtd)
- continue; /* already has a qTD */
-
- qtd = std->qtd = &qset->qtd[qset->td_end];
-
- /* Fill in setup bytes for control transfers. */
- if (usb_pipecontrol(std->urb->pipe))
- memcpy(qtd->setup, std->urb->setup_packet, 8);
-
- status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
-
- if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
- status |= QTD_STS_LAST_PKT;
-
- /*
- * For an IN transfer the iAlt field should be set so
- * the h/w will automatically advance to the next
- * transfer. However, if there are 8 or more TDs
- * remaining in this transfer then iAlt cannot be set
- * as it could point to somewhere in this transfer.
- */
- if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
- int ialt;
- ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
- status |= QTD_STS_IALT(ialt);
- } else if (usb_pipein(std->urb->pipe))
- qset->pause_after_urb = std->urb;
-
- if (std->num_pointers)
- qtd->options = cpu_to_le32(QTD_OPT_IOC);
- else
- qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
- qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
-
- qtd->status = cpu_to_le32(status);
-
- if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
- update = WHC_UPDATE_UPDATED;
-
- if (++qset->td_end >= WHCI_QSET_TD_MAX)
- qset->td_end = 0;
- qset->ntds++;
- }
-
- return update;
-}
-
-/**
- * qset_remove_qtd - remove the first qTD from a qset.
- *
- * The qTD might still be active (if it's part of an IN URB that
- * resulted in a short read) so ensure it's deactivated.
- */
-static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
-{
- qset->qtd[qset->td_start].status = 0;
-
- if (++qset->td_start >= WHCI_QSET_TD_MAX)
- qset->td_start = 0;
- qset->ntds--;
-}
-
-static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
-{
- struct scatterlist *sg;
- void *bounce;
- size_t remaining, offset;
-
- bounce = std->bounce_buf;
- remaining = std->len;
-
- sg = std->bounce_sg;
- offset = std->bounce_offset;
-
- while (remaining) {
- size_t len;
-
- len = min(sg->length - offset, remaining);
- memcpy(sg_virt(sg) + offset, bounce, len);
-
- bounce += len;
- remaining -= len;
-
- offset += len;
- if (offset >= sg->length) {
- sg = sg_next(sg);
- offset = 0;
- }
- }
-}
-
-/**
- * qset_free_std - remove an sTD and free it.
- * @whc: the WHCI host controller
- * @std: the sTD to remove and free.
- */
-void qset_free_std(struct whc *whc, struct whc_std *std)
-{
- list_del(&std->list_node);
- if (std->bounce_buf) {
- bool is_out = usb_pipeout(std->urb->pipe);
- dma_addr_t dma_addr;
-
- if (std->num_pointers)
- dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
- else
- dma_addr = std->dma_addr;
-
- dma_unmap_single(whc->wusbhc.dev, dma_addr,
- std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (!is_out)
- qset_copy_bounce_to_sg(whc, std);
- kfree(std->bounce_buf);
- }
- if (std->pl_virt) {
- if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
- dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
- std->num_pointers * sizeof(struct whc_page_list_entry),
- DMA_TO_DEVICE);
- kfree(std->pl_virt);
- std->pl_virt = NULL;
- }
- kfree(std);
-}
-
-/**
- * qset_remove_qtds - remove an URB's qTDs (and sTDs).
- */
-static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
- struct urb *urb)
-{
- struct whc_std *std, *t;
-
- list_for_each_entry_safe(std, t, &qset->stds, list_node) {
- if (std->urb != urb)
- break;
- if (std->qtd != NULL)
- qset_remove_qtd(whc, qset);
- qset_free_std(whc, std);
- }
-}
-
-/**
- * qset_free_stds - free any remaining sTDs for an URB.
- */
-static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
-{
- struct whc_std *std, *t;
-
- list_for_each_entry_safe(std, t, &qset->stds, list_node) {
- if (std->urb == urb)
- qset_free_std(qset->whc, std);
- }
-}
-
-static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
-{
- dma_addr_t dma_addr = std->dma_addr;
- dma_addr_t sp, ep;
- size_t pl_len;
- int p;
-
- /* Short buffers don't need a page list. */
- if (std->len <= WHCI_PAGE_SIZE) {
- std->num_pointers = 0;
- return 0;
- }
-
- sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
- ep = dma_addr + std->len;
- std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
-
- pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
- std->pl_virt = kmalloc(pl_len, mem_flags);
- if (std->pl_virt == NULL)
- return -ENOMEM;
- std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
- if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
- kfree(std->pl_virt);
- return -EFAULT;
- }
-
- for (p = 0; p < std->num_pointers; p++) {
- std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
- dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
- }
-
- return 0;
-}
-
-/**
- * urb_dequeue_work - executes the asl/pzl update and gives the urb back to the system.
- */
-static void urb_dequeue_work(struct work_struct *work)
-{
- struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
- struct whc_qset *qset = wurb->qset;
- struct whc *whc = qset->whc;
- unsigned long flags;
-
- if (wurb->is_async)
- asl_update(whc, WUSBCMD_ASYNC_UPDATED
- | WUSBCMD_ASYNC_SYNCED_DB
- | WUSBCMD_ASYNC_QSET_RM);
- else
- pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
- | WUSBCMD_PERIODIC_SYNCED_DB
- | WUSBCMD_PERIODIC_QSET_RM);
-
- spin_lock_irqsave(&whc->lock, flags);
- qset_remove_urb(whc, qset, wurb->urb, wurb->status);
- spin_unlock_irqrestore(&whc->lock, flags);
-}
-
-static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
- struct urb *urb, gfp_t mem_flags)
-{
- struct whc_std *std;
-
- std = kzalloc(sizeof(struct whc_std), mem_flags);
- if (std == NULL)
- return NULL;
-
- std->urb = urb;
- std->qtd = NULL;
-
- INIT_LIST_HEAD(&std->list_node);
- list_add_tail(&std->list_node, &qset->stds);
-
- return std;
-}
-
-static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
- gfp_t mem_flags)
-{
- size_t remaining;
- struct scatterlist *sg;
- int i;
- int ntds = 0;
- struct whc_std *std = NULL;
- struct whc_page_list_entry *new_pl_virt;
- dma_addr_t prev_end = 0;
- size_t pl_len;
- int p = 0;
-
- remaining = urb->transfer_buffer_length;
-
- for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
- dma_addr_t dma_addr;
- size_t dma_remaining;
- dma_addr_t sp, ep;
- int num_pointers;
-
- if (remaining == 0) {
- break;
- }
-
- dma_addr = sg_dma_address(sg);
- dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
-
- while (dma_remaining) {
- size_t dma_len;
-
- /*
- * We can use the previous std (if it exists) provided that:
- * - the previous one ended on a page boundary.
- * - the current one begins on a page boundary.
- * - the previous one isn't full.
- *
- * If a new std is needed but the previous one
- * was not a whole number of packets then this
- * sg list cannot be mapped onto multiple
- * qTDs. Return an error and let the caller
- * sort it out.
- */
- if (!std
- || (prev_end & (WHCI_PAGE_SIZE-1))
- || (dma_addr & (WHCI_PAGE_SIZE-1))
- || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
- if (std && std->len % qset->max_packet != 0)
- return -EINVAL;
- std = qset_new_std(whc, qset, urb, mem_flags);
- if (std == NULL) {
- return -ENOMEM;
- }
- ntds++;
- p = 0;
- }
-
- dma_len = dma_remaining;
-
- /*
- * If the remainder of this element doesn't
- * fit in a single qTD, limit the qTD to a
- * whole number of packets. This allows the
- * remainder to go into the next qTD.
- */
- if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
- dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
- * qset->max_packet - std->len;
- }
-
- std->len += dma_len;
- std->ntds_remaining = -1; /* filled in later */
-
- sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
- ep = dma_addr + dma_len;
- num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
- std->num_pointers += num_pointers;
-
- pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
-
- new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
- if (new_pl_virt == NULL) {
- kfree(std->pl_virt);
- std->pl_virt = NULL;
- return -ENOMEM;
- }
- std->pl_virt = new_pl_virt;
-
- for (; p < std->num_pointers; p++) {
- std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
- dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
- }
-
- prev_end = dma_addr = ep;
- dma_remaining -= dma_len;
- remaining -= dma_len;
- }
- }
-
- /* Now the number of stds is known, go back and fill in
- std->ntds_remaining. */
- list_for_each_entry(std, &qset->stds, list_node) {
- if (std->ntds_remaining == -1) {
- pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
- std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
- pl_len, DMA_TO_DEVICE);
- if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
- return -EFAULT;
- std->ntds_remaining = ntds--;
- }
- }
- return 0;
-}
-
-/**
- * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
- *
- * If the URB contains an sg list whose elements cannot be directly
- * mapped to qTDs then the data must be transferred via bounce
- * buffers.
- */
-static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
- struct urb *urb, gfp_t mem_flags)
-{
- bool is_out = usb_pipeout(urb->pipe);
- size_t max_std_len;
- size_t remaining;
- int ntds = 0;
- struct whc_std *std = NULL;
- void *bounce = NULL;
- struct scatterlist *sg;
- int i;
-
- /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
- max_std_len = qset->max_burst * qset->max_packet;
-
- remaining = urb->transfer_buffer_length;
-
- for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
- size_t len;
- size_t sg_remaining;
- void *orig;
-
- if (remaining == 0) {
- break;
- }
-
- sg_remaining = min_t(size_t, remaining, sg->length);
- orig = sg_virt(sg);
-
- while (sg_remaining) {
- if (!std || std->len == max_std_len) {
- std = qset_new_std(whc, qset, urb, mem_flags);
- if (std == NULL)
- return -ENOMEM;
- std->bounce_buf = kmalloc(max_std_len, mem_flags);
- if (std->bounce_buf == NULL)
- return -ENOMEM;
- std->bounce_sg = sg;
- std->bounce_offset = orig - sg_virt(sg);
- bounce = std->bounce_buf;
- ntds++;
- }
-
- len = min(sg_remaining, max_std_len - std->len);
-
- if (is_out)
- memcpy(bounce, orig, len);
-
- std->len += len;
- std->ntds_remaining = -1; /* filled in later */
-
- bounce += len;
- orig += len;
- sg_remaining -= len;
- remaining -= len;
- }
- }
-
- /*
- * For each of the new sTDs, map the bounce buffers, create
- * page lists (if necessary), and fill in std->ntds_remaining.
- */
- list_for_each_entry(std, &qset->stds, list_node) {
- if (std->ntds_remaining != -1)
- continue;
-
- std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
- is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
- return -EFAULT;
-
- if (qset_fill_page_list(whc, std, mem_flags) < 0)
- return -ENOMEM;
-
- std->ntds_remaining = ntds--;
- }
-
- return 0;
-}
-
-/**
- * qset_add_urb - add an urb to the qset's queue.
- *
- * The URB is chopped into sTDs, one for each qTD that will be required.
- * At least one qTD (and sTD) is required even if the transfer has no
- * data (e.g., for some control transfers).
- */
-int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
- gfp_t mem_flags)
-{
- struct whc_urb *wurb;
- int remaining = urb->transfer_buffer_length;
- u64 transfer_dma = urb->transfer_dma;
- int ntds_remaining;
- int ret;
-
- wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
- if (wurb == NULL)
- goto err_no_mem;
- urb->hcpriv = wurb;
- wurb->qset = qset;
- wurb->urb = urb;
- INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
-
- if (urb->num_sgs) {
- ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
- if (ret == -EINVAL) {
- qset_free_stds(qset, urb);
- ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
- }
- if (ret < 0)
- goto err_no_mem;
- return 0;
- }
-
- ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
- if (ntds_remaining == 0)
- ntds_remaining = 1;
-
- while (ntds_remaining) {
- struct whc_std *std;
- size_t std_len;
-
- std_len = remaining;
- if (std_len > QTD_MAX_XFER_SIZE)
- std_len = QTD_MAX_XFER_SIZE;
-
- std = qset_new_std(whc, qset, urb, mem_flags);
- if (std == NULL)
- goto err_no_mem;
-
- std->dma_addr = transfer_dma;
- std->len = std_len;
- std->ntds_remaining = ntds_remaining;
-
- if (qset_fill_page_list(whc, std, mem_flags) < 0)
- goto err_no_mem;
-
- ntds_remaining--;
- remaining -= std_len;
- transfer_dma += std_len;
- }
-
- return 0;
-
-err_no_mem:
- qset_free_stds(qset, urb);
- return -ENOMEM;
-}
-
-/**
- * qset_remove_urb - remove an URB from the urb queue.
- *
- * The URB is returned to the USB subsystem.
- */
-void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
- struct urb *urb, int status)
-{
- struct wusbhc *wusbhc = &whc->wusbhc;
- struct whc_urb *wurb = urb->hcpriv;
-
- usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
- /* Drop the lock as urb->complete() may enqueue another urb. */
- spin_unlock(&whc->lock);
- wusbhc_giveback_urb(wusbhc, urb, status);
- spin_lock(&whc->lock);
-
- kfree(wurb);
-}
-
-/**
- * get_urb_status_from_qtd - get the completed urb status from qTD status
- * @urb: completed urb
- * @status: qTD status
- */
-static int get_urb_status_from_qtd(struct urb *urb, u32 status)
-{
- if (status & QTD_STS_HALTED) {
- if (status & QTD_STS_DBE)
- return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
- else if (status & QTD_STS_BABBLE)
- return -EOVERFLOW;
- else if (status & QTD_STS_RCE)
- return -ETIME;
- return -EPIPE;
- }
- if (usb_pipein(urb->pipe)
- && (urb->transfer_flags & URB_SHORT_NOT_OK)
- && urb->actual_length < urb->transfer_buffer_length)
- return -EREMOTEIO;
- return 0;
-}
-
-/**
- * process_inactive_qtd - process an inactive (but not halted) qTD.
- *
- * Update the urb with the bytes transferred by the qTD. If the urb is
- * completely transferred, or (in the case of an IN only) the LPF is
- * set, then the transfer is complete and the urb should be returned
- * to the system.
- */
-void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
- struct whc_qtd *qtd)
-{
- struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
- struct urb *urb = std->urb;
- uint32_t status;
- bool complete;
-
- status = le32_to_cpu(qtd->status);
-
- urb->actual_length += std->len - QTD_STS_TO_LEN(status);
-
- if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
- complete = true;
- else
- complete = whc_std_last(std);
-
- qset_remove_qtd(whc, qset);
- qset_free_std(whc, std);
-
- /*
- * If all transfers for this URB are complete, return it to the
- * USB subsystem.
- */
- if (complete) {
- qset_remove_qtds(whc, qset, urb);
- qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
-
- /*
- * If iAlt isn't valid then the hardware didn't
- * advance iCur. Adjust the start and end pointers to
- * match iCur.
- */
- if (!(status & QTD_STS_IALT_VALID))
- qset->td_start = qset->td_end
- = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
- qset->pause_after_urb = NULL;
- }
-}
-
-/**
- * process_halted_qtd - process a qset with a halted qtd
- *
- * Remove all the qTDs for the failed URB and return the failed URB to
- * the USB subsystem. Then remove all other qTDs so the qset can be
- * removed.
- *
- * FIXME: this is the point where rate adaptation can be done. If a
- * transfer failed because it exceeded the maximum number of retries
- * then it could be reactivated with a slower rate without having to
- * remove the qset.
- */
-void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
- struct whc_qtd *qtd)
-{
- struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
- struct urb *urb = std->urb;
- int urb_status;
-
- urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
-
- qset_remove_qtds(whc, qset, urb);
- qset_remove_urb(whc, qset, urb, urb_status);
-
- list_for_each_entry(std, &qset->stds, list_node) {
- if (qset->ntds == 0)
- break;
- qset_remove_qtd(whc, qset);
- std->qtd = NULL;
- }
-
- qset->remove = 1;
-}
-
-void qset_free(struct whc *whc, struct whc_qset *qset)
-{
- dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
-}
-
-/**
- * qset_delete - wait for a qset to be unused, then free it.
- */
-void qset_delete(struct whc *whc, struct whc_qset *qset)
-{
- wait_for_completion(&qset->remove_complete);
- qset_free(whc, qset);
-}
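
For reference, the page-list sizing in qset_fill_page_list() above rounds the buffer out to page boundaries at both ends: one page-list entry is needed for every 4 KiB page the buffer touches, including partial first and last pages. A minimal standalone sketch of that arithmetic follows (illustrative values only, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define WHCI_PAGE_SIZE 4096
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint64_t dma_addr = 0x12345ff0; /* illustrative, unaligned start */
        uint64_t len = 8192;            /* spans three 4 KiB pages */
        uint64_t sp = dma_addr & ~(uint64_t)(WHCI_PAGE_SIZE - 1);
        uint64_t ep = dma_addr + len;
        unsigned int num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

        /* pages 0x12345000, 0x12346000 and 0x12347000 -> 3 entries */
        printf("num_pointers = %u\n", num_pointers);
        return 0;
    }
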
diff --git a/drivers/staging/wusbcore/host/whci/whcd.h b/drivers/staging/wusbcore/host/whci/whcd.h
deleted file mode 100644
index a442a2589e83..000000000000
--- a/drivers/staging/wusbcore/host/whci/whcd.h
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) private header.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#ifndef __WHCD_H
-#define __WHCD_H
-
-#include <linux/workqueue.h>
-
-#include "../../../uwb/include/whci.h"
-#include "../../../uwb/include/umc.h"
-#include "whci-hc.h"
-
-/* Generic command timeout. */
-#define WHC_GENCMD_TIMEOUT_MS 100
-
-struct whc_dbg;
-
-struct whc {
- struct wusbhc wusbhc;
- struct umc_dev *umc;
-
- resource_size_t base_phys;
- void __iomem *base;
- int irq;
-
- u8 n_devices;
- u8 n_keys;
- u8 n_mmc_ies;
-
- u64 *pz_list;
- struct dn_buf_entry *dn_buf;
- struct di_buf_entry *di_buf;
- dma_addr_t pz_list_dma;
- dma_addr_t dn_buf_dma;
- dma_addr_t di_buf_dma;
-
- spinlock_t lock;
- struct mutex mutex;
-
- void * gen_cmd_buf;
- dma_addr_t gen_cmd_buf_dma;
- wait_queue_head_t cmd_wq;
-
- struct workqueue_struct *workqueue;
- struct work_struct dn_work;
-
- struct dma_pool *qset_pool;
-
- struct list_head async_list;
- struct list_head async_removed_list;
- wait_queue_head_t async_list_wq;
- struct work_struct async_work;
-
- struct list_head periodic_list[5];
- struct list_head periodic_removed_list;
- wait_queue_head_t periodic_list_wq;
- struct work_struct periodic_work;
-
- struct whc_dbg *dbg;
-};
-
-#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
-
-/**
- * struct whc_std - a software TD.
- * @urb: the URB this sTD is for.
- * @len: the length of data in the associated TD.
- * @ntds_remaining: number of TDs (starting from this one) in this transfer.
- * @qtd: the hardware qTD this sTD is currently queued in (NULL if none).
- *
- * @bounce_buf: a bounce buffer if the std was from an urb with a sg
- * list that could not be mapped to qTDs directly.
- * @bounce_sg: the first scatterlist element bounce_buf is for.
- * @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
- *
- * Queued URBs may require more TDs than are available in a qset so we
- * use a list of these "software TDs" (sTDs) to hold per-TD data.
- */
-struct whc_std {
- struct urb *urb;
- size_t len;
- int ntds_remaining;
- struct whc_qtd *qtd;
-
- struct list_head list_node;
- int num_pointers;
- dma_addr_t dma_addr;
- struct whc_page_list_entry *pl_virt;
-
- void *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned bounce_offset;
-};
-
-/**
- * struct whc_urb - per URB host controller structure.
- * @urb: the URB this struct is for.
- * @qset: the qset associated with the URB.
- * @dequeue_work: the work to remove the URB when dequeued.
- * @is_async: whether the URB belongs to the async scheduler.
- * @status: the status to be returned when calling wusbhc_giveback_urb.
- */
-struct whc_urb {
- struct urb *urb;
- struct whc_qset *qset;
- struct work_struct dequeue_work;
- bool is_async;
- int status;
-};
-
-/**
- * whc_std_last - is this sTD the URB's last?
- * @std: the sTD to check.
- */
-static inline bool whc_std_last(struct whc_std *std)
-{
- return std->ntds_remaining <= 1;
-}
-
-enum whc_update {
- WHC_UPDATE_ADDED = 0x01,
- WHC_UPDATE_REMOVED = 0x02,
- WHC_UPDATE_UPDATED = 0x04,
-};
-
-/* init.c */
-int whc_init(struct whc *whc);
-void whc_clean_up(struct whc *whc);
-
-/* hw.c */
-void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
-int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
-void whc_hw_error(struct whc *whc, const char *reason);
-
-/* wusb.c */
-int whc_wusbhc_start(struct wusbhc *wusbhc);
-void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay);
-int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
- u8 handle, struct wuie_hdr *wuie);
-int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
-int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
-int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
-int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
-int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
- const void *ptk, size_t key_size);
-int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
- const void *gtk, size_t key_size);
-int whc_set_cluster_id(struct whc *whc, u8 bcid);
-
-/* int.c */
-irqreturn_t whc_int_handler(struct usb_hcd *hcd);
-void whc_dn_work(struct work_struct *work);
-
-/* asl.c */
-void asl_start(struct whc *whc);
-void asl_stop(struct whc *whc);
-int asl_init(struct whc *whc);
-void asl_clean_up(struct whc *whc);
-int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
-int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
-void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
-void scan_async_work(struct work_struct *work);
-
-/* pzl.c */
-int pzl_init(struct whc *whc);
-void pzl_clean_up(struct whc *whc);
-void pzl_start(struct whc *whc);
-void pzl_stop(struct whc *whc);
-int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
-int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
-void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
-void scan_periodic_work(struct work_struct *work);
-
-/* qset.c */
-struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
-void qset_free(struct whc *whc, struct whc_qset *qset);
-struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
-void qset_delete(struct whc *whc, struct whc_qset *qset);
-void qset_clear(struct whc *whc, struct whc_qset *qset);
-void qset_reset(struct whc *whc, struct whc_qset *qset);
-int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
- gfp_t mem_flags);
-void qset_free_std(struct whc *whc, struct whc_std *std);
-void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
- struct urb *urb, int status);
-void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
- struct whc_qtd *qtd);
-void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
- struct whc_qtd *qtd);
-enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
-void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
-void pzl_update(struct whc *whc, uint32_t wusbcmd);
-void asl_update(struct whc *whc, uint32_t wusbcmd);
-
-/* debug.c */
-void whc_dbg_init(struct whc *whc);
-void whc_dbg_clean_up(struct whc *whc);
-
-#endif /* #ifndef __WHCD_H */
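
The wusbhc_to_whc() macro above is the usual container_of pattern: callbacks that receive a pointer to the embedded struct wusbhc recover the enclosing struct whc from it. A standalone sketch with stand-in types (illustrative only, not driver code):

    #include <stddef.h>
    #include <stdio.h>

    struct wusbhc { int dummy; };   /* stand-in for the real struct */
    struct whc {
        struct wusbhc wusbhc;       /* embedded first here, but the
                                       pattern works at any offset */
        int irq;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct whc whc = { .irq = 42 };
        struct wusbhc *w = &whc.wusbhc;  /* what a callback receives */
        struct whc *back = container_of(w, struct whc, wusbhc);

        printf("irq = %d\n", back->irq); /* prints 42 */
        return 0;
    }
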
diff --git a/drivers/staging/wusbcore/host/whci/whci-hc.h b/drivers/staging/wusbcore/host/whci/whci-hc.h
deleted file mode 100644
index 5a86a57a80cc..000000000000
--- a/drivers/staging/wusbcore/host/whci/whci-hc.h
+++ /dev/null
@@ -1,401 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) data structures.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#ifndef _WHCI_WHCI_HC_H
-#define _WHCI_WHCI_HC_H
-
-#include <linux/list.h>
-
-/**
- * WHCI_PAGE_SIZE - page size used by WHCI
- *
- * WHCI assumes that the host system uses pages of 4096 octets.
- */
-#define WHCI_PAGE_SIZE 4096
-
-
-/**
- * QTD_MAX_XFER_SIZE - maximum number of bytes to transfer with a
- * single qTD.
- *
- * This is 2^20 - 1.
- */
-#define QTD_MAX_XFER_SIZE 1048575
-
-
-/**
- * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
- *
- * This describes the data for a bulk, control or interrupt transfer.
- *
- * [WHCI] section 3.2.4
- */
-struct whc_qtd {
- __le32 status; /*< remaining transfer len and transfer status */
- __le32 options;
- __le64 page_list_ptr; /*< physical pointer to data buffer page list*/
- __u8 setup[8]; /*< setup data for control transfers */
-} __attribute__((packed));
-
-#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */
-#define QTD_STS_HALTED (1 << 30) /* transfer halted */
-#define QTD_STS_DBE (1 << 29) /* data buffer error */
-#define QTD_STS_BABBLE (1 << 28) /* babble detected */
-#define QTD_STS_RCE (1 << 27) /* retry count exceeded */
-#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */
-#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */
-#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */
-#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
-#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */
-#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff)
-
-#define QTD_OPT_IOC (1 << 1) /* interrupt on complete */
-#define QTD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
-
-/**
- * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
- *
- * This describes the data and other parameters for an isochronous
- * transfer.
- *
- * [WHCI] section 3.2.5
- */
-struct whc_itd {
- __le16 presentation_time; /*< presentation time for OUT transfers */
- __u8 num_segments; /*< number of data segments in segment list */
- __u8 status; /*< command execution status */
- __le32 options; /*< misc transfer options */
- __le64 page_list_ptr; /*< physical pointer to data buffer page list */
- __le64 seg_list_ptr; /*< physical pointer to segment list */
-} __attribute__((packed));
-
-#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */
-#define ITD_STS_DBE (1 << 5) /* data buffer error */
-#define ITD_STS_BABBLE (1 << 4) /* babble detected */
-#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
-
-#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */
-#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
-
-/**
- * struct whc_page_list_entry - page list entry.
- *
- * A TD's page list must contain sufficient page list entries for the
- * total data length in the TD.
- *
- * [WHCI] section 3.2.4.3
- */
-struct whc_page_list_entry {
- __le64 buf_ptr; /*< physical pointer to buffer */
-} __attribute__((packed));
-
-/**
- * struct whc_seg_list_entry - Segment list entry.
- *
- * Describes a portion of the data buffer described in the containing
- * qTD's page list.
- *
- * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
- * + qtd->seg_list_ptr[seg].offset;
- *
- * Segments can't cross page boundaries.
- *
- * [WHCI] section 3.2.5.5
- */
-struct whc_seg_list_entry {
- __le16 len; /*< segment length */
- __u8 idx; /*< index into page list */
- __u8 status; /*< segment status */
- __le16 offset; /*< 12 bit offset into page */
-} __attribute__((packed));
-
-/**
- * struct whc_qhead - endpoint and status information for a qset.
- *
- * [WHCI] section 3.2.6
- */
-struct whc_qhead {
- __le64 link; /*< next qset in list */
- __le32 info1;
- __le32 info2;
- __le32 info3;
- __le16 status;
- __le16 err_count; /*< transaction error count */
- __le32 cur_window;
- __le32 scratch[3]; /*< h/w scratch area */
- union {
- struct whc_qtd qtd;
- struct whc_itd itd;
- } overlay;
-} __attribute__((packed));
-
-#define QH_LINK_PTR_MASK (~0x03Full)
-#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
-#define QH_LINK_IQS (1 << 4) /* isochronous queue set */
-#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */
-#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */
-
-#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */
-#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */
-#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */
-#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */
-#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */
-#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */
-#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */
-#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */
-#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */
-#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */
-#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */
-
-#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */
-#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */
-#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */
-#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */
-#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */
-#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */
-
-#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
-#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
-
-#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
-#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
-
-#define QH_STATUS_FLOW_CTRL (1 << 15)
-#define QH_STATUS_ICUR(i) ((i) << 5)
-#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7)
-#define QH_STATUS_SEQ_MASK 0x1f
-
-/**
- * usb_pipe_to_qh_type - USB core pipe type to QH transfer type
- *
- * Returns the QH type field for a USB core pipe type.
- */
-static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
-{
- static const unsigned type[] = {
- [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
- [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT,
- [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL,
- [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK,
- };
- return type[usb_pipetype(pipe)];
-}
-
-/**
- * Maximum number of TDs in a qset.
- */
-#define WHCI_QSET_TD_MAX 8
-
-/**
- * struct whc_qset - WUSB data transfers to a specific endpoint
- * @qh: the QHead of this qset
- * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
- * transfers)
- * @itd: up to 8 iTDs (for qsets for isochronous transfers)
- * @qset_dma: DMA address for this qset
- * @whc: WHCI HC this qset is for
- * @ep: endpoint
- * @stds: list of sTDs queued to this qset
- * @ntds: number of qTDs queued (not necessarily the same as nTDs
- * field in the QH)
- * @td_start: index of the first qTD in the list
- * @td_end: index of next free qTD in the list (provided
- * ntds < WHCI_QSET_TD_MAX)
- *
- * Queue Sets (qsets) are added to the asynchronous schedule list
- * (ASL) or the periodic zone list (PZL).
- *
- * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
- * Each TD may refer to at most 1 MiB of data. If a single transfer
- * has > 8 MiB of data, TDs can be reused as they are completed since
- * the TD list is used as a circular buffer. Similarly, several
- * (smaller) transfers may be queued in a qset.
- *
- * WHCI controllers may cache portions of the qsets in the ASL and
- * PZL, requiring the WHCD to inform the WHC that the lists have been
- * updated (fields changed or qsets inserted or removed). For safe
- * insertion and removal of qsets from the lists the schedule must be
- * stopped to avoid races in updating the QH link pointers.
- *
- * Since the HC is free to execute qsets in any order, all transfers
- * to an endpoint should use the same qset to ensure transfers are
- * executed in the order they're submitted.
- *
- * [WHCI] section 3.2.3
- */
-struct whc_qset {
- struct whc_qhead qh;
- union {
- struct whc_qtd qtd[WHCI_QSET_TD_MAX];
- struct whc_itd itd[WHCI_QSET_TD_MAX];
- };
-
- /* private data for WHCD */
- dma_addr_t qset_dma;
- struct whc *whc;
- struct usb_host_endpoint *ep;
- struct list_head stds;
- int ntds;
- int td_start;
- int td_end;
- struct list_head list_node;
- unsigned in_sw_list:1;
- unsigned in_hw_list:1;
- unsigned remove:1;
- unsigned reset:1;
- struct urb *pause_after_urb;
- struct completion remove_complete;
- uint16_t max_packet;
- uint8_t max_burst;
- uint8_t max_seq;
-};
-
-static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
-{
- if (target)
- *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
- else
- *ptr = QH_LINK_T;
-}
-
-/**
- * struct di_buf_entry - Device Information (DI) buffer entry.
- *
- * There's one of these per connected device.
- */
-struct di_buf_entry {
- __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
- __le32 addr_sec_info; /*< addressing and security info */
- __le32 reserved[7];
-} __attribute__((packed));
-
-#define WHC_DI_SECURE (1 << 31)
-#define WHC_DI_DISABLE (1 << 30)
-#define WHC_DI_KEY_IDX(k) ((k) << 8)
-#define WHC_DI_KEY_IDX_MASK 0x0000ff00
-#define WHC_DI_DEV_ADDR(a) ((a) << 0)
-#define WHC_DI_DEV_ADDR_MASK 0x000000ff
-
-/**
- * struct dn_buf_entry - Device Notification (DN) buffer entry.
- *
- * [WHCI] section 3.2.8
- */
-struct dn_buf_entry {
- __u8 msg_size; /*< number of octets of valid DN data */
- __u8 reserved1;
- __u8 src_addr; /*< source address */
- __u8 status; /*< buffer entry status */
- __le32 tkid; /*< TKID for source device, valid if secure bit is set */
- __u8 dn_data[56]; /*< up to 56 octets of DN data */
-} __attribute__((packed));
-
-#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */
-#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
-
-#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
-
-/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
- data. [WHCI] section 2.4.7. */
-#define WHC_GEN_CMD_DATA_LEN 256
-
-/*
- * HC registers.
- *
- * [WHCI] section 2.4
- */
-
-#define WHCIVERSION 0x00
-
-#define WHCSPARAMS 0x04
-# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
-# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff)
-# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
-
-#define WUSBCMD 0x08
-# define WUSBCMD_BCID(b) ((b) << 16)
-# define WUSBCMD_BCID_MASK (0xff << 16)
-# define WUSBCMD_ASYNC_QSET_RM (1 << 12)
-# define WUSBCMD_PERIODIC_QSET_RM (1 << 11)
-# define WUSBCMD_WUSBSI(s) ((s) << 8)
-# define WUSBCMD_WUSBSI_MASK (0x7 << 8)
-# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7)
-# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
-# define WUSBCMD_ASYNC_UPDATED (1 << 5)
-# define WUSBCMD_PERIODIC_UPDATED (1 << 4)
-# define WUSBCMD_ASYNC_EN (1 << 3)
-# define WUSBCMD_PERIODIC_EN (1 << 2)
-# define WUSBCMD_WHCRESET (1 << 1)
-# define WUSBCMD_RUN (1 << 0)
-
-#define WUSBSTS 0x0c
-# define WUSBSTS_ASYNC_SCHED (1 << 15)
-# define WUSBSTS_PERIODIC_SCHED (1 << 14)
-# define WUSBSTS_DNTS_SCHED (1 << 13)
-# define WUSBSTS_HCHALTED (1 << 12)
-# define WUSBSTS_GEN_CMD_DONE (1 << 9)
-# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8)
-# define WUSBSTS_DNTS_OVERFLOW (1 << 7)
-# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
-# define WUSBSTS_HOST_ERR (1 << 5)
-# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4)
-# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3)
-# define WUSBSTS_DNTS_INT (1 << 2)
-# define WUSBSTS_ERR_INT (1 << 1)
-# define WUSBSTS_INT (1 << 0)
-# define WUSBSTS_INT_MASK 0x3ff
-
-#define WUSBINTR 0x10
-# define WUSBINTR_GEN_CMD_DONE (1 << 9)
-# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8)
-# define WUSBINTR_DNTS_OVERFLOW (1 << 7)
-# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6)
-# define WUSBINTR_HOST_ERR (1 << 5)
-# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4)
-# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3)
-# define WUSBINTR_DNTS_INT (1 << 2)
-# define WUSBINTR_ERR_INT (1 << 1)
-# define WUSBINTR_INT (1 << 0)
-# define WUSBINTR_ALL 0x3ff
-
-#define WUSBGENCMDSTS 0x14
-# define WUSBGENCMDSTS_ACTIVE (1 << 31)
-# define WUSBGENCMDSTS_ERROR (1 << 24)
-# define WUSBGENCMDSTS_IOC (1 << 23)
-# define WUSBGENCMDSTS_MMCIE_ADD 0x01
-# define WUSBGENCMDSTS_MMCIE_RM 0x02
-# define WUSBGENCMDSTS_SET_MAS 0x03
-# define WUSBGENCMDSTS_CHAN_STOP 0x04
-# define WUSBGENCMDSTS_RWP_EN 0x05
-
-#define WUSBGENCMDPARAMS 0x18
-#define WUSBGENADDR 0x20
-#define WUSBASYNCLISTADDR 0x28
-#define WUSBDNTSBUFADDR 0x30
-#define WUSBDEVICEINFOADDR 0x38
-
-#define WUSBSETSECKEYCMD 0x40
-# define WUSBSETSECKEYCMD_SET (1 << 31)
-# define WUSBSETSECKEYCMD_ERASE (1 << 30)
-# define WUSBSETSECKEYCMD_GTK (1 << 8)
-# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
-
-#define WUSBTKID 0x44
-#define WUSBSECKEY 0x48
-#define WUSBPERIODICLISTBASE 0x58
-#define WUSBMASINDEX 0x60
-
-#define WUSBDNTSCTRL 0x64
-# define WUSBDNTSCTRL_ACTIVE (1 << 31)
-# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
-# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0)
-
-#define WUSBTIME 0x68
-# define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff
-
-#define WUSBBPST 0x6c
-#define WUSBDIBUPDATED 0x70
-
-#endif /* #ifndef _WHCI_WHCI_HC_H */
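
The QH_INFO1_* macros above are shift-and-or field packers; qset_fill_qh() in qset.c ORs them into a single 32-bit word. A standalone sketch with illustrative field values (macro definitions copied from the header above, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define QH_INFO1_EP(e)           ((e) << 0)
    #define QH_INFO1_DIR_IN          (1 << 4)
    #define QH_INFO1_TR_TYPE_BULK    (0x2 << 5)
    #define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8)
    #define QH_INFO1_MAX_PKT_LEN(l)  ((l) << 16)

    int main(void)
    {
        unsigned int ep = 2, dev_idx = 1, max_packet = 512;
        uint32_t info1 = QH_INFO1_EP(ep)
            | QH_INFO1_DIR_IN
            | QH_INFO1_TR_TYPE_BULK
            | QH_INFO1_DEV_INFO_IDX(dev_idx)
            | QH_INFO1_MAX_PKT_LEN(max_packet);

        /* ep 2, IN, bulk, device info index 1, 512-byte packets */
        printf("info1 = 0x%08x\n", (unsigned)info1); /* 0x02000152 */
        return 0;
    }
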
diff --git a/drivers/staging/wusbcore/host/whci/wusb.c b/drivers/staging/wusbcore/host/whci/wusb.c
deleted file mode 100644
index 6d0068ab35e4..000000000000
--- a/drivers/staging/wusbcore/host/whci/wusb.c
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless Host Controller (WHC) WUSB operations.
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-
-#include "../../../uwb/include/umc.h"
-#include "../../wusbhc.h"
-
-#include "whcd.h"
-
-static int whc_update_di(struct whc *whc, int idx)
-{
- int offset = idx / 32;
- u32 bit = 1 << (idx % 32);
-
- le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
-
- return whci_wait_for(&whc->umc->dev,
- whc->base + WUSBDIBUPDATED + offset, bit, 0,
- 100, "DI update");
-}
-
-/*
- * WHCI starts MMCs based on there being a valid GTK, so these need
- * only start/stop the asynchronous and periodic schedules and send a
- * channel stop command.
- */
-
-int whc_wusbhc_start(struct wusbhc *wusbhc)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
-
- asl_start(whc);
- pzl_start(whc);
-
- return 0;
-}
-
-void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
- u32 stop_time, now_time;
- int ret;
-
- pzl_stop(whc);
- asl_stop(whc);
-
- now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK;
- stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff;
- ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0);
- if (ret == 0)
- msleep(delay);
-}
-
-int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
- u8 handle, struct wuie_hdr *wuie)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
- u32 params;
-
- params = (interval << 24)
- | (repeat_cnt << 16)
- | (wuie->bLength << 8)
- | handle;
-
- return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
-}
-
-int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
- u32 params;
-
- params = handle;
-
- return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
-}
-
-int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
-
- if (stream_index >= 0)
- whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
-
- return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
-}
-
-int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
- int idx = wusb_dev->port_idx;
- struct di_buf_entry *di = &whc->di_buf[idx];
- int ret;
-
- mutex_lock(&whc->mutex);
-
- uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
- di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
- di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
-
- ret = whc_update_di(whc, idx);
-
- mutex_unlock(&whc->mutex);
-
- return ret;
-}
-
-/*
- * Set the number of Device Notification Time Slots (DNTS) and enable
- * device notifications.
- */
-int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
- u32 dntsctrl;
-
- dntsctrl = WUSBDNTSCTRL_ACTIVE
- | WUSBDNTSCTRL_INTERVAL(interval)
- | WUSBDNTSCTRL_SLOTS(slots);
-
- le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
-
- return 0;
-}
-
-static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
- const void *key, size_t key_size, bool is_gtk)
-{
- uint32_t setkeycmd;
- uint32_t seckey[4];
- int i;
- int ret;
-
- memcpy(seckey, key, key_size);
- setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
- if (is_gtk)
- setkeycmd |= WUSBSETSECKEYCMD_GTK;
-
- le_writel(tkid, whc->base + WUSBTKID);
- for (i = 0; i < 4; i++)
- le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
- le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
-
- ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
- WUSBSETSECKEYCMD_SET, 0, 100, "set key");
-
- return ret;
-}
-
-/**
- * whc_set_ptk - set the PTK to use for a device.
- *
- * The index into the key table for this PTK is the same as the
- * device's port index.
- */
-int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
- const void *ptk, size_t key_size)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
- struct di_buf_entry *di = &whc->di_buf[port_idx];
- int ret;
-
- mutex_lock(&whc->mutex);
-
- if (ptk) {
- ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
- if (ret)
- goto out;
-
- di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
- di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
- } else
- di->addr_sec_info &= ~WHC_DI_SECURE;
-
- ret = whc_update_di(whc, port_idx);
-out:
- mutex_unlock(&whc->mutex);
- return ret;
-}
-
-/**
- * whc_set_gtk - set the GTK for subsequent broadcast packets
- *
- * The GTK is stored in the last entry in the key table (the previous
- * N_DEVICES entries are for the per-device PTKs).
- */
-int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
- const void *gtk, size_t key_size)
-{
- struct whc *whc = wusbhc_to_whc(wusbhc);
- int ret;
-
- mutex_lock(&whc->mutex);
-
- ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
-
- mutex_unlock(&whc->mutex);
-
- return ret;
-}
-
-int whc_set_cluster_id(struct whc *whc, u8 bcid)
-{
- whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
- return 0;
-}
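
The stop-time arithmetic in whc_wusbhc_stop() above operates on the 24-bit WUSBTIME channel-time counter: (delay * 8) << 7 equals delay * 1024, which approximates delay milliseconds if the counter ticks in microseconds (an assumption made here for illustration), and the final mask handles counter wraparound. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 24-bit channel time, assumed here to tick in microseconds */
        uint32_t now_time = 0x00fffc00; /* illustrative, near wrap */
        int delay = 3;                  /* milliseconds */
        uint32_t stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff;

        /* (3 * 8) << 7 == 3072 ~= 3 ms; the mask wraps to 0x000800 */
        printf("stop_time = 0x%06x\n", (unsigned)stop_time);
        return 0;
    }
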
diff --git a/drivers/staging/wusbcore/include/association.h b/drivers/staging/wusbcore/include/association.h
deleted file mode 100644
index d7f3cb9b9db5..000000000000
--- a/drivers/staging/wusbcore/include/association.h
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB - Cable Based Association
- *
- * Copyright (C) 2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- */
-#ifndef __LINUX_USB_ASSOCIATION_H
-#define __LINUX_USB_ASSOCIATION_H
-
-
-/*
- * Association attributes
- *
- * Association Models Supplement to WUSB 1.0 T[3-1]
- *
- * Each field in the structures has its ID, its length and then the
- * value. This is the actual definition of the field's ID and its
- * length.
- */
-struct wusb_am_attr {
- __u8 id;
- __u8 len;
-};
-
-/* Different fields defined by the spec */
-#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) }
-#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) }
-#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) }
-#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) }
-#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) }
-#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */
-#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */
-#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) }
-#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) }
-#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) }
-#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) }
-
-/* CBAF Control Requests (AMS1.0[T4-1]) */
-enum {
- CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01,
- CBAF_REQ_GET_ASSOCIATION_REQUEST,
- CBAF_REQ_SET_ASSOCIATION_RESPONSE
-};
-
-/*
- * CBAF USB-interface definitions
- *
- * No altsettings, one optional interrupt endpoint.
- */
-enum {
- CBAF_IFACECLASS = 0xef,
- CBAF_IFACESUBCLASS = 0x03,
- CBAF_IFACEPROTOCOL = 0x01,
-};
-
-/* Association Information (AMS1.0[T4-3]) */
-struct wusb_cbaf_assoc_info {
- __le16 Length;
- __u8 NumAssociationRequests;
- __le16 Flags;
- __u8 AssociationRequestsArray[];
-} __attribute__((packed));
-
-/* Association Request (AMS1.0[T4-4]) */
-struct wusb_cbaf_assoc_request {
- __u8 AssociationDataIndex;
- __u8 Reserved;
- __le16 AssociationTypeId;
- __le16 AssociationSubTypeId;
- __le32 AssociationTypeInfoSize;
-} __attribute__((packed));
-
-enum {
- AR_TYPE_WUSB = 0x0001,
- AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000,
- AR_TYPE_WUSB_ASSOCIATE = 0x0001,
-};
-
-/* Association Attribute header (AMS1.0[3.8]) */
-struct wusb_cbaf_attr_hdr {
- __le16 id;
- __le16 len;
-} __attribute__((packed));
-
-/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */
-struct wusb_cbaf_host_info {
- struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
- __le16 AssociationTypeId;
- struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
- __le16 AssociationSubTypeId;
- struct wusb_cbaf_attr_hdr CHID_hdr;
- struct wusb_ckhdid CHID;
- struct wusb_cbaf_attr_hdr LangID_hdr;
- __le16 LangID;
- struct wusb_cbaf_attr_hdr HostFriendlyName_hdr;
- __u8 HostFriendlyName[];
-} __attribute__((packed));
-
-/* Device Info (AMS1.0[T4-8])
- *
- * I still don't get this tag'n'header stuff for each goddamn
- * field...
- */
-struct wusb_cbaf_device_info {
- struct wusb_cbaf_attr_hdr Length_hdr;
- __le32 Length;
- struct wusb_cbaf_attr_hdr CDID_hdr;
- struct wusb_ckhdid CDID;
- struct wusb_cbaf_attr_hdr BandGroups_hdr;
- __le16 BandGroups;
- struct wusb_cbaf_attr_hdr LangID_hdr;
- __le16 LangID;
- struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr;
- __u8 DeviceFriendlyName[];
-} __attribute__((packed));
-
-/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */
-struct wusb_cbaf_cc_data {
- struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
- __le16 AssociationTypeId;
- struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
- __le16 AssociationSubTypeId;
- struct wusb_cbaf_attr_hdr Length_hdr;
- __le32 Length;
- struct wusb_cbaf_attr_hdr ConnectionContext_hdr;
- struct wusb_ckhdid CHID;
- struct wusb_ckhdid CDID;
- struct wusb_ckhdid CK;
- struct wusb_cbaf_attr_hdr BandGroups_hdr;
- __le16 BandGroups;
-} __attribute__((packed));
-
-/* CC_DATA - Failure case (AMS1.0[T4-10]) */
-struct wusb_cbaf_cc_data_fail {
- struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
- __le16 AssociationTypeId;
- struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
- __le16 AssociationSubTypeId;
- struct wusb_cbaf_attr_hdr Length_hdr;
- __le16 Length;
- struct wusb_cbaf_attr_hdr AssociationStatus_hdr;
- __u32 AssociationStatus;
-} __attribute__((packed));
-
-#endif /* __LINUX_USB_ASSOCIATION_H */
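
Each association attribute above is carried as a little-endian (id, length) header followed by the value bytes, so a buffer of them can be walked as a simple TLV stream. A standalone parsing sketch (illustrative only, no bounds checking on the length field):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        /* two attributes: id 0x0000 len 2, then id 0x1000 (CHID) len 16 */
        uint8_t buf[4 + 2 + 4 + 16] = {
            0x00, 0x00, 0x02, 0x00, 0x01, 0x00,
            0x00, 0x10, 0x10, 0x00, /* 16 bytes of CHID follow (zeroed) */
        };
        size_t off = 0;

        while (off + 4 <= sizeof(buf)) {
            uint16_t id = get_le16(buf + off);
            uint16_t len = get_le16(buf + off + 2);

            printf("attr id 0x%04x, %u byte(s)\n", id, len);
            off += 4 + len;
        }
        return 0;
    }
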
diff --git a/drivers/staging/wusbcore/include/wusb-wa.h b/drivers/staging/wusbcore/include/wusb-wa.h
deleted file mode 100644
index 64a840b5106e..000000000000
--- a/drivers/staging/wusbcore/include/wusb-wa.h
+++ /dev/null
@@ -1,304 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB Wire Adapter constants and structures.
- *
- * Copyright (C) 2005-2006 Intel Corporation.
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * FIXME: docs
- * FIXME: organize properly, group logically
- *
- * All the event structures are defined in uwb/spec.h, as they are
- * common to the WHCI and WUSB radio control interfaces.
- *
- * References:
- * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
- */
-#ifndef __LINUX_USB_WUSB_WA_H
-#define __LINUX_USB_WUSB_WA_H
-
-/**
- * Radio Command Request for the Radio Control Interface
- *
- * Radio Control Interface command and event codes are the same as
- * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
- */
-enum {
- WA_EXEC_RC_CMD = 40, /* Radio Control command Request */
-};
-
-/* Wireless Adapter Requests ([WUSB] table 8-51) */
-enum {
- WUSB_REQ_ADD_MMC_IE = 20,
- WUSB_REQ_REMOVE_MMC_IE = 21,
- WUSB_REQ_SET_NUM_DNTS = 22,
- WUSB_REQ_SET_CLUSTER_ID = 23,
- WUSB_REQ_SET_DEV_INFO = 24,
- WUSB_REQ_GET_TIME = 25,
- WUSB_REQ_SET_STREAM_IDX = 26,
- WUSB_REQ_SET_WUSB_MAS = 27,
- WUSB_REQ_CHAN_STOP = 28,
-};
-
-
-/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
-enum {
- WUSB_TIME_ADJ = 0,
- WUSB_TIME_BPST = 1,
- WUSB_TIME_WUSB = 2,
-};
-
-enum {
- WA_ENABLE = 0x01,
- WA_RESET = 0x02,
- RPIPE_PAUSE = 0x1,
- RPIPE_STALL = 0x2,
-};
-
-/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
-enum {
- WA_STATUS_ENABLED = 0x01,
- WA_STATUS_RESETTING = 0x02
-};
-
-enum rpipe_crs {
- RPIPE_CRS_CTL = 0x01,
- RPIPE_CRS_ISO = 0x02,
- RPIPE_CRS_BULK = 0x04,
- RPIPE_CRS_INTR = 0x08
-};
-
-/**
- * RPipe descriptor ([WUSB] section 8.5.2.11)
- *
- * FIXME: explain rpipes
- */
-struct usb_rpipe_descriptor {
- u8 bLength;
- u8 bDescriptorType;
- __le16 wRPipeIndex;
- __le16 wRequests;
- __le16 wBlocks; /* rw if 0 */
- __le16 wMaxPacketSize; /* rw */
- union {
- u8 dwa_bHSHubAddress; /* rw: DWA. */
- u8 hwa_bMaxBurst; /* rw: HWA. */
- };
- union {
- u8 dwa_bHSHubPort; /* rw: DWA. */
- u8 hwa_bDeviceInfoIndex; /* rw: HWA. */
- };
- u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */
- union {
- u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */
- u8 hwa_reserved; /* rw: HWA. */
- };
- u8 bEndpointAddress; /* rw: Target EP address */
- u8 bDataSequence; /* ro: Current Data sequence */
- __le32 dwCurrentWindow; /* ro */
- u8 bMaxDataSequence; /* ro?: max supported seq */
- u8 bInterval; /* rw: */
- u8 bOverTheAirInterval; /* rw: */
- u8 bmAttribute; /* ro? */
- u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */
- u8 bmRetryOptions; /* rw? */
- __le16 wNumTransactionErrors; /* rw */
-} __attribute__ ((packed));
-
-/**
- * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
- *
- * These are the notifications coming on the notification endpoint of
- * an HWA and a DWA.
- */
-enum wa_notif_type {
- DWA_NOTIF_RWAKE = 0x91,
- DWA_NOTIF_PORTSTATUS = 0x92,
- WA_NOTIF_TRANSFER = 0x93,
- HWA_NOTIF_BPST_ADJ = 0x94,
- HWA_NOTIF_DN = 0x95,
-};
-
-/**
- * Wire Adapter notification header
- *
- * Notifications coming from a wire adapter use a common header
- * defined in [WUSB] sections 8.4.5 & 8.5.4.
- */
-struct wa_notif_hdr {
- u8 bLength;
- u8 bNotifyType; /* enum wa_notif_type */
-} __packed;
-
-/**
- * HWA DN Received notification ([WUSB] section 8.5.4.2)
- *
- * The DNData is specified in WUSB1.0[7.6]. For each device
- * notification we received, we just need to dispatch it.
- *
- * @dndata: this is really an array of notifications, but all start
- * with the same header.
- */
-struct hwa_notif_dn {
- struct wa_notif_hdr hdr;
- u8 bSourceDeviceAddr; /* from errata 2005/07 */
- u8 bmAttributes;
- struct wusb_dn_hdr dndata[];
-} __packed;
-
-/* [WUSB] section 8.3.3 */
-enum wa_xfer_type {
- WA_XFER_TYPE_CTL = 0x80,
- WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */
- WA_XFER_TYPE_ISO = 0x82,
- WA_XFER_RESULT = 0x83,
- WA_XFER_ABORT = 0x84,
- WA_XFER_ISO_PACKET_INFO = 0xA0,
- WA_XFER_ISO_PACKET_STATUS = 0xA1,
-};
-
-/* [WUSB] section 8.3.3 */
-struct wa_xfer_hdr {
- u8 bLength; /* 0x18 */
- u8 bRequestType; /* 0x80 WA_XFER_TYPE_CTL */
- __le16 wRPipe; /* RPipe index */
- __le32 dwTransferID; /* Host-assigned ID */
- __le32 dwTransferLength; /* Length of data to xfer */
- u8 bTransferSegment;
-} __packed;
-
-struct wa_xfer_ctl {
- struct wa_xfer_hdr hdr;
- u8 bmAttribute;
- __le16 wReserved;
- struct usb_ctrlrequest baSetupData;
-} __packed;
-
-struct wa_xfer_bi {
- struct wa_xfer_hdr hdr;
- u8 bReserved;
- __le16 wReserved;
-} __packed;
-
-/* [WUSB] section 8.5.5 */
-struct wa_xfer_hwaiso {
- struct wa_xfer_hdr hdr;
- u8 bReserved;
- __le16 wPresentationTime;
- __le32 dwNumOfPackets;
-} __packed;
-
-struct wa_xfer_packet_info_hwaiso {
- __le16 wLength;
- u8 bPacketType;
- u8 bReserved;
- __le16 PacketLength[];
-} __packed;
-
-struct wa_xfer_packet_status_len_hwaiso {
- __le16 PacketLength;
- __le16 PacketStatus;
-} __packed;
-
-struct wa_xfer_packet_status_hwaiso {
- __le16 wLength;
- u8 bPacketType;
- u8 bReserved;
- struct wa_xfer_packet_status_len_hwaiso PacketStatus[];
-} __packed;
-
-/* [WUSB] section 8.3.3.5 */
-struct wa_xfer_abort {
- u8 bLength;
- u8 bRequestType;
- __le16 wRPipe; /* RPipe index */
- __le32 dwTransferID; /* Host-assigned ID */
-} __packed;
-
-/**
- * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
- *
- */
-struct wa_notif_xfer {
- struct wa_notif_hdr hdr;
- u8 bEndpoint;
- u8 Reserved;
-} __packed;
-
-/** Transfer result basic codes [WUSB] table 8-15 */
-enum {
- WA_XFER_STATUS_SUCCESS,
- WA_XFER_STATUS_HALTED,
- WA_XFER_STATUS_DATA_BUFFER_ERROR,
- WA_XFER_STATUS_BABBLE,
- WA_XFER_RESERVED,
- WA_XFER_STATUS_NOT_FOUND,
- WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
- WA_XFER_STATUS_TRANSACTION_ERROR,
- WA_XFER_STATUS_ABORTED,
- WA_XFER_STATUS_RPIPE_NOT_READY,
- WA_XFER_INVALID_FORMAT,
- WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
- WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
-};
-
-/** [WUSB] section 8.3.3.4 */
-struct wa_xfer_result {
- struct wa_notif_hdr hdr;
- __le32 dwTransferID;
- __le32 dwTransferLength;
- u8 bTransferSegment;
- u8 bTransferStatus;
- __le32 dwNumOfPackets;
-} __packed;
-
-/**
- * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
- *
- * NOTE: u16 fields are read Little Endian from the hardware.
- *
- * @bNumPorts is the original max number of devices that the host can
- * connect; we might chop this so the stack can handle
- * it. In case you need to access it, use wusbhc->ports_max
- * if it is a Wireless USB WA.
- */
-struct usb_wa_descriptor {
- u8 bLength;
- u8 bDescriptorType;
- __le16 bcdWAVersion;
- u8 bNumPorts; /* don't use!! */
- u8 bmAttributes; /* Reserved == 0 */
- __le16 wNumRPipes;
- __le16 wRPipeMaxBlock;
- u8 bRPipeBlockSize;
- u8 bPwrOn2PwrGood;
- u8 bNumMMCIEs;
- u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
-} __packed;
-
-/**
- * HWA Device Information Buffer (WUSB1.0[T8.54])
- */
-struct hwa_dev_info {
- u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */
- u8 bDeviceAddress;
- __le16 wPHYRates;
- u8 bmDeviceAttribute;
-} __packed;
-
-#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
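
struct wa_xfer_hdr above prefixes every transfer request sent to a wire adapter. A standalone sketch of filling one for a bulk/interrupt (WA_XFER_TYPE_BI) request, with plain uintN_t fields standing in for the kernel's __leN types and illustrative values throughout (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    struct wa_xfer_hdr_sketch {
        uint8_t  bLength;          /* 0x18, per the comment above */
        uint8_t  bRequestType;     /* WA_XFER_TYPE_BI = 0x81 */
        uint16_t wRPipe;           /* RPipe index */
        uint32_t dwTransferID;     /* host-assigned ID */
        uint32_t dwTransferLength; /* bytes to transfer */
        uint8_t  bTransferSegment;
    } __attribute__((packed));

    int main(void)
    {
        struct wa_xfer_hdr_sketch hdr = {
            .bLength = 0x18,
            .bRequestType = 0x81,   /* WA_XFER_TYPE_BI */
            .wRPipe = 3,            /* illustrative */
            .dwTransferID = 0x1234,
            .dwTransferLength = 4096,
            .bTransferSegment = 0,
        };

        printf("segment %u, %u bytes on rpipe %u\n",
               hdr.bTransferSegment,
               (unsigned)hdr.dwTransferLength,
               (unsigned)hdr.wRPipe);
        return 0;
    }
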
diff --git a/drivers/staging/wusbcore/include/wusb.h b/drivers/staging/wusbcore/include/wusb.h
deleted file mode 100644
index 09771d1da7bc..000000000000
--- a/drivers/staging/wusbcore/include/wusb.h
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB Standard Definitions
- * Event Size Tables
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * FIXME: docs
- * FIXME: organize properly, group logically
- *
- * All the event structures are defined in uwb/spec.h, as they are
- * common to the WHCI and WUSB radio control interfaces.
- */
-
-#ifndef __WUSB_H__
-#define __WUSB_H__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/usb/ch9.h>
-#include <linux/param.h>
-#include "../../uwb/include/spec.h"
-
-/**
- * WUSB Information Element header
- *
- * I don't know why they decided to make it different from the MBOA MAC
- * IE Header; beats me.
- */
-struct wuie_hdr {
- u8 bLength;
- u8 bIEIdentifier;
-} __attribute__((packed));
-
-enum {
- WUIE_ID_WCTA = 0x80,
- WUIE_ID_CONNECTACK,
- WUIE_ID_HOST_INFO,
- WUIE_ID_CHANGE_ANNOUNCE,
- WUIE_ID_DEVICE_DISCONNECT,
- WUIE_ID_HOST_DISCONNECT,
- WUIE_ID_KEEP_ALIVE = 0x89,
- WUIE_ID_ISOCH_DISCARD,
- WUIE_ID_RESET_DEVICE,
-};
-
-/**
- * Maximum number of array elements in a WUSB IE.
- *
- * WUSB1.0[7.5, before table 7-38] says that WUSB IEs that
- * are "arrays" have to be limited to 4 elements. So we define it
- * like that to ease things up and submit only the needed size.
- */
-#define WUIE_ELT_MAX 4
-
-/**
- * Wrapper for the data that defines a CHID, a CDID or a CK
- *
- * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
- * data. In order to avoid confusion and enforce types, we wrap it.
- *
- * Make it packed, as we use it in some hw definitions.
- */
-struct wusb_ckhdid {
- u8 data[16];
-} __attribute__((packed));
-
-static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
-
-#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
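-
-/*
- * For illustration: at three characters per byte (two hex digits plus
- * a separator -- the exact print format is an assumption) and a
- * trailing NUL, a printed CKHDID needs 3 * 16 + 1 = 49 bytes.
- */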
-
-/**
- * WUSB IE: Host Information (WUSB1.0[7.5.2])
- *
- * Used to provide information about the host to the Wireless USB
- * devices in range (CHID can be used as an ASCII string).
- */
-struct wuie_host_info {
- struct wuie_hdr hdr;
- __le16 attributes;
- struct wusb_ckhdid CHID;
-} __attribute__((packed));
-
-/**
- * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
- *
- * Used to acknowledge device connect requests. See note for
- * WUIE_ELT_MAX.
- */
-struct wuie_connect_ack {
- struct wuie_hdr hdr;
- struct {
- struct wusb_ckhdid CDID;
- u8 bDeviceAddress; /* 0 means unused */
- u8 bReserved;
- } blk[WUIE_ELT_MAX];
-} __attribute__((packed));
-
-/**
- * WUSB IE Host Information Element, Connect Availability
- *
- * WUSB1.0[7.5.2], bmAttributes description
- */
-enum {
- WUIE_HI_CAP_RECONNECT = 0,
- WUIE_HI_CAP_LIMITED,
- WUIE_HI_CAP_RESERVED,
- WUIE_HI_CAP_ALL,
-};
-
-/**
- * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
- *
- * Tells devices the host is going to stop sending MMCs and will disappear.
- */
-struct wuie_channel_stop {
- struct wuie_hdr hdr;
- u8 attributes;
- u8 timestamp[3];
-} __attribute__((packed));
-
-/**
- * WUSB IE: Keepalive (WUSB1.0[7.5.9])
- *
- * Ask device(s) to send keepalives.
- */
-struct wuie_keep_alive {
- struct wuie_hdr hdr;
- u8 bDeviceAddress[WUIE_ELT_MAX];
-} __attribute__((packed));
-
-/**
- * WUSB IE: Reset device (WUSB1.0[7.5.11])
- *
- * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
- * use it for one at a time...
- *
- * In any case, this request is a wee bit silly: why don't they target
- * by address??
- */
-struct wuie_reset {
- struct wuie_hdr hdr;
- struct wusb_ckhdid CDID;
-} __attribute__((packed));
-
-/**
- * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
- *
- * Tell device to disconnect; we can fit 4 addresses, but we only use
- * it for one at a time...
- */
-struct wuie_disconnect {
- struct wuie_hdr hdr;
- u8 bDeviceAddress;
- u8 padding;
-} __attribute__((packed));
-
-/**
- * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
- *
- * Tells all connected devices to disconnect.
- */
-struct wuie_host_disconnect {
- struct wuie_hdr hdr;
-} __attribute__((packed));
-
-/**
- * WUSB Device Notification header (WUSB1.0[7.6])
- */
-struct wusb_dn_hdr {
- u8 bType;
- u8 notifdata[];
-} __attribute__((packed));
-
-/** Device Notification codes (WUSB1.0[Table 7-54]) */
-enum WUSB_DN {
- WUSB_DN_CONNECT = 0x01,
- WUSB_DN_DISCONNECT = 0x02,
- WUSB_DN_EPRDY = 0x03,
- WUSB_DN_MASAVAILCHANGED = 0x04,
- WUSB_DN_RWAKE = 0x05,
- WUSB_DN_SLEEP = 0x06,
- WUSB_DN_ALIVE = 0x07,
-};
-
-/** WUSB Device Notification Connect */
-struct wusb_dn_connect {
- struct wusb_dn_hdr hdr;
- __le16 attributes;
- struct wusb_ckhdid CDID;
-} __attribute__((packed));
-
-static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
-{
- return le16_to_cpu(dn->attributes) & 0xff;
-}
-
-static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
-{
- return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
-}
-
-static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
-{
- return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
-}
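-
-/*
- * Worked example with hypothetical values: for attributes == 0x0385,
- *
- *   prev_dev_addr   = 0x0385 & 0xff        = 0x85
- *   new_connection  = (0x0385 >> 8) & 0x1  = 1
- *   beacon_behavior = (0x0385 >> 9) & 0x03 = 1
- */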
-
-/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
-struct wusb_dn_alive {
- struct wusb_dn_hdr hdr;
-} __attribute__((packed));
-
-/** Device is disconnecting (WUSB1.0[7.6.2]) */
-struct wusb_dn_disconnect {
- struct wusb_dn_hdr hdr;
-} __attribute__((packed));
-
-/* General constants */
-enum {
- WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
-};
-
-/*
- * WUSB Crypto stuff (WUSB1.0[6])
- */
-
-extern const char *wusb_et_name(u8);
-
-/**
- * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
- * the host or the device.
- */
-static inline u8 wusb_key_index(int index, int type, int originator)
-{
- return (originator << 6) | (type << 4) | index;
-}
-
-#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */
-#define WUSB_KEY_INDEX_TYPE_ASSOC 1
-#define WUSB_KEY_INDEX_TYPE_GTK 2
-#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
-#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
-/* bits 0-3 used for the key index. */
-#define WUSB_KEY_INDEX_MAX 15
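-
-/*
- * For illustration, a host-originated GTK at key index 0 encodes as
- *
- *   wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
- *                  WUSB_KEY_INDEX_ORIGINATOR_HOST)
- *       = (0 << 6) | (2 << 4) | 0 = 0x20
- */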
-
-/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
-struct aes_ccm_nonce {
- u8 sfn[6]; /* Little Endian */
- u8 tkid[3]; /* LE */
- struct uwb_dev_addr dest_addr;
- struct uwb_dev_addr src_addr;
-} __attribute__((packed));
-
-/* A CCM operation label, defined in WUSB1.0[6.5.x] */
-struct aes_ccm_label {
- u8 data[14];
-} __attribute__((packed));
-
-/*
- * Input to the key derivation sequence defined in
- * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
- * PRF function.
- */
-struct wusb_keydvt_in {
- u8 hnonce[16];
- u8 dnonce[16];
-} __attribute__((packed));
-
-/*
- * Output from the key derivation sequence defined in
- * WUSB1.0[6.5.1].
- */
-struct wusb_keydvt_out {
- u8 kck[16];
- u8 ptk[16];
-} __attribute__((packed));
-
-/* Pseudo Random Function WUSB1.0[6.5] */
-extern int wusb_crypto_init(void);
-extern void wusb_crypto_exit(void);
-extern ssize_t wusb_prf(void *out, size_t out_size,
- const u8 key[16], const struct aes_ccm_nonce *_n,
- const struct aes_ccm_label *a,
- const void *b, size_t blen, size_t len);
-
-static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
- const struct aes_ccm_nonce *n,
- const struct aes_ccm_label *a,
- const void *b, size_t blen)
-{
- return wusb_prf(out, out_size, key, n, a, b, blen, 64);
-}
-
-static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
- const struct aes_ccm_nonce *n,
- const struct aes_ccm_label *a,
- const void *b, size_t blen)
-{
- return wusb_prf(out, out_size, key, n, a, b, blen, 128);
-}
-
-static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
- const struct aes_ccm_nonce *n,
- const struct aes_ccm_label *a,
- const void *b, size_t blen)
-{
- return wusb_prf(out, out_size, key, n, a, b, blen, 256);
-}
-
-/* Key derivation WUSB1.0[6.5.1] */
-static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
- const u8 key[16],
- const struct aes_ccm_nonce *n,
- const struct wusb_keydvt_in *keydvt_in)
-{
- const struct aes_ccm_label a = { .data = "Pair-wise keys" };
- return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
- keydvt_in, sizeof(*keydvt_in));
-}
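-
-/*
- * Size check, for illustration: wusb_prf_256 yields 256 bits == 32
- * bytes, exactly filling struct wusb_keydvt_out (16-byte KCK plus
- * 16-byte PTK).
- */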
-
-/*
- * Out-of-band MIC Generation WUSB1.0[6.5.2]
- *
- * Compute the MIC over @key, @n and @hs and place it in @mic_out.
- *
- * @mic_out: Where to place the 8 byte MIC tag
- * @key: KCK from the derivation process
- * @n: CCM nonce, n->sfn == 0, TKID as established in the
- * process.
- * @hs: Handshake struct for phase 2 of the 4-way.
- * hs->bStatus and hs->bReserved are zero.
- * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2])
- * hs->dest_addr is the device's USB address padded with 0
- * hs->src_addr is the host's UWB device address
- * hs->mic is ignored (as we compute that value).
- */
-static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
- const struct aes_ccm_nonce *n,
- const struct usb_handshake *hs)
-{
- const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
- return wusb_prf_64(mic_out, 8, key, n, &a,
- hs, sizeof(*hs) - sizeof(hs->MIC));
-}
-
-#endif /* #ifndef __WUSB_H__ */
diff --git a/drivers/staging/wusbcore/mmc.c b/drivers/staging/wusbcore/mmc.c
deleted file mode 100644
index 881e1f20d718..000000000000
--- a/drivers/staging/wusbcore/mmc.c
+++ /dev/null
@@ -1,303 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
- * MMC (Microscheduled Management Command) handling
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * WUIEs and MMC IEs...well, they are almost the same at the end. MMC
- * IEs are Wireless USB IEs that go into the MMC period...[what is
- * that? look in Design-overview.txt].
- *
- *
- * This is a simple subsystem to keep track of which IEs are being
- * sent by the host in the MMC period.
- *
- * For each WUIE we ask to send, we keep it in an array, so we can
- * request its removal later, or replace the content. They are tracked
- * by pointer, so be sure to use the same pointer if you want to
- * remove it or update the contents.
- *
- * FIXME:
- * - add timers that autoremove intervalled IEs?
- */
-#include <linux/slab.h>
-#include <linux/export.h>
-#include "include/wusb.h"
-#include "wusbhc.h"
-
-/* Initialize the MMCIEs handling mechanism */
-int wusbhc_mmcie_create(struct wusbhc *wusbhc)
-{
- u8 mmcies = wusbhc->mmcies_max;
- wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL);
- if (wusbhc->mmcie == NULL)
- return -ENOMEM;
- mutex_init(&wusbhc->mmcie_mutex);
- return 0;
-}
-
-/* Release resources used by the MMCIEs handling mechanism */
-void wusbhc_mmcie_destroy(struct wusbhc *wusbhc)
-{
- kfree(wusbhc->mmcie);
-}
-
-/*
- * Add or replace an MMC Wireless USB IE.
- *
- * @interval: See WUSB1.0[8.5.3.1]
- * @repeat_cnt: See WUSB1.0[8.5.3.1]
- * @handle: See WUSB1.0[8.5.3.1]
- * @wuie: Pointer to the header of the WUSB IE data to add.
- * MUST BE allocated in a kmalloc buffer (no stack or
- * vmalloc).
- * THE CALLER ALWAYS OWNS THE POINTER (we don't free it
- * on remove, we just forget about it).
- * @returns: 0 if ok, < 0 errno code on error.
- *
- * Goes over the *whole* @wusbhc->mmcie array looking for (a) the
- * first free spot and (b) if @wuie is already in the array (aka:
- * transmitted in the MMCs) the spot where it is.
- *
- * If present, we "overwrite it" (update).
- *
- *
- * NOTE: Need special ordering rules -- see WUSB1.0 Table 7-38 below.
- * The host uses the handle as the 'sort' index. We always
- * allocate the last one for the WUIE_ID_HOST_INFO, and
- * the rest first come, first served in inverse order.
- *
- * Host software must make sure that it adds the other IEs in
- * the right order... the host hardware is responsible for
- * placing the WCTA IEs in the right place with the other IEs
- * set by host software.
- *
- * NOTE: we can access wusbhc->wa_descr without locking because it is
- * read only.
- */
-int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
- struct wuie_hdr *wuie)
-{
- int result = -ENOBUFS;
- unsigned handle, itr;
-
- /* Search a handle, taking into account the ordering */
- mutex_lock(&wusbhc->mmcie_mutex);
- switch (wuie->bIEIdentifier) {
- case WUIE_ID_HOST_INFO:
- /* Always last */
- handle = wusbhc->mmcies_max - 1;
- break;
- case WUIE_ID_ISOCH_DISCARD:
- dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x "
- "unimplemented\n", wuie->bIEIdentifier);
- result = -ENOSYS;
- goto error_unlock;
- default:
- /* search for it or find the last empty slot */
- handle = ~0;
- for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) {
- if (wusbhc->mmcie[itr] == wuie) {
- handle = itr;
- break;
- }
- if (wusbhc->mmcie[itr] == NULL)
- handle = itr;
- }
- if (handle == ~0)
- goto error_unlock;
- }
- result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle,
- wuie);
- if (result >= 0)
- wusbhc->mmcie[handle] = wuie;
-error_unlock:
- mutex_unlock(&wusbhc->mmcie_mutex);
- return result;
-}
-EXPORT_SYMBOL_GPL(wusbhc_mmcie_set);
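-
-/*
- * Usage sketch (illustrative only; dev_addr is a hypothetical variable,
- * and the kmalloc'd-buffer and caller-ownership rules above apply):
- *
- *   struct wuie_keep_alive *ie = kzalloc(sizeof(*ie), GFP_KERNEL);
- *   if (ie) {
- *           ie->hdr.bLength = sizeof(*ie);
- *           ie->hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
- *           ie->bDeviceAddress[0] = dev_addr;
- *           result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
- *   }
- */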
-
-/*
- * Remove an MMC IE previously added with wusbhc_mmcie_set()
- *
- * @wuie Pointer used to add the WUIE
- */
-void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie)
-{
- int result;
- unsigned handle, itr;
-
- mutex_lock(&wusbhc->mmcie_mutex);
- for (itr = 0; itr < wusbhc->mmcies_max; itr++) {
- if (wusbhc->mmcie[itr] == wuie) {
- handle = itr;
- goto found;
- }
- }
- mutex_unlock(&wusbhc->mmcie_mutex);
- return;
-
-found:
- result = (wusbhc->mmcie_rm)(wusbhc, handle);
- if (result == 0)
- wusbhc->mmcie[itr] = NULL;
- mutex_unlock(&wusbhc->mmcie_mutex);
-}
-EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm);
-
-static int wusbhc_mmc_start(struct wusbhc *wusbhc)
-{
- int ret;
-
- mutex_lock(&wusbhc->mutex);
- ret = wusbhc->start(wusbhc);
- if (ret >= 0)
- wusbhc->active = 1;
- mutex_unlock(&wusbhc->mutex);
-
- return ret;
-}
-
-static void wusbhc_mmc_stop(struct wusbhc *wusbhc)
-{
- mutex_lock(&wusbhc->mutex);
- wusbhc->active = 0;
- wusbhc->stop(wusbhc, WUSB_CHANNEL_STOP_DELAY_MS);
- mutex_unlock(&wusbhc->mutex);
-}
-
-/*
- * wusbhc_start - start transmitting MMCs and accepting connections
- * @wusbhc: the HC to start
- *
- * Establishes a cluster reservation, enables device connections, and
- * starts MMCs with appropriate DNTS parameters.
- */
-int wusbhc_start(struct wusbhc *wusbhc)
-{
- int result;
- struct device *dev = wusbhc->dev;
-
- WARN_ON(wusbhc->wuie_host_info != NULL);
- BUG_ON(wusbhc->uwb_rc == NULL);
-
- result = wusbhc_rsv_establish(wusbhc);
- if (result < 0) {
- dev_err(dev, "cannot establish cluster reservation: %d\n",
- result);
- goto error_rsv_establish;
- }
-
- result = wusbhc_devconnect_start(wusbhc);
- if (result < 0) {
- dev_err(dev, "error enabling device connections: %d\n",
- result);
- goto error_devconnect_start;
- }
-
- result = wusbhc_sec_start(wusbhc);
- if (result < 0) {
- dev_err(dev, "error starting security in the HC: %d\n",
- result);
- goto error_sec_start;
- }
-
- result = wusbhc->set_num_dnts(wusbhc, wusbhc->dnts_interval,
- wusbhc->dnts_num_slots);
- if (result < 0) {
- dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
- goto error_set_num_dnts;
- }
- result = wusbhc_mmc_start(wusbhc);
- if (result < 0) {
- dev_err(dev, "error starting wusbhc: %d\n", result);
- goto error_wusbhc_start;
- }
-
- return 0;
-
-error_wusbhc_start:
- wusbhc_sec_stop(wusbhc);
-error_set_num_dnts:
-error_sec_start:
- wusbhc_devconnect_stop(wusbhc);
-error_devconnect_start:
- wusbhc_rsv_terminate(wusbhc);
-error_rsv_establish:
- return result;
-}
-
-/*
- * wusbhc_stop - stop transmitting MMCs
- * @wusbhc: the HC to stop
- *
- * Stops the WUSB channel and removes the cluster reservation.
- */
-void wusbhc_stop(struct wusbhc *wusbhc)
-{
- wusbhc_mmc_stop(wusbhc);
- wusbhc_sec_stop(wusbhc);
- wusbhc_devconnect_stop(wusbhc);
- wusbhc_rsv_terminate(wusbhc);
-}
-
-/*
- * Set/reset/update a new CHID
- *
- * Depending on the previous state of the MMCs, start, stop or change
- * the sent MMC. This effectively switches the host controller on and
- * off (radio wise).
- */
-int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
-{
- int result = 0;
-
- if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
- chid = NULL;
-
- mutex_lock(&wusbhc->mutex);
- if (chid) {
- if (wusbhc->active) {
- mutex_unlock(&wusbhc->mutex);
- return -EBUSY;
- }
- wusbhc->chid = *chid;
- }
-
- /* register with UWB if we haven't already since we are about to start
- the radio. */
- if ((chid) && (wusbhc->uwb_rc == NULL)) {
- wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent);
- if (wusbhc->uwb_rc == NULL) {
- result = -ENODEV;
- dev_err(wusbhc->dev,
- "Cannot get associated UWB Host Controller\n");
- goto error_rc_get;
- }
-
- result = wusbhc_pal_register(wusbhc);
- if (result < 0) {
- dev_err(wusbhc->dev, "Cannot register as a UWB PAL\n");
- goto error_pal_register;
- }
- }
- mutex_unlock(&wusbhc->mutex);
-
- if (chid)
- result = uwb_radio_start(&wusbhc->pal);
- else if (wusbhc->uwb_rc)
- uwb_radio_stop(&wusbhc->pal);
-
- return result;
-
-error_pal_register:
- uwb_rc_put(wusbhc->uwb_rc);
- wusbhc->uwb_rc = NULL;
-error_rc_get:
- mutex_unlock(&wusbhc->mutex);
-
- return result;
-}
-EXPORT_SYMBOL_GPL(wusbhc_chid_set);
diff --git a/drivers/staging/wusbcore/pal.c b/drivers/staging/wusbcore/pal.c
deleted file mode 100644
index 30f569131471..000000000000
--- a/drivers/staging/wusbcore/pal.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB Host Controller
- * UWB Protocol Adaptation Layer (PAL) glue.
- *
- * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- */
-#include "wusbhc.h"
-
-static void wusbhc_channel_changed(struct uwb_pal *pal, int channel)
-{
- struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal);
-
- dev_dbg(wusbhc->dev, "%s: channel = %d\n", __func__, channel);
- if (channel < 0)
- wusbhc_stop(wusbhc);
- else
- wusbhc_start(wusbhc);
-}
-
-/**
- * wusbhc_pal_register - register the WUSB HC as a UWB PAL
- * @wusbhc: the WUSB HC
- */
-int wusbhc_pal_register(struct wusbhc *wusbhc)
-{
- uwb_pal_init(&wusbhc->pal);
-
- wusbhc->pal.name = "wusbhc";
- wusbhc->pal.device = wusbhc->usb_hcd.self.controller;
- wusbhc->pal.rc = wusbhc->uwb_rc;
- wusbhc->pal.channel_changed = wusbhc_channel_changed;
-
- return uwb_pal_register(&wusbhc->pal);
-}
-
-/**
- * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
- * @wusbhc: the WUSB HC
- */
-void wusbhc_pal_unregister(struct wusbhc *wusbhc)
-{
- if (wusbhc->uwb_rc)
- uwb_pal_unregister(&wusbhc->pal);
-}
diff --git a/drivers/staging/wusbcore/reservation.c b/drivers/staging/wusbcore/reservation.c
deleted file mode 100644
index b921faac698b..000000000000
--- a/drivers/staging/wusbcore/reservation.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * WUSB cluster reservation management
- *
- * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- */
-#include <linux/kernel.h>
-
-#include "../uwb/uwb.h"
-#include "wusbhc.h"
-
-/*
- * WUSB cluster reservations are multicast reservations with the
- * broadcast cluster ID (BCID) as the target DevAddr.
- *
- * FIXME: consider adjusting the reservation depending on what devices
- * are attached.
- */
-
-static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream,
- const struct uwb_mas_bm *mas)
-{
- if (mas == NULL)
- mas = &uwb_mas_bm_zero;
- return wusbhc->bwa_set(wusbhc, stream, mas);
-}
-
-/**
- * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback
- * @rsv: the reservation
- *
- * Either set or clear the HC's view of the reservation.
- *
- * FIXME: when a reservation is denied the HC should be stopped.
- */
-static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
-{
- struct wusbhc *wusbhc = rsv->pal_priv;
- struct device *dev = wusbhc->dev;
- struct uwb_mas_bm mas;
-
- dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state);
- switch (rsv->state) {
- case UWB_RSV_STATE_O_ESTABLISHED:
- uwb_rsv_get_usable_mas(rsv, &mas);
- dev_dbg(dev, "established reservation: %*pb\n",
- UWB_NUM_MAS, mas.bm);
- wusbhc_bwa_set(wusbhc, rsv->stream, &mas);
- break;
- case UWB_RSV_STATE_NONE:
- dev_dbg(dev, "removed reservation\n");
- wusbhc_bwa_set(wusbhc, 0, NULL);
- break;
- default:
- dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state);
- break;
- }
-}
-
-
-/**
- * wusbhc_rsv_establish - establish a reservation for the cluster
- * @wusbhc: the WUSB HC requesting a bandwidth reservation
- */
-int wusbhc_rsv_establish(struct wusbhc *wusbhc)
-{
- struct uwb_rc *rc = wusbhc->uwb_rc;
- struct uwb_rsv *rsv;
- struct uwb_dev_addr bcid;
- int ret;
-
- if (rc == NULL)
- return -ENODEV;
-
- rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
- if (rsv == NULL)
- return -ENOMEM;
-
- bcid.data[0] = wusbhc->cluster_id;
- bcid.data[1] = 0;
-
- rsv->target.type = UWB_RSV_TARGET_DEVADDR;
- rsv->target.devaddr = bcid;
- rsv->type = UWB_DRP_TYPE_PRIVATE;
- rsv->max_mas = 256; /* try to get as much as possible */
- rsv->min_mas = 15; /* one MAS per zone */
- rsv->max_interval = 1; /* max latency is one zone */
- rsv->is_multicast = true;
-
- ret = uwb_rsv_establish(rsv);
- if (ret == 0)
- wusbhc->rsv = rsv;
- else
- uwb_rsv_destroy(rsv);
- return ret;
-}
-
-
-/**
- * wusbhc_rsv_terminate - terminate the cluster reservation
- * @wusbhc: the WUSB host whose reservation is to be terminated
- */
-void wusbhc_rsv_terminate(struct wusbhc *wusbhc)
-{
- if (wusbhc->rsv) {
- uwb_rsv_terminate(wusbhc->rsv);
- uwb_rsv_destroy(wusbhc->rsv);
- wusbhc->rsv = NULL;
- }
-}
diff --git a/drivers/staging/wusbcore/rh.c b/drivers/staging/wusbcore/rh.c
deleted file mode 100644
index 20c08cd9dcbf..000000000000
--- a/drivers/staging/wusbcore/rh.c
+++ /dev/null
@@ -1,426 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB Host Controller
- * Root Hub operations
- *
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * We fake a root hub that has fake ports (as many as simultaneous
- * devices the Wireless USB Host Controller can deal with). For each
- * port we keep a state in @wusbhc->port[index] identical to the one
- * specified in the USB2.0[ch11] spec and some extra device
- * information that complements the one in 'struct usb_device' (as
- * this lacks a hcpriv pointer).
- *
- * Note this is common to WHCI and HWA host controllers.
- *
- * Through here we enable most of the state changes that the USB stack
- * will use to connect or disconnect devices. We need to do some
- * forced adaptation of Wireless USB device states vs. wired:
- *
- * USB: WUSB:
- *
- * Port Powered-off port slot n/a
- * Powered-on port slot available
- * Disconnected port slot available
- * Connected port slot assigned device
- * device sent DN_Connect
- * device was authenticated
- * Enabled device is authenticated, transitioned
- * from unauth -> auth -> default address
- * -> enabled
- * Reset disconnect
- * Disable disconnect
- *
- * This maps the standard USB port states with the WUSB device states
- * so we can fake ports without having to modify the USB stack.
- *
- * FIXME: this process will change in the future
- *
- *
- * ENTRY POINTS
- *
- * Our entry points into here are, as in hcd.c, the USB stack root hub
- * ops defined in the usb_hcd struct:
- *
- * wusbhc_rh_status_data() Provide hub and port status data bitmap
- *
- * wusbhc_rh_control() Execution of all the major requests
- * you can do to a hub (Set|Clear
- * features, get descriptors, status, etc).
- *
- * wusbhc_rh_[suspend|resume]() Suspend/resume the root hub
- *
- * wusbhc_rh_start_port_reset() ??? unimplemented
- */
-#include <linux/slab.h>
-#include <linux/export.h>
-#include "wusbhc.h"
-
-/*
- * Reset a fake port
- *
- * Using a Reset Device IE is too heavyweight as it causes the device
- * to enter the UnConnected state and leave the cluster; this can mean
- * that when the device reconnects it is connected to a different fake
- * port.
- *
- * Instead, reset authenticated devices with a SetAddress(0), followed
- * by a SetAddress(AuthAddr).
- *
- * For unauthenticated devices just pretend to reset but do nothing.
- * If the device initialization continues to fail it will eventually
- * time out after TrustTimeout and enter the UnConnected state.
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
- *
- * Supposedly we are the only thread accessing @wusbhc->port; in any
- * case, maybe we should move the mutex locking from
- * wusbhc_devconnect_auth() to here.
- *
- * @port_idx refers to the wusbhc's port index, not the USB port number
- */
-static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
-{
- int result = 0;
- struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
- struct wusb_dev *wusb_dev = port->wusb_dev;
-
- if (wusb_dev == NULL)
- return -ENOTCONN;
-
- port->status |= USB_PORT_STAT_RESET;
- port->change |= USB_PORT_STAT_C_RESET;
-
- if (wusb_dev->addr & WUSB_DEV_ADDR_UNAUTH)
- result = 0;
- else
- result = wusb_dev_update_address(wusbhc, wusb_dev);
-
- port->status &= ~USB_PORT_STAT_RESET;
- port->status |= USB_PORT_STAT_ENABLE;
- port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
-
- return result;
-}
-
-/*
- * Return the hub change status bitmap
- *
- * The bits in the change status bitmap are cleared when a
- * ClearPortFeature request is issued (USB2.0[11.12.3,11.12.4].
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
- *
- * WARNING!! This gets called from atomic context; we cannot get the
- * mutex--the only race condition we can find is some bit
- * changing just after we copy it, which shouldn't be too
- * big of a problem [and we can't make it a spinlock
- * because other parts need to take it and sleep].
- *
- * @usb_hcd is refcounted, so it won't disappear under us
- * and before killing a host, the polling of the root hub
- * would be stopped anyway.
- */
-int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- size_t cnt, size, bits_set = 0;
-
- /* WE DON'T LOCK, see comment */
- /* round up to bytes. Hub bit is bit 0 so add 1. */
- size = DIV_ROUND_UP(wusbhc->ports_max + 1, 8);
-
- /* clear the output buffer. */
- memset(_buf, 0, size);
- /* set the bit for each changed port. */
- for (cnt = 0; cnt < wusbhc->ports_max; cnt++) {
-
- if (wusb_port_by_idx(wusbhc, cnt)->change) {
- const int bitpos = cnt+1;
-
- _buf[bitpos/8] |= (1 << (bitpos % 8));
- bits_set++;
- }
- }
-
- return bits_set ? size : 0;
-}
-EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
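-
-/*
- * Worked example with a hypothetical hub: with ports_max == 3, the
- * bitmap covers the hub bit plus three port bits, so size ==
- * DIV_ROUND_UP(3 + 1, 8) == 1 byte. If only port index 2 has a pending
- * change, bitpos == 3, _buf[0] ends up 0x08 and the function returns 1.
- */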
-
-/*
- * Return the hub's descriptor
- *
- * NOTE: almost cut and paste from ehci-hub.c
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked
- */
-static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
- u16 wIndex,
- struct usb_hub_descriptor *descr,
- u16 wLength)
-{
- u16 temp = 1 + (wusbhc->ports_max / 8);
- u8 length = 7 + 2 * temp;
-
- if (wLength < length)
- return -ENOSPC;
- descr->bDescLength = length;
- descr->bDescriptorType = USB_DT_HUB; /* HUB type */
- descr->bNbrPorts = wusbhc->ports_max;
- descr->wHubCharacteristics = cpu_to_le16(
- HUB_CHAR_COMMON_LPSM /* All ports power at once */
- | 0x00 /* not part of compound device */
- | HUB_CHAR_NO_OCPM /* No overcurrent protection */
- | 0x00 /* 8 FS think time FIXME ?? */
- | 0x00); /* No port indicators */
- descr->bPwrOn2PwrGood = 0;
- descr->bHubContrCurrent = 0;
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&descr->u.hs.DeviceRemovable[0], 0, temp);
- memset(&descr->u.hs.DeviceRemovable[temp], 0xff, temp);
- return 0;
-}
-
-/*
- * Clear a hub feature
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
- *
- * Nothing to do, so no locking needed ;)
- */
-static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature)
-{
- int result;
-
- switch (feature) {
- case C_HUB_LOCAL_POWER:
- /* FIXME: maybe plug bit 0 to the power input status,
- * if any?
- * see wusbhc_rh_get_hub_status() */
- case C_HUB_OVER_CURRENT:
- result = 0;
- break;
- default:
- result = -EPIPE;
- }
- return result;
-}
-
-/*
- * Return hub status (it is always zero...)
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
- *
- * Nothing to do, so no locking needed ;)
- */
-static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf,
- u16 wLength)
-{
- /* FIXME: maybe plug bit 0 to the power input status (if any)? */
- *buf = 0;
- return 0;
-}
-
-/*
- * Set a port feature
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
- */
-static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature,
- u8 selector, u8 port_idx)
-{
- struct device *dev = wusbhc->dev;
-
- if (port_idx > wusbhc->ports_max)
- return -EINVAL;
-
- switch (feature) {
- /* According to USB2.0[11.24.2.13]p2, these features
- * are not required to be implemented. */
- case USB_PORT_FEAT_C_OVER_CURRENT:
- case USB_PORT_FEAT_C_ENABLE:
- case USB_PORT_FEAT_C_SUSPEND:
- case USB_PORT_FEAT_C_CONNECTION:
- case USB_PORT_FEAT_C_RESET:
- return 0;
- case USB_PORT_FEAT_POWER:
- /* No such thing, but we fake it works */
- mutex_lock(&wusbhc->mutex);
- wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER;
- mutex_unlock(&wusbhc->mutex);
- return 0;
- case USB_PORT_FEAT_RESET:
- return wusbhc_rh_port_reset(wusbhc, port_idx);
- case USB_PORT_FEAT_ENABLE:
- case USB_PORT_FEAT_SUSPEND:
- dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n",
- port_idx, feature, selector);
- return -ENOSYS;
- default:
- dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n",
- port_idx, feature, selector);
- return -EPIPE;
- }
-
- return 0;
-}
-
-/*
- * Clear a port feature...
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
- */
-static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature,
- u8 selector, u8 port_idx)
-{
- int result = 0;
- struct device *dev = wusbhc->dev;
-
- if (port_idx > wusbhc->ports_max)
- return -EINVAL;
-
- mutex_lock(&wusbhc->mutex);
- switch (feature) {
- case USB_PORT_FEAT_POWER: /* fake port always on */
- /* According to USB2.0[11.24.2.7.1.4], no need to implement? */
- case USB_PORT_FEAT_C_OVER_CURRENT:
- break;
- case USB_PORT_FEAT_C_RESET:
- wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET;
- break;
- case USB_PORT_FEAT_C_CONNECTION:
- wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION;
- break;
- case USB_PORT_FEAT_ENABLE:
- __wusbhc_dev_disable(wusbhc, port_idx);
- break;
- case USB_PORT_FEAT_C_ENABLE:
- wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE;
- break;
- case USB_PORT_FEAT_SUSPEND:
- case USB_PORT_FEAT_C_SUSPEND:
- dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n",
- port_idx, feature, selector);
- result = -ENOSYS;
- break;
- default:
- dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n",
- port_idx, feature, selector);
- result = -EPIPE;
- break;
- }
- mutex_unlock(&wusbhc->mutex);
-
- return result;
-}
-
-/*
- * Return the port's status
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
- */
-static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx,
- u32 *_buf, u16 wLength)
-{
- __le16 *buf = (__le16 *)_buf;
-
- if (port_idx > wusbhc->ports_max)
- return -EINVAL;
-
- mutex_lock(&wusbhc->mutex);
- buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status);
- buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change);
- mutex_unlock(&wusbhc->mutex);
-
- return 0;
-}
-
-/*
- * Entry point for Root Hub operations
- *
- * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
- */
-int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
- u16 wIndex, char *buf, u16 wLength)
-{
- int result = -ENOSYS;
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-
- switch (reqntype) {
- case GetHubDescriptor:
- result = wusbhc_rh_get_hub_descr(
- wusbhc, wValue, wIndex,
- (struct usb_hub_descriptor *) buf, wLength);
- break;
- case ClearHubFeature:
- result = wusbhc_rh_clear_hub_feat(wusbhc, wValue);
- break;
- case GetHubStatus:
- result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength);
- break;
-
- case SetPortFeature:
- result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8,
- (wIndex & 0xff) - 1);
- break;
- case ClearPortFeature:
- result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8,
- (wIndex & 0xff) - 1);
- break;
- case GetPortStatus:
- result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1,
- (u32 *)buf, wLength);
- break;
-
- case SetHubFeature:
- default:
- dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) "
- "UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype,
- wValue, wIndex, buf, wLength);
- /* dump_stack(); */
- result = -ENOSYS;
- }
- return result;
-}
-EXPORT_SYMBOL_GPL(wusbhc_rh_control);
-
-int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n",
- __func__, usb_hcd, wusbhc, port_idx);
- WARN_ON(1);
- return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset);
-
-static void wusb_port_init(struct wusb_port *port)
-{
- port->status |= USB_PORT_STAT_HIGH_SPEED;
-}
-
-/*
- * Alloc fake port specific fields and status.
- */
-int wusbhc_rh_create(struct wusbhc *wusbhc)
-{
- int result = -ENOMEM;
- size_t port_size, itr;
- port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]);
- wusbhc->port = kzalloc(port_size, GFP_KERNEL);
- if (wusbhc->port == NULL)
- goto error_port_alloc;
- for (itr = 0; itr < wusbhc->ports_max; itr++)
- wusb_port_init(&wusbhc->port[itr]);
- result = 0;
-error_port_alloc:
- return result;
-}
-
-void wusbhc_rh_destroy(struct wusbhc *wusbhc)
-{
- kfree(wusbhc->port);
-}
diff --git a/drivers/staging/wusbcore/security.c b/drivers/staging/wusbcore/security.c
deleted file mode 100644
index 14ac8c98ac9e..000000000000
--- a/drivers/staging/wusbcore/security.c
+++ /dev/null
@@ -1,599 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB Host Controller
- * Security support: encryption enablement, etc
- *
- * Copyright (C) 2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- */
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/usb/ch9.h>
-#include <linux/random.h>
-#include <linux/export.h>
-#include "wusbhc.h"
-#include <asm/unaligned.h>
-
-static void wusbhc_gtk_rekey_work(struct work_struct *work);
-
-int wusbhc_sec_create(struct wusbhc *wusbhc)
-{
- /*
- * WQ is singlethread because we need to serialize rekey operations.
- * Use a separate workqueue for security operations instead of the
- * wusbd workqueue because security operations may need to communicate
- * directly with downstream wireless devices using synchronous URBs.
- * If a device is not responding, this could block other host
- * controller operations.
- */
- wusbhc->wq_security = create_singlethread_workqueue("wusbd_security");
- if (wusbhc->wq_security == NULL) {
- pr_err("WUSB-core: Cannot create wusbd_security workqueue\n");
- return -ENOMEM;
- }
-
- wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) +
- sizeof(wusbhc->gtk.data);
- wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
- wusbhc->gtk.descr.bReserved = 0;
- wusbhc->gtk_index = 0;
-
- INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work);
-
- return 0;
-}
-
-
-/* Called when the HC is destroyed */
-void wusbhc_sec_destroy(struct wusbhc *wusbhc)
-{
- destroy_workqueue(wusbhc->wq_security);
-}
-
-
-/**
- * wusbhc_next_tkid - generate a new, currently unused, TKID
- * @wusbhc: the WUSB host controller
- * @wusb_dev: the device whose PTK the TKID is for
- * (or NULL for a TKID for a GTK)
- *
- * The generated TKID consists of two parts: the device's authenticated
- * address (or 0 or a GTK); and an incrementing number. This ensures
- * that TKIDs cannot be shared between devices and by the time the
- * incrementing number wraps around the older TKIDs will no longer be
- * in use (a maximum of two keys may be active at any one time).
- */
-static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
-{
- u32 *tkid;
- u32 addr;
-
- if (wusb_dev == NULL) {
- tkid = &wusbhc->gtk_tkid;
- addr = 0;
- } else {
- tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid;
- addr = wusb_dev->addr & 0x7f;
- }
-
- *tkid = (addr << 8) | ((*tkid + 1) & 0xff);
-
- return *tkid;
-}
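-
-/*
- * For illustration (hypothetical values): a device at authenticated
- * address 0x12 whose stored TKID low byte is 0x05 gets
- * (0x12 << 8) | 0x06 == 0x1206 next; for a GTK (wusb_dev == NULL) the
- * address byte is 0.
- */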
-
-static void wusbhc_generate_gtk(struct wusbhc *wusbhc)
-{
- const size_t key_size = sizeof(wusbhc->gtk.data);
- u32 tkid;
-
- tkid = wusbhc_next_tkid(wusbhc, NULL);
-
- wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff;
- wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff;
- wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff;
-
- get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size);
-}
-
-/**
- * wusbhc_sec_start - start the security management process
- * @wusbhc: the WUSB host controller
- *
- * Generate and set an initial GTK on the host controller.
- *
- * Called when the HC is started.
- */
-int wusbhc_sec_start(struct wusbhc *wusbhc)
-{
- const size_t key_size = sizeof(wusbhc->gtk.data);
- int result;
-
- wusbhc_generate_gtk(wusbhc);
-
- result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
- &wusbhc->gtk.descr.bKeyData, key_size);
- if (result < 0)
- dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
- result);
-
- return result;
-}
-
-/**
- * wusbhc_sec_stop - stop the security management process
- * @wusbhc: the WUSB host controller
- *
- * Wait for any pending GTK rekeys to stop.
- */
-void wusbhc_sec_stop(struct wusbhc *wusbhc)
-{
- cancel_work_sync(&wusbhc->gtk_rekey_work);
-}
-
-
-/** @returns encryption type name */
-const char *wusb_et_name(u8 x)
-{
- switch (x) {
- case USB_ENC_TYPE_UNSECURE: return "unsecure";
- case USB_ENC_TYPE_WIRED: return "wired";
- case USB_ENC_TYPE_CCM_1: return "CCM-1";
- case USB_ENC_TYPE_RSA_1: return "RSA-1";
- default: return "unknown";
- }
-}
-EXPORT_SYMBOL_GPL(wusb_et_name);
-
-/*
- * Set the device encryption method
- *
- * We tell the device which encryption method to use; we do this when
- * setting up the device's security.
- */
-static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
-{
- int result;
- struct device *dev = &usb_dev->dev;
- struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
-
- if (value) {
- value = wusb_dev->ccm1_etd.bEncryptionValue;
- } else {
- /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */
- value = 0;
- }
- /* Set the device's encryption value */
- result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ENCRYPTION,
- USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (result < 0)
- dev_err(dev, "Can't set device's WUSB encryption to "
- "%s (value %d): %d\n",
- wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType),
- wusb_dev->ccm1_etd.bEncryptionValue, result);
- return result;
-}
-
-/*
- * Set the GTK to be used by a device.
- *
- * The device must be authenticated.
- */
-static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
-{
- struct usb_device *usb_dev = wusb_dev->usb_dev;
- u8 key_index = wusb_key_index(wusbhc->gtk_index,
- WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST);
-
- return usb_control_msg(
- usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_DESCRIPTOR,
- USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- USB_DT_KEY << 8 | key_index, 0,
- &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
- USB_CTRL_SET_TIMEOUT);
-}
-
-
-/* FIXME: prototype for adding security */
-int wusb_dev_sec_add(struct wusbhc *wusbhc,
- struct usb_device *usb_dev, struct wusb_dev *wusb_dev)
-{
- int result, bytes, secd_size;
- struct device *dev = &usb_dev->dev;
- struct usb_security_descriptor *secd, *new_secd;
- const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
- const void *itr, *top;
- char buf[64];
-
- secd = kmalloc(sizeof(*secd), GFP_KERNEL);
- if (secd == NULL) {
- result = -ENOMEM;
- goto out;
- }
-
- result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
- 0, secd, sizeof(*secd));
- if (result < (int)sizeof(*secd)) {
- dev_err(dev, "Can't read security descriptor or "
- "not enough data: %d\n", result);
- goto out;
- }
- secd_size = le16_to_cpu(secd->wTotalLength);
- new_secd = krealloc(secd, secd_size, GFP_KERNEL);
- if (new_secd == NULL) {
- dev_err(dev,
- "Can't allocate space for security descriptors\n");
- result = -ENOMEM;
- goto out;
- }
- secd = new_secd;
- result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
- 0, secd, secd_size);
- if (result < secd_size) {
- dev_err(dev, "Can't read security descriptor or "
- "not enough data: %d\n", result);
- goto out;
- }
- bytes = 0;
- itr = &secd[1];
- top = (void *)secd + result;
- while (itr < top) {
- etd = itr;
- if (top - itr < sizeof(*etd)) {
- dev_err(dev, "BUG: bad device security descriptor; "
- "not enough data (%zu vs %zu bytes left)\n",
- top - itr, sizeof(*etd));
- break;
- }
- if (etd->bLength < sizeof(*etd)) {
- dev_err(dev, "BUG: bad device encryption descriptor; "
- "descriptor is too short "
- "(%u vs %zu needed)\n",
- etd->bLength, sizeof(*etd));
- break;
- }
- itr += etd->bLength;
- bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
- "%s (0x%02x/%02x) ",
- wusb_et_name(etd->bEncryptionType),
- etd->bEncryptionValue, etd->bAuthKeyIndex);
- if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1)
- ccm1_etd = etd;
- }
- /* This code only supports CCM1 as of now. */
- /* FIXME: user has to choose which sec mode to use?
- * In theory we want CCM */
- if (ccm1_etd == NULL) {
- dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
- "can't use!\n");
- result = -EINVAL;
- goto out;
- }
- wusb_dev->ccm1_etd = *ccm1_etd;
- dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
- buf, wusb_et_name(ccm1_etd->bEncryptionType),
- ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
- result = 0;
-out:
- kfree(secd);
- return result;
-}
-
-void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
-{
- /* Nothing so far */
-}
-
-/**
- * Update the address of an unauthenticated WUSB device
- *
- * Once we have successfully authenticated, we take it to addr0 state
- * and then to a normal address.
- *
- * Before, the device's address (as known by it) was usb_dev->devnum |
- * 0x80 (the unauthenticated address). With this we update it to
- * usb_dev->devnum.
- */
-int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
-{
- int result = -ENOMEM;
- struct usb_device *usb_dev = wusb_dev->usb_dev;
- struct device *dev = &usb_dev->dev;
- u8 new_address = wusb_dev->addr & 0x7F;
-
- /* Set address 0 */
- result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ADDRESS,
- USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "auth failed: can't set address 0: %d\n",
- result);
- goto error_addr0;
- }
- result = wusb_set_dev_addr(wusbhc, wusb_dev, 0);
- if (result < 0)
- goto error_addr0;
- usb_set_device_state(usb_dev, USB_STATE_DEFAULT);
- usb_ep0_reinit(usb_dev);
-
- /* Set new (authenticated) address. */
- result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ADDRESS,
- USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- new_address, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "auth failed: can't set address %u: %d\n",
- new_address, result);
- goto error_addr;
- }
- result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address);
- if (result < 0)
- goto error_addr;
- usb_set_device_state(usb_dev, USB_STATE_ADDRESS);
- usb_ep0_reinit(usb_dev);
- usb_dev->authenticated = 1;
-error_addr:
-error_addr0:
- return result;
-}
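-
-/*
- * For illustration (hypothetical address): a device that authenticated
- * while at unauthenticated address 0x85 ends up, after the two
- * SetAddress() steps above, at new_address = 0x85 & 0x7F = 0x05.
- */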
-
-/* FIXME: split and cleanup */
-int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
- struct wusb_ckhdid *ck)
-{
- int result = -ENOMEM;
- struct usb_device *usb_dev = wusb_dev->usb_dev;
- struct device *dev = &usb_dev->dev;
- u32 tkid;
- struct usb_handshake *hs;
- struct aes_ccm_nonce ccm_n;
- u8 mic[8];
- struct wusb_keydvt_in keydvt_in;
- struct wusb_keydvt_out keydvt_out;
-
- hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
- if (!hs)
- goto error_kzalloc;
-
- /* We need to turn on encryption before beginning the 4-way
- * handshake (WUSB1.0[.3.2.2]) */
- result = wusb_dev_set_encryption(usb_dev, 1);
- if (result < 0)
- goto error_dev_set_encryption;
-
- tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
-
- hs[0].bMessageNumber = 1;
- hs[0].bStatus = 0;
- put_unaligned_le32(tkid, hs[0].tTKID);
- hs[0].bReserved = 0;
- memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
- get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
- memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
-
- result = usb_control_msg(
- usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_HANDSHAKE,
- USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 1, 0, &hs[0], sizeof(hs[0]), USB_CTRL_SET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "Handshake1: request failed: %d\n", result);
- goto error_hs1;
- }
-
- /* Handshake 2, from the device -- need to verify fields */
- result = usb_control_msg(
- usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_GET_HANDSHAKE,
- USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 2, 0, &hs[1], sizeof(hs[1]), USB_CTRL_GET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "Handshake2: request failed: %d\n", result);
- goto error_hs2;
- }
-
- result = -EINVAL;
- if (hs[1].bMessageNumber != 2) {
- dev_err(dev, "Handshake2 failed: bad message number %u\n",
- hs[1].bMessageNumber);
- goto error_hs2;
- }
- if (hs[1].bStatus != 0) {
- dev_err(dev, "Handshake2 failed: bad status %u\n",
- hs[1].bStatus);
- goto error_hs2;
- }
- if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
- dev_err(dev, "Handshake2 failed: TKID mismatch "
- "(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
- hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
- hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
- goto error_hs2;
- }
- if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
- dev_err(dev, "Handshake2 failed: CDID mismatch\n");
- goto error_hs2;
- }
-
- /* Setup the CCM nonce */
- memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
- put_unaligned_le32(tkid, ccm_n.tkid);
- ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
- ccm_n.dest_addr.data[0] = wusb_dev->addr;
- ccm_n.dest_addr.data[1] = 0;
-
- /* Derive the KCK and PTK from CK, the CCM, H and D nonces */
- memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
- memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
- result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
- if (result < 0) {
- dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
- result);
- goto error_hs2;
- }
-
- /* Compute MIC and verify it */
- result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
- if (result < 0) {
- dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
- result);
- goto error_hs2;
- }
-
- if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
- dev_err(dev, "Handshake2 failed: MIC mismatch\n");
- goto error_hs2;
- }
-
- /* Send Handshake3 */
- hs[2].bMessageNumber = 3;
- hs[2].bStatus = 0;
- put_unaligned_le32(tkid, hs[2].tTKID);
- hs[2].bReserved = 0;
- memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
- memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
- result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
- if (result < 0) {
- dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
- result);
- goto error_hs2;
- }
-
- result = usb_control_msg(
- usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_HANDSHAKE,
- USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 3, 0, &hs[2], sizeof(hs[2]), USB_CTRL_SET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "Handshake3: request failed: %d\n", result);
- goto error_hs3;
- }
-
- result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
- keydvt_out.ptk, sizeof(keydvt_out.ptk));
- if (result < 0)
- goto error_wusbhc_set_ptk;
-
- result = wusb_dev_set_gtk(wusbhc, wusb_dev);
- if (result < 0) {
- dev_err(dev, "Set GTK for device: request failed: %d\n",
- result);
- goto error_wusbhc_set_gtk;
- }
-
- /* Update the device's address from unauth to auth */
- if (usb_dev->authenticated == 0) {
- result = wusb_dev_update_address(wusbhc, wusb_dev);
- if (result < 0)
- goto error_dev_update_address;
- }
- result = 0;
- dev_info(dev, "device authenticated\n");
-
-error_dev_update_address:
-error_wusbhc_set_gtk:
-error_wusbhc_set_ptk:
-error_hs3:
-error_hs2:
-error_hs1:
- memset(hs, 0, 3*sizeof(hs[0]));
- memzero_explicit(&keydvt_out, sizeof(keydvt_out));
- memzero_explicit(&keydvt_in, sizeof(keydvt_in));
- memzero_explicit(&ccm_n, sizeof(ccm_n));
- memzero_explicit(mic, sizeof(mic));
- if (result < 0)
- wusb_dev_set_encryption(usb_dev, 0);
-error_dev_set_encryption:
- kfree(hs);
-error_kzalloc:
- return result;
-}
-
-/*
- * Once all connected and authenticated devices have received the new
- * GTK, switch the host to using it.
- */
-static void wusbhc_gtk_rekey_work(struct work_struct *work)
-{
- struct wusbhc *wusbhc = container_of(work,
- struct wusbhc, gtk_rekey_work);
- size_t key_size = sizeof(wusbhc->gtk.data);
- int port_idx;
- struct wusb_dev *wusb_dev, *wusb_dev_next;
- LIST_HEAD(rekey_list);
-
- mutex_lock(&wusbhc->mutex);
- /* generate the new key */
- wusbhc_generate_gtk(wusbhc);
- /* roll the gtk index. */
- wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1);
- /*
- * Save all connected devices on a list while holding wusbhc->mutex and
- * take a reference to each one. Then submit the set key request to
- * them after releasing the lock in order to avoid a deadlock.
- */
- for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) {
- wusb_dev = wusbhc->port[port_idx].wusb_dev;
- if (!wusb_dev || !wusb_dev->usb_dev
- || !wusb_dev->usb_dev->authenticated)
- continue;
-
- wusb_dev_get(wusb_dev);
- list_add_tail(&wusb_dev->rekey_node, &rekey_list);
- }
- mutex_unlock(&wusbhc->mutex);
-
- /* Submit the rekey requests without holding wusbhc->mutex. */
- list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
- rekey_node) {
- list_del_init(&wusb_dev->rekey_node);
- dev_dbg(&wusb_dev->usb_dev->dev,
- "%s: rekey device at port %d\n",
- __func__, wusb_dev->port_idx);
-
- if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
- dev_err(&wusb_dev->usb_dev->dev,
- "%s: rekey device at port %d failed\n",
- __func__, wusb_dev->port_idx);
- }
- wusb_dev_put(wusb_dev);
- }
-
- /* Switch the host controller to use the new GTK. */
- mutex_lock(&wusbhc->mutex);
- wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
- &wusbhc->gtk.descr.bKeyData, key_size);
- mutex_unlock(&wusbhc->mutex);
-}
-
-/**
- * wusbhc_gtk_rekey - generate and distribute a new GTK
- * @wusbhc: the WUSB host controller
- *
- * Generate a new GTK and distribute it to all connected and
- * authenticated devices. When all devices have the new GTK, the host
- * starts using it.
- *
- * This must be called after every device disconnect (see [WUSB]
- * section 6.2.11.2).
- */
-void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
-{
- /*
- * We need to submit a URB to the downstream WUSB devices in order to
- * change the group key. This can't be done while holding the
- * wusbhc->mutex since that is also taken in the urb_enqueue routine
- * and will cause a deadlock. Instead, queue a work item to do
- * it when the lock is not held.
- */
- queue_work(wusbhc->wq_security, &wusbhc->gtk_rekey_work);
-}
diff --git a/drivers/staging/wusbcore/wa-hc.c b/drivers/staging/wusbcore/wa-hc.c
deleted file mode 100644
index 6827075fb8a1..000000000000
--- a/drivers/staging/wusbcore/wa-hc.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wire Adapter Host Controller Driver
- * Common items to HWA and DWA based HCDs
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include "wusbhc.h"
-#include "wa-hc.h"
-
-/**
- * Assumes
- *
- * wa->usb_dev and wa->usb_iface initialized and refcounted,
- * wa->wa_descr initialized.
- */
-int wa_create(struct wahc *wa, struct usb_interface *iface,
- kernel_ulong_t quirks)
-{
- int result;
- struct device *dev = &iface->dev;
-
- if (iface->cur_altsetting->desc.bNumEndpoints < 3)
- return -ENODEV;
-
- result = wa_rpipes_create(wa);
- if (result < 0)
- goto error_rpipes_create;
- wa->quirks = quirks;
- /* Fill up Data Transfer EP pointers */
- wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
- wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
- wa->dti_buf_size = usb_endpoint_maxp(wa->dti_epd);
- wa->dti_buf = kmalloc(wa->dti_buf_size, GFP_KERNEL);
- if (wa->dti_buf == NULL) {
- result = -ENOMEM;
- goto error_dti_buf_alloc;
- }
- result = wa_nep_create(wa, iface);
- if (result < 0) {
- dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
- result);
- goto error_nep_create;
- }
- return 0;
-
-error_nep_create:
- kfree(wa->dti_buf);
-error_dti_buf_alloc:
- wa_rpipes_destroy(wa);
-error_rpipes_create:
- return result;
-}
-EXPORT_SYMBOL_GPL(wa_create);
-
-
-void __wa_destroy(struct wahc *wa)
-{
- if (wa->dti_urb) {
- usb_kill_urb(wa->dti_urb);
- usb_put_urb(wa->dti_urb);
- }
- kfree(wa->dti_buf);
- wa_nep_destroy(wa);
- wa_rpipes_destroy(wa);
-}
-EXPORT_SYMBOL_GPL(__wa_destroy);
-
-/**
- * wa_reset_all - reset the WA device
- * @wa: the WA to be reset
- *
- * For HWAs the radio controller and all other PALs are also reset.
- */
-void wa_reset_all(struct wahc *wa)
-{
- /* FIXME: assuming HWA. */
- wusbhc_reset_all(wa->wusb);
-}
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/wa-hc.h b/drivers/staging/wusbcore/wa-hc.h
deleted file mode 100644
index 5a38465724c2..000000000000
--- a/drivers/staging/wusbcore/wa-hc.h
+++ /dev/null
@@ -1,467 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * HWA Host Controller Driver
- * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This driver implements a USB Host Controller (struct usb_hcd) for a
- * Wireless USB Host Controller based on the Wireless USB 1.0
- * Host-Wire-Adapter specification (in layman's terms, a USB dongle that
- * implements a Wireless USB host).
- *
- * Check out the Design-overview.txt file in the source documentation
- * for other details on the implementation.
- *
- * Main blocks:
- *
- * driver glue with the driver API, workqueue daemon
- *
- * lc RC instance life cycle management (create, destroy...)
- *
- * hcd glue with the USB API Host Controller Interface API.
- *
- * nep Notification EndPoint management: collect notifications
- * and queue them with the workqueue daemon.
- *
- * Handle notifications as coming from the NEP. Sends them
- * off to their respective modules (eg: connect,
- * disconnect and reset go to devconnect).
- *
- * rpipe Remote Pipe management; rpipe is what we use to write
- * to an endpoint on a WUSB device that is connected to a
- * HWA RC.
- *
- * xfer Transfer management -- this is all the code that gets a
- * buffer and pushes it to a device (or vice versa).
- *
- * Some day a lot of this code will be shared between this driver and
- * the drivers for DWA (xfer, rpipe).
- *
- * It all starts at driver.c:hwahc_probe(), when one of these guys is
- * connected. hwahc_disconnect() stops it.
- *
- * During operation, the main activity is devices connecting or
- * disconnecting. They cause the HWA RC to send notifications into
- * nep.c:hwahc_nep_cb() that will dispatch them to
- * notif.c:wa_notif_dispatch(). From there they will fan to cause
- * device connects, disconnects, etc.
- *
- * Note much of the activity is difficult to follow. For example a
- * device connect goes to devconnect, which will cause the "fake" root
- * hub port to show a connect and stop there. Then hub_wq will notice
- * and call into the rh.c:hwahc_rc_port_reset() code to authenticate
- * the device (and this might require user intervention) and enable
- * the port.
- *
- * We also have a timer workqueue going from devconnect.c that
- * schedules in hwahc_devconnect_create().
- *
- * The rest of the traffic is in the usual entry points of a USB HCD,
- * which are hooked up in driver.c:hwahc_rc_driver, and defined in
- * hcd.c.
- */
-
-#ifndef __HWAHC_INTERNAL_H__
-#define __HWAHC_INTERNAL_H__
-
-#include <linux/completion.h>
-#include <linux/usb.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include "../uwb/uwb.h"
-#include "include/wusb.h"
-#include "include/wusb-wa.h"
-
-struct wusbhc;
-struct wahc;
-extern void wa_urb_enqueue_run(struct work_struct *ws);
-extern void wa_process_errored_transfers_run(struct work_struct *ws);
-
-/**
- * RPipe instance
- *
- * @descr's fields are kept in LE, as we need to send it back and
- * forth.
- *
- * @wa is referenced when set
- *
- * @segs_available is the number of request segments that still can
- * be submitted to the controller without overloading
- * it. It is initialized to descr->wRequests when
- * aiming.
- *
- * An rpipe supports a max of descr->wRequests at the same time; before
- * submitting, seg_lock has to be taken. If segs_available > 0, then we can
- * submit; if not, we have to queue them.
- */
-struct wa_rpipe {
- struct kref refcnt;
- struct usb_rpipe_descriptor descr;
- struct usb_host_endpoint *ep;
- struct wahc *wa;
- spinlock_t seg_lock;
- struct list_head seg_list;
- struct list_head list_node;
- atomic_t segs_available;
- u8 buffer[1]; /* For reads/writes on USB */
-};
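-
-/*
- * Illustrative sketch (not part of the original header): the submission
- * gating described above, with a hypothetical seg and submit() helper.
- * The real logic lives in wa-xfer.c.
- *
- *	spin_lock_irqsave(&rpipe->seg_lock, flags);
- *	if (atomic_read(&rpipe->segs_available) > 0) {
- *		rpipe_avail_dec(rpipe);		// claim a request slot
- *		submit(seg);			// push to the controller
- *	} else {
- *		// no slots: park it until a segment completes
- *		list_add_tail(&seg->list_node, &rpipe->seg_list);
- *	}
- *	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
- */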
-
-
-enum wa_dti_state {
- WA_DTI_TRANSFER_RESULT_PENDING,
- WA_DTI_ISOC_PACKET_STATUS_PENDING,
- WA_DTI_BUF_IN_DATA_PENDING
-};
-
-enum wa_quirks {
- /*
- * The Alereon HWA expects the data frames in isochronous transfer
- * requests to be concatenated and not sent as separate packets.
- */
- WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01,
- /*
- * The Alereon HWA can be instructed to not send transfer notifications
- * as an optimization.
- */
- WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS = 0x02,
-};
-
-enum wa_vendor_specific_requests {
- WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C,
- WA_REQ_ALEREON_FEATURE_SET = 0x01,
- WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
-};
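-
-/*
- * Illustrative sketch (not from the original source): one way the Alereon
- * constants above could be combined into a vendor control request. The
- * helper name and the wValue/wIndex layout are assumptions.
- */
-static inline int example_alereon_disable_xfer_notifs(
- struct usb_device *usb_dev, u8 ifnum)
-{
- return usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- WA_REQ_ALEREON_FEATURE_SET, /* wValue: feature set/clear */
- ifnum, /* wIndex: interface number (assumed) */
- NULL, 0, USB_CTRL_SET_TIMEOUT);
-}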
-
-#define WA_MAX_BUF_IN_URBS 4
-/**
- * Instance of a HWA Host Controller
- *
- * Except where a more specific lock/mutex applies or the field is
- * atomic, all fields are protected by @mutex.
- *
- * @wa_descr Can be accessed without locking because it is in
- * the same area where the device descriptors were
- * read, so it is guaranteed to exist unmodified while
- * the device exists.
- *
- * Endianness has been converted to the CPU's byte order.
- *
- * @nep_* can be accessed without locking as its processing is
- * serialized; we submit a NEP URB and it comes to
- * hwahc_nep_cb(), which won't issue another URB until it is
- * done processing it.
- *
- * @xfer_list:
- *
- * List of active transfers, used to verify existence from an xfer id
- * obtained from the xfer result message. Can't use urb->list because
- * it goes by endpoint, and we don't know the endpoint at the time
- * when we get the xfer result message. We can't really rely on the
- * pointer (will have to change for 64 bits) as the xfer id is 32 bits.
- *
- * @xfer_delayed_list: List of transfers that need to be started
- * (with a workqueue, because they were
- * submitted from an atomic context).
- *
- * FIXME: this needs to be layered up: a wusbhc layer (for sharing
- * commonalities with WHCI), a wa layer (for sharing
- * commonalities with DWA-RC).
- */
-struct wahc {
- struct usb_device *usb_dev;
- struct usb_interface *usb_iface;
-
- /* HC to deliver notifications */
- union {
- struct wusbhc *wusb;
- struct dwahc *dwa;
- };
-
- const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
- const struct usb_wa_descriptor *wa_descr;
-
- struct urb *nep_urb; /* Notification EndPoint [lockless] */
- struct edc nep_edc;
- void *nep_buffer;
- size_t nep_buffer_size;
-
- atomic_t notifs_queued;
-
- u16 rpipes;
- unsigned long *rpipe_bm; /* rpipe usage bitmap */
- struct list_head rpipe_delayed_list; /* delayed RPIPES. */
- spinlock_t rpipe_lock; /* protect rpipe_bm and delayed list */
- struct mutex rpipe_mutex; /* assigning resources to endpoints */
-
- /*
- * dti_state is used to track the state of the dti_urb. When dti_state
- * is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
- * dti_isoc_xfer_seg identify which xfer the incoming isoc packet
- * status refers to.
- */
- enum wa_dti_state dti_state;
- u32 dti_isoc_xfer_in_progress;
- u8 dti_isoc_xfer_seg;
- struct urb *dti_urb; /* URB for reading xfer results */
- /* URBs for reading data in */
- struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
- int active_buf_in_urbs; /* number of buf_in_urbs active. */
- struct edc dti_edc; /* DTI error density counter */
- void *dti_buf;
- size_t dti_buf_size;
-
- unsigned long dto_in_use; /* protect dto endpoint serialization */
-
- s32 status; /* For reading status */
-
- struct list_head xfer_list;
- struct list_head xfer_delayed_list;
- struct list_head xfer_errored_list;
- /*
- * lock for the above xfer lists. Can be taken while a xfer->lock is
- * held but not in the reverse order.
- */
- spinlock_t xfer_list_lock;
- struct work_struct xfer_enqueue_work;
- struct work_struct xfer_error_work;
- atomic_t xfer_id_count;
-
- kernel_ulong_t quirks;
-};
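-
-/*
- * Illustrative note (not original): per the comment on xfer_list_lock
- * above, the only legal nesting of the two xfer locks is
- *
- *	spin_lock_irqsave(&xfer->lock, flags);
- *	spin_lock(&xfer->wa->xfer_list_lock);	// inner lock
- *	...
- *
- * never the reverse.
- */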
-
-
-extern int wa_create(struct wahc *wa, struct usb_interface *iface,
- kernel_ulong_t);
-extern void __wa_destroy(struct wahc *wa);
-extern int wa_dti_start(struct wahc *wa);
-void wa_reset_all(struct wahc *wa);
-
-
-/* Miscellaneous constants */
-enum {
- /** Max number of EPROTO errors we tolerate on the NEP in a
- * period of time */
- HWAHC_EPROTO_MAX = 16,
- /** Period of time for EPROTO errors (in jiffies) */
- HWAHC_EPROTO_PERIOD = 4 * HZ,
-};
-
-
-/* Notification endpoint handling */
-extern int wa_nep_create(struct wahc *, struct usb_interface *);
-extern void wa_nep_destroy(struct wahc *);
-
-static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
-{
- struct urb *urb = wa->nep_urb;
- urb->transfer_buffer = wa->nep_buffer;
- urb->transfer_buffer_length = wa->nep_buffer_size;
- return usb_submit_urb(urb, gfp_mask);
-}
-
-static inline void wa_nep_disarm(struct wahc *wa)
-{
- usb_kill_urb(wa->nep_urb);
-}
-
-
-/* RPipes */
-static inline void wa_rpipe_init(struct wahc *wa)
-{
- INIT_LIST_HEAD(&wa->rpipe_delayed_list);
- spin_lock_init(&wa->rpipe_lock);
- mutex_init(&wa->rpipe_mutex);
-}
-
-static inline void wa_init(struct wahc *wa)
-{
- int index;
-
- edc_init(&wa->nep_edc);
- atomic_set(&wa->notifs_queued, 0);
- wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
- wa_rpipe_init(wa);
- edc_init(&wa->dti_edc);
- INIT_LIST_HEAD(&wa->xfer_list);
- INIT_LIST_HEAD(&wa->xfer_delayed_list);
- INIT_LIST_HEAD(&wa->xfer_errored_list);
- spin_lock_init(&wa->xfer_list_lock);
- INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
- INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
- wa->dto_in_use = 0;
- atomic_set(&wa->xfer_id_count, 1);
- /* init the buf in URBs */
- for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
- usb_init_urb(&(wa->buf_in_urbs[index]));
- wa->active_buf_in_urbs = 0;
-}
-
-/**
- * Destroy a pipe (when refcount drops to zero)
- *
- * Assumes it has been moved to the "QUIESCING" state.
- */
-struct wa_xfer;
-extern void rpipe_destroy(struct kref *_rpipe);
-static inline
-void __rpipe_get(struct wa_rpipe *rpipe)
-{
- kref_get(&rpipe->refcnt);
-}
-extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
- struct urb *, gfp_t);
-static inline void rpipe_put(struct wa_rpipe *rpipe)
-{
- kref_put(&rpipe->refcnt, rpipe_destroy);
-}
-extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
-extern void rpipe_clear_feature_stalled(struct wahc *,
- struct usb_host_endpoint *);
-extern int wa_rpipes_create(struct wahc *);
-extern void wa_rpipes_destroy(struct wahc *);
-static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
-{
- atomic_dec(&rpipe->segs_available);
-}
-
-/**
- * Returns true if the rpipe has room for another segment and there
- * are delayed segments queued waiting to be submitted.
- */
-static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
-{
- return atomic_inc_return(&rpipe->segs_available) > 0
- && !list_empty(&rpipe->seg_list);
-}
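-
-/*
- * Illustrative note (not original): the typical completion-path use of
- * the helpers above is
- *
- *	if (rpipe_avail_inc(rpipe))
- *		wa_xfer_delayed_run(rpipe);	// in wa-xfer.c
- *
- * i.e. only kick the delayed queue when a request slot freed up and
- * segments are actually waiting on it.
- */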
-
-
-/* Transferring data */
-extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
- struct urb *, gfp_t);
-extern int wa_urb_dequeue(struct wahc *, struct urb *, int);
-extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
-
-
-/* Misc
- *
- * FIXME: Refcounting for the actual @hwahc object is not correct; I
- * mean, this should be refcounting on the HCD underneath, but
- * it is not. In any case, the semantics for HCD refcounting
- * are *weird*...on refcount reaching zero it just frees
- * it...no RC specific function is called...unless I miss
- * something.
- *
- * FIXME: has to go away in favour of a 'struct' hcd based solution
- */
-static inline struct wahc *wa_get(struct wahc *wa)
-{
- usb_get_intf(wa->usb_iface);
- return wa;
-}
-
-static inline void wa_put(struct wahc *wa)
-{
- usb_put_intf(wa->usb_iface);
-}
-
-
-static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
-{
- return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- feature,
- wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
-}
-
-
-static inline int __wa_set_feature(struct wahc *wa, u16 feature)
-{
- return __wa_feature(wa, 1, feature);
-}
-
-
-static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
-{
- return __wa_feature(wa, 0, feature);
-}
-
-
-/**
- * Return the status of a Wire Adapter
- *
- * @wa: Wire Adapter instance
- * @returns < 0 errno code on error, or status bitmap as described
- * in WUSB1.0[8.3.1.6].
- *
- * NOTE: the status buffer must be heap-allocated; some arches can't
- * do USB transfers from stack memory
- */
-static inline
-s32 __wa_get_status(struct wahc *wa)
-{
- s32 result;
- result = usb_control_msg(
- wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
- USB_REQ_GET_STATUS,
- USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- &wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT);
- if (result >= 0)
- result = wa->status;
- return result;
-}
-
-
-/**
- * Waits until the Wire Adapter's status matches @mask/@value
- *
- * @wa: Wire Adapter instance.
- * @returns < 0 errno code on error, otherwise status.
- *
- * Loop until the WA's status matches the mask and value (status & mask
- * == value). Timeout if it doesn't happen.
- *
- * FIXME: is there an official specification on how long status
- * changes can take?
- */
-static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
-{
- s32 result;
- unsigned loops = 10;
- do {
- msleep(50);
- result = __wa_get_status(wa);
- if ((result & mask) == value)
- break;
- if (loops-- == 0) {
- result = -ETIMEDOUT;
- break;
- }
- } while (result >= 0);
- return result;
-}
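-
-/*
- * Illustrative counterpart to __wa_stop() below (hypothetical, not part
- * of the original header): enable the WA and wait for the WA_ENABLE
- * status bit to latch.
- */
-static inline int example_wa_start(struct wahc *wa)
-{
- int result;
-
- result = __wa_set_feature(wa, WA_ENABLE);
- if (result < 0)
- return result;
- /* status must read back with WA_ENABLE set */
- result = __wa_wait_status(wa, WA_ENABLE, WA_ENABLE);
- return result < 0 ? result : 0;
-}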
-
-
-/** Command @hwahc to stop; always returns 0 (errors are logged, not returned) */
-static inline int __wa_stop(struct wahc *wa)
-{
- int result;
- struct device *dev = &wa->usb_iface->dev;
-
- result = __wa_clear_feature(wa, WA_ENABLE);
- if (result < 0 && result != -ENODEV) {
- dev_err(dev, "error commanding HC to stop: %d\n", result);
- goto out;
- }
- result = __wa_wait_status(wa, WA_ENABLE, 0);
- if (result < 0 && result != -ENODEV)
- dev_err(dev, "error waiting for HC to stop: %d\n", result);
-out:
- return 0;
-}
-
-
-#endif /* #ifndef __HWAHC_INTERNAL_H__ */
diff --git a/drivers/staging/wusbcore/wa-nep.c b/drivers/staging/wusbcore/wa-nep.c
deleted file mode 100644
index 5f0656db5482..000000000000
--- a/drivers/staging/wusbcore/wa-nep.c
+++ /dev/null
@@ -1,289 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
- * Notification EndPoint support
- *
- * Copyright (C) 2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This part takes care of getting the notification from the hw
- * only and dispatching it through the wusbd workqueue into
- * wa_notif_dispatch. Handling is done there.
- *
- * WA notifications are limited in size; most of them are three or
- * four bytes long, and the longest is the HWA Device Notification,
- * which would not exceed 38 bytes (DNs are limited in payload to 32
- * bytes plus 3 bytes header (WUSB1.0[7.6p2]), plus 3 bytes HWA
- * header (WUSB1.0[8.5.4.2]).
- *
- * It is not clear if more than one Device Notification can be packed
- * in a HWA Notification, I assume no because of the wording in
- * WUSB1.0[8.5.4.2]. In any case, the biggest any notification could
- * get is 256 bytes (as the bLength field is a byte).
- *
- * So what we do is we have this buffer and read into it; when a
- * notification arrives we schedule work to a specific, single thread
- * workqueue (so notifications are serialized) and copy the
- * notification data. After scheduling the work, we rearm the read from
- * the notification endpoint.
- *
- * Entry points here are:
- *
- * wa_nep_[create|destroy]() To initialize/release this subsystem
- *
- * wa_nep_cb() Callback for the notification
- * endpoint; when data is ready, this
- * does the dispatching.
- */
-#include <linux/workqueue.h>
-#include <linux/ctype.h>
-#include <linux/slab.h>
-
-#include "wa-hc.h"
-#include "wusbhc.h"
-
-/* Structure for queueing notifications to the workqueue */
-struct wa_notif_work {
- struct work_struct work;
- struct wahc *wa;
- size_t size;
- u8 data[];
-};
-
-/*
- * Process incoming notifications from the WA's Notification EndPoint
- * [the wusbd workqueue, basically]
- *
- * @_nw: Pointer to a descriptor which has the pointer to the
- * @wa, the size of the buffer and the work queue
- * structure (so we can free all when done).
- *
- * All notifications follow the same format; they need to start with a
- * 'struct wa_notif_hdr' header, so it is easy to parse through
- * them. We just break the buffer into individual notifications (the
- * standard doesn't say if it can be done or is forbidden, so we are
- * cautious) and dispatch each.
- *
- * So the handling layers are:
- *
- * WA specific notification (from NEP)
- * Device Notification Received -> wa_handle_notif_dn()
- * WUSB Device notification generic handling
- * BPST Adjustment -> wa_handle_notif_bpst_adj()
- * ... -> ...
- *
- * @wa has to be referenced
- */
-static void wa_notif_dispatch(struct work_struct *ws)
-{
- void *itr;
- u8 missing = 0;
- struct wa_notif_work *nw = container_of(ws, struct wa_notif_work,
- work);
- struct wahc *wa = nw->wa;
- struct wa_notif_hdr *notif_hdr;
- size_t size;
-
- struct device *dev = &wa->usb_iface->dev;
-
-#if 0
- /* FIXME: need to check for this??? */
- if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */
- goto out; /* screw it */
-#endif
- atomic_dec(&wa->notifs_queued); /* Throttling ctl */
- size = nw->size;
- itr = nw->data;
-
- while (size) {
- if (size < sizeof(*notif_hdr)) {
- missing = sizeof(*notif_hdr) - size;
- goto exhausted_buffer;
- }
- notif_hdr = itr;
- if (size < notif_hdr->bLength)
- goto exhausted_buffer;
- itr += notif_hdr->bLength;
- size -= notif_hdr->bLength;
- /* Dispatch the notification [don't use itr or size!] */
- switch (notif_hdr->bNotifyType) {
- case HWA_NOTIF_DN: {
- struct hwa_notif_dn *hwa_dn;
- hwa_dn = container_of(notif_hdr, struct hwa_notif_dn,
- hdr);
- wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr,
- hwa_dn->dndata,
- notif_hdr->bLength - sizeof(*hwa_dn));
- break;
- }
- case WA_NOTIF_TRANSFER:
- wa_handle_notif_xfer(wa, notif_hdr);
- break;
- case HWA_NOTIF_BPST_ADJ:
- break; /* no action needed for BPST ADJ. */
- case DWA_NOTIF_RWAKE:
- case DWA_NOTIF_PORTSTATUS:
- /* FIXME: unimplemented WA NOTIFs */
- /* fallthru */
- default:
- dev_err(dev, "HWA: unknown notification 0x%x, "
- "%zu bytes; discarding\n",
- notif_hdr->bNotifyType,
- (size_t)notif_hdr->bLength);
- break;
- }
- }
-out:
- wa_put(wa);
- kfree(nw);
- return;
-
- /* THIS SHOULD NOT HAPPEN
- *
- * Buffer exhausted with partial data remaining; just warn and
- * discard the data.
- */
-exhausted_buffer:
- dev_warn(dev, "HWA: device sent short notification, "
- "%d bytes missing; discarding %d bytes.\n",
- missing, (int)size);
- goto out;
-}
-
-/*
- * Deliver incoming WA notifications to the wusbwa workqueue
- *
- * @wa: Pointer the Wire Adapter Controller Data Streaming
- * instance (part of an 'struct usb_hcd').
- * @size: Size of the received buffer
- * @returns 0 if ok, < 0 errno code on error.
- *
- * The input buffer is @wa->nep_buffer, with @size bytes
- * (guaranteed to fit in the allocated space,
- * @wa->nep_buffer_size).
- */
-static int wa_nep_queue(struct wahc *wa, size_t size)
-{
- int result = 0;
- struct device *dev = &wa->usb_iface->dev;
- struct wa_notif_work *nw;
-
- /* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
- BUG_ON(size > wa->nep_buffer_size);
- if (size == 0)
- goto out;
- if (atomic_read(&wa->notifs_queued) > 200) {
- if (printk_ratelimit())
- dev_err(dev, "Too many notifications queued, "
- "throttling back\n");
- goto out;
- }
- nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
- if (nw == NULL) {
- if (printk_ratelimit())
- dev_err(dev, "No memory to queue notification\n");
- result = -ENOMEM;
- goto out;
- }
- INIT_WORK(&nw->work, wa_notif_dispatch);
- nw->wa = wa_get(wa);
- nw->size = size;
- memcpy(nw->data, wa->nep_buffer, size);
- atomic_inc(&wa->notifs_queued); /* Throttling ctl */
- queue_work(wusbd, &nw->work);
-out:
- /* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
- return result;
-}
-
-/*
- * Callback for the notification event endpoint
- *
- * Checks that everything is fine and then passes the data to be
- * queued to the workqueue.
- */
-static void wa_nep_cb(struct urb *urb)
-{
- int result;
- struct wahc *wa = urb->context;
- struct device *dev = &wa->usb_iface->dev;
-
- switch (result = urb->status) {
- case 0:
- result = wa_nep_queue(wa, urb->actual_length);
- if (result < 0)
- dev_err(dev, "NEP: unable to process notification(s): "
- "%d\n", result);
- break;
- case -ECONNRESET: /* Not an error, but a controlled situation; */
- case -ENOENT: /* (we killed the URB)...so, no broadcast */
- case -ESHUTDOWN:
- dev_dbg(dev, "NEP: going down %d\n", urb->status);
- goto out;
- default: /* On general errors, we retry unless it gets ugly */
- if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
- EDC_ERROR_TIMEFRAME)) {
- dev_err(dev, "NEP: URB max acceptable errors "
- "exceeded, resetting device\n");
- wa_reset_all(wa);
- goto out;
- }
- dev_err(dev, "NEP: URB error %d\n", urb->status);
- }
- result = wa_nep_arm(wa, GFP_ATOMIC);
- if (result < 0) {
- dev_err(dev, "NEP: cannot submit URB: %d\n", result);
- wa_reset_all(wa);
- }
-out:
- return;
-}
-
-/*
- * Initialize @wa's notification and event's endpoint stuff
- *
- * This includes allocating the read buffer, the context ID
- * allocation bitmap, the URB and submitting the URB.
- */
-int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
-{
- int result = -ENOMEM;
- struct usb_endpoint_descriptor *epd;
- struct usb_device *usb_dev = interface_to_usbdev(iface);
- struct device *dev = &iface->dev;
-
- edc_init(&wa->nep_edc);
- epd = &iface->cur_altsetting->endpoint[0].desc;
- wa->nep_buffer_size = 1024;
- wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
- if (!wa->nep_buffer)
- goto error_nep_buffer;
- wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->nep_urb == NULL)
- goto error_urb_alloc;
- usb_fill_int_urb(wa->nep_urb, usb_dev,
- usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
- wa->nep_buffer, wa->nep_buffer_size,
- wa_nep_cb, wa, epd->bInterval);
- result = wa_nep_arm(wa, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "Cannot submit notification URB: %d\n", result);
- goto error_nep_arm;
- }
- return 0;
-
-error_nep_arm:
- usb_free_urb(wa->nep_urb);
-error_urb_alloc:
- kfree(wa->nep_buffer);
-error_nep_buffer:
- return result; /* -ENOMEM from allocs, or wa_nep_arm()'s errno */
-}
-
-void wa_nep_destroy(struct wahc *wa)
-{
- wa_nep_disarm(wa);
- usb_free_urb(wa->nep_urb);
- kfree(wa->nep_buffer);
-}
diff --git a/drivers/staging/wusbcore/wa-rpipe.c b/drivers/staging/wusbcore/wa-rpipe.c
deleted file mode 100644
index a5734cbcd5ad..000000000000
--- a/drivers/staging/wusbcore/wa-rpipe.c
+++ /dev/null
@@ -1,539 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * WUSB Wire Adapter
- * rpipe management
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * FIXME: docs
- *
- * RPIPE
- *
- * Targeted at different downstream endpoints
- *
- * Descriptor: use to config the remote pipe.
- *
- * The number of blocks could be dynamic (wBlocks in descriptor is
- * 0)--need to schedule them then.
- *
- * Each bit in wa->rpipe_bm represents if an rpipe is being used or
- * not. Rpipes are represented with a 'struct wa_rpipe' that is
- * attached to the hcpriv member of a 'struct usb_host_endpoint'.
- *
- * When you need to xfer data to an endpoint, you get an rpipe for it
- * with rpipe_get_by_ep(), which gives you a reference to the rpipe
- * and keeps a single one (the first one) with the endpoint. When you
- * are done transferring, you drop that reference. At the end the
- * rpipe is always allocated and bound to the endpoint. There it might
- * be recycled when not used.
- *
- * Addresses:
- *
- * We use a 1:1 mapping mechanism between port address (0 based
- * index, actually) and the address. The USB stack knows about this.
- *
- * USB Stack port number 4 (1 based)
- * WUSB code port index 3 (0 based)
- * USB Address 5 (2 based -- 0 is for default, 1 for root hub)
- *
- * Now, because we don't use the concept of a default address exactly
- * like the (wired) USB code does, we need to kind of skip it. So we
- * never take addresses from the urb->pipe, but from the
- * urb->dev->devnum, to make sure that we always have the right
- * destination address.
- */
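-
-/*
- * Illustrative note (not original): with the scheme above, a device at
- * urb->dev->devnum == 5 that has not yet authenticated is addressed as
- * 5 | 0x80 == 0x85 (see the unauth handling in rpipe_aim() below); once
- * authenticated it is addressed as plain 5.
- */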
-#include <linux/atomic.h>
-#include <linux/bitmap.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "wusbhc.h"
-#include "wa-hc.h"
-
-static int __rpipe_get_descr(struct wahc *wa,
- struct usb_rpipe_descriptor *descr, u16 index)
-{
- ssize_t result;
- struct device *dev = &wa->usb_iface->dev;
-
- /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
- * function because the arguments are different.
- */
- result = usb_control_msg(
- wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
- USB_REQ_GET_DESCRIPTOR,
- USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
- USB_CTRL_GET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
- index, (int)result);
- goto error;
- }
- if (result < sizeof(*descr)) {
- dev_err(dev, "rpipe %u: got short descriptor "
- "(%zd vs %zd bytes needed)\n",
- index, result, sizeof(*descr));
- result = -EINVAL;
- goto error;
- }
- result = 0;
-
-error:
- return result;
-}
-
-/*
- *
- * The descriptor is assumed to be properly initialized (ie: you got
- * it through __rpipe_get_descr()).
- */
-static int __rpipe_set_descr(struct wahc *wa,
- struct usb_rpipe_descriptor *descr, u16 index)
-{
- ssize_t result;
- struct device *dev = &wa->usb_iface->dev;
-
- /* we cannot use the standard descriptor helpers here because the
- * arguments are different.
- */
- result = usb_control_msg(
- wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- USB_REQ_SET_DESCRIPTOR,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
- USB_CTRL_SET_TIMEOUT);
- if (result < 0) {
- dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
- index, (int)result);
- goto error;
- }
- if (result < sizeof(*descr)) {
- dev_err(dev, "rpipe %u: sent short descriptor "
- "(%zd vs %zd bytes required)\n",
- index, result, sizeof(*descr));
- result = -EINVAL;
- goto error;
- }
- result = 0;
-
-error:
- return result;
-}
-
-static void rpipe_init(struct wa_rpipe *rpipe)
-{
- kref_init(&rpipe->refcnt);
- spin_lock_init(&rpipe->seg_lock);
- INIT_LIST_HEAD(&rpipe->seg_list);
- INIT_LIST_HEAD(&rpipe->list_node);
-}
-
-static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wa->rpipe_lock, flags);
- rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
- if (rpipe_idx < wa->rpipes)
- set_bit(rpipe_idx, wa->rpipe_bm);
- spin_unlock_irqrestore(&wa->rpipe_lock, flags);
-
- return rpipe_idx;
-}
-
-static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wa->rpipe_lock, flags);
- clear_bit(rpipe_idx, wa->rpipe_bm);
- spin_unlock_irqrestore(&wa->rpipe_lock, flags);
-}
-
-void rpipe_destroy(struct kref *_rpipe)
-{
- struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
- u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); /* wRPipeIndex is 16 bits */
-
- if (rpipe->ep)
- rpipe->ep->hcpriv = NULL;
- rpipe_put_idx(rpipe->wa, index);
- wa_put(rpipe->wa);
- kfree(rpipe);
-}
-EXPORT_SYMBOL_GPL(rpipe_destroy);
-
-/*
- * Locate an idle rpipe, create a structure for it and return it
- *
- * @wa is referenced and unlocked
- * @crs enum rpipe_attr, required endpoint characteristics
- *
- * The rpipe can be used only sequentially (not in parallel).
- *
- * The rpipe is moved into the "ready" state.
- */
-static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
- gfp_t gfp)
-{
- int result;
- unsigned rpipe_idx;
- struct wa_rpipe *rpipe;
- struct device *dev = &wa->usb_iface->dev;
-
- rpipe = kzalloc(sizeof(*rpipe), gfp);
- if (rpipe == NULL)
- return -ENOMEM;
- rpipe_init(rpipe);
-
- /* Look for an idle pipe */
- for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
- rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
- if (rpipe_idx >= wa->rpipes) /* no more pipes :( */
- break;
- result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
- if (result < 0)
- dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
- rpipe_idx, result);
- else if ((rpipe->descr.bmCharacteristics & crs) != 0)
- goto found;
- rpipe_put_idx(wa, rpipe_idx);
- }
- *prpipe = NULL;
- kfree(rpipe);
- return -ENXIO;
-
-found:
- set_bit(rpipe_idx, wa->rpipe_bm);
- rpipe->wa = wa_get(wa);
- *prpipe = rpipe;
- return 0;
-}
-
-static int __rpipe_reset(struct wahc *wa, unsigned index)
-{
- int result;
- struct device *dev = &wa->usb_iface->dev;
-
- result = usb_control_msg(
- wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- USB_REQ_RPIPE_RESET,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (result < 0)
- dev_err(dev, "rpipe %u: reset failed: %d\n",
- index, result);
- return result;
-}
-
-/*
- * Fake companion descriptor for ep0
- *
- * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
- */
-static struct usb_wireless_ep_comp_descriptor epc0 = {
- .bLength = sizeof(epc0),
- .bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
- .bMaxBurst = 1,
- .bMaxSequence = 2,
-};
-
-/*
- * Look for EP companion descriptor
- *
- * Walk the endpoint's extra descriptors until we find the companion descriptor
- */
-static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
- struct device *dev, struct usb_host_endpoint *ep)
-{
- void *itr;
- size_t itr_size;
- struct usb_descriptor_header *hdr;
- struct usb_wireless_ep_comp_descriptor *epcd;
-
- if (ep->desc.bEndpointAddress == 0) {
- epcd = &epc0;
- goto out;
- }
- itr = ep->extra;
- itr_size = ep->extralen;
- epcd = NULL;
- while (itr_size > 0) {
- if (itr_size < sizeof(*hdr)) {
- dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
- "at offset %zu: only %zu bytes left\n",
- ep->desc.bEndpointAddress,
- itr - (void *) ep->extra, itr_size);
- break;
- }
- hdr = itr;
- if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
- epcd = itr;
- break;
- }
- if (hdr->bLength > itr_size) {
- dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
- "at offset %zu (type 0x%02x) "
- "length %d but only %zu bytes left\n",
- ep->desc.bEndpointAddress,
- itr - (void *) ep->extra, hdr->bDescriptorType,
- hdr->bLength, itr_size);
- break;
- }
- itr += hdr->bLength;
- itr_size -= hdr->bLength;
- }
-out:
- return epcd;
-}
-
-/*
- * Aim an rpipe to its device & endpoint destination
- *
- * Make sure we change the address to unauthenticated if the device
- * is WUSB and it is not authenticated.
- */
-static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
- struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
-{
- int result = -ENOMSG; /* better code for lack of companion? */
- struct device *dev = &wa->usb_iface->dev;
- struct usb_device *usb_dev = urb->dev;
- struct usb_wireless_ep_comp_descriptor *epcd;
- u32 ack_window, epcd_max_sequence;
- u8 unauth;
-
- epcd = rpipe_epc_find(dev, ep);
- if (epcd == NULL) {
- dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
- ep->desc.bEndpointAddress);
- goto error;
- }
- unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
- __rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
- atomic_set(&rpipe->segs_available,
- le16_to_cpu(rpipe->descr.wRequests));
- /* FIXME: block allocation system; request with queuing and timeout */
- /* FIXME: compute so seg_size > ep->maxpktsize */
- rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
- /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
- if (usb_endpoint_xfer_isoc(&ep->desc))
- rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize;
- else
- rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;
-
- rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int,
- epcd->bMaxBurst, 16U), 1U);
- rpipe->descr.hwa_bDeviceInfoIndex =
- wusb_port_no_to_idx(urb->dev->portnum);
- /* FIXME: use maximum speed as supported or recommended by device */
- rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
- UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
-
- dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
- urb->dev->devnum, urb->dev->devnum | unauth,
- le16_to_cpu(rpipe->descr.wRPipeIndex),
- usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
-
- rpipe->descr.hwa_reserved = 0;
-
- rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
- /* FIXME: bDataSequence */
- rpipe->descr.bDataSequence = 0;
-
- /* start with base window of hwa_bMaxBurst bits starting at 0. */
- ack_window = 0xFFFFFFFF >> (32 - rpipe->descr.hwa_bMaxBurst);
- rpipe->descr.dwCurrentWindow = cpu_to_le32(ack_window);
- epcd_max_sequence = max(min_t(unsigned int,
- epcd->bMaxSequence, 32U), 2U);
- rpipe->descr.bMaxDataSequence = epcd_max_sequence - 1;
- rpipe->descr.bInterval = ep->desc.bInterval;
- if (usb_endpoint_xfer_isoc(&ep->desc))
- rpipe->descr.bOverTheAirInterval = epcd->bOverTheAirInterval;
- else
- rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
- /* FIXME: xmit power & preamble blah blah */
- rpipe->descr.bmAttribute = (ep->desc.bmAttributes &
- USB_ENDPOINT_XFERTYPE_MASK);
- /* rpipe->descr.bmCharacteristics RO */
- rpipe->descr.bmRetryOptions = (wa->wusb->retry_count & 0xF);
- /* FIXME: use for assessing link quality? */
- rpipe->descr.wNumTransactionErrors = 0;
- result = __rpipe_set_descr(wa, &rpipe->descr,
- le16_to_cpu(rpipe->descr.wRPipeIndex));
- if (result < 0) {
- dev_err(dev, "Cannot aim rpipe: %d\n", result);
- goto error;
- }
- result = 0;
-error:
- return result;
-}
-
-/*
- * Check an aimed rpipe to make sure it points to where we want
- *
- * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
- * space; when it is set, we OR in 0x80 to make an unauth address.
- */
-static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
- const struct usb_host_endpoint *ep,
- const struct urb *urb, gfp_t gfp)
-{
- int result = 0;
- struct device *dev = &wa->usb_iface->dev;
- u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
-
-#define AIM_CHECK(rdf, val, text) \
- do { \
- if (rpipe->descr.rdf != (val)) { \
- dev_err(dev, \
- "rpipe aim discrepancy: " #rdf " " text "\n", \
- rpipe->descr.rdf, (val)); \
- result = -EINVAL; \
- WARN_ON(1); \
- } \
- } while (0)
- AIM_CHECK(hwa_bDeviceInfoIndex, portnum, "(%u vs %u)");
- AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
- UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
- "(%u vs %u)");
- AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
- AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
- AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
-#undef AIM_CHECK
- return result;
-}
-
-#ifndef CONFIG_BUG
-#define CONFIG_BUG 0
-#endif
-
-/*
- * Make sure there is an rpipe allocated for an endpoint
- *
- * If already allocated, we just refcount it; if not, we get an
- * idle one, aim it to the right location and take it.
- *
- * Attaches to ep->hcpriv and rpipe->ep to ep.
- */
-int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
- struct urb *urb, gfp_t gfp)
-{
- int result = 0;
- struct device *dev = &wa->usb_iface->dev;
- struct wa_rpipe *rpipe;
- u8 eptype;
-
- mutex_lock(&wa->rpipe_mutex);
- rpipe = ep->hcpriv;
- if (rpipe != NULL) {
- if (CONFIG_BUG == 1) {
- result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
- if (result < 0)
- goto error;
- }
- __rpipe_get(rpipe);
- dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n",
- ep->desc.bEndpointAddress,
- le16_to_cpu(rpipe->descr.wRPipeIndex));
- } else {
- /* hmm, assign idle rpipe, aim it */
- eptype = ep->desc.bmAttributes & 0x03;
- result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
- if (result < 0)
- goto error;
- result = rpipe_aim(rpipe, wa, ep, urb, gfp);
- if (result < 0) {
- rpipe_put(rpipe);
- goto error;
- }
- ep->hcpriv = rpipe;
- rpipe->ep = ep;
- __rpipe_get(rpipe); /* for caching into ep->hcpriv */
- dev_dbg(dev, "ep 0x%02x: using rpipe %u\n",
- ep->desc.bEndpointAddress,
- le16_to_cpu(rpipe->descr.wRPipeIndex));
- }
-error:
- mutex_unlock(&wa->rpipe_mutex);
- return result;
-}
-
-/*
- * Allocate the bitmap that tracks which rpipes are in use.
- */
-int wa_rpipes_create(struct wahc *wa)
-{
- wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
- wa->rpipe_bm = bitmap_zalloc(wa->rpipes, GFP_KERNEL);
- if (wa->rpipe_bm == NULL)
- return -ENOMEM;
- return 0;
-}
-
-void wa_rpipes_destroy(struct wahc *wa)
-{
- struct device *dev = &wa->usb_iface->dev;
-
- if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
- WARN_ON(1);
- dev_err(dev, "BUG: pipes not released on exit: %*pb\n",
- wa->rpipes, wa->rpipe_bm);
- }
- bitmap_free(wa->rpipe_bm);
-}
-
-/*
- * Release resources allocated for an endpoint
- *
- * If there is an rpipe associated with this endpoint, abort any pending
- * transfers and put it. If the rpipe ends up being destroyed,
- * rpipe_destroy() will clean up ep->hcpriv.
- *
- * This is called before calling hcd->stop(), so you don't need to do
- * anything else in there.
- */
-void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
-{
- struct wa_rpipe *rpipe;
-
- mutex_lock(&wa->rpipe_mutex);
- rpipe = ep->hcpriv;
- if (rpipe != NULL) {
- u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
-
- usb_control_msg(
- wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- USB_REQ_RPIPE_ABORT,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
- rpipe_put(rpipe);
- }
- mutex_unlock(&wa->rpipe_mutex);
-}
-EXPORT_SYMBOL_GPL(rpipe_ep_disable);
-
-/* Clear the stalled status of an RPIPE. */
-void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
-{
- struct wa_rpipe *rpipe;
-
- mutex_lock(&wa->rpipe_mutex);
- rpipe = ep->hcpriv;
- if (rpipe != NULL) {
- u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
-
- usb_control_msg(
- wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- RPIPE_STALL, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
- }
- mutex_unlock(&wa->rpipe_mutex);
-}
-EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
diff --git a/drivers/staging/wusbcore/wa-xfer.c b/drivers/staging/wusbcore/wa-xfer.c
deleted file mode 100644
index abf88cea37bb..000000000000
--- a/drivers/staging/wusbcore/wa-xfer.c
+++ /dev/null
@@ -1,2927 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * WUSB Wire Adapter
- * Data transfer and URB enqueuing
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * How transfers work: get a buffer, break it up in segments (segment
- * size is a multiple of the maxpacket size). For each segment issue a
- * segment request (struct wa_xfer_*), then send the data buffer if
- * out or nothing if in (all over the DTO endpoint).
- *
- * For each submitted segment request, a notification will come over
- * the NEP endpoint and a transfer result (struct xfer_result) will
- * arrive in the DTI URB. Read it, get the xfer ID, see if there is
- * data coming (inbound transfer), schedule a read and handle it.
- *
- * Sounds simple, it is a pain to implement.
- *
- *
- * ENTRY POINTS
- *
- * FIXME
- *
- * LIFE CYCLE / STATE DIAGRAM
- *
- * FIXME
- *
- * THIS CODE IS DISGUSTING
- *
- * Warned you are; it's my second try and still not happy with it.
- *
- * NOTES:
- *
- * - Isoc is supported (HWA only; see the hwaiso handling below)
- *
- * - Supports DMA xfers, control, bulk and maybe interrupt
- *
- * - Does not recycle unused rpipes
- *
- * An rpipe is assigned to an endpoint the first time it is used,
- * and then it's there, assigned, until the endpoint is disabled
- * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
- * rpipe to the endpoint is done under the wa->rpipe_mutex.
- *
- * Two ways it could be done:
- *
- * (a) set up a timer every time an rpipe's use count drops to 1
- * (which means unused) or when a transfer ends. Reset the
- * timer when a xfer is queued. If the timer expires, release
- * the rpipe [see rpipe_ep_disable()].
- *
- * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
- * when none are found go over the list, check their endpoint
- * and their activity record (if no last-xfer-done-ts in the
- * last x seconds), take it
- *
- * However, due to the fact that we have a set of limited
- * resources (max-segments-at-the-same-time per xfer,
- * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
- * we are going to have to rebuild all this based on a scheduler,
- * where we have a list of transactions to do and, based on the
- * availability of the different required components (blocks,
- * rpipes, segment slots, etc), we go scheduling them. Painful.
- */
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/hash.h>
-#include <linux/ratelimit.h>
-#include <linux/export.h>
-#include <linux/scatterlist.h>
-
-#include "wa-hc.h"
-#include "wusbhc.h"
-
-enum {
- /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
- WA_SEGS_MAX = 128,
-};
-
-enum wa_seg_status {
- WA_SEG_NOTREADY,
- WA_SEG_READY,
- WA_SEG_DELAYED,
- WA_SEG_SUBMITTED,
- WA_SEG_PENDING,
- WA_SEG_DTI_PENDING,
- WA_SEG_DONE,
- WA_SEG_ERROR,
- WA_SEG_ABORTED,
-};
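-
-/*
- * Illustrative note: a plausible reading of the segment life cycle above
- * (the file header leaves the state diagram as a FIXME):
- *
- *	NOTREADY -> READY -> SUBMITTED -> PENDING -> DTI_PENDING -> DONE
- *	               \---> DELAYED ---^
- *
- * with ERROR and ABORTED as terminal states reachable from the
- * in-flight states.
- */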
-
-static void wa_xfer_delayed_run(struct wa_rpipe *);
-static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
-
-/*
- * Life cycle governed by 'struct urb' (the refcount of the struct is
- * that of the 'struct urb' and usb_free_urb() would free the whole
- * struct).
- */
-struct wa_seg {
- struct urb tr_urb; /* transfer request urb. */
- struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
- struct urb *dto_urb; /* for data output. */
- struct list_head list_node; /* for rpipe->req_list */
- struct wa_xfer *xfer; /* out xfer */
- u8 index; /* which segment we are */
- int isoc_frame_count; /* number of isoc frames in this segment. */
- int isoc_frame_offset; /* starting frame offset in the xfer URB. */
- /* Isoc frame that the current transfer buffer corresponds to. */
- int isoc_frame_index;
- int isoc_size; /* size of all isoc frames sent by this seg. */
- enum wa_seg_status status;
- ssize_t result; /* bytes xfered or error */
- struct wa_xfer_hdr xfer_hdr;
-};
-
-static inline void wa_seg_init(struct wa_seg *seg)
-{
- usb_init_urb(&seg->tr_urb);
-
- /* set the remaining memory to 0. */
- memset(((void *)seg) + sizeof(seg->tr_urb), 0,
- sizeof(*seg) - sizeof(seg->tr_urb));
-}
-
-/*
- * Protected by xfer->lock
- *
- */
-struct wa_xfer {
- struct kref refcnt;
- struct list_head list_node;
- spinlock_t lock;
- u32 id;
-
- struct wahc *wa; /* Wire adapter we are plugged to */
- struct usb_host_endpoint *ep;
- struct urb *urb; /* URB we are transferring for */
- struct wa_seg **seg; /* transfer segments */
- u8 segs, segs_submitted, segs_done;
- unsigned is_inbound:1;
- unsigned is_dma:1;
- size_t seg_size;
- int result;
-
- gfp_t gfp; /* allocation mask */
-
- struct wusb_dev *wusb_dev; /* for activity timestamps */
-};
-
-static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
- struct wa_seg *seg, int curr_iso_frame);
-static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
- int starting_index, enum wa_seg_status status);
-
-static inline void wa_xfer_init(struct wa_xfer *xfer)
-{
- kref_init(&xfer->refcnt);
- INIT_LIST_HEAD(&xfer->list_node);
- spin_lock_init(&xfer->lock);
-}
-
-/*
- * Destroy a transfer structure
- *
- * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
- * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
- */
-static void wa_xfer_destroy(struct kref *_xfer)
-{
- struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
- if (xfer->seg) {
- unsigned cnt;
- for (cnt = 0; cnt < xfer->segs; cnt++) {
- struct wa_seg *seg = xfer->seg[cnt];
- if (seg) {
- usb_free_urb(seg->isoc_pack_desc_urb);
- if (seg->dto_urb) {
- kfree(seg->dto_urb->sg);
- usb_free_urb(seg->dto_urb);
- }
- usb_free_urb(&seg->tr_urb);
- }
- }
- kfree(xfer->seg);
- }
- kfree(xfer);
-}
-
-static void wa_xfer_get(struct wa_xfer *xfer)
-{
- kref_get(&xfer->refcnt);
-}
-
-static void wa_xfer_put(struct wa_xfer *xfer)
-{
- kref_put(&xfer->refcnt, wa_xfer_destroy);
-}
-
-/*
- * Try to get exclusive access to the DTO endpoint resource. Return true
- * if successful.
- */
-static inline int __wa_dto_try_get(struct wahc *wa)
-{
- return (test_and_set_bit(0, &wa->dto_in_use) == 0);
-}
-
-/* Release the DTO endpoint resource. */
-static inline void __wa_dto_put(struct wahc *wa)
-{
- clear_bit_unlock(0, &wa->dto_in_use);
-}
-
-/* Service RPIPEs that are waiting on the DTO resource. */
-static void wa_check_for_delayed_rpipes(struct wahc *wa)
-{
- unsigned long flags;
- int dto_waiting = 0;
- struct wa_rpipe *rpipe;
-
- spin_lock_irqsave(&wa->rpipe_lock, flags);
- while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
- rpipe = list_first_entry(&wa->rpipe_delayed_list,
- struct wa_rpipe, list_node);
- __wa_xfer_delayed_run(rpipe, &dto_waiting);
- /* remove this RPIPE from the list if it is not waiting. */
- if (!dto_waiting) {
- pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
- __func__,
- le16_to_cpu(rpipe->descr.wRPipeIndex));
- list_del_init(&rpipe->list_node);
- }
- }
- spin_unlock_irqrestore(&wa->rpipe_lock, flags);
-}
-
-/* add this RPIPE to the end of the delayed RPIPE list. */
-static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wa->rpipe_lock, flags);
- /* add rpipe to the list if it is not already on it. */
- if (list_empty(&rpipe->list_node)) {
- pr_debug("%s: adding RPIPE %d to the delayed list.\n",
- __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
- list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
- }
- spin_unlock_irqrestore(&wa->rpipe_lock, flags);
-}
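-
-/*
- * Illustrative sketch (not original code): the intended use of the DTO
- * helpers above from a submission path. In the real driver the DTO
- * resource is usually released from the dto completion callback, not
- * inline as shown here.
- */
-static void __maybe_unused example_send_with_dto(struct wahc *wa,
- struct wa_rpipe *rpipe)
-{
- if (__wa_dto_try_get(wa)) {
- /* ... submit the data-out URB here ... */
- __wa_dto_put(wa);
- /* service any rpipes that queued up while we held DTO */
- wa_check_for_delayed_rpipes(wa);
- } else {
- /* DTO busy: park this rpipe until the holder releases it */
- wa_add_delayed_rpipe(wa, rpipe);
- }
-}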
-
-/*
- * xfer is referenced
- *
- * xfer->lock has to be unlocked
- *
- * We take xfer->lock for setting the result; this is a barrier
- * against drivers/usb/core/hcd.c:unlink1() being called after we call
- * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
- * reference to the transfer.
- */
-static void wa_xfer_giveback(struct wa_xfer *xfer)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
- list_del_init(&xfer->list_node);
- usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
- spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
- /* FIXME: segmentation broken -- kills DWA */
- wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
- wa_put(xfer->wa);
- wa_xfer_put(xfer);
-}
-
-/*
- * xfer is referenced
- *
- * xfer->lock has to be unlocked
- */
-static void wa_xfer_completion(struct wa_xfer *xfer)
-{
- if (xfer->wusb_dev)
- wusb_dev_put(xfer->wusb_dev);
- rpipe_put(xfer->ep->hcpriv);
- wa_xfer_giveback(xfer);
-}
-
-/*
- * Initialize a transfer's ID
- *
- * We need to use a sequential number; if we use the pointer or the
- * hash of the pointer, it can repeat over sequential transfers and
- * then it will confuse the HWA....wonder why in hell they put a 32
- * bit handle in there then.
- */
-static void wa_xfer_id_init(struct wa_xfer *xfer)
-{
- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
-}
-
-/* Return the xfer's ID. */
-static inline u32 wa_xfer_id(struct wa_xfer *xfer)
-{
- return xfer->id;
-}
-
-/* Return the xfer's ID in transport format (little endian). */
-static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
-{
- return cpu_to_le32(xfer->id);
-}
-
-/*
- * If transfer is done, wrap it up and return true
- *
- * xfer->lock has to be locked
- */
-static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
-{
- struct device *dev = &xfer->wa->usb_iface->dev;
- unsigned result, cnt;
- struct wa_seg *seg;
- struct urb *urb = xfer->urb;
- unsigned found_short = 0;
-
- result = xfer->segs_done == xfer->segs_submitted;
- if (result == 0)
- goto out;
- urb->actual_length = 0;
- for (cnt = 0; cnt < xfer->segs; cnt++) {
- seg = xfer->seg[cnt];
- switch (seg->status) {
- case WA_SEG_DONE:
- if (found_short && seg->result > 0) {
- dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
- xfer, wa_xfer_id(xfer), cnt,
- seg->result);
- urb->status = -EINVAL;
- goto out;
- }
- urb->actual_length += seg->result;
- if (!(usb_pipeisoc(xfer->urb->pipe))
- && seg->result < xfer->seg_size
- && cnt != xfer->segs-1)
- found_short = 1;
- dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
- "result %zu urb->actual_length %d\n",
- xfer, wa_xfer_id(xfer), seg->index, found_short,
- seg->result, urb->actual_length);
- break;
- case WA_SEG_ERROR:
- xfer->result = seg->result;
- dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
- xfer, wa_xfer_id(xfer), seg->index, seg->result,
- seg->result);
- goto out;
- case WA_SEG_ABORTED:
- xfer->result = seg->result;
- dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
- xfer, wa_xfer_id(xfer), seg->index, seg->result,
- seg->result);
- goto out;
- default:
- dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
- xfer, wa_xfer_id(xfer), cnt, seg->status);
- xfer->result = -EINVAL;
- goto out;
- }
- }
- xfer->result = 0;
-out:
- return result;
-}
-
-/*
- * Mark the given segment as done. Return true if this completes the xfer.
- * This should only be called for segs that have been submitted to an RPIPE.
- * Delayed segs are not marked as submitted so they do not need to be marked
- * as done when cleaning up.
- *
- * xfer->lock has to be locked
- */
-static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
- struct wa_seg *seg, enum wa_seg_status status)
-{
- seg->status = status;
- xfer->segs_done++;
-
- /* check for done. */
- return __wa_xfer_is_done(xfer);
-}
-
-/*
- * Search for a transfer list ID on the HCD's URB list
- *
- * Transfer IDs are sequential 32-bit values (see wa_xfer_id_init()),
- * so we simply match against each xfer's stored id.
- *
- * @returns NULL if not found.
- */
-static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
-{
- unsigned long flags;
- struct wa_xfer *xfer_itr;
- spin_lock_irqsave(&wa->xfer_list_lock, flags);
- list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
- if (id == xfer_itr->id) {
- wa_xfer_get(xfer_itr);
- goto out;
- }
- }
- xfer_itr = NULL;
-out:
- spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
- return xfer_itr;
-}
-
-struct wa_xfer_abort_buffer {
- struct urb urb;
- struct wahc *wa;
- struct wa_xfer_abort cmd;
-};
-
-static void __wa_xfer_abort_cb(struct urb *urb)
-{
- struct wa_xfer_abort_buffer *b = urb->context;
- struct wahc *wa = b->wa;
-
- /*
- * If the abort request URB failed, then the HWA did not get the abort
- * command. Forcibly clean up the xfer without waiting for a Transfer
- * Result from the HWA.
- */
- if (urb->status < 0) {
- struct wa_xfer *xfer;
- struct device *dev = &wa->usb_iface->dev;
-
- xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
- dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
- __func__, urb->status);
- if (xfer) {
- unsigned long flags;
- int done, seg_index = 0;
- struct wa_rpipe *rpipe = xfer->ep->hcpriv;
-
- dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
- __func__, xfer, wa_xfer_id(xfer));
- spin_lock_irqsave(&xfer->lock, flags);
- /* skip done segs. */
- while (seg_index < xfer->segs) {
- struct wa_seg *seg = xfer->seg[seg_index];
-
- if ((seg->status == WA_SEG_DONE) ||
- (seg->status == WA_SEG_ERROR)) {
- ++seg_index;
- } else {
- break;
- }
- }
- /* mark remaining segs as aborted. */
- wa_complete_remaining_xfer_segs(xfer, seg_index,
- WA_SEG_ABORTED);
- done = __wa_xfer_is_done(xfer);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- wa_xfer_delayed_run(rpipe);
- wa_xfer_put(xfer);
- } else {
- dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
- __func__, le32_to_cpu(b->cmd.dwTransferID));
- }
- }
-
- wa_put(wa); /* taken in __wa_xfer_abort */
- usb_put_urb(&b->urb);
-}
-
-/*
- * Aborts an ongoing transaction
- *
- * Assumes the transfer is referenced and locked and in a submitted
- * state (mainly that there is an endpoint/rpipe assigned).
- *
- * The callback (see above) does nothing but freeing up the data by
- * putting the URB. Because the URB is allocated at the head of the
- * struct, the whole space we allocated is kfreed.
- */
-static int __wa_xfer_abort(struct wa_xfer *xfer)
-{
- int result = -ENOMEM;
- struct device *dev = &xfer->wa->usb_iface->dev;
- struct wa_xfer_abort_buffer *b;
- struct wa_rpipe *rpipe = xfer->ep->hcpriv;
-
- b = kmalloc(sizeof(*b), GFP_ATOMIC);
- if (b == NULL)
- goto error_kmalloc;
- b->cmd.bLength = sizeof(b->cmd);
- b->cmd.bRequestType = WA_XFER_ABORT;
- b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
- b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
- b->wa = wa_get(xfer->wa);
-
- usb_init_urb(&b->urb);
- usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
- usb_sndbulkpipe(xfer->wa->usb_dev,
- xfer->wa->dto_epd->bEndpointAddress),
- &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
- result = usb_submit_urb(&b->urb, GFP_ATOMIC);
- if (result < 0)
- goto error_submit;
- return result; /* callback frees! */
-
-error_submit:
- wa_put(xfer->wa);
- if (printk_ratelimit())
- dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
- xfer, result);
- kfree(b);
-error_kmalloc:
- return result;
-}
-
-/*
- * Calculate the number of isoc frames starting from isoc_frame_offset
- * that will fit in a transfer segment.
- */
-static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
- int isoc_frame_offset, int *total_size)
-{
- int segment_size = 0, frame_count = 0;
- int index = isoc_frame_offset;
- struct usb_iso_packet_descriptor *iso_frame_desc =
- xfer->urb->iso_frame_desc;
-
- while ((index < xfer->urb->number_of_packets)
- && ((segment_size + iso_frame_desc[index].length)
- <= xfer->seg_size)) {
- /*
- * For Alereon HWA devices, only include an isoc frame in an
- * out segment if it is physically contiguous with the previous
- * frame. This is required because those devices expect
- * the isoc frames to be sent as a single USB transaction as
- * opposed to one transaction per frame with standard HWA.
- */
- if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
- && (xfer->is_inbound == 0)
- && (index > isoc_frame_offset)
- && ((iso_frame_desc[index - 1].offset +
- iso_frame_desc[index - 1].length) !=
- iso_frame_desc[index].offset))
- break;
-
- /* this frame fits. count it. */
- ++frame_count;
- segment_size += iso_frame_desc[index].length;
-
- /* move to the next isoc frame. */
- ++index;
- }
-
- *total_size = segment_size;
- return frame_count;
-}
-
-/*
- *
- * @returns < 0 on error, transfer segment request size if ok
- */
-static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
- enum wa_xfer_type *pxfer_type)
-{
- ssize_t result;
- struct device *dev = &xfer->wa->usb_iface->dev;
- size_t maxpktsize;
- struct urb *urb = xfer->urb;
- struct wa_rpipe *rpipe = xfer->ep->hcpriv;
-
- switch (rpipe->descr.bmAttribute & 0x3) {
- case USB_ENDPOINT_XFER_CONTROL:
- *pxfer_type = WA_XFER_TYPE_CTL;
- result = sizeof(struct wa_xfer_ctl);
- break;
- case USB_ENDPOINT_XFER_INT:
- case USB_ENDPOINT_XFER_BULK:
- *pxfer_type = WA_XFER_TYPE_BI;
- result = sizeof(struct wa_xfer_bi);
- break;
- case USB_ENDPOINT_XFER_ISOC:
- *pxfer_type = WA_XFER_TYPE_ISO;
- result = sizeof(struct wa_xfer_hwaiso);
- break;
- default:
- /* never happens */
- BUG();
- result = -EINVAL; /* shut gcc up */
- }
- xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
- xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
-
- maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
- xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
- * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
- /* Compute the segment size and make sure it is a multiple of
- * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
- * a check (FIXME) */
- if (xfer->seg_size < maxpktsize) {
- dev_err(dev,
- "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
- xfer->seg_size, maxpktsize);
- result = -EINVAL;
- goto error;
- }
- xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
- if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
- int index = 0;
-
- xfer->segs = 0;
- /*
- * loop over urb->number_of_packets to determine how many
- * xfer segments will be needed to send the isoc frames.
- */
- while (index < urb->number_of_packets) {
- int seg_size; /* don't care. */
- index += __wa_seg_calculate_isoc_frame_count(xfer,
- index, &seg_size);
- ++xfer->segs;
- }
- } else {
- xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
- xfer->seg_size);
- if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
- xfer->segs = 1;
- }
-
- if (xfer->segs > WA_SEGS_MAX) {
- dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
- (urb->transfer_buffer_length/xfer->seg_size),
- WA_SEGS_MAX);
- result = -EINVAL;
- goto error;
- }
-error:
- return result;
-}
-
-static void __wa_setup_isoc_packet_descr(
- struct wa_xfer_packet_info_hwaiso *packet_desc,
- struct wa_xfer *xfer,
- struct wa_seg *seg) {
- struct usb_iso_packet_descriptor *iso_frame_desc =
- xfer->urb->iso_frame_desc;
- int frame_index;
-
- /* populate isoc packet descriptor. */
- packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
- packet_desc->wLength = cpu_to_le16(struct_size(packet_desc,
- PacketLength,
- seg->isoc_frame_count));
- for (frame_index = 0; frame_index < seg->isoc_frame_count;
- ++frame_index) {
- int offset_index = frame_index + seg->isoc_frame_offset;
- packet_desc->PacketLength[frame_index] =
- cpu_to_le16(iso_frame_desc[offset_index].length);
- }
-}
-
-
-/* Fill in the common request header and xfer-type specific data. */
-static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
- struct wa_xfer_hdr *xfer_hdr0,
- enum wa_xfer_type xfer_type,
- size_t xfer_hdr_size)
-{
- struct wa_rpipe *rpipe = xfer->ep->hcpriv;
- struct wa_seg *seg = xfer->seg[0];
-
- xfer_hdr0 = &seg->xfer_hdr;
- xfer_hdr0->bLength = xfer_hdr_size;
- xfer_hdr0->bRequestType = xfer_type;
- xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
- xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
- xfer_hdr0->bTransferSegment = 0;
- switch (xfer_type) {
- case WA_XFER_TYPE_CTL: {
- struct wa_xfer_ctl *xfer_ctl =
- container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
- xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
- memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
- sizeof(xfer_ctl->baSetupData));
- break;
- }
- case WA_XFER_TYPE_BI:
- break;
- case WA_XFER_TYPE_ISO: {
- struct wa_xfer_hwaiso *xfer_iso =
- container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
- struct wa_xfer_packet_info_hwaiso *packet_desc =
- ((void *)xfer_iso) + xfer_hdr_size;
-
- /* populate the isoc section of the transfer request. */
- xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
- /* populate isoc packet descriptor. */
- __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
- break;
- }
- default:
- BUG();
- }
-}
-
-/*
- * Callback for the OUT data phase of the segment request
- *
- * Check wa_seg_tr_cb(); most comments also apply here because this
- * function does almost the same thing and they work closely
- * together.
- *
- * If the seg request has failed but this DTO phase has succeeded,
- * wa_seg_tr_cb() has already failed the segment and moved the
- * status to WA_SEG_ERROR, so this will go through 'case 0' and
- * effectively do nothing.
- */
-static void wa_seg_dto_cb(struct urb *urb)
-{
- struct wa_seg *seg = urb->context;
- struct wa_xfer *xfer = seg->xfer;
- struct wahc *wa;
- struct device *dev;
- struct wa_rpipe *rpipe;
- unsigned long flags;
- unsigned rpipe_ready = 0;
- int data_send_done = 1, release_dto = 0, holding_dto = 0;
- u8 done = 0;
- int result;
-
- /* free the sg if it was used. */
- kfree(urb->sg);
- urb->sg = NULL;
-
- spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- if (usb_pipeisoc(xfer->urb->pipe)) {
- /* Alereon HWA sends all isoc frames in a single transfer. */
- if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
- seg->isoc_frame_index += seg->isoc_frame_count;
- else
- seg->isoc_frame_index += 1;
- if (seg->isoc_frame_index < seg->isoc_frame_count) {
- data_send_done = 0;
- holding_dto = 1; /* checked in error cases. */
- /*
- * if this is the last isoc frame of the segment, we
- * can release DTO after sending this frame.
- */
- if ((seg->isoc_frame_index + 1) >=
- seg->isoc_frame_count)
- release_dto = 1;
- }
- dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
- wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
- holding_dto, release_dto);
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
-
- switch (urb->status) {
- case 0:
- spin_lock_irqsave(&xfer->lock, flags);
- seg->result += urb->actual_length;
- if (data_send_done) {
- dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
- wa_xfer_id(xfer), seg->index, seg->result);
- if (seg->status < WA_SEG_PENDING)
- seg->status = WA_SEG_PENDING;
- } else {
- /* should only hit this for isoc xfers. */
- /*
- * Populate the dto URB with the next isoc frame buffer,
- * send the URB and release DTO if we no longer need it.
- */
- __wa_populate_dto_urb_isoc(xfer, seg,
- seg->isoc_frame_offset + seg->isoc_frame_index);
-
- /* resubmit the URB with the next isoc frame. */
- /* take a ref on resubmit. */
- wa_xfer_get(xfer);
- result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
- if (result < 0) {
- dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
- wa_xfer_id(xfer), seg->index, result);
- spin_unlock_irqrestore(&xfer->lock, flags);
- goto error_dto_submit;
- }
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (release_dto) {
- __wa_dto_put(wa);
- wa_check_for_delayed_rpipes(wa);
- }
- break;
- case -ECONNRESET: /* URB unlinked; no need to do anything */
- case -ENOENT: /* as it was done by whoever unlinked us */
- if (holding_dto) {
- __wa_dto_put(wa);
- wa_check_for_delayed_rpipes(wa);
- }
- break;
- default: /* Other errors ... */
- dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
- wa_xfer_id(xfer), seg->index, urb->status);
- goto error_default;
- }
-
- /* taken when this URB was submitted. */
- wa_xfer_put(xfer);
- return;
-
-error_dto_submit:
- /* taken on resubmit attempt. */
- wa_xfer_put(xfer);
-error_default:
- spin_lock_irqsave(&xfer->lock, flags);
- rpipe = xfer->ep->hcpriv;
- if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
- EDC_ERROR_TIMEFRAME)) {
- dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
- wa_reset_all(wa);
- }
- if (seg->status != WA_SEG_ERROR) {
- seg->result = urb->status;
- __wa_xfer_abort(xfer);
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (holding_dto) {
- __wa_dto_put(wa);
- wa_check_for_delayed_rpipes(wa);
- }
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- /* taken when this URB was submitted. */
- wa_xfer_put(xfer);
-}
-
-/*
- * Callback for the isoc packet descriptor phase of the segment request
- *
- * Check wa_seg_tr_cb(); most comments also apply here because this
- * function does almost the same thing and they work closely
- * together.
- *
- * If the seg request has failed but this phase has succeeded,
- * wa_seg_tr_cb() has already failed the segment and moved the
- * status to WA_SEG_ERROR, so this will go through 'case 0' and
- * effectively do nothing.
- */
-static void wa_seg_iso_pack_desc_cb(struct urb *urb)
-{
- struct wa_seg *seg = urb->context;
- struct wa_xfer *xfer = seg->xfer;
- struct wahc *wa;
- struct device *dev;
- struct wa_rpipe *rpipe;
- unsigned long flags;
- unsigned rpipe_ready = 0;
- u8 done = 0;
-
- switch (urb->status) {
- case 0:
- spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
- wa_xfer_id(xfer), seg->index);
- if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
- seg->status = WA_SEG_PENDING;
- spin_unlock_irqrestore(&xfer->lock, flags);
- break;
- case -ECONNRESET: /* URB unlinked; no need to do anything */
- case -ENOENT: /* as it was done by whoever unlinked us */
- break;
- default: /* Other errors ... */
- spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- rpipe = xfer->ep->hcpriv;
- pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
- wa_xfer_id(xfer), seg->index, urb->status);
- if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
- EDC_ERROR_TIMEFRAME)) {
- dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
- wa_reset_all(wa);
- }
- if (seg->status != WA_SEG_ERROR) {
- usb_unlink_urb(seg->dto_urb);
- seg->result = urb->status;
- __wa_xfer_abort(xfer);
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_mark_seg_as_done(xfer, seg,
- WA_SEG_ERROR);
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- }
- /* taken when this URB was submitted. */
- wa_xfer_put(xfer);
-}
-
-/*
- * Callback for the segment request
- *
- * If successful, transition state (unless already transitioned or
- * outbound transfer); otherwise, take note of the error, mark this
- * segment done and try completion.
- *
- * Note we don't access the xfer until we are sure that the transfer
- * hasn't been cancelled (ECONNRESET, ENOENT), which could mean that
- * seg->xfer is already gone.
- *
- * We have to check before setting the status to WA_SEG_PENDING
- * because sometimes the xfer result callback arrives before this
- * callback does, so we might already be in another state. Likewise,
- * we don't set it if the transfer is not inbound, since in that
- * case wa_seg_dto_cb will do it when the OUT data phase finishes.
- */
-static void wa_seg_tr_cb(struct urb *urb)
-{
- struct wa_seg *seg = urb->context;
- struct wa_xfer *xfer = seg->xfer;
- struct wahc *wa;
- struct device *dev;
- struct wa_rpipe *rpipe;
- unsigned long flags;
- unsigned rpipe_ready;
- u8 done = 0;
-
- switch (urb->status) {
- case 0:
- spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
- xfer, wa_xfer_id(xfer), seg->index);
- if (xfer->is_inbound &&
- seg->status < WA_SEG_PENDING &&
- !(usb_pipeisoc(xfer->urb->pipe)))
- seg->status = WA_SEG_PENDING;
- spin_unlock_irqrestore(&xfer->lock, flags);
- break;
- case -ECONNRESET: /* URB unlinked; no need to do anything */
- case -ENOENT: /* as it was done by whoever unlinked us */
- break;
- default: /* Other errors ... */
- spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- rpipe = xfer->ep->hcpriv;
- if (printk_ratelimit())
- dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
- xfer, wa_xfer_id(xfer), seg->index,
- urb->status);
- if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
- EDC_ERROR_TIMEFRAME)) {
- dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
- wa_reset_all(wa);
- }
- usb_unlink_urb(seg->isoc_pack_desc_urb);
- usb_unlink_urb(seg->dto_urb);
- seg->result = urb->status;
- __wa_xfer_abort(xfer);
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- }
- /* taken when this URB was submitted. */
- wa_xfer_put(xfer);
-}
-
-/*
- * Allocate an SG list to store bytes_to_transfer bytes and copy the
- * subset of the in_sg that matches the buffer subset
- * we are about to transfer.
- */
-static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
- const unsigned int bytes_transferred,
- const unsigned int bytes_to_transfer, int *out_num_sgs)
-{
- struct scatterlist *out_sg;
- unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
- nents;
- struct scatterlist *current_xfer_sg = in_sg;
- struct scatterlist *current_seg_sg, *last_seg_sg;
-
- /* skip previously transferred pages. */
- while ((current_xfer_sg) &&
- (bytes_processed < bytes_transferred)) {
- bytes_processed += current_xfer_sg->length;
-
- /* advance the sg if current segment starts on or past the
- next page. */
- if (bytes_processed <= bytes_transferred)
- current_xfer_sg = sg_next(current_xfer_sg);
- }
-
- /* the data for the current segment starts in current_xfer_sg.
- calculate the offset. */
- if (bytes_processed > bytes_transferred) {
- offset_into_current_page_data = current_xfer_sg->length -
- (bytes_processed - bytes_transferred);
- }
-
- /* calculate the number of pages needed by this segment. */
- nents = DIV_ROUND_UP((bytes_to_transfer +
- offset_into_current_page_data +
- current_xfer_sg->offset),
- PAGE_SIZE);
-
- out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
- if (out_sg) {
- sg_init_table(out_sg, nents);
-
- /* copy the portion of the incoming SG that correlates to the
- * data to be transferred by this segment to the segment SG. */
- last_seg_sg = current_seg_sg = out_sg;
- bytes_processed = 0;
-
- /* reset nents and calculate the actual number of sg entries
- needed. */
- nents = 0;
- while ((bytes_processed < bytes_to_transfer) &&
- current_seg_sg && current_xfer_sg) {
- unsigned int page_len = min((current_xfer_sg->length -
- offset_into_current_page_data),
- (bytes_to_transfer - bytes_processed));
-
- sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
- page_len,
- current_xfer_sg->offset +
- offset_into_current_page_data);
-
- bytes_processed += page_len;
-
- last_seg_sg = current_seg_sg;
- current_seg_sg = sg_next(current_seg_sg);
- current_xfer_sg = sg_next(current_xfer_sg);
-
- /* only the first page may require additional offset. */
- offset_into_current_page_data = 0;
- nents++;
- }
-
- /* update num_sgs and terminate the list since we may have
- * concatenated pages. */
- sg_mark_end(last_seg_sg);
- *out_num_sgs = nents;
- }
-
- return out_sg;
-}
-
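/*
 * The skip-then-copy walk of wa_xfer_create_subset_sg() above,
 * modelled on a plain array of chunk lengths instead of a scatterlist
 * so it runs in userspace. Chunk sizes are hypothetical; the point is
 * how bytes_transferred selects the starting chunk and the intra-chunk
 * offset for the next segment.
 */
#include <stdio.h>

int main(void)
{
	unsigned int chunks[] = { 4096, 4096, 4096, 4096 };
	unsigned int nchunks = 4, i = 0;
	unsigned int bytes_transferred = 6000;	/* already sent */
	unsigned int bytes_processed = 0, offset = 0;

	/* skip previously transferred chunks (the first while loop above) */
	while (i < nchunks && bytes_processed < bytes_transferred) {
		bytes_processed += chunks[i];
		if (bytes_processed <= bytes_transferred)
			i++;
	}
	/* data for this segment starts inside chunks[i] */
	if (bytes_processed > bytes_transferred)
		offset = chunks[i] - (bytes_processed - bytes_transferred);

	printf("start chunk %u, offset %u\n", i, offset);	/* 1, 1904 */
	return 0;
}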
-/*
- * Populate DMA buffer info for the isoc dto urb.
- */
-static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
- struct wa_seg *seg, int curr_iso_frame)
-{
- seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- seg->dto_urb->sg = NULL;
- seg->dto_urb->num_sgs = 0;
- /* dto urb buffer address pulled from iso_frame_desc. */
- seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
- xfer->urb->iso_frame_desc[curr_iso_frame].offset;
- /* The Alereon HWA sends a single URB with all isoc segs. */
- if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
- seg->dto_urb->transfer_buffer_length = seg->isoc_size;
- else
- seg->dto_urb->transfer_buffer_length =
- xfer->urb->iso_frame_desc[curr_iso_frame].length;
-}
-
-/*
- * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
- */
-static int __wa_populate_dto_urb(struct wa_xfer *xfer,
- struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
-{
- int result = 0;
-
- if (xfer->is_dma) {
- seg->dto_urb->transfer_dma =
- xfer->urb->transfer_dma + buf_itr_offset;
- seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- seg->dto_urb->sg = NULL;
- seg->dto_urb->num_sgs = 0;
- } else {
- /* do buffer or SG processing. */
- seg->dto_urb->transfer_flags &=
- ~URB_NO_TRANSFER_DMA_MAP;
- /* this should always be 0 before a resubmit. */
- seg->dto_urb->num_mapped_sgs = 0;
-
- if (xfer->urb->transfer_buffer) {
- seg->dto_urb->transfer_buffer =
- xfer->urb->transfer_buffer +
- buf_itr_offset;
- seg->dto_urb->sg = NULL;
- seg->dto_urb->num_sgs = 0;
- } else {
- seg->dto_urb->transfer_buffer = NULL;
-
- /*
- * allocate an SG list to store seg_size bytes
- * and copy the subset of the xfer->urb->sg that
- * matches the buffer subset we are about to
- * read.
- */
- seg->dto_urb->sg = wa_xfer_create_subset_sg(
- xfer->urb->sg,
- buf_itr_offset, buf_itr_size,
- &(seg->dto_urb->num_sgs));
- if (!(seg->dto_urb->sg))
- result = -ENOMEM;
- }
- }
- seg->dto_urb->transfer_buffer_length = buf_itr_size;
-
- return result;
-}
-
-/*
- * Allocate the segs array and initialize each of them
- *
- * The segments are freed by wa_xfer_destroy() when the xfer use count
- * drops to zero; however, because each segment is given the same life
- * cycle as the USB URB it contains, it is actually freed by
- * usb_put_urb() on the contained USB URB (twisted, eh?).
- */
-static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
-{
- int result, cnt, isoc_frame_offset = 0;
- size_t alloc_size = sizeof(*xfer->seg[0])
- - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
- struct usb_device *usb_dev = xfer->wa->usb_dev;
- const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
- struct wa_seg *seg;
- size_t buf_itr, buf_size, buf_itr_size;
-
- result = -ENOMEM;
- xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
- if (xfer->seg == NULL)
- goto error_segs_kzalloc;
- buf_itr = 0;
- buf_size = xfer->urb->transfer_buffer_length;
- for (cnt = 0; cnt < xfer->segs; cnt++) {
- size_t iso_pkt_descr_size = 0;
- int seg_isoc_frame_count = 0, seg_isoc_size = 0;
-
- /*
- * Adjust the size of the segment object to contain space for
- * the isoc packet descriptor buffer.
- */
- if (usb_pipeisoc(xfer->urb->pipe)) {
- seg_isoc_frame_count =
- __wa_seg_calculate_isoc_frame_count(xfer,
- isoc_frame_offset, &seg_isoc_size);
-
- iso_pkt_descr_size =
- sizeof(struct wa_xfer_packet_info_hwaiso) +
- (seg_isoc_frame_count * sizeof(__le16));
- }
- result = -ENOMEM;
- seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
- GFP_ATOMIC);
- if (seg == NULL)
- goto error_seg_kmalloc;
- wa_seg_init(seg);
- seg->xfer = xfer;
- seg->index = cnt;
- usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
- usb_sndbulkpipe(usb_dev,
- dto_epd->bEndpointAddress),
- &seg->xfer_hdr, xfer_hdr_size,
- wa_seg_tr_cb, seg);
- buf_itr_size = min(buf_size, xfer->seg_size);
-
- if (usb_pipeisoc(xfer->urb->pipe)) {
- seg->isoc_frame_count = seg_isoc_frame_count;
- seg->isoc_frame_offset = isoc_frame_offset;
- seg->isoc_size = seg_isoc_size;
- /* iso packet descriptor. */
- seg->isoc_pack_desc_urb =
- usb_alloc_urb(0, GFP_ATOMIC);
- if (seg->isoc_pack_desc_urb == NULL)
- goto error_iso_pack_desc_alloc;
- /*
- * The buffer for the isoc packet descriptor starts
- * after the transfer request header in the
- * segment object memory buffer.
- */
- usb_fill_bulk_urb(
- seg->isoc_pack_desc_urb, usb_dev,
- usb_sndbulkpipe(usb_dev,
- dto_epd->bEndpointAddress),
- (void *)(&seg->xfer_hdr) +
- xfer_hdr_size,
- iso_pkt_descr_size,
- wa_seg_iso_pack_desc_cb, seg);
-
- /* adjust starting frame offset for next seg. */
- isoc_frame_offset += seg_isoc_frame_count;
- }
-
- if (xfer->is_inbound == 0 && buf_size > 0) {
- /* outbound data. */
- seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (seg->dto_urb == NULL)
- goto error_dto_alloc;
- usb_fill_bulk_urb(
- seg->dto_urb, usb_dev,
- usb_sndbulkpipe(usb_dev,
- dto_epd->bEndpointAddress),
- NULL, 0, wa_seg_dto_cb, seg);
-
- if (usb_pipeisoc(xfer->urb->pipe)) {
- /*
- * Fill in the xfer buffer information for the
- * first isoc frame. Subsequent frames in this
- * segment will be filled in and sent from the
- * DTO completion routine, if needed.
- */
- __wa_populate_dto_urb_isoc(xfer, seg,
- seg->isoc_frame_offset);
- } else {
- /* fill in the xfer buffer information. */
- result = __wa_populate_dto_urb(xfer, seg,
- buf_itr, buf_itr_size);
- if (result < 0)
- goto error_seg_outbound_populate;
-
- buf_itr += buf_itr_size;
- buf_size -= buf_itr_size;
- }
- }
- seg->status = WA_SEG_READY;
- }
- return 0;
-
- /*
- * Free the memory for the current segment which failed to init.
- * Use the fact that cnt is left where it failed. The remaining
- * segments will be cleaned up by wa_xfer_destroy.
- */
-error_seg_outbound_populate:
- usb_free_urb(xfer->seg[cnt]->dto_urb);
-error_dto_alloc:
- usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
-error_iso_pack_desc_alloc:
- kfree(xfer->seg[cnt]);
- xfer->seg[cnt] = NULL;
-error_seg_kmalloc:
-error_segs_kzalloc:
- return result;
-}
-
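/*
 * A minimal sketch of the staged-goto cleanup idiom used by
 * __wa_xfer_setup_segs() above, stripped of the kernel APIs: every
 * allocation that succeeded before a later failure is undone by the
 * labels, in reverse order of acquisition. The struct and names are
 * hypothetical.
 */
#include <stdlib.h>

struct seg_demo { void *tr; void *dto; };

static int seg_demo_setup(struct seg_demo *s)
{
	s->tr = malloc(64);
	if (!s->tr)
		goto error_tr_alloc;
	s->dto = malloc(64);
	if (!s->dto)
		goto error_dto_alloc;
	return 0;

error_dto_alloc:
	free(s->tr);
	s->tr = NULL;
error_tr_alloc:
	return -1;	/* the kernel version returns -ENOMEM */
}

int main(void)
{
	struct seg_demo s = { 0, 0 };

	if (seg_demo_setup(&s) == 0) {
		free(s.dto);
		free(s.tr);
	}
	return 0;
}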
-/*
- * Allocates all the stuff needed to submit a transfer
- *
- * Breaks the whole data buffer into a list of segments; each one gets
- * a structure allocated to it and linked in xfer->seg[index]
- *
- * FIXME: merge setup_segs() and the last part of this function, no
- * need to do two for loops when we could run everything in a
- * single one
- */
-static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
-{
- int result;
- struct device *dev = &xfer->wa->usb_iface->dev;
- enum wa_xfer_type xfer_type = 0; /* shut up GCC */
- size_t xfer_hdr_size, cnt, transfer_size;
- struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
-
- result = __wa_xfer_setup_sizes(xfer, &xfer_type);
- if (result < 0)
- goto error_setup_sizes;
- xfer_hdr_size = result;
- result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
- if (result < 0) {
- dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
- xfer, xfer->segs, result);
- goto error_setup_segs;
- }
- /* Fill the first header */
- xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
- wa_xfer_id_init(xfer);
- __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
-
- /* Fill remaining headers */
- xfer_hdr = xfer_hdr0;
- if (xfer_type == WA_XFER_TYPE_ISO) {
- xfer_hdr0->dwTransferLength =
- cpu_to_le32(xfer->seg[0]->isoc_size);
- for (cnt = 1; cnt < xfer->segs; cnt++) {
- struct wa_xfer_packet_info_hwaiso *packet_desc;
- struct wa_seg *seg = xfer->seg[cnt];
- struct wa_xfer_hwaiso *xfer_iso;
-
- xfer_hdr = &seg->xfer_hdr;
- xfer_iso = container_of(xfer_hdr,
- struct wa_xfer_hwaiso, hdr);
- packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
- /*
- * Copy values from the 0th header. Segment specific
- * values are set below.
- */
- memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
- xfer_hdr->bTransferSegment = cnt;
- xfer_hdr->dwTransferLength =
- cpu_to_le32(seg->isoc_size);
- xfer_iso->dwNumOfPackets =
- cpu_to_le32(seg->isoc_frame_count);
- __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
- seg->status = WA_SEG_READY;
- }
- } else {
- transfer_size = urb->transfer_buffer_length;
- xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
- cpu_to_le32(xfer->seg_size) :
- cpu_to_le32(transfer_size);
- transfer_size -= xfer->seg_size;
- for (cnt = 1; cnt < xfer->segs; cnt++) {
- xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
- memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
- xfer_hdr->bTransferSegment = cnt;
- xfer_hdr->dwTransferLength =
- transfer_size > xfer->seg_size ?
- cpu_to_le32(xfer->seg_size)
- : cpu_to_le32(transfer_size);
- xfer->seg[cnt]->status = WA_SEG_READY;
- transfer_size -= xfer->seg_size;
- }
- }
- xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
- result = 0;
-error_setup_segs:
-error_setup_sizes:
- return result;
-}
-
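/*
 * The per-segment length computation in __wa_xfer_setup() above,
 * reduced to its core: each non-isoc segment carries
 * min(remaining, seg_size) bytes and the last one gets
 * bTransferSegment marked with 0x80. Values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int remaining = 70000, seg_size = 16384, seg = 0;

	while (remaining > 0) {
		unsigned int len = remaining > seg_size ? seg_size : remaining;

		remaining -= len;
		printf("seg %u: dwTransferLength=%u%s\n", seg++, len,
		       remaining ? "" : " (last: bTransferSegment |= 0x80)");
	}
	return 0;
}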
-/*
- * Submit the URBs for one transfer segment.
- *
- * rpipe->seg_lock is held!
- */
-static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
- struct wa_seg *seg, int *dto_done)
-{
- int result;
-
- /* default to done unless we encounter a multi-frame isoc segment. */
- *dto_done = 1;
-
- /*
- * Take a ref for each segment urb so the xfer cannot disappear until
- * all of the callbacks run.
- */
- wa_xfer_get(xfer);
- /* submit the transfer request. */
- seg->status = WA_SEG_SUBMITTED;
- result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
- if (result < 0) {
- pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
- __func__, xfer, seg->index, result);
- wa_xfer_put(xfer);
- goto error_tr_submit;
- }
- /* submit the isoc packet descriptor if present. */
- if (seg->isoc_pack_desc_urb) {
- wa_xfer_get(xfer);
- result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
- seg->isoc_frame_index = 0;
- if (result < 0) {
- pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
- __func__, xfer, seg->index, result);
- wa_xfer_put(xfer);
- goto error_iso_pack_desc_submit;
- }
- }
- /* submit the out data if this is an out request. */
- if (seg->dto_urb) {
- struct wahc *wa = xfer->wa;
- wa_xfer_get(xfer);
- result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
- if (result < 0) {
- pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
- __func__, xfer, seg->index, result);
- wa_xfer_put(xfer);
- goto error_dto_submit;
- }
- /*
- * If this segment contains more than one isoc frame, hold
- * onto the dto resource until we send all frames.
- * Only applies to non-Alereon devices.
- */
- if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
- && (seg->isoc_frame_count > 1))
- *dto_done = 0;
- }
- rpipe_avail_dec(rpipe);
- return 0;
-
-error_dto_submit:
- usb_unlink_urb(seg->isoc_pack_desc_urb);
-error_iso_pack_desc_submit:
- usb_unlink_urb(&seg->tr_urb);
-error_tr_submit:
- seg->status = WA_SEG_ERROR;
- seg->result = result;
- *dto_done = 1;
- return result;
-}
-
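/*
 * The get-before-submit / put-in-callback refcounting that
 * __wa_seg_submit() applies to each URB, modelled with C11 atomics.
 * submit() and complete() are hypothetical stand-ins for
 * usb_submit_urb() and the URB completion callback.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refs = 1;	/* creator's reference */

static void put(void)
{
	if (atomic_fetch_sub(&refs, 1) == 1)
		puts("last ref dropped: free the xfer");
}

static int submit(int fail)
{
	atomic_fetch_add(&refs, 1);	/* ref owned by the callback */
	if (fail) {
		put();			/* callback will never run */
		return -1;
	}
	return 0;
}

static void complete(void)
{
	put();	/* taken when the URB was submitted */
}

int main(void)
{
	if (submit(0) == 0)
		complete();
	put();	/* creator drops the initial reference */
	return 0;
}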
-/*
- * Execute more queued request segments until the maximum concurrent allowed.
- * Return true if the DTO resource was acquired and released.
- *
- * The ugly unlock/lock sequence on the error path is needed as the
- * xfer->lock normally nests the seg_lock and not vice versa.
- */
-static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
-{
- int result, dto_acquired = 0, dto_done = 0;
- struct device *dev = &rpipe->wa->usb_iface->dev;
- struct wa_seg *seg;
- struct wa_xfer *xfer;
- unsigned long flags;
-
- *dto_waiting = 0;
-
- spin_lock_irqsave(&rpipe->seg_lock, flags);
- while (atomic_read(&rpipe->segs_available) > 0
- && !list_empty(&rpipe->seg_list)
- && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
- seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
- list_node);
- list_del(&seg->list_node);
- xfer = seg->xfer;
- /*
- * Get a reference to the xfer in case the callbacks for the
- * URBs submitted by __wa_seg_submit attempt to complete
- * the xfer before this function completes.
- */
- wa_xfer_get(xfer);
- result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
- /* release the dto resource if this RPIPE is done with it. */
- if (dto_done)
- __wa_dto_put(rpipe->wa);
- dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
- xfer, wa_xfer_id(xfer), seg->index,
- atomic_read(&rpipe->segs_available), result);
- if (unlikely(result < 0)) {
- int done;
-
- spin_unlock_irqrestore(&rpipe->seg_lock, flags);
- spin_lock_irqsave(&xfer->lock, flags);
- __wa_xfer_abort(xfer);
- /*
- * This seg was marked as submitted when it was put on
- * the RPIPE seg_list. Mark it done.
- */
- xfer->segs_done++;
- done = __wa_xfer_is_done(xfer);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- spin_lock_irqsave(&rpipe->seg_lock, flags);
- }
- wa_xfer_put(xfer);
- }
- /*
- * Mark this RPIPE as waiting if dto was not acquired, there are
- * delayed segs and no active transfers to wake us up later.
- */
- if (!dto_acquired && !list_empty(&rpipe->seg_list)
- && (atomic_read(&rpipe->segs_available) ==
- le16_to_cpu(rpipe->descr.wRequests)))
- *dto_waiting = 1;
-
- spin_unlock_irqrestore(&rpipe->seg_lock, flags);
-
- return dto_done;
-}
-
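/*
 * The unlock/relock sequence mentioned above, as a generic pattern:
 * when lock A (xfer->lock) normally nests lock B (rpipe->seg_lock), a
 * path that holds only B and needs A must drop B first or risk an
 * AB/BA deadlock. A sketch with pthread mutexes standing in for the
 * spinlocks:
 */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* xfer->lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* seg_lock */

static void error_path_holding_b(void)
{
	pthread_mutex_unlock(&lock_b);	/* respect the A-then-B order */
	pthread_mutex_lock(&lock_a);
	/* ... abort the transfer, check completion ... */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_lock(&lock_b);	/* resume the submission loop */
}

int main(void)
{
	pthread_mutex_lock(&lock_b);
	error_path_holding_b();
	pthread_mutex_unlock(&lock_b);
	return 0;
}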
-static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
-{
- int dto_waiting;
- int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
-
- /*
- * If this RPIPE is waiting on the DTO resource, add it to the tail of
- * the waiting list.
- * Otherwise, if the WA DTO resource was acquired and released by
- * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
- * DTO and failed during that time. Check the delayed list and process
- * any waiters. Start searching from the next RPIPE index.
- */
- if (dto_waiting)
- wa_add_delayed_rpipe(rpipe->wa, rpipe);
- else if (dto_done)
- wa_check_for_delayed_rpipes(rpipe->wa);
-}
-
-/*
- * Submit a transfer's segments. xfer->lock is taken.
- *
- * On submit failure we just stop submitting and return the error;
- * wa_urb_enqueue_b() will execute the completion path.
- */
-static int __wa_xfer_submit(struct wa_xfer *xfer)
-{
- int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
- struct wahc *wa = xfer->wa;
- struct device *dev = &wa->usb_iface->dev;
- unsigned cnt;
- struct wa_seg *seg;
- unsigned long flags;
- struct wa_rpipe *rpipe = xfer->ep->hcpriv;
- size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
- u8 available;
- u8 empty;
-
- spin_lock_irqsave(&wa->xfer_list_lock, flags);
- list_add_tail(&xfer->list_node, &wa->xfer_list);
- spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
-
- BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
- result = 0;
- spin_lock_irqsave(&rpipe->seg_lock, flags);
- for (cnt = 0; cnt < xfer->segs; cnt++) {
- int delay_seg = 1;
-
- available = atomic_read(&rpipe->segs_available);
- empty = list_empty(&rpipe->seg_list);
- seg = xfer->seg[cnt];
- if (available && empty) {
- /*
- * Only attempt to acquire DTO if we have a segment
- * to send.
- */
- dto_acquired = __wa_dto_try_get(rpipe->wa);
- if (dto_acquired) {
- delay_seg = 0;
- result = __wa_seg_submit(rpipe, xfer, seg,
- &dto_done);
- dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
- xfer, wa_xfer_id(xfer), cnt, available,
- empty);
- if (dto_done)
- __wa_dto_put(rpipe->wa);
-
- if (result < 0) {
- __wa_xfer_abort(xfer);
- goto error_seg_submit;
- }
- }
- }
-
- if (delay_seg) {
- dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
- xfer, wa_xfer_id(xfer), cnt, available, empty);
- seg->status = WA_SEG_DELAYED;
- list_add_tail(&seg->list_node, &rpipe->seg_list);
- }
- xfer->segs_submitted++;
- }
-error_seg_submit:
- /*
- * Mark this RPIPE as waiting if dto was not acquired, there are
- * delayed segs and no active transfers to wake us up later.
- */
- if (!dto_acquired && !list_empty(&rpipe->seg_list)
- && (atomic_read(&rpipe->segs_available) ==
- le16_to_cpu(rpipe->descr.wRequests)))
- dto_waiting = 1;
- spin_unlock_irqrestore(&rpipe->seg_lock, flags);
-
- if (dto_waiting)
- wa_add_delayed_rpipe(rpipe->wa, rpipe);
- else if (dto_done)
- wa_check_for_delayed_rpipes(rpipe->wa);
-
- return result;
-}
-
-/*
- * Second part of enqueueing a URB/transfer
- *
- * Assumes this comes from wa_urb_enqueue() [maybe through
- * wa_urb_enqueue_run()]. At this point:
- *
- * xfer->wa filled and refcounted
- * xfer->ep filled with rpipe refcounted if
- * delayed == 0
- * xfer->urb filled and refcounted (this is the case when called
- * from wa_urb_enqueue() as we come from usb_submit_urb()
- * and when called by wa_urb_enqueue_run(), as we took an
- * extra ref dropped by _run() after we return).
- * xfer->gfp filled
- *
- * If we fail at __wa_xfer_submit(), then we just check if we are done
- * and if so, we run the completion procedure. However, if we are not
- * yet done, we do nothing and wait for the completion handlers from
- * the submitted URBs or from the xfer-result path to kick in. If xfer
- * result never kicks in, the xfer will timeout from the USB code and
- * dequeue() will be called.
- */
-static int wa_urb_enqueue_b(struct wa_xfer *xfer)
-{
- int result;
- unsigned long flags;
- struct urb *urb = xfer->urb;
- struct wahc *wa = xfer->wa;
- struct wusbhc *wusbhc = wa->wusb;
- struct wusb_dev *wusb_dev;
- unsigned done;
-
- result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
- if (result < 0) {
- pr_err("%s: error_rpipe_get\n", __func__);
- goto error_rpipe_get;
- }
- result = -ENODEV;
- /* FIXME: segmentation broken -- kills DWA */
- mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
- if (urb->dev == NULL) {
- mutex_unlock(&wusbhc->mutex);
- pr_err("%s: error usb dev gone\n", __func__);
- goto error_dev_gone;
- }
- wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
- if (wusb_dev == NULL) {
- mutex_unlock(&wusbhc->mutex);
- dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
- __func__);
- goto error_dev_gone;
- }
- mutex_unlock(&wusbhc->mutex);
-
- spin_lock_irqsave(&xfer->lock, flags);
- xfer->wusb_dev = wusb_dev;
- result = urb->status;
- if (urb->status != -EINPROGRESS) {
- dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
- goto error_dequeued;
- }
-
- result = __wa_xfer_setup(xfer, urb);
- if (result < 0) {
- dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
- goto error_xfer_setup;
- }
- /*
- * Get a xfer reference since __wa_xfer_submit starts asynchronous
- * operations that may try to complete the xfer before this function
- * exits.
- */
- wa_xfer_get(xfer);
- result = __wa_xfer_submit(xfer);
- if (result < 0) {
- dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
- goto error_xfer_submit;
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- wa_xfer_put(xfer);
- return 0;
-
- /*
- * This is basically wa_xfer_completion() broken up:
- * wa_xfer_giveback() does a wa_xfer_put() that will call
- * wa_xfer_destroy() and undo what setup() did.
- */
-error_xfer_setup:
-error_dequeued:
- spin_unlock_irqrestore(&xfer->lock, flags);
- /* FIXME: segmentation broken, kills DWA */
- if (wusb_dev)
- wusb_dev_put(wusb_dev);
-error_dev_gone:
- rpipe_put(xfer->ep->hcpriv);
-error_rpipe_get:
- xfer->result = result;
- return result;
-
-error_xfer_submit:
- done = __wa_xfer_is_done(xfer);
- xfer->result = result;
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- wa_xfer_put(xfer);
- /* return success since the completion routine will run. */
- return 0;
-}
-
-/*
- * Execute the delayed transfers in the Wire Adapter @wa
- *
- * We need to be careful here, as dequeue() could be called in the
- * middle. That's why we do the whole thing under the
- * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
- * and then checks the list -- so as we would be acquiring in inverse
- * order, we move the delayed list to a separate list while locked and then
- * submit them without the list lock held.
- */
-void wa_urb_enqueue_run(struct work_struct *ws)
-{
- struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
- struct wa_xfer *xfer, *next;
- struct urb *urb;
- LIST_HEAD(tmp_list);
-
- /* Create a copy of the wa->xfer_delayed_list while holding the lock */
- spin_lock_irq(&wa->xfer_list_lock);
- list_cut_position(&tmp_list, &wa->xfer_delayed_list,
- wa->xfer_delayed_list.prev);
- spin_unlock_irq(&wa->xfer_list_lock);
-
- /*
- * enqueue from temp list without list lock held since wa_urb_enqueue_b
- * can take xfer->lock as well as lock mutexes.
- */
- list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
- list_del_init(&xfer->list_node);
-
- urb = xfer->urb;
- if (wa_urb_enqueue_b(xfer) < 0)
- wa_xfer_giveback(xfer);
- usb_put_urb(urb); /* taken when queuing */
- }
-}
-EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
-
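/*
 * The detach-under-lock, process-unlocked pattern that
 * wa_urb_enqueue_run() implements with list_cut_position(), modelled
 * on a singly linked list guarded by a pthread mutex. Stealing the
 * whole list while locked keeps the critical section tiny and avoids
 * calling into code that takes other locks while the list lock is
 * held.
 */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *delayed_list;

static void run_delayed(void)
{
	struct node *n, *stolen;

	pthread_mutex_lock(&list_lock);
	stolen = delayed_list;	/* take the whole list */
	delayed_list = NULL;
	pthread_mutex_unlock(&list_lock);

	for (n = stolen; n; n = n->next)	/* no lock held here */
		printf("enqueue xfer %d\n", n->id);
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };

	delayed_list = &a;
	run_delayed();
	return 0;
}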
-/*
- * Process the errored transfers on the Wire Adapter outside of interrupt.
- */
-void wa_process_errored_transfers_run(struct work_struct *ws)
-{
- struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
- struct wa_xfer *xfer, *next;
- LIST_HEAD(tmp_list);
-
- pr_info("%s: Run delayed STALL processing.\n", __func__);
-
- /* Create a copy of the wa->xfer_errored_list while holding the lock */
- spin_lock_irq(&wa->xfer_list_lock);
- list_cut_position(&tmp_list, &wa->xfer_errored_list,
- wa->xfer_errored_list.prev);
- spin_unlock_irq(&wa->xfer_list_lock);
-
- /*
- * run rpipe_clear_feature_stalled from temp list without list lock
- * held.
- */
- list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
- struct usb_host_endpoint *ep;
- unsigned long flags;
- struct wa_rpipe *rpipe;
-
- spin_lock_irqsave(&xfer->lock, flags);
- ep = xfer->ep;
- rpipe = ep->hcpriv;
- spin_unlock_irqrestore(&xfer->lock, flags);
-
- /* clear RPIPE feature stalled without holding a lock. */
- rpipe_clear_feature_stalled(wa, ep);
-
- /* complete the xfer. This removes it from the tmp list. */
- wa_xfer_completion(xfer);
-
- /* check for work. */
- wa_xfer_delayed_run(rpipe);
- }
-}
-EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
-
-/*
- * Submit a transfer to the Wire Adapter in a delayed way
- *
- * The process of enqueueing involves possible sleeps [see
- * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
- * in an atomic section, we defer the enqueue_b() call; otherwise we
- * call it directly.
- *
- * @urb: We own a reference to it done by the HCI Linux USB stack that
- * will be given up by calling usb_hcd_giveback_urb() or by
- * returning error from this function -> ergo we don't have to
- * refcount it.
- */
-int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
- struct urb *urb, gfp_t gfp)
-{
- int result;
- struct device *dev = &wa->usb_iface->dev;
- struct wa_xfer *xfer;
- unsigned long my_flags;
- unsigned cant_sleep = irqs_disabled() | in_atomic();
-
- if ((urb->transfer_buffer == NULL)
- && (urb->sg == NULL)
- && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
- && urb->transfer_buffer_length != 0) {
- dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
- dump_stack();
- }
-
- spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
- result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
- spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
- if (result < 0)
- goto error_link_urb;
-
- result = -ENOMEM;
- xfer = kzalloc(sizeof(*xfer), gfp);
- if (xfer == NULL)
- goto error_kmalloc;
-
- result = -ENOENT;
- if (urb->status != -EINPROGRESS) /* cancelled */
- goto error_dequeued; /* before starting? */
- wa_xfer_init(xfer);
- xfer->wa = wa_get(wa);
- xfer->urb = urb;
- xfer->gfp = gfp;
- xfer->ep = ep;
- urb->hcpriv = xfer;
-
- dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
- xfer, urb, urb->pipe, urb->transfer_buffer_length,
- urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
- urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
- cant_sleep ? "deferred" : "inline");
-
- if (cant_sleep) {
- usb_get_urb(urb);
- spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
- list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
- spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
- queue_work(wusbd, &wa->xfer_enqueue_work);
- } else {
- result = wa_urb_enqueue_b(xfer);
- if (result < 0) {
- /*
- * URB submit/enqueue failed. Clean up, return an
- * error and do not run the callback. This avoids
- * an infinite submit/complete loop.
- */
- dev_err(dev, "%s: URB enqueue failed: %d\n",
- __func__, result);
- wa_put(xfer->wa);
- wa_xfer_put(xfer);
- spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
- usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
- spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
- return result;
- }
- }
- return 0;
-
-error_dequeued:
- kfree(xfer);
-error_kmalloc:
- spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
- usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
- spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
-error_link_urb:
- return result;
-}
-EXPORT_SYMBOL_GPL(wa_urb_enqueue);
-
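/*
 * The inline-vs-deferred decision in wa_urb_enqueue() above, reduced
 * to a sketch. can_sleep stands in for
 * !(irqs_disabled() | in_atomic()), and defer() is a hypothetical
 * stand-in for queue_work() handing the xfer to wa_urb_enqueue_run().
 */
#include <stdio.h>

static void enqueue_b(void)
{
	puts("enqueue inline (may sleep: mutexes, rpipe_get)");
}

static void defer(void)
{
	puts("atomic context: queue_work, enqueue later");
}

static void enqueue(int can_sleep)
{
	if (can_sleep)
		enqueue_b();
	else
		defer();
}

int main(void)
{
	enqueue(1);
	enqueue(0);
	return 0;
}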
-/*
- * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
- * handler] is called.
- *
- * Until a transfer goes successfully through wa_urb_enqueue() it
- * needs to be dequeued with the completion handler called; when stuck
- * in the delayed list or before wa_xfer_setup() is called, we must do
- * the completion ourselves.
- *
- * not setup If there is no hcpriv yet, it means that enqueue()
- *          has not yet had time to set the xfer up. Because
- *          urb->status should be other than -EINPROGRESS,
- *          enqueue() will catch that and bail out.
- *
- * If the transfer has gone through setup, we just need to clean it
- * up. If it has gone through submit(), we have to abort it [with an
- * asynch request] and then make sure we cancel each segment.
- *
- */
-int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
-{
- unsigned long flags;
- struct wa_xfer *xfer;
- struct wa_seg *seg;
- struct wa_rpipe *rpipe;
- unsigned cnt, done = 0, xfer_abort_pending;
- unsigned rpipe_ready = 0;
- int result;
-
- /* check if it is safe to unlink. */
- spin_lock_irqsave(&wa->xfer_list_lock, flags);
- result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
- if ((result == 0) && urb->hcpriv) {
- /*
- * Get a xfer ref to prevent a race with wa_xfer_giveback
- * cleaning up the xfer while we are working with it.
- */
- wa_xfer_get(urb->hcpriv);
- }
- spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
- if (result)
- return result;
-
- xfer = urb->hcpriv;
- if (xfer == NULL)
- return -ENOENT;
- spin_lock_irqsave(&xfer->lock, flags);
- pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
- rpipe = xfer->ep->hcpriv;
- if (rpipe == NULL) {
- pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s",
- __func__, xfer, wa_xfer_id(xfer),
- "Probably already aborted.\n" );
- result = -ENOENT;
- goto out_unlock;
- }
- /*
- * Check for done to avoid racing with wa_xfer_giveback and completing
- * twice.
- */
- if (__wa_xfer_is_done(xfer)) {
- pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
- xfer, wa_xfer_id(xfer));
- result = -ENOENT;
- goto out_unlock;
- }
- /* Check the delayed list -> if there, release and complete */
- spin_lock(&wa->xfer_list_lock);
- if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
- goto dequeue_delayed;
- spin_unlock(&wa->xfer_list_lock);
- if (xfer->seg == NULL) /* still hasn't reached setup() */
- goto out_unlock; /* so enqueue_b() will complete it */
- /* Ok, the xfer is in flight already, it's been setup and submitted.*/
- xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
- /*
- * grab the rpipe->seg_lock here to prevent racing with
- * __wa_xfer_delayed_run.
- */
- spin_lock(&rpipe->seg_lock);
- for (cnt = 0; cnt < xfer->segs; cnt++) {
- seg = xfer->seg[cnt];
- pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
- __func__, wa_xfer_id(xfer), cnt, seg->status);
- switch (seg->status) {
- case WA_SEG_NOTREADY:
- case WA_SEG_READY:
- printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
- xfer, cnt, seg->status);
- WARN_ON(1);
- break;
- case WA_SEG_DELAYED:
- /*
- * delete from rpipe delayed list. If no segments on
- * this xfer have been submitted, __wa_xfer_is_done will
- * trigger a giveback below. Otherwise, the submitted
- * segments will be completed in the DTI interrupt.
- */
- seg->status = WA_SEG_ABORTED;
- seg->result = -ENOENT;
- list_del(&seg->list_node);
- xfer->segs_done++;
- break;
- case WA_SEG_DONE:
- case WA_SEG_ERROR:
- case WA_SEG_ABORTED:
- break;
- /*
- * The buf_in data for a segment in the
- * WA_SEG_DTI_PENDING state is actively being read.
- * Let wa_buf_in_cb handle it since it will be called
- * and will increment xfer->segs_done. Cleaning up
- * here could cause wa_buf_in_cb to access the xfer
- * after it has been completed/freed.
- */
- case WA_SEG_DTI_PENDING:
- break;
- /*
- * In the states below, the HWA device already knows
- * about the transfer. If an abort request was sent,
- * allow the HWA to process it and wait for the
- * results. Otherwise, the DTI state and seg completed
- * counts can get out of sync.
- */
- case WA_SEG_SUBMITTED:
- case WA_SEG_PENDING:
- /*
- * Check if the abort was successfully sent. This could
- * be false if the HWA has been removed but we haven't
- * gotten the disconnect notification yet.
- */
- if (!xfer_abort_pending) {
- seg->status = WA_SEG_ABORTED;
- rpipe_ready = rpipe_avail_inc(rpipe);
- xfer->segs_done++;
- }
- break;
- }
- }
- spin_unlock(&rpipe->seg_lock);
- xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
- done = __wa_xfer_is_done(xfer);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- wa_xfer_put(xfer);
- return result;
-
-out_unlock:
- spin_unlock_irqrestore(&xfer->lock, flags);
- wa_xfer_put(xfer);
- return result;
-
-dequeue_delayed:
- list_del_init(&xfer->list_node);
- spin_unlock(&wa->xfer_list_lock);
- xfer->result = urb->status;
- spin_unlock_irqrestore(&xfer->lock, flags);
- wa_xfer_giveback(xfer);
- wa_xfer_put(xfer);
- usb_put_urb(urb); /* we got a ref in enqueue() */
- return 0;
-}
-EXPORT_SYMBOL_GPL(wa_urb_dequeue);
-
-/*
- * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
- * codes
- *
- * Positive errno values are internal inconsistencies and should be
- * flagged louder. Negative are to be passed up to the user in the
- * normal way.
- *
- * @status: USB WA status code -- high two bits are stripped.
- */
-static int wa_xfer_status_to_errno(u8 status)
-{
- int errno;
- u8 real_status = status;
- static int xlat[] = {
- [WA_XFER_STATUS_SUCCESS] = 0,
- [WA_XFER_STATUS_HALTED] = -EPIPE,
- [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
- [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
- [WA_XFER_RESERVED] = EINVAL,
- [WA_XFER_STATUS_NOT_FOUND] = 0,
- [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
- [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
- [WA_XFER_STATUS_ABORTED] = -ENOENT,
- [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
- [WA_XFER_INVALID_FORMAT] = EINVAL,
- [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
- [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
- };
- status &= 0x3f;
-
- if (status == 0)
- return 0;
- if (status >= ARRAY_SIZE(xlat)) {
- printk_ratelimited(KERN_ERR "%s(): BUG? Unknown WA transfer status 0x%02x\n",
- __func__, real_status);
- return -EINVAL;
- }
- errno = xlat[status];
- if (unlikely(errno > 0)) {
- printk_ratelimited(KERN_ERR "%s(): BUG? Inconsistent WA status: 0x%02x\n",
- __func__, real_status);
- errno = -errno;
- }
- return errno;
-}
-
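/*
 * The translation-table idiom of wa_xfer_status_to_errno() in
 * miniature. Positive entries mark WA status codes that should never
 * reach the host; they are reported and then negated so callers always
 * see a negative errno. The table below is a truncated, hypothetical
 * subset with hard-coded errno values.
 */
#include <stdio.h>

static int to_errno(unsigned int status)
{
	static const int xlat[] = {
		[0] = 0,	/* success */
		[1] = -32,	/* halted -> -EPIPE */
		[4] = 22,	/* reserved: positive = "should not happen" */
	};

	status &= 0x3f;	/* strip the warning/error flag bits */
	if (status >= sizeof(xlat) / sizeof(xlat[0]))
		return -22;	/* unknown -> -EINVAL */
	if (xlat[status] > 0) {
		printf("BUG? inconsistent WA status 0x%02x\n", status);
		return -xlat[status];
	}
	return xlat[status];
}

int main(void)
{
	printf("%d %d %d\n", to_errno(0), to_errno(1), to_errno(4));
	return 0;
}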
-/*
- * If a last segment flag and/or a transfer result error is encountered,
- * no other segment transfer results will be returned from the device.
- * Mark the remaining submitted or pending xfers as completed so that
- * the xfer will complete cleanly.
- *
- * xfer->lock must be held
- *
- */
-static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
- int starting_index, enum wa_seg_status status)
-{
- int index;
- struct wa_rpipe *rpipe = xfer->ep->hcpriv;
-
- for (index = starting_index; index < xfer->segs_submitted; index++) {
- struct wa_seg *current_seg = xfer->seg[index];
-
- BUG_ON(current_seg == NULL);
-
- switch (current_seg->status) {
- case WA_SEG_SUBMITTED:
- case WA_SEG_PENDING:
- case WA_SEG_DTI_PENDING:
- rpipe_avail_inc(rpipe);
- /*
- * do not increment RPIPE avail for the WA_SEG_DELAYED case
- * since it has not been submitted to the RPIPE.
- */
- /* fall through */
- case WA_SEG_DELAYED:
- xfer->segs_done++;
- current_seg->status = status;
- break;
- case WA_SEG_ABORTED:
- break;
- default:
- WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
- __func__, wa_xfer_id(xfer), index,
- current_seg->status);
- break;
- }
- }
-}
-
-/* Populate the given urb based on the current isoc transfer state. */
-static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
- struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
-{
- int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
- int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
- struct usb_iso_packet_descriptor *iso_frame_desc =
- xfer->urb->iso_frame_desc;
- const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
- int next_frame_contiguous;
- struct usb_iso_packet_descriptor *iso_frame;
-
- BUG_ON(buf_in_urb->status == -EINPROGRESS);
-
- /*
- * If the current frame actual_length is contiguous with the next frame
- * and actual_length is a multiple of the DTI endpoint max packet size,
- * combine the current frame with the next frame in a single URB. This
- * reduces the number of URBs that must be submitted in that case.
- */
- seg_index = seg->isoc_frame_index;
- do {
- next_frame_contiguous = 0;
-
- iso_frame = &iso_frame_desc[urb_frame_index];
- total_len += iso_frame->actual_length;
- ++urb_frame_index;
- ++seg_index;
-
- if (seg_index < seg->isoc_frame_count) {
- struct usb_iso_packet_descriptor *next_iso_frame;
-
- next_iso_frame = &iso_frame_desc[urb_frame_index];
-
- if ((iso_frame->offset + iso_frame->actual_length) ==
- next_iso_frame->offset)
- next_frame_contiguous = 1;
- }
- } while (next_frame_contiguous
- && ((iso_frame->actual_length % dti_packet_size) == 0));
-
- /* this should always be 0 before a resubmit. */
- buf_in_urb->num_mapped_sgs = 0;
- buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
- iso_frame_desc[urb_start_frame].offset;
- buf_in_urb->transfer_buffer_length = total_len;
- buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- buf_in_urb->transfer_buffer = NULL;
- buf_in_urb->sg = NULL;
- buf_in_urb->num_sgs = 0;
- buf_in_urb->context = seg;
-
- /* return the number of frames included in this URB. */
- return seg_index - seg->isoc_frame_index;
-}
-
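/*
 * The frame-coalescing test of __wa_populate_buf_in_urb_isoc() above,
 * on plain arrays: consecutive isoc frames are merged into one read
 * while each frame ends exactly where the next begins and its length
 * is a whole number of DTI packets. The 512-byte packet size and the
 * frame layout are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int offset[] = { 0, 1024, 2048, 4096 };
	unsigned int len[]    = { 1024, 1024, 1000, 512 };
	unsigned int i = 0, total = 0, dti_psize = 512, nframes = 4;

	do {
		total += len[i];
		i++;
	} while (i < nframes &&
		 offset[i - 1] + len[i - 1] == offset[i] && /* contiguous */
		 len[i - 1] % dti_psize == 0);	/* full DTI packets */

	printf("coalesced %u frames, %u bytes\n", i, total);	/* 3, 3048 */
	return 0;
}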
-/* Populate the given urb based on the current transfer state. */
-static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
- unsigned int seg_idx, unsigned int bytes_transferred)
-{
- int result = 0;
- struct wa_seg *seg = xfer->seg[seg_idx];
-
- BUG_ON(buf_in_urb->status == -EINPROGRESS);
- /* this should always be 0 before a resubmit. */
- buf_in_urb->num_mapped_sgs = 0;
-
- if (xfer->is_dma) {
- buf_in_urb->transfer_dma = xfer->urb->transfer_dma
- + (seg_idx * xfer->seg_size);
- buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- buf_in_urb->transfer_buffer = NULL;
- buf_in_urb->sg = NULL;
- buf_in_urb->num_sgs = 0;
- } else {
- /* do buffer or SG processing. */
- buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
-
- if (xfer->urb->transfer_buffer) {
- buf_in_urb->transfer_buffer =
- xfer->urb->transfer_buffer
- + (seg_idx * xfer->seg_size);
- buf_in_urb->sg = NULL;
- buf_in_urb->num_sgs = 0;
- } else {
- /* allocate an SG list to store seg_size bytes
- and copy the subset of the xfer->urb->sg
- that matches the buffer subset we are
- about to read. */
- buf_in_urb->sg = wa_xfer_create_subset_sg(
- xfer->urb->sg,
- seg_idx * xfer->seg_size,
- bytes_transferred,
- &(buf_in_urb->num_sgs));
-
- if (!(buf_in_urb->sg)) {
- buf_in_urb->num_sgs = 0;
- result = -ENOMEM;
- }
- buf_in_urb->transfer_buffer = NULL;
- }
- }
- buf_in_urb->transfer_buffer_length = bytes_transferred;
- buf_in_urb->context = seg;
-
- return result;
-}
-
-/*
- * Process an xfer result completion message
- *
- * inbound transfers: need to schedule a buf_in_urb read
- *
- * FIXME: this function needs to be broken up into parts
- */
-static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
- struct wa_xfer_result *xfer_result)
-{
- int result;
- struct device *dev = &wa->usb_iface->dev;
- unsigned long flags;
- unsigned int seg_idx;
- struct wa_seg *seg;
- struct wa_rpipe *rpipe;
- unsigned done = 0;
- u8 usb_status;
- unsigned rpipe_ready = 0;
- unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
- struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
-
- spin_lock_irqsave(&xfer->lock, flags);
- seg_idx = xfer_result->bTransferSegment & 0x7f;
- if (unlikely(seg_idx >= xfer->segs))
- goto error_bad_seg;
- seg = xfer->seg[seg_idx];
- rpipe = xfer->ep->hcpriv;
- usb_status = xfer_result->bTransferStatus;
- dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
- xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
- if (seg->status == WA_SEG_ABORTED
- || seg->status == WA_SEG_ERROR) /* already handled */
- goto segment_aborted;
- if (seg->status == WA_SEG_SUBMITTED) /* oops, got here */
- seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
- if (seg->status != WA_SEG_PENDING) {
- if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
- xfer, seg_idx, seg->status);
- seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
- }
- if (usb_status & 0x80) {
- seg->result = wa_xfer_status_to_errno(usb_status);
- dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
- xfer, xfer->id, seg->index, usb_status);
- seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
- WA_SEG_ABORTED : WA_SEG_ERROR;
- goto error_complete;
- }
- /* FIXME: we ignore warnings, tally them for stats */
- if (usb_status & 0x40) /* Warning?... */
- usb_status = 0; /* ... pass */
- /*
- * If the last segment bit is set, complete the remaining segments.
- * When the current segment is completed, either in wa_buf_in_cb for
- * transfers with data or below for no data, the xfer will complete.
- */
- if (xfer_result->bTransferSegment & 0x80)
- wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
- WA_SEG_DONE);
- if (usb_pipeisoc(xfer->urb->pipe)
- && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
- /* set up WA state to read the isoc packet status next. */
- wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
- wa->dti_isoc_xfer_seg = seg_idx;
- wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
- } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
- && (bytes_transferred > 0)) {
- /* IN data phase: read to buffer */
- seg->status = WA_SEG_DTI_PENDING;
- result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
- bytes_transferred);
- if (result < 0)
- goto error_buf_in_populate;
- ++(wa->active_buf_in_urbs);
- result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
- if (result < 0) {
- --(wa->active_buf_in_urbs);
- goto error_submit_buf_in;
- }
- } else {
- /* OUT data phase or no data, complete it */
- seg->result = bytes_transferred;
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- return;
-
-error_submit_buf_in:
- if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
- dev_err(dev, "DTI: URB max acceptable errors "
- "exceeded, resetting device\n");
- wa_reset_all(wa);
- }
- if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
- xfer, seg_idx, result);
- seg->result = result;
- kfree(buf_in_urb->sg);
- buf_in_urb->sg = NULL;
-error_buf_in_populate:
- __wa_xfer_abort(xfer);
- seg->status = WA_SEG_ERROR;
-error_complete:
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
- done = __wa_xfer_is_done(xfer);
- /*
- * queue work item to clear STALL for control endpoints.
- * Otherwise, let endpoint_reset take care of it.
- */
- if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
- usb_endpoint_xfer_control(&xfer->ep->desc) &&
- done) {
-
- dev_info(dev, "Control EP stall. Queue delayed work.\n");
- spin_lock(&wa->xfer_list_lock);
- /* move xfer from xfer_list to xfer_errored_list. */
- list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
- spin_unlock(&wa->xfer_list_lock);
- spin_unlock_irqrestore(&xfer->lock, flags);
- queue_work(wusbd, &wa->xfer_error_work);
- } else {
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- }
-
- return;
-
-error_bad_seg:
- spin_unlock_irqrestore(&xfer->lock, flags);
- wa_urb_dequeue(wa, xfer->urb, -ENOENT);
- if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
- if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
- dev_err(dev, "DTI: URB max acceptable errors "
- "exceeded, resetting device\n");
- wa_reset_all(wa);
- }
- return;
-
-segment_aborted:
- /* nothing to do, as the aborter did the completion */
- spin_unlock_irqrestore(&xfer->lock, flags);
-}
-
-/*
- * Process an isochronous packet status message
- *
- * inbound transfers: need to schedule a buf_in_urb read
- */
-static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
-{
- struct device *dev = &wa->usb_iface->dev;
- struct wa_xfer_packet_status_hwaiso *packet_status;
- struct wa_xfer_packet_status_len_hwaiso *status_array;
- struct wa_xfer *xfer;
- unsigned long flags;
- struct wa_seg *seg;
- struct wa_rpipe *rpipe;
- unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
- unsigned first_frame_index = 0, rpipe_ready = 0;
- size_t expected_size;
-
- /* We have a xfer result buffer; check it */
- dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
- urb->actual_length, urb->transfer_buffer);
- packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
- if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
- dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
- packet_status->bPacketType);
- goto error_parse_buffer;
- }
- xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
- if (xfer == NULL) {
- dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
- wa->dti_isoc_xfer_in_progress);
- goto error_parse_buffer;
- }
- spin_lock_irqsave(&xfer->lock, flags);
- if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
- goto error_bad_seg;
- seg = xfer->seg[wa->dti_isoc_xfer_seg];
- rpipe = xfer->ep->hcpriv;
- expected_size = struct_size(packet_status, PacketStatus,
- seg->isoc_frame_count);
- if (urb->actual_length != expected_size) {
- dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %zu needed)\n",
- urb->actual_length, expected_size);
- goto error_bad_seg;
- }
- if (le16_to_cpu(packet_status->wLength) != expected_size) {
- dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
- le16_to_cpu(packet_status->wLength));
- goto error_bad_seg;
- }
- /* write isoc packet status and lengths back to the xfer urb. */
- status_array = packet_status->PacketStatus;
- xfer->urb->start_frame =
- wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
- for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
- struct usb_iso_packet_descriptor *iso_frame_desc =
- xfer->urb->iso_frame_desc;
- const int xfer_frame_index =
- seg->isoc_frame_offset + seg_index;
-
- iso_frame_desc[xfer_frame_index].status =
- wa_xfer_status_to_errno(
- le16_to_cpu(status_array[seg_index].PacketStatus));
- iso_frame_desc[xfer_frame_index].actual_length =
- le16_to_cpu(status_array[seg_index].PacketLength);
- /* track the number of frames successfully transferred. */
- if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
- /* save the starting frame index for buf_in_urb. */
- if (!data_frame_count)
- first_frame_index = seg_index;
- ++data_frame_count;
- }
- }
-
- if (xfer->is_inbound && data_frame_count) {
- int result, total_frames_read = 0, urb_index = 0;
- struct urb *buf_in_urb;
-
- /* IN data phase: read to buffer */
- seg->status = WA_SEG_DTI_PENDING;
-
- /* start with the first frame with data. */
- seg->isoc_frame_index = first_frame_index;
- /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
- do {
- int urb_frame_index, urb_frame_count;
- struct usb_iso_packet_descriptor *iso_frame_desc;
-
- buf_in_urb = &(wa->buf_in_urbs[urb_index]);
- urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
- buf_in_urb, xfer, seg);
- /* advance frame index to start of next read URB. */
- seg->isoc_frame_index += urb_frame_count;
- total_frames_read += urb_frame_count;
-
- ++(wa->active_buf_in_urbs);
- result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
-
- /* skip 0-byte frames. */
- urb_frame_index =
- seg->isoc_frame_offset + seg->isoc_frame_index;
- iso_frame_desc =
- &(xfer->urb->iso_frame_desc[urb_frame_index]);
- while ((seg->isoc_frame_index <
- seg->isoc_frame_count) &&
- (iso_frame_desc->actual_length == 0)) {
- ++(seg->isoc_frame_index);
- ++iso_frame_desc;
- }
- ++urb_index;
-
- } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
- && (seg->isoc_frame_index <
- seg->isoc_frame_count));
-
- if (result < 0) {
- --(wa->active_buf_in_urbs);
- dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
- result);
- wa_reset_all(wa);
- } else if (data_frame_count > total_frames_read)
- /* If we need to read more frames, set DTI busy. */
- dti_busy = 1;
- } else {
- /* OUT transfer or no more IN data, complete it */
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (dti_busy)
- wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
- else
- wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- wa_xfer_put(xfer);
- return dti_busy;
-
-error_bad_seg:
- spin_unlock_irqrestore(&xfer->lock, flags);
- wa_xfer_put(xfer);
-error_parse_buffer:
- return dti_busy;
-}
-
-/*
- * Callback for the IN data phase
- *
- * If successful, transition state; otherwise, take note of the
- * error, mark this segment done and try completion.
- *
- * Note we don't access the transfer until we are sure that it
- * hasn't been cancelled (ECONNRESET, ENOENT), in which case
- * seg->xfer may already be gone.
- */
-static void wa_buf_in_cb(struct urb *urb)
-{
- struct wa_seg *seg = urb->context;
- struct wa_xfer *xfer = seg->xfer;
- struct wahc *wa;
- struct device *dev;
- struct wa_rpipe *rpipe;
- unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
- unsigned long flags;
- int resubmit_dti = 0, active_buf_in_urbs;
- u8 done = 0;
-
- /* free the sg if it was used. */
- kfree(urb->sg);
- urb->sg = NULL;
-
- spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- --(wa->active_buf_in_urbs);
- active_buf_in_urbs = wa->active_buf_in_urbs;
- rpipe = xfer->ep->hcpriv;
-
- if (usb_pipeisoc(xfer->urb->pipe)) {
- struct usb_iso_packet_descriptor *iso_frame_desc =
- xfer->urb->iso_frame_desc;
- int seg_index;
-
- /*
- * Find the next isoc frame with data and count how many
- * frames with data remain.
- */
- seg_index = seg->isoc_frame_index;
- while (seg_index < seg->isoc_frame_count) {
- const int urb_frame_index =
- seg->isoc_frame_offset + seg_index;
-
- if (iso_frame_desc[urb_frame_index].actual_length > 0) {
- /* save the index of the next frame with data */
- if (!isoc_data_frame_count)
- seg->isoc_frame_index = seg_index;
- ++isoc_data_frame_count;
- }
- ++seg_index;
- }
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
-
- switch (urb->status) {
- case 0:
- spin_lock_irqsave(&xfer->lock, flags);
-
- seg->result += urb->actual_length;
- if (isoc_data_frame_count > 0) {
- int result, urb_frame_count;
-
- /* submit a read URB for the next frame with data. */
- urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
- xfer, seg);
- /* advance index to start of next read URB. */
- seg->isoc_frame_index += urb_frame_count;
- ++(wa->active_buf_in_urbs);
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result < 0) {
- --(wa->active_buf_in_urbs);
- dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
- result);
- wa_reset_all(wa);
- }
- /*
- * If we are in this callback and
- * isoc_data_frame_count > 0, it means that the dti_urb
- * submission was delayed in wa_dti_cb. Once
- * we submit the last buf_in_urb, we can submit the
- * delayed dti_urb.
- */
- resubmit_dti = (isoc_data_frame_count ==
- urb_frame_count);
- } else if (active_buf_in_urbs == 0) {
- dev_dbg(dev,
- "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
- xfer, wa_xfer_id(xfer), seg->index,
- seg->result);
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_mark_seg_as_done(xfer, seg,
- WA_SEG_DONE);
- }
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- break;
- case -ECONNRESET: /* URB unlinked; no need to do anything */
- case -ENOENT: /* as it was done by whoever unlinked us */
- break;
- default: /* Other errors ... */
- /*
- * Error on data buf read. Only resubmit DTI if it hasn't
- * already been done by previously hitting this error or by a
- * successful completion of the previous buf_in_urb.
- */
- resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
- spin_lock_irqsave(&xfer->lock, flags);
- if (printk_ratelimit())
- dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
- xfer, wa_xfer_id(xfer), seg->index,
- urb->status);
- if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
- EDC_ERROR_TIMEFRAME)){
- dev_err(dev, "DTO: URB max acceptable errors "
- "exceeded, resetting device\n");
- wa_reset_all(wa);
- }
- seg->result = urb->status;
- rpipe_ready = rpipe_avail_inc(rpipe);
- if (active_buf_in_urbs == 0)
- done = __wa_xfer_mark_seg_as_done(xfer, seg,
- WA_SEG_ERROR);
- else
- __wa_xfer_abort(xfer);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
- }
-
- if (resubmit_dti) {
- int result;
-
- wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
-
- result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
- result);
- wa_reset_all(wa);
- }
- }
-}
-
-/*
- * Handle an incoming transfer result buffer
- *
- * Given a transfer result buffer, it completes the transfer (possibly
- * scheduling a buffer-in read) and then resubmits the DTI URB for a
- * new transfer result read.
- *
- *
- * The xfer_result DTI URB state machine
- *
- * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
- *
- * We start in OFF mode, the first xfer_result notification [through
- * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
- * read.
- *
- * We receive a buffer -- if it is not an xfer_result, we complain and
- * repost the DTI-URB. If it is an xfer_result, we do the xfer seg
- * request accounting. If it is an IN segment, we move to RBI and post
- * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
- * repost the DTI-URB and move back to the RXR state. If there was no
- * IN segment, it will repost the DTI-URB.
- *
- * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
- * errors) in the URBs.
- */
-static void wa_dti_cb(struct urb *urb)
-{
- int result, dti_busy = 0;
- struct wahc *wa = urb->context;
- struct device *dev = &wa->usb_iface->dev;
- u32 xfer_id;
- u8 usb_status;
-
- BUG_ON(wa->dti_urb != urb);
- switch (wa->dti_urb->status) {
- case 0:
- if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
- struct wa_xfer_result *xfer_result;
- struct wa_xfer *xfer;
-
- /* We have a xfer result buffer; check it */
- dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
- urb->actual_length, urb->transfer_buffer);
- if (urb->actual_length != sizeof(*xfer_result)) {
- dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
- urb->actual_length,
- sizeof(*xfer_result));
- break;
- }
- xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
- if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
- dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
- xfer_result->hdr.bLength);
- break;
- }
- if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
- dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
- xfer_result->hdr.bNotifyType);
- break;
- }
- xfer_id = le32_to_cpu(xfer_result->dwTransferID);
- usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
- /* taken care of already */
- dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
- __func__, xfer_id,
- xfer_result->bTransferSegment & 0x7f);
- break;
- }
- xfer = wa_xfer_get_by_id(wa, xfer_id);
- if (xfer == NULL) {
- /* FIXME: transaction not found. */
- dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
- xfer_id, usb_status);
- break;
- }
- wa_xfer_result_chew(wa, xfer, xfer_result);
- wa_xfer_put(xfer);
- } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
- dti_busy = wa_process_iso_packet_status(wa, urb);
- } else {
- dev_err(dev, "DTI Error: unexpected EP state = %d\n",
- wa->dti_state);
- }
- break;
- case -ENOENT: /* (we killed the URB)...so, no broadcast */
- case -ESHUTDOWN: /* going away! */
- dev_dbg(dev, "DTI: going down! %d\n", urb->status);
- goto out;
- default:
- /* Unknown error */
- if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
- EDC_ERROR_TIMEFRAME)) {
- dev_err(dev, "DTI: URB max acceptable errors "
- "exceeded, resetting device\n");
- wa_reset_all(wa);
- goto out;
- }
- if (printk_ratelimit())
- dev_err(dev, "DTI: URB error %d\n", urb->status);
- break;
- }
-
- /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
- if (!dti_busy) {
- result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
- result);
- wa_reset_all(wa);
- }
- }
-out:
- return;
-}
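
The RXR/RBI cycle documented above boils down to a small state machine. Below is a minimal standalone sketch of those transitions -- it borrows the WA_DTI_* state names from this file but is a userspace model, not driver code:

#include <stdio.h>

enum wa_dti_state {                        /* mirrors the driver's states */
        WA_DTI_TRANSFER_RESULT_PENDING,    /* RXR: wait for an xfer result */
        WA_DTI_ISOC_PACKET_STATUS_PENDING, /* RXR: wait for isoc status */
        WA_DTI_BUF_IN_DATA_PENDING,        /* RBI: buf-in URBs outstanding */
};

/* One decision step, as made by wa_dti_cb()/wa_buf_in_cb(). */
static enum wa_dti_state dti_next(enum wa_dti_state s, int in_data_left)
{
        if (s == WA_DTI_TRANSFER_RESULT_PENDING && in_data_left)
                return WA_DTI_BUF_IN_DATA_PENDING;     /* RXR -> RBI */
        if (s == WA_DTI_BUF_IN_DATA_PENDING && !in_data_left)
                return WA_DTI_TRANSFER_RESULT_PENDING; /* RBI -> RXR */
        return s;
}

int main(void)
{
        enum wa_dti_state s = WA_DTI_TRANSFER_RESULT_PENDING;

        s = dti_next(s, 1); /* xfer result with IN data pending: to RBI */
        s = dti_next(s, 0); /* last buf-in URB completed: back to RXR */
        printf("final state: %d\n", s);
        return 0;
}
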
-
-/*
- * Initialize the DTI URB for reading transfer result notifications and also
- * the buffer-in URB, for reading buffers. Then we just submit the DTI URB.
- */
-int wa_dti_start(struct wahc *wa)
-{
- const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
- struct device *dev = &wa->usb_iface->dev;
- int result = -ENOMEM, index;
-
- if (wa->dti_urb != NULL) /* DTI URB already started */
- goto out;
-
- wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->dti_urb == NULL)
- goto error_dti_urb_alloc;
- usb_fill_bulk_urb(
- wa->dti_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
- wa->dti_buf, wa->dti_buf_size,
- wa_dti_cb, wa);
-
- /* init the buf in URBs */
- for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
- usb_fill_bulk_urb(
- &(wa->buf_in_urbs[index]), wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev,
- 0x80 | dti_epd->bEndpointAddress),
- NULL, 0, wa_buf_in_cb, wa);
- }
- result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
- result);
- goto error_dti_urb_submit;
- }
-out:
- return 0;
-
-error_dti_urb_submit:
- usb_put_urb(wa->dti_urb);
- wa->dti_urb = NULL;
-error_dti_urb_alloc:
- return result;
-}
-EXPORT_SYMBOL_GPL(wa_dti_start);
-/*
- * Transfer complete notification
- *
- * Called from the notif.c code. We get a notification on EP2 saying
- * that some endpoint has some transfer result data available. We are
- * about to read it.
- *
- * To speed things up, we always have a URB reading the DTI endpoint; we
- * don't really set it up and start it until the first xfer complete
- * notification arrives, which is what we do here.
- *
- * Follow up in wa_dti_cb(), as that's where the whole state
- * machine starts.
- *
- * @wa shall be referenced
- */
-void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
-{
- struct device *dev = &wa->usb_iface->dev;
- struct wa_notif_xfer *notif_xfer;
- const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
-
- notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
- BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
-
- if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
- /* FIXME: hardcoded limitation, adapt */
- dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
- notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
- goto error;
- }
-
- /* attempt to start the DTI ep processing. */
- if (wa_dti_start(wa) < 0)
- goto error;
-
- return;
-
-error:
- wa_reset_all(wa);
-}
diff --git a/drivers/staging/wusbcore/wusbhc.c b/drivers/staging/wusbcore/wusbhc.c
deleted file mode 100644
index d0b404d258e8..000000000000
--- a/drivers/staging/wusbcore/wusbhc.c
+++ /dev/null
@@ -1,490 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB Host Controller
- * sysfs glue, wusbcore module support and life cycle management
- *
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Creation/destruction of wusbhc is split in two parts: the part that
- * doesn't require the HCD to be added (wusbhc_{create,destroy}) and
- * the one that requires it (phase B, wusbhc_b_{create,destroy}).
- *
- * This is so because usb_add_hcd() will start the HC, and thus, all
- * the HC specific stuff has to be already initialized (like sysfs
- * thingies).
- */
-#include <linux/device.h>
-#include <linux/module.h>
-#include "wusbhc.h"
-
-/**
- * Extract the wusbhc that corresponds to a USB Host Controller class device
- *
- * WARNING! Apply only if @dev is that of a
- * wusbhc.usb_hcd.self->class_dev; otherwise, you lose.
- */
-static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
-{
- struct usb_bus *usb_bus = dev_get_drvdata(dev);
- struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus);
- return usb_hcd_to_wusbhc(usb_hcd);
-}
-
-/*
- * Show & store the current WUSB trust timeout
- *
- * We don't do locking--it is an 'atomic' value.
- *
- * The units that we store/show are always MILLISECONDS; the stored
- * value is converted to jiffies only when the timer is armed.
- */
-static ssize_t wusb_trust_timeout_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout);
-}
-
-static ssize_t wusb_trust_timeout_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
- ssize_t result = -ENOSYS;
- unsigned trust_timeout;
-
- result = sscanf(buf, "%u", &trust_timeout);
- if (result != 1) {
- result = -EINVAL;
- goto out;
- }
- wusbhc->trust_timeout = min_t(unsigned, trust_timeout, 500);
- cancel_delayed_work(&wusbhc->keep_alive_timer);
- flush_workqueue(wusbd);
- queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
- msecs_to_jiffies(wusbhc->trust_timeout / 2));
-out:
- return result < 0 ? result : size;
-}
-static DEVICE_ATTR_RW(wusb_trust_timeout);
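
The store path above clamps the timeout to 500 ms and re-arms the keep-alive timer at half that value; the arithmetic can be checked standalone. A minimal sketch, assuming HZ=250 and modeling the kernel's round-up msecs_to_jiffies():

#include <stdio.h>

#define HZ 250                          /* assumed tick rate for the example */

static unsigned msecs_to_jiffies_model(unsigned ms)
{
        return (ms * HZ + 999) / 1000;  /* round up, like the kernel helper */
}

int main(void)
{
        unsigned trust_timeout = 700;   /* user wrote 700 ... */

        if (trust_timeout > 500)
                trust_timeout = 500;    /* ... the store clamps to 500 ms */

        /* keep-alives go out at half the trust timeout */
        printf("keep-alive every %u jiffies\n",
               msecs_to_jiffies_model(trust_timeout / 2));
        return 0;
}
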
-
-/*
- * Show the current WUSB CHID.
- */
-static ssize_t wusb_chid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
- const struct wusb_ckhdid *chid;
-
- if (wusbhc->wuie_host_info != NULL)
- chid = &wusbhc->wuie_host_info->CHID;
- else
- chid = &wusb_ckhdid_zero;
-
- return sprintf(buf, "%16ph\n", chid->data);
-}
-
-/*
- * Store a new CHID.
- *
- * - Write an all zeros CHID and it will stop the controller
- * - Write a non-zero CHID and it will start it.
- *
- * See wusbhc_chid_set() for more info.
- */
-static ssize_t wusb_chid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
- struct wusb_ckhdid chid;
- ssize_t result;
-
- result = sscanf(buf,
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx "
- "%02hhx %02hhx %02hhx %02hhx\n",
- &chid.data[0] , &chid.data[1] ,
- &chid.data[2] , &chid.data[3] ,
- &chid.data[4] , &chid.data[5] ,
- &chid.data[6] , &chid.data[7] ,
- &chid.data[8] , &chid.data[9] ,
- &chid.data[10], &chid.data[11],
- &chid.data[12], &chid.data[13],
- &chid.data[14], &chid.data[15]);
- if (result != 16) {
- dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): "
- "%d\n", (int)result);
- return -EINVAL;
- }
- result = wusbhc_chid_set(wusbhc, &chid);
- return result < 0 ? result : size;
-}
-static DEVICE_ATTR_RW(wusb_chid);
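
wusb_chid_store() depends on sscanf() matching exactly 16 two-digit hex fields; the parse can be exercised standalone. A minimal sketch (the CHID value is made up):

#include <stdio.h>

int main(void)
{
        const char *buf = "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff";
        unsigned char d[16];
        int n = sscanf(buf,
                       "%02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx "
                       "%02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx",
                       &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7],
                       &d[8], &d[9], &d[10], &d[11], &d[12], &d[13], &d[14], &d[15]);

        /* wusb_chid_store() rejects anything that does not yield 16 fields */
        printf("parsed %d bytes, first=0x%02x last=0x%02x\n", n, d[0], d[15]);
        return n == 16 ? 0 : 1;
}
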
-
-
-static ssize_t wusb_phy_rate_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
-
- return sprintf(buf, "%d\n", wusbhc->phy_rate);
-}
-
-static ssize_t wusb_phy_rate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
- uint8_t phy_rate;
- ssize_t result;
-
- result = sscanf(buf, "%hhu", &phy_rate);
- if (result != 1)
- return -EINVAL;
- if (phy_rate >= UWB_PHY_RATE_INVALID)
- return -EINVAL;
-
- wusbhc->phy_rate = phy_rate;
- return size;
-}
-static DEVICE_ATTR_RW(wusb_phy_rate);
-
-static ssize_t wusb_dnts_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
-
- return sprintf(buf, "num slots: %d\ninterval: %dms\n",
- wusbhc->dnts_num_slots, wusbhc->dnts_interval);
-}
-
-static ssize_t wusb_dnts_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
- uint8_t num_slots, interval;
- ssize_t result;
-
- result = sscanf(buf, "%hhu %hhu", &num_slots, &interval);
-
- if (result != 2)
- return -EINVAL;
-
- wusbhc->dnts_num_slots = num_slots;
- wusbhc->dnts_interval = interval;
-
- return size;
-}
-static DEVICE_ATTR_RW(wusb_dnts);
-
-static ssize_t wusb_retry_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
-
- return sprintf(buf, "%d\n", wusbhc->retry_count);
-}
-
-static ssize_t wusb_retry_count_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
- uint8_t retry_count;
- ssize_t result;
-
- result = sscanf(buf, "%hhu", &retry_count);
-
- if (result != 1)
- return -EINVAL;
-
- wusbhc->retry_count = max_t(uint8_t, retry_count,
- WUSB_RETRY_COUNT_MAX);
-
- return size;
-}
-static DEVICE_ATTR_RW(wusb_retry_count);
-
-/* Group all the WUSBHC attributes */
-static struct attribute *wusbhc_attrs[] = {
- &dev_attr_wusb_trust_timeout.attr,
- &dev_attr_wusb_chid.attr,
- &dev_attr_wusb_phy_rate.attr,
- &dev_attr_wusb_dnts.attr,
- &dev_attr_wusb_retry_count.attr,
- NULL,
-};
-
-static const struct attribute_group wusbhc_attr_group = {
- .name = NULL, /* we want them in the same directory */
- .attrs = wusbhc_attrs,
-};
-
-/*
- * Create a wusbhc instance
- *
- * NOTEs:
- *
- * - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been
- * initialized but not added.
- *
- * - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling.
- *
- * - fill out wusbhc->uwb_rc and refcount it before calling
- * - fill out the wusbhc->sec_modes array
- */
-int wusbhc_create(struct wusbhc *wusbhc)
-{
- int result = 0;
-
- /* set defaults. These can be overwritten using sysfs attributes. */
- wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
- wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1;
- wusbhc->dnts_num_slots = 4;
- wusbhc->dnts_interval = 2;
- wusbhc->retry_count = WUSB_RETRY_COUNT_INFINITE;
-
- mutex_init(&wusbhc->mutex);
- result = wusbhc_mmcie_create(wusbhc);
- if (result < 0)
- goto error_mmcie_create;
- result = wusbhc_devconnect_create(wusbhc);
- if (result < 0)
- goto error_devconnect_create;
- result = wusbhc_rh_create(wusbhc);
- if (result < 0)
- goto error_rh_create;
- result = wusbhc_sec_create(wusbhc);
- if (result < 0)
- goto error_sec_create;
- return 0;
-
-error_sec_create:
- wusbhc_rh_destroy(wusbhc);
-error_rh_create:
- wusbhc_devconnect_destroy(wusbhc);
-error_devconnect_create:
- wusbhc_mmcie_destroy(wusbhc);
-error_mmcie_create:
- return result;
-}
-EXPORT_SYMBOL_GPL(wusbhc_create);
-
-static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc)
-{
- return &wusbhc->usb_hcd.self.controller->kobj;
-}
-
-/*
- * Phase B of a wusbhc instance creation
- *
- * Creates fields that depend on wusbhc->usb_hcd having been
- * added. This is where we create the sysfs files in
- * /sys/class/usb_host/usb_hostX/.
- *
- * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper
- * layer (hwahc or whci)
- */
-int wusbhc_b_create(struct wusbhc *wusbhc)
-{
- int result = 0;
- struct device *dev = wusbhc->usb_hcd.self.controller;
-
- result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
- if (result < 0) {
- dev_err(dev, "Cannot register WUSBHC attributes: %d\n",
- result);
- goto error_create_attr_group;
- }
-
- return 0;
-error_create_attr_group:
- return result;
-}
-EXPORT_SYMBOL_GPL(wusbhc_b_create);
-
-void wusbhc_b_destroy(struct wusbhc *wusbhc)
-{
- wusbhc_pal_unregister(wusbhc);
- sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
-}
-EXPORT_SYMBOL_GPL(wusbhc_b_destroy);
-
-void wusbhc_destroy(struct wusbhc *wusbhc)
-{
- wusbhc_sec_destroy(wusbhc);
- wusbhc_rh_destroy(wusbhc);
- wusbhc_devconnect_destroy(wusbhc);
- wusbhc_mmcie_destroy(wusbhc);
-}
-EXPORT_SYMBOL_GPL(wusbhc_destroy);
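
Taken together, the pairs above impose a fixed call order on host controller drivers: phase A before the HCD is added, phase B after. A minimal standalone sketch with stubbed phases (the stub names are illustrative, not driver API):

#include <stdio.h>

static int wusbhc_create_stub(void)   { puts("phase A: pre-HCD state");    return 0; }
static int usb_add_hcd_stub(void)     { puts("HC started by USB core");    return 0; }
static int wusbhc_b_create_stub(void) { puts("phase B: sysfs attributes"); return 0; }

int main(void)
{
        /* phase A must finish before the HC is started ... */
        if (wusbhc_create_stub())
                return 1;
        /* ... because adding the HCD starts the HC immediately ... */
        if (usb_add_hcd_stub())
                return 1;
        /* ... and phase B needs the HCD to already be registered. */
        return wusbhc_b_create_stub();
}
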
-
-struct workqueue_struct *wusbd;
-EXPORT_SYMBOL_GPL(wusbd);
-
-/*
- * WUSB Cluster ID allocation map
- *
- * Each WUSB bus in a channel is identified with a Cluster Id in the
- * unauth address space (WUSB1.0[4.3]). We take the range 0xe0 to 0xff
- * (that's space for 31 WUSB controllers, as 0xff can't be taken). We
- * start taking from 0xff, 0xfe, 0xfd... (hence the 0xff - id mapping).
- *
- * For each one we take, we pin it in the bitmap.
- */
-#define CLUSTER_IDS 32
-static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS);
-static DEFINE_SPINLOCK(wusb_cluster_ids_lock);
-
-/*
- * Get a WUSB Cluster ID
- *
- * Need to release with wusb_cluster_id_put() when done w/ it.
- */
-/* FIXME: coordinate with the choose_address() from the USB stack */
-/* we want to leave the top of the 128 range for cluster addresses and
- * the bottom for device addresses (as we map them one on one with
- * ports). */
-u8 wusb_cluster_id_get(void)
-{
- u8 id;
- spin_lock(&wusb_cluster_ids_lock);
- id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
- if (id >= CLUSTER_IDS) {
- id = 0;
- goto out;
- }
- set_bit(id, wusb_cluster_id_table);
- id = (u8) 0xff - id;
-out:
- spin_unlock(&wusb_cluster_ids_lock);
- return id;
-
-}
-EXPORT_SYMBOL_GPL(wusb_cluster_id_get);
-
-/*
- * Release a WUSB Cluster ID
- *
- * Obtained it with wusb_cluster_id_get()
- */
-void wusb_cluster_id_put(u8 id)
-{
- id = 0xff - id;
- BUG_ON(id >= CLUSTER_IDS);
- spin_lock(&wusb_cluster_ids_lock);
- WARN_ON(!test_bit(id, wusb_cluster_id_table));
- clear_bit(id, wusb_cluster_id_table);
- spin_unlock(&wusb_cluster_ids_lock);
-}
-EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
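
The bit-to-ID mapping behind these two helpers is easy to model outside the kernel. A minimal standalone sketch assuming the same 32-entry table (it omits the 0xff reservation done at module init):

#include <stdio.h>

#define CLUSTER_IDS 32
static unsigned long table;     /* bit n set => cluster ID 0xff - n in use */

static unsigned char cluster_id_get(void)
{
        int n;

        for (n = 0; n < CLUSTER_IDS; n++) {
                if (!(table & (1UL << n))) {
                        table |= 1UL << n;
                        return 0xff - n;
                }
        }
        return 0;               /* exhausted */
}

static void cluster_id_put(unsigned char id)
{
        table &= ~(1UL << (0xff - id));
}

int main(void)
{
        unsigned char a = cluster_id_get();     /* 0xff */
        unsigned char b = cluster_id_get();     /* 0xfe */

        printf("a=0x%02x b=0x%02x\n", a, b);
        cluster_id_put(a);
        printf("after put: next=0x%02x\n", cluster_id_get()); /* 0xff again */
        return 0;
}
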
-
-/**
- * wusbhc_giveback_urb - return an URB to the USB core
- * @wusbhc: the host controller the URB is from.
- * @urb: the URB.
- * @status: the URB's status.
- *
- * Return an URB to the USB core doing some additional WUSB specific
- * processing.
- *
- * - After a successful transfer, update the trust timeout timestamp
- * for the WUSB device.
- *
- * - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission
- * condition for the WCONNECTACK_IE is that the host has observed
- * the associated device responding to a control transfer.
- */
-void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
-{
- struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc,
- urb->dev);
-
- if (status == 0 && wusb_dev) {
- wusb_dev->entry_ts = jiffies;
-
- /* wusbhc_devconnect_acked() can't be called from
- atomic context so defer it to a work queue. */
- if (!list_empty(&wusb_dev->cack_node))
- queue_work(wusbd, &wusb_dev->devconnect_acked_work);
- else
- wusb_dev_put(wusb_dev);
- }
-
- usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);
-}
-EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
-
-/**
- * wusbhc_reset_all - reset the HC hardware
- * @wusbhc: the host controller to reset.
- *
- * Request a full hardware reset of the chip. This will also reset
- * the radio controller and any other PALs.
- */
-void wusbhc_reset_all(struct wusbhc *wusbhc)
-{
- if (wusbhc->uwb_rc)
- uwb_rc_reset_all(wusbhc->uwb_rc);
-}
-EXPORT_SYMBOL_GPL(wusbhc_reset_all);
-
-static struct notifier_block wusb_usb_notifier = {
- .notifier_call = wusb_usb_ncb,
- .priority = INT_MAX /* Need to be called first of all */
-};
-
-static int __init wusbcore_init(void)
-{
- int result;
- result = wusb_crypto_init();
- if (result < 0)
- goto error_crypto_init;
- /* WQ is singlethread because we need to serialize notifications */
- wusbd = create_singlethread_workqueue("wusbd");
- if (wusbd == NULL) {
- result = -ENOMEM;
- printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n");
- goto error_wusbd_create;
- }
- usb_register_notify(&wusb_usb_notifier);
- bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS);
- set_bit(0, wusb_cluster_id_table); /* reserve Cluster ID 0xff */
- return 0;
-
-error_wusbd_create:
- wusb_crypto_exit();
-error_crypto_init:
- return result;
-
-}
-module_init(wusbcore_init);
-
-static void __exit wusbcore_exit(void)
-{
- clear_bit(0, wusb_cluster_id_table);
- if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
- printk(KERN_ERR "BUG: WUSB Cluster IDs not released on exit: %*pb\n",
- CLUSTER_IDS, wusb_cluster_id_table);
- WARN_ON(1);
- }
- usb_unregister_notify(&wusb_usb_notifier);
- destroy_workqueue(wusbd);
- wusb_crypto_exit();
-}
-module_exit(wusbcore_exit);
-
-MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
-MODULE_DESCRIPTION("Wireless USB core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/wusbhc.h b/drivers/staging/wusbcore/wusbhc.h
deleted file mode 100644
index 716244a2ec44..000000000000
--- a/drivers/staging/wusbcore/wusbhc.h
+++ /dev/null
@@ -1,487 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Wireless USB Host Controller
- * Common infrastructure for WHCI and HWA WUSB-HC drivers
- *
- *
- * Copyright (C) 2005-2006 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * This driver implements parts common to all Wireless USB Host
- * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
- * by:
- *
- * - hwahc: HWA, USB-dongle that implements a Wireless USB host
- * controller, (Wireless USB 1.0 Host-Wire-Adapter specification).
- *
- * - whci: WHCI, a PCI card with a wireless host controller
- * (Wireless Host Controller Interface 1.0 specification).
- *
- * Check out the Design-overview.txt file in the source documentation
- * for other details on the implementation.
- *
- * Main blocks:
- *
- * rh Root Hub emulation (part of the HCD glue)
- *
- * devconnect Handle all the issues related to device connection,
- * authentication, disconnection, timeout, resetting,
- * keepalives, etc.
- *
- * mmc MMC IE broadcasting handling
- *
- * A host controller driver just initializes its stuff and as part of
- * that, creates a 'struct wusbhc' instance that handles all the
- * common WUSB mechanisms. Links in the function ops that are specific
- * to it and then registers the host controller. Ready to run.
- */
-
-#ifndef __WUSBHC_H__
-#define __WUSBHC_H__
-
-#include <linux/usb.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include <linux/workqueue.h>
-#include <linux/usb/hcd.h>
-#include "../uwb/uwb.h"
-#include "include/wusb.h"
-
-/*
- * Time from a WUSB channel stop request to the last transmitted MMC.
- *
- * This needs to be > 4.096 ms in case no MMCs can be transmitted in
- * zone 0.
- */
-#define WUSB_CHANNEL_STOP_DELAY_MS 8
-#define WUSB_RETRY_COUNT_MAX 15
-#define WUSB_RETRY_COUNT_INFINITE 0
-
-/**
- * Wireless USB device
- *
- * Describe a WUSB device connected to the cluster. This struct
- * belongs to the 'struct wusb_port' it is attached to and it is
- * responsible for putting and clearing the pointer to it.
- *
- * Note this "complements" the 'struct usb_device' that the usb_hcd
- * keeps for each connected USB device. However, it carries some
- * information that is not available there (there is no hcpriv ptr in it!)
- * *and* most importantly, its life cycle is different. It is created
- * as soon as we get a DN_Connect (connect request notification) from
- * the device through the WUSB host controller; the USB stack doesn't
- * create the device until we authenticate it. FIXME: this will
- * change.
- *
- * @bos: This is allocated when the BOS descriptors are read from
- * the device and freed when the wusb_dev struct dies.
- * @wusb_cap_descr: points into @bos, and has been verified to be size
- * safe.
- */
-struct wusb_dev {
- struct kref refcnt;
- struct wusbhc *wusbhc;
- struct list_head cack_node; /* Connect-Ack list */
- struct list_head rekey_node; /* GTK rekey list */
- u8 port_idx;
- u8 addr;
- u8 beacon_type:4;
- struct usb_encryption_descriptor ccm1_etd;
- struct wusb_ckhdid cdid;
- unsigned long entry_ts;
- struct usb_bos_descriptor *bos;
- struct usb_wireless_cap_descriptor *wusb_cap_descr;
- struct uwb_mas_bm availability;
- struct work_struct devconnect_acked_work;
- struct usb_device *usb_dev;
-};
-
-#define WUSB_DEV_ADDR_UNAUTH 0x80
-
-static inline void wusb_dev_init(struct wusb_dev *wusb_dev)
-{
- kref_init(&wusb_dev->refcnt);
- /* no need to init the cack_node */
-}
-
-extern void wusb_dev_destroy(struct kref *_wusb_dev);
-
-static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev)
-{
- kref_get(&wusb_dev->refcnt);
- return wusb_dev;
-}
-
-static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
-{
- kref_put(&wusb_dev->refcnt, wusb_dev_destroy);
-}
-
-/**
- * Wireless USB Host Controller root hub "fake" ports
- * (state and device information)
- *
- * Wireless USB is wireless, so there are no ports; but we
- * fake'em. Each RC can connect a maximum number of devices at the
- * same time (given in the Wireless Adapter descriptor, bNumPorts or
- * WHCI's caps), referred to in wusbhc->ports_max.
- *
- * See rh.c for more information.
- *
- * The @status and @change use the same bits as in USB2.0[11.24.2.7],
- * so we don't have to do much when getting the port's status.
- *
- * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10],
- * include/linux/usb_ch9.h (#define USB_PORT_STAT_*)
- */
-struct wusb_port {
- u16 status;
- u16 change;
- struct wusb_dev *wusb_dev; /* connected device's info */
- u32 ptk_tkid;
-};
-
-/**
- * WUSB Host Controller specifics
- *
- * All fields that are common to all Wireless USB controller types
- * (HWA and WHCI) are grouped here. Host Controller
- * functions/operations that only deal with general Wireless USB HC
- * issues use this data type to refer to the host.
- *
- * @usb_hcd Instantiation of a USB host controller
- * (initialized by upper layer [HWA=HC or WHCI].
- *
- * @dev Device that implements this; initialized by the
- * upper layer (HWA-HC, WHCI...); this device should
- * have a refcount.
- *
- * @trust_timeout After this time without hearing for device
- * activity, we consider the device gone and we have to
- * re-authenticate.
- *
- * Can be accessed w/o locking--however, read it into a
- * local variable, then use that.
- *
- * @chid WUSB Cluster Host ID: this is supposed to be a
- * unique value that doesn't change across reboots (so
- * that your devices do not require re-association).
- *
- * Read/Write protected by @mutex
- *
- * @dev_info This array has ports_max elements. It is used to
- * give the HC information about the WUSB devices (see
- * 'struct wusb_dev_info').
- *
- * For HWA we need to allocate it in heap; for WHCI it
- * needs to be permanently mapped, so we keep it for
- * both and make it easy. Call wusbhc->dev_info_set()
- * to update an entry.
- *
- * @ports_max Number of simultaneous device connections (fake
- * ports) this HC will take. Read-only.
- *
- * @port Array of port status for each fake root port. Guaranteed to
- * always be the same length during device existence
- * [this allows for some unlocked but referenced reading].
- *
- * @mmcies_max Max number of Information Elements this HC can send
- * in its MMC. Read-only.
- *
- * @start Start the WUSB channel.
- *
- * @stop Stop the WUSB channel after the specified number of
- * milliseconds. Channel Stop IEs should be transmitted
- * as required by [WUSB] 4.16.2.1.
- *
- * @mmcie_add HC specific operation (WHCI or HWA) for adding an
- * MMCIE.
- *
- * @mmcie_rm HC specific operation (WHCI or HWA) for removing an
- * MMCIE.
- *
- * @set_ptk: Set the PTK and enable encryption for a device. Or, if
- * the supplied key is NULL, disable encryption for that
- * device.
- *
- * @set_gtk: Set the GTK to be used for all future broadcast packets
- * (i.e., MMCs). With some hardware, setting the GTK may start
- * MMC transmission.
- *
- * NOTE:
- *
- * - If wusb_dev->usb_dev is not NULL, then usb_dev is valid
- * (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev
- * is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a
- * refcount on it).
- *
- * Most of the time when you need to use it, it will be non-NULL,
- * so there is no real need to check for it (wusb_dev will
- * disappear before usb_dev).
- *
- * - The following fields need to be filled out before calling
- * wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}.
- *
- * - there is no wusbhc_init() method, we do everything in
- * wusbhc_create().
- *
- * - Creation is done in two phases, wusbhc_create() and
- * wusbhc_b_create(); phase b covers the parts that must be called
- * after usb_add_hcd(&wusbhc->usb_hcd).
- */
-struct wusbhc {
- struct usb_hcd usb_hcd; /* HAS TO BE 1st */
- struct device *dev;
- struct uwb_rc *uwb_rc;
- struct uwb_pal pal;
-
- unsigned trust_timeout; /* in milliseconds */
- struct wusb_ckhdid chid;
- uint8_t phy_rate;
- uint8_t dnts_num_slots;
- uint8_t dnts_interval;
- uint8_t retry_count;
- struct wuie_host_info *wuie_host_info;
-
- struct mutex mutex; /* locks everything else */
- u16 cluster_id; /* Wireless USB Cluster ID */
- struct wusb_port *port; /* Fake port status handling */
- struct wusb_dev_info *dev_info; /* for Set Device Info mgmt */
- u8 ports_max;
- unsigned active:1; /* currently xmit'ing MMCs */
- struct wuie_keep_alive keep_alive_ie; /* protected by mutex */
- struct delayed_work keep_alive_timer;
- struct list_head cack_list; /* Connect acknowledging */
- size_t cack_count; /* protected by 'mutex' */
- struct wuie_connect_ack cack_ie;
- struct uwb_rsv *rsv; /* cluster bandwidth reservation */
-
- struct mutex mmcie_mutex; /* MMC WUIE handling */
- struct wuie_hdr **mmcie; /* WUIE array */
- u8 mmcies_max;
- /* FIXME: make wusbhc_ops? */
- int (*start)(struct wusbhc *wusbhc);
- void (*stop)(struct wusbhc *wusbhc, int delay);
- int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
- u8 handle, struct wuie_hdr *wuie);
- int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle);
- int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev);
- int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index,
- const struct uwb_mas_bm *);
- int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx,
- u32 tkid, const void *key, size_t key_size);
- int (*set_gtk)(struct wusbhc *wusbhc,
- u32 tkid, const void *key, size_t key_size);
- int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots);
-
- struct {
- struct usb_key_descriptor descr;
- u8 data[16]; /* GTK key data */
- } __attribute__((packed)) gtk;
- u8 gtk_index;
- u32 gtk_tkid;
-
- /* workqueue for WUSB security related tasks. */
- struct workqueue_struct *wq_security;
- struct work_struct gtk_rekey_work;
-
- struct usb_encryption_descriptor *ccm1_etd;
-};
-
-#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd)
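
The cast works only because usb_hcd is the first member of struct wusbhc (see the "HAS TO BE 1st" note above); the container_of() recovery can be shown with stand-in types. A minimal standalone sketch:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct hcd_stub { int dummy; };
struct wusbhc_stub {
        struct hcd_stub usb_hcd;        /* must stay the first member */
        int cluster_id;
};

int main(void)
{
        struct wusbhc_stub w = { .cluster_id = 0xfe };
        struct hcd_stub *hcd = &w.usb_hcd; /* what the USB core hands back */

        /* recover the enclosing wusbhc from the embedded hcd pointer */
        struct wusbhc_stub *back = container_of(hcd, struct wusbhc_stub, usb_hcd);
        printf("cluster_id=0x%02x\n", back->cluster_id);
        return 0;
}
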
-
-
-extern int wusbhc_create(struct wusbhc *);
-extern int wusbhc_b_create(struct wusbhc *);
-extern void wusbhc_b_destroy(struct wusbhc *);
-extern void wusbhc_destroy(struct wusbhc *);
-extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *,
- struct wusb_dev *);
-extern void wusb_dev_sysfs_rm(struct wusb_dev *);
-extern int wusbhc_sec_create(struct wusbhc *);
-extern int wusbhc_sec_start(struct wusbhc *);
-extern void wusbhc_sec_stop(struct wusbhc *);
-extern void wusbhc_sec_destroy(struct wusbhc *);
-extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb,
- int status);
-void wusbhc_reset_all(struct wusbhc *wusbhc);
-
-int wusbhc_pal_register(struct wusbhc *wusbhc);
-void wusbhc_pal_unregister(struct wusbhc *wusbhc);
-
-/*
- * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone
- *
- * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
- *
- * This is a safe assumption as @usb_dev->bus is referenced all the
- * time during the @usb_dev life cycle.
- */
-static inline
-struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
-{
- struct usb_hcd *usb_hcd;
- usb_hcd = bus_to_hcd(usb_dev->bus);
- return usb_get_hcd(usb_hcd);
-}
-
-/*
- * Increment the reference count on a wusbhc.
- *
- * @wusbhc's life cycle is identical to that of the underlying usb_hcd.
- */
-static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc)
-{
- return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL;
-}
-
-/*
- * Return the wusbhc associated to a @usb_dev
- *
- * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
- *
- * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down.
- * WARNING: referenced at the usb_hcd level, unlocked
- *
- * FIXME: move offline
- */
-static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev)
-{
- struct wusbhc *wusbhc = NULL;
- struct usb_hcd *usb_hcd;
- if (usb_dev->devnum > 1 && !usb_dev->wusb) {
- /* all but root hubs */
- dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum,
- usb_dev->wusb);
- BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb);
- }
- usb_hcd = usb_hcd_get_by_usb_dev(usb_dev);
- if (usb_hcd == NULL)
- return NULL;
- BUG_ON(usb_hcd->wireless == 0);
- return wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-}
-
-
-static inline void wusbhc_put(struct wusbhc *wusbhc)
-{
- usb_put_hcd(&wusbhc->usb_hcd);
-}
-
-int wusbhc_start(struct wusbhc *wusbhc);
-void wusbhc_stop(struct wusbhc *wusbhc);
-extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *);
-
-/* Device connect handling */
-extern int wusbhc_devconnect_create(struct wusbhc *);
-extern void wusbhc_devconnect_destroy(struct wusbhc *);
-extern int wusbhc_devconnect_start(struct wusbhc *wusbhc);
-extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc);
-extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr,
- struct wusb_dn_hdr *dn_hdr, size_t size);
-extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port);
-extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
- void *priv);
-extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
- u8 addr);
-
-/* Wireless USB fake Root Hub methods */
-extern int wusbhc_rh_create(struct wusbhc *);
-extern void wusbhc_rh_destroy(struct wusbhc *);
-
-extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
-extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
-extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
-
-/* MMC handling */
-extern int wusbhc_mmcie_create(struct wusbhc *);
-extern void wusbhc_mmcie_destroy(struct wusbhc *);
-extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt,
- struct wuie_hdr *);
-extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *);
-
-/* Bandwidth reservation */
-int wusbhc_rsv_establish(struct wusbhc *wusbhc);
-void wusbhc_rsv_terminate(struct wusbhc *wusbhc);
-
-/*
- * I've always said
- * I wanted a wedding in a church...
- *
- * but lately I've been thinking about
- * the Botanical Gardens.
- *
- * We could do it by the tulips.
- * It'll be beautiful
- *
- * --Security!
- */
-extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *,
- struct wusb_dev *);
-extern void wusb_dev_sec_rm(struct wusb_dev *);
-extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *,
- struct wusb_ckhdid *ck);
-void wusbhc_gtk_rekey(struct wusbhc *wusbhc);
-int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
-
-
-/* WUSB Cluster ID handling */
-extern u8 wusb_cluster_id_get(void);
-extern void wusb_cluster_id_put(u8);
-
-/*
- * wusb_port_by_idx - return the port associated to a zero-based port index
- *
- * NOTE: valid without locking as long as wusbhc is referenced (as the
- * number of ports doesn't change). The data pointed to has to
- * be verified though :)
- */
-static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc,
- u8 port_idx)
-{
- return &wusbhc->port[port_idx];
-}
-
-/*
- * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to
- * a port_idx.
- *
- * USB stack ports are 1-based!!
- *
- * NOTE: only valid for WUSB devices!!!
- */
-static inline u8 wusb_port_no_to_idx(u8 port_no)
-{
- return port_no - 1;
-}
-
-extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *,
- struct usb_device *);
-
-/*
- * Return a referenced wusb_dev given a @usb_dev
- *
- * Returns NULL if the usb_dev is being torn down.
- *
- * FIXME: move offline
- */
-static inline
-struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev)
-{
- struct wusbhc *wusbhc;
- struct wusb_dev *wusb_dev;
- wusbhc = wusbhc_get_by_usb_dev(usb_dev);
- if (wusbhc == NULL)
- return NULL;
- mutex_lock(&wusbhc->mutex);
- wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
- mutex_unlock(&wusbhc->mutex);
- wusbhc_put(wusbhc);
- return wusb_dev;
-}
-
-/* Misc */
-
-extern struct workqueue_struct *wusbd;
-#endif /* #ifndef __WUSBHC_H__ */
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 09e55ea0bf5d..59379d662626 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4301,30 +4301,37 @@ int iscsit_close_connection(
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
iscsit_close_session(sess);
return 0;
} else if (atomic_read(&sess->session_logout)) {
pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
sess->session_state = TARG_SESS_STATE_FREE;
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
} else {
pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
sess->session_state = TARG_SESS_STATE_FAILED;
- if (!atomic_read(&sess->session_continuation)) {
- spin_unlock_bh(&sess->conn_lock);
+ if (!atomic_read(&sess->session_continuation))
iscsit_start_time2retain_handler(sess);
- } else
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
}
@@ -4368,8 +4375,7 @@ int iscsit_close_session(struct iscsi_session *sess)
* restart the timer and exit.
*/
if (!in_interrupt()) {
- if (iscsit_check_session_usage_count(sess) == 1)
- iscsit_stop_session(sess, 1, 1);
+ iscsit_check_session_usage_count(sess);
} else {
if (iscsit_check_session_usage_count(sess) == 2) {
atomic_set(&sess->session_logout, 0);
@@ -4430,9 +4436,9 @@ static void iscsit_logout_post_handler_closesession(
complete(&conn->conn_logout_comp);
iscsit_dec_conn_usage_count(conn);
+ atomic_set(&sess->session_close, 1);
iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
}
static void iscsit_logout_post_handler_samecid(
@@ -4567,49 +4573,6 @@ void iscsit_fail_session(struct iscsi_session *sess)
sess->session_state = TARG_SESS_STATE_FAILED;
}
-int iscsit_free_session(struct iscsi_session *sess)
-{
- u16 conn_count = atomic_read(&sess->nconn);
- struct iscsi_conn *conn, *conn_tmp = NULL;
- int is_last;
-
- spin_lock_bh(&sess->conn_lock);
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
-
- list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
- conn_list) {
- if (conn_count == 0)
- break;
-
- if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
- is_last = 1;
- } else {
- iscsit_inc_conn_usage_count(conn_tmp);
- is_last = 0;
- }
- iscsit_inc_conn_usage_count(conn);
-
- spin_unlock_bh(&sess->conn_lock);
- iscsit_cause_connection_reinstatement(conn, 1);
- spin_lock_bh(&sess->conn_lock);
-
- iscsit_dec_conn_usage_count(conn);
- if (is_last == 0)
- iscsit_dec_conn_usage_count(conn_tmp);
-
- conn_count--;
- }
-
- if (atomic_read(&sess->nconn)) {
- spin_unlock_bh(&sess->conn_lock);
- wait_for_completion(&sess->session_wait_comp);
- } else
- spin_unlock_bh(&sess->conn_lock);
-
- iscsit_close_session(sess);
- return 0;
-}
-
void iscsit_stop_session(
struct iscsi_session *sess,
int session_sleep,
@@ -4620,8 +4583,6 @@ void iscsit_stop_session(
int is_last;
spin_lock_bh(&sess->conn_lock);
- if (session_sleep)
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
if (connection_sleep) {
list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
@@ -4679,12 +4640,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
continue;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
list_move_tail(&se_sess->sess_list, &free_list);
@@ -4694,7 +4658,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
- iscsit_free_session(sess);
+ list_del_init(&se_sess->sess_list);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
session_count++;
}
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index c95f56a3ce31..7409ce2a6607 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *);
extern int iscsit_close_connection(struct iscsi_conn *);
extern int iscsit_close_session(struct iscsi_session *);
extern void iscsit_fail_session(struct iscsi_session *);
-extern int iscsit_free_session(struct iscsi_session *);
extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 42b369fc415e..0fa1d57b26fa 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1476,20 +1476,23 @@ static void lio_tpg_close_session(struct se_session *se_sess)
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
return;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
- iscsit_close_session(sess);
+ iscsit_dec_session_usage_count(sess);
}
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index f53330813207..731ee67fe914 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -156,6 +156,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess_p->conn_lock);
continue;
@@ -166,6 +167,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
atomic_set(&sess_p->session_fall_back_to_erl0, 1);
+ atomic_set(&sess_p->session_close, 1);
spin_unlock(&sess_p->conn_lock);
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@@ -190,7 +192,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
@@ -198,7 +199,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
return 0;
}
@@ -486,6 +486,7 @@ static int iscsi_login_non_zero_tsih_s2(
sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
if (!memcmp(sess_p->isid, pdu->isid, 6) &&
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e6e175597860..ff82b21fdcce 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -684,7 +684,9 @@ static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
if (ret < 0)
return ret;
- if (val != 0 && val != 1 && val != 2) {
+ if (val != TARGET_UA_INTLCK_CTRL_CLEAR
+ && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
+ && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
pr_err("Illegal value %d\n", val);
return -EINVAL;
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 2d19f0e332b0..4cee1138284b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -767,7 +767,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.emulate_fua_write = 1;
dev->dev_attrib.emulate_fua_read = 1;
dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
@@ -829,7 +829,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
attrib->unmap_granularity = q->limits.discard_granularity / block_size;
attrib->unmap_granularity_alignment = q->limits.discard_alignment /
block_size;
- attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
+ attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 6b4b354c88aa..1e031d81e59e 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -63,7 +63,7 @@ static int fc_get_pr_transport_id(
* encoded TransportID.
*/
ptr = &se_nacl->initiatorname[0];
- for (i = 0; i < 24; ) {
+ for (i = 0; i < 23; ) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
@@ -341,7 +341,8 @@ static char *iscsi_parse_pr_out_transport_id(
*p = tolower(*p);
p++;
}
- }
+ } else
+ *port_nexus_ptr = NULL;
return &buf[4];
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 51ffd5c002de..1c181d31f4c8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
target_to_linux_sector(dev, cmd->t_task_lba),
target_to_linux_sector(dev,
sbc_get_write_same_sectors(cmd)),
- GFP_KERNEL, false);
+ GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
if (ret)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 6d4cf2643c0a..ca5579ebc81d 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -847,8 +847,17 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
- p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ switch (dev->dev_attrib.emulate_ua_intlck_ctrl) {
+ case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA:
+ p[4] = 0x30;
+ break;
+ case TARGET_UA_INTLCK_CTRL_NO_CLEAR:
+ p[4] = 0x20;
+ break;
+ default: /* TARGET_UA_INTLCK_CTRL_CLEAR */
+ p[4] = 0x00;
+ break;
+ }
/*
* From spc4r17, section 7.4.6 Control mode Page
*
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index feeba3966617..afbd492c76a9 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -78,7 +78,7 @@ static int target_check_cdb_and_preempt(struct list_head *list,
}
static bool __target_check_io_state(struct se_cmd *se_cmd,
- struct se_session *tmr_sess, int tas)
+ struct se_session *tmr_sess, bool tas)
{
struct se_session *sess = se_cmd->se_sess;
@@ -251,7 +251,7 @@ static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
struct se_session *tmr_sess,
- int tas,
+ bool tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
@@ -334,7 +334,7 @@ int core_tmr_lun_reset(
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
struct se_session *tmr_sess = NULL;
- int tas;
+ bool tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0ae9e60fc4d5..264a822c0bfa 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1898,7 +1898,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
- cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+ cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
+ == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
target_ua_allocate_lun(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
@@ -3349,6 +3350,7 @@ static void target_tmr_work(struct work_struct *work)
cmd->se_tfo->queue_tm_rsp(cmd);
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 151b56002da5..4276690fb6cb 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -199,6 +199,8 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
+ bool dev_ua_intlck_clear = (dev->dev_attrib.emulate_ua_intlck_ctrl
+ == TARGET_UA_INTLCK_CTRL_CLEAR);
if (WARN_ON_ONCE(!sess))
return false;
@@ -229,7 +231,7 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
+ if (!dev_ua_intlck_clear) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -254,8 +256,8 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
" INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->fabric_name,
- (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
+ dev_ua_intlck_clear ? "Releasing" : "Reporting",
+ dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
return head == 0;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 0b9dfa6b17bc..f769bb1e3735 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -2073,6 +2073,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mb->cmd_tail = 0;
mb->cmd_head = 0;
tcmu_flush_dcache_range(mb, sizeof(*mb));
+ clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
del_timer(&udev->cmd_timer);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 425c1070de08..bd3ed6ce7571 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -134,7 +134,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
* Assigned designator
*/
desig_len = desc[7];
- if (desig_len != 16) {
+ if (desig_len != XCOPY_NAA_IEEE_REGEX_LEN) {
pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
return -EINVAL;
}
@@ -315,11 +315,6 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
xop->nolb, (unsigned long long)xop->src_lba,
(unsigned long long)xop->dst_lba);
- if (dc != 0) {
- xop->dbl = get_unaligned_be24(&desc[29]);
-
- pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
- }
return 0;
}
@@ -415,7 +410,8 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
struct xcopy_pt_cmd, se_cmd);
- kfree(xpt_cmd);
+ /* xpt_cmd is on the stack, nothing to free here */
+ pr_debug("xpt_cmd done: %p\n", xpt_cmd);
}
static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
@@ -504,7 +500,6 @@ void target_xcopy_release_pt(void)
* @cdb: SCSI CDB to be copied into @xpt_cmd.
* @remote_port: If false, use the LUN through which the XCOPY command has
* been received. If true, use @se_dev->xcopy_lun.
- * @alloc_mem: Whether or not to allocate an SGL list.
*
* Set up a SCSI command (READ or WRITE) that will be used to execute an
* XCOPY command.
@@ -514,12 +509,9 @@ static int target_xcopy_setup_pt_cmd(
struct xcopy_op *xop,
struct se_device *se_dev,
unsigned char *cdb,
- bool remote_port,
- bool alloc_mem)
+ bool remote_port)
{
struct se_cmd *cmd = &xpt_cmd->se_cmd;
- sense_reason_t sense_rc;
- int ret = 0, rc;
/*
* Setup LUN+port to honor reservations based upon xop->op_origin for
@@ -535,46 +527,17 @@ static int target_xcopy_setup_pt_cmd(
cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
cmd->tag = 0;
- sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
- if (sense_rc) {
- ret = -EINVAL;
- goto out;
- }
+ if (target_setup_cmd_from_cdb(cmd, cdb))
+ return -EINVAL;
- if (alloc_mem) {
- rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
- cmd->data_length, false, false);
- if (rc < 0) {
- ret = rc;
- goto out;
- }
- /*
- * Set this bit so that transport_free_pages() allows the
- * caller to release SGLs + physical memory allocated by
- * transport_generic_get_mem()..
- */
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- } else {
- /*
- * Here the previously allocated SGLs for the internal READ
- * are mapped zero-copy to the internal WRITE.
- */
- sense_rc = transport_generic_map_mem_to_cmd(cmd,
- xop->xop_data_sg, xop->xop_data_nents,
- NULL, 0);
- if (sense_rc) {
- ret = -EINVAL;
- goto out;
- }
+ if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg,
+ xop->xop_data_nents, NULL, 0))
+ return -EINVAL;
- pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
- " %u\n", cmd->t_data_sg, cmd->t_data_nents);
- }
+ pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
+ " %u\n", cmd->t_data_sg, cmd->t_data_nents);
return 0;
-
-out:
- return ret;
}
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
@@ -604,20 +567,15 @@ static int target_xcopy_read_source(
sector_t src_lba,
u32 src_sectors)
{
- struct xcopy_pt_cmd *xpt_cmd;
- struct se_cmd *se_cmd;
+ struct xcopy_pt_cmd xpt_cmd;
+ struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
u32 length = (src_sectors * src_dev->dev_attrib.block_size);
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
- xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
- if (!xpt_cmd) {
- pr_err("Unable to allocate xcopy_pt_cmd\n");
- return -ENOMEM;
- }
- init_completion(&xpt_cmd->xpt_passthrough_sem);
- se_cmd = &xpt_cmd->se_cmd;
+ memset(&xpt_cmd, 0, sizeof(xpt_cmd));
+ init_completion(&xpt_cmd.xpt_passthrough_sem);
memset(&cdb[0], 0, 16);
cdb[0] = READ_16;
@@ -627,36 +585,24 @@ static int target_xcopy_read_source(
(unsigned long long)src_lba, src_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
- xop->src_pt_cmd = xpt_cmd;
+ DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0]);
- rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
- remote_port, true);
+ rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
+ remote_port);
if (rc < 0) {
- ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
- transport_generic_free_cmd(se_cmd, 0);
- return rc;
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+ goto out;
}
- xop->xop_data_sg = se_cmd->t_data_sg;
- xop->xop_data_nents = se_cmd->t_data_nents;
pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
" memory\n", xop->xop_data_sg, xop->xop_data_nents);
- rc = target_xcopy_issue_pt_cmd(xpt_cmd);
- if (rc < 0) {
- ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
- transport_generic_free_cmd(se_cmd, 0);
- return rc;
- }
- /*
- * Clear off the allocated t_data_sg, that has been saved for
- * zero-copy WRITE submission reuse in struct xcopy_op..
- */
- se_cmd->t_data_sg = NULL;
- se_cmd->t_data_nents = 0;
-
- return 0;
+ rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
+ if (rc < 0)
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+out:
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
}
static int target_xcopy_write_destination(
@@ -666,20 +612,15 @@ static int target_xcopy_write_destination(
sector_t dst_lba,
u32 dst_sectors)
{
- struct xcopy_pt_cmd *xpt_cmd;
- struct se_cmd *se_cmd;
+ struct xcopy_pt_cmd xpt_cmd;
+ struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
- xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
- if (!xpt_cmd) {
- pr_err("Unable to allocate xcopy_pt_cmd\n");
- return -ENOMEM;
- }
- init_completion(&xpt_cmd->xpt_passthrough_sem);
- se_cmd = &xpt_cmd->se_cmd;
+ memset(&xpt_cmd, 0, sizeof(xpt_cmd));
+ init_completion(&xpt_cmd.xpt_passthrough_sem);
memset(&cdb[0], 0, 16);
cdb[0] = WRITE_16;
@@ -689,36 +630,21 @@ static int target_xcopy_write_destination(
(unsigned long long)dst_lba, dst_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
- xop->dst_pt_cmd = xpt_cmd;
+ DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0]);
- rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
- remote_port, false);
+ rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
+ remote_port);
if (rc < 0) {
- struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
- ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
- /*
- * If the failure happened before the t_mem_list hand-off in
- * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
- * core releases this memory on error during X-COPY WRITE I/O.
- */
- src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- src_cmd->t_data_sg = xop->xop_data_sg;
- src_cmd->t_data_nents = xop->xop_data_nents;
-
- transport_generic_free_cmd(se_cmd, 0);
- return rc;
- }
-
- rc = target_xcopy_issue_pt_cmd(xpt_cmd);
- if (rc < 0) {
- ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
- se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- transport_generic_free_cmd(se_cmd, 0);
- return rc;
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+ goto out;
}
- return 0;
+ rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
+ if (rc < 0)
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+out:
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
}
static void target_xcopy_do_work(struct work_struct *work)
@@ -729,7 +655,7 @@ static void target_xcopy_do_work(struct work_struct *work)
sector_t src_lba, dst_lba, end_lba;
unsigned int max_sectors;
int rc = 0;
- unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
+ unsigned short nolb, max_nolb, copied_nolb = 0;
if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
goto err_free;
@@ -759,7 +685,23 @@ static void target_xcopy_do_work(struct work_struct *work)
(unsigned long long)src_lba, (unsigned long long)dst_lba);
while (src_lba < end_lba) {
- cur_nolb = min(nolb, max_nolb);
+ unsigned short cur_nolb = min(nolb, max_nolb);
+ u32 cur_bytes = cur_nolb * src_dev->dev_attrib.block_size;
+
+ if (cur_bytes != xop->xop_data_bytes) {
+ /*
+ * (Re)allocate a buffer large enough to hold the XCOPY
+ * I/O size; it is reused across each read/write iteration.
+ */
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
+ rc = target_alloc_sgl(&xop->xop_data_sg,
+ &xop->xop_data_nents,
+ cur_bytes,
+ false, false);
+ if (rc < 0)
+ goto out;
+ xop->xop_data_bytes = cur_bytes;
+ }
pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
@@ -777,10 +719,8 @@ static void target_xcopy_do_work(struct work_struct *work)
rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
dst_lba, cur_nolb);
- if (rc < 0) {
- transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+ if (rc < 0)
goto out;
- }
dst_lba += cur_nolb;
pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
@@ -788,14 +728,10 @@ static void target_xcopy_do_work(struct work_struct *work)
copied_nolb += cur_nolb;
nolb -= cur_nolb;
-
- transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
- xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
-
- transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
}
xcopy_pt_undepend_remotedev(xop);
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
kfree(xop);
pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
@@ -809,6 +745,7 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
err_free:
kfree(xop);
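
The loop above now keeps a single scatter-gather buffer in struct xcopy_op and only reallocates it when the chunk size changes, instead of allocating and freeing one per READ/WRITE pair. A minimal userspace sketch of that reuse pattern, with buf_fit() standing in for the target_free_sgl()/target_alloc_sgl() pair (the helper name and plain malloc are assumptions, not the driver's API):

	#include <stdlib.h>

	struct xfer_buf {
		void   *data;
		size_t  bytes;
	};

	/* Reuse the buffer when the size matches; otherwise reallocate. */
	static int buf_fit(struct xfer_buf *b, size_t need)
	{
		if (need == b->bytes)
			return 0;		/* common case: reuse as-is */
		free(b->data);
		b->data = malloc(need);
		if (!b->data) {
			b->bytes = 0;
			return -1;
		}
		b->bytes = need;
		return 0;
	}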
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 26ba4c3c9cff..c56a1bde9417 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -5,7 +5,7 @@
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
-#define XCOPY_MAX_SECTORS 1024
+#define XCOPY_MAX_SECTORS 4096
/*
* SPC4r37 6.4.6.1
@@ -18,8 +18,6 @@ enum xcopy_origin_list {
XCOL_DEST_RECV_OP = 0x02,
};
-struct xcopy_pt_cmd;
-
struct xcopy_op {
int op_origin;
@@ -35,11 +33,8 @@ struct xcopy_op {
unsigned short stdi;
unsigned short dtdi;
unsigned short nolb;
- unsigned int dbl;
-
- struct xcopy_pt_cmd *src_pt_cmd;
- struct xcopy_pt_cmd *dst_pt_cmd;
+ u32 xop_data_bytes;
u32 xop_data_nents;
struct scatterlist *xop_data_sg;
struct work_struct xop_work;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 37d22e39fd8d..6aec502c495c 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -44,7 +44,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev)
kref_init(&ctx->refcount);
ctx->teedev = teedev;
- INIT_LIST_HEAD(&ctx->list_shm);
rc = teedev->desc->ops->open(ctx);
if (rc)
goto err;
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index f797171f0434..e55204df31ce 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -37,7 +37,8 @@ struct tee_shm_pool {
* @num_users: number of active users of this device
* @c_no_user: completion used when unregistering the device
* @mutex: mutex protecting @num_users and @idr
- * @idr: register of shared memory object allocated on this device
+ * @idr: register of user space shared memory objects allocated or
+ * registered on this device
* @pool: shared memory pool
*/
struct tee_device {
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 937ac5aaa6d8..bd679b72bd05 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -13,13 +13,13 @@
static void tee_shm_release(struct tee_shm *shm)
{
- struct tee_device *teedev = shm->teedev;
+ struct tee_device *teedev = shm->ctx->teedev;
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- if (shm->ctx)
- list_del(&shm->link);
- mutex_unlock(&teedev->mutex);
+ if (shm->flags & TEE_SHM_DMA_BUF) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+ }
if (shm->flags & TEE_SHM_POOL) {
struct tee_shm_pool_mgr *poolm;
@@ -44,8 +44,7 @@ static void tee_shm_release(struct tee_shm *shm)
kfree(shm->pages);
}
- if (shm->ctx)
- teedev_ctx_put(shm->ctx);
+ teedev_ctx_put(shm->ctx);
kfree(shm);
@@ -77,7 +76,7 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
/* Refuse sharing shared memory provided by application */
- if (shm->flags & TEE_SHM_REGISTER)
+ if (shm->flags & TEE_SHM_USER_MAPPED)
return -EINVAL;
return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
@@ -91,20 +90,14 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.mmap = tee_shm_op_mmap,
};
-static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
- struct tee_device *teedev,
- size_t size, u32 flags)
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
+ struct tee_device *teedev = ctx->teedev;
struct tee_shm_pool_mgr *poolm = NULL;
struct tee_shm *shm;
void *ret;
int rc;
- if (ctx && ctx->teedev != teedev) {
- dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
- return ERR_PTR(-EINVAL);
- }
-
if (!(flags & TEE_SHM_MAPPED)) {
dev_err(teedev->dev.parent,
"only mapped allocations supported\n");
@@ -132,7 +125,6 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
}
shm->flags = flags | TEE_SHM_POOL;
- shm->teedev = teedev;
shm->ctx = ctx;
if (flags & TEE_SHM_DMA_BUF)
poolm = teedev->pool->dma_buf_mgr;
@@ -145,17 +137,18 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
goto err_kfree;
}
- mutex_lock(&teedev->mutex);
- shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
- mutex_unlock(&teedev->mutex);
- if (shm->id < 0) {
- ret = ERR_PTR(shm->id);
- goto err_pool_free;
- }
if (flags & TEE_SHM_DMA_BUF) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ mutex_lock(&teedev->mutex);
+ shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (shm->id < 0) {
+ ret = ERR_PTR(shm->id);
+ goto err_pool_free;
+ }
+
exp_info.ops = &tee_shm_dma_buf_ops;
exp_info.size = shm->size;
exp_info.flags = O_RDWR;
@@ -168,18 +161,16 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
}
}
- if (ctx) {
+ if (ctx)
teedev_ctx_get(ctx);
- mutex_lock(&teedev->mutex);
- list_add_tail(&shm->link, &ctx->list_shm);
- mutex_unlock(&teedev->mutex);
- }
return shm;
err_rem:
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
+ if (flags & TEE_SHM_DMA_BUF) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+ }
err_pool_free:
poolm->ops->free(poolm, shm);
err_kfree:
@@ -188,31 +179,8 @@ err_dev_put:
tee_device_put(teedev);
return ret;
}
-
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx: Context that allocates the shared memory
- * @size: Requested size of shared memory
- * @flags: Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
-{
- return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
-}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
-struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
-{
- return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
-}
-EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
-
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
size_t length, u32 flags)
{
@@ -245,7 +213,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
}
shm->flags = flags | TEE_SHM_REGISTER;
- shm->teedev = teedev;
shm->ctx = ctx;
shm->id = -1;
addr = untagged_addr(addr);
@@ -301,10 +268,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
}
}
- mutex_lock(&teedev->mutex);
- list_add_tail(&shm->link, &ctx->list_shm);
- mutex_unlock(&teedev->mutex);
-
return shm;
err:
if (shm) {
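
After the tee_shm changes above, an IDR id exists only for TEE_SHM_DMA_BUF allocations, so the allocation path, its error unwind, and tee_shm_release() all test the same flag before touching teedev->idr. A short sketch of that invariant (illustrative helper, not an exported API):

	/* Allocate a user-visible id only for dma-buf backed shm objects. */
	static int shm_maybe_register_id(struct tee_device *teedev,
					 struct tee_shm *shm)
	{
		if (!(shm->flags & TEE_SHM_DMA_BUF))
			return 0;	/* private shm: no id, nothing to undo */

		mutex_lock(&teedev->mutex);
		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
		mutex_unlock(&teedev->mutex);

		return shm->id < 0 ? shm->id : 0;
	}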
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 5a05db5438d6..91af271e9bb0 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -1,17 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Generic thermal sysfs drivers configuration
+# Generic thermal drivers configuration
#
menuconfig THERMAL
- bool "Generic Thermal sysfs driver"
+ bool "Thermal drivers"
help
- Generic Thermal Sysfs driver offers a generic mechanism for
+ Thermal drivers offer a generic mechanism for
thermal management. Usually it's made up of one or more thermal
- zone and cooling device.
+ zones and cooling devices.
Each thermal zone contains its own temperature, trip points,
- cooling devices.
- All platforms with ACPI thermal support can use this driver.
+ and cooling devices.
+ All platforms with ACPI or Open Firmware thermal support can use
+ this driver.
If you want this support, you should say Y here.
if THERMAL
@@ -251,6 +252,27 @@ config IMX_THERMAL
cpufreq is used as the cooling device to throttle CPUs when the
passive trip is crossed.
+config IMX_SC_THERMAL
+ tristate "Temperature sensor driver for NXP i.MX SoCs with System Controller"
+ depends on IMX_SCU
+ depends on OF
+ help
+ Support for the Temperature Monitor (TEMPMON) found on NXP i.MX SoCs
+ with an integrated system controller. The Linux kernel communicates
+ with the system controller via MU (message unit) IPC to read the
+ thermal sensors. It supports one critical trip point and one
+ passive trip point for each thermal sensor.
+
+config IMX8MM_THERMAL
+ tristate "Temperature sensor driver for Freescale i.MX8MM SoC"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on OF
+ help
+ Support for the Thermal Monitoring Unit (TMU) found on the Freescale
+ i.MX8MM SoC. It supports one critical trip point and one passive trip
+ point. cpufreq is used as the cooling device to throttle CPUs when the
+ passive trip is crossed.
+
config MAX77620_THERMAL
tristate "Temperature sensor driver for Maxim MAX77620 PMIC"
depends on MFD_MAX77620
@@ -265,6 +287,7 @@ config QORIQ_THERMAL
tristate "QorIQ Thermal Monitoring Unit"
depends on THERMAL_OF
depends on HAS_IOMEM
+ select REGMAP_MMIO
help
Support for Thermal Monitoring Unit (TMU) found on QorIQ platforms.
It supports one critical trip point and one passive trip point. The
@@ -460,4 +483,11 @@ config UNIPHIER_THERMAL
Enable this to plug in UniPhier on-chip PVT thermal driver into the
thermal framework. The driver supports CPU thermal zone temperature
reporting and a couple of trip points.
+
+config SPRD_THERMAL
+ tristate "Temperature sensor on Spreadtrum SoCs"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+ Support for the Spreadtrum thermal sensor driver in the Linux thermal
+ framework.
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 9fb88e26fb10..8c8ed7b79915 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -43,6 +43,8 @@ obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
+obj-$(CONFIG_IMX_SC_THERMAL) += imx_sc_thermal.o
+obj-$(CONFIG_IMX8MM_THERMAL) += imx8mm_thermal.o
obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o
obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o
obj-$(CONFIG_DA9062_THERMAL) += da9062-thermal.o
@@ -57,3 +59,4 @@ obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o
obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
+obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index fe83d7a210d4..e297e135c031 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -273,7 +273,7 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* Request state should be less than max_level */
- if (WARN_ON(state > cpufreq_cdev->max_level))
+ if (state > cpufreq_cdev->max_level)
return -EINVAL;
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
@@ -431,9 +431,13 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+ struct cpumask *cpus;
+ unsigned int frequency;
+ unsigned long max_capacity, capacity;
+ int ret;
/* Request state should be less than max_level */
- if (WARN_ON(state > cpufreq_cdev->max_level))
+ if (state > cpufreq_cdev->max_level)
return -EINVAL;
/* Check if the old cooling action is same as new cooling action */
@@ -442,8 +446,20 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
cpufreq_cdev->cpufreq_state = state;
- return freq_qos_update_request(&cpufreq_cdev->qos_req,
- get_state_freq(cpufreq_cdev, state));
+ frequency = get_state_freq(cpufreq_cdev, state);
+
+ ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
+
+ if (ret > 0) {
+ cpus = cpufreq_cdev->policy->cpus;
+ max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
+ capacity = frequency * max_capacity;
+ capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
+ arch_set_thermal_pressure(cpus, max_capacity - capacity);
+ ret = 0;
+ }
+
+ return ret;
}
/* Bind cpufreq callbacks to thermal cooling device ops */
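
The new branch above feeds the scheduler's thermal-pressure signal: the remaining capacity scales linearly with the capped frequency, and the pressure is whatever capacity was lost. Standalone form of the arithmetic with illustrative numbers (max capacity 1024, cpuinfo.max_freq 2000000 kHz, capped to 1500000 kHz gives 1500000 * 1024 / 2000000 = 768, i.e. a pressure of 256):

	static unsigned long thermal_pressure(unsigned long capped_freq,
					      unsigned long max_freq,
					      unsigned long max_capacity)
	{
		/* capacity left under the thermal cap, in capacity units */
		unsigned long capacity = capped_freq * max_capacity / max_freq;

		return max_capacity - capacity;
	}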
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
new file mode 100644
index 000000000000..0d60f8d7894f
--- /dev/null
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP.
+ *
+ * Author: Anson Huang <Anson.Huang@nxp.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+#define TER 0x0 /* TMU enable */
+#define TPS 0x4
+#define TRITSR 0x20 /* TMU immediate temp */
+
+#define TER_EN BIT(31)
+#define TRITSR_TEMP0_VAL_MASK 0xff
+#define TRITSR_TEMP1_VAL_MASK 0xff0000
+
+#define PROBE_SEL_ALL GENMASK(31, 30)
+
+#define probe_status_offset(x) (30 + x)
+#define SIGN_BIT BIT(7)
+#define TEMP_VAL_MASK GENMASK(6, 0)
+
+#define VER1_TEMP_LOW_LIMIT 10000
+#define VER2_TEMP_LOW_LIMIT -40000
+#define VER2_TEMP_HIGH_LIMIT 125000
+
+#define TMU_VER1 0x1
+#define TMU_VER2 0x2
+
+struct thermal_soc_data {
+ u32 num_sensors;
+ u32 version;
+ int (*get_temp)(void *, int *);
+};
+
+struct tmu_sensor {
+ struct imx8mm_tmu *priv;
+ u32 hw_id;
+ struct thermal_zone_device *tzd;
+};
+
+struct imx8mm_tmu {
+ void __iomem *base;
+ struct clk *clk;
+ const struct thermal_soc_data *socdata;
+ struct tmu_sensor sensors[0];
+};
+
+static int imx8mm_tmu_get_temp(void *data, int *temp)
+{
+ struct tmu_sensor *sensor = data;
+ struct imx8mm_tmu *tmu = sensor->priv;
+ u32 val;
+
+ val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK;
+ *temp = val * 1000;
+ if (*temp < VER1_TEMP_LOW_LIMIT)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int imx8mp_tmu_get_temp(void *data, int *temp)
+{
+ struct tmu_sensor *sensor = data;
+ struct imx8mm_tmu *tmu = sensor->priv;
+ unsigned long val;
+ bool ready;
+
+ val = readl_relaxed(tmu->base + TRITSR);
+ ready = test_bit(probe_status_offset(sensor->hw_id), &val);
+ if (!ready)
+ return -EAGAIN;
+
+ val = sensor->hw_id ? FIELD_GET(TRITSR_TEMP1_VAL_MASK, val) :
+ FIELD_GET(TRITSR_TEMP0_VAL_MASK, val);
+ if (val & SIGN_BIT) /* negative */
+ val = (~(val & TEMP_VAL_MASK) + 1);
+
+ *temp = val * 1000;
+ if (*temp < VER2_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int tmu_get_temp(void *data, int *temp)
+{
+ struct tmu_sensor *sensor = data;
+ struct imx8mm_tmu *tmu = sensor->priv;
+
+ return tmu->socdata->get_temp(data, temp);
+}
+
+static struct thermal_zone_of_device_ops tmu_tz_ops = {
+ .get_temp = tmu_get_temp,
+};
+
+static void imx8mm_tmu_enable(struct imx8mm_tmu *tmu, bool enable)
+{
+ u32 val;
+
+ val = readl_relaxed(tmu->base + TER);
+ val = enable ? (val | TER_EN) : (val & ~TER_EN);
+ writel_relaxed(val, tmu->base + TER);
+}
+
+static void imx8mm_tmu_probe_sel_all(struct imx8mm_tmu *tmu)
+{
+ u32 val;
+
+ val = readl_relaxed(tmu->base + TPS);
+ val |= PROBE_SEL_ALL;
+ writel_relaxed(val, tmu->base + TPS);
+}
+
+static int imx8mm_tmu_probe(struct platform_device *pdev)
+{
+ const struct thermal_soc_data *data;
+ struct imx8mm_tmu *tmu;
+ int ret;
+ int i;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ tmu = devm_kzalloc(&pdev->dev, struct_size(tmu, sensors,
+ data->num_sensors), GFP_KERNEL);
+ if (!tmu)
+ return -ENOMEM;
+
+ tmu->socdata = data;
+
+ tmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tmu->base))
+ return PTR_ERR(tmu->base);
+
+ tmu->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tmu->clk)) {
+ ret = PTR_ERR(tmu->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get tmu clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tmu->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable tmu clock: %d\n", ret);
+ return ret;
+ }
+
+ /* disable the monitor during initialization */
+ imx8mm_tmu_enable(tmu, false);
+
+ for (i = 0; i < data->num_sensors; i++) {
+ tmu->sensors[i].priv = tmu;
+ tmu->sensors[i].tzd =
+ devm_thermal_zone_of_sensor_register(&pdev->dev, i,
+ &tmu->sensors[i],
+ &tmu_tz_ops);
+ if (IS_ERR(tmu->sensors[i].tzd)) {
+ ret = PTR_ERR(tmu->sensors[i].tzd);
+ dev_err(&pdev->dev,
+ "failed to register thermal zone sensor[%d]: %d\n",
+ i, ret);
+ return ret;
+ }
+ tmu->sensors[i].hw_id = i;
+ }
+
+ platform_set_drvdata(pdev, tmu);
+
+ /* enable all the probes for V2 TMU */
+ if (tmu->socdata->version == TMU_VER2)
+ imx8mm_tmu_probe_sel_all(tmu);
+
+ /* enable the monitor */
+ imx8mm_tmu_enable(tmu, true);
+
+ return 0;
+}
+
+static int imx8mm_tmu_remove(struct platform_device *pdev)
+{
+ struct imx8mm_tmu *tmu = platform_get_drvdata(pdev);
+
+ /* disable TMU */
+ imx8mm_tmu_enable(tmu, false);
+
+ clk_disable_unprepare(tmu->clk);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct thermal_soc_data imx8mm_tmu_data = {
+ .num_sensors = 1,
+ .version = TMU_VER1,
+ .get_temp = imx8mm_tmu_get_temp,
+};
+
+static struct thermal_soc_data imx8mp_tmu_data = {
+ .num_sensors = 2,
+ .version = TMU_VER2,
+ .get_temp = imx8mp_tmu_get_temp,
+};
+
+static const struct of_device_id imx8mm_tmu_table[] = {
+ { .compatible = "fsl,imx8mm-tmu", .data = &imx8mm_tmu_data, },
+ { .compatible = "fsl,imx8mp-tmu", .data = &imx8mp_tmu_data, },
+ { },
+};
+
+static struct platform_driver imx8mm_tmu = {
+ .driver = {
+ .name = "i.mx8mm_thermal",
+ .of_match_table = imx8mm_tmu_table,
+ },
+ .probe = imx8mm_tmu_probe,
+ .remove = imx8mm_tmu_remove,
+};
+module_platform_driver(imx8mm_tmu);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("i.MX8MM Thermal Monitor Unit driver");
+MODULE_LICENSE("GPL v2");
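
For the v2 TMU above, TRITSR carries a sign bit plus a 7-bit magnitude per probe. A self-contained decoder for that field, assuming the register holds an 8-bit two's-complement Celsius value (which is what the SIGN_BIT/TEMP_VAL_MASK handling suggests); returns millidegrees:

	#include <stdint.h>

	#define TMU_SIGN_BIT      0x80u
	#define TMU_TEMP_VAL_MASK 0x7fu

	static int tritsr_to_mcelsius(uint8_t raw)
	{
		int deg;

		if (raw & TMU_SIGN_BIT)		/* negative reading */
			deg = -(int)((~raw & TMU_TEMP_VAL_MASK) + 1);
		else
			deg = raw & TMU_TEMP_VAL_MASK;

		return deg * 1000;		/* e.g. 0xFF -> -1000 mC */
	}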
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
new file mode 100644
index 000000000000..a8723b1eb8b0
--- /dev/null
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2020 NXP.
+ */
+
+#include <linux/err.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/firmware/imx/types.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+#define IMX_SC_MISC_FUNC_GET_TEMP 13
+
+static struct imx_sc_ipc *thermal_ipc_handle;
+
+struct imx_sc_sensor {
+ struct thermal_zone_device *tzd;
+ u32 resource_id;
+};
+
+struct req_get_temp {
+ u16 resource_id;
+ u8 type;
+} __packed __aligned(4);
+
+struct resp_get_temp {
+ s16 celsius;
+ s8 tenths;
+} __packed __aligned(4);
+
+struct imx_sc_msg_misc_get_temp {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct req_get_temp req;
+ struct resp_get_temp resp;
+ } data;
+} __packed __aligned(4);
+
+static int imx_sc_thermal_get_temp(void *data, int *temp)
+{
+ struct imx_sc_msg_misc_get_temp msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ struct imx_sc_sensor *sensor = data;
+ int ret;
+
+ msg.data.req.resource_id = sensor->resource_id;
+ msg.data.req.type = IMX_SC_C_TEMP;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_GET_TEMP;
+ hdr->size = 2;
+
+ ret = imx_scu_call_rpc(thermal_ipc_handle, &msg, true);
+ if (ret) {
+ dev_err(&sensor->tzd->device, "read temp sensor %d failed, ret %d\n",
+ sensor->resource_id, ret);
+ return ret;
+ }
+
+ *temp = msg.data.resp.celsius * 1000 + msg.data.resp.tenths * 100;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops imx_sc_thermal_ops = {
+ .get_temp = imx_sc_thermal_get_temp,
+};
+
+static int imx_sc_thermal_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *child, *sensor_np;
+ struct imx_sc_sensor *sensor;
+ int ret;
+
+ ret = imx_scu_get_handle(&thermal_ipc_handle);
+ if (ret)
+ return ret;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np)
+ return -ENODEV;
+
+ sensor_np = of_node_get(pdev->dev.of_node);
+
+ for_each_available_child_of_node(np, child) {
+ sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor) {
+ of_node_put(sensor_np);
+ return -ENOMEM;
+ }
+
+ ret = thermal_zone_of_get_sensor_id(child,
+ sensor_np,
+ &sensor->resource_id);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to get valid sensor resource id: %d\n",
+ ret);
+ break;
+ }
+
+ sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ sensor->resource_id,
+ sensor,
+ &imx_sc_thermal_ops);
+ if (IS_ERR(sensor->tzd)) {
+ dev_err(&pdev->dev, "failed to register thermal zone\n");
+ ret = PTR_ERR(sensor->tzd);
+ break;
+ }
+ }
+
+ of_node_put(sensor_np);
+
+ return ret;
+}
+
+static int imx_sc_thermal_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id imx_sc_thermal_table[] = {
+ { .compatible = "fsl,imx-sc-thermal", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, imx_sc_thermal_table);
+
+static struct platform_driver imx_sc_thermal_driver = {
+ .probe = imx_sc_thermal_probe,
+ .remove = imx_sc_thermal_remove,
+ .driver = {
+ .name = "imx-sc-thermal",
+ .of_match_table = imx_sc_thermal_table,
+ },
+};
+module_platform_driver(imx_sc_thermal_driver);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("Thermal driver for NXP i.MX SoCs with system controller");
+MODULE_LICENSE("GPL v2");
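
The SCU response above splits the reading into whole degrees and tenths, both signed. Standalone form of the conversion, assuming the firmware reports negative temperatures with both fields negative (e.g. -10.3 C as celsius = -10, tenths = -3):

	#include <stdint.h>

	/* 42 C and 5 tenths -> 42 * 1000 + 5 * 100 = 42500 mC */
	static int sc_temp_to_mcelsius(int16_t celsius, int8_t tenths)
	{
		return celsius * 1000 + tenths * 100;
	}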
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index bb6754a5342c..e761c9b42217 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -3,24 +3,17 @@
// Copyright 2013 Freescale Semiconductor, Inc.
#include <linux/clk.h>
-#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/slab.h>
#include <linux/thermal.h>
-#include <linux/types.h>
#include <linux/nvmem-consumer.h>
#define REG_SET 0x4
@@ -872,14 +865,12 @@ static int imx_thermal_remove(struct platform_device *pdev)
clk_disable_unprepare(data->thermal_clk);
thermal_zone_device_unregister(data->tz);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
+ imx_thermal_unregister_legacy_cooling(data);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int imx_thermal_suspend(struct device *dev)
+static int __maybe_unused imx_thermal_suspend(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
@@ -900,7 +891,7 @@ static int imx_thermal_suspend(struct device *dev)
return 0;
}
-static int imx_thermal_resume(struct device *dev)
+static int __maybe_unused imx_thermal_resume(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
@@ -918,7 +909,6 @@ static int imx_thermal_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
imx_thermal_suspend, imx_thermal_resume);
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index 7130e90773ed..a478cff8162a 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -19,6 +19,7 @@
#include <linux/acpi.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
+#include <linux/fs.h>
#include "acpi_thermal_rel.h"
static acpi_handle acpi_thermal_rel_handle;
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index efae0c02d898..ceef89c956bd 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -65,7 +65,7 @@ static ssize_t available_uuids_show(struct device *dev,
for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
if (priv->uuid_bitmap & (1 << i))
if (PAGE_SIZE - length > 0)
- length += snprintf(&buf[length],
+ length += scnprintf(&buf[length],
PAGE_SIZE - length,
"%s\n",
int3400_thermal_uuids[i]);
@@ -369,8 +369,8 @@ static int int3400_thermal_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3400_thermal_match[] = {
- {"INT1040", 0},
{"INT3400", 0},
+ {"INTC1040", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index aeece1e136a5..f86cbb125e2f 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -282,8 +282,8 @@ static int int3403_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3403_device_ids[] = {
- {"INT1043", 0},
{"INT3403", 0},
+ {"INTC1043", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index b1fd34516e28..297db1d2d960 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -45,6 +45,9 @@
/* JasperLake thermal reporting device */
#define PCI_DEVICE_ID_PROC_JSL_THERMAL 0x4503
+/* TigerLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_TGL_THERMAL 0x9A03
+
#define DRV_NAME "proc_thermal"
struct power_config {
@@ -728,6 +731,8 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_ICL_THERMAL),
.driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_JSL_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_TGL_THERMAL),
+ .driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
{ 0, },
};
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 53216dcbe173..f74b2473440d 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -651,7 +651,7 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
};
static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+ X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_MWAIT, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
index 5d33b350da1c..d704fc104cfd 100644
--- a/drivers/thermal/intel/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
@@ -64,9 +64,6 @@
#include <asm/cpu_device_id.h>
#include <asm/iosf_mbi.h>
-#define X86_FAMILY_QUARK 0x5
-#define X86_MODEL_QUARK_X1000 0x9
-
/* DTS reset is programmed via QRK_MBI_UNIT_SOC */
#define QRK_DTS_REG_OFFSET_RESET 0x34
#define QRK_DTS_RESET_BIT BIT(0)
@@ -433,7 +430,7 @@ err_ret:
}
static const struct x86_cpu_id qrk_thermal_ids[] __initconst = {
- { X86_VENDOR_INTEL, X86_FAMILY_QUARK, X86_MODEL_QUARK_X1000 },
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);
diff --git a/drivers/thermal/intel/intel_soc_dts_thermal.c b/drivers/thermal/intel/intel_soc_dts_thermal.c
index f4be9c14e5d9..92e5c19d03f6 100644
--- a/drivers/thermal/intel/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel/intel_soc_dts_thermal.c
@@ -36,8 +36,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
}
static const struct x86_cpu_id soc_thermal_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0,
- BYT_SOC_DTS_APIC_IRQ},
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ),
{}
};
MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index ddb4a973c698..a006b9fd1d72 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -63,7 +63,7 @@ static int max_id __read_mostly;
/* Array of zone pointers */
static struct zone_device **zones;
/* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
/* Protects zone operation in the work function against hotplug removal */
static DEFINE_MUTEX(thermal_zone_mutex);
@@ -266,12 +266,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
u64 msr_val, wr_val;
mutex_lock(&thermal_zone_mutex);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
++pkg_work_cnt;
zonedev = pkg_temp_thermal_get_dev(cpu);
if (!zonedev) {
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
mutex_unlock(&thermal_zone_mutex);
return;
}
@@ -285,7 +285,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
}
enable_pkg_thres_interrupt();
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
/*
* If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -310,7 +310,7 @@ static int pkg_thermal_notify(u64 msr_val)
struct zone_device *zonedev;
unsigned long flags;
- spin_lock_irqsave(&pkg_temp_lock, flags);
+ raw_spin_lock_irqsave(&pkg_temp_lock, flags);
++pkg_interrupt_cnt;
disable_pkg_thres_interrupt();
@@ -322,7 +322,7 @@ static int pkg_thermal_notify(u64 msr_val)
pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
}
- spin_unlock_irqrestore(&pkg_temp_lock, flags);
+ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
return 0;
}
@@ -368,9 +368,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
zonedev->msr_pkg_therm_high);
cpumask_set_cpu(cpu, &zonedev->cpumask);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
zones[id] = zonedev;
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
return 0;
}
@@ -407,7 +407,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
}
/* Protect against work and interrupts */
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
/*
* Check whether this cpu was the current target and store the new
@@ -439,9 +439,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* To cancel the work we need to drop the lock, otherwise
* we might deadlock if the work needs to be flushed.
*/
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
cancel_delayed_work_sync(&zonedev->work);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
/*
* If this is not the last cpu in the package and the work
* did not run after we dropped the lock above, then we
@@ -452,7 +452,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
pkg_thermal_schedule_work(target, &zonedev->work);
}
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
/* Final cleanup if this is the last cpu */
if (lastcpu)
@@ -478,7 +478,7 @@ static int pkg_thermal_cpu_online(unsigned int cpu)
}
static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS },
+ X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_PTS, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index ef0baa954ff0..874a47d6923f 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -449,6 +449,50 @@ thermal_zone_of_add_sensor(struct device_node *zone,
}
/**
+ * thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone
+ * @tz_np: a valid thermal zone device node.
+ * @sensor_np: a sensor node of a valid sensor device.
+ * @id: the sensor ID returned on success.
+ *
+ * This function gets the sensor ID from a given thermal zone node; the
+ * sensor node must match the temperature provider @sensor_np.
+ *
+ * Return: 0 on success, proper error code otherwise.
+ */
+
+int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
+ struct device_node *sensor_np,
+ u32 *id)
+{
+ struct of_phandle_args sensor_specs;
+ int ret;
+
+ ret = of_parse_phandle_with_args(tz_np,
+ "thermal-sensors",
+ "#thermal-sensor-cells",
+ 0,
+ &sensor_specs);
+ if (ret)
+ return ret;
+
+ if (sensor_specs.np != sensor_np) {
+ of_node_put(sensor_specs.np);
+ return -ENODEV;
+ }
+
+ if (sensor_specs.args_count > 1)
+ pr_warn("%pOFn: too many cells in sensor specifier %d\n",
+ sensor_specs.np, sensor_specs.args_count);
+
+ *id = sensor_specs.args_count ? sensor_specs.args[0] : 0;
+
+ of_node_put(sensor_specs.np);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id);
+
+/**
* thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone
* @dev: a valid struct device pointer of a sensor device. Must contain
* a valid .of_node, for the sensor node.
@@ -499,36 +543,22 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
sensor_np = of_node_get(dev->of_node);
for_each_available_child_of_node(np, child) {
- struct of_phandle_args sensor_specs;
int ret, id;
/* For now, thermal framework supports only 1 sensor per zone */
- ret = of_parse_phandle_with_args(child, "thermal-sensors",
- "#thermal-sensor-cells",
- 0, &sensor_specs);
+ ret = thermal_zone_of_get_sensor_id(child, sensor_np, &id);
if (ret)
continue;
- if (sensor_specs.args_count >= 1) {
- id = sensor_specs.args[0];
- WARN(sensor_specs.args_count > 1,
- "%pOFn: too many cells in sensor specifier %d\n",
- sensor_specs.np, sensor_specs.args_count);
- } else {
- id = 0;
- }
-
- if (sensor_specs.np == sensor_np && id == sensor_id) {
+ if (id == sensor_id) {
tzd = thermal_zone_of_add_sensor(child, sensor_np,
data, ops);
if (!IS_ERR(tzd))
tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
- of_node_put(sensor_specs.np);
of_node_put(child);
goto exit;
}
- of_node_put(sensor_specs.np);
}
exit:
of_node_put(sensor_np);
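
Typical use of the new helper mirrors the imx_sc_thermal probe earlier in this series: walk the "thermal-zones" container and keep the zones whose thermal-sensors phandle resolves back to your own node. A hedged sketch (error handling trimmed):

	static void list_my_zones(struct device_node *sensor_np)
	{
		struct device_node *np, *child;
		u32 id;

		np = of_find_node_by_name(NULL, "thermal-zones");
		if (!np)
			return;

		for_each_available_child_of_node(np, child) {
			/* Non-zero means the zone references another sensor. */
			if (thermal_zone_of_get_sensor_id(child, sensor_np, &id))
				continue;
			pr_info("%pOFn: uses sensor id %u\n", child, id);
		}
		of_node_put(np);
	}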
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index fb77acb8d13b..2a28a5af209e 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -245,7 +245,7 @@ static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
return adc_code * slope + offset;
}
-static int get_temp_8960(struct tsens_sensor *s, int *temp)
+static int get_temp_8960(const struct tsens_sensor *s, int *temp)
{
int ret;
u32 code, trdy;
@@ -279,7 +279,7 @@ static const struct tsens_ops ops_8960 = {
.resume = resume_8960,
};
-const struct tsens_plat_data data_8960 = {
+struct tsens_plat_data data_8960 = {
.num_sensors = 11,
.ops = &ops_8960,
};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index c8d57ee0a5bb..172545366636 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -23,6 +23,10 @@
* @low_thresh: lower threshold temperature value
* @low_irq_mask: mask register for lower threshold irqs
* @low_irq_clear: clear register for lower threshold irqs
+ * @crit_viol: critical threshold violated
+ * @crit_thresh: critical threshold temperature value
+ * @crit_irq_mask: mask register for critical threshold irqs
+ * @crit_irq_clear: clear register for critical threshold irqs
*
* Structure containing data about temperature threshold settings and
* irq status if they were violated.
@@ -36,6 +40,10 @@ struct tsens_irq_data {
int low_thresh;
u32 low_irq_mask;
u32 low_irq_clear;
+ u32 crit_viol;
+ u32 crit_thresh;
+ u32 crit_irq_mask;
+ u32 crit_irq_clear;
};
char *qfprom_read(struct device *dev, const char *cname)
@@ -128,7 +136,7 @@ static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
* Return: Temperature in milliCelsius on success, a negative errno will
* be returned in error cases
*/
-static int tsens_hw_to_mC(struct tsens_sensor *s, int field)
+static int tsens_hw_to_mC(const struct tsens_sensor *s, int field)
{
struct tsens_priv *priv = s->priv;
u32 resolution;
@@ -160,7 +168,7 @@ static int tsens_hw_to_mC(struct tsens_sensor *s, int field)
*
* Return: ADC code or temperature in deciCelsius.
*/
-static int tsens_mC_to_hw(struct tsens_sensor *s, int temp)
+static int tsens_mC_to_hw(const struct tsens_sensor *s, int temp)
{
struct tsens_priv *priv = s->priv;
@@ -189,6 +197,9 @@ static void tsens_set_interrupt_v1(struct tsens_priv *priv, u32 hw_id,
case LOWER:
index = LOW_INT_CLEAR_0 + hw_id;
break;
+ case CRITICAL:
+ /* No critical interrupts before v2 */
+ return;
}
regmap_field_write(priv->rf[index], enable ? 0 : 1);
}
@@ -214,6 +225,10 @@ static void tsens_set_interrupt_v2(struct tsens_priv *priv, u32 hw_id,
index_mask = LOW_INT_MASK_0 + hw_id;
index_clear = LOW_INT_CLEAR_0 + hw_id;
break;
+ case CRITICAL:
+ index_mask = CRIT_INT_MASK_0 + hw_id;
+ index_clear = CRIT_INT_CLEAR_0 + hw_id;
+ break;
}
if (enable) {
@@ -268,14 +283,23 @@ static int tsens_threshold_violated(struct tsens_priv *priv, u32 hw_id,
ret = regmap_field_read(priv->rf[LOWER_STATUS_0 + hw_id], &d->low_viol);
if (ret)
return ret;
- if (d->up_viol || d->low_viol)
+
+ if (priv->feat->crit_int) {
+ ret = regmap_field_read(priv->rf[CRITICAL_STATUS_0 + hw_id],
+ &d->crit_viol);
+ if (ret)
+ return ret;
+ }
+
+ if (d->up_viol || d->low_viol || d->crit_viol)
return 1;
return 0;
}
static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id,
- struct tsens_sensor *s, struct tsens_irq_data *d)
+ const struct tsens_sensor *s,
+ struct tsens_irq_data *d)
{
int ret;
@@ -292,22 +316,37 @@ static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id,
ret = regmap_field_read(priv->rf[LOW_INT_MASK_0 + hw_id], &d->low_irq_mask);
if (ret)
return ret;
+ ret = regmap_field_read(priv->rf[CRIT_INT_CLEAR_0 + hw_id],
+ &d->crit_irq_clear);
+ if (ret)
+ return ret;
+ ret = regmap_field_read(priv->rf[CRIT_INT_MASK_0 + hw_id],
+ &d->crit_irq_mask);
+ if (ret)
+ return ret;
+
+ d->crit_thresh = tsens_hw_to_mC(s, CRIT_THRESH_0 + hw_id);
} else {
/* No mask register on older TSENS */
d->up_irq_mask = 0;
d->low_irq_mask = 0;
+ d->crit_irq_clear = 0;
+ d->crit_irq_mask = 0;
+ d->crit_thresh = 0;
}
d->up_thresh = tsens_hw_to_mC(s, UP_THRESH_0 + hw_id);
d->low_thresh = tsens_hw_to_mC(s, LOW_THRESH_0 + hw_id);
- dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u) | clr(%u|%u) | mask(%u|%u)\n",
- hw_id, __func__, (d->up_viol || d->low_viol) ? "(V)" : "",
- d->low_viol, d->up_viol, d->low_irq_clear, d->up_irq_clear,
- d->low_irq_mask, d->up_irq_mask);
- dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d)\n", hw_id, __func__,
- (d->up_viol || d->low_viol) ? "(violation)" : "",
- d->low_thresh, d->up_thresh);
+ dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u|%u) | clr(%u|%u|%u) | mask(%u|%u|%u)\n",
+ hw_id, __func__,
+ (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "",
+ d->low_viol, d->up_viol, d->crit_viol,
+ d->low_irq_clear, d->up_irq_clear, d->crit_irq_clear,
+ d->low_irq_mask, d->up_irq_mask, d->crit_irq_mask);
+ dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d:%d)\n", hw_id, __func__,
+ (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "",
+ d->low_thresh, d->up_thresh, d->crit_thresh);
return 0;
}
@@ -322,6 +361,76 @@ static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
}
/**
+ * tsens_critical_irq_thread() - Threaded handler for critical interrupts
+ * @irq: irq number
+ * @data: tsens controller private data
+ *
+ * Check FSM watchdog bark status and clear if needed.
+ * Check all sensors to find ones that violated their critical threshold limits.
+ * Clear and then re-enable the interrupt.
+ *
+ * The level-triggered interrupt might deassert if the temperature returned to
+ * within the threshold limits by the time the handler got scheduled. We
+ * consider the irq to have been handled in that case.
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t tsens_critical_irq_thread(int irq, void *data)
+{
+ struct tsens_priv *priv = data;
+ struct tsens_irq_data d;
+ int temp, ret, i;
+ u32 wdog_status, wdog_count;
+
+ if (priv->feat->has_watchdog) {
+ ret = regmap_field_read(priv->rf[WDOG_BARK_STATUS],
+ &wdog_status);
+ if (ret)
+ return ret;
+
+ if (wdog_status) {
+ /* Clear WDOG interrupt */
+ regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 1);
+ regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 0);
+ ret = regmap_field_read(priv->rf[WDOG_BARK_COUNT],
+ &wdog_count);
+ if (ret)
+ return ret;
+ if (wdog_count)
+ dev_dbg(priv->dev, "%s: watchdog count: %d\n",
+ __func__, wdog_count);
+
+ /* Fall through to handle critical interrupts if any */
+ }
+ }
+
+ for (i = 0; i < priv->num_sensors; i++) {
+ const struct tsens_sensor *s = &priv->sensor[i];
+ u32 hw_id = s->hw_id;
+
+ if (IS_ERR(s->tzd))
+ continue;
+ if (!tsens_threshold_violated(priv, hw_id, &d))
+ continue;
+ ret = get_temp_tsens_valid(s, &temp);
+ if (ret) {
+ dev_err(priv->dev, "[%u] %s: error reading sensor\n",
+ hw_id, __func__);
+ continue;
+ }
+
+ tsens_read_irq_state(priv, hw_id, s, &d);
+ if (d.crit_viol &&
+ !masked_irq(hw_id, d.crit_irq_mask, tsens_version(priv))) {
+ /* Mask critical interrupts, unused on Linux */
+ tsens_set_interrupt(priv, hw_id, CRITICAL, false);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
* tsens_irq_thread - Threaded interrupt handler for uplow interrupts
* @irq: irq number
* @data: tsens controller private data
@@ -346,10 +455,10 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
for (i = 0; i < priv->num_sensors; i++) {
bool trigger = false;
- struct tsens_sensor *s = &priv->sensor[i];
+ const struct tsens_sensor *s = &priv->sensor[i];
u32 hw_id = s->hw_id;
- if (IS_ERR(priv->sensor[i].tzd))
+ if (IS_ERR(s->tzd))
continue;
if (!tsens_threshold_violated(priv, hw_id, &d))
continue;
@@ -368,7 +477,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
tsens_set_interrupt(priv, hw_id, UPPER, disable);
if (d.up_thresh > temp) {
dev_dbg(priv->dev, "[%u] %s: re-arm upper\n",
- priv->sensor[i].hw_id, __func__);
+ hw_id, __func__);
tsens_set_interrupt(priv, hw_id, UPPER, enable);
} else {
trigger = true;
@@ -379,7 +488,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
tsens_set_interrupt(priv, hw_id, LOWER, disable);
if (d.low_thresh < temp) {
dev_dbg(priv->dev, "[%u] %s: re-arm low\n",
- priv->sensor[i].hw_id, __func__);
+ hw_id, __func__);
tsens_set_interrupt(priv, hw_id, LOWER, enable);
} else {
trigger = true;
@@ -392,7 +501,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
if (trigger) {
dev_dbg(priv->dev, "[%u] %s: TZ update trigger (%d mC)\n",
hw_id, __func__, temp);
- thermal_zone_device_update(priv->sensor[i].tzd,
+ thermal_zone_device_update(s->tzd,
THERMAL_EVENT_UNSPECIFIED);
} else {
dev_dbg(priv->dev, "[%u] %s: no violation: %d\n",
@@ -435,7 +544,7 @@ int tsens_set_trips(void *_sensor, int low, int high)
spin_unlock_irqrestore(&priv->ul_lock, flags);
dev_dbg(dev, "[%u] %s: (%d:%d)->(%d:%d)\n",
- s->hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high);
+ hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high);
return 0;
}
@@ -457,7 +566,7 @@ void tsens_disable_irq(struct tsens_priv *priv)
regmap_field_write(priv->rf[INT_EN], 0);
}
-int get_temp_tsens_valid(struct tsens_sensor *s, int *temp)
+int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp)
{
struct tsens_priv *priv = s->priv;
int hw_id = s->hw_id;
@@ -486,7 +595,7 @@ int get_temp_tsens_valid(struct tsens_sensor *s, int *temp)
return 0;
}
-int get_temp_common(struct tsens_sensor *s, int *temp)
+int get_temp_common(const struct tsens_sensor *s, int *temp)
{
struct tsens_priv *priv = s->priv;
int hw_id = s->hw_id;
@@ -590,6 +699,7 @@ int __init init_common(struct tsens_priv *priv)
{
void __iomem *tm_base, *srot_base;
struct device *dev = priv->dev;
+ u32 ver_minor;
struct resource *res;
u32 enabled;
int ret, i, j;
@@ -602,7 +712,7 @@ int __init init_common(struct tsens_priv *priv)
/* DT with separate SROT and TM address space */
priv->tm_offset = 0;
res = platform_get_resource(op, IORESOURCE_MEM, 1);
- srot_base = devm_ioremap_resource(&op->dev, res);
+ srot_base = devm_ioremap_resource(dev, res);
if (IS_ERR(srot_base)) {
ret = PTR_ERR(srot_base);
goto err_put_device;
@@ -620,7 +730,7 @@ int __init init_common(struct tsens_priv *priv)
}
res = platform_get_resource(op, IORESOURCE_MEM, 0);
- tm_base = devm_ioremap_resource(&op->dev, res);
+ tm_base = devm_ioremap_resource(dev, res);
if (IS_ERR(tm_base)) {
ret = PTR_ERR(tm_base);
goto err_put_device;
@@ -639,6 +749,9 @@ int __init init_common(struct tsens_priv *priv)
if (IS_ERR(priv->rf[i]))
return PTR_ERR(priv->rf[i]);
}
+ ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
+ if (ret)
+ goto err_put_device;
}
priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
@@ -683,12 +796,47 @@ int __init init_common(struct tsens_priv *priv)
}
}
+ if (priv->feat->crit_int) {
+ /* Loop might need changes if enum regfield_ids is reordered */
+ for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) {
+ for (i = 0; i < priv->feat->max_sensors; i++) {
+ int idx = j + i;
+
+ priv->rf[idx] =
+ devm_regmap_field_alloc(dev,
+ priv->tm_map,
+ priv->fields[idx]);
+ if (IS_ERR(priv->rf[idx])) {
+ ret = PTR_ERR(priv->rf[idx]);
+ goto err_put_device;
+ }
+ }
+ }
+ }
+
+ if (tsens_version(priv) > VER_1_X && ver_minor > 2) {
+ /* Watchdog is present only on v2.3+ */
+ priv->feat->has_watchdog = 1;
+ for (i = WDOG_BARK_STATUS; i <= CC_MON_MASK; i++) {
+ priv->rf[i] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[i]);
+ if (IS_ERR(priv->rf[i])) {
+ ret = PTR_ERR(priv->rf[i]);
+ goto err_put_device;
+ }
+ }
+ /*
+ * Watchdog is already enabled, unmask the bark.
+ * Disable cycle completion monitoring
+ */
+ regmap_field_write(priv->rf[WDOG_BARK_MASK], 0);
+ regmap_field_write(priv->rf[CC_MON_MASK], 1);
+ }
+
spin_lock_init(&priv->ul_lock);
tsens_enable_irq(priv);
tsens_debug_init(op);
- return 0;
-
err_put_device:
put_device(&op->dev);
return ret;
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index 4b8dd6de02ce..959a9371d205 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -327,7 +327,7 @@ static int calibrate_8974(struct tsens_priv *priv)
/* v0.1: 8916, 8974 */
-static const struct tsens_features tsens_v0_1_feat = {
+static struct tsens_features tsens_v0_1_feat = {
.ver_major = VER_0_1,
.crit_int = 0,
.adc = 1,
@@ -377,7 +377,7 @@ static const struct tsens_ops ops_8916 = {
.get_temp = get_temp_common,
};
-const struct tsens_plat_data data_8916 = {
+struct tsens_plat_data data_8916 = {
.num_sensors = 5,
.ops = &ops_8916,
.hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
@@ -392,7 +392,7 @@ static const struct tsens_ops ops_8974 = {
.get_temp = get_temp_common,
};
-const struct tsens_plat_data data_8974 = {
+struct tsens_plat_data data_8974 = {
.num_sensors = 11,
.ops = &ops_8974,
.feat = &tsens_v0_1_feat,
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index bd2ddb684a45..b682a4df0081 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -299,7 +299,7 @@ static int calibrate_8976(struct tsens_priv *priv)
/* v1.x: msm8956,8976,qcs404,405 */
-static const struct tsens_features tsens_v1_feat = {
+static struct tsens_features tsens_v1_feat = {
.ver_major = VER_1_X,
.crit_int = 0,
.adc = 1,
@@ -368,7 +368,7 @@ static const struct tsens_ops ops_generic_v1 = {
.get_temp = get_temp_tsens_valid,
};
-const struct tsens_plat_data data_tsens_v1 = {
+struct tsens_plat_data data_tsens_v1 = {
.ops = &ops_generic_v1,
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
@@ -381,7 +381,7 @@ static const struct tsens_ops ops_8976 = {
};
/* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. */
-const struct tsens_plat_data data_8976 = {
+struct tsens_plat_data data_8976 = {
.num_sensors = 11,
.ops = &ops_8976,
.hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10},
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index a4d15e1abfdd..b293ed32174b 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -24,10 +24,11 @@
#define TM_Sn_CRITICAL_THRESHOLD_OFF 0x0060
#define TM_Sn_STATUS_OFF 0x00a0
#define TM_TRDY_OFF 0x00e4
+#define TM_WDOG_LOG_OFF 0x013c
/* v2.x: 8996, 8998, sdm845 */
-static const struct tsens_features tsens_v2_feat = {
+static struct tsens_features tsens_v2_feat = {
.ver_major = VER_2_X,
.crit_int = 1,
.adc = 0,
@@ -51,8 +52,9 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
[INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 2),
/* TEMPERATURE THRESHOLDS */
- REG_FIELD_FOR_EACH_SENSOR16(LOW_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 0, 11),
- REG_FIELD_FOR_EACH_SENSOR16(UP_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 12, 23),
+ REG_FIELD_FOR_EACH_SENSOR16(LOW_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 0, 11),
+ REG_FIELD_FOR_EACH_SENSOR16(UP_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 12, 23),
+ REG_FIELD_FOR_EACH_SENSOR16(CRIT_THRESH, TM_Sn_CRITICAL_THRESHOLD_OFF, 0, 11),
/* INTERRUPTS [CLEAR/STATUS/MASK] */
REG_FIELD_SPLIT_BITS_0_15(LOW_INT_STATUS, TM_UPPER_LOWER_INT_STATUS_OFF),
@@ -61,6 +63,18 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
REG_FIELD_SPLIT_BITS_16_31(UP_INT_STATUS, TM_UPPER_LOWER_INT_STATUS_OFF),
REG_FIELD_SPLIT_BITS_16_31(UP_INT_CLEAR, TM_UPPER_LOWER_INT_CLEAR_OFF),
REG_FIELD_SPLIT_BITS_16_31(UP_INT_MASK, TM_UPPER_LOWER_INT_MASK_OFF),
+ REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_STATUS, TM_CRITICAL_INT_STATUS_OFF),
+ REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_CLEAR, TM_CRITICAL_INT_CLEAR_OFF),
+ REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_MASK, TM_CRITICAL_INT_MASK_OFF),
+
+ /* WATCHDOG on v2.3 or later */
+ [WDOG_BARK_STATUS] = REG_FIELD(TM_CRITICAL_INT_STATUS_OFF, 31, 31),
+ [WDOG_BARK_CLEAR] = REG_FIELD(TM_CRITICAL_INT_CLEAR_OFF, 31, 31),
+ [WDOG_BARK_MASK] = REG_FIELD(TM_CRITICAL_INT_MASK_OFF, 31, 31),
+ [CC_MON_STATUS] = REG_FIELD(TM_CRITICAL_INT_STATUS_OFF, 30, 30),
+ [CC_MON_CLEAR] = REG_FIELD(TM_CRITICAL_INT_CLEAR_OFF, 30, 30),
+ [CC_MON_MASK] = REG_FIELD(TM_CRITICAL_INT_MASK_OFF, 30, 30),
+ [WDOG_BARK_COUNT] = REG_FIELD(TM_WDOG_LOG_OFF, 0, 7),
/* Sn_STATUS */
REG_FIELD_FOR_EACH_SENSOR16(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 11),
@@ -81,14 +95,14 @@ static const struct tsens_ops ops_generic_v2 = {
.get_temp = get_temp_tsens_valid,
};
-const struct tsens_plat_data data_tsens_v2 = {
+struct tsens_plat_data data_tsens_v2 = {
.ops = &ops_generic_v2,
.feat = &tsens_v2_feat,
.fields = tsens_v2_regfields,
};
/* Kept around for backward compatibility with old msm8996.dtsi */
-const struct tsens_plat_data data_8996 = {
+struct tsens_plat_data data_8996 = {
.num_sensors = 13,
.ops = &ops_generic_v2,
.feat = &tsens_v2_feat,
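
The new watchdog and cycle-completion-monitor entries show how REG_FIELD() can
carve single bits (30 and 31) out of the same critical-interrupt registers
that already serve the per-sensor split fields. A minimal sketch of the same
one-bit-field pattern against a hypothetical regmap (the offset and names here
are illustrative, not the tsens ones):

    #include <linux/regmap.h>

    #define DEMO_INT_MASK_OFF	0x0010

    /* Bit 31 of the mask register, described as a standalone field. */
    static const struct reg_field demo_wdog_mask =
    	REG_FIELD(DEMO_INT_MASK_OFF, 31, 31);

    static int demo_unmask_wdog(struct device *dev, struct regmap *map)
    {
    	struct regmap_field *f;

    	f = devm_regmap_field_alloc(dev, map, demo_wdog_mask);
    	if (IS_ERR(f))
    		return PTR_ERR(f);

    	/* Writing 0 to a *_MASK field unmasks the interrupt. */
    	return regmap_field_write(f, 0);
    }
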
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 0e7cf5236932..2f77d235cf73 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -85,11 +85,42 @@ static const struct thermal_zone_of_device_ops tsens_of_ops = {
.set_trips = tsens_set_trips,
};
+static int tsens_register_irq(struct tsens_priv *priv, char *irqname,
+ irq_handler_t thread_fn)
+{
+ struct platform_device *pdev;
+ int ret, irq;
+
+ pdev = of_find_device_by_node(priv->dev->of_node);
+ if (!pdev)
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, irqname);
+ if (irq < 0) {
+ ret = irq;
+ /* For old DTs with no IRQ defined */
+ if (irq == -ENXIO)
+ ret = 0;
+ } else {
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, thread_fn,
+ IRQF_ONESHOT,
+ dev_name(&pdev->dev), priv);
+ if (ret)
+ dev_err(&pdev->dev, "%s: failed to get irq\n",
+ __func__);
+ else
+ enable_irq_wake(irq);
+ }
+
+ put_device(&pdev->dev);
+ return ret;
+}
+
static int tsens_register(struct tsens_priv *priv)
{
- int i, ret, irq;
+ int i, ret;
struct thermal_zone_device *tzd;
- struct platform_device *pdev;
for (i = 0; i < priv->num_sensors; i++) {
priv->sensor[i].priv = priv;
@@ -103,32 +134,14 @@ static int tsens_register(struct tsens_priv *priv)
priv->ops->enable(priv, i);
}
- pdev = of_find_device_by_node(priv->dev->of_node);
- if (!pdev)
- return -ENODEV;
-
- irq = platform_get_irq_byname(pdev, "uplow");
- if (irq < 0) {
- ret = irq;
- /* For old DTs with no IRQ defined */
- if (irq == -ENXIO)
- ret = 0;
- goto err_put_device;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, irq,
- NULL, tsens_irq_thread,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- dev_name(&pdev->dev), priv);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to get irq\n", __func__);
- goto err_put_device;
- }
+ ret = tsens_register_irq(priv, "uplow", tsens_irq_thread);
+ if (ret < 0)
+ return ret;
- enable_irq_wake(irq);
+ if (priv->feat->crit_int)
+ ret = tsens_register_irq(priv, "critical",
+ tsens_critical_irq_thread);
-err_put_device:
- put_device(&pdev->dev);
return ret;
}
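
tsens_register_irq() folds the old inline registration into a helper so the
critical interrupt can reuse it, and it keeps treating -ENXIO from
platform_get_irq_byname() as success so that old DTs without the named
interrupt still probe. The tolerance pattern in isolation (IRQ name and helper
name are placeholders):

    static int demo_optional_irq(struct platform_device *pdev,
    			     irq_handler_t thread_fn, void *data)
    {
    	int irq = platform_get_irq_byname(pdev, "demo");

    	/* Old DTs may simply not describe this interrupt. */
    	if (irq < 0)
    		return irq == -ENXIO ? 0 : irq;

    	return devm_request_threaded_irq(&pdev->dev, irq, NULL, thread_fn,
    					 IRQF_ONESHOT, dev_name(&pdev->dev),
    					 data);
    }
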
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index e24a865fbc34..502acf0e6828 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -23,6 +23,7 @@
struct tsens_priv;
+/* IP version numbers in ascending order */
enum tsens_ver {
VER_0_1 = 0,
VER_1_X,
@@ -32,6 +33,7 @@ enum tsens_ver {
enum tsens_irq_type {
LOWER,
UPPER,
+ CRITICAL,
};
/**
@@ -67,7 +69,7 @@ struct tsens_ops {
/* mandatory callbacks */
int (*init)(struct tsens_priv *priv);
int (*calibrate)(struct tsens_priv *priv);
- int (*get_temp)(struct tsens_sensor *s, int *temp);
+ int (*get_temp)(const struct tsens_sensor *s, int *temp);
/* optional callbacks */
int (*enable)(struct tsens_priv *priv, int i);
void (*disable)(struct tsens_priv *priv);
@@ -374,6 +376,82 @@ enum regfield_ids {
CRITICAL_STATUS_13,
CRITICAL_STATUS_14,
CRITICAL_STATUS_15,
+ CRIT_INT_STATUS_0, /* CRITICAL interrupt status */
+ CRIT_INT_STATUS_1,
+ CRIT_INT_STATUS_2,
+ CRIT_INT_STATUS_3,
+ CRIT_INT_STATUS_4,
+ CRIT_INT_STATUS_5,
+ CRIT_INT_STATUS_6,
+ CRIT_INT_STATUS_7,
+ CRIT_INT_STATUS_8,
+ CRIT_INT_STATUS_9,
+ CRIT_INT_STATUS_10,
+ CRIT_INT_STATUS_11,
+ CRIT_INT_STATUS_12,
+ CRIT_INT_STATUS_13,
+ CRIT_INT_STATUS_14,
+ CRIT_INT_STATUS_15,
+ CRIT_INT_CLEAR_0, /* CRITICAL interrupt clear */
+ CRIT_INT_CLEAR_1,
+ CRIT_INT_CLEAR_2,
+ CRIT_INT_CLEAR_3,
+ CRIT_INT_CLEAR_4,
+ CRIT_INT_CLEAR_5,
+ CRIT_INT_CLEAR_6,
+ CRIT_INT_CLEAR_7,
+ CRIT_INT_CLEAR_8,
+ CRIT_INT_CLEAR_9,
+ CRIT_INT_CLEAR_10,
+ CRIT_INT_CLEAR_11,
+ CRIT_INT_CLEAR_12,
+ CRIT_INT_CLEAR_13,
+ CRIT_INT_CLEAR_14,
+ CRIT_INT_CLEAR_15,
+ CRIT_INT_MASK_0, /* CRITICAL interrupt mask */
+ CRIT_INT_MASK_1,
+ CRIT_INT_MASK_2,
+ CRIT_INT_MASK_3,
+ CRIT_INT_MASK_4,
+ CRIT_INT_MASK_5,
+ CRIT_INT_MASK_6,
+ CRIT_INT_MASK_7,
+ CRIT_INT_MASK_8,
+ CRIT_INT_MASK_9,
+ CRIT_INT_MASK_10,
+ CRIT_INT_MASK_11,
+ CRIT_INT_MASK_12,
+ CRIT_INT_MASK_13,
+ CRIT_INT_MASK_14,
+ CRIT_INT_MASK_15,
+ CRIT_THRESH_0, /* CRITICAL threshold values */
+ CRIT_THRESH_1,
+ CRIT_THRESH_2,
+ CRIT_THRESH_3,
+ CRIT_THRESH_4,
+ CRIT_THRESH_5,
+ CRIT_THRESH_6,
+ CRIT_THRESH_7,
+ CRIT_THRESH_8,
+ CRIT_THRESH_9,
+ CRIT_THRESH_10,
+ CRIT_THRESH_11,
+ CRIT_THRESH_12,
+ CRIT_THRESH_13,
+ CRIT_THRESH_14,
+ CRIT_THRESH_15,
+
+ /* WATCHDOG */
+ WDOG_BARK_STATUS,
+ WDOG_BARK_CLEAR,
+ WDOG_BARK_MASK,
+ WDOG_BARK_COUNT,
+
+ /* CYCLE COMPLETION MONITOR */
+ CC_MON_STATUS,
+ CC_MON_CLEAR,
+ CC_MON_MASK,
+
MIN_STATUS_0, /* MIN threshold violated */
MIN_STATUS_1,
MIN_STATUS_2,
@@ -418,6 +496,7 @@ enum regfield_ids {
* @adc: do the sensors only output adc code (instead of temperature)?
* @srot_split: does the IP neatly splits the register space into SROT and TM,
* with SROT only being available to secure boot firmware?
+ * @has_watchdog: does this IP support watchdog functionality?
* @max_sensors: maximum sensors supported by this version of the IP
*/
struct tsens_features {
@@ -425,6 +504,7 @@ struct tsens_features {
unsigned int crit_int:1;
unsigned int adc:1;
unsigned int srot_split:1;
+ unsigned int has_watchdog:1;
unsigned int max_sensors;
};
@@ -440,12 +520,14 @@ struct tsens_plat_data {
const u32 num_sensors;
const struct tsens_ops *ops;
unsigned int *hw_ids;
- const struct tsens_features *feat;
+ struct tsens_features *feat;
const struct reg_field *fields;
};
/**
* struct tsens_context - Registers to be saved/restored across a context loss
+ * @threshold: Threshold register value
+ * @control: Control register value
*/
struct tsens_context {
int threshold;
@@ -460,6 +542,8 @@ struct tsens_context {
* @srot_map: pointer to SROT register address space
* @tm_offset: deal with old device trees that don't address TM and SROT
* address space separately
+ * @ul_lock: lock while processing upper/lower threshold interrupts
+ * @crit_lock: lock while processing critical threshold interrupts
* @rf: array of regmap_fields used to store value of the field
* @ctx: registers to be saved and restored during suspend/resume
* @feat: features of the IP
@@ -481,36 +565,37 @@ struct tsens_priv {
struct regmap_field *rf[MAX_REGFIELDS];
struct tsens_context ctx;
- const struct tsens_features *feat;
+ struct tsens_features *feat;
const struct reg_field *fields;
const struct tsens_ops *ops;
struct dentry *debug_root;
struct dentry *debug;
- struct tsens_sensor sensor[0];
+ struct tsens_sensor sensor[];
};
char *qfprom_read(struct device *dev, const char *cname);
void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mode);
int init_common(struct tsens_priv *priv);
-int get_temp_tsens_valid(struct tsens_sensor *s, int *temp);
-int get_temp_common(struct tsens_sensor *s, int *temp);
+int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp);
+int get_temp_common(const struct tsens_sensor *s, int *temp);
int tsens_enable_irq(struct tsens_priv *priv);
void tsens_disable_irq(struct tsens_priv *priv);
int tsens_set_trips(void *_sensor, int low, int high);
irqreturn_t tsens_irq_thread(int irq, void *data);
+irqreturn_t tsens_critical_irq_thread(int irq, void *data);
/* TSENS target */
-extern const struct tsens_plat_data data_8960;
+extern struct tsens_plat_data data_8960;
/* TSENS v0.1 targets */
-extern const struct tsens_plat_data data_8916, data_8974;
+extern struct tsens_plat_data data_8916, data_8974;
/* TSENS v1 targets */
-extern const struct tsens_plat_data data_tsens_v1, data_8976;
+extern struct tsens_plat_data data_tsens_v1, data_8976;
/* TSENS v2 targets */
-extern const struct tsens_plat_data data_8996, data_tsens_v2;
+extern struct tsens_plat_data data_8996, data_tsens_v2;
#endif /* __QCOM_TSENS_H__ */
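
The init_common() loop above walks this enum from CRITICAL_STATUS_0 to
CRIT_THRESH_15 in strides of 16, so each per-sensor group must stay contiguous
and exactly 16 entries wide; that is what the "Loop might need changes if enum
regfield_ids is reordered" comment is warning about. One hedged way to pin the
invariant down at compile time (the placement of these checks is a suggestion,
not part of the patch):

    #include <linux/build_bug.h>

    static inline void tsens_check_regfield_layout(void)
    {
    	/* Each group must span exactly 16 consecutive regfield ids. */
    	BUILD_BUG_ON(CRIT_INT_STATUS_0 - CRITICAL_STATUS_0 != 16);
    	BUILD_BUG_ON(CRIT_INT_CLEAR_0 - CRIT_INT_STATUS_0 != 16);
    	BUILD_BUG_ON(CRIT_INT_MASK_0 - CRIT_INT_CLEAR_0 != 16);
    	BUILD_BUG_ON(CRIT_THRESH_0 - CRIT_INT_MASK_0 != 16);
    }
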
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 874bc46e6c73..028a6bbf75dc 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -3,12 +3,11 @@
// Copyright 2016 Freescale Semiconductor, Inc.
#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/thermal.h>
@@ -228,6 +227,14 @@ static const struct regmap_access_table qoriq_rd_table = {
.n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges),
};
+static void qoriq_tmu_action(void *p)
+{
+ struct qoriq_tmu_data *data = p;
+
+ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
+ clk_disable_unprepare(data->clk);
+}
+
static int qoriq_tmu_probe(struct platform_device *pdev)
{
int ret;
@@ -278,6 +285,10 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
return ret;
}
+ ret = devm_add_action_or_reset(dev, qoriq_tmu_action, data);
+ if (ret)
+ return ret;
+
/* version register offset at: 0xbf8 on both v1 and v2 */
ret = regmap_read(data->regmap, REGS_IPBRR(0), &ver);
if (ret) {
@@ -290,35 +301,17 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
ret = qoriq_tmu_calibration(dev, data); /* TMU calibration */
if (ret < 0)
- goto err;
+ return ret;
ret = qoriq_tmu_register_tmu_zone(dev, data);
if (ret < 0) {
dev_err(dev, "Failed to register sensors\n");
- ret = -ENODEV;
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, data);
return 0;
-
-err:
- clk_disable_unprepare(data->clk);
-
- return ret;
-}
-
-static int qoriq_tmu_remove(struct platform_device *pdev)
-{
- struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
-
- /* Disable monitoring */
- regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
-
- clk_disable_unprepare(data->clk);
-
- return 0;
}
static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
@@ -365,7 +358,6 @@ static struct platform_driver qoriq_tmu = {
.of_match_table = qoriq_tmu_match,
},
.probe = qoriq_tmu_probe,
- .remove = qoriq_tmu_remove,
};
module_platform_driver(qoriq_tmu);
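
Registering qoriq_tmu_action() through devm_add_action_or_reset() is what lets
both the err: unwind path and the whole qoriq_tmu_remove() callback be
deleted: devres now runs the TMR-disable and clock teardown on probe failure
and on unbind alike. The general shape of the pattern, with placeholder names:

    static void demo_teardown(void *p)
    {
    	struct clk *clk = p;

    	clk_disable_unprepare(clk);
    }

    static int demo_probe(struct platform_device *pdev)
    {
    	struct clk *clk = devm_clk_get(&pdev->dev, NULL);
    	int ret;

    	if (IS_ERR(clk))
    		return PTR_ERR(clk);

    	ret = clk_prepare_enable(clk);
    	if (ret)
    		return ret;

    	/* Any later error return now unwinds through demo_teardown(). */
    	return devm_add_action_or_reset(&pdev->dev, demo_teardown, clk);
    }
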
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 72877bdc072d..58fe7c1ef00b 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -81,8 +81,6 @@ struct rcar_gen3_thermal_tsc {
void __iomem *base;
struct thermal_zone_device *zone;
struct equation_coefs coef;
- int low;
- int high;
int tj_t;
int id; /* thermal channel id */
};
@@ -204,12 +202,14 @@ static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
return INT_FIXPT(val);
}
-static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
+static int rcar_gen3_thermal_update_range(struct rcar_gen3_thermal_tsc *tsc)
{
- struct rcar_gen3_thermal_tsc *tsc = devdata;
+ int temperature, low, high;
+
+ rcar_gen3_thermal_get_temp(tsc, &temperature);
- low = clamp_val(low, -40000, 120000);
- high = clamp_val(high, -40000, 120000);
+ low = temperature - MCELSIUS(1);
+ high = temperature + MCELSIUS(1);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP1,
rcar_gen3_thermal_mcelsius_to_temp(tsc, low));
@@ -217,15 +217,11 @@ static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP2,
rcar_gen3_thermal_mcelsius_to_temp(tsc, high));
- tsc->low = low;
- tsc->high = high;
-
return 0;
}
static const struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
.get_temp = rcar_gen3_thermal_get_temp,
- .set_trips = rcar_gen3_thermal_set_trips,
};
static void rcar_thermal_irq_set(struct rcar_gen3_thermal_priv *priv, bool on)
@@ -246,9 +242,11 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
for (i = 0; i < priv->num_tscs; i++) {
status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
- if (status)
+ if (status) {
+ rcar_gen3_thermal_update_range(priv->tscs[i]);
thermal_zone_device_update(priv->tscs[i]->zone,
THERMAL_EVENT_UNSPECIFIED);
+ }
}
return IRQ_HANDLED;
@@ -325,6 +323,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
.data = &rcar_gen3_ths_tj_1_m3_w,
},
{
+ .compatible = "renesas,r8a77961-thermal",
+ .data = &rcar_gen3_ths_tj_1_m3_w,
+ },
+ {
.compatible = "renesas,r8a77965-thermal",
.data = &rcar_gen3_ths_tj_1,
},
@@ -446,14 +448,15 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
goto error_unregister;
ret = devm_add_action_or_reset(dev, rcar_gen3_hwmon_action, zone);
- if (ret) {
+ if (ret)
goto error_unregister;
- }
ret = of_thermal_get_ntrips(tsc->zone);
if (ret < 0)
goto error_unregister;
+ rcar_gen3_thermal_update_range(tsc);
+
dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
}
@@ -492,7 +495,7 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
priv->thermal_init(tsc);
- rcar_gen3_thermal_set_trips(tsc, tsc->low, tsc->high);
+ rcar_gen3_thermal_update_range(tsc);
}
rcar_thermal_irq_set(priv, true);
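
Dropping .set_trips in favor of rcar_gen3_thermal_update_range() changes the
interrupt scheme: rather than arming whatever window the thermal core asks
for, the driver re-centers a fixed ±1°C window around the current reading each
time the sensor fires, so any meaningful drift re-triggers a zone update.
Schematically (the two write helpers below are placeholders for the
IRQTEMP1/IRQTEMP2 register writes):

    static void demo_rearm_window(struct rcar_gen3_thermal_tsc *tsc)
    {
    	int temperature;

    	rcar_gen3_thermal_get_temp(tsc, &temperature);

    	/* Fire again once the temperature drifts 1 degC either way. */
    	demo_write_low_threshold(tsc, temperature - MCELSIUS(1));
    	demo_write_high_threshold(tsc, temperature + MCELSIUS(1));
    }
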
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 8f1aafa2044e..e0c1f2409035 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -95,7 +95,6 @@ struct rcar_thermal_priv {
struct mutex lock;
struct list_head list;
int id;
- u32 ctemp;
};
#define rcar_thermal_for_each_priv(pos, common) \
@@ -201,7 +200,6 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
struct device *dev = rcar_priv_to_dev(priv);
int i;
u32 ctemp, old, new;
- int ret = -EINVAL;
mutex_lock(&priv->lock);
@@ -247,37 +245,29 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
((ctemp - 1) << 0)));
}
- dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp);
-
- priv->ctemp = ctemp;
- ret = 0;
err_out_unlock:
mutex_unlock(&priv->lock);
- return ret;
+
+ return ctemp ? ctemp : -EINVAL;
}
static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
int *temp)
{
- int tmp;
- int ret;
-
- ret = rcar_thermal_update_temp(priv);
- if (ret < 0)
- return ret;
+ int ctemp;
- mutex_lock(&priv->lock);
- if (priv->chip->ctemp_bands == 1)
- tmp = MCELSIUS((priv->ctemp * 5) - 65);
- else if (priv->ctemp < 24)
- tmp = MCELSIUS(((priv->ctemp * 55) - 720) / 10);
- else
- tmp = MCELSIUS((priv->ctemp * 5) - 60);
- mutex_unlock(&priv->lock);
+ ctemp = rcar_thermal_update_temp(priv);
+ if (ctemp < 0)
+ return ctemp;
/* Guaranteed operating range is -45C to 125C. */
- *temp = tmp;
+ if (priv->chip->ctemp_bands == 1)
+ *temp = MCELSIUS((ctemp * 5) - 65);
+ else if (ctemp < 24)
+ *temp = MCELSIUS(((ctemp * 55) - 720) / 10);
+ else
+ *temp = MCELSIUS((ctemp * 5) - 60);
return 0;
}
@@ -387,28 +377,17 @@ static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
static void rcar_thermal_work(struct work_struct *work)
{
struct rcar_thermal_priv *priv;
- int cctemp, nctemp;
int ret;
priv = container_of(work, struct rcar_thermal_priv, work.work);
- ret = rcar_thermal_get_current_temp(priv, &cctemp);
- if (ret < 0)
- return;
-
ret = rcar_thermal_update_temp(priv);
if (ret < 0)
return;
rcar_thermal_irq_enable(priv);
- ret = rcar_thermal_get_current_temp(priv, &nctemp);
- if (ret < 0)
- return;
-
- if (nctemp != cctemp)
- thermal_zone_device_update(priv->zone,
- THERMAL_EVENT_UNSPECIFIED);
+ thermal_zone_device_update(priv->zone, THERMAL_EVENT_UNSPECIFIED);
}
static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
@@ -521,8 +500,10 @@ static int rcar_thermal_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM,
mres++);
common->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(common->base))
- return PTR_ERR(common->base);
+ if (IS_ERR(common->base)) {
+ ret = PTR_ERR(common->base);
+ goto error_unregister;
+ }
idle = 0; /* polling delay is not needed */
}
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index fd4a17812f33..e9a90bc23b11 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1094,7 +1094,9 @@ static int exynos_tmu_probe(struct platform_device *pdev)
&exynos_sensor_ops);
if (IS_ERR(data->tzd)) {
ret = PTR_ERR(data->tzd);
- dev_err(&pdev->dev, "Failed to register sensor: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to register sensor: %d\n",
+ ret);
goto err_sclk;
}
diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
new file mode 100644
index 000000000000..a340374e8c51
--- /dev/null
+++ b/drivers/thermal/sprd_thermal.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define SPRD_THM_CTL 0x0
+#define SPRD_THM_INT_EN 0x4
+#define SPRD_THM_INT_STS 0x8
+#define SPRD_THM_INT_RAW_STS 0xc
+#define SPRD_THM_DET_PERIOD 0x10
+#define SPRD_THM_INT_CLR 0x14
+#define SPRD_THM_INT_CLR_ST 0x18
+#define SPRD_THM_MON_PERIOD 0x4c
+#define SPRD_THM_MON_CTL 0x50
+#define SPRD_THM_INTERNAL_STS1 0x54
+#define SPRD_THM_RAW_READ_MSK 0x3ff
+
+#define SPRD_THM_OFFSET(id) ((id) * 0x4)
+#define SPRD_THM_TEMP(id) (SPRD_THM_OFFSET(id) + 0x5c)
+#define SPRD_THM_THRES(id) (SPRD_THM_OFFSET(id) + 0x2c)
+
+#define SPRD_THM_SEN(id) BIT((id) + 2)
+#define SPRD_THM_SEN_OVERHEAT_EN(id) BIT((id) + 8)
+#define SPRD_THM_SEN_OVERHEAT_ALARM_EN(id) BIT((id) + 0)
+
+/* bits definitions for register THM_CTL */
+#define SPRD_THM_SET_RDY_ST BIT(13)
+#define SPRD_THM_SET_RDY BIT(12)
+#define SPRD_THM_MON_EN BIT(1)
+#define SPRD_THM_EN BIT(0)
+
+/* bits definitions for register THM_INT_CTL */
+#define SPRD_THM_BIT_INT_EN BIT(26)
+#define SPRD_THM_OVERHEAT_EN BIT(25)
+#define SPRD_THM_OTP_TRIP_SHIFT 10
+
+/* bits definitions for register SPRD_THM_INTERNAL_STS1 */
+#define SPRD_THM_TEMPER_RDY BIT(0)
+
+#define SPRD_THM_DET_PERIOD_DATA 0x800
+#define SPRD_THM_DET_PERIOD_MASK GENMASK(19, 0)
+#define SPRD_THM_MON_MODE 0x7
+#define SPRD_THM_MON_MODE_MASK GENMASK(3, 0)
+#define SPRD_THM_MON_PERIOD_DATA 0x10
+#define SPRD_THM_MON_PERIOD_MASK GENMASK(15, 0)
+#define SPRD_THM_THRES_MASK GENMASK(19, 0)
+#define SPRD_THM_INT_CLR_MASK GENMASK(24, 0)
+
+/* thermal sensor calibration parameters */
+#define SPRD_THM_TEMP_LOW -40000
+#define SPRD_THM_TEMP_HIGH 120000
+#define SPRD_THM_OTP_TEMP 120000
+#define SPRD_THM_HOT_TEMP 75000
+#define SPRD_THM_RAW_DATA_LOW 0
+#define SPRD_THM_RAW_DATA_HIGH 1000
+#define SPRD_THM_SEN_NUM 8
+#define SPRD_THM_DT_OFFSET 24
+#define SPRD_THM_RATION_OFFSET 17
+#define SPRD_THM_RATION_SIGN 16
+
+#define SPRD_THM_RDYST_POLLING_TIME 10
+#define SPRD_THM_RDYST_TIMEOUT 700
+#define SPRD_THM_TEMP_READY_POLL_TIME 10000
+#define SPRD_THM_TEMP_READY_TIMEOUT 600000
+#define SPRD_THM_MAX_SENSOR 8
+
+struct sprd_thermal_sensor {
+ struct thermal_zone_device *tzd;
+ struct sprd_thermal_data *data;
+ struct device *dev;
+ int cal_slope;
+ int cal_offset;
+ int id;
+};
+
+struct sprd_thermal_data {
+ const struct sprd_thm_variant_data *var_data;
+ struct sprd_thermal_sensor *sensor[SPRD_THM_MAX_SENSOR];
+ struct clk *clk;
+ void __iomem *base;
+ u32 ratio_off;
+ int ratio_sign;
+ int nr_sensors;
+};
+
+/*
+ * The conversion between the ADC code and temperature is a linear
+ * relationship; ideal_k specifies the slope and ideal_b the offset.
+ *
+ * Since different Spreadtrum SoCs have different ideal_k and ideal_b,
+ * we should save ideal_k and ideal_b in the device data structure.
+ */
+struct sprd_thm_variant_data {
+ u32 ideal_k;
+ u32 ideal_b;
+};
+
+static const struct sprd_thm_variant_data ums512_data = {
+ .ideal_k = 262,
+ .ideal_b = 66400,
+};
+
+static inline void sprd_thm_update_bits(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(reg);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ writel(tmp, reg);
+}
+
+static int sprd_thm_cal_read(struct device_node *np, const char *cell_id,
+ u32 *val)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+
+ cell = of_nvmem_cell_get(np, cell_id);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ if (len > sizeof(u32)) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ memcpy(val, buf, len);
+
+ kfree(buf);
+ return 0;
+}
+
+static int sprd_thm_sensor_calibration(struct device_node *np,
+ struct sprd_thermal_data *thm,
+ struct sprd_thermal_sensor *sen)
+{
+ int ret;
+ /*
+ * According to thermal datasheet, the default calibration offset is 64,
+	 * According to the thermal datasheet, the default calibration
+	 * offset is 64 and the default ratio is 1000.
+ int dt_offset = 64, ratio = 1000;
+
+ ret = sprd_thm_cal_read(np, "sen_delta_cal", &dt_offset);
+ if (ret)
+ return ret;
+
+ ratio += thm->ratio_sign * thm->ratio_off;
+
+ /*
+	 * Combine the ideal slope K and ideal offset B with the thermal
+	 * calibration values read from the eFuse to derive the real
+	 * slope k and offset b:
+	 * k_cal = (k * ratio) / 1000.
+	 * b_cal = b + (dt_offset - 128) * 250.
+ */
+ sen->cal_slope = (thm->var_data->ideal_k * ratio) / 1000;
+ sen->cal_offset = thm->var_data->ideal_b + (dt_offset - 128) * 250;
+
+ return 0;
+}
+
+static int sprd_thm_rawdata_to_temp(struct sprd_thermal_sensor *sen,
+ u32 rawdata)
+{
+	rawdata = clamp(rawdata, (u32)SPRD_THM_RAW_DATA_LOW,
+			(u32)SPRD_THM_RAW_DATA_HIGH);
+
+ /*
+ * According to the thermal datasheet, the formula of converting
+ * adc value to the temperature value should be:
+ * T_final = k_cal * x - b_cal.
+ */
+ return sen->cal_slope * rawdata - sen->cal_offset;
+}
+
+static int sprd_thm_temp_to_rawdata(int temp, struct sprd_thermal_sensor *sen)
+{
+ u32 val;
+
+	temp = clamp(temp, (int)SPRD_THM_TEMP_LOW, (int)SPRD_THM_TEMP_HIGH);
+
+	/*
+	 * Inverting the conversion formula above, the raw adc value for
+	 * a given temperature is:
+	 * x = (T_final + b_cal) / k_cal.
+	 */
+	val = (temp + sen->cal_offset) / sen->cal_slope;
+
+	return min(val, (u32)(SPRD_THM_RAW_DATA_HIGH - 1));
+}
+
+static int sprd_thm_read_temp(void *devdata, int *temp)
+{
+ struct sprd_thermal_sensor *sen = devdata;
+ u32 data;
+
+ data = readl(sen->data->base + SPRD_THM_TEMP(sen->id)) &
+ SPRD_THM_RAW_READ_MSK;
+
+ *temp = sprd_thm_rawdata_to_temp(sen, data);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops sprd_thm_ops = {
+ .get_temp = sprd_thm_read_temp,
+};
+
+static int sprd_thm_poll_ready_status(struct sprd_thermal_data *thm)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Wait for thermal ready status before configuring thermal parameters.
+ */
+ ret = readl_poll_timeout(thm->base + SPRD_THM_CTL, val,
+ !(val & SPRD_THM_SET_RDY_ST),
+ SPRD_THM_RDYST_POLLING_TIME,
+ SPRD_THM_RDYST_TIMEOUT);
+ if (ret)
+ return ret;
+
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_MON_EN,
+ SPRD_THM_MON_EN);
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SET_RDY,
+ SPRD_THM_SET_RDY);
+ return 0;
+}
+
+static int sprd_thm_wait_temp_ready(struct sprd_thermal_data *thm)
+{
+ u32 val;
+
+ /* Wait for first temperature data ready before reading temperature */
+ return readl_poll_timeout(thm->base + SPRD_THM_INTERNAL_STS1, val,
+ !(val & SPRD_THM_TEMPER_RDY),
+ SPRD_THM_TEMP_READY_POLL_TIME,
+ SPRD_THM_TEMP_READY_TIMEOUT);
+}
+
+static int sprd_thm_set_ready(struct sprd_thermal_data *thm)
+{
+ int ret;
+
+ ret = sprd_thm_poll_ready_status(thm);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear interrupt status, enable thermal interrupt and enable thermal.
+ *
+	 * The SPRD thermal controller integrates a hardware interrupt signal,
+	 * which means that if the temperature overheats, it will generate an
+	 * interrupt and automatically notify the PMIC to shut down the
+	 * system. So we should enable the interrupt bits here, even though
+	 * we have not registered an irq handler.
+ */
+ writel(SPRD_THM_INT_CLR_MASK, thm->base + SPRD_THM_INT_CLR);
+ sprd_thm_update_bits(thm->base + SPRD_THM_INT_EN,
+ SPRD_THM_BIT_INT_EN, SPRD_THM_BIT_INT_EN);
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_EN, SPRD_THM_EN);
+ return 0;
+}
+
+static void sprd_thm_sensor_init(struct sprd_thermal_data *thm,
+ struct sprd_thermal_sensor *sen)
+{
+ u32 otp_rawdata, hot_rawdata;
+
+ otp_rawdata = sprd_thm_temp_to_rawdata(SPRD_THM_OTP_TEMP, sen);
+ hot_rawdata = sprd_thm_temp_to_rawdata(SPRD_THM_HOT_TEMP, sen);
+
+	/* Enable the sensor's overheat temperature protection interrupt */
+ sprd_thm_update_bits(thm->base + SPRD_THM_INT_EN,
+ SPRD_THM_SEN_OVERHEAT_ALARM_EN(sen->id),
+ SPRD_THM_SEN_OVERHEAT_ALARM_EN(sen->id));
+
+	/* Set the sensor's overheat and hot threshold temperatures */
+ sprd_thm_update_bits(thm->base + SPRD_THM_THRES(sen->id),
+ SPRD_THM_THRES_MASK,
+ (otp_rawdata << SPRD_THM_OTP_TRIP_SHIFT) |
+ hot_rawdata);
+
+ /* Enable the corresponding sensor */
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SEN(sen->id),
+ SPRD_THM_SEN(sen->id));
+}
+
+static void sprd_thm_para_config(struct sprd_thermal_data *thm)
+{
+	/* Set the period between two valid temperature detection actions */
+	sprd_thm_update_bits(thm->base + SPRD_THM_DET_PERIOD,
+			     SPRD_THM_DET_PERIOD_MASK,
+			     SPRD_THM_DET_PERIOD_DATA);
+
+ /* Set the sensors' monitor mode */
+ sprd_thm_update_bits(thm->base + SPRD_THM_MON_CTL,
+ SPRD_THM_MON_MODE_MASK, SPRD_THM_MON_MODE);
+
+ /* Set the sensors' monitor period */
+	sprd_thm_update_bits(thm->base + SPRD_THM_MON_PERIOD,
+			     SPRD_THM_MON_PERIOD_MASK,
+			     SPRD_THM_MON_PERIOD_DATA);
+}
+
+static void sprd_thm_toggle_sensor(struct sprd_thermal_sensor *sen, bool on)
+{
+ struct thermal_zone_device *tzd = sen->tzd;
+
+ tzd->ops->set_mode(tzd,
+ on ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED);
+}
+
+static int sprd_thm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *sen_child;
+ struct sprd_thermal_data *thm;
+ struct sprd_thermal_sensor *sen;
+ const struct sprd_thm_variant_data *pdata;
+ int ret, i;
+ u32 val;
+
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ thm = devm_kzalloc(&pdev->dev, sizeof(*thm), GFP_KERNEL);
+ if (!thm)
+ return -ENOMEM;
+
+ thm->var_data = pdata;
+ thm->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(thm->base))
+		return PTR_ERR(thm->base);
+
+ thm->nr_sensors = of_get_child_count(np);
+ if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
+ dev_err(&pdev->dev, "incorrect sensor count\n");
+ return -EINVAL;
+ }
+
+ thm->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(thm->clk)) {
+ dev_err(&pdev->dev, "failed to get enable clock\n");
+ return PTR_ERR(thm->clk);
+ }
+
+ ret = clk_prepare_enable(thm->clk);
+ if (ret)
+ return ret;
+
+ sprd_thm_para_config(thm);
+
+ ret = sprd_thm_cal_read(np, "thm_sign_cal", &val);
+ if (ret)
+ goto disable_clk;
+
+ if (val > 0)
+ thm->ratio_sign = -1;
+ else
+ thm->ratio_sign = 1;
+
+ ret = sprd_thm_cal_read(np, "thm_ratio_cal", &thm->ratio_off);
+ if (ret)
+ goto disable_clk;
+
+ for_each_child_of_node(np, sen_child) {
+ sen = devm_kzalloc(&pdev->dev, sizeof(*sen), GFP_KERNEL);
+ if (!sen) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
+
+ sen->data = thm;
+ sen->dev = &pdev->dev;
+
+ ret = of_property_read_u32(sen_child, "reg", &sen->id);
+ if (ret) {
+ dev_err(&pdev->dev, "get sensor reg failed");
+ goto disable_clk;
+ }
+
+ ret = sprd_thm_sensor_calibration(sen_child, thm, sen);
+ if (ret) {
+ dev_err(&pdev->dev, "efuse cal analysis failed");
+ goto disable_clk;
+ }
+
+ sprd_thm_sensor_init(thm, sen);
+
+ sen->tzd = devm_thermal_zone_of_sensor_register(sen->dev,
+ sen->id,
+ sen,
+ &sprd_thm_ops);
+ if (IS_ERR(sen->tzd)) {
+ dev_err(&pdev->dev, "register thermal zone failed %d\n",
+ sen->id);
+ ret = PTR_ERR(sen->tzd);
+ goto disable_clk;
+ }
+
+ thm->sensor[sen->id] = sen;
+ }
+
+ ret = sprd_thm_set_ready(thm);
+ if (ret)
+ goto disable_clk;
+
+ ret = sprd_thm_wait_temp_ready(thm);
+ if (ret)
+ goto disable_clk;
+
+ for (i = 0; i < thm->nr_sensors; i++)
+ sprd_thm_toggle_sensor(thm->sensor[i], true);
+
+ platform_set_drvdata(pdev, thm);
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(thm->clk);
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void sprd_thm_hw_suspend(struct sprd_thermal_data *thm)
+{
+ int i;
+
+ for (i = 0; i < thm->nr_sensors; i++) {
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_SEN(thm->sensor[i]->id), 0);
+ }
+
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_EN, 0x0);
+}
+
+static int sprd_thm_suspend(struct device *dev)
+{
+ struct sprd_thermal_data *thm = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < thm->nr_sensors; i++)
+ sprd_thm_toggle_sensor(thm->sensor[i], false);
+
+ sprd_thm_hw_suspend(thm);
+ clk_disable_unprepare(thm->clk);
+
+ return 0;
+}
+
+static int sprd_thm_hw_resume(struct sprd_thermal_data *thm)
+{
+ int ret, i;
+
+ for (i = 0; i < thm->nr_sensors; i++) {
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_SEN(thm->sensor[i]->id),
+ SPRD_THM_SEN(thm->sensor[i]->id));
+ }
+
+ ret = sprd_thm_poll_ready_status(thm);
+ if (ret)
+ return ret;
+
+ writel(SPRD_THM_INT_CLR_MASK, thm->base + SPRD_THM_INT_CLR);
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_EN, SPRD_THM_EN);
+ return sprd_thm_wait_temp_ready(thm);
+}
+
+static int sprd_thm_resume(struct device *dev)
+{
+ struct sprd_thermal_data *thm = dev_get_drvdata(dev);
+ int ret, i;
+
+ ret = clk_prepare_enable(thm->clk);
+ if (ret)
+ return ret;
+
+ ret = sprd_thm_hw_resume(thm);
+ if (ret)
+ goto disable_clk;
+
+ for (i = 0; i < thm->nr_sensors; i++)
+ sprd_thm_toggle_sensor(thm->sensor[i], true);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(thm->clk);
+ return ret;
+}
+#endif
+
+static int sprd_thm_remove(struct platform_device *pdev)
+{
+ struct sprd_thermal_data *thm = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < thm->nr_sensors; i++) {
+ sprd_thm_toggle_sensor(thm->sensor[i], false);
+ devm_thermal_zone_of_sensor_unregister(&pdev->dev,
+ thm->sensor[i]->tzd);
+ }
+
+ clk_disable_unprepare(thm->clk);
+ return 0;
+}
+
+static const struct of_device_id sprd_thermal_of_match[] = {
+ { .compatible = "sprd,ums512-thermal", .data = &ums512_data },
+ { },
+};
+
+static const struct dev_pm_ops sprd_thermal_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sprd_thm_suspend, sprd_thm_resume)
+};
+
+static struct platform_driver sprd_thermal_driver = {
+ .probe = sprd_thm_probe,
+ .remove = sprd_thm_remove,
+ .driver = {
+ .name = "sprd-thermal",
+ .pm = &sprd_thermal_pm_ops,
+ .of_match_table = sprd_thermal_of_match,
+ },
+};
+
+module_platform_driver(sprd_thermal_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum thermal driver");
+MODULE_LICENSE("GPL v2");
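
To make the calibration arithmetic concrete, here is a small standalone sketch
of the slope/offset derivation and one raw-code conversion, using the ums512
ideal_k/ideal_b values from above; the eFuse readings (dt_offset, the ratio
cell and its sign) are invented for illustration:

    #include <stdio.h>

    int main(void)
    {
    	const int ideal_k = 262, ideal_b = 66400;  /* ums512 variant data */
    	const int dt_offset = 120, ratio_cal = 20; /* invented eFuse data */
    	const int ratio = 1000 - ratio_cal;        /* thm_sign_cal > 0 */

    	int cal_slope = (ideal_k * ratio) / 1000;            /* 256 */
    	int cal_offset = ideal_b + (dt_offset - 128) * 250;  /* 64400 */

    	/* T_final = k_cal * x - b_cal, in millidegrees Celsius */
    	int raw = 500;
    	printf("raw %d -> %d mC\n", raw, cal_slope * raw - cal_offset);
    	return 0;
    }

With these invented numbers a raw code of 500 converts to 63600 mC, i.e. 63.6
degrees Celsius.
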
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index ad9e3bf8fdf6..9314e3df6a42 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -478,7 +478,8 @@ static int stm_thermal_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
+static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops,
+ stm_thermal_suspend, stm_thermal_resume);
static const struct thermal_zone_of_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 2fa78f738568..263b0420fbe4 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/types.h>
@@ -24,7 +24,6 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/io.h>
#include "ti-bandgap.h"
@@ -743,27 +742,13 @@ exit:
static int ti_bandgap_tshut_init(struct ti_bandgap *bgp,
struct platform_device *pdev)
{
- int gpio_nr = bgp->tshut_gpio;
int status;
- /* Request for gpio_86 line */
- status = gpio_request(gpio_nr, "tshut");
- if (status < 0) {
- dev_err(bgp->dev, "Could not request for TSHUT GPIO:%i\n", 86);
- return status;
- }
- status = gpio_direction_input(gpio_nr);
- if (status) {
- dev_err(bgp->dev, "Cannot set input TSHUT GPIO %d\n", gpio_nr);
- return status;
- }
-
- status = request_irq(gpio_to_irq(gpio_nr), ti_bandgap_tshut_irq_handler,
+ status = request_irq(gpiod_to_irq(bgp->tshut_gpiod),
+ ti_bandgap_tshut_irq_handler,
IRQF_TRIGGER_RISING, "tshut", NULL);
- if (status) {
- gpio_free(gpio_nr);
+ if (status)
dev_err(bgp->dev, "request irq failed for TSHUT");
- }
return 0;
}
@@ -860,11 +845,10 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
} while (res);
if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- bgp->tshut_gpio = of_get_gpio(node, 0);
- if (!gpio_is_valid(bgp->tshut_gpio)) {
- dev_err(&pdev->dev, "invalid gpio for tshut (%d)\n",
- bgp->tshut_gpio);
- return ERR_PTR(-EINVAL);
+ bgp->tshut_gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
+ if (IS_ERR(bgp->tshut_gpiod)) {
+ dev_err(&pdev->dev, "invalid gpio for tshut\n");
+ return ERR_CAST(bgp->tshut_gpiod);
}
}
@@ -1046,10 +1030,8 @@ put_clks:
put_fclock:
clk_put(bgp->fclock);
free_irqs:
- if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- free_irq(gpio_to_irq(bgp->tshut_gpio), NULL);
- gpio_free(bgp->tshut_gpio);
- }
+ if (TI_BANDGAP_HAS(bgp, TSHUT))
+ free_irq(gpiod_to_irq(bgp->tshut_gpiod), NULL);
return ret;
}
@@ -1079,10 +1061,8 @@ int ti_bandgap_remove(struct platform_device *pdev)
if (TI_BANDGAP_HAS(bgp, TALERT))
free_irq(bgp->irq, bgp);
- if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- free_irq(gpio_to_irq(bgp->tshut_gpio), NULL);
- gpio_free(bgp->tshut_gpio);
- }
+ if (TI_BANDGAP_HAS(bgp, TSHUT))
+ free_irq(gpiod_to_irq(bgp->tshut_gpiod), NULL);
return 0;
}
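
The tshut rework swaps the legacy integer-GPIO calls (of_get_gpio(),
gpio_request(), gpio_to_irq()) for the descriptor-based consumer API, which
folds lookup, request and lifetime into one devm call. A minimal consumer
sketch of the same flow (a NULL con_id picks the first GPIO in the DT node, as
the patch does; the handler is a placeholder):

    #include <linux/gpio/consumer.h>
    #include <linux/interrupt.h>

    static int demo_setup_tshut(struct device *dev, irq_handler_t handler)
    {
    	struct gpio_desc *gpiod;
    	int irq;

    	gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN);
    	if (IS_ERR(gpiod))
    		return PTR_ERR(gpiod);

    	irq = gpiod_to_irq(gpiod);
    	if (irq < 0)
    		return irq;

    	return devm_request_irq(dev, irq, handler, IRQF_TRIGGER_RISING,
    				"tshut", NULL);
    }
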
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
index bb9b0f7faf99..fce4657e9486 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
@@ -13,6 +13,8 @@
#include <linux/types.h>
#include <linux/err.h>
+struct gpio_desc;
+
/**
* DOC: bandgap driver data structure
* ==================================
@@ -199,7 +201,7 @@ struct ti_bandgap {
struct clk *div_clk;
spinlock_t lock; /* shields this struct */
int irq;
- int tshut_gpio;
+ struct gpio_desc *tshut_gpiod;
u32 clk_rate;
};
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index b7980c856898..68c1b93ac5d9 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -147,10 +147,10 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
if (!uuid_is_null(&uuids[i]))
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
&uuids[i]);
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
i < tb->nboot_acl - 1 ? "," : "\n");
}
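
The snprintf()-to-scnprintf() swap matters because snprintf() returns the
length the output *would* have had, so on truncation the accumulated offset
can run past the end of the page and the next write lands out of bounds;
scnprintf() returns only the bytes actually stored, keeping buf + ret in
range. The safe accumulation pattern in isolation:

    static ssize_t demo_show(char *buf, const int *vals, int n)
    {
    	ssize_t ret = 0;
    	int i;

    	for (i = 0; i < n; i++)
    		/* scnprintf() never reports more than it stored. */
    		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d%s",
    				 vals[i], i < n - 1 ? "," : "\n");

    	return ret;
    }
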
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 921d164b3f35..b451a5aa90b5 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -247,7 +247,7 @@ struct tb_drom_entry_header {
struct tb_drom_entry_generic {
struct tb_drom_entry_header header;
- u8 data[0];
+ u8 data[];
} __packed;
struct tb_drom_entry_port {
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 13e88109742e..fbbe32ca1e69 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -114,7 +114,7 @@ struct icm_notification {
struct ep_name_entry {
u8 len;
u8 type;
- u8 data[0];
+ u8 data[];
};
#define EP_NAME_INTEL_VSS 0x10
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index b341fc60c4ba..50c7534ba31e 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -182,6 +182,9 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
return ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+ if (ret)
+ return ret;
+
if (val & ROUTER_CS_26_ONS)
return -EOPNOTSUPP;
@@ -259,6 +262,7 @@ int usb4_switch_setup(struct tb_switch *sw)
/**
* usb4_switch_read_uid() - Read UID from USB4 router
* @sw: USB4 router
+ * @uid: UID is stored here
*
* Reads 64-bit UID from USB4 router config space.
*/
@@ -296,6 +300,9 @@ static int usb4_switch_drom_read_block(struct tb_switch *sw,
/**
* usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
* @sw: USB4 router
+ * @address: Byte address inside DROM to start reading
+ * @buf: Buffer where the DROM content is stored
+ * @size: Number of bytes to read from DROM
*
* Uses USB4 router operations to read router DROM. For devices this
* should always work but for hosts it may return %-EOPNOTSUPP in which
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index a312cb33a99b..2dff93d7a501 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -137,7 +137,6 @@ config LEGACY_PTYS
security. This option enables these legacy devices; on most
systems, it is safe to say N.
-
config LEGACY_PTY_COUNT
int "Maximum number of legacy PTY in use"
depends on LEGACY_PTYS
@@ -151,6 +150,31 @@ config LEGACY_PTY_COUNT
When not in use, each legacy PTY occupies 12 bytes on 32-bit
architectures and 24 bytes on 64-bit architectures.
+config LDISC_AUTOLOAD
+ bool "Automatically load TTY Line Disciplines"
+ default y
+ help
+ Historically the kernel has always automatically loaded any
+ line discipline that is in a kernel module when a user asks
+ for it to be loaded with the TIOCSETD ioctl, or through other
+ means. This is not always the best thing to do on systems
+ where you know you will not be using some of the more
+ "ancient" line disciplines, so prevent the kernel from doing
+ this unless the request is coming from a process with the
+ CAP_SYS_MODULE permissions.
+
+ Say 'Y' here if you trust your userspace users to do the right
+ thing, or if you have only provided the line disciplines that
+ you know you will be using, or if you wish to continue to use
+ the traditional method of on-demand loading of these modules
+ by any user.
+
+ This functionality can be changed at runtime with the
+	  dev.tty.ldisc_autoload sysctl; this configuration option only
+	  sets the default value of this functionality.
+
+source "drivers/tty/serial/Kconfig"
+
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
depends on HAS_IOMEM
@@ -270,16 +294,6 @@ config SYNCLINK_GT
synchronous and asynchronous serial adapters
manufactured by Microgate Systems, Ltd. (www.microgate.com)
-config NOZOMI
- tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
- depends on PCI
- help
- If you have a HSDPA driver Broadband Wireless Data Card -
- Globe Trotter PCMCIA card, say Y here.
-
- To compile this driver as a module, choose M here, the module
- will be called nozomi.
-
config ISI
tristate "Multi-Tech multiport card support"
depends on SERIAL_NONSTANDARD && PCI
@@ -302,43 +316,6 @@ config N_HDLC
The module will be called n_hdlc. If you want to do that, say M
here.
-config N_GSM
- tristate "GSM MUX line discipline support (EXPERIMENTAL)"
- depends on NET
- help
- This line discipline provides support for the GSM MUX protocol and
- presents the mux as a set of 61 individual tty devices.
-
-config TRACE_ROUTER
- tristate "Trace data router for MIPI P1149.7 cJTAG standard"
- depends on TRACE_SINK
- help
- The trace router uses the Linux tty line discipline framework to
- route trace data coming from a tty port (say UART for example) to
- the trace sink line discipline driver and to another tty port (say
- USB). This is part of a solution for the MIPI P1149.7, compact JTAG,
- standard, which is for debugging mobile devices. The PTI driver in
- drivers/misc/pti.c defines the majority of this MIPI solution.
-
- You should select this driver if the target kernel is meant for
- a mobile device containing a modem. Then you will need to select
- "Trace data sink for MIPI P1149.7 cJTAG standard" line discipline
- driver.
-
-config TRACE_SINK
- tristate "Trace data sink for MIPI P1149.7 cJTAG standard"
- help
- The trace sink uses the Linux line discipline framework to receive
- trace data coming from the trace router line discipline driver
- to a user-defined tty port target, like USB.
- This is to provide a way to extract modem trace data on
- devices that do not have a PTI HW module, or just need modem
- trace data to come out of a different HW output port.
- This is part of a solution for the P1149.7, compact JTAG, standard.
-
- If you select this option, you need to select
- "Trace data router for MIPI P1149.7 cJTAG standard".
-
config PPC_EPAPR_HV_BYTECHAN
bool "ePAPR hypervisor byte channel driver"
depends on PPC
@@ -374,20 +351,6 @@ config PPC_EARLY_DEBUG_EHV_BC_HANDLE
there simply will be no early console output. This is true also
if you don't boot under a hypervisor at all.
-config NULL_TTY
- tristate "NULL TTY driver"
- help
- Say Y here if you want a NULL TTY which simply discards messages.
-
- This is useful to allow userspace applications which expect a console
- device to work without modifications even when no console is
- available or desired.
-
- In order to use this driver, you should redirect the console to this
- TTY, or boot the kernel with console=ttynull.
-
- If unsure, say N.
-
config GOLDFISH_TTY
tristate "Goldfish TTY Driver"
depends on GOLDFISH
@@ -401,6 +364,23 @@ config GOLDFISH_TTY_EARLY_CONSOLE
default y if GOLDFISH_TTY=y
select SERIAL_EARLYCON
+config N_GSM
+ tristate "GSM MUX line discipline support (EXPERIMENTAL)"
+ depends on NET
+ help
+ This line discipline provides support for the GSM MUX protocol and
+ presents the mux as a set of 61 individual tty devices.
+
+config NOZOMI
+ tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
+ depends on PCI
+ help
+	  If you have a HSDPA Broadband Wireless Data Card -
+ Globe Trotter PCMCIA card, say Y here.
+
+ To compile this driver as a module, choose M here, the module
+ will be called nozomi.
+
config MIPS_EJTAG_FDC_TTY
bool "MIPS EJTAG Fast Debug Channel TTY"
depends on MIPS_CDMM
@@ -448,33 +428,58 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config NULL_TTY
+ tristate "NULL TTY driver"
+ help
+ Say Y here if you want a NULL TTY which simply discards messages.
+
+ This is useful to allow userspace applications which expect a console
+ device to work without modifications even when no console is
+ available or desired.
+
+ In order to use this driver, you should redirect the console to this
+ TTY, or boot the kernel with console=ttynull.
+
+ If unsure, say N.
+
+config TRACE_ROUTER
+ tristate "Trace data router for MIPI P1149.7 cJTAG standard"
+ depends on TRACE_SINK
+ help
+ The trace router uses the Linux tty line discipline framework to
+ route trace data coming from a tty port (say UART for example) to
+ the trace sink line discipline driver and to another tty port (say
+ USB). This is part of a solution for the MIPI P1149.7, compact JTAG,
+ standard, which is for debugging mobile devices. The PTI driver in
+ drivers/misc/pti.c defines the majority of this MIPI solution.
+
+ You should select this driver if the target kernel is meant for
+ a mobile device containing a modem. Then you will need to select
+ "Trace data sink for MIPI P1149.7 cJTAG standard" line discipline
+ driver.
+
+config TRACE_SINK
+ tristate "Trace data sink for MIPI P1149.7 cJTAG standard"
+ help
+ The trace sink uses the Linux line discipline framework to receive
+ trace data coming from the trace router line discipline driver
+ to a user-defined tty port target, like USB.
+ This is to provide a way to extract modem trace data on
+ devices that do not have a PTI HW module, or just need modem
+ trace data to come out of a different HW output port.
+ This is part of a solution for the P1149.7, compact JTAG, standard.
+
+ If you select this option, you need to select
+ "Trace data router for MIPI P1149.7 cJTAG standard".
+
config VCC
tristate "Sun Virtual Console Concentrator"
depends on SUN_LDOMS
help
Support for Sun logical domain consoles.
-config LDISC_AUTOLOAD
- bool "Automatically load TTY Line Disciplines"
- default y
- help
- Historically the kernel has always automatically loaded any
- line discipline that is in a kernel module when a user asks
- for it to be loaded with the TIOCSETD ioctl, or through other
- means. This is not always the best thing to do on systems
- where you know you will not be using some of the more
- "ancient" line disciplines, so prevent the kernel from doing
- this unless the request is coming from a process with the
- CAP_SYS_MODULE permissions.
-
- Say 'Y' here if you trust your userspace users to do the right
- thing, or if you have only provided the line disciplines that
- you know you will be using, or if you wish to continue to use
- the traditional method of on-demand loading of these modules
- by any user.
-
- This functionality can be changed at runtime with the
- dev.tty.ldisc_autoload sysctl, this configuration option will
- only set the default value of this functionality.
+source "drivers/tty/hvc/Kconfig"
endif # TTY
+
+source "drivers/tty/serdev/Kconfig"
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 769e0a5d1dfc..3c6dd06ec5fb 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -136,6 +136,21 @@ static int find_console_handle(void)
return 1;
}
+static unsigned int local_ev_byte_channel_send(unsigned int handle,
+ unsigned int *count,
+ const char *p)
+{
+ char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
+ unsigned int c = *count;
+
+ if (c < sizeof(buffer)) {
+ memcpy(buffer, p, c);
+ memset(&buffer[c], 0, sizeof(buffer) - c);
+ p = buffer;
+ }
+ return ev_byte_channel_send(handle, count, p);
+}
+
/*************************** EARLY CONSOLE DRIVER ***************************/
#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
@@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data)
do {
count = 1;
- ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
+ ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
&count, &data);
} while (ret == EV_EAGAIN);
}
@@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s,
while (count) {
len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES);
do {
- ret = ev_byte_channel_send(handle, &len, s);
+ ret = local_ev_byte_channel_send(handle, &len, s);
} while (ret == EV_EAGAIN);
count -= len;
s += len;
@@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
EV_BYTE_CHANNEL_MAX_BYTES);
- ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
+ ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
/* 'len' is valid only if the return code is 0 or EV_EAGAIN */
if (!ret || (ret == EV_EAGAIN))
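
local_ev_byte_channel_send() exists because callers such as
byte_channel_spin_send() pass a pointer to a single char, while the hypervisor
interface may read a full EV_BYTE_CHANNEL_MAX_BYTES from the source buffer;
bouncing short writes through a zero-padded stack buffer keeps that read in
bounds. Reduced to its core (this is the shape of the wrapper above, not new
behavior):

    static unsigned int demo_padded_send(unsigned int handle,
    				     unsigned int *count, const char *p)
    {
    	char buffer[EV_BYTE_CHANNEL_MAX_BYTES];

    	if (*count < sizeof(buffer)) {
    		/* Copy the short payload and zero the tail. */
    		memcpy(buffer, p, *count);
    		memset(&buffer[*count], 0, sizeof(buffer) - *count);
    		p = buffer;
    	}
    	return ev_byte_channel_send(handle, count, p);
    }
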
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 6a3c97d345a0..d1b27b0522a3 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-if TTY
config HVC_DRIVER
bool
@@ -89,7 +88,7 @@ config HVC_DCC
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV_SBI
+ depends on RISCV_SBI_V01
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
@@ -113,5 +112,3 @@ config HVCS
will depend on arch specific APIs exported from hvcserver.ko
which will also be compiled when this driver is built as a
module.
-
-endif # TTY
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 27284a2dcd2b..436cc51c92c3 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
vtermnos[index] = vtermno;
cons_ops[index] = ops;
- /* reserve all indices up to and including this index */
- if (last_hvc < index)
- last_hvc = index;
-
/* check if we need to re-register the kernel console */
hvc_check_console(index);
@@ -960,13 +956,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
cons_ops[i] == hp->ops)
break;
- /* no matching slot, just use a counter */
- if (i >= MAX_NR_HVC_CONSOLES)
- i = ++last_hvc;
+ if (i >= MAX_NR_HVC_CONSOLES) {
+
+ /* find 'empty' slot for console */
+		for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++)
+			;
+
+ /* no matching slot, just use a counter */
+ if (i == MAX_NR_HVC_CONSOLES)
+ i = ++last_hvc + MAX_NR_HVC_CONSOLES;
+ }
hp->index = i;
- cons_ops[i] = ops;
- vtermnos[i] = vtermno;
+ if (i < MAX_NR_HVC_CONSOLES) {
+ cons_ops[i] = ops;
+ vtermnos[i] = vtermno;
+ }
list_add_tail(&(hp->next), &hvc_structs);
mutex_unlock(&hvc_structs_mutex);
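
The reworked hvc_alloc() slot logic first reuses an index whose (vtermno, ops)
pair matches, then searches vtermnos[] for a free slot, and only as a last
resort numbers the console past MAX_NR_HVC_CONSOLES; such overflow indices
only name the tty and deliberately never touch the vtermnos[]/cons_ops[]
arrays. An annotated outline of the decision (not the literal driver code; the
matching helper is a placeholder):

    i = demo_find_matching_slot(vtermno, ops);	/* placeholder */
    if (i >= MAX_NR_HVC_CONSOLES) {
    	/* Prefer a real, unused console slot... */
    	for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++)
    		;
    	/* ...else fall back to a counter beyond the arrays. */
    	if (i == MAX_NR_HVC_CONSOLES)
    		i = ++last_hvc + MAX_NR_HVC_CONSOLES;
    }
    hp->index = i;
    if (i < MAX_NR_HVC_CONSOLES) {	/* only real slots bind a console */
    	cons_ops[i] = ops;
    	vtermnos[i] = vtermno;
    }
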
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index e9319954c832..18d005814e4b 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* hvc_console.h
* Copyright (C) 2005 IBM Corporation
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 620d8488b83e..21e76a2ec182 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -243,6 +243,7 @@ done:
/* Fall back to a 3 byte encoding */
word.bytes = 3;
word.word &= 0x00ffffff;
+ /* Fall through */
case 3:
/* 3 byte encoding */
word.word |= 0x82000000;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index f1c90fa2978e..d77ed82a4840 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -97,7 +97,19 @@ struct gsm_msg {
u8 ctrl; /* Control byte + flags */
unsigned int len; /* Length of data block (can be zero) */
unsigned char *data; /* Points into buffer but not at the start */
- unsigned char buffer[0];
+ unsigned char buffer[];
+};
+
+enum gsm_dlci_state {
+ DLCI_CLOSED,
+ DLCI_OPENING, /* Sending SABM not seen UA */
+ DLCI_OPEN, /* SABM/UA complete */
+ DLCI_CLOSING, /* Sending DISC not seen UA/DM */
+};
+
+enum gsm_dlci_mode {
+ DLCI_MODE_ABM, /* Normal Asynchronous Balanced Mode */
+ DLCI_MODE_ADM, /* Asynchronous Disconnected Mode */
};
/*
@@ -113,32 +125,25 @@ struct gsm_msg {
struct gsm_dlci {
struct gsm_mux *gsm;
int addr;
- int state;
-#define DLCI_CLOSED 0
-#define DLCI_OPENING 1 /* Sending SABM not seen UA */
-#define DLCI_OPEN 2 /* SABM/UA complete */
-#define DLCI_CLOSING 3 /* Sending DISC not seen UA/DM */
+ enum gsm_dlci_state state;
struct mutex mutex;
/* Link layer */
- int mode;
-#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
-#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
+ enum gsm_dlci_mode mode;
spinlock_t lock; /* Protects the internal state */
struct timer_list t1; /* Retransmit timer for SABM and UA */
int retries;
/* Uplink tty if active */
struct tty_port port; /* The tty bound to this DLCI if there is one */
- struct kfifo *fifo; /* Queue fifo for the DLCI */
- struct kfifo _fifo; /* For new fifo API porting only */
+ struct kfifo fifo; /* Queue fifo for the DLCI */
int adaption; /* Adaption layer in use */
int prev_adaption;
u32 modem_rx; /* Our incoming virtual modem lines */
u32 modem_tx; /* Our outgoing modem lines */
- int dead; /* Refuse re-open */
+ bool dead; /* Refuse re-open */
/* Flow control */
- int throttled; /* Private copy of throttle state */
- int constipated; /* Throttle status for outgoing */
+ bool throttled; /* Private copy of throttle state */
+ bool constipated; /* Throttle status for outgoing */
/* Packetised I/O */
struct sk_buff *skb; /* Frame being sent */
struct sk_buff_head skb_list; /* Queued frames */
@@ -168,6 +173,20 @@ struct gsm_control {
int error; /* Error if any */
};
+enum gsm_mux_state {
+ GSM_SEARCH,
+ GSM_START,
+ GSM_ADDRESS,
+ GSM_CONTROL,
+ GSM_LEN,
+ GSM_DATA,
+ GSM_FCS,
+ GSM_OVERRUN,
+ GSM_LEN0,
+ GSM_LEN1,
+ GSM_SSOF,
+};
+
/*
* Each GSM mux we have is represented by this structure. If we are
* operating as an ldisc then we use this structure as our ldisc
@@ -192,22 +211,11 @@ struct gsm_mux {
/* Framing Layer */
unsigned char *buf;
- int state;
-#define GSM_SEARCH 0
-#define GSM_START 1
-#define GSM_ADDRESS 2
-#define GSM_CONTROL 3
-#define GSM_LEN 4
-#define GSM_DATA 5
-#define GSM_FCS 6
-#define GSM_OVERRUN 7
-#define GSM_LEN0 8
-#define GSM_LEN1 9
-#define GSM_SSOF 10
+ enum gsm_mux_state state;
unsigned int len;
unsigned int address;
unsigned int count;
- int escape;
+ bool escape;
int encoding;
u8 control;
u8 fcs;
@@ -224,9 +232,9 @@ struct gsm_mux {
unsigned int mru;
unsigned int mtu;
int initiator; /* Did we initiate connection */
- int dead; /* Has the mux been shut down */
+ bool dead; /* Has the mux been shut down */
struct gsm_dlci *dlci[NUM_DLCI];
- int constipated; /* Asked by remote to shut up */
+ bool constipated; /* Asked by remote to shut up */
spinlock_t tx_lock;
unsigned int tx_bytes; /* TX data outstanding */
@@ -796,7 +804,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
total_size = 0;
while (1) {
- len = kfifo_len(dlci->fifo);
+ len = kfifo_len(&dlci->fifo);
if (len == 0)
return total_size;
@@ -820,7 +828,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
*dp++ = gsm_encode_modem(dlci);
break;
}
- WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
+ WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
__gsm_data_queue(dlci, msg);
total_size += size;
}
@@ -1034,9 +1042,9 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
fc = (modem & MDM_FC) || !(modem & MDM_RTR);
if (fc && !dlci->constipated) {
/* Need to throttle our output on this device */
- dlci->constipated = 1;
+ dlci->constipated = true;
} else if (!fc && dlci->constipated) {
- dlci->constipated = 0;
+ dlci->constipated = false;
gsm_dlci_data_kick(dlci);
}
@@ -1199,8 +1207,8 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
struct gsm_dlci *dlci = gsm->dlci[0];
/* Modem wishes to close down */
if (dlci) {
- dlci->dead = 1;
- gsm->dead = 1;
+ dlci->dead = true;
+ gsm->dead = true;
gsm_dlci_begin_close(dlci);
}
}
@@ -1211,7 +1219,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
break;
case CMD_FCON:
/* Modem can accept data again */
- gsm->constipated = 0;
+ gsm->constipated = false;
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
spin_lock_irqsave(&gsm->tx_lock, flags);
@@ -1220,7 +1228,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
break;
case CMD_FCOFF:
/* Modem wants us to STFU */
- gsm->constipated = 1;
+ gsm->constipated = true;
gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
break;
case CMD_MSC:
@@ -1424,9 +1432,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
dlci->state = DLCI_CLOSED;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
- kfifo_reset(dlci->fifo);
+ kfifo_reset(&dlci->fifo);
} else
- dlci->gsm->dead = 1;
+ dlci->gsm->dead = true;
wake_up(&dlci->gsm->event);
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
@@ -1496,6 +1504,9 @@ static void gsm_dlci_t1(struct timer_list *t)
} else
gsm_dlci_close(dlci);
break;
+ default:
+ pr_debug("%s: unhandled state: %d\n", __func__, dlci->state);
+ break;
}
}
@@ -1645,8 +1656,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
return NULL;
spin_lock_init(&dlci->lock);
mutex_init(&dlci->mutex);
- dlci->fifo = &dlci->_fifo;
- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+ if (kfifo_alloc(&dlci->fifo, 4096, GFP_KERNEL) < 0) {
kfree(dlci);
return NULL;
}
@@ -1681,7 +1691,7 @@ static void gsm_dlci_free(struct tty_port *port)
del_timer_sync(&dlci->t1);
dlci->gsm->dlci[dlci->addr] = NULL;
- kfifo_free(dlci->fifo);
+ kfifo_free(&dlci->fifo);
while ((dlci->skb = skb_dequeue(&dlci->skb_list)))
dev_kfree_skb(dlci->skb);
kfree(dlci);
@@ -1810,6 +1820,10 @@ static void gsm_queue(struct gsm_mux *gsm)
case DLCI_OPENING:
gsm_dlci_open(dlci);
break;
+ default:
+ pr_debug("%s: unhandled state: %d\n", __func__,
+ dlci->state);
+ break;
}
break;
case DM: /* DM can be valid unsolicited */
@@ -1923,6 +1937,9 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
break;
}
break;
+ default:
+ pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
+ break;
}
}
@@ -1959,7 +1976,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
}
if (c == GSM1_ESCAPE) {
- gsm->escape = 1;
+ gsm->escape = true;
return;
}
@@ -1969,7 +1986,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
if (gsm->escape) {
c ^= GSM1_ESCAPE_BITS;
- gsm->escape = 0;
+ gsm->escape = false;
}
switch (gsm->state) {
case GSM_START: /* First byte after SOF */
@@ -1997,6 +2014,9 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
break;
case GSM_OVERRUN: /* Over-long - eg a dropped SOF */
break;
+ default:
+ pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
+ break;
}
}
@@ -2061,7 +2081,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
struct gsm_dlci *dlci = gsm->dlci[0];
struct gsm_msg *txq, *ntxq;
- gsm->dead = 1;
+ gsm->dead = true;
spin_lock(&gsm_mux_lock);
for (i = 0; i < MAX_MUX; i++) {
@@ -2078,7 +2098,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
del_timer_sync(&gsm->t2_timer);
/* Now we are sure T2 has stopped */
if (dlci)
- dlci->dead = 1;
+ dlci->dead = true;
/* Free up any link layer users */
mutex_lock(&gsm->mutex);
@@ -2132,7 +2152,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
dlci = gsm_dlci_alloc(gsm, 0);
if (dlci == NULL)
return -ENOMEM;
- gsm->dead = 0; /* Tty opens are now permissible */
+ gsm->dead = false; /* Tty opens are now permissible */
return 0;
}
@@ -2216,7 +2236,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
gsm->encoding = 1;
gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
gsm->mtu = 64;
- gsm->dead = 1; /* Avoid early tty opens */
+ gsm->dead = true; /* Avoid early tty opens */
return gsm;
}
@@ -2618,11 +2638,11 @@ static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
switch (cmd) {
case GSMIOC_GETCONF:
gsm_copy_config_values(gsm, &c);
- if (copy_to_user((void *)arg, &c, sizeof(c)))
+ if (copy_to_user((void __user *)arg, &c, sizeof(c)))
return -EFAULT;
return 0;
case GSMIOC_SETCONF:
- if (copy_from_user(&c, (void *)arg, sizeof(c)))
+ if (copy_from_user(&c, (void __user *)arg, sizeof(c)))
return -EFAULT;
return gsm_config(gsm, &c);
case GSMIOC_GETFIRST:
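
Casting the ioctl argument to void __user * rather than plain void * lets sparse verify that user-space pointers are only dereferenced through the uaccess helpers. A minimal sketch of the read side, with a hypothetical config struct:

    #include <linux/uaccess.h>

    struct demo_config {
            unsigned int mru;
            unsigned int mtu;
    };

    static long demo_get_config(unsigned long arg, const struct demo_config *cfg)
    {
            /* copy_to_user() returns the number of bytes it could NOT copy */
            if (copy_to_user((void __user *)arg, cfg, sizeof(*cfg)))
                    return -EFAULT;
            return 0;
    }
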
@@ -2769,7 +2789,7 @@ static void gsm_destroy_network(struct gsm_dlci *dlci)
{
struct gsm_mux_net *mux_net;
- pr_debug("destroy network interface");
+ pr_debug("destroy network interface\n");
if (!dlci->net)
return;
mux_net = netdev_priv(dlci->net);
@@ -2798,7 +2818,7 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
if (nc->adaption != 3 && nc->adaption != 4)
return -EPROTONOSUPPORT;
- pr_debug("create network interface");
+ pr_debug("create network interface\n");
netname = "gsm%d";
if (nc->if_name[0] != '\0')
@@ -2806,7 +2826,7 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
net = alloc_netdev(sizeof(struct gsm_mux_net), netname,
NET_NAME_UNKNOWN, gsm_mux_net_init);
if (!net) {
- pr_err("alloc_netdev failed");
+ pr_err("alloc_netdev failed\n");
return -ENOMEM;
}
net->mtu = dlci->gsm->mtu;
@@ -2824,7 +2844,7 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
dlci->data = gsm_mux_rx_netchar;
dlci->net = net;
- pr_debug("register netdev");
+ pr_debug("register netdev\n");
retval = register_netdev(net);
if (retval) {
pr_err("network register fail %d\n", retval);
@@ -3030,7 +3050,7 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
if (dlci->state == DLCI_CLOSED)
return -EINVAL;
/* Stuff the bytes into the fifo queue */
- sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ sent = kfifo_in_locked(&dlci->fifo, buf, len, &dlci->lock);
/* Need to kick the channel */
gsm_dlci_data_kick(dlci);
return sent;
@@ -3041,7 +3061,7 @@ static int gsmtty_write_room(struct tty_struct *tty)
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return -EINVAL;
- return TX_SIZE - kfifo_len(dlci->fifo);
+ return TX_SIZE - kfifo_len(&dlci->fifo);
}
static int gsmtty_chars_in_buffer(struct tty_struct *tty)
@@ -3049,7 +3069,7 @@ static int gsmtty_chars_in_buffer(struct tty_struct *tty)
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return -EINVAL;
- return kfifo_len(dlci->fifo);
+ return kfifo_len(&dlci->fifo);
}
static void gsmtty_flush_buffer(struct tty_struct *tty)
@@ -3061,7 +3081,7 @@ static void gsmtty_flush_buffer(struct tty_struct *tty)
then the data being transmitted can't simply be junked once
it has first hit the stack. Until then we can just blow it
away */
- kfifo_reset(dlci->fifo);
+ kfifo_reset(&dlci->fifo);
/* Need to unhook this DLCI from the transmit queue logic */
}
@@ -3152,7 +3172,7 @@ static void gsmtty_throttle(struct tty_struct *tty)
return;
if (C_CRTSCTS(tty))
dlci->modem_tx &= ~TIOCM_DTR;
- dlci->throttled = 1;
+ dlci->throttled = true;
/* Send an MSC with DTR cleared */
gsmtty_modem_update(dlci, 0);
}
@@ -3164,7 +3184,7 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
return;
if (C_CRTSCTS(tty))
dlci->modem_tx |= TIOCM_DTR;
- dlci->throttled = 0;
+ dlci->throttled = false;
/* Send an MSC with DTR set */
gsmtty_modem_update(dlci, 0);
}
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 27b506bf03ce..991f49ee4026 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -18,7 +18,7 @@
* All HDLC data is frame oriented which means:
*
* 1. tty write calls represent one complete transmit frame of data
- * The device driver should accept the complete frame or none of
+ * The device driver should accept the complete frame or none of
* the frame (busy) in the write method. Each write call should have
* a byte count in the range of 2-65535 bytes (2 is min HDLC frame
* with 1 addr byte and 1 ctrl byte). The max byte count of 65535
@@ -39,7 +39,7 @@
* tty read calls.
*
* 3. tty read calls returns an entire frame of data or nothing.
- *
+ *
* 4. all send and receive data is considered raw. No processing
* or translation is performed by the line discipline, regardless
* of the tty flags
@@ -87,9 +87,6 @@
#include <linux/interrupt.h>
#include <linux/ptrace.h>
-#undef VERSION
-#define VERSION(major,minor,patch) (((((major)<<8)+(minor))<<8)+(patch))
-
#include <linux/poll.h>
#include <linux/in.h>
#include <linux/ioctl.h>
@@ -107,7 +104,7 @@
/*
* Buffers for individual HDLC frames
*/
-#define MAX_HDLC_FRAME_SIZE 65535
+#define MAX_HDLC_FRAME_SIZE 65535
#define DEFAULT_RX_BUF_COUNT 10
#define MAX_RX_BUF_COUNT 60
#define DEFAULT_TX_BUF_COUNT 3
@@ -127,11 +124,8 @@ struct n_hdlc_buf_list {
/**
* struct n_hdlc - per device instance data structure
* @magic - magic value for structure
- * @flags - miscellaneous control flags
- * @tty - ptr to TTY structure
- * @backup_tty - TTY to use if tty gets closed
* @tbusy - reentrancy flag for tx wakeup code
- * @woke_up - FIXME: describe this field
+ * @woke_up - tx wakeup needs to be run again as it was called while @tbusy
* @tx_buf_list - list of pending transmit frame buffers
* @rx_buf_list - list of received frame buffers
* @tx_free_buf_list - list unused transmit frame buffers
@@ -139,11 +133,8 @@ struct n_hdlc_buf_list {
*/
struct n_hdlc {
int magic;
- __u32 flags;
- struct tty_struct *tty;
- struct tty_struct *backup_tty;
- int tbusy;
- int woke_up;
+ bool tbusy;
+ bool woke_up;
struct n_hdlc_buf_list tx_buf_list;
struct n_hdlc_buf_list rx_buf_list;
struct n_hdlc_buf_list tx_free_buf_list;
@@ -161,39 +152,14 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
/* Local functions */
-static struct n_hdlc *n_hdlc_alloc (void);
-
-/* debug level can be set by insmod for debugging purposes */
-#define DEBUG_LEVEL_INFO 1
-static int debuglevel;
+static struct n_hdlc *n_hdlc_alloc(void);
/* max frame size for memory allocations */
static int maxframe = 4096;
-/* TTY callbacks */
-
-static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
- __u8 __user *buf, size_t nr);
-static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr);
-static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
-static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
- poll_table *wait);
-static int n_hdlc_tty_open(struct tty_struct *tty);
-static void n_hdlc_tty_close(struct tty_struct *tty);
-static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
- char *fp, int count);
-static void n_hdlc_tty_wakeup(struct tty_struct *tty);
-
-#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
-
-#define tty2n_hdlc(tty) ((struct n_hdlc *) ((tty)->disc_data))
-#define n_hdlc2tty(n_hdlc) ((n_hdlc)->tty)
-
static void flush_rx_queue(struct tty_struct *tty)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc *n_hdlc = tty->disc_data;
struct n_hdlc_buf *buf;
while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
@@ -202,79 +168,22 @@ static void flush_rx_queue(struct tty_struct *tty)
static void flush_tx_queue(struct tty_struct *tty)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc *n_hdlc = tty->disc_data;
struct n_hdlc_buf *buf;
while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
}
-static struct tty_ldisc_ops n_hdlc_ldisc = {
- .owner = THIS_MODULE,
- .magic = TTY_LDISC_MAGIC,
- .name = "hdlc",
- .open = n_hdlc_tty_open,
- .close = n_hdlc_tty_close,
- .read = n_hdlc_tty_read,
- .write = n_hdlc_tty_write,
- .ioctl = n_hdlc_tty_ioctl,
- .poll = n_hdlc_tty_poll,
- .receive_buf = n_hdlc_tty_receive,
- .write_wakeup = n_hdlc_tty_wakeup,
- .flush_buffer = flush_rx_queue,
-};
-
-/**
- * n_hdlc_release - release an n_hdlc per device line discipline info structure
- * @n_hdlc - per device line discipline info structure
- */
-static void n_hdlc_release(struct n_hdlc *n_hdlc)
+static void n_hdlc_free_buf_list(struct n_hdlc_buf_list *list)
{
- struct tty_struct *tty = n_hdlc2tty (n_hdlc);
struct n_hdlc_buf *buf;
-
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_release() called\n",__FILE__,__LINE__);
-
- /* Ensure that the n_hdlcd process is not hanging on select()/poll() */
- wake_up_interruptible (&tty->read_wait);
- wake_up_interruptible (&tty->write_wait);
-
- if (tty->disc_data == n_hdlc)
- tty->disc_data = NULL; /* Break the tty->n_hdlc link */
-
- /* Release transmit and receive buffers */
- for(;;) {
- buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
- if (buf) {
- kfree(buf);
- } else
- break;
- }
- for(;;) {
- buf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
- if (buf) {
- kfree(buf);
- } else
- break;
- }
- for(;;) {
- buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
- if (buf) {
- kfree(buf);
- } else
- break;
- }
- for(;;) {
- buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
- if (buf) {
- kfree(buf);
- } else
- break;
- }
- kfree(n_hdlc);
-
-} /* end of n_hdlc_release() */
+
+ do {
+ buf = n_hdlc_buf_get(list);
+ kfree(buf);
+ } while (buf);
+}
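
The new helper leans on kfree(NULL) being a defined no-op: the loop frees whatever n_hdlc_buf_get() returned and terminates once it returns NULL, with no separate empty-list test. The same idiom in isolation, with a hypothetical pop helper:

    #include <linux/slab.h>

    struct demo_item { int payload; };

    struct demo_item *demo_pop(void);   /* hypothetical; returns NULL when empty */

    static void demo_drain(void)
    {
            struct demo_item *it;

            do {
                    it = demo_pop();
                    kfree(it);      /* kfree(NULL) is explicitly a no-op */
            } while (it);
    }
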
/**
* n_hdlc_tty_close - line discipline close
@@ -285,34 +194,26 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
*/
static void n_hdlc_tty_close(struct tty_struct *tty)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
-
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_close() called\n",__FILE__,__LINE__);
-
- if (n_hdlc != NULL) {
- if (n_hdlc->magic != HDLC_MAGIC) {
- printk (KERN_WARNING"n_hdlc: trying to close unopened tty!\n");
- return;
- }
+ struct n_hdlc *n_hdlc = tty->disc_data;
+
+ if (n_hdlc->magic != HDLC_MAGIC) {
+ pr_warn("n_hdlc: trying to close unopened tty!\n");
+ return;
+ }
#if defined(TTY_NO_WRITE_SPLIT)
- clear_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
+ clear_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
#endif
- tty->disc_data = NULL;
- if (tty == n_hdlc->backup_tty)
- n_hdlc->backup_tty = NULL;
- if (tty != n_hdlc->tty)
- return;
- if (n_hdlc->backup_tty) {
- n_hdlc->tty = n_hdlc->backup_tty;
- } else {
- n_hdlc_release (n_hdlc);
- }
- }
-
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_close() success\n",__FILE__,__LINE__);
-
+ tty->disc_data = NULL;
+
+ /* Ensure that the n_hdlcd process is not hanging on select()/poll() */
+ wake_up_interruptible(&tty->read_wait);
+ wake_up_interruptible(&tty->write_wait);
+
+ n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
+ n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
+ n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
+ n_hdlc_free_buf_list(&n_hdlc->tx_buf_list);
+ kfree(n_hdlc);
} /* end of n_hdlc_tty_close() */
/**
@@ -321,44 +222,35 @@ static void n_hdlc_tty_close(struct tty_struct *tty)
*
* Returns 0 if success, otherwise error code
*/
-static int n_hdlc_tty_open (struct tty_struct *tty)
+static int n_hdlc_tty_open(struct tty_struct *tty)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ struct n_hdlc *n_hdlc = tty->disc_data;
+
+ pr_debug("%s() called (device=%s)\n", __func__, tty->name);
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_open() called (device=%s)\n",
- __FILE__,__LINE__,
- tty->name);
-
/* There should not be an existing table for this slot. */
if (n_hdlc) {
- printk (KERN_ERR"n_hdlc_tty_open:tty already associated!\n" );
+ pr_err("%s: tty already associated!\n", __func__);
return -EEXIST;
}
-
+
n_hdlc = n_hdlc_alloc();
if (!n_hdlc) {
- printk (KERN_ERR "n_hdlc_alloc failed\n");
+ pr_err("%s: n_hdlc_alloc failed\n", __func__);
return -ENFILE;
}
-
+
tty->disc_data = n_hdlc;
- n_hdlc->tty = tty;
tty->receive_room = 65536;
-
-#if defined(TTY_NO_WRITE_SPLIT)
+
/* change tty_io write() to not split large writes into 8K chunks */
- set_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
-#endif
-
+ set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+
/* flush receive data from driver */
tty_driver_flush_buffer(tty);
-
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_open() success\n",__FILE__,__LINE__);
-
+
return 0;
-
+
} /* end of n_hdlc_tty_open() */
/**
@@ -376,26 +268,22 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
unsigned long flags;
struct n_hdlc_buf *tbuf;
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_send_frames() called\n",__FILE__,__LINE__);
- check_again:
-
- spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+check_again:
+
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
if (n_hdlc->tbusy) {
- n_hdlc->woke_up = 1;
- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+ n_hdlc->woke_up = true;
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
return;
}
- n_hdlc->tbusy = 1;
- n_hdlc->woke_up = 0;
+ n_hdlc->tbusy = true;
+ n_hdlc->woke_up = false;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
while (tbuf) {
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)sending frame %p, count=%d\n",
- __FILE__,__LINE__,tbuf,tbuf->count);
-
+ pr_debug("sending frame %p, count=%d\n", tbuf, tbuf->count);
+
/* Send the next block of data to device */
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
@@ -409,24 +297,20 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
/* pretending it was accepted by driver */
if (actual < 0)
actual = tbuf->count;
-
+
if (actual == tbuf->count) {
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)frame %p completed\n",
- __FILE__,__LINE__,tbuf);
-
+ pr_debug("frame %p completed\n", tbuf);
+
/* free current transmit buffer */
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
/* wake up sleeping writers */
wake_up_interruptible(&tty->write_wait);
-
+
/* get next pending transmit buffer */
tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
} else {
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)frame %p pending\n",
- __FILE__,__LINE__,tbuf);
+ pr_debug("frame %p pending\n", tbuf);
/*
* the buffer was not accepted by driver,
@@ -436,21 +320,17 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
break;
}
}
-
+
if (!tbuf)
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-
+
/* Clear the re-entry flag */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
- n_hdlc->tbusy = 0;
- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
-
- if (n_hdlc->woke_up)
- goto check_again;
-
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_send_frames() exit\n",__FILE__,__LINE__);
-
+ n_hdlc->tbusy = false;
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+
+ if (n_hdlc->woke_up)
+ goto check_again;
} /* end of n_hdlc_send_frames() */
/**
@@ -461,21 +341,9 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
*/
static void n_hdlc_tty_wakeup(struct tty_struct *tty)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc *n_hdlc = tty->disc_data;
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_wakeup() called\n",__FILE__,__LINE__);
-
- if (!n_hdlc)
- return;
-
- if (tty != n_hdlc->tty) {
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- return;
- }
-
- n_hdlc_send_frames (n_hdlc, tty);
-
+ n_hdlc_send_frames(n_hdlc, tty);
} /* end of n_hdlc_tty_wakeup() */
/**
@@ -491,59 +359,50 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
char *flags, int count)
{
- register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ register struct n_hdlc *n_hdlc = tty->disc_data;
register struct n_hdlc_buf *buf;
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_receive() called count=%d\n",
- __FILE__,__LINE__, count);
-
- /* This can happen if stuff comes in on the backup tty */
- if (!n_hdlc || tty != n_hdlc->tty)
- return;
-
+ pr_debug("%s() called count=%d\n", __func__, count);
+
/* verify line is using HDLC discipline */
if (n_hdlc->magic != HDLC_MAGIC) {
- printk("%s(%d) line not using HDLC discipline\n",
- __FILE__,__LINE__);
+ pr_err("line not using HDLC discipline\n");
return;
}
-
- if ( count>maxframe ) {
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d) rx count>maxframesize, data discarded\n",
- __FILE__,__LINE__);
+
+ if (count > maxframe) {
+ pr_debug("rx count>maxframesize, data discarded\n");
return;
}
- /* get a free HDLC buffer */
+ /* get a free HDLC buffer */
buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
if (!buf) {
- /* no buffers in free list, attempt to allocate another rx buffer */
- /* unless the maximum count has been reached */
+ /*
+ * no buffers in free list, attempt to allocate another rx
+ * buffer unless the maximum count has been reached
+ */
if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
buf = kmalloc(struct_size(buf, buf, maxframe),
GFP_ATOMIC);
}
-
+
if (!buf) {
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d) no more rx buffers, data discarded\n",
- __FILE__,__LINE__);
+ pr_debug("no more rx buffers, data discarded\n");
return;
}
-
+
/* copy received data to HDLC buffer */
- memcpy(buf->buf,data,count);
- buf->count=count;
+ memcpy(buf->buf, data, count);
+ buf->count = count;
/* add HDLC buffer to list of received frames */
n_hdlc_buf_put(&n_hdlc->rx_buf_list, buf);
-
+
/* wake up any blocked reads and perform async signalling */
- wake_up_interruptible (&tty->read_wait);
- if (n_hdlc->tty->fasync != NULL)
- kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
+ wake_up_interruptible(&tty->read_wait);
+ if (tty->fasync != NULL)
+ kill_fasync(&tty->fasync, SIGIO, POLL_IN);
} /* end of n_hdlc_tty_receive() */
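
kill_fasync() is the driver half of the SIGIO protocol, and with the backup-tty indirection gone the tty passed to receive_buf is always the one the ldisc is attached to, so tty->fasync is the right queue. From user space a reader opts in through the standard fcntl interface; a minimal sketch:

    #include <fcntl.h>
    #include <signal.h>
    #include <unistd.h>

    /* ask the kernel to raise SIGIO when a complete frame is queued */
    static int enable_sigio(int fd, void (*handler)(int))
    {
            if (signal(SIGIO, handler) == SIG_ERR)
                    return -1;
            if (fcntl(fd, F_SETOWN, getpid()) < 0)  /* route the signal to us */
                    return -1;
            return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
    }
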
@@ -553,28 +412,21 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
* @file - pointer to open file object
* @buf - pointer to returned data buffer
* @nr - size of returned data buffer
- *
+ *
* Returns the number of bytes returned or error code.
*/
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
__u8 __user *buf, size_t nr)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc *n_hdlc = tty->disc_data;
int ret = 0;
struct n_hdlc_buf *rbuf;
DECLARE_WAITQUEUE(wait, current);
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
-
- /* Validate the pointers */
- if (!n_hdlc)
- return -EIO;
-
/* verify user access to buffer */
if (!access_ok(buf, nr)) {
- printk(KERN_WARNING "%s(%d) n_hdlc_tty_read() can't verify user "
- "buffer\n", __FILE__, __LINE__);
+ pr_warn("%s(%d) %s() can't verify user buffer\n",
+ __FILE__, __LINE__, __func__);
return -EFAULT;
}
@@ -610,7 +462,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
break;
}
-
+
/* no data */
if (tty_io_nonblock(tty, file)) {
ret = -EAGAIN;
@@ -629,7 +481,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
__set_current_state(TASK_RUNNING);
return ret;
-
+
} /* end of n_hdlc_tty_read() */
/**
@@ -638,43 +490,34 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
* @file - pointer to file object data
* @data - pointer to transmit data (one frame)
* @count - size of transmit frame in bytes
- *
+ *
* Returns the number of bytes written (or error code).
*/
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *data, size_t count)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ struct n_hdlc *n_hdlc = tty->disc_data;
int error = 0;
DECLARE_WAITQUEUE(wait, current);
struct n_hdlc_buf *tbuf;
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_write() called count=%zd\n",
- __FILE__,__LINE__,count);
-
- /* Verify pointers */
- if (!n_hdlc)
- return -EIO;
+ pr_debug("%s() called count=%zd\n", __func__, count);
if (n_hdlc->magic != HDLC_MAGIC)
return -EIO;
/* verify frame size */
- if (count > maxframe ) {
- if (debuglevel & DEBUG_LEVEL_INFO)
- printk (KERN_WARNING
- "n_hdlc_tty_write: truncating user packet "
- "from %lu to %d\n", (unsigned long) count,
- maxframe );
+ if (count > maxframe) {
+ pr_debug("%s: truncating user packet from %zu to %d\n",
+ __func__, count, maxframe);
count = maxframe;
}
-
+
add_wait_queue(&tty->write_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
-
+
tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
if (tbuf)
break;
@@ -684,15 +527,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
break;
}
schedule();
-
- n_hdlc = tty2n_hdlc (tty);
- if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
- tty != n_hdlc->tty) {
- printk("n_hdlc_tty_write: %p invalid after wait!\n", n_hdlc);
- error = -EIO;
- break;
- }
-
+
if (signal_pending(current)) {
error = -EINTR;
break;
@@ -702,18 +537,18 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
- if (!error) {
+ if (!error) {
/* Retrieve the user's buffer */
memcpy(tbuf->buf, data, count);
/* Send the data */
tbuf->count = error = count;
- n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
- n_hdlc_send_frames(n_hdlc,tty);
+ n_hdlc_buf_put(&n_hdlc->tx_buf_list, tbuf);
+ n_hdlc_send_frames(n_hdlc, tty);
}
return error;
-
+
} /* end of n_hdlc_tty_write() */
/**
@@ -728,32 +563,30 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ struct n_hdlc *n_hdlc = tty->disc_data;
int error = 0;
int count;
unsigned long flags;
struct n_hdlc_buf *buf = NULL;
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
- __FILE__,__LINE__,cmd);
-
+ pr_debug("%s() called %d\n", __func__, cmd);
+
/* Verify the status of the device */
- if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC)
+ if (n_hdlc->magic != HDLC_MAGIC)
return -EBADF;
switch (cmd) {
case FIONREAD:
/* report count of read data available */
/* in next available frame (if any) */
- spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
+ spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock, flags);
buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
struct n_hdlc_buf, list_item);
if (buf)
count = buf->count;
else
count = 0;
- spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
+ spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock, flags);
error = put_user(count, (int __user *)arg);
break;
@@ -761,12 +594,12 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
/* get the pending tx byte count in the driver */
count = tty_chars_in_buffer(tty);
/* add size of next output frame in queue */
- spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
struct n_hdlc_buf, list_item);
if (buf)
count += buf->count;
- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
error = put_user(count, (int __user *)arg);
break;
@@ -783,7 +616,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
break;
}
return error;
-
+
} /* end of n_hdlc_tty_ioctl() */
/**
@@ -791,7 +624,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
* @tty - pointer to tty instance data
* @filp - pointer to open file object for device
* @poll_table - wait queue for operations
- *
+ *
* Determine which operations (read/write) will not block and return info
* to caller.
* Returns a bit mask containing info on which ops will not block.
@@ -799,33 +632,50 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait)
{
- struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ struct n_hdlc *n_hdlc = tty->disc_data;
__poll_t mask = 0;
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_poll() called\n",__FILE__,__LINE__);
-
- if (n_hdlc && n_hdlc->magic == HDLC_MAGIC && tty == n_hdlc->tty) {
- /* queue current process into any wait queue that */
- /* may awaken in the future (read and write) */
-
- poll_wait(filp, &tty->read_wait, wait);
- poll_wait(filp, &tty->write_wait, wait);
-
- /* set bits for operations that won't block */
- if (!list_empty(&n_hdlc->rx_buf_list.list))
- mask |= EPOLLIN | EPOLLRDNORM; /* readable */
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
- mask |= EPOLLHUP;
- if (tty_hung_up_p(filp))
- mask |= EPOLLHUP;
- if (!tty_is_writelocked(tty) &&
- !list_empty(&n_hdlc->tx_free_buf_list.list))
- mask |= EPOLLOUT | EPOLLWRNORM; /* writable */
- }
+ if (n_hdlc->magic != HDLC_MAGIC)
+ return 0;
+
+ /*
+ * queue the current process into any wait queue that may awaken in the
+ * future (read and write)
+ */
+ poll_wait(filp, &tty->read_wait, wait);
+ poll_wait(filp, &tty->write_wait, wait);
+
+ /* set bits for operations that won't block */
+ if (!list_empty(&n_hdlc->rx_buf_list.list))
+ mask |= EPOLLIN | EPOLLRDNORM; /* readable */
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= EPOLLHUP;
+ if (tty_hung_up_p(filp))
+ mask |= EPOLLHUP;
+ if (!tty_is_writelocked(tty) &&
+ !list_empty(&n_hdlc->tx_free_buf_list.list))
+ mask |= EPOLLOUT | EPOLLWRNORM; /* writable */
+
return mask;
} /* end of n_hdlc_tty_poll() */
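
With the backup-tty special case removed, the poll method reduces to the canonical shape: register on both wait queues, then report the readiness bits. The matching user-space side, as a minimal sketch:

    #include <poll.h>

    /* wait for a received frame (POLLIN) or a free tx buffer (POLLOUT) */
    static short wait_ldisc(int fd, int timeout_ms)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

            if (poll(&pfd, 1, timeout_ms) <= 0)
                    return 0;       /* timeout or error */
            return pfd.revents;
    }
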
+static void n_hdlc_alloc_buf(struct n_hdlc_buf_list *list, unsigned int count,
+ const char *name)
+{
+ struct n_hdlc_buf *buf;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
+ if (!buf) {
+ pr_debug("%s(), kmalloc() failed for %s buffer %u\n",
+ __func__, name, i);
+ return;
+ }
+ n_hdlc_buf_put(list, buf);
+ }
+}
+
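
n_hdlc_alloc_buf() folds the two copy-pasted allocation loops below into one helper. Note the struct_size() idiom it keeps: it sizes an allocation ending in a flexible array member with overflow checking. In isolation, with hypothetical names:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_buf {
            int count;
            unsigned char buf[];    /* flexible array member */
    };

    static struct demo_buf *demo_buf_alloc(size_t maxframe)
    {
            struct demo_buf *b;

            /* sizeof(*b) + maxframe * sizeof(b->buf[0]), saturating on overflow */
            b = kmalloc(struct_size(b, buf, maxframe), GFP_KERNEL);
            return b;
    }
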
/**
* n_hdlc_alloc - allocate an n_hdlc instance data structure
*
@@ -833,8 +683,6 @@ static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
*/
static struct n_hdlc *n_hdlc_alloc(void)
{
- struct n_hdlc_buf *buf;
- int i;
struct n_hdlc *n_hdlc = kzalloc(sizeof(*n_hdlc), GFP_KERNEL);
if (!n_hdlc)
@@ -850,30 +698,14 @@ static struct n_hdlc *n_hdlc_alloc(void)
INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
- /* allocate free rx buffer list */
- for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
- buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
- if (buf)
- n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,buf);
- else if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_alloc(), kalloc() failed for rx buffer %d\n",__FILE__,__LINE__, i);
- }
-
- /* allocate free tx buffer list */
- for(i=0;i<DEFAULT_TX_BUF_COUNT;i++) {
- buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
- if (buf)
- n_hdlc_buf_put(&n_hdlc->tx_free_buf_list,buf);
- else if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_alloc(), kalloc() failed for tx buffer %d\n",__FILE__,__LINE__, i);
- }
-
+ n_hdlc_alloc_buf(&n_hdlc->rx_free_buf_list, DEFAULT_RX_BUF_COUNT, "rx");
+ n_hdlc_alloc_buf(&n_hdlc->tx_free_buf_list, DEFAULT_TX_BUF_COUNT, "tx");
+
/* Initialize the control block */
n_hdlc->magic = HDLC_MAGIC;
- n_hdlc->flags = 0;
-
+
return n_hdlc;
-
+
} /* end of n_hdlc_alloc() */
/**
@@ -915,7 +747,7 @@ static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
/**
* n_hdlc_buf_get - remove and return an HDLC buffer from list
* @buf_list - pointer to HDLC buffer list
- *
+ *
* Remove and return an HDLC buffer from the head of the specified HDLC buffer
* list.
* Returns a pointer to HDLC buffer if available, otherwise %NULL.
@@ -938,44 +770,39 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
return buf;
} /* end of n_hdlc_buf_get() */
-static const char hdlc_banner[] __initconst =
- KERN_INFO "HDLC line discipline maxframe=%u\n";
-static const char hdlc_register_ok[] __initconst =
- KERN_INFO "N_HDLC line discipline registered.\n";
-static const char hdlc_register_fail[] __initconst =
- KERN_ERR "error registering line discipline: %d\n";
+static struct tty_ldisc_ops n_hdlc_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "hdlc",
+ .open = n_hdlc_tty_open,
+ .close = n_hdlc_tty_close,
+ .read = n_hdlc_tty_read,
+ .write = n_hdlc_tty_write,
+ .ioctl = n_hdlc_tty_ioctl,
+ .poll = n_hdlc_tty_poll,
+ .receive_buf = n_hdlc_tty_receive,
+ .write_wakeup = n_hdlc_tty_wakeup,
+ .flush_buffer = flush_rx_queue,
+};
static int __init n_hdlc_init(void)
{
int status;
/* range check maxframe arg */
- if (maxframe < 4096)
- maxframe = 4096;
- else if (maxframe > 65535)
- maxframe = 65535;
-
- printk(hdlc_banner, maxframe);
+ maxframe = clamp(maxframe, 4096, MAX_HDLC_FRAME_SIZE);
status = tty_register_ldisc(N_HDLC, &n_hdlc_ldisc);
if (!status)
- printk(hdlc_register_ok);
+ pr_info("N_HDLC line discipline registered with maxframe=%d\n",
+ maxframe);
else
- printk(hdlc_register_fail, status);
+ pr_err("N_HDLC: error registering line discipline: %d\n",
+ status);
return status;
-
-} /* end of init_module() */
-
-#ifdef CONFIG_SPARC
-#undef __exitdata
-#define __exitdata
-#endif
-static const char hdlc_unregister_ok[] __exitdata =
- KERN_INFO "N_HDLC: line discipline unregistered\n";
-static const char hdlc_unregister_fail[] __exitdata =
- KERN_ERR "N_HDLC: can't unregister line discipline (err = %d)\n";
+} /* end of init_module() */
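
clamp() collapses the old two-branch range check into a single expression; all three arguments must share a type. A minimal sketch of the same bound (at this point in history clamp() came from linux/kernel.h):

    #include <linux/kernel.h>

    #define DEMO_MAX_FRAME 65535

    /* module parameters arrive unchecked, so pin them to a sane range */
    static int bound_maxframe(int requested)
    {
            return clamp(requested, 4096, DEMO_MAX_FRAME);
    }
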
static void __exit n_hdlc_exit(void)
{
@@ -983,9 +810,10 @@ static void __exit n_hdlc_exit(void)
int status = tty_unregister_ldisc(N_HDLC);
if (status)
- printk(hdlc_unregister_fail, status);
+ pr_err("N_HDLC: can't unregister line discipline (err = %d)\n",
+ status);
else
- printk(hdlc_unregister_ok);
+ pr_info("N_HDLC: line discipline unregistered\n");
}
module_init(n_hdlc_init);
@@ -993,6 +821,5 @@ module_exit(n_hdlc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Fulghum paulkf@microgate.com");
-module_param(debuglevel, int, 0);
module_param(maxframe, int, 0);
MODULE_ALIAS_LDISC(N_HDLC);
diff --git a/drivers/tty/n_tracesink.h b/drivers/tty/n_tracesink.h
index 1b846330c855..7031d515a700 100644
--- a/drivers/tty/n_tracesink.h
+++ b/drivers/tty/n_tracesink.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* n_tracesink.h - Kernel driver API to route trace data in kernel space.
*
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index f9c584244f72..1794d84e7bf6 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -84,7 +84,7 @@
#ifdef N_TTY_TRACE
# define n_tty_trace(f, args...) trace_printk(f, ##args)
#else
-# define n_tty_trace(f, args...)
+# define n_tty_trace(f, args...) no_printk(f, ##args)
#endif
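
Defining the disabled variant as no_printk() instead of expanding to nothing keeps the format string and its arguments type-checked even when tracing is compiled out, so call sites cannot bit-rot silently. The pattern in isolation:

    #include <linux/kernel.h>
    #include <linux/printk.h>

    #ifdef DEMO_TRACE
    # define demo_trace(fmt, args...) trace_printk(fmt, ##args)
    #else
    /* compiles away, but fmt/args are still validated at build time */
    # define demo_trace(fmt, args...) no_printk(fmt, ##args)
    #endif
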
struct n_tty_data {
@@ -654,9 +654,9 @@ static size_t __process_echoes(struct tty_struct *tty)
op = echo_buf(ldata, tail + 1);
switch (op) {
+ case ECHO_OP_ERASE_TAB: {
unsigned int num_chars, num_bs;
- case ECHO_OP_ERASE_TAB:
if (MASK(ldata->echo_commit) == MASK(tail + 2))
goto not_yet_stored;
num_chars = echo_buf(ldata, tail + 2);
@@ -687,7 +687,7 @@ static size_t __process_echoes(struct tty_struct *tty)
}
tail += 3;
break;
-
+ }
case ECHO_OP_SET_CANON_COL:
ldata->canon_column = ldata->column;
tail += 2;
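
The brace moves up to the case label because the original declared num_chars/num_bs directly inside the switch body before the first label, where they are never initialised and their scope is misleading; giving the one case that uses them its own block puts the declarations where they belong. Minimal illustration:

    static int demo(int op)
    {
            switch (op) {
            case 1: {               /* block scope for the declaration */
                    int doubled = op * 2;
                    return doubled;
            }
            default:
                    return 0;
            }
    }
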
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index ed99948f3b7f..d42b854cb7df 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -301,7 +301,7 @@ struct ctrl_dl {
unsigned int DCD:1;
unsigned int RI:1;
unsigned int CTS:1;
- unsigned int reserverd:4;
+ unsigned int reserved:4;
u8 port;
} __attribute__ ((packed));
@@ -839,40 +839,39 @@ static char *interrupt2str(u16 interrupt)
static char buf[TMP_BUF_MAX];
char *p = buf;
- interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL;
- interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "MDM_DL2 ") : NULL;
-
- interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "MDM_UL1 ") : NULL;
- interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "MDM_UL2 ") : NULL;
-
- interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "DIAG_DL1 ") : NULL;
- interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "DIAG_DL2 ") : NULL;
-
- interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "DIAG_UL ") : NULL;
-
- interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "APP1_DL ") : NULL;
- interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "APP2_DL ") : NULL;
-
- interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "APP1_UL ") : NULL;
- interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "APP2_UL ") : NULL;
-
- interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "CTRL_DL ") : NULL;
- interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "CTRL_UL ") : NULL;
-
- interrupt & RESET ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
- "RESET ") : NULL;
+ if (interrupt & MDM_DL1)
+ p += scnprintf(p, TMP_BUF_MAX, "MDM_DL1 ");
+ if (interrupt & MDM_DL2)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "MDM_DL2 ");
+ if (interrupt & MDM_UL1)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "MDM_UL1 ");
+ if (interrupt & MDM_UL2)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "MDM_UL2 ");
+ if (interrupt & DIAG_DL1)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_DL1 ");
+ if (interrupt & DIAG_DL2)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_DL2 ");
+
+ if (interrupt & DIAG_UL)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_UL ");
+
+ if (interrupt & APP1_DL)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "APP1_DL ");
+ if (interrupt & APP2_DL)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "APP2_DL ");
+
+ if (interrupt & APP1_UL)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "APP1_UL ");
+ if (interrupt & APP2_UL)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "APP2_UL ");
+
+ if (interrupt & CTRL_DL)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "CTRL_DL ");
+ if (interrupt & CTRL_UL)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "CTRL_UL ");
+
+ if (interrupt & RESET)
+ p += scnprintf(p, TMP_BUF_MAX - (p - buf), "RESET ");
return buf;
}
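
Besides untangling the ternary chains, the conversion swaps snprintf() for scnprintf(): snprintf() returns the length the output would have had, so advancing p by its return value can step past the buffer on truncation, whereas scnprintf() returns what was actually written. The safe append pattern, with hypothetical flag names:

    #include <linux/kernel.h>

    #define DEMO_BUF_MAX 256

    static char *demo_flags_to_str(char *buf, unsigned int flags)
    {
            char *p = buf;

            *p = '\0';
            if (flags & 0x01)
                    p += scnprintf(p, DEMO_BUF_MAX - (p - buf), "FLAG_A ");
            if (flags & 0x02)
                    p += scnprintf(p, DEMO_BUF_MAX - (p - buf), "FLAG_B ");
            return buf;
    }
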
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index fbaa4ec85560..e2138e7d5dc6 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
tty_port_init(&info->port);
info->port.ops = &rocket_port_ops;
info->flags &= ~ROCKET_MODE_MASK;
- switch (pc104[board][line]) {
- case 422:
- info->flags |= ROCKET_MODE_RS422;
- break;
- case 485:
- info->flags |= ROCKET_MODE_RS485;
- break;
- case 232:
- default:
+ if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1))
+ switch (pc104[board][line]) {
+ case 422:
+ info->flags |= ROCKET_MODE_RS422;
+ break;
+ case 485:
+ info->flags |= ROCKET_MODE_RS485;
+ break;
+ case 232:
+ default:
+ info->flags |= ROCKET_MODE_RS232;
+ break;
+ }
+ else
info->flags |= ROCKET_MODE_RS232;
- break;
- }
info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
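
The fix bounds-checks the pc104 table lookup with ARRAY_SIZE() before indexing with board/line, and falls back to RS-232 for anything out of range. The shape of the check, with a hypothetical table:

    #include <linux/kernel.h>

    static int demo_modes[4][8];    /* hypothetical per-board mode table */

    static int demo_lookup(unsigned int board, unsigned int line)
    {
            /* ARRAY_SIZE() is the element count, so >= is the out-of-bounds test */
            if (board >= ARRAY_SIZE(demo_modes) ||
                line >= ARRAY_SIZE(demo_modes[0]))
                    return 232;     /* default to RS-232 */
            return demo_modes[board][line];
    }
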
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 33ad9d6de532..52bb21205bb6 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for 8250/16550-type serial ports
*
@@ -156,7 +156,9 @@ void serial8250_rpm_put(struct uart_8250_port *p);
void serial8250_rpm_get_tx(struct uart_8250_port *p);
void serial8250_rpm_put_tx(struct uart_8250_port *p);
-int serial8250_em485_init(struct uart_8250_port *p);
+int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485);
+void serial8250_em485_start_tx(struct uart_8250_port *p);
+void serial8250_em485_stop_tx(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
/* MCR <-> TIOCM conversion */
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index e70e3cc30050..12d03e678295 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -6,6 +6,10 @@
*
* Based on 8250_lpc18xx.c:
* Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * The bcm2835aux is capable of RTS auto flow-control, but this driver doesn't
+ * take advantage of it yet. When adding support, be sure not to enable it
+ * simultaneously to rs485.
*/
#include <linux/clk.h>
@@ -16,16 +20,64 @@
#include "8250.h"
+#define BCM2835_AUX_UART_CNTL 8
+#define BCM2835_AUX_UART_CNTL_RXEN 0x01 /* Receiver enable */
+#define BCM2835_AUX_UART_CNTL_TXEN 0x02 /* Transmitter enable */
+#define BCM2835_AUX_UART_CNTL_AUTORTS 0x04 /* RTS set by RX fill level */
+#define BCM2835_AUX_UART_CNTL_AUTOCTS 0x08 /* CTS stops transmitter */
+#define BCM2835_AUX_UART_CNTL_RTS3 0x00 /* RTS set until 3 chars left */
+#define BCM2835_AUX_UART_CNTL_RTS2 0x10 /* RTS set until 2 chars left */
+#define BCM2835_AUX_UART_CNTL_RTS1 0x20 /* RTS set until 1 char left */
+#define BCM2835_AUX_UART_CNTL_RTS4 0x30 /* RTS set until 4 chars left */
+#define BCM2835_AUX_UART_CNTL_RTSINV 0x40 /* Invert auto RTS polarity */
+#define BCM2835_AUX_UART_CNTL_CTSINV 0x80 /* Invert auto CTS polarity */
+
/**
* struct bcm2835aux_data - driver private data of BCM2835 auxiliary UART
* @clk: clock producer of the port's uartclk
* @line: index of the port's serial8250_ports[] entry
+ * @cntl: cached copy of CNTL register
*/
struct bcm2835aux_data {
struct clk *clk;
int line;
+ u32 cntl;
};
+static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up)
+{
+ if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
+ struct bcm2835aux_data *data = dev_get_drvdata(up->port.dev);
+
+ data->cntl &= ~BCM2835_AUX_UART_CNTL_RXEN;
+ serial_out(up, BCM2835_AUX_UART_CNTL, data->cntl);
+ }
+
+ /*
+ * On the bcm2835aux, the MCR register contains no other
+ * flags besides RTS. So no need for a read-modify-write.
+ */
+ if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
+ serial8250_out_MCR(up, 0);
+ else
+ serial8250_out_MCR(up, UART_MCR_RTS);
+}
+
+static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up)
+{
+ if (up->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ serial8250_out_MCR(up, 0);
+ else
+ serial8250_out_MCR(up, UART_MCR_RTS);
+
+ if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
+ struct bcm2835aux_data *data = dev_get_drvdata(up->port.dev);
+
+ data->cntl |= BCM2835_AUX_UART_CNTL_RXEN;
+ serial_out(up, BCM2835_AUX_UART_CNTL, data->cntl);
+ }
+}
+
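
These two callbacks give the generic 8250 em485 code a device-specific way to mute the receiver and steer RTS around each transmission. User space selects the mode through the standard rs485 ioctl; a minimal user-space sketch:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/serial.h>

    /* enable half-duplex RS-485, asserting RTS for the duration of each send */
    static int demo_enable_rs485(const char *dev)
    {
            struct serial_rs485 rs485;
            int fd = open(dev, O_RDWR | O_NOCTTY);

            if (fd < 0)
                    return -1;
            memset(&rs485, 0, sizeof(rs485));
            rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
            if (ioctl(fd, TIOCSRS485, &rs485) < 0) {
                    close(fd);      /* driver lacks rs485 support */
                    return -1;
            }
            return fd;
    }
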
static int bcm2835aux_serial_probe(struct platform_device *pdev)
{
struct uart_8250_port up = { };
@@ -47,6 +99,14 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
up.port.fifosize = 8;
up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
UPF_SKIP_TEST | UPF_IOREMAP;
+ up.port.rs485_config = serial8250_em485_config;
+ up.rs485_start_tx = bcm2835aux_rs485_start_tx;
+ up.rs485_stop_tx = bcm2835aux_rs485_stop_tx;
+
+ /* initialize cached copy with power-on reset value */
+ data->cntl = BCM2835_AUX_UART_CNTL_RXEN | BCM2835_AUX_UART_CNTL_TXEN;
+
+ platform_set_drvdata(pdev, data);
/* get the clock - this also enables the HW */
data->clk = devm_clk_get(&pdev->dev, NULL);
@@ -102,8 +162,6 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
}
data->line = ret;
- platform_set_drvdata(pdev, data);
-
return 0;
dis_clk:
@@ -137,6 +195,24 @@ static struct platform_driver bcm2835aux_serial_driver = {
};
module_platform_driver(bcm2835aux_serial_driver);
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+
+static int __init early_bcm2835aux_setup(struct earlycon_device *device,
+ const char *options)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->port.iotype = UPIO_MEM32;
+ device->port.regshift = 2;
+
+ return early_serial8250_setup(device, NULL);
+}
+
+OF_EARLYCON_DECLARE(bcm2835aux, "brcm,bcm2835-aux-uart",
+ early_bcm2835aux_setup);
+#endif
+
MODULE_DESCRIPTION("BCM2835 auxiliary UART driver");
MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index f2a33c9082a6..45d9117cab68 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -608,6 +608,14 @@ static int univ8250_console_setup(struct console *co, char *options)
return retval;
}
+static int univ8250_console_exit(struct console *co)
+{
+ struct uart_port *port;
+
+ port = &serial8250_ports[co->index].port;
+ return serial8250_console_exit(port);
+}
+
/**
* univ8250_console_match - non-standard console matching
* @co: registering console
@@ -666,6 +674,7 @@ static struct console univ8250_console = {
.write = univ8250_console_write,
.device = uart_console_device,
.setup = univ8250_console_setup,
+ .exit = univ8250_console_exit,
.match = univ8250_console_match,
.flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
@@ -1007,14 +1016,18 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->port.unthrottle = up->port.unthrottle;
uart->port.rs485_config = up->port.rs485_config;
uart->port.rs485 = up->port.rs485;
+ uart->rs485_start_tx = up->rs485_start_tx;
+ uart->rs485_stop_tx = up->rs485_stop_tx;
uart->dma = up->dma;
/* Take tx_loadsz from fifosize if it wasn't set separately */
if (uart->port.fifosize && !uart->tx_loadsz)
uart->tx_loadsz = uart->port.fifosize;
- if (up->port.dev)
+ if (up->port.dev) {
uart->port.dev = up->port.dev;
+ uart_get_rs485_mode(uart->port.dev, &uart->port.rs485);
+ }
if (up->port.flags & UPF_FIXED_TYPE)
uart->port.type = up->port.type;
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index 87a4db2a8aba..9a12953832d3 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/* Synopsys DesignWare 8250 library header file. */
#include <linux/types.h>
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index d330da76d6b6..59449b6500cd 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -135,7 +135,7 @@ struct exar8250 {
unsigned int nr;
struct exar8250_board *board;
void __iomem *virt;
- int line[0];
+ int line[];
};
static void exar_pm(struct uart_port *port, unsigned int state, unsigned int old)
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 60eff3240c8a..4dee8a9e0c95 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -156,6 +156,11 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
return 0;
}
+static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
+{
+ return 0;
+}
+
#ifdef CONFIG_SERIAL_8250_DMA
static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
.nr_channels = 2,
@@ -356,6 +361,7 @@ static const struct lpss8250_board byt_board = {
static const struct lpss8250_board ehl_board = {
.freq = 200000000,
.base_baud = 12500000,
+ .setup = ehl_serial_setup,
};
static const struct lpss8250_board qrk_board = {
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 4d067f515f74..f839380c2f4c 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -32,6 +32,7 @@
#define MTK_UART_RXTRI_AD 0x14 /* RX Trigger address */
#define MTK_UART_FRACDIV_L 0x15 /* Fractional divider LSB address */
#define MTK_UART_FRACDIV_M 0x16 /* Fractional divider MSB address */
+#define MTK_UART_DEBUG0 0x18
#define MTK_UART_IER_XOFFI 0x20 /* Enable XOFF character interrupt */
#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
@@ -388,9 +389,18 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
static int __maybe_unused mtk8250_runtime_suspend(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
+ struct uart_8250_port *up = serial8250_get_port(data->line);
- clk_disable_unprepare(data->uart_clk);
- clk_disable_unprepare(data->bus_clk);
+ /* wait until the UART is idle */
+ while (serial_in(up, MTK_UART_DEBUG0))
+ ;
+
+ if (data->clk_count == 0U) {
+ dev_dbg(dev, "%s clock count is 0\n", __func__);
+ } else {
+ clk_disable_unprepare(data->bus_clk);
+ data->clk_count--;
+ }
return 0;
}
@@ -400,16 +410,16 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev)
struct mtk8250_data *data = dev_get_drvdata(dev);
int err;
- err = clk_prepare_enable(data->uart_clk);
- if (err) {
- dev_warn(dev, "Can't enable clock\n");
- return err;
- }
-
- err = clk_prepare_enable(data->bus_clk);
- if (err) {
- dev_warn(dev, "Can't enable bus clock\n");
- return err;
+ if (data->clk_count > 0U) {
+ dev_dbg(dev, "%s clock count is %d\n", __func__,
+ data->clk_count);
+ } else {
+ err = clk_prepare_enable(data->bus_clk);
+ if (err) {
+ dev_warn(dev, "Can't enable bus clock\n");
+ return err;
+ }
+ data->clk_count++;
}
return 0;
@@ -419,12 +429,14 @@ static void
mtk8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
{
if (!state)
- pm_runtime_get_sync(port->dev);
+ if (!mtk8250_runtime_resume(port->dev))
+ pm_runtime_get_sync(port->dev);
serial8250_do_pm(port, state, old);
if (state)
- pm_runtime_put_sync_suspend(port->dev);
+ if (!pm_runtime_put_sync_suspend(port->dev))
+ mtk8250_runtime_suspend(port->dev);
}
#ifdef CONFIG_SERIAL_8250_DMA
@@ -501,6 +513,8 @@ static int mtk8250_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ data->clk_count = 0;
+
if (pdev->dev.of_node) {
err = mtk8250_probe_of(pdev, &uart.port, data);
if (err)
@@ -533,6 +547,7 @@ static int mtk8250_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ pm_runtime_enable(&pdev->dev);
err = mtk8250_runtime_resume(&pdev->dev);
if (err)
return err;
@@ -541,9 +556,6 @@ static int mtk8250_probe(struct platform_device *pdev)
if (data->line < 0)
return data->line;
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1);
return 0;
@@ -556,11 +568,13 @@ static int mtk8250_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
serial8250_unregister_port(data->line);
- mtk8250_runtime_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mtk8250_runtime_suspend(&pdev->dev);
+
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index f6687756ec5e..65e9045dafe6 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -7,7 +7,6 @@
#include <linux/console.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/of_address.h>
@@ -26,67 +25,16 @@ struct of_serial_info {
int line;
};
-#ifdef CONFIG_ARCH_TEGRA
-static void tegra_serial_handle_break(struct uart_port *p)
-{
- unsigned int status, tmout = 10000;
-
- do {
- status = p->serial_in(p, UART_LSR);
- if (status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS))
- status = p->serial_in(p, UART_RX);
- else
- break;
- if (--tmout == 0)
- break;
- udelay(1);
- } while (1);
-}
-#else
-static inline void tegra_serial_handle_break(struct uart_port *port)
-{
-}
-#endif
-
-static int of_8250_rs485_config(struct uart_port *port,
- struct serial_rs485 *rs485)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- /* Clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- port->rs485 = *rs485;
-
- /*
- * Both serial8250_em485_init and serial8250_em485_destroy
- * are idempotent
- */
- if (rs485->flags & SER_RS485_ENABLED) {
- int ret = serial8250_em485_init(up);
-
- if (ret) {
- rs485->flags &= ~SER_RS485_ENABLED;
- port->rs485.flags &= ~SER_RS485_ENABLED;
- }
- return ret;
- }
-
- serial8250_em485_destroy(up);
-
- return 0;
-}
-
/*
* Fill a struct uart_port for a given device node
*/
static int of_platform_serial_setup(struct platform_device *ofdev,
- int type, struct uart_port *port,
+ int type, struct uart_8250_port *up,
struct of_serial_info *info)
{
struct resource resource;
struct device_node *np = ofdev->dev.of_node;
+ struct uart_port *port = &up->port;
u32 clk, spd, prop;
int ret, irq;
@@ -207,13 +155,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->flags |= UPF_SKIP_TEST;
port->dev = &ofdev->dev;
- port->rs485_config = of_8250_rs485_config;
+ port->rs485_config = serial8250_em485_config;
+ up->rs485_start_tx = serial8250_em485_start_tx;
+ up->rs485_stop_tx = serial8250_em485_stop_tx;
switch (type) {
- case PORT_TEGRA:
- port->handle_break = tegra_serial_handle_break;
- break;
-
case PORT_RT2880:
port->iotype = UPIO_AU;
break;
@@ -258,7 +204,7 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
return -ENOMEM;
memset(&port8250, 0, sizeof(port8250));
- ret = of_platform_serial_setup(ofdev, port_type, &port8250.port, info);
+ ret = of_platform_serial_setup(ofdev, port_type, &port8250, info);
if (ret)
goto err_free;
@@ -358,7 +304,6 @@ static const struct of_device_id of_platform_serial_table[] = {
{ .compatible = "ns16550", .data = (void *)PORT_16550, },
{ .compatible = "ns16750", .data = (void *)PORT_16750, },
{ .compatible = "ns16850", .data = (void *)PORT_16850, },
- { .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
{ .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, },
{ .compatible = "ralink,rt2880-uart", .data = (void *)PORT_RT2880, },
{ .compatible = "intel,xscale-uart", .data = (void *)PORT_XSCALE, },
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 6f343ca08440..16cfb887c5a3 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -40,6 +40,7 @@
* The same errata is applicable to AM335x and DRA7x processors too.
*/
#define UART_ERRATA_CLOCK_DISABLE (1 << 3)
+#define UART_HAS_EFR2 BIT(4)
#define OMAP_UART_FCR_RX_TRIG 6
#define OMAP_UART_FCR_TX_TRIG 4
@@ -93,6 +94,10 @@
#define OMAP_UART_REV_52 0x0502
#define OMAP_UART_REV_63 0x0603
+/* Enhanced features register 2 */
+#define UART_OMAP_EFR2 0x23
+#define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
+
struct omap8250_priv {
int line;
u8 habit;
@@ -105,6 +110,8 @@ struct omap8250_priv {
u8 delayed_restore;
u16 quot;
+ u8 tx_trigger;
+ u8 rx_trigger;
bool is_suspending;
int wakeirq;
int wakeups_enabled;
@@ -118,6 +125,17 @@ struct omap8250_priv {
bool throttled;
};
+struct omap8250_dma_params {
+ u32 rx_size;
+ u8 rx_trigger;
+ u8 tx_trigger;
+};
+
+struct omap8250_platdata {
+ struct omap8250_dma_params *dma_params;
+ u8 habit;
+};
+
#ifdef CONFIG_SERIAL_8250_DMA
static void omap_8250_rx_dma_flush(struct uart_8250_port *p);
#else
@@ -295,8 +313,8 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_RESTORE(16) |
OMAP_UART_TCR_HALT(52));
serial_out(up, UART_TI752_TLR,
- TRIGGER_TLR_MASK(TX_TRIGGER) << UART_TI752_TLR_TX |
- TRIGGER_TLR_MASK(RX_TRIGGER) << UART_TI752_TLR_RX);
+ TRIGGER_TLR_MASK(priv->tx_trigger) << UART_TI752_TLR_TX |
+ TRIGGER_TLR_MASK(priv->rx_trigger) << UART_TI752_TLR_RX);
serial_out(up, UART_LCR, 0);
@@ -435,8 +453,8 @@ static void omap_8250_set_termios(struct uart_port *port,
* This is because threshold and trigger values are the same.
*/
up->fcr = UART_FCR_ENABLE_FIFO;
- up->fcr |= TRIGGER_FCR_MASK(TX_TRIGGER) << OMAP_UART_FCR_TX_TRIG;
- up->fcr |= TRIGGER_FCR_MASK(RX_TRIGGER) << OMAP_UART_FCR_RX_TRIG;
+ up->fcr |= TRIGGER_FCR_MASK(priv->tx_trigger) << OMAP_UART_FCR_TX_TRIG;
+ up->fcr |= TRIGGER_FCR_MASK(priv->rx_trigger) << OMAP_UART_FCR_RX_TRIG;
priv->scr = OMAP_UART_SCR_RX_TRIG_GRANU1_MASK | OMAP_UART_SCR_TX_EMPTY |
OMAP_UART_SCR_TX_TRIG_GRANU1_MASK;
@@ -569,7 +587,7 @@ static void omap8250_uart_qos_work(struct work_struct *work)
struct omap8250_priv *priv;
priv = container_of(work, struct omap8250_priv, qos_work);
- pm_qos_update_request(&priv->pm_qos_request, priv->latency);
+ cpu_latency_qos_update_request(&priv->pm_qos_request, priv->latency);
}
#ifdef CONFIG_SERIAL_8250_DMA
@@ -651,7 +669,7 @@ static int omap_8250_startup(struct uart_port *port)
priv->wer |= OMAP_UART_TX_WAKEUP_EN;
serial_out(up, UART_OMAP_WER, priv->wer);
- if (up->dma)
+ if (up->dma && !(priv->habit & UART_HAS_EFR2))
up->dma->rx_dma(up);
pm_runtime_mark_last_busy(port->dev);
@@ -676,6 +694,8 @@ static void omap_8250_shutdown(struct uart_port *port)
pm_runtime_get_sync(port->dev);
serial_out(up, UART_OMAP_WER, 0);
+ if (priv->habit & UART_HAS_EFR2)
+ serial_out(up, UART_OMAP_EFR2, 0x0);
up->ier = 0;
serial_out(up, UART_IER, 0);
@@ -699,14 +719,12 @@ static void omap_8250_shutdown(struct uart_port *port)
static void omap_8250_throttle(struct uart_port *port)
{
struct omap8250_priv *priv = port->private_data;
- struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
pm_runtime_get_sync(port->dev);
spin_lock_irqsave(&port->lock, flags);
- up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
- serial_out(up, UART_IER, up->ier);
+ port->ops->stop_rx(port);
priv->throttled = true;
spin_unlock_irqrestore(&port->lock, flags);
@@ -714,36 +732,6 @@ static void omap_8250_throttle(struct uart_port *port)
pm_runtime_put_autosuspend(port->dev);
}
-static int omap_8250_rs485_config(struct uart_port *port,
- struct serial_rs485 *rs485)
-{
- struct uart_8250_port *up = up_to_u8250p(port);
-
- /* Clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- port->rs485 = *rs485;
-
- /*
- * Both serial8250_em485_init and serial8250_em485_destroy
- * are idempotent
- */
- if (rs485->flags & SER_RS485_ENABLED) {
- int ret = serial8250_em485_init(up);
-
- if (ret) {
- rs485->flags &= ~SER_RS485_ENABLED;
- port->rs485.flags &= ~SER_RS485_ENABLED;
- }
- return ret;
- }
-
- serial8250_em485_destroy(up);
-
- return 0;
-}
-
static void omap_8250_unthrottle(struct uart_port *port)
{
struct omap8250_priv *priv = port->private_data;
@@ -757,6 +745,7 @@ static void omap_8250_unthrottle(struct uart_port *port)
if (up->dma)
up->dma->rx_dma(up);
up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ port->read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
spin_unlock_irqrestore(&port->lock, flags);
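The read_status_mask restore above pairs with the throttle path, which now goes through port->ops->stop_rx(); a hedged sketch of the counterpart (assuming serial8250_stop_rx() masks the DR bit as in contemporary kernels; this fragment is not part of the hunk):
	/*
	 * Sketch of the stop_rx side (assumption, not from this diff):
	 * both the RX interrupts and the UART_LSR_DR status bit are
	 * masked, which is why unthrottle must restore UART_LSR_DR above.
	 */
	up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
	port->read_status_mask &= ~UART_LSR_DR;
	serial_out(up, UART_IER, up->ier);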
@@ -767,32 +756,50 @@ static void omap_8250_unthrottle(struct uart_port *port)
#ifdef CONFIG_SERIAL_8250_DMA
static int omap_8250_rx_dma(struct uart_8250_port *p);
+/* Must be called while priv->rx_dma_lock is held */
static void __dma_rx_do_complete(struct uart_8250_port *p)
{
- struct omap8250_priv *priv = p->port.private_data;
struct uart_8250_dma *dma = p->dma;
struct tty_port *tty_port = &p->port.state->port;
+ struct dma_chan *rxchan = dma->rxchan;
+ dma_cookie_t cookie;
struct dma_tx_state state;
int count;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&priv->rx_dma_lock, flags);
-
if (!dma->rx_running)
- goto unlock;
+ goto out;
+ cookie = dma->rx_cookie;
dma->rx_running = 0;
- dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ dmaengine_tx_status(rxchan, cookie, &state);
+
+ count = dma->rx_size - state.residue + state.in_flight_bytes;
+ if (count < dma->rx_size) {
+ dmaengine_terminate_async(rxchan);
+
+ /*
+ * Poll for teardown to complete, which guarantees that
+ * in-flight data is drained.
+ */
+ if (state.in_flight_bytes) {
+ int poll_count = 25;
- count = dma->rx_size - state.residue;
+ while (dmaengine_tx_status(rxchan, cookie, NULL) &&
+ poll_count--)
+ cpu_relax();
+ if (!poll_count)
+ dev_err(p->port.dev, "teardown incomplete\n");
+ }
+ }
+ if (!count)
+ goto out;
ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
p->port.icount.rx += ret;
p->port.icount.buf_overrun += count - ret;
-unlock:
- spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
+out:
tty_flip_buffer_push(tty_port);
}
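With the locking moved out to the annotation above, callers are now expected to hold the lock themselves; a minimal sketch of the calling pattern (fragment only, variable names as used in this driver):
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_dma_lock, flags);
	__dma_rx_do_complete(p);
	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);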
@@ -818,8 +825,12 @@ static void __dma_rx_complete(void *param)
return;
}
__dma_rx_do_complete(p);
- if (!priv->throttled)
- omap_8250_rx_dma(p);
+ if (!priv->throttled) {
+ p->ier |= UART_IER_RLSI | UART_IER_RDI;
+ serial_out(p, UART_IER, p->ier);
+ if (!(priv->habit & UART_HAS_EFR2))
+ omap_8250_rx_dma(p);
+ }
spin_unlock_irqrestore(&p->port.lock, flags);
}
@@ -845,10 +856,8 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
if (WARN_ON_ONCE(ret))
priv->rx_dma_broken = true;
}
- spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
-
__dma_rx_do_complete(p);
- dmaengine_terminate_all(dma->rxchan);
+ spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
}
static int omap_8250_rx_dma(struct uart_8250_port *p)
@@ -864,8 +873,20 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
spin_lock_irqsave(&priv->rx_dma_lock, flags);
- if (dma->rx_running)
+ if (dma->rx_running) {
+ enum dma_status state;
+
+ state = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, NULL);
+ if (state == DMA_COMPLETE) {
+ /*
+ * Disable RX interrupts to allow RX DMA completion
+ * callback to run.
+ */
+ p->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+ serial_out(p, UART_IER, p->ier);
+ }
goto out;
+ }
desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
dma->rx_size, DMA_DEV_TO_MEM,
@@ -1036,6 +1057,46 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
return omap_8250_rx_dma(up);
}
+static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
+ u8 iir, unsigned char status)
+{
+ if ((status & (UART_LSR_DR | UART_LSR_BI)) &&
+ (iir & UART_IIR_RDI)) {
+ if (handle_rx_dma(up, iir)) {
+ status = serial8250_rx_chars(up, status);
+ omap_8250_rx_dma(up);
+ }
+ }
+
+ return status;
+}
+
+static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
+ unsigned char status)
+{
+ /*
+ * Queue a new transfer if FIFO has data.
+ */
+ if ((status & (UART_LSR_DR | UART_LSR_BI)) &&
+ (up->ier & UART_IER_RDI)) {
+ omap_8250_rx_dma(up);
+ serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE);
+ } else if ((iir & 0x3f) == UART_IIR_RX_TIMEOUT) {
+ /*
+ * Disable RX timeout, read IIR to clear the current
+ * timeout condition, clear EFR2 to stop periodic
+ * timeouts, and re-enable interrupts.
+ */
+ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+ serial_out(up, UART_IER, up->ier);
+ omap_8250_rx_dma_flush(up);
+ serial_in(up, UART_IIR);
+ serial_out(up, UART_OMAP_EFR2, 0x0);
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
/*
* This is mostly serial8250_handle_irq(). We have a slightly different DMA
* hook for RX/TX and need different logic for them in the ISR. Therefore we
@@ -1044,6 +1105,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
static int omap_8250_dma_handle_irq(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ struct omap8250_priv *priv = up->port.private_data;
unsigned char status;
unsigned long flags;
u8 iir;
@@ -1053,19 +1115,18 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
iir = serial_port_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
serial8250_rpm_put(up);
- return 0;
+ return IRQ_HANDLED;
}
spin_lock_irqsave(&port->lock, flags);
status = serial_port_in(port, UART_LSR);
- if (status & (UART_LSR_DR | UART_LSR_BI)) {
- if (handle_rx_dma(up, iir)) {
- status = serial8250_rx_chars(up, status);
- omap_8250_rx_dma(up);
- }
- }
+ if (priv->habit & UART_HAS_EFR2)
+ am654_8250_handle_rx_dma(up, iir, status);
+ else
+ status = omap_8250_handle_rx_dma(up, iir, status);
+
serial8250_modem_status(up);
if (status & UART_LSR_THRE && up->dma->tx_err) {
if (uart_tx_stopped(&up->port) ||
@@ -1107,18 +1168,41 @@ static int omap8250_no_handle_irq(struct uart_port *port)
return 0;
}
-static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
-static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
-static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
+static struct omap8250_dma_params am654_dma = {
+ .rx_size = SZ_2K,
+ .rx_trigger = 1,
+ .tx_trigger = TX_TRIGGER,
+};
+
+static struct omap8250_dma_params am33xx_dma = {
+ .rx_size = RX_TRIGGER,
+ .rx_trigger = RX_TRIGGER,
+ .tx_trigger = TX_TRIGGER,
+};
+
+static struct omap8250_platdata am654_platdata = {
+ .dma_params = &am654_dma,
+ .habit = UART_HAS_EFR2,
+};
+
+static struct omap8250_platdata am33xx_platdata = {
+ .dma_params = &am33xx_dma,
+ .habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE,
+};
+
+static struct omap8250_platdata omap4_platdata = {
+ .dma_params = &am33xx_dma,
+ .habit = UART_ERRATA_CLOCK_DISABLE,
+};
static const struct of_device_id omap8250_dt_ids[] = {
- { .compatible = "ti,am654-uart" },
+ { .compatible = "ti,am654-uart", .data = &am654_platdata, },
{ .compatible = "ti,omap2-uart" },
{ .compatible = "ti,omap3-uart" },
- { .compatible = "ti,omap4-uart", .data = &omap4_habit, },
- { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
- { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
- { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
+ { .compatible = "ti,omap4-uart", .data = &omap4_platdata, },
+ { .compatible = "ti,am3352-uart", .data = &am33xx_platdata, },
+ { .compatible = "ti,am4372-uart", .data = &am33xx_platdata, },
+ { .compatible = "ti,dra742-uart", .data = &omap4_platdata, },
{},
};
MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
@@ -1129,10 +1213,10 @@ static int omap8250_probe(struct platform_device *pdev)
struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
struct device_node *np = pdev->dev.of_node;
struct omap8250_priv *priv;
+ const struct omap8250_platdata *pdata;
struct uart_8250_port up;
int ret;
void __iomem *membase;
- const struct of_device_id *id;
if (!regs || !irq) {
dev_err(&pdev->dev, "missing registers or irq\n");
@@ -1187,7 +1271,9 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.shutdown = omap_8250_shutdown;
up.port.throttle = omap_8250_throttle;
up.port.unthrottle = omap_8250_unthrottle;
- up.port.rs485_config = omap_8250_rs485_config;
+ up.port.rs485_config = serial8250_em485_config;
+ up.rs485_start_tx = serial8250_em485_start_tx;
+ up.rs485_stop_tx = serial8250_em485_stop_tx;
up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
ret = of_alias_get_id(np, "serial");
@@ -1211,9 +1297,9 @@ static int omap8250_probe(struct platform_device *pdev)
priv->wakeirq = irq_of_parse_and_map(np, 1);
- id = of_match_device(of_match_ptr(omap8250_dt_ids), &pdev->dev);
- if (id && id->data)
- priv->habit |= *(u8 *)id->data;
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (pdata)
+ priv->habit |= pdata->habit;
if (!up.port.uartclk) {
up.port.uartclk = DEFAULT_CLK_SPEED;
@@ -1222,15 +1308,15 @@ static int omap8250_probe(struct platform_device *pdev)
DEFAULT_CLK_SPEED);
}
- priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
- priv->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
- pm_qos_add_request(&priv->pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
- priv->latency);
+ priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency);
INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);
spin_lock_init(&priv->rx_dma_lock);
device_init_wakeup(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
/*
@@ -1244,12 +1330,13 @@ static int omap8250_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
pm_runtime_irq_safe(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
omap_serial_fill_features_erratas(&up, priv);
up.port.handle_irq = omap8250_no_handle_irq;
+ priv->rx_trigger = RX_TRIGGER;
+ priv->tx_trigger = TX_TRIGGER;
#ifdef CONFIG_SERIAL_8250_DMA
/*
* Oh DMA support. If there are no DMA properties in the DT then
@@ -1261,13 +1348,26 @@ static int omap8250_probe(struct platform_device *pdev)
*/
ret = of_property_count_strings(np, "dma-names");
if (ret == 2) {
+ struct omap8250_dma_params *dma_params = NULL;
+
up.dma = &priv->omap8250_dma;
- priv->omap8250_dma.fn = the_no_dma_filter_fn;
- priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
- priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
- priv->omap8250_dma.rx_size = RX_TRIGGER;
- priv->omap8250_dma.rxconf.src_maxburst = RX_TRIGGER;
- priv->omap8250_dma.txconf.dst_maxburst = TX_TRIGGER;
+ up.dma->fn = the_no_dma_filter_fn;
+ up.dma->tx_dma = omap_8250_tx_dma;
+ up.dma->rx_dma = omap_8250_rx_dma;
+ if (pdata)
+ dma_params = pdata->dma_params;
+
+ if (dma_params) {
+ up.dma->rx_size = dma_params->rx_size;
+ up.dma->rxconf.src_maxburst = dma_params->rx_trigger;
+ up.dma->txconf.dst_maxburst = dma_params->tx_trigger;
+ priv->rx_trigger = dma_params->rx_trigger;
+ priv->tx_trigger = dma_params->tx_trigger;
+ } else {
+ up.dma->rx_size = RX_TRIGGER;
+ up.dma->rxconf.src_maxburst = RX_TRIGGER;
+ up.dma->txconf.dst_maxburst = TX_TRIGGER;
+ }
}
#endif
ret = serial8250_register_8250_port(&up);
@@ -1295,7 +1395,7 @@ static int omap8250_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
serial8250_unregister_port(priv->line);
- pm_qos_remove_request(&priv->pm_qos_request);
+ cpu_latency_qos_remove_request(&priv->pm_qos_request);
device_init_wakeup(&pdev->dev, false);
return 0;
}
@@ -1445,7 +1545,7 @@ static int omap8250_runtime_suspend(struct device *dev)
if (up->dma && up->dma->rxchan)
omap_8250_rx_dma_flush(up);
- priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
schedule_work(&priv->qos_work);
return 0;
@@ -1465,7 +1565,7 @@ static int omap8250_runtime_resume(struct device *dev)
if (omap8250_lost_context(up))
omap8250_restore_regs(up);
- if (up->dma && up->dma->rxchan)
+ if (up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2))
omap_8250_rx_dma(up);
priv->latency = priv->calc_latency;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 939685fed396..0804469ff052 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -53,7 +53,7 @@ struct serial_private {
unsigned int nr;
struct pci_serial_quirk *quirk;
const struct pciserial_board *board;
- int line[0];
+ int line[];
};
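The change from int line[0] to int line[] adopts a C99 flexible array member; such structs are typically allocated with struct_size() so the trailing array is sized in one expression. A minimal sketch (nr_ports is an assumed count, not from this hunk):
	/* Sketch: sizing serial_private plus its flexible line[] array. */
	struct serial_private *priv;

	priv = kzalloc(struct_size(priv, line, nr_ports), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;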
static const struct pci_device_id pci_use_msi[] = {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 0325f2e53b74..f77bf820b7a3 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -557,17 +557,6 @@ static void serial8250_clear_fifos(struct uart_8250_port *p)
}
}
-static inline void serial8250_em485_rts_after_send(struct uart_8250_port *p)
-{
- unsigned char mcr = serial8250_in_MCR(p);
-
- if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
- mcr |= UART_MCR_RTS;
- else
- mcr &= ~UART_MCR_RTS;
- serial8250_out_MCR(p, mcr);
-}
-
static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t);
static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t);
@@ -615,7 +604,7 @@ EXPORT_SYMBOL_GPL(serial8250_rpm_put);
*
* Return 0 - success, -errno - otherwise
*/
-int serial8250_em485_init(struct uart_8250_port *p)
+static int serial8250_em485_init(struct uart_8250_port *p)
{
if (p->em485)
return 0;
@@ -632,11 +621,12 @@ int serial8250_em485_init(struct uart_8250_port *p)
p->em485->start_tx_timer.function = &serial8250_em485_handle_start_tx;
p->em485->port = p;
p->em485->active_timer = NULL;
- serial8250_em485_rts_after_send(p);
+ p->em485->tx_stopped = true;
+
+ p->rs485_stop_tx(p);
return 0;
}
-EXPORT_SYMBOL_GPL(serial8250_em485_init);
/**
* serial8250_em485_destroy() - put uart_8250_port into normal state
@@ -664,6 +654,52 @@ void serial8250_em485_destroy(struct uart_8250_port *p)
}
EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
+/**
+ * serial8250_em485_config() - generic ->rs485_config() callback
+ * @port: uart port
+ * @rs485: rs485 settings
+ *
+ * Generic callback usable by 8250 uart drivers to activate rs485 settings
+ * if the uart is incapable of driving RTS as a Transmit Enable signal in
+ * hardware, relying on software emulation instead.
+ */
+int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ /* pick sane settings if the user hasn't */
+ if (!!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !!(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ }
+
+ /* clamp the delays to [0, 100ms] */
+ rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+ rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
+ memset(rs485->padding, 0, sizeof(rs485->padding));
+ port->rs485 = *rs485;
+
+ /*
+ * Both serial8250_em485_init() and serial8250_em485_destroy()
+ * are idempotent.
+ */
+ if (rs485->flags & SER_RS485_ENABLED) {
+ int ret = serial8250_em485_init(up);
+
+ if (ret) {
+ rs485->flags &= ~SER_RS485_ENABLED;
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
+ return ret;
+ }
+
+ serial8250_em485_destroy(up);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_em485_config);
+
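A driver adopts this emulation by pointing the three callbacks at the generic helpers, exactly as the 8250_omap conversion earlier in this diff does; a condensed sketch (example_probe and its platform device are assumptions):
	/*
	 * Sketch: wiring the generic em485 helpers into an 8250 driver,
	 * mirroring the 8250_omap probe changes above.
	 */
	static int example_probe(struct platform_device *pdev)
	{
		struct uart_8250_port up = { };

		up.port.rs485_config = serial8250_em485_config;
		up.rs485_start_tx = serial8250_em485_start_tx;
		up.rs485_stop_tx = serial8250_em485_stop_tx;

		return serial8250_register_8250_port(&up);
	}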
/*
* These two wrappers ensure that enable_runtime_pm_tx() can be called more than
* once and disable_runtime_pm_tx() will still disable RPM because the fifo is
@@ -1318,8 +1354,8 @@ out_lock:
fintek_8250_probe(up);
if (up->capabilities != old_capabilities) {
- pr_warn("%s: detected caps %08x should be %08x\n",
- port->name, old_capabilities, up->capabilities);
+ dev_warn(port->dev, "detected caps %08x should be %08x\n",
+ old_capabilities, up->capabilities);
}
out:
DEBUG_AUTOCONF("iir=%d ", scratch);
@@ -1394,9 +1430,21 @@ static void serial8250_stop_rx(struct uart_port *port)
serial8250_rpm_put(up);
}
-static void __do_stop_tx_rs485(struct uart_8250_port *p)
+/**
+ * serial8250_em485_stop_tx() - generic ->rs485_stop_tx() callback
+ * @p: uart 8250 port
+ *
+ * Generic callback usable by 8250 uart drivers to stop rs485 transmission.
+ */
+void serial8250_em485_stop_tx(struct uart_8250_port *p)
{
- serial8250_em485_rts_after_send(p);
+ unsigned char mcr = serial8250_in_MCR(p);
+
+ if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ mcr |= UART_MCR_RTS;
+ else
+ mcr &= ~UART_MCR_RTS;
+ serial8250_out_MCR(p, mcr);
/*
* Empty the RX FIFO, we are not interested in anything
@@ -1410,6 +1458,8 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
serial_port_out(&p->port, UART_IER, p->ier);
}
}
+EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
+
static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
{
struct uart_8250_em485 *em485;
@@ -1422,8 +1472,9 @@ static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
serial8250_rpm_get(p);
spin_lock_irqsave(&p->port.lock, flags);
if (em485->active_timer == &em485->stop_tx_timer) {
- __do_stop_tx_rs485(p);
+ p->rs485_stop_tx(p);
em485->active_timer = NULL;
+ em485->tx_stopped = true;
}
spin_unlock_irqrestore(&p->port.lock, flags);
serial8250_rpm_put(p);
@@ -1444,7 +1495,7 @@ static void __stop_tx_rs485(struct uart_8250_port *p)
struct uart_8250_em485 *em485 = p->em485;
/*
- * __do_stop_tx_rs485 is going to set RTS according to config
+ * rs485_stop_tx() is going to set RTS according to config
* AND flush RX FIFO if required.
*/
if (p->port.rs485.delay_rts_after_send > 0) {
@@ -1452,7 +1503,9 @@ static void __stop_tx_rs485(struct uart_8250_port *p)
start_hrtimer_ms(&em485->stop_tx_timer,
p->port.rs485.delay_rts_after_send);
} else {
- __do_stop_tx_rs485(p);
+ p->rs485_stop_tx(p);
+ em485->active_timer = NULL;
+ em485->tx_stopped = true;
}
}
@@ -1477,8 +1530,6 @@ static inline void __stop_tx(struct uart_8250_port *p)
if ((lsr & BOTH_EMPTY) != BOTH_EMPTY)
return;
- em485->active_timer = NULL;
-
__stop_tx_rs485(p);
}
__do_stop_tx(p);
@@ -1528,25 +1579,42 @@ static inline void __start_tx(struct uart_port *port)
}
}
-static inline void start_tx_rs485(struct uart_port *port)
+/**
+ * serial8250_em485_start_tx() - generic ->rs485_start_tx() callback
+ * @up: uart 8250 port
+ *
+ * Generic callback usable by 8250 uart drivers to start rs485 transmission.
+ * Assumes that setting the RTS bit in the MCR register means RTS is high.
+ * (Some chips use inverse semantics.) Further assumes that reception is
+ * stoppable by disabling the UART_IER_RDI interrupt. (Some chips set the
+ * UART_LSR_DR bit even when UART_IER_RDI is disabled, foiling this approach.)
+ */
+void serial8250_em485_start_tx(struct uart_8250_port *up)
{
- struct uart_8250_port *up = up_to_u8250p(port);
- struct uart_8250_em485 *em485 = up->em485;
- unsigned char mcr;
+ unsigned char mcr = serial8250_in_MCR(up);
if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
serial8250_stop_rx(&up->port);
+ if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
+ mcr |= UART_MCR_RTS;
+ else
+ mcr &= ~UART_MCR_RTS;
+ serial8250_out_MCR(up, mcr);
+}
+EXPORT_SYMBOL_GPL(serial8250_em485_start_tx);
+
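For a chip with the inverse RTS semantics the comment above warns about, a driver would install its own ->rs485_start_tx() rather than the generic helper; a hedged sketch (the chip and function name are hypothetical):
	/*
	 * Sketch: override for a hypothetical chip where clearing MCR.RTS
	 * drives the RTS pin high, the inverse of the generic helper's
	 * assumption.
	 */
	static void example_inverted_start_tx(struct uart_8250_port *up)
	{
		unsigned char mcr = serial8250_in_MCR(up);

		if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
			serial8250_stop_rx(&up->port);

		if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
			mcr &= ~UART_MCR_RTS;	/* inverted polarity */
		else
			mcr |= UART_MCR_RTS;
		serial8250_out_MCR(up, mcr);
	}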
+static inline void start_tx_rs485(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ struct uart_8250_em485 *em485 = up->em485;
+
em485->active_timer = NULL;
- mcr = serial8250_in_MCR(up);
- if (!!(up->port.rs485.flags & SER_RS485_RTS_ON_SEND) !=
- !!(mcr & UART_MCR_RTS)) {
- if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
- mcr |= UART_MCR_RTS;
- else
- mcr &= ~UART_MCR_RTS;
- serial8250_out_MCR(up, mcr);
+ if (em485->tx_stopped) {
+ em485->tx_stopped = false;
+
+ up->rs485_start_tx(up);
if (up->port.rs485.delay_rts_before_send > 0) {
em485->active_timer = &em485->start_tx_timer;
@@ -1683,7 +1751,7 @@ void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
lsr &= port->read_status_mask;
if (lsr & UART_LSR_BI) {
- pr_debug("%s: handling break\n", __func__);
+ dev_dbg(port->dev, "handling break\n");
flag = TTY_BREAK;
} else if (lsr & UART_LSR_PE)
flag = TTY_PARITY;
@@ -1815,6 +1883,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
unsigned char status;
unsigned long flags;
struct uart_8250_port *up = up_to_u8250p(port);
+ bool skip_rx = false;
if (iir & UART_IIR_NO_INT)
return 0;
@@ -1823,7 +1892,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial_port_in(port, UART_LSR);
- if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ /*
+ * If port is stopped and there are no error conditions in the
+ * FIFO, then don't drain the FIFO, as this may lead to TTY buffer
+ * overflow. Leaving the RX FIFO unserviced lets auto HW flow
+ * control engage once its occupancy reaches the preset threshold,
+ * halting RX. This only works when auto HW flow control is
+ * available.
+ */
+ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
+ (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
+ !(port->read_status_mask & UART_LSR_DR))
+ skip_rx = true;
+
+ if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
@@ -1924,6 +2006,13 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct uart_8250_port *up = up_to_u8250p(port);
unsigned char mcr;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ if (serial8250_in_MCR(up) & UART_MCR_RTS)
+ mctrl |= TIOCM_RTS;
+ else
+ mctrl &= ~TIOCM_RTS;
+ }
+
mcr = serial8250_TIOCM_to_MCR(mctrl);
mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
@@ -2134,7 +2223,7 @@ int serial8250_do_startup(struct uart_port *port)
*/
if (!(port->flags & UPF_BUGGY_UART) &&
(serial_port_in(port, UART_LSR) == 0xff)) {
- pr_info_ratelimited("%s: LSR safety check engaged!\n", port->name);
+ dev_info_ratelimited(port->dev, "LSR safety check engaged!\n");
retval = -ENODEV;
goto out;
}
@@ -2166,8 +2255,7 @@ int serial8250_do_startup(struct uart_port *port)
(port->type == PORT_ALTR_16550_F128)) && (port->fifosize > 1)) {
/* Bounds checking of TX threshold (valid 0 to fifosize-2) */
if ((up->tx_loadsz < 2) || (up->tx_loadsz > port->fifosize)) {
- pr_err("%s TX FIFO Threshold errors, skipping\n",
- port->name);
+ dev_err(port->dev, "TX FIFO Threshold errors, skipping\n");
} else {
serial_port_out(port, UART_ALTR_AFR,
UART_ALTR_EN_TXFIFO_LW);
@@ -2268,8 +2356,7 @@ int serial8250_do_startup(struct uart_port *port)
if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
if (!(up->bugs & UART_BUG_TXEN)) {
up->bugs |= UART_BUG_TXEN;
- pr_debug("%s - enabling bad tx status workarounds\n",
- port->name);
+ dev_dbg(port->dev, "enabling bad tx status workarounds\n");
}
} else {
up->bugs &= ~UART_BUG_TXEN;
@@ -2294,10 +2381,14 @@ dont_test_tx_en:
* Request DMA channels for both RX and TX.
*/
if (up->dma) {
- retval = serial8250_request_dma(up);
- if (retval) {
- pr_warn_ratelimited("%s - failed to request DMA\n",
- port->name);
+ const char *msg = NULL;
+
+ if (uart_console(port))
+ msg = "forbid DMA for kernel console";
+ else if (serial8250_request_dma(up))
+ msg = "failed to request DMA";
+ if (msg) {
+ dev_warn_ratelimited(port->dev, "%s\n", msg);
up->dma = NULL;
}
}
@@ -2880,7 +2971,7 @@ static int do_serial8250_get_rxtrig(struct tty_port *port)
return rxtrig_bytes;
}
-static ssize_t serial8250_get_attr_rx_trig_bytes(struct device *dev,
+static ssize_t rx_trig_bytes_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tty_port *port = dev_get_drvdata(dev);
@@ -2926,7 +3017,7 @@ static int do_serial8250_set_rxtrig(struct tty_port *port, unsigned char bytes)
return ret;
}
-static ssize_t serial8250_set_attr_rx_trig_bytes(struct device *dev,
+static ssize_t rx_trig_bytes_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct tty_port *port = dev_get_drvdata(dev);
@@ -2947,18 +3038,16 @@ static ssize_t serial8250_set_attr_rx_trig_bytes(struct device *dev,
return count;
}
-static DEVICE_ATTR(rx_trig_bytes, S_IRUSR | S_IWUSR | S_IRGRP,
- serial8250_get_attr_rx_trig_bytes,
- serial8250_set_attr_rx_trig_bytes);
+static DEVICE_ATTR_RW(rx_trig_bytes);
static struct attribute *serial8250_dev_attrs[] = {
&dev_attr_rx_trig_bytes.attr,
- NULL,
- };
+ NULL
+};
static struct attribute_group serial8250_dev_attr_group = {
.attrs = serial8250_dev_attrs,
- };
+};
static void register_dev_spec_attr_grp(struct uart_8250_port *up)
{
@@ -2987,6 +3076,9 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (flags & UART_CONFIG_TYPE)
autoconfig(up);
+ if (port->rs485.flags & SER_RS485_ENABLED)
+ port->rs485_config(port, &port->rs485);
+
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
up->bugs |= UART_BUG_NOMSR;
@@ -3127,10 +3219,14 @@ static void serial8250_console_restore(struct uart_8250_port *up)
* any possible real use of the port...
*
* The console_lock must be held when we get here.
+ *
+ * Doing runtime PM is really a bad idea for the kernel console.
+ * Thus, we assume the function is called when the device is powered up.
*/
void serial8250_console_write(struct uart_8250_port *up, const char *s,
unsigned int count)
{
+ struct uart_8250_em485 *em485 = up->em485;
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int ier;
@@ -3138,8 +3234,6 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
touch_nmi_watchdog();
- serial8250_rpm_get(up);
-
if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
@@ -3161,6 +3255,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
up->canary = 0;
}
+ if (em485) {
+ if (em485->tx_stopped)
+ up->rs485_start_tx(up);
+ mdelay(port->rs485.delay_rts_before_send);
+ }
+
uart_console_write(port, s, count, serial8250_console_putchar);
/*
@@ -3168,6 +3268,13 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
* and restore the IER
*/
wait_for_xmitr(up, BOTH_EMPTY);
+
+ if (em485) {
+ mdelay(port->rs485.delay_rts_after_send);
+ if (em485->tx_stopped)
+ up->rs485_stop_tx(up);
+ }
+
serial_port_out(port, UART_IER, ier);
/*
@@ -3182,7 +3289,6 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
- serial8250_rpm_put(up);
}
static unsigned int probe_baud(struct uart_port *port)
@@ -3206,6 +3312,7 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
int bits = 8;
int parity = 'n';
int flow = 'n';
+ int ret;
if (!port->iobase && !port->membase)
return -ENODEV;
@@ -3215,7 +3322,22 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
else if (probe)
baud = probe_baud(port);
- return uart_set_options(port, port->cons, baud, parity, bits, flow);
+ ret = uart_set_options(port, port->cons, baud, parity, bits, flow);
+ if (ret)
+ return ret;
+
+ if (port->dev)
+ pm_runtime_get_sync(port->dev);
+
+ return 0;
+}
+
+int serial8250_console_exit(struct uart_port *port)
+{
+ if (port->dev)
+ pm_runtime_put_sync(port->dev);
+
+ return 0;
}
#endif /* CONFIG_SERIAL_8250_CONSOLE */
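Taken together, serial8250_console_setup() now acquires a runtime-PM reference and serial8250_console_exit() releases it, so the console hardware stays powered in between; the pairing in isolation (generic device, attach/detach names assumed):
	/*
	 * Sketch of the runtime-PM balance above: one get when the console
	 * attaches, one put when it exits. 'dev' stands in for port->dev.
	 */
	static int example_console_attach(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* keep the UART powered */
		return 0;
	}

	static int example_console_detach(struct device *dev)
	{
		pm_runtime_put_sync(dev);	/* balances the get above */
		return 0;
	}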
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index c47188860e32..11612d174716 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -123,7 +123,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
uart.port.regshift = 2;
uart.port.irq = irqres->start;
uart.port.fifosize = 64;
- uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST;
+ uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST | UPF_FIXED_TYPE;
uart.port.dev = &pdev->dev;
uart.port.uartclk = clk_get_rate(data->clk);
uart.port.pm = serial_pxa_pm;
diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c
new file mode 100644
index 000000000000..c0ffad1572c6
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_tegra.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Serial Port driver for Tegra devices
+ *
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "8250.h"
+
+struct tegra_uart {
+ struct clk *clk;
+ struct reset_control *rst;
+ int line;
+};
+
+static void tegra_uart_handle_break(struct uart_port *p)
+{
+ unsigned int status, tmout = 10000;
+
+ do {
+ status = p->serial_in(p, UART_LSR);
+ if (status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS))
+ status = p->serial_in(p, UART_RX);
+ else
+ break;
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while (1);
+}
+
+static int tegra_uart_probe(struct platform_device *pdev)
+{
+ struct uart_8250_port port8250;
+ struct tegra_uart *uart;
+ struct uart_port *port;
+ struct resource *res;
+ int ret;
+
+ uart = devm_kzalloc(&pdev->dev, sizeof(*uart), GFP_KERNEL);
+ if (!uart)
+ return -ENOMEM;
+
+ memset(&port8250, 0, sizeof(port8250));
+
+ port = &port8250.port;
+ spin_lock_init(&port->lock);
+
+ port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT |
+ UPF_FIXED_TYPE;
+ port->iotype = UPIO_MEM32;
+ port->regshift = 2;
+ port->type = PORT_TEGRA;
+ port->irqflags |= IRQF_SHARED;
+ port->dev = &pdev->dev;
+ port->handle_break = tegra_uart_handle_break;
+
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (ret >= 0)
+ port->line = ret;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+
+ port->irq = ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ port->membase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!port->membase)
+ return -ENOMEM;
+
+ port->mapbase = res->start;
+ port->mapsize = resource_size(res);
+
+ uart->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (IS_ERR(uart->rst))
+ return PTR_ERR(uart->rst);
+
+ if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &port->uartclk)) {
+ uart->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(uart->clk)) {
+ dev_err(&pdev->dev, "failed to get clock!\n");
+ return -ENODEV;
+ }
+
+ ret = clk_prepare_enable(uart->clk);
+ if (ret < 0)
+ return ret;
+
+ port->uartclk = clk_get_rate(uart->clk);
+ }
+
+ ret = reset_control_deassert(uart->rst);
+ if (ret)
+ goto err_clkdisable;
+
+ ret = serial8250_register_8250_port(&port8250);
+ if (ret < 0)
+ goto err_clkdisable;
+
+ platform_set_drvdata(pdev, uart);
+ uart->line = ret;
+
+ return 0;
+
+err_clkdisable:
+ clk_disable_unprepare(uart->clk);
+
+ return ret;
+}
+
+static int tegra_uart_remove(struct platform_device *pdev)
+{
+ struct tegra_uart *uart = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(uart->line);
+ reset_control_assert(uart->rst);
+ clk_disable_unprepare(uart->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_uart_suspend(struct device *dev)
+{
+ struct tegra_uart *uart = dev_get_drvdata(dev);
+ struct uart_8250_port *port8250 = serial8250_get_port(uart->line);
+ struct uart_port *port = &port8250->port;
+
+ serial8250_suspend_port(uart->line);
+
+ if (!uart_console(port) || console_suspend_enabled)
+ clk_disable_unprepare(uart->clk);
+
+ return 0;
+}
+
+static int tegra_uart_resume(struct device *dev)
+{
+ struct tegra_uart *uart = dev_get_drvdata(dev);
+ struct uart_8250_port *port8250 = serial8250_get_port(uart->line);
+ struct uart_port *port = &port8250->port;
+
+ if (!uart_console(port) || console_suspend_enabled)
+ clk_prepare_enable(uart->clk);
+
+ serial8250_resume_port(uart->line);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tegra_uart_pm_ops, tegra_uart_suspend,
+ tegra_uart_resume);
+
+static const struct of_device_id tegra_uart_of_match[] = {
+ { .compatible = "nvidia,tegra20-uart", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
+
+static const struct acpi_device_id tegra_uart_acpi_match[] = {
+ { "NVDA0100", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, tegra_uart_acpi_match);
+
+static struct platform_driver tegra_uart_driver = {
+ .driver = {
+ .name = "tegra-uart",
+ .pm = &tegra_uart_pm_ops,
+ .of_match_table = tegra_uart_of_match,
+ .acpi_match_table = ACPI_PTR(tegra_uart_acpi_match),
+ },
+ .probe = tegra_uart_probe,
+ .remove = tegra_uart_remove,
+};
+
+module_platform_driver(tegra_uart_driver);
+
+MODULE_AUTHOR("Jeff Brasen <jbrasen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra 8250 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index f16824bbb573..af0688156dd0 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -500,6 +500,15 @@ config SERIAL_8250_PXA
applicable to both devicetree and legacy boards, and early console is
part of its support.
+config SERIAL_8250_TEGRA
+ tristate "8250 support for Tegra serial ports"
+ default SERIAL_8250
+ depends on SERIAL_8250
+ depends on ARCH_TEGRA || COMPILE_TEST
+ help
+ Select this option if you have a machine with an NVIDIA Tegra SoC and
+ wish to enable the 8250 serial driver for the Tegra serial interfaces.
+
config SERIAL_OF_PLATFORM
tristate "Devicetree based probing for 8250 ports"
depends on SERIAL_8250 && OF
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 51a6079d3f1f..a8bfb654d490 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o
+obj-$(CONFIG_SERIAL_8250_TEGRA) += 8250_tegra.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 52eaac21ff9f..adf9e80e7dc9 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -3,8 +3,6 @@
# Serial device configuration
#
-if TTY
-
menu "Serial drivers"
depends on HAS_IOMEM
@@ -88,7 +86,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
config SERIAL_EARLYCON_RISCV_SBI
bool "Early console using RISC-V SBI"
- depends on RISCV_SBI
+ depends on RISCV_SBI_V01
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
@@ -260,15 +258,6 @@ config SERIAL_SAMSUNG_UARTS
help
Select the number of available UART ports for the Samsung S3C
serial driver
-
-config SERIAL_SAMSUNG_DEBUG
- bool "Samsung SoC serial debug"
- depends on SERIAL_SAMSUNG && DEBUG_LL
- help
- Add support for debugging the serial driver. Since this is
- generally being used as a console, we use our own output
- routines that go via the low-level debug printascii()
- function.
config SERIAL_SAMSUNG_CONSOLE
bool "Support for console on Samsung SoC serial port"
@@ -1111,7 +1100,7 @@ config SERIAL_SC16IS7XX_SPI
help
Enable SC16IS7xx driver on SPI bus,
If required say y, and say n to spi if not required,
- This is additional support to exsisting driver.
+ This is additional support to existing driver.
You must select at least one bus for the driver to be built.
config SERIAL_TIMBERDALE
@@ -1279,6 +1268,7 @@ config SERIAL_AR933X
tristate "AR933X serial port support"
depends on HAVE_CLK && ATH79
select SERIAL_CORE
+ select SERIAL_MCTRL_GPIO if GPIOLIB
help
If you have an Atheros AR933X SOC based board and want to use the
built-in UART of the SoC, say Y to this option.
@@ -1452,8 +1442,8 @@ config SERIAL_MEN_Z135
config SERIAL_SPRD
tristate "Support for Spreadtrum serial"
- depends on ARCH_SPRD
select SERIAL_CORE
+ depends on COMMON_CLK
help
This enables the driver for the Spreadtrum's serial.
@@ -1576,5 +1566,3 @@ endmenu
config SERIAL_MCTRL_GPIO
tristate
-
-endif # TTY
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index ea12f10610b6..7e7f1398019f 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -13,6 +13,7 @@
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -29,6 +30,8 @@
#include <asm/mach-ath79/ar933x_uart.h>
+#include "serial_mctrl_gpio.h"
+
#define DRIVER_NAME "ar933x-uart"
#define AR933X_UART_MAX_SCALE 0xff
@@ -47,6 +50,8 @@ struct ar933x_uart_port {
unsigned int min_baud;
unsigned int max_baud;
struct clk *clk;
+ struct mctrl_gpios *gpios;
+ struct gpio_desc *rts_gpiod;
};
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
@@ -100,6 +105,18 @@ static inline void ar933x_uart_stop_tx_interrupt(struct ar933x_uart_port *up)
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
+static inline void ar933x_uart_start_rx_interrupt(struct ar933x_uart_port *up)
+{
+ up->ier |= AR933X_UART_INT_RX_VALID;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+}
+
+static inline void ar933x_uart_stop_rx_interrupt(struct ar933x_uart_port *up)
+{
+ up->ier &= ~AR933X_UART_INT_RX_VALID;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+}
+
static inline void ar933x_uart_putc(struct ar933x_uart_port *up, int ch)
{
unsigned int rdata;
@@ -125,11 +142,21 @@ static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
static unsigned int ar933x_uart_get_mctrl(struct uart_port *port)
{
- return TIOCM_CAR;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
+ int ret = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ mctrl_gpio_get(up->gpios, &ret);
+
+ return ret;
}
static void ar933x_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
+
+ mctrl_gpio_set(up->gpios, mctrl);
}
static void ar933x_uart_start_tx(struct uart_port *port)
@@ -140,6 +167,37 @@ static void ar933x_uart_start_tx(struct uart_port *port)
ar933x_uart_start_tx_interrupt(up);
}
+static void ar933x_uart_wait_tx_complete(struct ar933x_uart_port *up)
+{
+ unsigned int status;
+ unsigned int timeout = 60000;
+
+ /* Wait up to 60ms for the character(s) to be sent. */
+ do {
+ status = ar933x_uart_read(up, AR933X_UART_CS_REG);
+ if (--timeout == 0)
+ break;
+ udelay(1);
+ } while (status & AR933X_UART_CS_TX_BUSY);
+
+ if (timeout == 0)
+ dev_err(up->port.dev, "waiting for TX timed out\n");
+}
+
+static void ar933x_uart_rx_flush(struct ar933x_uart_port *up)
+{
+ unsigned int status;
+
+ /* clear RX_VALID interrupt */
+ ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_RX_VALID);
+
+ /* remove characters from the RX FIFO */
+ do {
+ ar933x_uart_write(up, AR933X_UART_DATA_REG, AR933X_UART_DATA_RX_CSR);
+ status = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ } while (status & AR933X_UART_DATA_RX_CSR);
+}
+
static void ar933x_uart_stop_tx(struct uart_port *port)
{
struct ar933x_uart_port *up =
@@ -153,8 +211,7 @@ static void ar933x_uart_stop_rx(struct uart_port *port)
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
- up->ier &= ~AR933X_UART_INT_RX_VALID;
- ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+ ar933x_uart_stop_rx_interrupt(up);
}
static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
@@ -336,11 +393,20 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
+ struct serial_rs485 *rs485conf = &up->port.rs485;
int count;
+ bool half_duplex_send = false;
if (uart_tx_stopped(&up->port))
return;
+ if ((rs485conf->flags & SER_RS485_ENABLED) &&
+ (up->port.x_char || !uart_circ_empty(xmit))) {
+ ar933x_uart_stop_rx_interrupt(up);
+ gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_ON_SEND));
+ half_duplex_send = true;
+ }
+
count = up->port.fifosize;
do {
unsigned int rdata;
@@ -368,8 +434,14 @@ static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
- if (!uart_circ_empty(xmit))
+ if (!uart_circ_empty(xmit)) {
ar933x_uart_start_tx_interrupt(up);
+ } else if (half_duplex_send) {
+ ar933x_uart_wait_tx_complete(up);
+ ar933x_uart_rx_flush(up);
+ ar933x_uart_start_rx_interrupt(up);
+ gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
+ }
}
static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
@@ -427,8 +499,7 @@ static int ar933x_uart_startup(struct uart_port *port)
AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
/* Enable RX interrupts */
- up->ier = AR933X_UART_INT_RX_VALID;
- ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+ ar933x_uart_start_rx_interrupt(up);
spin_unlock_irqrestore(&up->port.lock, flags);
@@ -511,6 +582,21 @@ static const struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
+static int ar933x_config_rs485(struct uart_port *port,
+ struct serial_rs485 *rs485conf)
+{
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
+
+ if ((rs485conf->flags & SER_RS485_ENABLED) &&
+ !up->rts_gpiod) {
+ dev_err(port->dev, "RS485 needs rts-gpio\n");
+ return -EINVAL;
+ }
+ port->rs485 = *rs485conf;
+ return 0;
+}
+
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
static struct ar933x_uart_port *
ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
@@ -680,6 +766,8 @@ static int ar933x_uart_probe(struct platform_device *pdev)
goto err_disable_clk;
}
+ uart_get_rs485_mode(&pdev->dev, &port->rs485);
+
port->mapbase = mem_res->start;
port->line = id;
port->irq = irq_res->start;
@@ -690,6 +778,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
port->regshift = 2;
port->fifosize = AR933X_UART_FIFO_SIZE;
port->ops = &ar933x_uart_ops;
+ port->rs485_config = ar933x_config_rs485;
baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1);
up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD);
@@ -697,6 +786,18 @@ static int ar933x_uart_probe(struct platform_device *pdev)
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
+ up->gpios = mctrl_gpio_init(port, 0);
+ if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS)
+ return PTR_ERR(up->gpios);
+
+ up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
+
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !up->rts_gpiod) {
+ dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
+
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
ar933x_console_ports[up->port.line] = up;
#endif
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index a39c87a7c2e1..8d7080efad9b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -20,15 +20,12 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/suspend.h>
@@ -2679,18 +2676,8 @@ static struct console atmel_console = {
#define ATMEL_CONSOLE_DEVICE (&atmel_console)
-static inline bool atmel_is_console_port(struct uart_port *port)
-{
- return port->cons && port->cons->index == port->line;
-}
-
#else
#define ATMEL_CONSOLE_DEVICE NULL
-
-static inline bool atmel_is_console_port(struct uart_port *port)
-{
- return false;
-}
#endif
static struct uart_driver atmel_uart = {
@@ -2719,14 +2706,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_is_console_port(port) && console_suspend_enabled) {
+ if (uart_console(port) && console_suspend_enabled) {
/* Drain the TX shifter */
while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
ATMEL_US_TXEMPTY))
cpu_relax();
}
- if (atmel_is_console_port(port) && !console_suspend_enabled) {
+ if (uart_console(port) && !console_suspend_enabled) {
/* Cache register values as we won't get a full shutdown/startup
* cycle
*/
@@ -2762,7 +2749,7 @@ static int atmel_serial_resume(struct platform_device *pdev)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned long flags;
- if (atmel_is_console_port(port) && !console_suspend_enabled) {
+ if (uart_console(port) && !console_suspend_enabled) {
atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
@@ -2916,7 +2903,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
goto err_add_port;
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
- if (atmel_is_console_port(&atmel_port->uart)
+ if (uart_console(&atmel_port->uart)
&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
/*
* The serial core enabled the clock for us, so undo
@@ -2959,7 +2946,7 @@ err_add_port:
kfree(atmel_port->rx_ring.buf);
atmel_port->rx_ring.buf = NULL;
err_alloc_ring:
- if (!atmel_is_console_port(&atmel_port->uart)) {
+ if (!uart_console(&atmel_port->uart)) {
clk_put(atmel_port->clk);
atmel_port->clk = NULL;
}
diff --git a/drivers/tty/serial/atmel_serial.h b/drivers/tty/serial/atmel_serial.h
index d811d4f2d0c0..0d8a0f9cc5c3 100644
--- a/drivers/tty/serial/atmel_serial.h
+++ b/drivers/tty/serial/atmel_serial.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* include/linux/atmel_serial.h
*
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index 9f175a92fb5d..6113b953ce25 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for CPM (SCC/SMC) serial ports
*
@@ -13,6 +13,8 @@
#include <linux/platform_device.h>
#include <linux/fs_uart_pd.h>
+struct gpio_desc;
+
#if defined(CONFIG_CPM2)
#include "cpm_uart_cpm2.h"
#elif defined(CONFIG_CPM1)
@@ -80,7 +82,7 @@ struct uart_cpm_port {
int wait_closing;
/* value to combine with opcode to form cpm command */
u32 command;
- int gpios[NUM_GPIOS];
+ struct gpio_desc *gpios[NUM_GPIOS];
};
extern int cpm_uart_nr;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index d4b81b06e0cb..a04f74d2e854 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -30,8 +30,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/clk.h>
#include <asm/io.h>
@@ -88,11 +87,11 @@ static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct uart_cpm_port *pinfo =
container_of(port, struct uart_cpm_port, port);
- if (pinfo->gpios[GPIO_RTS] >= 0)
- gpio_set_value(pinfo->gpios[GPIO_RTS], !(mctrl & TIOCM_RTS));
+ if (pinfo->gpios[GPIO_RTS])
+ gpiod_set_value(pinfo->gpios[GPIO_RTS], !(mctrl & TIOCM_RTS));
- if (pinfo->gpios[GPIO_DTR] >= 0)
- gpio_set_value(pinfo->gpios[GPIO_DTR], !(mctrl & TIOCM_DTR));
+ if (pinfo->gpios[GPIO_DTR])
+ gpiod_set_value(pinfo->gpios[GPIO_DTR], !(mctrl & TIOCM_DTR));
}
static unsigned int cpm_uart_get_mctrl(struct uart_port *port)
@@ -101,23 +100,23 @@ static unsigned int cpm_uart_get_mctrl(struct uart_port *port)
container_of(port, struct uart_cpm_port, port);
unsigned int mctrl = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
- if (pinfo->gpios[GPIO_CTS] >= 0) {
- if (gpio_get_value(pinfo->gpios[GPIO_CTS]))
+ if (pinfo->gpios[GPIO_CTS]) {
+ if (gpiod_get_value(pinfo->gpios[GPIO_CTS]))
mctrl &= ~TIOCM_CTS;
}
- if (pinfo->gpios[GPIO_DSR] >= 0) {
- if (gpio_get_value(pinfo->gpios[GPIO_DSR]))
+ if (pinfo->gpios[GPIO_DSR]) {
+ if (gpiod_get_value(pinfo->gpios[GPIO_DSR]))
mctrl &= ~TIOCM_DSR;
}
- if (pinfo->gpios[GPIO_DCD] >= 0) {
- if (gpio_get_value(pinfo->gpios[GPIO_DCD]))
+ if (pinfo->gpios[GPIO_DCD]) {
+ if (gpiod_get_value(pinfo->gpios[GPIO_DCD]))
mctrl &= ~TIOCM_CAR;
}
- if (pinfo->gpios[GPIO_RI] >= 0) {
- if (!gpio_get_value(pinfo->gpios[GPIO_RI]))
+ if (pinfo->gpios[GPIO_RI]) {
+ if (!gpiod_get_value(pinfo->gpios[GPIO_RI]))
mctrl |= TIOCM_RNG;
}
@@ -1139,6 +1138,7 @@ static int cpm_uart_init_port(struct device_node *np,
{
const u32 *data;
void __iomem *mem, *pram;
+ struct device *dev = pinfo->port.dev;
int len;
int ret;
int i;
@@ -1211,29 +1211,23 @@ static int cpm_uart_init_port(struct device_node *np,
}
for (i = 0; i < NUM_GPIOS; i++) {
- int gpio;
+ struct gpio_desc *gpiod;
- pinfo->gpios[i] = -1;
+ pinfo->gpios[i] = NULL;
- gpio = of_get_gpio(np, i);
+ gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
- if (gpio_is_valid(gpio)) {
- ret = gpio_request(gpio, "cpm_uart");
- if (ret) {
- pr_err("can't request gpio #%d: %d\n", i, ret);
- continue;
- }
+ if (!IS_ERR(gpiod)) {
if (i == GPIO_RTS || i == GPIO_DTR)
- ret = gpio_direction_output(gpio, 0);
+ ret = gpiod_direction_output(gpiod, 0);
else
- ret = gpio_direction_input(gpio);
+ ret = gpiod_direction_input(gpiod);
if (ret) {
pr_err("can't set direction for gpio #%d: %d\n",
i, ret);
- gpio_free(gpio);
continue;
}
- pinfo->gpios[i] = gpio;
+ pinfo->gpios[i] = gpiod;
}
}
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index c14873b67803..2ae9190b64bb 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -170,6 +170,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
int __init setup_earlycon(char *buf)
{
const struct earlycon_id **p_match;
+ bool empty_compatible = true;
if (!buf || !buf[0])
return -EINVAL;
@@ -177,6 +178,7 @@ int __init setup_earlycon(char *buf)
if (early_con.flags & CON_ENABLED)
return -EALREADY;
+again:
for (p_match = __earlycon_table; p_match < __earlycon_table_end;
p_match++) {
const struct earlycon_id *match = *p_match;
@@ -185,6 +187,10 @@ int __init setup_earlycon(char *buf)
if (strncmp(buf, match->name, len))
continue;
+ /* prefer entries with empty compatible */
+ if (empty_compatible && *match->compatible)
+ continue;
+
if (buf[len]) {
if (buf[len] != ',')
continue;
@@ -195,6 +201,11 @@ int __init setup_earlycon(char *buf)
return register_earlycon(buf, match);
}
+ if (empty_compatible) {
+ empty_compatible = false;
+ goto again;
+ }
+
return -ENOENT;
}
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 2ac87128d7fd..f12f29cf4f31 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -200,7 +200,7 @@ static void efm32_uart_rx_chars(struct efm32_uart_port *efm_port)
/*
* This is a reserved bit and I only saw it read as 0. But to be
* sure not to be confused too much by new devices adhere to the
- * warning in the reference manual that reserverd bits might
+ * warning in the reference manual that reserved bits might
* read as 1 in the future.
*/
rxdata &= ~SW_UARTn_RXDATAX_BERR;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c31b8f3db6bf..5d41075964f2 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -234,6 +234,7 @@ static DEFINE_IDA(fsl_lpuart_ida);
enum lpuart_type {
VF610_LPUART,
LS1021A_LPUART,
+ LS1028A_LPUART,
IMX7ULP_LPUART,
IMX8QXP_LPUART,
};
@@ -278,11 +279,16 @@ static const struct lpuart_soc_data vf_data = {
.iotype = UPIO_MEM,
};
-static const struct lpuart_soc_data ls_data = {
+static const struct lpuart_soc_data ls1021a_data = {
.devtype = LS1021A_LPUART,
.iotype = UPIO_MEM32BE,
};
+static const struct lpuart_soc_data ls1028a_data = {
+ .devtype = LS1028A_LPUART,
+ .iotype = UPIO_MEM32,
+};
+
static struct lpuart_soc_data imx7ulp_data = {
.devtype = IMX7ULP_LPUART,
.iotype = UPIO_MEM32,
@@ -297,7 +303,8 @@ static struct lpuart_soc_data imx8qxp_data = {
static const struct of_device_id lpuart_dt_ids[] = {
{ .compatible = "fsl,vf610-lpuart", .data = &vf_data, },
- { .compatible = "fsl,ls1021a-lpuart", .data = &ls_data, },
+ { .compatible = "fsl,ls1021a-lpuart", .data = &ls1021a_data, },
+ { .compatible = "fsl,ls1028a-lpuart", .data = &ls1028a_data, },
{ .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, },
{ /* sentinel */ }
@@ -307,6 +314,11 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
/* Forward declare this for the dma callbacks*/
static void lpuart_dma_tx_complete(void *arg);
+static inline bool is_ls1028a_lpuart(struct lpuart_port *sport)
+{
+ return sport->devtype == LS1028A_LPUART;
+}
+
static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
{
return sport->devtype == IMX8QXP_LPUART;
@@ -409,6 +421,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
struct circ_buf *xmit = &sport->port.state->xmit;
struct scatterlist *sgl = sport->tx_sgl;
struct device *dev = sport->port.dev;
+ struct dma_chan *chan = sport->dma_tx_chan;
int ret;
if (sport->dma_tx_in_progress)
@@ -427,17 +440,19 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sg_set_buf(sgl + 1, xmit->buf, xmit->head);
}
- ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+ DMA_TO_DEVICE);
if (!ret) {
dev_err(dev, "DMA mapping error for TX.\n");
return;
}
- sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
+ sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl,
ret, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!sport->dma_tx_desc) {
- dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+ DMA_TO_DEVICE);
dev_err(dev, "Cannot prepare TX slave DMA!\n");
return;
}
@@ -446,7 +461,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_desc->callback_param = sport;
sport->dma_tx_in_progress = true;
sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
- dma_async_issue_pending(sport->dma_tx_chan);
+ dma_async_issue_pending(chan);
}
static bool lpuart_stopped_or_empty(struct uart_port *port)
@@ -459,11 +474,13 @@ static void lpuart_dma_tx_complete(void *arg)
struct lpuart_port *sport = arg;
struct scatterlist *sgl = &sport->tx_sgl[0];
struct circ_buf *xmit = &sport->port.state->xmit;
+ struct dma_chan *chan = sport->dma_tx_chan;
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
- dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+ DMA_TO_DEVICE);
xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);
@@ -529,15 +546,16 @@ static bool lpuart_is_32(struct lpuart_port *sport)
static void lpuart_flush_buffer(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ struct dma_chan *chan = sport->dma_tx_chan;
u32 val;
if (sport->lpuart_dma_tx_use) {
if (sport->dma_tx_in_progress) {
- dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
+ dma_unmap_sg(chan->device->dev, &sport->tx_sgl[0],
sport->dma_tx_nents, DMA_TO_DEVICE);
sport->dma_tx_in_progress = false;
}
- dmaengine_terminate_all(sport->dma_tx_chan);
+ dmaengine_terminate_all(chan);
}
if (lpuart_is_32(sport)) {
@@ -993,6 +1011,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
struct tty_port *port = &sport->port.state->port;
struct dma_tx_state state;
enum dma_status dmastat;
+ struct dma_chan *chan = sport->dma_rx_chan;
struct circ_buf *ring = &sport->rx_ring;
unsigned long flags;
int count = 0;
@@ -1053,10 +1072,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
spin_lock_irqsave(&sport->port.lock, flags);
- dmastat = dmaengine_tx_status(sport->dma_rx_chan,
- sport->dma_rx_cookie,
- &state);
-
+ dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
if (dmastat == DMA_ERROR) {
dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1064,7 +1080,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
}
/* CPU claims ownership of RX DMA buffer */
- dma_sync_sg_for_cpu(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ dma_sync_sg_for_cpu(chan->device->dev, &sport->rx_sgl, 1,
+ DMA_FROM_DEVICE);
/*
* ring->head points to the end of data already written by the DMA.
@@ -1106,7 +1123,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
sport->port.icount.rx += count;
}
- dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1,
+ dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
DMA_FROM_DEVICE);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1138,6 +1155,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
struct tty_port *port = &sport->port.state->port;
struct tty_struct *tty = port->tty;
struct ktermios *termios = &tty->termios;
+ struct dma_chan *chan = sport->dma_rx_chan;
baud = tty_get_baud_rate(tty);
@@ -1159,7 +1177,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
return -ENOMEM;
sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
- nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1,
+ DMA_FROM_DEVICE);
if (!nent) {
dev_err(sport->port.dev, "DMA Rx mapping error\n");
@@ -1170,7 +1189,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_rx_sconfig.src_maxburst = 1;
dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
- ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig);
+ ret = dmaengine_slave_config(chan, &dma_rx_sconfig);
if (ret < 0) {
dev_err(sport->port.dev,
@@ -1178,7 +1197,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
return ret;
}
- sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
+ sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan,
sg_dma_address(&sport->rx_sgl),
sport->rx_sgl.length,
sport->rx_sgl.length / 2,
@@ -1192,7 +1211,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
sport->dma_rx_desc->callback_param = sport;
sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
- dma_async_issue_pending(sport->dma_rx_chan);
+ dma_async_issue_pending(chan);
if (lpuart_is_32(sport)) {
unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
@@ -1210,11 +1229,12 @@ static void lpuart_dma_rx_free(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
+ struct dma_chan *chan = sport->dma_rx_chan;
- if (sport->dma_rx_chan)
- dmaengine_terminate_all(sport->dma_rx_chan);
+ if (chan)
+ dmaengine_terminate_all(chan);
- dma_unmap_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
kfree(sport->rx_ring.buf);
sport->rx_ring.tail = 0;
sport->rx_ring.head = 0;
@@ -1490,39 +1510,77 @@ static void rx_dma_timer_init(struct lpuart_port *sport)
add_timer(&sport->lpuart_timer);
}
+static void lpuart_request_dma(struct lpuart_port *sport)
+{
+ sport->dma_tx_chan = dma_request_chan(sport->port.dev, "tx");
+ if (IS_ERR(sport->dma_tx_chan)) {
+ dev_info_once(sport->port.dev,
+ "DMA tx channel request failed, operating without tx DMA (%ld)\n",
+ PTR_ERR(sport->dma_tx_chan));
+ sport->dma_tx_chan = NULL;
+ }
+
+ sport->dma_rx_chan = dma_request_chan(sport->port.dev, "rx");
+ if (IS_ERR(sport->dma_rx_chan)) {
+ dev_info_once(sport->port.dev,
+ "DMA rx channel request failed, operating without rx DMA (%ld)\n",
+ PTR_ERR(sport->dma_rx_chan));
+ sport->dma_rx_chan = NULL;
+ }
+}
+
static void lpuart_tx_dma_startup(struct lpuart_port *sport)
{
u32 uartbaud;
+ int ret;
- if (sport->dma_tx_chan && !lpuart_dma_tx_request(&sport->port)) {
- init_waitqueue_head(&sport->dma_wait);
- sport->lpuart_dma_tx_use = true;
- if (lpuart_is_32(sport)) {
- uartbaud = lpuart32_read(&sport->port, UARTBAUD);
- lpuart32_write(&sport->port,
- uartbaud | UARTBAUD_TDMAE, UARTBAUD);
- } else {
- writeb(readb(sport->port.membase + UARTCR5) |
- UARTCR5_TDMAS, sport->port.membase + UARTCR5);
- }
+ if (!sport->dma_tx_chan)
+ goto err;
+
+ ret = lpuart_dma_tx_request(&sport->port);
+ if (ret)
+ goto err;
+
+ init_waitqueue_head(&sport->dma_wait);
+ sport->lpuart_dma_tx_use = true;
+ if (lpuart_is_32(sport)) {
+ uartbaud = lpuart32_read(&sport->port, UARTBAUD);
+ lpuart32_write(&sport->port,
+ uartbaud | UARTBAUD_TDMAE, UARTBAUD);
} else {
- sport->lpuart_dma_tx_use = false;
+ writeb(readb(sport->port.membase + UARTCR5) |
+ UARTCR5_TDMAS, sport->port.membase + UARTCR5);
}
+
+ return;
+
+err:
+ sport->lpuart_dma_tx_use = false;
}
static void lpuart_rx_dma_startup(struct lpuart_port *sport)
{
- if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
- /* set Rx DMA timeout */
- sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
- if (!sport->dma_rx_timeout)
- sport->dma_rx_timeout = 1;
+ int ret;
- sport->lpuart_dma_rx_use = true;
- rx_dma_timer_init(sport);
- } else {
- sport->lpuart_dma_rx_use = false;
- }
+ if (!sport->dma_rx_chan)
+ goto err;
+
+ ret = lpuart_start_rx_dma(sport);
+ if (ret)
+ goto err;
+
+ /* set Rx DMA timeout */
+ sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
+ if (!sport->dma_rx_timeout)
+ sport->dma_rx_timeout = 1;
+
+ sport->lpuart_dma_rx_use = true;
+ rx_dma_timer_init(sport);
+
+ return;
+
+err:
+ sport->lpuart_dma_rx_use = false;
}
static int lpuart_startup(struct uart_port *port)
@@ -1541,6 +1599,8 @@ static int lpuart_startup(struct uart_port *port)
sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK);
+ lpuart_request_dma(sport);
+
spin_lock_irqsave(&sport->port.lock, flags);
lpuart_setup_watermark_enable(sport);
@@ -1587,11 +1647,23 @@ static int lpuart32_startup(struct uart_port *port)
sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) &
UARTFIFO_FIFOSIZE_MASK);
+ /*
+ * The LS1028A has a fixed FIFO depth of 16 words. Although it supports
+ * the RX/TXSIZE fields, their encoding differs, e.g. the reference
+ * manual states that 0b101 means 16 words.
+ */
+ if (is_ls1028a_lpuart(sport)) {
+ sport->rxfifo_size = 16;
+ sport->txfifo_size = 16;
+ sport->port.fifosize = sport->txfifo_size;
+ }
+
+ lpuart_request_dma(sport);
+
spin_lock_irqsave(&sport->port.lock, flags);
lpuart32_setup_watermark_enable(sport);
-
lpuart_rx_dma_startup(sport);
lpuart_tx_dma_startup(sport);
@@ -1615,6 +1687,11 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
dmaengine_terminate_all(sport->dma_tx_chan);
}
}
+
+ if (sport->dma_tx_chan)
+ dma_release_channel(sport->dma_tx_chan);
+ if (sport->dma_rx_chan)
+ dma_release_channel(sport->dma_rx_chan);
}
static void lpuart_shutdown(struct uart_port *port)
@@ -1811,11 +1888,12 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
spin_unlock_irqrestore(&sport->port.lock, flags);
}
-static void
-lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate)
+static void __lpuart32_serial_setbrg(struct uart_port *port,
+ unsigned int baudrate, bool use_rx_dma,
+ bool use_tx_dma)
{
u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp;
- u32 clk = sport->port.uartclk;
+ u32 clk = port->uartclk;
/*
* The idea is to use the best OSR (over-sampling rate) possible.
@@ -1861,10 +1939,10 @@ lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate)
/* handle baud rates outside the acceptable range */
if (baud_diff > ((baudrate / 100) * 3))
- dev_warn(sport->port.dev,
+ dev_warn(port->dev,
"unacceptable baud rate difference of more than 3%%\n");
- tmp = lpuart32_read(&sport->port, UARTBAUD);
+ tmp = lpuart32_read(port, UARTBAUD);
if ((osr > 3) && (osr < 8))
tmp |= UARTBAUD_BOTHEDGE;
@@ -1875,14 +1953,23 @@ lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate)
tmp &= ~UARTBAUD_SBR_MASK;
tmp |= sbr & UARTBAUD_SBR_MASK;
- if (!sport->lpuart_dma_rx_use)
+ if (!use_rx_dma)
tmp &= ~UARTBAUD_RDMAE;
- if (!sport->lpuart_dma_tx_use)
+ if (!use_tx_dma)
tmp &= ~UARTBAUD_TDMAE;
- lpuart32_write(&sport->port, tmp, UARTBAUD);
+ lpuart32_write(port, tmp, UARTBAUD);
+}
+
+static void lpuart32_serial_setbrg(struct lpuart_port *sport,
+ unsigned int baudrate)
+{
+ __lpuart32_serial_setbrg(&sport->port, baudrate,
+ sport->lpuart_dma_rx_use,
+ sport->lpuart_dma_tx_use);
}
+
static void
lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -2376,6 +2463,30 @@ static int __init lpuart32_early_console_setup(struct earlycon_device *device,
return 0;
}
+static int __init ls1028a_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ u32 cr;
+
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->port.iotype = UPIO_MEM32;
+ device->con->write = lpuart32_early_write;
+
+ /* set the baudrate */
+ if (device->port.uartclk && device->baud)
+ __lpuart32_serial_setbrg(&device->port, device->baud,
+ false, false);
+
+ /* enable transmitter */
+ cr = lpuart32_read(&device->port, UARTCTRL);
+ cr |= UARTCTRL_TE;
+ lpuart32_write(&device->port, cr, UARTCTRL);
+
+ return 0;
+}
+
static int __init lpuart32_imx_early_console_setup(struct earlycon_device *device,
const char *opt)
{
@@ -2390,6 +2501,7 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
}
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
@@ -2520,16 +2632,6 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485_config(&sport->port, &sport->port.rs485);
- sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
- if (!sport->dma_tx_chan)
- dev_info(sport->port.dev, "DMA tx channel request failed, "
- "operating without tx DMA\n");
-
- sport->dma_rx_chan = dma_request_slave_channel(sport->port.dev, "rx");
- if (!sport->dma_rx_chan)
- dev_info(sport->port.dev, "DMA rx channel request failed, "
- "operating without rx DMA\n");
-
return 0;
failed_attach_port:
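Two patterns in the fsl_lpuart hunks above are easy to miss in the churn: channel lookup moves out of probe() into a startup-time lpuart_request_dma() (with release in lpuart_dma_shutdown()), a failed lookup degrading the port to PIO rather than failing, and the dma_map_sg()/dma_unmap_sg()/dma_sync_sg_*() calls are now keyed to the DMA engine's device (chan->device->dev) instead of the UART's platform device, since the streaming DMA API wants the device that actually performs the transfer. A minimal sketch of the request-or-fall-back pattern, with a hypothetical struct my_port standing in for lpuart_port:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

struct my_port {
        struct device *dev;
        struct dma_chan *tx_chan;       /* NULL means "run in PIO mode" */
};

static void my_request_dma(struct my_port *p)
{
        p->tx_chan = dma_request_chan(p->dev, "tx");
        if (IS_ERR(p->tx_chan)) {
                /* Non-fatal: note it once and keep the port usable. */
                dev_info_once(p->dev, "no tx DMA (%ld), using PIO\n",
                              PTR_ERR(p->tx_chan));
                p->tx_chan = NULL;
        }
}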
diff --git a/drivers/tty/serial/icom.h b/drivers/tty/serial/icom.h
index 8a77e739b333..26e3aa7b01e2 100644
--- a/drivers/tty/serial/icom.h
+++ b/drivers/tty/serial/icom.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* icom.h
*
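The icom.h change above (and the same one-liner for several headers below) is purely about comment form: the kernel's license-rules documentation asks for C89-style /* */ SPDX comments in headers, because headers can end up in contexts, such as assembler files, where C99 // comments are not valid. Schematically:

/* foo.h: headers carry the block-comment form */
/* SPDX-License-Identifier: GPL-2.0+ */

/* foo.c: C sources carry the line-comment form */
// SPDX-License-Identifier: GPL-2.0+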
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 31033d517e82..7d16fe41932f 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -39,7 +39,7 @@
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/dmapool.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/wait.h>
@@ -61,7 +61,6 @@
#define IFX_SPI_HEADER_F (-2)
#define PO_POST_DELAY 200
-#define IFX_MDM_RST_PMU 4
/* forward reference */
static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev);
@@ -81,7 +80,7 @@ static struct notifier_block ifx_modem_reboot_notifier_block = {
static int ifx_modem_power_off(struct ifx_spi_device *ifx_dev)
{
- gpio_set_value(IFX_MDM_RST_PMU, 1);
+ gpiod_set_value(ifx_dev->gpio.pmu_reset, 1);
msleep(PO_POST_DELAY);
return 0;
@@ -107,7 +106,7 @@ static int ifx_modem_reboot_callback(struct notifier_block *nfb,
*/
static inline void mrdy_set_high(struct ifx_spi_device *ifx)
{
- gpio_set_value(ifx->gpio.mrdy, 1);
+ gpiod_set_value(ifx->gpio.mrdy, 1);
}
/**
@@ -117,7 +116,7 @@ static inline void mrdy_set_high(struct ifx_spi_device *ifx)
*/
static inline void mrdy_set_low(struct ifx_spi_device *ifx)
{
- gpio_set_value(ifx->gpio.mrdy, 0);
+ gpiod_set_value(ifx->gpio.mrdy, 0);
}
/**
@@ -244,7 +243,7 @@ static inline void swap_buf_32(unsigned char *buf, int len, void *end)
*/
static void mrdy_assert(struct ifx_spi_device *ifx_dev)
{
- int val = gpio_get_value(ifx_dev->gpio.srdy);
+ int val = gpiod_get_value(ifx_dev->gpio.srdy);
if (!val) {
if (!test_and_set_bit(IFX_SPI_STATE_TIMER_PENDING,
&ifx_dev->flags)) {
@@ -691,7 +690,7 @@ complete_exit:
clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &(ifx_dev->flags));
queue_length = kfifo_len(&ifx_dev->tx_fifo);
- srdy = gpio_get_value(ifx_dev->gpio.srdy);
+ srdy = gpiod_get_value(ifx_dev->gpio.srdy);
if (!srdy)
ifx_spi_power_state_clear(ifx_dev, IFX_SPI_POWER_SRDY);
@@ -898,7 +897,7 @@ static irqreturn_t ifx_spi_srdy_interrupt(int irq, void *dev)
static irqreturn_t ifx_spi_reset_interrupt(int irq, void *dev)
{
struct ifx_spi_device *ifx_dev = dev;
- int val = gpio_get_value(ifx_dev->gpio.reset_out);
+ int val = gpiod_get_value(ifx_dev->gpio.reset_out);
int solreset = test_bit(MR_START, &ifx_dev->mdm_reset_state);
if (val == 0) {
@@ -954,14 +953,14 @@ static int ifx_spi_reset(struct ifx_spi_device *ifx_dev)
* to reset properly
*/
set_bit(MR_START, &ifx_dev->mdm_reset_state);
- gpio_set_value(ifx_dev->gpio.po, 0);
- gpio_set_value(ifx_dev->gpio.reset, 0);
+ gpiod_set_value(ifx_dev->gpio.po, 0);
+ gpiod_set_value(ifx_dev->gpio.reset, 0);
msleep(25);
- gpio_set_value(ifx_dev->gpio.reset, 1);
+ gpiod_set_value(ifx_dev->gpio.reset, 1);
msleep(1);
- gpio_set_value(ifx_dev->gpio.po, 1);
+ gpiod_set_value(ifx_dev->gpio.po, 1);
msleep(1);
- gpio_set_value(ifx_dev->gpio.po, 0);
+ gpiod_set_value(ifx_dev->gpio.po, 0);
ret = wait_event_timeout(ifx_dev->mdm_reset_wait,
test_bit(MR_COMPLETE,
&ifx_dev->mdm_reset_state),
@@ -992,22 +991,23 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
int srdy;
struct ifx_modem_platform_data *pl_data;
struct ifx_spi_device *ifx_dev;
+ struct device *dev = &spi->dev;
if (saved_ifx_dev) {
- dev_dbg(&spi->dev, "ignoring subsequent detection");
+ dev_dbg(dev, "ignoring subsequent detection");
return -ENODEV;
}
- pl_data = dev_get_platdata(&spi->dev);
+ pl_data = dev_get_platdata(dev);
if (!pl_data) {
- dev_err(&spi->dev, "missing platform data!");
+ dev_err(dev, "missing platform data!");
return -ENODEV;
}
/* initialize structure to hold our device variables */
ifx_dev = kzalloc(sizeof(struct ifx_spi_device), GFP_KERNEL);
if (!ifx_dev) {
- dev_err(&spi->dev, "spi device allocation failed");
+ dev_err(dev, "spi device allocation failed");
return -ENOMEM;
}
saved_ifx_dev = ifx_dev;
@@ -1026,7 +1026,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
spi->bits_per_word = spi_bpw;
ret = spi_setup(spi);
if (ret) {
- dev_err(&spi->dev, "SPI setup wasn't successful %d", ret);
+ dev_err(dev, "SPI setup wasn't successful %d", ret);
kfree(ifx_dev);
return -ENODEV;
}
@@ -1049,7 +1049,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
&ifx_dev->tx_bus,
GFP_KERNEL);
if (!ifx_dev->tx_buffer) {
- dev_err(&spi->dev, "DMA-TX buffer allocation failed");
+ dev_err(dev, "DMA-TX buffer allocation failed");
ret = -ENOMEM;
goto error_ret;
}
@@ -1058,7 +1058,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
&ifx_dev->rx_bus,
GFP_KERNEL);
if (!ifx_dev->rx_buffer) {
- dev_err(&spi->dev, "DMA-RX buffer allocation failed");
+ dev_err(dev, "DMA-RX buffer allocation failed");
ret = -ENOMEM;
goto error_ret;
}
@@ -1075,122 +1075,83 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
/* create our tty port */
ret = ifx_spi_create_port(ifx_dev);
if (ret != 0) {
- dev_err(&spi->dev, "create default tty port failed");
+ dev_err(dev, "create default tty port failed");
goto error_ret;
}
- ifx_dev->gpio.reset = pl_data->rst_pmu;
- ifx_dev->gpio.po = pl_data->pwr_on;
- ifx_dev->gpio.mrdy = pl_data->mrdy;
- ifx_dev->gpio.srdy = pl_data->srdy;
- ifx_dev->gpio.reset_out = pl_data->rst_out;
-
- dev_info(&spi->dev, "gpios %d, %d, %d, %d, %d",
- ifx_dev->gpio.reset, ifx_dev->gpio.po, ifx_dev->gpio.mrdy,
- ifx_dev->gpio.srdy, ifx_dev->gpio.reset_out);
-
- /* Configure gpios */
- ret = gpio_request(ifx_dev->gpio.reset, "ifxModem");
- if (ret < 0) {
- dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET)",
- ifx_dev->gpio.reset);
+ ifx_dev->gpio.reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ifx_dev->gpio.reset)) {
+ dev_err(dev, "could not obtain reset GPIO\n");
+ ret = PTR_ERR(ifx_dev->gpio.reset);
goto error_ret;
}
- ret += gpio_direction_output(ifx_dev->gpio.reset, 0);
- ret += gpio_export(ifx_dev->gpio.reset, 1);
- if (ret) {
- dev_err(&spi->dev, "Unable to configure GPIO%d (RESET)",
- ifx_dev->gpio.reset);
- ret = -EBUSY;
- goto error_ret2;
- }
-
- ret = gpio_request(ifx_dev->gpio.po, "ifxModem");
- ret += gpio_direction_output(ifx_dev->gpio.po, 0);
- ret += gpio_export(ifx_dev->gpio.po, 1);
- if (ret) {
- dev_err(&spi->dev, "Unable to configure GPIO%d (ON)",
- ifx_dev->gpio.po);
- ret = -EBUSY;
- goto error_ret3;
- }
-
- ret = gpio_request(ifx_dev->gpio.mrdy, "ifxModem");
- if (ret < 0) {
- dev_err(&spi->dev, "Unable to allocate GPIO%d (MRDY)",
- ifx_dev->gpio.mrdy);
- goto error_ret3;
- }
- ret += gpio_export(ifx_dev->gpio.mrdy, 1);
- ret += gpio_direction_output(ifx_dev->gpio.mrdy, 0);
- if (ret) {
- dev_err(&spi->dev, "Unable to configure GPIO%d (MRDY)",
- ifx_dev->gpio.mrdy);
- ret = -EBUSY;
- goto error_ret4;
+ gpiod_set_consumer_name(ifx_dev->gpio.reset, "ifxModem reset");
+ ifx_dev->gpio.po = devm_gpiod_get(dev, "power", GPIOD_OUT_LOW);
+ if (IS_ERR(ifx_dev->gpio.po)) {
+ dev_err(dev, "could not obtain power GPIO\n");
+ ret = PTR_ERR(ifx_dev->gpio.po);
+ goto error_ret;
}
-
- ret = gpio_request(ifx_dev->gpio.srdy, "ifxModem");
- if (ret < 0) {
- dev_err(&spi->dev, "Unable to allocate GPIO%d (SRDY)",
- ifx_dev->gpio.srdy);
- ret = -EBUSY;
- goto error_ret4;
+ gpiod_set_consumer_name(ifx_dev->gpio.po, "ifxModem power");
+ ifx_dev->gpio.mrdy = devm_gpiod_get(dev, "mrdy", GPIOD_OUT_LOW);
+ if (IS_ERR(ifx_dev->gpio.mrdy)) {
+ dev_err(dev, "could not obtain mrdy GPIO\n");
+ ret = PTR_ERR(ifx_dev->gpio.mrdy);
+ goto error_ret;
}
- ret += gpio_export(ifx_dev->gpio.srdy, 1);
- ret += gpio_direction_input(ifx_dev->gpio.srdy);
- if (ret) {
- dev_err(&spi->dev, "Unable to configure GPIO%d (SRDY)",
- ifx_dev->gpio.srdy);
- ret = -EBUSY;
- goto error_ret5;
+ gpiod_set_consumer_name(ifx_dev->gpio.mrdy, "ifxModem mrdy");
+ ifx_dev->gpio.srdy = devm_gpiod_get(dev, "srdy", GPIOD_IN);
+ if (IS_ERR(ifx_dev->gpio.srdy)) {
+ dev_err(dev, "could not obtain srdy GPIO\n");
+ ret = PTR_ERR(ifx_dev->gpio.srdy);
+ goto error_ret;
}
-
- ret = gpio_request(ifx_dev->gpio.reset_out, "ifxModem");
- if (ret < 0) {
- dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET_OUT)",
- ifx_dev->gpio.reset_out);
- goto error_ret5;
+ gpiod_set_consumer_name(ifx_dev->gpio.srdy, "ifxModem srdy");
+ ifx_dev->gpio.reset_out = devm_gpiod_get(dev, "rst_out", GPIOD_IN);
+ if (IS_ERR(ifx_dev->gpio.reset_out)) {
+ dev_err(dev, "could not obtain rst_out GPIO\n");
+ ret = PTR_ERR(ifx_dev->gpio.reset_out);
+ goto error_ret;
}
- ret += gpio_export(ifx_dev->gpio.reset_out, 1);
- ret += gpio_direction_input(ifx_dev->gpio.reset_out);
- if (ret) {
- dev_err(&spi->dev, "Unable to configure GPIO%d (RESET_OUT)",
- ifx_dev->gpio.reset_out);
- ret = -EBUSY;
- goto error_ret6;
+ gpiod_set_consumer_name(ifx_dev->gpio.reset_out, "ifxModem reset out");
+ ifx_dev->gpio.pmu_reset = devm_gpiod_get(dev, "pmu_reset", GPIOD_ASIS);
+ if (IS_ERR(ifx_dev->gpio.pmu_reset)) {
+ dev_err(dev, "could not obtain pmu_reset GPIO\n");
+ ret = PTR_ERR(ifx_dev->gpio.pmu_reset);
+ goto error_ret;
}
+ gpiod_set_consumer_name(ifx_dev->gpio.pmu_reset, "ifxModem PMU reset");
- ret = request_irq(gpio_to_irq(ifx_dev->gpio.reset_out),
+ ret = request_irq(gpiod_to_irq(ifx_dev->gpio.reset_out),
ifx_spi_reset_interrupt,
IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, DRVNAME,
ifx_dev);
if (ret) {
- dev_err(&spi->dev, "Unable to get irq %x\n",
- gpio_to_irq(ifx_dev->gpio.reset_out));
- goto error_ret6;
+ dev_err(dev, "Unable to get irq %x\n",
+ gpiod_to_irq(ifx_dev->gpio.reset_out));
+ goto error_ret;
}
ret = ifx_spi_reset(ifx_dev);
- ret = request_irq(gpio_to_irq(ifx_dev->gpio.srdy),
+ ret = request_irq(gpiod_to_irq(ifx_dev->gpio.srdy),
ifx_spi_srdy_interrupt, IRQF_TRIGGER_RISING, DRVNAME,
ifx_dev);
if (ret) {
- dev_err(&spi->dev, "Unable to get irq %x",
- gpio_to_irq(ifx_dev->gpio.srdy));
- goto error_ret7;
+ dev_err(dev, "Unable to get irq %x",
+ gpiod_to_irq(ifx_dev->gpio.srdy));
+ goto error_ret2;
}
/* set pm runtime power state and register with power system */
- pm_runtime_set_active(&spi->dev);
- pm_runtime_enable(&spi->dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
/* handle case that modem is already signaling SRDY */
/* no outgoing tty open at this point, this just satisfies the
* modem's read and should reset communication properly
*/
- srdy = gpio_get_value(ifx_dev->gpio.srdy);
+ srdy = gpiod_get_value(ifx_dev->gpio.srdy);
if (srdy) {
mrdy_assert(ifx_dev);
@@ -1199,18 +1160,8 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
mrdy_set_low(ifx_dev);
return 0;
-error_ret7:
- free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
-error_ret6:
- gpio_free(ifx_dev->gpio.srdy);
-error_ret5:
- gpio_free(ifx_dev->gpio.mrdy);
-error_ret4:
- gpio_free(ifx_dev->gpio.reset);
-error_ret3:
- gpio_free(ifx_dev->gpio.po);
error_ret2:
- gpio_free(ifx_dev->gpio.reset_out);
+ free_irq(gpiod_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
error_ret:
ifx_spi_free_device(ifx_dev);
saved_ifx_dev = NULL;
@@ -1234,14 +1185,8 @@ static int ifx_spi_spi_remove(struct spi_device *spi)
pm_runtime_disable(&spi->dev);
/* free irq */
- free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
- free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev);
-
- gpio_free(ifx_dev->gpio.srdy);
- gpio_free(ifx_dev->gpio.mrdy);
- gpio_free(ifx_dev->gpio.reset);
- gpio_free(ifx_dev->gpio.po);
- gpio_free(ifx_dev->gpio.reset_out);
+ free_irq(gpiod_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
+ free_irq(gpiod_to_irq(ifx_dev->gpio.srdy), ifx_dev);
/* free allocations */
ifx_spi_free_device(ifx_dev);
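The ifx6x60 rework swaps integer GPIO numbers taken from platform data for the descriptor-based consumer API: devm_gpiod_get() looks the line up by function name, applies the requested initial direction, and releases it automatically on driver detach, which is why the long gpio_free() error ladder collapses to a couple of labels. A reduced sketch, assuming a hypothetical "reset" line:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_probe(struct device *dev)
{
        struct gpio_desc *reset;

        /* Request by function name, driven low initially. */
        reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset)) {
                dev_err(dev, "could not obtain reset GPIO\n");
                return PTR_ERR(reset);
        }

        gpiod_set_value(reset, 1);      /* logical, polarity-aware value */
        return 0;                       /* no gpiod_put(): devm handles it */
}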
diff --git a/drivers/tty/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
index c5a2514212ff..ecb841d928a7 100644
--- a/drivers/tty/serial/ifx6x60.h
+++ b/drivers/tty/serial/ifx6x60.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/****************************************************************************
*
* Driver for the IFX spi modem.
@@ -10,6 +10,8 @@
#ifndef _IFX6X60_H
#define _IFX6X60_H
+struct gpio_desc;
+
#define DRVNAME "ifx6x60"
#define TTYNAME "ttyIFX"
@@ -94,11 +96,12 @@ struct ifx_spi_device {
struct {
/* gpio lines */
- unsigned short srdy; /* slave-ready gpio */
- unsigned short mrdy; /* master-ready gpio */
- unsigned short reset; /* modem-reset gpio */
- unsigned short po; /* modem-on gpio */
- unsigned short reset_out; /* modem-in-reset gpio */
+ struct gpio_desc *srdy; /* slave-ready gpio */
+ struct gpio_desc *mrdy; /* master-ready gpio */
+ struct gpio_desc *reset; /* modem-reset gpio */
+ struct gpio_desc *po; /* modem-on gpio */
+ struct gpio_desc *reset_out; /* modem-in-reset gpio */
+ struct gpio_desc *pmu_reset; /* PMU reset gpio */
/* state/stats */
int unack_srdy_int_nb;
} gpio;
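One detail of the header hunk worth noting: because the gpio members are only used through pointers, the bare 'struct gpio_desc;' forward declaration is enough, and ifx6x60.h avoids dragging <linux/gpio/consumer.h> into every includer. The pattern in isolation, with hypothetical names:

/* some_driver.h, sketch: a forward declaration suffices for pointers */
struct gpio_desc;

struct some_device {
        struct gpio_desc *reset;        /* pointer member only */
};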
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index d337782b3648..f4d68109bc8b 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -195,6 +195,8 @@ struct imx_port {
unsigned int have_rtscts:1;
unsigned int have_rtsgpio:1;
unsigned int dte_mode:1;
+ unsigned int inverted_tx:1;
+ unsigned int inverted_rx:1;
struct clk *clk_ipg;
struct clk *clk_per;
const struct imx_uart_data *devdata;
@@ -1335,7 +1337,7 @@ static int imx_uart_startup(struct uart_port *port)
int retval, i;
unsigned long flags;
int dma_is_inited = 0;
- u32 ucr1, ucr2, ucr4;
+ u32 ucr1, ucr2, ucr3, ucr4;
retval = clk_prepare_enable(sport->clk_per);
if (retval)
@@ -1387,11 +1389,29 @@ static int imx_uart_startup(struct uart_port *port)
imx_uart_writel(sport, ucr1, UCR1);
- ucr4 = imx_uart_readl(sport, UCR4) & ~UCR4_OREN;
+ ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
if (!sport->dma_is_enabled)
ucr4 |= UCR4_OREN;
+ if (sport->inverted_rx)
+ ucr4 |= UCR4_INVR;
imx_uart_writel(sport, ucr4, UCR4);
+ ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_INVT;
+ /*
+ * configure tx polarity before enabling tx
+ */
+ if (sport->inverted_tx)
+ ucr3 |= UCR3_INVT;
+
+ if (!imx_uart_is_imx1(sport)) {
+ ucr3 |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD;
+
+ if (sport->dte_mode)
+ /* disable broken interrupts */
+ ucr3 &= ~(UCR3_RI | UCR3_DCD);
+ }
+ imx_uart_writel(sport, ucr3, UCR3);
+
ucr2 = imx_uart_readl(sport, UCR2) & ~UCR2_ATEN;
ucr2 |= (UCR2_RXEN | UCR2_TXEN);
if (!sport->have_rtscts)
@@ -1404,20 +1424,6 @@ static int imx_uart_startup(struct uart_port *port)
ucr2 &= ~UCR2_RTSEN;
imx_uart_writel(sport, ucr2, UCR2);
- if (!imx_uart_is_imx1(sport)) {
- u32 ucr3;
-
- ucr3 = imx_uart_readl(sport, UCR3);
-
- ucr3 |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD;
-
- if (sport->dte_mode)
- /* disable broken interrupts */
- ucr3 &= ~(UCR3_RI | UCR3_DCD);
-
- imx_uart_writel(sport, ucr3, UCR3);
- }
-
/*
* Enable modem status interrupts
*/
@@ -2184,6 +2190,12 @@ static int imx_uart_probe_dt(struct imx_port *sport,
if (of_get_property(np, "rts-gpios", NULL))
sport->have_rtsgpio = 1;
+ if (of_get_property(np, "fsl,inverted-tx", NULL))
+ sport->inverted_tx = 1;
+
+ if (of_get_property(np, "fsl,inverted-rx", NULL))
+ sport->inverted_rx = 1;
+
return 0;
}
#else
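Two things appear to be happening in the imx hunks above: the new fsl,inverted-tx / fsl,inverted-rx device-tree flags map onto the UCR3_INVT and UCR4_INVR bits, and the UCR3 programming is folded into a single read-modify-write that runs before UCR2 enables the transmitter, presumably so the polarity is already latched when the first bit goes out. The clear-then-conditionally-set idiom, reusing the driver's own accessors and bit names:

u32 ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_INVT;    /* known state */

if (sport->inverted_tx)         /* from the "fsl,inverted-tx" property */
        ucr3 |= UCR3_INVT;
imx_uart_writel(sport, ucr3, UCR3);     /* latch before TX enable */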
diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index 7a128aaa3a66..8489c07f4cd5 100644
--- a/drivers/tty/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index c7d51b51898f..c9f94fa82be4 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -169,15 +169,13 @@ static int configure_kgdboc(void)
if (!p)
goto noconfig;
- cons = console_drivers;
- while (cons) {
+ for_each_console(cons) {
int idx;
if (cons->device && cons->device(cons, &idx) == p &&
idx == tty_line) {
kgdboc_io_ops.is_console = 1;
break;
}
- cons = cons->next;
}
kgdb_tty_driver = p;
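The kgdboc change is a straight conversion from hand-walking the console_drivers singly-linked list to the for_each_console() helper from <linux/console.h>; behaviour is unchanged, the iterator just hides the ->next chasing. The shape of the loop, with p and tty_line as in the hunk:

struct console *cons;
int idx;

for_each_console(cons) {        /* iterates the console_drivers list */
        if (cons->device && cons->device(cons, &idx) == p &&
            idx == tty_line)
                break;          /* found the console bound to our tty */
}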
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index f67226df30d4..c5e46ff972e4 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -11,7 +11,6 @@
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/device.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 48017cec7f2f..f7d6b3c9ea45 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -37,8 +37,6 @@
#include <linux/of_gpio.h>
#include <linux/platform_data/serial-omap.h>
-#include <dt-bindings/gpio/gpio.h>
-
#define OMAP_MAX_HSUART_PORTS 10
#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y))
@@ -831,7 +829,7 @@ static void serial_omap_uart_qos_work(struct work_struct *work)
struct uart_omap_port *up = container_of(work, struct uart_omap_port,
qos_work);
- pm_qos_update_request(&up->pm_qos_request, up->latency);
+ cpu_latency_qos_update_request(&up->pm_qos_request, up->latency);
}
static void
@@ -1722,10 +1720,9 @@ static int serial_omap_probe(struct platform_device *pdev)
DEFAULT_CLK_SPEED);
}
- up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
- up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
- pm_qos_add_request(&up->pm_qos_request,
- PM_QOS_CPU_DMA_LATENCY, up->latency);
+ up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ cpu_latency_qos_add_request(&up->pm_qos_request, up->latency);
INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
platform_set_drvdata(pdev, up);
@@ -1759,7 +1756,7 @@ err_add_port:
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- pm_qos_remove_request(&up->pm_qos_request);
+ cpu_latency_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(up->dev, false);
err_rs485:
err_port_line:
@@ -1777,7 +1774,7 @@ static int serial_omap_remove(struct platform_device *dev)
pm_runtime_dont_use_autosuspend(up->dev);
pm_runtime_put_sync(up->dev);
pm_runtime_disable(up->dev);
- pm_qos_remove_request(&up->pm_qos_request);
+ cpu_latency_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(&dev->dev, false);
return 0;
@@ -1869,7 +1866,7 @@ static int serial_omap_runtime_suspend(struct device *dev)
serial_omap_enable_wakeup(up, true);
- up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
schedule_work(&up->qos_work);
return 0;
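The omap-serial hunks track a kernel-wide rename: the PM_QOS_CPU_DMA_LATENCY class was replaced by dedicated cpu_latency_qos_*() helpers, with PM_QOS_CPU_LATENCY_DEFAULT_VALUE as the "no constraint" value. The request lifetime keeps the same add/update/remove shape:

#include <linux/pm_qos.h>

static struct pm_qos_request req;

/* probe: register with no constraint */
cpu_latency_qos_add_request(&req, PM_QOS_CPU_LATENCY_DEFAULT_VALUE);

/* while busy: ask for CPU wakeup latency of at most ~20 us */
cpu_latency_qos_update_request(&req, 20);

/* remove: drop the constraint again */
cpu_latency_qos_remove_request(&req);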
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 42c8cc93b603..c149f8c30007 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -680,6 +680,12 @@ static int owl_uart_probe(struct platform_device *pdev)
return PTR_ERR(owl_port->clk);
}
+ ret = clk_prepare_enable(owl_port->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable clk\n");
+ return ret;
+ }
+
owl_port->port.dev = &pdev->dev;
owl_port->port.line = pdev->id;
owl_port->port.type = PORT_OWL;
@@ -712,6 +718,7 @@ static int owl_uart_remove(struct platform_device *pdev)
uart_remove_one_port(&owl_uart_driver, &owl_port->port);
owl_uart_ports[pdev->id] = NULL;
+ clk_disable_unprepare(owl_port->clk);
return 0;
}
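The owl-uart fix pairs a clk_prepare_enable() in probe with a clk_disable_unprepare() in remove; presumably the UART previously only worked when the bootloader or console path had already left the clock running. The balanced pair, sketched with hypothetical names:

#include <linux/clk.h>
#include <linux/platform_device.h>

struct my_priv { struct clk *clk; };

static int my_probe(struct platform_device *pdev)
{
        struct my_priv *p = platform_get_drvdata(pdev);
        int ret;

        ret = clk_prepare_enable(p->clk);       /* gate the clock on */
        if (ret)
                dev_err(&pdev->dev, "could not enable clk\n");
        return ret;
}

static int my_remove(struct platform_device *pdev)
{
        struct my_priv *p = platform_get_drvdata(pdev);

        clk_disable_unprepare(p->clk);          /* balance probe exactly */
        return 0;
}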
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 0a96217dba67..40fa7a27722d 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -310,32 +310,32 @@ static ssize_t port_show_regs(struct file *file, char __user *user_buf,
if (!buf)
return 0;
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"PCH EG20T port[%d] regs:\n", priv->port.line);
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"=================================\n");
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"IER: \t0x%02x\n", ioread8(priv->membase + UART_IER));
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"IIR: \t0x%02x\n", ioread8(priv->membase + UART_IIR));
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"LCR: \t0x%02x\n", ioread8(priv->membase + UART_LCR));
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"MCR: \t0x%02x\n", ioread8(priv->membase + UART_MCR));
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"LSR: \t0x%02x\n", ioread8(priv->membase + UART_LSR));
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"MSR: \t0x%02x\n", ioread8(priv->membase + UART_MSR));
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"BRCSR: \t0x%02x\n",
ioread8(priv->membase + PCH_UART_BRCSR));
lcr = ioread8(priv->membase + UART_LCR);
iowrite8(PCH_UART_LCR_DLAB, priv->membase + UART_LCR);
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"DLL: \t0x%02x\n", ioread8(priv->membase + UART_DLL));
- len += snprintf(buf + len, PCH_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, PCH_REGS_BUFSIZE - len,
"DLM: \t0x%02x\n", ioread8(priv->membase + UART_DLM));
iowrite8(lcr, priv->membase + UART_LCR);
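The pch_uart conversion matters because the return values are accumulated: snprintf() returns the length the output would have had, so once the buffer fills, len can grow past PCH_REGS_BUFSIZE and the next call is handed a bogus size. scnprintf() returns the number of bytes actually stored, excluding the NUL, which keeps the running offset honest. The safe accumulation pattern in miniature:

char buf[32];
int len = 0;

len += scnprintf(buf + len, sizeof(buf) - len, "IER: 0x%02x\n", 0x05);
len += scnprintf(buf + len, sizeof(buf) - len, "IIR: 0x%02x\n", 0xc1);
/* len never exceeds sizeof(buf) - 1, however long the output gets */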
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 484b7e8d5381..0a12fb11e698 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -768,11 +768,6 @@ static int __init pic32_console_init(void)
}
console_initcall(pic32_console_init);
-static inline bool is_pic32_console_port(struct uart_port *port)
-{
- return port->cons && port->cons->index == port->line;
-}
-
/*
* Late console initialization.
*/
@@ -873,8 +868,7 @@ static int pic32_uart_probe(struct platform_device *pdev)
}
#ifdef CONFIG_SERIAL_PIC32_CONSOLE
- if (is_pic32_console_port(port) &&
- (pic32_console.flags & CON_ENABLED)) {
+ if (uart_console(port) && (pic32_console.flags & CON_ENABLED)) {
/* The peripheral clock has been enabled by console_setup,
* so disable it till the port is used.
*/
diff --git a/drivers/tty/serial/pic32_uart.h b/drivers/tty/serial/pic32_uart.h
index 2f2b56927dc6..b15639cc336b 100644
--- a/drivers/tty/serial/pic32_uart.h
+++ b/drivers/tty/serial/pic32_uart.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* PIC32 Integrated Serial Driver.
*
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 0bd1684cabb3..6119090ce045 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -21,6 +21,7 @@
/* UART specific GENI registers */
#define SE_UART_LOOPBACK_CFG 0x22c
+#define SE_UART_IO_MACRO_CTRL 0x240
#define SE_UART_TX_TRANS_CFG 0x25c
#define SE_UART_TX_WORD_LEN 0x268
#define SE_UART_TX_STOP_BIT_LEN 0x26c
@@ -95,6 +96,12 @@
#define CTS_RTS_SORTED BIT(1)
#define RX_TX_CTS_RTS_SORTED (RX_TX_SORTED | CTS_RTS_SORTED)
+/* UART pin swap value */
+#define DEFAULT_IO_MACRO_IO0_IO1_MASK GENMASK(3, 0)
+#define IO_MACRO_IO0_SEL 0x3
+#define DEFAULT_IO_MACRO_IO2_IO3_MASK GENMASK(15, 4)
+#define IO_MACRO_IO2_IO3_SWAP 0x4640
+
#ifdef CONFIG_CONSOLE_POLL
#define CONSOLE_RX_BYTES_PW 1
#else
@@ -113,12 +120,14 @@ struct qcom_geni_serial_port {
unsigned int baud;
unsigned int tx_bytes_pw;
unsigned int rx_bytes_pw;
- u32 *rx_fifo;
+ void *rx_fifo;
u32 loopback;
bool brk;
unsigned int tx_remaining;
int wakeup_irq;
+ bool rx_tx_swap;
+ bool cts_rts_swap;
};
static const struct uart_ops qcom_geni_console_pops;
@@ -505,7 +514,6 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
{
- unsigned char *buf;
struct tty_port *tport;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
u32 num_bytes_pw = port->tx_fifo_width / BITS_PER_BYTE;
@@ -517,8 +525,7 @@ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
if (drop)
return 0;
- buf = (unsigned char *)port->rx_fifo;
- ret = tty_insert_flip_string(tport, buf, bytes);
+ ret = tty_insert_flip_string(tport, port->rx_fifo, bytes);
if (ret != bytes) {
dev_err(uport->dev, "%s:Unable to push data ret %d_bytes %d\n",
__func__, ret, bytes);
@@ -818,17 +825,7 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
static void qcom_geni_serial_shutdown(struct uart_port *uport)
{
- unsigned long flags;
-
- /* Stop the console before stopping the current tx */
- if (uart_console(uport))
- console_stop(uport->cons);
-
disable_irq(uport->irq);
- spin_lock_irqsave(&uport->lock, flags);
- qcom_geni_serial_stop_tx(uport);
- qcom_geni_serial_stop_rx(uport);
- spin_unlock_irqrestore(&uport->lock, flags);
}
static int qcom_geni_serial_port_setup(struct uart_port *uport)
@@ -836,6 +833,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
u32 proto;
+ u32 pin_swap;
if (uart_console(uport)) {
port->tx_bytes_pw = 1;
@@ -856,6 +854,20 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
get_tx_fifo_size(port);
writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
+
+ pin_swap = readl(uport->membase + SE_UART_IO_MACRO_CTRL);
+ if (port->rx_tx_swap) {
+ pin_swap &= ~DEFAULT_IO_MACRO_IO2_IO3_MASK;
+ pin_swap |= IO_MACRO_IO2_IO3_SWAP;
+ }
+ if (port->cts_rts_swap) {
+ pin_swap &= ~DEFAULT_IO_MACRO_IO0_IO1_MASK;
+ pin_swap |= IO_MACRO_IO0_SEL;
+ }
+ /* Configure this register if RX-TX, CTS-RTS pins are swapped */
+ if (port->rx_tx_swap || port->cts_rts_swap)
+ writel(pin_swap, uport->membase + SE_UART_IO_MACRO_CTRL);
+
/*
* Make an unconditional cancel on the main sequencer to reset
* it else we could end up in data loss scenarios.
@@ -868,12 +880,6 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
false, false, true);
geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
geni_se_select_mode(&port->se, GENI_SE_FIFO);
- if (!uart_console(uport)) {
- port->rx_fifo = devm_kcalloc(uport->dev,
- port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
- if (!port->rx_fifo)
- return -ENOMEM;
- }
port->setup = true;
return 0;
@@ -1284,6 +1290,13 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
+ if (!console) {
+ port->rx_fifo = devm_kcalloc(uport->dev,
+ port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
+ if (!port->rx_fifo)
+ return -ENOMEM;
+ }
+
port->name = devm_kasprintf(uport->dev, GFP_KERNEL,
"qcom_geni_serial_%s%d",
uart_console(uport) ? "console" : "uart", uport->line);
@@ -1299,6 +1312,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
if (!console)
port->wakeup_irq = platform_get_irq_optional(pdev, 1);
+ if (of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"))
+ port->rx_tx_swap = true;
+
+ if (of_property_read_bool(pdev->dev.of_node, "cts-rts-swap"))
+ port->cts_rts_swap = true;
+
uport->private_data = drv;
platform_set_drvdata(pdev, port);
port->handle_rx = console ? handle_rx_console : handle_rx_uart;
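For the pin-swap masks above it helps to expand GENMASK(): GENMASK(h, l) sets bits h down to l inclusive, so DEFAULT_IO_MACRO_IO2_IO3_MASK is GENMASK(15, 4) = 0xfff0 and DEFAULT_IO_MACRO_IO0_IO1_MASK is GENMASK(3, 0) = 0xf; each field is cleared before the swap value is OR-ed in. The arithmetic as compile-time checks:

#include <linux/bits.h>
#include <linux/build_bug.h>

static_assert(GENMASK(3, 0)  == 0x000f);        /* IO0/IO1 field */
static_assert(GENMASK(15, 4) == 0xfff0);        /* IO2/IO3 field */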
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 7d3ae31cc720..06e8071d5601 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -329,7 +329,7 @@ struct sc16is7xx_port {
struct task_struct *kworker_task;
struct kthread_work irq_work;
struct mutex efr_lock;
- struct sc16is7xx_one p[0];
+ struct sc16is7xx_one p[];
};
static unsigned long sc16is7xx_lines;
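The sc16is7xx one-liner is part of a tree-wide cleanup: the zero-length array 'p[0]', a GCC extension, becomes a C99 flexible array member, which compilers and bounds checkers understand. The allocation itself is unchanged; struct_size() from <linux/overflow.h> is the idiomatic way to size such a struct, sketched here with hypothetical names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct port_one { int line; };

struct chip {
        int nr_ports;
        struct port_one p[];    /* flexible array: must be last member */
};

struct chip *c = kzalloc(struct_size(c, p, 2), GFP_KERNEL);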
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 76e506ee335c..66a5e2faf57e 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/serial.h> /* for serial_state and serial_icounter_struct */
#include <linux/serial_core.h>
+#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/security.h>
@@ -40,6 +41,8 @@ static struct lock_class_key port_lock_key;
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
+#define SYSRQ_TIMEOUT (HZ * 5)
+
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -1908,6 +1911,24 @@ static int uart_proc_show(struct seq_file *m, void *v)
}
#endif
+static inline bool uart_console_enabled(struct uart_port *port)
+{
+ return uart_console(port) && (port->cons->flags & CON_ENABLED);
+}
+
+/*
+ * Ensure that the serial console lock is initialised early.
+ * If this port is a console, then the spinlock is already initialised.
+ */
+static inline void uart_port_spin_lock_init(struct uart_port *port)
+{
+ if (uart_console(port))
+ return;
+
+ spin_lock_init(&port->lock);
+ lockdep_set_class(&port->lock, &port_lock_key);
+}
+
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/**
* uart_console_write - write a console message to a serial port
@@ -2060,16 +2081,7 @@ uart_set_options(struct uart_port *port, struct console *co,
struct ktermios termios;
static struct ktermios dummy;
- /*
- * Ensure that the serial console lock is initialised
- * early.
- * If this port is a console, then the spinlock is already
- * initialised.
- */
- if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) {
- spin_lock_init(&port->lock);
- lockdep_set_class(&port->lock, &port_lock_key);
- }
+ uart_port_spin_lock_init(port);
memset(&termios, 0, sizeof(struct ktermios));
@@ -2605,7 +2617,7 @@ struct tty_driver *uart_console_device(struct console *co, int *index)
}
EXPORT_SYMBOL_GPL(uart_console_device);
-static ssize_t uart_get_attr_uartclk(struct device *dev,
+static ssize_t uartclk_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2615,7 +2627,7 @@ static ssize_t uart_get_attr_uartclk(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.baud_base * 16);
}
-static ssize_t uart_get_attr_type(struct device *dev,
+static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2624,7 +2636,8 @@ static ssize_t uart_get_attr_type(struct device *dev,
uart_get_info(port, &tmp);
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.type);
}
-static ssize_t uart_get_attr_line(struct device *dev,
+
+static ssize_t line_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2634,7 +2647,7 @@ static ssize_t uart_get_attr_line(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.line);
}
-static ssize_t uart_get_attr_port(struct device *dev,
+static ssize_t port_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2648,7 +2661,7 @@ static ssize_t uart_get_attr_port(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%lX\n", ioaddr);
}
-static ssize_t uart_get_attr_irq(struct device *dev,
+static ssize_t irq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2658,7 +2671,7 @@ static ssize_t uart_get_attr_irq(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.irq);
}
-static ssize_t uart_get_attr_flags(struct device *dev,
+static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2668,7 +2681,7 @@ static ssize_t uart_get_attr_flags(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%X\n", tmp.flags);
}
-static ssize_t uart_get_attr_xmit_fifo_size(struct device *dev,
+static ssize_t xmit_fifo_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2678,8 +2691,7 @@ static ssize_t uart_get_attr_xmit_fifo_size(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.xmit_fifo_size);
}
-
-static ssize_t uart_get_attr_close_delay(struct device *dev,
+static ssize_t close_delay_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2689,8 +2701,7 @@ static ssize_t uart_get_attr_close_delay(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.close_delay);
}
-
-static ssize_t uart_get_attr_closing_wait(struct device *dev,
+static ssize_t closing_wait_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2700,7 +2711,7 @@ static ssize_t uart_get_attr_closing_wait(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.closing_wait);
}
-static ssize_t uart_get_attr_custom_divisor(struct device *dev,
+static ssize_t custom_divisor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2710,7 +2721,7 @@ static ssize_t uart_get_attr_custom_divisor(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.custom_divisor);
}
-static ssize_t uart_get_attr_io_type(struct device *dev,
+static ssize_t io_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2720,7 +2731,7 @@ static ssize_t uart_get_attr_io_type(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.io_type);
}
-static ssize_t uart_get_attr_iomem_base(struct device *dev,
+static ssize_t iomem_base_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2730,7 +2741,7 @@ static ssize_t uart_get_attr_iomem_base(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%lX\n", (unsigned long)tmp.iomem_base);
}
-static ssize_t uart_get_attr_iomem_reg_shift(struct device *dev,
+static ssize_t iomem_reg_shift_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct serial_struct tmp;
@@ -2740,40 +2751,92 @@ static ssize_t uart_get_attr_iomem_reg_shift(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", tmp.iomem_reg_shift);
}
-static DEVICE_ATTR(type, S_IRUSR | S_IRGRP, uart_get_attr_type, NULL);
-static DEVICE_ATTR(line, S_IRUSR | S_IRGRP, uart_get_attr_line, NULL);
-static DEVICE_ATTR(port, S_IRUSR | S_IRGRP, uart_get_attr_port, NULL);
-static DEVICE_ATTR(irq, S_IRUSR | S_IRGRP, uart_get_attr_irq, NULL);
-static DEVICE_ATTR(flags, S_IRUSR | S_IRGRP, uart_get_attr_flags, NULL);
-static DEVICE_ATTR(xmit_fifo_size, S_IRUSR | S_IRGRP, uart_get_attr_xmit_fifo_size, NULL);
-static DEVICE_ATTR(uartclk, S_IRUSR | S_IRGRP, uart_get_attr_uartclk, NULL);
-static DEVICE_ATTR(close_delay, S_IRUSR | S_IRGRP, uart_get_attr_close_delay, NULL);
-static DEVICE_ATTR(closing_wait, S_IRUSR | S_IRGRP, uart_get_attr_closing_wait, NULL);
-static DEVICE_ATTR(custom_divisor, S_IRUSR | S_IRGRP, uart_get_attr_custom_divisor, NULL);
-static DEVICE_ATTR(io_type, S_IRUSR | S_IRGRP, uart_get_attr_io_type, NULL);
-static DEVICE_ATTR(iomem_base, S_IRUSR | S_IRGRP, uart_get_attr_iomem_base, NULL);
-static DEVICE_ATTR(iomem_reg_shift, S_IRUSR | S_IRGRP, uart_get_attr_iomem_reg_shift, NULL);
+static ssize_t console_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tty_port *port = dev_get_drvdata(dev);
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport;
+ bool console = false;
+
+ mutex_lock(&port->mutex);
+ uport = uart_port_check(state);
+ if (uport)
+ console = uart_console_enabled(uport);
+ mutex_unlock(&port->mutex);
+
+ return sprintf(buf, "%c\n", console ? 'Y' : 'N');
+}
+
+static ssize_t console_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct tty_port *port = dev_get_drvdata(dev);
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport;
+ bool oldconsole, newconsole;
+ int ret;
+
+ ret = kstrtobool(buf, &newconsole);
+ if (ret)
+ return ret;
+
+ mutex_lock(&port->mutex);
+ uport = uart_port_check(state);
+ if (uport) {
+ oldconsole = uart_console_enabled(uport);
+ if (oldconsole && !newconsole) {
+ ret = unregister_console(uport->cons);
+ } else if (!oldconsole && newconsole) {
+ if (uart_console(uport))
+ register_console(uport->cons);
+ else
+ ret = -ENOENT;
+ }
+ } else {
+ ret = -ENXIO;
+ }
+ mutex_unlock(&port->mutex);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RO(uartclk);
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(line);
+static DEVICE_ATTR_RO(port);
+static DEVICE_ATTR_RO(irq);
+static DEVICE_ATTR_RO(flags);
+static DEVICE_ATTR_RO(xmit_fifo_size);
+static DEVICE_ATTR_RO(close_delay);
+static DEVICE_ATTR_RO(closing_wait);
+static DEVICE_ATTR_RO(custom_divisor);
+static DEVICE_ATTR_RO(io_type);
+static DEVICE_ATTR_RO(iomem_base);
+static DEVICE_ATTR_RO(iomem_reg_shift);
+static DEVICE_ATTR_RW(console);
static struct attribute *tty_dev_attrs[] = {
+ &dev_attr_uartclk.attr,
&dev_attr_type.attr,
&dev_attr_line.attr,
&dev_attr_port.attr,
&dev_attr_irq.attr,
&dev_attr_flags.attr,
&dev_attr_xmit_fifo_size.attr,
- &dev_attr_uartclk.attr,
&dev_attr_close_delay.attr,
&dev_attr_closing_wait.attr,
&dev_attr_custom_divisor.attr,
&dev_attr_io_type.attr,
&dev_attr_iomem_base.attr,
&dev_attr_iomem_reg_shift.attr,
- NULL,
- };
+ &dev_attr_console.attr,
+ NULL
+};
static const struct attribute_group tty_dev_attr_group = {
.attrs = tty_dev_attrs,
- };
+};
/**
* uart_add_one_port - attach a driver-defined port structure
@@ -2824,14 +2887,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
goto out;
}
- /*
- * If this port is a console, then the spinlock is already
- * initialised.
- */
- if (!(uart_console(uport) && (uport->cons->flags & CON_ENABLED))) {
- spin_lock_init(&uport->lock);
- lockdep_set_class(&uport->lock, &port_lock_key);
- }
+ uart_port_spin_lock_init(uport);
+
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
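The bulk renaming of the sysfs callbacks above is what allows the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() declarations: those macros derive the callback names from the attribute name (<name>_show, <name>_store), so uart_get_attr_irq() had to become irq_show() first. The convention in miniature, for a hypothetical attribute:

static ssize_t foo_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* hypothetical value */
}
static DEVICE_ATTR_RO(foo);                     /* wires up foo_show() */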
@@ -3082,6 +3139,60 @@ void uart_insert_char(struct uart_port *port, unsigned int status,
}
EXPORT_SYMBOL_GPL(uart_insert_char);
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+static const char sysrq_toggle_seq[] = CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE;
+
+static void uart_sysrq_on(struct work_struct *w)
+{
+ int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq);
+
+ sysrq_toggle_support(1);
+ pr_info("SysRq is enabled by magic sequence '%*pE' on serial\n",
+ sysrq_toggle_seq_len, sysrq_toggle_seq);
+}
+static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
+
+/**
+ * uart_try_toggle_sysrq - Enables SysRq from serial line
+ * @port: uart_port structure where char(s) after BREAK met
+ * @ch: new character in the sequence after received BREAK
+ *
+ * Enables magic SysRq when the required sequence is met on port
+ * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
+ *
+ * Returns false if @ch is out of enabling sequence and should be
+ * handled some other way, true if @ch was consumed.
+ */
+static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
+{
+ int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq);
+
+ if (!sysrq_toggle_seq_len)
+ return false;
+
+ BUILD_BUG_ON(ARRAY_SIZE(sysrq_toggle_seq) >= U8_MAX);
+ if (sysrq_toggle_seq[port->sysrq_seq] != ch) {
+ port->sysrq_seq = 0;
+ return false;
+ }
+
+ if (++port->sysrq_seq < sysrq_toggle_seq_len) {
+ port->sysrq = jiffies + SYSRQ_TIMEOUT;
+ return true;
+ }
+
+ schedule_work(&sysrq_enable_work);
+
+ port->sysrq = 0;
+ return true;
+}
+#else
+static inline bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
+{
+ return false;
+}
+#endif
+
int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
{
if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
@@ -3091,9 +3202,13 @@ int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
return 0;
if (ch && time_before(jiffies, port->sysrq)) {
- handle_sysrq(ch);
- port->sysrq = 0;
- return 1;
+ if (sysrq_mask()) {
+ handle_sysrq(ch);
+ port->sysrq = 0;
+ return 1;
+ }
+ if (uart_try_toggle_sysrq(port, ch))
+ return 1;
}
port->sysrq = 0;
@@ -3110,9 +3225,13 @@ int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
return 0;
if (ch && time_before(jiffies, port->sysrq)) {
- port->sysrq_ch = ch;
- port->sysrq = 0;
- return 1;
+ if (sysrq_mask()) {
+ port->sysrq_ch = ch;
+ port->sysrq = 0;
+ return 1;
+ }
+ if (uart_try_toggle_sysrq(port, ch))
+ return 1;
}
port->sysrq = 0;
@@ -3120,22 +3239,19 @@ int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
}
EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char);
-void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags)
+__releases(&port->lock)
{
- int sysrq_ch;
+ if (port->has_sysrq) {
+ int sysrq_ch = port->sysrq_ch;
- if (!port->has_sysrq) {
- spin_unlock_irqrestore(&port->lock, irqflags);
- return;
+ port->sysrq_ch = 0;
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+ } else {
+ spin_unlock_irqrestore(&port->lock, flags);
}
-
- sysrq_ch = port->sysrq_ch;
- port->sysrq_ch = 0;
-
- spin_unlock_irqrestore(&port->lock, irqflags);
-
- if (sysrq_ch)
- handle_sysrq(sysrq_ch);
}
EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq);
@@ -3149,14 +3265,12 @@ int uart_handle_break(struct uart_port *port)
if (port->handle_break)
port->handle_break(port);
- if (port->has_sysrq) {
- if (port->cons && port->cons->index == port->line) {
- if (!port->sysrq) {
- port->sysrq = jiffies + HZ*5;
- return 1;
- }
- port->sysrq = 0;
+ if (port->has_sysrq && uart_console(port)) {
+ if (!port->sysrq) {
+ port->sysrq = jiffies + SYSRQ_TIMEOUT;
+ return 1;
}
+ port->sysrq = 0;
}
if (port->flags & UPF_SAK)
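The new uart_try_toggle_sysrq() above is a small per-port state machine over CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE: each character after a BREAK either advances port->sysrq_seq, resets it on mismatch, or, on a full match, defers the actual toggle to a workqueue so it runs outside the interrupt path. The matcher reduced to plain, kernel-free C, with a hypothetical "~~b" sequence:

#include <stdbool.h>
#include <string.h>

static const char seq[] = "~~b";        /* hypothetical toggle sequence */

/* Returns true if ch was consumed as part of (or completing) seq. */
static bool try_toggle(unsigned int *pos, char ch)
{
        if (seq[*pos] != ch) {
                *pos = 0;               /* mismatch: restart matching */
                return false;
        }
        if (++(*pos) < strlen(seq))
                return true;            /* partial match so far */
        *pos = 0;                       /* full match: toggle sysrq here */
        return true;
}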
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index 1b2ff503b2c2..b134a0ffc894 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Helpers for controlling modem lines via GPIO
*
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index c073aa7001c4..e1179e74a2b8 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -870,9 +870,16 @@ static void sci_receive_chars(struct uart_port *port)
tty_insert_flip_char(tport, c, TTY_NORMAL);
} else {
for (i = 0; i < count; i++) {
- char c = serial_port_in(port, SCxRDR);
-
- status = serial_port_in(port, SCxSR);
+ char c;
+
+ if (port->type == PORT_SCIF ||
+ port->type == PORT_HSCIF) {
+ status = serial_port_in(port, SCxSR);
+ c = serial_port_in(port, SCxRDR);
+ } else {
+ c = serial_port_in(port, SCxRDR);
+ status = serial_port_in(port, SCxSR);
+ }
if (uart_handle_sysrq_char(port, c)) {
count--; i--;
continue;
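The sh-sci hunk appears to encode a hardware difference in register semantics: on SCIF and HSCIF, SCxSR reports the status of the character that will next be read from SCxRDR, so status must be sampled before the data read pops the FIFO; on plain SCI, SCxSR describes the character previously read, hence the opposite order. Reduced to its skeleton:

if (port->type == PORT_SCIF || port->type == PORT_HSCIF) {
        status = serial_port_in(port, SCxSR);   /* status of next char */
        c = serial_port_in(port, SCxRDR);       /* then pop it */
} else {
        c = serial_port_in(port, SCxRDR);       /* pop first */
        status = serial_port_in(port, SCxSR);   /* status of what we read */
}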
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index d5f81b98e4d7..0b5110dad051 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -618,10 +618,10 @@ static void sifive_serial_shutdown(struct uart_port *port)
*
* On the V0 SoC, the UART IP block is derived from the CPU clock source
* after a synchronous divide-by-two divider, so any CPU clock rate change
- * requires the UART baud rate to be updated. This presumably could corrupt any
- * serial word currently being transmitted or received. It would probably
- * be better to stop receives and transmits, then complete the baud rate
- * change, then re-enable them.
+ * requires the UART baud rate to be updated. This presumably corrupts any
+ * serial word currently being transmitted or received. In order to avoid
+ * corrupting the output data stream, we drain the transmit queue before
+ * allowing the clock's rate to be changed.
*/
static int sifive_serial_clk_notifier(struct notifier_block *nb,
unsigned long event, void *data)
@@ -629,6 +629,26 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb,
struct clk_notifier_data *cnd = data;
struct sifive_serial_port *ssp = notifier_to_sifive_serial_port(nb);
+ if (event == PRE_RATE_CHANGE) {
+ /*
+ * The TX watermark is always set to 1 by this driver, which
+ * means that the TX busy bit will lower when there are 0 bytes
+ * left in the TX queue -- in other words, when the TX FIFO is
+ * empty.
+ */
+ __ssp_wait_for_xmitr(ssp);
+ /*
+ * On the cycle the TX FIFO goes empty there is still a full
+ * UART frame left to be transmitted in the shift register.
+ * The UART provides no way for software to directly determine
+ * when that last frame has been transmitted, so we just sleep
+ * here instead. As we're not tracking the number of stop bits,
+ * they're just worst-cased here. The rest of the serial
+ * framing parameters aren't configurable by software.
+ */
+ udelay(DIV_ROUND_UP(12 * 1000 * 1000, ssp->baud_rate));
+ }
+
if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) {
ssp->clkin_rate = cnd->new_rate;
__ssp_update_div(ssp);
@@ -709,6 +729,29 @@ static const char *sifive_serial_type(struct uart_port *port)
return port->type == PORT_SIFIVE_V0 ? "SiFive UART v0" : NULL;
}
+#ifdef CONFIG_CONSOLE_POLL
+static int sifive_serial_poll_get_char(struct uart_port *port)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+ char is_empty, ch;
+
+ ch = __ssp_receive_char(ssp, &is_empty);
+ if (is_empty)
+ return NO_POLL_CHAR;
+
+ return ch;
+}
+
+static void sifive_serial_poll_put_char(struct uart_port *port,
+ unsigned char c)
+{
+ struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+
+ __ssp_wait_for_xmitr(ssp);
+ __ssp_transmit_char(ssp, c);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
/*
* Early console support
*/
@@ -840,6 +883,7 @@ console_initcall(sifive_console_init);
static void __ssp_add_console_port(struct sifive_serial_port *ssp)
{
+ spin_lock_init(&ssp->port.lock);
sifive_serial_console_ports[ssp->port.line] = ssp;
}
@@ -877,6 +921,10 @@ static const struct uart_ops sifive_serial_uops = {
.request_port = sifive_serial_request_port,
.config_port = sifive_serial_config_port,
.verify_port = sifive_serial_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = sifive_serial_poll_get_char,
+ .poll_put_char = sifive_serial_poll_put_char,
+#endif
};
static struct uart_driver sifive_serial_uart_driver = {
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 637b09d3fe79..fb88ac565227 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Drivers for CSR SiRFprimaII onboard UARTs.
*
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 3d3c70634589..9a7ae6384edf 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -1013,7 +1013,7 @@ static void sprd_console_write(struct console *co, const char *s,
spin_unlock_irqrestore(&port->lock, flags);
}
-static int __init sprd_console_setup(struct console *co, char *options)
+static int sprd_console_setup(struct console *co, char *options)
{
struct sprd_uart_port *sprd_uart_port;
int baud = 115200;
@@ -1102,29 +1102,6 @@ static struct uart_driver sprd_uart_driver = {
.cons = SPRD_CONSOLE,
};
-static int sprd_probe_dt_alias(int index, struct device *dev)
-{
- struct device_node *np;
- int ret = index;
-
- if (!IS_ENABLED(CONFIG_OF))
- return ret;
-
- np = dev->of_node;
- if (!np)
- return ret;
-
- ret = of_alias_get_id(np, "serial");
- if (ret < 0)
- ret = index;
- else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
- dev_warn(dev, "requested serial port %d not available.\n", ret);
- ret = index;
- }
-
- return ret;
-}
-
static int sprd_remove(struct platform_device *dev)
{
struct sprd_uart_port *sup = platform_get_drvdata(dev);
@@ -1132,14 +1109,13 @@ static int sprd_remove(struct platform_device *dev)
if (sup) {
uart_remove_one_port(&sprd_uart_driver, &sup->port);
sprd_port[sup->port.line] = NULL;
+ sprd_rx_free_buf(sup);
sprd_ports_num--;
}
if (!sprd_ports_num)
uart_unregister_driver(&sprd_uart_driver);
- sprd_rx_free_buf(sup);
-
return 0;
}
@@ -1147,7 +1123,8 @@ static bool sprd_uart_is_console(struct uart_port *uport)
{
struct console *cons = sprd_uart_driver.cons;
- if (cons && cons->index >= 0 && cons->index == uport->line)
+ if ((cons && cons->index >= 0 && cons->index == uport->line) ||
+ of_console_check(uport->dev->of_node, SPRD_TTY_NAME, uport->line))
return true;
return false;
@@ -1203,14 +1180,11 @@ static int sprd_probe(struct platform_device *pdev)
int index;
int ret;
- for (index = 0; index < ARRAY_SIZE(sprd_port); index++)
- if (sprd_port[index] == NULL)
- break;
-
- if (index == ARRAY_SIZE(sprd_port))
- return -EBUSY;
-
- index = sprd_probe_dt_alias(index, &pdev->dev);
+ index = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (index < 0 || index >= ARRAY_SIZE(sprd_port)) {
+ dev_err(&pdev->dev, "got a wrong serial alias id %d\n", index);
+ return -EINVAL;
+ }
sprd_port[index] = devm_kzalloc(&pdev->dev, sizeof(*sprd_port[index]),
GFP_KERNEL);
@@ -1262,10 +1236,8 @@ static int sprd_probe(struct platform_device *pdev)
sprd_ports_num++;
ret = uart_add_one_port(&sprd_uart_driver, up);
- if (ret) {
- sprd_port[index] = NULL;
+ if (ret)
sprd_remove(pdev);
- }
platform_set_drvdata(pdev, up);
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index a175c1094dc8..db8bf0d4982d 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics SA 2017
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index eafada8fb6fa..e35073e93a5b 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -567,6 +567,9 @@ static int hv_probe(struct platform_device *op)
sunserial_console_match(&sunhv_console, op->dev.of_node,
&sunhv_reg, port->line, false);
+ /* We need to initialize lock even for non-registered console */
+ spin_lock_init(&port->lock);
+
err = uart_add_one_port(&sunhv_reg, port);
if (err)
goto out_unregister_driver;
diff --git a/drivers/tty/serial/timbuart.h b/drivers/tty/serial/timbuart.h
index fb00b172117d..007e59af636d 100644
--- a/drivers/tty/serial/timbuart.h
+++ b/drivers/tty/serial/timbuart.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* timbuart.c timberdale FPGA GPIO driver
* Copyright (c) 2009 Intel Corporation
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 98db9dc168ff..35e9e8faf8de 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -26,13 +26,15 @@
#define CDNS_UART_TTY_NAME "ttyPS"
#define CDNS_UART_NAME "xuartps"
+#define CDNS_UART_MAJOR 0 /* use dynamic node allocation */
+#define CDNS_UART_MINOR 0 /* works best with devtmpfs */
+#define CDNS_UART_NR_PORTS 16
#define CDNS_UART_FIFO_SIZE 64 /* FIFO size */
#define CDNS_UART_REGISTER_SPACE 0x1000
#define TX_TIMEOUT 500000
/* Rx Trigger level */
static int rx_trigger_level = 56;
-static int uartps_major;
module_param(rx_trigger_level, uint, 0444);
MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes");
@@ -188,7 +190,6 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
* @pclk: APB clock
* @cdns_uart_driver: Pointer to UART driver
* @baud: Current baud rate
- * @id: Port ID
* @clk_rate_change_nb: Notifier block for clock changes
* @quirks: Flags for RXBS support.
*/
@@ -198,7 +199,6 @@ struct cdns_uart {
struct clk *pclk;
struct uart_driver *cdns_uart_driver;
unsigned int baud;
- int id;
struct notifier_block clk_rate_change_nb;
u32 quirks;
bool cts_override;
@@ -650,8 +650,8 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port)
unsigned int status;
status = readl(port->membase + CDNS_UART_SR) &
- CDNS_UART_SR_TXEMPTY;
- return status ? TIOCSER_TEMT : 0;
+ (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE);
+ return (status == CDNS_UART_SR_TXEMPTY) ? TIOCSER_TEMT : 0;
}
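The widened mask above only reports TIOCSER_TEMT when the FIFO is empty and the transmitter has gone idle; a decode of the three reachable states, assuming the bit definitions used by the driver:

/*
 * status = SR & (TXEMPTY | TACTIVE):
 *   TXEMPTY clear               -> bytes still queued        -> 0
 *   TXEMPTY set,  TACTIVE set   -> last frame still shifting -> 0
 *   TXEMPTY set,  TACTIVE clear -> truly drained             -> TIOCSER_TEMT
 */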
/**
@@ -693,20 +693,8 @@ static void cdns_uart_set_termios(struct uart_port *port,
u32 cval = 0;
unsigned int baud, minbaud, maxbaud;
unsigned long flags;
- unsigned int ctrl_reg, mode_reg, val;
- int err;
-
- /* Wait for the transmit FIFO to empty before making changes */
- if (!(readl(port->membase + CDNS_UART_CR) &
- CDNS_UART_CR_TX_DIS)) {
- err = readl_poll_timeout(port->membase + CDNS_UART_SR,
- val, (val & CDNS_UART_SR_TXEMPTY),
- 1000, TX_TIMEOUT);
- if (err) {
- dev_err(port->dev, "timed out waiting for tx empty");
- return;
- }
- }
+ unsigned int ctrl_reg, mode_reg;
+
spin_lock_irqsave(&port->lock, flags);
/* Disable the TX and RX to set baud rate */
@@ -1145,6 +1133,8 @@ static const struct uart_ops cdns_uart_ops = {
#endif
};
+static struct uart_driver cdns_uart_uart_driver;
+
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
/**
* cdns_uart_console_putchar - write the character to the FIFO buffer
@@ -1284,6 +1274,16 @@ static int cdns_uart_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
+
+static struct console cdns_uart_console = {
+ .name = CDNS_UART_TTY_NAME,
+ .write = cdns_uart_console_write,
+ .device = uart_console_device,
+ .setup = cdns_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1, /* Specified on the cmdline (e.g. console=ttyPS<n>) */
+ .data = &cdns_uart_uart_driver,
+};
#endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */
#ifdef CONFIG_PM_SLEEP
@@ -1415,89 +1415,8 @@ static const struct of_device_id cdns_uart_of_match[] = {
};
MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
-/*
- * Maximum number of instances without alias IDs but if there is alias
- * which target "< MAX_UART_INSTANCES" range this ID can't be used.
- */
-#define MAX_UART_INSTANCES 32
-
-/* Stores static aliases list */
-static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES);
-static int alias_bitmap_initialized;
-
-/* Stores actual bitmap of allocated IDs with alias IDs together */
-static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES);
-/* Protect bitmap operations to have unique IDs */
-static DEFINE_MUTEX(bitmap_lock);
-
-static int cdns_get_id(struct platform_device *pdev)
-{
- int id, ret;
-
- mutex_lock(&bitmap_lock);
-
- /* Alias list is stable that's why get alias bitmap only once */
- if (!alias_bitmap_initialized) {
- ret = of_alias_get_alias_list(cdns_uart_of_match, "serial",
- alias_bitmap, MAX_UART_INSTANCES);
- if (ret && ret != -EOVERFLOW) {
- mutex_unlock(&bitmap_lock);
- return ret;
- }
-
- alias_bitmap_initialized++;
- }
-
- /* Make sure that alias ID is not taken by instance without alias */
- bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES);
-
- dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n",
- MAX_UART_INSTANCES, bitmap);
-
- /* Look for a serialN alias */
- id = of_alias_get_id(pdev->dev.of_node, "serial");
- if (id < 0) {
- dev_warn(&pdev->dev,
- "No serial alias passed. Using the first free id\n");
-
- /*
- * Start with id 0 and check if there is no serial0 alias
- * which points to device which is compatible with this driver.
- * If alias exists then try next free position.
- */
- id = 0;
-
- for (;;) {
- dev_info(&pdev->dev, "Checking id %d\n", id);
- id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id);
-
- /* No free empty instance */
- if (id == MAX_UART_INSTANCES) {
- dev_err(&pdev->dev, "No free ID\n");
- mutex_unlock(&bitmap_lock);
- return -EINVAL;
- }
-
- dev_dbg(&pdev->dev, "The empty id is %d\n", id);
- /* Check if ID is empty */
- if (!test_and_set_bit(id, bitmap)) {
- /* Break the loop if bit is taken */
- dev_dbg(&pdev->dev,
- "Selected ID %d allocation passed\n",
- id);
- break;
- }
- dev_dbg(&pdev->dev,
- "Selected ID %d allocation failed\n", id);
- /* if taking bit fails then try next one */
- id++;
- }
- }
-
- mutex_unlock(&bitmap_lock);
-
- return id;
-}
+/* Temporary variable for storing number of instances */
+static int instances;
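The counter above implements a register-on-first-probe / unregister-on-last-remove lifecycle for the now-static uart_driver; probe and remove are serialized by the driver core, so a plain int suffices. The general shape, with hypothetical helper names (the patch open-codes this in probe()/remove(); error unwinding omitted):

static int cdns_uart_driver_get(void)
{
	/* first bound device registers the shared uart_driver */
	if (!instances++)
		return uart_register_driver(&cdns_uart_uart_driver);
	return 0;
}

static void cdns_uart_driver_put(void)
{
	/* last bound device unregisters it again */
	if (!--instances)
		uart_unregister_driver(&cdns_uart_uart_driver);
}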
/**
* cdns_uart_probe - Platform driver probe
@@ -1507,16 +1426,11 @@ static int cdns_get_id(struct platform_device *pdev)
*/
static int cdns_uart_probe(struct platform_device *pdev)
{
- int rc, irq;
+ int rc, id, irq;
struct uart_port *port;
struct resource *res;
struct cdns_uart *cdns_uart_data;
const struct of_device_id *match;
- struct uart_driver *cdns_uart_uart_driver;
- char *driver_name;
-#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
- struct console *cdns_uart_console;
-#endif
cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
GFP_KERNEL);
@@ -1526,64 +1440,36 @@ static int cdns_uart_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
- cdns_uart_uart_driver = devm_kzalloc(&pdev->dev,
- sizeof(*cdns_uart_uart_driver),
- GFP_KERNEL);
- if (!cdns_uart_uart_driver)
- return -ENOMEM;
-
- cdns_uart_data->id = cdns_get_id(pdev);
- if (cdns_uart_data->id < 0)
- return cdns_uart_data->id;
+ /* Look for a serialN alias */
+ id = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (id < 0)
+ id = 0;
- /* There is a need to use unique driver name */
- driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d",
- CDNS_UART_NAME, cdns_uart_data->id);
- if (!driver_name) {
- rc = -ENOMEM;
- goto err_out_id;
+ if (id >= CDNS_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Cannot get uart_port structure\n");
+ return -ENODEV;
}
- cdns_uart_uart_driver->owner = THIS_MODULE;
- cdns_uart_uart_driver->driver_name = driver_name;
- cdns_uart_uart_driver->dev_name = CDNS_UART_TTY_NAME;
- cdns_uart_uart_driver->major = uartps_major;
- cdns_uart_uart_driver->minor = cdns_uart_data->id;
- cdns_uart_uart_driver->nr = 1;
-
+ if (!cdns_uart_uart_driver.state) {
+ cdns_uart_uart_driver.owner = THIS_MODULE;
+ cdns_uart_uart_driver.driver_name = CDNS_UART_NAME;
+ cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME;
+ cdns_uart_uart_driver.major = CDNS_UART_MAJOR;
+ cdns_uart_uart_driver.minor = CDNS_UART_MINOR;
+ cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
- cdns_uart_console = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_console),
- GFP_KERNEL);
- if (!cdns_uart_console) {
- rc = -ENOMEM;
- goto err_out_id;
- }
-
- strncpy(cdns_uart_console->name, CDNS_UART_TTY_NAME,
- sizeof(cdns_uart_console->name));
- cdns_uart_console->index = cdns_uart_data->id;
- cdns_uart_console->write = cdns_uart_console_write;
- cdns_uart_console->device = uart_console_device;
- cdns_uart_console->setup = cdns_uart_console_setup;
- cdns_uart_console->flags = CON_PRINTBUFFER;
- cdns_uart_console->data = cdns_uart_uart_driver;
- cdns_uart_uart_driver->cons = cdns_uart_console;
+ cdns_uart_uart_driver.cons = &cdns_uart_console;
+ cdns_uart_console.index = id;
#endif
- rc = uart_register_driver(cdns_uart_uart_driver);
- if (rc < 0) {
- dev_err(&pdev->dev, "Failed to register driver\n");
- goto err_out_id;
+ rc = uart_register_driver(&cdns_uart_uart_driver);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to register driver\n");
+ return rc;
+ }
}
- cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver;
-
- /*
- * Setting up proper name_base needs to be done after uart
- * registration because tty_driver structure is not filled.
- * name_base is 0 by default.
- */
- cdns_uart_uart_driver->tty_driver->name_base = cdns_uart_data->id;
+ cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver;
match = of_match_node(cdns_uart_of_match, pdev->dev.of_node);
if (match && match->data) {
@@ -1661,6 +1547,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->ops = &cdns_uart_ops;
port->fifosize = CDNS_UART_FIFO_SIZE;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE);
+ port->line = id;
/*
* Register the port.
@@ -1692,7 +1579,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
console_port = port;
#endif
- rc = uart_add_one_port(cdns_uart_uart_driver, port);
+ rc = uart_add_one_port(&cdns_uart_uart_driver, port);
if (rc) {
dev_err(&pdev->dev,
"uart_add_one_port() failed; err=%i\n", rc);
@@ -1702,13 +1589,15 @@ static int cdns_uart_probe(struct platform_device *pdev)
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
/* This is not the port used for the console, so clean it up */
if (console_port == port &&
- !(cdns_uart_uart_driver->cons->flags & CON_ENABLED))
+ !(cdns_uart_uart_driver.cons->flags & CON_ENABLED))
console_port = NULL;
#endif
- uartps_major = cdns_uart_uart_driver->tty_driver->major;
cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
"cts-override");
+
+ instances++;
+
return 0;
err_out_pm_disable:
@@ -1724,12 +1613,8 @@ err_out_clk_disable:
err_out_clk_dis_pclk:
clk_disable_unprepare(cdns_uart_data->pclk);
err_out_unregister_driver:
- uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
-err_out_id:
- mutex_lock(&bitmap_lock);
- if (cdns_uart_data->id < MAX_UART_INSTANCES)
- clear_bit(cdns_uart_data->id, bitmap);
- mutex_unlock(&bitmap_lock);
+ if (!instances)
+ uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
return rc;
}
@@ -1752,10 +1637,6 @@ static int cdns_uart_remove(struct platform_device *pdev)
#endif
rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port);
port->mapbase = 0;
- mutex_lock(&bitmap_lock);
- if (cdns_uart_data->id < MAX_UART_INSTANCES)
- clear_bit(cdns_uart_data->id, bitmap);
- mutex_unlock(&bitmap_lock);
clk_disable_unprepare(cdns_uart_data->uartclk);
clk_disable_unprepare(cdns_uart_data->pclk);
pm_runtime_disable(&pdev->dev);
@@ -1768,13 +1649,8 @@ static int cdns_uart_remove(struct platform_device *pdev)
console_port = NULL;
#endif
- /* If this is last instance major number should be initialized */
- mutex_lock(&bitmap_lock);
- if (bitmap_empty(bitmap, MAX_UART_INSTANCES))
- uartps_major = 0;
- mutex_unlock(&bitmap_lock);
-
- uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
+ if (!--instances)
+ uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
return rc;
}
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index f724962a5906..0dc3878794fd 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -63,6 +63,19 @@ static bool sysrq_on(void)
return sysrq_enabled || sysrq_always_enabled;
}
+/**
+ * sysrq_mask - Getter for sysrq_enabled mask.
+ *
+ * Return: 1 if sysrq is always enabled, enabled sysrq_key_op mask otherwise.
+ */
+int sysrq_mask(void)
+{
+ if (sysrq_always_enabled)
+ return 1;
+ return sysrq_enabled;
+}
+EXPORT_SYMBOL_GPL(sysrq_mask);
+
/*
* A value of 1 means 'all', other nonzero values are an op mask:
*/
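A hedged example of the intended consumer side: a serial driver can now gate its magic-sysrq fast path on whether any sysrq op is enabled at all. The function name is hypothetical, and this assumes the matching sysrq_mask() declaration in <linux/sysrq.h>:

#include <linux/sysrq.h>

/* Only take the sysrq detection path when it can have an effect. */
static bool uart_sysrq_armed(void)
{
	return IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL) && sysrq_mask();
}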
@@ -1046,6 +1059,7 @@ int sysrq_toggle_support(int enable_mask)
return 0;
}
+EXPORT_SYMBOL_GPL(sysrq_toggle_support);
static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
struct sysrq_key_op *remove_op_p)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index d7d2e4b844bc..d54a549c5892 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -35,18 +35,18 @@
/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
#define isspace(c) ((c) == ' ')
-extern void poke_blanked_console(void);
-
/* FIXME: all this needs locking */
-/* Variables for selection control. */
-/* Use a dynamic buffer, instead of static (Dec 1994) */
-struct vc_data *sel_cons; /* must not be deallocated */
-static int use_unicode;
-static volatile int sel_start = -1; /* cleared by clear_selection */
-static int sel_end;
-static int sel_buffer_lth;
-static char *sel_buffer;
-static DEFINE_MUTEX(sel_lock);
+static struct vc_selection {
+ struct mutex lock;
+ struct vc_data *cons; /* must not be deallocated */
+ char *buffer;
+ unsigned int buf_len;
+ volatile int start; /* cleared by clear_selection */
+ int end;
+} vc_sel = {
+ .lock = __MUTEX_INITIALIZER(vc_sel.lock),
+ .start = -1,
+};
/* clear_selection, highlight and highlight_pointer can be called
from interrupt (via scrollback/front) */
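One detail worth flagging in the initializer above: DEFINE_MUTEX() only declares a standalone mutex, so a mutex embedded in a statically initialized struct needs __MUTEX_INITIALIZER() naming the member's full path, exactly as vc_sel does. A minimal sketch with an illustrative struct:

#include <linux/mutex.h>

static struct demo_state {
	struct mutex lock;
	int value;
} demo = {
	.lock = __MUTEX_INITIALIZER(demo.lock),	/* full member path */
	.value = -1,
};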
@@ -54,22 +54,21 @@ static DEFINE_MUTEX(sel_lock);
/* set reverse video on characters s-e of console with selection. */
static inline void highlight(const int s, const int e)
{
- invert_screen(sel_cons, s, e-s+2, 1);
+ invert_screen(vc_sel.cons, s, e-s+2, 1);
}
/* use complementary color to show the pointer */
static inline void highlight_pointer(const int where)
{
- complement_pos(sel_cons, where);
+ complement_pos(vc_sel.cons, where);
}
static u32
-sel_pos(int n)
+sel_pos(int n, bool unicode)
{
- if (use_unicode)
- return screen_glyph_unicode(sel_cons, n / 2);
- return inverse_translate(sel_cons, screen_glyph(sel_cons, n),
- 0);
+ if (unicode)
+ return screen_glyph_unicode(vc_sel.cons, n / 2);
+ return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n), 0);
}
/**
@@ -81,13 +80,18 @@ sel_pos(int n)
void clear_selection(void)
{
highlight_pointer(-1); /* hide the pointer */
- if (sel_start != -1) {
- highlight(sel_start, sel_end);
- sel_start = -1;
+ if (vc_sel.start != -1) {
+ highlight(vc_sel.start, vc_sel.end);
+ vc_sel.start = -1;
}
}
EXPORT_SYMBOL_GPL(clear_selection);
+bool vc_is_sel(struct vc_data *vc)
+{
+ return vc == vc_sel.cons;
+}
+
/*
* User settable table: what characters are to be considered alphabetic?
* 128 bits. Locked by the console lock.
@@ -186,9 +190,10 @@ static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *
struct vc_data *vc = vc_cons[fg_console].d;
int new_sel_start, new_sel_end, spc;
char *bp, *obp;
- int i, ps, pe, multiplier;
+ int i, ps, pe;
u32 c;
- int mode, ret = 0;
+ int ret = 0;
+ bool unicode;
poke_blanked_console();
@@ -211,57 +216,51 @@ static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *
return 0;
}
- if (ps > pe) /* make sel_start <= sel_end */
+ if (ps > pe) /* make vc_sel.start <= vc_sel.end */
swap(ps, pe);
- if (sel_cons != vc_cons[fg_console].d) {
+ if (vc_sel.cons != vc_cons[fg_console].d) {
clear_selection();
- sel_cons = vc_cons[fg_console].d;
+ vc_sel.cons = vc_cons[fg_console].d;
}
- mode = vt_do_kdgkbmode(fg_console);
- if (mode == K_UNICODE)
- use_unicode = 1;
- else
- use_unicode = 0;
-
- switch (v->sel_mode)
- {
- case TIOCL_SELCHAR: /* character-by-character selection */
+ unicode = vt_do_kdgkbmode(fg_console) == K_UNICODE;
+
+ switch (v->sel_mode) {
+ case TIOCL_SELCHAR: /* character-by-character selection */
+ new_sel_start = ps;
+ new_sel_end = pe;
+ break;
+ case TIOCL_SELWORD: /* word-by-word selection */
+ spc = isspace(sel_pos(ps, unicode));
+ for (new_sel_start = ps; ; ps -= 2) {
+ if ((spc && !isspace(sel_pos(ps, unicode))) ||
+ (!spc && !inword(sel_pos(ps, unicode))))
+ break;
new_sel_start = ps;
+ if (!(ps % vc->vc_size_row))
+ break;
+ }
+
+ spc = isspace(sel_pos(pe, unicode));
+ for (new_sel_end = pe; ; pe += 2) {
+ if ((spc && !isspace(sel_pos(pe, unicode))) ||
+ (!spc && !inword(sel_pos(pe, unicode))))
+ break;
new_sel_end = pe;
- break;
- case TIOCL_SELWORD: /* word-by-word selection */
- spc = isspace(sel_pos(ps));
- for (new_sel_start = ps; ; ps -= 2)
- {
- if ((spc && !isspace(sel_pos(ps))) ||
- (!spc && !inword(sel_pos(ps))))
- break;
- new_sel_start = ps;
- if (!(ps % vc->vc_size_row))
- break;
- }
- spc = isspace(sel_pos(pe));
- for (new_sel_end = pe; ; pe += 2)
- {
- if ((spc && !isspace(sel_pos(pe))) ||
- (!spc && !inword(sel_pos(pe))))
- break;
- new_sel_end = pe;
- if (!((pe + 2) % vc->vc_size_row))
- break;
- }
- break;
- case TIOCL_SELLINE: /* line-by-line selection */
- new_sel_start = ps - ps % vc->vc_size_row;
- new_sel_end = pe + vc->vc_size_row
- - pe % vc->vc_size_row - 2;
- break;
- case TIOCL_SELPOINTER:
- highlight_pointer(pe);
- return 0;
- default:
- return -EINVAL;
+ if (!((pe + 2) % vc->vc_size_row))
+ break;
+ }
+ break;
+ case TIOCL_SELLINE: /* line-by-line selection */
+ new_sel_start = rounddown(ps, vc->vc_size_row);
+ new_sel_end = rounddown(pe, vc->vc_size_row) +
+ vc->vc_size_row - 2;
+ break;
+ case TIOCL_SELPOINTER:
+ highlight_pointer(pe);
+ return 0;
+ default:
+ return -EINVAL;
}
/* remove the pointer */
@@ -270,56 +269,56 @@ static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *
/* select to end of line if on trailing space */
if (new_sel_end > new_sel_start &&
!atedge(new_sel_end, vc->vc_size_row) &&
- isspace(sel_pos(new_sel_end))) {
+ isspace(sel_pos(new_sel_end, unicode))) {
for (pe = new_sel_end + 2; ; pe += 2)
- if (!isspace(sel_pos(pe)) ||
+ if (!isspace(sel_pos(pe, unicode)) ||
atedge(pe, vc->vc_size_row))
break;
- if (isspace(sel_pos(pe)))
+ if (isspace(sel_pos(pe, unicode)))
new_sel_end = pe;
}
- if (sel_start == -1) /* no current selection */
+ if (vc_sel.start == -1) /* no current selection */
highlight(new_sel_start, new_sel_end);
- else if (new_sel_start == sel_start)
+ else if (new_sel_start == vc_sel.start)
{
- if (new_sel_end == sel_end) /* no action required */
+ if (new_sel_end == vc_sel.end) /* no action required */
return 0;
- else if (new_sel_end > sel_end) /* extend to right */
- highlight(sel_end + 2, new_sel_end);
+ else if (new_sel_end > vc_sel.end) /* extend to right */
+ highlight(vc_sel.end + 2, new_sel_end);
else /* contract from right */
- highlight(new_sel_end + 2, sel_end);
+ highlight(new_sel_end + 2, vc_sel.end);
}
- else if (new_sel_end == sel_end)
+ else if (new_sel_end == vc_sel.end)
{
- if (new_sel_start < sel_start) /* extend to left */
- highlight(new_sel_start, sel_start - 2);
+ if (new_sel_start < vc_sel.start) /* extend to left */
+ highlight(new_sel_start, vc_sel.start - 2);
else /* contract from left */
- highlight(sel_start, new_sel_start - 2);
+ highlight(vc_sel.start, new_sel_start - 2);
}
else /* some other case; start selection from scratch */
{
clear_selection();
highlight(new_sel_start, new_sel_end);
}
- sel_start = new_sel_start;
- sel_end = new_sel_end;
+ vc_sel.start = new_sel_start;
+ vc_sel.end = new_sel_end;
/* Allocate a new buffer before freeing the old one ... */
- multiplier = use_unicode ? 4 : 1; /* chars can take up to 4 bytes */
- bp = kmalloc_array((sel_end - sel_start) / 2 + 1, multiplier,
+ /* chars can take up to 4 bytes with unicode */
+ bp = kmalloc_array((vc_sel.end - vc_sel.start) / 2 + 1, unicode ? 4 : 1,
GFP_KERNEL);
if (!bp) {
printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
return -ENOMEM;
}
- kfree(sel_buffer);
- sel_buffer = bp;
+ kfree(vc_sel.buffer);
+ vc_sel.buffer = bp;
obp = bp;
- for (i = sel_start; i <= sel_end; i += 2) {
- c = sel_pos(i);
- if (use_unicode)
+ for (i = vc_sel.start; i <= vc_sel.end; i += 2) {
+ c = sel_pos(i, unicode);
+ if (unicode)
bp += store_utf8(c, bp);
else
*bp++ = c;
@@ -335,7 +334,7 @@ static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *
obp = bp;
}
}
- sel_buffer_lth = bp - sel_buffer;
+ vc_sel.buf_len = bp - vc_sel.buffer;
return ret;
}
@@ -344,11 +343,11 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
{
int ret;
- mutex_lock(&sel_lock);
+ mutex_lock(&vc_sel.lock);
console_lock();
ret = __set_selection_kernel(v, tty);
console_unlock();
- mutex_unlock(&sel_lock);
+ mutex_unlock(&vc_sel.lock);
return ret;
}
@@ -380,26 +379,26 @@ int paste_selection(struct tty_struct *tty)
tty_buffer_lock_exclusive(&vc->port);
add_wait_queue(&vc->paste_wait, &wait);
- mutex_lock(&sel_lock);
- while (sel_buffer && sel_buffer_lth > pasted) {
+ mutex_lock(&vc_sel.lock);
+ while (vc_sel.buffer && vc_sel.buf_len > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
if (tty_throttled(tty)) {
- mutex_unlock(&sel_lock);
+ mutex_unlock(&vc_sel.lock);
schedule();
- mutex_lock(&sel_lock);
+ mutex_lock(&vc_sel.lock);
continue;
}
__set_current_state(TASK_RUNNING);
- count = sel_buffer_lth - pasted;
- count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
+ count = vc_sel.buf_len - pasted;
+ count = tty_ldisc_receive_buf(ld, vc_sel.buffer + pasted, NULL,
count);
pasted += count;
}
- mutex_unlock(&sel_lock);
+ mutex_unlock(&vc_sel.lock);
remove_wait_queue(&vc->paste_wait, &wait);
__set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 15d27698054a..48a8199f7845 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -81,6 +81,7 @@
#include <linux/errno.h>
#include <linux/kd.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/console.h>
@@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
- p = kmalloc(memsize, GFP_KERNEL);
+ p = vmalloc(memsize);
if (!p)
return NULL;
@@ -364,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
return uniscr;
}
+static void vc_uniscr_free(struct uni_screen *uniscr)
+{
+ vfree(uniscr);
+}
+
static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
{
- kfree(vc->vc_uni_screen);
+ vc_uniscr_free(vc->vc_uni_screen);
vc->vc_uni_screen = new_uniscr;
}
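Moving the unicode screen buffer to vmalloc() matters because the allocation scales with console geometry and only needs to be virtually contiguous, so it no longer depends on high-order page allocations; the cost is that it must be vfree()d, which the new vc_uniscr_free() wrapper centralizes. A rough size sketch mirroring the driver's own computation, under an assumed large geometry:

static size_t uniscr_size(unsigned int cols, unsigned int rows)
{
	/* glyph grid plus one row pointer per line, as in vc_uniscr_alloc() */
	return cols * rows * sizeof(char32_t) + rows * sizeof(char32_t *);
}

/* uniscr_size(512, 128): 256 KiB of glyphs plus a 1 KiB pointer table */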
@@ -890,8 +896,9 @@ static void hide_softcursor(struct vc_data *vc)
static void hide_cursor(struct vc_data *vc)
{
- if (vc == sel_cons)
+ if (vc_is_sel(vc))
clear_selection();
+
vc->vc_sw->con_cursor(vc, CM_ERASE);
hide_softcursor(vc);
}
@@ -901,7 +908,7 @@ static void set_cursor(struct vc_data *vc)
if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS)
return;
if (vc->vc_deccm) {
- if (vc == sel_cons)
+ if (vc_is_sel(vc))
clear_selection();
add_softcursor(vc);
if ((vc->vc_cursor_type & 0x0f) != 1)
@@ -1074,6 +1081,17 @@ static void visual_deinit(struct vc_data *vc)
module_put(vc->vc_sw->owner);
}
+static void vc_port_destruct(struct tty_port *port)
+{
+ struct vc_data *vc = container_of(port, struct vc_data, port);
+
+ kfree(vc);
+}
+
+static const struct tty_port_operations vc_port_ops = {
+ .destruct = vc_port_destruct,
+};
+
int vc_allocate(unsigned int currcons) /* return 0 on success */
{
struct vt_notifier_param param;
@@ -1099,6 +1117,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
vc_cons[currcons].d = vc;
tty_port_init(&vc->port);
+ vc->port.ops = &vc_port_ops;
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
@@ -1193,7 +1212,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
- if (new_screen_size > (4 << 20))
+ if (new_screen_size > KMALLOC_MAX_SIZE)
return -EINVAL;
newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)
@@ -1207,7 +1226,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
}
}
- if (vc == sel_cons)
+ if (vc_is_sel(vc))
clear_selection();
old_rows = vc->vc_rows;
@@ -1216,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
err = resize_screen(vc, new_cols, new_rows, user);
if (err) {
kfree(newscreen);
- kfree(new_uniscr);
+ vc_uniscr_free(new_uniscr);
return err;
}
@@ -1901,67 +1920,65 @@ static void set_mode(struct vc_data *vc, int on_off)
/* console_lock is held */
static void setterm_command(struct vc_data *vc)
{
- switch(vc->vc_par[0]) {
- case 1: /* set color for underline mode */
- if (vc->vc_can_do_color &&
- vc->vc_par[1] < 16) {
- vc->vc_ulcolor = color_table[vc->vc_par[1]];
- if (vc->vc_underline)
- update_attr(vc);
- }
- break;
- case 2: /* set color for half intensity mode */
- if (vc->vc_can_do_color &&
- vc->vc_par[1] < 16) {
- vc->vc_halfcolor = color_table[vc->vc_par[1]];
- if (vc->vc_intensity == 0)
- update_attr(vc);
- }
- break;
- case 8: /* store colors as defaults */
- vc->vc_def_color = vc->vc_attr;
- if (vc->vc_hi_font_mask == 0x100)
- vc->vc_def_color >>= 1;
- default_attr(vc);
- update_attr(vc);
- break;
- case 9: /* set blanking interval */
- blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60;
- poke_blanked_console();
- break;
- case 10: /* set bell frequency in Hz */
- if (vc->vc_npar >= 1)
- vc->vc_bell_pitch = vc->vc_par[1];
- else
- vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
- break;
- case 11: /* set bell duration in msec */
- if (vc->vc_npar >= 1)
- vc->vc_bell_duration = (vc->vc_par[1] < 2000) ?
- msecs_to_jiffies(vc->vc_par[1]) : 0;
- else
- vc->vc_bell_duration = DEFAULT_BELL_DURATION;
- break;
- case 12: /* bring specified console to the front */
- if (vc->vc_par[1] >= 1 && vc_cons_allocated(vc->vc_par[1] - 1))
- set_console(vc->vc_par[1] - 1);
- break;
- case 13: /* unblank the screen */
- poke_blanked_console();
- break;
- case 14: /* set vesa powerdown interval */
- vesa_off_interval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60 * HZ;
- break;
- case 15: /* activate the previous console */
- set_console(last_console);
- break;
- case 16: /* set cursor blink duration in msec */
- if (vc->vc_npar >= 1 && vc->vc_par[1] >= 50 &&
- vc->vc_par[1] <= USHRT_MAX)
- vc->vc_cur_blink_ms = vc->vc_par[1];
- else
- vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
- break;
+ switch (vc->vc_par[0]) {
+ case 1: /* set color for underline mode */
+ if (vc->vc_can_do_color && vc->vc_par[1] < 16) {
+ vc->vc_ulcolor = color_table[vc->vc_par[1]];
+ if (vc->vc_underline)
+ update_attr(vc);
+ }
+ break;
+ case 2: /* set color for half intensity mode */
+ if (vc->vc_can_do_color && vc->vc_par[1] < 16) {
+ vc->vc_halfcolor = color_table[vc->vc_par[1]];
+ if (vc->vc_intensity == 0)
+ update_attr(vc);
+ }
+ break;
+ case 8: /* store colors as defaults */
+ vc->vc_def_color = vc->vc_attr;
+ if (vc->vc_hi_font_mask == 0x100)
+ vc->vc_def_color >>= 1;
+ default_attr(vc);
+ update_attr(vc);
+ break;
+ case 9: /* set blanking interval */
+ blankinterval = min(vc->vc_par[1], 60U) * 60;
+ poke_blanked_console();
+ break;
+ case 10: /* set bell frequency in Hz */
+ if (vc->vc_npar >= 1)
+ vc->vc_bell_pitch = vc->vc_par[1];
+ else
+ vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
+ break;
+ case 11: /* set bell duration in msec */
+ if (vc->vc_npar >= 1)
+ vc->vc_bell_duration = (vc->vc_par[1] < 2000) ?
+ msecs_to_jiffies(vc->vc_par[1]) : 0;
+ else
+ vc->vc_bell_duration = DEFAULT_BELL_DURATION;
+ break;
+ case 12: /* bring specified console to the front */
+ if (vc->vc_par[1] >= 1 && vc_cons_allocated(vc->vc_par[1] - 1))
+ set_console(vc->vc_par[1] - 1);
+ break;
+ case 13: /* unblank the screen */
+ poke_blanked_console();
+ break;
+ case 14: /* set vesa powerdown interval */
+ vesa_off_interval = min(vc->vc_par[1], 60U) * 60 * HZ;
+ break;
+ case 15: /* activate the previous console */
+ set_console(last_console);
+ break;
+ case 16: /* set cursor blink duration in msec */
+ if (vc->vc_npar >= 1 && vc->vc_par[1] >= 50 &&
+ vc->vc_par[1] <= USHRT_MAX)
+ vc->vc_cur_blink_ms = vc->vc_par[1];
+ else
+ vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
+ break;
}
}
@@ -2576,8 +2593,6 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
if (in_interrupt())
return count;
- might_sleep();
-
console_lock();
vc = tty->driver_data;
if (vc == NULL) {
@@ -3253,6 +3268,7 @@ static int con_install(struct tty_driver *driver, struct tty_struct *tty)
tty->driver_data = vc;
vc->port.tty = tty;
+ tty_port_get(&vc->port);
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
@@ -3288,6 +3304,13 @@ static void con_shutdown(struct tty_struct *tty)
console_unlock();
}
+static void con_cleanup(struct tty_struct *tty)
+{
+ struct vc_data *vc = tty->driver_data;
+
+ tty_port_put(&vc->port);
+}
+
static int default_color = 7; /* white */
static int default_italic_color = 2; // green (ASCII)
static int default_underline_color = 3; // cyan (ASCII)
@@ -3413,7 +3436,8 @@ static const struct tty_operations con_ops = {
.throttle = con_throttle,
.unthrottle = con_unthrottle,
.resize = vt_resize,
- .shutdown = con_shutdown
+ .shutdown = con_shutdown,
+ .cleanup = con_cleanup,
};
static struct cdev vc0_cdev;
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index ee6c91ef1f6c..daf61c28ba76 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -39,11 +39,32 @@
#include <linux/kbd_diacr.h>
#include <linux/selection.h>
-char vt_dont_switch;
-extern struct tty_driver *console_driver;
+bool vt_dont_switch;
-#define VT_IS_IN_USE(i) (console_driver->ttys[i] && console_driver->ttys[i]->count)
-#define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || vc_cons[i].d == sel_cons)
+static inline bool vt_in_use(unsigned int i)
+{
+ const struct vc_data *vc = vc_cons[i].d;
+
+ /*
+ * console_lock must be held to prevent the vc from being deallocated
+ * while we're checking whether it's in use.
+ */
+ WARN_CONSOLE_UNLOCKED();
+
+ return vc && kref_read(&vc->port.kref) > 1;
+}
+
+static inline bool vt_busy(int i)
+{
+ if (vt_in_use(i))
+ return true;
+ if (i == fg_console)
+ return true;
+ if (vc_is_sel(vc_cons[i].d))
+ return true;
+
+ return false;
+}
/*
* Console (vt and kd) routines, as defined by USL SVR4 manual, and by
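The kref_read() test works because tty_port_init() leaves the port refcount at 1 and, as of this series, con_install() takes an extra tty_port_get() per attached tty; anything above 1 therefore means some tty still holds the console open. An illustrative restatement, assuming that refcounting scheme:

static bool vc_port_has_users(const struct vc_data *vc)
{
	/* base reference from tty_port_init() is 1; extras come from opens */
	return vc && kref_read(&vc->port.kref) > 1;
}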
@@ -289,16 +310,14 @@ static int vt_disallocate(unsigned int vc_num)
int ret = 0;
console_lock();
- if (VT_BUSY(vc_num))
+ if (vt_busy(vc_num))
ret = -EBUSY;
else if (vc_num)
vc = vc_deallocate(vc_num);
console_unlock();
- if (vc && vc_num >= MIN_NR_CONSOLES) {
- tty_port_destroy(&vc->port);
- kfree(vc);
- }
+ if (vc && vc_num >= MIN_NR_CONSOLES)
+ tty_port_put(&vc->port);
return ret;
}
@@ -311,17 +330,15 @@ static void vt_disallocate_all(void)
console_lock();
for (i = 1; i < MAX_NR_CONSOLES; i++)
- if (!VT_BUSY(i))
+ if (!vt_busy(i))
vc[i] = vc_deallocate(i);
else
vc[i] = NULL;
console_unlock();
for (i = 1; i < MAX_NR_CONSOLES; i++) {
- if (vc[i] && i >= MIN_NR_CONSOLES) {
- tty_port_destroy(&vc[i]->port);
- kfree(vc[i]);
- }
+ if (vc[i] && i >= MIN_NR_CONSOLES)
+ tty_port_put(&vc[i]->port);
}
}
@@ -335,22 +352,13 @@ int vt_ioctl(struct tty_struct *tty,
{
struct vc_data *vc = tty->driver_data;
struct console_font_op op; /* used in multiple places here */
- unsigned int console;
+ unsigned int console = vc->vc_num;
unsigned char ucval;
unsigned int uival;
void __user *up = (void __user *)arg;
int i, perm;
int ret = 0;
- console = vc->vc_num;
-
-
- if (!vc_cons_allocated(console)) { /* impossible? */
- ret = -ENOIOCTLCMD;
- goto out;
- }
-
-
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
@@ -641,15 +649,16 @@ int vt_ioctl(struct tty_struct *tty,
struct vt_stat __user *vtstat = up;
unsigned short state, mask;
- /* Review: FIXME: Console lock ? */
if (put_user(fg_console + 1, &vtstat->v_active))
ret = -EFAULT;
else {
state = 1; /* /dev/tty0 is always open */
+ console_lock(); /* required by vt_in_use() */
for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask;
++i, mask <<= 1)
- if (VT_IS_IN_USE(i))
+ if (vt_in_use(i))
state |= mask;
+ console_unlock();
ret = put_user(state, &vtstat->v_state);
}
break;
@@ -659,10 +668,11 @@ int vt_ioctl(struct tty_struct *tty,
* Returns the first available (non-opened) console.
*/
case VT_OPENQRY:
- /* FIXME: locking ? - but then this is a stupid API */
+ console_lock(); /* required by vt_in_use() */
for (i = 0; i < MAX_NR_CONSOLES; ++i)
- if (! VT_IS_IN_USE(i))
+ if (!vt_in_use(i))
break;
+ console_unlock();
uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
goto setint;
@@ -1011,12 +1021,12 @@ int vt_ioctl(struct tty_struct *tty,
case VT_LOCKSWITCH:
if (!capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
- vt_dont_switch = 1;
+ vt_dont_switch = true;
break;
case VT_UNLOCKSWITCH:
if (!capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
- vt_dont_switch = 0;
+ vt_dont_switch = false;
break;
case VT_GETHIFONTMASK:
ret = put_user(vc->vc_hi_font_mask,
@@ -1180,14 +1190,9 @@ long vt_compat_ioctl(struct tty_struct *tty,
{
struct vc_data *vc = tty->driver_data;
struct console_font_op op; /* used in multiple places here */
- unsigned int console = vc->vc_num;
void __user *up = compat_ptr(arg);
int perm;
-
- if (!vc_cons_allocated(console)) /* impossible? */
- return -ENOIOCTLCMD;
-
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a57698985f9c..6e725c6c6256 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -996,6 +996,44 @@ err_device_create:
}
EXPORT_SYMBOL_GPL(__uio_register_device);
+static void devm_uio_unregister_device(struct device *dev, void *res)
+{
+ uio_unregister_device(*(struct uio_info **)res);
+}
+
+/**
+ * devm_uio_register_device - Resource managed uio_register_device()
+ * @owner: module that creates the new device
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * Returns: zero on success or a negative error code.
+ */
+int __devm_uio_register_device(struct module *owner,
+ struct device *parent,
+ struct uio_info *info)
+{
+ struct uio_info **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_uio_unregister_device, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ *ptr = info;
+ ret = __uio_register_device(owner, parent, info);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ devres_add(parent, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__devm_uio_register_device);
+
/**
* uio_unregister_device - unregister a UIO device
* @info: UIO device capabilities
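A hedged sketch of what the managed variant buys a driver: registration whose teardown is tied to the device's lifetime, with no remove() bookkeeping. The driver name and field values below are illustrative; devm_uio_register_device() is the two-argument wrapper exported above:

static int my_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->name = "my-uio";
	info->version = "0.1";
	/* unregistered automatically when the device is detached */
	return devm_uio_register_device(&pdev->dev, info);
}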
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index fc25ce90da3b..ae319ef3a832 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -99,6 +99,13 @@ static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
return 0;
}
+static void uio_pdrv_genirq_cleanup(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_disable(dev);
+}
+
static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
@@ -213,28 +220,16 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
*/
pm_runtime_enable(&pdev->dev);
- ret = uio_register_device(&pdev->dev, priv->uioinfo);
- if (ret) {
- dev_err(&pdev->dev, "unable to register uio device\n");
- pm_runtime_disable(&pdev->dev);
+ ret = devm_add_action_or_reset(&pdev->dev, uio_pdrv_genirq_cleanup,
+ &pdev->dev);
+ if (ret)
return ret;
- }
-
- platform_set_drvdata(pdev, priv);
- return 0;
-}
-
-static int uio_pdrv_genirq_remove(struct platform_device *pdev)
-{
- struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev);
- uio_unregister_device(priv->uioinfo);
- pm_runtime_disable(&pdev->dev);
-
- priv->uioinfo->handler = NULL;
- priv->uioinfo->irqcontrol = NULL;
+ ret = devm_uio_register_device(&pdev->dev, priv->uioinfo);
+ if (ret)
+ dev_err(&pdev->dev, "unable to register uio device\n");
- return 0;
+ return ret;
}
static int uio_pdrv_genirq_runtime_nop(struct device *dev)
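The conversion above leans on devm_add_action_or_reset(): if registering the cleanup action fails, the action runs immediately, so the enable/disable pair can never be left unbalanced. The generic shape of the pattern, as a sketch:

static void disable_runtime_pm(void *dev)
{
	pm_runtime_disable(dev);
}

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	ret = devm_add_action_or_reset(&pdev->dev, disable_runtime_pm,
				       &pdev->dev);
	if (ret)
		return ret;	/* disable_runtime_pm() has already run */

	return 0;
}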
@@ -271,7 +266,6 @@ MODULE_PARM_DESC(of_id, "Openfirmware id of the device to be handled by uio");
static struct platform_driver uio_pdrv_genirq = {
.probe = uio_pdrv_genirq_probe,
- .remove = uio_pdrv_genirq_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &uio_pdrv_genirq_dev_pm_ops,
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 635cf0466b59..e9fed9a88737 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -350,7 +350,7 @@ struct l1_code {
u8 string_header[E4_L1_STRING_HEADER];
u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER];
struct block_index page_header[E4_NO_SWAPPAGE_HEADERS];
- u8 code[0];
+ u8 code[];
} __packed;
/* structures describing a block within a DSP page */
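The [0]-to-[] change converts a GNU zero-length array into a C99 flexible array member, letting the compiler and fortify checks see the trailing array for what it is. A minimal allocation sketch (struct name hypothetical; struct_size() is from <linux/overflow.h>):

#include <linux/overflow.h>
#include <linux/slab.h>

struct blob {
	u32 len;
	u8 data[];	/* flexible array member: must be last */
};

static struct blob *blob_alloc(u32 len)
{
	struct blob *b = kzalloc(struct_size(b, data, len), GFP_KERNEL);

	if (b)
		b->len = len;
	return b;
}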
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index d3bdc4cc47aa..d96658e2e209 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/******************************************************************************
* usbatm.h - Generic USB xDSL driver core
*
@@ -164,7 +164,7 @@ struct usbatm_data {
unsigned char *cell_buf; /* holds partial rx cell */
unsigned int buf_usage;
- struct urb *urbs[0];
+ struct urb *urbs[];
};
static inline void *to_usbatm_driver_data(struct usb_interface *intf)
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index 3b181d4c7a03..6b6b04a3fe0f 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* c67x00-hcd.h: Cypress C67X00 USB HCD
*
diff --git a/drivers/usb/c67x00/c67x00.h b/drivers/usb/c67x00/c67x00.h
index 7ce10928b037..a4456d0fffa9 100644
--- a/drivers/usb/c67x00/c67x00.h
+++ b/drivers/usb/c67x00/c67x00.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* c67x00.h: Cypress C67X00 USB register and field definitions
*
diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
index b0a29efe7d31..deeea618ba33 100644
--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -201,4 +201,4 @@ MODULE_DEVICE_TABLE(pci, cdns3_pci_ids);
MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Cadence USBSS PCI wrapperr");
+MODULE_DESCRIPTION("Cadence USBSS PCI wrapper");
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index c6a79ca15858..5685ba11480b 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -52,8 +52,8 @@ enum modestrap_mode { USBSS_MODESTRAP_MODE_NONE,
struct cdns_ti {
struct device *dev;
void __iomem *usbss;
- int usb2_only:1;
- int vbus_divider:1;
+ unsigned usb2_only:1;
+ unsigned vbus_divider:1;
struct clk *usb2_refclk;
struct clk *lpm_clk;
};
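The bitfield change is a correctness fix, not style: a one-bit int field is signed on the ABIs the kernel targets (strictly, implementation-defined), so it stores 0 and -1, and tests like (x == 1) quietly fail. A standalone illustration, compilable as plain C:

#include <stdio.h>

struct s { int en:1; };		/* one-bit signed: holds 0 or -1 */
struct u { unsigned en:1; };	/* one-bit unsigned: holds 0 or 1 */

int main(void)
{
	struct s a = { .en = 1 };
	struct u b = { .en = 1 };

	printf("%d %u\n", a.en, b.en);	/* typically prints "-1 1" */
	return 0;
}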
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index c2123ef8d8a3..4aafba20f450 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -330,9 +330,9 @@ exit:
*
* Returns role
*/
-static enum usb_role cdns3_role_get(struct device *dev)
+static enum usb_role cdns3_role_get(struct usb_role_switch *sw)
{
- struct cdns3 *cdns = dev_get_drvdata(dev);
+ struct cdns3 *cdns = usb_role_switch_get_drvdata(sw);
return cdns->role;
}
@@ -346,9 +346,9 @@ static enum usb_role cdns3_role_get(struct device *dev)
* - Role switch for dual-role devices
* - USB_ROLE_GADGET <--> USB_ROLE_NONE for peripheral-only devices
*/
-static int cdns3_role_set(struct device *dev, enum usb_role role)
+static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
{
- struct cdns3 *cdns = dev_get_drvdata(dev);
+ struct cdns3 *cdns = usb_role_switch_get_drvdata(sw);
int ret = 0;
pm_runtime_get_sync(cdns->dev);
@@ -423,12 +423,6 @@ pm_put:
return ret;
}
-static const struct usb_role_switch_desc cdns3_switch_desc = {
- .set = cdns3_role_set,
- .get = cdns3_role_get,
- .allow_userspace_control = true,
-};
-
/**
* cdns3_probe - probe for cdns3 core device
* @pdev: Pointer to cdns3 core platform device
@@ -437,6 +431,7 @@ static const struct usb_role_switch_desc cdns3_switch_desc = {
*/
static int cdns3_probe(struct platform_device *pdev)
{
+ struct usb_role_switch_desc sw_desc = { };
struct device *dev = &pdev->dev;
struct resource *res;
struct cdns3 *cdns;
@@ -529,7 +524,12 @@ static int cdns3_probe(struct platform_device *pdev)
if (ret)
goto err3;
- cdns->role_sw = usb_role_switch_register(dev, &cdns3_switch_desc);
+ sw_desc.set = cdns3_role_set;
+ sw_desc.get = cdns3_role_get;
+ sw_desc.allow_userspace_control = true;
+ sw_desc.driver_data = cdns;
+
+ cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
if (IS_ERR(cdns->role_sw)) {
ret = PTR_ERR(cdns->role_sw);
dev_warn(dev, "Unable to register Role Switch\n");
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 3574dbb09366..4d43f3b28309 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -82,7 +82,7 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
* @ptr: address of device controller register to be read and changed
* @mask: bits requested to clear
*/
-void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
+static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
mask = readl(ptr) & ~mask;
writel(mask, ptr);
@@ -137,7 +137,7 @@ struct usb_request *cdns3_next_request(struct list_head *list)
*
* Returns buffer or NULL if no buffers in list
*/
-struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
+static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}
@@ -148,7 +148,7 @@ struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
*
* Returns request or NULL if no requests in list
*/
-struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
+static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
return list_first_entry_or_null(list, struct cdns3_request, list);
}
@@ -190,7 +190,7 @@ dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
return priv_ep->trb_pool_dma + offset;
}
-int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
+static int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
{
switch (priv_ep->type) {
case USB_ENDPOINT_XFER_ISOC:
@@ -345,7 +345,7 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}
-void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
+static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
{
struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
int current_trb = priv_req->start_trb;
@@ -511,7 +511,7 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
}
}
-struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
+static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep,
struct cdns3_request *priv_req)
{
@@ -551,7 +551,7 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
return &priv_req->request;
}
-int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
+static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep,
struct cdns3_request *priv_req)
{
@@ -836,7 +836,7 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}
-void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
+static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
/* Workaround for stale data address in TRB */
if (priv_ep->wa1_set) {
@@ -1380,7 +1380,7 @@ static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
struct cdns3_request *priv_req)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
- struct cdns3_trb *trb = priv_req->trb;
+ struct cdns3_trb *trb;
int current_index = 0;
int handled = 0;
int doorbell;
@@ -1904,7 +1904,7 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
return 0;
}
-void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
+static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
@@ -1925,7 +1925,7 @@ void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
}
-void cdns3_configure_dmult(struct cdns3_device *priv_dev,
+static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
@@ -2548,7 +2548,7 @@ found:
link_trb = priv_req->trb;
/* Update ring only if removed request is on pending_req_list list */
- if (req_on_hw_ring) {
+ if (req_on_hw_ring && link_trb) {
link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
((priv_req->end_trb + 1) * TRB_SIZE));
link_trb->control = (link_trb->control & TRB_CYCLE) |
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
index f003a7801872..52765b098b9e 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/gadget.h
@@ -1199,7 +1199,7 @@ struct cdns3_aligned_buf {
void *buf;
dma_addr_t dma;
u32 size;
- int in_use:1;
+ unsigned in_use:1;
struct list_head list;
};
@@ -1308,8 +1308,8 @@ struct cdns3_device {
unsigned u2_allowed:1;
unsigned is_selfpowered:1;
unsigned setup_pending:1;
- int hw_configured_flag:1;
- int wake_up_flag:1;
+ unsigned hw_configured_flag:1;
+ unsigned wake_up_flag:1;
unsigned status_completion_no_call:1;
unsigned using_streams:1;
int out_mem_is_allocated;
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index 98da99510be7..b1540ce93264 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* bits.h - register bits of the ChipIdea USB IP core
*
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index d49d5e1235d0..644ecaef17ee 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ci.h - common structures, functions, and macros of the ChipIdea driver
*
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index d8e7eb2f97b9..a479af3ae31d 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -393,8 +393,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
}
if (pdata.flags & CI_HDRC_PMQOS)
- pm_qos_add_request(&data->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&data->pm_qos_req, 0);
ret = imx_get_clks(dev);
if (ret)
@@ -478,7 +477,7 @@ disable_hsic_regulator:
/* don't overwrite original ret (cf. EPROBE_DEFER) */
regulator_disable(data->hsic_pad_regulator);
if (pdata.flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
+ cpu_latency_qos_remove_request(&data->pm_qos_req);
data->ci_pdev = NULL;
return ret;
}
@@ -499,7 +498,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
if (data->ci_pdev) {
imx_disable_unprepare_clks(&pdev->dev);
if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
+ cpu_latency_qos_remove_request(&data->pm_qos_req);
if (data->hsic_pad_regulator)
regulator_disable(data->hsic_pad_regulator);
}
@@ -527,7 +526,7 @@ static int __maybe_unused imx_controller_suspend(struct device *dev)
imx_disable_unprepare_clks(dev);
if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
+ cpu_latency_qos_remove_request(&data->pm_qos_req);
data->in_lpm = true;
@@ -547,8 +546,7 @@ static int __maybe_unused imx_controller_resume(struct device *dev)
}
if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_add_request(&data->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&data->pm_qos_req, 0);
ret = imx_prepare_enable_clks(dev);
if (ret)
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index de2aac9a2868..c2051aeba13f 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2012 Freescale Semiconductor, Inc.
*/
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index af648ba6544d..46105457e1ca 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0);
- if (!IS_ERR(ci->platdata->vbus_extcon.edev)) {
+ if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) {
hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
HS_PHY_SESS_VLD_CTRL_EN,
HS_PHY_SESS_VLD_CTRL_EN);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 52139c2a9924..ae0bdc036464 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -600,9 +600,9 @@ static int ci_cable_notifier(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
-static enum usb_role ci_usb_role_switch_get(struct device *dev)
+static enum usb_role ci_usb_role_switch_get(struct usb_role_switch *sw)
{
- struct ci_hdrc *ci = dev_get_drvdata(dev);
+ struct ci_hdrc *ci = usb_role_switch_get_drvdata(sw);
enum usb_role role;
unsigned long flags;
@@ -613,9 +613,10 @@ static enum usb_role ci_usb_role_switch_get(struct device *dev)
return role;
}
-static int ci_usb_role_switch_set(struct device *dev, enum usb_role role)
+static int ci_usb_role_switch_set(struct usb_role_switch *sw,
+ enum usb_role role)
{
- struct ci_hdrc *ci = dev_get_drvdata(dev);
+ struct ci_hdrc *ci = usb_role_switch_get_drvdata(sw);
struct ci_hdrc_cable *cable = NULL;
enum usb_role current_role = ci_role_to_usb_role(ci);
enum ci_role ci_role = usb_role_to_ci_role(role);
@@ -1118,6 +1119,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
if (ci_role_switch.fwnode) {
+ ci_role_switch.driver_data = ci;
ci->role_switch = usb_role_switch_register(dev,
&ci_role_switch);
if (IS_ERR(ci->role_switch)) {
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index fbfb02e05c97..be63924ea82e 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -170,6 +170,13 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
dev_dbg(ci->dev, "switching from %s to %s\n",
ci_role(ci)->name, ci->roles[role]->name);
+ if (ci->vbus_active && ci->role == CI_ROLE_GADGET)
+ /*
+ * The vbus disconnect event is lost when the role
+ * switch occurs during system suspend.
+ */
+ usb_gadget_vbus_disconnect(&ci->gadget);
+
ci_role_stop(ci);
if (role == CI_ROLE_GADGET &&
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
index 4f8b8179ec96..5e7a6e571dd2 100644
--- a/drivers/usb/chipidea/otg.h
+++ b/drivers/usb/chipidea/otg.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2013-2014 Freescale Semiconductor, Inc.
*
diff --git a/drivers/usb/chipidea/otg_fsm.h b/drivers/usb/chipidea/otg_fsm.h
index 2b49d29bf2fb..1f5c5ae0e71e 100644
--- a/drivers/usb/chipidea/otg_fsm.h
+++ b/drivers/usb/chipidea/otg_fsm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 4c4ac30db498..921bcf14dc06 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1532,7 +1532,7 @@ static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
if (is_active) {
- pm_runtime_get_sync(&_gadget->dev);
+ pm_runtime_get_sync(ci->dev);
hw_device_reset(ci);
spin_lock_irq(&ci->lock);
if (ci->driver) {
@@ -1552,7 +1552,7 @@ static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
ci->platdata->notify_event(ci,
CI_HDRC_CONTROLLER_STOPPED_EVENT);
_gadget_stop_activity(&ci->gadget);
- pm_runtime_put_sync(&_gadget->dev);
+ pm_runtime_put_sync(ci->dev);
usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
}
}
@@ -1637,12 +1637,12 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
return 0;
- pm_runtime_get_sync(&ci->gadget.dev);
+ pm_runtime_get_sync(ci->dev);
if (is_on)
hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
else
hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
- pm_runtime_put_sync(&ci->gadget.dev);
+ pm_runtime_put_sync(ci->dev);
return 0;
}
@@ -1840,7 +1840,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
CI_HDRC_CONTROLLER_STOPPED_EVENT);
_gadget_stop_activity(&ci->gadget);
spin_lock_irqsave(&ci->lock, flags);
- pm_runtime_put(&ci->gadget.dev);
+ pm_runtime_put(ci->dev);
}
spin_unlock_irqrestore(&ci->lock, flags);
@@ -1971,9 +1971,6 @@ static int udc_start(struct ci_hdrc *ci)
if (retval)
goto destroy_eps;
- pm_runtime_no_callbacks(&ci->gadget.dev);
- pm_runtime_enable(&ci->gadget.dev);
-
return retval;
destroy_eps:
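A condensed sketch of the runtime-PM change in the udc.c hunks above: references are now taken on the controller's own device (ci->dev), which has working runtime-PM callbacks, rather than on the gadget's virtual device, whose runtime-PM setup is removed. The my_* names are hypothetical.

#include <linux/pm_runtime.h>

struct my_ctrl {
	struct device *dev;		/* the controller's device */
};

static void my_hw_run(struct my_ctrl *ctrl, bool on);	/* hypothetical */

static int my_pullup(struct my_ctrl *ctrl, bool on)
{
	int ret;

	ret = pm_runtime_get_sync(ctrl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(ctrl->dev);
		return ret;
	}
	my_hw_run(ctrl, on);
	pm_runtime_put_sync(ctrl->dev);
	return 0;
}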
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index e023735d94b7..ebb11b625bb8 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* udc.h - ChipIdea UDC structures
*
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 47f09a6ce7bd..ded8d93834ca 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -412,9 +412,12 @@ static void acm_ctrl_irq(struct urb *urb)
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval && retval != -EPERM)
+ if (retval && retval != -EPERM && retval != -ENODEV)
dev_err(&acm->control->dev,
"%s - usb_submit_urb failed: %d\n", __func__, retval);
+ else
+ dev_vdbg(&acm->control->dev,
+ "control resubmission terminated %d\n", retval);
}
static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
@@ -430,6 +433,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
dev_err(&acm->data->dev,
"urb %d failed submission with %d\n",
index, res);
+ } else {
+ dev_vdbg(&acm->data->dev, "intended failure %d\n", res);
}
set_bit(index, &acm->read_urbs_free);
return res;
@@ -471,6 +476,7 @@ static void acm_read_bulk_callback(struct urb *urb)
int status = urb->status;
bool stopped = false;
bool stalled = false;
+ bool cooldown = false;
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
rb->index, urb->actual_length, status);
@@ -497,6 +503,14 @@ static void acm_read_bulk_callback(struct urb *urb)
__func__, status);
stopped = true;
break;
+ case -EOVERFLOW:
+ case -EPROTO:
+ dev_dbg(&acm->data->dev,
+ "%s - cooling babbling device\n", __func__);
+ usb_mark_last_busy(acm->dev);
+ set_bit(rb->index, &acm->urbs_in_error_delay);
+ cooldown = true;
+ break;
default:
dev_dbg(&acm->data->dev,
"%s - nonzero urb status received: %d\n",
@@ -518,9 +532,11 @@ static void acm_read_bulk_callback(struct urb *urb)
*/
smp_mb__after_atomic();
- if (stopped || stalled) {
+ if (stopped || stalled || cooldown) {
if (stalled)
schedule_work(&acm->work);
+ else if (cooldown)
+ schedule_delayed_work(&acm->dwork, HZ / 2);
return;
}
@@ -557,14 +573,20 @@ static void acm_softint(struct work_struct *work)
struct acm *acm = container_of(work, struct acm, work);
if (test_bit(EVENT_RX_STALL, &acm->flags)) {
- if (!(usb_autopm_get_interface(acm->data))) {
+ smp_mb(); /* against acm_suspend() */
+ if (!acm->susp_count) {
for (i = 0; i < acm->rx_buflimit; i++)
usb_kill_urb(acm->read_urbs[i]);
usb_clear_halt(acm->dev, acm->in);
acm_submit_read_urbs(acm, GFP_KERNEL);
- usb_autopm_put_interface(acm->data);
+ clear_bit(EVENT_RX_STALL, &acm->flags);
}
- clear_bit(EVENT_RX_STALL, &acm->flags);
+ }
+
+ if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
+ for (i = 0; i < ACM_NR; i++)
+ if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
+ acm_submit_read_urb(acm, i, GFP_NOIO);
}
if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
@@ -923,16 +945,16 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
mutex_lock(&acm->port.mutex);
- if ((ss->close_delay != old_close_delay) ||
- (ss->closing_wait != old_closing_wait)) {
- if (!capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((ss->close_delay != old_close_delay) ||
+ (ss->closing_wait != old_closing_wait))
retval = -EPERM;
- else {
- acm->port.close_delay = close_delay;
- acm->port.closing_wait = closing_wait;
- }
- } else
- retval = -EOPNOTSUPP;
+ else
+ retval = -EOPNOTSUPP;
+ } else {
+ acm->port.close_delay = close_delay;
+ acm->port.closing_wait = closing_wait;
+ }
mutex_unlock(&acm->port.mutex);
return retval;
@@ -1333,6 +1355,7 @@ made_compressed_probe:
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
INIT_WORK(&acm->work, acm_softint);
+ INIT_DELAYED_WORK(&acm->dwork, acm_softint);
init_waitqueue_head(&acm->wioctl);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
@@ -1542,6 +1565,7 @@ static void acm_disconnect(struct usb_interface *intf)
acm_kill_urbs(acm);
cancel_work_sync(&acm->work);
+ cancel_delayed_work_sync(&acm->dwork);
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1584,6 +1608,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
acm_kill_urbs(acm);
cancel_work_sync(&acm->work);
+ cancel_delayed_work_sync(&acm->dwork);
+ acm->urbs_in_error_delay = 0;
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ca1c026382c2..cd5e9d8ab237 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -109,8 +109,11 @@ struct acm {
# define EVENT_TTY_WAKEUP 0
# define EVENT_RX_STALL 1
# define ACM_THROTTLED 2
+# define ACM_ERROR_DELAY 3
+ unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */
struct usb_cdc_line_coding line; /* bits, stop, parity */
- struct work_struct work; /* work queue entry for line discipline waking up */
+ struct work_struct work; /* work queue entry for various purposes */
+ struct delayed_work dwork; /* for cool downs needed in error recovery */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
unsigned int ctrlout; /* output control lines (DTR, RTS) */
struct async_icount iocount; /* counters for control line changes */
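The cdc-acm "cool down" above, reduced to its shape (my_* names hypothetical, not the driver's exact code): a device babbling -EPROTO/-EOVERFLOW completions is not resubmitted from the URB callback; the URB is instead parked on a bitmap and requeued from delayed work half a second later.

#include <linux/usb.h>
#include <linux/workqueue.h>

#define MY_ERROR_DELAY	0

struct my_acm {
	unsigned long flags;
	struct delayed_work dwork;	/* requeues parked read URBs */
};

static void my_read_complete(struct urb *urb)
{
	struct my_acm *acm = urb->context;

	switch (urb->status) {
	case 0:
		break;				/* consume the data below */
	case -EPROTO:
	case -EOVERFLOW:
		/* babbling device: back off instead of resubmitting */
		set_bit(MY_ERROR_DELAY, &acm->flags);
		schedule_delayed_work(&acm->dwork, HZ / 2);
		return;
	default:
		return;				/* fatal; do not resubmit */
	}
	/* ...push the received data to the TTY layer... */
	usb_submit_urb(urb, GFP_ATOMIC);
}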
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6833c918abce..d93d94d7ff50 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
{
struct usb_memory *usbm = NULL;
struct usb_dev_state *ps = file->private_data;
+ struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
size_t size = vma->vm_end - vma->vm_start;
void *mem;
unsigned long flags;
@@ -250,11 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist);
- if (remap_pfn_range(vma, vma->vm_start,
- virt_to_phys(usbm->mem) >> PAGE_SHIFT,
- size, vma->vm_page_prot) < 0) {
- dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
- return -EAGAIN;
+ if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(usbm->mem) >> PAGE_SHIFT,
+ size, vma->vm_page_prot) < 0) {
+ dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+ return -EAGAIN;
+ }
+ } else {
+ if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
+ size)) {
+ dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+ return -EAGAIN;
+ }
}
vma->vm_flags |= VM_IO;
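Why usbdev_mmap() now branches: for buffers obtained from the DMA API, virt_to_phys() on the returned kernel address is not generally meaningful, so the userspace mapping must be built with dma_mmap_coherent(); remap_pfn_range() stays correct only for plain kernel memory (localmem pools and PIO-only host controllers). A minimal driver-side sketch, my_* names hypothetical:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct my_dev {
	struct device *dma_dev;
	void *cpu_addr;			/* from dma_alloc_coherent() */
	dma_addr_t dma_handle;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *mydev = file->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(mydev->dma_dev, vma, mydev->cpu_addr,
				 mydev->dma_handle, size);
}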
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 2b27d232d7a7..f81606c6a35b 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -261,9 +261,19 @@ static int usb_probe_device(struct device *dev)
*/
if (!udriver->supports_autosuspend)
error = usb_autoresume_device(udev);
+ if (error)
+ return error;
- if (!error)
- error = udriver->probe(udev);
+ if (udriver->generic_subclass)
+ error = usb_generic_driver_probe(udev);
+ if (error)
+ return error;
+
+ error = udriver->probe(udev);
+ if (error == -ENODEV && udriver != &usb_generic_driver) {
+ udev->use_generic_driver = 1;
+ return -EPROBE_DEFER;
+ }
return error;
}
@@ -273,7 +283,10 @@ static int usb_unbind_device(struct device *dev)
struct usb_device *udev = to_usb_device(dev);
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
- udriver->disconnect(udev);
+ if (udriver->disconnect)
+ udriver->disconnect(udev);
+ if (udriver->generic_subclass)
+ usb_generic_driver_disconnect(udev);
if (!udriver->supports_autosuspend)
usb_autosuspend_device(udev);
return 0;
@@ -790,17 +803,42 @@ const struct usb_device_id *usb_match_id(struct usb_interface *interface,
}
EXPORT_SYMBOL_GPL(usb_match_id);
+const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
+ const struct usb_device_id *id)
+{
+ if (!id)
+ return NULL;
+
+ for (; id->idVendor || id->idProduct ; id++) {
+ if (usb_match_device(udev, id))
+ return id;
+ }
+
+ return NULL;
+}
+
static int usb_device_match(struct device *dev, struct device_driver *drv)
{
/* devices and interfaces are handled separately */
if (is_usb_device(dev)) {
+ struct usb_device *udev;
+ struct usb_device_driver *udrv;
/* interface drivers never match devices */
if (!is_usb_device_driver(drv))
return 0;
- /* TODO: Add real matching code */
- return 1;
+ udev = to_usb_device(dev);
+ udrv = to_usb_device_driver(drv);
+
+ if (udrv->id_table &&
+ usb_device_match_id(udev, udrv->id_table) != NULL) {
+ return 1;
+ }
+
+ if (udrv->match)
+ return udrv->match(udev);
+ return 0;
} else if (is_usb_interface(dev)) {
struct usb_interface *intf;
@@ -1149,7 +1187,10 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
udev->do_remote_wakeup = 0;
udriver = &usb_generic_driver;
}
- status = udriver->suspend(udev, msg);
+ if (udriver->suspend)
+ status = udriver->suspend(udev, msg);
+ if (status == 0 && udriver->generic_subclass)
+ status = usb_generic_driver_suspend(udev, msg);
done:
dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
@@ -1181,7 +1222,10 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
udev->reset_resume = 1;
udriver = to_usb_device_driver(udev->dev.driver);
- status = udriver->resume(udev, msg);
+ if (udriver->generic_subclass)
+ status = usb_generic_driver_resume(udev, msg);
+ if (status == 0 && udriver->resume)
+ status = udriver->resume(udev, msg);
done:
dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
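Taken together, the driver.c hunks define this call order for drivers that set .generic_subclass: probe runs the generic probe before the driver's own, suspend runs the driver's hook and then the generic one, and resume does the reverse. A hedged sketch of such a driver (my_* names and IDs hypothetical):

#include <linux/usb.h>

static int my_probe(struct usb_device *udev);
static void my_disconnect(struct usb_device *udev);

static const struct usb_device_id my_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ }
};

static struct usb_device_driver my_usb_device_driver = {
	.name			= "my-usb-device",
	.probe			= my_probe,	/* runs after the generic probe */
	.disconnect		= my_disconnect,
	.id_table		= my_id_table,
	.generic_subclass	= 1,
	.supports_autosuspend	= 1,
};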
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 38f8b3e31762..4626227a6dd2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -195,7 +195,38 @@ int usb_choose_configuration(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_choose_configuration);
-static int generic_probe(struct usb_device *udev)
+static int __check_usb_generic(struct device_driver *drv, void *data)
+{
+ struct usb_device *udev = data;
+ struct usb_device_driver *udrv;
+
+ if (!is_usb_device_driver(drv))
+ return 0;
+ udrv = to_usb_device_driver(drv);
+ if (udrv == &usb_generic_driver)
+ return 0;
+ if (!udrv->id_table)
+ return 0;
+
+ return usb_device_match_id(udev, udrv->id_table) != NULL;
+}
+
+static bool usb_generic_driver_match(struct usb_device *udev)
+{
+ if (udev->use_generic_driver)
+ return true;
+
+ /*
+ * If any other driver wants the device, leave the device to this other
+ * driver.
+ */
+ if (bus_for_each_drv(&usb_bus_type, NULL, udev, __check_usb_generic))
+ return false;
+
+ return true;
+}
+
+int usb_generic_driver_probe(struct usb_device *udev)
{
int err, c;
@@ -222,7 +253,7 @@ static int generic_probe(struct usb_device *udev)
return 0;
}
-static void generic_disconnect(struct usb_device *udev)
+void usb_generic_driver_disconnect(struct usb_device *udev)
{
usb_notify_remove_device(udev);
@@ -234,7 +265,7 @@ static void generic_disconnect(struct usb_device *udev)
#ifdef CONFIG_PM
-static int generic_suspend(struct usb_device *udev, pm_message_t msg)
+int usb_generic_driver_suspend(struct usb_device *udev, pm_message_t msg)
{
int rc;
@@ -262,7 +293,7 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
return rc;
}
-static int generic_resume(struct usb_device *udev, pm_message_t msg)
+int usb_generic_driver_resume(struct usb_device *udev, pm_message_t msg)
{
int rc;
@@ -285,11 +316,12 @@ static int generic_resume(struct usb_device *udev, pm_message_t msg)
struct usb_device_driver usb_generic_driver = {
.name = "usb",
- .probe = generic_probe,
- .disconnect = generic_disconnect,
+ .match = usb_generic_driver_match,
+ .probe = usb_generic_driver_probe,
+ .disconnect = usb_generic_driver_disconnect,
#ifdef CONFIG_PM
- .suspend = generic_suspend,
- .resume = generic_resume,
+ .suspend = usb_generic_driver_suspend,
+ .resume = usb_generic_driver_resume,
#endif
.supports_autosuspend = 1,
};
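Besides an id_table, a usb_device_driver may now supply a .match callback; usb_device_match() above consults the table first and falls back to the callback. A one-line hedged example (vendor ID hypothetical):

static bool my_match(struct usb_device *udev)
{
	return le16_to_cpu(udev->descriptor.idVendor) == 0x1234;
}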
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 54cd8ef795ec..fc748c731832 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -39,6 +39,7 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
+#define USB_PRODUCT_USB5534B 0x5534
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
@@ -1223,6 +1224,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
+ /* Don't set the change_bits when the device
+ * was powered off.
+ */
+ if (test_bit(port1, hub->power_bits))
+ set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell hub_wq */
@@ -2723,13 +2729,11 @@ static bool use_new_scheme(struct usb_device *udev, int retry,
{
int old_scheme_first_port =
port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME;
- int quick_enumeration = (udev->speed == USB_SPEED_HIGH);
if (udev->speed >= USB_SPEED_SUPER)
return false;
- return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first
- || quick_enumeration);
+ return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first);
}
/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
@@ -3088,6 +3092,15 @@ static int check_port_resume_type(struct usb_device *udev,
if (portchange & USB_PORT_STAT_C_ENABLE)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
+
+ /*
+ * Whatever made this reset-resume necessary may have
+ * turned on the port1 bit in hub->change_bits. But after
+ * a successful reset-resume we want the bit to be clear;
+ * if it was on it would indicate that something happened
+ * following the reset-resume.
+ */
+ clear_bit(port1, hub->change_bits);
}
return status;
@@ -5609,8 +5622,11 @@ out_hdev_lock:
}
static const struct usb_device_id hub_id_table[] = {
- { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT
+ | USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_SMSC,
+ .idProduct = USB_PRODUCT_USB5534B,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 5adf489428aa..6197938dcc2d 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -5,6 +5,7 @@
* Released under the GPLv2 only.
*/
+#include <linux/acpi.h>
#include <linux/pci.h> /* for scatterlist macros */
#include <linux/usb.h>
#include <linux/module.h>
@@ -588,12 +589,13 @@ void usb_sg_cancel(struct usb_sg_request *io)
int i, retval;
spin_lock_irqsave(&io->lock, flags);
- if (io->status) {
+ if (io->status || io->count == 0) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
/* shut everything down */
io->status = -ECONNRESET;
+ io->count++; /* Keep the request alive until we're done */
spin_unlock_irqrestore(&io->lock, flags);
for (i = io->entries - 1; i >= 0; --i) {
@@ -607,6 +609,12 @@ void usb_sg_cancel(struct usb_sg_request *io)
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
+
+ spin_lock_irqsave(&io->lock, flags);
+ io->count--;
+ if (!io->count)
+ complete(&io->complete);
+ spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);
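The usb_sg_cancel() change is a keep-alive reference pattern: take an extra count under the lock before unlinking, so a concurrent completion cannot finish, and free for reuse, the request mid-loop; then drop the count and complete if we were the last holder. The generic shape, with my_* names hypothetical:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

struct my_req {
	spinlock_t lock;
	int status;
	int count;			/* in-flight URBs + temporary holds */
	struct completion done;
};

static void my_cancel(struct my_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->status || req->count == 0) {	/* idle or already dying */
		spin_unlock_irqrestore(&req->lock, flags);
		return;
	}
	req->status = -ECONNRESET;
	req->count++;				/* keep the request alive */
	spin_unlock_irqrestore(&req->lock, flags);

	/* ...unlink the in-flight URBs with the lock dropped... */

	spin_lock_irqsave(&req->lock, flags);
	if (--req->count == 0)
		complete(&req->done);
	spin_unlock_irqrestore(&req->lock, flags);
}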
@@ -1136,11 +1144,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
if (usb_endpoint_out(epaddr)) {
ep = dev->ep_out[epnum];
- if (reset_hardware)
+ if (reset_hardware && epnum != 0)
dev->ep_out[epnum] = NULL;
} else {
ep = dev->ep_in[epnum];
- if (reset_hardware)
+ if (reset_hardware && epnum != 0)
dev->ep_in[epnum] = NULL;
}
if (ep) {
@@ -1941,6 +1949,7 @@ free_interfaces:
intf->dev.of_node = usb_of_get_interface_node(dev,
configuration, ifnum);
}
+ ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev));
intf->dev.driver = NULL;
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index da30b5664ff3..3e8efe759c3e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -430,6 +430,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Corsair K70 RGB RAPIDFIRE */
+ { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
+
/* MIDI keyboard WORLDE MINI */
{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index f19694e69f5c..9f4320b9d7fc 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -849,7 +849,7 @@ static struct attribute *dev_string_attrs[] = {
static umode_t dev_string_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct usb_device *udev = to_usb_device(dev);
if (a == &dev_attr_manufacturer.attr) {
@@ -883,7 +883,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct usb_device *udev = to_usb_device(dev);
size_t nleft = count;
size_t srclen, n;
@@ -1233,7 +1233,7 @@ static struct attribute *intf_assoc_attrs[] = {
static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct usb_interface *intf = to_usb_interface(dev);
if (intf->intf_assoc == NULL)
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 9043d7242d67..50b2fc7fcc0e 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -86,7 +86,7 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
{
enum usb_port_connect_type connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *upc;
+ union acpi_object *upc = NULL;
acpi_status status;
/*
@@ -98,11 +98,12 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
* no connectable, the port would not be used.
*/
status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ goto out;
+
upc = buffer.pointer;
- if (!upc || (upc->type != ACPI_TYPE_PACKAGE)
- || upc->package.count != 4) {
+ if (!upc || (upc->type != ACPI_TYPE_PACKAGE) || upc->package.count != 4)
goto out;
- }
if (upc->package.elements[0].integer.value)
if (pld->user_visible)
@@ -186,7 +187,7 @@ usb_acpi_find_companion_for_port(struct usb_port *port_dev)
handle = adev->handle;
status = acpi_get_physical_device_location(handle, &pld);
- if (!ACPI_FAILURE(status) && pld) {
+ if (ACPI_SUCCESS(status) && pld) {
port_dev->location = USB_ACPI_LOCATION_VALID
| pld->group_token << 8 | pld->group_position;
port_dev->connect_type = usb_acpi_get_connect_type(handle, pld);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 3ad0ee57e859..64ed4023a8c8 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -50,6 +50,12 @@ extern void usb_release_bos_descriptor(struct usb_device *dev);
extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
+extern int usb_generic_driver_probe(struct usb_device *udev);
+extern void usb_generic_driver_disconnect(struct usb_device *udev);
+extern int usb_generic_driver_suspend(struct usb_device *udev,
+ pm_message_t msg);
+extern int usb_generic_driver_resume(struct usb_device *udev,
+ pm_message_t msg);
static inline unsigned usb_get_max_power(struct usb_device *udev,
struct usb_host_config *c)
@@ -66,6 +72,8 @@ extern int usb_match_one_id_intf(struct usb_device *dev,
const struct usb_device_id *id);
extern int usb_match_device(struct usb_device *dev,
const struct usb_device_id *id);
+extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
+ const struct usb_device_id *id);
extern void usb_forced_unbind_intf(struct usb_interface *intf);
extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 968e03b89d04..99b0bdfe0012 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -411,6 +411,10 @@ enum dwc2_ep0_state {
* register.
* 0 - Deactivate the transceiver (default)
* 1 - Activate the transceiver
+ * @activate_stm_id_vb_detection: Activate external ID pin and Vbus level
+ * detection using GGPIO register.
+ * 0 - Deactivate the external level detection (default)
+ * 1 - Activate the external level detection
* @g_dma: Enables gadget dma usage (default: autodetect).
* @g_dma_desc: Enables gadget descriptor DMA (default: autodetect).
* @g_rx_fifo_size: The periodic rx fifo size for the device, in
@@ -481,6 +485,7 @@ struct dwc2_core_params {
bool service_interval;
u8 hird_threshold;
bool activate_stm_fs_transceiver;
+ bool activate_stm_id_vb_detection;
bool ipg_isoc_en;
u16 max_packet_count;
u32 max_transfer_size;
@@ -874,6 +879,8 @@ struct dwc2_hregs_backup {
* removed once all SoCs support usb transceiver.
* @supplies: Definition of USB power supplies
* @vbus_supply: Regulator supplying vbus.
+ * @usb33d: Optional 3.3v regulator used on some stm32 devices to
+ * supply ID and VBUS detection hardware.
* @lock: Spinlock that protects all the driver data structures
* @priv: Stores a pointer to the struct usb_hcd
* @queuing_high_bandwidth: True if multiple packets of a high-bandwidth
@@ -1061,6 +1068,7 @@ struct dwc2_hsotg {
struct dwc2_hsotg_plat *plat;
struct regulator_bulk_data supplies[DWC2_NUM_SUPPLIES];
struct regulator *vbus_supply;
+ struct regulator *usb33d;
spinlock_t lock;
void *priv;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 92ed32ec1607..12b98b466287 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1646,7 +1646,8 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
- status = 1 << USB_DEVICE_SELF_POWERED;
+ status = hsotg->gadget.is_selfpowered <<
+ USB_DEVICE_SELF_POWERED;
status |= hsotg->remote_wakeup_allowed <<
USB_DEVICE_REMOTE_WAKEUP;
reply = cpu_to_le16(status);
@@ -4528,6 +4529,26 @@ static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
}
/**
+ * dwc2_hsotg_set_selfpowered - set if device is self/bus powered
+ * @gadget: The usb gadget state
+ * @is_selfpowered: Whether the device is self-powered
+ *
+ * Set if the device is self or bus powered.
+ */
+static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ gadget->is_selfpowered = !!is_selfpowered;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return 0;
+}
+
+/**
* dwc2_hsotg_pullup - connect/disconnect the USB PHY
* @gadget: The usb gadget state
* @is_on: Current state of the USB PHY
@@ -4618,6 +4639,7 @@ static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
.get_frame = dwc2_hsotg_gadget_getframe,
+ .set_selfpowered = dwc2_hsotg_set_selfpowered,
.udc_start = dwc2_hsotg_udc_start,
.udc_stop = dwc2_hsotg_udc_stop,
.pullup = dwc2_hsotg_pullup,
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 8ca6d12a6f57..1224fa9df604 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -199,7 +199,7 @@ struct dwc2_hcd_urb {
u32 flags;
u16 interval;
struct dwc2_hcd_pipe_info pipe_info;
- struct dwc2_hcd_iso_packet_desc iso_descs[0];
+ struct dwc2_hcd_iso_packet_desc iso_descs[];
};
/* Phases for control transfers */
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 510e87ec0be8..c4027bbcedec 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -54,6 +54,12 @@
#define GOTGCTL_HSTSETHNPEN BIT(10)
#define GOTGCTL_HNPREQ BIT(9)
#define GOTGCTL_HSTNEGSCS BIT(8)
+#define GOTGCTL_BVALOVAL BIT(7)
+#define GOTGCTL_BVALOEN BIT(6)
+#define GOTGCTL_AVALOVAL BIT(5)
+#define GOTGCTL_AVALOEN BIT(4)
+#define GOTGCTL_VBVALOVAL BIT(3)
+#define GOTGCTL_VBVALOEN BIT(2)
#define GOTGCTL_SESREQ BIT(1)
#define GOTGCTL_SESREQSCS BIT(0)
@@ -227,6 +233,8 @@
#define GPVNDCTL HSOTG_REG(0x0034)
#define GGPIO HSOTG_REG(0x0038)
#define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16)
+#define GGPIO_STM32_OTG_GCCFG_VBDEN BIT(21)
+#define GGPIO_STM32_OTG_GCCFG_IDEN BIT(22)
#define GUID HSOTG_REG(0x003c)
#define GSNPSID HSOTG_REG(0x0040)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 31e090ac9f1e..8ccc83f7eb3f 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -163,6 +163,35 @@ static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg)
p->host_perio_tx_fifo_size = 256;
}
+static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->speed = DWC2_SPEED_PARAM_FULL;
+ p->host_rx_fifo_size = 128;
+ p->host_nperio_tx_fifo_size = 96;
+ p->host_perio_tx_fifo_size = 96;
+ p->max_packet_count = 256;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_FS;
+ p->i2c_enable = false;
+ p->activate_stm_fs_transceiver = true;
+ p->activate_stm_id_vb_detection = true;
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+}
+
+static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->activate_stm_id_vb_detection = true;
+ p->host_rx_fifo_size = 440;
+ p->host_nperio_tx_fifo_size = 256;
+ p->host_perio_tx_fifo_size = 256;
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+}
+
const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
{ .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
@@ -186,6 +215,10 @@ const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "st,stm32f4x9-hsotg" },
{ .compatible = "st,stm32f7-hsotg",
.data = dwc2_set_stm32f7_hsotg_params },
+ { .compatible = "st,stm32mp15-fsotg",
+ .data = dwc2_set_stm32mp15_fsotg_params },
+ { .compatible = "st,stm32mp15-hsotg",
+ .data = dwc2_set_stm32mp15_hsotg_params },
{},
};
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 3c6ce09a6db5..69972750e161 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -285,7 +285,9 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
ret = devm_regulator_bulk_get(hsotg->dev, ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
if (ret) {
- dev_err(hsotg->dev, "failed to request supplies: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(hsotg->dev, "failed to request supplies: %d\n",
+ ret);
return ret;
}
return 0;
@@ -312,6 +314,9 @@ static int dwc2_driver_remove(struct platform_device *dev)
if (hsotg->gadget_enabled)
dwc2_hsotg_remove(hsotg);
+ if (hsotg->params.activate_stm_id_vb_detection)
+ regulator_disable(hsotg->usb33d);
+
if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
@@ -392,8 +397,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
return retval;
}
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- hsotg->regs = devm_ioremap_resource(&dev->dev, res);
+ hsotg->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res);
if (IS_ERR(hsotg->regs))
return PTR_ERR(hsotg->regs);
@@ -464,10 +468,35 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
goto error;
+ if (hsotg->params.activate_stm_id_vb_detection) {
+ u32 ggpio;
+
+ hsotg->usb33d = devm_regulator_get(hsotg->dev, "usb33d");
+ if (IS_ERR(hsotg->usb33d)) {
+ retval = PTR_ERR(hsotg->usb33d);
+ if (retval != -EPROBE_DEFER)
+ dev_err(hsotg->dev,
+ "failed to request usb33d supply: %d\n",
+ retval);
+ goto error;
+ }
+ retval = regulator_enable(hsotg->usb33d);
+ if (retval) {
+ dev_err(hsotg->dev,
+ "failed to enable usb33d supply: %d\n", retval);
+ goto error;
+ }
+
+ ggpio = dwc2_readl(hsotg, GGPIO);
+ ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
+ ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
+ dwc2_writel(hsotg, ggpio, GGPIO);
+ }
+
if (hsotg->dr_mode != USB_DR_MODE_HOST) {
retval = dwc2_gadget_init(hsotg);
if (retval)
- goto error;
+ goto error_init;
hsotg->gadget_enabled = 1;
}
@@ -493,7 +522,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval) {
if (hsotg->gadget_enabled)
dwc2_hsotg_remove(hsotg);
- goto error;
+ goto error_init;
}
hsotg->hcd_enabled = 1;
}
@@ -509,6 +538,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
return 0;
+error_init:
+ if (hsotg->params.activate_stm_id_vb_detection)
+ regulator_disable(hsotg->usb33d);
error:
dwc2_lowlevel_hw_disable(hsotg);
return retval;
@@ -523,6 +555,37 @@ static int __maybe_unused dwc2_suspend(struct device *dev)
if (is_device_mode)
dwc2_hsotg_suspend(dwc2);
+ if (dwc2->params.activate_stm_id_vb_detection) {
+ unsigned long flags;
+ u32 ggpio, gotgctl;
+
+ /*
+ * We need to force the mode to the current mode to avoid a Mode
+ * Mismatch interrupt once ID detection is disabled below.
+ */
+ dwc2_force_mode(dwc2, !is_device_mode);
+
+ spin_lock_irqsave(&dwc2->lock, flags);
+ gotgctl = dwc2_readl(dwc2, GOTGCTL);
+ /* bypass debounce filter, enable overrides */
+ gotgctl |= GOTGCTL_DBNCE_FLTR_BYPASS;
+ gotgctl |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN;
+ /* Force A / B session if needed */
+ if (gotgctl & GOTGCTL_ASESVLD)
+ gotgctl |= GOTGCTL_AVALOVAL;
+ if (gotgctl & GOTGCTL_BSESVLD)
+ gotgctl |= GOTGCTL_BVALOVAL;
+ dwc2_writel(dwc2, gotgctl, GOTGCTL);
+ spin_unlock_irqrestore(&dwc2->lock, flags);
+
+ ggpio = dwc2_readl(dwc2, GGPIO);
+ ggpio &= ~GGPIO_STM32_OTG_GCCFG_IDEN;
+ ggpio &= ~GGPIO_STM32_OTG_GCCFG_VBDEN;
+ dwc2_writel(dwc2, ggpio, GGPIO);
+
+ regulator_disable(dwc2->usb33d);
+ }
+
if (dwc2->ll_hw_enabled &&
(is_device_mode || dwc2_host_can_poweroff_phy(dwc2))) {
ret = __dwc2_lowlevel_hw_disable(dwc2);
@@ -544,6 +607,34 @@ static int __maybe_unused dwc2_resume(struct device *dev)
}
dwc2->phy_off_for_suspend = false;
+ if (dwc2->params.activate_stm_id_vb_detection) {
+ unsigned long flags;
+ u32 ggpio, gotgctl;
+
+ ret = regulator_enable(dwc2->usb33d);
+ if (ret)
+ return ret;
+
+ ggpio = dwc2_readl(dwc2, GGPIO);
+ ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
+ ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
+ dwc2_writel(dwc2, ggpio, GGPIO);
+
+ /* ID/VBUS detection startup time */
+ usleep_range(5000, 7000);
+
+ spin_lock_irqsave(&dwc2->lock, flags);
+ gotgctl = dwc2_readl(dwc2, GOTGCTL);
+ gotgctl &= ~GOTGCTL_DBNCE_FLTR_BYPASS;
+ gotgctl &= ~(GOTGCTL_BVALOEN | GOTGCTL_AVALOEN |
+ GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL);
+ dwc2_writel(dwc2, gotgctl, GOTGCTL);
+ spin_unlock_irqrestore(&dwc2->lock, flags);
+ }
+
+ /* Need to restore FORCEDEVMODE/FORCEHOSTMODE */
+ dwc2_force_dr_mode(dwc2);
+
if (dwc2_is_device_mode(dwc2))
ret = dwc2_hsotg_resume(dwc2);
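The resume path above restores the STM32 ID/VBUS sensing with one fixed sequence (probe uses the same bring-up minus the settle delay); condensed into a hypothetical helper, where the regulator, register accessors, and GGPIO bits are the driver's own:

static int my_stm32_sense_enable(struct dwc2_hsotg *hsotg)
{
	u32 ggpio;
	int ret;

	ret = regulator_enable(hsotg->usb33d);
	if (ret)
		return ret;

	ggpio = dwc2_readl(hsotg, GGPIO);
	ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN | GGPIO_STM32_OTG_GCCFG_VBDEN;
	dwc2_writel(hsotg, ggpio, GGPIO);

	/* ID/VBUS level detection startup time */
	usleep_range(5000, 7000);
	return 0;
}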
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 206caa0ea1c6..7a2304565a73 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -4,6 +4,7 @@ config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
depends on (USB || USB_GADGET) && HAS_DMA
select USB_XHCI_PLATFORM if USB_XHCI_HCD
+ select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
help
Say Y or M here if your system has a Dual Role SuperSpeed
USB controller based on the DesignWare USB3 IP Core.
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1d85c42b9c67..edc17155cb2b 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -289,12 +289,6 @@ done:
return 0;
}
-static const struct clk_bulk_data dwc3_core_clks[] = {
- { .id = "ref" },
- { .id = "bus_early" },
- { .id = "suspend" },
-};
-
/*
* dwc3_frame_length_adjustment - Adjusts frame length if required
* @dwc3: Pointer to our controller context structure
@@ -1029,6 +1023,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (dwc->dis_tx_ipgap_linecheck_quirk)
reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
+ if (dwc->parkmode_disable_ss_quirk)
+ reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
+
dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}
@@ -1342,6 +1339,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
"snps,dis-del-phy-power-chg-quirk");
dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
"snps,dis-tx-ipgap-linecheck-quirk");
+ dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
+ "snps,parkmode-disable-ss-quirk");
dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
"snps,tx_de_emphasis_quirk");
@@ -1441,11 +1440,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (!dwc)
return -ENOMEM;
- dwc->clks = devm_kmemdup(dev, dwc3_core_clks, sizeof(dwc3_core_clks),
- GFP_KERNEL);
- if (!dwc->clks)
- return -ENOMEM;
-
dwc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1476,22 +1470,23 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- dwc->reset = devm_reset_control_get_optional_shared(dev, NULL);
+ dwc->reset = devm_reset_control_array_get(dev, true, true);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
if (dev->of_node) {
- dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
-
- ret = devm_clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+ ret = devm_clk_bulk_get_all(dev, &dwc->clks);
if (ret == -EPROBE_DEFER)
return ret;
/*
* Clocks are optional, but new DT platforms should support all
* clocks as required by the DT-binding.
*/
- if (ret)
+ if (ret < 0)
dwc->num_clks = 0;
+ else
+ dwc->num_clks = ret;
+
}
ret = reset_control_deassert(dwc->reset);
@@ -1637,6 +1632,8 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
+ if (pm_runtime_suspended(dwc->dev))
+ break;
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
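devm_clk_bulk_get_all(), as adopted above, allocates and fills the clk_bulk_data array from whatever clocks the device's DT node defines and returns their count (or a negative error). Typical probe-path shape, with my_priv hypothetical:

#include <linux/clk.h>

struct my_priv {
	struct clk_bulk_data *clks;
	int num_clks;
};

static int my_get_clocks(struct device *dev, struct my_priv *priv)
{
	int ret;

	ret = devm_clk_bulk_get_all(dev, &priv->clks);
	if (ret == -EPROBE_DEFER)
		return ret;
	/* clocks are optional for this binding */
	priv->num_clks = ret < 0 ? 0 : ret;

	return clk_bulk_prepare_enable(priv->num_clks, priv->clks);
}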
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 77c4a9abe365..4c171a8e215f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -25,6 +25,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
#include <linux/ulpi/interface.h>
#include <linux/phy/phy.h>
@@ -249,6 +250,7 @@
#define DWC3_GUCTL_HSTINAUTORETRY BIT(14)
/* Global User Control 1 Register */
+#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17)
#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
@@ -305,10 +307,14 @@
/* Global TX Fifo Size Register */
#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */
-#define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */
-#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
+#define DWC31_GTXFIFOSIZ_TXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */
+#define DWC3_GTXFIFOSIZ_TXFDEP(n) ((n) & 0xffff)
#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
+/* Global RX Fifo Size Register */
+#define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */
+#define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff)
+
/* Global Event Size Registers */
#define DWC3_GEVNTSIZ_INTMASK BIT(31)
#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
@@ -953,6 +959,9 @@ struct dwc3_scratchpad_array {
* @hsphy_mode: UTMI phy mode, one of following:
* - USBPHY_INTERFACE_MODE_UTMI
* - USBPHY_INTERFACE_MODE_UTMIW
+ * @role_sw: usb_role_switch handle
+ * @role_switch_default_mode: default operation mode of controller while
+ * usb role is USB_ROLE_NONE.
* @usb2_phy: pointer to USB2 PHY
* @usb3_phy: pointer to USB3 PHY
* @usb2_generic_phy: pointer to USB2 PHY
@@ -1024,6 +1033,8 @@ struct dwc3_scratchpad_array {
* change quirk.
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
+ * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
+ * instances in park mode.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
* @tx_de_emphasis: Tx de-emphasis value
* 0 - -6dB de-emphasis
@@ -1086,6 +1097,8 @@ struct dwc3 {
struct extcon_dev *edev;
struct notifier_block edev_nb;
enum usb_phy_interface hsphy_mode;
+ struct usb_role_switch *role_sw;
+ enum usb_dr_mode role_switch_default_mode;
u32 fladj;
u32 irq_gadget;
@@ -1215,6 +1228,7 @@ struct dwc3 {
unsigned dis_u2_freeclk_exists_quirk:1;
unsigned dis_del_phy_power_chg_quirk:1;
unsigned dis_tx_ipgap_linecheck_quirk:1;
+ unsigned parkmode_disable_ss_quirk:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index c946d64142ad..7db1ffc92bbd 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -476,6 +476,94 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
return edev;
}
+#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
+#define ROLE_SWITCH 1
+static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
+ enum usb_role role)
+{
+ struct dwc3 *dwc = usb_role_switch_get_drvdata(sw);
+ u32 mode;
+
+ switch (role) {
+ case USB_ROLE_HOST:
+ mode = DWC3_GCTL_PRTCAP_HOST;
+ break;
+ case USB_ROLE_DEVICE:
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+ break;
+ default:
+ if (dwc->role_switch_default_mode == USB_DR_MODE_HOST)
+ mode = DWC3_GCTL_PRTCAP_HOST;
+ else
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+ break;
+ }
+
+ dwc3_set_mode(dwc, mode);
+ return 0;
+}
+
+static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw)
+{
+ struct dwc3 *dwc = usb_role_switch_get_drvdata(sw);
+ unsigned long flags;
+ enum usb_role role;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ role = USB_ROLE_HOST;
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ role = USB_ROLE_DEVICE;
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ role = dwc->current_otg_role;
+ break;
+ default:
+ if (dwc->role_switch_default_mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else
+ role = USB_ROLE_DEVICE;
+ break;
+ }
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return role;
+}
+
+static int dwc3_setup_role_switch(struct dwc3 *dwc)
+{
+ struct usb_role_switch_desc dwc3_role_switch = {NULL};
+ const char *str;
+ u32 mode;
+ int ret;
+
+ ret = device_property_read_string(dwc->dev, "role-switch-default-mode",
+ &str);
+ if (ret >= 0 && !strncmp(str, "host", strlen("host"))) {
+ dwc->role_switch_default_mode = USB_DR_MODE_HOST;
+ mode = DWC3_GCTL_PRTCAP_HOST;
+ } else {
+ dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+ }
+
+ dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
+ dwc3_role_switch.set = dwc3_usb_role_switch_set;
+ dwc3_role_switch.get = dwc3_usb_role_switch_get;
+ dwc3_role_switch.driver_data = dwc;
+ dwc->role_sw = usb_role_switch_register(dwc->dev, &dwc3_role_switch);
+ if (IS_ERR(dwc->role_sw))
+ return PTR_ERR(dwc->role_sw);
+
+ dwc3_set_mode(dwc, mode);
+ return 0;
+}
+#else
+#define ROLE_SWITCH 0
+#define dwc3_setup_role_switch(x) 0
+#endif
+
int dwc3_drd_init(struct dwc3 *dwc)
{
int ret, irq;
@@ -484,7 +572,12 @@ int dwc3_drd_init(struct dwc3 *dwc)
if (IS_ERR(dwc->edev))
return PTR_ERR(dwc->edev);
- if (dwc->edev) {
+ if (ROLE_SWITCH &&
+ device_property_read_bool(dwc->dev, "usb-role-switch")) {
+ ret = dwc3_setup_role_switch(dwc);
+ if (ret < 0)
+ return ret;
+ } else if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
&dwc->edev_nb);
@@ -531,6 +624,9 @@ void dwc3_drd_exit(struct dwc3 *dwc)
{
unsigned long flags;
+ if (dwc->role_sw)
+ usb_role_switch_unregister(dwc->role_sw);
+
if (dwc->edev)
extcon_unregister_notifier(dwc->edev, EXTCON_USB_HOST,
&dwc->edev_nb);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 90bb022737da..48b68b6f0dc8 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -162,6 +162,12 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
.suspend_clk_idx = -1,
};
+static const struct dwc3_exynos_driverdata exynos5420_drvdata = {
+ .clk_names = { "usbdrd30", "usbdrd30_susp_clk"},
+ .num_clks = 2,
+ .suspend_clk_idx = 1,
+};
+
static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
.clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
.num_clks = 4,
@@ -179,6 +185,9 @@ static const struct of_device_id exynos_dwc3_match[] = {
.compatible = "samsung,exynos5250-dwusb3",
.data = &exynos5250_drvdata,
}, {
+ .compatible = "samsung,exynos5420-dwusb3",
+ .data = &exynos5420_drvdata,
+ }, {
.compatible = "samsung,exynos5433-dwusb3",
.data = &exynos5433_drvdata,
}, {
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 8a3ec1a951fe..b81d085bc534 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -107,10 +107,37 @@ static const char *phy_names[PHY_COUNT] = {
"usb2-phy0", "usb2-phy1", "usb3-phy0",
};
+static struct clk_bulk_data meson_g12a_clocks[] = {
+ { .id = NULL },
+};
+
+static struct clk_bulk_data meson_a1_clocks[] = {
+ { .id = "usb_ctrl" },
+ { .id = "usb_bus" },
+ { .id = "xtal_usb_ctrl" },
+};
+
+struct dwc3_meson_g12a_drvdata {
+ bool otg_switch_supported;
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static struct dwc3_meson_g12a_drvdata g12a_drvdata = {
+ .otg_switch_supported = true,
+ .clks = meson_g12a_clocks,
+ .num_clks = ARRAY_SIZE(meson_g12a_clocks),
+};
+
+static struct dwc3_meson_g12a_drvdata a1_drvdata = {
+ .otg_switch_supported = false,
+ .clks = meson_a1_clocks,
+ .num_clks = ARRAY_SIZE(meson_a1_clocks),
+};
+
struct dwc3_meson_g12a {
struct device *dev;
struct regmap *regmap;
- struct clk *clk;
struct reset_control *reset;
struct phy *phys[PHY_COUNT];
enum usb_dr_mode otg_mode;
@@ -120,6 +147,7 @@ struct dwc3_meson_g12a {
struct regulator *vbus;
struct usb_role_switch_desc switch_desc;
struct usb_role_switch *role_switch;
+ const struct dwc3_meson_g12a_drvdata *drvdata;
};
static void dwc3_meson_g12a_usb2_set_mode(struct dwc3_meson_g12a *priv,
@@ -151,7 +179,7 @@ static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv)
U2P_R0_POWER_ON_RESET,
U2P_R0_POWER_ON_RESET);
- if (i == USB2_OTG_PHY) {
+ if (priv->drvdata->otg_switch_supported && i == USB2_OTG_PHY) {
regmap_update_bits(priv->regmap,
U2P_R0 + (U2P_REG_SIZE * i),
U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
@@ -295,7 +323,7 @@ static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv,
{
int ret;
- if (!priv->phys[USB2_OTG_PHY])
+ if (!priv->drvdata->otg_switch_supported || !priv->phys[USB2_OTG_PHY])
return -EINVAL;
if (mode == PHY_MODE_USB_HOST)
@@ -321,9 +349,10 @@ static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv,
return 0;
}
-static int dwc3_meson_g12a_role_set(struct device *dev, enum usb_role role)
+static int dwc3_meson_g12a_role_set(struct usb_role_switch *sw,
+ enum usb_role role)
{
- struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+ struct dwc3_meson_g12a *priv = usb_role_switch_get_drvdata(sw);
enum phy_mode mode;
if (role == USB_ROLE_NONE)
@@ -338,9 +367,9 @@ static int dwc3_meson_g12a_role_set(struct device *dev, enum usb_role role)
return dwc3_meson_g12a_otg_mode_set(priv, mode);
}
-static enum usb_role dwc3_meson_g12a_role_get(struct device *dev)
+static enum usb_role dwc3_meson_g12a_role_get(struct usb_role_switch *sw)
{
- struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+ struct dwc3_meson_g12a *priv = usb_role_switch_get_drvdata(sw);
return priv->otg_phy_mode == PHY_MODE_USB_HOST ?
USB_ROLE_HOST : USB_ROLE_DEVICE;
@@ -380,14 +409,61 @@ static struct device *dwc3_meson_g12_find_child(struct device *dev,
return &pdev->dev;
}
+static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
+ struct dwc3_meson_g12a *priv)
+{
+ enum phy_mode otg_id;
+ int ret, irq;
+ struct device *dev = &pdev->dev;
+
+ if (!priv->drvdata->otg_switch_supported)
+ return 0;
+
+ if (priv->otg_mode == USB_DR_MODE_OTG) {
+ /* Ack irq before registering */
+ regmap_update_bits(priv->regmap, USB_R5,
+ USB_R5_ID_DIG_IRQ, 0);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ dwc3_meson_g12a_irq_thread,
+ IRQF_ONESHOT, pdev->name, priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup OTG mode corresponding to the ID pin */
+ if (priv->otg_mode == USB_DR_MODE_OTG) {
+ otg_id = dwc3_meson_g12a_get_id(priv);
+ if (otg_id != priv->otg_phy_mode) {
+ if (dwc3_meson_g12a_otg_mode_set(priv, otg_id))
+ dev_warn(dev, "Failed to switch OTG mode\n");
+ }
+ }
+
+ /* Setup role switcher */
+ priv->switch_desc.usb2_port = dwc3_meson_g12_find_child(dev,
+ "snps,dwc3");
+ priv->switch_desc.udc = dwc3_meson_g12_find_child(dev, "snps,dwc2");
+ priv->switch_desc.allow_userspace_control = true;
+ priv->switch_desc.set = dwc3_meson_g12a_role_set;
+ priv->switch_desc.get = dwc3_meson_g12a_role_get;
+ priv->switch_desc.driver_data = priv;
+
+ priv->role_switch = usb_role_switch_register(dev, &priv->switch_desc);
+ if (IS_ERR(priv->role_switch))
+ dev_warn(dev, "Unable to register Role Switch\n");
+
+ return 0;
+}
+
static int dwc3_meson_g12a_probe(struct platform_device *pdev)
{
struct dwc3_meson_g12a *priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *base;
- enum phy_mode otg_id;
- int ret, i, irq;
+ int ret, i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -409,17 +485,18 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
priv->vbus = NULL;
}
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ priv->drvdata = of_device_get_match_data(&pdev->dev);
- ret = clk_prepare_enable(priv->clk);
+ ret = devm_clk_bulk_get(dev,
+ priv->drvdata->num_clks,
+ priv->drvdata->clks);
if (ret)
return ret;
- devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- priv->clk);
+ ret = clk_bulk_prepare_enable(priv->drvdata->num_clks,
+ priv->drvdata->clks);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, priv);
priv->dev = dev;
@@ -433,41 +510,28 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = reset_control_reset(priv->reset);
if (ret)
- return ret;
+ goto err_disable_clks;
ret = dwc3_meson_g12a_get_phys(priv);
if (ret)
- return ret;
+ goto err_disable_clks;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
if (ret)
- return ret;
+ goto err_disable_clks;
}
/* Get dr_mode */
priv->otg_mode = usb_get_dr_mode(dev);
- if (priv->otg_mode == USB_DR_MODE_OTG) {
- /* Ack irq before registering */
- regmap_update_bits(priv->regmap, USB_R5,
- USB_R5_ID_DIG_IRQ, 0);
-
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- dwc3_meson_g12a_irq_thread,
- IRQF_ONESHOT, pdev->name, priv);
- if (ret)
- return ret;
- }
-
dwc3_meson_g12a_usb_init(priv);
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_init(priv->phys[i]);
if (ret)
- return ret;
+ goto err_disable_clks;
}
/* Set PHY Power */
@@ -478,31 +542,12 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
}
ret = of_platform_populate(np, NULL, NULL, dev);
- if (ret) {
- clk_disable_unprepare(priv->clk);
+ if (ret)
goto err_phys_power;
- }
- /* Setup OTG mode corresponding to the ID pin */
- if (priv->otg_mode == USB_DR_MODE_OTG) {
- otg_id = dwc3_meson_g12a_get_id(priv);
- if (otg_id != priv->otg_phy_mode) {
- if (dwc3_meson_g12a_otg_mode_set(priv, otg_id))
- dev_warn(dev, "Failed to switch OTG mode\n");
- }
- }
-
- /* Setup role switcher */
- priv->switch_desc.usb2_port = dwc3_meson_g12_find_child(dev,
- "snps,dwc3");
- priv->switch_desc.udc = dwc3_meson_g12_find_child(dev, "snps,dwc2");
- priv->switch_desc.allow_userspace_control = true;
- priv->switch_desc.set = dwc3_meson_g12a_role_set;
- priv->switch_desc.get = dwc3_meson_g12a_role_get;
-
- priv->role_switch = usb_role_switch_register(dev, &priv->switch_desc);
- if (IS_ERR(priv->role_switch))
- dev_warn(dev, "Unable to register Role Switch\n");
+ ret = dwc3_meson_g12a_otg_init(pdev, priv);
+ if (ret)
+ goto err_phys_power;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -518,6 +563,10 @@ err_phys_exit:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_exit(priv->phys[i]);
+err_disable_clks:
+ clk_bulk_disable_unprepare(priv->drvdata->num_clks,
+ priv->drvdata->clks);
+
return ret;
}
@@ -527,7 +576,8 @@ static int dwc3_meson_g12a_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int i;
- usb_role_switch_unregister(priv->role_switch);
+ if (priv->drvdata->otg_switch_supported)
+ usb_role_switch_unregister(priv->role_switch);
of_platform_depopulate(dev);
@@ -540,6 +590,9 @@ static int dwc3_meson_g12a_remove(struct platform_device *pdev)
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
+ clk_bulk_disable_unprepare(priv->drvdata->num_clks,
+ priv->drvdata->clks);
+
return 0;
}
@@ -547,7 +600,8 @@ static int __maybe_unused dwc3_meson_g12a_runtime_suspend(struct device *dev)
{
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
- clk_disable(priv->clk);
+ clk_bulk_disable_unprepare(priv->drvdata->num_clks,
+ priv->drvdata->clks);
return 0;
}
@@ -556,7 +610,8 @@ static int __maybe_unused dwc3_meson_g12a_runtime_resume(struct device *dev)
{
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
- return clk_enable(priv->clk);
+ return clk_bulk_prepare_enable(priv->drvdata->num_clks,
+ priv->drvdata->clks);
}
static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev)
@@ -619,7 +674,14 @@ static const struct dev_pm_ops dwc3_meson_g12a_dev_pm_ops = {
};
static const struct of_device_id dwc3_meson_g12a_match[] = {
- { .compatible = "amlogic,meson-g12a-usb-ctrl" },
+ {
+ .compatible = "amlogic,meson-g12a-usb-ctrl",
+ .data = &g12a_drvdata,
+ },
+ {
+ .compatible = "amlogic,meson-a1-usb-ctrl",
+ .data = &a1_drvdata,
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dwc3_meson_g12a_match);
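The meson glue's per-SoC conversion above follows the usual pattern: hang a driver-data struct off each compatible string and fetch it with of_device_get_match_data() in probe. In miniature, with my_* names and clock names hypothetical:

#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_drvdata {
	struct clk_bulk_data *clks;
	int num_clks;
};

static struct clk_bulk_data my_soc_a_clks[] = {
	{ .id = "usb_ctrl" },
	{ .id = "usb_bus" },
};

static const struct my_drvdata my_soc_a_data = {
	.clks = my_soc_a_clks,
	.num_clks = ARRAY_SIZE(my_soc_a_clks),
};

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,soc-a-usb", .data = &my_soc_a_data },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_drvdata *d = of_device_get_match_data(&pdev->dev);
	int ret;

	ret = devm_clk_bulk_get(&pdev->dev, d->num_clks, d->clks);
	if (ret)
		return ret;

	return clk_bulk_prepare_enable(d->num_clks, d->clks);
}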
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7051611229c9..b67372737dc9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -114,6 +114,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 261af9e38ddd..1dfd024cd06b 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -9,7 +9,7 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/irq.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/extcon.h>
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 1e00bf2d65a2..585cb3deea7a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1521,7 +1521,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
for (i = 0; i < req->num_trbs; i++) {
struct dwc3_trb *trb;
- trb = req->trb + i;
+ trb = &dep->trb_pool[dep->trb_dequeue];
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
}
@@ -1728,7 +1728,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
u32 reg;
u8 link_state;
- u8 speed;
/*
* According to the Databook, a remote wakeup request should
@@ -1738,16 +1737,13 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
*/
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- speed = reg & DWC3_DSTS_CONNECTSPD;
- if ((speed == DWC3_DSTS_SUPERSPEED) ||
- (speed == DWC3_DSTS_SUPERSPEED_PLUS))
- return 0;
-
link_state = DWC3_DSTS_USBLNKST(reg);
switch (link_state) {
+ case DWC3_LINK_STATE_RESET:
case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
+ case DWC3_LINK_STATE_RESUME:
break;
default:
return -EINVAL;
@@ -2227,7 +2223,6 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
int mdwidth;
- int kbytes;
int size;
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
@@ -2236,24 +2231,24 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
if (dwc3_is_usb31(dwc))
- size = DWC31_GTXFIFOSIZ_TXFDEF(size);
+ size = DWC31_GTXFIFOSIZ_TXFDEP(size);
else
- size = DWC3_GTXFIFOSIZ_TXFDEF(size);
+ size = DWC3_GTXFIFOSIZ_TXFDEP(size);
/* FIFO Depth is in MDWIDTH bytes. Multiply */
size *= mdwidth;
- kbytes = size / 1024;
- if (kbytes == 0)
- kbytes = 1;
-
/*
- * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for
- * internal overhead. We don't really know how these are used,
- * but documentation say it exists.
+ * To meet the performance requirement, a minimum TxFIFO size of 3x
+ * MaxPacketSize is recommended for endpoints that support burst, and a
+ * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
+ * support burst. Using those numbers, we can calculate the max packet
+ * limit as below.
*/
- size -= mdwidth * (kbytes + 1);
- size /= kbytes;
+ if (dwc->maximum_speed >= USB_SPEED_SUPER)
+ size /= 3;
+ else
+ size /= 2;
usb_ep_set_maxpacket_limit(&dep->endpoint, size);
@@ -2271,8 +2266,39 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
+ int mdwidth;
+ int size;
+
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
- usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
+ /* MDWIDTH is represented in bits, convert to bytes */
+ mdwidth /= 8;
+
+ /* All OUT endpoints share a single RxFIFO space */
+ size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
+ if (dwc3_is_usb31(dwc))
+ size = DWC31_GRXFIFOSIZ_RXFDEP(size);
+ else
+ size = DWC3_GRXFIFOSIZ_RXFDEP(size);
+
+ /* FIFO depth is in MDWIDTH bytes */
+ size *= mdwidth;
+
+ /*
+ * To meet the performance requirement, the minimum recommended RxFIFO
+ * size is defined as follows:
+ * RxFIFO size >= (3 x MaxPacketSize) +
+ * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
+ *
+ * Then calculate the max packet limit as below.
+ */
+ size -= (3 * 8) + 16;
+ if (size < 0)
+ size = 0;
+ else
+ size /= 3;
+
+ usb_ep_set_maxpacket_limit(&dep->endpoint, size);
dep->endpoint.max_streams = 15;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
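A worked example of the FIFO-based sizing above, with illustrative hardware values rather than any specific SoC's:

/*
 * MDWIDTH = 64 bits -> 8 bytes per FIFO location.
 * TxFIFO:  TXFDEP = 384 locations -> 384 * 8 = 3072 bytes;
 *          SuperSpeed (burst-capable) limit = 3072 / 3 = 1024 bytes.
 * RxFIFO:  RXFDEP = 384 locations -> 3072 bytes; minus the setup and
 *          clock-crossing margin, 3072 - (3 * 8 + 16) = 3032 bytes;
 *          OUT maxpacket limit = 3032 / 3 = 1010 bytes.
 */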
@@ -2457,9 +2483,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
for_each_sg(sg, s, pending, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
- if (trb->ctrl & DWC3_TRB_CTRL_HWO)
- break;
-
req->sg = sg_next(s);
req->num_pending_sgs--;
@@ -2484,14 +2507,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
- /*
- * For OUT direction, host may send less than the setup
- * length. Return true for all OUT requests.
- */
- if (!req->direction)
- return true;
-
- return req->request.actual == req->request.length;
+ return req->num_pending_sgs == 0;
}
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2515,8 +2531,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
- if (!dwc3_gadget_ep_request_completed(req) ||
- req->num_pending_sgs) {
+ if (!dwc3_gadget_ep_request_completed(req)) {
__dwc3_gadget_kick_transfer(dep);
goto out;
}
@@ -2570,10 +2585,8 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
- if (stop) {
+ if (stop)
dwc3_stop_active_transfer(dep, true, true);
- dep->flags = DWC3_EP_ENABLED;
- }
/*
* WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index fa252870c926..86dbd012b984 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* host.c - DesignWare USB3 DRD Controller Host Glue
*
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
@@ -7,6 +7,7 @@
* Authors: Felipe Balbi <balbi@ti.com>,
*/
+#include <linux/acpi.h>
#include <linux/platform_device.h>
#include "core.h"
@@ -75,6 +76,7 @@ int dwc3_host_init(struct dwc3 *dwc)
}
xhci->dev.parent = dwc->dev;
+ ACPI_COMPANION_SET(&xhci->dev, ACPI_COMPANION(dwc->dev));
dwc->xhci = xhci;
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 9edff17111f7..3054b89512ff 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -227,6 +227,8 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__field(u32, size)
__field(u32, ctrl)
__field(u32, type)
+ __field(u32, enqueue)
+ __field(u32, dequeue)
),
TP_fast_assign(
__assign_str(name, dep->name);
@@ -236,9 +238,12 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
__entry->type = usb_endpoint_type(dep->endpoint.desc);
+ __entry->enqueue = dep->trb_enqueue;
+ __entry->dequeue = dep->trb_dequeue;
),
- TP_printk("%s: trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
- __get_str(name), __entry->trb, __entry->bph, __entry->bpl,
+ TP_printk("%s: trb %p (E%d:D%d) buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
+ __get_str(name), __entry->trb, __entry->enqueue,
+ __entry->dequeue, __entry->bph, __entry->bpl,
({char *s;
int pcm = ((__entry->size >> 24) & 3) + 1;
switch (__entry->type) {
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 971c6b92484a..171280c80228 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -728,19 +728,19 @@ static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
case COMP_USB_TRANSACTION_ERROR:
case COMP_STALL_ERROR:
default:
- if (ep_id == XDBC_EPID_OUT)
+ if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
xdbc.flags |= XDBC_FLAGS_OUT_STALL;
- if (ep_id == XDBC_EPID_IN)
+ if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
xdbc.flags |= XDBC_FLAGS_IN_STALL;
xdbc_trace("endpoint %d stalled\n", ep_id);
break;
}
- if (ep_id == XDBC_EPID_IN) {
+ if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
- } else if (ep_id == XDBC_EPID_OUT) {
+ } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
} else {
xdbc_trace("invalid endpoint id %d\n", ep_id);
diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
index 673686eeddd7..6e2b7266a695 100644
--- a/drivers/usb/early/xhci-dbc.h
+++ b/drivers/usb/early/xhci-dbc.h
@@ -120,8 +120,22 @@ struct xdbc_ring {
u32 cycle_state;
};
-#define XDBC_EPID_OUT 2
-#define XDBC_EPID_IN 3
+/*
+ * These are the "Endpoint ID" (also known as "Context Index") values for the
+ * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data
+ * structure.
+ * According to the "eXtensible Host Controller Interface for Universal Serial
+ * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer
+ * Rings", these should be 0 and 1, and those are the values AMD machines give
+ * you; but Intel machines seem to use the formula from section "4.5.1 Device
+ * Context Index", which is supposed to be used for the Device Context only.
+ * Luckily the values from Intel don't overlap with those from AMD, so we can
+ * just test for both.
+ */
+#define XDBC_EPID_OUT 0
+#define XDBC_EPID_IN 1
+#define XDBC_EPID_OUT_INTEL 2
+#define XDBC_EPID_IN_INTEL 3
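/*
 * For reference, the section "4.5.1 Device Context Index" formula that
 * Intel hardware appears to apply here is:
 *
 *   DCI = (Endpoint Number * 2) + Direction   (Direction = 1 for IN)
 *
 * The DbC exposes a single bulk endpoint pair at endpoint number 1, so:
 *
 *   OUT: (1 * 2) + 0 = 2   == XDBC_EPID_OUT_INTEL
 *   IN:  (1 * 2) + 1 = 3   == XDBC_EPID_IN_INTEL
 */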
struct xdbc_state {
u16 vendor;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 223f72d4d9ed..cb4950cf1cdc 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -861,6 +861,11 @@ static int set_config(struct usb_composite_dev *cdev,
else
power = min(power, 900U);
done:
+ if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
+ usb_gadget_set_selfpowered(gadget);
+ else
+ usb_gadget_clear_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
result = USB_GADGET_DELAYED_STATUS;
@@ -2279,6 +2284,7 @@ void composite_suspend(struct usb_gadget *gadget)
cdev->suspended = 1;
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, 2);
}
@@ -2307,6 +2313,9 @@ void composite_resume(struct usb_gadget *gadget)
else
maxpower = min(maxpower, 900U);
+ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
+ usb_gadget_clear_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, maxpower);
}
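/*
 * The policy applied in both hunks above, expressed as a standalone
 * sketch (the helper name is hypothetical, not part of the patch): a
 * gadget drawing no more than USB_SELF_POWER_VBUS_MAX_DRAW from VBUS
 * may report itself as self-powered in GET_STATUS; anything above that
 * must report bus-powered.
 */
static void update_selfpowered_sketch(struct usb_gadget *gadget,
				      unsigned int ma)
{
	if (ma <= USB_SELF_POWER_VBUS_MAX_DRAW)
		usb_gadget_set_selfpowered(gadget);
	else
		usb_gadget_clear_selfpowered(gadget);
}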
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 32b637e3e1fa..6a9aa4413d64 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -260,6 +260,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
char *name;
int ret;
+ if (strlen(page) < len)
+ return -EOVERFLOW;
+
name = kstrdup(page, GFP_KERNEL);
if (!name)
return -ENOMEM;
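/*
 * Rationale for the new check: "page" arrives NUL-terminated from
 * configfs, so strlen(page) < len means the len-byte write contained an
 * embedded NUL (e.g. writing "udc\0x" with len = 5 gives strlen 3).
 * Without the check, kstrdup() below would silently truncate the UDC
 * name at the embedded NUL instead of failing the store.
 */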
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 571917677d35..10f01f974f67 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1120,6 +1120,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (unlikely(ret)) {
+ io_data->req = NULL;
usb_ep_free_request(ep->ep, req);
goto error_lock;
}
@@ -1703,7 +1704,7 @@ static void ffs_data_put(struct ffs_data *ffs)
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
- waitqueue_active(&ffs->ep0req_completion.wait) ||
+ swait_active(&ffs->ep0req_completion.wait) ||
waitqueue_active(&ffs->wait));
destroy_workqueue(ffs->io_completion_wq);
kfree(ffs->dev_name);
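/*
 * Context for the swait_active() change above: struct completion embeds
 * a simple wait queue (struct swait_queue_head), not a regular
 * wait_queue_head_t, so waitqueue_active() was inspecting it through
 * the wrong API; swait_active() is the matching accessor.
 */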
@@ -1812,6 +1813,10 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs->state = FFS_READ_DESCRIPTORS;
ffs->setup_state = FFS_NO_SETUP;
ffs->flags = 0;
+
+ ffs->ms_os_descs_ext_prop_count = 0;
+ ffs->ms_os_descs_ext_prop_name_len = 0;
+ ffs->ms_os_descs_ext_prop_data_len = 0;
}
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 7c96c4665178..950d2a85f098 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -216,6 +216,7 @@
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 8b72b192c747..d7f6cc51b7ec 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -48,7 +48,7 @@ struct f_phonet {
struct usb_ep *in_ep, *out_ep;
struct usb_request *in_req;
- struct usb_request *out_reqv[0];
+ struct usb_request *out_reqv[];
};
static int phonet_rxq_size = 17;
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
index 6677ae932de0..349deae7cabd 100644
--- a/drivers/usb/gadget/function/f_uac1_legacy.c
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -752,8 +752,6 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
audio->out_ep = ep;
audio->out_ep->desc = &as_out_ep_desc;
- status = -ENOMEM;
-
/* copy descriptors, and track endpoint copies */
status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
NULL);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index fb0a892687c0..0b9712616455 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -428,7 +428,7 @@ uvc_register_video(struct uvc_device *uvc)
video_set_drvdata(&uvc->vdev, uvc);
- ret = video_register_device(&uvc->vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&uvc->vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0)
return ret;
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index e5e3a2553aaa..bdeb1e233fc9 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -172,11 +172,6 @@ enum data_direction {
DATA_DIR_NONE
};
-static inline u32 get_unaligned_be24(u8 *buf)
-{
- return 0xffffff & (u32) get_unaligned_be32(buf - 1);
-}
-
static inline struct fsg_lun *fsg_lun_from_dev(struct device *dev)
{
return container_of(dev, struct fsg_lun, dev);
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 6e7e1a9202e6..f02c38b32a2b 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -13,32 +13,28 @@
# With help from a special transceiver and a "Mini-AB" jack, systems with
# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
#
+# A Linux "Gadget Driver" talks to the USB Peripheral Controller
+# driver through the abstract "gadget" API. Some other operating
+# systems call these "client" drivers, of which "class drivers"
+# are a subset (implementing a USB device class specification).
+# A gadget driver implements one or more USB functions using
+# the peripheral hardware.
+#
+# Gadget drivers are hardware-neutral, or "platform independent",
+# except that they sometimes must understand quirks or limitations
+# of the particular controllers they work with. For example, when
+# a controller doesn't support alternate configurations or provide
+# enough of the right types of endpoints, the gadget driver might
+# not be able to work with that controller, or might need to implement
+# a less common variant of a device class protocol.
+#
+# The available choices each represent a single precomposed USB
+# gadget configuration. In the device model, each option contains
+# both the device instantiation as a child of a USB gadget
+# controller, and the relevant drivers for each function declared
+# by the device.
-choice
- tristate "USB Gadget precomposed configurations"
- default USB_ETH
- optional
- help
- A Linux "Gadget Driver" talks to the USB Peripheral Controller
- driver through the abstract "gadget" API. Some other operating
- systems call these "client" drivers, of which "class drivers"
- are a subset (implementing a USB device class specification).
- A gadget driver implements one or more USB functions using
- the peripheral hardware.
-
- Gadget drivers are hardware-neutral, or "platform independent",
- except that they sometimes must understand quirks or limitations
- of the particular controllers they work with. For example, when
- a controller doesn't support alternate configurations or provide
- enough of the right types of endpoints, the gadget driver might
- not be able work with that controller, or might need to implement
- a less common variant of a device class protocol.
-
- The available choices each represent a single precomposed USB
- gadget configuration. In the device model, each option contains
- both the device instantiation as a child for a USB gadget
- controller, and the relevant drivers for each function declared
- by the device.
+menu "USB Gadget precomposed configurations"
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
@@ -516,4 +512,15 @@ config USB_G_WEBCAM
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "g_webcam".
-endchoice
+config USB_RAW_GADGET
+ tristate "USB Raw Gadget"
+ help
+ USB Raw Gadget is a kernel module that provides a userspace interface
+	  for the USB Gadget subsystem. Essentially it allows emulating USB
+ devices from userspace. See Documentation/usb/raw-gadget.rst for
+ details.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "raw_gadget".
+
+endmenu
diff --git a/drivers/usb/gadget/legacy/Makefile b/drivers/usb/gadget/legacy/Makefile
index abd0c3e66a05..4d864bf82799 100644
--- a/drivers/usb/gadget/legacy/Makefile
+++ b/drivers/usb/gadget/legacy/Makefile
@@ -43,3 +43,4 @@ obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
obj-$(CONFIG_USB_G_NCM) += g_ncm.o
obj-$(CONFIG_USB_G_ACM_MS) += g_acm_ms.o
obj-$(CONFIG_USB_GADGET_TARGET) += tcm_usb_gadget.o
+obj-$(CONFIG_USB_RAW_GADGET) += raw_gadget.o
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index dd81fd538cb8..a748ed0842e8 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail;
+ }
usb_otg_descriptor_init(cdev->gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index 8d7a556ece30..563363aba48f 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index 9eea2d18f2bf..265c392810d7 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -174,7 +174,7 @@ put:
}
static struct usb_composite_driver midi_driver = {
- .name = (char *) longname,
+ .name = longname,
.dev = &device_desc,
.strings = dev_strings,
.max_speed = USB_SPEED_HIGH,
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index b47938dff1a2..3afddd3bea6e 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -344,7 +344,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
- value = wait_event_interruptible (done.wait, done.done);
+ value = wait_for_completion_interruptible(&done);
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
@@ -353,7 +353,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
- wait_event (done.wait, done.done);
+ wait_for_completion(&done);
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
@@ -1361,7 +1361,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
req->buf = dev->rbuf;
req->context = NULL;
- value = -EOPNOTSUPP;
switch (ctrl->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
@@ -1736,7 +1735,7 @@ static struct usb_gadget_driver gadgetfs_driver = {
.suspend = gadgetfs_suspend,
.driver = {
- .name = (char *) shortname,
+ .name = shortname,
},
};
@@ -1784,7 +1783,7 @@ static ssize_t
dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
struct dev_data *dev = fd->private_data;
- ssize_t value = len, length = len;
+ ssize_t value, length = len;
unsigned total;
u32 tag;
char *kbuf;
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index c61e71ba7045..0f1b45e3abd1 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
new file mode 100644
index 000000000000..e01e366d89cd
--- /dev/null
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -0,0 +1,1280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB Raw Gadget driver.
+ * See Documentation/usb/raw-gadget.rst for more details.
+ *
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/ch11.h>
+#include <linux/usb/gadget.h>
+
+#include <uapi/linux/usb/raw_gadget.h>
+
+#define DRIVER_DESC "USB Raw Gadget"
+#define DRIVER_NAME "raw-gadget"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Andrey Konovalov");
+MODULE_LICENSE("GPL");
+
+/*----------------------------------------------------------------------*/
+
+#define RAW_EVENT_QUEUE_SIZE 16
+
+struct raw_event_queue {
+ /* See the comment in raw_event_queue_fetch() for locking details. */
+ spinlock_t lock;
+ struct semaphore sema;
+ struct usb_raw_event *events[RAW_EVENT_QUEUE_SIZE];
+ int size;
+};
+
+static void raw_event_queue_init(struct raw_event_queue *queue)
+{
+ spin_lock_init(&queue->lock);
+ sema_init(&queue->sema, 0);
+ queue->size = 0;
+}
+
+static int raw_event_queue_add(struct raw_event_queue *queue,
+ enum usb_raw_event_type type, size_t length, const void *data)
+{
+ unsigned long flags;
+ struct usb_raw_event *event;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ if (WARN_ON(queue->size >= RAW_EVENT_QUEUE_SIZE)) {
+ spin_unlock_irqrestore(&queue->lock, flags);
+ return -ENOMEM;
+ }
+ event = kmalloc(sizeof(*event) + length, GFP_ATOMIC);
+ if (!event) {
+ spin_unlock_irqrestore(&queue->lock, flags);
+ return -ENOMEM;
+ }
+ event->type = type;
+ event->length = length;
+ if (event->length)
+ memcpy(&event->data[0], data, length);
+ queue->events[queue->size] = event;
+ queue->size++;
+ up(&queue->sema);
+ spin_unlock_irqrestore(&queue->lock, flags);
+ return 0;
+}
+
+static struct usb_raw_event *raw_event_queue_fetch(
+ struct raw_event_queue *queue)
+{
+ int ret;
+ unsigned long flags;
+ struct usb_raw_event *event;
+
+ /*
+ * This function can be called concurrently. We first check that
+ * there's at least one event queued by decrementing the semaphore,
+ * and then take the lock to protect queue struct fields.
+ */
+ ret = down_interruptible(&queue->sema);
+ if (ret)
+ return ERR_PTR(ret);
+ spin_lock_irqsave(&queue->lock, flags);
+ /*
+	 * queue->size must have the same value as the queue->sema counter
+	 * (before the down_interruptible() call above), so this check is a
+	 * fail-safe.
+ */
+ if (WARN_ON(!queue->size)) {
+ spin_unlock_irqrestore(&queue->lock, flags);
+ return ERR_PTR(-ENODEV);
+ }
+ event = queue->events[0];
+ queue->size--;
+ memmove(&queue->events[0], &queue->events[1],
+ queue->size * sizeof(queue->events[0]));
+ spin_unlock_irqrestore(&queue->lock, flags);
+ return event;
+}
+
+static void raw_event_queue_destroy(struct raw_event_queue *queue)
+{
+ int i;
+
+ for (i = 0; i < queue->size; i++)
+ kfree(queue->events[i]);
+ queue->size = 0;
+}
+
+/*----------------------------------------------------------------------*/
+
+struct raw_dev;
+
+enum ep_state {
+ STATE_EP_DISABLED,
+ STATE_EP_ENABLED,
+};
+
+struct raw_ep {
+ struct raw_dev *dev;
+ enum ep_state state;
+ struct usb_ep *ep;
+ u8 addr;
+ struct usb_request *req;
+ bool urb_queued;
+ bool disabling;
+ ssize_t status;
+};
+
+enum dev_state {
+ STATE_DEV_INVALID = 0,
+ STATE_DEV_OPENED,
+ STATE_DEV_INITIALIZED,
+ STATE_DEV_RUNNING,
+ STATE_DEV_CLOSED,
+ STATE_DEV_FAILED
+};
+
+struct raw_dev {
+ struct kref count;
+ spinlock_t lock;
+
+ const char *udc_name;
+ struct usb_gadget_driver driver;
+
+ /* Reference to misc device: */
+ struct device *dev;
+
+ /* Protected by lock: */
+ enum dev_state state;
+ bool gadget_registered;
+ struct usb_gadget *gadget;
+ struct usb_request *req;
+ bool ep0_in_pending;
+ bool ep0_out_pending;
+ bool ep0_urb_queued;
+ ssize_t ep0_status;
+ struct raw_ep eps[USB_RAW_EPS_NUM_MAX];
+ int eps_num;
+
+ struct completion ep0_done;
+ struct raw_event_queue queue;
+};
+
+static struct raw_dev *dev_new(void)
+{
+ struct raw_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ /* Matches kref_put() in raw_release(). */
+ kref_init(&dev->count);
+ spin_lock_init(&dev->lock);
+ init_completion(&dev->ep0_done);
+ raw_event_queue_init(&dev->queue);
+ return dev;
+}
+
+static void dev_free(struct kref *kref)
+{
+ struct raw_dev *dev = container_of(kref, struct raw_dev, count);
+ int i;
+
+ kfree(dev->udc_name);
+ kfree(dev->driver.udc_name);
+ if (dev->req) {
+ if (dev->ep0_urb_queued)
+ usb_ep_dequeue(dev->gadget->ep0, dev->req);
+ usb_ep_free_request(dev->gadget->ep0, dev->req);
+ }
+ raw_event_queue_destroy(&dev->queue);
+ for (i = 0; i < dev->eps_num; i++) {
+ if (dev->eps[i].state == STATE_EP_DISABLED)
+ continue;
+ usb_ep_disable(dev->eps[i].ep);
+ usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
+ kfree(dev->eps[i].ep->desc);
+ dev->eps[i].state = STATE_EP_DISABLED;
+ }
+ kfree(dev);
+}
+
+/*----------------------------------------------------------------------*/
+
+static int raw_queue_event(struct raw_dev *dev,
+ enum usb_raw_event_type type, size_t length, const void *data)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ ret = raw_event_queue_add(&dev->queue, type, length, data);
+ if (ret < 0) {
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_FAILED;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+ return ret;
+}
+
+static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct raw_dev *dev = req->context;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (req->status)
+ dev->ep0_status = req->status;
+ else
+ dev->ep0_status = req->actual;
+ if (dev->ep0_in_pending)
+ dev->ep0_in_pending = false;
+ else
+ dev->ep0_out_pending = false;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ complete(&dev->ep0_done);
+}
+
+static u8 get_ep_addr(const char *name)
+{
+	/* If the endpoint has a fixed function (named e.g. "ep12out-bulk"),
+	 * parse the endpoint address from its name. We deliberately use the
+	 * deprecated simple_strtoul() function here, as the number isn't
+	 * followed by '\0' or '\n'.
+	 */
+ if (isdigit(name[2]))
+ return simple_strtoul(&name[2], NULL, 10);
+ /* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */
+ return USB_RAW_EP_ADDR_ANY;
+}
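/*
 * Examples: "ep1in" and "ep12out-bulk" parse to addresses 1 and 12
 * respectively, while a configurable name such as "ep-a" has no digit
 * at name[2] and yields USB_RAW_EP_ADDR_ANY.
 */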
+
+static int gadget_bind(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ int ret = 0, i = 0;
+ struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
+ struct usb_request *req;
+ struct usb_ep *ep;
+ unsigned long flags;
+
+ if (strcmp(gadget->name, dev->udc_name) != 0)
+ return -ENODEV;
+
+ set_gadget_data(gadget, dev);
+ req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
+ if (!req) {
+ dev_err(&gadget->dev, "usb_ep_alloc_request failed\n");
+ set_gadget_data(gadget, NULL);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->req = req;
+ dev->req->context = dev;
+ dev->req->complete = gadget_ep0_complete;
+ dev->gadget = gadget;
+ gadget_for_each_ep(ep, dev->gadget) {
+ dev->eps[i].ep = ep;
+ dev->eps[i].addr = get_ep_addr(ep->name);
+ dev->eps[i].state = STATE_EP_DISABLED;
+ i++;
+ }
+ dev->eps_num = i;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* Matches kref_put() in gadget_unbind(). */
+ kref_get(&dev->count);
+
+ ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
+ if (ret < 0)
+ dev_err(&gadget->dev, "failed to queue event\n");
+
+ return ret;
+}
+
+static void gadget_unbind(struct usb_gadget *gadget)
+{
+ struct raw_dev *dev = get_gadget_data(gadget);
+
+ set_gadget_data(gadget, NULL);
+ /* Matches kref_get() in gadget_bind(). */
+ kref_put(&dev->count, dev_free);
+}
+
+static int gadget_setup(struct usb_gadget *gadget,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int ret = 0;
+ struct raw_dev *dev = get_gadget_data(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_err(&gadget->dev, "ignoring, device is not running\n");
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ if (dev->ep0_in_pending || dev->ep0_out_pending) {
+ dev_dbg(&gadget->dev, "stalling, request already pending\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength)
+ dev->ep0_in_pending = true;
+ else
+ dev->ep0_out_pending = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
+ if (ret < 0)
+ dev_err(&gadget->dev, "failed to queue event\n");
+ goto out;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+out:
+ return ret;
+}
+
+/* These are currently unused but present in case a UDC driver requires them. */
+static void gadget_disconnect(struct usb_gadget *gadget) { }
+static void gadget_suspend(struct usb_gadget *gadget) { }
+static void gadget_resume(struct usb_gadget *gadget) { }
+static void gadget_reset(struct usb_gadget *gadget) { }
+
+/*----------------------------------------------------------------------*/
+
+static struct miscdevice raw_misc_device;
+
+static int raw_open(struct inode *inode, struct file *fd)
+{
+ struct raw_dev *dev;
+
+ /* Nonblocking I/O is not supported yet. */
+ if (fd->f_flags & O_NONBLOCK)
+ return -EINVAL;
+
+ dev = dev_new();
+ if (!dev)
+ return -ENOMEM;
+ fd->private_data = dev;
+ dev->state = STATE_DEV_OPENED;
+ dev->dev = raw_misc_device.this_device;
+ return 0;
+}
+
+static int raw_release(struct inode *inode, struct file *fd)
+{
+ int ret = 0;
+ struct raw_dev *dev = fd->private_data;
+ unsigned long flags;
+ bool unregister = false;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_CLOSED;
+ if (!dev->gadget) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ goto out_put;
+ }
+ if (dev->gadget_registered)
+ unregister = true;
+ dev->gadget_registered = false;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (unregister) {
+ ret = usb_gadget_unregister_driver(&dev->driver);
+ if (ret != 0)
+ dev_err(dev->dev,
+ "usb_gadget_unregister_driver() failed with %d\n",
+ ret);
+ /* Matches kref_get() in raw_ioctl_run(). */
+ kref_put(&dev->count, dev_free);
+ }
+
+out_put:
+ /* Matches dev_new() in raw_open(). */
+ kref_put(&dev->count, dev_free);
+ return ret;
+}
+
+/*----------------------------------------------------------------------*/
+
+static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ struct usb_raw_init arg;
+ char *udc_driver_name;
+ char *udc_device_name;
+ unsigned long flags;
+
+ if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
+ return -EFAULT;
+
+ switch (arg.speed) {
+ case USB_SPEED_UNKNOWN:
+ arg.speed = USB_SPEED_HIGH;
+ break;
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
+ if (!udc_driver_name)
+ return -ENOMEM;
+ ret = strscpy(udc_driver_name, &arg.driver_name[0],
+ UDC_NAME_LENGTH_MAX);
+ if (ret < 0) {
+ kfree(udc_driver_name);
+ return ret;
+ }
+ ret = 0;
+
+ udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
+ if (!udc_device_name) {
+ kfree(udc_driver_name);
+ return -ENOMEM;
+ }
+ ret = strscpy(udc_device_name, &arg.device_name[0],
+ UDC_NAME_LENGTH_MAX);
+ if (ret < 0) {
+ kfree(udc_driver_name);
+ kfree(udc_device_name);
+ return ret;
+ }
+ ret = 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_OPENED) {
+ dev_dbg(dev->dev, "fail, device is not opened\n");
+ kfree(udc_driver_name);
+ kfree(udc_device_name);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ dev->udc_name = udc_driver_name;
+
+ dev->driver.function = DRIVER_DESC;
+ dev->driver.max_speed = arg.speed;
+ dev->driver.setup = gadget_setup;
+ dev->driver.disconnect = gadget_disconnect;
+ dev->driver.bind = gadget_bind;
+ dev->driver.unbind = gadget_unbind;
+ dev->driver.suspend = gadget_suspend;
+ dev->driver.resume = gadget_resume;
+ dev->driver.reset = gadget_reset;
+ dev->driver.driver.name = DRIVER_NAME;
+ dev->driver.udc_name = udc_device_name;
+ dev->driver.match_existing_only = 1;
+
+ dev->state = STATE_DEV_INITIALIZED;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (value)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_INITIALIZED) {
+ dev_dbg(dev->dev, "fail, device is not initialized\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_gadget_probe_driver(&dev->driver);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (ret) {
+ dev_err(dev->dev,
+ "fail, usb_gadget_probe_driver returned %d\n", ret);
+ dev->state = STATE_DEV_FAILED;
+ goto out_unlock;
+ }
+ dev->gadget_registered = true;
+ dev->state = STATE_DEV_RUNNING;
+ /* Matches kref_put() in raw_release(). */
+ kref_get(&dev->count);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
+{
+ struct usb_raw_event arg;
+ unsigned long flags;
+ struct usb_raw_event *event;
+ uint32_t length;
+
+ if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -EINVAL;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ event = raw_event_queue_fetch(&dev->queue);
+ if (PTR_ERR(event) == -EINTR) {
+ dev_dbg(&dev->gadget->dev, "event fetching interrupted\n");
+ return -EINTR;
+ }
+ if (IS_ERR(event)) {
+ dev_err(&dev->gadget->dev, "failed to fetch event\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_FAILED;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ENODEV;
+ }
+ length = min(arg.length, event->length);
+ if (copy_to_user((void __user *)value, event, sizeof(*event) + length))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
+ bool get_from_user)
+{
+ void *data;
+
+ if (copy_from_user(io, ptr, sizeof(*io)))
+ return ERR_PTR(-EFAULT);
+ if (io->ep >= USB_RAW_EPS_NUM_MAX)
+ return ERR_PTR(-EINVAL);
+ if (!usb_raw_io_flags_valid(io->flags))
+ return ERR_PTR(-EINVAL);
+ if (io->length > PAGE_SIZE)
+ return ERR_PTR(-EINVAL);
+ if (get_from_user)
+ data = memdup_user(ptr + sizeof(*io), io->length);
+ else {
+ data = kmalloc(io->length, GFP_KERNEL);
+ if (!data)
+ data = ERR_PTR(-ENOMEM);
+ }
+ return data;
+}
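/*
 * Layout expected by raw_alloc_io_data(): a struct usb_raw_ep_io header
 * immediately followed by io.length payload bytes. A userspace sketch
 * (fd, ep_index and payload are hypothetical; error handling elided):
 *
 *	char buf[sizeof(struct usb_raw_ep_io) + 64];
 *	struct usb_raw_ep_io *io = (struct usb_raw_ep_io *)buf;
 *
 *	io->ep = ep_index;   // index returned by USB_RAW_IOCTL_EP_ENABLE
 *	io->flags = 0;
 *	io->length = 64;
 *	memcpy(io->data, payload, 64);
 *	ioctl(fd, USB_RAW_IOCTL_EP_WRITE, io);
 */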
+
+static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ void *data, bool in)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (dev->ep0_urb_queued) {
+ dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if ((in && !dev->ep0_in_pending) ||
+ (!in && !dev->ep0_out_pending)) {
+ dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (WARN_ON(in && dev->ep0_out_pending)) {
+ ret = -ENODEV;
+ dev->state = STATE_DEV_FAILED;
+ goto out_done;
+ }
+ if (WARN_ON(!in && dev->ep0_in_pending)) {
+ ret = -ENODEV;
+ dev->state = STATE_DEV_FAILED;
+ goto out_done;
+ }
+
+ dev->req->buf = data;
+ dev->req->length = io->length;
+ dev->req->zero = usb_raw_io_flags_zero(io->flags);
+ dev->ep0_urb_queued = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
+ if (ret) {
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_queue returned %d\n", ret);
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_FAILED;
+ goto out_done;
+ }
+
+ ret = wait_for_completion_interruptible(&dev->ep0_done);
+ if (ret) {
+ dev_dbg(&dev->gadget->dev, "wait interrupted\n");
+ usb_ep_dequeue(dev->gadget->ep0, dev->req);
+ wait_for_completion(&dev->ep0_done);
+ spin_lock_irqsave(&dev->lock, flags);
+ goto out_done;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = dev->ep0_status;
+
+out_done:
+ dev->ep0_urb_queued = false;
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
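/*
 * Note the shape of the cancellation path above: when the interruptible
 * wait is broken by a signal, the request is dequeued and the completion
 * is then re-awaited non-interruptibly. usb_ep_dequeue() only initiates
 * cancellation; the request's completion callback (which fires ep0_done)
 * must still run before the buffer and request may be reused.
 * raw_process_ep_io() below follows the same pattern for non-control
 * endpoints.
 */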
+
+static int raw_ioctl_ep0_write(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ void *data;
+ struct usb_raw_ep_io io;
+
+ data = raw_alloc_io_data(&io, (void __user *)value, true);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ ret = raw_process_ep0_io(dev, &io, data, true);
+ kfree(data);
+ return ret;
+}
+
+static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ void *data;
+ struct usb_raw_ep_io io;
+ unsigned int length;
+
+ data = raw_alloc_io_data(&io, (void __user *)value, false);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ ret = raw_process_ep0_io(dev, &io, data, false);
+ if (ret < 0)
+ goto free;
+
+ length = min(io.length, (unsigned int)ret);
+ if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
+ ret = -EFAULT;
+ else
+ ret = length;
+free:
+ kfree(data);
+ return ret;
+}
+
+static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (value)
+ return -EINVAL;
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (dev->ep0_urb_queued) {
+ dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
+ dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = usb_ep_set_halt(dev->gadget->ep0);
+ if (ret < 0)
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_set_halt returned %d\n", ret);
+
+ if (dev->ep0_in_pending)
+ dev->ep0_in_pending = false;
+ else
+ dev->ep0_out_pending = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0, i;
+ unsigned long flags;
+ struct usb_endpoint_descriptor *desc;
+ struct raw_ep *ep;
+
+ desc = memdup_user((void __user *)value, sizeof(*desc));
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ /*
+ * Endpoints with a maxpacket length of 0 can cause crashes in UDC
+ * drivers.
+ */
+ if (usb_endpoint_maxp(desc) == 0) {
+ dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n");
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_free;
+ }
+
+ for (i = 0; i < dev->eps_num; i++) {
+ ep = &dev->eps[i];
+ if (ep->state != STATE_EP_DISABLED)
+ continue;
+ if (ep->addr != usb_endpoint_num(desc) &&
+ ep->addr != USB_RAW_EP_ADDR_ANY)
+ continue;
+ if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
+ continue;
+ ep->ep->desc = desc;
+ ret = usb_ep_enable(ep->ep);
+ if (ret < 0) {
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_enable returned %d\n", ret);
+ goto out_free;
+ }
+ ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
+ if (!ep->req) {
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_alloc_request failed\n");
+ usb_ep_disable(ep->ep);
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ ep->state = STATE_EP_ENABLED;
+ ep->ep->driver_data = ep;
+ ret = i;
+ goto out_unlock;
+ }
+
+ dev_dbg(&dev->gadget->dev, "fail, no gadget endpoints available\n");
+ ret = -EBUSY;
+
+out_free:
+ kfree(desc);
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0, i = value;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (i < 0 || i >= dev->eps_num) {
+ dev_dbg(dev->dev, "fail, invalid endpoint\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (dev->eps[i].state == STATE_EP_DISABLED) {
+ dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (dev->eps[i].disabling) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, disable already in progress\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (dev->eps[i].urb_queued) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, waiting for urb completion\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ dev->eps[i].disabling = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ usb_ep_disable(dev->eps[i].ep);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
+ kfree(dev->eps[i].ep->desc);
+ dev->eps[i].state = STATE_EP_DISABLED;
+ dev->eps[i].disabling = false;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
+ unsigned long value, bool set, bool halt)
+{
+ int ret = 0, i = value;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (i < 0 || i >= dev->eps_num) {
+ dev_dbg(dev->dev, "fail, invalid endpoint\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (dev->eps[i].state == STATE_EP_DISABLED) {
+ dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (dev->eps[i].disabling) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, disable is in progress\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (dev->eps[i].urb_queued) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, waiting for urb completion\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, can't halt/wedge ISO endpoint\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (set && halt) {
+ ret = usb_ep_set_halt(dev->eps[i].ep);
+ if (ret < 0)
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_set_halt returned %d\n", ret);
+ } else if (!set && halt) {
+ ret = usb_ep_clear_halt(dev->eps[i].ep);
+ if (ret < 0)
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_clear_halt returned %d\n", ret);
+ } else if (set && !halt) {
+ ret = usb_ep_set_wedge(dev->eps[i].ep);
+ if (ret < 0)
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_set_wedge returned %d\n", ret);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
+ struct raw_dev *dev = r_ep->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (req->status)
+ r_ep->status = req->status;
+ else
+ r_ep->status = req->actual;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ complete((struct completion *)req->context);
+}
+
+static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ void *data, bool in)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct raw_ep *ep;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (io->ep >= dev->eps_num) {
+ dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ep = &dev->eps[io->ep];
+ if (ep->state != STATE_EP_ENABLED) {
+ dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (ep->disabling) {
+ dev_dbg(&dev->gadget->dev,
+ "fail, endpoint is already being disabled\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if (ep->urb_queued) {
+ dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) {
+ dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ep->dev = dev;
+ ep->req->context = &done;
+ ep->req->complete = gadget_ep_complete;
+ ep->req->buf = data;
+ ep->req->length = io->length;
+ ep->req->zero = usb_raw_io_flags_zero(io->flags);
+ ep->urb_queued = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL);
+ if (ret) {
+ dev_err(&dev->gadget->dev,
+ "fail, usb_ep_queue returned %d\n", ret);
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_FAILED;
+ goto out_done;
+ }
+
+ ret = wait_for_completion_interruptible(&done);
+ if (ret) {
+ dev_dbg(&dev->gadget->dev, "wait interrupted\n");
+ usb_ep_dequeue(ep->ep, ep->req);
+ wait_for_completion(&done);
+ spin_lock_irqsave(&dev->lock, flags);
+ goto out_done;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = ep->status;
+
+out_done:
+ ep->urb_queued = false;
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int raw_ioctl_ep_write(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ char *data;
+ struct usb_raw_ep_io io;
+
+ data = raw_alloc_io_data(&io, (void __user *)value, true);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ ret = raw_process_ep_io(dev, &io, data, true);
+ kfree(data);
+ return ret;
+}
+
+static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ char *data;
+ struct usb_raw_ep_io io;
+ unsigned int length;
+
+ data = raw_alloc_io_data(&io, (void __user *)value, false);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ ret = raw_process_ep_io(dev, &io, data, false);
+ if (ret < 0)
+ goto free;
+
+ length = min(io.length, (unsigned int)ret);
+ if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
+ ret = -EFAULT;
+ else
+ ret = length;
+free:
+ kfree(data);
+ return ret;
+}
+
+static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (value)
+ return -EINVAL;
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ usb_gadget_vbus_draw(dev->gadget, 2 * value);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static void fill_ep_caps(struct usb_ep_caps *caps,
+ struct usb_raw_ep_caps *raw_caps)
+{
+ raw_caps->type_control = caps->type_control;
+ raw_caps->type_iso = caps->type_iso;
+ raw_caps->type_bulk = caps->type_bulk;
+ raw_caps->type_int = caps->type_int;
+ raw_caps->dir_in = caps->dir_in;
+ raw_caps->dir_out = caps->dir_out;
+}
+
+static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
+{
+ limits->maxpacket_limit = ep->maxpacket_limit;
+ limits->max_streams = ep->max_streams;
+}
+
+static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
+{
+ int ret = 0, i;
+ unsigned long flags;
+ struct usb_raw_eps_info *info;
+ struct raw_ep *ep;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state != STATE_DEV_RUNNING) {
+ dev_dbg(dev->dev, "fail, device is not running\n");
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ goto out_free;
+ }
+ if (!dev->gadget) {
+ dev_dbg(dev->dev, "fail, gadget is not bound\n");
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ goto out_free;
+ }
+
+ memset(info, 0, sizeof(*info));
+ for (i = 0; i < dev->eps_num; i++) {
+ ep = &dev->eps[i];
+ strscpy(&info->eps[i].name[0], ep->ep->name,
+ USB_RAW_EP_NAME_MAX);
+ info->eps[i].addr = ep->addr;
+ fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
+ fill_ep_limits(ep->ep, &info->eps[i].limits);
+ }
+ ret = dev->eps_num;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (copy_to_user((void __user *)value, info, sizeof(*info)))
+ ret = -EFAULT;
+
+out_free:
+ kfree(info);
+out:
+ return ret;
+}
+
+static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
+{
+ struct raw_dev *dev = fd->private_data;
+ int ret = 0;
+
+ if (!dev)
+ return -EBUSY;
+
+ switch (cmd) {
+ case USB_RAW_IOCTL_INIT:
+ ret = raw_ioctl_init(dev, value);
+ break;
+ case USB_RAW_IOCTL_RUN:
+ ret = raw_ioctl_run(dev, value);
+ break;
+ case USB_RAW_IOCTL_EVENT_FETCH:
+ ret = raw_ioctl_event_fetch(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP0_WRITE:
+ ret = raw_ioctl_ep0_write(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP0_READ:
+ ret = raw_ioctl_ep0_read(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP_ENABLE:
+ ret = raw_ioctl_ep_enable(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP_DISABLE:
+ ret = raw_ioctl_ep_disable(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP_WRITE:
+ ret = raw_ioctl_ep_write(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP_READ:
+ ret = raw_ioctl_ep_read(dev, value);
+ break;
+ case USB_RAW_IOCTL_CONFIGURE:
+ ret = raw_ioctl_configure(dev, value);
+ break;
+ case USB_RAW_IOCTL_VBUS_DRAW:
+ ret = raw_ioctl_vbus_draw(dev, value);
+ break;
+ case USB_RAW_IOCTL_EPS_INFO:
+ ret = raw_ioctl_eps_info(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP0_STALL:
+ ret = raw_ioctl_ep0_stall(dev, value);
+ break;
+ case USB_RAW_IOCTL_EP_SET_HALT:
+ ret = raw_ioctl_ep_set_clear_halt_wedge(
+ dev, value, true, true);
+ break;
+ case USB_RAW_IOCTL_EP_CLEAR_HALT:
+ ret = raw_ioctl_ep_set_clear_halt_wedge(
+ dev, value, false, true);
+ break;
+ case USB_RAW_IOCTL_EP_SET_WEDGE:
+ ret = raw_ioctl_ep_set_clear_halt_wedge(
+ dev, value, true, false);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*----------------------------------------------------------------------*/
+
+static const struct file_operations raw_fops = {
+ .open = raw_open,
+ .unlocked_ioctl = raw_ioctl,
+ .compat_ioctl = raw_ioctl,
+ .release = raw_release,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice raw_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DRIVER_NAME,
+ .fops = &raw_fops,
+};
+
+module_misc_device(raw_misc_device);
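/*
 * A minimal userspace session against this interface (a sketch under
 * stated assumptions: "dummy_udc"/"dummy_udc.0" are the names exposed by
 * the dummy_hcd UDC; error handling and the event dispatch body are
 * elided):
 */
#if 0 /* userspace example, not kernel code */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/usb/raw_gadget.h>

void example(void)
{
	struct usb_raw_init init = {
		.driver_name = "dummy_udc",
		.device_name = "dummy_udc.0",
		.speed = USB_SPEED_HIGH,
	};
	int fd = open("/dev/raw-gadget", O_RDWR);

	ioctl(fd, USB_RAW_IOCTL_INIT, &init);
	ioctl(fd, USB_RAW_IOCTL_RUN, 0);
	for (;;) {
		struct usb_raw_event event = { .length = 0 };

		ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &event);
		/* dispatch on event.type (CONNECT, CONTROL, ...) and
		 * drive EP0 with USB_RAW_IOCTL_EP0_READ/EP0_WRITE */
	}
}
#endif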
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 797d6ace8994..3a7179e90f4e 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -441,11 +441,20 @@ config USB_GADGET_XILINX
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.
+config USB_MAX3420_UDC
+ tristate "MAX3420 (USB-over-SPI) support"
+ depends on SPI
+ help
+ The Maxim MAX3420 chip supports USB2.0 full-speed peripheral mode.
+	  The MAX3420 is driven over an SPI interface, hence the dependency.
+
+ To compile this driver as a module, choose M here: the module will
+	  be called max3420_udc.
+
config USB_TEGRA_XUDC
tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PHY_TEGRA_XUSB
- select USB_ROLE_SWITCH
help
Enables NVIDIA Tegra USB 3.0 device mode controller driver.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index f6777e654a8e..f5a7ce28aecd 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o
obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub/
obj-$(CONFIG_USB_BDC_UDC) += bdc/
+obj-$(CONFIG_USB_MAX3420_UDC) += max3420_udc.o
diff --git a/drivers/usb/gadget/udc/amd5536udc.h b/drivers/usb/gadget/udc/amd5536udc.h
index dfdef6a28904..0262383f8c79 100644
--- a/drivers/usb/gadget/udc/amd5536udc.h
+++ b/drivers/usb/gadget/udc/amd5536udc.h
@@ -440,7 +440,7 @@ struct udc_ep_regs {
/* endpoint data descriptor pointer */
u32 desptr;
- /* reserverd */
+ /* reserved */
u32 reserved;
/* write/read confirmation */
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index bfd1c9e80a1f..80685e4306f3 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -202,7 +202,7 @@ MODULE_DEVICE_TABLE(pci, pci_id);
/* PCI functions */
static struct pci_driver udc_pci_driver = {
- .name = (char *) name,
+ .name = name,
.id_table = pci_id,
.probe = udc_pci_probe,
.remove = udc_pci_remove,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
index 83ba8a2eb6af..605500b19cf3 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
+++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
@@ -4,5 +4,5 @@ config USB_ASPEED_VHUB
depends on ARCH_ASPEED || COMPILE_TEST
depends on USB_LIBCOMPOSITE
help
- USB peripheral controller for the Aspeed AST2500 family
- SoCs supporting the "vHub" functionality and USB2.0
+ USB peripheral controller for the Aspeed AST2400, AST2500 and
+ AST2600 family SoCs supporting the "vHub" functionality and USB2.0
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
index 90b134d5dca9..f8d35dd60c34 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -99,7 +99,7 @@ static irqreturn_t ast_vhub_irq(int irq, void *data)
{
struct ast_vhub *vhub = data;
irqreturn_t iret = IRQ_NONE;
- u32 istat;
+ u32 i, istat;
/* Stale interrupt while tearing down */
if (!vhub->ep0_bufs)
@@ -121,10 +121,10 @@ static irqreturn_t ast_vhub_irq(int irq, void *data)
/* Handle generic EPs first */
if (istat & VHUB_IRQ_EP_POOL_ACK_STALL) {
- u32 i, ep_acks = readl(vhub->regs + AST_VHUB_EP_ACK_ISR);
+ u32 ep_acks = readl(vhub->regs + AST_VHUB_EP_ACK_ISR);
writel(ep_acks, vhub->regs + AST_VHUB_EP_ACK_ISR);
- for (i = 0; ep_acks && i < AST_VHUB_NUM_GEN_EPs; i++) {
+ for (i = 0; ep_acks && i < vhub->max_epns; i++) {
u32 mask = VHUB_EP_IRQ(i);
if (ep_acks & mask) {
ast_vhub_epn_ack_irq(&vhub->epns[i]);
@@ -134,21 +134,11 @@ static irqreturn_t ast_vhub_irq(int irq, void *data)
}
/* Handle device interrupts */
- if (istat & (VHUB_IRQ_DEVICE1 |
- VHUB_IRQ_DEVICE2 |
- VHUB_IRQ_DEVICE3 |
- VHUB_IRQ_DEVICE4 |
- VHUB_IRQ_DEVICE5)) {
- if (istat & VHUB_IRQ_DEVICE1)
- ast_vhub_dev_irq(&vhub->ports[0].dev);
- if (istat & VHUB_IRQ_DEVICE2)
- ast_vhub_dev_irq(&vhub->ports[1].dev);
- if (istat & VHUB_IRQ_DEVICE3)
- ast_vhub_dev_irq(&vhub->ports[2].dev);
- if (istat & VHUB_IRQ_DEVICE4)
- ast_vhub_dev_irq(&vhub->ports[3].dev);
- if (istat & VHUB_IRQ_DEVICE5)
- ast_vhub_dev_irq(&vhub->ports[4].dev);
+ for (i = 0; i < vhub->max_ports; i++) {
+ u32 dev_mask = VHUB_IRQ_DEVICE1 << i;
+
+ if (istat & dev_mask)
+ ast_vhub_dev_irq(&vhub->ports[i].dev);
}
/* Handle top-level vHub EP0 interrupts */
@@ -182,7 +172,7 @@ static irqreturn_t ast_vhub_irq(int irq, void *data)
void ast_vhub_init_hw(struct ast_vhub *vhub)
{
- u32 ctrl;
+ u32 ctrl, port_mask, epn_mask;
UDCDBG(vhub,"(Re)Starting HW ...\n");
@@ -222,15 +212,20 @@ void ast_vhub_init_hw(struct ast_vhub *vhub)
}
/* Reset all devices */
- writel(VHUB_SW_RESET_ALL, vhub->regs + AST_VHUB_SW_RESET);
+ port_mask = GENMASK(vhub->max_ports, 1);
+ writel(VHUB_SW_RESET_ROOT_HUB |
+ VHUB_SW_RESET_DMA_CONTROLLER |
+ VHUB_SW_RESET_EP_POOL |
+ port_mask, vhub->regs + AST_VHUB_SW_RESET);
udelay(1);
writel(0, vhub->regs + AST_VHUB_SW_RESET);
/* Disable and cleanup EP ACK/NACK interrupts */
+ epn_mask = GENMASK(vhub->max_epns - 1, 0);
writel(0, vhub->regs + AST_VHUB_EP_ACK_IER);
writel(0, vhub->regs + AST_VHUB_EP_NACK_IER);
- writel(VHUB_EP_IRQ_ALL, vhub->regs + AST_VHUB_EP_ACK_ISR);
- writel(VHUB_EP_IRQ_ALL, vhub->regs + AST_VHUB_EP_NACK_ISR);
+ writel(epn_mask, vhub->regs + AST_VHUB_EP_ACK_ISR);
+ writel(epn_mask, vhub->regs + AST_VHUB_EP_NACK_ISR);
/* Default settings for EP0, enable HW hub EP1 */
writel(0, vhub->regs + AST_VHUB_EP0_CTRL);
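	/*
	 * Worked example for the masks above, for a hypothetical 5-port,
	 * 15-endpoint configuration: port_mask = GENMASK(5, 1) = 0x3e
	 * (per-port device reset bits 1..5), epn_mask = GENMASK(14, 0) =
	 * 0x7fff (one ACK/NACK interrupt bit per generic endpoint).
	 */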
@@ -273,7 +268,7 @@ static int ast_vhub_remove(struct platform_device *pdev)
return 0;
/* Remove devices */
- for (i = 0; i < AST_VHUB_NUM_PORTS; i++)
+ for (i = 0; i < vhub->max_ports; i++)
ast_vhub_del_dev(&vhub->ports[i].dev);
spin_lock_irqsave(&vhub->lock, flags);
@@ -295,7 +290,7 @@ static int ast_vhub_remove(struct platform_device *pdev)
if (vhub->ep0_bufs)
dma_free_coherent(&pdev->dev,
AST_VHUB_EP0_MAX_PACKET *
- (AST_VHUB_NUM_PORTS + 1),
+ (vhub->max_ports + 1),
vhub->ep0_bufs,
vhub->ep0_bufs_dma);
vhub->ep0_bufs = NULL;
@@ -309,11 +304,32 @@ static int ast_vhub_probe(struct platform_device *pdev)
struct ast_vhub *vhub;
struct resource *res;
int i, rc = 0;
+ const struct device_node *np = pdev->dev.of_node;
vhub = devm_kzalloc(&pdev->dev, sizeof(*vhub), GFP_KERNEL);
if (!vhub)
return -ENOMEM;
+ rc = of_property_read_u32(np, "aspeed,vhub-downstream-ports",
+ &vhub->max_ports);
+ if (rc < 0)
+ vhub->max_ports = AST_VHUB_NUM_PORTS;
+
+ vhub->ports = devm_kcalloc(&pdev->dev, vhub->max_ports,
+ sizeof(*vhub->ports), GFP_KERNEL);
+ if (!vhub->ports)
+ return -ENOMEM;
+
+ rc = of_property_read_u32(np, "aspeed,vhub-generic-endpoints",
+ &vhub->max_epns);
+ if (rc < 0)
+ vhub->max_epns = AST_VHUB_NUM_GEN_EPs;
+
+ vhub->epns = devm_kcalloc(&pdev->dev, vhub->max_epns,
+ sizeof(*vhub->epns), GFP_KERNEL);
+ if (!vhub->epns)
+ return -ENOMEM;
+
spin_lock_init(&vhub->lock);
vhub->pdev = pdev;
@@ -366,7 +382,7 @@ static int ast_vhub_probe(struct platform_device *pdev)
*/
vhub->ep0_bufs = dma_alloc_coherent(&pdev->dev,
AST_VHUB_EP0_MAX_PACKET *
- (AST_VHUB_NUM_PORTS + 1),
+ (vhub->max_ports + 1),
&vhub->ep0_bufs_dma, GFP_KERNEL);
if (!vhub->ep0_bufs) {
dev_err(&pdev->dev, "Failed to allocate EP0 DMA buffers\n");
@@ -380,7 +396,7 @@ static int ast_vhub_probe(struct platform_device *pdev)
ast_vhub_init_ep0(vhub, &vhub->ep0, NULL);
/* Init devices */
- for (i = 0; i < AST_VHUB_NUM_PORTS && rc == 0; i++)
+ for (i = 0; i < vhub->max_ports && rc == 0; i++)
rc = ast_vhub_init_dev(vhub, i);
if (rc)
goto err;
@@ -407,6 +423,9 @@ static const struct of_device_id ast_vhub_dt_ids[] = {
{
.compatible = "aspeed,ast2500-usb-vhub",
},
+ {
+ .compatible = "aspeed,ast2600-usb-vhub",
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ast_vhub_dt_ids);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
index 4008e7a51188..d268306a7bfe 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -77,7 +77,7 @@ static void ast_vhub_dev_enable(struct ast_vhub_dev *d)
writel(d->ep0.buf_dma, d->regs + AST_VHUB_DEV_EP0_DATA);
/* Clear stall on all EPs */
- for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) {
+ for (i = 0; i < d->max_epns; i++) {
struct ast_vhub_ep *ep = d->epns[i];
if (ep && (ep->epn.stalled || ep->epn.wedged)) {
@@ -137,7 +137,7 @@ static int ast_vhub_ep_feature(struct ast_vhub_dev *d,
is_set ? "SET" : "CLEAR", ep_num, wValue);
if (ep_num == 0)
return std_req_complete;
- if (ep_num >= AST_VHUB_NUM_GEN_EPs || !d->epns[ep_num - 1])
+ if (ep_num >= d->max_epns || !d->epns[ep_num - 1])
return std_req_stall;
if (wValue != USB_ENDPOINT_HALT)
return std_req_driver;
@@ -181,7 +181,7 @@ static int ast_vhub_ep_status(struct ast_vhub_dev *d,
DDBG(d, "GET_STATUS(ep%d)\n", ep_num);
- if (ep_num >= AST_VHUB_NUM_GEN_EPs)
+ if (ep_num >= d->max_epns)
return std_req_stall;
if (ep_num != 0) {
ep = d->epns[ep_num - 1];
@@ -299,7 +299,7 @@ static void ast_vhub_dev_nuke(struct ast_vhub_dev *d)
{
unsigned int i;
- for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) {
+ for (i = 0; i < d->max_epns; i++) {
if (!d->epns[i])
continue;
ast_vhub_nuke(d->epns[i], -ESHUTDOWN);
@@ -416,10 +416,10 @@ static struct usb_ep *ast_vhub_udc_match_ep(struct usb_gadget *gadget,
* that will allow the generic code to use our
* assigned address.
*/
- for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++)
+ for (i = 0; i < d->max_epns; i++)
if (d->epns[i] == NULL)
break;
- if (i >= AST_VHUB_NUM_GEN_EPs)
+ if (i >= d->max_epns)
return NULL;
addr = i + 1;
@@ -526,6 +526,7 @@ void ast_vhub_del_dev(struct ast_vhub_dev *d)
usb_del_gadget_udc(&d->gadget);
device_unregister(d->port_dev);
+ kfree(d->epns);
}
static void ast_vhub_dev_release(struct device *dev)
@@ -547,13 +548,24 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
ast_vhub_init_ep0(vhub, &d->ep0, d);
/*
+ * A USB device can have up to 30 endpoints besides control
+ * endpoint 0.
+ */
+ d->max_epns = min_t(u32, vhub->max_epns, 30);
+ d->epns = kcalloc(d->max_epns, sizeof(*d->epns), GFP_KERNEL);
+ if (!d->epns)
+ return -ENOMEM;
+
+ /*
* The UDC core really needs us to have separate and uniquely
* named "parent" devices for each port so we create a sub device
* here for that purpose
*/
d->port_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!d->port_dev)
- return -ENOMEM;
+ if (!d->port_dev) {
+ rc = -ENOMEM;
+ goto fail_alloc;
+ }
device_initialize(d->port_dev);
d->port_dev->release = ast_vhub_dev_release;
d->port_dev->parent = parent;
@@ -584,6 +596,8 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
device_del(d->port_dev);
fail_add:
put_device(d->port_dev);
+ fail_alloc:
+ kfree(d->epns);
return rc;
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 7475c74aa5c5..0bd6b20435b8 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -800,10 +800,10 @@ struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
/* Find a free one (no device) */
spin_lock_irqsave(&vhub->lock, flags);
- for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++)
+ for (i = 0; i < vhub->max_epns; i++)
if (vhub->epns[i].dev == NULL)
break;
- if (i >= AST_VHUB_NUM_GEN_EPs) {
+ if (i >= vhub->max_epns) {
spin_unlock_irqrestore(&vhub->lock, flags);
return NULL;
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 19b3517e04c0..6e565c3dbb5b 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -93,11 +93,7 @@ static void ast_vhub_patch_dev_desc_usb1(struct usb_device_descriptor *desc)
USB_DT_INTERFACE_SIZE + \
USB_DT_ENDPOINT_SIZE)
-static const struct ast_vhub_full_cdesc {
- struct usb_config_descriptor cfg;
- struct usb_interface_descriptor intf;
- struct usb_endpoint_descriptor ep;
-} __attribute__ ((packed)) ast_vhub_conf_desc = {
+static const struct ast_vhub_full_cdesc ast_vhub_conf_desc = {
.cfg = {
.bLength = USB_DT_CONFIG_SIZE,
.bDescriptorType = USB_DT_CONFIG,
@@ -266,6 +262,7 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
u8 desc_type, u16 len)
{
size_t dsize;
+ struct ast_vhub *vhub = ep->vhub;
EPDBG(ep, "GET_DESCRIPTOR(type:%d)\n", desc_type);
@@ -281,20 +278,20 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
switch(desc_type) {
case USB_DT_DEVICE:
dsize = USB_DT_DEVICE_SIZE;
- memcpy(ep->buf, &ast_vhub_dev_desc, dsize);
- BUILD_BUG_ON(dsize > sizeof(ast_vhub_dev_desc));
+ memcpy(ep->buf, &vhub->vhub_dev_desc, dsize);
+ BUILD_BUG_ON(dsize > sizeof(vhub->vhub_dev_desc));
BUILD_BUG_ON(USB_DT_DEVICE_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
case USB_DT_CONFIG:
dsize = AST_VHUB_CONF_DESC_SIZE;
- memcpy(ep->buf, &ast_vhub_conf_desc, dsize);
- BUILD_BUG_ON(dsize > sizeof(ast_vhub_conf_desc));
+ memcpy(ep->buf, &vhub->vhub_conf_desc, dsize);
+ BUILD_BUG_ON(dsize > sizeof(vhub->vhub_conf_desc));
BUILD_BUG_ON(AST_VHUB_CONF_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
case USB_DT_HUB:
dsize = AST_VHUB_HUB_DESC_SIZE;
- memcpy(ep->buf, &ast_vhub_hub_desc, dsize);
- BUILD_BUG_ON(dsize > sizeof(ast_vhub_hub_desc));
+ memcpy(ep->buf, &vhub->vhub_hub_desc, dsize);
+ BUILD_BUG_ON(dsize > sizeof(vhub->vhub_hub_desc));
BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
default:
@@ -317,7 +314,8 @@ static int ast_vhub_rep_string(struct ast_vhub_ep *ep,
u8 string_id, u16 lang_id,
u16 len)
{
- int rc = usb_gadget_get_string (&ast_vhub_strings, string_id, ep->buf);
+ int rc = usb_gadget_get_string(&ep->vhub->vhub_str_desc,
+ string_id, ep->buf);
/*
* This should never happen unless we put too big strings in
@@ -504,7 +502,7 @@ static void ast_vhub_wake_work(struct work_struct *work)
* we let the normal host wake path deal with it later.
*/
spin_lock_irqsave(&vhub->lock, flags);
- for (i = 0; i < AST_VHUB_NUM_PORTS; i++) {
+ for (i = 0; i < vhub->max_ports; i++) {
struct ast_vhub_port *p = &vhub->ports[i];
if (!(p->status & USB_PORT_STAT_SUSPEND))
@@ -587,7 +585,7 @@ static enum std_req_rc ast_vhub_set_port_feature(struct ast_vhub_ep *ep,
struct ast_vhub *vhub = ep->vhub;
struct ast_vhub_port *p;
- if (port == 0 || port > AST_VHUB_NUM_PORTS)
+ if (port == 0 || port > vhub->max_ports)
return std_req_stall;
port--;
p = &vhub->ports[port];
@@ -630,7 +628,7 @@ static enum std_req_rc ast_vhub_clr_port_feature(struct ast_vhub_ep *ep,
struct ast_vhub *vhub = ep->vhub;
struct ast_vhub_port *p;
- if (port == 0 || port > AST_VHUB_NUM_PORTS)
+ if (port == 0 || port > vhub->max_ports)
return std_req_stall;
port--;
p = &vhub->ports[port];
@@ -676,7 +674,7 @@ static enum std_req_rc ast_vhub_get_port_stat(struct ast_vhub_ep *ep,
struct ast_vhub *vhub = ep->vhub;
u16 stat, chg;
- if (port == 0 || port > AST_VHUB_NUM_PORTS)
+ if (port == 0 || port > vhub->max_ports)
return std_req_stall;
port--;
@@ -757,7 +755,7 @@ void ast_vhub_hub_suspend(struct ast_vhub *vhub)
* Forward to unsuspended ports without changing
* their connection status.
*/
- for (i = 0; i < AST_VHUB_NUM_PORTS; i++) {
+ for (i = 0; i < vhub->max_ports; i++) {
struct ast_vhub_port *p = &vhub->ports[i];
if (!(p->status & USB_PORT_STAT_SUSPEND))
@@ -780,7 +778,7 @@ void ast_vhub_hub_resume(struct ast_vhub *vhub)
* Forward to unsuspended ports without changing
* their connection status.
*/
- for (i = 0; i < AST_VHUB_NUM_PORTS; i++) {
+ for (i = 0; i < vhub->max_ports; i++) {
struct ast_vhub_port *p = &vhub->ports[i];
if (!(p->status & USB_PORT_STAT_SUSPEND))
@@ -814,7 +812,7 @@ void ast_vhub_hub_reset(struct ast_vhub *vhub)
* Clear all port status, disable gadgets and "suspend"
* them. They will be woken up by a port reset.
*/
- for (i = 0; i < AST_VHUB_NUM_PORTS; i++) {
+ for (i = 0; i < vhub->max_ports; i++) {
struct ast_vhub_port *p = &vhub->ports[i];
/* Only keep the connected flag */
@@ -834,9 +832,31 @@ void ast_vhub_hub_reset(struct ast_vhub *vhub)
writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);
}
+static void ast_vhub_init_desc(struct ast_vhub *vhub)
+{
+ /* Initialize vhub Device Descriptor. */
+ memcpy(&vhub->vhub_dev_desc, &ast_vhub_dev_desc,
+ sizeof(vhub->vhub_dev_desc));
+
+ /* Initialize vhub Configuration Descriptor. */
+ memcpy(&vhub->vhub_conf_desc, &ast_vhub_conf_desc,
+ sizeof(vhub->vhub_conf_desc));
+
+ /* Initialize vhub Hub Descriptor. */
+ memcpy(&vhub->vhub_hub_desc, &ast_vhub_hub_desc,
+ sizeof(vhub->vhub_hub_desc));
+ vhub->vhub_hub_desc.bNbrPorts = vhub->max_ports;
+
+ /* Initialize vhub String Descriptors. */
+ memcpy(&vhub->vhub_str_desc, &ast_vhub_strings,
+ sizeof(vhub->vhub_str_desc));
+}
+
void ast_vhub_init_hub(struct ast_vhub *vhub)
{
vhub->speed = USB_SPEED_UNKNOWN;
INIT_WORK(&vhub->wake_work, ast_vhub_wake_work);
+
+ ast_vhub_init_desc(vhub);
}
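The hub.c hunks above trade file-scope const descriptors for per-vhub copies so that instance-specific fields, such as the hub descriptor's bNbrPorts, can be patched at init time. A minimal sketch of that pattern, with illustrative names rather than the driver's:

    #include <linux/string.h>
    #include <linux/usb/ch11.h>

    /* shared, read-only template; never modified */
    static const struct usb_hub_descriptor hub_desc_template = {
            .bDescriptorType = USB_DT_HUB,
    };

    /* per-instance copy, patched with the runtime port count */
    static void init_hub_desc(struct usb_hub_descriptor *desc, u8 nports)
    {
            memcpy(desc, &hub_desc_template, sizeof(*desc));
            desc->bNbrPorts = nports;
    }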
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 761919e220d3..fac79ef6d669 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -2,6 +2,9 @@
#ifndef __ASPEED_VHUB_H
#define __ASPEED_VHUB_H
+#include <linux/usb.h>
+#include <linux/usb/ch11.h>
+
/*****************************
* *
* VHUB register definitions *
@@ -76,17 +79,9 @@
#define VHUB_SW_RESET_DEVICE2 (1 << 2)
#define VHUB_SW_RESET_DEVICE1 (1 << 1)
#define VHUB_SW_RESET_ROOT_HUB (1 << 0)
-#define VHUB_SW_RESET_ALL (VHUB_SW_RESET_EP_POOL | \
- VHUB_SW_RESET_DMA_CONTROLLER | \
- VHUB_SW_RESET_DEVICE5 | \
- VHUB_SW_RESET_DEVICE4 | \
- VHUB_SW_RESET_DEVICE3 | \
- VHUB_SW_RESET_DEVICE2 | \
- VHUB_SW_RESET_DEVICE1 | \
- VHUB_SW_RESET_ROOT_HUB)
+
/* EP ACK/NACK IRQ masks */
#define VHUB_EP_IRQ(n) (1 << (n))
-#define VHUB_EP_IRQ_ALL 0x7fff /* 15 EPs */
/* USB status reg */
#define VHUB_USBSTS_HISPEED (1 << 27)
@@ -210,6 +205,11 @@
* *
****************************************/
+/*
+ * AST_VHUB_NUM_GEN_EPs and AST_VHUB_NUM_PORTS are kept to avoid breaking
+ * existing AST2400/AST2500 platforms. AST2600 and future vhub revisions
+ * should define the number of downstream ports and endpoints in the
+ * device tree.
+ */
#define AST_VHUB_NUM_GEN_EPs 15 /* Generic non-0 EPs */
#define AST_VHUB_NUM_PORTS 5 /* vHub ports */
#define AST_VHUB_EP0_MAX_PACKET 64 /* EP0's max packet size */
@@ -312,7 +312,7 @@ struct ast_vhub_ep {
/* Registers */
void __iomem *regs;
- /* Index in global pool (0..14) */
+ /* Index in global pool (zero-based) */
unsigned int g_idx;
/* DMA Descriptors */
@@ -342,7 +342,7 @@ struct ast_vhub_dev {
struct ast_vhub *vhub;
void __iomem *regs;
- /* Device index (0...4) and name string */
+ /* Device index (zero-based) and name string */
unsigned int index;
const char *name;
@@ -358,7 +358,8 @@ struct ast_vhub_dev {
/* Endpoint structures */
struct ast_vhub_ep ep0;
- struct ast_vhub_ep *epns[AST_VHUB_NUM_GEN_EPs];
+ struct ast_vhub_ep **epns;
+ u32 max_epns;
};
#define to_ast_dev(__g) container_of(__g, struct ast_vhub_dev, gadget)
@@ -373,6 +374,12 @@ struct ast_vhub_port {
struct ast_vhub_dev dev;
};
+struct ast_vhub_full_cdesc {
+ struct usb_config_descriptor cfg;
+ struct usb_interface_descriptor intf;
+ struct usb_endpoint_descriptor ep;
+} __packed;
+
/* Global vhub structure */
struct ast_vhub {
struct platform_device *pdev;
@@ -393,10 +400,12 @@ struct ast_vhub {
bool ep1_stalled : 1;
/* Per-port info */
- struct ast_vhub_port ports[AST_VHUB_NUM_PORTS];
+ struct ast_vhub_port *ports;
+ u32 max_ports;
/* Generic EP data structures */
- struct ast_vhub_ep epns[AST_VHUB_NUM_GEN_EPs];
+ struct ast_vhub_ep *epns;
+ u32 max_epns;
/* Upstream bus is suspended ? */
bool suspended : 1;
@@ -409,6 +418,12 @@ struct ast_vhub {
/* Upstream bus speed captured at bus reset */
unsigned int speed;
+
+ /* Standard USB Descriptors of the vhub. */
+ struct usb_device_descriptor vhub_dev_desc;
+ struct ast_vhub_full_cdesc vhub_conf_desc;
+ struct usb_hub_descriptor vhub_hub_desc;
+ struct usb_gadget_strings vhub_str_desc;
};
/* Standard request handlers result codes */
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 1b2b548c59a0..eede5cedacb4 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -2021,7 +2021,7 @@ static struct platform_driver at91_udc_driver = {
.suspend = at91udc_suspend,
.resume = at91udc_resume,
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
.of_match_table = at91_udc_dt_ids,
},
};
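A note on the (char *) casts dropped in this driver and in several below: struct device_driver's .name is const char *, so a static const char[] array (or a string literal) assigns directly and the cast is leftover noise. A sketch with an invented driver name:

    #include <linux/platform_device.h>

    static const char example_name[] = "example-udc";

    static struct platform_driver example_driver = {
            .driver = {
                    .name = example_name,   /* no (char *) cast needed */
            },
    };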
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 6e0432141c40..b771a854e29c 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -185,7 +185,7 @@ static int regs_dbg_release(struct inode *inode, struct file *file)
return 0;
}
-const struct file_operations queue_dbg_fops = {
+static const struct file_operations queue_dbg_fops = {
.owner = THIS_MODULE,
.open = queue_dbg_open,
.llseek = no_llseek,
@@ -193,7 +193,7 @@ const struct file_operations queue_dbg_fops = {
.release = queue_dbg_release,
};
-const struct file_operations regs_dbg_fops = {
+static const struct file_operations regs_dbg_fops = {
.owner = THIS_MODULE,
.open = regs_dbg_open,
.llseek = generic_file_llseek,
@@ -1951,10 +1951,10 @@ static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
usba_start(udc);
} else {
udc->suspended = false;
- usba_stop(udc);
-
if (udc->driver->disconnect)
udc->driver->disconnect(&udc->gadget);
+
+ usba_stop(udc);
}
udc->vbus_prev = vbus;
}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index a4d9b5e1e50e..d49c6dc1082d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -540,7 +540,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
{
struct bdc *bdc = ep->bdc;
- if (req == NULL || &req->queue == NULL || &req->usb_req == NULL)
+ if (req == NULL)
return;
dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
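The bdc_ep.c fix removes tests of the form &req->queue == NULL. Taking a member's address is pointer arithmetic on req, not a load, so for any valid req it cannot yield NULL and compilers may delete the comparison as dead code. A small, self-contained illustration with hypothetical types:

    #include <linux/list.h>
    #include <linux/types.h>

    struct example_req {
            int data;
            struct list_head queue;
    };

    static bool bogus_check(struct example_req *req)
    {
            /* always false for a valid req: &req->queue is simply
             * req plus offsetof(struct example_req, queue)
             */
            return &req->queue == NULL;
    }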
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 4c9d1e49d5ed..6e3e3ebf715f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1134,7 +1134,7 @@ static struct platform_driver dummy_udc_driver = {
.suspend = dummy_udc_suspend,
.resume = dummy_udc_resume,
.driver = {
- .name = (char *) gadget_name,
+ .name = gadget_name,
},
};
@@ -2720,7 +2720,7 @@ static struct platform_driver dummy_hcd_driver = {
.suspend = dummy_hcd_suspend,
.resume = dummy_hcd_resume,
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
},
};
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 21f3e6c4e4d6..d6ca50f01985 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1199,7 +1199,7 @@ err:
static struct platform_driver fotg210_driver = {
.driver = {
- .name = (char *)udc_name,
+ .name = udc_name,
},
.probe = fotg210_udc_probe,
.remove = fotg210_udc_remove,
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index ec6eda426223..febabde62f71 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -53,7 +53,6 @@
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
static const char driver_name[] = "fsl-usb2-udc";
-static const char driver_desc[] = DRIVER_DESC;
static struct usb_dr_device __iomem *dr_regs;
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 00e3f66836a9..9af8b415f303 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1507,7 +1507,7 @@ clean_up:
static struct platform_driver fusb300_driver = {
.remove = fusb300_remove,
.driver = {
- .name = (char *) udc_name,
+ .name = udc_name,
},
};
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 4a46f661d0e4..91dcb1995c27 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1844,7 +1844,7 @@ static const struct pci_device_id pci_ids[] = { {
MODULE_DEVICE_TABLE (pci, pci_ids);
static struct pci_driver goku_pci_driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
.id_table = pci_ids,
.probe = goku_probe,
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index d14b2bb3f67c..cb997b82c008 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3267,7 +3267,7 @@ static struct platform_driver lpc32xx_udc_driver = {
.suspend = lpc32xx_udc_suspend,
.resume = lpc32xx_udc_resume,
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
.of_match_table = of_match_ptr(lpc32xx_udc_of_match),
},
};
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index a8288df6aadf..75d16a8902e6 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1691,7 +1691,7 @@ clean_up:
static struct platform_driver m66592_driver = {
.remove = m66592_remove,
.driver = {
- .name = (char *) udc_name,
+ .name = udc_name,
},
};
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
new file mode 100644
index 000000000000..8fbc083b6732
--- /dev/null
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -0,0 +1,1331 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MAX3420 Device Controller driver for USB.
+ *
+ * Author: Jaswinder Singh Brar <jaswinder.singh@linaro.org>
+ * (C) Copyright 2019-2020 Linaro Ltd
+ *
+ * Based on:
+ * o MAX3420E datasheet
+ * http://datasheets.maximintegrated.com/en/ds/MAX3420E.pdf
+ * o MAX342{0,1}E Programming Guides
+ * https://pdfserv.maximintegrated.com/en/an/AN3598.pdf
+ * https://pdfserv.maximintegrated.com/en/an/AN3785.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/prefetch.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio/consumer.h>
+
+#define MAX3420_MAX_EPS 4
+#define MAX3420_EP_MAX_PACKET 64 /* Same for all Endpoints */
+#define MAX3420_EPNAME_SIZE 16 /* Buffer size for endpoint name */
+
+#define MAX3420_ACKSTAT BIT(0)
+
+#define MAX3420_SPI_DIR_RD 0 /* read register from MAX3420 */
+#define MAX3420_SPI_DIR_WR 1 /* write register to MAX3420 */
+
+/* SPI commands: */
+#define MAX3420_SPI_DIR_SHIFT 1
+#define MAX3420_SPI_REG_SHIFT 3
+
+#define MAX3420_REG_EP0FIFO 0
+#define MAX3420_REG_EP1FIFO 1
+#define MAX3420_REG_EP2FIFO 2
+#define MAX3420_REG_EP3FIFO 3
+#define MAX3420_REG_SUDFIFO 4
+#define MAX3420_REG_EP0BC 5
+#define MAX3420_REG_EP1BC 6
+#define MAX3420_REG_EP2BC 7
+#define MAX3420_REG_EP3BC 8
+
+#define MAX3420_REG_EPSTALLS 9
+ #define ACKSTAT BIT(6)
+ #define STLSTAT BIT(5)
+ #define STLEP3IN BIT(4)
+ #define STLEP2IN BIT(3)
+ #define STLEP1OUT BIT(2)
+ #define STLEP0OUT BIT(1)
+ #define STLEP0IN BIT(0)
+
+#define MAX3420_REG_CLRTOGS 10
+ #define EP3DISAB BIT(7)
+ #define EP2DISAB BIT(6)
+ #define EP1DISAB BIT(5)
+ #define CTGEP3IN BIT(4)
+ #define CTGEP2IN BIT(3)
+ #define CTGEP1OUT BIT(2)
+
+#define MAX3420_REG_EPIRQ 11
+#define MAX3420_REG_EPIEN 12
+ #define SUDAVIRQ BIT(5)
+ #define IN3BAVIRQ BIT(4)
+ #define IN2BAVIRQ BIT(3)
+ #define OUT1DAVIRQ BIT(2)
+ #define OUT0DAVIRQ BIT(1)
+ #define IN0BAVIRQ BIT(0)
+
+#define MAX3420_REG_USBIRQ 13
+#define MAX3420_REG_USBIEN 14
+ #define OSCOKIRQ BIT(0)
+ #define RWUDNIRQ BIT(1)
+ #define BUSACTIRQ BIT(2)
+ #define URESIRQ BIT(3)
+ #define SUSPIRQ BIT(4)
+ #define NOVBUSIRQ BIT(5)
+ #define VBUSIRQ BIT(6)
+ #define URESDNIRQ BIT(7)
+
+#define MAX3420_REG_USBCTL 15
+ #define HOSCSTEN BIT(7)
+ #define VBGATE BIT(6)
+ #define CHIPRES BIT(5)
+ #define PWRDOWN BIT(4)
+ #define CONNECT BIT(3)
+ #define SIGRWU BIT(2)
+
+#define MAX3420_REG_CPUCTL 16
+ #define IE BIT(0)
+
+#define MAX3420_REG_PINCTL 17
+ #define EP3INAK BIT(7)
+ #define EP2INAK BIT(6)
+ #define EP0INAK BIT(5)
+ #define FDUPSPI BIT(4)
+ #define INTLEVEL BIT(3)
+ #define POSINT BIT(2)
+ #define GPXB BIT(1)
+ #define GPXA BIT(0)
+
+#define MAX3420_REG_REVISION 18
+
+#define MAX3420_REG_FNADDR 19
+ #define FNADDR_MASK 0x7f
+
+#define MAX3420_REG_IOPINS 20
+#define MAX3420_REG_IOPINS2 21
+#define MAX3420_REG_GPINIRQ 22
+#define MAX3420_REG_GPINIEN 23
+#define MAX3420_REG_GPINPOL 24
+#define MAX3420_REG_HIRQ 25
+#define MAX3420_REG_HIEN 26
+#define MAX3420_REG_MODE 27
+#define MAX3420_REG_PERADDR 28
+#define MAX3420_REG_HCTL 29
+#define MAX3420_REG_HXFR 30
+#define MAX3420_REG_HRSL 31
+
+#define ENABLE_IRQ BIT(0)
+#define IOPIN_UPDATE BIT(1)
+#define REMOTE_WAKEUP BIT(2)
+#define CONNECT_HOST GENMASK(4, 3)
+#define HCONNECT (1 << 3)
+#define HDISCONNECT (3 << 3)
+#define UDC_START GENMASK(6, 5)
+#define START (1 << 5)
+#define STOP (3 << 5)
+#define ENABLE_EP GENMASK(8, 7)
+#define ENABLE (1 << 7)
+#define DISABLE (3 << 7)
+#define STALL_EP GENMASK(10, 9)
+#define STALL (1 << 9)
+#define UNSTALL (3 << 9)
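+
+/*
+ * The two-bit codes above are queued in "todo" words from atomic
+ * context (IRQ handlers, gadget ops) and consumed by the SPI thread,
+ * since acting on them requires SPI transfers that may sleep.
+ */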
+
+#define MAX3420_CMD(c) FIELD_PREP(GENMASK(7, 3), c)
+#define MAX3420_SPI_CMD_RD(c) (MAX3420_CMD(c) | (0 << 1))
+#define MAX3420_SPI_CMD_WR(c) (MAX3420_CMD(c) | (1 << 1))
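+
+/*
+ * Command byte layout: register number in bits 7..3, direction in
+ * bit 1 (1 = write), ACKSTAT in bit 0.  For example, reading
+ * register 13 (USBIRQ) sends MAX3420_SPI_CMD_RD(13) == 13 << 3 == 0x68.
+ */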
+
+struct max3420_req {
+ struct usb_request usb_req;
+ struct list_head queue;
+ struct max3420_ep *ep;
+};
+
+struct max3420_ep {
+ struct usb_ep ep_usb;
+ struct max3420_udc *udc;
+ struct list_head queue;
+ char name[MAX3420_EPNAME_SIZE];
+ unsigned int maxpacket;
+ spinlock_t lock;
+ int halted;
+ u32 todo;
+ int id;
+};
+
+struct max3420_udc {
+ struct usb_gadget gadget;
+ struct max3420_ep ep[MAX3420_MAX_EPS];
+ struct usb_gadget_driver *driver;
+ struct task_struct *thread_task;
+ int remote_wkp, is_selfpowered;
+ bool vbus_active, softconnect;
+ struct usb_ctrlrequest setup;
+ struct mutex spi_bus_mutex;
+ struct max3420_req ep0req;
+ struct spi_device *spi;
+ struct device *dev;
+ spinlock_t lock;
+ bool suspended;
+ u8 ep0buf[64];
+ u32 todo;
+};
+
+#define to_max3420_req(r) container_of((r), struct max3420_req, usb_req)
+#define to_max3420_ep(e) container_of((e), struct max3420_ep, ep_usb)
+#define to_udc(g) container_of((g), struct max3420_udc, gadget)
+
+#define DRIVER_DESC "MAX3420 USB Device-Mode Driver"
+static const char driver_name[] = "max3420-udc";
+
+/* Control endpoint configuration. */
+static const struct usb_endpoint_descriptor ep0_desc = {
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(MAX3420_EP_MAX_PACKET),
+};
+
+static void spi_ack_ctrl(struct max3420_udc *udc)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 txdata[1];
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ txdata[0] = MAX3420_ACKSTAT;
+ transfer.tx_buf = txdata;
+ transfer.len = 1;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static u8 spi_rd8_ack(struct max3420_udc *udc, u8 reg, int actstat)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 txdata[2], rxdata[2];
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ txdata[0] = MAX3420_SPI_CMD_RD(reg) | (actstat ? MAX3420_ACKSTAT : 0);
+ transfer.tx_buf = txdata;
+ transfer.rx_buf = rxdata;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+
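+ /* byte 0 clocked the command out, byte 1 clocked the value in */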
+ return rxdata[1];
+}
+
+static u8 spi_rd8(struct max3420_udc *udc, u8 reg)
+{
+ return spi_rd8_ack(udc, reg, 0);
+}
+
+static void spi_wr8_ack(struct max3420_udc *udc, u8 reg, u8 val, int actstat)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 txdata[2];
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ txdata[0] = MAX3420_SPI_CMD_WR(reg) | (actstat ? MAX3420_ACKSTAT : 0);
+ txdata[1] = val;
+
+ transfer.tx_buf = txdata;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static void spi_wr8(struct max3420_udc *udc, u8 reg, u8 val)
+{
+ spi_wr8_ack(udc, reg, val, 0);
+}
+
+static void spi_rd_buf(struct max3420_udc *udc, u8 reg, void *buf, u8 len)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 local_buf[MAX3420_EP_MAX_PACKET + 1] = {};
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ local_buf[0] = MAX3420_SPI_CMD_RD(reg);
+ transfer.tx_buf = &local_buf[0];
+ transfer.rx_buf = &local_buf[0];
+ transfer.len = len + 1;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+
+ memcpy(buf, &local_buf[1], len);
+}
+
+static void spi_wr_buf(struct max3420_udc *udc, u8 reg, void *buf, u8 len)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 local_buf[MAX3420_EP_MAX_PACKET + 1] = {};
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ local_buf[0] = MAX3420_SPI_CMD_WR(reg);
+ memcpy(&local_buf[1], buf, len);
+
+ transfer.tx_buf = local_buf;
+ transfer.len = len + 1;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static int spi_max3420_enable(struct max3420_ep *ep)
+{
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+ u8 epdis, epien;
+ int todo;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ todo = ep->todo & ENABLE_EP;
+ ep->todo &= ~ENABLE_EP;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (!todo || ep->id == 0)
+ return false;
+
+ epien = spi_rd8(udc, MAX3420_REG_EPIEN);
+ epdis = spi_rd8(udc, MAX3420_REG_CLRTOGS);
+
+ if (todo == ENABLE) {
+ epdis &= ~BIT(ep->id + 4);
+ epien |= BIT(ep->id + 1);
+ } else {
+ epdis |= BIT(ep->id + 4);
+ epien &= ~BIT(ep->id + 1);
+ }
+
+ spi_wr8(udc, MAX3420_REG_CLRTOGS, epdis);
+ spi_wr8(udc, MAX3420_REG_EPIEN, epien);
+
+ return true;
+}
+
+static int spi_max3420_stall(struct max3420_ep *ep)
+{
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+ u8 epstalls;
+ int todo;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ todo = ep->todo & STALL_EP;
+ ep->todo &= ~STALL_EP;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (!todo || ep->id == 0)
+ return false;
+
+ epstalls = spi_rd8(udc, MAX3420_REG_EPSTALLS);
+ if (todo == STALL) {
+ ep->halted = 1;
+ epstalls |= BIT(ep->id + 1);
+ } else {
+ u8 clrtogs;
+
+ ep->halted = 0;
+ epstalls &= ~BIT(ep->id + 1);
+ clrtogs = spi_rd8(udc, MAX3420_REG_CLRTOGS);
+ clrtogs |= BIT(ep->id + 1);
+ spi_wr8(udc, MAX3420_REG_CLRTOGS, clrtogs);
+ }
+ spi_wr8(udc, MAX3420_REG_EPSTALLS, epstalls | ACKSTAT);
+
+ return true;
+}
+
+static int spi_max3420_rwkup(struct max3420_udc *udc)
+{
+ unsigned long flags;
+ int wake_remote;
+ u8 usbctl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ wake_remote = udc->todo & REMOTE_WAKEUP;
+ udc->todo &= ~REMOTE_WAKEUP;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (!wake_remote || !udc->suspended)
+ return false;
+
+ /* Set Remote-WkUp Signal */
+ usbctl = spi_rd8(udc, MAX3420_REG_USBCTL);
+ usbctl |= SIGRWU;
+ spi_wr8(udc, MAX3420_REG_USBCTL, usbctl);
+
+ msleep_interruptible(5);
+
+ /* Clear Remote-WkUp Signal */
+ usbctl = spi_rd8(udc, MAX3420_REG_USBCTL);
+ usbctl &= ~SIGRWU;
+ spi_wr8(udc, MAX3420_REG_USBCTL, usbctl);
+
+ udc->suspended = false;
+
+ return true;
+}
+
+static void max3420_nuke(struct max3420_ep *ep, int status);
+static void __max3420_stop(struct max3420_udc *udc)
+{
+ u8 val;
+ int i;
+
+ /* clear all pending requests */
+ for (i = 1; i < MAX3420_MAX_EPS; i++)
+ max3420_nuke(&udc->ep[i], -ECONNRESET);
+
+ /* Disable IRQ to CPU */
+ spi_wr8(udc, MAX3420_REG_CPUCTL, 0);
+
+ val = spi_rd8(udc, MAX3420_REG_USBCTL);
+ val |= PWRDOWN;
+ if (udc->is_selfpowered)
+ val &= ~HOSCSTEN;
+ else
+ val |= HOSCSTEN;
+ spi_wr8(udc, MAX3420_REG_USBCTL, val);
+}
+
+static void __max3420_start(struct max3420_udc *udc)
+{
+ u8 val;
+
+ /*
+ * This delay is needed when bus-powered, and it also helps
+ * stability in self-powered designs.
+ */
+ msleep_interruptible(250);
+
+ /* configure SPI */
+ spi_wr8(udc, MAX3420_REG_PINCTL, FDUPSPI);
+
+ /* Chip Reset */
+ spi_wr8(udc, MAX3420_REG_USBCTL, CHIPRES);
+ msleep_interruptible(5);
+ spi_wr8(udc, MAX3420_REG_USBCTL, 0);
+
+ /* Poll for OSC to stabilize */
+ while (1) {
+ val = spi_rd8(udc, MAX3420_REG_USBIRQ);
+ if (val & OSCOKIRQ)
+ break;
+ cond_resched();
+ }
+
+ /* Enable PULL-UP only when Vbus detected */
+ val = spi_rd8(udc, MAX3420_REG_USBCTL);
+ val |= VBGATE | CONNECT;
+ spi_wr8(udc, MAX3420_REG_USBCTL, val);
+
+ val = URESDNIRQ | URESIRQ;
+ if (udc->is_selfpowered)
+ val |= NOVBUSIRQ;
+ spi_wr8(udc, MAX3420_REG_USBIEN, val);
+
+ /* Enable only EP0 interrupts */
+ val = IN0BAVIRQ | OUT0DAVIRQ | SUDAVIRQ;
+ spi_wr8(udc, MAX3420_REG_EPIEN, val);
+
+ /* Enable IRQ to CPU */
+ spi_wr8(udc, MAX3420_REG_CPUCTL, IE);
+}
+
+static int max3420_start(struct max3420_udc *udc)
+{
+ unsigned long flags;
+ int todo;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ todo = udc->todo & UDC_START;
+ udc->todo &= ~UDC_START;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (!todo)
+ return false;
+
+ if (udc->vbus_active && udc->softconnect)
+ __max3420_start(udc);
+ else
+ __max3420_stop(udc);
+
+ return true;
+}
+
+static irqreturn_t max3420_vbus_handler(int irq, void *dev_id)
+{
+ struct max3420_udc *udc = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* it's a vbus change interrupt */
+ udc->vbus_active = !udc->vbus_active;
+ udc->todo |= UDC_START;
+ usb_udc_vbus_handler(&udc->gadget, udc->vbus_active);
+ usb_gadget_set_state(&udc->gadget, udc->vbus_active
+ ? USB_STATE_POWERED : USB_STATE_NOTATTACHED);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task &&
+ udc->thread_task->state != TASK_RUNNING)
+ wake_up_process(udc->thread_task);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max3420_irq_handler(int irq, void *dev_id)
+{
+ struct max3420_udc *udc = dev_id;
+ struct spi_device *spi = udc->spi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if ((udc->todo & ENABLE_IRQ) == 0) {
+ disable_irq_nosync(spi->irq);
+ udc->todo |= ENABLE_IRQ;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task &&
+ udc->thread_task->state != TASK_RUNNING)
+ wake_up_process(udc->thread_task);
+
+ return IRQ_HANDLED;
+}
+
+static void max3420_getstatus(struct max3420_udc *udc)
+{
+ struct max3420_ep *ep;
+ u16 status = 0;
+
+ switch (udc->setup.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ /* Get device status */
+ status = udc->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED;
+ status |= (udc->remote_wkp << USB_DEVICE_REMOTE_WAKEUP);
+ break;
+ case USB_RECIP_INTERFACE:
+ if (udc->driver->setup(&udc->gadget, &udc->setup) < 0)
+ goto stall;
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = &udc->ep[udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK];
+ if (udc->setup.wIndex & USB_DIR_IN) {
+ if (!ep->ep_usb.caps.dir_in)
+ goto stall;
+ } else {
+ if (!ep->ep_usb.caps.dir_out)
+ goto stall;
+ }
+ if (ep->halted)
+ status = 1 << USB_ENDPOINT_HALT;
+ break;
+ default:
+ goto stall;
+ }
+
+ status = cpu_to_le16(status);
+ spi_wr_buf(udc, MAX3420_REG_EP0FIFO, &status, 2);
+ spi_wr8_ack(udc, MAX3420_REG_EP0BC, 2, 1);
+ return;
+stall:
+ dev_err(udc->dev, "Can't respond to getstatus request\n");
+ spi_wr8(udc, MAX3420_REG_EPSTALLS, STLEP0IN | STLEP0OUT | STLSTAT);
+}
+
+static void max3420_set_clear_feature(struct max3420_udc *udc)
+{
+ struct max3420_ep *ep;
+ int set = udc->setup.bRequest == USB_REQ_SET_FEATURE;
+ unsigned long flags;
+ int id;
+
+ switch (udc->setup.bRequestType) {
+ case USB_RECIP_DEVICE:
+ if (udc->setup.wValue != USB_DEVICE_REMOTE_WAKEUP)
+ break;
+
+ if (udc->setup.bRequest == USB_REQ_SET_FEATURE)
+ udc->remote_wkp = 1;
+ else
+ udc->remote_wkp = 0;
+
+ spi_ack_ctrl(udc);
+ return;
+
+ case USB_RECIP_ENDPOINT:
+ if (udc->setup.wValue != USB_ENDPOINT_HALT)
+ break;
+
+ id = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ ep = &udc->ep[id];
+
+ spin_lock_irqsave(&ep->lock, flags);
+ ep->todo &= ~STALL_EP;
+ if (set)
+ ep->todo |= STALL;
+ else
+ ep->todo |= UNSTALL;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ spi_max3420_stall(ep);
+ return;
+ default:
+ break;
+ }
+
+ dev_err(udc->dev, "Can't respond to SET/CLEAR FEATURE\n");
+ spi_wr8(udc, MAX3420_REG_EPSTALLS, STLEP0IN | STLEP0OUT | STLSTAT);
+}
+
+static void max3420_handle_setup(struct max3420_udc *udc)
+{
+ struct usb_ctrlrequest setup;
+ u8 addr;
+
+ spi_rd_buf(udc, MAX3420_REG_SUDFIFO, (void *)&setup, 8);
+
+ udc->setup = setup;
+ /* convert the wire-order (LE) fields to CPU order for local use */
+ udc->setup.wValue = le16_to_cpu(setup.wValue);
+ udc->setup.wIndex = le16_to_cpu(setup.wIndex);
+ udc->setup.wLength = le16_to_cpu(setup.wLength);
+
+ switch (udc->setup.bRequest) {
+ case USB_REQ_GET_STATUS:
+ /* Data+Status phase from udc */
+ if ((udc->setup.bRequestType &
+ (USB_DIR_IN | USB_TYPE_MASK)) !=
+ (USB_DIR_IN | USB_TYPE_STANDARD)) {
+ break;
+ }
+ return max3420_getstatus(udc);
+ case USB_REQ_SET_ADDRESS:
+ /* Status phase from udc */
+ if (udc->setup.bRequestType != (USB_DIR_OUT |
+ USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
+ break;
+ }
+ addr = spi_rd8_ack(udc, MAX3420_REG_FNADDR, 1);
+ dev_dbg(udc->dev, "Assigned Address=%d\n", udc->setup.wValue);
+ return;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ /* Requests with no data phase, status phase from udc */
+ if ((udc->setup.bRequestType & USB_TYPE_MASK)
+ != USB_TYPE_STANDARD)
+ break;
+ return max3420_set_clear_feature(udc);
+ default:
+ break;
+ }
+
+ if (udc->driver->setup(&udc->gadget, &setup) < 0) {
+ /* Stall EP0 */
+ spi_wr8(udc, MAX3420_REG_EPSTALLS,
+ STLEP0IN | STLEP0OUT | STLSTAT);
+ }
+}
+
+static void max3420_req_done(struct max3420_req *req, int status)
+{
+ struct max3420_ep *ep = req->ep;
+ struct max3420_udc *udc = ep->udc;
+
+ if (req->usb_req.status == -EINPROGRESS)
+ req->usb_req.status = status;
+ else
+ status = req->usb_req.status;
+
+ if (status && status != -ESHUTDOWN)
+ dev_err(udc->dev, "%s done %p, status %d\n",
+ ep->ep_usb.name, req, status);
+
+ if (req->usb_req.complete)
+ req->usb_req.complete(&ep->ep_usb, &req->usb_req);
+}
+
+static int max3420_do_data(struct max3420_udc *udc, int ep_id, int in)
+{
+ struct max3420_ep *ep = &udc->ep[ep_id];
+ struct max3420_req *req;
+ int done, length, psz;
+ void *buf;
+
+ if (list_empty(&ep->queue))
+ return false;
+
+ req = list_first_entry(&ep->queue, struct max3420_req, queue);
+ buf = req->usb_req.buf + req->usb_req.actual;
+
+ psz = ep->ep_usb.maxpacket;
+ length = req->usb_req.length - req->usb_req.actual;
+ length = min(length, psz);
+
+ if (length == 0) {
+ done = 1;
+ goto xfer_done;
+ }
+
+ done = 0;
+ if (in) {
+ prefetch(buf);
+ spi_wr_buf(udc, MAX3420_REG_EP0FIFO + ep_id, buf, length);
+ spi_wr8(udc, MAX3420_REG_EP0BC + ep_id, length);
+ if (length < psz)
+ done = 1;
+ } else {
+ psz = spi_rd8(udc, MAX3420_REG_EP0BC + ep_id);
+ length = min(length, psz);
+ prefetchw(buf);
+ spi_rd_buf(udc, MAX3420_REG_EP0FIFO + ep_id, buf, length);
+ if (length < ep->ep_usb.maxpacket)
+ done = 1;
+ }
+
+ req->usb_req.actual += length;
+
+ if (req->usb_req.actual == req->usb_req.length)
+ done = 1;
+
+xfer_done:
+ if (done) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ list_del_init(&req->queue);
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (ep_id == 0)
+ spi_ack_ctrl(udc);
+
+ max3420_req_done(req, 0);
+ }
+
+ return true;
+}
+
+static int max3420_handle_irqs(struct max3420_udc *udc)
+{
+ u8 epien, epirq, usbirq, usbien, reg[4];
+ bool ret = false;
+
+ spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 4);
+ epirq = reg[0];
+ epien = reg[1];
+ usbirq = reg[2];
+ usbien = reg[3];
+
+ usbirq &= usbien;
+ epirq &= epien;
+
+ if (epirq & SUDAVIRQ) {
+ spi_wr8(udc, MAX3420_REG_EPIRQ, SUDAVIRQ);
+ max3420_handle_setup(udc);
+ return true;
+ }
+
+ if (usbirq & VBUSIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, VBUSIRQ);
+ dev_dbg(udc->dev, "Cable plugged in\n");
+ return true;
+ }
+
+ if (usbirq & NOVBUSIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, NOVBUSIRQ);
+ dev_dbg(udc->dev, "Cable pulled out\n");
+ return true;
+ }
+
+ if (usbirq & URESIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, URESIRQ);
+ dev_dbg(udc->dev, "USB Reset - Start\n");
+ return true;
+ }
+
+ if (usbirq & URESDNIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, URESDNIRQ);
+ dev_dbg(udc->dev, "USB Reset - END\n");
+ spi_wr8(udc, MAX3420_REG_USBIEN, URESDNIRQ | URESIRQ);
+ spi_wr8(udc, MAX3420_REG_EPIEN, SUDAVIRQ | IN0BAVIRQ
+ | OUT0DAVIRQ);
+ return true;
+ }
+
+ if (usbirq & SUSPIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, SUSPIRQ);
+ dev_dbg(udc->dev, "USB Suspend - Enter\n");
+ udc->suspended = true;
+ return true;
+ }
+
+ if (usbirq & BUSACTIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, BUSACTIRQ);
+ dev_dbg(udc->dev, "USB Suspend - Exit\n");
+ udc->suspended = false;
+ return true;
+ }
+
+ if (usbirq & RWUDNIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, RWUDNIRQ);
+ dev_dbg(udc->dev, "Asked host to wake up\n");
+ return true;
+ }
+
+ if (usbirq & OSCOKIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, OSCOKIRQ);
+ dev_dbg(udc->dev, "Osc stabilized, start work\n");
+ return true;
+ }
+
+ if (epirq & OUT0DAVIRQ && max3420_do_data(udc, 0, 0)) {
+ spi_wr8_ack(udc, MAX3420_REG_EPIRQ, OUT0DAVIRQ, 1);
+ ret = true;
+ }
+
+ if (epirq & IN0BAVIRQ && max3420_do_data(udc, 0, 1))
+ ret = true;
+
+ if (epirq & OUT1DAVIRQ && max3420_do_data(udc, 1, 0)) {
+ spi_wr8_ack(udc, MAX3420_REG_EPIRQ, OUT1DAVIRQ, 1);
+ ret = true;
+ }
+
+ if (epirq & IN2BAVIRQ && max3420_do_data(udc, 2, 1))
+ ret = true;
+
+ if (epirq & IN3BAVIRQ && max3420_do_data(udc, 3, 1))
+ ret = true;
+
+ return ret;
+}
+
+static int max3420_thread(void *dev_id)
+{
+ struct max3420_udc *udc = dev_id;
+ struct spi_device *spi = udc->spi;
+ int i, loop_again = 1;
+ unsigned long flags;
+
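+ /*
+ * Main loop: handle all pending work, then re-enable the chip
+ * IRQ and sleep (at most 250 ms) until there is more to do.
+ */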
+ while (!kthread_should_stop()) {
+ if (!loop_again) {
+ ktime_t kt = ns_to_ktime(1000 * 1000 * 250); /* 250ms */
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (udc->todo & ENABLE_IRQ) {
+ enable_irq(spi->irq);
+ udc->todo &= ~ENABLE_IRQ;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
+ }
+ loop_again = 0;
+
+ mutex_lock(&udc->spi_bus_mutex);
+
+ /* Nothing to do while VBUS is off or the gadget is disconnected */
+ if (!udc->vbus_active || !udc->softconnect)
+ goto loop;
+
+ if (max3420_start(udc)) {
+ loop_again = 1;
+ goto loop;
+ }
+
+ if (max3420_handle_irqs(udc)) {
+ loop_again = 1;
+ goto loop;
+ }
+
+ if (spi_max3420_rwkup(udc)) {
+ loop_again = 1;
+ goto loop;
+ }
+
+ max3420_do_data(udc, 0, 1); /* get done with the EP0 ZLP */
+
+ for (i = 1; i < MAX3420_MAX_EPS; i++) {
+ struct max3420_ep *ep = &udc->ep[i];
+
+ if (spi_max3420_enable(ep))
+ loop_again = 1;
+ if (spi_max3420_stall(ep))
+ loop_again = 1;
+ }
+loop:
+ mutex_unlock(&udc->spi_bus_mutex);
+ }
+
+ set_current_state(TASK_RUNNING);
+ dev_info(udc->dev, "SPI thread exiting\n");
+ return 0;
+}
+
+static int max3420_ep_set_halt(struct usb_ep *_ep, int stall)
+{
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ ep->todo &= ~STALL_EP;
+ if (stall)
+ ep->todo |= STALL;
+ else
+ ep->todo |= UNSTALL;
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ wake_up_process(udc->thread_task);
+
+ dev_dbg(udc->dev, "%sStall %s\n", stall ? "" : "Un", ep->name);
+ return 0;
+}
+
+static int __max3420_ep_enable(struct max3420_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned int maxp = usb_endpoint_maxp(desc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ ep->ep_usb.desc = desc;
+ ep->ep_usb.maxpacket = maxp;
+
+ ep->todo &= ~ENABLE_EP;
+ ep->todo |= ENABLE;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ return 0;
+}
+
+static int max3420_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_udc *udc = ep->udc;
+
+ __max3420_ep_enable(ep, desc);
+
+ wake_up_process(udc->thread_task);
+
+ return 0;
+}
+
+static void max3420_nuke(struct max3420_ep *ep, int status)
+{
+ struct max3420_req *req, *r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ list_for_each_entry_safe(req, r, &ep->queue, queue) {
+ list_del_init(&req->queue);
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+ max3420_req_done(req, status);
+ spin_lock_irqsave(&ep->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+static void __max3420_ep_disable(struct max3420_ep *ep)
+{
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ ep->ep_usb.desc = NULL;
+
+ ep->todo &= ~ENABLE_EP;
+ ep->todo |= DISABLE;
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ dev_dbg(udc->dev, "Disabled %s\n", ep->name);
+}
+
+static int max3420_ep_disable(struct usb_ep *_ep)
+{
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_udc *udc = ep->udc;
+
+ max3420_nuke(ep, -ESHUTDOWN);
+
+ __max3420_ep_disable(ep);
+
+ wake_up_process(udc->thread_task);
+
+ return 0;
+}
+
+static struct usb_request *max3420_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_req *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->ep = ep;
+
+ return &req->usb_req;
+}
+
+static void max3420_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ kfree(to_max3420_req(_req));
+}
+
+static int max3420_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t ignored)
+{
+ struct max3420_req *req = to_max3420_req(_req);
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ list_add_tail(&req->queue, &ep->queue);
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ wake_up_process(udc->thread_task);
+ return 0;
+}
+
+static int max3420_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct max3420_req *t, *req = to_max3420_req(_req);
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ /* Pluck the request from the queue */
+ list_for_each_entry(t, &ep->queue, queue)
+ if (t == req) {
+ list_del_init(&req->queue);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (t == req)
+ max3420_req_done(req, -ECONNRESET);
+
+ return 0;
+}
+
+static const struct usb_ep_ops max3420_ep_ops = {
+ .enable = max3420_ep_enable,
+ .disable = max3420_ep_disable,
+ .alloc_request = max3420_alloc_request,
+ .free_request = max3420_free_request,
+ .queue = max3420_ep_queue,
+ .dequeue = max3420_ep_dequeue,
+ .set_halt = max3420_ep_set_halt,
+};
+
+static int max3420_wakeup(struct usb_gadget *gadget)
+{
+ struct max3420_udc *udc = to_udc(gadget);
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Only if wakeup allowed by host */
+ if (udc->remote_wkp) {
+ udc->todo |= REMOTE_WAKEUP;
+ ret = 0;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task &&
+ udc->thread_task->state != TASK_RUNNING)
+ wake_up_process(udc->thread_task);
+ return ret;
+}
+
+static int max3420_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct max3420_udc *udc = to_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* hook up the driver */
+ driver->driver.bus = NULL;
+ udc->driver = driver;
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ udc->gadget.is_selfpowered = udc->is_selfpowered;
+ udc->remote_wkp = 0;
+ udc->softconnect = true;
+ udc->todo |= UDC_START;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task &&
+ udc->thread_task->state != TASK_RUNNING)
+ wake_up_process(udc->thread_task);
+
+ return 0;
+}
+
+static int max3420_udc_stop(struct usb_gadget *gadget)
+{
+ struct max3420_udc *udc = to_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->is_selfpowered = udc->gadget.is_selfpowered;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->driver = NULL;
+ udc->softconnect = false;
+ udc->todo |= UDC_START;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task &&
+ udc->thread_task->state != TASK_RUNNING)
+ wake_up_process(udc->thread_task);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops max3420_udc_ops = {
+ .udc_start = max3420_udc_start,
+ .udc_stop = max3420_udc_stop,
+ .wakeup = max3420_wakeup,
+};
+
+static void max3420_eps_init(struct max3420_udc *udc)
+{
+ int idx;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+ for (idx = 0; idx < MAX3420_MAX_EPS; idx++) {
+ struct max3420_ep *ep = &udc->ep[idx];
+
+ spin_lock_init(&ep->lock);
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->udc = udc;
+ ep->id = idx;
+ ep->halted = 0;
+ ep->maxpacket = 0;
+ ep->ep_usb.name = ep->name;
+ ep->ep_usb.ops = &max3420_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep_usb, MAX3420_EP_MAX_PACKET);
+
+ if (idx == 0) { /* For EP0 */
+ ep->ep_usb.desc = &ep0_desc;
+ ep->ep_usb.maxpacket = usb_endpoint_maxp(&ep0_desc);
+ ep->ep_usb.caps.type_control = true;
+ ep->ep_usb.caps.dir_in = true;
+ ep->ep_usb.caps.dir_out = true;
+ snprintf(ep->name, MAX3420_EPNAME_SIZE, "ep0");
+ continue;
+ }
+
+ if (idx == 1) { /* EP1 is OUT */
+ ep->ep_usb.caps.dir_in = false;
+ ep->ep_usb.caps.dir_out = true;
+ snprintf(ep->name, MAX3420_EPNAME_SIZE, "ep1-bulk-out");
+ } else { /* EP2 & EP3 are IN */
+ ep->ep_usb.caps.dir_in = true;
+ ep->ep_usb.caps.dir_out = false;
+ snprintf(ep->name, MAX3420_EPNAME_SIZE,
+ "ep%d-bulk-in", idx);
+ }
+ ep->ep_usb.caps.type_iso = false;
+ ep->ep_usb.caps.type_int = false;
+ ep->ep_usb.caps.type_bulk = true;
+
+ list_add_tail(&ep->ep_usb.ep_list,
+ &udc->gadget.ep_list);
+ }
+}
+
+static int max3420_probe(struct spi_device *spi)
+{
+ struct max3420_udc *udc;
+ int err, irq;
+ u8 reg[8];
+
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ dev_err(&spi->dev, "UDC needs full duplex to work\n");
+ return -EINVAL;
+ }
+
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "Unable to set up SPI bus\n");
+ return err;
+ }
+
+ udc = devm_kzalloc(&spi->dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ udc->spi = spi;
+
+ udc->remote_wkp = 0;
+
+ /* Setup gadget structure */
+ udc->gadget.ops = &max3420_udc_ops;
+ udc->gadget.max_speed = USB_SPEED_FULL;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.ep0 = &udc->ep[0].ep_usb;
+ udc->gadget.name = driver_name;
+
+ spin_lock_init(&udc->lock);
+ mutex_init(&udc->spi_bus_mutex);
+
+ udc->ep0req.ep = &udc->ep[0];
+ udc->ep0req.usb_req.buf = udc->ep0buf;
+ INIT_LIST_HEAD(&udc->ep0req.queue);
+
+ /* setup Endpoints */
+ max3420_eps_init(udc);
+
+ /* configure SPI */
+ spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 8);
+ spi_wr8(udc, MAX3420_REG_PINCTL, FDUPSPI);
+
+ err = usb_add_gadget_udc(&spi->dev, &udc->gadget);
+ if (err)
+ return err;
+
+ udc->dev = &udc->gadget.dev;
+
+ spi_set_drvdata(spi, udc);
+
+ irq = of_irq_get_byname(spi->dev.of_node, "udc");
+ err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0,
+ "max3420", udc);
+ if (err < 0)
+ return err;
+
+ udc->thread_task = kthread_create(max3420_thread, udc,
+ "max3420-thread");
+ if (IS_ERR(udc->thread_task))
+ return PTR_ERR(udc->thread_task);
+
+ irq = of_irq_get_byname(spi->dev.of_node, "vbus");
+ if (irq <= 0) { /* no vbus irq implies self-powered design */
+ udc->is_selfpowered = 1;
+ udc->vbus_active = true;
+ udc->todo |= UDC_START;
+ usb_udc_vbus_handler(&udc->gadget, udc->vbus_active);
+ usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
+ max3420_start(udc);
+ } else {
+ udc->is_selfpowered = 0;
+ /* Detect current vbus status */
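+ /* an all-ones readback suggests an unpowered chip, i.e. no VBUS */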
+ spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 8);
+ if (reg[7] != 0xff)
+ udc->vbus_active = true;
+
+ err = devm_request_irq(&spi->dev, irq,
+ max3420_vbus_handler, 0, "vbus", udc);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int max3420_remove(struct spi_device *spi)
+{
+ struct max3420_udc *udc = spi_get_drvdata(spi);
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ /* kthread_stop() sleeps; never call it with a spinlock held */
+ kthread_stop(udc->thread_task);
+
+ return 0;
+}
+
+static const struct of_device_id max3420_udc_of_match[] = {
+ { .compatible = "maxim,max3420-udc" },
+ { .compatible = "maxim,max3421-udc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max3420_udc_of_match);
+
+static struct spi_driver max3420_driver = {
+ .driver = {
+ .name = "max3420-udc",
+ .of_match_table = of_match_ptr(max3420_udc_of_match),
+ },
+ .probe = max3420_probe,
+ .remove = max3420_remove,
+};
+
+module_spi_driver(max3420_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_LICENSE("GPL");
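A condensed sketch of the deferral pattern the driver above uses throughout: the hard IRQ and the gadget ops only record work bits in a todo word and wake a kthread, which performs the SPI transfers later, since those may sleep. Names here are hypothetical, not the driver's:

    #include <linux/bits.h>
    #include <linux/interrupt.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct defer_ctx {
            spinlock_t lock;
            u32 todo;                       /* pending work bits */
            struct task_struct *thread;     /* does the SPI I/O */
    };

    #define DEFER_EVENT     BIT(0)

    static irqreturn_t defer_hard_irq(int irq, void *data)
    {
            struct defer_ctx *c = data;
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);
            c->todo |= DEFER_EVENT;         /* record only; no I/O here */
            spin_unlock_irqrestore(&c->lock, flags);

            wake_up_process(c->thread);     /* thread runs the SPI transfer */
            return IRQ_HANDLED;
    }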
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index a8273b589456..5af0fe9c61d7 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev)
err_req:
release_mem_region(base, len);
err:
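+	/* also free the device struct allocated earlier in probe */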
+ kfree(dev);
+
return ret;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 1fd1b9186e46..5eff85eeaa5a 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2861,6 +2861,8 @@ static void ep_clear_seqnum(struct net2280_ep *ep)
static void handle_stat0_irqs_superspeed(struct net2280 *dev,
struct net2280_ep *ep, struct usb_ctrlrequest r)
{
+ struct net2280_ep *e;
+ u16 status;
int tmp = 0;
#define w_value le16_to_cpu(r.wValue)
@@ -2868,9 +2870,6 @@ static void handle_stat0_irqs_superspeed(struct net2280 *dev,
#define w_length le16_to_cpu(r.wLength)
switch (r.bRequest) {
- struct net2280_ep *e;
- u16 status;
-
case USB_REQ_SET_CONFIGURATION:
dev->addressed_state = !w_value;
goto usb3_delegate;
@@ -3857,7 +3856,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
.id_table = pci_ids,
.probe = net2280_probe,
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index bd12417996db..bf87c6c0d7f6 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -3001,7 +3001,7 @@ static struct platform_driver udc_driver = {
.suspend = omap_udc_suspend,
.resume = omap_udc_resume,
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
},
};
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 582a16165ea9..537094b485bf 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1968,7 +1968,7 @@ clean_up2:
static struct platform_driver r8a66597_driver = {
.remove = r8a66597_remove,
.driver = {
- .name = (char *) udc_name,
+ .name = udc_name,
},
};
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index c5c3c14df67a..0c418ce50ba0 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2355,14 +2355,14 @@ static const struct usb_gadget_ops renesas_usb3_gadget_ops = {
.set_selfpowered = renesas_usb3_set_selfpowered,
};
-static enum usb_role renesas_usb3_role_switch_get(struct device *dev)
+static enum usb_role renesas_usb3_role_switch_get(struct usb_role_switch *sw)
{
- struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+ struct renesas_usb3 *usb3 = usb_role_switch_get_drvdata(sw);
enum usb_role cur_role;
- pm_runtime_get_sync(dev);
+ pm_runtime_get_sync(usb3_to_dev(usb3));
cur_role = usb3_is_host(usb3) ? USB_ROLE_HOST : USB_ROLE_DEVICE;
- pm_runtime_put(dev);
+ pm_runtime_put(usb3_to_dev(usb3));
return cur_role;
}
@@ -2372,7 +2372,7 @@ static void handle_ext_role_switch_states(struct device *dev,
{
struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
struct device *host = usb3->host_dev;
- enum usb_role cur_role = renesas_usb3_role_switch_get(dev);
+ enum usb_role cur_role = renesas_usb3_role_switch_get(usb3->role_sw);
switch (role) {
case USB_ROLE_NONE:
@@ -2424,7 +2424,7 @@ static void handle_role_switch_states(struct device *dev,
{
struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
struct device *host = usb3->host_dev;
- enum usb_role cur_role = renesas_usb3_role_switch_get(dev);
+ enum usb_role cur_role = renesas_usb3_role_switch_get(usb3->role_sw);
if (cur_role == USB_ROLE_HOST && role == USB_ROLE_DEVICE) {
device_release_driver(host);
@@ -2438,19 +2438,19 @@ static void handle_role_switch_states(struct device *dev,
}
}
-static int renesas_usb3_role_switch_set(struct device *dev,
+static int renesas_usb3_role_switch_set(struct usb_role_switch *sw,
enum usb_role role)
{
- struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+ struct renesas_usb3 *usb3 = usb_role_switch_get_drvdata(sw);
- pm_runtime_get_sync(dev);
+ pm_runtime_get_sync(usb3_to_dev(usb3));
if (usb3->role_sw_by_connector)
- handle_ext_role_switch_states(dev, role);
+ handle_ext_role_switch_states(usb3_to_dev(usb3), role);
else
- handle_role_switch_states(dev, role);
+ handle_role_switch_states(usb3_to_dev(usb3), role);
- pm_runtime_put(dev);
+ pm_runtime_put(usb3_to_dev(usb3));
return 0;
}
@@ -2831,6 +2831,8 @@ static int renesas_usb3_probe(struct platform_device *pdev)
renesas_usb3_role_switch_desc.fwnode = dev_fwnode(&pdev->dev);
}
+ renesas_usb3_role_switch_desc.driver_data = usb3;
+
INIT_WORK(&usb3->role_work, renesas_usb3_role_work);
usb3->role_sw = usb_role_switch_register(&pdev->dev,
&renesas_usb3_role_switch_desc);
@@ -2906,7 +2908,7 @@ static struct platform_driver renesas_usb3_driver = {
.probe = renesas_usb3_probe,
.remove = renesas_usb3_remove,
.driver = {
- .name = (char *)udc_name,
+ .name = udc_name,
.pm = &renesas_usb3_pm_ops,
.of_match_table = of_match_ptr(usb3_of_match),
},
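The renesas_usb3 hunks track a usb_role_switch API change: the get/set callbacks now receive the switch handle rather than a struct device, and driver context is registered through the descriptor's driver_data (see the probe hunk above). A sketch of that plumbing with invented names:

    #include <linux/usb/role.h>

    struct example_ctx {
            bool is_host;
    };

    static enum usb_role example_role_get(struct usb_role_switch *sw)
    {
            struct example_ctx *ctx = usb_role_switch_get_drvdata(sw);

            return ctx->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
    }

    /* at registration time:
     *   desc.driver_data = ctx;
     *   sw = usb_role_switch_register(dev, &desc);
     */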
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 21252fbc0319..aaca1b0a2f59 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1285,7 +1285,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
hsudc->supplies);
if (ret != 0) {
- dev_err(dev, "failed to request supplies: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to request supplies: %d\n", ret);
goto err_supplies;
}
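The s3c-hsudc hunk hand-codes the usual rule that -EPROBE_DEFER is expected and not worth an error message. Where dev_err_probe() is available (it landed around this kernel's era; treat that as an assumption), the same intent, minus this function's goto-based unwinding, reads:

    ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
                                  hsudc->supplies);
    if (ret)
            return dev_err_probe(dev, ret, "failed to request supplies\n");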
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 634c2c19a176..dfabc54cdc27 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -26,7 +26,9 @@
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
#include <linux/usb/role.h>
+#include <linux/usb/phy.h>
#include <linux/workqueue.h>
/* XUSB_DEV registers */
@@ -477,17 +479,21 @@ struct tegra_xudc {
struct clk_bulk_data *clks;
- enum usb_role device_mode;
- struct usb_role_switch *usb_role_sw;
+ bool device_mode;
struct work_struct usb_role_sw_work;
- struct phy *usb3_phy;
- struct phy *utmi_phy;
+ struct phy **usb3_phy;
+ struct phy *curr_usb3_phy;
+ struct phy **utmi_phy;
+ struct phy *curr_utmi_phy;
struct tegra_xudc_save_regs saved_regs;
bool suspended;
bool powergated;
+ struct usb_phy **usbphy;
+ struct notifier_block vbus_nb;
+
struct completion disconnect_complete;
bool selfpowered;
@@ -517,6 +523,7 @@ struct tegra_xudc_soc {
unsigned int num_supplies;
const char * const *clock_names;
unsigned int num_clks;
+ unsigned int num_phys;
bool u1_enable;
bool u2_enable;
bool lpm_enable;
@@ -598,19 +605,18 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
pm_runtime_get_sync(xudc->dev);
- err = phy_power_on(xudc->utmi_phy);
+ err = phy_power_on(xudc->curr_utmi_phy);
if (err < 0)
dev_err(xudc->dev, "utmi power on failed %d\n", err);
- err = phy_power_on(xudc->usb3_phy);
+ err = phy_power_on(xudc->curr_usb3_phy);
if (err < 0)
dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);
dev_dbg(xudc->dev, "device mode on\n");
- tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
-
- xudc->device_mode = USB_ROLE_DEVICE;
+ phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
+ USB_ROLE_DEVICE);
}
static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
@@ -625,7 +631,7 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
reinit_completion(&xudc->disconnect_complete);
- tegra_xusb_padctl_set_vbus_override(xudc->padctl, false);
+ phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);
pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
PORTSC_PLS_SHIFT;
@@ -643,8 +649,6 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
xudc_writel(xudc, val, PORTSC);
}
- xudc->device_mode = USB_ROLE_NONE;
-
/* Wait for disconnect event. */
if (connected)
wait_for_completion(&xudc->disconnect_complete);
@@ -652,11 +656,11 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
/* Make sure interrupt handler has completed before powergating. */
synchronize_irq(xudc->irq);
- err = phy_power_off(xudc->utmi_phy);
+ err = phy_power_off(xudc->curr_utmi_phy);
if (err < 0)
dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);
- err = phy_power_off(xudc->usb3_phy);
+ err = phy_power_off(xudc->curr_usb3_phy);
if (err < 0)
dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);
@@ -668,29 +672,57 @@ static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
usb_role_sw_work);
- if (!xudc->usb_role_sw ||
- usb_role_switch_get_role(xudc->usb_role_sw) == USB_ROLE_DEVICE)
+ if (xudc->device_mode)
tegra_xudc_device_mode_on(xudc);
else
tegra_xudc_device_mode_off(xudc);
+}
+static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
+ struct usb_phy *usbphy)
+{
+ unsigned int i;
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
+ return i;
+ }
+
+ dev_info(xudc->dev, "phy index could not be found for shared USB PHY");
+ return -1;
}
-static int tegra_xudc_usb_role_sw_set(struct device *dev, enum usb_role role)
+static int tegra_xudc_vbus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct tegra_xudc *xudc = dev_get_drvdata(dev);
- unsigned long flags;
+ struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
+ vbus_nb);
+ struct usb_phy *usbphy = (struct usb_phy *)data;
+ int phy_index;
- dev_dbg(dev, "%s role is %d\n", __func__, role);
+ dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);
- spin_lock_irqsave(&xudc->lock, flags);
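+ /* Skip notifications that do not change the current role. */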
+ if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
+ (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
+ dev_dbg(xudc->dev, "Same role(%d) received. Ignore",
+ xudc->device_mode);
+ return NOTIFY_OK;
+ }
- if (!xudc->suspended)
- schedule_work(&xudc->usb_role_sw_work);
+ xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS);
- spin_unlock_irqrestore(&xudc->lock, flags);
+ phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
+ dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
+ phy_index);
- return 0;
+ if (!xudc->suspended && phy_index != -1) {
+ xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
+ xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
+ schedule_work(&xudc->usb_role_sw_work);
+ }
+
+ return NOTIFY_OK;
}
static void tegra_xudc_plc_reset_work(struct work_struct *work)
@@ -708,9 +740,11 @@ static void tegra_xudc_plc_reset_work(struct work_struct *work)
if (pls == PORTSC_PLS_INACTIVE) {
dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
- tegra_xusb_padctl_set_vbus_override(xudc->padctl,
- false);
- tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+ phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
+ USB_ROLE_NONE);
+ phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
+ USB_ROLE_DEVICE);
+
xudc->wait_csc = false;
}
}
@@ -729,8 +763,7 @@ static void tegra_xudc_port_reset_war_work(struct work_struct *work)
spin_lock_irqsave(&xudc->lock, flags);
- if ((xudc->device_mode == USB_ROLE_DEVICE)
- && xudc->wait_for_sec_prc) {
+ if (xudc->device_mode && xudc->wait_for_sec_prc) {
pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
PORTSC_PLS_SHIFT;
dev_dbg(xudc->dev, "pls = %x\n", pls);
@@ -738,7 +771,8 @@ static void tegra_xudc_port_reset_war_work(struct work_struct *work)
if (pls == PORTSC_PLS_DISABLED) {
dev_dbg(xudc->dev, "toggle vbus\n");
/* PRC doesn't complete in 100ms, toggle the vbus */
- ret = tegra_phy_xusb_utmi_port_reset(xudc->utmi_phy);
+ ret = tegra_phy_xusb_utmi_port_reset(
+ xudc->curr_utmi_phy);
if (ret == 1)
xudc->wait_for_sec_prc = 0;
}
@@ -1927,6 +1961,7 @@ static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
unsigned long flags;
u32 val;
int ret;
+ unsigned int i;
if (!driver)
return -EINVAL;
@@ -1962,6 +1997,10 @@ static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
xudc_writel(xudc, val, CTRL);
}
+ for (i = 0; i < xudc->soc->num_phys; i++)
+ if (xudc->usbphy[i])
+ otg_set_peripheral(xudc->usbphy[i]->otg, gadget);
+
xudc->driver = driver;
unlock:
dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
@@ -1977,11 +2016,16 @@ static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
struct tegra_xudc *xudc = to_xudc(gadget);
unsigned long flags;
u32 val;
+ unsigned int i;
pm_runtime_get_sync(xudc->dev);
spin_lock_irqsave(&xudc->lock, flags);
+ for (i = 0; i < xudc->soc->num_phys; i++)
+ if (xudc->usbphy[i])
+ otg_set_peripheral(xudc->usbphy[i]->otg, NULL);
+
val = xudc_readl(xudc, CTRL);
val &= ~(CTRL_IE | CTRL_ENABLE);
xudc_writel(xudc, val, CTRL);
@@ -3314,33 +3358,120 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
}
-static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
+static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
- int err;
+ int err = 0, usb3;
+ unsigned int i;
- err = phy_init(xudc->utmi_phy);
- if (err < 0) {
- dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
- return err;
- }
+ xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ sizeof(*xudc->utmi_phy), GFP_KERNEL);
+ if (!xudc->utmi_phy)
+ return -ENOMEM;
- err = phy_init(xudc->usb3_phy);
- if (err < 0) {
- dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
- goto exit_utmi_phy;
+ xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ sizeof(*xudc->usb3_phy), GFP_KERNEL);
+ if (!xudc->usb3_phy)
+ return -ENOMEM;
+
+ xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ sizeof(*xudc->usbphy), GFP_KERNEL);
+ if (!xudc->usbphy)
+ return -ENOMEM;
+
+ xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
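+ /* buffer sized to hold phy names like "usb2-0" and "usb3-1" */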
+ char phy_name[] = "usb.-.";
+
+ /* Get USB2 phy */
+ snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
+ xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+ if (IS_ERR(xudc->utmi_phy[i])) {
+ err = PTR_ERR(xudc->utmi_phy[i]);
+ if (err != -EPROBE_DEFER)
+ dev_err(xudc->dev, "failed to get usb2-%d phy: %d\n",
+ i, err);
+
+ goto clean_up;
+ } else if (xudc->utmi_phy[i]) {
+ /* Get usb-phy, if utmi phy is available */
+ xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
+ xudc->utmi_phy[i]->dev.of_node,
+ &xudc->vbus_nb);
+ if (IS_ERR(xudc->usbphy[i])) {
+ err = PTR_ERR(xudc->usbphy[i]);
+ dev_err(xudc->dev, "failed to get usbphy-%d: %d\n",
+ i, err);
+ goto clean_up;
+ }
+ } else if (!xudc->utmi_phy[i]) {
+ /* If the UTMI phy is not available, skip getting the USB3 phy. */
+ continue;
+ }
+
+ /* Get USB3 phy */
+ usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+ if (usb3 < 0)
+ continue;
+
+ snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
+ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+ if (IS_ERR(xudc->usb3_phy[i])) {
+ err = PTR_ERR(xudc->usb3_phy[i]);
+ if (err != -EPROBE_DEFER)
+ dev_err(xudc->dev, "failed to get usb3-%d phy: %d\n",
+ usb3, err);
+
+ goto clean_up;
+ } else if (xudc->usb3_phy[i])
+ dev_dbg(xudc->dev, "usb3_phy-%d registered", usb3);
}
- return 0;
+ return err;
+
+clean_up:
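+ /* The phys are devm-managed; just drop the stale pointers on error. */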
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ xudc->usb3_phy[i] = NULL;
+ xudc->utmi_phy[i] = NULL;
+ xudc->usbphy[i] = NULL;
+ }
-exit_utmi_phy:
- phy_exit(xudc->utmi_phy);
return err;
}
static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
{
- phy_exit(xudc->usb3_phy);
- phy_exit(xudc->utmi_phy);
+ unsigned int i;
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ phy_exit(xudc->usb3_phy[i]);
+ phy_exit(xudc->utmi_phy[i]);
+ }
+}
+
+static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
+{
+ int err;
+ unsigned int i;
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ err = phy_init(xudc->utmi_phy[i]);
+ if (err < 0) {
+ dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
+ goto exit_phy;
+ }
+
+ err = phy_init(xudc->usb3_phy[i]);
+ if (err < 0) {
+ dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
+ goto exit_phy;
+ }
+ }
+ return 0;
+
+exit_phy:
+ tegra_xudc_phy_exit(xudc);
+ return err;
}
static const char * const tegra210_xudc_supply_names[] = {
@@ -3368,6 +3499,7 @@ static struct tegra_xudc_soc tegra210_xudc_soc_data = {
.num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
.clock_names = tegra210_xudc_clock_names,
.num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
+ .num_phys = 4,
.u1_enable = false,
.u2_enable = true,
.lpm_enable = false,
@@ -3380,6 +3512,7 @@ static struct tegra_xudc_soc tegra210_xudc_soc_data = {
static struct tegra_xudc_soc tegra186_xudc_soc_data = {
.clock_names = tegra186_xudc_clock_names,
.num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .num_phys = 4,
.u1_enable = true,
.u2_enable = true,
.lpm_enable = false,
@@ -3457,7 +3590,6 @@ static int tegra_xudc_probe(struct platform_device *pdev)
{
struct tegra_xudc *xudc;
struct resource *res;
- struct usb_role_switch_desc role_sx_desc = { 0 };
unsigned int i;
int err;
@@ -3492,11 +3624,8 @@ static int tegra_xudc_probe(struct platform_device *pdev)
}
xudc->irq = platform_get_irq(pdev, 0);
- if (xudc->irq < 0) {
- dev_err(xudc->dev, "failed to get IRQ: %d\n",
- xudc->irq);
+ if (xudc->irq < 0)
return xudc->irq;
- }
err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
dev_name(&pdev->dev), xudc);
@@ -3546,19 +3675,9 @@ static int tegra_xudc_probe(struct platform_device *pdev)
goto put_padctl;
}
- xudc->usb3_phy = devm_phy_optional_get(&pdev->dev, "usb3");
- if (IS_ERR(xudc->usb3_phy)) {
- err = PTR_ERR(xudc->usb3_phy);
- dev_err(xudc->dev, "failed to get usb3 phy: %d\n", err);
- goto disable_regulator;
- }
-
- xudc->utmi_phy = devm_phy_optional_get(&pdev->dev, "usb2");
- if (IS_ERR(xudc->utmi_phy)) {
- err = PTR_ERR(xudc->utmi_phy);
- dev_err(xudc->dev, "failed to get usb2 phy: %d\n", err);
+ err = tegra_xudc_phy_get(xudc);
+ if (err)
goto disable_regulator;
- }
err = tegra_xudc_powerdomain_init(xudc);
if (err)
@@ -3587,24 +3706,6 @@ static int tegra_xudc_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&xudc->port_reset_war_work,
tegra_xudc_port_reset_war_work);
- if (of_property_read_bool(xudc->dev->of_node, "usb-role-switch")) {
- role_sx_desc.set = tegra_xudc_usb_role_sw_set;
- role_sx_desc.fwnode = dev_fwnode(xudc->dev);
-
- xudc->usb_role_sw = usb_role_switch_register(xudc->dev,
- &role_sx_desc);
- if (IS_ERR(xudc->usb_role_sw)) {
- err = PTR_ERR(xudc->usb_role_sw);
- dev_err(xudc->dev, "Failed to register USB role SW: %d",
- err);
- goto free_eps;
- }
- } else {
- /* Set the mode as device mode and this keeps phy always ON */
- dev_info(xudc->dev, "Set usb role to device mode always");
- schedule_work(&xudc->usb_role_sw_work);
- }
-
pm_runtime_enable(&pdev->dev);
xudc->gadget.ops = &tegra_xudc_gadget_ops;
@@ -3639,15 +3740,12 @@ put_padctl:
static int tegra_xudc_remove(struct platform_device *pdev)
{
struct tegra_xudc *xudc = platform_get_drvdata(pdev);
+ unsigned int i;
pm_runtime_get_sync(xudc->dev);
cancel_delayed_work(&xudc->plc_reset_work);
-
- if (xudc->usb_role_sw) {
- usb_role_switch_unregister(xudc->usb_role_sw);
- cancel_work_sync(&xudc->usb_role_sw_work);
- }
+ cancel_work_sync(&xudc->usb_role_sw_work);
usb_del_gadget_udc(&xudc->gadget);
@@ -3658,8 +3756,10 @@ static int tegra_xudc_remove(struct platform_device *pdev)
regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
- phy_power_off(xudc->utmi_phy);
- phy_power_off(xudc->usb3_phy);
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ phy_power_off(xudc->utmi_phy[i]);
+ phy_power_off(xudc->usb3_phy[i]);
+ }
tegra_xudc_phy_exit(xudc);
@@ -3740,11 +3840,11 @@ static int __maybe_unused tegra_xudc_suspend(struct device *dev)
flush_work(&xudc->usb_role_sw_work);
- /* Forcibly disconnect before powergating. */
- tegra_xudc_device_mode_off(xudc);
-
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev)) {
+ /* Forcibly disconnect before powergating. */
+ tegra_xudc_device_mode_off(xudc);
tegra_xudc_powergate(xudc);
+ }
pm_runtime_disable(dev);
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index bd4f6ef534d9..1300c457d9ed 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -110,11 +110,12 @@ static int mv_ehci_probe(struct platform_device *pdev)
struct resource *r;
int retval = -ENODEV;
u32 offset;
+ u32 status;
if (usb_disabled())
return -ENODEV;
- hcd = usb_create_hcd(&ehci_platform_hc_driver, &pdev->dev, "mv ehci");
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
@@ -213,6 +214,14 @@ static int mv_ehci_probe(struct platform_device *pdev)
device_wakeup_enable(hcd->self.controller);
}
+ if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_HSIC) {
+ status = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ /* These "reserved" bits actually enable HSIC mode. */
+ status |= BIT(25);
+ status &= ~GENMASK(31, 30);
+ ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+ }
+
dev_info(&pdev->dev,
"successful find EHCI device with regs 0x%p irq %d"
" working in %s mode\n", hcd->regs, hcd->irq,
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index b0882c13a1d1..1a48ab1bd3b2 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -384,7 +384,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver ehci_pci_driver = {
- .name = (char *) hcd_name,
+ .name = hcd_name,
.id_table = pci_ids,
.probe = ehci_pci_probe,
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 769749ca5961..e4fc3f66d43b 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -29,6 +29,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/sys_soc.h>
+#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ehci_pdriver.h>
@@ -44,6 +46,9 @@ struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
struct reset_control *rsts;
bool reset_on_resume;
+ bool quirk_poll;
+ struct timer_list poll_timer;
+ struct delayed_work poll_work;
};
static const char hcd_name[] = "ehci-platform";
@@ -118,6 +123,111 @@ static struct usb_ehci_pdata ehci_platform_defaults = {
.power_off = ehci_platform_power_off,
};
+/**
+ * quirk_poll_check_port_status - Poll port_status to detect a stuck port
+ * @ehci: the ehci hcd pointer
+ *
+ * EHCI/OHCI controllers on R-Car Gen3 SoCs can, very rarely, get stuck
+ * after a full/low speed USB device is disconnected. To detect such a
+ * situation, the EHCI PORTSC register has to be polled.
+ *
+ * Return: true if the controller's port_status indicates it is stuck
+ */
+static bool quirk_poll_check_port_status(struct ehci_hcd *ehci)
+{
+ u32 port_status = ehci_readl(ehci, &ehci->regs->port_status[0]);
+
+ if (!(port_status & PORT_OWNER) &&
+ (port_status & PORT_POWER) &&
+ !(port_status & PORT_CONNECT) &&
+ (port_status & PORT_LS_MASK))
+ return true;
+
+ return false;
+}
+
+/**
+ * quirk_poll_rebind_companion - rebind companion device to recover
+ * @ehci: the ehci hcd pointer
+ *
+ * EHCI/OHCI controllers on R-Car Gen3 SoCs can, very rarely, get stuck
+ * after a full/low speed USB device is disconnected. To recover from
+ * such a situation, the OHCI functional state has to be changed.
+ */
+static void quirk_poll_rebind_companion(struct ehci_hcd *ehci)
+{
+ struct device *companion_dev;
+ struct usb_hcd *hcd = ehci_to_hcd(ehci);
+
+ companion_dev = usb_of_get_companion_dev(hcd->self.controller);
+ if (!companion_dev)
+ return;
+
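+ /* Unbind and rebind the companion OHCI to reset its functional state. */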
+ device_release_driver(companion_dev);
+ if (device_attach(companion_dev) < 0)
+ ehci_err(ehci, "%s: failed\n", __func__);
+
+ put_device(companion_dev);
+}
+
+static void quirk_poll_work(struct work_struct *work)
+{
+ struct ehci_platform_priv *priv =
+ container_of(to_delayed_work(work), struct ehci_platform_priv,
+ poll_work);
+ struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd,
+ priv);
+
+ /* check the status twice to reduce misdetection rate */
+ if (!quirk_poll_check_port_status(ehci))
+ return;
+ udelay(10);
+ if (!quirk_poll_check_port_status(ehci))
+ return;
+
+ ehci_dbg(ehci, "%s: detected getting stuck. rebind now!\n", __func__);
+ quirk_poll_rebind_companion(ehci);
+}
+
+static void quirk_poll_timer(struct timer_list *t)
+{
+ struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer);
+ struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd,
+ priv);
+
+ if (quirk_poll_check_port_status(ehci)) {
+ /*
+ * Schedule the work to test the port further. Note that the status
+ * update can be delayed during a reconnection, so use a delayed work
+ * with a 5 ms delay to avoid misdetection.
+ */
+ schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(5));
+ }
+
+ mod_timer(&priv->poll_timer, jiffies + HZ);
+}
+
+static void quirk_poll_init(struct ehci_platform_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->poll_work, quirk_poll_work);
+ timer_setup(&priv->poll_timer, quirk_poll_timer, 0);
+ mod_timer(&priv->poll_timer, jiffies + HZ);
+}
+
+static void quirk_poll_end(struct ehci_platform_priv *priv)
+{
+ del_timer_sync(&priv->poll_timer);
+ cancel_delayed_work(&priv->poll_work);
+}
+
+static const struct soc_device_attribute quirk_poll_match[] = {
+ { .family = "R-Car Gen3" },
+ { /* sentinel */ }
+};
+
static int ehci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
@@ -176,6 +286,9 @@ static int ehci_platform_probe(struct platform_device *dev)
"has-transaction-translator"))
hcd->has_tt = 1;
+ if (soc_device_match(quirk_poll_match))
+ priv->quirk_poll = true;
+
for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
if (IS_ERR(priv->clks[clk])) {
@@ -247,6 +360,9 @@ static int ehci_platform_probe(struct platform_device *dev)
device_enable_async_suspend(hcd->self.controller);
platform_set_drvdata(dev, hcd);
+ if (priv->quirk_poll)
+ quirk_poll_init(priv);
+
return err;
err_power:
@@ -273,6 +389,9 @@ static int ehci_platform_remove(struct platform_device *dev)
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
int clk;
+ if (priv->quirk_poll)
+ quirk_poll_end(priv);
+
usb_remove_hcd(hcd);
if (pdata->power_off)
@@ -297,9 +416,13 @@ static int ehci_platform_suspend(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
bool do_wakeup = device_may_wakeup(dev);
int ret;
+ if (priv->quirk_poll)
+ quirk_poll_end(priv);
+
ret = ehci_suspend(hcd, do_wakeup);
if (ret)
return ret;
@@ -331,6 +454,10 @@ static int ehci_platform_resume(struct device *dev)
}
ehci_resume(hcd, priv->reset_on_resume);
+
+ if (priv->quirk_poll)
+ quirk_poll_init(priv);
+
return 0;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index d6433f206c17..10d51daa6a1b 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -282,7 +282,7 @@ done:
struct dma_aligned_buffer {
void *kmalloc_ptr;
void *old_xfer_buffer;
- u8 data[0];
+ u8 data[];
};
static void free_dma_aligned_buffer(struct urb *urb)
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index ac5e967907d1..229b3de319e6 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -255,7 +255,7 @@ struct ehci_hcd { /* one per controller */
struct list_head tt_list;
/* platform-specific data -- must come last */
- unsigned long priv[0] __aligned(sizeof(s64));
+ unsigned long priv[] __aligned(sizeof(s64));
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -460,7 +460,7 @@ struct ehci_iso_sched {
struct list_head td_list;
unsigned span;
unsigned first_packet;
- struct ehci_iso_packet packet[0];
+ struct ehci_iso_packet packet[];
};
/*
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 04733876c9c6..a8e1048278d0 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -396,6 +396,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
case PIPE_CONTROL:
/* 1 td for setup, 1 for ack */
size = 2;
+ fallthrough;
case PIPE_BULK:
/* one td for every 4096 bytes(can be up to 8k) */
size += urb->transfer_buffer_length / 4096;
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index 1b4db95e5c43..6cee40ec65b4 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -490,7 +490,7 @@ struct fotg210_iso_packet {
struct fotg210_iso_sched {
struct list_head td_list;
unsigned span;
- struct fotg210_iso_packet packet[0];
+ struct fotg210_iso_packet packet[];
};
/*
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index f4e13a3fddee..22117a6aeb4a 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -288,7 +288,7 @@ MODULE_DEVICE_TABLE (pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver ohci_pci_driver = {
- .name = (char *) hcd_name,
+ .name = hcd_name,
.id_table = pci_ids,
.probe = usb_hcd_pci_probe,
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index b015b00774b2..27c26ca10bfd 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -337,7 +337,7 @@ typedef struct urb_priv {
u16 length; // # tds in this request
u16 td_cnt; // tds already serviced
struct list_head pending;
- struct td *td [0]; // all TDs in this request
+ struct td *td[]; // all TDs in this request
} urb_priv_t;
@@ -435,7 +435,7 @@ struct ohci_hcd {
struct dentry *debug_dir;
/* platform-specific data -- must come last */
- unsigned long priv[0] __aligned(sizeof(s64));
+ unsigned long priv[] __aligned(sizeof(s64));
};
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 72a34a1eb618..adaf4063690a 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1792,7 +1792,7 @@ struct platform_driver sl811h_driver = {
.suspend = sl811h_suspend,
.resume = sl811h_resume,
.driver = {
- .name = (char *) hcd_name,
+ .name = hcd_name,
},
};
EXPORT_SYMBOL(sl811h_driver);
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 0fa3d72bae26..957c87efc746 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -294,7 +294,7 @@ static const struct pci_device_id uhci_pci_ids[] = { {
MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
static struct pci_driver uhci_pci_driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
.id_table = uhci_pci_ids,
.probe = usb_hcd_pci_probe,
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 3c4abb5a1c3f..5546e7e013a8 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -219,8 +219,7 @@ static int xhci_histb_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- histb->ctrl = devm_ioremap_resource(&pdev->dev, res);
+ histb->ctrl = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(histb->ctrl))
return PTR_ERR(histb->ctrl);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index af92b2576fe9..f37316d2c8fa 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -567,6 +567,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
*/
static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
u16 index, bool on, unsigned long *flags)
+ __must_hold(&xhci->lock)
{
struct xhci_hub *rhub;
struct xhci_port *port;
@@ -617,6 +618,7 @@ static void xhci_port_set_test_mode(struct xhci_hcd *xhci,
static int xhci_enter_test_mode(struct xhci_hcd *xhci,
u16 test_mode, u16 wIndex, unsigned long *flags)
+ __must_hold(&xhci->lock)
{
int i, retval;
@@ -1306,7 +1308,47 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
wIndex, link_state);
goto error;
}
+
+ /*
+ * Set the link to U0; the steps depend on the current link state:
+ * U3: set the link to U0 and wait for u3exit completion.
+ * U1/U2: no PLC completion event, only set the link to U0.
+ * Resume/Recovery: the device initiated U0, only wait for
+ * completion.
+ */
+ if (link_state == USB_SS_PORT_LS_U0) {
+ u32 pls = temp & PORT_PLS_MASK;
+ bool wait_u0 = false;
+
+ /* already in U0 */
+ if (pls == XDEV_U0)
+ break;
+ if (pls == XDEV_U3 ||
+ pls == XDEV_RESUME ||
+ pls == XDEV_RECOVERY) {
+ wait_u0 = true;
+ reinit_completion(&bus_state->u3exit_done[wIndex]);
+ }
+ if (pls <= XDEV_U3) /* U1, U2, U3 */
+ xhci_set_link_state(xhci, ports[wIndex],
+ USB_SS_PORT_LS_U0);
+ if (!wait_u0) {
+ if (pls > XDEV_U3)
+ goto error;
+ break;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
+ msecs_to_jiffies(100)))
+ xhci_dbg(xhci, "missing U0 port change event for port %d\n",
+ wIndex);
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = readl(ports[wIndex]->addr);
+ break;
+ }
+
if (link_state == USB_SS_PORT_LS_U3) {
+ int retries = 16;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
wIndex + 1);
if (slot_id) {
@@ -1317,17 +1359,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
- }
-
- xhci_set_link_state(xhci, ports[wIndex], link_state);
-
- spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20); /* wait device to enter */
- spin_lock_irqsave(&xhci->lock, flags);
-
- temp = readl(ports[wIndex]->addr);
- if (link_state == USB_SS_PORT_LS_U3)
+ xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3);
+ spin_unlock_irqrestore(&xhci->lock, flags);
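+ /* Poll PLS for up to ~128 ms until the link settles in U3. */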
+ while (retries--) {
+ usleep_range(4000, 8000);
+ temp = readl(ports[wIndex]->addr);
+ if ((temp & PORT_PLS_MASK) == XDEV_U3)
+ break;
+ }
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = readl(ports[wIndex]->addr);
bus_state->suspended_ports |= 1 << wIndex;
+ }
break;
case USB_PORT_FEAT_POWER:
/*
@@ -1528,6 +1571,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
}
if ((temp & PORT_RC))
reset_change = true;
+ if (temp & PORT_OC)
+ status = 1;
}
if (!status && !reset_change) {
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
@@ -1593,6 +1638,13 @@ retry:
port_index);
goto retry;
}
+ /* bail out if the port detected an over-current condition */
+ if (t1 & PORT_OC) {
+ bus_state->bus_suspended = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n");
+ return -EBUSY;
+ }
/* suspend ports in U0, or bail out for new connect changes */
if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
if ((t1 & PORT_CSC) && wake_enabled) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 884c601bfa15..9764122c9cdf 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2552,6 +2552,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->usb3_rhub.bus_state.resume_done[i] = 0;
/* Only the USB 2.0 completions will ever be used. */
init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
+ init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
}
if (scratchpad_alloc(xhci, flags))
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 5ac458b7d2e0..acd56517215a 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -95,7 +95,7 @@ struct mu3h_sch_ep_info {
u32 pkts;
u32 cs_count;
u32 burst_mode;
- u32 bw_budget_table[0];
+ u32 bw_budget_table[];
};
#define MU3C_U3_PORT_MAX 4
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1fddc41fa1f3..766b74723e64 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -50,6 +50,7 @@
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
+#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -217,7 +218,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -244,6 +246,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1042)
xhci->quirks |= XHCI_BROKEN_STREAMS;
@@ -550,7 +555,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
- .name = (char *) hcd_name,
+ .name = hcd_name,
.id_table = pci_ids,
.probe = xhci_pci_probe,
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 315b4552693c..ea460b9682d5 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -219,8 +219,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_runtime;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto put_hcd;
@@ -363,6 +362,7 @@ static int xhci_plat_remove(struct platform_device *dev)
struct clk *reg_clk = xhci->reg_clk;
struct usb_hcd *shared_hcd = xhci->shared_hcd;
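+ /* Keep the device powered while both HCDs are being removed. */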
+ pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
usb_remove_hcd(shared_hcd);
@@ -376,8 +376,9 @@ static int xhci_plat_remove(struct platform_device *dev)
clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);
- pm_runtime_set_suspended(&dev->dev);
pm_runtime_disable(&dev->dev);
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_set_suspended(&dev->dev);
return 0;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d23f7408c81f..2c255d0620b0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -547,6 +547,23 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
stream_id);
return;
}
+ /*
+ * A cancelled TD can complete with a stall if HW cached the TRB.
+ * In this case the driver can't find cur_td, but if the ring is empty
+ * we can move the dequeue pointer to the current enqueue position.
+ */
+ if (!cur_td) {
+ if (list_empty(&ep_ring->td_list)) {
+ state->new_deq_seg = ep_ring->enq_seg;
+ state->new_deq_ptr = ep_ring->enqueue;
+ state->new_cycle_state = ep_ring->cycle_state;
+ goto done;
+ } else {
+ xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n");
+ return;
+ }
+ }
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding endpoint context");
@@ -592,6 +609,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = new_seg;
state->new_deq_ptr = new_deq;
+done:
/* Don't update the ring cycle state for the producer (us). */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cycle state = 0x%x", state->new_cycle_state);
@@ -955,6 +973,7 @@ void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
struct xhci_hcd *xhci = ep->xhci;
unsigned long flags;
+ u32 usbsts;
spin_lock_irqsave(&xhci->lock, flags);
@@ -965,8 +984,11 @@ void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
return;
}
+ usbsts = readl(&xhci->op_regs->status);
xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
+ xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(usbsts));
+
ep->ep_state &= ~EP_STOP_CMD_PENDING;
xhci_halt(xhci);
@@ -1677,6 +1699,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
(portsc & PORT_PLS_MASK) == XDEV_U1 ||
(portsc & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
+ complete(&bus_state->u3exit_done[hcd_portnum]);
/* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
@@ -1851,8 +1874,8 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
if (reset_type == EP_HARD_RESET) {
ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
- xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);
- xhci_clear_hub_tt_buffer(xhci, td, ep);
+ xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id,
+ td);
}
xhci_ring_cmd_db(xhci);
}
@@ -1973,11 +1996,18 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
if (trb_comp_code == COMP_STALL_ERROR ||
xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
trb_comp_code)) {
- /* Issue a reset endpoint command to clear the host side
- * halt, followed by a set dequeue command to move the
- * dequeue pointer past the TD.
- * The class driver clears the device side halt later.
+ /*
+ * xhci internal endpoint state will go to a "halt" state for
+ * any stall, including default control pipe protocol stall.
+ * To clear the host side halt we need to issue a reset endpoint
+ * command, followed by a set dequeue command to move past the
+ * TD.
+ * Class drivers clear the device side halt from a functional
+ * stall later. Hub TT buffer should only be cleared for FS/LS
+ * devices behind HS hubs for functional stalls.
*/
+ if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
+ xhci_clear_hub_tt_buffer(xhci, td, ep);
xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
ep_ring->stream_id, td, EP_HARD_RESET);
} else {
@@ -2413,6 +2443,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
status = -EPIPE;
break;
case COMP_SPLIT_TRANSACTION_ERROR:
+ xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
+ slot_id, ep_index);
+ status = -EPROTO;
+ break;
case COMP_USB_TRANSACTION_ERROR:
xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
slot_id, ep_index);
@@ -2530,6 +2564,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
slot_id, ep_index);
}
+ if (trb_comp_code == COMP_STALL_ERROR ||
+ xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+ trb_comp_code)) {
+ xhci_cleanup_halted_endpoint(xhci, slot_id,
+ ep_index,
+ ep_ring->stream_id,
+ NULL,
+ EP_HARD_RESET);
+ }
goto cleanup;
}
@@ -3390,8 +3433,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* New sg entry */
--num_sgs;
sent_len -= block_len;
- if (num_sgs != 0) {
- sg = sg_next(sg);
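+ /* Advance sg first; only dereference it when another entry exists. */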
+ sg = sg_next(sg);
+ if (num_sgs != 0 && sg) {
block_len = sg_dma_len(sg);
addr = (u64) sg_dma_address(sg);
addr += sent_len;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 8163aefc6c6b..2eaf5c0af80c 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -24,6 +24,9 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/role.h>
#include <soc/tegra/pmc.h>
#include "xhci.h"
@@ -203,6 +206,8 @@ struct tegra_xusb_soc {
bool scale_ss_clock;
bool has_ipfs;
+ bool lpm_support;
+ bool otg_reset_sspi;
};
struct tegra_xusb_context {
@@ -250,6 +255,14 @@ struct tegra_xusb {
struct phy **phys;
unsigned int num_phys;
+ struct usb_phy **usbphy;
+ unsigned int num_usb_phys;
+ int otg_usb2_port;
+ int otg_usb3_port;
+ bool host_mode;
+ struct notifier_block id_nb;
+ struct work_struct id_work;
+
/* Firmware loading related */
struct {
size_t size;
@@ -1081,6 +1094,205 @@ static int tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
return err;
}
+static void tegra_xhci_set_port_power(struct tegra_xusb *tegra, bool main,
+ bool set)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ struct usb_hcd *hcd = main ? xhci->main_hcd : xhci->shared_hcd;
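+ /* Clearing SS port power can take longer; poll for up to ~700 ms. */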
+ unsigned int wait = (!main && !set) ? 1000 : 10;
+ u16 typeReq = set ? SetPortFeature : ClearPortFeature;
+ u16 wIndex = main ? tegra->otg_usb2_port + 1 : tegra->otg_usb3_port + 1;
+ u32 status;
+ u32 stat_power = main ? USB_PORT_STAT_POWER : USB_SS_PORT_STAT_POWER;
+ u32 status_val = set ? stat_power : 0;
+
+ dev_dbg(tegra->dev, "%s():%s %s port power\n", __func__,
+ set ? "set" : "clear", main ? "HS" : "SS");
+
+ hcd->driver->hub_control(hcd, typeReq, USB_PORT_FEAT_POWER, wIndex,
+ NULL, 0);
+
+ do {
+ tegra_xhci_hc_driver.hub_control(hcd, GetPortStatus, 0, wIndex,
+ (char *) &status, sizeof(status));
+ if (status_val == (status & stat_power))
+ break;
+
+ if (!main && !set)
+ usleep_range(600, 700);
+ else
+ usleep_range(10, 20);
+ } while (--wait > 0);
+
+ if (status_val != (status & stat_power))
+ dev_info(tegra->dev, "failed to %s %s PP %d\n",
+ set ? "set" : "clear",
+ main ? "HS" : "SS", status);
+}
+
+static struct phy *tegra_xusb_get_phy(struct tegra_xusb *tegra, char *name,
+ int port)
+{
+ unsigned int i, phy_count = 0;
+
+ for (i = 0; i < tegra->soc->num_types; i++) {
+ if (!strncmp(tegra->soc->phy_types[i].name, "usb2",
+ strlen(name)))
+ return tegra->phys[phy_count+port];
+
+ phy_count += tegra->soc->phy_types[i].num;
+ }
+
+ return NULL;
+}
+
+static void tegra_xhci_id_work(struct work_struct *work)
+{
+ struct tegra_xusb *tegra = container_of(work, struct tegra_xusb,
+ id_work);
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ struct tegra_xusb_mbox_msg msg;
+ struct phy *phy = tegra_xusb_get_phy(tegra, "usb2",
+ tegra->otg_usb2_port);
+ u32 status;
+ int ret;
+
+ dev_dbg(tegra->dev, "host mode %s\n", tegra->host_mode ? "on" : "off");
+
+ mutex_lock(&tegra->lock);
+
+ if (tegra->host_mode)
+ phy_set_mode_ext(phy, PHY_MODE_USB_OTG, USB_ROLE_HOST);
+ else
+ phy_set_mode_ext(phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);
+
+ mutex_unlock(&tegra->lock);
+
+ if (tegra->host_mode) {
+ /* switch to host mode */
+ if (tegra->otg_usb3_port >= 0) {
+ if (tegra->soc->otg_reset_sspi) {
+ /* set PP=0 */
+ tegra_xhci_hc_driver.hub_control(
+ xhci->shared_hcd, GetPortStatus,
+ 0, tegra->otg_usb3_port+1,
+ (char *) &status, sizeof(status));
+ if (status & USB_SS_PORT_STAT_POWER)
+ tegra_xhci_set_port_power(tegra, false,
+ false);
+
+ /* reset OTG port SSPI */
+ msg.cmd = MBOX_CMD_RESET_SSPI;
+ msg.data = tegra->otg_usb3_port+1;
+
+ ret = tegra_xusb_mbox_send(tegra, &msg);
+ if (ret < 0) {
+ dev_info(tegra->dev,
+ "failed to RESET_SSPI %d\n",
+ ret);
+ }
+ }
+
+ tegra_xhci_set_port_power(tegra, false, true);
+ }
+
+ tegra_xhci_set_port_power(tegra, true, true);
+
+ } else {
+ if (tegra->otg_usb3_port >= 0)
+ tegra_xhci_set_port_power(tegra, false, false);
+
+ tegra_xhci_set_port_power(tegra, true, false);
+ }
+}
+
+static int tegra_xusb_get_usb2_port(struct tegra_xusb *tegra,
+ struct usb_phy *usbphy)
+{
+ unsigned int i;
+
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ if (tegra->usbphy[i] && usbphy == tegra->usbphy[i])
+ return i;
+ }
+
+ return -1;
+}
+
+static int tegra_xhci_id_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct tegra_xusb *tegra = container_of(nb, struct tegra_xusb,
+ id_nb);
+ struct usb_phy *usbphy = (struct usb_phy *)data;
+
+ dev_dbg(tegra->dev, "%s(): action is %d", __func__, usbphy->last_event);
+
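+ /* Skip events that do not change the current role. */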
+ if ((tegra->host_mode && usbphy->last_event == USB_EVENT_ID) ||
+ (!tegra->host_mode && usbphy->last_event != USB_EVENT_ID)) {
+ dev_dbg(tegra->dev, "Same role(%d) received. Ignore",
+ tegra->host_mode);
+ return NOTIFY_OK;
+ }
+
+ tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy);
+ tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(
+ tegra->padctl,
+ tegra->otg_usb2_port);
+
+ tegra->host_mode = (usbphy->last_event == USB_EVENT_ID);
+
+ schedule_work(&tegra->id_work);
+
+ return NOTIFY_OK;
+}
+
+static int tegra_xusb_init_usb_phy(struct tegra_xusb *tegra)
+{
+ unsigned int i;
+
+ tegra->usbphy = devm_kcalloc(tegra->dev, tegra->num_usb_phys,
+ sizeof(*tegra->usbphy), GFP_KERNEL);
+ if (!tegra->usbphy)
+ return -ENOMEM;
+
+ INIT_WORK(&tegra->id_work, tegra_xhci_id_work);
+ tegra->id_nb.notifier_call = tegra_xhci_id_notify;
+
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i);
+
+ if (!phy)
+ continue;
+
+ tegra->usbphy[i] = devm_usb_get_phy_by_node(tegra->dev,
+ phy->dev.of_node,
+ &tegra->id_nb);
+ if (!IS_ERR(tegra->usbphy[i])) {
+ dev_dbg(tegra->dev, "usbphy-%d registered", i);
+ otg_set_host(tegra->usbphy[i]->otg, &tegra->hcd->self);
+ } else {
+ /*
+ * usb-phy is optional, continue if it's not available.
+ */
+ tegra->usbphy[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra)
+{
+ unsigned int i;
+
+ cancel_work_sync(&tegra->id_work);
+
+ for (i = 0; i < tegra->num_usb_phys; i++)
+ if (tegra->usbphy[i])
+ otg_set_host(tegra->usbphy[i]->otg, NULL);
+}
+
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb *tegra;
@@ -1254,8 +1466,11 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
- for (i = 0; i < tegra->soc->num_types; i++)
+ for (i = 0; i < tegra->soc->num_types; i++) {
+ if (!strncmp(tegra->soc->phy_types[i].name, "usb2", 4))
+ tegra->num_usb_phys = tegra->soc->phy_types[i].num;
tegra->num_phys += tegra->soc->phy_types[i].num;
+ }
tegra->phys = devm_kcalloc(&pdev->dev, tegra->num_phys,
sizeof(*tegra->phys), GFP_KERNEL);
@@ -1384,6 +1599,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto remove_usb3;
}
+ err = tegra_xusb_init_usb_phy(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init USB PHY: %d\n", err);
+ goto remove_usb3;
+ }
+
return 0;
remove_usb3:
@@ -1420,6 +1641,8 @@ static int tegra_xusb_remove(struct platform_device *pdev)
struct tegra_xusb *tegra = platform_get_drvdata(pdev);
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ tegra_xusb_deinit_usb_phy(tegra);
+
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
xhci->shared_hcd = NULL;
@@ -1694,6 +1917,7 @@ static const struct tegra_xusb_soc tegra124_soc = {
},
.scale_ss_clock = true,
.has_ipfs = true,
+ .otg_reset_sspi = false,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
@@ -1733,6 +1957,7 @@ static const struct tegra_xusb_soc tegra210_soc = {
},
.scale_ss_clock = false,
.has_ipfs = true,
+ .otg_reset_sspi = true,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
@@ -1773,12 +1998,14 @@ static const struct tegra_xusb_soc tegra186_soc = {
},
.scale_ss_clock = false,
.has_ipfs = false,
+ .otg_reset_sspi = false,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
},
+ .lpm_support = true,
};
static const char * const tegra194_supply_names[] = {
@@ -1802,12 +2029,14 @@ static const struct tegra_xusb_soc tegra194_soc = {
},
.scale_ss_clock = false,
.has_ipfs = false,
+ .otg_reset_sspi = false,
.mbox = {
.cmd = 0x68,
.data_in = 0x6c,
.data_out = 0x70,
.owner = 0x74,
},
+ .lpm_support = true,
};
MODULE_FIRMWARE("nvidia/tegra194/xusb.bin");
@@ -1832,7 +2061,11 @@ static struct platform_driver tegra_xusb_driver = {
static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
+ struct tegra_xusb *tegra = dev_get_drvdata(dev);
+
xhci->quirks |= XHCI_PLAT;
+ if (tegra && tegra->soc->lpm_support)
+ xhci->quirks |= XHCI_LPM_SUPPORT;
}
static int tegra_xhci_setup(struct usb_hcd *hcd)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dbac0fa9748d..bee5deccc83d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1157,8 +1157,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_zero_64b_regs(xhci);
- xhci_reset(xhci);
+ retval = xhci_reset(xhci);
spin_unlock_irq(&xhci->lock);
+ if (retval)
+ return retval;
xhci_cleanup_msix(xhci);
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
@@ -3029,19 +3031,19 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
added_ctxs, added_ctxs);
}
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
- unsigned int stream_id, struct xhci_td *td)
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_td *td)
{
struct xhci_dequeue_state deq_state;
- struct usb_device *udev = td->urb->dev;
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Cleaning up stalled endpoint ring");
/* We need to move the HW's dequeue pointer past this TD,
* or it will attempt to resend it on the next doorbell ring.
*/
- xhci_find_new_dequeue_state(xhci, udev->slot_id,
- ep_index, stream_id, td, &deq_state);
+ xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
+ &deq_state);
if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
return;
@@ -3052,7 +3054,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Queueing new dequeue state");
- xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+ xhci_queue_new_dequeue_state(xhci, slot_id,
ep_index, &deq_state);
} else {
/* Better hope no one uses the input context between now and the
@@ -3063,7 +3065,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Setting up input context for "
"configure endpoint command");
- xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
+ xhci_setup_input_ctx_for_quirk(xhci, slot_id,
ep_index, &deq_state);
}
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3ecee10fdcdc..86cfefdd6632 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1642,7 +1642,7 @@ struct xhci_scratchpad {
struct urb_priv {
int num_tds;
int num_tds_done;
- struct xhci_td td[0];
+ struct xhci_td td[];
};
/*
@@ -1694,6 +1694,7 @@ struct xhci_bus_state {
/* Which ports are waiting on RExit to U0 transition. */
unsigned long rexit_ports;
struct completion rexit_done[USB_MAXCHILDREN];
+ struct completion u3exit_done[USB_MAXCHILDREN];
};
@@ -1901,7 +1902,7 @@ struct xhci_hcd {
void *dbc;
/* platform-specific data -- must come last */
- unsigned long priv[0] __aligned(sizeof(s64));
+ unsigned long priv[] __aligned(sizeof(s64));
};
/* Platform specific overrides to generic XHCI hc_driver ops */
@@ -2115,8 +2116,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
- unsigned int stream_id, struct xhci_td *td);
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_td *td);
void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
void xhci_handle_command_timeout(struct work_struct *work);
@@ -2589,6 +2591,35 @@ static inline const char *xhci_decode_portsc(u32 portsc)
return str;
}
+static inline const char *xhci_decode_usbsts(u32 usbsts)
+{
+ static char str[256];
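+ /* like the other decoders above, this returns a shared static buffer */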
+ int ret = 0;
+
+ if (usbsts == ~(u32)0)
+ return " 0xffffffff";
+ if (usbsts & STS_HALT)
+ ret += sprintf(str + ret, " HCHalted");
+ if (usbsts & STS_FATAL)
+ ret += sprintf(str + ret, " HSE");
+ if (usbsts & STS_EINT)
+ ret += sprintf(str + ret, " EINT");
+ if (usbsts & STS_PORT)
+ ret += sprintf(str + ret, " PCD");
+ if (usbsts & STS_SAVE)
+ ret += sprintf(str + ret, " SSS");
+ if (usbsts & STS_RESTORE)
+ ret += sprintf(str + ret, " RSS");
+ if (usbsts & STS_SRE)
+ ret += sprintf(str + ret, " SRE");
+ if (usbsts & STS_CNR)
+ ret += sprintf(str + ret, " CNR");
+ if (usbsts & STS_HCE)
+ ret += sprintf(str + ret, " HCE");
+
+ return str;
+}
+
static inline const char *xhci_decode_doorbell(u32 slot, u32 doorbell)
{
static char str[256];
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 834b2494da73..833a460ee55a 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -137,6 +137,16 @@ config USB_APPLEDISPLAY
Say Y here if you want to control the backlight of Apple Cinema
Displays over USB. This driver provides a sysfs interface.
+config APPLE_MFI_FASTCHARGE
+ tristate "Fast charge control for iOS devices"
+ select POWER_SUPPLY
+ help
+ Say Y here if you want to control whether iOS devices will
+ fast charge from the USB interface, as implemented in "MFi"
+ chargers.
+
+ It is safe to say M here.
+
source "drivers/usb/misc/sisusbvga/Kconfig"
config USB_LD
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 0d416eb624bb..da39bddb0604 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_USB_EMI26) += emi26.o
obj-$(CONFIG_USB_EMI62) += emi62.o
obj-$(CONFIG_USB_EZUSB_FX2) += ezusb.o
obj-$(CONFIG_USB_FTDI_ELAN) += ftdi-elan.o
+obj-$(CONFIG_APPLE_MFI_FASTCHARGE) += apple-mfi-fastcharge.o
obj-$(CONFIG_USB_IDMOUSE) += idmouse.o
obj-$(CONFIG_USB_IOWARRIOR) += iowarrior.o
obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c
new file mode 100644
index 000000000000..b403094a6b3a
--- /dev/null
+++ b/drivers/usb/misc/apple-mfi-fastcharge.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Fast-charge control for Apple "MFi" devices
+ *
+ * Copyright (C) 2019 Bastien Nocera <hadess@hadess.net>
+ */
+
+/* Standard include files */
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("Fast-charge control for Apple \"MFi\" devices");
+MODULE_LICENSE("GPL");
+
+#define TRICKLE_CURRENT_MA 0
+#define FAST_CURRENT_MA 2500
+
+#define APPLE_VENDOR_ID 0x05ac /* Apple */
+
+/* The product ID is defined as starting with 0x12nn, as per the
+ * "Choosing an Apple Device USB Configuration" section in
+ * release R9 (2012) of the "MFi Accessory Hardware Specification"
+ *
+ * To distinguish an Apple device, a USB host can check the device
+ * descriptor of attached USB devices for the following fields:
+ * ■ Vendor ID: 0x05AC
+ * ■ Product ID: 0x12nn
+ *
+ * Those checks will be done in .match() and .probe().
+ */
+
+static const struct usb_device_id mfi_fc_id_table[] = {
+ { .idVendor = APPLE_VENDOR_ID,
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR },
+ {},
+};
+
+MODULE_DEVICE_TABLE(usb, mfi_fc_id_table);
+
+/* Driver-local specific stuff */
+struct mfi_device {
+ struct usb_device *udev;
+ struct power_supply *battery;
+ int charge_type;
+};
+
+static int apple_mfi_fc_set_charge_type(struct mfi_device *mfi,
+ const union power_supply_propval *val)
+{
+ int current_ma;
+ int retval;
+ __u8 request_type;
+
+ if (mfi->charge_type == val->intval) {
+ dev_dbg(&mfi->udev->dev, "charge type %d already set\n",
+ mfi->charge_type);
+ return 0;
+ }
+
+ switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_TYPE_TRICKLE:
+ current_ma = TRICKLE_CURRENT_MA;
+ break;
+ case POWER_SUPPLY_CHARGE_TYPE_FAST:
+ current_ma = FAST_CURRENT_MA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ request_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ retval = usb_control_msg(mfi->udev, usb_sndctrlpipe(mfi->udev, 0),
+ 0x40, /* Vendor-defined power request */
+ request_type,
+ current_ma, /* wValue, current offset */
+ current_ma, /* wIndex, current offset */
+ NULL, 0, USB_CTRL_GET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&mfi->udev->dev, "retval = %d\n", retval);
+ return retval;
+ }
+
+ mfi->charge_type = val->intval;
+
+ return 0;
+}
+
+static int apple_mfi_fc_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct mfi_device *mfi = power_supply_get_drvdata(psy);
+
+ dev_dbg(&mfi->udev->dev, "prop: %d\n", psp);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = mfi->charge_type;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ default:
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int apple_mfi_fc_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct mfi_device *mfi = power_supply_get_drvdata(psy);
+ int ret;
+
+ dev_dbg(&mfi->udev->dev, "prop: %d\n", psp);
+
+ ret = pm_runtime_get_sync(&mfi->udev->dev);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = apple_mfi_fc_set_charge_type(mfi, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_mark_last_busy(&mfi->udev->dev);
+ pm_runtime_put_autosuspend(&mfi->udev->dev);
+
+ return ret;
+}
+
+static int apple_mfi_fc_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static enum power_supply_property apple_mfi_fc_properties[] = {
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_SCOPE
+};
+
+static const struct power_supply_desc apple_mfi_fc_desc = {
+ .name = "apple_mfi_fastcharge",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = apple_mfi_fc_properties,
+ .num_properties = ARRAY_SIZE(apple_mfi_fc_properties),
+ .get_property = apple_mfi_fc_get_property,
+ .set_property = apple_mfi_fc_set_property,
+ .property_is_writeable = apple_mfi_fc_property_is_writeable
+};
+
+static int mfi_fc_probe(struct usb_device *udev)
+{
+ struct power_supply_config battery_cfg = {};
+ struct mfi_device *mfi = NULL;
+ int err, idProduct;
+
+ idProduct = le16_to_cpu(udev->descriptor.idProduct);
+ /* See comment above mfi_fc_id_table[] */
+ if (idProduct < 0x1200 || idProduct > 0x12ff)
+ return -ENODEV;
+
+ mfi = kzalloc(sizeof(struct mfi_device), GFP_KERNEL);
+ if (!mfi) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ battery_cfg.drv_data = mfi;
+
+ mfi->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ mfi->battery = power_supply_register(&udev->dev,
+ &apple_mfi_fc_desc,
+ &battery_cfg);
+ if (IS_ERR(mfi->battery)) {
+ dev_err(&udev->dev, "Can't register battery\n");
+ err = PTR_ERR(mfi->battery);
+ goto error;
+ }
+
+ mfi->udev = usb_get_dev(udev);
+ dev_set_drvdata(&udev->dev, mfi);
+
+ return 0;
+
+error:
+ kfree(mfi);
+ return err;
+}
+
+static void mfi_fc_disconnect(struct usb_device *udev)
+{
+ struct mfi_device *mfi;
+
+ mfi = dev_get_drvdata(&udev->dev);
+ if (mfi->battery)
+ power_supply_unregister(mfi->battery);
+ dev_set_drvdata(&udev->dev, NULL);
+ usb_put_dev(mfi->udev);
+ kfree(mfi);
+}
+
+static struct usb_device_driver mfi_fc_driver = {
+ .name = "apple-mfi-fastcharge",
+ .probe = mfi_fc_probe,
+ .disconnect = mfi_fc_disconnect,
+ .id_table = mfi_fc_id_table,
+ .generic_subclass = 1,
+};
+
+static int __init mfi_fc_driver_init(void)
+{
+ return usb_register_device_driver(&mfi_fc_driver, THIS_MODULE);
+}
+
+static void __exit mfi_fc_driver_exit(void)
+{
+ usb_deregister_device_driver(&mfi_fc_driver);
+}
+
+module_init(mfi_fc_driver_init);
+module_exit(mfi_fc_driver_exit);
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 2ab9600d0898..fc8a5da4a07c 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -1199,18 +1199,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
/* High level: Gfx (indexed) register access */
#ifdef CONFIG_USB_SISUSBVGA_CON
-int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
+int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data)
{
return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
}
-int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
+int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data)
{
return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
}
#endif
-int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 data)
{
int ret;
@@ -1220,7 +1220,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
return ret;
}
-int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 *data)
{
int ret;
@@ -1230,7 +1230,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
return ret;
}
-int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
+int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx,
u8 myand, u8 myor)
{
int ret;
@@ -1245,7 +1245,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
}
static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
- int port, u8 idx, u8 data, u8 mask)
+ u32 port, u8 idx, u8 data, u8 mask)
{
int ret;
u8 tmp;
@@ -1258,13 +1258,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
return ret;
}
-int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 myor)
{
return sisusb_setidxregandor(sisusb, port, index, 0xff, myor);
}
-int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand)
{
return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00);
@@ -2785,8 +2785,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig)
static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
struct sisusb_command *y, unsigned long arg)
{
- int retval, port, length;
- u32 address;
+ int retval, length;
+ u32 port, address;
/* All our commands require the device
* to be initialized.
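The int -> u32 conversions above match the u32 port/address fields that this
command handler copies in from userspace, so a port value can no longer be
sign-extended or truncated on its way down. For reference, the indexed-register
helpers wrap the classic two-step VGA idiom; schematically (a sketch, not the
literal driver body):

        /* sisusb_setidxreg(sisusb, port, index, data), in essence: */
        sisusb_setreg(sisusb, port, index);     /* select register "index" */
        sisusb_setreg(sisusb, port + 1, data);  /* write the selected register */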
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index 1782c759c4ad..ace09985dae4 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -812,17 +812,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = {
int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo);
-extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data);
-extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data);
-extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data);
+extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data);
+extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 data);
-extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 * data);
-extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand, u8 myor);
-extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 myor);
-extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand);
void sisusb_delete(struct kref *kref);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index bc5ecd5ff565..39cb14164652 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -414,7 +414,7 @@ static ssize_t mon_text_read_t(struct file *file, char __user *buf,
mon_text_read_head_t(rp, &ptr, ep);
mon_text_read_statset(rp, &ptr, ep);
- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+ ptr.cnt += scnprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
" %d", ep->length);
mon_text_read_data(rp, &ptr, ep);
@@ -462,7 +462,7 @@ static ssize_t mon_text_read_u(struct file *file, char __user *buf,
} else {
mon_text_read_statset(rp, &ptr, ep);
}
- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+ ptr.cnt += scnprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
" %d", ep->length);
mon_text_read_data(rp, &ptr, ep);
@@ -520,7 +520,7 @@ static void mon_text_read_head_t(struct mon_reader_text *rp,
case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break;
default: /* PIPE_BULK */ utype = 'B';
}
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
"%lx %u %c %c%c:%03u:%02u",
ep->id, ep->tstamp, ep->type,
utype, udir, ep->devnum, ep->epnum);
@@ -538,7 +538,7 @@ static void mon_text_read_head_u(struct mon_reader_text *rp,
case USB_ENDPOINT_XFER_CONTROL: utype = 'C'; break;
default: /* PIPE_BULK */ utype = 'B';
}
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
"%lx %u %c %c%c:%d:%03u:%u",
ep->id, ep->tstamp, ep->type,
utype, udir, ep->busnum, ep->devnum, ep->epnum);
@@ -549,7 +549,7 @@ static void mon_text_read_statset(struct mon_reader_text *rp,
{
if (ep->setup_flag == 0) { /* Setup packet is present and captured */
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" s %02x %02x %04x %04x %04x",
ep->setup[0],
ep->setup[1],
@@ -557,10 +557,10 @@ static void mon_text_read_statset(struct mon_reader_text *rp,
(ep->setup[5] << 8) | ep->setup[4],
(ep->setup[7] << 8) | ep->setup[6]);
} else if (ep->setup_flag != '-') { /* Unable to capture setup packet */
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %c __ __ ____ ____ ____", ep->setup_flag);
} else { /* No setup for this kind of URB */
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d", ep->status);
}
}
@@ -568,7 +568,7 @@ static void mon_text_read_statset(struct mon_reader_text *rp,
static void mon_text_read_intstat(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d:%d", ep->status, ep->interval);
}
@@ -576,10 +576,10 @@ static void mon_text_read_isostat(struct mon_reader_text *rp,
struct mon_text_ptr *p, const struct mon_event_text *ep)
{
if (ep->type == 'S') {
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d:%d:%d", ep->status, ep->interval, ep->start_frame);
} else {
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d:%d:%d:%d",
ep->status, ep->interval, ep->start_frame, ep->error_count);
}
@@ -592,7 +592,7 @@ static void mon_text_read_isodesc(struct mon_reader_text *rp,
int i;
const struct mon_iso_desc *dp;
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d", ep->numdesc);
ndesc = ep->numdesc;
if (ndesc > ISODESC_MAX)
@@ -601,7 +601,7 @@ static void mon_text_read_isodesc(struct mon_reader_text *rp,
ndesc = 0;
dp = ep->isodesc;
for (i = 0; i < ndesc; i++) {
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %d:%u:%u", dp->status, dp->offset, dp->length);
dp++;
}
@@ -614,28 +614,28 @@ static void mon_text_read_data(struct mon_reader_text *rp,
if ((data_len = ep->length) > 0) {
if (ep->data_flag == 0) {
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" =");
if (data_len >= DATA_MAX)
data_len = DATA_MAX;
for (i = 0; i < data_len; i++) {
if (i % 4 == 0) {
- p->cnt += snprintf(p->pbuf + p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt,
p->limit - p->cnt,
" ");
}
- p->cnt += snprintf(p->pbuf + p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt,
p->limit - p->cnt,
"%02x", ep->data[i]);
}
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
"\n");
} else {
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt,
" %c\n", ep->data_flag);
}
} else {
- p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n");
+ p->cnt += scnprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n");
}
}
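The wholesale snprintf() -> scnprintf() switch matters because snprintf()
returns the length the output would have had, not what was written. With the
"cnt += ..." accumulation used throughout this file, one oversized field pushes
cnt past limit, and the next "limit - cnt" size argument underflows. A worked
example of the failure mode (illustration only):

        char buf[16];

        /* snprintf() returns 20 here even though only 15 bytes fit, so an
         * accumulated count would now exceed the buffer size and a following
         * "sizeof(buf) - cnt" would underflow to a huge size_t: */
        int cnt = snprintf(buf, sizeof(buf), "%s", "twenty-byte message!");

        /* scnprintf() returns 15, the number of bytes actually stored
         * (excluding the trailing '\0'), so cnt can never pass the limit: */
        cnt = scnprintf(buf, sizeof(buf), "%s", "twenty-byte message!");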
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
index c96e5dab0a48..fdeade6254ae 100644
--- a/drivers/usb/mtu3/mtu3_debugfs.c
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -276,7 +276,7 @@ static const struct file_operations mtu3_ep_fops = {
.release = single_release,
};
-static struct debugfs_reg32 mtu3_prb_regs[] = {
+static const struct debugfs_reg32 mtu3_prb_regs[] = {
dump_prb_reg("enable", U3D_SSUSB_PRB_CTRL0),
dump_prb_reg("byte-sell", U3D_SSUSB_PRB_CTRL1),
dump_prb_reg("byte-selh", U3D_SSUSB_PRB_CTRL2),
@@ -349,7 +349,7 @@ static const struct file_operations mtu3_probe_fops = {
static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
{
struct ssusb_mtk *ssusb = mtu->ssusb;
- struct debugfs_reg32 *regs;
+ const struct debugfs_reg32 *regs;
struct dentry *dir_prb;
int i;
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 08e18448e8b8..04f666e85731 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -320,9 +320,9 @@ void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value);
}
-static int ssusb_role_sw_set(struct device *dev, enum usb_role role)
+static int ssusb_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
{
- struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+ struct ssusb_mtk *ssusb = usb_role_switch_get_drvdata(sw);
bool to_host = false;
if (role == USB_ROLE_HOST)
@@ -334,9 +334,9 @@ static int ssusb_role_sw_set(struct device *dev, enum usb_role role)
return 0;
}
-static enum usb_role ssusb_role_sw_get(struct device *dev)
+static enum usb_role ssusb_role_sw_get(struct usb_role_switch *sw)
{
- struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+ struct ssusb_mtk *ssusb = usb_role_switch_get_drvdata(sw);
enum usb_role role;
role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
@@ -356,6 +356,7 @@ static int ssusb_role_sw_register(struct otg_switch_mtk *otg_sx)
role_sx_desc.set = ssusb_role_sw_set;
role_sx_desc.get = ssusb_role_sw_get;
role_sx_desc.fwnode = dev_fwnode(ssusb->dev);
+ role_sx_desc.driver_data = ssusb;
otg_sx->role_sw = usb_role_switch_register(ssusb->dev, &role_sx_desc);
return PTR_ERR_OR_ZERO(otg_sx->role_sw);
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index eb2ded1026ee..3b0d1c20ebe6 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -110,9 +110,11 @@ config USB_MUSB_UX500
config USB_MUSB_JZ4740
tristate "JZ4740"
+ depends on OF
depends on MIPS || COMPILE_TEST
depends on USB_MUSB_GADGET
depends on USB=n || USB_OTG_BLACKLIST_HUB
+ select USB_ROLE_SWITCH
config USB_MUSB_MEDIATEK
tristate "MediaTek platforms"
@@ -144,7 +146,7 @@ config USB_UX500_DMA
config USB_INVENTRA_DMA
bool 'Inventra'
- depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK || USB_MUSB_JZ4740
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index bc0109f4700b..e64dd30e80e7 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -12,23 +12,29 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/usb/role.h>
#include <linux/usb/usb_phy_generic.h>
#include "musb_core.h"
struct jz4740_glue {
struct platform_device *pdev;
+ struct musb *musb;
struct clk *clk;
+ struct usb_role_switch *role_sw;
};
static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
{
unsigned long flags;
- irqreturn_t retval = IRQ_NONE;
+ irqreturn_t retval = IRQ_NONE, retval_dma = IRQ_NONE;
struct musb *musb = __hci;
spin_lock_irqsave(&musb->lock, flags);
+ if (IS_ENABLED(CONFIG_USB_INVENTRA_DMA) && musb->dma_controller)
+ retval_dma = dma_controller_irq(irq, musb->dma_controller);
+
musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
@@ -46,7 +52,10 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
spin_unlock_irqrestore(&musb->lock, flags);
- return retval;
+ if (retval == IRQ_HANDLED || retval_dma == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
}
static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
@@ -66,11 +75,40 @@ static const struct musb_hdrc_config jz4740_musb_config = {
.fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
};
+static int jz4740_musb_role_switch_set(struct usb_role_switch *sw,
+ enum usb_role role)
+{
+ struct jz4740_glue *glue = usb_role_switch_get_drvdata(sw);
+ struct usb_phy *phy = glue->musb->xceiv;
+
+ switch (role) {
+ case USB_ROLE_NONE:
+ atomic_notifier_call_chain(&phy->notifier, USB_EVENT_NONE, phy);
+ break;
+ case USB_ROLE_DEVICE:
+ atomic_notifier_call_chain(&phy->notifier, USB_EVENT_VBUS, phy);
+ break;
+ case USB_ROLE_HOST:
+ atomic_notifier_call_chain(&phy->notifier, USB_EVENT_ID, phy);
+ break;
+ }
+
+ return 0;
+}
+
static int jz4740_musb_init(struct musb *musb)
{
struct device *dev = musb->controller->parent;
+ struct jz4740_glue *glue = dev_get_drvdata(dev);
+ struct usb_role_switch_desc role_sw_desc = {
+ .set = jz4740_musb_role_switch_set,
+ .driver_data = glue,
+ .fwnode = dev_fwnode(dev),
+ };
int err;
+ glue->musb = musb;
+
if (dev->of_node)
musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
else
@@ -82,6 +120,12 @@ static int jz4740_musb_init(struct musb *musb)
return err;
}
+ glue->role_sw = usb_role_switch_register(dev, &role_sw_desc);
+ if (IS_ERR(glue->role_sw)) {
+ dev_err(dev, "Failed to register USB role switch");
+ return PTR_ERR(glue->role_sw);
+ }
+
/*
* Silicon does not implement ConfigData register.
* Set dyn_fifo to avoid reading EP config from hardware.
@@ -93,14 +137,24 @@ static int jz4740_musb_init(struct musb *musb)
return 0;
}
-/*
- * DMA has not been confirmed to work with CONFIG_USB_INVENTRA_DMA,
- * so let's not set up the dma function pointers yet.
- */
+static int jz4740_musb_exit(struct musb *musb)
+{
+ struct jz4740_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ usb_role_switch_unregister(glue->role_sw);
+
+ return 0;
+}
+
static const struct musb_platform_ops jz4740_musb_ops = {
.quirks = MUSB_DMA_INVENTRA | MUSB_INDEXED_EP,
.fifo_mode = 2,
.init = jz4740_musb_init,
+ .exit = jz4740_musb_exit,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create_noirq,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
};
static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
@@ -109,10 +163,37 @@ static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
.platform_ops = &jz4740_musb_ops,
};
+static struct musb_fifo_cfg jz4770_musb_fifo_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+};
+
+static struct musb_hdrc_config jz4770_musb_config = {
+ .multipoint = 1,
+ .num_eps = 11,
+ .ram_bits = 11,
+ .fifo_cfg = jz4770_musb_fifo_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(jz4770_musb_fifo_cfg),
+};
+
+static const struct musb_hdrc_platform_data jz4770_musb_pdata = {
+ .mode = MUSB_PERIPHERAL, /* TODO: support OTG */
+ .config = &jz4770_musb_config,
+ .platform_ops = &jz4740_musb_ops,
+};
+
static int jz4740_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct musb_hdrc_platform_data *pdata = &jz4740_musb_pdata;
+ const struct musb_hdrc_platform_data *pdata;
struct platform_device *musb;
struct jz4740_glue *glue;
struct clk *clk;
@@ -122,6 +203,12 @@ static int jz4740_probe(struct platform_device *pdev)
if (!glue)
return -ENOMEM;
+ pdata = of_device_get_match_data(dev);
+ if (!pdata) {
+ dev_err(dev, "missing platform data");
+ return -EINVAL;
+ }
+
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
dev_err(dev, "failed to allocate musb device");
@@ -142,6 +229,8 @@ static int jz4740_probe(struct platform_device *pdev)
}
musb->dev.parent = dev;
+ musb->dev.dma_mask = &musb->dev.coherent_dma_mask;
+ musb->dev.coherent_dma_mask = DMA_BIT_MASK(32);
glue->pdev = musb;
glue->clk = clk;
@@ -186,20 +275,19 @@ static int jz4740_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id jz4740_musb_of_match[] = {
- { .compatible = "ingenic,jz4740-musb" },
+ { .compatible = "ingenic,jz4740-musb", .data = &jz4740_musb_pdata },
+ { .compatible = "ingenic,jz4770-musb", .data = &jz4770_musb_pdata },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, jz4740_musb_of_match);
-#endif
static struct platform_driver jz4740_driver = {
.probe = jz4740_probe,
.remove = jz4740_remove,
.driver = {
.name = "musb-jz4740",
- .of_match_table = of_match_ptr(jz4740_musb_of_match),
+ .of_match_table = jz4740_musb_of_match,
},
};
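With per-compatible .data pointers in the match table, probe() no longer
hardcodes the JZ4740 platform data; of_device_get_match_data(dev) is shorthand
for roughly the following lookup (sketch):

        const struct of_device_id *match = of_match_device(jz4740_musb_of_match, dev);
        const struct musb_hdrc_platform_data *pdata = match ? match->data : NULL;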
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 6b88c2f5d970..6196b0e8d77d 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -115,9 +115,8 @@ static void mtk_musb_clks_disable(struct mtk_glue *glue)
clk_disable_unprepare(glue->main);
}
-static int musb_usb_role_sx_set(struct device *dev, enum usb_role role)
+static int mtk_otg_switch_set(struct mtk_glue *glue, enum usb_role role)
{
- struct mtk_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue->musb;
u8 devctl = readb(musb->mregs + MUSB_DEVCTL);
enum usb_role new_role;
@@ -168,9 +167,14 @@ static int musb_usb_role_sx_set(struct device *dev, enum usb_role role)
return 0;
}
-static enum usb_role musb_usb_role_sx_get(struct device *dev)
+static int musb_usb_role_sx_set(struct usb_role_switch *sw, enum usb_role role)
{
- struct mtk_glue *glue = dev_get_drvdata(dev);
+ return mtk_otg_switch_set(usb_role_switch_get_drvdata(sw), role);
+}
+
+static enum usb_role musb_usb_role_sx_get(struct usb_role_switch *sw)
+{
+ struct mtk_glue *glue = usb_role_switch_get_drvdata(sw);
return glue->role;
}
@@ -182,6 +186,7 @@ static int mtk_otg_switch_init(struct mtk_glue *glue)
role_sx_desc.set = musb_usb_role_sx_set;
role_sx_desc.get = musb_usb_role_sx_get;
role_sx_desc.fwnode = dev_fwnode(glue->dev);
+ role_sx_desc.driver_data = glue;
glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc);
return PTR_ERR_OR_ZERO(glue->role_sw);
@@ -288,8 +293,7 @@ static int mtk_musb_set_mode(struct musb *musb, u8 mode)
return -EINVAL;
}
- glue->role = new_role;
- musb_usb_role_sx_set(dev, glue->role);
+ mtk_otg_switch_set(glue, new_role);
return 0;
}
@@ -444,7 +448,7 @@ static int mtk_musb_probe(struct platform_device *pdev)
struct platform_device_info pinfo;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- int ret = -ENOMEM;
+ int ret;
glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index f616fb489542..d590110539ab 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2945,7 +2945,7 @@ static const struct dev_pm_ops musb_dev_pm_ops = {
static struct platform_driver musb_driver = {
.driver = {
- .name = (char *)musb_driver_name,
+ .name = musb_driver_name,
.bus = &platform_bus_type,
.pm = MUSB_DEV_PM_OPS,
.dev_groups = musb_groups,
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 886c9b602f8c..8736f4251a22 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1436,10 +1436,7 @@ done:
* We need to map sg if the transfer_buffer is
* NULL.
*/
- if (!urb->transfer_buffer)
- qh->use_sg = true;
-
- if (qh->use_sg) {
+ if (!urb->transfer_buffer) {
/* sg_miter_start is already done in musb_ep_program */
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller, "error: sg list empty\n");
@@ -1447,9 +1444,8 @@ done:
status = -EINVAL;
goto done;
}
- urb->transfer_buffer = qh->sg_miter.addr;
length = min_t(u32, length, qh->sg_miter.length);
- musb_write_fifo(hw_ep, length, urb->transfer_buffer);
+ musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
qh->sg_miter.consumed = length;
sg_miter_stop(&qh->sg_miter);
} else {
@@ -1458,11 +1454,6 @@ done:
qh->segsize = length;
- if (qh->use_sg) {
- if (offset + length >= urb->transfer_buffer_length)
- qh->use_sg = false;
- }
-
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
@@ -1977,8 +1968,10 @@ finish:
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
- if (qh->use_sg)
+ if (qh->use_sg) {
qh->use_sg = false;
+ urb->transfer_buffer = NULL;
+ }
if (urb->status == -EINPROGRESS)
urb->status = status;
@@ -2550,7 +2543,7 @@ static int musb_bus_resume(struct usb_hcd *hcd)
struct musb_temp_buffer {
void *kmalloc_ptr;
void *old_xfer_buffer;
- u8 data[0];
+ u8 data[];
};
static void musb_free_temp_buffer(struct urb *urb)
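The rework above stops caching the mapped address in urb->transfer_buffer and
reads it straight from the iterator instead, which is also what lets the final
hunk use a NULL transfer_buffer as the "this URB uses sg" marker. For
reference, the generic sg_miter pattern these hunks lean on (a sketch; do_io()
is a placeholder):

        struct sg_mapping_iter miter;

        sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
        while (sg_miter_next(&miter)) {
                /* miter.addr/miter.length describe the currently mapped chunk */
                do_io(miter.addr, miter.length);
                miter.consumed = miter.length;  /* may consume less than mapped */
        }
        sg_miter_stop(&miter);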
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 5d449089e3ad..99890d1bbfcb 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -156,7 +156,7 @@ static u8 tusb_readb(void __iomem *addr, u32 offset)
return val;
}
-static void tusb_writeb(void __iomem *addr, unsigned offset, u8 data)
+static void tusb_writeb(void __iomem *addr, u32 offset, u8 data)
{
u16 tmp;
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index ff24fca0a2d9..4b3fa78995cf 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -184,4 +184,12 @@ config USB_ULPI_VIEWPORT
Provides read/write operations to the ULPI phy register set for
controllers with a viewport register (e.g. Chipidea/ARC controllers).
+config JZ4770_PHY
+ tristate "Ingenic JZ4770 Transceiver Driver"
+ depends on MIPS || COMPILE_TEST
+ select USB_PHY
+ help
+ This driver provides PHY support for the USB controller found
+ on the JZ4770 SoC from Ingenic.
+
endmenu
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index df1d99010079..b352bdbe8712 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o
obj-$(CONFIG_USB_ULPI) += phy-ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o
obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o
+obj-$(CONFIG_JZ4770_PHY) += phy-jz4770.o
diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c
new file mode 100644
index 000000000000..3ea1f5b9bcf8
--- /dev/null
+++ b/drivers/usb/phy/phy-jz4770.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic JZ4770 USB PHY driver
+ * Copyright (c) Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/phy.h>
+
+#define REG_USBPCR_OFFSET 0x00
+#define REG_USBRDT_OFFSET 0x04
+#define REG_USBVBFIL_OFFSET 0x08
+#define REG_USBPCR1_OFFSET 0x0c
+
+/* USBPCR */
+#define USBPCR_USB_MODE BIT(31)
+#define USBPCR_AVLD_REG BIT(30)
+#define USBPCR_INCRM BIT(27)
+#define USBPCR_CLK12_EN BIT(26)
+#define USBPCR_COMMONONN BIT(25)
+#define USBPCR_VBUSVLDEXT BIT(24)
+#define USBPCR_VBUSVLDEXTSEL BIT(23)
+#define USBPCR_POR BIT(22)
+#define USBPCR_SIDDQ BIT(21)
+#define USBPCR_OTG_DISABLE BIT(20)
+#define USBPCR_TXPREEMPHTUNE BIT(6)
+
+#define USBPCR_IDPULLUP_LSB 28
+#define USBPCR_IDPULLUP_MASK GENMASK(29, USBPCR_IDPULLUP_LSB)
+#define USBPCR_IDPULLUP_ALWAYS (3 << USBPCR_IDPULLUP_LSB)
+#define USBPCR_IDPULLUP_SUSPEND (1 << USBPCR_IDPULLUP_LSB)
+#define USBPCR_IDPULLUP_OTG (0 << USBPCR_IDPULLUP_LSB)
+
+#define USBPCR_COMPDISTUNE_LSB 17
+#define USBPCR_COMPDISTUNE_MASK GENMASK(19, USBPCR_COMPDISTUNE_LSB)
+#define USBPCR_COMPDISTUNE_DFT 4
+
+#define USBPCR_OTGTUNE_LSB 14
+#define USBPCR_OTGTUNE_MASK GENMASK(16, USBPCR_OTGTUNE_LSB)
+#define USBPCR_OTGTUNE_DFT 4
+
+#define USBPCR_SQRXTUNE_LSB 11
+#define USBPCR_SQRXTUNE_MASK GENMASK(13, USBPCR_SQRXTUNE_LSB)
+#define USBPCR_SQRXTUNE_DFT 3
+
+#define USBPCR_TXFSLSTUNE_LSB 7
+#define USBPCR_TXFSLSTUNE_MASK GENMASK(10, USBPCR_TXFSLSTUNE_LSB)
+#define USBPCR_TXFSLSTUNE_DFT 3
+
+#define USBPCR_TXRISETUNE_LSB 4
+#define USBPCR_TXRISETUNE_MASK GENMASK(5, USBPCR_TXRISETUNE_LSB)
+#define USBPCR_TXRISETUNE_DFT 3
+
+#define USBPCR_TXVREFTUNE_LSB 0
+#define USBPCR_TXVREFTUNE_MASK GENMASK(3, USBPCR_TXVREFTUNE_LSB)
+#define USBPCR_TXVREFTUNE_DFT 5
+
+/* USBRDT */
+#define USBRDT_VBFIL_LD_EN BIT(25)
+#define USBRDT_IDDIG_EN BIT(24)
+#define USBRDT_IDDIG_REG BIT(23)
+
+#define USBRDT_USBRDT_LSB 0
+#define USBRDT_USBRDT_MASK GENMASK(22, USBRDT_USBRDT_LSB)
+
+/* USBPCR1 */
+#define USBPCR1_UHC_POWON BIT(5)
+
+struct jz4770_phy {
+ struct usb_phy phy;
+ struct usb_otg otg;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct regulator *vcc_supply;
+};
+
+static inline struct jz4770_phy *otg_to_jz4770_phy(struct usb_otg *otg)
+{
+ return container_of(otg, struct jz4770_phy, otg);
+}
+
+static inline struct jz4770_phy *phy_to_jz4770_phy(struct usb_phy *phy)
+{
+ return container_of(phy, struct jz4770_phy, phy);
+}
+
+static int jz4770_phy_set_peripheral(struct usb_otg *otg,
+ struct usb_gadget *gadget)
+{
+ struct jz4770_phy *priv = otg_to_jz4770_phy(otg);
+ u32 reg;
+
+ reg = readl(priv->base + REG_USBPCR_OFFSET);
+ reg &= ~USBPCR_USB_MODE;
+ reg |= USBPCR_VBUSVLDEXT | USBPCR_VBUSVLDEXTSEL | USBPCR_OTG_DISABLE;
+ writel(reg, priv->base + REG_USBPCR_OFFSET);
+
+ return 0;
+}
+
+static int jz4770_phy_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+ struct jz4770_phy *priv = otg_to_jz4770_phy(otg);
+ u32 reg;
+
+ reg = readl(priv->base + REG_USBPCR_OFFSET);
+ reg &= ~(USBPCR_VBUSVLDEXT | USBPCR_VBUSVLDEXTSEL | USBPCR_OTG_DISABLE);
+ reg |= USBPCR_USB_MODE;
+ writel(reg, priv->base + REG_USBPCR_OFFSET);
+
+ return 0;
+}
+
+static int jz4770_phy_init(struct usb_phy *phy)
+{
+ struct jz4770_phy *priv = phy_to_jz4770_phy(phy);
+ int err;
+ u32 reg;
+
+ err = regulator_enable(priv->vcc_supply);
+ if (err) {
+ dev_err(priv->dev, "Unable to enable VCC: %d", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(priv->dev, "Unable to start clock: %d", err);
+ regulator_disable(priv->vcc_supply);
+ return err;
+ }
+
+ reg = USBPCR_AVLD_REG | USBPCR_COMMONONN | USBPCR_IDPULLUP_ALWAYS |
+ (USBPCR_COMPDISTUNE_DFT << USBPCR_COMPDISTUNE_LSB) |
+ (USBPCR_OTGTUNE_DFT << USBPCR_OTGTUNE_LSB) |
+ (USBPCR_SQRXTUNE_DFT << USBPCR_SQRXTUNE_LSB) |
+ (USBPCR_TXFSLSTUNE_DFT << USBPCR_TXFSLSTUNE_LSB) |
+ (USBPCR_TXRISETUNE_DFT << USBPCR_TXRISETUNE_LSB) |
+ (USBPCR_TXVREFTUNE_DFT << USBPCR_TXVREFTUNE_LSB) |
+ USBPCR_POR;
+ writel(reg, priv->base + REG_USBPCR_OFFSET);
+
+ /* Wait for PHY to reset */
+ usleep_range(30, 300);
+ writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET);
+ usleep_range(300, 1000);
+
+ return 0;
+}
+
+static void jz4770_phy_shutdown(struct usb_phy *phy)
+{
+ struct jz4770_phy *priv = phy_to_jz4770_phy(phy);
+
+ clk_disable_unprepare(priv->clk);
+ regulator_disable(priv->vcc_supply);
+}
+
+static void jz4770_phy_remove(void *phy)
+{
+ usb_remove_phy(phy);
+}
+
+static int jz4770_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct jz4770_phy *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ priv->dev = dev;
+ priv->phy.dev = dev;
+ priv->phy.otg = &priv->otg;
+ priv->phy.label = "jz4770-phy";
+ priv->phy.init = jz4770_phy_init;
+ priv->phy.shutdown = jz4770_phy_shutdown;
+
+ priv->otg.state = OTG_STATE_UNDEFINED;
+ priv->otg.usb_phy = &priv->phy;
+ priv->otg.set_host = jz4770_phy_set_host;
+ priv->otg.set_peripheral = jz4770_phy_set_peripheral;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ dev_err(dev, "Failed to map registers");
+ return PTR_ERR(priv->base);
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ err = PTR_ERR(priv->clk);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clock");
+ return err;
+ }
+
+ priv->vcc_supply = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(priv->vcc_supply)) {
+ err = PTR_ERR(priv->vcc_supply);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator");
+ return err;
+ }
+
+ err = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Unable to register PHY");
+ return err;
+ }
+
+ return devm_add_action_or_reset(dev, jz4770_phy_remove, &priv->phy);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id jz4770_phy_of_matches[] = {
+ { .compatible = "ingenic,jz4770-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, jz4770_phy_of_matches);
+#endif
+
+static struct platform_driver jz4770_phy_driver = {
+ .probe = jz4770_phy_probe,
+ .driver = {
+ .name = "jz4770-phy",
+ .of_match_table = of_match_ptr(jz4770_phy_of_matches),
+ },
+};
+module_platform_driver(jz4770_phy_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4770 USB PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 6153cc35aba0..cffe2aced488 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -12,12 +12,11 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/slab.h>
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index bfebf1f2e991..9a7e655d5280 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -377,7 +377,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq1, status);
- return status;
+ goto err_put_regulator;
}
status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
@@ -386,8 +386,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq2, status);
- free_irq(twl->irq1, twl);
- return status;
+ goto err_free_irq1;
}
twl->asleep = 0;
@@ -396,6 +395,13 @@ static int twl6030_usb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
return 0;
+
+err_free_irq1:
+ free_irq(twl->irq1, twl);
+err_put_regulator:
+ regulator_put(twl->usb3v3);
+
+ return status;
}
static int twl6030_usb_remove(struct platform_device *pdev)
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 63a00ff26655..5b17709821df 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -48,7 +48,7 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
mutex_lock(&sw->lock);
- ret = sw->set(sw->dev.parent, role);
+ ret = sw->set(sw, role);
if (!ret)
sw->role = role;
@@ -75,7 +75,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
mutex_lock(&sw->lock);
if (sw->get)
- role = sw->get(sw->dev.parent);
+ role = sw->get(sw);
else
role = sw->role;
@@ -199,7 +199,7 @@ EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
static umode_t
usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct usb_role_switch *sw = to_role_switch(dev);
if (sw->allow_userspace_control)
@@ -329,7 +329,9 @@ usb_role_switch_register(struct device *parent,
sw->dev.fwnode = desc->fwnode;
sw->dev.class = role_class;
sw->dev.type = &usb_role_dev_type;
- dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
+ dev_set_drvdata(&sw->dev, desc->driver_data);
+ dev_set_name(&sw->dev, "%s-role-switch",
+ desc->name ? desc->name : dev_name(parent));
ret = device_register(&sw->dev);
if (ret) {
@@ -356,6 +358,27 @@ void usb_role_switch_unregister(struct usb_role_switch *sw)
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
+/**
+ * usb_role_switch_set_drvdata - Assign private data pointer to a switch
+ * @sw: USB Role Switch
+ * @data: Private data pointer
+ */
+void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data)
+{
+ dev_set_drvdata(&sw->dev, data);
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_set_drvdata);
+
+/**
+ * usb_role_switch_get_drvdata - Get the private data pointer of a switch
+ * @sw: USB Role Switch
+ */
+void *usb_role_switch_get_drvdata(struct usb_role_switch *sw)
+{
+ return dev_get_drvdata(&sw->dev);
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_get_drvdata);
+
static int __init usb_roles_init(void)
{
role_class = class_create(THIS_MODULE, "usb_role");
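Taken together with the callback signature change, the new driver_data plumbing
gives glue drivers the shape below (sketch of a hypothetical consumer; foo and
foo_apply_role() are placeholders):

        static int foo_role_set(struct usb_role_switch *sw, enum usb_role role)
        {
                struct foo *foo = usb_role_switch_get_drvdata(sw);

                return foo_apply_role(foo, role);
        }

        /* at probe time: */
        struct usb_role_switch_desc desc = {
                .fwnode = dev_fwnode(dev),
                .set = foo_role_set,
                .driver_data = foo,
        };
        struct usb_role_switch *sw = usb_role_switch_register(dev, &desc);
        if (IS_ERR(sw))
                return PTR_ERR(sw);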
diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c
index 80d6559bbcb2..5c96e929acea 100644
--- a/drivers/usb/roles/intel-xhci-usb-role-switch.c
+++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c
@@ -42,6 +42,7 @@
#define DRV_NAME "intel_xhci_usb_sw"
struct intel_xhci_usb_data {
+ struct device *dev;
struct usb_role_switch *role_sw;
void __iomem *base;
bool enable_sw_switch;
@@ -51,9 +52,10 @@ static const struct software_node intel_xhci_usb_node = {
"intel-xhci-usb-sw",
};
-static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role)
+static int intel_xhci_usb_set_role(struct usb_role_switch *sw,
+ enum usb_role role)
{
- struct intel_xhci_usb_data *data = dev_get_drvdata(dev);
+ struct intel_xhci_usb_data *data = usb_role_switch_get_drvdata(sw);
unsigned long timeout;
acpi_status status;
u32 glk, val;
@@ -66,11 +68,11 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role)
*/
status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
- dev_err(dev, "Error could not acquire lock\n");
+ dev_err(data->dev, "Error could not acquire lock\n");
return -EIO;
}
- pm_runtime_get_sync(dev);
+ pm_runtime_get_sync(data->dev);
/*
* Set idpin value as requested.
@@ -112,7 +114,7 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role)
do {
val = readl(data->base + DUAL_ROLE_CFG1);
if (!!(val & HOST_MODE) == (role == USB_ROLE_HOST)) {
- pm_runtime_put(dev);
+ pm_runtime_put(data->dev);
return 0;
}
@@ -120,21 +122,21 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role)
usleep_range(5000, 10000);
} while (time_before(jiffies, timeout));
- pm_runtime_put(dev);
+ pm_runtime_put(data->dev);
- dev_warn(dev, "Timeout waiting for role-switch\n");
+ dev_warn(data->dev, "Timeout waiting for role-switch\n");
return -ETIMEDOUT;
}
-static enum usb_role intel_xhci_usb_get_role(struct device *dev)
+static enum usb_role intel_xhci_usb_get_role(struct usb_role_switch *sw)
{
- struct intel_xhci_usb_data *data = dev_get_drvdata(dev);
+ struct intel_xhci_usb_data *data = usb_role_switch_get_drvdata(sw);
enum usb_role role;
u32 val;
- pm_runtime_get_sync(dev);
+ pm_runtime_get_sync(data->dev);
val = readl(data->base + DUAL_ROLE_CFG0);
- pm_runtime_put(dev);
+ pm_runtime_put(data->dev);
if (!(val & SW_IDPIN))
role = USB_ROLE_HOST;
@@ -175,7 +177,9 @@ static int intel_xhci_usb_probe(struct platform_device *pdev)
sw_desc.get = intel_xhci_usb_get_role,
sw_desc.allow_userspace_control = true,
sw_desc.fwnode = software_node_fwnode(&intel_xhci_usb_node);
+ sw_desc.driver_data = data;
+ data->dev = dev;
data->enable_sw_switch = !device_property_read_bool(dev,
"sw_switch_disable");
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 578ebdd86520..91055a191995 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1472,7 +1472,7 @@ static int digi_read_oob_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = port->serial;
struct tty_struct *tty;
- struct digi_port *priv = usb_get_serial_port_data(port);
+ struct digi_port *priv;
unsigned char *buf = urb->transfer_buffer;
int opcode, line, status, val;
unsigned long flags;
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 43fa1f0716b7..dcda7fb164b4 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Fintek F81232 USB to serial adaptor driver
+ * Fintek F81532A/534A/535/536 USB to 2/4/8/12 serial adaptor driver
*
* Copyright (C) 2012 Greg Kroah-Hartman (gregkh@linuxfoundation.org)
* Copyright (C) 2012 Linux Foundation
@@ -21,11 +22,45 @@
#include <linux/usb/serial.h>
#include <linux/serial_reg.h>
-static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x1934, 0x0706) },
+#define F81232_ID \
+ { USB_DEVICE(0x1934, 0x0706) } /* 1 port UART device */
+
+#define F81534A_SERIES_ID \
+ { USB_DEVICE(0x2c42, 0x1602) }, /* In-Box 2 port UART device */ \
+ { USB_DEVICE(0x2c42, 0x1604) }, /* In-Box 4 port UART device */ \
+ { USB_DEVICE(0x2c42, 0x1605) }, /* In-Box 8 port UART device */ \
+ { USB_DEVICE(0x2c42, 0x1606) }, /* In-Box 12 port UART device */ \
+ { USB_DEVICE(0x2c42, 0x1608) }, /* Non-Flash type */ \
+ { USB_DEVICE(0x2c42, 0x1632) }, /* 2 port UART device */ \
+ { USB_DEVICE(0x2c42, 0x1634) }, /* 4 port UART device */ \
+ { USB_DEVICE(0x2c42, 0x1635) }, /* 8 port UART device */ \
+ { USB_DEVICE(0x2c42, 0x1636) } /* 12 port UART device */
+
+#define F81534A_CTRL_ID \
+ { USB_DEVICE(0x2c42, 0x16f8) } /* Global control device */
+
+static const struct usb_device_id f81232_id_table[] = {
+ F81232_ID,
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE(usb, id_table);
+
+static const struct usb_device_id f81534a_id_table[] = {
+ F81534A_SERIES_ID,
+ { } /* Terminating entry */
+};
+
+static const struct usb_device_id f81534a_ctrl_id_table[] = {
+ F81534A_CTRL_ID,
+ { } /* Terminating entry */
+};
+
+static const struct usb_device_id combined_id_table[] = {
+ F81232_ID,
+ F81534A_SERIES_ID,
+ F81534A_CTRL_ID,
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, combined_id_table);
/* Maximum baudrate for F81232 */
#define F81232_MAX_BAUDRATE 1500000
@@ -35,6 +70,7 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define F81232_REGISTER_REQUEST 0xa0
#define F81232_GET_REGISTER 0xc0
#define F81232_SET_REGISTER 0x40
+#define F81534A_ACCESS_REG_RETRY 2
#define SERIAL_BASE_ADDRESS 0x0120
#define RECEIVE_BUFFER_REGISTER (0x00 + SERIAL_BASE_ADDRESS)
@@ -61,6 +97,22 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define F81232_CLK_14_77_MHZ (BIT(1) | BIT(0))
#define F81232_CLK_MASK GENMASK(1, 0)
+#define F81534A_MODE_REG 0x107
+#define F81534A_TRIGGER_MASK GENMASK(3, 2)
+#define F81534A_TRIGGER_MULTIPLE_4X BIT(3)
+#define F81534A_FIFO_128BYTE (BIT(1) | BIT(0))
+
+/* Per-serial-port GPIO control, 2 bytes: [control & output data][input data] */
+#define F81534A_GPIO_REG 0x10e
+#define F81534A_GPIO_MODE2_DIR BIT(6) /* 1: input, 0: output */
+#define F81534A_GPIO_MODE1_DIR BIT(5)
+#define F81534A_GPIO_MODE0_DIR BIT(4)
+#define F81534A_GPIO_MODE2_OUTPUT BIT(2)
+#define F81534A_GPIO_MODE1_OUTPUT BIT(1)
+#define F81534A_GPIO_MODE0_OUTPUT BIT(0)
+
+#define F81534A_CTRL_CMD_ENABLE_PORT 0x116
+
struct f81232_private {
struct mutex lock;
u8 modem_control;
@@ -322,10 +374,38 @@ exit:
__func__, retval);
}
+static char f81232_handle_lsr(struct usb_serial_port *port, u8 lsr)
+{
+ struct f81232_private *priv = usb_get_serial_port_data(port);
+ char tty_flag = TTY_NORMAL;
+
+ if (!(lsr & UART_LSR_BRK_ERROR_BITS))
+ return tty_flag;
+
+ if (lsr & UART_LSR_BI) {
+ tty_flag = TTY_BREAK;
+ port->icount.brk++;
+ usb_serial_handle_break(port);
+ } else if (lsr & UART_LSR_PE) {
+ tty_flag = TTY_PARITY;
+ port->icount.parity++;
+ } else if (lsr & UART_LSR_FE) {
+ tty_flag = TTY_FRAME;
+ port->icount.frame++;
+ }
+
+ if (lsr & UART_LSR_OE) {
+ port->icount.overrun++;
+ schedule_work(&priv->lsr_work);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
+ }
+
+ return tty_flag;
+}
+
static void f81232_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct f81232_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
char tty_flag;
unsigned int i;
@@ -341,29 +421,8 @@ static void f81232_process_read_urb(struct urb *urb)
/* bulk-in data: [LSR(1Byte)+DATA(1Byte)][LSR(1Byte)+DATA(1Byte)]... */
for (i = 0; i < urb->actual_length; i += 2) {
- tty_flag = TTY_NORMAL;
lsr = data[i];
-
- if (lsr & UART_LSR_BRK_ERROR_BITS) {
- if (lsr & UART_LSR_BI) {
- tty_flag = TTY_BREAK;
- port->icount.brk++;
- usb_serial_handle_break(port);
- } else if (lsr & UART_LSR_PE) {
- tty_flag = TTY_PARITY;
- port->icount.parity++;
- } else if (lsr & UART_LSR_FE) {
- tty_flag = TTY_FRAME;
- port->icount.frame++;
- }
-
- if (lsr & UART_LSR_OE) {
- port->icount.overrun++;
- schedule_work(&priv->lsr_work);
- tty_insert_flip_char(&port->port, 0,
- TTY_OVERRUN);
- }
- }
+ tty_flag = f81232_handle_lsr(port, lsr);
if (port->port.console && port->sysrq) {
if (usb_serial_handle_sysrq_char(port, data[i + 1]))
@@ -376,6 +435,47 @@ static void f81232_process_read_urb(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
+static void f81534a_process_read_urb(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ unsigned char *data = urb->transfer_buffer;
+ char tty_flag;
+ unsigned int i;
+ u8 lsr;
+ u8 len;
+
+ if (urb->actual_length < 3) {
+ dev_err(&port->dev, "short message received: %d\n",
+ urb->actual_length);
+ return;
+ }
+
+ len = data[0];
+ if (len != urb->actual_length) {
+ dev_err(&port->dev, "malformed message received: %d (%d)\n",
+ urb->actual_length, len);
+ return;
+ }
+
+ /* bulk-in data: [LEN][Data.....][LSR] */
+ lsr = data[len - 1];
+ tty_flag = f81232_handle_lsr(port, lsr);
+
+ if (port->port.console && port->sysrq) {
+ for (i = 1; i < len - 1; ++i) {
+ if (!usb_serial_handle_sysrq_char(port, data[i])) {
+ tty_insert_flip_char(&port->port, data[i],
+ tty_flag);
+ }
+ }
+ } else {
+ tty_insert_flip_string_fixed_flag(&port->port, &data[1],
+ tty_flag, len - 2);
+ }
+
+ tty_flip_buffer_push(&port->port);
+}
+
static void f81232_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
@@ -659,6 +759,24 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
return 0;
}
+static int f81534a_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+ int status;
+ u8 mask;
+ u8 val;
+
+ val = F81534A_TRIGGER_MULTIPLE_4X | F81534A_FIFO_128BYTE;
+ mask = F81534A_TRIGGER_MASK | F81534A_FIFO_128BYTE;
+
+ status = f81232_set_mask_register(port, F81534A_MODE_REG, mask, val);
+ if (status) {
+ dev_err(&port->dev, "failed to set MODE_REG: %d\n", status);
+ return status;
+ }
+
+ return f81232_open(tty, port);
+}
+
static void f81232_close(struct usb_serial_port *port)
{
struct f81232_private *port_priv = usb_get_serial_port_data(port);
@@ -678,6 +796,20 @@ static void f81232_dtr_rts(struct usb_serial_port *port, int on)
f81232_set_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
}
+static bool f81232_tx_empty(struct usb_serial_port *port)
+{
+ int status;
+ u8 tmp;
+
+ status = f81232_get_register(port, LINE_STATUS_REGISTER, &tmp);
+ if (!status) {
+ if ((tmp & UART_LSR_TEMT) != UART_LSR_TEMT)
+ return false;
+ }
+
+ return true;
+}
+
static int f81232_carrier_raised(struct usb_serial_port *port)
{
u8 msr;
@@ -728,11 +860,98 @@ static void f81232_lsr_worker(struct work_struct *work)
dev_warn(&port->dev, "read LSR failed: %d\n", status);
}
+static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg,
+ u16 size, void *val)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ int retry = F81534A_ACCESS_REG_RETRY;
+ int status;
+ u8 *tmp;
+
+ tmp = kmemdup(val, size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ while (retry--) {
+ status = usb_control_msg(dev,
+ usb_sndctrlpipe(dev, 0),
+ F81232_REGISTER_REQUEST,
+ F81232_SET_REGISTER,
+ reg,
+ 0,
+ tmp,
+ size,
+ USB_CTRL_SET_TIMEOUT);
+ if (status < 0) {
+ status = usb_translate_errors(status);
+ if (status == -EIO)
+ continue;
+ } else if (status != size) {
+ /* Retry on short transfers */
+ status = -EIO;
+ continue;
+ } else {
+ status = 0;
+ }
+
+ break;
+ }
+
+ if (status) {
+ dev_err(&intf->dev, "failed to set register 0x%x: %d\n",
+ reg, status);
+ }
+
+ kfree(tmp);
+ return status;
+}
+
+static int f81534a_ctrl_enable_all_ports(struct usb_interface *intf, bool en)
+{
+ unsigned char enable[2] = {0};
+ int status;
+
+ /*
+ * Enable all available serial ports. The 16-bit little-endian value is
+ * laid out as follows:
+ * bit 15 : reset behavior (when the HUB gets a soft reset)
+ * 0: keep the current serial-port enable state.
+ * 1: disable all serial ports.
+ * bit 0~11 : per-port enable bits.
+ * The 0xff/0x8f pair below thus encodes 0x8fff: all twelve ports
+ * enabled, with "disable on reset" selected.
+ */
+ if (en) {
+ enable[0] = 0xff;
+ enable[1] = 0x8f;
+ }
+
+ status = f81534a_ctrl_set_register(intf, F81534A_CTRL_CMD_ENABLE_PORT,
+ sizeof(enable), enable);
+ if (status)
+ dev_err(&intf->dev, "failed to enable ports: %d\n", status);
+
+ return status;
+}
+
+static int f81534a_ctrl_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return f81534a_ctrl_enable_all_ports(intf, true);
+}
+
+static void f81534a_ctrl_disconnect(struct usb_interface *intf)
+{
+ f81534a_ctrl_enable_all_ports(intf, false);
+}
+
+static int f81534a_ctrl_resume(struct usb_interface *intf)
+{
+ return f81534a_ctrl_enable_all_ports(intf, true);
+}
+
static int f81232_port_probe(struct usb_serial_port *port)
{
struct f81232_private *priv;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&port->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -748,14 +967,17 @@ static int f81232_port_probe(struct usb_serial_port *port)
return 0;
}
-static int f81232_port_remove(struct usb_serial_port *port)
+static int f81534a_port_probe(struct usb_serial_port *port)
{
- struct f81232_private *priv;
+ int status;
- priv = usb_get_serial_port_data(port);
- kfree(priv);
+ /* tri-state with pull-high, default RS232 Mode */
+ status = f81232_set_register(port, F81534A_GPIO_REG,
+ F81534A_GPIO_MODE2_DIR);
+ if (status)
+ return status;
- return 0;
+ return f81232_port_probe(port);
}
static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
@@ -799,7 +1021,7 @@ static struct usb_serial_driver f81232_device = {
.owner = THIS_MODULE,
.name = "f81232",
},
- .id_table = id_table,
+ .id_table = f81232_id_table,
.num_ports = 1,
.bulk_in_size = 256,
.bulk_out_size = 256,
@@ -813,22 +1035,82 @@ static struct usb_serial_driver f81232_device = {
.tiocmget = f81232_tiocmget,
.tiocmset = f81232_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
+ .tx_empty = f81232_tx_empty,
.process_read_urb = f81232_process_read_urb,
.read_int_callback = f81232_read_int_callback,
.port_probe = f81232_port_probe,
- .port_remove = f81232_port_remove,
+ .suspend = f81232_suspend,
+ .resume = f81232_resume,
+};
+
+static struct usb_serial_driver f81534a_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "f81534a",
+ },
+ .id_table = f81534a_id_table,
+ .num_ports = 1,
+ .open = f81534a_open,
+ .close = f81232_close,
+ .dtr_rts = f81232_dtr_rts,
+ .carrier_raised = f81232_carrier_raised,
+ .get_serial = f81232_get_serial_info,
+ .break_ctl = f81232_break_ctl,
+ .set_termios = f81232_set_termios,
+ .tiocmget = f81232_tiocmget,
+ .tiocmset = f81232_tiocmset,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
+ .tx_empty = f81232_tx_empty,
+ .process_read_urb = f81534a_process_read_urb,
+ .read_int_callback = f81232_read_int_callback,
+ .port_probe = f81534a_port_probe,
.suspend = f81232_suspend,
.resume = f81232_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
&f81232_device,
+ &f81534a_device,
NULL,
};
-module_usb_serial_driver(serial_drivers, id_table);
+static struct usb_driver f81534a_ctrl_driver = {
+ .name = "f81534a_ctrl",
+ .id_table = f81534a_ctrl_id_table,
+ .probe = f81534a_ctrl_probe,
+ .disconnect = f81534a_ctrl_disconnect,
+ .resume = f81534a_ctrl_resume,
+};
+
+static int __init f81232_init(void)
+{
+ int status;
+
+ status = usb_register_driver(&f81534a_ctrl_driver, THIS_MODULE,
+ KBUILD_MODNAME);
+ if (status)
+ return status;
+
+ status = usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME,
+ combined_id_table);
+ if (status) {
+ usb_deregister(&f81534a_ctrl_driver);
+ return status;
+ }
+
+ return 0;
+}
+
+static void __exit f81232_exit(void)
+{
+ usb_serial_deregister_drivers(serial_drivers);
+ usb_deregister(&f81534a_ctrl_driver);
+}
+
+module_init(f81232_init);
+module_exit(f81232_exit);
-MODULE_DESCRIPTION("Fintek F81232 USB to serial adaptor driver");
+MODULE_DESCRIPTION("Fintek F81232/532A/534A/535/536 USB to serial driver");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
MODULE_AUTHOR("Peter Hong <peter_hong@fintek.com.tw>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index ffd984142171..d63072fee099 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
send it directly to the tty port */
if (garmin_data_p->flags & FLAGS_QUEUING) {
pkt_add(garmin_data_p, data, data_length);
- } else if (bulk_data ||
- getLayerId(data) == GARMIN_LAYERID_APPL) {
+ } else if (bulk_data || (data_length >= sizeof(u32) &&
+ getLayerId(data) == GARMIN_LAYERID_APPL)) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_RESP_SEEN;
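The added "data_length >= sizeof(u32)" guard exists because getLayerId() reads
a full 32-bit word from the start of the packet, roughly (a sketch of its
shape):

        static inline u32 getLayerId(const u8 *usbPacket)
        {
                return __le32_to_cpup((__le32 *)usbPacket);     /* 4-byte read */
        }

Without the length check, a short bulk packet would let that read run past the
received data.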
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 1be8bea372a2..5cdf180cda23 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -417,7 +417,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
/*
* Make sure URB is marked as free before checking the throttled flag
* to avoid racing with unthrottle() on another CPU. Matches the
- * smp_mb() in unthrottle().
+ * smp_mb__after_atomic() in unthrottle().
*/
smp_mb__after_atomic();
@@ -489,7 +489,7 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
* Matches the smp_mb__after_atomic() in
* usb_serial_generic_read_bulk_callback().
*/
- smp_mb();
+ smp_mb__after_atomic();
usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}
@@ -609,12 +609,10 @@ EXPORT_SYMBOL_GPL(usb_serial_handle_break);
* @tty: tty for the port
* @status: new carrier detect status, nonzero if active
*/
-void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+void usb_serial_handle_dcd_change(struct usb_serial_port *port,
struct tty_struct *tty, unsigned int status)
{
- struct tty_port *port = &usb_port->port;
-
- dev_dbg(&usb_port->dev, "%s - status %d\n", __func__, status);
+ dev_dbg(&port->dev, "%s - status %d\n", __func__, status);
if (tty) {
struct tty_ldisc *ld = tty_ldisc_ref(tty);
@@ -627,7 +625,7 @@ void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
}
if (status)
- wake_up_interruptible(&port->open_wait);
+ wake_up_interruptible(&port->port.open_wait);
else if (tty && !C_CLOCAL(tty))
tty_hangup(tty);
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 5737add6a2a4..4cca0b836f43 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -710,7 +710,7 @@ static void edge_interrupt_callback(struct urb *urb)
/* grab the txcredits for the ports if available */
position = 2;
portNumber = 0;
- while ((position < length) &&
+ while ((position < length - 1) &&
(portNumber < edge_serial->serial->num_ports)) {
txCredits = data[position] | (data[position+1] << 8);
if (txCredits) {
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index c38e87ac5ea9..0d1a5bb4636e 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -593,7 +593,7 @@ struct ti_i2c_desc {
__u8 Type; // Type of descriptor
__le16 Size; // Size of data only not including header
__u8 CheckSum; // Checksum (8 bit sum of data only)
- __u8 Data[0]; // Data starts here
+ __u8 Data[]; // Data starts here
} __attribute__((packed));
// for 5152 devices only (type 2 record)
@@ -601,7 +601,7 @@ struct ti_i2c_desc {
struct ti_i2c_firmware_rec {
__u8 Ver_Major; // Firmware Major version number
__u8 Ver_Minor; // Firmware Minor version number
- __u8 Data[0]; // Download starts here
+ __u8 Data[]; // Download starts here
} __attribute__((packed));
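Beyond being the preferred C99 spelling, true flexible array members let
allocation sizes be computed overflow-safely; a sketch of allocating one of
these descriptors with n trailing data bytes (assuming <linux/overflow.h>; n is
a placeholder):

        struct ti_i2c_desc *desc;

        desc = kzalloc(struct_size(desc, Data, n), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;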
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0b5dcf973d94..8bfffca3e4ae 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1992,8 +1992,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ { USB_DEVICE_INTERFACE_CLASS(0x1435, 0xd191, 0xff), /* Wistron Neweb D19Q1 */
+ .driver_info = RSVD(1) | RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1690, 0x7588, 0xff), /* ASKEY WWHC050 */
+ .driver_info = RSVD(1) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2033, 0xff), /* BroadMobi BM806U */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 613f91add03d..ce0401d3137f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index ef23acc9b9ce..73075b9351c5 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -219,7 +219,7 @@ struct ti_write_data_bytes {
u8 bDataCounter;
__be16 wBaseAddrHi;
__be16 wBaseAddrLo;
- u8 bData[0];
+ u8 bData[];
} __packed;
struct ti_read_data_request {
@@ -234,7 +234,7 @@ struct ti_read_data_bytes {
__u8 bCmdCode;
__u8 bModuleId;
__u8 bErrorCode;
- __u8 bData[0];
+ __u8 bData[];
} __packed;
/* Interrupt struct */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index dc7a65b9ec98..27e3bb58c872 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -288,7 +288,7 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
/**
* serial_cleanup - free resources post close/hangup
- * @port: port to free up
+ * @tty: tty to clean up
*
* Do the resource freeing and refcount dropping for the port.
* Avoid freeing the console.
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 3670fda02c34..d592071119ba 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -81,6 +81,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status);
+/*
+ * This driver needs its own workqueue, as we need to control memory allocation.
+ *
+ * In the course of error handling and power management, uas_wait_for_pending_cmnds()
+ * needs to flush pending work items. In these contexts we cannot allocate memory
+ * by doing block IO, as we would deadlock. For the same reason we cannot wait
+ * on anything that allocates memory without heeding these constraints.
+ *
+ * So we have to control all work items that can be on the workqueue we flush.
+ * Hence we cannot share a queue and need our own.
+ */
+static struct workqueue_struct *workqueue;
+
static void uas_do_work(struct work_struct *work)
{
struct uas_dev_info *devinfo =
@@ -109,7 +122,7 @@ static void uas_do_work(struct work_struct *work)
if (!err)
cmdinfo->state &= ~IS_IN_WORK_LIST;
else
- schedule_work(&devinfo->work);
+ queue_work(workqueue, &devinfo->work);
}
out:
spin_unlock_irqrestore(&devinfo->lock, flags);
@@ -134,7 +147,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo)
lockdep_assert_held(&devinfo->lock);
cmdinfo->state |= IS_IN_WORK_LIST;
- schedule_work(&devinfo->work);
+ queue_work(workqueue, &devinfo->work);
}
static void uas_zap_pending(struct uas_dev_info *devinfo, int result)
@@ -190,6 +203,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
struct uas_cmd_info *ci = (void *)&cmnd->SCp;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ if (status == -ENODEV) /* too late */
+ return;
+
scmd_printk(KERN_INFO, cmnd,
"%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
prefix, status, cmdinfo->uas_tag,
@@ -1226,7 +1242,31 @@ static struct usb_driver uas_driver = {
.id_table = uas_usb_ids,
};
-module_usb_driver(uas_driver);
+static int __init uas_init(void)
+{
+ int rv;
+
+ workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0);
+ if (!workqueue)
+ return -ENOMEM;
+
+ rv = usb_register(&uas_driver);
+ if (rv) {
+ destroy_workqueue(workqueue);
+ return rv;
+ }
+
+ return 0;
+}
+
+static void __exit uas_exit(void)
+{
+ usb_deregister(&uas_driver);
+ destroy_workqueue(workqueue);
+}
+
+module_init(uas_init);
+module_exit(uas_exit);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
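
For context, the hunks above replace schedule_work() on the shared system workqueue with a private queue created with WQ_MEM_RECLAIM, so flushing it from error handling cannot deadlock on memory allocation. A minimal sketch of the same pattern in an unrelated, hypothetical module (all demo_* names are invented):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;

static void demo_fn(struct work_struct *w)
{
	pr_info("ran on a WQ_MEM_RECLAIM queue\n");
}
static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread, so this queue can
	 * make forward progress even under memory pressure */
	wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;
	queue_work(wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(wq);	/* drains remaining work before freeing */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because only the module's own items are ever queued here, flushing the queue never waits on work that might allocate memory, which is the property uas needs.
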
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1880f3e13f57..f6c3681fa2e9 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2323,6 +2323,13 @@ UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
USB_SC_DEVICE,USB_PR_DEVICE,NULL,
US_FL_MAX_SECTORS_64 ),
+/* Reported by Cyril Roelandt <tipecaml@gmail.com> */
+UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
"iRiver",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 1b23741036ee..37157ed9a881 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -28,6 +28,13 @@
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
+/* Reported-by: Julian Groß <julian.g@posteo.de> */
+UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
+ "LaCie",
+ "2Big Quadra USB3",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 9a79cd9762f3..94a64729dc27 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -121,12 +121,12 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
.initFunction = init_function, \
}
-static struct us_unusual_dev us_unusual_dev_list[] = {
+static const struct us_unusual_dev us_unusual_dev_list[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
};
-static struct us_unusual_dev for_dynamic_ids =
+static const struct us_unusual_dev for_dynamic_ids =
USUAL_DEV(USB_SC_SCSI, USB_PR_BULK);
#undef UNUSUAL_DEV
@@ -583,7 +583,7 @@ EXPORT_SYMBOL_GPL(usb_stor_adjust_quirks);
/* Get the unusual_devs entries and the string descriptors */
static int get_device_info(struct us_data *us, const struct usb_device_id *id,
- struct us_unusual_dev *unusual_dev)
+ const struct us_unusual_dev *unusual_dev)
{
struct usb_device *dev = us->pusb_dev;
struct usb_interface_descriptor *idesc =
@@ -933,7 +933,7 @@ static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
int usb_stor_probe1(struct us_data **pus,
struct usb_interface *intf,
const struct usb_device_id *id,
- struct us_unusual_dev *unusual_dev,
+ const struct us_unusual_dev *unusual_dev,
struct scsi_host_template *sht)
{
struct Scsi_Host *host;
@@ -1092,7 +1092,7 @@ static struct scsi_host_template usb_stor_host_template;
static int storage_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct us_unusual_dev *unusual_dev;
+ const struct us_unusual_dev *unusual_dev;
struct us_data *us;
int result;
int size;
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 85052cd66839..5850d624cac7 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -93,7 +93,8 @@ struct us_data {
struct mutex dev_mutex; /* protect pusb_dev */
struct usb_device *pusb_dev; /* this usb_device */
struct usb_interface *pusb_intf; /* this interface */
- struct us_unusual_dev *unusual_dev; /* device-filter entry */
+ const struct us_unusual_dev *unusual_dev;
+ /* device-filter entry */
unsigned long fflags; /* fixed flags from filter */
unsigned long dflags; /* dynamic atomic bitflags */
unsigned int send_bulk_pipe; /* cached pipe values */
@@ -185,7 +186,7 @@ extern int usb_stor_post_reset(struct usb_interface *iface);
extern int usb_stor_probe1(struct us_data **pus,
struct usb_interface *intf,
const struct usb_device_id *id,
- struct us_unusual_dev *unusual_dev,
+ const struct us_unusual_dev *unusual_dev,
struct scsi_host_template *sht);
extern int usb_stor_probe2(struct us_data *us);
extern void usb_stor_disconnect(struct usb_interface *intf);
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index cfd12e523678..529512827d8f 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -40,7 +40,7 @@
.driver_info = (flags) \
}
-struct usb_device_id usb_storage_usb_ids[] = {
+const struct usb_device_id usb_storage_usb_ids[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
};
@@ -68,7 +68,7 @@ struct ignore_entry {
.bcdmax = bcdDeviceMax, \
}
-static struct ignore_entry ignore_ids[] = {
+static const struct ignore_entry ignore_ids[] = {
# include "unusual_alauda.h"
# include "unusual_cypress.h"
# include "unusual_datafab.h"
@@ -92,7 +92,7 @@ int usb_usual_ignore_device(struct usb_interface *intf)
{
struct usb_device *udev;
unsigned vid, pid, bcd;
- struct ignore_entry *p;
+ const struct ignore_entry *p;
udev = interface_to_usbdev(intf);
vid = le16_to_cpu(udev->descriptor.idVendor);
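
The const additions in the two files above move the device and ignore tables into read-only data; probe paths only ever scan them. A reduced sketch of the terminated-table lookup idiom (types and entries are illustrative, not from the patch):

#include <stdio.h>

struct id_entry {
	unsigned short vid, pid;
	const char *name;
};

/* const: the table lives in .rodata and is never modified at runtime */
static const struct id_entry ids[] = {
	{ 0x1234, 0x0001, "widget A" },
	{ 0x1234, 0x0002, "widget B" },
	{ }			/* terminating entry, as in the kernel tables */
};

static const struct id_entry *find_id(unsigned short vid, unsigned short pid)
{
	const struct id_entry *p;

	for (p = ids; p->vid; p++)
		if (p->vid == vid && p->pid == pid)
			return p;
	return NULL;
}

int main(void)
{
	const struct id_entry *e = find_id(0x1234, 0x0002);

	printf("%s\n", e ? e->name : "unknown");
	return 0;
}
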
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 2e45eb479386..e8ddb81cb6df 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -30,17 +30,10 @@ static int typec_altmode_set_state(struct typec_altmode *adev,
{
bool is_port = is_typec_port(adev->dev.parent);
struct altmode *port_altmode;
- int ret;
port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
- ret = typec_altmode_set_mux(port_altmode, conf, data);
- if (ret)
- return ret;
-
- blocking_notifier_call_chain(&port_altmode->nh, conf, NULL);
-
- return 0;
+ return typec_altmode_set_mux(port_altmode, conf, data);
}
/* -------------------------------------------------------------------------- */
@@ -82,9 +75,6 @@ int typec_altmode_notify(struct typec_altmode *adev,
if (ret)
return ret;
- blocking_notifier_call_chain(is_port ? &altmode->nh : &partner->nh,
- conf, data);
-
if (partner->adev.ops && partner->adev.ops->notify)
return partner->adev.ops->notify(&partner->adev, conf, data);
@@ -208,7 +198,10 @@ EXPORT_SYMBOL_GPL(typec_altmode_vdm);
const struct typec_altmode *
typec_altmode_get_partner(struct typec_altmode *adev)
{
- return adev ? &to_altmode(adev)->partner->adev : NULL;
+ if (!adev || !to_altmode(adev)->partner)
+ return NULL;
+
+ return &to_altmode(adev)->partner->adev;
}
EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
diff --git a/drivers/usb/typec/bus.h b/drivers/usb/typec/bus.h
index 0c9661c96473..8ba8112d2740 100644
--- a/drivers/usb/typec/bus.h
+++ b/drivers/usb/typec/bus.h
@@ -22,8 +22,6 @@ struct altmode {
struct altmode *partner;
struct altmode *plug[2];
-
- struct blocking_notifier_head nh;
};
#define to_altmode(d) container_of(d, struct altmode, adev)
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 7c44e930602f..8d894bdff77d 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -206,69 +206,6 @@ static void typec_altmode_put_partner(struct altmode *altmode)
put_device(&adev->dev);
}
-static void *typec_port_match(struct device_connection *con, int ep, void *data)
-{
- struct device *dev;
-
- /*
- * FIXME: Check does the fwnode supports the requested SVID. If it does
- * we need to return ERR_PTR(-PROBE_DEFER) when there is no device.
- */
- if (con->fwnode)
- return class_find_device_by_fwnode(typec_class, con->fwnode);
-
- dev = class_find_device_by_name(typec_class, con->endpoint[ep]);
-
- return dev ? dev : ERR_PTR(-EPROBE_DEFER);
-}
-
-struct typec_altmode *
-typec_altmode_register_notifier(struct device *dev, u16 svid, u8 mode,
- struct notifier_block *nb)
-{
- struct typec_device_id id = { svid, mode, };
- struct device *altmode_dev;
- struct device *port_dev;
- struct altmode *altmode;
- int ret;
-
- /* Find the port linked to the caller */
- port_dev = device_connection_find_match(dev, NULL, NULL,
- typec_port_match);
- if (IS_ERR_OR_NULL(port_dev))
- return port_dev ? ERR_CAST(port_dev) : ERR_PTR(-ENODEV);
-
- /* Find the altmode with matching svid */
- altmode_dev = device_find_child(port_dev, &id, altmode_match);
-
- put_device(port_dev);
-
- if (!altmode_dev)
- return ERR_PTR(-ENODEV);
-
- altmode = to_altmode(to_typec_altmode(altmode_dev));
-
- /* Register notifier */
- ret = blocking_notifier_chain_register(&altmode->nh, nb);
- if (ret) {
- put_device(altmode_dev);
- return ERR_PTR(ret);
- }
-
- return &altmode->adev;
-}
-EXPORT_SYMBOL_GPL(typec_altmode_register_notifier);
-
-void typec_altmode_unregister_notifier(struct typec_altmode *adev,
- struct notifier_block *nb)
-{
- struct altmode *altmode = to_altmode(adev);
-
- blocking_notifier_chain_unregister(&altmode->nh, nb);
- put_device(&adev->dev);
-}
-EXPORT_SYMBOL_GPL(typec_altmode_unregister_notifier);
-
/**
* typec_altmode_update_active - Report Enter/Exit mode
* @adev: Handle to the alternate mode
@@ -432,7 +369,28 @@ static struct attribute *typec_altmode_attrs[] = {
&dev_attr_vdo.attr,
NULL
};
-ATTRIBUTE_GROUPS(typec_altmode);
+
+static umode_t typec_altmode_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct typec_altmode *adev = to_typec_altmode(kobj_to_dev(kobj));
+
+ if (attr == &dev_attr_active.attr)
+ if (!adev->ops || !adev->ops->activate)
+ return 0444;
+
+ return attr->mode;
+}
+
+static struct attribute_group typec_altmode_group = {
+ .is_visible = typec_altmode_attr_is_visible,
+ .attrs = typec_altmode_attrs,
+};
+
+static const struct attribute_group *typec_altmode_groups[] = {
+ &typec_altmode_group,
+ NULL
+};
static int altmode_id_get(struct device *dev)
{
@@ -517,9 +475,7 @@ typec_register_altmode(struct device *parent,
dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id);
/* Link partners and plugs with the ports */
- if (is_port)
- BLOCKING_INIT_NOTIFIER_HEAD(&alt->nh);
- else
+ if (!is_port)
typec_altmode_set_partner(alt);
/* The partners are bound to drivers */
@@ -859,7 +815,7 @@ struct typec_cable *typec_cable_get(struct typec_port *port)
EXPORT_SYMBOL_GPL(typec_cable_get);
/**
- * typec_cable_get - Decrement the reference count on USB Type-C cable
+ * typec_cable_put - Decrement the reference count on USB Type-C cable
* @cable: The USB Type-C cable
*/
void typec_cable_put(struct typec_cable *cable)
@@ -1091,11 +1047,6 @@ static ssize_t power_role_store(struct device *dev,
struct typec_port *port = to_typec_port(dev);
int ret;
- if (!port->cap->pd_revision) {
- dev_dbg(dev, "USB Power Delivery not supported\n");
- return -EOPNOTSUPP;
- }
-
if (!port->ops || !port->ops->pr_set) {
dev_dbg(dev, "power role swapping not supported\n");
return -EOPNOTSUPP;
@@ -1293,6 +1244,25 @@ static ssize_t usb_power_delivery_revision_show(struct device *dev,
}
static DEVICE_ATTR_RO(usb_power_delivery_revision);
+static ssize_t orientation_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_port *p = to_typec_port(dev);
+ enum typec_orientation orientation = typec_get_orientation(p);
+
+ switch (orientation) {
+ case TYPEC_ORIENTATION_NORMAL:
+ return sprintf(buf, "%s\n", "normal");
+ case TYPEC_ORIENTATION_REVERSE:
+ return sprintf(buf, "%s\n", "reverse");
+ case TYPEC_ORIENTATION_NONE:
+ default:
+ return sprintf(buf, "%s\n", "unknown");
+ }
+}
+static DEVICE_ATTR_RO(orientation);
+
static struct attribute *typec_attrs[] = {
&dev_attr_data_role.attr,
&dev_attr_power_operation_mode.attr,
@@ -1303,9 +1273,54 @@ static struct attribute *typec_attrs[] = {
&dev_attr_usb_typec_revision.attr,
&dev_attr_vconn_source.attr,
&dev_attr_port_type.attr,
+ &dev_attr_orientation.attr,
NULL,
};
-ATTRIBUTE_GROUPS(typec);
+
+static umode_t typec_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct typec_port *port = to_typec_port(kobj_to_dev(kobj));
+
+ if (attr == &dev_attr_data_role.attr) {
+ if (port->cap->data != TYPEC_PORT_DRD ||
+ !port->ops || !port->ops->dr_set)
+ return 0444;
+ } else if (attr == &dev_attr_power_role.attr) {
+ if (port->cap->type != TYPEC_PORT_DRP ||
+ !port->ops || !port->ops->pr_set)
+ return 0444;
+ } else if (attr == &dev_attr_vconn_source.attr) {
+ if (!port->cap->pd_revision ||
+ !port->ops || !port->ops->vconn_set)
+ return 0444;
+ } else if (attr == &dev_attr_preferred_role.attr) {
+ if (port->cap->type != TYPEC_PORT_DRP ||
+ !port->ops || !port->ops->try_role)
+ return 0444;
+ } else if (attr == &dev_attr_port_type.attr) {
+ if (!port->ops || !port->ops->port_type_set)
+ return 0;
+ if (port->cap->type != TYPEC_PORT_DRP)
+ return 0444;
+ } else if (attr == &dev_attr_orientation.attr) {
+ if (port->cap->orientation_aware)
+ return 0444;
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static struct attribute_group typec_group = {
+ .is_visible = typec_attr_is_visible,
+ .attrs = typec_attrs,
+};
+
+static const struct attribute_group *typec_groups[] = {
+ &typec_group,
+ NULL
+};
static int typec_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -1495,13 +1510,13 @@ int typec_set_orientation(struct typec_port *port,
{
int ret;
- if (port->sw) {
- ret = port->sw->set(port->sw, orientation);
- if (ret)
- return ret;
- }
+ ret = typec_switch_set(port->sw, orientation);
+ if (ret)
+ return ret;
port->orientation = orientation;
+ sysfs_notify(&port->dev.kobj, NULL, "orientation");
+ kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -1533,7 +1548,7 @@ int typec_set_mode(struct typec_port *port, int mode)
state.mode = mode;
- return port->mux ? port->mux->set(port->mux, &state) : 0;
+ return typec_mux_set(port->mux, &state);
}
EXPORT_SYMBOL_GPL(typec_set_mode);
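
The class.c changes above replace unconditional ATTRIBUTE_GROUPS() with hand-rolled groups so an is_visible() hook can demote an attribute to read-only (0444) or hide it entirely (0) per port capability. A compressed sketch of that sysfs convention; the example_* names and the drvdata check are invented for illustration:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "42\n");
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return len;
}
static DEVICE_ATTR_RW(example);

static struct attribute *demo_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static umode_t demo_attr_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/* demote to read-only while no driver data is bound */
	if (attr == &dev_attr_example.attr && !dev_get_drvdata(dev))
		return 0444;

	return attr->mode;	/* otherwise keep the declared mode */
}

static const struct attribute_group demo_group = {
	.is_visible = demo_attr_is_visible,
	.attrs = demo_attrs,
};

static const struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL
};

demo_groups would then be assigned to the device's groups pointer before registration, exactly as typec_groups is wired up for the port device.
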
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 5baf0f416c73..52ad277e4565 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -17,11 +17,6 @@
#include "bus.h"
-static int name_match(struct device *dev, const void *name)
-{
- return !strcmp((const char *)name, dev_name(dev));
-}
-
static bool dev_name_ends_with(struct device *dev, const char *suffix)
{
const char *name = dev_name(dev);
@@ -44,41 +39,36 @@ static void *typec_switch_match(struct device_connection *con, int ep,
{
struct device *dev;
- if (con->fwnode) {
- if (con->id && !fwnode_property_present(con->fwnode, con->id))
- return NULL;
+ if (con->id && !fwnode_property_present(con->fwnode, con->id))
+ return NULL;
- dev = class_find_device(&typec_mux_class, NULL, con->fwnode,
- switch_fwnode_match);
- } else {
- dev = class_find_device(&typec_mux_class, NULL,
- con->endpoint[ep], name_match);
- }
+ dev = class_find_device(&typec_mux_class, NULL, con->fwnode,
+ switch_fwnode_match);
return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
- * typec_switch_get - Find USB Type-C orientation switch
- * @dev: The caller device
+ * fwnode_typec_switch_get - Find USB Type-C orientation switch
+ * @fwnode: The caller device node
*
* Finds a switch linked with @fwnode. Returns a reference to the switch on
* success, NULL if no matching connection was found, or
* ERR_PTR(-EPROBE_DEFER) when a connection was found but the switch
* has not been enumerated yet.
*/
-struct typec_switch *typec_switch_get(struct device *dev)
+struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode)
{
struct typec_switch *sw;
- sw = device_connection_find_match(dev, "orientation-switch", NULL,
+ sw = fwnode_connection_find_match(fwnode, "orientation-switch", NULL,
typec_switch_match);
if (!IS_ERR_OR_NULL(sw))
WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
return sw;
}
-EXPORT_SYMBOL_GPL(typec_switch_get);
+EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
/**
* typec_switch_put - Release USB Type-C orientation switch
@@ -137,7 +127,8 @@ typec_switch_register(struct device *parent,
sw->dev.class = &typec_mux_class;
sw->dev.type = &typec_switch_dev_type;
sw->dev.driver_data = desc->drvdata;
- dev_set_name(&sw->dev, "%s-switch", dev_name(parent));
+ dev_set_name(&sw->dev, "%s-switch",
+ desc->name ? desc->name : dev_name(parent));
ret = device_add(&sw->dev);
if (ret) {
@@ -150,6 +141,16 @@ typec_switch_register(struct device *parent,
}
EXPORT_SYMBOL_GPL(typec_switch_register);
+int typec_switch_set(struct typec_switch *sw,
+ enum typec_orientation orientation)
+{
+ if (IS_ERR_OR_NULL(sw))
+ return 0;
+
+ return sw->set(sw, orientation);
+}
+EXPORT_SYMBOL_GPL(typec_switch_set);
+
/**
* typec_switch_unregister - Unregister USB Type-C orientation switch
* @sw: USB Type-C orientation switch
@@ -191,13 +192,6 @@ static void *typec_mux_match(struct device_connection *con, int ep, void *data)
u16 *val;
int i;
- if (!con->fwnode) {
- dev = class_find_device(&typec_mux_class, NULL,
- con->endpoint[ep], name_match);
-
- return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
- }
-
/*
* Check has the identifier already been "consumed". If it
* has, no need to do any extra connection identification.
@@ -247,8 +241,8 @@ find_mux:
}
/**
- * typec_mux_get - Find USB Type-C Multiplexer
- * @dev: The caller device
+ * fwnode_typec_mux_get - Find USB Type-C Multiplexer
+ * @fwnode: The caller device node
* @desc: Alt Mode description
*
* Finds a mux linked to the caller. This function is primarily meant for the
@@ -256,19 +250,19 @@ find_mux:
* matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection
* was found but the mux has not been enumerated yet.
*/
-struct typec_mux *typec_mux_get(struct device *dev,
- const struct typec_altmode_desc *desc)
+struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
+ const struct typec_altmode_desc *desc)
{
struct typec_mux *mux;
- mux = device_connection_find_match(dev, "mode-switch", (void *)desc,
+ mux = fwnode_connection_find_match(fwnode, "mode-switch", (void *)desc,
typec_mux_match);
if (!IS_ERR_OR_NULL(mux))
WARN_ON(!try_module_get(mux->dev.parent->driver->owner));
return mux;
}
-EXPORT_SYMBOL_GPL(typec_mux_get);
+EXPORT_SYMBOL_GPL(fwnode_typec_mux_get);
/**
* typec_mux_put - Release handle to a Multiplexer
@@ -285,6 +279,15 @@ void typec_mux_put(struct typec_mux *mux)
}
EXPORT_SYMBOL_GPL(typec_mux_put);
+int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+{
+ if (IS_ERR_OR_NULL(mux))
+ return 0;
+
+ return mux->set(mux, state);
+}
+EXPORT_SYMBOL_GPL(typec_mux_set);
+
static void typec_mux_release(struct device *dev)
{
kfree(to_typec_mux(dev));
@@ -326,7 +329,8 @@ typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
mux->dev.class = &typec_mux_class;
mux->dev.type = &typec_mux_dev_type;
mux->dev.driver_data = desc->drvdata;
- dev_set_name(&mux->dev, "%s-mux", dev_name(parent));
+ dev_set_name(&mux->dev, "%s-mux",
+ desc->name ? desc->name : dev_name(parent));
ret = device_add(&mux->dev);
if (ret) {
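
typec_switch_set() and typec_mux_set() introduced above are deliberately tolerant: a caller holding NULL or an ERR_PTR handle may call them unconditionally and gets a successful no-op. A generic sketch of the idiom (the handle type is illustrative):

#include <linux/err.h>

struct handle {
	int (*set)(struct handle *h, int value);
};

/* treat "no handle" and "handle not resolved" as a successful no-op,
 * so every caller can drop the if (h) boilerplate */
static int handle_set(struct handle *h, int value)
{
	if (IS_ERR_OR_NULL(h))
		return 0;

	return h->set(h, value);
}

This is why typec_set_orientation() and typec_set_mode() in class.c can now call the setters without checking port->sw or port->mux first.
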
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 01ed0d5e10e8..77eb97b2aa86 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -9,4 +9,13 @@ config TYPEC_MUX_PI3USB30532
Say Y or M if your system has a Pericom PI3USB30532 Type-C cross
switch / mux chip found on some devices with a Type-C port.
+config TYPEC_MUX_INTEL_PMC
+ tristate "Intel PMC mux control"
+ depends on INTEL_PMC_IPC
+ select USB_ROLE_SWITCH
+ help
+ Driver for USB muxes controlled by Intel PMC FW. Intel PMC FW can
+ control the USB role switch and also the multiplexer/demultiplexer
+ switches used with USB Type-C Alternate Modes.
+
endmenu
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 1332e469b8a0..280a6f553115 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
+obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
new file mode 100644
index 000000000000..c22e5c4bbf1a
--- /dev/null
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Intel PMC USB mux control
+ *
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/usb/role.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_tbt.h>
+
+#include <asm/intel_pmc_ipc.h>
+
+#define PMC_USBC_CMD 0xa7
+
+/* "Usage" OOB Message field values */
+enum {
+ PMC_USB_CONNECT,
+ PMC_USB_DISCONNECT,
+ PMC_USB_SAFE_MODE,
+ PMC_USB_ALT_MODE,
+ PMC_USB_DP_HPD,
+};
+
+#define PMC_USB_MSG_USB2_PORT_SHIFT 0
+#define PMC_USB_MSG_USB3_PORT_SHIFT 4
+#define PMC_USB_MSG_UFP_SHIFT 4
+#define PMC_USB_MSG_ORI_HSL_SHIFT 5
+#define PMC_USB_MSG_ORI_AUX_SHIFT 6
+
+/* Alt Mode Request */
+struct altmode_req {
+ u8 usage;
+ u8 mode_type;
+ u8 mode_id;
+ u8 reserved;
+ u32 mode_data;
+} __packed;
+
+#define PMC_USB_MODE_TYPE_SHIFT 4
+
+enum {
+ PMC_USB_MODE_TYPE_USB,
+ PMC_USB_MODE_TYPE_DP,
+ PMC_USB_MODE_TYPE_TBT,
+};
+
+/* Common Mode Data bits */
+#define PMC_USB_ALTMODE_ACTIVE_CABLE BIT(2)
+
+#define PMC_USB_ALTMODE_ORI_SHIFT 1
+#define PMC_USB_ALTMODE_UFP_SHIFT 3
+#define PMC_USB_ALTMODE_ORI_AUX_SHIFT 4
+#define PMC_USB_ALTMODE_ORI_HSL_SHIFT 5
+
+/* DP specific Mode Data bits */
+#define PMC_USB_ALTMODE_DP_MODE_SHIFT 8
+
+/* TBT specific Mode Data bits */
+#define PMC_USB_ALTMODE_HPD_HIGH BIT(14)
+#define PMC_USB_ALTMODE_TBT_TYPE BIT(17)
+#define PMC_USB_ALTMODE_CABLE_TYPE BIT(18)
+#define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20)
+#define PMC_USB_ALTMODE_FORCE_LSR BIT(23)
+#define PMC_USB_ALTMODE_CABLE_SPD(_s_) (((_s_) & GENMASK(2, 0)) << 25)
+#define PMC_USB_ALTMODE_CABLE_USB31 1
+#define PMC_USB_ALTMODE_CABLE_10GPS 2
+#define PMC_USB_ALTMODE_CABLE_20GPS 3
+#define PMC_USB_ALTMODE_TBT_GEN(_g_) (((_g_) & GENMASK(1, 0)) << 28)
+
+/* Display HPD Request bits */
+#define PMC_USB_DP_HPD_LVL BIT(4)
+#define PMC_USB_DP_HPD_IRQ BIT(5)
+
+struct pmc_usb;
+
+struct pmc_usb_port {
+ int num;
+ struct pmc_usb *pmc;
+ struct typec_mux *typec_mux;
+ struct typec_switch *typec_sw;
+ struct usb_role_switch *usb_sw;
+
+ enum typec_orientation orientation;
+ enum usb_role role;
+
+ u8 usb2_port;
+ u8 usb3_port;
+};
+
+struct pmc_usb {
+ u8 num_ports;
+ struct device *dev;
+ struct pmc_usb_port *port;
+};
+
+static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
+{
+ u8 response[4];
+
+ /*
+ * Error bit will always be 0 with the USBC command.
+ * Status can be checked from the response message.
+ */
+ intel_pmc_ipc_command(PMC_USBC_CMD, 0, msg, len,
+ (void *)response, 1);
+
+ if (response[2]) {
+ if (response[2] & BIT(1))
+ return -EIO;
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state)
+{
+ struct typec_displayport_data *data = state->data;
+ u8 msg[2] = { };
+
+ msg[0] = PMC_USB_DP_HPD;
+ msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+
+ msg[1] = PMC_USB_DP_HPD_IRQ;
+
+ if (data->status & DP_STATUS_HPD_STATE)
+ msg[1] |= PMC_USB_DP_HPD_LVL;
+
+ return pmc_usb_command(port, msg, sizeof(msg));
+}
+
+static int
+pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
+{
+ struct typec_displayport_data *data = state->data;
+ struct altmode_req req = { };
+
+ if (data->status & DP_STATUS_IRQ_HPD)
+ return pmc_usb_mux_dp_hpd(port, state);
+
+ req.usage = PMC_USB_ALT_MODE;
+ req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+ req.mode_type = PMC_USB_MODE_TYPE_DP << PMC_USB_MODE_TYPE_SHIFT;
+
+ req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
+ req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
+ req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
+ req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
+
+ req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
+ PMC_USB_ALTMODE_DP_MODE_SHIFT;
+
+ if (data->status & DP_STATUS_HPD_STATE)
+ req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
+
+ return pmc_usb_command(port, (void *)&req, sizeof(req));
+}
+
+static int
+pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
+{
+ struct typec_thunderbolt_data *data = state->data;
+ u8 cable_speed = TBT_CABLE_SPEED(data->cable_mode);
+ struct altmode_req req = { };
+
+ req.usage = PMC_USB_ALT_MODE;
+ req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+ req.mode_type = PMC_USB_MODE_TYPE_TBT << PMC_USB_MODE_TYPE_SHIFT;
+
+ req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
+ req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
+ req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
+ req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
+
+ if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3)
+ req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE;
+
+ if (data->cable_mode & TBT_CABLE_OPTICAL)
+ req.mode_data |= PMC_USB_ALTMODE_CABLE_TYPE;
+
+ if (data->cable_mode & TBT_CABLE_LINK_TRAINING)
+ req.mode_data |= PMC_USB_ALTMODE_ACTIVE_LINK;
+
+ if (data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE)
+ req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE;
+
+ req.mode_data |= PMC_USB_ALTMODE_CABLE_SPD(cable_speed);
+
+ return pmc_usb_command(port, (void *)&req, sizeof(req));
+}
+
+static int pmc_usb_mux_safe_state(struct pmc_usb_port *port)
+{
+ u8 msg;
+
+ msg = PMC_USB_SAFE_MODE;
+ msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+
+ return pmc_usb_command(port, &msg, sizeof(msg));
+}
+
+static int pmc_usb_connect(struct pmc_usb_port *port)
+{
+ u8 msg[2];
+
+ msg[0] = PMC_USB_CONNECT;
+ msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+
+ msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;
+ msg[1] |= (port->orientation - 1) << PMC_USB_MSG_ORI_HSL_SHIFT;
+ msg[1] |= (port->orientation - 1) << PMC_USB_MSG_ORI_AUX_SHIFT;
+
+ return pmc_usb_command(port, msg, sizeof(msg));
+}
+
+static int pmc_usb_disconnect(struct pmc_usb_port *port)
+{
+ u8 msg[2];
+
+ msg[0] = PMC_USB_DISCONNECT;
+ msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+
+ msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;
+
+ return pmc_usb_command(port, msg, sizeof(msg));
+}
+
+static int
+pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+{
+ struct pmc_usb_port *port = typec_mux_get_drvdata(mux);
+
+ if (!state->alt)
+ return 0;
+
+ if (state->mode == TYPEC_STATE_SAFE)
+ return pmc_usb_mux_safe_state(port);
+
+ switch (state->alt->svid) {
+ case USB_TYPEC_TBT_SID:
+ return pmc_usb_mux_tbt(port, state);
+ case USB_TYPEC_DP_SID:
+ return pmc_usb_mux_dp(port, state);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int pmc_usb_set_orientation(struct typec_switch *sw,
+ enum typec_orientation orientation)
+{
+ struct pmc_usb_port *port = typec_switch_get_drvdata(sw);
+
+ if (port->orientation == orientation)
+ return 0;
+
+ port->orientation = orientation;
+
+ if (port->role) {
+ if (orientation == TYPEC_ORIENTATION_NONE)
+ return pmc_usb_disconnect(port);
+ else
+ return pmc_usb_connect(port);
+ }
+
+ return 0;
+}
+
+static int pmc_usb_set_role(struct usb_role_switch *sw, enum usb_role role)
+{
+ struct pmc_usb_port *port = usb_role_switch_get_drvdata(sw);
+
+ if (port->role == role)
+ return 0;
+
+ port->role = role;
+
+ if (port->orientation) {
+ if (role == USB_ROLE_NONE)
+ return pmc_usb_disconnect(port);
+ else
+ return pmc_usb_connect(port);
+ }
+
+ return 0;
+}
+
+static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
+ struct fwnode_handle *fwnode)
+{
+ struct pmc_usb_port *port = &pmc->port[index];
+ struct usb_role_switch_desc desc = { };
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ int ret;
+
+ ret = fwnode_property_read_u8(fwnode, "usb2-port-number", &port->usb2_port);
+ if (ret)
+ return ret;
+
+ ret = fwnode_property_read_u8(fwnode, "usb3-port-number", &port->usb3_port);
+ if (ret)
+ return ret;
+
+ port->num = index;
+ port->pmc = pmc;
+
+ sw_desc.fwnode = fwnode;
+ sw_desc.drvdata = port;
+ sw_desc.name = fwnode_get_name(fwnode);
+ sw_desc.set = pmc_usb_set_orientation;
+
+ port->typec_sw = typec_switch_register(pmc->dev, &sw_desc);
+ if (IS_ERR(port->typec_sw))
+ return PTR_ERR(port->typec_sw);
+
+ mux_desc.fwnode = fwnode;
+ mux_desc.drvdata = port;
+ mux_desc.name = fwnode_get_name(fwnode);
+ mux_desc.set = pmc_usb_mux_set;
+
+ port->typec_mux = typec_mux_register(pmc->dev, &mux_desc);
+ if (IS_ERR(port->typec_mux)) {
+ ret = PTR_ERR(port->typec_mux);
+ goto err_unregister_switch;
+ }
+
+ desc.fwnode = fwnode;
+ desc.driver_data = port;
+ desc.name = fwnode_get_name(fwnode);
+ desc.set = pmc_usb_set_role;
+
+ port->usb_sw = usb_role_switch_register(pmc->dev, &desc);
+ if (IS_ERR(port->usb_sw)) {
+ ret = PTR_ERR(port->usb_sw);
+ goto err_unregister_mux;
+ }
+
+ return 0;
+
+err_unregister_mux:
+ typec_mux_unregister(port->typec_mux);
+
+err_unregister_switch:
+ typec_switch_unregister(port->typec_sw);
+
+ return ret;
+}
+
+static int pmc_usb_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct pmc_usb *pmc;
+ int i = 0;
+ int ret;
+
+ pmc = devm_kzalloc(&pdev->dev, sizeof(*pmc), GFP_KERNEL);
+ if (!pmc)
+ return -ENOMEM;
+
+ device_for_each_child_node(&pdev->dev, fwnode)
+ pmc->num_ports++;
+
+ pmc->port = devm_kcalloc(&pdev->dev, pmc->num_ports,
+ sizeof(struct pmc_usb_port), GFP_KERNEL);
+ if (!pmc->port)
+ return -ENOMEM;
+
+ pmc->dev = &pdev->dev;
+
+ /*
+ * For every physical USB connector (USB2 and USB3 combo) there is a
+ * child ACPI device node under the PMC mux ACPI device object.
+ */
+ for (i = 0; i < pmc->num_ports; i++) {
+ fwnode = device_get_next_child_node(pmc->dev, fwnode);
+ if (!fwnode)
+ break;
+
+ ret = pmc_usb_register_port(pmc, i, fwnode);
+ if (ret)
+ goto err_remove_ports;
+ }
+
+ platform_set_drvdata(pdev, pmc);
+
+ return 0;
+
+err_remove_ports:
+ for (i = 0; i < pmc->num_ports; i++) {
+ typec_switch_unregister(pmc->port[i].typec_sw);
+ typec_mux_unregister(pmc->port[i].typec_mux);
+ }
+
+ return ret;
+}
+
+static int pmc_usb_remove(struct platform_device *pdev)
+{
+ struct pmc_usb *pmc = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pmc->num_ports; i++) {
+ typec_switch_unregister(pmc->port[i].typec_sw);
+ typec_mux_unregister(pmc->port[i].typec_mux);
+ }
+
+ return 0;
+}
+
+static const struct acpi_device_id pmc_usb_acpi_ids[] = {
+ { "INTC105C", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pmc_usb_acpi_ids);
+
+static struct platform_driver pmc_usb_driver = {
+ .driver = {
+ .name = "intel_pmc_usb",
+ .acpi_match_table = ACPI_PTR(pmc_usb_acpi_ids),
+ },
+ .probe = pmc_usb_probe,
+ .remove = pmc_usb_remove,
+};
+
+module_platform_driver(pmc_usb_driver);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel PMC USB mux control");
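
The new intel_pmc_mux driver above talks to the PMC with short byte messages assembled from shifted fields. A standalone sketch of the connect-message layout using the same shift constants (port numbers and the orientation value are made up; orientation is 1-based in the driver, hence the -1):

#include <stdio.h>

#define PMC_USB_CONNECT			0
#define PMC_USB_MSG_USB2_PORT_SHIFT	0
#define PMC_USB_MSG_USB3_PORT_SHIFT	4
#define PMC_USB_MSG_ORI_HSL_SHIFT	5
#define PMC_USB_MSG_ORI_AUX_SHIFT	6

int main(void)
{
	unsigned char msg[2] = { 0 };
	unsigned usb2_port = 1, usb3_port = 2, orientation = 2; /* reverse */

	msg[0] = PMC_USB_CONNECT;
	msg[0] |= usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;

	msg[1] = usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;
	msg[1] |= (orientation - 1) << PMC_USB_MSG_ORI_HSL_SHIFT;
	msg[1] |= (orientation - 1) << PMC_USB_MSG_ORI_AUX_SHIFT;

	printf("msg = %02x %02x\n", msg[0], msg[1]);	/* prints: msg = 20 61 */
	return 0;
}
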
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 46457c133d2b..7afe275b17d0 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -114,8 +114,8 @@ pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
static int pi3usb30532_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct typec_switch_desc sw_desc;
- struct typec_mux_desc mux_desc;
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
struct pi3usb30532 *pi;
int ret;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index f3087ef8265c..82b19ebd7838 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -373,6 +373,14 @@ struct pd_rx_event {
((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
(port)->port_type == TYPEC_PORT_DRP)
+#define tcpm_data_role_for_source(port) \
+ ((port)->typec_caps.data == TYPEC_PORT_UFP ? \
+ TYPEC_DEVICE : TYPEC_HOST)
+
+#define tcpm_data_role_for_sink(port) \
+ ((port)->typec_caps.data == TYPEC_PORT_DFP ? \
+ TYPEC_HOST : TYPEC_DEVICE)
+
static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
{
if (port->port_type == TYPEC_PORT_DRP) {
@@ -788,10 +796,30 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached,
else
orientation = TYPEC_ORIENTATION_REVERSE;
- if (data == TYPEC_HOST)
- usb_role = USB_ROLE_HOST;
- else
- usb_role = USB_ROLE_DEVICE;
+ if (port->typec_caps.data == TYPEC_PORT_DRD) {
+ if (data == TYPEC_HOST)
+ usb_role = USB_ROLE_HOST;
+ else
+ usb_role = USB_ROLE_DEVICE;
+ } else if (port->typec_caps.data == TYPEC_PORT_DFP) {
+ if (data == TYPEC_HOST) {
+ if (role == TYPEC_SOURCE)
+ usb_role = USB_ROLE_HOST;
+ else
+ usb_role = USB_ROLE_NONE;
+ } else {
+ return -ENOTSUPP;
+ }
+ } else {
+ if (data == TYPEC_DEVICE) {
+ if (role == TYPEC_SINK)
+ usb_role = USB_ROLE_DEVICE;
+ else
+ usb_role = USB_ROLE_NONE;
+ } else {
+ return -ENOTSUPP;
+ }
+ }
ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
if (ret < 0)
@@ -1817,7 +1845,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, SOFT_RESET, 0);
break;
case PD_CTRL_DR_SWAP:
- if (port->port_type != TYPEC_PORT_DRP) {
+ if (port->typec_caps.data != TYPEC_PORT_DRD) {
tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
break;
}
@@ -2618,7 +2646,8 @@ static int tcpm_src_attach(struct tcpm_port *port)
if (ret < 0)
return ret;
- ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
+ ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
+ tcpm_data_role_for_source(port));
if (ret < 0)
return ret;
@@ -2740,7 +2769,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)
if (ret < 0)
return ret;
- ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
+ ret = tcpm_set_roles(port, true, TYPEC_SINK,
+ tcpm_data_role_for_sink(port));
if (ret < 0)
return ret;
@@ -2766,7 +2796,8 @@ static int tcpm_acc_attach(struct tcpm_port *port)
if (port->attached)
return 0;
- ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
+ ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
+ tcpm_data_role_for_source(port));
if (ret < 0)
return ret;
@@ -3293,7 +3324,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_vconn(port, true);
tcpm_set_vbus(port, false);
tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
- TYPEC_HOST);
+ tcpm_data_role_for_source(port));
tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
break;
case SRC_HARD_RESET_VBUS_ON:
@@ -3308,7 +3339,7 @@ static void run_state_machine(struct tcpm_port *port)
if (port->pd_capable)
tcpm_set_charge(port, false);
tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
- TYPEC_DEVICE);
+ tcpm_data_role_for_sink(port));
/*
* VBUS may or may not toggle, depending on the adapter.
* If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
@@ -3649,8 +3680,12 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
case SRC_SEND_CAPABILITIES:
case SRC_READY:
if (tcpm_port_is_disconnected(port) ||
- !tcpm_port_is_source(port))
- tcpm_set_state(port, SRC_UNATTACHED, 0);
+ !tcpm_port_is_source(port)) {
+ if (port->port_type == TYPEC_PORT_SRC)
+ tcpm_set_state(port, SRC_UNATTACHED, 0);
+ else
+ tcpm_set_state(port, SNK_UNATTACHED, 0);
+ }
break;
case SNK_UNATTACHED:
if (tcpm_port_is_sink(port))
@@ -3759,6 +3794,14 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
*/
break;
+ case PORT_RESET:
+ case PORT_RESET_WAIT_OFF:
+ /*
+ * State set back to default mode once the timer completes.
+ * Ignore CC changes here.
+ */
+ break;
+
default:
if (tcpm_port_is_disconnected(port))
tcpm_set_state(port, unattached_state(port), 0);
@@ -3820,6 +3863,15 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
case SRC_TRY_DEBOUNCE:
/* Do nothing, waiting for sink detection */
break;
+
+ case PORT_RESET:
+ case PORT_RESET_WAIT_OFF:
+ /*
+ * State set back to default mode once the timer completes.
+ * Ignore vbus changes here.
+ */
+ break;
+
default:
break;
}
@@ -3873,10 +3925,19 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
case PORT_RESET_WAIT_OFF:
tcpm_set_state(port, tcpm_default_state(port), 0);
break;
+
case SRC_TRY_WAIT:
case SRC_TRY_DEBOUNCE:
/* Do nothing, waiting for sink detection */
break;
+
+ case PORT_RESET:
+ /*
+ * State set back to default mode once the timer completes.
+ * Ignore vbus changes here.
+ */
+ break;
+
default:
if (port->pwr_role == TYPEC_SINK &&
port->attached)
@@ -3969,7 +4030,7 @@ static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
mutex_lock(&port->swap_lock);
mutex_lock(&port->lock);
- if (port->port_type != TYPEC_PORT_DRP) {
+ if (port->typec_caps.data != TYPEC_PORT_DRD) {
ret = -EINVAL;
goto port_unlock;
}
@@ -4711,6 +4772,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
port->typec_caps.driver_data = port;
port->typec_caps.ops = &tcpm_ops;
+ port->typec_caps.orientation_aware = 1;
port->partner_desc.identity = &port->partner_ident;
port->port_type = port->typec_caps.type;
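
The tcpm_data_role_for_source()/tcpm_data_role_for_sink() macros added above pin the data role for fixed-capability ports: a UFP-only port stays TYPEC_DEVICE even while sourcing power, and a DFP-only port stays TYPEC_HOST even while sinking. A reduced model of that mapping (enums are stand-ins for the kernel types):

#include <stdio.h>

enum port_data { PORT_UFP, PORT_DFP, PORT_DRD };
enum data_role { DEVICE, HOST };

/* role a source-attached port takes, mirroring
 * tcpm_data_role_for_source(): only a UFP-only port stays DEVICE */
static enum data_role role_for_source(enum port_data cap)
{
	return cap == PORT_UFP ? DEVICE : HOST;
}

/* role a sink-attached port takes, mirroring
 * tcpm_data_role_for_sink(): only a DFP-only port stays HOST */
static enum data_role role_for_sink(enum port_data cap)
{
	return cap == PORT_DFP ? HOST : DEVICE;
}

int main(void)
{
	printf("UFP-only port as source -> %s\n",
	       role_for_source(PORT_UFP) == DEVICE ? "device" : "host");
	printf("DFP-only port as sink   -> %s\n",
	       role_for_sink(PORT_DFP) == HOST ? "host" : "device");
	return 0;
}
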
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index d5a6aac86327..ddf2ad3752de 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -270,9 +270,16 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
switch (desc->svid) {
case USB_TYPEC_DP_SID:
- case USB_TYPEC_NVIDIA_VLINK_SID:
alt = ucsi_register_displayport(con, override, i, desc);
break;
+ case USB_TYPEC_NVIDIA_VLINK_SID:
+ if (desc->vdo == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
+ alt = typec_port_register_altmode(con->port,
+ desc);
+ else
+ alt = ucsi_register_displayport(con, override,
+ i, desc);
+ break;
default:
alt = typec_port_register_altmode(con->port, desc);
break;
@@ -400,7 +407,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
struct typec_altmode_desc desc;
struct ucsi_altmode alt[2];
u64 command;
- int num = 1;
+ int num;
int ret;
int len;
int j;
@@ -475,7 +482,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
while (adev[i]) {
if (recipient == UCSI_RECIPIENT_SOP &&
(adev[i]->svid == USB_TYPEC_DP_SID ||
- adev[i]->svid == USB_TYPEC_NVIDIA_VLINK_SID)) {
+ (adev[i]->svid == USB_TYPEC_NVIDIA_VLINK_SID &&
+ adev[i]->vdo != USB_TYPEC_NVIDIA_VLINK_DBG_VDO))) {
pdev = typec_altmode_get_partner(adev[i]);
ucsi_displayport_remove_partner((void *)pdev);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index e434b9c9a9eb..8e831108f481 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -119,12 +119,14 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_SET_PDR_ACCEPT_ROLE_SWAPS BIT(25)
/* GET_ALTERNATE_MODES command bits */
+#define UCSI_ALTMODE_RECIPIENT(_r_) (((_r_) >> 16) & 0x7)
#define UCSI_GET_ALTMODE_RECIPIENT(_r_) ((u64)(_r_) << 16)
#define UCSI_RECIPIENT_CON 0
#define UCSI_RECIPIENT_SOP 1
#define UCSI_RECIPIENT_SOP_P 2
#define UCSI_RECIPIENT_SOP_PP 3
#define UCSI_GET_ALTMODE_CONNECTOR_NUMBER(_r_) ((u64)(_r_) << 24)
+#define UCSI_ALTMODE_OFFSET(_r_) (((_r_) >> 32) & 0xff)
#define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32)
#define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40)
@@ -340,4 +342,11 @@ static inline void
ucsi_displayport_remove_partner(struct typec_altmode *adev) { }
#endif /* CONFIG_TYPEC_DP_ALTMODE */
+/*
+ * NVIDIA VirtualLink (svid 0x955) has two altmodes: VirtualLink
+ * DP mode with vdo=0x1 and NVIDIA test mode with vdo=0x3.
+ */
+#define USB_TYPEC_NVIDIA_VLINK_DP_VDO 0x1
+#define USB_TYPEC_NVIDIA_VLINK_DBG_VDO 0x3
+
#endif /* __DRIVER_USB_TYPEC_UCSI_H */
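
UCSI_ALTMODE_RECIPIENT() and UCSI_ALTMODE_OFFSET() added above are the decode halves of the existing encode macros, recovering fields from a previously built 64-bit command. A round-trip sketch with the same bit positions (UCSI_ prefixes dropped for brevity):

#include <stdio.h>
#include <stdint.h>

#define GET_ALTMODE_RECIPIENT(r)	((uint64_t)(r) << 16)
#define ALTMODE_RECIPIENT(cmd)		(((cmd) >> 16) & 0x7)
#define GET_ALTMODE_OFFSET(o)		((uint64_t)(o) << 32)
#define ALTMODE_OFFSET(cmd)		(((cmd) >> 32) & 0xff)

int main(void)
{
	uint64_t cmd = GET_ALTMODE_RECIPIENT(1) | GET_ALTMODE_OFFSET(3);

	/* decode recovers what encode packed in */
	printf("recipient=%llu offset=%llu\n",
	       (unsigned long long)ALTMODE_RECIPIENT(cmd),
	       (unsigned long long)ALTMODE_OFFSET(cmd));
	return 0;
}

This is what lets ucsi_ccg_read() inspect uc->last_cmd_sent to tell which altmode offset the response corresponds to.
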
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index a5b8530490db..bff96d64dddf 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -125,6 +125,10 @@ struct version_format {
#define CCG_FW_BUILD_NVIDIA (('n' << 8) | 'v')
#define CCG_OLD_FW_VERSION (CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))
+/* Altmode offset for NVIDIA Function Test Board (FTB) */
+#define NVIDIA_FTB_DP_OFFSET (2)
+#define NVIDIA_FTB_DBG_OFFSET (3)
+
struct version_info {
struct version_format base;
struct version_format app;
@@ -477,24 +481,65 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
}
+/*
+ * Change the order of the vdo values for the NVIDIA test device FTB
+ * (Function Test Board), which reports its altmode list with vdo=0x3
+ * first and then vdo=0x1. The current logic assigns mode values based
+ * on the order in the altmode list, so this causes a mismatch between
+ * CON and SOP altmodes, since the NVIDIA GPU connector reports vdo=0x1
+ * first and then vdo=0x3.
+ */
+static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
+ struct ucsi_altmode *alt)
+{
+ switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
+ case NVIDIA_FTB_DP_OFFSET:
+ if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
+ alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
+ DP_CAP_DP_SIGNALING | DP_CAP_USB |
+ DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
+ break;
+ case NVIDIA_FTB_DBG_OFFSET:
+ if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
+ alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO;
+ break;
+ default:
+ break;
+ }
+}
+
static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
void *val, size_t val_len)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
- int ret;
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
+ struct ucsi_altmode *alt;
+ int ret;
ret = ccg_read(uc, reg, val, val_len);
if (ret)
return ret;
- if (offset == UCSI_MESSAGE_IN) {
- if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_GET_CURRENT_CAM &&
- uc->has_multiple_dp) {
+ if (offset != UCSI_MESSAGE_IN)
+ return ret;
+
+ switch (UCSI_COMMAND(uc->last_cmd_sent)) {
+ case UCSI_GET_CURRENT_CAM:
+ if (uc->has_multiple_dp)
ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
+ break;
+ case UCSI_GET_ALTERNATE_MODES:
+ if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
+ UCSI_RECIPIENT_SOP) {
+ alt = val;
+ if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
+ ucsi_ccg_nvidia_altmode(uc, alt);
}
- uc->last_cmd_sent = 0;
+ break;
+ default:
+ break;
}
+ uc->last_cmd_sent = 0;
return ret;
}
@@ -1219,6 +1264,7 @@ static int ccg_restart(struct ucsi_ccg *uc)
return status;
}
+ pm_runtime_enable(uc->dev);
return 0;
}
@@ -1234,6 +1280,7 @@ static void ccg_update_firmware(struct work_struct *work)
if (flash_mode != FLASH_NOT_NEEDED) {
ucsi_unregister(uc->ucsi);
+ pm_runtime_disable(uc->dev);
free_irq(uc->irq, uc);
ccg_fw_update(uc, flash_mode);
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
new file mode 100644
index 000000000000..e8140065c8a5
--- /dev/null
+++ b/drivers/vdpa/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig VDPA
+ tristate "vDPA drivers"
+ help
+ Enable this module to support vDPA devices that use a
+ datapath complying with the virtio specification and a
+ vendor-specific control path.
+
+if VDPA
+
+config VDPA_SIM
+ tristate "vDPA device simulator"
+ depends on RUNTIME_TESTING_MENU && HAS_DMA && VHOST_DPN
+ select VHOST_RING
+ default n
+ help
+ vDPA networking device simulator which loops TX traffic back
+ to RX. This device is used for testing, prototyping and
+ development of vDPA.
+
+config IFCVF
+ tristate "Intel IFC VF vDPA driver"
+ depends on PCI_MSI
+ default n
+ help
+ This kernel module drives the Intel IFC VF NIC, offloading
+ virtio dataplane traffic to hardware.
+ To compile this driver as a module, choose M here: the module will
+ be called ifcvf.
+
+endif # VDPA
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
new file mode 100644
index 000000000000..8bbb686ca7a2
--- /dev/null
+++ b/drivers/vdpa/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VDPA) += vdpa.o
+obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
+obj-$(CONFIG_IFCVF) += ifcvf/
diff --git a/drivers/vdpa/ifcvf/Makefile b/drivers/vdpa/ifcvf/Makefile
new file mode 100644
index 000000000000..d709915995ab
--- /dev/null
+++ b/drivers/vdpa/ifcvf/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IFCVF) += ifcvf.o
+ifcvf-$(CONFIG_IFCVF) += ifcvf_main.o ifcvf_base.o
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
new file mode 100644
index 000000000000..e24371d644b5
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#include "ifcvf_base.h"
+
+static inline u8 ifc_ioread8(u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+
+static inline u16 ifc_ioread16(__le16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 ifc_ioread32(__le32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static void ifc_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo, __le32 __iomem *hi)
+{
+ ifc_iowrite32((u32)val, lo);
+ ifc_iowrite32(val >> 32, hi);
+}
+
+struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
+{
+ return container_of(hw, struct ifcvf_adapter, vf);
+}
+
+static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
+ struct virtio_pci_cap *cap)
+{
+ struct ifcvf_adapter *ifcvf;
+ struct pci_dev *pdev;
+ u32 length, offset;
+ u8 bar;
+
+ length = le32_to_cpu(cap->length);
+ offset = le32_to_cpu(cap->offset);
+ bar = cap->bar;
+
+ ifcvf = vf_to_adapter(hw);
+ pdev = ifcvf->pdev;
+
+ if (bar >= IFCVF_PCI_MAX_RESOURCE) {
+ IFCVF_DBG(pdev,
+ "Invalid bar number %u to get capabilities\n", bar);
+ return NULL;
+ }
+
+ if (offset + length > pci_resource_len(pdev, bar)) {
+ IFCVF_DBG(pdev,
+ "offset(%u) + len(%u) overflows bar%u's capability\n",
+ offset, length, bar);
+ return NULL;
+ }
+
+ return hw->base[bar] + offset;
+}
+
+static int ifcvf_read_config_range(struct pci_dev *dev,
+ uint32_t *val, int size, int where)
+{
+ int ret, i;
+
+ for (i = 0; i < size; i += 4) {
+ ret = pci_read_config_dword(dev, where + i, val + i / 4);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
+{
+ struct virtio_pci_cap cap;
+ u16 notify_off;
+ int ret;
+ u8 pos;
+ u32 i;
+
+ ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
+ if (ret < 0) {
+ IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
+ return -EIO;
+ }
+
+ while (pos) {
+ ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
+ sizeof(cap), pos);
+ if (ret < 0) {
+ IFCVF_ERR(pdev,
+ "Failed to get PCI capability at %x\n", pos);
+ break;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR)
+ goto next;
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
+ hw->common_cfg);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ pci_read_config_dword(pdev, pos + sizeof(cap),
+ &hw->notify_off_multiplier);
+ hw->notify_bar = cap.bar;
+ hw->notify_base = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->notify_base = %p\n",
+ hw->notify_base);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->net_cfg = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->isr == NULL || hw->net_cfg == NULL) {
+ IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ ifc_iowrite16(i, &hw->common_cfg->queue_select);
+ notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
+ hw->vring[i].notify_addr = hw->notify_base +
+ notify_off * hw->notify_off_multiplier;
+ }
+
+ hw->lm_cfg = hw->base[IFCVF_LM_BAR];
+
+ IFCVF_DBG(pdev,
+ "PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n",
+ hw->common_cfg, hw->notify_base, hw->isr,
+ hw->net_cfg, hw->notify_off_multiplier);
+
+ return 0;
+}
+
+u8 ifcvf_get_status(struct ifcvf_hw *hw)
+{
+ return ifc_ioread8(&hw->common_cfg->device_status);
+}
+
+void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
+{
+ ifc_iowrite8(status, &hw->common_cfg->device_status);
+}
+
+void ifcvf_reset(struct ifcvf_hw *hw)
+{
+ ifcvf_set_status(hw, 0);
+ /* flush set_status, make sure VF is stopped, reset */
+ ifcvf_get_status(hw);
+}
+
+static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
+{
+ if (status != 0)
+ status |= ifcvf_get_status(hw);
+
+ ifcvf_set_status(hw, status);
+ ifcvf_get_status(hw);
+}
+
+u64 ifcvf_get_features(struct ifcvf_hw *hw)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ u32 features_lo, features_hi;
+
+ ifc_iowrite32(0, &cfg->device_feature_select);
+ features_lo = ifc_ioread32(&cfg->device_feature);
+
+ ifc_iowrite32(1, &cfg->device_feature_select);
+ features_hi = ifc_ioread32(&cfg->device_feature);
+
+ return ((u64)features_hi << 32) | features_lo;
+}
+
+void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+ void *dst, int length)
+{
+ u8 old_gen, new_gen, *p;
+ int i;
+
+ WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ do {
+ old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = ifc_ioread8(hw->net_cfg + offset + i);
+
+ new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+ const void *src, int length)
+{
+ const u8 *p;
+ int i;
+
+ p = src;
+ WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ for (i = 0; i < length; i++)
+ ifc_iowrite8(*p++, hw->net_cfg + offset + i);
+}
+
+static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ ifc_iowrite32(0, &cfg->guest_feature_select);
+ ifc_iowrite32((u32)features, &cfg->guest_feature);
+
+ ifc_iowrite32(1, &cfg->guest_feature_select);
+ ifc_iowrite32(features >> 32, &cfg->guest_feature);
+}
+
+static int ifcvf_config_features(struct ifcvf_hw *hw)
+{
+ struct ifcvf_adapter *ifcvf;
+
+ ifcvf = vf_to_adapter(hw);
+ ifcvf_set_features(hw, hw->req_features);
+ ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
+
+ if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
+{
+ struct ifcvf_lm_cfg __iomem *ifcvf_lm;
+ void __iomem *avail_idx_addr;
+ u16 last_avail_idx;
+ u32 q_pair_id;
+
+ ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
+ q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
+ last_avail_idx = ifc_ioread16(avail_idx_addr);
+
+ return last_avail_idx;
+}
+
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num)
+{
+ struct ifcvf_lm_cfg __iomem *ifcvf_lm;
+ void __iomem *avail_idx_addr;
+ u32 q_pair_id;
+
+ ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
+ q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
+ hw->vring[qid].last_avail_idx = num;
+ ifc_iowrite16(num, avail_idx_addr);
+
+ return 0;
+}
+
+static int ifcvf_hw_enable(struct ifcvf_hw *hw)
+{
+ struct virtio_pci_common_cfg __iomem *cfg;
+ struct ifcvf_adapter *ifcvf;
+ u32 i;
+
+ ifcvf = vf_to_adapter(hw);
+ cfg = hw->common_cfg;
+ ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
+
+ if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hw->nr_vring; i++) {
+ if (!hw->vring[i].ready)
+ break;
+
+ ifc_iowrite16(i, &cfg->queue_select);
+ ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+ ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
+ ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);
+
+ if (ifc_ioread16(&cfg->queue_msix_vector) ==
+ VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(ifcvf->pdev,
+ "No msix vector for queue %u\n", i);
+ return -EINVAL;
+ }
+
+ ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
+ ifc_iowrite16(1, &cfg->queue_enable);
+ }
+
+ return 0;
+}
+
+static void ifcvf_hw_disable(struct ifcvf_hw *hw)
+{
+ struct virtio_pci_common_cfg __iomem *cfg;
+ u32 i;
+
+ cfg = hw->common_cfg;
+ ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);
+
+ for (i = 0; i < hw->nr_vring; i++) {
+ ifc_iowrite16(i, &cfg->queue_select);
+ ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+ }
+
+ ifc_ioread16(&cfg->queue_msix_vector);
+}
+
+int ifcvf_start_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_reset(hw);
+ ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
+
+ if (ifcvf_config_features(hw) < 0)
+ return -EINVAL;
+
+ if (ifcvf_hw_enable(hw) < 0)
+ return -EINVAL;
+
+ ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
+
+ return 0;
+}
+
+void ifcvf_stop_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_hw_disable(hw);
+ ifcvf_reset(hw);
+}
+
+void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
+{
+ ifc_iowrite16(qid, hw->vring[qid].notify_addr);
+}
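
ifcvf_read_net_config() above follows the virtio config-generation protocol: sample the generation counter, copy the config bytes, re-sample, and retry if the device bumped the counter mid-copy. A simplified single-threaded model of the loop (the device side is simulated, so it converges on the first pass):

#include <stdio.h>
#include <string.h>

static volatile unsigned char generation;
static unsigned char config[8];

static void read_config(void *dst, int len)
{
	unsigned char old_gen, new_gen;

	do {
		old_gen = generation;
		memcpy(dst, config, len);
		new_gen = generation;
	} while (old_gen != new_gen);	/* torn read: try again */
}

int main(void)
{
	unsigned char buf[8];

	memcpy(config, "\x01\x02\x03\x04\x05\x06\x07\x08", 8);
	read_config(buf, sizeof(buf));
	printf("first byte: %#x\n", buf[0]);
	return 0;
}
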
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
new file mode 100644
index 000000000000..e80307092351
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#ifndef _IFCVF_H_
+#define _IFCVF_H_
+
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/vdpa.h>
+#include <uapi/linux/virtio_net.h>
+#include <uapi/linux/virtio_config.h>
+#include <uapi/linux/virtio_pci.h>
+
+#define IFCVF_VENDOR_ID 0x1AF4
+#define IFCVF_DEVICE_ID 0x1041
+#define IFCVF_SUBSYS_VENDOR_ID 0x8086
+#define IFCVF_SUBSYS_DEVICE_ID 0x001A
+
+#define IFCVF_SUPPORTED_FEATURES \
+ ((1ULL << VIRTIO_NET_F_MAC) | \
+ (1ULL << VIRTIO_F_ANY_LAYOUT) | \
+ (1ULL << VIRTIO_F_VERSION_1) | \
+ (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF))
+
+/* Only one queue pair for now. */
+#define IFCVF_MAX_QUEUE_PAIRS 1
+
+#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
+#define IFCVF_QUEUE_MAX 32768
+#define IFCVF_MSI_CONFIG_OFF 0
+#define IFCVF_MSI_QUEUE_OFF 1
+#define IFCVF_PCI_MAX_RESOURCE 6
+
+#define IFCVF_LM_CFG_SIZE 0x40
+#define IFCVF_LM_RING_STATE_OFFSET 0x20
+#define IFCVF_LM_BAR 4
+
+#define IFCVF_ERR(pdev, fmt, ...) dev_err(&pdev->dev, fmt, ##__VA_ARGS__)
+#define IFCVF_DBG(pdev, fmt, ...) dev_dbg(&pdev->dev, fmt, ##__VA_ARGS__)
+#define IFCVF_INFO(pdev, fmt, ...) dev_info(&pdev->dev, fmt, ##__VA_ARGS__)
+
+#define ifcvf_private_to_vf(adapter) \
+ (&((struct ifcvf_adapter *)adapter)->vf)
+
+#define IFCVF_MAX_INTR (IFCVF_MAX_QUEUE_PAIRS * 2 + 1)
+
+struct vring_info {
+ u64 desc;
+ u64 avail;
+ u64 used;
+ u16 size;
+ u16 last_avail_idx;
+ bool ready;
+ void __iomem *notify_addr;
+ u32 irq;
+ struct vdpa_callback cb;
+ char msix_name[256];
+};
+
+struct ifcvf_hw {
+ u8 __iomem *isr;
+ /* Live migration */
+ u8 __iomem *lm_cfg;
+ u16 nr_vring;
+ /* Notification bar number */
+ u8 notify_bar;
+ /* Notification bar address */
+ void __iomem *notify_base;
+ u32 notify_off_multiplier;
+ u64 req_features;
+ struct virtio_pci_common_cfg __iomem *common_cfg;
+ void __iomem *net_cfg;
+ struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
+ void __iomem * const *base;
+};
+
+struct ifcvf_adapter {
+ struct vdpa_device vdpa;
+ struct pci_dev *pdev;
+ struct ifcvf_hw vf;
+};
+
+struct ifcvf_vring_lm_cfg {
+ u32 idx_addr[2];
+ u8 reserved[IFCVF_LM_CFG_SIZE - 8];
+};
+
+struct ifcvf_lm_cfg {
+ u8 reserved[IFCVF_LM_RING_STATE_OFFSET];
+ struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUE_PAIRS];
+};
+
+int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
+int ifcvf_start_hw(struct ifcvf_hw *hw);
+void ifcvf_stop_hw(struct ifcvf_hw *hw);
+void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
+void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+ void *dst, int length);
+void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+ const void *src, int length);
+u8 ifcvf_get_status(struct ifcvf_hw *hw);
+void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
+void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
+void ifcvf_reset(struct ifcvf_hw *hw);
+u64 ifcvf_get_features(struct ifcvf_hw *hw);
+u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num);
+struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
+#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
new file mode 100644
index 000000000000..abf6a061cab6
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+#include "ifcvf_base.h"
+
+#define VERSION_STRING "0.1"
+#define DRIVER_AUTHOR "Intel Corporation"
+#define IFCVF_DRIVER_NAME "ifcvf"
+
+static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
+{
+ struct vring_info *vring = arg;
+
+ if (vring->cb.callback)
+ return vring->cb.callback(vring->cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static int ifcvf_start_datapath(void *private)
+{
+ struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
+ u8 status;
+ int ret;
+
+ vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
+ ret = ifcvf_start_hw(vf);
+ if (ret < 0) {
+ status = ifcvf_get_status(vf);
+ status |= VIRTIO_CONFIG_S_FAILED;
+ ifcvf_set_status(vf, status);
+ }
+
+ return ret;
+}
+
+static int ifcvf_stop_datapath(void *private)
+{
+ struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
+ int i;
+
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ vf->vring[i].cb.callback = NULL;
+
+ ifcvf_stop_hw(vf);
+
+ return 0;
+}
+
+static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
+{
+ struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
+ int i;
+
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ vf->vring[i].last_avail_idx = 0;
+ vf->vring[i].desc = 0;
+ vf->vring[i].avail = 0;
+ vf->vring[i].used = 0;
+ vf->vring[i].ready = 0;
+ vf->vring[i].cb.callback = NULL;
+ vf->vring[i].cb.private = NULL;
+ }
+
+ ifcvf_reset(vf);
+}
+
+static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
+{
+ return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
+}
+
+static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+
+ return &adapter->vf;
+}
+
+static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ u64 features;
+
+ features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;
+
+ return features;
+}
+
+static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->req_features = features;
+
+ return 0;
+}
+
+static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_status(vf);
+}
+
+static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
+{
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
+
+ vf = vdpa_to_vf(vdpa_dev);
+ adapter = dev_get_drvdata(vdpa_dev->dev.parent);
+
+ if (status == 0) {
+ ifcvf_stop_datapath(adapter);
+ ifcvf_reset_vring(adapter);
+ return;
+ }
+
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ if (ifcvf_start_datapath(adapter) < 0)
+ IFCVF_ERR(adapter->pdev,
+ "Failed to set ifcvf vdpa status %u\n",
+ status);
+ }
+
+ ifcvf_set_status(vf, status);
+}
+
+static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_QUEUE_MAX;
+}
+
+static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_vq_state(vf, qid);
+}
+
+static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+ u64 num)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_set_vq_state(vf, qid, num);
+}
+
+static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
+ struct vdpa_callback *cb)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].cb = *cb;
+}
+
+static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
+ u16 qid, bool ready)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].ready = ready;
+}
+
+static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->vring[qid].ready;
+}
+
+static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
+ u32 num)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].size = num;
+}
+
+static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].desc = desc_area;
+ vf->vring[qid].avail = driver_area;
+ vf->vring[qid].used = device_area;
+
+ return 0;
+}
+
+static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ ifcvf_notify_queue(vf, qid);
+}
+
+static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ioread8(&vf->common_cfg->config_generation);
+}
+
+static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
+{
+ return VIRTIO_ID_NET;
+}
+
+static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_SUBSYS_VENDOR_ID;
+}
+
+static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_QUEUE_ALIGNMENT;
+}
+
+static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ WARN_ON(offset + len > sizeof(struct virtio_net_config));
+ ifcvf_read_net_config(vf, offset, buf, len);
+}
+
+static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
+ unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ WARN_ON(offset + len > sizeof(struct virtio_net_config));
+ ifcvf_write_net_config(vf, offset, buf, len);
+}
+
+static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
+ struct vdpa_callback *cb)
+{
+ /* We don't support config interrupt */
+}
+
+/*
+ * IFCVF currently doesn't have an on-chip IOMMU, so set_map(),
+ * dma_map() and dma_unmap() are not implemented.
+ */
+static const struct vdpa_config_ops ifc_vdpa_ops = {
+ .get_features = ifcvf_vdpa_get_features,
+ .set_features = ifcvf_vdpa_set_features,
+ .get_status = ifcvf_vdpa_get_status,
+ .set_status = ifcvf_vdpa_set_status,
+ .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
+ .get_vq_state = ifcvf_vdpa_get_vq_state,
+ .set_vq_state = ifcvf_vdpa_set_vq_state,
+ .set_vq_cb = ifcvf_vdpa_set_vq_cb,
+ .set_vq_ready = ifcvf_vdpa_set_vq_ready,
+ .get_vq_ready = ifcvf_vdpa_get_vq_ready,
+ .set_vq_num = ifcvf_vdpa_set_vq_num,
+ .set_vq_address = ifcvf_vdpa_set_vq_address,
+ .kick_vq = ifcvf_vdpa_kick_vq,
+ .get_generation = ifcvf_vdpa_get_generation,
+ .get_device_id = ifcvf_vdpa_get_device_id,
+ .get_vendor_id = ifcvf_vdpa_get_vendor_id,
+ .get_vq_align = ifcvf_vdpa_get_vq_align,
+ .get_config = ifcvf_vdpa_get_config,
+ .set_config = ifcvf_vdpa_set_config,
+ .set_config_cb = ifcvf_vdpa_set_config_cb,
+};
+
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+ int vector, i, ret, irq;
+
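+	/* Vector 0 (IFCVF_MSI_CONFIG_OFF) is for config; queue vectors start at 1. */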
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
+ pci_name(pdev), i);
+ vector = i + IFCVF_MSI_QUEUE_OFF;
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_intr_handler, 0,
+ vf->vring[i].msix_name,
+ &vf->vring[i]);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed to request irq for vq %d\n", i);
+ return ret;
+ }
+ vf->vring[i].irq = irq;
+ }
+
+ return 0;
+}
+
+static void ifcvf_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to enable device\n");
+ return ret;
+ }
+
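+	/* BARs 0, 2 and 4 are used; BAR 4 (IFCVF_LM_BAR) holds the LM registers. */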
+ ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
+ IFCVF_DRIVER_NAME);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request MMIO region\n");
+ return ret;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ IFCVF_ERR(pdev, "No usable DMA confiugration\n");
+ return ret;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "No usable coherent DMA confiugration\n");
+ return ret;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
+ IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ IFCVF_ERR(pdev, "Failed to alloc irq vectors\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed for adding devres for freeing irq vectors\n");
+ return ret;
+ }
+
+ adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+ dev, &ifc_vdpa_ops);
+ if (adapter == NULL) {
+ IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+ return -ENOMEM;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, adapter);
+
+ vf = &adapter->vf;
+ vf->base = pcim_iomap_table(pdev);
+
+ adapter->pdev = pdev;
+ adapter->vdpa.dma_dev = &pdev->dev;
+
+ ret = ifcvf_request_irq(adapter);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request MSI-X irq\n");
+ goto err;
+ }
+
+ ret = ifcvf_init_hw(vf, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+ goto err;
+ }
+
+ ret = vdpa_register_device(&adapter->vdpa);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ put_device(&adapter->vdpa.dev);
+ return ret;
+}
+
+static void ifcvf_remove(struct pci_dev *pdev)
+{
+ struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);
+
+ vdpa_unregister_device(&adapter->vdpa);
+}
+
+static struct pci_device_id ifcvf_pci_ids[] = {
+ { PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
+ IFCVF_DEVICE_ID,
+ IFCVF_SUBSYS_VENDOR_ID,
+ IFCVF_SUBSYS_DEVICE_ID) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);
+
+static struct pci_driver ifcvf_driver = {
+ .name = IFCVF_DRIVER_NAME,
+ .id_table = ifcvf_pci_ids,
+ .probe = ifcvf_probe,
+ .remove = ifcvf_remove,
+};
+
+module_pci_driver(ifcvf_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(VERSION_STRING);
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
new file mode 100644
index 000000000000..ff6562f602e0
--- /dev/null
+++ b/drivers/vdpa/vdpa.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vDPA bus.
+ *
+ * Copyright (c) 2020, Red Hat. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/vdpa.h>
+
+static DEFINE_IDA(vdpa_index_ida);
+
+static int vdpa_dev_probe(struct device *d)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(d);
+ struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
+ int ret = 0;
+
+ if (drv && drv->probe)
+ ret = drv->probe(vdev);
+
+ return ret;
+}
+
+static int vdpa_dev_remove(struct device *d)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(d);
+ struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
+
+ if (drv && drv->remove)
+ drv->remove(vdev);
+
+ return 0;
+}
+
+static struct bus_type vdpa_bus = {
+ .name = "vdpa",
+ .probe = vdpa_dev_probe,
+ .remove = vdpa_dev_remove,
+};
+
+static void vdpa_release_dev(struct device *d)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(d);
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ if (ops->free)
+ ops->free(vdev);
+
+ ida_simple_remove(&vdpa_index_ida, vdev->index);
+ kfree(vdev);
+}
+
+/**
+ * __vdpa_alloc_device - allocate and initialize a vDPA device
+ * @parent: the parent device
+ * @config: the bus operations that are supported by this device
+ * @size: size of the parent structure that contains private data
+ *
+ * This allows the driver to do some preparation after the device is
+ * initialized but before it is registered.
+ *
+ * Drivers should use the vdpa_alloc_device() wrapper macro instead of
+ * calling this directly.
+ *
+ * Returns an error when @config is not set or when the ida allocation
+ * fails.
+ */
+struct vdpa_device *__vdpa_alloc_device(struct device *parent,
+ const struct vdpa_config_ops *config,
+ size_t size)
+{
+ struct vdpa_device *vdev;
+ int err = -EINVAL;
+
+ if (!config)
+ goto err;
+
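+	/* dma_map and dma_unmap must be provided as a pair. */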
+ if (!!config->dma_map != !!config->dma_unmap)
+ goto err;
+
+ err = -ENOMEM;
+ vdev = kzalloc(size, GFP_KERNEL);
+ if (!vdev)
+ goto err;
+
+ err = ida_simple_get(&vdpa_index_ida, 0, 0, GFP_KERNEL);
+ if (err < 0)
+ goto err_ida;
+
+ vdev->dev.bus = &vdpa_bus;
+ vdev->dev.parent = parent;
+ vdev->dev.release = vdpa_release_dev;
+ vdev->index = err;
+ vdev->config = config;
+
+ err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
+ if (err)
+ goto err_name;
+
+ device_initialize(&vdev->dev);
+
+ return vdev;
+
+err_name:
+ ida_simple_remove(&vdpa_index_ida, vdev->index);
+err_ida:
+ kfree(vdev);
+err:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
+
+/**
+ * vdpa_register_device - register a vDPA device
+ * @vdev: the vdpa device to be registered to vDPA bus
+ *
+ * Callers must have made a successful call to vdpa_alloc_device()
+ * beforehand.
+ *
+ * Returns an error when the device fails to be added to the vDPA bus.
+ */
+int vdpa_register_device(struct vdpa_device *vdev)
+{
+ return device_add(&vdev->dev);
+}
+EXPORT_SYMBOL_GPL(vdpa_register_device);
+
+/**
+ * vdpa_unregister_device - unregister a vDPA device
+ * @vdev: the vdpa device to be unregistered from vDPA bus
+ */
+void vdpa_unregister_device(struct vdpa_device *vdev)
+{
+ device_unregister(&vdev->dev);
+}
+EXPORT_SYMBOL_GPL(vdpa_unregister_device);
+
+/**
+ * __vdpa_register_driver - register a vDPA device driver
+ * @drv: the vdpa device driver to be registered
+ * @owner: module owner of the driver
+ *
+ * Returns an error when the registration fails.
+ */
+int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
+{
+ drv->driver.bus = &vdpa_bus;
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__vdpa_register_driver);
+
+/**
+ * vdpa_unregister_driver - unregister a vDPA device driver
+ * @drv: the vdpa device driver to be unregistered
+ */
+void vdpa_unregister_driver(struct vdpa_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
+
+static int vdpa_init(void)
+{
+ return bus_register(&vdpa_bus);
+}
+
+static void __exit vdpa_exit(void)
+{
+ bus_unregister(&vdpa_bus);
+ ida_destroy(&vdpa_index_ida);
+}
+core_initcall(vdpa_init);
+module_exit(vdpa_exit);
+
+MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/vdpa/vdpa_sim/Makefile b/drivers/vdpa/vdpa_sim/Makefile
new file mode 100644
index 000000000000..b40278f65e04
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VDPA_SIM) += vdpa_sim.o
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
new file mode 100644
index 000000000000..01c456f7c1f7
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDPA networking device simulator.
+ *
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uuid.h>
+#include <linux/iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/sysfs.h>
+#include <linux/file.h>
+#include <linux/etherdevice.h>
+#include <linux/vringh.h>
+#include <linux/vdpa.h>
+#include <linux/vhost_iotlb.h>
+#include <uapi/linux/virtio_config.h>
+#include <uapi/linux/virtio_net.h>
+
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define DRV_DESC "vDPA Device Simulator"
+#define DRV_LICENSE "GPL v2"
+
+struct vdpasim_virtqueue {
+ struct vringh vring;
+ struct vringh_kiov iov;
+ unsigned short head;
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ u32 num;
+ void *private;
+ irqreturn_t (*cb)(void *data);
+};
+
+#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
+#define VDPASIM_QUEUE_MAX 256
+#define VDPASIM_DEVICE_ID 0x1
+#define VDPASIM_VENDOR_ID 0
+#define VDPASIM_VQ_NUM 0x2
+#define VDPASIM_NAME "vdpasim-netdev"
+
+static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << VIRTIO_F_VERSION_1) |
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM);
+
+/* State of each vdpasim device */
+struct vdpasim {
+ struct vdpa_device vdpa;
+ struct vdpasim_virtqueue vqs[2];
+ struct work_struct work;
+ /* spinlock to synchronize virtqueue state */
+ spinlock_t lock;
+ struct virtio_net_config config;
+ struct vhost_iotlb *iommu;
+ void *buffer;
+ u32 status;
+ u32 generation;
+ u64 features;
+};
+
+static struct vdpasim *vdpasim_dev;
+
+static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
+{
+ return container_of(vdpa, struct vdpasim, vdpa);
+}
+
+static struct vdpasim *dev_to_sim(struct device *dev)
+{
+ struct vdpa_device *vdpa = dev_to_vdpa(dev);
+
+ return vdpa_to_sim(vdpa);
+}
+
+static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+{
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ vringh_init_iotlb(&vq->vring, vdpasim_features,
+ VDPASIM_QUEUE_MAX, false,
+ (struct vring_desc *)(uintptr_t)vq->desc_addr,
+ (struct vring_avail *)
+ (uintptr_t)vq->driver_addr,
+ (struct vring_used *)
+ (uintptr_t)vq->device_addr);
+}
+
+static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
+{
+ vq->ready = 0;
+ vq->desc_addr = 0;
+ vq->driver_addr = 0;
+ vq->device_addr = 0;
+ vq->cb = NULL;
+ vq->private = NULL;
+ vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
+ false, NULL, NULL, NULL);
+}
+
+static void vdpasim_reset(struct vdpasim *vdpasim)
+{
+ int i;
+
+ for (i = 0; i < VDPASIM_VQ_NUM; i++)
+ vdpasim_vq_reset(&vdpasim->vqs[i]);
+
+ vhost_iotlb_reset(vdpasim->iommu);
+
+ vdpasim->features = 0;
+ vdpasim->status = 0;
+ ++vdpasim->generation;
+}
+
+static void vdpasim_work(struct work_struct *work)
+{
+ struct vdpasim *vdpasim = container_of(work, struct
+ vdpasim, work);
+ struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
+ struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
+ ssize_t read, write, total_write;
+ int err;
+ int pkts = 0;
+
+ spin_lock(&vdpasim->lock);
+
+ if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto out;
+
+ if (!txq->ready || !rxq->ready)
+ goto out;
+
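+	/*
+	 * Loop packets back: pull each buffer from the TX ring and push
+	 * it into the RX ring through the bounce buffer.
+	 */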
+ while (true) {
+ total_write = 0;
+ err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
+ &txq->head, GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
+ &rxq->head, GFP_ATOMIC);
+ if (err <= 0) {
+ vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ break;
+ }
+
+ while (true) {
+ read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
+ vdpasim->buffer,
+ PAGE_SIZE);
+ if (read <= 0)
+ break;
+
+ write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
+ vdpasim->buffer, read);
+ if (write <= 0)
+ break;
+
+ total_write += write;
+ }
+
+ /* Make sure data is written before advancing the index */
+ smp_wmb();
+
+ vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
+
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (txq->cb)
+ txq->cb(txq->private);
+ if (rxq->cb)
+ rxq->cb(rxq->private);
+ local_bh_enable();
+
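+	/* After a burst of five buffers, requeue the work to avoid hogging the CPU. */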
+ if (++pkts > 4) {
+ schedule_work(&vdpasim->work);
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock(&vdpasim->lock);
+}
+
+static int dir_to_perm(enum dma_data_direction dir)
+{
+ int perm = -EFAULT;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ perm = VHOST_MAP_WO;
+ break;
+ case DMA_TO_DEVICE:
+ perm = VHOST_MAP_RO;
+ break;
+ case DMA_BIDIRECTIONAL:
+ perm = VHOST_MAP_RW;
+ break;
+ default:
+ break;
+ }
+
+ return perm;
+}
+
+static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vdpasim *vdpasim = dev_to_sim(dev);
+ struct vhost_iotlb *iommu = vdpasim->iommu;
+ u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
+ int ret, perm = dir_to_perm(dir);
+
+ if (perm < 0)
+ return DMA_MAPPING_ERROR;
+
+ /* For simplicity, use an identity mapping to avoid e.g. an
+ * IOVA allocator.
+ */
+ ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
+ pa, perm);
+ if (ret)
+ return DMA_MAPPING_ERROR;
+
+ return (dma_addr_t)(pa);
+}
+
+static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vdpasim *vdpasim = dev_to_sim(dev);
+ struct vhost_iotlb *iommu = vdpasim->iommu;
+
+ vhost_iotlb_del_range(iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+}
+
+static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag,
+ unsigned long attrs)
+{
+ struct vdpasim *vdpasim = dev_to_sim(dev);
+ struct vhost_iotlb *iommu = vdpasim->iommu;
+ void *addr = kmalloc(size, flag);
+ int ret;
+
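+	/* Mirror map_page: identity-map the buffer in the software IOTLB. */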
+ if (!addr)
+ *dma_addr = DMA_MAPPING_ERROR;
+ else {
+ u64 pa = virt_to_phys(addr);
+
+ ret = vhost_iotlb_add_range(iommu, (u64)pa,
+ (u64)pa + size - 1,
+ pa, VHOST_MAP_RW);
+ if (ret) {
+ *dma_addr = DMA_MAPPING_ERROR;
+ kfree(addr);
+ addr = NULL;
+ } else
+ *dma_addr = (dma_addr_t)pa;
+ }
+
+ return addr;
+}
+
+static void vdpasim_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct vdpasim *vdpasim = dev_to_sim(dev);
+ struct vhost_iotlb *iommu = vdpasim->iommu;
+
+ vhost_iotlb_del_range(iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+ kfree(phys_to_virt((uintptr_t)dma_addr));
+}
+
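+/*
+ * vdpasim performs no real DMA; these ops simply mirror every mapping
+ * into the software IOTLB that the vringh accessors consult.
+ */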
+static const struct dma_map_ops vdpasim_dma_ops = {
+ .map_page = vdpasim_map_page,
+ .unmap_page = vdpasim_unmap_page,
+ .alloc = vdpasim_alloc_coherent,
+ .free = vdpasim_free_coherent,
+};
+
+static const struct vdpa_config_ops vdpasim_net_config_ops;
+
+static struct vdpasim *vdpasim_create(void)
+{
+ struct virtio_net_config *config;
+ struct vdpasim *vdpasim;
+ struct device *dev;
+ int ret = -ENOMEM;
+
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
+ &vdpasim_net_config_ops);
+ if (!vdpasim)
+ goto err_alloc;
+
+ INIT_WORK(&vdpasim->work, vdpasim_work);
+ spin_lock_init(&vdpasim->lock);
+
+ dev = &vdpasim->vdpa.dev;
+ dev->coherent_dma_mask = DMA_BIT_MASK(64);
+ set_dma_ops(dev, &vdpasim_dma_ops);
+
+ vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
+ if (!vdpasim->iommu)
+ goto err_iommu;
+
+ vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vdpasim->buffer)
+ goto err_iommu;
+
+ config = &vdpasim->config;
+ config->mtu = 1500;
+ config->status = VIRTIO_NET_S_LINK_UP;
+ eth_random_addr(config->mac);
+
+ vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
+ vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
+
+ vdpasim->vdpa.dma_dev = dev;
+ ret = vdpa_register_device(&vdpasim->vdpa);
+ if (ret)
+ goto err_iommu;
+
+ return vdpasim;
+
+err_iommu:
+ put_device(dev);
+err_alloc:
+ return ERR_PTR(ret);
+}
+
+static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ vq->desc_addr = desc_area;
+ vq->driver_addr = driver_area;
+ vq->device_addr = device_area;
+
+ return 0;
+}
+
+static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ vq->num = num;
+}
+
+static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ if (vq->ready)
+ schedule_work(&vdpasim->work);
+}
+
+static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_callback *cb)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ vq->cb = cb->callback;
+ vq->private = cb->private;
+}
+
+static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ spin_lock(&vdpasim->lock);
+ vq->ready = ready;
+ if (vq->ready)
+ vdpasim_queue_ready(vdpasim, idx);
+ spin_unlock(&vdpasim->lock);
+}
+
+static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ return vq->ready;
+}
+
+static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ struct vringh *vrh = &vq->vring;
+
+ spin_lock(&vdpasim->lock);
+ vrh->last_avail_idx = state;
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
+static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ struct vringh *vrh = &vq->vring;
+
+ return vrh->last_avail_idx;
+}
+
+static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
+{
+ return VDPASIM_QUEUE_ALIGN;
+}
+
+static u64 vdpasim_get_features(struct vdpa_device *vdpa)
+{
+ return vdpasim_features;
+}
+
+static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ /* DMA mapping must be done by driver */
+ if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ return -EINVAL;
+
+ vdpasim->features = features & vdpasim_features;
+
+ return 0;
+}
+
+static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ /* We don't support config interrupt */
+}
+
+static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ return VDPASIM_QUEUE_MAX;
+}
+
+static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
+{
+ return VDPASIM_DEVICE_ID;
+}
+
+static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
+{
+ return VDPASIM_VENDOR_ID;
+}
+
+static u8 vdpasim_get_status(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ u8 status;
+
+ spin_lock(&vdpasim->lock);
+ status = vdpasim->status;
+ spin_unlock(&vdpasim->lock);
+
+ return status;
+}
+
+static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->status = status;
+ if (status == 0)
+ vdpasim_reset(vdpasim);
+ spin_unlock(&vdpasim->lock);
+}
+
+static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ if (offset + len <= sizeof(struct virtio_net_config))
+ memcpy(buf, (u8 *)&vdpasim->config + offset, len);
+}
+
+static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ /* No writable config supported by vdpasim */
+}
+
+static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->generation;
+}
+
+static int vdpasim_set_map(struct vdpa_device *vdpa,
+ struct vhost_iotlb *iotlb)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vhost_iotlb_map *map;
+ u64 start = 0ULL, last = 0ULL - 1;
+ int ret;
+
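+	/* Rebuild the software IOTLB from scratch with the new translations. */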
+ vhost_iotlb_reset(vdpasim->iommu);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
+ map->last, map->addr, map->perm);
+ if (ret)
+ goto err;
+ }
+ return 0;
+
+err:
+ vhost_iotlb_reset(vdpasim->iommu);
+ return ret;
+}
+
+static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
+ u64 pa, u32 perm)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vhost_iotlb_add_range(vdpasim->iommu, iova,
+ iova + size - 1, pa, perm);
+}
+
+static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+
+ return 0;
+}
+
+static void vdpasim_free(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ cancel_work_sync(&vdpasim->work);
+ kfree(vdpasim->buffer);
+ if (vdpasim->iommu)
+ vhost_iotlb_free(vdpasim->iommu);
+}
+
+static const struct vdpa_config_ops vdpasim_net_config_ops = {
+ .set_vq_address = vdpasim_set_vq_address,
+ .set_vq_num = vdpasim_set_vq_num,
+ .kick_vq = vdpasim_kick_vq,
+ .set_vq_cb = vdpasim_set_vq_cb,
+ .set_vq_ready = vdpasim_set_vq_ready,
+ .get_vq_ready = vdpasim_get_vq_ready,
+ .set_vq_state = vdpasim_set_vq_state,
+ .get_vq_state = vdpasim_get_vq_state,
+ .get_vq_align = vdpasim_get_vq_align,
+ .get_features = vdpasim_get_features,
+ .set_features = vdpasim_set_features,
+ .set_config_cb = vdpasim_set_config_cb,
+ .get_vq_num_max = vdpasim_get_vq_num_max,
+ .get_device_id = vdpasim_get_device_id,
+ .get_vendor_id = vdpasim_get_vendor_id,
+ .get_status = vdpasim_get_status,
+ .set_status = vdpasim_set_status,
+ .get_config = vdpasim_get_config,
+ .set_config = vdpasim_set_config,
+ .get_generation = vdpasim_get_generation,
+ .set_map = vdpasim_set_map,
+ .dma_map = vdpasim_dma_map,
+ .dma_unmap = vdpasim_dma_unmap,
+ .free = vdpasim_free,
+};
+
+static int __init vdpasim_dev_init(void)
+{
+ vdpasim_dev = vdpasim_create();
+
+ if (!IS_ERR(vdpasim_dev))
+ return 0;
+
+ return PTR_ERR(vdpasim_dev);
+}
+
+static void __exit vdpasim_dev_exit(void)
+{
+ struct vdpa_device *vdpa = &vdpasim_dev->vdpa;
+
+ vdpa_unregister_device(vdpa);
+}
+
+module_init(vdpasim_dev_init)
+module_exit(vdpasim_dev_exit)
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 379a02c36e37..6c6b37b5c04e 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -9,7 +9,6 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/eventfd.h>
@@ -54,6 +53,12 @@ module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
"Disable using the PCI D3 low power state for idle, unused devices");
+static bool enable_sriov;
+#ifdef CONFIG_PCI_IOV
+module_param(enable_sriov, bool, 0644);
+MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration. Enabling SR-IOV on a PF typically requires support of the userspace PF driver; enabling VFs without such support may result in non-functional VFs or PF.");
+#endif
+
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -466,6 +471,44 @@ out:
vfio_pci_set_power_state(vdev, PCI_D3hot);
}
+static struct pci_driver vfio_pci_driver;
+
+static struct vfio_pci_device *get_pf_vdev(struct vfio_pci_device *vdev,
+ struct vfio_device **pf_dev)
+{
+ struct pci_dev *physfn = pci_physfn(vdev->pdev);
+
+ if (!vdev->pdev->is_virtfn)
+ return NULL;
+
+ *pf_dev = vfio_device_get_from_dev(&physfn->dev);
+ if (!*pf_dev)
+ return NULL;
+
+ if (pci_dev_driver(physfn) != &vfio_pci_driver) {
+ vfio_device_put(*pf_dev);
+ return NULL;
+ }
+
+ return vfio_device_data(*pf_dev);
+}
+
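+/*
+ * Track the number of open VF users on the PF; callers pass +1 on open
+ * and -1 on release.
+ */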
+static void vfio_pci_vf_token_user_add(struct vfio_pci_device *vdev, int val)
+{
+ struct vfio_device *pf_dev;
+ struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev);
+
+ if (!pf_vdev)
+ return;
+
+ mutex_lock(&pf_vdev->vf_token->lock);
+ pf_vdev->vf_token->users += val;
+ WARN_ON(pf_vdev->vf_token->users < 0);
+ mutex_unlock(&pf_vdev->vf_token->lock);
+
+ vfio_device_put(pf_dev);
+}
+
static void vfio_pci_release(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
@@ -473,6 +516,7 @@ static void vfio_pci_release(void *device_data)
mutex_lock(&vdev->reflck->lock);
if (!(--vdev->refcnt)) {
+ vfio_pci_vf_token_user_add(vdev, -1);
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
}
@@ -498,6 +542,7 @@ static int vfio_pci_open(void *device_data)
goto error;
vfio_spapr_pci_eeh_open(vdev->pdev);
+ vfio_pci_vf_token_user_add(vdev, 1);
}
vdev->refcnt++;
error:
@@ -1140,6 +1185,65 @@ hot_reset_release:
return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
ioeventfd.data, count, ioeventfd.fd);
+ } else if (cmd == VFIO_DEVICE_FEATURE) {
+ struct vfio_device_feature feature;
+ uuid_t uuid;
+
+ minsz = offsetofend(struct vfio_device_feature, flags);
+
+ if (copy_from_user(&feature, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (feature.argsz < minsz)
+ return -EINVAL;
+
+ /* Check unknown flags */
+ if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK |
+ VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_GET |
+ VFIO_DEVICE_FEATURE_PROBE))
+ return -EINVAL;
+
+ /* GET & SET are mutually exclusive except with PROBE */
+ if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
+ (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
+ (feature.flags & VFIO_DEVICE_FEATURE_GET))
+ return -EINVAL;
+
+ switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
+ case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
+ if (!vdev->vf_token)
+ return -ENOTTY;
+
+ /*
+ * We do not support GET of the VF Token UUID as this
+ * could expose the token of the previous device user.
+ */
+ if (feature.flags & VFIO_DEVICE_FEATURE_GET)
+ return -EINVAL;
+
+ if (feature.flags & VFIO_DEVICE_FEATURE_PROBE)
+ return 0;
+
+ /* Don't SET unless told to do so */
+ if (!(feature.flags & VFIO_DEVICE_FEATURE_SET))
+ return -EINVAL;
+
+ if (feature.argsz < minsz + sizeof(uuid))
+ return -EINVAL;
+
+ if (copy_from_user(&uuid, (void __user *)(arg + minsz),
+ sizeof(uuid)))
+ return -EFAULT;
+
+ mutex_lock(&vdev->vf_token->lock);
+ uuid_copy(&vdev->vf_token->uuid, &uuid);
+ mutex_unlock(&vdev->vf_token->lock);
+
+ return 0;
+ default:
+ return -ENOTTY;
+ }
}
return -ENOTTY;
@@ -1278,6 +1382,150 @@ static void vfio_pci_request(void *device_data, unsigned int count)
mutex_unlock(&vdev->igate);
}
+static int vfio_pci_validate_vf_token(struct vfio_pci_device *vdev,
+ bool vf_token, uuid_t *uuid)
+{
+ /*
+ * There's always some degree of trust or collaboration between SR-IOV
+ * PF and VFs, even if just that the PF hosts the SR-IOV capability and
+ * can disrupt VFs with a reset, but often the PF has more explicit
+ * access to deny service to the VF or access data passed through the
+ * VF. We therefore require an opt-in via a shared VF token (UUID) to
+ * represent this trust. This both prevents a VF driver from assuming
+ * the PF driver is a trusted, in-kernel driver and prevents the PF
+ * driver from being replaced by a rogue driver unknown to in-use
+ * VF drivers.
+ *
+ * Therefore when presented with a VF, if the PF is a vfio device and
+ * it is bound to the vfio-pci driver, the user needs to provide a VF
+ * token to access the device, in the form of appending a vf_token to
+ * the device name, for example:
+ *
+ * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
+ *
+ * When presented with a PF which has VFs in use, the user must also
+ * provide the current VF token to prove collaboration with existing
+ * VF users. If VFs are not in use, the VF token provided for the PF
+ * device will act to set the VF token.
+ *
+ * If the VF token is provided but unused, an error is generated.
+ */
+ if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
+ return 0; /* No VF token provided or required */
+
+ if (vdev->pdev->is_virtfn) {
+ struct vfio_device *pf_dev;
+ struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev);
+ bool match;
+
+ if (!pf_vdev) {
+ if (!vf_token)
+ return 0; /* PF is not vfio-pci, no VF token */
+
+ pci_info_ratelimited(vdev->pdev,
+ "VF token incorrectly provided, PF not bound to vfio-pci\n");
+ return -EINVAL;
+ }
+
+ if (!vf_token) {
+ vfio_device_put(pf_dev);
+ pci_info_ratelimited(vdev->pdev,
+ "VF token required to access device\n");
+ return -EACCES;
+ }
+
+ mutex_lock(&pf_vdev->vf_token->lock);
+ match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
+ mutex_unlock(&pf_vdev->vf_token->lock);
+
+ vfio_device_put(pf_dev);
+
+ if (!match) {
+ pci_info_ratelimited(vdev->pdev,
+ "Incorrect VF token provided for device\n");
+ return -EACCES;
+ }
+ } else if (vdev->vf_token) {
+ mutex_lock(&vdev->vf_token->lock);
+ if (vdev->vf_token->users) {
+ if (!vf_token) {
+ mutex_unlock(&vdev->vf_token->lock);
+ pci_info_ratelimited(vdev->pdev,
+ "VF token required to access device\n");
+ return -EACCES;
+ }
+
+ if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
+ mutex_unlock(&vdev->vf_token->lock);
+ pci_info_ratelimited(vdev->pdev,
+ "Incorrect VF token provided for device\n");
+ return -EACCES;
+ }
+ } else if (vf_token) {
+ uuid_copy(&vdev->vf_token->uuid, uuid);
+ }
+
+ mutex_unlock(&vdev->vf_token->lock);
+ } else if (vf_token) {
+ pci_info_ratelimited(vdev->pdev,
+ "VF token incorrectly provided, not a PF or VF\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define VF_TOKEN_ARG "vf_token="
+
+static int vfio_pci_match(void *device_data, char *buf)
+{
+ struct vfio_pci_device *vdev = device_data;
+ bool vf_token = false;
+ uuid_t uuid;
+ int ret;
+
+ if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
+ return 0; /* No match */
+
+ if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
+ buf += strlen(pci_name(vdev->pdev));
+
+ if (*buf != ' ')
+ return 0; /* No match: non-whitespace after name */
+
+ while (*buf) {
+ if (*buf == ' ') {
+ buf++;
+ continue;
+ }
+
+ if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
+ strlen(VF_TOKEN_ARG))) {
+ buf += strlen(VF_TOKEN_ARG);
+
+ if (strlen(buf) < UUID_STRING_LEN)
+ return -EINVAL;
+
+ ret = uuid_parse(buf, &uuid);
+ if (ret)
+ return ret;
+
+ vf_token = true;
+ buf += UUID_STRING_LEN;
+ } else {
+ /* Unknown/duplicate option */
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
+ if (ret)
+ return ret;
+
+ return 1; /* Match */
+}
+
static const struct vfio_device_ops vfio_pci_ops = {
.name = "vfio-pci",
.open = vfio_pci_open,
@@ -1287,10 +1535,40 @@ static const struct vfio_device_ops vfio_pci_ops = {
.write = vfio_pci_write,
.mmap = vfio_pci_mmap,
.request = vfio_pci_request,
+ .match = vfio_pci_match,
};
static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);
+static struct pci_driver vfio_pci_driver;
+
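+/*
+ * Steer newly created VFs of an owned PF toward vfio-pci by setting
+ * driver_override when they appear, and warn if a VF binds to another
+ * driver while the PF is bound to vfio-pci.
+ */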
+static int vfio_pci_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct vfio_pci_device *vdev = container_of(nb,
+ struct vfio_pci_device, nb);
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *physfn = pci_physfn(pdev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE &&
+ pdev->is_virtfn && physfn == vdev->pdev) {
+ pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
+ pci_name(pdev));
+ pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ vfio_pci_ops.name);
+ } else if (action == BUS_NOTIFY_BOUND_DRIVER &&
+ pdev->is_virtfn && physfn == vdev->pdev) {
+ struct pci_driver *drv = pci_dev_driver(pdev);
+
+ if (drv && drv != &vfio_pci_driver)
+ pci_warn(vdev->pdev,
+ "VF %s bound to driver %s while PF bound to vfio-pci\n",
+ pci_name(pdev), drv->name);
+ }
+
+ return 0;
+}
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -1302,12 +1580,12 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -EINVAL;
/*
- * Prevent binding to PFs with VFs enabled, this too easily allows
- * userspace instance with VFs and PFs from the same device, which
- * cannot work. Disabling SR-IOV here would initiate removing the
- * VFs, which would unbind the driver, which is prone to blocking
- * if that VF is also in use by vfio-pci. Just reject these PFs
- * and let the user sort it out.
+ * Prevent binding to PFs with VFs enabled, the VFs might be in use
+ * by the host or other users. We cannot capture the VFs if they
+ * already exist, nor can we track VF users. Disabling SR-IOV here
+ * would initiate removing the VFs, which would unbind the driver,
+ * which is prone to blocking if that VF is also in use by vfio-pci.
+ * Just reject these PFs and let the user sort it out.
*/
if (pci_num_vf(pdev)) {
pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
@@ -1320,8 +1598,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
- vfio_iommu_group_put(group, &pdev->dev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_group_put;
}
vdev->pdev = pdev;
@@ -1332,18 +1610,27 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&vdev->ioeventfds_list);
ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
- if (ret) {
- vfio_iommu_group_put(group, &pdev->dev);
- kfree(vdev);
- return ret;
- }
+ if (ret)
+ goto out_free;
ret = vfio_pci_reflck_attach(vdev);
- if (ret) {
- vfio_del_group_dev(&pdev->dev);
- vfio_iommu_group_put(group, &pdev->dev);
- kfree(vdev);
- return ret;
+ if (ret)
+ goto out_del_group_dev;
+
+ if (pdev->is_physfn) {
+ vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
+ if (!vdev->vf_token) {
+ ret = -ENOMEM;
+ goto out_reflck;
+ }
+
+ mutex_init(&vdev->vf_token->lock);
+ uuid_gen(&vdev->vf_token->uuid);
+
+ vdev->nb.notifier_call = vfio_pci_bus_notifier;
+ ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
+ if (ret)
+ goto out_vf_token;
}
if (vfio_pci_is_vga(pdev)) {
@@ -1369,16 +1656,39 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
return ret;
+
+out_vf_token:
+ kfree(vdev->vf_token);
+out_reflck:
+ vfio_pci_reflck_put(vdev->reflck);
+out_del_group_dev:
+ vfio_del_group_dev(&pdev->dev);
+out_free:
+ kfree(vdev);
+out_group_put:
+ vfio_iommu_group_put(group, &pdev->dev);
+ return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
struct vfio_pci_device *vdev;
+ pci_disable_sriov(pdev);
+
vdev = vfio_del_group_dev(&pdev->dev);
if (!vdev)
return;
+ if (vdev->vf_token) {
+ WARN_ON(vdev->vf_token->users);
+ mutex_destroy(&vdev->vf_token->lock);
+ kfree(vdev->vf_token);
+ }
+
+ if (vdev->nb.notifier_call)
+ bus_unregister_notifier(&pci_bus_type, &vdev->nb);
+
vfio_pci_reflck_put(vdev->reflck);
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
@@ -1427,16 +1737,48 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
}
+static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
+{
+ struct vfio_pci_device *vdev;
+ struct vfio_device *device;
+ int ret = 0;
+
+ might_sleep();
+
+ if (!enable_sriov)
+ return -ENOENT;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (!device)
+ return -ENODEV;
+
+ vdev = vfio_device_data(device);
+ if (!vdev) {
+ vfio_device_put(device);
+ return -ENODEV;
+ }
+
+ if (nr_virtfn == 0)
+ pci_disable_sriov(pdev);
+ else
+ ret = pci_enable_sriov(pdev, nr_virtfn);
+
+ vfio_device_put(device);
+
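+	/* A successful .sriov_configure callback returns the number of VFs. */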
+ return ret < 0 ? ret : nr_virtfn;
+}
+
static const struct pci_error_handlers vfio_err_handlers = {
.error_detected = vfio_pci_aer_err_detected,
};
static struct pci_driver vfio_pci_driver = {
- .name = "vfio-pci",
- .id_table = NULL, /* only dynamic ids */
- .probe = vfio_pci_probe,
- .remove = vfio_pci_remove,
- .err_handler = &vfio_err_handlers,
+ .name = "vfio-pci",
+ .id_table = NULL, /* only dynamic ids */
+ .probe = vfio_pci_probe,
+ .remove = vfio_pci_remove,
+ .sriov_configure = vfio_pci_sriov_configure,
+ .err_handler = &vfio_err_handlers,
};
static DEFINE_MUTEX(reflck_lock);
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index df4d96038cd4..ed20d73cc27c 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -422,8 +422,14 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
&mmio_atsd)) {
- dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
- mmio_atsd = 0;
+ if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0,
+ &mmio_atsd)) {
+ dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
+ mmio_atsd = 0;
+ } else {
+ dev_warn(&vdev->pdev->dev,
+ "Using fallback ibm,mmio-atsd[0] for ATSD.\n");
+ }
}
if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 8a2c7607d513..36ec69081ecd 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -12,6 +12,8 @@
#include <linux/pci.h>
#include <linux/irqbypass.h>
#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/notifier.h>
#ifndef VFIO_PCI_PRIVATE_H
#define VFIO_PCI_PRIVATE_H
@@ -84,6 +86,12 @@ struct vfio_pci_reflck {
struct mutex lock;
};
+struct vfio_pci_vf_token {
+ struct mutex lock;
+ uuid_t uuid;
+ int users;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_NUM_BARS];
@@ -122,6 +130,8 @@ struct vfio_pci_device {
struct list_head dummy_resources_list;
struct mutex ioeventfds_lock;
struct list_head ioeventfds_list;
+ struct vfio_pci_vf_token *vf_token;
+ struct notifier_block nb;
};
#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index ae1a5eb98620..1e2769010089 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -44,7 +44,7 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i)
{
struct platform_device *pdev = (struct platform_device *) vdev->opaque;
- return platform_get_irq(pdev, i);
+ return platform_get_irq_optional(pdev, i);
}
static int vfio_platform_probe(struct platform_device *pdev)
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index c8482624ca34..765e0e5d83ed 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -875,11 +875,23 @@ EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
char *buf)
{
- struct vfio_device *it, *device = NULL;
+ struct vfio_device *it, *device = ERR_PTR(-ENODEV);
mutex_lock(&group->device_lock);
list_for_each_entry(it, &group->device_list, group_next) {
- if (!strcmp(dev_name(it->dev), buf)) {
+ int ret;
+
+ if (it->ops->match) {
+ ret = it->ops->match(it->device_data, buf);
+ if (ret < 0) {
+ device = ERR_PTR(ret);
+ break;
+ }
+ } else {
+ ret = !strcmp(dev_name(it->dev), buf);
+ }
+
+ if (ret) {
device = it;
vfio_device_get(device);
break;
@@ -1430,8 +1442,8 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
return -EPERM;
device = vfio_device_get_from_name(group, buf);
- if (!device)
- return -ENODEV;
+ if (IS_ERR(device))
+ return PTR_ERR(device);
ret = device->ops->open(device->device_data);
if (ret) {
@@ -1720,6 +1732,44 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
+/**
+ * External user API, exported as symbols to be linked dynamically.
+ * The external user passes in a device pointer to verify that:
+ * - a VFIO group is associated with the device;
+ * - IOMMU is set for the group.
+ * If both checks pass, vfio_group_get_external_user_from_dev()
+ * increments the container user counter to prevent the VFIO group
+ * from disposal before external user exits and returns the pointer
+ * to the VFIO group.
+ *
+ * When the external user finishes using the VFIO group, it calls
+ * vfio_group_put_external_user() to release the VFIO group and
+ * decrement the container user counter.
+ *
+ * @dev [in] : device
+ * Return error PTR or pointer to VFIO group.
+ */
+
+struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev)
+{
+ struct vfio_group *group;
+ int ret;
+
+ group = vfio_group_get_from_dev(dev);
+ if (!group)
+ return ERR_PTR(-ENODEV);
+
+ ret = vfio_group_add_container_user(group);
+ if (ret) {
+ vfio_group_put(group);
+ return ERR_PTR(ret);
+ }
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev);
+
void vfio_group_put_external_user(struct vfio_group *group)
{
vfio_group_try_dissolve_container(group);
@@ -1961,6 +2011,146 @@ err_unpin_pages:
}
EXPORT_SYMBOL(vfio_unpin_pages);
+/*
+ * Pin a set of guest IOVA PFNs and return their associated host PFNs for a
+ * VFIO group.
+ *
+ * The caller needs to call vfio_group_get_external_user() or
+ * vfio_group_get_external_user_from_dev() prior to calling this interface,
+ * so as to prevent the VFIO group from disposal in the middle of the call.
+ * But it can keep the reference to the VFIO group for several calls into
+ * this interface.
+ * After finishing using the VFIO group, the caller needs to release the
+ * VFIO group by calling vfio_group_put_external_user().
+ *
+ * @group [in] : VFIO group
+ * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be pinned.
+ * @npage [in] : count of elements in user_iova_pfn array.
+ * This count should not be greater than
+ * VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @prot [in] : protection flags
+ * @phys_pfn [out] : array of host PFNs
+ * Return error or number of pages pinned.
+ */
+int vfio_group_pin_pages(struct vfio_group *group,
+ unsigned long *user_iova_pfn, int npage,
+ int prot, unsigned long *phys_pfn)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ if (!group || !user_iova_pfn || !phys_pfn || !npage)
+ return -EINVAL;
+
+ if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ return -E2BIG;
+
+ container = group->container;
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->pin_pages))
+ ret = driver->ops->pin_pages(container->iommu_data,
+ user_iova_pfn, npage,
+ prot, phys_pfn);
+ else
+ ret = -ENOTTY;
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_group_pin_pages);
+
+/*
+ * Unpin a set of guest IOVA PFNs for a VFIO group.
+ *
+ * The caller needs to call vfio_group_get_external_user() or
+ * vfio_group_get_external_user_from_dev() prior to calling this interface,
+ * so as to prevent the VFIO group from disposal in the middle of the call.
+ * But it can keep the reference to the VFIO group for several calls into
+ * this interface.
+ * After finishing using the VFIO group, the caller needs to release the
+ * VFIO group by calling vfio_group_put_external_user().
+ *
+ * @group [in] : vfio group
+ * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be unpinned.
+ * @npage [in] : count of elements in user_iova_pfn array.
+ * This count should not be greater than
+ * VFIO_PIN_PAGES_MAX_ENTRIES.
+ * Return error or number of pages unpinned.
+ */
+int vfio_group_unpin_pages(struct vfio_group *group,
+ unsigned long *user_iova_pfn, int npage)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ if (!group || !user_iova_pfn || !npage)
+ return -EINVAL;
+
+ if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ return -E2BIG;
+
+ container = group->container;
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->unpin_pages))
+ ret = driver->ops->unpin_pages(container->iommu_data,
+ user_iova_pfn, npage);
+ else
+ ret = -ENOTTY;
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_group_unpin_pages);
+
+/*
+ * This interface allows the CPUs to perform a virtual DMA on behalf of
+ * the device.
+ *
+ * CPUs read/write from/into a range of IOVAs pointing to user space memory
+ * into/from a kernel buffer.
+ *
+ * As the read/write of user space memory is conducted via the CPUs and is
+ * not a real device DMA, it is not necessary to pin the user space memory.
+ *
+ * The caller needs to call vfio_group_get_external_user() or
+ * vfio_group_get_external_user_from_dev() prior to calling this interface,
+ * so as to prevent the VFIO group from disposal in the middle of the call.
+ * But it can keep the reference to the VFIO group for several calls into
+ * this interface.
+ * After finishing using the VFIO group, the caller needs to release the
+ * VFIO group by calling vfio_group_put_external_user().
+ *
+ * @group [in] : VFIO group
+ * @user_iova [in] : base IOVA of a user space buffer
+ * @data [in] : pointer to kernel buffer
+ * @len [in] : kernel buffer length
+ * @write [in] : true for a write, false for a read
+ * Return error code on failure or 0 on success.
+ */
+int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
+ void *data, size_t len, bool write)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret = 0;
+
+ if (!group || !data || len <= 0)
+ return -EINVAL;
+
+ container = group->container;
+ driver = container->iommu_driver;
+
+ if (likely(driver && driver->ops->dma_rw))
+ ret = driver->ops->dma_rw(container->iommu_data,
+ user_iova, data, len, write);
+ else
+ ret = -ENOTTY;
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_dma_rw);
+
static int vfio_register_iommu_notifier(struct vfio_group *group,
unsigned long *events,
struct notifier_block *nb)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a177bf2c6683..cc1d64765ce7 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -27,6 +27,7 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
@@ -341,8 +342,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
if (vma && vma->vm_flags & VM_PFNMAP) {
- *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- if (is_invalid_reserved_pfn(*pfn))
+ if (!follow_pfn(vma, vaddr, pfn) &&
+ is_invalid_reserved_pfn(*pfn))
ret = 0;
}
done:
@@ -554,7 +555,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
continue;
}
- remote_vaddr = dma->vaddr + iova - dma->iova;
+ remote_vaddr = dma->vaddr + (iova - dma->iova);
ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
do_accounting);
if (ret)
@@ -1786,7 +1787,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (resv_msi) {
ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
- if (ret)
+ if (ret && ret != -ENODEV)
goto out_detach;
}
@@ -2305,6 +2306,80 @@ static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
return blocking_notifier_chain_unregister(&iommu->notifier, nb);
}
+static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
+ dma_addr_t user_iova, void *data,
+ size_t count, bool write,
+ size_t *copied)
+{
+ struct mm_struct *mm;
+ unsigned long vaddr;
+ struct vfio_dma *dma;
+ bool kthread = current->mm == NULL;
+ size_t offset;
+
+ *copied = 0;
+
+ dma = vfio_find_dma(iommu, user_iova, 1);
+ if (!dma)
+ return -EINVAL;
+
+ if ((write && !(dma->prot & IOMMU_WRITE)) ||
+ !(dma->prot & IOMMU_READ))
+ return -EPERM;
+
+ mm = get_task_mm(dma->task);
+
+ if (!mm)
+ return -EPERM;
+
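+	/*
+	 * A kernel thread borrows the mapping owner's mm; any other task
+	 * must already be running on that mm.
+	 */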
+ if (kthread)
+ use_mm(mm);
+ else if (current->mm != mm)
+ goto out;
+
+ offset = user_iova - dma->iova;
+
+ if (count > dma->size - offset)
+ count = dma->size - offset;
+
+ vaddr = dma->vaddr + offset;
+
+ if (write)
+ *copied = copy_to_user((void __user *)vaddr, data,
+ count) ? 0 : count;
+ else
+ *copied = copy_from_user(data, (void __user *)vaddr,
+ count) ? 0 : count;
+ if (kthread)
+ unuse_mm(mm);
+out:
+ mmput(mm);
+ return *copied ? 0 : -EFAULT;
+}
+
+static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
+ void *data, size_t count, bool write)
+{
+ struct vfio_iommu *iommu = iommu_data;
+ int ret = 0;
+ size_t done;
+
+ mutex_lock(&iommu->lock);
+ while (count > 0) {
+ ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
+ count, write, &done);
+ if (ret)
+ break;
+
+ count -= done;
+ data += done;
+ user_iova += done;
+ }
+
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
+
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.name = "vfio-iommu-type1",
.owner = THIS_MODULE,
@@ -2317,6 +2392,7 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.unpin_pages = vfio_iommu_type1_unpin_pages,
.register_notifier = vfio_iommu_type1_register_notifier,
.unregister_notifier = vfio_iommu_type1_unregister_notifier,
+ .dma_rw = vfio_iommu_type1_dma_rw,
};
static int __init vfio_iommu_type1_init(void)
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 3d03ccbd1adc..c4f273793595 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -1,7 +1,43 @@
# SPDX-License-Identifier: GPL-2.0-only
+config VHOST_IOTLB
+ tristate
+ help
+ Generic IOTLB implementation for vhost and vringh.
+ This option is selected by any driver which needs to support
+ an IOMMU in software.
+
+config VHOST_RING
+ tristate
+ select VHOST_IOTLB
+ help
+ This option is selected by any driver which needs to access
+ the host side of a virtio ring.
+
+config VHOST_DPN
+ bool
+ depends on !ARM || AEABI
+ default y
+ help
+ Anything selecting VHOST or VHOST_RING must depend on VHOST_DPN.
+ This excludes the deprecated ARM OABI, since that ABI forces 4-byte
+ alignment on all structs, which is incompatible with the virtio spec's
+ layout requirements.
+
+config VHOST
+ tristate
+ select VHOST_IOTLB
+ help
+ This option is selected by any driver which needs to access
+ the core of vhost.
+
+menuconfig VHOST_MENU
+ bool "VHOST drivers"
+ default y
+
+if VHOST_MENU
+
config VHOST_NET
tristate "Host kernel accelerator for virtio net"
- depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
+ depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) && VHOST_DPN
select VHOST
---help---
This kernel module can be loaded in the host kernel to accelerate
@@ -13,7 +49,7 @@ config VHOST_NET
config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
- depends on TARGET_CORE && EVENTFD
+ depends on TARGET_CORE && EVENTFD && VHOST_DPN
select VHOST
default n
---help---
@@ -22,9 +58,9 @@ config VHOST_SCSI
config VHOST_VSOCK
tristate "vhost virtio-vsock driver"
- depends on VSOCKETS && EVENTFD
- select VIRTIO_VSOCKETS_COMMON
+ depends on VSOCKETS && EVENTFD && VHOST_DPN
select VHOST
+ select VIRTIO_VSOCKETS_COMMON
default n
---help---
This kernel module can be loaded in the host kernel to provide AF_VSOCK
@@ -34,11 +70,17 @@ config VHOST_VSOCK
To compile this driver as a module, choose M here: the module will be called
vhost_vsock.
-config VHOST
- tristate
- ---help---
- This option is selected by any driver which needs to access
- the core of vhost.
+config VHOST_VDPA
+ tristate "Vhost driver for vDPA-based backend"
+ depends on EVENTFD && VHOST_DPN
+ select VHOST
+ depends on VDPA
+ help
+ This kernel module can be loaded in the host kernel to accelerate
+ guest virtio devices with vDPA-based backends.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vhost_vdpa.
config VHOST_CROSS_ENDIAN_LEGACY
bool "Cross-endian support for vhost"
@@ -54,3 +96,5 @@ config VHOST_CROSS_ENDIAN_LEGACY
adds some overhead, it is disabled by default.
If unsure, say "N".
+
+endif
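To make the VHOST_DPN rationale above concrete, here is an illustrative sketch (an assumption-laden example, not part of this patch): the virtio spec fixes structure layouts byte for byte, e.g. struct virtio_net_hdr is exactly 10 bytes, while the deprecated ARM OABI rounds structure sizes and alignment up to multiples of 4 bytes, which would grow such structures and corrupt the wire format.

	#include <linux/bug.h>
	#include <linux/virtio_net.h>
	#include <linux/virtio_ring.h>

	/*
	 * Illustrative build-time checks of the layout assumptions; both
	 * hold under EABI, but the virtio_net_hdr check would fire on the
	 * old OABI, where the struct is padded to 12 bytes.
	 */
	static inline void virtio_layout_assumptions(void)
	{
		BUILD_BUG_ON(sizeof(struct virtio_net_hdr) != 10);
		BUILD_BUG_ON(sizeof(struct vring_desc) != 16);
	}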
diff --git a/drivers/vhost/Kconfig.vringh b/drivers/vhost/Kconfig.vringh
deleted file mode 100644
index c1fe36a9b8d4..000000000000
--- a/drivers/vhost/Kconfig.vringh
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config VHOST_RING
- tristate
- ---help---
- This option is selected by any driver which needs to access
- the host side of a virtio ring.
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 6c6df24f770c..f3e1897cce85 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -10,4 +10,10 @@ vhost_vsock-y := vsock.o
obj-$(CONFIG_VHOST_RING) += vringh.o
+obj-$(CONFIG_VHOST_VDPA) += vhost_vdpa.o
+vhost_vdpa-y := vdpa.o
+
obj-$(CONFIG_VHOST) += vhost.o
+
+obj-$(CONFIG_VHOST_IOTLB) += vhost_iotlb.o
+vhost_iotlb-y := iotlb.o
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
new file mode 100644
index 000000000000..1f0ca6e44410
--- /dev/null
+++ b/drivers/vhost/iotlb.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Red Hat, Inc.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ * IOTLB implementation for vhost.
+ */
+#include <linux/slab.h>
+#include <linux/vhost_iotlb.h>
+#include <linux/module.h>
+
+#define MOD_VERSION "0.1"
+#define MOD_DESC "VHOST IOTLB"
+#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define MOD_LICENSE "GPL v2"
+
+#define START(map) ((map)->start)
+#define LAST(map) ((map)->last)
+
+INTERVAL_TREE_DEFINE(struct vhost_iotlb_map,
+ rb, __u64, __subtree_last,
+ START, LAST, static inline, vhost_iotlb_itree);
+
+/**
+ * vhost_iotlb_map_free - remove a map node and free it
+ * @iotlb: the IOTLB
+ * @map: the map to be removed and freed
+ */
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map)
+{
+ vhost_iotlb_itree_remove(map, &iotlb->root);
+ list_del(&map->link);
+ kfree(map);
+ iotlb->nmaps--;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
+
+/**
+ * vhost_iotlb_add_range - add a new range to vhost IOTLB
+ * @iotlb: the IOTLB
+ * @start: start of the IOVA range
+ * @last: last address of the IOVA range
+ * @addr: the address that is mapped to @start
+ * @perm: access permission of this range
+ *
+ * Returns an error if @last is smaller than @start or if memory
+ * allocation fails
+ */
+int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm)
+{
+ struct vhost_iotlb_map *map;
+
+ if (last < start)
+ return -EFAULT;
+
+ if (iotlb->limit &&
+ iotlb->nmaps == iotlb->limit &&
+ iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
+ map = list_first_entry(&iotlb->list, typeof(*map), link);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+
+ map = kmalloc(sizeof(*map), GFP_ATOMIC);
+ if (!map)
+ return -ENOMEM;
+
+ map->start = start;
+ map->size = last - start + 1;
+ map->last = last;
+ map->addr = addr;
+ map->perm = perm;
+
+ iotlb->nmaps++;
+ vhost_iotlb_itree_insert(map, &iotlb->root);
+
+ INIT_LIST_HEAD(&map->link);
+ list_add_tail(&map->link, &iotlb->list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
+
+/**
+ * vhost_iotlb_del_range - delete all ranges overlapping [start, last] from vhost IOTLB
+ * @iotlb: the IOTLB
+ * @start: start of the IOVA range
+ * @last: last address of the IOVA range
+ */
+void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
+{
+ struct vhost_iotlb_map *map;
+
+ while ((map = vhost_iotlb_itree_iter_first(&iotlb->root,
+ start, last)))
+ vhost_iotlb_map_free(iotlb, map);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
+
+/**
+ * vhost_iotlb_alloc - add a new vhost IOTLB
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ *
+ * Returns NULL if memory allocation fails
+ */
+struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
+{
+ struct vhost_iotlb *iotlb = kzalloc(sizeof(*iotlb), GFP_KERNEL);
+
+ if (!iotlb)
+ return NULL;
+
+ iotlb->root = RB_ROOT_CACHED;
+ iotlb->limit = limit;
+ iotlb->nmaps = 0;
+ iotlb->flags = flags;
+ INIT_LIST_HEAD(&iotlb->list);
+
+ return iotlb;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_alloc);
+
+/**
+ * vhost_iotlb_reset - reset vhost IOTLB (free all IOTLB entries)
+ * @iotlb: the IOTLB to be reset
+ */
+void vhost_iotlb_reset(struct vhost_iotlb *iotlb)
+{
+ vhost_iotlb_del_range(iotlb, 0ULL, 0ULL - 1);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_reset);
+
+/**
+ * vhost_iotlb_free - reset and free vhost IOTLB
+ * @iotlb: the IOTLB to be freed
+ */
+void vhost_iotlb_free(struct vhost_iotlb *iotlb)
+{
+ if (iotlb) {
+ vhost_iotlb_reset(iotlb);
+ kfree(iotlb);
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_free);
+
+/**
+ * vhost_iotlb_itree_first - return the first overlapped range
+ * @iotlb: the IOTLB
+ * @start: start of IOVA range
+ * @last: last address of the IOVA range
+ */
+struct vhost_iotlb_map *
+vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last)
+{
+ return vhost_iotlb_itree_iter_first(&iotlb->root, start, last);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first);
+
+/**
+ * vhost_iotlb_itree_next - return the next overlapped range
+ * @map: the current overlapped map node
+ * @start: start of IOVA range
+ * @last: last address of the IOVA range
+ */
+struct vhost_iotlb_map *
+vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last)
+{
+ return vhost_iotlb_itree_iter_next(map, start, last);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_itree_next);
+
+MODULE_VERSION(MOD_VERSION);
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_LICENSE(MOD_LICENSE);
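Taken together, the API above composes as in this hedged sketch (hypothetical driver code, not part of this patch), exercising the retire-oldest policy and the interval-tree iterators:

	#include <linux/printk.h>
	#include <linux/vhost_iotlb.h>

	/*
	 * Hedged example: a two-entry IOTLB with VHOST_IOTLB_FLAG_RETIRE,
	 * so a third vhost_iotlb_add_range() would silently retire the
	 * oldest map instead of failing.
	 */
	static int example_iotlb_usage(void)
	{
		struct vhost_iotlb *iotlb;
		struct vhost_iotlb_map *map;
		int ret;

		iotlb = vhost_iotlb_alloc(2, VHOST_IOTLB_FLAG_RETIRE);
		if (!iotlb)
			return -ENOMEM;

		ret = vhost_iotlb_add_range(iotlb, 0x0000, 0x0fff, 0xa0000, VHOST_MAP_RW);
		if (!ret)
			ret = vhost_iotlb_add_range(iotlb, 0x1000, 0x1fff, 0xb0000, VHOST_MAP_RO);
		if (ret)
			goto out;

		/* Walk every mapping overlapping IOVA [0x0800, 0x17ff] */
		for (map = vhost_iotlb_itree_first(iotlb, 0x0800, 0x17ff); map;
		     map = vhost_iotlb_itree_next(map, 0x0800, 0x17ff))
			pr_debug("iova [%llx, %llx] -> addr %llx perm 0x%x\n",
				 (unsigned long long)map->start,
				 (unsigned long long)map->last,
				 (unsigned long long)map->addr, map->perm);

		vhost_iotlb_del_range(iotlb, 0x1000, 0x1fff);
	out:
		vhost_iotlb_free(iotlb);	/* resets and frees all remaining maps */
		return ret;
	}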
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 18e205eeb9af..2927f02cc7e1 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -424,7 +424,7 @@ static void vhost_net_disable_vq(struct vhost_net *n,
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
return;
vhost_poll_stop(poll);
}
@@ -437,7 +437,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
struct socket *sock;
- sock = vq->private_data;
+ sock = vhost_vq_get_backend(vq);
if (!sock)
return 0;
@@ -524,7 +524,7 @@ static void vhost_net_busy_poll(struct vhost_net *net,
return;
vhost_disable_notify(&net->dev, vq);
- sock = rvq->private_data;
+ sock = vhost_vq_get_backend(rvq);
busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
tvq->busyloop_timeout;
@@ -570,8 +570,10 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
if (r == tvq->num && tvq->busyloop_timeout) {
/* Flush batched packets first */
- if (!vhost_sock_zcopy(tvq->private_data))
- vhost_tx_batch(net, tnvq, tvq->private_data, msghdr);
+ if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
+ vhost_tx_batch(net, tnvq,
+ vhost_vq_get_backend(tvq),
+ msghdr);
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
@@ -685,7 +687,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
- struct socket *sock = vq->private_data;
+ struct socket *sock = vhost_vq_get_backend(vq);
struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
@@ -952,7 +954,7 @@ static void handle_tx(struct vhost_net *net)
struct socket *sock;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend(vq);
if (!sock)
goto out;
@@ -1121,7 +1123,7 @@ static void handle_rx(struct vhost_net *net)
int recv_pkts = 0;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend(vq);
if (!sock)
goto out;
@@ -1324,7 +1326,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
UIO_MAXIOV + VHOST_NET_BATCH,
- VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT,
+ NULL);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
@@ -1344,9 +1347,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
container_of(vq, struct vhost_net_virtqueue, vq);
mutex_lock(&vq->mutex);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend(vq);
vhost_net_disable_vq(n, vq);
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
vhost_net_buf_unproduce(nvq);
nvq->rx_ring = NULL;
mutex_unlock(&vq->mutex);
@@ -1520,7 +1523,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
/* start polling new socket */
- oldsock = vq->private_data;
+ oldsock = vhost_vq_get_backend(vq);
if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock));
@@ -1530,7 +1533,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
vhost_net_disable_vq(n, vq);
- vq->private_data = sock;
+ vhost_vq_set_backend(vq, sock);
vhost_net_buf_unproduce(nvq);
r = vhost_vq_init_access(vq);
if (r)
@@ -1567,7 +1570,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
return 0;
err_used:
- vq->private_data = oldsock;
+ vhost_vq_set_backend(vq, oldsock);
vhost_net_enable_vq(n, vq);
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
@@ -1586,7 +1589,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
- struct vhost_umem *umem;
+ struct vhost_iotlb *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0b949a14bce3..c39952243fd3 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -452,7 +452,7 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
unsigned out, in;
int head, ret;
- if (!vq->private_data) {
+ if (!vhost_vq_get_backend(vq)) {
vs->vs_events_missed = true;
return;
}
@@ -892,7 +892,7 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
} else {
struct vhost_scsi_tpg **vs_tpg, *tpg;
- vs_tpg = vq->private_data; /* validated at handler entry */
+ vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */
tpg = READ_ONCE(vs_tpg[*vc->target]);
if (unlikely(!tpg)) {
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* We can handle the vq only after the endpoint is set up by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl.
*/
- vs_tpg = vq->private_data;
+ vs_tpg = vhost_vq_get_backend(vq);
if (!vs_tpg)
goto out;
@@ -1184,7 +1184,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl.
*/
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
memset(&vc, 0, sizeof(vc));
@@ -1322,7 +1322,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
if (vs->vs_events_missed)
@@ -1460,7 +1460,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
- vq->private_data = vs_tpg;
+ vhost_vq_set_backend(vq, vs_tpg);
vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex);
}
@@ -1547,7 +1547,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
}
@@ -1628,7 +1628,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
- VHOST_SCSI_WEIGHT, 0);
+ VHOST_SCSI_WEIGHT, 0, NULL);
vhost_scsi_init_inflight(vs, NULL);
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index e37c92d4d7ad..9a3a09005e03 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n)
void *private;
mutex_lock(&vq->mutex);
- private = vq->private_data;
+ private = vhost_vq_get_backend(vq);
if (!private) {
mutex_unlock(&vq->mutex);
return;
@@ -120,7 +120,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
- VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
+ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, NULL);
f->private_data = n;
@@ -133,8 +133,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n,
void *private;
mutex_lock(&vq->mutex);
- private = vq->private_data;
- vq->private_data = NULL;
+ private = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
return private;
}
@@ -198,8 +198,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
priv = test ? n : NULL;
/* start polling new socket */
- oldpriv = vq->private_data;
- vq->private_data = priv;
+ oldpriv = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, priv);
r = vhost_vq_init_access(&n->vqs[index]);
@@ -225,7 +225,7 @@ static long vhost_test_reset_owner(struct vhost_test *n)
{
void *priv = NULL;
long err;
- struct vhost_umem *umem;
+ struct vhost_iotlb *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
new file mode 100644
index 000000000000..0968361e3b77
--- /dev/null
+++ b/drivers/vhost/vdpa.c
@@ -0,0 +1,878 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Intel Corporation.
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Author: Tiwei Bie <tiwei.bie@intel.com>
+ * Jason Wang <jasowang@redhat.com>
+ *
+ * Thanks to Michael S. Tsirkin for the valuable comments and
+ * suggestions, and to Cunming Liang and Zhihong Wang for all
+ * their support.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/uuid.h>
+#include <linux/vdpa.h>
+#include <linux/nospec.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+
+#include "vhost.h"
+
+enum {
+ VHOST_VDPA_FEATURES =
+ (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
+ (1ULL << VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << VIRTIO_F_VERSION_1) |
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM) |
+ (1ULL << VIRTIO_F_RING_PACKED) |
+ (1ULL << VIRTIO_F_ORDER_PLATFORM) |
+ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX),
+
+ VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
+ (1ULL << VIRTIO_NET_F_CSUM) |
+ (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+ (1ULL << VIRTIO_NET_F_MTU) |
+ (1ULL << VIRTIO_NET_F_MAC) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) |
+ (1ULL << VIRTIO_NET_F_GUEST_UFO) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+ (1ULL << VIRTIO_NET_F_HOST_ECN) |
+ (1ULL << VIRTIO_NET_F_HOST_UFO) |
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_NET_F_STATUS) |
+ (1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
+};
+
+/* Currently, only a network backend without multiqueue is supported. */
+#define VHOST_VDPA_VQ_MAX 2
+
+#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
+
+struct vhost_vdpa {
+ struct vhost_dev vdev;
+ struct iommu_domain *domain;
+ struct vhost_virtqueue *vqs;
+ struct completion completion;
+ struct vdpa_device *vdpa;
+ struct device dev;
+ struct cdev cdev;
+ atomic_t opened;
+ int nvqs;
+ int virtio_id;
+ int minor;
+};
+
+static DEFINE_IDA(vhost_vdpa_ida);
+
+static dev_t vhost_vdpa_major;
+
+static const u64 vhost_vdpa_features[] = {
+ [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
+};
+
+static void handle_vq_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
+ const struct vdpa_config_ops *ops = v->vdpa->config;
+
+ ops->kick_vq(v->vdpa, vq - v->vqs);
+}
+
+static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
+{
+ struct vhost_virtqueue *vq = private;
+ struct eventfd_ctx *call_ctx = vq->call_ctx;
+
+ if (call_ctx)
+ eventfd_signal(call_ctx, 1);
+
+ return IRQ_HANDLED;
+}
+
+static void vhost_vdpa_reset(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ ops->set_status(vdpa, 0);
+}
+
+static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 device_id;
+
+ device_id = ops->get_device_id(vdpa);
+
+ if (copy_to_user(argp, &device_id, sizeof(device_id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u8 status;
+
+ status = ops->get_status(vdpa);
+
+ if (copy_to_user(statusp, &status, sizeof(status)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u8 status;
+
+ if (copy_from_user(&status, statusp, sizeof(status)))
+ return -EFAULT;
+
+ /*
+ * Userspace shouldn't remove status bits unless it resets the
+ * whole status to 0.
+ */
+ if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
+ return -EINVAL;
+
+ ops->set_status(vdpa, status);
+
+ return 0;
+}
+
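A hedged userspace sketch of the status contract enforced above (not part of this patch; VHOST_VDPA_SET_STATUS and the VIRTIO_CONFIG_S_* bits are assumed to come from the uapi headers this series touches): bits may only be added, and clearing any bit requires a full reset to 0 first.

	#include <sys/ioctl.h>
	#include <linux/vhost.h>
	#include <linux/virtio_config.h>

	/* Hypothetical helper; fd is an open /dev/vhost-vdpa-N descriptor. */
	static int example_status_dance(int fd)
	{
		__u8 st = 0;

		if (ioctl(fd, VHOST_VDPA_SET_STATUS, &st))	/* full reset */
			return -1;
		st = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
		return ioctl(fd, VHOST_VDPA_SET_STATUS, &st);	/* adding bits is allowed */
	}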
+static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
+ struct vhost_vdpa_config *c)
+{
+ long size = 0;
+
+ switch (v->virtio_id) {
+ case VIRTIO_ID_NET:
+ size = sizeof(struct virtio_net_config);
+ break;
+ }
+
+ if (c->len == 0)
+ return -EINVAL;
+
+ if (c->len > size - c->off)
+ return -E2BIG;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_config(struct vhost_vdpa *v,
+ struct vhost_vdpa_config __user *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_vdpa_config config;
+ unsigned long size = offsetof(struct vhost_vdpa_config, buf);
+ u8 *buf;
+
+ if (copy_from_user(&config, c, size))
+ return -EFAULT;
+ if (vhost_vdpa_config_validate(v, &config))
+ return -EINVAL;
+ buf = kvzalloc(config.len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ops->get_config(vdpa, config.off, buf, config.len);
+
+ if (copy_to_user(c->buf, buf, config.len)) {
+ kvfree(buf);
+ return -EFAULT;
+ }
+
+ kvfree(buf);
+ return 0;
+}
+
+static long vhost_vdpa_set_config(struct vhost_vdpa *v,
+ struct vhost_vdpa_config __user *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_vdpa_config config;
+ unsigned long size = offsetof(struct vhost_vdpa_config, buf);
+ u8 *buf;
+
+ if (copy_from_user(&config, c, size))
+ return -EFAULT;
+ if (vhost_vdpa_config_validate(v, &config))
+ return -EINVAL;
+ buf = kvzalloc(config.len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, c->buf, config.len)) {
+ kvfree(buf);
+ return -EFAULT;
+ }
+
+ ops->set_config(vdpa, config.off, buf, config.len);
+
+ kvfree(buf);
+ return 0;
+}
+
+static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u64 features;
+
+ features = ops->get_features(vdpa);
+ features &= vhost_vdpa_features[v->virtio_id];
+
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u64 features;
+
+ /*
+ * It's not allowed to change the features after they have
+ * been negotiated.
+ */
+ if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
+ return -EBUSY;
+
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+
+ if (features & ~vhost_vdpa_features[v->virtio_id])
+ return -EINVAL;
+
+ if (ops->set_features(vdpa, features))
+ return -EINVAL;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u16 num;
+
+ num = ops->get_vq_num_max(vdpa);
+
+ if (copy_to_user(argp, &num, sizeof(num)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
+ void __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_callback cb;
+ struct vhost_virtqueue *vq;
+ struct vhost_vring_state s;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, (u32 __user *)argp);
+ if (r < 0)
+ return r;
+
+ if (idx >= v->nvqs)
+ return -ENOBUFS;
+
+ idx = array_index_nospec(idx, v->nvqs);
+ vq = &v->vqs[idx];
+
+ if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+ ops->set_vq_ready(vdpa, idx, s.num);
+ return 0;
+ }
+
+ if (cmd == VHOST_GET_VRING_BASE)
+ vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);
+
+ r = vhost_vring_ioctl(&v->vdev, cmd, argp);
+ if (r)
+ return r;
+
+ switch (cmd) {
+ case VHOST_SET_VRING_ADDR:
+ if (ops->set_vq_address(vdpa, idx,
+ (u64)(uintptr_t)vq->desc,
+ (u64)(uintptr_t)vq->avail,
+ (u64)(uintptr_t)vq->used))
+ r = -EINVAL;
+ break;
+
+ case VHOST_SET_VRING_BASE:
+ if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
+ r = -EINVAL;
+ break;
+
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx) {
+ cb.callback = vhost_vdpa_virtqueue_cb;
+ cb.private = vq;
+ } else {
+ cb.callback = NULL;
+ cb.private = NULL;
+ }
+ ops->set_vq_cb(vdpa, idx, &cb);
+ break;
+
+ case VHOST_SET_VRING_NUM:
+ ops->set_vq_num(vdpa, idx, vq->num);
+ break;
+ }
+
+ return r;
+}
+
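As a hedged userspace sketch (not part of this patch), the new VHOST_VDPA_SET_VRING_ENABLE path above is driven like the other vring ioctls, passing a struct vhost_vring_state whose .num carries the ready flag:

	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	/* Hypothetical helper; fd is an open /dev/vhost-vdpa-N descriptor. */
	static int example_enable_vq(int fd, unsigned int index)
	{
		struct vhost_vring_state s = { .index = index, .num = 1 };

		return ioctl(fd, VHOST_VDPA_SET_VRING_ENABLE, &s);
	}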
+static long vhost_vdpa_unlocked_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vhost_vdpa *v = filep->private_data;
+ struct vhost_dev *d = &v->vdev;
+ void __user *argp = (void __user *)arg;
+ long r;
+
+ mutex_lock(&d->mutex);
+
+ switch (cmd) {
+ case VHOST_VDPA_GET_DEVICE_ID:
+ r = vhost_vdpa_get_device_id(v, argp);
+ break;
+ case VHOST_VDPA_GET_STATUS:
+ r = vhost_vdpa_get_status(v, argp);
+ break;
+ case VHOST_VDPA_SET_STATUS:
+ r = vhost_vdpa_set_status(v, argp);
+ break;
+ case VHOST_VDPA_GET_CONFIG:
+ r = vhost_vdpa_get_config(v, argp);
+ break;
+ case VHOST_VDPA_SET_CONFIG:
+ r = vhost_vdpa_set_config(v, argp);
+ break;
+ case VHOST_GET_FEATURES:
+ r = vhost_vdpa_get_features(v, argp);
+ break;
+ case VHOST_SET_FEATURES:
+ r = vhost_vdpa_set_features(v, argp);
+ break;
+ case VHOST_VDPA_GET_VRING_NUM:
+ r = vhost_vdpa_get_vring_num(v, argp);
+ break;
+ case VHOST_SET_LOG_BASE:
+ case VHOST_SET_LOG_FD:
+ r = -ENOIOCTLCMD;
+ break;
+ default:
+ r = vhost_dev_ioctl(&v->vdev, cmd, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vdpa_vring_ioctl(v, cmd, argp);
+ break;
+ }
+
+ mutex_unlock(&d->mutex);
+ return r;
+}
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+ struct vhost_iotlb_map *map;
+ struct page *page;
+ unsigned long pfn, pinned;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ pinned = map->size >> PAGE_SHIFT;
+ for (pfn = map->addr >> PAGE_SHIFT;
+ pinned > 0; pfn++, pinned--) {
+ page = pfn_to_page(pfn);
+ if (map->perm & VHOST_ACCESS_WO)
+ set_page_dirty_lock(page);
+ unpin_user_page(page);
+ }
+ atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
+{
+ struct vhost_dev *dev = &v->vdev;
+
+ vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
+ kfree(dev->iotlb);
+ dev->iotlb = NULL;
+}
+
+static int perm_to_iommu_flags(u32 perm)
+{
+ int flags = 0;
+
+ switch (perm) {
+ case VHOST_ACCESS_WO:
+ flags |= IOMMU_WRITE;
+ break;
+ case VHOST_ACCESS_RO:
+ flags |= IOMMU_READ;
+ break;
+ case VHOST_ACCESS_RW:
+ flags |= (IOMMU_WRITE | IOMMU_READ);
+ break;
+ default:
+ WARN(1, "invalid vhost IOTLB permission\n");
+ break;
+ }
+
+ return flags | IOMMU_CACHE;
+}
+
+static int vhost_vdpa_map(struct vhost_vdpa *v,
+ u64 iova, u64 size, u64 pa, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ int r = 0;
+
+ r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
+ pa, perm);
+ if (r)
+ return r;
+
+ if (ops->dma_map)
+ r = ops->dma_map(vdpa, iova, size, pa, perm);
+ else if (ops->set_map)
+ r = ops->set_map(vdpa, dev->iotlb);
+ else
+ r = iommu_map(v->domain, iova, pa, size,
+ perm_to_iommu_flags(perm));
+
+ return r;
+}
+
+static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+
+ if (ops->dma_map)
+ ops->dma_unmap(vdpa, iova, size);
+ else if (ops->set_map)
+ ops->set_map(vdpa, dev->iotlb);
+ else
+ iommu_unmap(v->domain, iova, size);
+}
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+ struct page **page_list;
+ unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ unsigned int gup_flags = FOLL_LONGTERM;
+ unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+ unsigned long locked, lock_limit, pinned, i;
+ u64 iova = msg->iova;
+ int ret = 0;
+
+ if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ msg->iova + msg->size - 1))
+ return -EEXIST;
+
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ if (msg->perm & VHOST_ACCESS_WO)
+ gup_flags |= FOLL_WRITE;
+
+ npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+ if (!npages) {
+ free_page((unsigned long)page_list);
+ return -EINVAL;
+ }
+
+ down_read(&dev->mm->mmap_sem);
+
+ locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (locked > lock_limit) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cur_base = msg->uaddr & PAGE_MASK;
+ iova &= PAGE_MASK;
+
+ while (npages) {
+ pinned = min_t(unsigned long, npages, list_size);
+ ret = pin_user_pages(cur_base, pinned,
+ gup_flags, page_list, NULL);
+ if (ret != pinned)
+ goto out;
+
+ if (!last_pfn)
+ map_pfn = page_to_pfn(page_list[0]);
+
+ for (i = 0; i < ret; i++) {
+ unsigned long this_pfn = page_to_pfn(page_list[i]);
+ u64 csize;
+
+ if (last_pfn && (this_pfn != last_pfn + 1)) {
+ /* Map the preceding contiguous run of pinned pages */
+ csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ if (vhost_vdpa_map(v, iova, csize,
+ map_pfn << PAGE_SHIFT,
+ msg->perm))
+ goto out;
+ map_pfn = this_pfn;
+ iova += csize;
+ }
+
+ last_pfn = this_pfn;
+ }
+
+ cur_base += ret << PAGE_SHIFT;
+ npages -= ret;
+ }
+
+ /* Map the remaining chunk of pinned pages */
+ ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+ map_pfn << PAGE_SHIFT, msg->perm);
+out:
+ if (ret) {
+ vhost_vdpa_unmap(v, msg->iova, msg->size);
+ atomic64_sub(npages, &dev->mm->pinned_vm);
+ }
+ up_read(&dev->mm->mmap_sem);
+ free_page((unsigned long)page_list);
+ return ret;
+}
+
+static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
+ int r = 0;
+
+ r = vhost_dev_check_owner(dev);
+ if (r)
+ return r;
+
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ r = vhost_vdpa_process_iotlb_update(v, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ vhost_vdpa_unmap(v, msg->iova, msg->size);
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
+static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vdpa *v = file->private_data;
+ struct vhost_dev *dev = &v->vdev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+ struct bus_type *bus;
+ int ret;
+
+ /* The device wants to do DMA by itself */
+ if (ops->set_map || ops->dma_map)
+ return 0;
+
+ bus = dma_dev->bus;
+ if (!bus)
+ return -EFAULT;
+
+ if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
+ return -ENOTSUPP;
+
+ v->domain = iommu_domain_alloc(bus);
+ if (!v->domain)
+ return -EIO;
+
+ ret = iommu_attach_device(v->domain, dma_dev);
+ if (ret)
+ goto err_attach;
+
+ return 0;
+
+err_attach:
+ iommu_domain_free(v->domain);
+ return ret;
+}
+
+static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+
+ if (v->domain) {
+ iommu_detach_device(v->domain, dma_dev);
+ iommu_domain_free(v->domain);
+ }
+
+ v->domain = NULL;
+}
+
+static int vhost_vdpa_open(struct inode *inode, struct file *filep)
+{
+ struct vhost_vdpa *v;
+ struct vhost_dev *dev;
+ struct vhost_virtqueue **vqs;
+ int nvqs, i, r, opened;
+
+ v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
+
+ opened = atomic_cmpxchg(&v->opened, 0, 1);
+ if (opened)
+ return -EBUSY;
+
+ nvqs = v->nvqs;
+ vhost_vdpa_reset(v);
+
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ dev = &v->vdev;
+ for (i = 0; i < nvqs; i++) {
+ vqs[i] = &v->vqs[i];
+ vqs[i]->handle_kick = handle_vq_kick;
+ }
+ vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
+ vhost_vdpa_process_iotlb_msg);
+
+ dev->iotlb = vhost_iotlb_alloc(0, 0);
+ if (!dev->iotlb) {
+ r = -ENOMEM;
+ goto err_init_iotlb;
+ }
+
+ r = vhost_vdpa_alloc_domain(v);
+ if (r)
+ goto err_init_iotlb;
+
+ filep->private_data = v;
+
+ return 0;
+
+err_init_iotlb:
+ vhost_dev_cleanup(&v->vdev);
+err:
+ atomic_dec(&v->opened);
+ return r;
+}
+
+static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+{
+ struct vhost_vdpa *v = filep->private_data;
+ struct vhost_dev *d = &v->vdev;
+
+ mutex_lock(&d->mutex);
+ filep->private_data = NULL;
+ vhost_vdpa_reset(v);
+ vhost_dev_stop(&v->vdev);
+ vhost_vdpa_iotlb_free(v);
+ vhost_vdpa_free_domain(v);
+ vhost_dev_cleanup(&v->vdev);
+ kfree(v->vdev.vqs);
+ mutex_unlock(&d->mutex);
+
+ atomic_dec(&v->opened);
+ complete(&v->completion);
+
+ return 0;
+}
+
+static const struct file_operations vhost_vdpa_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vdpa_open,
+ .release = vhost_vdpa_release,
+ .write_iter = vhost_vdpa_chr_write_iter,
+ .unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static void vhost_vdpa_release_dev(struct device *device)
+{
+ struct vhost_vdpa *v =
+ container_of(device, struct vhost_vdpa, dev);
+
+ ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ kfree(v->vqs);
+ kfree(v);
+}
+
+static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+{
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_vdpa *v;
+ int minor, nvqs = VHOST_VDPA_VQ_MAX;
+ int r;
+
+ /* Currently, we only accept network devices. */
+ if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
+ return -ENOTSUPP;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!v)
+ return -ENOMEM;
+
+ minor = ida_simple_get(&vhost_vdpa_ida, 0,
+ VHOST_VDPA_DEV_MAX, GFP_KERNEL);
+ if (minor < 0) {
+ kfree(v);
+ return minor;
+ }
+
+ atomic_set(&v->opened, 0);
+ v->minor = minor;
+ v->vdpa = vdpa;
+ v->nvqs = nvqs;
+ v->virtio_id = ops->get_device_id(vdpa);
+
+ device_initialize(&v->dev);
+ v->dev.release = vhost_vdpa_release_dev;
+ v->dev.parent = &vdpa->dev;
+ v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
+ v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
+ GFP_KERNEL);
+ if (!v->vqs) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
+ if (r)
+ goto err;
+
+ cdev_init(&v->cdev, &vhost_vdpa_fops);
+ v->cdev.owner = THIS_MODULE;
+
+ r = cdev_device_add(&v->cdev, &v->dev);
+ if (r)
+ goto err;
+
+ init_completion(&v->completion);
+ vdpa_set_drvdata(vdpa, v);
+
+ return 0;
+
+err:
+ put_device(&v->dev);
+ return r;
+}
+
+static void vhost_vdpa_remove(struct vdpa_device *vdpa)
+{
+ struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
+ int opened;
+
+ cdev_device_del(&v->cdev, &v->dev);
+
+ do {
+ opened = atomic_cmpxchg(&v->opened, 0, 1);
+ if (!opened)
+ break;
+ wait_for_completion(&v->completion);
+ } while (1);
+
+ put_device(&v->dev);
+}
+
+static struct vdpa_driver vhost_vdpa_driver = {
+ .driver = {
+ .name = "vhost_vdpa",
+ },
+ .probe = vhost_vdpa_probe,
+ .remove = vhost_vdpa_remove,
+};
+
+static int __init vhost_vdpa_init(void)
+{
+ int r;
+
+ r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
+ "vhost-vdpa");
+ if (r)
+ goto err_alloc_chrdev;
+
+ r = vdpa_register_driver(&vhost_vdpa_driver);
+ if (r)
+ goto err_vdpa_register_driver;
+
+ return 0;
+
+err_vdpa_register_driver:
+ unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
+err_alloc_chrdev:
+ return r;
+}
+module_init(vhost_vdpa_init);
+
+static void __exit vhost_vdpa_exit(void)
+{
+ vdpa_unregister_driver(&vhost_vdpa_driver);
+ unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
+}
+module_exit(vhost_vdpa_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f44340b41494..21a59b598ed8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -50,10 +50,6 @@ enum {
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
-INTERVAL_TREE_DEFINE(struct vhost_umem_node,
- rb, __u64, __subtree_last,
- START, LAST, static inline, vhost_umem_interval_tree);
-
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
@@ -457,7 +453,9 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs,
- int iov_limit, int weight, int byte_weight)
+ int iov_limit, int weight, int byte_weight,
+ int (*msg_handler)(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg))
{
struct vhost_virtqueue *vq;
int i;
@@ -473,6 +471,7 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->iov_limit = iov_limit;
dev->weight = weight;
dev->byte_weight = byte_weight;
+ dev->msg_handler = msg_handler;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
@@ -581,21 +580,25 @@ err_mm:
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
-struct vhost_umem *vhost_dev_reset_owner_prepare(void)
+static struct vhost_iotlb *iotlb_alloc(void)
+{
+ return vhost_iotlb_alloc(max_iotlb_entries,
+ VHOST_IOTLB_FLAG_RETIRE);
+}
+
+struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
- return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
+ return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
-void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
int i;
vhost_dev_cleanup(dev);
- /* Restore memory to default empty mapping. */
- INIT_LIST_HEAD(&umem->umem_list);
dev->umem = umem;
/* We don't need VQ locks below since vhost_dev_cleanup makes sure
* VQs aren't running.
@@ -618,28 +621,6 @@ void vhost_dev_stop(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
-static void vhost_umem_free(struct vhost_umem *umem,
- struct vhost_umem_node *node)
-{
- vhost_umem_interval_tree_remove(node, &umem->umem_tree);
- list_del(&node->link);
- kfree(node);
- umem->numem--;
-}
-
-static void vhost_umem_clean(struct vhost_umem *umem)
-{
- struct vhost_umem_node *node, *tmp;
-
- if (!umem)
- return;
-
- list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
- vhost_umem_free(umem, node);
-
- kvfree(umem);
-}
-
static void vhost_clear_msg(struct vhost_dev *dev)
{
struct vhost_msg_node *node, *n;
@@ -677,9 +658,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
eventfd_ctx_put(dev->log_ctx);
dev->log_ctx = NULL;
/* No one will access memory at this point */
- vhost_umem_clean(dev->umem);
+ vhost_iotlb_free(dev->umem);
dev->umem = NULL;
- vhost_umem_clean(dev->iotlb);
+ vhost_iotlb_free(dev->iotlb);
dev->iotlb = NULL;
vhost_clear_msg(dev);
wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
@@ -715,27 +696,26 @@ static bool vhost_overflow(u64 uaddr, u64 size)
}
/* Caller should have vq mutex and device mutex. */
-static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
+static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
int log_all)
{
- struct vhost_umem_node *node;
+ struct vhost_iotlb_map *map;
if (!umem)
return false;
- list_for_each_entry(node, &umem->umem_list, link) {
- unsigned long a = node->userspace_addr;
+ list_for_each_entry(map, &umem->list, link) {
+ unsigned long a = map->addr;
- if (vhost_overflow(node->userspace_addr, node->size))
+ if (vhost_overflow(map->addr, map->size))
return false;
- if (!access_ok((void __user *)a,
- node->size))
+ if (!access_ok((void __user *)a, map->size))
return false;
else if (log_all && !log_access_ok(log_base,
- node->start,
- node->size))
+ map->start,
+ map->size))
return false;
}
return true;
@@ -745,17 +725,17 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
u64 addr, unsigned int size,
int type)
{
- const struct vhost_umem_node *node = vq->meta_iotlb[type];
+ const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
- if (!node)
+ if (!map)
return NULL;
- return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
+ return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
-static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
+static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
int log_all)
{
int i;
@@ -889,7 +869,7 @@ static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
* not happen in this case.
*/
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
- void *addr, unsigned int size,
+ void __user *addr, unsigned int size,
int type)
{
void __user *uaddr = vhost_vq_meta_fetch(vq,
@@ -1020,47 +1000,6 @@ static inline int vhost_get_desc(struct vhost_virtqueue *vq,
return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
-static int vhost_new_umem_range(struct vhost_umem *umem,
- u64 start, u64 size, u64 end,
- u64 userspace_addr, int perm)
-{
- struct vhost_umem_node *tmp, *node;
-
- if (!size)
- return -EFAULT;
-
- node = kmalloc(sizeof(*node), GFP_ATOMIC);
- if (!node)
- return -ENOMEM;
-
- if (umem->numem == max_iotlb_entries) {
- tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
- vhost_umem_free(umem, tmp);
- }
-
- node->start = start;
- node->size = size;
- node->last = end;
- node->userspace_addr = userspace_addr;
- node->perm = perm;
- INIT_LIST_HEAD(&node->link);
- list_add_tail(&node->link, &umem->umem_list);
- vhost_umem_interval_tree_insert(node, &umem->umem_tree);
- umem->numem++;
-
- return 0;
-}
-
-static void vhost_del_umem_range(struct vhost_umem *umem,
- u64 start, u64 end)
-{
- struct vhost_umem_node *node;
-
- while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
- start, end)))
- vhost_umem_free(umem, node);
-}
-
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
struct vhost_iotlb_msg *msg)
{
@@ -1117,9 +1056,9 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
break;
}
vhost_vq_meta_reset(dev);
- if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
- msg->iova + msg->size - 1,
- msg->uaddr, msg->perm)) {
+ if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1,
+ msg->uaddr, msg->perm)) {
ret = -ENOMEM;
break;
}
@@ -1131,8 +1070,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
break;
}
vhost_vq_meta_reset(dev);
- vhost_del_umem_range(dev->iotlb, msg->iova,
- msg->iova + msg->size - 1);
+ vhost_iotlb_del_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1);
break;
default:
ret = -EINVAL;
@@ -1178,7 +1117,12 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
ret = -EINVAL;
goto done;
}
- if (vhost_process_iotlb_msg(dev, &msg)) {
+
+ if (dev->msg_handler)
+ ret = dev->msg_handler(dev, &msg);
+ else
+ ret = vhost_process_iotlb_msg(dev, &msg);
+ if (ret) {
ret = -EFAULT;
goto done;
}
@@ -1311,44 +1255,42 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
}
static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
- const struct vhost_umem_node *node,
+ const struct vhost_iotlb_map *map,
int type)
{
int access = (type == VHOST_ADDR_USED) ?
VHOST_ACCESS_WO : VHOST_ACCESS_RO;
- if (likely(node->perm & access))
- vq->meta_iotlb[type] = node;
+ if (likely(map->perm & access))
+ vq->meta_iotlb[type] = map;
}
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
int access, u64 addr, u64 len, int type)
{
- const struct vhost_umem_node *node;
- struct vhost_umem *umem = vq->iotlb;
+ const struct vhost_iotlb_map *map;
+ struct vhost_iotlb *umem = vq->iotlb;
u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
if (vhost_vq_meta_fetch(vq, addr, len, type))
return true;
while (len > s) {
- node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
- addr,
- last);
- if (node == NULL || node->start > addr) {
+ map = vhost_iotlb_itree_first(umem, addr, last);
+ if (map == NULL || map->start > addr) {
vhost_iotlb_miss(vq, addr, access);
return false;
- } else if (!(node->perm & access)) {
+ } else if (!(map->perm & access)) {
/* Report the possible access violation by
* request another translation from userspace.
*/
return false;
}
- size = node->size - addr + node->start;
+ size = map->size - addr + map->start;
if (orig_addr == addr && size >= len)
- vhost_vq_meta_update(vq, node, type);
+ vhost_vq_meta_update(vq, map, type);
s += size;
addr += size;
@@ -1364,12 +1306,12 @@ int vq_meta_prefetch(struct vhost_virtqueue *vq)
if (!vq->iotlb)
return 1;
- return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
+ return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
- iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
+ iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
vhost_get_avail_size(vq, num),
VHOST_ADDR_AVAIL) &&
- iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
+ iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
@@ -1408,25 +1350,11 @@ bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
-static struct vhost_umem *vhost_umem_alloc(void)
-{
- struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);
-
- if (!umem)
- return NULL;
-
- umem->umem_tree = RB_ROOT_CACHED;
- umem->numem = 0;
- INIT_LIST_HEAD(&umem->umem_list);
-
- return umem;
-}
-
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem;
struct vhost_memory_region *region;
- struct vhost_umem *newumem, *oldumem;
+ struct vhost_iotlb *newumem, *oldumem;
unsigned long size = offsetof(struct vhost_memory, regions);
int i;
@@ -1448,7 +1376,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EFAULT;
}
- newumem = vhost_umem_alloc();
+ newumem = iotlb_alloc();
if (!newumem) {
kvfree(newmem);
return -ENOMEM;
@@ -1457,13 +1385,12 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
for (region = newmem->regions;
region < newmem->regions + mem.nregions;
region++) {
- if (vhost_new_umem_range(newumem,
- region->guest_phys_addr,
- region->memory_size,
- region->guest_phys_addr +
- region->memory_size - 1,
- region->userspace_addr,
- VHOST_ACCESS_RW))
+ if (vhost_iotlb_add_range(newumem,
+ region->guest_phys_addr,
+ region->guest_phys_addr +
+ region->memory_size - 1,
+ region->userspace_addr,
+ VHOST_MAP_RW))
goto err;
}
@@ -1481,11 +1408,11 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
}
kvfree(newmem);
- vhost_umem_clean(oldumem);
+ vhost_iotlb_free(oldumem);
return 0;
err:
- vhost_umem_clean(newumem);
+ vhost_iotlb_free(newumem);
kvfree(newmem);
return -EFAULT;
}
@@ -1726,10 +1653,10 @@ EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
- struct vhost_umem *niotlb, *oiotlb;
+ struct vhost_iotlb *niotlb, *oiotlb;
int i;
- niotlb = vhost_umem_alloc();
+ niotlb = iotlb_alloc();
if (!niotlb)
return -ENOMEM;
@@ -1745,7 +1672,7 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
mutex_unlock(&vq->mutex);
}
- vhost_umem_clean(oiotlb);
+ vhost_iotlb_free(oiotlb);
return 0;
}
@@ -1875,8 +1802,8 @@ static int log_write(void __user *log_base,
static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
- struct vhost_umem *umem = vq->umem;
- struct vhost_umem_node *u;
+ struct vhost_iotlb *umem = vq->umem;
+ struct vhost_iotlb_map *u;
u64 start, end, l, min;
int r;
bool hit = false;
@@ -1886,16 +1813,15 @@ static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
/* More than one GPA can be mapped into a single HVA. So
* iterate all possible umems here to be safe.
*/
- list_for_each_entry(u, &umem->umem_list, link) {
- if (u->userspace_addr > hva - 1 + len ||
- u->userspace_addr - 1 + u->size < hva)
+ list_for_each_entry(u, &umem->list, link) {
+ if (u->addr > hva - 1 + len ||
+ u->addr - 1 + u->size < hva)
continue;
- start = max(u->userspace_addr, hva);
- end = min(u->userspace_addr - 1 + u->size,
- hva - 1 + len);
+ start = max(u->addr, hva);
+ end = min(u->addr - 1 + u->size, hva - 1 + len);
l = end - start + 1;
r = log_write(vq->log_base,
- u->start + start - u->userspace_addr,
+ u->start + start - u->addr,
l);
if (r < 0)
return r;
@@ -2046,9 +1972,9 @@ EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access)
{
- const struct vhost_umem_node *node;
+ const struct vhost_iotlb_map *map;
struct vhost_dev *dev = vq->dev;
- struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
+ struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
@@ -2060,25 +1986,24 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
break;
}
- node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
- addr, addr + len - 1);
- if (node == NULL || node->start > addr) {
+ map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
+ if (map == NULL || map->start > addr) {
if (umem != dev->iotlb) {
ret = -EFAULT;
break;
}
ret = -EAGAIN;
break;
- } else if (!(node->perm & access)) {
+ } else if (!(map->perm & access)) {
ret = -EPERM;
break;
}
_iov = iov + ret;
- size = node->size - addr + node->start;
+ size = map->size - addr + map->start;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
- (node->userspace_addr + addr - node->start);
+ (map->addr + addr - map->start);
s += size;
addr += size;
++ret;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a123fd70847e..f8403bd46b85 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,6 +12,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
+#include <linux/vhost_iotlb.h>
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -52,27 +53,6 @@ struct vhost_log {
u64 len;
};
-#define START(node) ((node)->start)
-#define LAST(node) ((node)->last)
-
-struct vhost_umem_node {
- struct rb_node rb;
- struct list_head link;
- __u64 start;
- __u64 last;
- __u64 size;
- __u64 userspace_addr;
- __u32 perm;
- __u32 flags_padding;
- __u64 __subtree_last;
-};
-
-struct vhost_umem {
- struct rb_root_cached umem_tree;
- struct list_head umem_list;
- int numem;
-};
-
enum vhost_uaddr_type {
VHOST_ADDR_DESC = 0,
VHOST_ADDR_AVAIL = 1,
@@ -90,7 +70,7 @@ struct vhost_virtqueue {
struct vring_desc __user *desc;
struct vring_avail __user *avail;
struct vring_used __user *used;
- const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
+ const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
struct eventfd_ctx *call_ctx;
struct eventfd_ctx *error_ctx;
@@ -128,8 +108,8 @@ struct vhost_virtqueue {
struct iovec *indirect;
struct vring_used_elem *heads;
/* Protected by virtqueue mutex. */
- struct vhost_umem *umem;
- struct vhost_umem *iotlb;
+ struct vhost_iotlb *umem;
+ struct vhost_iotlb *iotlb;
void *private_data;
u64 acked_features;
u64 acked_backend_features;
@@ -164,8 +144,8 @@ struct vhost_dev {
struct eventfd_ctx *log_ctx;
struct llist_head work_list;
struct task_struct *worker;
- struct vhost_umem *umem;
- struct vhost_umem *iotlb;
+ struct vhost_iotlb *umem;
+ struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock;
struct list_head read_list;
struct list_head pending_list;
@@ -174,16 +154,20 @@ struct vhost_dev {
int weight;
int byte_weight;
u64 kcov_handle;
+ int (*msg_handler)(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg);
};
bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
- int nvqs, int iov_limit, int weight, int byte_weight);
+ int nvqs, int iov_limit, int weight, int byte_weight,
+ int (*msg_handler)(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
-struct vhost_umem *vhost_dev_reset_owner_prepare(void);
-void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
+struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
@@ -229,6 +213,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map);
+
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
if ((vq)->error_ctx) \
@@ -244,6 +231,33 @@ enum {
(1ULL << VIRTIO_F_VERSION_1)
};
+/**
+ * vhost_vq_set_backend - Set backend.
+ *
+ * @vq: Virtqueue.
+ * @private_data: The private data to be set as the new backend.
+ *
+ * Context: Needs to be called with vq->mutex acquired.
+ */
+static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
+ void *private_data)
+{
+ vq->private_data = private_data;
+}
+
+/**
+ * vhost_vq_get_backend - Get backend.
+ *
+ * @vq: Virtqueue.
+ *
+ * Context: Needs to be called with vq->mutex acquired.
+ * Return: Private data previously set with vhost_vq_set_backend.
+ */
+static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
+{
+ return vq->private_data;
+}
+
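A hedged sketch of the locking contract these accessors document (hypothetical handler, not part of this patch), mirroring the net/scsi/test conversions earlier in this diff: the backend is read and swapped only under vq->mutex.

	/* Hypothetical kick handler for a socket-backed virtqueue. */
	static void example_handle_kick(struct vhost_virtqueue *vq)
	{
		struct socket *sock;

		mutex_lock(&vq->mutex);
		sock = vhost_vq_get_backend(vq);
		if (!sock) {
			/* Backend was cleared, e.g. during vhost_net_stop_vq() */
			mutex_unlock(&vq->mutex);
			return;
		}
		/* ... process the ring via sock ... */
		mutex_unlock(&vq->mutex);
	}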
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
return vq->acked_features & (1ULL << bit);
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index a0a2d74967ef..ba8e0d6cfd97 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -13,6 +13,11 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+#include <linux/bvec.h>
+#include <linux/highmem.h>
+#include <linux/vhost_iotlb.h>
+#endif
#include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
@@ -71,9 +76,11 @@ static inline int __vringh_get_head(const struct vringh *vrh,
}
/* Copy some bytes to/from the iovec. Returns num copied. */
-static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
+static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
+ struct vringh_kiov *iov,
void *ptr, size_t len,
- int (*xfer)(void *addr, void *ptr,
+ int (*xfer)(const struct vringh *vrh,
+ void *addr, void *ptr,
size_t len))
{
int err, done = 0;
@@ -82,7 +89,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
size_t partlen;
partlen = min(iov->iov[iov->i].iov_len, len);
- err = xfer(iov->iov[iov->i].iov_base, ptr, partlen);
+ err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
if (err)
return err;
done += partlen;
@@ -96,6 +103,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
/* Fix up old iov element then increment. */
iov->iov[iov->i].iov_len = iov->consumed;
iov->iov[iov->i].iov_base -= iov->consumed;
+
iov->consumed = 0;
iov->i++;
@@ -227,7 +235,8 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src,
u64 addr,
struct vringh_range *r),
struct vringh_range *range,
- int (*copy)(void *dst, const void *src, size_t len))
+ int (*copy)(const struct vringh *vrh,
+ void *dst, const void *src, size_t len))
{
size_t part, len = sizeof(struct vring_desc);
@@ -241,7 +250,7 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src,
if (!rcheck(vrh, addr, &part, range, getrange))
return -EINVAL;
- err = copy(dst, src, part);
+ err = copy(vrh, dst, src, part);
if (err)
return err;
@@ -262,7 +271,8 @@ __vringh_iov(struct vringh *vrh, u16 i,
struct vringh_range *)),
bool (*getrange)(struct vringh *, u64, struct vringh_range *),
gfp_t gfp,
- int (*copy)(void *dst, const void *src, size_t len))
+ int (*copy)(const struct vringh *vrh,
+ void *dst, const void *src, size_t len))
{
int err, count = 0, up_next, desc_max;
struct vring_desc desc, *descs;
@@ -291,7 +301,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
&slowrange, copy);
else
- err = copy(&desc, &descs[i], sizeof(desc));
+ err = copy(vrh, &desc, &descs[i], sizeof(desc));
if (unlikely(err))
goto fail;
@@ -404,7 +414,8 @@ static inline int __vringh_complete(struct vringh *vrh,
unsigned int num_used,
int (*putu16)(const struct vringh *vrh,
__virtio16 *p, u16 val),
- int (*putused)(struct vring_used_elem *dst,
+ int (*putused)(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem
*src, unsigned num))
{
@@ -420,12 +431,12 @@ static inline int __vringh_complete(struct vringh *vrh,
/* Compiler knows num_used == 1 sometimes, hence extra check */
if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
u16 part = vrh->vring.num - off;
- err = putused(&used_ring->ring[off], used, part);
+ err = putused(vrh, &used_ring->ring[off], used, part);
if (!err)
- err = putused(&used_ring->ring[0], used + part,
+ err = putused(vrh, &used_ring->ring[0], used + part,
num_used - part);
} else
- err = putused(&used_ring->ring[off], used, num_used);
+ err = putused(vrh, &used_ring->ring[off], used, num_used);
if (err) {
vringh_bad("Failed to write %u used entries %u at %p",
@@ -564,13 +575,15 @@ static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
return put_user(v, (__force __virtio16 __user *)p);
}
-static inline int copydesc_user(void *dst, const void *src, size_t len)
+static inline int copydesc_user(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
{
return copy_from_user(dst, (__force void __user *)src, len) ?
-EFAULT : 0;
}
-static inline int putused_user(struct vring_used_elem *dst,
+static inline int putused_user(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
@@ -578,13 +591,15 @@ static inline int putused_user(struct vring_used_elem *dst,
sizeof(*dst) * num) ? -EFAULT : 0;
}
-static inline int xfer_from_user(void *src, void *dst, size_t len)
+static inline int xfer_from_user(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
{
return copy_from_user(dst, (__force void __user *)src, len) ?
-EFAULT : 0;
}
-static inline int xfer_to_user(void *dst, void *src, size_t len)
+static inline int xfer_to_user(const struct vringh *vrh,
+ void *dst, void *src, size_t len)
{
return copy_to_user((__force void __user *)dst, src, len) ?
-EFAULT : 0;
@@ -706,7 +721,7 @@ EXPORT_SYMBOL(vringh_getdesc_user);
*/
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
- return vringh_iov_xfer((struct vringh_kiov *)riov,
+ return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);
@@ -722,7 +737,7 @@ EXPORT_SYMBOL(vringh_iov_pull_user);
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
const void *src, size_t len)
{
- return vringh_iov_xfer((struct vringh_kiov *)wiov,
+ return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
(void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);
@@ -832,13 +847,15 @@ static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
return 0;
}
-static inline int copydesc_kern(void *dst, const void *src, size_t len)
+static inline int copydesc_kern(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
{
memcpy(dst, src, len);
return 0;
}
-static inline int putused_kern(struct vring_used_elem *dst,
+static inline int putused_kern(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
@@ -846,13 +863,15 @@ static inline int putused_kern(struct vring_used_elem *dst,
return 0;
}
-static inline int xfer_kern(void *src, void *dst, size_t len)
+static inline int xfer_kern(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
{
memcpy(dst, src, len);
return 0;
}
-static inline int kern_xfer(void *dst, void *src, size_t len)
+static inline int kern_xfer(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
{
memcpy(dst, src, len);
return 0;
@@ -949,7 +968,7 @@ EXPORT_SYMBOL(vringh_getdesc_kern);
*/
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
- return vringh_iov_xfer(riov, dst, len, xfer_kern);
+ return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);
@@ -964,7 +983,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
const void *src, size_t len)
{
- return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer);
+ return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);
@@ -1042,4 +1061,365 @@ int vringh_need_notify_kern(struct vringh *vrh)
}
EXPORT_SYMBOL(vringh_need_notify_kern);
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+
+static int iotlb_translate(const struct vringh *vrh,
+ u64 addr, u64 len, struct bio_vec iov[],
+ int iov_size, u32 perm)
+{
+ struct vhost_iotlb_map *map;
+ struct vhost_iotlb *iotlb = vrh->iotlb;
+ int ret = 0;
+ u64 s = 0;
+
+ while (len > s) {
+ u64 size, pa, pfn;
+
+ if (unlikely(ret >= iov_size)) {
+ ret = -ENOBUFS;
+ break;
+ }
+
+ map = vhost_iotlb_itree_first(iotlb, addr,
+ addr + len - 1);
+ if (!map || map->start > addr) {
+ ret = -EINVAL;
+ break;
+ } else if (!(map->perm & perm)) {
+ ret = -EPERM;
+ break;
+ }
+
+ size = map->size - addr + map->start;
+ pa = map->addr + addr - map->start;
+ pfn = pa >> PAGE_SHIFT;
+ iov[ret].bv_page = pfn_to_page(pfn);
+ iov[ret].bv_len = min(len - s, size);
+ iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
+ s += size;
+ addr += size;
+ ++ret;
+ }
+
+ return ret;
+}
+
+static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ struct iov_iter iter;
+ struct bio_vec iov[16];
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ len, iov, 16, VHOST_MAP_RO);
+ if (ret < 0)
+ return ret;
+
+ iov_iter_bvec(&iter, READ, iov, ret, len);
+
+ ret = copy_from_iter(dst, len, &iter);
+
+ return ret;
+}
+
+static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ struct iov_iter iter;
+ struct bio_vec iov[16];
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ len, iov, 16, VHOST_MAP_WO);
+ if (ret < 0)
+ return ret;
+
+ iov_iter_bvec(&iter, WRITE, iov, ret, len);
+
+ return copy_to_iter(src, len, &iter);
+}
+
+static inline int getu16_iotlb(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p)
+{
+ struct bio_vec iov;
+ void *kaddr, *from;
+ int ret;
+
+ /* Atomic read is needed for getu16 */
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ &iov, 1, VHOST_MAP_RO);
+ if (ret < 0)
+ return ret;
+
+ kaddr = kmap_atomic(iov.bv_page);
+ from = kaddr + iov.bv_offset;
+ *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
+ kunmap_atomic(kaddr);
+
+ return 0;
+}
+
+static inline int putu16_iotlb(const struct vringh *vrh,
+ __virtio16 *p, u16 val)
+{
+ struct bio_vec iov;
+ void *kaddr, *to;
+ int ret;
+
+ /* Atomic write is needed for putu16 */
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ &iov, 1, VHOST_MAP_WO);
+ if (ret < 0)
+ return ret;
+
+ kaddr = kmap_atomic(iov.bv_page);
+ to = kaddr + iov.bv_offset;
+ WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
+ kunmap_atomic(kaddr);
+
+ return 0;
+}
+
+static inline int copydesc_iotlb(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
+{
+ int ret;
+
+ ret = copy_from_iotlb(vrh, dst, (void *)src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
+{
+ int ret;
+
+ ret = copy_from_iotlb(vrh, dst, src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int xfer_to_iotlb(const struct vringh *vrh,
+ void *dst, void *src, size_t len)
+{
+ int ret;
+
+ ret = copy_to_iotlb(vrh, dst, src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int putused_iotlb(const struct vringh *vrh,
+ struct vring_used_elem *dst,
+ const struct vring_used_elem *src,
+ unsigned int num)
+{
+ int size = num * sizeof(*dst);
+ int ret;
+
+ ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
+ if (ret != size)
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the userspace descriptor pointer.
+ * @avail: the userspace avail pointer.
+ * @used: the userspace used pointer.
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_iotlb(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used)
+{
+ return vringh_init_kern(vrh, features, num, weak_barriers,
+ desc, avail, used);
+}
+EXPORT_SYMBOL(vringh_init_iotlb);
+
+/**
+ * vringh_set_iotlb - associate an IOTLB with this vringh.
+ * @vrh: the vring
+ * @iotlb: iotlb associated with this vring
+ */
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
+{
+ vrh->iotlb = iotlb;
+}
+EXPORT_SYMBOL(vringh_set_iotlb);
+
+/**
+ * vringh_getdesc_iotlb - get next available descriptor from ring with
+ * IOTLB.
+ * @vrh: the kernelspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @head: head index we received, for passing to vringh_complete_iotlb().
+ * @gfp: flags for allocating larger riov/wiov.
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you may need to clean up riov and wiov, even on error!
+ */
+int vringh_getdesc_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ u16 *head,
+ gfp_t gfp)
+{
+ int err;
+
+ err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
+ if (err < 0)
+ return err;
+
+ /* Empty... */
+ if (err == vrh->vring.num)
+ return 0;
+
+ *head = err;
+ err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
+ gfp, copydesc_iotlb);
+ if (err)
+ return err;
+
+ return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_iotlb);
+
+/**
+ * vringh_iov_pull_iotlb - copy bytes from vring_iov.
+ * @vrh: the vring.
+ * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @dst: the place to copy.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ void *dst, size_t len)
+{
+ return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_pull_iotlb);
+
+/**
+ * vringh_iov_push_iotlb - copy bytes into vring_iov.
+ * @vrh: the vring.
+ * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @src: the place to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
+ struct vringh_kiov *wiov,
+ const void *src, size_t len)
+{
+ return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_push_iotlb);
+
+/**
+ * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (i.e. num
+ * vringh_getdesc_iotlb() to undo).
+ *
+ * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
+ */
+void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
+{
+ /* We only update vring_avail_event(vr) when we want to be notified,
+ * so we haven't changed that yet.
+ */
+ vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_iotlb);
+
+/**
+ * vringh_complete_iotlb - we've finished with descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_iotlb.
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_iotlb() after one or more calls
+ * to this function.
+ */
+int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
+{
+ struct vring_used_elem used;
+
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
+
+ return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
+}
+EXPORT_SYMBOL(vringh_complete_iotlb);
+
+/**
+ * vringh_notify_enable_iotlb - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_iotlb(struct vringh *vrh)
+{
+ return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_enable_iotlb);
+
+/**
+ * vringh_notify_disable_iotlb - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_iotlb(struct vringh *vrh)
+{
+ __vringh_notify_disable(vrh, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_disable_iotlb);
+
+/**
+ * vringh_need_notify_iotlb - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_iotlb() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_iotlb(struct vringh *vrh)
+{
+ return __vringh_need_notify(vrh, getu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_need_notify_iotlb);
+
+#endif
+
MODULE_LICENSE("GPL");
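Taken together, the new exports give IOTLB-backed consumers (such as a vDPA device) the same flow the _user and _kern variants already had. A hedged end-to-end sketch, assuming vringh_init_iotlb() has already been called and riov points at preallocated kiov storage:

/* Illustrative service loop over an IOTLB-backed vring; error handling
 * and notification delivery are abbreviated.
 */
static void demo_service_ring(struct vringh *vrh, struct vhost_iotlb *iotlb,
			      struct vringh_kiov *riov, void *buf, size_t len)
{
	ssize_t got;
	u16 head;
	int err;

	vringh_set_iotlb(vrh, iotlb);	/* descriptor addresses now translate */

	for (;;) {
		err = vringh_getdesc_iotlb(vrh, riov, NULL, &head, GFP_ATOMIC);
		if (err <= 0)		/* 0: ring empty, <0: invalid ring/desc */
			break;

		got = vringh_iov_pull_iotlb(vrh, riov, buf, len);
		if (got < 0)
			break;

		vringh_complete_iotlb(vrh, head, got);
		if (vringh_need_notify_iotlb(vrh) > 0) {
			/* notify the driver side, e.g. via a vq callback */
		}
	}
}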
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index c2d7d57e98cf..fb4e944c4d0d 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -91,7 +91,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
/* Avoid further vmexits, we're already processing the virtqueue */
@@ -181,14 +181,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
break;
}
- vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
- added = true;
-
- /* Deliver to monitoring devices all correctly transmitted
- * packets.
+ /* Deliver to monitoring devices all packets that we
+ * will transmit.
*/
virtio_transport_deliver_tap_pkt(pkt);
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
+ added = true;
+
pkt->off += payload_len;
total_len += payload_len;
@@ -196,6 +196,12 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* to send it with the next available buffer.
*/
if (pkt->off < pkt->len) {
+ /* We are queueing the same virtio_vsock_pkt to handle
+ * the remaining bytes, and we want to deliver it
+ * to monitoring devices in the next iteration.
+ */
+ pkt->tap_delivered = false;
+
spin_lock_bh(&vsock->send_pkt_list_lock);
list_add(&pkt->list, &vsock->send_pkt_list);
spin_unlock_bh(&vsock->send_pkt_list_lock);
@@ -440,7 +446,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
vhost_disable_notify(&vsock->dev, vq);
@@ -533,8 +539,8 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err_vq;
}
- if (!vq->private_data) {
- vq->private_data = vsock;
+ if (!vhost_vq_get_backend(vq)) {
+ vhost_vq_set_backend(vq, vsock);
ret = vhost_vq_init_access(vq);
if (ret)
goto err_vq;
@@ -543,18 +549,23 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
mutex_unlock(&vq->mutex);
}
+ /* Some packets may have been queued before the device was started,
+ * let's kick the send worker to send them.
+ */
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
mutex_unlock(&vsock->dev.mutex);
return 0;
err_vq:
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
err:
@@ -577,7 +588,7 @@ static int vhost_vsock_stop(struct vhost_vsock *vsock)
struct vhost_virtqueue *vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
@@ -621,7 +632,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
- VHOST_VSOCK_WEIGHT);
+ VHOST_VSOCK_WEIGHT, NULL);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
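vhost_dev_init() grew a msg_handler parameter in this series; vsock opts out with NULL above, while a device that intercepts IOTLB messages would pass its own callback. A hedged sketch (the handler name and its body are hypothetical):

/* Sketch: optional per-device IOTLB message interception. */
static int demo_process_iotlb_msg(struct vhost_dev *dev,
				  struct vhost_iotlb_msg *msg)
{
	/* Inspect msg->type (VHOST_IOTLB_UPDATE / VHOST_IOTLB_INVALIDATE)
	 * and apply it to the device's own IOTLB, returning 0 on success
	 * or a negative errno to reject the message.
	 */
	return 0;
}

/* ...then at open time, instead of passing NULL:
 * vhost_dev_init(&d->vdev, vqs, nvqs, UIO_MAXIOV, weight, byte_weight,
 *                demo_process_iotlb_msg);
 */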
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0093bbd0d326..7d22d7377606 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -9,7 +9,7 @@ menu "Backlight & LCD device support"
# LCD
#
config LCD_CLASS_DEVICE
- tristate "Lowlevel LCD controls"
+ tristate "Lowlevel LCD controls"
help
This framework adds support for low-level control of LCD.
Some framebuffer devices connect to platform-specific LCD modules
@@ -141,10 +141,10 @@ endif # LCD_CLASS_DEVICE
# Backlight
#
config BACKLIGHT_CLASS_DEVICE
- tristate "Lowlevel Backlight controls"
+ tristate "Lowlevel Backlight controls"
help
This framework adds support for low-level control of the LCD
- backlight. This includes support for brightness and power.
+ backlight. This includes support for brightness and power.
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
@@ -272,7 +272,7 @@ config BACKLIGHT_APPLE
tristate "Apple Backlight Driver"
depends on X86 && ACPI
help
- If you have an Intel-based Apple say Y to enable a driver for its
+ If you have an Intel-based Apple say Y to enable a driver for its
backlight.
config BACKLIGHT_TOSA
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 68f7592c5060..25ef0cbd7583 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/fb.h>
#include <linux/lcd.h>
#include <linux/spi/spi.h>
@@ -90,9 +90,8 @@ struct corgi_lcd {
int mode;
char buf[2];
- int gpio_backlight_on;
- int gpio_backlight_cont;
- int gpio_backlight_cont_inverted;
+ struct gpio_desc *backlight_on;
+ struct gpio_desc *backlight_cont;
void (*kick_battery)(void);
};
@@ -403,13 +402,13 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
corgi_ssp_lcdtg_send(lcd, DUTYCTRL_ADRS, intensity);
/* Bit 5 via GPIO_BACKLIGHT_CONT */
- cont = !!(intensity & 0x20) ^ lcd->gpio_backlight_cont_inverted;
+ cont = !!(intensity & 0x20);
- if (gpio_is_valid(lcd->gpio_backlight_cont))
- gpio_set_value_cansleep(lcd->gpio_backlight_cont, cont);
+ if (lcd->backlight_cont)
+ gpiod_set_value_cansleep(lcd->backlight_cont, cont);
- if (gpio_is_valid(lcd->gpio_backlight_on))
- gpio_set_value_cansleep(lcd->gpio_backlight_on, intensity);
+ if (lcd->backlight_on)
+ gpiod_set_value_cansleep(lcd->backlight_on, intensity);
if (lcd->kick_battery)
lcd->kick_battery();
@@ -482,48 +481,17 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
struct corgi_lcd_platform_data *pdata)
{
struct spi_device *spi = lcd->spi_dev;
- int err;
-
- lcd->gpio_backlight_on = -1;
- lcd->gpio_backlight_cont = -1;
-
- if (gpio_is_valid(pdata->gpio_backlight_on)) {
- err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_on,
- "BL_ON");
- if (err) {
- dev_err(&spi->dev,
- "failed to request GPIO%d for backlight_on\n",
- pdata->gpio_backlight_on);
- return err;
- }
-
- lcd->gpio_backlight_on = pdata->gpio_backlight_on;
- gpio_direction_output(lcd->gpio_backlight_on, 0);
- }
- if (gpio_is_valid(pdata->gpio_backlight_cont)) {
- err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_cont,
- "BL_CONT");
- if (err) {
- dev_err(&spi->dev,
- "failed to request GPIO%d for backlight_cont\n",
- pdata->gpio_backlight_cont);
- return err;
- }
-
- lcd->gpio_backlight_cont = pdata->gpio_backlight_cont;
-
- /* spitz and akita use both GPIOs for backlight, and
- * have inverted polarity of GPIO_BACKLIGHT_CONT
- */
- if (gpio_is_valid(lcd->gpio_backlight_on)) {
- lcd->gpio_backlight_cont_inverted = 1;
- gpio_direction_output(lcd->gpio_backlight_cont, 1);
- } else {
- lcd->gpio_backlight_cont_inverted = 0;
- gpio_direction_output(lcd->gpio_backlight_cont, 0);
- }
- }
+ lcd->backlight_on = devm_gpiod_get_optional(&spi->dev,
+ "BL_ON", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->backlight_on))
+ return PTR_ERR(lcd->backlight_on);
+
+ lcd->backlight_cont = devm_gpiod_get_optional(&spi->dev, "BL_CONT",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->backlight_cont))
+ return PTR_ERR(lcd->backlight_cont);
+
return 0;
}
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index efb4efc2a13d..82b8d7594701 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -7,7 +7,6 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -258,8 +257,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
&data->post_pwm_on_delay);
of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
- data->enable_gpio = -EINVAL;
-
/*
* Determine the number of brightness levels, if this property is not
* set a default table of brightness levels will be used.
@@ -503,22 +500,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
}
/*
- * Compatibility fallback for drivers still using the integer GPIO
- * platform data. Must go away soon.
- */
- if (!pb->enable_gpio && gpio_is_valid(data->enable_gpio)) {
- ret = devm_gpio_request_one(&pdev->dev, data->enable_gpio,
- GPIOF_OUT_INIT_HIGH, "enable");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request GPIO#%d: %d\n",
- data->enable_gpio, ret);
- goto err_alloc;
- }
-
- pb->enable_gpio = gpio_to_desc(data->enable_gpio);
- }
-
- /*
* If the GPIO is not known to be already configured as output, that
* is, if gpiod_get_direction returns either 1 or -EINVAL, change the
* direction to output and set the GPIO as active.
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 70c10ea1c38b..3c01b0d2414f 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -27,7 +27,7 @@ config VGACON_SOFT_SCROLLBACK
depends on VGA_CONSOLE
default n
help
- The scrollback buffer of the standard VGA console is located in
+ The scrollback buffer of the standard VGA console is located in
the VGA RAM. The size of this RAM is fixed and is quite small.
If you require a larger scrollback buffer, this can be placed in
System RAM which is dynamically allocated during initialization.
@@ -84,36 +84,36 @@ config MDA_CONSOLE
If unsure, say N.
config SGI_NEWPORT_CONSOLE
- tristate "SGI Newport Console support"
+ tristate "SGI Newport Console support"
depends on SGI_IP22 && HAS_IOMEM
- select FONT_SUPPORT
- help
- Say Y here if you want the console on the Newport aka XL graphics
- card of your Indy. Most people say Y here.
+ select FONT_SUPPORT
+ help
+ Say Y here if you want the console on the Newport aka XL graphics
+ card of your Indy. Most people say Y here.
config DUMMY_CONSOLE
bool
default y
config DUMMY_CONSOLE_COLUMNS
- int "Initial number of console screen columns"
- depends on DUMMY_CONSOLE && !ARM
- default 160 if PARISC
- default 80
- help
- On PA-RISC, the default value is 160, which should fit a 1280x1024
- monitor.
- Select 80 if you use a 640x480 resolution by default.
+ int "Initial number of console screen columns"
+ depends on DUMMY_CONSOLE && !ARM
+ default 160 if PARISC
+ default 80
+ help
+ On PA-RISC, the default value is 160, which should fit a 1280x1024
+ monitor.
+ Select 80 if you use a 640x480 resolution by default.
config DUMMY_CONSOLE_ROWS
- int "Initial number of console screen rows"
- depends on DUMMY_CONSOLE && !ARM
- default 64 if PARISC
- default 25
- help
- On PA-RISC, the default value is 64, which should fit a 1280x1024
- monitor.
- Select 25 if you use a 640x480 resolution by default.
+ int "Initial number of console screen rows"
+ depends on DUMMY_CONSOLE && !ARM
+ default 64 if PARISC
+ default 25
+ help
+ On PA-RISC, the default value is 64, which should fit a 1280x1024
+ monitor.
+ Select 25 if you use a 640x480 resolution by default.
config FRAMEBUFFER_CONSOLE
bool "Framebuffer Console support"
@@ -129,11 +129,11 @@ config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
depends on FRAMEBUFFER_CONSOLE
default n
---help---
- If this option is selected, the framebuffer console will
- automatically select the primary display device (if the architecture
+ If this option is selected, the framebuffer console will
+ automatically select the primary display device (if the architecture
supports this feature). Otherwise, the framebuffer console will
- always select the first framebuffer driver that is loaded. The latter
- is the default behavior.
+ always select the first framebuffer driver that is loaded. The latter
+ is the default behavior.
You can always override the automatic selection of the primary device
by using the fbcon=map: boot option.
@@ -144,11 +144,11 @@ config FRAMEBUFFER_CONSOLE_ROTATION
bool "Framebuffer Console Rotation"
depends on FRAMEBUFFER_CONSOLE
help
- Enable display rotation for the framebuffer console. This is done
- in software and may be significantly slower than a normally oriented
- display. Note that the rotation is done at the console level only
- such that other users of the framebuffer will remain normally
- oriented.
+ Enable display rotation for the framebuffer console. This is done
+ in software and may be significantly slower than a normally oriented
+ display. Note that the rotation is done at the console level only
+ such that other users of the framebuffer will remain normally
+ oriented.
config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
bool "Framebuffer Console Deferred Takeover"
@@ -162,14 +162,14 @@ config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
black screen as soon as fbcon loads.
config STI_CONSOLE
- bool "STI text console"
+ bool "STI text console"
depends on PARISC && HAS_IOMEM
- select FONT_SUPPORT
- default y
- help
- The STI console is the builtin display/keyboard on HP-PARISC
- machines. Say Y here to build support for it into your kernel.
- The alternative is to use your primary serial port as a console.
+ select FONT_SUPPORT
+ default y
+ help
+ The STI console is the builtin display/keyboard on HP-PARISC
+ machines. Say Y here to build support for it into your kernel.
+ The alternative is to use your primary serial port as a console.
endmenu
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index f65991a67af2..91b0a719d221 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -435,7 +435,7 @@ config FB_FM2
config FB_ARC
tristate "Arc Monochrome LCD board support"
- depends on FB && X86
+ depends on FB && (X86 || COMPILE_TEST)
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -1639,7 +1639,7 @@ config FB_VT8500
config FB_WM8505
bool "Wondermedia WM8xxx-series frame buffer support"
- depends on (FB = y) && ARM && ARCH_VT8500
+ depends on (FB = y) && HAS_IOMEM && (ARCH_VT8500 || COMPILE_TEST)
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
@@ -1827,7 +1827,7 @@ config FB_FSL_DIU
config FB_W100
tristate "W100 frame buffer support"
- depends on FB && ARCH_PXA
+ depends on FB && HAS_IOMEM && (ARCH_PXA || COMPILE_TEST)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1844,7 +1844,8 @@ config FB_W100
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
- depends on FB && (SUPERH || ARCH_RENESAS) && HAVE_CLK
+ depends on FB && HAVE_CLK && HAS_IOMEM
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
index 27cb65fa2ba2..9c37e28fb78b 100644
--- a/drivers/video/fbdev/aty/mach64_gx.c
+++ b/drivers/video/fbdev/aty/mach64_gx.c
@@ -618,14 +618,13 @@ static int aty_var_to_pll_8398(const struct fb_info *info, u32 vclk_per,
u32 mhz100; /* in 0.01 MHz */
u32 program_bits;
/* u32 post_divider; */
- u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
+ u32 mach64MinFreq, mach64MaxFreq;
u16 m, n, k = 0, save_m, save_n, twoToKth;
/* Calculate the programming word */
mhz100 = 100000000 / vclk_per;
mach64MinFreq = MIN_FREQ_2595;
mach64MaxFreq = MAX_FREQ_2595;
- mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */
save_m = 0;
save_n = 0;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 3af00e3b965e..e116a3f9ad56 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -849,12 +849,6 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
case 9 ... 16:
v.bits_per_pixel = 16;
break;
- case 17 ... 24:
-#if 0 /* Doesn't seem to work */
- v.bits_per_pixel = 24;
- break;
-#endif
- return -EINVAL;
case 25 ... 32:
v.bits_per_pixel = 32;
break;
@@ -1650,14 +1644,14 @@ static int radeonfb_set_par(struct fb_info *info)
struct fb_var_screeninfo *mode = &info->var;
struct radeon_regs *newmode;
int hTotal, vTotal, hSyncStart, hSyncEnd,
- hSyncPol, vSyncStart, vSyncEnd, vSyncPol, cSync;
+ vSyncStart, vSyncEnd;
u8 hsync_adj_tab[] = {0, 0x12, 9, 9, 6, 5};
u8 hsync_fudge_fp[] = {2, 2, 0, 0, 5, 5};
u32 sync, h_sync_pol, v_sync_pol, dotClock, pixClock;
int i, freq;
int format = 0;
int nopllcalc = 0;
- int hsync_start, hsync_fudge, bytpp, hsync_wid, vsync_wid;
+ int hsync_start, hsync_fudge, hsync_wid, vsync_wid;
int primary_mon = PRIMARY_MONITOR(rinfo);
int depth = var_to_depth(mode);
int use_rmx = 0;
@@ -1730,13 +1724,7 @@ static int radeonfb_set_par(struct fb_info *info)
else if (vsync_wid > 0x1f) /* max */
vsync_wid = 0x1f;
- hSyncPol = mode->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
- vSyncPol = mode->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
-
- cSync = mode->sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;
-
format = radeon_get_dstbpp(depth);
- bytpp = mode->bits_per_pixel >> 3;
if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD))
hsync_fudge = hsync_fudge_fp[format-1];
@@ -2548,16 +2536,6 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
if (rinfo->mon2_EDID)
sysfs_remove_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr);
-#if 0
- /* restore original state
- *
- * Doesn't quite work yet, I suspect if we come from a legacy
- * VGA mode (or worse, text mode), we need to do some VGA black
- * magic here that I know nothing about. --BenH
- */
- radeon_write_mode (rinfo, &rinfo->init_state, 1);
- #endif
-
del_timer_sync(&rinfo->lvds_timer);
arch_phys_wc_del(rinfo->wc_cookie);
unregister_framebuffer(info);
diff --git a/drivers/video/fbdev/c2p_core.h b/drivers/video/fbdev/c2p_core.h
index 45a6d895a7d7..cf5f1ebce65e 100644
--- a/drivers/video/fbdev/c2p_core.h
+++ b/drivers/video/fbdev/c2p_core.h
@@ -12,6 +12,8 @@
* for more details.
*/
+#include <linux/build_bug.h>
+
/*
* Basic transpose step
@@ -27,8 +29,6 @@ static inline void _transp(u32 d[], unsigned int i1, unsigned int i2,
}
-extern void c2p_unsupported(void);
-
static __always_inline u32 get_mask(unsigned int n)
{
switch (n) {
@@ -48,7 +48,7 @@ static __always_inline u32 get_mask(unsigned int n)
return 0x0000ffff;
}
- c2p_unsupported();
+ BUILD_BUG();
return 0;
}
@@ -91,7 +91,7 @@ static __always_inline void transp8(u32 d[], unsigned int n, unsigned int m)
return;
}
- c2p_unsupported();
+ BUILD_BUG();
}
@@ -118,7 +118,7 @@ static __always_inline void transp4(u32 d[], unsigned int n, unsigned int m)
return;
}
- c2p_unsupported();
+ BUILD_BUG();
}
@@ -138,7 +138,7 @@ static __always_inline void transp4x(u32 d[], unsigned int n, unsigned int m)
return;
}
- c2p_unsupported();
+ BUILD_BUG();
}
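Replacing the deliberately-undefined extern with BUILD_BUG() keeps the same compile-time guarantee without the link-time indirection. A minimal hedged sketch of the idiom:

#include <linux/build_bug.h>

/* Sketch: with __always_inline and constant n, an unsupported value
 * makes the BUILD_BUG() reachable and the build fails immediately,
 * instead of failing at link time on a missing c2p_unsupported() symbol.
 */
static __always_inline u32 demo_mask(unsigned int n)
{
	switch (n) {
	case 8:
		return 0x00ff00ff;
	case 16:
		return 0x0000ffff;
	}
	BUILD_BUG();	/* unreachable for all supported n */
	return 0;
}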
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index a620b51cf7d0..6a745eb46ca1 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -509,8 +509,7 @@ static int cg14_probe(struct platform_device *op)
if (!par->regs || !par->clut || !par->cursor || !info->screen_base)
goto out_unmap_regs;
- is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) ==
- (8 * 1024 * 1024));
+ is_8mb = (resource_size(&op->resource[1]) == (8 * 1024 * 1024));
BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map));
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index 37710316a680..26cbc965497c 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -16,7 +16,6 @@ fb-y += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
fbcon_ccw.o
endif
endif
-fb-objs := $(fb-y)
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bb6ae995c2e5..9d28a8e3328f 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -873,7 +873,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
int oldidx = con2fb_map[unit];
struct fb_info *info = registered_fb[newidx];
struct fb_info *oldinfo = NULL;
- int found, err = 0;
+ int found, err = 0;
WARN_CONSOLE_UNLOCKED();
@@ -895,31 +895,30 @@ static int set_con2fb_map(int unit, int newidx, int user)
con2fb_map[unit] = newidx;
if (!err && !found)
- err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
-
+ err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
/*
* If old fb is not mapped to any of the consoles,
* fbcon should release it.
*/
- if (!err && oldinfo && !search_fb_in_map(oldidx))
- err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
- found);
+ if (!err && oldinfo && !search_fb_in_map(oldidx))
+ err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
+ found);
- if (!err) {
- int show_logo = (fg_console == 0 && !user &&
- logo_shown != FBCON_LOGO_DONTSHOW);
+ if (!err) {
+ int show_logo = (fg_console == 0 && !user &&
+ logo_shown != FBCON_LOGO_DONTSHOW);
- if (!found)
- fbcon_add_cursor_timer(info);
- con2fb_map_boot[unit] = newidx;
- con2fb_init_display(vc, info, unit, show_logo);
+ if (!found)
+ fbcon_add_cursor_timer(info);
+ con2fb_map_boot[unit] = newidx;
+ con2fb_init_display(vc, info, unit, show_logo);
}
if (!search_fb_in_map(info_idx))
info_idx = newidx;
- return err;
+ return err;
}
/*
@@ -1283,6 +1282,9 @@ finished:
if (!con_is_bound(&fb_con))
fbcon_exit();
+ if (vc->vc_num == logo_shown)
+ logo_shown = FBCON_LOGO_CANSHOW;
+
return;
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index d04554959ea7..30e73ec4ad5c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -663,20 +663,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
fb_logo.depth = 1;
- if (fb_logo.depth > 4 && depth > 4) {
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- fb_logo.needs_truepalette = 1;
- break;
- case FB_VISUAL_DIRECTCOLOR:
- fb_logo.needs_directpalette = 1;
- fb_logo.needs_cmapreset = 1;
- break;
- case FB_VISUAL_PSEUDOCOLOR:
- fb_logo.needs_cmapreset = 1;
- break;
- }
- }
+ if (fb_logo.depth > 4 && depth > 4) {
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ fb_logo.needs_truepalette = 1;
+ break;
+ case FB_VISUAL_DIRECTCOLOR:
+ fb_logo.needs_directpalette = 1;
+ fb_logo.needs_cmapreset = 1;
+ break;
+ case FB_VISUAL_PSEUDOCOLOR:
+ fb_logo.needs_cmapreset = 1;
+ break;
+ }
+ }
height = fb_logo.logo->height;
if (fb_center_logo)
@@ -1065,19 +1065,19 @@ fb_blank(struct fb_info *info, int blank)
struct fb_event event;
int ret = -EINVAL;
- if (blank > FB_BLANK_POWERDOWN)
- blank = FB_BLANK_POWERDOWN;
+ if (blank > FB_BLANK_POWERDOWN)
+ blank = FB_BLANK_POWERDOWN;
event.info = info;
event.data = &blank;
if (info->fbops->fb_blank)
- ret = info->fbops->fb_blank(blank, info);
+ ret = info->fbops->fb_blank(blank, info);
if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
- return ret;
+ return ret;
}
EXPORT_SYMBOL(fb_blank);
@@ -1115,7 +1115,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
break;
case FBIOGET_FSCREENINFO:
lock_fb_info(info);
- fix = info->fix;
+ memcpy(&fix, &info->fix, sizeof(fix));
if (info->flags & FBINFO_HIDE_SMEM_START)
fix.smem_start = 0;
unlock_fb_info(info);
diff --git a/drivers/video/fbdev/g364fb.c b/drivers/video/fbdev/g364fb.c
index 845b79da2a7c..05837a3b985c 100644
--- a/drivers/video/fbdev/g364fb.c
+++ b/drivers/video/fbdev/g364fb.c
@@ -108,7 +108,6 @@ static int g364fb_pan_display(struct fb_var_screeninfo *var,
static int g364fb_setcolreg(u_int regno, u_int red, u_int green,
u_int blue, u_int transp,
struct fb_info *info);
-static int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor);
static int g364fb_blank(int blank, struct fb_info *info);
static const struct fb_ops g364fb_ops = {
@@ -119,28 +118,8 @@ static const struct fb_ops g364fb_ops = {
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
- .fb_cursor = g364fb_cursor,
};
-int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
-{
-
- switch (cursor->enable) {
- case CM_ERASE:
- *(unsigned int *) CTLA_REG |= CURS_TOGGLE;
- break;
-
- case CM_MOVE:
- case CM_DRAW:
- *(unsigned int *) CTLA_REG &= ~CURS_TOGGLE;
- *(unsigned int *) CURS_POS_REG =
- ((x * fontwidth(p)) << 12) | ((y * fontheight(p)) -
- info->var.yoffset);
- break;
- }
- return 0;
-}
-
/*
* Pan or Wrap the Display
*
@@ -194,11 +173,9 @@ static int g364fb_setcolreg(u_int regno, u_int red, u_int green,
*/
int __init g364fb_init(void)
{
- volatile unsigned int *pal_ptr =
- (volatile unsigned int *) CLR_PAL_REG;
volatile unsigned int *curs_pal_ptr =
(volatile unsigned int *) CURS_PAL_REG;
- int mem, i, j;
+ int mem, i;
if (fb_get_options("g364fb", NULL))
return -ENODEV;
@@ -230,8 +207,8 @@ int __init g364fb_init(void)
*/
*(unsigned short *) (CURS_PAT_REG + 14 * 64) = 0xffff;
*(unsigned short *) (CURS_PAT_REG + 15 * 64) = 0xffff;
- fb_var.xres_virtual = fbvar.xres;
- fb_fix.line_length = (xres / 8) * fb_var.bits_per_pixel;
+ fb_var.xres_virtual = fb_var.xres;
+ fb_fix.line_length = fb_var.xres_virtual * fb_var.bits_per_pixel / 8;
fb_fix.smem_start = 0x40000000; /* physical address */
/* get size of video memory; this is special for the JAZZ hardware */
mem = (r4030_read_reg32(JAZZ_R4030_CONFIG) >> 8) & 3;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index f47d50e560c0..e4c3c8b65da4 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -594,8 +594,8 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
if (!t) {
pr_err("Time out on waiting resolution response\n");
- ret = -ETIMEDOUT;
- goto out;
+ ret = -ETIMEDOUT;
+ goto out;
}
if (msg->resolution_resp.resolution_count == 0) {
diff --git a/drivers/video/fbdev/kyro/STG4000OverlayDevice.c b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
index 0aeeaa10708b..9fde0e3b69ec 100644
--- a/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
@@ -331,7 +331,7 @@ int SetOverlayViewPort(volatile STG4000REG __iomem *pSTGReg,
u32 ulScale;
u32 ulLeft, ulRight;
u32 ulSrcLeft, ulSrcRight;
- u32 ulScaleLeft, ulScaleRight;
+ u32 ulScaleLeft;
u32 ulhDecim;
u32 ulsVal;
u32 ulVertDecFactor;
@@ -470,7 +470,6 @@ int SetOverlayViewPort(volatile STG4000REG __iomem *pSTGReg,
* round down the pixel pos to the nearest 8 pixels.
*/
ulScaleLeft = ulSrcLeft;
- ulScaleRight = ulSrcRight;
/* shift fxscale until it is in the range of the scaler */
ulhDecim = 0;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 36cc718b96ae..570439b32655 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1376,6 +1376,12 @@ static struct video_board vbG200 = {
.accelID = FB_ACCEL_MATROX_MGAG200,
.lowlevel = &matrox_G100
};
+static struct video_board vbG200eW = {
+ .maxvram = 0x800000,
+ .maxdisplayable = 0x800000,
+ .accelID = FB_ACCEL_MATROX_MGAG200,
+ .lowlevel = &matrox_G100
+};
/* from doc it looks like that accelerator can draw only to low 16MB :-( Direct accesses & displaying are OK for
whole 32MB */
static struct video_board vbG400 = {
@@ -1494,6 +1500,13 @@ static struct board {
MGA_G200,
&vbG200,
"MGA-G200 (PCI)"},
+ {PCI_VENDOR_ID_MATROX, 0x0532, 0xFF,
+ 0, 0,
+ DEVF_G200,
+ 250000,
+ MGA_G200,
+ &vbG200eW,
+ "MGA-G200eW (PCI)"},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_AGP, 0xFF,
PCI_SS_VENDOR_ID_MATROX, PCI_SS_ID_MATROX_GENERIC,
DEVF_G200,
@@ -2136,6 +2149,8 @@ static const struct pci_device_id matroxfb_devices[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_PCI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_MATROX, 0x0532,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_AGP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400,
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index 335d4983dc52..167585a889d3 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
@@ -1406,7 +1406,7 @@ struct mmphw_ctrl {
/*pathes*/
int path_num;
- struct mmphw_path_plat path_plats[0];
+ struct mmphw_path_plat path_plats[];
};
static inline int overlay_is_vid(struct mmp_overlay *overlay)
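The [0] to [] change converts path_plats to a C99 flexible array member. Allocations of such structs typically pair with struct_size() from <linux/overflow.h>; a hedged sketch (this allocation site is not part of the hunk, and demo_alloc_ctrl is hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>

/* Sketch: size the header plus n trailing elements with overflow
 * checking, which the old zero-length-array idiom did by hand.
 */
static struct mmphw_ctrl *demo_alloc_ctrl(int n)
{
	struct mmphw_ctrl *ctrl;

	ctrl = kzalloc(struct_size(ctrl, path_plats, n), GFP_KERNEL);
	if (!ctrl)
		return NULL;

	ctrl->path_num = n;
	return ctrl;
}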
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index c583c018304d..c24de9107958 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -168,27 +168,26 @@ static int nvidia_panel_tweak(struct nvidia_par *par,
{
int tweak = 0;
- if (par->paneltweak) {
- tweak = par->paneltweak;
- } else {
- /* begin flat panel hacks */
- /* This is unfortunate, but some chips need this register
- tweaked or else you get artifacts where adjacent pixels are
- swapped. There are no hard rules for what to set here so all
- we can do is experiment and apply hacks. */
-
- if(((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
- /* At least one NV34 laptop needs this workaround. */
- tweak = -1;
- }
-
- if((par->Chipset & 0xfff0) == 0x0310) {
- tweak = 1;
- }
- /* end flat panel hacks */
- }
-
- return tweak;
+ if (par->paneltweak) {
+ tweak = par->paneltweak;
+ } else {
+ /* Begin flat panel hacks.
+ * This is unfortunate, but some chips need this register
+ * tweaked or else you get artifacts where adjacent pixels are
+ * swapped. There are no hard rules for what to set here so all
+ * we can do is experiment and apply hacks.
+ */
+ if (((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
+ /* At least one NV34 laptop needs this workaround. */
+ tweak = -1;
+ }
+
+ if ((par->Chipset & 0xfff0) == 0x0310)
+ tweak = 1;
+ /* end flat panel hacks */
+ }
+
+ return tweak;
}
static void nvidia_screen_off(struct nvidia_par *par, int on)
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 8dfa9158ba78..836e7b1639ce 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1154,16 +1154,12 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
r = fbdev->ctrl->setcolreg(regno, red, green, blue,
transp, update_hw_pal);
*/
- /* Fallthrough */
r = -EINVAL;
break;
case OMAPFB_COLOR_RGB565:
case OMAPFB_COLOR_RGB444:
case OMAPFB_COLOR_RGB24P:
case OMAPFB_COLOR_RGB24U:
- if (r != 0)
- break;
-
if (regno < 16) {
u32 pal;
pal = ((red >> (16 - var->red.length)) <<
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 9b9ec1468347..aef8a3042590 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -769,7 +769,7 @@ failed_free_fbmem:
dma_free_wc(fbi->dev, info->fix.smem_len,
info->screen_base, fbi->fb_start_dma);
failed_free_info:
- kfree(info);
+ framebuffer_release(info);
dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
return ret;
@@ -779,7 +779,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
{
struct pxa168fb_info *fbi = platform_get_drvdata(pdev);
struct fb_info *info;
- int irq;
unsigned int data;
if (!fbi)
@@ -799,8 +798,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
- irq = platform_get_irq(pdev, 0);
-
dma_free_wc(fbi->dev, info->fix.smem_len,
info->screen_base, info->fix.smem_start);
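The kfree() to framebuffer_release() change restores the allocator pairing: an fb_info obtained from framebuffer_alloc() carries framework state and must be released through the matching helper. A hedged error-path sketch, with the setup step (demo_setup_hw) purely illustrative:

/* Sketch of the corrected probe error path. */
static int demo_fb_probe(struct platform_device *pdev)
{
	struct fb_info *info;
	int ret;

	info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
	if (!info)
		return -ENOMEM;

	ret = demo_setup_hw(info);	/* hypothetical setup that may fail */
	if (ret)
		goto failed_free_info;

	return 0;

failed_free_info:
	framebuffer_release(info);	/* pairs with framebuffer_alloc() */
	return ret;
}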
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 74ffb446e00c..4279e13a3b58 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -36,7 +36,6 @@
#include "pxa3xx-gcu.h"
#define DRV_NAME "pxa3xx-gcu"
-#define MISCDEV_MINOR 197
#define REG_GCCR 0x00
#define GCCR_SYNC_CLR (1 << 9)
@@ -595,7 +594,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
* container_of(). This isn't really necessary as we have a fixed minor
* number anyway, but this is to avoid statics. */
- priv->misc_dev.minor = MISCDEV_MINOR,
+ priv->misc_dev.minor = PXA3XX_GCU_MINOR,
priv->misc_dev.name = DRV_NAME,
priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;
@@ -638,7 +637,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
ret = misc_register(&priv->misc_dev);
if (ret < 0) {
dev_err(dev, "misc_register() for minor %d failed\n",
- MISCDEV_MINOR);
+ PXA3XX_GCU_MINOR);
goto err_free_dma;
}
@@ -714,7 +713,7 @@ module_platform_driver(pxa3xx_gcu_driver);
MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(MISCDEV_MINOR);
+MODULE_ALIAS_MISCDEV(PXA3XX_GCU_MINOR);
MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, "
"Denis Oliver Kropp <dok@directfb.org>, "
"Daniel Mack <daniel@caiaq.de>");
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 8048499e398d..eaea8c373753 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -746,9 +746,9 @@ s1d13xxxfb_remove(struct platform_device *pdev)
}
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1);
+ resource_size(&pdev->resource[0]));
release_mem_region(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1);
+ resource_size(&pdev->resource[1]));
return 0;
}
@@ -788,14 +788,14 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
}
if (!request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1, "s1d13xxxfb mem")) {
+ resource_size(&pdev->resource[0]), "s1d13xxxfb mem")) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto bail;
}
if (!request_mem_region(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1, "s1d13xxxfb regs")) {
+ resource_size(&pdev->resource[1]), "s1d13xxxfb regs")) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto bail;
@@ -810,7 +810,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
default_par = info->par;
default_par->regs = ioremap(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1);
+ resource_size(&pdev->resource[1]));
if (!default_par->regs) {
printk(KERN_ERR PFX "unable to map registers\n");
ret = -ENOMEM;
@@ -819,7 +819,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
info->pseudo_palette = default_par->pseudo_palette;
info->screen_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1);
+ resource_size(&pdev->resource[0]));
if (!info->screen_base) {
printk(KERN_ERR PFX "unable to map framebuffer\n");
@@ -857,9 +857,9 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
info->fix = s1d13xxxfb_fix;
info->fix.mmio_start = pdev->resource[1].start;
- info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
+ info->fix.mmio_len = resource_size(&pdev->resource[1]);
info->fix.smem_start = pdev->resource[0].start;
- info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ info->fix.smem_len = resource_size(&pdev->resource[0]);
printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
default_par->regs, info->fix.smem_len / 1024, info->screen_base);
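Each of these hunks swaps the open-coded end - start + 1 for resource_size(); the helper (paraphrased from <linux/ioport.h>) is just:

/* Resource bounds are inclusive, so the size needs the +1 that the
 * open-coded call sites kept repeating (and occasionally mis-spacing).
 */
static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}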
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 5bb653db0cec..2d285cc384cf 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1053,7 +1053,7 @@ static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
}
/* Fake monspecs to fill in fbinfo structure */
-static struct fb_monspecs monspecs = {
+static const struct fb_monspecs monspecs = {
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 50,
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 4ea6f932b334..8a27d12e6ea8 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1572,7 +1572,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &sh_mobile_lcdc_overlay_ops;
info->device = priv->dev;
- info->screen_base = ovl->fb_mem;
+ info->screen_buffer = ovl->fb_mem;
info->par = ovl;
/* Initialize fixed screen information. Restrict pan to 2 lines steps
@@ -2056,7 +2056,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &sh_mobile_lcdc_ops;
info->device = priv->dev;
- info->screen_base = ch->fb_mem;
+ info->screen_buffer = ch->fb_mem;
info->pseudo_palette = &ch->pseudo_palette;
info->par = ch;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 142535267fec..12fa1050f3eb 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -89,7 +89,7 @@ struct ssd1307fb_par {
struct ssd1307fb_array {
u8 type;
- u8 data[0];
+ u8 data[];
};
static const struct fb_fix_screeninfo ssd1307fb_fix = {
@@ -791,6 +791,8 @@ static int ssd1307fb_remove(struct i2c_client *client)
pwm_disable(par->pwm);
pwm_put(par->pwm);
}
+ if (par->vbat_reg)
+ regulator_disable(par->vbat_reg);
fb_deferred_io_cleanup(info);
__free_pages(__va(info->fix.smem_start), get_order(info->fix.smem_len));
framebuffer_release(info);
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index ad26cbffbc6f..2d6e2738b792 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -61,9 +61,9 @@ struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
#define BITS_PER_PIXEL 16
/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
-static void *remapped_base;
-static void *remapped_regs;
-static void *remapped_fbuf;
+static void __iomem *remapped_base;
+static void __iomem *remapped_regs;
+static void __iomem *remapped_fbuf;
#define REMAPPED_FB_LEN 0x15ffff
@@ -635,7 +635,7 @@ static int w100fb_resume(struct platform_device *dev)
#endif
-int w100fb_probe(struct platform_device *pdev)
+static int w100fb_probe(struct platform_device *pdev)
{
int err = -EIO;
struct w100fb_mach_info *inf;
@@ -807,10 +807,11 @@ static int w100fb_remove(struct platform_device *pdev)
static void w100_soft_reset(void)
{
- u16 val = readw((u16 *) remapped_base + cfgSTATUS);
- writew(val | 0x08, (u16 *) remapped_base + cfgSTATUS);
+ u16 val = readw((u16 __iomem *)remapped_base + cfgSTATUS);
+
+ writew(val | 0x08, (u16 __iomem *)remapped_base + cfgSTATUS);
udelay(100);
- writew(0x00, (u16 *) remapped_base + cfgSTATUS);
+ writew(0x00, (u16 __iomem *)remapped_base + cfgSTATUS);
udelay(100);
}
@@ -1022,7 +1023,8 @@ struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
return pll_entry->pll_table;
pll_entry++;
} while (pll_entry->xtal_freq);
- return 0;
+
+ return NULL;
}
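The __iomem annotations make these MMIO pointers visible to sparse, which then enforces that they only flow through the I/O accessors. A minimal hedged sketch of the idiom:

#include <linux/io.h>

/* Sketch: an ioremap()ed region must be accessed via readw()/writew()
 * and friends, never dereferenced directly; sparse checks the __iomem
 * address space to catch violations.
 */
static u16 demo_read_reg(void __iomem *base, unsigned int off)
{
	return readw(base + off);
}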
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index b656eff58c23..8f4d674fa0d0 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -339,7 +339,7 @@ static int wm8505fb_probe(struct platform_device *pdev)
fbi->fb.fix.smem_start = fb_mem_phys;
fbi->fb.fix.smem_len = fb_mem_len;
- fbi->fb.screen_base = fb_mem_virt;
+ fbi->fb.screen_buffer = fb_mem_virt;
fbi->fb.screen_size = fb_mem_len;
fbi->contrast = 0x10;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 9c82e2a0a411..856a8c4e84a2 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -53,18 +53,14 @@ static void hdmi_infoframe_set_checksum(void *buffer, size_t size)
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
- *
- * Returns 0 on success or a negative error code on failure.
*/
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = HDMI_AVI_INFOFRAME_SIZE;
-
- return 0;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
@@ -1553,7 +1549,6 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
const void *buffer, size_t size)
{
const u8 *ptr = buffer;
- int ret;
if (size < HDMI_INFOFRAME_SIZE(AVI))
return -EINVAL;
@@ -1566,9 +1561,7 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AVI)) != 0)
return -EINVAL;
- ret = hdmi_avi_infoframe_init(frame);
- if (ret)
- return ret;
+ hdmi_avi_infoframe_init(frame);
ptr += HDMI_INFOFRAME_HEADER_SIZE;
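With the initializer now void, callers simply drop the error check, as the unpack path above does. A hedged sketch of the new calling convention (the wrapper function is hypothetical):

/* Sketch: initialization cannot fail, so the frame can be filled
 * immediately after the call.
 */
static void demo_build_avi(struct hdmi_avi_infoframe *frame)
{
	hdmi_avi_infoframe_init(frame);
	frame->pixel_repeat = 0;	/* then populate fields as before */
}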
diff --git a/drivers/video/logo/.gitignore b/drivers/video/logo/.gitignore
index 9dda1b26b2e4..5311d207c0b9 100644
--- a/drivers/video/logo/.gitignore
+++ b/drivers/video/logo/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
*_mono.c
*_vga16.c
*_clut224.c
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index d823d558c0c4..b690a8a4bf9e 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1553,8 +1553,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
#ifdef CONFIG_COMPAT
case VBG_IOCTL_HGCM_CALL_32(0):
f32bit = true;
+ fallthrough;
#endif
- /* Fall through */
case VBG_IOCTL_HGCM_CALL(0):
return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
case VBG_IOCTL_LOG(0):
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 50920b6fc319..7396187ee32a 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -311,7 +311,7 @@ static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
switch (type) {
default:
WARN_ON(1);
- /* Fall through */
+ fallthrough;
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;
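Both vboxguest hunks adopt the fallthrough pseudo-keyword, which (unlike a comment) the compiler can verify under -Wimplicit-fallthrough. A small hedged sketch:

#include <linux/compiler_attributes.h>
#include <linux/errno.h>

/* Sketch: the annotation documents the intent and silences the
 * implicit-fallthrough warning only where the fall-through is real.
 */
static int demo_classify(int type)
{
	switch (type) {
	case 0:
		type = 1;
		fallthrough;	/* deliberate: type 0 is handled like 1 */
	case 1:
		return type;
	default:
		return -EINVAL;
	}
}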
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 078615cf2afc..69a32dfc318a 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -43,6 +43,19 @@ config VIRTIO_PCI_LEGACY
If unsure, say Y.
+config VIRTIO_VDPA
+ tristate "vDPA driver for virtio devices"
+ depends on VDPA
+ select VIRTIO
+ help
+ This driver provides support for virtio-based paravirtual
+ device drivers over the vDPA bus. For this to be useful, you
+ need an appropriate vDPA device implementation that operates
+ on a physical device to allow the datapath of virtio to be
+ offloaded to hardware.
+
+ If unsure, say M.
+
config VIRTIO_PMEM
tristate "Support for virtio pmem driver"
depends on VIRTIO
@@ -58,6 +71,7 @@ config VIRTIO_BALLOON
tristate "Virtio balloon driver"
depends on VIRTIO
select MEMORY_BALLOON
+ select PAGE_REPORTING
---help---
This driver supports increasing and decreasing the amount
of memory within a KVM guest.
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 3a2b5c5dcf46..29a1386ecc03 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -6,3 +6,4 @@ virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
+obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 341458fd95ca..51086a5afdd4 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -14,11 +14,13 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
+#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pseudo_fs.h>
+#include <linux/page_reporting.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -27,7 +29,9 @@
*/
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
-#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
+/* Maximum number of (4k) pages to deflate on OOM notifications. */
+#define VIRTIO_BALLOON_OOM_NR_PAGES 256
+#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80
#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
__GFP_NOMEMALLOC)
@@ -47,6 +51,7 @@ enum virtio_balloon_vq {
VIRTIO_BALLOON_VQ_DEFLATE,
VIRTIO_BALLOON_VQ_STATS,
VIRTIO_BALLOON_VQ_FREE_PAGE,
+ VIRTIO_BALLOON_VQ_REPORTING,
VIRTIO_BALLOON_VQ_MAX
};
@@ -112,8 +117,15 @@ struct virtio_balloon {
/* Memory statistics */
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
- /* To register a shrinker to shrink memory upon memory pressure */
+ /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
struct shrinker shrinker;
+
+ /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
+ struct notifier_block oom_nb;
+
+ /* Free page reporting device */
+ struct virtqueue *reporting_vq;
+ struct page_reporting_dev_info pr_dev_info;
};
static struct virtio_device_id id_table[] = {
@@ -153,6 +165,33 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
}
+static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
+ struct scatterlist *sg, unsigned int nents)
+{
+ struct virtio_balloon *vb =
+ container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
+ struct virtqueue *vq = vb->reporting_vq;
+ unsigned int unused, err;
+
+ /* We should always be able to add these buffers to an empty queue. */
+ err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);
+
+ /*
+ * In the extremely unlikely case that an error does occur here,
+ * simply warn once and return without actually processing the pages.
+ */
+ if (WARN_ON_ONCE(err))
+ return err;
+
+ virtqueue_kick(vq);
+
+ /* When host has read buffer, this completes via balloon_ack */
+ wait_event(vb->acked, virtqueue_get_buf(vq, &unused));
+
+ return 0;
+}
+
static void set_page_pfns(struct virtio_balloon *vb,
__virtio32 pfns[], struct page *page)
{
@@ -481,6 +520,7 @@ static int init_vqs(struct virtio_balloon *vb)
names[VIRTIO_BALLOON_VQ_STATS] = NULL;
callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+ names[VIRTIO_BALLOON_VQ_REPORTING] = NULL;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
names[VIRTIO_BALLOON_VQ_STATS] = "stats";
@@ -492,6 +532,11 @@ static int init_vqs(struct virtio_balloon *vb)
callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
}
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
+ names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq";
+ callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
+ }
+
err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
vqs, callbacks, names, NULL, NULL);
if (err)
@@ -524,6 +569,9 @@ static int init_vqs(struct virtio_balloon *vb)
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
+ vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];
+
return 0;
}
@@ -532,7 +580,7 @@ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
&vb->config_read_bitmap))
virtio_cread(vb->vdev, struct virtio_balloon_config,
- free_page_report_cmd_id,
+ free_page_hint_cmd_id,
&vb->cmd_id_received_cache);
return vb->cmd_id_received_cache;
@@ -788,50 +836,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}
-static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
- unsigned long pages_to_free)
-{
- return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
- VIRTIO_BALLOON_PAGES_PER_PAGE;
-}
-
-static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
- unsigned long pages_to_free)
-{
- unsigned long pages_freed = 0;
-
- /*
- * One invocation of leak_balloon can deflate at most
- * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
- * multiple times to deflate pages till reaching pages_to_free.
- */
- while (vb->num_pages && pages_freed < pages_to_free)
- pages_freed += leak_balloon_pages(vb,
- pages_to_free - pages_freed);
-
- update_balloon_size(vb);
-
- return pages_freed;
-}
-
static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
- unsigned long pages_to_free, pages_freed = 0;
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- pages_to_free = sc->nr_to_scan;
-
- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
- pages_freed = shrink_free_pages(vb, pages_to_free);
-
- if (pages_freed >= pages_to_free)
- return pages_freed;
-
- pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed);
-
- return pages_freed;
+ return shrink_free_pages(vb, sc->nr_to_scan);
}
static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
@@ -839,12 +850,22 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
{
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- unsigned long count;
- count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
- count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
+ return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
+}
+
+static int virtio_balloon_oom_notify(struct notifier_block *nb,
+ unsigned long dummy, void *parm)
+{
+ struct virtio_balloon *vb = container_of(nb,
+ struct virtio_balloon, oom_nb);
+ unsigned long *freed = parm;
+
+ *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
+ VIRTIO_BALLOON_PAGES_PER_PAGE;
+ update_balloon_size(vb);
- return count;
+ return NOTIFY_OK;
}
static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
@@ -864,7 +885,6 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
- __u32 poison_val;
int err;
if (!vdev->config->get) {
@@ -930,27 +950,65 @@ static int virtballoon_probe(struct virtio_device *vdev)
VIRTIO_BALLOON_CMD_ID_STOP);
spin_lock_init(&vb->free_page_list_lock);
INIT_LIST_HEAD(&vb->free_page_list);
- if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
- memset(&poison_val, PAGE_POISON, sizeof(poison_val));
- virtio_cwrite(vb->vdev, struct virtio_balloon_config,
- poison_val, &poison_val);
- }
- }
- /*
- * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
- * shrinker needs to be registered to relieve memory pressure.
- */
- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
+ /*
+ * We're allowed to reuse any free pages, even if they are
+ * still to be processed by the host.
+ */
err = virtio_balloon_register_shrinker(vb);
if (err)
goto out_del_balloon_wq;
}
+
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
+ vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
+ vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
+ err = register_oom_notifier(&vb->oom_nb);
+ if (err < 0)
+ goto out_unregister_shrinker;
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
+ /* Start with poison val of 0 representing general init */
+ __u32 poison_val = 0;
+
+ /*
+ * Let the hypervisor know that we are expecting a
+ * specific value to be written back in balloon pages.
+ */
+ if (!want_init_on_free())
+ memset(&poison_val, PAGE_POISON, sizeof(poison_val));
+
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config,
+ poison_val, &poison_val);
+ }
+
+ vb->pr_dev_info.report = virtballoon_free_page_report;
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
+ unsigned int capacity;
+
+ capacity = virtqueue_get_vring_size(vb->reporting_vq);
+ if (capacity < PAGE_REPORTING_CAPACITY) {
+ err = -ENOSPC;
+ goto out_unregister_oom;
+ }
+
+ err = page_reporting_register(&vb->pr_dev_info);
+ if (err)
+ goto out_unregister_oom;
+ }
+
virtio_device_ready(vdev);
if (towards_target(vb))
virtballoon_changed(vdev);
return 0;
+out_unregister_oom:
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ unregister_oom_notifier(&vb->oom_nb);
+out_unregister_shrinker:
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ virtio_balloon_unregister_shrinker(vb);
out_del_balloon_wq:
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
destroy_workqueue(vb->balloon_wq);
@@ -989,7 +1047,11 @@ static void virtballoon_remove(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
+ page_reporting_unregister(&vb->pr_dev_info);
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ unregister_oom_notifier(&vb->oom_nb);
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
virtio_balloon_unregister_shrinker(vb);
spin_lock_irq(&vb->stop_update_lock);
vb->stop_update = true;
@@ -1045,7 +1107,10 @@ static int virtballoon_restore(struct virtio_device *vdev)
static int virtballoon_validate(struct virtio_device *vdev)
{
- if (!page_poisoning_enabled())
+ /* Tell the host whether we care about poisoned pages. */
+ if (!want_init_on_free() &&
+ (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) ||
+ !page_poisoning_enabled()))
__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
@@ -1058,6 +1123,7 @@ static unsigned int features[] = {
VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
VIRTIO_BALLOON_F_FREE_PAGE_HINT,
VIRTIO_BALLOON_F_PAGE_POISON,
+ VIRTIO_BALLOON_F_REPORTING,
};
static struct virtio_driver virtio_balloon_driver = {
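
Note the unit conversion in virtio_balloon_oom_notify() above: leak_balloon() counts 4 KiB balloon pages, while the OOM notifier reports freed memory in kernel page units, hence the division by VIRTIO_BALLOON_PAGES_PER_PAGE. A standalone check of that arithmetic, assuming a 64 KiB PAGE_SIZE kernel (constants mirror the driver):

#include <stdio.h>

#define PAGE_SIZE			(64 * 1024)	/* assumed kernel page size */
#define VIRTIO_BALLOON_PFN_SHIFT	12		/* balloon works in 4 KiB units */
#define PAGES_PER_PAGE			(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define OOM_NR_PAGES			256		/* 4 KiB pages deflated per event */

int main(void)
{
	/* 256 balloon pages == 16 kernel pages freed per notification */
	printf("%d kernel pages per OOM notification\n",
	       OOM_NR_PAGES / PAGES_PER_PAGE);
	return 0;
}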
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 5ae529671b3d..efaf65b0f42d 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -3,6 +3,7 @@
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/input.h>
+#include <linux/slab.h>
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_input.h>
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
new file mode 100644
index 000000000000..c30eb55030be
--- /dev/null
+++ b/drivers/virtio/virtio_vdpa.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VIRTIO based driver for vDPA device
+ *
+ * Copyright (c) 2020, Red Hat. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/virtio.h>
+#include <linux/vdpa.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+
+#define MOD_VERSION "0.1"
+#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define MOD_DESC "vDPA bus driver for virtio devices"
+#define MOD_LICENSE "GPL v2"
+
+struct virtio_vdpa_device {
+ struct virtio_device vdev;
+ struct vdpa_device *vdpa;
+ u64 features;
+
+ /* The lock to protect virtqueue list */
+ spinlock_t lock;
+ /* List of virtio_vdpa_vq_info */
+ struct list_head virtqueues;
+};
+
+struct virtio_vdpa_vq_info {
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+
+ /* the list node for the virtqueues list */
+ struct list_head node;
+};
+
+static inline struct virtio_vdpa_device *
+to_virtio_vdpa_device(struct virtio_device *dev)
+{
+ return container_of(dev, struct virtio_vdpa_device, vdev);
+}
+
+static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
+{
+ return to_virtio_vdpa_device(vdev)->vdpa;
+}
+
+static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
+ void *buf, unsigned len)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ ops->get_config(vdpa, offset, buf, len);
+}
+
+static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
+ const void *buf, unsigned len)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ ops->set_config(vdpa, offset, buf, len);
+}
+
+static u32 virtio_vdpa_generation(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->get_generation)
+ return ops->get_generation(vdpa);
+
+ return 0;
+}
+
+static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->get_status(vdpa);
+}
+
+static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->set_status(vdpa, status);
+}
+
+static void virtio_vdpa_reset(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->set_status(vdpa, 0);
+}
+
+static bool virtio_vdpa_notify(struct virtqueue *vq)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ ops->kick_vq(vdpa, vq->index);
+
+ return true;
+}
+
+static irqreturn_t virtio_vdpa_config_cb(void *private)
+{
+ struct virtio_vdpa_device *vd_dev = private;
+
+ virtio_config_changed(&vd_dev->vdev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
+{
+ struct virtio_vdpa_vq_info *info = private;
+
+ return vring_interrupt(0, info->vq);
+}
+
+static struct virtqueue *
+virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name, bool ctx)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct virtio_vdpa_vq_info *info;
+ struct vdpa_callback cb;
+ struct virtqueue *vq;
+ u64 desc_addr, driver_addr, device_addr;
+ unsigned long flags;
+ u32 align, num;
+ int err;
+
+ if (!name)
+ return NULL;
+
+ /* Queue shouldn't already be set up. */
+ if (ops->get_vq_ready(vdpa, index))
+ return ERR_PTR(-ENOENT);
+
+ /* Allocate and fill out our active queue description */
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ num = ops->get_vq_num_max(vdpa);
+ if (num == 0) {
+ err = -ENOENT;
+ goto error_new_virtqueue;
+ }
+
+ /* Create the vring */
+ align = ops->get_vq_align(vdpa);
+ vq = vring_create_virtqueue(index, num, align, vdev,
+ true, true, ctx,
+ virtio_vdpa_notify, callback, name);
+ if (!vq) {
+ err = -ENOMEM;
+ goto error_new_virtqueue;
+ }
+
+ /* Setup virtqueue callback */
+ cb.callback = virtio_vdpa_virtqueue_cb;
+ cb.private = info;
+ ops->set_vq_cb(vdpa, index, &cb);
+ ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
+
+ desc_addr = virtqueue_get_desc_addr(vq);
+ driver_addr = virtqueue_get_avail_addr(vq);
+ device_addr = virtqueue_get_used_addr(vq);
+
+ if (ops->set_vq_address(vdpa, index,
+ desc_addr, driver_addr,
+ device_addr)) {
+ err = -EINVAL;
+ goto err_vq;
+ }
+
+ ops->set_vq_ready(vdpa, index, 1);
+
+ vq->priv = info;
+ info->vq = vq;
+
+ spin_lock_irqsave(&vd_dev->lock, flags);
+ list_add(&info->node, &vd_dev->virtqueues);
+ spin_unlock_irqrestore(&vd_dev->lock, flags);
+
+ return vq;
+
+err_vq:
+ vring_del_virtqueue(vq);
+error_new_virtqueue:
+ ops->set_vq_ready(vdpa, index, 0);
+ /* The vDPA driver should make sure the vq is stopped here */
+ WARN_ON(ops->get_vq_ready(vdpa, index));
+ kfree(info);
+ return ERR_PTR(err);
+}
+
+static void virtio_vdpa_del_vq(struct virtqueue *vq)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
+ struct vdpa_device *vdpa = vd_dev->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct virtio_vdpa_vq_info *info = vq->priv;
+ unsigned int index = vq->index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vd_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vd_dev->lock, flags);
+
+ /* Select and deactivate the queue */
+ ops->set_vq_ready(vdpa, index, 0);
+ WARN_ON(ops->get_vq_ready(vdpa, index));
+
+ vring_del_virtqueue(vq);
+
+ kfree(info);
+}
+
+static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ virtio_vdpa_del_vq(vq);
+}
+
+static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[],
+ const bool *ctx,
+ struct irq_affinity *desc)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_callback cb;
+ int i, err, queue_idx = 0;
+
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
+ callbacks[i], names[i], ctx ?
+ ctx[i] : false);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto err_setup_vq;
+ }
+ }
+
+ cb.callback = virtio_vdpa_config_cb;
+ cb.private = vd_dev;
+ ops->set_config_cb(vdpa, &cb);
+
+ return 0;
+
+err_setup_vq:
+ virtio_vdpa_del_vqs(vdev);
+ return err;
+}
+
+static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->get_features(vdpa);
+}
+
+static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ return ops->set_features(vdpa, vdev->features);
+}
+
+static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
+ struct vdpa_device *vdpa = vd_dev->vdpa;
+
+ return dev_name(&vdpa->dev);
+}
+
+static const struct virtio_config_ops virtio_vdpa_config_ops = {
+ .get = virtio_vdpa_get,
+ .set = virtio_vdpa_set,
+ .generation = virtio_vdpa_generation,
+ .get_status = virtio_vdpa_get_status,
+ .set_status = virtio_vdpa_set_status,
+ .reset = virtio_vdpa_reset,
+ .find_vqs = virtio_vdpa_find_vqs,
+ .del_vqs = virtio_vdpa_del_vqs,
+ .get_features = virtio_vdpa_get_features,
+ .finalize_features = virtio_vdpa_finalize_features,
+ .bus_name = virtio_vdpa_bus_name,
+};
+
+static void virtio_vdpa_release_dev(struct device *_d)
+{
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+ struct virtio_vdpa_device *vd_dev =
+ container_of(vdev, struct virtio_vdpa_device, vdev);
+
+ kfree(vd_dev);
+}
+
+static int virtio_vdpa_probe(struct vdpa_device *vdpa)
+{
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
+ int ret = -EINVAL;
+
+ vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
+ if (!vd_dev)
+ return -ENOMEM;
+
+ vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
+ vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
+ vd_dev->vdev.config = &virtio_vdpa_config_ops;
+ vd_dev->vdpa = vdpa;
+ INIT_LIST_HEAD(&vd_dev->virtqueues);
+ spin_lock_init(&vd_dev->lock);
+
+ vd_dev->vdev.id.device = ops->get_device_id(vdpa);
+ if (vd_dev->vdev.id.device == 0)
+ goto err;
+
+ vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
+ ret = register_virtio_device(&vd_dev->vdev);
+ reg_dev = vd_dev;
+ if (ret)
+ goto err;
+
+ vdpa_set_drvdata(vdpa, vd_dev);
+
+ return 0;
+
+err:
+ if (reg_dev)
+ put_device(&vd_dev->vdev.dev);
+ else
+ kfree(vd_dev);
+ return ret;
+}
+
+static void virtio_vdpa_remove(struct vdpa_device *vdpa)
+{
+ struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);
+
+ unregister_virtio_device(&vd_dev->vdev);
+}
+
+static struct vdpa_driver virtio_vdpa_driver = {
+ .driver = {
+ .name = "virtio_vdpa",
+ },
+ .probe = virtio_vdpa_probe,
+ .remove = virtio_vdpa_remove,
+};
+
+module_vdpa_driver(virtio_vdpa_driver);
+
+MODULE_VERSION(MOD_VERSION);
+MODULE_LICENSE(MOD_LICENSE);
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_DESCRIPTION(MOD_DESC);
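
The bus driver above only consumes vdpa_config_ops; the producer is a separate vDPA device driver. A minimal skeleton of that side, limited to two of the ops this file calls (hypothetical names, error paths and the remaining mandatory ops elided; drivers/vdpa holds the complete examples):

#include <linux/vdpa.h>

static u32 my_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	return 1;	/* e.g. VIRTIO_ID_NET; 0 would make the probe above bail out */
}

static u16 my_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return 256;	/* largest ring the backend supports */
}

static const struct vdpa_config_ops my_vdpa_ops = {
	.get_device_id	= my_vdpa_get_device_id,
	.get_vq_num_max	= my_vdpa_get_vq_num_max,
	/* .set_vq_address, .set_vq_ready, .kick_vq, ... as used above */
};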
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 9ea2b43d4b01..0663c604bd64 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -584,6 +584,14 @@ config DAVINCI_WATCHDOG
NOTE: once enabled, this timer cannot be disabled.
Say N if you are unsure.
+config K3_RTI_WATCHDOG
+ tristate "Texas Instruments K3 RTI watchdog"
+ depends on ARCH_K3 || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here if you want to include support for the K3 watchdog
+ timer (RTI module) available in the K3 generation of processors.
+
config ORION_WATCHDOG
tristate "Orion watchdog"
depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110)
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 2ee352bf3372..6de2e4ceef19 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
+obj-$(CONFIG_K3_RTI_WATCHDOG) += rti_wdt.o
obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index f8d58bf0bf66..1fe472f56cb3 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -244,6 +244,11 @@ static const struct regmap_config imx2_wdt_regmap_config = {
.max_register = 0x8,
};
+static void imx2_wdt_action(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -292,6 +297,10 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, imx2_wdt_action, wdev->clk);
+ if (ret)
+ return ret;
+
regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
@@ -315,32 +324,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
*/
regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
- ret = watchdog_register_device(wdog);
- if (ret)
- goto disable_clk;
-
- dev_info(dev, "timeout %d sec (nowayout=%d)\n",
- wdog->timeout, nowayout);
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(wdev->clk);
- return ret;
-}
-
-static int __exit imx2_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdog = platform_get_drvdata(pdev);
- struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
-
- watchdog_unregister_device(wdog);
-
- if (imx2_wdt_is_running(wdev)) {
- imx2_wdt_ping(wdog);
- dev_crit(&pdev->dev, "Device removed: Expect reboot!\n");
- }
- return 0;
+ return devm_watchdog_register_device(dev, wdog);
}
static void imx2_wdt_shutdown(struct platform_device *pdev)
@@ -417,7 +401,6 @@ static const struct of_device_id imx2_wdt_dt_ids[] = {
MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids);
static struct platform_driver imx2_wdt_driver = {
- .remove = __exit_p(imx2_wdt_remove),
.shutdown = imx2_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
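
The imx2_wdt conversion works because devm_add_action_or_reset() queues the clock teardown alongside the other device-managed resources, which is what lets both the error path and the remove() callback disappear. A generic sketch of the idiom:

#include <linux/clk.h>
#include <linux/device.h>

static void disable_clk(void *data)
{
	clk_disable_unprepare(data);
}

static int example_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* Runs automatically on probe failure and on driver unbind. */
	return devm_add_action_or_reset(dev, disable_clk, clk);
}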
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 11b9e7c6b7f5..7993c8c41b3a 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -4,7 +4,6 @@
*/
#include <linux/clk.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index 8ed89f032ebf..60a32469f7de 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -6,13 +6,11 @@
#include <linux/arm-smccc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/io.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/reboot.h>
#include <linux/watchdog.h>
#define DEFAULT_TIMEOUT 60
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
index 9c773c3d6d5d..765577f11c8d 100644
--- a/drivers/watchdog/npcm_wdt.c
+++ b/drivers/watchdog/npcm_wdt.c
@@ -103,30 +103,29 @@ static int npcm_wdt_stop(struct watchdog_device *wdd)
return 0;
}
-
static int npcm_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
if (timeout < 2)
wdd->timeout = 1;
else if (timeout < 3)
- wdd->timeout = 2;
+ wdd->timeout = 2;
else if (timeout < 6)
- wdd->timeout = 5;
+ wdd->timeout = 5;
else if (timeout < 11)
- wdd->timeout = 10;
+ wdd->timeout = 10;
else if (timeout < 22)
- wdd->timeout = 21;
+ wdd->timeout = 21;
else if (timeout < 44)
- wdd->timeout = 43;
+ wdd->timeout = 43;
else if (timeout < 87)
- wdd->timeout = 86;
+ wdd->timeout = 86;
else if (timeout < 173)
- wdd->timeout = 172;
+ wdd->timeout = 172;
else if (timeout < 688)
- wdd->timeout = 687;
+ wdd->timeout = 687;
else
- wdd->timeout = 2750;
+ wdd->timeout = 2750;
if (watchdog_active(wdd))
npcm_wdt_start(wdd);
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 8e6dfe76f9c9..4ddb4ea2e4a3 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -52,7 +52,7 @@
#define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT)
static bool nowayout = WATCHDOG_NOWAYOUT;
-static int heartbeat = -1; /* module parameter (seconds) */
+static int heartbeat; /* module parameter (seconds) */
struct orion_watchdog;
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 1213179f863c..0937b8d33104 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -192,6 +192,7 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
wdt->wdev.timeout = PM8916_WDT_DEFAULT_TIMEOUT;
wdt->wdev.pretimeout = 0;
watchdog_set_drvdata(&wdt->wdev, wdt);
+ platform_set_drvdata(pdev, wdt);
watchdog_init_timeout(&wdt->wdev, 0, dev);
pm8916_wdt_configure_timers(&wdt->wdev);
@@ -199,6 +200,29 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, &wdt->wdev);
}
+static int __maybe_unused pm8916_wdt_suspend(struct device *dev)
+{
+ struct pm8916_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdev))
+ return pm8916_wdt_stop(&wdt->wdev);
+
+ return 0;
+}
+
+static int __maybe_unused pm8916_wdt_resume(struct device *dev)
+{
+ struct pm8916_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdev))
+ return pm8916_wdt_start(&wdt->wdev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pm8916_wdt_pm_ops, pm8916_wdt_suspend,
+ pm8916_wdt_resume);
+
static const struct of_device_id pm8916_wdt_id_table[] = {
{ .compatible = "qcom,pm8916-wdt" },
{ }
@@ -210,6 +234,7 @@ static struct platform_driver pm8916_wdt_driver = {
.driver = {
.name = "pm8916-wdt",
.of_match_table = of_match_ptr(pm8916_wdt_id_table),
+ .pm = &pm8916_wdt_pm_ops,
},
};
module_platform_driver(pm8916_wdt_driver);
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index eb47fe5ed280..ab7465d186fd 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -40,6 +40,11 @@ static const u32 reg_offset_data_kpss[] = {
[WDT_BITE_TIME] = 0x14,
};
+struct qcom_wdt_match_data {
+ const u32 *offset;
+ bool pretimeout;
+};
+
struct qcom_wdt {
struct watchdog_device wdd;
unsigned long rate;
@@ -179,19 +184,29 @@ static void qcom_clk_disable_unprepare(void *data)
clk_disable_unprepare(data);
}
+static const struct qcom_wdt_match_data match_data_apcs_tmr = {
+ .offset = reg_offset_data_apcs_tmr,
+ .pretimeout = false,
+};
+
+static const struct qcom_wdt_match_data match_data_kpss = {
+ .offset = reg_offset_data_kpss,
+ .pretimeout = true,
+};
+
static int qcom_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qcom_wdt *wdt;
struct resource *res;
struct device_node *np = dev->of_node;
- const u32 *regs;
+ const struct qcom_wdt_match_data *data;
u32 percpu_offset;
int irq, ret;
struct clk *clk;
- regs = of_device_get_match_data(dev);
- if (!regs) {
+ data = of_device_get_match_data(dev);
+ if (!data) {
dev_err(dev, "Unsupported QCOM WDT module\n");
return -ENODEV;
}
@@ -247,9 +262,8 @@ static int qcom_wdt_probe(struct platform_device *pdev)
/* check if there is pretimeout support */
irq = platform_get_irq_optional(pdev, 0);
- if (irq > 0) {
- ret = devm_request_irq(dev, irq, qcom_wdt_isr,
- IRQF_TRIGGER_RISING,
+ if (data->pretimeout && irq > 0) {
+ ret = devm_request_irq(dev, irq, qcom_wdt_isr, 0,
"wdt_bark", &wdt->wdd);
if (ret)
return ret;
@@ -267,7 +281,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
wdt->wdd.parent = dev;
- wdt->layout = regs;
+ wdt->layout = data->offset;
if (readl(wdt_addr(wdt, WDT_STS)) & 1)
wdt->wdd.bootstatus = WDIOF_CARDRESET;
@@ -311,9 +325,9 @@ static int __maybe_unused qcom_wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qcom_wdt_pm_ops, qcom_wdt_suspend, qcom_wdt_resume);
static const struct of_device_id qcom_wdt_of_table[] = {
- { .compatible = "qcom,kpss-timer", .data = reg_offset_data_apcs_tmr },
- { .compatible = "qcom,scss-timer", .data = reg_offset_data_apcs_tmr },
- { .compatible = "qcom,kpss-wdt", .data = reg_offset_data_kpss },
+ { .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr },
+ { .compatible = "qcom,scss-timer", .data = &match_data_apcs_tmr },
+ { .compatible = "qcom,kpss-wdt", .data = &match_data_kpss },
{ },
};
MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
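
The qcom-wdt change is the usual way to grow per-compatible match data: wrap the bare register-offset array in a struct so a new flag (here, pretimeout) travels with it. A lookup-side sketch with hypothetical names:

#include <linux/of_device.h>

struct my_match_data {
	const u32 *offset;
	bool pretimeout;
};

static int my_probe(struct device *dev)
{
	const struct my_match_data *data = of_device_get_match_data(dev);

	if (!data)
		return -ENODEV;	/* compatible matched without a .data pointer */

	/* data->offset and data->pretimeout now drive the rest of probe */
	return 0;
}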
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
new file mode 100644
index 000000000000..d456dd72d99a
--- /dev/null
+++ b/drivers/watchdog/rti_wdt.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Watchdog driver for the K3 RTI module
+ *
+ * (c) Copyright 2019-2020 Texas Instruments Inc.
+ * All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define DEFAULT_HEARTBEAT 60
+
+/* Max heartbeat is calculated assuming a 32 kHz source clock */
+#define MAX_HEARTBEAT 1000
+
+/* Timer register set definition */
+#define RTIDWDCTRL 0x90
+#define RTIDWDPRLD 0x94
+#define RTIWDSTATUS 0x98
+#define RTIWDKEY 0x9c
+#define RTIDWDCNTR 0xa0
+#define RTIWWDRXCTRL 0xa4
+#define RTIWWDSIZECTRL 0xa8
+
+#define RTIWWDRX_NMI 0xa
+
+#define RTIWWDSIZE_50P 0x50
+
+#define WDENABLE_KEY 0xa98559da
+
+#define WDKEY_SEQ0 0xe51a
+#define WDKEY_SEQ1 0xa35c
+
+#define WDT_PRELOAD_SHIFT 13
+
+#define WDT_PRELOAD_MAX 0xfff
+
+#define DWDST BIT(1)
+
+static int heartbeat;
+
+/*
+ * struct to hold data for each WDT device
+ * @base - base I/O address of the WD device
+ * @freq - source clock frequency of the WDT
+ * @wdd - watchdog device as used by the WDT core
+ */
+struct rti_wdt_device {
+ void __iomem *base;
+ unsigned long freq;
+ struct watchdog_device wdd;
+};
+
+static int rti_wdt_start(struct watchdog_device *wdd)
+{
+ u32 timer_margin;
+ struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+ /* set timeout period */
+ timer_margin = (u64)wdd->timeout * wdt->freq;
+ timer_margin >>= WDT_PRELOAD_SHIFT;
+ if (timer_margin > WDT_PRELOAD_MAX)
+ timer_margin = WDT_PRELOAD_MAX;
+ writel_relaxed(timer_margin, wdt->base + RTIDWDPRLD);
+
+ /*
+ * RTI only supports a windowed mode, where the watchdog can only
+ * be petted during the open window; neither too early nor too late.
+ * The HW configuration options only allow the open window to be 50%
+ * of the timeout or less; we obviously want to configure the open
+ * window as large as possible, so we select the 50% option. To avoid
+ * any glitches, we also accommodate a 5% safety margin, so we set up
+ * min_hw_heartbeat at 55% of the timeout period.
+ */
+ wdd->min_hw_heartbeat_ms = 11 * wdd->timeout * 1000 / 20;
+
+ /* Generate NMI when wdt expires */
+ writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
+
+ /* Open window size 50%; this is the largest window size available */
+ writel_relaxed(RTIWWDSIZE_50P, wdt->base + RTIWWDSIZECTRL);
+
+ readl_relaxed(wdt->base + RTIWWDSIZECTRL);
+
+ /* enable watchdog */
+ writel_relaxed(WDENABLE_KEY, wdt->base + RTIDWDCTRL);
+ return 0;
+}
+
+static int rti_wdt_ping(struct watchdog_device *wdd)
+{
+ struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+ /* put watchdog in service state */
+ writel_relaxed(WDKEY_SEQ0, wdt->base + RTIWDKEY);
+ /* put watchdog in active state */
+ writel_relaxed(WDKEY_SEQ1, wdt->base + RTIWDKEY);
+
+ return 0;
+}
+
+static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ u64 timer_counter;
+ u32 val;
+ struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+ /* if timeout has occurred then return 0 */
+ val = readl_relaxed(wdt->base + RTIWDSTATUS);
+ if (val & DWDST)
+ return 0;
+
+ timer_counter = readl_relaxed(wdt->base + RTIDWDCNTR);
+
+ do_div(timer_counter, wdt->freq);
+
+ return timer_counter;
+}
+
+static const struct watchdog_info rti_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING,
+ .identity = "K3 RTI Watchdog",
+};
+
+static const struct watchdog_ops rti_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rti_wdt_start,
+ .ping = rti_wdt_ping,
+ .get_timeleft = rti_wdt_get_timeleft,
+};
+
+static int rti_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *wdt_mem;
+ struct watchdog_device *wdd;
+ struct rti_wdt_device *wdt;
+ struct clk *clk;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ clk = clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ wdt->freq = clk_get_rate(clk);
+
+ clk_put(clk);
+
+ if (!wdt->freq) {
+ dev_err(dev, "Failed to get fck rate.\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "runtime pm failed\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+
+ wdd = &wdt->wdd;
+ wdd->info = &rti_wdt_info;
+ wdd->ops = &rti_wdt_ops;
+ wdd->min_timeout = 1;
+ wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) /
+ wdt->freq * 1000;
+ wdd->timeout = DEFAULT_HEARTBEAT;
+ wdd->parent = dev;
+
+ watchdog_init_timeout(wdd, heartbeat, dev);
+
+ watchdog_set_drvdata(wdd, wdt);
+ watchdog_set_nowayout(wdd, 1);
+ watchdog_set_restart_priority(wdd, 128);
+
+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(wdt->base)) {
+ ret = PTR_ERR(wdt->base);
+ goto err_iomap;
+ }
+
+ ret = watchdog_register_device(wdd);
+ if (ret) {
+ dev_err(dev, "cannot register watchdog device\n");
+ goto err_iomap;
+ }
+
+ return 0;
+
+err_iomap:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static int rti_wdt_remove(struct platform_device *pdev)
+{
+ struct rti_wdt_device *wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&wdt->wdd);
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rti_wdt_of_match[] = {
+ { .compatible = "ti,j7-rti-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rti_wdt_of_match);
+
+static struct platform_driver rti_wdt_driver = {
+ .driver = {
+ .name = "rti-wdt",
+ .of_match_table = rti_wdt_of_match,
+ },
+ .probe = rti_wdt_probe,
+ .remove = rti_wdt_remove,
+};
+
+module_platform_driver(rti_wdt_driver);
+
+MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");
+MODULE_DESCRIPTION("K3 RTI Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(MAX_HEARTBEAT) ", default "
+ __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rti-wdt");
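
The 55% figure used for min_hw_heartbeat_ms in rti_wdt_start() is the 50% open window plus the 5% safety margin, and the preload value is clamped to 12 bits after the 13-bit shift. A standalone check of both calculations (userspace; constants copied from the driver, 32 kHz clock assumed):

#include <stdint.h>
#include <stdio.h>

#define WDT_PRELOAD_SHIFT	13
#define WDT_PRELOAD_MAX		0xfff

int main(void)
{
	unsigned int timeout = 60;	/* seconds, DEFAULT_HEARTBEAT */
	unsigned long freq = 32768;	/* assumed 32 kHz source clock */

	uint64_t preload = ((uint64_t)timeout * freq) >> WDT_PRELOAD_SHIFT;
	if (preload > WDT_PRELOAD_MAX)
		preload = WDT_PRELOAD_MAX;

	/* Earliest allowed ping: 55% of the timeout, in milliseconds. */
	unsigned int min_hw_heartbeat_ms = 11 * timeout * 1000 / 20;

	printf("preload=%llu min_hw_heartbeat=%u ms\n",
	       (unsigned long long)preload, min_hw_heartbeat_ms);
	return 0;
}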
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 53e04926a7b2..190d26e2e75f 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -137,10 +137,14 @@ wdt_restart(struct watchdog_device *wdd, unsigned long mode, void *cmd)
{
struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+ writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
writel_relaxed(0, wdt->base + WDTCONTROL);
writel_relaxed(0, wdt->base + WDTLOAD);
writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
+ /* Flush posted writes. */
+ readl_relaxed(wdt->base + WDTLOCK);
+
return 0;
}
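
The sp805 fix illustrates two MMIO details: the control registers stay write-protected until the UNLOCK key is written to WDTLOCK, and the trailing readl_relaxed() forces the posted writes out to the device before the restart path continues. The flush idiom in isolation:

	writel_relaxed(val, base + REG);	/* may sit in a write buffer */
	(void)readl_relaxed(base + REG);	/* read back to flush it out */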
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 861daf4f37b2..423844757812 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -39,6 +39,10 @@
static DEFINE_IDA(watchdog_ida);
+static int stop_on_reboot = -1;
+module_param(stop_on_reboot, int, 0444);
+MODULE_PARM_DESC(stop_on_reboot, "Stop watchdogs on reboot (0=keep watching, 1=stop)");
+
/*
* Deferred Registration infrastructure.
*
@@ -254,6 +258,14 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
}
+ /* Module parameter to force watchdog policy on reboot. */
+ if (stop_on_reboot != -1) {
+ if (stop_on_reboot)
+ set_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
+ else
+ clear_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
+ }
+
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
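
With the core hunk above, the reboot policy can be forced globally rather than per driver. Assuming the usual parameter naming, that would be watchdog.stop_on_reboot=1 on the kernel command line when the core is built in, or, for a modular core, something like:

	modprobe watchdog stop_on_reboot=1	# hypothetical invocation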
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 8b5c742f24e8..7e4cd34a8c20 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -282,6 +282,7 @@ static int watchdog_start(struct watchdog_device *wdd)
if (err == 0) {
set_bit(WDOG_ACTIVE, &wdd->status);
wd_data->last_keepalive = started_at;
+ wd_data->last_hw_keepalive = started_at;
watchdog_update_worker(wdd);
}
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 030ce240620d..d96ad8f38bd2 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
-#include <linux/gpio.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
@@ -29,7 +28,6 @@ struct wm831x_wdt_drvdata {
struct watchdog_device wdt;
struct wm831x *wm831x;
struct mutex lock;
- int update_gpio;
int update_state;
};
@@ -103,14 +101,6 @@ static int wm831x_wdt_ping(struct watchdog_device *wdt_dev)
mutex_lock(&driver_data->lock);
- if (driver_data->update_gpio) {
- gpio_set_value_cansleep(driver_data->update_gpio,
- driver_data->update_state);
- driver_data->update_state = !driver_data->update_state;
- ret = 0;
- goto out;
- }
-
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
if (!(reg & WM831X_WDOG_RST_SRC)) {
@@ -239,23 +229,6 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT;
reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT;
- if (pdata->update_gpio) {
- ret = devm_gpio_request_one(dev, pdata->update_gpio,
- GPIOF_OUT_INIT_LOW,
- "Watchdog update");
- if (ret < 0) {
- dev_err(wm831x->dev,
- "Failed to request update GPIO: %d\n",
- ret);
- return ret;
- }
-
- driver_data->update_gpio = pdata->update_gpio;
-
- /* Make sure the watchdog takes hardware updates */
- reg |= WM831X_WDOG_RST_SRC;
- }
-
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg);
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index 4a363a8b2d20..cab86a08456b 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -422,7 +422,7 @@ static int ziirave_firm_upload(struct watchdog_device *wdd,
static const struct watchdog_info ziirave_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
- .identity = "Zodiac RAVE Watchdog",
+ .identity = "RAVE Switch Watchdog",
};
static const struct watchdog_ops ziirave_wdt_ops = {
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index f192b6f42da9..ec975decb5de 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -94,7 +94,7 @@ static int setup_cpu_watcher(struct notifier_block *notifier,
for_each_possible_cpu(cpu) {
if (vcpu_online(cpu) == 0) {
- (void)cpu_down(cpu);
+ device_offline(get_cpu_device(cpu));
set_cpu_present(cpu, false);
}
}
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 8edef51c92e5..64df919a2111 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -53,37 +53,37 @@ static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}
-static void evtchn_2l_clear_pending(unsigned port)
+static void evtchn_2l_clear_pending(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}
-static void evtchn_2l_set_pending(unsigned port)
+static void evtchn_2l_set_pending(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
sync_set_bit(port, BM(&s->evtchn_pending[0]));
}
-static bool evtchn_2l_is_pending(unsigned port)
+static bool evtchn_2l_is_pending(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}
-static bool evtchn_2l_test_and_set_mask(unsigned port)
+static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
}
-static void evtchn_2l_mask(unsigned port)
+static void evtchn_2l_mask(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
sync_set_bit(port, BM(&s->evtchn_mask[0]));
}
-static void evtchn_2l_unmask(unsigned port)
+static void evtchn_2l_unmask(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
unsigned int cpu = get_cpu();
@@ -173,7 +173,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
/* Timer interrupt has highest priority. */
irq = irq_from_virq(cpu, VIRQ_TIMER);
if (irq != -1) {
- unsigned int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
word_idx = evtchn / BITS_PER_LONG;
bit_idx = evtchn % BITS_PER_LONG;
if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
@@ -228,7 +228,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
do {
xen_ulong_t bits;
- int port;
+ evtchn_port_t port;
bits = MASK_LSBS(pending_bits, bit_idx);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 499eff7d3f65..3a791c8485d0 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -116,7 +116,7 @@ static void clear_evtchn_to_irq_all(void)
}
}
-static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{
unsigned row;
unsigned col;
@@ -143,7 +143,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
return 0;
}
-int get_evtchn_to_irq(unsigned evtchn)
+int get_evtchn_to_irq(evtchn_port_t evtchn)
{
if (evtchn >= xen_evtchn_max_channels())
return -1;
@@ -162,7 +162,7 @@ struct irq_info *info_for_irq(unsigned irq)
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
enum xen_irq_type type,
- unsigned evtchn,
+ evtchn_port_t evtchn,
unsigned short cpu)
{
int ret;
@@ -184,7 +184,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
}
static int xen_irq_info_evtchn_setup(unsigned irq,
- unsigned evtchn)
+ evtchn_port_t evtchn)
{
struct irq_info *info = info_for_irq(irq);
@@ -193,7 +193,7 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
static int xen_irq_info_ipi_setup(unsigned cpu,
unsigned irq,
- unsigned evtchn,
+ evtchn_port_t evtchn,
enum ipi_vector ipi)
{
struct irq_info *info = info_for_irq(irq);
@@ -207,7 +207,7 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
static int xen_irq_info_virq_setup(unsigned cpu,
unsigned irq,
- unsigned evtchn,
+ evtchn_port_t evtchn,
unsigned virq)
{
struct irq_info *info = info_for_irq(irq);
@@ -220,7 +220,7 @@ static int xen_irq_info_virq_setup(unsigned cpu,
}
static int xen_irq_info_pirq_setup(unsigned irq,
- unsigned evtchn,
+ evtchn_port_t evtchn,
unsigned pirq,
unsigned gsi,
uint16_t domid,
@@ -245,7 +245,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
/*
* Accessors for packed IRQ information.
*/
-unsigned int evtchn_from_irq(unsigned irq)
+evtchn_port_t evtchn_from_irq(unsigned irq)
{
if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))
return 0;
@@ -253,7 +253,7 @@ unsigned int evtchn_from_irq(unsigned irq)
return info_for_irq(irq)->evtchn;
}
-unsigned irq_from_evtchn(unsigned int evtchn)
+unsigned int irq_from_evtchn(evtchn_port_t evtchn)
{
return get_evtchn_to_irq(evtchn);
}
@@ -304,7 +304,7 @@ unsigned cpu_from_irq(unsigned irq)
return info_for_irq(irq)->cpu;
}
-unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
{
int irq = get_evtchn_to_irq(evtchn);
unsigned ret = 0;
@@ -330,9 +330,9 @@ static bool pirq_needs_eoi_flag(unsigned irq)
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
{
- int irq = get_evtchn_to_irq(chn);
+ int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info = info_for_irq(irq);
BUG_ON(irq == -1);
@@ -354,7 +354,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
*/
void notify_remote_via_irq(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
notify_remote_via_evtchn(evtchn);
@@ -445,7 +445,7 @@ static void xen_free_irq(unsigned irq)
irq_free_desc(irq);
}
-static void xen_evtchn_close(unsigned int port)
+static void xen_evtchn_close(evtchn_port_t port)
{
struct evtchn_close close;
@@ -472,7 +472,7 @@ static void pirq_query_unmask(int irq)
static void eoi_pirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
@@ -508,7 +508,7 @@ static unsigned int __startup_pirq(unsigned int irq)
{
struct evtchn_bind_pirq bind_pirq;
struct irq_info *info = info_for_irq(irq);
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
int rc;
BUG_ON(info->type != IRQT_PIRQ);
@@ -561,7 +561,7 @@ static void shutdown_pirq(struct irq_data *data)
{
unsigned int irq = data->irq;
struct irq_info *info = info_for_irq(irq);
- unsigned evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
BUG_ON(info->type != IRQT_PIRQ);
@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
static void __unbind_from_irq(unsigned int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
struct irq_info *info = irq_get_handler_data(irq);
if (info->refcnt > 0) {
@@ -827,7 +827,7 @@ int xen_pirq_from_irq(unsigned irq)
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
-int bind_evtchn_to_irq(unsigned int evtchn)
+int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
int irq;
int ret;
@@ -870,8 +870,8 @@ EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
- int evtchn, irq;
- int ret;
+ evtchn_port_t evtchn;
+ int ret, irq;
mutex_lock(&irq_mapping_update_lock);
@@ -909,7 +909,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
}
int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
- unsigned int remote_port)
+ evtchn_port_t remote_port)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
@@ -924,10 +924,11 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
-static int find_virq(unsigned int virq, unsigned int cpu)
+static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
{
struct evtchn_status status;
- int port, rc = -ENOENT;
+ evtchn_port_t port;
+ int rc = -ENOENT;
memset(&status, 0, sizeof(status));
for (port = 0; port < xen_evtchn_max_channels(); port++) {
@@ -939,7 +940,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
if (status.status != EVTCHNSTAT_virq)
continue;
if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
- rc = port;
+ *evtchn = port;
break;
}
}
@@ -962,7 +963,8 @@ EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
struct evtchn_bind_virq bind_virq;
- int evtchn, irq, ret;
+ evtchn_port_t evtchn = 0;
+ int irq, ret;
mutex_lock(&irq_mapping_update_lock);
@@ -988,9 +990,8 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
evtchn = bind_virq.port;
else {
if (ret == -EEXIST)
- ret = find_virq(virq, cpu);
+ ret = find_virq(virq, cpu, &evtchn);
BUG_ON(ret < 0);
- evtchn = ret;
}
ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
@@ -1019,7 +1020,7 @@ static void unbind_from_irq(unsigned int irq)
mutex_unlock(&irq_mapping_update_lock);
}
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags,
const char *devname, void *dev_id)
@@ -1040,7 +1041,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
- unsigned int remote_port,
+ evtchn_port_t remote_port,
irq_handler_t handler,
unsigned long irqflags,
const char *devname,
@@ -1132,7 +1133,7 @@ int xen_set_irq_priority(unsigned irq, unsigned priority)
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);
-int evtchn_make_refcounted(unsigned int evtchn)
+int evtchn_make_refcounted(evtchn_port_t evtchn)
{
int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info;
@@ -1153,7 +1154,7 @@ int evtchn_make_refcounted(unsigned int evtchn)
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
-int evtchn_get(unsigned int evtchn)
+int evtchn_get(evtchn_port_t evtchn)
{
int irq;
struct irq_info *info;
@@ -1186,7 +1187,7 @@ int evtchn_get(unsigned int evtchn)
}
EXPORT_SYMBOL_GPL(evtchn_get);
-void evtchn_put(unsigned int evtchn)
+void evtchn_put(evtchn_port_t evtchn)
{
int irq = get_evtchn_to_irq(evtchn);
if (WARN_ON(irq == -1))
@@ -1252,7 +1253,7 @@ void xen_hvm_evtchn_do_upcall(void)
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
-void rebind_evtchn_irq(int evtchn, int irq)
+void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
{
struct irq_info *info = info_for_irq(irq);
@@ -1284,7 +1285,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
-static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
+static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
int masked;
@@ -1342,7 +1343,7 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
static void enable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
@@ -1350,7 +1351,7 @@ static void enable_dynirq(struct irq_data *data)
static void disable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
if (VALID_EVTCHN(evtchn))
mask_evtchn(evtchn);
@@ -1358,7 +1359,7 @@ static void disable_dynirq(struct irq_data *data)
static void ack_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
if (!VALID_EVTCHN(evtchn))
return;
@@ -1385,7 +1386,7 @@ static void mask_ack_dynirq(struct irq_data *data)
static int retrigger_dynirq(struct irq_data *data)
{
- unsigned int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
int masked;
if (!VALID_EVTCHN(evtchn))
@@ -1440,7 +1441,8 @@ static void restore_pirqs(void)
static void restore_cpu_virqs(unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
- int virq, irq, evtchn;
+ evtchn_port_t evtchn;
+ int virq, irq;
for (virq = 0; virq < NR_VIRQS; virq++) {
if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
@@ -1465,7 +1467,8 @@ static void restore_cpu_virqs(unsigned int cpu)
static void restore_cpu_ipis(unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
- int ipi, irq, evtchn;
+ evtchn_port_t evtchn;
+ int ipi, irq;
for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
@@ -1489,7 +1492,7 @@ static void restore_cpu_ipis(unsigned int cpu)
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
clear_evtchn(evtchn);
@@ -1497,7 +1500,7 @@ void xen_clear_irq_pending(int irq)
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
set_evtchn(evtchn);
@@ -1505,7 +1508,7 @@ void xen_set_irq_pending(int irq)
bool xen_test_irq_pending(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
bool ret = false;
if (VALID_EVTCHN(evtchn))
@@ -1667,7 +1670,7 @@ module_param(fifo_events, bool, 0);
void __init xen_init_IRQ(void)
{
int ret = -EINVAL;
- unsigned int evtchn;
+ evtchn_port_t evtchn;
if (fifo_events)
ret = xen_evtchn_fifo_init();
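
The long run of signature changes above is a pure type cleanup: evtchn_port_t is the type the Xen ABI headers use for event-channel ports, so the kernel-side helpers now match the hypervisor interface instead of mixing int and unsigned. For reference, the interface definition amounts to (paraphrased from the Xen headers):

	typedef uint32_t evtchn_port_t;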
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 76b318e88382..c60ee0450173 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -82,7 +82,7 @@ static unsigned event_array_pages __read_mostly;
#endif
-static inline event_word_t *event_word_from_port(unsigned port)
+static inline event_word_t *event_word_from_port(evtchn_port_t port)
{
unsigned i = port / EVENT_WORDS_PER_PAGE;
@@ -140,7 +140,7 @@ static void init_array_page(event_word_t *array_page)
static int evtchn_fifo_setup(struct irq_info *info)
{
- unsigned port = info->evtchn;
+ evtchn_port_t port = info->evtchn;
unsigned new_array_pages;
int ret;
@@ -191,37 +191,37 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
/* no-op */
}
-static void evtchn_fifo_clear_pending(unsigned port)
+static void evtchn_fifo_clear_pending(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
-static void evtchn_fifo_set_pending(unsigned port)
+static void evtchn_fifo_set_pending(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
-static bool evtchn_fifo_is_pending(unsigned port)
+static bool evtchn_fifo_is_pending(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
-static bool evtchn_fifo_test_and_set_mask(unsigned port)
+static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
-static void evtchn_fifo_mask(unsigned port)
+static void evtchn_fifo_mask(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
-static bool evtchn_fifo_is_masked(unsigned port)
+static bool evtchn_fifo_is_masked(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
@@ -242,7 +242,7 @@ static void clear_masked(volatile event_word_t *word)
} while (w != old);
}
-static void evtchn_fifo_unmask(unsigned port)
+static void evtchn_fifo_unmask(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
@@ -270,7 +270,7 @@ static uint32_t clear_linked(volatile event_word_t *word)
return w & EVTCHN_FIFO_LINK_MASK;
}
-static void handle_irq_for_port(unsigned port)
+static void handle_irq_for_port(evtchn_port_t port)
{
int irq;
@@ -286,7 +286,7 @@ static void consume_one_event(unsigned cpu,
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
- unsigned port;
+ evtchn_port_t port;
event_word_t *word;
head = q->head[priority];
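For the FIFO ABI, each port owns one 32-bit event word, and the words are packed into pages; event_word_from_port() is just the divide/modulo below. A sketch with the constants spelled out (4 KiB pages assumed):

    #include <stdint.h>

    typedef uint32_t event_word_t;
    typedef uint32_t evtchn_port_t;

    #define PAGE_SIZE 4096u
    #define EVENT_WORDS_PER_PAGE (PAGE_SIZE / sizeof(event_word_t))

    static event_word_t *event_array[16];  /* trimmed stand-in for the driver's array */

    static event_word_t *event_word_from_port(evtchn_port_t port)
    {
        unsigned int i = port / EVENT_WORDS_PER_PAGE;        /* page index */

        return event_array[i] + port % EVENT_WORDS_PER_PAGE; /* word within page */
    }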
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 82938cff6c7a..10684feb094e 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -33,7 +33,7 @@ struct irq_info {
int refcnt;
enum xen_irq_type type; /* type */
unsigned irq;
- unsigned int evtchn; /* event channel */
+ evtchn_port_t evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
union {
@@ -60,12 +60,12 @@ struct evtchn_ops {
int (*setup)(struct irq_info *info);
void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
- void (*clear_pending)(unsigned port);
- void (*set_pending)(unsigned port);
- bool (*is_pending)(unsigned port);
- bool (*test_and_set_mask)(unsigned port);
- void (*mask)(unsigned port);
- void (*unmask)(unsigned port);
+ void (*clear_pending)(evtchn_port_t port);
+ void (*set_pending)(evtchn_port_t port);
+ bool (*is_pending)(evtchn_port_t port);
+ bool (*test_and_set_mask)(evtchn_port_t port);
+ void (*mask)(evtchn_port_t port);
+ void (*unmask)(evtchn_port_t port);
void (*handle_events)(unsigned cpu);
void (*resume)(void);
@@ -74,11 +74,11 @@ struct evtchn_ops {
extern const struct evtchn_ops *evtchn_ops;
extern int **evtchn_to_irq;
-int get_evtchn_to_irq(unsigned int evtchn);
+int get_evtchn_to_irq(evtchn_port_t evtchn);
struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
-unsigned cpu_from_evtchn(unsigned int evtchn);
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
static inline unsigned xen_evtchn_max_channels(void)
{
@@ -102,32 +102,32 @@ static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
evtchn_ops->bind_to_cpu(info, cpu);
}
-static inline void clear_evtchn(unsigned port)
+static inline void clear_evtchn(evtchn_port_t port)
{
evtchn_ops->clear_pending(port);
}
-static inline void set_evtchn(unsigned port)
+static inline void set_evtchn(evtchn_port_t port)
{
evtchn_ops->set_pending(port);
}
-static inline bool test_evtchn(unsigned port)
+static inline bool test_evtchn(evtchn_port_t port)
{
return evtchn_ops->is_pending(port);
}
-static inline bool test_and_set_mask(unsigned port)
+static inline bool test_and_set_mask(evtchn_port_t port)
{
return evtchn_ops->test_and_set_mask(port);
}
-static inline void mask_evtchn(unsigned port)
+static inline void mask_evtchn(evtchn_port_t port)
{
return evtchn_ops->mask(port);
}
-static inline void unmask_evtchn(unsigned port)
+static inline void unmask_evtchn(evtchn_port_t port)
{
return evtchn_ops->unmask(port);
}
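The reason this header touches so many one-line wrappers: events_internal.h routes every port operation through a per-ABI ops table (2-level vs. FIFO), so the evtchn_port_t change has to land in the struct, the inline wrappers, and both backends at once. The indirection, reduced to a sketch with stub backends:

    #include <stdint.h>

    typedef uint32_t evtchn_port_t;

    struct evtchn_ops {
        void (*clear_pending)(evtchn_port_t port);
        void (*mask)(evtchn_port_t port);
    };

    static void fifo_clear_pending(evtchn_port_t port) { (void)port; /* ... */ }
    static void fifo_mask(evtchn_port_t port)          { (void)port; /* ... */ }

    static const struct evtchn_ops fifo_ops = {
        .clear_pending = fifo_clear_pending,
        .mask          = fifo_mask,
    };

    static const struct evtchn_ops *evtchn_ops = &fifo_ops;

    /* each inline wrapper in events_internal.h reduces to one indirect call */
    static inline void clear_evtchn(evtchn_port_t port)
    {
        evtchn_ops->clear_pending(port);
    }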
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 052b55a14ebc..6e0b1dd5573c 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -83,7 +83,7 @@ struct per_user_data {
struct user_evtchn {
struct rb_node node;
struct per_user_data *user;
- unsigned port;
+ evtchn_port_t port;
bool enabled;
};
@@ -138,7 +138,8 @@ static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
kfree(evtchn);
}
-static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
+static struct user_evtchn *find_evtchn(struct per_user_data *u,
+ evtchn_port_t port)
{
struct rb_node *node = u->evtchns.rb_node;
@@ -163,7 +164,7 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
struct per_user_data *u = evtchn->user;
WARN(!evtchn->enabled,
- "Interrupt for port %d, but apparently not enabled; per-user %p\n",
+ "Interrupt for port %u, but apparently not enabled; per-user %p\n",
evtchn->port, u);
disable_irq_nosync(irq);
@@ -286,7 +287,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
mutex_lock(&u->bind_mutex);
for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
- unsigned port = kbuf[i];
+ evtchn_port_t port = kbuf[i];
struct user_evtchn *evtchn;
evtchn = find_evtchn(u, port);
@@ -361,7 +362,7 @@ static int evtchn_resize_ring(struct per_user_data *u)
return 0;
}
-static int evtchn_bind_to_user(struct per_user_data *u, int port)
+static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
{
struct user_evtchn *evtchn;
struct evtchn_close close;
@@ -423,7 +424,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
static DEFINE_PER_CPU(int, bind_last_selected_cpu);
-static void evtchn_bind_interdom_next_vcpu(int evtchn)
+static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
{
unsigned int selected_cpu, irq;
struct irq_desc *desc;
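find_evtchn(), whose signature changes above, is an ordinary rbtree descent over the per-user channels keyed by port; roughly the following, using the kernel rbtree helpers (field names follow the driver, body paraphrased rather than quoted):

    static struct user_evtchn *find_evtchn(struct per_user_data *u,
                                           evtchn_port_t port)
    {
        struct rb_node *node = u->evtchns.rb_node;

        while (node) {
            struct user_evtchn *evtchn =
                rb_entry(node, struct user_evtchn, node);

            if (port < evtchn->port)
                node = node->rb_left;
            else if (port > evtchn->port)
                node = node->rb_right;
            else
                return evtchn;
        }
        return NULL;
    }

With port unsigned on both sides, the comparisons above stay well defined with no sign-compare surprises.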
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 9a3960ecff6c..20d7d059dadb 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -15,6 +15,7 @@
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
+#include <xen/interface/event_channel.h>
struct gntdev_dmabuf_priv;
@@ -38,7 +39,7 @@ struct gntdev_unmap_notify {
int flags;
/* Address relative to the start of the gntdev_grant_map. */
int addr;
- int event;
+ evtchn_port_t event;
};
struct gntdev_grant_map {
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 0258415ca0b2..50651e566564 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -652,7 +652,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
struct gntdev_grant_map *map;
int rc;
int out_flags;
- unsigned int out_event;
+ evtchn_port_t out_event;
if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index c57c71b7d53d..cf4ce3e9358d 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -300,7 +300,7 @@ static struct sock_mapping *pvcalls_new_active_socket(
struct pvcalls_fedata *fedata,
uint64_t id,
grant_ref_t ref,
- uint32_t evtchn,
+ evtchn_port_t evtchn,
struct socket *sock)
{
int ret;
@@ -905,7 +905,8 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
static int backend_connect(struct xenbus_device *dev)
{
- int err, evtchn;
+ int err;
+ evtchn_port_t evtchn;
grant_ref_t ring_ref;
struct pvcalls_fedata *fedata = NULL;
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 57592a6b5c9e..b43b5595e988 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -368,12 +368,12 @@ out:
return -ENOMEM;
}
-static int create_active(struct sock_mapping *map, int *evtchn)
+static int create_active(struct sock_mapping *map, evtchn_port_t *evtchn)
{
void *bytes;
int ret = -ENOMEM, irq = -1, i;
- *evtchn = -1;
+ *evtchn = 0;
init_waitqueue_head(&map->active.inflight_conn_req);
bytes = map->active.data.in;
@@ -404,7 +404,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
return 0;
out_error:
- if (*evtchn >= 0)
+ if (*evtchn > 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
return ret;
}
@@ -415,7 +415,8 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
struct pvcalls_bedata *bedata;
struct sock_mapping *map = NULL;
struct xen_pvcalls_request *req;
- int notify, req_id, ret, evtchn;
+ int notify, req_id, ret;
+ evtchn_port_t evtchn;
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
@@ -765,7 +766,8 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
struct sock_mapping *map;
struct sock_mapping *map2 = NULL;
struct xen_pvcalls_request *req;
- int notify, req_id, ret, evtchn, nonblock;
+ int notify, req_id, ret, nonblock;
+ evtchn_port_t evtchn;
map = pvcalls_enter_sock(sock);
if (IS_ERR(map))
@@ -1125,7 +1127,8 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
static int pvcalls_front_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
- int ret = -ENOMEM, evtchn, i;
+ int ret = -ENOMEM, i;
+ evtchn_port_t evtchn;
unsigned int max_page_order, function_calls, len;
char *versions;
grant_ref_t gref_head = 0;
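The pvcalls-front hunks also show the knock-on effect of the type being unsigned: create_active() can no longer use -1 as the "not yet allocated" marker, and "*evtchn >= 0" would be vacuously true, so the sentinel becomes 0 (never a valid bound port). The shape of the fix, as a standalone sketch:

    #include <stdint.h>

    typedef uint32_t evtchn_port_t;

    /* hypothetical stand-in for xenbus_free_evtchn() */
    static void free_channel(evtchn_port_t port) { (void)port; }

    static int create_something(evtchn_port_t *evtchn, int fail_early)
    {
        *evtchn = 0;            /* unsigned sentinel; was "*evtchn = -1" */

        if (fail_early)
            goto out_error;     /* nothing bound yet: *evtchn still 0 */

        *evtchn = 5;            /* pretend a port was bound */
        goto out_error;         /* pretend a later setup step failed */

    out_error:
        if (*evtchn > 0)        /* was ">= 0", which is always true unsigned */
            free_channel(*evtchn);
        return -1;
    }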
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index b20e43e148ce..da51a5d34e6e 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -320,7 +320,7 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
if (val & PCI_MSIX_FLAGS_ENABLE)
ret |= INTERRUPT_TYPE_MSIX;
}
- return ret;
+ return ret ?: INTERRUPT_TYPE_NONE;
}
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 28c45180a12e..5fe431c79f25 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -65,10 +65,10 @@ struct config_field_entry {
void *data;
};
-#define INTERRUPT_TYPE_NONE (1<<0)
-#define INTERRUPT_TYPE_INTX (1<<1)
-#define INTERRUPT_TYPE_MSI (1<<2)
-#define INTERRUPT_TYPE_MSIX (1<<3)
+#define INTERRUPT_TYPE_NONE (0)
+#define INTERRUPT_TYPE_INTX (1<<0)
+#define INTERRUPT_TYPE_MSI (1<<1)
+#define INTERRUPT_TYPE_MSIX (1<<2)
extern bool xen_pcibk_permissive;
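The pciback renumbering makes INTERRUPT_TYPE_NONE equal to 0 instead of claiming bit 0, so "no interrupt type detected" is now falsy, and the conf_space.c hunk can supply the fallback with the GNU "?:" operator. Illustration:

    /* sketch of the renumbered flags and the GNU "?:" fallback */
    #define INTERRUPT_TYPE_NONE (0)       /* falsy: nothing detected */
    #define INTERRUPT_TYPE_INTX (1 << 0)
    #define INTERRUPT_TYPE_MSI  (1 << 1)
    #define INTERRUPT_TYPE_MSIX (1 << 2)

    static int get_interrupt_type(int have_msi, int have_msix)
    {
        int ret = 0;

        if (have_msi)
            ret |= INTERRUPT_TYPE_MSI;
        if (have_msix)
            ret |= INTERRUPT_TYPE_MSIX;

        /* GNU extension: "a ?: b" means "a ? a : b" without re-evaluating a */
        return ret ?: INTERRUPT_TYPE_NONE;
    }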
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 833b2d2c4318..f2115587855f 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -105,13 +105,13 @@ static void free_pdev(struct xen_pcibk_device *pdev)
}
static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
- int remote_evtchn)
+ evtchn_port_t remote_evtchn)
{
int err = 0;
void *vaddr;
dev_dbg(&pdev->xdev->dev,
- "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
+ "Attaching to frontend resources - gnt_ref=%d evtchn=%u\n",
gnt_ref, remote_evtchn);
err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr);
@@ -142,7 +142,8 @@ out:
static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
{
int err = 0;
- int gnt_ref, remote_evtchn;
+ int gnt_ref;
+ evtchn_port_t remote_evtchn;
char *magic = NULL;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ba0942e481bc..75c0a2e9a6db 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -854,7 +854,8 @@ unmap_page:
static int scsiback_map(struct vscsibk_info *info)
{
struct xenbus_device *dev = info->dev;
- unsigned int ring_ref, evtchn;
+ unsigned int ring_ref;
+ evtchn_port_t evtchn;
int err;
err = xenbus_gather(XBT_NIL, dev->otherend,
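For reference, the xenbus_gather() call that this hunk trails off into: it takes NULL-terminated (key, scanf-format, destination) triplets, and "%u" now matches the unsigned evtchn_port_t. A sketch of the call shape, with key names as this driver uses them:

    unsigned int ring_ref;
    evtchn_port_t evtchn;
    int err;

    err = xenbus_gather(XBT_NIL, dev->otherend,
                        "ring-ref", "%u", &ring_ref,
                        "event-channel", "%u", &evtchn,
                        NULL);
    if (err)
        return err;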
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e17ca8156171..040d2a43e8e3 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -391,7 +391,7 @@ EXPORT_SYMBOL_GPL(xenbus_grant_ring);
* error, the device will switch to XenbusStateClosing, and the error will be
* saved in the store.
*/
-int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
+int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
struct evtchn_alloc_unbound alloc_unbound;
int err;
@@ -414,7 +414,7 @@ EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
/**
* Free an existing event channel. Returns 0 on success or -errno on error.
*/
-int xenbus_free_evtchn(struct xenbus_device *dev, int port)
+int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
struct evtchn_close close;
int err;
@@ -423,7 +423,7 @@ int xenbus_free_evtchn(struct xenbus_device *dev, int port)
err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
if (err)
- xenbus_dev_error(dev, err, "freeing event channel %d", port);
+ xenbus_dev_error(dev, err, "freeing event channel %u", port);
return err;
}
@@ -448,7 +448,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr)
{
- return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
+ int err;
+
+ err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
+ /* Some hypervisors are buggy and can return 1. */
+ if (err > 0)
+ err = GNTST_general_error;
+
+ return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
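With the wrapper above normalizing a buggy hypervisor's positive return to GNTST_general_error, callers keep the conventional "nonzero is failure" test. A typical caller, sketched (assuming the usual xenbus frontend headers; not from the patch):

    static int map_front_ring(struct xenbus_device *dev, grant_ref_t gnt_ref,
                              void **ring)
    {
        int err = xenbus_map_ring_valloc(dev, &gnt_ref, 1, ring);

        if (err) {  /* err > 0 already arrives as GNTST_general_error */
            xenbus_dev_fatal(dev, err, "mapping ring page");
            return err;
        }
        return 0;
    }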
@@ -517,6 +524,48 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
return err;
}
+/**
+ * xenbus_unmap_ring
+ * @dev: xenbus device
+ * @handles: grant handle array
+ * @nr_handles: number of handles in the array
+ * @vaddrs: addresses to unmap
+ *
+ * Unmap memory in this domain that was imported from another domain.
+ * Returns 0 on success, or a GNTST_* status on error
+ * (see xen/include/interface/grant_table.h).
+ */
+static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
+ unsigned int nr_handles, unsigned long *vaddrs)
+{
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+ int i;
+ int err;
+
+ if (nr_handles > XENBUS_MAX_RING_GRANTS)
+ return -EINVAL;
+
+ for (i = 0; i < nr_handles; i++)
+ gnttab_set_unmap_op(&unmap[i], vaddrs[i],
+ GNTMAP_host_map, handles[i]);
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
+ BUG();
+
+ err = GNTST_okay;
+ for (i = 0; i < nr_handles; i++) {
+ if (unmap[i].status != GNTST_okay) {
+ xenbus_dev_error(dev, unmap[i].status,
+ "unmapping page at handle %d error %d",
+ handles[i], unmap[i].status);
+ err = unmap[i].status;
+ break;
+ }
+ }
+
+ return err;
+}
+
struct map_ring_valloc_hvm
{
unsigned int idx;
@@ -608,45 +657,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
return err;
}
-
-/**
- * xenbus_map_ring
- * @dev: xenbus device
- * @gnt_refs: grant reference array
- * @nr_grefs: number of grant reference
- * @handles: pointer to grant handle to be filled
- * @vaddrs: addresses to be mapped to
- * @leaked: fail to clean up a failed map, caller should not free vaddr
- *
- * Map pages of memory into this domain from another domain's grant table.
- * xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the pages to the specified address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
- * XenbusStateClosing and the first error message will be saved in XenStore.
- * Further more if we fail to map the ring, caller should check @leaked.
- * If @leaked is not zero it means xenbus_map_ring fails to clean up, caller
- * should not free the address space of @vaddr.
- */
-int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
- unsigned int nr_grefs, grant_handle_t *handles,
- unsigned long *vaddrs, bool *leaked)
-{
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
- int i;
-
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- for (i = 0; i < nr_grefs; i++)
- phys_addrs[i] = (unsigned long)vaddrs[i];
-
- return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
- phys_addrs, GNTMAP_host_map, leaked);
-}
-EXPORT_SYMBOL_GPL(xenbus_map_ring);
-
-
/**
* xenbus_unmap_ring_vfree
* @dev: xenbus device
@@ -859,51 +869,6 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
}
/**
- * xenbus_unmap_ring
- * @dev: xenbus device
- * @handles: grant handle array
- * @nr_handles: number of handles in the array
- * @vaddrs: addresses to unmap
- *
- * Unmap memory in this domain that was imported from another domain.
- * Returns 0 on success and returns GNTST_* on error
- * (see xen/include/interface/grant_table.h).
- */
-int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t *handles, unsigned int nr_handles,
- unsigned long *vaddrs)
-{
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
- int i;
- int err;
-
- if (nr_handles > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- for (i = 0; i < nr_handles; i++)
- gnttab_set_unmap_op(&unmap[i], vaddrs[i],
- GNTMAP_host_map, handles[i]);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
- BUG();
-
- err = GNTST_okay;
- for (i = 0; i < nr_handles; i++) {
- if (unmap[i].status != GNTST_okay) {
- xenbus_dev_error(dev, unmap[i].status,
- "unmapping page at handle %d error %d",
- handles[i], unmap[i].status);
- err = unmap[i].status;
- break;
- }
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
-
-
-/**
* xenbus_read_driver_state
* @path: path for driver
*
diff --git a/drivers/zorro/.gitignore b/drivers/zorro/.gitignore
index 34f980bd8ff6..acd6ffb8d77d 100644
--- a/drivers/zorro/.gitignore
+++ b/drivers/zorro/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
devlist.h
gen-devlist
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index fa23b7366b98..0dd7cbcec2b0 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -28,7 +28,7 @@
* zorro_device_id structure or %NULL if there is no match.
*/
-const struct zorro_device_id *
+static const struct zorro_device_id *
zorro_match_device(const struct zorro_device_id *ids,
const struct zorro_dev *z)
{
@@ -39,7 +39,6 @@ zorro_match_device(const struct zorro_device_id *ids,
}
return NULL;
}
-EXPORT_SYMBOL(zorro_match_device);
static int zorro_device_probe(struct device *dev)
@@ -120,9 +119,9 @@ EXPORT_SYMBOL(zorro_unregister_driver);
* @ids: array of Zorro device id structures to search in
* @dev: the Zorro device structure to match against
*
- * Used by a driver to check whether a Zorro device present in the
- * system is in its list of supported devices.Returns the matching
- * zorro_device_id structure or %NULL if there is no match.
+ * Used by the driver core to check whether a Zorro device present in the
+ * system is in a driver's list of supported devices. Returns 1 if
+ * supported, and 0 if there is no match.
*/
static int zorro_bus_match(struct device *dev, struct device_driver *drv)
@@ -134,12 +133,7 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv)
if (!ids)
return 0;
- while (ids->id) {
- if (ids->id == ZORRO_WILDCARD || ids->id == z->id)
- return 1;
- ids++;
- }
- return 0;
+ return !!zorro_match_device(ids, z);
}
static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
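Making zorro_match_device() static works because zorro_bus_match() was its only external user, and the wildcard walk it open-coded is identical; "!!" then folds the returned pointer into the 0/1 the driver core expects. The retained logic, trimmed to a standalone sketch (ZORRO_WILDCARD value illustrative; see linux/zorro.h):

    #include <stdint.h>

    struct zorro_device_id { uint32_t id; };   /* trimmed */

    #define ZORRO_WILDCARD 0xffffffffu         /* assumed; check linux/zorro.h */

    static const struct zorro_device_id *
    match_ids(const struct zorro_device_id *ids, uint32_t dev_id)
    {
        while (ids->id) {                      /* table ends at id == 0 */
            if (ids->id == ZORRO_WILDCARD || ids->id == dev_id)
                return ids;
            ids++;
        }
        return NULL;
    }

    static int bus_match(const struct zorro_device_id *ids, uint32_t dev_id)
    {
        return !!match_ids(ids, dev_id);       /* pointer -> 0/1 */
    }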
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 8eeb84c239db..47c733817903 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -41,7 +41,7 @@ struct zorro_dev *zorro_autocon;
struct zorro_bus {
struct device dev;
- struct zorro_dev devices[0];
+ struct zorro_dev devices[];
};
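devices[0] is the old GNU zero-length-array idiom; devices[] is the C99 flexible array member the kernel has been converting to tree-wide. Allocations size the trailing array explicitly (in-kernel, struct_size() from linux/overflow.h does this with overflow checking). Standalone sketch:

    #include <stdlib.h>

    struct zorro_dev_stub { int slotaddr; };     /* trimmed stand-in */

    struct zorro_bus_stub {
        int dummy_dev;                           /* stand-in for struct device */
        struct zorro_dev_stub devices[];         /* was devices[0] */
    };

    static struct zorro_bus_stub *alloc_bus(unsigned int n)
    {
        /* in the kernel: kzalloc(struct_size(bus, devices, n), GFP_KERNEL);
         * plain C spells the same size out by hand */
        return calloc(1, sizeof(struct zorro_bus_stub)
                         + n * sizeof(struct zorro_dev_stub));
    }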
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index ac0bab3412d9..f84df9fb4c20 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -1,5 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
+ /*
+ * Zorro bus
+ */
+
+extern struct bus_type zorro_bus_type;
+
+
#ifdef CONFIG_ZORRO_NAMES
extern void zorro_name_device(struct zorro_dev *z);
#else